2412.21012v1
http://arxiv.org/abs/2412.21012v1
Braidings for Non-Split Tambara-Yamagami Categories over the Reals
\documentclass[12pt,reqno]{amsart} \input{resources/preamble} \title{Braidings for Non-Split Tambara-Yamagami Categories over the Reals} \author[D. Green]{David Green} \address{Department of Mathematics, The Ohio State University} \email{[email protected]} \author[Y. Jiang]{Yoyo Jiang} \address{Department of Mathematics, Johns Hopkins University} \email{[email protected]} \author[S. Sanford]{Sean Sanford} \address{Department of Mathematics, The Ohio State University} \email{[email protected]} \begin{document} \begin{abstract} Non-split Real Tambara-Yamagami categories are a family of fusion categories over the real numbers that were recently introduced and classified by Plavnik, Sanford, and Sconce. We consider which of these categories admit braidings, and classify the resulting braided equivalence classes. We also prove some new results about the split real and split complex Tambara-Yamagami Categories. \end{abstract} \maketitle \input{resources/string-diagram-macros} \input{sections/section-1} \input{sections/background} \input{sections/group-action-analysis} \input{sections/split-real-case} \input{sections/real-quaternionic-case} \input{sections/real-complex-case} \input{sections/split-complex-case} \input{sections/split-complex-crossed-braided-case} \newpage \printbibliography \end{document} \usepackage[margin=1.25in]{geometry} \usepackage[utf8]{inputenc} \usepackage{amsmath, amssymb, amsthm} \usepackage{mathtools} \usepackage{anyfontsize} \usepackage{lmodern} \usepackage{microtype} \usepackage{enumitem} \usepackage{ifthen} \usepackage{environ} \usepackage{xfrac} \usepackage{pdflscape} \usepackage{esvect} \usepackage{bbm} \usepackage{bm} \usepackage{makecell} \usepackage{tikz} \usetikzlibrary{calc} \usetikzlibrary{knots} \usetikzlibrary{math} \usetikzlibrary{shapes} \usetikzlibrary{arrows} \usetikzlibrary{cd} \usetikzlibrary{intersections} \usepackage{xcolor} \colorlet{DarkGreen}{green!50!black} \colorlet{DarkRed}{red!90!black} \colorlet{DarkBlue}{blue!90!black} \newcommand{\tc}{\textcolor} \newcommand{\yj}[1]{\textcolor{DarkRed}{(Yoyo) #1}} \newcommand{\dg}[1]{\textcolor{DarkBlue}{(David) #1}} \newcommand{\sean}[1]{\textcolor{DarkGreen}{(Sean) #1}} \usepackage[pdfencoding=unicode,pdfusetitle]{hyperref} \hypersetup{colorlinks=true, linkcolor=blue, filecolor=purple, urlcolor=[rgb]{0 0 .6}, psdextra} \usepackage{todonotes} \setuptodonotes{color=cyan!25,size=\tiny} \setlength{\marginparwidth}{2cm} \usepackage[backend=biber, style=alphabetic, citestyle=alphabetic, url=false, isbn=false, maxnames=99, maxalphanames=99]{biblatex} \addbibresource{ref.bib} \newcommand{\trieq}[3]{\begin{bmatrix} {#1},{#2}\\ {#3} \end{bmatrix}} \newcommand{\tetr}[4]{\big\{\begin{smallmatrix} {#1},{#2},{#3}\\{#4} \end{smallmatrix}\big\}} \newcommand{\trih}[3]{\big\{\begin{smallmatrix} {#1},{#2}\\{#3} \end{smallmatrix}\big\}} \newcommand{\pent}[5]{\begin{pmatrix} {#1},{#2},{#3},{#4} \\ {#5}\end{pmatrix}} \hyphenation{Tambara-Yamagami} \renewcommand{\arraystretch}{1.5} \newcommand{\KK}{\mathbb K} \newcommand{\id}{\textsf{id}} \newcommand{\1}{\mathbbm{1}} \renewcommand{\c}{\mathcal} \newcommand{\s}{\mathcal} \newcommand{\bb}{\mathbb} \newcommand{\f}{\mathfrak} \DeclareMathOperator{\Set}{Set} \DeclareMathOperator{\Hom}{Hom} \DeclareMathOperator{\End}{End} \DeclareMathOperator{\Aut}{Aut} \DeclareMathOperator{\Out}{Out} \DeclareMathOperator{\Fun}{Fun} \DeclareMathOperator{\ev}{ev} \DeclareMathOperator{\coev}{coev} \DeclareMathOperator{\im}{im} \DeclareMathOperator{\BrPic}{BrPic} \DeclareMathOperator{\Br}{Br} 
\DeclareMathOperator{\hofib}{hofib} \DeclareMathOperator{\Pic}{Pic} \DeclareMathOperator{\Mod}{Mod} \DeclareMathOperator{\FinSet}{FinSet} \DeclareMathOperator{\FPdim}{FPdim} \DeclareMathOperator{\rep}{Rep} \DeclareMathOperator{\ob}{Ob} \DeclareMathOperator{\Rep}{Rep} \DeclareMathOperator{\Tr}{Tr} \DeclareMathOperator{\op}{op} \DeclareMathOperator{\Vect}{Vect} \DeclareMathOperator{\fd}{fd} \DeclareMathOperator{\Gal}{Gal} \DeclareMathOperator{\sgn}{sgn} \DeclareMathOperator{\Lan}{Lan} \DeclareMathOperator{\QF}{QF} \newcommand{\TY}{\mathsf{TY}} \newcommand{\C}{\mathcal{C}} \newcommand{\D}{\mathcal{D}} \newcommand{\cC}{\mathcal{C}} \newcommand{\cD}{\mathcal{D}} \newcommand{\cS}{\mathcal{S}} \makeatletter \newtheorem*{rep@theorem}{\rep@title} \newcommand{\newreptheorem}[2]{\newenvironment{rep#1}[1]{ \def\rep@title{#2 \ref{##1}} \begin{rep@theorem}} {\end{rep@theorem}}} \makeatother \theoremstyle{definition} \newtheorem{theorem}{Theorem}[section] \newreptheorem{theorem}{Theorem} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{lemma}[theorem]{Lemma} \theoremstyle{definition} \newtheorem{definition}[theorem]{Definition} \newtheorem{example}[theorem]{Example} \newtheorem{note}[theorem]{Note} \newtheorem{remark}[theorem]{Remark} \newtheorem{assumption}[theorem]{Assumption} \newtheorem{notation}[theorem]{Notation} \newtheorem{derivation}[theorem]{Derivation} \NewEnviron{tikzineqn}[1][]{\vcenter{\hbox{\begin{tikzpicture}[#1] \BODY \end{tikzpicture}}}} \newcommand{\arxiv}[1]{\href{http://arxiv.org/abs/#1}{\tt arXiv:\nolinkurl{#1}}} \newcommand{\arXiv}[1]{\href{http://arxiv.org/abs/#1}{\tt arXiv:\nolinkurl{#1}}} \newcommand{\doi}[1]{\href{http://dx.doi.org/#1}{{\tt DOI:#1}}} \newcommand{\euclid}[1]{\href{http://projecteuclid.org/getRecord?id=#1}{{\tt #1}}} \newcommand{\mathscinet}[1]{\href{http://www.ams.org/mathscinet-getitem?mr=#1}{\tt #1}} \newcommand{\googlebooks}[1]{(preview at \href{http://books.google.com/books?id=#1}{google books})} \tikzmath{ \x=1; \topratio=2/3; \beadsizenum=\x/2; } \def\beadsize{\beadsizenum cm} \tikzstyle{strand a} = [thick,DarkRed] \tikzstyle{strand b} = [thick,DarkGreen] \tikzstyle{strand c} = [thick,orange] \tikzstyle{strand ab} = [thick,orange] \tikzstyle{strand bc} = [thick,orange] \tikzstyle{strand abc} = [thick,DarkBrown] \tikzstyle{strand m} = [thick,black] \tikzstyle{node a} = [DarkRed] \tikzstyle{node b} = [DarkGreen] \tikzstyle{node c} = [orange] \tikzstyle{node ab} = [orange] \tikzstyle{node bc} = [orange] \tikzstyle{node abc} = [DarkBrown] \tikzstyle{node m} = [black] \tikzstyle{smallbead} = [circle, fill=blue!20, draw=black, inner sep=0, minimum size=\beadsize*0.7, font=\tiny] \tikzstyle{bead} = [circle, fill=blue!20, draw=black, inner sep=0, minimum size=\beadsize, font=\tiny] \tikzstyle{longbead} = [rectangle, fill=blue!20, rounded corners=2mm, draw=black, inner sep=1mm, minimum size=\beadsize, font=\tiny] \newcommand{\TrivalentVertex}[3]{ \coordinate (mid) at (0,0); \coordinate (top) at (0,1); \coordinate (bottom left) at (-1,-1); \coordinate (bottom right) at (1,-1); \draw[strand #1] (mid) to (bottom left) node[below left] {$#1$}; \draw[strand #2] (mid) to (bottom right) node[below right] {$#2$}; \draw[strand #3] (mid) to (top) node[above] {$#3$}; } \newcommand{\DagTrivalentVertex}[3]{ \coordinate (mid) at (0,0); \coordinate (bot) at (0,-1); \coordinate (top left) at (-1,1); \coordinate (top right) at (1,1); \draw[strand #1] (mid) to (top left) node[above left] {$#1$}; \draw[strand #2] (mid) to (top 
right) node[above right] {$#2$}; \draw[strand #3] (mid) to (bot) node[below] {$#3$}; } \newcommand{\TetraTransformBeads}[7]{ \coordinate (mid) at (0,0); \coordinate (top) at (0,\topratio*\x); \coordinate (bottom left) at (-\x,-\x); \coordinate (bottom right) at (\x,-\x); \coordinate (bottom mid) at (0,-\x); \coordinate (right vertex) at ($1/2*(bottom right)$); \coordinate (left vertex) at ($1/2*(bottom left)$); \draw[strand #2] (mid) to (top); \draw[strand #3] (mid) to (left vertex); \draw[strand #4] (mid) to (right vertex); \draw[strand #5] (left vertex) to (bottom left); \draw[strand #7] (right vertex) to (bottom right); \ifthenelse{ \equal{#1}{left}} { \draw[strand #6] (left vertex) to (bottom mid); }{ \draw[strand #6] (right vertex) to (bottom mid); } \node[node #2][above] at (top) {$#2$}; \node[node #5][below] at (bottom left) {$#5$}; \node[node #6][below] at (bottom mid) {$#6$}; \node[node #7][below] at (bottom right) {$#7$}; \ifthenelse{ \equal{#1}{left}} { \node[node #3][above left] at ($(0,0)!1/2!(left vertex)$) {$#3$}; }{ \node[node #4][above right] at ($(0,0)!1/2!(right vertex)$) {$#4$}; } } \newcommand{\TetraTransform}[7]{ \begin{tikzineqn} \coordinate (mid) at (0,0); \coordinate (top) at (0,\topratio*\x); \coordinate (bottom left) at (-\x,-\x); \coordinate (bottom right) at (\x,-\x); \coordinate (bottom mid) at (0,-\x); \coordinate (right vertex) at ($1/2*(bottom right)$); \coordinate (left vertex) at ($1/2*(bottom left)$); \draw[strand #2] (mid) to (top); \draw[strand #3] (mid) to (left vertex); \draw[strand #4] (mid) to (right vertex); \draw[strand #5] (left vertex) to (bottom left); \draw[strand #7] (right vertex) to (bottom right); \ifthenelse{ \equal{#1}{left}} { \draw[strand #6] (left vertex) to (bottom mid); }{ \draw[strand #6] (right vertex) to (bottom mid); } \node[node #2][above] at (top) {$#2$}; \node[node #5][below] at (bottom left) {$#5$}; \node[node #6][below] at (bottom mid) {$#6$}; \node[node #7][below] at (bottom right) {$#7$}; \ifthenelse{ \equal{#1}{left}} { \node[node #3][above left] at ($(0,0)!1/2!(left vertex)$) {$#3$}; }{ \node[node #4][above right] at ($(0,0)!1/2!(right vertex)$) {$#4$}; } \end{tikzineqn} } \newcommand{\DrawBead}[4][]{ \node[bead,#1] at ($(#2)!1/2!(#3)$) {$#4$}; } \newcommand{\DrawSmallBead}[4][]{ \node[smallbead,#1] at ($(#2)!1/2!(#3)$) {$#4$}; } \newcommand{\DrawLongBead}[4][]{ \node[longbead,#1] at ($(#2)!1/2!(#3)$) {$#4$}; } \newcommand{\AMBraidCrossing}{\begin{knot}[clip width=10] \strand[strand a] (-1,-1) node[below] {$a$} to (1,1); \strand[strand m] (1,-1) node[below] {$m$} to (-1,1); \end{knot}} \newcommand{\MABraidCrossing}{\begin{knot}[clip width=10] \strand[strand m] (-1,-1) node[below] {$m$} to (1,1); \strand[strand a] (1,-1) node[below] {$a$} to (-1,1); \end{knot}} \section{Introduction} In \cite{pss23}, Plavnik, Sconce and our third author introduced and classified three infinite families of fusion categories over the real numbers. These categories are analogues of the classical Tambara-Yamagami fusion categories introduced and classified in \cite{ty98}. This new version of Tambara-Yamagami (TY) categories allowed for non-split simple objects: simples whose endomorphism algebras are division algebras, and not just $\mathbb R$. These non-split TY categories generalize classical examples such as $\Rep_{\mathbb R}(Q_8)$ and $\Rep_{\mathbb R}(\mathbb Z/4\mathbb Z)$, but also include many new fusion categories that fail to admit a fiber functor, i.e. they are not even $\Rep(H)$ for a semisimple Hopf-algebra. 
This paper provides a classification of all possible braidings that exist on these new non-split TY categories. Since their introduction, TY categories have been studied and generalized extensively (including the closely related notion of \textit{near-group} categories) \cite{Tambara2000, MR2677836, Izumi_2021, GALINDO_2022,SchopierayNonDegenExtension, galindo2024modular}. Their complexity lies just above the pointed fusion categories, and well below that of general fusion categories. This intermediate complexity allows for deep analysis of their structure, while simultaneously providing examples of interesting properties that cannot be observed in the more simplistic pointed categories. For example, in \cite{Nikshych2007NongrouptheoreticalSH} Nikshych showed that some TY categories provide examples of non-group-theoretical (not even Morita equivalent to pointed) fusion categories that admit fiber functors. The physical motivation for extending this theory of TY categories to the real numbers comes from time reversal symmetry. A time reversal symmetry on a fusion category $\mathcal C$ over $\mathbb C$ is a categorical action of $\mathbb Z/2\mathbb Z$ by $\mathbb R$-linear monoidal functors on $\mathcal C$, that behaves as complex conjugation on $\End(\1)$. Real fusion categories then arise as the equivariantization $\mathcal C^{\mathbb Z/2\mathbb Z}$ of $\mathcal C$ with respect to such a time reversal action. In condensed matter terminology, fusion categories describe the topological field theory that arises in the low-energy limit of a gapped quantum field theory in (1+1)D. Thus real fusion categories describe time reversal symmetric topological quantum field theories (TQFTs) in (1+1)D. In the (2+1)D setting, time reversal symmetric TQFTs should be described by \emph{braided} fusion categories over the reals. With an eye toward time reversal symmetry in (2+1)D, in this paper we classify all possible braidings admitted by non-split TY categories over $\mathbb R$. We proceed in the style of Siehler \cite{sie00}, by distilling invariants of a braiding that follow from the hexagon equations. Next, we leverage the description of monoidal equivalences given in \cite{pss23} in order to determine which braiding invariants produce braided equivalent categories, thus establishing a classification. Along the way we describe all braided classifications for split real and split complex TY categories as well. In Section \ref{sec:CrossedBraided}, we observe that the complex/complex (see section for terminology) TY categories can never admit a braiding, due to the presence of Galois-nontrivial objects. In spite of this, these categories can carry a related structure known as a $\mathbb{Z}/2\mathbb{Z}$-crossed braiding, and we fully classify all such structures by using techniques analogous to those outlined above. \subsection{Results} For all the split and non-split real Tambara-Yamagami categories over $\mathbb R$, there turns out to be a unique family of bicharacters $\chi$ such that the associated Tambara-Yamagami category can possibly admit a braiding. As has appeared previously in the literature, the classification is in terms of $\Aut(A, \chi)$ orbits of \textit{$\chi$-admissible forms}, these are quadratic forms with coboundary $\chi$. The results are summarized below, under the assumption that the group of invertible objects is not trivial (see the theorem statements for precise results in these cases). 
\begin{center} \begin{tabular}{|c|c|c|c|c|c|c|} \hline Case: & Split Real & $\mathbb{R} / \mathbb{C}, \id$ & $\mathbb{R} / \mathbb{C}, \bar \cdot $ & $\mathbb{R} / \mathbb{H}$ & $\mathbb{C} / \mathbb{C}^*$ \\ \hline $\chi$-admissible orbits & 2 & 2 & 2 & 2 & 2 \\ \hline Orbits extending to braidings & 1 & 2 & 2 & 1 & 2 \\ \hline Braidings per orbit & 2 & Varies & 2 & 2 & 1 \\ \hline Total braidings & 2 & 3 & 4 & 2 & 2 \\ \hline Is $\tau$ an invariant? & Yes & No & Yes & Yes & No \\ \hline Is $\sigma_3(1)$ an invariant? & Yes & No & Yes & Yes & No \\ \hline \end{tabular} \end{center} The entries in the $\mathbb{C} / \mathbb{C}^*$ column refer to $\mathbb{Z}/2\mathbb{Z}$-crossed braidings. In contrast to the real case, there are three families of bicharacters (not all of which are defined on a given 2-group) on the split complex Tambara-Yamagami categories. These are distinguished by the multiplicity (mod 3) in $\chi$ of the form $\ell$ on $\mathbb{Z}/2\mathbb{Z}$ with $\ell(g,g) = -1$. We write $|\ell|$ for this number. In this case all orbits of quadratic forms extend to braidings. The results are summarized below, under the assumption that the group of invertibles is not too small (see the theorem statements for precise results in these cases). \begin{center} \begin{tabular}{|c|c|c|c|} \hline $|\ell|$ & 0 & 1 & 2 \\ \hline $\chi$-admissible orbits & 2 & 4 & 4 \\ \hline Braidings per orbit & 2 & 2 & 2 \\ \hline Total braidings & 4 & 8 & 8 \\ \hline \end{tabular} \end{center} Here $\tau$ and $\sigma_3(1)$ are always invariants, and the classification is up to \textit{complex}-linear functors. Next, we collect a table describing when the various braidings we define are symmetric or non-degenerate (notation conventions can be found in the relevant sections). \begin{center} \begin{tabular}{|c|c|c|} \hline Case & Symmetric? & Nondegenerate? \\ \hline Split Real & Always & Never \\ \hline Real/Quaternionic & Always & Never \\ \hline \makecell{Real/Complex, $g = \id_\mathbb{C},$ \\ $\sgn(\sigma) = \sgn(\tau)$ }& Never & Never \\ \hline \makecell{Real/Complex, $g = \id_\mathbb{C},$ \\ $\sgn(\sigma) = -\sgn(\tau)$ }& Never & Only when $A_0 = *$ \\ \hline Real/Complex, $g = \bar \cdot$ & Always & Never \\ \hline Split Complex, $|\ell| = 0$ & Only when $\sgn(\sigma) = \sgn(\tau)$ & \makecell{Only when $A = *$ and \\$\sgn(\sigma) = -\sgn(\tau)$} \\ \hline Split Complex, $|\ell| = 1$ & Never & Never \\ \hline Split Complex, $|\ell| = 2$ & Never & Never \\ \hline \end{tabular} \end{center} Some cases include multiple equivalence classes of braidings, but in all cases, the results in the table above are immediate from the classifications of braidings we give. The nondegenerate split complex categories are the well-known semion and reverse semion categories respectively. \subsection{Acknowledgements} This project began during Summer 2023 as part of the Research Opportunities in Mathematics for Underrepresented Students, supported by NSF grants DMS CAREER 1654159 and DMS 2154389. DG would like to thank the Isaac Newton Institute for Mathematical Sciences, Cambridge, for support and hospitality during the \textit{Topology, Representation theory and Higher Structures} programme where work on this paper was undertaken. This work was supported by EPSRC grant no EP/R014604/1. YJ was supported by the Woodrow Wilson Research Fellowship at Johns Hopkins University. DG, SS, and YJ would all like to thank David Penneys for his guidance and support. 
\section{Background} We refer the reader to \cite{EGNO15} for the basic theory of fusion categories and to \cite{pss23} and \cite{MR4806973} for the basics of (non-split) fusion categories over non-algebraically closed fields. \begin{definition}\label{defn:BraidedMonodialCategory} A braiding on a monoidal category $\C$ is a set of isomorphisms \[ \{\beta_{x,y}\colon x\otimes y \xrightarrow{} y\otimes x\}_{x,y\in \C} \] such that the following diagrams commute (omitting $\otimes$) \begin{equation}\begin{tikzcd}\label{defn:ForwardsHexagon} & {x(yz)} & {(yz)x} \\ {(xy)z} &&& {y(zx)} \\ & {(yx)z} & {y(xz)} \arrow["\alpha_{x,y,z}", from=2-1, to=1-2] \arrow["{\beta_{x,yz}}", from=1-2, to=1-3] \arrow["\alpha_{y,z,x}", from=1-3, to=2-4] \arrow["{\beta_{x,y}\otimes \id_z}"', from=2-1, to=3-2] \arrow["\alpha_{y,x,z}"', from=3-2, to=3-3] \arrow["{\id_y \otimes \beta_{x,z}}"', from=3-3, to=2-4] \end{tikzcd}\end{equation} \begin{equation}\begin{tikzcd}\label{defn:BackwardsHexagon} & {(xy)z} & {z(xy)} \\ {x(yz)} &&& {(zx)y} \\ & {x(zy)} & {(xz)y} \arrow["\alpha^{-1}_{x,y,z}", from=2-1, to=1-2] \arrow["{\beta_{xy,z}}", from=1-2, to=1-3] \arrow["\alpha^{-1}_{z,x,y}", from=1-3, to=2-4] \arrow["{\id_x \otimes \beta_{y,z}}"', from=2-1, to=3-2] \arrow["\alpha^{-1}_{x,z,y}"', from=3-2, to=3-3] \arrow["{\beta_{x,z}\otimes \id_y}"', from=3-3, to=2-4] \end{tikzcd}\end{equation} for all objects $x,y,z\in \C$, where $\alpha_{x,y,z}$ is the associator. We will refer to the commutativity of the top diagram as the hexagon axiom and of the bottom diagram as the inverse hexagon axiom. Note that these encode commutative diagrams of natural transformations. \end{definition} Our goal is to classify braiding structures on a fusion category $\C$ with a fixed monoidal structure. To do this, we will use the Yoneda lemma to show that the data defining abstract braiding isomorphisms is given by a finite set of linear maps between Hom-spaces, which we can then specify by their values on basis vectors. Specifically, a braiding on $\cC$ is given by a natural transformation $\beta\colon (-)\otimes (=) \Rightarrow (=)\otimes (-)$, a morphism in the category of linear functors from $\cC\times \cC\to \cC$. By semisimplicity, it suffices to consider the components of $\beta$ on simple objects, and by the Yoneda lemma, this data is given by a natural transformation in $\Fun(\cS_{\cC}^{\op}\times \cS_{\cC}^{op}\times \cS_{\cC}, \Vect_k^{\fd})$, i.e. a finite set of linear maps \[ \Hom_{\cC}(s\otimes t,u)\xrightarrow[]{\beta_{t,s}^{*}} \Hom_{\cC}(t\otimes s,u) \] natural in simple objects $s,t,u\in \cC$. Furthermore, by Schur's lemma, it suffices to check naturality on endomorphisms of $s$, $t$ and $u$, which is in particular vacuous if the category is split. After fixing a set of basis vectors for the Hom sets, this reduces to a set of matrix coefficients, which we will refer to as the braiding coefficients. Similarly, to check that $\beta$ satisfies the hexagon axioms, it suffices to check that for any $s,t,u,v\in \cC$ simple, the two linear maps \[ \Hom_\cC(t(us),v)\xrightarrow[]{} \Hom_\cC((st)u,v) \] obtained by precomposing the top and bottom paths of \eqref{defn:ForwardsHexagon} are equal, and similarly for the inverse hexagon axiom. With the choice of a basis for Hom-sets, this condition is given by the set of polynomial equations in terms in the braiding coefficients, which we will refer to as the braiding equations. 
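To illustrate this reduction in the simplest setting, we record the following example; it is included purely for illustration, and the Tambara-Yamagami cases are carried out in the sections below. \begin{example} Suppose that $\cC$ is split and pointed, so that every simple object is invertible, $\End(x)\cong k$ for each simple $x$, and the simple objects form an abelian group $A$ under the tensor product. If $\cC$ is moreover skeletal with trivial associators, then $a\otimes b=b\otimes a=ab$ as objects, so each braiding component is determined by a scalar $\sigma_0(a,b)\in k^{\times}$ via $\beta_{a,b}=\sigma_0(a,b)\cdot\id_{ab}$, and the hexagon and inverse hexagon axioms reduce to the braiding equations \[\sigma_0(a,bc)=\sigma_0(a,b)\,\sigma_0(a,c),\qquad \sigma_0(ab,c)=\sigma_0(a,c)\,\sigma_0(b,c)\,.\] Thus a braiding on such a category is precisely a bicharacter on $A$. The same procedure, applied to the Tambara-Yamagami fusion rules, produces the hexagon equations of Section \ref{sec:SplitReal}. \end{example}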
\section{Quadratic forms on elementary abelian 2-groups}\label{sec:QFAnalysis} Given a field $\mathbb K$, a quadratic form on a finite abelian group $A$ is a function $\sigma:A\to\mathbb K^\times$ such that $\sigma(x^{-1})=\sigma(x)$, and \[(\delta\sigma)(a,b)\,:=\frac{\sigma(ab)}{\sigma(a)\sigma(b)}\] is a bicharacter. When equipped with a quadratic form $\sigma$, the pair $(A,\sigma)$ is called a pre-metric group, and is called a metric group in the case where $\delta\sigma$ is nondegenerate. Pointed braided fusion categories $(\mathcal C,\{\beta_{X,Y}\}_{X,Y})$ over $\mathbb K$ are determined up to equivalence by their group of invertible objects $\mathrm{Inv}(\mathcal C)$ and the quadratic form $\sigma:\mathrm{Inv}(\mathcal C)\to\mathbb K^\times$ given by the formula \[\beta_{g,g}=\sigma(g)\cdot\id_{g^2}\,.\] In fact, this classification arises from an equivalence of categories, and is due to Joyal and Street in \cite[§3]{MR1250465} (their terminology differs from ours). This equivalence of categories implies that two pointed braided fusion categories are equivalent if and only if their corresponding pre-metric groups are isometric. Any braided TY category contains a pointed braided subcategory, and thus gives rise to a pre-metric group. Our analysis in the non-split TY cases will mirror that of the split cases, and it is interesting to note that the quadratic form that gives rise to a braiding on a TY category is a square root of the quadratic form on its own pointed subcategory. \begin{definition}\label{defn:ChiAdmissibleFunction} Given a bicharacter $\chi:A\times A\to\mathbb K^\times$, a quadratic form $\sigma:A\to\mathbb K^\times$ is said to be $\chi$-admissible if $\delta\sigma\,=\,\chi$. The collection of all $\chi$-admissible quadratic forms will be denoted $\QF_{\mathbb K}(\chi)$. For the majority of the paper, we are concerned with $\QF_{\mathbb R}(\chi)$, and so we simply write $\QF(\chi)$ when $\mathbb K=\mathbb R$. \end{definition} \begin{remark} In the literature the coboundary $\delta\sigma$ is often referred to as the associated bicharacter of the quadratic form $\sigma$ (see e.g. \cite[§2.11.1]{MR2609644}). Thus ``$\sigma$ is $\chi$-admissible'' is synonymous with ``the associated bicharacter of $\sigma$ is $\chi$''. We caution that our coboundary is inverted in order to align with the hexagon equations that appear later, though this is immaterial from a formal standpoint. Furthermore, in some conventions the phrase ``associated bicharacter'' or ``associated bilinear form'' refers to the square root of $\delta\sigma$ (see e.g. \cite[§7]{wall63}). Our general feeling is that while this square root is irrelevant for odd groups, it complicates the analysis unnecessarily for 2-groups, which are the main application in this paper. \end{remark} The group $\Aut(A, \chi)$ of automorphisms preserving the bicharacter acts on $\QF(\chi)$ by the formula $(f.\sigma)(a):=\sigma\big(f^{-1}(a)\big)$. We will be particularly concerned with the Klein four-group $K_4:=(\mathbb Z/2\mathbb Z)^2$ and powers $(\mathbb Z/2\mathbb Z)^n$ generally. We will occasionally think of $(\mathbb Z/2\mathbb Z)^n$ as an $\mathbb F_2$-vector space in order to refer to a basis, but we will still write the group multiplicatively. \begin{lemma} \label{lem:AdmissibleFunctionFromBasis} Given a bicharacter $\chi$ on $(\mathbb Z/2\mathbb Z)^n$, any set of values for $\sigma$ on a basis extends to a unique $\chi$-admissible quadratic form.
\end{lemma} \begin{proof} Begin with the tentative definition that $\sigma(ab):=\sigma(a)\sigma(b)\chi(a,b)$. By the generalized associativity theorem, $\sigma$ will be well-defined on arbitrary products so long as it satisfies $\sigma\big((ab)c\big)=\sigma\big(a(bc)\big)$. This property holds if and only if $\chi$ is a 2-cocycle, and since $\chi$ is actually a bicharacter, the result follows. \end{proof} A key tool in the analysis of quadratic forms is the Gauss sum. \begin{definition} Given a quadratic form $\sigma:A\to\mathbb K^\times$, the Gauss sum $\Sigma(\sigma)\in\mathbb K$ of $\sigma$ is the sum $\sum_{a\in A}\sigma(a)$. Occasionally we will write this as $\Sigma(A)$, when the quadratic form can be inferred. \end{definition} Recall that a subgroup $H\leq A$ is said to be \emph{isotropic} if $\sigma|_H=1$. Isotropic subgroups automatically satisfy $H\leq H^\perp$, where $H^\perp$ is the orthogonal complement of $H$ with respect to $\delta\sigma$. A metric group $(A,\sigma)$ is said to be \emph{anisotropic} if $\sigma(x)=1$ implies $x=1$. An isotropic subgroup is said to be \emph{Lagrangian} if $H=H^\perp$, and a pre-metric group is said to be \emph{hyperbolic} if it contains a Lagrangian subgroup. The following lemma records some important properties of Gauss sums with respect to isotropic subgroups. \begin{lemma}[{\cite[cf. Sec 6.1]{MR2609644}}]\label{lem:GaussSumProperties} Let $(A,\sigma)$ be a pre-metric group. \begin{enumerate}[label=(\roman*)] \item For any isotropic subgroup $H\leq A$, $\Sigma(A)=|H|\cdot\Sigma(H^\perp/H)$. \item If $A$ is hyperbolic, then $\Sigma(A)$ is a positive integer. \item If $\Sigma(A)$ is a positive integer, and $|A|$ is a prime power, then $A$ is hyperbolic. \item The Gauss sum is multiplicative with respect to orthogonal direct sums, i.e. $\Sigma\left(\bigoplus_iA_i\right)=\prod_i\Sigma(A_i)\,.$ \end{enumerate} \end{lemma} The following pre-metric groups will appear throughout this article, and so we give them some notation. \begin{definition}\label{def:StandardHyperbolic} The \emph{standard hyperbolic} pairing on $K_4=\langle a,b\rangle$ is the nondegenerate bicharacter $h(a^ib^j,a^kb^\ell)=(-1)^{i\ell}$. There are two isometry classes of $h$-admissible quadratic forms over $\mathbb R$, and they are distinguished by the rules: \begin{itemize} \item $q_+(x)=-1$ for exactly 1 element $x\in K_4$, or \item $q_-(x)=-1$ for all $x\in K_4\setminus\{1\}$. \end{itemize} We will call the corresponding metric groups $K_{4,\pm}=(K_4,q_\pm)$ respectively. Note that $K_{4,+}$ is hyperbolic, whereas $K_{4,-}$ is anisotropic. \end{definition} \begin{remark} The terms hyperbolic, (an)isotropic, and Lagrangian all have analogues for bilinear forms, but the connection between the bilinear form terminology and the quadratic form terminology can be subtle. For example, an element $a\in A$ is called isotropic with respect to $\chi$ if $\chi(a,-)$ is trivial, and this does not imply that $\sigma(a)=1$ in the case that $\chi=\delta\sigma$. The use of the word \emph{hyperbolic} in Definition \ref{def:StandardHyperbolic} refers to the fact that $h$ has a Lagrangian subgroup \emph{as a bilinear form} (bicharacter). Note in particular that non-hyperbolic quadratic forms can give rise to hyperbolic bicharacters. \end{remark} Observe that for any pre-metric group $(A,\sigma)$, its `norm-square' $(A,\sigma)\oplus(A,\sigma^{-1})$ is hyperbolic via the diagonal embedding, so in particular $(K_{4,-})^2$ is hyperbolic. In fact, more can be said.
The isomorphism that sends the ordered basis $(a_1,b_1,a_2,b_2)$ to $(a_1,b_1b_2,a_1a_2,b_2)$ preserves $h^2$, and provides an isometry $(K_{4,-})^2\cong(K_{4,+})^2$. This observation leads to the following result. \begin{proposition} \label{prop:OrbitEquivalenceCharacterization} Suppose $\mathbb K=\mathbb R$, and that there is some basis for $K_4^n$ with respect to which $\delta\sigma=h^n$. The metric group $(K_{4}^n,\sigma)$ is hyperbolic if and only if $\Sigma(\sigma)=2^n$, and in this case, $(K_{4}^n,\sigma)\cong(K_{4,+})^n$. If not, then $\Sigma(\sigma)=-2^n$ and $(K_{4}^n,\sigma)\cong K_{4,-}\oplus (K_{4,+})^{n-1}$. \end{proposition} \begin{proof} By hypothesis, we can choose some basis for which $\delta\sigma=h^n$, and in this way, establish an isometry $(K_4^n,\sigma)\cong(K_{4,-})^k\oplus(K_{4,+})^{n-k}$. By our previous observation, $(K_{4,-})^2\cong(K_{4,+})^2$, and so copies of $(K_{4,-})$ can be canceled out in pairs until there is at most one copy left. The Gauss sum condition then follows from Lemma \ref{lem:GaussSumProperties} parts (ii), (iii), and (iv). \end{proof} Because the sign of the Gauss sum of the pre-metric group $(K_4^n,\sigma)$ determines its isometry class (assuming $\delta\sigma=h^n$), it will be convenient to establish some notation. \begin{notation}\label{not:QF} For any $\sigma\in\QF(h^n)$, the sign $\sgn(\sigma)$ of the quadratic form $\sigma\colon K_4^n\to\mathbb R^\times$ is \[\sgn(\sigma):=\frac{\Sigma(\sigma)}{|\Sigma(\sigma)|}\, .\] We write $\QF_+^n$ and $\QF_-^n$ for the sets of $h^{n}$-admissibles with positive and negative sign, respectively. \end{notation} \begin{proposition} \label{prop:StabilizerCombinatorics} For all $n \geq 0$, \begin{align*} |\QF_+^n| &= 2^{n - 1}(2^n + 1) \\ |\QF^n_-| &= 2^{n - 1}(2^n - 1) = 2^{2n} - |\QF^n_+| \end{align*} Moreover, let $H^n_\pm$ be the stabilizers in $\Aut(K_4^n, h^{n})$ of elements in $\QF^n_\pm$. Then \begin{align*} |H^n_+| &= 2^{n^2 -n + 1}(2^n - 1)\prod_{i=1}^{n - 1}(2^{2i} - 1) \\ |H^n_-| &= 2^{n^2 -n + 1}(2^n + 1)\prod_{i=1}^{n - 1}(2^{2i} - 1) \end{align*} \end{proposition} \begin{proof} We begin with the first part of the theorem. Evaluation on the ordered basis $(a_1, b_1, a_2, b_2, \dots, a_n, b_n)$ induces a map $V \colon \QF(\chi) \to (\{ \pm 1 \} \times \{\pm 1\})^n$. By Lemma \ref{lem:AdmissibleFunctionFromBasis}, $V$ is a bijection. The proof of Proposition \ref{prop:OrbitEquivalenceCharacterization} shows that $(K_4^n, \sigma)$ is hyperbolic if and only if $(-1, -1)$ occurs an even number of times in the sequence $V(\sigma)$. We obtain a formula for the number of such sequences from the OEIS (\cite[A007582]{oeis}). Subtracting this number from the total number of quadratic forms gives the second equation. By Theorem 6.18 of \cite{jacobson2009basic}, \[ |\Aut(A, \chi)| = 2^{n^2}\prod_{i = 1}^{n} (2^{2i} - 1). \] The second part then follows by the orbit-stabilizer theorem. \end{proof} Let $\ell$ be the bicharacter which takes the value $-1$ on the non-trivial element of $\mathbb{Z}/2\mathbb{Z}$. Observe that $\QF_{\mathbb R}(\ell^2)=\emptyset$, whereas $|\QF_{\mathbb C}(\ell^2)|=4$. Two of these forms over $\mathbb C$ are isometric to one another, so we find that there are exactly three isometry classes of quadratic forms on $K_4$ inducing $\ell^{2}$. \begin{proposition}\label{prop:StabilizerCombinatorics2ElectricBoogaloo} Let $n > 0$. Then there are exactly four equivalence classes of complex-valued quadratic forms on $K_4^n \times K_4$ inducing $h^{n} \oplus \ell^{2}$.
When $n = 0$, there are three. \end{proposition} \begin{proof} By the observation preceding the proposition, we may assume $n > 0$. A quadratic form on $K_4^n \times K_4$ with coboundary $h^{n} \oplus \ell^{2}$ determines and is uniquely determined by a pair of quadratic forms on $K_4^n$ and $K_4$ with coboundaries $h^{n}$ and $\ell^2$ respectively. So there are at most six equivalence classes of quadratic forms with coboundary $h^{n} \oplus \ell^{2}$. We claim there are exactly four. Let us fix some notation. We label the elements of the first factor $K_4^n$ by $a_k$ and $b_k$, and we let $g_1, g_2$ be the two elements of the second factor with self-pairing $-1$. Given a triple of signs $(\kappa, \epsilon_1, \epsilon_2)$ we denote by $\sigma(\kappa,\epsilon_1, \epsilon_2)$ the quadratic form with $$\sgn(\sigma|_{K_4^n}) = \kappa, \quad q(g_k) = i\epsilon_k.$$ Using the multiplicativity of the Gauss sum from Lemma \ref{lem:GaussSumProperties}, the Gauss sums of these forms are given by the formula \[\Sigma\big(\sigma(\kappa,\epsilon_1,\epsilon_2)\big)\;=\;(\kappa\cdot2^n)\cdot(1+i\epsilon_1)\cdot(1+i\epsilon_2)\,.\] We collect the various values $\Sigma\big(\sigma(\kappa,\epsilon_1,\epsilon_2)\big)$ into a table: \begin{center} \begin{tabular}{|c|c|c|c|c|c|c|} \hline $(\kappa, \epsilon_1, \epsilon_2)$ & $(+--)$ & $(+++)$ & $(+-+)$ & $(---)$ &$(-++)$ & $(--+)$ \\ \hline $\Sigma\big(\sigma(\kappa, \epsilon_1, \epsilon_2)\big)$ & $-2^{n + 1}i$ & $2^{n + 1}i$ & $2^{n + 1}$ & $2^{n + 1}i$ & $-2^{n + 1}i$ & $-2^{n + 1}$ \\\hline \end{tabular} \end{center} Now let $f$ be the automorphism with $$f(a_1) = a_1g_1g_2, f(b_1) = b_1g_1g_2, f(g_1) = a_1b_1g_1, f(g_2) = a_1b_1g_2$$ and which fixes $a_j, b_j$ for $j > 1$. Direct computations show that $f$ interchanges the forms $(---)$ and $(+++)$, as well as $(+--)$ and $(-++)$, fixes the remaining two equivalence classes, and preserves $h^{n} \oplus \ell ^{2}$. The calculations of the Gauss sums in the above table show that the resulting equivalence classes are indeed distinct. \end{proof} We conclude with a recognition theorem for the powers of the standard hyperbolic pairing $h^n$ due to Wall \cite{wall63} (see \cite{MR743731} for another exposition). \begin{theorem} \label{thm:WallClassification} Let $\chi$ be a symmetric nondegenerate bilinear form on $(\mathbb Z /2\mathbb Z)^n$. Suppose moreover that $\chi(a, a) = 1$ for all $a \in (\mathbb Z /2\mathbb Z)^n$. Then $((\mathbb Z /2\mathbb Z)^n, \chi)$ is isomorphic to a power of the standard hyperbolic pairing. In particular, $n$ must be even. \end{theorem} \section{Braidings on Split Real Tambara-Yamagami Categories} \label{sec:SplitReal} In this section we examine the split real case with the primary purpose of setting a foundation for the non-split cases and illustrating the method. We obtain some new results, but much of the analysis in this section is originally due to Siehler \cite{sie00}, with a more contemporary perspective on the results due to Galindo \cite{GALINDO_2022}. We begin by recalling the classification of monoidal structures on split Tambara-Yamagami categories in \cite{ty98}: \begin{theorem}[{\cite[Theorem 3.2]{ty98}}] Let $A$ be a finite group, let $\tau=\frac{\pm 1}{\sqrt{|A|}}$, and let $\chi\colon A\times A\to k^{\times }$ be a symmetric nondegenerate bicharacter.
We define a split fusion category $\cC_{\mathbb{R}}(A,\chi,\tau)$ by taking the underlying fusion ring to be $\TY(A)$, the unitor isomorphisms to be identity, and the associators to be \begin{align*} \alpha_{a,b,c} &= 1_{abc}, \\ \alpha_{a,b,m} = \alpha_{m,a,b} &= 1_{m}, \\ \alpha_{a,m,b} &= \chi(a,b)\cdot 1_{m}, \\ \alpha_{a,m,m} = \alpha_{m,m,a} &= \bigoplus_{b\in A} 1_{b}, \\ \alpha_{m,a,m} &= \bigoplus_{b\in A} \chi(a,b)\cdot 1_b, \\ \alpha_{m,m,m} &= (\tau\chi(a,b)^{-1}\cdot 1_m)_{a,b}. \end{align*} All split fusion categories over $k$ with fusion ring $\TY(A)$ arise this way, and two fusion categories $\cC_{\mathbb{R}}(A,\chi,\tau)$ and $\cC_{\mathbb{R}}(A',\chi',\tau')$ are equivalent if and only if $\tau=\tau'$ and there exists a group isomorphism $\phi\colon A\to A'$ such that $\chi'(\phi(a),\phi(b))=\chi(a,b)$ for all $a,b\in A$. \end{theorem} In the split case, \mbox{$\End(X)\cong \mathbb{R}$} for all simple objects $X\in \C$, and each Hom space is spanned by a single non-zero vector. The associators are computed in \cite{ty98} using a set of fixed normal bases, denoted in string diagrams by trivalent vertices: \newcommand{\TSize}{0.45} \newcommand{\abNode}{ \begin{tikzineqn}[scale=\TSize] \coordinate (top) at (0,1); \coordinate (bottom left) at (-1,-1); \coordinate (bottom right) at (1,-1); \draw[strand a] (0,0) to (bottom left) node[below left] {$a$}; \draw[strand b] (0,0) to (bottom right) node[below right, yshift=0.1cm] {$b$}; \draw[strand ab] (0,0) to (top) node[above] {$ab$}; \end{tikzineqn}} \[ \begin{matrix} [a,b] & = & \abNode \quad&\quad [a,m] & = & \begin{tikzineqn}[scale=\TSize] \TrivalentVertex{a}{m}{m} \end{tikzineqn} \\ [m,a] & = & \begin{tikzineqn}[scale=\TSize] \TrivalentVertex{m}{a}{m} \end{tikzineqn} \quad&\quad [a] & = & \begin{tikzineqn}[scale=\TSize] \TrivalentVertex{m}{m}{a} \end{tikzineqn} \end{matrix} \] Using the basis vectors, our set of non-trivial linear isomorphisms $(\beta_{x,y}^{*})_{z}\in \mathrm{GL}_1(\mathbb{R})$ can be written as a set of coefficients in $\mathbb{R}^{\times }$: \begin{align*} (\beta_{a,b}^{*})_{ab}([b,a]) &:= \sigma_{0}(a,b) [a,b] \\ (\beta_{a,m}^{*})_{m}([m,a]) &:= \sigma_{1}(a) [a,m] \\ (\beta_{m,a}^{*})_{m}([a,m]) &:= \sigma_{2}(a) [m,a] \\ (\beta_{m,m}^{*})_{a}([a]) &:= \sigma_{3}(a) [a] \end{align*} thus defining coefficient functions $\sigma_i$ that take inputs in $A$ and produce outputs in $\mathbb{R}^{\times}$. \begin{remark} Since $\chi\colon A\times A\to \mathbb{R}^{\times}$ is a bicharacter and $A$ is a finite group, the image of $\chi$ is a finite subgroup of $\mathbb{R}^{\times}$, so it is a subset of $\{\pm 1\}$. This implies that for all $a\in A$, we have \[ \chi(a^2,-) = \chi(a,-)^2 = 1, \] and by nondegeneracy we have $a^2=1_{A}$. Thus, $A$ is an elementary abelian 2-group with $A\cong (\mathbb{Z}/2\mathbb{Z})^{m}$ for some $m\in \mathbb{Z}_{\ge 0}$. In particular, we have $a^{-1}=a$ for all $a\in A$, so we may freely drop inverse signs on group elements and on $\chi$. \end{remark} \subsection{The hexagon equations} After fixing bases for the Hom spaces, we obtain a set of real-valued equations by performing precomposition on our chosen basis vectors using graphical calculus.
The resulting unsimplified hexagon equations are as follows. The hexagon axiom yields \begin{align} \sigma_0(c,ab) &= \sigma_0(c,a)\sigma_0(c,b), \label{eqn:hexR1} \\ \sigma_2(ab) &= \sigma_2(a)\chi(a,b)\sigma_2(b), \label{eqn:hexR2} \\ \sigma_0(b,a)\sigma_1(b) &= \sigma_1(b)\chi(a,b), \label{eqn:hexR3} \\ \sigma_1(b)\sigma_0(b,a) &= \chi(b,a)\sigma_1(b), \label{eqn:hexR4} \\ \chi(a,b)\sigma_3(b) &= \sigma_2(a)\sigma_3(a^{-1}b), \label{eqn:hexR5} \\ \sigma_3(b)\chi(a,b) &= \sigma_3(ba^{-1})\sigma_2(a), \label{eqn:hexR6} \\ \sigma_0(a,ba^{-1}) &= \sigma_1(a)\chi(a,b)\sigma_1(a), \label{eqn:hexR7} \\ \sigma_3(a)\tau\chi(a,b)^{-1}\sigma_3(b) &= \sum_{c\in A}\tau\chi(a,c)^{-1}\sigma_2(c)\tau\chi(c,b)^{-1}, \label{eqn:hexR8} \end{align} and the inverse hexagon axiom yields \begin{align} \sigma_0(c,a)\sigma_0(b,a) &= \sigma_0(bc,a), \label{eqn:hexR9} \\ \chi(b,a)^{-1}\sigma_2(a) &= \sigma_2(a)\sigma_0(b,a), \label{eqn:hexR10} \\ \sigma_0(b,a)\sigma_2(a) &= \sigma_2(a)\chi(a,b)^{-1}, \label{eqn:hexR11} \\ \sigma_1(b)\chi(a,b)^{-1}\sigma_1(a) &= \sigma_1(ab), \label{eqn:hexR12} \\ \sigma_0(a^{-1}b,a) &= \sigma_2(a)\chi(a,b)^{-1}\sigma_2(a), \label{eqn:hexR13} \\ \sigma_3(a^{-1}b)\sigma_1(a) &= \sigma_3(b)\chi(a,b)^{-1}, \label{eqn:hexR14} \\ \sigma_1(a)\sigma_3(ba^{-1}) &= \chi(a,b)^{-1}\sigma_3(b), \label{eqn:hexR15} \\ \sigma_3(a)\tau \chi(a,b)\sigma_3(b) &= \sum_{c\in A} \tau \chi(a,c)\sigma_1(c)\tau \chi(c,b). \label{eqn:hexR16} \end{align} \subsection{Reduced hexagon equations} The following six equations are algebraically equivalent to the sixteen unsimplified hexagon equations: \begin{align} &\sigma_0(a,b) = \chi(a,b), \label{eqn:reducedR1} \\ &\sigma_1(a)^2 = \chi(a,a), \label{eqn:reducedR2} \\ &\sigma_1(ab) = \sigma_1(a)\sigma_1(b)\chi(a,b), \label{eqn:reducedR3} \\ &\sigma_2(a) = \sigma_1(a), \label{eqn:reducedR4} \\ &\sigma_3(1)^2 = \tau \sum_{c\in A}\sigma_1(c), \label{eqn:reducedR5} \\ &\sigma_3(a) = \sigma_3(1)\sigma_1(a)\chi(a,a). \label{eqn:reducedR6} \end{align} The process of eliminating redundancies is as follows. First, we may eliminate any term that appears on both sides of any equation, as all functions are valued in $\{\pm1\}$. Then, we have the following implications: \begin{center} \begin{tabular}{|c|c|c|} \hline (\ref{eqn:hexR3})$\implies$ (\ref{eqn:reducedR1}) & (\ref{eqn:hexR12})$\implies$ (\ref{eqn:reducedR3}) & (\ref{eqn:hexR16}), $a=b=1$ $\implies$ (\ref{eqn:reducedR5}) \\ \hline (\ref{eqn:hexR7})$\implies$ (\ref{eqn:reducedR2}) & (\ref{eqn:hexR6}), (\ref{eqn:hexR15}) $\implies$ (\ref{eqn:reducedR4}) & (\ref{eqn:hexR14}), $a=b$ $\implies$ (\ref{eqn:reducedR6}) \\ \hline \end{tabular} \end{center} To check that the reduced equations are indeed equivalent to the original sixteen, first note that the equality $\sigma_2=\sigma_1$ from equation (\ref{eqn:reducedR4}) identifies each of (\ref{eqn:hexR9})-(\ref{eqn:hexR16}) with one of (\ref{eqn:hexR1})-(\ref{eqn:hexR8}), so it suffices to prove the first eight hexagons from the reduced equations. Equations (\ref{eqn:hexR1}), (\ref{eqn:hexR3}) and (\ref{eqn:hexR4}) follow from equation (\ref{eqn:reducedR1}), which identifies $\sigma_0$ with the bicharacter $\chi$. Equation (\ref{eqn:hexR2}) follows from (\ref{eqn:reducedR3}) and (\ref{eqn:reducedR4}). Equation (\ref{eqn:hexR7}) follows from (\ref{eqn:reducedR2}). Equations (\ref{eqn:hexR5}) and (\ref{eqn:hexR6}) can be derived by expanding both sides in terms of $\sigma_1$ and $\chi$ using equations \eqref{eqn:reducedR4} and \eqref{eqn:reducedR6}.
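For instance, since every element of $A$ is its own inverse, the right-hand side of \eqref{eqn:hexR5} expands as \begin{align*} \sigma_2(a)\sigma_3(a^{-1}b) &\overset{\eqref{eqn:reducedR4},\,\eqref{eqn:reducedR6}}{=} \sigma_1(a)\,\sigma_3(1)\sigma_1(ab)\chi(ab,ab) \\ &\overset{\eqref{eqn:reducedR3}}{=} \sigma_3(1)\,\sigma_1(a)^2\sigma_1(b)\chi(a,b)\,\chi(a,a)\chi(b,b) \\ &\overset{\eqref{eqn:reducedR2}}{=} \sigma_3(1)\sigma_1(b)\chi(b,b)\,\chi(a,b) \;=\; \chi(a,b)\,\sigma_3(b), \end{align*} where the second step also uses $\chi(ab,ab)=\chi(a,a)\chi(b,b)$, valid because $\chi$ is a $\{\pm1\}$-valued symmetric bicharacter; equation \eqref{eqn:hexR6} is obtained in the same way.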
It remains to derive equation (\ref{eqn:hexR8}). First, equation \eqref{eqn:reducedR3} implies \begin{equation} \label{eqn:Sigma1Expansion} \sigma_1(a)\sigma_1(b)\sigma_1(d) = \frac{\sigma_1(abd)}{\chi(a, bd)\chi(b,d)}. \end{equation} Finally, we derive an equivalent form of \eqref{eqn:hexR8} from the reduced equations, along with the fact that $\chi$ is a $\{\pm 1\}$-valued symmetric bicharacter. \begin{align*} \sigma_3(a)\chi(a,b)^{-1}\sigma_3(b) &\overset{\eqref{eqn:reducedR6}}{=} \sigma_3(1)^2\sigma_1(a)\sigma_1(b)\chi(a,a)\chi(b,b)\chi(a,b)^{-1} \\ &\overset{\eqref{eqn:reducedR5}}{=} \tau \sum_{d\in A}\sigma_1(d)\sigma_1(a)\sigma_1(b)\chi(a,a)\chi(b,b)\chi(a,b)^{-1}\\ &\overset{\eqref{eqn:Sigma1Expansion}}{=} \tau \sum_{d\in A}\sigma_1(abd)\frac{\chi(a,a)\chi(b,b)}{\chi(a,b)\chi(a, bd)\chi(b,d)} \\ &\overset{c := abd}{=} \tau \sum_{c\in A}\sigma_1(c)\frac{\chi(a,a)\chi(b,b)}{\chi(a,b)\chi(a, a^{-1}c)\chi(b,b^{-1}a^{-1}c)}\\ &\overset{\eqref{eqn:reducedR4}}{=} \tau\sum_{c\in A}\chi(a,c)^{-1}\sigma_2(c)\chi(c,b)^{-1} \end{align*} \subsection{Classification of Braidings} By equation (\ref{eqn:reducedR2}) and the fact that all coefficients are real, we have the restriction that $\chi(a,a)>0$ for all $a\in A$. We conclude using Theorem \ref{thm:WallClassification}: \begin{proposition}\label{thm:SplitClassification} If $\C_{\mathbb{R}}(A,\chi,\tau)$ admits a braiding, then $A\cong K_4^{n}$ for some $n\in \mathbb{Z}_{\ge 0}$ and $\chi$ is the hyperbolic pairing $h^{n}$. \end{proposition} From the simplified hexagon equations, we have the following classification of braidings on a split TY category over $\mathbb{R}$. \begin{theorem}\label{thm:split-class-sols} A braiding on $\mathcal{C}_{\mathbb{R}}(K_4^n,h^{n},\tau)$ is given by a $\chi$-admissible function $\sigma$ with $\sgn\sigma=\sgn\tau$ and a coefficient $\epsilon\in \{\pm 1\}$. In other words, the set of braidings on $\mathcal{C}_{\mathbb{R}}(K_4^n,h^{n},\tau)$ is in bijection with $\QF_{\sgn\tau}^n \times \{\pm 1\}$. \end{theorem} \begin{proof} Given a braiding on $\mathcal{C}_{\mathbb{R}}(K_4^n,h^{n},\tau)$, we deduce from the reduced hexagon equations (namely \ref{eqn:reducedR3}) that $\sigma_1 \in \QF(h^{n})$. Equation (\ref{eqn:reducedR5}) gives the constraint \[ \tau \sum_{c\in A}\sigma_1(c) = 2^{n}\tau\sgn{\sigma_1}>0, \] which tells us that $\sigma_1 \in \QF^n_{\sgn(\tau)}$. We may also extract a sign $\epsilon$ which is defined by the equation \begin{equation} \label{eqn:RealSigma31Definition} \sigma_3(1) = \epsilon \sqrt{2^{n}\tau\sgn{\sigma_1}} . \end{equation} We thus obtain an element $(\sigma_1, \epsilon) \in \QF^n_{\text{sgn}(\tau)} \times \{\pm 1\}$. Conversely, given an element $(\sigma, \epsilon) \in \QF^n_{\text{sgn}(\tau)} \times \{\pm 1\}$, we let $\sigma_1 = \sigma_2 = \sigma$ and $\sigma_0 = h^{n}$, and define $\sigma_3(1)$ by Equation \eqref{eqn:RealSigma31Definition}. We can then extend $\sigma_3(1)$ to a function $\sigma_3(a)$ by equation \eqref{eqn:reducedR6}. Equations \eqref{eqn:reducedR1}-\eqref{eqn:reducedR4} and \eqref{eqn:reducedR6} hold by our definitions along with the fact that $\sigma \in \QF(h^{n})$. The remaining constraint \eqref{eqn:reducedR5} holds by Proposition \ref{prop:OrbitEquivalenceCharacterization}, our choice of $\sigma_3(1)$, and the definition of $\QF^n_{\text{sgn}(\tau)}$. Finally, we observe that these procedures are, by construction, mutually inverse. \end{proof} Note that when $n=0$, $\sgn(\sigma)$ is automatically equal to 1.
In the proof above, this would force $\sigma_3(1)$ to be purely imaginary, and thus such categories can only exist over fields containing a square root of $-1$. Over $\mathbb C$, $\sigma_3(1)=i$ gives the semion category, and $\sigma_3(1)=-i$ gives the reverse semion. Over $\mathbb R$, \eqref{eqn:RealSigma31Definition} cannot be satisfied when $n=0$ and $\tau<0$, and so this category admits no braidings (i.e. $\QF^0_{-}=\emptyset$). As a consequence of Theorem \ref{thm:split-class-sols}, the following braidings are coherent. \begin{definition}\label{defn:ExplicitSplitRealBraidings} Given an element $(\sigma, \epsilon)$ of $\QF_{\sgn\tau}^n\times \{\pm 1\}$, we define a braided structure $\C_\mathbb{R}(K_4^n,h^{n},\tau,\sigma,\epsilon)$ on $\C_\mathbb{R}(K_4^n,h^{n},\tau)$ by: \begin{align*} \beta_{a,b} &= \chi(a,b)\cdot \id_{ab}, \\ \beta_{a,m} &= \beta_{m,a} = \sigma(a)\cdot \id_{m}, \\ \beta_{m,m} &= \sum_{a\in K_4^{n}} \epsilon\,\sigma(a) [a]^{\dag}[a]. \end{align*} Since the group $K_4^n$, bicharacter $h^{n}$, and coefficient $\tau$ are determined from context, we will abbreviate $\C_\mathbb{R}(K_4^n,h^{n},\tau,\sigma,\epsilon) := \C_\mathbb{R}(\sigma,\epsilon)$. \end{definition} We next analyze when $\C_\mathbb{R}(\sigma,\epsilon)$ is braided equivalent to $\C_\mathbb{R}(\sigma', \epsilon')$, by analyzing the properties of certain categorical groups attached to these categories. \begin{notation}\label{not:CatGrp} The autoequivalences of any ($\star=$ plain, monoidal, braided, etc.) category $\mathcal C$ form a categorical group $\Aut_{\star}(\mathcal C)$. The objects of $\Aut_{\star}(\mathcal C)$ are $\star$-autoequivalences of $\mathcal C$, and the morphisms are $\star$-natural isomorphisms. For any categorical group $\mathcal G$, the group of isomorphism classes of objects is denoted by $\pi_0\mathcal G$, and the automorphisms of the identity are denoted by $\pi_1\mathcal G$. \end{notation} \begin{lemma}\label{lem:SplitRealFunctorClassification} $$\pi_0\Aut_\otimes\big(\C_\mathbb{R}(K_4^n,h^{n},\tau)\big) \cong \Aut(K_4^n,h^{n})$$ \end{lemma} \begin{proof} This fact appears in several places in the literature (for instance \cite[Proposition 1]{Tambara2000}, \cite[Proposition 2.10]{Nikshych2007NongrouptheoreticalSH}, and \cite[Lemma 2.16]{EDIEMICHELL2022108364}) and is proved with arguments that do not depend on the algebraic closure of the field in question. They do, however, assume that the underlying semisimple category is split. We will see in future sections that this does affect the validity of the conclusion. \end{proof} \begin{proposition}\label{prop:RealFunctorBraided} The monoidal functor $F(f)$ determined by an automorphism $f\in\Aut(K_4^n,h^{n})$ forms a braided monoidal equivalence $\C_\mathbb{R}(\sigma,\epsilon) \to \C_\mathbb{R}(\sigma',\epsilon')$ if and only if $f \cdot \sigma = \sigma'$ and $\epsilon = \epsilon'$. \end{proposition} \begin{proof} Using Definition \ref{defn:ExplicitSplitRealBraidings}, the required constraints for $F(f)$ to be braided are \begin{align*} h^{n}(f(a), f(b)) &= h^{n}(a, b) \\ \sigma'(f(a)) &= \sigma(a) \\ \epsilon' &= \epsilon. \end{align*} These equations are indeed equivalent to $f \cdot \sigma = \sigma'$ and $\epsilon = \epsilon'$. \end{proof} The following theorem strengthens \cite{GALINDO_2022} in the split real case. \begin{theorem}\label{thm:SplitCaseEquivalence} There is a braided equivalence $\C_\mathbb{R}(\sigma,\epsilon) \sim \C_\mathbb{R}(\sigma',\epsilon')$ if and only if $\epsilon = \epsilon'$. 
In particular, there are exactly two equivalence classes of braidings on $\C_\mathbb{R}(K_4^n,h^{n},\tau)$ when $n > 0$, or when $n = 0$ and $\tau > 0$, and zero otherwise. \end{theorem} \begin{proof} By Lemma \ref{lem:SplitRealFunctorClassification}, the functors $F(f)$ form a complete set of representatives for $\pi_0(\Aut_\otimes(\C_\mathbb{R}(K_4^n,h^{n},\tau)))$. Therefore it suffices to check when some $F(f)$ is a braided equivalence $\C_\mathbb{R}(\sigma,\epsilon) \to \C_\mathbb{R}(\sigma',\epsilon')$. By Proposition \ref{prop:RealFunctorBraided}, this occurs exactly when $\epsilon = \epsilon'$ and $\sigma$ is orbit equivalent to $\sigma'$. This last condition always holds by Proposition \ref{prop:OrbitEquivalenceCharacterization} since the sign of $\sigma$ is determined by $\tau$ (part of the underlying monoidal structure). \end{proof} Taking $\epsilon = \epsilon'$ and $\sigma = \sigma'$ in Proposition \ref{prop:RealFunctorBraided}, we obtain: \begin{proposition}\label{prop:SplitRealBraidedFunctorClassification} $$\pi_0(\Aut_{\text{br}}(\C_\mathbb{R}(\sigma, \epsilon))) \cong H^n_{\sgn \sigma},$$ where $H^n_{\sgn \sigma}$ is the stabilizer of $\sigma$ in $\Aut(K_4^n, h^{n})$. \end{proposition} Note that by Proposition \ref{prop:SplitRealBraidedFunctorClassification}, $|\pi_0\Aut_{\text{br}}(\C_\mathbb{R}(\sigma, \epsilon))|$ depends on $\tau$, while Lemma \ref{lem:SplitRealFunctorClassification} shows that $|\pi_0\Aut_\otimes(\C_\mathbb{R}(K_4^n,h^{n},\tau))|$ does not. \begin{remark} When $n = 1$ (but $\tau$ is not fixed), braidings on the split complex Tambara-Yamagami categories were classified in \cite[Example 2.5.2, Figures 3-5]{SchopierayNonDegenExtension}. We can see that the four symmetrically braided categories appearing in Figure 3 are defined over the reals, and our results here show that these are in fact the only possibilities. \end{remark} We conclude with a lemma on twist morphisms for these braidings. \begin{lemma} There are exactly two families of twist morphisms for any $\C_\mathbb{R}(\sigma,\epsilon)$, corresponding to a sign $\rho \in \{\pm 1\}$. These twists are indeed ribbon structures (in the sense of \cite[Definition 8.10.1]{EGNO15}). \end{lemma} \begin{proof} The first part of the lemma is due to \cite{sie00}, who gives the components $\theta_x$ of the twist as $\theta_a = 1, \theta_m = \rho \sigma_3(1)^{-1}$. Since every simple object is self-dual, the required axiom is simply $\theta_m = \theta_m^*$. But this holds as a result of the linearity of composition. \end{proof} \section{Braidings on Real/Quaternionic Tambara-Yamagami Categories} We will now examine the case where $\End(\mathbbm{1})\cong \mathbb{R}$ and $\End(m)\cong \mathbb{H}$. We first note that the four-dimensional $\mathbb{R}$-vector spaces $\Hom(a\otimes m,m)$, $\Hom(m\otimes a,m)$ and $\Hom(m\otimes m,a)$ can be endowed with the structure of $(\mathbb{H},\mathbb{H})$-bimodules under pre- and postcomposition with quaternions. By naturality, the effect of precomposing with braiding isomorphisms for each of these hom-spaces is determined on an ($\mathbb{H},\mathbb{H}$)-basis.
A preferred system of basis vectors (over $\mathbb{R}$ for $\Hom(a\otimes b,ab)$ and over $\mathbb{H}$ for the others) is chosen in \cite[Section 5.1]{pss23}, depicted again as trivalent vertices: \[ \begin{matrix} [a,b] & = & \abNode \quad&\quad [a,m] & = & \begin{tikzineqn}[scale=\TSize] \TrivalentVertex{a}{m}{m} \end{tikzineqn} \\ [m,a] & = & \begin{tikzineqn}[scale=\TSize] \TrivalentVertex{m}{a}{m} \end{tikzineqn} \quad&\quad [a] & = & \begin{tikzineqn}[scale=\TSize] \TrivalentVertex{m}{m}{a} \end{tikzineqn} \end{matrix} \] A splitting for each $[a]$ is chosen in \cite[Proposition 4.4]{pss23} and will be denoted by \[ [a]^\dagger = \begin{tikzineqn}[scale=\TSize,yscale=-1] \coordinate (mid) at (0,0); \coordinate (top) at (0,1); \coordinate (bottom left) at (-1,-1); \coordinate (bottom right) at (1,-1); \draw[strand m] (mid) to (bottom left) node[above left] {$m$}; \draw[strand m] (mid) to (bottom right) node[above right] {$m$}; \draw[strand a] (mid) to (top) node[below] {$a$}; \end{tikzineqn} \] such that \[ \id_{m\otimes m} \quad=\quad \begin{tikzineqn} \draw[strand m] (0,0) -- (0,2); \draw[strand m] (1,0) -- (1,2); \end{tikzineqn} \quad=\quad \sum_{\substack{a\in A\\ s\in S}} \begin{tikzineqn}[scale=0.5] \draw[strand a] (0,0) -- (0,2); \draw[strand m] (0,2) -- ++(1,1); \draw[strand m] (0,2) -- ++(-1,1); \draw[strand m] (0,0) -- ++(1,-1); \draw[strand m] (0,0) -- ++(-1,-1); \node[smallbead] at (0.5,2.5) {$s$}; \node[smallbead] at (0.5,-0.5) {$\overline{s}$}; \end{tikzineqn} \quad=\quad \sum_{\substack{a\in A\\ s\in S}} (\id_m\otimes s)[a]^{\dag}[a](\id_m\otimes \overline{s}) \] where $S:=\{1,i,j,k\}$. By \cite[Proposition 5.1]{pss23}, the basis vectors satisfy the convenient property that they commute \newcommand{\beadedTSize}{0.7} \[ \begin{tikzineqn}[scale=\beadedTSize] \TrivalentVertex{a}{m}{m} \DrawSmallBead{mid}{top}{v} \end{tikzineqn} \ = \ \begin{tikzineqn}[scale=\beadedTSize] \TrivalentVertex{a}{m}{m} \DrawSmallBead{mid}{bottom right}{v} \end{tikzineqn} \quad\quad \begin{tikzineqn}[scale=\beadedTSize] \TrivalentVertex{m}{a}{m} \DrawSmallBead{mid}{top}{v} \end{tikzineqn} \ = \ \begin{tikzineqn}[scale=\beadedTSize] \TrivalentVertex{m}{a}{m} \DrawSmallBead{mid}{bottom left}{v} \end{tikzineqn}\;\,, \] or conjugate-commute \[ \begin{tikzineqn}[scale=\beadedTSize] \TrivalentVertex{m}{m}{a} \DrawSmallBead{mid}{bottom left}{v} \end{tikzineqn} \ = \ \begin{tikzineqn}[scale=\beadedTSize] \TrivalentVertex{m}{m}{a} \DrawSmallBead{mid}{bottom right}{\overline{v}} \end{tikzineqn} \] with all quaternions $v\in \mathbb{H}$. We can now recall the classification of associators on these categories using the chosen bases. \begin{theorem}[{\cite[Theorem 5.4]{pss23}}] Let $A$ be a finite group, let $\tau=\frac{\pm1}{\sqrt{4|A|}}$, and let $\chi:A\times A\to \mathbb R^\times$ be a nondegenerate symmetric bicharacter on $A$.
A triple of such data gives rise to a non-split Tambara-Yamagami category \mbox{$\C_{\bb H}(A,\chi,\tau)$}, with $\End(\1)\cong\bb R$ and $\End(m)\cong\bb H$, whose associators for $a, b, c\in A$ are given as follows: \begin{gather*} \alpha_{a,b,c}=\id_{abc}\,,\\ \alpha_{a,b,m}=\alpha_{m,b,c}=\id_{m}\,,\\ \alpha_{a,m,c}=\chi(a,c)\cdot\id_{m},\\ \alpha_{a,m,m}=\alpha_{m,m,c}=\id_{m\otimes m}\,,\\ \alpha_{m,b,m}=\bigoplus_{a\in A}\chi(a,b)\cdot\id_{a^{\oplus4}}\,,\\ \alpha_{m,m,m}=\tau\cdot\sum_{\substack{a,b\in A\\s,t\in S}}\chi(a,b)^{-1}\cdot(s\otimes(\id_m\otimes\overline{t}))(\id_m\otimes[a]^\dagger)([b]\otimes\id_m)((\id_m\otimes s)\otimes t), \end{gather*} where $S:=\{1,i,j,k\}\subseteq \mathbb{H}$. Furthermore, all equivalence classes of such categories arise in this way. Two categories $\C_{\bb H}(A,\chi,\tau)$ and $\C_{\bb H}(A',\chi',\tau')$ are equivalent if and only if $\tau=\tau'$ and there exists an isomorphism $f:A\to A'$ such that for all $a,b\in A$, \[\chi'\big(f(a),f(b)\big)\;=\;\chi(a,b)\,.\] \end{theorem} We can now write down our braiding coefficients, some of which are a priori quaternions: \newcommand{\myClipWidth}{10} \newcommand{\eqnscale}{0.4} \newcommand{\tscale}{0.8} \[ \begin{tikzineqn}[scale=\eqnscale] \draw[strand ab] (0,0) to ++(0,1) node[above] {$ab$}; \begin{knot}[clip width=10] \strand[strand a] (0,0) to ++(1,-1) to ++(-2,-2) node[below left] {$a$}; \strand[strand b] (0,0) to ++(-1,-1) to ++(2,-2) node[below right,yshift=0.1cm] {$b$}; \end{knot} \end{tikzineqn} := \ \sigma_0(a,b) \begin{tikzineqn}[scale=\tscale] \coordinate (top) at (0,1); \coordinate (bottom left) at (-1,-1); \coordinate (bottom right) at (1,-1); \draw[strand a] (0,0) to (bottom left) node[below left] {$a$}; \draw[strand b] (0,0) to (bottom right) node[below right, yshift=0.1cm] {$b$}; \draw[strand ab] (0,0) to (top) node[above] {$ab$}; \end{tikzineqn} \quad\quad \begin{tikzineqn}[scale=\eqnscale] \draw[strand m] (0,0) to ++(0,1) node[above] {$m$}; \begin{knot}[clip width=10] \strand[strand a] (0,0) to ++(1,-1) to ++(-2,-2) node[below left] {$a$}; \strand[strand m] (0,0) to ++(-1,-1) to ++(2,-2) node[below right] {$m$}; \end{knot} \end{tikzineqn} := \ \begin{tikzineqn}[scale=\tscale] \TrivalentVertex{a}{m}{m} \DrawLongBead{mid}{bottom right}{\sigma_1(a)} \end{tikzineqn} \] \vspace{-0.2cm} \[ \begin{tikzineqn}[scale=\eqnscale] \draw[strand m] (0,0) to ++(0,1) node[above] {$m$}; \begin{knot}[clip width=10] \strand[strand m] (0,0) to ++(1,-1) to ++(-2,-2) node[below left] {$m$}; \strand[strand a] (0,0) to ++(-1,-1) to ++(2,-2) node[below right] {$a$}; \end{knot} \end{tikzineqn} := \ \begin{tikzineqn}[scale=\tscale] \TrivalentVertex{m}{a}{m} \DrawLongBead{mid}{bottom left}{\sigma_2(a)} \end{tikzineqn} \quad\quad \ \begin{tikzineqn}[scale=\eqnscale] \draw[strand a] (0,0) to ++(0,1) node[above] {$a$}; \begin{knot}[clip width=10] \strand[strand m] (0,0) to ++(1,-1) to ++(-2,-2) node[below left] {$m$}; \strand[strand m] (0,0) to ++(-1,-1) to ++(2,-2) node[below right] {$m$}; \end{knot} \end{tikzineqn} := \ \begin{tikzineqn}[scale=\tscale] \TrivalentVertex{m}{m}{a} \DrawLongBead{mid}{bottom right}{\sigma_3(a)} \end{tikzineqn} \] It is clear that the braiding coefficients are natural if they are real-valued. It turns out that the converse is true, in that naturality forces all braiding coefficients to be real. \begin{lemma} \label{lem:RQSigma12Real} The functions $\sigma_1$ and $\sigma_2$ are real-valued.
\end{lemma} \begin{proof} For any $v\in \mathbb{H}$ and any $a\in A$, consider the following diagram: \[\begin{tikzcd} m &&& m \\ & {a\otimes m} & {m\otimes a} \\ & {a\otimes m} & {m\otimes a} \\ m &&& m \arrow["c_{a,m}", from=2-2, to=2-3] \arrow["{v\otimes \id_a}", from=2-3, to=3-3] \arrow["{\id_a\otimes v}"', from=2-2, to=3-2] \arrow["c_{a,m}"', from=3-2, to=3-3] \arrow["{[a,m]}"', from=2-2, to=1-1] \arrow["{[m,a]}", from=2-3, to=1-4] \arrow["{[a,m]}", from=3-2, to=4-1] \arrow["{[m,a]}"', from=3-3, to=4-4] \arrow["{\sigma_1(a)}", from=1-1, to=1-4] \arrow["v", from=1-4, to=4-4] \arrow["v"', from=1-1, to=4-1] \arrow["{\sigma_1(a)}"', from=4-1, to=4-4] \end{tikzcd}\] The middle diagram commutes by the naturality of the braiding, while the top and bottom quadrangles commute by the definition of $\sigma_1$. As our chosen basis vector $[a,m]$ commutes with quaternions, we have \[ v\circ f_1 = v \triangleright [a,m] = [a,m] \triangleleft v = f_1 \otimes (\id_a\otimes v) ,\] so the left quadrangle commutes, and the same argument can be made for the right quadrangle using the vector $[m,a]$. Since both $[a,m]$ and $[m,a]$ are isomorphisms, we have the commutativity of the outer rectangle, and thus we have that \[ (\forall v\in \mathbb{H}) \quad \sigma_1(a)\circ v = v \circ \sigma_1(a) \] or that $\sigma_1(a)$ lies in the center of $\mathbb{H}$. Alternatively, we can present the proof using graphical calculus. We first introduce a ``bubble" by precomposing with our basis vector and its inverse, and commute the quaternion through the trivalent vertex: \newcommand{\lemmascale}{1} \[ \begin{tikzineqn}[scale=\lemmascale] \coordinate (bot) at (0,-2); \coordinate (mid) at (0,0); \coordinate (top) at (0,2); \coordinate (bead1) at ($(bot)!1/3!(top)$); \coordinate (bead2) at ($(bot)!2/3!(top)$); \draw[strand m] (top) to (bot) node[below] {$m$}; \node[bead] at (bead1) {$v$}; \node[longbead] at (bead2) {$\sigma_1(a)$}; \end{tikzineqn} \quad=\quad \begin{tikzineqn}[scale=\lemmascale] \draw[strand m] node[below] {$m$} (0,0) to ++(0,1/2) coordinate (vert) to ++(1/2,1/2) to ++(-1/2,1/2) coordinate (triv) to (0,4); \draw[strand a] (vert) to ++(-1/2,1/2) node[left] {$a$} to ++(1/2,1/2); \node[bead] at ($(triv)!1/3!(0,4)$) {$v$}; \node[longbead] at ($(triv)!2/3!(0,4)$) {$\sigma_1(a)$}; \end{tikzineqn} \quad = \quad \begin{tikzineqn}[scale=\lemmascale] \begin{knot}[clip width=10] \strand[strand m] node[below] {$m$} (0,0) to ++(0,1) to ++(1/2,1/2) to ++(0,1) to ++(-1/2,1/2) to ++(0,1); \strand[strand a] (0,1) to ++(-1/2,1/2) to ++(0,1) to ++(1/2,1/2); \end{knot} \node[node a,left] at (-1/2,2) {$a$}; \node[longbead] at (0,3.5) {$\sigma_1(a)$}; \node[bead] at (1/2,2) {$v$}; \end{tikzineqn} \] Then, by the definition of $\sigma_1$ and naturality, we have \[ \begin{tikzineqn}[scale=\lemmascale] \begin{knot}[clip width=10] \strand[strand m] node[below] {$m$} (0,0) to ++(0,1) to ++(1/2,1/2) to ++(0,1) to ++(-1/2,1/2) to ++(0,1); \strand[strand a] (0,1) to ++(-1/2,1/2) to ++(0,1) to ++(1/2,1/2); \end{knot} \node[node a,left] at (-1/2,2) {$a$}; \node[longbead] at (0,3.5) {$\sigma_1(a)$}; \node[bead] at (1/2,2) {$v$}; \end{tikzineqn} \quad =\quad \begin{tikzineqn}[scale=\lemmascale] \begin{knot}[clip width=10] \strand[strand m] node[below] {$m$} (0,0) to ++(0,1) to ++(1/2,1/2) to ++(-1,1) to ++(1/2,1/2) to ++(0,1); \strand[strand a] (0,1) to ++(-1/2,1/2) to ++(1,1) to ++(-1/2,1/2); \end{knot} \node[smallbead,xshift=-0.1cm] at (1/2,3/2) {$v$}; \end{tikzineqn} \quad = \quad \begin{tikzineqn}[scale=\lemmascale] \begin{knot}[clip 
width=10] \strand[strand m] node[below] {$m$} (0,0) to ++(0,1) to ++(1/2,1/2) to ++(-1,1) to ++(1/2,1/2) to ++(0,1); \strand[strand a] (0,1) to ++(-1/2,1/2) to ++(1,1) to ++(-1/2,1/2); \end{knot} \node[smallbead,xshift=0.1cm] at (-1/2,5/2) {$v$}; \end{tikzineqn} \quad=\quad \begin{tikzineqn}[scale=\lemmascale] \begin{knot}[clip width=10] \strand[strand m] node[below] {$m$} (0,0) to ++(0,1) to ++(1/2,1/2) to ++(-1,1) to ++(1/2,1/2) to ++(0,1); \strand[strand a] (0,1) to ++(-1/2,1/2) to ++(1,1) to ++(-1/2,1/2); \end{knot} \node[bead] at (0,3.5) {$v$}; \end{tikzineqn} \quad=\quad \begin{tikzineqn}[scale=\lemmascale] \draw[strand m] node[below] {$m$} (0,0) to ++(0,1) to ++(1/2,1/2) to ++(0,1) to ++(-1/2,1/2) to ++(0,1); \draw[strand a] (0,1) to ++(-1/2,1/2) to ++(0,1) to ++(1/2,1/2); \node[bead] at (0,3.5) {$v$}; \node[longbead] at (1/2,2) {$\sigma_1(a)$}; \end{tikzineqn} \] and we can pass $\sigma_1(a)$ through the trivalent vertex to get \[ \begin{tikzineqn}[scale=\lemmascale] \draw[strand m] node[below] {$m$} (0,0) to ++(0,1) to ++(1/2,1/2) to ++(0,1) to ++(-1/2,1/2) to ++(0,1); \draw[strand a] (0,1) to ++(-1/2,1/2) to ++(0,1) to ++(1/2,1/2); \node[bead] at (0,3.5) {$v$}; \node[longbead] at (1/2,2) {$\sigma_1(a)$}; \end{tikzineqn} \quad=\quad \begin{tikzineqn}[scale=\lemmascale] \draw[strand m] node[below] {$m$} (0,0) to ++(0,1/2) coordinate (vert) to ++(1/2,1/2) to ++(-1/2,1/2) coordinate (triv) to (0,4); \draw[strand a] (vert) to ++(-1/2,1/2) to ++(1/2,1/2); \node[bead] at ($(triv)!2/3!(0,4)$) {$v$}; \node[longbead] at ($(triv)!1/3!(0,4)$) {$\sigma_1(a)$}; \end{tikzineqn} \quad=\quad \begin{tikzineqn}[scale=\lemmascale] \coordinate (bot) at (0,-2); \coordinate (mid) at (0,0); \coordinate (top) at (0,2); \coordinate (bead1) at ($(bot)!1/3!(top)$); \coordinate (bead2) at ($(bot)!2/3!(top)$); \draw[strand m] (top) to (bot) node[below] {$m$}; \node[bead] at (bead2) {$v$}; \node[longbead] at (bead1) {$\sigma_1(a)$}; \end{tikzineqn} \] as desired. A similar argument using either method can be applied to show that $\sigma_2$ is also real-valued. \end{proof} \begin{lemma}\label{lem:RQSigma3Real} The function $\sigma_3$ is real-valued. \end{lemma} \begin{proof} Let $a\in A$. We want to show that $\sigma_3(a)$ is in the center of $\mathbb{H}$. 
First, we will use the naturality of the braiding to show that \[ (\forall v\in \mathbb{H}) \quad [a]\triangleleft \big(\sigma_3(a)\cdot v\big) = [a]\triangleleft \big(v\cdot \sigma_3(a)\big) .\] First, we use naturality and the property of the trivalent vertex to get \[ \begin{tikzineqn}[scale=0.5] \draw[strand a] (0,0) -- (0,1.5); \draw[strand m] (0,0) -- (1,-1) -- ++(0,-4); \draw[strand m] (0,0) -- (-1,-1) -- ++(0,-4); \node[longbead] at (1,-2.2) {$\sigma_3(a)$}; \node[bead] at (1,-3.8) {$v$}; \node[below] at (-1,-5) {$m$}; \node[below] at (1,-5) {$m$}; \node[strand a,above] at (0,1.5) {$a$}; \end{tikzineqn} \quad=\quad \begin{tikzineqn}[scale=0.5] \draw[strand a] (0,0) -- (0,1.5); \draw[strand m] (0,0) -- (1,-1); \draw[strand m] (0,0) -- (-1,-1); \begin{knot}[clip width = 10] \strand[strand m] (1,-1) -- ++(-2,-2) -- ++(0,-2); \strand[strand m] (-1,-1) -- ++(2,-2) -- ++(0,-2); \end{knot} \node[bead] at (1,-3.8) {$v$}; \node[below] at (-1,-5) {$m$}; \node[below] at (1,-5) {$m$}; \node[strand a,above] at (0,1.5) {$a$}; \end{tikzineqn} \quad=\quad \begin{tikzineqn}[scale=0.5] \draw[strand a] (0,0) -- (0,1.5); \draw[strand m] (0,0) -- (1,-1); \draw[strand m] (0,0) -- (-1,-1); \begin{knot}[clip width = 10] \strand[strand m] (1,-1) -- ++(-2,-2) -- ++(0,-2); \strand[strand m] (-1,-1) -- ++(2,-2) -- ++(0,-2); \end{knot} \node[bead] at (-1,-3.8) {$\overline{v}$}; \node[below] at (-1,-5) {$m$}; \node[below] at (1,-5) {$m$}; \node[strand a,above] at (0,1.5) {$a$}; \node at (-1,-5.5) {$m$}; \node at (1,-5.5) {$m$}; \node[strand a] at (0,2) {$a$}; \end{tikzineqn} \quad=\quad \begin{tikzineqn}[scale=0.5] \draw[strand a] (0,0) -- (0,1.5); \draw[strand m] (0,0) -- (1,-1) -- ++(0,-4); \draw[strand m] (0,0) -- (-1,-1) -- ++(0,-4); \node[longbead] at (1,-2.2) {$\sigma_3(a)$}; \node[bead] at (-1,-3.8) {$\overline{v}$}; \node[below] at (-1,-5) {$m$}; \node[below] at (1,-5) {$m$}; \node[strand a,above] at (0,1.5) {$a$}; \end{tikzineqn} \quad=\quad \begin{tikzineqn}[scale=0.5] \draw[strand a] (0,0) -- (0,1.5); \draw[strand m] (0,0) -- (1,-1) -- ++(0,-4); \draw[strand m] (0,0) -- (-1,-1) -- ++(0,-4); \node[bead] at (1,-2.2) {$v$}; \node[longbead] at (1,-3.8) {$\sigma_3(a)$}; \node[below] at (-1,-5) {$m$}; \node[below] at (1,-5) {$m$}; \node[strand a,above] at (0,1.5) {$a$}; \end{tikzineqn} \] By self duality of $m$, we may ``rotate" the diagram up to a non-zero quaternionic constant by composing with the coevaluation map on the left strand, yielding \[ \begin{tikzineqn}[scale=0.5] \draw[strand a] (0,0) -- (1,1) node[above] {$a$}; \draw[strand m] (0,0) -- (-1,1) node[above] {$m$}; \draw[strand m] (0,0) -- (0,-5) node[below] {$m$}; \node[longbead] at (0,-1.5) {$\sigma_3(a)$}; \node[bead] at (0,-3.5) {$v$}; \end{tikzineqn} \quad=\quad \begin{tikzineqn}[scale=0.5] \draw[strand a] (0,0) -- (1,1) node[above] {$a$}; \draw[strand m] (0,0) -- (-1,1) node[above] {$m$}; \draw[strand m] (0,0) -- (0,-5) node[below] {$m$}; \node[longbead] at (0,-3.5) {$\sigma_3(a)$}; \node[bead] at (0,-1.5) {$v$}; \end{tikzineqn} \] which we may compose with the inverse to the trivalent vertex to conclude the desired result. \end{proof} \subsection{The Hexagon Equations} Since all the braiding coefficients are real, the only difference in the braiding equations arises from the fact that $m\otimes m\cong 4\bigoplus_{a\in A} a$ rather than $\bigoplus_{a\in A} a$. The graphical computations remain mostly the same except for the hexagon diagrams involving $\alpha_{m,m,m}$. 
The resulting braiding equations are equations (\ref{eqn:hexR1}) through (\ref{eqn:hexR7}), (\ref{eqn:hexR9}) through (\ref{eqn:hexR15}), and the following two, which differ from (\ref{eqn:hexR8}) and (\ref{eqn:hexR16}) by a coefficient of $-2$: \begin{equation} \sigma_3(a)\tau\chi(a,b)^{-1}\sigma_3(b) = -2\sum_{c\in A}\tau\chi(a,c)^{-1}\sigma_2(c)\tau\chi(c,b)^{-1}, \tag{8'}\label{eqn:hexH8} \end{equation} \begin{equation} \sigma_3(a)\tau \chi(a,b)\sigma_3(b) = -2\sum_{c\in A} \tau \chi(a,c)\sigma_1(c)\tau \chi(c,b). \tag{16'}\label{eqn:hexH16} \end{equation} The presence of the $-2$ does not affect the algebraic reduction process, and the reduced hexagon equations are thus \begin{align} &\sigma_0(a,b) = \chi(a,b), \label{eqn:RQreducedR1} \\ &\sigma_1(a)^2 = \chi(a,a), \label{eqn:RQreducedR2} \\ &\sigma_1(ab) = \sigma_1(a)\sigma_1(b)\chi(a,b), \label{eqn:RQreducedR3} \\ &\sigma_2(a) = \sigma_1(a), \label{eqn:RQreducedR4} \\ &\sigma_3(1)^2 = -2\tau \sum_{c\in A}\sigma_1(c), \label{eqn:RQreducedR5} \\ &\sigma_3(a) = \sigma_3(1)\sigma_1(a)\chi(a,a), \label{eqn:RQreducedR6} \end{align} which coincide with (\ref{eqn:reducedR1}) through (\ref{eqn:reducedR6}) except for the added $-2$ in (\ref{eqn:RQreducedR5}). \subsection{Classification} With the notation of Proposition \ref{prop:OrbitEquivalenceCharacterization}, we have: \begin{theorem} \label{thm:RQ-class-sols} Braidings on $\C_{\mathbb{H}}(K_4^n, h^{n}, \tau)$ are in bijection with $\QF^n_{-\text{sgn}(\tau)}\times \{\pm 1\}$. \end{theorem} \begin{proof} The argument is exactly parallel to the proof of Theorem \ref{thm:split-class-sols}, except that the extra factor of $-2$ in \eqref{eqn:RQreducedR5} gives $\sgn(\sigma_1) = -\sgn(\tau)$. \end{proof} \begin{theorem} A real/quaternionic Tambara-Yamagami category $\C_{\mathbb{H}}(A, \chi, \tau)$ admits a braiding if and only if either $(A, \chi) \cong (K_4^n, h^{n})$ for $n > 0$ or $(A, \chi)$ is trivial and $\tau < 0$. \end{theorem} \begin{proof} By Theorem \ref{thm:WallClassification}, we know $(A, \chi) \cong (K_4^n, h^{n})$. The conclusion then follows from the previous theorem, observing that $\QF^n_{-\text{sgn}(\tau)}$ is always nonempty except when $n = 0$ and $\tau > 0$. \end{proof} Since the group $K_4^n$, bicharacter $h^{\oplus n}$ and scaling coefficient $\tau$ are determined by context, we denote the braiding on $\C_{\mathbb{H}}(K_4^n, h^{n}, \tau)$ corresponding to $(\sigma, \epsilon) \in \QF^n_{-\text{sgn}(\tau)} \times \{\pm 1\}$ by $\C_{\mathbb{H}}(\sigma, \epsilon)$. \begin{definition}\label{defn:ExplicitRealQuaternionicBraidings} Given an element $(\sigma, \epsilon)$ of $\QF^n_{-\sgn\tau}\times \{\pm 1\}$, we define a braided structure $\C_\mathbb{H}(\sigma,\epsilon)$ on $\C_\mathbb{H}(K_4^n,h^{n},\tau)$ by: \begin{align*} \beta_{a,b} &= \chi(a,b)\cdot \id_{ab}, \\ \beta_{a,m} &= \beta_{m,a} = \sigma(a)\cdot \id_{m}, \\ \beta_{m,m} = \sum_{\substack{s\in S\\a\in K_4^n}} \epsilon\,&\sigma(a) (\id_m \otimes \bar{s})[a]^{\dag}[a] (s \otimes \id_m). \end{align*} \end{definition} As before, we now turn to the question of when $\C_\mathbb{H}(\sigma,\epsilon)$ and $\C_\mathbb{H}(\sigma',\epsilon')$ are braided equivalent. \begin{definition} Let $f \in \Aut(A, \chi)$ and $\kappa \in \{\pm1\}$. We let $F(f,\kappa)$ be the monoidal endofunctor of $\C_\mathbb{H}(K_4^n,h^{n},\tau)$ whose underlying action on grouplike simples is $f$, and which fixes $m$ and $\End(m)$.
The tensorator coefficients are: $$J_{a,b} = \id_{f(a)f(b)}, \quad J_{a,m} = \id_{f(a)} \otimes \id_m, \quad J_{m,a} = \id_m \otimes \id_{f(a)}, \quad J_{m,m} = \kappa\cdot\id_m \otimes \id_m.$$ \end{definition} \begin{lemma}\label{lem:RealQuaternionicFunctorClassification} For any $A,\chi, \tau$, $$\pi_0\Aut_\otimes\big(\C_\mathbb{H}(A,\chi,\tau)\big) \cong \Aut(A, \chi) \times \mathbb{Z}/2\mathbb{Z},$$ with representatives given by $F(f,\kappa)$. \end{lemma} \begin{proof} We first remark that every functor in $\Aut_\otimes(\C_\mathbb{H}(A, \chi,\tau))$ is naturally equivalent to one which fixes $\End(m)$; the action of $F$ on $\End(m)$ must be conjugation by some quaternion, and this same quaternion forms the desired natural transformation together with the identity on the invertible objects. Let $\psi$ and $\omega$ be functions $A \to \mathbb{R}^\times$ with $\psi(a)\omega(a)$ constant. We define $F(f, \psi, \omega)$ to be the monoidal functor whose underlying homomorphism is $f$ and has \begin{align*} J_{a,b} = \delta \psi(a,b) \cdot \id_{f(a)f(b)}, &\quad J_{a,m} = \psi(a)\cdot \id_{f(a)} \otimes \id_m,\\ \quad J_{m,a} = \psi(a)\cdot \id_m \otimes \id_{f(a)}, &\quad J_{m,m} = \id_m \otimes \omega(a)\id_m. \end{align*} The proof of Theorem 5.4 of \cite{pss23} shows us that $F(f, \psi, \omega)$ is a monoidal functor and every monoidal functor with underlying homomorphism $f$ is monoidally isomorphic to $F(f, \psi, \omega)$ for some $\psi, \omega$. The consistency equations for a monoidal natural isomorphism $\mu \colon F(f, \psi, \omega) \to F(f, \psi', \omega')$ are: \begin{align*} \psi'(a) &= \psi(a)\mu_a \\ \omega'(a) &= \frac{\overline{\mu_m}\mu_m}{\mu_a}\omega(a) \end{align*} By setting $\mu_a = \psi(a)^{-1}$, and using that $\psi(a)\omega(a)$ is constant, we see that $\mu$ defines a natural isomorphism to $F(f, \sgn(\omega(1)))$. Moreover, these same consistency conditions rule out any natural isomorphisms $F(f, 1) \to F(f,-1)$; we must have $\mu_1 = 1$ and so would obtain $-1 = |\mu_m|^2$, a contradiction. \end{proof} The proofs of the following proposition and theorem are identical to those of Proposition \ref{prop:RealFunctorBraided} and Theorem \ref{thm:SplitCaseEquivalence} upon replacing Lemma \ref{lem:SplitRealFunctorClassification} with Lemma \ref{lem:RealQuaternionicFunctorClassification}. \begin{proposition}\label{prop:QuaternionincFunctorBraided} The monoidal functor $F(f, \kappa)$ forms a braided monoidal equivalence $\C_\mathbb{H}(\sigma,\epsilon) \to \C_\mathbb{H}(\sigma',\epsilon')$ if and only if $f \cdot \sigma = \sigma'$ and $\epsilon = \epsilon'$. \end{proposition} \begin{theorem}\label{thm:RealQuaternionicBraidedEquivalence} There is a braided monoidal equivalence $\C_\mathbb{H}(\sigma,\epsilon) \sim \C_\mathbb{H}(\sigma',\epsilon')$ if and only if $\epsilon = \epsilon'$. In particular, there is no braiding on $\C_\mathbb{H}(K_4^n,h^{\oplus n},\tau)$ when $n = 0$ and $\tau > 0$, and in all other cases there are exactly two equivalence classes of braidings. \end{theorem} \begin{remark} In the split real case, the $\Aut(A, \chi)$ orbit which extends to a braiding has the same sign as $\tau$. Here, the sign is reversed. In both cases the scalar $\sigma_3(1)$ is a braided invariant, and indeed determines the equivalence class. \end{remark} \begin{example}\label{eg:Q+HasNoBraiding} Let $\mathcal Q_{\pm}:=\mathcal C_{\mathbb H}(K_4^0,h^{\oplus0},\pm\tfrac12)$.
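Note that equation \eqref{eqn:RQreducedR5} rules out a braiding on $\mathcal Q_+$ directly: since $A$ is trivial here (so that $\sigma_1(1)=1$ by \eqref{eqn:RQreducedR3}) and $\tau=\tfrac12$, it reads \[\sigma_3(1)^2=-2\tau\,\sigma_1(1)=-1,\] which is impossible for the real scalar $\sigma_3(1)$ guaranteed by Lemma \ref{lem:RQSigma3Real}.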
It can be shown by direct computation\footnote{The direct computation referenced here is analogous to our analysis of hexagons, but where only forward hexagons are analyzed for the sake of finding half-braidings instead of full braidings.} that as a fusion category, $\mathcal Z(\mathcal Q_+)\simeq\mathcal C_{\mathbb C}(\mathbb Z/2\mathbb Z,\id_{\mathbb C},\textit{triv}\,,\tfrac12)$. In particular, $\mathcal Z(\mathcal Q_+)$ contains no quaternionic object, and therefore cannot contain $\mathcal Q_+$ as a fusion subcategory. This is equivalent to the observation that $\mathcal Q_+$ cannot have a braiding, as indicated by Theorem \ref{thm:RealQuaternionicBraidedEquivalence}. This is directly analogous to the fact that $\mathcal{C}_{\mathbb{R}}(K_4^0,h^{\oplus 0},-1)$ also admits no braiding. Here is yet another way to see why there cannot be a braiding in this case. The category $\mathcal Q_+$ can be realized as the time reversal equivariantization of $\Vect_{\mathbb C}^\omega(\mathbb Z/2\mathbb Z)$, where $0\neq[\omega]\in H^3(\mathbb Z/2\mathbb Z;\mathbb C^\times)$ (see \cite{MR2946231} for further details on categorical Galois descent). The time reversal symmetry that produces $\mathcal Q_+$ is anomalous in the sense that it uses a nontrivial tensorator $T_1\circ T_1\cong T_0=\id$. This anomaly is what causes the presence of a quaternionic object, because without it, equivariantization would just produce $\Vect_{\mathbb R}^\omega(\mathbb Z/2\mathbb Z)$. If $\mathcal Q_+$ were to admit a braiding, then by base extension it would produce one of the two braidings on the category $\Vect_{\mathbb C}^\omega(\mathbb Z/2\mathbb Z)$ \textemdash~ either the semion or reverse semion. However, the time reversal functor $T_1$ is not braided (it swaps these two braidings), and so neither of these braidings could have come from $\mathcal Q_+$. \end{example} Taking $\sigma = \sigma'$ and $\epsilon = \epsilon'$ in Proposition \ref{prop:QuaternionincFunctorBraided}, we obtain: \begin{corollary} $$\pi_0\Aut_{br}\big(\C_{\mathbb{H}}(K_4^n , h^{\oplus n}, \tau, \sigma, \epsilon)\big) \cong H_{\sgn(\sigma)}^n \times \mathbb{Z}/2\mathbb{Z}$$ \end{corollary} \begin{lemma} There are exactly two families of twist morphisms for any $\C_{\mathbb{H}}(\sigma, \epsilon)$, corresponding to a sign $\rho \in \{\pm 1\}$. These twists are ribbon structures. \end{lemma} \begin{proof} Denoting the components of the twist by $\theta_x$, the required equations can be derived identically to \cite[\S3.7]{sie00}, and algebraically reduced in an identical way using that $\mathbb{H}$ is a division algebra and $\sigma$ is real valued and so the values $\sigma(a)$ commute with $\theta_m$. The results are (still): \begin{align*} \theta_{ab}& = \theta_a\theta_b\\ \theta_a &= \sigma(a)^2 = 1\\ \theta_a &= \theta_m^2\sigma_3(a)^2 \end{align*} Thus, the square root required to define $\theta_m$ is always of a positive real number and therefore still determined by a sign. Since every simple object is self dual, the required axiom is simply $\theta_m = \theta_m^*$. But this holds as a result of the (real) linearity of composition. \end{proof} \section{Braidings on Real/Complex Tambara-Yamagami Categories}\label{sec:Real/Complex} In the case where the invertibles are real and $m$ is complex, the analysis in \cite{pss23} was much more involved than in the other cases. 
Part of this complexity arises due to the fact that $m$ can be either directly or conjugately self dual, and this property is a monoidal invariant, necessitating some degree of casework. \begin{theorem}[{\cite[Thm 6.10]{pss23}}]\label{thm:RealComplexFromPSS} Let $\tau=\sfrac{\pm 1}{\sqrt{2|A|}}$, let $(-)^g\in\text{Gal}(\mathbb C/\mathbb R)$, and let $\chi:A\times A\to \mathbb C^\times_*$ be a symmetric bicocycle on $A$ with respect to $(-)^g$, whose restriction $\chi\mid_{A_0\times A_0}$ is a nondegenerate bicharacter. A quadruple of such data gives rise to a non-split Tambara-Yamagami category $\C_{\bb C}(A,g,\chi,\tau)$, with $\End(\mathbbm{1})\cong\mathbb{R}$ and $\End(m)\cong\mathbb{C}$. Furthermore, all equivalence classes of such categories arise in this way. More explicitly, two categories $\C_{\bb C}(A,g,\chi,\tau)$ and $\C_{\mathbb{C}}(A',g',\chi',\tau')$ are equivalent if and only if $g=g'$, and there exists the following data: \begin{enumerate}[label = \roman*)] \item an isomorphism $f:A\to A'$, \item a map $(-)^h:\mathbb{C}\to\mathbb{C}$, either the identity or complex conjugation, \item a scalar $\lambda\in S^1\subset \mathbb C$, \end{enumerate} satisfying the following conditions for all $a,b\in A$: \begin{gather} \chi'\big(f(a),f(b)\big)=\frac{\lambda\cdot\lambda^{ab}}{\lambda^a\cdot\lambda^b}\cdot\chi(a,b)^h\;,\label{EquivCond1}\\ \frac{\tau'}{\tau}=\frac{\lambda}{\lambda^g}\label{EquivCond2}\,. \end{gather} \end{theorem} \begin{lemma}\label{lem:RCChiProperties} Suppose $\C_{\mathbb{C}}(A,g,\tau,\chi)$ admits a braiding, with $A\cong A_0\rtimes (\mathbb{Z}/2\mathbb{Z})\langle w \rangle$. Then, $A_0\cong (\mathbb{Z}/2\mathbb{Z})^{n}$ is an elementary abelian 2-group with $n\in \mathbb{Z}_{\ge 0}$, and the symmetric bicocycle $\chi$ satisfies the following: \begin{enumerate}[label=(\roman*)] \item For all $a\in A_0$ and all $x\in A$, $\chi(a,x)$ is real-valued; \item $\chi$ is symmetric; \item $\chi(x,y)=\chi(x,y)^{gxy}=\chi(x,y)^{g}$ for all $x,y\in A$. \end{enumerate} \end{lemma} \begin{proof} If $\C_{\mathbb{C}}(A,g,\tau,\chi)$ admits a braiding, then $A$ is an abelian generalized dihedral group, so for any $x\in A$ we have \[ x=ww^{-1}x=wxw^{-1}=x^{-1} \implies x^2=1. \] Now we use the cocycle condition to see that for all $x\in A$, \[ \chi(1,x)=\chi(1,x)^2 \implies \chi(1,x)=1, \] and by the same argument in the other coordinate we have $\chi(x,1)=1$. Then, for any $a\in A_0$ and $x\in A$, since $a^2=1$, we have \[ 1=\chi(a^2,x)=\chi(a,x)^{a}\chi(a,x)=\chi(a,x)^2, \] which tells us that $\chi(a,x)\in \{\pm 1\}$ (and similarly $\chi(x,a)\in \{\pm 1\}$). Note that this gives us symmetry on $(A\times A_0)\cup (A_0\times A)$ using the symmetric cocycle condition, since $\chi$ is fixed by conjugation on this set. For condition (ii), we check that for any $a,b\in A_0$, \begin{align*} \chi(aw,bw)&=\chi(a,bw)^{w}\chi(w,bw) \\ &=\chi(a,b)\chi(a,w)^{b}\chi(w,b)\chi(w,w)^{b}\\ &=\chi(a,b)\chi(a,w)\chi(w,b)\chi(w,w), \end{align*} which gives us symmetry of $\chi$. Note that in particular $\chi(aw,aw)=\chi(a,a)\chi(w,w)$. It suffices to check condition (iii) on $A_0w\times A_0w$, since $\chi$ is real-valued on the rest. We use the symmetric cocycle and symmetric conditions to get that $\chi(x,y)=\chi(x,y)^{gxy}$, and since $|xy|=0$ we have the desired result. \end{proof} At this point, we have been using a choice of isomorphism $A\cong A_0\rtimes (\mathbb{Z}/2\mathbb{Z})\langle w \rangle$, which amounts to choosing an element $w\in A\setminus A_0$. It turns out that there is a canonical way to choose this element.
\begin{lemma}\label{lem:CanonicalW} There is a unique $w\in A\setminus A_0$ with the property that $\chi(w,-)$ is trivial when restricted to $A_0$. Moreover, restriction to $A_0$ gives an isomorphism from $\Aut(A, \chi)$ to $\Aut(A_0, \chi|_{A_0 \times A_0})$. \end{lemma} \begin{proof} At first, let $w\in A\setminus A_0$ be any element. Since $\chi|_{A_0\times A_0}$ is nondegenerate, there exists a unique $c\in A_0$ such that $\chi(w,a)=\chi(c,a)$ for every $a\in A_0$. It follows that $w'=cw\in A\setminus A_0$ is an element that satisfies \[\chi(w',a)=\chi(c,a)\chi(w,a)=\chi(w,a)^2=1\,,\] where the last equality follows from Lemma \ref{lem:RCChiProperties} parts (i) and (ii). Any other choice is of the form $bw'$ for $b\in A_0$. This implies that $\chi(bw',a)=\chi(b,a)\chi(w',a)=\chi(b,a)$ for every $a\in A_0$. Again by nondegeneracy, $\chi(bw',-)$ can only be trivial when $b=1$, so this $w'$ is unique. For the second part of the lemma, the defining property of $w$ implies $w$ is fixed by every $f \in \Aut(A,\chi)$, so that $f$ is completely determined by the homomorphism property together with its restriction to $A_0$. \end{proof} \begin{lemma} \label{lem:RCChiWWPositive} Up to monoidal equivalence, $\chi(w,w)$ can be taken to be 1 when $|g|=0$. \end{lemma} \begin{proof} By Theorem \ref{thm:RealComplexFromPSS}, for any $\lambda\in S^1\subset\mathbb C^\times$ there exists an equivalence $(\id_{\mathcal C},\id_{\mathbb C},\lambda):\mathcal C_{\mathbb C}(A,\id,\chi,\tau)\to\mathcal C_{\mathbb C}(A,\id,\chi',\tau)$, where $\chi'$ is the bicocycle defined by the equation \[\chi'(a,b)=\frac{\lambda\cdot\lambda^{ab}}{\lambda^a\cdot\lambda^b}\cdot\chi(a,b)\,.\] Whenever $|a|=0$ or $|b|=0$, it follows that $\chi'=\chi$. When both arguments are conjugating, the bicocycles are related by $\chi'=\lambda^4\chi$. In particular, by setting $\lambda^4=\chi(w,w)^{-1}$, we can force $\chi'(w,w)=1$. \end{proof} \subsection{Hexagon Equations} From the graphical calculus computations, we get the following equations from the forward hexagon diagrams: \input{resources/SeansForwardHexagons} and the following from the backward hexagon diagrams: \input{resources/SeansBackwardHexagons} We first obtain a few useful equations through algebraic simplification. Evaluating at $y=x$ in \eqref{RCHexagon10} we get \begin{equation} \sigma_1(x)^2=\chi(x,x) \label{RCReduced2}. \end{equation} Rearranging \eqref{RCHexagon3} we get \begin{equation} \sigma_0(x,y)=\chi(x,y)\frac{\sigma_1(x)^{y}}{\sigma_1(x)}, \label{RCReduced1} \end{equation} which we combine with evaluating \eqref{RCHexagon5} at $y=1$ to get \begin{equation} \sigma_1(x)^g=\sigma_1(x). \label{RCReduced3} \end{equation} Lastly, evaluating \eqref{RCHexagon16} at $x=y=1$ yields \begin{equation} \sigma_3(1)^2=2\tau \sum_{|z|=|g|} \sigma_1(z). \label{RCReduced6} \end{equation} Using these, we will prove a few lemmas which we will use to reduce the hexagon equations down to an equivalent set of simpler equations. \begin{lemma}\label{lem:RCChiAAReal} For all $a\in A_0$, we have $\chi(a,a)=1$. \end{lemma} \begin{proof} Using equations (\ref{RCHexagon3}) and (\ref{RCHexagon11}), we can write \[ \sigma_0(x,y) =\chi(x,y)\frac{\sigma_1(x)^{y}}{\sigma_1(x)} =\chi(x,y)^{-1}\frac{\sigma_2(y)^{x}}{\sigma_2(y)}. \] Setting $x=a$ and $y=w$, we get \[ \chi(a,w)^2 =\frac{\sigma_1(a)}{\sigma_1(a)^{w}} \cdot \frac{\sigma_2(w)^{a}}{\sigma_2(w)}. \] Since $|a|=0$, we have \[ 1=\chi(a,w)^2 =\frac{\sigma_1(a)}{\sigma_1(a)^{w}} \implies \sigma_1(a)=\overline{\sigma_1(a)}.
\] This tells us that $\sigma_1(a)\in \mathbb{R}$, which gives us that $\chi(a,a)>0$ by (\ref{RCReduced2}). Since $\chi(a,a)\in \{\pm 1\}$, as shown in the proof of Lemma \ref{lem:RCChiProperties}, it follows that $\chi(a,a)=1$. \end{proof} \begin{corollary} \label{cor:RCHyperbolicPairing} The bicharacter $\chi|_{A_0\times A_0}$ is hyperbolic, and thus for some choice of basis for $A_0$, is equal to the standard hyperbolic pairing $h^{n}$ on $A_0\cong K_4^{n}$ for some $n\in \mathbb{Z}_{\ge 0}$. \end{corollary} \begin{corollary} \label{cor:RCSelfPairingis1} If $\C_{\mathbb{C}}(A,g,\tau,\chi)$ admits a braiding, then up to monoidal equivalence, $\chi$ is a real-valued symmetric bicharacter with $\chi(x,x)=1$ for all $x\in A$. \end{corollary} \begin{proof} By Lemma \ref{lem:RCChiProperties} and Lemma \ref{lem:RCChiAAReal}, it suffices to check that $\chi(w,w)=1$ and use the cocycle condition. When $g$ is trivial, this follows from Lemma \ref{lem:RCChiWWPositive}. When $g$ is nontrivial, this is implied by \eqref{RCReduced2} and \eqref{RCReduced3} which show us that $\chi(w,w)$ is the square of a real number. \end{proof} \begin{remark}\label{rmk:RCSigma1Real} In particular, this tells us that $\sigma_1$ is always $\{\pm 1\}$-valued by \eqref{RCReduced2}, and hence that $\sigma_0=\chi$ by \eqref{RCReduced1}. Note also that $\chi=\chi^{-1}$ is $\{\pm 1\}$-valued, since $\chi(x,y)^2=\chi(x^2,y)=\chi(1,y)=1$ for all $x,y\in A$. \end{remark} \begin{remark} Note that although we know that $\chi$ is nondegenerate on $A_0 \times A_0$, it is necessarily degenerate on the whole of $A$, thanks to Lemma \ref{lem:CanonicalW}. Hence the classification results for bilinear forms used previously to show that certain forms are hyperbolic do not apply here. \end{remark} \begin{lemma}\label{lem:RCSigma3Squared1} The scalar $\sigma_3(1)^2$ is real, and it can be computed by the formula \[\sigma_3(1)^2=2^{n+1}\tau\sigma_1(w)^{|g|}\sgn(\sigma_1|_{A_0}).\] Consequently, $\sigma_3(1)^4 = 1$. \end{lemma} \begin{proof} Recall from \eqref{RCReduced6} that we have \[ \sigma_3(1)^2=2\tau \sum_{|z|=|g|} \sigma_1(z)\,. \] When $g$ is nontrivial, each summand is of the form \[\sigma_1(aw)=\sigma_1(a)\sigma_1(w)\chi(a,w)=\sigma_1(a)\sigma_1(w)\,,\] for some unique $a\in A_0$. After possibly factoring out the term $\sigma_1(w)$, both cases for $g$ then follow from Proposition \ref{prop:OrbitEquivalenceCharacterization}. \end{proof} \begin{corollary} The function $\sigma_2$ is real-valued on all of $A$. \end{corollary} \begin{proof} Comparing \eqref{RCHexagon6} and \eqref{RCHexagon13} at $y=1$ we get \begin{equation} \sigma_2(x)=\sigma_1(x)^{gx}\frac{\sigma_{3}(1)^{g}}{\sigma_3(1)^{gx}} =\sigma_1(x)\frac{\sigma_{3}(1)^{g}}{\sigma_3(1)^{gx}}. \end{equation} By Lemma \ref{lem:RCSigma3Squared1}, $\sigma_{3}(1)$ is purely real or imaginary, so $\frac{\sigma_{3}(1)^{g}}{\sigma_3(1)^{gx}}\in \{\pm 1\}$. \end{proof} In summary, we have: \begin{proposition} \label{prop:RCBraidingConstraintsFinal} The braiding coefficients $\sigma_0$, $\sigma_1$ and $\sigma_2$ of any braiding on a real/complex Tambara-Yamagami category are necessarily real-valued.
The hexagon equations are equivalent to the following: \begin{align} & \sigma_0(x,y)=\chi(x,y) \label{RCVeryReduced1} \\ & \sigma_1(x)^2=\chi(x,x) \label{RCVeryReduced2} \\ & \sigma_1(xy)=\sigma_1(x)\sigma_1(y)\chi(x,y) \label{RCVeryReduced3} \\ & \sigma_3(1)^2=2\tau \sum_{|z|=|g|} \sigma_1(z) \label{RCVeryReduced4} \\ & \sigma_3(x)=\sigma_3(1)\sigma_1(x) \label{RCVeryReduced5} \\ & \sigma_3(x) = \sigma_3(x)^g \label{RCVeryReduced6} \\ & \sigma_2(x)=\sigma_1(x)\frac{\sigma_{3}(1)}{\sigma_3(1)^{x}} \label{RCVeryReduced7} \end{align} \end{proposition} \begin{proof} For the forward direction, it remains to check that \eqref{RCVeryReduced5}, \eqref{RCVeryReduced6} and \eqref{RCVeryReduced7} follow from the hexagon equations. The first and last equations follow from setting $y = 1$ in \eqref{RCHexagon14} and \eqref{RCHexagon7}, respectively. We postpone the derivation of \eqref{RCVeryReduced6}. For the converse, we wish to derive the original hexagon equations from the reduced ones. We may rewrite \eqref{RCHexagon4} as \[ \sigma_1(y)\chi(x,y)\sigma_1(x) \frac{\sigma_3(1)^2}{\sigma_3(1)^{x}\sigma_3(1)^{y}} \stackrel{?}{=} \sigma_1(xy) \frac{\sigma_{3}(1)}{\sigma_3(1)^{xy}}, \] and check that it holds in each of the cases $|x|=0$, $|y|=0$ and $|x|=|y|=1$ (in the last case using Lemma \ref{lem:RCSigma3Squared1}). Similarly \eqref{RCHexagon6} and \eqref{RCHexagon7} follow from the fact that $\sigma_3(1)^2$ is conjugate invariant. The derivation of \eqref{RCHexagon16} is exactly the same as in the split real case. The rest, except for \eqref{RCHexagon8}, follow from straightforward algebraic checks. We now show that \eqref{RCHexagon8} is equivalent to \eqref{RCVeryReduced6} in the presence of the other reduced hexagon equations. To begin, we can expand both sides of \eqref{RCHexagon8} using the definition of $\sigma_2$ and $\sigma_3$ and the properties of $\chi$ to arrive at the equivalent form: \begin{align*} \chi(x, y)\sigma_3(1)^x\sigma_3(1)^y\sigma_1(x)\sigma_1(y) &= 2\tau \sum_{|z| = |gxy|} \chi(x, z)\chi(z, y) \sigma_1(z) \frac{\sigma_3(1)^{gxy}}{\sigma_3(1)} \\ &\overset{\eqref{RCHexagon16}}{=} \sigma_3(x)\sigma_3(y)\chi(x,y)\frac{\sigma_3(1)^{gxy}}{\sigma_3(1)} \end{align*} Canceling terms, we arrive at $$\sigma_3(1)^x\sigma_3(1)^y = \sigma_3(1)\sigma_3(1)^{gxy}.$$ Since $\sigma_3(1)$ is a 4th root of unity, we have $(\sigma_3(1)^x\sigma_3(1)^y)/(\sigma_3(1)\sigma_3(1)^{xy}) = 1$, so that $\sigma_3(1)^{xy}$ is $g$-fixed for all $x, y$, and thus $\sigma_3(1)$ and $\sigma_3(x)$ are as well. \end{proof} \subsection{Classification of Braidings in the Real/Complex Case} Recalling Corollary \ref{cor:RCHyperbolicPairing}, we know that any real/complex Tambara-Yamagami category admitting a braiding has $A \cong K_4^n \rtimes (\mathbb{Z}/2\mathbb{Z})\langle w \rangle$. Moreover, in all cases we can assume $\chi(x,x) = 1$. \begin{theorem} \label{thm:RCGTrivialBijectionClassification} Braidings on $\C_{\mathbb{C}}(K_4^n \rtimes \mathbb{Z}/2\mathbb{Z}, \id, \chi, \tau)$ are in bijection with pairs $(\sigma, \epsilon) \in \QF(\chi) \times \{\pm 1\}$. \end{theorem} \begin{proof} In this case, since $g = \id$ is trivial, the constraints of Proposition \ref{prop:RCBraidingConstraintsFinal} are the same as in the split real case. The proof of this theorem is therefore the same as Theorem \ref{thm:split-class-sols} (without the requirement that $\sigma_3(1)$ is real).
\end{proof} \begin{theorem}\label{thm:RCGNontrivialBijectionClassification} Braidings on $\C_{\mathbb{C}}(K_4^n \rtimes \mathbb{Z}/2\mathbb{Z}, \bar{\cdot}, \chi, \tau)$ are in bijection with pairs $(\sigma, \epsilon) \in \QF(\chi) \times \{\pm 1\}$ satisfying $$\sgn(\sigma|_{K_4^n})\sgn(\tau)\sigma(w) = 1.$$ \end{theorem} \begin{proof} We produce the data $(\sigma, \epsilon)$ in an identical way to the previous classification theorems. In this case, there is an extra constraint, namely that $\sigma_3$ is real, which holds if and only if $\sigma_3(1)$ is real. By Lemma \ref{lem:RCSigma3Squared1} and the definition of $\epsilon$, we have $$\sigma_3(1) = \epsilon \sqrt{2^{n + 1}\tau\sigma_1(w)\sgn(\sigma|_{K_4^n})},$$ which shows the constraint $\sgn(\sigma|_{K_4^n})\sgn(\tau)\sigma(w) = 1$ is necessary and sufficient for $\sigma_3$ to be real. \end{proof} \begin{notation} We denote a braiding on $\C_{\mathbb{C}}(A, g, \chi, \tau)$ by $\C_{\mathbb{C}, g}(\sigma, \epsilon)$. Note that $\tau$ is not necessarily determined by context, and the constraint $\sgn(\sigma|_{K_4^n})\sgn(\tau)\sigma(w) = 1$ is also suppressed when $g$ is nontrivial. Moreover, we write $\sgn(\sigma) := \sgn(\sigma|_{K_4^n})$. No confusion should arise, since the sign of a quadratic form on $A$ is not defined. \end{notation} The remainder of this section is dedicated to determining which of these braidings are equivalent, and some corollaries of this process. \begin{definition} Let $f \in \Aut(A),~ \xi \in \Gal(\mathbb{C}/\mathbb{R})$ and $\lambda \in S^1$. We let $F(f,\xi,\lambda)$ be the candidate monoidal endofunctor of $\C_{\mathbb{C}}(A, g, \chi, \tau)$ whose underlying action on grouplike simples is $f$, which fixes $m$, and which applies $\xi$ to $\End(m)$. The tensorator coefficients are: $$J_{a,b} = \id_{f(a)f(b)}, \quad J_{a,m} = \id_{f(a) \otimes m}, \quad J_{m,a} = \frac{\lambda}{\lambda^a}\id_m \otimes \id_{f(a)}, \quad J_{m,m} = \id_m \otimes \lambda \id_m.$$ We stress that in general, $F(f, \xi, \lambda)$ is not a monoidal functor. The consistency equations (simplified for our context from \cite[Theorem 6.10]{pss23}) are \begin{align} \chi\big(f(a), f(b)\big) &= \frac{\lambda \cdot \lambda^{ab}}{\lambda^a \cdot \lambda^b}\cdot \chi(a,b) \label{eqn:RCEndomorphismConsistency1}\\ \lambda^g &= \lambda. \label{eqn:RCEndomorphismConsistency2} \end{align} Still, in the cases where $F(f, \xi, \lambda)$ is monoidal, the composition rule can be seen to be $$F(f, \xi, \lambda) \circ F(f', \xi', \lambda') \cong F\big(f \circ f', \xi\circ \xi', \lambda \cdot \xi(\lambda')\big).$$ \end{definition} \begin{remark} The proof of \cite[Theorem 6.10]{pss23} shows that the functors $F(f, \xi, \lambda)$ satisfying the two consistency equations \eqref{eqn:RCEndomorphismConsistency1}, \eqref{eqn:RCEndomorphismConsistency2} are a complete set of representatives for $\pi_0\Aut_{\otimes}(\C_{\bb C}(A, g, \chi, \tau))$. \end{remark} \begin{lemma} \label{lem:RCFunctorClassification} We have $$\pi_0\Aut_{\otimes}\big(\C_{\bb C}(A, g, \chi, \tau)\big) \cong \Aut(A, \chi) \times K_4$$ whenever $\chi$ is real-valued. When $g$ is nontrivial, the functors $F(f, \xi, \pm 1)$ form a complete set of representatives. When $g$ is trivial, we instead take $F(f, \xi, 1)$ and $F(f, \xi, i)$ as representatives. \end{lemma} \begin{proof} We first observe that the function $f$ and the automorphism $\xi$ are invariants of the underlying functor. We next extract the consistency equations from \cite[35]{pss23} for a monoidal equivalence $\mu \colon F(f,\xi, \lambda) \to F(f, \xi, \lambda')$.
In the notation used in \textit{loc. cit.}, our assumptions are that $\theta, \theta',\varphi, \varphi'$ are identically 1. The consistency equations thus trivialize to: \begin{align*} \mu_a&= \frac{\mu_m^a}{\mu_m} \\ \frac{\lambda'}{(\lambda')^a} &= \frac{\lambda}{\lambda^a} \\ \lambda' &= \frac{\mu_m^{ga}\mu_m}{\mu_a}\lambda \end{align*} We begin with the case when $g$ is nontrivial. In this case, the monoidal functor consistency equations \eqref{eqn:RCEndomorphismConsistency1}, \eqref{eqn:RCEndomorphismConsistency2} imply $\lambda$ is real and $f \in \Aut(A, \chi)$. Substituting the first consistency equation for $\mu$ into the third (with $a = w$) shows that $F(f, \xi, 1)$ is not monoidally isomorphic to $F(f, \xi, -1)$. When $g$ is trivial, we can set $a = b = w$ in \eqref{eqn:RCEndomorphismConsistency1} and use that $\chi(f(w), f(w)) = \chi(w,w) = 1$ (Corollary \ref{cor:RCSelfPairingis1}) to conclude $\lambda^4 = 1$. The second of the three consistency conditions implies that whether or not $\lambda$ is real is a monoidal invariant. It remains to show that the two functors $F(f, \xi, \pm 1)$ are isomorphic, and likewise for $F(f, \xi, \pm i)$. This can be achieved by setting $\mu_m = i$ and then defining $\mu_a$ according to the first consistency equation. The last equation holds since $g$ is trivial. Equation \eqref{eqn:RCEndomorphismConsistency1}, together with the restrictions on $\lambda$, now implies $f \in \Aut(A, \chi)$. \end{proof} \begin{proposition} \label{prop:RCFunctorBraided} The monoidal functor $F(f, \xi, \lambda)$ is a braided equivalence $\C_{\mathbb{C}, g}(\sigma, \epsilon) \to \C_{\mathbb{C}, g}(\sigma', \epsilon')$ if and only if $f \cdot \sigma|_{K_4^n} = \sigma'|_{K_4^n}$, and \begin{align} \sigma'(w) &= \lambda^2\sigma(w)\label{eqn:FinalRCBraidingSquare1}\\ \sigma_3'(1) &= \sigma_3(1)^\xi. \label{eqn:FinalRCBraidingSquare2} \end{align} \end{proposition} \begin{proof} The conditions for $F(f, \xi, \lambda)$ to be a braided equivalence $\C_{\mathbb{C}, g}(\sigma, \epsilon) \to \C_{\mathbb{C}, g}(\sigma', \epsilon')$ are: \begin{align} \chi\big(f(a), f(b)\big) &= \chi(a,b)^\xi \label{eqn:RCBraidingSquare1}\\ \sigma_1'\big(f(a)\big) &= \frac{\lambda^a}{\lambda}\sigma_1(a)^\xi \label{eqn:RCBraidingSquare2}\\ \sigma_2'\big(f(a)\big) &= \frac{\lambda}{\lambda^a}\sigma_2(a)^\xi \label{eqn:RCBraidingSquare3}\\ \sigma_3'\big(f(a)\big) &= \sigma_3(a)^\xi. \label{eqn:RCBraidingSquare4} \end{align} The first of these equations always holds since $f \in \Aut(A, \chi)$. Additionally, since $f$ fixes $w$, $f$ must take conjugating elements to conjugating elements. We may also assume $\lambda^4 = 1$, so that $\lambda/\lambda^a = \lambda^a/\lambda$. These facts allow the derivation of Equation \eqref{eqn:RCBraidingSquare3} from Equations \eqref{eqn:RCBraidingSquare2} and \eqref{eqn:RCBraidingSquare4}. Finally, using that $\sigma_{1}$ is real, we can drop the $\xi$ in \eqref{eqn:RCBraidingSquare2}, as well as prove that \eqref{eqn:RCBraidingSquare4} holds for all $a$ if and only if it holds at $1$, which is exactly \eqref{eqn:FinalRCBraidingSquare2}. Evaluating \eqref{eqn:RCBraidingSquare2} on elements of $K_4^n$ gives $f \cdot \sigma|_{K_4^n} = \sigma'|_{K_4^n}$, and evaluating at $w$ gives \eqref{eqn:FinalRCBraidingSquare1}.
These conditions are indeed equivalent to \eqref{eqn:RCBraidingSquare2}, as $$\sigma_1'\big(f(aw)\big) = \sigma_1'\big(f(a)\big)\sigma_1'(w) = \frac{\lambda}{\lambda^{aw}}\sigma_1(a)\sigma_1(w) = \frac{\lambda}{\lambda^{aw}}\sigma_1(aw).$$ \end{proof} As with the rest of this section, the case when $|g|=1$ is significantly easier since the structure constants are $g$-fixed. \begin{theorem} When $n > 0$, there are exactly three equivalence classes of braidings on $\C_{\mathbb{C}}(K_4^n \rtimes \mathbb{Z}/2\mathbb{Z}, \id, \chi, \tau)$. When $n = 0$ and $\tau < 0$, there is a unique equivalence class, and when $n = 0$ and $\tau > 0$, there are precisely two. These braidings are distinguished as follows: \begin{itemize} \item The braidings $\C_\mathbb{C, \id}(\sigma, \epsilon)$ are all equivalent if $\sgn(\sigma) = -\sgn(\tau)$. \item If $\sgn(\sigma) = \sgn(\tau)$, then there are exactly two equivalence classes of braidings, distinguished by $\epsilon$. \end{itemize} \end{theorem} \begin{proof} First, observe that only one of the two distinguished cases can occur when $n = 0$. We begin with the first case. Suppose we are given $\C_\mathbb{C, \id}(\sigma, \epsilon)$ and $\C_\mathbb{C, \id}(\sigma', \epsilon')$ with $\sgn(\sigma) =\sgn(\sigma') = -\sgn(\tau)$. In this case $\sigma_3(1)$ and $\sigma_3'(1)$ are square roots of negative reals, and are thus purely imaginary. So, we can choose a $\xi \in \Gal(\mathbb{C}/\mathbb{R})$ such that $\sigma_3(1)^\xi = \sigma_3'(1)$. Moreover, we can also find a 4th root of unity $\lambda$ such that $\lambda^2\sigma(w) = \sigma'(w)$. Finally, since the restrictions of $\sigma$ and $\sigma'$ to $K_4^n$ have the same sign, they are orbit equivalent and thus there exists an $f \in \Aut(K_4^n, \chi|_{K_4^n})$ with $f \cdot \sigma = \sigma'$ on $K_4^n$. By Lemma \ref{lem:CanonicalW}, $f$ has a unique extension (also denoted $f$) to $\Aut(A, \chi)$. Then $F(f, \xi, \lambda)$ is a braided equivalence $\C_\mathbb{C, \id}(\sigma, \epsilon) \to \C_\mathbb{C, \id}(\sigma', \epsilon')$ by Proposition \ref{prop:RCFunctorBraided}. In the second case, the value $\sigma_3(1)$ is real and thus fixed by all braided functors, and thus $\epsilon$ is a braided invariant. It remains to show that the value of $\sigma(w)$ can be changed. We choose $\lambda$ with $\lambda^2\sigma(w) = \sigma'(w)$, and $f$ satisfying $f \cdot \sigma = \sigma'$ on $K_4^n$, extend $f$ to $A$, and deduce that $F(f, \id, \lambda)$ is the desired equivalence using Proposition \ref{prop:RCFunctorBraided}. \end{proof} If we let $(\sigma, \epsilon) = (\sigma', \epsilon')$ in Proposition \ref{prop:RCFunctorBraided}, we conclude: \begin{corollary} Suppose $\sgn(\sigma) = -\sgn(\tau)$. Then $$\pi_0\Aut_{\text{br}}\big(\C_\mathbb{C, \id}(\sigma, \epsilon)\big) \cong H_{\sgn(\sigma)}.$$ If $\sgn(\sigma) = \sgn(\tau)$, then $$\pi_0\Aut_{\text{br}}\big(\C_\mathbb{C, \id}(\sigma, \epsilon)\big) \cong H_{\sgn(\sigma)}\times \mathbb{Z}/2\mathbb{Z}.$$ \end{corollary} \begin{theorem} When $n > 0$, there are exactly four equivalence classes of braidings on $\C_{\mathbb{C}}(K_4^n \rtimes \mathbb{Z}/2\mathbb{Z}, \bar \cdot, \chi, \tau)$. When $n = 0$, there are two. Two braidings $\C_{\mathbb{C}, \bar \cdot}(\sigma, \epsilon)$ and $\C_{\mathbb{C}, \bar \cdot}(\sigma', \epsilon')$ are equivalent if and only if $\sgn(\sigma) = \sgn(\sigma')$ and $\epsilon = \epsilon'$.
\end{theorem} \begin{proof} The ``only if'' direction follows from Proposition \ref{prop:RCFunctorBraided}, noting that in this case all $F(f, \xi, \lambda)$ have $\lambda^2 = 1$, and moreover that $\sigma_3(1)$ is real and so $\epsilon$ is fixed. Note that in this case the value $\sigma(w)$ is determined by the sign of $\sigma$ (restricted to $K_4^n$) and so is automatically preserved. The functor required for the converse can be constructed from any $f$ such that $f \cdot \sigma = \sigma'$ as the monoidal functor $F(f, \id, 1)$, again by Proposition \ref{prop:RCFunctorBraided}. \end{proof} Again choosing $(\sigma, \epsilon) = (\sigma', \epsilon')$ in Proposition \ref{prop:RCFunctorBraided}: \begin{corollary} $$\pi_0\Aut_{\text{br}}\big(\C_{\mathbb{C}, \bar \cdot}(\sigma, \epsilon)\big) \cong H_{\sgn(\sigma)} \times K_4$$ \end{corollary} \begin{lemma} There are exactly two families of twist morphisms for any $\C_{\mathbb{C}, \bar \cdot}(\sigma, \epsilon)$, corresponding to a sign $\rho \in \{\pm 1\}$. These twists are indeed ribbon structures (in the sense of \cite[Definition 8.10.1]{EGNO15}). \end{lemma} \begin{align} &\sigma_0(x,y)\sigma_0(x,z)=\sigma_0(x,yz) \label{RCHexagon1} \\ &\sigma_1(x)\sigma_0(x,y)=\chi(y,x)\sigma_1(x)^y \label{RCHexagon2} \\ &\sigma_0(x,y)\sigma_1(x)=\sigma_1(x)^y\chi(x,y) \label{RCHexagon3} \\ &\sigma_2(y)\chi(x,y)\sigma_2(x)=\sigma_2(xy) \label{RCHexagon4} \\ &\chi(x,y)^y\sigma_1(x)^{gxy}\sigma_1(x)=\sigma_0(x,xy) \label{RCHexagon5} \\ &\sigma_2(x)^{gxy}\sigma_3(xy)=\sigma_3(y)^x\chi(x,y)^y \label{RCHexagon6} \\ &\sigma_3(xy)\sigma_2(x)^{gxy} =\sigma_3(y)^x\chi(x,y)^{gx} \label{RCHexagon7} \\ &\chi(x,y)^{-g}\sigma_3(x)^y\sigma_3(y)^x =2\tau\sum_{|z|=|gxy|}\chi(x,z)^{-g} \chi(z,y)^{-g}\sigma_2(z)^z \label{RCHexagon8} \end{align} \begin{align} &\sigma_0(xy,z)=\sigma_0(x,z)\sigma_0(y,z) \label{RCHexagon9}\\ &\sigma_1(xy)=\sigma_1(x)\sigma_1(y)\chi(x,y)^{-1} \label{RCHexagon10}\\ &\sigma_2(y)^x\chi(x,y)^{-1}=\sigma_0(x,y)\sigma_2(y) \label{RCHexagon11}\\ &\sigma_2(y)^x\chi(y,x)^{-1}=\sigma_2(y)\sigma_0(x,y) \label{RCHexagon12}\\ &\sigma_3(y)\chi(x,y)^{-gx}=\sigma_1(x)\sigma_3(xy) \label{RCHexagon13}\\ &\sigma_3(y)\chi(x,y)^{-y}=\sigma_1(x)\sigma_{3}(xy) \label{RCHexagon14}\\ &\sigma_0(xy,x)=\sigma_2(x)^{gxy}\chi(x,y)^{-y}\sigma_2(x) \label{RCHexagon15}\\ &\sigma_3(x)\sigma_3(y)\chi(x,y)^{xy}=2\tau\sum_{|z|=|gxy|}\chi(x,z)^{gz}\chi(z,y)^{gz}\sigma_1(z) \label{RCHexagon16} \end{align} \section{Braidings on Split Complex Tambara-Yamagami Categories} In this section, we use the results of sections \ref{sec:QFAnalysis} and \ref{sec:SplitReal} to determine the number of braidings on split complex Tambara-Yamagami categories. While the classification in terms of equivalence classes of quadratic forms was determined by Galindo (\cite{GALINDO_2022}) already, the precise number of equivalence classes was not. Moreover, most previous computations were done in the case when the rank of the underlying group is small. We show here that there are fewer equivalence classes of braidings in these cases than in general. This process does not require any new computations. We begin by recalling the discussion of \cite[\S2.5]{SchopierayNonDegenExtension}, which computes the number of equivalence classes of split complex Tambara-Yamagami categories with underlying group of rank $\leq 2$. Let $\ell$ be the nontrivial bicharacter on $\mathbb{Z}/2\mathbb{Z}$. There are two quadratic forms with coboundary $\ell$; these are inequivalent.
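Explicitly, writing $\mathbb{Z}/2\mathbb{Z}=\langle x\rangle$, such a quadratic form $\nu$ must satisfy $\nu(x)^2=-1$, so the two forms are given by $\nu(x)=\pm i$; in particular, both are complex-valued.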
Moreover, there are exactly three equivalence classes of quadratic forms on $K_4$ inducing $\ell^{2}$. Now let $\C_\mathbb{C}((\mathbb{Z}/2\mathbb{Z})^n, \chi, \tau)$ be a split complex Tambara-Yamagami category. Due to the fact that $\chi$ is symmetric, we can use the results of Wall \cite[\S5]{wall63} to deduce that if $n$ is even, there are exactly two choices for $\chi$ and if $n$ is odd there is exactly one. Indeed, when $n > 0$ is even, the representatives are $h^{ n/2}$ and $h^{(n - 2)/2} \oplus \ell ^{ 2}$. When $n$ is odd, the representative is $h^{ (n-1)/2} \oplus \ell$. The following theorem both relies on and strengthens the results of Galindo (\cite{GALINDO_2022}). \begin{theorem} Let $\C_\mathbb{C}((\mathbb{Z}/2\mathbb{Z})^n, \chi, \tau)$ be a split complex Tambara-Yamagami category ($\chi$ and $\tau$ are fixed). Then \begin{itemize} \item If $n > 0$ is even and $\chi \cong h^{ n/2}$, there are exactly four equivalence classes of braidings on $\C_\mathbb{C}((\mathbb{Z}/2\mathbb{Z})^n, \chi, \tau)$. When $n = 0$, there are two. These are classified precisely by a free choice of a quadratic form $\sigma$ inducing $\chi$, together with a sign $\epsilon$. The formulas for the braidings are identical to Definition \ref{defn:ExplicitSplitRealBraidings}. These categories are symmetric if and only if they are defined over the reals, which occurs precisely when $\sgn(\sigma) = \sgn(\tau)$. Moreover, in this case $$\pi_0\Aut_{\text{br}}\Big(\C_\mathbb{C}\big((\mathbb{Z}/2\mathbb{Z})^n, \chi, \tau, \sigma, \epsilon\big)\Big) \cong H_{\sgn \sigma}^{n / 2}.$$ \item If $n \geq 4$ is even and $\chi \cong h^{(n - 2)/2} \oplus \ell^{ 2}$, there are exactly eight equivalence classes of braidings on $\C_\mathbb{C}((\mathbb{Z}/2\mathbb{Z})^n, \chi, \tau)$. When $n = 2$, there are six. These are classified precisely by a free choice of a quadratic form $\zeta$ inducing $ h^{(n - 2)/2} \oplus \ell^{ 2}$, together with a sign $\epsilon$. These categories are never symmetric and are never defined over the reals. In this case, $$\pi_0\Aut_{\text{br}}\big(\C_\mathbb{C}((\mathbb{Z}/2\mathbb{Z})^n, \chi, \tau, \zeta, \epsilon)\big) \cong \text{Stab}_{\Aut((\mathbb{Z}/2\mathbb{Z})^n, \chi)}(\zeta).$$ \item If $n \geq 3$ is odd and $\chi \cong h^{ (n-1)/2} \oplus \ell$, there are exactly eight equivalence classes of braidings on $\C_\mathbb{C}((\mathbb{Z}/2\mathbb{Z})^n, \chi, \tau)$. If $n = 1$, then there are four. These are classified precisely by a free choice of a quadratic form $\sigma$ inducing $h^{(n - 1)/2}$, a quadratic form $\nu$ inducing $\ell$, and a sign $\epsilon$. These categories are never symmetric and are never defined over the reals. In this case $$\pi_0(\Aut_{\text{br}}(\C_\mathbb{C}((\mathbb{Z}/2\mathbb{Z})^n, \chi, \tau, \sigma,\nu ,\epsilon))) \cong H_{\sgn \sigma}^{(n - 1)/2}.$$ \end{itemize} \end{theorem} \begin{corollary} A split complex braided Tambara-Yamagami category is symmetric if and only if it is defined over the reals. \end{corollary} \begin{proof} By \cite[Theorem 4.9]{GALINDO_2022}, we are reduced to calculating the number of orbits of quadratic forms inducing the three possible bicharacters, together with their stabilizers. We have already done this for hyperbolic $\chi$ in Proposition \ref{prop:OrbitEquivalenceCharacterization}, which gives most of the claims in this case. Indeed, if $\chi$ is hyperbolic, the braiding coefficients $\sigma_1$ and $\sigma_2$ are always real.
Thus, the braiding is symmetric if and only if the function $\sigma_3(x) = \sigma_3(1)\sigma_1(x)$ is pointwise a sign. This occurs exactly when $\sigma_3(1)$ is real (so that the braiding is defined over the reals), which is again equivalent to $\sgn(\sigma) = \sgn(\tau)$. We tackle the case when $n$ is odd next. It is not too hard to see that extension by the identity of $\mathbb{Z}/2\mathbb{Z}$ gives an isomorphism $$ \Aut(K_4^{(n - 1)/2}, h^{ {(n - 1)/2}}) \cong \Aut(K_4^{(n - 1)/2} \times \mathbb{Z}/2\mathbb{Z}, h^{ {(n - 1)/2}} \oplus \ell).$$ In particular, the quadratic forms inducing $ h^{ {(n - 1)/2}} \oplus \ell$ decompose as products of quadratic forms on $K_4^{(n - 1)/2}$ and $\mathbb{Z}/2\mathbb{Z}$ inducing $h^{ {(n - 1)/2}}$ and $\ell$ respectively, and this decomposition is respected by $\Aut(K_4^{(n - 1)/2} \times \mathbb{Z}/2\mathbb{Z}, h^{ {(n - 1)/2}} \oplus \ell).$ This implies the results in the odd case, noting that any quadratic form inducing $\ell$ is complex valued and therefore not pointwise self-inverse. The last case is when the multiplicity of $\ell$ in $\chi$ is 2. This case follows from Proposition \ref{prop:StabilizerCombinatorics2ElectricBoogaloo} and the arguments above. To conclude the statements about the groups of braided autoequivalences, observe that Proposition \ref{prop:RealFunctorBraided} remains valid over the complex numbers, and all endofunctors of the split Tambara-Yamagami categories in question are still of the form $F(f)$. When the multiplicity of $\ell$ in $\chi$ is 2, the sign of $\sigma$ is not (in general) well defined and so we choose not to pursue a better description of its stabilizer. \end{proof} \section{\texorpdfstring{$G$-}{G-}Crossed Braidings on Complex/Complex Tambara-Yamagami Categories }\label{sec:CrossedBraided} In this section we analyze possible braidings in the complex/complex case, where the endomorphism algebra of every simple object is isomorphic to the complex numbers. The argument at the beginning of section 4 of \cite{pss23} shows that we need only focus on the case when $m$ is the only Galois nontrivial simple object, otherwise the classification theorems in the previous section remain valid (as the category under consideration is in fact fusion over the complex numbers). The following lemma is initially disheartening: \begin{lemma}\label{lem:noComplexComplexBraidings} There are no braidings on any complex/complex Tambara-Yamagami category where $m$ is the only Galois nontrivial simple object. \end{lemma} \begin{proof} Let $a$ be a Galois trivial simple object (such as the monoidal unit). By naturality of the braiding and Galois nontriviality of $m$, we have $$ic_{a, m} = \begin{tikzineqn} \AMBraidCrossing \node[smallbead] at (-.5, -.5) {$i$}; \end{tikzineqn} = \begin{tikzineqn} \AMBraidCrossing \node[smallbead] at (.5, .5) {$i$}; \end{tikzineqn} = \begin{tikzineqn} \AMBraidCrossing \node[smallbead] at (0, .75) {$i$}; \end{tikzineqn} = \begin{tikzineqn} \AMBraidCrossing \node[smallbead] at (-1, 0) {$\bar i$}; \end{tikzineqn} =-ic_{a,m} $$ which proves that the braiding is zero, a contradiction. \end{proof} In light of this lemma, we expand our focus to $G$-crossed braidings. $G$-crossed braidings are generalizations of braidings (indeed, a $G$-crossed braiding for $G$ trivial is a braiding) which play an important role in extension theory (\cite{MR2677836}) and also appear in physics \cite{MR1923177,1410.4540}. 
$\mathbb{Z}/2\mathbb{Z}$-crossed braidings on the split complex Tambara-Yamagami categories were studied in \cite{EDIEMICHELL2022108364, GALINDO_2022}. The former article used techniques similar to the ones we employ here, whereas the latter article primarily leveraged extension theory. We begin with \cite[Definition 8.24.1]{EGNO15}: \begin{definition} \label{defn:CrossedBraidedCategory} A braided $G$-crossed fusion category is a fusion category $\C$ equipped with the following structures: \begin{enumerate} \item a (not necessarily faithful) grading $\C = \bigoplus_{g \in G}\C_g$, \item an action $(T_\bullet, \gamma) \colon G \to \Aut_\otimes(\C)$ such that $T_g(\C_h) \subset \C_{gh g^{-1}}$, \item a natural collection of isomorphisms, called the $G$-braiding: $$c_{a,b} \colon a \otimes b \simeq T_g(b) \otimes a, \quad \quad a \in \C_g, g \in G, \text{ and } b \in \C.$$ \end{enumerate} Let $\mu_g$ be the tensor structure of the monoidal functor $T_g$. Then the above structures are required to satisfy the following three axioms. \input{sections/G-crossed-coherence-diagrams} \end{definition} The first axiom gives the compatibility between $T_g(c_{x,y})$ and $c_{T_gx, T_gy}$. The latter two axioms generalize the familiar hexagon axioms by adding an additional coherence, but have the same graphical interpretation. Note that if we assume the $G$-grading on $\C_{\overline{\mathbb{C}}}(A, \chi)$ is faithful, then it can be proved immediately from the fusion rules that $G\leq \mathbb{Z}/2\mathbb{Z}$, and there is a unique grading when $G = \mathbb{Z}/2\mathbb{Z}$. A $G$-crossed braiding for $G$ trivial is equivalent to a braiding. Since $\C_{\overline{\mathbb{C}}}(A, \chi)$ does not admit a braiding by Lemma \ref{lem:noComplexComplexBraidings}, when classifying faithfully graded $G$-crossed braidings on $\C_{\overline{\mathbb{C}}}(A, \chi)$ we may assume $G$ is $\mathbb{Z}/2\mathbb{Z}$ and the grading $(1)$ in Definition \ref{defn:CrossedBraidedCategory} is the Galois grading. Without loss of generality, we further assume $\C_{\overline{\mathbb{C}}}(A, \chi)$ is \textit{skeletal}, i.e isomorphism classes are all singletons and the scaling coefficient $\tau$ is positive. Before seriously involving the braiding coherences, we will constrain possible actions. We first analyze $T_1$ using Theorem 7.1 of \cite{pss23}. \begin{proposition}\label{FactsAboutTheFunctor} The functor $T_1$: \begin{itemize} \item Coincides on invertible simple objects with some order 2 group automorphism $f$ of $A$, and fixes $m$. \item For a simple object $X$, the map \[\epsilon_X \colon \mathbb{C} \to \End(X) \to \End(T_1 X) \to \mathbb{C}\] is always either the identity or complex conjugation, and is the same for every simple. We write $\epsilon$ for this map. \item Satisfies \[\chi(f(a), f(b)) = \epsilon(\chi(a,b))\] \end{itemize} \end{proposition} \begin{definition} An endofunctor of $\C_{\overline{\mathbb{C}}}(A, \chi)$ is called \textit{conjugating} if $\epsilon$ is conjugation. \end{definition} \begin{lemma}\label{lem:TMustConjugate} If $T_\bullet$ underlies a $\mathbb{Z}/2\mathbb{Z}$-crossed braiding, then $T_1$ is conjugating. \end{lemma} \begin{proof} This proof follows the same reasoning as Lemma \ref{lem:noComplexComplexBraidings}. Let $a$ be a Galois trivial simple object (such as the monoidal unit). 
By naturality of the braiding and Galois nontriviality of $m$, we have $$\epsilon(i)c_{m,a} = \begin{tikzineqn} \MABraidCrossing \node[longbead] at (-.5, .5) {$T(i)$}; \end{tikzineqn} = \begin{tikzineqn} \MABraidCrossing \node[smallbead] at (.5, -.5) {$i$}; \end{tikzineqn} = \begin{tikzineqn} \MABraidCrossing \node[smallbead] at (0, -.75) {$i$}; \end{tikzineqn} = \begin{tikzineqn} \MABraidCrossing \node[smallbead] at (-1, 0) {$\bar i$}; \end{tikzineqn} =\bar i c_{m,a}. $$ Therefore $\epsilon(i) = \bar{i}$. \end{proof} We are thus justified in thinking of $T$ as the Galois action of $\mathbb{Z}/2\mathbb{Z}$ on $\C_{\overline{\mathbb{C}}}(A, \chi)$, twisted by some automorphism of $A$. This automorphism is in fact trivial: \begin{proposition}\label{prop:TFixesAllObjects} Let $\C_{\overline{\mathbb{C}}}(A, \chi)$ be a complex/complex Tambara-Yamagami category. Suppose $\C_{\overline{\mathbb{C}}}(A, \chi)$ admits a conjugating monoidal endofunctor $(T, J)$ whose underlying group homomorphism $f$ is an involution. Then: \begin{itemize} \item $T$ fixes all objects (i.e $f$ is the identity), \item $\chi$ is real valued, \item and $A \cong (\mathbb{Z}/2\mathbb{Z})^n$. \end{itemize} \end{proposition} \begin{proof} We begin by examining the hexagon axiom for $T$, at $a,m,c$ where $a$ and $c$ are invertible. The diagram is (using $Tm = m$): \begin{equation}\label{AMCHexagon} \begin{tikzcd}[ampersand replacement=\&,column sep=3.0em] {(T(a) \otimes m) \otimes T(c)} \&\& {T(a) \otimes (m\otimes T(c))} \\ {T(a \otimes m) \otimes T(c)} \&\& {T(a) \otimes T(m\otimes c)} \\ {T((a \otimes m) \otimes c)} \&\& {T(a \otimes (m \otimes c))} \arrow["{\chi(T(a),T(c)) \cdot \id_m}", from=1-1, to=1-3] \arrow["{J_{a,m} \otimes 1}"', from=1-1, to=2-1] \arrow["{1 \otimes J_{m,c}}", from=1-3, to=2-3] \arrow["{J_{a \otimes m, c}}"', from=2-1, to=3-1] \arrow["{J_{a, m \otimes c}}", from=2-3, to=3-3] \arrow["{\overline{\chi(a,c)} \cdot \id_m}"', from=3-1, to=3-3] \end{tikzcd} \end{equation} Since $a$ is Galois trivial and $a \otimes m = m = m\otimes c$, the vertical legs of the diagram are multiplication by the same scalar in $\End(m)$, and so \begin{equation} \label{eq:AMCHexagonConsequence} \chi(T(a), T(c)) = \overline{\chi(a,c)} \end{equation} We then consider two cases to show that $T$ acts by inversion, i.e $cT(c) = 1$ for all $c$. \begin{itemize} \item Suppose $T$ has a nontrivial fixed point $a$. Then for all $c$, we have $$1 = \chi(a,c)\chi(a, T(c)) = \chi(a, cT(c))$$ Since $a$ is not the identity, non-degeneracy of $\chi$ gives $cT(c) = 1$. \item Suppose $T$ has no nontrivial fixed points, and let $c \in A$. Then $T(cT(c)) = cT(c)$ since $T$ is an involution and $A$ is abelian. Since $cT(c)$ is fixed, it must be the identity. \end{itemize} Since $\chi$ is a skew-symmetric bicharacter, we can use equation \eqref{eq:AMCHexagonConsequence} to manipulate $$\chi(a, c) = \chi(a^{-1}, c^{-1}) = \chi(T(a), T(c)) = \overline{\chi(a,c)} = \chi(c, a).$$ Thus $\chi$ is symmetric, skew symmetric, and real valued. Consequently $A \cong (\mathbb{Z}/2\mathbb{Z})^n$ by non-degeneracy and we conclude $T$ fixes all objects. \end{proof} \begin{lemma} \label{lem:FunctorClassification} Let $\chi$ be a real valued, nondegenerate bicharacter on $A$. Then isomorphism classes of monoidal autoequivalences of $\C_{\overline{\mathbb{C}}}(A, \chi)$ are determined by \begin{itemize} \item An element $f$ of $\Aut(A, \chi)$, \item An element $\xi$ of $\Gal(\mathbb{C}/\mathbb{R})$, \item A sign $\kappa \in \{\pm 1\}$. 
\end{itemize} As a consequence, $$\pi_0\Aut_\otimes\big(\C_{\overline{\mathbb{C}}}(A, \chi)\big) \cong \Aut(A, \chi) \times K_4.$$ \end{lemma} \begin{proof} We begin by constructing some chosen representatives of each equivalence class. Given $(f, \xi, \kappa)$ as above, let $F(f, \xi, \kappa)$ be the monoidal functor which \begin{itemize} \item fixes $m$, and acts on grouplikes by $f$, \item applies $\xi$ on endomorphism algebras of simple objects, \item has $J_{a,b}, J_{a,m},$ and $J_{m,a}$ the appropriate identity morphism, \item has $J_{m,m} = \kappa \cdot \id_{m \otimes m}$. \end{itemize} It is clear that $F(f, \xi, \kappa)$ is a monoidal functor and that $$F(f, \xi, \kappa) \circ F(f', \xi', \kappa') = F(f \circ f', \xi\xi', \kappa\kappa').$$ That every monoidal autoequivalence of $\C_{\overline{\mathbb{C}}}(A, \chi)$ is monoidally isomorphic to some $F(f, \xi, \kappa)$ follows from the statement and proof of Theorem 7.1 in \cite{pss23}. Finally, we must show that if $F(f, \xi, \kappa)$ is monoidally isomorphic to $F(f', \xi', \kappa')$ then $f = f', \xi = \xi'$ and $\kappa= \kappa'$. That $f = f'$ and $\xi = \xi'$ is clear from the underlying natural isomorphism of plain functors, and that $\kappa = \kappa'$ follows from the monoidality axiom at $(m,m)$. \end{proof} We now turn to classifying the braiding. As in the analysis in the un-crossed case, we will employ a fixed set of normal bases and the Yoneda embedding to produce equations. By Lemma \ref{lem:TMustConjugate} and Proposition \ref{prop:TFixesAllObjects} we may assume $T = F(\id, \bar \cdot , \kappa)$. Without loss of generality we may further assume that $\gamma_{0,0}, \gamma_{1,0}$ and $\gamma_{0,1}$ have identity components. We denote $\gamma \coloneqq \gamma_{1,1}$. Since $T$ fixes objects we may define as before the $\mathbb{C}^\times$ valued functions: \begin{align*} (c_{a,b}^{*})_{ab}([b,a]) &:= \sigma_{0}(a,b) [a,b] \\ (c_{a,m}^{*})_{m}([m,a]) &:= \sigma_{1}(a) [a,m] \\ (c_{m,a}^{*})_{m}([a,m]) &:= \sigma_{2}(a) [m,a] \\ (c_{m,m}^{*})_{a}([a]) &:= \sigma_{3}(a) [a] \end{align*} We begin the analysis with the braiding compatibility hexagon \eqref{eqn:BraidedHexagon}. When $g = 1$, the constraints are trivial as $T_0$ is the identity monoidal functor and the natural transformations $\gamma_{1,-}$ and $\gamma_{-,1}$ have identity components. When $g = \xi$ we obtain that the $\sigma_i$ must all be real functions. We now examine the heptagon equations. The eight unsimplified families of equations arising from the constraint \eqref{eqn:Heptagon} are (using that the $\sigma_i$ are real to omit conjugations): \begin{align} \sigma_{0}(a, bc) &= \sigma_{0}(a, b)\sigma_{0}(a, c) \label{eqn:ForwardHeptagonEquation1} \\ \sigma_{0}(a, b) \sigma_{1}(a) &= \chi(b,a)\sigma_{1}(a) \label{eqn:ForwardHeptagonEquation2}\\ \chi(a, b)\sigma_{1}(a) &= \sigma_{1}(a)\sigma_{0}(a, b) \label{eqn:ForwardHeptagonEquation3} \\ \sigma_{0}(a, a^{-1}b) &= \chi(b, a)^{-1}\sigma_{1}(a)\sigma_{1}(a) \label{eqn:ForwardHeptagonEquation4}\\ \sigma_{2}(ab) &= \chi(a,b)\sigma_{2}(a)\sigma_{2}(b) \label{eqn:ForwardHeptagonEquation5}\\ \chi(b, a)^{-1}\sigma_{3}(b) &= \sigma_2(a)\sigma_{3}(a^{-1}b) \label{eqn:ForwardHeptagonEquation6} \\ \chi(ba, a)^{-1}\sigma_{3}(ab) &= \sigma_3(b)\sigma_{2}(a) \label{eqn:ForwardHeptagonEquation7}\\ \chi(a,b)\sigma_{3}(a)\sigma_{3}(b) &= \tau \kappa \sum_{c \in A}\chi(c,b)\chi(a,c)\sigma_2(c) \label{eqn:ForwardHeptagonEquation8} \end{align} The first four equations correspond to $g = 1$ and the last four to $g = \xi$. 
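Before turning to the remaining constraints, we record a purely illustrative sanity check that plays no role in the proofs. The short Python sketch below verifies equations \eqref{eqn:ForwardHeptagonEquation1}--\eqref{eqn:ForwardHeptagonEquation8} by brute force in the smallest nontrivial example $(A, \chi) = (K_4, h)$, for the sample data $\sigma_0 = \chi$, $\sigma_1 = \sigma_2$ the quadratic form taking the value $-1$ only on the product of the two generators, $\sigma_3 = \sigma_1$, $\tau = \tfrac{1}{2}$, and $\kappa = 1$; these particular values are assumptions made only for this illustration (they anticipate the reduction in Lemma~\ref{lem:KCrossedAlgebraicReduction} below).

\begin{verbatim}
import itertools

# Klein four-group K_4 realised as Z/2 x Z/2, written additively
A = [(0, 0), (1, 0), (0, 1), (1, 1)]
add = lambda a, b: ((a[0] + b[0]) % 2, (a[1] + b[1]) % 2)
inv = lambda a: a                                    # every element is an involution
chi = lambda a, b: (-1) ** (a[0]*b[1] + a[1]*b[0])   # the hyperbolic bicharacter h

# sample data (illustrative only), anticipating the algebraic reduction below
sigma1 = {(0, 0): 1, (1, 0): 1, (0, 1): 1, (1, 1): -1}
sigma2, sigma3, sigma0 = sigma1, sigma1, chi
tau, kappa = 0.5, 1

# the eight asserts are the eight displayed forward heptagon equations, in order
for a, b, c in itertools.product(A, repeat=3):
    assert sigma0(a, add(b, c)) == sigma0(a, b) * sigma0(a, c)
    assert sigma0(a, b) * sigma1[a] == chi(b, a) * sigma1[a]
    assert chi(a, b) * sigma1[a] == sigma1[a] * sigma0(a, b)
    assert sigma0(a, add(inv(a), b)) == chi(b, a) ** -1 * sigma1[a] ** 2
    assert sigma2[add(a, b)] == chi(a, b) * sigma2[a] * sigma2[b]
    assert chi(b, a) ** -1 * sigma3[b] == sigma2[a] * sigma3[add(inv(a), b)]
    assert chi(add(b, a), a) ** -1 * sigma3[add(a, b)] == sigma3[b] * sigma2[a]
    assert chi(a, b) * sigma3[a] * sigma3[b] == \
        tau * kappa * sum(chi(d, b) * chi(a, d) * sigma2[d] for d in A)
\end{verbatim}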
Next we have the sets of equations arising from the final heptagon axiom $\eqref{eqn:InverseHeptagon}$: \begin{align} \sigma_{0}(bc,a)^{-1}\sigma_0(c,a)\sigma_{0}(b,a) &= 1 \label{eqn:BackwardHeptagonEquation1}\\ \chi(a,b)^{-1}\sigma_{1}(ab)^{-1}\sigma_1(b)\sigma_{1}(a) &= 1 \label{eqn:BackwardHeptagonEquation2}\\ \chi(b,a)^{-1}\sigma_{2}(a)^{-1}\sigma_{2}(a)\sigma_{0}(b,a) &=1\label{eqn:BackwardHeptagonEquation3} \\ \sigma_{3}(b)^{-1}\sigma_{3}(a^{-1}b)\sigma_{1}(a) &= \chi(b,a)\label{eqn:BackwardHeptagonEquation4} \\ \sigma_{2}(a){\sigma_{0}(b,a)}\sigma_{2}(a)^{-1} &= \chi(a,b)^{-1} \label{eqn:BackwardHeptagonEquation5}\\ \chi(a,b)^{-1}\sigma_{3}(a)^{-1}{\sigma_1(b)} \sigma_{3}(ab^{-1}) &= 1\label{eqn:BackwardHeptagonEquation6} \\ \gamma_a \sigma_{0}(b,a)^{-1}{\sigma_2(a)}\chi(ba, a)\sigma_2(a) &= 1\label{eqn:BackwardHeptagonEquation7} \\ \tau \gamma_m \sigma_3(b)\sum_{c \in A} \chi(a, c)\chi(c, b){\sigma_{3}(c)}\sigma_1(a)^{-1} &= \chi(a,b) \label{eqn:BackwardHeptagonEquation8}\end{align} The first pair arises from $g = h = 1$, the second and third pairs are from $g = 1, h = \xi$ and $ g = \xi, h = 1$ respectively, and the final two are from $g = h = \xi$. There are two families of constraints left. First, $\gamma$ must be monoidal, which is equivalent to: \begin{align} \gamma_a &=1 \label{gammaMonoidalEqn1}\\ |\gamma_m|^2 &= 1. \label{gammaMonoidalEqn4} \end{align} Next, $\gamma = \gamma_{1,1}$ must satisfy the hexagon axiom together with $\gamma_{0,0}, \gamma_{1,0}$ so that $T_\bullet$ is a monoidal functor. The constraint is trivially satisfied except at $(1,1,1)$ where the requisite equality is: $$(\gamma_{1,1})_{Tx} = T((\gamma_{1,1})_x).$$ Since $T = F(\id, \bar{\cdot}, \kappa)$ fixes objects, we see $\gamma_{i,j}$ satisfies the hexagon axiom if and only if $\gamma$ is pointwise real valued. \begin{remark} Since $\chi$ is real valued, $\chi(a,b) = \chi(a,b)^{-1}$, and the expressions for the associator in the complex/complex case are equivalent to those originally studied by Tambara and Yamagami. As a consequence, the forward (backward) heptagon equations are very similar to the forward (backward) hexagon equations of Siehler. In particular, they are the same after omitting any occurrences of the symbols $\gamma$ and $\rho$. \end{remark} As a consequence, the algebraic reduction step is only a slight modification of those in the previous sections. \begin{lemma} \label{lem:KCrossedAlgebraicReduction} The following eight equations, together with the assertions that $\kappa^2 = 1$ and $\sigma_3(1)$ is real, are algebraically equivalent to the unsimplified heptagon equations along with the monoidality and coherence equations for $\gamma$: \begin{align} \sigma_{0}(a,b) &= \chi(a,b) = \chi(b,a) \label{eqn:ReducedCrossedBraiding1}\\ \sigma_{1}(ab) &= \chi(a,b)\sigma_1(a)\sigma_1(b) \label{eqn:ReducedCrossedBraiding2}\\ \sigma_1(a)^2 &= \chi(a,a) = 1 \label{eqn:ReducedCrossedBraiding3}\\ \sigma_{3}(a) &= \sigma_3(1)\sigma_1(a) \\ \sigma_1(a) &= \sigma_2(a) \\ \gamma_a &= 1 \\ \gamma_m &= \kappa \\ \kappa\sigma_{3}(1)^2 &= \tau \sum_{a \in A}\sigma_{1}(a) \label{eqn:ReducedCrossedBraiding11}. \end{align} \end{lemma} We are now in a position to prove the first theorem of this section. \begin{theorem} \label{thm:ComplexComplexClassificationWithProof} The complex/complex Tambara-Yamagami categories $\C_{\overline{\mathbb{C}}}(A, \chi)$ admit faithfully graded $G$-crossed braidings only if $G \cong \mathbb{Z}/2\mathbb{Z}$ and $(A, \chi) \cong (K_4^n, h^{n})$.
With our standing assumptions on the monoidal functor $T_\bullet$ and natural transformations $\gamma_{i,j}$, $\mathbb{Z}/2\mathbb{Z}$-crossed braidings are in bijection with pairs $(\sigma, \epsilon) \in \QF(\chi) \times \{\pm 1\}$. \end{theorem} \begin{proof} The first statement follows immediately from the previous results in this section, Theorem \ref{thm:WallClassification}, and equation \eqref{eqn:ReducedCrossedBraiding3}. As in the previous sections, $\sigma$ corresponds to $\sigma_1$ and $\epsilon$ to the choice of square root needed to define $\sigma_3(1)$. The new data is the tensorator $\kappa$ of $T$, but equation \eqref{eqn:ReducedCrossedBraiding11} shows $\kappa = \sgn(\sigma)$ since $\sigma_3(1)$ is real. \end{proof} \begin{remark} In the previous classifications, the space of braidings was identified up to bijection, and it was discrete. In this case, the data of the monoidal functor $T$ means the space of $G$-crossed braidings (up to bijection) has nontrivial topology despite being homotopy equivalent to a discrete space. Our strictification assumptions essentially perform the referenced homotopy, allowing us to give a bijection from the resulting space. \end{remark} \begin{notation} Given a pair $(\sigma, \epsilon) \in \QF(\chi) \times \{\pm 1\}$, we denote the resulting $\mathbb{Z}/2\mathbb{Z}$-crossed category by $\C(\sigma, \epsilon)$. We will still refer to the monoidal functor $T$ and the natural transformation $\gamma$ with the understanding that their data is determined by the pair $(\sigma, \epsilon)$. \end{notation} We now turn to the question of when two $\mathbb{Z}/2\mathbb{Z}$-crossed braidings on $\C_{\overline{\mathbb{C}}}(K_4^n, h^{ n})$ are equivalent. We begin with the definition of a $G$-crossed braided equivalence from \cite[6, 16]{GALINDO2017118} specialized to our case. A $\mathbb{Z}/2\mathbb{Z}$-crossed braided equivalence $\C(\sigma, \epsilon) \to \C(\sigma', \epsilon')$ consists of: \begin{itemize} \item A monoidal autoequivalence $F := F(f, \xi, \kappa)$ of $\C_{\overline{\mathbb{C}}}(K_4^n, h^{ n})$ with its distinguished identity morphism $\eta^0$. \item A monoidal natural transformation $\eta \colon T'F \to FT$ such that the diagrams (3.4) and (5.4) of \cite{GALINDO2017118} commute. \end{itemize} Note that our conventions for the direction of $\gamma$ are different from those of \cite{GALINDO2017118}. Simplifying the referenced commutative diagrams, the constraints on $\eta_x$ reduce to \begin{align} \eta_a &= 1\label{eqn:EtaConsistency1} \\ \kappa'&= \kappa \label{eqn:EtaConsistency2} \\ |\eta_m|^2 &= 1 \label{eqn:EtaMonoidality4} \\ \chi\big(f(a), f(b)\big) &= \chi(a,b)\label{eqn:BraidedPentagon1} \\ \sigma_1'\big(f(a)\big) &= \sigma_1(a) \label{eqn:BraidedPentagon2} \\ \sigma_3'\big(f(a)\big)\eta_m &= \sigma_{3}(a)\label{eqn:BraidedPentagon4} \end{align} We have used that the structure constants $\sigma_1(a)$, $\sigma_2(a)$, and $\sigma_3(a)$ are real so that the action of $\xi$ does not appear. Algebraically reducing these equations, we observe: \begin{corollary} \label{cor:KCrossedFunctorIsBraided} \leavevmode \begin{enumerate} \item A pair $(F(f, \xi, \kappa), \eta)$ is a $\mathbb{Z}/2\mathbb{Z}$-crossed braided equivalence $\C(\sigma, \epsilon) \to \C(\sigma', \epsilon')$ if and only if $f \cdot \sigma = \sigma'$, $\eta_a = 1$ and $\eta_m = \epsilon\epsilon'$. \item If $(F(f, \xi, \kappa), \eta)$ and $(F(f', \xi', \kappa'), \eta')$ are two equivalences $\C(\sigma, \epsilon) \to \C(\sigma', \epsilon')$, then $\eta_x = \eta'_x$ for all $x$.
\item If $(F(f, \xi, \kappa), \eta)$ satisfies the consistency equations, then so does $(F(f, \xi', \kappa'), \eta)$. This notation is slightly abusive since the two natural transformations labeled $\eta$ have different (co)domains; we mean they have the same components. \end{enumerate} \end{corollary} \begin{theorem} \label{thm:ComplexComplexEquivalenceClassification} The $\mathbb{Z}/2\mathbb{Z}$-crossed braided categories $\C(\sigma, \epsilon)$ and $\C(\sigma', \epsilon')$ are equivalent if and only if $\sgn(\sigma) = \sgn(\sigma')$. In particular, when the underlying group of invertible objects is nontrivial, there are exactly two braided equivalence classes, and one otherwise. \end{theorem} \begin{proof} The only if follows from the first statement of Corollary \ref{cor:KCrossedFunctorIsBraided}. Conversely if $\sgn(\sigma) = \sgn(\sigma')$, then $\sigma$ and $\sigma'$ are orbit equivalent by Proposition \ref{prop:OrbitEquivalenceCharacterization}, and thus there exists an $f$ in $\Aut(A, \chi)$ with $f \cdot \sigma = \sigma'$. Corollary \ref{cor:KCrossedFunctorIsBraided} implies there exists a unique $\eta$ such that $(F(f, 1, 1), \eta)$ is a $\mathbb{Z}/2\mathbb{Z}$-crossed braided equivalence $\C(\sigma, \epsilon) \to \C(\sigma', \epsilon')$. \end{proof} \begin{definition} \cite[6]{GALINDO2017118} A \textit{strong equivalence} $\C(\sigma,\epsilon) \to \C(\sigma', \epsilon')$ is a $\mathbb{Z}/2\mathbb{Z}$-crossed braided equivalence of the form $(\id_{\C_{\overline{\mathbb{C}}}(K_4^n, h^{ n})}, \eta)$. \end{definition} \begin{corollary} There is a unique strong equivalence $\C(\sigma, \epsilon) \to \C(\sigma', \epsilon')$ if and only if $\sigma = \sigma'$. \end{corollary} \begin{lemma} $K$-crossed braided natural transformations $(F, \eta) \to (F', \eta')$ are in bijection with real-valued monoidal natural transformations $F \to F'$. \end{lemma} \begin{proof} The second part of Corollary \ref{cor:KCrossedFunctorIsBraided} shows that $\eta$ and $\eta'$ have the same components, so that the diagram defining a $\mathbb{Z}/2\mathbb{Z}$-crossed braided natural transformation \cite[Diagram 3.5]{GALINDO2017118} $\lambda$ becomes simply $\lambda_x = \overline{\lambda_x}$ by Lemma \ref{lem:TMustConjugate}. \end{proof} Combining Lemma \ref{lem:FunctorClassification} and Corollary \ref{cor:KCrossedFunctorIsBraided} we have (in the notation of \S\ref{sec:QFAnalysis}): \begin{corollary} \label{cor:CCBraidedAutomorphisms} $$\pi_0\Aut_{\text{br}}\big(\C(\sigma, \epsilon)\big) \cong H_{\sgn(\sigma)} \times K_4,$$ \end{corollary} First, the diagram \begin{equation} \label{eqn:BraidedHexagon} \begin{tikzcd}[column sep = huge] T_g(x) \otimes T_g(y) \ar[r, "c_{T_g(x), T_g(y)}"] & T_{gh g^{-1}}(T_g(y) \otimes T_g(x)) \ar[d, "(\gamma_{gh g^{-1}, g})_y \otimes \id_{T_g(x)}"] \\ T_g(x \otimes y) \ar[u, "(\mu_g)^{-1}_{x,y}"] \ar[d, swap, "T_g(c_{x,y})"] & T_{gh }(y) \otimes T_g(x) \\ T_g(T_h (y) \otimes x) \ar[r, swap, "(\mu_g)^{-1}_{T_g(y), x}"] & T_g(T_h (y)) \otimes T_g(x) \ar[u, swap, "(\gamma_{g,h })_y \otimes \id_{T_g(x)}"] \end{tikzcd} \end{equation} commutes for all $g,h \in G$ and $x \in \C_h , y \in \C$. 
\\ Second, the diagram \begin{equation}\label{eqn:Heptagon} \begin{tikzcd}[ampersand replacement=\&] \& {(x \otimes y) \otimes z} \\ {x \otimes (y \otimes z)} \&\& {(T_g(y) \otimes x) \otimes z} \\ {T_g(y \otimes z) \otimes x} \&\& {T_g(y) \otimes(x \otimes z)} \\ {(T_g(y) \otimes T_g(z)) \otimes x} \&\& {T_g(y) \otimes (T_g(z) \otimes x)} \arrow["{\alpha_{x,y,z}}"', from=1-2, to=2-1] \arrow["{c_{x, y \otimes z} }"', from=2-1, to=3-1] \arrow["{(\mu_g)^{-1}_{y,z} \otimes \id_x}"', from=3-1, to=4-1] \arrow["{\alpha_{T_g(y), T_g(z), x}}", from=4-1, to=4-3] \arrow["{c_{x,y} \otimes \id_z}", from=1-2, to=2-3] \arrow["{\alpha_{T_g(y),x,z}}", from=2-3, to=3-3] \arrow["{\id_{T_g(y)} \otimes c_{x,z}}", from=3-3, to=4-3] \end{tikzcd} \end{equation} commutes for all $g\in G, x \in \C_g $ and $y,z \in \C$. \\ Finally, the diagram \begin{equation}\label{eqn:InverseHeptagon} \begin{tikzcd}[ampersand replacement=\&] \& {x \otimes (y \otimes z)} \\ {(x \otimes y) \otimes z} \&\& {x \otimes (T_h (z) \otimes y) } \\ {T_{gh }(z) \otimes (x \otimes y)} \&\& { (x \otimes T_h (z)) \otimes y} \\ {T_gT_h (z) \otimes (x \otimes y)} \&\& {(T_gT_h (z) \otimes x)\otimes y} \arrow["{\alpha_{x,y,z}}", from=2-1, to=1-2] \arrow["{c_{x \otimes y, z}^{-1} }", from=3-1, to=2-1] \arrow["{(\gamma_{g, h })_{z} \otimes \id_{x \otimes y}}", from=4-1, to=3-1] \arrow["{\alpha_{T_gT_h (z), x,y}^{-1}}", from=4-1, to=4-3] \arrow["{\id_x \otimes c_{y,z} }", from=1-2, to=2-3] \arrow["{\alpha_{x, T_h (z),y}^{-1}}", from=2-3, to=3-3] \arrow["{c_{x,T_h (z)} \otimes \id_{y} }", from=3-3, to=4-3] \end{tikzcd} \end{equation} commutes for all $g,h \in G$ and $x \in \C_g, y \in \C_h $ and $z \in \C$.
2412.21011v2
http://arxiv.org/abs/2412.21011v2
The Turán density of the tight 5-cycle minus one edge
\documentclass[11pt,a4paper]{article} \usepackage[nobysame,initials]{amsrefs} \newcommand{\todo}[1]{\textcolor{red}{TODO: #1}} \newcommand{\xl}[1]{\textcolor{magenta}{XL: #1}} \newcommand{\op}[1]{\textcolor{blue}{OP: #1}} \newcommand{\lb}[1]{\textcolor{brown}{LB: #1}} \usepackage{bbm} \usepackage{epsf,epsfig,amsfonts,amsgen,amsmath,amstext,amsbsy,amsopn,amsthm,cases,listings,color } \usepackage{ebezier,eepic} \usepackage{color} \usepackage{multirow} \usepackage{epstopdf} \usepackage{graphicx} \usepackage{pgf,tikz} \usepackage{mathrsfs} \usepackage[marginal]{footmisc} \usepackage{enumitem} \usepackage[titletoc]{appendix} \usepackage{booktabs} \usepackage{mathtools} \usepackage{pgfplots} \usepackage{authblk} \usepackage{amssymb} \usepackage{wasysym} \usepackage{empheq} \usepackage{dsfont} \usepackage{caption} \usepackage{subcaption} \pgfplotsset{compat=1.18} \usepackage{mathrsfs} \usepackage{wasysym} \usetikzlibrary{arrows} \usepackage{aligned-overset} \usepackage{bm} \usepackage{hyperref} \usepackage{xurl} \hypersetup{breaklinks=true} \usepackage[normalem]{ulem} \usepackage{makecell} \newcommand{\Mod}[1]{\ (\mathrm{mod}\ #1)} \allowdisplaybreaks[1] \definecolor{uuuuuu}{rgb}{0.27,0.27,0.27} \definecolor{sqsqsq}{rgb}{0.1255,0.1255,0.1255} \setlength{\textwidth}{150mm} \setlength{\oddsidemargin}{7mm} \setlength{\evensidemargin}{7mm} \setlength{\topmargin}{-5mm} \setlength{\textheight}{245mm} \topmargin -18mm \newtheorem{definition}{Definition} [section] \newtheorem{theorem}[definition]{Theorem} \newtheorem{lemma}[definition]{Lemma} \newtheorem{proposition}[definition]{Proposition} \newtheorem{corollary}[definition]{Corollary} \newtheorem{conjecture}[definition]{Conjecture} \newtheorem{claim}[definition]{Claim} \newtheorem{problem}[definition]{Problem} \newtheorem{observation}[definition]{Observation} \newtheorem{fact}[definition]{Fact} \newenvironment{pf}{\noindent{\bf Proof}} \theoremstyle{remark} \newtheorem{remark}[definition]{Remark} \newcommand{\uproduct}{\mathbin{\;{\rotatebox{90}{\textnormal{$\small\Bowtie$}}}}} \newcommand{\rep}[2]{{#1}^{(#2)}} \newcommand{\blow}[2]{#1(\!(#2)\!)} \newcommand{\multiset}[1]{\{\hspace{-0.25em}\{\hspace{0.1em}#1\hspace{0.1em}\}\hspace{-0.25em}\}} \def\qed{\hfill \rule{4pt}{7pt}} \def\pf{\noindent {\it Proof }} \newcommand{\C}[1]{\mathcal{#1}} \newcommand{\I}[1]{{\mathbbm #1}} \newcommand{\Qn}[1]{\textbf{#1}} \newcommand{\eval}[1]{[\![#1]\!]} \newcommand{\ra}[1]{\cite[#1]{Razborov10}} \newcommand{\e}{\varepsilon} \newcommand{\hide}[1]{} \setlength{\parindent}{0pt} \parskip=8pt \begin{document} \title{\bf\Large The Tur\'{a}n density of the tight $5$-cycle minus one edge} \author{Levente Bodn\'ar} \author{Jared Le\'on} \author{Xizhi Liu} \author{Oleg Pikhurko} \affil{Mathematics Institute and DIMAP, University of Warwick, Coventry, UK } \date{\today} \maketitle \begin{abstract} Let the \textbf{tight $\ell$-cycle minus one edge} $C_\ell^{3-}$ be the $3$-graph on $\{1,\dots,\ell\}$ consisting of $\ell-1$ consecutive triples in the cyclic order. We show that, for every $\ell\ge 5$ not divisible by $3$, the Tur\'{a}n density of $C_{\ell}^{3-}$ is $1/4$ and also prove some finer structure results. This proves a conjecture of Mubayi--Sudakov--Pikhurko from 2011 and extends the results of Balogh--Luo [\emph{Combinatorica} 44 (2024) 949–976] who established analogous claims for all sufficiently large~$\ell$. Results similar to ours were independently obtained by Lidick\'y--Mattes--Pfender [arXiv:2409.14257]. 
\end{abstract} \section{Introduction}\label{SEC:Intorduction} Given an integer $r\ge 2$, an \textbf{$r$-uniform hypergraph} (henceforth an \textbf{$r$-graph}) $\mathcal{H}$ is a collection of $r$-subsets of some set $V$. We call $V$ the \textbf{vertex set} of $\C H$ and denote it by $V(\C H)$. When $V$ is understood, we usually identify a hypergraph $\mathcal{H}$ with its set of edges. Given a family $\mathcal{F}$ of $r$-graphs, we say an $r$-graph $\mathcal{H}$ is \textbf{$\mathcal{F}$-free} if it does not contain any member of $\mathcal{F}$ as a subgraph. The \textbf{Tur\'{a}n number} $\mathrm{ex}(n, \mathcal{F})$ of $\mathcal{F}$ is the maximum number of edges in an $\mathcal{F}$-free $r$-graph on $n$ vertices. The \textbf{Tur\'{a}n density} of $\mathcal{F}$ is defined as $\pi(\mathcal{F})\coloneq \lim_{n\to\infty}\mathrm{ex}(n,\mathcal{F})/{n\choose r}$. The existence of this limit follows from a simple averaging argument of Katona--Nemetz--Simonovits~\cite{KNS64}, which shows that $\mathrm{ex}(n,\mathcal{F})/{n\choose r}$ is non-increasing in $n$. For $r=2$, the value $\pi(\mathcal{F})$ is well understood thanks to the classical work of Erd\H{o}s--Stone~\cite{ES46} (see also~\cite{ES66}), which extends Tur\'{a}n's seminal theorem from~\cite{Tur41}. For $r \ge 3$, determining $\pi(\mathcal{F})$ is notoriously difficult in general, despite significant effort devoted to this area. For results up to~2011, we refer the reader to the excellent survey by Keevash~\cite{Kee11}. In this paper, we focus on the Tur\'{a}n density of $3$-uniform tight cycles minus one edge. For an integer $\ell \ge 4$, the \textbf{tight $\ell$-cycle} $C_{\ell}^{3}$ is the $3$-graph on $[\ell]\coloneqq \{1,\dots,\ell\}$ with edge set \begin{align*} \big\{\,\{1,2,3\}, \{2,3,4\}, \cdots, \{\ell-2,\ell-1,\ell\},\{\ell-1,\ell,1\},\{\ell,1,2\}\,\big\}, \end{align*} that is, we take all consecutive triples in the cyclic order on $[\ell]$. The \textbf{tight $\ell$-cycle minus one edge} $C_{\ell}^{3-}$ is the $3$-graph on $[\ell]$ with edge set \begin{align*} \big\{\,\{1,2,3\}, \{2,3,4\}, \cdots, \{\ell-2,\ell-1,\ell\},\{\ell-1,\ell,1\}\,\big\}, \end{align*} that is, $C_{\ell}^{3-}$ is obtained from $C_{\ell}^{3}$ by removing one edge. If $\ell \equiv 0 \Mod{3}$ (i.e.\ $\ell$ is divisible by $3$) then both $C_{\ell}^{3}$ and $C_{\ell}^{3-}$ are $3$-partite and thus it holds that $\pi(C_{\ell}^{3}) = \pi(C_{\ell}^{3-})=0$ by the classical general result of Erd\H{o}s~\cite{Erd64KST}. Very recently, using a sophisticated stability argument combined with results from digraph theory, Kam{\v c}ev--Letzter--Pokrovskiy~\cite{KLP24} proved that $\pi(C_{\ell}^{3}) = 2\sqrt{3}-3$ for all sufficiently large $\ell$ satisfying $\ell \not\equiv 0 \Mod{3}$. Later, being partially inspired by~\cite{KLP24}, Balogh--Luo~\cite{BL24C5Minus} proved that $\pi(C_{\ell}^{3-}) = \frac{1}{4}$ for all sufficiently large $\ell$ satisfying $\ell \not\equiv 0 \Mod{3}$. Recall that the lower bound $\pi(C_{\ell}^{3-})\ge \frac14$ for each $\ell\ge 5$ not divisible by 3 is provided by the following construction from {\cite{MPS11}}. For $n \in \{0, 1,2\}$, the $n$-vertex $T_{\mathrm{rec}}$-construction is the empty $3$-graph on $n$ vertices. 
For $n \ge 3$, an $n$-vertex $3$-graph $\mathcal{H}$ is a \textbf{$T_{\mathrm{rec}}$-construction} if there exists a partition $V_1 \cup V_2 \cup V_3 = V(\mathcal{H})$ into non-empty parts such that $\mathcal{H}$ is obtained from $\mathcal{K}[V_1,V_2,V_3]$, the complete $3$-partite $3$-graph with parts $V_1, V_2, V_3$, by adding a copy of a $T_{\mathrm{rec}}$-construction into each $V_i$ for $i \in [3]$. It is easy to see that the obtained $3$-graph is $C_{\ell}^{3-}$-free for every $\ell \not\equiv 0 \Mod{3}$, in particular, for $\ell=5$. Let $t_{\mathrm{rec}}(n)$ denote the maximum number of edges in an $n$-vertex $T_{\mathrm{rec}}$-construction. It is clear from the definition that, for each $n\ge 3$, \begin{align*} t_{\mathrm{rec}}(n) = \max\big\{ \ & n_1 n_2 n_3 + t_{\mathrm{rec}}(n_1) + t_{\mathrm{rec}}(n_2) + t_{\mathrm{rec}}(n_3): \\ & n_1 + n_2 + n_3 = n \quad\text{and}\quad n_i \ge 1~\text{for}~i \in [3]\ \big\}. \end{align*} A simple calculation shows that $t_{\mathrm{rec}}(n)= (\frac{1}{4}+o(1)) \binom{n}{3}$ as $n\to\infty$. In this paper, we determine the Tur\'an density of the tight 5-cycle minus one edge, thus confirming a conjecture of Mubayi--Pikhurko--Sudakov~{\cite[Conjecture~8]{MPS11}}. \begin{theorem}\label{THM:turan-density-C5-} It holds that $\pi(C_{5}^{3-}) = \frac{1}{4}$. \end{theorem} For an arbitrary integer $\ell\ge 5$ with $\ell \not\equiv 0 \Mod{3}$, Theorem~\ref{THM:turan-density-C5-} implies, by the standard results on the Tur\'an density of blowups (see e.g.~{\cite[Theorem~2.2]{Kee11}} or~{\cite[Claim~5.14]{BL24C5Minus}}), that $\pi(C_{\ell}^{3-}) \le \frac{1}{4}$ (with equality by the above construction). We also establish the Erd{\H o}s--Simonovits-type stability property~\cite{Erdos67a,Sim68} for $C_{5}^{3-}$ in the following theorem. A $3$-graph is called a \textbf{$T_{\mathrm{rec}}$-subconstruction} if it is a subgraph of some $T_{\mathrm{rec}}$-construction. \begin{theorem}\label{THM:C5Minus-stability} For every $\varepsilon > 0$, there exist $\delta > 0$ and $N_0$ such that the following statement holds for all $n \ge N_0$. Suppose that $\mathcal{H}$ is an $n$-vertex $C_{5}^{3-}$-free $3$-graph with at least $\frac{1}{4}\binom{n}{3} - \delta n^3$ edges. Then $\mathcal{H}$ is a $T_{\mathrm{rec}}$-subconstruction after removing at most $\varepsilon n^3$ edges. \end{theorem} In the following theorem, we establish a refined structural property of maximum $C_{5}^{3-}$-free $3$-graphs on a sufficiently large vertex set. \begin{theorem}\label{THM:exact-level-one} For every $\varepsilon > 0$ there exists $n_0$ such that the following holds for every $n \ge n_0$. Suppose that $\mathcal{H}$ is an $n$-vertex $C_{5}^{3-}$-free $3$-graph with $\mathrm{ex}(n,C_{5}^{3-})$ edges. Then there exists a partition $V_1 \cup V_2 \cup V_3 = V(\mathcal{H})$ such that \begin{enumerate}[label=(\roman*)] \item\label{THM:exact-level-one-1} $\left||V_i| - \frac{n}{3}\right| \le \varepsilon n$ for every $i \in [3]$, and \item\label{THM:exact-level-one-3} $\mathcal{H} \setminus \bigcup_{i\in [3]}\mathcal{H}[V_i] = \mathcal{K}[V_1, V_2, V_3]$. \end{enumerate} \end{theorem} Thus, Conclusion~\ref{THM:exact-level-one-3} of the theorem states that $\mathcal{H}$ is the complete $3$-partite $3$-graph plus possibly some edges inside parts.
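As a purely illustrative aside (not used in any of the proofs), the recursion for $t_{\mathrm{rec}}(n)$ displayed above can be evaluated exactly by dynamic programming. The following minimal Python sketch does so (the function name \texttt{t\_rec} is ours) and prints the ratio $t_{\mathrm{rec}}(n)/\binom{n}{3}$, which slowly approaches $\frac14$.

\begin{verbatim}
from functools import lru_cache
from math import comb

@lru_cache(maxsize=None)
def t_rec(n):
    """Maximum number of edges in an n-vertex T_rec-construction."""
    if n < 3:
        return 0
    return max(n1 * n2 * n3 + t_rec(n1) + t_rec(n2) + t_rec(n3)
               for n1 in range(1, n - 1)
               for n2 in range(1, n - n1)
               for n3 in [n - n1 - n2])

for n in (9, 27, 81):
    print(n, t_rec(n), t_rec(n) / comb(n, 3))   # the ratio tends to 1/4
\end{verbatim}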
It is easy to see that $\C H$ remains $C_{5}^{3-}$-free if we replace $\C H[V_i]$ by any $C_{5}^{3-}$-free $3$-graph on~$V_i$. Thus, each induced subgraph $\C H[V_i]$ is maximum $C_{5}^{3-}$-free and Theorem~\ref{THM:exact-level-one} applies to it provided $|V_i|\ge n_0$, and so on. We see that $\C H$ exactly follows the construction for $T_{\mathrm{rec}}$, except for parts of size less than $n_0$. It follows that there is a constant $C=C(n_0)$ such that \begin{align*} t_{\mathrm{rec}}(n) \le \mathrm{ex}(n,C_{5}^{3-}) \le t_{\mathrm{rec}}(n) + Cn,\quad\mbox{for all $n\ge 1$}. \end{align*} Thus, we know the function $\mathrm{ex}(n,C_{5}^{3-})$ within additive $O(n)$. \begin{remark} With some additional arguments (see Proposition~\ref{PROP:C5minus-max-degree}), one can show that for every $\delta > 0$, there exists $N_0$ such that for every $n \ge N_0$, \begin{align}\label{equ:smoothness-C5-} \left|\mathrm{ex}(n,C_{5}^{3-}) - \mathrm{ex}(n-1,C_{5}^{3-}) - \frac{n^2}{8}\right| \le \delta n^2. \end{align} Using this result and further arguments, Theorem~\ref{THM:exact-level-one}~\ref{THM:exact-level-one-1} can be strengthened to $\left||V_i| - \frac{n}{3}\right| \le \varepsilon \sqrt{n}$ for every $i \in [3]$. Since we were unable to further improve $\varepsilon \sqrt{n}$ to $1$, we omit the details here. \end{remark} Our proofs of the above results crucially use the flag algebra machinery developed by Razborov~\cite{Raz07} and are computer-assisted. Independently of this work, analogous results by a similar method were obtained by Lidick\'y--Mattes--Pfender \cite{LMP24}. We adopt the general strategy of Balogh--Hu--Lidick{\'y}--Pfender used in~\cite{BHLP16C5} for determining the inducibility of the $5$-cycle $C_5$ (where asymptotically extremal graphs are also obtained via a recursive construction). Rather roughly, our proof is based on the following two crucial claims about an unknown $C_{5}^{3-}$-free $3$-graph $\C H$ with $n\to\infty$ vertices and at least $(\frac14+o(1)){n\choose 3}$ edges. \begin{enumerate} \item\label{it:A} Proposition~\ref{pr:2} shows that there exists a vertex partition $V_1\cup V_2\cup V_3=V(\C H)$ such that $|\C H\cap \C K[V_1,V_2,V_3]|\ge(0.194...+o(1)){n\choose 3}$ (which is not too far from the upper bound $(n/3)^3=(0.222...+o(1)) {n\choose 3}$). \item\label{it:B} Proposition~\ref{pr:3} shows that if the partition in Item~\ref{it:A} is additionally assumed to be \textbf{locally maximal} (meaning that by moving any one vertex between parts we cannot increase the number of \textbf{transversal edges}, that is, the edges in $\C H\cap \mathcal{K}[V_1,V_2,V_3]$) and we compare $\C H\setminus\bigcup_{i=1}^3\C H[V_i]$ with the complete 3-partite $3$-graph $\mathcal{K}[V_1,V_2,V_3]$ then the number of additional edges is at most 0.99 times the number of missing edges. \end{enumerate} Thus we have identified a top-level partition such that, ignoring triples inside parts, $\C H$ does not perform better than a copy of $T_{\mathrm{rec}}$ that uses the same parts. We can recursively apply this result to each part $\C H[V_i]$ as long as $|V_i|$ is sufficiently large. Now, routine calculations imply that $|\C H|\le (\frac14+o(1)){n\choose 3}$. Our main new idea, when compared to~\cite{BHLP16C5,LMP24}, is a trivial combinatorial observation that for every partition $V_1\cup V_2\cup V_3$ of $V(\C H)$ there is also a locally maximal one with at least as many transversal edges.
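To make this observation concrete, here is a minimal sketch (illustrative only; the function names are ours) of the greedy local refinement: starting from an arbitrary $3$-partition it keeps moving single vertices between parts while this strictly increases the number of transversal edges, so it terminates at a locally maximal partition with at least as many transversal edges as the initial one.

\begin{verbatim}
def transversal_count(part, edges):
    """Number of edges meeting all three parts; part maps each vertex to 0, 1 or 2."""
    return sum(1 for e in edges if {part[v] for v in e} == {0, 1, 2})

def locally_maximise(part, edges):
    """Greedily move single vertices between parts while the number of
    transversal edges strictly increases; the result is locally maximal."""
    improved = True
    while improved:
        improved = False
        for v in part:
            best, best_val = part[v], transversal_count(part, edges)
            for i in range(3):
                part[v] = i
                val = transversal_count(part, edges)
                if val > best_val:
                    best, best_val, improved = i, val, True
            part[v] = best
    return part

# example: refine some partition of the vertex set of C_5^{3-}
edges = [(1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 1)]
part = locally_maximise({1: 0, 2: 0, 3: 1, 4: 1, 5: 2}, edges)
\end{verbatim}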
It seems that the standard flag algebra calculations do not ``capture'' this kind of argument and doing an intermediate \textbf{local refinement} step (when the flag algebra version of the local maximality is manually added to the SDP program) may considerably improve the power of the method. A possible way to compare computer-generated results is by the size of their certificates (say, as a single compressed zip-file). Our proof, even without attempting to reduce the set of used types, occupies around 1MB of space while that from~\cite{LMP24} has size over 7GB. \section{Preliminaries}\label{SEC:Prelim} For pairwise disjoint sets $V_1, \ldots, V_{\ell}$, we use $\mathcal{K}[V_1, \ldots, V_{\ell}]$ to denote the complete $\ell$-partite $\ell$-graph with parts $V_1, \ldots, V_{\ell}$. Also, $\mathcal{K}^2[V_1, \ldots, V_{\ell}]$ denotes the complete $\ell$-partite $2$-graph with parts $V_1, \ldots, V_{\ell}$ (thus its edge set is $\bigcup_{1\le i<j\le \ell}\mathcal{K}[V_i,V_j]$). For a set $X$ and an integer $k\ge 0$, let ${X\choose k}:=\{S \subseteq X \colon |S|=k\}$ denote the family of all $k$-subsets of~$X$. Let $\mathcal{H}$ be a $3$-graph. Its \textbf{order} is $v(\C H):=|V(\C H)|$. For a vertex $v \in V(\mathcal{H})$, the \textbf{link} of $v$ is the following 2-graph on $V(\C H)\setminus\{v\}$: \begin{align*} L_{\mathcal{H}}(v) \coloneqq \left\{e\in \binom{V(\mathcal{H})\setminus \{v\}}{2} \colon e \cup \{v\} \in \mathcal{H}\right\}. \end{align*} The \textbf{degree} $d_{\mathcal{H}}(v)$ of $v$ in $\mathcal{H}$ is given by $d_{\mathcal{H}}(v) \coloneqq |L_{\mathcal{H}}(v)|$. We use $\delta(\mathcal{H})$, $\Delta(\mathcal{H})$, and $d(\mathcal{H})$ to denote the \textbf{minimum}, \textbf{maximum}, and \textbf{average degree} of $\mathcal{H}$, respectively. For a pair of vertices $\{u,v\} \subseteq V(\mathcal{H})$, the \textbf{codegree} $d_{\mathcal{H}}(uv)$ of $\{u,v\}$ is the number of edges containing $\{u,v\}$. \hide{ Given another $3$-graph $F$, we let $N(F,\mathcal{H})$ denote the number of copies of $F$ in $\mathcal{H}$. More precisely, \begin{align*} N(F,\mathcal{H}) \coloneqq \left|\left\{\{e_1, \ldots, e_{|F|}\} \subseteq \mathcal{H} \colon \{e_1, \ldots, e_{|F|}\} \text{ spans a copy of } F\right\}\right|. \end{align*} The \textbf{$F$-density} of $\mathcal{H}$ is given by $d(F,\mathcal{H}) \coloneqq \frac{N(F,\mathcal{H})}{\binom{v(\mathcal{H})}{v(F)}}$.} The \textbf{$k$-blowup} $\C H^{(k)}$ of $\C H$ is the 3-graph whose vertex set is the union $\bigcup_{v\in V(\C H)} U_v$ of some disjoint $k$-sets $U_v$, one for each vertex $v\in V(\C H)$, and whose edge set is the union of the complete $3$-partite $3$-graphs $\mathcal{K}[U_x,U_y,U_z]$ over all edges $\{x,y,z\}\in\C H$. Informally speaking, $\C H^{(k)}$ is obtained from $\C H$ by cloning each vertex $k$ times. For another $3$-graph $F$ of order $k$, the \textbf{(induced) density} $p(F,\C H)$ of $F$ in $\C H$ is the number of $k$-subsets of $V(\C H)$ that span a subgraph isomorphic to $F$, divided by ${v(\C H)\choose k}$. When $F=K_3^3$ is the single edge, we get the \textbf{edge density} $\rho(\mathcal{H}) \coloneqq |\mathcal{H}|/{v(\mathcal{H})\choose 3}$.
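For concreteness, the blowup operation can be phrased directly in code (an illustrative Python sketch with our own names; it is not used anywhere in the arguments).

\begin{verbatim}
from itertools import product

def blowup(edges, k):
    """k-blowup of a 3-graph: each vertex v becomes the k clones (v, 0), ..., (v, k-1),
    and each edge {x, y, z} becomes the complete 3-partite 3-graph on the
    corresponding three groups of clones."""
    return {frozenset({(x, i), (y, j), (z, l)})
            for (x, y, z) in edges
            for i, j, l in product(range(k), repeat=3)}

# C_5^{3-}: the four consecutive triples 123, 234, 345, 451
C5_minus = [(1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 1)]
assert len(blowup(C5_minus, 3)) == 3 ** 3 * len(C5_minus)   # |H^(k)| = k^3 |H|
\end{verbatim}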
\hide{For three pairwise disjoint parts $V_1, V_2, V_3 \subseteq V(\mathcal{H})$, the induced $3$-partite subgraph $\mathcal{H}[V_1, V_2, V_3]$ is defined as \begin{align*} \mathcal{H}[V_1, V_2, V_3] \coloneqq \left\{X\in \mathcal{H} \colon |X\cap V_i| = 1 \text{ for every } i \in [3]\right\}; \end{align*} equivalently, $\mathcal{H}[V_1, V_2, V_3]=\C H\cap \mathcal{K}[V_1,V_2,V_3]$. } Given two $r$-graphs $\mathcal{H}$ and $\mathcal{G}$, a map $\psi \colon V(\mathcal{H})\to V(\mathcal{G})$ is called a \textbf{homomorphism} if $\psi(e) \in \mathcal{G}$ for all $e\in \mathcal{H}$. Let $K_{4}^{3-}$ denote the $3$-graph on $\{1,2,3,4\}$ with edge set $\left\{\{1,2,3\},\{1,2,4\},\{1,3,4\}\right\}$. Observe that there exists a homomorphism from $C_{5}^{3-}$ to~$K_{4}^{3-}$. Thus, the Supersaturation Method (see e.g.~\cite[Theorem~2.2]{Kee11}) gives that \begin{align} \pi(C_{5}^{3-}) = \pi(\{C_{5}^{3-},K_{4}^{3-}\}).\label{eq:K4} \end{align} Therefore, in order to prove Theorem~\ref{THM:turan-density-C5-}, it suffices to show that $\pi(\{C_{5}^{3-},K_{4}^{3-}\}) \le \frac{1}{4}$. The \textbf{standard 2-dimensional simplex} is \begin{align*} \mathbb{S}^3 \coloneqq \left\{(x_1, x_2, x_3) \in \mathbb{R}^3 \colon x_1+x_2+x_3 = 1 \text{ and } x_i \ge 0 \text{ for } i \in [3]\right\}. \end{align*} The following fact is straightforward to verify. \begin{fact}\label{FACT:ineqality} The following inequalities hold for every $(x_1, x_2, x_3) \in \mathbb{S}^{3}$$\colon$ \begin{enumerate}[label=(\roman*)] \item\label{FACT:ineqality-1} $\frac{6x_1x_2x_3}{1-(x_1^3+x_2^3+x_3^3)} \le \frac{1}{4}$. \item\label{FACT:ineqality-2} If $\min\{x_1, x_2, x_3\} \ge \frac{1}{5}$, then $x_1^3+x_2^3+x_3^3\le \frac{29}{125}<\frac13$ and $\frac{1+x_1^3+x_2^3+x_3^3}{1-(x_1^3+x_2^3+x_3^3)} < 2$. \item\label{FACT:ineqality-3} $x_1 x_2 x_3 + \frac{x_1^3 + x_2^3 + x_3^3}{24} \le \frac{1}{24}$. Moreover, if $\max_{i\in [3]}\left|x_i - 1/3\right| \ge \varepsilon$ for some $\varepsilon \in [0, 1/10]$ and $x_i \in [1/5, 1/2]$ for every $i \in [3]$, then \begin{align*} x_1 x_2 x_3 + \frac{x_1^3 + x_2^3 + x_3^3}{24} \le \frac{1}{24} - \frac{\varepsilon^2}{16}. \end{align*} \end{enumerate} \end{fact} \section{Computer-generated results}\label{se:Flag-Algebra} \hide{ \xl{added on 2024-10-21: I slightly modified and re-run Levente's no\_c5m file. The results I get are as follows: \begin{itemize} \item I get $\alpha_{3.1} = \frac{27606157}{110100480} = 0.250736...$ with only the assumption that minimum degree is at least $\frac{1}{4}\binom{n}{2}$. \item I get $\rho(F_{2,2,2}) \ge \beta = \frac{669843554483}{13762560000000} = 0.048671...$ with only the assumption that the edge density is at least $\frac{1}{4} - \frac{1}{10^6}$, \item These two values give $\alpha_{3.2} \ge \frac{\beta}{\alpha_{3.1}} = \frac{669843554483}{3450769625000} = 0.194114...$, which is still good enough for the step $B$ vs $M$. \end{itemize}} \op{The constants still may change. My calculations (folder no\_C5\_files) show that we also do not need to use the min-degree assumption in Prop~\ref{pr:1}. The paper may look cleaner if we use as input the exact outputs from previous propositions, e.g. assuming in Proposition~\ref{pr:3} that the number of transversal edges is at least exactly the value returned by Proposition~\ref{pr:2}, etc. I am not sure: this may result in final rational numbers having larger denominators. I can experiment with this once I am back, or someone else can try it. 
} \lb{I re-run all the calculations, the final file is here: https://github.com/bodnalev/supplementary_files/blob/main/no_c5m/no_c5m.ipynb } } In this section, we present the results derived by computer using the flag algebra method of Razborov~\cite{Raz07}, which is also described in e.g.~\cite{Razborov10,BaberTalbot11,SFS16,GilboaGlebovHefetzLinialMorgenstein22}. Since this method is well-known by now, we will be very brief. In particular, we omit many definitions, referring the reader to~\cite{Raz07,Razborov10} for any missing notions. Roughly speaking, a \textbf{flag algebra proof using $0$-flags on $m$ vertices} of an upper bound $u\in\I R$ on the given objective function $f$ consists of an identity $$ u-f(\C H)=\mathrm{SOS}+\sum_{F\in\C F_m^0}c_Fp(F,\C H)+o(1), $$ which is asymptotically true for any admissible $\C H$ with $|V(\C H)|\to\infty$, where the $\mathrm{SOS}$-term can be represented as a sum of squares (as described e.g.\ in~\ra{Section~3}), each coefficient $c_F\in\I R$ is non-negative, and $\C F_m^0$ consists of isomorphism types of \textbf{$0$-flags} (unlabelled $3$-graphs) with $m$ vertices. If $f(\C H)$ can be represented as a linear combination of the densities of members of $\C F_m^0$ in $\C H$ then finding the smallest possible $u$ amounts to solving a semi-definite program (SDP) with $|\C F_m^0|$ linear constraints. (So we write the size of $\C F_m^0$ in each case to give the reader some idea of the size of the programs that we had to solve.) We formed the corresponding SDPs and then analysed the solutions returned by computer, using a modified version of the SageMath package. This package is still under development; a short guide on how to install it and a description of its current functionality can be found in the GitHub repository \href{https://github.com/bodnalev/sage}{\url{https://github.com/bodnalev/sage}}. The calculations used for this paper and the generated certificates can be found in the ancillary folder of the arXiv version of this paper or in \href{https://github.com/bodnalev/supplementary_files}{\url{https://github.com/bodnalev/supplementary_files}}, a separate repository. As far as we can see, none of the certificates (even that for Proposition~\ref{pr:4}) can be made sufficiently compact to be human-checkable. Hence we did not make any systematic efforts to reduce the size of the obtained certificates, being content with ones that can be generated and verified on an average modern PC. In particular, we did not try to reduce the set of used types needed for the proofs, although we did use the (standard) observation of Razborov~\ra{Section 4} that each unknown SDP matrix can be assumed to consist of 2 blocks (namely, the invariant and anti-invariant parts). \newcommand{\al}[1]{\alpha_{\mathrm{\ref{pr:#1}}}} \newcommand{\be}[1]{\beta_{\mathrm{\ref{pr:#1}}}} \newcommand{\ep}[1]{\e_{\mathrm{\ref{pr:#1}}}} \newcommand{\AlphaThreeOneRat}{\frac{126373441}{504000000}} \newcommand{\AlphaThreeOneAppr}{0.25074} \newcommand{\AlphaThreeTwoRat}{\frac{1607168566087}{8282009829376}} \newcommand{\AlphaThreeTwoAppr}{0.19405} The first result implies, by~\eqref{eq:K4}, that the Tur\'an density of $C_{5}^{3-}$ is at most $$ \al1:=\AlphaThreeOneRat \simeq \AlphaThreeOneAppr..., $$ which is rather close to $1/4=0.25$, the value that we ultimately aim for. \begin{proposition}\label{pr:Flag-raw-upper-bound}\label{pr:1} For every integer $n\ge 1$, every $\{C_{5}^{3-},K_{4}^{3-}\}$-free $n$-vertex $3$-graph $\C H$ has at most $ \al1\,\frac{n^3}6$ edges.
\end{proposition} \begin{proof} Suppose on the contrary that some $3$-graph $\C H$ contradicts the proposition. Thus, $\beta:=6|\C H|/(v(\C H))^3$ is greater than $\al1$. For every integer $k\ge 1$, the $k$-blowup $\C H^{(k)}$ has $k\, v(\C H)$ vertices and $k^3\,|\C H|$ edges, so the ratio $6|\C H^{(k)}|/(v(\C H^{(k)}))^3$ for $\C H^{(k)}$ is also equal to~$\beta$. Also, $\C H^{(k)}$ is still $\{C_{5}^{3-},K_{4}^{3-}\}$-free since this family is closed under taking images under homomorphisms. The sequence $(\C H^{(k)})_{k=1}^\infty$ converges as $k\to\infty$ to a limit $\phi$, where $$ \phi(F):=\lim_{k\to\infty} p(F,\C H^{(k)}),\quad\mbox{for a 3-graph $F$,} $$ that is, $\phi$ sends a 3-graph $F$ to the limiting density of $F$ in $\C H^{(k)}$. We extend $\phi$ by linearity to formal linear combinations of $3$-graphs, obtaining a positive homomorphism $\C A^0\to \I R$ from the flag algebra $\C A^0$ to the reals, see~\cite[Section~3.1]{Raz07}. It satisfies that $\phi(K_3^3)=\beta$, that is, the edge density in the limit $\phi$ is $\beta$. However, our (standard) application of the flag algebra method using $\{C_{5}^{3-},K_{4}^{3-}\}$-free $3$-graphs on $7$ vertices (resulting in an SDP with $|\C F_7^0|=1127$ inequality constraints) gives $\al1$ as an upper bound on the edge density. This contradicts our assumption $\beta>\al1$. \hide{We also included the assumptions (that hold for every Tur\'an-extremal $3$-graph) that the degrees of every two vertices differ by at most $o(n^2)$ and each is at least $(\frac14-o(1)){n\choose 2}$. Each of these assumptions can be multiplied by an (unknown) non-negative combination of flags of the same type, averaged and added to the final identity.} \end{proof} For an $n$-vertex $3$-graph $\C H$ define the \textbf{max-cut ratio} to be $$ \mu(\C H):=\frac{6}{n^3}\,\max\big\{\,|\C H\cap \C K[V_1,V_2,V_3]|: V_1,V_2,V_3\mbox{ form a partition of }V(\C H)\,\big\}. $$ Observe that $\mu(\C H^{(k)})=\mu(\C H)$ for every $k\ge 1$. Indeed, let the $k$-blowup $\C H^{(k)}$ have $n$ groups of twins $U_1,\dots,U_n$ corresponding to the $n$ vertices of $\C H$. Every $3$-partition $V_1\cup V_2\cup V_3$ of $V(\C H)$ lifts to the $3$-partition $V_1^{(k)}\cup V_2^{(k)}\cup V_3^{(k)}$ of $V(\C H^{(k)})$, where $V_i^{(k)}:=\bigcup_{v\in V_i} U_v$, implying that $\mu(\C H^{(k)})\ge \mu(\C H)$. On the other hand, take any partition $W_1, W_2, W_3$ of $V(\C H^{(k)})$. Suppose we change the 3-partition on some $U_i$ by putting $k_j$ vertices into $W_j$ for $j\in [3]$. Since every edge of $\C H^{(k)}$ intersects $U_i$ in at most one vertex, the number of transversal edges is an affine function of $(k_1,k_2,k_3)$, so it attains its maximum (over non-negative integers $k_1,k_2,k_3$ summing to $k$) when some $k_j=k$, that is, when we put the entire set $U_i$ into some~$W_j$. We can iteratively repeat this for each $i\in [n]$, without decreasing the number of transversal edges, until we get a 3-partition of $V(\C H^{(k)})$ with each set $U_i$ being entirely inside a part. It follows that $\mu(\C H^{(k)})\le \mu(\C H)$, as desired. The following result shows that an arbitrary $\{C_{5}^{3-}, K_{4}^{3-}\}$-free $3$-graph $\C H$ of fairly large size contains a large $3$-partite subgraph, namely with at least $\al2 n^3/6$ edges, where \begin{equation}\label{eq:al2} \al2:=\AlphaThreeTwoRat \simeq \AlphaThreeTwoAppr...\ . \end{equation} Note that this is not too far from the upper bound of $(n/3)^3=0.2222...\cdot n^3/6$.
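For intuition, the max-cut ratio of a small $3$-graph can be computed by brute force over all assignments of its vertices to three labelled parts (an empty part contributes no transversal edges, so this agrees with the maximum over genuine partitions whenever the latter is positive). The following sketch, with our own function names, is illustrative only.

\begin{verbatim}
from itertools import product

def max_cut_ratio(n, edges):
    """mu(H) = (6/n^3) * (max number of transversal edges over 3-partitions of [n])."""
    best = max(sum(1 for e in edges if {colour[v - 1] for v in e} == {0, 1, 2})
               for colour in product(range(3), repeat=n))
    return 6 * best / n ** 3

# K_4^{3-} on {1,2,3,4}: at most two of its three triples can be made transversal
K4_minus = [(1, 2, 3), (1, 2, 4), (1, 3, 4)]
assert max_cut_ratio(4, K4_minus) == 12 / 4 ** 3    # attained e.g. by {1}, {2}, {3,4}
\end{verbatim}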
\begin{proposition}\label{pr:Flag-3-parts}\label{pr:2} Every $\{C_{5}^{3-}, K_{4}^{3-}\}$-free $n$-vertex $3$-graph $\C H$ with at least $\be2\,\frac{n^3}{6}$ edges has the max-cut ratio $\mu(\C H)$ at least $\al2$, where $\be2:=\frac{2499}{10000}=\frac14-10^{-5}$. \hide{admits a vertex partition $V(\C H)=V_1\cup V_2\cup V_3$ with $$ \left|\C H\cap \mathcal{K}[V_1, V_2, V_3]\right|\ge \al2\,\frac{n^3}{6}. $$} \hide{ For every $\e>0$ there is $n_0$ such that every $\{C_{5}^{3-}, K_{4}^{3-}\}$-free $3$-graph $\C H$ with $n\ge n_0$ vertices and minimum degree at least $(\frac14-10^{-6}){n-1\choose 2}$ \xl{new calculations show that we can replace it with $|\mathcal{H}| \ge \left(\frac{1}{4} - \frac{1}{10^6}\right) \binom{n}{3}$} \op{We need that $(\frac14-10^{-6})\le \al1$, which is not true, so we need to adjust $10^{-6}$.} admits a vertex partition $V(\C H)=V_1\cup V_2\cup V_3$ with $$ \left|\C H\cap \mathcal{K}[V_1, V_2, V_3]\right|\ge (\al2-\e){n\choose 3}. $$ } \end{proposition} \begin{proof} Informally speaking, the main idea is that any edge $X$ of $\C H$ defines a partition of $V(\C H)\setminus X$, where the part of a vertex $v$ depends on how the link graph of $v$ looks inside $X$, that is, on $L_{\C H}(v)\cap {X\choose 2}$. By the $K_{4}^{3-}$-freeness, this intersection has at most one element, so we have at most 4 non-empty parts. We ignore those vertices $v\in V(\C H)$ for which the intersection is empty. (We could have assigned these vertices e.g.\ randomly into parts and obtained a slightly better bound, but the stated bound suffices for our purposes.) Thus, we obtain three disjoint subsets $V_1^X,V_2^X,V_3^X\subseteq V(\C H)$. We pick $X\in\C H$ such that the size of $\C G^X:=\C H\cap \C K[V_1^X,V_2^X,V_3^X]$ is at least the average value when $X$ is a uniformly random edge of~$\C H$. We can express the product $P:=\rho(\C H)\cdot \I E|\C G^X|/{n-3\choose 3}$ via densities of $6$-vertex subgraphs. Indeed, $P$ is the probability, over random disjoint $3$-subsets $X,Y\subseteq V(\C H)$, of $X\in\C H$ and $Y\in\C G^X$; this in turn can be determined by first sampling a random 6-subset $X\cup Y\subseteq V(\C H)$ and computing its contribution to $P$ (which depends only on the isomorphism type of $\C H[X\cup Y]$). Thus, as a lower bound on $\max_{X\in\C H} |\C G^X|/{n-3\choose 3}$, we can take the ratio of the minimum value of $P$ to the maximum possible edge density (which was already upper bounded by Proposition~\ref{pr:Flag-raw-upper-bound}). We bound $P$ from below, via rather standard flag algebra calculations. Let us briefly give some formal details. We work with the limit theory of $\{C_{5}^{3-}, K_{4}^{3-}\}$-free $3$-graphs. For a $k$-vertex \textbf{type} (i.e.,\ a fully labelled 3-graph) $\tau$ and an integer $m\ge k$, let $\C F_m^\tau$ be the set of all \textbf{$\tau$-flags} on $m$ vertices (i.e.,\ $3$-graphs with with $k$ labelled vertices that span a copy of $\tau$) up to label-preserving isomorphisms. Let $\I R\C F_m^\tau$ consist of formal linear combinations $\sum_{F\in \C F_m^\tau} c_F F$ of $\tau$-flags with real coefficients (which we will call \textbf{quantum $\tau$-flags}). For $i\in \{0,1,2\}$, the unique type with $i$ vertices (and no edges) is denoted by $i$. Let $E$ denote the type which consists of three roots spanning an edge. We will use the following definitions, depending on an $E$-flag $(H, (x_1,x_2,x_3))$. (Thus, $\{x_1,x_2,x_3\}$ is an edge of a $\{C_{5}^{3-},K_{4}^{3-}\}$-free $3$-graph $H$.) 
For $i\in[3]$, we define $V_i=V_i(H, (x_1,x_2,x_3))$ to consist of those vertices $y\in V(H)\setminus X$ such that the $H$-link of $y$ contains the pair $X\setminus\{x_i\}$, where we denote $X\coloneqq \{x_1,x_2,x_3\}$. As we already observed, the sets $V_1,V_2,V_3$ are pairwise disjoint by the $K_{4}^{3-}$-freeness of $H$. Let $$ T=T(H, (x_1,x_2,x_3)) := H\cap \C K[V_1,V_2,V_3] $$ be the 3-graph consisting of those edges in $H$ that transverse the parts $V_1,V_2,V_3$. When we normalise $|T|$ by ${v(H)-3\choose 3}^{-1}$, the obtained ratio can be viewed as the probability over a uniformly random $3$-subset $Y$ of $V(H)\setminus\{x_1,x_2,x_3\}$ that $Y$ is an edge of $H$, the link of each vertex of $Y$ has exactly one pair inside $X$ and the obtained three pairs are pairwise different. This ratio is the \textbf{density} (where the roots has to be preserved) of the quantum $E$-flag \begin{equation}\label{eq:F222E} F_{2,2,2}^E\coloneqq \sum_{F\in \C F_6^E} |T(F)|\, F \end{equation} in the $E$-flag $(H,(x_1,x_2,x_3))$. Note that each coefficient $|T(F)|$ in~\eqref{eq:F222E} is either 0 or 1 since there is only one potential set $Y$ to test inside every $F\in\C F_6^E$ (while the scaling factor ${6-3\choose 3}^{-1}=1$ is omitted from~\eqref{eq:F222E}). For a $k$-vertex type $\tau$ and $(F,(x_1,\dots,x_k))\in\C F_m^\tau$, the \textbf{averaging} $\eval{F}$ is defined as the quantum (unlabelled) $0$-flag $q\, F\in\I R\C F_m^0$, where $q$ is the probability for a uniform random injection $f:[k]\to V(F)$ that $(F,(f(1),\dots,f(k)))$ is isomorphic to $(F,(x_1,\dots,x_k))$, and this definition is extended to $\I R\C F_m^\tau$ by linearity, see e.g.\ \cite[Section 2.2]{Raz07}. \hide{ Let us return to the lemma. Suppose that some $\e>0$ contradicts it. Then there is a sequence of counterexamples $\C H$ of order $n\to\infty$. By passing to a subsequence, we can assume that they converge to a flag algebra limit object, which is a positive algebra homomorphism $\phi:\C A^0\to\I R$. The min-degree assumption translates in the statement that the random homomorphism $\boldsymbol{\phi}^1:\C A^1\to\I R$ is at least $\frac14-10^{-6}$ with probability 1. We also add the extra assumption that the edge density is at most $\frac{25073}{100000}$, coming from Proposition~\ref{pr:Flag-raw-upper-bound} applied to each $3$-graph $\C H$. We deal with these assumptions in the standard way: each can be multiplied by an (unknown) non-negative combination of flags of the same type and the result is averaged and added to the final identity.} Let us return to the proposition. Suppose that some $n$-vertex $3$-graph $\C H$ contradicts it. Let $\beta:=\mu(\C H)$ be the max-cut ratio for $\C H$. By our assumption, $\beta<\al2$. Let $\phi:\C A^0\to \I R$ be the limit of the uniform blowups $\C H^{(k)}$ of $\C H$. The assumption on the edge density of $\C H$ gives that $\phi(K_3^3)\ge \be2$. \hide{Then there is a sequence of counterexamples $\C H$ of order $n\to\infty$. By passing to a subsequence, we can assume that they converge to a flag algebra limit object, which is a positive algebra homomorphism $\phi:\C A^0\to\I R$. The min-degree assumption translates in the statement that the random homomorphism $\boldsymbol{\phi}^1:\C A^1\to\I R$ is at least $\frac14-10^{-6}$ with probability 1. We also add the extra assumption that the edge density is at most $\frac{25073}{100000}$, coming from Proposition~\ref{pr:Flag-raw-upper-bound} applied to each $3$-graph $\C H$. 
We deal with these assumptions in the standard way: each can be multiplied by an (unknown) non-negative combination of flags of the same type and the result is averaged and added to the final identity.} For each $k$-blowup $\C H^{(k)}$ pick an edge $\{x_1,x_2,x_3\}\in \C H^{(k)}$ such that the density (when normalised by ${kn-3\choose 3}^{-1}$) of $\C H^{(k)}$-edges transversing the corresponding three parts is at least the average value. This average density tends to $\phi(\eval{F_{2,2,2}^E})/\phi(\eval{E})$ as $k\to\infty$. Flag algebra calculations on flags with at most $7$ vertices show that, under the assumption that $\phi(K_3^3)\ge \be2$, it holds that $\phi(\eval{F_{2,2,2}^E})\ge \al1\al2$. (In fact, we first computed the rational number $\gamma$ returned by computer and then defined $\al2\coloneqq \gamma/\al1$.) Thus, we have by Proposition~\ref{pr:Flag-raw-upper-bound} that $ {\phi(\eval{F_{2,2,2}^E})}/{\phi(\eval{E})}\ge \al2. $ However, this ratio is upper bounded by the limit as $k\to\infty$ of $\mu(\C H^{(k)})=\mu(H)=\beta<\al2$, a contradiction. \hide{ For each $n$-vertex counterexample $\C H$ from the convergence subsequence, pick $\{x_1,x_2,x_3\}\in \C H$ such that the density (when normalised by ${n-3\choose 3}^{-1}$) of $\C H$-edges transversing the corresponding three parts is at least the average value. If $n$ is sufficiently large, this is at least $\phi(\eval{F_{2,2,2}^E})/\phi(\eval{E})-\e/2$. By above and Proposition~\ref{pr:Flag-raw-upper-bound}, we have that $ {\phi(\eval{F_{2,2,2}^E})}/{\phi(\eval{E})}\ge \al2 $, giving the required bound and leading to a contradiction to our assumption that each $\C H$ is a counterexample.} \end{proof} In order to state the next result, we have to provide various definitions for a $3$-graph~$\C H$. Recall that a partition $V(\C H)=V_1\cup V_2\cup V_3$ of its vertex set is \textbf{locally maximal} if $|\C H\cap \mathcal{K}[V_1, V_2, V_3]|$ does not increase when we move one vertex from one part to another. For example, a partition that maximises $|\C H\cap \mathcal{K}[V_1, V_2, V_3]|$ is locally maximal; another (more efficient) way to find one is to start with an arbitrary partition and keep moving vertices one by one between parts as long as each move strictly increases the number of transversal edges. For a partition $V(\C H)=V_1\cup V_2\cup V_3$, let \begin{align} B_{\mathcal{H}}(V_1, V_2, V_3) &\coloneqq \left\{ X\in \C H\colon \{|X\cap V_1|,\,|X\cap V_2|,\, |X\cap V_3|\}=\{0,1,2\}\right\}, \label{equ:def-bad-triple}\\ M_{\mathcal{H}}(V_1, V_2, V_3) &\coloneqq \left\{ X\in {V(\C H)\choose 3}\setminus\C H\colon \{|X\cap V_1|,\,|X\cap V_2|,\, |X\cap V_3|\}=\{1,1,1\} \right\} \label{equ:def-missing-triple} \end{align} be the sets of \textbf{bad} and \textbf{missing} edges respectively. We will omit $(V_1, V_2, V_3)$ and the subscript $\mathcal{H}$ if it is clear from the context. If we compare $\C H$ with the recursive construction with the top parts $V_1,V_2,V_3$ then, with respect to the top level, $B$ consists of the additional edges of $\C H$ while $M=\mathcal{K}[V_1,V_2,V_3]\setminus \C H$ consists of the top triples not presented in~$\C H$. Note that the edges inside a part are not classified as bad or missing. The key result needed for our proof is the following. 
\begin{proposition}\label{pr:3} If $\C H$ is a $\{C_{5}^{3-},K_{4}^{3-}\}$-free $n$-vertex $3$-graph and $V(\C H)=V_1\cup V_2\cup V_3$ is a locally maximal partition with \begin{equation}\label{eq:3} |\C H\cap \mathcal{K}[V_1,V_2,V_3]|\ge \be3 \frac{n^3}{6}, \end{equation} where $\be3\coloneqq 19/100$, then with $B=B_{\C H}(V_1,V_2,V_3)$ and $M=M_{\C H}(V_1,V_2,V_3)$ defined in~\eqref{equ:def-bad-triple} and~\eqref{equ:def-missing-triple} respectively, we have \begin{align} \label{eq:BM} |B|-\frac{99}{100}\,|M| \le 0. \end{align} \end{proposition} \begin{proof} We would like to run flag algebra calculations on the limit $\phi$ of blowups $\C H^{(k)}$ of a hypothetical counterexample $(\C H,V_1,V_2,V_3)$ in the theory of $\{C_{5}^{3-},K_{4}^{3-}\}$-free $3$-graphs which are 3-coloured (that is, we have 3 unary relations $V_1,V_2,V_3$ such that each vertex in a flag satisfies exactly one of them). For $i\in [3]$, let $(1,i)$ denote the 1-vertex type where the colour of the unique vertex is~$i$. Consider the random homomorphism $\boldsymbol{\phi}^{(1,i)}$, which is the limit of taking a uniform random colour-$i$ root. Note that, by~\eqref{eq:3}, each part $V_i^{(k)}\subseteq V(\C H^{(k)})$ is non-empty (in fact, it occupies a positive fraction of vertices), so $\boldsymbol{\phi}^{(1,i)}$ is well-defined. The local maximality assumption directly translates in the limit to the statement that, for each $i\in [3]$, if $h$ and $j$ denote the indices of the two remaining parts (that is, $\{i,j,h\}=[3]$) then \begin{equation}\label{eq:LM} \boldsymbol{\phi}^{(1,i)}(K_{j,h})\ge \max \left\{\boldsymbol{\phi}^{(1,i)}(K_{i,j}),\boldsymbol{\phi}^{(1,i)}(K_{i,h})\right\} \end{equation} with probability 1, where $K_{a,b}$ is the $(1,i)$-flag on three vertices that span an edge with the free vertices having colours $a$ and $b$ (and, of course, the root vertex having colour $i$). We can now run the usual flag algebra calculations where each of the inequalities in~\eqref{eq:3} and~\eqref{eq:LM} can be multiplied by an unknown non-negative combination of 0-flags and $(1,i)$-flags respectively (and then averaged out in the latter case). The final inequality should prove that the left-hand side of~\eqref{eq:BM} is non-positive. Note that the ratio $|M|/{n\choose 3}$ is the density of the 0-flag consisting of a single rainbow non-edge in the 0-flag $(\C H,V_1,V_2,V_3)$. Likewise, $|B|/{n\choose 3}$ can be written as the density of an appropriate quantum 0-flag with $6$ constituents depending on the ordered sequence of intersections $(|X\cap V_i|)_{i=1}^3$ for $X\in B$ (which is a permutation of $(2,1,0)$ for every bad $X$). Now we face the standard flag algebra task of finding the maximum of the quantum 0-flag expressing $|B|-\frac{99}{100}\, |M|$ and checking if it is at most 0. However, an issue with this approach is that 5-vertex flags are not enough to prove the desired conclusion, while we have rather many 6-vertex flags (namely, $|\C F_6^0|=16181$) and the resulting SDP problem seems too large for a conventional PC. Our way around this is based on the observation that the parts play a symmetric role in both the statement and the conclusion of Proposition~\ref{pr:3}, so instead we work with 3-partitions of the vertex set which are unordered. The easiest way to formally define an unordered partition $\{V_1,V_2,V_3\}$ of a set $V$ is to encode it by the complete 3-partite 2-graph with parts $V_1,V_2,V_3$.
Thus, a 0-flag in our theory is a pair $(H,F)$ where $H$ is a ternary relation representing a $\{C_{5}^{3-},K_{4}^{3-}\}$-free $3$-graph and $F$ is a binary relation representing a complete 3-partite 2-graph on the same set $V$. Thus, $x$ and $y$ are adjacent in $F$ if and only if they are from different parts. A sub-flag induced by $V'\subseteq V$ is obtained by restricting the relations $H$ and $F$ to $V'$, etc. For example, the density of transversal edges is given by the 3-vertex 0-flag $(H,F)$ where the three vertices form an edge in $H$ and span a triangle in $F$. The family of complete 3-partite 2-graphs can be characterized by forbidding induced subgraphs (namely, 3 vertices spanning exactly one edge and 4 vertices spanning all 6 edges), so the flag algebra machinery developed in~\cite{Raz07} applies here. However, when we translate the assumption in~\eqref{eq:LM} to this theory using only 3-vertex flags, we lose some information. Namely, we use the weaker version of~\eqref{eq:LM} which, for finite 3-graphs, states that if we move any vertex to a random part among the other two then the expected change in the number of transversal edges is non-positive. Its limit version is that with probability~1 we have for each $i\in [3]$ that \begin{equation}\label{eq:LMSymm} \boldsymbol{\phi}^{1}(K_{\bullet|\circ|\circ}) \ge \frac12\, \boldsymbol{\phi}^{1}(K_{\bullet\circ|\circ}), \end{equation} where $\boldsymbol{\phi}^{1}$ is the limiting random homomorphism $\C A^1\to\I R$ corresponding to making a random vertex the root, $K_{\bullet|\circ|\circ}$ is the 3-vertex 1-flag that spans an edge in $H$ and a triangle in $F$, and $K_{\bullet\circ|\circ}$ is the 3-vertex 1-flag that spans an edge in $H$ and induces a 2-edge path in $F$ with the root as its endpoint. (Also, let us observe that $\boldsymbol{\phi}^{1}$ is well-defined in this theory even if some part is empty.) Returning to the proposition, suppose that a $3$-graph $\C H$ and sets $V_1,V_2,V_3$ contradict it. Every $k$-blowup $\C H^{(k)}$ has the natural vertex $3$-partition, $V_1^{(k)}\cup V_2^{(k)}\cup V_3^{(k)}=V(\C H^{(k)})$ with the corresponding 3-graphs of bad and missing edges being exactly the $k$-blowups of $B$ and~$M$. It is easy to check that this partition is also locally maximal, the inequality in \eqref{eq:3} holds for $(\C H^{(k)},V_1^{(k)},V_2^{(k)},V_3^{(k)})$, and the left-hand side of~\eqref{eq:BM} is $\Omega(k^3)$ as $k\to\infty$. The desired contradiction follows from standard flag algebra calculations on 6-vertex flags (when there are only $|\C F_6^0|=2840$ 0-flags), where we use~\eqref{eq:LMSymm} as an assumption (instead of the full local maximality). \hide{Our implementation of this theory was to use three unary relations (working with the complete 3-partite graph $F$ implicitly defined by them), as this was easier to implement given how we represent flags on computer.} \end{proof} Next we would like to show that if we add a vertex $v$ to a complete 3-partite 3-graph $\mathcal{K}[V_1, V_2, V_3]$ while keeping it $C_{5}^{3-}$-free then we lose in the degree of $v$ unless its link is ``correct''. This reduces to a 2-graph problem since we can operate with the link graph $L$ of $v$ plus a 3-partition $V_1\cup V_2\cup V_3$ of its vertex set. For $i,j\in [3]$, let $$ L_{i,j}\coloneqq L\cap \C K[V_i,V_j]=\{\{x,y\}\in L\colon x\in V_i, y\in V_j\} $$ be the set of edges of $L$ with the endpoints in $V_i$ and $V_j$. In particular, $L_{i,i}$ is $L\cap {V_i\choose 2}$.
By symmetry between the parts we can assume that \begin{equation}\label{eq:L23} |L_{2,3}|\ge \max\{\,|L_{1,2}|,\, |L_{1,3}|\,\}. \end{equation} Thus, $V_1$ would be a best part in which to place $v$ if we wanted to maximise the number of transversal edges containing $v$. With this in mind, we define the set of \textbf{bad} edges to be \begin{equation}\label{eq:GraphB} B_{v}\coloneqq L_{1,2}\cup L_{1,3}\cup L_{2,2}\cup L_{3,3}, \end{equation} while the set $M_{v}:=\C K[V_2,V_3]\setminus L$ of \textbf{missing} edges consists of non-adjacent pairs in $V_2\times V_3$. Note that no pair inside $V_1$ is designated as bad or missing. Since we cannot forbid $K_{4}^{3-}$ in this problem (as $L$ can contain many triangles without any $C_{5}^{3-}$ in the corresponding $3$-graph), we have to exclude the possibility that $L$ is the union of the cliques on $V_2$ and $V_3$, which is another configuration attaining $|L|\ge (\frac14+o(1)){n\choose 2}$. This is achieved by adding an assumption that $L$ contains some positive fraction of pairs between parts. With some experimenting, a result which suffices for us and which can be proved by computer is the following. \begin{proposition}\label{pr:4} For every $\e>0$ there is $\delta>0$ such that the following holds for every $n\ge 1/\delta$. If $L$ is a 2-graph on $[n]$ and $V_1\cup V_2\cup V_3=[n]$ is a vertex partition satisfying, in the notation above, the inequality in~\eqref{eq:L23} and \begin{enumerate}[label=(\roman*)] \item\label{it:41} $|V_i|\ge (\frac14-\delta)n$ for each $i\in [3]$, \item\label{it:42} $|L_{1,2}|+|L_{1,3}|+|L_{2,3}|\ge (\frac1{16}-\delta) n^2$, \item\label{it:43} for every distinct $h,i,j\in [3]$ there are at most $\delta n^4$ quadruples $(w,x,y,z)\in V_h\times V_i^2\times V_j$ with $wx,yz\in L$, \item\label{it:44} for every distinct $i,j\in [3]$ there are at most $\delta n^3$ triples $(x,y,z)\in V_i^2\times V_j$ with $xy,xz \in L$, \end{enumerate} then $|B_{v}|-\frac9{10}|M_{v}|\le \e n^2$. \end{proposition} \begin{proof} We work in the theory of 2-graphs with an ordered vertex $3$-partition. For example, $|L_{i,j}|/{n\choose 2}$ is the density of the 0-flag consisting of two adjacent vertices, one in the $i$-th part and the other in the $j$-th part. Suppose that the statement is false for some $\e>0$ and take a growing sequence of counterexamples as $\delta\to 0$. By compactness, we can pass to a subsequence which converges to some limit $\phi$. In the limit, $\delta$ disappears; for example, by Item~\ref{it:44}, we can assume that we forbid an edge inside a part incident to an edge across parts. Computer calculations show that, with the limit versions of Items~\ref{it:41}--\ref{it:44} as assumptions, the quantum graph that represents $(B_{v}-\frac9{10}M_{v})/{n\choose 2}$ can be proved to be non-positive, with the proof using flags on at most 5 vertices (with $|\C F_5^0|=450$). This contradiction proves the proposition. \end{proof} The following result is a corollary of Proposition~\ref{pr:4}. \begin{proposition}[Vertex Stability] \label{pr:5} For every $\varepsilon > 0$ there exist $\delta>0$ and $n_0$ such that the following statement holds for every $n \ge n_0$.
Suppose that $\mathcal{H}$ is a $C_{5}^{3-}$-free $3$-graph on a disjoint union $V_1 \cup V_2 \cup V_3 \cup \{v\}$ satisfying \begin{enumerate}[label=(\roman*)] \item\label{it:51} $|V_1| + |V_2| + |V_3| = n$ and $\min_{i\in [3]}\{|V_i|\} \ge \frac{n}{4}$, \item\label{it:52} $|\mathcal{H}\cap \C K[V_1, V_2, V_3]| \ge |V_1||V_2||V_3| - \delta n^3$, \item\label{it:53} $|L_{\mathcal{H}}(v) \cap \mathcal{K}^2[V_1, V_2,V_3]| \ge \frac{n^2}{16}$, and \item\label{it:54} $|L_{\mathcal{H}}(v) \cap \mathcal{K}[V_2, V_3]| \ge \max\left\{\,|L_{\mathcal{H}}(v) \cap \mathcal{K}[V_1, V_2]|,\,|L_{\mathcal{H}}(v) \cap \mathcal{K}[V_1, V_3]|\,\right\}$. \end{enumerate} Then $|B_{v}| - \frac{9}{10}|M_{v}| \le \varepsilon n^2$, where \begin{align*} B_{v} \coloneqq L_{\mathcal{H}}(v) \cap \left( \binom{V_2}{2} \cup \binom{V_3}{2} \cup \mathcal{K}[V_1, V_2\cup V_3]\right) \quad\text{and}\quad M_{v} \coloneqq \mathcal{K}[V_2, V_3] \setminus L_{\mathcal{H}}(v). \end{align*} \hide{ In particular, \begin{align*} \left|L_{\mathcal{H}}(v) \setminus \binom{V_1}{2}\right| \le |V_2||V_3| - \max\left\{\frac{|B_{v}|}{9},\,\frac{|M_{v}|}{10} \right\} + \varepsilon n^2. \end{align*} } \end{proposition} \begin{proof} Given $\e>0$, choose, in this order, sufficiently small positive constants $\delta\gg 1/n_0$. Let $\C H$, $v$ and $V_i$'s be as in the proposition. Let $L\coloneqq L_{\C H}(v)$ be the link graph of $v$ in $\C H$. We would like to apply Proposition~\ref{pr:4} with $\e$ and $2\delta$. For this, we have to check that the last two items of Proposition~\ref{pr:4} are satisfied for the quadruple $(L,V_1,V_2,V_3)$ with respect to $2\delta$ (as the first two items follow from our assumptions). Let us check Item~\ref{it:43} of Proposition~\ref{pr:4}. Fix any distinct $h,i,j\in [3]$. For every quadruple $(w,x,y,z)\in V_h\times V_i^2\times V_j$ with $wx,yz\in L$ such that, additionally, we have $x\not=y$, at least one of the triples $wxz$ and $wzy$ is missing from $\C H$ as otherwise we get a copy of $C_{5}^{3-}$ visiting vertices $vyzwx$ in this cyclic order (having edges $vyz,yzw,zwx,wxv$). On the other hand, each edge in $\mathcal{K}[V_1,V_2,V_3]\setminus \C H$ appears at most $n$ times this way. (Indeed, we have to pick an extra vertex in the part $V_i$ of size at most $n/2$ and may also have two choices whether it plays the role of $x$ or $y$.) Since $|\C K[V_1,V_2,V_3]\setminus \C H|\le \delta n^3$, the number of quadruples $(w,x,y,z)$ in Proposition~\ref{pr:4}.\ref{it:43} is at most $\delta n^3\cdot n$ (those with $x\not=y$) plus $n^3$ (those with $x=y$), giving the desired bound since $n\ge n_0\gg 1/\delta$. Let us check Item~\ref{it:44} of Proposition~\ref{pr:4}. Fix any distinct $i,j\in [3]$. For each triple $(x,y,z)\in V_i^2\times V_j$ with $xy,xz\in L$ and for every choice of $w$ in the remaining part, at least one of the triples $wxz$ and $wyz$ is missing from $\C H$ for otherwise we have a copy of $C_{5}^{3-}$ on $wzxvy$ (with edges $wzx,zxv,xvy,ywz$). On the other hand, each edge in $\mathcal{K}[V_1,V_2,V_3]\setminus \C H$ appears at most $n$ times this way. A similar calculation as before shows that there are at most $2\delta n^3$ such triples $(x,y,z)$ in total, as desired. Thus, Proposition~\ref{pr:4} applies and gives the desired conclusion.\end{proof} \subsection{Some remarks on our implementation} There was some freedom in choosing constants that still make the whole proof work.
It would be sufficient to prove the weaker versions of Propositions~\ref{pr:2} and~\ref{pr:3} where $\be2:=1/4$ (with some extra arguments) and $\be3:=\al2$ respectively. We decided to present somewhat stronger versions of these intermediate results, obtained by experimenting on a computer. For example, $\be3:=0.185$ did not seem to suffice in Proposition~\ref{pr:3} (nor $\be2:=\frac14-10^{-4}$ in Proposition~\ref{pr:2}) so we opted for the current values that are close to optimal and simple to write. The stated values for $\al1$ and $\al2$ are exactly the constants returned by our rounding calculations. Likewise, we decided to use only the necessary assumptions in the flag proofs for Propositions~\ref{pr:1}--\ref{pr:4} (although we did not make any attempt to reduce the set of used types), even though this sometimes resulted in slightly weaker intermediate bounds. For example, we could have improved slightly the ``plain'' upper bound on $\pi(C_{5}^{3-})$ coming from Proposition~\ref{pr:1} if we had applied the flag algebra proof to a limit of a convergent subsequence of maximum $\{C_{5}^{3-},K_{4}^{3-}\}$-free $3$-graphs of order $n\to\infty$, where we could have additionally assumed that every two vertex degrees differ by at most $n-2=o(n^2)$ and each degree is at least $(\frac14+o(1)) {n\choose 2}$. Some extra steps were required in Proposition~\ref{pr:3} during \textbf{rounding} (when, given the floating-point matrices produced by an SDP solver, we have to find matrices with rational coefficients that certify the validity of the bound). The issue here is that the inequality $|B|\le \frac{99}{100}|M|$ is in a sense attained by many feasible configurations (in addition to varying the edges inside parts): namely, we can pick a partition $[n]=V_1\cup V_2\cup V_3$ with any part ratios $x_i\coloneqq |V_i|/n$ as long as $\prod_{i=1}^3|V_i|\ge \be3\,\frac{n^3}6$ and take $\C H\coloneqq \mathcal{K}[V_1,V_2,V_3]$ (when $B=M=\emptyset$). As is well known, any asymptotically extremal construction forces various relations in the certificate (that are of the form that some inequalities have zero slack and that some positive semi-definite matrices must have specific zero eigenvectors). We do not dwell on the exact nature of these restrictions (since the task of verifying the certificates does not require any knowledge of them) but refer the reader to e.g.~\cite[Section~3.1]{PikhurkoVaughan13} for a general discussion. We observe that these constraints in Proposition~\ref{pr:3} amount to a system of explicit polynomial equations being satisfied for all $(x_1,x_2)$ in some open neighbourhood of $(\frac13,\frac13)$. Of course, this implies that all these polynomials are identically~0. We used the equivalent reformulation (that was easier to implement in the current code) that all partial derivatives of each polynomial (up to the maximum degree) vanish at $(\frac13,\frac13)$. \section{Proof of Theorem~\ref{THM:turan-density-C5-}}\label{SEC:Proof-turan-density-C5-} We present the proof of Theorem~\ref{THM:turan-density-C5-} in this section. Here (and in the subsequent sections), we will be rather loose with the constants in the lower-order terms. The following lemma will be crucial for the proof. \begin{lemma}\label{LEMMA:recursion-upper-bound} There exists a non-increasing function $N_{\ref{LEMMA:recursion-upper-bound}} \colon (0,1) \to \mathbb{N}$ such that the following holds for every $\xi > 0$ and for every $n \ge N_{\ref{LEMMA:recursion-upper-bound}}(\xi)$.
Suppose that $\mathcal{H}$ is an $n$-vertex $C_{5}^{3-}$-free $3$-graph with at least $\left(\frac{1}{4} - \frac{1}{10^7}\right)\binom{n}{3}$ edges. Then there exists a partition $V_1 \cup V_2 \cup V_3 = V(\mathcal{H})$ such that $\frac{n}{5} \le |V_i| \le \frac{n}{2}$ for every $i \in [3]$, and \begin{align*} |\mathcal{H}| & \le |V_1||V_2||V_3| + \sum_{i\in [3]}|\mathcal{H}[V_i]| + \xi n^3 - \max\left\{\frac{|B|}{99},\,\frac{|M|}{100} \right\}, \end{align*} where $B=B_{\C H}(V_1,V_2,V_3)$ and $M=M_{\C H}(V_1,V_2,V_3)$ are defined in \eqref{equ:def-bad-triple} and \eqref{equ:def-missing-triple} respectively. \end{lemma} \begin{proof}[Proof of Lemma~\ref{LEMMA:recursion-upper-bound}] It is enough to show that, for each sufficiently large integer $m$, say $m\ge m_0$, there is $n_0(m)$ such that the conclusion holds when $\xi=\frac1m$ and $n\ge n_0(m)$, as then we can take, for example, $N_{\ref{LEMMA:recursion-upper-bound}}(x):=\max\{ n_0(m)\colon m_0\le m\le \lceil{1/x}\rceil\}$ for $x\in (0,1)$. Fix a sufficiently large $m$ (in particular, assume that $m>10^8$), let $\xi:=1/m$ and then let $n$ be sufficiently large. Let $\mathcal{H}$ be a $C_{5}^{3-}$-free $3$-graph on $n$ vertices with at least $\left(\frac{1}{4} - \frac{1}{10^7}\right)\binom{n}{3}$ edges. Let $V \coloneqq V(\mathcal{H})$. By the Hypergraph Removal Lemma (see e.g.~\cite{RS04,NRS06,Gow07}), $\mathcal{H}$ contains a $\{K_{4}^{3-}, C_{5}^{3-}\}$-free subgraph $\mathcal{G}$ on $V$ with at least $|\mathcal{H}| - \frac{\xi n^3}{3} > \left(\frac{1}{4} - \frac{1}{10^6}\right)\frac{n^3}{6}$ edges. Let $V(\mathcal{G}) = V_1\cup V_2\cup V_3$ be a partition such that $|\mathcal{G}[V_1, V_2, V_3]|$ is maximized. Notice that it suffices to show that \begin{align*} |\mathcal{G}| & \le |V_1||V_2||V_3| + \sum_{i\in [3]}|\mathcal{G}[V_i]| - \max\left\{\frac{|B|}{99},\,\frac{|M|}{100} \right\}, \end{align*} where we re-define $B \coloneqq B_{\mathcal{G}}(V_1, V_2, V_3)$ and $M \coloneqq M_{\mathcal{G}}(V_1, V_2, V_3)$. \hide{ \Qn{Add details for removing vertices of too small degree.} \xl{Proposition~\ref{pr:Flag-3-parts} can now be replaced with `average degree at least $\left(\frac{1}{4} - \frac{1}{10^6}\right)$'. Do we want to use the average-degree version? Suppose we use the minimum-degree version. Then in order to show that the set of small degree vertices is small, we would first need to prove Theorem~\ref{THM:turan-density-C5-} i.e. $\pi(C_{5}^{3-}) \le 1/4$. However, the current proof of Theorem~\ref{THM:turan-density-C5-} uses this lemma. We can work around this by using the fact that every extremal $C_{5}^{3-}$-free $3$-graph has large minimum degree. So the structure of the proof would be: prove Theorem~\ref{THM:turan-density-C5-} first, then prove this lemma. There will be some repetition in both proofs, although not exactly the same.} } Let $x_i \coloneqq |V_i|/n$ for $i \in [3]$. Since the partition $\{V_1, V_2, V_3\}$ is also locally maximal with respect to $\C G$, Proposition~\ref{pr:2} implies that $|\mathcal{G}[V_1,V_2,V_3]| \ge \al2 n^3$, where $\al2$ was defined in~\eqref{eq:al2}. From $\al2\ge \max\{\frac15\cdot \frac25\cdot\frac25, \frac12\cdot \frac14\cdot\frac14\}$, it follows that $x_i \in [1/5, 1/2]$ for every $i \in [3]$. Additionally, it follows from Proposition~\ref{pr:3} that $|B| \le \frac{99}{100}\, |M|$. Consequently, \begin{align}\label{equ:bad-vs-missing} |M| - |B| \ge \frac{|M|}{100}=\max\left\{\frac{|B|}{99},\,\frac{|M|}{100}\right\}. 
\end{align} It follows that \begin{align*} |\mathcal{G}| & = |V_1||V_2||V_3| - |M| + |B| + \sum_{i\in [3]}|\mathcal{G}[V_i]| \\ & \le |V_1||V_2||V_3| + \sum_{i\in [3]}|\mathcal{G}[V_i]| - \max\left\{\frac{|B|}{99},\,\frac{|M|}{100}\right\}. \end{align*} This completes the proof of Lemma~\ref{LEMMA:recursion-upper-bound}. \end{proof} Next, we prove Theorem~\ref{THM:turan-density-C5-}. \begin{proof}[Proof of Theorem~\ref{THM:turan-density-C5-}] Let $\alpha \coloneqq \pi(C_{5}^{3-})$. The $T_{\mathrm{rec}}$-construction from the Introduction shows that $\alpha\ge \frac14$. Fix an arbitrarily small $\xi > 0$ and then let $n$ be sufficiently large. Let $\mathcal{H}$ be an $n$-vertex $C_{5}^{3-}$-free $3$-graph with $\mathrm{ex}(n,C_{5}^{3-})$ edges, i.e., the maximum possible size. Since $\alpha \ge \frac{1}{4}$, we have $|\mathcal{H}| > \left(\frac{1}{4} - \frac{1}{10^7}\right)\binom{n}{3}$. By Lemma~\ref{LEMMA:recursion-upper-bound}, there exists a partition $V_1 \cup V_2 \cup V_3 = V(\mathcal{H})$ such that $\frac{n}{5} \le |V_i| \le \frac{n}{2}$ for every $i \in [3]$, and \begin{align}\label{equ:recursion-a} |\mathcal{H}| \le |V_1||V_2||V_3| + \sum_{i\in [3]}|\mathcal{H}[V_i]| + \xi n^3. \end{align} Let $x_i \coloneqq |V_i|/n$ for $i \in [3]$. We can choose $n$ sufficiently large in the beginning such that \begin{align}\label{equ:turan-number-concentrate} \left|\mathrm{ex}(N,C_{5}^{3-}) - \alpha \, \frac{N^3}{6} \right| \le \xi\,\frac{N^3}6, \quad\text{for every}~N \ge \frac{n}{5}. \end{align} Therefore, it follows from~\eqref{equ:recursion-a} that \begin{align*} (\alpha - \xi) \frac{n^3}{6} \le x_1x_2x_3 n^3 + \sum_{i\in [3]} (\alpha + \xi) \frac{(x_in)^3}{6} + \xi n^3. \end{align*} Combining this with Fact~\ref{FACT:ineqality}, we obtain \begin{align*} \alpha \le \frac{6x_1x_2x_3 + (7+x_1^3+x_2^3+x_3^3)\xi}{1-(x_1^3+x_2^3+x_3^3)} \le \frac{6x_1x_2x_3}{1-(x_1^3+x_2^3+x_3^3)} + 10 \xi \le \frac{1}{4}+ 10 \xi. \end{align*} Letting $\xi \to 0$, we obtain $\pi(C_{5}^{3-}) = \alpha \le \frac{1}{4}$. \end{proof} \section{Proof of Theorem~\ref{THM:C5Minus-stability}}\label{SEC:Proof-C5-stability} In this section, we prove Theorem~\ref{THM:C5Minus-stability}. We will begin by establishing the following weaker form of stability. \begin{lemma}\label{LEMMA:weak-stability} There exists a non-increasing function $N_{\ref{LEMMA:weak-stability}} \colon (0,1) \to \mathbb{N}$ such that the following holds for every $\xi \in (0, 10^{-8})$ and every $n \ge N_{\ref{LEMMA:weak-stability}}(\xi)$. Suppose that $\mathcal{H}$ is an $n$-vertex $C_{5}^{3-}$-free $3$-graph with at least $(1/24 - \xi) n^3$ edges. Then there exists a partition $V_1 \cup V_2 \cup V_3 = V(\mathcal{H})$ such that \begin{enumerate}[label=(\roman*)] \item\label{LEMMA:weak-stability-1} $\left||V_i| - n/3\right| \le 8 \xi^{1/2} n$ for each $i \in [3]$, \item\label{LEMMA:weak-stability-2} $\max\left\{|B|,~|M|\right\} \le 300 \xi n^3$, where $B=B_{\mathcal{H}}(V_1,V_2,V_3)$ and $M=M_{\mathcal{H}}(V_1,V_2,V_3)$ were defined in~\eqref{equ:def-bad-triple} and~\eqref{equ:def-missing-triple}, and \item\label{LEMMA:weak-stability-3} $|\mathcal{H}[V_i]| \ge \left(1/24 - 500\xi \right) |V_i|^3$ for each $i \in [3]$. \end{enumerate} \end{lemma} \begin{proof}[Proof of Lemma~\ref{LEMMA:weak-stability}] As in the proof of Lemma~\ref{LEMMA:recursion-upper-bound}, there is a non-increasing function $N':(0,1)\to \mathbb{N}$ such that~\eqref{equ:turan-number-concentrate} holds for every $\xi\in (0,1)$ and $n\ge N'(\xi)$. 
Let $$N_{\ref{LEMMA:weak-stability}}(\xi):=\max(N'(\xi), N_{\ref{LEMMA:recursion-upper-bound}}(\xi)),\quad\mbox{for $\xi\in (0,10^{-8})$}. $$ Take any $\xi\in (0,10^{-8})$ and $n\ge N_{\ref{LEMMA:weak-stability}}(\xi)$. Let $\C H$ be as in the lemma. Let $V(\mathcal{H}) = V_1 \cup V_2 \cup V_3$ be the partition returned by Lemma~\ref{LEMMA:recursion-upper-bound}. Let $x_i \coloneqq |V_i|/n$ for $i \in [3]$. It follows from Lemma~\ref{LEMMA:recursion-upper-bound} that $x_i \in [1/5, 1/2]$ for every $i \in [3]$ and \begin{align}\label{equ:LEMMA:recursion-upper-bound-1} |\mathcal{H}| & \le |V_1||V_2||V_3| + \sum_{i\in [3]}|\mathcal{H}[V_i]| - \max\left\{\frac{|B|}{99},~\frac{|M|}{100}\right\} + \xi n^3 \\ & \le x_1 x_2 x_3\, n^3 + \sum_{i\in [3]} \left(\frac{1}{4} + \xi \right) \frac{(x_in)^3}{6} + \xi n^3 \le \left(x_1 x_2 x_3 + \frac{x_1^3 + x_2^3 + x_3^3}{24}\right) n^3 + 2\xi n^3. \notag \end{align} If $\max_{i\in [3]}|x_i - 1/3| \ge 8 \xi^{1/2}$, then it follows from the inequality above and Fact~\ref{FACT:ineqality}~\ref{FACT:ineqality-3} that \begin{align*} |\mathcal{H}| \le \left(\frac{1}{24} -\frac{(8 \xi^{1/2})^2}{16}\right)n^3 + 2\xi n^3 < \left(\frac{1}{24} - \xi\right)n^3, \end{align*} contradicting the assumption that $|\mathcal{H}| \ge (1/24 - \xi)n^3$. This proves Lemma~\ref{LEMMA:weak-stability}~\ref{LEMMA:weak-stability-1}. Next, we prove Lemma~\ref{LEMMA:weak-stability}~\ref{LEMMA:weak-stability-2}. Suppose to the contrary that $|B| > 300 \xi n^{3}$ or $|M| > 300 \xi n^{3}$. Similarly to the proof above, it follows from~\eqref{equ:LEMMA:recursion-upper-bound-1} and Fact~\ref{FACT:ineqality}~\ref{FACT:ineqality-3} that \begin{align*} |\mathcal{H}| & \le x_1 x_2 x_3\, n^3 + \sum_{i\in [3]} \left(\frac{1}{4} + \xi \right) \frac{(x_in)^3}{6} - 3\xi n^3 + \xi n^3 \\ & \le \left(x_1 x_2 x_3 + \frac{x_1^3 + x_2^3 + x_3^3}{24}\right) n^3 + 3\cdot \xi\, \frac{n^3}{6} - 3\xi n^3 + \xi n^3 < \frac{n^3}{24} - \xi n^3, \end{align*} contradicting the assumption that $|\mathcal{H}| \ge (1/24 - \xi)n^3$. Next, we prove Lemma~\ref{LEMMA:weak-stability}~\ref{LEMMA:weak-stability-3}. Suppose to the contrary that $|\mathcal{H}[V_i]| \le (1/24 - 500\xi) |V_i|^3$ for some $i \in [3]$. By symmetry, we may assume that $i =1$. Then it follows from~\eqref{equ:LEMMA:recursion-upper-bound-1} and Fact~\ref{FACT:ineqality}~\ref{FACT:ineqality-3} that \begin{align*} |\mathcal{H}| & \le |V_1||V_2||V_3| + \sum_{i\in [3]}|\mathcal{H}[V_i]| +\xi n^3 \\ & \le |V_1||V_2||V_3| + \left(\frac{1}{24} - 500\xi \right) |V_1|^3 + \left(\frac{1}{24} +\xi \right) |V_2|^3 + \left(\frac{1}{24} + \xi \right) |V_3|^3 +\xi n^3\\ & = x_1 x_2 x_3\, n^3 + \frac{1}{24}\left(x_1^3 + x_2^3 + x_3^3\right)n^3 - 500\xi (x_1 n)^3 + \xi(x_2 n)^3 + \xi(x_3 n)^3 + \xi n^3\\ & \le \frac{n^3}{24} - 500\xi \left(\frac{n}{5}\right)^3 + \xi \left(\frac{n}{2}\right)^3 + \xi \left(\frac{n}{2}\right)^3 + \xi n^3 < \frac{n^3}{24} - \xi n^3, \end{align*} a contradiction. This completes the proof of Lemma~\ref{LEMMA:weak-stability}. \end{proof} Now we are ready to prove Theorem~\ref{THM:C5Minus-stability}. \begin{proof}[Proof of Theorem~\ref{THM:C5Minus-stability}] Fix $\varepsilon > 0$. We may assume that $\varepsilon$ is sufficiently small. Let $\delta \coloneqq \varepsilon^{11}/1200$.
Let $n$ be sufficiently large; in particular, we can assume that $$\e n\ge \max\left\{N_{\ref{LEMMA:recursion-upper-bound}}(\delta),\,N_{\ref{LEMMA:weak-stability}}(\delta)\right\}, $$ where $N_{\ref{LEMMA:recursion-upper-bound}}$ and $N_{\ref{LEMMA:weak-stability}}$ are the functions returned by Lemmas~\ref{LEMMA:recursion-upper-bound} and~\ref{LEMMA:weak-stability} respectively. Let us prove by induction on $m$ that every $m$-vertex $C_{5}^{3-}$-free $3$-graph $\C H$ with $m\le n$ and \begin{equation}\label{eq:MVertexH} |\mathcal{H}| \ge \left(\frac{1}{24} - \delta\left(\frac{n}{m}\right)^{10} \right)m^3 \end{equation} can be transformed into a $T_{\mathrm{rec}}$-subconstruction by removing at most $\frac{600 \delta}{\varepsilon^{10}} m^3 + \frac{\varepsilon n^2 m}{6}$ edges. The base case $m < \varepsilon n$ is trivially true since $\binom{m}{3} \le \frac{\varepsilon n^2 m}{6}$, so we may assume that $m \ge \varepsilon n$. Let $\mathcal{H}$ be an arbitrary $m$-vertex $C_{5}^{3-}$-free $3$-graph satisfying~\eqref{eq:MVertexH}. Let $\xi \coloneqq \delta (n/m)^{10}$, noting that \begin{align*} \xi \ge \delta = \frac{\varepsilon^{11}}{1200} \quad\text{and}\quad \xi \le \delta \left(\frac{n}{\varepsilon n}\right)^{10} \le \frac{\delta}{\varepsilon^{10}} = \frac{\varepsilon}{1200} \ll 1. \end{align*} Additionally, we have \begin{align*} m \ge \varepsilon n \gg \max\left\{N_{\ref{LEMMA:recursion-upper-bound}}(\delta),\,N_{\ref{LEMMA:weak-stability}}(\delta)\right\} \ge \max\left\{N_{\ref{LEMMA:recursion-upper-bound}}(\xi),\,N_{\ref{LEMMA:weak-stability}}(\xi)\right\}. \end{align*} Applying Lemma~\ref{LEMMA:weak-stability} to $\mathcal{H}$, we obtain a partition $V(\mathcal{H}) = V_1 \cup V_2 \cup V_3$ such that \begin{enumerate}[label=(\roman*)] \item\label{item:Vi-size} $m/5\le |V_i|\le m/2$ for each $i \in [3]$, \item\label{item:B-size} $|B| \le 300 \xi m^3 \le \frac{300\delta}{\varepsilon^{10}} m^3$, and \item\label{item:H-Vi-size} $|\mathcal{H}[V_i]| \ge (1/24 - 500\xi) |V_i|^3$ for each $i \in [3]$. \end{enumerate} For every $i \in [3]$, since $|V_i| \le m/2$, it follows from~\ref{item:H-Vi-size} that \begin{align*} |\mathcal{H}[V_i]| \ge \left(\frac{1}{24} - 500\xi \right) |V_i|^3 & = \left(\frac{1}{24} - 500\delta \left(\frac{n}{m}\right)^{10}\right) |V_i|^3 \\ & \ge \left(\frac{1}{24} - 500\delta \left(\frac{n}{2|V_i|}\right)^{10} \right) |V_i|^3 \ge \left(\frac{1}{24} - \delta \left(\frac{n}{|V_i|}\right)^{10}\right) |V_i|^3. \end{align*} It follows from the inductive hypothesis that $\mathcal{H}[V_i]$ is a $T_{\mathrm{rec}}$-subconstruction after removing at most $600\cdot \frac{\delta}{\varepsilon^{10}} |V_i|^3+ \frac{\varepsilon n^2 |V_i|}{6}$ edges. Therefore, $\mathcal{H}$ is a $T_{\mathrm{rec}}$-subconstruction after removing at most \begin{align*} |B| + \sum_{i\in [3]} \left(600 \cdot \frac{\delta}{\varepsilon^{10}} |V_i|^3+ \frac{\varepsilon n^2 |V_i|}{6}\right) & \le 300 \delta \left(\frac{n}{m}\right)^{10} m^3 + 3\cdot \frac{600\delta}{\varepsilon^{10}} \left(\frac{m}{2}\right)^3 + \frac{\varepsilon n^2 m}{6} \\ & \le 300 \delta \left(\frac{1}{\varepsilon}\right)^{10} m^3 + \frac{3}{8} \cdot \frac{600\delta}{\varepsilon^{10}}\, m^3 + \frac{\varepsilon n^2 m}{6} \\ & \le \frac{600 \delta}{\varepsilon^{10}}\, m^3 + \frac{\varepsilon n^2 m}{6} \end{align*} edges. This completes the proof for the inductive step.
Taking $m = n$, we obtain that every $n$-vertex $C_{5}^{3-}$-free $3$-graph with at least $n^3/24 - \delta n^3$ edges is a $T_{\mathrm{rec}}$-subconstruction after removing at most \begin{align*} \frac{600 \delta}{\varepsilon^{10}} n^3 + \frac{\varepsilon n^3}{6} \le \frac{\varepsilon n^3}{2} + \frac{\varepsilon n^3}{6} < \varepsilon n^3 \end{align*} edges. This proves Theorem~\ref{THM:C5Minus-stability}. \end{proof} \section{Proof of Theorem~\ref{THM:exact-level-one}}\label{SEC:proof-C5-exact} In this section, we prove Theorem~\ref{THM:exact-level-one}. We will first establish an upper bound for the maximum degree of a nearly extremal $C_{5}^{3-}$-free $3$-graph and a lower bound for the minimum degree of an extremal $C_{5}^{3-}$-free $3$-graph. \begin{proposition}\label{PROP:C5minus-max-degree} The following statements hold. \begin{enumerate}[label=(\roman*)] \item\label{PROP:C5minus-max-degree-1} For every $\varepsilon > 0$ there exist $\delta>0$ and $n_0$ such that the following holds for all $n \ge n_0$. Suppose that $\mathcal{H}$ is a $C_{5}^{3-}$-free $3$-graph on $n$ vertices with at least $\left(\frac{1}{24}-\delta\right)n^3$ edges. Then $\Delta(\mathcal{H}) \le \left(\frac{1}{8} + \varepsilon^{1/5}\right) n^2$. \item\label{PROP:C5minus-max-degree-2} For every $\xi > 0$ there exists $n_1$ such that the following holds for every $n \ge n_1$. Suppose that $\mathcal{H}$ is a $C_{5}^{3-}$-free $3$-graph on $n$ vertices with exactly $\mathrm{ex}(n,C_{5}^{3-})$ edges. Then $\delta(\mathcal{H}) \ge \left(\frac{1}{8} - 24\xi^{1/5}\right) n^2$. \end{enumerate} \end{proposition} Before proving Proposition~\ref{PROP:C5minus-max-degree}, let us introduce some useful definitions related to recursive constructions, as drawn from~\cite{PI14,LP22}. Recall from the definition that a $3$-graph $\mathcal{H}$ is a $T_{\mathrm{rec}}$-subconstruction if and only if there exists a partition $V(\mathcal{H}) = V_1 \cup V_2 \cup V_3$ such that \begin{enumerate}[label=(\roman*)] \item\label{it:T-rec-prop-1} $V_i \neq \emptyset$ for each $i \in [3]$, \item\label{it:T-rec-prop-2} $\mathcal{H} \setminus \bigcup_{i\in [3]}\mathcal{H}[V_i] \subseteq \mathcal{K}[V_1, V_2, V_3]$, and \item\label{it:T-rec-prop-3} $\mathcal{H}[V_i]$ is a $T_{\mathrm{rec}}$-subconstruction for each $i \in [3]$. \end{enumerate} We refer to a partition that satisfies these three properties as a \textbf{$T_{\mathrm{rec}}$-partition} of $\mathcal{H}$. We fix a $T_{\mathrm{rec}}$-partition $V(\mathcal{H}) = V_1 \cup V_2 \cup V_3$ of $\mathcal{H}$ and call $V_1, V_2, V_3$ the \textbf{level-$1$} parts of $\mathcal{H}$. Similarly, for each $i \in [3]$, since the induced subgraph $\mathcal{H}[V_i]$ is again a $T_{\mathrm{rec}}$-subconstruction, there exists (and we fix one) a $T_{\mathrm{rec}}$-partition $V_i = V_{i, 1} \cup V_{i, 2} \cup V_{i, 3}$ for $\mathcal{H}[V_i]$. The parts $\left\{V_{i,j} \colon (i,j) \in [3]^2\right\}$ are called the \textbf{level-$2$} parts of $\mathcal{H}$. Inductively, for each level-$k$ part $V_{x_1, \ldots, x_{k}}$ of $\mathcal{H}$, fix the level-$1$ parts $V_{x_1, \ldots, x_{k},1}$, $V_{x_1, \ldots, x_{k},2}$, $V_{x_1, \ldots, x_{k},3}$ of the induced subgraph $\mathcal{H}[V_{x_1, \ldots, x_{k}}]$. Then the \textbf{level-$(k+1)$} parts of $\mathcal{H}$ are given by $\left\{V_{\mathbf{x},i} \colon (\mathbf{x},i) \in [3]^{k} \times [3]\right\}$. This process terminates when all parts have size at most~$2$.
Also, the part $V_{\emptyset}$ corresponding to the empty sequence is defined to be the whole vertex set $V(\C H)$. \hide{ We will also need the following fact that guarantees a $T_{\mathrm{rec}}$-partition with approximately equal part sizes. Note that we cannot just take the level-1 partition of $\C H$ as it may have part sizes $n-o(n),o(n),o(n)$. \begin{fact}\label{FACT:T-rec-property} For every $\varepsilon > 0$ there exist $\delta > 0$ and $N_0$ such that the following holds for every $n \ge N_0$. Suppose that $\mathcal{H}$ is an $n$-vertex $T_{\mathrm{rec}}$-subconstruction with $|\mathcal{H}| \ge \left(\frac{1}{24} - \delta\right)n^3$. Then there exist a subgraph $\mathcal{G} \subseteq \mathcal{H}$ and a partition $V(\mathcal{H}) = V_1 \cup V_2 \cup V_3$ such that $V(\C G)=V(\C H)$ and \begin{enumerate}[label=(\roman*)] \item $|G| \ge |\mathcal{H}| - \varepsilon n^3$, \item $\left||V_i| - \frac{n}{3}\right| \le \varepsilon n$ for each $i \in [3]$, and \item $V(\mathcal{H}) = V_1 \cup V_2 \cup V_3$ is a $T_{\mathrm{rec}}$-partition of $\mathcal{G}$. \end{enumerate} \end{fact} \begin{proof}[Sketch of proof of Fact~\ref{FACT:T-rec-property}.] Given $\e>0$, choose small $\xi\gg \delta>0$. Let $Z:=\{v\in V(\C H)\colon d_{\C H}(v)\le (\frac14-\xi){n\choose 2}\}$ and let $\C H$ be the 3-graph on $V:=V(\C G)$ consisting of edges of $\C H$ that are disjoint from $Z$. This results in only a negligible loss in the number of edges (see e.g. the calculation in~\cite[Claim~30.1]{LP22}). We can apply~{\cite[Lemma~20]{LP22}} to $\C G[V\setminus Z]$; this general result guarantees, under high minimum degree, that the level-1 parts have ``correct'' sizes. Now we can distribute the vertices of $Z$ arbitrarily among the parts. We omit the details here.\end{proof} } \begin{proof}[Proof of Proposition~\ref{PROP:C5minus-max-degree}] Let us show the first part (about the maximum degree). Fix any $\varepsilon > 0$. We can assume that $\varepsilon$ is sufficiently small. Let $\ell$ be the integer such that $2^{-\ell} \in (\varepsilon^{1/2}/2, \varepsilon^{1/2}]$. Next, let $\varepsilon_{\ell}\gg \delta_{\ell}\gg \cdots \gg \varepsilon_1 \gg \delta_1 \gg \delta$ be positive constants, each being sufficiently small depending on the previous constants and~$\varepsilon$. Let $n$ be sufficiently large and $\mathcal{H}$ be a $C_{5}^{3-}$-free $3$-graph on $n$ vertices with at least $(1/24 - \delta)n^3$ edges. In particular, by repeatedly applying Lemma~\ref{LEMMA:weak-stability}, we can ensure the existence of sets $V_{\mathbf{x}}$, indexed by sequences $\mathbf{x}$ over $[3]$ of length at most $\ell$, such that for every $i \in \{0,\dots, \ell-1\}$, every $(x_1, \ldots, x_i) \in [3]^{i}$, and every $j\in [3]$, the following statements hold: \begin{enumerate}[label=(\roman*)] \item\label{proof-level-one-assump-0} the sets $V_{x_1, \ldots, x_{i},1},V_{x_1, \ldots, x_{i},2}, V_{x_1, \ldots, x_{i},3}$ partition $V_{x_1, \ldots, x_{i}}$, \item\label{proof-level-one-assump-1} $\left||V_{x_1, \ldots, x_i,j}| - \frac{n}{3^{i+1}}\right| \le \varepsilon_{i+1} n$, \item\label{proof-level-one-assump-2} $\left||V_{x_1, \ldots, x_i, j}| - \frac{|V_{x_1, \ldots, x_i}|}{3}\right| \le \frac{\varepsilon_{i+1}}{\varepsilon^2} |V_{x_1, \ldots, x_i}| \le \varepsilon |V_{x_1, \ldots, x_i}|$, \item\label{proof-level-one-assump-3} $|\mathcal{H}[V_{x_1, \ldots, x_i}]| \ge \left(\frac{1}{24} - \varepsilon_{i+1}\right)|V_{x_1, \ldots, x_i}|^3$.
\end{enumerate} Let $\mathcal{T}$ be the $T_{\mathrm{rec}}$-construction on $V$ of recursion depth\footnote{In other words, we first construct a $T_{\mathrm{rec}}$-construction and then remove all edges contained within level-$\ell$ parts.} $\ell$ such that, for each $i \in [\ell]$, the level-$i$ parts of $\mathcal{T}$ are exactly the sets in $\left\{V_{\mathbf{x}} \colon \mathbf{x} \in [3]^{i}\right\}$. Let $\mathcal{H}_1 \coloneqq \mathcal{H} \cap \mathcal{T}$, that is, $\C H_1$ is obtained from $\C H$ by removing all edges inside a level-$\ell$ part of $\C T$. Since $\varepsilon_{\ell}, \delta_{\ell}, \cdots, \varepsilon_1, \delta_1, \delta$ are sufficiently small, we have by~\ref{proof-level-one-assump-0}--\ref{proof-level-one-assump-3} that \begin{align}\label{equ:max-degree-H1} |\mathcal{H}_1| \ge |\mathcal{H}| - \frac{\varepsilon n^3}{2} \ge \left(\frac{1}{24}- \delta\right)n^3 - \frac{\varepsilon n^3}{2} \ge \left(\frac{1}{24}- \varepsilon\right)n^3. \end{align} Let $V \coloneqq V(\mathcal{H})$ and \begin{align*} Z \coloneqq \left\{v\in V \colon d_{\mathcal{H}_1}(v) \le \left(\frac{1}{8} - 2\varepsilon^{1/2}\right) n^2\right\}. \end{align*} \begin{claim}\label{CLAIM:Z-upper-bound} We have $|Z| \le \varepsilon^{1/2} n$. \end{claim} \begin{proof}[Proof of Claim~\ref{CLAIM:Z-upper-bound}] Suppose to the contrary that $|Z| > \varepsilon^{1/2} n$. Then take a subset $Z' \subseteq Z$ of size $\varepsilon^{1/2} n$. It follows from the definition of $Z$ that the induced subgraph $\mathcal{H}_1[V\setminus Z']$ satisfies \begin{align*} |\mathcal{H}_1[V\setminus Z']| & \ge |\mathcal{H}_1| - |Z'| \cdot \left(\frac{1}{8} - 2\varepsilon^{1/2}\right) n^2 \\ & \ge \left(\frac{1}{24}- \varepsilon\right)n^3 - \varepsilon^{1/2} n \cdot \left(\frac{1}{8} - 2\varepsilon^{1/2}\right) n^2 \\ & = \left(\frac{1}{24}- \frac{\varepsilon^{1/2}}{8} +\varepsilon\right)n^3 > \left(\frac{1}{24}- \frac{\varepsilon^{1/2}}{8} +\frac{\varepsilon}{8} - \frac{\varepsilon^{3/2}}{24}\right)n^3 = \frac{(n - \varepsilon^{1/2} n)^3}{24}, \end{align*} which contradicts the fact that every $m$-vertex $T_{\mathrm{rec}}$-subconstruction has at most $m^3/24$ edges. \end{proof} It follows from Claim~\ref{CLAIM:Z-upper-bound} that the induced subgraph $\mathcal{H}_2 \coloneqq \mathcal{H}_1[V\setminus Z]$ satisfies \begin{align}\label{equ:min-deg-G1} \delta(\mathcal{H}_2) \ge \left(\frac{1}{8} - 2\varepsilon^{1/2}\right) n^2 - |Z| \cdot n \ge \left(\frac{1}{8} - 3\varepsilon^{1/2}\right) n^2. \end{align} Fix an arbitrary vertex $v \in V \setminus Z$. We prove by a backward induction on $i \in [0, \ell]$ that \begin{align}\label{equ:max-deg-inductive} \left|L_{\mathcal{H}}(v) \cap \binom{V_{x_1, \ldots, x_{i}}}{2}\right| \le \frac{|V_{x_1, \ldots, x_{i}}|^2}{8} + 3^{\ell-i} \varepsilon n^2, \quad\text{for every } (x_1, \ldots, x_{i}) \in [3]^{i}. \end{align} The base case $i = \ell$ is trivially true since, for every $(x_1, \ldots, x_{\ell}) \in [3]^{\ell}$, we have \begin{align*} \binom{|V_{x_1, \ldots, x_{\ell}}|}{2} \le \binom{\left(\frac{1}{2}\right)^{\ell} n}{2} \le \binom{\varepsilon^{1/2} n}{2} \le \varepsilon n^2 = 3^{\ell-\ell} \varepsilon n^2. \end{align*} So we may focus on the inductive step. Fix $i \in [0,\ell-1]$. Take an arbitrary $(x_1, \ldots, x_{i}) \in [3]^{i}$. Let $U_j \coloneqq V_{x_1, \ldots, x_{i},j}$ for $j \in [3]$ and let $U \coloneqq V_{x_1, \ldots, x_{i}} = U_1 \cup U_2 \cup U_3$.
By the inductive hypothesis, we have \begin{align}\label{equ:max-deg-Uj} \left|L_{\mathcal{H}}(v) \cap \binom{U_j}{2}\right| \le \frac{|U_j|^2}{8} + 3^{\ell-i-1} \varepsilon n^2, \quad\text{for every } j \in [3]. \end{align} Suppose to the contrary that \begin{align}\label{equ:max-deg-U} \left|L_{\mathcal{H}}(v) \cap \binom{U}{2}\right| > \frac{|U|^2}{8} + 3^{\ell-i} \varepsilon n^2. \end{align} Then, combining this with~\eqref{equ:max-deg-Uj}, we obtain \begin{align}\label{equ:level-one-Lv-U1U2U3} \left|L_{\mathcal{H}}(v) \cap \mathcal{K}^2[U_1, U_2, U_3]\right| & = \left|L_{\mathcal{H}}(v) \cap \binom{U}{2}\right| - \sum_{j\in [3]} \left|L_{\mathcal{H}}(v) \cap \binom{U_j}{2}\right| \notag \\ & \ge \frac{|U|^2}{8} +3^{\ell-i} \varepsilon n^2 - \sum_{j\in [3]}\left(\frac{|U_j|^2}{8} + 3^{\ell-i-1} \varepsilon n^2\right) \notag \\ & = \frac{|U|^2}{8} - \sum_{j\in [3]}\frac{|U_j|^2}{8} \notag \\ & \ge \frac{|U|^2}{8} - 3 \cdot \frac{1}{8}\left(\frac{|U|}{3} + \varepsilon |U|\right)^2 > \frac{|U|^2}{16}. \end{align} By symmetry, we can assume that \begin{align}\label{equ:level-one-Lv-local-max} \left|L_{\mathcal{H}}(v) \cap \mathcal{K}[U_2, U_3]\right| \ge \max\left\{\,|L_{\mathcal{H}}(v) \cap \mathcal{K}[U_1, U_2]|,|L_{\mathcal{H}}(v) \cap \mathcal{K}[U_1, U_3]|\,\right\}. \end{align} Let \begin{align*} B_{v} \coloneqq L_{\mathcal{H}}(v) \cap \left( \binom{U_2}{2} \cup \binom{U_3}{2} \cup \mathcal{K}[U_1, U_2\cup U_3]\right) \quad\text{and}\quad M_{v} \coloneqq \mathcal{K}[U_2, U_3] \setminus L_{\mathcal{H}}(v). \end{align*} Thus, these are the sets of \textbf{bad} and \textbf{missing} pairs in the link graph of $v$ when we add $v$ to $U_1$. Due to~\ref{proof-level-one-assump-2}, \ref{proof-level-one-assump-3}, \eqref{equ:level-one-Lv-U1U2U3}, \eqref{equ:level-one-Lv-local-max}, and the assumption that $\varepsilon_i \ll \varepsilon$, Proposition~\ref{pr:5} can be applied to $\C G:=\mathcal{H}[U \cup \{v\}]$, and hence, we obtain \begin{align*} \left|L_{\C G}(v) \setminus \binom{U_1}{2}\right| & = |U_2||U_3| - |M_v| + |B_v| \\ & \le |U_2||U_3| -\frac{|M_v|}{10} + \varepsilon n^2 \le \left(\frac{1}{3} + \varepsilon\right)^2 |U|^2 + \varepsilon n^2 \le \frac{|U|^2}{9} + 2\varepsilon n^2. \end{align*} Combining this with~\eqref{equ:max-deg-Uj}, we obtain \begin{align*} |L_{\C G}(v)| & = \left|L_{\C G}(v) \cap \binom{U_1}{2}\right| + \left|L_{\C G}(v) \setminus \binom{U_1}{2}\right| \\ & \le \frac{|U_1|^2}{8} + 3^{\ell-i-1} \varepsilon n^2 + \frac{|U|^2}{9} + 2\varepsilon n^2 \\ & \le \frac{1}{8}\left(\frac{1}{3} + \varepsilon \right)^2 |U|^2 + 3^{\ell-i-1} \varepsilon n^2 + \frac{|U|^2}{9} + 2\varepsilon n^2 \\ & < \frac{|U|^2}{72} + \varepsilon |U|^2 + 3^{\ell-i-1} \varepsilon n^2 + \frac{|U|^2}{9} + 2\varepsilon n^2 \le \frac{|U|^2}{8} + 3^{\ell-i} \varepsilon n^2, \end{align*} contradicting~\eqref{equ:max-deg-U}. This completes the proof for the inductive step; hence~\eqref{equ:max-deg-inductive} holds. Taking $i = 0$ in~\eqref{equ:max-deg-inductive}, we obtain \begin{align*} d_{\mathcal{H}}(v) = \left|L_{\mathcal{H}}(v) \cap \binom{V(\C H)}{2}\right| & \le \frac{|V(\C H)|^2}{8} + 3^{\ell} \varepsilon n^2 \\ & \le \frac{n^2}{8} + \left(2^{\ell}\right)^{\log_{2}3} \varepsilon n^2 \\ & \le \frac{n^2}{8} + 3\,\varepsilon^{-\frac{1}{2}\log_{2}3} \varepsilon n^2 < \frac{n^2}{8} + \varepsilon^{0.2} n^2.
\end{align*} So it follows from the definition of $Z$ that \begin{align*} \Delta(\mathcal{H}) \le \max\left\{d_{\mathcal{H}}(u) \colon u\in V\setminus Z \right\} < \frac{n^2}{8} + \varepsilon^{0.2} n^2, \end{align*} proving Proposition~\ref{PROP:C5minus-max-degree}~\ref{PROP:C5minus-max-degree-1}. Next, we prove Proposition~\ref{PROP:C5minus-max-degree}~\ref{PROP:C5minus-max-degree-2}. In fact, it follows from a more general, unpublished result by Mubayi, Reiher, and the third author~\cite{LMR21mindeg}, which implies that if a finite family $\mathcal{F}$ of $r$-graphs satisfies $\pi(\mathcal{F}) > 0$ and, for large $n$, every extremal construction on $n$ vertices is structurally close to the blowup, recursive blowup, or mixed recursive blowup of some minimal patterns (see~\cite{PI14,LP22} for definitions), then $\delta(\mathcal{H}) = \left(\pi(\mathcal{F}) - o(1)\right)\binom{n-1}{r-1}$ for every extremal $\mathcal{F}$-free $n$-vertex $r$-graph $\mathcal{H}$. The proof relies on a straightforward deleting-duplicating argument, which we present below for the case $\mathcal{F} = \{C_{5}^{3-}\}$. Take any $\xi>0$ and then fix a sufficiently small $\e>0$. Given $\e$, let all the notation and conventions from the argument for the first part apply, except $\C H$ is now a maximum $C_{5}^{3-}$-free $3$-graph with $n$ vertices. Since $n\gg 1/\delta$, $\C H$ has at least $(\frac1{24}-\delta)n^3$ edges and all the above results hold. \hide{ We may choose $\varepsilon>0$ in the proof above to be small enough that $\varepsilon \ll \xi$. Now, let $\mathcal{H}$ be an $n$-vertex $C_{5}^{3-}$-free $3$-graph with exactly $\mathrm{ex}(n, C_{5}^{3-})$ edges. Note that we can choose $n$ sufficiently large so that $|\mathcal{H}| = \mathrm{ex}(n, C_{5}^{3-}) \ge (1/24 - \delta)n^3$. Therefore, all the conclusions above also hold for the new $3$-graph $\mathcal{H}$. } Recall that $\mathcal{T}$ is the $T_{\mathrm{rec}}$-construction on $V$ of depth $\ell$ such that, for each $i \in [\ell]$, the level-$i$ parts of $\mathcal{T}$ are exactly the sets in $\left\{V_{\mathbf{x}} \colon \mathbf{x} \in [3]^{i}\right\}$ and $\mathcal{H}_1 = \mathcal{H} \cap \mathcal{T}$. In particular, it follows from~\eqref{equ:max-degree-H1} that $|\mathcal{T}| \ge |\mathcal{H}_1| \ge \left(1/24 - \varepsilon\right)n^3$. It is clear that $\mathcal{T}$ is $C_{5}^{3-}$-free. So, applying Proposition~\ref{PROP:C5minus-max-degree}~\ref{PROP:C5minus-max-degree-1} to $\mathcal{T}$ with $\varepsilon$ and $\delta$ there corresponding to $\xi$ and $\varepsilon$ here, we obtain \begin{align}\label{equ:max-deg-T-ell-max} \Delta(\mathcal{T}) \le \left(\frac{1}{8} + \xi^{1/5}\right)n^2. \end{align} Additionally, for every $v \in V\setminus Z$, it follows from~\eqref{equ:min-deg-G1} that \begin{align}\label{equ:max-deg-H1-ell-min} d_{\mathcal{H}_1}(v) \ge d_{\mathcal{H}_2}(v) \ge \left(\frac{1}{8} - 3\varepsilon^{1/2}\right) n^2. \end{align} Since $|Z| \le \varepsilon^{1/2} n$, there exists $\mathbf{x} = (x_1, \ldots, x_{\ell}) \in [3]^{\ell}$ such that $|V_{\mathbf{x}} \setminus Z| \ge 5$. Fix five distinct vertices $v_1, \ldots, v_{5} \in V_{\mathbf{x}} \setminus Z$, and let \begin{align*} L \coloneqq \bigcap_{i\in [5]}L_{\mathcal{H}_{1}}(v_i). \end{align*} Note that $L_{\mathcal{T}}(v_1) = \cdots = L_{\mathcal{T}}(v_5)$ and $L_{\mathcal{H}_{1}}(v_i) \subseteq L_{\mathcal{T}}(v_i)$ for every $i \in [5]$.
So it follows from~\eqref{equ:max-deg-T-ell-max},~\eqref{equ:max-deg-H1-ell-min}, and the Inclusion-Exclusion Principle that \begin{align}\label{equ:max-deg-L-lower-bound} |L| \ge \left(\frac{1}{8} - 3\varepsilon^{1/2}\right) n^2 - 5 \left(\left(\frac{1}{8} + \xi^{1/5}\right)n^2 - \left(\frac{1}{8} - 3\varepsilon^{1/2}\right) n^2\right) \ge \left(\frac{1}{8} - 23 \xi^{1/5}\right)n^2. \end{align} Assume, for the sake of contradiction, that there exists a vertex $u \in V$ such that $d_{\mathcal{H}}(u) < \left(\frac{1}{8} - 24 \xi^{1/5}\right)n^2$. Then define the new $3$-graph $\hat{\mathcal{H}}$ as \begin{align*} \hat{\mathcal{H}} \coloneqq \left(\mathcal{H} \setminus \left\{uvw \colon vw \in L_{\mathcal{H}}(u)\right\}\right) \cup \left\{uvw \in \binom{V}{3}\colon vw \in L\right\}. \end{align*} In other words, we change the link of $u$ from $L_{\mathcal{H}}(u)$ to $L$. We claim that $\hat{\mathcal{H}}$ is still $C_{5}^{3-}$-free. Indeed, suppose to the contrary that there exists a copy of $C_{5}^{3-}$, say on the set $S$ of $5$ vertices, in $\hat{\mathcal{H}}$. Then $S$ must contain $u$, meaning that $|S\setminus \{u\}| = 4$. So there exists a vertex in $\{v_1, \ldots, v_5\}$, say $v_1$, that is not contained in $S$. Let $\hat{S} \coloneqq (S\setminus \{u\}) \cup \{v_1\}$, noting that $\hat{\mathcal{H}}[\hat{S}] = \mathcal{H}[\hat{S}]$. Since $L_{\hat{\mathcal{H}}}(u) \subseteq L_{\hat{\mathcal{H}}}(v_1)$, the induced subgraph of $\hat{\mathcal{H}}$ on $\hat{S}$ must also contain a copy of $C_{5}^{3-}$. This means that $\mathcal{H}[\hat{S}]$ contains a copy of $C_{5}^{3-}$, a contradiction. Therefore, $\hat{\mathcal{H}}$ is $C_{5}^{3-}$-free. However, it follows from the definition of $\hat{\mathcal{H}}$ and~\eqref{equ:max-deg-L-lower-bound} that \begin{align*} |\hat{\mathcal{H}}| \ge |\mathcal{H}| - d_{\mathcal{H}}(u) + |L| - n \ge |\mathcal{H}| - \left(\frac{1}{8} - 24 \xi^{1/5}\right)n^2 + \left(\frac{1}{8} - 23 \xi^{1/5}\right)n^2 - n > |\mathcal{H}|, \end{align*} contradicting the maximality of $\mathcal{H}$. This completes the proof of Proposition~\ref{PROP:C5minus-max-degree}~\ref{PROP:C5minus-max-degree-2}. \end{proof} We are now ready to prove Theorem~\ref{THM:exact-level-one}. \begin{proof}[Proof of Theorem~\ref{THM:exact-level-one}] Fix $\varepsilon >0$. We may assume that $\varepsilon$ is small. Choose $\delta = \delta(\varepsilon) \in (0, 10^{-8})$ to be sufficiently small, and let $n > 1/\delta$ be sufficiently large. Let $\mathcal{H}$ be an $n$-vertex $C_{5}^{3-}$-free $3$-graph with $\mathrm{ex}(n,C_{5}^{3-})$ edges, i.e., the maximum possible size. Since $n$ is sufficiently large and $\pi(C_{5}^{3-}) = \frac{1}{4}$, we have $|\mathcal{H}| \ge \left(\frac{1}{4} - \delta\right)\binom{n}{3}$. Let $V \coloneqq V(\mathcal{H})$. Let $U_1 \cup U_2 \cup U_3 = V$ be the partition returned by Lemma~\ref{LEMMA:weak-stability}. Let \begin{align*} \mathcal{G} \coloneqq \mathcal{H} \cap \mathcal{K}[U_1, U_2, U_3] \quad\text{and}\quad Z \coloneqq \left\{v\in V \colon d_{\mathcal{G}}(v) \le \left(1/9 - 2\sqrt{3}\delta^{1/4}\right)n^2\right\}.
\end{align*} It follows from Lemma~\ref{LEMMA:weak-stability}~\ref{LEMMA:weak-stability-1} and~\ref{LEMMA:weak-stability-2} that \begin{align*} |\mathcal{G}| = |\mathcal{H} \cap \mathcal{K}[U_1, U_2, U_3]| & = |U_1||U_2||U_3| - |M_{\mathcal{H}}(U_1, U_2, U_3)| \\ & \ge \left(\frac{1}{3} - 8\delta^{1/2}\right)^3 n^3 - 300\delta n^{3} \ge \left(\frac{1}{27} - 3\delta^{1/2}\right) n^3, \end{align*} where the last inequality follows from the assumption that $\delta$ is sufficiently small. Therefore, a similar argument to that in the proof of Claim~\ref{CLAIM:Z-upper-bound} yields \begin{align}\label{equ:Z-upper-bound-2} |Z| \le \left(3\delta^{1/2}\right)^{1/2}n \le 3 \delta^{1/4} n. \end{align} Note that the partition $U_1 \cup U_2 \cup U_3 = V$ is not necessarily locally maximal. So, let us keep moving vertices in $Z$ one by one between parts, as long as each move strictly increases the number of transversal edges. Let $V_1 \cup V_2 \cup V_3 = V$ denote the final partition. Note from the definition that $U_i \setminus Z \subseteq V_i$ for every $i \in [3]$. \begin{claim}\label{CLAIM:V1V2V3-locally-max} The partition $V_1 \cup V_2 \cup V_3 = V$ is locally maximal. \end{claim} \begin{proof}[Proof of Claim~\ref{CLAIM:V1V2V3-locally-max}] It suffices to show that for every $i \in [3]$ and for every vertex $v \in V_i$, \begin{align}\label{equ:locally-max-def} |L_{\mathcal{H}}(v) \cap \mathcal{K}[V_j, V_k]| \ge \max\left\{|L_{\mathcal{H}}(v) \cap \mathcal{K}[V_i, V_j]|,~|L_{\mathcal{H}}(v) \cap \mathcal{K}[V_i, V_k]|\right\}, \end{align} where $\{j,k\} = [3]\setminus \{i\}$. It is clear that~\eqref{equ:locally-max-def} holds for every $v\in Z$ due to the vertex-moving operations defined above. So it suffices to prove~\eqref{equ:locally-max-def} for $v \in V\setminus Z$. Suppose to the contrary that this is not true. Fix a vertex $v\in V\setminus Z$ for which~\eqref{equ:locally-max-def} fails. By symmetry, we may assume that $v\in V_1$, and \begin{align}\label{equ:v-L12-L23} |L_{\mathcal{H}}(v) \cap \mathcal{K}[V_1, V_2]| > |L_{\mathcal{H}}(v) \cap \mathcal{K}[V_2, V_3]|. \end{align} It follows from~\eqref{equ:Z-upper-bound-2} that \begin{align*} |L_{\mathcal{H}}(v) \cap \mathcal{K}[V_2, V_3]| \ge |L_{\mathcal{H}}(v) \cap \mathcal{K}[U_2\setminus Z, U_3\setminus Z]| \ge d_{\mathcal{G}}(v) - |Z| \cdot n \ge \left(\frac{1}{9} - 4\delta^{1/4}\right)n^2. \end{align*} Combining this with~\eqref{equ:v-L12-L23}, we obtain \begin{align*} d_{\mathcal{H}}(v) \ge |L_{\mathcal{H}}(v) \cap \mathcal{K}[V_2, V_3]| + |L_{\mathcal{H}}(v) \cap \mathcal{K}[V_1, V_2]| \ge 2 \left(\frac{1}{9} - 4\delta^{1/4}\right)n^2 > \left(\frac{1}{8} + \frac{1}{100}\right)n^2, \end{align*} which contradicts Proposition~\ref{PROP:C5minus-max-degree}~\ref{PROP:C5minus-max-degree-1}. \end{proof} Let $B:=B_{\C H}(V_1,V_2,V_3)$ and $M:=M_{\C H}(V_1,V_2,V_3)$ be respectively the sets of bad edges and missing edges, as defined in~\eqref{equ:def-bad-triple} and~\eqref{equ:def-missing-triple}.
We can choose $\delta$ to be sufficiently small such that, by Lemma~\ref{LEMMA:weak-stability}~\ref{LEMMA:weak-stability-1},~\ref{LEMMA:weak-stability-2}, and~\eqref{equ:Z-upper-bound-2}, the following inequalities hold: \begin{align}\label{equ:vtx-stab-a} \min_{i\in [3]}|V_i| \ge \min_{i\in [3]}|U_i| - |Z| \ge \frac{n}{3} - \varepsilon n, \end{align} \begin{align}\label{equ:vtx-stab-b} \max\left\{|B|,\,|M|\right\} \le \max\left\{|B_{\mathcal{H}}(U_1, U_2, U_3)|,\,|M_{\mathcal{H}}(U_1, U_2, U_3)|\right\} + |Z| \cdot n^2 \le \varepsilon n^3, \end{align} \begin{align}\label{equ:vtx-stab-c} |\mathcal{H} \cap \mathcal{K}[V_1, V_2, V_3]| & = |V_1||V_2||V_3| - |M| \ge |V_1||V_2||V_3| - \varepsilon n^3. \end{align} Fix an arbitrary vertex $v$ in $V$. By symmetry, we may assume that $v \in V_1$. Let \begin{align*} B_{v} \coloneqq L_{\mathcal{H}}(v) \cap \left( \binom{V_2}{2} \cup \binom{V_3}{2} \cup \mathcal{K}[V_1, V_2\cup V_3]\right) \quad\text{and}\quad M_{v} \coloneqq \mathcal{K}[V_2, V_3] \setminus L_{\mathcal{H}}(v). \end{align*} It follows from Claim~\ref{CLAIM:V1V2V3-locally-max} that \begin{align}\label{equ:vtx-stab-d} |L_{\mathcal{H}}(v) \cap \mathcal{K}[V_2, V_3]| \ge \max\left\{|L_{\mathcal{H}}(v) \cap \mathcal{K}[V_1, V_2]|,~|L_{\mathcal{H}}(v) \cap \mathcal{K}[V_1, V_3]|\right\}. \end{align} For $i \in [3]$, let $\mathcal{H}_i \coloneqq \mathcal{H}[V_i \cup \{v\}]$, noting from Lemma~\ref{LEMMA:weak-stability}~\ref{LEMMA:weak-stability-3} that \begin{align*} |\mathcal{H}_i| \ge |\mathcal{H}[V_i]| \ge |\mathcal{H}[U_i \setminus Z]| & \ge |\mathcal{H}[U_i]| - |Z| \cdot n^2 \\ & \ge \left(\frac{1}{24} - 500 \delta\right)|V_i|^3 - 3\delta^{1/4}n^3 \ge \left(\frac{1}{24} - 600 \delta^{1/4}\right)|V_i|^3. \end{align*} We can choose $\delta$ to be sufficiently small such that for each $i \in [3]$, by Proposition~\ref{PROP:C5minus-max-degree}~\ref{PROP:C5minus-max-degree-1}, \begin{align*} \left|L_{\mathcal{H}}(v) \cap \binom{V_i}{2}\right| = d_{\mathcal{H}_i}(v) \le \frac{|V_i|^2}{8} + \varepsilon n^2 \le \frac{1}{8}\left(\frac{1}{3}+ \varepsilon\right)^2 n^2 + \varepsilon n^2 \le \frac{n^2}{72} + 2\varepsilon n^2. \end{align*} Hence, \begin{align}\label{equ:vtx-stab-e} |L_{\mathcal{H}}(v) \cap \mathcal{K}^2[V_1, V_2, V_3]| & = d_{\mathcal{H}}(v) - \sum_{i\in [3]}d_{\mathcal{H}_i}(v) \ge d_{\mathcal{H}}(v) - 3\left(\frac{n^2}{72} + 2\varepsilon n^2\right) \notag \\ & \ge \left(\frac{1}{8} - \varepsilon\right)n^2 - 3\left(\frac{n^2}{72} + 2\varepsilon n^2\right) > \frac{n^2}{16}. \end{align} From~\eqref{equ:vtx-stab-a},~\eqref{equ:vtx-stab-c},~\eqref{equ:vtx-stab-d}, and~\eqref{equ:vtx-stab-e}, we can apply Proposition~\ref{pr:5}. As a result, \begin{align*} \left|L_{\mathcal{H}}(v) \setminus \binom{V_1}{2}\right| = |V_2||V_3| - |M_v| + |B_v| \le |V_2||V_3| - \max\left\{\frac{|B_{v}|}{9},\,\frac{|M_{v}|}{10}\right\} + \varepsilon n^2.
\end{align*} Combining this with Proposition~\ref{PROP:C5minus-max-degree}~\ref{PROP:C5minus-max-degree-1} (which is applied to $\mathcal{H}_1$), we obtain \begin{align*} |L_{\mathcal{H}}(v)| & = \left|L_{\mathcal{H}}(v) \cap \binom{V_1}{2}\right| + \left|L_{\mathcal{H}}(v) \setminus \binom{V_1}{2}\right| \\ & \le \left(\frac{1}{8}+\varepsilon\right)|V_1|^2 + |V_2||V_3| + \varepsilon n^2 - \max\left\{\frac{|B_{v}|}{9},\,\frac{|M_{v}|}{10}\right\} \\ & \le \left(\frac{1}{8}+\varepsilon\right) \left(\frac{n}{3}+\varepsilon n\right)^2 + \left(\frac{n}{3}+\varepsilon n\right)^2 + \varepsilon n^2 - \max\left\{\frac{|B_{v}|}{9},\,\frac{|M_{v}|}{10}\right\} \\ & \le \frac{n^2}{8} + 3\varepsilon n^2 - \max\left\{\frac{|B_{v}|}{9},\,\frac{|M_{v}|}{10}\right\}. \end{align*} Since, by Proposition~\ref{PROP:C5minus-max-degree}~\ref{PROP:C5minus-max-degree-2}, $|L_{\mathcal{H}}(v)| \ge \left(\frac{1}{8}-\varepsilon\right) n^2$, the inequality above implies that \begin{align}\label{equ:Bv-upper-bound} |B_v| \le 36\varepsilon n^2 \quad\text{and}\quad |M_{v}| \le 40 \varepsilon n^2. \end{align} Next, we return to the analysis of the number of bad edges and missing edges in $\mathcal{H}$. Our goal is to show that $|B| \le |M|$, with equality holding only if $M = B = \emptyset$. Suppose that $B\neq \emptyset$. We call a pair of vertices \textbf{crossing} if its two vertices belong to two different parts~$V_i$. \begin{claim}\label{CLAIM:M1e-M2e} For every $e = \{v_1, v_2, v_3\} \in B$ with $v_2v_3$ crossing, we have \begin{align*} d_{M}(v_2v_3) \ge \left(\frac{1}{3} - 300\varepsilon\right)n. \end{align*} \end{claim} \begin{proof}[Proof of Claim~\ref{CLAIM:M1e-M2e}] By the symmetry between parts, assume that $v_1,v_3\in V_1$ and $v_2\in V_2$. First, observe that for every pair $(v_4, v_5) \in V_3 \times (V_2 \setminus \{v_2\})$, \begin{align}\label{equ:missing-triple} \text{at least one triple in $\{v_2v_3v_4, v_3v_4v_5, v_4v_5v_1\}$ belongs to $M$}, \end{align} since otherwise, $v_1v_2v_3v_4v_5$ would form a copy of $C_{5}^{3-}$ in $\mathcal{H}$. Suppose to the contrary that $d_{M}(v_2v_3) < \left(\frac{1}{3} - 300\varepsilon\right)n$. Define \begin{align*} U \coloneqq \left\{u \in V_3 \colon v_2v_3u \in M\right\}. \end{align*} Since $U\subseteq N_{M}(v_2v_3)$, we have $|U| < \left(\frac{1}{3} - 300\varepsilon\right)n$. Consequently, \begin{align*} |V_3\setminus U| \ge \left(\frac{1}{3} - \varepsilon\right)n - \left(\frac{1}{3} - 300\varepsilon\right)n = 299 \varepsilon n. \end{align*} By~\eqref{equ:missing-triple}, for every pair $(v_4, v_5) \in (V_3\setminus U) \times (V_2 \setminus \{v_2\})$, either $\{v_3v_4v_5\} \in M$ or $\{v_1v_4v_5\} \in M$. Therefore, \begin{align*} d_{M}(v_1) + d_{M}(v_3) \ge |V_3\setminus U| \cdot |V_2 \setminus \{v_2\}| \ge 299 \varepsilon n \cdot \left(\frac{1}{3} - 2\varepsilon\right)n \ge 90 \varepsilon n^2. \end{align*} Thus, we have that \begin{align*} \max\left\{d_{M}(v_1),\,d_{M}(v_3)\right\} \ge \frac{d_{M}(v_1) + d_{M}(v_3)}{2} \ge 45\varepsilon n^2, \end{align*} contradicting~\eqref{equ:Bv-upper-bound}. \end{proof} \begin{claim}\label{CLAIM:max-codeg-bad} For every crossing pair $v_1v_2$, we have $d_B(v_1v_2) \le 300 \varepsilon n$. \end{claim} \begin{proof}[Proof of Claim~\ref{CLAIM:max-codeg-bad}] By symmetry, assume that $v_1\in V_1$ and $v_2\in V_2$. Suppose to the contrary that $d_{B}(v_1v_2) > 300 \varepsilon n$. By symmetry, we may assume that $N \coloneqq N_{B}(v_1v_2) \cap V_1$ has size at least $\frac{1}{2} d_{B}(v_1v_2) \ge 150 \varepsilon n$. 
It follows from Claim~\ref{CLAIM:M1e-M2e} that \begin{align*} d_{M}(v_2) \ge \sum_{v_3 \in N} d_{M}(v_2v_3) \ge |N| \cdot \left(\frac{1}{3} - 300\varepsilon\right)n \ge 150 \varepsilon n \cdot \left(\frac{1}{3} - 300\varepsilon\right)n > 40\varepsilon n^2, \end{align*} contradicting~\eqref{equ:Bv-upper-bound}. \end{proof} Let $\mathcal{S}$ consist of all crossing pairs that lie inside at least one bad edge. By Claim~\ref{CLAIM:M1e-M2e}, we have $d_{M}(uv) \ge \left(\frac{1}{3} - 300\varepsilon\right)n$ for every $uv\in \mathcal{S}$. Since every bad edge has two crossing pairs, it follows from Claim~\ref{CLAIM:max-codeg-bad} that \begin{align*} |B| \le \frac12 \sum_{uv \in \mathcal{S}} d_{B}(uv) \le \frac12\cdot |\mathcal{S}| \cdot 300 \varepsilon n = 150 \varepsilon n\,|\mathcal{S}|. \end{align*} On the other hand, it follows from the definition that \begin{align*} |M| \ge \frac{1}{3} \sum_{uv \in \mathcal{S}} d_{M}(uv) \ge \frac{1}{3} \cdot |\mathcal{S}| \cdot\left(\frac{1}{3} - 300\varepsilon\right)n \ge \frac{n}{10}\, |\mathcal{S}|, \end{align*} which is strictly greater than $|B|$. Let the $3$-graph $\mathcal{G}$ be obtained from $\mathcal{H}$ by removing all triples from $B$ and adding all triples in $M$. It is easy to see that $\mathcal{G}$ is $C_{5}^{3-}$-free, while $|\mathcal{G}| = |\mathcal{H}| + |M| - |B| > |\mathcal{H}|$, contradicting the maximality of $\mathcal{H}$. Therefore, we have that $B = \emptyset$. Also, $M=\emptyset$, again by the maximality of $\mathcal{H}$. Thus, $\mathcal{H} \setminus \bigcup_{i\in [3]}\mathcal{H}[V_i]$ is exactly the complete 3-partite 3-graph $\mathcal{K}[V_1,V_2,V_3]$. This completes the proof of Theorem~\ref{THM:exact-level-one}. \end{proof} \section{Concluding remarks}\label{SEC:remark} Proposition~\ref{PROP:C5minus-max-degree} implies that $C_{5}^{3-}$ is \textbf{bounded}, and~\eqref{equ:smoothness-C5-} implies that $C_{5}^{3-}$ is \textbf{smooth}, where these two properties were introduced in~\cite{HLLYZ23}. These two properties lead to applications in certain tilting-type extremal problems related to $C_{5}^{3-}$, and we refer the reader to~\cite{HLLYZ23,DHLY24} for further details. It would be very interesting to see which other problems become tractable with the \textbf{local refinement} method, which can generally be described as follows: \begin{description} \item[Step 1:] \label{it:LR1} prove that any extremal configuration $G$ of large order $n$ is not too far from a conjectured construction $C$ in some fixed measure of similarity, \item[Step 2:]\label{it:LR2} choose an instance of $C$ that is a best fit to $G$ and manually add the flag algebra versions of the local optimality conditions (namely, that no local change to $C$ can improve the similarity measure between $C$ and $G$), \item[Step 3:]\label{it:RL3} check if flag algebras can prove the desired asymptotic results under the extra assumptions coming from the previous two steps. \end{description} For example, if the conjectured asymptotically maximum 3-graphs $\C H$ come from the 3-graph $\C D(T)$ which consists of triples that span a directed cycle in a tournament $T$ (this is the case for the extremal problems studied in~\cite{GlebovKralVolec16,ReiherRodlSchacht16jctb,FalgasPikhurkoVaughanVolec23}), then the local assumptions could say that changing the orientation of one pair in $T$ cannot increase $|\C H\cap \C D(T)|$ (or decrease $|\C H\setminus \C D(T)|$, etc.).
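As a toy illustration of the local optimality condition of Step 2 in this tournament setting, the following Python sketch computes $\C D(T)$ for a small tournament $T$ and checks that re-orienting any single pair of $T$ does not increase $|\C H\cap \C D(T)|$ when $\C H=\C D(T)$. The function names are ours and the finite instance is only illustrative; the actual method works with flag algebra inequalities valid for all large $n$, not with a single small example.
\begin{verbatim}
from itertools import combinations
import random

def beats(T, u, v):
    # T[(a, b)] == 1 (with a < b) means the pair is oriented a -> b
    return T[(u, v)] == 1 if u < v else T[(v, u)] == 0

def D(T, n):
    # all triples spanning a directed cycle: every vertex has out-degree 1 inside the triple
    return {t for t in combinations(range(n), 3)
            if all(sum(beats(T, x, y) for y in t if y != x) == 1 for x in t)}

def locally_optimal(H, T, n):
    # no re-orientation of a single pair increases the size of the intersection of H and D(T)
    base = len(H & D(T, n))
    for pair in combinations(range(n), 2):
        T2 = dict(T)
        T2[pair] = 1 - T2[pair]
        if len(H & D(T2, n)) > base:
            return False
    return True

n = 7
T = {(i, j): random.randint(0, 1) for i in range(n) for j in range(i + 1, n)}
H = D(T, n)                       # take H to be exactly the cyclic triples of T
print(locally_optimal(H, T, n))   # True: T is a best fit to its own H
\end{verbatim}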
For problems with the conjectured constructions being recursive, it makes good sense to bound in Step 3 not the global function we try to optimize but the contribution of the top level to it, as we did for the Tur\'an problem for $C_{5}^{3-}$. Indeed, our computer experiments indicate that flag algebras using flags with at most 6-vertices (in the theory of 3-graphs with an unordered vertex 3-partition) cannot prove directly $|\C H|\le (\frac14+o(1)){n\choose 3}$ under the assumptions of Proposition~\ref{pr:3}. This is as expected since such hypothetical proof would apply to the union of three vertex-disjoint copies of an arbitrary $C_{5}^{3-}$-free $\C G$, on $V_1$, $V_2$ and $V_3$, and the complete $3$-partite $3$-graph $\mathcal{K}[V_1,V_2,V_3]$, and would imply that the edge density of $\C G$ is at most $\frac14+o(1)$. This could in turn be translated into a proof of $\pi(C_{5}^{3-})\le \frac 14$ in the theory of (uncoloured) $C_{5}^{3-}$-free $3$-graphs. \hide{ In a way, Proposition~\ref{pr:4} can also be regarded as Step 3 of this method where Step~1 did not require any computer calculations since the last step could be carried out with a very weak similarity assumption of Item~\ref{it:42} of Proposition~\ref{pr:4} (namely that the number of crossing pairs is at least $(\frac1{16}+o(1))n^2$, which is rather far from $(\frac19+o(1))n^2$ observed in the conjectured construction). } \hide{ A conjecture, often attributed to Mubayi--R\"{o}dl~\cite{MR02}, states that $\pi(C_{5}^{3}) = 2\sqrt{3} - 3$ (which is $0.46410...$). Recent work by Kam{\v c}ev--Letzter--Pokrovskiy~\cite{KLP24} shows that $\pi(C_{\ell}^{3}) = 2\sqrt{3} - 3$ for all sufficiently large $\ell$ satisfying $\ell \not\equiv 0 \pmod{3}$. The floating-point calculations indicate that it might be possible to prove that $\pi(\{K_{4}^{3}, C_{5}^{3}\})=2\sqrt{3} - 3$ and to improve the upper bound for $\pi(C_{5}^{3})$ to $2\sqrt{3} - 3 + 0.00053$. It seems plausible that, after a rounding process (which we have not yet attempted), it could be shown that $\pi(\{K_{4}^{3}, C_{5}^{3}\}) = 2\sqrt{3} - 3$. Such a result would imply that $\pi(C_{\ell}^{3}) = 2\sqrt{3} - 3$ for all $\ell \ge 8$ satisfying $\ell \not\equiv 0 \pmod{3}$, thus strengthening the result of Kam{\v c}ev--Letzter--Pokrovskiy. Additionally, the upper bound for $\pi(C_{5}^{3})$ could likely be further refined or even determined exactly with additional effort. We hope to return to this topic in the future. } A conjecture, often attributed to Mubayi--R\"{o}dl~\cite{MR02}, states that $\pi(C_{5}^{3}) = 2\sqrt{3} - 3$ (which is $0.46410...$). The recent work by Kam{\v c}ev--Letzter--Pokrovskiy~\cite{KLP24} shows that $\pi(C_{\ell}^{3}) = 2\sqrt{3} - 3$ for all sufficiently large $\ell$ satisfying $\ell \not\equiv 0 \pmod{3}$. Our floating-point calculations indicate that it might be possible to prove that $\pi(\{K_{4}^{3}, C_{5}^{3}\})=2\sqrt{3} - 3$ using the local refinement method described above, where $K_4^3$ denotes the complete $3$-graph on $4$ vertices. This result (if true) would imply that $\pi(C_{\ell}^{3}) = 2\sqrt{3} - 3$ for all $\ell \ge 8$ satisfying $\ell \not\equiv 0 \pmod{3}$, thus strengthening the result of Kam{\v c}ev--Letzter--Pokrovskiy. Unfortunately, the semi-definite programs that we have to solve are rather large: the largest one, the step of proving that $|B|\le |M|$ using 6-vertex $\{K_{4}^{3}, C_{5}^{3}\}$-free 2-coloured 3-graphs has $|\C F_6|=28080$ linear constraints. 
Rounding the obtained matrices is a challenging task and we have not been able to accomplish it yet. \section*{Acknowledgements} Levente Bodn\'ar, Xizhi Liu and Oleg Pikhurko were supported by ERC Advanced Grant 101020255. \bibliography{refs} \end{document}
2412.21145v2
http://arxiv.org/abs/2412.21145v2
The Shortest Interesting Binary Words
\documentclass[runningheads,11pt]{article} \usepackage{fixltx2e,breakurl} \usepackage{latexsym,amsmath,amssymb,amsfonts,amsthm} \usepackage{graphicx,epsfig} \usepackage{url} \usepackage{multirow} \usepackage[nocompress]{cite} \usepackage[scale=0.8]{geometry} \usepackage{shuffle} \usepackage{tikzsymbols} \usepackage{tabularx} \newcommand{\Suff}{\textit{Suff}} \newcommand{\Pref}{\textit{Pref}} \newcommand{\Fact}{\textit{Fact}} \renewcommand{\epsilon}{\varepsilon} \renewcommand{\alph}{\textit{alph}} \newcommand{\St}{\textit{St}} \newcommand{\Pal}{\textit{PAL}} \newcommand{\oc}{\textit{oc}} \newcommand{\PL}{\textit{PL}} \newcommand{\PPL}{\textit{PPL}} \newcommand{\ppt}{\textit{p}} \newcommand{\pref}{\textit{pref}} \theoremstyle{plain} \newtheorem{theorem}{Theorem} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{corollary}[theorem]{Corollary} \theoremstyle{definition} \newtheorem{definition}{Definition} \newtheorem{example}{Example} \newtheorem*{conjecture}{Conjecture} \newtheorem{problem}{Problem} \theoremstyle{remark} \newtheorem*{remark}{Remark} \newtheorem*{note}{Note} \newtheorem{case}{Case} \newtheorem{2case}{Case} \DeclareFontFamily{U}{bigshuffle}{} \DeclareFontShape{U}{bigshuffle}{m}{n}{ <5-8> s*[1.7] shuffle7 <8-> s*[1.7] shuffle10 }{} \DeclareSymbolFont{BigShuffle}{U}{bigshuffle}{m}{n} \DeclareMathSymbol\bigshuffle{\mathop}{BigShuffle}{"001} \DeclareMathSymbol\bigcshuffle{\mathop}{BigShuffle}{"002} \newtheorem{observation}{Observation} \newtheorem{fact}{Fact} \newcommand{\todo}[1]{\marginpar{\small #1}} \begin{document} \sloppy \title{The Shortest Interesting Binary Words} \date{} \author{Gabriele Fici\\[1mm] Dipartimento di Matematica e Informatica, Universit\`a di Palermo, Italy\\[1mm] [email protected]} \maketitle \begin{abstract} I will show that there exist two binary words (one of length 4 and one of length 6) that play a special role in many different problems in combinatorics on words. They can therefore be considered \textit{the shortest interesting binary words}. My claim is supported by the fact that these two words appear in dozens of papers in combinatorics on words. \end{abstract} \section{Introduction} Many papers are devoted to the study of properties of some interesting infinite word, e.g., the Fibonacci word $f=0100101001001\cdots$, or the Thue--Morse word $t=0110100110010110\cdots$, or to the study of classes of words. But to the best of my knowledge no paper has been entirely devoted to just two short binary words! In this paper, I focus on the words: \[v=0011\] and \[w=001011.\] Why do I claim that these two words are interesting? An answer could be that they appear in no less than 50 papers in combinatorics on words. They are probably the shortest binary words that are not \emph{too} trivial. For this reason, they are often presented as an example for many classical definitions, e.g., primitive word, unbordered word, Lyndon word, Dyck word, etc. But, as it will be shown in this paper, they also have many other surprising properties. As usual in the field, I will use the last letters of the Latin alphabet to denote words, i.e., $u$, $v$, $w$, etc. 
To convince the reader that these two words are of \emph{particular} relevance in the field, think of the diagonal lattice representation of a binary word, that is, the diagonal lattice path obtained encoding each $0$ with a downstep ($\backslash$), i.e., a segment that goes from a point $(i, j)$ to $(i +1, j - 1)$, and each $1$ with an upstep ($/$), i.e., a segment that goes from a point $(i, j)$ to $(i +1, j + 1)$. Then the path encoding $v=0011$ has a \texttt{V} shape, and of course the path encoding $w=001011$ has a \texttt{W} shape! \Laughey[1.0] \section{Palindromes and Anti-palindromes}\label{sec:pal} A first observation is that for both words $v=0011$ and $w=001011$ it holds that the mirror image ($\tilde{v}=1100$ and $\tilde{w}=110100$, respectively) has a different character in each position. Words with this property are called \emph{anti-palindromes} (or \emph{sesqui-palindromes} \cite{DBLP:journals/ejc/CarpiL04}). But while for the word $v=0011$, the mirror image is a rotation (conjugate) of the word, this does not hold for the word $w=001011$, for which there is no rotation that yields the word $\tilde{w}=110100$. A word such that no two rotations coincide is called \emph{primitive}; a word such that no two rotations of the word or of its mirror image coincide, i.e., a word $u$ of length $n$ such that the set made by all rotations of $u$ and all rotations of $\tilde{u}$ has cardinality $2n$, is called \emph{asymmetric}~\cite{BrHaNiRe04}. No binary word of length smaller than $6$ is asymmetric, and there is a unique orbit of asymmetric binary words of length $6$, namely that of $w=001011$~\cite{BrHaNiRe04}. The word $v=0011$ is not a palindrome, but can be written as a concatenation of two palindromes ($00$ and $11$). In general, when this happens, for a primitive nonempty word, the factorization is unique~\cite{DBLP:journals/tcs/LucaM94}. The word $w=001011$, instead, cannot be written as the concatenation of two palindromes. It is easy to see that a word $u$ is a concatenation of two palindromes (one of which could be empty) if and only if $u$ is a rotation of its mirror image $\tilde{u}$. However, the word $w=001011$ can be written as the concatenation of 3 palindromes (for instance, $w=00\cdot 101 \cdot 1$; notice that, contrarily to the case of $2$ palindromes, there may exist different factorizations in more than $2$ palindromes). It therefore has (see~\cite{DBLP:journals/aam/FridPZ13}) \emph{palindromic length} 3, while the word $v=0011$ has palindromic length $2$. Actually, $w=001011$ is a binary word of minimal length having palindromic length 3, that is, which can be written as a concatenation of 3, but not fewer, palindromes. It is minimal in the sense that all of its proper factors have palindromic length at most $2$~\cite{DBLP:journals/combinatorics/BorchertR15}. The shortest binary word with palindromic length 4 is $w^{8/6}=00101100$, up to mirror image and character exchange; the shortest binary word with palindromic length 5 is $w^{11/6}=00101100101$, up to mirror image and character exchange. The shortest binary word with palindromic length 6 has length 14, but it is no longer a fractional power of $w$ (it is in fact the word $00101110001011=w10w$, up to mirror image). 
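All of these palindromic lengths can be verified mechanically. The following short Python sketch (the function name is mine) computes the palindromic length of a binary word by dynamic programming over its prefixes and reproduces the values quoted above.
\begin{verbatim}
def palindromic_length(u):
    # least number of (nonempty) palindromes whose concatenation is u
    n = len(u)
    pal = [[False] * (n + 1) for _ in range(n + 1)]   # pal[i][j]: u[i:j] is a palindrome
    for length in range(0, n + 1):
        for i in range(n - length + 1):
            j = i + length
            pal[i][j] = length <= 1 or (u[i] == u[j - 1] and pal[i + 1][j - 1])
    best = [0] + [n] * n                              # best[i]: palindromic length of u[:i]
    for i in range(1, n + 1):
        best[i] = min(best[j] + 1 for j in range(i) if pal[j][i])
    return best[n]

for u in ["0011", "001011", "00101100", "00101100101", "00101110001011"]:
    print(u, palindromic_length(u))                   # 2, 3, 4, 5, 6
\end{verbatim}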
The word $w^{11/6}$ is in fact an exception for the sequence $P(n)$ of the maximum palindromic length a binary word of length $n$ can have, since Ravsky \cite{Ra03} showed that the sequence $P(n)$ is given by $$P(n)=\lfloor n/6 \rfloor + \lfloor (n+4)/6 \rfloor +1$$ for every $n\neq 11$, and $P(11)=5$. \bigskip Regarding the number of distinct palindromic factors, one has that $v$ has $5$ palindromic factors ($\epsilon$, $0$, $1$, $00$ and $11$) and $w$ has $7$ (the same of $v$ plus $010$ and $101$). It is well known (and indeed easy to prove, see~\cite{DBLP:journals/tcs/DroubayJP01}) that any word of length $n$ contains at most $n+1$ distinct palindromic factors, including the empty word $\epsilon$. A word of length $n$ containing $n+1$ distinct palindromic factors is called \emph{rich}, or \emph{full}. So the words $v$ and $w$ are both rich. Actually, every binary word of length $7$, or less, is rich. The word $w^{8/6}=00101100$ is a non-rich binary word of minimal length. Indeed, it has length $8$ and only $8$ palindromic factors, namely $\varepsilon,0,1,00,11,010,101$, and $0110$. It can be proved that a word is rich if and only if all of its factors also are. Hence, it is natural to extend the definition to infinite words: An infinite word is called rich if all its finite factors are rich. For example, the Fibonacci word $f=0100101001001\cdots$ is rich. The word $w^{8/6}$ is a factor of the Thue--Morse word $t=0110100110010110\cdots$, so the Thue--Morse word is not rich. One may wonder whether all binary palindromes are rich. This is not the case. An example (of minimal length) is the word $00101100110100$ of length $14$, the shortest palindrome that starts with $w^{8/6}$. A word $u$ is called \emph{circularly rich} if $u^2$ (or, equivalently, the infinite word $u^\infty=uuu\cdots$) is rich. Surprisingly, this is not equivalent to the fact that $u$ and all its rotations are rich. A counterexample is again provided by the word $w=001011$: all its rotations are rich but the word $w^2=001011001011$ is not rich, since it contains the non-rich factor $w^{8/6}=00101100$. Glen et al.~\cite{GlJuWiZa09} proved that a word $u$ is circularly rich if and only if $u$ and all its rotations are rich \emph{and} $u$ is the concatenation of two palindromes. For the same reason, the infinite word $w^\infty$ is not rich. Actually, it is the infinite binary word containing \emph{the least} number of palindromic factors! The set of palindromic factors of $w^\infty$ is $\{\epsilon, 0, 1, 00, 11, 010, 101, 0110, 1001\}$ and so has cardinality 9. It has been shown that every infinite binary word contains at least 9 distinct palindromic factors~\cite{FiZa13}. Moreover, an infinite binary word has exactly 9 distinct palindromic factors if and only if it is of the form $z^{\infty}$ where $z$ is a rotation of $w$ or a rotation of $\tilde{w}$. An \emph{aperiodic} binary word, instead, must contain at least 11 distinct palindromic factors~\cite{FiZa13}. An example of such a word is the fixed point of the morphism $0\mapsto 0001011,\ 1\mapsto 001011$, i.e., $0\mapsto 0w, \ 1\mapsto w$. It is aperiodic for known properties of fixed points of binary morphisms~(see for example~\cite[Proposition 14]{DBLP:conf/dlt/FrosiniMRRS22}). \bigskip A word $u$ is called a {\it palindromic periodicity} if there exist two palindromes $p$ and $s$ such that $|u| \geq |ps|$ and $u$ is a prefix of the word $(ps)^\omega = pspsps\cdots$~\cite{Simpson:2024}. No infinite binary word has fewer than 30 distinct palindromic periodicities. 
The periodic word $w^\infty$ has $30$~\cite{FSS:2024}. \bigskip A word is called \emph{weakly rich}~\cite{GlJuWiZa09} if the factor separating any two consecutive occurrences of the same character is always a palindrome. It can be proved that all rich words are weakly rich, but the converse does not always hold. For example, the word $w^{8/6}=00101100$ is weakly rich (since, trivially, all binary words are weakly rich) but it is not rich. The word $0010200$ is a non-binary word that is weakly rich but not rich; the word $0120$ is not rich nor even weakly rich. Every weakly rich word $u$ can be uniquely reconstructed (up to a permutation of characters) from the set \[S(u)=\{(i,j) \mid u[i..j]\mbox{ is a palindrome}\},\] since the pairs $(i,j)$ in $S(u)$ induce a set of equations that partitions $\{1,\ldots,|u|\}$ in subsets of positions containing the same character. To reconstruct the word, one assigns a different character to each part. If a word $u$ is not weakly rich, the information from the set $S(u)$ is not sufficient to uniquely reconstruct $u$. For example, for $u=0120$ and $z=0123$ one has $S(u)=S(z)=\{(1,1),(2,2),(3,3),(4,4)\}$. The \emph{minimal palindromic specification} of a weakly rich word $u$ is the cardinality of a smallest subset $S'(u)$ of $S(u)$ that allows one to uniquely reconstruct $u$, i.e., that induces the same set of equations as $S(u)$ (cf.~\cite{DBLP:journals/jct/HarjuHZ15}). For example, words for which the minimal palindromic specification is equal to $1$ are $u=00$ ($S'(u)=\{(1,2)\}$), $u=010$ ($S'(u)=\{(1,3)\}$), $u=0110$ ($S'(u)=\{(1,4)\}$), and $u=01210$ ($S'(u)=\{(1,5)\}$; the minimal palindromic specification of $u=0^n$, $n\geq 3$, is $2$ ($S'(u)=\{(1,n),(2,n)\}$); the minimal palindromic specification of $u$ is $0$ if and only if all characters in $u$ are distinct ($S'(u)=\emptyset$); finally, the minimal palindromic specification of $u=0100101001$ is $3$ ($S'(u)=\{(1,6),(4,6),(2,10)\}$. Actually, if $u$ is any binary balanced word (see Section~\ref{sec:bal}), then the minimal palindromic specification of $u$ is at most $3$~\cite{DBLP:journals/jct/HarjuHZ15}. A shortest word that has minimal palindromic specification equal to $4$ is the word $w=001011$. Indeed, $w$ can be uniquely reconstructed from $S'(w)=\{(1, 2), (2, 4), (3, 5), (5,6)\}$ but not from any subset of $S(w)$ of cardinality less than $4$. \bigskip The \emph{derivative} of a (finite or infinite) binary word is the sequence of consecutive differences of characters (interpreted as integers) and is, in general, a ternary word. In this way, the characters of the derivative are in $\{-1,0,1\}$. In order to obtain a word over the alphabet $\{0,1,2\}$, one can add $1$ to each consecutive difference. Thus, in this paper I define the derivative of the word $u_1u_2\cdots$ as the word whose $i$th character is $1+u_i-u_{i+1}$. For example, it is well known the derivative of the Thue--Morse word $t=0110100110010110\cdots$ is a square-free ternary word, $0120210121020120210\cdots$; while the derivative of the Fibonacci word is the word $2012020120120201202012\cdots$ obtained by applying the morphism $0\mapsto 201,\ 1 \mapsto 20$ to the Fibonacci word. In the case of finite words one has: \begin{proposition} The derivative of an anti-palindrome is always a palindrome. On the opposite, the derivative of a palindrome is never an anti-palindrome. \end{proposition} \begin{proof} The first statement is evident by symmetry. 
For the second statement, observe that the derivative of a palindrome is either the word $1^n$, for some $n$, or a word that contains at least one occurrence of the character $2$. \end{proof} The derivative of $v=0011$ is $101$, a shortest palindrome containing $2$ different characters. The derivative of $w=001011$ is $10201$, a shortest palindrome containing $3$ different characters. \begin{proposition} The derivative of $w^\infty=(001011)^{\infty}$, i.e., the word $(102012)^{\infty}$, is rich. \end{proposition} \begin{proof} An infinite periodic word $u^\infty$ is rich if and only if $u^2$ is rich~\cite{GlJuWiZa09}. The word $(102012)^2$ is rich. \end{proof} Another related transformation is the \emph{Pansiot coding}~\cite{DBLP:journals/dam/Pansiot84} (also called \emph{Lempel homomorphism}~\cite{DBLP:journals/tc/Lempel70}) of a binary word, which consists in taking the \emph{absolute value} of the consecutive differences (or, equivalently, the consecutive sums modulo $2$), and is therefore another binary word. For example, the Pansiot coding of the Thue--Morse word is the period-doubling word: $10111010101110111\cdots$; while the Pansiot coding of the Fibonacci word is the word $110111101101111011110\cdots$ obtained by applying the morphism $0\mapsto 11$, $1\mapsto 0$ to the Fibonacci word. The Pansiot coding of the word $v=0011$ is the word $010$, while that of the word $w=001011$ is the word $01110$. Let me call a binary word $u$ a \emph{Pansiot pre-palindrome} if its Pansiot coding is a palindrome. \begin{proposition} A binary word $u$ is a Pansiot pre-palindrome if and only if $u$ is a palindrome or an antipalindrome. \end{proposition} \begin{proof} By induction on the length. \end{proof} Analogously, a word $u$ is a \emph{Pansiot pre-antipalindrome} if its Pansiot coding is an antipalindrome. Pansiot pre-antipalindromes can be generated recursively. Such words clearly have odd lengths, since antipalindromes must have even length and the Pansiot coding reduces the length by 1. \begin{proposition} The Pansiot pre-antipalindromes of length $3$ are: $001$, $011$, $100$, and $110$, and for every $n\geq 1$: \begin{itemize} \item The Pansiot pre-antipalindromes of length $4n+1$ are precisely the words of the form $0u0$ or $1u1$, where $u$ is a Pansiot pre-antipalindrome of length $4n-1$. \item The Pansiot pre-antipalindromes of length $4n+3$ are precisely the words of the form $0u1$ or $1u0$, where $u$ is a Pansiot pre-antipalindrome of length $4n+1$. \end{itemize} \end{proposition} \begin{proof} By induction on $n$. \end{proof} \bigskip By the way, a transformation that maps palindromes to anti-palindromes and anti-palindromes to palindromes exists~\cite{DBLP:journals/ejc/CarpiL04}: it is the Thue--Morse morphism $\tau:0\mapsto 01,\ 1 \mapsto 10$. So, for example, $\tau(v)=01011010$ and $\tau(w)=010110011010$ are indeed palindromes. The Thue--Morse morphism has another fundamental property: Recall that an \emph{overlap} is a square followed by its first character, i.e., a word of the form $auaua$, with $a$ a character and $u$ a word, e.g., $0010010$. A binary word $u$ is overlap-free (i.e., none of its factors is an overlap) if and only if $\tau(u)$ is overlap-free (see~\cite{Berstel}). Hence, for example, both $\tau(v)$ and $\tau(w)$ are overlap-free. 
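These last claims are easy to check by computer. The following Python sketch (the function names are mine) applies the Thue--Morse morphism and tests the relevant properties of $v$, $w$ and their images.
\begin{verbatim}
def tau(u):                       # Thue--Morse morphism: 0 -> 01, 1 -> 10
    return "".join("01" if c == "0" else "10" for c in u)

def is_palindrome(u):
    return u == u[::-1]

def is_antipalindrome(u):         # the mirror image differs from u in every position
    return all(a != b for a, b in zip(u, u[::-1]))

def is_overlap_free(u):
    # an overlap is a factor a x a x a, with a a letter and x a possibly empty word
    n = len(u)
    for i in range(n):
        for p in range(1, (n - i) // 2 + 1):
            j = i + 2 * p
            if j < n and u[i:i + p] == u[i + p:j] and u[i] == u[j]:
                return False
    return True

v, w = "0011", "001011"
print(is_antipalindrome(v), is_antipalindrome(w))         # True True
print(tau(v), tau(w))                                     # 01011010 010110011010
print(is_palindrome(tau(v)), is_palindrome(tau(w)))       # True True
print(is_overlap_free(tau(v)), is_overlap_free(tau(w)))   # True True
\end{verbatim}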
More generally, Richomme and S{\'{e}}{\'{e}}bold proved that a morphism $\mu$ is overlap-free, i.e., maps overlap-free words to overlap-free words, if and only if $\mu(w)=\mu(001011)$ is overlap-free~\cite{DBLP:journals/dam/RichommeS99}, and there is no shorter word that can replace $w$. \bigskip Consider now this problem: Given an integer $k>0$, is it possible to construct an infinite binary word that does not contain the mirror image of any of its factors of length $k$? Rampersad and Shallit~\cite{RaSh05} showed that this is impossible for $k<5$: every binary word of length greater than $8$ contains the mirror image of at least one of its factors of length $k$. But they proved that the word $w^\infty=(001011)^{\infty}$ is an infinite binary word avoiding the mirror images of all its factors of length $\ge 5$. \bigskip Currie and Lafrance~\cite{DBLP:journals/combinatorics/CurrieL16} showed that if one replaces each $1$ by $w=001011$ in the Thue--Morse word, one obtains an infinite binary word such that no factor is of the form $xyx\tilde{y}x$, for nonempty words $x$ and $y$. If instead one replaces each $1$ by $w01111=00101101111$, one obtains an infinite binary word such that no factor is of the form $xyx\tilde{y}\tilde{x}$; while replacing each $1$ by $w11=00101111$ one obtains an infinite binary word such that no factor is of the form $xy\tilde{x}\tilde{y}x$~\cite{DBLP:journals/combinatorics/CurrieL16}. \section{Squares and Other Repetitions}\label{sec:rep} Every binary word of length at least $4$ contains a square factor. Fraenkel and Simpson considered the problem of determining the largest number of square factors in a binary word~\cite{DBLP:journals/jct/FraenkelS98}. In order to count square factors, it is convenient to restrict the attention to primitive rooted squares (squares of the form $uu$ with $u$ a primitive word). The maximum number of distinct primitive rooted squares in a binary word of length $n$ is presented in Table II of~\cite{DBLP:journals/jct/FraenkelS98}. In particular, the word $v=0011$ is a word of minimal length containing 2 distinct primitive rooted squares ($00$ and $11$), while the word $w=001011$ is a word of minimal length containing 3 distinct primitive rooted squares ($00$, $11$, and $0101$). So, infinite binary words cannot avoid squares. But there are infinite binary words avoiding overlaps. An example is the Thue--Morse word. The word $w=001011$ is also an extremal case for the following well-known result due to Restivo and Salemi~\cite{DBLP:conf/litp/RestivoS84} (see also~\cite{DBLP:journals/combinatorics/AlloucheCS98}): \begin{theorem} If a binary word $u$ is overlap-free, then there exist $x,y,z$ with $x,z\in\{\varepsilon,0,1,00,11\}$ and $y$ an overlap-free word, such that $u=x\tau(y)z$, where $\tau$ is the Thue--Morse morphism. Furthermore, this decomposition is unique if $|u|\geq 7$, and $x$ (resp.~$z$) is completely determined by the prefix (resp.~suffix) of length $7$ of $u$. The bound $7$ is sharp as shown by the example $w=001011 = 00\tau(1)11 = 0\tau(00)1$. \end{theorem} Currie and Rampersad~\cite{DBLP:journals/dmtcs/CurrieR10} proved that it is possible to construct an infinite binary word avoiding cubes but containing exponentially many distinct square factors.
They considered the (uniform) cube-free morphism \begin{align*} 0 & \mapsto 001011\\ 1 & \mapsto 001101\\ 2 & \mapsto 011001 \end{align*} Notice that the morphism above maps $0$ to $w$, $2$ to a rotation of $w$, and $1$ to a rotation of the complement\footnote{The complement of a binary word is the word obtained by exchanging $0$s and $1$s.} of $w$. Recently, Dvořáková et al.~\cite{DOO24} proved that applying the $7$-uniform morphism \begin{align*} 0 & \mapsto 0001011\\ 1 & \mapsto 1001011 \end{align*} that is, the morphism that maps $0$ to $0w$ and $1$ to $1w$, to any binary $\frac{7}{3}^+$-free word (i.e., a word such that no factor has exponent larger than $7/3$, where the exponent is defined as the ratio between the length and the minimum period) gives a cube-free binary word containing at most $13$ palindromes, which is the least number of distinct palindromes a binary cube-free word can have. \bigskip The word $v=0011$ is also an \emph{anti-square}, i.e., a word of the form $u\overline{u}$, where $\overline{u}$ is the complement of $u$; while $w=001011$ is not. In particular, $v=0011$ is also a \emph{minimal} anti-square, that is, an anti-square that does not properly contain any anti-square factor, except possibly $01$ and $10$. Minimal anti-squares have been characterized in \cite{anti}. In the same paper, the authors proved that a binary word that does not contain any anti-square factor, except possibly $01$ and $10$, and has length at least $8$, must contain $w=001011$, or its complement $\overline{w}$, as a factor. \bigskip A different kind of repetition is the notion of a \textit{run} (or \textit{maximal repetition}). A pair $(i,j)$ is a run in a word $u=u[1..n]$, $1\leq i<j\leq n$, if the exponent of $u[i..j]$ is at least $2$ and is larger than both the exponents of $u[i-1..j]$ and $u[i..j+1]$, if these are defined. For example, the runs of $w=001011$ are $(1,2)$, $(2,5)$ and $(5,6)$. Runs are particularly important in text processing, since they allow the design of efficient algorithms that process separately the repetitive and the non-repetitive portions of a string. It was conjectured in \cite{DBLP:conf/focs/KolpakovK99}, and then proved in \cite{DBLP:journals/siamcomp/BannaiIINTT17}, that the number of runs in a word of length $n$ is less than $n$. In \cite{DBLP:conf/focs/KolpakovK99}, the authors proved that the total sum of exponents of runs in a word $u$, denoted $\sigma(u)$, is linear in the length of $u$. For example, the maximal value of $\sigma$ for a word of length $4$ is $4$, and this is realized by the word $v=0011$, which has two runs of exponent $2$, namely $(1,2)$ and $(3,4)$; while the maximal value of $\sigma$ for a word of length $6$ is $6$, and this is realized by the word $w=001011$, which has three runs of exponent $2$. However, one can have $\sigma(u)>|u|$ for larger values of $|u|$. For example, take the word $0010100101$, of length $10$. It has runs $(1,10)$, $(1,2)$, $(4,9)$, $(6,7)$, $(7,10)$, of exponent $2$; and $(2,6)$, of exponent $5/2$. So, $\sigma(u)=25/2$. No other word of length $10$ has a larger value of $\sigma$. \bigskip Recall that a \emph{Dyck word} is a binary word that, considering $0$ as a left parenthesis and $1$ as a right parenthesis, represents a string of balanced parentheses. The words $v=0011$ and $w=001011$ are both Dyck words. Consider the morphism $\mu$: \begin{align*} 0 & \mapsto 01\\ 1 & \mapsto 0011\\ 2 & \mapsto 001011 \end{align*} i.e., the morphism that maps $0$ to $01$, $1$ to $v$ and $2$ to $w$.
Mol, Rampersad and Shallit \cite{DBLP:conf/cwords/MolRS23} proved that a binary word is an overlap-free Dyck word if and only if it is of the form either $\mu(x)$ for a square-free word $x$ over $\{0,1,2\}$ that contains no $212$ or $20102$, or of the form $0\mu(x)1$, where $x$ is square-free word over $\{0,1,2\}$ that begins with $01$ and ends with $10$, and contains no $212$ or $20102$. \bigskip The \emph{perfect shuffle} of two words of the same length $x=x_1x_2\cdots x_n$ and $y=y_1y_2\cdots y_n$ is the word $x\shuffle y=x_1y_1x_2y_2\cdots x_ny_n$. Guo, Shallit and Shur \cite{DBLP:journals/corr/GuoSS15} observed that a word is an antipalindrome if and only if it is of the form $\overline{x}\shuffle \tilde{x}$. For example, $v=0011=01\shuffle 01=\overline{10}\shuffle \widetilde{10}$ and $w=001011=011\shuffle 001=\overline{100}\shuffle \widetilde{100}$. But the structure of the word $w=001011$ in terms of the perfect shuffle operator can be further specialized. In fact, $w=001011$ satisfies the equation $xy=y\shuffle x$. Actually, it is the shortest word with two different characters doing so. The \emph{ordinary shuffle} of two words $x$ and $y$ is the set of words obtainable from merging the words $x$ and $y$ from left to right, but choosing the next symbol arbitrarily from $x$ or $y$. More formally, the ordinary shuffle of $x$ and $y$ is the set $x\bigshuffle y = \{z \mid z = x_1y_1 x_2y_2 \cdots x_ny_n \mbox{ for some } n \geq 1 \mbox{ and words } x_1, \ldots, x_n, y_1, \ldots, y_n \mbox{ such that } x = x_1 \cdots x_n \mbox{ and } y = y_1 \cdots y_n\}$. A word that belongs to $x\bigshuffle x$ for some word $x$ is called a \emph{shuffle square}. Since $v\in 01\bigshuffle 01$, $v$ is a shuffle square; while $w$ is not a shuffle square. Actually, $v=0011$ is the shortest Dyck shuffle square. Deciding whether a binary word is a shuffle square is not an easy task. Indeed, Bulteau and Vialette~\cite{DBLP:journals/tcs/BulteauV20} proved that this problem is NP-hard. Recently, He et al.~\cite{DBLP:journals/ejc/HeHNT24} proved that for every $n\geq 3$, the number of binary shuffle squares of length $2n$ is strictly larger than $2n \choose n$. Words belonging to $x\bigshuffle \tilde{x}$ for some word $x$, instead, are called \emph{reverse shuffle squares}. Henshall, Rampersad, and Shallit~\cite{DBLP:journals/eatcs/HenshallRS12} proved that binary reverse shuffle squares are precisely the binary \emph{abelian squares}, i.e., binary words of the form $uu'$ where $u'$ is an anagram of $u$. Neither $v=0011$ nor $w=001011$ is an abelian square. But there is a rotation of $v$ that is an abelian square ($0110$), while no rotation of $w$ is an abelian square. This is because, in general, one has the following property: a binary word has at least one rotation (including the word itself) that is an abelian square if and only if it has an even number of $0$'s and an even number of $1$'s (a word in which all letters occur an even number of times is sometimes called a \emph{tangram}). \section{Lyndon and de Bruijn Words}\label{sec:Lyn} A \emph{Lyndon word} is a primitive word that is lexicographically smaller than all its rotations (or, equivalently, lexicographically smaller than all its proper suffixes). Here I use the order $0<1$. The words $v=0011$ and $w=001011$ are both Lyndon words. 
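Both the Lyndon property and the shuffle equation are easy to confirm by machine. The following Python sketch (function names are mine) checks that $v$ and $w$ are Lyndon words and that, taking $x=001$ and $y=011$, the word $w=xy$ indeed satisfies $xy=y\shuffle x$.
\begin{verbatim}
def is_lyndon(u):
    # u is strictly smaller than each of its proper rotations (hence primitive)
    return all(u < u[k:] + u[:k] for k in range(1, len(u)))

def perfect_shuffle(x, y):
    # interleave two words of the same length: x1 y1 x2 y2 ...
    return "".join(a + b for a, b in zip(x, y))

v, w = "0011", "001011"
print(is_lyndon(v), is_lyndon(w))            # True True
x, y = "001", "011"
print(x + y == w == perfect_shuffle(y, x))   # True: w satisfies xy = (y shuffle x)
\end{verbatim}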
Moreover, the word $v=0011$ is the shortest binary word that has $2$ different factorizations in two Lyndon words: $0\cdot 011$ and $001\cdot 1$; while the word $w=001011$ is the shortest binary word that has $3$ different factorizations in two Lyndon words: $0\cdot 01011$, $001\cdot 011$, and $00101\cdot 1$ (cf.~\cite{DBLP:journals/dm/Melancon00,DBLP:conf/soda/BassinoCN04}). In fact, one has: \begin{proposition}\label{hmld} For every $n\geq 3$, the shortest binary word that has $n$ distinct factorizations in two Lyndon words is the word $00(10)^{n-2}11$, of length $2n$. \end{proposition} \begin{proof}[Sketch of proof] A Lyndon word of length $>1$ starts with $0$ and end with $1$. To have $n$ distinct factorizations in two Lyndon words, one needs at least $n$ occurrences of $10$. \end{proof} The \emph{right standard factorization} of a Lyndon word $u$ of length at least $2$ is $u=st$, where $t$ is the lexicographically least proper suffix of $u$ (or, equivalently, the longest proper suffix of $u$ that is a Lyndon word). For example, the right standard factorization of $v=0011$ is $0\cdot 011$, while that of $w=001011$ is $0\cdot 01011$. Since the words $s$ and $t$ in the right standard factorization are always Lyndon words (this can be proved by exercise), applying the right standard factorization recursively until one gets words of length $1$ defines the so-called \emph{right Lyndon tree} of a word $u$, i.e., the binary tree whose root is the word $u$, the leaves are single letters, and the children of a factor $u'$ of length greater than $1$ are the words in the right standard factorization of $u'$. There is also a \emph{left standard factorization} of a Lyndon word $u$ (a.k.a.~\emph{Viennot factorization}). It is the factorization $u=st$, where $s$ is the longest proper prefix of $u$ that is a Lyndon word (but in general $s$ is not the lexicographically least proper prefix of $u$, which is always a single letter!). The left and right standard factorizations are not the same, in general. For example, the left standard factorization of $v=0011$ is $001\cdot 1$. However, for some Lyndon words the right and the left standard factorizations can coincide yet their Lyndon trees are different (the left Lyndon tree is defined by applying recursively the left standard factorization). The class of binary words for which the right and the left Lyndon tree coincide is precisely the class of primitive lower Christoffel words (i.e., balanced Lyndon words, see below). \bigskip A \emph{de Bruijn word} of order $k$ is a word such that all words of length $k$ occur exactly once in it as (cyclic) factors, i.e., as factors if one concatenates the de Bruijn word with its prefix of length $k-1$. For example, the word $v=0011$ is a binary de Bruijn word of order $k=2$. The following famous result is due to Fredricksen and Maiorana~\cite{DBLP:journals/dm/FredricksenM78}: \begin{theorem}\label{fm} The lexicographically least de Bruijn word of order $k$ can be obtained by concatenating in lexicographic order the Lyndon words of length dividing $k$. \end{theorem} So, the word $v=0011$ is the lexicographically least binary de Bruijn word of order $2$, since it is the concatenation, in lexicographically order, of the Lyndon words of length dividing $2$, i.e., the words $0$, $1$ and $01$. The lexicographically least binary de Bruijn word of order $3$ is the word $0w1=00010111$. 
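The construction of Theorem~\ref{fm} is also easy to implement: Duval's algorithm generates the binary Lyndon words of length at most $k$ in lexicographic order, and concatenating those whose length divides $k$ gives the lexicographically least de Bruijn word of order $k$. Here is a Python sketch (function names are mine).
\begin{verbatim}
def lyndon_words(k, alphabet="01"):
    # Duval's algorithm: all Lyndon words of length <= k, in lexicographic order
    word = [alphabet[0]]
    while word:
        yield "".join(word)
        word = [word[i % len(word)] for i in range(k)]   # periodic extension to length k
        while word and word[-1] == alphabet[-1]:
            word.pop()
        if word:
            word[-1] = alphabet[alphabet.index(word[-1]) + 1]

def least_de_bruijn(k):
    # Fredricksen--Maiorana: concatenate the Lyndon words of length dividing k
    return "".join(u for u in lyndon_words(k) if k % len(u) == 0)

print(least_de_bruijn(2))   # 0011
print(least_de_bruijn(3))   # 00010111
\end{verbatim}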
Indeed, it is the concatenation, in lexicographically order, of the Lyndon words of length dividing $3$, i.e., the words $0$, $001$, $011$ and $1$. It is known that a binary de Bruijn word of order $k$ cannot be extended to a binary de Bruijn word of order $k+1$, but it can be extended to a binary de Bruijn word of order $k+2$~\cite{DBLP:journals/ipl/BecherH11}. For example, the word $v=0011$, of order $2$, can be extended to the de Bruijn word $0011001011110100=v\cdot w\cdot \tilde{w}$ of order $4$. \bigskip There are several generalizations of de Bruijn words that have been proposed in the literature. One is the following: A generalized de Bruijn word of order $k$ is a word such that all primitive words of length $k$ occur exactly once in it as (cyclic) factors. The following result, due to Au~\cite{DBLP:journals/dm/Au15}, is analogous to Theorem~\ref{fm}: \begin{theorem} The lexicographically least generalized de Bruijn word of order $k$ can be obtained by concatenating in lexicographically order the Lyndon words of length $k$. \end{theorem} According to the previous theorem, the word $w=001011$ is the lexicographically least generalized binary de Bruijn word of order $3$, since it is the concatenation of the binary Lyndon words of length $3$: $001$ and $011$. The reader can verify that $w$ contains every binary primitive word of length $3$ as a cyclic factor exactly once. Another generalization of de Bruijn words has been proposed in \cite{DBLP:journals/iandc/GabricHS22}: a binary word of length $n$ is a generalized de Bruijn word if for all $0\leq i\leq n$, the number of cyclic factors of length $i$ is $\min(2^i,n)$. Clearly, when $n$ is a power of $2$, this definition coincides with that of ordinary de Bruijn word. For $n=6$ there are $3$ generalized de Bruijn words, namely $000111$, $w=001011$ and $\tilde{w}=110100$. \bigskip The \emph{Burrows--Wheeler Transform} (BWT) of a word $u$ is the word obtained by concatenating the last characters of the rotations of $u$ sorted in lexicographic order. For example, if $u=0120$, the list of sorted rotations of $u$ is $\{0012,0120,1200,2001\}$, so the BWT of $u$ is $2001$. By definition, the BWT of $u$ is the same as the BWT of any rotation of $u$, so here I consider only the BWT of Lyndon words. The BWT of $v=0011$ is $1010$, while the BWT of $w=001011$ is $101100$, which is a rotation of $w$. Actually, for each $n$, only a few binary Lyndon words of length $n$ (e.g., only 13 for length 20) are rotations of their BWT. A general combinatorial characterization of binary Lyndon words that are rotations of their BWT is missing, although partial results have been obtained~\cite{DBLP:journals/fuin/MantaciRRRS17}. Given a word $u$, its \emph{standard permutation} $\pi_u$ is defined by: $\pi_u(i)<\pi_u(j)$ if $u_i < u_j$, or $u_i = u_j$ and $i < j$. For example, the standard permutation of $101100$ is $ \pi_{101100} =\bigl(\begin{smallmatrix} 1 & 2 & 3 & 4 & 5 & 6 \\ 4 & 1 & 5 & 6 & 2 & 3 \end{smallmatrix}\bigr)$. A word $u$ is the BWT of some word if and only if its standard permutation is cyclic. Higgins~\cite{DBLP:journals/tcs/Higgins12} observed that a word $u$ is the BWT of a binary de Bruijn word of order $k$ if and only if $\pi_u$ is cyclic and $u=\tau(z)$ for some word $z$ of length $2^{k-1}$, where $\tau$ is the Thue--Morse morphism. 
Indeed, in a binary de Bruijn word of order $k$, each factor $z$ of length $k-1$ occurs preceded by $0$ and $1$, so in the matrix of sorted rotations, the two consecutive rows starting with $z$ end with $0$ and $1$, hence the BWT of the de Bruijn word $u$ is a word in $\{01,10\}^+$. For example, $1010=\tau(11)$, and $1010$ is the BWT of a de Bruijn word of order $2$ (namely the de Bruijn word $v=0011$ of order $2$). The binary words of length $8$ whose standard permutation is cyclic and that are images under $\tau$ of words of length $4$ are $10011010=\tau(1011)$ and $10100110=\tau(1101)$, which are the BWTs, respectively, of the order $3$ de Bruijn words $0w1=00010111$ and $00011101$. \section{Factors and Scattered Subwords}\label{sec:bal} A binary word of length $n$ has at most $2^{k+1}-1+\binom{n-k+1}{2}$ distinct factors, where $k$ is the unique integer such that $2^k+k-1 \leq n \leq 2^{k+1}+k$~\cite{DBLP:journals/gc/Shallit93}. Equivalently, the maximum number of distinct factors of a binary word of length $n$ is \begin{equation}\label{eq:maxbinary}d(n)=\sum_{i=0}^n\min(2^i,n-i+1) \end{equation} and for each $n$ there are binary words realizing this bound. \begin{table}[ht] \begin{center} \begin{tabular}{l!{\hspace{0.1em}}*{21}{wr{1.75em}}} $n$ & 1 & 2& 3& 4& 5& 6& 7& 8& 9& 10& 11 & 12 & 13 &14 &15 \\ \hline $d(n)$ & $2$& $4$& $6$& $9$& $13$& $17$& $22$& $28$& $35$& $43$ & $51$ & $60$ & $70$ & $81$ & $93$ \\[2mm] \end{tabular} \caption{The maximum number of distinct factors of a word of length $n$.} \end{center} \end{table} The words $v=0011$ and $w=001011$ both have the maximum number of distinct factors a word of the same length can have. The word $v$ has $9$ distinct factors: $\epsilon$, $0$, $00$, $001$, $0011$, $01$, $011$, $1$, and $11$; while $w$ has $17$ distinct factors: $\epsilon$, $0$, $00$, $001$, $0010$, $00101$, $001011$, $01$, $010$, $0101$, $01011$, $011$, $1$, $10$, $101$, $1011$, and $11$. \bigskip Given a word $u$ of length $n$, a set $A\subseteq \{1,\ldots,n\}$ is an \emph{attractor} for $u$ if every factor of $u$ has at least one occurrence in $u$ crossing a position in $A$~\cite{DBLP:journals/corr/abs-1709-05314}. For example, $A=\{2,3\}$ is an attractor of $v=0011$. Moreover, $A$ is minimal, since if a word contains two different characters, all its attractors must have cardinality at least $2$. The shortest binary word having no attractor of size $2$ is $w=001011$, up to mirror image. \bigskip A factor $x$ of a word $u$ is \emph{left} (resp.~\emph{right}) \emph{special} if $0x$ and $1x$ (resp.~$x0$ and $x1$) are factors of $u$; it is \emph{bispecial} if it is both left and right special. For example, the only bispecial factor of $v=0011$ is $\epsilon$; while the bispecial factors of $w=001011$ are: $\epsilon$, $0$, $01$, and $1$. A word of length $n$ can have at most $n-2$ distinct bispecial factors~\cite{Del99,DBLP:journals/tcs/CarpiL01,DBLP:journals/ipl/Raffinot01}. Let me call a binary word of length $n$ \emph{highly bispecial} if it has the maximum number of distinct bispecial factors among the binary words of length $n$. For example, for every $n\geq 4$, words of length $n$ with exactly $n-2$ distinct bispecial factors are $001^{n-3}0$ and $01^{n-2}0$. \begin{proposition} For every $n\geq 4$, the lexicographically smallest highly bispecial word of length $n$ is $001^{n-3}0$, with the exception of $n=6$, for which the lexicographically smallest highly bispecial word is $w=001011$. 
\end{proposition} \begin{proof}[Sketch of proof] For every $n\neq 6$, there are exactly three binary highly bispecial words of length $n$, namely $aab^{n-3}a$, $ab^{n-3}aa$, and $ab^{n-2}a$. For $n=6$, we have the same words plus $w$. \end{proof} \bigskip A word $x$ is a \emph{minimal forbidden factor} (a.k.a.~\emph{minimal forbidden word} or \emph{minimal absent word}) of a word $u$ if $x$ is not a factor of $u$ but every proper factor of $x$ is. For example, $v=0011$ is a minimal forbidden factor of $w=001011$. The set of minimal forbidden factors of a word uniquely characterizes it. The minimal forbidden factors of $v=0011$ are $000$, $10$, and $111$. The word $w=001011$, instead, has 6 minimal forbidden factors: $000$, $0011$, $100$, $1010$, $110$, and $111$; this is actually the maximum number of minimal forbidden factors a binary word of length $6$ can have. In fact, a binary word of length $n>2$ has at most $n$ distinct minimal forbidden factors~\cite{DBLP:journals/tcs/MignosiRS02}. One may wonder whether the minimal forbidden factors of a palindrome are always palindromes. The answer is no: the shortest palindrome starting with $w=001011$, that is, the word $0010110100$ (see~\cite[Sec.~2.3]{DBLP:conf/cai/Berstel07}), is a palindrome of minimal length having a minimal forbidden factor that is not a palindrome, namely the word $v=0011$ (and its mirror image). \bigskip A binary word $u$ is \emph{balanced} if for every pair of factors $x,y$ of $u$ of the same length, the occurrences of $0$ (or, equivalently, of $1$) in $x$ and $y$ differ by at most one. Balanced binary words are precisely the finite factors of Sturmian words (Sturmian words are infinite words with $n+1$ distinct factors of length $n$ for every $n\geq 0$; for more on Sturmian words see, e.g.,~\cite{LothaireAlg}). Every balanced word is rich~\cite{DelGlZa08}. A binary word $u$ is unbalanced (i.e., not balanced) if and only if there exists a palindrome $z$ such that $0z0$ and $1z1$ are both factors of $u$~\cite{LothaireAlg}. So, both $v=0011$ and $w=001011$ are unbalanced (taking $z=\varepsilon$). Therefore, they cannot appear as factors in any Sturmian word. Actually, the shortest unbalanced words are $v$ and its mirror image. A \emph{minimal unbalanced word} is an unbalanced word such that all its proper factors are balanced. The words $v=0011$ and $w=001011$ are minimal unbalanced words. Minimal unbalanced words have been characterized~\cite{DBLP:journals/jcss/Fici14}: \begin{proposition} A word $u=azb$, $\{a,b\}=\{0,1\}$, is a minimal unbalanced word if and only if the word $bza$ is a proper power of a Lyndon balanced word or its mirror image. \end{proposition} Since $01$ is a Lyndon balanced word, any word of the form $1(10)^{n-1}0$, $n> 1$, or its mirror image $0(01)^{n-1}1$ is a minimal unbalanced word -- and in this latter case one has the same words of Proposition~\ref{hmld}, since $0(01)^{n-1}1=00(10)^{n-2}11$. Lyndon balanced words are also called \emph{lower Christoffel words}. A fundamental property of Lyndon words is the following: if $u$ and $z$ are Lyndon words and $u<z$ (where $<$ denotes the lexicographic order induced by $0<1$) then $uz$ is a Lyndon word. This property is not preserved in the (sub)class of lower Christoffel words, as the following example (see~\cite{LapointePhd}) shows: $001$ and $011$ are lower Christoffel words, but $001\cdot 011=w$ is not, since it is not balanced. 
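Balancedness can be tested by brute force over all pairs of equal-length factors. The following Python sketch (function names are mine) confirms that $v$ and $w$ are minimal unbalanced words, and that $001$ and $011$ are balanced while their product $w=001\cdot 011$ is not.
\begin{verbatim}
def is_balanced(u):
    # equal-length factors contain numbers of 1s that differ by at most one
    for length in range(1, len(u) + 1):
        counts = {u[i:i + length].count("1") for i in range(len(u) - length + 1)}
        if max(counts) - min(counts) > 1:
            return False
    return True

def is_minimal_unbalanced(u):
    # u is unbalanced, but every proper factor of u is balanced
    proper = {u[i:j] for i in range(len(u)) for j in range(i + 1, len(u) + 1)} - {u}
    return not is_balanced(u) and all(is_balanced(f) for f in proper)

print(is_minimal_unbalanced("0011"), is_minimal_unbalanced("001011"))   # True True
print(is_balanced("001"), is_balanced("011"), is_balanced("001011"))    # True True False
\end{verbatim}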
In fact, Borel and Laubie~\cite{JTNB_1993} proved that if $u$ and $z$ are lower Christoffel words and $u<z$, then $uz$ is a lower Christoffel word if and only if \[\det \begin{pmatrix} |u|_0 &\ |z|_0 \\ |u|_1 &\ |z|_1 \end{pmatrix}=1. \] \bigskip Let $u$ be a word of length $n$. A \emph{scattered subword} of length $l$ of $u$ is any word obtained by concatenating the characters appearing in $l$ distinct positions (even not contiguous). The set of these $l$ positions is called an \emph{embedding} of the scattered subword. For example, the scattered subwords of length $4$ of $w=001011$ are $0001$, $0011$, $0111$, and $1011$. The word $v=0011$ is a scattered subword of the word $w=001011$ (this is precisely the example given in~\cite{DBLP:journals/tcs/Metivier85}) and has 5 embeddings in $w$ (see Fig.~1 in\cite{DBLP:conf/lata/BoassonC15}). Clearly, every binary word contains a palindromic scattered subword of length at least half of its length -- a power of the prevalent character. A word is called \emph{minimal palindromic} if it contains no palindromic scattered subword longer than half of its length. Holub and Saari~\cite{DBLP:journals/dam/HolubS09} proved that minimal palindromic binary words are abelian unbordered, i.e., no prefix has the same number of $0$s as the suffix of the same length. The words $v=0011$ and $w=001011$ are minimal palindromic words (and therefore are abelian unbordered). \bibliographystyle{abbrv} \begin{thebibliography}{10} \bibitem{DBLP:journals/combinatorics/AlloucheCS98} J.-P. Allouche, J.~D. Currie, and J.~O. Shallit. \newblock {Extremal Infinite Overlap-Free Binary Words}. \newblock {\em Electron. J. Comb.}, 5, 1998. \bibitem{DBLP:journals/dm/Au15} Y.~H. Au. \newblock {Generalized de Bruijn words for primitive words and powers}. \newblock {\em Discret. Math.}, 338(12):2320--2331, 2015. \bibitem{DBLP:journals/siamcomp/BannaiIINTT17} H.~Bannai, T.~I, S.~Inenaga, Y.~Nakashima, M.~Takeda, and K.~Tsuruta. \newblock {The "Runs" Theorem}. \newblock {\em {SIAM} J. Comput.}, 46(5):1501--1514, 2017. \bibitem{anti} A.~Baranwal, J.~D. Currie, L.~Mol, P.~Ochem, N.~Rampersad, and J.~Shallit. \newblock {Antisquares and critical exponents}. \newblock {\em Discret. Math. Theor. Comput. Sci.}, 25(2):\#11, 2023. \bibitem{DBLP:conf/soda/BassinoCN04} F.~Bassino, J.~Cl{\'{e}}ment, and C.~Nicaud. \newblock {Lyndon words with a fixed standard right factor}. \newblock In J.~I. Munro, editor, {\em Proceedings of the Fifteenth Annual {ACM-SIAM} Symposium on Discrete Algorithms, {SODA} 2004, New Orleans, Louisiana, USA, January 11-14, 2004}, pages 653--654. {SIAM}, 2004. \bibitem{DBLP:journals/ipl/BecherH11} V.~Becher and P.~A. Heiber. \newblock {On extending de Bruijn sequences}. \newblock {\em Inf. Process. Lett.}, 111(18):930--932, 2011. \bibitem{Berstel} J.~Berstel. \newblock {Axel Thue’s papers on repetitions in words: a translation}. \newblock {\em Publications du LaCIM}, 20, 1995. \bibitem{DBLP:conf/cai/Berstel07} J.~Berstel. \newblock {Sturmian and Episturmian Words (A Survey of Some Recent Results)}. \newblock In S.~Bozapalidis and G.~Rahonis, editors, {\em Algebraic Informatics, Second International Conference, {CAI} 2007, Thessaloniki, Greece, May 21-25, 2007, Revised Selected and Invited Papers}, volume 4728 of {\em Lecture Notes in Computer Science}, pages 23--47. Springer, 2007. \bibitem{DBLP:conf/lata/BoassonC15} L.~Boasson and O.~Carton. \newblock {Rational Selecting Relations and Selectors}. 
\newblock In A.~Dediu, E.~Formenti, C.~Mart{\'{\i}}n{-}Vide, and B.~Truthe, editors, {\em Language and Automata Theory and Applications - 9th International Conference, {LATA} 2015, Nice, France, March 2-6, 2015, Proceedings}, volume 8977 of {\em Lecture Notes in Computer Science}, pages 716--726. Springer, 2015. \bibitem{DBLP:journals/combinatorics/BorchertR15} A.~Borchert and N.~Rampersad. \newblock Words with many palindrome pair factors. \newblock {\em Electron. J. Comb.}, 22(4):4, 2015. \bibitem{JTNB_1993} J.-P. Borel and F.~Laubie. \newblock {Quelques mots sur la droite projective r\'{e}elle}. \newblock {\em Journal de th\'eorie des nombres de Bordeaux}, 5(1):23--51, 1993. \bibitem{BrHaNiRe04} S.~Brlek, S.~Hamel, M.~Nivat, and C.~Reutenauer. \newblock On the palindromic complexity of infinite words. \newblock {\em International Journal of Foundations of Computer Science}, 15:293--306, 2004. \bibitem{DBLP:journals/tcs/BulteauV20} L.~Bulteau and S.~Vialette. \newblock {Recognizing binary shuffle squares is NP-hard}. \newblock {\em Theor. Comput. Sci.}, 806:116--132, 2020. \bibitem{DBLP:journals/tcs/CarpiL01} A.~Carpi and A.~de~Luca. \newblock {Words and special factors}. \newblock {\em Theor. Comput. Sci.}, 259(1-2):145--182, 2001. \bibitem{DBLP:journals/ejc/CarpiL04} A.~Carpi and A.~de~Luca. \newblock {Harmonic and gold Sturmian words}. \newblock {\em Eur. J. Comb.}, 25(5):685--705, 2004. \bibitem{DBLP:journals/combinatorics/CurrieL16} J.~D. Currie and P.~Lafrance. \newblock Avoidability index for binary patterns with reversal. \newblock {\em Electron. J. Comb.}, 23(1):1, 2016. \bibitem{DBLP:journals/dmtcs/CurrieR10} J.~D. Currie and N.~Rampersad. \newblock {Cubefree words with many squares}. \newblock {\em Discret. Math. Theor. Comput. Sci.}, 12(3):29--34, 2010. \bibitem{Del99} A.~de~Luca. \newblock {On the combinatorics of finite words}. \newblock {\em Theoret. Comput. Sci.}, 218:13--39, 1999. \bibitem{DelGlZa08} A.~de~Luca, A.~Glen, and L.~Q. Zamboni. \newblock Rich, {S}turmian, and trapezoidal words. \newblock {\em Theoret. Comput. Sci.}, 407:569--573, 2008. \bibitem{DBLP:journals/tcs/LucaM94} A.~de~Luca and F.~Mignosi. \newblock {Some Combinatorial Properties of Sturmian Words}. \newblock {\em Theor. Comput. Sci.}, 136(2):361--285, 1994. \bibitem{DBLP:journals/tcs/DroubayJP01} X.~Droubay, J.~Justin, and G.~Pirillo. \newblock {Episturmian words and some constructions of de Luca and Rauzy}. \newblock {\em Theor. Comput. Sci.}, 255(1-2):539--553, 2001. \bibitem{DOO24} L.~Dvořáková, P.~Ochem, and D.~Opočenská. \newblock {Critical Exponent of Binary Words with Few Distinct Palindromes}. \newblock {\em Electron. J. Comb.}, 31(2), 2024. \bibitem{DBLP:journals/jcss/Fici14} G.~Fici. \newblock {On the structure of bispecial Sturmian words}. \newblock {\em J. Comput. Syst. Sci.}, 80(4):711--719, 2014. \bibitem{FSS:2024} G.~Fici, J.~Shallit, and J.~Simpson. \newblock Some remarks on palindromic periodicities. \newblock ArXiv preprint arXiv:2407.10564 [math.CO]. Available at \url{https://arxiv.org/abs/2407.10564}., 2024. \bibitem{FiZa13} G.~Fici and L.~Q. Zamboni. \newblock On the least number of palindromes contained in an infinite word. \newblock {\em Theoret. Comput. Sci.}, 481:1--8, 2013. \bibitem{DBLP:journals/jct/FraenkelS98} A.~S. Fraenkel and J.~Simpson. \newblock {How Many Squares Can a String Contain?} \newblock {\em J. Comb. Theory, Ser. {A}}, 82(1):112--120, 1998. \bibitem{DBLP:journals/dm/FredricksenM78} H.~Fredricksen and J.~Maiorana. 
\newblock {Necklaces of beads in k colors and k-ary de Bruijn sequences}. \newblock {\em Discret. Math.}, 23(3):207--210, 1978. \bibitem{DBLP:journals/aam/FridPZ13} A.~E. Frid, S.~Puzynina, and L.~Q. Zamboni. \newblock On palindromic factorization of words. \newblock {\em Adv. Appl. Math.}, 50(5):737--748, 2013. \bibitem{DBLP:conf/dlt/FrosiniMRRS22} A.~Frosini, I.~Mancini, S.~Rinaldi, G.~Romana, and M.~Sciortino. \newblock {Logarithmic Equal-Letter Runs for BWT of Purely Morphic Words}. \newblock In V.~Diekert and M.~V. Volkov, editors, {\em Developments in Language Theory - 26th International Conference, {DLT} 2022, Tampa, FL, USA, May 9-13, 2022, Proceedings}, volume 13257 of {\em Lecture Notes in Computer Science}, pages 139--151. Springer, 2022. \bibitem{DBLP:journals/iandc/GabricHS22} D.~Gabric, S.~Holub, and J.~O. Shallit. \newblock {Maximal state complexity and generalized de Bruijn words}. \newblock {\em Inf. Comput.}, 284:104689, 2022. \bibitem{GlJuWiZa09} A.~Glen, J.~Justin, S.~Widmer, and L.~Q. Zamboni. \newblock Palindromic richness. \newblock {\em European J. Combin.}, 30:510--531, 2009. \bibitem{DBLP:journals/corr/GuoSS15} C.~Guo, J.~O. Shallit, and A.~M. Shur. \newblock On the combinatorics of palindromes and antipalindromes. \newblock {\em CoRR}, abs/1503.09112, 2015. \bibitem{DBLP:journals/jct/HarjuHZ15} T.~Harju, M.~Huova, and L.~Q. Zamboni. \newblock On generating binary words palindromically. \newblock {\em J. Comb. Theory, Ser. {A}}, 129:142--159, 2015. \bibitem{DBLP:journals/ejc/HeHNT24} X.~He, E.~Huang, I.~Nam, and R.~Thaper. \newblock {Shuffle squares and reverse shuffle squares}. \newblock {\em Eur. J. Comb.}, 116:103883, 2024. \bibitem{DBLP:journals/eatcs/HenshallRS12} D.~Henshall, N.~Rampersad, and J.~O. Shallit. \newblock {Shuffling and Unshuffling}. \newblock {\em Bull. {EATCS}}, 107:131--142, 2012. \bibitem{DBLP:journals/tcs/Higgins12} P.~M. Higgins. \newblock {Burrows-Wheeler transformations and de Bruijn words}. \newblock {\em Theor. Comput. Sci.}, 457:128--136, 2012. \bibitem{DBLP:journals/dam/HolubS09} S.~Holub and K.~Saari. \newblock On highly palindromic words. \newblock {\em Discret. Appl. Math.}, 157(5):953--959, 2009. \bibitem{DBLP:conf/focs/KolpakovK99} R.~M. Kolpakov and G.~Kucherov. \newblock {Finding Maximal Repetitions in a Word in Linear Time}. \newblock In {\em 40th Annual Symposium on Foundations of Computer Science, {FOCS} '99, 17-18 October, 1999, New York, NY, {USA}}, pages 596--604. {IEEE} Computer Society, 1999. \bibitem{LapointePhd} M.~Lapointe. \newblock {\em {Combinatoire des mots: Mots parfaitement amassants, triplets de Markoff et graphes chenilles}}. \newblock PhD thesis, UQAM, 2020. \bibitem{DBLP:journals/tc/Lempel70} A.~Lempel. \newblock {On a Homomorphism of the de Bruijn Graph and its Applications to the Design of Feedback Shift Registers}. \newblock {\em {IEEE} Trans. Computers}, 19(12):1204--1209, 1970. \bibitem{LothaireAlg} M.~Lothaire. \newblock {\em {Algebraic Combinatorics on Words}}. \newblock Encyclopedia of Mathematics and its Applications. Cambridge Univ. Press, New York, NY, USA, 2002. \bibitem{DBLP:journals/fuin/MantaciRRRS17} S.~Mantaci, A.~Restivo, G.~Rosone, F.~Russo, and M.~Sciortino. \newblock {On Fixed Points of the Burrows-Wheeler Transform}. \newblock {\em Fundam. Informaticae}, 154(1-4):277--288, 2017. \bibitem{DBLP:journals/dm/Melancon00} G.~Melan{\c{c}}on. \newblock {Lyndon factorization of Sturmian words}. \newblock {\em Discret. Math.}, 210(1-3):137--149, 2000. 
\bibitem{DBLP:journals/tcs/Metivier85} Y.~M{\'{e}}tivier. \newblock Calcul de longueurs de cha{\^{\i}}nes de r{\'{e}}{\'{e}}criture dans le mono{\"{\i}}de libre. \newblock {\em Theor. Comput. Sci.}, 35:71--87, 1985. \bibitem{DBLP:journals/tcs/MignosiRS02} F.~Mignosi, A.~Restivo, and M.~Sciortino. \newblock {Words and forbidden factors}. \newblock {\em Theor. Comput. Sci.}, 273(1-2):99--117, 2002. \bibitem{DBLP:conf/cwords/MolRS23} L.~Mol, N.~Rampersad, and J.~O. Shallit. \newblock {Dyck Words, Pattern Avoidance, and Automatic Sequences}. \newblock In A.~E. Frid and R.~Mercas, editors, {\em Combinatorics on Words - 14th International Conference, {WORDS} 2023, Ume{\aa}, Sweden, June 12-16, 2023, Proceedings}, volume 13899 of {\em Lecture Notes in Computer Science}, pages 220--232. Springer, 2023. \bibitem{DBLP:journals/dam/Pansiot84} J.~Pansiot. \newblock {A propos d'une conjecture de F. Dejean sur les r{\'{e}}p{\'{e}}titions dans les mots}. \newblock {\em Discret. Appl. Math.}, 7(3):297--311, 1984. \bibitem{DBLP:journals/corr/abs-1709-05314} N.~Prezza. \newblock String attractors. \newblock {\em CoRR}, abs/1709.05314, 2017. \bibitem{DBLP:journals/ipl/Raffinot01} M.~Raffinot. \newblock {On maximal repeats in strings}. \newblock {\em Inf. Process. Lett.}, 80(3):165--169, 2001. \bibitem{RaSh05} N.~Rampersad and J.~Shallit. \newblock Words avoiding reversed subwords. \newblock {\em J. Combin. Math. Combin. Comput.}, 54:157--164, 2005. \bibitem{Ra03} O.~Ravsky. \newblock On the palindromic decomposition of binary words. \newblock {\em Journal of Automata, Languages and Combinatorics}, 8(1):75--83, 2003. \bibitem{DBLP:conf/litp/RestivoS84} A.~Restivo and S.~Salemi. \newblock Overlap-free words on two symbols. \newblock In M.~Nivat and D.~Perrin, editors, {\em Automata on Infinite Words, Ecole de Printemps d'Informatique Th{\'{e}}orique, Le Mont Dore, France, May 14-18, 1984}, volume 192 of {\em Lecture Notes in Computer Science}, pages 198--206. Springer, 1984. \bibitem{DBLP:journals/dam/RichommeS99} G.~Richomme and P.~S{\'{e}}{\'{e}}bold. \newblock {Characterization of Test-sets for Overlap-free Morphisms}. \newblock {\em Discret. Appl. Math.}, 98(1-2):151--157, 1999. \bibitem{DBLP:journals/gc/Shallit93} J.~O. Shallit. \newblock {On the maximum number of distinct factors of a binary string}. \newblock {\em Graphs Comb.}, 9(2-4):197--200, 1993. \bibitem{Simpson:2024} J.~Simpson. \newblock Palindromic periodicities. \newblock ArXiv preprint arXiv:2402.05381 [math.CO]. Available at \url{https://arxiv.org/abs/2402.05381}., 2024. \end{thebibliography} \end{document}
2501.00105v1
http://arxiv.org/abs/2501.00105v1
A configuration space model for algebraic function spaces
\documentclass[12pt, twoside]{article} \usepackage[utf8]{inputenc} \usepackage[T1]{fontenc} \usepackage{libertineRoman} \usepackage{amsthm, amsmath, amssymb, amsfonts, enumerate,mathrsfs} \usepackage[a4paper,margin=3.4cm, headsep=15pt, headheight=3.5cm]{geometry} \newcommand\smvee{\raise0.3ex\hbox{$\scriptscriptstyle\vee$}} \usepackage{tikz-cd, tikz} \usepackage{float} \usetikzlibrary[patterns, decorations.pathreplacing] \usetikzlibrary{shapes,trees, positioning} \usetikzlibrary{hobby} \usetikzlibrary{intersections} \usepackage{caption} \usepackage{graphicx, relsize} \usetikzlibrary{matrix, calc} \usepackage{mathtools} \usepackage{spectralsequences} \DeclarePairedDelimiter{\ceil}{\lceil}{\rceil} \DeclarePairedDelimiter{\floor}{\lfloor}{\rfloor} \usepackage{stackengine} \parskip 1em \newcommand\stackotimes[2]{ \mathrel{\stackunder[2pt]{\stackon[2pt]{$\otimes$}{$\scriptscriptstyle#1$}}{ $\scriptscriptstyle#2$}}} \DeclareUnicodeCharacter{200E}{} \numberwithin{equation}{subsection} \newtheorem{theorem}{Theorem}[subsection] \newtheorem{nmtheorem}{Theorem} \newtheorem{mmtheorem}{Theorem} \newtheorem{prop}[subsubsection]{Proposition} \newtheorem{project}[subsection]{Project} \newtheorem{lemma}[subsubsection]{Lemma} \newtheorem{notations}[subsection]{Notations} \newtheorem{cor}[nmtheorem]{Corollary} \newtheorem{defn}[subsection]{Definition} \newcommand{\myparagraph}{ \refstepcounter{subsubsection} \textbf{\thesubsubsection} } \theoremstyle{definition} \newtheorem{construction}[subsubsection]{Construction} \newtheorem{remark}[subsubsection]{Remark} \usepackage[colorlinks]{hyperref} \usepackage[nameinlink]{cleveref} \hypersetup{colorlinks,breaklinks, citecolor=[rgb]{0.5,0.0,0.0}, linkcolor=[rgb]{0.2,0.3,0.8}} \newcommand{\etale}{étale } \def\Sum{\sum\nolimits} \def\Prod{\prod\nolimits} \def\C{\mathbb{C}} \def\H{\mathscr{H}} \def\L{\mathcal{L}} \def\Q{\mathbf{Q}} \def\Qb{\mathbb{Q}} \def\Fb{\mathbb{F}} \def\A{\mathcal{A}} \def\bSigma{\mathbf{\Sigma}} \def\alg{\mathrm{Alg}} \def\mor{\mathrm{Mor}} \def\morp{\mathrm{Mor}_{\d}(X,\P^N)} \def\bmor{\overline{\mathrm{Mor}}} \def\bmorp{\overline{\mathrm{Mor}}_{\d}(X,\P^N)} \def\map{\mathrm{Map}} \def\jfrak{\mathfrak{j}} \def\ifrak{\mathfrak{i}} \def\P{\mathbb{P}} \def\Pc{\mathcal{P}} \def\N{\mathbb{N}} \def\d{\mathbf{d}} \def\E{\mathscr{E}} \def\Z{\mathcal{Z}} \def\V{\mathscr{V}} \def\T{\mathcal{T}} \def\F{\mathcal{F}} \def\I{\mathcal{I}} \def\M{\mathcal{M}} \def\X{\mathcal{X}} \def\Y{\mathcal{Y}} \def\Sp{\mathrm{Sp}} \def\Shv{\mathrm{Shv}} \def\Hom{\mathrm{Hom}} \def\et{\acute{e}t} \newcommand\Br{\mathrm{Branch}} \newcommand\Trace{\mathrm{Trace}} \newcommand\Aut{\mathrm{Aut}} \def\1{\mathbb{1}} \def\res{\mathit{Res}} \def\opn{\mathcal{O}_{\mathbb{P}^N}} \def\O{\mathcal{O}} \def\g{\mathbf{g}} \def\morn{\mathrm{Mor}_n(C,\P^r)} \def\mornone{\mathrm{Mor}_{n-1}(C,\P^r)} \def\morntwo{\mathrm{Mor}_{n-2}(C,\P^r)} \def\mornp{\mathrm{Mor}_{n-p}(C,\P^r)} \def\sgn{\mathit{sgn}} \def\pic{\mathrm{Pic}} \def\divisor{\mathrm{div}} \def\deg{\mathit{deg\,}} \def\rk{\mathit{rank\,}} \def\pconf{\mathit{PConf}} \def\uconf{\mathrm{UConf}} \def\length{\mathit{length}} \def\cmord{\mathrm{Mor}_{\vec{d}}(C,\PSig)} \def\pmord{\mathrm{Mor}_{\vec{d}}(\P^1,\PSig)} \def\Aut{\mathit{Aut}} \newcommand\Sym{\mathrm{Sym}} \def\typo#1{\alert{#1}} \def\ci{\perp\!\!\!\perp} \date{} \newcommand\Frob{\mathrm{Frob}} \newcommand{\holim}{\mathop{\mathrm{holim}}} \newcommand{\coholim}{\mathop{\mathrm{coholim}}} \DeclareMathOperator{\Shom}{\mathscr{H}\text{\kern -3pt {\calligra\large om}}\,} 
\DeclareMathOperator{\Sext}{\mathscr{E}\text{\kern -3pt {\calligra\large xt}}\,} \usepackage{fancyhdr} \newcommand\shorttitle{{A configuration space model for algebraic function spaces }} \newcommand\authors{{Oishee Banerjee}} \fancyhf{} \fancyhf[CE,CO]{\thepage} \renewcommand\headrulewidth{0.2pt} \fancyhead[CE]{\small\scshape\shorttitle} \fancyhead[CO]{\small\scshape\authors} \pagestyle{fancy} \usepackage{titlesec} \titleformat{\section} {\large\bfseries\scshape} {\thesection} {1em} {} \titleformat{\subsection} {\normalsize\bfseries\scshape} {\thesubsection} {1em} {} \titleformat{\subsubsection} {\normalsize\bfseries} {\thesubsubsection} {1em} {} \title{A configuration space model for algebraic function spaces \\ \vspace{1mm} \textit{\normalsize {To Benson Farb on his {57$^{th}$} birthday}}} \author{Oishee Banerjee} \begin{document} \maketitle \begin{abstract}We prove that the space of algebraic maps between two smooth projective varieties, under certain conditions, admit a configuration space model, thereby obtaining an algebro-geometric analogue of Bendersky-Gitler's result (\cite[Theorem 7.1]{BG91}) on topological function spaces. Our result should be a thought of as a natural higher dimensional counterpart of \cite[Theorem 3]{Ban24}. \end{abstract} \section{Introduction}\label{sec:introduction} \paragraph{Motivation.} The study of spaces of \emph{continuous} maps between two topological spaces, and their surprising connections with configuration spaces, has a rich history spanning several decades. Foundational works by Anderson, Bendersky-Gitler, Snaith, Cohen-May-Taylor, Arone, Ahearn-Kuhn, and others have explored this extensively (see \cite{BG91, Anderson72}, and also \cite{AK02, Arone99} and the references therein). One key result is the stable splitting of function spaces under certain constraints (e.g., connectivity conditions on the range), where the components of the splitting include, among other structures, configuration spaces on the domain. An exact analogue of this phenomenon for algebraic maps between algebraic varieties is unrealistic due to the rigidity of morphisms between varieties. Nevertheless, in this note, we show that the moduli space of algebraic morphisms between two smooth projective varieties, under certain strong conditions on the range, can, in a sense, admit a configuration space model. This establishes an algebro-geometric analogue of Bendersky-Gitler's result on the space of continuous maps between topological spaces (\cite[Theorem 7.1]{BG91}). Our result can be seen as a natural higher-dimensional counterpart of \cite[Theorem 3]{Ban24}. \paragraph{Setup.} Throughout the paper, we fix smooth projective varieties $X$ and $Y$ over an algebraically closed field of characteristic $0$. We fix $\Upsilon$ a polarization on $Y$ i.e. we fix an embedding $\upsilon: Y\hookrightarrow \P^N$ where $\Upsilon=\upsilon^* \opn(1)$ and $N=\dim |\Upsilon|$, the rank of the complete linear system $|\Upsilon|$. A morphism $f: X\to\P^N$ corresponds to a line bundle $L$ on $X$ such that $L = f^*\O_{\P^N}(1)$. Let $\d:= c_1(L) \in N^1(X)$. A morphism $f:X\to Y$ corresponds to a line bundle $L$ on $X$ such that $L=f^*\Upsilon = {f^*} {\circ} \upsilon^* \opn(1)$. We say $f$ has \emph{degree} $\d$ if $c_1(L) = \d$. Let $\mor_{\d}(X,Y)$ be the moduli space of morphisms $f: X\to Y$ such that $c_1(f^*\Upsilon) =\d$. 
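To fix ideas, consider the familiar special case $X=\P^1$ and $Y=\P^N$ with its standard polarization (an illustration only, not needed in the sequel): here $N^1(X)\cong \Z$, a class $\d=d\geq 1$ corresponds to the line bundle $\O_{\P^1}(d)$, and $\mor_{d}(\P^1,\P^N)$ is the space of tuples $[s_0:\ldots:s_N]$ of degree-$d$ binary forms with no common zero, considered up to simultaneous scaling.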
We say a \emph{numerical class $\d\in N^1(X)$ separates $r$ points} if it is ample and if every line bundle $L\in \pic_{\d}(X)$ separates $r$ points\footnote{There are closely related (stronger) notions like $r$-very ampleness, $r$-spannedness etc. which have been studied for decades, pioneered by the work of Beltrametti, Sommese and others, however, the weaker notion of separating points (as opposed to jets) is sufficient for the purpose of this note. Aumonier in \cite{Aumonier2024} calls this property of line bundles $k$-interpolating. However, `separating points' is a standard phrase in algebraic geometry to describe the phenomenon, so we adhere to that.} (see \ref{lemma:r-separating}). Note that if a line bundle $L$ (respectively, a numerical class $\d$) separates $r$ points, then it separates $(r-1)$ points; let $r(L)$ (respectively, $r(\d)$) denote the maximum number $r$ for which $L$ (respectively, $\d$) separates $r$-points. If $\d \in N^1(X)$ separates at least one point, we define\begin{equation}\label{eq:rd} r(\d): = \max \{r: \d \text{ separates } r \text{ points} \}. \end{equation} Furthermore, we say $\d$ is \emph{acyclic} of all higher cohomologies of all line bundles in its numerical equivalence class vanish. Now we state our theorem. \begin{theorem}\label{theorem}Let $X$ be a smooth projective variety of dimension $n$, $(Y,\Upsilon)$ a polarized smooth projective variety, and $N:=\dim |\Upsilon|$. Let $\d$ be acyclic, and let $r(\d)$ be as in \eqref{eq:rd}. Then: \begin{enumerate} \item\label{statement1} Then there exists a first quadrant spectral sequence of Galois representations/mixed Hodge structures:\begin{gather}\label{ss:theorem} E_1^{p,q} \implies H^{p+q}_c(\mor_{\d}(X,Y);\Q) \end{gather} with \begin{gather}\label{ss:E1terms} E_1^{p,*} = (H^*(\mathring{X}^{p};\Q)\otimes \sgn_{S_p})^{S_p}\otimes H^*(\pic_{\d}(X);\Q)\otimes H_c^*(Y(D_{p-1});\Q) \end{gather} for all $p\leq r(\d)+1$, where $Y(D_p)$ are certain auxilliary schemes defined in, and satisfying the conditions of \S \ref{para:keyassumption}, and $$\mathring{X}^{p}:= X^p-\{\text{ diagonals}\}.$$ \item\label{statement2}\textbf{Homological stability:} In the case when $Y=\P^N$, the spectral sequence \eqref{ss:theorem} results in \begin{gather}\label{ss:E2terms} E_2^{p,q}=E_{\infty}^{p,q} \end{gather} for all $0\leq p\leq r(\d)+1$, and $\dim(\mor_{\d}(X,Y))-r(\d) \leq q\leq \dim(\mor_{\d}(X,Y))$. \item\label{statement3} If $\delta:= \d-c_1(K_X)$ is ample, then the stable bound $r(\d)$ satisfies the following equality:\begin{equation}\label{eq:rdbound} r(\d) =\left\lfloor\min_{\substack{[W]\in \mathrm{CH}_k(X),\\ 1\leq k\leq n}} \frac{2(\delta^k.[W])^{\frac{1}{k}}-n^2+n-1}{2}\right\rfloor -1 \end{equation} where $\mathrm{CH}_k(X)$ is the $k^{th}$ Chow group of $X$. \end{enumerate} \end{theorem} \paragraph{Some remarks and context} \begin{enumerate} \item \textbf{Poincare/Koszul duality.} A recurring phenomenon in the study of such algebraic function spaces, as demonstrated in this note, as well as in \cite{Ban24}, is the appearance of a Koszul-type cochain complex (see \eqref{isom:twistedbysgn}) in the analysis of the Poincare dual of $\mor_{\d}(X,Y)$. Whereas it appears naturally in our proof via the theory of hypercovers, it does beg the question of whether there is a factorization homology approach to proving something like Theorem \ref{theorem}. 
An affirmative answer has already been provided by Ho (\cite{Ho20}) in the case $X=\P^1$ and $Y=\P^n$ (because in such cases, algebraic maps simply boil down to studying zero-cycles on $X$) and there is paramount evidence of an affirmative answer for curves of higher genera by exploiting algebraic non-abelian Poincare duality in the sense of Gaitsgory-Lurie (\cite[\S 3]{GL17}). However, a naive translation of the curve-case to higher dimensional domains have several obvious pitfalls, some of which becomes clear as we go through the proof, and discussing the rest would take us too far afield (see e.g. \cite[\S 1.5]{GL19}). \item \textbf{$\mor_{\d}(X,Y)$ vs. its topological analogue $\mathrm{Top}_{\d}(X,Y)$.} In a direction distinct from the study of the relationship between (continuous) function spaces and configuration spaces, significant attention has been given to the comparison of spaces of holomorphic/algebraic functions with that of continuous functions between complex holomorphic manifolds since the '70s (for a brief history of it see \cite[\S 1.1]{Aumonier2024}). Two notable examples are: Mostovoy's work (\cite{Mostovoy06}) where $X,Y$ are both projective spaces, and a recent work of Aumonier's (see \cite{Aumonier2024}) where he compares stable homology of $\mor_{\d}(X,\P^N)$ to that of continuous maps $X \to \P^N$ for arbitrary smooth projective $X$, thus significantly extending Mostovoy's result. If our theorem relating the (cohomology of the) space of algebraic maps to configuration spaces appears somewhat unexpected, it is worth highlighting that results by Anderson, Bendersky-Gitler (\cite{Anderson72, BG91}), when combined with those of Aumonier or Mostovoy, lend credence to such a connection. Our theorem not only confirms this connection but also strengthens it in two key ways: (a) it demonstrates that the Galois representations or mixed Hodge structures on both sides are preserved, and (b) it underscores the role of the intersection theory of $X$, as evidenced by Equation \ref{eq:rdbound}. Notably, pulling back a 'configuration space model' for the space of continuous maps via Segal-type results from Aumonier or Mostovoy offers no insight into Hodge structures. In contrast, our approach—grounded entirely in algebraic geometry—makes these structures explicit and central to the discussion. The only prior instance of an explicit comparison between the space of algebraic function spaces and configuration spaces appears in the author's earlier work \cite{Ban24, Banerjee2022}--- those were in the case when $X$ is a curve. \item \text{Adjoint line bundles and point-separation.} The property of separating finitely many points is not numerical property of a line bundle $L$. They are, however, numerical properties on the \emph{adjoint} line bundle $L\otimes K_X$ (or, more generally, on $L\otimes K_X^{\otimes m}$ for suitable values of $m$). The interested reader may refer to the works of Angehrn-Siu, Reider, Demailly, Ein-Lazarsfeld-Nakamaye, Kollar etc (see \cite{AS95} and the references therein). Which is why explicit bounds like \eqref{eq:rdbound} is available only for adjoints of line bundles, not the line bundles themselves. \item \textbf{On the auxilliary schemes $Y(D_p)$.} The condition of the `auxilliary schemes' $Y(D_p)$ being of \emph{Leray-Hirsch type} (see paragraph \ref{para:keyassumption}) over $\pic_{\d}(X)$ in a range of values of $p$, is absolutely indispensable. 
For a general $Y$, the schemes $Y(D_p)$ can often be empty, and in the cases when they are non-empty, proving their non-emptiness is usually highly nontrivial. A case in point is geometric Manin's conjecture: even in the seemingly simple case of $X=\P^1$, if $Y$ is a low degree hypersurface in a sufficiently high dimensional projective space, the proof is extremely difficult--- as shown by Browning-Sawin in \cite{BS20}. See \S\ref{para:keyassumption} for a further discussion on the terminology and the importance of the additional condition of being Leray-Hirsch type. \item \textbf{Divergence of $r(\d)$.} Whereas the range $Y$ often poses insurmountable difficulties, tackling an arbitrary domain is relatively simpler, at least in the context of questions like stable homology of these function spaces--- it only needs a part of the intersection theory of $X$ as an input. Observe that \eqref{eq:rdbound} for the stable bound depends solely on the intersection theory of $X$. In particular, the higher the positivity of $\delta$, the higher the intersection numbers $\delta^k.[W]$, and since the positivity of $\delta$ diverges, so does $r(\d)$. \item \textbf{Spaces of sections of vector bundles.} Observing that sections of vector bundles on a smooth projective variety $X$ can themselves be viewed as algebraic function spaces of a specific kind (locally maps to $\mathbb{A}^N$ on $X$), a careful reader following our proof would notice that our methods translate almost verbatim—indeed, more straightforwardly in the absence of geometric complexities from $Y$ or $\pic_{\d}(X)$—to providing an alternative proof of the cohomological results by Das and Howe (see \cite{DH24}). One only needs to replace the condition of vector bundles \emph{separating points} by various degrees of \emph{jet-ampleness}. While the author does not claim expertise in their approach, it seems plausible that Das and Howe's cohomological inclusion-exclusion framework could, in principle, be adapted to derive results analogous to our Theorem \ref{theorem}. \end{enumerate} \noindent \textbf{Method of Proof.} Our proof is sheaf-theoretic, morally similar to \cite{Ban24, Banerjee2022}. However, due to the oft-encountered dichotomy between the algebro-geometric complexity of varieties of dimension $1$ and higher, we need to incorporate modern perspectives on certain classical results. A fundamental component of our approach is the use of cohomological descent for proper hypercoverings. We construct a natural compactification of $\mor_{\d}(X, Y)$, and a proper hypercover \eqref{def:Xr} augmented on its Poincare dual that admits cohomological descent. At the level of sheaves, this hypercover--- via the passage through the \emph{symmetric simplicial category}, originally introduced in the context of additive K-theory by Feigin and Tsygan (\cite{FT87}), and later further developed by Fiedorowicz and Loday (\cite{FL91})--- gives rise to a Koszul-type complex (see \eqref{isom:twistedbysgn}) whose cohomology produces \eqref{spectralsequence}. A minor technical note: we work in the derived $\infty$-category of constructible sheaves with coefficients in $\Q$-vector spaces, as developed in \cite{GL19, Lurie17}, adopting the six-functor formalism of Liu-Zheng \cite[\S 9.3]{LZ24}. While we employ $\infty$-categorical formalism, it is largely cosmetic—most of the proof of Theorem \ref{theorem} translates seamlessly into the language of the triangulated derived category of constructible sheaves.
The only caveat is the need to analyze group invariants of objects that traditionally reside in the triangulated derived category of constructible sheaves. The $\infty$-categorical language offers a cleaner presentation of our core ideas, avoiding technical distractions that are well-established in the literature. Our estimate of $r(\d)$ in \eqref{eq:rdbound} follows directly from Angehrn-Siu's work on a conjecture of Fujita (see \cite[Theorem 0.1]{AS95}). \paragraph{Acknowledgement.} \textnormal{My deepest thanks to Robert Lazarsfeld for sharing his expertise on positivity properties of line bundles.} \section{The space of morphisms from $X$ to $Y$} To clearly convey the key ideas, we begin by developing our framework for the special case where $Y=\P^N$. In \S\ref{subsection:morXY} we will generalize these methods to the case of an arbitrary polarized smooth projective variety $Y$. Throughout this section, $X$ is a smooth projective variety of dimension $n$ over a fixed algebraically closed field of characteristic $0$. \subsection{Preliminaries on positivity of line bundles} Positivity of line bundles can be studied from various angles: from the concept of separation of points, jets at points, cohomological vanishing theorems, etc., and these angles are all deeply intertwined. In this subsection we record some definitions and facts pertaining to Theorem \ref{theorem}. \begin{defn}\label{def:r-separating} Let $r$ be a positive integer. We say a line bundle $L$ separates $r$ points if it is ample and its global sections separate any set of $r$ points in $X$, i.e. for all $Z\subset X$ with $|Z|=r$, one has \[H^1(L\otimes \mathcal{I}_Z)=0\] or equivalently, if the restriction map \[H^0(X,L)\twoheadrightarrow H^0(X,L\otimes \mathcal{O}_Z)\] is surjective, where $\mathcal{I}_Z$ and $\mathcal{O}_Z$ denote the ideal sheaf and structure sheaf of $Z$ respectively. We say a numerical class $\d\in N^1(X)$ separates $r$ points if it is ample and every line bundle $L\in\pic_{\d}(X)$ separates $r$ points. \end{defn} Let us now record a theorem by Angehrn-Siu (see \cite[Theorem 0.3]{AS95}), suitably paraphrased to meet our requirements:\begin{theorem}\label{theorem:AS} Let $r$ be a positive integer. If \begin{equation}\label{ineq:AS} (L^k \cdot [W])^{\frac{1}{k}}>\frac{1}{2}n(n+2r-1) \end{equation} for any irreducible subvariety $W$ of dimension $1 \leq k \leq n=\dim X$ in $X$, then the global holomorphic sections of $L \otimes K_X$ over $X$ separate any set of $r$ distinct points of $X$. \end{theorem} Observe that by \cite[Proposition 20.1.4]{Vakil15}, the intersection number in the inequality \eqref{ineq:AS} depends only on the numerical equivalence class of $L$. \begin{defn} We say a line bundle $L$ is \emph{acyclic} if $H^i(X,L)=0$ for all $i>0$. We say a numerical equivalence class $\d\in N^1(X)$ is acyclic if $H^i(X,L)=0$ for all $i>0$ and all $L\in \pic_{\d}(X)$. \end{defn} \noindent Note that acyclicity is not a numerical property. We now prove a simple but important lemma. \begin{lemma}\label{lemma:r-separating} Let $r$ be a positive integer. Then there exists $\d\in N^1(X)$ such that $\d$ separates $r$ points and is acyclic. \end{lemma} \begin{proof} Pick $\delta \in N^1(X)$ ample. Tensoring up if necessary, and in view of the observation that \eqref{ineq:AS} depends only on the numerical equivalence class of $L$, we can ensure that $\delta$ satisfies the inequality \eqref{ineq:AS} on the intersection numbers in Theorem \ref{theorem:AS}.
Because $\delta$ is ample, by the Kodaira vanishing theorem (\cite{Kodaira53}), for all $L\in \pic_{\delta}(X)$ one has $H^i(X,K_X\otimes L)=0$ for all $i>0$. Furthermore, since $\delta$ has been chosen to satisfy the inequality \eqref{ineq:AS}, $L\otimes K_X$ separates $r$ points by Theorem \ref{theorem:AS}, for all $L\in \pic_{\delta}(X)$. Then the numerical equivalence class given by $\d :=c_1(K_X)+\delta$ satisfies the conclusion of the lemma. \end{proof} \noindent\myparagraph For the rest of the paper we fix $\delta\in N^1(X)$ ample, such that $\d=c_1(K_X)+\delta$ is acyclic and separates $r$ points (we know such a $\d$ exists by Lemma \ref{lemma:r-separating}), and denote by $r(\d)$ the maximum number of points $\d$ separates i.e. the maximum $r$ for which $\d$ separates $r$ points. By Angehrn-Siu's estimates (see \eqref{ineq:AS}), we have \begin{equation}\label{eq:rdestimate} r(\d) =\left\lfloor\min_{\substack{[W]\in \mathrm{CH}_k(X),\\ 1\leq k\leq n}} \frac{2(\delta^k.[W])^{\frac{1}{k}}-n^2+n-1}{2}\right\rfloor . \end{equation} \subsection{Geometry and topology of $\mor_{\d}(X,\P^N)$} In this subsection we construct a natural compactification of $\mor_{\d}(X,\P^N)$, and a hypercover augmented over its `discriminant locus'. \noindent \myparagraph Let $\d$ be as above. The space $\mor_{\d}(X,\P^N)$ is given by: \begin{align} \mor_{\d}(X,\P^N)=\bigg\{(L,[s_0:\ldots : s_N]): L\in \pic_{\d}(X), s_i\in \Gamma(X,L), 0\leq i\leq N, \nonumber\\ \bigcap_{0\leq i\leq N}\mathrm{div}(s_i)=\emptyset\bigg\} \end{align} \noindent Define the following space, which we dub as the `discriminant locus': \begin{align}\label{def:discriminant} \Z_{\d}(X,\P^N):= \bigg\{(L,[s_0:\ldots: s_N]): L\in \pic_{\d}(X), s_i\in \Gamma(X,L), 0\leq i\leq N, \nonumber\\ \bigcap_{0\leq i\leq N}\mathrm{div}(s_i)\neq\emptyset\bigg\}. \end{align} Whereas morphisms $X\to \P^N$ are given by basepoint free $(N+1)$-tuples of global sections of line bundles in $\pic_{\d}(X)$, the space $\Z_{\d}(X,\P^N)$ is given by $(N+1)$-tuples of global sections that vanish simultaneously at some point in $X$. \subsubsection{Compactification of $\mor_{\d}(X,\P^N)$}\label{para:Poincare} Let $\Pc(\d)$ denote the Poincare bundle on $X\times \pic_{\d}(X)$; denoting by \begin{equation}\label{map:pushforwardPoincare} \nu:X\times \pic_{\d}(X)\to \pic_{\d}(X) \end{equation} the projection to the second factor $\pic_{\d}(X)$. Note that $\d$ being acyclic implies $\nu_*\Pc(\d)$ is a locally free sheaf, and equivalently, a vector bundle on $\pic_{\d}(X)$, of rank \begin{gather}\label{eq:N_d} N_d:= \int_X \mathrm{ch}(L)\,\, \mathrm{td}(X)\end{gather} given by the Hirzebruch-Riemann-Roch theorem (see \cite{Hirzebruch1978}; note that $\mathrm{ch}(L)$ depends only on $\d$, and $td(X)$, the Todd class of $X$, is a geometric property of $X$). The fibre of $\nu_*\Pc(\d)^{\oplus(N+1)}$ over a point $L\in\pic_{\d}(X)$ is $\Gamma(X,L)$. In turn, we can now see that there is a natural open immersion \begin{gather}\label{map:openimmersion} \jfrak: \mor_{\d}(X,\P^N) \hookrightarrow \P(\nu_*\Pc(\d)^{\oplus(N+1)}) \end{gather} the latter being the (relative) projectivization of the vector bundle $\nu_*(\Pc(\d))^{\oplus (N+1)}$ over $\pic_{\d}(X)$, and the complement of (the image of) $\mor_{\d}(X,\P^N)$ in $\P(\nu_*\Pc(\d)^{\oplus(N+1)})$ is $\Z_{\d}(X,\P^N)$. 
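Returning, purely for illustration, to the special case $X=\P^1$ and $\d=d\geq 1$: the scheme $\pic_{d}(\P^1)$ is a single point, $\nu_*\Pc(d)$ is the $(d+1)$-dimensional space $\Gamma(\P^1,\O_{\P^1}(d))$ (in agreement with \eqref{eq:N_d}, which gives $N_d=d+1$), so the projectivization above is simply $\P^{(d+1)(N+1)-1}$, parametrizing $(N+1)$-tuples of degree-$d$ binary forms up to simultaneous scaling, and $\Z_{d}(\P^1,\P^N)$ is the closed locus of tuples of forms admitting a common zero.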
Let us denote $\P(\nu_*\Pc(\d)^{\oplus(N+1)})$ by $\X_{-1}$ henceforth; in other words, the points of $\X_{-1}$ are described by:\begin{gather}\label{def:X_{-1}} \X_{-1}=\bigg\{(L,[s_0:\ldots: s_N]): L\in \pic_{\d}(X), s_i\in \Gamma(X,L), 0\leq i\leq N\bigg\}. \end{gather} \noindent\myparagraph\textbf{Hypercover over $\Z_{\d}(X,\P^N)$.} For each $r\geq 0$, define the following spaces: \begin{align}\label{def:Xr} \X_r:= \bigg\{\big((L,[s_0:\ldots: s_N]), (x_0,\ldots, x_r)\big): L\in \pic_{\d}(X), s_i\in \Gamma(X,L), 0\leq i\leq N, \nonumber\\ x_j\in \bigcap_{0\leq i\leq N}\mathrm{div}(s_i), \text{ for all }0\leq j\leq r\bigg\}\nonumber\\\subset X^{r+1}\times \Z_{\d}(X,\P^N) \end{align} We now aim to understand the geometry of the spaces $\X_r$. For starters, we prove the following: \begin{lemma}\label{lemma:X0} The space $\X_0$ is a smooth projective variety. \end{lemma} \begin{proof}[Proof of Lemma \ref{lemma:X0}] Let \begin{gather} \mathrm{pr}_{23}:X\times X\times \pic_{\d}(X)\to X\times \pic_{\d}(X) \end{gather} be the projection to the last two factors, \begin{gather} \mathrm{pr}_{13}:X\times X\times \pic_{\d}(X)\to X\times \pic_{\d}(X) \end{gather} be the projection to the first and third factors, and \begin{gather} \mathrm{pr}_{12}:X\times X\times \pic_{\d}(X)\to X\times X \end{gather} be the projection to the first two factors. Let $D_0(X)$ denote the diagonal in $X\times X$. Then define \begin{gather}\label{edf:E0} \E_0 := (\mathrm{pr}_{13})_{*}(\mathrm{pr}^*_{12}\mathcal{I}_{{D_0}(X)}\otimes \mathrm{pr}^*_{23}\Pc(\d)), \end{gather} where $\mathcal{I}_{{D_0}(X)}$ is the ideal sheaf of the diagonal $D_0(X)$. That $\E_0$ is a locally free sheaf is a simple consequence of Grauert's base change theorem (whose proof is complex analytic) or its algebraic rendition by Grothendieck (see \cite[Theorem 12.11]{Harshorne77} or \cite[EGA III 7.7]{Grothendieck63}). Indeed, first note that $\mathrm{pr}_{13}$ is a flat projective morphism, and $D_0(X)\times \pic_{\d}(X)$ is flat over $X\times \pic_{\d}(X)$. Then, letting $$\F:=\mathrm{pr}^*_{12}\mathcal{I}_{{D_0}(X)}\otimes \mathrm{pr}^*_{23}\Pc(\d)$$ note that \begin{gather} R^i{\mathrm{pr}_{13}}_*\F=0 \text{ for all } i>0 \end{gather} because its stalks are $H^i(X,L\otimes \I_{x})$ for a point $(x,L)\in X\times \pic_{\d}(X)$. Now, for any finite subset $Z\subset X$ with $\# Z\leq r$, consider the short exact sequence of coherent sheaves \begin{gather}\label{seq:I_Z} 0\to \I_Z\to \mathcal{O}_X\to \O_Z\to 0, \end{gather}tensor with $L$ throughout and take cohomology to obtain \begin{gather}\label{leq:I_ZL} 0\to H^0(X,L\otimes \I_Z)\to H^0(X,L)\to H^0(X,L\otimes \O_Z)\to \cdots \\\cdots\to H^i(X,L\otimes \I_Z)\to H^i(X,L) \to H^i(X,L\otimes \O_Z)\to \cdots\nonumber \end{gather}Noting that $H^i(X,L\otimes \O_Z)=0$ for all $i>0$ (since $L\otimes \O_Z$ is a skyscraper sheaf), we obtain $$H^i(X,L\otimes \I_Z)\xrightarrow{\cong} H^i(X,L)=0$$ for all $i>0$, where the latter isomorphism to $0$ is because $\d$ is acyclic (for $i=1$ this also uses the surjectivity of $H^0(X,L)\to H^0(X,L\otimes \O_Z)$, i.e. that $L$ separates the points of $Z$). Taking $r=1$ we obtain that $\E_0$ is locally free over $X\times \pic_{\d}(X)$: the fibre of the corresponding vector bundle at a point $(x,L)\in X\times \pic_{\d}(X)$ is $\Gamma(X,L\otimes \I_x)$, i.e. global sections of $L$ that vanish at $x$. Finally, the observation that $\X_0$ is, by definition, the projectivization of the vector bundle $\bigoplus_{i=0}^{N}\E_0$ over $X\times\pic_{\d}(X)$, concludes the proof of the lemma.
\end{proof} \noindent Thanks to Lemma \ref{lemma:X0}, the natural map\begin{gather} \X_0\to \X_{-1}\nonumber \\ \big((L,[s_0:\ldots: s_N]), x\big)\mapsto (L,[s_0:\ldots: s_N]) \end{gather} is projective, and hence proper; furthermore, its image is clearly $\Z_{\d}(X,\P^N)$, and $$\X_0\twoheadrightarrow \Z_{\d}(X,\P^N)$$ is birational---because a generic element in $\Z_{\d}(X,\P^N)$ is an $(N+1)$-tuple of sections in $\Gamma(X,L)$ for some $L\in \pic_{\d}(X)$ that have exactly one common zero, which, in turn, holds because, by our assumption, $r(\d)\geq 1$, i.e. $L$ is very ample, and one can always choose $(N+1)$ hyperplane sections of the embedding of $X$ by the complete linear system $|L|$ such that those $(N+1)$ hyperplanes intersect at exactly one point--- and since $\X_0$ is smooth, we have now concluded the following: \begin{lemma}\label{lemma:ResolutionOfSingularities} The natural map $\X_0 \to \Z_{\d}(X,\P^N)$ is a resolution of singularities. \end{lemma} \noindent Observe that for all $r\geq 0$, we have, by its very definition, \begin{gather}\label{def:XrFibreProduct} \X_r= \underbrace{\X_0\times_{\X_{-1}} \X_0 \times_{\X_{-1}} \cdots\times_{\X_{-1}}\X_0}_{r+1}, \end{gather} and that there are natural maps \begin{gather}\label{map:pi_r} \pi_r: \X_r\to \Z_{\d}(X,\P^N) \\ \big((L,[s_0:\cdots: s_N]), (x_0,\cdots, x_r)\big)\mapsto (L,[s_0:\cdots: s_N]). \nonumber \end{gather} Recalling the standard fact that proper maps are stable under base change \cite[Corollary 4.8]{Harshorne77}, paired with our earlier observation that $\X_0\to \X_{-1}$ is projective, and hence proper, we come to the following conclusion: \begin{lemma}\label{lemma:hypercover} The simplicial scheme $\pi_{\bullet}:\X_{\bullet} \to \Z_{\d}(X,\P^N)$ is a proper hypercover augmented over $\Z_{\d}(X,\P^N)$, with the $i^{th}$ face maps $\X_r\to \X_{r-1}$ given by forgetting the $i^{th}$-factor from the expression \eqref{def:XrFibreProduct}, and the degeneracy maps given by the diagonal embeddings. \end{lemma} \noindent The proof of Lemma \ref{lemma:X0} translates almost verbatim to give us the geometry of $\X_r$ for any $r\leq r(\d)$. For $0<r\leq r(\d)$, the schemes $\X_r$ are not smooth; however, they are naturally projectivizations of coherent sheaves that are locally free over the locally closed strata provided by the diagonals in $X^{r+1}$. Indeed, let \begin{gather} \mathrm{pr}_{23}:X^{r+1}\times X\times \pic_{\d}(X)\to X\times \pic_{\d}(X) \end{gather} be the projection to the last two factors, \begin{gather} \mathrm{pr}_{13}:X^{r+1}\times X\times \pic_{\d}(X)\to X^{r+1}\times \pic_{\d}(X) \end{gather} be the projection to the first and third factors, and \begin{gather} \mathrm{pr}_{12}:X^{r+1}\times X\times \pic_{\d}(X)\to X^{r+1}\times X \end{gather} be the projection to the first two factors, and let $D_r(X)\subset X^{r+1}\times X$ be the closed subscheme given by \[D_r(X):= \bigg\{\big((x_0,\cdots, x_r),x\big): x=x_i \text{ for some } 0\leq i\leq r\bigg\}\] Then define \begin{gather}\label{edf:Er} \E_r := (\mathrm{pr}_{13})_{*}(\mathrm{pr}^*_{12}\mathcal{I}_{{D_r}(X)}\otimes \mathrm{pr}^*_{23}\Pc(\d)), \end{gather} where $\mathcal{I}_{{D_r}(X)}$ is the ideal sheaf of $D_r(X)\subset X^{r+1}\times X$.
Then, following through the proof of Lemma \ref{lemma:X0} and invoking the sequences \eqref{seq:I_Z} and \eqref{leq:I_ZL}, we see that $\E_r$ is locally free on each of the locally closed strata provided by the intersection of the diagonals, its stalk at a point $$\big((x_0,\cdots, x_r), L\big)\in X^{r+1}\times\pic_{\d}(X)$$ (note that $x_i$s need not be distinct) is $$\Gamma(X,L\otimes \I_{\mathrm{Supp(\sum_i x_i)}})$$ where $\mathrm{Supp}(\sum_i x_i)$ is the (set-theoretic) support of the $0$-cycle $\sum_i x_i$ corresponding the $(r+1)$-tuple $(x_0,\cdots, x_r)$ of points in $X^{r+1}$; in turn, if $X^{r+1}_{(m)}\subset X^{r+1}$ is defined as the locus of $(r+1)$-tuples of points of which exactly $m$ are distinct, then we have the following: \begin{lemma}\label{lemma:Xr} For all $0\leq r\leq r(\d)$, we have \begin{gather} \X_r=\underline{\mathrm{Proj}}_{X^{r+1}\times \pic_{\d}(X)}\Sym(\E_r^{\oplus(N+1)}) \end{gather}where $$\E_r\to X^{r+1}\times \pic_{\d}(X)$$ is a stratified vector bundle: for $1\leq m\leq r+1$, on each locally closed strata $X_{(m)}^{r+1}\times \pic_{\d}(X)$, it is a vector bundle of rank $(N_d-m)(N+1)$. \end{lemma}We let $\mathring{X}^{r+1}$ denote $X_{(r+1)}^{r+1} = {X}^{r+1}-\text{ diagonals}$. \begin{remark} The notion of a stratified vector bundle has several approaches: all minor variants of each other, depending on what one's end goal is. A modern reference which speaks of its development and recent applications is by Ross (\cite{Ross2024}). In our case, the meaning has been made clear by specifying the strata explicitly, which does not require any outside knowledge on the theory of stratified vector bundles. The interested reader can refer to \cite{Ross2024} and the references therein. \end{remark} \subsection{Geometry and topology of $\mor_{\d}(X,Y)$}\label{subsection:morXY} Recall, from the introduction, that we fixed $Y$ to be a polarized smooth projective variety, $\Upsilon$ the polarization on $Y$ i.e. we fix an embedding $\upsilon: Y\hookrightarrow \P(\Gamma(Y,\Upsilon)^{*})\cong \P^N$, where $\Upsilon=\upsilon^* \opn(1)$ and $N=\dim |\Upsilon|$, the rank of the complete linear system $|\Upsilon|$. Furthermore, let $\g=(g_1,\cdots, g_m)$ denote the set of generators of the homogenous ideal of $Y$ in $\Sym(\Gamma(Y,\Upsilon))$. \subsubsection{Compactification of $\mor_{\d}(X,Y)$} Now we construct a natural compactification of $\mor_{\d}(X,Y)$, and a hypercover augmented over its `discriminant locus'. Let $\d\in N^1(X)$ be as in Lemma \ref{lemma:r-separating}. The space $\mor_{\d}(X,Y)$ is given by: \begin{align} \mor_{\d}(X,Y)=\bigg\{(L,[s_0:\ldots : s_N]): L\in \pic_{\d}(X), s_i\in \Gamma(X,L), 0\leq i\leq N, \nonumber\\ \bigcap_{0\leq i\leq N}\mathrm{div}(s_i)=\emptyset, \,\, \g(s_0,\cdots, s_N)=0\bigg\} \end{align}\noindent Thus, $\mor_{\d}(X,Y)$ is naturally a closed (not necessarily nonempty) subscheme of $\mor_{\d}(X,\P^N)$. As before, we define the `discriminant locus' as: \begin{align}\label{def:discriminantY} \Z_{\d}(X,Y):= \bigg\{(L,[s_0:\ldots: s_N]): L\in \pic_{\d}(X), s_i\in \Gamma(X,L), 0\leq i\leq N, \nonumber\\ \bigcap_{0\leq i\leq N}\mathrm{div}(s_i)\neq\emptyset, \,\, \g(s_0,\cdots, s_N)=0\bigg\}. 
\end{align} \subsubsection{Hypercover over $\Z_{\d}(X,Y)$} For each $r\geq 0$, we define the following spaces: \begin{align}\label{def:XrY} \X_r(Y):= \bigg\{\big((L,[s_0:\ldots: s_N]), (x_0,\ldots, x_r)\big): L\in \pic_{\d}(X), s_i\in \Gamma(X,L), 0\leq i\leq N, \nonumber\\ x_j\in \bigcap_{0\leq i\leq N}\mathrm{div}(s_i), \text{ for all }0\leq j\leq r, \,\, \g(s_0,\cdots, s_N)=0\bigg\}\nonumber\\\subset X^{r+1}\times \Z_{\d}(X,Y) \end{align}At this point, we refer back to paragraph \ref{para:Poincare}. The natural map in \ref{map:openimmersion}: $$\mor_{\d}(X,\P^N)\hookrightarrow \P(\nu_*\Pc(\d)^{\oplus(N+1)})$$ was shown to be an open immersion. We denote, by $\X_{-1}(Y)$, the following closed subscheme of $\P(\nu_*\Pc(\d)^{\oplus(N+1)})$---\begin{gather} \X_{-1}(Y)= \bigg\{(L,[s_0:\ldots: s_N]): L\in \pic_{\d}(X), s_i\in \Gamma(X,L), 0\leq i\leq N, \nonumber\\ \g(s_0,\cdots, s_N)=0\bigg\} \end{gather}Observe that when $Y=\P^N$ we get back our old definitions of $\X_r$, $r\geq -1$. Observe that we have a natural open immersion \begin{gather}\label{map:jfrak} \jfrak: \mor_{\d}(X,Y)\hookrightarrow\X_{-1}(Y) \end{gather} and its closed complement is $\Z_{\d}(X,Y)$; we let \begin{gather}\label{map:ifrak} \ifrak: \Z_{\d}(X,Y)\hookrightarrow\X_{-1}(Y) \end{gather}denote the closed inclusion. For an arbitrary $Y$, the space $\X_0(Y)$ as defined above is hardly ever smooth, so the statement of Lemma \ref{lemma:X0} no longer holds. However, a part of the proof of Lemma \ref{lemma:X0} clearly translates to the case of an arbitrary $Y$ to show that $\X_0(Y)$ is a projective variety. Indeed, in the proof of Lemma \ref{lemma:X0} we saw that $\X_0$ is the projectivization of the vector bundle $\nu_*(\E_0)$ over $X\times\pic_{\d}$. By definition of $\X_0(Y)$ we have the following: \[ \begin{tikzcd} \X_0(Y) \arrow[rr, hook] \arrow[dr] && \X_0 \arrow[dl] \\ & X\times \pic_{\d}(X) \end{tikzcd} \] where the horizontal arrow is a closed embedding, the fibres of $\X_0(Y)$ over a point $(x,L)\in X\times \pic_{\d}(X)$ is given by the locus of $[s_0:\cdots: s_N]$ such that $$\g(s_0:\cdots:s_N)=0,$$ where $s_0,\ldots, s_N\in \Gamma(X,L\otimes \I_x)$. By a similar reasoning $\X_{-1}(Y)$ is also a projective variety: \[ \begin{tikzcd} \X_{-1}(Y) \arrow[rr, hook] \arrow[dr] && \X_{-1}\arrow[dl] \\ & \mathrm{Pic}_{\d}(X) \end{tikzcd} \] where the horizontal arrow is a closed embedding, the fibres of $\X_{-1}(Y)$ over a point $L\in \pic_{\d}(X)$ is given by the locus of $[s_0:\cdots: s_N]$ such that $\g(s_0:\cdots:s_N)=0,$ where $s_0,\ldots, s_N\in \Gamma(X,L)$. The natural map \begin{gather} \X_0(Y)\twoheadrightarrow \Z_{\d}(X,Y)\subset \X_{-1}(Y)\nonumber\\(L,[s_0:\cdots: s_N]), x\mapsto (L,[s_0:\cdots: s_N]) \end{gather}is projective, and hence proper. 
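\begin{remark} As an illustration of these definitions (in the simplest nontrivial case, and not used elsewhere), suppose $Y\subset \P^N$ is a hypersurface, so that $\g=(g)$ consists of a single homogeneous form $g$ of degree $e$. For $s_0,\ldots, s_N\in \Gamma(X,L)$, the expression $g(s_0,\ldots,s_N)$ is a global section of $L^{\otimes e}$, and the condition $g(s_0,\ldots,s_N)=0$ asks that this section vanish identically; since $\Gamma(X,L^{\otimes e})$ is finite dimensional, $\X_{-1}(Y)$ (and likewise $\X_0(Y)$) is cut out of $\X_{-1}$ (respectively $\X_0$) fibrewise over $\pic_{\d}(X)$ by finitely many equations of degree $e$ in the coefficients of the sections $s_i$. \end{remark}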
Also, for all $r\geq 0$, we have, by its very definition, \begin{gather}\label{def:XrFibreProductY} \X_r(Y)= \underbrace{\X_0(Y)\times_{\X_{-1}(Y)} \X_0(Y) \times_{\X_{-1}(Y)}\times \cdots\times_{\X_{-1}(Y)}\X_0(Y)}_{r+1}, \end{gather} and there are natural maps, as one would expect,\begin{gather}\label{map:pi_rY} \pi_r: \X_r(Y)\to \Z_{\d}(X,Y) \end{gather}given by composing the face maps, the $i^{th}$ face map given by forgetting the $i^{th}$ factor: $$ \X_r(Y)\to \X_{r-1}(Y).$$ Recalling the standard fact that proper maps are stable under base change \cite[Corollary 4.8]{Harshorne77}, paired with our earlier observation that $\X_0(Y)\to \X_{-1}(Y)$ is projective, and hence proper, we obtain a generalization of Lemma \ref{lemma:hypercover}: \begin{prop}\label{lemma:hypercoverY} The simplicial scheme $\pi_{\bullet}:\X_{\bullet}(Y) \to \Z_{\d}(X,Y)$ is a proper hypercover augmented over $\Z_{\d}(X,Y)$, with the $i^{th}$ face maps $\X_r(Y)\to \X_{r-1}(Y)$ given by forgetting the $i^{th}$-factor from the expression \eqref{def:XrFibreProductY}, and the degeneracy maps given by the diagonal embeddings. \end{prop} \noindent\myparagraph\label{para:keyassumption} \textbf{Leray-Hirsch type.} As noted earlier, for an arbitrary $Y$ there is no guarantee the schemes $\mor_{\d}(X,Y)$ or $\X_r(Y)$ are nonempty, and even if they are, there is no general recipe to understand their geometry. Note that for $Y=\P^N$, Lemma \ref{lemma:Xr} not only implies $\X_r$ is a stratified fibre bundle in a range of values of $r$, but being projectivization of vector bundles on those strata, it satisfies the Leray-Hirsch theorem. Throughout this paper we work on basis of the assumption that $\mor_{\d}(X,Y)$ is nonempty, and that $\X_r(Y)$, for all $r\leq r(\d)$, is nonempty, and satisfies $\Q$-Leray-Hirsch (i.e. it satisfies the Leray-Hirsch theorem with coefficients in $\Q$) on each locally closed strata $X_{(m)}^{r+1}\times\pic_{\d}(X)\subset X^{r+1}\times\pic_{\d}(X)$ for $1\leq m\leq r(\d)$. Let $Y(D_r)$ denote the fibre of $\X_r(Y)$ over $\mathring{X}^{r+1}\times\pic_{\d}(X)$ (where, recall that $\mathring{X}^{r+1}= X^{r+1}-\text{ diagonals}$). \begin{remark} There are no sufficient conditions one can impose on a reasonably large class of $Y$s to result in $\X_r(Y)$ carrying the structure of a stratified fibre bundle a la Lemma \ref{lemma:Xr}, let alone satisfy Leray-Hirsch. An analogue of Lemma \ref{lemma:Xr} is not hard to prove when $Y$ carries a nice universal torsor description (see the proof of \cite[Theorem 1]{Banerjee2022}, and also works on Manin's conjecture in \cite{BT98}, \cite{Pieropan16} and the references therein). However, for a general $Y$, a proof of such a fact, when true, is largely elusive, and is deeply rooted in the theory of algebraic cycles in $Y$--- for a detailed discussion on algebraic fibre bundles being of \emph{Leray-Hirsch} type, and its relation to Hodge conjecture, see Meng's work in \cite{Meng2021}. \end{remark} \section{Symmetric (co)simplicial sheaves} By Lemma \ref{lemma:hypercover}, the hypercover defined in \eqref{def:XrFibreProduct} admits cohomological descent--- which is really the main ingredient for proving Theorem \ref{theorem}. However, we study the hypercover \eqref{def:XrFibreProduct} as a $\Delta S$-scheme, as opposed to a simplicial scheme, where $\Delta S$ denotes \emph{symmetric simplicial category}. 
The advantage of working over $\Delta S$, instead of the standard simplicial category $\Delta$, is that the second statement of Theorem \ref{theorem} almost comes for free; however, working over $\Delta S$ results in considering (co)invariants under group actions in the derived category of constructible sheaves, an operation that do not behave well in triangulated categories. In this section, we collect the necessary facts about $\Delta S$, and then overcome the obstacle of working in the derived category of constructible sheaves by rephrasing and using cohomological descent in the $\infty$-category of sheaves as developed in \cite{GL19}. \subsection{$\Delta S$-hypercovers} The category $\Delta S$ has appeared in various forms throughout the literature, often studied independently with different motivations. Notable examples include Feigin and Tsygan's work on additive K-theory (\cite{FT87}), Pirashvili and Richter's reinterpretation of $\Delta S$ as the category of \emph{non-commutative sets} (\cite{PR02}), and investigations into \emph{symmetric homology} by Fiedorowicz and Loday (\cite{FL91}), as well as Krasauskas's independent development of related ideas (\cite{Krasauskas87}). In this note, we draw upon the framework for the \emph{symmetric simplicial category} $\Delta S$ established in the context of hypercovers in \cite[\S 2]{Ban24}, which, in turn, is primarily based on the notations set up in \cite{FL91}, quoting only the results needed here. For further details, the interested reader can consult the relevant sections of \emph{loc. cit}. \noindent\myparagraph Observe that by \cite[Definition 2.1]{Ban24}, the simplicial scheme $\X_{\bullet}$ is a $\Delta S$-object in the category of schemes. Indeed, the symmetric group $S_{r+1}$ acts freely on $\X_r$ by permuting the factors of the fibre product in \eqref{def:XrFibreProduct}, and one immediately sees that the action maps compose with the face and degeneracy maps of the simplicial scheme $\X_r$ as per the requirements of \cite[Definition 2.1]{Ban24}. Paired with Lemma \ref{lemma:hypercover} we obtain the following: \begin{lemma}\label{lemma:DeltaS-scheme} The proper hypercover $\pi_{\bullet}:\X_{\bullet} \to \Z_{\d}(X,\P^N)$ is a $\Delta S$-scheme augmented over $\Z_{\d}(X,\P^N)$, with the $i^{th}$ face map $\X_r\to \X_{r-1}$ given by forgetting the $i^{th}$-factor from the expression \eqref{def:XrFibreProduct}, and the degeneracy maps $\X_{r-1}\to \X_r$ given by the diagonal embeddings. \end{lemma} \subsubsection{Sheaves on schemes} For a scheme $B$, let $\Shv(B)$ be the derived $\infty$-category of sheaves with coefficients in $\Q$-vector spaces (where $\Q$ is a field of characteristic $0$ that contains the rational numbers), equipped with Grothendieck's six functor formalism. It is a stable $\infty$-category with a $t$-structure, whose heart is the abelian category of constructible sheaves of $\Q$-vector spaces on $B$. For details on the construction of the sheaf theory we use in this section, the reader is directed to Gaitsgory-Lurie's work (\cite[Sections 2 and 3]{GL19}). For homological algebra on $\Shv(B)$ the reader may refer to \cite[\S 1.2, \S 1.3]{Lurie17}. For the theory of proper descent in this setup, see Liu-Zheng's \cite{LZ24}. \subsubsection{$\Delta S$-sheaves}\label{subsubsec:DeltaS} A $\Delta S$-sheaf on a space $B$ is a functor\[\F:\Delta S\to \Shv(B).\] As is customary, we denote such a sheaf by $\F_{\bullet}$. Note that a $\Delta S$-sheaf is naturally a $\Delta$-sheaf i.e. 
a cosimplicial object in $\Shv(B)$; henceforth, unless otherwise stated, notations like $\F_{\bullet}$ will always mean a $\Delta S$-sheaf. For $\Delta S$-sheaves $\F_{\bullet}$ and $\mathcal{G}_{\bullet}$, let $\mathrm{Hom}^{\Delta S}_{\Shv(B)}(\F_{\bullet}, \mathcal{G}_{\bullet})$ (respectively, $\mathrm{Hom}^{\Delta}_{\Shv(B)}(\F_{\bullet}, \mathcal{G}_{\bullet})$) denote the $\Delta S$-sheaf (respectively, $\Delta$-sheaf) given by maps $\F_n\to \mathcal{G}_n$ in $\Shv(B)$ that commute with the face and degeneracy maps in $\Delta S$ (respectively, $\Delta$). Since a $\Delta S$-sheaf $\F_{\bullet}$ is a naturally a $\Delta$-sheaf, we can consider the corresponding Moore cochain complex, which we denote by \begin{gather} C^*\bigg( (\F_n, d)\bigg) \end{gather}where the differentials are, by definition, given by the alternating sum of face maps: $d=\sum_i(-1)^i d_i$ (see \cite[remarks 1.2.4.3 and 1.2.4.4]{Lurie17}). Moreover, for a $\Delta S$-sheaf $\F_{\bullet}$, we get a \emph{twisted-by-sign sheaf} for each $n$: \[\big(\F_n\otimes \sgn_{S_{n+1}}\big)^{S_{n+1}}\]where the natural action of $S_{n+1}$ on $\F_n$ is twisted by a sign (see a similar construction in the discussion preceding \cite[Theorem 6.9]{FL91}). This supplies us with a cochain complex \begin{gather} C^*\Big(\big(\F_n\otimes \sgn_{S_{n+1}}\big)^{S_{n+1}},d\Big) \end{gather}where the differential $d$ is given by the alternating sum of face maps: $d=\sum_i(-1)^i d_i$. The next lemma focuses on relating these two. \begin{lemma}\label{lemma:isomorphismDeltaS} Let $\F_{\bullet}$ be a $\Delta S$-sheaf on a scheme $B$. Then the following surjection is an isomorphism: \begin{gather} C^*\bigg( (\F_n, d)\bigg)\xrightarrow{\cong} C^*\Big(\big(\F_n\otimes \sgn\big)^{S_{n+1}},d\Big) \end{gather} \end{lemma} \begin{proof}[Proof of Lemma \ref{lemma:isomorphismDeltaS}] In the case when the $\Delta S$-sheaf is in the abelian category of constructible sheaves, this is proved in \cite[Lemma 2.7]{Ban24} by a straightforward and direct adaptation of the proof of \cite[Corollary 6.17]{FL91}. The proof of \cite[Corollary 6.17]{FL91} readily translates to an $\infty$-categorical analogue in $\Shv(B)$. Indeed, like \cite[\S 6]{FL91}, we consider the co-bisimplicial sheaf \begin{gather}\label{def:cobisimplicial} \mathcal{G}_{p,q}:=\Q[(S_p)^q]\otimes \F_q \end{gather} which converges to $\pi_n(\mathrm{Hom}^{\Delta S}_{\Shv(B)}(\underline{\Q}_B, \mathcal{F}_{\bullet}))$ (the latter is equivalent to what Fiederowicz-Loday call `symmetric (co)homology', see \cite[\S 6.6]{FL91}). The sheaf $\mathcal{G}_{p,q}$ is a filtered object in $\Shv(B)$ in the sense of \cite[Lemma 1.2.2.4]{Lurie17}, which allows us to adapt the proof of \cite[Theorem 6.9]{FL91}: taking its horizontal filtration, and noting that $\Q$ is a field of characteristic $0$, we obtain--- \begin{equation} E_1^{p,q}=\begin{cases} \big(\F_p\otimes \sgn\big)^{S_{p+1}}& q=0\\0 & \text{otherwise.} \end{cases} \end{equation} with differentials, naturally, given by the alternating sum of face maps. On the other hand, $\F_{\bullet}$, now being considered a $\Delta$-sheaf, via the $\infty$-categorical Dold-Kan (\cite[Theorem 1.2.4.1]{Lurie17}) results in the cochain complex $C^*\Big(\big(\F_p, d\big)\Big)$ which converges to $\pi_n(\mathrm{Hom}^{\Delta}_{\Shv(B)}(\underline{\Q}_B, \mathcal{F}_{\bullet}))$. By the cohomological analogue of \cite[Theorem 6.16]{FL91}, the two cohomologies are isomorphic, i.e. 
$$\pi_n(\mathrm{Hom}^{\Delta}_{\Shv(B)}(\underline{\Q}_B, \mathcal{F}_{\bullet}))\cong \pi_n(\mathrm{Hom}^{\Delta S}_{\Shv(B)}(\underline{\Q}_B, \mathcal{F}_{\bullet}))$$ for all $n$, which, in turn, proves our lemma. \end{proof} \section{Proof of Theorem \ref{theorem}}\label{sec:proof} Observe that statement \ref{statement3} of Theorem \ref{theorem} immediately follows from Lemma \ref{lemma:r-separating} and Angehrn-Siu's estimate \eqref{eq:rdestimate}. We now prove the part of the theorem pertaining an arbitrary polarized smooth projective $Y$, i.e. statement \ref{statement1}. And then we prove the special natural of the spectral sequence \ref{isom:result} for $Y=\P^N$. \subsection{Proof of Theorem \ref{theorem}, \ref{statement1}} \begin{proof}[Proof of Statement \ref{statement1}] By Proposition \ref{lemma:hypercoverY} (or, in the case of $Y=\P^N$, Lemma \ref{lemma:hypercover}), \begin{gather}\label{eq:cohomologicaldescent} \pi_{\bullet}:\X_{\bullet}\to \Z_{\d}(X,Y) \end{gather} admits cohomological descent. In other words, the unit map \begin{gather}\label{map:unit} \mathrm{id}_{\Z_{\d}(X,Y)}\to {\pi_{\bullet}}_*\,\pi_{\bullet}^* \end{gather} is a natural isomorphism on $\Shv(\Z_{\d}(X,Y))$. In turn, we have:\begin{align} \Hom(\underline{\Q}_{\Z_{\d}(X,Y)}, \underline{\Q}_{\Z_{\d}(X,Y)} ) &\cong \Hom\big(\underline{\Q}_{\Z_{\d}(X,Y)}, {\pi_{\bullet}}_*\,\pi_{\bullet}^*\underline{\Q}_{\Z_{\d}(X,Y)}\big) \\ &\cong C^*\Big(\Hom\big(\underline{\Q}_{\Z_{\d}(X,Y)}, {\pi_{n}}_* \underline{\Q}_{\X_n(Y)}\big)\Big)\\ & \cong C^*\Big(\Hom\big(\underline{\Q}_{\Z_{\d}(X,Y)}, ({\pi_{n}}_* \underline{\Q}_{\X_n(Y)}\otimes\sgn)^{S_{n+1}}\big)\Big),\label{isom:twistedbysgn} \end{align} where the first isomorphism is a direct consequence of the cohomological descent in \eqref{eq:cohomologicaldescent}, the second isomorphism follows from the fact that if $f$ is a proper map, then $f^*=f^{!}$, and the third isomorphism follows from Lemma \ref{lemma:isomorphismDeltaS}. Let us recollect the maps in \eqref{map:ifrak} and \eqref{map:jfrak}: \begin{gather} \mor_{\d}(X,Y)\stackrel{\jfrak}{\hookrightarrow}\X_{-1}(Y)\stackrel{\ifrak}{\hookleftarrow}\Z_{\d}(X,Y). \end{gather}where $\jfrak$ is an open immersion, and $\ifrak$ closed. So we obtain a cofibre sequence in $\Shv(\X_{-1}(Y))$:\begin{gather}\label{seq:openclosed} \jfrak_!\jfrak^!\underline{\Q}_{\X_{-1}(Y)} \to {\underline{\Q}}_{\X_{-1}(Y)} \to \ifrak_*\ifrak^*{\underline{\Q}}_{\X_{-1}(Y)} \end{gather} (which, observe, is equivalent to the localization distinguished triangle in the derived category of constructible sheaves). Now, noting that $$\ifrak^*{\underline{\Q}}_{\X_{-1}(Y)}\cong \underline{\Q}_{\Z_{\d}(X,Y)},$$ take $\Hom(\underline{\Q}_{\X_{-1}(Y)}, \text{---})$ of \eqref{seq:openclosed}, plug in \eqref{isom:twistedbysgn} and take global sections of the resulting complex to obtain the following spectral sequence: \begin{gather}\label{spectralsequence} E_1^{p,q}= \Big(H_c^q(\X_{p-1}(Y);\Q)\otimes \sgn\Big)^{S_{p}}\implies H_c^{p+q}(\mor_{\d}(X,Y);\Q) \end{gather}where, for $p=0$, we simply let $S_0$ denote the trivial group. Since all our constructions are algebraic, this is a spectral sequence of Galois representations/mixed Hodge structures, as the case might be. 
Recalling Lemma \ref{lemma:Xr} on the geometry of $\X_r$, and the assumption in paragraph \ref{para:keyassumption} for the geometry of $\X_r(Y)$, we see that for all $p\leq r(\d)-1$ the spectral sequence \ref{spectralsequence} reads as: \begin{flalign}\label{isom:result} \Big(H_c^*(\X_{p-1}(Y);\Q)\otimes \sgn\Big)^{S_{p}} \nonumber\\ \cong (H^*(\mathring{X}^{p};\Q)\otimes \sgn)^{S_p}\otimes H^*(\pic_{\d}(X);\Q)\otimes H_c^*(Y(D_{p-1});\Q) \end{flalign} where recall that $\mathring{X}^{p}$ denotes the space $X^p-\text{diagonals}$. \end{proof} \begin{remark}The reader is encouraged to compare \eqref{isom:result} with \cite[Theorem 7.1]{BG91}.\end{remark} \subsection{Proof of Theorem \ref{theorem}, \ref{statement2}} For $Y=\P^N$, by Lemma \ref{lemma:Xr} the terms of the spectral sequence \eqref{spectralsequence}, following \eqref{isom:result}, reads as:\begin{gather}\label{ss:P^N} (H^*(\mathring{X}^{p};\Q)\otimes \sgn)^{S_p}\otimes H^*(\pic_{\d}(X);\Q)\otimes H^*(\P^{(N_d-p)(N+1)-1};\Q)\nonumber\\ \cong (H^*(X;\Q)^{\otimes p}\otimes \sgn)^{S_p}\otimes H^*(\pic_{\d}(X);\Q)\otimes H^*(\P^{(N_d-p)(N+1)-1};\Q) \end{gather} for all $p\leq r(\d)$. Indeed, this isomorphism follows from the simple observation that for any scheme $$(H^*(\mathring{X}^{p};\Q)\otimes \sgn)^{S_p}\cong (H^*(X;\Q)^{\otimes p}\otimes \sgn)^{S_p}.$$ Thus, the spectral sequence \eqref{spectralsequence} boils down, in case of $Y=\P^N$, to $E_1^{p,*} =$ \begin{align}\label{spectralsequence:P^N} \begin{cases} (H^*(X;\Q)^{\otimes p}\otimes \sgn)^{S_p}\otimes H^*(\pic_{\d}(X);\Q)\otimes H^*(\P^{(N_d-p)(N+1)-1};\Q) &p\leq r(\d)+1\\ \Big(H_c^q(\X_{p-1};\Q)\otimes \sgn\Big)^{S_{p}} & p>r(\d)+1 \end{cases} \end{align} For a smooth projective variety $V$, the cohomology $H^i(V;\Q)$ is pure of weight $i$, which implies that the differentials between the terms of the spectral sequence \eqref{spectralsequence:P^N} vanish on the $E_2$-page and above for $p\leq r(\d)+1$. The result in statement \ref{statement2} follows immediately by a simple application of the Lefschetz hyperplane theorem. Indeed, the codimension of $\Z_{\d}(X,\P^N)$ in $\X_{-1}$ is $nN$, and likewise, a simple computation shows that the codimension of the image of $\X_r$ in $\X_{-1}$ is $\geq (r+1)nN$ for all $r\geq 0$; in turn the terms of the spectral sequence \ref{spectralsequence:P^N} for $p>r(\d)+1$ have no contribution to $H^i(\mor_{\d}(X,\P^N);\Q)$ for $i\leq r(\d)+1$, which completes the proof of Theorem \ref{theorem}, statement \ref{statement2}. \bibliographystyle{alpha} \bibliography{ConfModel} \end{document}
2501.00171v1
http://arxiv.org/abs/2501.00171v1
On the Minimal Denominator Problem in Function Fields
\documentclass[11pt,a4paper,reqno]{amsart} \usepackage{amssymb,amsmath,amsthm} \usepackage[utf8]{inputenc} \usepackage[T1]{fontenc} \usepackage{enumerate} \usepackage[all]{xy} \usepackage{fullpage} \usepackage{comment} \usepackage{array} \usepackage{longtable} \usepackage{stmaryrd} \usepackage{mathrsfs} \usepackage{xcolor} \usepackage{mathtools} \renewcommand{\refname}{References} \def\wt{{Z}} \def\Z{\mathbb{Z}} \def\N{\mathbb{N}} \def\Q{\mathbb{Q}} \def\F{\mathbb{F}} \def\oQ{\overline{\mathbb{Q}}} \def\oO{\overline{O}} \def\Gal{\mathrm{Gal}} \def\res{\mathrm{res}} \def\Aut{\mathrm{Aut}} \def\Cay{\mathrm{Cay}} \def\gcd{\mathrm{gcd}} \def\deg{\mathrm{deg}} \def\Dic{\mathrm{Dic}} \def\vol{\mathrm{Vol}} \def\dim{\mathrm{dim}} \DeclarePairedDelimiter\floor{\lfloor}{\rfloor} \usepackage{hyperref} \hypersetup{ colorlinks=true, linkcolor=blue, filecolor=red, citecolor=green, urlcolor=cyan, pdftitle={GON}, pdfpagemode=FullScreen, } \urlstyle{same} \usepackage{cleveref} \crefformat{section}{\S#2#1#3} \crefformat{subsection}{\S#2#1#3} \crefformat{subsubsection}{\S#2#1#3} \usepackage{enumitem} \usepackage{tikz} \usepackage{mathdots} \newtheorem{theorem}{Theorem}[section] \newtheorem{lemma}[theorem]{Lemma} \newtheorem{claim}[theorem]{Claim} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{example}[theorem]{Example} \newtheorem{conjecture}[theorem]{Conjecture} \theoremstyle{definition} \newtheorem{definition}[theorem]{Definition} \newtheorem{xca}[theorem]{Exercise} \theoremstyle{remark} \newtheorem{remark}[theorem]{Remark} \newtheorem{question}[theorem]{Question} \makeatletter \newcommand{\subalign}[1]{ \vcenter{ \Let@ \restore@math@cr \default@tag \baselineskip\fontdimen10 \scriptfont\tw@ \advance\baselineskip\fontdimen12 \scriptfont\tw@ \lineskip\thr@@\fontdimen8 \scriptfont\thr@@ \lineskiplimit\lineskip \ialign{\hfil$\m@th\scriptstyle##$&$\m@th\scriptstyle{}##$\hfil\crcr #1\crcr } }} \makeatother \newcommand{\Mod}[1]{\ (\mathrm{mod} #1)} \numberwithin{equation}{section} \title{On the Minimal Denominator Problem in Function Fields} \author{Noy Soffer Aranov} \email{[email protected]} \address{Department of Mathematics, University of Utah, Salt Lake City, Utah, USA} \begin{document} \maketitle \begin{abstract} We study the minimal denominator problem in function fields. In particular, we compute the probability distribution function of the the random variable which returns the degree of the smallest denominator $Q$, for which the ball of a fixed radius around a point contains a rational function of the form $\frac{P}{Q}$. Moreover, we discuss the distribution of the random variable which returns the denominator of minimal degree, as well as higher dimensional and $P$-adic generalizations. This can be viewed as a function field generalization of a paper by Chen and Haynes. \end{abstract} \section{Introduction} Meiss and Sanders \cite{MS} described an experiment in which a distance $\delta>0$ is fixed, and for randomly chosen $x\in [0,1)$, they study the statistics of the function \begin{equation} q_{\min}(x,\delta)=\min\left\{q:\exists\frac{p}{q}\in B(x,\delta),\gcd(p,q)=1\right\}. \end{equation} Chen and Haynes \cite{CH} computed the the probability that $\mathbb{P}(q_{\min}(x,\delta)=q)$ for every $\delta>0$ and for every $q\leq \left[\frac{1}{\delta}\right]$. Moreover, they proved that $\mathbb{E}[q_{\min}(\cdot, \delta)]=\frac{16}{\pi^2\cdot \delta^{\frac{1}{2}}}+O(\log^2\delta)$. 
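To illustrate the definition with a small, purely illustrative computation: for $x=\frac{2}{7}$ and $\delta=\frac{1}{20}$, no fraction with denominator $1$ or $2$ lies within $\frac{1}{20}$ of $x$, while $\left|\frac{2}{7}-\frac{1}{3}\right|=\frac{1}{21}<\frac{1}{20}$, so that $q_{\min}\left(\frac{2}{7},\frac{1}{20}\right)=3$.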
Marklof \cite{M} generalized the results of \cite{CH} to higher dimensions by studying the statistics of Farey fractions. The minimal denominator problem was investigated in the real setting in several other papers such as \cite{KM,St}, but it is not well studied over other fields. In this paper, we use linear algebra and number theory to study the function field analogue of the function $q_{\min}(x,\delta)$, as well as its higher dimensional and $P$-adic analogues. In particular, we prove a function field analogue of the results of \cite{CH}. We note that unlike \cite{CH,M}, we do not study the distribution of Farey fractions; rather, we use linear algebra and lattice point counting techniques, which work better in ultrametric spaces. \subsection{Function Field Setting} In this setting, we let $q$ be a prime power and denote the ring of polynomials over $\mathbb{F}_q$ by $$\mathcal{R}=\left\{\sum_{n=0}^Na_nx^n:a_n\in \mathbb{F}_q,N\in \mathbb{N}\cup\{0\}\right\}.$$ We let $\mathcal{K}$ be the field of fractions of $\mathcal{R}$, and define an absolute value on $\mathcal{K}$ by $\left|\frac{f}{g}\right|=q^{\deg(f)-\deg(g)}$, where $f,g\in \mathcal{R}$ and $g\neq 0$. Then, the completion of $\mathcal{K}$ with respect to $\vert \cdot\vert$ is $$\mathcal{K}_{\infty}=\left\{\sum_{n=-N}^{\infty}a_nx^{-n}:a_n\in \mathbb{F}_q\right\}.$$ We let $\mathcal{O}=\{\alpha\in \mathcal{K}_{\infty}:\vert \alpha\vert\leq 1\}$, and let $$\mathfrak{m}=x^{-1}\mathcal{O}=\{\alpha\in \mathcal{K}_{\infty}:\vert \alpha\vert\leq q^{-1}\}.$$ For $\alpha\in \mathcal{K}_{\infty}$, we write $\alpha=[\alpha]+\{\alpha\}$, where $[\alpha]\in \mathcal{R}$ and $\{\alpha\}\in \mathfrak{m}$. In this paper, we define the Haar measure on $\mathcal{K}_{\infty}$ to be the unique translation invariant measure $\mu$ such that $\mu(\mathfrak{m})=1$. In $\mathcal{K}_{\infty}^n$, we define the supremum norm as $\Vert (v_1,\dots,v_n)\Vert=\max_{i=1,\dots,n}\vert v_i\vert$. Similarly, for $\boldsymbol{\alpha}=(\alpha_1,\dots,\alpha_n)\in \mathcal{K}_{\infty}^n$, we let $[\boldsymbol{\alpha}]=([\alpha_1],\dots,[\alpha_n])$ and $\{\boldsymbol{\alpha}\}=(\{\alpha_1\},\dots,\{\alpha_n\})$. \subsection{Main Results} We prove a function field analogue of the main results of \cite{CH}. Let $n\in \mathbb{N}$. For $\delta>0$ and $\boldsymbol{\alpha}\in\mathcal{K}_{\infty}^n$, we define the minimal denominator degree by $$\deg_{\min}(\boldsymbol{\alpha},\delta)=\min\left\{d:\exists\frac{\mathbf{P}}{Q},\ \mathbf{P}\in\mathcal{R}^n,\ \deg(Q)=d,\left\Vert\boldsymbol{\alpha}-\frac{\mathbf{P}}{Q}\right\Vert<\delta\right\}.$$ We say that $Q$ is a minimal denominator for $\boldsymbol{\alpha}$ if $\deg(Q)=\deg_{\min}(\boldsymbol{\alpha},\delta)$ and $\left\Vert\boldsymbol{\alpha}-\frac{\mathbf{P}}{Q}\right\Vert<\delta$ for some $\mathbf{P}\in\mathcal{R}^n$. We note that if $Q$ is a minimal denominator for $\boldsymbol{\alpha}$, then it is also a minimal denominator for $\{\boldsymbol{\alpha}\}$. Hence, we only focus on $\boldsymbol{\alpha}\in \mathfrak{m}^n$. Moreover, since the absolute value $\vert \cdot \vert$ takes values in $\{0\}\cup\{q^{k}:k\in \mathbb{Z}\}$, for every $q^{-(k+1)}<\delta\leq q^{-k}$ we have $\deg_{\min}(\boldsymbol{\alpha},\delta)=\deg_{\min}(\boldsymbol{\alpha},q^{-k})$. Hence, we only focus on $\delta=q^{-k}$, where $k\in \mathbb{N}$. We first compute the probability distribution function of $\deg_{\min}(\cdot,q^{-k})$ when $n=1$. From now on, we denote by $\mathbb{P}$ the probability with respect to the Haar measure $\mu$. \begin{theorem} \label{thm:deg_min1D} Let $k\in \mathbb{N}$.
Then, we have $$\mathbb{P}\left(\deg_{\min}(\alpha,q^{-1})=d\right)=\begin{cases} \frac{1}{q}&d=0,\\ \frac{q-1}{q}&d=1 \end{cases},$$ and for every $k\geq 2$, we have \begin{equation} \mathbb{P}\left(\deg_{\min}(\alpha,q^{-k})=d\right)=\begin{cases} q^{-k}&d=0,\\ \frac{q-1}{q^{k-2d+1}}&d\leq \left\lceil\frac{k}{2}\right\rceil,d\in \mathbb{N},\\ 0&\text{ else}. \end{cases} \end{equation} \end{theorem} \begin{corollary} We have \begin{equation} \mathbb{E}[\deg_{\min}(\cdot,q^{-k})]=\begin{cases} \frac{q-1}{q}&k=1,\\ \frac{q-1}{q^k}\left(\frac{q^{2\left\lceil\frac{k}{2}\right\rceil+1}\left(\left\lceil\frac{k}{2}\right\rceil+1\right)-\left(\left\lceil\frac{k}{2}\right\rceil+2\right)q^{2\left\lceil\frac{k}{2}\right\rceil}+1}{(q^2-1)^2}\right)&\text{else}. \end{cases} \end{equation} \end{corollary} \begin{proof} When $k=1$, the claim is immediate. Otherwise, by Theorem \ref{thm:deg_min1D}, we have \begin{equation} \begin{split} \mathbb{E}\left[\deg_{\min}(\alpha,q^{-k})\right]=\sum_{d=0}^{\left\lceil\frac{k}{2} \right\rceil}d\frac{q-1}{q^k}q^{2d-1}=\frac{q-1}{q^{k}}\frac{d}{dt}\left(\sum_{d=0}^{\left\lceil\frac{k}{2}\right\rceil}t^d\right)_{t=q^2}\\ =\frac{q-1}{q^k}\frac{d}{dt}\left(\frac{t^{\left\lceil\frac{k}{2}\right\rceil+1}-1}{t-1}\right)_{t=q^2}=\frac{q-1}{q^k}\left(\frac{q^{2\left\lceil\frac{k}{2}\right\rceil+1}\left(\left\lceil\frac{k}{2}\right\rceil+1\right)-\left(\left\lceil\frac{k}{2}\right\rceil+2\right)q^{2\left\lceil\frac{k}{2}\right\rceil}+1}{(q^2-1)^2}\right). \end{split} \end{equation} \end{proof} Moreover, in every dimension, there is a unique monic polynomial which is a denominator of minimal degree. \begin{lemma} \label{lem:UniqueQ_min} For every $\boldsymbol{\alpha}\in \mathfrak{m}^n$ and for every $k\geq 1$, there exists a unique monic polynomial $Q\in \mathcal{R}$, such that $\deg(Q)=\deg_{\min}(\boldsymbol{\alpha},q^{-k})$ and $\Vert Q\boldsymbol{\alpha}\Vert<q^{-k}$. \end{lemma} This motivates the following definition. \begin{remark} Due to Lemma \ref{lem:UniqueQ_min}, we denote the unique monic polynomial $Q$ satisfying $\deg(Q)=\deg_{\min}(\alpha,q^{-k})$ and $\Vert Q\alpha\Vert<q^{-k}$ by $Q_{\min}(\alpha,q^{-k})$. \end{remark} We also compute the distribution of $Q_{\min}(\cdot,q^{-k})$. To do so, we shall use some notations from number theory. \begin{definition} For a polynomial $Q$, we let $d(Q)$ be the number of prime divisors of $Q$, we let $D(Q)$ be the number of monic divisors of $Q$, and we let $S(Q)$ be the set of divisors of $Q$. We define $$\mu(Q)=\begin{cases} (-1)^{d(Q)}&Q\text{ is square free},\\ 0&\text{if there exists }P\text{ such that }P^2\mid Q \end{cases}$$ \end{definition} \begin{definition} For a polynomial $Q\in \mathcal{R}$, we define $S_{\text{monic}}^{\P,\ell}(Q)$ to be the set of $\ell$ tuples $(a_1,\dots,a_{\ell})$, such that $a_i$ are distinct monic polynomials which divide $Q$, and $\deg(a_i)<\deg(Q)$. \end{definition} \begin{theorem} \label{thm:Q_min=Q} Let $Q$ be a monic polynomial with $\deg(Q)\leq \left\lceil\frac{k}{2}\right\rceil$. Then, for every $k\geq 1$, the probability that $Q_{\min}(\alpha,q^{-k})=Q$ is \begin{equation} \begin{split} \frac{1}{q^k}\left(\vert Q\vert+\sum_{N|Q,\deg(N)<\deg(Q)}\vert N\vert\sum_{\ell=1}^{D(N)}(-1)^{\ell}\left(\frac{D\left(\frac{Q}{N}\right)!}{\left(D\left(\frac{Q}{N}\right)-\ell\right)!}+\sum_{M\in S\left(\frac{Q}{N}\right):D\left(\frac{Q}{NM}\right)\geq \ell}\mu(M)\frac{D(M)!}{(D(M)-\ell)!}\right)\right). 
\end{split} \end{equation} In particular, if $Q$ is an irreducible monic polynomial of degree $d$, then, \begin{equation} \mathbb{P}(Q_{\min}(\alpha,q^{-k})=Q)=\frac{q^d-1}{q^k}. \end{equation} \end{theorem} In the higher dimensional setting, one looks for a simultaneous solution of the inequalities $\vert \{Q\alpha_i\}\vert<q^{-k}$. \begin{lemma} \label{lem:deg_min>=maxdeg} For every $k\in \mathbb{N}$, and for every $\boldsymbol{\alpha}=(\alpha_1,\dots,\alpha_n)\in \mathfrak{m}^{n}$, we have $$\deg_{\min}(\boldsymbol{\alpha},q^{-k})\geq \max_{i=1,\dots,n} \deg_{\min}(\alpha_i,q^{-k}).$$ \end{lemma} \begin{proof} If there exists $Q\in \mathcal{R}$ with $\deg(Q)=\deg_{\min}(\boldsymbol{\alpha},q^{-k})$, such that for every $i=1,\dots,n$, we have $\Vert Q\alpha_i\Vert<q^{-k}$, then $\deg(Q)\geq \deg_{\min}(\alpha_i,q^{-k})$ for every $i=1,\dots,n$. Hence, $\deg_{\min}(\boldsymbol{\alpha},q^{-k})\geq \max_{i=1,\dots,n} \deg_{\min}(\alpha_i,q^{-k})$. \end{proof} Hence, it is natural to ask for the probability that $\deg_{\min}(\boldsymbol{\alpha},q^{-k})=\max_{i=1,\dots,n}\deg_{\min}(\alpha_i,q^{-k})$. The following is an immediate corollary of Lemma \ref{lem:UniqueQ_min} and Lemma \ref{lem:deg_min>=maxdeg}. \begin{corollary} \label{cor:Q_min=maxQ} Let $\boldsymbol{\alpha}\in \mathfrak{m}^n$ and let $k\geq 1$. Then, for $Q\in \mathcal{R}$ with $\deg(Q)=d$, for $0\leq d\leq \left\lceil\frac{k}{2}\right\rceil$, we have $Q_{\min}(\boldsymbol{\alpha},q^{-k})=Q$ if and only if \begin{enumerate} \item for every $i=1,\dots, n$, we have $Q_{\min}(\alpha_i,q^{-k})|Q$; \item there exists $i$ such that $Q_{\min}(\alpha_i,q^{-k})=Q$. \end{enumerate} \end{corollary} \begin{proof}[Proof of Corollary \ref{cor:Q_min=maxQ}] By Lemma \ref{lem:deg_min>=maxdeg}, we have $\deg_{\min}(\boldsymbol{\alpha},q^{-k})\geq \deg_{\min}(\alpha_i,q^{-k})$ for every $i=1,\dots,n$. For $i=1,\dots,n$, let $Q_i=Q_{\min}(\alpha_i,q^{-k})$. If $Q_i=Q$ for every $i$, then there is nothing to prove. Otherwise, let $i$ be such that $Q_i\neq Q$. Then, by Lemma \ref{lem:UniqueQ_min}, if $\left|\alpha_i-\frac{P_i}{Q_i}\right|<q^{-k}$ and $\left|\alpha_i-\frac{P}{Q}\right|<q^{-k}$, then $\frac{P}{Q}=\frac{P_i}{Q_i}$. Thus, $Q_i$ divides $Q$. \end{proof} Furthermore, we bound the probability that $\deg_{\min}(\boldsymbol{\alpha},q^{-k})\leq d$ in higher dimensions. \begin{lemma} \label{lem:multdimUppBnd} Let $n,k\in \mathbb{N}$. Then, for every $\boldsymbol{\alpha}\in \mathfrak{m}^n$, we have $\deg_{\min}(\boldsymbol{\alpha},q^{-k})\leq \left\lceil\frac{nk}{n+1}\right\rceil$. Moreover, for every $d\leq \left\lceil\frac{nk}{n+1}\right\rceil$, we have \begin{equation} \mathbb{P}(\deg_{\min}(\boldsymbol{\alpha},q^{-k})\leq d)\leq q^{-(kn-(n+1)d)}. \end{equation} \end{lemma} We shall also discuss a $P$-adic variant of the minimal denominator problem. Let $P\in \mathcal{R}$ be an irreducible polynomial, let $\alpha\in \mathfrak{m}$, and let $k\geq 1$. We define $$\deg_{\min,P}(\alpha,q^{-k})=\min\left\{d\geq 0:\exists m\geq 0, \exists\frac{a}{b}:\deg(b)=d, \frac{a}{P^mb}\in B(\alpha,q^{-k})\right\}=\inf_{m\geq 0}\deg_{\min}(P^m\alpha,q^{-k}).$$ \begin{theorem} \label{thm:Padic} For every irreducible polynomial $P$ and for almost every $\alpha\in \mathfrak{m}$, we have $\deg_{\min,P}(\alpha,q^{-k})=0$. Moreover, for every $1\leq d\leq \left\lceil\frac{k}{2}\right\rceil$, we have $$\dim_H\{\alpha\in \mathfrak{m}:\deg_{\min,P}(\alpha,q^{-k})\geq d\}=\frac{\log_q(\vert P\vert^k-\vert P\vert^{2(d-1)})}{k\deg(P)}.$$ Moreover, when $d=0$, we have an equality.
\end{theorem} \begin{remark} A natural question pertains to the minimal denominator problem in the case of an infinite residue field, for example over $\mathbb{Q}[x]$ or $\mathbb{C}[x]$. Since many of our proofs rely on counting arguments over the residue field, they do not carry over to function fields with an infinite residue field. \end{remark} \subsection{Acknowledgements} I would like to thank Eran Igra and Albert Artiles who accidentally introduced me to the minimal denominator problem, and thus led to the birth of this paper. \section{Hankel Matrices} \label{sec:HankelMatrix} We first translate the minimal denominator problem to the language of linear algebra. For $k,\ell\in \mathbb{N}$ and $\alpha=\sum_{i=1}^{\infty}\alpha_ix^{-i}\in \mathfrak{m}$, we define the Hankel matrix of $\alpha$ of order $(k,\ell)$ as $$\Delta_{\alpha}(k,\ell)=\begin{pmatrix} \alpha_1&\alpha_2&\dots&\alpha_{\ell}\\ \alpha_2&\alpha_3&\dots&\alpha_{\ell+1}\\ \vdots&\dots&\ddots&\vdots\\ \alpha_k&\alpha_{k+1}&\dots&\alpha_{k+\ell-1} \end{pmatrix}.$$ Assume that $\alpha\in \mathfrak{m}$ and $\frac{P}{Q}\in \mathbb{F}_q(x)$. Then, $\left|\alpha-\frac{P}{Q}\right|<q^{-k}$ if and only if $\vert Q\alpha-P\vert<q^{-(k-\deg(Q))}$. Let $d=\deg(Q)$. Note that by Dirichlet's theorem, $\deg_{\min}(\alpha, q^{-k})\leq k$. Hence, we can assume that $d\leq k$. Then, $\vert\{ Q\alpha\}\vert<q^{-(k-d)}$ if and only if \begin{equation} \label{eqn:HankelMinDenom} \begin{pmatrix} \alpha_1&\alpha_2&\dots&\alpha_{d+1}\\ \alpha_2&\alpha_3&\dots&\alpha_{d+2}\\ \vdots&\dots&\ddots&\vdots\\ \alpha_{k-d}&\alpha_{k-d+1}&\dots&\alpha_k \end{pmatrix}\begin{pmatrix} Q_0\\ Q_1\\ \vdots\\ Q_d \end{pmatrix}=0, \end{equation} where $Q=\sum_{i=0}^dQ_ix^i$. We notice that if $d+1\geq k-d+1$, that is if $d\geq \frac{k}{2}$, then there exists a non-trivial solution to (\ref{eqn:HankelMinDenom}). Hence, $\deg_{\min}(\alpha, q^{-k})\leq \left\lceil\frac{k}{2}\right\rceil$. \begin{remark} \label{rem:degRank} We note that $\deg_{\min}(\alpha,q^{-k})=d$, for $d\leq \left\lceil\frac{k}{2}\right\rceil$, if and only if for every $j<d$, we have that the matrix $\Delta_{\alpha}(k-j,j+1)$ has rank $j+1$, but the matrix $\Delta_{\alpha}(k-d,d+1)$ has rank $d$. \end{remark} \begin{lemma} For every $0\neq \alpha\in \mathfrak{m}$ and every $k\in \mathbb{N}$, there exists at most one $d\leq \frac{k}{2}$ for which there exist coprime $P,Q$ with $\deg(Q)=d$ such that $\left|\alpha-\frac{P}{Q}\right|<q^{-k}$. \end{lemma} \begin{proof} Assume that there exist $P,Q,P',Q'\in \mathcal{R}$ such that \begin{enumerate} \item $Q,Q'\neq 0$, \item $\gcd(P,Q)=1=\gcd(P',Q')$, \item $\deg(Q')=d'<d=\deg(Q)\leq \frac{k}{2}$, \item $\left|\alpha-\frac{P}{Q}\right|<q^{-k}$, and \item $\left|\alpha-\frac{P'}{Q'}\right|<q^{-k}$. \end{enumerate} Then, by the ultrametric inequality and the fact that these fractions are reduced, we have \begin{equation} \left|\frac{PQ'-P'Q}{QQ'}\right|= \left|\frac{P}{Q}-\frac{P'}{Q'}\right|<\frac{1}{q^k}. \end{equation} Thus, $\vert PQ'-P'Q\vert<\frac{q^{d+d'}}{q^k}\leq \frac{q^{2d}}{q^k}=q^{-(k-2d)}\leq 1$. Hence, $\frac{P}{Q}=\frac{P'}{Q'}$, which is impossible, since both fractions are reduced and $\deg(Q')<\deg(Q)$. \end{proof} We first use this reinterpretation to prove Lemma \ref{lem:UniqueQ_min}; before doing so, we record a small computational illustration.
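The following brute-force sketch (ours, not part of the paper) illustrates the reinterpretation above: it computes $\deg_{\min}(\alpha,q^{-k})$ from the first $k$ digits of $\alpha$ by scanning monic $Q$ of increasing degree $d$ and checking that the coefficients of $x^{-1},\dots,x^{-(k-d)}$ in $Q\alpha$ vanish, which is exactly the linear system (\ref{eqn:HankelMinDenom}).
\begin{verbatim}
# Brute-force deg_min(alpha, q^{-k}) over F_p from the digit string of alpha.
from itertools import product

def coeff(Q, a, j, p):
    # coefficient of x^{-j} in Q(x)*alpha, where Q = [Q_0, ..., Q_d] and
    # a = [a_1, a_2, ...] lists the digits of alpha (all entries in F_p)
    return sum(Q[i] * a[i + j - 1] for i in range(len(Q))) % p

def deg_min(a, k, p):
    # a must contain at least k digits of alpha
    for d in range(k + 1):
        for lower in product(range(p), repeat=d):     # Q monic of degree d
            Q = list(lower) + [1]
            if all(coeff(Q, a, j, p) == 0 for j in range(1, k - d + 1)):
                return d, Q
    return k, None

# alpha = x^{-1} + x^{-3} + x^{-4} + x^{-6} + ... over F_2, with k = 6:
print(deg_min([1, 0, 1, 1, 0, 1], 6, 2))  # -> (2, [1, 1, 1]), i.e. Q = x^2 + x + 1
\end{verbatim}
For $n>1$ one runs the same test on the digit strings of all coordinates simultaneously, which corresponds to stacking the Hankel matrices of the coordinates, as in the definition that follows.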
To prove Lemma \ref{lem:UniqueQ_min}, we define, for $\boldsymbol{\alpha}\in \mathfrak{m}^n$, the matrix $$\Delta_{\boldsymbol{\alpha}}(k,\ell)=\begin{pmatrix} \Delta_{\alpha_1}(k,\ell)\\ \vdots\\ \Delta_{\alpha_n}(k,\ell) \end{pmatrix}.$$ \begin{proof}[Proof of Lemma \ref{lem:UniqueQ_min}] We notice that if $d=\deg_{\min}(\boldsymbol{\alpha},q^{-k})$, then $$\operatorname{rank}(\Delta_{\boldsymbol{\alpha}}(k-d+1,d))=d=\operatorname{rank}(\Delta_{\boldsymbol{\alpha}}(k-d,d+1)).$$ Hence, $\dim\operatorname{Ker}(\Delta_{\boldsymbol{\alpha}}(k-d,d+1))=1$. Let $Q$ be a polynomial satisfying $\deg(Q)=d$ and $\Vert Q\boldsymbol{\alpha}\Vert<q^{-k}$. Without loss of generality we can assume that $Q_d=1$. Thus, $$\Delta_{\boldsymbol{\alpha}}(k-d,d+1)(Q_0,Q_1,\dots,Q_{d-1},1)^t=0.$$ Since $\dim\operatorname{Ker}(\Delta_{\boldsymbol{\alpha}}(k-d,d+1))=1$, then $(Q_0,Q_1,\dots,Q_{d-1},1)$ is the unique vector $\mathbf{v}=(v_0,\dots,v_d)$ with $v_d=1$, such that $\Delta_{\boldsymbol{\alpha}}(k-d,d+1)\mathbf{v}=0$. Thus, by (\ref{eqn:HankelMinDenom}), $Q$ is the unique monic polynomial of minimal degree with $\Vert Q\boldsymbol{\alpha}\Vert<q^{-k}$. \end{proof} We shall use several facts about ranks of Hankel matrices to prove Theorems \ref{thm:deg_min1D} and \ref{thm:Q_min=Q}. \begin{theorem}{\cite[Theorem 5.1]{AGR}} \label{thm:numHankMatrix} Let $0\leq r\leq h$. Then, the number of $h\times h$ Hankel matrices with entries in $\mathbb{F}_q$ of rank $r$, $N(r,h;q)$, is equal to \begin{equation} N(r,h;q)=\begin{cases} 1&r=0\\ q^{2r-2}(q^2-1)&1\leq r\leq h-1\\ q^{2h-2}(q-1)&r=h \end{cases}. \end{equation} \end{theorem} We shall also use the following generalization of Theorem \ref{thm:numHankMatrix}. \begin{theorem}{\cite[Theorem 1.1]{DG}} \label{thm:DG} Let $k,\ell\in \mathbb{N}$, let $F$ be a finite field with $\vert F\vert=q$, and let $r\leq \min\{k,\ell\}-1$. Then, the number of Hankel matrices $\Delta_{\alpha}(k,\ell)$ over $F$ with rank at most $r$ is $q^{2r}$. \end{theorem} \begin{lemma}{\cite[Lemma 2.3]{ALN}} \label{lem:ALN} Let $m,n\in \mathbb{N}$, and let $k\leq \min\{m,n-1\}$. Let $H=\Delta_{\alpha}(m,n)$ be a Hankel matrix. If the first $k$ columns of $H$ are independent, but the first $k+1$ columns of $H$ are dependent, then $\det(\Delta_{\alpha}(k,k))\neq 0$. \end{lemma} \section{Proofs in the One Dimensional Case} \begin{proof}[Proof of Theorem \ref{thm:deg_min1D}] By using the reinterpretation in \cref{sec:HankelMatrix}, we realize that if $k=1$, then there is a non-trivial solution for (\ref{eqn:HankelMinDenom}) when $d\geq 1$. Moreover, there exists a solution for (\ref{eqn:HankelMinDenom}) when $k=1$ and $d=0$ if and only if $\alpha_1=0$. Hence, $$\mathbb{P}\left(\deg_{\min}(\alpha,q^{-1})=0\right)=\frac{1}{q},\quad \mathbb{P}\left(\deg_{\min}(\alpha,q^{-1})=1\right)=\frac{q-1}{q}.$$ If $k\geq 2$ and $d\geq \frac{k}{2}$, then there exists a non-trivial solution to (\ref{eqn:HankelMinDenom}). Hence, $\deg_{\min}(\alpha,q^{-k})\leq \left\lceil\frac{k}{2}\right\rceil$. Firstly, if $d=0$, then $\alpha_1=\dots=\alpha_k=0$, and therefore, $\mathbb{P}(\deg_{\min}(\alpha,q^{-k})=0)=q^{-k}$. Let $1 \leq d\leq \frac{k}{2}$ and let $\alpha\in \mathfrak{m}$. By Remark \ref{rem:degRank}, we have $d=\deg_{\min}(\alpha,q^{-k})$ if and only if the columns of the Hankel matrix $\Delta_{\alpha}(k-d,d+1)$ are linearly dependent, but the columns of $\Delta_{\alpha}(k-d+1,d)$ are linearly independent. Hence, by Lemma \ref{lem:ALN}, the matrix $\Delta_{\alpha}(d,d)$ is invertible.
Hence, there exist unique $a_1,\dots, a_d\in \mathbb{F}_q$, such that \begin{equation} \label{eqn:(d,d)MinorSum} \begin{pmatrix} \alpha_{d+1}\\ \alpha_{d+2}\\ \vdots\\ \alpha_{2d} \end{pmatrix}=a_1\begin{pmatrix} \alpha_1\\ \alpha_2\\ \vdots\\ \alpha_d \end{pmatrix}+\dots+a_d\begin{pmatrix} \alpha_d\\ \alpha_{d+1}\\ \vdots\\ \alpha_{2d-1} \end{pmatrix}. \end{equation} On the other hand, since the columns of $\Delta_{\alpha}(k-d,d+1)$ are linearly dependent, and the columns of $\Delta_{\alpha}(k-d+1,d)$ are linearly independent, there exist $b_1,\dots,b_d\in \mathbb{F}_q$, such that \begin{equation} \label{eqn:(k-d,d+1)Sum} \begin{pmatrix} \alpha_{d+1}\\ \alpha_{d+2}\\ \vdots\\ \alpha_k \end{pmatrix}=b_1\begin{pmatrix} \alpha_1\\ \alpha_2\\ \vdots\\ \alpha_{k-d} \end{pmatrix}+\dots+b_d\begin{pmatrix} \alpha_d\\ \alpha_{d+1}\\ \vdots\\ \alpha_{k-1} \end{pmatrix}. \end{equation} Thus, by (\ref{eqn:(d,d)MinorSum}) and (\ref{eqn:(k-d,d+1)Sum}), we have $a_i=b_i$ for every $i=1,\dots, d$. Hence, given an invertible $d\times d$ Hankel matrix $\Delta_{\alpha}(d,d)$ and some $\alpha_{2d}\in \mathbb{F}_q$, there is exactly one way to extend the word $(\alpha_1,\dots,\alpha_{2d})$ to a word $(\alpha_1,\dots,\alpha_k)$ such that every Laurent series $\sigma=\sum_{i=1}^{\infty}\alpha_ix^{-i}$ with these first $k$ coefficients satisfies $\deg_{\min}(\sigma,q^{-k})=d$. Therefore, by Theorem \ref{thm:numHankMatrix} (see also Theorem \ref{thm:DG}), we have $$\mathbb{P}(\deg_{\min}(\alpha,q^{-k})=d)=\frac{q^{2d-1}(q-1)}{q^k}.$$ \end{proof} To prove Theorem \ref{thm:Q_min=Q}, we use the following fact from \cite{CR}: We have $\Vert Q\alpha\Vert<q^{-(k-d)}$, where $Q=Q_0+Q_1x+\dots+Q_{d-1}x^{d-1}+x^d$, if and only if \begin{equation} \label{eqn:CircMatrix} \begin{pmatrix} Q_0&\dots&Q_{d-1}&-1&0&\dots&0\\ 0&Q_0&\dots&Q_{d-1}&-1&0&\dots\\ \vdots&\dots&\ddots&\ddots&\dots&\ddots&\ddots\\ 0&\dots&\dots&Q_0&\dots&Q_{d-1}&-1 \end{pmatrix}\begin{pmatrix} \alpha_1\\ \alpha_2\\ \vdots\\ \alpha_{k} \end{pmatrix}=\boldsymbol{0}. \end{equation} We denote the matrix on the left-hand side of (\ref{eqn:CircMatrix}) by $A_Q$, and we denote $\pi_k(\alpha)=(\alpha_1,\dots,\alpha_k)^t$. Then, $A_Q\in M_{k-d\times k}(\mathbb{F}_q)$, and $\dim(\operatorname{Ker}(A_Q))=d$. Hence, $\vert \operatorname{Ker}(A_Q)\vert=q^d$. \begin{proposition} \label{prop:GCDCnt} The number of primitive vectors in $S_{\text{monic}}^{\P,\ell}(Q)$ is \begin{equation} \vert\widehat{S}_{\text{monic}}^{\P,\ell}(Q)\vert=\begin{cases} \frac{D(Q)!}{(D(Q)-\ell)!}+\sum_{N\in S(Q):D\left(\frac{Q}{N}\right)\geq \ell}\mu(N)\frac{D(N)!}{(D(N)-\ell)!} & D(Q)\geq \ell,\\ 0& \text{else}. \end{cases} \end{equation} \end{proposition} \begin{remark} We use Proposition \ref{prop:GCDCnt} to count the number of tuples whose greatest common divisor is $1$, instead of the classical counting method, since this yields a more compact expression in the proof of Theorem \ref{thm:Q_min=Q}. \end{remark} \begin{proof}[Proof of Proposition \ref{prop:GCDCnt}] We first note that $$\vert S_{\text{monic}}^{\P,\ell}(Q)\vert=\begin{cases}\frac{D(Q)!}{(D(Q)-\ell)!}&D(Q)\geq \ell,\\ 0&\text{else}.
\end{cases}$$ Hence, to count primitive vectors in $S_{\text{monic}}^{\P,\ell}(Q)$, we use the inclusion--exclusion principle and induction to obtain that \begin{equation} \begin{split} \vert \widehat{S}_{\text{monic}}^{\P,\ell}(Q)\vert=\left|S_{\text{monic}}^{\P,\ell}(Q)\setminus\bigcup_{P\in S(Q)\text{ prime}}PS_{\text{monic}}^{\P,\ell}\left(\frac{Q}{P}\right)\right|\\ =\vert S_{\text{monic}}^{\P,\ell}(Q)\vert-\sum_{P_1,\dots,P_i\in S(Q)\text{ prime}}(-1)^{i+1}\left|P_1\cdots P_iS_{\text{monic}}^{\P,\ell}\left(\frac{Q}{P_1\cdots P_i}\right)\right|\\ =\frac{D(Q)!}{(D(Q)-\ell)!}+\sum_{N\in S(Q)}\mu(N)\left|S_{\text{monic}}^{\P,\ell}\left(\frac{Q}{N}\right)\right|\\ =\frac{D(Q)!}{(D(Q)-\ell)!}+\sum_{N\in S(Q):D\left(\frac{Q}{N}\right)\geq \ell}\mu(N)\frac{D(N)!}{(D(N)-\ell)!}. \end{split} \end{equation} \end{proof} \begin{proof}[Proof of Theorem \ref{thm:Q_min=Q}] Let $Q$ be a monic polynomial of degree at most $\left\lceil\frac{k}{2}\right\rceil$. By Lemma \ref{lem:UniqueQ_min}, if $\pi_k(\alpha)\in \operatorname{Ker}(A_D)\cap \operatorname{Ker}(A_Q)$, where $\deg(D)<\deg(Q)$, then $D|Q$. Hence, by the inclusion--exclusion principle, \begin{equation} \begin{split} \label{eqn:P(Q_min=Q)} \mathbb{P}(Q_{\min}(\alpha,q^{-k})=Q)=\mathbb{P}\left(\pi_k(\alpha)\in \left(\operatorname{Ker}(A_Q)\right)\setminus\bigcup_{D|Q,\deg(D)<\deg(Q)}\operatorname{Ker}(A_D)\right)\\ =\mathbb{P}(\pi_k(\alpha)\in \operatorname{Ker}(A_Q))-\sum_{D_1,\dots,D_{\ell}|Q}(-1)^{\ell+1}\mathbb{P}\left(\pi_k(\alpha)\in\bigcap_{i=1}^{\ell}\operatorname{Ker}(A_{D_i})\right)\\ =q^{-(k-\deg(Q))}+\sum_{D_1,\dots,D_{\ell}|Q}(-1)^{\ell}\mathbb{P}\left(\pi_k(\alpha)\in\operatorname{Ker}(A_{\gcd(D_1,\dots,D_{\ell})})\right). \end{split} \end{equation} We notice that if $N|Q$, then $N=\gcd(D_1,\dots,D_{\ell})$ if and only if for every $i=1,\dots,\ell$, there exists a monic polynomial $a_i\in \mathcal{R}$ such that $D_i=a_iN$, $a_i\mid \frac{Q}{N}$, and $\gcd(a_1,\dots,a_{\ell})=1$. Hence, $(a_1,\dots,a_{\ell})\in \mathcal{R}^{\ell}$ is a primitive vector with distinct coordinates, which are all monic polynomials which divide $\frac{Q}{N}$, so that $(a_1,\dots,a_{\ell})\in S_{\text{monic}}^{\P,\ell}\left(\frac{Q}{N}\right)$. Hence, by Proposition \ref{prop:GCDCnt}, we have \begin{equation} \begin{split} \sum_{D_1,\dots,D_{\ell}|Q}(-1)^{\ell+1}\mathbb{P}\left(\pi_k(\alpha)\in\operatorname{Ker}(A_{\operatorname{gcd}(D_1,\dots,D_{\ell})})\right)\\ =\frac{1}{q^k}\sum_{N|Q,\deg(N)<\deg(Q)}\vert N\vert\sum_{\ell=1}^{D(Q)}(-1)^{\ell+1}\#\{(D_1,\dots,D_{\ell}):\gcd(D_1,\dots,D_{\ell})=N\} \\=\frac{1}{q^k}\sum_{N|Q,\deg(N)<\deg(Q)}\vert N\vert\sum_{\ell=1}^{D(N)}(-1)^{\ell+1}\left|\widehat{S}_{\text{monic}}^{\P,\ell}\left(\frac{Q}{N}\right)\right|\\ =\frac{1}{q^k}\sum_{N|Q,\deg(N)<\deg(Q)}\vert N\vert\sum_{\ell=1}^{D(N)}(-1)^{\ell+1}\left(\frac{D\left(\frac{Q}{N}\right)!}{\left(D\left(\frac{Q}{N}\right)-\ell\right)!}+\sum_{M\in S\left(\frac{Q}{N}\right):D\left(\frac{Q}{N}\right)\geq \ell}\mu(M)\frac{D(M)!}{(D(M)-\ell)!}\right). \end{split} \end{equation} Hence, the probability that $Q_{\min}(\alpha,q^{-k})=Q$, for $\deg(Q)\leq \left\lceil\frac{k}{2}\right\rceil$, is equal to \begin{equation*} \begin{split} \frac{1}{q^k}\left(\vert Q\vert+\sum_{N|Q,\deg(N)<\deg(Q)}\vert N\vert\sum_{\ell=1}^{D(N)}(-1)^{\ell}\left(\frac{D\left(\frac{Q}{N}\right)!}{\left(D\left(\frac{Q}{N}\right)-\ell\right)!}+\sum_{M\in S\left(\frac{Q}{N}\right):D\left(\frac{Q}{NM}\right)\geq \ell}\mu(M)\frac{D(M)!}{(D(M)-\ell)!}\right)\right).
\end{split} \end{equation*} \end{proof} \section{Proof of Theorem \ref{thm:Padic}} \begin{proof}[Proof of Theorem \ref{thm:Padic}] We notice that $\left|P^mb\alpha-a\right|<q^{-(k-d)}$, where $d=\deg(b)$, if and only if \begin{equation} \begin{pmatrix} \alpha_m&\alpha_{m+1}&\dots&\alpha_{m+d}\\ \alpha_{m+1}&\alpha_{m+2}&\dots&\alpha_{m+d+1}\\ \vdots&\dots&\ddots&\vdots\\ \alpha_{m+k-d}&\dots&\dots&\alpha_{m+k} \end{pmatrix}\begin{pmatrix} b_0\\ b_1\\ \vdots\\ b_d \end{pmatrix}=0, \end{equation} where $\alpha=\sum_{i=1}^{\infty}P^{-i}\alpha_i$, for some $\alpha_i\in \mathbb{F}_q[x]/P\mathbb{F}_q[x]$. Hence, $\deg_{\min,P}(\alpha,q^{-k})\leq \frac{k}{2}$. Since almost every string is normal \cite{Bor}, for every $k$, for every prime $P$, and for almost every $\alpha\in \mathfrak{m}$, the string $0^k$ appears in the infinite word $\{\alpha_i\}_{i\in \mathbb{N}}$. Hence, if $\alpha_m=\alpha_{m+1}=\dots=\alpha_{m+k-1}=0$, then there exists $a$, such that $\vert P^m\alpha-a\vert<q^{-(k-d)}$. Thus, $\mathbb{P}(\deg_{\min,P}(\alpha,q^{-k})=0)=1$. By Theorem \ref{thm:DG}, for every $k$ and every $d\leq k-d$, the number of Hankel matrices $\Delta_{\alpha}(k-d+1,d)$ with entries in $\mathbb{F}_q[x]/P\mathbb{F}_q[x]$ of rank $d$ is $\vert P\vert^{k}-\vert P\vert ^{2(d-1)}$. Thus, for every $1\leq d\leq \frac{k}{2}$, we have $$\dim_H\{\alpha\in \mathfrak{m}:\deg_{\min,P}(\alpha,q^{-k})\geq d\}=\frac{\log_q(\vert P\vert ^k-\vert P\vert^{2(d-1)})}{k\deg(P)}.$$ \end{proof} \section{Proof of Lemma \ref{lem:multdimUppBnd}} \begin{proof}[Proof of Lemma \ref{lem:multdimUppBnd}] We note that $\Vert Q\boldsymbol{\alpha}\Vert<q^{-k}$ if and only if \begin{equation}\label{eqn:highDimMinDenom}\Delta_{\boldsymbol{\alpha}}(k-d,d+1)(Q_0,Q_1,\dots,Q_d)^t=0.\end{equation} Thus, if $d\geq \frac{nk}{n+1}$, there will always be a non-zero solution to (\ref{eqn:highDimMinDenom}). If $d=0$, then, for every $i=1,\dots,n$, we have $(\alpha_{i1},\dots,\alpha_{ik})=0$, where $\alpha_i=\sum_{j=1}^{\infty}\alpha_{ij}x^{-j}$. Hence, \begin{equation}\label{eqncase:0}\mathbb{P}\left(\deg_{\min}(\boldsymbol{\alpha},q^{-k})=0\right)=q^{-nk}.\end{equation} Let $1\leq d\leq \left\lceil\frac{nk}{n+1}\right\rceil$, and let $Q$ be a monic polynomial with $\deg(Q)=d$. Since $\vert \operatorname{Ker}(A_Q)\vert=q^d$, we have \begin{equation} \mathbb{P}\left(Q_{\min}(\boldsymbol{\alpha},q^{-k})=Q\right)\leq \mathbb{P}\left(\bigcap_{i=1}^n\left(\alpha_i\in \operatorname{Ker}(A_Q)\right)\right)=q^{-n(k-d)}. \end{equation} Hence, \begin{equation} \mathbb{P}\left(\deg_{\min}(\boldsymbol{\alpha},q^{-k})=d\right)\leq q^{-nk+(n+1)d}. \end{equation} \end{proof} \bibliography{Ref} \bibliographystyle{amsalpha} \end{document}
2501.01383v1
http://arxiv.org/abs/2501.01383v1
Electrical networks and data analysis in phylogenetics
\documentclass[11pt,reqno]{amsart} \usepackage[utf8]{inputenc} \usepackage{tikz-cd} \usepackage{pdfpages} \usepackage{graphicx} \usepackage{amssymb,amsmath, amscd, latexsym,amsfonts,bbm, amsthm,stmaryrd, enumerate,phaistos} \usepackage{yhmath} \usepackage{comment}\usepackage{todonotes}\usepackage[all]{xy} \usepackage[vcentermath]{youngtab} \usepackage{float} \renewcommand{\familydefault}{cmss} \usepackage{tabularx} \usepackage{amsmath, amssymb, latexsym, enumerate, graphicx,tikz, stmaryrd,pifont,ifsym} \usetikzlibrary{arrows,decorations,decorations.pathmorphing,positioning} \usepackage{mdframed} \usepackage{mdwlist} \usepackage[pdftex]{hyperref} \usepackage{amscd,mathrsfs,epic,empheq,float} \usepackage{bbm} \usepackage{cases} \usepackage[all]{xy} \def\theequation{\arabic{section}.\arabic{equation}} \usepackage{tikz} \setcounter{tocdepth}{1} \usepackage{tikz-cd} \newenvironment{defn}{\vspace{0.3cm}\par\noindent\refstepcounter{theorem}\begin{exafont}Definition \thetheorem.\end{exafont}\hspace{\labelsep}}{\vspace{0.3cm}\par} \theoremstyle{definition} \renewcommand{\familydefault}{cmss} \newcommand{\dist} {3mm} \newcommand{\Xn}{{\op{X}}_n} \newcommand{\cS}{\mathcal{S}} \newcommand{\mV}{\mathbb{V}} \newcommand{\bt}{\mathbf t} \newcommand{\bb}{\mathbf b} \newcommand{\bd}{\mathbf d} \newcommand{\oS}{\op{S}} \newcommand{\Func}{\op{Func}_n} \newcommand{\eu}{\op{eu}} \newcommand{\inc}{\op{inc}} \newcommand{\Supp}{\op{Supp}} \newcommand{\bw}{\op{w}} \newcommand{\ii}{\mathbf i} \newcommand{\RR}{\mathbb R} \newcommand{\CC}{\mathbb C} \newcommand{\QQ}{\mathbb Q} \newcommand{\ZZ}{\mathbb Z} \usepackage{pict2e} \def\StrangeCross \usepackage{amsfonts} \usepackage{amssymb} \usepackage{amscd} \usepackage{amsmath} \def\bea{\begin{eqnarray}} \def\eea{\end{eqnarray}} \def\nn{\nonumber} \def\AA{\mathcal{A}} \def\ZZ{\mathbb{Z}} \def\CC{\mathbb{C}} \def\PP{\mathbb{P}} \def\RR{\mathbb{R}} \def\QQ{\mathbb{Q}} \def\GG{\mathbb{G}} \def\TT{\mathbb{T}} \def\kk{\Bbbk} \def\gg{\mathfrak{g}} \usepackage{pict2e} \newtheorem{theorem}{Theorem}[section] \newtheorem{question}[theorem]{Question} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{problem}[theorem]{Problem} \newenvironment{exafont}{\begin{bf}}{\end{bf}} \newtheorem{corollary}[theorem]{Corollary} \theoremstyle{definition} \newtheorem{definition}[theorem]{Definition} \newtheorem{remark}[theorem]{Remark} \newtheorem{example}[theorem]{Example} \newtheorem{conv}[theorem]{Conventions} \newtheorem{conjecture}[theorem]{Conjecture} \newcommand{\op}{\operatorname} \newcommand{\Mat}{\operatorname{Mat}} \newcommand{\mC}{\mathbb{C}} \newcommand{\ov}{\overline{\otimes}} \newcommand{\la}{\lambda} \newcommand{\bH}{{\bf H}} \newcommand{\bP}{{\bf P}} \newcommand{\End}{\op{End}} \newcommand{\cU}{\mathcal{U}} \newcommand{\cZ}{\mathcal{Z}} \newcommand{\mg}{\mathfrak{gl}_2} \newcommand{\modH}{{$\op{mod}$-$\bH$}} \begin{document} \author[V. Gorbounov]{V.~Gorbounov} \address{V.~G.: Faculty of Mathematics, National Research University Higher School of Economics, Usacheva 6, 119048 Moscow, Russia} \email{[email protected] } \author[A. Kazakov]{A.~Kazakov} \address{A.~K.: Lomonosov Moscow State University, Faculty of Mechanics and Mathematics, Russia, 119991, Moscow, GSP-1, 1 Leninskiye Gory, Main Building; Centre of Integrable Systems, P. G. 
Demidov Yaroslavl State University, Sovetskaya 14, 150003, Yaroslavl, Russia; Center of Pure Mathematics, Moscow Institute of Physics and Technology, 9 Institutskiy per., Dolgoprudny, Moscow Region, 141701, Russian Federation; Kazan Federal University, N.I. Lobachevsky Institute of Mathematics and Mechanics, Kazan, 420008, Russia} \email{[email protected]} \title{Electrical networks and data analysis in phylogenetics} \maketitle \begin{abstract} A classic problem in data analysis is studying the systems of subsets defined by either a similarity or a dissimilarity function on $X$, which is either observed directly or derived from a data set. For an electrical network there are two functions on the set of the nodes, defined by the resistance matrix and the response matrix, either of which determines the network completely. We argue that these functions should be viewed as a similarity and a dissimilarity function on the set of the nodes; moreover, they are related via the covariance mapping, also known as the Farris transform or the Gromov product. We will explore the properties of electrical networks from this point of view. It has been known for a while that the resistance matrix defines a metric on the nodes of an electrical network. Moreover, for a circular electrical network this metric obeys the Kalmanson property, as was shown recently. We will call such a metric an electrical Kalmanson metric. The main result of this paper is a complete description of the electrical Kalmanson metrics inside the set of all Kalmanson metrics in terms of the geometry of the positive Isotropic Grassmannian, whose connection to the theory of electrical networks was discovered earlier. One important area of applications where Kalmanson metrics are actively used is the theory of phylogenetic networks, which are a generalization of phylogenetic trees. Our results allow us to use in phylogenetics the powerful methods for reconstructing the minimal graphs of electrical networks and possibly open the door into data analysis for the methods of the theory of cluster algebras. \end{abstract} \setcounter{tocdepth}{3} \tableofcontents MSC2020: 14M15, 82B20, 05E10, 05C50, 05C10, 92D15, 94C15, 90C05, 90C59, 05C12. Key words: Electrical networks, circular split metrics, Kalmanson metrics. \section{Introduction} The theory of electrical networks goes back to the work of Gustav Kirchhoff around the mid-1800s, and since then it has been a source of remarkable achievements in combinatorics, algebra, geometry, mathematical physics, and electrical engineering. An electrical network is a graph with positive weights (conductances) attached to the edges, and a chosen subset of the set of vertices which are called the boundary vertices or nodes. An important characteristic of an electrical network with only two nodes $i$ and $j$ is the effective resistance $R_{ij}$, that is, the voltage at node $i$ which, when node $j$ is held at zero volts, causes a unit current to flow through the circuit from node $i$ to node $j$. The effective resistance defines a metric on the set of nodes that is widely used in chemistry; see, for example, \cite{Kl}. For convenience, we will organize the effective resistances $R_{ij}$ in a matrix $R$, setting $R_{ii}=0$ for all $i$. In the case where there are more than two nodes, there is an important generalization of $R_{ij}$.
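As a computational aside (ours, not from the original text), before describing this generalization we note that the two-node effective resistance can be computed directly from the weighted graph Laplacian $L$: for a connected graph one has $R_{ij}=(e_i-e_j)^tL^{+}(e_i-e_j)$, where $L^{+}$ is the Moore--Penrose pseudoinverse. The following NumPy sketch does this for a small resistor network; all names are ours.
\begin{verbatim}
# Effective resistance via the pseudoinverse of the weighted graph Laplacian.
import numpy as np

def resistance_matrix(n, edges):
    # edges: list of (i, j, conductance); vertices are 0, ..., n-1
    L = np.zeros((n, n))
    for i, j, c in edges:
        L[i, i] += c; L[j, j] += c
        L[i, j] -= c; L[j, i] -= c
    Lp = np.linalg.pinv(L)
    R = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            e = np.zeros(n); e[i] += 1; e[j] -= 1
            R[i, j] = e @ Lp @ e
    return R

# two unit resistors in series: R_01 = R_12 = 1 and R_02 = 2
print(resistance_matrix(3, [(0, 1, 1.0), (1, 2, 1.0)]).round(6))
\end{verbatim}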
Given an electrical network with $n$ nodes, there is a linear endomorphism of the vector space of functions defined on the nodes, constructed as follows: for each such function, there is a unique extension of it to all the vertices which satisfies Kirchhoff's current law at each interior vertex. This function then gives the current $I$ in the network at the boundary vertices, defining a linear map which is called the Dirichlet-to-Neumann map or the network response. The matrix of this map is called the response matrix. It plays a key role in the theory and applications of electrical networks \cite{CIM}. The above two matrices determine each other, and moreover it is possible to reconstruct a planar circular electrical network if these matrices are known \cite{CIM}. A classic problem in data analysis is studying the systems of subsets defined by either a similarity or a dissimilarity function on $X$, which is either observed directly or derived from a data set. While the latter makes use of splits and split metrics, the key ingredients of the former are systems of clusters, subsets of $X$, and elementary similarity functions. One can interpret splits as distinctive features and clusters as common features; see \cite{BD}, \cite{BD1} for an introduction to these ideas. We argue that the response and the resistance matrices should be viewed as a similarity and a dissimilarity function on the set of nodes of an electrical network, and as such they are related via the covariance mapping, also known as the Farris transform or the Gromov product. We will explore the properties of electrical networks from this point of view. In this paper we will work with the resistance matrix. The connection of the response matrix to data analysis will be presented in a future publication. In computational biology one can construct a distance between two species (such as the Hamming distance) which records the proportion of characters where the two species differ. Such a record can be encoded by a real symmetric, nonnegative matrix called a dissimilarity matrix. An important problem in phylogenetics is to reconstruct a weighted tree which represents this matrix. In most cases, a tree structure is too constraining. The notion of a split network is a generalization of a tree in which multiple parallel edges signify divergence. A geometric space of such networks was introduced in \cite{DP}, forming a natural extension of the work by Billera, Holmes, and Vogtmann on tree space. It has been studied from different points of view, since it is related to a number of objects in mathematics: the compactification of the real moduli space of curves, the Balanced Minimal Evolution polytopes, and the Symmetric Traveling Salesman polytopes, to name a few. The appropriate metric on the set of species defined by a split network has a very special property called the Kalmanson property, which distinguishes it completely within the set of all metrics \cite{BD}. It has been discovered recently that the resistance metric defined by a circular planar electrical network obeys the Kalmanson property \cite{F}. We will call such a split metric an {\it electrical Kalmanson metric}. The main result of this paper is a complete description of the set of the electrical Kalmanson metrics inside the set of the Kalmanson metrics. For this we exploit the connection between the space of electrical networks and the positive Isotropic Grassmannian $\mathrm{IG}_{\geq 0}(n-1, 2n)$ investigated in \cite{L}, \cite{BGKT}, \cite{BGGK}, \cite{CGS}.
It turns out that the Kalmanson property itself is a consequence of this connection, and the description of the electrical Kalmanson metrics we provide is given entirely in terms of the geometry of the positive part of this projective variety. It is remarkable that the theory of positivity might play a role in studying phylogenetic networks, since it would allow us to apply the powerful machinery of the theory of cluster algebras developed for describing positivity in mathematical objects \cite{Z}. This whole story should be extended to the compactifications of the respective spaces, taking cactus networks, the known strata in the compactification of electrical networks, to the newly defined compactified split systems, as was started in \cite{DF}. In this picture the cactus networks should correspond to the pseudometrics playing the role of dissimilarity functions. The connection of the tropical geometry of the Grassmannians and the space of trees and tree metrics found in \cite{SS} is another interesting direction for developing our work. {\bf Acknowledgments.} For V. G. this article is an output of a research project implemented as part of the Basic Research Program at the National Research University Higher School of Economics (HSE University). Working on this project V. G. also visited the Max Planck Institute for Mathematics in the Sciences in Leipzig, Germany in the fall of 2024 and BIMSA in Beijing, China in the summer of 2024. Research of A.~K. on Section \ref{sec:kalman} was supported by the Russian Science Foundation project No. 20-71-10110 (https://rscf.ru/en/project/23-71-50012) which finances the work of A.K. at P. G. Demidov Yaroslavl State University. Research of A.K. on Section \ref{sec:rec} was supported by the state assignment of MIPT (project FSMG-2023-0013). The authors are grateful to Borya Shapiro, Misha Shapiro, Anton Petrunin and Lazar Guterman for useful discussions and suggestions. \section{The space of electrical networks and the positive Isotropic Grassmannian} \subsection{The space of electrical networks} \begin{definition} A connected electrical network $\mathcal E$ is a connected graph with positive weights (conductances) attached to the edges and a chosen subset of the set of vertices which are called the boundary vertices or the nodes. \end{definition} In this paper we will denote by $n$ the number of the nodes. As explained in the introduction, the key result in the theory of electrical networks says that the boundary voltages and the currents are related to each other linearly via a matrix $M_R(\mathcal E)=(x_{ij})$ called the {\it response matrix} of a network. $M_R(\mathcal E)$ has the following properties: \begin{itemize} \item $M_R(\mathcal E)$ is an $n\times n$ symmetric matrix; \item All the non-diagonal entries $x_{ij}$ of $M_R(\mathcal E)$ are non-positive; \item For each row the sum of all its entries is equal to $0$. \end{itemize} Given an electrical network $\mathcal E$ the response matrix $M_R(\mathcal E)$ can be calculated as the Schur complement of a submatrix in the Laplacian matrix of the graph of $\mathcal E$ \cite{CIM}. Suppose that $M$ is a square matrix and $D$ is a non-singular square lower right-hand corner submatrix of $M$, so that $M$ has the block structure \[ \begin{pmatrix} A&B\\ C&D \end{pmatrix}. \] The Schur complement of $D$ in $M$ is the matrix $M/D = A - BD^{-1} C$.
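To illustrate the construction (the code is ours, not from the paper), the following NumPy sketch builds the Laplacian of a small network with the boundary vertices listed first, forms the Schur complement $L/D=A-BD^{-1}C$, and checks the three properties of a response matrix listed above.
\begin{verbatim}
# Response matrix as the Schur complement of the interior block of the Laplacian.
import numpy as np

def laplacian(n, edges):
    L = np.zeros((n, n))
    for i, j, c in edges:              # (i, j, conductance)
        L[i, i] += c; L[j, j] += c
        L[i, j] -= c; L[j, i] -= c
    return L

def response_matrix(L, b):
    # b = number of boundary vertices, assumed to be indexed 0, ..., b-1
    A, B = L[:b, :b], L[:b, b:]
    C, D = L[b:, :b], L[b:, b:]
    return A - B @ np.linalg.inv(D) @ C

# star network: boundary nodes 0, 1, 2 joined to a single interior vertex 3
M = response_matrix(laplacian(4, [(0, 3, 1.0), (1, 3, 2.0), (2, 3, 3.0)]), 3)
print(M.round(4))                  # symmetric, non-positive off-diagonal entries
print(M.sum(axis=1).round(12))     # every row sums to zero
\end{verbatim}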
The Schur complement satisfies the following identity \[\det M = \det (M/D)\det D.\] Labeling the vertices starting from the nodes, we get the Laplacian matrix of the graph representing $\mathcal E$ in a two by two block form as above. The submatrix $D$ corresponds to the connections between the internal vertices and is known to be non-degenerate. Then $M_R(\mathcal E)=L/D$. There are many electrical networks which have the same response matrix; we will describe them now. The five local network transformations shown in Figure \ref{fig:el_trans} are called the {\it electrical transformations}. Two electrical networks are said to be equivalent if they can be obtained from each other by a sequence of the electrical transformations. This is of course an equivalence relation, so the set of electrical networks is split into equivalence classes. \begin{theorem} [\cite{CIM}] \label{gen_el_1} The electrical transformations preserve the response matrix of an electrical network. Any two circular electrical networks which have the same response matrix are equivalent. \end{theorem} \begin{figure}[h!] \centering \includegraphics[width=0.9\textwidth]{eltranseng.jpg} \hspace{-1.5cm} \caption{Electrical transformations} \label{fig:el_trans} \end{figure} In this paper we will deal mostly with a particular type of connected electrical networks, the {\it connected circular electrical networks}. For these we require in addition the graph of $\mathcal E$ to be planar and such that the nodes are located on a circle and enumerated clockwise, while the rest of the vertices are situated inside of this circle. We will denote by $E_n$ the set of equivalence classes of the connected circular electrical networks. The set $E_n$ admits the following elegant description. \begin{definition} Let $P=(p_1,\ldots,p_k)$ and $Q=(q_1,\ldots,q_k)$ be disjoint ordered subsets of the nodes arranged on a circle; then $(P;Q)=(p_1,\ldots,p_k;q_1,\ldots,q_k)$ is a {\it circular pair} if $p_1,\ldots, p_k,q_k,\ldots,q_1$ are in circular order around the boundary circle. Let $(P;Q)$ be a circular pair; then the determinant of the submatrix $ M(P;Q)$ whose rows are labeled by $(p_1,\ldots,p_k)$ and whose columns are labeled by $(q_1,\ldots,q_k)$ is called the {\it circular minor} associated with the circular pair $(P;Q)$. \end{definition} \begin{theorem} \cite{CIM}, \cite{CdV} \label{Set of response matrices all network} The set of response matrices of the elements of $E_n$ is precisely the set of the matrices $M$ such that \begin{itemize} \item $M$ is a symmetric matrix; \item All the non-diagonal entries of $M$ are non-positive; \item For each row the sum of all its entries is equal to $0$; \item For any $k\times k$ circular minor, $(-1)^k\det M(P;Q) \geq 0$; \item The kernel of $M$ is generated by the vector $(1,1,\dots,1)$. \end{itemize} \end{theorem} We will need the following notion later. \begin{definition} Let $\mathcal E$ be a connected circular electrical network on a graph $\Gamma$. The {\it dual electrical network} $\mathcal E^*$ is defined in the following way: \begin{itemize} \item the graph $\Gamma^*$ of $\mathcal E^*$ is the dual graph to $\Gamma$; \item for any pair of dual edges of $\Gamma$ and $\Gamma^*$ their conductances are reciprocal to each other; \item the labeling of the nodes of $\mathcal E^{*}$ is determined by the requirement that the first node of $\mathcal E^{*}$ lies between the first and second node of $\mathcal E$.
\end{itemize} \end{definition} \subsection{Cactus electrical networks} Setting an edge conductance to zero or infinity in $\mathcal E$ makes sense. According to Ohm's law, it means that we delete or contract this edge. Doing so, we either get isolated nodes or some nodes get glued together. We will consider the resulting network as a network with $n$ nodes, remembering how the nodes get identified. Such a network is called a {\it cactus electrical network} with $n$ nodes. One can think of it as a collection of ordinary circular electrical networks with the total number of the nodes equal to $n$, glued along some of these nodes. Note that the graph of such a network is planar, but it does not have to be connected. \begin{figure}[h!] \centering \includegraphics[width=1.0\textwidth]{cactuswithout.jpg} \caption{A cactus electrical network with 4 nodes} \label{fig:cactus} \end{figure} The electrical transformations can be applied to the cactus electrical networks. We denote by $\overline{E}_n$ the set of equivalence classes with respect to the electrical transformations of the cactus electrical networks with $n$ nodes. The notion of a cactus network was introduced in \cite{L}, where it was proved that the set $\overline{E}_n$ is a compactification of $E_n$ in the appropriate sense. \subsection{Lam embedding} \label{sec: lam emb} Recall that the real Grassmannian $\mathrm{Gr}(k, n)$ is a differentiable manifold that parameterizes the set of all $k$-dimensional linear subspaces of a vector space $\Bbb R^n$. In fact, it has the structure of a projective algebraic variety. The Pluecker embedding is an embedding of the Grassmannian $\mathrm{Gr}(k, n)$ into the projectivization of the $k$-th exterior power of the vector space $\Bbb R^n$: \[\iota :\mathrm {Gr} (k,n)\rightarrow \mathrm {P}(\Lambda ^{k}\Bbb R^n).\] Suppose $W\subset \Bbb R^n$ is a $k$-dimensional subspace. To define $\iota (W)$, choose a basis $(w_{1},\cdots ,w_{k})$ for $W$, and let $\iota (W)$ be the projectivization of the wedge product of these basis elements: $\iota (W)=[w_{1}\wedge \cdots \wedge w_{k}]$, where $ [\,\cdot \,]$ denotes the projective equivalence class. For practical calculations one can view the matrix whose rows are the coordinates of the basis vectors $(w_{1},\cdots ,w_{k})$ as a representative of this equivalence class. For any ordered sequence $I$ of $k$ positive integers $1\leq i_{1}<\cdots <i_{k}\leq n$, denote by $\Delta_I$ the determinant of the $k\times k$ submatrix of the above matrix with columns labeled by the numbers from $I$. The numbers $\Delta_I$ are called the Pluecker coordinates of the point $W$ of $\mathrm Gr(k,n)$. They are defined up to a common nonzero factor. \begin{definition} The totally non-negative Grassmannian $\mathrm{Gr}_{\geq 0}(k, n)$ is the subset of the points of the Grassmannian $\mathrm{Gr}(k, n)$ whose Pluecker coordinates $\Delta_I$ can be chosen to be all non-negative. \end{definition} The following theorem of T. Lam \cite{L} is one of the key results about the space of electrical networks. \begin{theorem} \label{th: main_gr} There is a bijection \[\overline {E}_n \cong \mathrm{Gr}_{\geq 0}(n-1,2n)\cap \Bbb PH\] where $H$ is a certain subspace of $\bigwedge^{n-1}\Bbb R^{2n}$ of dimension equal to the Catalan number $C_n$. Moreover, the image of the set $E_n$ under this bijection is exactly the set of points with the Pluecker coordinates $\Delta_{24\dots 2n-2}$ and $\Delta_{13\dots 2n-3}$ not equal to zero.
\end{theorem} Notice that because $M_R(\mathcal E)$ has rank $n-1$ by Theorem \ref{Set of response matrices all network}, the dimension of the row space of $\Omega(\mathcal E)$ is equal to $n-1$, hence it defines a point in $\mathrm{Gr}(n-1,2n)$. The Pluecker coordinates of the point associated with $\Omega(\mathcal E)$ can be calculated as the maximal size minors of the matrix $\Omega'(\mathcal E)$ obtained from $\Omega(\mathcal E)$ by deleting, for example, the first row. We will recall the construction of the embedding of ${E}_n$ obtained in \cite{BGKT}, which is induced by the above bijection. Let $\mathcal E$ be a circular electrical network with the response matrix $M_R(\mathcal E)=(x_{ij}).$ The following $n\times 2n$ matrix \begin{equation} \label{omega_eq} \Omega(\mathcal E)=\left( \begin{array}{cccccccc} x_{11} & 1 & -x_{12} & 0 & x_{13} & 0 & \cdots & (-1)^n\\ -x_{21} & 1 & x_{22} & 1 & -x_{23} & 0 & \cdots & 0 \\ x_{31} & 0 & -x_{32} & 1 & x_{33} & 1 & \cdots & 0 \\ \vdots & \vdots & \vdots & \vdots & \vdots & \vdots & \ddots & \vdots \end{array} \right) \end{equation} gives the point in $\mathrm{Gr}_{\geq 0}(n-1,2n)\cap \Bbb PH$ which corresponds to $\mathcal E$ under the Lam bijection. \begin{theorem} \cite{BGK} \label{about sur} Let $A=(a_{ij})$ be a matrix which satisfies the first three conditions of Theorem \ref{Set of response matrices all network} and let $\Omega(A)$ be the matrix constructed out of it according to the formula \eqref{omega_eq}. If $\Omega(A)$ defines a point in $Gr_{\geq 0}(n-1, 2n)$ and the Pluecker coordinate $\Delta_{13\dots 2n-3}\bigl(\Omega(A)\bigr)$ is not equal to zero, then there is a connected electrical network $\mathcal E \in E_n$ such that $A=M_R(\mathcal E).$ \end{theorem} \begin{example} For the network $\mathcal E$ in $E_4$ in Figure \ref{treedaul} the matrix $\Omega(\mathcal E)$ has the following form: \begin{equation*} \Omega(\mathcal E) = \left( \begin{array}{cccccccc} \dfrac{5}{8}& 1 & \dfrac{1}{8} & 0 & -\dfrac{1}{8} & 0 & \dfrac{3}{8} & 1 \\ & & & & & & & \\ \dfrac{1}{8}& 1 & \dfrac{5}{8} & 1 & \dfrac{3}{8} & 0 & -\dfrac{1}{8} & 0 \\ & & & & & & & \\ -\dfrac{1}{8}& 0 &\dfrac{3}{8} & 1 & \dfrac{5}{8} & 1 & \dfrac{1}{8} & 0 \\ & & & & & & & \\ \dfrac{3}{8}& 0 & -\dfrac{1}{8} & 0 & \dfrac{1}{8} & 1 & \dfrac{5}{8} & 1 \\ \end{array} \right). \end{equation*} \end{example} In fact, the row space of $\Omega(\mathcal E)$ is isotropic with respect to a particular symplectic form. This refines the above embedding to a submanifold $$\mathrm{IG}_{\geq 0}(n-1, 2n)\subset \mathrm{Gr}_{\geq 0}(n-1, 2n)$$ made out of isotropic subspaces of $\Bbb R^{2n}$ \cite{CGS}, \cite{BGKT}.
\end{definition} The following lemma, which relates the effective resistances and the response matrix entries, is well known. \begin{lemma} \cite{KW 2011} \label{lem:eff-resist} Let $\mathcal E$ be a connected electrical network with $n$ nodes, and let the boundary voltages $U = (U_1, \dots , U_n)$ be such that \begin{equation} \label{eq-resist} M_R(\mathcal E)U = -e_i + e_j, \end{equation} where $e_k, \ k \in \{1, \dots , n\}$ is the standard basis of $\mathbb{R}^n$. Then $$|U_i - U_j|=R_{ij}.$$ \end{lemma} \begin{proof} Indeed, if the boundary voltages $U$ are as in \eqref{eq-resist}, we can consider all the vertices apart from $i$ and $j$ as the inner vertices, and then $|U_i - U_j|$ is precisely as in Definition \ref{def:eff-resist}. \end{proof} For convenience, we will organize the effective resistances $R_{ij}$ between the nodes of $\mathcal E$ in a symmetric matrix $R_{\mathcal E}$, setting $R_{ii}=0$ for all $i$. We call this matrix the {\it resistance matrix} of $\mathcal E$. From Lemma \ref{lem:eff-resist} it follows that $$R_{ij}=(-e_i+e_j)^t\bigl(M_R(\mathcal E)\bigr)^{-1}(-e_i+e_j),$$ where $\bigl(M_R(\mathcal E)\bigr)^{-1}(-e_i+e_j)$ means a vector $U$ which satisfies \eqref{eq-resist}. Notice that such a vector always exists. \begin{proposition}\cite{KW 2011} \label{th: about inverse resp} Let $\mathcal E$ be a connected electrical network. Denote by $M'_R(\mathcal E)$ the matrix obtained from $M_R(\mathcal E)$ by deleting the last row and the last column; then $M'_R(\mathcal E)$ is invertible. The matrix elements of its inverse are given by the formula \begin{equation*} M'_R(\mathcal E)^{-1}_{ij}=\begin{cases} R_{in},\, \text{if}\,\, i=j \\ \frac{1}{2}(R_{in}+R_{jn}-R_{ij}),\, \text{if}\,\, i\not = j.\\ \end{cases} \end{equation*} \end{proposition} Proposition \ref{th: about inverse resp} and Lemma \ref{lem:eff-resist} show that the resistance and the response matrices determine each other; therefore, Theorem \ref{gen_el_1} remains valid if we replace the response matrix with the resistance matrix in its statement. \begin{remark} The formula from Proposition \ref{th: about inverse resp} is well known in different areas of mathematics. It appeared in the literature under the names the Gromov product, the Farris transform, and the Covariance mapping between the Cut and the Covariance cones; see \cite{GandC} for more information. We are planning to explore the properties of the resistance matrix from these points of view in future publications. \end{remark} Proposition \ref{th: about inverse resp} provides a simple proof that the effective resistances $R_{ij}$ satisfy the triangle inequality. \begin{theorem} \label{th:about metric} Let $\mathcal E$ be an electrical network on a connected graph $\Gamma$. Then for any three nodes $k_1, k_2$ and $k_3$ the triangle inequality holds: $$R_{k_1k_3}+R_{k_2k_3}-R_{k_1k_2} \geq 0.$$ Hence the set of all $R_{vw}$ defines a metric on the nodes of $\Gamma$.
\end{theorem} \begin{proof} Let $\mathcal E_{k_1k_2k_3}$ be the connected electrical network obtained from $\mathcal E$ by declaring the vertices $k_1, k_2, k_3$ to be the boundary nodes, while the remaining vertices are declared to be inner, and let $M_R(\mathcal E_{k_1k_2k_3})$ be its response matrix. Then, according to Proposition \ref{th: about inverse resp}, we have $$M'_R(\mathcal E_{k_1k_2k_3})^{-1}_{k_1k_2}=\frac{1}{2}(R_{k_1k_3}+R_{k_2k_3}-R_{k_1k_2}),$$ therefore to get the statement it is enough to verify that $M'_R(\mathcal E_{k_1k_2k_3})^{-1}_{k_1k_2} \geq 0.$ Indeed, the matrix $M'_R(\mathcal E_{k_1k_2k_3})$ has the following form: \begin{equation*} M'_R(\mathcal E_{k_1k_2k_3})=\left(\begin{matrix} x_{k_1k_1} & x_{k_1k_2} \\ x_{k_1k_2} & x_{k_2k_2} \end{matrix} \right) = \left(\begin{matrix} -x_{k_1k_2}-x_{k_1k_3} & x_{k_1k_2} \\ x_{k_1k_2} & -x_{k_1k_2}-x_{k_2k_3} \end{matrix} \right). \end{equation*} By a direct computation we obtain that \begin{equation*} M'_R(\mathcal E_{k_1k_2k_3})^{-1}_{k_1k_2}=\frac{-x_{k_1k_2}}{\det M'_R(\mathcal E_{k_1k_2k_3}) }=\frac{-x_{k_1k_2}}{x_{k_1k_2}x_{k_2k_3}+x_{k_1k_3}x_{k_1k_2}+x_{k_1k_3}x_{k_2k_3}}. \end{equation*} By the definition of the response matrix, all $x_{k_ik_j} \leq 0$ for $i\neq j$, which implies the statement of the theorem. \end{proof} To describe the properties of the resistance metric associated with a connected circular electrical network we will provide a formula for the embedding of $E_n$ into $\mathrm{Gr}_{\geq 0}(n-1, 2n)$ described in Theorem \ref{th: main_gr} which uses the resistance matrix instead of the response matrix. Let $\mathcal E$ be a connected network with the resistance matrix $R_{\mathcal E}$; define a point in $Gr(n-1,2n)$ associated to it as the row space of the matrix \begin{equation} \label{eq:omega_n,r} \Omega_{R}(\mathcal E)=\left(\begin{matrix} 1 & m_{11} & 1 & -m_{12} & 0 & m_{13} & 0 & \ldots \\ 0 & -m_{21} & 1 & m_{22} & 1 & -m_{23} & 0 & \ldots \\ 0 & m_{31} & 0 & -m_{32} & 1 & m_{33} & 1 & \ldots \\ \vdots & \vdots & \vdots & \vdots & \vdots & \vdots & \vdots & \ddots \end{matrix}\right), \end{equation} where $$m_{ij}= -\frac{1}{2}(R_{i,j}+R_{i+1,j+1}-R_{i,j+1}-R_{i+1,j}).$$ Notice that the matrix $M(R_{\mathcal E})=(m_{ij})$ is symmetric and the sum of the matrix entries in each row is zero; in other words, it looks like a response matrix of an electrical network. There is a reason for this, as was discovered by R. Kenyon and D. Wilson. \begin{theorem} \cite{KW 2011} \label{ken-wen} Let $\mathcal E$ be a connected circular electrical network and let $\mathcal E^{*}$ be its dual. Then the following holds: \begin{equation} \label{form:xij} x^{*}_{ij}=-\frac{1}{2}(R_{i,j}+R_{i+1,j+1}-R_{i,j+1}-R_{i+1,j}), \end{equation} \begin{equation} \label{form:rij} R^{*}_{ij}=-\sum \limits_{i'<j': \ D_{S_{ij}}(i', j') \neq 0}x_{i'j'}, \end{equation} where $x^*_{ij}$ are the matrix elements of the response matrix of the dual network $\mathcal E^*$. \end{theorem} Since $M(R_{\mathcal E})$ is a degenerate matrix, the Pluecker coordinates of the point of $Gr(n-1, 2n)$ associated with $\Omega_{R}(\mathcal E)$ can be calculated as the maximal size minors of the matrix $\Omega'_{ R}(\mathcal E)$ obtained from $\Omega_{R}(\mathcal E)$ by deleting, for example, the last row. \begin{theorem} \cite{BGGK}, \cite{CGS} \label{th:aboout omeganr} The row space of $\Omega_{R}(\mathcal E)$ defines the same point in $Gr_{\geq 0}(n-1,2n)$ as the point $\Omega(\mathcal E)$ defined by the Lam embedding of Theorem \ref{th: main_gr}.
In particular, the Pl\"ucker coordinate $\Delta_{24\dots 2n-2}(\Omega_{R}(\mathcal E))$ is nonzero if and only if $\mathcal E$ is a connected circular electrical network. Putting this together, \[ \Omega_{R}(\mathcal E)=\Omega(\mathcal E^{*})s=\Omega(\mathcal E),\] where $s$ is the shift operator \[s=\left(\begin{matrix} 0 & 1 & 0 & 0 & \cdots & 0 \\ 0 & 0 & 1 & 0 & \cdots & 0 \\ 0 & 0 & 0 & 1 & \cdots & 0 \\ \vdots & \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & 0 & 0 & \cdots & 1 \\ (-1)^{n} & 0 & 0 & 0 & \cdots & 0 \end{matrix}\right).\]\label{cyclic operator s} See the proof of \cite[Theorem 5.6]{BGGK} for more details. \end{theorem} Many interesting inequalities involving $R_{ij}$ follow from the positivity of the Pl\"ucker coordinates of the point represented by $\Omega_{R}(\mathcal E)$. For some of them we have found an explicit combinatorial meaning; others are still waiting to be interpreted. Below we deduce the Kalmanson property of the metric $R_{\mathcal E}$ as a consequence of the positivity described above. This fact was established earlier in \cite{F} using different methods. \begin{theorem} \label{charkalm} Let $\mathcal E$ be a connected circular electrical network and let $i_1, i_2, i_3, i_4$ be any four nodes in the circular order shown in Figure \ref{kalmanson}. Then the Kalmanson inequalities hold: \begin{equation} \label{kal_1} R_{i_1i_3}+R_{i_2i_4}\geq R_{i_2i_3}+R_{i_1i_4}, \end{equation} \begin{equation} \label{kal_2} R_{i_1i_3}+R_{i_2i_4}\geq R_{i_1i_2}+R_{i_3i_4}. \end{equation} \end{theorem} \begin{proof} Let $\mathcal E_{i_1i_2i_3i_4}$ be the electrical network obtained from $\mathcal E$ by declaring the vertices $i_1, i_2, i_3, i_4$ to be the boundary nodes and the rest of the vertices to be internal. Clearly $\mathcal E_{i_1i_2i_3i_4}$ is a connected circular electrical network in $E_4$; let $\Omega'_{R}(\mathcal E_{i_1i_2i_3i_4})$ be its matrix defined by \eqref{eq:omega_n,r}. By a direct computation we conclude that $$\Delta_{567}\bigl(\Omega'_{ R}(\mathcal E_{i_1i_2i_3i_4})\bigr)=\frac{1}{2}(R_{i_1i_3}+R_{i_2i_4} -R_{i_1i_2}-R_{i_3i_4}),$$ $$\Delta_{123}\bigl(\Omega'_{R}(\mathcal E_{i_1i_2i_3i_4})\bigr)=\frac{1}{2}(R_{i_1i_3}+R_{i_2i_4}- R_{i_2i_3}-R_{i_1i_4}).$$ Taking into account that all the minors $\Delta_{I}\bigl(\Omega'_{ R}(\mathcal E_{i_1i_2i_3i_4})\bigr)$ are non-negative, we obtain that the Kalmanson inequalities hold for the metric defined by the resistances. \end{proof} \begin{figure}[h!] \center \includegraphics[width=60mm]{kalmanson.jpg} \caption{$4$-point Kalmanson property} \label{kalmanson} \end{figure} \begin{definition} Let $X$ be a finite set and let $D$ be a metric on it. If there is a circular order on $X$ such that the inequalities from the theorem above hold for any four points of $X$ in this circular order, we call $D$ a {\it Kalmanson metric}. \end{definition} Therefore the resistance metric $R_{\mathcal E}$ defined by a circular electrical network $\mathcal E$ is a Kalmanson metric. \subsection{Circular split systems and electrical networks} \label{sec:splitmetr} \begin{definition} A {\it split} $S$ of a set $X=\{1, \dots, n\}$ is a partition of $X$ into two non-empty, disjoint subsets $A$ and $B$, $A \sqcup B = X$. A split is called trivial if either $A$ or $B$ has cardinality $1$. A collection of splits $\mathcal{S}$ is called a {\it split system}.
\end{definition} The pseudometric associated to a split $S$ is defined by the following matrix $D_S$: \begin{equation*} D_S(k,l)=\begin{cases} 1, & \text{if } |A \cap \{k,l\}|=1, \\ 0, & \text{otherwise.} \end{cases} \end{equation*} A circular order of $X$ can be drawn as a polygon with the elements of $X$ labeling the sides. A {\it circular split system} is a split system for which a circular order exists such that all the splits can be simultaneously drawn as sides or diagonals of the labeled polygon. Trivial splits are sides of the polygon, separating the label of that side from the rest, while a non-trivial split $A|B$ is a diagonal separating the sides labeled by $A$ and $B$. Any circular split system can be visualized by such a polygonal representation, or alternatively by a representation that uses a set of parallel edges for each split; the latter representations are called {\it circular split networks}. A set of parallel edges displays a split $A|B$ if the removal of those edges leaves two connected components with respective sets of terminals $A$ and $B$; see Figure \ref{splex}. \begin{definition} A {\it weighted circular split system} is a circular split system with a positive weight attached to each set of parallel edges displaying a split. Such a weighted circular split system defines a metric on the set $X$ by the formula \begin{equation} \label{decom-split} D_S=\sum \limits_{A|B \in S } \alpha_{A|B}D_{A|B}, \end{equation} where $\alpha_{A|B}$ is the weight of the set of parallel edges which defines the split $A|B$. \end{definition} The split $(\{1,2,3,4\}|\{5,6,7,8\})$ in Figure \ref{splex} corresponds to the set of edges with the weight $\alpha_1$. \begin{figure}[h!] \center \includegraphics[width=90mm]{1.jpeg} \caption{Circular split systems and their polygon representations} \label{splex} \end{figure} The following theorem gives a beautiful characterization of the set of Kalmanson metrics \cite{BD}. \begin{theorem} \label{BD} A metric $d$ is a Kalmanson metric with respect to a circular order $c$ if and only if $d = D_S$ for a unique weighted circular split system $S$ (not necessarily containing all trivial splits) with each split $A|B$ of $S$ having both parts contiguous in the circular order $c$. \end{theorem} Since the resistance metric defined by a planar circular electrical network satisfies the Kalmanson condition, it corresponds to a unique weighted circular split system, which we call an {\it electrical circular split system} following \cite{F} or, in view of Theorem \ref{charkalm}, an {\it electrical Kalmanson metric}. We now introduce a slightly different way of labeling the splits, which will be useful later. \begin{definition} \label{circ-system} Let $X$ be a set of nodes on a circle labeled clockwise by the symbols $1$ to $n$. Define the dual set $X^d$ consisting of nodes labeled by the symbols $\overline{1}$ to $\overline{n}$ in such a way that each $\overline{j}$ lies between $j$ and $j+1$. Then each chord connecting $\overline{i}$ and $\overline{j}$ defines a split of $X$, which we denote by $S_{ij}$. It is clear that the set of all $S_{ij}$ forms a circular split system, which we denote by $\mathcal{S}_c$. \end{definition} The following theorem gives a formula for the weights of the weighted circular split system in Theorem \ref{BD}. \begin{theorem} \cite{GandC}, \cite{HKP} \label{th:decomp} Let $X$ be a set as in Definition \ref{circ-system} and let $D=(d_{ij})$ be a Kalmanson metric defined on $X$.
Then the following split decomposition holds: \begin{equation} \label{decom} D=\sum \limits_{S_{ij} \in \mathcal{S}_c } \omega_{ij}D_{S_{ij}}, \end{equation} where the coefficients $\omega_{ij}$ are defined by the formula $$\omega_{ij}=\frac{1}{2}(d_{i,j}+d_{i+1,j+1}-d_{i,j+1}-d_{i+1,j}).$$ \end{theorem} \begin{definition} Define a matrix $M(D)$ by \begin{align} M(D)_{ij} = \begin{dcases*} \sum_{k \not= i}\omega_{ik}, & if $i = j$,\\ -\omega_{ij}, & if $i \not= j$. \end{dcases*} \end{align} Notice that $\sum_{k \not= i}\omega_{ik}=-\omega_{ii}=d_{i,i+1}.$ \end{definition} We can now state our main results, which give a complete characterization of the Kalmanson metrics that arise as resistance metrics of connected circular electrical networks. \begin{theorem} \label{th: dual} Let $D$ be a Kalmanson metric matrix. Then $D$ is the effective resistance matrix of a connected circular electrical network $\mathcal E$ if and only if the matrix $\Omega_{D}$ constructed from $D$ according to formula \eqref{eq:omega_n,r} defines a point $X$ in $\mathrm{Gr}_{\geq 0}(n-1, 2n)$ and the Pl\"ucker coordinate $\Delta_{24\dots 2n-2}(X)$ does not vanish. \end{theorem} \begin{proof} Necessity follows from Theorem \ref{th:aboout omeganr}. To prove sufficiency, assume that $\Omega_{D}$ defines a point $X$ in $\mathrm{Gr}_{\geq 0}(n-1, 2n)$ and $\Delta_{24\dots 2n-2}(\Omega'_{D}) \neq 0$. By a direct computation we conclude that the matrix $\Omega_{D}s^{-1}$ has the form \eqref{omega_eq} and $\Delta_{13\dots 2n-3}\bigl((\Omega_{D}s^{-1})'\bigr)\neq 0$. Since the action of $s$ preserves non-negativity by \cite{L3}, the matrix $\Omega_{D}s^{-1}$ defines a point of $\mathrm{Gr}_{\geq 0}(n-1, 2n)$. Using the surjectivity of Lam's embedding (see Theorem \ref{about sur}) and Theorem \ref{th:aboout omeganr}, we obtain that both $\Omega_{D}$ and $\Omega_{D}s^{-1}$ are associated with connected networks. Finally, due to Theorem \ref{th:aboout omeganr} and Theorem \ref{ken-wen}, we have that the matrix $M(D)=(m_{ij})$, where \[m_{ij}=-\dfrac{1}{2}(d_{i,j}+d_{i+1,j+1}-d_{i,j+1}-d_{i+1,j}),\] can be identified with the response matrix of a network $\mathcal E^{*}$ associated with $\Omega_{D}s^{-1}$. It remains to prove that the $d_{ij}$ are equal to the effective resistances $R_{ij}$ of the network $\mathcal E$ associated with $\Omega_{D}$. Since the metrics $d_{ij}$ and $R_{ij}$ are both Kalmanson and the weights in their split decompositions coincide, $\omega_{ij}=-m_{ij}$ (see formula \eqref{decom}), we conclude that $$d_{ij}=-\sum \limits_{i'<j': \ D_{S_{ij}}(i', j') \neq 0} m_{i'j'}= R_{ij}.$$ \end{proof} \begin{theorem} \label{th dual2} Let $D$ be a Kalmanson metric on $X$. Then $D$ is the electrical Kalmanson metric of a connected circular electrical network $\mathcal E$ if and only if the rank of $M(D)$ is equal to $n-1$ and every circular minor of the matrix $M(D)$ is non-negative after multiplication by $(-1)^k$, where $k$ is the size of the minor. In this case, the matrix $M(D)$ is the response matrix of the dual of the electrical network $\mathcal E$. \end{theorem} \begin{proof} If every $k \times k$ circular minor of the matrix $M(D)$ is non-negative after multiplication by $(-1)^k$, then $M(D)$ is the response matrix of a network $\mathcal E'$ by Theorem \ref{Set of response matrices all network}. Given the condition on the rank of $M(D)$, we conclude from Theorem \ref{Set of response matrices all network} that $\mathcal E'$ is connected.
The matrix $\Omega(\mathcal E')$ defines a point in $\mathrm{Gr}_{\geq 0}(n-1, 2n)$, hence $\Omega(\mathcal E')s$ does as well. According to Theorem \ref{th:aboout omeganr}, $$\Omega(\mathcal E')s=\Omega(\mathcal E)=\Omega_{R}(\mathcal E)$$ for a connected network $\mathcal E$ whose resistance matrix $R_{\mathcal E}$ satisfies $M(R_{\mathcal E})=M(D)$; moreover, $\mathcal E'=\mathcal E^{*}$. It remains to prove that $R_{ij}=d_{ij}$; this can be done as explained in the proof of Theorem \ref{th: dual}. Suppose now that $D$ comes from the electrical circular split system associated with a connected circular electrical network $\mathcal E$. Then, due to Theorem \ref{th:aboout omeganr}, we conclude that $\Omega_{R}(\mathcal E)s^{-1}$ defines a point in $\mathrm{Gr}_{\geq 0}(n-1, 2n)$ associated with the connected network $\mathcal E^{*}$, and $M(D)=M_R(\mathcal E^*)$. The properties of $M(D)$ in the statement of the theorem follow from Theorem \ref{Set of response matrices all network}. \end{proof} A few remarks are in order. \begin{remark} The resistance metric has the following important property: its square root $\sqrt {D(i,j)}$ is $L_2$-embeddable \cite{K}; hence the electrical Kalmanson metrics have this property as well. There is a well-known condition for $L_2$-embeddability stated in terms of the minors of the Cayley-Menger matrix \cite{GandC}. It would be interesting to see whether this condition alone characterizes the electrical Kalmanson metrics within the set of all Kalmanson metrics. \end{remark} \begin{remark} Formula \eqref{decom} for $R_{ij}$ can be obtained from Theorem \ref{ken-wen}, since $\mathcal E^{**}=\mathcal E'$, where $\mathcal E'$ is obtained from $\mathcal E$ by shifting the labels of the nodes of $\mathcal E$ by $1$ clockwise: \begin{align} \label{formdual} R_{ij}=R^{**}_{i-1j-1}=-\sum \limits_{\bar{i}'<\bar{j}': \ D_{S_{i-1j-1}}(\bar{i}', \bar{j}') \neq 0}x^{*}_{\bar{i}'\bar{j}'}=-\sum \limits_{i'<j': \ D_{S_{ij}}(i', j') \neq 0}x^{*}_{i'j'}= \\ \nonumber \hspace{-40mm} {\sum \limits_{i'<j': \ D_{S_{ij}}(i', j') \neq 0}\frac{1}{2}(R_{i',j'}+R_{i'+1,j'+1}-R_{i',j'+1}-R_{i'+1,j'})}, \end{align} where $\{1, \dots, n\}$ and $\{\overline1, \dots, \overline n\}$ are the labels of the nodes of $\mathcal E$ and $\mathcal E^{*}$ respectively. \end{remark} \begin{figure}[h!] \center \includegraphics[width=80mm]{toformualdual.jpg} \caption{Labeling in formula \eqref{formdual}} \label{formduallabels} \end{figure} \begin{remark} Many statements in this paper can be extended to connected cactus networks. In particular, one can show that the effective resistances of a connected cactus network give rise to a Kalmanson pseudometric, and that a Kalmanson pseudometric matrix $D$ can be identified with the resistance matrix of a connected cactus network if and only if conditions similar to those in Theorem \ref{th: dual} hold. This would allow one to conclude that, for a Kalmanson metric, the non-vanishing condition on a certain Pl\"ucker coordinate in Theorem \ref{th: dual} and the rank condition in Theorem \ref{th dual2} are automatically satisfied. \end{remark} \begin{remark}\label{plueclerformetric} Let $X$ and $D$ be as in Theorem \ref{th:decomp}. Then the matrix $M(D)$ defined above is the response matrix of a connected electrical network, not necessarily circular: namely, it is symmetric, all its off-diagonal elements are non-positive, and the sum of the elements in each row is zero. The connection between the resistance matrix of this network and the original metric $D$ is an interesting question.
Moreover, such a matrix defines a point of $$\mathrm{IG}(n-1, 2n)\subset \mathrm{Gr}(n-1, 2n)$$ according to Theorem \ref{th:aboout omeganr}. Its Pl\"ucker coordinates are interesting invariants of the metric $D$. We plan to address these questions in a future publication. \end{remark} \begin{figure}[h!] \center \includegraphics[width=70mm]{treedual.jpg} \caption{A tree network $\mathcal E$ and its dual network $\mathcal E^{*}$;\\ all conductances are equal to $1$} \label{treedaul} \end{figure} We provide an example to illustrate the theorems above. \begin{example} Let $T$ be a tree with four leaves as in Figure \ref{treedaul}, with all edge weights equal to one. The resistance matrix of the tree is \[D= \begin{pmatrix} 0& 3 & 3& 2 \\ 3& 0& 2& 3\\ 3& 2& 0& 3 \\ 2&3&3&0 \end{pmatrix}. \] It coincides with the geodesic distance matrix of the tree. Its split decomposition is easily seen to be \[D=D_{S_{12}}+D_{S_{13}}+D_{S_{14}}+D_{S_{23}}+D_{S_{34}}=D_{2|134}+D_{23|14}+D_{1|234}+D_{3|124}+D_{4|123}.\] The response matrix of the dual network multiplied by $-1$ is \[ \begin{pmatrix} -3& 1& 1& 1 \\ 1& -2& 1& 0\\ 1& 1& -3 & 1 \\ 1&0&1&-2 \end{pmatrix}. \] Since the dual network has no internal vertices, its response matrix is simply the Kirchhoff (Laplacian) matrix of the dual graph. Our correspondence between the splits and the pairs of dual labels is given in the following table, matching the coefficients in the split decomposition of $D$ with the entries of the response matrix of the dual network: \[\begin{tabular}{ |c|c|c|c|c| } \hline $(\bar1\bar2)$& $(\bar1\bar3)$& $(\bar1\bar4)$& $(\bar2\bar3)$&$ (\bar3\bar4)$ \\ \hline $(2|134)$ & $(14|23)$&$ (1|234)$&$(3|124)$&$(4|123)$ \\ \hline \end{tabular}\] \end{example} \section{Reconstruction of network topology} \label{sec:rec} As mentioned in the introduction, one of the important problems in applied mathematics can be formulated as follows: \begin{problem} \label{black-box} Suppose we are given a matrix $D=(d_{ij})$ whose entries are the distances between $n$ terminal nodes of an unknown weighted graph $G$. It is required to recover the graph $G$ and the edge weights consistent with the given matrix. \end{problem} If the terminal nodes are the boundary vertices of a circular electrical network $\mathcal E$ and $D=R_{\mathcal E}$, then, due to Proposition \ref{th: about inverse resp}, Problem \ref{black-box} can be identified with the {\it black box problem}, see \cite{CIW}. It is also known as the discrete Calder\'on problem or the discrete inverse electrical impedance tomography problem. If the graph $G$ is an unknown tree $T$ and the entries of $D_T=(d_{ij})$ are equal to the weights of the paths between vertices of $T$, then Problem \ref{black-box} can be identified with the minimal tree reconstruction problem, which plays an important role in phylogenetics \cite{HRS}. \begin{definition} We call a matrix $D=(d_{ij})$ {\it tree realizable} if there is a tree $T$ such that \begin{itemize} \item $D=D_T$, i.e.,
the $d_{ij}$ are equal to the weights of the paths between the terminal nodes of $T$; \item the set of terminal nodes contains all leaves of $T$; \item $T$ has no vertices of degree $2$. \end{itemize} \end{definition} \begin{theorem} \cite{CR}, \cite{HY} \label{minimaltree} If a matrix $D$ is tree realizable, then there is a unique minimal tree $T_{min}$ such that $D=D_{T_{min}}$, where the minimality of $T_{min}$ means that $|E(T_{min})|<|E(T)|$ for any other $T$ with the property $D=D_T$. \end{theorem} It is not difficult to see that if the graph of a circular electrical network $\mathcal E$ is a tree $T$ and its boundary nodes contain all the leaves of the tree, then $R_{\mathcal E}=D_{\bar T}$, where $\bar T$ and $T$ are identical as unweighted trees and the weights of corresponding edges are reciprocal. This observation allows us to use the algorithm for the reconstruction of electrical networks from a given resistance matrix \cite{L} in phylogenetics. Our reconstruction method is different from the methods suggested in \cite{F}, \cite{FS}. \begin{definition} The median graph of a circular network $\mathcal E$ with underlying graph $\Gamma$ is the graph $\Gamma_M$ whose internal vertices are the midpoints of the edges of $\Gamma$, with two internal vertices connected by an edge if the corresponding edges of the original graph $\Gamma$ are adjacent. The boundary vertices of $\Gamma_M$ are defined as the intersections of the natural extensions of the edges of $\Gamma_M$ with the boundary circle. Since the interior vertices of the median graph have degree four, we can define the strands of the median graph as the paths which always go straight through every degree-four vertex. The strands naturally define a permutation $\tau(\mathcal E)$ on the set of points $\{1, \dots, 2n\}$. \end{definition} \begin{figure}[h!] \centering \includegraphics[width=0.4\textwidth]{strad.jpg} \caption{A star-shaped network, its median graph and the strand permutation $\tau(\mathcal E)=(14)(36)(25)$} \label{fig:triangle} \end{figure} \begin{theorem} \cite{CIW} \label{strand} A circular electrical network is defined uniquely, up to electrical transformations, by its strand permutation. \end{theorem} Denote by $A_i$ the columns of the matrix $\Omega_{R}(\mathcal E)$ and define the column permutation $g(\mathcal E)$ as follows: $g(\mathcal E)(i)=j$ if $j$ is the minimal number such that $A_i \in \mathrm{span}(A_{i+1}, \dots, A_{j})$, where the indices are taken modulo $2n$. \begin{theorem}\label{permutationj} The following holds: $$g(\mathcal E)+1=\tau(\mathcal E).$$ \end{theorem} \begin{proof} This result follows from the fact that $\Omega_{R}(\mathcal E)$ is an explicit parametrization of the Lam embedding; see Section 4.6 of \cite{L}. \end{proof} \begin{definition} A circular electrical network is called minimal if the strands of its median graph have no self-intersections, any two strands intersect in at most one point, and the median graph has no loops or lenses; see Figure \ref{fig:loop}. \end{definition} \begin{figure}[h!]
\centering \includegraphics[width=0.3\textwidth]{loop.jpg} \caption{A lens obtained from the intersection of strands $\alpha_1$ and $\alpha_2$} \label{fig:loop} \end{figure} Based on Theorem \ref{permutationj}, we suggest the following reconstruction algorithm: \begin{itemize} \item For a given matrix $R_{\mathcal E}$, construct the matrix $\Omega_{R}(\mathcal E)$; \item Using $\Omega_{R}(\mathcal E)$, calculate the strand permutation $\tau(\mathcal E)$; \item The permutation $\tau(\mathcal E)$ defines a strand diagram, which can be transformed into the median graph of a \textit{minimal} circular electrical network $\mathcal E$ using the procedure described in \cite{CIW}; \item From the median graph, recover the network $\mathcal E$ as in \cite{CIW} or \cite{F}. \end{itemize} \begin{theorem} \cite{CIW}\label{electrominim} Any circular electrical network is equivalent to a minimal network. Any two minimal circular electrical networks which share the same response matrix, and hence the same effective resistance matrix, can be converted into each other using only star-triangle transformations. As a consequence, any two equivalent minimal circular electrical networks have the same number of edges, which is less than or equal to $\frac{n(n-1)}{2}$. If two minimal circular electrical networks are equivalent, they have the same strand permutation. \end{theorem} Informally, the theorem says that by removing all loops and internal vertices of degree $1$ and reducing all parallel and series connections, any network can be converted into a minimal network. Let us compare this last result with Theorem \ref{minimaltree}. \begin{theorem} \label{tomin} If there is a tree $T$ among the graphs representing a minimal circular electrical network $\mathcal E \in E_n$, then it is unique. \end{theorem} \begin{proof} Indeed, it is easy to see that $R_T$ is tree realizable, by both $\bar T$ and $T_{min}$; therefore $|E(T)|\geq |E(T_{min})|$. Since $T$ and $\bar T_{min}$ have the same effective resistance matrix, they are equivalent. Therefore, due to Theorem \ref{electrominim}, $\bar T_{min}$ can be transformed into $T$, which implies $|E(T_{min})|\geq |E(T)|$; the statement now follows from the uniqueness of the minimal tree. \end{proof} Based on Theorem \ref{tomin}, we propose the following algorithm for the reconstruction of the minimal tree from a given tree metric $D_T$: \begin{itemize} \item Perform all the steps of the algorithm described above to obtain a minimal network $\mathcal E$ such that $R_{\mathcal E}=D_T$; \item Transform $\mathcal E$ into a minimal tree by a sequence of star-triangle transformations. By Theorem 1 of \cite{Bu}, we expect that, heuristically, this conversion can be carried out monotonically by changing all the triangles into stars. \end{itemize} \begin{remark} A more advanced technique, the chamber ansatz, applied to $\Omega_{R}(\mathcal E)$ gives an algorithm for recovering not only the topology of the network but also the weights of the edges. We will describe this method in a forthcoming work \cite{Ka}. \end{remark} We conclude with a short computational sketch of the first step of the algorithm, followed by an example.
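The following short Python sketch is our own illustration; the function name \texttt{omega\_prime\_R} and the cyclic sign conventions reflect our reading of formula \eqref{eq:omega_n,r}. It constructs the matrix $\Omega'_{R}$ directly from a metric matrix $D$; applied to the tree metric of the example below, it reproduces the matrix $\Omega'_{R}(\mathcal E)$ displayed there. The remaining steps of the algorithm (extracting the strand permutation and the median graph) follow \cite{L} and \cite{CIW} and are not reproduced here.
\begin{verbatim}
import numpy as np

def omega_prime_R(D):
    # Build the (n-1) x 2n matrix Omega'_R from a metric matrix D;
    # the last row of Omega_R is omitted, as in the text.  Indices are cyclic.
    D = np.asarray(D, dtype=float)
    n = D.shape[0]
    m = np.empty((n, n))
    for i in range(n):
        for j in range(n):
            ip, jp = (i + 1) % n, (j + 1) % n
            m[i, j] = -0.5 * (D[i, j] + D[ip, jp] - D[i, jp] - D[ip, j])
    Om = np.zeros((n - 1, 2 * n))
    for i in range(n - 1):
        Om[i, 2 * i] = 1.0          # the two unit entries in the odd columns
        Om[i, 2 * i + 2] = 1.0
        for j in range(n):
            # signed entries m_{ij} in the even columns
            Om[i, 2 * j + 1] = (-1) ** (i + j) * m[i, j]
    return Om

# The tree metric from the example below:
D = [[0, 3, 3, 2],
     [3, 0, 2, 3],
     [3, 2, 0, 3],
     [2, 3, 3, 0]]
print(omega_prime_R(D).astype(int))
\end{verbatim}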
\begin{figure}[H] \centering \includegraphics[width=1.2\textwidth]{rectop.jpg} \caption{An example of the reconstruction of a network topology} \label{fig:recon} \end{figure} \begin{example} Consider the dissimilarity matrix \[D= \begin{pmatrix} 0& 3 & 3& 2 \\ 3& 0& 2& 3\\ 3& 2& 0& 3 \\ 2&3&3&0 \end{pmatrix}. \] Then the matrix $\Omega'_{R}(\mathcal E)$, representing a point of $\mathrm{Gr}_{\geq 0}(n-1, 2n)$, has the form \[\Omega'_{R}(\mathcal E)= \begin{pmatrix} 1& 3 & 1& 1 & 0 & -1& 0 &1 \\ 0& 1 & 1& 2 & 1 & 1& 0 &0\\ 0& -1 & 0& 1 & 1 & 3& 1 &1 \end{pmatrix}. \] By a direct computation we verify that $g(\mathcal E)= [4\,6\,5\,7\,8\,2\,1\,3]$ in the one-window notation used in \cite{L}; therefore the strand permutation is $\tau(\mathcal E)=(15)(27)(36)(48)$. This strand permutation defines a minimal network, as shown in Figure \ref{fig:recon}, which can be transformed into a tree by applying one star-triangle transformation. \end{example} \begin{thebibliography}{9999999} \bibitem{BD} H.-J. Bandelt and A. Dress, Split decomposition: a new and useful approach to phylogenetic analysis of distance data, Molecular Phylogenetics and Evolution 1 (1992), 242--252. \bibitem{BD1} H.-J. Bandelt and A. Dress, A canonical decomposition theory for metrics on a finite set, Advances in Mathematics 92 (1992), no. 1, 47--105. \bibitem{Bu} P. Buneman, A note on the metric properties of trees, J. Combin. Theory Ser. B 17 (1974), no. 1, 48--50. \bibitem{BGGK} B. Bychkov, V. Gorbounov, L. Guterman, A. Kazakov, Symplectic geometry of electrical networks, Journal of Geometry and Physics 207 (2025), 105323. \bibitem{BGKT} B. Bychkov, V. Gorbounov, A. Kazakov, D. Talalaev, Electrical networks, Lagrangian Grassmannians, and symplectic groups, Moscow Mathematical Journal 23 (2023), no. 2, 133--167. \bibitem{BGK} B. Bychkov, L. Guterman, A. Kazakov, Electrical networks via circular minors, in preparation. \bibitem{CGS} S. Chepuri, T. George and D. E. Speyer, Electrical networks and Lagrangian Grassmannians, https://arxiv.org/abs/2106.15418 (2021). \bibitem{CR} J. C. Culberson, P. Rudnicki, A fast algorithm for constructing trees from distance matrices, Information Processing Letters 30 (1989), no. 4, 215--220. \bibitem{CIM} E. B. Curtis, D. Ingerman, J. A. Morrow, Circular planar graphs and resistor networks, Linear Algebra and its Applications 283 (1998), no. 1-3, 115--150. \bibitem{CIW} E. B. Curtis, J. A. Morrow, Inverse Problems for Electrical Networks, World Scientific, Vol. 13 (2000). \bibitem{GandC} M. Deza, M. Laurent, Geometry of Cuts and Metrics, Springer, Berlin, https://doi.org/10.1007/978-3-642-04295-9. \bibitem{CdV} Y. Colin de Verdi\`ere, R\'eseaux \'electriques planaires I, Comment. Math. Helv. 69 (1994), no. 3, 351--374. \bibitem{DF} S. Devadoss, S. Forcey, Compactifications of phylogenetic systems and electrical networks, https://doi.org/10.48550/arXiv.2408.03431. \bibitem{DP} S. Devadoss, S. Petti, A space of phylogenetic networks, https://doi.org/10.48550/arXiv.1607.06978. \bibitem{F} S. Forcey, Circular planar electrical networks, split systems, and phylogenetic networks, https://doi.org/10.48550/arXiv.2108.00550. \bibitem{FS} S. Forcey, D. Scalzo, Phylogenetic networks as circuits with resistance distance, Front. Genet. 11 (2020), 586664, https://doi.org/10.3389/fgene.2020.586664. \bibitem{HY} S. L. Hakimi, S. S. Yau, Distance matrix of a graph and its realizability, Quarterly of Applied Mathematics
22 (1965), no. 4, 305--317. \bibitem{HRS} D. H. Huson, R. Rupp, C. Scornavacca, Phylogenetic Networks: Concepts, Algorithms and Applications, Cambridge University Press (2010). \bibitem{Ka} A. Kazakov, Inverse problems related to electrical networks and the geometry of non-negative Grassmannians, in preparation. \bibitem{KW 2011} R. W. Kenyon and D. B. Wilson, Boundary partitions in trees and dimers, Trans. Amer. Math. Soc. 363 (2011), 1325--1364. \bibitem{Kl} D. J. Klein, M. Randi\'c, Resistance distance, J. Math. Chem. 12 (1993), 81--95, https://doi.org/10.1007/BF01164627. \bibitem{K} D. Klein, H. Zhu, Distances and volumina for graphs, Journal of Mathematical Chemistry 23 (1998), 179--195, https://doi.org/10.1023/A:1019108905697. \bibitem{HKP} A. Kleinman, M. Harel, L. Pachter, Affine and projective tree metric theorems, Ann. Comb. 17 (2013), 205--228, https://doi.org/10.1007/s00026-012-0173-2. \bibitem{L} T. Lam, Electroid varieties and a compactification of the space of electrical networks, Adv. Math. 338 (2018), 549--600. \bibitem{L3} T. Lam, Totally nonnegative Grassmannian and Grassmann polytopes, Current Developments in Mathematics, Vol. 2014. \bibitem{SS} D. Speyer, B. Sturmfels, The tropical Grassmannian, Adv. Geom. 4 (2004), 389--411. \bibitem{Z} A. Zelevinsky, What is $\ldots$ a cluster algebra?, Notices of the AMS 54 (2007), no. 11, 1494--1495. \end{thebibliography} \end{document}
2205.15418v1
http://arxiv.org/abs/2205.15418v1
Asymptotic welfare performance of Boston assignment algorithms
\documentclass{amsart} \newtheorem{defn}{Definition}[section] \newtheorem{remark}[defn]{Remark} \newtheorem{proposition}[defn]{Proposition} \newtheorem{lemma}[defn]{Lemma} \newtheorem{theorem}[defn]{Theorem} \newtheorem{corollary}[defn]{Corollary} \newtheorem{example}[defn]{Example} \usepackage{fourier} \usepackage{enumerate} \usepackage{graphicx} \newcommand{\mw}[1]{\textcolor{blue}{\textit{#1}}} \newcommand{\gp}[1]{\textcolor{red}{\textit{#1}}} \newcommand{\algo}{\mathcal{A}} \newcommand{\M}{\mathcal{M}} \newcommand{\F}{\mathcal{F}} \newcommand{\G}{\mathcal{G}} \newcommand{\cvginprob}{\overset{p}\to} \newcommand{\cvgindistn}{\overset{D}\to} \usepackage[colorlinks=true,breaklinks=true,bookmarks=true,urlcolor=blue, citecolor=blue,linkcolor=blue,bookmarksopen=false,draft=false]{hyperref} \title{Asymptotic welfare performance of Boston assignment algorithms} \date{\today} \author{Geoffrey Pritchard} \author{Mark C. Wilson} \begin{document} \begin{abstract} We make a detailed analysis of three key algorithms (Serial Dictatorship and the naive and adaptive variants of the Boston algorithm) for the housing allocation problem, under the assumption that agent preferences are chosen iid uniformly from linear orders on the items. We compute limiting distributions (with respect to some common utility functions) as $n\to \infty$ of both the utilitarian welfare and the order bias. To do this, we compute limiting distributions of the outcomes for an arbitrary agent whose initial relative position in the tiebreak order is $\theta\in[0,1]$, as a function of $\theta$. We expect that these fundamental results on the stochastic processes underlying these mechanisms will have wider applicability in future. Overall our results show that the differences in utilitarian welfare performance of the three algorithms are fairly small, but the differences in order bias are much greater. Also, Naive Boston beats Adaptive Boston, which beats Serial Dictatorship, on both welfare and order bias. \end{abstract} \maketitle \section{Introduction} \label{s:intro} Algorithms for allocation of indivisible goods are widely applicable and have been heavily studied. There are many variations on the problem, for example one-sided matching or housing allocation (each agent gets a unique item), school choice (each student gets a single school seat, and schools have limited preferences over students), and multi-unit assignment (for example each student is allocated a seat in each of several classes). One can also vary the type of preferences for agents over items, but here we focus on the most commonly studied case, of complete strict preferences. We focus on the housing allocation problem \cite{HyZe1979}, whose relative simplicity allows for more detailed analysis. \subsection{Our contribution} \label{ss:contrib} We make a detailed analysis of three prominent algorithms (Serial Dictatorship and the naive and adaptive variants of the Boston algorithm) for the housing allocation problem, under the standard assumption that agent preferences are independently chosen uniformly from linear orders on the items (often called the Impartial Culture distribution), and the further assumption that agents express truthful preferences. We compute limiting distributions as $n\to \infty$ of both the utilitarian welfare (with respect to some common utility functions) and the order bias (a recently introduced \cite{FrPW2021} fairness concept). 
In order to do this, we compute limiting distributions of the outcomes for an arbitrary agent whose initial relative position in the tiebreak order is $\theta\in[0,1]$, as a function of $\theta$. We expect that these fundamental results on the stochastic processes underlying these mechanisms will have wider applicability in future. While the results for Serial Dictatorship are easy to derive, the Boston mechanisms require substantial work. To our knowledge, no precise results of this type on average-case welfare performance of allocation algorithms have been published. In Section~\ref{s:conclude} we discuss the limitations and implications of our results, situate our work in the literature on welfare of allocation mechanisms, and point out opportunities for future work. We first derive the basic limiting results for exit time and rank of the item attained, for Naive Boston, Adaptive Boston and Serial Dictatorship in Sections~\ref{s:naive_asymptotics}, \ref{s:adaptive_asymptotics} and \ref{s:serial_dictatorship} respectively. Each section first deals with average-case results for an arbitrary initial segment of agents in the choosing order, and then with the fate of an individual agent at an arbitrary position. The core technical results are found in Theorems~\ref{thm:naive_asym_agent_numbers}, \ref{thm:naive_individual_asymptotics}, \ref{thm:adaptive_asym_agent_numbers}, \ref{thm:adaptive_asymptotics}, \ref{thm:adaptive_individual_asymptotics} and their corollaries. We apply the basic results to utilitarian welfare in Section~\ref{s:welfare} and order bias in Section~\ref{s:order_bias}, and discuss the implications, relation to previous work, and ideas for possible future work in Section~\ref{s:conclude}. The results for Serial Dictatorship are straightforwardly derived, but the other algorithms require nontrivial analysis. Of those, Naive Boston is much easier, because the nature of the algorithm means that the exit time of an agent immediately yields the preference rank of the item obtained by the agent. However in Adaptive Boston this link is much less direct and this necessitates substantial extra technical work. \section{Preliminaries} \label{s:preliminaries} We define the mechanisms Naive Boston, Adaptive Boston and Serial Dictatorship, and show how to model the assignments they give via stochastic processes. \subsection{The mechanisms} \label{s:algo} We assume throughout that we have $n$ agents and $n$ items, where each agent has a complete strict preference ordering of items. Each mechanism allows for strategic misrepresentation of preferences by agents, but we assume sincere behavior here for this baseline analysis. We are therefore studying the underlying preference aggregation algorithms. These can be described as centralized procedures that take an entire preference profile and output a matching of agents to items, but are more easily and commonly interpreted dynamically as explained below. Probably the most famous mechanism for housing allocation is \emph{Serial Dictatorship} (SD). In a common implementation, agents choose according to the exogenous order $\rho$, each agent in turn choosing the item he most prefers among those still available. The Boston algorithms in the housing allocation setting are as follows. \emph{Naive Boston} (NB) proceeds in rounds: in each round, some of the agents and items will be permanently matched, and the rest will be relegated to the following round. 
At round $r$ ($r=1,2\ldots$), each remaining unmatched agent bids for his $r$th choice among the items, and will be matched to that item if it is still available. If more than one agent chooses an item, then the order $\rho$ is used as a tiebreaker. \emph{Adaptive Boston} (AB) \cite{MeSe2014} differs from Naive Boston in the set of items available at each round. In each round of this algorithm, all remaining agents submit a bid for their most-preferred item among those still available at the start of the round, rather than for their most-preferred item among those for which they have not yet bid. The Adaptive Boston algorithm takes fewer rounds to finish than the naive version, because agents do not waste time bidding for their $r$th choice in round $r$ if it has already been assigned to someone else in a previous round. This means that the algorithm runs more quickly, but agents, especially those late in the choosing order, are more likely to have to settle for lower-ranked items. Note that both Naive and Adaptive Boston behave exactly the same in the first round, but differently thereafter. \subsection{Important stochastic processes in the IC model} \label{s:stoch} Under the Impartial Culture assumption, it is convenient to imagine the agents developing their preference orders as the algorithm proceeds, rather than in advance. This allows the evolution of the assignments for the Boston algorithms to be described by the following stochastic processes (for SD the analysis is easier). In the first round, the naive and adaptive Boston processes proceed identically: each agent randomly chooses one of the $n$ items, independently of other agents and with uniform probabilities $\frac{1}{n}$, as his most preferred item for which to bid. Each item that is so chosen is assigned to the first (in the sense of the agent order $\rho$) agent who bid for it; items not chosen by any agent are relegated, along with the unsuccessful agents, to the next round. In the $r$th round ($r\geq2$), the naive algorithm causes each remaining agent to randomly choose his $r$th most-preferred item, independently of other agents and of his own previous choices, uniformly from the $n-r+1$ items for which he has not previously bid. (Note that included among these are all the items still available in the current round.) Each item so chosen is assigned to the first agent who chose it; other items and unsuccessful agents are relegated to the next round. The adaptive Boston method is similar, except that agents may choose only from the items still available at the start of the round. This can be achieved by having each remaining agent choose his next most-preferred item by repeated sampling without replacement from the set of items he has not yet considered, until one of the items sampled is among those still available at this round. An essential feature of these bidding processes is captured in the following two results. \begin{lemma} \label{lem:A} Suppose we have $m$ items ($m\geq2$) and a sequence of agents (Agent 1, Agent 2, $\ldots$) who each randomly (independently and uniformly) choose an item. Let $A\subseteq{\mathbb N}$ be a subset of the agents, and $C_A$ be the number of items first chosen by a member of $A$. (Equivalently, $C_A$ is the number of members of $A$ who choose an item that no previous agent has chosen.) 
Then $$ \hbox{Var}(C_A) \;\leq\; E[C_A] \;=\; \sum_{a\in A} \left(1-\frac1m\right)^{a-1} $$ \end{lemma} \begin{lemma} \label{lem:Aprime} Suppose we have the situation of Lemma \ref{lem:A}, with the further stipulation that $\ell$ of the $m$ items are blue. Let $C_A$ be the number of blue items first chosen by a member of $A$ (equivalently, the number of members of $A$ who choose a blue item that no previous agent has chosen.) Then $$ \hbox{Var}(C_A) \;\leq\; E[C_A] \;=\; \frac{\ell}{m} \sum_{a\in A} \left(1-\frac1m\right)^{a-1} $$ \end{lemma} \begin{remark} Lemmas \ref{lem:A} and \ref{lem:Aprime} are applicable to the adaptive and naive Boston mechanisms, respectively. The blue items in Lemma \ref{lem:Aprime} correspond to those still available at the start of the round. In the actual naive Boston algorithm, the set of unavailable items that an agent may still bid for will typically be different for different agents, but the number of them ($m-\ell$) is the same for all agents, which is all that matters for our purposes. \end{remark} \begin{proof}{Proof of Lemmas \ref{lem:A} and \ref{lem:Aprime}.} Lemma \ref{lem:A} is simply the special case of Lemma \ref{lem:Aprime} with $\ell=m$, so the following direct proof of Lemma \ref{lem:Aprime} suffices for both. \newcommand{\rrm}{\left(1-\frac1m\right)} Let $F_i$ denote the agent who is first to choose item $i$, and $X_{ia}$ the indicator of the event $\{F_i=a\}$. That is, $X_{ia}=1$ if and only if $F_i=a$. We have $P(F_i=a)=\frac1m\rrm^{a-1}$: agent $a$ must choose $i$, while all previous agents choose items other than $i$. Let $B$ be the set of blue items. Then $C_A=\sum_{i\in B} \sum_{a\in A} X_{ia}$, so $$ E[C_A] \;=\; \sum_{i\in B} \sum_{a\in A} P(F_i=a) \;=\; \sum_{i\in B} \sum_{a\in A} \frac1m \rrm^{a-1} \;=\; \frac{\ell}{m} \sum_{a\in A} \rrm^{a-1}, $$ as claimed. Also, \begin{equation} \label{eq:ecasq} E\left[C_A^2\right] \;=\; \sum_{i\in B} \sum_{j\in B} \sum_{a\in A} \sum_{b\in A} E[X_{ia}X_{jb}] . \end{equation} For $a\neq b$ these summands are identical for all $i\neq j$ (and zero for $i=j$); for $a=b$, they are identical for all $i=j$ (and zero for $i\neq j$). Thus (\ref{eq:ecasq}) reduces to \begin{equation} \label{eq:ecasqq} E\left[C_A^2\right] \;=\; \ell(\ell-1) \sum_{a,b\in A; a\neq b} E[X_{1,a}X_{2,b}] \;+\; \ell \sum_{a\in A} E[X_{1,a}] . \end{equation} The second term of (\ref{eq:ecasqq}) is $E[C_A]$ again. For $a<b$ and $i\neq j$ we have $$ E[X_{ia}X_{jb}] \;=\; P(F_i=a\hbox{ and }F_j=b) = \left(1-\frac2m\right)^{a-1} \frac1m \rrm^{b-a-1} \frac1m $$ (Agents prior to $a$ must choose neither $i$ nor $j$, $a$ must choose $i$, agents between $a$ and $b$ must choose items other than $j$, and $b$ must choose $j$.) Since $1-\frac2m < \rrm^2$, this gives \begin{equation} \label{eq:xiaxjb} E[X_{ia}X_{jb}] \;\leq\; \frac{1}{m^2} \rrm^{a+b-3} . \end{equation} As this last expression is symmetric in $a$ and $b$, (\ref{eq:xiaxjb}) also holds for $a>b$. Hence, $$ E\left[C_A^2\right] \;\leq\; E[C_A] \;+\; \frac{\ell(\ell-1)}{m^2} \sum_{a,b\in A; a\neq b} \rrm^{a+b-3} . $$ We have $\frac{\ell(\ell-1)}{m^2} = \frac{\ell^2}{m^2}\left(1-\frac1\ell\right)\leq \frac{\ell^2}{m^2}\rrm$, since $\ell\leq m$. 
This gives $$ E\left[C_A^2\right] \;\leq\; E[C_A] \;+\; \frac{\ell^2}{m^2} \sum_{a,b\in A; a\neq b} \rrm^{a+b-2} , $$ enabling us to bound the variance as required: $\hbox{Var}(C_A) = E\left[C_A^2\right] - E[C_A]^2$ and so \begin{eqnarray*} \hbox{Var}(C_A) - E[C_A] &\leq& \frac{\ell^2}{m^2}\sum_{a,b\in A; a\neq b} \rrm^{a+b-2} \;-\; \left(\frac{\ell}{m} \sum_{a\in A} \rrm^{a-1} \right)^2 \\ &=& \frac{\ell^2}{m^2}\left( \sum_{a,b\in A; a\neq b} \rrm^{a+b-2} \;-\; \sum_{a,b\in A} \rrm^{a+b-2} \right)\\ &\leq& 0 . \end{eqnarray*} \hfill \qedsymbol \end{proof} The bounding of the variance of a random variable by its mean implies a distribution with relatively little variation about the mean when the mean is large. We put this to good use in the following two results. \begin{lemma} \label{lem:C} Let $(X_n)$ be a sequence of non-negative random variables with $\hbox{Var}(X_n)\leq E[X_n]$ and $\frac1n E[X_n]\to c$ as $n\to\infty$. Then $\frac1n X_n\cvginprob c$ as $n\to\infty$ (convergence in probability). \end{lemma} \begin{lemma} \label{lem:CC} Let $(X_n)$ be a sequence of non-negative random variables and $(\F_n)$ a sequence of $\sigma$-fields, with $\hbox{Var}(X_n|\F_n)\leq E[X_n|\F_n]$ and $\frac1n E[X_n|\F_n]\cvginprob c$ as $n\to\infty$. Then $\frac1n X_n\cvginprob c$ as $n\to\infty$. \end{lemma} \begin{proof}{Proof of Lemmas \ref{lem:C} and \ref{lem:CC}.} Lemma \ref{lem:C} is just the special case of Lemma \ref{lem:CC} in which all the $\sigma$-fields $\F_n$ are trivial. For a proof of Lemma \ref{lem:CC}, it suffices to show that $\frac1n (X_n - E[X_n|\F_n])\cvginprob 0$. For any $\epsilon>0$ we have by Chebyshev's inequality (\cite{Durrett}) $$ P\left(\Big|X_n - E[X_n|\F_n]\Big| > \epsilon n\Big|\F_n\right) \;\leq\; (\epsilon n)^{-2} \hbox{Var}(X_n|\F_n) \;\leq\; (\epsilon n)^{-2} E[X_n|\F_n] $$ Since $n^{-2} E[X_n|\F_n]\cvginprob 0$, it follows that $P\left(\Big|X_n - E[X_n|\F_n]\Big| > \epsilon n\Big|\F_n\right)\cvginprob0$. As these conditional probabilities are a bounded (and thus uniformly integrable) sequence, the convergence is also in ${\mathcal L}_1$ (Theorem 4.6.3 in \cite{Durrett}), and so $$ P\left(\frac1n\Big|X_n - E[X_n|\F_n]\Big| > \epsilon\right) \;=\; E\left[P\left(\Big|X_n - E[X_n|\F_n]\Big| > \epsilon n\Big|\F_n\right)\right] \;\to\; 0 , $$ giving the required convergence in probability. \hfill \qedsymbol \end{proof} The introduction of asymptotics ($n\to\infty$) implies that we are considering problems of ever-larger sizes. From now on, the reader should imagine that for each $n$, we have an instance of the house allocation problem of size $n$; most quantities will accordingly have $n$ as a subscript. In the upcoming sections, we shall need to consider the fortunes of agents as functions of their position in the choosing order $\rho$. \begin{defn} \label{defn:theta} Define the {\em relative position} of an agent $a$ in the order $\rho$ to be the fraction of all the agents whose position in $\rho$ is no worse than that of $a$. Thus, the first agent in $\rho$ has relative position $1/n$ and the last has relative position $1$. For $0\leq\theta\leq1$, let $A_n(\theta)$ denote the set of agents whose relative position is at most $\theta$, and let $a_n(\theta)$ be the last agent in $A_n(\theta)$. \end{defn} \begin{remark} For completeness, when $\theta < 1/n$ we let $a_n(\theta)$ be the first agent in $\rho$. 
This exceptional definition will cause no trouble, as for $\theta>0$ it applies to only finitely many $n$ and so does not affect asymptotic results, while for $\theta=0$ it allows us to say something about the first agent in $\rho$. \end{remark} \section{Naive Boston} \label{s:naive_asymptotics} We now consider the Naive Boston algorithm. We begin with results about initial segments of the queue of agents. \subsection{Groups of agents} \label{ss:naive_all} It will be useful to define the following sequence. \begin{defn} The sequence $(\omega_r)_{r=1}^{\infty}$ is defined by the initial condition $\omega_1 = 1$ and recursion $\omega_{r+1}=\omega_r e^{-\omega_r}$ for $r\geq 1$. \end{defn} Thus, for example, $\omega_1 = 1, \omega_2 = e^{-1}, \omega_3 = e^{-1} e^{-e^{-1}}$. The value of $\omega_r$ approximates $r^{-1}$, a relationship made more precise in the following result. \begin{lemma} \label{lem:wr_asymptotics} For all $r\geq3$, $$ \frac{1}{r+\log r} \;<\; \omega_r \;<\; \frac{1}{r}. $$ \end{lemma} \begin{proof}{Proof.} For $3\leq r \leq 8$ the inequalities can be verified by direct calculation. Beyond this, we rely on induction: assume the result for a given $r\geq8$ and consider $\omega_{r+1}$. Observe that the function $x\mapsto xe^{-x}$ is monotone increasing on $[0,1]$: this gives us $$ \omega_{r+1} \;=\; \omega_r e^{-\omega_r} \;<\; \frac{e^{-1/r}}{r} \;=\; \frac{1}{r+1} \exp\left(\log\left(1+\frac1r\right) - \frac1r\right) \;\leq\; \frac{1}{r+1}, $$ via the well-known inequality $\log(1+x)\leq x$. Also, \begin{eqnarray*} \omega_{r+1} \;=\; \omega_r e^{-\omega_r} &>& \frac{e^{-1/(r+\log r)}}{r+\log r} \\ &\geq& \frac{1}{r+1+\log(r+1)}\left(1 + \frac{1+\log(r+1)-\log r}{r+\log r}\right)\left(1 - \frac{1}{r+\log r}\right), \end{eqnarray*} via the well-known inequality $e^{-x}\geq1-x$. Thus \begin{eqnarray*} \omega_{r+1} &>& \frac{1}{r+1+\log(r+1)}\left(1 + \frac{(r-1+\log r)(\log(r+1)-\log r)-1}{(r+\log r)^2}\right) \\ &>& \frac{1}{r+1+\log(r+1)}\left(1 + \frac{-2+\log r}{(r+1)(r+\log r)^2}\right), \end{eqnarray*} since $$\log(r+1)-\log r=\int_r^{r+1}t^{-1}\, dt>\frac{1}{r+1}.$$ For $r\geq8$ we have $\log r > 2$ and so the result follows. \hfill \qedsymbol \end{proof} We can now state our main result on the asymptotics of naive Boston. \begin{theorem}[Number of agents remaining] \label{thm:naive_asym_agent_numbers} Consider the naive Boston algorithm. Fix $r\geq1$ and a relative position $\theta\in[0,1]$. Then the number $N_n(r,\theta)$ of members of $A_n(\theta)$ present at round $r$ satisfies $$ \frac1n N_n(r,\theta) \cvginprob z_r(\theta). $$ where $z_1(\theta)=\theta$ and \begin{equation} \label{eq:zr_recursion} z_{r+1}(\theta) = z_r(\theta) - \left(1 - e^{-z_r(\theta)}\right)\omega_r \qquad\text{ for $r\geq1$.} \end{equation} In particular, the total number $N_n(r)$ of agents (and of items) present at round $r$ satisfies $$ \frac1n N_n(r) \cvginprob z_r(1) = \omega_r . $$ \end{theorem} Some of the functions $z_r(\theta)$ are illustrated in Figure \ref{fig:Boston_agent_numbers}. Note that agents with an earlier position in $\rho$ are more likely to exit in the early rounds. A consequence is that the position of an unsuccessful agent relative to other unsuccessful agents tends to improve each time he fails to claim an item. 
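As a quick numerical illustration (a minimal Python sketch of our own, not part of the formal development; the function names are ours), the recursions defining $\omega_r$ and $z_r(\theta)$ in Theorem \ref{thm:naive_asym_agent_numbers} are straightforward to evaluate, and the identity $z_r(1)=\omega_r$ provides a simple sanity check.
\begin{verbatim}
import math

def omega(R):
    # omega_1, ..., omega_R via omega_{r+1} = omega_r * exp(-omega_r), omega_1 = 1
    w = [1.0]
    for _ in range(R - 1):
        w.append(w[-1] * math.exp(-w[-1]))
    return w

def z(R, theta):
    # z_1(theta), ..., z_R(theta) via z_{r+1} = z_r - (1 - exp(-z_r)) * omega_r
    w, zs = omega(R), [theta]
    for r in range(R - 1):
        zs.append(zs[-1] - (1.0 - math.exp(-zs[-1])) * w[r])
    return zs

print([round(x, 4) for x in omega(5)])   # 1.0, 0.3679, 0.2546, ...
print([round(x, 4) for x in z(5, 0.5)])  # fraction of the first half still present
print(all(abs(a - b) < 1e-9 for a, b in zip(z(5, 1.0), omega(5))))  # z_r(1) == omega_r
\end{verbatim}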
\begin{figure}[hbtp] \centering \includegraphics[width=0.8\textwidth]{pictures/Boston_agent_numbers.pdf} \caption{The limiting fraction of the agents who have relative position $\theta$ or better and survive to participate in the $r$th round.} \label{fig:Boston_agent_numbers} \end{figure} A better understanding of the functions $z_r(\theta)$ is given by the following result. \begin{theorem} \label{thm:naive_zr} The functions $z_r(\theta)$ satisfy $z_r(\theta)=\int_0^\theta z'_r(\phi)\;d\phi$, where \begin{eqnarray} \label{eq:zr_prime_recursion} z'_r(\theta) &=& \prod_{k=1}^{r-1} f_k(\theta) \qquad\qquad\hbox{ for $r\geq2$, and }\qquad z'_1(\theta)=1\\ f_r(\theta) &=& 1 - \omega_r \exp\left(-z_r(\theta)\right) . \end{eqnarray} \end{theorem} \begin{proof}{Proof.} Differentiate (\ref{eq:zr_recursion}) with respect to $\theta$. Alternatively, integrate (\ref{eq:zr_prime_recursion}) by parts. \qedsymbol \end{proof} \smallbreak The quantity $f_r(\theta)$ can be interpreted (in a sense to be made precise later) as the conditional probability that an agent with relative position $\theta$, if present at round $r$, is unmatched at that round. The quantity $z'_r(\theta)$ can then be interpreted as the probability that an agent with relative position $\theta$ is still unmatched at the beginning of round $r$. For the particular case of the last agent, we may note that $f_r(1) = 1-\omega_{r+1}$. Other quantities for the first few rounds are shown in Table \ref{t:naive_limits}. \begin{table}[hbtp] \centering \begin{tabular}{|l|c|c|c|c|} \hline meaning at round $r$ & quantity & $r=1$ & $r=2$ & $r=3$\\ \hline Fraction of all agents: &&&&\\ $\bullet$ present & $\omega_r$ & $1$ & $e^{-1}\approx0.3679$ & $\exp(-1-e^{-1})\approx0.2546$\\ $\bullet$ in $A_n(\theta)$ and present & $z_r(\theta)$ & $\theta$ & $\theta+e^{-\theta}-1$ & $\theta+e^{-\theta}-1-e^{-1}+\exp(-\theta-e^{-\theta})$\\ For an agent with relative position $\theta$: &&&&\\ $\bullet$ P(present) & $z'_r(\theta)$ & $1$ & $1-e^{-\theta}$ & $(1-e^{-\theta})(1-\exp(-\theta-e^{-\theta}))$\\ $\bullet$ P(unmatched|present) & $f_r(\theta)$ & $1-e^{-\theta}$ & $1-\exp(-\theta-e^{-\theta})$ & $1-\exp(-\theta-e^{-\theta}-\exp(-\theta-e^{-\theta}))$\\ \hline \end{tabular} \vspace{10pt} \caption{Limiting quantities as $n\to\infty$ for the early rounds of the Naive Boston algorithm.} \label{t:naive_limits} \end{table} \begin{theorem} \label{thm:zbounds} For $r\geq2$ and $0\leq\theta\leq1$, \begin{equation} \label{eq:zbound1} c_1 \omega_r (1-e^{-\theta}) \;\leq\; z'_r(\theta) \;\leq\; c_2 \omega_r (1-e^{-\theta}) \end{equation} and \begin{equation} \label{eq:zbound2} c_1 \omega_r (\theta + e^{-\theta} - 1) \;\leq\; z_r(\theta) \;\leq\; c_2 \omega_r (\theta + e^{-\theta} - 1) \end{equation} where the constants $c_1=e-1\approx1.718$ and $c_2=\exp(1+e^{-1})\approx2.927$. \end{theorem} \begin{proof}{Proof.} It is enough to show (\ref{eq:zbound1}); (\ref{eq:zbound2}) then follows by integration. From (\ref{eq:zr_prime_recursion}) we have $$ f_1(\theta) \prod_{k=2}^{r-1} \left(1-\omega_r e^{-z_r(0)}\right) \;\leq\; z'_r(\theta) \;\leq\; f_1(\theta) \prod_{k=2}^{r-1} \left(1-\omega_r e^{-z_r(1)}\right) $$ since $z_r(\theta)$ is increasing in $\theta$. We have $f_1(\theta)=1-e^{-\theta}$, $z_r(0)=0$, and $z_r(1)=\omega_r$, so \begin{equation} \label{eq:zbound3} (1-e^{-\theta}) \prod_{k=2}^{r-1} \left(1-\omega_r\right) \;\leq\; z'_r(\theta) \;\leq\; (1-e^{-\theta}) \prod_{k=3}^{r} \left(1-\omega_r\right) . 
\end{equation} Let $L_r=\omega_r^{-1}\prod_{k=2}^{r} \left(1-\omega_r\right)$ for all $r\geq1$. This is an increasing sequence, since $L_{r+1}/L_r = e^{\omega_r}(1-\omega_{r+1}) = e^{\omega_r} - \omega_r > 1$. Hence, $L_r\geq L_2=\omega_2^{-1}(1-\omega_2)=e-1$ for all $r\geq2$; that is, $\prod_{k=2}^{r} \left(1-\omega_r\right)\geq(e-1)\omega_r$ for $r\geq2$. The lower bound in (\ref{eq:zbound3}) can thus be replaced by $(e-1) \omega_{r-1} (1-e^{-\theta}) \;\leq\; z'_r(\theta)$ when $r\geq3$. Since $\omega_{r-1}>\omega_r$, we obtain the lower bound in (\ref{eq:zbound1}) for $r\geq3$, and we may verify directly that $z'_2(\theta)=1-e^{-\theta}$ satisfies this bound also. A similar argument suffices for the upper bounds. Let $U_r=\omega_{r+1}^{-1}\prod_{k=3}^r \left(1-\omega_r\right)$ for $r\geq2$. This is a decreasing sequence, since $U_r/U_{r-1}=e^{\omega_r}(1-\omega_r)<e^{\omega_r}e^{-\omega_r}=1$. Hence, $U_r\leq U_2=\omega_3^{-1}$ for all $r\geq2$; that is, $\prod_{k=3}^r \left(1-\omega_r\right) \leq \omega_3^{-1}\omega_{r+1}$ for $r\geq2$. The upper bound in (\ref{eq:zbound3}) can thus be replaced by $z'_r(\theta) \;\leq\; \omega_3^{-1}\omega_{r+1} (1-e^{-\theta})$ for $r\geq2$. The constant $\omega_3^{-1}=\omega_2^{-1}e^{\omega_2}=\exp(1+e^{-1})$. Since $\omega_{r+1}<\omega_r$, we obtain the upper bound in (\ref{eq:zbound1}). \qedsymbol \end{proof} \begin{proof}{Proof of Theorem \ref{thm:naive_asym_agent_numbers}.} Induct on $r$. For $r=1$, the result is immediate because $N_n(1,\theta)=\lfloor n\theta\rfloor$. Now fix $r\geq1$ and assume the result for round $r$. Let $\F_r$ be the $\sigma$-field generated by events prior to round $r$. Conditional on $\F_r$, we have the situation of Lemma \ref{lem:Aprime}: there are $N_n(r)$ available items and $N_n(r,\theta)$ agents of $A_n(\theta)$ who will be the first to attempt to claim them, with the agents' bids chosen iid uniform from a larger pool of $n-r+1$ items. Letting $S_n$ denote the number of these agents whose bids are successful, Lemma \ref{lem:Aprime} gives $$ \hbox{Var}(S_n|\F_r) \;\leq\; E[S_n|\F_r] \;=\; \frac{N_n(r)}{n-r+1} \sum_{a=1}^{N_n(r,\theta)} \left(1 - \frac{1}{n-r+1}\right)^{a-1} . $$ Summing the geometric series, $$ E[S_n|\F_r] \;=\; N_n(r) \left(1-\left(1-\frac{1}{n-r+1}\right)^{N_n(r,\theta)}\right) . $$ It then follows by the inductive hypothesis that $$ \frac1n E[S_n|\F_r] \;\cvginprob\; \omega_r \left(1 - e^{-z_r(\theta)}\right) \qquad\hbox{ as $n\to\infty$.} $$ By Lemma \ref{lem:CC}, $$ \frac1n S_n \;\cvginprob\; \omega_r \left(1 - e^{-z_r(\theta)}\right) . $$ We have $N_n(r+1,\theta) = N_n(r,\theta) - S_n$, and so obtain $$ \frac1n N_n(r+1,\theta) \;\cvginprob\; z_r(\theta) - \omega_r \left(1 - e^{-z_r(\theta)}\right) = z_{r+1}(\theta). $$ The result follows. \qedsymbol \end{proof} \begin{corollary}[limiting distribution of preference rank obtained] \label{cor:naive_obtained} The number $S_n(s,\theta)$ of members of $A_n(\theta)$ matched to their $s$th preference satisfies $$ \frac1n S_n(s,\theta) \cvginprob \int_0^\theta q_s(\phi)\;d\phi . $$ where $$ q_s(\theta) = z'_s(\theta) - z'_{s+1}(\theta) = z'_s(\theta) \omega_s e^{-z_s(\theta)}. $$ \end{corollary} \begin{proof}{Proof.} An agent is matched to his $s$th preference if, and only if, he is present at round $s$ but not at round $s+1$. The result follows by Theorem \ref{thm:naive_asym_agent_numbers}. \qedsymbol \end{proof} The limiting functions $q_s(\theta)$ are illustrated in Figure \ref{fig:naive_exit_round}. 
For example, an agent at relative position $1/2$ has probability over 78\% of exiting at the first round while the last agent has corresponding probability just under $37\%$. \begin{figure}[hbtp] \centering \includegraphics[width=0.8\textwidth]{pictures/Boston_exit_round.pdf} \caption{The limiting probability that an agent exits the naive Boston mechanism at the $r$th round (and so obtains his $r$th preference), as a function of the agent's initial relative position $\theta$. Logarithmic scale on vertical axis.} \label{fig:naive_exit_round} \end{figure} \subsection{Individual agents} \label{ss:naive_individual} Theorem \ref{thm:naive_asym_agent_numbers} and Corollary \ref{cor:naive_obtained} are concerned with the outcomes achieved by the agent population collectively, and will be used in Section~\ref{s:welfare} to say something about utilitarian welfare. Suppose, though, that our interest lies with individual agents. It is tempting to informally ``differentiate" the result of Theorem \ref{thm:naive_asym_agent_numbers} with respect to $\theta$, and thereby draw conclusions about the fate of a single agent. The following result puts those conclusions on a sound footing. \begin{theorem}[exit time of individual agent] \label{thm:naive_individual_asymptotics} Consider the naive Boston algorithm. Fix $r\geq1$ and a relative position $\theta\in[0,1]$. Let $R_n(\theta)$ denote the round number at which the agent $a_n(\theta)$ (the last agent with relative position at most $\theta$) is matched. Equivalently, $R_n(\theta)$ is the preference rank of the item obtained by this agent. Then $$P(R_n(\theta)\geq r)\to z'_r(\theta) \quad \text{as $n\to\infty$}. $$ \end{theorem} \begin{remark} \label{rem:naive_qs} The result of Theorem \ref{thm:naive_individual_asymptotics} could equivalently be stated as $$ P(R_n(\theta)=r)\to q_r(\theta) $$ where $q_r(\theta)$ is as in Corollary \ref{cor:naive_obtained}. Note that $\sum_{r=1}^{\infty} q_r(\theta)=1$, consistent with the role of $q_r(\theta)$ as an asymptotic probability. \end{remark} \begin{remark} Theorem \ref{thm:naive_individual_asymptotics} tells us that an agent with fixed relative position $\theta$ has a good chance of obtaining one of his first few preferences, even if $n$ is large. This is even true of the very last agent ($\theta=1$). Figure~\ref{fig:naive_exit_round} displays the limiting values. \end{remark} \begin{proof}{Proof of Theorem \ref{thm:naive_individual_asymptotics}.} The result is trivial for $r=1$. Assume the result for a given value of $r$, and let $\F_r$ be the $\sigma$-field generated by events prior to round $r$. Conditional on $\F_r$, we can apply Lemma \ref{lem:Aprime} to the single agent $a_n(\theta)$ to obtain \begin{equation} \label{eq:naive_individual_induction} P(R_n(\theta)\geq r+1) \;=\; E\left[P(R_n(\theta)\geq r+1|\F_r)\right] \;=\; E\left[1_{R_n(\theta)\geq r} Y_n\right] , \end{equation} where $$ Y_n \;=\; 1 \;-\; \left(1 - \frac{1}{n-r+1}\right)^{N_n(r,\theta)-1} \left(\frac{N_n(r)}{n-r+1}\right) . $$ Observe that $Y_n\cvginprob 1 - \omega_r e^{-z_r(\theta)} = f_r(\theta)$ from Theorem \ref{thm:naive_asym_agent_numbers}. Equation (\ref{eq:naive_individual_induction}) gives $$ P(R_n(\theta)\geq r+1) \;-\; z'_{r+1}(\theta) \;=\; E\left[1_{R_n(\theta)\geq r}(Y_n - f_r(\theta))\right] \;+\; E\left[1_{R_n(\theta)\geq r} - z'_r(\theta)\right] f_r(\theta) . $$ The second term converges to 0 as $n\to\infty$ by the inductive hypothesis. 
For the first term, note that the convergence $Y_n - f_r(\theta) \cvginprob 0$ is also convergence in ${\mathcal L}_1$ by Theorem 4.6.3 in \cite{Durrett}, and so $1_{R_n(\theta)\geq r}(Y_n - f_r(\theta))\to 0$ in ${\mathcal L}_1$ also. \hfill \qedsymbol \end{proof} \section{Adaptive Boston} \label{s:adaptive_asymptotics} We again begin with results about initial segments of the queue of agents, and follow up with results about individual agents. \subsection{Groups of agents} \label{ss:adaptive_all} A simple stochastic model of IC bidding for the adaptive Boston mechanism can be similar to the naive case. At the beginning of the $r$th round, each remaining agent randomly chooses an item as his next preference for which to bid; the bid is successful, and the agent matched to that item, if no other agent with an earlier position in the order $\rho$ bids for the same item. But, whereas a naive-Boston participant chooses from the set of $n-r+1$ items for which he has not already bid, the adaptive-Boston participant chooses from a smaller set: the $N_n(r)$ items actually still available at the beginning of the round. This model allows a result analogous to Theorem \ref{thm:naive_asym_agent_numbers}. \begin{theorem}[Number of agents remaining] \label{thm:adaptive_asym_agent_numbers} Consider the adaptive Boston algorithm. Fix $r\geq1$ and a relative position $\theta\in[0,1]$. Then the number $N_n(r,\theta)$ of members of $A_n(\theta)$ present at round $r$ satisfies $$ \frac1n N_n(r,\theta) \cvginprob y_r(\theta). $$ where $y_1(\theta)=\theta$ and \begin{equation} \label{eq:yr_recursion} y_{r+1}(\theta) = y_r(\theta) - e^{1-r}\left(1 - \exp\left(-e^{r-1}y_r(\theta)\right)\right) \qquad\text{ for $r\geq1$.} \end{equation} In particular, the total number $N_n(r)$ of agents (and of items) present at round $r$ satisfies $$ \frac1n N_n(r) \cvginprob y_r(1) = e^{1-r} . $$ \end{theorem} Some of the functions $y_r(\theta)$ are illustrated in Figure \ref{fig:Boston_agent_numbers}. It is apparent that the adaptive Boston mechanism proceeds more quickly than naive Boston: $e^{1-r}$ decays much more quickly than $\omega_r$ as $r\to\infty$. Also, the tendency of advantageously-ranked agents to be matched in relatively early rounds is even greater for the adaptive version of the algorithm. In an adaptive-Boston assignment of a large number of items to agents with IC preferences, under 2\% of the agents will be unmatched after four rounds (vs. 16\% for naive Boston), and most of these (about 2/3) will be among the last 10\% of agents in the original agent order. A better understanding of the functions $y_r(\theta)$ is given by the following result, which is analogous to Theorem~\ref{thm:naive_zr}. \begin{theorem} \label{thm:adaptive_yr} The functions $y_r(\theta)$ satisfy $y_r(\theta)=\int_0^\theta y'_r(\phi)\;d\phi$, where \begin{eqnarray} \label{eq:yr_prime_recursion} y'_r(\theta) &=& \prod_{k=1}^{r-1} g_k(\theta) \qquad\hbox{ for $r\geq2$}, \quad\hbox{ and $y'_1(\theta)=1$}\\ g_r(\theta) &=& 1 - \exp\left(-e^{r-1} y_r(\theta)\right) \end{eqnarray} \end{theorem} \begin{proof}{Proof.} Differentiate (\ref{eq:yr_recursion}) with respect to $\theta$. Alternatively, integrate (\ref{eq:yr_prime_recursion}) by parts. 
\qedsymbol \end{proof} \smallbreak \begin{remark} \label{rem:adaptive_yr} The quantity $g_r(\theta)$ is analogous to $f_r(\theta)$ in the naive case, and can be interpreted (in a sense to be made precise later) as the conditional probability that an agent with relative position $\theta$, if present at round $r$, is unmatched at that round. The quantity $y'_r(\theta)$, analogous to $z'_r(\theta)$ in the naive case, can then be interpreted as the probability that an agent with relative position $\theta$ is still unmatched at the beginning of round $r$. For the particular case of the last agent, we may note that $g_r(1) = 1 - e^{-1}$ and $y'_r(1) = (1-e^{-1})^{r-1}$. Other quantities for the first two rounds are shown in Table \ref{t:adaptive_limits}. \end{remark} \begin{table}[hbtp] \centering \begin{tabular}{|l|c|c|c|c|} \hline meaning at round $r$ & quantity & $r=1$ & $r=2$\\ \hline Fraction of all agents: &&&\\ $\bullet$ present & $e^{1-r}$ & $1$ & $e^{-1}\approx 0.3679$ \\ $\bullet$ in $A_n(\theta)$ and present & $y_r(\theta)$ &$\theta$ &$\theta + e^{-\theta} - 1$\\ For an agent with relative position $\theta$: &&&\\ $\bullet$ P(present) & $y'_r(\theta)$ & $1$ & $1 - e^{-\theta}$\\ $\bullet$ P(unmatched|present) & $g_r(\theta)$ & $1 - e^{-\theta}$ & $1-\exp(-e(\theta+e^{-\theta}-1))$\\ $\bullet$ P(bids for $s$th preference|present) & $u_{rs}$ & $1_{s=1}$ & $e^{-1}(1-e^{-1})^{s-2}1_{s\geq2}$ \\ \hline \end{tabular} \vspace{10pt} \caption{Limiting quantities for the early rounds of the adaptive Boston algorithm.} \label{t:adaptive_limits} \end{table} \begin{proof}{Proof of Theorem \ref{thm:adaptive_asym_agent_numbers}.} Induct on $r$. For $r=1$ we have $N_n(1,\theta)=\lfloor n\theta\rfloor$; the result follows immediately. Now suppose the result for a given value of $r$, and consider $r+1$. Let $T_n$ be the number of agents of $A_n(\theta)$ matched at round $r$. Conditioning on the $\sigma$-field $\F_r$ generated by events prior to round $r$, we have the situation of Lemma \ref{lem:A}: there are $N_n(r)$ available items and $N_n(r,\theta)$ agents of $A_n(\theta)$ who will be the first to attempt to claim them, with each such agent bidding for one of the available items, chosen uniformly at random independently of other agents. Lemma \ref{lem:A} gives us $\hbox{Var}(T_n|\F_r)\leq E[T_n|\F_r]$ and $$ E[T_n|\F_r] \;=\; \sum_{a=1}^{N_n(r,\theta)} \left(1-\frac{1}{N_n(r)}\right)^{a-1} \;=\; N_n(r) \left(1 - \left(1-\frac{1}{N_n(r)}\right)^{N_n(r,\theta)} \right) . $$ By the inductive hypothesis, $$ \frac{N_n(r)}{n} \;\cvginprob\; e^{1-r} \qquad\hbox{ and }\qquad \left(1-\frac{1}{N_n(r)}\right)^{N_n(r,\theta)} \;\cvginprob\; \exp\left(-e^{r-1}y_r(\theta)\right) . $$ This gives us $$ \frac1n E[T_n|\F_r] \;\cvginprob\; e^{1-r}\left(1-\exp\left(-e^{r-1}y_r(\theta)\right)\right) \;=\; y_r(\theta) - y_{r+1}(\theta) . $$ By Lemma \ref{lem:CC}, then, $$ \frac1n T_n \;\cvginprob\; y_r(\theta) - y_{r+1}(\theta) . $$ Since $T_n=N_n(r,\theta) - N_n(r+1,\theta)$, it follows that $\frac1n N_n(r+1,\theta)\cvginprob y_{r+1}(\theta)$. Hence the result. \qedsymbol \end{proof} \subsubsection*{The rank of the item received} Theorem \ref{thm:adaptive_asym_agent_numbers} is less satisfying than Theorem \ref{thm:naive_asym_agent_numbers}. The naive Boston mechanism has a key simplifying feature: the rank of an item within its assigned agent's preference order is equal to the round number in which it was matched. 
This means that Theorem \ref{thm:naive_asym_agent_numbers} already enables some conclusions about agents' satisfaction with the outcome of the process (see Corollary \ref{cor:naive_obtained}). But, in the adaptive case, we know only that an item matched at round $r>1$ will be no better (and could be worse) than its assigned agent's $r$th preference. To do better, we need a more detailed stochastic bidding model. An agent $a$ still present at the beginning of the $r$th round will have thus far determined an initial sub-sequence of his preference order comprising some number $F_{a,r-1}$ of most-preferred items, and failed to obtain any of them. He thus has a pool of $n-F_{a,r-1}$ previously-unconsidered items from which to choose, of which the $N_n(r)$ items actually still available are a subset. In accordance with the IC model, let us imagine that he now generates further preferences by repeated random sampling without replacement from the previously-unconsidered items, until one of the available items is sampled; this item becomes his bid in the current round. Denote by $G_{ar}$ the number of items sampled to construct this bid; thus $F_{ar}=\sum_{j=1}^{r} G_{aj}$ and $G_{a,1}=1$. If the bid is successful, the agent will be matched to his $F_{ar}$th preference. Note that while the simple bidding model used in Theorem \ref{thm:adaptive_asym_agent_numbers} provides enough information to determine the matching of items to agents (along with the round numbers at which the items are matched), it does not completely determine the agents' preference orders. In particular, it does not determine the agents' preference ranks for the items they are assigned. The random variables $G_{ar}$ provide additional information sufficient to determine this interesting feature of the outcome. It is convenient to think of the $G_{ar}$ and $F_{ar}$ as being determined by an auxiliary process that runs after the simple bidding model has been run and the matching of agents to items determined. This auxiliary process can be described in the following way. Fix integers $n_1>n_2>\cdots>n_r>0$. \begin{itemize} \item Place $n_1$ balls, numbered from 1 to $n_1$, in an urn. \item For $i=1,\ldots,r$ \begin{itemize} \item Deem the $n_i$ lowest-numbered balls remaining in the urn ``good". \item Draw balls at random from the urn, without replacement, until a good ball is drawn. \end{itemize} \end{itemize} Let $H(n_1,\ldots,n_r)$ be the probability distribution of the total number of balls drawn, and $q(s;n_1,\ldots,n_r) = P(X=s)$ where $X\sim H(n_1,\ldots,n_r)$. Denote by $\M$ the $\sigma$-field generated by the simple bidding model, including the items on which each agent bids and the resulting matching. Conditional on $\M$, the random variable $F_{ar}$ for an agent $a$ still present at round $r$ has the $H(n,N_n(2),\ldots,N_n(r))$ distribution. That is, \begin{equation} \label{eq:adaptive_bidding_M} P(F_{ar}=s|\M) \;=\; q(s;n,N_n(2),\ldots,N_n(r)) . \end{equation} Also, the $\{F_{ar}:a\hbox{ present at round }r\}$ are conditionally independent given $\M$. \begin{lemma} \label{lem:H_distn} $q(1;n_1)=1$; $q(s;n_1)=0$ for $s>1$; and $q(s;n_1,\ldots,n_r)=0$ for $s<r$ or $s>n_1-n_r+1$. The $H(n_1,\ldots,n_r)$ distribution's other probabilities are given by the recurrence $$ q(s;n_1,\ldots,n_r) \;=\; \sum_{t=r-1}^{s-1} q(t;n_1,\ldots,n_{r-1}) \left(\frac{n_r}{n_1-s+1}\right) \prod_{0\leq i<s-t-1} \left(1-\frac{n_r}{n_1-t-i}\right) . 
$$ \end{lemma} \begin{proof}{Proof.} Let $N$ be the number of balls drawn in the first $r-1$ iterations of the process, and $M$ the number drawn in the final iteration. Then $P(N+M=s) \;=\; \sum_{t=r-1}^{s-1} P(N=t) P(M=s-t|N=t)$, and we have $$ P(M=s-t|N=t) \;=\; \left(\frac{n_r}{n_1-s+1}\right) \prod_{0\leq i<s-t-1} \left(1-\frac{n_r}{n_1-t-i}\right) . $$ (The final iteration must first sample $s-t-1$ consecutive non-good balls: the probabilities of achieving this are $1-\frac{n_r}{n_1-t}$ for the first, $1-\frac{n_r}{n_1-t-1}$ for the second, and so on, down to $1-\frac{n_r}{n_1-s+2}$ for the last. Finally, a good ball must be drawn: the probability of this is $\frac{n_r}{n_1-s+1}$.) The result follows. \qedsymbol \end{proof} \smallbreak Our interest in the $H(n_1,\ldots,n_r)$ distribution mostly concerns its asymptotic limits as the numbers of balls become large, and the ``without replacement'' stipulation becomes unimportant. To this end, fix $p_1,\ldots,p_r\in(0,1]$ and let $u(s;p_1,\ldots,p_r)=P\left(r+\sum_{i=1}^r G_i = s\right)$, where $G_1,\ldots,G_r$ are independent random variables with geometric distributions: $P(G_i=x)=p_i(1-p_i)^x$ for $x=0,1,\ldots$. \begin{lemma} \label{lem:G_distn} $u(s;p)=p(1-p)^{s-1}$; $u(s;p_1,\ldots,p_r)=0$ for $s<r$; and $$ u(s;p_1,\ldots,p_r) \;=\; \sum_{t=r-1}^{s-1} u(t;p_1,\ldots,p_{r-1}) p_r (1-p_r)^{s-t-1} . $$ \end{lemma} \begin{proof}{Proof.} $$ P\left(r+\sum_{i=1}^r G_i = s\right) \;=\; \sum_{t=r-1}^{s-1} P\left(r-1 +\sum_{i=1}^{r-1} G_i = t\right) P(1+G_r = s-t) . \qedsymbol $$ \end{proof} \begin{lemma} $$ q(s;n_1,\ldots,n_r) \;\to\; u(s;p_1,\ldots,p_r) \qquad\text{as $n_1,\ldots,n_r\to\infty$ with $\frac{n_i}{n_1}\to p_i$.} $$ \end{lemma} \begin{proof}{Proof.} Take limits in Lemma \ref{lem:H_distn}; compare Lemma \ref{lem:G_distn}. \qedsymbol \end{proof} \begin{corollary} \label{cor:F_cvginprob} Consider the adaptive Boston mechanism, and fix $s$. We have $$ q(s;n,N_n(2),\ldots,N_n(r)) \;\cvginprob\; u(s;1,e^{-1},\ldots,e^{1-r}) \qquad\text{as $n\to\infty$.} $$ \end{corollary} \begin{proof}{Proof.} Use the convergence of $\frac1n N_n(i)$ given by Theorem \ref{thm:adaptive_asym_agent_numbers}. \qedsymbol \end{proof} Corollary \ref{cor:F_cvginprob} and (\ref{eq:adaptive_bidding_M}) give us an asymptotic limit for the distribution, conditional on $\M$, of $F_{ar}$, the preference rank of the bid made at round $r$ by an agent still present at that round. To condense notation, we will denote the limit $u(s;1,e^{-1},\ldots,e^{1-r})$ by $u_{rs}$. That is, $$ P(F_{ar}=s|\M) \;\cvginprob\; u_{rs} . $$ Note that the limit $u_{rs}$ does not depend on the position of the agent $a$ in the choosing order. It is fairly clear why this should be so: all remaining agents must enter their bids at the beginning of the round, before any other agent has bid, and so the bidding process, at least, treats them symmetrically. The advantage arising from a favourable position lies in a higher probability of obtaining the item bid for, not in constructing the bid itself. We make use of the following simplified recurrence. \begin{lemma} \label{lem:three_term_recurrence} $u(s;p)=p(1-p)^{s-1}$ and $u(s;p_1,\ldots,p_r)=0$ for $s<r$; other values are given by the recurrence $$ u(s;p_1,\ldots,p_r) \;=\; p_r u(s-1;p_1,\ldots,p_{r-1}) \;+\; (1-p_r) u(s-1;p_1,\ldots,p_{r}) . $$ In particular: $u_{11}=1$, $u_{1,s}=0$ for $s>1$, $u_{rs}=0$ for $s<r$, and \begin{equation} \label{eq:three_term_recurrence} u_{rs} \;=\; e^{1-r} u_{r-1,s-1} \;+\; (1-e^{1-r}) u_{r,s-1} .
\end{equation} \end{lemma} \begin{proof}{Proof.} The recurrence in the statement of the lemma, together with the boundary values given there, has a unique solution; as does the one in Lemma \ref{lem:G_distn}. It is easy to check that either solution also satisfies the other recurrence. \qedsymbol \end{proof} \begin{remark} \label{rem:sum_urs} It follows directly from \eqref{eq:three_term_recurrence} that the bivariate generating function $F(x,y) = \sum_{r,s} u_{rs} x^r y^s$ satisfies the defining equation $F(x,y)(1-y) = xy+yF(x/e,y)(x-e)$. It follows (on substituting $y=1$, which gives $F(x,1)=x/(1-x)$) that $\sum_{s=r}^{\infty} u_{rs} = 1$, consistent with its role as a probability distribution. We have not found a nice explicit formula for $u_{rs}$. \end{remark} We can now state a more detailed version of Theorem \ref{thm:adaptive_asym_agent_numbers}. \begin{theorem}[the bidding process at a given round] \label{thm:adaptive_asymptotics} Consider the adaptive Boston algorithm. Fix $s\geq r\geq1$ and a relative position $\theta\in[0,1]$. Let $y_r(\theta)$ be as in Theorem \ref{thm:adaptive_asym_agent_numbers}, and $u_{rs}$ be as in Lemma \ref{lem:three_term_recurrence}. \begin{enumerate}[(i)] \item \label{adaptive_bidding} The number $N_n(r,s,\theta)$ of members of $A_n(\theta)$ making a bid for their $s$th preference at round $r$ satisfies $$ \frac1n N_n(r,s,\theta) \cvginprob u_{rs} y_r(\theta). $$ \item \label{adaptive_bidding_and_unmatched} The number $U_n(r,s,\theta)$ of members of $A_n(\theta)$ making an unsuccessful bid for their $s$th preference at round $r$ satisfies $$ \frac1n U_n(r,s,\theta) \cvginprob u_{rs} y_{r+1}(\theta). $$ \item \label{adaptive_bidding_and_matched} The number $S_n(r,s,\theta)$ of members of $A_n(\theta)$ making a successful bid for their $s$th preference at round $r$ satisfies $$ \frac1n S_n(r,s,\theta) \cvginprob u_{rs} (y_r(\theta) - y_{r+1}(\theta)). $$ \end{enumerate} \end{theorem} \begin{proof}{Proof.} Conditional on the $\sigma$-field $\M$, each agent $a$ participating in round $r$ enters a bid for his $F_{ar}$th preference; the $F_{ar}$ for this group of agents are conditionally independent given $\M$. Thus, the conditional distribution of $N_n(r,s,\theta)$ given $\M$ is the binomial distribution with $N_n(r,\theta)$ trials and success probability $P(F_{ar}=s|\M)$ given by (\ref{eq:adaptive_bidding_M}). The variance of a binomial distribution never exceeds its mean (\cite{Feller}), so Lemma \ref{lem:CC} applies. We will thus obtain Part (\ref{adaptive_bidding}) of the theorem if we can merely show that $\frac1n E[N_n(r,s,\theta)|\M] \cvginprob u_{rs} y_r(\theta)$; that is \begin{equation} \frac1n N_n(r,\theta) q(s;n,N_n(2),\ldots,N_n(r)) \cvginprob u_{rs} y_r(\theta) . \end{equation} Theorem \ref{thm:adaptive_asym_agent_numbers} gives $\frac1n N_n(r,\theta)\cvginprob y_r(\theta)$, and Corollary \ref{cor:F_cvginprob} gives $q(s;n,N_n(2),\ldots,N_n(r))\cvginprob u_{rs}$. Part (\ref{adaptive_bidding}) follows. The proof of Part (\ref{adaptive_bidding_and_unmatched}) is very similar: the conditional distribution of $U_n(r,s,\theta)$ given $\M$ is the binomial distribution with $N_n(r+1,\theta)$ trials and success probability $P(F_{ar}=s|\M)$ given by (\ref{eq:adaptive_bidding_M}). Part (\ref{adaptive_bidding_and_matched}) follows from Parts (\ref{adaptive_bidding}) and (\ref{adaptive_bidding_and_unmatched}). \qedsymbol \end{proof} We now have the analog for Adaptive Boston of Corollary~\ref{cor:naive_obtained}.
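Before stating it, it may help to record the first few values obtained by iterating \eqref{eq:three_term_recurrence}: $u_{11}=1$; $u_{2s}=e^{-1}(1-e^{-1})^{s-2}$ for $s\geq2$ (as noted in Table \ref{t:adaptive_limits}); and, at the third round, $u_{33}=e^{-3}$ and $u_{34}=2e^{-3}-e^{-4}-e^{-5}$.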
\begin{corollary}[limiting distribution of preference rank obtained] \label{cor:adaptive_obtained} The number $S_n(s,\theta)$ of members of $A_n(\theta)$ matched to their $s$th preference satisfies $$ \frac1n S_n(s,\theta) \cvginprob \int_0^\theta q_s(\phi)\;d\phi . $$ where $$ q_s(\theta) = \sum_{r=1}^s u_{rs} \left(y'_r(\theta) - y'_{r+1}(\theta)\right) = \sum_{r=1}^s u_{rs} y'_r(\theta) \exp\left(-e^{r-1}y_r(\theta)\right). $$ \end{corollary} \begin{figure}[hbtp] \centering \includegraphics[width=0.8\textwidth]{pictures/adaptive_pref_obtained.pdf} \caption{The limiting probability $q_s(\theta)$ that an agent obtains his $s$th preference via the adaptive Boston mechanism, as a function of the agent's initial relative position $\theta$. Logarithmic scale on vertical axis.} \label{fig:adaptive_pref_obtained} \end{figure} The functions $q_s(\theta)$ are illustrated in Figure \ref{fig:adaptive_pref_obtained}. Figure~\ref{fig:adaptive_round_2_last} shows for the last agent ($\theta = 1$) the distribution of the rank of the item bid for and the item obtained at the second round. \begin{figure}[hbtp] \centering \includegraphics[width=0.8\textwidth]{pictures/adaptive_round_2_last.png} \caption{Distribution of rank of item for which the last agent bids (upper) and successfully bids (lower) in round 2, Adaptive Boston} \label{fig:adaptive_round_2_last} \end{figure} \begin{remark} \label{rem:adaptive_sum_qs} It is clear from the definition (and Remark \ref{rem:sum_urs}) that $\sum_{s=1}^{\infty} q_s(\theta)=1$. This is consistent with the implied role of $q_s(\theta)$ as a probability distribution: the limiting probability that an agent in position $\theta$ obtains his $s$th preference. See also Theorem \ref{thm:adaptive_individual_asymptotics} Part \ref{adaptive_individual_obtained}. \end{remark} \begin{proof}{Proof of Corollary \ref{cor:adaptive_obtained}.} This is an immediate consequence of Part (\ref{adaptive_bidding_and_matched}) of Theorem \ref{thm:adaptive_asymptotics}, with Theorem \ref{thm:adaptive_yr} providing the integral form of the limit. \qedsymbol \end{proof} \subsection{Individual agents} \label{ss:adaptive_individual} If we wish to follow the fate of a single agent in the adaptive Boston mechanism, we need limits analogous to that of Theorem \ref{thm:naive_individual_asymptotics}. These are provided by the following result. \begin{theorem}[exit time and rank obtained for individual agent] \label{thm:adaptive_individual_asymptotics} Consider the adaptive Boston algorithm. Fix $s\geq r\geq1$ and a relative position $\theta\in[0,1]$. Let $V_n(r,\theta)$ denote the preference rank of the item for which the agent $a_n(\theta)$ (the last agent with relative position at most $\theta$) bids at round $r$. (For completeness, set $V_n(r,\theta)=0$ whenever $a_n(\theta)$ is not present at round $r$.) Let $R_n(\theta)$ denote the round number at which $a_n(\theta)$ is matched. Then \begin{enumerate} \item \label{adaptive_individual_present} (Agent present at round $r$.) $$ P(R_n(\theta)\geq r)\to y'_r(\theta) \quad \text{as $n\to\infty$}. $$ \item \label{adaptive_individual_bidding} (Agent bids for $s$th preference at round $r$.) $$ P(V_n(r,\theta)=s)\to y'_r(\theta) u_{rs} \quad \text{as $n\to\infty$}. $$ \item \label{adaptive_individual_matched} (Agent matched to $s$th preference at round $r$.) $$ P(R_n(\theta)=r\hbox{ and }V_n(r,\theta)=s)\to y'_r(\theta) u_{rs} (1-g_r(\theta)) \quad \text{as $n\to\infty$}. $$ \item \label{adaptive_individual_obtained} (Agent matched to $s$th preference.) 
$$ P(V_n(R_n(\theta),\theta)=s)\to q_s(\theta) \quad \text{as $n\to\infty$}. $$ \end{enumerate} The limiting quantities $y'_r(\theta)$, $g_r(\theta)$, $u_{rs}$, and $q_s(\theta)$ are as defined in Theorem \ref{thm:adaptive_yr}, Lemma \ref{lem:three_term_recurrence} and Corollary \ref{cor:adaptive_obtained}. \end{theorem} \begin{proof}{Proof.} Part (\ref{adaptive_individual_present}) is proved in a similar way to Theorem \ref{thm:naive_individual_asymptotics}. The result is trivial for $r=1$. Assume the result for a given value of $r$, and let $\F_r$ be the $\sigma$-field generated by events prior to round $r$. Then \begin{equation} \label{eq:adaptive_individual_induction1} P(R_n(\theta)\geq r+1) \;=\; E\left[P(R_n(\theta)\geq r+1|\F_r)\right] \;=\; E\left[1_{R_n(\theta)\geq r} Y_n\right] , \end{equation} where (by applying Lemma \ref{lem:A} to the single agent $a_n(\theta)$) $$ Y_n \;=\; 1 \;-\; \left(1 - \frac{1}{N_n(r)}\right)^{N_n(r,\theta)-1} . $$ Observe that $Y_n\cvginprob 1 - \exp\left(-e^{r-1}y_r(\theta)\right) = g_r(\theta)$ by Theorem \ref{thm:adaptive_asym_agent_numbers}. Equation (\ref{eq:adaptive_individual_induction1}) gives $$ P(R_n(\theta)\geq r+1) \;-\; y'_{r+1}(\theta) \;=\; E\left[1_{R_n(\theta)\geq r}(Y_n - g_r(\theta))\right] \;+\; E\left[1_{R_n(\theta)\geq r} - y'_r(\theta)\right] g_r(\theta) . $$ The second term converges to 0 as $n\to\infty$ by the inductive hypothesis. For the first term, note that the convergence $Y_n - g_r(\theta) \cvginprob 0$ is also convergence in ${\mathcal L}_1$ by Theorem 4.6.3 in \cite{Durrett}, and so $1_{R_n(\theta)\geq r}(Y_n - g_r(\theta))\to 0$ in ${\mathcal L}_1$ also. Part (\ref{adaptive_individual_present}) follows. For Part (\ref{adaptive_individual_bidding}), we have \begin{equation*} P(V_n(r,\theta)=s) \;=\; E\left[1_{R_n(\theta)\geq r} P(F_{a_n(\theta),r}=s|\M)\right] \;=\; E\left[1_{R_n(\theta)\geq r} q(s;n, N_n(2),\ldots,N_n(r))\right] . \end{equation*} Hence, \begin{equation} \label{eq:adaptive_individual_induction2} P(V_n(r,\theta)=s) - u_{rs}y'_r(\theta) \;=\; E\left[1_{R_n(\theta)\geq r} (q(s;n, N_n(2),\ldots,N_n(r))-u_{rs})\right] \;+\; u_{rs}\left(P(R_n(\theta)\geq r) - y'_r(\theta)\right) . \end{equation} Both terms converge in probability to 0. For the second term, the convergence is given by Part (\ref{adaptive_individual_present}). For the first term, it is a consequence of Corollary \ref{cor:F_cvginprob}: $q(s;n,N_n(2),\ldots,N_n(r))\cvginprob u_{rs}$, which is also convergence in ${\mathcal L}_1$ by Theorem 4.6.3 in \cite{Durrett}. Part (\ref{adaptive_individual_bidding}) follows. The proof of Part (\ref{adaptive_individual_matched}) is very similar to that of Part (\ref{adaptive_individual_bidding}); just replace $1_{R_n(\theta)\geq r}$ by $1_{R_n(\theta)=r}$ and $y'_r(\theta)$ by $y'_r(\theta) - y'_{r+1}(\theta)$. Part (\ref{adaptive_individual_obtained}) is obtained from Part (\ref{adaptive_individual_matched}) by summation over $r$. \qedsymbol \end{proof} \section{Serial Dictatorship} \label{s:serial_dictatorship} Unlike the Boston algorithms, SD is strategyproof, but it is known to behave worse in welfare and fairness. However, we are not aware of detailed quantitative comparisons. The analysis for SD is very much simpler than for the Boston algorithms. In particular, the exit time is not interesting. In this section, we suppose that $n$ items and $n$ agents with Impartial Culture preferences are matched by the Serial Dictatorship algorithm. 
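As a toy illustration of the mechanism (a hypothetical instance, included only for concreteness): with $n=3$ and agents whose preference orders are $1\succ2\succ3$, $1\succ3\succ2$ and $3\succ2\succ1$ in the order $\rho$, the first agent takes item 1, the second takes item 3 (his second preference), and the third takes item 2 (his second preference); every agent is matched at his own turn, so only the preference rank obtained is in question.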
\subsection{Groups of agents} \label{ss:SD_group} Results analogous to those in Sections \ref{s:naive_asymptotics} and \ref{s:adaptive_asymptotics} are obtainable from the following explicit formula. \begin{theorem} \label{thm:SD_matching_probabilities} The probability that the $k$th agent obtains his $s$th preference is $\binom{n-s}{k-s}\big/\binom{n}{k-1}$ for $s=1,\ldots,k$, and zero for other values of $s$. \end{theorem} \begin{proof}{Proof.} By the time agent $k$ gets an item, a random subset $T$ of $k-1$ of the $n$ items is already taken. This agent's $s$th preference will be the best one left if and only if $T$ includes his first $s-1$ preferences, but not the $s$th preference. Of the $\binom{n}{k-1}$ equally-probable subsets $T$, the number satisfying this condition is $\binom{n-s}{k-s}$: the remaining $k-s$ items in $T$ must be chosen from $n-s$ possibilities. \hfill \qedsymbol \end{proof} In particular, the $n$th and last agent is equally likely to get each possible item. \begin{corollary}[preference rank obtained] \label{cor:SD_obtained} Consider the serial dictatorship algorithm. Fix $s\geq1$ and a relative position $\theta\in[0,1]$. The number $S_n(s,\theta)$ of members of $A_n(\theta)$ matched to their $s$th preference satisfies $$ \frac1n S_n(s,\theta) \cvginprob \int_0^\theta q_s(\phi)\;d\phi $$ where $q_s(\theta) = \theta^{s-1}(1-\theta)$. \end{corollary} \begin{proof}{Proof.} Let $p_{kn} = \binom{n-s}{k-s}\Big/\binom{n}{k-1}$. Let $X_{kn}$ be the indicator of the event that the $k$th agent (of $n$) is matched to his $s$th preference; thus $E[X_{kn}]=p_{kn}$ and $\hbox{Var}(X_{kn}) = p_{kn}(1-p_{kn})$. The Impartial Culture model requires agents to choose their preferences independently; thus the random variables $(X_{kn})_{k=1}^n$ are independent. We have $$ S_n(s,\theta)=\sum_{k=s}^{\lfloor n\theta\rfloor} X_{kn} $$ and so $E[S_n(s,\theta)] = \sum_{k=s}^{\lfloor n\theta\rfloor} p_{kn}$ and $\hbox{Var}(S_n(s,\theta)) = \sum_{k=s}^{\lfloor n\theta\rfloor} p_{kn}(1-p_{kn})$. Hence $\hbox{Var}(S_n(s,\theta)) \leq E[S_n(s,\theta)]$ and Lemma \ref{lem:C} applies. It now remains only to show that $\frac1n E[S_n(s,\theta)] \to \int_0^\theta q_s(\phi)\;d\phi$. Note that $$ p_{kn} \;=\; \frac{(n-k+1)\cdot(k-1)(k-2)\cdots(k-s+1)}{n(n-1)\cdots(n-s+1)} \;=\; \left(1-\frac{k-1}{n}\right)\prod_{j=1}^{s-1}\left(\frac{k-j}{n-j}\right) . $$ Hence, $$ \frac1n E[S_n(s,\theta)] \;=\; \frac1n \sum_{k=s}^{\lfloor n\theta\rfloor} \left(1-\frac{k-1}{n}\right)\prod_{j=1}^{s-1}\left(\frac{k-j}{n-j}\right) \;=\; \int_0^\theta f_n(\phi)\;d\phi , $$ where $$ f_n(\phi) \;=\; \begin{cases} \left(1-\frac{k-1}{n}\right)\prod_{j=1}^{s-1}\left(\frac{k-j}{n-j}\right) & \hbox{ for } \frac{k-1}{n}\leq\phi<\frac{k}{n}, \quad k=s,\ldots,\lfloor n\theta\rfloor\\ 0 & \hbox{ otherwise.} \end{cases} $$ As $n\to\infty$, $f_n(\phi)\to(1-\phi)\phi^{s-1}$ pointwise; since we also have $0\leq f_n(\phi)\leq1$, the dominated convergence theorem (\cite{Durrett}) ensures that $\int_0^\theta f_n(\phi)\;d\phi \to \int_0^\theta (1-\phi)\phi^{s-1}\;d\phi$. \hfill \qedsymbol \end{proof} \subsection{Individual agents} \label{ss:SD_indiv} For individual agents, we have the following analogous result. \begin{theorem}[preference rank obtained] \label{thm:SD_individual_asymptotics} Consider the serial dictatorship algorithm. Fix $s\geq1$ and a relative position $\theta\in[0,1]$. 
The probability that agent $a_n(\theta)$ (the last with relative position at most $\theta$) is matched to his $s$th preference converges to $q_s(\theta) = \theta^{s-1}(1-\theta)$ as $n\to\infty$. \end{theorem} \begin{proof}{Proof.} From Theorem~\ref{thm:SD_matching_probabilities}, this probability is $$ \left(1-\frac{\lfloor n\theta\rfloor -1}{n}\right)\prod_{j=1}^{s-1}\left(\frac{\lfloor n\theta\rfloor-j}{n-j}\right) . $$ The result follows immediately. \qedsymbol \end{proof} \section{Welfare} \label{s:welfare} In this section we obtain results on the utilitarian welfare achieved by the three mechanisms. We use the standard method of imputing utility to agents via scoring rules, since we know only their ordinal preferences. \begin{defn} \label{def:scoring_rule} A \emph{positional scoring rule} is given by a sequence $(\sigma_n(s))_{s=1}^n$ of real numbers with $0\leq\sigma_n(s)\leq\sigma_n(s-1)\leq1$ for $2\leq s\leq n$. \end{defn} Commonly used scoring rules include \emph{$k$-approval} defined by $(1,1,\ldots,1,0,0,\ldots,0)$ where the number of $1$'s is fixed at $k$ independent of $n$; when $k=1$ this is the usual plurality rule. Note that $k$-approval is \textit{coherent}: for all $n$ the utility of a fixed rank object depends only on the rank and not on $n$. Another well-known rule is \emph{Borda} defined by $\sigma_n(s) = \frac{n-s}{n-1}$; Borda is not coherent. Borda utility is often used in the literature, sometimes under the name ``linear utilities". Each positional scoring rule defines an \emph{induced rank utility function}, common to all agents: an agent matched to his $s$th preference derives utility $\sigma_n(s)$ therefrom. Suppose (adopting the notation of Corollary \ref{cor:naive_obtained}, Corollary \ref{cor:adaptive_obtained}, and Corollary \ref{cor:SD_obtained}) that an assignment mechanism for $n$ agents matches $S_n(s,\theta)$ of the agents with relative position at most $\theta$ to their $s$th preferences, for each $s=1,2,\ldots$. According to the utility function induced by the scoring rule $(\sigma_n(s))_{s=1}^n$, the welfare (total utility) of the agents with relative position at most $\theta$ is thus \begin{equation} \label{eq:welfare} W_n(\theta) = \sum_{s=1}^n \sigma_n(s) S_n(s,\theta) . \end{equation} \begin{theorem}[Asymptotic welfare of the mechanisms] \label{thm:welfare_asymptotics} Assume an assignment mechanism with $$ \frac1n S_n(s,\theta) \cvginprob \int_0^\theta q_s(\phi)\;d\phi \qquad\hbox{ as $n\to\infty$, for each $s=1,2,\ldots$} $$ where $\sum_{s=1}^\infty q_s(\theta)=1$. Suppose the scoring rule $(\sigma_n(s))_{s=1}^n$ satisfies $$ \sigma_n(s) \to \lambda_s \qquad\hbox{ as $n\to\infty$, for each $s=1,2,\ldots$} $$ Then the welfare given by (\ref{eq:welfare}) satisfies $$ \frac1n W_n(\theta) \cvginprob \sum_{s=1}^\infty \lambda_s \int_0^\theta q_s(\phi)\;d\phi . $$ \end{theorem} \begin{proof}{Proof of Theorem \ref{thm:welfare_asymptotics}.} For convenience, define $\sigma_n(s)=0$ when $n<s$; this allows us to write $W_n(\theta)=\sum_{s=1}^{\infty}\sigma_n(s) S_n(s,\theta)$. For any fixed $s'$, the finite sum $Y_n(s')$ defined by $$ Y_n(s') = \sum_{s=1}^{s'} \left(\sigma_n(s)\frac{S_n(s,\theta)}{n} - \lambda_s \int_0^\theta q_s(\phi)\;d\phi\right) $$ has $Y_n(s')\cvginprob0$ as $n\to\infty$. 
We have $$ \frac{W_n(\theta)}{n} - \sum_{s=1}^{\infty} \lambda_s \int_0^\theta q_s(\phi)\;d\phi \;\;=\;\; Y_n(s') \;+\; \sum_{s>s'} \sigma_n(s)\frac{S_n(s,\theta)}{n} \;-\; \sum_{s>s'} \lambda_s \int_0^\theta q_s(\phi)\;d\phi $$ and so \begin{equation} \label{eq:welfare_cvg_bound} \left|\frac{W_n(\theta)}{n} - \sum_{s=1}^{\infty} \lambda_s \int_0^\theta q_s(\phi)\;d\phi\right| \;\;\leq\;\; \left|Y_n(s')\right| \;+\; \sum_{s>s'} \frac{S_n(s,\theta)}{n} \;+\; \sum_{s>s'} \int_0^\theta q_s(\phi)\;d\phi \end{equation} (since $0\leq\sigma_n(s)\leq1$). Note also that $\sum_{s=1}^{s'}\frac{S_n(s,\theta)}{n}\;\cvginprob\;\sum_{s=1}^{s'}\int_0^\theta q_s(\phi)\;d\phi$, while $$ \sum_{s=1}^{\infty} \frac{S_n(s,\theta)}{n} \;=\; \frac{\lfloor n\theta\rfloor}{n} \;\to\; \theta \;=\; \sum_{s=1}^{\infty} \int_0^\theta q_s(\phi)\;d\phi , $$ and so $$ \sum_{s>s'} \frac{S_n(s,\theta)}{n} \;\cvginprob\; \sum_{s>s'} \int_0^\theta q_s(\phi)\;d\phi . $$ We can now establish the required convergence in probability. Let $\epsilon>0$, and choose $s'$ so that $\sum_{s>s'} \int_0^\theta q_s(\phi)\;d\phi < \epsilon/3$. Then (\ref{eq:welfare_cvg_bound}) gives $$ P\left(\left|\frac{W_n(\theta)}{n} - \sum_{s=1}^{\infty} \lambda_s \int_0^\theta q_s(\phi)\;d\phi\right|>\epsilon\right) \;\leq\; P\left(\left|Y_n(s')\right|>\epsilon/3\right) \;+\; P\left(\sum_{s>s'} \frac{S_n(s,\theta)}{n}>\epsilon/3\right) \;\to\; 0 $$ as $n\to\infty$. \hfill \qedsymbol \end{proof} Theorem \ref{thm:welfare_asymptotics} is applicable to naive Boston (via Corollary \ref{cor:naive_obtained}), adaptive Boston (via Corollary \ref{cor:adaptive_obtained}), and serial dictatorship (via Corollary \ref{cor:SD_obtained}). \begin{corollary} \label{cor:welfare_asymptotics_k_approval} The average $k$-approval welfare over all agents satisfies $$ \frac1n W_n(1) \;\cvginprob\; \begin{cases} 1 - \omega_{k+1} & \text{for Naive Boston}\\ (1 - e^{-1}) \sum_{\{(r,s): r\leq s\leq k\}} e^{1-r} u_{rs} & \text{for Adaptive Boston}\\ \frac{k}{k+1} & \text{for serial dictatorship}. \end{cases} $$ \end{corollary} \begin{proof}{Proof.} For the special case of $k$-approval utilities, the result of Theorem \ref{thm:welfare_asymptotics} reduces to $$ \frac1n W_n(\theta) \cvginprob \sum_{s=1}^k \int_0^\theta q_s(\phi)\;d\phi . $$ Setting $\theta=1$ and using the expressions for $q_s(\phi)$ found in Corollary \ref{cor:naive_obtained}, Corollary \ref{cor:adaptive_obtained}, and Corollary \ref{cor:SD_obtained} yields the results. \qedsymbol \end{proof} Corollary~\ref{cor:welfare_asymptotics_k_approval} and Lemma~\ref{lem:wr_asymptotics} show that for each fixed $k$, Naive Boston has higher average welfare than Serial Dictatorship. This is expected, because Naive Boston maximizes the number of agents receiving their first choice, then the number receiving their second choice, etc. Adaptive Boston apparently scores better than Serial Dictatorship for each $k$, although we do not have a formal proof. Figure~\ref{fig:welf comp} illustrates this for $1\leq k \leq 10$. Already for $k=3$, where the limiting values are 0.75, 0.776 and 0.803, the algorithms give similar welfare results, and they each asymptotically approach $1$ as $k\to \infty$. 
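To illustrate how such limits arise from Corollary \ref{cor:welfare_asymptotics_k_approval}, take Adaptive Boston with $k=2$: the pairs with $r\leq s\leq 2$ are $(1,1)$, $(1,2)$ and $(2,2)$, with $u_{11}=1$, $u_{12}=0$ and $u_{22}=e^{-1}$, so the limiting average welfare is $(1-e^{-1})\left(1+e^{-1}\cdot e^{-1}\right)=(1-e^{-1})(1+e^{-2})\approx0.718$. Table \ref{t:app_limits} collects the corresponding expressions for $k\leq3$.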
\begin{table}[hbtp] \centering \begin{tabular}{|c|c|c|c|} \hline algorithm & $k=1$ & $k=2$ & $k=3$\\ \hline Naive Boston & $1-e^{-1} \approx 0.632$ & $1 - e^{-1}e^{-e^{-1}}\approx 0.745$ & $1 - e^{-1}e^{-e^{-1}}e^{-e^{-1}e^{-e^{-1}}}\approx 0.803$\\ Adaptive Boston & $1-e^{-1} \approx 0.632$ & $(1-e^{-1})(1+e^{-2}) \approx 0.718$ & $(1-e^{-1})(1+2e^{-2}-e^{-3}+e^{-5})\approx 0.776$\\ Serial Dictatorship & $1/2 = 0.500$ & $2/3\approx 0.667$ & $3/4=0.750$ \\ \hline \end{tabular} \vspace{10pt} \caption{Limiting values as $n\to \infty$ of $k$-approval welfare.} \label{t:app_limits} \end{table} \begin{figure} \centering \includegraphics[width=0.8\textwidth]{pictures/welf_comp.png} \caption{Limiting values as $n\to \infty$ of $k$-approval welfare for $1\leq k \leq 10$. Top: Naive Boston. Middle: Adaptive Boston. Bottom: Serial Dictatorship.} \label{fig:welf comp} \end{figure} \begin{corollary} \label{cor:welfare_asymptotics_Borda} For an assignment mechanism as in Theorem \ref{thm:welfare_asymptotics}, the Borda welfare satisfies $$ \frac1n W_n(\theta) \cvginprob \theta . $$ \end{corollary} \begin{corollary} \label{cor:welfare_asymptotics_compare_borda} For each of Naive Boston, Adaptive Boston and Serial Dictatorship, the average normalized Borda welfare over all agents is asymptotically equal to $1$. \end{corollary} \begin{remark} Note that the Borda utility of a fixed preference rank $s$ has the limit $\lambda_s=1$, meaning that, in the asymptotic limit as $n\to\infty$, agents value the $s$th preference (of $n$) just as highly as the first preference. Consequently, mechanisms such as serial dictatorship or the Boston algorithms, which under IC are able to give most agents one of their first few preferences, achieve the same asymptotic Borda welfare as if every agent were matched to his first preference. This behaviour is really a consequence of the normalization of the Borda utilities $\sigma_n(s) = \frac{n-s}{n-1}$ to the interval $[0,1]$: the first few preferences all have utility close to 1. \end{remark} \section{Order bias} \label{s:order_bias} A recently introduced \cite{FrPW2021} average-case measure of fairness of discrete allocation algorithms is \emph{order bias}. The relevant definitions are recalled here for an arbitrary discrete assignment algorithm $\algo$ that fixes an order on agents (such as the order $\rho$ assumed in the present paper). \begin{defn} The \emph{expected rank distribution} under $\algo$ is the mapping $D_\algo$ on $\{1, \dots, n\} \times \{1, \dots, n\}$ whose value at $(r,j)$ is the probability under IC that $\algo$ assigns the $r$th agent his $j$th most-preferred item. \end{defn} We usually represent this mapping as a matrix where the rows represent agents and the columns represent preference ranks. \begin{defn} Let $u$ be a common rank utility function for all agents: $u(j)$ is the utility derived by an agent who obtains his $j$th preference. Define the \emph{order bias} of $\algo$ by $$ \beta_n(\algo; u) = \frac{\max_{1\leq p, q \leq n} |U(p) - U(q)|}{u(1)-u(n)}, $$ where $U(p)=\sum_{j=1}^n D_\algo(p,j) u(j)$, the expected utility of the item obtained by the $p$th agent. \end{defn} It is desirable that $\beta_n$ be as small as possible, out of fairness to each position in the order in the absence of any knowledge of the profile. The mechanisms in this paper (naive and adaptive Boston, and serial dictatorship) treat agents unequally by using a choosing/tiebreak order $\rho$.
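As a small illustration of these definitions (a two-agent example): under serial dictatorship with $n=2$, the expected rank distribution is $D_\algo(1,1)=1$, $D_\algo(1,2)=0$ and $D_\algo(2,1)=D_\algo(2,2)=1/2$; with $1$-approval utilities $u=(1,0)$ this gives $U(1)=1$, $U(2)=1/2$ and hence $\beta_2(\algo;u)=1/2$, in agreement with Theorem \ref{thm:sd_bias_app_exact} below.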
In all of these mechanisms, the first agent in $\rho$ always obtains his first-choice item, and so has the best possible expected utility. The last agent in $\rho$ has the smallest expected utility; this is a consequence of the following result. \begin{theorem}[Earlier positions do better on average] \label{thm:last_agent_is_worst_off} Let $a$ be an agent in an instance of the house allocation problem with IC preferences. Let the random variable $S$ be the preference rank of the item obtained by $a$. The naive and adaptive Boston mechanisms and serial dictatorship all have the property that for all $s\geq1$, $P(S>s)$ is monotone increasing in the relative position of $a$ ({\it i.e.} greater for later agents in $\rho$). \end{theorem} \begin{remark} Thus in the expected rank distribution matrix, each row stochastically dominates the one below it. For each common rank utility function $u$, the expected utility of agent $a$ is $u(1) + \sum_{s=1}^{n-1} (u(s+1)-u(s))\;P(S>s)$, so Theorem \ref{thm:last_agent_is_worst_off} implies that the expected utility is monotone decreasing in the relative position of $a$. In particular, the first agent has the highest and the last agent the lowest expected utility. \end{remark} \begin{proof}{Proof of Theorem \ref{thm:last_agent_is_worst_off}.} Let $a_1$ and $a_2$ be consecutive agents, with $a_2$ immediately after $a_1$ in $\rho$. Let $S_1$ and $S_2$ be the preference ranks of the items obtained by $a_1$ and $a_2$. It will suffice to show that $P(S_1>s)\leq P(S_2>s)$. To this end, consider an alternative instance of the problem in which $a_1$ and $a_2$ exchange preference orders before the allocation mechanism is applied. We will refer to this instance and the original one as the ``exchanged'' and ``non-exchanged'' processes respectively. Denote by $S'_1$ and $S'_2$ the preference ranks of the items obtained by $a_1$ and $a_2$ in the exchanged process. Since the exchanged process also has IC preferences, $S_1$ and $S'_1$ have the same probability distribution; similarly $S_2$ and $S'_2$. We now show that all three of our allocation mechanisms have the property that $S_1\leq S'_2$. From this the result will follow, since $S_1\leq S'_2\implies P(S_1>s)\leq P(S'_2>s) = P(S_2>s)$. For serial dictatorship, the exchanged and non-exchanged processes evolve identically for agents preceding $a_1$ and $a_2$. In the non-exchanged process, agent $a_1$ then finds that his first $S_1-1$ preferences are already taken; in the exchanged process, these same items are the first $S_1-1$ preferences of $a_2$. Hence, $S'_2\geq S_1$. For the Boston mechanisms, let $R$ be the number of unsuccessful bids made by $a_1$ in the non-exchanged process. Then the exchanged and non-exchanged processes evolve identically for the first $R$ rounds, except that the bids of $a_1$ and $a_2$ are made in reversed order; this reversal has no effect on the availability of items to other agents. After these $R$ rounds, $a_1$ (in the non-exchanged process) and $a_2$ (in the exchanged process) have reached the same point in their common preference order; in the next round both will bid for the $S_1$th preference in this order. Hence, $S'_2\geq S_1$. \hfill \qedsymbol \end{proof} The order bias of Serial Dictatorship is easy to analyse. \begin{theorem} \label{thm:sd_bias_app_exact} Fix $k\geq 1$ and $n\geq 1$. Then \begin{enumerate}[(i)] \item The $k$-approval order bias for Serial Dictatorship equals $1 - \frac{k}{n}$. \item The Borda order bias for Serial Dictatorship equals $1/2$. 
\end{enumerate} \end{theorem} \begin{proof}{Proof.} The probability of getting each choice is $1/n$ for the last agent. Hence the expected utility under $k$-approval for that agent is $k/n$. The first agent always gets its first choice. This yields (i). For (ii), note that for the last agent, the probability of getting each rank in his preference order is $1/n$. Hence the expected utility under Borda for that agent is $$\frac{1}{n} \sum_{j=1}^{n} \frac{n-j}{n-1} = \frac{1}{n(n-1)} \sum_{j=0}^{n-1} j = \frac{1}{2}.$$ Again, the first agent always gets his first choice. \hfill \qedsymbol \end{proof} \begin{corollary} \label{cor:sd_bias_app_asymp} For each fixed $k$, the $k$-approval order bias of SD is asymptotically equal to $1$ and the Borda order bias is asymptotically equal to $1/2$. \end{corollary} We now move to the Boston mechanisms. \begin{theorem} \label{thm:na_boston bias} For each fixed $k$, the $k$-approval order bias of Naive Boston is asymptotically $z'_{k+1}(1).$ \end{theorem} \begin{proof}{Proof.} Since the first agent always gets its top choice with utility $1$, it follows that $\beta_n(NB)$ equals the probability that the last agent survives until round $k+1$, which asymptotically equals $z'_{k+1}(1)$. \qedsymbol \end{proof} \begin{theorem} \label{thm:adaptive_kapproval_bias} For each fixed $k$, the $k$-approval order bias of Adaptive Boston is asymptotically $$ 1 - e^{-1} \sum_{\{(r,s): r\leq s \leq k\}}\left(1 - e^{-1}\right)^{r-1} u_{rs}. $$ \end{theorem} \begin{proof}{Proof.} The probability that the last agent in $\rho$ is matched to one of his first $k$ preferences is $\sum_{s=1}^k D_\algo(n,s)$. According to Theorem \ref{thm:adaptive_individual_asymptotics} Part \ref{adaptive_individual_obtained}, the asymptotic limit of this quantity is $\sum_{s=1}^k q_s(1)$, where $$ q_s(1) \;=\; \sum_{r=1}^s u_{rs}(y'_r(1) - y'_{r+1}(1)) . $$ The asymptotic order bias is thus $$ \lim_n \left(1 - \sum_{s=1}^k D_\algo(n,s)\right) \;=\; 1 - \sum_{s=1}^k \sum_{r=1}^s u_{rs}(y'_r(1) - y'_{r+1}(1)) . $$ As noted in Remark \ref{rem:adaptive_yr}, we have $y'_r(1)=(1-e^{-1})^{r-1}$. The result follows. \qedsymbol \end{proof} \begin{theorem} \label{thm:boston_borda_bias} The Borda order bias of each Boston mechanism is asymptotically zero. \end{theorem} \begin{proof}{Proof.} Let $\ell_n$ denote the expected Borda utility of the last agent in $\rho$, that is $$ \ell_n \;=\; \sum_{s=1}^n \left(\frac{n-s}{n-1}\right)D_\algo(n,s) . $$ Then for any $s_0$, $$ \liminf_n \; \ell_n \;\geq\; \liminf_n \; \left(\frac{n-s_0}{n-1}\right) \sum_{s=1}^{s_0} D_\algo(n,s) \;=\; \sum_{s=1}^{s_0} q_s(1) , $$ where $q_s(1) = \lim_n D_\algo(n,s)$, as given by Theorem \ref{thm:naive_individual_asymptotics} (naive Boston) and Theorem \ref{thm:adaptive_individual_asymptotics} (adaptive Boston). Since $\sum_{s=1}^{\infty}q_s(1)=1$ (see Remarks \ref{rem:naive_qs} and \ref{rem:adaptive_sum_qs}) and $s_0$ was arbitrary, we obtain $\lim_n \ell_n = 1$. The order bias is $1-\ell_n$; hence the result. 
\qedsymbol \end{proof} \begin{table}[hbtp] \centering \begin{tabular}{|l|l|l|l|} \hline algorithm & $k=1$ & $k=2$ & $k=3$\\ \hline NB & $1-e^{-1}\approx 0.632$ & $(1 - e^{-1})(1-e^{-1}e^{-e^{-1}}) \approx 0.471$ & $(1 - e^{-1})(1 - e^{-1}e^{-e^{-1}})(1 - e^{-1}e^{-e^{-1}}e^{-e^{-1}e^{-e^{-1}}})\approx 0.378$ \\ AB & $1-e^{-1}\approx 0.632$ & $(1-e^{-1})(1-e^{-2})\approx 0.547$ & $(1-e^{-1})(1-e^{-2}) - (1-e^{-1})^2(e^{-2}+e^{-4}) \approx 0.485$ \\ SD & 1 & 1 & 1\\ \hline \end{tabular} \vspace{10pt} \caption{Limiting quantities for $k$-approval order bias.} \end{table} \begin{figure} \centering \includegraphics[width=0.8\textwidth]{pictures/bias_comp.png} \caption{Limiting values as $n\to \infty$ of $k$-approval order bias for $1\leq k \leq 10$. Top: Serial Dictatorship. Middle: Adaptive Boston. Bottom: Naive Boston.} \label{fig:bias comp} \end{figure} \section{Conclusion} \label{s:conclude} If we relax the IC assumption on preferences, we should expect different results, although the relative performance of the three algorithms will likely not vary. For example, simulations \cite{FrPW2021} with preferences drawn from the Mallows distribution show that for small values of the Mallows dispersion parameter it is much harder to satisfy all agents or keep order bias low, but nevertheless NB beats AB, which beats SD, over the entire range of parameters. A striking feature of our results, under the IC assumption on preferences and assuming sincere agent behavior, is that although the Boston algorithms have a welfare advantage over Serial Dictatorship, the advantage is rather small. The limiting results for average welfare gained by the agents up to position $\theta$ in the choosing order show that the limit is concave in $\theta$. For the Boston mechanisms, this concavity is slight: for example, even for plurality utilities the median of the cumulative Adaptive Boston welfare distribution occurs at position approximately $0.378$, and this becomes even more evenly distributed as $k$ increases and we choose $k$-approval utilities (the limiting case is the same as Borda, where the cumulative distribution is linear). However, there is a huge difference in the values of the more egalitarian fairness criterion order bias, with SD being asymptotically as biased as it could be, and the Boston algorithms being asymptotically unbiased with respect to our normalized Borda utilities and having much lower bias than SD even for utilities such as $k$-approval for small $k$. Thus Naive Boston beats Adaptive Boston on both welfare and order bias, and Adaptive Boston beats Serial Dictatorship. From a welfare viewpoint, then, SD should be avoided. Of course, there are always tradeoffs. A persistent theme of the research literature is the inevitable tradeoff between strategyproofness, economic efficiency and agent welfare, and there is still much to be learned about these issues. SD is strategyproof, while AB gives less incentive to strategize than NB \cite{MeSe2021}. The order bias of the Boston algorithms, although smaller than that of SD, is still rather large. Thus if this fairness criterion is important, it makes sense to use a mechanism like Top Trading Cycles, which is strategyproof and has zero order bias in this situation \cite{FrPW2021}. Note that since TTC (with a randomly chosen endowment) is equivalent to SD \cite{AbSo1998}, and SD does not give up much in welfare to NB, TTC may be a good choice if preferences of agents are well described by IC.
A simple idea that will reduce order bias is to reverse the order in which agents choose at each round (or just at the second round). Quantifying the improvement via an analysis analogous to that in this paper is not easy, because it is no longer clear that the worst off agent will be the initially last one in the choosing order. We leave this for future work. The Boston algorithms discussed here are specializations of algorithms used for school choice to the case where each school has a single seat and schools have a common preference order over applicants. Further analysis of school choice mechanisms in the general case, from the viewpoint of welfare and order bias, would be very desirable. We have studied only sincere behavior by agents. Strategic behavior under the Boston mechanisms does occur in practice, and does cause welfare loss, but the social welfare cost of adopting a strategyproof alternative such as (random) Serial Dictatorship is often substantial, as shown in analysis of Harvard course matching \cite{BuCa2012}. It would be interesting to explore this issue further in the housing allocation model, and to study welfare and order bias in the multi-unit assignment model used in \cite{BuCa2012}. The $k$-approval utilities we have used here are widely used in assignment applications. For example, statistics such as the fraction of school choice students obtaining one of their top three choices, or their one favorite course, are commonly discussed. \bibliographystyle{plain} \bibliography{assignment.bib} \end{document}
2205.15369v1
http://arxiv.org/abs/2205.15369v1
Word Images and Their Impostors in Finite Nilpotent Groups
\documentclass[11pt, a4paper]{amsart} \usepackage{amsfonts,amssymb,amsmath,amsthm,amscd,mathtools,multicol,tikz, tikz-cd,caption,enumerate,mathrsfs,thmtools,cite} \usepackage{inputenc} \usepackage[foot]{amsaddr} \usepackage[pagebackref=true, colorlinks, linkcolor=blue, citecolor=red]{hyperref} \usepackage{latexsym} \usepackage{fullpage} \usepackage{microtype} \usepackage{subfiles} \renewcommand\backrefxxx[3]{ \hyperlink{page.#1}{$\uparrow$#1}} \usepackage{palatino} \parindent 0in \parskip .1in \makeatletter \makeindex \newcommand{\be}{\begin{equation}} \newcommand{\ee}{\end{equation}} \newcommand{\beano}{\begin{eqn*}} \newcommand{\eeano}{\end{eqnarray*}} \newcommand{\ba}{\begin{array}} \newcommand{\ea}{\end{array}} \declaretheoremstyle[headfont=\normalfont]{normalhead} \newtheorem{theorem}{Theorem}[section] \newtheorem{theoremalph}{Theorem}[section] \renewcommand*{\thetheoremalph}{\Alph{theoremalph}} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{definition}[theorem]{Definition} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{remark}[theorem]{Remark} \newtheorem{example}[theorem]{Example} \newcommand{\diag}{\mathrm{diag}} \newcommand{\trace}{\mathrm{trace}} \newcommand{\Sp}{\mathrm{Sp}} \newcommand{\Aut}{\mathrm{Aut}} \newcommand{\Inn}{\mathrm{Inn}} \newcommand{\Or}{\mathrm{O}} \numberwithin{equation}{section} \newcommand{\img}{\mathrm{image}} \def\rem{\refstepcounter{theorem}\paragraph{Remark \thethm}} \def\rems{\refstepcounter{theorem}\paragraph{Remarks \thetheorem}} \def\exam{\refstepcounter{theoremm}\paragraph{Example \thethm}} \renewcommand{\thesection}{\arabic{section}} \begin{document} \title{Word Images and Their Impostors in Finite Nilpotent Groups} \author{Dilpreet Kaur} \email{[email protected]} \address{Indian Institute of Technology Jodhpur} \author{Harish Kishnani} \email{[email protected]} \address{Indian Institute of Science Education and Research, Sector 81, Mohali 140306, India} \author{Amit Kulshrestha} \email{[email protected]} \address{Indian Institute of Science Education and Research, Sector 81, Mohali 140306, India} \thanks{We are thankful to William Cocke and Anupam Singh for their interest in our work.} \subjclass[2010]{20D15, 20D45, 20F10} \keywords{word maps, finite nilpotent groups, special $p$-groups} \maketitle \begin{abstract} It was shown in \cite{Lubotzky_2014} by Lubotzky that automorphism invariant subsets of finite simple groups which contain identity are always word images. In this article, we study word maps on finite nilpotent groups and show that for arbitrary finite groups, the number of automorphism invariant subsets containing identity which are not word images, referred to as word image impostors, may be arbitrarily larger than the number of actual word images. In the course of it, we construct a $2$-exhaustive set of word maps on nilpotent groups of class $2$ and demonstrate its minimality in some cases. \end{abstract} \section{Introduction} Let $F_d$ denote the free group on $d$ letters and $w \in F_d$. For a group $G$, let $G^d$ denote the group of $d$-tuples in $G$. The evaluation of $w$ on $d$-tuples induces a map $\tilde{w} : G^d \to G$. The map $\tilde{w}$ is called the \emph{word map} on $G$ corresponding to the word $w$. The image of $\tilde{w}$ is denoted by $w(G)$. A subset $A \subseteq G$ is defined to be a \emph{word image candidate} if \begin{enumerate}[(i).] 
\item $1 \in A$, and \item $A$ is \emph{${\rm Aut}(G)$-invariant}; \emph{i.e.}, if $g \in A$, then $\varphi(g) \in A$ for every automorphism $\varphi$ of $G$. \end{enumerate} All word images are word image candidates. In \cite{Lubotzky_2014}, Lubotzky proved that if $G$ is a finite simple group and $A \subseteq G$ is a word image candidate, then $A = w(G)$ for some $w \in F_d$. In fact, $d = 2$ suffices. His proof heavily uses properties of finite simple groups such as their $3/2$-generation \cite{Guralnick-Kantor_2000}. In this paper, we show that if $G$ is not simple, then there may exist word image candidates which are not word images. We refer to such word image candidates as \emph{word image impostors}. The groups of our main focus are the finite nilpotent groups. \begin{theoremalph}\label{TheoremA} A finite nilpotent group does not contain a word image impostor if and only if it is an abelian group of prime exponent. (Theorem \ref{Lubotzky-for-nilpotent}) \end{theoremalph} For a group $G$, a subset $W \subseteq F_d$ is called a $d$-\emph{exhaustive set} for word images on $G$, if for every $v \in F_d$ there exists $w \in W$ such that $v(G) = w(G)$. For nilpotent groups of class $2$, we exhibit a $2$-exhaustive set in the following theorem. The notation ${\rm exp}(G)$ denotes the exponent of $G$, and $G'$ denotes the commutator subgroup $[G,G]$. Symbols $x,y \in F_2$ are the free generators of $F_2$. \begin{theoremalph}\label{TheoremB} Let $G$ be a nilpotent group of class $2$. Let $e = {\rm exp}(G)$, $e' = {\rm exp}(G')$ and $f = {\rm exp}(G/Z(G))$. Then $$W := \{x^m[x,y^n] \in F_2: m \mid e, n \mid f \text{ and } n \leq e'\}$$ is a $2$-exhaustive set for word images on $G$. (Theorem \ref{exhaustive-set-in-nilpotent-class-2}) \end{theoremalph} Subsequently, we exhibit examples where the set $W$ in this theorem is a minimal $2$-exhaustive set (Example \ref{example-64} and Example \ref{example-p8}). It is evident from Theorem \ref{TheoremB} that if $G$ is a nilpotent group of class $2$ and $w \in F_2$, then $w(G)$ is closed under taking inverses and powers. It follows from Theorem \ref{TheoremA} that special $p$-groups (see \S\ref{preliminaries}) contain word image impostors. By Theorem \ref{TheoremB}, we have a complete description of word images $w(G); ~w \in F_2$, for such groups. For the subclasses of extraspecial $p$-groups, we make very explicit calculations to show that word image impostors may heavily outnumber word images. \begin{theoremalph} (Theorem \ref{counting-impostors-in-extraspecials}) Let $p$ be a prime and $G$ be an extraspecial-$p$ group. Then the only words images in $G$ are $\{1\}$, $Z(G)$ and $G$. Further, if $i_G$ is the number of word image impostors in $G$ then, \begin{enumerate}[(i).] \item If $p = 2$ then $$i_G = \begin{cases} 1, \quad \text{if } G\cong Q_2 \\ 5, \quad \text{if } G\ncong Q_2 \end{cases} $$ \item If $p \neq 2$ then $$i_G = \begin{cases} 1, ~\quad \quad \quad \quad \text{if } ${\rm exp}(G) = p$ \\ 2^{p+1}-3, \quad \text{if } {\rm exp}(G) = p^2 \text{ and } |G| = p^3 \\ 2^{p+2}-3, \quad \text{if } {\rm exp}(G) = p^2 \text{ and } |G| > p^3 \\ \end{cases} $$ \end{enumerate} \end{theoremalph} The organization of the article is as follows. In \S\ref{preliminaries}, we recall basics of special $p$-groups and recollect a result from \cite{Winter_1972} that describes automorphisms of extraspecial $p$-groups in terms of some linear groups over finite prime fields. 
In the subsequent sections \S\ref{words-in-class-2-groups} and \S\ref{impostors-in-extraspecials}, we prove the main results (Theorems A, B and C) of the article. We conclude the article in \S\ref{special-p-using-word-images} with Theorem \ref{special-through-word-images}, which establishes that a nonabelian finite group $G$ in which $\{1\}, Z(G)$ and $G$ are the only word images is necessarily a special $p$-group. \section{Special $p$-groups and a theorem of Winter}\label{preliminaries} Let $p$ be a prime. A $p$-group is called a \emph{special $p$-group} if its center, derived subgroup and Frattini subgroup coincide and all are isomorphic to an elementary abelian $p$-group. Therefore, special $p$-groups are nilpotent groups of nilpotency class $2$. For a special $p$-group $G$, both the center $S := Z(G)$ and the quotient group $V := \frac{G}{Z(G)}$ are elementary abelian $p$-groups. Thus we can treat $S$ and $V$ as vector spaces over the prime field $GF(p)$. The map $B_G: V \times V \to S$ defined by $B_G(gZ(G), hZ(G)) = [g,h] := ghg^{-1}h^{-1}$, for $gZ(G), hZ(G) \in V$, is a nondegenerate alternating bilinear map. Also, the image of $B_G$ spans $S$ as a vector space over $GF(p)$, as it is equal to the derived subgroup of $G$. It is evident that the image of $B_G$ is the same as the image of the word $[x,y] := xyx^{-1}y^{-1} \in F_2$ on the group $G$. Let $p = 2$. The map $q_G: V \to S$ defined by $q_G(gZ(G))=g^2$, for $gZ(G) \in \frac{G}{Z(G)}$, is a quadratic map. Moreover, the polar map associated with the quadratic map $q_G$ is the same as the bilinear map $B_G$ defined above. It follows from \cite[Theorem 1.4]{ObedPaper} that the converse of this result is also true. Let $V$ and $S$ be two vector spaces defined over the prime field $GF(2)$. Let $q: V\to S$ be a quadratic map. The group $G= \{ (v,s) ~:~ v\in V, s\in S \}$ with the group operation $$(v,s) + (v',s') = (v+v', s+s' + c(v,v'))$$ is a special $2$-group. Here, $c \in Z^2(V,S)$ is the $2$-cocycle corresponding to $q$, as in \cite[Prop. 1.2]{ObedPaper}. In fact, this gives a one-to-one correspondence between isomorphism classes of special $2$-groups and isometry classes of quadratic maps defined over the field $GF(2)$. A similar result also holds for odd primes. Let $p$ be an odd prime and $G$ be a special $p$-group. From \cite[Ch. 2, Lemma 2.2$(ii)$]{GorensteinBook} and the fact that the derived subgroup of $G$ is elementary abelian, the map $T_G: V \to S$ defined by $T_G(gZ(G))=g^p$, $gZ(G) \in V$, is linear. Conversely, given a pair $(B,T)$, where $B : V \times V \to S$ is a nondegenerate alternating bilinear map and $T : V \to S$ is a linear map, the following proposition provides a construction of a special $p$-group $G$ such that $B = B_G$ and $T = T_G$. \begin{proposition}\label{from-b-T-to-special} Let $p$ be an odd prime. Let $V$ and $S$ be two finite dimensional vector spaces over $GF(p)$. Let $\{v_1 , v_2 ,\dots, v_n \}$ and $\{s_1 , s_2 ,\dots, s_m \}$ be bases of $V$ and $S$, respectively, over $GF(p)$. Let $B : V\times V \to S$ be a nondegenerate alternating bilinear map such that ${\rm span}({\rm image}(B)) = S$ and $T : V\to S$ be a linear map. Then, $$G = \langle s_i, v_j : s_i^p = [s_i , v_j] = [s_i, s_l] = 1, [v_j , v_k] = B(v_j, v_k ), v_j^p = T(v_j) ; 1\leq i,l \leq m, 1\leq j, k\leq n\rangle$$ is a special $p$-group, with $B_G = B$ and $T_G = T$. Here, the notation $s_i, v_j$ is used both for the generating symbols of the group $G$ and for the basis vectors of $S$ and $V$.
\end{proposition} \begin{proof} It is clear from the presentation of $G$ that ${\rm exp}(G) = p$ or $p^2$. Thus, $G$ is a $p$-group. Again, from the presentation of $G$, we have $S\subseteq Z(G)$ and from the nondegeneracy of $B$ we have $S=Z(G)$. Since $B$ is bilinear, ${\rm span}({\rm image}(B)) = [G,G]$. Now, the Frattini subgroup $\Phi(G) = G^p[G,G] = S$, as $[G,G]=S$ and $G^p=\img(T)\subseteq S$. Thus, $Z(G)=[G,G]=\Phi(G)$ and $G$ is a special $p$-group. \end{proof} A special $p$-group $G$ is called \emph{extraspecial $p$-group} if $|Z(G)|=p$. For every $n\in \mathbb{N}$, there are two extraspecial $p$-groups, up to isomorphism, of order $p^{2n+1}$. There is no extraspecial $p$-group of order $p^{2n}$. If $p$ is an odd prime, then one of the two extraspecial $p$-groups of order $p^{2n+1}$ has exponent $p$. The linear map $T$ corresponding to this group is the zero map. The extraspecial $p$-group corresponding to nonzero linear map has exponent $p^2$. Winter, in \cite{Winter_1972}, explained the automorphisms of extraspecial $p$-groups in terms of symplectic group $\Sp(V)$, if $p \neq 2$; and orthogonal group $\Or(V,q)$, if $p = 2$. His main theorem is the following. \begin{theorem}\cite[Th. 1]{Winter_1972} \label{Winter-Theorem} Let $p$ be a prime, $G$ be an extraspecial $p$-group and $V = G/Z(G)$. Let $\Aut_{Z(G)}(G)$ be the subgroup of ${\Aut}(G)$ consisting of automorphisms which act trivially on the $Z(G)$. Let $\Inn(G)$ be the subgroup of $\Aut_{Z(G)}(G)$ consisting of inner automorphisms of $G$. \begin{enumerate}[(i).] \item There exists $\theta \in \Aut(G)$ such that the order of $\theta$ is $p-1$, $\Aut_{Z(G)}(G)\cap \langle \theta \rangle = \{1\}$, restriction of $\theta$ to $Z(G)$ is a surjective power map, and $\Aut(G)=\langle \theta \rangle \Aut_{Z(G)}(G)$. \item If $p$ is odd, the quotient $\Aut_{Z(G)}(G)/\Inn(G)$ is isomorphic to a subgroup $Q$ of $\Sp(V)$, where \begin{enumerate}[(a).] \item $Q = \Sp(V)$, if $\exp(G) = p$. \item $Q$ is a proper subgroup of $\Sp(V)$, if $\exp(G) = p^2$. \end{enumerate} \item If $p = 2$, then $Q = \Or(V,q)$, where $q:V\to GF(2)$ is the quadratic form associated to $G$. \end{enumerate} \end{theorem} \begin{lemma}\label{conjugacy-classes-of-extraspecial-p} Let $G$ be an extraspecial $p$-group. Let $g \in G \setminus Z(G)$. Then the coset $gZ(G) \subseteq G$ is the conjugacy class of $g$. \end{lemma} \begin{proof} For an arbitrary $h \in G$, it is clear that $[h,g] \in Z(G)$. Thus, $hgh^{-1} \in gZ(G)$ for all $h \in G$. Since $G$ is a $p$-group and $g$ is noncentral, the size of the conjugacy class of $g$ is divisible by $p$. This forces $gZ(G)$ to be the conjugacy class of $G$. \end{proof} \section{Words images on nilpotent groups of class $2$} \label{words-in-class-2-groups} Throughout this section, $G$ denotes a finite nilpotent group. In some results of this section, we shall impose an additional restriction on the nilpotency class. \begin{lemma} \label{if-nonsurjective-then-in-Frattini} Let $G$ be a finite $p$-group and $\Phi(G)$ be its Frattini subgroup. Let $w: G^{(d)} \to G$ be a nonsurjective word map. Then $w(G) \subseteq \Phi(G)$. \end{lemma} \begin{proof} Since $w$ is nonsurjective, its image $w(G)$ is equal to the image of a word of the form $x^{pr}c$, where $r \in \mathbb Z$ and $c \in [F_d, F_d]$ (see \cite[Lemma 2.3]{CockeHoChirality}). Thus, $w(G) \subseteq G^p[G,G] = \Phi(G)$, where the last equality of holds because $G$ is a $p$-group. 
\end{proof} \begin{theorem}\label{Lubotzky-for-nilpotent} Let $G$ be a finite nilpotent group. Then $G$ does not contain word image impostors if and only if $G$ is an abelian group of prime exponent. \end{theorem} \begin{proof} Let $G$ is an abelian $p$-group of exponent $p$. If $A$ is a word image candidate, then $A = \{1\}$ or $G$. In both cases, $A$ is the image of a word map. Thus, $G$ does not contain word image impostors. For the converse, let $G$ be a nilpotent group which does not contain word image impostors. We first assume that $G$ is a $p$-group. If $G$ is either nonabelian or not of the prime exponent, then, $\Phi(G) = G^p[G,G] \neq 1$. Let $A = (G\setminus \Phi(G)) \cup \{1\}$. Clearly, $A$ is an automorphism invariant proper subset of $G$ and $1 \in A$. We claim that if $w : G^{(d)} \to G$ is a word map then $A \neq w(G)$. Assume, to the contrary, that there is a word map $w : G^{(d)} \to G$ such that $A = w(G)$. Then, using Lemma \ref{if-nonsurjective-then-in-Frattini}, $(G\setminus \Phi(G)) \cup \{1\} = A = w(G) \subseteq \Phi(G)$. This is a contradiction. Hence, $G$ is an abelian group of prime exponent. Finally, suppose that $G$ is an arbitrary finite nilpotent group which does not contain word image impostors. We write $G$ as a direct product of its Sylow subgroups: $G=H_{p_1} \times \dots \times H_{p_k}$. Since ${\rm Aut}(G) = {\rm Aut}(H_{p_1}) \times {\rm Aut}(H_{p_2}) \times \cdots \times {\rm Aut}(H_{p_k})$, we conclude that none of the subgroups $H_{p_i}$ contains impostors. By the theorem in the case of $p$-groups, each $H_{p_i}$ is an abelian group of exponent $p_i$. Thus ${\rm exp}(G) = p_1 p_2 \cdots p_k$. Let $A'$ denote the subset of $G$ consisting of all elements of order $p_1 \dots p_k$ in $G$. Then, it is easy to check that $A = A' \cup \{1\}$ is a word image candidate and it is not the image of a power map if $k \geq 2$. Since $G$ is abelian, every word image is the image of a power map. Thus, $k = 1$ and the exponent of $G$ is prime. \end{proof} We now introduce some notation. For $r$-tuples $I = (i_1, i_2, \cdots, i_r), J = (j_1, j_2, \cdots, j_r) \in \mathbb Z^r$ and an integer $s < r$, we denote, \begin{align*} I_s &:= (i_1, i_2, \cdots, i_s), \quad J_s := (j_1, j_2, \cdots, j_s)\\ |I| &:= i_1 + i_2 + \cdots + i_r \\ |J| &:= j_1 + j_2 + \cdots + j_r \\ I.J & := i_1 j_1 + i_2 j_2 + \cdots + i_rj_r \\ w_{I,J} &:= x^{i_1}y^{j_1}x^{i_2}y^{j_2}\dots x^{i_r}y^{j_r} \in F_2\\ c_{I,J} &:= [x^{i_1},y^{j_1}][x^{i_2},y^{j_2}]\dots [x^{i_r},y^{j_r}] \in F_2 \end{align*} Here, $x,y \in F_2$ are its free generators. \begin{lemma}\label{nilpotent-2 groups-wIJ} Let $I, J \in \mathbb Z^r$, be such that $|I| = 0 = |J|$. Then, there exist $\tilde{I}, \tilde{J} \in \mathbb Z^{r}$ such that for all nilpotent groups of class $2$, the words $w_{I,J}$ and $c_{\tilde{I},\tilde{J}}$ have the same image. \end{lemma} \begin{proof} Let $G$ be a nilpotent group of class $2$. We use induction on $r$ to show the existence of $\tilde{I}, \tilde{J} \in \mathbb Z^r$ such that $w_{I,J}$ and $c_{\tilde{I},\tilde{J}}$ have the same image. If $r = 1$, then $w_{I,J} = 1 \in F_2$ and $c_{(0),(0)} = 1$. If $r = 2$, then $\tilde{I} = (i_1, 0), \tilde{J} = (j_1, 0)$ satisfy $w_{I,J} = c_{\tilde{I},\tilde{J}}$. For $r > 2$, let $g \in w_{I,J}(G)$, and $a, b \in G$ be such that $g = w_{I,J}(a,b)$. Then $g= w_{I_{r-2},J_{r-2}}(a,b) a^{i_{r-1}} b^{j_{r-1}} a^{i_r} b^{j_r}$. 
Since $|I| = 0 = |J|$, we substitute $i_r = -(i_{r-1} + i_{r-2} + \cdots +i_2 + i_1)$ and $j_r = -(j_{r-1} + j_{r-2} + \cdots + j_2 + j_1)$ to obtain $$g = w_{I_{r-2},J_{r-2}}(a,b) a^{i_{r-1}} b^{j_{r-1}} a^{-(i_{r-1} + i_{r-2} + \cdots + i_2 + i_1)} b^{-(j_{r-1} + j_{r-2} + \cdots + j_2 + j_1)}$$ Substituting $a^{-i_{r-1}}$ by $a^{-i_{r-1}} b^{-j_{r-1}} b^{j_{r-1}}$, we get $$g = w_{I_{r-2},J_{r-2}}(a,b) [a^{i_{r-1}}, b^{j_{r-1}}] b^{j_{r-1}} a^{-(i_{r-2} + \cdots + i_2 + i_1)} b^{-(j_{r-1} + j_{r-2} + \cdots + j_2 + j_1)}$$ Since $G$ is a $2$-step nilpotent group, $[G,G] \subseteq Z(G)$. Thus, $[a^{i_{r-1}}, b^{j_{r-1}}]$ is central and we bring it to the beginning of the expression so that $$g = [a^{i_{r-1}}, b^{j_{r-1}}] w_{I',J'}(a,b)$$ where \begin{align*} I' &= (i_1, i_2, \cdots, i_{r-2}, -(i_{r-2}+i_{r-3} + \cdots + i_2 + i_1)) \\ J' &= (j_1, j_2, \cdots, j_{r-3}, j_{r-2} + j_{r-1}, -(j_{r-1} + j_{r-2} + \cdots + j_2 + j_1)) \end{align*} are $(r-1)$-tuples of integers with $|I'| = 0 = |J'|$. Thus, arguing inductively on $r$ we complete the proof. \end{proof} \begin{lemma}\label{powers-of-commutators} Let $G$ be a nilpotent group of class $2$. For $a,b \in G$, denote $[a,b] := aba^{-1}b^{-1}$. Let $n \in \mathbb Z$. Then, \begin{enumerate} \item[(i).] $[a,b]^n = [a^n,b] = [a,b^n]$. Consequently, if $I, J \in \mathbb Z^r$ then $c_{I,J}(a,b)^n = c_{I,J}(a^n,b)$. \item[(ii).] $[a^ib^j,a^kb^l]=[a,b]^{il-jk}, \forall a,b\in G$. \item[(iii).] $(ab)^n=a^n b^n [b,a]^{\frac{n(n-1)}{2}}$. \item[(iv).] If $w\in F_2$ is a word and $a \in w(G)$ then $a^{n}\in w(G)$. \end{enumerate} \end{lemma} \begin{proof} $(i)$. First, let $n = -1$. Since $G$ is a nilpotent group of class $2$, conjugation fixes commutators. Thus $[a,b]^{-1} = [b,a] = a[b,a]a^{-1} = [a^{-1}, b]$. This allows us to assume that $n \in \mathbb N$, in which case the result follows from \cite[Ch. 2, Lemma 2.2$(i)$]{GorensteinBook}. \noindent $(ii).$ It is easy to check that for nilpotent groups of class $2$, $[g, h_1 h_2] = [g,h_1][g,h_2]$. Thus $[a^i b^j, a^k b^l] = [a^i,a^k b^l][b^j,a^k b^l] = [a^i, b^l][b^j, a^k]$. Now using part $(i)$, $[a^i, b^l] = [a,b]^{il}$ and $[b^j, a^k] = [b,a]^{jk} = [a,b]^{-jk}$. Thus $[a^i b^j, a^k b^l] = [a,b]^{il-jk}$. \noindent $(iii).$ For the case $n > 0$ we refer to \cite[Ch. 2, Lemma 2.2$(ii)$]{GorensteinBook}. When $n = -m < 0$, then $(ab)^n = (b^{-1} a^{-1})^m$ and the result follows from $n > 0$ case after an easy computation. \noindent $(iv).$ Since an arbitrary word in $w \in F_2$ is automorphic to a word of type $x^m w_{I,J}$ for suitable $I, J \in \mathbb N^r$ with $|I| = 0 = |J|$ (see \cite[Lemma 2.3]{CockeHoChirality}), by Lemma \ref{nilpotent-2 groups-wIJ} we may assume that $w = x^m c_{I,J}$. Let $g \in x^m c_{I,J}(G)$. Thus, there exist $a, b \in G$ such that $g=a^mc_{I,J}(a,b)$ for suitable $r$-tuples $I = (i_1, i_2, \cdots, i_r)$ and $J = (j_1, j_2, \cdots, j_r)$. Now, $g^n=(a^m)^n c_{I,J}(a,b)^n = (a^n)^m c_{I,J}(a^n,b)$, where the last equality holds due to part $(i)$ of this lemma. Thus $g^n$ is indeed in the image of $x^mc_{I,J}$. \end{proof} As a consequence of part $(iv)$ of this lemma we observe that if $G$ is a nilpotent group of class $2$ then for each $w \in F_2$, the word image $w(G)$ is closed under taking inverses. \begin{lemma}\label{product-of-commutators-nilpotent-class-2} Let $I, J \in \mathbb Z^r$. Then, for all nilpotent groups of class $2$ the words $c_{I,J}$ and $[x, y^{I.J}]$ have the same image. 
\end{lemma} \begin{proof} Let $G$ be a nilpotent group of class $2$. Let $g \in c_{I,J}(G)$ and $a, b \in G$ be such that $g = c_{I,J}(a,b) = [a^{i_1}, b^{j_1}] \cdots [a^{i_r}, b^{j_r}] $. Since $[a^{i_k}, b^{j_k}] \in [G,G] \subseteq Z(G)$ for each $k \in \{1, 2, \cdots, r\}$, the order of taking product does not matter and we write $g = \prod_{k = 1}^r [a^{i_k}, b^{j_k}]$. For each term $[a^{i_k}, b^{j_k}]$ in the product, we use Lemma \ref{powers-of-commutators}$(i)$ to obtain $$ [a^{i_k}, b^{j_k}] = [a^{i_{k}}, b]^{j_{k}} = [a,b]^{i_k j_k}$$ Thus $g = \prod_{k = 1}^r [a, b]^{i_{k}j_k} = [a, b]^{I.J} = [a,b^{I.J}]$, where the last equality follows from Lemma \ref{powers-of-commutators}$(i)$. Tracing back this calculation one may show that the image of $[x^{I.J},y]$ is contained in the image of $c_{I,J}$. \end{proof} \begin{lemma}\label{prime-divisors-set} Let $G$ be a nilpotent group of class $2$ and $w \in F_2$ be a word on $G$. Let $e := {\rm exp}(G)$, $e' := {\rm exp}(G')$ and $f := {\rm exp}(G/Z(G))$. For $r \in \mathbb N$, let $\mathcal P_r$ denote the set of prime divisors of $r$. Then, there exist $m, n \in \mathbb N$ such that $\mathcal P_m \subseteq \mathcal P_e$, $\mathcal P_n \subseteq \mathcal P_f$, $n \leq e'$, and the word maps $w$ and $x^m[x,y^n]$ have the same image. \end{lemma} \begin{proof} By \cite[Lemma 2.3]{CockeHoChirality}, Lemma \ref{nilpotent-2 groups-wIJ} and Lemma \ref{product-of-commutators-nilpotent-class-2}, we may assume that $w=x^m[x,y^n]$ for some $m,n \in \mathbb N$. Let $g = w(a,b) = a^m[a,b^n] \in w(G)$. Suppose, $p \in \mathcal P_m \setminus \mathcal P_e$. Then ${\rm gcd}(p,e) = 1$ and there exists $p' \in \mathbb N$ such that $pp' \equiv 1 \mod e$. Thus $a^{pp'} = a \in G$. Let $\ell \in \mathbb N$ be such that $m = p\ell$. Let $w' = x^{\ell}[x,y^n]$. Then $g = a^{p\ell}[a^{pp'},b^n] = (a^{p})^{\ell}[(a^p)^{p'},b^n] = (a^{p})^{\ell}[(a^p),b^{np'}]$. Thus, $g \in w'(G)$. Conversely, let $g = w'(a,b) \in G$. Then, $$g = a^{\ell}[a,b^n] = (a^{pp'})^{\ell}[a^{pp'}, b^n] = (a^{p'})^m[a^{p'},b^{np}],$$ and we conclude that $g \in w(G)$. Therefore, $w(G) = w'(G)$. A successive iteration of this process allows us to assume that $\mathcal P_m \setminus \mathcal P_e = \emptyset$, i.e. $\mathcal P_m \subseteq \mathcal P_e$.\\ Now, we show that we may also assume that $\mathcal P_n \subseteq \mathcal P_f$. Suppose, $p \in \mathcal P_n \setminus \mathcal P_f$. Then ${\rm gcd}(p,f) = 1$ and there exists $p' \in \mathbb N$ such that $pp' \equiv 1 \mod f$. Thus $b^{pp'}z = b \in G$ for some $z \in Z(G)$. Let $\ell \in \mathbb N$ be such that $n = p\ell$. Let $g = w(a,b)$. Then $g = a^m[a,b^n] = a^m[a, b^{p\ell}]$. Thus, $g \in w'(G)$, where $w' = x^m[x,y^{\ell}]$. Conversely, let $g = w'(a,b) \in G$. Then, $$g = a^m[a,b^{\ell}] = a^m[a,z^{\ell}b^{pp'\ell}] = a^m[a,(b^{p'})^{n}] .$$ Thus, $g \in w(G)$, and we conclude that $w(G) = w'(G)$. A successive iteration of this process allows us to assume that $\mathcal P_n \subseteq \mathcal P_f$. \\ Finally, since $[x,y^n] = [x,y]^n$ and $e' = {\rm exp}(G')$, the assumption $n \leq e'$ is natural. \end{proof} In the next theorem we claim that the assumptions $\mathcal P_m \subseteq \mathcal P_e$ and $\mathcal P_n \subseteq \mathcal P_f$ may be strengthened to $m \mid e$ and $n \mid f$, respectively. \begin{theorem}\label{exhaustive-set-in-nilpotent-class-2} Let $G$ be a nilpotent group of class $2$. Let $e = {\rm exp}(G)$, $e' = {\rm exp}(G')$ and $f = {\rm exp}(G/Z(G))$. 
Then $$W := \{x^m[x,y^n] : m \mid e, n \mid f \text{ and } n \leq e'\} \subseteq F_2$$ is a $2$-exhaustive set for word images on $G$. \end{theorem} \begin{proof} Let $w \in F_2$. From Lemma \ref{prime-divisors-set}, we may assume that $w=x^m[x,y^n]$, where $\mathcal P_m \subseteq \mathcal P_e$, $\mathcal P_n \subseteq \mathcal P_f$ and $n \leq e'$. Suppose, $m \nmid e$. Then, there exists a prime $p$ and integers $r, s, \ell, k \in \mathbb N$ with $r > s$ such that $m = p^r\ell$, $e = p^sk$ and ${\rm gcd}(p,\ell) = 1 = {\rm gcd}(p, k)$. We observe that $m \equiv p^s \ell \left(p^{r-s} + k\right) \mod e$ and ${\rm gcd}(p^{r-s} + k, e) = 1$. Thus, there exists $t \in \mathbb N$ such that $t(p^{r-s}+k) \equiv 1 \mod e$. \\ Let $w' = x^{{p^s} \ell}[x,y^n]$. We claim that $w(G) = w'(G)$. Let $g = w(a,b)$. Then, \begin{align*} g = a^m[a,b^n] &= \left(a^{p^{r-s} + k}\right)^{p^s\ell}[a, b^n] \\ &=\left(a^{p^{r-s} + k}\right)^{p^s\ell}[a^{t(p^{r-s} + k)}, b^n] \\ &= \left(a^{p^{r-s} + k}\right)^{p^s\ell}[a^{p^{r-s} + k}, b^{nt}]. \end{align*} Thus $g \in w'(G)$.\\ Conversely, if $g \in w'(G)$. Then, \begin{align*} g = a^{p^s \ell}[a,b^n] &= a^{t(p^{r-s} + k)p^s \ell}[a^{t(p^{r-s} + k)},b^n] \\ & = a^{tm}[a^t, (b^{p^{r-s}+k})^n]. \end{align*} Thus, $g \in w(G)$, and the claim follows. A successive iteration of this process allows us to assume that $m \mid e$. We follow a similar process to show that we may assume that $n \mid f$. Suppose, $n \nmid f$. Then, there exists a prime $p$ and integers $r, s, \ell, k \in \mathbb N$ with $r > s$ such that $n = p^r\ell$, $f = p^sk$ and ${\rm gcd}(p,\ell) = 1 = {\rm gcd}(p, k)$. We observe that $n \equiv p^s \ell \left(p^{r-s} + k\right) \mod f$ and ${\rm gcd}(p^{r-s} + k, f) = 1$. Thus, there exists $t \in \mathbb N$ such that $t(p^{r-s}+k) \equiv 1 \mod f$. \\ Let $w' = x^m[x,y^{{p^s} \ell}]$. We claim that $w(G) = w'(G)$. Let $g = w(a,b)$. Then, for some $z \in Z(G)$, \begin{align*} g = a^m[a,b^n] = a^m[a, (bz)^{p^s \ell \left(p^{r-s} + k\right)}] = a^m[a, b^{p^s \ell \left(p^{r-s} + k\right)}] \end{align*} Thus $g \in w'(G)$.\\ Conversely, if $g \in w'(G)$. Then, \begin{align*} g = a^m[a,b^{{p^s} \ell}] = a^m[a, b^{p^s \ell t(p^{r-s}+k)}] = a^m[a, b^{nt}] \end{align*} Thus, $g \in w(G)$, and the claim follows. A successive iteration of this process allows us to assume that $n \mid f$. These arguments shows that $W = \{x^m[x,y^n] : m \mid e \text{ and } n \mid f, e \leq e'\}$ is a $2$-exhaustive set for word images on $G$. \end{proof} We show that in many cases $W$ is a minimal $2$-exhaustive set. We pick these examples from the class of special $p$-groups. In special $p$-groups, $e = p^2$ and $f = p$. Thus, $W = \{1, x, x^p, [x,y], x^p[x,y]\}$ is $2$-exhaustive set for special $p$-groups. We express these words in terms of maps $q, B$ and $T$ associated to $G$ as in \S \ref{preliminaries}. When $p=2,$ we define the map $q+B : V \times V \to S$ by $$(q + B)(gZ(G), hZ(G)) = q(gZ(G)) + B(gZ(G), hZ(G))$$ for $gZ(G), hZ(G) \in V$. For odd primes $p$, we define the map $T+B : V \times V \to S$ by $$(T+B)(gZ(G), hZ(G)) = T(gZ(G))+ B(gZ(G), hZ(G))$$ for all $gZ(G), hZ(G) \in V$. The images of maps $q$ and $q+B$ are same as the images of words $x^2$ and $x^2[x,y]$, respectively, for special $2$-groups. The images of maps $T$ and $T+B$ are same as the images of words $x^p$ and $x^p[x,y]$, respectively, for special $p$-groups, when $p$ is odd. \begin{example}\label{example-64} \normalfont Let $V$ and $S$ be $3$-dimensional vector spaces over $GF(2)$. 
Let $q : V \to S$ the quadratic map, which is explicitly defined by the following, for a fixed choice of bases of $V$ and $S$. $$q(\alpha,\beta,\gamma) = (\alpha^2+\beta^2+\alpha \beta, \alpha^2+\alpha \gamma,\beta\gamma)$$ Let $B : V \times V \to S$ the polar map of $q$. Then $B$ is bilinear, and, for the same choice of bases, is given by $$B( (\alpha_1, \beta_1, \gamma_1), (\alpha_2, \beta_2, \gamma_2)) = (\alpha_1\beta_2-\alpha_2\beta_1, \alpha_1\gamma_2-\gamma_1\alpha_2, \beta_1\gamma_2-\gamma_1\beta_2)$$ Let $G$ be the special $2$-group associated with $q$. The order of $G$ is $2^6 = 64$. We claim that the images of three maps $q, B$ and $q+B$ are distinct nontrivial proper subsets of $G$. It is clear from the following table $B$ is surjective. Therefore its image is same as center of the group $G$. \begin{center} \begin{tabular}{|c|c|c|} \hline $v_1= (\alpha_1, \beta_1, \gamma_1)$ & $v_2=(\alpha_2, \beta_2, \gamma_2)$ & $B(v_1, v_2)$\\ \hline $(\alpha_1, \beta_1, \gamma_1)$ & $(0,0,1)$ & $(0, \alpha_1, \beta_1)$\\ \hline $(0,1,\gamma_1)$ & $(1,0,\gamma_2)$ & $(1, \gamma_1, \gamma_2)$\\ \hline \end{tabular} \end{center} We claim that $(0,0,1)\notin \img(q).$ If possible, let $q(\alpha,\beta,z)=(0,0,1)$. The definition of $q$ forces $\beta=\gamma=1$. We check that $q(0,1,1)=q(1,1,1)=(1,0,1)$, and conclude that the map $q$ is not surjective. Further, $\img(q)$ is different from $\img(q+B)$, since $$(0,0,1) = q(0,0,1)+B( (0,0,1), (0,1,0) ) \in \img(q+B) $$ However, $q+B$ is not surjective as $(1,1,1)\notin \img(q+B)$. This can be easily verified from the following table, with $v_2= (\alpha_2, \beta_2, \gamma_2)$. \begin{center} \begin{tabular}{|c|c|c|c|} \hline $v_1$ & $q(v_1)+B(v_1, v_2)$ & $v_1$ & $q(v_1)+B(v_1, v_2)$\\ \hline $(0,0,0)$ & $(0,0,0)$ & $(1, 0, 0)$ & $(1+\beta_2, 1+\gamma_2, 0)$\\ \hline $(0,1,0)$ & $(1-\alpha_2,0,\gamma_2)$ & $(0,0,1)$ & $(0, \alpha_2, \beta_2)$\\ \hline $(1,1,0)$ & $(1+\beta_2-\alpha_2,1+\gamma_2,\gamma_2)$ & $(1, 0, 1)$ & $(1+\beta_2, \gamma_2-\alpha_2, \beta_2)$\\ \hline $(0,1,1)$ & $(1-\alpha_2,-\alpha_2,1+\gamma_2-\beta_2)$ & $(1,1,1)$ & $(1+\beta_2-\alpha_2, \gamma_2-\alpha_2, 1+\gamma_2-\beta_2)$\\ \hline \end{tabular} \end{center} \end{example} We have verified using GAP that the group $G$ of this example is the only special $p$-group of order less than $256 = 2^8$ for which all five words in $W$ have distinct images. For groups of order $p^8$, such examples always exist. More explicitly, we have the following: \begin{example}\label{example-p8} \normalfont Let $V$ and $S$ be $4$-dimensional vector spaces over $GF(p)$. Consider the bilinear map $B: V\times V \to S$ defined by \begin{center} $B((\alpha_1, \beta_1, \gamma_1, w_1), (\alpha_2, \beta_2, \gamma_2, \delta_2)) = (\alpha_1\beta_2-\alpha_2\beta_1, \alpha_1\gamma_2-\gamma_1\alpha_2, \beta_1\gamma_2-\gamma_1\beta_2, \alpha_1\delta_2-\alpha_2\delta_1)$. \end{center} If $p = 2,$ then define $q:V\to S$ by $q(\alpha,\beta,\gamma,\delta)= (\beta^2+\alpha \beta, \alpha \gamma, \beta \gamma, \alpha \delta)$. If $p\neq 2,$ then define $T: V \to S$ by $T(\alpha,\beta,\gamma,\delta)= (\beta,0,0,0)$. We note that $q$ is a quadratic map and $T$ is a linear map. Let $G$ be the special $p$-group of order $p^8$ associated with $q$ or $(B,T)$, according as if $p = 2$ or $p \neq 2$. We claim that if $w_1 \neq w_2 \in W$ then $w_1(G) \neq w_2(G)$. 
To prove the claim, we first notice that if $p = 2$, the images of $B, q$ and $q+B$ are nontrivial proper subsets of $S$; and if $p \neq 2$, then the images of $B,T$ and $T+B$ are nontrivial proper subsets of $S$. We show that $B$ is not surjective. In fact, $(0,0,1,1)\notin \img(B)$. If possible, let $$B((\alpha_1, \beta_1, \gamma_1, \delta_1), (\alpha_2, \beta_2, \gamma_2, \delta_2))=(\alpha_1\beta_2-\alpha_2\beta_1, \alpha_1\gamma_2-\gamma_1\alpha_2, \beta_1\gamma_2-\gamma_1\beta_2, \alpha_1\delta_2-\alpha_2\delta_1)=(0,0,1,1)$$ Since $\alpha_1\delta_2-\alpha_2\delta_1=1$, both $\alpha_1$ and $\alpha_2$ can't be zero simultaneously. If $\alpha_1=0$, then $\alpha_2\neq 0$, $\alpha_1\beta_2-\alpha_2\beta_1=0$ and $\alpha_1\gamma_2-\gamma_1\alpha_2=0$ force $\beta_1=0$ and $\gamma_1=0$. This, in turn, implies $\beta_1\gamma_2-\gamma_1\beta_2=0,$ contradicting $\beta_1\gamma_2-\gamma_1\beta_2=1.$ The case $\alpha_1 \neq 0$ may be handled similarly. If $p = 2$, we show that $\img(B) \neq \img(q)$. Note that $b((0,1,0,0), (0,0,1,0) = (0,0,1,0)$. If possible, let $q(\alpha,\beta,\gamma,\delta)= (\beta^2+\alpha \beta, \alpha \gamma, \beta \gamma, \alpha \delta) =(0,0,1,0)$. Then $\beta=\gamma=1$. Now, if $\alpha=0$, then $\beta^2+\alpha \beta=1$. If $\alpha=1$, then, $\alpha z=1$. Thus, $q(\alpha,\beta,z,w)\neq (0,0,1,0)$ for all $(\alpha,\beta,z,w)$. If $p \neq 2$ then we show that $\img(B) \neq \img(T)$. Note that $B((0,1,0,0), (0,0,1,0)) = (0,0,1,0)$ and $T(\alpha,\beta,\gamma,\delta)\neq (0,0,1,0)$ for all $(\alpha,\beta,\gamma,\delta)$. If $p = 2$, we show in the following table, that $\img(q+B)$ is surjective. \begin{center} \begin{tabular}{|c|c|c|} \hline $v_1$ & $v_2$ & $q(v_1)+B(v_1, v_2)$\\ \hline $(1, 0,\gamma_1, \delta_1)$ & $(1,1,\gamma_2,\delta_2)$ & $(1, \gamma_2, \gamma_1, \delta_2)$\\ \hline $(0,1,\gamma_1,\delta_1)$ & $(1,1,\gamma_2,\delta_2)$ & $(0, \gamma_1, \gamma_2, \delta_1)$\\ \hline \end{tabular} \end{center} If $p \neq 2$, we show in the following table, that $\img(T+B)$ is surjective. \begin{center} \begin{tabular}{|c|c|c|} \hline $v_1$ & $v_2$ & $T(v_1)+B(v_1, v_2)$\\ \hline $(1, \beta_1, 0,0)$ & $(1,\beta_2,\gamma_2\neq 0,\delta_2)$ & $(\beta_2, \gamma_2, \beta_1\gamma_2, \delta_2)$\\ \hline $(1,\beta_1,1,\delta_1)$ & $(0,\beta_2,0,\delta_2)$ & $(\beta_2+\beta_1, 0, -\beta_2, \delta_2)$\\ \hline \end{tabular} \end{center} For all prime numbers $p$, this proves that $G$ has distinct sets as images of all possible five words in $W$. \end{example} \section{Word image impostors in extraspecial $p$-groups} \label{impostors-in-extraspecials} Let $G$ be an extraspecial $p$-group. Recall, from Theorem \ref{Winter-Theorem}, that $\Aut_{Z(G)}(G)/\Inn(G)$ is isomorphic to a subgroup $Q$ of the symplectic group $\Sp(V)$. In fact, for $\varphi \in \Aut_{Z(G)}(G)$, we define $f_{\varphi} : V \to V$ by $f_{\varphi}(gZ(G)) = \varphi(g)Z(G)$. Then, by \cite[(3A), p. 161]{Winter_1972}, $f_{\varphi} \in \Sp(V)$. Further, if $f \in Q \subseteq \Sp(V)$, then by \cite[(3D) (3E), p. 162]{Winter_1972}, there exists $\varphi_f \in \Aut_{Z(G)}(G)$ such that $f_{\varphi_{f}} = f$. We shall examine the action $\psi : Q \times V \to V$ given by $\psi(f,v) = f(v)$. \begin{proposition}\label{if-isometric-then-automorphic} Let $G, V, Q$ and $\psi$ be as above. Let $g, h \in G \setminus Z(G)$ and $v = gZ(G), w = hZ(G) \in V$. If $v$ and $w$ are in the same $\psi$-orbit then $g$ and $h$ are automorphic. \end{proposition} \begin{proof} Suppose ${\rm orbit}_{\psi}(v) = {\rm orbit}_{\psi}(w)$. 
Then, $f(v) = w$ for some $f \in Q$, and $$hZ(G) = w = f(v) = f_{\varphi_f}(v) = {\varphi}_f(g) Z(G).$$ Thus, ${\varphi}_f(g) = h z^{\ell}$ for some $\ell\in \{0,1,\dots, p-1\}$, where $z$ is a generator of $Z(G)$. Since $h$ and $hz^{\ell}$ are conjugates in $G$ (see Lemma \ref{conjugacy-classes-of-extraspecial-p}), there exists $\rho \in \Inn(G)$ such that $\rho(h) = hz^{\ell} = {\varphi}_f(g)$. Hence ${\rho}^{-1}{\varphi}_f (g) = h$, and $g$ and $h$ are automorphic. \end{proof} The following corollary is immediate from the above proposition. \begin{corollary} Let $G, V, Q$ and $\psi$ be as above. Let $n_o$ be the number of nonzero orbits of the action $\psi$ and $n_c$ be the number of noncentral $\Aut(G)$ components of the group $G$. Then, $n_c \leq n_o$. \end{corollary} Rest of the section is divided into two subsections : $p = 2$ and $p \neq 2$. \subsection{Case $p = 2$} Let $q : V \to GF(2)$ be the quadratic form associated to $G$. Then, by Theorem \ref{Winter-Theorem}, $Q$ is the orthogonal group $\Or(V,q)$. \begin{lemma}\label{Witt-and-Orbit} Let $G$ be an extraspecial $2$-group and $V = G/Z(G)$. Let $q : V \to GF(2)$ be the quadratic form associated to $G$. Then $v,w \in V \setminus \{0\}$ have the same orbit under the action $\psi : Q \times V \to V$ if and only if $q(v) = q(w)$. \end{lemma} \begin{proof} The lemma follows from Witt Extension Theorem in characteristic $2$ (see \cite[Theorem 8.3]{Elman-Karpenko-Merkurjev}), and the fact that in this characteristic, $Q = \Or(V,q)$. \end{proof} We observe that if $g \in G \setminus Z(G)$ and $v = gZ(G) \in V$ then order of $g$ is $2$ (resp. $4$) if and only if $q(v) = 0$ (resp. $q(v) = 1$). We use this observation in the proof of the following theorem. \begin{theorem}\label{aut-components-for-char-2} Let $G$ be an extraspecial $2$-group. \begin{enumerate}[(i).] \item Two elements $g, h \in G$ are automorphic if and only if the following holds: (a). $g$ and $h$ have same orders, and (b). $g \in Z(G)$ iff $h \in Z(G)$. \item Let $n$ be the number of orbits of natural ${\rm Aut}(G)$ action on $G$. Then, $$ n = \begin{cases} 3, \quad \text{if } G \cong Q_2 \\ 4, \quad \text{if } G \ncong Q_2 \end{cases} $$ Here, $Q_2$ is the quaternion group of order $8$. \end{enumerate} \end{theorem} \begin{proof} $(i)$. It is clear that if $g \in Z(G)$ then $g$ is automorphic to some $h \in G$ if and only if $g = h$. Now, let $g, h \in G \setminus Z(G)$ and $v,w$ be their respective images in $V$. If $g$ and $h$ are of the same order then $q(v) = q(w)$. By Lemma \ref{Witt-and-Orbit}, $v$ and $w$ are in the same $\psi$-orbit. Now, by Proposition \ref{if-isometric-then-automorphic}, $g$ and $h$ are automorphic. $(ii)$. It follows from $(i)$ that there are two central orbits. If $G \cong Q_2$ then all elements of $G \setminus Z(G)$ are of order $4$, hence these are in the same orbit by part $(i)$. If $G \ncong Q_2$ then $G \setminus Z(G)$ contains elements of order $2$ and $4$. Thus, by part $(i)$, there are two noncentral orbits in this case. \end{proof} \subsection{Case $p \neq 2$} Let $G$ be an extraspecial $p$-group and $(B,T)$ be the pair consisting of an alternating bilinear form $B:V \times V \to GF(p)$ and a linear map $T : V \to GF(p)$ that is associated to $G$. If ${\rm exp}(G) = p$ then $T = 0$. \begin{lemma}\label{Witt-and-Orbit-Odd-p} Let $G$ be the extraspecial $p$-group with ${\rm exp}(G) = p$. Let $V, Q, \psi$ be as in the beginning of this section. Then the action $\psi$ is transitive on $V \setminus \{0\}$. 
\end{lemma} \begin{proof} The lemma follows from the transitivity of $\Sp(V)$ action on $V \setminus \{0\}$ (see \cite[Theorem 3.3]{Wilson-Book}), and the fact that in odd characteristic, $Q = \Sp(V)$ for ${\rm exp}(G) = p$ case. \end{proof} \begin{theorem}\label{aut-components-for-char-p-exp-p} Let $G$ be the extraspecial $p$-group with ${\rm exp}(G) = p$. \begin{enumerate}[(i).] \item Two elements $g, h \in G$ are automorphic if and only if the following holds: (a). $g$ and $h$ have same orders, and (b). $g \in Z(G)$ iff $h \in Z(G)$. \item The natural ${\rm Aut}(G)$ action on $G$ has three orbits. \end{enumerate} \end{theorem} \begin{proof} $(i)$. By Theorem \ref{Winter-Theorem}$(i)$ , it is clear that if $g, h \in Z(G) \setminus \{1\}$ then $g$ and $h$ are automorphic. Now, let $g, h \in G \setminus Z(G)$ and $v,w$ be their respective images in $V$. By Lemma \ref{Witt-and-Orbit-Odd-p}, $v$ and $w$ are in the same $\psi$-orbit. Now, by Proposition \ref{if-isometric-then-automorphic}, $g$ and $h$ are automorphic. $(ii)$. From $(i)$ it follows that there are two central orbits. Since all elements of $G \setminus Z(G)$ have the same order $p$, they are in the same orbit. \end{proof} We now turn our attention to the case of extraspecial $p$-groups $G$ with ${\rm exp}(G) = p^2$, where $p$ is an odd prime. Let $B: V \times V \to S$ be the alternating nondegenerate bilinear form and $T : V \to S$ be the linear map associated to $T$, as in \S\ref{preliminaries}. Then, $V$ has a basis $\mathcal B = \{v_1, w_1, v_2, w_2, \cdots, v_n, w_n\}$ such that $B(v_i, w_i) = 1$ for $1 \leq i \leq n$, and, $B(v_i, w_j) = B(v_i, v_j) = B(w_i, w_j) = 0$ for $i \neq j$, $T(v_1) = 1$ and $T(u) = 0$ for $u \in \mathcal B \setminus \{v_1\}$ (see \cite[Prop. 2.5]{Dilpreet2019}). We refer to such a basis as a \emph{special symplectic basis} for $B$. \begin{lemma}\label{Witt-and-Orbit-Odd-p-minus} Let $G$ be the extraspecial-$p$ group with ${\rm exp}(G) = p^2$. Let $V, Q, \psi$ be as in the beginning of this section. Let $\mathcal B = \{v_1, w_1, v_2, w_2, \cdots, v_n, w_n\}$ be a special symplectic basis for $B$. \begin{enumerate}[(i).] \item Let $v,w \in V \setminus \{0\}$ be two distinct vectors. Then, ${\rm orbit}_{\psi}(v) = {\rm orbit}_{\psi}(w)$ if $T(v)=T(w)$ and either $v,w \notin {\rm ker}(T)$ or $v,w \notin {\rm span}(w_1)$. \item If $|G| = p^3$, the action $\psi$ has exactly $2p-2$ nonzero distinct orbits. These are represented by the elements of the form $av_1, bw_1$, where $a,b \in GF(p) \setminus \{0\}$. \item If $|G| > p^3$, the action $\psi$ has exactly $2p-1$ nonzero distinct orbits. These are represented the elements of the form $av_1, bw_1, v_2$, where $a,b \in GF(p) \setminus \{0\}$. \end{enumerate} \end{lemma} \begin{proof} We first prove $(i)$. We claim that there exists $v' \in {\rm orbit}_{\psi}(v)$ such that $v'$ is of the form $a_1v_1+b_1w_1+a_2v_2$, where $a_2 \in \{0, 1\} \subseteq GF(p)$. To see this, let $U := {\rm span}(\mathcal B \setminus \{v_1, w_1\})$. The restriction of $T$ to $U$ is the zero map and the restriction of $B$ to $U \times U$ is a nondegenerate alternating bilinear form. Let $p_U:V \to U$ be the natural projection by suppressing $v_1$ and $w_1$. If $p_U(v) = 0$ then the claim holds with $a_2 = 0$. If $p_U(v) \neq 0$, then by the transitivity of $\Sp(U)$ action on $U \setminus \{0\}$ (see \cite[Theorem 3.3]{Wilson-Book}), there exists $f \in \Sp(U)$ such that $f(p_U(v)) = v_2$. We extend $f$ to $f' \in \Sp(V)$ by defining $f'(v_1) = v_1$ and $f'(w_1) = w_1$. 
Then $v' := f'(v) \in {\rm orbit}_{\psi}(v)$ is of the form $a_1v_1 + b_1w_1 + v_2$. We use the same argument to assert that there exists $w' \in {\rm orbit}_{\psi}(v)$ such that $w'$ is of the form $c_1v_1 + d_1w_1 + c_2v_2$, where $c_2 \in \{0, 1\} \subseteq GF(p)$. Thus, to start with, we assume that $p_U(v)$ and $p_U(w)$ are either $0$ or $v_2$. Further, by the hypothesis $T(v) = T(w)$ we conclude that $a_1 = c_1$. Now, let us consider the two non-disjoint cases. \noindent {\bfseries Case 1}. $v,w \notin {\rm ker}(T)$. In this case we have $a_1\ne 0$. If $a_2=0$, then we define an isometry $f_1$ of $V$ whose matrix with respect to the basis $\mathcal B$ is $$\left( \begin{matrix} 1 & 0 & 0 & \dots & 0 \\ \alpha_1 & 1 & 0 & \dots & 0 \\ 0 & 0 & 1 & \dots & 0 \\ \hdotsfor{5} \\ 0 & 0 & 0 & \dots & 1 \end{matrix}\right). $$ Here $\alpha_1 \in GF(p)$ is such that ${a_1}\alpha_1 \equiv b_1 \mod p$. It is easy to check that $f_1 \in Q$ and $f_1(a_1v_1)=a_1v_1+b_1w_1=v$. Thus, $v$ and $a_1v_1$ are in the same $\psi$-orbit. If $a_2 =1$ then we define an isometry $f_2$ of $V$ whose matrix with respect to the basis $\mathcal B$ is $$\left( \begin{matrix} 1 & 0 & 0 & 0 & \dots & 0 & 0 \\ 0 & 1 & 0 & -1 & \dots & 0 & 0 \\ \beta_1 & 0 & \beta_1 & 0 & \dots & 0 & 0 \\ 0 & 0 & 0 & a_1 & \dots & 0 & 0 \\ \hdotsfor{7} \\ 0 & 0 & 0 & 0 & \dots & 1 & 0 \\ 0 & 0 & 0 & 0 & \dots & 0 & 1 \end{matrix}\right). $$ Here $\beta_1$ is such that ${a_1}\beta_1 \equiv 1 \mod p$. Again, it is easy to check that $f_2 \in Q$ and $f_1(f_2(a_1v_1))=f_1(a_1v_1+v_2)=a_1v_1+b_1w_1+v_2$. Since $a_2\in \{0,1\}$, we conclude that $v$ and $a_1v_1$ are in the same $\psi$-orbit in this case. Replacing $v$ by $w$ in the above argument we conclude that $w$ and $a_1v_1$ are in the same $\psi$-orbit. Thus ${\rm orbit}_{\psi}(v) = {\rm orbit}_{\psi}(w)$. \\ \noindent{\bfseries Case 2}. $v,w \notin {\rm span}(w_1)$. The case $1$ allows us to assume that $v,w \in {\rm ker}(T)$. Thus, $a_1 = c_1 = 0$. Further, since $v,w \notin {\rm span}(w_1)$, we have $a_2 = c_2 = 1$. We define an isometry $f_3$ of $V$ whose matrix with respect to the basis $\mathcal B$ is $$\left( \begin{matrix} 1 & 0 & 0 & 0 & 0 & \dots & 0 \\ 0 & 1 & b_1 & 0 & 0 & \dots & 0 \\ 0 & 0 & 1 & 0 & 0 & \dots & 0 \\ b_1 & 0 & 0 & 1 & 0 & \dots & 0 \\ 0 & 0 & 0 & 0 & 1 & \dots & 0 \\ \hdotsfor{5} \\ 0 & 0 & 0 & 0 & 0 & \dots & 1 \end{matrix}\right). $$ Again, $f_3 \in Q$ and $f_3(v_2)=b_1w_1+v_2=v$. Similarly, $w$ and $v_2$ are in the same $\psi$-orbit. Thus ${\rm orbit}_{\psi}(v) = {\rm orbit}_{\psi}(w)$. Now we prove $(ii)$ and $(iii)$. Let $v \in V \setminus\{0\}$. As in the proof of $(i)$, we may assume that $v = a_1v_1 + b_1w_1 + a_2 v_2$. If $v\notin {\rm ker}(T)$ then, again by part $(i)$, $v\in {\rm orbit}_{\psi}(a_1v_1)$. Since $T\circ f=T,\forall f\in Q$ and $T(\alpha v_1)\neq T(\beta v_1)$ if $\alpha \neq \beta$, the orbits ${\rm orbit}_{\psi}(a_1v_1), a_1\in GF(p)\setminus \{0\}$ are all distinct. If $v \in {\rm ker}(T)$, then $a_1 = 0$. Hence, $v = b_1w_1 + a_2 v_2$. If $a_2 = 0$, then $v= b_1w_1$. By \cite[(4A), p. 164]{Winter_1972}, we have $f(w_1) = w_1, \forall f\in Q$. Thus the orbits ${\rm orbit}_{\psi}(b_1w_1)$ are all singleton. If $a_2 \neq 0$ then $v = b_1w_1 + a_2v_2 \notin {\rm span}(w_1)$ and $|G| > p^3$. In this case by part $(i)$, $v \in {\rm orbit}_{\psi}(v_2)$. Since, $0 = T(v_2) \neq T(a_1v_1) = a_1$ for $a_1 \neq 0$, the orbit ${\rm orbit}_{\psi}(v_2)$ is distinct from the orbits ${\rm orbit}_{\psi}(a_1v_1)$. 
Thus, the orbits of $\psi$ are as asserted in $(ii)$ and $(iii)$. \end{proof} \begin{theorem}\label{aut-components-for-char-p-exp-p-square} Let $G$ be the extraspecial $p$-group with ${\rm exp}(G)=p^2$. \begin{enumerate}[(i).] \item Let $V, B, T, \psi$ be as in lemma \ref{Witt-and-Orbit-Odd-p-minus} and $\mathcal B = \{v_1, w_1, v_2, w_2, \cdots, v_n, w_n\}$ be the special symplectic basis for $B$. Let $g,h \in G$ be such that $gZ(G), hZ(G) \notin {\rm span}(w_1)\setminus\{0\} \subseteq V$. Two elements $g, h \in G$ are automorphic if and only if the following holds: (a). $g$ and $h$ have same orders, and (b). $g \in Z(G)$ iff $h \in Z(G)$. \item Let $n$ be the number of orbits of natural ${\rm Aut}(G)$ action on $G$. Then, $$ n = \begin{cases} p+2, \quad \text{if } |G| = p^3 \\ p+3, \quad \text{if } |G| > p^3 \end{cases} $$ \end{enumerate} \end{theorem} \begin{proof} $(i)$. Let $g,h \in G$ be the elements of the same order which are either both central or both noncentral. By Theorem \ref{Winter-Theorem}$(i)$ , it is clear that if $g, h \in Z(G)$ then $g$ and $h$ are automorphic. Now suppose that $g, h \in G \setminus Z(G)$. Let $v, w$ be their respective images in $V$. Since $g$ and $h$ have same orders, $v \in {\rm ker}(T)$ iff $w\in {\rm ker}(T)$. Suppose $v,w \in {\rm ker}(T)$. As $v, w \notin {\rm span}(w_1)$, we conclude from Lemma \ref{Witt-and-Orbit-Odd-p-minus}$(i)$ that $v$ and $w$ are in the same $\psi$-orbit. Thus, by Proposition \ref{if-isometric-then-automorphic}, $g$ and $h$ are automorphic. Suppose $v,w \notin {\rm ker}(T)$. Then $T(v) = T(\alpha v_1) = \alpha$ and $T(w) = T(\beta v_1) = \beta$ for some nonzero $\alpha, \beta \in GF(p)$. As $v, w \notin {\rm span}(w_1)$, from Lemma \ref{Witt-and-Orbit-Odd-p-minus}$(i)$, $v$ and $\alpha v_1$ are in the same orbit, and $w$ and $\beta v_1$ are in the same $\psi$-orbit. If $v_1 = g_1 Z(G) \in V$, then by Proposition \ref{if-isometric-then-automorphic}, $g$ and $g_1^{\alpha}$ are automorphic. Similarly, $h$ and $g_1^{\beta}$ are automorphic. Now, by \cite[(3B), p. 161]{Winter_1972}, $g_1^{\alpha}$ and $g_1^{\beta}$ are automorphic. This shows that $g$ and $h$ are automorphic. $(ii)$. By Theorem \ref{Winter-Theorem}$(i)$, the ${\rm Aut}(G)$ action has two central orbits. Let $g \in G\setminus Z(G)$ be such that $gZ(G) = w_1$. By \cite[Corollary 1]{Winter_1972}, $gZ(G)$ is an ${\rm Aut}(G)$-invariant subset. Let $\varphi \in {\rm Aut}(G)$. Then, $\varphi(g)=gh$ for some $h \in Z(G)$, and for each $\alpha \in GF(p) \setminus \{0\}$, $\alpha w_1 = g^{\alpha}Z(G) \in V$. If $z \in Z(G)$ then $\varphi (g^{\alpha}z) = g^{\alpha}{h}^{\alpha}\varphi(z) \in g^{\alpha} Z(G)$. Thus, for each $\alpha \in GF(p)\setminus \{0\}$, $\alpha w_1 \in V$ corresponds to a noncentral ${\rm Aut}(G)$-invariant subset of $G$. By Lemma \ref{conjugacy-classes-of-extraspecial-p}, this ${\rm Aut}(G)$-invariant subset is an orbit of ${\rm Aut}(G)$ action. If $|G|=p^3$ then, by part $(i)$, the elements $g$ in $G \setminus Z(G)$ such that $gZ(G) \notin {\rm span}(w_1) \subseteq V$ are in the same ${\rm Aut}(G)$ orbit. Thus, the total number of ${\rm Aut}(G)$ orbits in this case is $2$ (central orbits) + $p-1$ (corresponding to each $\alpha w_1$) + $1$ (corresponding to $gZ(G) \notin {\rm span}(w_1$)) = $p+2$. If $|G| > p^3$ then, by part $(i)$, the elements $g$ in $G \setminus Z(G)$ such that $gZ(G) \notin {\rm span}(w_1) \subseteq V$ split into two ${\rm Aut}(G)$ orbits. 
Thus, the total number of ${\rm Aut}(G)$ orbits in this case is $2$ (central orbits) + $p-1$ (corresponding to each $\alpha w_1$) + $2$ (corresponding to $gZ(G) \notin {\rm span}(w_1$)) = $p+3$. \end{proof} Now onward, $p$ denotes a prime without a restriction of being even or odd. The following theorem gives a complete description of word images for extraspecial-$p$ groups. \begin{theorem}\label{three-word-images} Let $G$ be an extraspecial-$p$ group. Then, the only word images of $G$ are $\{1\}$, $Z(G)$ and $G$. \end{theorem} \begin{proof} The images $\{1\}$ and $G$ are the obvious ones. Suppose $w : G^{(d)} \rightarrow G$ is a word map with $w(G) \neq \{1\}, G$. There are indeed such words, {\it e.g.} the commutator word $w = xyx^{-1}y^{-1}$. By Lemma \ref{if-nonsurjective-then-in-Frattini}, $w(G) \subseteq \Phi(G) = Z(G)$. The last equality holds because $G$ is an extraspecial-$p$ group. Since $w(G)$ is invariant under automorphisms and, by Theorem \ref{Winter-Theorem}$(i)$ , all elements of $Z(G) \setminus \{1\}$ are automorphic, $w(G) = Z(G)$. \end{proof} \begin{theorem}\label{counting-impostors-in-extraspecials} Let $G$ be an extraspecial-$p$ group and $i_G$ be the number of word image impostors in $G$. \begin{enumerate}[(i).] \item If $p = 2$ then $$i_G = \begin{cases} 1, \quad \text{if } G\cong Q_2 \\ 5, \quad \text{if } G\ncong Q_2 \end{cases} $$ \item If $p \neq 2$ then $$i_G = \begin{cases} 1, ~\quad \quad \quad \quad \text{if } ${\rm exp}(G) = p$ \\ 2^{p+1}-3, \quad \text{if } {\rm exp}(G) = p^2 \text{ and } |G| = p^3 \\ 2^{p+2}-3, \quad \text{if } {\rm exp}(G) = p^2 \text{ and } |G| > p^3 \\ \end{cases} $$ \end{enumerate} \end{theorem} \begin{proof} If the number of orbits of $G$ under the natural ${\rm Aut}(G)$ action is $n$, then the number of word image candidates is $2^{n-1}$. By Theorem \ref{three-word-images}, only $3$ of these are word images. Thus $i_G = 2^{n-1} -3$. The result follows directly by substituting the value of $n$ from Theorem \ref{aut-components-for-char-2}, Theorem \ref{aut-components-for-char-p-exp-p} and Theorem \ref{aut-components-for-char-p-exp-p-square}. \end{proof} It is evident from above theorem that if $p$ is odd and ${\rm exp}(G) = p^2$ then the number of word image impostors grows exponentially with $p$. \section{Detecting special $p$-groups through word images} \label{special-p-using-word-images} We end this article with an observation on the converse of the Theorem \ref{three-word-images}. The converse is not true. The direct products of extraspecial $p$-groups with themselves are counterexamples. Interestingly, a weaker form of the converse is true. To formulate it we first record a lemma. \begin{lemma}{\label{word-image}} The commutator subgroup of a group $G$ is always a word image. Moreover, if $G$ is a $p$-group then $\Phi(G)$ is also a word image. \end{lemma} \begin{proof} Let $g \in [G,G]$. Let $\ell_g$ be the minimal number of commutators required to write $g$ as a product of commutators. Let $l_G=\underset{g\in [G,G]}\max(l_g)$. Then, for $w = [x_1, x_2] \cdots [x_{2\ell_G-1}, x_{2\ell_G}] \in \mathbb{F}_{2\ell_G}$, we indeed have $w(G) = [G,G]$. Now, if $G$ is a $p$-group then $\Phi(G)=G^p[G,G] = w'(G)$, where $w' = x_{2\ell_G+1}^p w$. Thus $\Phi(G)$ is a word image. \end{proof} \begin{theorem}\label{special-through-word-images} Let $G$ be a nonabelian finite group such that the only word images of $G$ are $\{1\}$, $Z(G)$ and $G$. Then, $G$ is a special $p$-group for some prime $p$. 
\end{theorem} \begin{proof} We first observe that $\{1\}, Z(G)$ and $G$ are distinct. Since $G$ is nonabelian, $Z(G) \neq G$. If $Z(G) = \{1\}$ then, by hypothesis, $G$ does not have a nontrivial proper word image. Denote the $n^{\rm th}$ power word $x^n$ by $w_n$. If $p$ is a prime divisor of ${\rm exp}(G)$ then $w_p(G)$ is a proper subset of $G$. Thus $w_p(G) = \{1\}$. This shows that ${\rm exp}(G) = p$ and $G$ is a $p$-group. This is a contradiction to $Z(G) = \{1\}$. We now show that, under the hypothesis of the theorem, $G$ is a $p$-group. If we assume the contrary, then there exist distinct primes $p$ and $p'$ dividing $\exp(G)$. Since the word images $w_p(G)$ and $w_{p'}(G)$ are proper, $w_p(G)=w_{p'}(G)=Z(G)$. Consequently, for every $g \in G$ there exists $h \in G$ such that $g^p = h^{p'}$. Thus, for all $g \in G$, $$g^{p({\rm exp}(G)/p')} = h^{p'({\rm exp}(G)/p')} = h^{{\rm exp}(G)}=1$$ and ${\rm exp}(G)$ divides $p({\rm exp}(G)/p')$. This holds iff $p = p'$. This is a contradiction. We now show that ${\rm exp}(G)=p$ or $p^2$. If ${\rm exp}(G)>p^2$, then by the hypothesis, $w_p(G)=w_{p^2}(G)=Z(G)$. Along the lines of the argument in the previous paragraph, it is easy to arrive at the contradiction that ${\rm exp}(G)$ divides ${\rm exp}(G)/p$. By Lemma \ref{word-image}, $[G,G], \Phi (G)$ are nontrivial proper word images and by the hypothesis $Z(G)$ is a nontrivial proper word image. Thus, $[G,G] = \Phi (G) = Z(G)$. Thus far we have shown that $G$ is a $p$-group of exponent at most $p^2$ with $[G,G] = \Phi (G) = Z(G)$. To prove that $G$ is special $p$-group it remains to be shown that ${\rm exp}(\Phi(G)) = p$. If ${\rm exp}(G) = p$, then ${\rm exp}(\Phi(G)) = p$ is a triviality. If ${\rm exp}(G) = p^2$, then $w_p(G), \Phi (G)$, $Z(G)$ are nontrivial proper word images, and hence these are equal. Thus $w_p(\Phi(G)) = w_p(w_p(G)) = w_{p^2}(G) = \{1\}$. This show that ${\rm exp}(\Phi(G)) = p$ and we conclude that $G$ is a special $p$-group. \end{proof} \bibliographystyle{amsalpha} \bibliography{word-maps} \end{document}
2205.15362v2
http://arxiv.org/abs/2205.15362v2
Viscosity solutions for nonlocal equations with space-dependent operators
\documentclass[10pt]{amsart} \usepackage{amsmath,amsthm,amsopn,amsfonts,amssymb,color} \usepackage[colorlinks=true,linkcolor=blue,urlcolor=blue]{hyperref} \usepackage{cancel} \usepackage{graphicx} \usepackage{float} \usepackage{amsmath} \usepackage{fancyhdr} \usepackage{epstopdf} \usepackage{amsfonts} \usepackage[T1]{fontenc} \textwidth 18cm \oddsidemargin 5pt \evensidemargin 5pt \textheight21.5cm \parskip1mm \usepackage[normalem]{ulem} \usepackage{booktabs} \usepackage[USenglish]{babel} \usepackage{cancel} \usepackage{times} \usepackage{mathrsfs} \usepackage{multirow,multicol} \usepackage{amsfonts} \usepackage{amsthm} \usepackage{amsmath} \newtheorem{teo}{Theorem}[section] \newtheorem{prop}[teo]{Proposition} \newtheorem{defin}[teo]{Definition} \newtheorem{remark}[teo]{Remark} \newtheorem{cor}[teo]{Corollary} \newtheorem{lemma}[teo]{Lemma} \newtheorem{com}[teo]{Comment} \newtheorem{Conjecture}[teo]{Conjecture} \newcommand{\mres}{\mathbin{\vrule height 1.6ex depth 0pt width 0.13ex\vrule height 0.13ex depth 0pt width 1.3ex}} \DeclareMathOperator*{\esssup}{ess\sup} \newcommand{\linf}{L^{\mathcal {1}}(\Omega)} \newcommand{\infi}{\mathcal{1}} \newcommand{\unof}{\dot{\phi}} \newcommand{\duef}{\ddot{\phi}} \newcommand{\tref}{\dddot{\phi}} \newcommand\rn{\mathbb{R}^{N}} \newcommand\N{\mathbb{N}} \newcommand{\inn}{\mbox{ in }} \newcommand{\onn}{\mbox{ on }} \renewcommand{\O }{\Omega } \newcommand\R{\mathbb{R}} \def\ioo#1{\int_{\{| w_n|>{#1}\}}} \def\ik{\int_{|u_n|>k}} \def\adix#1#2{a(x, #1,#2 )} \def\elle#1{L^{#1}(\Omega)} \def\emme#1{M^{#1}(\Omega)} \def\lor#1#2{L^{#1,#2}(\Omega)} \def\lorr#1#2{\mathbb{L}^{#1,#2}(\Omega)} \def\diver{{\rm div}} \def\lio{L^{\infty}(\Omega)} \def\w{H_0^{1}(\Omega)} \def\io{\int_{\Omega}} \def\norma#1#2{\|#1\|_{\lower 4pt \hbox{$\scriptstyle #2$}}} \def\un{u_n} \def\en{\ep\tau_n} \def\um{u_m} \def\D{\nabla} \def\sign{{\rm sign}\,} \def\vp{\varphi} \def\omegat{\tilde{\omega}} \def\Omegat{\tilde{\Omega}} \def\van{\varphi_n} \def\gn{\gamma_n} \def\gtn{\tilde{\gamma}_n} \def\an{\alpha_n} \def\vep{\eps} \def\ve{v_{\eps}} \def\we{w_{\eps}} \def\pn{p_n} \def\qn{q_n} \def\bn{\beta_n} \def\tn{\theta_n} nedimo{\mbox{\ \rule{.1in}{.1in}}} nedim \def\gw{G_{\tilde{k}}(w_n)} \def\wn{w_n} \def\zn{z_n} \def\fn{f_n} \def\Tk{T_k} \def\Gk{G_k} \def\R{I \!\!R} \def\N{I \!\! 
N} \def\elle#1{L^{#1}(\Omega)} \def\emme#1{M^{#1}(\Omega)} \def\w{W_0^{1,2}(\Omega)} \def\wq{W_0^{1,q}(\Omega)} \def\wp{W_0^{1,p}(\Omega)} \def\w1{W_0^{1,1}(\Omega)} \def\misure{{ cal M}_b (\Omega)} \def\duale{W^{-1,p'}(\Omega)} \def\sn{\tau_n} \def\eps{\varepsilon} \def\lio{L^{\infty} (\Omega)} \def\lip{W^{1,\infty} (\Omega)} \def\lipo{W^{1,\infty}_0 (\Omega)} \def\dys{\displaystyle} \def\ss#1{\sigma_{#1}} \def\sh#1{\sigma_{h,#1}} \def\Lp{\mathcal{L}^+ } \def\Lm{\mathcal{L}^- } \def\w{H_0^{1}(\Omega)} \def\ie{\int\limits_{E}} \def\be{\begin{equation}} \def\ee{\end{equation}} \def\bc{\begin{cases}} \def\ec{\end{cases}} \def\dn{d_n} \def\vn{v_n} \def\zn{z_n} \def\be{\begin{equation}} \def\ee{\end{equation}} \oddsidemargin=.2in \evensidemargin=.2in \textheight = 617pt \textwidth 15.7cm \usepackage{color} \numberwithin{equation}{section} \newcommand{\UUU}{\color{blue}} \newcommand{\RRR}{\color{red}} \newcommand{\MMM}{\color{magenta}} \newcommand{\EEE}{\color{black}} \newcommand{\disp}{\displaystyle} \newcommand{\USC}{\text{\rm USC}} \newcommand{\LSC}{\text{\rm LSC}} \newcommand{\Rz}{\mathbb{R}} \usepackage{color} \numberwithin{equation}{section} \usepackage{todonotes} \newcommand{\comment}[1]{\todo[size=\small, color=orange!35]{#1}} \newcommand{\commentline}[1]{\todo[inline, size=\small, color=orange!35]{#1}} \title[Viscosity solutions for nonlocal equations]{ Viscosity solutions for nonlocal equations \\with space-dependent operators} \author[S. Buccheri] {Stefano Buccheri} \address[Stefano Buccheri]{Faculty of Mathematics, University of Vienna, Oskar-Morgenstern-Platz 1, A-1090 Vienna, Austria} \email{[email protected]} \author[U. Stefanelli] {Ulisse Stefanelli} \address[Ulisse Stefanelli]{Faculty of Mathematics, University of Vienna, Oskar-Morgenstern-Platz 1, A-1090 Vienna, Austria,\,\& Vienna Research Platform on Accelerating Photoreaction Discovery, University of Vienna, W\"ahringerstrasse 17, A-1090 Vienna, Austria,\,\& Istituto di Matematica Applicata e Tecnologie Informatiche E. Magenes, via Ferrata 1, I-27100 Pavia, Italy. } \email{[email protected]} \subjclass[2010]{} \keywords{Fractional laplacian, Perron method, principal eigenvale, refiend maximum principle, half-relaxed limit\EEE, long-time behavior} \begin{document} \begin{abstract} We consider a class of elliptic and parabolic problems, featuring a specific nonlocal operator of fractional-laplacian type, where integration is taken on variable domains. Both elliptic and parabolic problems are proved to be uniquely solvable in the viscosity sense. Moreover, some spectral properties of the elliptic operator are investigated, proving existence and simplicity of the first eigenvalue. Eventually, parabolic solutions are proven to converge to the corresponding limiting elliptic solution in the long-time limit. \end{abstract} \maketitle \section{Introduction} The study of PDE problems driven by nonlocal operators is attracting an ever growing attention. This is in part motivated by the great relevance of such operators in applications, among which L\'evy processes, differential games, and image processing, just to mention a few. The paramount example of nonlocal operator is the {\it fractional laplacian}, which can be defined in the following principal-value sense \be\label{def} (-\Delta)_ {s} u (x) = p.v.\int_{\mathbb{R}^N}\frac{u(x)-u(y)}{|x-y|^{N+2s}}dy \quad \text{ for} \ s\in(0,1). 
\ee In this paper we focus on elliptic and parabolic problems featuring a localized version of the classical fractional-laplacian operator, namely, \be\label{10-6bis0} u(x)\mapsto p.v. \int_{ \Omega(x)} \frac{u(x)-u(y)}{|x-y|^{N+2s}} dy. \ee In contrast with the classical fractional laplacian, the main feature in \eqref{10-6bis0} is that integration is taken with respect to a $x$-dependent bounded set $\Omega(x)$. Such a modification is inspired by the analysis of the hydrodynamic limit of kinetic equations \cite{pedro} and of peridynamics \cite{Silling00}. We comment on these connection in Subsection \ref{sec:connections} below. In order to specify our setting further, let us recall that the homogeneous Dirichlet problem associated with the classical fractional laplacian $(-\Delta)_ {s}$\eqref{def} in a bounded set $\Omega\subset \mathbb{R}^N$ requires to prescribe the nonlocal boundary condition $u=0$ on $\mathbb{R}^N\setminus \Omega$. Under such condition the fractional laplacian can be written as \begin{align}\label{decomposition} &(-\Delta)_ {s} u (x) = p.v.\int_{\mathbb{R}^N}\frac{u(x)-u(y)\chi_{\Omega}}{|x-y|^{N+2s}}dy=p.v.\int_{\Omega}\frac{u(x)-u(y)}{|x-y|^{N+2s}}dy+u(x)\int_{\mathbb{R}^N\setminus\Omega}\frac{1}{|x-y|^{N+2s}}dy \\ &\quad =:(-\Delta)^{\Omega}_ {s} u (x) +k(x) u(x)\nonumber, \end{align} where $\chi_\Omega$ indicates the characteristic function of $\Omega$. The nonlocal operator $(-\Delta)^{\Omega}_ {s}$ is usually called {\it regional fractional laplacian}. By indicating with $d(x)$ the distance of $x\in \Omega$ from the boundary $\partial \Omega$, under mild regularity assumption on $\partial\Omega$ the function $k(x)$ can be proved to satisfy \begin{equation}\label{killing} \frac{\alpha}{d(x)^{2s}}\le k(x)\le \frac{\beta}{d(x)^{2s}}, \end{equation} for some $0<\alpha<\beta$. We provide a proof of this property in Lemma \ref{hbeha}. Differently from the classical fractional laplacian \eqref{def}, the regional laplacian $(-\Delta)^{\Omega}_ {s}$ acts on functions defined in $\Omega$. Correspondingly, boundary conditions for the homogeneous Dirichlet problem for $(-\Delta)^{\Omega}_ {s}$ can be directly prescribed on $\partial\Omega$. The two operators $(-\Delta)_ {s}$ and $(-\Delta)^{\Omega}_ {s}$ differ especially in the vicinity of the boundary, as one can expect looking at \eqref{decomposition}. the boundary. While for any $s\in(0,1)$ the solution of the homogeneous Dirichlet problem associated with \eqref{def} behaves as $d(x)^s$ as $x$ approaches $\partial \Omega$, the one associated to $(-\Delta)^{\Omega}_ {s}$ goes to zero as $d(x)^{2s-1}$ for $s\in(1/2,1)$. In fact, for $s\in(0,1/2]$, the Dirichlet problem associated to the regional laplacian is not well-defined, independently of the regularity of $\partial \Omega$. This can be explained via trace theory (the trace operator exists only if $s>1/2$, see \cite{tar}, \cite{fall}), or in term of the probabilistic process associated to $(-\Delta)^{\Omega}_ {s}$ (such a process reaches the boundary only if $s>1/2$, see for instance \cite{bog}). Roughly speaking, we can say that the term $k(x) u(x)$ regularizes the operator $(-\Delta)^{\Omega}_ {s}$ close to the boundary, by forcing a quantified convergence of solution to zero on approaching $\partial \Omega$. The reader can find more on the relation between $(-\Delta)_ {s}$ and $(-\Delta)^{\Omega}_{s}$ in the recent survey \cite{alot} and in the references therein. 
Inspired by position \eqref{10-6bis0}, by decomposition \eqref{decomposition}, and by property \eqref{killing}, we aim at considering more general nonlocal operators of the following form \be\label{10-6} h(x) u (x)+\mathcal{L}_{s} (\Omega(x),u (x)), \ee where $\mathcal{L}_{s} (\Omega(x) ,u (x_0))$ is defined as \be\label{10-6bis} \mathcal{L}_{s} (\Omega(x),u (x_0))=p.v. \int_{y\in \Omega(x)} \frac{u(x_0)-u(y)}{|x-y|^{N+2s}} dy. \EEE \ee In the following, we often use the change of coordinates $z=x-y$. In this new reference frame we will write \be \label{omegatxb} \Omegat(x)=\{z\in\mathbb{R}^N \ : \ x-z\in \Omega(x)\}. \ee In \eqref{10-6}, the function $h(x)\in C(\Omega)$ is assumed to be given and to fulfill \begin{equation}\label{alpha} 0< \frac{\alpha}{d(x)^{2s}}\le h(x)\le \frac{\beta}{d(x)^{2s}}, \ \ \ \mbox{with} \ \ \ 0<\alpha\le\beta, \end{equation} and the set-valued function $x\to\Omega(x)\subset\Omega$ is assumed to satisfy \begin{align}\label{continuita} & \quad \forall x \in \Omega:\quad \lim_{y\to x}|\Omega(y)\triangle\Omega(x)|=0,\\ & \quad \exists \ \zeta\in \big(0,1/2\big) , \ \forall x \in \Omega :\quad \Omegat(x)\cap B_{r}(0)=\Sigma\cap B_{r}(0) \mbox{ for all } r\le \zeta d(x),\label{ostationary} \end{align} for some given open set $\Sigma$ such that \be\label{Sigmadef} \Sigma=-\Sigma \quad \mbox{and} \quad \left|\Sigma\cap \big(B_{2r}(0)\setminus B_r(0)\big)\right|\ge q|B_{2r}(0)\setminus B_r(0)| \mbox{ for any } r>0, \ee with $q\in(0,1)$. Note that the symmetry of $\Sigma$ is required for the well-definiteness of the operator on smooth functions and that assumption \eqref{continuita} ensure that operator $\mathcal{L}_{s}$ from \eqref{10-6bis} is continuous with respect to his arguments. For a more extensive discussion on the hypothesis look at Section \ref{existingliterature}. Our first goal is to show that the {\it elliptic problem} related to the nonlocal operator \eqref{10-6} given by \begin{equation}\label{10:17} \begin{cases} h(x)u (x)+\mathcal{L}_{s} (\Omega(x),u (x))= f(x) \qquad & \mbox{in } \Omega,\\ u(x) = 0 & \mbox{on } \partial\Omega ,\\ \end{cases} \end{equation} is uniquely solvable, for any $f\in C(\Omega)$ such that \be\label{fcon} \exists \ \eta_f\in (0,2s), C>0 \ : \ |f(x)|d(x)^{2s- \eta_f}\le C. \ee In the current setting it is natural to address problem \eqref{10:17} in the viscosity sense. Indeed, assumptions \eqref{continuita}-\eqref{Sigmadef} imply that the operator \eqref{10-6} satisfies the comparison principle. Moreover the growth conditions \eqref{alpha} and~\eqref{fcon} allow us to build a barrier for problem \eqref{10:17} of the form \be\label{barrierintro} C_\alpha d(x)^{ \eta} \ \ \ \mbox{for some small } \eta>0. \ee Having comparison and barriers at hand, we can implement the Perron method and prove in Theorem \ref{teide} the existence of a unique viscosity solution for \eqref{10:17}. Note that $C_\alpha$ degenerates with $\alpha$. In particular, if the term $h(x)$ degenerates at the boundary, solvability of problem \eqref{10:17} may fail, at least for $s\in(0,1/2]$, see the above discussion. A second aim of our paper is to address the spectral properties of the operator \eqref{10-6}. 
We focus on the first eigenvalue associated to \eqref{10-6} with homogeneous Dirichlet boundary conditions and we show that there exists a unique $\overline{\lambda}>0$ such that the problem \begin{equation}\label{10:17ter} \begin{cases} h(x)v (x)+\mathcal{L}_{s} (\Omega(x),v (x))= \overline{\lambda} v (x)\qquad & \mbox{in } \Omega,\\ v(x) = 0 & \mbox{on } \partial\Omega,\\ \end{cases} \end{equation} admits a strictly positive viscosity solution. Moreover such a solution is unique, up to multiplication by constants. As our setting in nonvariational, the characterization of such first eigenvalue follows the approach of the seminal work \cite{bere}. More precisely, we define $\overline{\lambda}$ as the supremum of the values $\lambda\in \mathbb{R}$ such that the problem \be\label{refineed} \begin{cases} h(x)u(x)+\mathcal{L}_{s} (\Omega(x),u (x))= \lambda u (x) + f (x)\EEE\qquad & \mbox{in } \Omega,\\ u(x) = 0 & \mbox{on } \partial\Omega\, ,\\ u(x) > 0 & \mbox{in } \Omega\, ,\\ \end{cases} \ee admits a solution, for some given nonnegative and nontrivial $f\in C(\Omega)$ satisfying \eqref{fcon}, see Theorems \ref{mussaka}-\ref{helmholtz}. As we shall see, this characterization does not depend on the particular choice of $f$. Such a definition is slightly different from the usual one (see for instance \cite{biri}, \cite{busca}, \cite{quaas} and \cite{biswas} for the same approach in both local and nonlocal settings), being based on the concept of solution instead of that of supersolution. A further focus of this paper is the study of the evolutionary counterpart of \eqref{10:17}, namely, the {\it parabolic problem} \begin{equation}\label{10:17bis} \begin{cases} \partial_t u (t,x) (x)+h(x)u (x)+\mathcal{L}_{s} (\Omega (t,x),u (t,x))= f(t, x) \qquad & \mbox{in } (0,T)\times\Omega,\\ u(t,x) = 0 & \mbox{on } (0,T)\times\partial\Omega ,\\ u(0,x) = u_0(x) & \mbox{on } \Omega. \end{cases} \end{equation} We prove existence and uniqueness of a global-in time viscosity solution to problem \eqref{10:17bis}. See Theorem \ref{existence} below for the precise statement in a more general setting, where a time-dependent version of assumption \eqref{ostationary} is considered. The behavior of the solution \EEE $u(x,t)$ for large times is then addressed in Theorem \ref{lalaguna}: Taking advantage of the characterization of $\overline{\lambda}$ we prove that for any $\lambda <\overline{\lambda}$ there exists $C_{\lambda}$ such that \[ |u(t,x)-u(x)|\le C_{\lambda} \overline{\varphi} (x) e^{-\lambda t}, \] where $\overline{\varphi}$ is the normalized positive eigenfunction associated to $\overline{\lambda}$, $u(x)$ is the solution of the elliptic problem \eqref{10:17} and $u(t,x)$ solves the parabolic problem \eqref{10:17bis} for the same time-independent forcing $f(x)$. \EEE \subsection{Relation with applications}\label{sec:connections} As mentioned, the specific form of operator $\mathcal{L}_{s}$, in particular the dependence of the integration domain $\Omega(x)$ on $x$, occurs in connection with different applications. A first occurrence of operators of the type of $\mathcal{L}_{s}$ is the study of the hydrodynamic limit of collisional kinetic equations with a heavy-tailed thermodynamic equilibrium. When posed in the whole space, the reference nonlocal operator in the limit is the classical fractional laplacian \eqref{def}, see \cite{melletbis}. The restriction of the dynamics to a bounded domain with a zero inflow condition at the boundary asks for considering \eqref{10-6bis0} instead \cite{pedro}. 
In this connection, $\Omega(x)$ is defined to be the largest star-shaped set centered at $x\in\Omega$ and contained in $\Omega$. The heuristic behind this choice is that a particle located at $x$ is allowed to move along straight paths and is removed from the system as soon as it reaches the boundary. Hence, the possible interaction range of a particle sitting at $x$ is exactly $\Omega(x)$. In particular, the resulting hydrodynamic limit under homogeneous Dirichlet conditions features the nonlocal operator \be\label{start} a(x)u (x)+ p.v.\int_{\Omega(x)} \frac{u(x)-u(y)}{|x-y|^{N+2s}} dy =: a(x)u (x) + (-\Delta)^\star_s u(x), \ \ \ s\in(0,1). \ee Here, the function $a(x)$ has the following specific form \be\label{15:15} a(x)=\int_{\mathbb{R}^N}\frac{1}{|y|^{N+2s}}e^{-\frac{d(x,\sigma(y))}{|y|}}dy, \ee with $d(x,\sigma(y))$ being the length of the segment joining $x\in \Omega$ with the closest intersection point between $\partial \Omega$ and the ray starting from $x$ with direction $\sigma(y) =\frac{y}{|y|}$. Clearly, if $\Omega$ is convex one has $\Omega(x)\equiv \Omega$ for all $x$, and the function $a(x)$ coincides, up to a constant, with the function $k(x)$ of \eqref{decomposition} (see Lemma \ref{acca}); in this case we recover exactly (up to a constant) the operator in \eqref{decomposition}. Note, however, that even in the case of a nonconvex domain $\Omega$ the function $a(x)$ satisfies condition \eqref{alpha} (see Lemma \ref{hbeha}). A second context where nonlocal operators of the type of \eqref{10-6bis} arise is that of {\it peridynamics} \cite{Silling00}. This is a nonlocal mechanical theory, based on the formulation of equilibrium systems in integral instead of differential terms. Forces acting on the material point $x$ are obtained as a combined effect of interactions with other points in a given neighborhood. This results in an integral featuring a radial weight which modulates the influence of nearby points in terms of their distance \cite{Emmrich,survey}. A reference nonlocal operator in this connection is \be\label{peridyn} u(x) \mapsto p.v.\int_{ B_{\rho}(x)} \frac{u(x)-u(y)}{|x-y|^{N+2s}} dy. \ee Here, $ B_{\rho}(x)$ is the ball of radius $\rho>0$ centered at $x$. In particular, the parameter $\rho$ measures the interaction range. Such operators have been used to approximate the fractional laplacian in numerical simulations (see \cite{duo} and the references therein); see also the parametric analysis in \cite{burkovska}. The operator $\mathcal{L}_s$ in \eqref{10-6bis} hence corresponds to a natural generalization of the latter to the case of an interaction range which varies along the body, as may happen in the presence of a combination of different materials. This would correspond to choosing a varying $\rho(x)$. \subsection{Existing literature}\label{existingliterature} To our knowledge, operator \eqref{10-6} has not been studied yet. Despite its simple structure when compared to the general operators usually allowed in the fully nonlinear setting, most of the available tools seem not to apply directly. In this section, we aim at presenting a brief account of the literature in order to put our contribution in context. Following the seminal work \cite{CIL}, the existence theory of viscosity solutions, through comparison principles, barriers, and the Perron method, has been generalized to a large class of elliptic and parabolic integro-differential equations, see for instance \cite{barimb}, \cite{bci}, \cite{caff}, \cite{cd}, \cite{cdbis} and \cite{jk}.
Comparison principles for nonlocal problems in the viscosity setting can be found in \cite{barimb}, see also \cite{jk}. One of the key structural assumptions of these works reads, in our notation, \be\label{bbarles} \int_{\mathbb{R}^N}|\chi_{\Omegat(x)}-\chi_{\Omegat(y)}||z|^{2-N-2s}dz\le c|x-y|^2 \ee for some positive constant $c>0$ (see assumption (35) in \cite{barimb}). This type of condition allows the authors to implement the variable-doubling strategy of \cite{CIL} for a large class of operators of so-called L\'evy-Ito type. This is not expected here, for our set of assumptions does not imply \eqref{bbarles}, as the following simple argument shows: let $\Omega$ be the unit ball centered at the origin and $\Omegat(x)=B_{\rho(x)}$ for any $x\in\Omega$ with $\rho(x)=d(x)^{ 1/(2-2s)}$. Then, for any $x,y\in\Omega$ lying on the same ray from the origin we get \[ \int_{\mathbb{R}^N}|\chi_{\Omegat(x)}-\chi_{\Omegat(y)}||z|^{2-N-2s}dz=\frac{\omega_N}{2-2s}|\rho(x)^{2-2s}-\rho(y)^{2-2s}|=\frac{\omega_N}{2-2s}|x-y|. \] Our alternative strategy is to assume that the operator is somehow translation invariant \emph{close} to the singularity, with this property degenerating while approaching $\partial\Omega$, see assumption \eqref{ostationary} above. This allows for some cancellations that bypass the problem of the singularity of the kernel. Instead of doubling variables, we use the inf/sup-convolution technique to prove that the difference of a viscosity subsolution and a viscosity supersolution is still a viscosity subsolution. Eventually, we also quote the interesting comparison result in \cite{gms}. There, the doubling of variables is combined with an optimal-transport argument. Such an approach, however, requires the uniform continuity of solutions, and it is not clear how to adapt it to general viscosity solutions. As far as the construction of barriers is concerned, notice that the typical difficulty is to estimate from below a term of the type \[ \int_{\mathbb{R}^N\setminus\Omega}\frac{1}{|x-y|^{N+2s}}dy, \] which requires some regularity on the boundary of $\Omega$. Of course we refer here to the standard case of the fractional laplacian, but the same idea can be extended to more general operators, see \cite[Lemma 1]{bci}. In our case, since we impose condition \eqref{alpha} a priori, we can actually deal with any open domain, paying the price of a poor control on the decay of the solution close to the boundary, see \eqref{barrierintro}. For an alternative approach to the Perron method, not relying on the comparison principle and therefore producing possibly discontinuous viscosity solutions, we refer the interested reader to \cite{mou}. For a comprehensive overview of the numerous contributions to the regularity theory for viscosity solutions to nonlocal elliptic and parabolic equations, we address the reader to the rather detailed introductions of \cite{schwabsil} and \cite{krs}. Here, we provide a small overview of some results more closely related to our work. The first fully PDE-oriented result about H\"older regularity for elliptic nonlocal operators has been obtained in \cite{silve}. A drawback of this approach is that it does not allow one to consider the limit $s\to1$. The first H\"older estimate which is robust enough to pass to the limit as $s\to1$ has been obtained in \cite{caff} (see also \cite{caffbis}) and then generalized to parabolic equations in \cite{cd} and \cite{cdbis}. All these results apply to operators whose kernel is pointwise controlled from above and from below by that of $(-\Delta)_s$.
For results where such pointwise control is not available, we refer again to \cite{schwabsil} and \cite{krs}. More in detail, our condition \eqref{Sigmadef} is a simplified version of assumption (A3) in \cite{schwabsil}. Condition \eqref{Sigmadef} allows us to deduce an interior regularity estimate, in the spirit of the more general \cite[Theorem 4.6]{mou}. \\ As mentioned, we define the first eigenvalue of \eqref{10-6} following the approach in \cite{bere}. The advantage of this approach is that it is independent of the variational structure of the operator, relying directly on the maximum principle, as well as on the existence of positive (super-)solutions. For this reason it has been fruitfully used in the framework of viscosity solutions for second order fully nonlinear differential equations, see for instance \cite{biri}, \cite{busca}, and \cite{QS}. An early result related to eigenvalues of nonlocal operators with singular kernel is in \cite{bego}, where existence issues in the presence of lower order terms are tackled. A closer reference is \cite{DQT}, where the principal eigenvalues of some fractional nonlinear equations with inf/sup structure are studied. In this paper the authors prove, among other results, existence and simplicity of principal eigenvalues, together with some isolation property and the antimaximum principle. Other results following the same line of investigation can be found in \cite{quaas} and \cite{biswas}. We point out that the operators considered in these works are just positively homogeneous (i.e. $\mathcal{L}(u)\neq-\mathcal{L}(-u)$), which gives rise to the existence of two principal {\it half-eigenvalues}, corresponding to differently signed eigenfunctions. We are not concerned with this phenomenon here. A common tool used in the previous works to prove existence of eigenvalues is a nonlinear version of the Krein-Rutman theorem for compact operators, see again \cite{quaas} and \cite{biswas} and the references therein. Let us also mention the recent \cite{birga} and \cite{bismod}, which deal with a different kind of fractional operator. To prove existence of the first eigenvalue, we follow a direct approach based on the approximation of problem \eqref{10:17ter}. Unlike the previously quoted papers, we do not resort to a global regularity result to deduce either the compactness of the operator or the uniform convergence of approximating solutions. Instead, we combine the so-called {\it half-relaxed-limit} method, a version of the {\it refined maximum principle}, and the interior regularity results of \cite{schwabsil,mou}. The half-relaxed-limit method is a powerful tool to pass to the limit with no other regularity than uniform boundedness, see for instance \cite{barles} and the references therein. In general, the price to pay for such generality is to handle discontinuous viscosity solutions. We however avoid this, for we are able to prove a refined maximum principle for \eqref{refineed} in the spirit of \cite{bere}. This, in turn, provides a comparison between sub- and supersolutions, eventually ensuring continuity. Due to the particular structure of our nonlocal operator, a key ingredient in our proof is a restriction procedure for \eqref{10-6bis} in subdomains of $\Omega$, which is where the density assumption \eqref{Sigmadef} is needed. Heuristically speaking, this assumption forces the operator to \emph{look the same at every scale}, as far as the kernel singularity and the behavior \eqref{alpha} of the term $h$ are concerned (see Lemma \ref{localization}).
Eventually, our refined maximum principle allows us to show uniqueness of the first eigenvalue and its simplicity. A general reference for the long-time behavior of solutions to nonlocal parabolic equations is the monograph \cite{bookjulio}. We also refer to \cite{berebis} and to the already mentioned \cite{quaas}. In the latter work, a fractional operator with drift term is considered and the viscosity solution of the associated homogeneous initial-boundary value problem is proved to converge to zero in the large-time limit. Here, we consider a non-homogeneous parabolic equation with initial datum and homogeneous Dirichlet boundary condition. Moreover, we allow the coefficients of our operator to be time-dependent. Under suitable assumptions on this time dependence, we prove that the parabolic solution converges to the stationary one exponentially in time. The exponential rate of convergence depends on the principal eigenvalue. \section{Statement of the main results} In this section, we collect our notation and state our main results. Given any $D\subset \mathbb{R}^{M}$, we indicate upper and lower semicontinuous functions on $D$ as \[ \USC(D)=\left\{u:D\to \mathbb{R} \ : \ \limsup_{z \to z_0}u(z)\le u(z_0) \ \mbox{ for all } z_0\in D\right\}, \] \[ \LSC(D)=\left\{u:D\to \mathbb{R} \ : \ \liminf_{z\to z_0}u(z)\ge u(z_0) \ \mbox{ for all } z_0\in D\right\}. \] We also write $\USC_b(D)$ and $\LSC_b(D)$ for the sets of upper and lower semicontinuous functions that are bounded. Given a function $u:D\to \mathbb R$, we indicate its {\it upper and lower semicontinuous envelopes} as \[ u^*(z_0)=\limsup_{z\to z_0}u(z) \quad \text{and} \quad u_*(z_0)=\liminf_{z\to z_0}u(z), \] respectively. \begin{defin}[Viscosity solutions]\label{def1} Elliptic case: We say that $u\in \USC_b( \Omega )$ ($\in \LSC_b( \Omega )$) is a \emph{viscosity sub (super) solution} to the equation \[ h(x)u (x)+\mathcal{L}_{s} (\Omega(x),u (x))= f(x) \] if, whenever $x \in \Omega $ and $\varphi\in C^2( \Omega)$ are such that $u( x )=\varphi(x )$ and $u(y)\le (\ge) \ \varphi(y)$ for all $y\in\Omega$, then \begin{equation*} h( x ) \varphi(x )+\mathcal{L}_{s} (\Omega(x),\varphi(x ))\le (\ge) \ f(x ). \end{equation*} Moreover, $u\in C(\overline \Omega)$ is a \emph{viscosity solution} to problem \eqref{10:17} if it is both a sub- and a supersolution and satisfies the boundary condition $u=0$ on $\partial \Omega$ pointwise. Parabolic case: We say that $u\in \USC_b( (0,T)\times\Omega )$ ($\in \LSC_b( (0,T)\times\Omega )$) is a \emph{viscosity sub (super) solution} to the equation \[ \partial_tu(t,x) +h(x) u(t,x)+\mathcal{L}_{s} (\Omega(t,x),u(t,x) )= f(t,x) \qquad \mbox{in } (0,T)\times\Omega \] if, whenever $( t , x )\in (0,T)\times\Omega $ and $\varphi\in C^2((0,T)\times{\Omega})$ are such that $u( t , x )=\varphi( t , x )$ and $u(\tau,y)\le (\ge) \ \varphi(\tau,y)$ for all $(\tau ,y)\in(0,T)\times\Omega$, then \begin{equation*} \partial_t \varphi( t , x )+ h( x ) \varphi( t , x )+\mathcal{L}_{s} (\Omega(t,x),\varphi( t , x ))\le (\ge) \ f( t , x ). \end{equation*} Moreover, $u\in C([0,T)\times\overline \Omega)$ is a \emph{viscosity solution} to \eqref{parabolic} if it is both a sub- and a supersolution and satisfies the boundary and initial conditions \begin{align} \label{parabolic} \left\{ \begin{array}{ll} u(t,x) = 0 & \mbox{on } (0,T)\times\partial\Omega\, ,\\ u(0,x) = u_0(x) & \mbox{on } \Omega, \end{array} \right. \end{align} pointwise. \end{defin}
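Let us record an elementary remark clarifying the relation between Definition \ref{def1} and the pointwise formulation, stated in the elliptic case under assumptions \eqref{continuita}--\eqref{Sigmadef}. If $u\in C^2(\Omega)\cap L^{\infty}(\Omega)$ satisfies $h(x)u(x)+\mathcal{L}_{s}(\Omega(x),u(x))\le f(x)$ pointwise in $\Omega$ (the principal value being well defined thanks to the symmetry of $\Sigma$ near the singularity), then $u$ is a viscosity subsolution in the sense of Definition \ref{def1}. Indeed, if $\varphi\in C^2(\Omega)$ is such that $\varphi(x)=u(x)$ and $\varphi\ge u$ in $\Omega$, then
\[
\mathcal{L}_{s}(\Omega(x),\varphi(x))-\mathcal{L}_{s}(\Omega(x),u(x))=p.v.\int_{y\in\Omega(x)}\frac{u(y)-\varphi(y)}{|x-y|^{N+2s}}\,dy\le 0,
\]
so that $h(x)\varphi(x)+\mathcal{L}_{s}(\Omega(x),\varphi(x))\le h(x)u(x)+\mathcal{L}_{s}(\Omega(x),u(x))\le f(x)$. The supersolution case is analogous.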
We are now in a position to state our main results. \begin{teo}[Well-posedness of the elliptic problem]\label{teide} Let us assume \eqref{alpha}-\eqref{Sigmadef} and \eqref{fcon}. Then, problem \eqref{10:17} admits a unique viscosity solution $u$. This satisfies $|u(x)|\le C d(x)^{\eta}$ for some suitably small $\eta>0$ and some large $C>0$. \end{teo} \begin{teo}[Well-posedness of the elliptic first-eigenvalue problem]\label{mussaka} Under the same assumptions of Theorem \ref{teide}, there exists a unique $\overline{\lambda}>0$ such that \begin{equation}\label{eigenproblem} \begin{cases} {h}(x)v (x)+{\mathcal{L}}_s (\Omega(x), v(x))=\overline{\lambda} v (x)\qquad & \mbox{in } \Omega,\\ v(x) = 0 & \mbox{on } \partial \Omega, \end{cases} \end{equation} admits a nontrivial (strictly) positive viscosity solution. Such a solution is unique, up to a multiplicative constant. Moreover, the first eigenvalue can be characterized as $\overline{\lambda}=\sup E$, where \begin{equation}\label{eq:E} E=\{\lambda\in\mathbb R \ : \ \exists v\in C(\overline{\Omega}), \ v>0 \mbox{ in } \Omega, \ v=0 \mbox{ on } \partial\Omega, \ \ \mbox{such that} \ \ h(x)v(x)+\mathcal{L}_s(\Omega(x),v(x))= \lambda v(x)+f(x) \mbox{ in } \Omega\} \end{equation} and $f\in C(\Omega)$ is any given positive and nonzero function satisfying \eqref{fcon}. Note, in particular, that the set $E$ is independent of $f$. \end{teo} \begin{teo}[Well-posedness of the elliptic problem below the first eigenvalue]\label{helmholtz} Under the same assumptions of Theorem \ref{teide}, for any $\lambda<\overline{\lambda}$, the problem \begin{equation}\label{helmeq} \begin{cases} {h}(x)u (x)+{\mathcal{L}}_s (\Omega(x),u(x))=\lambda u (x)+ f(x) \qquad & \mbox{in } \Omega,\\ u(x) = 0 & \mbox{on } \partial \Omega, \end{cases} \end{equation} admits a unique viscosity solution. In particular, we have $(-\infty,\overline\lambda)=E$, where the set $E$ is defined in \eqref{eq:E}. \end{teo} Let us now turn to the parabolic problem. Before stating our results, we present a time-dependent generalization of the hypotheses of Theorem \ref{teide}. We assume the set-valued function $(t,x)\mapsto\Omega(t,x)\subset\Omega$ to fulfill the following assumptions \begin{align}\label{contpar} & \quad \forall (t,x) \in (0,\infty)\times\Omega: \ \ \ \lim_{(\tau,y)\to (t,x)}|\Omega(\tau,y)\triangle\Omega(t,x)|=0,\\ & \quad \forall \ T >0, \ \exists \ \zeta\in \big(0,1/2\big) \ :\ \forall (t,x) \in (0,T)\times\Omega \ \ \ \Omegat(t,x)\cap B_{r}(0)=\Sigma\cap B_{r}(0),\label{omegax} \end{align} for all $r\le \zeta d(x)$ and $\Sigma$ as in \eqref{Sigmadef}. Recall that $\tilde{\Omega}(t,x)=\{z\in\mathbb{R}^N \ : \ x-z\in \Omega(t,x)\}$. Moreover, we let $h\in C((0,\infty)\times\Omega)$ satisfy, for $0<\alpha\le\beta$, \begin{equation}\label{alphap} \frac{\alpha}{d(x)^{2s}}\le h(t,x)\le \frac{\beta}{d(x)^{2s}}; \end{equation} and we assume that $f\in C((0,\infty)\times\Omega)$, $u_0\in C(\Omega)$, and that there exists $ \eta_1\in(0,2s)$ such that \be\label{fconintro} |f(t,x)|d(x)^{2s-\eta_1}\le C, \ee \be\label{u0con} |u_0(x)|d(x)^{-\eta_1}\le C. \ee \begin{teo}[Well-posedness of the parabolic problem] \label{existence} Let us fix $T\in(0,\infty]$.
Under assumptions \eqref{Sigmadef}, \eqref{contpar}-\eqref{u0con}, there exists a unique viscosity solution $u\in C([0,T)\times\overline\Omega)\cap L^{\infty}([0,T)\times \Omega)$ of \begin{align} \label{parabolic} \left\{ \begin{array}{ll} \partial_tu(t,x) +h(t , x) u(t,x)+\mathcal{L}_{s} (\Omega(t,x),u(t,x) )= f(t,x) \qquad &\mbox{in } (0,T)\times\Omega,\\ u(t,x) = 0 & \mbox{on } (0,T)\times\partial\Omega\, ,\\ u(0,x) = u_0(x) & \mbox{on } \Omega. \end{array} \right. \end{align} \end{teo} Finally, we address the asymptotic behavior as $t \to \infty$ of the solution provided by Theorem \ref{existence}. In order to do that, we require that all the time-dependent data in \eqref{parabolic} suitably converge to their stationary counterparts in \eqref{10:17}. In particular, we assume that, for some $\eta_2\in(0,2s)$ and $\lambda<\overline{\lambda}$, \begin{align}\label{decaydata} &\left(|f(t,x)-f(x)|+|h(t,x)-h(x)|\right)d(x)^{2s-\eta_2}e^{\lambda t}\le C_1,\\ \label{decayomega} &|\Omega(t,x)\triangle\Omega(x)|d(x)^{-N-\eta_2}e^{\lambda t}\le C_2. \end{align} Assumption \eqref{omegax} must also be strengthened as follows \be\label{threeprime} \exists \ \zeta\in \big(0,1/2\big),\ \forall (t,x) \in (0,\infty)\times\Omega: \ \ \ \Omegat(t,x)\cap B_{r}(0)=\Sigma\cap B_{r}(0) \ \mbox{ for all } r\le \zeta d(x). \ee \begin{teo}[Long-time behavior] \label{lalaguna} Let us assume \eqref{alpha}-\eqref{ostationary}, \eqref{contpar}-\eqref{u0con}, and \eqref{decaydata}-\eqref{threeprime}. Let $u$ be the unique viscosity solution of the parabolic problem \eqref{parabolic} on $(0,\infty)$, let $v$ be the unique solution of the elliptic problem \eqref{10:17}, and let $\lambda$ fulfill \eqref{decaydata}. Then, there exists $C=C(\lambda)>0 $ such that $ |u(t,x)- v(x)|\le C d(x)^{\eta} e^{-\lambda t}$ for some small $\eta>0$. \end{teo} \section{Preliminary material} We collect in this section some preliminary lemmas, which will be used in the proofs later on. \subsection{Background on viscosity theory} In the following, we will often make use of the continuity of the integral operator with respect to a suitable convergence of its arguments. We state this property in full generality, in order to be able to apply it in different contexts throughout the paper. \begin{lemma}[Continuity of the integral operator]\label{continuity} Let us consider a sequence of points $x_n\to\bar x\in\Omega$ and a family of bounded sets $\Theta(x_n), \Theta(\bar x)$ such that $\chi_{\Theta(x_n)}\to \chi_{\Theta(\bar x)}$ almost everywhere and, for some $\delta>0$, \[ \ \tilde{\Theta}(x_n)\cap B_{r}(0)=\Sigma\cap B_{r}(0) \quad \mbox{ for all } r\le \delta\mbox{ and } n>0, \] with $\Sigma$ as in \eqref{Sigmadef}. Assume moreover that $\phi_n\to\phi$ pointwise with \be\label{c11} \begin{split} \left|\phi_n(x_n)-\phi_n(x_n+z)-q_n \cdot z\right|&\le C|z|^2\\ \left|\phi(\bar x)-\phi(\bar x+z)-q \cdot z\right|&\le C|z|^2 \end{split} \ \ \ \ \ \ \mbox{ for } \ \ \ |z|\le \delta, \ee for some $q_n, q\in \mathbb{R}^N$ and a positive constant $C$ that does not depend on $n$. Then \[ \mathcal{L}_{s} (\Theta(x_n),\phi_n(x_n))\to \mathcal{L}_{s} ( \Theta(\bar x),\phi(\bar x)). \] \end{lemma} \begin{proof} For any $r<\delta$, we can decompose the integral operator as follows \be \label{eq1} \mathcal{L}_{s} (\Theta(x_n),\phi_n(x_n))=\mathcal{L}_{s} (\Theta(x_n)\setminus B_{ r}(x_n),\phi_n(x_n))+\mathcal{L}_{s} (\Theta(x_n)\cap B_{ r}(x_n),\phi_n(x_n)).
\ee Notice that the first integral on the right-hand side of \eqref{eq1} is nonsingular and, for any fixed $r$, it readily passes to the limit as $n\to\infty$, thanks to the convergence of $\Theta(x_n)$ and $\phi_n$. The second one, instead, has to be understood in the principal value sense \begin{align*} \mathcal{L}_{s} (\Theta(x_n)\cap B_{ r}(x_n)&,\phi_n(x_n))\\ =\lim_{\rho\to0}\int_{\rho\le|z|\le r}&\frac{\phi_n(x_n)-\phi_n(x_n+z)}{|z|^{N+2s}}\chi_{\Sigma}dz=\lim_{\rho\to0}\int_{\rho\le|z|\le r}\frac{\phi_n(x_n)-\phi_n(x_n+z)-q_n\cdot z}{|z|^{N+2s}}\chi_{\Sigma}dz, \end{align*} where we have used that $\Sigma=-\Sigma$. Thanks to assumption \eqref{c11} we get \[ |\mathcal{L}_{s} (\Theta(x_n)\cap B_{ r}(x_n),\phi_n(x_n))|\le C\frac{\omega_N}{2-2s} r^{2-2s}. \] Since a similar computation holds for $\mathcal{L}_{s} ( \Theta(\bar x),\phi(\bar x))$, we get that \[ |\mathcal{L}_{s} (\Theta(x_n),\phi_n(x_n))- \mathcal{L}_{s} ( \Theta(\bar x),\phi(\bar x))|\le |\mathcal{L}_{s} (\Theta(x_n)\setminus B_{ r}(x_n),\phi_n(x_n))- \mathcal{L}_{s} ( \Theta(\bar x)\setminus B_{ r}(\bar x),\phi(\bar x))|+2C\frac{\omega_N}{2-2s} r^{2-2s}. \] The assertion follows by taking the limit in the inequality above, first as $n\to\infty$ and then as $ r\to 0$. \end{proof} Now we provide a suitably localized, equivalent definition of viscosity solutions, which will turn out to be useful in proving the comparison Lemma \ref{timecomp} below. Such an equivalence is already known (see, for instance, \cite{barimb}). For completeness, we give here a statement and a proof in the elliptic case. \begin{lemma}[Equivalent definition]\label{def2} We have that $u\in \USC_b( \Omega )$ ($\in \LSC_b( \Omega )$) is a viscosity subsolution (supersolution, respectively) to the equation in \eqref{10:17} if and only if, whenever $x_0\in \Omega $ and $\varphi\in C^2(\Omega )$ are such that $u( x_0 )=\varphi( x_0 )$ and $u( y)\le (\ge) \ \varphi( y)$ for all $y \in \Omega$, then for all $B_r( x_0 )\subset \Omega$ the function \begin{equation}\label{16:57} \varphi_r(x)= \begin{cases} \varphi(x) \qquad & \mbox{in } B_r( x_0 ),\\ u(x) & \mbox{otherwise, } \end{cases} \end{equation} satisfies \begin{equation}\label{serve} h( x_0 ) \varphi_r( x_0 )+\mathcal{L}_{s} (\Omega( x_0),\varphi_r( x_0 ))\le (\ge) \ f( x_0 ). \end{equation} A similar result holds in the parabolic case. \end{lemma} \begin{proof} We prove the equivalence only in the case of subsolutions, the case of supersolutions being identical. Let us assume at first that $u\in \USC_b(\Omega)$ fulfills the conditions of Lemma \ref{def2}. We want to check that it is a viscosity subsolution in the sense of Definition \ref{def1}. Let $\varphi \in C^2(\Omega)$ be such that $u-\varphi $ has a global maximum at $x_0$ and $\varphi(x_0)=u(x_0)$. It then follows that, for any $B_{r}(x_0)\subset\Omega$, \begin{align*} f(x_0)&\ge h(x_0)\varphi_r( x_0 )+\mathcal{L}_{s} (\Omega(x_0),\varphi_r( x_0 ))\\[1mm] &=h(x_0)u(x_0)+\int_{ \Omega(x_0)\setminus B_{r}(x_0) } \frac{u(x_0)-u(y)}{|x_0-y|^{N+2s}} dy+\int_{ \Omega(x_0)\cap B_{r}(x_0) } \frac{u(x_0)-\varphi(y)}{|x_0-y|^{N+2s}} dy\\ &\ge h(x_0)u(x_0)+\int_{ \Omega(x_0)} \frac{u(x_0)-\varphi(y)}{|x_0-y|^{N+2s}} dy\\[1mm] &=h(x_0)\varphi ( x_0 )+\mathcal{L}_{s} (\Omega(x_0),\varphi ( x_0 )) , \end{align*} where the first inequality comes from \eqref{serve} and the second one follows since $u\le \varphi$ in $\Omega$.
To show the reverse implication, let us assume that $u\in \USC_b(\Omega)$ is a viscosity subsolution to \eqref{10:17} according to Definition \ref{def1}. Let $\varphi\in C^2({\Omega})$ be such that $u-\varphi$ has a maximum at $x_0\in\Omega$ and $\varphi(x_0)=u(x_0)$ and, for any $B_{r}(x_0)\subset\Omega$, let $\varphi_{r}$ be the auxiliary function defined in \eqref{16:57}. As a first step, we modify $\varphi_{r}$ as $\varphi_{r,n}(x)=\varphi_{r}(x)+\frac1n|x-x_0|^2$ and notice that, for any $n\in\mathbb{N}$, the function $u-\varphi_{r,n}$ has a strict local maximum at $x_0$ and \[ u-\varphi_{r,n}\le -\frac {r^2} {4n} \ \ \ \mbox{in} \ \ \ \Omega\setminus B_{\frac{ r}{2}}(x_0). \] Let $\psi_1,\psi_2\in C^{\infty}(\Omega)$ be a partition of unity associated to the concentric balls $B_{\frac{ r}{2}}(x_0)$ and $B_{\frac{3 r}{4}}(x_0)$, namely, $0\le\psi_1 , \, \psi_2\le 1$, $\psi_1=1$ in $B_{\frac{ r}{2}}(x_0)$, $\psi_1=0$ in $\Omega\setminus B_{\frac{3 r}{4}}(x_0)$, and $\psi_1+\psi_2=1$. Let us finally set \[ \zeta_n=\psi_1\varphi_{r,n}+\rho_{m_n}* (\psi_2\varphi_{r,n}) \] where $\rho_{m_n}$ is a mollifier and $\{m_n\}_{n\in\mathbb{N}}$ is a sequence of numbers converging to $0$ to be suitably determined. Notice that $\zeta_n(x)\equiv \varphi_{r}(x)+\frac1n|x-x_0|^2$ in $B_{\frac{ r}{2}}(x_0)$ and that $\zeta_n(x_0)=u(x_0)$. Moreover, thanks to the properties of mollifiers, for any $n\in \mathbb N$ there exists $m_n>0$ such that \[ u-\zeta_n\le -\frac{ r^2}{8n} \ \ \ \mbox{in} \ \ \ \Omega\setminus B_{\frac{ r}{2}}(x_0). \] Then, $\zeta_n\in C^{2}(\Omega)$ is an admissible test function for Definition \ref{def1} and we have \[ h(x_0)u(x_0)+\mathcal{L}_{s}(\Omega(x_0),\zeta_n(x_0)) \le \ f(x_0). \] Taking the limit as $n\to\infty$, we prove the assertion by applying Lemma \ref{continuity}. Note indeed that $\zeta_n\to \varphi$ in $C^2(B_{\frac r2}(x_0))$ and $\zeta_n\to u$ pointwise in $\Omega\setminus B_{\frac r2}(x_0)$. \end{proof} In proving the stability of families of viscosity solutions, a suitable notion of limit for sequences of upper semicontinuous functions has to be considered, see for instance \cite{caff}. We introduce the following. \begin{defin}[$\Gamma$-convergence] A sequence of upper-semicontinuous functions $v_n$ is said to \emph{$\Gamma$-converge to $v$ in $ D\subset \mathbb{R}^M$} if \begin{align} &\label{gamma1} \mbox{for all converging sequences } z_n\to \bar z \mbox{ in } D \ : \quad \limsup_{n\to\infty}v_n(z_n)\le v(\bar z)\\ &\label{gamma2} \mbox{for all $\bar z\in D$ there exists a sequence } z_n\to \bar z \ \ : \ \ \lim_{n\to\infty}v_n(z_n)=v(\bar z). \end{align} \end{defin} This concept corresponds (up to a sign change) to a localized version of the classical $\Gamma$-convergence notion, see \cite{DalMaso93}, hence the same name. Clearly, uniformly converging sequences in $\Omega$ are also $\Gamma$-converging. Moreover, $\Gamma$-convergence readily arises in connection with the upper-semicontinuous envelope of a family of functions. Both examples will play a role in the sequel. The following stability result is an adaptation of the classical one provided in Proposition 4.3 of \cite{CIL} (see also Lemma 4.5 in \cite{caff}). \begin{lemma}[Stability]\label{stability} Let us consider $v\in \USC_b((0,T)\times\Omega)$ and $f\in C((0,T)\times\Omega)$.
Assume moreover that \begin{align*} i)&\quad \{v_n\}\subset \USC_b((0,T)\times\Omega) \ \ \text{$\Gamma-$converges to $v$ in $(0,T)\times\Omega$,}\\ ii)&\quad f_n \to f, \ h_n \to h \ \ \text{locally uniformly}, \mbox{ and } |\Omega_n\triangle\Omega|\to 0 \mbox{ as } n\to\infty, \\ iii)&\quad \partial_tv_n(t,x) +h_n( t, x)v_n(t,x) +\mathcal{L}_{s} (\Omega_n( t, x),v_n( t, x) )\le f_n( t, x) \ \ \ \mbox{in} \ \ \ (0,T)\times\Omega \ \ \text{ in the viscosity sense.} \end{align*} Then, if $\Omega( t, x)$ satisfies \eqref{contpar},\eqref{omegax}, it follows that \[ \partial_tv( t, x)+h( t, x)v( t, x)+\mathcal{L}_{s} (\Omega( t, x),v( t, x))\le f( t, x) \] in the viscosity sense. \end{lemma} \begin{remark}\label{ellipticparabolic}\rm An elliptic version of the Lemma holds true by assuming \eqref{continuita}-\eqref{Sigmadef}. Notice that the stationary case can be straightforwardly obtained from the evolutionary one upon interpreting $u:\Omega \to \mathbb{R}$ as a trivial time-dependent function $\tilde u(t,x)= u(x)$ on $(0,T)\times\Omega$. In fact, if such a function is touched from above or from below by a smooth function $\varphi\in C^2((0,T)\times\Omega)$ at some $(t,x)\in(0,T)\times\Omega$, we have that $\partial_t \varphi(t,x)=0$. We hence conclude that such a time-dependent representation $\tilde u(t,x) $ of a subsolution (or supersolution) $u(x)$ of the elliptic problem is a subsolution (supersolution, respectively) of its parabolic counterpart. \end{remark} \begin{proof}[ Proof of Lemma \ref{stability}] Let us assume that $v-\varphi$ has a strict global maximum, equal to $0$, at $(\bar t,\bar x)\in (0,T)\times \Omega$. Taking $\varphi_{\theta}=\varphi+\theta (|t-\bar t|^2+|x-\bar x|^2)$, we have that also $\sup (v-\varphi_{\theta})$ is attained only at $(\bar t,\bar x)$. Owing to assumption $i)$, we know that there exists a sequence of points $\{( \tau_{n}, y_{n})\}\subset (0,T)\times\Omega$ such that \be\label{6.10} ( \tau_{n}, y_{n}, v_n(\tau_{n}, y_{n}))\to (\bar t,\bar x, v(\bar t,\bar x)). \ee Thanks to the penalization in the definition of $\varphi_{\theta}$ and to assumption $i)$, for $n$ large enough we have that \[ v_n(\tau_{n}, y_{n})-\varphi_{\theta}(\tau_{n}, y_{n})\le \sup_{(0,T)\times \Omega} (v_n-\varphi_{\theta})=v_n(t_{n}, x_{n})-\varphi_{\theta}(t_{n}, x_{n}) =\epsilon_n, \] for some $\{(t_{n}, x_{n})\}\subset (0,T)\times\Omega$ such that, up to a not relabeled subsequence, $(t_n,x_n)\to(\tilde t, \tilde x)\in (0,T)\times\Omega$. Using again the $\Gamma-$convergence of $v_n$, we find \[ (v-\varphi_{\theta})(\tilde t, \tilde x)\ge \limsup_{n\to\infty }(v_n-\varphi_{\theta})(t_{n}, x_{n}) \ge\lim_{n\to\infty}(v_n-\varphi_{\theta})(\tau_{n}, y_{n})=(v-\varphi_{\theta})(\bar t,\bar x)=0. \] Since the maximum of $v-\varphi_{\theta}$ is strict, this implies that $(\tilde t, \tilde x)=(\bar t, \bar x)$ and that $\epsilon_n\to 0$. Moreover, setting $\varphi_n=\varphi_{\theta}+\epsilon_n$, it follows that $\sup_{(0,T)\times \Omega}(v_n-\varphi_{n})=(v_n-\varphi_{n})(t_{n}, x_{n})=0$. In conclusion, we have proved that $v_n-\varphi_n$ has a global maximum at $(t_n,x_n)$ (for $n$ large enough) with $v_n(t_n,x_n)=\varphi_n(t_n,x_n)$, that $\epsilon_n\to 0$, and that $(t_n,x_n)\to(\bar t, \bar x)$. Since each $v_n$ is a subsolution, evaluating the viscosity inequality at $(t_n,x_n)$ we get \[ \partial_t\varphi_n( t_n,x_n)+h_n( t_n,x_n)v_{n}( t_n,x_n)+\mathcal{L}_{s} (\Omega_n(t_n,x_n),\varphi_n( t_n,x_n))\le f_n( t_n,x_n).
\] The first two terms on the left-hand side and the term on the right-hand side easily pass to the limit as $n \to \infty$, thanks to the local uniform convergence of $h_n$ and $f_n$, to the continuity of $h$ and $f$, and to the definition of $\varphi_n$. In order to deal with the integral operator, notice that \[ \mathcal{L}_{s} (\Omega_n(t_n,x_n),\varphi_n( t_n,x_n))=\mathcal{L}_{s} (\Omega_n(t_n,x_n),\varphi_{\theta}( t_n,x_n)). \] Since $\varphi_{\theta}$ is smooth, we can use Lemma \ref{continuity}, with $\Theta(x_n)=\Omega_n(t_n,x_n)$ and $\phi_n(\cdot)=\varphi_{\theta}( t_n,\cdot)$, and pass to the limit with respect to $n$. Eventually, we get \[ \partial_t\varphi_{\theta}( \bar t,\bar x)+h( \bar t,\bar x)v ( \bar t,\bar x)+\mathcal{L}_{s} (\Omega( \bar t,\bar x),\varphi_{\theta}( \bar t,\bar x))\le f( \bar t,\bar x), \] for any $\theta>0$. Taking the limit (using Lemma \ref{continuity} again) as $\theta\to 0$, we obtain the desired result. \end{proof} The previous stability result highlights the robustness of the notion of viscosity solution with respect to limit procedures. Notice that, given any uniformly bounded sequence of viscosity (sub/super) solutions of a certain family of equations, one can always find a $\Gamma-$limit, and this is the candidate (sub/super) solution for the limiting equation. Such a candidate is given by the upper/lower half-relaxed limit \[ \overline{v}(x)=\sup\{\limsup_{n\to\infty} v_n(x_n) \ : \ x_n\to x\}, \ \ \ \ \underline{v}(x)=\inf\{\liminf_{n\to\infty} v_n(x_n) \ : \ x_n\to x\}. \] It is easy to check that $v_n$ $\Gamma-$converges to $\overline{v}$ and $-v_n$ $\Gamma-$converges to $-\underline{v}$. The key point here is that no compactness on the sequence $v_n$ is required for the existence of $\overline{v}$ and $\underline{v}$, as boundedness suffices. As we shall see in Section \ref{seceigenvalue}, this is a particularly powerful tool when dealing with equations that satisfy a comparison principle. \subsection{Sup- and infconvolution} In the sequel we often need to determine the equation (or inequality) solved by the difference of sub- or supersolutions. Note that, since such functions are not smooth, the property of being a sub- or supersolution may not be preserved by taking differences. To deal with this difficulty, we need suitable regularizations of the functions involved. Let us start by recalling the definition of the {\it supconvolution} of $u\in \USC_b((0,T)\times\Omega)$, namely, \be\label{supcon} u^{\epsilon}(t,x)=\sup_{(\tau, y) \in (0,T)\times\Omega}\left\{u(\tau,y)-\frac1{\epsilon}(|x-y|^2+|t-\tau|^2)\right\}. \ee Notice that, since $u$ is upper semicontinuous and bounded, for $\epsilon$ small enough the supremum above is attained inside $(0,T)\times\Omega$. To be more precise, let us adopt the following notation: for any $(t,x)\in(0,T)\times\Omega$, let $(t^{\epsilon},x^{\epsilon})$ be a point with the following property \be\label{01-06bis} u^{\epsilon}(t,x)=u(t^{\epsilon},x^{\epsilon})-\frac1{\epsilon}(|x-x^{\epsilon}|^2+|t-t^{\epsilon}|^2). \ee Then, \be\label{control} (|x-x^{\epsilon}|^2+|t-t^{\epsilon}|^2)\le 2\epsilon \|u\|_{L^{\infty}((0,T)\times\Omega)}. \ee Moreover, by construction, for any $(\bar t,\bar x)\in(0,T)\times\Omega$ the parabola \[ P(t,x)=u(\bar{t}^{\epsilon},\bar{x}^{\epsilon})-\frac1{\epsilon}(|t-\bar{t}^{\epsilon}|^2+|x-\bar{x}^{\epsilon}|^2) \] touches $u^{\epsilon}$ from below at $(\bar t, \bar x)$. This shows that the supconvolution is semiconvex in $(0,T)\times\Omega$.
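For the sake of illustration, consider the one-dimensional, time-independent example $u(x)=-|x|$ on the whole real line (so that boundary effects play no role): a direct computation based on \eqref{supcon} gives
\[
u^{\epsilon}(x)=
\begin{cases}
-|x|+\dfrac{\epsilon}{4} \qquad & \mbox{if } |x|\ge \dfrac{\epsilon}{2},\\[2mm]
-\dfrac{x^2}{\epsilon} & \mbox{if } |x|< \dfrac{\epsilon}{2},
\end{cases}
\]
so that the kink of $u$ at the origin is replaced by a $C^{1,1}$ function lying above $u$, whose second derivative is bounded from below by $-2/\epsilon$.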
Such a property is particularly useful in order to evaluate pointwise the viscosity inequalities satisfied by subsolutions. Indeed, let us assume that $u^{\epsilon}$ is touched from above by a smooth function at $(\bar t, \bar x)$. Then, thanks to its semiconvexity, we deduce that $u^{\epsilon}\in C^{1,1}(\bar t, \bar x)$, namely there exist $q \in\mathbb{R}^{N+1}$ and $C>0$ such that, in a neighborhood of $(\bar t, \bar x)$, \be\label{c11} \left|u^{\epsilon}(t,x)-u^{\epsilon}(\bar t, \bar x)-q \cdot\binom{t-\bar t}{x-\bar x}\right|\le C(|t-\bar{t}|^2+|x-\bar{x}|^2). \ee This means that the time derivative and the fractional operator can be evaluated pointwise at $(\bar t, \bar x)$ (see Lemma \ref{pointsub} below). Similarly, the {\it infconvolution} of a function $v\in \LSC((0,T)\times\Omega)\cap L^{\infty}((0,T)\times\Omega)$ is defined as \be\label{infconv} v_{\epsilon}(t,x)=\inf_{(\tau, y) \in (0,T)\times\Omega}\left\{v(\tau,y)+\frac1{\epsilon}(|x-y|^2+|t-\tau|^2)\right\}, \ee and we let $(t_{\epsilon},x_{\epsilon})$ be a point where \be\label{01-06tris} v_{\epsilon}(t,x)=v(t_{\epsilon},x_{\epsilon})+\frac1{\epsilon}(|x-x_{\epsilon}|^2+|t-t_{\epsilon}|^2). \ee The properties of the infconvolution correspond to those of the supconvolution up to the trivial transformation $v_{\epsilon}=-(-v)^{\epsilon}$. Omitting further details for the sake of brevity, we limit ourselves to proving the following inequality for supconvolutions. \begin{lemma}\label{pointsub} Let us assume \eqref{Sigmadef}, \eqref{contpar}, and \eqref{omegax}, that $u(t,x)$ is a viscosity subsolution to \eqref{parabolic}, and let $u^{\epsilon}(t,x)$ be its supconvolution. If $u^{\epsilon}$ is touched from above by some smooth function at $(\bar t, \bar x)$, the following inequality holds in a classical sense \be\label{13:34uno} \partial_tu^{\epsilon}(\bar t, \bar x)+h(\bar{t}^{\epsilon},\bar{x}^{\epsilon})u^{\epsilon}(\bar t, \bar x)+\mathcal{L}_{s} (\Omega(\bar{t}^{\epsilon},\bar{x}^{\epsilon}),u^{\epsilon}(\bar t, \bar x))\le f(\bar{t}^{\epsilon},\bar{x}^{\epsilon}), \ee where $(\bar{t}^{\epsilon},\bar{x}^{\epsilon})$ satisfies \eqref{01-06bis}. \end{lemma} \begin{proof} Let us assume that $u^{\epsilon}$ is touched from above by a smooth $\varphi$ at $(\bar t,\bar x)$. Let us recall that there exists $(\bar{t}^{\epsilon},\bar{x}^{\epsilon})$ such that \[ u^{\epsilon}(\bar t,\bar x)=u(\bar{t}^{\epsilon},\bar{x}^{\epsilon})-\frac1{\epsilon}(|\bar{t}-\bar{t}^{\epsilon}|^2+|\bar{x}-\bar{x}^{\epsilon}|^2) \ \ \ \mbox{and} \ \ \ (\bar{t}^{\epsilon},\bar{x}^{\epsilon})\to(\bar t,\bar x) \ \ \ \mbox{as} \ \ \ \epsilon\to0. \] By definition of supconvolution we have that \[ u^{\epsilon}(t+\bar t-\bar{t}^{\epsilon},x+\bar x-\bar{x}^{\epsilon})\ge u(\tau,y)-\frac1{\epsilon}(|t+\bar t-\bar{t}^{\epsilon}-\tau|^2+|x+\bar x-\bar{x}^{\epsilon}-y|^2). \] Choosing $(\tau,y)=(t,x)$ it follows that \[ u^{\epsilon}(t+\bar t-\bar{t}^{\epsilon},x+\bar x-\bar{x}^{\epsilon})\ge u(t,x)-\frac1{\epsilon}(|\bar t-\bar{t}^{\epsilon}|^2+|\bar x-\bar{x}^{\epsilon}|^2). \] Then, by defining \[ \bar{\varphi}(t,x)=\varphi(t+\bar t-\bar{t}^{\epsilon},x+\bar x-\bar{x}^{\epsilon})+\frac1{\epsilon}(|\bar t-\bar{t}^{\epsilon}|^2+|\bar x-\bar{x}^{\epsilon}|^2), \] we infer that $\bar{\varphi}$ touches $u$ from above at $(\bar{t}^{\epsilon},\bar{x}^{\epsilon})$.
Since $u$ is a viscosity subsolution to \eqref{parabolic}, it follows that \be\label{01-06} \partial_t\bar{\varphi}_{r}(\bar{t}^{\epsilon},\bar{x}^{\epsilon})+h(\bar{t}^{\epsilon},\bar{x}^{\epsilon})\bar{\varphi}_{r}(\bar{t}^{\epsilon},\bar{x}^{\epsilon})+\mathcal{L}_{s} (\Omega(\bar{t}^{\epsilon},\bar{x}^{\epsilon}),\bar{\varphi}_{r}(\bar{t}^{\epsilon},\bar{x}^{\epsilon}))\le f(\bar{t}^{\epsilon},\bar{x}^{\epsilon}), \ee where $\bar{\varphi}_{r}$ (and, below, $\varphi_{r}$) denotes the localized function associated to $\bar{\varphi}$ (to $\varphi$, respectively) as in \eqref{16:57}. Now notice that, by construction of $\bar{\varphi}$, it results that $\partial_t\bar{\varphi}_{r}(\bar{t}^{\epsilon},\bar{x}^{\epsilon})=\partial_t{\varphi}_{r}(\bar{t},\bar{x})$ and \[ \bar{\varphi}_{r}(\bar{t}^{\epsilon},\bar{x}^{\epsilon})-\bar{\varphi}_{r}(\bar{t}^{\epsilon},\bar{x}^{\epsilon}+z)= \varphi_{r}(\bar t,\bar x)-\varphi_{r}(\bar t,\bar x+z), \] which implies \[ \mathcal{L}_{s} (\Omega(\bar{t}^{\epsilon},\bar{x}^{\epsilon}),\bar{\varphi}_{r}(\bar{t}^{\epsilon},\bar{x}^{\epsilon}))=\int_{\Omegat(\bar{t}^{\epsilon},\bar{x}^{\epsilon})}\frac{\bar{\varphi}_{r}(\bar{t}^{\epsilon},\bar{x}^{\epsilon})-\bar{\varphi}_{r}(\bar{t}^{\epsilon},\bar{x}^{\epsilon}+z)}{|z|^{N+2s}}dz \] \[ =\int_{\Omegat(\bar{t}^{\epsilon},\bar{x}^{\epsilon})}\frac{\varphi_{r}(\bar t,\bar x)-\varphi_{r}(\bar t,\bar x+z)}{|z|^{N+2s}}dz=\mathcal{L}_{s} (\Omega(\bar{t}^{\epsilon},\bar{x}^{\epsilon}),{\varphi}_{r}(\bar{t},\bar{x})). \] Then, \eqref{01-06} becomes \[ \partial_t{\varphi}_{r}(\bar{t},\bar{x})+h(\bar{t}^{\epsilon},\bar{x}^{\epsilon})u^{\epsilon}(\bar{t},\bar{x})+\mathcal{L}_{s} (\Omega(\bar{t}^{\epsilon},\bar{x}^{\epsilon}),{\varphi}_{r}(\bar{t},\bar{x}))\le f(\bar{t}^{\epsilon},\bar{x}^{\epsilon}). \] Since $u^{\epsilon}$ is touched from above by a smooth function at $(\bar t,\bar x)$, we know that $u^{\epsilon}\in C^{1,1}(\bar t,\bar x)$ (see \eqref{c11}) and then $\partial_t{\varphi}_{r}(\bar{t},\bar{x})=\partial_tu^{\epsilon}(\bar{t},\bar{x})$. Recalling assumption \eqref{omegax} and since $(\bar{t}^{\epsilon},\bar{x}^{\epsilon})\to (\bar t,\bar x)\in (0,T)\times \Omega$, we deduce that there exists $\delta>0$ such that, for any $r<\delta$, we can decompose the nonlocal operator as follows \begin{align*} \mathcal{L}_{s} (\Omega(\bar{t}^{\epsilon},\bar{x}^{\epsilon}),{\varphi}_{r}(\bar{t},\bar{x})) &=\int_{\Sigma \cap B_r(0)}\frac{\varphi(\bar t,\bar x)-\varphi(\bar t,\bar x+z)}{|z|^{N+2s}}dz\\ &\quad +\int_{\Omegat(\bar{t}^{\epsilon},\bar{x}^{\epsilon})\setminus B_r(0)}\frac{u^{\epsilon}(\bar t,\bar x)-u^{\epsilon}(\bar t,\bar x+z)}{|z|^{N+2s}}dz. \end{align*} The integral on $\Sigma\cap B_r(0)$ is well defined and converges to zero as $r\to0$, due to the smoothness of $\varphi$ and the symmetry of $\Sigma$. To deal with the second integral, we apply Lemma \ref{continuity} with $x_n=\bar x^{\epsilon}$, $\Theta(x_n)=\Omega(\bar{t}^{\epsilon},x_n)\setminus B_{\frac1n}(x_n)$ and $\phi_{n}(\cdot)=\phi(\cdot)=u^{\epsilon}(\bar t,\cdot)$. We deduce that $\mathcal{L}_{s}(\Omega(\bar{t}^{\epsilon},\bar{x}^{\epsilon}),{\varphi}_{r}(\bar{t},\bar{x}))\to \mathcal{L}_{s} (\Omega(\bar{t}^{\epsilon},\bar{x}^{\epsilon}),u^{\epsilon}(\bar{t},\bar{x}))$ as $r\to0$. This completes the proof of the Lemma. \end{proof} A similar inequality holds for the infconvolution. For the sake of later reference, we state it below without proof; it can be obtained by straightforwardly adapting the argument of Lemma \ref{pointsub}. \begin{lemma}\label{pointsup} Let us assume that $v(t,x)$ is a viscosity supersolution to \eqref{parabolic} and let $v_{\epsilon}(t,x)$ be its infconvolution.
If $v_{\epsilon}$ is touched from below by some smooth function at $(\bar t, \bar x)$, the following inequality holds in a classical sense \be\label{13:34due} \partial_tv_{\epsilon}(\bar t, \bar x)+h(\bar{t}_{\epsilon},\bar{x}_{\epsilon})v_{\epsilon}(\bar t, \bar x)+\mathcal{L}_{s} (\Omega(\bar{t}_{\epsilon},\bar{x}_{\epsilon}),v_{\epsilon}(\bar t, \bar x))\ge f(\bar{t}_{\epsilon},\bar{x}_{\epsilon}), \ee where $(\bar t_{\epsilon},\bar x_{\epsilon})$ satisfies \eqref{01-06tris}. \end{lemma} In the following Lemma we eventually state that the difference of a sub- and a supersolution is still a subsolution. \begin{lemma}[Difference]\label{differenza} Let us consider $h_1(t,x), h_2(t,x)$ satisfying \eqref{alphap}, $\{\Omega_1(t,x)\}, \{\Omega_2(t,x)\}$ satisfying \eqref{contpar},\eqref{omegax}, and two functions $u\in \USC_b((0,T)\times\Omega), v\in \LSC_b((0,T)\times\Omega)$ that solve in the viscosity sense \[ \partial_tu(t,x)+h_1(t,x)u(t,x)+\mathcal{L}_{s} (\Omega_1(t,x),u(t,x))\le f_1(t,x) \qquad \mbox{in } (0,T)\times\Omega \] \[ \partial_tv(t,x)+h_2(t,x)v(t,x)+\mathcal{L}_{s} (\Omega_2(t,x),v(t,x))\ge f_2(t,x) \qquad \mbox{in } (0,T)\times\Omega, \] respectively. Then $w=u-v$ solves in the viscosity sense \[ \partial_tw(t,x)+h_1(t,x)w(t,x)+\mathcal{L}_{s} (\Omega_1(t,x),w(t,x))\le \tilde{f}(t,x) \qquad \mbox{in } (0,T)\times\Omega, \] where \[ \tilde{f}(t,x)=f_1({t} ,{x} )-f_2({t} ,{x} )+M|h_1({t} ,{x} )-h_2({t} ,{x} )|+ 2M\int_{ |z|\ge \frac{\zeta}{2}d(x) }\frac{|\chi_{\Omegat_1({t} ,{x} )}-\chi_{\Omegat_2({t} ,{x} )}|}{|z|^{N+2s}}dz, \] with $M=\max\{\|u\|_{L^{\infty}((0,T)\times\Omega)},\|v\|_{L^{\infty}((0,T)\times\Omega)}\}$. \end{lemma} \begin{proof} Recalling definitions \eqref{supcon} and \eqref{infconv}, let us consider the function $w^{\epsilon}(t,x)=u^{\epsilon}(t,x)-v_{\epsilon}(t,x)$ and assume that it is touched from above by some $\varphi\in C^2((0,T)\times\Omega)$ at a point $ (\bar{t}, \bar{x})$. This means that \[ u^{\epsilon}(\bar{t}, \bar{x})-v_{\epsilon}(\bar{t}, \bar{x})=\varphi(\bar{t}, \bar{x}) \ \ \ \mbox{and} \ \ \ u^{\epsilon}-v_{\epsilon}\le \varphi \ \ \mbox{in} \ \ (0,T)\times\Omega. \] This latter fact, together with the semiconvexity of both $u^{\epsilon}$ and $-v_{\epsilon}$, implies that $u^{\epsilon}$ and $-v_{\epsilon}$ are $C^{1,1}(\bar{t},\bar{x})$ (see \eqref{c11}). We are hence in a position to apply Lemmas \ref{pointsub} and \ref{pointsup} and to evaluate the inequalities satisfied by $u^{\epsilon}$ and $v_{\epsilon}$ pointwise. We have that \[ \partial_tu^{\epsilon} (\bar{t}, \bar{x})+h_1(\bar{t}^{\epsilon},\bar{x}^{\epsilon})u^{\epsilon} (\bar{t}, \bar{x})+\mathcal{L}_{s}(\Omega_1(\bar{t}^{\epsilon},\bar{x}^{\epsilon}),u^{\epsilon} (\bar{t}, \bar{x}))\le f_1(\bar{t}^{\epsilon},\bar{x}^{\epsilon}), \] and that \[ \partial_tv_{\epsilon} (\bar{t}, \bar{x})+h_2(\bar{t}_{\epsilon},\bar{x}_{\epsilon})v_{\epsilon} (\bar{t}, \bar{x})+\mathcal{L}_{s}(\Omega_2( \bar{t}_{\epsilon},\bar{x}_{\epsilon}), v_{\epsilon} (\bar{t}, \bar{x}))\ge f_2(\bar{t}_{\epsilon},\bar{x}_{\epsilon}).
\] Recalling the ordering between $w^{\epsilon}$ and $\varphi$ and combining the two inequalities above, we infer that, for $\epsilon$ small enough, \begin{align} \label{01-06tris} &\partial_t \varphi_r(\bar t,\bar x)+h_1(\bar{t}^{\epsilon},\bar{x}^{\epsilon})\varphi_r(\bar t,\bar x)+ \mathcal{L}_s (\Omega_1(\bar{t}^{\epsilon},\bar{x}^{\epsilon}), \varphi_r(\bar t,\bar x))\\[2mm] &\nonumber \quad \le \partial_t w^{\epsilon}(\bar t,\bar x)+h_1(\bar{t}^{\epsilon},\bar{x}^{\epsilon})w^{\epsilon}(\bar t,\bar x)+\mathcal{L}_s(\Omega_1(\bar{t}^{\epsilon},\bar{x}^{\epsilon}), w^{\epsilon}(\bar t,\bar x))\\[2mm] &\nonumber \quad \le f_1(\bar{t}^{\epsilon},\bar{x}^{\epsilon})-f_2(\bar{t}_{\epsilon},\bar{x}_{\epsilon})+M|h_1(\bar{t}^{\epsilon},\bar{x}^{\epsilon})-h_2(\bar{t}_{\epsilon},\bar{x}_{\epsilon})|\\ &\nonumber \qquad +2M\int_{\mathbb{R}^N}\frac{|\chi_{\Omegat_1(\bar{t}^{\epsilon},\bar{x}^{\epsilon})}-\chi_{\Omegat_2(\bar{t}_{\epsilon},\bar{x}_{\epsilon})}|}{|z|^{N+2s}}dz. \end{align} Let us stress that, for $\epsilon$ small enough, the integral term on the right-hand side above is finite. Indeed, thanks to assumption \eqref{omegax} and since $(\bar{t}^{\epsilon},\bar{x}^{\epsilon})\to (\bar{t},\bar{x})$ and $(\bar{t}_{\epsilon},\bar{x}_{\epsilon})\to (\bar{t},\bar{x})$ as $\epsilon\to 0$, we have \[ \exists \ \epsilon_0>0 \ : \ \forall \epsilon\in(0,\epsilon_0) \ \ \ B_{\frac{\zeta}{2}d(\bar x)}\cap \Omegat_1(\bar{t}^{\epsilon},\bar{x}^{\epsilon})= B_{\frac{\zeta}{2}d(\bar x)}\cap \Omegat_2(\bar{t}_{\epsilon},\bar{x}_{\epsilon})=B_{\frac{\zeta}{2}d(\bar x)}\cap \Sigma. \] This implies that \begin{align*} &\int_{\mathbb{R}^N}\frac{|\chi_{\Omegat_1(\bar{t}^{\epsilon},\bar{x}^{\epsilon})}-\chi_{\Omegat_2(\bar{t}_{\epsilon},\bar{x}_{\epsilon})}|}{|z|^{N+2s}}dz=\int_{|z|\ge \frac{\zeta}{2}d(\bar x)}\frac{|\chi_{\Omegat_1(\bar{t}^{\epsilon},\bar{x}^{\epsilon})}-\chi_{\Omegat_2(\bar{t}_{\epsilon},\bar{x}_{\epsilon})}|}{|z|^{N+2s}}dz\\ &\qquad \le \left(\frac{2}{\zeta d(\bar x)}\right)^{N+2s}|\Omegat_1(\bar{t}^{\epsilon},\bar{x}^{\epsilon})\triangle\Omegat_2(\bar{t}_{\epsilon},\bar{x}_{\epsilon})|. \end{align*} Since \eqref{01-06tris} holds whenever $w^{\epsilon}$ is touched from above by a smooth $\varphi$ at some point of $(0,T)\times\Omega$, we conclude that $w^{\epsilon}$ solves in the viscosity sense \[ \partial_t w^{\epsilon}( t, x)+h_{\epsilon}(t,x)w^{\epsilon}( t, x)+\mathcal{L}_s(\Omega_{\epsilon}(t,x), w^{\epsilon}( t, x)) \le f^{\epsilon}( t, x), \] where \begin{align*} &h_{\epsilon}(t,x)=h_1({t}^{\epsilon},{x}^{\epsilon}),\\ &\Omega_{\epsilon}(t,x)=\Omega_1({t}^{\epsilon},{x}^{\epsilon}),\\ &f^{\epsilon}(t, x)= f_1({t}^{\epsilon},{x}^{\epsilon})-f_2({t}_{\epsilon},{x}_{\epsilon})+M|h_1({t}^{\epsilon},{x}^{\epsilon})-h_2({t}_{\epsilon},{x}_{\epsilon})| +2M\int_{|z|\ge \frac{\zeta}{2}d(x)}\frac{|\chi_{\Omegat_1({t}^{\epsilon},{x}^{\epsilon})}-\chi_{\Omegat_2({t}_{\epsilon},{x}_{\epsilon})}|}{|z|^{N+2s}}dz, \end{align*} and the points $({t}^{\epsilon},{x}^{\epsilon})$, $(t_{\epsilon},x_{\epsilon})$ are related to $( t, x)$ through \eqref{01-06bis} and \eqref{control}. Thanks to Lemma \ref{stability}, we can pass to the limit as $\epsilon\to 0$ in the inequality above and obtain the desired result. \end{proof} For later purposes we also explicitly state an elliptic version of Lemma \ref{differenza}.
\begin{cor}\label{ellipticdif} Assume \eqref{alpha}-\eqref{ostationary}, that $f_1,f_2\in C(\Omega)$ satisfy \eqref{fcon}, and that $u\in \USC_b(\Omega)$, $v\in \LSC_b(\Omega)$ solve \[ h(x)u(x)+\mathcal{L}_{s} (\Omega(x),u(x))\le f_1(x) \qquad \mbox{in } \Omega \] \[ h(x)v(x)+\mathcal{L}_{s} (\Omega(x),v(x))\ge f_2(x) \qquad \mbox{in } \Omega, \] respectively. Then $w=u-v$ solves \[ h(x)w(x)+\mathcal{L}_{s} (\Omega(x),w(x))\le f_1(x)-f_2(x) \qquad \mbox{in } \Omega. \] \end{cor} \begin{remark} For the sake of brevity, we do not provide a proof of Corollary \ref{ellipticdif}, see Remark \ref{ellipticparabolic}. \end{remark} \subsection{Regularity} By adapting the regularity theory for fully nonlinear integro-differential equations from \cite[Sec. 14]{caff}, we can prove the following. \begin{teo}[H\"older regularity]\label{regelliptic} Let us assume \eqref{alpha}-\eqref{Sigmadef}, that $f\in C(\Omega)\cap\elle{\infty}$, and that $u\in C(\Omega)\cap\elle{\infty}$ solves in the viscosity sense \[ h(x)u(x)+\mathcal{L}_{s}(\Omega(x), u(x) )= f(x) \quad \mbox{in } \Omega. \] Then, for any open sets $\Omega'\subset\subset \Omega''\subset\subset\Omega$, it follows that \[ \|u\|_{C^{\gamma}(\Omega')}\le \tilde C, \] where $\gamma \in (0,1)$ and $\tilde C=\tilde C(\|f\|_{\elle{\infty}}, s, \zeta, d(\Omega'', \Omega'),\|u\|_{\elle{\infty}})$. \end{teo} \begin{proof} We claim that \[ \mathcal{L}_{s}(\Sigma({x}),u(x))= \tilde f(x) \quad \mbox{in } \Omega \] in the viscosity sense, where $\Sigma({x})=\Sigma+x$ and $\tilde f\in C(\Omega)$ is a suitable function such that $\tilde f(x)\approx d(x)^{-2s}$ close to $\partial\Omega$. Once such a property is verified, the proof of the Theorem follows from \cite[Theorem 4.6]{mou}. See also \cite[Theorem 7.2]{schwabsil}, where the parabolic problem is treated. In order to prove the claim, we follow the ideas of \cite[Sec. 14]{caff}. Let us assume that, any time $u$ is touched from above by a smooth function at some $x\in \Omega$, $u$ belongs to $C^{1,1}( x)$. Using Lemma \ref{continuity}, we deduce that \[ h({x})u( x)+\mathcal{L}_{s} (\Omega({x}),u( x))\le f(x) \] pointwise for any such $x\in \Omega$. Thanks to assumption \eqref{ostationary}, the nonlocal operator can be rewritten as follows \begin{align*} \mathcal{L}_{s}(\Omega({x}), u(x))&=\int_{\Sigma\cap B_{\zeta d({x})}}\frac{ u( {x} )- u(x +z )}{|z|^{N+2s}}dz+\int_{\Omegat(x)\setminus B_{\zeta d(x)}}\frac{ u( x )- u( x+z )}{|z|^{N+2s}}dz\\ &=\mathcal{L}_{s}(\Sigma({x}), u(x))+\int_{\Omegat(x)\setminus B_{\zeta d(x)}}\frac{ u( x )- u( x+z )}{|z|^{N+2s}}dz-\int_{\Sigma\setminus B_{\zeta d(x)}}\frac{ u( x )- u( x+z )}{|z|^{N+2s}}dz. \end{align*} Let us set \[ \tilde f(x)=f(x)-h(x)u(x)-\int_{\Omegat(x)\setminus B_{\zeta d(x)}}\frac{ u( x )- u( x+z )}{|z|^{N+2s}}dz+\int_{\Sigma\setminus B_{\zeta d(x)}}\frac{ u( x )- u( x+z )}{|z|^{N+2s}}dz. \] This proves that \[ \mathcal{L}_{s}(\Sigma({x}), u(x)) \le \tilde f(x), \] assuming that $u$ belongs to $C^{1,1}(x)$. Let us apply this argument to the supconvolution $u^{\epsilon}$. By definition of the supconvolution, we recall that, any time $u^{\epsilon}$ is touched from above by a smooth function $\varphi$ at $x\in \Omega$, then $u^{\epsilon}\in C^{1,1}(x)$. Moreover, thanks to Lemma \ref{pointsub}, we have that \[ h(x^{\epsilon})u^{\epsilon}(x)+\mathcal{L}_{s}(\Omega(x^{\epsilon}), u^{\epsilon}(x) )\le f(x^{\epsilon}) \] pointwise for any such $x\in\Omega $.
Then, thanks to the argument above, it follows that \[ \mathcal{L}_{s}(\Sigma({x^{\epsilon}}), u^{\epsilon}(x)) \le \tilde f(x^{\epsilon}). \] Eventually, thanks to the stability property of viscosity solutions (see Lemma \ref{stability}), we can pass to the limit in the inequality above to conclude that \[ \mathcal{L}_{s}(\Sigma({x}), u(x)) \le \tilde f(x), \] in the viscosity sense. Similarly, one can check that \[ \mathcal{L}_{s}(\Sigma({x}), u(x))\ge \tilde f(x), \] and the proof of the initial claim follows. \end{proof} \subsection{Equivalence with the fractional laplacian} We now present two technical lemmas, shedding light on the relation between the operator in \eqref{10:17} and the classical fractional laplacian. \begin{lemma}[Equivalence]\label{acca} The function defined in \eqref{15:15} can be equivalently written as \[ a(x)=\Gamma(2s+1)\int_{\tilde{S}(x)^c}\frac{dz}{|z|^{N+2s}}, \] where $S(x)$ is the largest star-shaped subset of $\Omega$ centered at $x$ and $\tilde{S}(x)=x-S(x)$. If $\Omega$ is convex, the fractional laplacian $(-\Delta)_s$ defined in \eqref{decomposition} is equivalent to the elliptic operator defined in \eqref{start}, as $$\Gamma(2s+1) (-\Delta)_s\varphi(x) = a(x) \varphi (x)+ (-\Delta)^\star_s\varphi (x)$$ on suitably smooth functions $\varphi$. \end{lemma} \begin{proof} We first notice that \begin{align*} a(x)&=\int_{\mathbb{R}^N}\frac{1}{|y|^{N+2s}}e^{-\frac{d(x,\sigma(y))}{|y|}}dy\\ & =\int_{\omega^{N-1}}\int_0^{\infty}\frac{1}{\rho^{1+2s}}e^{-\frac{d(x,\sigma)}{\rho}}d\rho d\sigma=\int_{\omega^{N-1}}\frac{1}{d(x,\sigma)^{2s}}d\sigma \int_0^{\infty}\frac{1}{r^{1+2s}}e^{-\frac{1}{r}}dr\\ & =\int_{\omega^{N-1}}\frac{1}{d(x,\sigma)^{2s}}d\sigma\int_0^{\infty}t^{2s-1}e^{-t}dt=\Gamma(2s)\int_{\omega^{N-1}}\frac{d\sigma}{d(x,\sigma)^{2s}}, \end{align*} where we recall that $d(x,\sigma(y))$ denotes the distance between $x$ and the first point reached on $\partial \Omega$ by the ray from $x$ with direction $\sigma(y) = y/|y|$. On the other hand, we have that \[ \int_{\tilde{S}(x)^c}\frac{dz}{|z|^{N+2s}}=\int_{\omega^{N-1}}\int_{d(x,\sigma)}^{\infty}\rho^{-1-2s}d\rho\, d\sigma=\frac{1}{2s}\int_{\omega^{N-1}}\frac{d\sigma}{d(x,\sigma)^{2s}}. \] The conclusion follows from the fact that $\Gamma (2s+1)=2s\,\Gamma(2s)$.\\ Assume now that $\Omega$ is convex. Then $S(x)\equiv\Omega$ for any $x\in\Omega$ and \[ a(x)=\Gamma(2s+1)\int_{\{x-\Omega\}^c}\frac{dz}{|z|^{N+2s}}=\Gamma(2s+1)\int_{\Omega^c}\frac{dy}{|x-y|^{N+2s}}. \] Then, recalling \eqref{start}, we have that, for any $\varphi\in C^{\infty}_c(\Omega)$, \[ a(x)\varphi(x)+(-\Delta)_{s}^{\star}\varphi(x)=\Gamma(2s+1)\left[\int_{\Omega^c}\frac{1}{|x-y|^{N+2s}}dy \ \varphi(x)+p.v.\int_{\Omega}\frac{\varphi(x)-\varphi(y)}{|x-y|^{N+2s}}dy\right] \] \[ =\Gamma(2s+1)\ p.v.\int_{\mathbb{R}^N}\frac{\varphi(x)-\varphi(y)\chi_{\Omega}(y)}{|x-y|^{N+2s}}dy=\Gamma(2s+1)(-\Delta)_ {s}\varphi (x).\qedhere \] \end{proof} We now introduce a class of domains for which the function $k(x)$ defined in \eqref{decomposition} satisfies the bounds \eqref{killing}. To this aim, we assume that the complement $\Omega^c $ satisfies a uniform positive density condition, namely that there exist $\rho_0>0$ and $\kappa >0 $ such that \be\label{density} |B_{\rho}(\bar x)\cap \Omega^c|\ge \kappa |B_{\rho}(\bar x)| \ \ \ \mbox{for all } \bar x\in\partial\Omega \ \mbox{ and } \ \rho\in (0,\rho_0). \ee Let us stress that \eqref{density} is weaker than the exterior cone condition.
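To see this, assume for instance that $\Omega$ satisfies a uniform exterior cone condition, namely that for every $\bar x\in\partial\Omega$ there is a closed finite cone $\mathcal{C}_{\bar x}$ with vertex at $\bar x$, fixed opening angle and fixed height $\rho_0$, such that $\mathcal{C}_{\bar x}\subset\Omega^c$. Then, for every $\rho\in(0,\rho_0)$,
\[
|B_{\rho}(\bar x)\cap \Omega^c|\ge |B_{\rho}(\bar x)\cap \mathcal{C}_{\bar x}|=\kappa\,|B_{\rho}(\bar x)|,
\]
where $\kappa\in(0,1)$ depends only on the opening angle of the cone and on the dimension $N$, so that \eqref{density} holds.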
\begin{lemma}[Bounds on $h$]\label{hbeha} Let $\Omega$ be an open bounded set of $\mathbb R^N$ with $\Omega^c$ satisfying \eqref{density}. Then, the function \[ h (x)=\int_{\Omega^c}\frac{1}{|x-y|^{N+2s}}dy \] satisfies the bounds \eqref{killing}. \end{lemma} \begin{proof} To start with, notice that $\Omega^c\subset B_{d(x)}(x)^c$ for all $x\in\Omega$. Then, \be\label{easypart} h(x)\le \int_{B_{d(x)}(x)^c}\frac{1}{|x-y|^{N+2s}}dy=\int_{|z|\ge d(x)}\frac{1}{|z|^{N+2s}}dz=\frac{\omega_N}{2s}\frac1{d(x)^{2s}}, \ee whence the upper bound in \eqref{killing}. To prove the lower bound, let us take $x\in\Omega$ such that $d(x)\le \rho_0$. We then have \[ h(x)=\int_{\Omega^c}\frac{1}{|x-y|^{N+2s}}d y \ge \int_{\Omega^c\cap B_{d(x)}(\bar x)}\frac{1}{|x-y|^{N+2s}}d y, \] where $\bar x\in\partial\Omega$ is such that $d(x)=|x-\bar x|$. Using that if $y\in B_{d(x)}(\bar x)$ then $|x-y|\le |x-\bar x|+|\bar x-y|\le 2d(x)$, and taking advantage of \eqref{density} (recall that $d(x)\le \rho_0$), the inequality above becomes \[ h(x)\ge\frac{1}{(2d(x))^{N+2s}}|\Omega^c\cap B_{d(x)}(\bar x)|\ge C\frac{1}{d(x)^{2s}}.\qedhere \] \end{proof} \section{Existence and uniqueness} The existence of viscosity solutions follows by applying the classical Perron method. We give here full details of this construction in the parabolic case, hence proving Theorem \ref{existence}. The proof of Theorem \ref{teide} follows the same lines, being actually simpler; we comment on it at the end of the section. As a first step toward the implementation of the Perron method, we start by providing a suitable barrier for the elliptic problem. \begin{lemma}[Barriers]\label{barriercone} Let us assume that the set-valued function $x\to\Omega(x)$ satisfies \eqref{ostationary}, \eqref{Sigmadef}. Then there exists a positive $\bar \eta=\bar \eta(N,s,\zeta,\alpha)$ such that, for any $\eta\in(0,\bar \eta]$, the function $u_{\eta}(x)=d(x)^{\eta}$ solves the inequality \be\label{sbomba} \frac{\alpha}{d(x)^{2s}}u_{\eta}(x)+\mathcal{L}_s(\Omega(x),u_{\eta} (x) )\ge \frac{\alpha}{2} d(x)^{\eta-2s} \ \ \ \mbox{in} \ \ \ \Omega \ee in the viscosity sense. \end{lemma} \begin{remark}\label{barpar}\rm Notice that, under assumptions \eqref{Sigmadef} and \eqref{omegax}, the function $u_{\eta}$, with $\eta<\bar \eta(N,s,\zeta_T,\alpha)$, also satisfies in the viscosity sense \[ \partial_tu_{\eta}(x)+ \frac{\alpha}{d(x)^{2s}}u_{\eta}(x)+\mathcal{L}_s(\Omega(t,x),u_{\eta} (x) )\ge \frac{\alpha}{2} d(x)^{\eta-2s} \ \ \ \mbox{in} \ \ \ (0,T)\times\Omega, \] since for all $t\in(0,T)$ the set-valued function $x\to\Omega_t(x)=\Omega(t,x)$ satisfies \eqref{ostationary} with $\zeta=\zeta_T$. If we moreover assume \eqref{threeprime}, the barrier is uniform in time. \end{remark} \begin{proof}[Proof of Lemma \ref{barriercone}] Fix $x\in\Omega$ and assume that there exists $\varphi\in C^2(\Omega)$ such that $u_{\eta}-\varphi$ has a minimum in $\Omega$ at $x$ and that $u_{\eta}(x)=\varphi(x)$. We have to check (see Lemma \ref{def2}) that for any $B_r(x)\subset \Omega$ \be\label{11:32} \frac{\alpha}{d(x)^{2s}}u_{\eta}(x)+\mathcal{L}_s(\Omega(x),\varphi_r(x))\ge \frac{\alpha}{2} d(x)^{\eta-2s}. \ee Since $\varphi$ touches $u_{\eta}$ from below at $x$, we deduce from \cite[Prop. 2.14]{BC} that there exists a unique $\bar x\in\partial \Omega$ such that $d(x)=|x-\bar x|$. To simplify notation, from now on we use a system of coordinates centered at $\bar x$, so that $d(x)=|x|$.
Notice that \[ \varphi(x)-\varphi(x+z)\ge u_{\eta}(x)-u_{\eta}(x+z)\ge (|x|^{\eta}-|x+z|^{\eta}) \ \ \ \mbox{for all} \ \ \ z\in\Omegat(x), \] the last inequality following from the fact that $d(x+z)\le |x+z|$. We then have that %Then it results that \[ \mathcal{L}_s(\Omega(x),\varphi_r(x))\ge \int_{\Omegat(x)}\frac{|x|^{\eta}-|x+z|^{\eta}}{|z|^{N+2s}}dz \] \[ =\left(\int_{ \{|z|\le \zeta |x|\}\cap\Sigma }\frac{|x|^{\eta}-|x+z|^{\eta}}{|z|^{N+2s}}dz+\int_{\{|z|> \zeta |x|\}\cap\Omegat(x)}\frac{|x|^{\eta}-|x+z|^{\eta}}{|z|^{N+2s}}dz\right)=: I_1+I_2, \] where we have used that $\Omegat(x)\cap B_{\zeta |x|}=\Sigma\cap B_{\zeta|x|}$ (see assumption \eqref{ostationary})\EEE. Thanks to Taylor expansion, we get that \begin{align*} |x+z|^{\eta}&=|x|^{\eta}+\eta |x|^{\eta-2}x\cdot z +\frac12\left[\eta(\eta-2)|\xi|^{\eta-4}|\xi \cdot z|^2+\eta |\xi|^{\eta-2}|z|^2 \right]\\ & \le |x|^{\eta}+\eta |x|^{\eta-2}x\cdot z+2^{\eta-3}\eta |x|^{\eta-2}|z|^2 \ \ \ \mbox{ for } \ \ |z|\le \zeta |x| \end{align*} with $\xi= x+t z$ for some $t\in(0,1)$ and the inequality follows from neglecting a negative term and from the fact that $|\xi|\ge (1-\zeta)|x|\ge \frac12 |x|$. This implies that \begin{align}\label{16:42bis} I_1&\ge-\int_{\{|z|\le \zeta |x|\}\cap\Sigma}\left(\eta |x|^{\eta-2}x\cdot z+\eta 2^{\eta-3} |x|^{\eta-2}|z|^2\right)\frac{1}{|z|^{N+2s}}dz\\ & =- \eta 2^{\eta-3} |x|^{\eta-2}\int_{\{|z|\le \zeta |x|\}\cap\Sigma}|z|^{2-N-2s}dz\ge- \eta C|x|^{\eta-2s}. \nonumber \end{align} where, in the second line, we have used that the set $\{|z|\le \zeta |x|\}\cap\Sigma$ is radially symmetric and that the first order term of the expansion vanishes in the principal value sense\EEE. On the other hand, one has that \be\label{16:43bis} I_2\ge|x|^{\eta-2s}\int_{\{|y|\ge \zeta\}}\frac{1-(1+|y|)^{\eta}}{|y|^{N+2s}}dy. \ee Combining this last inequality with \eqref{16:42bis} and \eqref{16:43bis}, we obtain that \[ \mathcal{L}_s(\Omega(x),\varphi_r(x))\ge- g(\eta)d(x)^{\eta-2s}, \] where $$g(\eta)=C\left(\eta+\int_{\{|y|\ge \zeta\}}\frac{1-(1+|y|)^{\eta}}{|y|^{N+2s}}dy\right).$$ Notice that, thanks to the Lebesgue Dominated Convergence Theorem, the integral in the brackets above goes to zero as $\eta\to0$. It follows that \[ \frac{\alpha}{ d(x)^{2s}}u_{\eta}(x)+\mathcal{L}_s(\Omega(x),\phi_r(x))-f(x)\ge (\alpha-g(\eta))d(x)^{\eta-2s}. \] At this point it is enough to chose $\eta\le\bar \eta$ satisfying $ \alpha-g(\bar \eta)= {\alpha}/2$ in order to conclude the proof. \end{proof} Let us now provide a comparison principle for equation \eqref{parabolic}. This relies on Lemma \ref{differenza}, which is in turn based on the regularization of sub/super-solutions through sup/inf convolution. \begin{lemma}[Comparison] \label{timecomp} Assume \eqref{Sigmadef}, \eqref{contpar}-\eqref{alphap}, that $T\in (0,\infty)$, that $u(t,x)$ and $v(t,x)$ are sub- and supersolutions to \eqref{parabolic}, respectively, that they are ordered on the boundary, namely $u\le v$ on $ (0,T)\times\partial\Omega $, and that $u(0,\cdot)\le v (0,\cdot)$ on $\Omega$. Then, \begin{equation} u\le v \ \ \mbox{in} \ \ (0,T)\times\Omega . \end{equation} \end{lemma} \begin{proof} Given $\delta>0$, let us introduce the function $u_{\delta}(t,x)=u(t,x)-\frac{\delta}{T-t}$ and notice that it is a viscosity subsolution to \eqref{parabolic}, namely, \[ \partial_t u_{\delta}(t,x) +h(t,x)u_{\delta} (t,x) +\mathcal{L}_{s}(\Omega(t,x), u_{\delta} (t,x) )- f(t,x)\le -\frac{\delta}{T^2}<0. 
\] We firstly show that $u_{\delta}\le v$, for any $\delta>0$, and then conclude the proof by taking the limit as $\delta$ goes to $0$. Using Lemma \ref{differenza}, we deduce that $w=u_{\delta}-v$ solves in the viscosity sense \[ \partial_t w(t,x) +h(t,x)w(t,x)+\mathcal{L}_{s}(\Omega(t,x), w (t,x) )\le0 \ \ \ \mbox{in} \ \Omega. \] Let us assume by contradiction that $\sup_{(0,T)\times\Omega}w= M >0$. Due to the ordering assumption on the parabolic boundary $ (0,T)\times\partial\Omega $ and on the initial conditions, and the behavior of $u_{\delta}$ as $t\to T^-$, $M$ is attained inside at $(\bar t, \bar x)\in(0,T)\times\Omega$. This implies that the constant function $M$ touches from above $w$ at the point $(\bar t, \bar x)$, and then it is an admissible test function for $w$ to be a subsolution. It follows that \[ \frac{\alpha}{d^{2s}(\bar x)}M\le h(\bar t, \bar x)M\le 0, \] that is clearly a contradiction. Then $u_{\delta}-v=w\le 0$ for any $\delta>0$ and the assertion follows. \EEE \end{proof} \begin{cor}[Elliptic comparison] \label{ellcomp} Assume \eqref{alpha}-\eqref{Sigmadef} that $u(x)$ and $v(x)$ are sub- and supersolutions to \eqref{10:17}, respectively, and that $u\le v$ on $\partial\Omega$. Then, \begin{equation} u\le v\ \ \mbox{in} \ \ \Omega . \end{equation} \end{cor} The proof of this corollary can be easily deduced from that of Corollary \ref{ellipticdif} and we omit the details for the sake of brevity. We are now ready to present a first existence result, which relies on the possibility of finding suitable barriers for the parabolic problem. We will later check that such barriers can be easily obtained from Lemma \ref{barriercone}. \begin{teo}[Existence, given barriers] \label{perronpar}Assume \eqref{Sigmadef}, \eqref{contpar}-\eqref{alphap}. Let $T \in (0,\infty) $ and $\underline l(t,x)$ and $\overline l(t,x)$ be sub- and supersolution to \eqref{parabolic}, respectively, with $\underline l=\overline l=0$ on $(0,T) \times \partial \Omega$. Then, for any $u_0\in C(\Omega)$ such that $\underline l(0,x)\le u_0(x) \le \overline l(0,x)$ for all $x\in\Omega$, problem \eqref{parabolic} admits a unique viscosity solution. \end{teo} \begin{proof} We aim at applying Perron's method. Let us set \begin{align*} A&=\Big\{w\in \USC(\Omega\times(0,T)) \ : \ \underline l(t, x)\le w(t,x)\le \overline l(t, x) \ \mbox{for $(t, x) \in (0,T) \times \partial \Omega$, } \\ &\hspace{25mm} w \ \mbox{is a subsolution to \eqref{parabolic}, and } w(x,0)\le u_0(x) \Big\}. \end{align*} Since $\overline l \in A\not = \emptyset$, we can set \[ u(t,x)=\sup_{w\in A} w(t,x). \] By definition, it follows \EEE that for any $(\bar t, \bar x)\in(0,T)\times\Omega$ there exists a sequence $\{v_n\}\subset A$ that $\Gamma-$converges to the uppersemicontinuous envelop $u^*$ at $(\bar t, \bar x)$. We \EEE can use Lemma \ref{stability} to show that $u^*$ is a subsolution to the equation in \eqref{parabolic}. Moreover $u^*(\cdot,0)\le u_0(\cdot)$ in $\Omega$ and $u^*(t,\cdot)\le 0$ on $\partial\Omega$ for any $t\in(0,T)$. In fact, \EEE assume by contradiction that there exists $\bar x \in \Omega$ such that $u^*(x,0)- u_0(x)=\xi>0$. This would mean that there exist $\{x_n\}\subset\Omega$, $\{t_n\}\subset [0,T)$ and $\{w_n\}\subset A$ such that \[ x_n\to \bar x, \ \ \ t_n\to 0 \ \ \ \mbox{and} \ \ \ w_n(x_n,t_n)\to u_0(\bar x)+\xi, \] that is in contradiction with the definition of $u$. Similarly we check that $u^*(t,\cdot)\le 0$ on $\partial\Omega$. 
This implies that $u^*\in A$ and, by definition of $u$, we get that $u=u^*$ Now we claim that the lower-semicontinuous envelope \EEE $u_*$ is a supersolution to \eqref{parabolic} and that $u_*(x,0)\ge u_0(x)$ for all $x\in\Omega$. Once the claim is proved, we can apply the comparison principle of Lemma \ref{timecomp} to the subsolution $u$ and the \EEE supersolution $u_*$ to infer that \[ u\le u_* . \] This implies that $u_*=u^*=u$ is a viscosity solution to \eqref{parabolic} that satisfies the boundary and initial conditions \EEE in the classical sense.\\ Let us hence prove that $u_*$ is a supersolution with $u_*(x,0)\geq u_0(x)$. \EEE By contradiction, we assume there exists $\phi\in C^2( (0,T)\times\Omega )$ such that $u_*-\phi$ has a strict global minimum at $(t_0,x_0)$, $u_*(t_0,x_0)=\phi(t_0,x_0)$ and \be\label{16:03} \partial_t \phi(t_0,x_0)+ h(t_0,x_0)u_*(t_0,x_0)+\int_{\Omega(t_0,x_0)} \frac{\phi(t_0,x_0)-\phi(z,t_0)}{|x_0-z|^{N+2s}}dz<f(t_0,x_0). \ee This means that there exists $\epsilon>0$ such that the function \[ F(t,x)=\partial_t \phi(t,x) h(x)u_*(t,x)+h(t,x)\phi(t,x)+\mathcal{L}_{s} (\Omega(t,x),\phi (t,x) \EEE)-f(t,x) \] satisfies $F(t_0,x_0)=-\epsilon$. Since such a function is continuous at $(t_0,x_0)$, \EEE there exists $r>0$ such that $F(t,x)<-\frac{\epsilon}{2}$ for all $(t,x)\in \overline{B_r(t_0,x_0)}$ where $B_{r}(t_0,x_0)=\{(|t- t_0|^2+ |x- x_0|^2)^{\frac12}<r\}\subset (0,T)\times\Omega$. Let us define \EEE \[ \delta_1=\inf_{x\in\Omega \setminus B_{r}(t_0,x_0)} (v- \phi)(t,x)>0, \ \ \ \ \ \ \delta_2=\frac{\epsilon}{4 \sup_{x\in {B_r(x_0)}}h(x)}, \] and set $\delta=\min\{\delta_1,\delta_2\}$. With this choice of $r$ and $\delta$ we define \[ V = \begin{cases} \max\{v ,{\phi}+\delta\} \qquad & \mbox{in } B_{r}(t_0,x_0),\\ v & \mbox{otherwise}\, . \end{cases} \] Notice \EEE that, since $v$ is upper semincontinuous, \EEE the set $\{v -{\phi}-\delta<0\}$ is open and nonempty. (since, by definition of lower semicontinuous envelop, there exists a sequence $x_n\to x_0$ such that $v(z_n)\to u_*(x_0)=\phi(x_0)$ as $n\to\infty$). Moreover $\{v -{\phi}-\delta<0\}\subset B_{r}(t_0,x_0) $ thanks to the choice of $\delta_1$. We want to prove that $V $ is a subsolution. Let us consider now $\psi\in C^2( (0,T)\times\Omega )$ such that $V -\psi$ has a global maximum at $(\tau_0,y_0)$ \EEE and $\psi(\tau_0,y_0)=V (\tau_0,y_0)$. If $V (\tau_0,y_0)=v(\tau_0,y_0)$, since $V \ge v$, it results that $v -\psi$ has \EEE a global maximum at $(\tau_0,y_0)$ and $v(\tau_0,y_0)=\psi(\tau_0,y_0)$. Using that $v $ is a subsolution we get that \begin{align*} &\partial_t \psi(\tau_0,y_0)+ h(y_0)V(\tau_0,y_0)+\mathcal{L}_s(\Omega(\tau_0,y_0),\psi (\tau_0,y_0) \EEE)\\ &\quad =\partial_t \psi(\tau_0,y_0)+h(y_0) v(\tau_0,y_0)+\mathcal{L}_s(\Omega(\tau_0,y_0),\psi(\tau_0,y_0) \EEE) \le f(\tau_0,y_0). \end{align*} Let us now focus on the case $V (\tau_0,y_0)=\phi(\tau_0,y_0)+\delta\neq v(\tau_0,y_0)$. This implies that \[ \partial_t \psi(\tau_0,y_0)=\partial_t \phi(\tau_0,y_0). \] and that $(\tau_0,y_0)\in B_{r}(t_0,x_0)$. Then we have that \[ \phi+\delta-\psi\le V -\psi\le 0 \ \ \ \mbox{in} \ \ B_{r}(t_0,x_0), \] where we have used the fact that $\phi+\delta\le V $ in $B_{r}(t_0,x_0)$. Moreover, we readily check \EEE \[ \phi+\delta-\psi\le \phi+\delta-v \le 0 \ \ \ \mbox{in} \ \ (0,T)\times\Omega \setminus B_{r}(t_0,x_0), \] since $v \equiv V \le \psi$ in $ (0,T)\times\Omega \setminus B_{r}(t_0,x_0)$ and thanks to the definition of $\delta_1$. 
As effect of \EEE the two inequalities above, we deduce that $\phi+\delta\le \psi$ in $ (0,T)\times\Omega $. It follows that \EEE \begin{align*} &\partial_t \psi(\tau_0,y_0)+ h(\tau_0,y_0)V(\tau_0,y_0)+\mathcal{L}_s(\Omega(\tau_0,y_0),\psi (\tau_0,y_0) \EEE) \\ &\quad \le\partial_t \phi(\tau_0,y_0) +h(\tau_0,y_0)(\phi(\tau_0, \EEE y_0)+\delta)+\mathcal{L}_s(\Omega(\tau_0,y_0),\phi (\tau_0,y_0) \EEE) \\ &\quad \le f(\tau_0,y_0)-\frac{\epsilon}2+\frac{\epsilon}4<f(\tau_0,y_0), \end{align*} where the last inequality comes from the choice of $r$ and $\delta$. This leads \EEE to a contradiction since it implies \EEE that $V\in A$ and that $V>v\ge u$ somewhere in $ B_{r}(t_0,x_0)$. This proves that \EEE $u_*$ is a supersolution to \eqref{parabolic}. Finally let us \EEE prove that $u_*(x,0)\ge u_0(x)$ for all $x\in\Omega$. Again assume by contradiction that there exists $\bar x\in\Omega$ such that \be\label{spring} u_*(\bar x,0)< u_0(\bar x). \ee Our aim is to build \EEE a barrier from \EEE below for $u(t,x)$ in a neighborhood of $(0,\bar x)$ (hence a barrier for $u_*$, as well), contradicting \EEE \eqref{spring}. Thanks to the continuity of $u_0$, for any $\epsilon>0$ there exists $\delta_{\epsilon}< \frac12 d(\bar x)$ such that \[ |u_0(\bar x)-u_0(x)|\le\epsilon \ \ \ \mbox{if} \ \ \ |\bar x -x|\le \delta_{\epsilon}. \] Take now a function $\eta(x)\in C_c^{\infty}(B_1(0))$ with $0\le \eta\le 1$ and $\eta(0)=1$, and define \[ \tilde w(t,x) =a\eta\left(\frac{\bar x -x}{\delta_{\epsilon}}\right)-b-K\delta_{\epsilon}^{-2s}t, \] with $a=u_0(\bar x)-\epsilon+\|u_0\|_{\elle{\infty}}$, $b=\|u_0\|_{\elle{\infty}}$, and $K>0$ to be chosen below. \EEE Thanks to the choice of $a$ and \EEE $ b$ it is easy to check that $\tilde w(0,x)\le u_0(x)$. Moreover, recalling that supp$\left(\eta\left(\frac{\bar x -x}{\delta_{\epsilon}}\right)\right)\subset B_{\delta_{\epsilon}}(\bar x)$ and that the integral operator scales as $\delta^{-2s}$, we get that \[ \partial_t \tilde w+h \tilde w+\mathcal{L}_s(\Omega(t,x),\tilde w (t,x) \EEE )-f\le -K\delta_{\epsilon}^{-2s}+\delta^{-2s}C(\eta)+\|f\|_{\elle{\infty}}\le 0, \] where the last inequality follows by letting \EEE $K> C(\eta)+\delta_{\epsilon}^{2s}[aC(d(\bar x))+\|f\|_{\elle{\infty}}]$. Hence, \EEE $\tilde w\in A$ and, by definition of $u(t,x)$, $\tilde w(t,x)\le u(t,x)$. Now, \EEE for any $\epsilon>0$, there exists $\tilde{\delta}_{\epsilon}$ (possibly smaller then $\delta_{\epsilon}$) such that \[ u_0(\bar x)-2\epsilon\le \tilde w(t,x) \le u(t,x) \ \ \ \mbox{for} \ \ \ |\bar x -x|\le \tilde{\delta}_{\epsilon} \quad t\in [0,\tilde{\delta}_{\epsilon}). \] Then the same inequality holds for $u_*$, contradicting \EEE \eqref{spring}. \end{proof} \begin{proof}[Proof of Theorem \ref{existence}] Let us argue for $T < \infty$ first. Choose $\eta\le \min\{\bar{\eta},\eta_1\}$ where $\eta_1$ is from \eqref{fconintro}-\eqref{u0con} and $\overline \eta $ from Lemma \ref{barriercone} and Remark \ref{barpar}, and set $\overline l(t,x)= Q d(x)^{\eta}$ where $Q$ is a positive constant to be chosen later. Whenever a smooth function $\varphi$ touches $\overline l$ from above at $(t_0,x_0)$ we deduce that \begin{align*} &\partial_t\varphi_r(t_0,x_0)+h(t_0.x_0)\varphi_r(t_0,x_0)+\mathcal{L}_{s} (\Omega(t,x),\varphi_r(t_0.x_0))- f(t_0,x_0)\\ &\quad \ge\frac{\alpha}{d(x)^{2s}}\varphi_r(t_0,x_0)+\mathcal{L}_{s} (\Omega(t,x),\varphi_r(t_0,x_0))- |f(t_0,x_0)|\\ &\quad \ge \left(Q \frac{\alpha}{2}-|f(t_0,x_0)|d(x_0)^{2s-\eta}\right)d(x_0)^{\eta-2s}\ge 0. 
\end{align*} The first inequality comes from the fact that $\partial_t\varphi_r(t_0,x_0)$ must be zero and from assumption \eqref{alphap} whereas the second inequality follows by construction of $\overline l$ and by Lemma \ref{barriercone}. The third \EEE inequality follows from the assumption on $f$ (see \eqref{fconintro}) and by taking $Q$ large enough. This proves that $\overline{l}(t,x)$ is a supersolution of \eqref{parabolic}. Similarly, we can show that $\underline{l}(t,x)=-\overline{l}(t,x)$ is a subsolution. By possibly taking an even larger value of $Q$ if necessary, we deduce that $\underline l(0,x)\le u_0(x) \le \overline l(0,x)$, thanks to assumption \eqref{fconintro} on $u_0$ and to the choice of $\eta$. At this point, we can apply Theorem \ref{perronpar} and conclude the proof. The limiting case $T=\infty$ can be tackled by passing to the limit in the the sequence $\{u_n\}$ of solutions of problem \eqref{parabolic} in $(0,n)\times\Omega$. Thanks to Lemma \ref{timecomp} we have that \[ u_n(t,x)\equiv u_m(t,x) \ \ \ \mbox{in} \ \ (0,\min\{n,m\})\times\Omega. \] Then, for any $(t,x)\in(0,\infty)\times\Omega$, we can uniquely define $u(t,x)=u_{[t]+1}(t,x)$, where $[t]$ is the integer part of $t$. From the comparison principle applied on each domain $(0,n)\times\Omega$, this uniquely defines a solution for all times. \end{proof} As mentioned above, we are not giving the details of the proof of Theorem \ref{teide}. Indeed, the elliptic case of Theorem \ref{teide} follows again from by Perron method, by means of the barriers from Lemma \ref{barriercone}. Here, one is asked to use an elliptic version of the comparison Lemma \ref{timecomp}, which can be deduced using Corollary \ref{ellipticdif}. \section{The eigenvalue problem}\label{seceigenvalue} In this section, we focus on \EEE the eigenvalue problem associated to the operator \EEE \eqref{10-6}. Before discussing our specific \EEE notion of eigenvalue, we prepare some technical tools. \begin{lemma}[Strong maximum principle]\label{strongmax} Assume \EEE\eqref{alpha}-\eqref{Sigmadef} and let \EEE $u\in \LSC_b(\Omega)$ \EEE solve $$h(x) u(x)+\mathcal{L}(\Omega(x),u(x))\ge 0$$ in the viscosity sense in $\Omega$ and $u\ge0$ in $\partial\Omega$. Then, either $u\equiv0$ or $u>0$ in $\Omega$. \end{lemma} \begin{proof} Notice that, thanks to the comparison principle, we have that $u\ge0$ in $\Omega$. Let us assume that $u(x_0)=0$ at some $x_0\in\Omega$ and that, by contradiction, $u(y_0)>0$, for some $y_0\in\Omega$. If $y_0\in \Omega(x_0)$, since $x_0$ is a minimum for $u$, there exists $\varphi\in C^2(\Omega)$ such that $\varphi(x_0)=u(x_0)=0$, $\varphi(x_0)\le u(x_0)$ in $\Omega$. Moreover, since $u\in \LSC_b(\Omega)$\EEE, we can chose $\varphi$ nonnegative and nontrivial in $\Omega(x_0)$. Since $\varphi$ is an admissible test function for $u$ at point $x_0$ and it follows that \[ \int_{\Omega(x_0)}\frac{-\varphi(x_0+z)}{|z|^{N+2s}}\ge0. \] This is however contradicting the fact that $\varphi\ge0$ is nontrivial in $\Omega(x_0)$ and proves that $u(x_0)=0$ implies $u=0$ in $\Omega(x_0)$. If $y_0\notin \Omega(x_0)$, thanks to assumption \eqref{Sigmadef} and the fact that $\Sigma$ is open, there exists a finite set of points $\{x_i\}_{i=0}^K\subset\Omega$ such that $x_i\in \Omega(x_{i-1})$ for $i=1,\cdots, K$ and $y_0\in\Omega(x_K)$. Using inductively the previous part we deduce that $u=0$ in each $\Omega(x_i)$, that is $u(y_0)=0$, which is again a contradiction. 
\end{proof} The next technical Lemma allows us to restrict the operator to a subdomain of $\Omega$. This requires to modify both the sets $\Omega(x)$ and the function $h$. Thanks to the assumptions, in particular the density bound for $\Sigma$ in \eqref{Sigmadef}, it turns out the the restricted operator satisfies the same properties of the original one\EEE. \begin{lemma}[Localization]\label{localization} Let $f\in C(\Omega)$ and assume that $v$ solves in a viscosity sense \[ h(x)v(x)\EEE+\mathcal{L}_s(\Omega(x),v(x)\EEE)\le f(x)\EEE \ \ \ \mbox{in} \ \ \Omega. \] If the the open set $O\subset\Omega$ is such that $v\le 0$ in $\Omega\setminus O$, then $v$ also solves in the viscosity sense \[ j(x)v(x)\EEE+\mathcal{L}_s(\Xi(x),v(x)\EEE)\le f(x)\EEE \ \ \ \mbox{in} \ \ O, \] where $\Xi(x)=\Omega(x)\cap O$ and $j(x)=h(x)+\int_{\Omega(x)\setminus O}\frac{dy}{|x-y|^{N+2s}}$. By additionally assuming % Let that $O$ coincides with some ball $\tilde B\subset \Omega$ and by setting $\tilde d(x)=\mbox{dist}(x,\partial \tilde B)$, it holds true that \be\label{restrictedh} c_1\tilde d(x)^{-2s}\le j(x)\le c_2\tilde d(x)^{-2s} \ \ \ x\in \tilde B. \ee \end{lemma} \begin{proof} Let us assume that $\max_{O}( v-\varphi)=(v-\varphi)(\bar x)=0$ and that $B_r(\bar x)\subset\subset O$. It \EEE is possible to extend $\varphi$ to all $\Omega$ so that $\max_{\Omega} ( v-\varphi)=(v-\varphi)(\bar x)=0$ (with a slight abuse of notation, we still indicate \EEE the extension by \EEE $\varphi$). Then, we have \begin{align*} &f(\bar x)\ge h(\bar x)v(\bar x)+\int_{B_r(\bar x)\cap \Omega(\bar x)}\frac{\varphi(\bar x)-\varphi(y)}{|\bar x-y|^{N+2s}}dy+\int_{\Omega(\bar x)\setminus B_r(\bar x)}\frac{v(\bar x)-v(y)}{|\bar x-y|^{N+2s}}dy\\ &\quad =\left[h(\bar x)+\int_{\Omega(x)\setminus O}\frac{dy}{|x-y|^{N+2s}}\right]v(\bar x)+\int_{B_r(\bar x)\cap \Sigma(\bar x)}\frac{\varphi(\bar x)-\varphi(y)}{|\bar x-y|^{N+2s}}dy+\int_{\Xi(\bar x)\setminus B_r(\bar x)}\frac{v(\bar x)-v(y)}{|\bar x-y|^{N+2s}}dy\\ &\quad -\int_{\Omega(x)\setminus O}\frac{v(y)}{|x-y|^{N+2s}}dy\ge \left[h(\bar x)+\int_{\Omega(x)\setminus O}\frac{dy}{|x-y|^{N+2s}}\right]v(\bar x) +\int_{\Xi(\bar x)}\frac{\varphi_r(\bar x)-\varphi_r(y)}{|\bar x-y|^{N+2s}}dy, \end{align*} where the last \EEE inequality comes from the fact that \EEE $v\le 0$ in $\Omega\setminus O$. Let us consider now the case $O\equiv \tilde B$. The estimate from above in \eqref{restrictedh} can be deduced as in \eqref{easypart}. We omit the \EEE details. To show the estimate from below, fix $k>1$ so that \[ c- \frac 2{\omega_n}|B_1(0)\cap A(k)|\ge \frac12 c, \] where $c$ is the constant in \eqref{Sigmadef} and $A(k)=\{y\in\mathbb{R}^N \ : \ -k^{-1}\le y_1\le 0\}$. We also point out that the symmetry of $\Sigma$ implies, for $B^{\pm}_r(0)=B_r(0)\cap \{z\le0\}$ and $r>0$, that \be\label{auxilia} |\Sigma\cap B^{\pm}_{r}(0)|\ge \frac c2|B_{r}(0)|. \ee Moreover, without loss of generality, we assume that $\tilde B=\{|y|\le 1\}$ (this is always true up to a translation and dialation) and take $x\in \{|y|\le 1\}$ such that $k\tilde d(x)< \zeta d(x)$. Le us take now a system of coordinates with origin in the center of $\tilde B$ such that $|x|=-x_1=1-\tilde d(x)$. 
It follows that $\Omegat(x)\cap B_{k\tilde d(x)}(0)=\Sigma \cap B_{k\tilde d(x)}(0)$ and \begin{align}\label{cip} \int_{{\Omega}(x)\setminus \tilde B}\frac{dy}{|x-y|^{N+2s}}\ge \int_{{\Omega}(x)\cap B_{k\tilde d(x)}(x) \setminus \{y_1>-1\}}\frac{dy}{|x-y|^{N+2s}}=\int_{{\Sigma}\cap B^-_{k\tilde d(x)}(0) \setminus \{z_1>-\tilde d(x)\}}\frac{dz}{|z|^{N+2s}} \\ \ge\tilde d(x)^{-n-2s}|{\Sigma}\cap B^-_{k\tilde d(x)}(0) \setminus \{z_1>-\tilde d(x)\}|\nonumber. \end{align} We get that \begin{align}\label{ciop} |{\Sigma}\cap B^-_{k\tilde d(x)}(0) \setminus \{z_1>-d(x)\}|\ge |{\Sigma}\cap B^-_{k\tilde d(x)}(0)|-|\{-d(x)\le z_1<0\}\cap B^-_{k\tilde d(x)}(0)| \\ \ge \frac c2 |B_{k\tilde d(x)}|- (kd(x))^N| B_1(0)\cap A(k) |\ge \frac {\omega_n}{4}c(kd(x))^N,\nonumber \end{align} where we have used \eqref{auxilia} and the definition of $k$ in the last two inequality respectively. Putting together \eqref{cip} and \eqref{ciop} and recalling the condition $k\tilde d(x)< \zeta d(x)$, we deduce that \[ \int_{{\Omega}(x)\setminus \tilde B}\frac{dy}{|x-y|^{N+2s}}\ge c_1 \tilde d(x)^{-2s} \ \ \ \mbox{for all $x\in \tilde B$ with $k\tilde d(x)\le\zeta$dist$(\tilde B, \Omega)$}. \] This, together with the definition of $j(x)$, completes the proof of the Lemma. \EEE \end{proof} \begin{lemma}[Refined Maximum Principle]\label{aap} Assume \eqref{alpha} and \eqref{Sigmadef}. Let $\lambda>0$, $0 \leq \EEE f\in C(\Omega)$, and assume that $u\in \LSC_b(\Omega)$\EEE, with $u>0$ in $\Omega$ and $u=0$ on $\partial\Omega$, satisfies \[ h(x)u(x) +\mathcal{L}_s(\Omega(x),u(x) ) \ge \lambda u(x) +f(x) . \] Moreover, let $v\in USC_b(\Omega)$\EEE, with $v\le 0$ on $\partial \Omega$, satisfy \[ h(x)v(x) \EEE+\mathcal{L}_s(\Omega(x),v(x) \EEE)\le \lambda v(x) \EEE. \] If $f$ is non trivial then $v\le0$. If $f\equiv0$ and there exists $x_0\in\Omega$ such that $v(x_0)>0$ then $v=tu$ for some $t>0$. \end{lemma} \begin{proof} Let $z_t=v-tu$ for $t>0$. Then, thanks to Corollary \ref{ellipticdif} we have that $z_t$ satisfies \be\label{25.6} h(x)z_t(x)+\mathcal{L}_s(\Omega(x),z_t(x)\EEE)\le \lambda z_t(x). \ee Notice that for all $\rho>0$ and any $t>0$ such that $$t> \frac{\sup_{d(x)>\frac{\rho}{2}}v(x)}{\inf_{d(x)>\frac{\rho}{2}}u(x)}$$ (recall that $u>0$ in $\Omega$) we have that \[ \{x\in\Omega \ : \ d(x)\ge\rho \}\subset \{x\in\Omega \ : \ z_t<0 \}. \] We now use \EEE Lemma \ref{localization} to restrict \eqref{25.6} to \EEE $\Omega_\rho=\{x\in\Omega \ : \ d(x)<\rho \}$ and get \be\label{25.6bis} j(x)z_t+\mathcal{L}_s(\Xi(x),z_t)\le \lambda z_t \ \ \ \mbox{in} \ \ \ \Omega_\rho, \ee where $j(x)=h(x)+\int_{\tilde{\Omega}(x)\setminus\Omega_\rho }\frac{dz}{|z|^{N+2s}}$ and $\Xi(x)=\Omega(x)\cup \Omega_\rho$. Taking $\rho$ such that $\rho< \left(\frac{\alpha}{\lambda}\right)^{\frac1{2s}}$ and using the coercivity assumption \eqref{alpha} on $h(x)$, it follows that $j(x)-\lambda>0$. Then, since $z_t\le 0$ on $\partial\Omega_{\rho}$, we can apply the comparison principle to \eqref{25.6bis} and \EEE deduce that $z_t\le0$ in $\Omega_\rho$. This means that $z_t\le 0$ in $\Omega$.\\ Let us focus on the case $0\not = f \EEE \ge0$ and assume, by contradiction, that there exists $x_0\in\Omega$ such that $v(x_0)>0$. Then, up to a multiplication with a positive constant, we have that $v(x_0)>u(x_0)$.\\ Let us set \[ \tau=\inf\{ t \ : \ z_t\le0 \ \mbox{in} \ \Omega\} \] and recall that $\tau>1$ since $v(x_0)>u(x_0)$. As $z_{\tau}\le 0$ we get \[ h(x)z_t(x)\EEE+\mathcal{L}_s(\Omega(x),z_t(x)\EEE)\le \lambda z_t(x)\EEE\le 0 \quad \forall t \geq \tau. 
\] We can apply the strong maximum principle of Lemma \ref{strongmax} to prove that either $z_{\tau}\equiv0$ or $z_{\tau}<0$. This latter case is not possible since it would contradict the definition of $\tau$. Having that $z_{\tau}\equiv 0$ we get $v_{\tau} := v=\tau u$. We have \[ h(x)v_{\tau}(x)+\mathcal{L}_s(\Omega(x),v_{\tau}(x))\le \lambda v_{\tau} (x) \] by assumption and, since $v_{\tau}= \tau u$, \[ h(x)v_{\tau}(x)+\mathcal{L}_s(\Omega(x),v_{\tau}(x))\ge \lambda v_{\tau}(x)+ f (x). \] By combining these two inequality, using Corollary \ref{ellipticdif} and recalling that $f$ is nontrivial, we obtain a contradiction. Hence, $v\le0$. Let us now consider the case $f\equiv0$ and $v(x_0)>0$ for some $x_0\in\Omega$. Upon multiplying by a positive constant, we can assume that $v(x_0)>u(x_0)$. Following exactly the same argument and notation of the previous step we obtain that either $z_{\tau}\equiv0$ or $z_{\tau}<0$. The latter option again \EEE leads to a contradiction. Hence, $z_{\tau}\equiv0$, which corresponds to the assertion. \EEE \end{proof} \begin{teo}\label{approximation} Assume \eqref{alpha}-\eqref{Sigmadef}. Given $\lambda>0$ and a nonzero $0\le f\in C(\Omega)$ satisfying \eqref{fcon}, let us assume that there exists $0\le u\in \LSC_b(\Omega)$ \EEE such that \[ \begin{cases} h(x)u(x)\EEE+\mathcal{L}_{s} (\Omega(x),u(x)\EEE)\ge\lambda u(x)\EEE+ f(x) \qquad & \mbox{in } \Omega,\\ u(x) = 0 & \mbox{on } \partial \Omega. \end{cases} \] Then, for any $\mu\le \lambda$ and $ |g|\le f$, there exists a solution to \be\label{24-6bis} \begin{cases} h(x)v(x)\EEE+\mathcal{L}_{s} (\Omega(x),v(x)\EEE)= \mu v(x)\EEE+g(x) \qquad & \mbox{in } \Omega,\\ v(x) = 0 & \mbox{on } \partial \Omega. \end{cases} \ee If moreover $g$ is nonnegative and nontrivial then $v>0$.\EEE \end{teo} \begin{proof} Let us set $v_0=0$ and recursively \EEE define the sequence $\{v_n\}$ of solutions to \[ \begin{cases} h(x)v_n(x)\EEE+\mathcal{L}_{s} (\Omega(x),v_n(x))=\mu v_{n-1}+ g(x) \qquad & \mbox{in } \Omega,\\ v_n(x) = 0 & \mbox{on } \partial \Omega. \end{cases} \] Notice that the existence of each $v_n$ is ensured by Theorem \ref{existence}. We now prove that $ |v_n|\le u$ by induction on $n$. Let $n= 1$. As $|g|\leq f$ the comparison principle from Corollary \ref{ellcomp} ensures that $|v_1|\leq u$. Assume that $|v_{n-1}| \leq u$. Since $|\mu v_{n-1}+ g(x)|\le \lambda u(x)+ f(x)$, we can use the comparison principle (see Corollary \ref{ellcomp}) to deduce that $|v_{n}|\le u$. In case $g\ge0$ a similar argument shows that $0\le v_{n}\le v_{n+1}$.\\ This implies that $|\mu v_{n-1}+ g(x)|\le \lambda u+f \le C d(x)^{\eta_f-2s}$, where we have used assumption \eqref{fcon} for the last inequality. Using Lemma \ref{barriercone}, we can conclude that there exist a large $Q$ (independent of $n$) such that $\overline{l}(x)=Qd(x)^{\eta}$, with $\eta=\min\{\bar \eta, \eta_f\}$, solves \[ h(x)\overline{l}(x)+\mathcal{L}_{s} (\Omega(x),\overline{l}(x))\ge \mu v_{n-1}+ g(x) \qquad \mbox{in } \Omega. \] Thanks again to the comparison principle, we deduce that $v_n\le Qd(x)^{\eta}$. Similarly, it follows that $v_n\ge- Qd(x)^{\eta}$. Let us consider then the half-relaxed limits of the sequence $v_n$ \[ \overline{v}(x)=\sup\{\limsup_{n\to\infty} v_n(x_n) \ : \ x_n\to x\}, \ \ \ \ \underline{v}(x)=\inf\{\liminf_{n\to\infty} v_n(x_n) \ : \ x_n\to x\}. \] Notice that by construction both $\overline{v}$ and $\underline{v}$ vanish on $\partial\Omega$. 
Taking advantage of Lemma \ref{stability}, we deduce that $\overline{v}$ and $\underline{v}$ are respectively sub and super-solution to \eqref{24-6bis}. Moreover, Corollary \ref{ellipticdif} implies that $w=\overline{v}-\underline{v}$ solves \[ h(x)w(x)+\mathcal{L}_{s} (\Omega(x),w(x))\le \mu w(x) \ \ \mbox{ in }\ \ \Omega, \ \ \mbox{ and } \ \ w=0 \ \ \mbox{ on } \ \ \partial\Omega.\\ \] Since $u$ satisfies \[ h(x)u(x)+\mathcal{L}_{s} (\Omega(x),u(x))\ge\mu u(x)+ f(x) \qquad \mbox{in } \Omega,\\ \] we may use Lemma \ref{aap} to conclude that $w\le0$, namely $\overline{v}\le\underline{v}$. Due to the natural order between the two functions, we deduce that $v=\overline{v}=\underline{v}$ is a viscosity solution to \eqref{24-6bis}. If $g \geq 0$ and not trivial, by using the strong maximum principle we easily deduce that $v>0$. \EEE \end{proof} Let us assume that the nontrivial $0\le f\in C(\Omega)$ satisfies \eqref{fcon} and recall the definition of \EEE the set \[ E_f=\{\lambda\in\mathbb R \ : \ \exists v\in C(\overline{\Omega}), \ v>0 \mbox{ in } \Omega, \ v=0 \mbox{ on } \partial\Omega, \ \mbox{such that} \ hv+\mathcal{L}_s(\Omega, \EEE v)= \lambda v+f\}. \] Moreover, let \EEE \be\label{28.06} \lambda_f \EEE =\sup \ E_f. \ee As we shall see, $\lambda_f$ does not depend on the particular choice of $f$. By definition and thanks to Theorem \ref{approximation} we deduce that \[ \mbox{if} \ \ \ g\le f \ \ \ \mbox{then} \ \ \ \lambda_g\le \lambda_f. \] The following Lemma shows us that $\lambda_f$ is finite and that $E_f$ is a left semiline. \begin{lemma}[]\label{acotado} Assume \eqref{alpha}-\eqref{Sigmadef} and that $0\le f\in C(\Omega)$ is nonzero and satisfies \eqref{fconintro}. Then, $\lambda_f$ is positive and finite and $E_f$ is a left semiline with $E_f \not = \Rz$. \end{lemma} \begin{proof} Notice that for any $$\dys \lambda\in \left(-\infty,\frac{\alpha}{\mbox{diam}(\Omega)^{2s}}\right)$$ the operator \[ u\mapsto \EEE [h(x)-\lambda]u +\mathcal{L}(\Omega(x),u) \] fulfills assumptions \eqref{ostationary}-\eqref{alpha}. We can apply the existence results and the strong maximum principle of the previous chapter to deduce that $(-\infty,\frac{\alpha}{\mbox{diam}(\Omega)^{2s}})\subset E_f$. Moreover, if $\lambda\in E_f$, Theorem \eqref{approximation} assures that any $\mu<\lambda$ belongs to $E_f$ as well. This proves that $E_f$ is a left semiline. To show that $E_{f}\neq \mathbb{R}$ let us take $\lambda<\lambda_f $. Since $E_f$ is a left semiline, there exists some $v\in C(\overline \Omega)$ with $v=0$ on $\partial\Omega$ and strictly positive in $\Omega$, such that \EEE \[ h(x)v(x)+\mathcal{L}_s(\Omega(x), v(x))= \lambda v(x)+f(x) \ \ \ \mbox{in the viscosity sense in } \ \Omega. \] Now we want to \emph{restrict} this inequality to a ball $B\subset\subset\Omega$ such that $f>0$ in $B$\EEE. In order to do it, for any $x\in B$, we define $\Xi(x)=\Omega(x)\cap B$. Taking advantage of the positivity $v$, we can apply Lemma \ref{localization} to $-v$ and deduce that \be\label{cipcip} j(x)v+{\mathcal{L}}_s(\Xi(x),v) \ge \lambda v+ f(x) \ \ \ \mbox{in the viscosity sense in } \ B, \ee where \[ j(x)=h(x)+\int_{{\Omega}(x)\setminus B}\frac{dy}{|x-y|^{N+2s}}. \] Thanks to Lemma \ref{localization}, we have \EEE that $j(x)$ satisfies \eqref{alphap} (by possibly changing the \EEE constants) and that the family $\{\Xi(x)\}$ satisfies the same kind of assumptions of $\{{\Omega}(x)\}$. 
Then, for any positive continuous function $g$ \EEE with compact support in $B$ there exists a unique viscosity solution to \[ \begin{cases} j(x)w(x)\EEE+\mathcal{L}_s(\Xi(x),w(x)\EEE)= g(x) \qquad & \mbox{in } B,\\ w(x) = 0 & \mbox{on } \partial B. \end{cases} \] Thanks to the strong maximum principle of Lemma \ref{strongmax} and the fact that $g$ has compact support in $B$, it follows that $0<g\le C_0 w$ for some positive constant $C_0$. If $C_0<\lambda$, we would get that \be\label{ciopciop} j(x)w(x)\EEE+\mathcal{L}_s(\Xi(x),w(x)) \le \lambda w(x)\EEE \ \ \ \mbox{in the viscosity sense in } \ B. \ee Applying Lemma \ref{aap} to \eqref{cipcip} and \eqref{ciopciop}, it would follow $w\le0$, which is a contradiction. This proves that $\lambda\le C_0$. Since the constant $C_0$ does not depend on $\lambda$, we finally deduce \[ \lambda_f =\sup \ E_f\le C_0. \] This concludes the proof of the Lemma. \end{proof} Let us provide now the proof of the well-posedness of the first-eigenvalue problem. \begin{proof}[Proof of Theorem \ref{mussaka}] We split the argument into subsequent steps. \textbf{Step 1:} Let us show at first that for a given $f\in C(\Omega)$ that satisfies \eqref{fconintro} and the additional condition \be\label{temp} f(x)\ge \theta>0 \ \ \ \mbox{in} \ \ \ \Omega, \ee problem \eqref{eigenproblem} admits a solution $v_f>0$ with $\lambda_f:=\sup E_f$. Let $\{\lambda_n\}$ be a sequence that converges to ${\lambda_f}$ and consider the associate sequence $\{v_n\}\subset C(\overline{\Omega})$, $v_n>0$ of solutions to \be\label{aug} \begin{cases} h(x)v_n(x)+\mathcal{L}_{s} (\Omega(x),v_n(x))=\lambda_n v_n(x)+ f(x) \qquad & \mbox{in } \Omega,\\ v_n(x) = 0 & \mbox{on } \partial \Omega. \end{cases} \ee We first claim that $\|v_n\|_{\elle{\infty}}\to\infty$. Indeed, let us assume by contradiction that there exists $k>0$ such that $ |v_n| \le k$. Thanks to Lemma \ref{barriercone}, we deduce that there exists $Q=Q(k)$ such that the function $\overline{l}(x)=Qd(x)^{\eta}$, with $\eta=\min\{\bar \eta, \eta_f\}$, solves in the viscosity sense \[ h(x)\overline{l}(x)+\mathcal{L}_{s} (\Omega(x),\overline{l}(x))\ge \lambda_n v_n(x)+ f(x) \quad \mbox{in } \Omega\,\quad \forall \ n>0 . \] Thanks to Corollary \ref{ellcomp} and the sign of $v_n$, we have that \be\label{8mar} 0<v_n(x)\le Q d^{\eta}(x), \ee where the right-hand side does not depend on $n$. Using Lemma \ref{stability}, we deduce that $\underline{v}(x)=\inf\{\liminf_{n\to\infty} v_n(x_n) \ : \ x_n\to x\}$ is a supersolution to \EEE \be\label{29-11} \begin{cases} h(x)v(x)+\mathcal{L}_{s} (\Omega(x),v(x))= \lambda_f \EEE v(x)+ f(x) \qquad & \mbox{in } \Omega,\\ v(x) = 0 & \mbox{on } \partial \Omega. \end{cases} \ee Since $v_n> 0$ also $\underline{v}\ge0$ in $\Omega$ and Lemma \ref{strongmax} assures that $\underline{v}>0$ in $\Omega$. Then, Theorem \ref{approximation} provides a solution $v_{\infty}>0$ to \eqref{29-11}. \EEE Taking $\epsilon>0$ such that $f\ge \frac12f+\epsilon v_{\infty}$, which is possible thanks to \eqref{temp}, it follows that $\tilde{v}_{\infty}=2v_{\infty}$ satisfies \[ h(x)\tilde{v}_{\infty}(x)+\mathcal{L}_{s} (\Omega(x),\tilde{v}_{\infty}(x))\ge(\lambda_f+\epsilon) \tilde{v}_{\infty}(x)+ f(x) \qquad \mbox{in } \Omega. \] Taking again advantage of Theorem \ref{approximation} we reach a contradiction with respect to the definition of $\lambda_f$. \EEE We have then proved that $\|v_n\|_{\elle{\infty}}\to\infty$. 
Setting $u_n=v_n\|v_n\|_{\elle{\infty}}^{-1}$ we obtain that \[ \begin{cases} h(x)u_n(x)+\mathcal{L}_{s} (\Omega(x),u_n(x))=\lambda_n u_n(x)+ \frac{ f(x) }{\|v_n\|_{\elle{\infty}}} \qquad & \mbox{in } \Omega,\\ u_n(x) = 0 & \mbox{on } \partial \Omega. \end{cases} \] Since $0<u_n\le 1$, we deduce as in \eqref{8mar} that $u_n\le Qd^{\eta}(x) $ and then \[ \overline{u}(x)=\sup\{\limsup_{n\to\infty} u_n(x_n) \ : \ x_n\to x\}, \ \ \ \ \underline{u}(x)=\inf\{\liminf_{n\to\infty} u_n(x_n) \ : \ x_n\to x\}, \] are well defined and vanish on the boundary. Lemma \ref{stability} assures us that they are sub and super solution to \be\label{paris} \begin{cases} h(x)u(x)+\mathcal{L}_{s} (\Omega(x),u(x))= \lambda_f \EEE u(x)\qquad & \mbox{in } \Omega,\\ u_{\infty}(x) = 0 & \mbox{on } \partial \Omega. \end{cases} \ee By definition of $u_n$, there exists a sequence of points $\{x_n\}\subset\Omega$ such that $u_n(x_n)=1$. Thanks to the uniform bound $u_n\le Qd^{\eta}(x) $, we deduce that there exists $\Omega'\subset\subset \Omega$ and that $\{x_n\}\subset\Omega'$. Using Lemma \ref{regelliptic}, it follows that \[ \|u_n\|_{C^{\gamma}(\Omega')}\le \tilde C(C,s,\zeta,d(\Omega'', \Omega'), Q\|d\|_{\elle{\infty}}^{\eta}), \] with $\Omega'\subset\subset \Omega''\subset\subset \Omega$. Then, up to a not relabeled subsequence, $u_n$ uniformly converges to a continuous function $u\in C(\overline{\Omega'})$. Furthermore, $u\equiv \overline{u}\equiv \underline{u}$ in $\Omega'$ and $u_n(x_n)\to u(\bar x)=1$ for some $\bar x\in \Omega'$. This entails on the one hand that $\underline{u}$ is a non negative and nontrivial supersolution to \eqref{paris}, so that Lemma \ref{strongmax} implies $\underline{u}>0$. On the other hand, we obtain that there exists $\bar x\in \Omega'$ such that $\overline{u}(\bar x)>0$. By applying again Lemma \ref{aap} directly to $\overline u$ and $\underline u$, we conclude that there exist $t>0$ such that $\overline u= t \underline u$. This implies that both functions are continuous. Moreover, since $t>0$ we deduce that both $\underline u$ and $\overline u$ are at the same time sup- and super-solutions. Hence, $\underline u$ and $\overline u$ are eigenfunctions related to $\lambda_f$. \EEE Now we want to get rid of assumption \eqref{temp}. Notice that we used it to show that $\|v_n\|_{\elle{\infty}}$ must diverge. Then, we have to prove that $\|v_n\|_{\elle{\infty}}\to\infty$, assuming that the nontrivial positive continuous function $f$ solely satisfies \eqref{fcon}. Again, let us argue by contradiction and suppose that $\|v_n\|_{\elle{\infty}}\le k$. This would lead again to the existence of a non trivial $v_{\infty}$ solution to \eqref{29-11}. Setting $g=\sup\{f,\theta\}$, the previous argument provides us with $\lambda_g>0$ and $v_g>0$ solution to \eqref{eigenproblem}. Since $f\le g$, by construction we deduce that $\lambda_f\le \lambda_g$. Assume that $\lambda_f< \lambda_g$ and take $\mu\in (\lambda_f,\lambda_g)$. Then, thanks to the definition of $\lambda_g$, the fact that $f\le g$ and by using Theorem \ref{approximation}, it results that the following problem \[ \begin{cases} h(x)z(x)+\mathcal{L}_{s} (\Omega(x),z(x))=\mu z(x) + f(x) \qquad & \mbox{in } \Omega,\\ z(x) = 0 & \mbox{on } \partial \Omega, \end{cases} \] admits a positive solution. This however contradicts the fact that $\lambda_f$ is a supremum. On the other hand, if $\lambda_f= \lambda_g$, Lemma \ref{aap}, applied to $v_{\infty}$ and $v_g$, would imply $v_g\le0$, which is again a contradiction. 
\textbf{Step 2:} Let us assume that there exists another couple $(\mu, w)\in \mathbb (0,\infty)\times C(\overline \Omega)$ that solves \eqref{eigenproblem} with $w>0$. If $\mu< \lambda_f$, we deduce that there exists $\lambda\in (\mu,\lambda_f)$ and, by definition of $\lambda_f$ and Lemma \ref{acotado}, a function $u\in C(\overline{\Omega})$, with $u>0$ in $\Omega$, solving \[ \begin{cases} {h}(x)u(x)+{\mathcal{L}}_s (\Omega(x),u(x))=\lambda u(x)+ f(x) \qquad & \mbox{in } \Omega,\\ u(x) = 0 & \mbox{on } \partial \Omega. \end{cases} \] On the other hand, since $\mu<\lambda$ and $w>0$ we have that \[ \begin{cases} {h}(x)w(x)+{\mathcal{L}}_s (\Omega(x),w(x))\le\lambda w(x) \qquad & \mbox{in } \Omega,\\ w(x) = 0 & \mbox{on } \partial \Omega. \end{cases} \] Being in the same setting of Lemma \ref{aap}, we deduce that $w\le0$, which is a contradiction. This proves that $ \mu\ge\lambda_f$. Assume by contradiction, that $\mu> \lambda_f$. Take $\epsilon>0$ small enough in order to have $\lambda_f<\mu-\epsilon$ and a nonnegative nontrivial continuous function $g(x)$ such that $\epsilon w \ge g$ in $\Omega$. It follows that $w$ solves \[ {h}(x)w(x)+{\mathcal{L}}_s (\Omega(x),w(x))\ge(\mu-\epsilon) w(x) + g(x) \qquad \mbox{in } \Omega. \] Using Theorem \ref{approximation} we deduce that there exists $w_g$ solution to \[ \begin{cases} {h}(x)w_g(x)+{\mathcal{L}}_s (\Omega(x), w_g(x))=(\mu-\epsilon) w_g(x)+ g(x) \qquad & \mbox{in } \Omega,\\ w_g(x) = 0 & \mbox{on } \partial \Omega. \end{cases} \] Applying the refined comparison principle of Lemma \ref{aap} between $v_f$ and $w_g$ it follows that $v_f\le 0$, which is a contradiction. We eventually conclude that $\lambda_f=\mu$. This also implies that $\lambda_f=\lambda_g=\overline{\lambda}$ for all $f,g$ that satisfy \eqref{fconintro}. \textbf{Step 3.} In this last step we show that solutions of \eqref{eigenproblem} are unique up to a multiplicative constant. Assume that $w$ is a nontrivial solution to \eqref{eigenproblem} and let $v_f>0$ be the solution provided by Step 1. Since $w$ is nontrivial we can always assume, up to a multiplication with a (not necessarily positive) constant, that $w(x_0)>0$. Then, we can use the second part of Lemma \ref{aap} to conclude that $w=t v_f$ for some constant $t$. \end{proof} \begin{proof}[Proof of Theorem \ref{helmholtz}] Since $\lambda< \overline{\lambda}$, thanks to the assumptions on $f$ and using characterization \eqref{28.06}, we deduce that there exists a function $v>0$ in $\Omega$ solving \[ \begin{cases} {h}(x)v(x)+{\mathcal{L}}_s (\Omega(x),v(x))=\lambda v(x)+ |f(x)| \qquad & \mbox{in } \Omega,\\ v(x) = 0 & \mbox{on } \partial \Omega. \end{cases} \] Clearly, $v$ is a supersolution for \eqref{helmeq} and we can take advantage of Theorem \ref{approximation} to conclude that \eqref{helmeq} admits a solution. To deal with uniqueness we assume that \eqref{helmeq} has two solutions $v$ and $z$ and set $w=v-z$. Using Corollary \ref{ellipticdif} it follows that $w$ solves \[ \begin{cases} {h}(x)w(x)+{\mathcal{L}}_s (\Omega(x),w(x))=\lambda w(x) \qquad & \mbox{in } \Omega,\\ w(x) = 0 & \mbox{on } \partial \Omega. \end{cases} \] Applying Lemma \ref{aap} to $v$ and $w$, we conclude that $w\le 0$. The same conclusion holds for $ -w= z-v$, so that $w=0$ and $v\equiv z$. \end{proof} \section{Asymptotic analysis}\label{Asymptotic} In this last section, we address the large time behavior of the solution to \eqref{parabolic}. 
\begin{teo}\label{totoinfty} Let us assume \eqref{alpha}-\eqref{Sigmadef}, that $ g_0 \in C(\Omega)$ with supp$( g_0 )\subset\subset\Omega$, and that there exist constants $\eta_g,C_g>0$ such that the continuous nonnegative function $g:(0,\infty)\times\Omega\to\mathbb R^{+}$ satisfies \be\label{gcon} g(t,x)d(x)^{2s-\eta_g}e^{\lambda t}\le C_g \ \ \ \mbox{for some} \ \ \ \lambda<\overline{\lambda}, \ee where $\overline{\lambda}$ is the first eigenvalue provided by Theorem \emph{\ref{mussaka}}. Then, if $w\in C([0,\infty)\times\overline{\Omega})\cap L^{\infty}((0,\infty)\times\Omega)$ satisfies in the viscosity sense \[ -g(x,t)\le \partial_tw(t,x)+h(x)w(t,x)+\mathcal{L}_{s}(\Omega(x),w(t,x)) \le g(x,t) \ \ \ \mbox{in} \ (0,\infty)\times\Omega, \] coupled with boundary and initial conditions \[ \begin{cases} w(t,x) = 0 & \mbox{on } (0,T)\times\partial\Omega,\\ w(0,x) = g_0(x) & \mbox{on } \Omega, \end{cases} \] one has $|w(x,t)|\le Q_{\lambda}d(x)^{\tilde \eta}e^{-\lambda t}$, for all $\tilde \eta\le \min\{\bar \eta, \eta^g\}$ and some $Q_{\lambda} >0$ with $Q_\lambda \to \infty$ as $\lambda\to \overline \lambda$ . \end{teo} \begin{proof} Let us consider $\varphi_{\lambda}$ solving \begin{equation}\label{barrierup} \begin{cases} h(x)\varphi_{\lambda}(x)+\mathcal{L}_{s} (\Omega(x),\varphi_{\lambda}(x))= \lambda \varphi_{\lambda}(x) +C_g d(x)^{\eta_g-2s} \qquad & \mbox{in } \Omega,\\ \varphi_{\lambda}(x) = 0 & \mbox{on } \partial\Omega,\\ \varphi_{\lambda}(x) > 0 & \mbox{in } \Omega.\\ \end{cases} \end{equation} Such a function exists since $\lambda<\overline{\lambda}$ and thanks to the characterization of Theorem \ref{mussaka}. We want to prove that $\overline w(t,x)=e^{-\lambda t}\varphi_{\lambda}(x)$ solves in the viscosity sense \[ \partial_t\overline w(t,x)+h(x)\overline w(t,x)+\mathcal{L}_{s}(\Omega(x),\overline w(t,x)) \ge g(x,t) \qquad \mbox{in } (0,\infty)\times\Omega. \] In order to achieve this, let $\phi\in C^2(\Omega\times(0,\infty))$ and $(t,x)\in(0,\infty)\times\Omega$ such that $\overline w(t,x)=\phi(t,x)$ and that $\overline w(\tau,y)\ge\phi(\tau,y)$ for $(\tau,y)\in(0,\infty)\times\Omega$. We need to check (see Lemma \eqref{def2}) that for any $B_r(x)\subset \Omega$ \[ \partial_t \phi_r( t , x )+ h( x ) \phi_r( t , x )+\mathcal{L}_{s}(\Omega(x), \phi_r(t,x))\ge g(x,t) . \] By construction of $\phi_r$ we have that $\partial_t \phi_r( t , x )\ge -\lambda e^{-\lambda t}\varphi_{\lambda}(x)$. Moreover, the function $\psi^t(y)=\phi e^{\lambda t}$ touches $\varphi_{\lambda}$ at $x$ from below. Then, we infer that \begin{align*} & h( x ) \phi_r( t , x )+\mathcal{L}_{s}(\Omega(x), \phi_r(t,x))=e^{-\lambda t}\left[ h( x )\psi^t_r( x )+\mathcal{L}_{s}(\Omega(x), \psi^t_r( x )) \right]\\ &\quad \ge e^{-\lambda t} [ \lambda \varphi_{\lambda}(x) +C_g d(x)^{\eta_g-2s}], \end{align*} where the last inequality follows from the definition of $\varphi_{\lambda}$. By collecting the information obtained we get that \begin{align*} \quad& \partial_t \phi_r( t , x )+ h( x ) \phi_r( t , x )+\mathcal{L}_{s}(\Omega(x), \phi_r(t,x))- g(x,t)\\ &\quad \ge -\lambda e^{-\lambda t}\varphi_{\lambda}(x) + e^{-\lambda t} [ \lambda \varphi_{\lambda} +C_g d(x)^{\eta_g-2s}] - g(x,t)\\ &\quad=e^{-\lambda t}d(x)^{\eta_g-2s} [C_g -g(x,t)e^{\lambda t}d(x)^{2s-\eta_g}]\ge0, \end{align*} where the last inequality comes from assumption \eqref{gcon}. 
Similarly, we can prove that $\underline w(t,x)=-e^{-\lambda t}\varphi_{\lambda}(x)$ solves \[ g(x,t) \le \partial_t\overline w(t,x)+h(t,x)\overline w(t,x)+\mathcal{L}_{s}(\Omega(t,x),\overline w(t,x)) \qquad \mbox{in } (0,\infty)\times\Omega. \] Since $g_0$ has a compact support we can assume $| g_0 |\le\varphi_{\lambda}$, for otherwise we can consider $k\varphi_{\lambda}$ instead of $\varphi_{\lambda}$ for large $k>0$. Therefore, we can use the comparison principle to deduce that \[ |w(t,x)|\le e^{-\lambda t}\varphi_{\lambda}(x). \] At this point, notice that the right-hand side of the first equation in \eqref{barrierup} can be estimated as follows \[ \lambda \varphi_{\lambda}(x) +C_g d(x)^{\eta_g-2s}\le \lambda \|\varphi_{\lambda}\|_{\elle{\infty}}+C_g d(x)^{\eta_g-2s}\le C_{\lambda,g }d(x)^{\eta_g-2s}. \] This implies that there exists $Q=Q(C_{\lambda,g })$ large enough so that $\overline l= Qd(x)^{\tilde \eta}$ is a super solution to \eqref{barrierup} (see Lemma \ref{barriercone}). Then, we can use the comparison principle to conclude that $\varphi_{\lambda}(x)\le \overline l$, which concludes the proof. \end{proof} We conclude by presenting a proof of Theorem \ref{lalaguna}. \begin{proof}[Proof of Theorem \ref{lalaguna}] Using Lemma \ref{differenza}, it follows that $w(t,x)=u(t,x)-v(x)$ solves in the viscosity sense \be\label{4-6} \partial_tw(t,x)+h(t,x)w(t,x)+\mathcal{L}_{s} (\Omega(x),w(t,x))\le \tilde{f}(x,t) \qquad \mbox{in } (0,\infty)\times\Omega \ee where \[ \tilde{f}(x,t)=|f(x,t)-f({x} )|+M|h({t} ,{x} )-h({x} )|+2M\int_{\mathbb{R}^N}\frac{|\chi_{\Omegat({t} ,{x} )}-\chi_{\Omegat({x} )}|}{|z|^{N+2s}}dz. \] Similarly, we can apply again Lemma \ref{differenza} to $-w(t,x)=v(x)-u(t,x)$ to deduce that \be\label{4-6bis} \partial_tw+h(t,x)w+\mathcal{L}_{s} (\Omega(x),w)\ge -\tilde{f}(x,t) \qquad \mbox{in } (0,\infty)\times\Omega. \ee Notice now that \[ \int_{\mathbb{R}^N}\frac{|\chi_{\Omegat({t} ,{x} )}-\chi_{\Omegat({x} )}|}{|z|^{N+2s}}dz=\int_{|z|\ge\frac{\zeta}{2}d(\bar x)}\frac{|\chi_{\Omegat({t} ,{x} )}-\chi_{\Omegat({x} )}|}{|z|^{N+2s}}dz \] \[ \le \frac{C}{d(x)^{N+2s}}|\Omega(t,x)\Delta\Omega(x)|\le \frac{Ce^{-\lambda t}}{d(x)^{2s-\eta_1}}, \] where we have used assumptions \eqref{ostationary} and \eqref{omegax} to deduce the equation in the first line, and assumption \eqref{decayomega} to deduce the last inequality in the second line. Thanks to inequalities \eqref{4-6} and \eqref{4-6bis}, the assertion follows by a direct application of Theorem \ref{totoinfty}. \end{proof} \section*{Acknowledgement} \noindent S.B. is supported by the Austrian Science Fund (FWF) projects F65, P32788 and FW506004. U.S. is supported by the Austrian Science Fund (FWF) projects F\,65, W\,1245, I\,4354, I\,5149, and P\,32788 and by the OeAD-WTZ project CZ 01/2021. \EEE \begin{thebibliography}{999} \bibitem{pedro} P. Aceves-S\'anchez, C. Schmeiser, {\it Fractional diffusion limit of a linear kinetic equation in a bounded domain,} Kinet. Relat. Mod., \textbf{10} (2017), 541--551. \bibitem{bookjulio} F. Andreu-Vaillo, J. M. Mazón, J. D. Rossi, J. J. Toledo-Melero, {\it Nonlocal Diffusion Problems}, Mathematical surveys and monographs, 165, American Mathematical Soc., 2021. \bibitem{BC} M. Bardi, I. Capuzzo-Dolcetta, {\it Optimal Control and Viscosity Solutions of Hamilton-Jacobi-Bellman Equations}, Modern Birkh\"auser Classic, 1997. \bibitem{barles} G. 
Barles, {\it An Introduction to the Theory of Viscosity Solutions for First-Order Hamilton–Jacobi Equations and Applications}, In: Hamilton-Jacobi Equations: Approximations, Numerical Analysis and Applications. Lecture Notes in Mathematics, vol 2074. Springer, Berlin, Heidelberg. \bibitem{bci} G. Barles, E. Chasseigne, C. Imbert, {\it On the Dirichlet Problem for Second-Order Elliptic Integro-Differential Equations,} Indiana Univ. Math. J., \textbf{57} (2008), 213--246. \bibitem{bcibis} G. Barles, E. Chasseigne, and C. Imbert, {\it H\"older continuity of solutions of second-order non- linear elliptic integro-differential equations}, J. Eur. Math. Soc., \textbf{13} (2011), 1--26. \bibitem{barimb} G. Barles, C. Imbert, {\it Second-order elliptic integro-differential equation: viscosity solutions' theory revisited}, Ann. Inst. H. Poincar\'e Anal. Non Lin\' eaire, {\bf 25} (2008), 567--585. \bibitem{bego} B. Barrios, L. Del Pezzo, J. Garc\'ia-Meli\'an, A. Quaas, {\it A priori bounds and existence of solutions for some nonlocal elliptic problems}, Rev. Mat. Iberoam., \textbf{34} (2018), 195--220. \bibitem{bere} H. Berestycki, L. Nirenberg, S. R. S. Varadhan, {\it The principal eigenvalue and maximum principle for second-order elliptic operators in general domains}, Comm. Pure Appl. Math., \textbf{47} (1994), 47-92. \bibitem{berebis} H. Berestycki, J. M. Roquejoffre, L. Rossi, {\it The periodic patch model for population dynamics with fractional diffusion}, Discrete Contin. Dyn. Syst., \textbf{4} (2011), 1-13. \bibitem{biri} I. Birindelli, F. Demengel, {\it First eigenvalue and maximum principle for fully nonlinear singular operators}, Adv. Differential Equations \textbf{11} (1) (2006), 91-119. \bibitem{birga} I. Birindelli, G. Galise, D. Schiera, {\it Maximum principles and related problems for a class of nonlocal extremal operators}, preprint, arXiv:2107.07303, (2021). \bibitem{biswas} A. Biswas, {\it Principal eigenvalues of a class of nonlinear integro-differential operators}, J. Differential Equations, \textbf{268} (2020), 5257-5282. \bibitem{bismod} A. Biswas, M. Modasiya, {\it Mixed local-nonlocal operators: maximum principles, eigenvalue problems and their applications}, preprint, arXiv:2110.06746 (2021). \bibitem{bog} K. Bogdan, K. Burdzy, Z. Chen, {\it Censored stable processes}, Probab. Theory Related Fields, \textbf{127}, (2003), 89--152. \bibitem{burkovska} O. Burkovska, C. Glusa, M. D'Elia, {\it An optimization-based approach to parameter learning for fractional type nonlocal models}, Comput. Math. Appl., in press. \bibitem{busca} J. Busca, M. Esteban, A. Quaas, {\it Nonlinear eigenvalues and bifurcation problems for Pucci's operator}, Ann. Inst. Henri Poincar\'e, Anal. Non Lin\'eaire \textbf{22} (2) (2005) 187-206. \bibitem{caff} L. Caffarelli, L. Silvestre, Regularity theory for fully nonlinear integro- differential equations, Commun. Pur. Appl. Math., 2009, \textbf{62}, 597-638. \bibitem{caffbis} L. Caffarelli, L. Silvestre, {\it Regularity Results for Nonlocal Equations by Approximation}, Arch. Rational. Mech. Anal., \textbf{200} (2011) 59--88. \bibitem{CIL} M. Crandall, H. Ishii, P.L. Lions, {\it User's guide to viscosity solutions of second order partial differential equations}, Bull. Amer. Math. Soc. (N.S.), {\bf 27} (1992), 1--67. \bibitem{cd} H. Chang Lara, G. D\'avila, {\it Regularity for solutions of nonlocal parabolic equations}, Calc. Var. Partial Differential Equations, \textbf{49} (2014) 139-172. \bibitem{cdbis} H. Chang Lara, G. 
D\'avila, {\it H\"older estimates for nonlocal parabolic equations with critical drift}, J. Differential Equations, \textbf{260} (2016) 4237-4284. \bibitem{DalMaso93} G.~{Dal Maso}. \newblock {\em An introduction to {$\Gamma$}-convergence}. \newblock Progress in Nonlinear Differential Equations and their Applications, 8. Birkh\"auser Boston Inc., Boston, MA, 1993. \bibitem{DQT} G. D\'avila, A. Quaas, E. Topp, {\it Existence, nonexistence and multiplicity results for nonlocal Dirichlet problems}, J. Differential Equation \textbf{266} (2019) 5971--5997. \bibitem{duo} S. Duo, H. Wang, Y. Zhang, {\it A comparative study on nonlocal diffusion operators related to the fractional Laplacian}, Discrete Contin. Dyn. Syst. Ser. B \textbf{24} (2019), 231-256. \bibitem{Emmrich} {E. Emmrich, R.~B. Lehoucq, D. Puhst}, {\em Peridynamics: a nonlocal continuum theory}, In: M. Griebel, M.~A. Schweitzer (eds), Meshfree Methods for Partial Differential Equations VI, Lecture Notes in Computational Science and Engineering, vol 89, 45--65, Springer, Berlin, Heidelberg, 2008. \bibitem{fall} M. M. Fall, {\it Regional fractional Laplacians: Boundary regularity}, preprint, https://arxiv.org/abs/2007.04808. \bibitem{gms} N. Guillen, C. Mou, A. \'{S}wi\c{e}ch, {\it Coupling L\'{e}vy measures and comparison principles for viscosity solutions}, Trans. Amer. Math. Soc., \textbf{372} (2019), 7327--7370. \bibitem{imb} C. Imbert, {\it A nonlocal regularization of first order Hamilton-Jacobi equations}, J. Differential Equations \textbf{211} (2005), 214--246. \bibitem{jk} E.R. Jakobsen, K.H. Karlsen, {\it A maximum principle for semicontinuous functions applicable to integro-partial differential equations}, NoDEA Nonlinear Differential Equations Appl., \textbf{13} (2006), 137--165. \bibitem{krs} M. Kassmann, M. Rang, R. W. Schwab, {\it Integro-differential equations with nonlinear directional dependence}, Indiana Univ. Math. J., \textbf{63} (2014), 1467--1498. \bibitem{alot} A. Lischke, G. Panga, M. Gulian, F. Song, C. Glusa, X. Zheng, Z. Mao, W. Cai, M. M. Meerschaert, M. Ainsworth, G. E. Karniadakis, {\it What is the fractional Laplacian? A comparative review with new results}, J. Comput. Phys., 2020, \textbf{404}, https://doi.org/10.1016/j.jcp.2019.109009. \bibitem{mellet} A. Mellet, {\it Fractional diffusion limit for collisional kinetic equations: a moments method,} Indiana Univ. Math. J., \textbf{59} (2010) 1333-1360. \bibitem{melletbis} A. Mellet, S. Mischler, C. Mouhot, {\it Fractional diffusion limit for collisional kinetic equations}, Arch. Ration. Mech. Anal., 2011, \textbf{199}, 493-525, https://doi.org/10.1007/s00205-010-0354-2. \bibitem{mou} C. Mou, {\it Perron's method for nonlocal fully nonlinear equations}, Anal. PDE \textbf{10} (2017) 1227--1254 \bibitem{QS} A. Quaas, B. Sirakov, {\it Principal eigenvalues and the Dirichlet problem for fully nonlinear elliptic operators}, Adv. Math., \textbf{218} (2008) 105--135. \bibitem{quaas} A. Quaas, A. Salort, A. Xia, {\it Principal eigenvalues of fully nonlinear integro-differential elliptic equations with a drift term}, ESAIM Control Optim. Calc. Var., \textbf{26}, 2020. \bibitem{ros} X. Ros-Oton, J. Serra, {\it Boundary regularity for fully nonlinear integro-differential equations}, Duke Math. J. \textbf{165} (2016), 2079-2154. \bibitem{schwabsil} R. W. Schwab and L. Silvestre, {\it Regularity for parabolic integro-differential equations with very irregular kernels}, Anal. PDE \textbf{9} (2016), 727-772. \bibitem{Silling00} {S.~A. 
Silling}, {\em Reformulation of elasticity theory for discontinuities and long-range forces}, J. Mech. Phys. Solids, {\bf 48} (2000), 175-209. \bibitem{survey} S.~A. Silling, R.~B. Lehoucq, {\em Peridynamic theory of solid mechanics}, Adv. Appl. Mech., {\bf 44} (2010), 73-166. \bibitem{silve} L. Silvestre, {\it H\"older estimates for solutions of integro-differential equations like the fractional Laplace}, Indiana Univ. Math. J., \textbf{55} (2006) 1155--1174. \bibitem{tar} L. Tartar, {\it An Introduction to Sobolev Spaces and Interpolation Spaces}, Lect. Notes Unione Mat. Ital., \textbf{3}, Springer-Verlag, Berlin, Heidelberg, 2007 \end{thebibliography} \end{document}
2205.15289v2
http://arxiv.org/abs/2205.15289v2
Percolation for two-dimensional excursion clouds and the discrete Gaussian free field
\documentclass[11pt,a4paper]{article} \usepackage[hyphens]{url} \usepackage{dsfont} \usepackage[utf8]{inputenc} \usepackage{hyperref} \hypersetup{linktocpage, colorlinks = true, urlcolor = blue, linkcolor = blue, citecolor = blue } \usepackage[T1]{fontenc} \usepackage[english]{babel} \usepackage{amsmath} \usepackage{amssymb} \usepackage{amsthm} \usepackage{sansmath} \usepackage{calligra} \usepackage{mathtools} \usepackage[titletoc,toc,title]{appendix} \usepackage{appendix} \usepackage{ae} \usepackage{icomma} \usepackage{units} \usepackage{color} \usepackage{graphicx} \usepackage{caption} \usepackage{subcaption} \usepackage{bbm} \usepackage[square, numbers, sort]{natbib} \usepackage{float} \usepackage{multirow} \usepackage{array} \usepackage{geometry} \usepackage{fancyhdr} \usepackage{lettrine} \usepackage{theoremref} \usepackage{centernot} \usepackage[normalem]{ulem} \usepackage[shortlabels]{enumitem} \bibliographystyle{plain} \setenumerate[0]{label=\alph*)} \newcommand{\be}{\mathcal{I} } \newcommand{\capac}{\mathrm{cap}} \newcommand{\Pm}{\mathbb{P}} \newcommand{\F}{\mathcal{F}} \newcommand{\Om}{\Omega} \newcommand{\M}{\mathcal{M}} \newcommand{\E}{ \mathbb{E}} \newcommand{\Var}{\mbox{Var}} \newcommand{\cov}{\mbox{\textbf{Cov}}} \newcommand{\eqdist}{\overset{d}{=}} \newcommand{\iid}{\textit{i.i.d.}} \newcommand{\as}{\mathrm{a.s.}} \newcommand{\law}{\mathrm{Law}} \newcommand{\weakly}{\overset{w}{\rightarrow}} \newcommand{\indist}{\overset{d}{\rightarrow}} \newcommand{\I}{\mathbb{I}} \newcommand{\pspace}{\mathcal{P}} \renewcommand{\tilde}{\widetilde} \newcommand{\N}{\mathbb{N}} \newcommand{\Z}{\mathbb{Z}} \newcommand{\Q}{\mathbb{Q}} \newcommand{\R}{\mathbb{R}} \newcommand{\ch}{\mathsf{c}} \newcommand{\B}{\mathcal{B}} \newcommand{\En}{\mathcal{E}} \newcommand{\V}{\mathcal{V}} \newcommand{\haus}{\mathcal{H}} \newcommand{\Tr}{\mbox{Tr}} \newcommand{\Disc}{\mbox{Disc}} \newcommand{\leg}[2]{\left(\frac{#1}{#2}\right)} \newcommand{\id}{\mathrm{d}} \newcommand{\ale}{\mathrm{a.e}} \newcommand{\im}{\mathrm{i}} \newcommand{\e}{\mathrm{e}} \newcommand{\sgn}{\, \mathrm{sgn}} \newcommand{\supp}{\mathrm{supp}\,} \newcommand{\Span}{\mathbf{Span}} \newcommand{\dist}{\mathrm{dist}} \newcommand{\diam}{\mathrm{diam}\, } \newcommand{\isom}{\mathrm{Isom}} \newcommand{\proj}{\mathrm{Proj}} \newcommand{\imag}{\mathrm{Im} \,} \newcommand{\re}{\mathrm{Re} \, } \newcommand{\aut}{\mathrm{Aut}} \newcommand{\diverg}{\mathrm{div}} \newcommand{\ba}{a} \newcommand{\bp}{p} \newcommand{\auk}{a^{(k)}} \newcommand{\adk}{a_{(k)}} \newcommand{\puk}{p^{(k)}} \newcommand{\pdk}{p_{(k)}} \newcommand{\ck}{c_k} \newcommand{\cent}{\mathrm{cent}} \newcommand{\BA}{{\mathbb{A}}} \newcommand{\BB}{{\mathbb{B}}} \newcommand{\BC}{{\mathbb{C}}} \newcommand{\D}{{\mathbb{D}}} \newcommand{\BE}{{\mathbb{E}}} \newcommand{\BF}{{\mathbb{F}}} \newcommand{\BG}{{\mathbb{G}}} \newcommand{\BH}{{\mathbb{H}}} \newcommand{\BI}{{\mathbb{I}}} \newcommand{\BJ}{{\mathbb{J}}} \newcommand{\BK}{{\mathbb{K}}} \newcommand{\BL}{{\mathbb{L}}} \newcommand{\BM}{{\mathbb{M}}} \newcommand{\BN}{{\mathbb{N}}} \newcommand{\BO}{{\mathbb{O}}} \newcommand{\BP}{{\mathbb{P}}} \newcommand{\BQ}{{\mathbb{Q}}} \newcommand{\BR}{{\mathbb{R}}} \newcommand{\BS}{{\mathbb{S}}} \newcommand{\BT}{{\mathbb{T}}} \newcommand{\BU}{{\mathbb{U}}} \newcommand{\BV}{{\mathbb{V}}} \newcommand{\BW}{{\mathbb{W}}} \newcommand{\BX}{{\mathbb{X}}} \newcommand{\BY}{{\mathbb{Y}}} \newcommand{\BZ}{{\mathbb{Z}}} \newcommand{\FA}{{\mathfrak{A}}} \newcommand{\FB}{{\mathfrak{B}}} \newcommand{\FC}{{\mathfrak{E}}} 
\newcommand{\FD}{{\mathfrak{D}}} \newcommand{\FE}{{\mathfrak{E}}} \newcommand{\FF}{{\mathfrak{F}}} \newcommand{\FG}{{\mathfrak{G}}} \newcommand{\FH}{{\mathfrak{H}}} \newcommand{\FI}{{\mathfrak{I}}} \newcommand{\FJ}{{\mathfrak{J}}} \newcommand{\FK}{{\mathfrak{K}}} \newcommand{\FL}{{\mathfrak{L}}} \newcommand{\FM}{{\mathfrak{M}}} \newcommand{\FN}{{\mathfrak{N}}} \newcommand{\FO}{{\mathfrak{O}}} \newcommand{\FP}{{\mathfrak{P}}} \newcommand{\FQ}{{\mathfrak{Q}}} \newcommand{\FR}{{\mathfrak{R}}} \newcommand{\FS}{{\mathfrak{S}}} \newcommand{\FT}{{\mathfrak{T}}} \newcommand{\FU}{{\mathfrak{U}}} \newcommand{\FV}{{\mathfrak{V}}} \newcommand{\FW}{{\mathfrak{W}}} \newcommand{\FX}{{\mathfrak{X}}} \newcommand{\FY}{{\mathfrak{Y}}} \newcommand{\FZ}{{\mathfrak{Z}}} \newcommand{\CA}{{\mathcal{A}}} \newcommand{\CB}{{\mathcal{B}}} \newcommand{\CC}{{\mathcal{C}}} \newcommand{\CD}{{\mathcal{D}}} \newcommand{\CE}{{\mathcal{E}}} \newcommand{\CF}{{\mathcal{F}}} \newcommand{\CG}{{\mathcal{G}}} \newcommand{\CH}{{\mathcal{H}}} \newcommand{\CI}{{\mathcal{I}}} \newcommand{\CJ}{{\mathcal{J}}} \newcommand{\CK}{{\mathcal{K}}} \newcommand{\CL}{{\mathcal{L}}} \newcommand{\CM}{{\mathcal{M}}} \newcommand{\CN}{{\mathcal{N}}} \newcommand{\CO}{{\mathcal{O}}} \newcommand{\CP}{{\mathcal{P}}} \newcommand{\CQ}{{\mathcal{Q}}} \newcommand{\CR}{{\mathcal{R}}} \newcommand{\CS}{{\mathcal{S}}} \newcommand{\CT}{{\mathcal{T}}} \newcommand{\CU}{{\mathcal{U}}} \newcommand{\CV}{{\mathcal{V}}} \newcommand{\CW}{{\mathcal{W}}} \newcommand{\CX}{{\mathcal{X}}} \newcommand{\CY}{{\mathcal{Y}}} \newcommand{\CZ}{{\mathcal{Z}}} \newcommand{\SFA}{{\mathsf{A}}} \newcommand{\SFB}{{\mathsf{B}}} \newcommand{\SFC}{{\mathsf{C}}} \newcommand{\SFD}{{\mathsf{D}}} \newcommand{\SFE}{{\mathsf{E}}} \newcommand{\SFF}{{\mathsf{F}}} \newcommand{\SFG}{{\mathsf{G}}} \newcommand{\SFH}{{\mathsf{H}}} \newcommand{\SFI}{{\mathsf{I}}} \newcommand{\SFJ}{{\mathsf{J}}} \newcommand{\SFK}{{\mathsf{K}}} \newcommand{\SFL}{{\mathsf{L}}} \newcommand{\SFM}{{\mathsf{M}}} \newcommand{\SFN}{{\mathsf{N}}} \newcommand{\SFO}{{\mathsf{O}}} \newcommand{\SFP}{{\mathsf{P}}} \newcommand{\SFQ}{{\mathsf{Q}}} \newcommand{\SFR}{{\mathsf{R}}} \newcommand{\SFS}{{\mathsf{S}}} \newcommand{\SFT}{{\mathsf{T}}} \newcommand{\SFU}{{\mathsf{U}}} \newcommand{\SFV}{{\mathsf{V}}} \newcommand{\SFW}{{\mathsf{W}}} \newcommand{\SFX}{{\mathsf{X}}} \newcommand{\SFY}{{\mathsf{Y}}} \newcommand{\SFZ}{{\mathsf{Z}}} \newcommand{\HH}{\mathbb{H}} \newcommand{\DD}{\mathbb{D}} \newcommand{\RR}{\mathbb{R}} \newcommand{\ZZ}{\mathbb{Z}} \newcommand{\NN}{\mathbb{N}} \newcommand{\EE}{\mathbb{E}} \newcommand{\vp}{\varphi} \newcommand{\hcap}{\operatorname{hcap}} \newcommand{\height}{\operatorname{height}} \newcommand{\area}{\operatorname{area}} \newcommand{\SLE}{\operatorname{SLE}} \newcommand{\hm}{\omega} \newcommand{\dhm}{\operatorname{dhm}} \newcommand{\inrad}{\operatorname{inrad}} \newcommand{\LE}{\operatorname{L}} \newcommand{\ball}{\mathcal{B}} \newcommand{\cc}{\mathcal{C}} \newcommand{\intens}{u} \newcommand{\rmark}[1]{\begin{center}{\red {#1}}\end{center}} \newcommand{\ind}{\mathds} \newcommand{\floor}[1]{{\lfloor #1 \rfloor}} \newcommand{\T}{\ensuremath{\mathbb{T}}} \renewcommand{\H}{\ensuremath{\mathbb{H}}} \renewcommand{\Pm}{\ensuremath{\mathbb{P}}} \renewcommand{\i}{\ensuremath{{\rm i}}} \let \le \leqslant \let \leq \leqslant \let \ge \geqslant \let \geq \geqslant \let \epsilon \varepsilon \let \vp \varphi \let \el k \DeclareMathOperator*{\argmin}{arg\,min} \DeclareMathOperator*{\argmax}{arg\,max} 
\newtheorem{theorem}{Theorem}[section] \newtheorem{assumption}{Assumption} \newtheorem{condition}{Condition} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{claim}[theorem]{Claim} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{definition}[theorem]{Definition} \newtheorem{example}[theorem]{Example} \theoremstyle{definition} \newtheorem{remark}[theorem]{Remark} \newtheorem{conjecture}[theorem]{Conjecture} \newtheorem{question}[theorem]{Question} \newcommand{\tend}[2]{\displaystyle\mathop{\longrightarrow}_{#1\rightarrow#2}} \newcommand{\eps}{\varepsilon} \renewcommand{\labelenumi}{(\alph{enumi})} \definecolor{Red}{rgb}{1,0,0} \definecolor{Blue}{rgb}{0,0,1} \definecolor{Olive}{rgb}{0.41,0.55,0.13} \definecolor{Yarok}{rgb}{0,0.5,0} \definecolor{Green}{rgb}{0,1,0} \definecolor{MGreen}{rgb}{0,0.8,0} \definecolor{DGreen}{rgb}{0,0.55,0} \definecolor{Yellow}{rgb}{1,1,0} \definecolor{Cyan}{rgb}{0,1,1} \definecolor{Magenta}{rgb}{1,0,1} \definecolor{Orange}{rgb}{1,.5,0} \definecolor{Violet}{rgb}{.5,0,.5} \definecolor{Purple}{rgb}{.75,0,.25} \definecolor{Brown}{rgb}{.75,.5,.25} \definecolor{Grey}{rgb}{.7,.7,.7} \definecolor{Black}{rgb}{0,0,0} \definecolor{lightgrey}{gray}{0.65} \def\red{\color{Red}} \def\blue{\color{Blue}} \def\yarok{\color{Yarok}} \def\black{\color{Black}} \def\gray{\color{Grey}} \usepackage{constants} \newcommand{\parenthezises}[1]{\arabic{#1}} \numberwithin{equation}{section} \newconstantfamily{Const}{ symbol=C, format=\parenthezises, } \newconstantfamily{const}{ symbol=c, format=\parenthezises, } \usepackage{titletoc} \dottedcontents{section}[4em]{}{2.9em}{0.7pc} \usepackage{titling} \begin{document} \pagenumbering{arabic} \title{Percolation for two-dimensional excursion clouds and the discrete Gaussian free field} \thanksmarkseries{arabic} \author{A. Drewitz\thanks{University of Cologne. E-mail: \protect\url{[email protected]}}, O. Elias\thanks{University of Cologne. E-mail: \protect\url{[email protected]}}, A. Pr\'evost\thanks{University of Geneva. E-mail: \protect\url{[email protected]}}, J. Tykesson\thanks{Chalmers University of Technology and Gothenburg University. E-mail: \protect\url{[email protected]}}, F. Viklund\thanks{KTH Royal Institute of Technology. E-mail: \protect\url{[email protected]}}} \date{\today} \maketitle \thispagestyle{empty} \begin{abstract} We study percolative properties of excursion processes and the discrete Gaussian free field (dGFF) in the planar unit disk. We consider discrete excursion clouds, defined using random walks as a two-dimensional version of random interlacements, as well as its scaling limit, defined using Brownian motion. We prove that the critical parameters associated to vacant set percolation for the two models are the same and equal to $\pi/3.$ The value is obtained from a Schramm-Loewner evolution (SLE) computation. Via an isomorphism theorem, we use a generalization of the discrete result that also involves a loop soup (and an SLE computation) to show that the critical parameter associated to level set percolation for the dGFF is strictly positive and smaller than $\sqrt{\pi/2}.$ In particular this entails a strict inequality of the type $h_*<\sqrt{2u_*}$ between the critical percolation parameters of the dGFF and the two-dimensional excursion cloud. Similar strict inequalities are conjectured to hold in a general transient setup. 
\end{abstract} \setcounter{tocdepth}{1} \vspace{8mm} \renewcommand{\contentsname}{\centering {\small Contents}} \begin{minipage}{0.9\textwidth} {\small \tableofcontents } \end{minipage} \vspace{1cm} \pagestyle{fancy} \setlength{\headheight}{14pt} \fancyhf{} \lhead{Percolation for 2D excursion clouds and the dGFF} \cfoot{\thepage} \newpage \section{Introduction} \subsection{Background and main result} \label{sec:background} The Brownian excursion cloud on the unit disk $\D,$ introduced by Lawler and Werner in \cite{lawler2000universality}, is a conformally invariant Poissonian cloud of planar Brownian motion trajectories, starting and ending on the boundary of $\D$. Roughly speaking, the number of trajectories is controlled by an intensity parameter $\intens>0.$ One can view the Brownian excursion cloud as a natural version of Brownian interlacements in the hyperbolic disc. Varying the intensity parameter, one may consider percolative properties of the corresponding vacant set, that is, the set of points visited by no trajectory in the cloud. Given $r\in{[0,1)},$ consider the event that the disc of radius $r$ about $0$ can be connected to $\partial \mathbb{D}$ within the vacant set, and denote by $\intens_*^c(r)$ the associated percolation critical parameter, see \eqref{def:u*c}. We will verify that percolation occurs with positive probability if and only if $u<\pi/3$ independently of $r$, that is, $\intens_*^c(r)$ equals $\pi/3$ for all $r\in{[0,1)}$, cf.\ Theorem~\ref{t.mainthm}. (This follows from an SLE computation and is certainly known to experts; an essentially equivalent statement for $u_*^c$ is contained in the informal discussion of \cite{werner-qian}, Section~5.) There is an analogous discrete model in which one considers a random walk excursion cloud on $\D_n:=n^{-1}\Z^2\cap\D,$ see for instance \cite{ArLuSe-20a}. The corresponding question about vacant set percolation may then be formulated as follows. For fixed $r\in{[0,1)}$ and $u>0,$ we say that percolation occurs if the discrete ball $B_n(r)$ of radius $r$ (see below \eqref{eq:setDiscreteApprox}) can be connected to the boundary of $B_n(1-\eps)$ in the vacant set of the discrete excursion process at intensity $u$ on $\D_n$, as $n\rightarrow\infty$ and $\eps\rightarrow0$ in that order. (We will show that one may take $\eps \sim n^{-1/7}$ and only let $n \to \infty$.) In Theorem~\ref{the:maindisexc}, we prove that percolation in this sense occurs with positive probability if and only if $u<\pi/3,$ by comparing with the continuum scaling limit, the Brownian excursion cloud. In other words, the critical parameter $\intens_*^d(r)$ for the discrete excursion cloud, see \eqref{def:u*}, is also equal to $\pi/3$ for all $r\in{[0,1)},$ and similarly percolation does not occur at criticality. The third model we analyze is the discrete Gaussian free field (dGFF) on the disk $\D_n$ with zero boundary condition on $\D_n^\ch.$ In this case, we are interested in percolative properties of its \emph{level sets} or \emph{excursion sets} as $n\rightarrow \infty,$ that is, the set of vertices where the field is larger than $h$ for some $h\in{\R}.$ Via isomorphism theorems, see for instance \cite[Proposition~2.4]{ArLuSe-20a}, which are a reformulation of similar theorems \cite{MR2892408,MR2932978,sznitman2013scaling,MR3492939,Lu-14} for interlacements, percolative properties of the level sets can be related to those of discrete excursion clouds. 
Given $r\in{[0,1)},$ we study the event that $B_{n}(r)$ is connected to $B_n(1-\eps)$ for the level sets of the dGFF above level $h$ on $\D_n$ as $n\rightarrow\infty$ and $\eps\rightarrow0,$ and denote by $h_*^d(r)$ the associated critical level, see \eqref{def:h*}. This problem was for instance studied in \cite{DiLi-18,DiWiWu-20}, and it was proved in the latter reference that percolation occurs above level zero for a slightly different percolation event. In Theorem~\ref{the:maindisgff} below, we adapt their argument to prove that there is percolation above some small positive level, that is, $h_*^d(r)>0$ for all $r\in{(0,1)}.$ Moreover, we prove that $h_*^d(r)\leq \sqrt{\pi/2}$ for all $r\in{[0,1)}.$ We thus obtain the following series of inequalities between the critical percolation parameters $u_*^c(r)$ and $u_*^d(r)$ associated to the vacant set of the continuous and discrete excursion cloud, and $h_*^d(r)$ associated to level sets of the dGFF, which is a combination of Theorems~\ref{the:maindisexc}, \ref{the:maindisgff} and \ref{t.mainthm}. \begin{theorem} \label{the:main} For all $r\in{(0,1)},$ \begin{equation} \label{eq:strictinequalitycritparaintro} 0<h_*^d(r)\leq\sqrt{\frac{\pi}2}<\sqrt{\frac{2\pi}3}=\sqrt{2\intens_*^d(r)}=\sqrt{2\intens_*^c(r)}. \end{equation} \end{theorem} Note that the exact values $\sqrt{\pi/2}$ and $\sqrt{2\pi/3}$ in Theorem~\ref{the:main} depend on our choice of normalization in the definition of the excursion clouds and the dGFF, and we refer to Remark~\ref{rk:othercontperco},\ref{rk:differentnormalization} for more details. \subsection{Relation to other models and further motivation} We now explain how Theorem~\ref{the:main} is related to existing and conjectured results for other models, in particular random interlacements and the dGFF in dimension three and higher. By Proposition~\ref{prop:localdescription} below, the problem of vacant set percolation for the Brownian excursion cloud on $\D$ can be seen as a natural two-dimensional equivalent of percolation for the vacant set of Brownian interlacements \cite{sznitman2013scaling,li2016percolative} on $\R^d,$ $d\geq3.$ But it is different from the model introduced in \cite{MR4125109}. In \cite{elias2017visibility}, a related property of the two-dimensional Brownian excursion cloud was studied, namely that of ``visibility to infinity'', that is, the event that the origin can be connected to $\partial \mathbb{D}$ by a line segment started from $0$ and contained in the vacant set. (Infinity here is understood in the sense of hyperbolic distance.) It was proved that there is visibility to infinity from the origin if and only if $\intens<\pi/4,$ which directly implies that the associated critical parameter for vacant set percolation satisfies $\intens_*^c(0)\geq\pi/4.$ It was moreover proved in \cite{eliaslic2018} that $\intens_*^c(0)\leq \pi/2$ by comparing with a Poisson process of hyperbolic geodesics. The fact that $\intens_*^c(0)=\pi/3$ shows that this critical parameter sits strictly between these two previously obtained bounds. In the discrete setting, the excursion cloud can be identified with random interlacements on $\D_n$ with infinite killing measure on $\D_n^\ch,$ see below \eqref{eq:muexcasinter}. In dimension $d\geq3,$ random interlacements on $\Z^d$ have been introduced in \cite{sznitman2010vacant}, but their definition can be extended to general transient weighted graphs, see \cite{teixeira2009interlacement}, even with a non-zero killing measure, see for instance \cite[Section~3]{Pre1}.
The percolative properties of the vacant set associated to random interlacements on transient graphs have been studied intensely, see, e.g., \cite{sznitman2010vacant,teixeira2009interlacement,MR2891880}. In dimension two, one cannot define random interlacements directly on the whole recurrent graph $\Z^2,$ and a possible alternative definition has been introduced in \cite{MR3475663}, where one essentially conditions on the origin being avoided by the walk to obtain a transient graph. In this context the percolation question is perhaps less natural, since the connected components of the associated vacant set at level $\alpha$ are always bounded, but depending on $\alpha$ its cardinality can be either finite or infinite. It is shown in \cite{MR3737923}, that this vacant set is infinite if and only if $\alpha\leq 1$. When blowing up the set $\D_n$ to $B(n):=\{x\in{\Z^2}:\,|x|_2\leq n\},$ one can identify percolation for the vacant set of the two-dimensional discrete excursion process with percolation for the vacant set of random interlacements on $B(n)$ with infinite killing on $B(n)^\ch,$ which, as $n\rightarrow\infty,$ can be considered as another natural definition of percolation for the vacant set of interlacements in dimension two. Indeed, in dimension $d\geq3,$ random interlacements on the ball of radius $n,$ killed outside this ball, converge to random interlacements on $\Z^d,$ but this limit does not seem to be well-defined in dimension two. Let us now turn to the dGFF on $\D_n,$ which is linked to the discrete excursion cloud on $\D_n$ via an isomorphism \cite{ArLuSe-20a}. This isomorphism can alternatively be seen from the point of view of random interlacements \cite{MR2892408, Lu-14,MR3492939,DrePreRod3}, or from the point of view of the random walk on finite graphs \cite{MR1813843,MR3978220}, see Remark~\ref{rk:otheriso}. A direct consequence of this isomorphism is the weak inequality $h_*^d(r)\leq\sqrt{2\intens_*^d(r)}$ between the critical parameter for the vacant set of random interlacements and the critical parameter for the level sets of the dGFF. On transient graphs, level set percolation for the dGFF has also received significant attention in recent years \cite{MR3053773, MR3492939}. The isomorphism theorem relating its law with random interlacements has been a powerful tool for the study of the percolation of its level sets \cite{Sz-16,DrePreRod,DrePreRod2}, and also implies the weak inequality $h_*\leq\sqrt{2u_*}$ between the respective critical parameters of the level sets of the dGFF and the vacant set of random interlacements, see \cite[Theorem~3]{Lu-14} on $\Z^d,$ $d\geq3,$ or \cite[Corollary~2.5]{MR3940195} and \cite{DrePreRod3} on more general transient graphs. This inequality is strict on a large class of trees \cite{MR3492939,MR3765885}, and conjectured \cite[Remark~4.6]{MR3492939} to be strict on $\Z^d$ for all $d\geq3.$ The isomorphism between the discrete excursion cloud and the dGFF also involves a third process, which corresponds to an independent loop soup, see Proposition~\ref{prop:BEisom}. On $\D_n,$ taking advantage of this additional loop soup leads to the previously mentioned inequality $h_*^d(r)\leq \sqrt{\pi/2}.$ What is more, combined with our percolation results for the discrete excursion cloud, this entails the strict inequality $h_*^d(r)<\sqrt{2\intens_*^d(r)}$ in dimension two -- note that the weak version $h_*^d(r)\le\sqrt{2\intens_*^d(r)}$ of this inequality follows at once from the isomorphism. 
It is in general a challenge to establish this stronger version, which so far has only been achieved on certain trees \cite{Sz-16,MR3765885}. In the continuous setting, one can also deduce from a similar isomorphism \cite{ArLuSe-20a} that the critical parameter associated to the complement of the first passage set of the two-dimensional dGFF is $\sqrt{\pi/2},$ see Corollary~\ref{cor:percocontGFF} for details. The inequality corresponding to $h_*^d(r)>0$ has also been obtained for the dGFF on $\Z^d,$ $d\geq3$ in \cite{DrePreRod2}, and on more general transient graphs in \cite{DrePreRod}. It also corresponds to a strict inequality between critical parameters, as we now explain. Denote by $\tilde{h}_*^d(r)$ the critical parameter associated to the percolation for the level sets of the dGFF on the cable system, or metric graph, associated to $\D_n,$ studied in dimension two in \cite{DiWi-18,DiWiWu-20,ArLuSe-20a}. This critical parameter is equal to zero for our notion of two-dimensional percolation, see \cite{ArLuSe-20a} (this can be proved by combining Lemma~4.13 and Corollary~5.1 therein), see also \cite{DiWiWu-20} for a similar result in a slightly different context. This equality $\tilde{h}_*=0$ has also been obtained for the dGFF on the cable system of a large class of transient graphs, see \cite{Lu-14,MR3492939,DrePreRod3}. Combining this with the positivity of $h_*^d(r),$ we obtain the following strict inequality between critical parameters: $\tilde{h}_*^d(r)<h_*^d(r).$ Summarizing, Theorem~\ref{the:main} contains several strict inequalities between the critical parameters $\tilde{h}_*^d(r),$ $h_*^d(r)$ and $u_*^d(r),$ which complement known and conjectured results on transient graphs. See the discussion below \eqref{eq:strictinequalitycritpara} for more details. \subsection{Comments on the proofs and outline of the paper} Let us now discuss the ideas for the proof of Theorem~\ref{the:main}. The computation of the parameter $u_*^c(r),$ associated to percolation for the vacant set of Brownian excursions, exploits the relation between excursion clouds and (variants of) Schramm-Loewner evolution processes, specifically the SLE$_{8/3}(\rho)$ process. Here, the weight $\rho$ is an explicit function of the intensity of the excursion process, see \eqref{eq:rhokappaalpha}. By the theory of conformal restriction \cite{lawler2003conformal,werner2005conformal}, the excursions which start and end on the bottom half of $\partial\D$ produce an interface which can be described by an $\mathrm{SLE}_{8/3}(\rho)$ curve in $\mathbb{D}$ from $-1$ to $1$, see Lemma~\ref{l.brownianexcursionandlooprestriction}. Moreover, it is well-known \cite{lawler2003conformal} for which $\rho$ an $\mathrm{SLE}_{8/3}(\rho)$ curve hits $\partial\D,$ see Lemma~\ref{lem:SLE-ka-r}. This directly implies that the vacant set of the excursions that start and end on the bottom half of $\partial\D$ is connected to the bottom half of $\partial\D$ with positive probability if and only if the Brownian excursions have intensity less than $\pi/3.$ (See also Section~5 of \cite{werner-qian}.) Combining this with a symmetric result for the top part of $\partial\D$ and an argument involving the restriction property, see Lemma~\ref{lem:resexc}, the equality $u_*^c(r)=\pi/3$ follows easily. We now comment on the fact that the value of $u_*^c(r)$ does not depend on the choice $r \in [0,1).$ Any ball of radius $r \in [0,1)$ centred at the origin is almost surely hit by only finitely many excursions of the Brownian excursion process.
These, however, could be removed at finite probabilistic cost due to an insertion-deletion tolerance property, as can be argued by use of the explicit formulas for the underlying Poisson point process, see Remark~\ref{rk:othercontperco},\ref{finiteenergy}. Hence the question of occurrence of percolation can be answered by looking at the excursions that are entirely contained in the complement of this ball within the disk. This heuristic idea is also consistent with an interpretation of the unit disk as a model for hyperbolic space, where, in fact, each Brownian excursion can be interpreted as an ``infinitely long loop''. From this point of view, the infinitely long part corresponds to that part of the excursion which is infinitesimally close to the boundary of the unit disk, which hence is the crucial part for the investigation of percolation. We now turn to the proof that the discrete critical parameter $u_*^d(r)$ is equal to $\pi/3.$ The Brownian excursion cloud is the scaling limit of the random walk excursion cloud on $\D_n,$ see \cite{kozdron_scaling_2006,ArLuSe-20a}. In particular, one can compare the probabilities of connection events for the two models, see Lemma~\ref{lem:approxconnection}, which yields the equality $u_*^d(r)=u_*^c(r).$ We work with the KMT coupling of random walk with Brownian motion. The proof relies on counting the number of trajectories hitting a large ball, see \eqref{e.eqmeasexpr1} and Proposition~\ref{prop:localdescription}, and Beurling-type estimates, see Lemma~\ref{Lem:simpleRWresult}, to ensure that two random walk excursions which are asymptotically (as $n\rightarrow\infty$) arbitrarily close to each other eventually intersect. This will allow us to conclude that the random walk excursion cloud will eventually form an interface between $0$ and $\partial\D$ whenever the Brownian excursion cloud does. Actually, the statement from \cite{ArLuSe-20a} about the scaling limit of the random walk excursion cloud only lets us prove our discrete percolation result when considering the event that $0$ is connected to the boundary of the ball of radius $1-\eps,$ for a fixed $\eps>0.$ It is natural to wonder if one can in fact connect $0$ to the boundary of the discrete lattice $\D_n$ in the supercritical regime for the vacant set of random walk excursions. We partially answer this question by proving that $0$ is connected to a point in the vacant set at a mesoscopic distance of order $n^{-1/7}$ from the boundary, see Remark~\ref{rk:connectiontoboundary} for details. This is achieved by improving the coupling between discrete and continuous excursions, see Theorem~\ref{the:couplingdiscontexc1}. Let us now turn to the proof of the inequality $h_*^d(r)\leq \sqrt{\pi/2}$ for the percolation of the level sets of the dGFF. As we have already mentioned, the dGFF can be coupled via an isomorphism theorem to excursion clouds with intensity related to the height of the field and loop soups with intensity $1/2,$ see Proposition~\ref{prop:BEisom}. In particular, the level sets of the dGFF are contained in the vacant set associated to the union of the loop clusters which intersect the excursion cloud.
It was proved in \cite{werner-wu-cle} that the interfaces of this union are related to $\mathrm{SLE}_{4}(\rho)$ processes in a similar way as the interfaces of the excursion cloud were related to $\mathrm{SLE}_{8/3}(\rho).$ Using again explicit conditions on $\rho$ so that the $\mathrm{SLE}_{\kappa}(\rho)$ curve hits $\partial\D,$ but now for $\kappa=4,$ we deduce $h_*^d(r)\leq \sqrt{\pi/2}.$ Actually, the level $\sqrt{\pi/2}$ corresponds to the critical parameter for the vacant set associated to the union of the clusters of the level sets of the cable system GFF which intersect the boundary of $\D_n,$ see Remark~\ref{rk:exactcritparaGFF}. A similar result can be proved for the continuous GFF, see Corollary~\ref{cor:percocontGFF}. We also refer to Theorem~\ref{the:maindisexcloops} for an extension of the previous percolation result on excursion clouds plus loop soup when the intensity of the loop soup is between $0$ and $1/2$ (i.e.\ for $\kappa$ between $8/3$ and $4$). Next, we comment on the proof of the strict inequality $h_*^d(r)>0.$ In \cite{DiWiWu-20}, it is proved that there is percolation for the level sets of the dGFF above level $0$ when the domain is a rectangle, in the sense that one can connect the left-hand side of this rectangle to its right-hand side with non-trivial probability. We adapt their technique to our context, that is, when the domain is the unit disk $\D$, by considering the event that $B(r)$ is connected to the boundary of the disk at a small, but positive, level for the dGFF. Note that, contrary to usual Bernoulli percolation on $\Z^2,$ the existence of a large component containing $B(r)$ is not clearly equivalent to their left-to-right crossing event. Although very similar in spirit to \cite{DiWiWu-20}, our proof of $h_*^d(r)>0$ exhibits numerous technical differences, see for instance our definition of the exploration martingale below \eqref{eq:Mdef} or Lemma~\ref{Lem:bigcaponboundary}. Moreover, the strict inequality $h_*^d(r)>0$ implies a phase coexistence result for the level sets of the dGFF, in contrast to the percolation of the dGFF above level $0$ from \cite{DiWiWu-20}, see Remark~\ref{rk:mainthmgff},\ref{rk:phasecoexistence} for details. Finally, note that we expect that our results can be extended to other sufficiently regular Jordan domains besides the unit disk $\D$, for instance rectangles as studied in \cite{DiWiWu-20}. This is clearly the case for the Brownian excursion clouds by conformal invariance. For our results in the discrete setting some additional work would be required to prove convergence of discrete excursions to Brownian excursions in Section~\ref{s.coupling}, as well as the inequality $h_*^d(r)>0;$ see for instance Lemma~\ref{Lem:bigcaponboundary}. Since our main motivation for this article is the comparison with similar percolation results on $\Z^d,$ $d\geq3,$ which can be interpreted as finding large clusters connected to the boundary of the ball $B(n)$ as $n\rightarrow\infty,$ we have chosen to focus on the domain $\D$ here for the sake of exposition. The rest of this article is organized as follows. In Section~\ref{s.prelimin} we fix notation, define the discrete objects studied in this paper (the dGFF, random walk excursion cloud and loop soup), and state the main theorems in the discrete setting. Similarly, we define the objects and state our theorems in the continuous setting in Section~\ref{s.contdef}, as well as recall some standard results from SLE theory.
Section~\ref{s.coupling} is dedicated to the construction of various couplings between the discrete and continuous excursion clouds, which are used in Section~\ref{sec:discretecritpara} to compute the critical value associated to the vacant set of the excursion cloud, plus loop soups, in the discrete setting. Section~\ref{s:gfflvlperc} is centred around percolation for the Gaussian free field, and in particular the proof of the positivity of the associated critical parameter in the discrete setting. We also comment on the percolation for the Gaussian free field on the cable system. Finally, Appendix~\ref{sec:contperco} proves our main result on the critical value associated to the vacant set of the excursion cloud, plus loop soups, in the continuum setting. Appendix~\ref{sec:KMT} establishes some results on the KMT coupling (sometimes also referred to as the dyadic coupling) between a simple random walk and Brownian motion, and Appendix~\ref{app:convcap} proves convergence of discrete capacities to continuous capacities for compact sets. We use the following convention for constants. With $c,c',c''$ we denote strictly positive and finite constants that do not depend on anything unless explicitly stated otherwise. Their values might change from place to place. Numbered constants $c_1$ and $c_2$ follow a similar convention, except that they are fixed throughout the paper. \subsection*{Acknowledgements} The research of AD has been supported by the Deutsche Forschungsgemeinschaft (DFG) grants DR 1096/1-1 and DR 1096/2-1. OE is supported by the ERC Consolidator Grant 772466 ``NOISE''. AP was supported by the Engineering and Physical Sciences Research Council (EPSRC) grant EP/R022615/1, Isaac Newton Trust (INT) grant G101121, European Research Council (ERC) starting grant 804166 (SPRS) and the Swiss NSF. JT is supported by the Swedish research council. FV was supported by the Knut and Alice Wallenberg Foundation, the Swedish research council, and the Ruth and Nils-Erik Stenb\"ack Foundation. We are grateful to Juhan Aru and Titus Lupu for helpful discussions. \section{Definitions and results for the discrete models}\label{s.prelimin} \subsection{Notation} \label{sec:notation} Throughout this paper we let $\D \subset \BC$ denote the open unit disk, endowed with the Euclidean distance $d(\cdot,\cdot).$ We denote by $\partial\D$ its boundary and by $\overline{\D}$ its closure, the closed unit disk. We define $B(x,r):=\{y\in{\D}:\,d(x,y)< r\}$ for all $x\in{\D}$ and $r\in{(0,1)},$ $\overline{B}(x,r):=\{y\in{\D}:\,d(x,y)\leq r\}$ and for each $K\subset\D,$ define $B(K,r):=\{x\in{\D}:\,\inf_{y\in{K}}d(x,y)<r\}.$ We abbreviate $B(r):=B(0,r)$ and $\overline{B}(r):=\overline{B}(0,r),$ and also take the convention $B(0):=\{0\}.$ We also define the distance $d(A,B)$ between two sets $A$ and $B$ as the smallest Euclidean distance between points in $A$ and points in $B.$ We write $K \Subset \D$ to indicate that $K$ is a compact subset of $\D,$ and let $\I_A$ denote the indicator function of a set $A$. Let $\D_n$ be the graph with vertex set $n^{-1} \Z^2 \cap \D$ and edge set given by the nearest-neighbor edges in $n^{-1}\Z^2$ that are included in $\D.$ For $K \subset \D,$ we write \begin{equation} \label{eq:setDiscreteApprox} K_n := \D_n \cap K.
\end{equation} Furthermore, denote by $\partial{\D}_n$ the neighboring vertices of $\D_n$ in $n^{-1}\Z^2 \setminus \D_n,$ and let $\overline{\D}_n:=\D_n\cup\partial\D_n.$ Let $\widetilde \D_n$ be the cable system associated to the graph $ \D_n$ with unit weights and infinite killing on $\partial\D_n,$ also sometimes called the metric graph. It is constructed by gluing together intervals $I_{\{x,y\}}$ of length $1/2$ (independent of $n$) through their endpoints for all $x\sim y\in{\overline \D_n},$ ($x\sim y$ means that $x$ and $y$ are neighbors in $n^{-1}\Z^2$) where $I_{\{x,y\}}$ is open at $x$ if $x\in{\partial\D_n}$ and closed at $x$ if $x\in{\D_n},$ and similarly at $y.$ We refer to \cite[Section~2]{Lu-14} for a more general setting. For each $x\in{\D_n}$ and $r\in{[0,1)},$ we define $B_n(x,r):=(B(x,r))\cap \D_n,$ abbreviate $B_n(r):=B_n(0,r),$ let $\overline{B}_n(r):=\overline{B}(r)\cap\D_n,$ and $\tilde{B}_n(r)$ be the union of $I_e$ over all edges $e$ between two vertices of $B_n(r),$ and let $B_n(K,r):=B(K,r)\cap\D_n$ for each $K\subset\D_n.$ For $A,B \subset \D$ and $F\subset\overline{\D}$ we write \begin{equation} \label{eq:discreteConnect} A \stackrel{F}{\longleftrightarrow}B, \end{equation} or sometimes $A\leftrightarrow B$ in $F,$ to express one of the following facts: if $F\cap(\D\setminus{\D}_n)\neq\varnothing,$ then there is a continuous path $\pi\subset F$ starting in $A$ and ending in $B;$ otherwise, it means that there is a nearest neighbor path $(x_1, x_2, \ldots, x_m)$ in $F=F_n$ with $x_1 \in A_n$ and $x_m \in B_n.$ In other words, if $F$ is a non-discrete set $\longleftrightarrow$ means continuous connections, whereas if $F$ is discrete it means discrete connections, and we see subsets of the cable system as non-discrete sets. We write $A\stackrel{F}{\centernot \longleftrightarrow}B$ for the complement of the event \eqref{eq:discreteConnect}. Moreover, we say that a set $F\subset \D$ is connected if $x\leftrightarrow y$ in $F$ for all $x,y\in{F}.$ Note that the notations $\longleftrightarrow$ and $\centernot \longleftrightarrow,$ as well as the notion of connectedness, can depend on the choice of $n,$ which should always either be clear from context, or not depend of the choice of $n$ (if $F\cap(\D\setminus\D_n)\neq\varnothing$ for all $n\in\N$). We let $(X_k^{(n)})_{k \geq 0}$ denote the simple random walk on $n^{-1} \Z^2$ killed upon hitting the outer boundary $\partial \D_n$, which we abbreviate by $X:=(X_k)_{k\ge 0}$ whenever the dependency on $n$ is clear. 
We denote by $\Pm_x^{(n)}$ the law of $(X_k^{(n)})_{k\geq0}$ on $\D_{n}$ starting at $x\in{\D_n}.$ For $K \subset \D_n,$ denote for $x,y \in \D_n$ the Green function \begin{equation} \label{eq:killedGreen} G_{K}^{(n)}(x,y) := \frac14\E_x^{(n)} \Big[\sum_{k = 0 }^{\tau_{\partial K\cup\partial\D_n} } \I \{ X_k^{(n)} =y \} \Big] \end{equation} of the walk killed outside $K.$ We abbreviate $G^{(n)}:=G^{(n)}_{\D_n}.$ We define the hitting and return times of $X^{(n)}$ for a set $K \subset \overline{\D}_n$ as \[ \tau_K^{(n)} := \inf \left\{ k \geq 0:\, X_k^{(n)} \in K\right\}\text{ and }\widetilde{\tau}_{K}^{(n)}:=\inf\left\{k\geq1:\,X_k^{(n)}\in{K}\right\}, \] with the convention $\inf\varnothing=+\infty,$ and the last exit time is defined by \begin{equation} \label{eq:deftauLn} L_K^{(n)} := \sup \left\{k \geq 0 :\, X_k^{(n)} \in K\right\}, \end{equation} with the convention $\sup\varnothing=-\infty.$ We define the equilibrium measure and capacity of a set $K\subset \D_{n}$ as follows: \begin{align} \begin{split} \label{defequiandcap} e_{K}^{(n)}(x)&:=4\Pm_x^{(n)}(\tau_{\partial \D_n}^{(n)}<\widetilde{\tau}_{K}^{(n)}), \text{ for all }x \in K,\text{ and } \\ \mathrm{cap}^{(n)}(K)&:=\sum_{x\in{K}}e^{(n)}_{K}(x). \end{split} \end{align} We also write $\overline{e}_K^{(n)}:=e_K^{(n)}/\mathrm{cap}^{(n)}(K)$ for the normalized equilibrium measure of $K.$ In view of \cite[(1.56)]{MR2932978} we have \begin{equation} \label{eq:lastexitdis} \Pm_x^{(n)} \big(L_K^{(n)}>0,X_{L_K^{(n)}}=y\big) = G^{(n)}(x,y) e_{K}^{(n)}(y)\text{ for all }K\subset \D_n,\ x\in{\D_n}\text{ and }y\in{K}. \end{equation} Denote for $K_n \subset \D_n$ its ``discrete outer''\ part visible from $\partial \D_n $ by \begin{equation} \label{eq:outer} \partial K_n:=\big \{ x \in \overline \D_n \setminus K_n \, : \, \exists\, y \in K_n \text{ with } x \sim y, \, \Pm_x(\tau_{\partial \D_n } < \tau_{K_n}) > 0 \big \} \end{equation} as well as ``the support of its equilibrium measure as seen from $\partial \D_n$''\ by \begin{equation}\label{eq:interior} \widehat{\partial} K_n:=\big\{ x \in K_n \, : e_{K_n}^{(n)}(x)>0\big \}. \end{equation} Note that the notations $\partial\D_n$ introduced below \eqref{eq:setDiscreteApprox} and in \eqref{eq:outer} are consistent, and that $\widehat{\partial}\D_n$ is just the set of neighbors in $\D_n$ of $\partial\D_n.$ On the other hand, if $K\subset \D$ is such that $K\cap(\D\setminus\D_n)\neq\varnothing$ for all $n\in{\N},$ we denote by $\partial K$ the topological boundary of $K.$ \subsection{Discrete Gaussian Free Field} \label{sec:dGFF} The discrete Gaussian free field (dGFF) on $\D_n$ with zero boundary condition is the centred Gaussian vector $(\varphi_x)_{x \in \D_n} $ with covariance given by \begin{equation} \label{eq:defgff} \E \left[\varphi_x\varphi_y\right]= G^{(n)}(x,y)\text{ for all }x,y\in{\D_n}, \end{equation} cf.\ \eqref{eq:killedGreen} for notation.
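As an aside, and although nothing below relies on simulation, the following minimal Python/\texttt{numpy} sketch may help to visualize this definition; it assumes the standard identification of the covariance \eqref{eq:defgff} with the inverse of the graph Laplacian of $\D_n$ with unit conductances and Dirichlet condition on $\partial\D_n,$ and the helper names are ours and purely illustrative.
\begin{verbatim}
import numpy as np

def disk_vertices(n):
    # vertices of D_n = n^{-1} Z^2 intersected with the open unit disk
    pts = [(i, j) for i in range(-n, n + 1) for j in range(-n, n + 1)
           if i * i + j * j < n * n]
    return pts, {p: k for k, p in enumerate(pts)}

def dirichlet_laplacian(n):
    # Laplacian of D_n with unit conductances; the diagonal entry is 4 at
    # every vertex, since edges leading to the killed boundary still count.
    # Its inverse is the covariance G^{(n)} of the dGFF.
    pts, idx = disk_vertices(n)
    L = np.zeros((len(pts), len(pts)))
    for (i, j), k in idx.items():
        L[k, k] = 4.0
        for di, dj in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            if (i + di, j + dj) in idx:
                L[k, idx[(i + di, j + dj)]] = -1.0
    return pts, L

def sample_dgff(n, seed=0):
    # one sample of (phi_x)_{x in D_n} with zero boundary condition
    pts, L = dirichlet_laplacian(n)
    chol = np.linalg.cholesky(np.linalg.inv(L))
    return pts, chol @ np.random.default_rng(seed).standard_normal(len(pts))

pts, phi = sample_dgff(20)
level_set = [p for p, value in zip(pts, phi) if value >= 0.3]
print(len(pts), len(level_set))
\end{verbatim}
Thresholding the sample at a level $h,$ as in the last two lines, produces the excursion set defined next; for large $n$ one would of course replace the dense inverse and Cholesky factorization by sparse solvers.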
We write \begin{equation} \label{eq:deflevelsets} E^{\ge h}_n:= \left\{ x \in \D_n : \varphi_x \geq h \right\} \end{equation} for the excursion set above level $h$ of $\varphi$ in $\D_n.$ \subsection{Discrete excursion process and loop soup}\label{s.de} The discrete excursion measure is a measure which is supported on nearest neighbor paths that start and end in $\partial \D_n,$ and otherwise are contained in $\D_n.$ Let \[ W^{(n)} := \left\{ e \in \bigcup_{m \geq1} \overline{\D}_n^{m+1} : [e] \subset \D_n, e(0), e(m)\in \partial \D_n ,\,e(i)\sim e(i+1)\,\forall i<m \right\} \] denote the set of discrete excursions in $\D_n,$ where $[e]:=\{e(1),\dots,e(m-1)\}$ denotes the trace of the discrete excursion on $\D_n.$ Given an excursion $e=(e_0,e_1,\ldots,e_m)$ we let $t_e=m$ denote its lifetime. Given $K \subset \D_n,$ let $W_K^{(n)} := \left\{ e \in W^{(n)}: [e] \cap K \neq \varnothing \right\}$ denote all excursions that intersect $K$. Moreover, we denote by ${\Pm}^{(n)}_x$ the law under which $X$ is simple random walk on $\frac1n\Z^2,$ starting at $x\in{\overline{\D}_n},$ and killed after the first return time $\widetilde{\tau}_{\partial \D_n}$ to $\partial\D_n$ (note that $\widetilde{\tau}_{\partial \D_n}=\tau_{\partial\D_n}$ if $x\in{\D_n}$). For $x,y \in \overline{\D}_n$ we let \begin{equation} \label{eq:defdisPoisson} \CH_n (x,y) := 4{\Pm}_x^{(n)}( X_1 \in \D_n, X_{\widetilde{\tau}_{\partial \D_n}} =y) \end{equation} denote the discrete boundary Poisson kernel and note that $\CH$ is symmetric. We let \[ {\Pm}_{x,y}^{(n)} \left(\, \cdot \, \right) := {\Pm}_x^{(n)} \left( \cdot \, |\, X_1 \in \D_n, X_{\widetilde{\tau}_{\partial \D_n}} =y \right) \] denote the law of the random walk excursion from $x$ to $y.$ The discrete excursion measure is then defined by \begin{equation} \label{eq:defmun} \mu^{(n)}_{\rm exc} := \sum_{x,y \in \partial \D_n} \CH_n(x,y) {\Pm}^{(n)}_{x,y}. \end{equation} Alternatively, one can write the measure $\mu^{(n)}_{\rm exc}$ as follows \begin{equation*} \mu^{(n)}_{\rm exc} = \sum_{x \in \partial \D_n}4{\Pm}_x^{(n)}(\, \cdot \, , X_1\in{\D_n}). 
\end{equation*} Writing $\pi^{(n)}$ for the map that sends \( (e_0,e_1,...,e_m) \in W^{(n)} \) to $(e_1,...,e_{m-1})$ in the space of nearest-neighbor trajectories on $\D_n$ starting and ending in $\widehat \partial \D_n$ (and hence forgets about the first and last visited vertex), we get \begin{equation} \label{eq:muexcasinter} \mu_{{\rm exc}}^{(n)} \circ (\pi^{(n)})^{-1} =\sum_{x\in{\widehat{\partial} \D_n}} \kappa_x \Pm_x^{(\kappa,n)}, \end{equation} where $\kappa_x:=|\{y\in{\partial \D_n }:\,y\sim x\}|,$ and $\Pm_x^{(\kappa,n)}$ denotes the law of the random walk on the weighted graph $(\D_n,1,\kappa),$ that is, $\D_n$ with unit conductances and killing measure $\kappa.$ In view of \cite[Theorem~3.2]{Pre1} (with $F=\D_n$), see also \cite[(2.12)]{MR2892408}, modulo time shift, the measure in \eqref{eq:muexcasinter} corresponds to the interlacements measure for the weighted graph $(\D_n,1,\kappa).$ The corresponding discrete excursion process is a Poisson point process $\omega^{(n)}$ on $W^{(n)}\times\R^+$ with intensity measure $\mu^{(n)}_{\rm exc}\otimes\mathrm{d}\intens,$ under some probability which we will denote by $\Pm.$ We write $\omega^{(n)}_{\intens}$ for the point process which consists of the trajectories in $\omega^{(n)}$ with label at most $\intens,$ and the associated occupied and vacant sets are given by \begin{equation} \label{eq:VunDef} \CI^{\intens}_n := \bigcup_{e \in \supp(\omega^{(n)}_{\intens})} [e]\text{ and }\CV^{\intens}_n := \D_n \setminus \CI^{\intens}_n. \end{equation} In view of \eqref{eq:muexcasinter} and the discussion below it, one can describe the restriction of $\omega_{\intens}^{(n)}$ to the trajectories hitting a compact $K\subset \D_n$ as follows. \begin{proposition} \label{prop:interoncompacts} For $K\subset \D_n,$ let $N_K^{(n)}\sim\text{Poi}(\intens \, \mathrm{cap}^{(n)}(K)).$ Conditionally on $N_K^{(n)},$ we let $(X^{(n),i})_{i=1}^{N_K^{(n)}}$ be a collection of independent random walks on $\D_n$ with law $\Pm_{\overline{e}_K^{(n)}}^{(n)}.$ Then $\sum_{i=1}^{N_K^{(n)}}\delta_{X^{(n),i}}$ has the same law as the point process of forward trajectories in $\omega_{\intens}^{(n)}$ hitting $K,$ started at their first hitting time of $K.$ \end{proposition} \begin{remark} \phantomsection\label{rk:defexcursion} \begin{enumerate}[label=\arabic*)] \item \label{rk:excursionviaoneRW} One can also describe directly the law of all the excursions in $\omega_{\intens}^{(n)}$ with a single random walk. Using network equivalence, one can collapse $\partial \D_n$ into a single vertex $x_n,$ and we denote by $(Y_t)_{t\geq0}$ under $\overline{\Pm}^{(n)}_{x_n}$ the random walk on $\D_n \cup\{x_n\}$ starting from $x_n,$ which jumps along any edge between $x_n$ and $x\in \widehat \partial \D_n$ at rate $1,$ and along any edge between $x\in{\D_n}$ and $y\in{\D_n},$ $y\sim x,$ at rate $1.$ Let $\tau_{\intens}:=\inf\{t\geq0:\,\ell_{x_n}(t)\geq \intens\},$ where $\ell_{x_n}(t)$ denotes the total time spent by $Y$ in $x_n$ up to time $t.$ Then the point process consisting of the excursions in $\D_n$ of $(Y_t)_{t\leq\tau_{\intens}}$ under $\overline{\Pm}^{(n)}_{x_n}$ has the same law as $\omega_{\intens}^{(n)}.$ We refer for instance to \cite[(2.8)]{MR2892408} for a proof. \item \label{rk:choiceofweight}We now comment on the reason for choosing to include the factor $4$ in the definition \eqref{eq:defdisPoisson} of the discrete boundary Poisson kernel and in the definition \eqref{defequiandcap} of the equilibrium measure.
The reason is to ensure the convergence of the discrete excursion process to the continuous excursion process at the same level, see Theorem~\ref{the:couplingdiscontexc1}. Similarly, the factor $1/4$ in the definition \eqref{eq:killedGreen} of the discrete Green function is to ensure convergence to the continuous Green function, see \eqref{e.greenest}. One can interpret this choice of constants as considering $\D_n$ as a weighted graph with weight one between two neighbors (and thus total weight $4$ at each vertex) and infinite killing on $\partial\D_n,$ or equivalently the graph $(\D_n,1,\kappa)$ from below \eqref{eq:muexcasinter}. Similarly, the length of the cables $I_e$ on the cable system, see Section~\ref{sec:notation}, is chosen to be $1/2$ since it corresponds to our choice of unit conductances, see for instance \cite{Lu-14}. \end{enumerate} \end{remark} In a similar fashion one can define the random walk loop soup. We say that $\ell =(\ell_0, \ell_1,...,\ell_k)\in \D_n^{k+1}$ is a rooted loop if it is a nearest neighbor path such that $\ell_0=\ell_k,$ whereas an unrooted loop is a rooted loop modulo time shift, that is an equivalence class of rooted loops where two loops $\ell$ and $\ell'$ are equivalent if $\ell = \ell'(\cdot+t)$. The (rooted) loop measure, see for instance \cite{LJ-11}, on $\D_n$ is defined by \begin{equation} \label{eq:defdisloop} \nu_{{\rm loop}}^{r,(n)}(\cdot) := \sum_{x \in \D_n} \int_0^\infty \Pm_{x,x}^{t,(n)}(\, \cdot \,, {\tau}_{\partial \D_n} > t) p_t^{(n)}(x,x) \frac{1}{t}\, {\rm d}t, \end{equation} where $p_t^{(n)}(x,y)$ denotes the family of transition probabilities for the continuous time random walk induced by the (constant $1$) conductances on $n^{-1}\Z^2$ and $\Pm_{x,y}^{t,(n)}$ is the corresponding bridge probability measure. The unrooted loop measure $\nu_{{\rm loop}}^{(n)}$ is given by the image of $\nu_{{\rm loop}}^{r,(n)}$ via the canonical projection on unrooted loops modulo time shift. The random walk loop soup on $\D_n$ with parameter $\lambda>0$ is defined as a Poisson point process with intensity $\lambda\nu_{{\rm loop}}^{(n)}.$ \subsection{Cable system} \label{sec:cableSystemExc} Recall the definition of the cable system $\widetilde{\D}_n$ below \eqref{eq:setDiscreteApprox}. Under some probability $\tilde{\Pm}^{(n)}_x,$ $x\in{\widetilde \D_n},$ we denote by $\widetilde{X}^{(n)}$ the canonical diffusion on $\widetilde{\D}_n$ starting in $x,$ which behaves like a standard Brownian motion inside $I_{\{y,z\}},$ $y\sim z\in{\overline{\D}_n},$ killed when reaching the open end of $I_{\{y,z\}}$ if either $y\in{\partial\D_n}$ or $z\in{\partial\D_n},$ and such that the discrete time process which corresponds to the successive visits of $\widetilde{X}^{(n)}$ in $\D_n$ is the random walk $(X_k^{(n)})_{k\in{\N}}$ on $\D_n$ under $\Pm_x^{(n)}.$ We refer to \cite[Section~2]{Lu-14} for details. Let us remark that we chose to work with segments $I_{\{x,y\}}$ of length $1/2$ and standard Brownian motion, instead of segments $I_{\{x,y\}}$ of length $1$ and non-standard Brownian motion as in \cite{ArLuSe-20a}. This simply corresponds to a time change of the Brownian motion, and since our results will be independent of this time change we will from now on use the results of \cite{ArLuSe-20a} without paying attention to this different convention. For the cable graph $\widetilde{\D}_n$ we denote the analogously defined quantities by putting a tilde $\widetilde{\cdot}$ on them. 
In particular, the Gaussian free field (GFF) on the cable system, denoted by $(\tilde{\varphi}_x)_{x\in{\widetilde{\D}_n}}$, is defined as in \eqref{eq:defgff} but for all $x,y\in{\widetilde{\D}_n},$ where $G^{(n)}$ from \eqref{eq:killedGreen} can be extended consistently to the cable system $\widetilde{\D}_n$ as the Green function associated with the diffusion $\tilde{X}.$ There is a simpler construction of the GFF $\tilde{\varphi}$ on the cable system $\widetilde{\D}_n$ from the dGFF on $\D_n$: conditionally on the dGFF $\varphi$ on $\D_n,$ $\tilde{\varphi}_{|I_e}$ for each edge $e=\{x,y\}$ can be obtained by running a (conditionally) independent length-$1/2$ Brownian bridge (that is, a non-standard Brownian bridge obtained from a Brownian motion of variance $2$ at time $1$) on the interval $I_e$ starting from $\varphi_x$ and ending at $\varphi_y$. In particular, $\tilde{\varphi}_x=\varphi_x$ for all $x\in{\D_n},$ and, denoting by $\widetilde{E}^{\ge h}_n:=\{x\in{\widetilde{\D}_n}:\,\tilde{\varphi}_x\geq h\}$ the excursion set in the cable system at level $h\in\R,$ we have ${E}^{\ge h}_n=\widetilde{E}^{\ge h}_n\cap\D_n.$ The intensity $\tilde{\mu}_{\text{exc}}^{(n)}$ of the excursion process on the cable system can be defined by extending the definition \eqref{eq:defmun} to the cable system, see \cite[Section~2.2]{ArLuSe-20a} with $u(x)=\sqrt{2}$ for details (replacing non-standard Brownian motions by standard ones). Under some probability measure $\tilde{\Pm}^{(n)},$ for each $u>0$ we then define the excursion process $\tilde{\omega}_u^{(n)}$ as a Poisson point process with intensity measure $u\tilde{\mu}_{\text{exc}}^{(n)}.$ We furthermore let $\tilde{\be}_n^u\subset\widetilde{\D}_n$ be the set of points visited by a trajectory in $\tilde{\omega}_u^{(n)},$ and denote by $\tilde{\V}_n^u:=(\tilde{\be}_n^u)^\ch$ its complement in $\widetilde{\D}_n.$ Alternatively, one could equivalently define $\tilde{\omega}_u^{(n)}$ as the random interlacements process at level $u$ on the cable system of the weighted graph defined below \eqref{eq:muexcasinter}, see \cite[Section~3]{Pre1}; or, yet another equivalent way is to define it as the excursions on $\widetilde{\D}_n$ of the diffusion starting from $x_n,$ and run until spending total time $u$ in $x_n,$ for the cable system associated to the graph $\D_n\cup\{x_n\}$ from Remark~\ref{rk:defexcursion}, \ref{rk:excursionviaoneRW}. In particular, extending the definition of the equilibrium measure $e_K^{(n)}$ and capacity $\mathrm{cap}^{(n)}(K)$ of compacts $K\subset\widetilde{\D}$ similarly as in \cite[(2.16) and (2.19)]{DrePreRod3}, one can extend Proposition~\ref{prop:interoncompacts} to compacts $K\subset\widetilde{\D}_n,$ see \cite[Theorem~3.2]{Pre1}. Similarly as for the diffusion $\tilde{X}^{(n)},$ one can obtain $\tilde{\omega}_u^{(n)}$ by adding standard Brownian motion excursions on the cables $I_{\{x,y\}},$ $x\sim y,$ to the excursions in $\omega_u^{(n)},$ and we will from now on always assume that $\tilde{\omega}_u^{(n)}$ and $\omega_u^{(n)}$ are coupled in that way. In particular, $\be^u_n=\tilde{\be}^u_n\cap\D_n$ and $\V^u_n=\tilde{\V}^u_n\cap\D_n.$ We finally introduce the loop intensity measure $\tilde{\nu}_{\rm{loop}}$ on loops in the cable system $\widetilde{\D}_n,$ and refer to \cite{Lu-14} for a rigorous definition. The projection of this measure on the discrete part of the loops which hit $\D_n$ simply corresponds to the measure introduced in \eqref{eq:defdisloop}.
The cable system loop soup with parameter $\lambda>0$ is a Poisson point process with intensity $\lambda\tilde{\nu}_{\rm{loop}}.$ We collect these loops into connected components, which we will refer to as the loop clusters on the cable system. We always assume that under the probability $\Pm,$ the loop soup and the excursion process are independent. For each $u,\lambda\ge 0,$ we denote by $\tilde{\be}_n^{u,\lambda}$ the closure of the union of all the loop clusters on the cable system of the loop soup at intensity $\lambda,$ which intersect $\tilde{\be}_n^u.$ It is then consistent to set \begin{equation} \label{eq:BuuVuuDef} \tilde{\be}_n^{u,0}:=\tilde{\be}_n^u \quad \text{ as well as } \quad \tilde{\V}_n^{u,\lambda}:=\widetilde{\D}_n\setminus\tilde{\be}_n^{u,\lambda}\text{ for all } \lambda\geq0. \end{equation} \subsection{Isomorphism theorem} \label{sec:iso} A key tool in our investigations is provided by isomorphism theorems on the cable system relating a Gaussian free field on the one hand, and independent Brownian excursions as well as an independent Gaussian free field on the other hand. Such results have a long history, dating back to Dynkin's isomorphism theorem~\cite{MR693227,MR734803}, which found its motivation in earlier work by Brydges, Fröhlich and Spencer \cite{BrFrSp-82} and Symanzik \cite{Sy-69}. More recent developments include the second Ray-Knight isomorphism for random walks \cite{MR1813843} or the isomorphism between random interlacements and the Gaussian free field \cite{MR2892408}. For our purposes, we will only need a relatively soft result, which is a direct consequence of this isomorphism for the excursion sets of the GFF as can be found in \cite{ArLuSe-20a}. \begin{proposition} \label{prop:BEisom} For each $\intens>0$ and $n\in\N,$ there exists a coupling between $\widetilde{E}^{\geq \sqrt{2\intens}}_n$ and $\tilde{\V}^{\intens,1/2}_n$ such that almost surely, \begin{equation} \label{eq:iso} \widetilde{E}^{\geq \sqrt{2\intens}}_n\subset\tilde{\V}^{\intens,1/2}_n. \end{equation} \end{proposition} \begin{proof} We use \cite[Proposition~2.4]{ArLuSe-20a} applied with the constant boundary condition equal to $\sqrt{2u},$ and note that the excursion process with boundary condition $\sqrt{2u}$ defined therein has the same law as our excursion process at level $u$ (compare the normalization in \cite[(2.3)]{ArLuSe-20a} to the one in \eqref{eq:defmun}). On $\tilde{\be}_n^{u,1/2},$ the sign $\sigma$ from \cite[Proposition~2.4]{ArLuSe-20a} is equal to $1,$ and thus $\tilde{\be}_n^{u,1/2}$ is stochastically dominated by $\widetilde{E}^{\geq-\sqrt{2u}}_n.$ We conclude by taking complements and using the symmetry of the GFF. \end{proof} Note that to prove Proposition~\ref{prop:BEisom} one actually does not need the full strength of \cite[Proposition~2.4]{ArLuSe-20a}, but only a cable system version of \cite[Proposition~2.3]{ArLuSe-20a}, by proceeding similarly as in the proof of \cite[Theorem~3]{Lu-14}. We will assume from now on that the field, the loop soup and the excursion process on the cable system are coupled under $\Pm$ so that \eqref{eq:iso} holds. Note that \eqref{eq:iso} implies that ${E}_n^{\geq\sqrt{2u}}\subset\tilde{\V}_n^{u,1/2}\cap\D_n\subset\V^u_n.$ One immediately deduces the weak inequality $h_*^d(r)\leq\sqrt{2\intens_*^d(r)}$ for all $r\in{(0,1)},$ see \eqref{def:u*} and \eqref{def:h*}, which will be strengthened as a consequence of our results, see \eqref{eq:strictinequalitycritpara}.
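Let us spell out this deduction for the reader's convenience. Fix $r\in{(0,1)}$ and $u>\intens_*^d(r).$ Under the coupling just described, the inclusion ${E}_n^{\geq\sqrt{2u}}\subset\V^u_n$ implies, for every $\eps>0$ and $n\in\N,$
\begin{equation*}
\Pm\Big(B_n(r)\stackrel{E^{\ge \sqrt{2u}}_n}{\longleftrightarrow} \partial B_n(1-\eps)\Big)\leq\Pm\Big(B_n(r)\stackrel{\mathcal{V}^{u}_n}{\longleftrightarrow} \partial B_n(1-\eps)\Big).
\end{equation*}
Since $\V_n^{u}\subset\V_n^{u'}$ whenever $u'\leq u,$ the right-hand side vanishes in the limits appearing in \eqref{def:u*} for every $u>\intens_*^d(r),$ hence so does the left-hand side, which gives $h_*^d(r)\leq\sqrt{2u};$ letting $u\downarrow\intens_*^d(r)$ yields $h_*^d(r)\leq\sqrt{2\intens_*^d(r)}.$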
\begin{remark} \label{rk:otheriso} As we explained below \eqref{eq:muexcasinter}, see also Proposition~\ref{prop:interoncompacts}, the excursion process $\omega_u^{(n)}$ can be seen as a random interlacements process on $\D_n$ with infinite killing on $\partial\D_n.$ An isomorphism theorem~between the GFF and random interlacements was first proved in \cite{MR2892408} on discrete graphs, and extended in \cite{Lu-14} to the cable system. Even if these references only consider random interlacements on graphs with zero killing measure, their results can easily be extended to graphs with any killing measure, see \cite[Remark~2.2]{DrePreRod3}. Combining this isomorphism between the GFF and random interlacements with the isomorphism between loop soups and random interlacements from \cite{LJ-11,Lu-14}, one easily obtains an alternative proof of the inclusion \eqref{eq:iso}. Using Remark~\ref{rk:defexcursion}, \ref{rk:excursionviaoneRW}, one could alternatively see the inclusion \eqref{eq:iso} as a consequence of the second Ray-Knight Theorem~between Markov processes and the GFF from \cite{MR1813843}. The stronger version of the isomorphism found in \cite[Proposition~2.4]{ArLuSe-20a} can also be seen as an isomorphism for random interlacements, see \cite[Theorem~2.4]{MR3492939} or \cite[Theorem~1.1, 2)]{DrePreRod3}, or for Markov processes, see the proof of \cite[Theorem~8]{MR3978220}. \end{remark} \subsection{Discrete results} Recall the notation \eqref{eq:discreteConnect}. For each $r\in{[0,1)}$ we define \begin{equation} \label{def:u*} \intens_*^d(r):=\inf\Big\{\intens\geq0:\,\lim\limits_{\eps\rightarrow0}\liminf_{n\rightarrow\infty}\Pm\Big(B_n(r)\stackrel{\mathcal{V}^{\intens}_n}{\longleftrightarrow} \partial B_n(1-\eps)\Big)=0\Big\}. \end{equation} Our first main result, proved at the end of Section~\ref{sec:discretecritpara}, identifies the parameter $\intens_*^d(r),$ and actually proves that in the supercritical phase one can have a connection up to polynomially small distance from the boundary, see \eqref{eq:percowithoutloops}. \begin{theorem} \label{the:maindisexc} For each $r\in{[0,1)}$ and $u\geq\pi/3,$ \begin{equation} \label{eq:percowithoutloopssub} \lim\limits_{\eps\rightarrow0}\lim\limits_{n\rightarrow\infty}\Pm\Big(B_n(r)\stackrel{{\mathcal{V}}^{\intens}_n}{\longleftrightarrow} \partial B_n(1-\eps)\Big)=0, \end{equation} whereas for all $u<\pi/3$ and $r\in{[0,1)},$ \begin{equation} \label{eq:percowithoutloops} \liminf_{n\rightarrow\infty}\Pm\big(B_n(r)\stackrel{{\mathcal{V}}_n^{\intens}}{\longleftrightarrow}\partial B_n(1-n^{-1/7})\big)>0. \end{equation} In particular, $u_*^d(r)=\pi/3.$ \end{theorem} We now present an extension of Theorem~\ref{the:maindisexc} to the setting in which there is an additional loop soup on top of the discrete excursion process. Recall the notation $\tilde{\V}_n^{u,\lambda}$ introduced in \eqref{eq:BuuVuuDef}, and define the corresponding parameter, which equals $1/2$ times the central charge appearing in conformal field theory: \begin{equation} \label{eq:defckappa} \lambda(\kappa) := \frac{(8-3\kappa)(\kappa-6)}{4\kappa}\text{ for all }\kappa\in{[8/3,4]}.
\end{equation} Note that $\tilde{\mathcal{V}}_n^{u,\lambda(8/3)}\cap\D_n={\mathcal{V}}_n^u,$ and that the connection probabilities for $\tilde{\mathcal{V}}_n^{u,\lambda(8/3)}$ are the same as for $\V_n^u,$ since for each $x\sim y\in{\D_n},$ $\{x,y\}\subset{\V_n^u}$ if and only if $I_{\{x,y\}}\subset \tilde{\mathcal{V}}_n^{u,\lambda(8/3)}.$ Our second main result concerns the percolation of $\tilde{\mathcal{V}}_n^{u,\lambda(\kappa)},$ and is also proved at the end of Section~\ref{sec:discretecritpara}. \begin{theorem} \label{the:maindisexcloops} For each $r\in{[0,1)}$ and $\kappa\in{[8/3,4]},$ \begin{equation} \label{eq:percowithloops} \lim\limits_{\eps\rightarrow0}\liminf_{n\rightarrow\infty}\Pm\Big(B_n(r)\stackrel{\tilde{\mathcal{V}}^{\intens,\lambda(\kappa)}_n}{\longleftrightarrow} \partial B_n(1-\eps)\Big)=0\text{ if and only if }u\geq\frac{(8-\kappa)\pi}{16}. \end{equation} \end{theorem} Note that the statement \eqref{eq:percowithoutloops} for $\kappa=8/3$ is stronger in the supercritical phase than \eqref{eq:percowithloops} for other values of $\kappa,$ and we refer to Remark~\ref{rk:polycloseboundary} for more details on this. We now turn to our results for the percolation of the level sets of the dGFF. For $r\in{[0,1)}$ we define the critical level \begin{equation} \label{def:h*} h_*^d (r):= \inf\Big\{ h \in \R \, : \, \lim\limits_{\eps\rightarrow0}\liminf_{n \to \infty} \Pm \big (B_n(r) \stackrel{E^{\ge h}_n}{\longleftrightarrow}\partial B_n(1-\eps) \big) = 0 \Big\}. \end{equation} It follows from \eqref{eq:percowithloops} with $\kappa=4$ that there is no percolation in $\tilde{\mathcal{V}}^{\intens,1/2}_n$ for all $\intens\geq {\pi/4}.$ One can directly deduce from the isomorphism \eqref{eq:iso} that $\widetilde{E}^{\ge h}_n$ also does not percolate for all $h\geq\sqrt{\pi/2}.$ We refer to Remark~\ref{rk:exactcritparaGFF} as to why the parameter $\sqrt{\pi/2}$ can also be seen as a critical percolation parameter for the dGFF. Combining this observation with the methods from \cite{DiWiWu-20} to prove percolation of the dGFF at small positive levels, we obtain the following theorem, proved in Section~\ref{s:gfflvlperc}. \begin{theorem} \label{the:maindisgff} For each $r\in{(0,1)}$ we have \begin{equation} \label{eq:h*ineq} 0<h_*^d(r)\leq\sqrt{\frac{\pi}2}. \end{equation} Moreover, there exists $h>0$ such that \begin{equation} \label{eq:GFFconnectionBds} 0 < \liminf_{n \to \infty} \Pm \Big (B_n(r) \stackrel{E^{\ge h}_n}{\longleftrightarrow} \widehat{\partial}\D_n \Big) \le \limsup_{n \to \infty} \Pm \Big (B_n(r) \stackrel{E^{\ge h}_n}{\longleftrightarrow}\widehat{\partial} \D_n \Big) < 1. \end{equation} \end{theorem} In view of \eqref{eq:GFFconnectionBds}, one could also prove positivity of the critical parameter for the dGFF when replacing $1-\eps$ by $1$ in \eqref{def:h*}, and removing the limit as $\eps\rightarrow0.$ We refer to Remark~\ref{rk:connectiontoboundary} for more details on this alternative choice of the definition of the critical parameter. \begin{remark} \phantomsection\label{rk:mainthmgff} \begin{enumerate}[label=\arabic*)] \item This is drastically different from Bernoulli percolation where the density at criticality for bond percolation is $1/2,$ and is strictly larger than $1/2$ for site percolation (see \cite{Ke-80} for the former, which in combination with \cite[Theorem~3.28]{grimmett1999percolation} yields the latter). 
On the other hand, in our setting of site percolation for the dGFF level sets, the density at criticality is strictly smaller than $1/2$ by Theorem~\ref{the:maindisgff}; our results thus support the heuristic that ``positive correlations help percolation''. \item \label{rk:phasecoexistence} Theorem~\ref{the:maindisgff} entails a phase coexistence result for the components of the level sets of the Gaussian free field connected to $\partial\D.$ Indeed, fix some $h\in{(0,h_*^d(1/2))};$ then by monotonicity, for each $r\in{[1/2,1)},$ the limit as $n\rightarrow\infty$ of the probability that $B_n(r)$ is connected to $\widehat{\partial}\D_n$ in $E^{\ge h}_n$ is larger than $c,$ for some positive constant $c$ not depending on $r.$ Moreover, using the isomorphism~\eqref{eq:iso}, the symmetry of the dGFF and the fact that the limit as $n\rightarrow\infty$ of the probability that the random walk excursion cloud hits $B_n(r)$ converges to $1$ as $r\rightarrow1,$ see \eqref{capball} and Lemma~\ref{l.capconv}, one can easily show that there exists $r<1$ so that the probability that $B_n(r)$ is connected to $\widehat{\partial}\D_n$ in $\{x\in{\D_n}:\varphi_x<h\}$ is larger than $1-c/2.$ Therefore, for this choice of $r,$ with probability bounded away from $0$ as $n\rightarrow\infty,$ there is coexistence of a path in $\{x\in{\D_n}:\varphi_x<h\}$ and a path in $\{x\in{\D_n}:\varphi_x\geq h\},$ both connecting $B_n(r)$ to $\widehat{\partial}\D_n.$ Note that one could not easily deduce phase coexistence from the percolation of the sign clusters of the dGFF from \cite{DiWiWu-20}. \end{enumerate} \end{remark} A consequence of \eqref{eq:GFFconnectionBds} and the isomorphism theorem, Proposition~\ref{prop:BEisom}, is the following result for the excursion process, proved at the end of Section~\ref{s:gfflvlperc}. \begin{corollary} \label{cor:VunPercBds} For all $r\in{[0,1)},$ there exists $\intens > 0$ such that \begin{equation} \label{eq:ineqsVacantSetPerc} 0< \liminf_{n \to \infty} \Pm\big(B_n(r) \stackrel{\mathcal{V}_{n}^{\intens}}{\longleftrightarrow} \widehat{\partial}\D_n \big) \le \limsup_{n \to \infty} \Pm\big(B_n(r) \stackrel{\mathcal{V}_{n}^{\intens}}{\longleftrightarrow} \widehat{\partial}\D_n \big) < 1. \end{equation} \end{corollary} Compared to Theorem~\ref{the:maindisexc}, Corollary~\ref{cor:VunPercBds} is stronger in the sense that we do have a full connection up to the boundary; however, it is weaker in the sense that it only holds for some $\intens> 0$ small enough, whereas the connectivity of Theorem~\ref{the:maindisexc} is valid throughout the entire supercritical phase. On the cable system one can define a critical parameter $\tilde{h}_*^d(r)$ similarly to $h_*^d(r)$ in \eqref{def:h*}, but with ${E}_n^{\ge h}$ replaced by $\widetilde{E}_n^{\ge h}.$ Combining \cite[Lemma~4.13 and Corollary~5.1]{ArLuSe-20a} with \cite[Theorem~1]{Lu-14}, we have the following result. \begin{proposition}[\cite{ArLuSe-20a}] \label{the:htilde=0} For all $r\in{(0,1)},$ $\tilde{h}_*^d(r)=0,$ and there is no percolation above level $h=0.$ \end{proposition} Although \cite[Corollary~5.1]{ArLuSe-20a} is stated on the box $\{-n,\dots,n\}^d,$ its proof could easily be adapted to the disk $\D_n,$ and we give an alternative proof in Remark~\ref{rk:cablegff}, \ref{rk:tilh*>0}. Moreover, contrary to what is stated therein, \cite[Corollary~5.1]{ArLuSe-20a} does not hold for $r=0,$ see Remark~\ref{rk:cablegff}, \ref{rk:r=0cablegff}.
Therefore, combining Theorems~\ref{the:maindisexc} and \ref{the:maindisgff} with Proposition~\ref{the:htilde=0}, we obtain for each $r\in{(0,1)}$ \begin{equation} \label{eq:strictinequalitycritpara} \tilde{h}_*^d(r)=0<h_*^d(r)<\sqrt{2\intens_*^d(r)}<\infty. \end{equation} Note that when $r=0,$ the situation for the Gaussian free field is different, see Remark~\ref{rk:cablegff}, \ref{rk:r=0cablegff} and \cite{2dGFFperc}. As explained in Section~\ref{sec:background}, the inequalities \eqref{eq:strictinequalitycritpara} were already proved on certain transient graphs for random interlacements and the Gaussian free field: $\tilde{h}_*^d=0$ is proved in \cite{Lu-14,MR3492939,DrePreRod3,Pre1} on all vertex-transitive transient graphs, and in particular on $\Z^d,$ $d\geq3,$ or for the pinned dGFF in dimension $2$; $h_*^d>0$ is proved in \cite{DrePreRod,DrePreRod2,MR3940195,MR3765885,MR4169171} on $\Z^d,$ $d\geq3,$ a class of fractal or Cayley graphs with polynomial growth, a large class of trees, and expander graphs; $h_*^d<\sqrt{2\intens_*^d}$ is proved in \cite{MR3492939,MR3765885} on a large class of trees; $\intens_*^d<\infty$ is proved in \cite{sznitman2010vacant,teixeira2009interlacement,MR2891880,DrePreRod2} on $\Z^d,$ $d\geq3,$ the same class of fractal or Cayley graphs with polynomial growth as before, and non-amenable graphs. However, the question of the strict inequality $h_*^d<\sqrt{2\intens_*^d}$ is still open on $\Z^d,$ $d\geq3,$ see \cite[Remark~4.6]{MR3492939}, and we refer to \cite{MR3940195} for an extension of this question. \begin{remark} \phantomsection\label{rk:connectiontoboundary} \begin{enumerate}[label=\arabic*)] \item \label{rk:othercritexp} Let us comment on our definition of the critical parameters \eqref{def:u*} and \eqref{def:h*}. An alternative possible definition would be to reverse the order of the limits $\eps\rightarrow0$ and $n\rightarrow\infty$ in \eqref{def:u*} and \eqref{def:h*}. More generally, for all $r\in{[0,1)}$ and any sequence $\boldsymbol{\eps}=(\eps_n)_{n\in\N}\in{[0,\infty)^{\N}}$ decreasing to $0,$ we define \begin{equation} \label{eq:defu*depsr} u_*^d(\boldsymbol{\eps},r):=\inf\Big\{\intens\geq0:\,\liminf_{n\rightarrow\infty}\Pm\Big(B_n(r)\stackrel{\mathcal{V}^{\intens}_n}{\longleftrightarrow} \widehat{\partial}B_n(1-\eps_n)\Big)=0\Big\}. \end{equation} Note that if $\eps_n\leq \eps'_n$ for all $n\in\N,$ then $u_*^d(\boldsymbol{\eps},r)\leq u_*^d(\boldsymbol{\eps}',r),$ and that $u_*^d(r)=\sup u_*^d(\boldsymbol{\eps},r),$ where the supremum is taken over all sequences $\boldsymbol{\eps}=(\eps_n)_{n\in\N}\in{[0,\infty)^{\N}}$ decreasing to $0.$ By Theorem~\ref{the:maindisexc} and Corollary~\ref{cor:VunPercBds} we have that for all $r\in{[0,1)}$ and any sequence $\boldsymbol{\eps}=(\eps_n)_{n\in\N}$ decreasing to $0$ \begin{equation*} u_*^d(\boldsymbol{\eps},r)\begin{cases} =\pi/3&\text{if }\eps_n\geq n^{-1/7}\text{ for each }n\in\N, \\\in{(0,\pi/3]}&\text{otherwise,} \end{cases} \end{equation*} and there is no percolation at $\pi/3.$ One can similarly define the critical parameter $h_*^d(\boldsymbol{\eps},r)$ by replacing $\V_n^u$ by $E_n^{\geq u}$ in \eqref{eq:defu*depsr}. Using Theorem~\ref{the:maindisgff}, see in particular \eqref{eq:GFFconnectionBds} for the lower bound, we have for all $r\in{(0,1)}$ and any sequence $\boldsymbol{\eps}=(\eps_n)_{n\in\N}$ decreasing to $0$ \begin{equation*} 0<h_*^d(\boldsymbol{\eps},r)\leq \sqrt{\frac{\pi}{2}}.
\end{equation*} Finally, for the GFF on the cable system, \cite[Lemma~4.13 and Corollary~5.1]{ArLuSe-20a} actually imply that $\tilde{h}_*^d(\boldsymbol{\eps},r)=0$ for all $r\in{(0,1)}$ and for any sequence $\boldsymbol{\eps}$ decreasing to $0,$ where $\tilde{h}_*^d(\boldsymbol{\eps},r)$ is defined analogously to \eqref{eq:defu*depsr}, replacing $\V_n^u$ by $\widetilde{E}^{\geq u}_n.$ In particular, the inequalities \eqref{eq:strictinequalitycritpara} still hold with these new definitions of the critical parameters, as long as the sequence $\boldsymbol{\eps}$ satisfies $\eps_n\geq n^{-1/7}$ for all $n\in\N.$ \item We conjecture that $u_*^d(\boldsymbol{\eps},r)=\pi/3$ for any sequence $\boldsymbol{\eps}=(\eps_n)_{n\in\N}$ decreasing to $0,$ and in particular for the choice $\eps_n=0$ for all $n\in\N,$ which seems to be another natural definition of the critical parameter. The difficulty in proving this statement is that the equality $u_*^d(r)=\pi/3$ from Theorem~\ref{the:maindisexc} follows from the coupling between discrete and continuous excursion processes, and a similar result for continuous excursions, see Theorem~\ref{t.mainthm}. However, this coupling fails near the boundary of the disk $\D,$ see Theorem~\ref{the:couplingdiscontexc1}, and it seems that proving the equality $u_*^d(0,r)=\pi/3,$ that is, finding a large cluster of $\V_n^u$ reaching the boundary of $\D_n,$ would require an additional purely discrete argument. The result \eqref{eq:percowithoutloops} shows that one can at least have a large cluster of $\V_n^u$ at polynomial distance from the boundary, and can thus be seen as a first step towards obtaining $u_*^d(0,r)=\pi/3.$ On the other hand, it is not clear whether the critical parameter $h_*^d(\boldsymbol{\eps},r)$ depends on the sequence $\boldsymbol{\eps},$ or in fact on $r\in{(0,1)}.$ \end{enumerate} \end{remark} \section{Definitions and results for the continuum models} \label{s.contdef} \subsection{The Brownian excursion cloud and loop soups}\label{s.be} The Brownian excursion measure $\mu$ originated as a tool for studying intersection exponents of planar Brownian motion, see for instance \cite{MR2883393}, and is the continuum analogue of the discrete excursion measure. It is a $\sigma$-finite measure on trajectories that spend their lifetime in the unit disk with endpoints on the boundary $\partial \D$. We now briefly recall one way to construct this measure. See also for instance \cite{lawler2000universality}, \cite{virag2003beads}, \cite{lawler2005conformally} and \cite{lawler2004soup}. Let \begin{equation*} W_\D := \left\{ w \in C([0,T_w],\overline{\D}) : w(t) \in \D, \forall t \in (0,T_w) \right\}, \end{equation*} and for $K \Subset \D$ we let $W_{K}$ be the set of trajectories in $W_{\D}$ that hit $K$. For $w$ in $W_{\D}$ we write $Z_t(w)=w(t)$. For $x\in \D$, let $\Pm_x$ denote the law under which $(Z_t)_{t \geq0}$ is a complex Brownian motion started at $x$ killed upon hitting $\partial \D$. For each probability measure $m$ on $\D,$ let $\Pm_{m}:=\int_{x\in \D} \Pm_x\, m({\rm d} x).$ The Brownian excursion measure $\mu$ on general domains is for instance defined in \cite[Section~5.2]{lawler2005conformally}.
When the domain is the unit disk $\D,$ it corresponds to the limit \begin{equation}\label{e.bemeas} \mu = \lim_{\epsilon \to 0} \frac{2 \pi}{\epsilon} \Pm_{\sigma_{1-\epsilon}}, \end{equation} where $\sigma_r$ is the uniform probability measure on $\partial B(r)$ for each $r\in{(0,1)}.$ The limit in \eqref{e.bemeas} is meant in terms of the Prokhorov metric on the set of measures on $W_{\D},$ when $W_{\D}$ is endowed with some canonical distance on curves, and we refer to \cite[Section~5.1]{lawler2005conformally} for a precise definition of these notions. The formula \eqref{e.bemeas} can be deduced from combining the bottom of p.124 and (5.12) in \cite{lawler2005conformally}, see also the paragraph after (7) in \cite{lawler2000universality}. Note that $\mu$ is supported on excursions which start and end on $\partial\D.$ Under $\Pm,$ the excursion process $\omega$ is a Poisson point process on $W_\D \times \BR_+$ with intensity measure $\mu \otimes \id \intens$. For $\intens>0,$ writing $\omega = \sum_{i \geq 0} \delta_{(w_i, \intens_i)},$ where $\delta$ is a Dirac mass, we let \begin{equation}\label{e.omega_alpha} \omega_\intens := \sum_{i \geq 0} \delta_{w_i} \mathbb{I} \{\intens_i \leq \intens\}, \end{equation} and note that under $\Pm$ the process $\omega_\intens$ is a Poisson point process with intensity measure $\intens \mu.$ We refer to $\omega_\intens$ as the Brownian excursion process at level $\intens$. For $\intens>0$, the Brownian excursion set at level $\intens$ is then defined as \begin{equation*} \CI^{\intens} := \bigcup_{\intens_i \le \intens } \bigcup_{s =0}^{T_{w_i}} w_i(s) \end{equation*} and we let $\V^{\intens} := \D\setminus \be^{\intens}$ denote the corresponding vacant set. \cite[Proposition~5.8]{lawler2005conformally} says that $\mu$, and consequently $\Pm$, are invariant under the conformal automorphisms of $\D$. We now discuss how the random set $\be^{\intens}\cap K$ can be generated for a compact $K$ in ${\mathbb D}$. This is what we refer to as ``local picture". We first introduce some additional notation. For $K\Subset \D$, let $\tau_K:=\inf\{t>0;\,Z_t\in K\}$ be the hitting time of $K$ and let $L_K:=\sup\{0<t\le \tau_{\partial \D}\,:\,Z_t\in K\}$ denote the last exit time, with the conventions $\inf\varnothing=+\infty$ and $\sup\varnothing=-\infty.$ A point $x$ is said to be regular for $K$ if $\Pm_x(\tau_K=0)=1$. We will assume that all compact sets $K$ appearing below satisfy the condition that all $x\in K$ are regular. For $K \Subset \D$ let $e_K( {\rm d}y)$ denote the equilibrium measure (for Brownian motion in $\D$) of $K$, see for example \cite[Theorem~$24.14$]{kallenberg_2002}. It is the finite measure supported on $\partial K$ satisfying \begin{equation}\label{e.eqmeas} \Pm_x \left( Z(L_K) \in {\rm d}y,\, L_K>0 \right) = G(x,y) e_K( {\rm d}y), \end{equation} where $G(x,y)$ is the Green's function for Brownian motion in $\D$ stopped upon hitting $\partial \D$. We recall that \begin{equation} \label{eq:Greeneverywhere} G(w,z)=\frac{1}{2\pi}\log\frac{|1-\bar{w}z|}{|w-z|}\mbox{ for }w,z\in \D, \end{equation} so that in particular \begin{equation} \label{eq:Greenat0} G(0,r e^{i \theta})=\frac{\log(1/r)}{2\pi}\mbox{ for }0<r<1\mbox{ and }0\le \theta<2\pi. \end{equation} Furthermore, the capacity (relative to $\D$) of $K\Subset \D$ is denoted by $\capac(K)$ and is defined as the total mass of $e_K,$ and we denote by $\overline{e}_K:=e_K/\capac(K)$ the normalized equilibrium measure. 
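Note that, as an elementary check on the normalization in \eqref{eq:Greeneverywhere} (which we record only for convenience), $G(w,\cdot)$ indeed vanishes on $\partial\D$: for $|z|=1$ we have \begin{equation*} |1-\bar{w}z|=|z|\,|\bar{z}-\bar{w}|=|z-w|, \end{equation*} so that $G(w,z)=0,$ in accordance with the killing of the Brownian motion upon hitting $\partial\D.$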
The expression for $e_{B(r)}$ for $0<r<1$ is known and can be derived at once; indeed, using the above and~\eqref{e.eqmeas}, we have \begin{equation}\label{e.eqmeasexpr1} \Pm_0 \left( Z(L_{B(r)}) \in {\rm d}y, 0 < L_{B(r)} \right)=\frac{\log(1/r)}{2\pi}e_{B(r)}( {\rm d}y). \end{equation} On the other hand, by rotational invariance, we have that \begin{equation}\label{e.eqmeasexpr2} \Pm_0 \left( Z(L_{B(r)}) \in {\rm d}y, 0 < L_{B(r)} \right)=\sigma_r( {\rm d}y), \end{equation} From~\eqref{e.eqmeasexpr1} and~\eqref{e.eqmeasexpr2} we get that \begin{equation}\label{e.eqmeasdisc} e_{B(r)}({\rm d}y)=\frac{2\pi}{\log(1/r)}\sigma_r({\rm d}y). \end{equation} The capacity of $B(r)$ is therefore given by \begin{equation} \label{capball} \capac(B(r))=\int_{y\in \partial B(r)} e_{B(r)}({\rm d}y)=\frac{2\pi}{\log(1/r)}. \end{equation} For $K\Subset \D$, the hitting kernel is defined as \[ h_K(x,{\rm d}y):=\Pm_x(Z(\tau_K)\in {\rm d}y,\,\tau_K<\infty). \] The equilibrium measure satisfies the following consistency property, see \cite[Proposition~24.15]{kallenberg_2002}: If $K_1\Subset K_2 \Subset \D$, then \begin{equation}\label{e.consistency} e_{K_1}({\rm d}y) = \int_{x\in \partial K_2} h_{K_1}(x,{\rm d}y) e_{K_2}( {\rm d}x). \end{equation} For all $x\in{\R^2}$ and $r<|x|<R,$ it thus follows from \eqref{e.eqmeasdisc}, \eqref{e.consistency} and invariance by scaling and rotation of Brownian motion that \begin{equation} \label{eq:hittingBM} \Pm_x \left( \tau_{B(r)}< \tau_{B(R)^\ch} \right) = \frac{\log(R/|x|)}{\log(R/r)}. \end{equation} When $B=B(r)$ for some $r\in{(0,1)},$ we can pointwise define a probability measure $\Pm_x^B, x \in \partial B,$ as the weak limit $\lim_{z \to x} \Pm_z ( \, \cdot \, | \, \tau_B = \infty),$ where the limit is taken for $z\in{\D\setminus B}.$ The existence of this limit can be proven using \cite[Theorem~4.1]{MR932248} for the domain $D=\D\setminus B,$ and $\Pm_x^B$ corresponds to the normalized excursion measure from \cite[Definition~3.1]{MR932248}, restricted to the trajectories hitting $\partial\D$ (this restricted measure is finite, see the end of p.\ 34 in \cite{MR932248}). Note moreover that the limit as $z\rightarrow x$ of $G_D(z,y)/P_z(\tau_B=\infty)$ is positive by \eqref{eq:hittingBM} and since the normal derivative of $G_D(x,y)$ is positive for each $y\in{D}$ (and is in fact a multiple of the Poisson kernel). Then $\Pm_x^B$ corresponds informally to the law of $(Z_{s+L_B})_{s\geq0}$ under $\Pm_{y}(\cdot\,|\,L_B\geq0,Z({L_B})=x)$, and for $x\in{\overline{B}^c}$ we define $\Pm_x^B=\Pm_x(\cdot\,|\,\tau_B=\infty)$. Using invariance by time reversal of Brownian motion, see \cite[Theorem~24.18]{kallenberg_2002} we thus have that for all $s\in{(0,1)}$ with $\overline{B}\subset B(s),$ \begin{align} \begin{split} \label{lawbeforeHK} \text{ under }&\Pm_{e_{B(s)}}(\cdot\,|\,\tau_B<\infty,Z(\tau_B)),\ \text{the process } (Z(\tau_B-t))_{t\in{(0,\tau_B]}}\text{ has } \\ &\text{ law }\Pm_{Z(\tau_B)}^B\big((Z(t))_{t\in{(0,L_{B(s)}]}}\in{\cdot}\big). 
\end{split} \end{align} One can prove \eqref{lawbeforeHK} as follows: let $B^{(\eps)}$ be a ball with the same center as $B,$ but with radius increased by $\eps>0,$ and define $L^{(\eps)}:=\sup\{t\leq \tau_B:\,Z_t\in{\partial B^{(\eps)}}\}.$ One can use invariance by time reversal and the definition of $\Pm_{\cdot}^B$ to show that, on the event $L^{(\eps)}\geq0,$ under $\Pm_{e_{B(s)}}(\cdot\,|\,\tau_B<\infty,Z({L^{(\eps)}})),$ $(Z(L^{(\eps)}-t))_{t\in{(0,L^{(\eps)}]}}$ has law $\Pm_{Z(L^{(\eps)})}^B((Z(t))_{t\in{[0,L_{B(s)})}}\in{\cdot}).$ Letting $\eps\rightarrow0,$ we obtain \eqref{lawbeforeHK}. For any measurable sets of trajectories $A, A'$ in ${W}_{\D},$ any $r\in{(0,1)}$ and $\eps\in{(0,1)}$ with $r<1-\eps,$ using the strong Markov property at time $\tau_B$, with $B=B(r)$, we have \begin{equation}\label{e.beforwardmeas} \begin{split} & \frac{2 \pi}{ \epsilon} \Pm_{\sigma_{1-\epsilon}} \left( ( Z({\tau_B-t}))_{t \in{(0,\tau_B]}} \in A', ( Z({t+\tau_B}))_{t \geq 0} \in A, \tau_B< \infty \right) \\ &= \frac{2 \pi }{ \epsilon } \E_{\sigma_{1-\epsilon}} \left[ \Pm_{Z(\tau_B)} (A) \I\{ ( Z({\tau_B-t}))_{t \in{(0,\tau_B]}} \in A',\ \tau_B< \infty\} \right] \\ &\overset{\eqref{e.eqmeasdisc}}{=} \frac{- \log(1-\epsilon) }{ \epsilon } \E_{e_{B(1-\epsilon)}} \left[ \Pm_{Z(\tau_B)} (A) \I\{ ( Z({\tau_B-t}))_{t \in{(0,\tau_B]}} \in A',\ \tau_B< \infty\} \right] \\ &\overset{\eqref{lawbeforeHK}}{=} \frac{- \log(1-\epsilon) }{ \epsilon }\E_{e_{B(1-\epsilon)}} \left[ \Pm_{Z(\tau_B)} (A)\Pm_{Z(\tau_B)}^B( ( Z({t}))_{t \in{(0,L_{B(1-\eps)}]}} \in A') \I\{ \tau_B< \infty\} \right] \\ &= \frac{- \log(1-\epsilon) }{ \epsilon } \int_{\partial B(1-\epsilon)} \int_{\partial B} {\Pm}_y(A)\Pm_{y}^B( ( Z({t}))_{t \in{(0,L_{B(1-\eps)}]}} \in A') h_B(x,{\rm d}y) e_{B(1-\epsilon)}( {\rm d}x) \\ &=\frac{- \log(1-\epsilon) }{ \epsilon } \int_{\partial B} {\Pm}_y(A) \Pm_{y}^B( ( Z({t}))_{t \in{(0,L_{B(1-\eps)}]}} \in A') \int_{\partial B(1-\epsilon)}h_B(x,{\rm d}y)e_{B(1-\epsilon)}( {\rm d}x) \\ &\stackrel{~\eqref{e.consistency}}{=} \frac{- \log(1-\epsilon) }{ \epsilon } \int_{\partial B} \Pm_y(A)\Pm_{y}^B( ( Z({t}))_{t \in{(0,L_{B(1-\eps)}]}} \in A') e_B({\rm d}y). \end{split} \end{equation} In particular, letting $\eps\rightarrow0,$ we obtain \begin{equation} \label{e.beforwardmeas2} \mu\big( ( Z({\tau_B-t}))_{t \in{(0,\tau_B]}} \in A' , ( Z({t+\tau_B}))_{t \geq 0} \in A,\tau_B< \infty \big)=\int_{\partial B} \Pm_y(A)\Pm_y^B(A') e_B( {\rm d}y). \end{equation} Indeed, consider the map from \[\{(w_1,w_2)\in{W_{\D}\times W_{\D}}:w_1(0)=w_2(0)\in{\partial B}\text{ and }w_1(t)\notin{B}, \,\forall\, t>0\} \] to $W_{\overline{B}}$ which associates to $(w_1,w_2)$ the trajectory $w$ characterized via $w(t)=w_1(T_{w_1}-t)$ if $t\leq T_{w_1}$ and $w(t)=w_2(t-T_{w_1})$ if $t\in{[T_{w_1},T_{w_1}+T_{w_2}]}.$ It is then not hard to check that this map defines a homeomorphism for the relative topologies, and thus \eqref{e.beforwardmeas} entirely characterizes the restriction of the measure $2\pi\eps^{-1}\Pm_{\sigma_{1-\eps}}$ to the set of trajectories hitting $B.$ Since the measure on the right-hand side of \eqref{e.beforwardmeas2} is clearly the limit (for the Prokhorov metric introduced below \eqref{e.bemeas}) of the measure in the last line of \eqref{e.beforwardmeas} as $\eps\rightarrow0$, we obtain \eqref{e.beforwardmeas2} by \eqref{e.bemeas}.
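For instance (we record this special case only to make the normalization explicit), taking $A$ and $A'$ in \eqref{e.beforwardmeas2} to be the full sets of trajectories, and using that $\Pm_y$ and $\Pm_y^B$ are probability measures, one obtains the total mass of the excursions hitting $B$: \begin{equation*} \mu(\tau_B<\infty)=\int_{\partial B}e_B({\rm d}y)=\capac(B), \end{equation*} which is consistent with the Poisson number $N_B\sim \text{Poi}(\intens\capac(B))$ of trajectories of $\omega_\intens$ hitting $B$ appearing in Proposition~\ref{prop:localdescription} below.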
Note that one could alternatively see \eqref{e.beforwardmeas2} as a consequence of \cite[(5.6)]{lawler2005conformally}, but our current proof of \eqref{e.beforwardmeas2} highlights the link between the Brownian excursion cloud and Brownian interlacements, as it is proved similarly to \cite[Lemma~2.1]{sznitman2013scaling}. One can rephrase \eqref{e.beforwardmeas2} in terms of Poisson point processes as follows. \begin{proposition} \label{prop:localdescription} For each $r\in{(0,1)},$ writing $B=B(r),$ let $N_B \sim \text{Poi}( \intens \capac (B)),$ let $(\sigma_i)_{i\geq1}$ be an i.i.d.\ family of random variables on $\partial B$ with distribution $\overline{e}_B.$ Conditionally on the $(\sigma_i)_{i\geq1},$ let $(Z_i^+)_{i\geq1},$ resp.\ $(Z_i^-)_{i\geq1},$ be two families of independent Brownian motions resp.\ excursions with law $\Pm_{\sigma_i},$ resp.\ $\Pm_{\sigma_i}^B,$ for each $i\geq1.$ Let us also define $Z_i(t)=Z_i^-(T_{Z_i^-}-t)$ if $t\leq T_{Z_i^-},$ and $Z_i(t+T_{Z_i^-})=Z_i^+(t)$ if $t\in{(0,T_{Z_i^+}]}$ for each $i\geq1.$ Then $\sum_{i=1}^{N_B}\delta_{Z_i}$ has the same law as the point process of trajectories in the support of $\omega_\intens$ hitting $B.$ \end{proposition} \begin{remark} There is a similar local description for Brownian interlacements on balls in ${\mathbb R}^d$ for $d\ge 3$, see \cite[(2.3)]{sznitman2013scaling}. Actually, modulo time shift, this description can be extended to any compact $K$ when considering the forward parts of trajectories only, see \cite[(2.24)]{sznitman2013scaling}, and one could also prove a similar statement in our context. Namely, for a compact $K\Subset \D,$ let $N_K\sim \text{Poi}( \intens \capac (K)),$ let $(\sigma_i)_{i\geq1}$ be an i.i.d.\ family of random variables on $\partial K$ with distribution $\overline{e}_K,$ and conditionally on the former let $(Z_i^+)_{i\geq1}$ be a family of independent Brownian motions with law $\Pm_{\sigma_i}$ for each $i\geq1.$ Then $\sum_{i=1}^{N_K}\delta_{Z_i^+}$ has the same law as the point process of trajectories in the support of $\omega_\intens$ hitting $K,$ started at their first hitting time of $K.$ \end{remark} We end this section by defining the Brownian loop soup. This is done in complete analogy with the random walk loop soup, and the first construction is due to Lawler and Werner in \cite{lawler2004soup}. The (rooted) Brownian loop measure on $\BC$ is defined by \begin{equation} \label{eq:defcontloops} \mu_{\rm loop}^r (\cdot) := \int_{\mathbb{C}} \int_0^\infty \frac{1}{2 \pi t^2}\Pm_{x,x}^t(\cdot )\, \id t \, \id x, \end{equation} where $\Pm_{x,x}^t(\cdot )$ denotes the law of a Brownian bridge of duration $t$. By an unrooted loop we mean an equivalence class of loops modulo time shift, where two loops $\gamma, \gamma'$ are equivalent if one can be obtained by a time shift of the other: $\gamma(\cdot) = \gamma'(\cdot +h), h >0$. The unrooted loop measure, $\mu_{\rm loop}$, is then defined as the image of $\mu_{\rm loop}^r$ under the canonical projection from rooted to unrooted loops. The Brownian loop soup in $\D$ with parameter $\lambda>0$ is then defined as a Poisson point process on the space of unrooted loops with intensity measure $\lambda \mu_{\rm loop}^{\D},$ where $\mu_{\rm loop}^{\D}$ is the restriction of $\mu_{\rm loop}$ to the set of unrooted loops contained in $\D$.
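Let us note in passing a simple bound, which we record only for orientation and do not use in what follows: since each $\Pm_{x,x}^t$ is a probability measure and $\D$ has area $\pi,$ for every $\delta>0$ we have \begin{equation*} \mu_{\rm loop}^{\D}\big(\{\text{loops of duration at least }\delta\}\big)\leq \int_{\D}\int_{\delta}^{\infty}\frac{1}{2\pi t^2}\,\id t\,\id x=\frac{1}{2\delta}, \end{equation*} so that the Brownian loop soup in $\D$ a.s.\ contains only finitely many loops of duration at least $\delta,$ whereas $\mu_{\rm loop}^{\D}$ has infinite total mass due to the accumulation of small loops.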
The loops in the loop soup form clusters: two loops $\el, \el'$ are in the same cluster if there is a finite set of loops $\{\el_0, \ldots, \el_n\}$ such that $\el_0=\el,$ $\el_n=\el'$ and $\el_{j-1} \cap \el_j \neq \varnothing$ for all $1\leq j\leq n.$ Up to extending the underlying probability space, we moreover always assume that the Brownian loop soup is defined under $\Pm,$ and independent from the Brownian excursion process $\omega_u,$ see \eqref{e.omega_alpha}. For each $\lambda\geq0$ and $\intens>0,$ we denote by $\be^{\intens,\lambda}$ the closure of the union of all the clusters of loops, for the loop soup at level $\lambda,$ which hit the Brownian excursion set $\be^\intens$ at level $\intens.$ Moreover, we denote by $\V^{\intens,\lambda}:=(\be^{\intens,\lambda})^{\ch}$ the corresponding vacant set. Note that $\be^{\intens,0}=\be^{\intens}$ and $\V^{\intens,0}=\V^{\intens}.$ \subsection{Conformal restriction and the Schramm-Loewner evolution}\label{s.sle} The determination of the critical values in the continuum setting is based on a Schramm-Loewner evolution (SLE) computation and the well-known link to restriction measures. This section recalls the needed facts. While most (if not all) of this is standard material, we have chosen to give statements and provide references. See, e.g.\ \cite[Section~8]{lawler2003conformal} for further discussion. Let $\HH := \{z: \Im \, z > 0\}$ be the complex upper half-plane. Let $X_-$ be the set of continuous curves $\gamma$ connecting $0$ with $\infty$ in $\overline{\mathbb{H}}$ and with the property that $\gamma \cap \mathbb{R} \subset (-\infty,0]$. One may turn the set of curves into a metric space either by viewing them as continuous functions up to increasing reparametrization (with the associated supremum norm) or as compact sets with the Hausdorff topology after mapping to the disk. The exact point of view is not important in the present context and we will not discuss this in further detail. We say that a probability measure $\Pm=\Pm_{\mathbb{H},0, \infty}$ on $X_{-}$ satisfies one-sided conformal restriction with exponent $\alpha > 0$ if for any (relatively) compact $A$ such that $\overline{A}\cap\H=A,$ $\HH \setminus A$ is simply connected and $\overline A \cap \mathbb{R} \subset (0,\infty)$, we have that \begin{equation} \label{eq:onesidedrestriction} \Pm\left( \gamma \cap A = \varnothing \right) = \phi_A'(0)^\alpha. \end{equation} Here $\phi_A: \HH \setminus A \to \HH$ is the conformal map fixing $0$ and satisfying $\phi_A(z) = z + o(z)$ as $z \to \infty$. A conformally invariant measure defined in some other simply connected domain with two marked boundary points is said to satisfy one-sided conformal restriction if its image in $\HH$ does so. If $A$ is as above, then the law $\Pm_{\mathbb{H}, 0, \infty}$ conditioned on $\gamma \cap A = \varnothing$ is the same as $\Pm_{\mathbb{H} \setminus A, 0, \infty}$, the latter defined by push-forward via conformal transformation. An important example of a probability measure which satisfies one-sided conformal restriction can be obtained from the (right boundary of a) cloud sampled from a Poisson realization of the Brownian excursion measure, as we now recall.
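For concreteness, let us record a standard example of the quantity appearing in \eqref{eq:onesidedrestriction}; the particular choice of $A$ below is made only for illustration. Take $A=\{z\in{\HH}:|z-x_0|\leq r\}$ with $0<r<x_0,$ so that $\overline A \cap \mathbb{R}=[x_0-r,x_0+r]\subset(0,\infty).$ Then \begin{equation*} \phi_A(z)=z+\frac{r^2}{z-x_0}+\frac{r^2}{x_0} \end{equation*} is a conformal bijection from $\HH\setminus A$ onto $\HH$ fixing $0$ and satisfying $\phi_A(z)=z+o(z)$ as $z\rightarrow\infty,$ with $\phi_A'(0)=1-r^2/x_0^2,$ so that under a measure satisfying one-sided conformal restriction with exponent $\alpha$ the avoidance probability in \eqref{eq:onesidedrestriction} equals $(1-r^2/x_0^2)^{\alpha}.$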
Let $D\neq\BC$ be a simply connected Jordan domain, let $\phi^D:\D\rightarrow D$ be some fixed conformal transformation, and, for each $u>0,$ denote by $\omega_u^D$ the point process obtained by replacing each excursion $w_i$ in the definition \eqref{e.omega_alpha} of $\omega_u$ by its image via $\phi^D.$ This corresponds to the usual definition of the Brownian excursion process on $D$ by \cite[Proposition~5.8]{lawler2005conformally}, and its law does not depend on the particular choice of the conformal transformation $\phi^D.$ For two arcs $\Gamma_1,\Gamma_2\subset\partial D,$ let $\be^{\intens,D}_{\Gamma_1,\Gamma_2}$ be the union of the traces of the trajectories in $\omega_u^D$ which start on $\Gamma_1$ and end on $\Gamma_2.$ Let $\Gamma\subset \partial D$ be a Jordan arc and suppose $K\subset \overline{D}$ is a closed set whose intersection with $\partial D$ is contained in $\Gamma$. Write $\Gamma^\ch = \partial D \setminus \Gamma$ and let $F_{D,\Gamma}(K)$ be the filling of $K$ with respect to $\Gamma$, that is, $F_{D,\Gamma}(K)$ is the union of $K$ with all $z \in \overline D$ such that $K$ separates $z$ from $\Gamma^\ch$ in $\overline D$. We write \begin{equation}\label{e.sepbry} \partial_{D,\Gamma} K :=( \partial (D \setminus F_{D,\Gamma}(K)) ) \setminus \Gamma^\ch. \end{equation} Then, as shown in \cite[Theorem~8]{werner2005conformal} with a multiplicative constant $c\alpha,$ and \cite[Theorem~2.12]{wu2015conformal} for the explicit constant $\pi \alpha,$ we have the following. \begin{lemma}[\cite{werner2005conformal,wu2015conformal}] \label{lem:excarerestriction} For each $\alpha>0,$ $\partial_{\H,\R^-}\mathcal{I}^{\pi\alpha,\H}_{\R^-,\R^-}$ satisfies one-sided conformal restriction with exponent $\alpha.$ \end{lemma} We now discuss the link between one-sided restriction and SLE, following \cite{lawler2003conformal,werner-wu-cle}. Let $B_{t}$ be a one-dimensional standard Brownian motion and for $\kappa > 0$, set $U_{t} = B_{\kappa t}$. The SLE$_{\kappa}$ Loewner chain is defined by \[ \partial_{t} g_{t}(z) = \frac{2}{g_{t}(z)-U_{t}}, \qquad 0 \le t < T_{z}, \qquad g_{0}(z)=z, \] where $T_{z} := \inf\{t \ge 0 : \textrm{Im}\, g_{t}(z) = 0\}$. The associated hulls are defined by $K_t = \{z: T_z \le t\}, \, t \ge 0$. For each $t$, $g_t$ is a conformal map from the unbounded connected component $H_t$ of $\mathbb{H}\setminus K_t$ onto $\mathbb{H}$. The SLE$_{\kappa}$ curve, which connects $0$ with $\infty$ in $\mathbb{H}$ can then be defined by $\gamma(t) := \lim_{y \downarrow 0} g_{t}^{-1}(U_{t} + iy), \, t\ge 0$. The SLE curve in other domains is defined by conformal transformation. Next, for $\rho>-2$ and $v\in{\R}$ consider the SDE \[ dW_{t} = \sqrt{\kappa}dB_{t} - \frac{\rho \, dt}{V_{t}-W_{t}}, \quad dV_{t}=\frac{2 \, dt}{V_{t}-W_{t}}, \quad (W_{0}, V_{0}) = (0,v). \] The SLE$_{\kappa}(\rho)$ Loewner chain with force point $v$ is the Loewner chain driven by $(W_{t})$ as above. The case of relevance to this paper is $v=0^-.$ Care is needed if $\rho$ is too large in absolute value and negative, though this is not an issue in the cases we will consider. Define for each $\kappa\in{[8/3,4]}$ and $\alpha > 0$ the function \begin{equation} \label{eq:rhokappaalpha} \rho_\kappa(\alpha) := \frac{-8 + \kappa + \sqrt{ 16 + \kappa(16\alpha-8) + \kappa^2}}{2}. 
\end{equation} It was proved in \cite[Theorem~8.4]{lawler2003conformal} that the left-filling of the hulls of an SLE$_{8/3}(\rho)$ process with $\rho=\rho_{8/3}(\alpha)$ satisfies one-sided restriction with exponent $\alpha,$ thereby providing a ``Brownian'' construction of SLE curves. By adding an independent CLE (conformal loop ensemble) process, this can be extended to other values of $\kappa$, see \cite{werner-wu-cle}: Let $\kappa \in [8/3,4]$. Sample a Brownian loop soup in $\mathbb{D}$ of intensity $\lambda(\kappa),$ as defined below \eqref{eq:defcontloops}, where $2\lambda(\kappa),$ see \eqref{eq:defckappa}, is the corresponding \emph{central charge} parameter, which also appears in the context of Conformal Field Theory. (See the discussion of the choice of intensity on p.~1 of \cite{Lupu-CLE}; it differs by a factor of $2$ from the choice made in \cite{lawler2004soup}.) For a domain $D,$ we define the Brownian loop soup in $D$ with intensity $\lambda(\kappa)$ as the image by $\phi^D$ of the Brownian loop soup in $\D$ at intensity $\lambda(\kappa),$ defined below \eqref{eq:defcontloops}. We define a cluster of loops in $D$ as in $\D,$ see below \eqref{eq:defcontloops}. The collection of outer boundaries of these clusters forms a conformal loop ensemble, that is, a CLE$_\kappa$ process in $D$, see \cite{sheffield-werner}. Now consider a curve $\gamma$ from $0$ to $\infty$ in $\H$ which satisfies one-sided restriction with exponent $\alpha,$ independent of the loop soup, and let $S(\kappa,\alpha)$ be the closure of the union of the loop clusters, for the loop soup in $\H$ of intensity $\lambda(\kappa),$ which hit $\gamma,$ and let $\eta(\kappa,\alpha)=\partial_{\H,\R^-}S(\kappa,\alpha).$ \begin{lemma}[\cite{werner-wu-cle}] \label{lem:etaisSLE} The set $\eta(\kappa,\alpha)$ has the law of the trace of an SLE$_\kappa(\rho_{\kappa}(\alpha))$ curve. \end{lemma} In view of Lemma~\ref{lem:excarerestriction}, if we take $\gamma$ to be the rightmost boundary of $\be_{\R^-,\R^-}^{\pi\alpha,\H}$, then adding loop clusters with intensity $\lambda(\kappa)$ to $\be_{\R^-,\R^-}^{\pi\alpha,\H},$ we obtain SLE$_\kappa(\rho_{\kappa}(\alpha)).$ This property can be extended to the restriction of the Brownian excursion process to subsets of $\D$ (or $\H$). For a domain $D\subset\D$ with $\partial D\cap\partial\D\neq\varnothing$ and $\Gamma_1,\Gamma_2\subset \partial D\cap\partial\D,$ let $\be^{\intens}_{\Gamma_1,\Gamma_2,D}$ be the union of the traces of the trajectories in $\omega_u$ entirely included in $D,$ starting on $\Gamma_1$ and ending on $\Gamma_2.$ In other words, $\be^{\intens}_{\Gamma_1,\Gamma_2,D}$ corresponds to the trajectories in $\be^{\intens,\D}_{\Gamma_1,\Gamma_2}$ which are included in $D.$ Moreover for each $\lambda>0,$ let $\be^{\intens,\lambda}_{\Gamma_1,\Gamma_2,D},$ resp.\ $\be^{\intens,\lambda,D}_{\Gamma_1,\Gamma_2},$ be the closure of the union of the loop clusters with intensity $\lambda,$ for the restriction of the Brownian loop soup in $\D$ to loops entirely included in $D,$ resp.\ for the Brownian loop soup in $D,$ which intersect $\be^{\intens}_{\Gamma_1,\Gamma_2,D},$ resp.\ $\be^{\intens,D}_{\Gamma_1,\Gamma_2}.$ Note that $\be^{\intens,\lambda}_{\partial\D,\partial\D,\D}=\be^{\intens,\lambda,\D}_{\partial\D,\partial\D}=\be^{\intens,\lambda},$ as defined below \eqref{eq:defcontloops}.
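Let us also record an elementary consistency check on \eqref{eq:rhokappaalpha}, included only for the reader's convenience. For $\kappa=8/3$ and $\alpha=5/8$ one computes \begin{equation*} \rho_{8/3}(5/8)=\frac{-8+\frac{8}{3}+\sqrt{16+\frac{8}{3}\cdot 2+\frac{64}{9}}}{2}=\frac{-\frac{16}{3}+\frac{16}{3}}{2}=0, \end{equation*} in agreement with the classical restriction property of SLE$_{8/3}$ with exponent $5/8$ from \cite{lawler2003conformal}. Similarly, at the value $\alpha=(8-\kappa)/16$ appearing in Theorems~\ref{the:maindisexcloops} and \ref{t.mainthm}, one finds $\rho_{\kappa}((8-\kappa)/16)=(\kappa-4)/2=\kappa/2-2,$ the critical value at and above which an SLE$_{\kappa}(\rho)$ curve with force point $0^-$ avoids $(-\infty,0);$ compare with Lemma~\ref{lem:SLE-ka-r} below.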
We moreover take the convention $\be^{u,0}_{\Gamma_1,\Gamma_2,D}:=\be^{u}_{\Gamma_1,\Gamma_2,D}$ and $\be^{u,0,D}_{\Gamma_1,\Gamma_2}:=\be^{u,D}_{\Gamma_1,\Gamma_2}.$ Following \cite[Proposition~5.12]{lawler2005conformally} or \cite[(7)]{lawler2000universality}, the Brownian excursion set satisfies the following form of the restriction property. \begin{lemma} \label{lem:resexc} Let $D\subset \D$ be a Jordan domain with $\partial D\cap\partial\D\neq\varnothing$, and let $\Gamma_1,$ $\Gamma_2$ be two closed arcs of $\partial D\cap\partial \D.$ Then for each $u>0$ and $\lambda\geq0,$ $\be^{\intens,\lambda}_{\Gamma_1,\Gamma_2,D}$ and $\be^{\intens,\lambda,D}_{\Gamma_1,\Gamma_2}$ have the same law. \end{lemma} \begin{proof} Let $\Gamma_1^{(n)}$ and $\Gamma_2^{(n)}$ be two sequences of closed arcs such that $\Gamma_1^{(n)}$ and $\Gamma_2^{(n)}$ are disjoint for each $n\in\N,$ and the sets $\Gamma_1^{(n)}\times\Gamma_2^{(n)},$ $n\in\N,$ form a partition of $\{(x,y)\in{\Gamma_1\times\Gamma_2}:x\neq y\}.$ Then by \cite[Proposition~5.2]{lawler2005conformally}, the sets $\be^{\intens}_{\Gamma_1^{(n)},\Gamma_2^{(n)},D}$ and $\be^{\intens,D}_{\Gamma_1^{(n)},\Gamma_2^{(n)}}$ have the same law, and by independence the claim follows for $\lambda=0,$ since a.s.\ there is no trajectory starting and ending at the same point. Adding the loop soup clusters, which have the same law for both sets by \cite[Proposition~6]{lawler2004soup}, we can conclude. \end{proof} Let $\Gamma\subset \partial \D$ be a Jordan arc, and abbreviate $\partial_{\Gamma}K:=\partial_{\D,\Gamma}K,$ see \eqref{e.sepbry}. For a Jordan domain $D\subset\D$ such that $\Gamma\subset\partial D\cap\partial\D,$ $\Gamma\neq\partial \D,$ we also denote by $\phi_{\Gamma}^D$ some choice of conformal transformation from $D$ to $\H,$ which maps $\Gamma$ to $\R^-.$ Combining Lemmas~\ref{lem:excarerestriction}, \ref{lem:etaisSLE} and \ref{lem:resexc} with conformal invariance, we obtain the following result, which is our main tool to study percolation of $\be^{\intens,\lambda}$ in Appendix~\ref{sec:contperco}. \begin{lemma}\label{l.brownianexcursionandlooprestriction} Let $D\subset \D$ be a simply connected Jordan domain with $\partial D\cap\partial\D\neq\varnothing$, and $\Gamma$ be a closed arc of $\partial D\cap\partial \D,$ $\Gamma\neq\partial \D.$ Then, for any $\kappa\in{[8/3,4]}$ and $\alpha>0,$ $\phi_{\Gamma}^D(\partial_{\Gamma}\be^{\pi \alpha,\lambda(\kappa)}_{\Gamma,\Gamma,D})$ has the same law as the trace of an $\text{SLE}_{\kappa}(\rho_{\kappa}(\alpha))$ curve. \end{lemma} Let us finish this section with the following consequence of \cite[Lemma~8.3]{lawler2003conformal} and \eqref{eq:rhokappaalpha}. \begin{lemma}[\cite{lawler2003conformal}] \label{lem:SLE-ka-r} For $\alpha > 0$, let $\gamma=\gamma([0,\infty))$ be the trace of the SLE$_{\kappa}(\rho_{\kappa}(\alpha))$ curve in $\mathbb{H}.$ If $0 < \alpha < (8-\kappa)/16$ then almost surely $\gamma$ intersects $(-\infty, 0),$ and if $\alpha \ge (8-\kappa)/16$ then almost surely $\gamma$ does not intersect $(-\infty, 0)$. \end{lemma} \subsection{Statements of continuum results} For $r\in{[0,1)}$, recall that $B(r){\longleftrightarrow}\partial\D\text{ in }\CV^{\intens,\lambda}$ corresponds to the event that the closure of a component of $\CV^{\intens,\lambda}$ intersecting $B(r)$ also intersects $\partial\D$. When $\lambda=0,$ define the critical parameter \begin{equation} \label{def:u*c} \intens_*^c(r) := \inf \left\{ \intens \ge 0 : \Pm\left(B(r)\stackrel{\V^{\intens}}{\longleftrightarrow}\partial\D\right)=0 \right\}.
\end{equation} Recall the definition of $\lambda(\kappa)$ from \eqref{eq:defckappa}. \begin{theorem}\label{t.mainthm} For all $\kappa\in{[8/3,4]}$ and $r\in{[0,1)}$ \begin{equation*} \Pm\big(B(r)\stackrel{\CV^{\intens,\lambda(\kappa)}}{\longleftrightarrow}\partial\D\big)=0\text{ if and only if }u\geq\frac{(8-\kappa)\pi}{16}. \end{equation*} In particular, the critical value for percolation in $\CV^{\intens}$ satisfies $u_*^c(r)=\pi/3.$ \end{theorem} The statement in Theorem~\ref{t.mainthm} is given in \cite[Section~5]{werner-qian}, and the main ingredients of the proof are found there. In Appendix~\ref{sec:contperco}, for the convenience of the reader we complete the details of the proof. A consequence of Theorem~\ref{t.mainthm} and the result on visibility from \cite{elias2017visibility} is that for $\intens\in [\pi/4,\pi/3)$, we have $ \Pm\big(0{\leftrightarrow}\partial\D\text{ in }\CV^{\intens}\big)>0$, but a.s.\ no visibility to infinity from the origin, as well as no percolation in $\V^{u,1/2}.$ The percolative properties of $\V^{\intens,\lambda(\kappa)}$ are particularly interesting for two special values of $\kappa$: first $\kappa=8/3,$ with $\lambda(8/3)=0,$ which simply corresponds to $\V^{\intens}$ and gives us the equality $u_*^c=\pi/3.$ The other value of special interest is $\kappa=4,$ and $\lambda(4)=1/2,$ which is linked to the Gaussian free field (GFF). Indeed, for each $h>0$ denote by $\mathbb{A}_{-h}$ the first passage set of the GFF on $\D$ with zero-boundary condition, as defined in \cite{ArLuSe-20b}, which informally corresponds to the set of points in $\D$ which can be connected to $\partial\D$ by a path above level $-h$ for the continuous GFF on $\D.$ Then by \cite[Proposition~5.3]{ArLuSe-20a}, $\mathbb{A}_{-h}$ has the same law as $\be^{\frac{h^2}{2},1/2}\cup\partial\D$. (Note that $\be^{\frac{h^2}2}$ corresponds to the Brownian excursion set at level $h$ in the parametrization of \cite{ArLuSe-20a}.) We thus directly deduce the following from Theorem~\ref{t.mainthm}. \begin{corollary} \label{cor:percocontGFF} For each $r\in{[0,1)},$ the probability that $B(r)$ intersects a connected component of the complement of $\mathbb{A}_{-h}$ intersecting $\partial \D$, is $0$ if and only if $h\geq\sqrt{\pi/2}.$ \end{corollary} \begin{remark} \begin{enumerate}[label=\arabic*)] \phantomsection\label{rk:othercontperco} \item \label{rk:differentnormalization} The exact value $u_*^c(r)=u_*^d(r)=\pi/3,$ as well as the bound $h_*^d(r)\leq\sqrt{\pi/2}$ from Theorem~\ref{the:main} depend on our choice of normalization for the definition of the Gaussian free field and the excursion clouds, but the inequalities $0<h_*^d(r)<\sqrt{2u_*^d(r)}$ do not (as long as the normalization is consistent). We shortly explain how these values would change for a different choice of normalization. 
Indeed, if one divides by some $t>0$ the discrete Green function \eqref{eq:killedGreen}, multiplies by $t$ the discrete boundary Poisson kernel \eqref{eq:defdisPoisson}, and also multiplies by $t$ the Brownian excursion measure $\mu$ from \eqref{e.bemeas}, then one would obtain $u_*^c(r)=u_*^d(r)=\pi/(3t),$ as well as the bound $h_*^d(r)\leq\sqrt{\pi/(2t)}.$ Our specific choice of normalization is consistent with the usual literature on the Brownian excursion cloud, and thus for instance consistent with the normalization from \cite{ArLuSe-20a}, but another natural choice of normalization from the discrete point of view would be to take $t=1/4$ to remove the division by $4$ in \eqref{eq:killedGreen} or the multiplication by $4$ in \eqref{eq:defdisPoisson}. This corresponds to considering $\D_n$ as a weighted graph with total weight $1$ instead of $4$ at each vertex $x\in{\D_n},$ see Remark~\ref{rk:defexcursion},\ref{rk:choiceofweight}. \item Using \eqref{eq:defckappa}, one could also equivalently phrase Theorem~\ref{t.mainthm} in terms of the intensity $\lambda$ of the Brownian loop soup. Namely for all $\intens>0$ and $\lambda\in{[0,1/2]},$ if either $\intens<\pi/4$ or \begin{equation*} \intens<\frac\pi3\text{ and }\lambda<\frac{(\pi-3\intens)(8\intens-\pi)}{\pi(\pi-2\intens)}, \end{equation*} then the probability that $0$ lies in a connected component of the vacant set $\V^{\intens,\lambda}$ whose closure intersects $\partial\D$ is strictly positive, and otherwise this probability is $0.$ Note that if $\lambda>1/2,$ then $\mathcal{V}^{u,\lambda}=\varnothing$ a.s.\ for all $u>0,$ see \cite[Lemma~9.4 and Proposition~11.1]{sheffield-werner}. \item \label{finiteenergy}By Theorem~\ref{t.mainthm}, the positivity of the probability that $B(r)$ is connected to $\partial\D$ in the vacant set $\V^{\intens,\lambda}$ does not depend on the choice of $r\in{[0,1)}.$ This fact could actually be proved directly using a type of finite energy property for $\be^{\intens,\lambda}.$ Indeed, on the event that $B(r)$ is connected to $\partial\D$ in $\V^{\intens,\lambda}$ and that $B(r)\cap \be^{\intens,\lambda}=\varnothing,$ we have that $0$ is connected to $\partial\D$ in $\V^{\intens,\lambda}.$ Since the probability that $B(r)\cap \be^{\intens,\lambda}=\varnothing$ is positive (this is simply the probability that $\be^{\intens}$ avoids all the loop clusters hitting $B(r),$ which is compact), we thus obtain by the FKG inequality that \begin{equation*} c\Pm(B(r)\stackrel{\CV^{\intens,\lambda}}{\longleftrightarrow}\partial\D)\leq\Pm(0\stackrel{\CV^{\intens,\lambda}}{\longleftrightarrow}\partial\D)\leq\Pm(B(r)\stackrel{\CV^{\intens,\lambda}}{\longleftrightarrow}\partial\D), \end{equation*} for some constant $c=c(u,\lambda,r),$ and we can conclude. \end{enumerate} \end{remark} \section{Coupling random walk and Brownian excursions}\label{s.coupling} In this section, we prove in Theorem~\ref{the:couplingdiscontexc1} that one can couple the random walk excursion cloud, defined above \eqref{eq:VunDef}, with the Brownian excursion cloud, defined in \eqref{e.omega_alpha}. Convergence for the corresponding excursion measures, for excursions starting on some arc $\Gamma\subset\partial\D$ and ending on some disjoint arc $\Gamma'\subset\partial\D,$ was first proved in \cite{kozdron_scaling_2006}. Convergence for the full excursion set has been proved in \cite[Lemma~4.6.2]{ArLuSe-20a}.
However, these two results are not explicit at all on the exact distance between the random walk and the Brownian motion excursions, as well as on the exact distance from the boundary of $\D$ at which this coupling is valid. They are therefore not adequate for proving quantitative results for percolation near the boundary such as \eqref{eq:percowithoutloops}, see also Remark~\ref{rk:connectiontoboundary}. When the domain is the unit disk $\D,$ we remedy this problem in this section, as well as present a more detailed proof of the coupling in \cite{ArLuSe-20a}. There are three main steps in our proof of the coupling between the random walk excursions and the Brownian excursions. The first is to show that with high probability, the same number of trajectories from random walk excursions and Brownian excursions hit a fixed ball $B,$ see Lemma~\ref{l.capconv}. The second is to show that we can couple the corresponding hitting distributions, see Lemma~\ref{l.EQcoup}. The third is to show that the trajectories inside the ball $B$ can be coupled so that they are close to each other, which is done in Appendix~\ref{sec:KMT}. Moreover, in Theorems~\ref{the:couplingloops} and \ref{the:couplingwithloops}, we explain how to add clusters of loops to the coupling using results from \cite{MR2255196,ArLuSe-20a}. \subsection{Convergence of capacities and equilibrium measures} Recall that for a compact set $K \Subset \D$ the (continuous) equilibrium measure of $K$ is denoted by $e_K$ while for $K' \subset \D_n$ we denote the discrete equilibrium measure by $e_{K'}^{(n)},$ see \eqref{defequiandcap} and \eqref{e.eqmeas}. Combining \cite[Theorem~4.4.4, (6.11) and Proposition~6.3.5]{MR2677157} with \eqref{eq:killedGreen} and \eqref{eq:Greenat0} we see that for all $y\in{\D_n},$ \begin{equation}\label{e.greenest} | G(0,y)-G^{(n)}(0,y)| \leq O \left(\frac{1}{|y|n} \right). \end{equation} Our first result states that the sequence of discrete approximations of the capacity of a ball at mesoscopic distance from the boundary indeed converges to the capacity of the continuous ball. \begin{lemma}\label{l.capconv} There exists $c<\infty$ such that for all $n\in\N$ and $r\in{(2/n,1)},$ \begin{equation} \label{eq:approcap} \left| \capac^{(n)}(B_n(r))-\capac(B(r)) \right| \leq c \frac{\capac^{(n)}(B_n(r))\capac(B(r))}{rn}. \end{equation} \end{lemma} \begin{proof} Abbreviate $B=B(r) \subset \D$ and $B_n = B_n(r).$ First, recall the last exit decomposition for the simple random walk \eqref{eq:lastexitdis}, which implies \begin{align*} 1=\Pm_0^{(n)}\big(L_{B_n}^{(n)}>0\big)= \sum_{y \in \widehat{\partial} B_n} G^{(n)}(0,y) e_{B_n}^{(n)}(y). \end{align*} Using \eqref{eq:Greenat0} and \eqref{capball} we thus obtain by rearranging \begin{align*} \frac1{2\pi}\log(1/r)\left(\mathrm{cap}(B)-\mathrm{cap}^{(n)}(B_n)\right)&=\sum_{y\in{\widehat{\partial}B_n}}e_{B_n}^{(n)}(y)\left(G^{(n)}(0,y)-\frac1{2\pi}\log(1/r)\right) \\&=\sum_{y\in{\widehat{\partial}B_n}}e_{B_n}^{(n)}(y)\left(G^{(n)}(0,y)-G(0,y)\right) \\&\quad+\frac1{2\pi}\sum_{y\in{\widehat{\partial}B_n}}e_{B_n}^{(n)}(y)\log(r/|y|). \end{align*} Using \eqref{e.greenest} and the fact that $||y|-r|\leq 1/n$ for all $y\in{\widehat{\partial}B_n}$ we thus obtain taking absolute values that \begin{align*} \left|\mathrm{cap}(B)-\mathrm{cap}^{(n)}(B_n)\right|\leq \frac{c\,\mathrm{cap}^{(n)}(B_n)}{rn\log(1/r)}, \end{align*} and we can conclude by \eqref{capball}. 
\end{proof} \begin{remark} If we let $B= B(1- \epsilon_n)$ and $B_n=B_n(1-\epsilon_n)$, where $\epsilon_n \to 0$ as $n \to \infty,$ then \eqref{capball} and the last lemma provide an estimate of the form \[ \left| \capac^{(n)}(B_n) - \capac(B) \right| \leq c \frac{1}{n\epsilon_n^2}, \] implying that the capacities are close as long as $\epsilon_n\sqrt{n}\gg 1$. \end{remark} As a corollary, we obtain an estimate of the total variation norm for the last exit distribution and the normalized equilibrium measure in the case of balls. \begin{corollary}\label{cor.LEEQCoup} There exists $c<\infty$ such that for all $n\in\N,$ $r\in{(2/n,1)}$ and $y\in{\widehat{\partial}B_n(r)}$ \begin{equation*} \left|\Pm_0^{(n)}\left( X_{L_{B_n}} =y \right) - \overline{e}_{B_n}^{(n)}(y)\right|\leq \frac{c{e}^{(n)}_{B_n}(y)}{rn}, \end{equation*} where $B_n=B_n(r).$ In particular, \begin{equation} \label{eq:equiexit} \left\| \Pm_0^{(n)}\left( X_{L_{B_n}} \in \cdot \right)-\overline{e}^{(n)}_{B_n}(\cdot) \right\|_{TV}\leq \frac{c\,\mathrm{cap}^{(n)}(B_n)}{rn}. \end{equation} \end{corollary} Note that the corresponding statement for the Brownian motion is exact, see \eqref{e.eqmeasexpr1} and \eqref{capball}. \begin{proof} The proof follows from \eqref{e.greenest} in combination with Lemma~\ref{l.capconv}, as follows. By \eqref{e.greenest}, \eqref{eq:Greenat0} and \eqref{capball}, together with the fact that $|y| \in{(r-2/n,r+2/n)},$ we have \begin{equation}\label{e.greenapprox} \left|G^{(n)}(0,y)-\frac1{\mathrm{cap}(B)}\right| \leq \left|G^{(n)}(0,y)-G(0,y)\right|+\frac1{2\pi}|\log(r/|y|)|=O\left( \frac{1}{rn} \right). \end{equation} Moreover, in light of Lemma~\ref{l.capconv}, we deduce that \begin{equation}\label{e.capquot} \left|\frac{1}{\mathrm{cap}^{(n)}(B_n)}-\frac1{\mathrm{cap}(B)}\right|\overset{\eqref{eq:approcap}}{=}O\left( \frac{1}{rn} \right). \end{equation} Combining \eqref{eq:lastexitdis}, \eqref{e.greenapprox} and \eqref{e.capquot}, we obtain \begin{align*} \left|\Pm^{(n)}_0\left( X_{L_{B_n}^{(n)}}=y \right)-\overline{e}^{(n)}_{B_n}(y)\right|&= \left|G^{(n)}(0,y) -\frac1{\capac^{(n)}(B_n)}\right|{e}^{(n)}_{B_n}(y) \\ & {=} O\left( \frac{{e}^{(n)}_{B_n}(y)}{rn} \right), \end{align*} and \eqref{eq:equiexit} follows by summing over $y\in{\widehat{\partial}B_n}.$ \end{proof} We now combine Corollary~\ref{cor.LEEQCoup} with a coupling result between a Brownian motion and a random walk at the last exit time of a ball, see Lemma~\ref{l.gtype}, to obtain the desired coupling between the normalized equilibrium measures. \begin{lemma}\label{l.EQcoup} There exist $s_0>0$ and $c<\infty$ such that for all $r\in{(\frac12,1)}$ and $n\in{\N},$ writing $B=B(r)$ and $B_n=B_n(r),$ there exists a coupling $\Q_r$ between random variables $E_{B_n}^{(n)}$ and $E_B$ with distributions $\overline{e}^{(n)}_{B_n}$ and $\overline{e}_B,$ respectively, satisfying for all $s\geq s_0$ that \begin{equation} \label{e.boundapproequi} \Q_r \left(\left| E_{B_n}^{(n)}- E_B\right| \geq \frac{s \log(n)}{n}\right) \leq \frac{c}{s}+ \frac{c\log(n)}{n(1-r)}. \end{equation} \end{lemma} \begin{proof} Assume that $X^{(n)}$ and $Z$ at times ${{L}_{B_n}^{(n)}}$ and ${L_{B }}$, respectively, are coupled as in Lemma~\ref{l.gtype} under $\Pm_{0,0}^{(n)}.$ We let $E_B:= Z_{L_B},$ which has law $\overline{e}_B$ by rotational invariance.
By Corollary~\ref{cor.LEEQCoup}, up to increasing $\Pm_{0,0}^{(n)}$ to a bigger probability space with probability measure denoted by $\Q_r,$ there is a coupling of a random variable $E_{B_n}^{(n)}$ with $X_{L_{B_n}^{(n)}}^{(n)}$ such that $E_{B_n}^{(n)}$ has law $\overline{e}_{B_n}^{(n)}$ and \[ \Q_r \left( E_{B_n}^{(n)} \neq X_{L_{B_n}^{(n)}}^{(n)} \right) \leq c \frac{\capac^{(n)}(B_n)}{n}. \] Using \eqref{e:boundlastexit}, we can easily conclude since $\capac^{(n)}(B_n)\leq c\,\capac(B)\leq c'/(1-r)$ by \eqref{eq:approcap} and \eqref{capball} if $r\geq1/2$, and since we can assume w.l.o.g.\ that $1-r\geq c/n$. \end{proof} \subsection{The coupling} \label{sec:coupling} Using Lemmas~\ref{l.capconv}, \ref{l.EQcoup} and \ref{lem:KMTuntilboundary}, we obtain the following coupling between the Brownian excursion cloud and the random walk excursion cloud. For each $r\in{[0,1)},$ $n\in\N$ and $\intens>0,$ we denote by \begin{equation} \label{eq:defOmega} \Omega_{\intens}^{(n)}(r):=\left\{\big(e(2n^2t+\tau_{B_n(r)}^{(n)}(e))\big)_{t\in{\Big[0,\frac{t_e-\tau_{B_n(r)}^{(n)}(e)}{2n^2}\Big]}}:\,e\in{\text{supp}(\omega_{\intens}^{(n)})},\,\tau_{B_n(r)}^{(n)}(e)<\infty\right\} \end{equation} the set of all excursions in $\omega_u^{(n)}$ that hit $B_n(r)$ started from their location at the first time of hitting $B_n(r)$ with time rescaled by $2n^2.$ The value of $e(t)$ for non-integer $t$ is obtained by linear interpolation, and for $w\in{\Omega_{\intens}^{(n)}(r)}$ we take $w(t)=\Delta$ for all $t\geq (t_e-\tau_{B_n(r)}^{(n)}(e))/(2n^2),$ where $\Delta$ is some cemetery point. We similarly define $\Omega_{\intens}(r)$ as the set of trajectories in the support of $\omega_{\intens}$ hitting $B(r),$ started from the first time of hitting $B(r)$, and equal to $\Delta$ after hitting $\partial\D.$ We moreover take the convention $|\Delta-x|=\infty$ for all $x\in{\D}.$ \begin{theorem} \label{the:couplingdiscontexc1} There exist constants $c,C>0$ and $s_0<\infty$ such that for all $n\in\N,$ $1/2\leq r\leq 1-C/n,$ $u>0$ and $s\geq s_0$ there is a coupling between $\omega^{(n)}_{\intens}$ and $\omega_{\intens}$ such that on an event $\mathcal{E}_1^{(n)}$ with probability at least \begin{equation} \label{eq:probacoupling} 1-\frac{c\intens}{(1-r)} \left( \frac{1}{s}+ \sqrt{\frac{\log(n)}{n(1-r)}}\right), \end{equation} there exists a bijection $F:\Omega_{\intens}(r)\rightarrow\Omega_{\intens}^{(n)}(r)$ such that \begin{equation} \label{eq:boundcoupling} \sup_{t\in{[0,\overline{L}_{B(r)}(w)]}}|w(t)-F(w)(t)|\leq \frac{s\log(n)}{n}\text{ for all $w\in{\Omega_{\intens}(r)}$}, \end{equation} where $\overline{L}_{B(r)}(w):=\sup\{t\geq0:|w(t)|\wedge|F(w)(t)|\leq r\}$ is the last time at which either $w$ or $F(w)$ are in $B(r).$ \end{theorem} \begin{proof} Abbreviate $B=B(r)$ and $B_n=B_n(r).$ By Lemma~\ref{l.EQcoup}, one can find an i.i.d.\ sequence of random variables $(\sigma_n^{(i)},\sigma^{(i)}),$ $i\in\N,$ such that for each $i\in\N$ the law of $\sigma_n^{(i)}$ is $\overline{e}_{B_n}^{(n)},$ the law of $\sigma^{(i)}$ is $\overline{e}_B,$ and for all $s\geq s_0,$ \begin{equation} \label{eq:couplingentrancepoint} \Pm\Big(|{\sigma}_n^{(i)}-\sigma^{(i)}|\geq \frac{s\log(n)}{2n}\Big)\leq \frac{c}{s}+ \frac{c\log(n)}{n(1-r)}. 
\end{equation} Now, using the KMT coupling from Lemma~\ref{lem:KMTuntilboundary}, we can produce a sequence of independent pairs $\big(\widehat{X}^{(n,i)},Z^{(i)} \big),$ $i\in{\N},$ of rescaled simple random walks on $\D_n$ and Brownian motions on $\D$ such that for each $i\in\N,$ $\widehat{X}^{(n,i)}$ starts at ${\sigma}_n^{(i)},$ $Z^{(i)}$ starts at ${\sigma}^{(i)},$ and \begin{equation} \label{eq:KMTforallwalks1} \Pm \left(\sup_{t\in{[0,\overline{L}_{B(r)}^{(i)}]}} |\widehat{X}^{(n,i)}_t-Z^{(i)}_t| \geq \frac{s\log(n)}{n} \,\Big|\,|\sigma_n^{(i)}-\sigma^{(i)}|\leq \frac{s\log(n)}{2n}\right) \leq \frac{cs\log(n)}{n(1-r)}, \end{equation} where $\overline{L}_{B(r)}^{(i)}:=\sup\{t\geq0:|\widehat{X}^{(n,i)}_t|\wedge|Z^{(i)}_t|\leq r\}.$ Let us now couple the number of discrete and continuous excursions. Since $1-r\geq C/n$ for a large enough constant $C,$ combining \eqref{capball} and \eqref{eq:approcap} one infers that $\mathrm{cap}^{(n)}(B_n)\leq2\mathrm{cap}(B).$ Therefore, using \eqref{eq:approcap} again, there exists a standard coupling between Poisson random variables $Y_n\sim \text{Poi}\left(\intens\mathrm{cap}^{(n)}(B_n)\right)$ and $Y\sim \text{Poi}\left(\intens\mathrm{cap}(B)\right)$ such that \begin{equation} \label{eq:1stcouplingPoisson} \Pm \left( Y_n \neq Y \right)\leq \frac{c\intens\mathrm{cap}(B)^2}{n}. \end{equation} By \eqref{capball} we moreover have \begin{equation} \label{eq:boundoncap1} \mathrm{cap}(B(r))=\frac{2\pi}{\log(1/r)}\leq\frac{2\pi}{1-r}. \end{equation} Using \eqref{eq:1stcouplingPoisson}, combining \eqref{eq:couplingentrancepoint}, \eqref{eq:KMTforallwalks1} and \eqref{eq:boundoncap1} with a union bound, noting that $Y$ has mean $u\mathrm{cap}(B(r))$ and that $\{{Z^{(i)}},\,i\in{\{1,\dots,Y\}}\}$ and $\{{\widehat{X}^{(n,i)}},\,i\in{\{1,\dots,Y_n\}}\}$ have respectively the same law as $\Omega_{\intens}(r)$ and $\Omega_{\intens}^{(n)}(r),$ we obtain a bijection $F$ satisfying \eqref{eq:boundcoupling} with probability at least \begin{equation*} 1-\frac{c\intens}{(1-r)} \left( \frac{1}{s}+ \frac{s\log(n)}{n(1-r)}\right). \end{equation*} Noting that the probability of finding a coupling satisfying \eqref{eq:boundcoupling} is increasing in $s,$ one can replace $s$ by $\sqrt{n(1-r)/\log(n)}$ whenever $s\geq\sqrt{n(1-r)/\log(n)},$ and we obtain the bound \eqref{eq:probacoupling}. \end{proof} \begin{remark} For fixed $r,u>0,$ Theorem~\ref{the:couplingdiscontexc1} allows us to approximate continuous excursions by discrete excursions with high probability as $n\rightarrow\infty$ for $s$ large enough. However, if $r=r(n),$ our approximation is valid with high probability as $n\rightarrow\infty$ only if $r\leq 1-(c^2\log(n)/n)^{1/3}$ for a large constant $c$ (taking for instance $s=(cn/\log(n))^{1/3}$), i.e., the coupling fails once one gets too close to the boundary of the unit disk. If one is only interested in coupling the excursions in a small region $A\subset B(r),$ Theorem~\ref{the:couplingdiscontexc1} could be improved by replacing the factor $u/(1-r)$ in \eqref{eq:probacoupling} with $u\mathrm{cap}(A).$ \end{remark} As a direct consequence of Theorem~\ref{the:couplingdiscontexc1}, one can moreover couple the discrete and continuous excursion sets.
We denote by $d_H(A,A')$ the Hausdorff distance between two sets $A,A'\subset\D,$ that is, $d_H(A,A')$ is the smallest $\delta>0$ such that $A\subset A'+B(\delta)$ and $A'\subset A+B(\delta).$ \begin{corollary} \label{cor:couplingexcursions} For all $n\in\N,$ $u>0$ and $s\geq s_0,$ there exists a coupling between $\tilde{\be}_n^{\intens}$ and $\be^{\intens}$ such that \begin{equation*} d_H(\tilde{\be}_n^{\intens},\be^{\intens})\leq \frac{s\log(n)}{n}\text{ with probability at least } 1-\frac{cun}{s^{3/2}\log(n)}. \end{equation*} \end{corollary} \begin{proof} This is a direct consequence of Theorem~\ref{the:couplingdiscontexc1} for $r=1-s\log(n)/n,$ noting that $d_H(\be_n^u,\tilde{\be}_n^u)\leq 1/n,$ $\partial\D\subset B(\tilde{\be}^u_n,1/n)$ and $\partial\D\subset \overline{{\be}^u}$ a.s. \end{proof} Let us now explain how to couple connected components of loop soups, following \cite{MR2255196} and \cite{MR3485399}. Recall the definition of the loop soups on the cable system $\widetilde{\D}_n$ introduced at the end of Section~\ref{sec:cableSystemExc}, whose restriction to $\D_n$ is the random walk loop soup introduced below \eqref{eq:defdisloop}, and of the Brownian loop soup from below \eqref{eq:defcontloops}. We call $e$ an excursion of the Brownian loop soup if there exists a loop $\ell$ in the Brownian loop soup and some stopping times $T_1<T_2$ for $\ell$ such that $e=(\ell(t))_{t\in{[T_1,T_2]}},$ and we denote by $\text{trace}(e)\subset\D$ the set $\{\ell(t),\,t\in{[T_1,T_2]}\}.$ Moreover, two excursions $e=(\ell(t))_{t\in{[T_1,T_2]}}$ and $e'=(\ell'(t))_{t\in{[T_1',T_2']}}$ are called disjoint if either $\ell$ and $\ell'$ are two different loops, or $\ell=\ell'$ and $[T_1,T_2]\cap[T_1',T_2']=\varnothing.$ The following theorem is tailored to our purpose in Section~\ref{sec:discretecritpara}. \begin{theorem} \label{the:couplingloops} For each $\lambda>0,$ there exist constants $c,c'>0$ and, for each $n\in{\N},$ a coupling between a Brownian loop soup on $\D$ and a cable system loop soup on $\widetilde{\D}_n$ at level $\lambda$, as well as an event $\mathcal{E}_2^{(n)}$ with probability at least $1-c/\sqrt{n},$ such that the following holds true. For each disjoint family of excursions $(e_i)_{i\leq E},$ with $E\in{\N},$ in the Brownian loop soup such that \begin{equation*} \mathcal{L}=\bigcup_{i\leq E}\text{trace}(e_i) \end{equation*} is connected, there exists $N\in{\N}$ (depending on the choice of the family $(e_i)_{i\leq E}$) so that for all $n\geq N$, on the event $\mathcal{E}_2^{(n)}$ there is a connected subset $\mathcal{L}^{(n)}$ of the trace on $\widetilde{\D}_n$ of the cable system loop soup satisfying \begin{equation} \label{eq:couplingloops} d_H(\mathcal{L},\mathcal{L}^{(n)})\leq \frac{c'\log(n)}{n}. \end{equation} \end{theorem} \begin{proof} By \cite[Corollary~5.4]{MR2255196} with $\theta\in{(5/3,2)}$, on an event $\mathcal{E}_2^{(n)}$ with probability at least $1-c/\sqrt{n},$ we can couple each loop $\ell$ of the Brownian loop soup with time duration $t_{\ell}\geq n^{-1/3}$ with a time-changed discrete-time random walk loop $\ell^{(n)}$ with time duration $t_{\ell^{(n)}}\geq n^{-1/3}$ such that \begin{equation} \label{eq:loopsareclose} \big|\ell(st_{\ell})-\ell^{(n)}(st_{\ell^{(n)}})\big|\leq \frac{c'\log(n)}{n}\text{ for all }s\in{[0,1]}, \end{equation} where $\ell^{(n)}(s)$ is obtained for $s\notin{\N}$ by linear interpolation.
Note that since $E<\infty,$ there exists $\delta>0$ such that the time duration of each loop in $\mathcal{L}$ is at least $\delta,$ and we assume from now on that $n$ is large enough so that $n^{-1/3}\leq \delta.$ If $e_i=(\ell_i(s))_{s\in{[T_{i,1},T_{i,2}]}},$ we write $e_i^{(n)}=(\ell_i^{(n)}(st_{\ell_i^{(n)}}/t_{\ell_i}))_{s\in{[T_{i,1},T_{i,2}]}}.$ Let us denote by $\mathcal{L}^{(n)}$ the union of the traces of the cable system excursions corresponding to $(e_i^{(n)})_{i\leq E}.$ Using \cite[(2.4)]{LJ-11}, one knows that discrete time loops from \cite{MR2255196} and continuous time loops from \eqref{eq:defdisloop} have the same trace on $\D_n,$ and since the distance between a random walk loop and its corresponding cable system loop is at most $1/n,$ \eqref{eq:couplingloops} clearly holds. We now show that $\mathcal{L}^{(n)}$ is connected for $n$ large enough. Proceeding similarly as in the proof of \cite[Lemma~2.7]{MR3485399}, the following is a consequence of the conditions $\mathcal{C}_j,$ ${j\in{\N}},$ in \cite[Lemma~2.6]{MR3485399}: for each loop $\ell$ in the Brownian loop soup, and each stopping time $T$ for $\ell,$ there exists a.s.\ a sequence $(\eps_j)_{j\in{\N}}$ decreasing to $0$ such that, for any continuous function $f:[0,t_{\ell}]\rightarrow\D$ with $\|f\|_{\infty}\leq \eps_j/12$ and any connected path $\gamma$ such that $B(\ell(T),\eps_j/2)\leftrightarrow B(\ell(T),\eps_j)^{\ch}$ in $\gamma,$ we have $\gamma\cap A\neq\varnothing,$ where $A=\{\ell(s)+f(s):\,s\in{[T,H]}\}$ and $H=\inf\{t\geq T:\ell(t)\in{B(\ell(T),\eps_j)^{\ch}}\}.$ In other words, if $\gamma$ is a path starting close to $\ell(T)$ and going far enough from $\ell(T),$ and $A$ is a set close enough to $\ell$ in a neighborhood of $\ell(T),$ then $\gamma$ intersects $A.$ Since $\mathcal{L}$ is connected, there exists for each $i,j\in{\{1,\dots,E\}}$ a sequence $k_1,\dots,k_p$ such that $k_1=i,$ $k_p=j$ and $\text{trace}(e_{k_m})\cap\text{trace}(e_{k_{m+1}})\neq\varnothing$ for each $m<p.$ For each $k,k'\in{\{1,\dots,E\}}$ such that $\text{trace}(e_{k})\cap\text{trace}(e_{k'})\neq\varnothing,$ define $H_{k,k'}$ as the hitting time of $\text{trace}(e_{k'})$ by $e_{k}.$ There exists a sequence $(\eps_j^{(k,k')})_{j\in{\N}}$ decreasing to $0,$ such that for each $j\in{\N}$ with $\text{trace}(e_{k'})\cap B(e_{k}(H_{k,k'}),2\eps_j^{(k,k')})^{\ch}\neq\varnothing,$ we have $\text{trace}(e_{k}^{(n)})\cap\text{trace}(e_{k'}^{(n)})\neq\varnothing$ if $c'\log(n)/n\leq \eps_j^{(k,k')}/2,$ since then $B(e_{k}(H_{k,k'}),\eps_j^{(k,k')}/2)\leftrightarrow B(e_{k}(H_{k,k'}),\eps_j^{(k,k')})^{\ch}$ in $\text{trace}(e_{k'}^{(n)})$ by \eqref{eq:loopsareclose}. Fixing for each $k,k'$ some $j$ large enough so that $\text{trace}(e_{k'})\cap B(e_{k}(H_{k,k'}),2\eps_j^{(k,k')})^{\ch}\neq\varnothing,$ and $n$ large enough so that $c'\log(n)/n\leq \eps_j^{(k,k')}/2$ uniformly in $k,k'$ (there are at most $E^2$ such $k,k'$), we thus have $\text{trace}(e_{k_m}^{(n)})\cap\text{trace}(e_{k_{m+1}}^{(n)})\neq\varnothing$ for each $m<p,$ and thus the set $\mathcal{L}^{(n)}$ is connected for $n$ large enough. \end{proof} Recall the definition of the sets of excursion plus loops $\tilde{\be}^{u,\lambda}_n$ from Section~\ref{sec:cableSystemExc} and $\be^{u,\lambda}$ from below \eqref{eq:defcontloops}.
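Before combining these couplings, note for concreteness that Corollary~\ref{cor:couplingexcursions} already provides a polynomial rate: applying it with, say, $s=n^{3/4}$ (which is admissible for $n$ large enough) yields a coupling under which \begin{equation*} d_H(\tilde{\be}_n^{\intens},\be^{\intens})\leq \frac{\log(n)}{n^{1/4}}\text{ with probability at least } 1-\frac{c\intens}{n^{1/8}\log(n)}. \end{equation*}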
Combining Theorems~\ref{the:couplingdiscontexc1} and \ref{the:couplingloops}, one can show that each loop soup cluster $\mathcal{L}$ hitting $\be^{u}$ can be approximated by a set $\mathcal{L}^{(n)}$ of cable system loops which is connected and intersects $\tilde{\be}^{u,\lambda}_n$ for $n$ large enough. In other words, one can show that $\be^{u,\lambda}\subset B(\tilde{\be}^{u,\lambda}_n,\eps_n)$ for $n$ large enough and a sequence $\eps_n\rightarrow0.$ However, the reverse inclusion is more difficult to obtain, since the clusters of small loops on the cable system cannot be well approximated by Brownian motion loops, and thus might be asymptotically strictly larger than the Brownian motion loop clusters. This problem is solved in \cite{ArLuSe-20a} using the non-percolation of the loop soup clusters on general domains, see \cite[Lemma~4.13]{ArLuSe-20a}. More precisely, recalling the definition of $\tilde{\be}^{u,\lambda}_n$ from Section~\ref{sec:cableSystemExc} and of $\be^{u,\lambda}$ from below \eqref{eq:defcontloops}, the following follows from \cite[Proposition~4.11]{ArLuSe-20a} and Skorokhod's representation theorem. \begin{theorem}[\cite{ArLuSe-20a}] \label{the:couplingwithloops} For each $u,\lambda>0,$ there exists for all $n\in\N$ a coupling of $\be^{\intens,\lambda}$ and $\widetilde{\be}_n^{\intens,\lambda},$ such that $d_H(\be^{\intens,\lambda},\widetilde{\be}_n^{\intens,\lambda})\rightarrow0$ as $n\rightarrow\infty.$ \end{theorem} Note that, contrary to Corollary~\ref{cor:couplingexcursions} and Theorem~\ref{the:couplingloops}, the coupling from Theorem~\ref{the:couplingwithloops} does not explicitly give the rate at which $\tilde{\be}_n^{u,\lambda}$ converges to $\be^{u,\lambda}.$ One could try to make the arguments from \cite{ArLuSe-20a} more explicit in $n,$ but this would not lead to any significant improvement of our main results, see Remark~\ref{rk:polycloseboundary}. \section{Discrete critical values} \label{sec:discretecritpara} In this section, we prove that the discrete percolation parameters are asymptotically the same as the critical values obtained for the continuous percolation in Appendix~\ref{sec:contperco}. Our main tool will be the couplings between the continuous and discrete models from the previous section, that is Theorem~\ref{the:couplingdiscontexc1} for excursions, Theorem~\ref{the:couplingloops} for loop soups, and Theorem~\ref{the:couplingwithloops} for sets of excursions plus loops. We first recall the Beurling estimate, which will also be useful in Section~\ref{s:gfflvlperc}. \begin{lemma} \label{lem:beurling} There exists $c<\infty$ such that for all $n\in{\N},$ all connected sets $A\subset \D_n,$ $R>0$ and $x\in{\D_n}$ with $\varnothing\neq A\cap\partial B_n(x,R)\subset{\D}_n,$ \begin{equation} \label{eq:disbeurling} \Pm_x^{(n)}\big(\tau_A>\tau_{\partial B_n(x,R)}\big)\leq c\left(\frac{d(x, A)}{R}\right)^{1/2}. \end{equation} Moreover, the same result holds for the continuum case, that is, when replacing $\D_n$ by $\D,$ $B_n(x,R)$ by $B(x,R),$ and $\Pm_x^{(n)}$ by $\Pm_x.$ \end{lemma} A proof of Lemma~\ref{lem:beurling} can be found in \cite[Lemma~2.3]{MR1249129} for random walks (it is in fact a consequence of \cite[Lemma~2.5.2]{MR2985195}), and in \cite[Proposition~3.79]{lawler2005conformally} for Brownian motion. We now present an application of the Beurling estimate for certain sets, tailored to our future purposes.
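Before doing so, let us briefly indicate, for orientation only, where the exponent $1/2$ in \eqref{eq:disbeurling} comes from. Consider the continuum case, with the Brownian motion started at distance $d=d(x,A)$ beyond the tip of a straight segment $A$ (the extremal configuration). The map $w\mapsto\sqrt{w},$ centered at the tip of $A,$ unfolds the complement of the segment onto a half-plane, sends the starting point to a point at distance of order $\sqrt{d}$ from the boundary of that half-plane, and maps $B(x,R)$ into a region of diameter of order $\sqrt{R}.$ By conformal invariance, avoiding $A$ before leaving $B(x,R)$ then essentially amounts to exiting a half-disk of radius of order $\sqrt{R}$ through its circular arc when started at height of order $\sqrt{d},$ which has probability of order $\sqrt{d}/\sqrt{R}=(d/R)^{1/2}.$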
For $r\in [0,1),$ $r'\in{(r,1)},$ $\eps\in{(0,(1-r')/6)},$ and $j\in{\{0,1,2\}}$ let \begin{equation} \label{eq:defHi+} \begin{split} \H^{+}_{j,\eps}:=&\Big\{x\in{{B}(1-(j+1)\eps)\setminus \overline{B}(r+j(r'-r)/2)}:\,\text{arg}(x)\in{\big((j-7)\pi/28,-j\pi/28\big)}\Big\} \\\cup&\Big\{x\in{{B}(1-(j+1)\eps)\setminus \overline{B}(r+j(r'-r)/2)}:\,\text{arg}(x)\in{\big(\pi+j\pi/28,\pi+(7-j)\pi/28\big)}\Big\} \\\cup&\Big\{x\in{{B}(1-(j+4)\eps)\setminus \overline{B}(r+j(r'-r)/2)}:\,\text{arg}(x)\in{\big(-j\pi/28,\pi+j\pi/28\big)}\Big\}. \end{split} \end{equation} Note that $\H^{\pm}_{2,\eps}\subset\H^{\pm}_{1,\eps}\subset\H^{\pm}_{0,\eps}$ and that we made the dependency of $\H^{\pm}_{j,\eps}$ on $r,r'$ implicit to simplify notation. Moreover, for $j\in{\{0,2\}}$ we let \begin{equation} \label{eq:defS+} \begin{gathered} S_{j,\eps}^{+,{r}}:=\Big\{x\in{\partial B(1-(4-j/4)\eps)}:\,\text{arg}(x)\in{\big((j-7)\pi/28,-j\pi/28\big)}\Big\}, \\ S_{j,\eps}^{+,{l}}:=\Big\{x\in{\partial B(1-(4-j/4)\eps)}:\,\text{arg}(x)\in{\big(\pi+j\pi/28,\pi+(7-j)\pi/28\big)}\Big\}, \\\overline{S}_{j,\eps}^{+,r}:=\Big\{x\in{B(1-(j+4)\eps)\setminus \overline{B}(r+j(r'-r)/2)}:\,\text{arg}(x)=-j\pi/28\Big\}, \\ \overline{S}_{j,\eps}^{+,l}:=\Big\{x\in{B(1-(j+4)\eps)\setminus \overline{B}(r+j(r'-r)/2)}:\,\text{arg}(x)=\pi+j\pi/28\Big\}. \end{gathered} \end{equation} We also define the sets $\H_{j,\eps}^-$, $S_{j,\eps}^{-,l}$, $S_{j,\eps}^{-,r}$, $\overline{S}_{j,\eps}^{-,l}$ and $\overline{S}_{j,\eps}^{-,r}$ as the respective reflections through $\R$ of the sets $\H_{j,\eps}^+$, $S_{j,\eps}^{+,r}$, $S_{j,\eps}^{+,l}$, $\overline{S}_{j,\eps}^{+,r}$ and $\overline{S}_{j,\eps}^{+,l}$; here, superscripts $l$ and $r$ stand for left and right, and in particular should not be confused with the radius $r$. We refer to Figure \ref{F:discretecritpar} below for an illustration of all these sets when $r=0$. For $A\subset \D$, $j\in{\{0,2\}}$ and $\pm\in{\{-,+\}}$ let us introduce the events \begin{equation} \label{eq:defFi+-} \begin{gathered} F_{j,\eps}^{\pm}(A)=\left\{ \text{$A$ contains a path included in $\H_{j,\eps}^{\pm}$ and hitting both $S_{j,\eps}^{\pm,r}$ and $S_{j,\eps}^{\pm,l}$} \right\}, \\ \overline{F}_{j,\eps}^{\pm}(A)=\left\{ \text{$A$ contains a path included in $\H_{j,\eps}^{\pm}$ and hitting both $\overline{S}_{j,\eps}^{\pm,r}$ and $\overline{S}_{j,\eps}^{\pm,l}$} \right\}. \end{gathered} \end{equation} In the rest of the section, when we write that $F_{j,\eps}^{\pm}$ satisfies some property, it means that both $F_{j,\eps}^+$ and $F_{j,\eps}^-$ satisfy this property. Let us quickly explain the reason for introducing these events, and the strategy of the proof of Theorem~\ref{the:maindisexcloops}. At first glance, it might seem enough, in view of Theorem~\ref{t.mainthm} and the couplings from Section~\ref{sec:coupling}, to prove that equivalently on the continuum and on the cable system, there is a path of excursions plus clusters of loops in $\{x\in{\D}:\,\pm\Im(x)\geq0\}$ which hits both $\{x:\text{arg}(x)=\pi\}$ and $\{x:\text{arg}(x)=0\}$. However, one additionally needs that the union of the previous excursions plus clusters of loops in the upper and lower part of the disk form together a surface blocking $0$ from $\partial \D$, which is not always the case (think for instance of a spiral around $0$). To avoid this problem, it will be easier to consider the events $F_{j,\eps}^\pm$ and $\overline{F}_{j,\eps}^{\pm}$ instead, as we now explain. 
When $u\geq (8-\kappa)\pi/16$, one can show by a similar reasoning as in the proof of Lemma~\ref{lem:2} that the event $F_{2,0}^{\pm}(\be^{u,\lambda})$ occurs a.s, and thus $F_{2,\eps}^{\pm}(\be^{u,\lambda})$ occurs with high probability for $\eps$ small enough. Under the appropriate couplings of discrete and continuous excursions and loops in $B(1-\eps)$ from Section~\ref{sec:coupling}, one deduces that $F_{0,\eps}^{\pm}(\tilde{\be}_n^{u,\lambda})$ also occurs with high probability, see \eqref{eq:approxconnection1}. Since for any $A\subset\D$ the event $F_{0,\eps}^{\pm}(A)$ implies that there is a path in $A$ disconnecting $B(r)$ from $\{x\in{\partial B(1-4\eps)}:\,\pm\Im(x)\geq0\}$ in $B(1-4\eps)$, see the left-hand side of Figure~\ref{F:discretecritpar}, we have that \begin{equation} \label{eq:interestofF} \text{ if }F_{0,\eps}^{+}(A)\text{ and }F_{0,\eps}^{-}(A)\text{ both occur, then }B(r)\not\leftrightarrow \partial B(1-4\eps)\text{ in }A^{\ch}. \end{equation} By combining the previous observations we can conclude the proof in the case $u\geq (8-\kappa)\pi/16$ of Theorem~\ref{the:maindisexcloops}. On the other hand, when $\intens<(8-\kappa)\pi/16$, by a similar reasoning as in the proof of Lemma~\ref{lem:1}, one can show that $\overline{F}_{0,0}^\pm(\be^{u,\lambda})^{\ch}$ occurs with positive probability, and thus $\overline{F}_{0,\eps}^\pm(\be^{u,\lambda})^{\ch}$ as well for $\eps$ small enough. Moreover, under a similar coupling as before, see \eqref{eq:approxconnection2}, we deduce that $\overline{F}_{2,\eps}^\pm(\tilde{\be}_n^{u,\lambda})^{\ch}$ also occurs with positive probability. Finally by the observation that for any $A\subset\D$ \begin{equation} \label{eq:interestofFbar} \text{ if }\overline{F}_{2,\eps}^{\pm}(A)^{\ch}\text{ occurs, then }B(r')\leftrightarrow \partial B(1-6\eps)\text{ in }A^{\ch}, \end{equation} see the right-hand side of Figure~\ref{F:discretecritpar}, we are able to finish the proof of Theorem~\ref{the:maindisexcloops}. Note that in the proof of Theorem~\ref{the:maindisexcloops}, we will actually use an easier argument based on the coupling from Theorem~\ref{the:couplingwithloops} in the case $\intens<(8-\kappa)\pi/16$, but the previous reasoning relying on \eqref{eq:interestofFbar} will still be useful when trying to get precise control on the dependency of $\eps$ on $n$ in the case $\lambda=0$ from \eqref{eq:percowithoutloops}. Let us now give the details of the previous strategy. One of the main obstacles is to prove that if there is a path of connected excursions and loop clusters in the continuum, then there is also such a similar connected path in the cable system. Indeed, the couplings from Section~\ref{sec:coupling} only imply that when two continuous excursions, or an excursion and a loop cluster, intersect each other, then the cable system excursions, or excursion and loop cluster, are close to one another, but do not necessarily intersect each other. As we shall see in Lemma~\ref{Lem:simpleRWresult}, using Lemma~\ref{lem:beurling}, one can actually show that they will intersect each other with high probability. 
Moreover, for each $n\in\N,$ denote by ${X}^{\pm}$ under $\Pm_x^{(n)},$ $x\in{\H_{1,\eps}^{\pm}},$ the trace on ${\D}_n$ of the random walk ${X}$ on ${\D}_n,$ killed on hitting $(\H_{0,\eps}^{\pm})^{\ch}.$ We define similarly $Z^{\pm}$ as the trace on $\D$ of the Brownian motion $Z$ on $\D$ killed on hitting $(\H_{0,\eps}^{\pm})^{\ch}.$ \begin{lemma} \label{Lem:simpleRWresult} There exists a constant $c<\infty$ such that for all $r\in [0,1),$ $r'\in{(r,1)},$ $\eps\leq c'$ (for some constant $c'>0$ depending only on $r,r'$), $\delta>0,$ $n\in\N,$ $x\in{\H_{0,\eps}^{\pm}},$ and any connected set $\mathcal{C}\subset\H_{0,\eps}^{\pm}\cap \D_n$ with diameter at least $\eps/4$ intersecting $\H_{1,\eps}^{\pm},$ \begin{equation} \label{eq:simpleRWresult} \Pm_x^{(n)}\Big({X}^{(\pm)}\cap \mathcal{C}=\varnothing,\ d\big(X^{(\pm)},\, \mathcal{C}\cap\H_{1,\eps}^{\pm}\big)\leq\delta\Big)\leq c\left(\frac{\delta}{\eps}\right)^{1/2}. \end{equation} Moreover, the same result still holds for the continuum setting $n=\infty,$ that is when replacing $\D_n$ by $\D,$ $\Pm_x^{(n)}$ by $\Pm_x$ and $X^{\pm}$ by $Z^{\pm}.$ \end{lemma} \begin{proof} We do the proof for the random walk $X$ on ${\D}_n$; the proof for the continuum case proceeds analogously. Let $H_{\delta}$ be the first time $X$ hits $B_n(\mathcal{C}\cap\H_{1,\eps}^{\pm},\delta),$ following the standard notation $H_{\delta} = \infty$ if $X \cap B_n(\mathcal{C}\cap\H_{1,\eps}^{\pm},\delta) = \varnothing.$ Without loss of generality, in order to prove \eqref{eq:simpleRWresult} we can and will assume from now on that $H_{\delta}<\infty$ and $\delta<\eps/40.$ Note that $\H_{1,\eps}^{\pm}\neq\varnothing$ and $B_n(\H_{1,\eps}^{\pm},\eps/10)\subset \H_{0,\eps}^{\pm}$ for $\eps$ small enough, depending on the choice of $r,r',$ and that $\mathcal{C}\cap \partial B_n(X(H_{\delta}),\eps/20)\neq\varnothing.$ Due to the Markov property, conditionally on $H_{\delta}$ and $X(H_{\delta}),$ the process $(X(t))_{t\geq H_{\delta}}$ until the first time it exits $B_n(X(H_{\delta}),\eps/20)(\subset\H_{0,\eps}^{\pm})$ has the same law as a random walk on $\D_n$ started in $X(H_{\delta})$ until the first time it exits $B_n(X(H_{\delta}),\eps/20).$ As a consequence, by the Beurling estimate, see Lemma~\ref{lem:beurling}, we have that there exists a constant $c<\infty$ such that for all $n\in\N,$ \begin{equation*} \Pm_x^{(n)}\Big((X(t))_{t\geq H_{\delta}} \text{ leaves }B_n(X(H_{\delta}),\eps/20)\text{ before hitting }\mathcal{C}\,\big|\,H_{\delta},X(H_{\delta})\Big)\leq c\left(\frac{\delta}{\eps}\right)^{1/2}. \end{equation*} Integrating, \eqref{eq:simpleRWresult} follows. \end{proof} We begin with the following lemma, which essentially controls the probability to have a connection in $\V^{\intens,\lambda}$ but not in $\widetilde{\V}^{\intens,\lambda}_n,$ and vice versa. 
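In its proof, the estimate \eqref{eq:simpleRWresult} will typically be applied with $\delta$ of order $\log(n)^2/(\eps n),$ the precision of the excursion coupling obtained from Theorem~\ref{the:couplingdiscontexc1} applied with $s=\log(n)/\eps;$ for this choice the right-hand side of \eqref{eq:simpleRWresult} is of order $\log(n)/(\eps\sqrt{n}),$ which tends to $0$ as soon as $\eps\gg\log(n)/\sqrt{n}.$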
\begin{lemma} \label{lem:approxconnection} For all $\intens>0,$ $\lambda\geq0$ and $\eps\in{(0,1)}$ there exists a coupling $\Q^{u,\lambda,\eps}_n$ between $\tilde{\V}_n^{u,\lambda}$ and $\V^{u,\lambda}$ such that the following holds: for all $r\in [0,1-6\eps)$ and $r'\in{(r,1-6\eps)}$, \begin{equation} \label{eq:approxconnection1} \lim\limits_{n\rightarrow\infty}\Q^{u,\lambda,\eps}_n\big(F_{0,\eps}^{\pm}(\tilde{\be}_n^{u,\lambda})^{\ch},F_{2,\eps}^\pm({\be}^{u,\lambda})\big)=0, \end{equation} and for all $r\in [0,1)$ and $r'\in{(r,1)}$, letting $\eps_n=n^{-1/7}/7$, \begin{equation} \label{eq:approxconnection2} \lim\limits_{n\rightarrow\infty}\Q^{u,0,\eps_n}_n\big(\overline{F}_{0,\eps_n}^{\pm}({\be}^{u})^{\ch},\overline{F}_{2,\eps_n}^\pm(\tilde{\be}_n^{u})\big)=0. \end{equation} \end{lemma} \begin{proof} We first describe how the coupling $\Q_n^{u,\lambda,\eps}$ is constructed, and then explain why this coupling satisfies \eqref{eq:approxconnection1} and \eqref{eq:approxconnection2}. First couple the discrete excursions $\be^{\intens}_n$ and the continuous excursions $\be^{\intens}$ as in Theorem~\ref{the:couplingdiscontexc1} for $s=\log(n)/\eps$ on an event $\mathcal{E}_1^{(n)}$ with probability at least $1-c(\log(n))^{-1}-\sqrt{c\log(n)/(n\eps^3)},$ for some large enough constant $c=c(u),$ so that there exists a bijection $F:\Omega_{\intens}(1-\eps)\rightarrow\Omega_{\intens}^{(n)}(1-\eps)$ (see \eqref{eq:defOmega} and below) such that \begin{equation} \label{eq:boundcoupling2} \sup_{t\in{[0,\overline{L}_{B(1-\eps)}(w)]}}|w(t)-F(w)(t)|\leq \frac{c\log(n)^2}{\eps n}=:f(n,\eps)\text{ for all $w\in{\Omega_{\intens}(1-\eps)}$}. \end{equation} We also couple the cable system excursion process $\tilde{\be}^u_n$ with $\be^u_n$ so that the trace of $\tilde{\be}^u_n$ on $\D_n$ is $\be^u_n,$ and for each $w\in{\Omega_u(1-\eps)},$ we denote by $\tilde{F}(w)$ the cable system excursion corresponding to $F(w),$ see Section~\ref{sec:cableSystemExc}. Moreover, if $\lambda\neq0,$ we couple the cable system loop soup and the Brownian loop soup at level $\lambda$ as in Theorem~\ref{the:couplingloops}, and in particular there is an event $\mathcal{E}_2^{(n)}$ with probability at least $1-o_n(1)$ under which \eqref{eq:couplingloops} is satisfied. We start with the proof of \eqref{eq:approxconnection1}. Let $(w_i)_{i\in{\{1,\dots,N_{\eps}\}}}$ be some enumeration of the Brownian excursions hitting $B(1-\eps),$ and let $w'_i$ be the part of $w_i$ in $\Omega_u(1-\eps).$ For each $i\leq N_{\eps},$ we decompose the cable system trajectory $\tilde{F}(w_i')$ into subexcursions in $\H_{0,\eps}^{+}$, starting and ending in $\partial \H_{0,\eps}^{+}$. We denote by $(E_{i,j}^{(n),+})_{j=1,\dots,K_i^{+}}$ the subexcursions which hit $\H_{1,\eps}^{+}$. We decompose the trajectory $\tilde{F}(w_i')$ again, this time into subexcursions in $\H_{0,\eps}^{-}$ starting and ending in $\partial \H_{0,\eps}^{-}$, and denote by $(E_{i,j}^{(n),-})_{j=1,\dots,K_i^{-}}$ the subexcursions which hit $\H_{1,\eps}^{-}$. We remark that some (parts) of the excursions $(E_{i,j}^{(n),-})_{j}$ and $(E_{i,j}^{(n),+})_{j}$ may coincide. Note that upon choosing $\eps>0$ small enough, $\partial\H_{0,\eps}^{\pm}$ and $\H_{1,\eps}^{\pm}$ are at positive distance.
Therefore $K_i^{\pm}$ is a.s.\ finite, and it is possible that $\H_{1,\eps}^{\pm}$ is never visited by $\tilde{F}(w_i'),$ and in this case $K_i^{\pm}=0.$ Note also that $\tilde{F}(w_i')$ could hit $B(r),$ but its trajectory inside ${B}(r)$ does not appear in the decomposition $(E_{i,j}^{(n),\pm})_{j=1,\dots,K_i^{\pm}}.$ For each $i\in{\{1,\dots,N_{\eps}\}}$ and $j\in{\{1,\dots,K_i^{\pm}\}}$ let $E^{\pm}_{i,j}$ be the subtrajectory of $w_i'$ whose starting and ending times are the same as the ones of $E^{(n),\pm}_{i,j}$ for $\tilde{F}(w_i').$ Note that $E^{\pm}_{i,j}$ starts and ends at random times which depend on the random walk excursions, and thus might not be Markovian. Then using \eqref{eq:boundcoupling2} and noting that cable system excursions and discrete excursions are at Hausdorff distance at most $1/n,$ on the event $\mathcal{E}_1^{(n)},$ $d_H\big(E^{\pm}_{i,j},E^{(n),\pm}_{i,j}\big)\leq f(n,\eps)$ (up to changing the constant $c$ in \eqref{eq:boundcoupling2}), where we identify trajectories with their trace on $\D,$ and the Hausdorff distance is defined above Corollary~\ref{cor:couplingexcursions}. Moreover, in view of \eqref{eq:defHi+}, if $\eps\geq cf(n,\eps)$ and $r'-r\geq cf(n,\eps),$ each excursion of $w_i$ in $\H_{2,\eps}^{\pm}$ is part of $E^{\pm}_{i,j}$ for some $j\leq K_i^{\pm}.$ If $\lambda>0,$ we moreover denote by $(e_j^{\pm})_{j\leq E_{\delta,\eps}^{\pm}}$ the excursions in $\H_{1,\eps}^{\pm}$ which intersect $\H_{2,\eps}^{\pm}$ and come from loops with diameter at least $\delta,$ in the Brownian loop soup at level $\lambda,$ where $\delta>0$ is a parameter we will choose in \eqref{eq:choicedelta}. The sets $(\text{trace}(e_j^{\pm}))_{j\leq E_{\delta,\eps}^{\pm}}$ form connected components in $\H_{1,\eps}^{\pm}$, which we denote by $(\mathcal{L}_i^{\pm})_{i\leq L_{\delta,\eps}^{\pm}}.$ Then, since our choice of $\delta$ will not depend on $n,$ see \eqref{eq:choicedelta}, on the coupling event $\mathcal{E}_2^{(n)},$ for $n$ large enough and for each $i\leq L_{\delta,\eps}^{\pm}$ there exists a.s.\ a connected set $\mathcal{L}_i^{(n),\pm}$ included in the trace on $\widetilde{\D}_n$ of the cable system loop soup, and such that $d_{H}(\mathcal{L}_i^{(n),\pm},\mathcal{L}_i^{\pm})\leq c\log(n)/n\leq f(n,\eps).$ Let $\mathcal{C}^{(n),\pm}_k,$ $k\leq C^{\pm}_{\eps,\delta},$ be some enumeration of the excursions $E_{i,j}^{(n),\pm},$ $i\in{\{1,\dots,N_{\eps}\}}$ and $j\in{\{1,\dots,K_i^{\pm}\}},$ plus (if $\lambda\neq0$) the connected clusters of loop excursions $\mathcal{L}_i^{(n),\pm},$ $i\leq L_{\delta,\eps}^{\pm}.$ For each $k\in{\{1,\dots,C_{\eps,\delta}^{\pm}\}},$ there is a continuous excursion, or cluster of loops, $\mathcal{C}_k^{\pm}$ at Hausdorff distance at most $f(n,\eps)$ from $\mathcal{C}_k^{(n),\pm},$ and all the parts in $\H_{2,\eps}^{\pm}$ of continuous loop clusters, of loops with diameter at least $\delta$, and of excursions are in some $\mathcal{C}_k^{\pm}$ if $\eps\wedge(r'-r)\geq cf(n,\eps).$ We define for each $i\in{\{1,\dots,N_{\eps}\}},$ $j\in{\{1,\dots,K_i^{\pm}\}}$ and $k\in{\{1,\dots,C^{\pm}_{\eps,\delta}\}}$ the events \begin{equation} \label{eq:defApmnij} A_{i,j,k}^{(n),\pm}:=\left\{d\big(E^{(n),\pm}_{i,j},\mathcal{C}^{(n),\pm}_k\cap\H_{1,\eps}^{\pm}\big)>2f(n,\eps)\right\}\cup\left\{E^{(n),\pm}_{i,j}\cap \mathcal{C}^{(n),\pm}_k\neq\varnothing\right\} \end{equation} and \begin{equation} \label{eq:defApmn} A^{(n),\pm}_{\eps,\delta}:=\bigcap_{i\in{\{1,\dots,N_{\eps}\}}}\bigcap_{j\in{\{1,\dots,K_i^{\pm}\}}}\bigcap_{
k\in{\{1,\dots,C_{\eps,\delta}^{\pm}\}}}A_{i,j,k}^{(n),\pm}. \end{equation} Note that on this event, any two excursions, or any excursion and loop cluster, which are close enough will intersect each other. We will now argue that, on this event, the event $F_{2,\eps}^{\pm}$ for Brownian excursions plus loops implies $F_{0,\eps}^{\pm}$ for cable system excursions plus loops when they are close to each other, see \eqref{eq:ifAnepsdeltathendiscretedisconnection}. Let $\be^{\intens,\lambda,\delta}$ be the union of the clusters, consisting of continuous loops with diameter at least $\delta$ for the Brownian loop soup with intensity $\lambda,$ which hit $\be^{\intens}$. On the event $F_{2,\eps}^\pm({\be}^{u,\lambda,\delta}),$ see \eqref{eq:defFi+-}, there exist $M^{\pm}_{\eps,\delta}\in\N_0$ and $k_0,\dots,k_{M^{\pm}_{\eps,\delta}}$ such that \begin{enumerate}[i)] \item $\mathcal{C}_{k_{i-1}}^{\pm}\cap\mathcal{C}_{k_i}^{\pm}\cap \H_{2,\eps}^{\pm}\neq\varnothing$ for all $i\in\{1,\dots,M^{\pm}_{\eps,\delta}\},$ \item if $\mathcal{C}_{k_{i-1}}^{\pm}$ is a connected cluster of loop excursions for some $i\in\{1,\dots,M^{\pm}_{\eps,\delta}\},$ then $\mathcal{C}_{k_i}^{\pm}$ is a Brownian excursion, \item $\mathcal{C}_{k_0}^{\pm}\cap S_{2,\eps}^{\pm,l}\neq\varnothing$ and $\mathcal{C}_{k_{M^{\pm}_{\eps,\delta}}}^{\pm}\cap S_{2,\eps}^{\pm,r}\neq\varnothing$. \end{enumerate} \begin{figure}[ht] \centering \includegraphics[scale=0.76]{Discretecritpar_new.eps} \caption{Blue lines correspond to excursions and red sets to loop clusters. Dashed lines represent the sets $\mathcal{C}_k^{(n),\pm}$, that is excursions and loop clusters on the cable system $\widetilde{\D}_n,$ whereas solid lines represent the sets $\mathcal{C}_k^{\pm}$, that is excursions and loop clusters on $\D.$ The dotted purple lines correspond to the sets $S_{j,\eps}^{+,l}$ and $S_{j,\eps}^{+,r}$ on the left, and to the sets $\overline{S}_{j,\eps}^{+,l}$ and $\overline{S}_{j,\eps}^{+,r}$ on the right, $j\in{\{0,2\}}$. On the left the events $F_{2,\eps}^+(\be^{u,\lambda,\delta})$ and $F_{0,\eps}^+(\tilde{\be}^{u,\lambda}_n)$ both occur. On the right, the event $\overline{F}_{2,\eps}^-(\be^{u})$ occurs, but not the event $\overline{F}_{0,\eps}^-(\tilde{\be}^{u}_n)$ since the dashed excursions $\mathcal{C}_1^{(n),-}$ and $\mathcal{C}_2^{(n),-}$ are close and do not intersect.} \label{F:discretecritpar} \end{figure} We refer to the left-hand side of Figure~\ref{F:discretecritpar} for details. If $\eps\wedge (r'-r)\geq cf(n,\eps),$ see \eqref{eq:boundcoupling2}, for each $i\in\{1,\dots,M^{\pm}_{\eps,\delta}\},$ one can easily deduce from i) that $d\big(\mathcal{C}^{(n),\pm}_{k_{i-1}}\cap\H_{1,\eps}^{\pm},\mathcal{C}^{(n),\pm}_{k_i}\cap\H_{1,\eps}^{\pm}\big)\leq 2f(n,\eps)$ on the event $\mathcal{E}_1^{(n)}\cap\mathcal{E}_2^{(n)},$ and thus by ii) $\mathcal{C}^{(n),\pm}_{k_{i-1}}\cap \mathcal{C}^{(n),\pm}_{k_i}\neq\varnothing$ on the event $A_{\eps,\delta}^{(n),\pm}.$ On the intersection of these events, $\bigcup_{i=0}^{M^{\pm}_{\eps,\delta}}\mathcal{C}^{(n),\pm}_{k_i}$ is thus a connected subset of $\H_{0,\eps}^{\pm}$ for $n$ large enough, and, by \eqref{eq:defS+} as well as iii), $\bigcup_{i=0}^{M^{+}_{\eps,\delta}}\mathcal{C}^{(n),+}_{k_i}$ intersects both $\{x\in{\D\setminus B(1-4\eps)}:\,\text{arg}(x)\in{[-\pi/4,0]}\}$ and $\{x\in{\D\setminus B(1-4\eps)}:\,\text{arg}(x)\in{[\pi,5\pi/4]}\}$.
By a simple geometric argument, see Figure~\ref{F:discretecritpar}, one deduces that $\bigcup_{i=0}^{M^{+}_{\eps,\delta}}\mathcal{C}^{(n),+}_{k_i}$ intersects both $S_{0,\eps}^{+,r}$ and $S_{0,\eps}^{+,l}$. Let us denote by $\mathcal{A}^{(n),+}_{\eps,\delta}$ the event that $\bigcup_{i=0}^{M^{+}_{\eps,\delta}}\mathcal{C}^{(n),+}_{k_i}$ intersects $\tilde{\be}_n^u$, and is thus included in $\tilde{\be}_n^{u,\lambda}$ by definition. Proceeding similarly for $\bigcup_{i=0}^{M^{-}_{\eps,\delta}}\mathcal{C}^{(n),-}_{k_i}$, for each $\delta>0,$ $\eps>0,$ and $n$ large enough so that $\eps\wedge(r'-r)\geq cf(n,\eps)$ we therefore have \begin{equation} \label{eq:ifAnepsdeltathendiscretedisconnection} \mathcal{E}_1^{(n)}\cap\mathcal{E}_2^{(n)}\cap A_{\eps,\delta}^{(n),\pm}\cap \mathcal{A}_{\eps,\delta}^{(n),\pm}\cap F_{2,\eps}^\pm({\be}^{u,\lambda,\delta})\subset F_{0,\eps}^\pm(\widetilde{\be}_n^{u,\lambda}). \end{equation} We first consider the event $\mathcal{A}^{(n),\pm}_{\eps,\delta}$, which always occurs on the event $\mathcal{E}_1^{(n)}\cap\mathcal{E}_2^{(n)}\cap A_{\eps,\delta}^{(n),\pm}$ in case $M_{\eps,\delta}^\pm\geq2$ since one of the sets $\mathcal{C}^{(n),\pm}_{k_i}$ is then included in $\tilde{\be}_n^u$, and their union is connected. On the previous event, the only possibility for the event $\mathcal{A}^{(n),\pm}_{\eps,\delta}$ not to occur is that $\mathcal{C}_{k_0}^{\pm}$ is a continuous loop cluster that intersects both $S_{2,\eps}^{\pm,r}$ and $S_{2,\eps}^{\pm,l}$. Since $\mathcal{C}_{k_0}^{\pm}\subset\be^{u,\lambda,\delta}$, the loop cluster $\mathcal{L}$ of loops in $\D$ with diameter at least $\delta$ which contains $\mathcal{C}_{k_0}^{\pm}$ intersects some excursion $w$ in $\omega_u$. The excursion $w$ belongs to $\Omega_u(1-n^{-1/4})$ for $n$ large enough, and is thus at distance less than $n^{-1/2}$ from a discrete excursion $w^{(n)}$ in $\Omega_u^{(n)}(1-n^{-1/4})$ with high probability as $n\rightarrow\infty$ by Theorem~\ref{the:couplingdiscontexc1}. Moreover, the set $\mathcal{L}$ is at distance less than $\log(n)/n$ from a connected set of discrete loops $\mathcal{L}^{(n)}$ with high probability as $n\rightarrow\infty$ by Theorem~\ref{the:couplingloops}. If the event $\mathcal{A}^{(n),\pm}_{\eps,\delta}$ does not occur, we then have that $w^{(n)}$ and $\mathcal{L}^{(n)}$ do not intersect each other, but since they are both at distance at most $n^{-1/2}$ from a given point which does not depend on $n$, we deduce from \eqref{eq:disbeurling} that for all $\delta,\eps>0$ \begin{equation*} \lim\limits_{n\rightarrow\infty}\mathbb{P}\big(\mathcal{E}_1^{(n)}\cap\mathcal{E}_2^{(n)}\cap A_{\eps,\delta}^{(n),\pm}\cap \big(\mathcal{A}^{(n),\pm}_{\eps,\delta}\big)^{\ch}\big)=0. \end{equation*} Let us now prove that the event $A_{\eps,\delta}^{(n),\pm}$ occurs with high probability. Each subexcursion $E_{i,j}^{(n),\pm}$ intersects $\H_{1,\eps}^{\pm}$ and thus has diameter at least $\eps/10$ and, assuming that $\delta\wedge\eps\geq cf(n,\eps),$ each cluster $\mathcal{L}_i^{(n),\pm}$ intersects $\H_{1,\eps}^{\pm}$ and has diameter at least $\delta/2$ since $\mathcal{L}_i^{\pm}$ intersects $\H_{2,\eps}^{\pm}$ and has diameter at least $\delta.$ Note that if the discrete excursions intersect each other then the cable system excursions also intersect each other.
Therefore, using a union bound and the strong Markov property, Lemma~\ref{Lem:simpleRWresult} implies that for any $p,m,n\in\N,$ on the event that the number of Brownian excursions is smaller than $p$ and the number of loop soup excursions is smaller than $m,$ if $\delta\wedge\eps\geq cf(n,\eps),$ one has \begin{equation} \label{eq:consbeurling} \begin{split} \Pm\Big((A_{\eps,\delta}^{(n),\pm})^\ch,\sum_{i=1}^{N_{\eps}}K_i^{\pm}\leq p,L_{\delta,\eps}^{\pm}\leq m\,\Big|\,x_{i,j}^{(n),\pm},&i\in{\{1,\dots,N_{\eps}\}},j\in{\{1,\dots,K_i^{\pm}\}}\Big) \\&\leq cp(p+m)\left(\frac{f(n,\eps)}{\eps\wedge\delta}\right)^{1/2} \end{split} \end{equation} where $x_{i,j}^{(n),\pm}$ denotes the hitting point of $\H_{1,\eps}^{\pm}$ for the subexcursion $E_{i,j}^{(n),\pm}.$ Note also that if $\eps\wedge(r'-r)\geq cf(n,\eps)$, on the event $\mathcal{E}_1^{(n)}$ one can upper bound $K_i^{\pm}$ by the number of subexcursions in $B(1-4\eps-\eps/3)$ for $w_i$ which hit $B(1-4\eps-2\eps/3)$, plus the number of subexcursions in $B(1-\eps-\eps/3)$ for $w_i$ which hit $B(1-\eps-2\eps/3)$, plus the number of subexcursions in $\D\setminus B(r+(r'-r)/6)$ for $w_i$ which hit $\D\setminus B(r+(r'-r)/3)$, plus the number of subexcursions in $\{x\in{\D\setminus B((r+r')/4)}:\pm\text{arg}(x)\in{[-20\pi/84,\pi+20\pi/84]} \}$ for $w_i$ which hit $\{x\in{\D\setminus B((r+r')/4)}:\pm\text{arg}(x)\in{[-19\pi/84,\pi+19\pi/84]}\}$, plus the number of subexcursions in $\{x\in{\D\setminus B((r+r')/4)}:\pm\text{arg}(x)\in{[-\pi+\pi/84,-\pi/84]} \}$ for $w_i$ which hit $\{x\in{\D\setminus B((r+r')/4)}:\pm\text{arg}(x)\in{[-\pi+2\pi/84,-2\pi/84]} \}$. In view of \eqref{eq:hittingBM}, we deduce that $K_i^{\pm}$ can be upper bounded by a sum of five geometric random variables with constant parameters, depending only on $r,r'$. Combining this with \eqref{capball} and recalling that $N_{\eps}$ is a Poisson random variable with parameter $u\mathrm{cap}(B(1-\eps)),$ one can thus find a constant $C<\infty,$ depending only on $u,r,r',$ such that if $\eps\wedge(r'-r)\geq cf(n,\eps)$ then the total number of Brownian excursions we consider satisfies \begin{equation*} \E\Big[\sum_{i=1}^{N_{\eps}}K_i^{\pm}\I\big\{\mathcal{E}_1^{(n)}\big\}\Big]\leq C\eps^{-1}.\end{equation*} Markov's inequality then yields for all $t,\eta>0$ \begin{equation} \label{eq:poissonbound} \Pm\left(\sum_{i=1}^{N_{\eps}}K_i^{\pm}\geq (t\eta\eps)^{-1},\,\mathcal{E}_1^{(n)}\right)\leq t\eta\eps \E\Big[\sum_{i=1}^{N_{\eps}}K_i^{\pm}\I\big\{\mathcal{E}_1^{(n)}\big\}\Big]\leq Ct\eta. \end{equation} If $\lambda=0$ take $\delta=1$ and otherwise, for each $\eta>0,$ take $\delta=\delta(\eta,\eps,r,r')>0$ small enough so that \begin{equation} \label{eq:choicedelta} \Pm\big(F_{2,\eps}^\pm({\be}^{u,\lambda,\delta})^\ch,F_{2,\eps}^\pm({\be}^{u,\lambda})\big)\leq \eta/7. \end{equation} The existence of such a $\delta$ follows from the fact that if $F_{2,\eps}^\pm(\be^{u,\lambda})$ occurs, then there is a continuous path $\pi$ in $\mathcal{I}^{\intens,\lambda}\cap \H_{2,\eps}^{\pm}$ which hits both $S_{2,\eps}^{\pm,l}$ and $S_{2,\eps}^{\pm,r}$ and which consists of finitely many loops and excursions. Since the details are slightly cumbersome, we defer the proof of this fact to the end of this proof.
Further choose $t$ small enough so that the right-hand side of \eqref{eq:poissonbound} is bounded by $\eta/7$, $m=m(\eta,\eps,r,r')$ large enough (if $\lambda\neq0$) so that, with the previous choice of $\delta,$ the number of loop excursions satisfies \begin{equation} \label{eq:choicem} \Pm(L_{\delta,\eps}^{\pm}> m)\leq\eta/7, \end{equation} and $n=n(\eta,\eps,r,r')$ large enough so that, recalling \eqref{eq:boundcoupling2}, $\eps\geq cf(n,\eps),$ $r'-r\geq cf(n,\eps)$ and $\delta\geq cf(n,\eps)$ (for $\delta$ as in \eqref{eq:choicedelta}), as required for \eqref{eq:ifAnepsdeltathendiscretedisconnection}, \eqref{eq:consbeurling} and \eqref{eq:poissonbound} to hold; and so that \eqref{eq:consbeurling} (for $p=(t\eta\eps)^{-1}$, $\delta$ as in \eqref{eq:choicedelta} and $m$ as in \eqref{eq:choicem}), $\Pm\big(\mathcal{E}_1^{(n)}\cap\mathcal{E}_2^{(n)}\cap A_{\eps,\delta}^{(n),\pm}\cap\big(\mathcal{A}^{(n),\pm}_{\eps,\delta}\big)^{\ch}\big)$, $\Pm\big((\mathcal{E}_1^{(n)})^\ch\big)$ and $\Pm\big((\mathcal{E}_2^{(n)})^\ch\big)$ are all bounded by $\eta/7$. It then follows from \eqref{eq:ifAnepsdeltathendiscretedisconnection} that the probability in \eqref{eq:approxconnection1} is smaller than $\eta,$ and \eqref{eq:approxconnection1} follows readily. The proof of \eqref{eq:approxconnection2} is similar to the proof of \eqref{eq:approxconnection1} when $\lambda=0,$ that is, when there are no loops, but with the roles of the cable system excursions and of the Brownian excursions on $\mathbb{D}$ exchanged: one now defines $E^{\pm}_{i,j}$ as the subexcursions of $w_i'$ in $\H_{0,\eps}^{\pm}$ hitting $\H_{1,\eps}^{\pm}$, and $E_{i,j}^{(n),\pm}$ as the part of $\tilde{F}(w_i')$ close to $E_{i,j}^{\pm}.$ Then defining $A_{\eps}^{\pm}$ similarly as in \eqref{eq:defApmnij} and \eqref{eq:defApmn} but for the excursions $E^{\pm}_{i,j},$ and forgetting about the loops, one has similarly as in \eqref{eq:ifAnepsdeltathendiscretedisconnection} that $\mathcal{E}_1^{(n)}\cap\mathcal{E}_2^{(n)}\cap A_{\eps}^{\pm}\cap \overline{F}_{2,\eps}^\pm(\widetilde{\be}^{u}_n)\subset \overline{F}_{0,\eps}^\pm({\be}^{u}).$ We refer to the right-hand side of Figure~\ref{F:discretecritpar} for an illustration. Moreover, the bound \eqref{eq:consbeurling} still holds for $A_{\eps}^{\pm}$ by Lemma~\ref{Lem:simpleRWresult}, considering the hitting points $x_{i,j}^{\pm}$ of $\H_{1,\eps}^{\pm}$ for $E_{i,j}^{\pm}$ instead of $x_{i,j}^{(n),\pm}.$ Note additionally that when $\eps=\eps_n$ and $n$ is large enough, then $\eps\geq cf(n,\eps),$ see \eqref{eq:boundcoupling2}, and both the right-hand side of \eqref{eq:consbeurling}, for $m=0$, $\delta=1$ and $p=c\eps^{-1},$ as allowed by \eqref{eq:poissonbound}, and $\Pm\big((\mathcal{E}_1^{(n)})^{\ch}\big)$, converge to zero. This finishes the proof of \eqref{eq:approxconnection2}. It remains to prove \eqref{eq:choicedelta}. If $F_{2,\eps}^\pm({\be}^{u,\lambda})$ occurs, then by \eqref{eq:defFi+-} there is a continuous path $\pi$ in $\mathcal{I}^{\intens,\lambda}\cap \H_{2,\eps}^{\pm}$ which hits both $S_{2,\eps}^{\pm,l}$ and $S_{2,\eps}^{\pm,r}$. Moreover, since $\H_{2,\eps}^{\pm}$ is open, $\pi$ is at positive distance $s$ from $\partial \H_{2,\eps}^{\pm}$. By local finiteness of loop clusters, see Lemmas~9.4 and~9.7 as well as Propositions~10.3 and~11.1 in \cite{sheffield-werner}, the clusters of loops included in $\H_{2,\eps}^{\pm}$ which reach distance at least $s/2$ from $\partial \H_{2,\eps}^{\pm}$ are all at positive distance $s'$ from $\partial \H_{2,\eps}^{\pm}$.
We decompose the excursions in $\omega_u$ into subexcursions in $\H_{2,\eps}^{\pm}$, as well as the loops at level $\lambda$ which intersect $\partial \H_{2,\eps}^{\pm}$ and whose loop cluster in $\D$ intersect an excursion in $\omega_u$, into subexcursions in $\H_{2,\eps}^{\pm}$. We denote by $w_1,\dots,w_P$, the previous subexcursions of excursions or loops which reach distance $s'$ from $\partial \H_{2,\eps}^{\pm}$. Note that there are only finitely many such subexcursions by Proposition~\ref{prop:localdescription} and local finiteness of loops, as well as properties of Brownian motion. We further decompose the loops entirely included in $\H_{2,\eps}^{\pm}$ into loop clusters, and we write $\mathbf{C}_i$, $1\leq i\leq P$, for the union of $w_i$ and all those loop clusters which intersect $w_i$, as well as $\overline{\mathbf{C}}_i$ for its closure. Then by construction any point in $\H_{2,\eps}^{\pm}$ at distance at least $s/2$ from $\partial \H_{2,\eps}^{\pm}$, which belongs to a loop whose loop cluster in $\D$ intersect an excursion of $\omega_u$, belongs to one of the sets $\mathbf{C}_i$, $1\leq i\leq P$. In particular, $\pi$ is included in the union of the sets $\overline{\mathbf{C}}_i$ for $1\leq i\leq P$. Moreover, for $1\leq i\neq j\leq P$, if $\overline{\mathbf{C}}_i$ intersects $\overline{\mathbf{C}}_j$ in some point $x\in{\H_{2,\eps}^{\pm}}$, then a.s.\ $\mathbf{C}_i$ intersects $\mathbf{C}_j$. Indeed, either $w_i$ or $w_j$ intersects $x$, and then the previous statement follows from properties of Brownian motion on $\D$; or both $w_i$ and $w_j$ are at positive distance from $x$, and then $x$ is in the closure of a loop cluster (for the loops included in $\H_{2,\eps}^{\pm}$) intersecting $w_i$, resp.\ $w_j$, by local finiteness of loop clusters, and these two clusters a.s.\ actually coincide since the outer boundaries of distinct loop clusters are a.s.\ always at positive distance from one another by \cite[Proposition~10.3 and~11.1]{sheffield-werner}, see also p.\ 1899 therein. Define recursively on $k\geq1$ the number $i_k$ as the smallest index $i\in{\{1,\dots,P\}\setminus \{i_1,\dots,i_{k-1}\}}$ such that $\pi$ is in $\overline{\mathbf{C}}_i$ when first exiting the union of $\overline{\mathbf{C}}_{i_{k'}}$, $1\leq k'\leq k-1$. Assuming that $P'$ clusters are explored during the previous recursion, then for each $1\leq k\leq P'$, we have by definition that $\overline{\mathbf{C}}_{i_k}\cap\overline{\mathbf{C}}_{{j_k}}\neq\varnothing$ for some $j_k<i_k$, and thus a.s.\ ${\mathbf{C}}_{i_k}\cap {\mathbf{C}}_{j_{k}}\neq\varnothing$. By definition of loop clusters, see below \eqref{eq:defcontloops}, one can moreover connect any point of $\mathbf{C}_{j_k}$ to $\mathbf{C}_{i_k}$ using a finite number of loops and excursions in $\H_{2,\eps}^{\pm}$. Iterating, we obtain that one can connect any two points in the union of $\mathbf{C}_{i_j}$, $1\leq j\leq P'$, using only finitely many loops and subexcursions therein. Since the union of $\overline{\mathbf{C}}_{i_j}$, $1\leq j\leq P'$, intersect both $S_{2,\eps}^{\pm,l}$ and $S_{2,\eps}^{\pm,r}$, this is a.s.\ also the case for the union of ${\mathbf{C}}_{i_j}$, $1\leq j\leq P'$. Therefore, one can a.s.\ find a path in $\H_{2,\eps}^{\pm}$ connecting $S_{2,\eps}^{\pm,l}$ to $S_{2,\eps}^{\pm,r}$ using a finite number of loops and subexcursions in the union of ${\mathbf{C}}_{k}$, $1\leq k\leq P$. 
In particular, there exists $\delta>0$ such that all the loops along this path have diameter at least $\delta$, and such that any loop among the subexcursions $w_1,\dots,w_P$ is connected to $\be^u$ using only loops of diameter at least $\delta$. In particular, the event $F_{2,\eps}^\pm({\be}^{u,\lambda,\delta})$ occurs, which finishes the proof of \eqref{eq:choicedelta}. \end{proof} \begin{remark} In the proof of Lemma~\ref{lem:approxconnection}, the main strategy when $\lambda=0$ is the following: if $F_{2,\eps}^{\pm}(\be^u)\cap\mathcal{E}_1^{(n)}$ occurs then there is a sequence of cable system excursions in $\H_{0,\eps}^{\pm}$ almost connecting $S_{0,\eps}^{\pm,r}$ to $S_{0,\eps}^{\pm,l}$, that is with only finitely many small gaps between the excursions, and we then show that these excursions actually fill these gaps with high probability by Lemma~\ref{Lem:simpleRWresult}, and vice versa. Another possible approach would be to proceed similarly as in \cite[Lemma~2.7]{MR3485399}, see also \cite[Theorem~5.1]{MR3547746} for a similar approach : if $F_{2,\eps}^{\pm}(\be^u)$ occurs then there is a finite set of continuous excursions in $\H_{2,\eps}^{\pm}$ almost connecting $S_{2,\eps}^{\pm,r}$ to $S_{2,\eps}^{\pm,l}$, and then with high probability these excursions are strongly entangled, that is any set close enough (with respect to the Hausdorff distance) to these excursions still connects $S_{0,\eps}^{\pm,r}$ to $S_{0,\eps}^{\pm,l}$ in $\H_{0,\eps}^{\pm}$, and thus $F_{0,\eps}^{\pm}(\tilde{\be}_n^u)$ occurs on the coupling event $\mathcal{E}_1^{(n)}$. However this argument seems more complicated to implement for the other direction, that is when starting with $F_{2,\eps}^{\pm}(\tilde{\be}_n^u)$, and in particular it is hard to have good control on the connectivity of these excursions near the boundary, hence our different approach to prove Lemma~\ref{lem:approxconnection}. \end{remark} We can now establish Theorem~\ref{the:maindisexcloops}. \begin{proof}[Proof of Theorem~\ref{the:maindisexcloops}] Let us first consider the case $u\geq (8-\kappa)\pi/16$, which uses an argument similar to the proof of Lemma~\ref{lem:2}. By Lemma~\ref{l.brownianexcursionandlooprestriction} for $\Gamma=\big\{x\in{\partial\D}:\,\pm\arg(x)\in{\big[-4\pi/28,4\pi/28+\pi\big]}\big\}$ and $D=\H^{\pm}_{2,0}$, combined with Lemma~\ref{lem:SLE-ka-r}, the event $F_{2,0}^\pm({\be}^{u,\lambda}\big)$ occurs with probability one. Since the liminf of the events $F_{2,\eps}^\pm({\be}^{u,\lambda}\big)$ as $\eps\searrow0$ is contained in $F_{2,0}^\pm({\be}^{u,\lambda}\big)$, combining the previous observation with \eqref{eq:approxconnection1} we have for all $\eps\in{(0,1/6)}$, $r\in [0,1-6\eps)$ and $r'\in{(r,1-6\eps)}$ \begin{equation} \label{eq:subcriticalproof} \limsup_{\eps\rightarrow0}\limsup_{n\rightarrow\infty}\Pm\big(F_{0,\eps}^{\pm}(\tilde{\be}_n^{u,\lambda})^{\ch}\big)\leq \limsup_{\eps\rightarrow0}\Pm\big(F_{2,\eps}^\pm({\be}^{u,\lambda}\big)^{\ch}\big)=0, \end{equation} Using \eqref{eq:interestofF}, one easily deduces \eqref{eq:percowithloops} for $u\geq (8-\kappa)\pi/16$. Let us now turn to the case $u< (8-\kappa)\pi/16$, which follows from the following simple observation: for all $\eps\in{(0,1)},$ \begin{equation} \label{supercriteqc2} \liminf_{n\rightarrow\infty}\Pm\big(0\stackrel{\tilde{\mathcal{V}}_n^{\intens,\lambda(\kappa)}}{\longleftrightarrow}{\partial}B_n(1-\eps)\big)\geq \Pm\big(0\stackrel{\mathcal{V}^{\intens,\lambda(\kappa)}}{\longleftrightarrow}\partial\D\big). 
\end{equation} Indeed, if $0\leftrightarrow \partial\D$ in $\V^{\intens,\lambda},$ then there exists a path $\pi$ between $0$ and $\partial B(1-\eps/2)$ and $\delta\in{(0,\eps)}$ so that $B(\pi,\delta)\subset \V^{\intens,\lambda}.$ Then, under the coupling from Theorem~\ref{the:couplingwithloops}, $\pi\subset \widetilde{\V}_n^{\intens,\lambda}$ for $n$ large enough, which implies \eqref{supercriteqc2}. Combined with Theorem~\ref{t.mainthm}, this proves \eqref{eq:percowithloops} for $u< (8-\kappa)\pi/16$. \end{proof} \begin{proof}[Proof of Theorem~\ref{the:maindisexc}] The equality \eqref{eq:percowithoutloopssub} follows from \eqref{eq:percowithloops} for $\kappa=8/3,$ and we now prove \eqref{eq:percowithoutloops} using an argument similar to the proof of Lemma~\ref{lem:1} for $\lambda=0$, whose notation we will use. The only difference is that instead of using Lemmas~\ref{l.brownianexcursionandlooprestriction} and~\ref{lem:SLE-ka-r} for $D=\D$ to build a path $\gamma$ in $\D\setminus (\be^{\intens,\lambda(\kappa)}_{\mathbb{T}_+,\mathbb{T}_+,\D})$ from $0$ to $\mathring{\mathbb{T}}_+$ on the event $E_1$ from \eqref{eq:defE1}, we use them for $D=\{x\in{\D}:\,\Im(x)>0\}$, which imply that we can a.s.\ build a path $\gamma$ now in $D\setminus (\be^{\intens,\lambda(\kappa)}_{\mathbb{T}_+,\mathbb{T}_+,D})$, still from $0$ to $\mathring{\mathbb{T}}_+$. We then replace the event $E_1$ by the event \begin{equation*} E'_1=\big\{(\be^{\intens}_{\mathbb{T}_+,\mathbb{T}_+,\D}\setminus \be^{\intens}_{\mathbb{T}_+,\mathbb{T}_+,D})\cap\gamma=\varnothing\big\}. \end{equation*} Note that $E'_1$ is contained in the intersection of the independent events $E'_2=\big\{{\be}^{\intens}_{\Gamma,\Gamma,\D}\cap \gamma=\varnothing\big\}$ and $E'_3$ that there are no excursions starting or ending in $\partial \D\setminus\Gamma$ which hit $\D\setminus D$, where we recall that $\Gamma$ is a closed arc in $\partial\D$ not intersecting $\gamma$ and containing $\mathbb{T}_-$ in its interior. One can show similarly as for $E_2$ and $E_3$ defined in \eqref{eq:defE2} and below that the events $E'_2$ and $E'_3$ have positive probability, and hence $E'_1$ as well. Moreover, on the event $E'_1$, $\gamma$ is a path in $D\setminus (\be^{\intens}_{\mathbb{T}_+,\mathbb{T}_+,\D})$ from $0$ to $\mathring{\mathbb{T}}_+$. Since the event $E'_1$ is measurable with respect to $\be^{\intens}_{\mathbb{T}_+,\mathbb{T}_+,\D}$, one can follow the proof of Lemma~\ref{lem:1} replacing the event $E_1$ by $E'_1$ to deduce that $\gamma$ is a path in $D\setminus (\be^{\intens})$ from $0$ to $\mathring{\mathbb{T}}_+$ with positive probability, which implies that $\overline{F}_{0,0}^+({\be}^{u})^{\ch}$ occurs with positive probability. Since $\overline{F}_{0,\eps}^+({\be}^{u})\subset\overline{F}_{0,0}^+({\be}^{u})$ for every $\eps>0$, combining the previous observation with \eqref{eq:approxconnection2} we have for all $r\in [0,1)$ and $r'\in{(r,1)}$ \begin{equation} \label{eq:supercriticalproof} \liminf_{n\rightarrow\infty}\Pm\big(\overline{F}_{2,\eps_n}^{+}(\tilde{\be}_n^{u})^{\ch}\big)\geq \liminf_{n\rightarrow\infty}\Pm\big(\overline{F}_{0,\eps_n}^+({\be}^{u})^{\ch}\big)>0. \end{equation} Using \eqref{eq:interestofFbar} and noting that $7\eps_n=n^{-1/7}$, we deduce \eqref{eq:percowithoutloops} for $2r'(>0)$ instead of $r$. It remains to prove \eqref{eq:percowithoutloops} for $r=0$.
We have \begin{equation*} \Pm\big(0\stackrel{\tilde{\mathcal{V}}_n^{\intens}}{\longleftrightarrow}{\partial}B_n(1-n^{-1/7})\big)\geq \Pm\big(B_n(r)\stackrel{\tilde{\mathcal{V}}_n^{\intens}}{\longleftrightarrow}{\partial} B_n(1-n^{-1/7})\big)-\Pm\big(B_n(r)\cap \tilde{\mathcal{I}}_n^{\intens}\neq\varnothing\big). \end{equation*} It follows from \eqref{capball} and Proposition~\ref{prop:localdescription} that a.s.\ $B(r')\cap\be^u=\varnothing$ for $r'$ small enough. Therefore by Corollary~\ref{cor:couplingexcursions} we obtain \begin{equation*} \lim_{r\rightarrow0}\limsup_{n\rightarrow\infty}\Pm\big(B_n(r)\cap\tilde{\mathcal{I}}_n^{\intens}\neq\varnothing\big)=\lim_{r\rightarrow0}\Pm\big(B(r)\cap {\mathcal{I}}^{\intens}\neq\varnothing\big)=0, \end{equation*} and \eqref{eq:percowithoutloops} for $r=0$ then follows from \eqref{eq:percowithoutloops} for small $r>0$, which was established above via \eqref{eq:supercriticalproof}. Since it is harder to connect $B_n(r)$ to ${\partial}B_n(1-n^{-1/7})$ than to ${\partial}B_n(1-\eps)$ for $n$ large enough, we conclude that $u_*^d(r)=\pi/3.$ \end{proof} \begin{remark} \label{rk:polycloseboundary} In \eqref{eq:percowithloops}, we only obtain connection at distance $\eps>0$ from the boundary in the supercritical phase when $\kappa\in{(8/3,4]},$ and not at polynomial distance from the boundary as when $\kappa=8/3,$ see \eqref{eq:percowithoutloops}. In order to obtain connection at polynomial distance from the boundary when $\kappa\in{(8/3,4]},$ one would first need to extend Theorem~\ref{the:couplingwithloops} to obtain a coupling at polynomial distance from the boundary of the disk between discrete and continuous loops plus excursions, similarly as in Corollary~\ref{cor:couplingexcursions}, but that result would not be so interesting here. Indeed, we are mainly interested in two particular values of $\kappa$: first $\kappa=8/3,$ that is removing the loops, which is already covered in Theorem~\ref{the:couplingdiscontexc1}, and then $\kappa=4,$ due to its link with the GFF \eqref{eq:iso}. In our main dGFF result, Theorem~\ref{the:maindisgff}, we only use the isomorphism \eqref{eq:iso} to obtain the inequality $h_*^d(r)\leq \sqrt{\pi/2}.$ But this already implies $h_*^d(\boldsymbol{\eps},r)\leq \sqrt{\pi/2}$ for any sequence $\boldsymbol{\eps}$ decreasing to $0,$ see Remarks~\ref{rk:connectiontoboundary} and~\ref{rk:othercritexp}, and thus is already as strong as we want. In other words, improving Theorem~\ref{the:couplingwithloops} to obtain a coupling at polynomial distance from the boundary would mainly lead to better percolation results in the supercritical phase of $\tilde{\V}^{u,\lambda}_n$ for $\lambda>0$, but we are mainly interested in the subcritical phase of $\tilde{\V}^{u,\lambda}_n$ for $\lambda=1/2$ due to its link with the GFF. \end{remark} \section{Discrete Gaussian free field}\label{s:gfflvlperc} In this section we prove Theorem~\ref{the:maindisgff}. Since $\lambda(4)=1/2$, the bound $h_*^d(r)\leq\sqrt{\pi/2}$ is a simple consequence of Theorem~\ref{the:maindisexcloops} for $\kappa=4$ and the isomorphism theorem, Proposition~\ref{prop:BEisom}, and we thus focus on the proof of \eqref{eq:GFFconnectionBds}, that is, we prove that one can find $h> 0$ so that, with probability bounded away from zero for $n$ large, $B_n(r),$ $r\in{(0,1)},$ can be connected to the boundary of $\D_n$ in $E^{\ge h}_n.$ Using a simple consequence of the isomorphism (cf.\ Proposition~\ref{prop:BEisom}), we also deduce Corollary~\ref{cor:VunPercBds}.
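For orientation, the arithmetic behind this bound is the following: Theorem~\ref{the:maindisexcloops} for $\kappa=4$ yields the critical intensity $(8-4)\pi/16=\pi/4$ for the model of excursions plus loops at $\lambda=\lambda(4)=1/2,$ while in the isomorphism of Proposition~\ref{prop:BEisom} the level $h$ corresponds to the intensity $u=h^2/2,$ so that $h_*^d(r)^2/2\leq\pi/4,$ that is, $h_*^d(r)\leq\sqrt{\pi/2}.$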
We conclude this section with some remarks about percolation for the Gaussian free field on the cable system, see Remark~\ref{rk:cablegff}. The proof of \eqref{eq:GFFconnectionBds} is based on methods tracing back at least to \cite{BrLeMa-87}. These have been significantly extended in \cite{DiLi-18} and \cite{DiWiWu-20} by the use of martingale theory. In the latter, a weak version of \eqref{eq:GFFconnectionBds}, i.e., an analogue of \[ \liminf_{n\to \infty} \Pm \big(B_n(r) \stackrel{E_n^{\ge 0}}{\longleftrightarrow} \widehat{\partial}\D_n \big)>0 \] is proven, where however $B_n(r)$ and $\partial \D_n$ are replaced by the left and right boundary of a discrete rectangle, respectively. The approach of \cite{DiWiWu-20} will also serve as a guideline for our proof. However, quite a number of technical adaptations are in order due to the difference between connecting a macroscopic subset of $\D_n$ to the boundary $\partial \D_n$ in $E_n^{\ge h}$ for some $h>0$, and connecting the left boundary of a rectangle to its right boundary in $E_n^{\ge 0}.$ Since it is more adequate and convenient for us to work in the setting of the unit disk, as explained at the end of Section~\ref{sec:background} (see e.g.\ the proof of Lemma~\ref{Lem:bigcaponboundary}), we provide a self-contained proof of \eqref{eq:GFFconnectionBds} in the rest of this section. For $r\in{(0,1)}$ as well as $n \in \N$ fixed, we will define for each $h\in{\R}$ the exploration process $(\widetilde{\mathcal E}_t^{\ge h}),$ $t \in [0,\infty),$ which will take values in the set of measurable subsets of $\widetilde \D_n.$ More precisely, let \begin{equation} \label{eq:E=K} \widetilde{\mathcal{E}}_0^{\ge h} := \widetilde B_n(r), \end{equation} and for $t >0$ define $\widetilde{\mathcal E}_t^{\ge h} \subset \widetilde \D_n$ to be the union of $\widetilde{\mathcal{E}}_0^{\ge h}$ with the set consisting of all points $y \in \widetilde \D_n$ for which there exist $t_\gamma \in [0,t]$ and a continuous path $\gamma:[0,t_{\gamma}]\rightarrow\widetilde \D_n$ parametrized by arc length (attributing length $1/2$ to each entire cable $I_e$ when $e$ is an edge of $\D_n,$ with linear interpolation in between, and infinite length when $e$ is an edge between $\D_n$ and $\partial\D_n$), with the following properties: \begin{enumerate} \item $\gamma(0) \in \widetilde{\mathcal{E}}_0^{\ge h};$ \item $\gamma(t_{\gamma}) = y;$ \item for all $x \in \{\gamma(s):\,s\in{(0,t_{\gamma})}\}\cap \D_n $ we have $\varphi_x \ge h.$ \end{enumerate} In other words, $(\widetilde{\mathcal{E}}_t^{\ge h})_{t\geq0}$ continuously explores $\widetilde{\D}_n$ starting from $\widetilde{\mathcal{E}}_0^{\ge h},$ stopping the exploration along a path whenever a vertex $x\in{\D_n}$ with $\varphi_x<h$ is reached. Note that in order to simplify notation, we made the dependence of $\widetilde{\mathcal{E}}_t^{\ge h}$ on $n$ implicit, and we will do so for all the notation introduced in this section. For any $K\subset\widetilde{\D}_n$ set \begin{equation} \label{eq:Mdef} M_{K} := \sum_{x\in{\widehat \partial K}}e_{K}^{(n)}(x)\varphi_x, \end{equation} where $\widehat{\partial}K$ is defined as in \eqref{eq:interior} for subsets $K$ of the cable system, and the cable system equilibrium measure has been introduced in Section~\ref{sec:cableSystemExc}. Writing $\mathcal M_t^{\ge h} := M_{\widetilde{\mathcal E}_t^{\ge h}}$ we have that $(\mathcal M_t^{\ge h})_{t\geq0}$ is a martingale, the so-called exploration martingale; see \cite[Section~IV.6]{kups11574} for further details.
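For later reference, note that for any fixed $K\subset\D_n,$ the variable $M_K$ is a centered Gaussian; writing $g^{(n)}$ for the Green function of the random walk on $\D_n$ killed on $\partial\D_n,$ which is the covariance of $\varphi,$ and using that $\sum_{y\in{\widehat\partial K}}g^{(n)}(x,y)e_{K}^{(n)}(y)=\Pm^{(n)}_x(H_K<\infty)=1$ for every $x\in{K},$ one obtains \begin{equation*} \mathrm{Var}(M_K)=\sum_{x,y\in{\widehat\partial K}}e_{K}^{(n)}(x)e_{K}^{(n)}(y)g^{(n)}(x,y)=\sum_{x\in{\widehat\partial K}}e_{K}^{(n)}(x)=\mathrm{cap}^{(n)}(K), \end{equation*} in line with the expression \eqref{eq:quadVarUB} below for the quadratic variation of the exploration martingale.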
In addition, since $\widetilde{\mathcal E}_t^{\ge h}$ is increasing in $t \in [0,\infty)$, the limit \begin{equation} \label{eq:calEInfty} \widetilde{\mathcal E}_\infty^{\ge h} := \bigcup_{t \in [0,\infty)} \widetilde{\mathcal E}_t^{ \ge h} \end{equation} is well-defined. When $\widetilde{\mathcal{E}}_{\infty}^{\ge h}$ is compact, that is when $\widetilde{\mathcal{E}}_{\infty}^{\ge h}$ does not contain an entire cable between $\D_n$ and $\partial\D_n$ (since these cables are half-open by definition of $\widetilde{\D}_n$ in Section~\ref{sec:notation}), we note that $\widetilde{\mathcal{E}}_t^{\ge h}=\widetilde{\mathcal{E}}_{\infty}^{\ge h}$ for $t$ large enough, and that $\widetilde{\mathcal{E}}_{\infty}^{\ge h}$ is a union of entire cables. Moreover, $\widetilde{\mathcal{E}}_{\infty}^{\ge h}$ is non-compact if and only if an edge between $\widehat{\partial}\D_n$ and $\partial\D_n$ is explored, which happens if and only if $B_n(r)$ is connected to $\widehat{\partial}\D_n$ in $E_n^{\ge h}$ by definition. Therefore, \[ \mathcal M_\infty^{\ge h} := \lim_{t \to \infty} \mathcal M_t^{\ge h} \] is well-defined when $B_n(r)$ is not connected to $\widehat \partial \D_n$ in $E_n^{\ge h},$ and is then equal to $M_{\widetilde{\mathcal{E}}_{\infty}^{\ge h}}$. We also write \begin{equation*} {\mathcal{E}}_t^{\ge h} := \widetilde{\mathcal{E}}_t^{\ge h} \cap \D_n\text{ for all }t\in{[0,\infty)\cup\{\infty\}}. \end{equation*} Since $\tilde{\mathcal{E}}_{\infty}^{\ge h}$ is a union of entire cables, including the endpoints, the equilibrium measures of $\tilde{\mathcal{E}}_{\infty}^{\ge h}$ and of ${\mathcal{E}}_{\infty}^{\ge h}$ coincide when $\tilde{\mathcal{E}}_{\infty}^{\ge h}$ is compact, and so \begin{equation} \label{eq:equalityCaps} \mathcal M_{\infty}^{\ge h}=M_{\mathcal{E}_{\infty}^{\ge h}}\text{ on }\big\{B_n(r)\stackrel{E^{\ge h}_n}{\longleftrightarrow}\widehat \partial \D_n\big\}^\ch. \end{equation} We now introduce some further notation that will prove useful in the rest of this section. For $r\in{(0,1)}$ we consider $K$ such that \begin{equation} \label{eq:Lass} K\text{ is a connected subset of } \D_n \text{ with } B_n(r)\subset K \end{equation} and $F$ such that \begin{equation} \label{eq:LK} F\subset K \subset \D_n \text{ such that } F \supset \widehat \partial K\text{ and }\forall\,x\in{\widehat{\partial}K},\ \exists\ y\sim x\text{ with }y\in{K\setminus F}. \end{equation} Note that the sets $F$ and $K$ depend implicitly on $n.$ In addition, let \begin{equation} \label{eq:configDef} \mathcal B^{h}_{K,F} := \big \{\varphi_x < h \text{ for all } x \in F, \, \varphi_x \ge h\, \text{ for all } x \in K \setminus F\big \}. \end{equation} Intuitively, configurations of the form $\mathcal B^{h}_{K,F}$ will play the role of (discretized) final configurations of the exploration process in case the clusters of $E^{\ge h}_n$ intersecting $B_n(r)$ do not connect $B_n(r)$ to $\widehat\partial \D_n$ in $E^{\ge h}_n,$ see \eqref{proof0connect2} below. We now formulate some results which are modifications of findings from \cite{DiWiWu-20}, and which will prove useful in the following. For this purpose, recalling the definition of the interior boundary of a set from \eqref{eq:interior} as well as the equilibrium measure and capacity from \eqref{defequiandcap}, we let \begin{equation} \label{eq:supHarmMeas} {\rm Es}^{(n)}(K) := \frac{\sup_{y \in \widehat{\partial}(K\setminus \widehat{\partial}K)} e_{ K\setminus \widehat{\partial}{K}}^{(n)}(y)}{\mathrm{cap}^{(n)}(K)}.
\end{equation} \begin{proposition} \label{prop:condNegative} There exists a function $\varepsilon : (0,\infty) \to (0,\infty)$ with $\varepsilon(t) \to 0$ as $t \searrow 0$ and a constant $\Cl[const]{repul}\in (0,1)$ such that for all $n \in \N,$ $r\in{(0,1)},$ $h\leq \Cr{repul}$ and all $K$ and $F$ as in \eqref{eq:Lass} and \eqref{eq:LK}, we have \begin{align} \label{eq:condNegative} \begin{split} \Pm\big(M_{K}\le - \Cr{repul} \mathrm{cap}^{(n)}(K)\, | \, \mathcal B^{h}_{K,F} \big) \ge 1-\varepsilon \big({\rm Es}^{(n)}(K)\big). \end{split} \end{align} \end{proposition} \begin{proof} We use \cite[Proposition~4.1]{DiWiWu-20} applied with $\lambda=1,$ $\tilde{K}=\widetilde{\D}_n,$ $U=\widehat{\partial}\D_n,$ $I=K,$ $I^-=F,$ $I^+=K\setminus F,$ boundary condition $f=-h.$ Note that the set of points in $K$ which can be reached by the random walk starting from $\widehat{\partial}\D_n$ is $\widehat{\partial}K,$ see \eqref{eq:interior}, and that the set of points in $K\setminus \widehat{\partial}K$ in which the random walk starting from $\widehat{\partial}\D_n$ can hit $K\setminus \widehat{\partial}K$ for the first time is given by $\widehat{\partial}(K\setminus \widehat{\partial}K),$ cf.\ \eqref{eq:configDef}. Then, writing \begin{equation*} Y:=\!\!\sum_{x\in{\widehat{\partial}}K}\sum_{y\in{\widehat{\partial}\D_n}}\!\!\Pm_y^{(n)}(X_{H_K}=x,H_K<\infty)(\varphi_x-h)\text{ and }{\rm Hm}^{(n)}(K):=\!\!\sum_{y\in{\widehat{\partial}\D_n}}\!\Pm_y^{(n)}(H_K<\infty), \end{equation*} there exists $\Cr{repul}\in{(0,1)}$ and a function $r$ converging to $0$ at $0$ such that uniformly in $h\in{[0,1)},$ $n \in \N$ and $K$ and $F$ as in \eqref{eq:Lass} and \eqref{eq:LK}, \begin{equation*} \Pm\big(Y\le - 8\Cr{repul} {\rm Hm}^{(n)}(K)\, | \, \mathcal B^{h}_{K,F} \big) \ge 1-r \big(\xi^{(n)}(K)\big), \end{equation*} where \begin{equation*} \xi^{(n)}(K):=\frac{1}{{\rm Hm}^{(n)}(K)}\sup_{x\in{\widehat{\partial}(K\setminus \widehat{\partial}K)}}\sum_{y\in{\widehat{\partial}\D_n}}\Pm_y^{(n)}\big(X_{H_{K\setminus \widehat{\partial}K}}=x,H_{K\setminus \widehat{\partial}K}<\infty\big). \end{equation*} We now observe that for each $x\in{\widehat{\partial}\D_n},$ we have that $e_{\D_n}^{(n)}(x)=|\{y\in{\partial \D_n }:\,y\sim x\}|\in{[1,4]}$ by \eqref{defequiandcap}, since $\{\tau_{\partial \D_n}<\widetilde{\tau}_{\widehat{\partial}\D_n}\}$ can only occur if $X$ jumps directly from $x$ to $\partial\D_n.$ Therefore, on the event $\mathcal{B}_{K,F}^h$ we have that \begin{equation*} Y\geq \sum_{x\in{\widehat{\partial}}K}\sum_{y\in{\widehat{\partial}\D_n}}e_{\D_n}^{(n)}(y)\Pm_y^{(n)}(X_{H_K}=x,H_K<\infty)(\varphi_x-h)=M_K-h\mathrm{cap}^{(n)}(K), \end{equation*} where we used the discrete sweeping identity, see for instance \cite[(1.59)]{MR2932978}, as well as \eqref{eq:Mdef} in the last equality. Using again the discrete sweeping identity, we can upper bound similarly $\mathrm{cap}^{(n)}(K)$ by $4{\rm Hm}^{(n)}(K)$ and $\xi^{(n)}(K)$ by $4{\rm Es}^{(n)}(K).$ Taking $h\leq \Cr{repul}$ and $\eps(t)=r(4t)$ for all $t>0,$ we can conclude. \end{proof} In order to apply Proposition~\ref{prop:condNegative}, we will use the following fact. \begin{proposition} \label{prop:supHM} For each $r\in{(0,1)}$ there exists a function $\eps':\N \to(0,\infty)$ with $\eps'(n)\rightarrow0$ for $n \to \infty$ such that for all $n \in \N$ and $K$ as in \eqref{eq:Lass} we have that \begin{equation} \label{eq:supHM} {\rm Es}^{(n)}(K) \leq \eps'(n). 
\end{equation} \end{proposition} The proof of Proposition~\ref{prop:supHM} is provided at the end of this section. We now explain how combining Proposition~\ref{prop:supHM} with Proposition~\ref{prop:condNegative} entails Theorem \ref{the:maindisgff}. For this purpose, we consider the quadratic variation process $\langle \mathcal M^{\ge h} \rangle_t$ of the continuous square integrable martingale $\mathcal M_t^{\ge h},$ cf.\ also \cite[(2.3)--(2.5), (4.6)]{DiWiWu-20}. Following \cite[Lemma~IV.6.1]{kups11574} one can easily prove that \begin{equation} \label{eq:quadVarUB} \langle \mathcal M^{\ge h} \rangle_t = \mathrm{cap}^{(n)}(\widetilde{\mathcal E}_t^{\ge h})-\mathrm{cap}^{(n)}(\widetilde{\mathcal E}_0^{\ge h}) \quad \text{for all $t \ge 0.$} \end{equation} Next, we have the following standard result on continuous martingales from \cite[Chapter~V, Theorem~1.7]{ReYo-99}. \begin{proposition} \label{prop:BMconvMart} Let $\mathcal L_t$ be a continuous square integrable martingale, $T_t:= \inf \{s \in [0, \infty)\, : \, \langle \mathcal L\rangle_s > t\}$ its parametrization by quadratic variation and $(W_t)_{t\geq0}$ be an independent Brownian motion starting in the origin. Then the process \begin{align*} B_t:= \left\{ \begin{array}{ll} \mathcal L_{T_t} - \mathcal L_0, \quad & t < \langle \mathcal L\rangle_\infty,\\ W_{t-\langle \mathcal L\rangle_{\infty}}+\mathcal L_\infty - \mathcal L_0, \quad & t \ge \langle \mathcal L\rangle_\infty, \end{array} \right. \end{align*} where $\langle \mathcal L\rangle_\infty:= \lim_{t\to \infty} \langle \mathcal L\rangle_t$, is a Brownian motion starting in the origin and stopped at time $\langle \mathcal L \rangle_\infty.$ \end{proposition} With the above results at hand, we are now ready to prove Theorem~\ref{the:maindisgff}. \begin{proof}[Proof of Theorem~\ref{the:maindisgff}] First note that the bound $h_*^d(r)\leq\sqrt{\pi/2}$ is a simple consequence of Theorem~\ref{the:maindisexcloops} for $\kappa=4$ and the isomorphism theorem, Proposition~\ref{prop:BEisom}. We will now prove the first inequality in \eqref{eq:GFFconnectionBds}, which will also imply the first inequality in \eqref{eq:h*ineq}. On the event that $B_n(r)$ is not connected to $\widehat{\partial}\D_n$ in $E_n^{\geq h},$ taking $K={\mathcal{E}}_{\infty}^{\geq h}$ and $F=\{x\in{K}:\,\varphi_x< h\},$ it is clear by definition of the exploration that \eqref{eq:Lass} and \eqref{eq:LK} are satisfied, and that the event $\mathcal{B}_{K,F}^h$ occurs. Taking advantage of \eqref{eq:equalityCaps} and above, we obtain that for $h\leq \Cr{repul}$ we have \begin{equation}\begin{split} \label{proof0connect2} &\Pm\Big(\mathcal M_t^{\ge h} > - \Cr{repul} \mathrm{cap}^{(n)}(\widetilde{\mathcal E}_t^{\ge h})\text{ for all }t\geq0, \, \{B_n(r) \stackrel{E^{\ge h}_n}{\longleftrightarrow} \widehat \partial \D_n \}^\ch\Big)\\& \leq\Pm\Big(\mathcal M_\infty^{\ge h} > - \Cr{repul} \mathrm{cap}^{(n)}({\mathcal E}_\infty^{\ge h}), \, \{B_n(r) \stackrel{E^{\ge h}_n}{\longleftrightarrow} \widehat \partial \D_n \}^\ch\Big) \\&\leq \sum_{K,F}\Pm\Big(M_K > - \Cr{repul} \mathrm{cap}^{(n)}(K), \, \mathcal{B}_{K,F}^h \Big) \\ &\leq \sum_{K,F} \eps\big({\rm Es}^{(n)}(K)\big) \Pm( \mathcal{B}_{K,F}^h), \end{split} \end{equation} where the sum is over all $K,\, F$ as in \eqref{eq:Lass} and \eqref{eq:LK}, and we used Proposition~\ref{prop:condNegative} to obtain the last inequality. 
Regarding the right-hand side of the previous display, it follows from Proposition~\ref{prop:supHM} -- in particular the uniformity of the bound \eqref{eq:supHM} for $K$ as in \eqref{eq:Lass} -- and the fact that $\varepsilon(t) \to 0$ as $t \searrow 0$, that \begin{equation} \label{eq:EsConv0} \limsup_{n\rightarrow \infty} \sum_{K,F} \eps\big({\rm Es}^{(n)}( K)\big) \Pm( \mathcal{B}_{K,F}^h)=0, \end{equation} since the events $\mathcal{B}_{K,F}^h,$ for $K,\, F$ as in \eqref{eq:Lass} and \eqref{eq:LK}, are pairwise disjoint. Displays \eqref{proof0connect2} and \eqref{eq:EsConv0} immediately entail that there exists $h>0$ such that \begin{align} \label{eq:probaEgoesto0} \lim_{n\to \infty}\Pm\Big(\mathcal M_t^{\ge h} > - \Cr{repul} \mathrm{cap}^{(n)}(\widetilde{\mathcal E}_t^{\ge h})\text{ for all }t\geq0, \, \{B_n(r) \stackrel{E^{\ge h}_n}{\longleftrightarrow} \widehat \partial \D_n \}^\ch \Big) =0. \end{align} Next, for $a,b>0$ let us denote by \[ p(a,b) := \Pm\big (W_t > -at - b \, \forall t \ge 0\big) \] the probability that the standard Brownian motion $(W_t)_{t\geq0}$ stays above the line $-at-b$ for all $t\geq0.$ From Exercise $2.16$ in~\cite{PeresBM} we know that \begin{equation} \label{eq:persProbPos} p(a,b)=1-e^{-2 a b}>0. \end{equation} In order to derive a lower bound on $\Pm(B_n(r) \stackrel{E^{\ge h}_n}{\longleftrightarrow} \widehat \partial \D_n ),$ we now construct a lower bound for the unrestricted probability $\Pm\big(\mathcal M_\infty^{\ge h} > - \Cr{repul} \mathrm{cap}^{(n)}({\mathcal E}_\infty^{\ge h}) \big).$ For this purpose, denote by $(B_t^{\ge h})$ the Brownian motion defined as the time change of the continuous martingale $\M^{\ge h}_t$ through Proposition~\ref{prop:BMconvMart}, with $\M^{\ge h}_t$ playing the role of $\mathcal L_t$. Using \eqref{eq:quadVarUB} and the continuity of the quantities involved we infer that \begin{align} \label{proof0connect1} \begin{split} &\Pm\Big(\mathcal M_t^{\ge h} > - \Cr{repul} \mathrm{cap}^{(n)}(\widetilde{\mathcal E}_t^{\ge h})\text{ for all }t\geq0 \Big)\\ &\quad = \Pm\Big(\mathcal M_{T_t}^{\ge h} >-\Cr{repul}\mathrm{cap}^{(n)}(\widetilde{\mathcal{E}}_{T_t}^{\ge h}) \text{ for all }t\geq0\text{ such that }\,T_t<\infty\Big)\\ & \quad \ge \Pm\Big(B_t^{\ge h}>-\Cr{repul}\big(t+\mathrm{cap}^{(n)}({\mathcal E}_0^{\ge h})\big)-\M_0^{\ge h}\text{ for all }t\geq0\Big). \end{split} \end{align} Proceeding similarly as in \cite[(IV.6.3)]{kups11574}, one can easily show that $(B_t^{\ge h})_{t\geq0}$ is independent of $\M_0^{\ge h},$ and since $\Pm(\M_0^{\ge h}\geq0)=\frac12$, we can lower bound the right-hand side of \eqref{proof0connect1} by \[ \frac12 \Pm\Big(B_t^{\ge h}>-\Cr{repul}\big(t+\mathrm{cap}^{(n)}({\mathcal E}_0^{\ge h})\big)\text{ for all }t\geq0\Big). \] Furthermore, since $r\in{(0,1)},$ one can easily deduce from Lemma~\ref{l.capconv} that there exists a constant $\Cl[const]{ccapE0}=\Cr{ccapE0}(r)$ such that \begin{equation*} \mathrm{cap}^{(n)}(\widetilde{\mathcal{E}}_0^{\ge h}) \ge \mathrm{cap}^{(n)}(B_n(r)) \geq \Cr{ccapE0} \quad \text{ for all }n>0. \end{equation*} Therefore, we have for all $n>0$ that \begin{align} \label{proof0connect4} \begin{split} &\frac12p(\Cr{repul}, \Cr{repul}\Cr{ccapE0}) \leq\Pm\Big(B_t^{\ge h}>-\Cr{repul} \big(t+\mathrm{cap}^{(n)}(\widetilde{\mathcal E}_0^{\ge h} )\big)-\M_0^{\ge h}\text{ for all }t\geq0\Big).
\end{split} \end{align} Displays \eqref{proof0connect1} and \eqref{proof0connect4} then supply us with \begin{equation} \label{eq:proof0connect5} \frac12p(\Cr{repul},\Cr{repul}\Cr{ccapE0}) \le \Pm\Big(\mathcal M_t^{\ge h} > - \Cr{repul} \mathrm{cap}^{(n)}(\widetilde{\mathcal E}_t^{\ge h})\text{ for all }t\geq0 \Big). \end{equation} Combining \eqref{eq:probaEgoesto0}, \eqref{eq:persProbPos} and \eqref{eq:proof0connect5} we finally infer that \begin{equation*} \liminf_{n\rightarrow\infty}\Pm\big(B_n(r) \stackrel{E^{\ge h}_n}{\longleftrightarrow} \widehat \partial \D_n \big )\geq \frac12p(\Cr{repul},\Cr{repul}\Cr{ccapE0}) > 0, \end{equation*} which finishes the proof of the first inequality in \eqref{eq:GFFconnectionBds}. The second inequality in \eqref{eq:GFFconnectionBds} is trivial, so we proceed with proving the third inequality. If there is a single excursion of the random walk excursion process at level $\intens$ which encloses the set $B_n(r)$, then by duality there can be no nearest-neighbor path -- in fact, not even a $*$-connected one -- from $B_n(r)$ to $\widehat{\partial} \D_n$ in $\V_n^{\intens},$ and so there is also no such path in $E^{\ge \sqrt{2\intens}}_n$ by Proposition~\ref{prop:BEisom}. Now, for any $\intens > 0$, one can easily deduce from the coupling in Theorem~\ref{the:couplingdiscontexc1} that this random walk excursion event has a probability bounded away from $0$, uniformly in $n \in \N.$ As a consequence, the inequality follows. \end{proof} \begin{remark} \label{rk:exactcritparaGFF} Let us denote by $\mathbb{A}_{h}^{(n)}$ the union of all the clusters of $\widetilde{E}^{\ge h}_n$ hitting $\partial\D_n.$ One can use the full isomorphism between the GFF and the discrete excursion process, \cite[Proposition~2.4]{ArLuSe-20a}, to show that $\tilde{\V}_n^{u,1/2}$ (see \eqref{eq:BuuVuuDef} for definition) has the same law as the complement of $\mathbb{A}_{-\sqrt{2u}}^{(n)}.$ Therefore, we deduce from Theorem~\ref{the:maindisexcloops} with $\kappa=4$ that the probability that $B_n(r)$ is connected to $\partial B_n(1-\eps)$ in the complement of $\mathbb{A}_{-h}^{(n)}$ goes to $0$ as $n\rightarrow\infty$ and $\eps\rightarrow0$ if and only if $h\geq\sqrt{\pi/2}.$ See also Corollary~\ref{cor:percocontGFF} for the corresponding result in the continuous setting. Since $E^{\ge h}_n$ is included in the complement of $\mathbb{A}_{-h}^{(n)},$ the inequality $h_*^d(r)\leq\sqrt{\pi/2}$ can be seen as a simple consequence of this result, and we conjecture that this inequality is strict. The percolation problem for $E^{\ge h}_n$ is more classical in higher dimensions than that for the complement of $\mathbb{A}_{-h}^{(n)},$ and we have thus chosen to focus on it in this article. \end{remark} Let us now turn to the proof of Proposition~\ref{prop:supHM}. We are first going to need the following estimate on the capacity of sets close to $\widehat{\partial}\D_n.$ \begin{lemma} \label{Lem:bigcaponboundary} For each $r\in{(0,1)},$ there exists a constant $c>0$ such that for all $n\in\N$ and $K$ as in \eqref{eq:Lass} with $d(K,\widehat{\partial}\D_n)\leq n^{-\frac1{2}}$ we have \begin{equation*} \mathrm{cap}^{(n)}(K)\geq {c\log(n)}.
\end{equation*} \end{lemma} \begin{proof} For some $s_0 > 0$ let us define $\pi_n:[0,s_0]\rightarrow\widetilde{\D}_n$ a continuous and injective path starting in $B_n(r),$ ending at a vertex at distance at most $n^{-1/2}$ from $\widehat{\partial}\D_n,$ and such that $\pi_n(s)$ is on a cable between two vertices in $K$ for all $s\in{[0,s_0]}.$ Define also the map $p_n:[|\pi_n(0)|,|\pi_n(s_0)|]\rightarrow\widetilde{\D}_n$ via $s\mapsto\pi_n(u_s),$ where $u_s=\inf\{r\geq0:\,|\pi_n(r)|\geq s\}.$ Note that $p_n$ is measurable since, for each segment $I$ included in one of the cables crossed by $\pi_n,$ the set $p_n^{-1}(I)$ is a finite union of intervals. Since $r\in{(0,1)},$ we can moreover assume that $|\pi_n(0)|\geq c$ for some constant $c>0$ only depending on $r.$ Let us define the measure \begin{equation*} \mu:\mathcal{B}(\D)\ni A\mapsto\nu\big(p_n^{-1}(A)\big),\text{ where }\nu(\mathrm{d}t):=\frac{\mathrm{d}t}{1-t}, \end{equation*} which is supported on $\pi_n([0,s_0]).$ Moreover, by \eqref{eq:Greenat0} we have \begin{equation*} \int_{\D} G(0,x)\, \mathrm{d}\mu(x)\leq \frac{1}{2\pi}\int_{c}^{1}\frac{\log(1/t)}{1-t}\, \mathrm{d}t \, (<\infty). \end{equation*} Therefore, by \cite[Chapter~2, Proposition~4.2]{sznitman1998brownian} there exists a constant $c'>0$ such that \begin{equation*} \mathrm{cap}(\pi_n([0,s_0]))\geq c'\mu(\pi_n([0,s_0]))\geq \frac{c'}{2\pi}\int_{c}^{1-n^{-1/2}}\frac{1}{1-t}\, \mathrm{d}t\geq c''\log(n). \end{equation*} It moreover follows from Proposition~\ref{t.gencapconv} that \begin{equation*} \mathrm{cap}^{(n)}(K)\geq \mathrm{cap}^{(n)}(\pi_n([0,s_0])\cap \D_n)\geq c\cdot\mathrm{cap}(\pi_n([0,s_0])), \end{equation*} and we can conclude. \end{proof} \begin{proof}[Proof of Proposition~\ref{prop:supHM}] Using the Beurling estimate Lemma~\ref{lem:beurling} and recalling $e_{K\setminus \widehat{\partial}K}^{(n)}$ as well as ${\rm cap}^{(n)}(K)$ from \eqref{defequiandcap}, for all $x\in{\widehat{\partial} (K\setminus \widehat{\partial}K)}$ we have the upper bound \begin{equation} \label{eq:hmUB} e_{K\setminus \widehat{\partial}K}^{(n)}(x) =\sum_{y\sim x}\Pm_y^{(n)}(\tau_{K\setminus \widehat{\partial}K}> \tau_{\partial \D_n})\leq C\left(\frac{1}{nd(x,\widehat{\partial}\D_n)}\right)^{\frac12}. \end{equation} Since $B_n(r)\subset K$ and $r\in{(0,1)},$ by Lemma~\ref{l.capconv} there exists a constant $c>0,$ depending on $r$ only, such that $\mathrm{cap}^{(n)}(K)\geq c.$ Moreover $d(\widehat{\partial}(K\setminus \widehat{\partial}K),\widehat{\partial}\D_n)\geq d(K,\widehat{\partial}\D_n),$ and so in view of \eqref{eq:hmUB}, \begin{equation*} {\rm Es}^{(n)}(K)\leq Cn^{-\frac14} \quad \text{ if }d(K,\widehat{\partial}\D_n)\geq n^{-\frac12}. \end{equation*} On the other hand, if $d(K,\widehat{\partial}\D_n)\leq n^{-\frac12}$ then we can easily conclude since in view of Lemma~\ref{Lem:bigcaponboundary} the denominator in the definition \eqref{eq:supHarmMeas} of ${\rm Es}^{(n)}(K)$ is larger than $c\log(n)$ while the numerator is bounded from above by $4.$ \end{proof} We conclude with the following. \begin{proof}[Proof of Corollary~\ref{cor:VunPercBds}] When $r\in{(0,1)},$ the first inequality follows from \eqref{eq:GFFconnectionBds} and the isomorphism theorem, Proposition~\ref{prop:BEisom}. 
When $r=0,$ similarly as in Remark~\ref{rk:othercontperco},\ref{finiteenergy}, one can prove that there is a type of finite energy property for the percolation of $\V_n^{\intens},$ that is for each $r\in{(0,1)},$ $\Pm\big(0\stackrel{\mathcal{V}_{n}^{\intens}}{\longleftrightarrow} \widehat{\partial}\D_n\big)$ is larger than $c\Pm\big(B_n(r) \stackrel{\mathcal{V}_{n}^{\intens}}{\longleftrightarrow} \widehat{\partial}\D_n\big)$ for a constant $c=c(r,u)$ not depending on $n$ by Proposition~\ref{prop:interoncompacts} and Lemma~\ref{l.capconv}. The second inequality is trivial. The last inequality of \eqref{eq:ineqsVacantSetPerc} follows from the fact that with probability uniformly bounded away from $0,$ for all $n$ large enough there exists a single excursion enclosing $B_n(r)$ and hence separating it from $\widehat{\partial} \D_n,$ which follows from the coupling with continuous excursions Theorem~\ref{the:couplingdiscontexc1}. \end{proof} \begin{remark} \label{rk:h*dependonr} Note that the finite energy property used in the proof of Corollary~\ref{cor:VunPercBds} implies that $\intens_*^d(r)$ does not depend on $r\in{[0,1)}$ -- for $h_*^d(r)$ this does not seem to be clear. \end{remark} We finish this section by some remarks on the percolation for the level sets of the GFF on the cable system. \begin{remark} \phantomsection\label{rk:cablegff} \begin{enumerate}[label=\arabic*)] \item \label{rk:tilh*>0}In \cite[Corollary~5.1]{ArLuSe-20a}, the inequality $\tilde{h}_*^d(r)\geq0$ for $r\in{(0,1)}$ is proved using the explicit formulas on the effective resistance between $0$ and $\widetilde{E}_n^{\geq h}$ for $h<0$ from \cite[Corollary~14]{MR3827222}. In fact, they prove the following stronger statement: for all $h<0$ and $r\in{(0,1)}$ \begin{equation} \label{eq:connectiontboundarycable} 0 < \liminf_{n \to \infty} \Pm \Big (B_n(r) \stackrel{\widetilde{E}^{\ge h}_n}{\longleftrightarrow} \widehat{\partial}\D_n \Big) \le \limsup_{n \to \infty} \Pm \Big (B_n(r) \stackrel{\widetilde{E}^{\ge h}_n}{\longleftrightarrow}\widehat{\partial} \D_n \Big) < 1, \end{equation} which corresponds to the statement \eqref{eq:GFFconnectionBds} for the cable system at negative levels. Here $\longleftrightarrow$ denotes connection on the cable system $\widetilde{\D}_n,$ and not discrete connection in $\D_n.$ Let us present another short proof of the first inequality in \eqref{eq:connectiontboundarycable}: it follows from Proposition~\ref{prop:interoncompacts}, \eqref{capball} and Lemma~\ref{l.capconv} that for each $u>0$ and $r\in{(0,1)},$ the limit as $n\rightarrow\infty$ of the probability that the Brownian excursion set $\be_n^{u}$ intersect $B_n(r)$ is positive, which implies the first inequality in \eqref{eq:connectiontboundarycable} by the isomorphism \eqref{eq:iso}. \item \label{rk:r=0cablegff} When $r=0,$ one can easily prove that \eqref{eq:connectiontboundarycable} does not hold, contrary to what is stated in \cite[Corollary~5.1]{ArLuSe-20a}. 
Indeed, by either \cite[Theorem~3.7]{DrePreRod3} or \cite[Corollary~1]{MR3827222} we have for all $h>0$ \begin{equation*} \Pm\Big(0\stackrel{\widetilde{E}^{\ge -h}_n}{\longleftrightarrow}\partial\D_n\Big)=\Pm\left(\varphi_0^{(n)}\in{(-h,h)}\right)\tend{n}{\infty}0 \end{equation*} since $G^{(n)}(0,0),$ the variance of $\varphi_0^{(n)},$ diverges as $n\rightarrow\infty.$ Moreover, if $0\leftrightarrow\widehat{\partial}\D_n$ in $\widetilde{E}_n^{\geq -h},$ then $0\leftrightarrow{\partial}\D_n$ in $\widetilde{E}_n^{\geq -h}$ with positive probability, since the probability to cross the last edge between $\widehat{\partial}\D_n$ and $\partial\D_n$ is positive conditionally on $\varphi^{(n)}_{|\D_n}.$ In fact, using the asymptotic $G^{(n)}(0,0)\geq c\log(n)$ as $n\rightarrow\infty,$ one can show that the probability that $0\leftrightarrow\widehat{\partial}\D_n$ in $\widetilde{E}_n^{\geq -(\log(n))^a}$ goes to $0$ as $n\rightarrow\infty$ for all $a<1/2.$ This indicates that, for the level sets of the GFF, it is way harder to connect $0$ to the boundary of $\D_n,$ than $B_n(r)$ to the boundary of $\D_n$ for $r\in{(0,1)}.$ Therefore, it would not be surprising that $h_*^d(0)\leq0$ for the dGFF, hence the restriction to $r>0$ in Theorem~\ref{the:maindisgff}, contrary to the case of the excursion vacant set in Theorem~\ref{the:maindisexc}. In \cite{2dGFFperc} it is actually argued that $h_*^d(0)=0.$ \end{enumerate} \end{remark} \appendix \section{Appendix: Continuum critical values} \label{sec:contperco} \renewcommand*{\thetheorem}{A.\arabic{theorem}} \renewcommand{\theequation}{A.\arabic{equation}} In this appendix, we prove Theorem~\ref{t.mainthm}. Recall the definitions of $\V^{\intens,\lambda}$ from below \eqref{eq:defcontloops}, of $\lambda(\kappa)$ from \eqref{eq:defckappa}, and of $B(r)\stackrel{\CV^{\intens,\lambda(\kappa)}}{\longleftrightarrow}\partial\D$ from above \eqref{def:u*c}. Given the link to the SLE process the idea for the computation of the critical values is very simple: consider independent excursion clouds starting and ending on the upper and lower parts of the unit circle $\mathbb{T}_{\pm}=\{z\in{\partial\D}:\pm\Im(z)\geq0\}.$ By Lemma~\ref{l.brownianexcursionandlooprestriction} and conformal invariance, the boundaries of the excursion clouds plus Brownian loop clusters $\partial_{\mathbb{T}_\pm}\mathcal{I}^{u,\lambda(\kappa)}_{\mathbb{T}_\pm,\mathbb{T}_\pm,\D}$ are SLE$_{\kappa}(\rho)$ paths in $\mathbb{D}$ connecting $-1$ with $1$. The $\rho(\intens)$ relation is such that the SLE paths intersect the boundary away from $-1$ and $1$ if and only if $\intens < (8-\kappa)\pi/16$ (Lemma~\ref{lem:SLE-ka-r}), and the excursions plus loops do not separate $0$ from $\partial \mathbb{D}$ with positive probability. We refer to Figure \ref{fig:superandsubcrit} for a simulation when there is no loop soup, that is when $\kappa=8/3.$ \begin{figure}[ht!] \setlength\arrayrulewidth{0.8pt} \noindent\begin{tabular}{|c|c|} \hline\rule{0pt}{42.4ex}\hspace{-1.4mm} \includegraphics[width=6.53cm]{SLE_and_BE.eps} & \includegraphics[width=6.53cm]{SLE_and_BE2.eps} \\ \hline \end{tabular} \caption{A simulation of the Brownian excursion set $\mathcal{I}^u_{\mathbb{T}_-,\mathbb{T}_-,\D}$ in black, and the corresponding $\text{SLE}_{8/3}(\rho_{8/3}(u/\pi))$ curve $\partial_{\mathbb{T}_-}\mathcal{I}^u_{\mathbb{T}_-,\mathbb{T}_-,\D}$ in red. 
On the left the supercritical case where $u<\pi/3,$ and on the right the subcritical case where $u\geq\pi/3.$} \label{fig:superandsubcrit} \end{figure} This reasoning suggests that the critical parameter associated to $\mathcal{V}^{u,\lambda(\kappa)}$ is equal to $(8-\kappa)\pi/16,$ as noted in \cite[Section~5]{werner-qian}. But some points need to be dealt with in order to make this rigorous. For instance, if $u<(8-\kappa)\pi/16,$ then even if $0$ is not disconnected from $\partial\D$ by the SLE paths $\partial_{\mathbb{T}_-}\mathcal{I}^{u,\lambda(\kappa)}_{\mathbb{T}_-,\mathbb{T}_-,\D}$ and $\partial_{\mathbb{T}_+}\mathcal{I}^{u,\lambda(\kappa)}_{\mathbb{T}_+,\mathbb{T}_+,\D},$ $0$ could still be disconnected from $\partial\D$ by $\mathcal{I}^{u,\lambda(\kappa)},$ via trajectories starting in $\mathbb{T}_+$ and ending in $\mathbb{T}_-.$ Similarly, if $u\geq (8-\kappa)\pi/16,$ even if $\partial_{\mathbb{T}_-}\mathcal{I}^{u,\lambda(\kappa)}_{\mathbb{T}_-,\mathbb{T}_-,\D}$ does not hit $\partial\D,$ $0$ could be connected to $\mathbb{T}_-$ in $\mathcal{V}^{u,\lambda(\kappa)}$ if the curve $\partial_{\mathbb{T}_-}\mathcal{I}^{u,\lambda(\kappa)}_{\mathbb{T}_-,\mathbb{T}_-,\D}$ passes ``above" $0.$ \begin{lemma}\label{lem:1} Suppose $\kappa\in{[8/3,4]}$ and $0<\intens<(8-\kappa)\pi/16,$ then $\Pm\big(0\stackrel{\CV^{\intens,\lambda(\kappa)}}{\longleftrightarrow}\partial\D\big)>0.$ \end{lemma} \begin{proof} Let $A\subset\D$ be an arbitrary choice of deterministic line segment connecting $0$ with the interior of $\mathbb{T}_-$ (seen as a subset of $\partial\D$) inside $\D$ and let $A_{\lambda(\kappa)}$ be the closure of the set of clusters of loops at intensity $\lambda(\kappa)$ intersecting $A.$ By Lemmas~\ref{l.brownianexcursionandlooprestriction} and \ref{lem:SLE-ka-r}, $\eta : = \partial_{\mathbb{T}_+}\be^{\intens,\lambda(\kappa)}_{\mathbb{T}_+,\mathbb{T}_+,\D}$ is an SLE$_\kappa(\rho)$ curve in $\D$ from $-1$ to $1$ which intersects $\mathbb{T}_+$ away from the end points a.s.\ and does not intersect $\mathbb{T}_-$. Therefore, on the event \begin{equation} \label{eq:defE1} E_1:=\big\{\be^{\intens,\lambda(\kappa)}_{\mathbb{T}_+,\mathbb{T}_+,\D}\cap A=\varnothing\big\}=\big\{\be^{\intens}_{\mathbb{T}_+,\mathbb{T}_+,\D}\cap A_{\lambda(\kappa)}=\varnothing\big\}, \end{equation} the path $\eta$ does not separate $0$ from $\mathbb{T}_-$ in $\D$ and so on $E_1$ there exists a (random) closed path $\gamma$ in $(\be^{\intens,\lambda(\kappa)}_{\mathbb{T}_+,\mathbb{T}_+,\D})^\ch$ from $0$ to $\mathring{\mathbb{T}}_+.$ We now claim that $\be^{\intens,\lambda(\kappa)}\setminus \be^{\intens,\lambda(\kappa)}_{\mathbb{T}_+,\mathbb{T}_+,\D}$ avoids $\gamma$ with positive probability. Let $\Gamma$ be a (random) closed arc in $\partial\D$ not intersecting $\gamma$ and containing $\mathbb{T}_-$ in its interior. We will resample $\be^{\intens}_{\mathbb{T}_+,\mathbb{T}_+,\D}$ in order to be able to consider conditionally independent ``good'' events on which $\be^{\intens,\lambda(\kappa)}\setminus \be^{\intens,\lambda(\kappa)}_{\mathbb{T}_+,\mathbb{T}_+,\D}$ avoids $\gamma$. The probabilities of the good events are strictly positive using the restriction formula \eqref{eq:onesidedrestriction}. Now for the details. 
Let $\widehat{\be}^{\intens}_{\mathbb{T}_+,\mathbb{T}_+,\D}$ be an independent random set with the same law as ${\be}^{\intens}_{\mathbb{T}_+,\mathbb{T}_+,\D},$ and let $\widehat{\be}^{\intens}$ be the union of $\widehat{\be}^{\intens}_{\mathbb{T}_+,\mathbb{T}_+,\D}$ and the set of points intersected by an excursion in the support of $\omega_u$ which does not start and end in $\mathbb{T}_+$. Then $\widehat{\be}^{\intens}$ has the same law as $\be^{\intens}.$ For any $\Gamma'\subset\partial\D,$ we also define $\widehat{\be}_{\Gamma',\Gamma',\D}^{\intens},$ $\widehat{\be}^{\intens,\lambda(\kappa)}$ and $\widehat{\be}_{\Gamma',\Gamma',\D}^{\intens,\lambda(\kappa)}$ analogously to ${\be}_{\Gamma',\Gamma',\D}^{\intens},$ $\be^{\intens,\lambda(\kappa)}$ and $\be^{\intens,\lambda(\kappa)}_{\Gamma',\Gamma',\D},$ but for the excursions associated with $\widehat{\be}^{\intens}$ instead of ${\be}^{\intens}.$ On the event that $\gamma$ exists, let $\gamma_{\lambda(\kappa)}$ be the closure of the set of clusters of loops at intensity $\lambda(\kappa)$ hitting $\gamma,$ and define \begin{equation} \label{eq:defE2} E_2:=\big\{\widehat{\be}^{\intens,\lambda(\kappa)}_{\Gamma,\Gamma,\D}\cap \gamma=\varnothing\big\}=\big\{\widehat{\be}^{\intens}_{\Gamma,\Gamma,\D}\cap \gamma_{\lambda(\kappa)}=\varnothing\big\}. \end{equation} Let $E_3$ be the event that $\widehat{\be}^{\intens}$ contains no excursions starting on $\mathbb{T}_-$ and ending on $\Gamma^\ch,$ or starting on $\Gamma^\ch$ and ending on $\mathbb{T}_-.$ Each loop cluster intersecting $\be^u$ but not $\be^u_{\mathbb{T}_+,\mathbb{T}_+,\D}$ intersects an excursion of $\widehat{\be}^u$ which does not start and end on $\mathbb{T}_+,$ and thus also intersects $\widehat{\be}^{u}_{\Gamma,\Gamma,\D}$ on the event $E_3.$ Therefore, the path $\gamma$ does not intersect $\be^{\intens,\lambda(\kappa)}\setminus \be^{\intens,\lambda(\kappa)}_{\mathbb{T}_+,\mathbb{T}_+,\D}$ on the event $E_2\cap E_3,$ and so $\gamma$ is included in $\V^{\intens,\lambda(\kappa)}$ on the event $E_1\cap E_2\cap E_3.$ We refer to Figure~\ref{F:Lemma4.1} for details. \begin{figure}[ht!] \centering \includegraphics[scale=0.76]{Lemma4-1.eps} \caption{Some selected Brownian excursions, in black for $\be^{\intens}_{\mathbb{T}_+,\mathbb{T}_+,\D},$ in green for $\widehat{\be}^{\intens}_{\Gamma,\Gamma,\D},$ and in orange for the excursions of $\widehat{\be}^{\intens}$ starting on $\mathbb{T}_-$ and ending on $\Gamma^\ch,$ or starting on $\Gamma^\ch$ and ending on $\mathbb{T}_-.$ In blue the path $\gamma$ and in red the clusters of the loop soup at level $\lambda(\kappa)$ hitting $\gamma.$ On the left the event $E_2$ does not occur since there is a green trajectory hitting a red cluster, and the event $E_3$ also does not occur since there is an orange trajectory. 
On the right the events $E_2$ and $E_3$ occur, and thus $\gamma$ is a path from $0$ to $\partial\D$ in $\V^{\intens,\lambda(\kappa)}.$} \label{F:Lemma4.1} \end{figure} Note that it follows from the local finiteness of loop clusters, see \cite[Lemma~9.7 and Proposition~11.1]{sheffield-werner}, that both $\overline{A_{\lambda(\kappa)}}\cap\partial\D\subset\mathring{\mathbb{T}}_-$ and $\overline{\gamma_{\lambda(\kappa)}}\cap\partial\D\subset\mathring{\Gamma^\ch}.$ Therefore, conditionally on the loop soup, by Lemma~\ref{lem:excarerestriction}, the probabilities of $E_1,$ and of $E_2$ conditionally on $\be^{\intens}_{\mathbb{T}_+,\mathbb{T}_+,\D},$ can both be computed using \eqref{eq:onesidedrestriction} (with a random set to avoid), and in particular we see that these probabilities are strictly positive. Moreover, the probability of $E_3$ conditionally on the loop soup and $\be^{\intens}_{\mathbb{T}_+,\mathbb{T}_+,\D}$ is positive since the restriction of the excursion measure $\mu$ to excursions starting on a closed arc and ending on a disjoint closed arc is finite, see below (5.10) in \cite{lawler2005conformally}. Since the events $E_2$ and $E_3$ are independent conditionally on $\be^{\intens}_{\mathbb{T}_+,\mathbb{T}_+,\D}$ and the loop soup, we are done. \end{proof} We now turn to the subcritical case. As we now explain, when $u\geq(8-\kappa)\pi/16$ it is easy to construct from Lemmas~\ref{l.brownianexcursionandlooprestriction} and \ref{lem:SLE-ka-r} a curve in $\be^{\intens,\lambda(\kappa)}$ which surrounds $B(r).$ \begin{lemma}\label{lem:2} Suppose $\kappa\in{[8/3,4]}$ and $\intens\geq(8-\kappa)\pi/16.$ Then $\Pm\big(B(r)\stackrel{\CV^{\intens,\lambda(\kappa)}}{\longleftrightarrow}\partial\D\big)=0$ for all $r\in{[0,1)}.$ \end{lemma} \begin{proof} For $k\in{\{0,1\}},$ let $\Gamma_k=\{e^{i\theta}:\,\theta\in{[-\pi/4,\, 5\pi/4]+k\pi}\}$ and $D_k=\{tx, \, t\in{(r,1)}, \, x\in{\Gamma_k}\}.$ By Lemmas~\ref{l.brownianexcursionandlooprestriction} and \ref{lem:SLE-ka-r}, the chord $\partial_{\Gamma_k}\be_{\Gamma_k,\Gamma_k,D_k}^{\intens,\lambda(\kappa)}$ almost surely does not intersect $\Gamma_k$ except at the start and end points, and it does not intersect $B(r)$ by definition of $D_k.$ It therefore separates $B(r)$ from $\mathring{\Gamma_k},$ and since $\mathring{\Gamma_0}\cup\mathring{\Gamma_1}=\partial\D,$ we are done. \end{proof} Note that while our proof of Lemma~\ref{lem:2} exploits the restriction property from Lemma~\ref{lem:resexc}, the proof of Lemma~\ref{lem:1} does not use it. \begin{proof}[Proof of Theorem~\ref{t.mainthm}] The proof follows immediately from Lemma~\ref{lem:1} and Lemma~\ref{lem:2}, noting that $\Pm\big(B(r)\stackrel{\CV^{\intens,\lambda(\kappa)}}{\longleftrightarrow}\partial\D\big)\geq\Pm\big(0\stackrel{\CV^{\intens,\lambda(\kappa)}}{\longleftrightarrow}\partial\D\big).$ \end{proof} \section{Appendix: Coupling of random walk and Brownian motion} \label{sec:KMT} \renewcommand*{\thetheorem}{B.\arabic{theorem}} \renewcommand{\theequation}{B.\arabic{equation}} In order to obtain our approximation result for Brownian excursions by random walk excursions, see Theorem~\ref{the:couplingdiscontexc1}, one first needs to approximate a Brownian motion by a random walk. To do this we shall utilize the KMT coupling \cite{MR375412}, and refer to \cite[Theorem~7.6.1]{MR2677157} for the version that we use here. For a random walk $X,$ we define $X_t$ for non-integer $t$ by linear interpolation.
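Before stating the coupling, let us record a simple (and standard) sanity check for the normalisation $1/\sqrt{2}$ appearing below: each step of the simple random walk on $\Z^2$ changes exactly one coordinate by $\pm1$, so each coordinate of the walk $Y$ has variance $1/2$ per unit time, in agreement with the coordinates of $\frac{1}{\sqrt{2}}B$, \[ \mathrm{Var}\big(Y^{i}_n\big)=\frac{n}{2}=\mathrm{Var}\Big(\tfrac{1}{\sqrt{2}}B^{i}_n\Big),\qquad i\in\{1,2\},\ n\in\N, \] where $Y^{i}$ and $B^{i}$ denote the coordinates of $Y$ and $B$.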
\begin{theorem} \label{the:KMT} There exists $c<\infty$ and a coupling $\Pm$ between a Brownian motion $B$ on $\R^2$ and a random walk $Y$ on $\Z^2$ both starting at $0$ satisfying for all $n\in\N$ \begin{equation*} \Pm \left(\max_{0 \leq t \leq 2n^3 } \left|\frac{1}{\sqrt{2}} B_t-Y_t \right| > c\log(n) \right) \leq \frac{c}{n}. \end{equation*} \end{theorem} We now turn Theorem~\ref{the:KMT} into a coupling between a Brownian motion on $\D,$ that is, killed on hitting $\partial\D,$ and a random walk on $\D_n,$ that is, killed on hitting $\partial\D_n.$ \begin{corollary} \label{cor:KMTonD} There exists $c<\infty$ such that, for all $n\in\N,$ $z\in{\D}$ and $x\in{\D_n},$ there exists a coupling $\Pm_{z,x}^{(n)}$ between a two-dimensional Brownian motion $Z$ on $\D$ starting in $z$ and a random walk $X^{(n)}$ on $\D_n$ starting in $x$ satisfying \begin{equation*} \Pm_{z,x}^{(n)}\left(\sup_{t\in{[0,\overline{\tau}_{\partial\D}]}}|Z_t-\widehat{X}_t^{(n)}|>|z-x|+ \frac{c\log(n)}{n}\right)\leq \frac{c}{n}, \end{equation*} where $\widehat{X}_t^{(n)}:=X_{2tn^2}^{(n)}$ for all $t\geq0$ and $\overline{\tau}_{\partial\D}:=\inf\{t\geq0:|\widehat{X}_t^{(n)}|\vee|Z_t|=\infty\}$ is the first time at which either $\widehat{X}^{(n)}$ or $Z$ is killed, with the convention that, after being killed, $X^{(n)}$ and $Z$ are both equal to a cemetery point $\Delta$ with $|\Delta|=\infty.$ \end{corollary} \begin{proof} Note that, under the coupling from Theorem~\ref{the:KMT}, $B^{(n)}:=(\frac1{\sqrt{2}n}B_{2tn^2}+z)_{t\geq0}$ is a Brownian motion starting in $z,$ $Y^{(n)}:=(\frac1nY_k+x)_{k\geq0}$ is a two-dimensional random walk on $\frac1n\Z^2$ starting in $x,$ and \begin{equation*} \Pm\left(\sup_{t\in{[0,n]}}|B^{(n)}_t-\widehat{Y}_t^{(n)}|>|z-x|+ \frac{c\log(n)}{n}\right)\leq \frac{c}{n}, \end{equation*} where $\widehat{Y}_t^{(n)}:=Y^{(n)}_{2tn^2}$ for all $t\geq0.$ Letting $\overline{\tau}_{\partial\D}:=\inf\{t\geq0:|B^{(n)}_t|\vee|\widehat{Y}_t^{(n)}|\geq1\},$ we moreover have \begin{equation*} \Pm(\overline{\tau}_{\partial\D}\geq n)\leq \Pm(|B^{(n)}_n|\leq1)\leq\exp(-cn).
\end{equation*} We can conclude by defining, under some probability measure $\Pm_{z,x}^{(n)},$ the processes $(Z,X^{(n)})$ with the same law as $(B^{(n)},Y^{(n)})$ under $\Pm,$ killed respectively on hitting $\partial \D$ and $\partial \D_n.$ \end{proof} In order to couple discrete and continuous excursions, we need to use the coupling from Corollary~\ref{cor:KMTonD} until the last exit time $\overline{L}_{K}:=\sup\{t\geq0:\widehat{X}_t^{(n)}\in{K}\text{ or }Z_t\in{K}\}$ of a set $K\Subset\D$ by both $\widehat{X}^{(n)}$ and $Z,$ with the convention $\sup\varnothing=0.$ Recall also the convention $Z_t=\Delta$ for all $t$ after $Z$ has been killed with $|\Delta|=\infty,$ and similarly for $X^{(n)}.$ \begin{lemma} \label{lem:KMTuntilboundary} Under the coupling from Corollary~\ref{cor:KMTonD}, there exists $c>0$ and $s_0>0$ such that for all $K\Subset\D,$ $s\geq s_0,$ $z\in{\D},$ $x\in{\D_n}$ with $|z-x|\leq \frac{s\log(n)}{2n},$ \begin{equation} \label{eq:KMTuntilboundary} \Pm_{z,x}^{(n)}\left(\sup_{t\in{[0,\overline{L}_{K}]}}|Z_t-\widehat{X}_t^{(n)}|> \frac{s\log(n)}{n}\right)\leq \frac{cs\log(n)}{n(1-r)}, \end{equation} where $r=\sup\{|y|:\,y\in{K}\}.$ \end{lemma} \begin{proof} Abbreviate $\eps_n=s\log(n)/n,$ and note that we can assume without loss of generality that $1-r\geq \eps_n.$ By Corollary~\ref{cor:KMTonD}, we have for all $s$ large enough \begin{equation*} \Pm_{z,x}^{(n)}(\overline{L}_{K}\geq \overline{\tau}_{\partial\D})\leq \Pm_z\left(L_{K}\geq \tau_{B(1-\eps_n)^\ch}\right)+ \Pm_x^{(n)}\left(L_{K_n}^{(n)}\geq \tau_{B_n(1-\eps_n)^\ch}^{(n)}\right)+\frac{c}{n}. \end{equation*} It moreover follows from \eqref{eq:hittingBM} and the strong Markov property that \begin{equation*} \Pm_z\left(L_{B(r)}\geq \tau_{ B(1-\eps_n)^\ch}\right)\leq\frac{\log(1-\eps_n)}{\log(r)}\leq \frac{cs\log(n)}{n(1-r)} \end{equation*} since $1-x \leq \log(1/x)$ for $x \in (0,1)$ and $|\log(1-x)|<2x$ for $x$ small enough. Using a similar formula for the random walk, see for instance \cite[Lemma~2.1]{MR3737923}, we have \begin{equation*} \Pm_x^{(n)}\left(L_{B_n(r)}^{(n)}\geq \tau_{B_n(1-\eps_n)^\ch}^{(n)}\right)\leq\frac{\log(1-\eps_n)+c/(nr)}{\log(r)}\leq \frac{cs\log(n)}{n(1-r)} \end{equation*} for all $r\geq\frac12.$ Since the above probability is increasing in $r$ and $K\subset B(r),$ we can conclude. \end{proof} Finally, to establish a coupling between the normalized equilibrium measures, we need a bound on the distance between the positions of $Z$ and of $X^{(n)}$ at their respective last exit times of a ball, which bears some similarities with \cite[Proposition~7.7.1]{MR2677157}. Recall the definition of $L_B$ from above \eqref{e.eqmeas} and $L_{B_n}^{(n)}$ from \eqref{eq:deftauLn}.
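For the reader's convenience, we also recall the standard identity that we have in mind when invoking \eqref{eq:hittingBM}, both in the proof above and in \eqref{e:proofboundonexit3} below: since $z\mapsto\log|z|$ is harmonic in the annulus $\{r_1<|z|<1\}$ and vanishes on $\partial\D,$ one has, for $0<r_1<|z|<1,$ \[ \Pm_z\big(\tau_{B(r_1)}<\tau_{\partial\D}\big)=\frac{\log|z|}{\log (r_1)}. \]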
\begin{lemma}\label{l.gtype} Under the coupling from Corollary~\ref{cor:KMTonD}, there exists $c>0$ and $s_0<\infty$ such that for all $r\in{(1/2,1)}$ and $s\geq s_0,$ \begin{equation} \label{e:boundlastexit} \Pm_{0,0}^{(n)}\left(\left| {X}^{(n)}_{{L}_{B_n}^{(n)}}-Z_{L_{B }}\right|\geq \frac{s \log(n)}{n} \right) \leq \frac{c }{s }+\frac{c\log(n)}{n(1-r)}, \end{equation} where $B=B(r)$ and $B_n=B\cap\D_n.$ \end{lemma} \begin{proof} First recall that by Lemma~\ref{lem:KMTuntilboundary} we can couple a time-changed random walk $X^{(n)}$ on $\D_n$ and a Brownian motion $Z$ on $\D$ such that \begin{equation}\label{e.lekmt} |\widehat{X}_t^{(n)}-Z_t|\leq \frac{c\log(n)}{n} , \forall t \leq \overline{L}_{B(r)}=\sup\{t\geq0:|\widehat{X}_t^{(n)}|\wedge|Z_t|\leq r\}, \end{equation} with probability at least $1- \frac{c\log(n)}{n(1-r)}.$ Let us denote by $\widehat{L}_{B_n}^{(n)}:=\sup\{t\geq0:\widehat{X}^{(n)}_t\in{B_n}\}$ the last exit time of $B_n$ by $\widehat{X}^{(n)},$ and then $\widehat{X}^{(n)}_{\widehat{L}_{B_n}^{(n)}}=X^{(n)}_{L_{B_n}^{(n)}}.$ Let \begin{equation} \label{e:defrhoL} \rho_n^{\pm}:=r\pm\frac{3c\log(n)}{n}\text{ and } \gamma_n^{\pm} :=L_{B(\rho_n^{\pm})}. \end{equation} Note that $\widehat{L}_{B_n}^{(n)} \in [\gamma_n^-,\gamma_n^+]$ on the event \eqref{e.lekmt}, and $L_B\in [\gamma_n^-,\gamma_n^+].$ In particular, on the event \eqref{e.lekmt}, \begin{align*} |{X}^{(n)}_{{L}_{B_n}^{(n)}}-Z_{L_{B}}| &\leq \sup_{\gamma_n^- \leq t \leq \gamma_n^+ } |Z_t-Z_{L_{B }}|+|Z_{\widehat{L}_{B_n}^{(n)}}-\widehat{X}^{(n)}_{\widehat{L}_{B_n}^{(n)}}| \\&\leq 2 \sup_{\gamma_n^- \leq t \leq \gamma_n^+ }|Z_t-Z_{\gamma_n^+}|+\frac{c\log(n)}{n}. \end{align*} Hence \begin{equation}\label{e:bmgambler} \begin{split} \Pm_{0,0}^{(n)}&\left( \left| {X}^{(n)}_{{L}_{B_n}^{(n)}}-Z_{L_{B}}\right| \geq \frac{(2s+c) \log(n)}{n} \right) \\ &\leq \Pm_{0}\left( \sup_{\gamma_n^- \leq t \leq \gamma_n^+} | Z_t-Z_{\gamma_n^+}|\geq \frac{s \log(n)}{n} \right) +\frac{c\log(n)}{n(1-r)}. \end{split} \end{equation} According to \cite[p.74]{MR521533}, conditionally on $Z_{\gamma_n^-},$ the law of $(Z_t)_{t\geq \gamma_n^-}$ is independent of $(Z_t)_{t\leq \gamma_n^-}.$ Since $\sigma_{\rho_n^-},$ the uniform measure on $\partial B(\rho_n^-),$ is the law of $Z_{\gamma_n^-}$ starting from either $0$ or $\sigma_{\rho_n^+}$ by rotational invariance, the probability on the last line of \eqref{e:bmgambler} is equal to \begin{align*} &\Pm_{\sigma_{\rho_n^+}}\left( \sup_{\gamma_n^- \leq t \leq \gamma_n^+} | Z_t-Z_{\gamma_n^+}\,|\,\geq \frac{s \log(n)}{n}\,\Big|\,\gamma_n^->0\right) \\&= \Pm_{\sigma_{\rho_n^+}} \left( \sup_{0 \leq t \leq \tau_{B(\rho_n^-)} } | Z_t-Z_{0}|\geq \frac{s \log(n)}{n} \,\Big|\, \tau_{B(\rho_n^-)}<\tau_{\partial \D } \right), \end{align*} where the last equality follows from invariance under time-reversal of the Brownian motion, see for instance \cite[Theorem~24.18]{kallenberg_2002}. Therefore, using again rotational invariance and changing notation, which makes the rest of the argument more clear, we obtain \begin{equation} \label{e:proofboundonexit0} \Pm_0\left( \sup_{\gamma \leq t \leq \bar{\gamma}} | Z_t-Z_{\bar{\gamma}}|\geq \frac{s \log(n)}{n} \right)=\Pm_{{\rho_n^+}}\left( Z[0,\tau_{B(\rho_n^-)}] \not\subset B\big(\rho_n^+,\frac{s \log(n)}{n}\big) \,\Big |\, \tau_{B(\rho_n^-)} < \tau_{\partial \D} \right). 
\end{equation} First note that by \eqref{eq:hittingBM}, \begin{equation} \label{e:proofboundonexit3} \Pm_{\rho^+_n}(\tau_{B(\rho_n^-)} < \tau_{\partial \D})=\frac{|{\log( \rho^+_n)}|}{{|\log( \rho^-_n)}|}\geq c \end{equation} if $\log(n)/n\leq c|\log(r)|,$ which we can assume without loss of generality since $1-r\leq |\log(r)|.$ We now bound the right-hand side of \eqref{e:proofboundonexit0} without the conditioning. Using conformal invariance of Brownian motion under $z\mapsto \log(z),$ see for instance \cite[Theorem~2.2]{lawler2005conformally}, we moreover have \begin{equation} \label{e:proofboundonexit00} \begin{split} \Pm_{{\rho_n^+}}&\left( Z[0,\tau_{B(\rho_n^-)}] \not\subset B(\rho_n^+,\frac{s \log(n)}{n}) , \tau_{B(\rho_n^-)} < \tau_{\partial \D} \right) \\ &\leq \Pm_{\log(\rho_n^+)} \left( W[0,\tau_{ \log( \rho_n^- )}'] \not\subset \log\Big(B\big(\rho_n^+, \frac{s\log(n)}{n}\big)\Big) \right) \end{split} \end{equation} where $W$ is a Brownian motion started at $\log(\rho_n^+)$ and killed upon hitting $\{z\in{\mathbb{C}}:\Re(z)<0,\Im(z)\in{(-\pi,\pi)}\}^{\ch},$ and $\tau_a'$ is the minimum between the killing time of $W$, and the first hitting time of $\left\{ \Re(z) = a\right\}$ by $W$ for each $a<0.$ Now using the inequality $|e^x-e^y|\leq C|x-y|$ for all $x,y\in{\{z:|z|\leq |\log(r)|+1\}},$ one can find a constant $c>0$ such that, \begin{equation} \label{e:inclusionballs} B\left(\log(\rho_n^+), \eps_n \right) \subset \log \left( B\Big(\rho_n^+, \frac{s \log(n)}{n}\Big)\right),\text{ where }\eps_n=\frac{cs\log(n)}{n}. \end{equation} Hence we only need to estimate the probability that \( \{ W[0,\tau_{\log( \rho_n^- )}'] \subset B(W_0, \eps_n) \}, \) conditionally on $\tau'_{\log(\rho_n^-)}<\tau'_0,$ and the remainder part of the proof is to obtain a lower bound on this probability. We begin with some elementary geometrical observations. First by \eqref{e:defrhoL} the distance between the two points $\log(\rho_n^\pm)$ is $\log(\rho_n^+/\rho_n^-)$ and satisfies \begin{equation} \label{e:boundrho} \log(\rho_n^+/\rho_n^-) \leq \frac{c\log(n)}{n}. \end{equation} Thus for $s$ large enough, the ball $B\left(\log(\rho_n^+), \eps_n\right)$ intersects the line \( \{\Re (z) = \log( \rho_n^-) \}, \) and, letting $h$ denote the distance from the point $\log(\rho_n^+)+\eps_n/2$ and one of the points $\{\Re(z)= \log( \rho_n^+)+\eps_n/2 \} \cap B\left(\log(\rho_n^+), \eps_n\right),$ and $h'$ the distance between $\log( \rho_n^-)$ and one of the points $\{\Re(z)= \log( \rho_n^{-} ) \} \cap B\left(\log(\rho_n^+), \eps_n\right)$ we have for $s$ large enough \begin{align} \label{e:defh*} h'= \sqrt{\eps_n^2 -\log\left( \frac{\rho_n^+}{\rho_n^-} \right)^2 } \geq \frac{\sqrt{3}}{2}\eps_n= h. \end{align} Now we can estimate the probability as follows. For each $t$ before $W$ is killed on $\{\Im(z)\in{[-\pi,\pi]^\ch}\},$ we can write $W_t := Y^1_t + \im Y^2_t,$ where $Y^1$ and $Y^2$ are two independent one-dimensional Brownian motions, and then, assuming without loss of generality that $s\log(n)/n$ is small enough so that $h<\pi,$ we have under $\Pm_{\log(\rho_n^+)}$ \begin{align*} &\left\{ \tau_{\log(\rho^-_n)}(Y^1)< \tau_{ \log(\rho_n^+)+\eps_n/2 }(Y^1) \right\} \cap \{ \tau_{\log(\rho^-_n)}(Y^1)< \tau_{h}(Y^2)\wedge \tau_{-h}(Y^2) \} \\ & \subset \left\{ W[0, \tau_{ \log( \rho_n^- )}'] \subset B\left(W_0, \eps_n\right)\right\}. 
\end{align*} Therefore, \begin{equation} \label{e:proofboundonexit1} \begin{split} \Pm_{\log( \rho^+_n)} \left( W[0, \tau_{ \log( \rho^-_n )}'] \not \subset B\left(W_0, \eps_n\right)\right) &\leq \Pm_{\log( \rho^+_n)} \left(\tau_{\log(\rho^-_n)}(Y^1)> \tau_{ \log(\rho_n^+)+\frac{\eps_n}{2} }(Y^1) \right) \\ &\ + \Pm_{\log( \rho^+_n)} \left( \tau_{\log(\rho^-_n)}(Y^1)> \tau_{h}(Y^2)\wedge \tau_{-h}(Y^2) \right). \end{split} \end{equation} Using the well-known formula for one-dimensional hitting times, see for instance Part II.1, Equation 2.1.2 in \cite{MR1912205}, we see that by \eqref{e:inclusionballs} and \eqref{e:boundrho} \begin{equation} \label{e:proofboundonexit2} \begin{split} &\Pm_{\log( \rho^+_n)} \left(\tau_{\log(\rho^-_n)}(Y^1)> \tau_{\log(\rho_n^+)+ \frac{\eps_n}{2} }(Y^1) \right)= \frac{|\log(\rho_n^-/\rho_n^+)|}{|\log(\rho_n^-/\rho_n^+)|+ \eps_n/2} \leq\frac{c}{s} \end{split} \end{equation} and by \eqref{e:defrhoL} The second term in \eqref{e:proofboundonexit1} can be bounded by \begin{equation}\label{e:bmhit1} \begin{split} &\Pm_{\log( \rho^+_n)} \left( \tau_{\log(\rho^-_n)}(Y^1)> \tau_{h}(Y^2)\wedge \tau_{-h}(Y^2) \right)\leq 2\Pm_{0} \left( \tau_{\log(\rho^+_n/\rho^-_n)}(Y^1)> \tau_{h}(Y^2)\right) \\&\quad\leq 4 \E_0\left[1-\Phi\left( \frac{h}{\sqrt{\tau_{\log(\rho_n^+/\rho_n^-)}(Y^1)}} \right)\right] \leq 4 \E_0\left[1-\Phi\left( \frac{cs}{\sqrt{\tau_{1}(Y^1)}} \right)\right], \end{split} \end{equation} where $\Phi(x)$ denotes the CDF of a standard normal random variable, we used the reflection principle in the second inequality, and scaling invariance as well as \eqref{e:boundrho} and \eqref{e:defh*} in the last inequality. Using that $1-\Phi(y) \leq \frac{1}{\sqrt{2\pi}y}\e^{-y^2/2}$ for all $y>0,$ as well as a formula for the density of $\tau_{1},$ see for instance \cite[Part II.1, Equation 2.0.2]{MR1912205}, we moreover have \begin{equation} \label{e:bXYsmall} \E_0\left[\left(1-\Phi\left( \frac{cs}{\sqrt{\tau_{1}(Y^1)}}\right)\right)\I_{\tau_{1}(Y^1)<s^{2}} \right]\leq \int_0^{s^{2}}\frac{c}{st}e^{-\frac{cs^{2}}{t}}\mathrm{d}t=\frac{c'}{s}, \end{equation} where the last equality follows from the change of variable $t\mapsto s^{-2}t.$ Combining \eqref{e:proofboundonexit3}, \eqref{e:proofboundonexit1}, \eqref{e:proofboundonexit2}, \eqref{e:bmhit1}, \eqref{e:bXYsmall} together with the bound $\Pm_0(\tau_1(Y^1)\geq s^{2})=2\Phi(1/s)-1\leq C/s$ we thus obtain \[ \Pm_{\log(\rho_n^+)} \left( W[0,\tau_{ \log( \rho_n^- )}'] \not \subset B\big(\log(\rho_n^+), \eps_n\big)\, \middle|\, \tau'_{ \log( \rho_n^- )}<\tau_0' \right) \leq \frac{c}{s}, \] which, in view of \eqref{e:bmgambler}, \eqref{e:proofboundonexit0}, \eqref{e:proofboundonexit00} and \eqref{e:inclusionballs}, completes the proof. \end{proof} \section{Appendix: Convergence of capacities} \label{app:convcap} \renewcommand*{\thetheorem}{C.\arabic{theorem}} \renewcommand{\theequation}{C.\arabic{equation}} In this section, we prove convergence of the discrete capacity to the continuous capacity of general connected compacts $K,$ which generalizes Lemma~\ref{l.capconv}, and is used in the proof of Lemma~\ref{Lem:bigcaponboundary}. 
Recall that for $K\subset \D$ we write $K_n:= \D_n \cap K.$ \begin{proposition} \label{t.gencapconv} There exists $c<\infty$ such that for all connected sets $K\Subset \D$ and $n\in{\N}$ satisfying $K\subset B(K_n,2/n),$ letting $r= \sup_{x \in K} |x|,$ we have \begin{equation} \label{e.gencapconv} \left| \capac(K)-\capac^{(n)}(K_n)\right| \leq \frac{c}{\sqrt{1-r}} \left(\left(1+\mathrm{cap}(K)+\mathrm{cap}^{(n)}(K_n)\right)^2\frac{\log(n)}{n} \right)^{1/3} \end{equation} \end{proposition} \begin{proof} Write $B=B(r\vee(1/2))$ and let $B_n=B\cap\D_n.$ By the sweeping identity \eqref{e.consistency} and the corresponding discrete identity, see for instance \cite[(1.59)]{MR2932978}, we have \begin{align*} \frac{\capac(K)}{\capac(B)}-\frac{ \capac^{(n)}(K_n)}{\capac^{(n)}(B_n)}&= \Pm_{\overline{e}_B} \left( \tau_K<\infty \right)- \Pm_{\overline{e}_{B_n}^{(n)}}^{(n)} \left( \tau_{K_n}^{(n)} < \infty \right) \\&=\E_{\Q_r}\left[\Pm_{E_B}(\tau_K<\infty)-\Pm_{E_B^{(n)}}^{(n)}\left(\tau_{K_n}^{(n)}<\infty\right)\right], \end{align*} where $\Q_r$ denotes the coupling from Lemma~\ref{l.EQcoup}. Using Lemma~\ref{l.capconv} we thus have \begin{equation} \label{e.proofgenK1} \begin{split} \left|\frac{\capac(K)-\capac^{(n)}(K_n)}{\capac(B)} \right|&\leq \left|\frac{\capac(K)}{\capac(B)}-\frac{ \capac^{(n)}(K_n)}{\capac^{(n)}(B_n)}\right|+\capac^{(n)}(K_n)\left|\frac{1}{\mathrm{cap}(B)}-\frac{1}{\mathrm{cap}^{(n)}(B_n)}\right| \\&\leq\E_{\Q_r}\left[\left|\Pm_{E_B}(\tau_K<\infty)-\Pm_{E_{B_n}^{(n)}}^{(n)}\left(\tau_{K_n}^{(n)}<\infty\right)\right|\right]+\frac{c\,\mathrm{cap}^{(n)}(K_n)}{n}. \end{split} \end{equation} Now, using \eqref{e.boundapproequi} and recalling the coupling $\Pm_{z,x}^{(n)}$ from Lemma~\ref{lem:KMTuntilboundary}, we write for all $s>0$ \begin{equation} \label{e.proofgenK2} \begin{split} & \E_{\Q_r}\left[\left|\Pm_{E_B}(\tau_K<\infty)-\Pm_{E_{B_n}^{(n)}}^{(n)}\left(\tau_{K_n}^{(n)}<\infty\right)\right|\right] \\& \leq \frac{c}{s}+ \frac{c\log(n)}{n(1-r)} + \E_{\Q_r}\left[ \Pm_{E_B,E_{B_n}^{(n)}}^{(n)} \big( \{\tau_K< \infty\}\Delta\{ \tau_{K_n}^{(n)}<\infty\}\big)\I\left\{|E_B-E_{B_n}^{(n)}|\leq \frac{s\log(n)}{n}\right\} \right] \end{split} \end{equation} We now bound the expectation on the right hand side of \eqref{e.proofgenK2}. Let $\overline{\tau}_K:=\inf\{t\geq0:\widehat{X}_t^{(n)}\in{K}\text{ or }Z_t\in{K}\}$ be the first time either $\widehat{X}^{(n)}$ or $Z$ hit $K,$ with the convention $\inf\varnothing=0,$ and note that $\overline{\tau}_K\leq\overline{L}_K,$ see above Lemma~\ref{lem:KMTuntilboundary}. 
Moreover due to our assumption on $K,$ we have $K_n\subset K\subset B(K_n,2/n).$ Letting \[ A = \left \{ \sup_{0 \leq t \leq \overline{\tau}_K } | Z_t-\widehat{X}_t^{(n)}| \leq \frac{2s \log(n)}{n} \right\}, \] we then have for all $z,x$ with $|z-x|\leq s\log(n)/n$ and all $s\geq2$ \begin{align*} \Pm_{z,x}^{(n)} &\left( \tau_K< \infty, \tau_{K_n}^{(n)} = \infty, A \right) \leq \Pm_{x}^{(n)} \left( \tau_{B_n(K_n,\eps_n)}^{(n)}< \infty, \tau_{K_n}^{(n)} = \infty \right), \\ \Pm_{z,x}^{(n)} &\left( \tau_K= \infty, \tau_{K_n}^{(n)} < \infty, A \right) \leq \Pm_{z} \left( \tau_{B(K,\eps_n)}< \infty, \tau_{K} = \infty \right), \end{align*} where $\eps_n=3s\log(n)/n.$ Therefore, we have \begin{equation} \label{e.proofgenK3} \begin{split} & \E_{\Q_r}\left[ \Pm_{E_B,E_{B_n}^{(n)}} \left( \{\tau_K< \infty\}\Delta\{ \tau_{K_n}^{(n)}<\infty\}\right)\BI\left\{|E_B-E_{B_n}^{(n)}|\leq \frac{s\log(n)}{n}\right\} \right] \\ &\leq \Pm_{\overline{e}_B} (\tau_{B(K,\eps_n)}< \infty, \tau_{K}=\infty)+\Pm^{(n)}_{\overline{e}^{(n)}_{B_n}} \left( \tau_{B_n(K_n,\eps_n)}^{(n)}<\infty,\tau_{K_n}^{(n)}= \infty \right) \\&\quad+ \sup_{x\in{\D_n},z\in{\D}:|x-z|\leq \frac{s\log(n)}{n}}\Pm_{z,x}^{(n)}(A^\ch). \end{split} \end{equation} Using the Markov property and the sweeping identity \eqref{e.consistency} we can estimate these quantities as follows \begin{align*} \Pm_{\overline{e}_B} (\tau_{B(K,\eps_n)}< \infty, \tau_{K}=\infty) &= \E_{\overline{e}_B} \left[ \Pm_{Z_{\tau_{B(K,\eps_n)}}} \left(\tau_K= \infty\right) \I\{\tau_{B(K,\eps_n)} < \infty\} \right] \\&=\frac{\Pm_{e_{B(K,\eps_n)}} (\tau_K= \infty)}{\mathrm{cap}(B)}. \end{align*} Moreover, by the Beurling estimate, see Lemma~\ref{lem:beurling}, we have \begin{equation} \label{e.Petau=infty} \Pm_{e_{B(K,\eps_n)}} (\tau_K= \infty) \leq \Pm_{{e}_{B(K,\eps_n)}} (\tau_{K}> \tau_{\partial B(Z_0,1-r) }) \leq\sqrt{\frac{cs\log(n)}{n(1-r)}} \capac(B(K,\eps_n)). \end{equation} Note that by the sweeping identity \eqref{e.consistency}, the left-hand side of \eqref{e.Petau=infty} is actually equal to $\mathrm{cap}(B(K,\eps_n))-\mathrm{cap}(K),$ and in particular for $s\log(n)/(n(1-r))$ small enough, it implies $\mathrm{cap}(B(K,\eps_n))\leq 2\mathrm{cap}(K).$ Therefore using \eqref{capball} we obtain \begin{equation} \label{e.boundKsKcont} \Pm_{\overline{e}_B} (\tau_{B(K,\eps_n)}< \infty, \tau_{K}=\infty)\leq \sqrt{\frac{cs(1-r)\log(n)}{n}}\capac(K) . \end{equation} Similarly, using the discrete sweeping identity, Beurling estimate \eqref{eq:disbeurling}, and \eqref{eq:approcap}, one can easily prove \begin{equation} \label{e.boundKsKdis} \Pm_{\overline{e}_{B_n}^{(n)}}^{(n)} (\tau_{B_n(K_n,\eps_n)}< \infty, \tau_{K_n}^{(n)}=\infty)\leq \sqrt{\frac{cs(1-r)\log(n)}{n}} \capac^{(n)}(K_n). 
\end{equation} Combining \eqref{eq:KMTuntilboundary}, \eqref{e.proofgenK2}, \eqref{e.proofgenK3}, \eqref{e.boundKsKcont} and \eqref{e.boundKsKdis}, we obtain for all $s$ satisfying $s\geq s_0$ and $s\log(n)/(n(1-r))$ small enough, \begin{equation} \label{eq:proofgenKf} \begin{split} &\E_{\Q_r}\left[\left|\Pm_{e_B}(\tau_K<\infty)-\Pm_{e_{B_n}^{(n)}}\left(\tau_{K_n}^{(n)}<\infty\right)\right|\right] \\&\leq \frac{c}{s}+ \frac{cs\log(n)}{n(1-r)}+\left(\mathrm{cap}(K)+\mathrm{cap}^{(n)}(K_n)\right)\sqrt{\frac{cs(1-r)\log(n)}{n}} \end{split} \end{equation} and, taking $s=\big(cn/(\log(n))\big)^{1/3}\left(1+\mathrm{cap}(K)+\mathrm{cap}^{(n)}(K_n)\right)^{-2/3}\sqrt{1-r}$ for some large enough constant $c,$ which we can assume without loss of generality satisfies $s\geq s_0$ and $s\log(n)/(n(1-r))$ is small enough, we can conclude by \eqref{capball} and \eqref{e.proofgenK1}. \end{proof} Note that the choice of $s$ below \eqref{eq:proofgenKf} is not optimal, but the optimal choice depends on the relation between $r,$ $n$ and $\mathrm{cap}(K).$ Thus for certain values of these parameters the bound \eqref{e.gencapconv} could be improved, but we anyway do not believe that the bounds obtained via this method would be optimal. \bibliography{references} \end{document}
2205.15267v2
http://arxiv.org/abs/2205.15267v2
Double Null Data and the Characteristic Problem in General Relativity
\documentclass[12pt,a4paper,final]{article} \input{paquetes.tex} \title{\vspace*{-1.35cm}\textbf{Double Null Data and the Characteristic Problem in General Relativity}} \author{Marc Mars\footnote{\href{mailto:[email protected]}{[email protected]}}\,\, and Gabriel Sánchez-Pérez\footnote{\href{mailto:[email protected]}{[email protected]}} \\ Departamento de Física Fundamental, Universidad de Salamanca\\ Plaza de la Merced s/n, 37008 Salamanca, Spain } \date{\today} \begin{document} \maketitle \vspace{-2mm} \begin{abstract} General hypersurfaces of any causal character can be studied abstractly using the hypersurface data formalism. In the null case, we write down all tangential components of the ambient Ricci tensor in terms of the abstract data. Using this formalism, we formulate and solve in a completely abstract way the characteristic Cauchy problem of the Einstein vacuum field equations. The initial data is detached from any spacetime notion, and it is fully diffeomorphism and gauge covariant. The results of this paper put the characteristic problem on a similar footing to the standard Cauchy problem in General Relativity. \end{abstract} \section{Introduction} The study of the Einstein field equations (EFE) is of unquestionable interest both in mathematics and in physics. From a physical point of view, the solutions of the EFE describe the gravitational field outside the matter sources and model a large number of physical phenomena, including black holes and the propagation of gravitational waves. From a mathematical perspective they constitute a system of geometric PDE for a Lorentzian metric which can be studied in many different ways. A particularly relevant one is the initial value (or Cauchy) problem. The fundamental well-posedness result of Choquet-Bruhat \cite{Choquet} established that prescribing the (full) ambient metric and its first normal derivative on a spacelike hypersurface, subject to certain constraint equations, gives rise to an ambient metric solving the vacuum EFE realizing this initial data. Since the Einstein equations are of geometric nature, the strategy was to solve a reduced system of quasi-linear hyperbolic PDE, called the reduced Einstein equations, obtained from the original EFE under the assumption of harmonic coordinates, and then to prove that the solution of the reduced equations is indeed Ricci flat by showing that the coordinates in which the solution is obtained are actually harmonic. A detailed account of this beautiful argument, and extensions thereof, can be found in the excellent work \cite{ringstrom}.\\ This original statement for the initial value problem has evolved over time into a much more abstract notion where, instead of prescribing a full metric and its normal derivative, one prescribes an abstract $m$-dimensional manifold $\Sigma$, together with a Riemannian metric $h$ and a symmetric two-covariant tensor field $K$ satisfying the so-called constraint equations. In this approach, the well-posedness of the initial value problem becomes a completely geometric (i.e. independent of any choice of coordinates) statement, namely that there exists an $(m+1)$-dimensional ambient manifold $(\mc M,g)$ and an embedding $f : \Sigma \rightarrow \mc M$ such that $h$ is the induced (pull-back) metric and $K$ the second fundamental form. Thus, the initial data has become completely detached from the spacetime one wishes to construct.
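For later reference it is worth recalling, schematically, the form of these constraint equations in the $\Lambda$-vacuum case (the precise conventions play no role in this introduction): denoting by $\mathrm{Scal}_h$ the scalar curvature of $h$, they read \[ \mathrm{Scal}_h+(\operatorname{tr}_h K)^2-|K|^2_h=2\Lambda,\qquad \operatorname{div}_h K-\mathrm{d}(\operatorname{tr}_h K)=0. \] Note that these conditions involve only the abstract data $(\Sigma,h,K)$ and make no reference whatsoever to the ambient spacetime.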
This is very satisfactory because it puts the initial data at the same geometric level as the field equations themselves.\\ One of the main objectives of this paper is to achieve a similar geometrization for the characteristic initial value problem, where the data is posed, instead of on a time slice, on a pair of null hypersurfaces that intersect transversely. The essential result of Rendall \cite{Rendall} is that by prescribing the full spacetime metric on two transversely intersecting null hypersurfaces there exists a unique spacetime solution of the reduced Einstein equations in a future neighbourhood of the intersection compatible with the initial data. Moreover, working in the so-called \textit{standard coordinates}, Rendall gives a procedure to reconstruct all the components of the spacetime metric on the hypersurfaces from a minimal amount of initial data in such a way that the solution of the reduced equations is indeed a solution of the vacuum Einstein equations. Other approaches to this problem can be found in \cite{cabet,It,Hilditch_Kroon_Zhao,Book,Luk,Yau}. In \cite{Luk} Luk extends Rendall's result provided the intersection surface is a topological 2-sphere by showing that the spacetime exists in a future neighbourhood of the full two initial hypersurfaces (and not only of their intersection). In \cite{rodnianski} the authors state that Luk's result is valid in arbitrary dimension, and in \cite{cabet} this result is extended for a large class of symmetric hyperbolic systems including the Einstein equations in four spacetime dimensions without any assumption on the topology of the intersection spacelike surface. The characteristic problem is also known to be well-posed when the data is given on the future null cone of a point \cite{cone}.\\ Of special interest for the present work is the paper \cite{CandP} in which Chruściel and Paetz present new approaches to the characteristic problem and review the existing ones. The main difference with the approach followed by Rendall is that, instead of prescribing the minimal amount of data, they provide redundant initial data (and therefore this data must fulfill several constraint equations). In fact, one could add more initial data on the hypersurfaces by providing additional constraint equations (which in the resulting spacetime will become the null structure equations, i.e., the pullback of the Einstein equations on the hypersurfaces (cf. \cite{Book})). One of the main purposes of our paper is to follow this philosophy by providing enough abstract initial data (with the corresponding equations constraining it) to reconstruct the whole spacetime metric and the transverse derivative of its tangent components in the embedded case. One of the advantages of our approach is that it encompasses all the possible initial data constructed from the metric and its first derivatives for the characteristic initial value problem of the Einstein field equations. In order to write the initial data and its constraints from a geometrically satisfactory point of view, it is necessary to employ some abstract formalism capable of detaching the initial data from the spacetime one wishes to construct.\\ In \cite{Marc1,Marc2} a formalism to study hypersurfaces at the abstract level (i.e. by making no reference to any ambient space) has been developed. This data is called hypersurface data and it consists of an abstract manifold $\mc H$ together with a set of tensors on it. 
From such tensors one is able to reconstruct the full spacetime metric at the hypersurface as well as the transverse derivatives of its tangent components in the embedded case. This data is endowed with an internal gauge structure related to the freedom present in the choice of an everywhere transverse vector field in the embedded case. In this paper, we employ this formalism in its null version to formulate and solve the characteristic problem from an abstract point of view. In particular, we show that all tangential components of the ambient Ricci tensor in the embedded case can be written in terms of the abstract data on the (null) hypersurface. This allows us to encode the tangential components of the $\Lambda$-vacuum field equations as a set of constraint equations written fully in terms of the hypersurface data. Moreover, these constraints turn out to be gauge-covariant, which means that once they are satisfied in a particular gauge, they are necessarily satisfied in any other gauge. In order to define the abstract notion of two null and transverse hypersurfaces we need to extract the essential properties that two null hypersurface data must have so that they can be simultaneously embedded as two null hypersurfaces with a common spacelike boundary. It turns out that one requires a number of compatibility conditions at the boundary. These conditions define the abstract notion of double null data (Definition \ref{def_DND}).\\ The next step is to find a solution of the EFE from an initial double null data satisfying the abstract constraint equations. Before solving the reduced equations and proving that the resulting solution is also a solution of the EFE we need to capture at the abstract level the notion of the harmonic condition on the coordinates. Given a set of $m$ independent functions on the abstract hypersurface, we construct a vector field depending on these functions and on the hypersurface data. This vector field has the crucial property that it vanishes in the embedded case if and only if the given functions are harmonic on the hypersurfaces. The gauge behaviour of this vector field allows us to prove that there always exists an essentially unique gauge where it vanishes. This gauge, called ``harmonic gauge'', is still fully covariant and plays a key role in solving the characteristic problem in the present framework.\\ Given double null data written in the harmonic gauge and satisfying the constraint equations, we solve the reduced Einstein equations with the metric data as initial condition. In order to prove that the solution of the reduced equations is indeed a solution of the EFE we follow a different approach to that of Rendall. While Rendall reconstructs the data by integrating second-order ODEs for the unknown components of the metric in a specific coordinate system, we have the full metric as given data, so we can solve the reduced Einstein equations straight away. From this solution we can build new embedded data which is a priori different from the original one. To conclude the proof we must show two things. Firstly, that the two data are actually the same, so that we have been able to embed the full given data (and not only the metric part of it). Secondly, that the spacetime is actually a solution of the Einstein field equations. We achieve both things in one go by combining the constraint equations and the gauge conditions.
An informal version of the main Theorem that we prove is:\\ \textbf{Theorem 1.} Given double null data satisfying the abstract constraint equations, there exists a unique spacetime $(\mc M,g)$ solution of the $\Lambda$-vacuum Einstein equations such that the double null data is embedded in $(\mc M,g)$.\\ This result achieves our two main goals in this paper. Firstly, the initial data and the constraint equations are fully detached from the spacetime one wishes to construct, so we obtain a satisfactory geometrization of the characteristic initial value problem similar to that of the standard Cauchy problem. Secondly, since in the resulting spacetime the initial data corresponds to the full metric and the transverse derivatives of its tangent components, our result encompasses all possible initial data constructed from them. There are many possible ways of trading prescribed data for constraints. Each consistent choice would lead to a different version of the characteristic initial value problem, but all of them can be viewed as a particularization of Theorem 1. This recovers (and greatly extends) ``The many ways of the characteristic Cauchy problem'' in \cite{CandP}. Moreover, we discuss how the Theorem can be adapted to cover matter fields. We believe that the abstract geometrization of the characteristic initial value problem of General Relativity provides interesting new insights into the problem and that it will open up a new world of possibilities.\\ This paper is structured as follows. In Section \ref{preliminaries} we recall the definitions and general results of hypersurface data. Sect. \ref{section_CHD} is devoted to studying the notion of characteristic hypersurface data and its fundamental properties. In Section \ref{section_gauge} we compute the components of the ambient Ricci tensor in terms of the abstract (null) data and then promote these expressions to the abstract definition of the so-called constraint tensor. Sect. \ref{sec_fol} is devoted to computing the constraint tensor adapted to the foliation. The paper includes two Appendices. In Appendix \ref{appendix} we prove the gauge covariance of two tensors ($A$ and $B$) defined in terms of the curvature tensor of the hypersurface data connection. This result is used in Sect. \ref{section_gauge}. In Appendix \ref{appendixB} we derive a number of contractions of the tensors $A$ and $B$ which are needed in Sect. \ref{sec_fol}. In Section \ref{sec_HG} we introduce the harmonic gauge, which plays a crucial role in solving the characteristic problem from an abstract point of view. Finally, in Sect. \ref{sec_CP} we define the notion of double null data, we study some of its fundamental properties and we finish the section with the statement and proof of the main Theorem of this paper. \section{Preliminaries} \label{preliminaries} In this section we summarize the hypersurface data formalism, which is the general framework of this paper. This formalism was developed in \cite{Marc1,Marc2} with \cite{Marc3} as a precursor. Let us start by fixing some notation and conventions. On a differentiable manifold $\mc H$ we let $\mc F(\mc H)=\mc C^{\infty}(\mc H,\real)$ and $\mc{F}^{\star}(\mc H)$ be the subset of nowhere-vanishing functions. $T\mc H$ is the tangent bundle of $\mc H$ and $\Gamma(T\mc H)$ the sections of $T\mc H$. Similarly, $T^{\star}\mc H$ is the cotangent bundle of $\mc H$ and $\Gamma(T^{\star}\mc H)$ the corresponding space of one-forms.
The interior contraction of a covariant tensor $T$ with a vector $X$ is the tensor $i_X T \d T(X,\cdot,\cdots,\cdot)$. Abstract indices on $\mc H$ are denoted by small Latin letters from the beginning of the alphabet ($a,b,c,...$). As usual, parentheses (resp. brackets) denote symmetrization (resp. antisymmetrization) of indices, and the symbol $\otimes_s$ denotes the symmetrized tensor product $T_1\otimes_s T_2 = \frac{1}{2}(T_1\otimes T_2 + T_2\otimes T_1)$. In this paper spacetime means a smooth, orientable manifold endowed with a time-oriented Lorentzian metric. All manifolds are connected unless otherwise indicated. \begin{defi} \label{defi_hypersurfacedata} Let $\mc H$ be a smooth $m$-dimensional manifold, $\bg$ a symmetric two-covariant tensor field, $\bm\ell$ a one-form and $\ell^{(2)}$ a scalar on $\mc H$. A four-tuple $\mc D^{met}=\{\mc H,\bg,\bm\ell,\ell^{(2)}\}$ defines metric hypersurface data (of dimension $m$) provided that the symmetric two-covariant tensor $\mc A|_p$ on $T_p\mc H\times\real$ defined by $$\mc A|_p\left((W,a),(Z,b)\right) \d \bg|_p (W,Z) + a\bm\ell|_p(Z)+b\bm\ell|_p(W)+ab\ell^{(2)}|_p$$ is non-degenerate at every $p\in\mc H$. A five-tuple $\mc D=\mc D^{met}\cup \{\bY\}$, where $\bY$ is a symmetric two-covariant tensor field on $\mc H$, is called hypersurface data. \end{defi} Since $\mc A|_p$ is symmetric and non-degenerate for every $p\in \mc H$, there exists a unique symmetric, two-contravariant tensor $\mc A^{\sharp}|_p$ defined by $\mc A^{\sharp}|_p(\mc A|_p(V,\cdot),\cdot) = V$ for every $V\in T_p\mc H\times \real$. Let $a,b\in\real$ and $\bm\alpha,\bm\beta\in T^{\star}_p\mc H$. Then we can define a symmetric, two-contravariant tensor $P|_p$, a vector $n|_p$ and a scalar $n^{(2)}|_p$ on $T_p\mc H$ by \begin{equation} \label{Asharp} \mc A^{\sharp}\left((\bm\alpha,a),(\bm\beta,b)\right) = P|_p (\bm\alpha,\bm\beta)+a n|_p(\bm\beta)+bn|_p(\bm\alpha)+ab n^{(2)}|_p. \end{equation} The definition of $\mc A^{\sharp}$ is equivalent to \begin{align} \bg_{ab}n^b+n^{(2)}\bm\ell_a&=0,\label{gamman}\\ \bm\ell_a n^a+n^{(2)}\ell^{(2)}&=1,\label{ell(n)}\\ P^{ab}\bm\ell_b+\ell^{(2)} n^a&=0,\label{Pell}\\ P^{ab}\bg_{bc} +n^a\bm\ell_c &= \delta^a_c.\label{Pgamma} \end{align} As usual, the radical of $\bg$ is defined by $$\rad(\bg)_p \d \left\{X\in T_p\mc H\ : \ \bg(X,\cdot)=0\right\}\subset T_p\mc H.$$ An important property is that $\rad(\bg)_p$ is either trivial or one-dimensional \cite{Marc2}. The points $p\in\mc H$ such that $\rad(\bg)_p\neq \langle 0 \rangle$ are called null points. The condition that $p$ is a null point is equivalent to $\rad(\bg)_p = \langle n|_p\rangle$ and also equivalent to $n^{(2)}|_p=0$. Despite its name, the notion of (metric) hypersurface data does not view $\mc H$ as a hypersurface of another manifold. The connection between the abstract data and the standard notion of hypersurface is as follows. \begin{defi} \label{defi_embedded} A metric hypersurface data $\{\mc H,\bg,\bm\ell,\ell^{(2)}\}$ is embedded in a pseudo-Riemannian manifold $(\mc M,g)$ if there exists an embedding $f:\mc H\hookrightarrow\mc M$ and a vector field $\xi$ along $f(\mc H)$ everywhere transversal to $f(\mc H)$, called a rigging, such that \begin{equation} f^{\star}(g)=\bg, \hspace{0.5cm} f^{\star}\left(g(\xi,\cdot)\right) = \bm\ell,\hspace{0.5cm} f^{\star}\left(g(\xi,\xi)\right) = \ell^{(2)}.
\end{equation} The hypersurface data $\{\mc H,\bg,\bm\ell,\ell^{(2)},\bY\}$ is embedded provided that, in addition, \begin{equation} \label{Yembedded} \dfrac{1}{2}f^{\star}\left(\lie_{\xi} g\right) = \bY. \end{equation} \end{defi} Motivated from this geometric picture, hypersurface data satisfying $n^{(2)}=0$ everywhere on $\mc H$ will be called \textbf{null hypersurface data}. The necessary and sufficient condition for $f(\mc H)$ to admit a rigging is that $f(\mc H)$ is orientable (see \cite{Marc1}). Observe that the signature of the ambient metric $g$ and of the tensor $\mc A$ are necessarily the same.\\ Given hypersurface data one defines the tensor \begin{equation} \label{defK} \bK\d n^{(2)} \bY +\dfrac{1}{2}\lie_n\bg + \bm{\ell}\otimes_s \dd n^{(2)}, \end{equation} which when the data is embedded coincides \cite{Marc1} with the second fundamental form of $f(\mc H)$ with respect to the unique normal one-form $\bm{\nu}$ satisfying $\bm{\nu}(\xi)=1$. For embedded hypersurfaces the set of transversal vector fields is given by $z(\xi+f_{\star}\zeta)$, where $z\in\mc{F}^{\star}(\mc H)$ and $\zeta\in\Gamma(T\mc H)$. In terms of the abstract data, this translates into a gauge freedom. \begin{defi} \label{defi_gauge} Let $\{\mc H,\bg,\bm\ell,\ell^{(2)},\bY\}$ be hypersurface data. Let $z\in\mc{F}^{\star}(\mc H)$ and ${\zeta\in\Gamma(T\mc H)}$. The gauge transformed hypersurface data with gauge parameters $(z,\zeta)$ are \begin{align} \mc{G}_{(z,\zeta)}\left(\bg \right)&\d \bg,\label{transgamma}\\ \mc{G}_{(z,\zeta)}\left( \bm{\ell}\right)&\d z\left(\bm{\ell}+\bg(\zeta,\cdot)\right),\label{tranfell}\\ \mc{G}_{(z,\zeta)}\left( \ell^{(2)} \right)&\d z^2\left(\ell^{(2)}+2\bm\ell(\zeta)+\bg(\zeta,\zeta)\right),\label{transell2}\\ \mc{G}_{(z,\zeta)}\left( \bY\right)&\d z \bY + \bm\ell\otimes_s \dd z +\dfrac{1}{2}\lie_{z\zeta}\bg.\label{transY} \end{align} \end{defi} The set of gauge transformations defines a group with composition law and inverse \cite{Marc1} \begin{align} \mc{G}_{(z_1,\zeta_1)}\circ \mc{G}_{(z_2,\zeta_2)} &= \mc{G}_{(z_1 z_2,\zeta_2+z_2^{-1}\zeta_1)}\label{group}\\ \mc{G}_{(z,\zeta)}^{-1}& = \mc{G}_{(z^{-1},-z\zeta)}\label{gaugelaw} \end{align} and the identity element is $\mc G_{(1,0)}$. A gauge transformation on the data induces another on the contravariant data given by \begin{align} \mc{G}_{(z,\zeta)}\left(P \right) &= P + n^{(2)}\zeta\otimes\zeta-2\zeta\otimes_s n,\label{gaugeP}\\ \mc{G}_{(z,\zeta)}\left( n \right)&= z^{-1}(n-n^{(2)}\zeta),\label{transn}\\ \mc{G}_{(z,\zeta)}\left( n^{(2)} \right)&= z^{-2}n^{(2)}. \end{align} In agreement with its geometric interpretation in the embedded case, \begin{equation} \label{Ktrans} \mc{G}_{(z,\zeta)}\left(\bK \right)= z^{-1}\bK. \end{equation} Hypersurface data admits a torsion-free connection $\ol\nabla$ defined by the conditions \cite{Marc1} \begin{align} \left(\ol\nabla_X\bg\right)(Z,W) &= - \bm\ell(Z) \bK(X,W)- \bm\ell(W) \bK(X,Z),\label{olnablagamma}\\ \left(\ol\nabla_X\bm{\ell}\right)(Z)& = \bY(X,Z) + \bF(X,Z)-\ell^{(2)} \bK(X,Z).\label{olnablaell} \end{align} where \begin{equation} \label{def_F} \bF\d \dfrac{1}{2}\dd\bm\ell. \end{equation} Equations \eqref{olnablagamma} and \eqref{olnablaell} can be thought as a generalization of the Koszul formula. Under a gauge transformation $\ol\nabla$ transforms as \cite{Marc1} \begin{equation} \label{gaugeconnection} \mc G_{(z,\zeta)}\left(\ol\nabla\right) = \ol\nabla + \zeta\otimes \bK. 
\end{equation} In the context of embedded data (with ambient $(\mc M,g)$, rigging $\xi$ and corresponding Levi-Civita connection $\nabla$), it satisfies \cite{Marc1,Marc3} \begin{equation} \label{nablambient} \nabla_X Z = \ol\nabla_X Z - \bK(X,Z)\xi, \end{equation} where $X,Z$ in the LHS of the previous equation are the push forward of $X,Z\in\Gamma(T\mc H)$. Here and in the rest of this paper we will make the (standard) abuse of notation of identifying a vector field and its image under $f_{\star}$ and let the context determine the meaning. Let $\{e_a\}$ be a (local) basis on $\Gamma(T\mc H)$. The derivatives $\nabla_{e_a}e_b$ and $\nabla_{e_a}\xi$ can be decomposed as \cite{Marc1} \begin{align} \nabla_{e_a} e_b &= \ol\Gamma_{ba}^c e_c-\bK_{ab}\xi,\label{nablatt}\\ \nabla_{e_a}\xi &= \left(\dfrac{1}{2}n^{(2)}\ol\nabla_a\ell^{(2)}+(\bY_{ab}+\bF_{ab})n^b\right)\xi + \left(P^{bc}(\bY_{ac}+\bF_{ac})+\dfrac{1}{2}n^b\ol\nabla_a\ell^{(2)}\right)e_b,\label{nablatxi} \end{align} where $\ol\Gamma_{ba}^c$ are the connection coefficients of $\ol\nabla$. Equations \eqref{olnablagamma}-\eqref{olnablaell} together with \eqref{gamman}-\eqref{Pgamma} yields \cite{Marc1} \begin{align} \ol\nabla_a n^b & = P^{bc}\left(\bK_{ac}-n^{(2)}(\bF_{ac}+\bY_{ac})\right)-(\bY_{ac}+\bF_{ac})n^b n^c -n^{(2)} n^b \ol\nabla_a \ell^{(2)},\label{olnablan}\\ \ol\nabla_a P^{bc} & = -P^{cd}(\bY_{ad}+\bF_{ad}) n^b -P^{bd}(\bY_{ad}+\bF_{ad})n^c -n^c n^b \ol\nabla_a\ell^{(2)}.\label{olnablaP} \end{align} \section{Characteristic Hypersurface Data} \label{section_CHD} In this Section we particularize the general results of Section \ref{preliminaries} to the specific setup of characteristic data, which is the central object of this paper. \begin{defi} \label{def_CHD} Let $\mc D=\{\mc H,\bg,\bm\ell,\ell^{(2)},\bY\}$ be hypersurface data of dimension $m$. We say that the set $\mc D$ is ``characteristic hypersurface data'' (CHD) provided that \begin{enumerate} \item $\rad(\bg)\neq\{0\}$ and $\bg$ is semi-positive definite. \item There exists $\ul u\in\mc{F}(\mc H)$ satisfying $\lambda\d n(\ul u)\neq 0$. Such functions are called ``foliation functions'' (FF). \item The leaves $\mc S_{\ul u} \d \left\{p\in\mc H: \ \ul u(p)=\ul u\right\}$ are all diffeomorphic. \end{enumerate} \end{defi} \begin{rmk} All the results of Sections \ref{section_CHD}-\ref{sec_HG} are insensitive to the signature of $\bg$ provided $\rad(\bg)\neq\{0\}$. The condition that $\bg$ is semi-positive definite is only needed in Sec. \ref{sec_CP}. \end{rmk} Let $\mc S$ be the underlying topological space of each $\mc S_{\ul u}$. Then the topology of $\mc H$ is fixed to be a product of the form $\mc H\simeq \mc I\times\mc S$, where $\mc I\subset \real$ is an interval, and $\{\mc S_{\ul u}\}$ defines a foliation on $\mc H$. In this paper we will always assume that $\mc S$ is orientable, and thus $\mc H$. With a foliation on $\mc H$, we can define the following decomposition of the tangent space. Let $p\in\mc H$ and $X\in T_p\mc H$. We say that $X$ is tangent to the leaf $\mc S_{\ul u(p)}$ at the point $p$ provided that $X(\ul u)|_p = 0$. The set of all these vectors defines $T_p\mc S_{\ul u(p)}$. Hence, at every point $p\in\mc H$, the tangent space $T_p\mc H$ decomposes as \begin{equation} \label{decomposition} T_p\mc H = T_p\mc S_{\ul u(p)}\oplus \langle n|_p\rangle. \end{equation} This decomposition induces another on the cotangent space, $T_p^{\star}\mc H = T_p^{\star}\mc S_{\ul u(p)}\oplus \langle \dd\ul u|_p\rangle$. 
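As a simple illustration of the above definitions (not needed for the arguments below, and with the coordinates and rigging chosen purely for concreteness), consider Minkowski spacetime in double null coordinates, $(\mc M,g)=\left(\real^4,\ 2\,\dd u\,\dd v+\delta_{AB}\dd x^A\dd x^B\right)$, the null hyperplane $\mc H=\{v=0\}$ with coordinates $(u,x^A)$, the natural inclusion $f$ and the rigging $\xi=\partial_v$. The corresponding embedded data is \begin{equation*} \bg=\delta_{AB}\dd x^A\dd x^B,\qquad \bm\ell=\dd u,\qquad \ell^{(2)}=0,\qquad \bY=\dfrac{1}{2}f^{\star}\left(\lie_{\partial_v}g\right)=0, \end{equation*} so $\bg$ is semi-positive definite with $\rad(\bg)=\langle\partial_u\rangle$, and equations \eqref{gamman}-\eqref{Pgamma} give $n^{(2)}=0$, $n=\partial_u$ and $P=\delta^{AB}\partial_{x^A}\otimes\partial_{x^B}$. Moreover $\bK=\frac{1}{2}\lie_n\bg=0$, in agreement with the fact that the second fundamental form of a null hyperplane vanishes. Taking the foliation function $\ul u\d u$ one has $\lambda=n(\ul u)=1$ and all the leaves $\mc S_{\ul u}$ are diffeomorphic to $\real^2$, so this is characteristic hypersurface data in the sense of Definition \ref{def_CHD}.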
Now let $T\mc S_{\ul u}=\bigcup_{q\in\mc S_{\ul u}} T_q\mc S_{\ul u}$ and $T\mc S = \bigcup_{\ul u\in\mc I} T\mc S_{\ul u}$. Then, the tangent bundle $T\mc H$ is the direct sum $T\mc H= T\mc S\oplus\langle n\rangle$, and therefore every vector field $X\in\Gamma(T\mc H)$ can be written in a unique way as $X = X_{\para}+ X_{n} n$, with $X_{\para}\in \Gamma(T\mc S)$ and $X_{n}\in\mc F(\mc H)$. A vector field $X\in\Gamma(T\mc H)$ is said to be tangent to the foliation $\{\mc S_{\ul u}\}$ provided that $X_n$ vanishes on $\mc H$. Analogously we have the cotangent bundle decomposition \begin{equation} \label{decocotang} T^{\star}\mc H=T^{\star}\mc S\oplus \langle\dd \ul u\rangle \end{equation} and we can talk about 1-forms tangent to the foliation when they belong to $T^{\star}\mc S$. Finally one can generalize all this to any tensor field. In this paper we will abuse the notation and denote with $T_p\mc S_{\ul u}$ (resp. $T_p^{\star}\mc S_{\ul u}$) both the tangent (resp. cotangent) space of the manifold $\mc S_{\ul u}$ and the subset of $T_p\mc H$ (resp. $T_p^{\star}\mc H$) as defined above. The precise meaning will be clear from the context. Let $\mc D$ be CHD endowed with a foliation function $\ul u$ and consider the unique vector field $N\in\rad(\bg)$ such that $N(\ul u)=1$\footnote{$N$ and $n$ are proportional to each other, and related by $n=n(\ul u)N=\lambda N$.}. Given $v\in \Gamma(T\mc S_{\ul u_0})$ for some $\ul u_0\in\mc I$, we can define a unique vector field $X$ on $\mc H$ by integrating the equation $\lie_N X=0$ with the initial conditions $X_{\para}|_{\mc S_{\ul u_0}} = v$ and $X_n|_{\mc S_{\ul u_0}} =0$. This field is tangent to the foliation because $X(\ul u)=0$ on $\mc S_{\ul u_0}$ and $\lie_N X = 0$ implies $N\left(X(\ul u)\right) = X\left(N(\ul u)\right)=0$, so $X(\ul u)$ is constant along $N$. By virtue of decomposition \eqref{decomposition}, a torsion-free connection $\ol\nabla^{\mc S}$ on the leaves $\{\mc S_{\ul u}\}$ can be defined by \begin{equation} \label{decompnabla} \ol\nabla_X Z = \ol\nabla_X^{\mc S} Z - \bQ(X,Z)n, \hspace{1cm} X,Z\in \Gamma(T\mc S_{\ul u}), \end{equation} where $\ol\nabla_X^{\mc S} Z\in\Gamma(T\mc S_{\ul u})$ and $\bQ$ is a symmetric\footnote{If $X,Z\in \Gamma(T\mc S_{\ul u})$, then $X(\ul u)=Z(\ul u)=0$ and therefore $[X,Z](\ul u)=0$, so $[X,Z]\in \Gamma(T\mc S_{\ul u})$. Since $\ol\nabla$ is torsion-free, $\ol\nabla_X Z -\ol\nabla_Z X =[X,Z]$ is tangent to the foliation. This proves both that $\ol\nabla^{\mc S}$ is torsion-free and that $\bQ$ is symmetric.}, two-covariant tensor field on $\mc S_{\ul u}$. In order to identify this tensor, we recall that $\lambda= n(\ul u)\neq 0$ and $\dd\ul u(\ol\nabla_X^{\mc S} Z)=0$, so \begin{equation} \label{Q1} \lambda \bQ(X,Z) = -\dd\ul u \left(\ol\nabla_X Z\right) = -\ol\nabla_X\left(\dd\ul u(Z)\right)+\left(\ol\nabla_X\dd\ul u\right)(Z) = \left(\ol\nabla_X\dd\ul u\right)(Z), \end{equation} where in the last equality we used that $\dd\ul u(Z)=Z(\ul u)=0$. Let $\phi_{\ul u}:\mc S_{\ul u}\hookrightarrow\mc H$ be an embedding. Defining the one-form $\bm\ell_{\para} \d \phi_{\ul u}^{\star}\bm\ell\in T^{\star}\mc S_{\ul u}$ we can decompose \begin{equation} \label{elldu} \dd\ul u = \lambda \left(\bm\ell-\bm\ell_{\para}\right), \end{equation} which follows at once by applying both sides to tangential vectors and to $n$. Here we are abusing the notation by identifying $\bm\ell_{\para}$ with the one-form on $\mc H$ that coincides with $\bm\ell_{\para}$ acting over tangent vectors and vanishes when it acts on $n$. 
Then, \begin{align*} \left(\ol\nabla_X\dd\ul u\right)(Z)& = \lambda \left(\ol\nabla_X\bm\ell\right)(Z) - \lambda\left(\ol\nabla_X\bm\ell_{\para}\right)(Z)\\ & = \lambda \left(\ol\nabla_X\bm\ell\right)(Z) - \lambda X\left(\bm\ell_{\para}(Z)\right) + \lambda \bm\ell_{\para}\big(\ol\nabla_X^{\mc S} Z - \bQ(X,Z)n\big)\\ &=\lambda \left(\ol\nabla_X\bm\ell\right)(Z) - \lambda\big(\ol\nabla^{\mc S}_X\bm\ell_{\para}\big)(Z), \end{align*} since $\bm\ell_{\para}(n)=0$. Inserting this together with \eqref{olnablaell} into \eqref{Q1}, it follows \begin{equation} \label{tensorQ} \bQ(X,Z) = \bY(X,Z) + \bF(X,Z) - \ell^{(2)} \bK(X,Z) - \big(\ol\nabla^{\mc S}_X\bm\ell_{\para}\big)(Z). \end{equation} Let $\mc D$ be CHD and $p\in \mc H$. Since $\rad(\bg)|_p=\langle n|_p\rangle$ the tensor $h\d\phi_{\ul u}^{\star}\bg$ is a metric on $\mc S_{\ul u}$. It is convenient to have an explicit relation between its Levi--Civita connection, $\nabla^h$, and the induced connection $\ol\nabla^{\mc S}$. First we prove some intermediate results. \begin{prop} \label{nablah} Let $h=\phi_{\ul u}^{\star}\bg$ be the induced metric on $\mc S_{\ul u}$ and ${\bm\chi}\d \phi_{\ul u}^{\star} \bK$. Then, \begin{equation} \label{eqnablah} \ol\nabla^{\mc S}_X h = -2 \bm\ell_{\para}\otimes_s {\bm\chi}(X,\cdot) \hspace{0.5cm} \forall X\in\Gamma(T\mc S). \end{equation} \begin{proof} Let $V,X,Z\in \Gamma(TS_{\ul u})$. Then, \begin{align*} \big(\ol\nabla^{\mc S}_X h\big) (V,Z)&= \ol\nabla^{\mc S}_X \left(h(V,Z)\right) - h (\ol\nabla^{\mc S}_X V,Z)-h(V, \ol\nabla^{\mc S}_X Z)\\ &=\ol\nabla_X \left(\bg(V,Z)\right) - \bg(\ol\nabla_X V,Z)-\bg(V, \ol\nabla_X Z)\\ &=\phi_{\ul u}^{\star}\left(\ol\nabla_X\bg\right)(V,Z). \end{align*} which, upon inserting \eqref{olnablagamma}, gives \eqref{eqnablah}. \end{proof} \end{prop} In order to obtain a general relation between $\ol\nabla^{\mc S}$ and $\nabla^h$ we first determine how $\ol\nabla^{\mc S}$ transforms under a gauge transformation. \begin{prop} \label{gaugeconection} Let $\mc D=\{\mc H,\bg,\bm\ell,\ell^{(2)},\bY\}$ be CHD and $(z,\zeta)\in\mc F^{\star}(\mc H)\times\Gamma(T\mc H)$. Then, \begin{equation*} \mc{G}_{(z,\zeta)}(\ol\nabla^{\mc S}) = \ol\nabla^{\mc S} + \zeta_{\para}\otimes {\bm\chi}. \end{equation*} \begin{proof} Let $\ol\nabla'=\mc G_{(z,\zeta)}\ol\nabla$. Since by \eqref{gaugeconnection} $$\ol\nabla'_X Z = \ol\nabla_X Z +\bK(X,Z)\zeta = \ol\nabla_X^{\mc S} Z + \bK (X,Z)\zeta_{\para} + \left(\bK(X,Z)\zeta_n-\bQ(X,Z)\right)n$$ for every $X,Z\in\Gamma(T\mc S)$, the result follows. \end{proof} \end{prop} Now we can find the relation between $\nabla^{h}$ and the induced connection $\ol\nabla^{\mc S}$ by making use of this gauge transformation. \begin{prop} \label{nablaSandnablah} Let $h=\phi^{\star}_{\ul u}\bg$ be the induced metric on $\mc S_{\ul u}$, $\nabla^{h}$ its Levi--Civita connection and $\ol\nabla^{\mc S}$ the induced one. Then, $$\ol\nabla^{\mc S} = \nabla^{h}+\ell^{\sharp}\otimes {\bm\chi},$$ where $\ell^{\sharp}\d h^{\sharp}(\bm\ell_{\para},\cdot)$ and $h^{\sharp}$ is the inverse metric of $h$. \begin{proof} Since $\ol\nabla^{\mc S}$ is torsion-free, by Proposition \ref{nablah} the connection $\ol\nabla^{\mc S}$ coincides with $\nabla^{h}$ when $\bm\ell_{\para}=0$. 
Given CHD $\mc D=\{\mc H,\bg,\bm\ell,\ell^{(2)},\bY\}$, the transformed data $\mc D'=\mc G_{(1,-\ell^{\sharp})}\mc D$ satisfies $\bm\ell_{\para}'=0$, so $$\ol\nabla^{\mc S} = \mc G_{(1,\ell^{\sharp})}\mc G_{(1,-\ell^{\sharp})}(\ol\nabla^{\mc S}) = \mc G_{(1,\ell^{\sharp})}(\nabla^{h}) = \nabla^{h}+ \ell^{\sharp}\otimes {\bm\chi},$$ where we have used that $(1,-\ell^{\sharp})=(1,\ell^{\sharp})^{-1}$ (see \eqref{gaugelaw}) and Proposition \ref{gaugeconection}. \end{proof} \end{prop} In the following sections we will need the transformation law of the curvature tensor $\ol R$ of the connection $\ol\nabla$ under a gauge transformation. This can be computed from the transformation law \eqref{gaugeconnection}. \begin{prop} \label{curvatura} Let $\mc D=\{\mc H,\bg,\bm\ell,\ell^{(2)},\bY\}$ be hypersurface data and $(z,\zeta)\in\mc F^{\star}(\mc H)\times\Gamma(T\mc H)$. Let $\ol R$ be the curvature tensor of $\ol\nabla$. Then, $$\mc G_{(z,\zeta)}\left(\ol R^{f}{}_{bcd}\right) = \ol R^f{}_{bcd}+2\ol\nabla_{[c}\left(\zeta^f \bK_{d]b}\right)+2\zeta^f\zeta^g \bK_{g[c}\bK_{d]b}.$$ \begin{proof} Let ${}^{(1)}\nabla$ and ${}^{(2)}\nabla$ be two connections and let $S\d {}^{(2)}\nabla-{}^{(1)}\nabla$. Then the curvatures of both connections are related by (see e.g. \cite{Wald}) $${}^{(2)}R^f{}_{bcd} = {}^{(1)}R^f{}_{bcd} + 2{}^{(1)}\nabla_{[c}S^f{}_{d]b}+2S^f{}_{g[c}S^g{}_{d]b}.$$ Particularizing to ${}^{(2)}\nabla=\mc G_{(z,\zeta)}\left(\ol\nabla\right)$, ${}^{(1)}\nabla = \ol\nabla$ and $S=\zeta\otimes \bK$ (see \eqref{gaugeconnection}), the result follows at once. \end{proof} \end{prop} From equation \eqref{olnablan} with $n^{(2)}=0$ it follows \begin{equation} \label{normalndeco} \ol\nabla_X n = \chi^{\sharp}(X) - \left(\bPi(X,n)+{\bm\chi}(X,\ell^{\sharp})\right)n, \quad X\in\Gamma(T\mc S), \end{equation} where $\chi^{\sharp}$ is the endomorphism defined by $h\big(\chi^{\sharp}(X),\cdot) = {\bm\chi}(X,\cdot)$ and we introduce the tensor $\bPi\d \bY+\bF$ because it will arise frequently below. An interesting property of $\bPi$ is \begin{equation} \label{Pipi} \bPi(n,X) -\bPi(X,n) = \left(\lie_n\bm\ell\right)(X)=\left(\lie_n\bm\ell_{\para}\right)(X)+\ol\nabla_X^{\mc S}\log\left|\lambda\right|. \end{equation} The first equality follows from the Cartan formula $\lie_n \bm\ell = \dd(\bm\ell(n)) + \dd\bm\ell (n,\cdot)=\dd\bm\ell (n,\cdot)$, \begin{equation} \label{PiLienell} \bPi(n,X) -\bPi(X,n) = 2 \bF(n,X) = \dd\bm\ell(n,X) =\left(\lie_n\bm\ell\right)(X), \end{equation} and the second follows from $$\dd\bm\ell(n,X) = \lambda^{-1}\dd\lambda(X) + \dd\bm\ell_{\para}(n,X) = X\left(\log\left|\lambda\right|\right) + \left(\lie_n\bm\ell_{\para}\right)(X),$$ where we have used the differential of $\bm\ell = \lambda^{-1}\dd\ul u + \bm\ell_{\para}$ and that $\left(\dd\lambda\wedge\dd\ul u\right)(n,X)= -\lambda\dd\lambda(X),$ as well as the Cartan formula applied to $\bm{\ell}_{\para}$.\\ Next we compute the gauge transformation of $\bPi(\cdot,n)$. From \eqref{olnablaell} and the property $\bK(\cdot,n)=0$ we have the equality $\bPi(\cdot,n)=\left(\ol\nabla\bm\ell\right)(n)$. Using the transformation law of $\ol\nabla$ in \eqref{gaugeconnection}, \begin{align*} \bPi'(\cdot,n') = \left(\ol\nabla'\bm\ell'\right)(n')= \left(\ol\nabla\bm\ell'\right)(n')=\left(\ol\nabla\bm\ell\right)(n) + z^{-1} dz + \left(\ol\nabla\bg(\zeta,\cdot)\right)(n).
\end{align*} The last term is $\left(\ol\nabla\bg(\zeta,\cdot)\right)(n) = -\bK(\cdot,\zeta)$ because of \eqref{olnablagamma} and recalling $\bg(n,\cdot)=0$ and $\bm\ell(n)=1$. So finally \begin{equation} \label{transPiXn} \bPi'(\cdot,n') = \bPi(\cdot,n) - \bK(\cdot,\zeta) + d\log|z|. \end{equation} We now can relate the curvature tensor of $\ol\nabla^{\mc S}$ with that of $\ol\nabla$. \begin{prop}[Gauss identity] \label{gauss} Let $X,Z,V,W\in\Gamma(T\mc S_{\ul u})$. Then, \begin{equation} \label{gausseq} \bg( V, \ol R(X,W)Z) = h(V,R^{\mc S}(X,W)Z)-\bQ(W,Z){\bm\chi}(X,V)+\bQ(X,Z){\bm\chi}(W,V), \end{equation} where $R^{\mc S}$ is the curvature of $\ol\nabla^{\mc S}$ and $\bQ$ is given by \eqref{tensorQ}. \begin{proof} By definition of the curvature tensor and decompositions \eqref{decompnabla} and \eqref{normalndeco}, \begin{align*} \ol R(X,W)Z & = \ol\nabla_X\ol\nabla_W Z -\ol\nabla_W\ol\nabla_X Z-\ol\nabla_{[X,W]} Z\\ &= \ol\nabla_X\big(\ol\nabla^{\mc S}_W Z-\bQ(W,Z)n\big)-\ol\nabla_W\big(\ol\nabla^{\mc S}_X Z-\bQ(X,Z)n\big) - \ol\nabla^{\mc S}_{[X,W]}Z+\bQ\left([X,W],Z\right)n\\ &=\ol\nabla_X^{\mc S}\ol\nabla_W^{\mc S} Z - \bQ\big(X,\ol\nabla_W^{\mc S} Z\big)n-\ol\nabla^{\mc S}_X\left(\bQ(W,Z)\right) n - \bQ(W,Z)\big(\ol\nabla^{\bot}_X n+\chi^{\sharp}(X)\big) \\ &\quad - \left( X\leftrightarrow W\right) - \ol\nabla^{\mc S}_{[X,W]}Z+\bQ\left([X,W],Z\right)n, \end{align*} which, after identifying $R^{\mc S}(X,W)Z =\ol\nabla_X^{\mc S}\ol\nabla_W^{\mc S} Z -\ol\nabla_W^{\mc S}\ol\nabla_X^{\mc S} Z- \ol\nabla^{\mc S}_{[X,W]}Z$, and using that $\ol\nabla^{\mc S}$ is torsion-free, $[X,W]=\ol\nabla^{\mc S}_X W - \ol\nabla^{\mc S}_W X$, simplifies to \begin{align*} \ol R(X,W)Z & = R^{\mc S}(X,W)Z -\chi^{\sharp}(X)\bQ(W,Z) +\chi^{\sharp}(W)\bQ(X,Z) \\ &\quad +\big(\big(\ol\nabla^{\mc S}_W \bQ\big)(X,Z)-\big(\ol\nabla^{\mc S}_X \bQ\big)(W,Z)\big)n +\bQ(X,Z)\ol\nabla^{\bot}_W n-\bQ(W,Z)\ol\nabla^{\bot}_X n. \end{align*} The $\bg$-product of this expression with $V$ gives \eqref{gausseq} after taking into account $\bg(n,\cdot)=0$ and the definition of $\chi^{\sharp}$. \end{proof} \end{prop} To conclude this section we define a connection induced from $\ol\nabla$. Let $X$ be a vector field tangent to the foliation. Then one can decompose $\ol\nabla_n X$ as \begin{equation} \label{nablan} \ol\nabla_n X = \wt\nabla_n X +{\bm{\eta}}(X)n, \end{equation} where $\wt\nabla_n X$ is tangent to the foliation and, as we show next, ${\bm{\eta}}\in\Gamma(T^{\star}\mc S_{\ul u})$ given by \begin{equation} \label{Psin} \begin{aligned} {\bm{\eta}}(X)&=-\bPi(X,n)-{\bm\chi}(X,\ell^{\sharp})-X\left(\log\left|\lambda\right|\right) \\ &= \left(\lie_n\bm\ell_{\para}\right)(X)-\bPi(n,X)-{\bm\chi}(X,\ell^{\sharp}). \end{aligned} \end{equation} Indeed, $\lambda{\bm{\eta}}(X) = \dd\ul u\left(\ol\nabla_n X\right)=\ol\nabla_n\left(\dd\ul u(X)\right) - \left(\ol\nabla_n\dd\ul u\right)(X)$, and since $X(\ul u)=0$ and the Hessian of a function is symmetric, it follows $\lambda{\bm{\eta}}(X) = -\left(\ol\nabla_X \dd\ul u\right)(n) = \dd\ul u\left(\ol\nabla_X n\right)-\ol\nabla_X\left(\dd\ul u(n)\right)$. Then, making use of \eqref{normalndeco} and $\dd\ul u(n)=\lambda\neq 0$, the first equality in \eqref{Psin} follows. The second is a direct consequence of \eqref{Pipi}. The connection $\wt\nabla_n$ extends to a covariant derivative acting on general tensors tangent to the foliation. 
For example, if $\bm{\alpha}\in\Gamma(T^{\star}\mc S)$, we can define \begin{equation} \label{nablatildeeta} \big(\wt{\nabla}_n\bm{\alpha}\big)(X) \d n\left(\bm{\alpha}(X)\right)-\bm{\alpha}\big(\wt\nabla_n X\big), \end{equation} and analogously for any other tensor field tangent to the foliation. A key property of the connection $\wt\nabla_n$ is that it is independent of the tensor $\bY$. Indeed, taking into account equations \eqref{normalndeco}, \eqref{nablan} and \eqref{Pipi} and recalling that $\ol\nabla$ is torsion-free, $$[X,n] = \ol\nabla_X n-\ol\nabla_n X = \chi^{\sharp}(X)-\wt\nabla_n X +X(\log|\lambda|)n,$$ from which it follows that \begin{equation} \label{indpendentY} \wt\nabla_n X = \lie_n X +\chi^{\sharp}(X)+X(\log|\lambda|)n, \end{equation} which does not involve the tensor $\bY$. \section{Covariance of the constraint tensors} \label{section_gauge} Let $\mc D$ be embedded null hypersurface data in an ambient manifold $(\mc M,g)$ with rigging $\xi$. It is possible to write all the tangent components of the Ricci tensor of $g$ on the hypersurface $\mc H$ in terms of the data. In this section we obtain those expressions and promote them to abstract tensors without mention of any ambient space. By construction these tensors will coincide with the tangent components of the Ricci tensor when the data is embedded.\\ Assume that the data is embedded in an ambient manifold whose Riemann tensor is $R$. By Corollary 4 of \cite{Marc1}, \begin{align} R_{\alpha\beta\mu\nu}\xi^{\alpha}e^{\beta}_be^{\mu}_ce^{\nu}_d&\st{\mc H}{=} \bm\ell_a\ol{R}^a{}_{bcd}+2\ell^{(2)}\ol{\nabla}_{[d} \bK_{c]b} +\bK_{b[c}\ol\nabla_{d]} \ell^{(2)}, \label{proj1}\\ R_{\alpha\beta\mu\nu} e^{\alpha}_ae^{\beta}_be^{\mu}_ce^{\nu}_d&\st{\mc H}{=} \bg_{af}\ol{R}^f{}_{bcd}+2\ol\nabla_{[d}\left(\bK_{c]b}\bm\ell_a\right)+2\ell^{(2)}\bK_{b[c}\bK_{d]a},\label{proj2} \end{align} where $\{e_a\}$ is a (local) frame on $\mc H$ and the Greek letters denote ambient indices. We promote the right-hand sides to new tensors on $\mc H$ and define $A$ and $B$ on any hypersurface data by means of \begin{align} A_{bcd}&\d\bm\ell_a\ol{R}^a{}_{bcd}+2\ell^{(2)}\ol{\nabla}_{[d} \bK_{c]b} +\bK_{b[c}\ol\nabla_{d]} \ell^{(2)},\label{A} \\ B_{abcd}&\d\bg_{af}\ol{R}^f{}_{bcd}+2\ol\nabla_{[d}\left(\bK_{c]b}\bm\ell_a\right)+2\ell^{(2)}\bK_{b[c}\bK_{d]a},\label{B} \end{align} so that for embedded data it holds $R_{\alpha\beta\mu\nu}\xi^{\alpha}e^{\beta}_be^{\mu}_ce^{\nu}_d \st{\mc H}{=} A_{bcd}$ and $R_{\alpha\beta\mu\nu} e^{\alpha}_ae^{\beta}_be^{\mu}_ce^{\nu}_d \st{\mc H}{=} B_{abcd}$. In Appendix \ref{appendix} the gauge behaviour and the symmetries of the tensors $A$ and $B$ are studied. From \eqref{Asharp} and $n^{(2)}=0$, the inverse metric $g^{\alpha\beta}$ on $\mc H$ is given by \begin{equation} \label{inverse} g^{\alpha\beta}\st{\mc H}{=}P^{cd} e_c^{\alpha} e_d^{\beta} + n^c (e_c^{\alpha}\xi^{\beta}+\xi^{\alpha} e_c^{\beta}), \end{equation} and thus the pullback to $\mc H$ of the ambient Ricci tensor can be written as \begin{align*} g^{\alpha\beta} R_{\alpha \mu \beta \nu} e_a^{\mu} e_b^{\nu}&\st{\mc H}{=}\left[P^{cd} e_c^{\alpha} e_d^{\beta} + n^c (e_c^{\alpha}\xi^{\beta}+\xi^{\alpha} e_c^{\beta})\right] R_{\alpha \mu \beta \nu} e_a^{\mu} e_b^{\nu}\\ &\st{\mc H}{=} B_{cadb}P^{cd}+A_{bca}n^c + A_{acb}n^c\\ &\st{\mc H}{=} B_{acbd}P^{cd}- (A_{bac}+A_{abc})n^c, \end{align*} where in the last equality we used the symmetries of $B$ in Prop. \ref{symmetries}.
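As a quick consistency check of these definitions (not needed in what follows), consider data embedded in Minkowski spacetime as a null hyperplane with a constant null rigging, such as the example discussed in Section \ref{section_CHD}, for which $\bK=0$, $\bY=\bF=0$ and $\ell^{(2)}=0$. The flat coordinate connection is torsion-free and satisfies \eqref{olnablagamma}-\eqref{olnablaell}, so it coincides with $\ol\nabla$ and hence $\ol R=0$. All terms in \eqref{A} and \eqref{B} then vanish, i.e. $A=B=0$, in agreement with \eqref{proj1}-\eqref{proj2} and the flatness of the ambient Riemann tensor.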
The previous computation leads naturally to the introduction of the abstract tensor \begin{equation} \label{ricci} {\bm{\mc R}}_{ab}\d B_{acbd}P^{cd}- (A_{bac}+A_{abc})n^c \end{equation} in the context of null hypersurface data. Inserting \eqref{A} and \eqref{B}, \begin{align} {\bm{\mc R}}_{ab}&= \left(\bg_{af}\ol{R}^f{}_{cbd} + 2\ol\nabla_{[d}\left(\bK_{b]c}\bm\ell_a\right)+2\ell^{(2)} \bK_{c[b}\bK_{d]a}\right)P^{cd}\label{ricci2}\\ &\quad\, - \left(\bm\ell_d\ol{R}^d{}_{bac} + 2\ell^{(2)}\ol\nabla_{[c}\bK_{a]b} + \bK_{b[a}\ol\nabla_{c]}\ell^{(2)} + \bm\ell_d\ol{R}^d{}_{abc} + 2\ell^{(2)}\ol\nabla_{[c}\bK_{b]a} + \bK_{a[b}\ol\nabla_{c]}\ell^{(2)}\right)n^c.\nonumber \end{align} By construction, when the data is embedded ${\bm{\mc R}}_{ab}$ coincides with the pull-back of the ambient Ricci tensor to the hypersurface. As expected from the embedded case, ${\bm{\mc R}}_{ab}$ is symmetric. Indeed, using that $P$ is symmetric and item 5. of Prop. \ref{symmetries} one gets \begin{align*} {\bm{\mc R}}_{ab} & = \dfrac{1}{2}\left(B_{acbd}+ B_{adbc}\right)P^{cd} -\left(A_{bac}+A_{abc}\right)n^c\\ &= \dfrac{1}{2}\left(B_{bdac}+ B_{bcad}\right)P^{cd} - \left(A_{bac}+A_{abc}\right)n^c\\ &={\bm{\mc R}}_{ba}. \end{align*} For future convenience we define the following two contractions of ${\bm{\mc R}}_{ab}$ \begin{align} H&\d -\dfrac{1}{2}P^{ab}{\bm{\mc R}}_{ab}=-\dfrac{1}{2} B_{cadb} P^{cd} P^{ab}+A_{bac}P^{ab}n^c,\label{hamil2}\\ {\bm J}_b &\d {\bm{\mc R}}_{ab}n^a=B_{acbd} n^a P^{cd}-A_{abc} n^c n^a .\label{momentum2} \end{align} For obvious reasons we will refer to Definitions \eqref{ricci}, \eqref{hamil2} and \eqref{momentum2} as \textbf{constraint tensors}. In the following Proposition we introduce a special class of gauges that will be used frequently below. \begin{prop} \label{propcaracteristico} Let $\mc D$ be CHD and $\ul u$ a FF. Then there exists $(z,\zeta)\in\mc F^{\star}(\mc H)\times \Gamma(T\mc H)$ such that $\mc D'=\mc G_{(z,\zeta)}\mc D$ satisfies the following properties: \begin{enumerate} \item $\bm\ell'(X)=0$ for all $X\in\Gamma(T\mc S)$, \item $\ell'{}^{(2)}=0$. \end{enumerate} The gauge transformations respecting 1. and 2. are $\mc{G}_{(z,0)}$ for arbitrary $z\in\mc{F}^{\star}(\mc H)$. Moreover, there exists a unique pair $(z,\zeta)\in\mc F^{\star}(\mc H)\times \Gamma(T\mc H)$ such that, in addition, \begin{enumerate} \item[3.] $\lambda'\d n'(\ul u)=1$. \end{enumerate} \begin{proof} Directly from the transformations in Def. \ref{defi_gauge} and the decomposition $\zeta = \zeta_{\para} + \zeta_n n$, conditions 1. and 2. amount to \begin{align} \bm\ell(X)+h(\zeta_{\para},X)&=0,\label{xinormal2}\\ \ell^{(2)}+2\bm\ell(\zeta_{\para})+2\zeta_n+h(\zeta_{\para},\zeta_{\para})&=0.\label{xinull2} \end{align} Equation \eqref{xinormal2} admits a unique solution for $\zeta_{\para}$, which, substituted into equation \eqref{xinull2}, completely fixes $\zeta_n$. Therefore, there always exists a gauge satisfying conditions (1) and (2). Any transformation of the form $(z,0)$ keeps these two conditions invariant and, by the uniqueness of $\zeta$, it is clear that no other transformation does. To fulfill (3) simply apply an additional gauge transformation with $(z=\lambda,\zeta=0)$, and use \eqref{transn}. Uniqueness of the gauge satisfying (1), (2), (3) is immediate from the argument. \end{proof} \end{prop} \begin{defi} \label{def_ch_gauge} A gauge in which (1) and (2) hold is called \textbf{characteristic gauge} (CG). The gauge satisfying (1), (2), (3) is called \textbf{adapted characteristic gauge} (ACG).
We emphasize that the ACG is \textit{unique} once the FF $\ul u$ is chosen. A change in $\ul u$ affects the corresponding ACG. \end{defi} \begin{rmk} \label{rmkCG} When the data is embedded in an ambient manifold $(\mc M,g)$ with rigging $\xi$, the abstract conditions (1) and (2) are equivalent to $\xi$ being orthogonal to the leaves and null, respectively. \end{rmk} The proof of Proposition \ref{propcaracteristico} has the following immediate Corollary. \begin{cor} \label{CGcorollary} Let $\mc D$ be CHD and $\mc S\subset\mc H$ any section. Let $f\in\mc F(\mc S)$ and $\bm\alpha\in \Gamma(T^{\star}\mc S)$. Then there always exists a gauge in which $$\bm\ell_{\para}|_{\mc S}=\bm\alpha, \hspace{1cm} \ell^{(2)}|_{\mc S}=f.$$ Moreover, the freedom of this gauge is parametrized by the pair $(z,\zeta)$ satisfying $\zeta|_{\mc S}=0$. \end{cor} \begin{obs} \label{observación} By Proposition \ref{nablah}, in a CG the induced connection on the foliation, $\ol\nabla^{\mc S}$, coincides with the Levi--Civita connection, $\nabla^h$. Moreover $\bm\ell_{\para}=0$ and hence $\dd \ul u=\lambda\bm\ell$ by \eqref{elldu}. Then, $\dd\lambda\wedge\bm\ell+\lambda\dd\bm\ell=0$, so $\bF=-\frac{1}{2\lambda}\dd\lambda\wedge\bm\ell$ and hence $\bPi_{AB}=\bY_{AB}$, where here capital Latin letters denote abstract indices on the foliation. Moreover, from equation \eqref{tensorQ}, $\bQ_{AB}=\bY_{AB}$ in a CG and therefore the Gauss identity \eqref{gausseq} takes the form \begin{equation} \label{gaussfacil} \bg( V, \ol R(X,W)Z) = h(V,R^{h}(X,W)Z)-\bY(W,Z){\bm\chi}(X,V)+\bY(X,Z){\bm\chi}(W,V). \end{equation} \end{obs} Next we prove that the constraint tensors are gauge-covariant. This is the expected behaviour if one thinks of the data as embedded. However, we prove this statement in full generality without assuming the existence of any ambient spacetime. \begin{teo} \label{teo_gauge} Let $\mc D=\{\mc H,\bg,\bm\ell,\ell^{(2)},\bY\}$ be CHD and $(z,\zeta)\in\mc F^{\star}(\mc H)\times\Gamma(T\mc H)$. Then, given a gauge transformation $\mc D' = \mc G_{(z,\zeta)}\mc D$, the tensors $H$, ${\bm J}$ and ${\bm{\mc R}}_{ab}$ transform as: \begin{enumerate} \item $H' = H + \zeta^a{\bm J}_a$, \item ${\bm J}'_a = z^{-1} {\bm J}_a$, \item ${\bm{\mc R}}_{ab}'={\bm{\mc R}}_{ab}$. \end{enumerate} \begin{proof} Suppose that (3) is true. Then, \eqref{transn} and \eqref{gaugeP} imply \begin{align*} {\bm J}'_a &= {\bm{\mc R}}_{ab}' n'{}^b = z^{-1} {\bm{\mc R}}_{ab} n^b = z^{-1} {\bm J}_a,\\ H' &= -\dfrac{1}{2}P'{}^{ab} {\bm{\mc R}}'_{ab} = -\dfrac{1}{2} P^{ab} {\bm{\mc R}}_{ab} + {\bm{\mc R}}_{ab}\zeta^an^b = H + {\bm J}_a\zeta^a, \end{align*} where in the equation for $H'$ we use the symmetry of ${\bm{\mc R}}_{ab}$. So it suffices to show (3). Using again \eqref{transn}-\eqref{gaugeP} as well as the transformation laws of $A$ and $B$ of Proposition \ref{symmetries}, \begin{align*} {\bm{\mc R}}'_{ab} & = B_{cadb}' P'{}^{cd}-n'{}^c (A_{bac}'+A_{abc}')\\ &={\bm{\mc R}}_{ab}-\zeta^c n^d B_{cadb}-\zeta^d n^c B_{cadb}-\zeta^d n^c B_{dbac}-n^c\zeta^d B_{dabc}\\ &={\bm{\mc R}}_{ab}, \end{align*} where the last equality follows from the symmetries of Proposition \ref{symmetries}. \end{proof} \end{teo} Once the constraint tensors have been defined, the next step is to write them in a frame adapted to the foliation. Let $\{e_A\}$ be a (local) basis of $T\mc S_{\ul u}$ with dual basis $\{\theta^A\}$, i.e., $\theta^A(e_B)=\delta^A_B$, where we recall that capital Latin letters denote abstract indices on the foliation.
Then $\{n,e_A\}$ can be considered as a (local) basis of $T\mc S_{\ul u}\oplus \langle n\rangle$ with dual basis $\{\bm q\d \lambda^{-1}\dd\ul u,\theta^A\}$, where as before $\lambda=n(\ul u)$ and $\bm{q}(n)=1$, $\bm{q}(e_A)=0$, $\theta^A(n)=0$. This decomposition motivates the following definitions \begin{equation} \label{constrainttensors} J(n)\d {\bm J}_a n^a,\hspace{1cm} {\bm J}_A\d {\bm J}_a e^a_A\hspace{1cm} \text{and} \hspace{1cm} {\bm{\mc R}}_{AB}\d {\bm{\mc R}}_{ab}e^a_A e^b_B. \end{equation} These objects are not all independent as shown next. \begin{prop} \label{constraint} Let $\mc D$ be CHD and let $H$, $J(n)$, ${\bm J}_A$ and ${\bm{\mc R}}_{AB}$ be the constraint tensors as before. Let $\ell^{\sharp}\d h^{\sharp}(\bm\ell_{\para},\cdot)$, $\ell^A \d (\ell^{\sharp})^A$ and $\ell_{\sharp}^{(2)}\d h_{AB}\ell^A\ell^B$. Then, the following identity holds \begin{equation} \label{RJH} h^{AB}{\bm{\mc R}}_{AB}-2{\bm J}(\ell^{\sharp})+\big(\ell_{\sharp}^{(2)}-\ell^{(2)}\big)J(n)+2H=0. \end{equation} \begin{proof} In the frame $\{n,e_A\}$ we can decompose \begin{align} \bm{\ell}&=\bm q+\bm\ell_A\theta^A,\\ P&=P^{AB}e_A\otimes e_B + 2P^{n\, A} n\otimes_s e_A + P^{n\, n}n\otimes n. \end{align} Since in this basis $n^A=0$ and $\bg_{n \, b}=0$ it follows from \eqref{Pgamma} that $P^{AB}\bg_{BC}=\delta^A_C$, and hence we can identify the components $P^{AB}$ with the inverse metric $h^{\sharp}$, that is, $P^{AB}=h^{AB}$. To compute $P^{n\,A}$ and $P^{n\, n}$ we first note that \eqref{Pell}, namely $P(\cdot,\bm\ell)=-\ell^{(2)}n$, gives $P(\theta^A,\bm\ell)=0$ and $P(\bm\ell,\bm\ell)=-\ell^{(2)}$ (by \eqref{ell(n)}). Hence, \begin{align*} P^{n\,A}&=P(\bm q,\theta^A) = P(\theta^A,\bm\ell-\bm\ell_B\theta^B) = -\bm\ell_BP^{AB}=-h^{AB}\bm\ell_B=- \ell^A,\\ P^{n\,n}&=P(\bm q,\bm q)=P(\bm\ell,\bm\ell)-2P(\bm\ell,\bm\ell_A\theta^A)+P(\bm\ell_A\theta^A,\bm\ell_B\theta^B) = \ell_{\sharp}^{(2)}-\ell^{(2)}. \end{align*} Inserting this decomposition into the definition of $H$ in \eqref{hamil2}, \begin{align*} -2H &= P^{AB}{\bm{\mc R}}_{AB} + 2P^{n\,A} {\bm{\mc R}}_{A\,n}+P^{n\,n}{\bm{\mc R}}_{n\,n}\\ &=h^{AB}{\bm{\mc R}}_{AB}-2{\bm J}(\ell^{\sharp})+(\ell_{\sharp}^{(2)}-\ell^{(2)})J(n), \end{align*} so \eqref{RJH} is established. \end{proof} \end{prop} A by-product of the proof is that in a general gauge the tensor $P$ decomposes as \begin{equation} \label{Pdecomposition} P = h^{AB} e_A\otimes e_B -2\ell^A n\otimes_s e_A -\big(\ell^{(2)}-\ell_{\sharp}^{(2)}\big) n\otimes n. \end{equation} In a CG, where $\ell^{(2)}=0$ and $\ell^{\sharp}=0$, this decomposition simplifies to \begin{equation} \label{PAB} P=h^{AB} e_A\otimes e_B, \end{equation} so the previous identity reduces to \begin{equation} \label{RJHCG} h^{AB}{\bm{\mc R}}_{AB}+2H=0. \end{equation} We conclude the section by computing two contractions of the curvature $\ol R$ with $\bm\ell$ and $n$ needed later. \begin{lema} \label{lema} Let $\mc D=\{\mc H,\bg,\bm\ell,\ell^{(2)},\bY\}$ be CHD. 
Then, \begin{align} \bm\ell_a \ol R^a{}_{bcd}&=2\ol\nabla_{[d} \bPi_{c]b}+2\bK_{b[d}\ol\nabla_{c]} \ell^{(2)}+2\ell^{(2)}\ol\nabla_{[c}\bK_{d]b}\label{lemalR}\\ n^c \bm\ell_a \ol R^a{}_{bcd} &= \ol\nabla_d\left(\bPi_{cb}n^c\right)-\ol\nabla_n \bPi_{db}-P^{ca}\bK_{ad}\bPi_{cb}+\bPi_{cb}\bPi_{da} n^c n^a+\bK_{db}\ol\nabla_n \ell^{(2)}\label{nlRgeneral}\\ &\quad +\ell^{(2)}\left(\ol\nabla_n \bK_{db}+P^{ca}\bK_{cb} \bK_{ad}\right).\nonumber \end{align} \begin{proof} The first equation follows from the Ricci identity and the fact that $\ol\nabla_a \bm\ell_b = \bPi_{ab}-\ell^{(2)} \bK_{ab}$ (see \eqref{olnablaell}), \begin{align*} \bm\ell_a \ol R^a{}_{bcd} & = \ol\nabla_d\ol\nabla_c \bm\ell_b-\ol\nabla_c\ol\nabla_d \bm\ell_b = 2\ol\nabla_{[d} \bPi_{c]b}+2\bK_{b[d}\ol\nabla_{c]} \ell^{(2)}+2\ell^{(2)}\ol\nabla_{[c}\bK_{d]b}. \end{align*} For the second, we ``integrate'' by parts equation \eqref{lemalR} and use $\bK(n,\cdot)=0$ to find $$n^c \bm\ell_a \ol R^a{}_{bcd} = \ol\nabla_d\left(\bPi_{cb}n^c\right) - \bPi_{cb}\ol\nabla_d n^c - \ol\nabla_n \bPi_{db}+\bK_{db}\ol\nabla_n \ell^{(2)}+\ell^{(2)}\left(\ol\nabla_n \bK_{db} + \bK_{cb}\ol\nabla_d n^c\right).$$ Inserting \eqref{olnablan} particularized to CHD, namely \begin{equation} \label{olnablan2} \ol\nabla_d n^c =P^{ca}\bK_{ad}-\bPi_{da}n^c n^a, \end{equation} equation \eqref{nlRgeneral} follows. \end{proof} \end{lema} Notice that in a CG equation \eqref{nlRgeneral} simplifies to \begin{equation} \label{nlR} n^c \bm\ell_a \ol R^a{}_{bcd}=\ol\nabla_d\left(\bPi_{cb}n^c\right)-\ol\nabla_n \bPi_{db}-P^{ca}\bK_{ad}\bPi_{cb}+\bPi_{cb}\bPi_{da} n^c n^a \end{equation} and the identity \eqref{lemalR} becomes $\bm\ell_a \ol R^a{}_{bcd}=2\ol\nabla_{[d} \bPi_{c]b}$. Moreover in the ACG, $\bm\ell_a \ol R^a{}_{bcd}=2\ol\nabla_{[d} \bY_{c]b}$ because $\bm\ell=\dd\ul u$ (cf. Remark \ref{observación} combined with $\lambda=1$) and hence $\bF=0$. \section{Constraint tensors on the foliation} \label{sec_fol} In this section we compute the constraint tensors $H$, $J(n)$, ${\bm J}_A$ and ${\bm{\mc R}}_{AB}$ in terms of the intrinsic geometry of the foliation. The first step is to define a set of tensors tangent to the foliation which capture all the information of the hypersurface data. Two such tensors (${\bm\chi}$ and ${\bm{\eta}}$) have already been introduced before. We recall their definition and introduce two additional ones. \begin{defi} \label{def_tensors} Let $\mc D$ be CHD and $\ul u$ a FF. We define the ``foliation tensors'' ${\bm\chi}$, ${\bm{\Upsilon}}$, ${\bm{\eta}}$ and $\omega$ on each leaf $\mc S_{\ul u}$ by \begin{equation*} {\bm\chi} \d \phi^{\star}_{\ul u} \bK, \qquad {\bm{\Upsilon}} \d \phi^{\star}_{\ul u} \bY, \qquad {\bm{\eta}} \d\lie_n\bm\ell_{\para}- \phi_{\ul u}^{\star}\left(\bPi(n,\cdot)\right)-{\bm\chi}(\ell^{\sharp},\cdot),\qquad \omega \d \dfrac{1}{2}\bY(n,n). \end{equation*} \end{defi} The motivation behind these definitions is the following. Suppose that a CHD $\mc D$ is embedded on an ambient manifold $(\mc M,g)$ with rigging $\xi$. Recall that $\bm\nu$ is the unique one-form normal to $f(\mc H)$ satisfying $\bm\nu(\xi)=1$. The vector $\nu\d g^{\sharp}(\bm\nu,\cdot)$ is the push-forward of $n$. From the definition of the tensor $\bK$ (see \eqref{defK}), it is clear that ${\bm\chi}$ is the null second fundamental form of the leaf $\mc S_{\ul u}$ w.r.t. $\nu$. For the interpretation of the remaining terms we restrict ourselves to the case when $\mc D$ is in a CG. 
Then the rigging vector is null and orthogonal to the leaves (see Remark \ref{rmkCG}), and hence from equation \eqref{Yembedded} we see that ${\bm{\Upsilon}}$ coincides with the null second fundamental form of $\mc S_{\ul u}$ w.r.t. $\xi$. Now let $X$ be a vector field tangent to the foliation. Taking into account decomposition \eqref{nablan}, \begin{equation} \label{etaambient} {\bm{\eta}}(X) = \bm\ell\left(\ol\nabla_n X\right) = g\left(\xi,\ol\nabla_n X\right) = g\left(\xi,\nabla_{\nu} X\right), \end{equation} where in the last equality we used \eqref{nablambient} and the fact that $\bK(n,\cdot)=0$. Thus, ${\bm{\eta}}$ coincides with the torsion one-form of $\mc S_{\ul u}$ with respect to the normal null basis $\{\nu,\xi\}$ (cf. \cite{It,Book,Luk}). Finally, from equation \eqref{olnablan2} it follows \begin{equation} \label{nablann=Y} \ol\nabla_n n = -\bY(n,n)n=-2\omega n. \end{equation} So \begin{equation} \label{omegambient} \omega =-\dfrac{1}{2} g(\ol\nabla_n n, \xi) = -\dfrac{1}{2} g(\nabla_{\nu} {\nu}, \xi) \end{equation} measures the deviation of ${\nu}$ from being affinely parametrized. Thus, the tensors ${\bm\chi}$, ${\bm{\Upsilon}}$, ${\bm{\eta}}$ and $\omega$ are a generalization of the connection coefficients of the double null foliation formalism (cf. \cite{It,Chris_and_Klain,Book,Luk}) and coincide with them when the data is embedded and written in a characteristic gauge. In terms of the hypersurface data, the tensor ${\bm{\Upsilon}}$ captures the information of the tangent components of $\bY$, the tensor ${\bm{\eta}}$ encodes the normal-tangent part, whereas $\omega$ carries the information of the normal-normal component, and hence it does not depend on the specific foliation. Once the motivation of Definition \ref{def_tensors} is clear, we compute the gauge transformation laws of the foliation tensors. \begin{lema} \label{transformationtensors} Let $\mc D$ be CHD with FF $\ul u$, $(z,\zeta)\in\mc F^{\star}(\mc H)\times\Gamma(T\mc H)$ and $\mc D' = \mc{G}_{(z,\zeta)}\mc D$. Then, \begin{align} {\bm\chi}' &= z^{-1}{\bm\chi},\label{trans_chi}\\ {\bm{\Upsilon}}' &= z{\bm{\Upsilon}} + \bm\ell_{\para}\otimes_{s} \phi_{\ul u}^{\star}(\dd z)+\dfrac{1}{2}\lie_{z\zeta_{\para}}h+\zeta_nz {\bm\chi},\label{transulchi}\\ {\bm{\eta}}' &={\bm{\eta}},\label{transeta}\\ \omega' &= z^{-1}\omega - \dfrac{1}{2}n\left(z^{-1}\right).\label{trans_omega} \end{align} Moreover, the connection $\wt\nabla$ is gauge invariant. \begin{proof} The first is a consequence of \eqref{Ktrans}. For the second, taking the pullback of \eqref{transY}, $${\bm{\Upsilon}}' = z{\bm{\Upsilon}} + \bm\ell_{\para}\otimes_{s} \phi_{\ul u}^{\star}(\dd z)+\dfrac{1}{2}\phi_{\ul u}^{\star}\left(\lie_{z\zeta}\bg\right).$$ Writing $\zeta=\zeta_{\para}+\zeta_n n$ and recalling that $\lie_{f n}\bg = f \lie_n\bg$ for any scalar $f$ (which holds here because $\bg(n,\cdot)=0$), the transformation law \eqref{transulchi} follows because $$\dfrac{1}{2}\phi_{\ul u}^{\star}\left(\lie_{z\zeta}\bg\right) = \dfrac{1}{2}\phi_{\ul u}^{\star}\big(\lie_{z\zeta_{\para}}\bg\big) + \dfrac{1}{2}\zeta_n z \phi_{\ul u}^{\star}\left(\lie_{n}\bg\right)=\dfrac{1}{2}\lie_{z\zeta_{\para}}h +\zeta_n z {\bm\chi},$$ where we have used the following standard property of the Lie derivative $\Phi^{\star}\left(\lie_{\Phi_{\star} V} T\right) = \lie_V \left(\Phi^{\star} T \right)$ valid for any embedding $\Phi$, vector field $V$ and covariant tensor $T$. Next we prove the invariance of ${\bm{\eta}}$ under gauge transformations. Let $X$ be a vector tangent to the foliation.
Firstly, from \eqref{transn}, Proposition \ref{gaugeconection} and the fact that $\bK(n,\cdot)=0$, $$\ol\nabla'_{n'} X = \ol\nabla_{n'} X = z^{-1}\ol\nabla_n X.$$ Secondly, equation \eqref{nablan} in the primed gauge reads \begin{align*} \ol\nabla'_{n'} X = \wt\nabla'_{n'} X + {\bm{\eta}}'(X) n' = z^{-1} \wt\nabla_{n} X + z^{-1} {\bm{\eta}}(X) n. \end{align*} Hence, we conclude that both $\wt\nabla$ and ${\bm{\eta}}$ are gauge invariant. Finally, the transformation of $\omega$ is immediate from \eqref{transY} and \eqref{transn}. \end{proof} \end{lema} When the transformation takes place between characteristic gauges, the transformation laws of ${\bm\chi}$, ${\bm{\Upsilon}}$, ${\bm{\eta}}$ and $\omega$ become particularly simple. \begin{cor} \label{transformationtensors_CG} Let $\mc D$ be CHD with FF $\ul u$ written in a CG, $z\in\mc F^{\star}(\mc H)$ and $\mc D' = \mc{G}_{(z,0)}\mc D$. Then, \begin{equation*} {\bm\chi}' = z^{-1}{\bm\chi}, \qquad {\bm{\Upsilon}}' = z{\bm{\Upsilon}}, \qquad {\bm{\eta}}' ={\bm{\eta}}, \qquad \omega' = z^{-1}\omega - \dfrac{1}{2}n\left(z^{-1}\right). \end{equation*} \end{cor} Our next aim is to write the constraint tensors $H$, $J(n)$, $\bm J_A$ and ${\bm{\mc R}}_{AB}$ in terms of the quantities we just introduced. Since the constraints are covariant we will compute them in a CG. In order to obtain them in any gauge it suffices to employ Theorem \ref{teo_gauge} and the transformation laws in Lemma \ref{transformationtensors}. Let us begin by writing the tensors $J(n)$ and ${\bm J}_A$. The computation requires several contractions of the tensors $A$ and $B$. The computation is somewhat long and has been postponed to Appendix \ref{appendixB} in order not to interrupt the presentation. Using identities \eqref{propJ1} and \eqref{propJ2} of Lemma \ref{LemaB}, expression \eqref{momentum2} becomes \begin{equation} \label{J(V)} \begin{aligned} {\bm J}(V)&=(\ol\nabla_V \bPi)(n,n)-(\ol\nabla_n \bPi)(V,n)-\bPi(V,n) \tr_P \bK\\ &\quad +\left(\bK*\bPi\right)(V,n)- \ol\nabla_V\tr_P \bK+\div_P(\bK)(V), \end{aligned} \end{equation} for an arbitrary vector field $V$, where $\tr_P \bK\d P^{ab}\bK_{ab}$, $\left(\bK*\bPi\right)_{ca} \d P^{bd} \bK_{bc}\bPi_{da}$ and $\div_P(\bK)(V)\d P^{ab}V^c\ol\nabla_a \bK_{bc}$, so in particular \begin{equation*} J(n)=-\bY(n,n) \tr_P \bK-\ol\nabla_n \tr_P \bK+\div_P(\bK)(n). \end{equation*} In order to write this in terms of the geometry of the foliation, we need to express $\tr_P \bK$ and $\div_P(\bK)(n)$ in terms of the foliation tensors. For the first one, recalling equation \eqref{PAB}, $\tr_P \bK = \tr_h {\bm\chi}$. For the second we use \eqref{PAB} and \eqref{olnablan2}, \begin{equation*} \div_P(\bK)(n) = -P^{bd}\bK_{ba}\ol\nabla_d n^a = -P^{bd}\bK_{ba}P^{ac}\bK_{cd}=-h^{BD}h^{AC}{\bm\chi}_{BA}{\bm\chi}_{CD} \eqqcolon -|{\bm\chi}|^2. \end{equation*} Then, $$-J(n)= n\left(\tr_h{\bm\chi}\right) + 2\omega \tr_h{\bm\chi} +|{\bm\chi}|^2,$$ which is the abstract data form of the Raychaudhuri equation. From Theorem \ref{teo_gauge}, \eqref{transn} and the transformations in Lemma \ref{transformationtensors} it follows that the constraint tensor $J(n)$ takes the same form in any gauge (not necessarily CG). 
Indeed, given gauge parameters $(z,\zeta)$, \begin{align} - z^2 J'(n') & = n\left(\tr_h{\bm\chi}\right) + 2\omega \tr_h{\bm\chi} +|{\bm\chi}|^2\nonumber\\ &= z^2 n' \left(\tr_h{\bm\chi}'\right) +n(z)\tr_h{\bm\chi}' + 2z^2\omega'\tr_h{\bm\chi}' - n(z) \tr_h{\bm\chi}' + z^2|{\bm\chi}'|^2\nonumber\\ &= z^2\left(n' \left(\tr_h{\bm\chi}'\right) + 2\omega'\tr_h{\bm\chi}' + |{\bm\chi}'|^2\right) .\label{Jnanygauge} \end{align} Next we proceed by taking $V=X$, a tangent vector to the foliation, and rewrite ${\bm J}(X)$ in terms of the foliation tensors. The term $\tr_P \bK = \tr_h{\bm\chi}$ has already been computed. To compute $\div_P(\bK)(X)$ we start by showing that $\phi^{\star}_{\ul u}(\ol\nabla_X \bK) = \nabla^{h}_X {\bm\chi}$ for every $X\in\Gamma(T\mc S_{\ul u})$. For any pair of vectors $Z_1$, $Z_2$ tangent to the foliation it holds \begin{align*} (\ol\nabla_X \bK)(Z_1,Z_2) & = X(\bK(Z_1,Z_2))-\bK(\ol\nabla_X Z_1,Z_2)-\bK(Z_1,\ol\nabla_X Z_2)\\ &=X({\bm\chi}(Z_1,Z_2))-{\bm\chi}(\nabla_X^{h} Z_1,Z_2)-{\bm\chi}(Z_1,\nabla_X^{h} Z_2)\\ &=(\nabla^{h}_X {\bm\chi})(Z_1,Z_2), \end{align*} where we have inserted the decomposition \eqref{decompnabla} with $\nabla^h$ instead of $\ol\nabla^{\mc S}$ because the data is written in a CG (see Remark \ref{observación}). Consequently $$\div_P \bK (X) \d P^{ab} X^c \ol\nabla_a \bK_{bc} = h^{AB}X^C\nabla^{h}{\bm\chi}_{BC}=\div_h ({\bm\chi}) (X).$$ Substituting these two terms, the tensor ${\bm J}(X)$ becomes \begin{equation} \label{JXaux} \begin{aligned} {\bm J}(X)&=(\ol\nabla_X \bPi)(n,n)-(\ol\nabla_n \bPi)(X,n)-\bPi(X,n) \tr_h {\bm\chi} \\ &\quad+\left(\bK*\bPi\right)(X,n)- \ol\nabla_X\tr_h {\bm\chi}+\div_h({\bm\chi})(X). \end{aligned} \end{equation} Before we elaborate this expression, we introduce the one-form ${\bm{\tau}}\in\Gamma(T^{\star}\mc S_{\ul u})$ by \begin{equation} \label{taueta} {\bm{{\bm{\tau}}}} \d -{\bm{\eta}} - \dd\left(\log\left|\lambda\right|\right) , \end{equation} which is a combination that will appear frequently below. Taking into account the definition of ${\bm{\eta}}$ in Def. \ref{def_tensors} and equation \eqref{Pipi}, \begin{equation} \label{tau2} {\bm{\tau}} = \phi^{\star}_{\ul u}\left(\bPi(\cdot,n)\right)+{\bm\chi}(\ell^{\sharp},\cdot), \end{equation} from which \eqref{normalndeco} gets rewritten as \begin{equation} \label{olnablatau} \ol\nabla_X n = \chi^{\sharp}(X)-{\bm{\tau}}(X)n. \end{equation} The transformation law of ${\bm{\tau}}$ follows from those of ${\bm{\eta}}$ \eqref{transeta} and $\lambda$ (see \eqref{transn} and Def. \ref{def_CHD}), \begin{equation} \label{transtau} {\bm{\tau}}' = {\bm{\tau}} +\dd\left(\log|z|\right). \end{equation} As a direct consequence of the expression of ${\bm{\eta}}$ in Def. \ref{def_tensors} and \eqref{tau2}, in a CG the tensors ${\bm{\eta}}$ and ${\bm{\tau}}$ can be written as \begin{align} {\bm{\eta}} & = -\phi_{\ul u}^{\star}\left(\bPi(n,\cdot)\right),\label{etaPi}\\ {\bm{\tau}}&= \phi^{\star}_{\ul u}\left(\bPi(\cdot,n)\right).\label{tauPi} \end{align} The geometric interpretation of ${\bm{\tau}}$ is similar to the one of ${\bm{\eta}}$. In the following Lemma we compute the first and second terms of \eqref{JXaux} in terms of the foliation tensors. \begin{lema} \label{lemaB2} Let $\mc D$ be CHD written in a characteristic gauge and $X\in\Gamma(T\mc S)$. 
Then, \begin{align} \left(\ol\nabla_X \bPi\right)(n,n) &= 2\nabla^{h}_X\omega -2\left({\bm\chi}\cdot{\bm{\tau}}\right) (X)+4\omega{\bm{\tau}}(X)-\left({\bm\chi}\cdot\dd\left(\log|\lambda|\right)\right)(X),\label{nablaXPinn}\\ \left(\ol\nabla_n \bPi\right)(X,n)&=\big(\wt\nabla_n {\bm{\tau}}\big)(X)+4\omega{\bm{\tau}}(X)+2\omega\ \dd\left(\log|\lambda|\right)(X), \label{nablanPiXn} \end{align} where $\left(T\cdot\bm\alpha\right)_A\d h^{BC}T_{BA}\bm\alpha_C$. \begin{proof} We start with the computation of $\left(\ol\nabla_X\bPi\right)(n,n)$. From equations \eqref{olnablatau} and \eqref{tauPi}, \begin{align*} \left(\ol\nabla_X \bPi\right)(n,n) & = \nabla^{h}_X\left(\bY(n,n)\right)-\bPi(\ol\nabla_{X}n,n)-\bPi(n,\ol\nabla_{X}n)\\ &=2\nabla_X^{h}\omega -\left({\bm\chi}\cdot{\bm{\tau}}\right)(X)+\bY(n,n){\bm{\tau}}(X)+\left({\bm\chi}\cdot{\bm{{\bm{\eta}}}}\right)(X)+\bY(n,n)\bm\tau(X)\\ &=2\nabla^{h}_X\omega -2\left({\bm\chi}\cdot{\bm{\tau}}\right) (X)+4\omega{\bm{\tau}}(X)-\left({\bm\chi}\cdot\dd\left(\log|\lambda|\right)\right)(X), \end{align*} where in the third line we replaced ${\bm{\eta}}$ by ${\bm{\tau}}$ according to \eqref{taueta}. The term $\left(\ol\nabla_n \bPi\right)(X,n)$ can be computed analogously: \begin{align*} \left(\ol\nabla_n \bPi\right)(X,n) & = n\left({\bm{\tau}}(X)\right)-\bPi\big(\wt\nabla_n X+{\bm{\eta}}(X)n,n\big)-\bPi\left(X,\ol\nabla_n n\right)\\ &= n\left({\bm{\tau}}(X)\right)-{\bm{\tau}}\big(\wt\nabla_n X\big)-\bY(n,n){\bm{\eta}}(X)+\bY(n,n)\bm\tau(X)\\ &=\big(\wt\nabla_n {\bm{\tau}}\big)(X)+4\omega{\bm{\tau}}(X)+2\omega\ \dd\left(\log|\lambda|\right)(X), \end{align*} where we used \eqref{nablan}, \eqref{nablann=Y} and Def. \eqref{nablatildeeta} together with \eqref{taueta}. Finally, from \eqref{PAB} and \eqref{tauPi}, the term $\left(\bK*\bPi\right)(X,n)$ becomes $\left({\bm\chi}\cdot{\bm{\tau}}\right)(X)$. \end{proof} \end{lema} Using equations \eqref{PAB} and \eqref{tauPi}, the third and fourth terms in \eqref{JXaux} become $-\tr_h{\bm\chi}\ {\bm{\tau}}(X)$ and $\left({\bm\chi}\cdot{\bm{\tau}}\right)(X)$, respectively. Consequently, combining all the terms, the constraint tensor $\bm J(X)$ in \eqref{JXaux} can be written in any CG as \begin{equation} \begin{split} \label{JACG} {\bm J}(X)&=-\big(\wt\nabla_n{\bm{\tau}}\big)(X)+2 \nabla^{h}_X\omega -\left({\bm{\tau}}\cdot{\bm\chi}\right)(X) - \tr_h{\bm\chi} \ \bm\tau(X)\\ &\quad\, +\div_h({\bm\chi})(X)-\nabla^{h}_X\tr_h{\bm\chi}-2\omega \nabla^h_X\log|\lambda|-\left({\bm\chi}\cdot\dd\left(\log|\lambda|\right)\right)(X). \end{split} \end{equation} Next we compute the constraint tensor ${\bm{\mc R}}_{AB}$, which from Definition \eqref{ricci} and the symmetries in Proposition \ref{symmetries} can be written as \begin{equation} \label{RAB1} {\bm{\mc R}}_{AB} = \left(B_{acbd} P^{cd} + (A_{bca}+A_{acb})n^c\right)e_A^ae_B^b. \end{equation} These contractions of $A$ and $B$ in the right-hand side are computed in Lemma \ref{LemaB}, so introducing equations \eqref{BP} and \eqref{An} and the symmetrized version of \eqref{An} into \eqref{RAB1} we conclude \begin{equation} \label{RABfinal} {\bm{\mc R}}_{AB} = R^{h}_{AB}-2\wt\nabla_n {\bm{\Upsilon}}_{AB}+4\omega {\bm{\Upsilon}}_{AB}-\nabla_A^{h}{\bm{\eta}}_B-\nabla^{h}_B{\bm{\eta}}_A-2{\bm{\eta}}_A{\bm{\eta}}_B-{\bm\chi}_{AB}\tr_h {\bm{\Upsilon}} -{\bm{\Upsilon}}_{AB}\tr_h {\bm\chi} . \end{equation} Finally recalling that $2H+h^{AB}R_{AB}=0$ \eqref{RJHCG} and taking the trace of \eqref{RABfinal} w.r.t. 
$h$, $$H = - \dfrac{1}{2}R^{h}+ n\left(\tr_h{\bm{\Upsilon}}\right) +\div({\bm{\eta}}) + |{\bm{\eta}}|^2 +\left(\tr_h{\bm\chi}-2\omega\right)\tr_h{\bm{\Upsilon}},$$ where we have defined $|{\bm{\eta}}|^2\d h^{AB}{\bm{\eta}}_A{\bm{\eta}}_B$ and used that $\nabla^h h=0$ and $\wt\nabla_n h=0$. Indeed, \begin{align*} \big(\wt\nabla_n h\big)(X,Z) & = n\left(h(X,Z)\right) - h\big(\wt\nabla_n X,Z\big) - h\big(X,\wt\nabla_n Z\big)\\ &=\left(\ol\nabla_n\bg\right)(X,Z)+{\bm{\eta}}(X)\bg(n,Z)+{\bm{\eta}}(Z)\bg(X,n)\\ &=0, \end{align*} where in the second equality we used \eqref{nablan} and in the last one \eqref{olnablagamma} together with $\bK(n,\cdot)=\bg(n,\cdot)=0$.\\ We summarize the results of this section in the following Theorem. \begin{teo} \label{constraints} Let $\mc D$ be CHD and $\ul u$ a foliation function. Then the constraint tensors take the following form in a characteristic gauge \begin{align} J(n)&= -n\left(\tr_h{\bm\chi}\right) - 2\omega \tr_h{\bm\chi} -|{\bm\chi}|^2,\label{Ray}\\ {\bm J}(X)&=-\big(\wt\nabla_n{\bm{\tau}}\big)(X)+2 \nabla^{h}_X\omega -\left({\bm\chi}\cdot{\bm{\tau}}\right)(X) - {\bm{\tau}}(X)\tr_h{\bm\chi}+\div({\bm\chi})(X)\nonumber \\ &\quad\,-\nabla^{h}_X\tr_h{\bm\chi}-2\omega \nabla^h_X\log|\lambda|-\left({\bm\chi}\cdot\dd\left(\log|\lambda|\right)\right)(X),\label{Jdata}\\ {\bm{\mc R}}_{AB} & = R^{h}_{AB}-2\wt\nabla_n {\bm{\Upsilon}}_{AB}+4\omega {\bm{\Upsilon}}_{AB}-\nabla_A^{h}{\bm{\eta}}_B-\nabla^{h}_B{\bm{\eta}}_A-2{\bm{\eta}}_A{\bm{\eta}}_B\nonumber\\ &\quad\, -{\bm\chi}_{AB}\tr_h {\bm{\Upsilon}} -{\bm{\Upsilon}}_{AB}\tr_h {\bm\chi} ,\label{Rdata}\\ H &= n\left(\tr_h{\bm{\Upsilon}}\right) + |{\bm{\eta}}|^2+\div({\bm{\eta}}) - \dfrac{1}{2}R^{h}+\left(\tr_h{\bm\chi}-2\omega\right)\tr_h{\bm{\Upsilon}}.\label{Hdata} \end{align} \end{teo} Under further gauge restrictions, one can show that the expressions in Theorem \ref{constraints} agree with the standard null structure equations provided the data is embedded. We do not give the explicit comparison here since it is not needed for the proof. \section{The Harmonic Gauge} \label{sec_HG} When solving any initial value problem in General Relativity one has to deal with the issue of the coordinates: since GR is a geometric theory the Einstein equations cannot have a unique solution. For that reason one has to ``fix the gauge'' by choosing an appropriate coordinate system in order to solve them. The standard approach to deal with this issue, both in the classical Cauchy problem and in the characteristic one, is to write the Einstein equations in some well-chosen gauge (e.g. harmonic gauge). This yields a new system of geometric PDE, called reduced Einstein equations, which does have a well-posed initial value problem in a PDE sense, i.e., that there is a unique solution in some neighbourhood of the initial hypersurface(s) (see \cite{HawkingEllis,Wald} and \cite{Luk,Rendall}, respectively). This approach requires showing, at the very end, that the solution of the reduced system is in fact a solution of the full Einstein field equations. Thus, the first step is to translate the harmonic condition on the coordinates, namely $\square_g x^{\mu}=0$, into a condition on our data. We start with the embedded case and then promote the definition to the abstract level. \begin{prop} \label{propbox} Let $\mc D$ be embedded CHD on a spacetime $(\mc M,g)$ with rigging $\xi$ and Levi--Civita connection $\nabla$ and let $f$ be a smooth function on $\mc M$. 
Then, \begin{equation} \label{box} \square_g f\st{\mc H}{=}\square_P f + 2\ol\nabla_n\left(\xi(f)\right) + \left(\tr_P \bK - 4\omega\right) \xi(f) -\left(2P^{bc}\ol\nabla_n\bm\ell_b+n\left(\ell^{(2)}\right)n^c\right) e_c(f), \end{equation} where $\square_P f \d P^{ab}\ol\nabla_a\ol\nabla_b f$. \begin{proof} From \eqref{inverse}, \begin{align} \square_g f & = g^{\mu\nu}\nabla_{\mu}\nabla_{\nu} f \nonumber\\ & \st{\mc H}{=} \left(P^{ab}e_a^{\mu}e_b^{\nu}+n^a\xi^{\nu} e^{\mu}_a+n^a\xi^{\mu}e_a^{\nu}\right) \nabla_{\mu}\nabla_{\nu} f \nonumber\\ &=P^{ab} \nabla_{e_a}\nabla_{e_b} f -P^{ab}\nabla_{\nu} f \nabla_{e_a} e_b^{\nu}+2\nabla_n\left(\xi(f)\right)-2\nabla_{\nu}f\nabla_n\xi^{\nu}.\label{box1} \end{align} Particularizing equation \eqref{nablatxi} to CHD, contracting it with $n^a$ and using \eqref{olnablaell} as well as the expression of $\omega$ in Def. \eqref{def_tensors}, \begin{equation} \label{nablanxi} \nabla_n\xi =2\omega \xi + \big(P^{cb}\ol\nabla_n\bm\ell_b+\dfrac{1}{2}n\big(\ell^{(2)}\big)n^c\big)e_c. \end{equation} Introducing \eqref{nablatt} and \eqref{nablanxi} into \eqref{box1} and using $\nabla_{e_a}\nabla_{e_b} f = e_a\left(e_b(f)\right)=\ol\nabla_{e_a}\ol\nabla_{e_b} f$, \begin{align*} \square_g f &\st{\mc H}{=} P^{ab}\left(\ol\nabla_{e_a}\ol\nabla_{e_b} f -\ol\Gamma_{ab}^c e_c(f)\right)+ P^{ab} \bK_{ab}\xi(f)+ 2\ol\nabla_n\left(\xi(f)\right) \\ &\quad\,-4\omega\xi(f)-\left(2P^{cb}\ol\nabla_n\bm\ell_b+n^c\ol\nabla_n\ell^{(2)}\right) e_c(f), \end{align*} and hence equation \eqref{box} follows. \end{proof} \end{prop} Let $\mc D$ be embedded CHD of dimension $m$ on a spacetime $(\mc M,g)$ with rigging $\xi$. In order to write $\square_g x^{\mu}=0$ in terms of the abstract data consider a set of independent functions $\{x^{\ul a}\}_{a=1}^m$ on $\mc H$ and extend them to $\mc M$ in such a way that $\xi(x^{\ul a})=0$. Consider also a function $u$ on $\mc M$ satisfying $u|_{\mc H}=0$ and $\xi(u)=1$. Let $\Xi_{c}{}^{\ul a} \d e_c(x^{\ul a})$ and $\Xi^{c}{}_{\ul a}$ its inverse. Since $\Xi_{c}{}^{\ul a}$ is a covector (\ul{a} is \textit{not} a tensorial index), $\Xi^{c}{}_{\ul a}$ is a vector and so it is $V^c\d \Xi^{c}{}_{\ul a} \square_P x^{\ul a}$. In terms of this vector and by virtue of Proposition \ref{propbox} the conditions $\square_g x^{\ul a}=0$ are equivalent to \begin{equation} \label{V^c} 2P^{bc}\ol\nabla_n\bm\ell_b+n\left(\ell^{(2)}\right)n^c=V^c, \end{equation} which is a covariant equation. In the following Theorem we prove abstractly that given a set of independent functions $\{x^{\ul a}\}$ on $\mc H$ there exists essentially a unique gauge in which $\tr_P \bK -4\omega=0$ and equation \eqref{V^c} holds. \begin{teo} \label{teo_HG} Let $\mc D$ be CHD, $\{x^{\ul a}\}$ a set of $m$ independent functions and $V^c=\Xi^c{}_{\ul a}\square_P x^{\ul a}$. Select a section $\mc S\subset \mc H$ and a pair $(z_0,\zeta_0)\in\mc F^{\star}(\mc S)\times\Gamma(T\mc H)$. Then there exists a unique gauge satisfying the following conditions on $\mc H$, \begin{align} \tr_P \bK -4\omega&=0,\label{combGamma}\\ 2P\left(\ol\nabla_n\bm\ell,\cdot\right)+n\left(\ell^{(2)}\right)n&=V\label{V^c2} \end{align} together with $(z,\zeta)|_{\mc S}=(z_0,\zeta_0)$. Such gauges will be called ``harmonic gauge'' (HG). \begin{proof} First consider equation \eqref{combGamma} in the primed gauge. 
Taking into account transformation laws \eqref{Ktrans} and \eqref{trans_omega} together with $\bK(n,\cdot)=0$, $$\tr_{P'} \bK' -4\omega' = z^{-1}\tr_P \bK-4z^{-1}\omega-2z^{-2} n\left( z\right) = 0.$$ Then there exists a unique $z$ solving the previous equation with initial condition $z|_{\mc S}=z_0$. Moreover, since $z_0 \neq 0$, the function $z$ does not vanish in some neighbourhood of the initial section $\mc S$ within $\mc H$. The next task is to show that there is a gauge in which \eqref{V^c2} holds. This requires determining the gauge behaviour of both sides of \eqref{V^c2}. Concerning the vector $V$ it suffices to study the transformation of $\square_P F$ with $F\in\mc F(\mc H)$. From equation \eqref{gaugeP} and Proposition \ref{gaugeconection}, \begin{align*} \square_{P'} F & = P'{}^{ab}\ol\nabla'_a\ol\nabla'_b F\\ &=\left(P^{ab}-\zeta^an^b-\zeta^b n^a\right)\left(\ol\nabla_a\ol\nabla_b F-\zeta(F) \bK_{ab}\right)\\ &=P^{ab}\ol\nabla_a\ol\nabla_b F-\zeta(F)P^{ab} \bK_{ab}-\zeta^an^b\ol\nabla_a\ol\nabla_b F-\zeta^bn^a\ol\nabla_a\ol\nabla_b F\\ &=\square_P F -\zeta(F)\tr_P \bK -2\zeta\left(n(F)\right) + 2\left(\ol\nabla_{\zeta} n\right)(F), \end{align*} where in the third line we used $\bK(n,\cdot)=0$ and in the last equality that $\ol\nabla$ is torsion-free. Hence, applying this to $F=x^{\ul a}$, \begin{equation} \label{Vprimac} V'{}^c= V^c - \Xi^{c}{}_{\ul a} \left(\zeta(x^{\ul a})\tr_P \bK +2\zeta\left(n(x^{\ul a})\right) - 2\left(\ol\nabla_{\zeta} n\right)(x^{\ul a})\right). \end{equation} Concerning the LHS of \eqref{V^c2} we will use transformations \eqref{transn}, \eqref{gaugeconnection} and $\bK(n,\cdot)=0$ as well as $\ol\nabla_n \bg = 0$, which follows from \eqref{olnablagamma}. We analyze each term in \eqref{V^c2} separately. For the first one we recall $\bm\ell'_b = z^{-1}\left(\bm\ell_b+\bg_{ba}\zeta^a\right)$ (see \eqref{tranfell}) and compute $$\ol\nabla'_{n'}\bm\ell'_b =z^{-1}\ol\nabla_n\bm\ell'_b= z^{-2} n(z)\bm\ell'_b+\ol\nabla_n\bm\ell_b+\bg_{ba}\ol\nabla_n\zeta^a.$$ Contracting with $P'{}^{bc}$ and using the primed versions of \eqref{Pell} and \eqref{Pgamma}, namely \begin{align*} P'{}^{bc}\bm\ell'_b =-\ell'{}^{(2)}n'{}^c,\qquad P'{}^{bc}\bg_{ba} =\delta^c_a-\bm\ell_a'n'{}^c, \end{align*} as well as the transformation law \eqref{gaugeP}, \begin{align} 2P'{}^{bc}\ol\nabla'_{n'}\bm\ell_b' & = 2z^{-2} n(z) P'{}^{bc}\bm\ell'_b + 2P'{}^{bc}\ol\nabla_n\bm\ell_b+2P'{}^{bc}\bg_{ba}\ol\nabla_n\zeta^a\nonumber\\ & = -2z^{-2} n(z)\ell'{}^{(2)}n'{}^c +2P^{bc}\ol\nabla_n\bm\ell_b-2\zeta^bn^c\ol\nabla_n\bm\ell_b-2\zeta^cn^b\ol\nabla_n\bm\ell_b\nonumber\\ &\quad\,+2\ol\nabla_n\zeta^c-2zn'{}^c\left(\bm\ell_a\ol\nabla_n\zeta^a+\bg_{ab}\zeta^b\ol\nabla_n\zeta^a\right).\label{HGaux1} \end{align} To compute the term $n'\left(\ell'{}^{(2)}\right) n'{}^c$ we insert $\ell'{}^{(2)} = z^2\left(\ell^{(2)}+2\bm\ell(\zeta)+\bg(\zeta,\zeta)\right)$ \eqref{transell2} and get \begin{equation} \label{HGaux2} \hspace{-1mm} {n'}\left(\ell'{}^{(2)}\right)n'{}^c = 2z^{-2} n(z)\ell'{}^{(2)}n'{}^c + z\left(\ol\nabla_n\ell^{(2)} + 2\zeta^a\ol\nabla_n\bm\ell_a + 2\bm\ell_a\ol\nabla_n\zeta^a + 2\bg_{ab}\zeta^a\ol\nabla_n\zeta^b\right)n'{}^c. \end{equation} Adding \eqref{HGaux1} and \eqref{HGaux2} and using $n^d\ol\nabla_n\bm\ell_d = 2\omega$, which follows from \eqref{olnablaell} and Def.
\eqref{def_tensors}, \begin{align} 2P'{}^{bc}\ol\nabla_{n'}\bm\ell_b'+{n'}\left(\ell'{}^{(2)} \right) n'{}^c = 2P^{cd}\ol\nabla_{n}\bm\ell_b+ n\left(\ell^{(2)}\right)n^c- 4\omega\zeta^c +2\ol\nabla_n\zeta^c.\label{LHSprima} \end{align} Finally taking into account \eqref{Vprimac} and \eqref{LHSprima}, equation \eqref{V^c2} constitutes a first order ODE for $\zeta$ with unique solution given $\zeta_0$. \end{proof} \end{teo} \begin{obs} Observe that when the data is embedded and written in the HG, Proposition \ref{propbox} ensures that the functions $\{u,x^{\ul a}\}$ satisfy $\square_g x^{\ul a}=0$ and $\square_g u=0$ on $\mc H$ provided that $\xi(x^{\ul a})\st{\mc H}{=}0$ and $\xi(u)|_{\mc H}$ is constant along each null generator of $\mc H$. \end{obs} Let $\mc D$ be embedded CHD on a spacetime $(\mc M,g)$ with rigging $\xi$. Consider a set of $m$ independent functions adapted to the foliation, i.e., $\{x^{\ul a}\}=\{\ul u, x^A\}$, where $\{x^A\}$ is a set of $m-1$ functions on $\mc H$ satisfying $n(x^A)=0$ and $n\left(\ul u\right)\neq 0$. We want to compute explicitly $\square_g u$, $\square_g x^A$ and $\square_g \ul u$ in terms of the foliation tensors in the embedded case and then promote these expressions into abstract definitions on $\mc H$. The first one is immediate from Proposition \ref{propbox} together with $\xi(u)=1$ and the fact that $\tr_P \bK = \tr_h{\bm\chi}$, \begin{equation} \label{boxu} \square_g u = \tr_h {\bm\chi}-4\omega. \end{equation} For the other two we first prove an intermediate result. \begin{prop} \label{propbox2} Let $\mc D$ be CHD and $\beta\in\mc F(\mc H)$. Then, \begin{align} \square_P \beta &= \square_h \beta -\ell^{\sharp}(\beta)\tr_h{\bm\chi} - 2\ell^{\sharp}\left(n(\beta)\right)+2\chi^{\sharp}(\ell^{\sharp})(\beta)-\big(\ell^{(2)}-\ell_{\sharp}^{(2)}\big)n\left(n(\beta)\right)\nonumber\\ &\quad\,+\big(\tr_h{\bm{\Upsilon}} -\left(2\omega+\tr_h{\bm\chi}\right)\big(\ell^{(2)}-\ell_{\sharp}^{(2)}\big)-2{\bm{\tau}}(\ell^{\sharp})-\div_h\bm\ell_{\para}\big)n(\beta),\label{boxpf}\\ e_c(\beta)P^{cb}\ol\nabla_n\bm\ell_b &= h^{\sharp}\left(\dd \beta, \lie_n\bm\ell+\bPi(\cdot,n)\right)-2\omega\ell^{\sharp}(\beta)\nonumber\\ &\quad\,-\big(2\omega \big(\ell^{(2)}-\ell_{\sharp}^{(2)}\big)+\left(\lie_n\bm\ell\right)(\ell^{\sharp})+\bPi(\ell^{\sharp},n)\big)n(\beta).\label{Pnablal} \end{align} \begin{proof} From decomposition \eqref{Pdecomposition} and the fact that the Hessian of a function is symmetric, \begin{align} \square_P \beta & = P^{ab}\left(e_a\left(e_b(\beta)\right) - \left(\ol\nabla_{e_a} e_b\right)(\beta) \right)\nonumber\\ &=h^{AB} \big(e_A\left(e_B(\beta)\right)- \big(\ol\nabla_{e_A}^{\mc S} e_B\big) (\beta) + \bQ(e_A,e_B)n(\beta) \big) - 2\ell^A\left(e_A\left(n(\beta)\right)-\left(\ol\nabla_{e_A} n\right)(\beta)\right)\nonumber\\ &\quad\,-\big(\ell^{(2)}-\ell_{\sharp}^{(2)}\big)\left(n\left(n(\beta)\right)-\left(\ol\nabla_n n\right)(\beta)\right)\nonumber\\ &=h^{AB}\big(e_A\left(e_B(\beta)\right) - \big(\ol\nabla_{e_A}^{h} e_B\big) (\beta) -\ell^{\sharp}(\beta){\bm\chi}_{AB} + \bQ(e_A,e_B)n(\beta) \big)\label{boxaux1}\\ &\quad\,-2\ell^{\sharp}\left(n(\beta)\right) +2\chi^{\sharp}(\ell^{\sharp})(\beta)-2{\bm{\tau}}(\ell^{\sharp}) n(\beta) - \big(\ell^{(2)}-\ell_{\sharp}^{(2)}\big)\left(n\left(n(\beta)\right)+2\omega n(\beta)\right),\nonumber \end{align} where in the second equality we used \eqref{decompnabla} and in the third one Proposition \ref{nablaSandnablah}, \eqref{olnablatau} and \eqref{nablann=Y}. 
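In more detail, \eqref{olnablatau} yields $2\ell^{A}\left(\ol\nabla_{e_A} n\right)(\beta)=2\chi^{\sharp}(\ell^{\sharp})(\beta)-2{\bm{\tau}}(\ell^{\sharp})\,n(\beta)$, while \eqref{nablann=Y} gives $\left(\ol\nabla_n n\right)(\beta)=-2\omega\, n(\beta)$; these account for the terms involving $\chi^{\sharp}(\ell^{\sharp})$, ${\bm{\tau}}(\ell^{\sharp})$ and $\omega$ in the last line.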
The term $h^{AB}\bQ(e_A,e_B)$ can be computed from \eqref{tensorQ}, \begin{align} h^{AB} \bQ(e_A,e_B) & = h^{AB}\big(\bY(e_A,e_B)+\bF(e_A,e_B)-\ell^{(2)}{\bm\chi}_{AB} - \big(\ol\nabla^{\mc S}_{e_A}\bm\ell_{\para}\big)(e_B)\big)\nonumber\\ &= \tr_h{\bm{\Upsilon}} -\big(\ell^{(2)}-\ell_{\sharp}^{(2)}\big)\tr_h{\bm\chi} - \div_h\bm\ell_{\para},\label{boxaux2} \end{align} where in the second equality we used the definition of ${\bm{\Upsilon}}$ (Def. \ref{def_tensors}), the fact that $\bF$ is antisymmetric and Proposition \ref{nablaSandnablah}. Introducing \eqref{boxaux2} into \eqref{boxaux1}, \eqref{boxpf} follows. In order to show \eqref{Pnablal} we employ again decomposition \eqref{Pdecomposition}, \begin{align*} e_c(\beta) P^{cb}\ol\nabla_n\bm\ell_b & = h^{CB}e_C(\beta)\left(\ol\nabla_n\bm\ell\right)(e_B)-n(\beta)\ell^B\left(\ol\nabla_n\bm\ell\right)(e_B)\\ &\quad -2\omega \ell^{\sharp}(\beta)-2\omega \big(\ell^{(2)}-\ell_{\sharp}^{(2)}\big) n(\beta), \end{align*} where we used $\left(\ol\nabla_n\bm\ell\right) (n)=2\omega$ (see \eqref{olnablaell}). Equation \eqref{Pnablal} follows after taking into account that $\left(\ol\nabla_n\bm\ell\right)(e_B) = n\left(\bm\ell(e_B)\right) -\bm\ell\left(\ol\nabla_n e_B\right)$ and thus \begin{align*} \left(\ol\nabla_n\bm\ell\right)(e_B) =n\left(\bm\ell(e_B)\right)-\bm\ell\left(\lie_n e_B -\ol\nabla_{e_B} n\right)=\left(\lie_n\bm\ell\right)(e_B)+\bPi(e_B,n), \end{align*} where we used $\bm\ell(n)=1$ and equation \eqref{normalndeco}. \end{proof} \end{prop} From Propositions \ref{propbox} and \ref{propbox2} as well as relation \eqref{tau2} one has the following Corollary. \begin{cor} \label{corollarybox} Let $\mc D$ be embedded CHD with rigging $\xi$ and FF $\ul u$ and let $\{x^A\}$ be a set of functions satisfying $n(x^A)=0$ and extended off $\mc H$ by means of $\xi(\ul u)=\xi(x^A)=0$. Then, \begin{align} \hspace{-6mm}\square_g x^A&=\square_h x^A +\left(4\omega-\tr_h{\bm\chi}\right)\ell^{\sharp}(x^A)+ 2\dd x^A\left(\chi^{\sharp}(\ell^{\sharp}) - h^{\sharp}\big(\lie_n\bm\ell +\bPi\left(\cdot,n\right),\cdot\big) \right),\label{boxA}\\ \hspace{-6mm}\square_g \ul u &= \lambda\tr_h{\bm{\Upsilon}} + \lambda\Phi\left(\bm\ell_{\para},\omega,h,\ell^{(2)}\right),\label{boxulu} \end{align} where \begin{equation} \label{Phi} \begin{aligned} \Phi\left(\bm\ell,\omega,h,\ell^{(2)}\right)&=\big(\ell^{(2)}-\ell_{\sharp}^{(2)}\big)\left(2\omega-\tr_h{\bm\chi}-n(\log|\lambda|)\right)-\div_h\bm\ell_{\para}\\ &\quad\, +2\left(\lie_n\bm\ell\right)(\ell^{\sharp})-2{\bm\chi}(\ell^{\sharp},\ell^{\sharp})- n\left(\ell^{(2)}\right). \end{aligned} \end{equation} \end{cor} Equations \eqref{boxu}, \eqref{boxA} and \eqref{boxulu} lead naturally to the definition of the following abstract functions on $\mc H$, \begin{align} \hspace{-3mm} \Gamma^u_{\mc H} &\d \tr_h {\bm\chi}-4\omega,\label{Gammau0}\\ \hspace{-3mm} \Gamma^A_{\mc H} & \d \square_h x^A +\left(4\omega-\tr_h{\bm\chi}\right)\ell^{\sharp}(x^A)+ 2\dd x^A\left(\chi^{\sharp}(\ell^{\sharp})\right) -2\left( \lie_n\bm\ell +\bPi\left(\cdot,n\right) \right)\left(\grad_h x^A\right),\label{GammaA2}\\ \hspace{-3mm} \Gamma^{\ul u}_{\mc H} & \d \lambda\tr{\bm{\Upsilon}}+ \lambda\Phi\left(\bm\ell,\omega,h,\ell^{(2)}\right).\label{Gammau} \end{align} For the proof of our main Theorem it is crucial to construct a linear combination of the constraint tensors and the functions $\Gamma^A_{\mc H}$ and $\Gamma^{\ul u}_{\mc H}$ that is hierarchically independent of the tensor $\bY$. The explicit combinations are computed in the following Lemma. 
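Schematically, by \eqref{Jdata} and \eqref{Hdata} the constraints ${\bm J}_A$ and $H$ contain the transport derivatives $\big(\wt\nabla_n{\bm{\tau}}\big)_A$ and $n\left(\tr_h{\bm{\Upsilon}}\right)$, respectively, whereas by \eqref{GammaA2} and \eqref{Gammau} (together with \eqref{tau2}) the functions $\Gamma^A_{\mc H}$ and $\Gamma^{\ul u}_{\mc H}$ involve ${\bm{\tau}}$ and $\tr_h{\bm{\Upsilon}}$ only algebraically. The combinations below are designed so that precisely these contributions cancel.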
\begin{lema} \label{lema_indepY} Let $\mc D$ be CHD with FF $\ul u$ and let $\{x^A\}$ be a set of functionally independent functions satisfying $n(x^A)=0$. Express all the tensors in the coordinate basis $\{\ul u,x^A\}$. Then the following combination depends neither on ${\bm{\tau}}$ nor on ${\bm{\Upsilon}}$, \begin{equation} \label{combJ} \mc L_A\left({\bm J}_B,\Gamma^B_{\mc H},n\left(\Gamma^B_{\mc H}\right)\right)\d {\bm J}_A - \dfrac{1}{2}h_{AB}\ n\left(\Gamma^B_{\mc H}\right) +\dfrac{1}{2}\big(\Omega_A{}^C h_{CB}-{\bm\chi}_{BA}-\tr{\bm\chi}\ h_{AB}\big)\Gamma^B_{\mc H}, \end{equation} where $\Omega_A{}^B$ are the connection coefficients of $\wt\nabla_n$ in this basis, i.e., $\wt\nabla_n \partial_{x^A} =\Omega_A{}^B \partial_{x^B}$. Moreover, the combination \begin{equation} \label{combH} \lie\left(H,\Gamma^{\ul u}_{\mc H},n(\Gamma^{\ul u}_{\mc H})\right)\d H-n\left(\lambda^{-1}\Gamma^{\ul u}_{\mc H}\right)-\lambda^{-1}\left(\tr_h{\bm\chi}-2\omega\right)\Gamma^{\ul u}_{\mc H}, \end{equation} does not depend on ${\bm{\Upsilon}}$. \begin{proof} We first prove the claim assuming that the data is written in a CG and then we show that it is actually true in any gauge. Let us start by finding the linear combination $\mc L_A\left({\bm J}_B,\Gamma^B_{\mc H},n\left(\Gamma^B_{\mc H}\right)\right)$ which does not depend on the tensor ${\bm{\tau}}$. Using \eqref{tau2}, in the coordinates $\{x^A\}$ the functions $\Gamma^A_{\mc H}$ defined above read \begin{equation} \label{Gammatau} \Gamma^A_{\mc H} = \square_h x^A +\left(4\omega-\tr_h{\bm\chi}\right)\ell^A+ 4\left(\chi^{\sharp}(\ell^{\sharp})\right)^A -2\left( \lie_n\bm\ell\right)^A-2{\bm{\tau}}^A. \end{equation} Recall that the previous equation remains valid in any gauge. The expression of ${\bm J}_A$ in a CG was found in Theorem \ref{constraints} and takes the form (see \eqref{Jdata}) $${\bm J}_A = -\big(\wt\nabla_n{\bm{\tau}}\big)_A-\left({\bm\chi}\cdot{\bm{\tau}}\right)_A-\tr{\bm\chi}\ {\bm{\tau}}_A + \cdots,$$ where the dots represent terms involving metric data and $\omega$. Thus, the combination ${\bm J}_A-\frac{1}{2}h_{AB}\ n\left(\Gamma^B_{\mc H}\right)$ does not carry any derivative of $\bm\tau$ and its explicit form is $${\bm J}_A-\dfrac{1}{2}h_{AB}\ n\left(\Gamma^B_{\mc H}\right)=\Omega_A{}^B{\bm{\tau}}_B - h^{BC}{\bm\chi}_{BA}{\bm{\tau}}_C-\tr{\bm\chi}\ {\bm{\tau}}_A + \cdots,$$ where the connection coefficients $\Omega_A{}^B$ do not depend on the tensor $\bY$ (see equation \eqref{indpendentY} and the comment below). Consequently the combination $$\mc L_A\left({\bm J}_B,\Gamma^B_{\mc H},n\left(\Gamma^B_{\mc H}\right)\right)={\bm J}_A - \dfrac{1}{2}h_{AB}\ n\left(\Gamma^B_{\mc H}\right) +\dfrac{1}{2}\Omega_A{}^C h_{CB}\Gamma^B_{\mc H} -\dfrac{1}{2}{\bm\chi}_{BA}\Gamma^B_{\mc H}-\dfrac{1}{2}\tr{\bm\chi}\ h_{AB}\Gamma^B_{\mc H}$$ does not depend on $\bm\tau$. Now suppose we change to an arbitrary gauge $\mc D'$. Since $\wt\nabla$ is gauge independent (see Lemma \ref{transformationtensors}) and $\bm\tau$ transforms as in \eqref{transtau}, the combination $\mc L_A\left({\bm J}'_B,\Gamma'{}^B_{\mc H},n'\left(\Gamma'{}^B_{\mc H}\right)\right)$, which in general will differ from $\mc L_A\left({\bm J}_B,\Gamma^B_{\mc H},n\left(\Gamma^B_{\mc H}\right)\right)$, is still independent of $\bm\tau'$, by virtue of item 2. in Theorem \ref{teo_gauge} and the transformation laws in Lemma \ref{transformationtensors}. Now we proceed analogously with the second identity.
By Theorem \ref{constraints} the constraint scalar $H$ reads $$H=n\left(\tr_h{\bm{\Upsilon}}\right)+\left(\tr_h{\bm\chi}-2\omega\right)\tr_h{\bm{\Upsilon}}+\cdots,$$ where the dots are now terms which do not depend on $\bm\Upsilon$. Then the combination $$\lie\left(H,\Gamma^{\ul u}_{\mc H},n(\Gamma^{\ul u}_{\mc H})\right)=H-n\left(\lambda^{-1}\Gamma^{\ul u}_{\mc H}\right)-\lambda^{-1}\left(\tr_h{\bm\chi}-2\omega\right)\Gamma^{\ul u}_{\mc H}$$ does not depend on $\bm\Upsilon$. As before, recalling \eqref{transulchi} and item 1. in Theorem \ref{teo_gauge} it will be still true that the combination $\lie\left(H,\Gamma^{\ul u}_{\mc H},n(\Gamma^{\ul u}_{\mc H})\right)$ does not depend on $\bm\Upsilon$ in any gauge since the constraint tensors $J(n)$ and ${\bm J}_A$ do not depend on $\bm\Upsilon$ (see \eqref{Ray} and \eqref{Jdata}). \end{proof} \end{lema} \section{The Characteristic Problem in General Relativity} \label{sec_CP} So far we have only worked with one abstract hypersurface $\mc H$. However, the characteristic problem is stated on two transverse, null hypersurfaces, so the next step is to construct the appropriate framework to formulate the characteristic problem in terms of CHD. First we define the notion of \textit{double embedding}. \begin{defi} \label{def_double} Let $\mc H$ and $\mc{\ul H}$ be two manifolds with boundary, $(\mc M,g)$ a spacetime and $\mc S_0$ an orientable manifold. Let $\mc D=\{\mc H,\bg,\bm\ell,\ell^{(2)},\bY\}$ and $\ul{\mc D}=\{\ul{\mc H},\ul\bg,\bm{\ul\ell},\ul\ell^{(2)},\ul \bY\}$ be CHD. We say that the pair $\left\{\mc D,\mc{\ul D}\right\}$ is double embedded (with intersection $\mc S_0$) provided that there exist embeddings $\varphi,\ul\varphi,i,\ul i$ making this diagram commutative \begin{diagram} \mc M & \mc H \arrow[l, "\varphi", hook] \\ \mc{\ul H} \arrow[u, "\ul\varphi", hook] & \mc S_0 \arrow[l, "\ul i", hook] \arrow[u, "i", hook] \arrow[lu, hook] \end{diagram} and satisfying \begin{enumerate} \item $\varphi$ and $\ul\varphi$ are embeddings for $\mc H$ and $\ul{\mc H}$ on $(\mc M,g)$ in the sense of Def. \ref{defi_embedded}, \item $i(\mc S_0) = \partial\mc H$ and $\ul i(\mc S_0)=\partial\ul{\mc H}$, \item $i^{\star}\bg = \ul i^{\star}\ul\bg \eqqcolon h_0$ is a Riemannian metric on $\mc S_0$, \item The function $\mu\in \mc F(\mc S_0)$ defined by $\mu\d g(\nu,\ul\nu)$ is negative everywhere on $\mc S_0$, where $\nu$ and $\ul\nu$ are the push-forwards of $n$ and $\ul n$, respectively. \end{enumerate} In general, objects in $\mc{\ul D}$ will carry an underline. \end{defi} In order not to overload the notation we will identify $i(\mc S_0)=\ul i(\mc S_0)=\mc S_0$, $\varphi(\mc H)=\mc H$ and $\ul\varphi(\ul{\mc H})=\ul{\mc H}$. The precise meaning will be clear from the context. From the definition of $\mu$ it follows that under a gauge transformation with parameters $(z,\zeta)$ and $(\ul z,\ul\zeta)$ on $\mc D$ and $\mc{\ul D}$, respectively, $\mu$ transforms as \begin{equation} \label{transmu} \mu'=z^{-1}\ul z^{-1}\mu. \end{equation} The definition of double embedded CHD imposes some additional constraints between the data of both hypersurfaces. Before writing them we prove an intermediate result. First note that setting $\bm\alpha=0$ and $f=0$ in Corollary \ref{CGcorollary} we get the following. \begin{lema} \label{lemacompatible} Let $\mc D$ and $\ul{\mc D}$ be CHD. 
Then there exists a gauge in which the following conditions hold \begin{equation} \label{compatible} \begin{gathered} \bm\ell_{\para}|_{\mc S_0} = \ul{\bm\ell}_{\para}|_{\mc S_0} = 0,\\ \ell^{(2)}|_{\mc S_0} = \ul\ell^{(2)}|_{\mc S_0} = 0. \end{gathered} \end{equation} Moreover, the freedom of this gauge is parametrized by pairs $(z,\zeta)$ and $(\ul z,\ul\zeta)$ satisfying $\zeta|_{\mc S_0}=\ul\zeta|_{\mc S_0}=0$. \end{lema} \begin{prop} \label{prop_compatible} Let $\left\{\mc D,\mc{\ul D}\right\}$ be double embedded CHD written in any gauge in which conditions \eqref{compatible} hold and let $X,Z\in\Gamma(T\mc S_0)$. Then, \begin{equation} \label{nrigg} \ul\nu\st{\mc S_0}{=}\mu \xi, \qquad \nu \st{\mc S_0}{=} \mu\ul\xi, \end{equation} and the following identities hold at $\mc S_0$ \begin{align} \bPi(X,n) + \ul\bPi(X,\ul n) & =-X\left(\log|\mu|\right),\label{competa}\\ \bY(X,Z) & = \mu^{-1}\ul{\bK}(X,Z),\label{compchi}\\ \ul \bY(X,Z) &= \mu^{-1} \bK(X,Z).\label{compulchi} \end{align} \begin{proof} By \eqref{compatible} the rigging $\xi$ is null, orthogonal to $\mc S_0$ and linearly independent to the normal $\nu$. The normal $\ul\nu$ has the same properties, so it follows that $\xi$ and $\ul\nu$ are proportional. Conditions $g(\xi,\nu)=1$ and $g(\nu,\ul\nu)=\mu$ then imply \begin{equation*} \ul\nu\st{\mc S_0}{=}\mu \xi \hspace{0.5cm} \text{and similarly} \hspace{0.5cm} \nu \st{\mc S_0}{=} \mu\ul\xi. \end{equation*} We can therefore interchange $\nu \longleftrightarrow \mu\ul\xi$ and $\ul\nu\longleftrightarrow \mu\xi$ in any spacetime expression at $\mc S_0$ that involves only tangential derivatives. This will be used repeatedly in the following without further notice. From equations \eqref{normalndeco} and \eqref{nablambient} it follows that $\bPi(X,n) \st{\mc S_0}{=} -g\left(\xi,\nabla_X \nu\right)$ and for the same reason $\ul\bPi(X,\ul n) \st{\mc S_0}{=} -g\left(\ul\xi,\nabla_X \ul\nu\right)$. Then, using \eqref{nrigg} it follows $\bPi(X,n)\st{\mc S_0}{=} -g\left(\ul\nu,\nabla_X\ul\xi\right) - \mu^{-1} X(\mu)$ and thus \begin{align*} \bPi(X,n) \st{\mc S_0}{=} g\left(\ul\xi,\nabla_X\ul\nu\right)-X\left(\log|\mu|\right)\st{\mc S_0}{=} -\ul\bPi(X,\ul n)-X\left(\log|\mu|\right), \end{align*} so \eqref{competa} is obtained. Concerning \eqref{compchi}, \begin{align*} 2{\bY}(X,Z)\st{\mc H}{=}\left(\lie_{\xi} g\right)(X,Z) \st{\mc S_0}{=}\left(\lie_{\mu^{-1}\ul\nu} g\right)(X,Z)\st{\mc S_0}{=} 2\mu^{-1}\ul{\bK}(X,Z), \end{align*} where in the last equality we used $\lie_{\mu^{-1}\ul\nu} g = \mu^{-1}\lie_{\ul\nu} g$ acting on vectors tangent to $\mc S_0$. This proves \eqref{compchi} and \eqref{compulchi} is analogous. \end{proof} \end{prop} \begin{rmk} \label{rmk} As a self-consistency check we prove that equations \eqref{competa}-\eqref{compulchi} stay invariant under the remaining gauge freedom. Let $(z,\zeta)$ and $(\ul z,\ul\zeta)$ be gauge parameters satisfying $\zeta|_{\mc S_0}=\ul\zeta|_{\mc S_0}=0$. From \eqref{transPiXn} the LHS of \eqref{competa} transforms as $$\text{LHS}\,'=\bPi'(X,n') +\ul\bPi'(X,\ul n') =\text{LHS}+X(\log|z|)+X(\log|\ul z|).$$ From \eqref{transmu} it is immediate that the RHS of \eqref{competa} transforms in the same way, so the gauge invariance of \eqref{competa} follows. 
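(Explicitly, inserting $\mu'=z^{-1}\ul z^{-1}\mu$ gives $-X\left(\log|\mu'|\right)=-X\left(\log|\mu|\right)+X\left(\log|z|\right)+X\left(\log|\ul z|\right)$, which matches the transformation of the LHS.)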
Finally, \eqref{transY} (together with $\bm\ell_{\para}|_{\mc S_0}=0$ and $\zeta|_{\mc S_0}=0$) gives the transformation law $\bY'(X,Z)=z\bY(X,Z)$, while from $\ul{\bK}'=\ul z^{-1}\ul \bK$ we have $\mu'{}^{-1}\ul \bK' = z \mu^{-1}\ul \bK$, so the gauge invariance of \eqref{compchi}, and by analogy of \eqref{compulchi}, follow. \end{rmk} Apart from the conditions \eqref{competa}-\eqref{compulchi}, there is another condition that must be fulfilled at $\mc S_0$, namely that the pullbacks to $\mc H$ and $\mc{\ul H}$ of the ambient Ricci tensor agree on $\mc S_0$. This can be translated into a completely abstract condition by recalling that the tensor $R_{ab}$ as defined in \eqref{ricci} coincides with the pullback of the ambient Ricci tensor when the data happens to be embedded. This motivates the following abstract Definition. \begin{defi} \label{def_DND} Let $\mc H$ and $\mc{\ul H}$ be two manifolds with boundary, $\mc S_0$ an orientable manifold and $\mu\in\mc{F}(\mc S_0)$ everywhere negative. Let $i:\mc S_0\hookrightarrow\mc H$ and $\ul i:\mc S_0\hookrightarrow \mc{\ul H}$ be two embeddings. Let $\mc D=\{\mc H,\bg,\bm\ell,\ell^{(2)},\bY\}$ and $\ul{\mc D}=\{\ul{\mc H},\ul\bg,\bm{\ul\ell},\ul\ell^{(2)},\ul \bY\}$ be CHD satisfying \begin{enumerate} \item $i(\mc S_0) = \partial\mc H$ and $\ul i(\mc S_0)=\partial\ul{\mc H}$, and \item $i^{\star}\bg = \ul i^{\star}\ul\bg \eqqcolon h_0$ is a Riemannian metric on $\mc S_0$. \item $i^{\star} {\bm{\mc R}} = \ul i^{\star}\ul{\bm{\mc R}}$. \end{enumerate} We say that the triple $\{\mc D,\mc{\ul D},\mu\}$ is double null data (DND) provided that under the gauge restrictions of Lemma \ref{lemacompatible} the following conditions hold at $\mc S_0$ \begin{align} \bPi(X,n) + \ul\bPi(X,\ul n) & =-X\left(\log|\mu|\right),\label{competa2}\\ \bY(X,Z) & = \mu^{-1}\ul{\bK}(X,Z),\label{compchi2}\\ \ul \bY(X,Z) &= \mu^{-1} \bK(X,Z).\label{compulchi2} \end{align} \end{defi} \begin{rmk} Condition 1. implies that $\mc H$ and $\ul{\mc H}$ are of the same dimension (we refer to it as the dimension of the DND). Condition 2. implies that $\bg$ and $\ul\bg$ have signature $(0,1,...,1)$ at the boundaries $\partial\mc H$ and $\partial{\ul{\mc H}}$, respectively. Since $\bg$ and $\ul\bg$ have exactly one degeneration direction at every point it follows that they have this signature everywhere. Condition 3. will play no role in this paper because we are interested in solving the characteristic problem in vacuum and thus this condition will be automatically fulfilled. \end{rmk} Next we extend the notion of gauge transformation and embeddedness to the context of double null data. \begin{defi} Let $\{\mc D,\mc{\ul D},\mu\}$ be DND and $z\in\mc F^{\star}(\mc H)$, $\ul z\in\mc F^{\star}(\mc{\ul H})$, $\zeta\in\Gamma(T\mc H)$ and $\ul\zeta\in\Gamma(T\mc{\ul H})$. The transformed data is given by $\{\mc D',\mc{\ul D}',\mu'\}$, where $\mc D'$ and $\mc{\ul D}'$ are the transformed CHD in the sense of Definition \ref{defi_gauge} and $\mu' \d z^{-1}\ul z^{-1}\mu$ on $\mc S_0$. \end{defi} \begin{defi} \label{def_embDND} Let $\{\mc D,\mc{\ul D},\mu\}$ be DND and $(\mc M,g)$ a spacetime. We say that $\{\mc D,\mc{\ul D},\mu\}$ is embedded double null data on $(\mc M,g)$ provided that the pair $\{\mc D,\mc{\ul D}\}$ is double embedded and $\mu = g(\nu,\ul\nu)$, where $\nu$ and $\ul\nu$ are the spacetime versions of $n$ and $\ul n$, respectively. \end{defi} \begin{obs} Equations \eqref{competa2}-\eqref{compulchi2} together with items 2. and 3. in Def. 
\ref{def_DND} can be interpreted as necessary conditions for the data in order to ``match'' in the embedded case. While conditions $\bg=\ul\bg$ and ${\bm{\mc R}}=\ul{\bm{\mc R}}$ are already gauge invariant (by virtue of \eqref{transgamma} and Theorem \ref{teo_gauge}), conditions \eqref{competa2}-\eqref{compulchi2} are not. However, Remark \ref{rmk} guarantees that \eqref{competa2}-\eqref{compulchi2} are invariant under the remaining freedom in Lemma \ref{lemacompatible} (whose existence is always granted), and thus the notion of double null data is well-defined. A definition of double null data where the compatibility conditions are written gauge-covariantly will be developed in a forthcoming paper. \end{obs} The next proposition shows that a gauge transformation on a double null data does not affect its embeddedness properties on a spacetime. For general hypersurface data this is a known fact (Proposition 3.5 on \cite{Marc2}). Here we show that the extra structure involved in the double null data does not spoil this property. \begin{prop} Let $\{\mc D,\mc{\ul D},\mu\}$ be embedded double null data in a spacetime $(\mc M,g)$ with embeddings $f,\ul f$ and riggings $\xi,\ul\xi$, respectively. For any pair of gauge parameters $(z,\zeta)$ and $(\ul z,\ul\zeta)$ belonging to the subgroup of Lemma \ref{lemacompatible}, the transformed data $\{\mc D',\mc{\ul D}',\mu'\}$ is embedded double null data in the same spacetime $(\mc M,g)$, with the same embeddings $f,\ul f$ and with riggings $\xi',\ul\xi'$ given by $$\xi' = z(\xi+f_{\star}\zeta), \qquad \ul\xi' = \ul z(\ul\xi+\ul f_{\star}\ul\zeta).$$ \begin{proof} From the definition of embedded double null data (Def. \ref{def_embDND}) we need to check three things. Firstly, that \begin{equation} \label{gauge1} f^{\star}\left( g(\xi',\cdot)\right) = z\left(\bm\ell + \bg(\zeta,\cdot)\right), \qquad \ul f^{\star}\left( g(\ul\xi',\cdot)\right) = \ul z\left(\ul{\bm\ell} + \bg(\ul\zeta,\cdot)\right), \end{equation} \begin{equation} \label{gauge2} g(\xi',\xi') = z^2\left(\ell^{(2)} + 2\bm\ell(\zeta)+\bg(\zeta,\zeta)\right), \qquad g(\ul\xi',\ul\xi') = \ul z^2\big(\ul\ell^{(2)} + 2\bm{\ul\ell}(\ul\zeta)+\bg(\ul\zeta,\ul\zeta)\big), \end{equation} and \begin{equation} \label{gauge3} \frac{1}{2}f^{\star}\big(\lie_{\xi'} g\big) = z \bY + \bm\ell\otimes_s \dd z + \dfrac{1}{2}\lie_{z\zeta}\bg, \qquad \frac{1}{2}\ul f^{\star}\left(\lie_{\ul\xi'} g\right) = \ul z \ul \bY + \ul{\bm\ell}\otimes_s \dd \ul z + \dfrac{1}{2}\lie_{\ul z\ul\zeta}\ul\bg. \end{equation} Secondly, that \begin{equation} \label{gauge4} g(\nu',\ul\nu') = z^{-1}\ul z^{-1} g(\nu,\ul\nu), \end{equation} and finally, that the compatibility conditions \eqref{competa2}-\eqref{compulchi2} still hold in the new gauge. Equations \eqref{gauge1}-\eqref{gauge3} are proven in Proposition 3.5 of \cite{Marc2} in the context of general embedded hypersurface data. Concerning \eqref{gauge4}, from $\xi' = z(\xi+f_{\star}\zeta)$ and $\ul\xi' = \ul z(\ul\xi+\ul f_{\star}\ul\zeta)$ together with $g(\xi',\nu')=1$ and $g(\ul\xi',\ul\nu')=1$ it turns out that $\nu' = z^{-1}\nu$ and $\ul\nu'=\ul z^{-1}\ul\nu$, so \eqref{gauge4} also holds. Finally, from equations \eqref{gauge1}-\eqref{gauge3} and Remark \ref{rmk} one concludes that the compatibility conditions hold too. 
\end{proof} \end{prop} \begin{lema} \label{lema_compHG} Let $\{\mc D,\mc{\ul D},\mu\}$ be DND and consider two set of independent functions $\{\ul u, x^{A}\}$ and $\{u,\ul x^{A}\}$ on $\mc H$ and $\mc{\ul H}$, respectively, satisfying $n(\ul u)\neq 0$ and $\ul n(u)\neq 0$. Then there exists a unique harmonic gauge w.r.t $\{\ul u,x^{A}\}$ and $\{u,\ul x^{A}\}$ in $\mc D$ and $\ul{\mc D}$, respectively, in which \eqref{compatible} hold together with $\mu=\lambda=\ul\lambda$ on $\mc S_0$. \begin{proof} Let $\mc D$ and $\mc{\ul D}$ be CHD written in a HG. By Theorem \ref{teo_HG}, the HG is defined uniquely for each choice of $z|_{\mc S_0}$ and $\zeta|_{\mc S_0}$. From Corollary \ref{CGcorollary} we can gauge transform within the HG and the transformed data $\mc D'$ and $\mc{\ul D}'$ satisfy $\bm\ell'_{\para}=0$, $\ell'{}^{(2)}=0$, $\bm{\ul\ell}'_{\para}=0$ and $\ul\ell'{}^{(2)}=0$ on $\mc S_0$. By the same Corollary the remaining gauge freedom is parametrized by the pair $(z_0,\ul z_0)$. Recalling the transformation of $\lambda$, namely $\lambda'=z^{-1}\lambda$, and the one of $\mu$ (see \eqref{transmu}) we can choose $\ul z_0=\mu\lambda^{-1}$ and $z_0=\mu\ul\lambda^{-1}$ so that $\mu'=\lambda'=\ul\lambda'$. \end{proof} \end{lema} \begin{prop} \label{prop_compatible2} Let $\{\mc D,\mc{\ul D}\}$ be double embedded CHD on a spacetime $(\mc M,g)$ with riggings $\xi$ and $\ul\xi$, respectively. Consider a set of coordinates $\{u,\ul u,x^A\}$ on $\mc M$ satisfying \begin{enumerate} \item $n\left(x^A|_{\mc H}\right)=0$ and $\ul n\left(x^A|_{\mc{\ul H}}\right)=0$, \item $\lambda\d n\left(\ul u|_{\mc H}\right)\neq 0$ and $\ul\lambda\d \ul n\left(u|_{\mc{\ul H}}\right)\neq 0$, \item $\ul u|_{\mc{\ul H}}=0$ and $u|_{\mc H}=0$, \item $\xi\st{\mc H}{=}\partial_u$ and $\ul\xi=\partial_{\ul u}$ on $\ul{\mc H}$. \end{enumerate} Let $X\in\Gamma(T\mc S_0)$. Then the following relations hold at $\mc S_0$, \begin{align} 2\bY(n,n) &=\ul\lambda\ul n\big(\ul\ell^{(2)}\big) ,\label{compomega}\\ 2\ul \bY(\ul n,\ul n) &=\lambda n\left(\ell^{(2)}\right) ,\label{compulomega}\\ 2\bPi(X,n) &=\left(\lie_{\ul n}\bm{\ul\ell}\right)(X) -X\left(\log|\ul\lambda|\right) - \left(\lie_n\bm\ell\right)(X),\label{comptau2}\\ 2\ul\bPi(X,\ul n) &=\left( \lie_{n}\bm{\ell}\right)(X) -X\left(\log|\lambda|\right) - \left(\lie_{\ul n}\bm{\ul\ell}\right)(X).\label{compultau2} \end{align} \begin{proof} From items (1) and (2) we have $\nu \st{\mc H}{=} \lambda \partial_{\ul u}$ and $\ul\nu = \ul\lambda \partial_{u}$ on $\mc{\ul H}$. This and (3) imply that we can replace $\nu\leftrightarrow \lambda\partial_{\ul u}$, $\xi \leftrightarrow \partial_u$ (resp. $\ul\nu\leftrightarrow \ul\lambda\partial_{u}$, $\ul\xi \leftrightarrow \partial_{\ul u}$) in any spacetime calculation at $\mc H$ (resp. $\ul{\mc H}$) that only involves tangential derivatives. We apply this without further warning. From $g(\xi,\nu)=1$ and $g(\ul\xi,\ul\nu)=1$ it follows that $\lambda\st{\mc S_0}{=}\ul\lambda$. 
Indeed, $1=g(\xi,\nu)= \lambda g(\partial_u,\partial_{\ul u}) \st{\mc S_0}{=} \lambda g\left(\ul\lambda^{-1}\ul\nu , \ul\xi\right) = \lambda\ul\lambda^{-1}$, so \begin{align*} 2\bY(n,n) \st{\mc H}{=} \left(\lie_{\xi} g\right)(\nu,\nu)\st{\mc H}{=}\left(\lie_{\partial_u} g\right)\left(\lambda\partial_{\ul u},\lambda\partial_{\ul u}\right)\st{\mc S_0}{=}\lambda^2\partial_u\left(g(\partial_{\ul u},\partial_{\ul u})\right)\st{\mc S_0}{=}\ul\lambda\ul n\big(\ul\ell^{(2)}\big), \end{align*} where in the third equality we used that $\lambda\partial_{\ul u}$ is null on $\mc S_0$. Hence \eqref{compomega}, and by analogy \eqref{compulomega}, follow. Next we prove \eqref{comptau2} and \eqref{compultau2}. Let $X\in\Gamma(T\mc S_0)$ and consider any extension of it outside $\mc S_0$. Equation \eqref{PiLienell} gives $2\bF(X,n)= -\left(\lie_n\bm\ell\right)(X)$ and from \eqref{Yembedded} one gets $2\bY(X,n) \st{\mc H}{=} \left(\lie_{\xi} g\right)(X,\nu)$ and thus \begin{align*} 2\bY(X,n) &\st{\mc S_0}{=} \ul\lambda\left( \partial_{u}\left(g(X,\partial_{\ul u})\right) - g\left(\lie_{\partial_u}X,\partial_{\ul u}\right) - g\left(X,[\partial_u,\partial_{\ul u}]\right)\right) \\ &\st{\mc S_0}{=} \ul n\left( g(X,\partial_{\ul u})\right) - g\left(\ul\lambda\lie_{\partial_u}X,\partial_{\ul u}\right) \\ &\st{\mc S_0}{=}\left(\lie_{\ul n}\bm{\ul\ell}\right)(X) -X\left(\log|\lambda|\right), \end{align*} where we used $\lambda\st{\mc S_0}{=} \ul\lambda$ and in the last equality we inserted $g(\partial_{\ul u},X)= g(\xi,X)=\bm\ell(X)$ and used $\ul\nu = \ul\lambda \partial_u$ so that $\ul\lambda \lie_{\partial_u} X = \lie_{\ul n}X+X\left(\log|\ul\lambda|\right)\ul n$ on $\mc{\ul H}$. Observe that the result is independent of the extension of $X$. Hence \eqref{comptau2} (and similarly \eqref{compultau2}) follows. \end{proof} \end{prop} In the following Proposition we prove that conditions \eqref{compomega}--\eqref{compultau2} are always satisfied for any DND (i.e. at the abstract level) in the harmonic gauge of Lemma \ref{lema_compHG}. \begin{prop} \label{prop_compatible3} Let $\{\mc D,\mc{\ul D},\mu\}$ be DND written in the harmonic gauge of Lemma \ref{lema_compHG}, $X\in\Gamma(T\mc S_0)$ and $\ul u$, $u$ functions on $\mc H$ and $\ul{\mc H}$ satisfying $\lambda\d n(\ul u)\neq 0$ and $\ul\lambda\d \ul n(u)\neq 0$. Then the following relations at $\mc S_0$ hold: \begin{align} 2\bY(n,n)&=\ul\lambda\ul n\big(\ul\ell^{(2)}\big) \label{omega},\\ 2\ul \bY(\ul n,\ul n) &=\lambda n\left(\ell^{(2)}\right) \label{ulomega},\\ 2\bPi(X, n) &=\left(\lie_{\ul n}\bm{\ul\ell}\right)(X) -X\left(\log|\ul\lambda|\right) - \left(\lie_n\bm\ell\right)(X),\label{tau}\\ 2\ul\bPi(X,\ul n) &= \left(\lie_{n}\bm{\ell}\right)(X) -X\left(\log|\lambda|\right) - \left(\lie_{\ul n}\bm{\ul\ell}\right)(X).\label{ultau} \end{align} \begin{proof} The derivative $n\left(\ell^{(2)}\right)$ on $\mc S_0$ in the HG is obtained from the condition ${\Gamma^{\ul u}=0}$. Directly from \eqref{Gammau} and the fact that $\Phi\st{\mc S_0}{=}- n\left(\ell^{(2)}\right)$ (see \eqref{Phi}) it follows that ${n\left(\ell^{(2)}\right)\st{\mc S_0}{=}\tr_h{\bm{\Upsilon}}}$. By \eqref{Gammau0} the condition $\Gamma^{\ul u}_{\ul{\mc H}}=0$ is equivalent to $4\ul\omega=\tr_h\ul{\bm\chi} \st{\mc S_0}{=}\tr_{\ul P}\ul \bK$. We can connect both by means of \eqref{compchi2} and get $$2\ul \bY(\ul n,\ul n)= 4 \ul\omega = \lambda n \left(\ell^{(2)}\right).$$ This proves \eqref{ulomega} and the analogous \eqref{omega}. 
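Spelled out, the chain of equalities behind \eqref{ulomega} is \begin{equation*} 2\ul \bY(\ul n,\ul n)= 4\ul\omega \st{\mc S_0}{=} \tr_{\ul P}\ul \bK \st{\mc S_0}{=} \mu\,\tr_h{\bm{\Upsilon}} \st{\mc S_0}{=} \lambda\, n\left(\ell^{(2)}\right), \end{equation*} where the second equality is the condition $\Gamma^{\ul u}_{\ul{\mc H}}=0$, the third follows from \eqref{compchi2}, and the last one combines $\tr_h{\bm{\Upsilon}}\st{\mc S_0}{=}n\left(\ell^{(2)}\right)$ with $\mu\st{\mc S_0}{=}\lambda$.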
Concerning the other two relations we use $\Gamma^A_{\mc H}=0$ and its underlined version. Evaluating \eqref{GammaA2} at $\mc S_0$ one gets \begin{equation} \left(\lie_n\bm\ell\right)(X) +\bPi(X,n) \st{\mc S_0}{=} \left(\lie_{\ul n}\bm{\ul\ell}\right)(X) +\ul\bPi(X,\ul n). \end{equation} Taking into account \eqref{competa2} as well as $\mu\st{\mc S_0}{=}\lambda\st{\mc S_0}{=}\ul\lambda$, equations \eqref{tau} and \eqref{ultau} follow. \end{proof} \end{prop} As we already mentioned GR is a geometric theory, so the Einstein equations as a PDE problem cannot have a unique solution. The standard approach to deal with this issue is to solve the reduced Einstein equations (with cosmological constant $\Lambda$) instead, namely (cf. \cite{Luk,Rendall}) \begin{equation} \label{reduced} R^h_{\alpha\beta}\d R_{\alpha\beta} + g_{\mu(\alpha}\Gamma^{\mu}{}_{,\beta)}=\dfrac{2\Lambda}{D-2} g_{\alpha\beta}, \end{equation} where $D=m+1$ is the dimension of the spacetime. This system admits a well-posed initial value problem essentially because the principal symbol of \eqref{reduced} is hyperbolic. In the following Lemma we compute the tangent components of \eqref{reduced} on any embedded CHD in terms of the constraint tensors and the $\Gamma$-functions (see \eqref{Gammau0}-\eqref{Gammau}). \begin{lema} \label{lema_red} Let $\mc D$ be embedded CHD on $(\mc M,g)$ with corresponding rigging $\xi$ and $\{u,\ul u,x^A\}$ a coordinate system on $\mc M$ satisfying $\xi(x^A)\st{\mc H}{=}\xi(\ul u)\st{\mc H}{=}0$, $\xi(u)\st{\mc H}{=}1$, $u|_{\mc H}=0$, $n(x^A|_{\mc H})=0$ and that $\ul u|_{\mc H}$ is a foliation function of $\mc D$. Suppose also that $(\mc M,g)$ is a solution of the reduced Einstein equations. Then, \begin{align} J(n) + n\left(\Gamma^{u}_{\mc H}\right) &=0,\label{reducedRay}\\ \bm J_A+\dfrac{1}{2}\partial_{x^A}\left(\Gamma^u_{\mc H}\right) + \dfrac{1}{2} \bm\ell_A\ n\left(\Gamma^u_{\mc H}\right)+\dfrac{1}{2} h_{AB} \ n\left(\Gamma^B_{\mc H}\right)&=0,\label{reducedJA0}\\ \hspace{-0.25cm} \bm{\mc R}_{AB} + h_{C(B}\ \partial_{x^{A)}}\Gamma^C_{\mc H} + \bm\ell_{(A}\ \partial_{x^{B)}}\left(\Gamma^u_{\mc H}\right) & = \dfrac{2\Lambda}{m-1} h_{AB}.\label{reducedH0} \end{align} \begin{proof} We have already shown $\nu\st{\mc H}{=}\lambda\partial_{\ul u}$ so contracting \eqref{reduced} with $\nu^{\alpha}\nu^{\beta}$ and taking into account that $\nu$ is null and satisfies $g(\nu,\partial_{x^A})\st{\mc H}{=}0$ it yields \begin{align*} R^h_{\alpha\beta}\nu^{\alpha}\nu^{\beta} \st{\mc H}{=} R_{\alpha\beta}\nu^{\alpha}\nu^{\beta} + g(\partial_u,\nu) \ \nu\left(\Gamma^u_{\mc H}\right)\st{\mc H}{=} J(n) +n\left(\Gamma^u_{\mc H}\right), \end{align*} where we used $R_{\alpha\beta}\nu^{\alpha}\nu^{\beta} \st{\mc H}{=} J(n)$ and that $\xi\st{\mc H}{=}\partial_u$. Since $(\mc M,g)$ is a solution of \eqref{reduced}, equation \eqref{reducedRay} follows. 
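Note that in the first equality only the term with $\mu=u$ contributes to the contraction of $g_{\mu(\alpha}\Gamma^{\mu}{}_{,\beta)}$ with $\nu^{\alpha}\nu^{\beta}$, since $g(\partial_{\ul u},\nu)\st{\mc H}{=}\lambda^{-1}g(\nu,\nu)=0$ and $g(\partial_{x^A},\nu)\st{\mc H}{=}0$, while $g(\partial_u,\nu)\st{\mc H}{=}g(\xi,\nu)=1$.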
Similarly the ``normal-tangent'' components of \eqref{reduced} are \begin{align*} R^h_{\alpha\beta}\nu^{\alpha}(\partial_{x^A})^{\beta} & \st{\mc H}{=} R_{\alpha\beta}\nu^{\alpha}(\partial_{x^A})^{\beta} + \dfrac{1}{2} g(\partial_u,\nu) \partial_{x^A} \left(\Gamma^u_{\mc H}\right) \\ &\quad + \dfrac{1}{2} g(\partial_u,\partial_{x^A}) \nu \left(\Gamma^u_{\mc H}\right) + \dfrac{1}{2} g(\partial_{x^A},\partial_{x^B})\ \nu\left(\Gamma^B_{\mc H}\right) \\ &\st{\mc H}{=} {\bm J}_A+\dfrac{1}{2}\partial_{x^A}\left(\Gamma^u_{\mc H}\right) + \dfrac{1}{2} \bm\ell_A\ n\left(\Gamma^u_{\mc H}\right)+\dfrac{1}{2} n\left(\Gamma^B_{\mc H}\right)h_{BA}, \end{align*} where we used $g(\partial_u,\partial_{x^A})\st{\mc H}{=}g(\xi,\partial_{x^A})\st{\mc H}{=}\bm\ell_A$ and $g(\partial_{x^A},\partial_{x^B})\st{\mc H}{=} h_{AB}$. Finally the ``tangent-tangent'' component of \eqref{reduced} is \begin{align*} R^h_{\alpha\beta}(\partial_{x^A})^{\alpha}(\partial_{x^B})^{\beta} & = R_{\alpha\beta}(\partial_{x^A})^{\alpha}(\partial_{x^B})^{\beta} + \dfrac{1}{2} g(\partial_u,\partial_{x^A}) \partial_{x^B}\Gamma^u_{\mc H} + \dfrac{1}{2} g(\partial_u,\partial_{x^B}) \partial_{x^A}\Gamma^u_{\mc H}\\ &\quad\, + \dfrac{1}{2} g(\partial_{x^C},\partial_{x^A}) \partial_{x^B}\Gamma^C_{\mc H} + \dfrac{1}{2} g(\partial_{x^C},\partial_{x^A}) \partial_{x^B}\Gamma^C_{\mc H} \end{align*} and after using $R^h_{\alpha\beta}(\partial_{x^A})^{\alpha}(\partial_{x^B})^{\beta} =\frac{2\Lambda}{m-1} h_{AB}$ equation \eqref{reducedH0} follows. \end{proof} \end{lema} We are ready to state and prove the main result of this paper. \begin{teo} \label{main} Let $\{\mc D,\mc{\ul D},\mu\}$ be double null data of dimension $m> 1$ as defined in Def. \ref{def_DND} satisfying the abstract constraint equations \begin{equation} \label{constraintsL} {\bm{\mc R}} = \dfrac{2\Lambda}{m-1}\bg \quad \text{and}\quad \ul{\bm{\mc R}}=\dfrac{2\Lambda}{m-1}\ul\bg, \end{equation} where ${\bm{\mc R}}$ is defined in \eqref{ricci2}, $\ul{\bm{\mc R}}$ is its underlined version and $\Lambda\in\real$. Then, after restricting the data $\{\mc D,\mc{\ul D},\mu\}$ if necessary, there exists a spacetime $(\mc M,g)$ solution of the $\Lambda$-vacuum Einstein equations such that $\{\mc D,\mc{\ul D},\mu\}$ is embedded double null data on $(\mc M,g)$ in the sense of Def. \ref{def_embDND}. Moreover for any two such spacetimes $(\mc M,g)$ and $(\mc{\wh M},\wh g)$ there exist neighbourhoods of $\mc H\cup\mc{\ul H}$, $\mc U\subseteq\mc M$ and $\wh{\mc U}\subseteq\mc{\wh M}$, and a diffeomorphism $\varphi: \mc U\to \wh{\mc U}$, such that $\varphi^{\star}\wh g=g$. \begin{proof} We will use ${\bm{\mc R}}_{ab}=\frac{2\Lambda}{m-1}\bg_{ab}$ (and its underlined version) in the forms $J(n)=0$, ${\bm J}_A=0$ and ${\bm{\mc R}}_{AB}=\frac{2\Lambda}{m-1} h_{AB}$ (see \eqref{hamil2}, \eqref{momentum2} and \eqref{constrainttensors}). The first step of the proof is to solve the reduced Einstein equations. Let $\{\ul u, x^A\}$ and $\{u, x^A\}$ be coordinates on $\mc H$ and $\mc{\ul H}$ satisfying $\ul u\ge 0$, $\lambda\d n(\ul u)\neq 0$, $n(x^A)=0$ on $\mc H$ and $u\ge 0$, $\ul\lambda\d \ul n(u)\neq 0$, $\ul n(x^A)=0$ on $\mc{\ul H}$, as well as $\mc S_0=\{u=\ul u=0\}$. 
Consider a manifold $\mc N$ with coordinates $\{u,\ul u, x^A\}$ defined as $\mc N=\{u,\ul u\ge 0\}\subset \real^2\times\mc S$ and two embeddings $f:\mc H\hookrightarrow\mc N$ and $\ul f:\mc{\ul H}\hookrightarrow\mc N$ such that $f\left(\mc H\right)=\{u=0\}$, $\ul f\left(\ul{\mc H}\right)=\{\ul u=0\}$, $f(\mc H\cup\mc{\ul H})=\mc S_0$ and $\{\ul u|_{\mc H},x^A|_{\mc H}\}$ and $\{u|_{\mc H},x^A|_{\mc H}\}$ are the given coordinates on $\mc H$ and $\ul{\mc H}$, respectively. Throughout the proof we identify $\mc H$ and $\mc{\ul H}$ with their images under $f$ and $\ul f$, respectively. We want to construct a metric $g$ solution of the reduced Einstein equations on some neighbourhood $\mc M\subseteq \mc N$ of $\mc S_0$. From Theorem 1 of \cite{Rendall} we need to provide initial data for the metric $g_{\mu\nu}$ on $\mc H\cup\ul{\mc H}$ continuous at $\mc S_0$ and with smooth restrictions on $\mc H$ and $\ul{\mc H}$. In order to do so we write $\{\mc D,\mc{\ul D},\mu\}$ in the gauge of Lemma \ref{lema_compHG} w.r.t. the functions $\{\ul u, x^A\}$ and $\{u,x^A\}$ on $\mc H$ and $\mc{\ul H}$, respectively, and we provide the following initial data on $\mc H$ $$g_{u\, u}=\ell^{(2)},\qquad g_{u \, \ul u}=\lambda^{-1}, \qquad g_{u\, A} = \bm\ell_A, \qquad g_{\ul u\, \ul u}=0, \qquad g_{\ul u\, A}=0, \qquad g_{AB}=h_{AB},$$ and on $\mc{\ul H}$, $$g_{u\, u}=0,\qquad g_{u \, \ul u}=\ul\lambda^{-1}, \qquad g_{u\, A} = 0, \qquad g_{\ul u\, \ul u}=\ul\ell^{(2)}, \qquad g_{\ul u\, A}=\bm{\ul\ell}_A, \qquad g_{AB}={\ul h}_{AB},$$ in the coordinates $\{u,\ul u, x^A\}$. Since $\{\mc D,\mc{\ul D},\mu\}$ is written in a gauge in which \eqref{compatible} and $\lambda\st{\mc S_0}{=}\ul\lambda$ hold, the functions $g_{\mu\nu}$ are continuous on $\mc H\cup\ul{\mc H}$ and their restrictions to $\mc H$ and $\mc{\ul H}$ are smooth. Then from Rendall's Theorem 1 \cite{Rendall} there exists an open neighbourhood $U$ of $\mc S_0$ and a unique metric $g$ on $\mc M\d U\cap\mc N$ solution of the reduced Einstein equations \eqref{reduced} such that the components of $g$ in the coordinates $\{u,\ul u,x^A\}$ on $U\cap\left(\mc H\cup\ul{\mc H}\right)$ coincide with the given ones. By construction $f^{\star}\left(g(\partial_u,\cdot)\right)=\bm\ell$, $f^{\star}\left(g(\partial_u,\partial_u)\right)=\ell^{(2)}$, $\ul f^{\star}\left(g(\partial_{\ul u},\cdot)\right)=\bm{\ul\ell}$ and $\ul f^{\star}\left(g(\partial_{\ul u},\partial_{\ul u})\right)=\ul\ell^{(2)}$, so the only riggings that have a chance to make the data embedded in the sense of Definition \ref{defi_embedded} are $\xi=\partial_u$ and $\ul\xi=\partial_{\ul u}$, respectively. Let $\wt{\bY}$ and $\wt{\ul \bY}$ be defined as $\wt{\bY}\d\frac{1}{2}f^{\star}\left(\lie_{\xi} g\right)$ and $\wt{\ul \bY}\d\frac{1}{2}\ul f^{\star}\big(\lie_{\ul\xi} g\big)$. Then $\wt{\mc D}=\{\mc H,\wt\bg\d\bg,\wt{\bm\ell}\d\bm\ell,\wt\ell{}^{(2)}\d\ell^{(2)},\wt{\bY}\}$ and $\wt{\ul{\mc D}}=\{\mc{\ul H},\wt{\ul\bg}\d\ul\bg,\wt{\bm{\ul\ell}}\d\bm{\ul\ell},\wt{\ul\ell}{}^{(2)}\d\ul{\ell}^{(2)},\wt{\ul \bY}\}$ are embedded CHD on $(\mc M,g)$ with embeddings $f$, $\ul f$ and riggings $\xi$, $\ul\xi$, respectively, as in Def. \ref{defi_embedded} (in what follows we denote with a tilde the expressions depending on $\wt{\mc D}$ and $\wt{\ul{\mc D}}$). By construction the metric part of $\wt{\mc D},\wt{\ul{\mc D}}$ coincides with the one of $\{\mc D,\mc{\ul D},\mu\}$ and $g\left(\wt\nu,\wt{\ul\nu}\right)=\mu$. Hence the pair $\{\wt{\mc D},\wt{\ul{\mc D}}\}$ is double embedded in the sense of Def. 
\ref{def_double}. To prove that $\{\mc D,\mc{\ul D},\mu\}$ is actually embedded DND we need to show that the original tensors $\bY$ and $\ul \bY$ coincide with the embedded ones $\wt{\bY}$ and $\wt{\ul \bY}$, respectively.\\ For the existence part of the Theorem we need to prove two things: (1) that the solution of the reduced EFE is indeed a solution of the EFE and (2) that the tensors $\bY$ and $\ul \bY$ coincide with $\wt{\bY}$ and $\wt{\ul \bY}$, respectively. In order to prove (1) we will show that the coordinates are harmonic w.r.t.\ the metric $g$. To prove (2) we will write homogeneous ODEs for the tensors $\bY-\wt \bY$ and $\ul \bY-\wt{\ul \bY}$ and show that they vanish on $\mc S_0$. Both goals are achieved simultaneously.\\ From Propositions \ref{prop_compatible2} and \ref{prop_compatible3} applied to the data $\{\wt{\mc D},\wt{\ul{\mc D}}\}$ and to the original DND $\{\mc D,\mc{\ul D},\mu\}$, respectively, it follows that $\wt{\bY}(n,n)\st{\mc S_0}{=} \bY(n,n)$, $\wt{\ul \bY}(\ul n,\ul n)\st{\mc S_0}{=} \ul \bY(\ul n,\ul n)$, $\wt{\bY}(X,n) \st{\mc S_0}{=} \bY(X,n)$ and $\wt{\ul \bY}(X,\ul n)\st{\mc S_0}{=}\ul \bY(X,\ul n)$ for every $X\in\Gamma(T\mc S_0)$, or in terms of the foliation tensors, $\wt\omega \st{\mc S_0}{=} \omega$, $\wt{\ul\omega}\st{\mc S_0}{=}\ul\omega$, $\wt{\bm{\tau}}\st{\mc S_0}{=}\bm\tau$ and $\wt{\ul{\bm{\tau}}}\st{\mc S_0}{=}\bm{\ul\tau}$. Moreover, from Proposition \ref{prop_compatible} applied to $\{\wt{\mc D},\wt{\ul{\mc D}}\}$ and Def. \ref{def_DND} applied to $\{\mc D,\mc{\ul D},\mu\}$, $\wt{{\bm{\Upsilon}}}={\bm{\Upsilon}}$ and $\wt{\ul{\bm{\Upsilon}}}=\ul{\bm{\Upsilon}}$ on $\mc S_0$. In order to prove that these equations hold everywhere and not only on $\mc S_0$, we start by considering the tilde version of equation \eqref{reducedRay} on $\mc H$, namely \begin{equation} \label{red} \wt J(n) +n\big(\wt\Gamma^u_{\mc H}\big)=0. \end{equation} From the abstract constraint $J(n)=0$ it follows that $2\omega \tr_h{\bm\chi} = -n\left(\tr_h{\bm\chi}\right)-|{\bm\chi}|^2$ (see \eqref{Ray} and recall that $J(n)$ takes the same form in any gauge, as shown in \eqref{Jnanygauge}). Since the metric data from $\mc D$ and $\mc{\wt D}$ coincides, $$-\wt{J}(n) = n\left(\tr_h{\bm\chi}\right) + 2\wt{\omega} \tr_h{\bm\chi} + |{\bm\chi}|^2 = 2(\wt\omega-\omega)\tr{\bm\chi}.$$ Recall that in the harmonic gauge $\tr{\bm\chi}-4\omega=0$ (see \eqref{combGamma}) and therefore $\wt\Gamma^u_{\mc H} = \tr_h{\bm\chi} - 4\wt\omega = 4(\omega-\wt\omega)$. Hence equation \eqref{red} can be rewritten as $- 2(\wt\omega-\omega)\tr{\bm\chi}+4n\left(\omega-\wt\omega\right)=0$, which is a homogeneous ODE for $\wt\omega-\omega$; together with $\wt\omega \st{\mc S_0}{=} \omega$ this implies $\wt\omega=\omega$ on $\mc H$ and then also $\wt\Gamma^u_{\mc H}=0$. The corresponding argument applied to $\ul{\mc H}$ gives $\wt{\Gamma}_{\mc{\ul H}}^{\ul u}=0$ and $\ul\omega=\wt{\ul\omega}$ on $\mc{\ul H}$. Taking into account $\wt\Gamma^u_{\mc H}=0$, the tilde version of equation \eqref{reducedJA0} reads \begin{equation} \label{reducedJA} \bm{\wt J}_A + \dfrac{1}{2} n\big(\wt{\Gamma}^B_{\mc H}\big)h_{AB}=0. \end{equation} Since ${\bm J}_A=0$ and the functions $\Gamma^A_{\mc H}$ \eqref{GammaA2} vanish in the harmonic gauge in which $\mc D$ is written, the combination $\lie_A\left({\bm J}_B,\Gamma^B_{\mc H}, n\left(\Gamma^B_{\mc H}\right)\right)$ defined in \eqref{combJ} also vanishes.
From Lemma \ref{lema_indepY} this particular combination depends neither on ${\bm{\tau}}$ nor on ${\bm{\Upsilon}}$, so its tilde version also vanishes, $\mc L_A\big(\bm{\wt J}_B,\wt \Gamma^B_{\mc H},n\big(\wt \Gamma^B_{\mc H}\big)\big)=0$ (recall that $\omega$ is not problematic anymore). This gives $\bm{\wt{J}}_A$ in terms of $\wt{\Gamma}_{\mc H}^A$, $n(\wt{\Gamma}_{\mc H}^A)$, which, inserted in \eqref{reducedJA}, yields a homogeneous ODE for the functions $\wt\Gamma^A_{\mc H}$. Since $\wt{\bm{\tau}} \st{\mc S_0}{=} {\bm{\tau}}$ and $\Gamma^A_{\mc H}=0$, expression \eqref{Gammatau} gives $\wt\Gamma^A_{\mc H}\st{\mc S_0}{=}0$ and hence $\wt\Gamma^A_{\mc H}=0$ and ${\bm{\tau}}=\bm{\wt\tau}$ on $\mc H$. The same argument on $\mc{\ul H}$ proves $\wt{\Gamma}^A_{\mc{\ul H}}=0$ and $\ul{\bm{\tau}}=\bm{\ul{\wt\tau}}$ on $\mc{\ul H}$. Finally, consider the trace of the tilde version of \eqref{reducedH0} w.r.t.\ $h$, which, taking into account $\wt\Gamma^u_{\mc H}=\wt\Gamma^A_{\mc H}=0$, reads \begin{equation} \label{reducedH} \wt H =2\Lambda. \end{equation} The abstract constraint equation $H=2\Lambda$ together with $\Gamma^{\ul u}_{\mc H}=0$ (since $\mc D$ is written in the HG) implies that the combination \eqref{combH} is equal to $2\Lambda$, and so is $\lie (\wt H,\wt\Gamma^{\ul u}_{\mc H}, n(\wt\Gamma^{\ul u}_{\mc H}))$, since this expression was constructed precisely so that it does not depend on ${\bm{\Upsilon}}$ (recall that $\omega$ and ${\bm{\tau}}$ are not problematic anymore). Inserting \eqref{reducedH} into the tilde version of \eqref{combH} we get a homogeneous ODE for $\wt\Gamma^{\ul u}_{\mc H}$. Since $\tr\wt{{\bm{\Upsilon}}}\st{\mc S_0}{=}\tr{\bm\Upsilon}$ and $\Gamma^{\ul u}_{\mc H}=0$, expression \eqref{Gammau} gives $\wt\Gamma^{\ul u}_{\mc H}\st{\mc S_0}{=}0$. Hence $\wt\Gamma^{\ul u}_{\mc H}=0$ and $\tr\wt{{\bm{\Upsilon}}}=\tr{\bm\Upsilon}$ everywhere on $\mc H$. The corresponding argument on $\mc{\ul H}$ gives $\tr\wt{\ul{\bm{\Upsilon}}}=\tr{\ul{\bm{\Upsilon}}}$ and $\wt\Gamma^{\ul u}_{\mc{\ul H}}=0$ on $\ul{\mc H}$. \\ The rest of the argument is standard (cf. \cite{HawkingEllis,Straumann,Wald}). As a consequence of the Bianchi identity, the functions $\square_g u$, $\square_g \ul u$ and $\square_g x^A$ satisfy a homogeneous wave equation, which together with the fact that $\wt\Gamma^{\mu}_{\mc H}\st{\mc H}{=}0$ and $\wt{\Gamma}^{\mu}_{\mc{\ul H}}=0$ on $\ul{\mc H}$ yields $\square_g u=\square_g \ul u=\square_g x^A=0$ everywhere on $\mc M$. Consequently, $(\mc M, g)$ is indeed a solution of the Einstein field equations with cosmological constant $\Lambda$. In order to show that $\{\mc D,\mc{\ul D},\mu\}$ is embedded DND on $(\mc M,g)$, as represented in Fig. \ref{fig1}, we still need to prove that the trace-free parts of the tensors ${\bm{\Upsilon}}$ and $\ul{\bm{\Upsilon}}$ coincide with those of $\bm{\wt{\Upsilon}}$ and $\bm{\wt{\ul\Upsilon}}$, respectively. Since $\ric(g)=\frac{2\Lambda}{m-1}\, g$ and the rest of the data coincides, the tensor $\bm{\wt{\Upsilon}}$ satisfies the same equation as the original $\bm\Upsilon$, namely ${\bm{\mc R}}_{AB}=\frac{2\Lambda}{m-1} h_{AB}$ (see \eqref{Rdata}), and thus the tensor ${\bm{\Upsilon}}-\bm{\wt{\Upsilon}}$ satisfies a homogeneous first-order ODE, which together with ${\bm{\Upsilon}}\st{\mc S_0}{=}\wt{\bm{\Upsilon}}$ yields $\bm\Upsilon= \bm{\wt\Upsilon}$ on $\mc H$. The same argument on $\mc{\ul H}$ proves $\ul{\bm{\Upsilon}}= \bm{\wt{\ul\Upsilon}}$ on $\mc{\ul H}$.
Then the original tensors $\bY$, $\ul \bY$ coincide with the embedded ones $\wt \bY$, $\wt{\ul \bY}$ and consequently the original abstract constraint equations are the pullback of the Einstein $\Lambda$-vacuum equations to $\mc H\cup\mc{\ul H}$.\\ \begin{figure} \centering \psfrag{a}{$\Psi$} \psfrag{b}{$\{\mc D,\mc{\ul D},\mu\}$} \psfrag{c}{$(\mc M,g)$} \includegraphics[width=10cm]{picture.eps} \caption{Embedded double null data $\{\mc D,\mc{\ul D},\mu\}$ with embedding $\Psi$ (in the sense of Def. \ref{def_embDND}) in a spacetime $(\mc M,g)$ solution of the $\Lambda$-vacuum Einstein field equations.} \label{fig1} \end{figure} In order to prove the uniqueness part of the theorem consider $\{\mc D,\mc{\ul D},\mu\}$ as embedded DND in another spacetime $(\mc{\wh M},\wh g)$ solution of the EFE with cosmological constant $\Lambda$ with embeddings $f,\ul f$ and riggings $\xi,\ul\xi$. The aim is to show that there exist neighbourhoods of $\mc H\cup\mc{\ul H}$, $\mc U\subseteq\mc M$ and $\mc{\wh U}\subseteq\mc{\wh M}$, and a diffeomorphism ${\varphi:\mc U{\to}\ \mc{\wh U}}$, such that $\varphi^{\star}\wh g=g$. By Theorem 1 of \cite{Rendall}, for each set of independent functions $\{{\ul u},{x}^A\}$ on $\mc H$ and $\{{u},{x}^A\}$ on $\mc{\ul H}$ satisfying $\lambda\d n({\ul u})\neq 0$, $n( x^A)=0$ on $\mc H$ and $\ul\lambda\d\ul n( u)\neq 0$, $\ul n( x^A)=0$ on $\mc{\ul H}$, there exist an open neighbourhood $U$ of $\mc S_0$ and unique smooth functions $\{\wh u,\wh{\ul u},\wh x^A\}$ on $\mc{\wh U}\d U\cap\mc{\wh M}$ such that $\square_{\wh g} \wh u = \square_{\wh g} \wh{\ul u} = \square_{\wh g} \wh x^A=0$ on $\mc{\wh U}$ with the given functions on $\mc{\wh U}\cap\left(\mc H\cup\mc{\ul H}\right)$ as initial conditions and $f(\mc H)=\{\wh u=0\}$, $\ul f(\mc{\ul H})=\{\wh{\ul u}=0\}$.\\ First we prove that when the data is written in the gauge of Lemma \ref{lemacompatible}, the riggings are $\xi=\partial_{\wh u}$ and $\ul\xi=\partial_{\wh{\ul u}}$ on $f(\mc H)$ and $\ul f(\ul{\mc H})$, respectively. Proposition \ref{propbox} together with the fact that $\wh u$ is harmonic w.r.t. $\wh g$ imply $n\left(\xi(\wh u)\right)=0$ on $\mc H$. Since the data is written in a gauge in which $\bm\ell_{\para}\st{\mc S_0}{=}0$ and $\ell^{(2)}\st{\mc S_0}{=}0$, relations \eqref{nrigg} hold, so $\xi(\wh u)\st{\mc S_0}{=}\mu^{-1}\ul n(\wh u) = \mu^{-1}\ul\lambda = 1$, since $\mu=\lambda=\ul\lambda$ on $\mc S_0$ in the gauge of Lemma \ref{lema_compHG}. Consequently $\xi(\wh u)=1$ on $\mc H$, and by analogy, $\ul\xi(\wh{\ul u})=1$ on $\mc{\ul H}$. Concerning the functions $\{\wh{\ul u},\wh{x}^A\}$, Proposition \ref{propbox}, equations $\square_{\wh g} \wh{\ul u} = \square_{\wh g} \wh x^A=0$ and the fact that the gauge is harmonic imply $n\left(\xi(\wh{\ul u})\right)=0$ and $n\left(\xi(\wh{x}^A)\right)=0$ on $\mc H$. By the same argument as before, $\xi(\wh{\ul u})\st{\mc S_0}{=} \mu^{-1}\ul n(\wh{\ul u})=0$ and $\xi(\wh{x}^A)\st{\mc S_0}{=} \mu^{-1}\ul n(\wh{x}^A)=0$, from where one concludes that $\xi(\wh{\ul u})=\xi(\wh{x}^A)=0$ everywhere on $\mc H$ (and similarly $\ul\xi(\wh{u})=\ul\xi(\wh{x}^A)=0$ on $\mc{\ul H}$). This proves $\xi=\partial_{\wh u}$ and $\ul\xi=\partial_{\wh{\ul u}}$ on $f(\mc H)$ and $\ul f(\ul{\mc H})$, respectively. 
Then, from the definition of embedded data, $$g_{\wh u\, \wh u}=\ell^{(2)},\qquad g_{\wh u \, \wh{\ul u}}=\lambda^{-1}, \qquad g_{\wh u\, \wh A} = \bm\ell_{A}, \qquad g_{\wh{\ul u}\, \wh{\ul u}}=0, \qquad g_{\wh{\ul u}\, \wh A}=0, \qquad g_{\wh A\wh B}=h_{AB},$$ on $\mc H$ and $$g_{\wh u\, \wh u}=0,\qquad g_{\wh u \, \wh{\ul u}}=\ul\lambda^{-1}, \qquad g_{\wh u\, \wh A} = 0, \qquad g_{\wh{\ul u}\, \wh{\ul u}}=\ul\ell^{(2)}, \qquad g_{\wh{\ul u}\, \wh A}=\bm{\ul\ell}_A, \qquad g_{\wh A\wh B}={\ul h}_{AB}$$ on $\mc{\ul H}$. After restricting $\mc{\wh U}$ further, if necessary, there exists a neighbourhood $\mc U\subseteq\mc M$ and a diffeomorphism $\varphi:\mc U\to\mc{\wh U}$ defined by $x^{\mu} = \wh{x}^{\mu}\circ\varphi$. By construction $\mc U\cap \left(\mc H\cup\mc{\ul H}\right)\neq\emptyset$. Since $0=\varphi^{\star}\left(\square_{\wh g}\wh x^{\mu}\right)=\square_{\varphi^{\star}\wh g} x^{\mu}$, the coordinates $x^{\mu}$ are harmonic w.r.t. $\varphi^{\star}\wh g$. Moreover, from the fact that $\wh g$ is a solution of the $\Lambda$-vacuum EFE, $\ric\left[\wh g\right] = \frac{2\Lambda}{m-1}\wh g$, so $$\varphi^{\star}\left(\ric\left[\wh g\right]\right) =\ric\left[\varphi^{\star}\wh g\right] =\dfrac{2\Lambda}{m-1}\varphi^{\star}\wh g,$$ and therefore $\varphi^{\star}\wh g$ is a solution of the reduced equations in the coordinates $\{x^{\mu}\}$, just like $g$. In order to prove that $\varphi^{\star}\wh g$ and $g$ are actually the same, by Theorem 1 of \cite{Rendall} we only need to show that their restrictions to $\mc U\cap\left(\mc H\cup\mc{\ul H}\right)$ agree. This follows directly from the fact that the push-forward $\varphi_{\star}$ maps the coordinate vector fields onto each other, namely $\varphi_{\star}\partial_u = \partial_{\wh u}$, $\varphi_{\star}\partial_{\ul u} = \partial_{\wh{\ul u}}$ and $\varphi_{\star}\partial_{x^A} = \partial_{\wh x^A}$, and therefore $(\varphi^{\star}\wh g)_{\mu\nu} = g_{\mu\nu}$ on $\mc U\cap\left(\mc H\cup\mc{\ul H}\right)$. \end{proof} \end{teo} \begin{rmk} The argument in Theorem \ref{main} can be immediately generalized to matter fields admitting a well-posed characteristic initial value problem whose energy-momentum tensor on the hypersurfaces has the following dependence on the initial data: (i) $T_{n\, n}$ depends on the matter field, the metric data, and is algebraic in $\bY(n,n)$; (ii) $T_{n\, A}$ depends on the matter field, the metric data, $\bY(n,n)$ and is algebraic in $\bY_{n\, A}$; (iii) the combination $h^{AB}T_{AB}-\frac{m-1}{2}\left(P^{ab}T_{ab}+2T(n,\xi)\right)$ depends on the matter field, the metric data, $\bY(n,n)$, $\bY_{n\, A}$ and is algebraic in $h^{AB}\bY_{AB}$; (iv) $T_{AB}$ depends on the matter field, the metric data, $\bY(n,n)$, $\bY_{n\, A}$, $h^{AB}\bY_{AB}$ and is algebraic in the trace-free part of $\bY_{AB}$. The third requirement follows from the fact that, from \eqref{inverse}, \begin{align*} g^{\mu\nu} T_{\mu\nu} \st{\mc H}{=} \left(P^{ab} e^{\mu}_a e_b^{\nu} + 2n^a e_a^{\mu}\xi^{\nu}\right) T_{\mu\nu} \st{\mc H}{=} P^{ab} T_{ab} + 2T(n,\xi). \end{align*} \end{rmk} \begin{appendices} \section{Gauge-covariance of the tensors $A$ and $B$} \label{appendix} In this appendix we prove that the tensors $A$ and $B$ as defined in \eqref{A} and \eqref{B} transform as \begin{equation} \label{transAyB} A'_{abc} = z(A_{abc}+\zeta^d B_{dabc}),\hspace{1cm} B'_{abcd}=B_{abcd} \end{equation} under a gauge transformation $(z,\zeta)$.
These are the expected transformations if one thinks the data as embedded, but we prove this statement in full generality. \begin{prop} \label{covariance1} Let $\mc D=\{\mc H,\bg,\bm\ell,\ell^{(2)},\bY\}$ be null hypersurface data and $(z,\zeta)$ gauge parameters. Let $A$ and $B$ be the tensors defined in \eqref{A} and \eqref{B}, respectively. Then \begin{enumerate} \item $\mc G_{(z,\zeta)}(A) = z(A+i_{\zeta}B),$ \item $\mc G_{(z,\zeta)}(B) = B$. \end{enumerate} \begin{proof} By the composition law $(z,\zeta)=(z,0)\circ (1,\zeta)$ (see \eqref{group}) it suffices to prove the result with $(z,0)$ and $(1,\zeta)$ independently. We start by assuming that $\mc D$ is null hypersurface data written in a CG (we shall deal later with the general case). Then, \begin{align} A_{bcd}&=\bm\ell_a\ol{R}^a{}_{bcd},\label{Achar}\\ B_{abcd}&=\bg_{af}\ol{R}^f{}_{bcd}-2\ol\nabla_{[c}\left(\bK_{d]b}\bm\ell_a\right).\label{Bchar} \end{align} By the transformation laws \eqref{tranfell}, \eqref{Ktrans} and \eqref{gaugeconnection} the expression of $B$ above is insensitive to the transformations $(z,0)$, and thus we only need to show the invariance of $B$ under transformations of the form $(1,\zeta)$. Denote with a prime the transformed data, \begin{align} \label{Bprima} B_{abcd}' = \bg_{af}\ol R'^f{}_{bcd}-2\ol\nabla'_{[c}\left(\bK_{d]b}'\bm\ell_a'\right)+2\ell'{}^{(2)}\bK_{a[d}'\bK'_{c]b}. \end{align} Since no products of $\bK$ appear in equation \eqref{Bchar}, a good strategy is to identify the elements with $\bK\cdot \bK$ in the two first terms and see that they cancel out with those in the third one. The first term is given by Proposition \ref{curvatura}, \begin{equation} \label{aux2} \bg_{af}\ol R'^f{}_{bcd} = \bg_{af}\left(\ol R^f{}_{bcd}+2\ol\nabla_{[c}\left(\zeta^f \bK_{d]b}\right)+2\zeta^f\zeta^g \bK_{g[c}\bK_{d]b}\right). \end{equation} For the second one we apply \eqref{tranfell} and \eqref{Ktrans}, as well as Proposition \ref{gaugeconection}, \begin{equation*} \begin{aligned} 2\ol\nabla'_{[c}\left(\bK_{d]b}'\bm\ell_a'\right) & = 2\ol\nabla_{[c}\left(\bK_{d]b}\bm\ell_a\right)+2\ol\nabla_{[c}\left(\bK_{d]b}\bg_{af}\zeta^f\right)\\ &\quad+2\left(\bm\ell_a+\bg_{af}\zeta^f\right)\zeta^g \bK_{b[d}\bK_{c]g}+2\left(\bm\ell(\zeta)+\bg(\zeta,\zeta)\right) \bK_{a[d}\bK_{c]b}. \end{aligned} \end{equation*} Expanding the second term and inserting \eqref{olnablagamma}, \begin{equation} \label{aux3} \begin{aligned} 2\ol\nabla'_{[c}\left(\bK_{d]b}'\bm\ell_a'\right) & = 2\ol\nabla_{[c}\left(\bK_{d]b}\bm\ell_a\right)+ 2\bg_{af}\ol\nabla_{[c}\left(\zeta^f\bK_{d]b}\right)\\ &\quad +2\left(2\bm\ell(\zeta)+\bg(\zeta,\zeta)\right)\bK_{a[d}\bK_{c]b}+2\bg_{af}\zeta^f\zeta^g \bK_{g[c}\bK_{d]b} . \end{aligned} \end{equation} Introducing \eqref{aux2} and \eqref{aux3} into \eqref{Bprima}, and taking into account $\ell'{}^{(2)} = z^2\left(2\bm\ell(\zeta)+\bg(\zeta,\zeta)\right)$, \begin{align} \label{aux4} B'_{abcd} = \bg_{af} \ol R^f{}_{bcd} - 2\ol\nabla_{[c}\left(\bK_{d]b}\bm\ell_a\right) , \end{align} so the gauge invariance of $B$ follows. Now we study the transformation of $A$ in the same way. In the primed gauge, \begin{equation} \label{Aprima} A_{bcd}'=\bm\ell_a'\ol{R}'{}^a{}_{bcd}+2\ell'{}^{(2)}\ol{\nabla}'_{[d} \bK'_{c]b} +\bK'_{b[c}\ol\nabla_{d]} \ell'{}^{(2)}, \end{equation} while in a characteristic gauge $A$ takes the form \eqref{Achar}. From \eqref{tranfell}, \eqref{transell2}, \eqref{gaugeconnection} and \eqref{Ktrans} it follows $\mc G_{(z,0)}(A) = zA$. 
Concerning transformations of the form $(1,\zeta)$, our strategy is to get rid of the terms with products of $\bK$ and products of $\zeta$. Firstly from \eqref{olnablagamma} it follows \begin{equation} \label{nablagammagamma} \ol\nabla_a \left(\bg(\zeta,\zeta)\right) = 2\bg_{cb}\zeta^c\ol\nabla_a\zeta^b-2\bK_{ab}\bm\ell(\zeta)\zeta^b. \end{equation} Since $\ell'{}^{(2)}=2\bm\ell(\zeta)+\bg(\zeta,\zeta)$, the second term of \eqref{Aprima} can be written as \begin{align*} 2\ell'{}^{(2)}\ol\nabla'_{[d}\bK'_{c]b} = 2\left(2\bm\ell(\zeta)+\bg(\zeta,\zeta)\right)\left(\ol\nabla_{[d}\bK_{c]b}-\zeta^a\bK_{b[d}\bK_{c]a}\right), \end{align*} while using \eqref{nablagammagamma} the third one is \begin{align*} \bK'_{b[c}\ol\nabla'_{d]}\ell'{}^{(2)} &=2z\bK_{b[c}\left(\bg_{fg}\zeta^f\ol\nabla_{d]}\zeta^g-\bK_{d]f}\bm\ell(\zeta)\zeta^f\right)+2z\bK_{b[c}\left(\zeta^f\ol\nabla_{d]}\bm\ell_f+\bm\ell_f\ol\nabla_{d]}\zeta^f\right). \end{align*} For the first term in \eqref{Aprima} we use Proposition \ref{curvatura} and equation \eqref{tranfell}. Putting everything together and simplifying, \begin{align*} z^{-1} A'_{bcd} & = \bm\ell_a \ol R^a{}_{bcd}+\bg_{af}\zeta^f\ol R^a{}_{bcd}+2\bm\ell_a \ol\nabla_{[c}\left(\zeta^a \bK_{d]b}\right)\\ &\quad\, +4\bm\ell(\zeta)\ol\nabla_{[d} \bK_{c]b}+2\zeta^f \bK_{b[c}\ol\nabla_{d]}\bm\ell_f + 2\bm\ell_f \bK_{b[c}\ol\nabla_{d]}\zeta^f\\ &=\bm\ell_a \ol R^a{}_{bcd}+\bg_{af}\zeta^f\ol R^a{}_{bcd}+2\bm\ell(\zeta)\ol\nabla_{[d}\bK_{c]b}+2\zeta^f \bK_{b[c}\ol\nabla_{d]}\bm\ell_f, \end{align*} and the claim $z^{-1} A'_{bcd} = A_{bcd}+\zeta^a B_{abcd}$ follows because $\zeta^a\ol\nabla_{[d}\left(\bK_{c]b}\bm\ell_a\right) = \bm\ell(\zeta)\ol\nabla_{[d}\bK_{c]b}+\zeta^f \bK_{b[c}\ol\nabla_{d]}\bm\ell_f$. To finish the proof we still need to show that the assumption that the initial gauge is characteristic does not spoil the generality of the argument. Let $\mc D$ be CHD and $\mc D''=\mc{G}_{(z'',\zeta'')}\mc D$. From Proposition \ref{propcaracteristico} there always exist gauge parameters $(z,\zeta)$ such that $\mc D' \d \mc G_{(z,\zeta)}\mc D$ is in a CG. Let $(z',\zeta')$ be the parameters making the following diagram commutative \begin{diagram} \mc D \arrow[rr, "\mc G_{(z'',\zeta'')}"] \arrow[rd, bend right, "\mc G_{(z,\zeta)}"] & & \mc D''\\ & \mc D' \arrow[ur, bend right, "\mc G_{(z',\zeta')}"] & \end{diagram} \vspace{0.3cm} In other words, $(z',\zeta') =(z'',\zeta'')\circ (z,\zeta)^{-1} = (z'' z^{-1}, z(\zeta''-\zeta))$, where we have made use of \eqref{group} and \eqref{gaugelaw}. We already know that $\mc G_{(z',\zeta')} (A') = A'' = z'(A'+i_{\zeta'}B')$ and $\mc G_{(z',\zeta')}(B')=B'' = B'$, as well as $\mc G_{(z,\zeta)^{-1}} (A') = A = z^{-1}(A'+i_{-z\zeta}B)$ and $\mc G_{(z,\zeta)^{-1}} (B')=B=B'$, after using \eqref{gaugelaw} again. Then, since $A'' = z'(A'+i_{\zeta'}B) $ it follows \begin{align*} A'' =z'' \left(z^{-1} A' + i_{\zeta''}B' -i_{\zeta} B'\right) = z''\left(A+i_{\zeta} B' +i_{\zeta''} B'-i_{\zeta} B' \right) = z''\left(A+i_{\zeta''} B\right), \end{align*} and hence $\mc G_{(z'',\zeta'')}(A) = z''\left(A+i_{\zeta''} B\right)$. The fact that $\mc G_{(z'',\zeta'')}(B) = B$ is obvious. \end{proof} \end{prop} The next natural step is to study the symmetries of $A$ and $B$. When the data is embedded, these symmetries are the ones inherited from the curvature tensor of the ambient space. Here we establish them without assuming embeddedness of the data. 
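For comparison, recall that the curvature tensor $R_{\alpha\beta\gamma\delta}$ of a pseudo-Riemannian manifold satisfies \begin{equation*} R_{\alpha\beta\gamma\delta}=-R_{\beta\alpha\gamma\delta}=-R_{\alpha\beta\delta\gamma},\qquad R_{\alpha[\beta\gamma\delta]}=0,\qquad R_{\alpha\beta\gamma\delta}=R_{\gamma\delta\alpha\beta}. \end{equation*} The symmetries of $B$ listed in the proposition below are precisely of this type, while those of $A$ are the ones satisfied by a contraction of a curvature tensor in its first index with a fixed one-form, in line with the embedded interpretation recalled above.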
\begin{prop} \label{symmetries} The tensors $A$ and $B$ possess the following symmetries: \begin{enumerate} \item $A_{bcd}=-A_{bdc}$, \item $A_{bcd}+A_{cdb}+A_{dbc}=0$, \item $B_{abcd} + B_{acdb} + B_{adbc}=0$, \item $B_{abcd}=-B_{abdc}=-B_{bacd}$, \item $B_{abcd}=B_{cdab}.$ \end{enumerate} \begin{proof} The symmetries $A_{bcd}=-A_{bdc}$ and $B_{abcd}=-B_{abdc}$ are obvious. The second one is a consequence of the first Bianchi identity for $\ol R$ and the fact that $\bK$ is symmetric. The third one is analogous. In order to prove $B_{abcd}+B_{bacd}=0$ we first compute the symmetrization of the first term in \eqref{B} which, taking into account \eqref{olnablagamma}, is \begin{align*} \bg_{af}\ol R^f{}_{bcd}+\bg_{bf}\ol R^f{}_{acd} & = \ol\nabla_d\ol\nabla_c\bg_{ab}-\ol\nabla_c\ol\nabla_d\bg_{ab}\\ &=-\ol\nabla_d\left(\bK_{ca}\bm\ell_b+\bK_{cb}\bm\ell_a\right)+\ol\nabla_c\left(\bK_{da}\bm\ell_b+\bK_{db}\bm\ell_a\right)\\ &= 2\ol\nabla_{[c}\left(\bK_{d]a}\bm\ell_b\right)+2\ol\nabla_{[c}\left(\bK_{d]b}\bm\ell_a\right). \end{align*} For any symmetric tensor $\bK$ it holds that $\bK_{b[d}\bK_{c]a}+\bK_{a[d}\bK_{c]b}=0$, so the symmetrization relative to the indices $a,b$ in the third term in \eqref{B} vanishes. Thus, \begin{align*} B_{abcd}+B_{bacd} = 2\ol\nabla_{[c}\left(\bK_{d]a}\bm\ell_b\right)+2\ol\nabla_{[c}\left(\bK_{d]b}\bm\ell_a\right)-2\ol\nabla_{[c}\left(\bK_{d]b}\bm\ell_a\right)-2\ol\nabla_{[c}\left(\bK_{d]a}\bm\ell_b\right)=0. \end{align*} The fifth symmetry is a consequence of items (3) and (4). Indeed, let $\accentset{\circ}{B}_{abcd}$ be the cyclic permutation $\accentset{\circ}{B}_{abcd} \d B_{abcd} + B_{acdb} + B_{adbc}$, which by the third item vanishes. Then, taking into account item (4), $$\hspace{-5mm}0 = \accentset{\circ}{B}_{abcd} - \accentset{\circ}{B}_{bcda} -\accentset{\circ}{B}_{cdab}+\accentset{\circ}{B}_{dcab}= B_{abcd} - B_{bacd} - B_{cdab} + B_{dcab}=2B_{abcd} - 2B_{cdab}.$$ \vskip -9mm \end{proof} \end{prop} \section{Some contractions of the tensors $A$ and $B$} \label{appendixB} In this appendix we compute the contractions of the tensors $A$ and $B$ that are needed in Section \ref{sec_fol} to write down the constraint tensors in terms of the foliation tensors. \begin{lema} \label{LemaB} Let $\mc D$ be CHD written in a characteristic gauge, $\{e_A\}$ a basis of $\Gamma(T\mc S)$, $V\in\Gamma(T\mc H)$ and let $A$ and $B$ be the tensors defined in \eqref{A} and \eqref{B}. Then, \begin{align} A_{bcd} n^b n^d V^c & = (\ol\nabla_n \bPi)(V,n)-(\ol\nabla_V \bPi)(n,n),\label{propJ1}\\ B_{abcd} n^a P^{bd} V^c & =-\bPi(V,n) \tr_P \bK +\left(\bK*\bPi\right)(V,n) - \ol\nabla_V \tr_P \bK+\div_P(\bK)(V),\label{propJ2}\\ B_{cadb} P^{cd}e_A^ae_B^b &= {}^h R_{AB} - \tr_h{\bm\chi}\ {\bm{\Upsilon}}_{AB}-\tr_h{\bm{\Upsilon}}\ {\bm\chi}_{AB}+\left({\bm\chi}\cdot{\bm{\Upsilon}}\right)_{AB}+\left({\bm{\Upsilon}}\cdot{\bm\chi}\right)_{AB},\label{BP}\\ A_{bca}n^ce_A^ae_B^b &= -\nabla^h_A{\bm{\eta}}_B + 2\omega {\bm{\Upsilon}}_{AB}-\wt\nabla_n {\bm{\Upsilon}}_{AB}-{\bm{\eta}}_A{\bm{\eta}}_B-\left({\bm\chi}\cdot{\bm{\Upsilon}}\right)_{AB},\label{An} \end{align} where we define $\tr_P \bK\d P^{ab}\bK_{ab}$, $\left(\bK*\bPi\right)_{ca} \d P^{bd} \bK_{bc}\bPi_{da}$, $\div_P(\bK)(V)\d P^{ab}V^c\ol\nabla_a \bK_{bc}$ and $(S\cdot T)_{AB}\d h^{CD} S_{AC}T_{BD}$ for any pair of two-covariant tensors $S$ and $T$. \begin{proof} The first one follows from the expression of $A$ in \eqref{Achar} and Lemma \ref{lema} in a CG.
Now, from \eqref{Bchar} and taking into account $\bg_{af}\ol R^f{}_{bcd}n^a=0$, \begin{align*} B_{abcd} n^a = 2\bm\ell_a \bK_{b[d}\ol\nabla_{c]}n^a-2\ol\nabla_{[c} \bK_{d]b}= 2\bK_{b[c}\bPi_{d]a}n^a - 2\ol\nabla_{[c} \bK_{d]b}, \end{align*} where in both equalities we used $\bm\ell(n)=1$ and in the second we inserted \eqref{olnablan2} and used $P(\bm\ell,\cdot)=0$ (see \eqref{Pell}). Contracting with $P^{bd} V^c$ and using $\bK_{bc}\ol\nabla_a P^{bc}=0$, which is a direct consequence of \eqref{olnablaP} and $\bK(n,\cdot)=0$, $$B_{abcd} n^a P^{bd} V^c = -P^{bd} \bK_{bd} \bPi(V,n) + P^{bd} \bK_{bc}V^c\bPi_{da}n^a - \ol\nabla_V\left(P^{bd} \bK_{bd}\right)+P^{bd}V^c\ol\nabla_d \bK_{bc},$$ and hence \eqref{propJ2} follows. Next, we proceed with the third one. Inserting \eqref{olnablagamma} into \eqref{Bchar}, the tensor $B$ in a CG is $B_{cadb} = \bg_{cf}\ol R^f{}_{adb}-2\bK_{a[b} \bPi_{d]c} - 2\bm\ell_c\ol\nabla_{[d}\bK_{b]a}$, so using \eqref{PAB} and that $\bPi_{DC}=\bY_{DC}$ in a CG (see Remark \ref{observación}) the contraction with $P^{cd}e^a_Ae^b_B$ becomes $$B_{cadb} P^{cd}e_A^ae_B^b = \bg\left(e_C,\ol R(e_D,e_B)e_A\right) h^{CD} - 2{\bm\chi}_{A[B}\bY_{D]C}h^{CD},$$ which is \eqref{BP} after using the Gauss identity \eqref{gaussfacil}. Finally, in order to compute the term $A_{bca}n^ce_A^ae_B^b$ we first recall the expression \eqref{Achar} for $A$ in a CG and equation \eqref{nlR}, namely \begin{equation} \label{auxRAB} A_{bca}n^c=n^c\bm\ell_d\ol R^d{}_{bca} = \ol\nabla_a\left(\bPi_{cb}n^c\right)-\ol\nabla_n \bPi_{ab} - P^{cd}\bK_{da}\bPi_{cb}+\bPi_{cb}\bPi_{ad}n^cn^d. \end{equation} From decomposition \eqref{nablan}, Remark \ref{observación} and the definitions of ${\bm{\eta}}$, ${\bm{\Upsilon}}$ and $\omega$, the term $e^a_Ae^b_B\ol\nabla_n\bPi_{ab}$ is given by \begin{align*} e^a_Ae^b_B\ol\nabla_n\bPi_{ab} &=n\big(\bPi(e_A,e_B)\big) - \bPi\big(\wt\nabla_n e_A+{\bm{\eta}}(e_A)n, e_B\big)- \bPi\left(e_A, \wt\nabla_n e_B+{\bm{\eta}}(e_B)n\right)\\ &=\wt\nabla_n {\bm{\Upsilon}}_{AB} +{\bm{\eta}}_A{\bm{\eta}}_B -{\bm{\tau}}_A{\bm{\eta}}_B\\ &= \wt\nabla_n {\bm{\Upsilon}}_{AB}+2{\bm{\eta}}_A{\bm{\eta}}_B+{\bm{\eta}}_B\nabla^{h}_A\log|\lambda|, \end{align*} where in the second line we employed equations \eqref{etaPi} and \eqref{tauPi} and in the last equality we replaced ${\bm{\tau}}$ by ${\bm{\eta}}$ according to \eqref{taueta}. The term $e_A^ae_B^b \ol\nabla_a\left(\bPi_{cb}n^c\right)$ can be computed using decomposition \eqref{decompnabla}, Remark \ref{observación} and the definitions of $\omega$ and ${\bm{\Upsilon}}$, \begin{align*} e_A^ae_B^b \ol\nabla_a\left(\bPi_{cb}n^c\right) = e_A\left(-{\bm{\eta}}_B\right)-\bPi\left(n,\nabla^h_{e_A} e_B-{\bm{\Upsilon}}_{AB}n\right)=-\nabla^h_A{\bm{\eta}}_B + 2\omega {\bm{\Upsilon}}_{AB}, \end{align*} where again \eqref{etaPi} has been taken into account. From \eqref{PAB}, \eqref{etaPi} and \eqref{tauPi}, $$\bPi_{cb}\bPi_{ad}n^cn^d e^a_A e^b_B=-{\bm{\eta}}_B{\bm{\tau}}_A,$$ which becomes ${\bm{\eta}}_A{\bm{\eta}}_B+{\bm{\eta}}_B\nabla_A^h\log|\lambda|$ after using \eqref{taueta}. Contracting \eqref{auxRAB} with $e_A^ae_B^b$ and inserting the expressions above, as well as \eqref{PAB}, equation \eqref{An} follows. \end{proof} \end{lema} \end{appendices} \section*{Acknowledgements} We thank P.T. Chruściel for very useful comments on the uniqueness part of the main result which helped us improve the presentation.
This work has been supported by Projects PGC2018-096038-B-I00, PID2021-122938NB-I00 (Spanish Ministerio de Ciencia e Innovación and FEDER ``A way of making Europe'') and SA096P20 (JCyL). G. Sánchez-Pérez also acknowledges support of the PhD. grant FPU20/03751 from Spanish Ministerio de Universidades. \begingroup \let\itshape\upshape \renewcommand{\bibname}{References} \bibliographystyle{acm} \bibliography{biblio} \end{document} \usepackage[utf8]{inputenc} \usepackage[english]{babel} \usepackage{amsmath} \usepackage{amsfonts} \usepackage{amssymb} \usepackage{appendix} \usepackage{faktor} \usepackage[hidelinks]{hyperref} \hypersetup{ colorlinks=true, urlcolor=blue, linkcolor=blue, citecolor=red, } \urlstyle{same} \usepackage{amsthm} \usepackage{bbold} \usepackage{mathrsfs} \usepackage{wasysym} \usepackage{mathtools} \usepackage{bm} \usepackage{cancel} \usepackage{color} \usepackage{tikz-cd} \usepackage[Glenn]{fncychap} \usepackage{subfig} \usepackage{accents} \usepackage{slashed} \usepackage{psfrag} \usepackage{stackengine} \newcommand\ubar[1]{\stackunder[1.2pt]{$#1$}{\rule{.8ex}{.1ex}}} \usetikzlibrary{babel} \makeatletter \newcommand*\owedge{\mathpalette\@owedge\relax} \newcommand*\@owedge[1]{ \mathbin{ \ooalign{ $#1\m@th\bigcirc$\cr \hidewidth$#1\m@th\wedge$\hidewidth\cr } }} \makeatother \newenvironment{diagram} { \centering \begin{tikzcd} } { \end{tikzcd}\par } \renewcommand{\baselinestretch}{0.95} \setlength{\parindent}{0cm} \newtheorem{teo}{Theorem}[section] \newtheorem{cor}[teo]{Corollary} \newtheorem{prop}[teo]{Proposition} \newtheorem{lema}[teo]{Lemma} \newtheorem{defi}[teo]{Definition} \newtheorem{eje}[teo]{Example} \newtheorem{obs}[teo]{Remark} \newtheorem{ejer}[teo]{Exercise} \newtheorem{rmk}[teo]{Remark} \newtheorem{pro}[teo]{Problema} \usepackage{makeidx} \usepackage{graphicx} \usepackage[left=2.60cm, right=2.60cm, top=2.40cm, bottom=2.60cm]{geometry} \usepackage{scalerel} \usepackage{stackengine,wasysym} \newcommand{\longsim}{{\resizebox{2cm}{0.3cm}{$\sim$}}} \newcommand\reallywidetilde[1]{\ThisStyle{ \setbox0=\hbox{$\SavedStyle#1$} \stackengine{-.1\LMpt}{$\SavedStyle#1$}{ \stretchto{\scaleto{\SavedStyle\mkern.2mu\sim}{.5467\wd0}}{.7\ht0} }{O}{c}{F}{T}{S}}} \usepackage{color} \makeatletter \renewcommand\part{ \if@openright \cleardoublepage \else \clearpage \thispagestyle{empty} \if@twocolumn \onecolumn \@tempswatrue \else \@tempswafalse \null\vfil \secdef\@part\@spart} \makeatother \newcommand{\II}{\mathrm {I\!I}} \newcommand{\scri}{\mathscr{J}} \newcommand{\aut}{\operatorname{Aut}} \newcommand{\wt}{\widetilde} \newcommand{\wh}{\widehat} \newcommand{\sch}{\operatorname{Sch}} \newcommand{\sii}{\Longleftrightarrow} \newcommand{\scal}{\operatorname{Scal}} \newcommand{\ul}{\underline} \newcommand{\ol}{\overline} \newcommand{\Cot}{\operatorname{Cot}} \newcommand{\weyl}{\operatorname{Weyl}} \newcommand{\red}{\textcolor{red}} \newcommand{\rad}{\operatorname{Rad}} \newcommand{\pt}{ \ \forall} \newcommand{\spec}{\text{Spec }} \newcommand{\func}{\rightsquigarrow} \newcommand{\obj}{\text{Obj }} \newcommand{\proj}{\text{Proj }} \newcommand{\bY}{\textup{\textbf Y}} \newcommand{\bU}{\textup{\textbf U}} \newcommand{\bQ}{\textup{\textbf Q}} \newcommand{\bF}{\textup{\textbf F}} \newcommand{\bK}{\textup{\textbf K}} \newcommand{\bPi}{\bm\Pi} \newcommand{\bg}{\bm{\gamma}} \newcommand{\bt}{\bm{\tau}} \newcommand{\bh}{\textup{\textbf h}} \newcommand{\proy}{\mathbb{P}} \renewcommand{\hom}{\text{Hom}} \newcommand{\mf}{\mathfrak} \newcommand{\real}{\mathbb R} \newcommand{\tr}{\operatorname{tr}} 
\newcommand{\quo}[2]{{\raisebox{.2em}{$#1$}\left/\raisebox{-.2em}{$#2$}\right.}} \newcommand{\rot}{\text{rot }} \renewcommand{\div}{\operatorname{div}} \newcommand{\grad}{\text{grad}} \newcommand{\mb}{\mathbb} \newcommand{\st}{\stackrel} \newcommand{\hess}{\operatorname{Hess }} \newcommand{\norm}{\bot} \newcommand{\para}{\parallel} \newcommand{\sph}{\mathbb S} \newcommand{\eps}{\varepsilon} \newcommand{\X}{\mathfrak{X}} \renewcommand{\d}{\coloneqq} \newcommand{\fsl}[1]{{\ooalign{\(#1\)\cr\hidewidth\(/\)\hidewidth\cr}}} \newcommand{\emb}{\hookrightarrow} \newcommand{\dd}{\text{d}} \newcommand{\riem}{\operatorname{Riem}} \newcommand{\ric}{\operatorname{Ric}} \newcommand{\ein}{\operatorname{Ein}} \newcommand{\lie}{\mathcal{L}} \newcommand{\mc}{\mathcal} \newcommand{\sto}{\rightarrow} \renewcommand{\to}{\longrightarrow} \renewcommand{\iff}{\Leftrightarrow} \renewcommand{\mapsto}{\longmapsto}
2205.15727v1
http://arxiv.org/abs/2205.15727v1
Analysis of a quasilinear coupled magneto-quasistatic model: solvability and regularity of solutions
\documentclass{ws-m3as} \usepackage[utf8]{inputenc} \usepackage{amssymb} \usepackage{amsmath,bbm,bm} \usepackage{amsfonts} \usepackage{mathrsfs} \usepackage{graphicx} \usepackage{enumerate} \usepackage{color} \usepackage{cite} \usepackage{soul} \newtheorem{assumption}[theorem]{Assumption} \DeclareMathAlphabet{\mathpzc}{OT1}{pzc}{m}{it} \newcommand{\cEl}{\scalebox{1.27}{$\mathpzc{E}$}} \newcommand{\cBl}{\scalebox{1.27}{$\mathpzc{B}$}} \newcommand{\cAl}{\scalebox{1.27}{$\mathpzc{A}$}} \newcommand{\R}{\mathbb{R}} \DeclareMathOperator{\curl}{curl} \DeclareMathOperator{\divg}{div} \begin{document} \markboth{R. Chill, T. Reis, T. Stykel} {Analysis of a quasilinear coupled MQS model.} \catchline{}{}{}{}{} \title{Analysis of a quasilinear coupled magneto-quasistatic model: \\ solvability and regularity of solutions } \author{Ralph Chill } \address{Institut f\"ur Analysis, Technische Universit\"at Dresden \\ 01062 Dresden, Germany \\ [email protected]} \author{Timo Reis} \address{Institut für Mathematik, Technische Universität Ilmenau, Weimarer Str. 32 \\ 98693 Ilmenau, Germany\\ [email protected]} \author{Tatjana Stykel} \address{Institut f\"ur Mathematik \& Centre for Advanced Analytics and Predictive Sciences (CAAPS), Universit\"at Augsburg, Universit\"atsstr. 12a, 86159 Augsburg, Germany \\ [email protected]} \maketitle \begin{history} \received{(Day Month Year)} \revised{(Day Month Year)} \comby{(xxxxxxxxxx)} \end{history} \begin{abstract} We consider a~quasilinear model arising from dynamical magnetization. This model is described by a~magneto-quasistatic (MQS) approximation of Maxwell's equations. Assuming that the medium consists of a~conducting and a~non-conducting part, the derivative with respect to time is not fully entering, whence the system can be described by an abstract differential-algebraic equation. Furthermore, via magnetic induction, the system is coupled with an equation which contains the induced electrical currents along the associated voltages, which form the input of the system. The aim of this paper is to study well-posedness of the coupled MQS system and regularity of its solutions. Thereby, we rely on the classical theory of gradient systems on Hilbert spaces combined with the concept of $\mathcal{E}$-subgradients using in particular the magnetic energy. The coupled MQS system precisely fits into this general framework. \end{abstract} \keywords{magneto-quasistatic systems; eddy current model; magnetic energy; abstract differential-algebraic equations; gradient systems.} \ccode{AMS Subject Classification: 12H20, 34A09, 35B65, 37L05, 78A30, } \section{Introduction}\label{sec:intro} Maxwell's equations play a~fundamental role in modeling and numerical analysis of electromagnetic field problems. They describe the dynamic and spatial behavior of the electromagnetic field in a~medium. These equations were discovered in the early 1860s and have since then received a~lot of attention by mathematicians, physicists and engineers \cite{Ja99}. The unknown variables are given by the $\R^3$-valued functions \[\begin{aligned} \bm{D}:&\text{ electric~displacement},&\bm{B}:& \text{ magnetic flux intensity},\\ \bm{E}:&\text{ electric~field intensity},&\bm{H}:&\text{ magnetic field intensity},\\ \bm{J}:&\text{ electric~current density},\end{aligned}\] which depend on a~spatial variable $\xi\in\mathit{\Omega}\subseteq\R^3$ and time $t\in[0,T]\subset\mathbb{R}$. 
Assuming that there are no electric charges, Maxwell's equations are given by \begin{align*} \qquad\qquad\nabla\cdot \bm{D}&=0&&\qquad\qquad\text{(the medium contains no electric charges)},\\ \nabla\cdot \bm{B}&=0&&\qquad\qquad\text{(field lines of the magnetic~flux are closed)},\\ \nabla\times \bm{E}&=-{\textstyle\frac{\partial}{\partial t}}\bm{B}&&\qquad\qquad\text{(Faraday's law of induction)},\\ \nabla\times \bm{H}&=\bm{J}+{\textstyle\frac{\partial}{\partial t}}\bm{D} &&\qquad\qquad\text{(magnetic~flux law)}, \end{align*} where $\nabla\cdot$ stands for the divergence and $\nabla\times$ denotes the curl of a~vector field. In addition, the above variables fulfill {\em constitutive relations}, which are determined by the physical properties of the medium. Denoting the Euclidean norm by $\|\cdot\|_2$, the constitutive relations are, in the quasilinear and isotropic case, of the form \begin{align*} &\bm{D}(\xi,t)=\epsilon(\xi , \|\bm{E}(\xi,t)\|_2 )\bm{E}(\xi,t),\\ &\bm{H}(\xi,t)=\nu(\xi , \|\bm{B}(\xi,t)\|_2 ) \bm{B}(\xi,t),\\ &\bm{J}(\xi,t)=\sigma(\xi , \|\bm{E}(\xi,t)\|_2 ) \bm{E}(\xi,t)+\bm{J}_{\rm ext}(\xi,t) \end{align*} for some functions $\epsilon$, $\nu$, $\sigma:\mathit{\Omega}\times\R\to\R$ which respectively express the electric permittivity, magnetic reluctivity and electric conductivity of the material, and $\bm{J}_{\rm ext}$ stands for the externally injected currents. In this paper, we consider a~problem where the displacement currents $\tfrac{\partial}{\partial t}\bm{D}$ are negligible compared to the conduction currents, and therefore they can be omitted. We also assume that the conductivity is linear, that is, $\sigma(\xi):=\sigma(\xi , \|\bm{E}(\xi,t)\|_2 )$ does not depend on $\bm{E}$. Further, under some additional topological conditions on $\mathit{\Omega}$, the fact that the magnetic flux intensity is divergence-free implies that we can make the ansatz $\bm{B}=\nabla\times\bm{A}$ for some function $\bm{A}$, which is called the {\em magnetic vector potential}. Plugging this into Faraday's law of induction, we obtain \mbox{$\nabla\times \bm{E}=-{\textstyle\frac{\partial}{\partial t}}\nabla\times\bm{A}$} whence $\bm{A}$ can be chosen in a~way that $\bm{E}=-{\textstyle\frac{\partial}{\partial t}}\bm{A}$. Finally, inserting the constitutive relations for $\bm{H}$ and $\bm{J}$ into the magnetic flux law and using the derived representations for $\bm{B}$ and $\bm{E}$ in terms of $\bm{A}$, we obtain the so-called {\em magneto-quasistatic} (MQS) {\em approximation of Maxwell's equations} (also called {\em eddy current model}) given by \begin{equation} \tfrac{\partial}{\partial t}\left(\sigma\bm{A}\right) + \nabla \times \left(\nu(\cdot,\|\nabla \times \bm{A}\|_2) \nabla \times \bm{A}\right)=\bm{J}_{\rm ext}\quad \text{ in } \mathit{\Omega}\times (0,T], \label{eq:MQS01} \end{equation} see \cite{IdaBastos97,BdGCS18}. Such equations are used, for example, in the modeling of accelerator magnets, electric machines and transformers operating at low frequencies. If a~part of the medium is non-conducting, then the function $\sigma$ vanishes on some subset of~$\mathit{\Omega}$. In this case, the MQS equation \eqref{eq:MQS01} becomes of degenerate parabolic or mixed parabolic-elliptic type. The coupling of electromagnetic devices to an~external circuit can be realized as a~solid conductor model or as a~stranded conductor model, see \cite{SchoepsDGW13} for details. 
Here, we restrict ourselves to the stranded conductor model where the external current is induced by $m$~windings \begin{equation} \bm{J}_{\rm ext}(\xi,t)=\chi(\xi) \bm{i}(t),\label{eq:MQS02} \end{equation} where $\bm{i}$ is the $\R^m$-valued current function, and $\chi$ is the $\mathbb{R}^{3\times m}$-valued winding density function which expresses the geometry of the windings. The windings are assumed to have an~internal resistance $R\in\R^{m\times m}$ to which $m$ time-dependent voltages are applied; these voltages are collected in the prescribed $\R^m$-valued function $\bm{v}$. Further, by using the fact that the electric field induces another~voltage $\int_\mathit{\Omega} \chi^\top\bm{E} \, {\rm d}\xi$ along the windings, the relation $\bm{E}=-{\textstyle\frac{\partial}{\partial t}}\bm{A}$ together with Kirchhoff's voltage law gives rise to \begin{equation} \tfrac{{\rm d}}{{\rm d} t} \int_\mathit{\Omega} \chi^\top\bm{A} \, {\rm d}\xi + R\, \bm{i} = \;\bm{v}\quad \text{ on } (0,T]. \label{eq:MQS03} \end{equation} Altogether, we obtain a system of quasilinear partial integro-differential-algebraic equations with unknown functions $\bm{A}$ and $\bm{i}$. These equations are further equipped with some initial and boundary conditions which are specified in Section~\ref{sec:solution}. Existence, uniqueness and regularity results for the linear MQS system \eqref{eq:MQS01} with the coupling relations \eqref{eq:MQS02} and \eqref{eq:MQS03} are presented in \cite{NicT14} under some additional topological conditions on the conducting domain. They are based on a~theo\-rem by Showalter on degenerate linear parabolic equations \cite{Show77}. Quasilinear elliptic and non-degenerate parabolic equations in MQS field problems, that is, \eqref{eq:MQS01} and~\eqref{eq:MQS02} with a~prescribed function $\bm{i}$ and bounded and strictly positive mapping~$\sigma$, have been studied in the context of optimal control in \cite{You13} and \cite{NicT17}, respectively. The solvability of linear de\-ge\-ne\-ra\-te MQS equations has been investigated in \cite{ArnH12} by deriving a~unified variational formulation and in \cite{PauPTW21} by using the theory of evolutionary equations. Further, a~comprehensive analysis of quasilinear MQS problems based on a~Schur complement approach has been provided in \cite{BLS05}. However, the extension of these results to MQS systems with the coupling relation has remained a~challenging problem which requires considerable care due to structurally different properties of the solution on conducting and non-conducting subdomains, and the additional integral constraint. In \cite{PauPic17}, a~convergence analysis of solutions of linear Maxwell equations to the MQS model is performed in the limit of vanishing electric permittivity. In this paper, we follow a different approach to analyze the existence and uniqueness of solutions of the general coupled MQS model \eqref{eq:MQS01}--\eqref{eq:MQS03}. It relies on a~formulation of this model as an abstract differential-algebraic equation involving the subgradient of the magnetic energy. This novel approach gives rise not only to well-posedness but further allows us to prove regularity results for the solutions, which are new even in the linear case. Hereby, the well-established theory of gradient systems involving subgradients of convex functions \cite{Bre73,Bar10} forms the basis for our generalization to the differential-algebraic case, which is subsequently applied to the coupled quasilinear MQS model.
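For orientation, recall that the classical theory of gradient systems mentioned above deals with evolution inclusions of the form \begin{equation*} \dot u(t)+\partial\phi\bigl(u(t)\bigr)\ni f(t)\quad\text{ for } t\in(0,T],\qquad u(0)=u_0, \end{equation*} in a Hilbert space, where $\partial\phi$ denotes the subgradient of a proper, convex and lower semicontinuous functional $\phi$, see \cite{Bre73,Bar10}. The differential-algebraic generalization developed below modifies the time-derivative term in order to account for the degeneracy caused by the vanishing of the conductivity on the non-conducting subdomain; the symbols $u$, $\phi$, $f$ and $u_0$ in this display are generic placeholders only and do not anticipate the notation introduced in Section~\ref{sec:gradsys}.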
The paper is organized as follows. Section~\ref{ssec:spaces} contains a~brief overview of the notation and function spaces used in the subsequent analysis. In Section~\ref{ssec:mqs}, we present the MQS model problem and state assumptions on geometry and material parameters. In Section~\ref{sec:solution}, we define the solution concept and prove the uniqueness result. In Section~\ref{sec:energy}, we introduce the magnetic energy and examine its essential properties. Section~\ref{sec:gradsys} and Section~\ref{sec:MQSsolv} contain our main results. First, we present some operator-theoretic results on a~class of abstract differential-algebraic systems involving subgradients. Thereafter, we show that the coupled nonlinear MQS system fits into this framework, which allows us to establish the existence and regularity properties of solutions to this model. \section{Notations and Function Spaces} \label{ssec:spaces} Let $\mathbb{R}_{\ge0}$ denote the set of all nonne\-gative real numbers, and $\R^{m\times n}$ the set of real matrices of size $m\times n$. Further, $x\cdot y$ and $x\times y$ stand, respectively, for the Euclidean inner product and the cross product of $x,y\in\R^3$, and $\|x\|_2$ is the Euclidean norm of $x\in\R^3$. For a~function \mbox{$\bm{A}:\mathit{\Omega}\to\R^3$}, the expression $\|\bm{A}\|_2$ stands for the scalar-valued function $\xi\mapsto\|\bm{A}(\xi)\|_2$. The restriction of a~function $f$ to a~subset $S$ of its domain is denoted by~$\left.f\right|_S$. The inner product on a Hilbert space $H$ is denoted by $\left\langle\cdot,\cdot\right\rangle_H$, and the induced norm is denoted by $\|\cdot\|_H$. The duality pairing between a Hilbert space $H$ and its dual space $H'$ is denoted by $\langle\cdot,\cdot\rangle$. Note that, throughout this paper, all spaces are assumed to be real. The set of linear, bounded operators between two Banach spaces $X$ and $Y$ is denoted by $\mathcal{L}(X,Y)$ and, in the case $X=Y$, simply by $\mathcal{L}(X)$. In the case when $X$ and $Y$ are Hilbert spaces, $A^*\in\mathcal{L}(Y,X)$ stands for the adjoint of $A$. Moreover, ${M^\top}\in\R^{n\times m}$ denotes the transpose of a matrix $M\in\R^{m\times n}$. Lebesgue and first-order Sobolev spaces of functions defined on a domain \mbox{$\mathit{\Omega}\subseteq\R^m$} and with values in a Banach space $X$ are denoted by $L^{p}(\mathit{\Omega};X)$, $W^{1,p}(\mathit{\Omega};X)$ and $H^{1}(\mathit{\Omega};X)$, respectively. We simply write $L^{p}(\mathit{\Omega})$, $W^{1,p}(\mathit{\Omega})$ and $H^{1}(\mathit{\Omega})$ when $X=\R$. In particular, when $\mathit{\Omega} = \mathbb{I}$ is an interval, we also consider the space $L^p_{\rm loc} (\mathbb{I} ;X)$ which consists of all (equivalence classes of) functions $f:\mathbb{I}\to X$ such that $f\in L^{p}(\mathbb{K};X)$ for all compact intervals $\mathbb{K}\subseteq \mathbb{I}$. Similarly, one defines $W^{1,{p}}_{\rm loc}(\mathbb{I};X)$ and $H^{1}_{\rm loc}(\mathbb{I};X)$. The integrals of Banach space valued functions are to be understood in the Bochner sense. Writing $f\in C(\mathbb{I};X)$ for some measurable function $f:\mathbb{I}\to X$ means that there is a~representative in the equivalence class of $f$ which is continuous on $\mathbb{I}$. In this case, we use the notation $f(s+) := \lim\limits_{t\to s+} f(t)$, where the limit on the right-hand side is taken by using the continuous representative. Let $\mathit{\Omega}\subseteq\R^3$ be an open domain.
The weak (distributional) gradient of $\phi\in L^{{2}}(\mathit{\Omega})$ is denoted by $\nabla \phi$, and $\nabla\times \bm{A}$ stands for the weak curl of a vector field $\bm{A}\in L^{{2}}(\mathit{\Omega};\mathbb{R}^3)$. We consider the Sobolev space \[\begin{aligned} H(\curl,\mathit{\Omega}) & = \, \bigl\{\,\bm{A}\in L^2(\mathit{\Omega};\mathbb{R}^3)\enskip:\enskip\nabla\times\bm{A}\in L^2(\mathit{\Omega};\mathbb{R}^3)\,\bigr\}, \end{aligned}\] which is a Hilbert space endowed with the inner product $$ \langle\bm{A},\bm{F} \rangle_{H(\curl,\mathit{\Omega})} = \langle\bm{A},\bm{F} \rangle_{L^2(\mathit{\Omega};\mathbb{R}^3)}+ \langle\nabla\times\bm{A},\nabla\times\bm{F} \rangle_{L^2(\mathit{\Omega};\mathbb{R}^3)}. $$ If $\mathit{\Omega}\subset\R^3$ is bounded and has a~Lipschitz boundary $\partial\mathit{\Omega}$, then for almost any $\xi\in\partial\mathit{\Omega}$, there exists the outward unit normal vector $\bm{n}_o(\xi)\in\R^3$ of $\mathit{\Omega}$ in $\xi$. Here, ``almost any'' refers to the hypersurface Lebesgue measure in $\R^3$. It has been proven in \linebreak \cite[Theorem~I.2.11]{GiraRavi86} that any $\bm{A}\in H(\curl,\mathit{\Omega})$ has a~well-defined tangential trace $\bm{A}\times \bm{n}_o\in L^2(\partial\mathit{\Omega};\mathbb{R}^3)$. This allows us to define the space \[\begin{aligned} H_0(\curl,\mathit{\Omega}) = &\, \bigl\{\,\bm{A}\in H(\curl,\mathit{\Omega})\enskip:\enskip \bm{A}\times \bm{n}_o = 0 \text{ on } \partial\mathit{\Omega} \,\bigr\}, \end{aligned}\] which is a~closed subspace of {$H(\curl,\mathit{\Omega})$}. It has also been proven in \cite[Theorem~I.2.11]{GiraRavi86} that for all $\bm{A}\in H_0(\curl,\mathit{\Omega})$ and $\bm{F}\in H(\curl,\mathit{\Omega})$, \begin{equation} \langle\nabla\times\bm{A},\bm{F} \rangle_{L^2(\mathit{\Omega};\mathbb{R}^3)}=\langle\bm{A},\nabla\times\bm{F} \rangle_{L^2(\mathit{\Omega};\mathbb{R}^3)}. \label{eq:curladj} \end{equation} This relation is an extension of the formula of integration by parts to the weak curl operator. The space of di\-ver\-gen\-ce-\-free and square integrable functions is defined as \begin{multline} L^2(\divg\!=\!0,\mathit{\Omega};\mathbb{R}^3) \\ = \bigl\{\bm{A}\in L^2(\mathit{\Omega};\R^3) \,\, : \,\, \langle \bm{A},\nabla \psi\rangle_{L^2(\mathit{\Omega};\mathbb{R}^3)}=0 \text{ for all }\psi\in H^1_0 (\mathit{\Omega}) \,\bigr\} . \label{eq:graddivorth} \end{multline} {It} is a~closed subspace of $L^{2}(\mathit{\Omega};\mathbb{R}^3)$ and, therefore, a~Hilbert space with respect to the standard inner product in $L^{2}(\mathit{\Omega};\mathbb{R}^3)$. Recall here, that as usual the Sobolev space $H^1_0 (\Omega )$ is the closure of the space of test functions in $H^1 (\Omega )$. \section{Model Problem and Assumptions} \label{ssec:mqs} In this section, we consider the coupled MQS system as motivated in Section~\ref{sec:intro} in more detail. We start with the introduction of the model, and, thereafter, we collect the assumptions on the spatial domain and the system parameters. \subsection{The coupled MQS model} Let $\mathit{\Omega}\subset \mathbb{R}^3$ be a~bounded domain with boundary $\partial \mathit{\Omega}$ and let $T>0$. 
We consider the~coupled MQS system in magnetic vector potential formulation \begin{subequations}\label{eq:MQS} \begin{align} \tfrac{\partial}{{\partial}t}\left(\sigma\bm{A}\right) + \nabla \times \left(\nu(\cdot,\|\nabla \times \bm{A}\|_2) \nabla \times \bm{A}\right) = & \; \chi\,\bm{i} & \text{ in } &\mathit{\Omega}\times (0,T], \label{eq:MQS1} \\ \tfrac{{\rm d}}{{\rm d} t} \int_\mathit{\Omega} \chi^\top\bm{A} \, {\rm d}\xi + R\, \bm{i} = &\;\bm{v}\label{eq:MQScoupl} & \text{ on } &(0,T], \\ \bm{A}\times \bm{n}_o = &\; 0 & \mbox{in }& \partial \mathit{\Omega}\times (0,T], \label{eq:MQSbc}\\[2mm] \sigma\bm{A}(\cdot,0) = &\; \sigma\bm{A}_0 &\text{ in }&\mathit{\Omega}, \label{eq:MQSic1}\\ \int_\mathit{\Omega} \chi^\top\bm{A}(\cdot,0) \, {\rm d}\xi=&\, \int_\mathit{\Omega} \chi^\top\bm{A}_0\, {\rm d}\xi,&& \label{eq:MQSic2} \end{align} \end{subequations} where $\bm{A}:\mathit{\Omega} \times [0,T]\to\mathbb{R}^3$ is the magnetic vector potential, $\nu:\mathit{\Omega}\times\mathbb{R}_{\ge0}\to\mathbb{R}_{\ge0}$ is the magnetic reluctivity, $\sigma:\mathit{\Omega}\to\mathbb{R}_{\ge0}$ is the electric conductivity, $\bm{v}:[0,T]\to\mathbb{R}^m$ and $\bm{i}:[0,T]\to\mathbb{R}^m$ are, respectively, the voltage and the electrical current through the electromagnetic conductive contacts. Furthermore, $\chi:\mathit{\Omega}\to\mathbb{R}^{3\times m}$ is the winding function, $R\in\mathbb{R}^{m\times m}$ is the resistance of the winding, and $\bm{A}_0: \mathit{\Omega}\to\mathbb{R}^3$ is the initial value for the magnetic vector potential. The boundary condition \eqref{eq:MQSbc} implies that the magnetic flux through the boundary $\partial \mathit{\Omega}$ is zero. Moreover, equations \eqref{eq:MQSic1} and \eqref{eq:MQSic2} describe the initial conditions for the magnetic vector potential. Note that we only initialize the parts of $\bm{A}$ whose derivatives occur in \eqref{eq:MQS1} and \eqref{eq:MQScoupl}. The coupled MQS system \eqref{eq:MQS} can be considered as a~control system, where the voltage~$\bm{v}$ takes the role of the input and $(\bm{A},\bm{i})$ is the state. \subsection{The spatial domain} This subsection contains the assumptions on the spatial domain $\mathit{\Omega}$ which are made throughout this paper. \begin{assumption}[Spatial domain, geometry and topology] \label{ass:omega} {\em The set \mbox{$\mathit{\Omega}\subset\mathbb{R}^3$} is a~simply connected bounded Lipschitz domain, which is decomposed into two Lipschitz regular, open subsets \mbox{$\mathit{\Omega}_{C}$, $\mathit{\Omega}_{I}\subset \mathit{\Omega}$}, called, respectively, {\em conducting} and {\em non-conducting subdomains}, such that $\overline{\mathit{\Omega}}_{C}\subset {\mathit{\Omega}}$ and $\mathit{\Omega}_{I}=\mathit{\Omega}\setminus \overline{\mathit{\Omega}}_C$. Furthermore, the subdomain $\mathit{\Omega}_C$ is connected, and $\mathit{\Omega}_{I}$ has finitely many connected components $\mathit{\Omega}_{I,1},\ldots,\mathit{\Omega}_{I,q}$ and $\mathit{\Omega}_{I,{\rm ext}}$, where \vspace*{-1mm} \begin{romanlist}[a)] \item each of the sets $\mathit{\Omega}_{I,1},\ldots,\mathit{\Omega}_{I,q}$ has exactly one boundary component, these are denoted by $\Gamma_1,\ldots,\Gamma_q$ and called the {\em internal interfaces}; \item the {\em external non-conducting subdomain} $\mathit{\Omega}_{I,{\rm ext}}$ has two boundary components $\partial\mathit{\Omega}$ and the {\em external interface} $\Gamma_{{\rm ext}}:=\overline{\mathit{\Omega}}_{I,{\rm ext}}\cap \overline{\mathit{\Omega}}_{C}$. 
\end{romanlist} } \end{assumption} Note that by a~{\em boundary component} of a~subdomain $\mathit{\Omega}_*\subseteq\mathit{\Omega}$, we mean a~connected component of its boundary $\partial\mathit{\Omega}_*$. Since $\mathit{\Omega}_{I}$ has a~Lipschitz boundary, the closed connected components $\overline{\mathit{\Omega}}_{I,1},\ldots,\overline{\mathit{\Omega}}_{I,q}$ and $\overline{\mathit{\Omega}}_{I,{\rm ext}}$ are disjoint. The subdomains $\mathit{\Omega}_{I,1},\ldots,\mathit{\Omega}_{I,q}$ can be interpreted as ``interior cavities'' of the conducting subdomain $\mathit{\Omega}_C$. In particular, we do not assume that the conducting subdomain is simply connected; it may also have some ``handles''. Note that the informally used notions of ``interior cavities'' and ``handles'' can be made mathematically precise in terms of the so-called {\em Betti numbers}~\cite{Ros19}. We later assume that the electric conductivity is a~scalar multiple of the indicator function on the conducting subdomain, which justifies the above naming. \subsection{The space $X(\mathit{\Omega},\mathit{\Omega}_C)$, the initial conditions and the winding function} Next, we present a~space in which the solutions of the coupled MQS system \eqref{eq:MQS} evolve. As a~preliminary thought, note that equation~\eqref{eq:MQS1} does not change if we replace $\bm{A}$ by $\bm{A} + \nabla \psi$ for an arbitrary but fixed $\psi \in H^1(\mathit{\Omega})$ which is constant on each boundary component $\Gamma_1,\ldots,\Gamma_q,\Gamma_{\rm ext}$ and $\partial\mathit{\Omega}$. Therefore, we restrict our considerations to solutions which, at each time instant, are orthogonal in $L^2(\mathit{\Omega};\mathbb{R}^3)$ to all gradient fields of functions that are constant on each set $\Gamma_1,\ldots,\Gamma_q,\Gamma_{\rm ext}$ and $\partial\mathit{\Omega}$. Since the conducting and non-conducting subdomains are both Lipschitz regular, the trace theorem \cite[Theorem~1.39]{Yagi10} yields that the following space is well-defined: \begin{align} \label{eq:statespaceorth} G(\mathit{\Omega},\mathit{\Omega}_C) =\Bigl\{ \nabla \psi \Bigr. \enskip:\enskip &\psi\in H^1_0(\mathit{\Omega})\text{ s.t.\ } \exists\, c_1,\ldots,c_q,c_{\rm ext}\in\R\text{ with }\\ &\!\left.\psi\right|_{\Gamma_i} \equiv c_i \text{ for } i=1,\ldots,q,\; \Bigl. \left.\psi\right|_{\Gamma_{\rm ext}}\! \equiv c_{\rm ext} \Bigr\}. \!\!\!\!\!\!\nonumber \end{align} We seek solutions with values in the orthogonal space of $G(\mathit{\Omega},\mathit{\Omega}_C)$ in $L^2(\mathit{\Omega};\mathbb{R}^3)$, that is, in the space \begin{equation} X(\mathit{\Omega},\mathit{\Omega}_C)=\left\{\bm{A} \in L^2(\mathit{\Omega};\mathbb{R}^3)\enskip:\enskip \langle\bm{A}, \bm{F} \rangle_{L^2(\mathit{\Omega};\mathbb{R}^3)} = 0 \;\text{ for all } \bm{F}\in G(\mathit{\Omega},\mathit{\Omega}_C)\right\}, \label{eq:statespace} \end{equation} which is a~Hilbert space when it is equipped with the inner product $\langle \cdot , \cdot \rangle_{L^2(\mathit{\Omega};\mathbb{R}^3)}$. We further consider the space \begin{equation} X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)=H_0(\curl,\mathit{\Omega})\cap X(\mathit{\Omega},\mathit{\Omega}_C), \label{eq:statespace2} \end{equation} which is again a Hilbert space, now provided with the inner product in $H_0(\curl,\mathit{\Omega})$. The space $X(\mathit{\Omega},\mathit{\Omega}_C)$ enables us to formulate our assumption on the initial magnetic vector potential and the winding function. \begin{assumption}[Initial magnetic vector potential and winding function]\label{ass:init}\!\!
\vspace*{-6mm} {\em \begin{romanlist}[a)] \item\label{ass:initial} The initial magnetic vector potential $\bm{A}_0:\mathit{\Omega}\to\mathbb{R}^3$ belongs to $X(\mathit{\Omega},\mathit{\Omega}_C)$. \item\label{ass:winding} The columns of the winding function $\chi:\mathit{\Omega}\to\mathbb{R}^{3\times m}$, denoted by $\chi_1,\ldots,\chi_m$, belong to $X(\mathit{\Omega},\mathit{\Omega}_C)$. \end{romanlist} } \end{assumption} Note that \[ \bigl\{ \nabla\psi\; :\; \psi \in H^1_0(\mathit{\Omega}_C \cup \mathit{\Omega}_I ) \bigr\} \subseteq G(\mathit{\Omega},\mathit{\Omega}_C) \subseteq \bigl\{ \nabla\psi\; :\; \psi\in H^1_0(\mathit{\Omega} ) \bigr\} , \] Therefore, by using \eqref{eq:graddivorth}, we obtain \begin{equation}\label{eq:divfree} L^2(\divg\!=\!0, \mathit{\Omega};\R^3) \subseteq X(\mathit{\Omega},\mathit{\Omega}_C) \subseteq L^2(\divg\!=\!0, \mathit{\Omega}_C \cup \mathit{\Omega}_I ;\R^3) . \end{equation} In particular, the first inclusion in \eqref{eq:divfree} implies that Assumption~\ref{ass:init}~\ref{ass:winding}) on the winding function $\chi$ is fulfilled, if the columns of~$\chi$ belong to $L^2(\divg\!=\!0, \mathit{\Omega};\R^3)$. In practice, the current is often injected through the contacts in the non-conducting subdomain $\mathit{\Omega}_I$, that is, $\mbox{supp}(\chi) \subset\mathit{\Omega}_I$. In this case, $\chi_1,\ldots,\chi_m\in X(\mathit{\Omega},\mathit{\Omega}_C)$ is even equivalent to $\chi_1,\ldots,\chi_m\in L^2(\divg\!=\!0,\mathit{\Omega};\R^3)$. Further, note that any $\bm{A}\in X(\mathit{\Omega},\mathit{\Omega}_C)$ is indeed divergence-free on the conducting subdomain $\mathit{\Omega}_C$ as well as on the non-conducting subdomain $\mathit{\Omega}_I$. Since the curl of a~function is divergence-free, this yields \begin{equation*} \nabla\times \bm{A}\in X(\mathit{\Omega},\mathit{\Omega}_C)\qquad \text{for all } \bm{A}\in H_0(\curl,\mathit{\Omega}). \label{eq:curlsubset} \end{equation*} In the following, we collect some further properties of the space $X(\mathit{\Omega},\mathit{\Omega}_C)$. The subsequent lemma establishes that this space is closed with respect to multiplication by the indicator function of the conducting subdomain $\mathit{\Omega}_C$. \begin{lemma}\label{lem:sigmamult} Let $\mathit{\Omega}\subset\mathbb{R}^3$ and $\mathit{\Omega}_C$, $\mathit{\Omega}_I\subseteq\mathit{\Omega}$ be as in Assumption~\textup{\ref{ass:omega}}, and let $\mathbbm{1}_{\mathit{\Omega}_C}:\mathit{\Omega}\to\R$ be the indicator function of the set $\mathit{\Omega}_C$, that is, \begin{equation*}\label{eq:indfun} \mathbbm{1}_{\mathit{\Omega}_C}(\xi)=\begin{cases}1, &\,\xi\in\mathit{\Omega}_C,\\0,&\,\xi\notin\mathit{\Omega}_C .\end{cases} \end{equation*} Then, for every $\bm{A}\in X(\mathit{\Omega},\mathit{\Omega}_C)$, one has $\mathbbm{1}_{\mathit{\Omega}_C} \bm{A}\in X(\mathit{\Omega},\mathit{\Omega}_C)$. \end{lemma} \begin{proof} Let $\bm{A}\in X(\mathit{\Omega},\mathit{\Omega}_C)$ and let $\bm{F}\in G(\mathit{\Omega},\mathit{\Omega}_C)$ be arbitrary. Then there exists some $\psi\in H^1_0(\mathit{\Omega})$ and $c_1,\ldots,c_q,c_{\rm ext}\in\R$ such that $\bm{F}=\nabla \psi$, $\left.\psi\right|_{\Gamma_i} \equiv c_i$ for \mbox{$i=1,\ldots,q$}, and $\left.\psi\right|_{\Gamma_{\rm ext}} \equiv c_{\rm ext}$. 
Consider the function \[ \widetilde{\psi}(\xi)=\begin{cases}\psi(\xi)-c_{\rm ext},&\,\xi\in\mathit{\Omega}_C,\\c_i-c_{\rm ext},&\,\xi\in\mathit{\Omega}_{I,i},\; i=1,\ldots,q,\\0,&\,\xi\in\mathit{\Omega}_{I,{\rm ext}}.\end{cases} \] Then $\widetilde{\psi}\in H^1(\mathit{\Omega}_C \cup \mathit{\Omega}_I)$ and the traces of $\widetilde{\psi}$ from both sides of the interfaces $\Gamma_1,\ldots,\Gamma_q$ and $\Gamma_{{\rm ext}}$ coincide. Hence, $\widetilde{\psi}\in H^1(\mathit{\Omega})$ and, by $\widetilde{\psi}|_{\mathit{\Omega}_{I,{\rm ext}}}=0$, we even have $\widetilde{\psi}\in H^1_0(\mathit{\Omega})$. This gives $\nabla \widetilde{\psi}\in G(\mathit{\Omega},\mathit{\Omega}_C)$. Taking into account that $\nabla\widetilde{\psi}$ vanishes on $\mathit{\Omega}_I$, we obtain $$ 0 =\int_{\mathit{\Omega}} \bm{A} \cdot \nabla \widetilde{\psi}\, {\rm d}\xi =\int_{\mathit{\Omega}_C} \bm{A} \cdot \nabla \widetilde{\psi}\, {\rm d}\xi =\int_{\mathit{\Omega}_C} \bm{A}\cdot \nabla {\psi}\, {\rm d}\xi =\int_{\mathit{\Omega}} (\mathbbm{1}_{\mathit{\Omega}_C}\bm{A}) \cdot \bm{F}\, {\rm d}\xi, $$ and, thus, $\mathbbm{1}_{\mathit{\Omega}_C}\bm{A}\in X(\mathit{\Omega},\mathit{\Omega}_C)$. \end{proof} Next, we show that $X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$ is dense in $X(\mathit{\Omega},\mathit{\Omega}_C)$. Moreover, we derive an~estimate on the $L^2$-norm of $\bm{A}\in X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$ by means of the $L^2$-norm of $\nabla\times \bm{A}$ and the $L^2$-norm of the restriction of $\bm{A}$ to the conducting subdomain $\mathit{\Omega}_C$. \newpage \begin{lemma}\label{lem:denscoerc} Let $\mathit{\Omega}\subset\mathbb{R}^3$ and $\mathit{\Omega}_C$, $\mathit{\Omega}_I\subseteq\mathit{\Omega}$ be as in Assumption~\textup{\ref{ass:omega}}. Then $X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$ is dense in $X(\mathit{\Omega},\mathit{\Omega}_C)$. Further, there exists $L_C>0$ such that, for all $\bm{A}\in X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$, \begin{equation} \|\bm{A}\|_{L^2(\mathit{\Omega};\R^3)}^2\leq L_C\,\left(\|\bm{A}\|_{L^2(\mathit{\Omega}_C;\R^3)}^2+\|\nabla\times \bm{A}\|_{L^2(\mathit{\Omega};\R^3)}^2\right) . \label{eq:curlest} \end{equation} \end{lemma} \begin{proof} The existence of a~constant $L_C>0$ such that the estimate \eqref{eq:curlest} holds for all \mbox{$\bm{A}\in X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$} immediately follows from \cite[Lemma~4]{BLS05}. Hence, it only remains to prove the density statement. To this end, let $\bm{A}\in X(\mathit{\Omega},\mathit{\Omega}_C)$ and $\varepsilon >0$. {Since $H_0(\curl,\mathit{\Omega})$ contains the space of test functions on $\mathit{\Omega}$, it is dense in $L^2(\mathit{\Omega};\mathbb{R}^3)$. Hence,} there exists some $\bm{C}\in H_0(\curl,\mathit{\Omega})$ such that \[ \|\bm{A}-\bm{C}\|_{L^2(\mathit{\Omega};\mathbb{R}^3)}<\varepsilon. \] Now consider the orthogonal decomposition $\bm{C}=\bm{G}+\nabla \psi$ with $\bm{G}\in X(\mathit{\Omega},\mathit{\Omega}_C)$ and $\nabla\psi\in G(\mathit{\Omega},\mathit{\Omega}_C)$. Since $\nabla\times(\nabla \psi)=0$, we have $\bm{G}=\bm{C}-\nabla {\psi}\in H(\curl,\mathit{\Omega})$. Further, since the boundary trace of $\psi\in H^1_0(\mathit{\Omega})$ is constant, the tangential component of the gradient of $\psi$ vanishes at $\partial\mathit{\Omega}$, that is, $\nabla\psi\times \bm{n}_o=0$ on $\partial\mathit{\Omega}$. This gives rise to $\bm{G}\in H_0(\curl,\mathit{\Omega})$. 
Then, since $\bm{A}-\bm{G}\in X(\mathit{\Omega},\mathit{\Omega}_C)$ is orthogonal to $\nabla \psi\in G(\mathit{\Omega},\mathit{\Omega}_C)$, we obtain \[\begin{aligned} \|\bm{A}-\bm{G}\|_{L^2(\mathit{\Omega};\mathbb{R}^3)}^2\leq\,& \|\bm{A}-\bm{G}\|_{L^2(\mathit{\Omega};\mathbb{R}^3)}^2+\|\nabla \psi\|_{L^2(\mathit{\Omega};\mathbb{R}^3)}^2\\ =\,&\|\bm{A}-(\bm{G}+\nabla \psi)\|_{L^2(\mathit{\Omega};\mathbb{R}^3)}^2=\|\bm{A}-\bm{C}\|_{L^2(\mathit{\Omega};\mathbb{R}^3)}^2<\varepsilon^2. \end{aligned}\] This completes the proof. \end{proof}
\subsection{The material parameters} We now state the assumptions on the magnetic reluctivity, the electric conductivity and the resistance matrix. Note that we consider only isotropic materials without hysteresis effects. \begin{assumption}[Material parameters]\!\!\label{ass:material} \vspace*{-1mm} {\em \begin{romanlist}[a)] \item \label{ass:material1} The electric conductivity $\sigma:\mathit{\Omega}\to\mathbb{R}_{\geq 0}$ is of the form $\sigma=\sigma_C\mathbbm{1}_{\mathit{\Omega}_C}$ with a~real number $\sigma_C>0$. \item \label{ass:material2} The magnetic reluctivity $\nu:\mathit{\Omega}\times \mathbb{R}_{\ge0}\to\mathbb{R}_{\ge0}$ has the following properties: \vspace*{-1mm} \begin{romanlist}[(i)] \item\label{ass:material2a} $\nu$ is measurable; \item\label{ass:material2c} the function $\zeta\mapsto\nu(\xi,\zeta)\zeta$ is strongly monotone with a~monotonicity constant \mbox{$m_{\nu}>0$} independent of $\xi\in\mathit{\Omega}$. In other words, there exists $m_{\nu}>0$ such that \[ \bigl(\nu(\xi,\zeta) \zeta-\nu(\xi,\varsigma)\varsigma\bigr)(\zeta-\varsigma)\geq m_{\nu} (\zeta-\varsigma)^2 \enskip\text{ for all } \xi\in\mathit{\Omega}, \enskip \zeta,\varsigma\in\mathbb{R}_{\ge0}; \] \item\label{ass:material2d} the function $\zeta\mapsto\nu(\xi,\zeta)\zeta$ is Lipschitz continuous with a~Lipschitz constant $L_{\nu}>0$ independent of $\xi\in\mathit{\Omega}$. In other words, there exists $L_{\nu}>0$ such that \[ |\nu(\xi,\zeta)\zeta-\nu(\xi,\varsigma)\varsigma| \leq L_{\nu} |\zeta-\varsigma|\quad\text{ for all } \xi\in\mathit{\Omega}, \enskip\zeta,\varsigma\in\mathbb{R}_{\ge0}. \] \end{romanlist} \item\label{ass:resistance} The resistance matrix $R\in\mathbb{R}^{m\times m}$ is symmetric and positive definite. \end{romanlist} } \end{assumption} It follows immediately from the monotonicity and Lipschitz conditions on the magnetic reluctivity $\nu$ that \begin{equation}\label{eq:Mnugreater} m_\nu \leq \nu(\xi,\zeta)\leq L_{\nu} \quad \text{ for all } \xi\in \mathit{\Omega} \text{ and all } \zeta>0. \end{equation}
\section{The Solution Concept} \label{sec:solution} In this section, we explain what we mean by a~solution of the coupled MQS system~\eqref{eq:MQS} and prove the uniqueness result. Using the canonical isomorphism $L^1 (\mathit{\Omega}\times [0,T]) = L^1 ([0,T] ; L^1 (\mathit{\Omega} ))$, we identify integrable functions defined on $\mathit{\Omega}\times[0,T]$ with integrable functions $[0,T]\to L^1 (\mathit{\Omega} )$. For the sake of brevity, we sometimes omit the arguments of functions. \begin{definition}[Solution of the MQS system]\label{def:sol} Let $\mathit{\Omega}\subset\mathbb{R}^3$ with subdomains $\mathit{\Omega}_C$ and $\mathit{\Omega}_I$ satisfy Assumption~\textup{\ref{ass:omega}}, and let $X(\mathit{\Omega},\mathit{\Omega}_C)$ and $X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$ be defined as in \eqref{eq:statespace} and \eqref{eq:statespace2}, respectively. Further, let the initial and winding functions be as in Assumption~\textup{\ref{ass:init}} and the material parameters as in Assumption~\textup{\ref{ass:material}}.
Let $T>0$ be fixed and $\bm{v}\in L^2([0,T];\mathbb{R}^m)$. Then $(\bm{A},\bm{i})$ with $\bm{A}:\overline{\mathit{\Omega}}\times [0, T]\to\mathbb{R}^3$ and $\bm{i}:[0,T]\to\mathbb{R}^m$ is called a~{\em weak solution} of the coupled MQS system \eqref{eq:MQS}, if\vspace*{-1mm} \begin{romanlist}[a)] \item\label{item:sol1} $\sigma\bm{A} \in C([0,T]; X(\mathit{\Omega},\mathit{\Omega}_C))\cap H_{\rm loc}^1((0,T];X(\mathit{\Omega},\mathit{\Omega}_C))$ and $\sigma\bm{A}(0)=\sigma\bm{A}_0$, \vspace*{.5mm} \item\label{item:sol2} $\int_\mathit{\Omega} \chi^\top\bm{A} \, {\rm d}\xi \in C([0,T] ; \R^m) \cap H_{\rm loc}^1((0,T];\mathbb{R}^m)$ and $\int_\mathit{\Omega} \chi^\top\bm{A}(0) \, {\rm d}\xi=\int_\mathit{\Omega} \chi^\top\bm{A}_0 \, {\rm d}\xi$, \vspace*{.5mm} \item\label{item:sol3} $\bm{A}\in L^2([0,T];X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C))$ and $\bm{i}\in L_{\rm loc}^2((0,T];\mathbb{R}^m)$, \vspace*{.5mm} \item\label{item:sol7} for all $\bm{F}\in X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$, the equations \begin{equation} \arraycolsep=2pt \begin{array}{rcl} \displaystyle{\tfrac{\rm d }{{\rm d} t}\!\int_\mathit{\Omega} \!\sigma \bm{A}(t)\cdot \bm{F}\, {\rm d}\xi +\!\! \int_\mathit{\Omega} \!\nu(\cdot,\|\nabla\!\times\! \bm{A}(t)\|_2)(\nabla\!\times\! \bm{A}(t)) \cdot(\nabla\!\times\! \bm{F})\, {\rm d}\xi} & \!= &\!\! \displaystyle{\int_\mathit{\Omega} \!\chi\, \bm{i}(t)\cdot \bm{F}\, {\rm d}\xi,} \\[2mm] \displaystyle{\tfrac{\rm d }{{\rm d} t}\!\int_\mathit{\Omega} \chi^\top\bm{A}(t)\, {\rm d}\xi+R\,\bm{i}(t) }& = &\! \bm{v}(t) \end{array} \label{eq:weak} \end{equation} hold for almost all $t\in[0,T]$. \end{romanlist} \end{definition} \begin{remark}\label{rem:infdimds} The first equation in \eqref{eq:weak} is motivated by an integration by parts with the weak curl operator \textup{(}cf.\ \eqref{eq:curladj}\textup{)}. In particular, if $(\bm{A},\bm{i})$ is a~classical solution in the sense that all partial derivatives in \eqref{eq:MQS} exist in the classical sense and are continuous up to the boundary of $\mathit{\Omega}$, and \eqref{eq:MQS} holds pointwise everywhere in $\mathit{\Omega}\times [0,T]$ together with the boundary condition, then $(\bm{A},\bm{i})$ is a~weak solution. \end{remark} Condition \ref{item:sol1}) in Definition~\ref{def:sol} means that $\tfrac{d}{dt}\sigma\bm{A}:[0,T]\to X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)'$ is measurable, where $X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)'$ is the dual of $X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$ with respect to the pivot space $X(\mathit{\Omega},\mathit{\Omega}_C)$. 
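To make Remark~\ref{rem:infdimds} more explicit, we record the formal computation behind the first equation in \eqref{eq:weak}; this is only an~illustration under sufficient smoothness and is not needed later. Assume for the moment that $(\bm{A},\bm{i})$ is a~classical solution, so that the first equation of \eqref{eq:MQS} holds pointwise (cf.\ the strong form \eqref{eq:classsolMQS} below). Multiplying it by a~test function $\bm{F}\in X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$, integrating over $\mathit{\Omega}$ and applying the integration by parts formula \eqref{eq:curladj} to the curl term, we obtain
\[
\int_\mathit{\Omega} \nabla\times\bigl(\nu(\cdot,\|\nabla\times \bm{A}(t)\|_2)\,\nabla\times \bm{A}(t)\bigr)\cdot \bm{F}\, {\rm d}\xi
=\int_\mathit{\Omega} \nu(\cdot,\|\nabla\times \bm{A}(t)\|_2)\,(\nabla\times \bm{A}(t))\cdot(\nabla\times \bm{F})\, {\rm d}\xi ,
\]
which turns the pointwise equation into the first equation in \eqref{eq:weak}, whereas the second equation in \eqref{eq:weak} requires no integration by parts.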
Define the operators \allowdisplaybreaks \begin{subequations}\label{eq:EAiiop} \begin{align} \cEl_{11}:\!\!\!\!&& X(\mathit{\Omega},\mathit{\Omega}_C)&\,\to\,X(\mathit{\Omega},\mathit{\Omega}_C),\label{eq:E11} \\ && \bm{A}&\;\mapsto \, \sqrt{\sigma} \bm{A} ,\nonumber\\[2mm] \cEl_{21}:\!\!\!\!&& X(\mathit{\Omega},\mathit{\Omega}_C)&\,\to\,\mathbb{R}^m,\label{eq:E21} \\ && \bm{A}&\,\mapsto \displaystyle{\,\int_\mathit{\Omega} \chi^\top\bm{A} \, {\rm d}\xi,} \nonumber\\[2mm] \cAl_{11}:\!\!\!\!&&\hspace*{-4mm}X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C) &\,\to \,X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)',\label{eq:A11}\\ &&\bm{A}&\,\mapsto\,\displaystyle{\biggl(\bm{F}\mapsto \int_\mathit{\Omega} \nu(\cdot,\|\nabla\times \bm{A}\|_2)(\nabla\times \bm{A})\cdot(\nabla\times \bm{F})\, {\rm d}\xi\biggr),}\hspace*{-10mm}\nonumber \\[2mm] \cAl_{12}:\!\!\!\!&&\mathbb{R}^m &\,\to\, X(\mathit{\Omega},\mathit{\Omega}_C),\label{eq:A12}\\ &&\bm{i}&\,\mapsto\, \chi\, \bm{i},\nonumber \\[2mm] \cAl_{22}:\!\!\!\!&&\mathbb{R}^m &\,\to\,\mathbb{R}^m,\label{eq:A22} \\ &&\bm{i}&\,\mapsto\, R^{1/2} \,\bm{i}, \nonumber \end{align} \end{subequations} and $$ \arraycolsep=2pt \begin{array}{rrcl} \qquad \cEl:&X(\mathit{\Omega},\mathit{\Omega}_C)\times \mathbb{R}^m\,&\to& X(\mathit{\Omega},\mathit{\Omega}_C) \times \mathbb{R}^m,\qquad\qquad\qquad\qquad\qquad\qquad\\ &(\bm{A},\bm{i})\,&\mapsto&(\cEl_{11}^* \cEl_{11}^{}\bm{A},\cEl_{21}^{}\bm{A}),\\[2.5mm] \qquad \cAl: & X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C) \times \mathbb{R}^m\, & \to & X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)'\times \mathbb{R}^m,\\ &(\bm{A},\bm{i})\,&\mapsto&(-\cAl_{11}^{}(\bm{A}) +\cAl_{12}^{}\,\bm{i},-\cAl_{22}^*\cAl_{22}^{}\,\bm{i}),\\[2.5mm] \qquad \cBl:& \mathbb{R}^m\,&\to & X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)' \times \mathbb{R}^m,\\ &\bm{v}\,&\mapsto&(0,\bm{v}). \end{array} $$ Then the coupled MQS system \eqref{eq:MQS} can equivalently be written as an~abstract differential-algebraic system \begin{equation}\label{eq:DS1} \tfrac{\rm d}{{\rm d}t}\cEl x(t) \,= \cAl (x(t))\, +\, \cBl u(t),\qquad \cEl x(0)=\cEl x_0, \end{equation} with the input $u(t)=\bm{v}(t)$, the state $x(t)=(\bm{A}(t), \bm{i}(t))$, and the initial condition $x_0=(\bm{A}_0, 0)$. Note that the operators $\cEl_{11}$, $\cEl_{21}$ (and thus also $\cEl$), $\cAl_{12}$ and $\cAl_{22}$ are linear, whereas $\cAl_{11}$ (and thus also $\cAl$) is nonlinear unless the reluctivity $\nu$ is constant with respect to the second argument. Our aim is to establish existence and uniqueness of solutions of the MQS system \eqref{eq:MQS} and to study their qualitative behavior. The existence proof is more involved and is the subject of Section~\ref{sec:gradsys}. The proof of uniqueness is considerably simpler and is presented in Theorem~\ref{thm:uniqueness} below. The essential ingredient is a~monotonicity property of the operator $\cAl_{11}$. This is the subject of the subsequent lemma, which is a~straightforward consequence of Assumption~\ref{ass:material}~b)(ii); the proof is therefore omitted, see \cite{You13} for details. \begin{lemma}\label{lem:A11mon} Let $\mathit{\Omega}\subset\mathbb{R}^3$ be a~bounded Lipschitz domain and let the operator $\cAl_{11}$ be defined as in \eqref{eq:A11} with $\nu$ satisfying Assumption~\textup{\ref{ass:material}~b)(ii)}.
Then for all functions \mbox{$\bm{A}_1,\bm{A}_2\in X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$}, the operator $\cAl_{11}$ fulfills \begin{equation} \bigl\langle {\bm{A}}_1-{\bm{A}}_2,\cAl_{11}({\bm{A}}_1)-\cAl_{11}({\bm{A}}_2) \bigr\rangle \geq m_\nu \, \|\nabla\times ({\bm{A}}_1-{\bm{A}}_2)\|_{L^2(\mathit{\Omega};\mathbb{R}^3)}^2, \label{eq:A11diss} \end{equation} where $m_\nu$ is the monotonicity constant of $\nu$. \end{lemma} We now present the uniqueness result. \begin{theorem}[Uniqueness of the solutions] \label{thm:uniqueness} Let $\mathit{\Omega}\subset\mathbb{R}^3$ with subdomains $\mathit{\Omega}_C$ and $\mathit{\Omega}_I$ satisfy Assumption~\textup{\ref{ass:omega}}, and let $X(\mathit{\Omega},\mathit{\Omega}_C)$ be defined as in \eqref{eq:statespace}. Further, let the initial and winding functions be as in Assumption~\textup{\ref{ass:init}} and the material parameters as in Assumption~\textup{\ref{ass:material}}. Let $T>0$ be fixed and let $\bm{v}\in L^2([0,T];\mathbb{R}^m)$ be a given voltage. Then the coupled MQS system \eqref{eq:MQS} admits at most one weak solution $(\bm{A},\bm{i})$ on $[0,T]$. \end{theorem} \begin{proof} Assume that $(\bm{A}_k,\bm{i}_k)$, $k=1$, $2$, are two weak solutions of the coupled MQS system \eqref{eq:MQS}. Consider the operators $\cEl_{ij}$ and $\cAl_{ij}$ as defined in \eqref{eq:EAiiop}. Note that $\cAl_{12}$ is the adjoint of $\cEl_{21}$, that is, $\cAl_{12}^{}=\cEl_{21}^*$, and that $\cEl_{11}^* \cEl_{11}^{}:X(\mathit{\Omega},\mathit{\Omega}_C)\to X(\mathit{\Omega},\mathit{\Omega}_C)$ and $\cAl_{22}^{} \cAl_{22}^*:\mathbb{R}^m\to \mathbb{R}^m$ are both self-adjoint and positive. Then \begin{equation}\label{eq:opdae} \begin{aligned} \tfrac{\rm d}{{\rm d}t}\cEl_{11}^* \cEl_{11}^{} \bm{A}_k(t) =&\,- \cAl_{11}(\bm{A}_k(t)) + \cEl_{21}^* \bm{i}_k(t),\\ \tfrac{\rm d}{{\rm d}t}\cEl_{21} \bm{A}_k(t) = & - \cAl_{22}^*\cAl_{22}^{} \bm{i}_k(t) + \bm{v}(t),\\ \cEl_{11}^* \cEl_{11}^{} \bm{A}_k(0)=&\,\cEl_{11}^* \cEl_{11}^{} \bm{A}_0,\\ \cEl_{21} \bm{A}_k(0)=&\,\cEl_{21} \bm{A}_0 \end{aligned} \end{equation} for $k=1$, $2$. Resolving the second equation in \eqref{eq:opdae} for \begin{equation} \bm{i}_k(t) = - \tfrac{\rm d}{{\rm d}t}(\cAl_{22}^*\cAl_{22}^{})^{-1}\cEl_{21}^{}\bm{A}_k(t) + (\cAl_{22}^*\cAl_{22}^{})^{-1}\bm{v}(t) \label{eq:iM} \end{equation} and substituting it into the first one, we obtain, for $k=1,2$, \begin{equation} \tfrac{\rm d}{{\rm d}t}\bigl(\cEl_{11}^* \cEl_{11}^{}\! + \cEl_{21}^* (\cAl_{22}^*\cAl_{22}^{})^{-1}\cEl_{21}^{}\bigr)\bm{A}_k(t) =- \cAl_{11}^{}(\bm{A}_k(t))\, +\, \cEl_{21}^* (\cAl_{22}^*\cAl_{22}^{})^{-1}\bm{v}(t). 
\label{eq:schur} \end{equation} Using \eqref{eq:schur} and Lemma~\ref{lem:A11mon}, we obtain for \mbox{$0<t_0\leq t\leq T$} that \begin{equation}\begin{array}{l} \displaystyle{\int_{{t_0}}^{{t}}\bigl\langle \bm{A}_1(\tau)-\bm{A}_2(\tau), \tfrac{\rm d}{{\rm d}\tau}(\cEl_{11}^*\cEl_{11}^{} + \cEl_{21}^* (\cAl_{22}^*\cAl_{22}^{})^{-1} \cEl_{21}^{}) (\bm{A}_1(\tau)-\bm{A}_2(\tau))\bigr\rangle\, {\rm d}\tau}\\ \displaystyle{\qquad\quad = - \int_{{t_0}}^{{t}}\bigl\langle \bm{A}_1(\tau)-\bm{A}_2(\tau), \cAl_{11}(\bm{A}_1(\tau))-\cAl_{11}(\bm{A}_2(\tau))\bigr\rangle \,{\rm d}\tau}\\ \displaystyle{\qquad\quad \leq - m_\nu \int_{{t_0}}^{{t}}\|\nabla\times ({\bm{A}}_1(\tau)-{\bm{A}}_2(\tau))\|_{L^2(\mathit{\Omega};\mathbb{R}^3)}^2 \,{\rm d}\tau.} \end{array} \label{eq:est2} \end{equation} On the other hand, the self-adjointness of $\cEl_{11}^* \cEl_{11}^{}$ and $\cAl_{22}^*\cAl_{22}^{}$ together with the product rule implies that \begin{equation} \begin{array}{l} \displaystyle{\int_{{t_0}}^{{t}}\bigl\langle \bm{A}_1(\tau)-\bm{A}_2(\tau), \tfrac{\rm d}{{\rm d}\tau}(\cEl_{11}^* \cEl_{11}^{} + \cEl_{21}^* (\cAl_{22}^*\cAl_{22}^{})^{-1}\cEl_{21}^{} ) (\bm{A}_1(\tau)-\bm{A}_2(\tau))\bigr\rangle\, {\rm d}\tau}\\[4mm] \displaystyle{\quad =\frac12\bigl\langle \bm{A}_1(t)-\bm{A}_2(t), (\cEl_{11}^* \cEl_{11}^{} + \cEl_{21}^* (\cAl_{22}^*\cAl_{22}^{})^{-1}\cEl_{21}^{}) (\bm{A}_1(t)-\bm{A}_2(t))\bigr\rangle}\\[4mm] \displaystyle{\qquad-\frac12\bigl\langle \bm{A}_1(t_0)-\bm{A}_2(t_0), (\cEl_{11}^* \cEl_{11}^{} + \cEl_{21}^*(\cAl_{22}^*\cAl_{22}^{})^{-1}\cEl_{21}) (\bm{A}_1(t_0)-\bm{A}_2(t_0))\bigr\rangle}\\[4mm] \displaystyle{\quad =\frac{\sigma_C}2\!\int_{\mathit{\Omega}_C}\|\bm{A}_1(t)-\bm{A}_2(t)\|_2^2\,{\rm d}\xi+\frac12\Bigl\|R^{-1/2}\int_{\mathit{\Omega}}\!\chi^\top(\bm{A}_1(t)-\bm{A}_2(t))\,{\rm d}\xi\Bigr\|_2^2}\\[4mm] \displaystyle{\qquad -\frac{\sigma_C}2\!\int_{\mathit{\Omega}_C}\|\bm{A}_1(t_0)-\bm{A}_2(t_0)\|_2^2\,{\rm d}\xi-\frac{1}2\Bigl\|R^{-1/2}\int_{\mathit{\Omega}}\!\chi^\top(\bm{A}_1(t_0)-\bm{A}_2(t_0))\,{\rm d}\xi\Bigr\|_2^2.} \end{array}\label{eq:est3}\end{equation} Since $\bm{A}_1$ and $\bm{A}_2$ satisfy the continuity properties in Definition~\ref{def:sol}~\ref{item:sol1}), \ref{item:sol2}) and the initial conditions \[\begin{aligned} & (\sigma\bm{A}_1)(0)=\, \sigma\bm{A}_0=(\sigma\bm{A}_2)(0) , \\ & \int_{\mathit{\Omega}}\chi^\top\bm{A}_1(0)\,{\rm d}\xi=\,\int_{\mathit{\Omega}}\chi^\top\bm{A}_0\,{\rm d}\xi=\int_{\mathit{\Omega}}\chi^\top\bm{A}_2(0)\,{\rm d}\xi, \qquad \end{aligned} \] we may take the limit ${t_0}\to 0+$ and obtain from \eqref{eq:est2} and \eqref{eq:est3} that \[\begin{aligned} \frac{\sigma_C}2\int_{\mathit{\Omega}_C}&\|\bm{A}_1(t)-\bm{A}_2(t)\|_2^2\,{\rm d}\xi+\frac{1}2\Bigl\|R^{-1/2}\int_{\mathit{\Omega}}\chi^\top(\bm{A}_1(t)-\bm{A}_2(t))\,{\rm d}\xi\Bigr\|_2^2\\ &\qquad \leq - m_\nu \int_{0}^{t}\|\nabla\times ({\bm{A}}_1(\tau)-{\bm{A}}_2(\tau))\|_{L^2(\mathit{\Omega};\mathbb{R}^3)}^2 \,{\rm d}\tau \leq 0. \end{aligned}\] Since the left-hand side is nonnegative and the right-hand side is nonpositive, we obtain that $\|{\bm{A}}_1(t)-{\bm{A}}_2(t)\|_{L^2(\mathit{\Omega}_C;\mathbb{R}^3)}=0$ and $\|\nabla\times ({\bm{A}}_1(t)-{\bm{A}}_2(t))\|_{L^2(\mathit{\Omega};\mathbb{R}^3)}=0$ for almost all $t\in [0,T]$. Then the estimate \eqref{eq:curlest} in Lemma~\ref{lem:denscoerc} implies \mbox{$\bm{A}_1(t)=\bm{A}_2(t)$} for almost all $t\in [0,T]$. As a consequence, we obtain from \eqref{eq:iM} that \mbox{$\bm{i}_1(t)=\bm{i}_2(t)$} for almost all $t\in{[0,T]}$.
This completes the proof. \end{proof} \section{The Magnetic Energy} \label{sec:energy} Our existence and regularity results for the coupled MQS model \eqref{eq:MQS} rely on the observation that this model is a~special instance of a~differential-algebraic gradient system. In this system, the magnetic energy plays a~central role. Therefore, in the following, we define the magnetic energy for the coupled MQS system \eqref{eq:MQS} and collect some of its properties. First, however, we introduce some useful general concepts and notation. \begin{definition}\label{def:phi} Let $X$ and $Z$ be Hilbert spaces, and let $\varphi:X\to \mathbb{R}\cup\{\infty\}$ be a~function with values in the extended real line. We call $D(\varphi)= \varphi^{-1}[0,\infty)$ the {\em effective domain} of $\varphi$, and we say that $\varphi$ is {\em proper}, if its effective domain is nonempty. The function $\varphi$ is {\em convex}, if \[ \forall\,x_1,x_2\in X, \lambda\in [0,1]:\;\; \varphi(\lambda x_1+(1-\lambda)x_2)\leq \lambda \varphi(x_1)+(1-\lambda)\varphi(x_2) , \] and it is {\em lower semicontinuous}, if for every $\lambda\in\mathbb{R}$, the sublevel set $\varphi^{-1}[0,\lambda]$ is closed in~$X$. We say that $\varphi$ is {\em coercive} if for every $\lambda\in\mathbb{R}$ the sublevel set $\varphi^{-1}[0,\lambda]$ is bounded. Finally, given $\mathcal{E}\in \mathcal{L}(X,Z)$, we say that $\varphi$ is {\em $\mathcal{E}$-elliptic} if there exists $\omega\in\mathbb{R}$ such that the shifted functional \[ \begin{aligned} \varphi_\omega:&&X\to &\,\mathbb{R} \cup\{\infty\},\\&&x\mapsto&\,\tfrac\omega2\|\mathcal{E} x\|_Z^2+\varphi(x) \end{aligned} \] is convex and coercive. The~relation \[ \partial\varphi=\!\left\{ (x,q)\!\in\! X\!\times\! X \; : \; \displaystyle{x\!\in\! D(\varphi) \text{ and } \lim_{\lambda\searrow0}\frac{\varphi(x\!+\!\lambda v)\!-\!\varphi(x)}{\lambda}\geq \langle q,v\rangle_X \text{ for all } v\!\in\! X}\right\} \] on $X$ is called {\em subgradient of $\varphi$}. For $x\in X$, we write \[ \partial\varphi(x)=\left\{q\in X\enskip :\enskip (x,q)\in \partial\varphi\right\} , \] and we call \[ D(\partial\varphi)=\left\{x\in X\enskip : \enskip \exists\; q\in X\text{ such that }(x,q)\in\partial\varphi\right\} \] the {\em domain} of the subgradient $\partial\varphi$. \end{definition} Now, starting from the magnetic reluctivity $\nu$ as in Assumption~\ref{ass:material}~b), consider the~function $\vartheta:\mathit{\Omega}\times\mathbb{R}_{\ge0}\to\mathbb{R}_{\ge0}$ defined by \begin{equation} \vartheta(\xi,\varrho)=\frac{1}{2}\int_0^\varrho \nu(\xi,\sqrt{\zeta})\, {\rm d}\zeta = \int_0^{\sqrt{\varrho}} \nu(\xi,\zeta)\zeta\,{\rm d}\zeta. \label{eq:gamma} \end{equation} Using this function, we further define the functional $E:X(\mathit{\Omega},\mathit{\Omega}_C)\to \mathbb{R} \cup\{\infty\}$ by \begin{equation}\label{eq:varphiA} E(\bm{A}{})=\begin{cases}\displaystyle{\int_{\mathit{\Omega}} \vartheta\bigl(\xi,\|\nabla\times \bm{A}(\xi )\|_2^2\bigr)\,{\rm d}\xi}\; & \text{if } \bm{A}{}\in X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C),\\ \infty\; & \text{else}.\end{cases} \end{equation} Note that for the magnetic vector potential $\bm{A}$, the function $\vartheta\bigl(\xi,\|\nabla\times \bm{A}(\xi,t)\|_2^2\bigr)$ is the {\em magnetic energy density}, and $E$ describes the {\em magnetic energy} \cite{HauM89}. 
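As a~simple illustration, consider the linear case of a~field-independent reluctivity, that is, $\nu(\xi,\zeta)=\nu_0$ for a~constant $\nu_0>0$; this choice satisfies Assumption~\ref{ass:material}~b) with $m_\nu=L_\nu=\nu_0$. Then \eqref{eq:gamma} gives $\vartheta(\xi,\varrho)=\tfrac{\nu_0}{2}\,\varrho$, and \eqref{eq:varphiA} reduces to the familiar quadratic magnetic energy
\[
E(\bm{A})=\frac{\nu_0}{2}\,\|\nabla\times \bm{A}\|_{L^2(\mathit{\Omega};\mathbb{R}^3)}^2
\qquad\text{for }\bm{A}\in X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C).
\]
For field-dependent reluctivities, $E$ is no longer quadratic, but it retains the structural properties collected below.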
This is a~special kind of energy function in the sense that its effective domain \mbox {$D(E)= X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$} is a~vector space, actually a~Hilbert space with the natural inner product induced from $H_0(\curl,\mathit{\Omega})$. In the following, we collect some properties of the magnetic energy, where we further use the notions of {\em G\^{a}teaux differentiability} and {\em G\^{a}teaux derivative} as introduced in \cite[Definition~4.5]{Zeid86}. \begin{proposition}[Properties of the magnetic energy function]\label{prop:enfun} Let \mbox{$\mathit{\Omega}\subset\mathbb{R}^3$} with subdomains $\mathit{\Omega}_C$ and $\mathit{\Omega}_I$ satisfy Assumption~\textup{\ref{ass:omega}}, and let $X(\mathit{\Omega},\mathit{\Omega}_C)$ and $X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$ be as in \eqref{eq:statespace} and \eqref{eq:statespace2}, respectively. Further, let the material parameters satisfy Assumption~\textup{\ref{ass:material}}, let $\vartheta$ be as in \eqref{eq:gamma}, and let the magnetic energy $E$ be defined as in~\eqref{eq:varphiA}. Then the following statements hold: \vspace*{-1mm} \begin{enumerate}[a)] \item\label{prop:enfun-1} For all $\bm{A}_1,\bm{A}_2\in {D(E)}$, $$ \arraycolsep=2pt \begin{array}{ll} |{E}(&\!\bm{A}_1)-{E}(\bm{A}_2)|\\ & \leq\displaystyle{\frac{L_\nu}2\bigl(\|\nabla\times\bm{A}_1\|_{L^2(\mathit{\Omega};\mathbb{R}^3)}+\|\nabla\times\bm{A}_2\|_{L^2(\mathit{\Omega};\mathbb{R}^3)}\bigr)\, \|\nabla\times(\bm{A}_1-\bm{A}_2)\|_{L^2(\mathit{\Omega};\mathbb{R}^3)},} \end{array} $$ where $L_\nu$ is the Lipschitz constant of $\nu$. \item\label{prop:enfun-2} For all $\bm{A}\in {D(E)}$, \begin{equation}\label{eq:estE} \frac{m_\nu}2\|\nabla\times\bm{A}\|^2_{L^2(\mathit{\Omega};\mathbb{R}^3)} \leq {E}(\bm{A}) \leq \frac{L_\nu}2\|\nabla\times\bm{A}\|^2_{L^2(\mathit{\Omega};\mathbb{R}^3)}, \end{equation} where $m_\nu$ and $L_\nu$ are the monotonicity and Lipschitz constants of $\nu$. \item\label{prop:enfun-0} If $E$ is considered as a~mapping from the Hilbert space $X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$ to $\R$, then $E$ is G\^{a}teaux differentiable, and for all $\bm{A}\in D(E)=X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$, \begin{equation} \forall\,\bm{F}\in X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C):\;\; \langle\bm{F},{\rm D}E(\bm{A})\rangle = \int_{\mathit{\Omega}} \nu (\cdot, \| \nabla\times \bm{A}\|_2) \, (\nabla\times \bm{A}) \cdot (\nabla\times \bm{F})\; d\xi, \end{equation} where ${\rm D}E(\bm{A})\in X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)'$ denotes the G\^{a}teaux derivative of $E$ at $\bm{A}$. \item\label{prop:enfun1} The magnetic energy $E$ is convex and lower semicontinuous. Its effective domain $D(E)=X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$ is dense in $X(\mathit{\Omega},\mathit{\Omega}_C)$. \item\label{prop:enfun3} The subgradient of $E$ is given by \begin{multline*} \partial E=\Bigl\{(\bm{A},\bm{C})\in X(\mathit{\Omega},\mathit{\Omega}_C) \times X(\mathit{\Omega},\mathit{\Omega}_C)\enskip:\enskip \bm{A}\in X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C) \text{ and} \Bigr.\\ \Bigl. \int_{\mathit{\Omega}} \nu(\cdot,\|\nabla \times \bm{A}\|_2) \, (\nabla \times \bm{A}) \cdot (\nabla\times \bm{F} )\,{\rm d}\xi = \int_{\mathit{\Omega}} \bm{C} \cdot \bm{F}\,{\rm d}\xi \Bigr. \\ \Bigl. \text{ for all } \bm{F} \in X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C) \Bigr\}. \end{multline*} The subgradient $\partial E$ is single-valued and its domain $D(\partial E)$ is dense in $X(\mathit{\Omega},\mathit{\Omega}_C)$. 
\item\label{prop:enfun5} Let $\mathcal{E}\in \mathcal{L}(X(\mathit{\Omega},\mathit{\Omega}_C),X(\mathit{\Omega},\mathit{\Omega}_C)\times \R^m)$ be defined as \begin{equation}\label{eq:operatorE} \mathcal{E}\bm{A}=\Bigl(\sqrt{\sigma}\bm{A},R^{-1/2}\int_\mathit{\Omega} \chi^\top\bm{A}\, {\rm d}\xi\Bigr), \end{equation} where $R^{-1/2}$ denotes the inverse of the principal square root of $R$. Then $ E$ is \mbox{$\mathcal{E}$-elliptic}. \end{enumerate} \end{proposition} \begin{proof} \ref{prop:enfun-1}) Let $\bm{A}_1$, $\bm{A}_2\in {D(E)}$. Then using \eqref{eq:Mnugreater} and the Cauchy-Schwarz inequa\-li\-ty, we obtain {\allowdisplaybreaks\begin{align*} |E(\bm{A}_1)\,-&\,E(\bm{A}_2)|\\ \leq \; &\int_\mathit{\Omega}\left|\int_0^{\|\nabla\times\bm{A}_1(\xi)\|_2} \nu(\xi,\zeta)\zeta\,{\rm d}\zeta-\int_0^{\|\nabla\times\bm{A}_2(\xi)\|_2} \nu(\xi,\zeta)\zeta\,{\rm d}\zeta \right|\,{\rm d}\xi\\ = \;&\int_\mathit{\Omega}\left|\int_{\|\nabla\times\bm{A}_2(\xi)\|_2}^{\|\nabla\times\bm{A}_1(\xi)\|_2} \nu(\xi,\zeta)\zeta\,{\rm d}\zeta\right|\,{\rm d}\xi\\ \leq\;& L_\nu\int_\mathit{\Omega}\left|\int_{\|\nabla\times\bm{A}_2(\xi)\|_2}^{\|\nabla\times\bm{A}_1(\xi)\|_2}\zeta\,{\rm d}\zeta\right|\,{\rm d}\xi\\ =\;&\frac{L_\nu}2\int_\mathit{\Omega}\Bigl|\|\nabla\times\bm{A}_1(\xi)\|_2^2-\|\nabla\times\bm{A}_2(\xi)\|_2^2\Bigr|\,{\rm d}\xi\\ =\;&\frac{L_\nu}2\!\!\int_\mathit{\Omega}\bigl(\|\nabla\!\times\!\bm{A}_1(\xi)\|_2\!+\!\|\nabla\!\times\!\bm{A}_2(\xi)\|_2\bigr)\bigl|\,\|\nabla\!\times\!\bm{A}_1(\xi)\|_2\!-\!\|\nabla\!\times\!\bm{A}_2(\xi)\|_2\,\bigr|\,{\rm d}\xi\\ \leq\;&\frac{L_\nu}2\int_\mathit{\Omega}\bigl(\|\nabla\times\bm{A}_1(\xi)\|_2+\|\nabla\times\bm{A}_2(\xi)\|_2\bigr)\, \|\nabla\times(\bm{A}_1(\xi)-\bm{A}_2(\xi))\|_2\,{\rm d}\xi\\ \leq\;&\frac{L_\nu}2\bigl(\|\nabla\times\bm{A}_1\|_{L^2(\mathit{\Omega};\mathbb{R}^3)}+\|\nabla\times\bm{A}_2\|_{L^2(\mathit{\Omega};\mathbb{R}^3)}\bigr)\, \|\nabla\times(\bm{A}_1-\bm{A}_2)\|_{L^2(\mathit{\Omega};\mathbb{R}^3)}. \end{align*}} \ref{prop:enfun-2}) {This statement follows immediately from \eqref{eq:Mnugreater}.} \ref{prop:enfun-0}) The proof of this assertion is perhaps tedious but elementary; it mainly relies on the differentiability of $\vartheta$ and growth estimates of $\nu$ (compare with \eqref{eq:Mnugreater}). We omit the details. \ref{prop:enfun1}) By assumption, for almost every $\xi\in\mathit{\Omega}$, the function $\zeta\mapsto\nu (\xi , \zeta ) \zeta$ is positive and increasing on $\mathbb{R}_{\geq 0}$. As a consequence, by definition of $\vartheta$, for almost every $\xi\in\mathit{\Omega}$, the function $\varrho \mapsto \vartheta (\xi , \varrho^2)$ is increasing and convex on $\mathbb{R}_{\geq 0}$. Using these properties and the triangle inequality for the norm, we can easily show the convexity of $E$. Indeed, for all $\bm{A}_1$, $\bm{A}_2\in D(E)$ and all $\lambda\in [0,1]$, we have \begin{align*} E(\lambda\bm{A}_1+(1-\lambda) \bm{A}_2) & = \int_{\mathit{\Omega}} \vartheta (\xi , \| \nabla\times (\lambda\bm{A}_1(\xi)+(1-\lambda) \bm{A}_2(\xi))\|_2^2 ) \; {\rm d}\xi \\ & \leq \int_{\mathit{\Omega}} \vartheta (\xi , ( \lambda \| \nabla\times\bm{A}_1(\xi) \|_2 + (1-\lambda) \| \nabla\times\bm{A}_2(\xi)\|_2 )^2 )\; {\rm d}\xi \\ & \leq \int_{\mathit{\Omega}} ( \lambda \vartheta (\xi , \| \nabla\times\bm{A}_1(\xi) \|_2^2 ) + (1-\lambda) \vartheta (\xi , \| \nabla\times\bm{A}_2(\xi)\|_2^2 ) ) \; {\rm d}\xi \\ & = \lambda E (\bm{A}_1 ) + (1-\lambda ) E(\bm{A}_2 ) . 
\end{align*} In order to prove lower semicontinuity of $E$, let $\lambda\in\mathbb{R}$. From assertion \ref{prop:enfun-1}), or alternatively from \ref{prop:enfun-0}), we see that the magnetic energy is continuous as a mapping from the Hilbert space $X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$ to $\R$, and, therefore, the sublevel set $$ \{ \bm{A}\in X(\mathit{\Omega},\mathit{\Omega}_C)\; |\; E (\bm{A}) + \| \bm{A}\|_{L^2(\mathit{\Omega};\mathbb{R}^3)}^2 \leq \lambda \} $$ is closed in that space. By convexity of~$E$, this set is also convex, and hence, by Mazur's theorem, this set is weakly closed. Further, by \eqref{eq:estE}, the sublevel set is bounded in $X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$, and hence, since every Hilbert space is reflexive, it is weakly compact. By continuity of the embedding of $X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$ into $X(\mathit{\Omega},\mathit{\Omega}_C)$, it follows that this sublevel set is also weakly compact in the latter space, and then it is necessarily norm closed there. We have thus proved that the mapping $\bm{A}\mapsto E(\bm{A}) + \| \bm{A}\|_{L^2(\mathit{\Omega};\mathbb{R}^3)}^2$ is lower semicontinuous on $X(\mathit{\Omega},\mathit{\Omega}_C)$. Since $\|\cdot \|_{L^2(\mathit{\Omega};\mathbb{R}^3)}^2$ is continuous on that space, it follows that the magnetic energy itself is lower semicontinuous. The fact that $E$ is densely defined follows from Lemma~\ref{lem:denscoerc}. Assertion \ref{prop:enfun3}) is a direct consequence of the definition of the subgradient, the fact that the effective domain of $E$ is a linear space, and from assertion \ref{prop:enfun-0}). By \cite[Proposition~1.6]{Bar10}, $D(\partial E)$ and $D( E)$ have the same closure, and hence the subgradient is densely defined, too. \ref{prop:enfun5}) Let $Z=X(\mathit{\Omega},\mathit{\Omega}_C)\times \R^m$ and let $\mathcal{E}\in\mathcal{L}(X(\mathit{\Omega},\mathit{\Omega}_C),Z)$ be as in \eqref{eq:operatorE}. First, note that $\mathcal{E}$ is well-defined, since by Assumption~\ref{ass:material}~c) the matrix $R$ is symmetric and positive definite. In order to prove that $ E$ is $\mathcal{E}$-elliptic, we show that the shifted functional $$ \arraycolsep=2pt \begin{array}{rcl} E_{\omega}:X(\mathit{\Omega},\mathit{\Omega}_C)&\to& \mathbb{R}_{\ge0}\cup\{\infty\},\\ {\bm{A}}& \mapsto & \displaystyle{\tfrac\omega2\|\mathcal{E}{\bm{A}}\|_Z^2+ E({\bm{A}})} \end{array} $$ is convex and coercive for all $\omega>0$. Convexity follows from the fact that $E_\omega$ is the sum of two convex functions. To show coercivity, we use Lemma~\ref{lem:denscoerc}, which states that there exists some $L_C>0$ such that \eqref{eq:curlest} holds for all $\bm{A}\in D( E)$. Further, by using \eqref{eq:estE}, we obtain \[\begin{aligned} { E_{\omega}}(\bm{A})=&\,{ E}(\bm{A})+\frac{\omega}{2} \|\mathcal{E}\bm{A}\|_{Z}^2\\ =&\;{ E}(\bm{A})+\frac{\omega\sigma_C}{2} \int_{\mathit{\Omega}_C} \|\bm{A}\|_2^2\,{\rm d}\xi + \frac{\omega}{2} \Bigl\|R^{-1/2}\int_{\mathit{\Omega}} \chi^\top\bm{A}\,{\rm d}\xi\Bigr\|_2^2\\ \geq&\; \frac{m_\nu}2\,\|\nabla\times\bm{A}\|^2_{L^2(\mathit{\Omega};\mathbb{R}^3)}+ \frac{\omega\sigma_C}{2} \|\bm{A}\|_{L^2(\mathit{\Omega}_C;\mathbb{R}^3)}^2\\ \geq&\;c\,\|\bm{A}\|^2_{L^2(\mathit{\Omega};\mathbb{R}^3)} \end{aligned}\] with ${c}=\min\{m_\nu,\omega\sigma_C\}/(2L_C)$. This implies the coercivity of $E_\omega$. 
\end{proof} \section{On a~Class of Abstract Differential-Algebraic Gradient Systems} \label{sec:gradsys} In this section, we study the solvability of an~abstract differential-algebraic gradient system \begin{equation} \label{eq:symmdae} \mathcal{E}^*f(t)-\mathcal{E}^*\tfrac{\rm d}{{\rm d}t}\mathcal{E} x(t)\in \partial\varphi(x(t)), \quad\mathcal{E} x(0+)=z_0, \end{equation} where $\mathcal{E}\in\mathcal{L}(X,Z)$, $X$ and $Z$ are Hilbert spaces, $f\in L^2([0,T];Z)$, $\varphi$ is a~densely defined, convex, lower semicontinuous and $\mathcal{E}$-elliptic functional with a~subgradient $\partial\varphi$, and $z_0\in Z$. We present an~extension of some results from \cite{Bar10,Bre73} to this more general class of gradient systems, which will be useful in establishing the existence of solutions of the MQS system \eqref{eq:MQS}. We start by proving an auxiliary lemma which introduces the concept of $\mathcal{E}$-subgradients. \begin{lemma}\label{lem:modfun} Let $X$ and $Z$ be Hilbert spaces, and let $\mathcal{E}\in\mathcal{L}(X,Z)$ have a~dense range. Assume that the functional $\varphi:X\to\mathbb{R} \cup\{\infty\}$ is densely defined, convex, lower semicontinuous and $\mathcal{E}$-elliptic with a~subgradient $\partial\varphi$. Define the functional \begin{equation} \arraycolsep=2pt \begin{array}{rcl} \varphi_{\mathcal{E}}: Z & \to &\R\cup\{\infty\}, \\ z & \mapsto & \displaystyle{\inf_{x\in \mathcal{E}^{-1}\{z\}}\varphi(x)} . \end{array} \label{eq:modfun} \end{equation} Then $D(\varphi_{\mathcal{E}})={\mathcal{E}}D(\varphi)$, and $\varphi_{\mathcal{E}}$ is densely defined, convex and lower semicontinuous. Its subgradient is given by \[ \begin{array}{rl} \partial\varphi_{\mathcal{E}}=\Bigl\{ (z,g)\in Z\times Z\enskip : \enskip \Bigr.& \exists\, x\in D(\varphi)\text{ such that }\mathcal{E} x=z \text{ and }\\ & \Bigl.\displaystyle{\lim_{\lambda\searrow0}\frac{\varphi(x+\lambda v)-\varphi(x)}{\lambda}\geq \langle g,\mathcal{E} v\rangle_Z} \text{ for all } v\in X\Bigr\}. \end{array} \] In particular, $(z,g)\in\partial\varphi_{\mathcal{E}}$ if and only if there exists $(x,q)\in\partial\varphi$ such that $\mathcal{E} x=z$, $\mathcal{E}^*g=q$, and $\varphi(x)=\varphi_{\mathcal{E}}(z)$. \end{lemma} \begin{proof} The statement is, except for the assertion on the density of the domain of $\varphi_{\mathcal{E}}$, proven in {\cite[Theorem~2.9]{CHK16}}. The density of the domain of $\varphi_{\mathcal{E}}$ can be inferred from the identity $D(\varphi_{\mathcal{E}})=\mathcal{E} D(\varphi)$, the assumption that $\varphi$ is densely defined, and the property that $\mathcal{E}$ has a~dense range. \end{proof} Next, we establish existence and regularity properties of solutions of the abstract differential-algebraic gradient system \eqref{eq:symmdae}. Note that the initial value in \eqref{eq:symmdae} is only required to lie in the closure of the range of $\mathcal{E}$, and that the initial condition is to be understood to hold for the continuous representative of $\mathcal{E} x$. \begin{theorem}\label{lem:symmdae} Let $X$ and $Z$ be Hilbert spaces, let $\mathcal{E}\in\mathcal{L}(X,Z)$, and let $\widetilde{Z}\subseteq Z$ be the closure of the range of $\mathcal{E}$. Furthermore, let $\varphi:X\to\mathbb{R} \cup\{\infty\}$ be a~densely defined, convex, lower semicontinuous and $\mathcal{E}$-elliptic functional with subgradient $\partial\varphi$.
Then for every $T>0$, $f\in L^2({[0,T]};Z)$ and $z_0\in \widetilde{Z}$, the {abstract differential-algebraic gradient system} \eqref{eq:symmdae} admits a~solution $x:{[0,T]}\to X$ in the following sense: \vspace*{-1mm} \begin{romanlist}[a)] \item\label{item:sola} $\mathcal{E} x \in C([0,T] ;Z) \cap H^{1}_{\rm loc}((0,T];Z)$ and $\mathcal{E} x(0+)= z_0$,\vspace*{1mm} \item\label{item:solc} $x(t)\in D(\partial\varphi)$ and the differential inclusion in \eqref{eq:symmdae} hold for almost all $t\in [0,T]$. \end{romanlist} This solution has the following properties: \begin{align} \bigl(t\mapsto \varphi(x(t))\bigr)\in&\, L^1([0,T])\cap {W^{1,1}_{\rm loc}((0,T])}, \label{eq:DAEBarb1}\\ \bigl(t\mapsto t\,\varphi(x(t))\bigr)\in&\, L^\infty([0,T]),\label{eq:DAEBarb1.5}\\ \bigl(t\mapsto t^{1/2}\tfrac{\rm d}{{\rm d}t}\mathcal{E} {x}(t)\bigr)\in&\, L^2([0,T];Z),\label{eq:DAEBarb2} \intertext{and for almost all $0<t_0\leq t_1\leq T$} \quad \varphi(x(t_1))-\varphi(x(t_0))&\,=\int_{t_0}^{t_1}\langle f(\tau),\tfrac{\rm d\,}{{\rm d}\tau}\mathcal{E} x(\tau)\rangle_Z\,{\rm d}\tau-\int_{t_0}^{t_1}\|\tfrac{\rm d\,}{{\rm d}\tau} \mathcal{E} {x}(\tau)\|^2_Z\,{\rm d}\tau.\label{eq:DAEBarb5} \end{align} If, further, $z_0\in \mathcal{E} D(\varphi)$, then \begin{align} \bigl(t\mapsto \varphi(x(t))\bigr)\in&\, W^{1,1}([0,T]), \label{eq:DAEBarb3}\\ \mathcal{E} x\in&\, H^1({[0,T]};Z),\label{eq:DAEBarb4} \end{align} and the identity \eqref{eq:DAEBarb5} holds for all $0\leq t_0\leq t_1\leq T$. \end{theorem} Before proving this result, we note that, except for the relation \eqref{eq:DAEBarb1.5}, a~proof of Theorem~\ref{lem:symmdae} for the special case $X=Z$ and $\mathcal{E}=I$ can be found in \cite[Th\'eor\`eme~3.6, p.72]{Bre73} or \cite[Theorem~4.11 \& Lemma~4.4]{Bar10}. These results for $\mathcal{E}=I$ are indeed the basis for our proof of Theorem~\ref{lem:symmdae}. \begin{proof} Since $\widetilde{Z}\subseteq Z$ is the closure of the range of $\mathcal{E}$, the operator $\mathcal{E} :X\to\widetilde{Z}$ has dense range, and Lemma~\ref{lem:modfun} implies that the functional $\varphi_{\mathcal{E}}:\widetilde{Z}\to\mathbb{R} \cup\{\infty\}$ in \eqref{eq:modfun} is densely defined, convex and lower semicontinuous. Further, let $\varPi\in\mathcal{L}(Z)$ be the orthogonal projection onto $\widetilde{Z}$. Then $\varPi f\in L^2({[0,T]};\widetilde{Z})$, and, by \cite[Th\'eor\`eme~3.6, p.72]{Bre73} or \cite[Theorem~4.11]{Bar10}, the {gradient system} \begin{equation} \varPi{f}(t)-\tfrac{\rm d}{{\rm d}t}z(t)\in \partial\varphi_{\mathcal{E}}(z(t)), \quad z(0) = z_0\label{eq:resolODE} \end{equation} admits a~unique solution $z\in C ([0,T] ; \widetilde{Z}) \cap H^{1}_{\rm loc}((0,T];{\widetilde{Z}})$ in the sense that $z(0) = z_0$, $z(t)\in D(\varphi_{\mathcal{E}} )$ and the differential inclusion in \eqref{eq:resolODE} hold for almost all $t\in [0,T]$. Moreover, this solution has the following properties: \begin{align} \bigl(t\mapsto \varphi_{\mathcal{E}}(z(t))\bigr)\in&\, L^1([0,T]),\label{eq:Barb1}\\ \bigl(t\mapsto t^{1/2}\tfrac{\rm d}{{\rm d}t}z(t)\bigr)\in&\, L^2([0,T];{\widetilde{Z}}).\label{eq:Barb2} \end{align} The fact that the differential inclusion \eqref{eq:resolODE} holds for almost all $t\in [0,T]$ is equivalent to saying that for almost all $t\in{[0,T]}$, \[ \bigl(z(t),\varPi f(t)-\tfrac{\rm d}{{\rm d}t}z(t)\bigr)\in \partial\varphi_{\mathcal{E}}.
\] Then, by Lemma~\ref{lem:modfun}, there exist functions $x$, $w:[0,T]\to X$ such that, for almost all $t\in[0,T]$, $(x(t),w(t))\in \partial\varphi$, $\mathcal{E} x(t)=z(t)$ and $w(t)=\mathcal{E}^*\varPi f(t)-\mathcal{E}^*\tfrac{\rm d}{{\rm d}t}z(t)$. In particular, $\mathcal{E} x=z\in C([0,T] ; \widetilde{Z} ) \cap H^1_{\rm loc}((0,T];\widetilde{Z})$, that is, $z$ is a continuous representative of $\mathcal{E} x$, and therefore $\mathcal{E} x(0+) = \lim_{t\to 0+} \mathcal{E} x(t) = \lim_{t\to 0+} z(t) = z_0$. In addition, by using that $\varPi\mathcal{E} = \mathcal{E}$ together with $\varPi=\varPi^*$ implies $\mathcal{E}^*=\mathcal{E}^*\varPi$, we have \begin{align*} \mathcal{E}^*f(t)-\mathcal{E}^*\tfrac{\rm d}{{\rm d}t} \mathcal{E} x(t)& = \mathcal{E}^*\varPi f(t)-\mathcal{E}^*\tfrac{\rm d}{{\rm d}t} \mathcal{E} x(t)\\ & =\mathcal{E}^*\varPi f(t)-\mathcal{E}^*\tfrac{\rm d}{{\rm d}t}z(t)=w(t)\in \partial\varphi(x(t)), \end{align*} that is, \ref{item:sola}) and \ref{item:solc}) hold. Moreover, \eqref{eq:Barb2} implies \eqref{eq:DAEBarb2}. Using Lemma~\ref{lem:modfun}, we obtain \begin{equation} \varphi_{\mathcal{E}}(z(t))=\varphi(x(t)) \quad\text{for almost all } t\in[0,T] , \label{eq:phiErel} \end{equation} and then \eqref{eq:Barb1} leads to $\bigl(t\mapsto \varphi(x(t))\bigr)\in\, L^1([0,T])$. An application of \cite[Lemme~3.3, p.73]{Bre73} or \cite[Lemma~4.4]{Bar10} gives $\bigl(t\mapsto \varphi_{\mathcal{E}}(z(t))\bigr)\in\, W^{1,1}_{\rm loc}((0,T])$, whence \eqref{eq:phiErel} implies $\left(t\mapsto \varphi(x(t))\right)\in\, W^{1,1}_{\rm loc}((0,T])$, which shows \eqref{eq:DAEBarb1}. In order to prove \eqref{eq:DAEBarb5}, recall that the property $z\in H^1_{\rm loc}((0,T];\tilde{Z})$ together with \cite[Lemma~4.4]{Bar10} implies that the weak derivative of $\bigl(t\mapsto \varphi_{\mathcal{E}}(z(t))\bigr)$ fulfills \[ \tfrac{\rm d}{{\rm d}t}\varphi_{\mathcal{E}}(z(t))=\bigl\langle \varPi f(t),\tfrac{\rm d}{{\rm d}t}z(t) \bigr\rangle_Z-\bigl\|\tfrac{\rm d}{{\rm d}t}z(t)\bigr\|_Z^2\enskip\text{ for almost all }t\in[0,T]. \] Then, for $0<t_0\leq t_1\leq T$, an integration over $[t_0,t_1]$ gives \[ \varphi_{{\mathcal{E}}}(z(t_1))-\varphi_{{\mathcal{E}}}(z(t_0))=\int_{t_0}^{t_1}\bigl\langle \varPi f(\tau),\tfrac{\rm d}{{\rm d}\tau}z(\tau) \bigr\rangle_Z-\bigl\|\tfrac{\rm d}{{\rm d}\tau}z(\tau)\bigr\|^2_Z\,{\rm d}\tau. \] Using the equality $\mathcal{E} x=z$, \eqref{eq:phiErel} and the self-adjointness of $\varPi$, we obtain \eqref{eq:DAEBarb5}. For the proof of \eqref{eq:DAEBarb1.5}, let $t\in(0,T]$. Then \eqref{eq:DAEBarb5} together with Young's inequality \cite[p.~53]{Alt16} leads to \[\begin{aligned} t\,\varphi(x(t)) =&\,t\,\varphi(x(T))+t\, \int_{t}^{T}\|\tfrac{\rm d\,}{{\rm d}\tau} \mathcal{E}{x}(\tau)\|^2_Z\,{\rm d}\tau- t\,\int_{t}^{T}\langle f(\tau),\tfrac{\rm d\,}{{\rm d}\tau}\mathcal{E} x(\tau)\rangle_Z\,{\rm d}\tau\\ \leq&\,t\,\varphi(x(T))+t\, \int_{t}^{T}\|\tfrac{\rm d\,}{{\rm d}\tau} \mathcal{E}{x}(\tau)\|^2_Z\,{\rm d}\tau+ \frac{t}2\,\int_{t}^{T} \Bigl( \|\tfrac{\rm d\,}{{\rm d}\tau}\mathcal{E} x(\tau)\|_{Z}^2+\|f(\tau)\|_{Z}^2\Bigr) \,{\rm d}\tau\\ \leq&\,T\,\varphi(x(T))+ \frac32\int_{t}^{T}\|\tau^{1/2}\tfrac{\rm d\,}{{\rm d}\tau} \mathcal{E}{x}(\tau)\|^2_Z\,{\rm d}\tau+ \frac{T}2\,\int_{t}^{T}\|f(\tau)\|_{{Z}}^2\,{\rm d}\tau\\ \leq&\,T\,\varphi(x(T))+ \frac32\int_{0}^{T}\|\tau^{1/2}\tfrac{\rm d\,}{{\rm d}\tau} \mathcal{E}{x}(\tau)\|^2_Z\,{\rm d}\tau+ \frac{T}2\,\int_{0}^{T}\|f(\tau)\|_{Z}^2\,{\rm d}\tau. 
\end{aligned}\] As the latter expression is independent of $t$ and finite due to $f\in L^2({[0,T]};Z)$ and the already established relation \eqref{eq:DAEBarb2}, we obtain \eqref{eq:DAEBarb1.5}. It remains to prove the statements under the additional assumption that the initial value fulfills $z_0\!\in\! \mathcal{E} D(\varphi)$ or, equivalently by Lemma~\ref{lem:modfun}, $z_0\in D(\varphi_{\mathcal{E}})$. Then we can apply \cite[Theorem~4.11]{Bar10} to obtain that the solution $z$ of \eqref{eq:resolODE} fulfills \begin{align} \bigl(t\mapsto \varphi_{\mathcal{E}}(z(t))\bigr)\in&\, W^{1,1}([0,T]),\label{eq:Barbsmooth1}\\ z\in&\, H^{1}([0,T];\widetilde{Z}).\label{eq:Barbsmooth2} \end{align} Then $\mathcal{E} x=z$ together with \eqref{eq:phiErel}, \eqref{eq:Barbsmooth1} and \eqref{eq:Barbsmooth2} implies \eqref{eq:DAEBarb3} and \eqref{eq:DAEBarb4}. The statement that the identity \eqref{eq:DAEBarb5} further holds for all $0\leq t_0\leq t_1\leq T$ can be concluded from \eqref{eq:Barbsmooth1} and the argument in the proof of \eqref{eq:DAEBarb5} for the case $t_0>0$. \end{proof} \begin{remark} In numerical analysis of finite-dimensional differential-algebraic equations, the notion of {\em (differentiation) index} plays a~fundamental role \textup{\cite{LamMT13,KunM06}}. Roughly speaking, the index is the number of differentiations needed until an ordinary differential equation is obtained. Though there exist several attempts to generalize the index to infinite-dimensional differential-algebraic equations \textup{\cite{RT05,Rei06,Rei07,TrosWaur18,TrosWaur19}}, these approaches have in common that they are applicable to a~rather limited class, which in general excludes equations of type \eqref{eq:symmdae} even when $\partial\varphi$ is linear and single-valued. On the other hand, system \eqref{eq:symmdae} has intrinsic properties which, in the finite-dimensional case, are only fulfilled by differential-algebraic equations with index at most one. Namely, it follows from \textup{\cite[Theorem 3.53]{LamMT13}} that a~finite-dimensional differential-algebraic equation of type $\tfrac{\rm d}{{\rm d}t}\cEl{x}=a(t,x)$ with $\cEl\in\R^{n\times n}$ has index at most one if and only if for any $x_0\in\R^n$, there exists a~solution which fulfills the initial condition $\cEl x(0)=\cEl x_0$. Note that, by~Theorem~\textup{\ref{lem:symmdae}}, system \eqref{eq:symmdae} has this property of unrestricted initializability. \end{remark} \newpage Let us now apply Theorem~\ref{lem:symmdae} to the following special differential-algebraic gradient system \begin{equation}\label{eq:opdaegrad} \begin{aligned} \cEl_{21}^*x_2(t)-\cEl_{11}^*\tfrac{d}{dt}\cEl_{11} x_1(t)\in & \,\partial\varphi(x_1(t)),\\ \cAl_{22}\cAl_{22}^* x_2(t)+\tfrac{d}{dt}\cEl_{21} x_1(t) = & \, u(t),\\ \cEl_{11} x_1(0+)= & \,z_{1,0},\\ \cEl_{21} x_1(0+)= & \,z_{2,0} \end{aligned} \end{equation} with a~given function $u:[0,T]\to U$ and given initial values $z_{1,0}\in Y$, $z_{2,0}\in U$. In the subsequent section, we show that, by involving the magnetic energy and its subgradient, the coupled MQS system \eqref{eq:MQS} fits into this abstract framework, and we apply our results for \eqref{eq:opdaegrad} to prove the existence of solutions together with some further regularity results. The following corollary establishes the existence result for system \eqref{eq:opdaegrad}.
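Before stating it, we briefly illustrate the structure of \eqref{eq:opdaegrad} with a~minimal finite-dimensional example, which is not needed in the sequel. Take $X=\R^2$, $Y=U=\R$, $\cEl_{11}(a,b)=a$, $\cEl_{21}(a,b)=b$, $\cAl_{22}=1$ and the quadratic functional $\varphi(a,b)=\tfrac12(a^2+b^2)$, which is convex, coercive and hence $\mathcal{E}$-elliptic for $\mathcal{E}$ as in \eqref{eq:Edef} below. Writing $x_1=(a,b)$ and noting that $\partial\varphi(a,b)=\{(a,b)\}$, the inclusion and the equation in \eqref{eq:opdaegrad} read
\[
(0,x_2(t))-(\dot a(t),0)=(a(t),b(t)),
\qquad
x_2(t)+\dot b(t)=u(t),
\]
that is, $\dot a=-a$, $\dot b=u-b$ and $x_2=b$. In particular, every initial value $(z_{1,0},z_{2,0})\in\R^2$ is admissible, and the variable $x_2$, which appears without a~time derivative, is determined pointwise by $x_1$; this reflects the index-one behavior discussed in the preceding remark.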
\begin{corollary}\label{thm:infDAEsys} Let $X$, $Y$ and $U$ be Hilbert spaces and let $\cAl_{22}\in\mathcal{L}({U})$ have a~bounded inverse, $\cEl_{11}\in\mathcal{L}(X,Y)$, \mbox{$\cEl_{21}\in\mathcal{L}(X,U)$}, and $\varphi:X\to\mathbb{R} \cup\{\infty\}$ be densely defined, convex, lower semicontinuous and $\mathcal{E}$-elliptic for $\mathcal{E}\in\mathcal{L}(X,Y\times U)$ given by \begin{equation} \mathcal{E} x=(\cEl_{11}x, \cAl_{22}^{-1}\cEl_{21}x).\label{eq:Edef} \end{equation} Further, let $T>0$, $u\in L^2({[0,T]};U)$ and $(z_{1,0},z_{2,0})$ belong to the closure of the range of $\mathcal{E}$ in $Y\times U$. Then the {abstract differential-algebraic gradient system} \eqref{eq:opdaegrad} has a~solution $(x_1,x_2):[0,T]\to X\times U$ in the following sense: \vspace*{-1mm} \begin{romanlist}[a)] \item\label{item:solsysa1} $\cEl_{11}x_1 \in C([0,T]; Y) \cap H^{1}_{\rm loc}((0,T];Y)$ and $\cEl_{11}x_1 (0+) = z_{1,0}$;\vspace*{1mm} \item\label{item:solsysa2} $\cEl_{21}x_1 \in C([0,T]; U) \cap H^{1}_{\rm loc}((0,T];U)$ and $\cEl_{21}x_1 (0+) = z_{2,0}$;\vspace*{1mm} \item\label{item:solsysb3} $x_2\in L^{2}_{\rm loc}((0,T],U)$;\vspace*{1mm} \item\label{item:solsysc} $x_1(t)\in D(\partial\varphi)$, and the differential inclusion as well as the differential equation in \eqref{eq:opdaegrad} are satisfied for almost all $t\in [0,T]$. \end{romanlist} This solution has the following properties: \begin{align} \bigl(t\mapsto \varphi(x_1(t))\bigr)\in&\, L^1({[0,T]})\cap W^{1,1}_{\rm loc}((0,T]),\label{eq:DAEBarbsys1}\\ \bigl(t\mapsto t\,\varphi(x_1(t))\bigr)\in &\, L^\infty([0,T]),\label{eq:DAEBarbsys1.5}\\ \bigl(t\mapsto t^{1/2}\tfrac{\rm d}{{\rm d}t}\cEl_{11}{x}_1(t)\bigr)\in&\, L^2([0,T];Y),\label{eq:DAEBarbsys2}\\ \bigl(t\mapsto t^{1/2}\tfrac{\rm d}{{\rm d}t}\cEl_{21}{x}_1(t)\bigr)\in&\, L^2([0,T];U),\label{eq:DAEBarbsys3}\\ \bigl(t\mapsto t^{1/2}{x}_2(t)\bigr)\in&\, L^2([0,T];U),\label{eq:DAEBarbsys3.5} \end{align} and for all $0<t_0\leq t_1\leq T$, \begin{align} \quad \varphi(x_1(t_1))-\varphi(x_1(t_0))&\,= \int_{t_0}^{t_1}\langle x_2(\tau),u(\tau)\rangle_U \,{\rm d}\tau -\int_{t_0}^{t_1}\|\cAl_{22}^*{x}_2(\tau)\|^2_{{U}}\,{\rm d}\tau\label{eq:DAEBarbsys4}\\ &\qquad-\int_{t_0}^{t_1}\|\tfrac{\rm d}{{\rm d}\tau} \cEl_{11}{x}_1(\tau)\|^2_Y\,{\rm d}\tau.\nonumber \end{align} If, further, $(z_{1,0},\cAl_{22}^{-1}z_{2,0})\in \mathcal{E} D(\varphi)$, then \begin{align} \bigl(t\mapsto \varphi(x_1(t))\bigr)\in&\, W^{1,1}([0,T]),\label{eq:DAEBarbsys5}\\ \cEl_{11}{x}_1\in&\, H^1([0,T];Y),\label{eq:DAEBarbsys6}\\ \cEl_{21}{x}_1\in&\, H^1([0,T];U),\label{eq:DAEBarbsys7}\\ {x}_2\in&\, L^2([0,T];U),\label{eq:DAEBarbsys8} \end{align} and the identity {\eqref{eq:DAEBarbsys4}} holds for all $0\leq t_0\leq t_1\leq T$. \end{corollary} \begin{proof} Let $\mathcal{E}\in\mathcal{L}(X,Y\times U)$ be as in \eqref{eq:Edef}. By the assumption, the functional $\varphi$ is $\mathcal{E}$-elliptic. 
Consider the function \begin{equation} f=\bigl(0,\cAl_{22}^{-1}u\bigr)\in L^2([0,T];Y\times U).\label{eq:fdaedef} \end{equation} Theorem~\ref{lem:symmdae} implies that the abstract differential-algebraic gradient system \eqref{eq:symmdae} with $\mathcal{E}$ as in \eqref{eq:Edef} and $z_0=(z_{1,0},\mathcal{A}_{22}^{-1}z_{2,0})$ has a~{solution} $x:{[0,T]}\to X$ in the sense that $\mathcal{E} x \in C([0,T] ; Y\times U) \cap H^{1}_{\rm loc}((0,T];Y\times U)$, $\mathcal{E} x(0+) = (z_{1,0}, \mathcal{A}_{22}^{-1}z_{2,0})$, and $x(t)\in D(\partial\varphi)$ and the differential inclusion in \eqref{eq:symmdae} hold for almost all $t\in [0,T]$. We now consider $(x_1,x_2):[0,T]\to X\times U$ with \begin{equation} \begin{aligned} x_1(t)=&\,x(t),\\ x_2(t)=&\,(\cAl_{22}^{-1})^*\cAl_{22}^{-1}\bigl(u(t)-\tfrac{\rm d}{{\rm d}t} \cEl_{21}x(t)\bigr). \end{aligned}\label{eq:subsdaeref} \end{equation} Then the above properties of the solution $x$ imply that $\cEl_{11}x_1:[0,T]\to Y$ and \mbox{$\cEl_{21}x_1:[0,T]\to U$} are both continuous with $\cEl_{11}x_1(0+)=z_{1,0}$, $\cEl_{21}x_1(0+)=z_{2,0}$, $\cEl_{11}x_1\in H^{1}_{\rm loc}((0,T],Y)$, $\cEl_{21}x_1\in H^{1}_{\rm loc}((0,T],U)$ and $x_2\in L^{2}_{\rm loc}((0,T],U)$. Furthermore, for almost all \mbox{$t\!\in\! [0,T]$}, $x_1(t)\in D(\partial\varphi)$ and \[\begin{aligned} \cEl_{21}^*x_2(t)\!-\!\cEl_{11}^*\tfrac{\rm d}{{\rm d}t}\cEl_{11} x_1(t) \overset{\eqref{eq:subsdaeref}}{=}&\cEl_{21}^*(\cAl_{22}^{-1})^*\cAl_{22}^{-1}\bigl(u(t)\!-\!\tfrac{\rm d}{{\rm d}t} \cEl_{21}x(t)\bigr)\!-\!\cEl_{11}^*\tfrac{\rm d}{{\rm d}t}\cEl_{11} x(t)\\ \overset{\eqref{eq:Edef}}{\underset{\&\eqref{eq:fdaedef}}{=}}&\,\mathcal{E}^*f(t)-\mathcal{E}^*\tfrac{\rm d}{{\rm d}t}\mathcal{E} x(t)\in\partial\varphi(x(t))=\partial\varphi(x_1(t)),\\[1mm] \cAl_{22}\cAl_{22}^*x_2(t)+\tfrac{\rm d}{{\rm d}t}\cEl_{21} x_1(t) \overset{\eqref{eq:subsdaeref}}{=}&\,u(t). \end{aligned}\] So far, we have proven that $(x_1,x_2)$ fulfills \ref{item:solsysa1})-\ref{item:solsysc}). Since, by Theorem~\ref{lem:symmdae}, $x$ satisfies \eqref{eq:DAEBarb1}--\eqref{eq:DAEBarb2}, we obtain from \eqref{eq:Edef} and \eqref{eq:subsdaeref} that \eqref{eq:DAEBarbsys1}--\eqref{eq:DAEBarbsys3.5} hold. Moreover, for all $0<t_0 \leq t_1\leq T$, {\allowdisplaybreaks\begin{align*} \varphi(x_1(t_1))-&\,\varphi({x_1}(t_0))\overset{\eqref{eq:subsdaeref}}{=}\varphi(x(t_1))-\varphi(x(t_0))\\ \overset{\eqref{eq:DAEBarb5}}{=} &\,\int_{t_0}^{t_1}\langle f(\tau),\tfrac{\rm d\,}{{\rm d}\tau}\mathcal{E} x(\tau)\rangle_{Y\times U}\,{\rm d}\tau-\int_{t_0}^{t_1}\|\tfrac{\rm d\,}{{\rm d}\tau} \mathcal{E}{x}(\tau)\|^2_{Y\times U}\,{\rm d}\tau\\ \overset{\eqref{eq:Edef}}{\underset{\&\eqref{eq:fdaedef}}{=}}& \int_{t_0}^{t_1}\bigl\langle\cAl_{22}^{-1}u(\tau),\cAl_{22}^{-1}\tfrac{\rm d\,}{{\rm d}\tau}\cEl_{21}x(\tau)\bigr\rangle_{U} \,{\rm d}\tau\\ &\quad-\int_{t_0}^{t_1}\|\tfrac{\rm d\,}{{\rm d}\tau} \cEl_{11}{x}(\tau)\|^2_{Y} +\|\cAl_{22}^{-1}\tfrac{\rm d\,}{{\rm d}\tau} \cEl_{21}{x}(\tau)\|^2_{U}\,{\rm d}\tau\\ \overset{\eqref{eq:subsdaeref}}{=}& \int_{t_0}^{t_1}\bigl\langle \cAl_{22}^{-1}u(\tau), \cAl_{22}^{-1}u(\tau)-\cAl_{22}^*x_2(\tau)\bigr\rangle_{{U}}\,{\rm d}\tau\\ &\quad-\int_{t_0}^{t_1}\bigl\|\tfrac{\rm d\,}{{\rm d}\tau} \cEl_{11}{x_1}(\tau)\bigr\|^2_{Y} +\bigl\|\cAl_{22}^{-1}u(\tau)-\cAl_{22}^*x_2(\tau)\bigr\|^2_{U}\,{\rm d}\tau\\ =\enskip & \!\! 
\int_{t_0}^{t_1}\bigl\|\cAl_{22}^{-1}u(\tau)\bigr\|^2_{{U}} -\bigl\langle \cAl_{22}^*x_2(\tau),\cAl_{22}^{-1}u(\tau)\bigr\rangle_{U}-\bigl\|\cAl_{22}^*x_2(\tau)\bigr\|^2_{U} \,{\rm d}\tau\\ &\quad -\!\int_{t_0}^{t_1} \bigl\|\tfrac{\rm d\,}{{\rm d}\tau} \cEl_{11}{x_1}(\tau)\bigr\|^2_{Y} +\bigl\|\cAl_{22}^{-1}u(\tau)\bigr\|^2_{U} -\!2\bigl\langle\cAl_{22}^*x_2(\tau),\cAl_{22}^{-1}u(\tau)\bigr\rangle_{U}\,{\rm d}\tau\\ =\;&\!\!\int_{t_0}^{t_1}\bigl\langle x_2(\tau), u(\tau)\bigr\rangle_{U}\,{\rm d}\tau -\int_{t_0}^{t_1}\bigl\|\cAl_{22}^*x_2(\tau)\bigr\|^2_{U}\, {\rm d}\tau -\int_{t_0}^{t_1}\bigl\|\tfrac{\rm d\,}{{\rm d}\tau} \cEl_{11}{x_1}(\tau)\bigr\|^2_{Y}\, {\rm d}\tau. \end{align*}} Thus, \eqref{eq:DAEBarbsys4} is fulfilled. If, further, $(z_{1,0},\mathcal{A}_{22}^{-1}z_{2,0})\in \mathcal{E} D(\varphi)$, then Theorem~\ref{lem:symmdae} yields \eqref{eq:DAEBarb3} and \eqref{eq:DAEBarb4}, whence \eqref{eq:subsdaeref} leads to \eqref{eq:DAEBarbsys5}--\eqref{eq:DAEBarbsys8}. The identity \eqref{eq:DAEBarbsys4} for all \mbox{$0\leq t_0\leq t_1\leq T$} can be concluded from the corresponding statement in Theorem~\ref{lem:symmdae} and the argument in the proof of \eqref{eq:DAEBarbsys4}.\vspace*{-1mm} \end{proof} \vspace*{-3mm} \begin{remark}\ \vspace*{-2mm} \begin{romanlist}[a)] \item The additional features of the solution of system \eqref{eq:opdaegrad} in the case where \linebreak $(z_{1,0},\mathcal{A}_{22}^{-1}z_{2,0})\in \mathcal{E} D(\varphi)$ are guaranteed if and only if $(z_{1,0},\mathcal{A}_{22}^{-1}z_{2,0}) = \mathcal{E} x_{{0}}$ for some \mbox{$x_{0}\in D(\varphi)+(\ker\cEl_{11}\cap \ker\cEl_{21})$}. \item Loosely speaking, Corollary~\ref{thm:infDAEsys} states that $\cEl_{11}x_1$ and $\cEl_{21}x_1$ are differentiable almost everywhere on the open interval $(0,\infty )$, and that this holds for every choice of initial values in the closure of the range of $\mathcal{E}$. This regularization effect also occurs in the theory of linear semigroups, namely for differentiable and, in particular, analytic semigroups. For instance, in the theory of linear abstract ordinary differential equations of type $\dot{x}(t) + \cAl x(t) = f(t)$, this phenomenon occurs if the operator $\cAl$ is densely defined and {\em sectorial} \cite[Chapter~II.4]{EngeNage00}. If system \eqref{eq:opdaegrad} is linear (equivalently, $\varphi$ is a~quadratic functional), then by a~careful inspection of the proofs of Lemma~\ref{lem:modfun} and Corollary~\ref{thm:infDAEsys}, it can be seen that the dynamics of \eqref{eq:opdaegrad} is governed by a~nonnegative and self-adjoint linear operator $\cAl$. Such an~operator is sectorial, whence the associated abstract ordinary differential equation has the aforementioned smoothing property. \item Note that we have not proven the measurability of the solution $(x_1,x_2)$ of \eqref{eq:opdaegrad}, but only measurability of the functions $t\mapsto \tfrac{d}{dt}\cEl_{11} x_1(t)$, \mbox{$t\mapsto \tfrac{d}{dt}\cEl_{21} x_1(t)$}, $t\mapsto x_2(t)$ and, if additionally $\partial\varphi$ is a~function, $t\mapsto\partial\varphi(x_1(t))$. To prove measurability of $x_1$, some additional assumptions have to be imposed. As compositions of continuous and measurable functions are measurable, such an assumption guaranteeing measurability of $x_1$ can be, for instance, that the mapping $(\cEl_{11} x_1,\cEl_{21} x_1,\partial\varphi(x_1))\mapsto x_1$ is well-defined in some sense and moreover continuous.
Such an argument is used in the forthcoming section, where we study the solvability of the coupled MQS system \eqref{eq:MQS}. \end{romanlist} \end{remark} \section{Back to the Coupled MQS System: Existence and Regularity of Solutions} \label{sec:MQSsolv} Having developed the framework on abstract differential-algebraic gradient systems, we are now ready to prove the main result of this paper, namely the existence of solutions to the coupled MQS system \eqref{eq:MQS}. A~key ingredient is that, by Proposition~\ref{prop:enfun}~\ref{prop:enfun3}), the second summand in equation~\eqref{eq:MQS1} of the coupled MQS system is the subgradient of the magnetic energy $E$ as defined in \eqref{eq:varphiA}. \begin{theorem}[Existence, uniqueness and regularity of solutions]\label{thm:solMQS} Let \mbox{$\mathit{\Omega}\subset\mathbb{R}^3$} with subdomains $\mathit{\Omega}_C$ and $\mathit{\Omega}_I$ satisfy Assumption~\textup{\ref{ass:omega}}, and let $X(\mathit{\Omega},\mathit{\Omega}_C)$ and $X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$ be defined as in \eqref{eq:statespace} and \eqref{eq:statespace2}, respectively. Further, let the initial and winding functions be as in Assumption~\textup{\ref{ass:init}} and the material parameters as in Assumption~\textup{\ref{ass:material}}. Let $T>0$ be fixed and $\bm{v}\in L^2([0,T];\mathbb{R}^m)$. Then the coupled MQS system \eqref{eq:MQS} admits a unique weak solution $(\bm{A},\bm{i})$ on $[0,T]$ in the sense of Definition~\textup{\ref{def:sol}}. This solution has the following properties: \begin{align} \left(t\mapsto t^{1/2}\tfrac{\rm d}{{\rm d}t}(\sigma\bm{A}(t))\right)\in&\, L^2([0,T];X(\mathit{\Omega},\mathit{\Omega}_C)),\label{eq:MQSDAEsys2}\\ \left(t\mapsto t^{1/2}\tfrac{\rm d}{{\rm d}t}\int_\mathit{\Omega} \chi^\top\bm{A}(t) \, {\rm d}\xi\right)\in&\, L^2([0,T];\R^m),\label{eq:MQSDAEsys3}\\ \left(t\mapsto t^{1/2}\;(\nabla\times\bm{A}(t))\right)\in&\, L^\infty([0,T];X(\mathit{\Omega},\mathit{\Omega}_C)),\label{eq:MQSDAEsys2.5}\\ \left(t\mapsto t^{1/2}\, \bm{i}(t)\right)\in&\, L^2([0,T];\R^m),\label{eq:MQSDAEsys3.5}\\ \left(t\mapsto t^{1/2}\, \nu(\cdot,\|\nabla \times \bm{A}(t)\|_2) \nabla \times \bm{A}(t)\right)\in&\,L^2([0,T];H(\curl,\mathit{\Omega})).\label{eq:MQSDAEsys4} \end{align} For almost all $t\in[0,T]$, \begin{align} \tfrac{\partial}{\partial t}\left(\sigma\bm{A}(t)\right) + \nabla \times \left(\nu(\cdot,\|\nabla \times \bm{A}(t)\|_2) \nabla \times \bm{A}(t)\right) & = \, \chi\, \bm{i}(t) , \label{eq:classsolMQS} \\ \tfrac{\rm d }{{\rm d} t}\int_\mathit{\Omega} \chi^\top\bm{A}(t)\, {\rm d}\xi + R\,\bm{i}(t) & =\, \bm{v}(t) .\label{eq:classsolMQS2} \end{align} If, moreover, $\bm{A}_0\in X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$, then the solution fulfills \begin{align} \nabla\times\bm{A}\in&\, L^\infty([0,T];X(\mathit{\Omega},\mathit{\Omega}_C)),\label{eq:MQSDAEsys0.5s}\\ \sigma\bm{A}\in&\, H^1([0,T];X(\mathit{\Omega},\mathit{\Omega}_C)),\label{eq:MQSDAEsys2s}\\ \int_\mathit{\Omega} \chi^\top\bm{A} \, {\rm d}\xi\in&\, H^1([0,T];\R^m),\label{eq:MQSDAEsys3s}\\ \bm{i}\in&\, L^2([0,T];\R^m).\label{eq:MQSDAEsys3.5s} \end{align} \end{theorem} \begin{proof} Step 1: First, we verify that, by taking the spaces $X=Y=X(\mathit{\Omega},\mathit{\Omega}_C)$ and $U=\R^m$, the operators $\cEl_{11} : X(\mathit{\Omega},\mathit{\Omega}_C) \to X(\mathit{\Omega},\mathit{\Omega}_C)$, \mbox{$\cEl_{21} : X(\mathit{\Omega},\mathit{\Omega}_C) \to \R^m$} and $\cAl_{22} : \R^m \to\R^m$ defined in \eqref{eq:EAiiop} and the functional $\varphi= E$ with the magnetic energy~$E$
as in \eqref{eq:varphiA} fulfill the assumptions of Corollary~\ref{thm:infDAEsys}. It follows from Proposition~\ref{prop:enfun} that $ E:X\to\mathbb{R}_{\ge0}\cup\{\infty\}$ is densely defined, convex, lower semicontinuous and $\mathcal{E}$-elliptic for $\mathcal{E}\in\mathcal{L}(X(\mathit{\Omega},\mathit{\Omega}_C),X(\mathit{\Omega},\mathit{\Omega}_C)\times \mathbb{R}^m)$ as in \eqref{eq:Edef}. Step~2: By using the representation of $\partial E$ from Proposition~\ref{prop:enfun}~\ref{prop:enfun3}), we conclude from Corollary~\ref{thm:infDAEsys} that there exists a function $(\bm{A},\bm{i}):[0,T]\to X(\mathit{\Omega},\mathit{\Omega}_C)\times\R^m$ with the following properties, by respectively referring to \ref{item:solsysa1})-\ref{item:solsysc}) in Corollary~\ref{thm:infDAEsys}:\vspace*{-1mm} \begin{romanlist}[a)] \item\label{item:solproof1} $\sigma \bm{A} \in C([0,T] ; X(\mathit{\Omega},\mathit{\Omega}_C)) \cap H^1_{\rm loc} ((0,T] ; X(\mathit{\Omega},\mathit{\Omega}_C))$ and $\sigma \bm{A}(0)=\sigma \bm{A}_0$; \item\label{item:solproof1a} $\int_\mathit{\Omega} \chi^\top\bm{A} \, {\rm d}\xi \in C ([0,T]; \R^m) \cap H^1_{\rm loc} ((0,T] ; \R^m )$ and $\int_\mathit{\Omega} \chi^\top\bm{A}(0) \, {\rm d}\xi=\int_\mathit{\Omega} \chi^\top\bm{A}_0 \, {\rm d}\xi$; \item\label{item:solproof5} $\bm{i}\in L^{2}_{\rm loc}((0,T];\R^m)$; \item\label{item:solproof6} $\nu(\cdot,\|\nabla \times \bm{A}(t)\|_2) \nabla \times \bm{A}(t)\in H(\curl,\mathit{\Omega})$, and the equations \eqref{eq:classsolMQS} and \eqref{eq:classsolMQS2} hold for almost all $t\in[0,T]$. \end{romanlist} \vspace*{-2mm} Step~3: We show that $(\bm{A},\bm{i})$ is a weak solution of the coupled MQS system \eqref{eq:MQS} in the sense of Definition~\ref{def:sol}. By using the results from Step~2, it remains to prove that\vspace*{-2mm} \begin{romanlist}[(i)] \item $\bm{A}\in L^2([0,T],X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C))$, and \item for all $\bm{F}\in X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$, equations \eqref{eq:weak} are fulfilled for almost all $t\in[0,T]$. \end{romanlist} Statement (ii) is a~simple consequence of \ref{item:solproof6}), that is, equations \eqref{eq:classsolMQS} and \eqref{eq:classsolMQS2}, and the integration by parts formula \eqref{eq:curladj}. In order to prove (i), first note that by the properties \ref{item:solproof1}) and \ref{item:solproof5}) above, \[ \tfrac{{\rm d}}{{\rm d} t}\left(\sigma\bm{A}\right) , \, \chi\, \bm{i} \in L^2_{\rm loc} ((0,T] ; X(\mathit{\Omega},\mathit{\Omega}_C) ) . \] Hence, by property \ref{item:solproof6}) (more precisely, by equation \eqref{eq:classsolMQS}), \begin{equation} \label{eq:new1} \partial E(\bm{A} ) = \nabla \times \left(\nu(\cdot,\|\nabla \times \bm{A}\|_2) \nabla \times \bm{A}\right) \in L^2_{\rm loc} ((0,T] ; X(\mathit{\Omega},\mathit{\Omega}_C) ) . \end{equation} Second, let the operator $\cAl_{11}$ be defined as in \eqref{eq:A11}. Note that $\cAl_{11} = \partial E$ on $D(\partial E)$. 
By the estimates \eqref{eq:curlest} and \eqref{eq:A11diss} from Lemmas \ref{lem:denscoerc} and \ref{lem:A11mon}, respectively, for all \mbox{$\bm{A}_1$, $\bm{A}_2\in X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$},
\begin{align*} \| \bm{A}_1 - \bm{A}_2&\|_{H(\curl , \mathit{\Omega})}^2 = \,\| \bm{A}_1 - \bm{A}_2\|_{L^2 (\mathit{\Omega} ;\R^3)}^2 + \| \nabla\times (\bm{A}_1 - \bm{A}_2)\|_{L^2 (\mathit{\Omega} ;\R^3)}^2 \\ &\leq\, \tfrac{L_C}{\sigma_C^2} \, \| \sigma (\bm{A}_1 - \bm{A}_2)\|_{L^2 (\mathit{\Omega};\R^3)}^2 + (L_C +1) \| \nabla\times (\bm{A}_1 - \bm{A}_2)\|_{L^2 (\mathit{\Omega};\R^3)}^2 \\ &\leq \,\tfrac{L_C}{\sigma_C^2} \, \| \sigma (\bm{A}_1 -\bm{A}_2)\|_{L^2 (\mathit{\Omega};\R^3)}^2 + \tfrac{L_C+1}{m_\nu} \, \langle \bm{A}_1 - \bm{A}_2 , \cAl_{11}(\bm{A}_1) - \cAl_{11}(\bm{A}_2)\rangle \\ &\leq \,\tfrac{L_C}{\sigma_C^2} \, \| \sigma (\bm{A}_1 - \bm{A}_2)\|_{L^2 (\mathit{\Omega};\R^3)}^2 \\ &\,\phantom{\leq + } + \tfrac{L_C+1}{m_\nu} \, \| \bm{A}_1 - \bm{A}_2 \|_{H(\curl , \mathit{\Omega})} \| \cAl_{11}(\bm{A}_1) - \cAl_{11}(\bm{A}_2) \|_{X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)'}. \end{align*}
This inequality combined with Young's inequality implies that there is a constant $C\geq 0$ such that, for all $\bm{A}_1$, $\bm{A}_2\in X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$,
\begin{equation} \| \bm{A}_1 - \bm{A}_2\|_{H(\curl , \mathit{\Omega})}^2 \leq C \bigl(\| \sigma(\bm{A}_1 -\bm{A}_2)\|_{L^2 (\mathit{\Omega};\R^3)}^2 + \| \cAl_{11}(\bm{A}_1) -\cAl_{11}(\bm{A}_2) \|_{X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)'}^2 \bigr). \end{equation}
In other words, the mapping
$$ \begin{array}{rcl} X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C) & \to &X(\mathit{\Omega},\mathit{\Omega}_C) \times X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)' , \\ \bm{A} & \mapsto& (\sigma \bm{A} , \cAl_{11}(\bm{A}) ) \end{array} $$
has a Lipschitz continuous inverse defined on the range of the above mapping. Thus, the continuity of $\sigma\bm{A}$ with values in $X(\mathit{\Omega},\mathit{\Omega}_C)$ and \eqref{eq:new1} imply
\[ \bm{A} \in L^2_{\rm loc} ((0,T] ; X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)) . \]
However, since the mapping $t\mapsto E(\bm{A}(t))$ is integrable on $[0,T]$ by Corollary~\ref{thm:infDAEsys}, and by the estimate \eqref{eq:estE}, we have $\nabla \times \bm{A}\in L^2([0,T];X(\mathit{\Omega},\mathit{\Omega}_C))$. Thus, the continuity of~$\sigma\bm{A}$ with values in $X(\mathit{\Omega},\mathit{\Omega}_C)$ and Lemma \ref{lem:denscoerc} actually imply the stronger statement
\[ \bm{A} \in L^2 ([0,T] ; X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)) , \]
which is property (i) above. Step~4: Next, we prove \eqref{eq:MQSDAEsys2}-\eqref{eq:MQSDAEsys4}. By using Step~1, \eqref{eq:MQSDAEsys2}, \eqref{eq:MQSDAEsys3} and \eqref{eq:MQSDAEsys3.5} are, respectively, consequences of \eqref{eq:DAEBarbsys2}, \eqref{eq:DAEBarbsys3} and \eqref{eq:DAEBarbsys3.5} in Corollary~\ref{thm:infDAEsys}. Further, by invoking \eqref{eq:estE} again, we see that \eqref{eq:DAEBarbsys1.5} implies \eqref{eq:MQSDAEsys2.5}. The remaining relation \eqref{eq:MQSDAEsys4} can be verified as follows. Using \eqref{eq:Mnugreater}, we obtain that $\nu(\cdot,\|\nabla \times \bm{A}\|_2)$ is essentially bounded. This together with \eqref{eq:MQSDAEsys2.5} yields that $\left(t\mapsto t^{1/2}\, \nu(\cdot,\|\nabla \times \bm{A}(t)\|_2) \nabla \times \bm{A}(t)\right)\in L^2([0,T];L^2(\mathit{\Omega};\mathbb{R}^3))$.
Since, moreover, by \eqref{eq:classsolMQS}, we have
\[ \nabla \times \left(\nu(\cdot,\|\nabla \times \bm{A}(t)\|_2) \nabla \times \bm{A}(t)\right) = \, -\tfrac{\rm d}{{\rm d} t}\left(\sigma\bm{A}(t)\right)+\chi \bm{i}(t), \]
the relations \eqref{eq:MQSDAEsys2} and \eqref{eq:MQSDAEsys3.5} lead to
\[\left(t\mapsto t^{1/2}\, \nabla \times \left(\nu(\cdot,\|\nabla \times \bm{A}(t)\|_2) \nabla \times \bm{A}(t)\right)\right)\in L^2([0,T];L^2(\mathit{\Omega};\mathbb{R}^3)),\]
whence \eqref{eq:MQSDAEsys4} holds. Step~5: Finally, we show that \eqref{eq:MQSDAEsys0.5s}-\eqref{eq:MQSDAEsys3.5s} hold, if the initial value additionally fulfills $\bm{A}_0\in X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C) = D(E)$. The statements \eqref{eq:MQSDAEsys2s}-\eqref{eq:MQSDAEsys3.5s} can be proven analogously to the results in Step~4 by invoking \eqref{eq:DAEBarbsys6}-\eqref{eq:DAEBarbsys8} in Corollary~\ref{thm:infDAEsys}. To prove \eqref{eq:MQSDAEsys0.5s}, we first make use of Corollary~\ref{thm:infDAEsys} which implies, via \eqref{eq:DAEBarbsys5}, that $\left(t\mapsto E(\bm{A}(t))\right)\in W^{1,1}([0,T])$ and, hence, {$\left(t\mapsto E(\bm{A}(t))\right)\in L^{\infty}([0,T])$.} This, together with \eqref{eq:estE}, yields that $\|\nabla\times\bm{A}(t)\|_{L^2(\mathit{\Omega};\mathbb{R}^3)}$ is essentially bounded. \end{proof}
\begin{remark} As mentioned in the introduction, linear coupled MQS systems with~$\nu$ being independent of $\bm{A}$ have been studied in \textup{\cite{NicT14}}, where it has additionally been assumed that the non-conducting subdomain $\Omega_I$ is connected. In the language of Assumption~\ref{ass:omega}, this means that $q=0$ and $\mathit{\Omega}_I=\mathit{\Omega}_{I,{\rm ext}}$. Further, solutions have been sought in which the magnetic vector potential evolves in the space
\[ Y(\mathit{\Omega})=\bigl\{\bm{F}\in H_0(\curl,\mathit{\Omega})\cap L_2(\divg\!=\!0,\mathit{\Omega}_C\cup\mathit{\Omega}_I;\R^3) \;:\; \langle \left.\bm{F}\right|_{\Omega_I}\cdot\bm{n},1\rangle_{L^2(\Gamma_{{\rm ext}})}=0\bigr\}. \]
Here, \mbox{$\left.\bm{F}\right|_{\Omega_I}\!\cdot\bm{n}$} stands for the normal boundary trace of the restriction of $\bm{F}$ to the non-conducting domain (this normal boundary trace is well-defined by \textup{\cite[Theo\-rem~I.2.5]{GiraRavi86}} and the fact that $\bm{F}$ has a~weak divergence $\nabla\cdot \bm{F}\in L^2(\Omega)$). Note that the space $Y(\mathit{\Omega})$ coincides with our space $X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$ under the assumption in \textup{\cite{NicT14}} that the non-conducting subdomain is connected. To see that $Y(\mathit{\Omega})\subset X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$, let $\psi\in H^1_0(\Omega)$ with $\left.\psi\right|_{\Gamma_{{\rm ext}}}=c_{{\rm ext}}$ for some $c_{{\rm ext}}\in\R$. Then, by using integration by parts with the weak divergence, we obtain for all $\bm{F}\in Y(\mathit{\Omega})$ that
\[\begin{aligned} \langle\nabla\psi,\bm{F}\rangle_{L^2(\Omega;\R^3)} &=-\langle\psi,\underbrace{\nabla\cdot\bm{F}}_{=0}\rangle_{L^2(\Omega)}\!+\! \langle\left.\bm{F}\right|_{\Omega_I}\!\cdot\bm{n},\underbrace{\!\psi\!}_{=c_{{\rm ext}}}\rangle_{L^2(\Gamma_{{\rm ext}})}\!+\! \langle\left.\bm{F}\right|_{\Omega_I}\!\cdot\bm{n},\underbrace{\!\psi\!}_{=0}\rangle_{L^2(\partial\Omega)}\\ &=c_{{\rm ext}}\langle\left.\bm{F}\right|_{\Omega_I}\cdot\bm{n},1\rangle_{L^2(\Gamma_{{\rm ext}})}=0. \end{aligned}\]
Hence, $\bm{F}\in X(\mathit{\Omega},\mathit{\Omega}_C)$.
Further, $Y(\mathit{\Omega})\subset H_0(\curl,\mathit{\Omega})$ implies \mbox{$\bm{F}\in X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$}. On the other hand, if $\bm{F}\in X_0(\curl,\mathit{\Omega},\mathit{\Omega}_C)$, then $\bm{F}\in L^2(\divg\!=\!0, \mathit{\Omega}_C \cup \mathit{\Omega}_I ;\R^3)$ by \eqref{eq:divfree}. To prove that $\bm{F}\in Y(\mathit{\Omega})$, it remains to show that the integral of the normal trace of $\left.\bm{F}\right|_{\Omega_I}$ over $\Gamma_{{\rm ext}}$ vanishes. To see this, let $\psi\in H^1_0(\Omega)$ be such that $\left.\psi\right|_{\Omega_C}\equiv1$ (which exists by $\overline{\mathit{\Omega}}_{C}\subseteq {\mathit{\Omega}}$). Then $\nabla\psi\in G(\Omega,\Omega_C)$, and, by further invoking that $\nabla\psi$ vanishes on $\Omega_C$, we obtain \[\begin{aligned} 0=&\,\langle\nabla\psi,\bm{F}\rangle_{L^2(\Omega;\R^3)}=\,\langle\nabla\psi,\bm{F}\rangle_{L^2(\Omega_I;\R^3)}\\ =&\,-\langle\psi,\underbrace{\nabla\cdot\bm{F}}_{=0}\rangle_{L^2(\Omega_I)}+ \langle\left.\bm{F}\right|_{\Omega_I}\cdot\bm{n},\underbrace{\psi}_{=1}\rangle_{L^2(\Gamma_{{\rm ext}})}+ \langle\left.\bm{F}\right|_{\Omega_I}\cdot\bm{n},\underbrace{\psi}_{=0}\rangle_{L^2(\partial\Omega)}\\ =&\,\langle\left.\bm{F}\right|_{\Omega_I}\cdot\bm{n},1\rangle_{L^2(\Gamma_{{\rm ext}})}. \end{aligned}\] Existence of a solution $\bm{A}\in L^2([0,T];Y(\mathit{\Omega}))$ with $\sigma\bm{A}\in W^{1,1}([0,T];Y(\mathit{\Omega})')$ is shown in \textup{\cite[Corollary~3.13]{NicT14}}. For the case where the voltage and initial value additionally fulfill $\bm{v}\in H^1([0,T];\R^m)$ and $\bm{A}_0\in H_0(\curl,\Omega)$ with $\nu \, \nabla\times \bm{A}_0\in H(\curl,\mathit{\Omega})$, it is proven in \textup{\cite[Theorem~3.11]{NicT14}} that $\bm{A}\in H^1([0,T];H_0(\curl,\mathit{\Omega}))$. \end{remark} \section{Conclusion} We have considered a~quasilinear MQS approximation of Maxwell's equations, which is furthermore coupled with an~integral equation. By employing the magnetic energy, this system can be reformulated as an abstract differential-algebraic equation involving a~subgradient. For this class of equations, we have developed novel well-posedness and regularity results which we have then applied to the coupled MQS system. \begin{thebibliography}{10} \newcommand{\enquote}[1]{#1} \bibitem{Alt16} H.~W. Alt, {\it Linear Functional Analysis}, Universitext (Springer-Verlag, London, 2016). \bibitem{ArnH12} L.~Arnold and B.~Harrach, \enquote{A uni{fi}ed variational formulation for the parabolic-elliptic eddy current equations}, {\it SIAM J. Appl. Math.} \textbf{72} (2012) 558--576. \bibitem{BLS05} F.~Bachinger, U.~Langer and J.~Sch\"oberl, \enquote{Numerical analysis of nonlinear multiharmonic eddy current problems}, {\it Numer. Math.} \textbf{100} (2005) 593--616. \bibitem{Bar10} V.~Barbu, {\it Nonlinear Differential Equations of Monotone Types in Banach Spaces}, Springer Monographs in Mathematics (Springer-Verlag, New York, 2010). \bibitem{Bre73} H.~Brezis, {\it Op\'erateurs maximaux monotones et semi-groupes de contractions dans les espaces de {H}ilbert}, volume~5 of {\it North Holland Mathematics Studies} (North-Holland, Amsterdam, London, 1973). \bibitem{CHK16} R.~Chill, D.~Hauer and J.~Kennedy, \enquote{Nonlinear semigroups generated by $j$-elliptic functionals}, {\it J. Math. Pures Appl.} \textbf{105} (2016) 415--450. 
\bibitem{BdGCS18} I.~Cortes~Garcia, S.~Sch\"ops, H.~De~Gersem and S.~Baumanns, \enquote{Systems of differential algebraic equations in computational electromagnetics}, in {\it Applications of Differential-Algebraic Equations: Examples and Benchmarks}, eds. S.~Campbell, A.~Ilchmann, V.~Mehrmann and T.~Reis (Springer, Cham, 2018), Differential-Algebraic Equations Forum, pp. 123--169. \bibitem{EngeNage00} K.-J. Engel and R.~Nagel, {\it One-Parameter Semigroups for Linear Evolution Equations}, volume 194 of {\it Graduate Texts in Mathematics} (Springer-Verlag, New York, 2000). \bibitem{GiraRavi86} V.~Girault and P.-A. Raviart, {\it Finite Element Methods for the Navier–Stokes Equations - Theory and Algorithms}, volume~5 of {\it Springer Series in Computational Mathematics} (Springer-Verlag, Berlin Heidelberg, 1986). \bibitem{HauM89} H.~Haus and J.~Melcher, {\it Electromagnetic Fields and Energy} (Prentice Hall, Englewood Cliffs, NJ, 1989). \bibitem{IdaBastos97} N.~Ida and J.~Bastos, {\it Electromagnetics and Calculation of Fields} (Springer, New York, 1997). \bibitem{Ja99} J.~Jackson, {\it Classical Electrodynamics} (Wiley, New York, 1999). \bibitem{KunM06} P.~Kunkel and V.~Mehrmann, {\it Differential-Algebraic Equations. {A}nalysis and Numerical Solution} (EMS Publishing House, Z{\"u}rich, Switzerland, 2006). \bibitem{LamMT13} R.~Lamour, R.~M\"arz and C.~Tischendorf, {\it Differential-Algebraic Equations: A Projector Based Analysis}, Differential-Algebraic Equations Forum (Springer-Verlag, Berlin, Heidelberg, 2013). \bibitem{NicT14} S.~Nicaise and F.~Tr\"oltzsch, \enquote{A coupled {M}axwell integrodifferential model for magnetization processes}, {\it Math. Nachr.} \textbf{287} (2014) 432--452. \bibitem{NicT17} S.~Nicaise and F.~Tr\"oltzsch, \enquote{Optimal control of some quasilinear {M}axwell equations of parabolic type}, {\it Discrete Contin. Dyn. Syst.} \textbf{10} (2017) 1375--1391. \bibitem{PauPic17} D.~Pauly and R.~Picard, \enquote{A note on the justification of the eddy current model in electrodynamics}, {\it Math. Methods Appl. Sci.} \textbf{40} (2017) 7104--7109. \bibitem{PauPTW21} D.~Pauly, R.~Picard, S.~Trostorff and M.~Waurick, \enquote{On a class of degenerate abstract parabolic problems and applications to some eddy current models}, {\it J. Funct. Anal.} \textbf{280} (2021) 108847. \bibitem{Rei06} T.~Reis, \enquote{Systems theoretic aspects of {PDAE}s and applications to electrical circuits}, {Ph.D.} thesis, Technische Universit\"at Kaiserslautern, 2006. \bibitem{Rei07} T.~Reis, \enquote{Consistent initialization and perturbation analysis for abstract differential-algebraic equations}, {\it Math. Control Signals Systems} \textbf{19} (2007) 255--281. \bibitem{RT05} T.~Reis and C.~Tischendorf, \enquote{Frequency domain methods and decoupling of linear infinite dimensional differential algebraic systems}, {\it J. Evol. Equ.} \textbf{5} (2005) 357--385. \bibitem{Ros19} A.~Ros\'en, {\it Geometric Multivector Analysis - From Grassmann to Dirac}, Birkh\"auser Advanced Texts, Basler Lehrb\"ucher (Birkh\"auser, Basel, 2019). \bibitem{SchoepsDGW13} S.~Sch\"ops, H.~De~Gersem and T.~Weiland, \enquote{Winding functions in transient magnetoquasistatic field-circuit coupled simulations}, {\it COMPEL} \textbf{32} (2013) 2063--2083. \bibitem{Show77} R.~Showalter, {\it Hilbert Space Methods in Partial Differential Equations}, volume~1 of {\it Monographs and Studies in Mathematics} (Pitman, London, 1977). 
\bibitem{TrosWaur18} S.~Trostorff and M.~Waurick, \enquote{On higher index differential-algebraic equations in infinite dimensions}, in {\it The Diversity and Beauty of Applied Operator Theory}, eds. A.~B\"{o}ttcher, D.~Potts, P.~Stollmann and D.~Wenzel (Birkh\"{a}user, Basel, Switzerland, 2018), Operator Theory: Advances and Applications, pp. 477--486.
\bibitem{TrosWaur19} S.~Trostorff and M.~Waurick, \enquote{On differential-algebraic equations in infinite dimensions}, {\it J. Differential Equations} \textbf{266} (2019) 526--561.
\bibitem{Yagi10} A.~Yagi, {\it Abstract Parabolic Evolution Equations and their Applications}, Springer Monographs in Mathematics (Springer-Verlag, Berlin Heidelberg, 2010).
\bibitem{You13} I.~Yousept, \enquote{Optimal control of quasilinear {$H(\mbox{curl})$}-elliptic partial differential equations in magnetostatic field problems}, {\it SIAM J. Control Optim.} \textbf{51} (2013) 3624--3651.
\bibitem{Zeid86} E.~Zeidler, {\it Nonlinear Functional Analysis and its Applications I: Fixed Point Theorems} (Springer-Verlag, New York, 1986).
\end{thebibliography} \end{document}
2205.15208v1
http://arxiv.org/abs/2205.15208v1
Defects and excitations in the Kitaev model
\documentclass[11pt,a4paper,twoside]{article} \usepackage[utf8]{inputenc} \usepackage{lmodern} \usepackage{microtype} \usepackage[pdfborder={0 0 0}]{hyperref} \usepackage{amsmath,amsthm} \usepackage[T1]{fontenc} \usepackage{mathtools} \usepackage[numbers]{natbib} \usepackage{graphicx} \usepackage{amssymb} \usepackage{color} \usepackage{hyperref} \usepackage{svg} \usepackage{float} \usepackage{caption} \usepackage{pstricks} \usepackage{pdfpages} \usepackage[all]{xy} \usepackage{fancyhdr} \usepackage{enumerate} \usepackage{tikz,pgfplots} \usetikzlibrary{cd} \usepackage{xr,xr-hyper} \usepackage{paralist} \usepackage{marginnote} \usepackage[text={16.5cm, 24.2cm}, centering]{geometry} \setlength{\parskip}{5pt} \setlength{\parindent}{0pt} \synctex=1 \newtheorem{lemma}{Lemma}[section] \newtheorem{corollary}[lemma]{Corollary} \newtheorem{theorem}[lemma]{Theorem} \newtheorem{proposition}[lemma]{Proposition} \newtheorem{example}[lemma]{Example} \newtheorem{remark}[lemma]{Remark} \newtheorem{claim}[lemma]{Claim} \theoremstyle{definition} \newtheorem{definition}[lemma]{Definition} \numberwithin{equation}{section} \newcommand{\RI}{R_{(1)}} \newcommand{\RII}{R_{(2)}} \newcommand{\ROP}{R_{21}} \renewcommand{\epsilon}{\varepsilon} \renewcommand{\phi}{\varphi} \renewcommand{\theta}{\vartheta} \newcommand{\TODO}{\text{\textcolor{red}{TODO}}} \newcommand{\todo} [1] {\marginnote{\bf to do}{\bf {#1}}} \renewcommand{\red}{\textcolor{red}} \renewcommand{\blue}{\textcolor{blue}} \newcommand{\odots}{\otimes \cdots \otimes} \newcommand{\actr}{\vartriangleleft} \newcommand{\acr}{\vartriangleleft} \newcommand{\acl}{\vartriangleright} \newcommand{\actl}{\vartriangleright} \newcommand{\dotcup}{\ensuremath{\mathaccent\cdot\cup}} \DeclareMathOperator*{\tensor}{\otimes} \DeclareMathOperator{\Hol}{Hol} \newcommand{\F}{\mathbb{F}} \newcommand{\Z}{\mathbb{Z}} \newcommand{\C}{\mathbb{C}} \newcommand{\oo}{\otimes} \newcommand{\kk}{K^{*}} \newcommand{\D}{\Delta} \newcommand{\ag}{\alpha} \newcommand{\bb}{B^{*}} \newcommand{\la}{\langle} \newcommand{\ra}{\rangle} \newcommand{\coev}{\mathrm{coev}} \newcommand{\soo}{\!\!\otimes\!\!} \DeclareMathOperator*{\mto}{\longmapsto} \newcommand{\GG}{\mathfrak{G}} \newcommand{\id}{\mathrm{id}} \newcommand{\End}{\mathrm{End}} \newcommand{\Transport}[1]{\mathcal{T}_{#1}} \newcommand{\VertexOp}[2]{A_{#1}^{#2}} \newcommand{\FaceOp}[2]{B_{#1}^{#2}} \newcommand{\HSpace}{\mathcal{N}} \newcommand{\Field}{\mathbb{F}} \newcommand{\mac}{\mathcal{C}} \newcommand{\tComma}{\quad\scalebox{1.5}{,}\quad\quad} \newcommand{\tDot}{\quad\scalebox{1.5}{.}} \newcommand{\vect}[0]{\mathrm{Vect}} \newcommand{\st}[0]{{\bf s}} \newcommand{\ta}[0]{{\bf t}} \newcommand {\coinva}[0]{{\mathrm{Coinv}}} \newcommand {\inva}[0]{\mathrm{Inv}} \newcommand{\inv}[0]{{-1}} \newcommand{\low}[2]{{#1}_{({#2})}} \allowdisplaybreaks \begin{document} \def\mytitle{Defects and excitations in the Kitaev model} \author{Thomas Vo\ss} \begin{center} {\huge\mytitle} \vspace{2em} {\large Thomas Vo\ss\footnote{{\tt [email protected]}} } Fachbereich Mathematik \\ Universit\"at Hamburg \\ Bundesstra\ss e 55, Hamburg, Germany\\[+2ex] {May 1, 2022} \begin{abstract} We construct a Kitaev model with defects using twists or 2-cocycles of semi-simple, finite-dimensional Hopf algebras as defect data. This data is derived by applying Tannaka duality to Turaev-Viro topological quantum field theories with defects. From this we also derive additional conditions for moving, fusing and braiding excitations in the Kitaev model with defects. 
We give a description of excitations in the Kitaev model and show that they satisfy the conditions we derive from Turaev-Viro topological quantum field theories with defects. Assigning trivial defect data, one obtains transparent defects, and we show that these can be removed, yielding the Kitaev model without defects. \end{abstract} \end{center} \tableofcontents
\section{Introduction}
Kitaev's \emph{lattice model} or \emph{quantum double model} was introduced in \cite{Ki} as a realistic physics model for a topological quantum computer. These models provide codes that are protected against errors by topological effects. For this reason, they are investigated extensively in topological quantum computing and in condensed matter physics. In \cite{BK12} a close relation between the Kitaev model and topological quantum field theories (TQFTs) of Turaev-Viro type \cite{TV, BW} was shown: both assign the same vector spaces to oriented surfaces. These TQFTs are of mathematical interest as they produce invariants of two- and three-dimensional manifolds. It was discovered in \cite{KKR} that Turaev-Viro invariants \cite{TV, BW} can be used to define error-correcting quantum codes, providing an additional link to Kitaev models. The goal of this article is to introduce \emph{topological defects} in Kitaev models. These defects are of interest in topological quantum computing and condensed matter physics, because they are expected to occur in practical realizations of these and related models, and this requires a theoretical understanding of their effects \cite{BD, KK}. From the mathematical perspective, Kitaev models with defects are of interest as they would describe the two-dimensional parts of Turaev-Viro TQFTs with defects. A TQFT is by definition a symmetric monoidal functor $\mathcal Z: \text{Cob}_n\to \mathcal C$ from a cobordism category $\text{Cob}_{n}$ into a symmetric monoidal category $\mathcal C$, e.g. $\mathcal{C}=\mathrm{Vect}$. In a defect TQFT the cobordism category $\text{Cob}_{n}$ is replaced by a modified cobordism category in which the $(n-1)$-manifolds are equipped with distinguished submanifolds that are assigned higher categorical data. These submanifolds can intersect the incoming and outgoing boundaries of a cobordism, and a Kitaev model with defects can thus contribute to the understanding of defect TQFTs. \\\\ \textbf{Kitaev lattice models and TQFTs}\\ \\ Kitaev's original lattice model from~\cite{Ki} was based on the algebraic data of a group algebra of a finite group. This model was then generalized to finite-dimensional semisimple Hopf-$*$ algebras $H$ in~\cite{BMCA}, and the relation between the model for $H$ and its dual $H^{*}$ was clarified in~\cite{BCKA}. Further generalizations were developed in~\cite{Ch}, to a model based on unitary quantum groupoids, and more recently in~\cite{KMM}, to a model based on crossed modules of semisimple finite-dimensional Hopf algebras. \\ The ingredients of the Kitaev model from \cite{BMCA} are a finite-dimensional semisimple Hopf-$*$ algebra $H$ and a \emph{ribbon graph} $\Gamma$ or, equivalently, an \emph{embedded graph} on a surface $\Sigma$.
From this data the model constructs an \emph{extended Hilbert space} $\HSpace$ by assigning a copy of the Hopf algebra $H$ to every edge $e$ of $\Gamma$.\\ \\ Every pair of a face and an adjacent vertex of $\Gamma$, usually called a \emph{site}, defines an action of $H$ on $\HSpace$ in terms of \emph{vertex operators} and an action of $H^{*}$ on $\HSpace$ in terms of \emph{face operators}. Together, the face and vertex operators define an action of the Drinfel'd double or quantum double $D(H)$ of $H$ on $\HSpace$. This action is local, i.e. it only affects the copies of $H$ assigned to edges in that face or incident at that vertex, and the actions for sites with distinct pairs of faces and vertices commute. For this reason, the model is sometimes referred to as the \emph{quantum double model}. \\ \\ The invariants of the $D(H)$-actions associated with all sites in $\Gamma$ form the \emph{protected space} $\mathcal{L}$. It was shown in \cite{Ki, BMCA} that $\mathcal{L}$ is a topological invariant of the surface: it depends only on the surface, but not on the choice of the ribbon graph in the definition of the model. \emph{Ribbon operators} are operators on the Hilbert space $\HSpace$ associated to ribbons in the graph. When applied to the protected space, they generate a pair of non-trivial $D(H)$-modules at their endpoints, the \emph{excitations}. Quantum computation can be performed by moving and braiding these excitations~\cite{Ki}.\\ \\ The Turaev-Viro TQFT~\cite{TV,BW} based on the fusion category $H\text{-Mod}$ also assigns a topological invariant $Z_{TV}(\Sigma)$ to the surface $\Sigma$. It was shown in~\cite{BK12} that this topological invariant coincides with the protected space $\mathcal{L}$ of the Kitaev model. Excitations in the Kitaev model were also investigated in \cite{BK12} and interpreted as a special type of boundary structure in the model. These insights were also used in~\cite{BA,Kr} to relate Kitaev models to another class of models from topological quantum computing, the Levin-Wen models \cite{LW}. \\ \\ \textbf{Kitaev models and TQFTs with defects}\\ \\ As explained above, defects and also boundaries are of interest both from the viewpoint of Kitaev models and from the viewpoint of TQFTs. For Kitaev models based on group algebras of finite groups, defects and boundaries were first described in~\cite{BD}. In~\cite{KK} Kitaev and Kong determined the categorical data for defects in a Levin-Wen model. Bulk regions, i.e. the regions separated by defects and bordered by boundaries, are labeled with unitary tensor categories $\mathcal{C},\mathcal{D}$, and defects are labeled with a $\mathcal{C}\mathrm{-}\mathcal{D}$-bimodule category $\mathcal{N}$. \\ \\ A Kitaev model with general defects was constructed very recently by Koppen~\cite{K} using the Hopf algebraic counterpart of this data. Here, bulk regions are labeled with finite-dimensional semisimple Hopf algebras $H_{1},H_{2}$ and defects are labeled with finite-dimensional, semisimple $(H_{1},H_{2})$-bicomodule algebras. \\ \\ In this article, however, we are interested in a more specific type of defect in a Kitaev model, one that behaves well with respect to excitations and can be viewed as the two-dimensional part of a Turaev-Viro TQFT with \emph{topological defects and boundaries}. Three-dimensional TQFTs of Turaev-Viro and Reshetikhin-Turaev type with \emph{topological defects and boundaries} were investigated by Fuchs, Schweigert and Valentino in \cite{FSV}.
They use physics considerations on the braiding, fusion and transport of excitations to determine the categorical data for these defects. More specifically, the categorical data for Turaev-Viro TQFTs with topological boundaries and defects from~\cite{FSV} is \begin{compactenum}[$\quad\bullet$] \item the center $\mathcal{Z}(\mathcal{A})$ of a fusion category $\mathcal{A}$ for every bulk region, \item a fusion category $\mathcal{W}_{a}$ (resp. $\mathcal{W}_{d}$) for every topological boundary $a$ (resp. topological surface defect $d$), \item a braided equivalence $\widetilde{F}_{\to a}:\mathcal{Z}(\mathcal{A}) \to \mathcal{Z}(\mathcal{W}_{a})$ for every pair of a topological boundary labeled with $\mathcal{W}_{a}$ and an adjacent bulk labeled with $\mathcal{Z(A)}$, \item a braided equivalence $\widetilde{F}_{\to d \leftarrow}: \mathcal{Z}(\mathcal{A}_1)\boxtimes \mathcal{Z}(\mathcal{A}_2)^{rev} \to \mathcal{Z}(\mathcal{W}_{d})$ for every topological surface defect labeled with $\mathcal{W}_{d}$ separating bulk regions labeled with $\mathcal{Z}(\mathcal{A}_{1})$ and $\mathcal{Z}(\mathcal{A}_2)$, composed of braided monoidal functors $\widetilde{F}_{\to d}:\mathcal{Z}\left( \mathcal{A}_{1} \right) \to \mathcal{Z}(\mathcal{W}_{d})$ and $\widetilde{F}_{d \leftarrow}:\mathcal{Z}\left( \mathcal{A}_{2} \right)^{rev} \to \mathcal{Z}(\mathcal{W}_{d})$. \end{compactenum} There are three types of excitation in a Turaev-Viro TQFT with topological boundaries and defects: \begin{compactenum}[$\quad\bullet$] \item bulk excitations are objects of $\mathcal{Z(A)}$, \item boundary excitations are objects of $\mathcal{W}_{a}$, \item defect excitations are objects of $\mathcal{W}_{d}$. \end{compactenum} These excitations can be moved, fused and braided and these procedures obey the following conditions: \begin{compactenum}[$\quad\bullet$] \item moving an excitation $M$ from a bulk region into an adjacent boundary or defect turns $M$ into $\widetilde{F}_{\to a}(M)$, resp. $\widetilde{F}_{\to d}(M)$ or $\widetilde{F}_{d\leftarrow}(M)$. \item fusion of excitations in a bulk region, boundary or defect uses the tensor product of $\mathcal{Z(A)},\mathcal{W}_{a}$ or $\mathcal{W}_{d}$, respectively. \item braiding two bulk excitations uses the braiding of $\mathcal{Z(A)}$, \item braiding a bulk excitation with a boundary excitation or defect excitation uses the half-braiding defined by the functors $\widetilde{F}_{\to a}, \widetilde{F}_{\to d}$ or $\widetilde{F}_{d\leftarrow}$. \end{compactenum} The direct and close relation between Turaev-Viro TQFTs and Kitaev models from \cite{BK12} shows that there must be an associated Kitaev model with topological defects and boundaries that satisfies analogous conditions. However, such a model has not been constructed so far, and we construct it in this article. \\ \\ \textbf{Summary of the results} \\ \\ In this article we consider Kitaev lattice models based on finite-dimensional, semisimple Hopf algebras. We extend the relation between Kitaev models and Turaev-Viro TQFTs by generalizing the Kitaev model to a model with \emph{topological defects and boundary conditions}. These defects are less general than the defects considered in~\cite{KK,K}, but allow for more structure. In~\cite{K} bicomodule algebras are used for defects, whereas our defects are labeled with twisted Hopf algebras. This allows us to move and fuse excitations inside a topological defect or boundary and we can braid bulk excitations with defect or boundary excitations. 
To obtain suitable conditions for our model, we use the link between Turaev-Viro TQFTs and the Kitaev model without defects \cite{BK12}. We first illustrate how the categorical data for a Turaev-Viro TQFT relates to the Hopf algebraic data in the Kitaev model. We then use the categorical data and the conditions for a Turaev-Viro TQFT with topological boundaries and defects from~\cite{FSV} to derive conditions for a Kitaev model with topological defects and boundaries. The algebraic input for a Turaev-Viro TQFT without defects is the center $\mathcal{Z}(\mathcal{A})$ of a fusion category $\mathcal{A}$, whereas the algebraic input for the Kitaev model without defects is the Drinfel'd double $D(H)$ of a finite-dimensional, semisimple Hopf algebra $H$. These two are related by \emph{Tannaka-Krein duality}~\cite{U,S}:
\begin{compactenum}[$\quad\bullet$]
\item the category $H\mathrm{-Mod}$ is a fusion category and its center is equivalent to $D(H)\mathrm{-Mod}$ as a braided monoidal category, and
\item a strict fiber functor $\mathcal{A}\to \mathrm{Vect}_{\C}$ defines a finite-dimensional, semisimple Hopf algebra $H$ such that $\mathcal{A}\cong H\mathrm{-Mod}$.
\end{compactenum}
In the Kitaev model, the Hopf algebra $D(H)$ enters not only as input data. The prominent symmetries of the extended space $\HSpace$ are local $D(H)$-module structures, and to every object $M$ of $D(H)\mathrm{-Mod}$ and site $s$ in $\Gamma$ we can assign a subspace $\HSpace(s,M)$, the excitation of type $M$ at $s$. The fusion of excitations in the Kitaev model uses the tensor product of $D(H)\mathrm{-Mod}$, i.e. the coalgebra structure of $D(H)$, and the braiding of excitations uses the $R$-matrix of $D(H)$. This comprehensive view of the relation between the Kitaev model based on $H$ and the braided monoidal category $D(H)\mathrm{-Mod}$ serves as a starting point for the translation of the conditions for a Turaev-Viro TQFT with topological defects and boundaries into conditions for a Kitaev model with topological defects and boundaries. In Section~\ref{section:TranslationKitaev} we apply Tannaka-Krein duality to the categorical data for Turaev-Viro TQFTs with topological defects and boundaries from~\cite{FSV} and obtain the following result:\\ \\ \textbf{Theorem 1:} \emph{ The algebraic data for Kitaev models with topological boundaries and defects is
\begin{compactenum}[$\quad\bullet$]
\item a Drinfel'd double $D(H_{b})$ of a complex finite-dimensional, semisimple Hopf algebra $H_{b}$ for every bulk region $b$,
\item a twist $F_{c}$ of $D(H_{b})$ for every boundary line $c$ adjacent to the bulk region $b$,
\item a twist $F_{d}$ of $D(H_{b_{1}}) \oo D(H_{b_{2}})$ for every defect line $d$ separating two bulk regions $b_{1}$ and $b_{2}$.
\end{compactenum}
This data is Tannaka dual to the categorical data for Turaev-Viro TQFTs with topological defects and boundaries. } We then use this data to define a Kitaev model with topological defects and boundaries. As a second ingredient, our model uses a ribbon graph $\Gamma$ with additional structure for defects and boundaries. From this data we construct an extended space $\HSpace$ together with the following local operators:
\begin{compactenum}[$\quad\bullet$]
\item For every bulk region $b$ and every boundary adjacent to $b$: Local operators which define a $D(H_{b})$-module structure on the extended space $\HSpace$.
\item For every defect $d$ separating two bulk regions $b_{1}$ and $b_{2}$: Local operators which define a $D(H_{b_{1}}) \oo D(H_{b_{2}})$-module structure on the extended space $\HSpace$.
\end{compactenum}
Excitations in this model are then either $D(H_{b})$-modules or $D(H_{b_{1}}) \oo D(H_{b_{2}}) $-modules. We implement the movement, fusion (i.e. the tensor product) and the braiding of excitations in our model by a \emph{transport operator} $T_{\rho}:\HSpace\to \HSpace$. It depends on a path $\rho:s_{1}\to s_{2}$ in the thickened graph $D(\Gamma)$, moves excitations from the site $s_{1}$ to $s_{2}$, and fulfills the following conditions, which we derived from the conditions for a Turaev-Viro TQFT with topological boundaries and defects in~\cite{FSV}:\\ \\ \textbf{Theorem 2:}\emph{ The map $T_{\rho}$ fuses excitations $M_{1}$ at $s_{1}$ and $M_{2}$ at $s_{2}$ into $M_{2} \oo M_{1}$ at $s_{2}$, where $ \oo $ is the tensor product
\begin{compactenum}[$\quad\bullet$]
\item of $D(H_{b})\mathrm{-Mod}$, if $s_{2}$ lies in a bulk region,
\item of $D(H_{b})_{F_{c}}\mathrm{-Mod}$, if $s_{2}$ lies in a boundary line,
\item of $\left( D(H_{b_{1}}) \oo D(H_{b_{2}}) \right)_{F_{d}}\mathrm{-Mod}$, if $s_{2}$ lies in a defect line.
\end{compactenum}
There is a path $\rho':s_{1}\to s_{2}$ canonically related to $\rho$ such that $T_{\rho'}$ fuses $M_{1}$ and $M_{2}$ into $M_{1} \oo M_{2}$. $T_{\rho}$ and $T_{\rho'}$ differ only by a braiding
\begin{compactenum}[$\quad\bullet$]
\item of $D(H_{b})\mathrm{-Mod}$, if $s_{2}$ lies in a bulk region,
\item of $D(H_{b})_{F_{c}}\mathrm{-Mod}$, if $s_{2}$ lies in a boundary line,
\item of $\left( D(H_{b_{1}}) \oo D(H_{b_{2}}) \right)_{F_{d}}\mathrm{-Mod}$, if $s_{2}$ lies in a defect line.
\end{compactenum}
} These two theorems show that our model can indeed be considered a Kitaev model counterpart to a Turaev-Viro TQFT with topological boundaries and defects. We conclude this article with an examination of \emph{transparent defects}. These are trivial defects between two bulk regions labeled with the same Hopf algebra $H$. We show that these defects can be removed and that, by removing them in a model with only transparent defects, one obtains the Kitaev model without defects and boundaries from \cite{BMCA}. \\ \\ \textbf{Structure of the article} \\ \\ In Section~\ref{section:HopfAlgebras} we introduce the required background on Hopf algebras and tensor categories. Section~\ref{sec:ribbon} contains the relevant information on ribbon graphs. In Section~\ref{section:KitaevModel} we present the Kitaev model based on a finite-dimensional semisimple Hopf algebra from~\cite{BMCA}. In Section~\ref{section:FSVSummary} we summarize the categorical data for a Turaev-Viro TQFT with topological defects and boundaries from~\cite{FSV} and use Tannaka-Krein reconstruction to obtain corresponding Hopf algebraic data (Theorem~1). \\ In Section~\ref{section:TranslationKitaev} we then determine how this Hopf algebraic data should appear in a Kitaev model with topological defects and boundaries. We translate the conditions for moving, fusion and braiding of excitations from~\cite{FSV} into conditions on a Kitaev model with topological defects and boundaries. \\ Section~\ref{sec:defect model} constructs a Kitaev model with topological boundaries and defects based on the data we determined in Section~\ref{section:FSVSummary}. We define an extended space, face and vertex operators, holonomies and a transport operator for moving excitations.
We then show that the conditions we derived in Section~\ref{section:TranslationKitaev} are satisfied by our model (Theorem~2). We conclude this section by relating the Kitaev model without defects from~\cite{BMCA} to a Kitaev model with transparent defects. \\ \\
\section{Hopf algebras, Heisenberg doubles, modules} \label{section:HopfAlgebras}
In this section we summarize background on Hopf algebras. Most results we present can be found in standard textbooks such as~\cite{EGNO,Ka,Ma,Mo,R}; otherwise, specific citations are given.
\subsection{Hopf algebras}
In the following we focus on finite-dimensional semisimple Hopf algebras over $\mathbb C$. Throughout the article we use Sweedler notation without summation signs and write $\Delta(h)=h_{(1)}\otimes h_{(2)}$ for the comultiplication. For a Hopf algebra $H$ we denote by $H^{op}$, $H^{cop}$ and $H^{op,cop}$ the Hopf algebras with the opposite multiplication, the opposite comultiplication, and both the opposite multiplication and comultiplication, respectively. We denote by $\langle\;,\;\rangle: H^*\oo H\to \C$, $\langle \alpha, h\rangle=\alpha(h)$ the pairing between $H$ and $H^*$ and use the same symbol for the induced pairing $\langle\;,\;\rangle: H^{*\otimes n}\oo H^{\otimes n}\to\C$. Throughout the article, we use Roman letters for elements of $H$ and Greek letters for elements of $H^*$. For any finite-dimensional $H$-left module $M$, with the action of $h\in H$ on $m\in M$ denoted by $h \vartriangleright m$, the dual vector space $M^{*}$ is both an $H$-right module and an $H$-left module. The actions of $h\in H$ on $\alpha\in M^{*}$ are given by \begin{align*} \alpha \vartriangleleft h = \alpha\left( h \vartriangleright (\cdot) \right), \qquad h \vartriangleright \alpha = \alpha\left( S(h) \vartriangleright (\cdot) \right). \end{align*} By the Artin-Wedderburn theorem, see for instance \cite[Chapter II]{Kn}, any finite-dimensional semisimple complex Hopf algebra $H$ can be decomposed as a direct sum of representatives of the irreducible $H$-modules tensored with their duals. \begin{theorem}[Artin-Wedderburn] Let $H$ be a semisimple finite-dimensional Hopf algebra and let $\mathrm{Irr}(H)$ be a system of representatives of irreducible $H$-modules. Then there is an isomorphism of bimodules \begin{align} H \cong \bigoplus_{s \in \mathrm{Irr}(H)} s \oo s^{*}, \label{eq:ArtinWedderburn} \end{align} where $H$ is equipped with the bimodule structure by left and right multiplication and $s \oo s^{*}$ with the left action of $H$ on $s$ and the right action on $s^{*}$ dual to the left action on $s$. \label{theorem:ArtinWedderburn} \end{theorem} Recall that by Larson-Radford's theorem a finite-dimensional Hopf algebra $H$ over $\C$ is semisimple if and only if $H^*$ is semisimple and if and only if its antipode is involutive. Recall also that by Maschke's theorem this is equivalent to the existence of a normalized Haar integral, and that this normalized Haar integral is unique. \begin{theorem} For every finite-dimensional semisimple Hopf algebra $H$ over $\C$, there is a unique element $\lambda\in H$, the \emph{Haar integral} of $H$, with \begin{align} \label{eq:HaarIntegral} \varepsilon(\lambda)=1,\qquad \lambda\cdot h=h\cdot \lambda=\varepsilon(h)\lambda\quad \forall h\in H. \end{align} \end{theorem} For a given Hopf algebra $H$, we also consider the Drinfel'd double $D(H)$, its dual and the Hopf algebras obtained by twisting $H$ and $D(H)$; see for instance the textbooks \cite{Ma, EGNO, Ka, Mo}.
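Before turning to these constructions, we record a standard example for orientation; it is included here only as an illustration and is not taken from the sources above. For the group algebra $H=\C[G]$ of a finite group $G$, with $\Delta(g)=g\oo g$, $\varepsilon(g)=1$ and $S(g)=g^{-1}$ for all $g\in G$, the Haar integral is
\begin{align*}
\lambda=\frac{1}{|G|}\sum_{g\in G}g ,
\end{align*}
since $\varepsilon(\lambda)=1$ and $g'\cdot\lambda=\lambda\cdot g'=\lambda=\varepsilon(g')\lambda$ for every $g'\in G$, and hence $h\cdot\lambda=\lambda\cdot h=\varepsilon(h)\lambda$ for all $h\in H$ by linearity. The Drinfel'd double of $\C[G]$, defined next, is the familiar quantum double of the finite group $G$.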
\begin{definition} \label{def:Drinfel'dDouble} The Drinfel'd double of a finite-dimensional Hopf algebra $H$ is the quasitriangular Hopf algebra $D(H)=H^{*} \oo H$ with multiplication \begin{align} \left( \alpha \oo h \right)\cdot \left( \beta \oo k \right) := \langle \beta_{(1)} \oo \beta_{(3)} , S^{-1}(h_{(3)}) \oo h_{(1)} \rangle \alpha \beta_{(2)} \oo h_{(2)} k \label{eq:Drinfel'dMultiplication} \end{align} and comultiplication \begin{align} \Delta\left( \alpha \oo h \right) = \alpha_{(2)} \oo h_{(1)} \oo \alpha_{(1)} \oo h_{(2)}. \end{align} Its unit $1_{D(H)}$, counit $\varepsilon:D(H)\to \F$ and antipode $S:D(H) \to D(H)$ are \begin{align*} 1_{D(H)} &= \varepsilon \oo 1_{H},\qquad \varepsilon\left( \alpha \oo h \right) = \alpha(1)\varepsilon(h), \\ S(\alpha \oo h) &= \langle \alpha_{(1)} \oo \alpha_{(3)} , h_{(3)} \oo S^{-1}(h_{(1)}) \rangle S^{-1}(\alpha_{(2)}) \oo S(h_{(2)}). \end{align*} Its $R$-matrix is the element $R\in D(H) \oo D(H)$ given by \begin{align}\label{eq:Rmatrix} R = \sum_{i=1}^{n} \varepsilon \oo a_{i} \oo \alpha_{i} \oo 1_{H}=: \varepsilon\oo x\oo X\oo 1_H, \end{align} where $\left( a_{1},\dots,a_{n} \right)$ is a basis of $H$ with dual basis $\left( \alpha_{1},\dots,\alpha_{n} \right)$ and the right-hand side is symbolic notation. \end{definition} From the formula for the antipode of $D(H)$ it is easy to see that the antipode of $D(H)$ is involutive if and only if the antipode of $H$ is involutive. For $H$ finite-dimensional over $\C$, Larson-Radford's theorem then implies that $D(H)$ is semisimple if and only if $H$ is semisimple. When using multiple copies of $x \oo X$ in the same equation we distinguish them by using different upper indices $x \oo X= x^{1} \oo X^{1} = x^{2} \oo X^{2}=\cdots $. By using Hopf algebra duality and some direct computations, one finds that the dual of the Drinfel'd double is given as follows. \begin{remark} \label{lemma:DualDrinfel'd} The dual of the Drinfel'd double of a finite-dimensional Hopf algebra $H$ is the vector space $D(H)^{*}= H \oo H^{*}$ with multiplication and comultiplication \begin{align} &\left( h \oo \alpha \right) \left( k \oo \beta \right) = kh \oo \alpha\beta \label{eq:Drinfel'dDualMultiplication}\\ &\Delta_{D(H)^{*}} (h \oo \alpha) = \sum_{i,j=1}^n h_{(1)} \oo \alpha_{i} \alpha_{(1)} \alpha_{j} \oo S(a_{j})h_{(2)}a_{i} \oo \alpha_{(2)} \label{eq:Drinfel'dDualComultiplication}\\ &\qquad\qquad \qquad =: h_{(1)}\oo X^1\alpha_{(1)} X^2\oo S(x^2)h_{(2)} x^1\oo \alpha_{(2)}.\nonumber \end{align} Its unit $1_{D(H)^{*}}$, counit $\varepsilon_{D(H)^{*}}$ and antipode $S_{D(H)^{*}}$ are \begin{align} 1_{D(H)^{*}}= 1_{H} \oo \varepsilon,\quad \varepsilon_{D(H)^{*}}(h \oo \alpha) = \varepsilon(h)\alpha(1) \\ S_{D(H)^{*}}(h \oo \alpha) = x^{1}S^{-1}(h)x^{2} \oo S^{-1}(X^{2})S(\alpha)X^{1}. \end{align} \end{remark} The dual of the Drinfel'd double is a special case of a twisted Hopf algebra, namely a twisting of the Hopf algebra $H\oo H^*$ with the opposite of the universal $R$-matrix in \eqref{eq:Rmatrix}. In the following, we consider more general twistings with unitary cocycles. \begin{definition} \label{def:Twist} Let $B$ be a bialgebra.
A twist (or unitary 2-cocycle) for $B$ is an invertible element $F \in B \otimes B$ such that: \begin{align} (\varepsilon \otimes \id_{B}) (F) = 1_{B} = (\id_{B} \otimes \varepsilon ) (F) \label{eq:TwistEpsilon}\\ F_{12} \cdot (\Delta \otimes \id_{B}) (F) = F_{23} \cdot (\id_{B} \otimes \Delta) (F) \label{eq:TwistDelta} \end{align} We write $F = F^{(1)} \otimes F^{(2)}$ in Sweedler notation, and $F_{12} := F^{(1)} \otimes F^{(2)} \otimes 1_{B}$, $F_{23} := 1_{B} \otimes F^{(1)} \otimes F^{(2)}$. \end{definition} When using multiple copies of the same element $F\in B \oo B$ in the same equation we distinguish them by using different upper indices $F=F^{(1)} \oo F^{(2)}=F^{(3)} \oo F^{(4)}=\cdots$. For instance, Equation~\eqref{eq:TwistDelta} reads in Sweedler notation: \begin{align*} F^{(1)}F^{(3)}_{(1)} \oo F^{(2)}F^{(3)}_{(2)} \oo F^{(4)} = F^{(3)} \oo F^{(1)}F^{(4)}_{(1)} \oo F^{(2)}F^{(4)}_{(2)} \end{align*} If $F$ is invertible we similarly write $F^{-1}=F^{(-1)} \oo F^{(-2)}=F^{(-3)} \oo F^{(-4)}=\cdots$. \begin{example} \label{example:ProjectedTwist} Let $H$ and $K$ be bialgebras. \begin{itemize} \item Any universal R-matrix $R\in H \oo H$ is also a twist for $H$. \item If $H\subseteq K$ is a sub-bialgebra and $F \in H \oo H$ is a twist for $H$, then $F$ is also a twist for $K$. \item Let $F\in H \oo K \oo H \oo K$ be a twist for the bialgebra $H \oo K$. Then the projections of $F$ onto $H \oo H$ and $K \oo K$ \begin{align} F_{H} &= \left(\id_{H} \oo \varepsilon_{K} \oo \id_{H} \oo \varepsilon_{K}\right)(F) \in H \oo H \label{eq:ProjectedTwistLeft} \\ F_{K} &= \left( \varepsilon_{H} \oo \id_{K} \oo \varepsilon_{H} \oo \id_{K} \right)(F) \in K \oo K \label{eq:ProjectedTwistRight} \end{align} are twists for $H$ and $K$, respectively. \end{itemize} \end{example} Twists allow one to change a bialgebra or Hopf algebra by modifying its comultiplication and antipode. The following is a standard result; see for instance \cite[Chapter 5.14]{EGNO}. \begin{lemma}\label{lemma:TwistedHopfAlgebra} Let $H=\left( H,\mu,\eta, \Delta,\varepsilon,S \right)$ be a finite-dimensional Hopf algebra and $F\in H \oo H$ a twist for $H$. Then there is a twisted Hopf algebra $H_{F} = \left( H,\mu_{F}=\mu,\eta_{F}=\eta,\Delta_{F},\varepsilon_{F}=\varepsilon, S_{F} \right)$ with comultiplication and antipode given by \begin{align*} \Delta_{F}(h)&= F\cdot \Delta(h)\cdot F^{-1},\qquad S_{F}(h)=Q\cdot S(h) \cdot Q^{-1}, \end{align*} where \begin{align} \label{eq:TwistDrinfel'dElement} Q:=F^{(1)}S(F^{(2)}),\quad Q^{-1}=S(F^{(-1)})F^{(-2)}. \end{align} If $H$ is quasitriangular with universal $R$-matrix $R$, then $H_F$ is quasitriangular with universal $R$-matrix \begin{align} R^F=F_{21} R F^{-1}. \label{eq:TwistedRMatrix} \end{align} We write $\Delta_{F}(h)=h_{(F1)} \oo h_{(F2)}$ in Sweedler notation. \end{lemma} Another standard result relates the tensor categories defined by the two Hopf algebras $H$ and $H_{F}$; see for instance \cite[Proposition 5.14.4]{EGNO}: \begin{proposition} \label{proposition:FunctorTranslation} The following data defines a tensor equivalence $G:H_{F}\mathrm{-Mod}\to H\mathrm{-Mod}$: \begin{itemize} \item For any $H$-module $M$, $GM$ is the same underlying vector space equipped with the same $H$-module structure. \item For any morphism $f:M\to N$ in $H\mathrm{-Mod}$, $Gf:GM \to GN$ is the same linear map. \item For the tensor unit $\mathbb{F}$ of $H_{F}\mathrm{-Mod}$, the natural isomorphism $G\mathbb{F}\to \mathbb{F}$ to the tensor unit $\mathbb{F}$ of $H\mathrm{-Mod}$ is the identity.
\item For $H$-modules $(M, \vartriangleright_{M}),(N, \vartriangleright_{N})$, the natural isomorphism $G\left( M \oo_{F} N \right) \to GM \oo GN$ is the linear map \begin{align} G\left(M\oo_{F} N\right) &\to GM \oo GN,\nonumber\\ m \oo n &\mapsto \left(F^{(-1)} \vartriangleright_{M}m \right) \oo \left( F^{(-2)} \vartriangleright_{N} n \right) \label{eq:TwistCoherenceData} \end{align} \end{itemize} If $H$ is quasitriangular, then $G$ is a braided tensor equivalence. \end{proposition} Twisting defines an equivalence relation on the class of complex Hopf algebras. In the following, we consider Hopf algebras that are isomorphic up to a twist and call such Hopf algebras \emph{twist equivalent}. \begin{definition} \label{definition:TwistEquivalence} Let $H$ and $K$ be Hopf algebras. A \emph{twist equivalence} is a pair $(F,\varphi)$ of a twist $F$ of $H$ and an isomorphism $\varphi:H_{F} \to K$ of Hopf algebras. If both $H$ and $K$ are quasitriangular and $\varphi$ is an isomorphism of quasitriangular Hopf algebras, then we call $(F,\varphi)$ a \emph{braided twist equivalence}. \end{definition} \begin{example}\cite[13.6.1]{R} If $(K,R)$ is a factorizable Hopf algebra, then $K \oo K$ is twist equivalent to $D(K)$, where $F=1_{K} \oo R^{(2)} \oo R^{(1)} \oo 1_{K}\in K^{\oo 4}$ is a twist of $K \oo K$ and the isomorphism $\varphi$ is given by \begin{align} \varphi: &D(K) \to (K \oo K)_{F}\nonumber\\ &\alpha \oo h \mapsto \langle \alpha_{(2)} , R^{(2)} \rangle \langle \alpha_{(1)} , R^{(3)} \rangle S(R^{(1)})\cdot h_{(1)} \oo R^{(4)}h_{(2)} \label{eq:Drinfel'dFactorisableIsomorphism} \end{align} in the notation introduced after Definition \ref{def:Twist}. The twist equivalence becomes braided when $K \oo K$ is equipped with the $R$-matrix \begin{align*} R^{(-2)} \oo R^{(1)} \oo R^{(-1)} \oo R^{(2)}\in K^{ \oo 4}. \end{align*} \label{example:Drinfel'dQuadrupleTwist} \end{example} Twisting a semisimple Hopf algebra $H$ preserves its semisimplicity, as twisting does not change the algebra structure of $H$. Additionally, the Haar integrals of $H$ and $H^*$ remain unchanged under twisting, and the element $Q$ from \eqref{eq:TwistDrinfel'dElement} is preserved by the antipode: \begin{remark} \label{remark:TwistedSemisimpleHopfalgebra}\cite[Remark 3.8]{AE} If $H$ is a semisimple finite-dimensional Hopf algebra with Haar integrals $\lambda\in H$, $\smallint \in H^{*}$, and $F\in H\oo H$ is a twist for $H$, then $S(Q)=Q$, $\lambda$ is a Haar integral of $H_{F}$ and $\smallint$ is a Haar integral of $(H_{F})^{*}$. \end{remark} \subsection{Module and comodule algebras} In this section we introduce certain module and comodule algebras over a Hopf algebra $H$ and its Drinfel'd double $D(H)$ that are required in the following. \begin{definition} Let $H$ be a Hopf algebra. \begin{enumerate} \item An \emph{$H$(-left) module algebra} is an associative unital algebra $(A,m,\eta)$ with an $H$-module structure $\rhd: H\oo A\to A$ such that $m$ and $\eta$ are morphisms of $H$-modules. \item An \emph{$H$(-left) comodule algebra} is an associative unital algebra $(A,m,\eta)$ with an $H$-comodule structure $\delta: A\to H\oo A$ such that $m$ and $\eta$ are morphisms of $H$-comodules. \end{enumerate} \end{definition} $H$-right module algebras are defined as $H$-left module algebras over $H^{op}$ and $(H,H)$-bimodule algebras as left module algebras over $H\oo H^{op}$. Analogously, $H$-right comodule algebras are left comodule algebras over $H^{cop}$ and $(H,H)$-bicomodule algebras are left comodule algebras over $H\oo H^{cop}$.
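A standard example, added here only for concreteness, is the Hopf algebra $H$ itself, which is an $H$-left module algebra with respect to the left adjoint action
\begin{align*}
h \vartriangleright a = h_{(1)}\,a\,S(h_{(2)}), \qquad h,a\in H ,
\end{align*}
since $h \vartriangleright (ab)=(h_{(1)}\vartriangleright a)(h_{(2)}\vartriangleright b)$ and $h \vartriangleright 1=\varepsilon(h)1$ for all $h,a,b\in H$.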
By composing the action of $H$ with the antipode of $H$, one can transform $H$-right module (co)algebras into $H^{cop}$-left module algebras and $H$-right comodule (co)algebras into $H^{op}$-left comodule (co)algebras. By duality, any $H$-comodule algebra structure on a vector space $V$ corresponds to a $H^*$-right module algebra structure on $V$ and vice versa. \begin{example}\label{definition:CoregularAction} Let $H$ be a finite-dimensional Hopf algebra. Then $H$ is an $H$-bicomodule algebra with the \emph{left and right regular coaction} $\delta=\Delta: H\to H\oo H$. Its dual $H^*$ is an $H$-module algebra with the \emph{left coregular action} \begin{align} \label{eq:CoregularLeftAction} \vartriangleright: H \oo H^{*} &\to H^{*},\quad h \vartriangleright \alpha= \langle \alpha_{(2)} , h \rangle \alpha_{(1)} \end{align} and an $H$-right module algebra with the \emph{right coregular action} \begin{align} \label{eq:CoregularRightAction} \vartriangleleft: H^{*} \oo H &\to H^{*},\quad \alpha \vartriangleleft h= \langle \alpha_{(1)} , h \rangle \alpha_{(2)}. \end{align} The latter defines an $H^{cop}$-left module algebra structure given by \begin{align} \rhd': H \oo H^{*} \to H^{*},\quad h \oo \alpha\mapsto \alpha \vartriangleleft S(h). \label{eq:RightCoregularLeftAction} \end{align} \end{example} \begin{remark} For a Drinfel'd double $D(H)$, the left and right coregular actions take the form: \begin{align} \vartriangleright: D(H) \oo D(H)^{*} \;\; &\to D(H)^{*} \nonumber\\ \left( \beta \oo k \right) \vartriangleright \left( h \oo \alpha \right) &= \langle \beta \oo k, S(x^{2})h_{(2)}x^{1} \oo \alpha_{(2)} \rangle h_{(1)} \oo X^{1}\alpha_{(1)}X^{2}\nonumber \\ &= \langle \beta_{(2)} \oo k , h_{(2)} \oo \alpha_{(2)} \rangle h_{(1)} \oo \beta_{(3)}\alpha_{(1)}S(\beta_{(1)}) \label{eq:LeftCoregularActionDrinfel'd} \\ \vartriangleleft: D(H)^{*} \oo D(H) \;\;&\to D(H)^{*}\nonumber\\ \left( h \oo \alpha \right) \vartriangleleft \left( \beta \oo k \right) &= \langle \beta \oo k , h_{(1)} \oo X^{1}\alpha_{(1)} X^{2} \rangle S(x^{2})h_{(2)}x^{1} \oo \alpha_{(2)} \nonumber \\ &= \langle \beta \oo k_{(2)} , h_{(1)} \oo \alpha_{(1)} \rangle S(k_{(3)})h_{(2)}k_{(1)} \oo \alpha_{(2)}, \label{eq:RightCoregularActionDrinfel'd} \end{align} where we use the symbolic notation from Definition \ref{def:Drinfel'dDouble} and Remark \ref{lemma:DualDrinfel'd}. \end{remark} Given a module algebra $A$ over a Hopf algebra $H$, one can form the smash product algebra $A\# H$. For the module algebras from Example \ref{definition:CoregularAction} this yields the \emph{Heisenberg double} algebras. As $H$ is finite-dimensional, modules over the Heisenberg double are in bijection with Hopf modules over $H$, see for instance \cite[Cor 7.10.7, Defs 7.10.8, 7.10.9]{EGNO}. Just as Hopf modules, Heisenberg doubles exist in several versions, for the different combinations of $H$-left or right-actions on $H^*$. Further versions are obtained by exchanging the Hopf algebras $H$ and $H^*$. \begin{definition} \label{def:HeisenbergDouble} The Heisenberg double $\mathcal{H}_{R}(H)$ is the vector space $H \oo H^{*}$ with the algebra structure given by \begin{align} \left( h \oo \alpha \right) \cdot \left( k \oo \beta \right) := \langle \alpha_{(1)} , k_{(2)} \rangle hk_{(1)} \oo \alpha_{(2)}\beta. 
\label{eq:HeisenbergRightMult} \end{align} The Heisenberg double $\overline{\mathcal{H}}_{R}(H)$ is the vector space $H \oo H^{*}$ with the algebra structure given by \begin{align} \left( h \oo \alpha \right) \cdot \left( k \oo \beta \right) := \langle \alpha_{(1)} , S(k_{(2)}) \rangle k_{(1)}h_{(1)} \oo \beta\alpha_{(2)}. \label{eq:HeisenbergRightAlternativeMult} \end{align} \end{definition} \begin{remark}\label{rem:heisenbergdoubleendo} The right Heisenberg double $\mathcal{H}_{R}(H)$ is isomorphic to $\mathrm{End}_{\C}(H)$, where the isomorphism is given in terms of the action \begin{align} \vartriangleright: \mathcal{H}_{R}(H) \oo H &\to H\nonumber\\ \left( h \oo \alpha \right) \vartriangleright m = \langle \alpha , m_{(2)} \rangle hm_{(1)} \label{eq:HeisenbergAction} \end{align} of $\mathcal{H}_{R}(H)$ on $H$, see for instance \cite[9.4.3]{Mo}. \end{remark} \begin{remark}$\quad$ \label{remark:HeisenbergDoubleComoduleAlgebra} \begin{compactenum} \item $\mathcal{H_{R}}(H)$ is a $D(H)^{*,op}$-left comodule algebra with the coaction given by the comultiplication \begin{align*} \Delta_{D(H)^{*}}: \mathcal{H}_{R}(H) \to D(H)^{*,op} \oo \mathcal{H}_{R}(H) \end{align*} and a $D(H)^{*}$-right comodule algebra with the coaction given by the comultiplication \begin{align*} \Delta_{D(H)^{*}}: \mathcal{H}_{R}(H) \to \mathcal{H}_{R}(H) \oo D(H)^{*} \end{align*} \label{remarkPoint:RightHeisenbergComoduleAlgebra} \item $\overline{\mathcal{H}}_{R}(H)$ is a $D(H)^{*}$-left comodule algebra with the coaction given by the comultiplication \begin{align*} \Delta_{D(H)^{*}}: \overline{\mathcal{H}}_{R}(H) \to D(H)^{*} \oo \mathcal{H}_{R}(H) \end{align*} and a $D(H)^{*,op}$-right comodule algebra with the coaction given by the comultiplication \begin{align*} \Delta_{D(H)^{*}}: \mathcal{H}_{R}(H) \to \mathcal{H}_{R}(H) \oo D(H)^{*,op} \end{align*} \label{remarkPoint:RightHeisenbergAlternativeComoduleAlgebra} \item The comultiplication of $D(H)^{*}$ is an algebra homomorphism \begin{align} \Delta_{D(H)^{*}} : D(H)^{*} \to \overline{\mathcal{H}}_{R}(H) \oo \mathcal{H}_{R}(H) \label{eq:ComultHeisenbergDoubleAlgebraHom} \end{align} and an algebra homomorphism \begin{align} \Delta_{D(H)^{*}} : D(H)^{*,op} \to \mathcal{H}_{R}(H) \oo \overline{\mathcal{H}}_{R}(H) \label{eq:ComultHeisenbergDoubleOppositeAlgebraHom} \end{align} \end{compactenum} \end{remark} \begin{remark} The $D(H)^{*}$-comodule (resp. $D(H)^{*,op}$-comodule) algebras $\mathcal{H}_{R}(H)$ and $\overline{\mathcal{H}}_{R}(H)$ are related to $D(H)^{*}$ and $D(H)^{*,op}$ by a one-sided cotwist with the $R$-matrix from \eqref{eq:Rmatrix}. Concretely, we have for $h \oo \alpha,k \oo \beta \in H \oo H^{*}$: \begin{align} \left( h \oo \alpha \right)\cdot_{\mathcal{H}_{R}(H)} \left( k \oo \beta \right) &= \left( R^{(2)} \vartriangleright \left( k \oo \beta \right) \right) \cdot_{D(H)^{*}} \left( R^{(1)} \vartriangleright \left( h \oo \alpha \right) \right) \label{eq:RightHeisenbergDoubleRTwist} \\ \left( h \oo \alpha \right)\cdot_{\overline{\mathcal{H}}_{R}(H)} \left( k \oo \beta \right) &= \left( R^{(-1)} \vartriangleright \left( h \oo \alpha \right) \right) \cdot_{D(H)^{*}} \left( R^{(-2)} \vartriangleright \left( k \oo \beta \right) \right) \label{eq:RightHeisenbergDoubleAlternativeRTwist} \end{align} \end{remark} \subsection{Tensor categories and Tannaka duality} In this section we consider Tannaka reconstruction for fusion categories over $\C$ (see e.g.~\cite{EGNO,S,U,Ma,JS}). 
We follow the conventions of \cite[Chapters 6 and 9]{EGNO} and define \begin{definition}$\quad$ \label{definition:FusionCat} \begin{compactenum} \item A \emph{fusion category} is a $\C$-linear, finitely semisimple, rigid monoidal category. \item A \emph{fiber functor} is a faithful, $\C$-linear, strict monoidal functor from a fusion category to $\mathrm{Vect}_{\C}^{fin}$. \end{compactenum} \end{definition} For $\mathcal{C}$ a fusion category and $\omega_{\mathcal{C}}: \mathcal{C}\to \mathrm{Vect}_{\C}^{fin}$ a fiber functor, we denote by $\mathrm{End}(\omega_{\mathcal{C}})$ the set of natural endomorphisms of $\omega_{\mathcal{C}}$. The composition of natural endomorphisms equips $\mathrm{End}(\omega_{\mathcal{C}})$ with a $\C$-algebra structure. The algebra $\mathrm{End}(\omega_{\mathcal{C}})$ also inherits a $\C$-coalgebra structure from the tensor product of $\mathcal{C}$, for details see~\cite[Chapter~5.2]{EGNO}. We denote by $\boxtimes$ the Deligne product of locally finite $\C$-linear categories, see \cite[Chapter 1.11]{EGNO}, and by $\varphi_{\mathcal C}: \mathcal Z(\mathcal C)\to \mathcal C$ the canonical monoidal functor that forgets the half-braidings. We then have \begin{lemma} \label{lemma:FibreFunctorClassic} Let $\mathcal{C},\mathcal{D}$ be fusion categories and $\omega_{\mathcal{C}}, \omega_{\mathcal{D}}$ be fiber functors for $\mathcal{C}$ and $\mathcal{D}$. \begin{compactenum} \item Then $H=\mathrm{End}(\omega_{\mathcal{C}})$ and $K=\mathrm{End}(\omega_{\mathcal{D}})$ are semisimple, finite-dimensional Hopf algebras over $\C$. \item There is a strict tensor equivalence $\mathcal{C}\cong H\mathrm{-Mod}$. \item If $\mathcal{C}$ is braided, then $H$ admits a quasitriangular structure such that the equivalence is braided. \item There is an isomorphism $D(H) \cong \mathrm{End}(\omega_\mathcal{C}\circ \varphi_\mathcal{C})$ of quasitriangular Hopf algebras. \item There is an isomorphism $H \oo K \cong \mathrm{End}( \oo_{\C} \circ (\omega_{\mathcal{C}} \boxtimes \omega_{\mathcal{D}}))$ of Hopf algebras. \end{compactenum} \end{lemma} The first two statements in Lemma \ref{lemma:FibreFunctorClassic} follow directly from \cite[Theorem 5.3.12]{EGNO}, and the third is a direct consequence of the first two. For the fourth statement, see \cite[Chapter 7.14]{EGNO}, in particular the discussion before Definition 7.14.1. The last statement follows from the first two and the definition of the Deligne product. \begin{lemma} \label{lemma:FibreFunctorTensorEquivalence} Let $\mathcal{C}, \mathcal{D}$ be fusion categories, $T:\mathcal{C} \to \mathcal{D}$ a $\C$-linear tensor equivalence and $\omega_{\mathcal{C}},\omega_{\mathcal{D}}$ be fiber functors for $\mathcal{C}$ and $\mathcal{D}$ such that there is a natural isomorphism \begin{align*} \omega_{\mathcal{C}} \cong \omega_{\mathcal{D}}\circ T \end{align*} of tensor functors. Then the Hopf algebra $H=\mathrm{End}(\omega_{\mathcal{C}})$ admits a twist $F\in H \oo H$ such that $K=\mathrm{End}(\omega_{\mathcal{D}})\cong H_{F}$ as Hopf algebras. If $\mathcal{C}$ and $\mathcal{D}$ are braided and $T$ is a braided equivalence, then $K\cong H_{F}$ as quasitriangular Hopf algebras. \end{lemma} \begin{proof} The statement for tensor categories $\mathcal{C},\mathcal{D}$ follows immediately from \cite[Prop 5.14.4]{EGNO}. The braided version follows by combining with Lemma~\ref{lemma:FibreFunctorClassic}.3. \end{proof} The following table shows the relation between categorical notions and algebraic structures given by a fiber functor.
\\ \begin{tabular}[center]{|c|c|} \hline categorical notion & algebraic structure \\ \hline\hline (braided) fusion category $\mathcal{C},\mathcal{D}$ & (quasitriangular) semisimple Hopf algebras $H$, $K$ \\ \hline center $\mathcal{Z(D)}$ & Drinfel'd double $D(K)$ \\\hline Deligne product $\mathcal{C} \boxtimes \mathcal{D}$ & tensor product Hopf algebra $H \oo K$ \\\hline tensor equivalence $\mathcal{C}\to \mathcal{D}$ & twist $F$ with $H_{F} \cong K$ \\\hline \end{tabular} \section{Ribbon graphs} \label{sec:ribbon} In this section, we summarize the required background on {\em embedded graphs} or {\em ribbon graphs}. For more details, see for instance \cite{BL,LZ}. All graphs considered in this article are directed and finite, but we allow loops, multiple edges and univalent vertices. \subsection{Paths} \label{subsec:paths} Paths in a graph are most easily described by orienting the edges of $\Gamma$ and considering the free groupoid generated by the resulting directed graph. Note that different choices of orientation yield isomorphic groupoids. \begin{definition} \label{def:pathgroupoid}The \emph{path groupoid} $\mathcal G_\Gamma$ of a graph $\Gamma$ is the free groupoid generated by $\Gamma$. A \emph{path} in $\Gamma$ from a vertex $v$ to a vertex $w$ is a morphism $\gamma: v\to w$ in $\mathcal G_\Gamma$. \end{definition} The objects of $\mathcal G_\Gamma$ are the vertices of $\Gamma$. A morphism from $v$ to $w$ in $\mathcal G_\Gamma$ is a finite sequence $\rho=e_1^{\epsilon_1}\circ ...\circ e_n^{\epsilon_n}$, $\epsilon_i\in\{\pm 1\}$ of oriented edges $e_i$ and their inverses such that the starting vertex of the first edge $e_{n}^{\epsilon_{n}}$ is $v$, the target vertex of the last edge $e_{1}^{\epsilon_{1}}$ is $w$, and the starting vertex of each edge in the sequence is the target vertex of the preceding edge. These sequences are taken with the relations $e\circ e^\inv=1_{t(e)}$ and $e^\inv\circ e=1_{s(e)}$, where $e^\inv$ denotes the edge $e$ with the reversed orientation, $s(e)$ the starting and $t(e)$ the target vertex of $e$ and we set $s(e^{\pm 1})=t(e^{\mp 1})$. A sequence $e_{1}^{\varepsilon_{1}} \circ \cdots \circ e_{n}^{\varepsilon_{n}}$ is said to be a \emph{reduced word} or \emph{in reduced form}, if it does not contain subsequences of the form $e^{-1}\circ e$ and $e \circ e^{-1}$ for $e\in E$. An edge $e\in E$ with $s(e)=t(e)$ is called a {\em loop}, and a path $\rho\in \mathcal G_\Gamma$ is called {\em closed} if it is an automorphism of a vertex. We call a path $\rho\in\mathcal G_\Gamma$ a {\em subpath} of a path $\gamma \in \mathcal G_\Gamma$ if the expression for $\gamma$ as a reduced word in $E$ is of the form $\gamma=\gamma_1\circ\rho\circ\gamma_2$ with (possibly empty) reduced words $\gamma_1,\gamma_2$. We call it a {\em proper subpath} of $\gamma$ if $\gamma_1,\gamma_2$ are not both empty. We say that two paths $\rho,\gamma\in \mathcal G_\Gamma$ {\em overlap} if there is an edge in $\Gamma$ that is traversed by both $\rho$ and $\gamma$, and by both in the same direction. \subsection{Ribbon graphs} The graphs we consider have additional structure. They are called {\em ribbon graphs}, \emph{fat graphs} or \emph{embedded graphs} and give a combinatorial description of oriented surfaces with or without boundary. \begin{definition} A \emph{ribbon graph} is a directed graph together with a cyclic ordering of the edge ends at each vertex. 
A \emph{ciliated vertex} in a ribbon graph is a vertex together with a linear ordering of the incident edge ends that is compatible with their cyclic ordering. \end{definition} A ciliated vertex in a ribbon graph is obtained by selecting one of its incident edge ends as the starting end of the linear ordering. We indicate this in figures by assuming the counterclockwise cyclic ordering in the plane and inserting a line, the {\em cilium}, that separates the edges of minimal and maximal order, as shown in Figure \ref{fig:ciliated}. We say that an edge end $e_{2}$ at a ciliated vertex $v$ is {\em between} two edge ends $e_{1}$ and $e_{3}$ incident at $v$ if $e_{1}<e_{2}<e_{3}$ or $e_{3}<e_{2}<e_{1}$. We denote by $\st(e_{1})$ the starting end and by $\ta(e_{1})$ the target end of a directed edge $e_{1}$. The cyclic ordering of the edge ends at each vertex allows one to thicken the edges of a ribbon graph to strips or ribbons and its vertices to polygons. It also equips the ribbon graph with the notion of a face. One says that a path in a ribbon graph $\Gamma$ turns {\em maximally left} at a vertex $v$ if it enters $v$ through an edge end $\alpha$ and leaves it through an edge end $\beta$ that comes directly before $\alpha$ with respect to the cyclic ordering at $v$. \begin{definition}\label{def:face} Let $\Gamma$ be a ribbon graph. \begin{compactenum} \item A \emph{partial face} in $\Gamma$ is a path that turns maximally left at each vertex in the path and traverses each edge at most once in each direction. \item A \emph{ciliated face} in $\Gamma$ is a closed partial face whose cyclic permutations are also partial faces. \item A \emph{face} of $\Gamma$ is an equivalence class of ciliated faces under cyclic permutations. \end{compactenum} \end{definition} \begin{figure} \centering \begin{tikzpicture}[scale=.6] \draw [color=black, fill=black] (0,0) circle (.2); \draw[color=black, style=dotted, line width=1pt] (0,-.2)--(0,-1); \draw [black,->,>=stealth,domain=-90:240] plot ({cos(\x)}, {sin(\x)}); \draw[color=red, line width=1.5pt, ->,>=stealth] (.2,-.2)--(2,-2); \draw[color=violet, line width=1.5pt, <-,>=stealth] (.2,0)..controls (6,0) and (0,6).. (0,.2); \draw[color=cyan, line width=1.5pt, ->,>=stealth] (.2,.2)..controls (4,4) and (-4,4).. (-.2,.2); \draw[color=magenta, line width=1.5pt, <-,>=stealth] (-.2,0)--(-3,0); \node at (.7,-.7)[color=red, anchor=north]{${1}$}; \node at (1,-.3)[color=violet, anchor=west]{${2}$}; \node at (.7,.7)[color=cyan, anchor=west]{$3$}; \node at (0,1.2)[color=violet, anchor=west]{${4}$}; \node at (-.6,.8)[color=cyan, anchor=south]{$5$}; \node at (-.9,.4)[color=magenta, anchor=east]{$6$}; \end{tikzpicture} \qquad\qquad \begin{tikzpicture}[scale=.6] \begin{scope}[shift={(-6,0)}] \draw [color=black, fill=black] (-2,0) circle (.2); \draw [color=black, fill=black] (2,0) circle (.2); \draw [color=black, fill=black] (1,2) circle (.2); \draw [color=black, fill=black] (4,4) circle (.2); \draw [color=black, fill=black] (-4,4) circle (.2); \draw[color=black, line width=1pt, style=dotted](1.9,.1)--(1.3,.7); \draw[color=blue, line width=1.5pt, ->,>=stealth] (-1.8,0)--(1.8,0); \node at (.7,1.2)[color=blue, anchor=north]{$1$}; \draw[color=red, line width=1.5pt, ->,>=stealth] (-1.8,.2).. controls (2,0) and (-2,4).. 
(-2,.2); \node at (-1.5,2.5)[color=red, anchor=west]{$2$}; \draw[color=magenta, line width=1.5pt, ->,>=stealth] (-2.2,.2)--(-3.8,3.8); \node at (-2,2.3)[color=magenta, anchor=east]{$3$}; \draw[color=cyan, line width=1.5pt, ->,>=stealth] (2,.2)--(4,3.8); \node at (2.2,1.3)[color=cyan, anchor=east]{$7$}; \draw[color=violet, line width=1.5pt, ->,>=stealth] (3.8,4)--(-3.8,4); \node at (0,3.7)[color=violet, anchor=north]{${4}$}; \draw[color=brown, line width=1.5pt, ->,>=stealth] (3.8,3.8)--(1.2,2.2); \node at (2.1,2.4)[color=brown, anchor=north]{$6$}; \node at (1.9,2.8)[color=brown, anchor=south]{$5$}; \draw[color=black, line width=1.5pt] (-2.2,0)--(-2.8,0); \draw[color=black, line width=1.5pt ] (-2,-.2)--(-2,-.8); \draw[color=black, line width=1.5pt ] (-2.1,-.1)--(-2.6,-.6); \draw[color=black, line width=1.5pt] (2.2,0)--(2.8,0); \draw[color=black, line width=1.5pt ] (2,-.2)--(2,-.8); \draw[color=black, line width=1.5pt ] (2.1,-.1)--(2.6,-.6); \draw[color=black, line width=1.5pt] (-4,4.2)--(-4,4.6); \draw[color=black, line width=1.5pt ] (-4.2,4)--(-4.8,4); \draw[color=black, line width=1.5pt ] (-4.1,4.1)--(-4.6,4.6); \draw[color=black, line width=1.5pt] (4,4.2)--(4,4.6); \draw[color=black, line width=1.5pt ] (4.2,4)--(4.8,4); \draw[color=black, line width=1.5pt ] (4.1,4.1)--(4.6,4.6); \draw[line width=.5pt, color=black,->,>=stealth] plot [smooth, tension=0.6] coordinates {(1.65,.45)(3.4,3.2)(1.5,2)(1,1.5)(.5,2)(2.8,3.6)(0,3.6)(-3.2,3.6)(-2.6,2) (-2.2,.9)(-1.8,2)(-.5,2)(.3,1)(0,.4)(1.4,.4)}; \end{scope} \end{tikzpicture} \caption{A ciliated vertex and a ciliated face in a directed ribbon graph} \label{fig:ciliated} \end{figure} Examples of faces and partial faces are shown in Figure \ref{fig:ciliated} . Each face defines a cyclic ordering of the edges and their inverses in the face. A ciliated face is a face together with the choice of a starting vertex and induces a linear ordering of these edges. These orderings are taken counterclockwise, as shown in Figure \ref{fig:ciliated}. A graph $\Gamma$ embedded into an oriented surface $\Sigma$ inherits a cyclic ordering of the edge ends at each vertex from the orientation of $\Sigma$ and hence a ribbon graph structure. Conversely, every ribbon graph $\Gamma$ defines a compact oriented surface $\Sigma_\Gamma$ that is obtained by attaching a disc at each face. For a graph $\Gamma$ embedded into an oriented surface $\Sigma$, the surfaces $\Sigma_\Gamma$ and $\Sigma$ are homeomorphic if and only if $\Sigma\setminus\Gamma$ is a disjoint union of discs. An oriented surface with a boundary is obtained from a ribbon graph $\Gamma$ by attaching annuli instead of discs to some of its faces. For details on this topic see~\cite[Chapter 1.3]{LZ}. \subsection{Thickening of a ribbon graph} The cyclic ordering of the edge ends at each vertex allows one to thicken the edges of the graph to strips and its vertices to polygons, as shown in Figure \ref{figure:Thickening}. The resulting graph inherits a ribbon graph structure from $\Gamma$. \begin{definition} \label{def:sites} The \emph{thickening} of a ribbon graph $\Gamma$ with edge set $E$ is the ribbon graph $D(\Gamma)$ in which each edge $e\in E$ of $\Gamma$ is replaced by four edges $e^{s},e^{t},e^{L}, e^{R}$, as shown in Figure \ref{figure:Thickening}. Vertices of $D(\Gamma)$ are called \emph{sites}. \end{definition} Every site is associated to a vertex and a face of $\Gamma$, see Figure~\ref{figure:Thickening}. 
We call two such sites $s,t$ \emph{disjoint}, if neither the vertices nor the faces of $\Gamma$ associated to $s$ and $t$ coincide. \begin{figure}[H] \centering \begin{tikzpicture}[scale=1, baseline=(current bounding box.center)] \begin{scope}[shift={(0,0)}] \draw[color=black, line width=1,fill=black] (0,0) circle (.15); \draw[color=black, line width=1,fill=black] (2,0) circle (.15); \draw[color=red, line width=2.5,->,>=stealth] (-1,1) -- (-.1,.1); \draw[color=blue, line width=2.5,->,>=stealth] (-1,-1) -- (-.1,-.1); \draw[color=violet, line width=2.5,->,>=stealth] (0.15,0) -- node[above]{$e$} (1.85,0); \draw[color=green, line width=2.5,->,>=stealth] (2.1,.1) -- (3,1); \draw[color=cyan, line width=2.5,->,>=stealth] (2.1,-.1) -- (3,-1); \end{scope} \draw[color=black, line width=1.5pt,->,>=stealth] (4,0) -- (6,0); \begin{scope}[shift={(8,0)}] \draw[color=red, line width=2.5,->,>=stealth] (-.8,0.05) -- (-.05,.45); \draw[color=red, line width=2.5,->,>=stealth] (-1,1.5) -- (-.05,.45); \draw[color=red, line width=2.5,->,>=stealth] (-1.85,1) -- (-.9,.05); \draw[color=blue, line width=2.5,->,>=stealth] (-.05,-.45) -- (-.8,-.05); \draw[color=blue, line width=2.5,->,>=stealth] (-1,-1.5) -- (-.05,-.55); \draw[color=blue, line width=2.5,->,>=stealth] (-1.85,-1) -- (-.9,-.05); \draw[color=violet, line width=2.5,->,>=stealth] (0,-.4) --node[right]{$e^{s}$} (0,.4); \draw[color=violet, line width=2.5,->,>=stealth] (3,-.4) --node[left]{$e^{t}$} (3,.4); \draw[color=violet, line width=2.5,->,>=stealth] (.1,.5) --node[above]{$e^{L}$} (2.9,.5); \draw[color=violet, line width=2.5,->,>=stealth] (.1,-.5) --node[above]{$e^{R}$} (2.9,-.5); \draw[color=green, line width=2.5,->,>=stealth] (3.8,.05) -- (3.05,.45); \draw[color=green, line width=2.5,->,>=stealth] (3.05,.55) -- (4,1.5); \draw[color=green, line width=2.5,->,>=stealth] (3.9,.05) -- (4.85,1); \draw[color=cyan, line width=2.5,->,>=stealth] (3.05,-.45) -- (3.8,-.05); \draw[color=cyan, line width=2.5,->,>=stealth] (3.05,-.55) -- (4,-1.5); \draw[color=cyan, line width=2.5,->,>=stealth] (3.9,-.05) -- (4.85,-1); \draw[color=black, line width=1,fill=black] (0,.5) circle (.1); \draw[color=black, line width=1,fill=black] (0,-.5) circle (.1); \draw[color=black, line width=1,fill=black] (-.85,0) circle (.1); \draw[color=black, line width=1,fill=black] (3,.5) circle (.1); \draw[color=black, line width=1,fill=black] (3,-.5) circle (.1); \draw[color=black, line width=1,fill=black] (3.85,0) circle (.1); \end{scope} \end{tikzpicture} \caption{Thickening of a ribbon graph.} \label{figure:Thickening} \end{figure} In the following, we often consider various paths in the thickening $D(\Gamma)$, that is, morphisms in the associated path groupoid. We write $\rho: u\to v$ for a path $\rho$ in $D(\Gamma)$ from a site $u$ to a site $v$. We often draw paths in $D(\Gamma)$ in the graph $\Gamma$ for better legibility. In this case, $e^{\pm L}$ and $e^{\pm R}$ are drawn slightly to the left and right of an edge $e\in E$, viewed in the direction of its orientation, and $e^{\pm s}, e^{\pm t}$ cross $e$ at the starting and target end, as shown in Figure \ref{fig:paths}. We also consider the starting point and end point of paths in $D(\Gamma)$ relative to its sites. We say that a path ends to the left or right of a site, when it ends to the left or right of the site viewed in the direction from the adjacent vertex of $\Gamma$ to the adjacent face. This translates into the following condition involving the four edges in each rectangle. 
\begin{definition}\label{def:leftorright} Let $u,v$ be sites and $\rho=\rho_1^{\eta_1}\circ \ldots\circ \rho_n^{\eta_n}: u\to v$ be the reduced expression for a path $\rho$ in $D(\Gamma)$ with $\rho_i\in E$ and $\eta_i\in \{\pm s,\pm t,\pm L,\pm R\}$. We say that $\rho$ \begin{itemize} \item \emph{ends to the left} of $v$, denoted $\rho: u\to v^L$, if $\eta_1\in\{-s,t,-R,L\}$, \item \emph{ends to the right} of $v$, denoted $\rho: u\to v^R$, if $\eta_1\in\{s,-t,R,-L\}$, \item \emph{starts to the left} of $u$, denoted $\rho: u^L\to v$, if $\eta_n\in\{s,-t,R,-L\}$, \item \emph{starts to the right} of $u$, denoted $\rho: u^R\to v$, if $\eta_n\in\{-s,t,-R,L\}$. \end{itemize} We call a path $\rho:u^{L}\to v^{R}$ a \emph{left-right path} and similarly use the terms \emph{left-left, right-left} and \emph{right-right path} (cf. Figure~\ref{fig:paths}). \end{definition} In the following, we often consider certain distinguished paths in thickened ribbon graphs known as \emph{ribbon paths}. These paths were considered in many publications concerned with Kitaev models such as \cite{Ki,BD, Me} to define vertex and face operators and ribbon operators. \begin{definition}\label{def:ribbon path} Let $\Gamma$ be a ribbon graph with thickening $D(\Gamma)$. \begin{compactenum} \item A \emph{ribbon path} is a path $\rho$ in $D(\Gamma)$ such that \begin{compactenum} \item every edge of $D(\Gamma)$ occurs at most once in the reduced form of $\rho$. \item if the reduced form of $\rho$ contains one of the edges $e^{\pm L}$ or $e^{\pm R}$ for an edge $e\in E$, then it does not contain the edges $e^{\pm s}$ and $e^{\pm t}$. \end{compactenum} \item The \emph{vertex path of the site $u$} is the closed ribbon path $v:u\to u$ in $D(\Gamma)$ whose reduced form only contains edges of the form $e^{- s}, e^{ t}$. \item The \emph{face path of the site $u$} is the closed ribbon path $f:u\to u$ in $D(\Gamma)$ whose reduced form only contains edges of the form $e^{- L}, e^{ R}$. \end{compactenum} \end{definition} Examples of ribbon paths are shown in Figure \ref{fig:paths}. As a consequence of the first property there are two types of ribbon paths with opposite orientation. A \emph{right-left} ribbon path traverses only edges of the form $e^{L}, e^{-R}, e^{-s}, e^{t}$ while a \emph{left-right} ribbon path only traverses edges of the form $e^{-L}, e^{R}, e^{s}, e^{-t}$, as shown in Figure \ref{fig:paths}. Note that vertex paths are right-left paths and face paths are left-right paths by definition. \begin{figure} \centering \def\svgscale{.4} \input{paths2.pdf_tex} \caption{Examples of simple paths, drawn in a ribbon graph:\\ $\bullet$ $\nu$ is a right-left vertex path, \\ $\bullet$ $\rho$ is a right-left ribbon path,\\ $\bullet$ $\sigma$ is a left-right face path, \\ $\bullet$ $\omega$ is a left-right ribbon path, \\ $\bullet$ $\tau$ is a right-right simple path, but not a ribbon path.\\ Only the paths $\tau$ and $\omega$ cross, and they form a left joint $(\tau,\omega)_{\prec}$. } \label{fig:paths} \end{figure} To describe the relative position of paths in $D(\Gamma)$, it is convenient to introduce a shorthand notation for the paths around the thickened edge $e$ in Figure \ref{figure:Thickening}. We set \begin{align}\label{eq:eddef} &e^{d}= e^{t}\circ e^{R}, & &e^{\overline{d}} = e^{-t}\circ e^{L}, & &e^{d'}= e^{L}\circ e^{s}, & &e^{\overline{d'}} = e^{R}\circ e^{-s}. \end{align} \begin{definition} \label{def:cross}Let $\rho_1,\rho_2$ be paths in $D(\Gamma)$.
We say that $\rho_1$ and $\rho_2$ \begin{itemize} \item \emph{do not cross}, denoted $(\rho_{1}, \rho_{2})_{\emptyset}$, if for any edge $e \in E$ traversed by both $\rho_{1}$ and $\rho_{2}$ the following condition is fulfilled: If $\rho_{1}$ traverses $e^{\pm s}$, then $\rho_{2}$ only traverses $e^{\pm t}$, and if $\rho_{1}$ traverses $e^{\pm L}$, then $\rho_{2}$ only traverses $e^{\pm R}$, and vice versa. \item form a \emph{left joint}, denoted $(\rho_{1}, \rho_{2})_{\prec}$, if there are (possibly empty) paths $\rho, \rho_{1}', \rho_{2}'$ and an edge $e$ such that $\rho$ is a ribbon path, $\rho, \rho_{1}',\rho_{2}'$ do not cross and the reduced expressions for $\rho_{1},\rho_{2}$ are \begin{align*} \rho_{1} = \rho \circ e^{a} \circ \rho_{1}' \text{ and } \rho_{2} = \rho\circ e^{b} \circ \rho_{2}' \end{align*} with either $a\in \left\{ \pm L, \pm R, \pm d, \pm \overline{d}, \pm d', \pm \overline{d'} \right\}$, $b\in \left\{ \pm s, \pm t \right\}$ \\ $\qquad$ or $a\in \left\{ \pm L, \pm R \right\}$, $b\in \left\{ \pm s, \pm t, \pm d, \pm \overline{d},\pm d', \pm \overline{d'} \right\}$. \item form a \emph{right joint}, denoted $(\rho_{1}, \rho_{2})_{\succ}$, if $\left( \rho_{1}^{-1}, \rho_{2}^{-1} \right)_{\prec}$. \item form a \emph{middle joint}, denoted $\left( \rho_{1},\rho_{2} \right)_{\succ\prec}$, if there are decompositions $\rho_{1}= \rho_{1}'\circ\rho_{1}''$ and $\rho_{2}=\rho_{2}'\circ\rho_{2}''$ in reduced form such that either $(\rho_{1}',\rho_{2}')_{\succ}$ and $(\rho_{1}'',\rho_{2}'')_{\prec}$ or $(\rho_{2}',\rho_{1}')_{\succ}$ and $(\rho_{2}'',\rho_{1}'')_{\prec}$. \end{itemize} \end{definition} Examples of paths that do not cross and paths that form a left joint are shown in Figure \ref{fig:paths}. We also consider slight generalizations of ribbon paths. These are paths that are allowed to traverse two adjacent sides of the rectangle in $D(\Gamma)$ associated with an edge $e\in E$ in Figure \ref{figure:Thickening}. An example of such a path is shown in Figure \ref{fig:paths}. \begin{definition} \label{def:simplepath} A reduced path $\rho$ in $D(\Gamma)$ is called \emph{simple}, if for each decomposition $\rho=\rho_1\circ \rho_2$ with reduced paths $\rho_1,\rho_2$ one has either $(\rho_1,\rho_2)_\emptyset$ or $\rho_1=\rho'_1\circ x$ and $\rho_2=y\circ \rho'_2$ with $x\circ y\in \{e^{\pm d}, e^{\pm \bar d}, e^{\pm d'}, e^{\pm \bar d'}\}$ for an edge $e\in E$, $(\rho'_1,\rho'_2)_\emptyset$, $(x\circ y, \rho'_1)_\emptyset$ and $(x\circ y, \rho'_2)_\emptyset$. \end{definition} \section{Kitaev models} \label{section:KitaevModel} Kitaev lattice models were introduced in \cite{Ki} as a model for fault-tolerant topological quantum computation. The models in \cite{Ki} were based on group algebras of finite groups and were later generalized in \cite{BMCA,BCKA} to finite-dimensional semisimple Hopf $*$-algebras. They were related to Levin-Wen models in \cite{BA, Kr} and interpreted as a Hopf algebra gauge theory in \cite{Me}. In \cite{BK12} it was shown that these models are closely related to topological quantum field theories of Turaev-Viro type \cite{TV}. More specifically, the \emph{protected space} of the Kitaev model for a finite-dimensional semisimple Hopf algebra $H$ on an oriented surface $\Sigma$ is the vector space that the Turaev-Viro TQFT for the spherical fusion category $H\text{-Mod}$ assigns to $\Sigma$.
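For orientation, we briefly recall the simplest special case, which is standard (cf.~\cite{Ki}) and not needed in the following. For the group algebra $H=\C[G]$ of a finite group $G$, the extended space defined below is $\C[G]^{\oo E}$, with basis the set of maps from the edge set $E$ to $G$; the vertex operators implement gauge transformations at the vertices and the face operators evaluate functions on the holonomies around the faces. The protected space $\mathcal{L}$ defined below then has a basis labeled by gauge equivalence classes of flat $G$-connections on $\Gamma$, so that for connected $\Gamma$ \begin{align*} \dim \mathcal{L} = \left| \mathrm{Hom}\left( \pi_{1}(\Sigma_{\Gamma}), G \right)/G \right|, \end{align*} which is also the dimension of the vector space that the Turaev-Viro TQFT for $\mathrm{Rep}(G)\cong \C[G]\text{-Mod}$ assigns to the surface $\Sigma_{\Gamma}$.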
We summarize the Kitaev models without defects and boundaries from \cite{BMCA}, but with some minor differences in conventions and in a formulation that prepares their generalization to Kitaev models with topological defects and boundaries in the following sections. The Kitaev model requires a ribbon graph $\Gamma=(V,E)$ and a finite-dimensional, semisimple Hopf algebra $H$. In its most basic form the model then features: \begin{itemize} \item The \emph{extended space} $\HSpace = H^{ \oo E}= \tensor_{e\in E}H$, i.e. the $|E|$-fold tensor product of $H$ with itself. Here we associate one copy of $H$ to every edge $e\in E$. \item \emph{Vertex operators} $A^{h}_{s}$ and \emph{face operators} $B^{\alpha}_{s}:H^{ \oo E}\to H^{ \oo E}$ for every site $s$ of $\Gamma$ and $h\in H, \alpha\in H^{*}$. As explained in Theorem \ref{ht:siterep} below, they define representations of the Drinfel'd double $D(H)$ on $H^{ \oo E}$. \item The \emph{protected space} $$\mathcal{L}= \left\{ m \in H^{ \oo E} \;|\; A^{h}_{s}m = \varepsilon(h) m, B^{\alpha}_{s}m = \alpha(1) m \text{ for all sites }s,h\in H,\alpha\in H^{*} \right\}$$ of invariants of the face and vertex operators. \end{itemize} To consider models with excitations and to investigate the transport and braiding of excitations one also requires a set of operators on the extended space $\HSpace$ called \emph{ribbon operators} in \cite{Ki,BD} and \emph{holonomies} in \cite{Me} and assigned to certain paths in $D(\Gamma)$: \begin{itemize} \item Holonomies $\Hol_{\rho}^{h \oo \alpha}\in \End_\C( \HSpace)$ for every path $\rho$ in $D(\Gamma)$ and $h \oo \alpha\in H \oo H^{*}$. \end{itemize} All linear endomorphisms on $\HSpace$ are composites of four operators, called \emph{triangle operators} in \cite{BD}, that are assigned to each edge of $D(\Gamma)$ and viewed as the holonomies of the associated paths $e^t,e^s, e^L,e^R$. Hence, every edge $e\in E$ is associated with four triangle operators. The operators are linear maps $\HSpace\to\HSpace$ which only act on the tensor factor associated to $e$. \begin{definition} \label{definition:HolonomyBasic} Let $e$ be an edge, $\eta\in \left\{ \pm t,\pm s,\pm R,\pm L \right\}$ and $h,m \in H, \alpha\in H^{*}$. The \emph{triangle operators} for $e$ are the following maps $\Hol_{e^{\eta}}^{h \oo \alpha} :\HSpace\to \HSpace$ which only act on the tensor factor associated to $e$: \begin{align} \Hol_{e^{s}}^{h \oo \alpha} (m) &= \alpha(1) \cdot mh, & \Hol_{e^{t}}^{h \oo \alpha} (m) &=\alpha(1) \cdot hm \label{eq:HolonomyEndsStandard} \\ \Hol_{e^{R}}^{h \oo \alpha} (m) &= \varepsilon(h) \langle \alpha , m_{(2)} \rangle m_{(1)}, & \Hol_{e^{L}}^{h \oo \alpha} (m) &= \varepsilon(h)\langle \alpha , m_{(1)} \rangle m_{(2)} \label{eq:HolonomySidesStandard} \\ \Hol_{e^{\eta}}^{h \oo \alpha} (m) &= \Hol_{e^{-\eta}}^{ S_{D(H)^{*}}(h \oo \alpha) } (m) & &\text{for $\eta\in \left\{ -s,-t,-R,-L \right\}$.} \label{eq:HolonomyOpposites} \end{align} Here $m\in H \subset \HSpace$ is the element in the tensor factor associated to $e$ and we extend linearly to $\HSpace$. \end{definition} A direct computation shows that for the element $m\in H$ associated to the edge $e$ we have \begin{align*} \Hol_{e^t}^{h\oo\varepsilon}\circ\Hol_{e^R}^{1\oo\alpha} (m)= \langle \alpha , m_{(2)} \rangle hm_{(1)}, \end{align*} i.e. 
the edge operators $\Hol_{e^t}^{h\oo\alpha}$ and $\Hol_{e^R}^{h\oo\alpha}$ generate an algebra isomorphic to the Heisenberg double $\mathcal H_R(H)$ and they act on the copy of $H$ associated to the edge $e$ by the action from~\eqref{eq:HeisenbergAction}. By Remark \ref{rem:heisenbergdoubleendo}, this implies that the edge operators for all edges $e\in E$ generate an algebra isomorphic to $\End_\C(\HSpace)$. Holonomies along paths in $D(\Gamma)$ are constructed from these basic building blocks by applying the comultiplication of $D(H)^*$ to their argument $h\oo\alpha$ and composing these maps as follows: \begin{definition} \label{definition:HolonomyComposite} Let $\rho=\rho_{1}\circ\rho_{2}$ be a simple path in reduced form with $\rho_{2}:s \to t$ and $\rho_{1}:t \to u$. Write $\beta:=h \oo \alpha\in D(H)^{*}$ and $\Delta_{D(H)^{*}}(\beta)=\beta_{(1)} \oo \beta_{(2)}$. Then we define recursively \begin{align} &\Hol_{\rho}^{\beta} = \Hol_{\rho_{1}}^{\beta_{(1)}}\circ \Hol_{\rho_{2}}^{\beta_{(2)}} \quad &\text{if $(\rho_{1},\rho_{2})_{\emptyset}$ or $(\rho_{2},\rho_{1}^{-1})_{\prec}$} \label{eq:HolonomyRecursionStandard} \\ &\Hol_{\rho}^{\beta} = \Hol_{\rho_{2}}^{\beta_{(2)}}\circ \Hol_{\rho_{1}}^{\beta_{(1)}} &\text{if $(\rho_{1}^{-1},\rho_{2})_{\prec}$} \label{eq:HolonomyRecursionLeftJoin} \end{align} \end{definition} \begin{lemma} \label{lemma:HolonomyWellDefined} For a simple path $\rho$, the definition of $\Hol_{\rho}^{\beta} $ does not depend on the decomposition chosen in Definition~\ref{definition:HolonomyComposite}. \end{lemma} \begin{proof} Let $\rho=\rho_{1}\circ\rho_{2}\circ\rho_{3}$ in reduced form. First let $(\rho_{1}^{-1},\rho_{2})_{\prec}$ and $(\rho_{2}^{-1},\rho_{3})_{\prec}$. Then we also have $(\rho_{1}^{-1},\rho_{2}\circ\rho_{3})_{\prec}$ and $((\rho_{1}\circ\rho_{2})^{-1},\rho_{3})_{\prec}$. Then \begin{align*} \Hol_{\rho_{1}\circ (\rho_{2}\circ\rho_{3})}^{ \beta} &= \Hol_{\rho_{2}\circ\rho_{3}}^{ \beta_{(2)}} \circ \Hol_{\rho_{1}}^{ \beta_{(1)}} = \Hol_{\rho_{3}}^{ \beta_{(3)}} \circ \Hol_{\rho_{2}}^{\beta_{(2)}} \circ\Hol_{\rho_{1}}^{ \beta_{(1)}} = \Hol_{\rho_{3}}^{\beta_{(2)}} \circ \Hol_{\rho_{1}\circ\rho_{2}}^{ \beta_{(1)}} =\Hol_{(\rho_{1}\circ \rho_{2})\circ\rho_{3}}^{ \beta}. \end{align*} Here we see that applying the definition for the two decompositions $\rho_{1}\circ\left(\rho_{2}\circ\rho_{3} \right)$ and $\left( \rho_{1}\circ\rho_{2} \right)\circ\rho_{3}$ leads to the same result. This can be proven analogously for the other cases. \end{proof} We also consider symmetries on the extended space $\HSpace$, given by the vertex and face operators of a Kitaev model. These are obtained as holonomies along special paths, namely the vertex and face paths from Definition \ref{def:ribbon path} and Figure \ref{fig:paths}. \begin{definition} \label{definition:VertexFaceOperators} Let $s$ be a site with vertex path $v$ and face path $f$ and let $h\in H,\alpha\in H^{*}$.
The vertex operator at $s$ is the map \begin{align} A_{s}^{h}:\, &\HSpace \to \HSpace, \quad A_{s}^{h} = \Hol_{v}^{h \oo \varepsilon} \label{eq:VertexOperatorStandard} \end{align} The face operator at $s$ is the map \begin{align} B_{s}^{\alpha}:\, &\HSpace \to \HSpace, \quad B_{s}^{\alpha} = \Hol_{f}^{1 \oo \alpha} \label{eq:FaceOperatorStandard} \end{align} \end{definition} It was shown in \cite{BMCA} that the vertex and face operators associated with a given site satisfy the commutation relations of the Drinfel'd double $D(H)$ and hence define a representation of $D(H)$, and that the $D(H)$-actions at disjoint sites commute: \begin{theorem}\label{ht:siterep}\cite{Ki,BMCA} Let $s$ be a site. Then \begin{align} D(H) \oo \HSpace &\to \HSpace\nonumber\\ (\alpha \oo h) \oo m &\mapsto B^{\alpha}_{s}A^{h}_{s}(m) \label{eq:Drinfel'dActionHSpace} \end{align} defines an action of $D(H)$ on $\HSpace$. The actions of $D(H)$ for disjoint sites commute. \label{theorem:Drinfel'dActionHSpace} \end{theorem} The invariants of these actions are of special interest, as the \emph{protected space} \begin{align} \mathcal{L} := \left\{ m \in H^{ \oo E} \;|\; B^{\alpha}_{s}A^{h}_{s}m = \alpha(1) \varepsilon(h)\, m \text{ for all sites }s,\alpha \oo h\in D(H) \right\} \label{eq:ProtectedSpace} \end{align} is a topological invariant, as shown in~\cite{Ki,BMCA}. For a given site $s$, the map \begin{align*} P_{s}:=B^{\smallint}_{s}A^{\lambda}_{s}:\HSpace\to\HSpace \end{align*} is a projector onto these invariants \cite{Ki,BMCA}. Here $\smallint \in H^{*}, \lambda\in H$ are the Haar integrals of $H^{*}$ and $H$. Multiplying the $P_{s}$ for all sites $s$ we obtain a projector onto the protected space: \begin{theorem}\cite{BMCA,Me} For any two sites $s,t$, the projectors $P_{s}$ and $P_{t}$ commute. The map \begin{align} P:= \prod_{s\text{ site}} P_{s}:\HSpace \to \mathcal{L} \label{eq:ProjectorProtected} \end{align} is a projector onto the protected space. \label{theorem:ProjectorProtected} \end{theorem} \subsection{Excitations} Recall that if $H$ is semisimple, then $D(H)$ is also semisimple, and that the vertex and face operators at each site $s$ define a $D(H)$-module structure on $\HSpace$. This allows us to decompose the extended space into its isotypic components \begin{align} \HSpace= \bigoplus_{i \in \mathrm{Irr}(D(H))} \HSpace(s,i) \label{eq:HilbertSpaceDecomposition} \end{align} where $\mathrm{Irr}(D(H))$ is a set of representatives of irreducible $D(H)$-modules. For a general (not necessarily simple) $D(H)$-module $M$ we define the \emph{excitation of type $M$ at $s$} as \begin{align} \HSpace(s,M):=\bigoplus_{i \in \mathrm{Irr}(D(H)), M_{i}\neq 0} \HSpace(s,i)\subset \HSpace, \label{eq:SingleExcitationSpace} \end{align} where $M_i$ denotes the isotypic component of $M$ of type $i$. The \emph{trivial excitation} is the excitation corresponding to the trivial $D(H)$-representation. Finally, let $s_{1},\dots,s_{n}$ be $n$ disjoint sites and let $M_{1},\dots,M_{n}$ be $D(H)$-modules. We define \begin{align} \label{eq:MultipleExcitationSpace} \HSpace(s_{1},M_{1},\dots,s_{n},M_{n}):= \bigcap_{k=1}^{n} \HSpace(s_{k},M_{k}). \end{align} This space is closely related to the \emph{space of $n$-particle states} in~\cite{Ki} for disjoint sites $s_1,...,s_n$ \begin{align} \mathcal{L}(s_{1},\dots,s_{n})= \big\{ m\in \HSpace \;|\; &A_{s}^{h}m = \varepsilon(h) m, B^{\alpha}_{s}m = \alpha(1) m \text{ for } h\in H,\alpha\in H^{*},\nonumber\\ &s \text{ site disjoint to $s_{1},\dots,s_{n}$}\big\}.
\label{eq:SpaceParticleStates} \end{align} Both $\HSpace(s_{1},M_{1},\dots,s_{n},M_{n})$ and $\mathcal{L}(s_{1},\dots,s_{n})$ have a $D(H)^{\otimes n}$-module structure from the face and vertex operators at the sites $s_{1},\dots,s_{n}$. There are two differences between these subspaces: \begin{compactenum}[(i)] \item Elements $l\in\mathcal{L}(s_{1},\dots,s_{n}) $ are invariants of the $D(H)$-action defined by sites disjoint to $s_{1},\dots,s_{n}$, while elements $m \in \HSpace(s_{1},M_{1},\dots,s_{n},M_{n}) $ need not be invariant. \item The isotypic decomposition of $\HSpace(s_{1},M_{1},\dots,s_{n},M_{n})$ for the $D(H)$-action defined by a site $s_{i}$ only contains the same irreducible representations as the $D(H)$-module $M_{i}$, possibly with different multiplicities. The isotypic decomposition of $\mathcal{L}(s_{1},\dots,s_{n})$ may contain arbitrary irreducible representations. \end{compactenum} In the following we will be interested in excitations of a type $M$ at a site $s$ and thus will only consider spaces of the form $\HSpace(s_{1},M_{1},\dots,s_{n},M_{n})$. \subsection{Holonomies and excitations} \label{subsection:HolonomiesExcitationClassic} We now explain how to generate excitations of a specific type $i\in \mathrm{Irr}(D(H))$ at a site $s$. Our goal is to obtain linear endomorphisms of the extended space $\HSpace$, which restrict to maps $\mathcal{L} \to \HSpace(s,i)$. Examples for such linear endomorphisms are given by \emph{holonomies}. The commutation relation for the holonomies of two paths $\rho_{1}, \rho_{2}$ depends on the relative constellation of the paths. The following technical lemma describes the commutation relation for several constellations. We will use this lemma in the remainder of the section to describe how holonomies generate excitations. The first four identities generalize the relations~(B17),(B18),(B23) from \cite{BD}. \begin{lemma} \label{lemma:HolonomyCommutators} Let $\alpha,\beta\in D(H)^{*}$, $\rho,\rho_{1},\rho_{2},\gamma,\gamma_{1},\gamma_{2}$ be simple paths such that $\rho_{1},\rho_{2}$ end to the right of a site and $\gamma_{1},\gamma_{2}$ start to the left of a site. Denote $R= \varepsilon \oo x \oo X \oo 1\in D(H)\oo D(H)$ the $R$-matrix of $D(H)$. 
Then we have \begin{align} \label{eq:NonOverlap} (\rho,\gamma)_{\emptyset} \Rightarrow &\Hol_{\rho}^{\alpha} \Hol_{\gamma}^{\beta} = \Hol_{\gamma}^{\beta} \Hol_{\rho}^{\alpha} \\ \label{eq:HolonomyReversal} &\Hol_{\rho^{-1}}^{ \alpha} = \Hol_{\rho}^{ S(\alpha)} \\ \label{eq:LeftRightHolonomy} \text{$\rho$ left-right path} \Rightarrow &\Hol_{\rho}^{\alpha}\Hol_{\rho}^{\beta} =\Hol_{\rho}^{\alpha\beta} \\ \label{eq:RightLeftHolonomy} \text{$\rho$ right-left path} \Rightarrow &\Hol_{\rho}^{\alpha}\Hol_{\rho}^{\beta} =\Hol_{\rho}^{\beta\alpha} \\ \label{eq:LeftLeftHolonomy} \text{$\rho$ left-left path} \Rightarrow &\Hol_{\rho}^{\alpha}\Hol_{\rho}^{\beta} = \Hol_{\rho}^{\left( R^{(2)} \vartriangleright \beta \right) \cdot \left( R^{(1)} \vartriangleright \alpha \right)} = \Hol_{\rho}^{ \alpha \cdot_{\mathcal{H}_{R}(H)} \beta} \\ \label{eq:RightRightHolonomy} \text{$\rho$ right-right path} \Rightarrow &\Hol_{\rho}^{\alpha}\Hol_{\rho}^{\beta} =\Hol_{\rho}^{\left( R^{(-1)} \vartriangleright \alpha \right) \cdot \left( R^{(-2)} \vartriangleright \beta \right)} = \Hol_{\rho}^{ \alpha \cdot_{\overline{\mathcal{H}}_{R}(H)} \beta} \\ \label{eq:LeftJoint} (\rho_{1},\rho_{2})_{\prec} \Rightarrow &\Hol_{\rho_{1}}^{\alpha} \Hol_{\rho_{2}}^{\beta} =\Hol_{\rho_{2}}^{\beta \vartriangleleft R^{(-2)}} \Hol_{\rho_{1}}^{\alpha \vartriangleleft R^{(-1)}} \\ \label{eq:RightJoint} (\gamma_{1},\gamma_{2})_{\succ} \Rightarrow &\Hol_{\gamma_{1}}^{\alpha} \Hol_{\gamma_{2}}^{ \beta} = \Hol_{\gamma_{2}}^{R^{(2)} \vartriangleright \beta} \Hol_{\gamma_{1}}^{R^{(1)} \vartriangleright \alpha} \\ \label{eq:MiddleJoint} (\rho,\gamma)_{\succ\prec} \Rightarrow &\Hol_{\rho}^{\alpha} \Hol_{\gamma}^{\beta} = \Hol_{\gamma}^{\beta} \Hol_{\rho}^{\alpha} \end{align} \end{lemma} \begin{proof} Proof of~\eqref{eq:NonOverlap}. For an edge $e$, the triangle operators for $e^{\pm s}$ commute with those for $e^{\pm t}$, see Definition~\ref{definition:HolonomyBasic}. The triangle operators for $e^{\pm L}$ commute with the ones for $e^{\pm R}$. Triangle operators of different edges also commute with one another, since they act on different copies of $H$. For $(\rho, \gamma)_{\emptyset}$, the holonomy along $\rho$ is therefore composed of triangle operators that commute with the triangle operators in the holonomy along $\gamma$. \\ \\ Proof of~\eqref{eq:HolonomyReversal} by induction over $\rho$. For $\rho=e^{\nu}$ with $e$ an edge and $\nu\in \left\{ \pm s,\pm t,\pm L,\pm R \right\} $ the claim follows from~\eqref{eq:HolonomyOpposites} in Definition~\ref{definition:HolonomyBasic}. For $\rho =\rho_{1}\circ\rho_{2}$ as in Definition~\ref{definition:HolonomyComposite} we either have (i) $(\rho_{1},\rho_{2})_{\emptyset}$, (ii) $(\rho_{2},\rho_{1}^{-1})_{\prec}$, or (iii) $(\rho_{1}^{-1},\rho_{2})_{\prec}$. In case (i) we have \begin{align*} \Hol_{\rho^{-1}}^{ \alpha} &\overset{\eqref{eq:HolonomyRecursionStandard}}{=} \Hol_{\rho_{2}^{-1}}^{\alpha_{(1)}} \circ \Hol_{\rho_{1}^{-1}}^{ \alpha_{(2)}} \overset{\mathrm{(I)}}{=} \Hol_{\rho_{2}}^{S(\alpha_{(1)})} \circ \Hol_{\rho_{1}}^{S( \alpha_{(2)})} =\Hol_{\rho_{2}}^{S(\alpha)_{(2)}} \circ \Hol_{\rho_{1}}^{S( \alpha)_{(1)}} \\ &\overset{\eqref{eq:NonOverlap}}{=} \Hol_{\rho_{1}}^{S( \alpha)_{(1)}}\circ \Hol_{\rho_{2}}^{S(\alpha)_{(2)}} \overset{\eqref{eq:HolonomyRecursionStandard}}{=} \Hol_{\rho}^{ S(\alpha)} \end{align*} Here we have used induction in step $\mathrm{(I)}$.
For case (ii) note that we need to apply the first case~\eqref{eq:HolonomyRecursionStandard} of the recursive Definition~\ref{definition:HolonomyComposite} for $\Hol_{\rho}$, but the second case~\eqref{eq:HolonomyRecursionLeftJoin} for $\Hol_{\rho^{-1}}$. We then obtain \begin{align*} \Hol_{\rho^{-1}}^{ \alpha} &\overset{\eqref{eq:HolonomyRecursionLeftJoin}}{=} \Hol_{\rho_{1}^{-1}}^{\alpha_{(2)}} \circ \Hol_{\rho_{2}^{-1}}^{ \alpha_{(1)}} \overset{\mathrm{(I)}}{=} \Hol_{\rho_{1}}^{S(\alpha_{(2)})} \circ \Hol_{\rho_{2}}^{S( \alpha_{(1)})} =\Hol_{\rho_{1}}^{S(\alpha)_{(1)}} \circ \Hol_{\rho_{2}}^{S( \alpha)_{(2)}} \overset{\eqref{eq:HolonomyRecursionStandard}}{=} \Hol_{\rho}^{ S(\alpha)} \end{align*} Here we again used induction in step $\mathrm{(I)}$. The third case follows analogously to the second case. \\ \\ Proof of \eqref{eq:LeftRightHolonomy} to \eqref{eq:RightRightHolonomy} by induction. We have four basic cases, one for each equation: \begin{compactenum} \item $\rho$ is a left-right ribbon path. \item $\rho$ is a right-left ribbon path. \item $\rho=e^{d}$ or $e^{-d}$. \item $\rho=e^{\overline{d}}$ or $\rho=e^{-\overline{d}}$. \end{compactenum} For case~1 we write $\rho=\rho_{1}^{\nu_{1}}\circ\cdots\circ \rho_{n}^{\nu_{n}}$ in reduced form with $\rho_{i} \in E$, $\nu_{i} \in \left\{ s,-t,R,-L \right\}$. The triangle operators $\Hol_{\rho_{i}^{\nu_{i}}}$ from~\eqref{eq:HolonomyEndsStandard} to~\eqref{eq:HolonomyOpposites} define actions of $D(H)^{*}$ on $\HSpace$. Since $\rho$ is a ribbon path, the triangle operator for $\rho_{i}^{\nu_{i}}$ commutes with the one for $\rho_{k}^{\nu_{k}}$ for $i\neq k$. Combining this with~\eqref{eq:HolonomyRecursionStandard} we conclude that $\Hol_{\rho}$ is a tensor product of the $D(H)^{*}$-actions $\Hol_{\rho_{i}^{\nu_{i}}} $, i.e. a $D(H)^{*}$-action. The proof for case~2 is analogous and uses that the operators $\Hol_{\rho_{i}^{\nu_{i}}}$ for $\nu_{i}\in \left\{ -s,t,-R,L \right\}$ from~\eqref{eq:HolonomyEndsStandard} to~\eqref{eq:HolonomyOpposites} define actions of $D(H)^{*}$ on $\HSpace$. For case~3 let $m\in H\subseteq \HSpace$ be the element associated to the edge $e$. Then we have for $\alpha=h \oo \delta\in H \oo H^{*}$: \begin{align*} \Hol_{e^{d}}^{\alpha} (m) &\overset{\eqref{eq:HolonomyRecursionStandard}}{=} \Hol_{e^{t}}^{ h_{(1)} \oo X^{1}\delta_{(1)}X^{2}} \circ \Hol_{e^{R}}^{S(x^{2})h_{(2)}x^{1} \oo \delta_{(2)}} (m) \\ &= \langle \varepsilon , S(x^{2})h_{(2)}x^{1} \rangle \langle \delta_{(2)} , m_{(2)} \rangle \Hol_{e^{t}}^{ h_{(1)} \oo X^{1}\delta_{(1)}X^{2}} (m_{(1)}) \\ &= \langle \delta , m_{(2)} \rangle (hm_{(1)}) \end{align*} where we extend linearly to $\HSpace$. Using this identity together with $\beta=k \oo \theta\in H \oo H^{*}$ we obtain \begin{align*} \Hol_{e^{d}}^{\alpha}\circ \Hol_{e^{d}}^{\beta} (m) &=\langle \delta_{(1)} , k_{(2)} \rangle \langle \delta_{(2)} \theta ,m_{(2)} \rangle hk_{(1)}m_{(1)} = \langle \delta_{(1)} , k_{(2)} \rangle \Hol_{e^{d}}^{hk_{(1)} \oo \delta_{(2)}\theta} (m) \\ &\overset{\eqref{eq:RightHeisenbergDoubleRTwist}}{=} \Hol_{e^{d}}^{\left( R^{(2)} \vartriangleright\beta \right)\cdot \left( R^{(1)} \vartriangleright\alpha \right)}(m) \end{align*} The proof is analogous for $e^{-d}$, $ e^{\pm d'}$ and~case 4. \\ Now let $\rho$ be a simple left-right path that is not a ribbon path.
\begin{figure}[H] \begin{center} \begin{tikzpicture}[scale=.7] \draw[line width=1pt, color=black, ->, >=stealth] (-3,0)--(0,0); \draw[line width=1pt, color=black, ] (3,0)--(0,0); \draw[line width=1pt, color=black, ->, >=stealth] (-3,4)--(0,4); \draw[line width=1pt, color=black, ] (3,4)--(0,4); \draw[line width=1pt, color=black, ->, >=stealth] (-3,0)--(-3,2); \draw[line width=1pt, color=black, ] (-3,4)--(-3,2); \draw[line width=1pt, color=black, ] (3,0)--(3,2); \draw[line width=1pt, color=black, ->, >=stealth ] (3,4)--(3,2); \draw[color=black, fill=black] (-3,0) circle (.2); \draw[color=black, fill=black] (3,0) circle (.2); \draw[color=black, fill=black] (-3,4) circle (.2); \draw[color=black, fill=black] (3,4) circle (.2); \draw[line width=.5, color=blue] (-2.5,-.5)--(-2.5,.5); \draw[line width=.5, color=blue, ->, >=stealth] (-2.5,.5)--(0,.5) node[anchor=south]{$\rho_2$}; \draw[line width=.5, color=blue, ->, >=stealth] (0,.5)--(2.3,.5); \draw[line width=.5, color=blue, ->, >=stealth] (2.5,.5)--(2.5, 2); \draw[line width=.5, color=blue,] (2.5,2)--(2.5,4.5); \draw[line width=.5, color=blue, ->, >=stealth] (2.5,4.5)--(0, 4.5) node[anchor=south]{$\rho_1$}; \draw[line width=.5, color=blue, ->, >=stealth] (0,4.5)--(-2.5, 4.5); \end{tikzpicture} \end{center} \caption{Splitting a left-right path $\rho=\rho_1\circ \rho_2$ into a right-right path $\rho_1$ and a left-left path $\rho_2$.} \label{fig:leftrightsplit} \end{figure} Then we can write $\rho=\rho_{1}\circ\rho_{2}$ with $(\rho_{1},\rho_{2})_{\emptyset}$, $\rho_{1}$ a nonempty right-right path and $\rho_{2}$ a nonempty left-left path, as shown in Figure \ref{fig:leftrightsplit}. We compute \begin{align*} \Hol_{\rho}^{ \alpha} \circ \Hol_{\rho}^{ \beta} &\overset{\eqref{eq:HolonomyRecursionStandard}}{=} \Hol_{\rho_{1}}^{ \alpha_{(1)}} \circ \Hol_{\rho_{2}}^{ \alpha_{(2)}} \circ \Hol_{\rho_{1}}^{ \beta_{(1)}} \circ \Hol_{\rho_{2}}^{ \beta_{(2)}} \overset{\eqref{eq:NonOverlap}}{=} \Hol_{\rho_{1}}^{ \alpha_{(1)}}\circ \Hol_{\rho_{1}}^{ \beta_{(1)}} \circ \Hol_{\rho_{2}}^{ \alpha_{(2)}} \circ \Hol_{\rho_{2}}^{ \beta_{(2)}} \\ &\overset{\eqref{eq:RightRightHolonomy},\eqref{eq:LeftLeftHolonomy}}{=} \Hol_{\rho_{1}}^{ \alpha_{(1)} \cdot_{\overline{\mathcal{H}}_{R}(H)}\beta_{(1)}} \circ \Hol_{\rho_{2}}^{ \alpha_{(2)} \cdot_{\mathcal{H}_{R}(H)}\beta_{(2)}} \overset{\eqref{eq:ComultHeisenbergDoubleAlgebraHom},\eqref{eq:HolonomyRecursionStandard}}{=} \Hol_{\rho}^{ \alpha \cdot_{D(H)^{*}} \beta} \end{align*} For right-left paths we proceed analogously and split $\rho$ into a left-left path $\rho_{1}$ and a right-right path $\rho_{2}$. Here we use~\eqref{eq:ComultHeisenbergDoubleOppositeAlgebraHom} instead of~\eqref{eq:ComultHeisenbergDoubleAlgebraHom}. If $\rho$ is a left-left path which is not of the form $e^{\pm d}$ or $e^{\pm d'}$, we can write $\rho=\rho_{1}\circ\rho_{2}$ such that $(\rho_{1},\rho_{2})_{\emptyset}$ and either \begin{itemize} \item $\rho_{1}$ is a left-left path and $\rho_{2}$ is a left-right path, or \item $\rho_{1}$ is a left-right path and $\rho_{2}$ is a left-left path. \end{itemize} The calculation is again analogous, but we use Remark~\ref{remark:HeisenbergDoubleComoduleAlgebra}.\ref{remarkPoint:RightHeisenbergComoduleAlgebra} instead of~\eqref{eq:ComultHeisenbergDoubleAlgebraHom}. The proof for right-right paths follows analogously from Remark~\ref{remark:HeisenbergDoubleComoduleAlgebra}.\ref{remarkPoint:RightHeisenbergAlternativeComoduleAlgebra}.
\\ \\ Proof of~\eqref{eq:LeftJoint}: Let $(\rho_{1},\rho_{2})_{\prec}$ with $\rho_{1}=\rho\circ e^{a}\circ\rho_{1}'$ and $\rho_{2}=\rho\circ e^{b}\circ\rho_{2}'$ such that $\rho$ is ribbon and ends to the right of its target. W.l.o.g. we can assume $e^{a}\in\left\{ e^{R}, e^{R}\circ e^{-s}, e^{-t}\circ e^{L} \right\}$, since we can reverse the orientation of $e$. The possible combinations of $a$ and $b$ then are: \begin{align*} (e^{a},e^{b})\in \left\{ (e^{R},e^{\overline{d}}), (e^{R}, e^{-t}), (e^{\overline{d}}, e^{-t}) \right\} \end{align*} In all of these cases, direct calculations show \begin{align} \Hol_{e^{a}}^{h \oo \alpha} \circ \Hol_{e^{b}}^{k \oo \beta} &= \langle \alpha_{(1)} , S(k_{(2)}) \rangle \Hol_{e^{b}}^{k_{(4)} \oo \beta} \Hol_{e^{a}}^{k_{(1)}hS(k_{(3)}) \oo \alpha_{(2)}} \nonumber \\ &= \Hol_{e^{b}}^{ \left( k \oo \beta \right) \vartriangleleft X \oo 1} \Hol_{e^{a}}^{ \left( h \oo \alpha \right) \vartriangleleft \varepsilon \oo S(x)} \label{eq:LeftJointBasic} \end{align} We now consider the tensor product $D(H)^{*} \oo D(H)^{*}$ of $D(H)^{*}$ as $D(H)^{*}$-left comodule with the left regular coaction from Example \ref{definition:CoregularAction}. As $R^{(1)} \oo R^{(2)}:=\varepsilon \oo x \oo X \oo 1\in D(H) \oo D(H)$ is the $R$-matrix for $D(H)$ from \eqref{eq:Rmatrix}, the braiding \begin{align*} c: D(H)^{*} \oo D(H)^{*}&\to D(H)^{*} \oo D(H)^{*}\\ \alpha \oo \beta &\mapsto \left( \beta \vartriangleleft R^{(2)} \right) \oo \left( \alpha \vartriangleleft S(R^{(1)}) \right) \end{align*} is a $D(H)^{*}$-comodule morphism. Concretely, this means that for $\alpha,\beta\in D(H)^{*}$ \begin{align} &\alpha_{(1)}\beta_{(1)} \oo \left( \beta_{(2)} \vartriangleleft R^{(2)} \right) \oo \left( \alpha_{(2)} \vartriangleleft S(R^{(1)}) \right) \nonumber \\&\;=\left(\left( \beta \vartriangleleft R^{(2)} \right)_{(1)} \cdot \left( \alpha \vartriangleleft S(R^{(1)}) \right)_{(1)}\right) \oo \left( \beta \vartriangleleft R^{(2)} \right)_{(2)} \oo \left( \alpha \vartriangleleft S(R^{(1)}) \right)_{(2)} \label{eq:BraidingDH*Comodule} \end{align} For the paths $\rho\circ e^{a}, \rho\circ e^{b}$ and $\alpha,\beta\in D(H)^{*}$ we now compute \begin{align} \Hol_{\rho \circ e^{a}}^{\alpha} \circ \Hol_{\rho\circ e^{b}}^{\beta} &\overset{\eqref{eq:HolonomyRecursionStandard}}{=} \Hol_{\rho}^{\alpha_{(1)}} \Hol_{e^{a}}^{\alpha_{(2)}} \Hol_{\rho}^{\beta_{(1)}} \Hol_{e^{b}}^{\beta_{(2)}} \overset{\eqref{eq:NonOverlap}}{=} \Hol_{\rho}^{\alpha_{(1)}}\Hol_{\rho}^{\beta_{(1)}} \Hol_{e^{a}}^{\alpha_{(2)}} \Hol_{e^{b}}^{\beta_{(2)}} \nonumber \\&\overset{\eqref{eq:LeftRightHolonomy}}{=} \Hol_{\rho}^{\alpha_{(1)}\beta_{(1)}} \Hol_{e^{a}}^{\alpha_{(2)}} \Hol_{e^{b}}^{\beta_{(2)}} \overset{\eqref{eq:LeftJointBasic}}{=} \Hol_{\rho}^{\alpha_{(1)}\beta_{(1)}} \Hol_{e^{b}}^{\beta_{(2)} \vartriangleleft R^{(2)}} \Hol_{e^{a}}^{\alpha_{(2)} \vartriangleleft S(R^{(1)})}\nonumber \\&\overset{\eqref{eq:BraidingDH*Comodule}}{=} \Hol_{\rho}^{\left( \beta \vartriangleleft R^{(2)} \right)_{(1)} \cdot \left( \alpha \vartriangleleft S(R^{(1)}) \right)_{(1)}} \Hol_{e^{b}}^{\left( \beta \vartriangleleft R^{(2)} \right)_{(2)}} \Hol_{e^{a}}^{\left( \alpha \vartriangleleft S(R^{(1)}) \right)_{(2)}}\nonumber \\&\overset{\eqref{eq:LeftRightHolonomy}}{=} \Hol_{\rho}^{\left( \beta \vartriangleleft R^{(2)} \right)_{(1)}} \Hol_{\rho}^{\left( \alpha \vartriangleleft S(R^{(1)}) \right)_{(1)}} \Hol_{e^{b}}^{\left( \beta \vartriangleleft R^{(2)} \right)_{(2)}} \Hol_{e^{a}}^{\left( \alpha \vartriangleleft S(R^{(1)}) \right)_{(2)}}\nonumber 
\\ & \overset{\eqref{eq:NonOverlap}}{=} \Hol_{\rho}^{\left( \beta \vartriangleleft R^{(2)} \right)_{(1)}} \Hol_{e^{b}}^{\left( \beta \vartriangleleft R^{(2)} \right)_{(2)}} \Hol_{\rho}^{\left( \alpha \vartriangleleft S(R^{(1)}) \right)_{(1)}} \Hol_{e^{a}}^{\left( \alpha \vartriangleleft S(R^{(1)}) \right)_{(2)}}\nonumber \\ &\overset{\eqref{eq:HolonomyRecursionStandard}}{=} \Hol_{\rho\circ e^{b}}^{\beta \vartriangleleft R^{(2)}} \Hol_{\rho\circ e^{a}}^{\alpha \vartriangleleft S(R^{(1)}) } \label{eq:LeftJointSecond} \end{align} In the last step we also used the following identity for $g\in D(H), \alpha\in D(H)^{*}$ \begin{align} \left( \alpha_{(1)} \vartriangleleft g \right) \oo \alpha_{(2)} = \left( \alpha \vartriangleleft g \right)_{(1)} \oo \left( \alpha \vartriangleleft g \right)_{(2)} \label{eq:RightCoactionLeftActionCommute} \end{align} Finally, we obtain for the paths $\rho\circ e^{a}\circ \rho_{1}',\rho\circ e^{b}\circ \rho_{2}'$: \begin{align*} \Hol_{\rho\circ e^{a}\circ \rho_{1}'}^{\alpha} \Hol_{\rho\circ e^{b}\circ \rho_{2}'}^{ \beta} &= \Hol_{\rho\circ e^{a}}^{\alpha_{(1)}} \Hol_{\rho_{1}'}^{\alpha_{(2)}} \Hol_{\rho\circ e^{b}}^{\beta_{(1)}} \Hol_{\rho_{2}'}^{\beta_{(2)}} \overset{\eqref{eq:NonOverlap}}{=} \Hol_{\rho\circ e^{a}}^{\alpha_{(1)}} \Hol_{\rho\circ e^{b}}^{\beta_{(1)}} \Hol_{\rho_{2}'}^{\beta_{(2)}} \Hol_{\rho_{1}'}^{\alpha_{(2)}} \\&\overset{\eqref{eq:LeftJointSecond}}{=} \Hol_{\rho\circ e^{b}}^{\beta_{(1)} \vartriangleleft R^{(2)}} \Hol_{\rho\circ e^{a}}^{\alpha_{(1)} \vartriangleleft S(R^{(1)})} \Hol_{\rho_{2}'}^{\beta_{(2)}} \Hol_{\rho_{1}'}^{\alpha_{(2)}} \\&\overset{\eqref{eq:NonOverlap}}{=} \Hol_{\rho\circ e^{b}}^{\beta_{(1)} \vartriangleleft R^{(2)}} \Hol_{\rho_{2}'}^{\beta_{(2)}} \Hol_{\rho\circ e^{a}}^{\alpha_{(1)} \vartriangleleft S(R^{(1)})} \Hol_{\rho_{1}'}^{\alpha_{(2)}} \\&\overset{\eqref{eq:RightCoactionLeftActionCommute}}{=} \Hol_{\rho\circ e^{b}}^{\left(\beta \vartriangleleft R^{(2)}\right)_{(1)}} \Hol_{\rho_{2}'}^{\left(\beta \vartriangleleft R^{(2)}\right)_{(2)}} \Hol_{\rho\circ e^{a}}^{\left(\alpha \vartriangleleft S(R^{(1)})\right)_{(1)}} \Hol_{\rho_{1}'}^{\left(\alpha \vartriangleleft S(R^{(1)})\right)_{(2)}} \\ &\overset{\eqref{eq:HolonomyRecursionStandard}}{=} \Hol_{\rho\circ e^{b}\circ \rho_{2}'}^{ \beta \vartriangleleft R^{(2)} } \Hol_{\rho\circ e^{a} \circ \rho_{1}'}^{\alpha \vartriangleleft S(R^{(1)})} \end{align*} This shows the identity~\eqref{eq:LeftJoint}. Equation~\eqref{eq:RightJoint} follows by an analogous proof. \\ \\ Proof of \eqref{eq:MiddleJoint}. If $(\rho,\gamma)_{\succ\prec}$, then there are paths $\rho_{1},\rho_{2},\gamma_{1},\gamma_{2}$ such that $(\rho_{1},\gamma_{1})_{\succ}$, $(\rho_{2},\gamma_{2})_{\prec}$ and $\rho_{1}$ and $\gamma_{1}$ are disjoint to $\rho_{2},\gamma_{2}$. The claim follows by writing \begin{align*} \Hol_{\rho}^{ \alpha} = \Hol_{\rho_{1}}^{ \alpha_{(1)}} \circ \Hol_{\rho_{2}}^{ \alpha_{(2)}},\quad \Hol_{\gamma}^{ \beta} = \Hol_{\gamma_{1}}^{ \beta_{(1)}} \circ \Hol_{\gamma_{2}}^{ \beta_{(2)}} \end{align*} and using the equations~\eqref{eq:NonOverlap}, \eqref{eq:LeftJoint} and~\eqref{eq:RightJoint}. \\ \end{proof} We now explain how the holonomy along a ribbon path $\rho:s_{1}\to s_{2}$ creates excitations at the end points of that ribbon path in more detail. We will first describe the commutation relations of the vertex and face operators at $s_{1}$ and $s_{2}$ with the holonomy along $\rho$, generalizing the commutation relations~(B41) and~(B42) for Kitaev models based on groups from \cite{BD}.
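To illustrate what is being generalized (a standard special case, included only for orientation and possibly with different conventions than \cite{Ki,BD}): for $H=\C[G]$ with $h,m,g\in G$ and $\delta_{g}\in \mathrm{Fun}(G)=H^{*}$, the triangle operators of Definition~\ref{definition:HolonomyBasic} reduce to \begin{align*} \Hol_{e^{t}}^{h \oo \delta_{g}}(m) = \delta_{g,1}\, hm, \qquad \Hol_{e^{R}}^{h \oo \delta_{g}}(m) = \delta_{g,m}\, m, \end{align*} so that the holonomy $\Hol_{\rho}^{h \oo \delta_{g}}$ along a ribbon path $\rho$ plays the role of the ribbon operator denoted $F^{h,g}_{\rho}$ in \cite{Ki,BD}.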
\begin{lemma} \label{lemma:RibbonHolonomyCommutators} Let $\rho:s_{1}^{\eta_{1}}\to s_{2}^{\eta_{2}}$ a simple path between disjoint sites $s_{1}$ and $s_{2}$ and $\eta_{1},\eta_{2}\in \left\{ L,R \right\}$. Then the holonomy along $\rho$ satisfies the following commutation relations with the vertex and face operators at $s_{1}$ and $s_{2}$: \begin{align} &B_{s_{1}}^{\beta} \circ A_{s_{1}}^{k} \circ \Hol_{\rho}^{h \oo \alpha} = \Hol_{\rho}^{(\beta_{(1)} \oo k_{(2)}) \vartriangleright\left( h \oo \alpha \right) } \circ B_{s_{1}}^{\beta_{(2)}} \circ A_{s_{1}}^{k_{(1)}}, \quad \text{if $\eta_{1}=R$} \label{eq:HolonomyCommutatorsStartRight} \\ &B_{s_{1}}^{\beta} \circ A_{s_{1}}^{k} \circ \Hol_{\rho}^{h \oo \alpha} = \Hol_{\rho}^{\left( \beta_{(2)} \oo k_{(1)} \right) \vartriangleright \left( h \oo \alpha \right)} \circ B_{s_{1}}^{\beta_{(1)}} \circ A_{s_{1}}^{k_{(2)}}, \quad \text{if $\eta_{1}=L$} \label{eq:HolonomyCommutatorsStartLeft} \\ &B_{s_{2}}^{\beta} \circ A_{s_{2}}^{k} \circ \Hol_{\rho}^{h \oo \alpha} = \Hol_{\rho}^{ \left( h \oo \alpha \right) \vartriangleleft S\left( \beta_{(2)} \oo k_{(1)} \right)} \circ B_{s_{2}}^{\beta_{(1)}} \circ A_{s_{2}}^{k_{(2)}},\quad \text{if $\eta_{2}=L$} \label{eq:HolonomyCommutatorsEndLeft} \\ &B_{s_{2}}^{\beta} \circ A_{s_{2}}^{k} \circ \Hol_{\rho}^{h \oo \alpha} = \Hol_{\rho}^{\left( h \oo \alpha \right) \vartriangleleft S(\beta_{(1)} \oo k_{(2)})} \circ B_{s_{2}}^{\beta_{(2)}} \circ A_{s_{2}}^{k_{(1)}},\quad \text{if $\eta_{2}=R$} \label{eq:HolonomyCommutatorsEndRight} \end{align} \end{lemma} \begin{proof} We only show~\eqref{eq:HolonomyCommutatorsEndRight}, the rest follows analogously. First note, that we can prove the identity separately for $\beta=\varepsilon$ and $k\in H$ and for $k=1$ and $\beta\in H^{*}$. Let $v$ be the vertex path of $s_{2}$. Then we have $(\rho,v^{-1})_{\prec}$. By writing $A_{s_{2}^{k}}= \Hol_{v}^{k \oo \varepsilon} $ we obtain \begin{align} \Hol_{v}^{k \oo \varepsilon} \Hol_{\rho}^{h \oo \alpha} &\overset{\eqref{eq:HolonomyReversal}}{=} \Hol_{v^{-1}}^{S(k) \oo \varepsilon} \Hol_{\rho}^{h \oo \alpha} \overset{\eqref{eq:LeftJoint}}{=} \Hol_{\rho}^{\left(h \oo \alpha\right) \vartriangleleft R^{(1)}} \Hol_{v^{-1}}^{\left(S(k) \oo \varepsilon\right) \vartriangleleft R^{(2)}} \\ &\overset{\eqref{eq:HolonomyReversal}}{=} \Hol_{\rho}^{\left(h \oo \alpha\right) \vartriangleleft R^{(1)}} \Hol_{v}^{S(R^{(2)}) \vartriangleright \left(k \oo \varepsilon\right)} \label{eq:1} \end{align} We now compute \begin{align*} \left(\left( h \oo \alpha \right) \vartriangleleft R^{(1)}\right) \oo \left( S(R^{(2)}) \vartriangleright (k \oo \varepsilon)\right) &= \left(\left( h \oo \alpha \right) \vartriangleleft \varepsilon \oo x\right) \oo \langle S(X) \oo 1 , k_{(2)} \oo \varepsilon \rangle \left(k_{(1)} \oo \varepsilon\right) \\ &= \left(\left( h \oo \alpha \right) \vartriangleleft \varepsilon \oo S(k_{(2)})\right) \oo \left(k_{(1)} \oo \varepsilon\right) \end{align*} By inserting this into~\eqref{eq:1} we obtain~\eqref{eq:HolonomyCommutatorsEndRight} for $\beta=\varepsilon$. The result for $k=1, \beta\in H^{*}$ follows analogously by writing $B_{s_{2}}^{\beta}= \Hol_{f}^{1 \oo \beta} $, where $f$ is the face path of $s_{2}$ and using $(f,\rho)_{\prec}$. \end{proof} \begin{corollary} Let $\rho:s_{1}\to s_{2}$ a simple path between disjoint vertices. 
Then $\Hol_{\rho}$ defines module homomorphisms \begin{align} D(H)^{*}_{ \vartriangleright} &\oo \HSpace_{s_{1}} \to \HSpace_{s_{1}} \quad&&\text{if $\rho$ starts to the left of $s_{1}$} \label{eq:HolonomyModuleLeftStart} \\ D(H)^{*}_{ \vartriangleright} &\oo^{cop} \HSpace_{s_{1}} \to \HSpace_{s_{1}}\quad&&\text{if $\rho$ starts to the right of $s_{1}$}\\ D(H)^{*}_{ \vartriangleleft} &\oo \HSpace_{s_{2}} \to \HSpace_{s_{2}}\quad&&\text{if $\rho$ ends to the left of $s_{2}$}\\ D(H)^{*}_{ \vartriangleleft} &\oo^{cop} \HSpace_{s_{2}} \to \HSpace_{s_{2}}\quad&&\text{if $\rho$ ends to the right of $s_{2}$} \label{eq:HolonomyModuleRightEnd} \end{align} Here $ \oo $ is the tensor product of $D(H)$-modules and $ \oo^{cop} $ the tensor product of $D(H)^{cop}$-modules, $\HSpace_{s_{i}}$ is the extended space with the $D(H)$-action defined by the site $s_{i}$, $D(H)^{*}_{ \vartriangleright}$ is the vector space $D(H)^{*}$ with the left regular action of $D(H)$ and $D(H)^{*}_{ \vartriangleleft}$ is the vector space $D(H)^{*}$ with the left action of $D(H)$ defined by~\eqref{eq:RightCoregularLeftAction}. \label{corollary:HolonomyModuleHom} \end{corollary} \begin{proof} We show the claim~\eqref{eq:HolonomyModuleLeftStart}. The action of $\beta \oo k\in D(H)$ on $(h \oo \alpha) \oo m\in D(H)^{*}_{ \vartriangleright} \oo \HSpace_{s_{1}}$ is given by \begin{align*} \left( \beta_{(2)} \oo k_{(1)} \vartriangleright\left( h \oo \alpha \right) \right) \oo \left( B_{s_{1}}^{\beta_{(1)}} \circ A_{s_{1}}^{k_{(2)}} (m) \right). \end{align*} and the action of $D(H)$ on $m\in \HSpace_{s_{1}}$ is given by \begin{align*} B_{s_{1}}^{\beta}\circ A_{s_{1}}^{k} (m) \end{align*} The claim that $\Hol_{\rho}$ is a module homomorphism then simply translates to the identity~\eqref{eq:HolonomyCommutatorsStartLeft}. The other three claims follow analogously from Lemma~\ref{lemma:RibbonHolonomyCommutators}. \end{proof} We now use this corollary to show how holonomies generate and fuse excitations. We first note that Theorem of Artin-Wedderburn (Theorem~\ref{theorem:ArtinWedderburn}) implies, that the dual $D(H)^{*}$ of the Drinfel'd double with left and right regular action decomposes as a $D(H)$-bimodule into \begin{align} \varphi: D(H)^{*} \xrightarrow{\sim} \bigoplus_{d \in \mathrm{Irr}\left( D(H) \right))} d \oo d^{*}. \label{eq:Drinfel'dDualArtinWedderburn} \end{align} Here $d$ runs through a set of representatives of irreducible $D(H)$-modules. Combining this with Corollary~\ref{corollary:HolonomyModuleHom}, we obtain the following statement, illustrated by Figure~\ref{figure:HolonomyFusion}. \begin{corollary} \label{corollary:HolonomyFusion} Let $\rho: s_{1} \to s_{2}$ a simple left-right path, $M_{1}, M_{2}$ $D(H)-modules$, $d$ a simple $D(H)$-module and $h \oo \alpha \in D(H)^{*}$ in the $d \oo d^{*}$-component of the decomposition in~\eqref{eq:Drinfel'dDualArtinWedderburn}. Then $\Hol_{\rho}^{h \oo \alpha} :\HSpace \to \HSpace$ restricts to a map \begin{align} \Hol_{\rho}^{h \oo \alpha} : \HSpace\left( s_{1},M_{1},s_{2},M_{2} \right) \to \HSpace \left( s_{1},d \oo M_{1}, s_{2}, M_{2} \oo d^{*} \right) \label{eq:HolonomyFusion} \end{align} \end{corollary} \begin{proof} Consider $D(H)^{*}$ with the left coregular action $ \vartriangleright$ of $D(H)$ from~\eqref{eq:LeftCoregularActionDrinfel'd}, $\HSpace$ with the left $D(H)$-action defined by the site $s_{1}$ and $D(H)^{*} \oo \HSpace$ the tensor product of these $D(H)$-modules in $D(H)\mathrm{-Mod}$. 
Then~\eqref{eq:HolonomyModuleLeftStart} states that
\begin{align*}
\Hol_{\rho}: D(H)^{*}_{ \vartriangleright} \oo \HSpace_{s_{1}} \to \HSpace_{s_{1}}
\end{align*}
is a homomorphism of $D(H)$-modules. After restricting $\Hol_{\rho}$ to the submodule $d \oo d^{*}\subseteq D(H)^{*}$ with the $D(H)$-left action on $d$ and the submodule $\HSpace(s_{1},M_{1})\subseteq \HSpace$, we obtain the corestriction
\begin{align*}
\Hol_{\rho} : (d \oo d^{*}) \oo \HSpace(s_{1} ,M_{1}) \to \HSpace(s_{1}, d \oo M_{1}).
\end{align*}
Now consider instead the $D(H)$-action on $D(H)^{*}$ from~\eqref{eq:RightCoregularLeftAction} and the $D(H)$-action on $\HSpace$ defined by the site $s_{2}$. Again $\HSpace \oo D(H)^{*}$ is the tensor product of these $D(H)$-modules in $D(H)\mathrm{-Mod}$. Then~\eqref{eq:HolonomyModuleRightEnd} states that
\begin{align*}
\Hol_{\rho}: \HSpace \oo D(H)^{*} \to \HSpace
\end{align*}
is a homomorphism of $D(H)$-modules. After restricting $\Hol_{\rho}$ to the submodule $d \oo d^{*}\subseteq D(H)^{*}$ with the $D(H)$-left action on $d^{*}$ and the submodule $\HSpace(s_{2},M_{2})\subseteq \HSpace$, we obtain the corestriction
\begin{align*}
\Hol_{\rho} : \HSpace(s_{2} ,M_{2}) \oo (d \oo d^{*}) \to \HSpace(s_{2}, M_{2} \oo d^{*}).
\end{align*}
Combining these two restrictions and inserting $h \oo \alpha \in d \oo d^{*} \subseteq D(H)^{*}$, we obtain~\eqref{eq:HolonomyFusion}. \end{proof}
\begin{figure}[H] \centering \scalebox{0.5}{\input{HolonomyFusion.pdf_tex}} \caption{The holonomy $\Hol_{\rho}^{h \oo \alpha} $ along a left-right path $\rho$ for $h \oo \alpha \in d \oo d^{*} \subseteq D(H)^{*}$ creates excitations of type $d$ and $d^{*}$ at the endpoints of $\rho$ and fuses them with the preexisting excitations at these points.} \label{figure:HolonomyFusion} \end{figure}
\section{Turaev-Viro-TQFTs with topological boundaries and topological defects} \label{section:FSVSummary}
It was shown in~\cite{BK12} that the Kitaev model for a semisimple, finite-dimensional Hopf algebra $H$ reproduces the two-dimensional parts of the Turaev-Viro-TQFT based on the finitely semisimple fusion category $H\mathrm{-Mod}$. If one starts with a Turaev-Viro-TQFT based on a finitely semisimple fusion category $\mathcal{A}$, one can use a fiber functor on $\mathcal{A}$ to construct a Hopf algebra $H$ with $\mathcal{A} \cong H\mathrm{-Mod}$. This Tannaka duality relates the input data for Kitaev models and Turaev-Viro-TQFTs. In this section we summarize the categorical data for Turaev-Viro-TQFTs with topological boundaries and topological surface defects from \cite{FSV}. We then equip this categorical data with fiber functors to obtain corresponding Hopf-algebraic data. In Section \ref{sec:defect model} we will use this Hopf algebraic data to define a Kitaev model with topological defects and topological boundaries.
\begin{definition} \label{definition:TVTQFTDefect} A Turaev-Viro-TQFT with topological boundary conditions and topological surface defects as defined in \cite{FSV} consists of the following data: \begin{itemize} \item The center $\mathcal{Z}(\mathcal{A})$ of a fusion category $\mathcal{A}$ for every three-dimensional region. \item A fusion category $\mathcal{W}_{a}$ (resp. $\mathcal{W}_{d}$) for every topological boundary $a$ (resp. topological surface defect $d$). \item A braided equivalence $\widetilde{F}_{\to a}:\mathcal{Z}(\mathcal{A}) \to \mathcal{Z}(\mathcal{W}_{a})$ for every pair of a topological boundary labeled with $\mathcal{W}_{a}$ and an adjacent bulk labeled with $\mathcal{Z(A)}$.
\item A braided equivalence $\widetilde{F}_{\to d \leftarrow}: \mathcal{Z}(\mathcal{A}_1)\boxtimes \mathcal{Z}(\mathcal{A}_2)^{rev} \to \mathcal{Z}(\mathcal{W}_{d})$ for every topological surface defect labeled with $\mathcal{W}_{d}$ separating three-dimensional regions labeled with $\mathcal{Z}(\mathcal{A}_{1})$ and $\mathcal{Z}(\mathcal{A}_2)$. The functor $\widetilde{F}_{\to d \leftarrow}$ is a composite of two braided monoidal functors $\widetilde{F}_{\to d}:\mathcal{Z}\left( \mathcal{A}_{1} \right) \to \mathcal{Z}(\mathcal{W}_{d})$ and $\widetilde{F}_{d \leftarrow}:\mathcal{Z}\left( \mathcal{A}_{2} \right)^{rev} \to \mathcal{Z}(\mathcal{W}_{d})$. \end{itemize} \end{definition} By composing $\widetilde{F}_{\to a}$ with the forgetful functor $\mathcal{Z}(\mathcal{W}_{a})\to \mathcal{W}_{a}$, one obtains a functor $F_{\to a}: \mathcal{Z(A)} \to \mathcal{W}_{a}$. Similarly one obtains functors $F_{\to d \leftarrow}, F_{\to d}, F_{d \leftarrow}$ by composing $\widetilde{F}_{\to d \leftarrow}, \widetilde{F}_{\to d}, \widetilde{F}_{d\leftarrow}$ with the forgetful functor $\mathcal{Z}(\mathcal{W}_{d}) \to \mathcal{W}_{d}$. A concrete example for the data for a defect and adjacent bulk regions in Definition~\ref{definition:TVTQFTDefect} is given by Hopf algebras: \begin{example} \label{example:FSVCategories} Let $H_{1}$ and $H_{2}$ be semisimple finite-dimensional Hopf algebras over $\C$, and $I\in H_{1} \oo H_{2} \oo H_{1} \oo H_{2}$ a twist for $H_{1} \oo H_{2}$. Then $\mathcal{A}_1 = H_{1}\mathrm{-Mod}, \mathcal{A}_2=H_{2}\mathrm{-Mod}$ and $ \mathcal{W}_{d} = \left( H_{1} \oo H_{2} \right)_{I}\mathrm{-Mod}$ are fusion categories. There is a braided equivalence \begin{align} \mathcal{Z}(\mathcal{A}_1) \boxtimes \mathcal{Z}(\mathcal{A}_2)^{rev}\cong D(H_{1} \oo H_{2})\mathrm{-Mod} \to D\left( \left( H_{1} \oo H_{2} \right)_{I} \right)\mathrm{-Mod} \cong \mathcal{Z}(\mathcal{W}_{d}). \label{eq:FSVExampleTransport} \end{align} \end{example} In a Turaev-Viro TQFT with topological boundaries and defects, the objects of the categories $ \mathcal{W}_{a},\mathcal{Z(A)}$ and $\mathcal{W}_{d}$ appear as insertions in Wilson lines. The behavior of the insertions when moved, fused and braided was analyzed in~\cite{FSV} to derive the data in~Definition~\ref{definition:TVTQFTDefect}. These insertions are the counterpart of the excitations in Kitaev models, as shown in~\cite[Theorem~6.1]{BK12}. From now on we therefore refer to them as excitations. We now describe the conditions from~\cite{FSV} for these excitations. \begin{itemize} \item[\textbf{Excitations:}] Excitations in a bulk labeled with $\mathcal{Z(A)}$ are objects of $\mathcal{Z(A)}$. Excitations in a topological surface defect (resp. boundary) labeled with $\mathcal{W}_{d}$ (resp. $\mathcal{W}_{a}$) are objects of $\mathcal{W}_{d}$ (resp. $\mathcal{W}_{a}$). \item[\textbf{Fusion in the bulk:}] Excitations can be moved around inside a bulk region. If an excitation $M_{1}\in \mathcal{Z(A)}$ is moved to a spot occupied by an excitation $M_{2}\in \mathcal{Z(A)}$, then $M_{1}$ and $M_{2}$ are fused to an excitation of type $M_{1} \oo M_{2}$ or to an excitation of type $M_{2} \oo M_{1}$. The first occurs if $M_{1}$ is moved to the left of $M_{2}$, the second if $M_{1}$ is moved to the right of $M_{2}$. Analogously, excitations can also be moved around inside a topological surface defect and inside a topological boundary and analogous rules apply. 
\item[\textbf{Fusion in the boundary:}] Moving an excitation $M\in \mathcal{Z(A)}$ from the bulk region labeled with $\mathcal{Z(A)}$ to an adjacent boundary labeled with $\mathcal{W}_{a}$ turns $M$ into a boundary excitation $F_{\to a}M$. \\ Fusing two excitations $M_{1},M_{2}\in \mathcal{Z(A)}$ in the bulk region and transporting them to the boundary results in a boundary excitation $F_{\to a}\left( M_{1} \oo M_{2} \right)$. Moving $M_{1}$ and $M_{2}$ to the boundary separately and fusing them there instead results in an excitation of type $F_{\to a}M_{1} \oo F_{\to a}M_2$. These two objects are related by the natural isomorphism $F_{\to a}\left( M_{1} \oo M_{2} \right) \cong F_{\to a}M_{1} \oo F_{\to a} M_2$ that is part of the coherence data of the monoidal functor $F_{\to a}$.
\item[\textbf{Fusion in the defect:}] Moving an excitation $M\in \mathcal{Z}(\mathcal{A}_{1})$ from the bulk region labeled with $\mathcal{Z}(\mathcal{A}_{1})$ to the topological defect labeled with $\mathcal{W}_{d}$ is subject to analogous rules as moving an excitation from a bulk region into a boundary. Moving $M$ into the defect turns $M$ into $F_{\to d}M$. Again, fusing two excitations $M_{1},M_{2}\in \mathcal{Z}(\mathcal{A}_{1})$ in the bulk region and transporting them to the defect results in a defect excitation $F_{\to d}\left( M_{1} \oo M_{2} \right)$. Moving $M_{1}$ and $M_{2}$ to the defect separately and fusing them there instead results in an excitation of type $F_{\to d}M_{1} \oo F_{\to d}M_2$. These two procedures only differ up to the natural isomorphism $F_{\to d}\left( M_{1} \oo M_{2} \right) \cong F_{\to d}M_{1} \oo F_{\to d} M_2$ given by the monoidal functor $F_{\to d}$. Moving an excitation $N\in \mathcal{Z}(\mathcal{A}_{2})$ from the bulk region labeled with $\mathcal{Z}(\mathcal{A}_{2})$ to the topological defect labeled with $\mathcal{W}_{d}$ is subject to the same rules, where one replaces the monoidal functor $F_{\to d}$ with $F_{d \leftarrow}$.
\item[\textbf{Braiding in the bulk:}] Let $M_{1},M_{2}\in \mathcal{Z(A)}$ be excitations in a bulk region labeled with $\mathcal{Z(A)}$. One can fuse these excitations into $M_{1} \oo M_{2}$ or $M_{2} \oo M_{1}$ by moving $M_{1}$ to $M_{2}$. The order of the tensor product depends on the relative position of the excitations. Moving $M_{1}$ around $M_{2}$ first relates the two products with the braiding $M_{1} \oo M_{2}\to M_{2} \oo M_{1}$ in $\mathcal{Z(A)}$.
\item[\textbf{Braiding in the boundary:}] Moving an excitation $M\in \mathcal{Z(A)}$ from a bulk region to an excitation $N\in \mathcal{W}_{a}$ in the boundary fuses the excitations to $F_{\to a}M \oo N$ or $N \oo F_{\to a}M $. The order of the tensor product depends on the relative position of the excitations. Moving $M$ around $N$ first relates the two products with the half-braiding $N \oo F_{\to a}M \to F_{\to a}M \oo N$ of $\mathcal{Z}(\mathcal{W}_{a})$ with $\mathcal{W}_{a}$.
\item[\textbf{Braiding in the defect:}] Braiding bulk excitations with defect excitations follows the same rules as braiding bulk excitations with boundary excitations, \emph{mutatis mutandis}. One simply needs to exchange $\mathcal{W}_{a}$ with $\mathcal{W}_{d}$, $\mathcal{Z}(\mathcal{A})$ with either $\mathcal{Z}(\mathcal{A}_{1})$ or $\mathcal{Z}(\mathcal{A}_{2})^{rev}$ and $F_{\to a}$ with either $F_{\to d}$ or $F_{d \leftarrow}$.
\end{itemize}
To construct a Kitaev model with topological boundaries and defects we require the Hopf algebraic counterparts corresponding to the categorical data in Definition~\ref{definition:TVTQFTDefect}.
We determine these counterparts by utilizing Tannaka duality as follows. We take one fiber functor $\omega_{\mathcal{C}}$ for every fusion category $\mathcal{C}$ labeling a topological boundary, topological defect or three-dimensional region. If a bulk region labeled with $\mathcal{A}$ is adjacent to a boundary labeled with $\mathcal{W}_{a}$, we obtain two fiber functors for $\mathcal{Z(A)}$, given by the two sequences
\begin{align*}
\mathcal{Z(A)} \to \mathcal{A} \overset{\omega_{\mathcal{A}}}{\to} \mathrm{Vect}_{\C}, \qquad \mathcal{Z(A)} \overset{F_{\to a}}{\to} \mathcal{Z}(\mathcal{W}_{a}) \to \mathcal{W}_{a} \overset{\omega_{\mathcal{W}_{a}}}{\to} \mathrm{Vect}_{\C}.
\end{align*}
To determine a Hopf-algebraic counterpart to the braided equivalences $F_{\to a}$ we need to impose a compatibility on these fiber functors (cf. Lemma~\ref{lemma:FibreFunctorTensorEquivalence}). Concretely, we require that these two functors are naturally isomorphic as tensor functors. For defects we similarly obtain two fiber functors
\begin{align*}
&\mathcal{Z}(\mathcal{A}_{1}) \boxtimes \mathcal{Z}(\mathcal{A}_{2}) \to \mathcal{A}_{1} \boxtimes \mathcal{A}_{2} \overset{\omega_{\mathcal{A}_{1}}\boxtimes \omega_{\mathcal{A}_{2}} }{\to} \mathrm{Vect}_{\C}\boxtimes \mathrm{Vect}_{\C} \overset{\otimes}{\to} \mathrm{Vect}_{\C} \\
&\mathcal{Z}(\mathcal{A}_{1}) \boxtimes \mathcal{Z}(\mathcal{A}_{2}) \overset{F_{\to d \leftarrow}}{\to} \mathcal{W}_{d} \overset{\omega_{\mathcal{W}_{d}}}{\to} \mathrm{Vect}_{\C}
\end{align*}
and again we impose that these are isomorphic as tensor functors.
\begin{proposition} The data from Definition~\ref{definition:TVTQFTDefect} is in Tannaka duality with the following Hopf-algebraic data (summarized in the table below the proof): \begin{compactenum} \item A semisimple, finite-dimensional Hopf algebra $H=\mathrm{End}(\omega_{\mathcal{A}})$ for every three-dimensional region labeled with $\mathcal{A}$. Excitations in this region are modules over $D(H)$. \item A semisimple, finite-dimensional Hopf algebra $K=\mathrm{End}(\omega_{\mathcal{W}})$ for every topological boundary or topological surface defect labeled with $\mathcal{W}$. Excitations in the topological boundary or topological surface defect are modules over $K$. \item For every pair $(H,K)$ of a topological boundary labeled with $K$ and an adjacent three-dimensional region labeled with $H$: A twist $J$ of $D(H)$ such that $D(H)_{J} \cong D(K)$ as quasitriangular Hopf algebras. \item For every triple $(H_{1},H_{2},K)$ of a topological surface defect labeled with $K$ separating two three-dimensional regions labeled with $H_{1}$ and $H_{2}$: A twist $J$ of $D(H_{1}) \oo D(H_{2})^{rev}$ such that $\left( D(H_{1}) \oo D(H_{2})^{rev} \right)_{J} \cong D(K)$ as quasitriangular Hopf algebras, where $H^{rev}$ denotes the quasitriangular Hopf algebra $H$ with the opposite $R$-matrix. \end{compactenum} \label{proposition:FSVKitaevTranslation} \end{proposition}
\begin{proof} In Lemma~\ref{lemma:FibreFunctorClassic} we have seen that fusion categories correspond to semisimple, finite-dimensional Hopf algebras and that centers of fusion categories correspond to the Drinfel'd doubles of said Hopf algebras. In Lemma~\ref{lemma:FibreFunctorTensorEquivalence} we have seen that braided tensor equivalences correspond to braided twist equivalences of Hopf algebras. Applying both lemmata to the data for Turaev-Viro-TQFTs with boundary conditions and surface defects yields the proposition.
\end{proof}
\begin{center}
\begin{tabular}{|p{3cm}|p{5.5cm}|p{6.5cm}|} \hline &TV-TQFT & Kitaev model \\\hline\hline bulk region& center $\mathcal{Z}(\mathcal{A})$ of fusion category $\mathcal{A}$ & Drinfel'd double $D(H)$ of semisimple, finite-dimensional Hopf algebra $H$ \\\hline boundary component & fusion category $\mathcal{W}_{a}$ &semisimple, finite-dimensional Hopf algebra $K$ \\\hline bulk $\to$ boundary &braided tensor equivalence $\mathcal{Z}(\mathcal{A}) \to \mathcal{Z}(\mathcal{W}_{a})$ & twist $J$ and isomorphism $D(H)_{J} \cong D(K)$ of quasitriangular Hopf algebras \\\hline codimension-one defect & fusion category $\mathcal{W}_{d}$ & semisimple, finite-dimensional Hopf algebra $K_{d}$ \\\hline bulk $\to$ defect & braided tensor equivalence $\mathcal{Z}(\mathcal{A}_{1})\boxtimes \mathcal{Z}(\mathcal{A}_{2})^{rev} \to \mathcal{Z}(\mathcal{W}_{d})$ & twist $F$ and isomorphism $(D(H_{1}) \oo D(H_{2})^{rev} )_{F} \cong D(K_{d}) $ of quasitriangular Hopf algebras \\\hline \end{tabular}
\end{center}
\section{Conditions for a Kitaev model with topological defects and boundaries} \label{section:TranslationKitaev}
In this section we derive the counterparts of the conditions in Section \ref{section:FSVSummary} for Kitaev models with topological defects and topological boundaries. We start by highlighting how the Hopf algebra $D(H)$ appears in the Kitaev model without defects and boundaries. In the model with defects and boundaries, the Drinfel'd double $D(H)$ is then replaced with the Hopf algebraic data from Proposition~\ref{proposition:FSVKitaevTranslation} at the edges and sites in defects and boundaries. In the Kitaev model without defects and boundaries the Hopf algebra $D(H)$ appears in the following manner: \begin{itemize} \item There are local operators which define a $D(H)$-module structure on the extended space $\HSpace$. These are the face and vertex operators for a site $s$ from Theorem~\ref{theorem:Drinfel'dActionHSpace}. \item For suitable paths $\rho:u\to v$, the holonomy is a $D(H)$-module homomorphism $\Hol_{\rho}: D(H)^{*} \oo \HSpace \to \HSpace$, where $ \oo $ is the tensor product of $D(H)$-modules (see Corollary~\ref{corollary:HolonomyModuleHom}), $D(H)^{*}$ is equipped with the left coregular action and $\HSpace$ with the action defined by the starting site $u$ of $\rho$. \end{itemize} Note that the coalgebra structure of $D(H)$ only plays a role for the second point, as the tensor product $D(H)^* \oo \HSpace$ is defined in terms of its comultiplication $\Delta$. We now give a slightly simpler description of the Hopf algebraic data from Proposition~\ref{proposition:FSVKitaevTranslation} for bulk regions, boundaries and defects. We no longer list the Hopf algebras $D(K)$ for boundaries from Proposition~\ref{proposition:FSVKitaevTranslation}.3, as they are isomorphic to $D(H)_{F}$. We similarly omit the Hopf algebras $D(K)$ for defects. A Kitaev model with topological boundaries and defects is then given by the following algebraic data: \begin{compactenum}[({D}1):] \item A Drinfel'd double $D(H_{b})$ of a semisimple finite-dimensional Hopf algebra $H_{b}$ for every bulk region $b$. \label{D1} \item A twist $F_{a}$ of $D(H_{b})$ for every boundary $a$ adjacent to a bulk region $b$. \label{D2} \item A twist $F_{d}$ of $D(H_{b_{1}}) \oo D(H_{b_{2}})$ for every defect $d$ separating two bulk regions $b_{1}$ and $b_{2}$.
\label{D3} \end{compactenum}
We now compare with the Kitaev model without defects and boundaries and propose that a Kitaev model with defects and boundaries should have the following local operators: \begin{compactenum}[({O}1):] \item For every bulk region $b$ and every boundary adjacent to $b$: Local operators which define a $D(H_{b})$-module structure on the extended space $\HSpace$. \label{O1} \item For every defect $d$ separating two bulk regions $b_{1}$ and $b_{2}$: Local operators which define a $D(H_{b_{1}}) \oo D(H_{b_{2}})$-module structure on the extended space $\HSpace$. \label{O2} \end{compactenum}
In the model with defects and boundaries, we will use holonomies to describe the creation and fusion of excitations. As in the Kitaev model without defects, we define them as endomorphisms of $\HSpace$ assigned to paths in the underlying thickened ribbon graph $D(\Gamma)$. As we will only consider paths inside a bulk region and adjacent boundary or defect lines, these holonomies depend on the data assigned to the bulk regions. For a path $\rho$ in a bulk region $b$ and adjacent boundary and defect lines, we define them as linear maps
\begin{align*}
\widetilde{\Hol}_{b,\rho}: D(H_{b})^{*} \oo \HSpace \to \HSpace.
\end{align*}
As in Corollary \ref{corollary:HolonomyModuleHom}, we require that this map is a $D(H_{b})$-module homomorphism, where $D(H_b)^*$ is equipped with one of the two $D(H_b)$-module structures associated with $\rho$ and the tensor product in $ D(H_b)^*\oo\HSpace $ depends on whether $\rho$ starts or ends in a bulk region, a boundary or a defect: \begin{compactenum}[({H}1):] \item \label{H1} If $\rho$ starts in the bulk region $b$, then $ \oo $ is the tensor product of $D(H_{b})$-modules and $\HSpace$ is equipped with the local $D(H_{b})$-module structure associated to the starting site of $\rho$ from~(O\ref{O1}). \item If $\rho$ starts in a boundary $a$ adjacent to $b$, then $ \oo$ is the tensor product of $D(H_{b})_{F_{a}}$-modules and $\HSpace$ is equipped with the local $D(H_{b})$-module structure associated to the starting site of $\rho$ from~(O\ref{O1}). \label{H2} \item If $\rho$ starts in a defect $d$ separating bulk regions $b_{1}$ and $b_{2}$ and $b=b_{1}$, then $D(H_{b})=D(H_{b_{1}})\subseteq D(H_{b_{1}}) \oo D(H_{b_{2}})$ is a subalgebra. We equip $D(H_{b})^{*}$ with the trivial $D(H_{b_{2}})$-action and equip $\HSpace$ with the local $D(H_{b_{1}}) \oo D(H_{b_{2}})$-module structure associated to the starting site of $\rho$ from~(O\ref{O2}). We let $ \oo$ be the tensor product of $\left( D(H_{b_{1}}) \oo D(H_{b_{2}}) \right)_{F_{d}}$-modules. \label{H3} \end{compactenum}
In Section~\ref{section:FSVSummary} we also described the movement of excitations in a Turaev-Viro-TQFT. The holonomy along a path $\rho$ in the Kitaev model without defects and boundaries generates excitations at the end points of $\rho$ (see Corollary~\ref{corollary:HolonomyFusion}), but it does not move excitations from one site to another. To implement the process of moving excitations we associate to each simple path $\rho$ in $D(\Gamma)$ that traverses a bulk region $b$ and adjacent defects and boundaries an additional operator on $\HSpace$, the \emph{transport operator} $T_{\rho}$.
In analogy to the conditions for moving excitations in a Turaev-Viro TQFT from Section~\ref{section:FSVSummary} we require that $T_{\rho}$ fulfills the following conditions: \begin{compactenum}[({T}1):] \item \textbf{Fusion:} If $\rho$ ends to the left of a site (see Definition~\ref{def:leftorright}), then $T_{\rho}$ restricts to a map:
\begin{align}
T_{\rho}:\HSpace(u,M,v,N) \to \HSpace(u,\C, v,M \oo N) \label{eq:fusion}
\end{align}
\label{T1} Here $\HSpace(u,M,v,N)$ is defined analogously to \eqref{eq:MultipleExcitationSpace}, $M$ and $N$ are $D(H_{b})$-modules and $ \oo $ is the tensor product from (H\ref{H1}), (H\ref{H2}) or~(H\ref{H3}). If $\rho$ instead ends to the right of a site, then the order of the tensor product is reversed. This is illustrated in Figure~\ref{figure:FuseExcitations}. \item \textbf{Braiding:} For a path $\rho:u\to v$ in a bulk region $b$ such that $\rho$ ends to the left of $v$, there is another path $\rho':u\to v$, constructed from $\rho$, that ends to the right of $v$ such that
\begin{align}
T_{\rho'}= T_{\rho} \circ R \label{eq:KitaevBraidingCondition}
\end{align}
Here $R$ denotes an action of the $R$-matrix of $D(H_{b})$ on the sites $u$ and $v$, if $v$ is in the bulk region $b$. If instead $v$ is in an adjacent boundary or defect, then $R$ denotes an action of the twisted $R$-matrix of $D(H_{b})$. \label{T2} \end{compactenum}
\begin{figure}[H] \centering \scalebox{0.35}{\input{FuseExcitations.pdf_tex}} \caption{Moving an excitation of type $M$ along $\rho:u\to v$ to an excitation of type $N$ fuses them to an excitation of type $M \oo N$, as $\rho$ ends to the left of $v$. Moving an excitation of type $P$ along $\gamma$ fuses $P$ with $Q$ to an excitation of type $Q \oo P$, as $\gamma:w\to x$ ends to the right of $x$.} \label{figure:FuseExcitations} \end{figure}
\section{Kitaev models with topological defects and boundaries} \label{sec:defect model}
Codimension 1 and 2 defect structures in Kitaev lattice models based on group algebras of finite groups were first investigated in \cite{BD}. The defect data for Kitaev lattice models based on unitary quantum groupoids was then identified in \cite{KK}, and an explicit Kitaev model with defects based on Hopf algebras was then constructed in \cite{K}. In \cite{KMM} defects were generalized to higher Kitaev models based on crossed modules for semisimple Hopf algebras. In this section, we construct a Kitaev model with \emph{topological} defects and boundaries that satisfies the conditions derived in \cite{FSV}. More specifically, we construct a Kitaev model with defects and boundaries which exhibits the structures and satisfies the conditions from Section~\ref{section:TranslationKitaev}, which are the Hopf algebraic counterparts of the conditions in \cite{FSV}. In a bulk region our model behaves identically to the Kitaev model without defects and boundaries. We also show that the model without defects and boundaries can be obtained as a special case of our model by choosing trivial defects. We show that the processes of moving and braiding excitations described in \cite{FSV} can be implemented in our model as linear endomorphisms of its extended space assigned to the relevant paths. Just like the Kitaev model without defects and boundaries, our model is based on a ribbon graph. We require additional structure on these ribbon graphs to describe oriented surfaces with defects and boundaries. This structure is introduced in Subsection~\ref{subsec:ribdefect}.
In Subsection~\ref{subsection:HilbertSpace}, we then define the extended space of the model, its vertex and face operators and the holonomies. We also give some basic properties of these operators. In Subsection~\ref{subsec:fusionbraiding} we describe the fusion, the transport and the braiding of excitations. We show that our implementation of these processes has the properties formulated in \cite{FSV} and summarized in Section~\ref{section:TranslationKitaev}. In Subsection \ref{subsec:transparent} we then show that our model reduces to the Kitaev model without defects and boundaries for trivial choices of defect data.
\subsection{Ribbon graphs with defect lines and boundaries} \label{subsec:ribdefect}
For the Kitaev model with defects and boundaries we require additional structure on ribbon graphs to describe defect and boundary lines. These defect and boundary lines are \emph{oriented cyclic} subgraphs, i.e. connected subgraphs in which every vertex has exactly one incoming and one outgoing edge end. Examples of ribbon graphs with defects and boundaries are given in Figure~\ref{fig:boundarygraph} and Figure~\ref{fig:defect}.
\begin{definition} \label{definition:RibbonGraphDefect} A \emph{ribbon graph with defects and boundaries} is a ribbon graph $\Gamma=(V,E)$ with the following additional data: \begin{itemize} \item a non-empty family $\left( \Gamma_{b} =(V_{b},E_{b})\right)_{b \in B}$ of connected subgraphs, the \emph{bulk regions} of $\Gamma$, with edges in these subgraphs called \emph{bulk edges}, \item a family $\left( \Gamma_{a} \right)_{a \in A}$ of connected, oriented cyclic subgraphs, the \emph{boundary lines}, with vertices and edges in these subgraphs called \emph{boundary vertices} and \emph{boundary edges}, \item for every $a\in A$ a bulk region $b_{a} \in B$, called the \emph{bulk region bordered by $a$}, \item a family $\left( \Gamma_{d} \right)_{d \in D}$ of connected, oriented cyclic subgraphs, the \emph{defect lines}, with vertices and edges in these subgraphs called \emph{defect vertices} and \emph{defect edges}, \item for every $d\in D$ a pair $(b_{dL},b_{dR}) \in B \times B$ of bulk regions with $b_{dL}\neq b_{dR}$, the \emph{bulk region to the left (resp. to the right) of $d$}. \end{itemize} This data has to satisfy the following conditions: \begin{itemize} \item For $d,d' \in D \cup A$ with $d\neq d'$, the graphs $\Gamma_{d}$ and $\Gamma_{d'}$ do not share any vertices. \item Every edge of $\Gamma$ is in exactly one of the subgraphs $\Gamma_{i}$ for $i\in B \cup D \cup A$. \item For $d \in D$, every vertex $v$ of $\Gamma_{d}$ is also a vertex of $\Gamma_{b_{dL}}$ and $\Gamma_{b_{dR}}$. The edges of $v$ in $\Gamma_{b_{dL}}$ (resp. $\Gamma_{b_{dR}}$) are to the left (resp. to the right) of the orientation of $\Gamma_{d}$. \item Every vertex is in at least one and at most two bulk regions. If a vertex $v$ is in two bulk regions $b_1, b_2$, then there is a defect line $d$ containing $v$ such that $b_1,b_2$ are to the left and right of $d$. \item For $a \in A$, every vertex $v$ of $\Gamma_{a}$ is also a vertex of $\Gamma_{b_{a}}$. The edges of $v$ in $\Gamma_{b_{a}}$ are to the left of the orientation of $\Gamma_{a}$.
\end{itemize} \end{definition} \begin{figure}[H] \begin{center} \begin{tikzpicture}[scale=.7] \draw[line width=1pt, color=black, ->, >=stealth] (-2,0)--(0,0); \draw[line width=1pt, color=black, ] (2,0)--(0,0); \draw[line width=1pt, color=black, ->, >=stealth] (-2,4)--(0,4); \draw[line width=1pt, color=black, ] (2,4)--(0,4); \draw[line width=1pt, color=black, ->, >=stealth] (-2,0)--(-2,2); \draw[line width=1pt, color=black, ] (-2,4)--(-2,2); \draw[line width=1pt, color=black, ] (2,0)--(2,2); \draw[line width=1pt, color=black, ->, >=stealth ] (2,4)--(2,2); \draw[color=black, fill=black] (-2,0) circle (.2); \draw[color=black, fill=black] (2,0) circle (.2); \draw[color=black, fill=black] (-2,4) circle (.2); \draw[color=black, fill=black] (2,4) circle (.2); \draw[line width=1pt, color=black, ] (-2,0)--(-1.5,.5) node[anchor=west]{$s$}; \draw[line width=1pt, color=black, ->, >=stealth] (-4,-2)--(-3,-1); \draw[line width=1pt, color=black,] (-2,0)--(-3,-1); \draw[line width=1pt, color=black, ->, >=stealth] (2,0)--(3,-1); \draw[line width=1pt, color=black,] (4,-2)--(3,-1); \draw[line width=1pt, color=black, ->, >=stealth] (4,6)--(3,5); \draw[line width=1pt, color=black,] (3,5)--(2,4); \draw[line width=1pt, color=black, ->, >=stealth] (-2,4)--(-3,5); \draw[line width=1pt, color=black,] (-3,5)--(-4,6); \draw[color=red, fill=red] (-4,-2) circle (.2); \draw[color=red, fill=red] (-4,6) circle (.2); \draw[color=red, fill=red] (4,-2) circle (.2); \draw[color=red, fill=red] (4,6) circle (.2); \draw[line width=1.5pt, color=red] (-4,-2)--(-3,-1.5) node[anchor=west]{$t$}; \draw[line width=1.5pt, color=red, ->, >=stealth] (-4,6)--(-4,2); \draw[line width=1.5pt, color=red,] (-4,-2)--(-4,2); \draw[line width=1.5pt, color=red, ->, >=stealth] (4,-2)--(4,2); \draw[line width=1.5pt, color=red,] (4,2)--(4,6); \draw[line width=1.5pt, color=red, ->, >=stealth] (4,6)--(0,6); \draw[line width=1.5pt, color=red,] (0,6)--(-4,6); \draw[line width=1.5pt, color=red, ->, >=stealth] (-4,-2)--(0,-2); \draw[line width=1.5pt, color=red,] (0,-2)--(4,-2); \node at (0,2) {$b$}; \node at (4.2,2)[color=red, anchor=west]{$c$}; \end{tikzpicture} \end{center} \caption{Bulk graph $b$ with a boundary component $a$, a bulk site $s$ and a boundary site $t$.} \label{fig:boundarygraph} \end{figure} \begin{figure}[H] \begin{center} \begin{tikzpicture}[scale=.7] \draw[line width=1.5pt, color=red, ->,>=stealth] (-2,-2)--(0,-2); \draw[line width=1.5pt, color=red] (0,-2)--(2,-2); \draw[line width=1.5pt, color=red, ->,>=stealth] (2,-2)--(2,0); \draw[line width=1.5pt, color=red] (2,0)--(2,2); \draw[line width=1.5pt, color=red, ->,>=stealth] (2,2)--(0,2); \draw[line width=1.5pt, color=red] (0,2)--(-2,2); \draw[line width=1.5pt, color=red, ->,>=stealth] (-2,2)--(-2,0); \draw[line width=1.5pt, color=red] (-2,0)--(-2,-2); \draw[line width=1pt, color=black, ->,>=stealth] (0,0)--(-1,1); \draw[line width=1pt, color=black] (-1,1)--(-2,2); \draw[line width=1pt, color=black, ->,>=stealth] (0,0)--(1,1); \draw[line width=1pt, color=black] (1,1)--(2,2); \draw[line width=1pt, color=black, ->,>=stealth] (-2,-2)--(-1,-1); \draw[line width=1pt, color=black] (-1,-1)--(0,0); \draw[line width=1pt, color=black, ->,>=stealth] (2,-2)--(1,-1); \draw[line width=1pt, color=black] (1,-1)--(0,0); \draw[line width=1pt, color=blue, ->,>=stealth] (2,2)--(1,3); \draw[line width=1pt, color=blue] (1,3)--(0,4); \draw[line width=1pt, color=blue, ->,>=stealth] (-2,2)--(-1,3); \draw[line width=1pt, color=blue] (-1,3)--(0,4); \draw[line width=1pt, color=blue, ->,>=stealth] (-4,0)--(-3,1); 
\draw[line width=1pt, color=blue] (-3,1)--(-2,2);
\draw[line width=1pt, color=blue, ->,>=stealth] (-2,-2)--(-3,-1);
\draw[line width=1pt, color=blue] (-3,-1)--(-4,0);
\draw[line width=1pt, color=blue, ->,>=stealth] (0,-4)--(-1,-3);
\draw[line width=1pt, color=blue] (-1,-3)--(-2,-2);
\draw[line width=1pt, color=blue, ->,>=stealth] (2,-2)--(1,-3);
\draw[line width=1pt, color=blue] (1,-3)--(0,-4);
\draw[line width=1pt, color=blue, ->,>=stealth] (2,-2)--(3,-1);
\draw[line width=1pt, color=blue] (3,-1)--(4,0);
\draw[line width=1pt, color=blue, ->,>=stealth] (4,0)--(3,1);
\draw[line width=1pt, color=blue] (3,1)--(2,2);
\draw[color=black, line width=1pt] (-2,-2)--(-1, -1.5) node[anchor=west]{$s_L$};
\draw[color=blue, line width=1pt] (-2,-2)--(-1, -2.5) node[anchor=west]{$s_R$};
\draw[color=red, fill=red] (-2,-2) circle (.2);
\draw[color=red, fill=red] (-2,2) circle (.2);
\draw[color=red, fill=red] (2,-2) circle (.2);
\draw[color=red, fill=red] (2,2) circle (.2);
\draw[color=black, fill=black] (0,0) circle (.2);
\draw[color=blue, fill=blue] (0,4) circle (.2);
\draw[color=blue, fill=blue] (0,-4) circle (.2);
\draw[color=blue, fill=blue] (-4,0) circle (.2);
\draw[color=blue, fill=blue] (4,0) circle (.2);
\node at (1,0)[color=black]{$b_{dL}$};
\node at (2.5,0)[anchor=west, color=blue]{$b_{dR}$};
\node at (-2,0)[color=red, anchor=east]{$d$};
\end{tikzpicture} \end{center} \caption{Defect $d$ separating bulk regions $b_{dL}$ and $b_{dR}$ with a pair $(s_L,s_R)$ of defect sites.} \label{fig:defect} \end{figure}
The additional structure of a ribbon graph with defects and boundaries leads to a distinction of bulk sites, boundary sites and defect sites in its thickening.
\begin{definition}\label{def:sitesdefect} Let $\Gamma$ be a ribbon graph with defects and boundaries and $D(\Gamma)$ its thickening. \begin{compactenum} \item A \emph{bulk site} in $b\in B$ is a site $s$ whose face path and vertex path only consist of edges in $D(\Gamma_{b})$. \item A \emph{pair of defect sites} in $d\in D$ at a defect vertex $v$ is the pair $(s_{L},s_{R})$ of sites $s_{L}$ and $s_{R}$ at $v$ such that $s_{L}$ ($s_{R}$) is directly to the left (to the right) of the outgoing defect edge, viewed in the direction of its orientation, as shown in Figure~\ref{fig:defect}. The sites $s_{L}$ and $s_{R}$ are called \emph{defect sites} in $b_{dL}$ and $b_{dR}$, respectively. \item A \emph{boundary site} in $a \in A$ is a site $t$ at a boundary vertex $v$ in $\Gamma_{a}$ directly to the left of the outgoing boundary edge of $v$, as shown in Figure~\ref{fig:boundarygraph}. \end{compactenum} \end{definition}
Paths in a ribbon graph with defects and boundaries can be characterized by their behavior with respect to defect lines and boundaries. In the following, we only consider simple paths that do not cross over defect or boundary edges.
\begin{definition}\label{def:permissible} A simple path $\rho$ in $D(\Gamma)$ is called \emph{permissible} if $\rho$ does not contain edges of the form $e^{\pm t},e^{\pm s}$ with $e$ a defect or boundary edge. \end{definition}
\subsection{The extended space and operators} \label{subsection:HilbertSpace}
In this subsection we define the basic structures of the Kitaev model with defects and boundaries: the extended space, the vertex and face operators, holonomies and a transport operator. We then show that this model fulfills the conditions stated in Section~\ref{section:TranslationKitaev}.
The ingredients for the model are a ribbon graph with boundaries and defects $\Gamma$ together with the algebraic data derived in Section \ref{section:TranslationKitaev}, (D\ref{D1})-(D\ref{D3}): \begin{itemize} \item A semisimple, finite-dimensional Hopf algebra $H_{b}$ for every bulk region $b\in B$. \item A twist $F_{a}$ of $D(H_{b})$ for every boundary line $a \in A$. \item A twist $F_{d}$ of $D(H_{b_{dL}}) \oo D(H_{b_{dR}})$ for every defect line $d \in D$. \end{itemize}
We now associate a twist $F$ of $D(H_{b})$ to every site $s$ in a bulk region $b$, the \emph{twist at $s$}. We distinguish whether $s$ is (i) a bulk site in $b$, (ii) a boundary site in a boundary line $a$ adjacent to $b$ or (iii) a defect site in a defect line $d$ adjacent to $b$. In case (i) $F$ is the trivial twist. In case (ii), $F$ is the twist $F_{a}$. In case (iii) we have $b\in\left\{b_{dL}, b_{dR} \right\}$ and $F$ is the projection of the twist $F_{d}$ onto the Hopf subalgebra $D(H_{b}) \subseteq D(H_{b_{dL}}) \oo D(H_{b_{dR}})$ as in Example~\ref{example:ProjectedTwist}.
To construct the extended space, we assign to every edge $e\in E$ a vector space $H_{e}$, where \begin{itemize} \item $H_{e}=H_{b}$, if $e\in E_{b}$ is an edge in a bulk region $b\in B$. \item $H_{e}=H_{b_{a}}$, if $e\in E_{a}$ is a boundary edge in a boundary line $a\in A$. \item $H_{e}=H_{b_{dL}} \oo H_{b_{dR}}$, if $e\in E_{d}$ is a defect edge in a defect line $d\in D$. \end{itemize}
We define the \emph{extended space} of the model as the vector space
\begin{align}\label{eq:hilbdef}
\HSpace := \tensor_{e\in E}H_{e}
\end{align}
For every bulk region $b$, $h \oo \alpha\in D( H_b)^{*}$, and every simple path $\rho$ that is either (a) a permissible path in $b$ and the adjacent defect and boundary lines or (b) a vertex path around a defect vertex, we define a holonomy map
\begin{align}
\Hol_{b,\rho}^{h \oo \alpha } :\HSpace\to \HSpace. \label{eq:HolonomyBulk}
\end{align}
Note that the index $b$ is superfluous in case (a), but necessary in case (b) to distinguish the two bulk regions separated by a defect line. As in the case of a Kitaev model without defects, these holonomies are defined in terms of edge operators associated to the edges $e\in E$ of the underlying ribbon graph $\Gamma=(V,E)$, but we have to distinguish four cases: \begin{compactenum}[(i)] \item $e\in E_{b}$ or $e\in E_{a}$, where $a\in A$ is a boundary line adjacent to $b$, \item $e\in E_{d}$, where $b$ is the bulk region to the left of the defect line $d$, \item $e\in E_{d}$, where $b$ is the bulk region to the right of the defect line $d$, \item $e$ is in a different bulk region or in a defect or boundary line not adjacent to $b$. \end{compactenum} In all cases, we define the triangle operators as the holonomies $\Hol_{b,e^{\nu}}^{h \oo \alpha} $ along the paths $e^{\nu}$ for $\nu\in \left\{\pm s,\pm t,\pm L,\pm R \right\}$ and $h \oo \alpha\in D(H_b)^{*}$. The four cases differ only in the vector space $H_e$ assigned to the edge $e$. In case (i) one has $H_e=H_b$, in case (ii) $H_e=H_b\oo H_{b'}$, where $b'$ is the bulk region to the right of $d$, in case (iii) $H_e=H_{b'} \oo H_{b}$, where $b'$ is the bulk region to the left of $d$, and in case (iv) $H_e=H_{b'}$ or $H_e=H_{b'}\oo H_{b''}$ for $b\neq b'$ and $b\neq b''$. Case (iv) is only required to inductively define holonomies for vertex paths around a defect vertex, and the associated holonomy is taken to be trivial.
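This assignment of vector spaces is visible in Figure~\ref{fig:vertexdefect} below: there the bulk edges of the region $b_{dL}$ are labeled by elements $c,d,e\in H_{b_{dL}}$, the bulk edge of $b_{dR}$ by an element $h\in H_{b_{dR}}$, and the defect edges by elements $a \oo b$ and $f \oo g$ of $H_{b_{dL}} \oo H_{b_{dR}}$.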
As in the model without boundaries and defects, we require that the triangle operators act trivially on all tensor factors in $\HSpace=\bigotimes_{e\in E} H_e$ except the one associated with $e\in E$.
\begin{definition}\label{def:edgeopsdefect} The \emph{triangle operators} for an edge $e\in E$ are the holonomies $\Hol^{h\oo \alpha}_{b, e^\nu}$ defined by
\begin{align*}
\Hol_{b,e^{-\nu}}^{h \oo \alpha} := \Hol_{b,e^{\nu}}^{S(h \oo \alpha)}\qquad \nu\in \left\{ L,R,s,t \right\},
\end{align*}
where $S$ is the antipode of $D(H_{b})^{*}$, and by
\begin{description}
\item[case (i):] $m\in H_b$
\begin{align*}
\Hol_{b,e^{s}}^{h \oo \alpha} (m) &= \alpha(1) \cdot mh, & \Hol_{b,e^{-t}}^{h \oo \alpha} (m) &=\alpha(1) \cdot S(h)m \\
\Hol_{b,e^{R}}^{h \oo \alpha} (m) &= \varepsilon(h) \langle \alpha , m_{(2)} \rangle m_{(1)}, & \Hol_{b,e^{-L}}^{h \oo \alpha} (m) &= \varepsilon(h)\langle \alpha , S(m_{(1)}) \rangle m_{(2)},
\end{align*}
\item[case (ii):] $m\oo n\in H_b\oo H_{b'}$
\begin{align*}
\Hol_{b,e^{s}}^{h \oo \alpha} (m \oo n) &=\alpha(1) mh \oo n, & \Hol_{b,e^{t}}^{h \oo \alpha} (m \oo n) &=\alpha(1) hm \oo n \\
\Hol_{b,e^{L}}^{h \oo \alpha} (m \oo n) &=\varepsilon(h) \langle \alpha , m_{(1)} \rangle m_{(2)} \oo n, & \Hol_{b,e^{R}}^{h \oo \alpha} (m \oo n) &=\varepsilon(h)\alpha(1) m\oo n,
\end{align*}
\item[case (iii):] $m\oo n\in H_{b'}\oo H_{b}$
\begin{align*}
\Hol_{b,e^{s}}^{h \oo \alpha} (m \oo n) &=\alpha(1) m \oo nh, & \Hol_{b,e^{t}}^{h \oo \alpha} (m \oo n) &=\alpha(1) m \oo hn \\
\Hol_{b,e^{R}}^{h \oo \alpha} (m \oo n) &=\varepsilon(h) \langle \alpha , n_{(2)} \rangle m \oo n_{(1)}, & \Hol_{b,e^{L}}^{h \oo \alpha} (m \oo n) &=\varepsilon(h) \alpha(1) m \oo n,
\end{align*}
\item[case (iv):] $m\oo n\in H_{b'}\oo H_{b''}$
\begin{align*}
\Hol_{b,e^{\nu}}^{h \oo \alpha}(m\oo n) = \varepsilon(h)\alpha(1) m\oo n\quad \nu\in\{s,t,R,L\},
\end{align*}
\end{description}
\end{definition}
Note that the formulas for case (i) coincide with the formulas in Definition \ref{definition:HolonomyBasic} for the triangle operators in a model without defects and boundaries; Definition \ref{def:edgeopsdefect} thus generalizes the triangle operators of the model without defects and boundaries. As in the model without defects, see Definition \ref{definition:HolonomyComposite}, we define the holonomies along simple paths $\rho$ by decomposing them into subpaths that form a left joint.
\begin{definition} \label{definition:HolonomyCompositeDefect} Let $\rho=\rho_{1}\circ\rho_{2}$ be a simple path in reduced form with $\rho_{2}:s \to t^{\eta_{2}}$ and $\rho_{1}:t^{\eta_{1}}\to u$ with $\eta_{1},\eta_{2}\in \left\{ L,R \right\}$. Write $\beta:=h \oo \alpha\in D(H_{b})^{*}$ and $\Delta_{D(H_{b})^{*}}(\beta)=\beta_{(1)} \oo \beta_{(2)}$. Then we define
\begin{align}
&\Hol_{b,\rho}^{\beta} = \Hol_{b,\rho_{1}}^{\beta_{(1)}}\circ \Hol_{b,\rho_{2}}^{\beta_{(2)}} \quad &\text{if $(\rho_{1},\rho_{2})_{\emptyset}$ or $(\rho_{2},\rho_{1}^{-1})_{\prec}$} \label{eq:HolonomyRecursionStandardDefect} \\
&\Hol_{b,\rho}^{\beta} = \Hol_{b,\rho_{2}}^{\beta_{(2)}}\circ \Hol_{b,\rho_{1}}^{\beta_{(1)}} &\text{if $(\rho_{1}^{-1},\rho_{2})_{\prec}$} \label{eq:HolonomyRecursionLeftJoinDefect}
\end{align}
\end{definition}
By an argument that is fully analogous to the proof of Lemma \ref{lemma:HolonomyWellDefined} one shows that the resulting holonomy is independent of the decomposition of $\rho$ into subpaths $\rho_1,\rho_2$ in Definition \ref{definition:HolonomyCompositeDefect}.
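For orientation, we spell out the case-(i) formulas of Definition~\ref{def:edgeopsdefect} in the group-algebra case. If $H_{b}=\C[G]$ for a finite group $G$ and we evaluate on basis elements $m=x\in G$, with $h=g\in G$ and $\alpha=\delta_{y}\in \C^{G}\cong \C[G]^{*}$ the dual basis element of $y\in G$, they reduce to
\begin{align*}
\Hol_{b,e^{s}}^{g \oo \delta_{y}}(x) &= \delta_{y}(1)\, xg, & \Hol_{b,e^{-t}}^{g \oo \delta_{y}}(x) &= \delta_{y}(1)\, g^{-1}x, \\
\Hol_{b,e^{R}}^{g \oo \delta_{y}}(x) &= \delta_{y}(x)\, x, & \Hol_{b,e^{-L}}^{g \oo \delta_{y}}(x) &= \delta_{y}(x^{-1})\, x,
\end{align*}
i.e.\ to right and left multiplication by group elements and to projections onto group basis elements, as in the familiar edge operators of the Kitaev model for a group algebra.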
By considering vertex and face paths associated with sites in $\Gamma$, we can then define the associated vertex and face operators as the holonomies along these paths, thus generalizing Definition \ref{definition:VertexFaceOperators} for the model without defects and boundaries. We also associate generalized vertex and face operators to pairs of defect sites. \begin{definition}\label{def:vertexfacedefect} Let $\Gamma$ be a ribbon graph with defects and boundaries. \begin{enumerate} \item The \emph{vertex and face operator} for a site $s$ in a bulk region $b$ are the linear maps \begin{align} A_{b,s}^{h}=\Hol_{b,v}^{h \oo \varepsilon} :\, &\HSpace \to \HSpace & B_{b,s}^{\alpha}= \Hol_{b,f}^{1 \oo \alpha}:\, &\HSpace \to \HSpace, \label{eq:FaceOperatorBulk} \end{align} where $f$ and $v$ are the face and vertex path based at $s$ and $\alpha \oo h\in D(H_{b})$. We use the notation $BA_{b,s}^{\alpha \oo h}= B_{s}^{\alpha}A_{s}^{h} :\, \HSpace \to \HSpace$. \item The \emph{vertex and face operator} for a pair $(s_{L},s_{R})$ of defect sites in a defect line $d$ are \begin{align} BA_{d, (s_{L},s_{R})}^{h \oo k}:= BA_{b_{dL},s_{L}}^{h}\circ BA_{b_{dR},s_{R}}^{k}:\HSpace\to \HSpace, \label{eq:FaceVertexOperatorDefect} \end{align} where $h \oo k\in D(H_{b_{dL}}) \oo D(H_{b_{dR}})$. \end{enumerate} \end{definition} An example of the action of a vertex and face operator at a defect vertex vertex is given in Figure~\ref{fig:vertexdefect}. \begin{figure}[H] \begin{center} \begin{tikzpicture}[scale=.7] \begin{scope}[shift={(0,0)}] \draw[line width=1.5pt, color=red, ->,>=stealth] (-4,0)--(-2,0); \draw[line width=1.5pt, color=red] (-2,0)--(0,0); \draw[line width=1.5pt, color=red, ->,>=stealth] (0,0)--(2,0); \draw[line width=1.5pt, color=red] (2,0)--(5,0); \draw[line width=1pt, color=black,->,>=stealth] (0,4)--(0,2); \draw[line width=1pt, color=black,] (0,2) node[anchor=west]{$c$}--(0,0); \draw[line width=1pt, color=black,->,>=stealth] (0,4)--(2,4) node[anchor=south]{$d$}; \draw[line width=1pt, color=black,] (2,4)--(4,4); \draw[line width=1pt, color=black,->,>=stealth] (4,4)--(4,2) node[anchor=west]{$e$}; \draw[line width=1pt, color=black,] (4,2)--(4,0); \draw[line width=1pt, color=blue,->,>=stealth] (0,-2.5)node[anchor=north]{$h$}--(0,-1.5); \draw[line width=1pt, color=blue,] (0,-1.5)--(0,0); \draw[line width=1pt, color=black](0,0)--(.5,.5) node[anchor=west]{$s_L$}; \draw[color=red, fill=red] (0,0) circle (.2); \draw[color=red, fill=red] (4,0) circle (.2); \draw[color=black, fill=black] (0,4) circle (.2); \draw[color=black, fill=black] (4,4) circle (.2); \node at (2,0) [anchor=north]{$f\oo {\color{blue}g}$}; \node at (-2,0)[anchor=north]{$a\oo{\color{blue} b}$}; \end{scope} \draw[line width=1pt, color=black,->,>=stealth] (6,2)--(8,2); \node at (7,2)[anchor=south]{$BA^{\alpha\oo k}_{b,s_L}$}; \begin{scope}[shift={(12,0)}] \draw[line width=1.5pt, color=red, ->,>=stealth] (-4,0)--(-2,0); \draw[line width=1.5pt, color=red] (-2,0)--(0,0); \draw[line width=1.5pt, color=red, ->,>=stealth] (0,0)--(2,0); \draw[line width=1.5pt, color=red] (2,0)--(5,0); \draw[line width=1pt, color=black,->,>=stealth] (0,4)--(0,2); \draw[line width=1pt, color=black,] (0,2) node[anchor=west]{$k_{(2)}c_{(2)}$}--(0,0); \draw[line width=1pt, color=black,->,>=stealth] (0,4)--(2,4) node[anchor=south]{$d_{(1)}$}; \draw[line width=1pt, color=black,] (2,4)--(4,4); \draw[line width=1pt, color=black,->,>=stealth] (4,4)--(4,2) node[anchor=west]{$e_{(1)}$}; \draw[line width=1pt, color=black,] (4,2)--(4,0); \draw[line width=1pt, 
color=blue,->,>=stealth] (0,-2.5)node[anchor=north]{$h$}--(0,-1.5);
\draw[line width=1pt, color=blue,] (0,-1.5)--(0,0);
\draw[line width=1pt, color=black](0,0)--(.5,.5) node[anchor=west]{$s_L$};
\draw[color=red, fill=red] (0,0) circle (.2);
\draw[color=red, fill=red] (4,0) circle (.2);
\draw[color=black, fill=black] (0,4) circle (.2);
\draw[color=black, fill=black] (4,4) circle (.2);
\node at (2,0) [anchor=north]{$f_{(2)}S(k_{(4)})\oo {\color{blue}g}$};
\node at (-2,0)[anchor=north]{$k_{(3)}a\oo{\color{blue} b}$};
\node at (2,-4.5)[anchor=south]{$\langle \alpha, k_{(5)}S(f_{(1)})e_{(2)}d_{(2)}S(c_{(1)})S(k_{(1)})\rangle$};
\end{scope} \end{tikzpicture} \end{center} \caption{Action of the vertex and face operator at a defect vertex.} \label{fig:vertexdefect} \end{figure}
Note that the first part of Definition \ref{def:vertexfacedefect} considers general sites of $\Gamma$ and just specifies the bulk region in which they are located. This is not to be confused with the \emph{bulk sites} from Definition \ref{def:sitesdefect}. It follows directly from Definition \ref{def:edgeopsdefect} of the triangle operators and Definition \ref{definition:HolonomyCompositeDefect} of the holonomies that the vertex and face operators from Definition \ref{def:vertexfacedefect} reduce to the ones from Definition \ref{definition:VertexFaceOperators} whenever the site $s$ is a bulk site. Together with the definition of the extended space in \eqref{eq:hilbdef} this yields the following example.
\begin{example} \label{example:StandardModelSpecialCase} If the ribbon graph $\Gamma$ only has a single bulk region and no defect or boundary lines, then the extended space, the vertex and face operators and the holonomies of the Kitaev model with boundaries and defects coincide with those of the Kitaev model without boundaries and defects. \end{example}
After defining the generalized vertex and face operators, we can now show that our model satisfies the conditions~(O\ref{O1}) and~(O\ref{O2}) from Section \ref{section:TranslationKitaev}:
\begin{proposition} \label{proposition:OperatorActionDefect} Let $s,t$ be sites in $\Gamma$, $b\in B$ a bulk region, $a\in A$ a boundary line adjacent to $b$ and $d\in D$ a defect line adjacent to $b$. Then \begin{compactenum} \item $BA_{b,s}: D(H_{b}) \oo \HSpace\to \HSpace$ defines an action of $D(H_{b})$, if $s$ is a bulk site in $b$, a boundary site in $a$ or a defect site in $b$. \label{item1} \item $BA_{d,s_{L},s_{R}}: D(H_{b_{dL}}) \oo D(H_{b_{dR}}) \oo \HSpace \to \HSpace$ defines an action of $D(H_{b_{dL}}) \oo D(H_{b_{dR}})$, if $(s_{L},s_{R})$ is a pair of defect sites in $d$. \label{item3} \end{compactenum} \end{proposition}
\begin{proof} Proof of~\ref{item1}: For a bulk site or a boundary site $s$, the operators $A_{b,s},B_{b,s}$ are defined identically to the ones of a Kitaev model without defects and boundaries. They thus define an action of $D(H_{b})$, as proven in \cite{BMCA}. The proof is analogous for defect sites $s$, as the corresponding operators only act on the defect edges and the bulk edges in $b$ (cf. Figure~\ref{fig:vertexdefect}). The statement~\ref{item3} follows as a corollary of~\ref{item1} by using~\eqref{eq:FaceVertexOperatorDefect}. \end{proof}
We also obtain a direct generalization of the second statement in Theorem \ref{ht:siterep} on the vertex and face operators of disjoint sites. An analogous result holds for pairs of defect sites at a defect vertex.
\begin{proposition} Let $s,t$ be sites in bulk regions $b_{s}$ and $b_{t}$ and let $h\in D(H_{b_{s}}), k\in D(H_{b_{t}})$. Then $BA_{b_{s},s}^{h}$ and $BA_{b_{t},t}^{k}$ commute if either \begin{itemize} \item $s$ and $t$ are disjoint sites, or \item $(s,t)$ is a pair of defect sites. \end{itemize} \label{proposition:OperatorsCommute} \end{proposition}
\begin{proof} The proof for the first case is analogous to the one for Kitaev models without defects and boundaries, see for instance \cite[Lemma~5.9]{Me}. For the second case we note that for a pair of defect sites $(s,t)$ the map $BA_{b_{s},s}^{h}$ only acts on copies of $H_{b_{s}}$ in the tensor product $\HSpace = \tensor_{e\in E} H_{e}$, while $BA_{b_{t},t}^{k}$ only acts on copies of $H_{b_{t}}$. They thus commute trivially. \end{proof}
We will now investigate how holonomies along permissible paths in the thickening of a ribbon graph with defects and boundaries generate and fuse excitations at their endpoints. The first step is to study the interaction of these holonomies with the vertex and face operators for sites at the endpoints of the path. This yields a direct generalization of Lemma \ref{lemma:RibbonHolonomyCommutators} for models with defects. Just as Lemma~\ref{lemma:RibbonHolonomyCommutators}, it shows that the holonomy along such a path generates two inverse excitations at the path's start and target site and fuses them with the excitations already present at those sites.
\begin{corollary} Let $\rho:s_{1}^{\eta_{1}}\to s_{2}^{\eta_{2}}$ be a permissible path in the bulk region $b$ and adjacent defects and boundaries. Then the identities from Lemma~\ref{lemma:RibbonHolonomyCommutators} hold, i.e. we have
\begin{align}
&BA_{b,s_{1}}^{k}\circ \Hol_{b,\rho}^{\alpha} = \Hol_{b,\rho}^{ k_{(2)} \vartriangleright \alpha } \circ BA_{b,s_{1}}^{k_{(1)}}, \quad \text{if $\eta_{1}=R$} \label{eq:HolonomyCommutatorsStartRightDefect} \\
&BA_{b,s_{1}}^{k}\circ \Hol_{b,\rho}^{\alpha} = \Hol_{b,\rho}^{k_{(1)} \vartriangleright\alpha } \circ BA_{b,s_{1}}^{k_{(2)}} , \quad \text{if $\eta_{1}=L$} \label{eq:HolonomyCommutatorsStartLeftDefect} \\
&BA_{b,s_{2}}^{k} \circ \Hol_{b,\rho}^{\alpha} = \Hol_{b,\rho}^{ \alpha \vartriangleleft S\left( k_{(1)} \right)} \circ BA_{b,s_{2}}^{k_{(2)}},\quad \text{if $\eta_{2}=L$} \label{eq:HolonomyCommutatorsEndLeftDefect} \\
&BA_{b,s_{2}}^{k} \circ \Hol_{b,\rho}^{\alpha} = \Hol_{b,\rho}^{\alpha \vartriangleleft S(k_{(2)})} \circ BA_{b,s_{2}}^{k_{(1)}},\quad \text{if $\eta_{2}=R$}. \label{eq:HolonomyCommutatorsEndRightDefect}
\end{align}
\label{corollary:DefectBoundaryHolonomyFaceVertex} \end{corollary}
\begin{proof} The proof is analogous to the proof of Lemma~\ref{lemma:RibbonHolonomyCommutators}, since we can again write $A_{b,s_{i}}^{k}$ and $B_{b,s_{i}}^{\alpha}$ as holonomies along the vertex and face path of $s_{i}$. These paths again form left and right joints with the path $\rho$. Applying~\eqref{eq:LeftJoint} and~\eqref{eq:RightJoint} then produces the identities above. \end{proof}
Corollary~\ref{corollary:DefectBoundaryHolonomyFaceVertex} shows that the maps $\Hol_{b,\rho}^{ \alpha}$ do not satisfy the conditions (H\ref{H2}) and (H\ref{H3}) from Section \ref{section:TranslationKitaev} for paths $\rho$ that start or end at boundary or defect sites. This is not surprising, since in the definition of the holonomies we did not take into account the twists associated with defect and boundary components.
For instance, a path $\rho:s_{1}^{L}\to s_{2}$ starting to the left of a boundary site $s_{1}$ defines a module homomorphism $D(H_{b})^{*} \oo \HSpace\to \HSpace$, where $ \oo $ is the tensor product of $D(H_{b})$-modules (cf. Corollary~\ref{corollary:HolonomyModuleHom}). Instead we require module homomorphisms $D(H_{b})^{*} \oo_{F} \HSpace\to \HSpace$, where $F$ is the twist at $s_{1}$ and $ \oo_{F}$ the tensor product of $D(H_{b})_{F}$-modules. By Proposition \ref{proposition:FunctorTranslation}, the modules $D(H_b)^*\oo\HSpace$ and $D(H_b)^*\oo_F \HSpace$ are isomorphic as $D(H_b)$-modules, and the isomorphism is given by an action of the twist $F$.
For the holonomies, we have to use different twists for the cases where the path starts or ends at the left or right of a defect or boundary site. This is due to the fact that these cases are associated with four different $D(H_{b})$-module structures on $D(H_{b})^{*} \oo \HSpace$ that are given in Corollary~\ref{corollary:HolonomyModuleHom}. For each of these $D(H_b)$-module structures, there is a different isomorphism relating the tensor product to a twisted tensor product.
We start with paths that start or end at a boundary site. For this, let $\rho:s^{\sigma}\to t^{\tau}$ be a permissible path with $\sigma,\tau\in \left\{ L,R \right\}$ that starts or ends in the boundary line labeled with the twist $F$ of $D(H_{b})$ and denote $F_{L}=F$ and $F_{R}= F_{21}= F^{(2)} \oo F^{(1)}$.
\begin{itemize}
\item If the starting site $s^{\sigma}$ is a boundary site, we define
\begin{align}\label{eq:twistphidef}
\varphi_{s_{\sigma}}: &D(H_{b})^{*} \oo_{F} \HSpace \to D(H_{b})^{*}\oo \HSpace,\quad \alpha \oo n \mapsto \left(F_{\sigma}^{(-1)} \vartriangleright \alpha \right) \oo BA_{b,s}^{F_{\sigma}^{(-2)}}(n)
\end{align}
\item If the target site $t^{\tau}$ is a boundary site, we define
\begin{align}\label{eq:twistpsidef}
\psi_{t_{\tau}}: &D(H_{b})^{*} \oo_{F} \HSpace \to D(H_{b})^{*}\oo \HSpace,\quad \alpha \oo n \mapsto \left( \alpha \vartriangleleft S\left( F_{\tau}^{(-1)} \right)\right) \oo BA_{b,t}^{F_{\tau}^{(-2)}}(n)
\end{align}
\end{itemize}
We now consider paths that start or end at a defect site. For this, let $\rho:s^{\sigma}\to t^{\tau}$ be a permissible path with $\sigma,\tau\in \left\{ L,R \right\}$ that starts or ends on a defect $d$ labeled with the twist $F$ of $D(H_{b_{dL}})\oo D(H_{b_{dR}})$. Then $s\in\{s_R, s_L\}$, where $(s_{L},s_{R})$ is a pair of defect sites in $d$, or $t\in\{t_L, t_R\}$ for a pair $(t_{L},t_{R})$ of defect sites in $d$. Abusing notation, we write $ \vartriangleright, \vartriangleleft$ for the left and right coregular actions of $D(H_{b_{dL}}) \oo D(H_{b_{dR}})$ on the submodules $D(H_{b_{dL}})^{*} \oo 1 , 1 \oo D(H_{b_{dR}})^{*} \subseteq D(H_{b_{dL}})^{*} \oo D(H_{b_{dR}})^{*}$.
\begin{itemize}
\item If the starting site $s^{\sigma}$ is a defect site, we define
\begin{align}\label{eq:twistphidef2}
\varphi_{s_{\sigma}}: &D(H_{b})^{*} \oo_{F} \HSpace \to D(H_{b})^{*} \oo \HSpace,\quad \alpha \oo n \mapsto \left(F_{\sigma}^{(-1)} \vartriangleright \alpha \right) \oo BA_{d,s_{L},s_{R}}^{F_{\sigma}^{(-2)}}(n),
\end{align}
\item If the target site $t^{\tau}$ is a defect site, we define
\begin{align}\label{eq:twistpsidef2}
\psi_{t_{\tau}}: &D(H_{b})^{*} \oo_{F} \HSpace \to D(H_{b})^{*} \oo \HSpace,\quad \alpha \oo n \mapsto \left( \alpha \vartriangleleft S\left(F_{\tau}^{(-1)}\right)\right) \oo BA_{d,t_{L},t_{R}}^{F_{\tau}^{(-2)}}(n)
\end{align}
\end{itemize}
To a permissible path $\rho:s^{\sigma}\to t^{\tau}$ with $\sigma,\tau\in \left\{ L,R \right\}$ that starts or ends at a bulk site, we assign the identity morphism. More specifically,
\begin{itemize}
\item If the starting site $s^\sigma$ is a bulk site, we set
\begin{align}\label{eq:twistphidef3}
\varphi_{s_{\sigma}}=\psi_{s_{\sigma}} = \id :D(H_{b})^{*} \oo \HSpace \to D(H_{b})^{*} \oo \HSpace
\end{align}
\item If the target site $t^\tau$ is a bulk site, we set
\begin{align}\label{eq:twistpsidef3}
\psi_{t_{\tau}} = \id :D(H_{b})^{*} \oo \HSpace \to D(H_{b})^{*} \oo \HSpace.
\end{align}
\end{itemize}
We have thus assigned isomorphisms to all possible configurations of endpoints of permissible paths in $D(\Gamma)$. The twisted holonomy is then obtained by modifying the holonomy of a path $\rho$ by applying the isomorphisms associated with its starting and target sites.
\begin{definition} \label{definition:HolonomyTwisted} Let $\rho:s^{\sigma}\to t^{\tau}$ be a permissible path in the bulk region $b$ with $\sigma,\tau\in \left\{ L,R \right\}$. The \emph{twisted holonomy} along $\rho$ is the map
\begin{align}
\widetilde{\Hol}_{b,\rho} = \Hol_{b,\rho} \circ\left( \varphi_{s_{\sigma}}\circ\psi_{t_{\tau}} \right) \label{eq:HolonomyTwistedDefinition}
\end{align}
\end{definition}
\begin{example} If $\rho:s^{L}\to t^{R}$, then
\begin{align} \label{eq:HolonomyTwistedLeftRight}
\widetilde{\Hol}_{b,\rho}^{\alpha} = \Hol_{b,\rho}^{ F^{(-1)} \vartriangleright\alpha \vartriangleleft S(G^{(-2)})} BA_{s}^{F^{(-2)}} BA_{t}^{G^{(-1)}}
\end{align}
where either (i) $F$ is the twist at $s$ and $BA_{s}=BA_{b,s}$ if $s$ is a bulk or boundary site, or (ii) $F$ is the twist $F_{d}$ and $BA_{s}=BA_{d,s_{L},s_{R}}$ if $s$ is a defect site in $d$, and similarly for $BA_{t}$. If instead $\rho$ starts to the right of $s$, one has to exchange $F^{(-1)}$ and $F^{(-2)}$, and if $\rho$ ends to the left of $t$ one has to exchange $G^{(-1)}$ and $G^{(-2)}$ in~\eqref{eq:HolonomyTwistedLeftRight}.
\end{example}
It is clear from the definitions that the twisted holonomies along a path $\rho$ coincide with the untwisted ones whenever the path $\rho$ starts and ends at a bulk site. We now give a counterpart of Corollary \ref{corollary:HolonomyModuleHom} for the \emph{twisted} holonomies from Definition \ref{definition:HolonomyTwisted}. It shows that the twisted holonomies generate excitations at the endpoints of the path $\rho$. If these sites already carry excitations, they are fused with the newly generated excitations. If the site in question is a boundary or defect site, then the fusion uses a twisted tensor product, i.e. the twisted holonomies satisfy the conditions~(H\ref{H1}) to~(H\ref{H3}) from Section \ref{section:TranslationKitaev}.
\begin{proposition} \label{proposition:HolonomyTwistedCondition} Let $\rho:s_{1}\to s_{2}$ be a permissible path between disjoint sites in a bulk region or adjacent defects and boundaries and denote $F$ the twist at $s_{1}$ and $G$ the twist at $s_{2}$. Then $\widetilde{\Hol}_{b,\rho}$ defines module homomorphisms \begin{align} D(H_{b})^{*}_{ \vartriangleright} \oo_{F} \HSpace_{s_{1}} \to \HSpace_{s_{1}} \quad&\text{if $\rho$ starts to the left of $s_{1}$}\\ D(H_{b})^{*}_{ \vartriangleright} \oo_{F}^{cop}\HSpace_{s_{1}} \to \HSpace_{s_{1}}\quad&\text{if $\rho$ starts to the right of $s_{1}$}\\ D(H_{b})^{*}_{ \vartriangleleft} \oo_{G} \HSpace_{s_{2}} \to \HSpace_{s_{2}}\quad&\text{if $\rho$ ends to the left of $s_{2}$}\\ D(H_{b})^{*}_{ \vartriangleleft} \oo_{G}^{cop} \HSpace_{s_{2}} \to \HSpace_{s_{2}}\quad&\text{if $\rho$ ends to the right of $s_{2}$} \end{align} Here $\HSpace_{s_{i}}$ is the extended space with the action defined by the site $s_{i}$, $D(H_b)^{*}_{ \vartriangleright}$ is the vector space $D(H_b)^{*}$ with the left regular action of $D(H_b)$ and $D(H_b)^{*}_{ \vartriangleleft}$ is the vector space $D(H_b)^{*}$ with the left action of $D(H_b)$ defined by~\eqref{eq:RightCoregularLeftAction}. \end{proposition} \begin{proof} We prove the first statement for the case where $s_{1}$ is a boundary site and $\rho$ starts to the left of $s_{1}$ and ends to the right of $s_{2}$. The other cases and the proof for defect sites are analogous. We compute \begin{align*} BA_{b,s_{1}}^{h}\circ \widetilde{\Hol}_{b,\rho}^{\alpha} &\overset{\eqref{eq:HolonomyTwistedLeftRight}}{=} BA_{b,s_{1}}^{h}\Hol_{b,\rho}^{ F^{(-1)} \vartriangleright\alpha \vartriangleleft S(G^{(-2)})} BA_{b,s_{1}}^{F^{(-2)}} BA_{b,s_{2}}^{G^{(-1)}} \\& \overset{\eqref{eq:HolonomyCommutatorsStartLeftDefect}}{=} \Hol_{b,\rho}^{ \left(h_{(1)} F^{(-1)}\right) \vartriangleright\alpha \vartriangleleft S(G^{(-2)})} BA_{b,s_{1}}^{h_{(2)}} BA_{b,s_{1}}^{F^{(-2)}} BA_{b,s_{2}}^{G^{(-1)}} \\&= \Hol_{b,\rho}^{ \left(F^{(-3)}F^{(1)}h_{(1)} F^{(-1)}\right) \vartriangleright\alpha \vartriangleleft S(G^{(-2)})} BA_{b,s_{1}}^{F^{(-4)}}BA_{b,s_{1}}^{F^{(2)}h_{(2)}F^{(-2)}} BA_{b,s_{2}}^{G^{(-1)}} \\& \overset{\ref{lemma:TwistedHopfAlgebra}}{=} \Hol_{b,\rho}^{ \left(F^{(-3)} h_{(F1)}\right) \vartriangleright\alpha \vartriangleleft S(G^{(-2)})} BA_{b,s_{1}}^{F^{(-4)}}BA_{b,s_{1}}^{h_{(F2)}} BA_{b,s_{2}}^{G^{(-1)}} \\ &\overset{\eqref{eq:HolonomyTwistedLeftRight}}{=} \widetilde{\Hol}_{b,\rho}^{ h_{(F1)} \vartriangleright\alpha} BA_{b,s_{1}}^{h_{(F2)}} \end{align*} From this we conclude that $\widetilde{\Hol}_{b,\rho}$ is a module homomorphism from $D(H_{b})^{*}_{ \vartriangleright} \oo_{F} \HSpace_{s_{1}}$ to $\HSpace_{s_{1}}$. \end{proof} \subsection{Moving and braiding of excitations} \label{subsec:fusionbraiding} The movement and braiding of excitations of the Kitaev model were first considered in \cite{Ki} for the model based on the group algebra of a finite group. The article \cite{BMCA} comments on the generalization to a finite-dimensional semisimple Hopf algebra $H$, but does not give a detailed and explicit description. In this section we describe how to move and braid excitations in the Kitaev model with topological defects and boundaries based on semisimple finite-dimensional Hopf algebras.
Our description is based on the rules for fusion and braiding of excitations in a Turaev-Viro TQFT with topological defects and boundaries from~\cite{FSV} outlined in Section~\ref{section:FSVSummary} and translated into conditions for our model in Section~\ref{section:TranslationKitaev}. Our results also apply to a Kitaev model without defects and boundaries based on a semisimple finite-dimensional Hopf algebra, as this is just a special case (cf. Example~\ref{example:StandardModelSpecialCase}). Moving an excitation along a path $\rho:s_{1}\to s_{2}$ in $D(\Gamma)$ in a bulk region $b$ from the site $s_{1}$ to $s_{2}$ should leave no excitation at the site $s_{1}$. Holonomies do not satisfy this condition, as they generate excitations at both endpoints of $\rho$. This is remedied by applying the Haar integrals of $D(H_b)$ and $D(H_b)^*$. When inserted into the holonomy along $\rho$, the Haar integral of $D(H_b)^{*}$ generates pairs of dual excitations at the sites $s_1$ and $s_2$ and fuses them with the existing excitations at these sites. The Haar integral of $D(H_b)$ then destroys the excitation at $s_1$ and moves it to $s_2$. \begin{definition} \label{definition:TransportOperator} Let $\rho:s_{1}\to s_{2}^{\eta}$ be a permissible path in the bulk region $b$ with $\eta\in\left\{ L,R \right\}$, $F$ the twist at $s_{2}$ and $\lambda\in D(H_{b}), \smallint \in D(H_{b})^{*}$ the Haar integrals of $D(H_{b})$ and $D(H_{b})^{*}$. The \emph{transport operator} is the linear map \begin{align} &T_{\rho}:\HSpace\to \HSpace,\quad m\mapsto BA_{b,s_{1}}^{\lambda}\circ \Hol_{b,\rho}^{\smallint \vartriangleleft S(F^{(-2)})} \circ BA_{b,s_{2}}^{F^{(-1)}} (m), \quad\text{if $\eta=R$} \label{eq:TransportOperatorBulkRight} \\ &T_{\rho}:\HSpace\to \HSpace,\quad m\mapsto BA_{b,s_{1}}^{\lambda}\circ \Hol_{b,\rho}^{\smallint \vartriangleleft S(F^{(-1)})} \circ BA_{b,s_{2}}^{F^{(-2)}} (m) , \quad\text{if $\eta=L$} \label{eq:TransportOperatorBulkLeft} \end{align} \end{definition} We now present a concrete example for the transport operator in a Kitaev model without defects and boundaries. \begin{example} Let $\lambda\in H$, $\smallint\in H^{*}$ be the Haar integrals of $H$ and $H^{*}$ and consider the graph below with a single bulk region $b$ and three edges, labeled with elements $a,b,c \in H$. Then the transport operator can be computed by first applying the operator $\Hol_{\rho}^{\lambda \oo \smallint}$: \begin{align*} \input{GeneralTwoCiliaTransported.pdf_tex} \end{align*} Applying the operators $B^{\smallint}_{s_{1}}$ and $A^{\lambda}_{s_{1}}$ afterwards, we obtain \begin{align*} \input{GeneralTwoCiliaTransported2.pdf_tex} \end{align*} Here we used the identity $\smallint(\lambda_{(2)}c) S(\lambda_{(1)}) = c$, which can be derived from the properties of the Haar integrals $\lambda$ and $\smallint$. \end{example} In the remainder of this section we prove that the transport operator satisfies the conditions~(T\ref{T1}) and~(T\ref{T2}) from Section \ref{section:TranslationKitaev}. For this, we first show a useful commutation property of the transport operator and holonomies. Commuting the transport operator $T_{\rho}$ with a holonomy along the path $\gamma$ extends the path $\gamma$ to $\rho\circ\gamma$, under suitable conditions on the paths. \begin{lemma} \label{lemma:HolonomyExtension} Let $\rho:s_{1}\to s_{2}, \gamma:s_{0}\to s_{1}$ and $\rho\circ\gamma:s_{0}\to s_{2}$ be permissible paths between disjoint sites such that both $\gamma$ and $\rho\circ\gamma$ end to the right of a site and either (cf.
Figure~\ref{fig:AllowedCompositions}) \begin{compactenum} \item $\rho$ starts to the left of $s_{1}$, or \item $(\gamma,\rho^{-1})_{\prec}$, or \item $\rho^{-1}$ is a subpath of $\gamma$. \end{compactenum} Denote $F$ the twist at $s_{2}$. Then \begin{align} T_{\rho}\circ \Hol_{b,\gamma}^{ \alpha \vartriangleleft S(F^{(-2)})} \circ BA_{b,s_{1}}^{F^{(-1)}} &= \Hol_{b,\rho\circ\gamma}^{\alpha \vartriangleleft S(F^{(-2)})}\circ BA_{b,s_{2}}^{F^{(-1)}} \circ T_{\rho} \label{eq:HolonomyStretching} \end{align} \end{lemma} \begin{figure}[H] \begin{center} \begin{tikzpicture}[scale=1] \draw[line width=1pt, ->,>=stealth](0,-2)--(2,-2); \draw[line width=1pt, ->,>=stealth](0,0)--(2,0); \draw[line width=1pt, ->,>=stealth](0,2)--(2,2); \draw[line width=1pt, ->,>=stealth](0,-2)--(0,-1); \draw[line width=1pt, ->,>=stealth](0,-1)--(0,1); \draw[line width=1pt](0,1)--(0,2); \draw[line width=1.5pt, color=red,->,>=stealth] (0,3)--(0,2.5); \draw[line width=1.5pt, color=red] (0,2)--(0,2.5); \draw[line width=1.5pt, color=red,->,>=stealth] (0,2)--(-1,2); \draw[line width=1pt, color=black](0,-2)--(.3,-1.7)node[anchor=west]{$s_0$}; \draw[line width=1pt, color=black](0,0)--(.3,.3)node[anchor=west]{$s_1$}; \draw[line width=1pt, color=black](0,2)--(.3,2.3)node[anchor=south west]{$s_2$}; \draw[line width=1pt, color=blue,->,>=stealth] (.6,-1.4)--(.6,-.5) node[anchor=west]{$\gamma$}; \draw[line width=1pt, color=blue] (.6,-.5)--(.6,.1); \draw[line width=1pt, color=violet,->,>=stealth] (.6,.6)--(.6,1.5) node[anchor=west]{$\rho$}; \draw[line width=1pt, color=violet] (.6,1.5)--(.6,2.2); \draw[color=red, fill=red] (0,2) circle (.13) ; \draw[color=black, fill=black] (0,0) circle (.13) ; \draw[color=black, fill=black] (0,-2) circle (.13) ; \end{tikzpicture} \qquad \qquad \begin{tikzpicture}[scale=1] \draw[line width=1pt, color=black,->,>=stealth](0,0)--(0,1); \draw[line width=1pt, color=black,->,>=stealth](0,1)--(0,3); \draw[line width=1pt, color=black, ->,>=stealth](0,0)--(1,0); \draw[line width=1pt, color=black,->,>=stealth](0,0)--(-1,0); \draw[line width=1pt, color=black,](-2,0)--(-1,0); \draw[line width=1pt, color=black, ->,>=stealth](0,2)--(1,2); \draw[line width=1.5pt, color=red,->,>=stealth] (-2,1.5)--(-2,1); \draw[line width=1.5pt, color=red,->,>=stealth] (-2,1)--(-2,-1); \draw[line width=1pt, color=black] (-2,0)--(-1.7,.3) node[anchor=south]{$s_2$}; \draw[line width=1pt, color=black] (0,0)--(.3,.3) node[anchor=west]{$s_0$}; \draw[line width=1pt, color=black] (0,2)--(.3,2.3) node[anchor=south]{$s_1$}; \draw[line width=1pt, color=blue,->,>=stealth] (.7,.6)--(.7, 1.5) node[anchor=west]{$\gamma$}; \draw[line width=1pt, color=blue,] (.7,1.5)--(.7, 2.3); \draw[line width=1pt, color=violet,->,>=stealth] (.4,2.3)--(.4, 1); \draw[line width=1pt, color=violet,] (.4,1)--(.4, .5); \draw[line width=1pt, color=violet,->,>=stealth] (.4,.5)--(-1, .5) node[anchor=south]{$\rho$}; \draw[line width=1pt, color=violet,] (-1,.5)--(-1.4, .5); \draw[color=red, fill=red] (-2,0) circle (.13) ; \draw[color=black, fill=black] (0,0) circle (.13) ; \draw[color=black, fill=black] (0,2) circle (.13) ; \end{tikzpicture} \qquad \qquad \begin{tikzpicture}[scale=1] \draw[line width=1pt, color=black, ->,>=stealth] (1.5,-2)--(1,-2); \draw[line width=1pt, color=black] (1,-2)--(0,-2); \draw[line width=1pt, color=black, ->,>=stealth] (1.5,0)--(1,0); \draw[line width=1pt, color=black] (1,0)--(0,0); \draw[line width=1pt, color=black, ->,>=stealth] (1.5,2)--(1,2); \draw[line width=1pt, color=black] (1,2)--(0,2); \draw[line width=1pt, color=black, ->,>=stealth] 
(0,-2)--(0,-1); \draw[line width=1pt, color=black] (0,-1)--(0,0); \draw[line width=1.5pt, color=red, ->,>=stealth] (0,3)--(0,1); \draw[line width=1.5pt, color=red] (0,1)--(0,0); \draw[line width=1.5pt, color=red, ->,>=stealth] (0,0)--(-1,0); \draw[line width=1.5pt, color=red] (-1,0)--(-1.5,0); \draw[line width=1pt, color=black] (0,-2)--(.3,-1.7)node[anchor=west]{$s_0$}; \draw[line width=1pt, color=black] (0,0)--(.3,.3)node[anchor=south]{$s_2$}; \draw[line width=1pt, color=black] (0,2)--(.3,2.3) node[anchor=south west]{$s_1$}; \draw[line width=1pt, color=blue, ->,>=stealth] (.6,-1.5)--(.6,-.7) node[anchor=west]{$\gamma$}; \draw[line width=1pt, color=blue, ] (.6,-.7)--(.6,1) ; \draw[line width=1pt, color=blue, ] (.6,1)--(.6,2.2); \draw[line width=1pt, color=violet, ->,>=stealth] (.9,2.2)--(.9,1.5) node[anchor=west]{$\rho$}; \draw[line width=1pt, color=violet, ] (.9,1.5)--(.9,.2); \draw[color=black, fill=black] (0,-2) circle (.13); \draw[color=red, fill=red] (0,0) circle (.13); \draw[color=red, fill=red] (0,2) circle (.13); \end{tikzpicture} \end{center} \caption{From left to right: Paths $\gamma:s_{0}\to s_{1}, \rho:s_{1}\to s_{2}$ such that 1. $\rho$ starts to the left of $s_{1}$, 2.$(\gamma,\rho^{-1})_{\prec}$, 3. $\rho^{-1}$ is a subpath of $\gamma$.} \label{fig:AllowedCompositions} \end{figure} \begin{proof} We first make some observations about $\rho$ in case~1 and case~3. If $\rho$ starts to the left of $s_{1}$, then $(\rho,\gamma)_{\emptyset}$, since $\rho\circ\gamma$ would not be a simple path otherwise. If $\rho^{-1}$ is a subpath of $\gamma$, then $(\rho, \rho\circ\gamma)_{\emptyset}$, since $\rho\circ\gamma$ would end to the left of $s_{2}$ otherwise. $\bullet$ Step 1: ~We first show that in all three cases we have \begin{align} \Hol_{b,\rho}^{\smallint \vartriangleleft S(h)} \Hol_{b,\gamma}^{\alpha} =\Hol_{b,\rho\circ\gamma}^{\alpha \vartriangleleft S(h_{(2)})} \Hol_{b,\rho}^{\smallint \vartriangleleft S(h_{(1)})}, \label{eq:TransportHelper1} \end{align} where $\lambda\in D(H_{b})$ and $\smallint \in D(H_{b})^{*}$ are the Haar integrals of $D(H)$ and $D(H)^{*}$ and $h\in D(H_{b})$. 
1.~If $\rho$ starts to the left of $s_{1}$, we have \begin{align*} \Hol_{b,\rho}^{\smallint \vartriangleleft S(h)} \Hol_{b,\gamma}^{\alpha } &\overset{\eqref{eq:HaarIntegral}}{=} \Hol_{b,\rho}^{ (\alpha_{(1)} \smallint) \vartriangleleft S(h)} \Hol_{b,\gamma}^{\alpha_{(2)}} \overset{\eqref{eq:LeftRightHolonomy}}{=} \Hol_{b,\rho}^{ \alpha_{(1)}\vartriangleleft S(h_{(2)})} \Hol_{b,\rho}^{ \smallint \vartriangleleft S(h_{(1)})} \Hol_{b,\gamma}^{\alpha_{(2)}} \\ &\overset{\eqref{eq:NonOverlap}}{=} \Hol_{b,\rho}^{ \alpha_{(1)}\vartriangleleft S(h_{(2)})} \Hol_{b,\gamma}^{\alpha_{(2)}} \Hol_{b,\rho}^{ \smallint \vartriangleleft S(h_{(1)})} \overset{\eqref{eq:HolonomyRecursionStandard}}{=} \Hol_{b,\rho\circ\gamma}^{ \alpha\vartriangleleft S(h_{(2)})} \Hol_{b,\rho}^{ \smallint \vartriangleleft S(h_{(1)})} \end{align*} 2.~If $(\gamma, \rho^{-1})_{\prec}$, we obtain \begin{align*} \Hol_{b,\rho}^{\smallint \vartriangleleft S(h)} \Hol_{b,\gamma}^{\alpha } &\overset{\eqref{eq:HaarIntegral}}{=} \Hol_{b,\rho}^{\alpha_{(1)}\smallint \vartriangleleft S(h)} \Hol_{b,\gamma}^{\alpha_{(2)}} \overset{\eqref{eq:RightRightHolonomy}}{=} \Hol_{b,\rho}^{R^{(1)} \vartriangleright \alpha_{(1)} \vartriangleleft S(h_{(2)})} \Hol_{b,\rho}^{R^{(2)} \vartriangleright \smallint \vartriangleleft S(h_{(1)})} \Hol_{b,\gamma}^{\alpha_{(2)}} \\ &\overset{\eqref{eq:HolonomyReversal}}{=} \Hol_{b,\rho}^{R^{(1)} \vartriangleright \alpha_{(1)} \vartriangleleft S(h_{(2)})} \Hol_{b,\rho^{-1}}^{h_{(1)} \vartriangleright S(\smallint) \vartriangleleft S( R^{(2)} )} \Hol_{b,\gamma}^{\alpha_{(2)}} \\ &\overset{\eqref{eq:LeftJoint}}{=} \Hol_{b,\rho}^{R^{(1)} \vartriangleright \alpha_{(1)} \vartriangleleft S(h_{(2)})} \Hol_{b,\gamma}^{\alpha_{(2)} \vartriangleleft R^{(3)}} \Hol_{b,\rho^{-1}}^{ h_{(1)} \vartriangleright S(\smallint) \vartriangleleft S( R^{(2)}) R^{(4)} } \\ &\overset{(*)}{=} \Hol_{b,\rho}^{R^{(1)} R^{(3)} \vartriangleright \alpha_{(1)} \vartriangleleft S(h_{(2)})} \Hol_{b,\gamma}^{\alpha_{(2)} } \Hol_{b,\rho^{-1}}^{ h_{(1)} \vartriangleright S(\smallint) \vartriangleleft S( R^{(2)}) R^{(4)} } \\ &\overset{(**)}= \Hol_{b,\rho}^{\alpha_{(1)} \vartriangleleft S(h_{(2)})} \Hol_{b,\gamma}^{\alpha_{(2)} } \Hol_{b,\rho^{-1}}^{ h_{(1)} \vartriangleright S(\smallint) } \\ &\overset{\eqref{eq:HolonomyRecursionStandard}}= \Hol_{b,\rho\circ\gamma}^{\alpha \vartriangleleft S(h_{(2)})} \Hol_{b,\rho^{-1}}^{ h_{(1)} \vartriangleright S(\smallint) } \overset{\eqref{eq:HolonomyReversal}}{=} \Hol_{b,\rho\circ\gamma}^{\alpha \vartriangleleft S(h_{(2)})} \Hol_{b,\rho}^{\smallint \vartriangleleft S(h_{(1)}) }, \end{align*} where we used the identity $\left( k \vartriangleright \alpha_{(1)} \right) \oo \alpha_{(2)} = \alpha_{(1)} \oo \left( \alpha_{(2)} \vartriangleleft k \right)$ in $(*)$ and the identity $\left( \id \oo S \right)(R)=R^{-1}$ in $(**)$.\\ 3.~If $\rho^{-1}$ is a subpath of $\gamma$, then we have \begin{align*} \Hol_{b,\rho}^{\smallint \vartriangleleft S(h)} \Hol_{b,\gamma}^{\alpha } &\overset{\eqref{eq:HaarIntegral}}{=} \Hol_{b,\rho}^{ (\alpha_{(1)} \smallint) \vartriangleleft S(h)} \Hol_{b,\gamma}^{\alpha_{(2)}} \overset{\eqref{eq:LeftRightHolonomy}}{=} \Hol_{b,\rho}^{ \alpha_{(1)}\vartriangleleft S(h_{(2)})} \Hol_{b,\rho}^{ \smallint \vartriangleleft S(h_{(1)})} \Hol_{b,\gamma}^{\alpha_{(2)}} \\ &\overset{\eqref{eq:NonOverlap}}{=} \Hol_{b,\rho}^{ \alpha_{(1)}\vartriangleleft S(h_{(2)})} \Hol_{b,\gamma}^{\alpha_{(2)}} \Hol_{b,\rho}^{ \smallint \vartriangleleft S(h_{(1)})} \overset{\eqref{eq:HolonomyRecursionStandardDefect}}{=} 
\Hol_{b,\rho\circ\gamma}^{ \alpha\vartriangleleft S(h_{(2)})} \Hol_{b,\rho}^{ \smallint \vartriangleleft S(h_{(1)})}. \end{align*} $\bullet$ Step 2: We use \eqref{eq:TransportHelper1} to prove~\eqref{eq:HolonomyStretching}. For this, we have three cases, namely that $\rho$ is a left-right, right-right or right-left path. The case that $\rho$ is a left-left path does not arise, since this would imply that $\rho\circ\gamma$ ends to the left of a site, in contradiction to the assumptions. The other three cases correspond to case 1.~2.~and 3.~in Lemma \ref{lemma:HolonomyExtension}, respectively, and are depicted in Figure \ref{fig:AllowedCompositions}. 1.~If $\rho$ is a left-right path, we have \begin{align*} T_{\rho}\circ \Hol_{b,\gamma}^{ \alpha \vartriangleleft S( F^{(-2)})}BA_{b,s_{1}}^{F^{(-1)}} &\overset{\eqref{eq:TransportOperatorBulkRight}}{=} BA^{\lambda}_{b,s_{1}} \Hol_{b,\rho}^{\smallint \vartriangleleft S( F^{(-4)})} BA_{b,s_{2}}^{F^{(-3)}} \Hol_{b,\gamma}^{ \alpha \vartriangleleft S(F^{(-2)})} BA_{b,s_{1}}^{F^{(-1)}} \\ &\overset{\eqref{eq:NonOverlap}}= BA^{\lambda}_{b,s_{1}} \Hol_{b,\rho}^{\smallint \vartriangleleft S(F^{(-4)})} \Hol_{b,\gamma}^{ \alpha \vartriangleleft S(F^{(-2)})} BA_{b,s_{2}}^{F^{(-3)}} BA_{b,s_{1}}^{F^{(-1)}} \\ &\overset{\eqref{eq:TransportHelper1}}{=} BA^{\lambda}_{b,s_{1}} \Hol_{b,\rho\circ\gamma}^{ \alpha \vartriangleleft S(F^{(-4)}_{(2)}F^{(-2)})} \Hol_{b,\rho}^{\smallint \vartriangleleft S(F^{(-4)}_{(1)})} BA_{b,s_{2}}^{F^{(-3)}} BA_{b,s_{1}}^{F^{(-1)}} \\ &\overset{\eqref{eq:MiddleJoint}}= \Hol_{b,\rho\circ\gamma}^{ \alpha \vartriangleleft S(F^{(-4)}_{(2)}F^{(-2)})} BA^{\lambda}_{b,s_{1}} \Hol_{b,\rho}^{\smallint \vartriangleleft S(F^{(-4)}_{(1)})} BA_{b,s_{2}}^{F^{(-3)}} BA_{b,s_{1}}^{F^{(-1)}} \\ &\overset{\eqref{eq:HolonomyCommutatorsStartLeft}}{=} \Hol_{b,\rho\circ\gamma}^{ \alpha \vartriangleleft S(F^{(-4)}_{(2)}F^{(-2)})} BA_{b,s_{1}}^{\lambda F^{(-1)}_{(2)}} \Hol_{b,\rho}^{S(F^{-1}_{(1)}) \vartriangleright\smallint \vartriangleleft S(F^{(-4)}_{(1)})} BA_{b,s_{2}}^{F^{(-3)}} \\ &\overset{\eqref{eq:HaarIntegral}}= \Hol_{b,\rho\circ\gamma}^{ \alpha \vartriangleleft S(F^{(-4)}_{(2)}F^{(-2)})} BA^{\lambda}_{b,s_{1}} \Hol_{b,\rho}^{S(F^{-1}) \vartriangleright \smallint \vartriangleleft S(F^{(-4)}_{(1)})} BA_{b,s_{2}}^{F^{(-3)}} \\ &\overset{(*)}{=} \Hol_{b,\rho\circ\gamma}^{ \alpha \vartriangleleft S(F^{(-4)}_{(2)}F^{(-2)})} BA^{\lambda}_{b,s_{1}} \Hol_{b,\rho}^{\smallint \vartriangleleft S( F^{(-4)}_{(1)}F^{(-1)})} BA_{b,s_{2}}^{F^{(-3)}} \\ &\overset{\eqref{eq:TwistDelta}}{=} \Hol_{b,\rho\circ\gamma}^{ \alpha \vartriangleleft S(F^{(-4)})} BA^{\lambda}_{b,s_{1}} \Hol_{b,\rho}^{\smallint \vartriangleleft S(F^{(-3)}_{(2)} F^{(-2)})} BA_{b,s_{2}}^{F^{(-3)}_{(1)}F^{(-1)}} \\ &\overset{\eqref{eq:HolonomyCommutatorsEndRight}}{=} \Hol_{b,\rho\circ\gamma}^{ \alpha \vartriangleleft S(F^{(-4)})} BA^{\lambda}_{b,s_{1}} BA_{b,s_{2}}^{F^{(-3)}} \Hol_{b,\rho}^{\smallint \vartriangleleft S( F^{(-2)})} BA_{b,s_{2}}^{F^{(-1)}} \\ &\overset{\ref{proposition:OperatorsCommute}}= \Hol_{b,\rho\circ\gamma}^{ \alpha \vartriangleleft S(F^{(-4)})} BA_{b,s_{2}}^{F^{(-3)}} BA^{\lambda}_{b,s_{1}} \Hol_{b,\rho}^{\smallint \vartriangleleft S (F^{(-2)})} BA_{b,s_{2}}^{F^{(-1)}} \\ &\overset{\eqref{eq:TransportOperatorBulkRight}}{=} \Hol_{b,\rho\circ\gamma}^{ \alpha \vartriangleleft S(F^{(-4)})} BA_{b,s_{2}}^{F^{(-3)}} T_{\rho} \end{align*} where we used the identity $h \vartriangleright \smallint = \smallint \vartriangleleft h $ for $h\in D(H_{b})$ in (*). 
2.~and 3.:~The computations for right-right and right-left paths are analogous. If $\rho$ ends to the left of $s_2$, one simply has to exchange $F^{(-4)}$ and $F^{(-3)}$ and use~\eqref{eq:HolonomyCommutatorsEndLeft} instead of~\eqref{eq:HolonomyCommutatorsEndRight}. If $\rho$ starts to the right of $s_1$, one has to use~\eqref{eq:HolonomyCommutatorsStartRight} instead of~\eqref{eq:HolonomyCommutatorsStartLeft}. \end{proof} The following proposition shows that the transport operator $T_{\rho}$ does indeed move excitations along a permissible path $\rho:s_{1}\to s_{2}$ from $s_{1}$ to $s_{2}$. It fuses the excitations $M_{1}$ at $s_{1}$ and $M_{2}$ at $s_{2}$ via the (twisted) tensor product at the site $s_2$. In other words, $T_{\rho}$ satisfies condition~(T\ref{T1}) from Section~\ref{section:TranslationKitaev}. This result can also be seen as a counterpart of~Proposition~\ref{proposition:HolonomyTwistedCondition}, which describes the fusion of the $D(H_b)$-module $D(H_b)^*$ with the excitation at $s_1$ by the holonomy along $\rho$. \begin{proposition}[Fusion] \label{proposition:TransportOperatorTensorProduct} Let $\rho:s_{1} \to s_{2}^{\eta}$ be a permissible path between disjoint sites in a bulk region $b$ with $\eta\in \left\{ L,R \right\}$ and denote $F$ the twist at the site $s_{2}$. Then $T_{\rho}$ induces a $D(H_{b})$-linear map \begin{align} \label{eq:TransportOperatorTensorProduct} T_{\rho}: \HSpace\left( s_{1}, M_{1}, s_{2},M_{2} \right) \to \HSpace\left( s_{1}, \C, s_{2}, M_{2} \oo_{F} M_{1} \right) \quad\text{if $\eta=R$}. \\ \label{eq:TransportOperatorTensorProductLeft} T_{\rho}: \HSpace\left( s_{1}, M_{1}, s_{2},M_{2} \right) \to \HSpace\left( s_{1}, \C, s_{2}, M_{1} \oo_{F} M_{2}\right) \quad\text{if $\eta=L$}. \end{align} Here $M_{1},M_{2}$ are $D(H_{b})$-modules and $\HSpace\left( s_{1}, M_{1}, s_{2},M_{2} \right)$ is defined as in~\eqref{eq:MultipleExcitationSpace}. \end{proposition} \begin{proof} We only prove the first claim, as the second claim follows by an analogous computation. In this case we need to show the two identities \begin{align} BA_{b,s_{1}}^{k}\circ T_{\rho} = \varepsilon(k) T_{\rho}, \quad BA_{b,s_{2}}^{k} \circ T_{\rho} = T_{\rho} \circ BA_{b,s_{2}}^{k_{(F1)}} \circ BA_{b,s_{1}}^{k_{(F2)}} \qquad \text{for $k\in D(H)$}, \label{eq:H2} \end{align} where $F\Delta F^{-1}: D(H)\to D(H)\oo D(H)$, $k\mapsto k_{(F1)}\oo k_{(F2)}$ is the twisted comultiplication. The first identity in~\eqref{eq:H2} follows by direct computation: \begin{align*} BA_{b,s_{1}}^{k}\circ T_{\rho} &\overset{\eqref{eq:TransportOperatorBulkRight}}{=} BA_{b,s_{1}}^{k} BA_{b,s_{1}}^{\lambda} \Hol_{b,\rho}^{ \smallint \vartriangleleft S(F^{(-2)})} BA_{b,s_{2}}^{F^{(-1)}} \\ &= \varepsilon(k) BA_{b,s_{1}}^{\lambda} \Hol_{b,\rho}^{ \smallint \vartriangleleft S(F^{(-2)})} BA_{b,s_{2}}^{F^{(-1)}} = \varepsilon(k) T_{\rho} \end{align*} For the second identity in~\eqref{eq:H2} we additionally assume that $\rho$ starts to the left of $s_{1}$ and ends to the right of $s_2$.
In this case, we have \begin{align*} BA_{b,s_{2}}^{k}T_{\rho} &\overset{\eqref{eq:TransportOperatorBulkRight}}{=} BA_{b,s_{2}}^{k} BA_{b,s_{1}}^{\lambda} \Hol_{b,\rho}^{ \smallint \vartriangleleft S(F^{(-2)})} BA_{b,s_{2}}^{F^{(-1)}} \overset{\ref{proposition:OperatorsCommute}}{=} BA_{b,s_{1}}^{\lambda} BA_{b,s_{2}}^{k} \Hol_{b,\rho}^{ \smallint \vartriangleleft S(F^{(-2)})} BA_{b,s_{2}}^{F^{(-1)}} \\ &\overset{\eqref{eq:HolonomyCommutatorsEndRightDefect}}{=} BA_{b,s_{1}}^{\lambda} \Hol_{b,\rho}^{ \smallint \vartriangleleft S(k_{(2)} F^{(-2)})} BA_{b,s_{2}}^{k_{(1)}F^{(-1)}} \overset{\ref{lemma:TwistedHopfAlgebra}}{=} BA_{b,s_{1}}^{\lambda} \Hol_{b,\rho}^{ \smallint \vartriangleleft S(F^{(-2)}k_{(F2)})} BA_{b,s_{2}}^{F^{(-1)}k_{(F1)}} \\ &\overset{(*)}{=} BA_{b,s_{1}}^{\lambda} \Hol_{b,\rho}^{ S(k_{(F2)}) \vartriangleright \smallint \vartriangleleft S(F^{(-2)})} BA_{b,s_{2}}^{F^{(-1)}k_{(F1)}} \\&\overset{\eqref{eq:HolonomyCommutatorsStartLeftDefect}}{=} BA_{b,s_{1}}^{\lambda S(k_{(F2)(2)})} \Hol_{b,\rho}^{ \smallint \vartriangleleft S(F^{(-2)})} BA_{b,s_{1}}^{ k_{(F2)(1)}} BA_{b,s_{2}}^{F^{(-1)}k_{(F1)}} \\ &\overset{\eqref{eq:HaarIntegral}}{=} BA_{b,s_{1}}^{\lambda } \Hol_{b,\rho}^{ \smallint \vartriangleleft S(F^{(-2)})} BA_{b,s_{1}}^{ k_{(F2)}} BA_{b,s_{2}}^{F^{(-1)}k_{(F1)}} \\&\overset{\ref{proposition:OperatorsCommute}}{=} BA_{b,s_{1}}^{\lambda } \Hol_{b,\rho}^{ \smallint \vartriangleleft S(F^{(-2)})} BA_{b,s_{2}}^{F^{(-1)}}BA_{b,s_{2}}^{k_{(F1)}} BA_{b,s_{1}}^{ k_{(F2)}} = T_{\rho}BA_{b,s_{2}}^{k_{(F1)}} BA_{b,s_{1}}^{ k_{(F2)}}. \end{align*} In $(*)$ we have used the identity \begin{align*} \smallint \vartriangleleft S(h) = \langle {\smallint}_{(1)} , S(h) \rangle {\smallint}_{(2)} = \langle {\smallint}_{(2)} , S(h) \rangle {\smallint}_{(1)} = S(h) \vartriangleright \smallint \qquad \text{for $h\in D(H)$ } \end{align*} which follows from the cyclicity of the Haar integral $\smallint$. The proof for the case where $\rho$ starts to the right of $s_{1}$ is analogous, but uses the identity \eqref{eq:HolonomyCommutatorsStartRightDefect} instead of \eqref{eq:HolonomyCommutatorsStartLeftDefect}. The same holds for the case where $\rho$ ends to the left of $s_2$, but with \eqref{eq:HolonomyCommutatorsEndLeftDefect} instead of \eqref{eq:HolonomyCommutatorsEndRightDefect}. \end{proof} We now show that the fusion of multiple excitations satisfies an associativity condition. This associativity reflects the associativity of the tensor product of $D(H_{b})$-modules. For three $D(H_{b})$-modules $M_{1},M_{2},M_{3}$, the two tensor products $(M_{1} \oo M_{2}) \oo M_{3}$ and $M_{1} \oo \left( M_{2} \oo M_{3}\right)$ coincide as $D(H_{b})$-modules. In the Kitaev model, we have two ways of fusing three excitations of type $M_{1},M_{2},M_{3}$ in the bulk region $b$ into an excitation of type $M_{1} \oo M_{2} \oo M_{3}$, as shown in Figure~\ref{figure:BulkAssociativity}. \begin{figure}[H] \centering \scalebox{0.25}{\input{BulkAssociativity.pdf_tex}} \caption{The two ways of fusing three bulk excitations $M_1,M_2,M_3$.} \label{figure:BulkAssociativity} \end{figure} As the tensor product of $D(H_b)$-modules is associative, these two procedures must coincide, i.e.\ define the same map $\HSpace\to\HSpace$. If all three excitations are in a defect line or a boundary line, the same condition should hold after replacing the tensor product $ \oo $ with a twisted tensor product $ \oo_{F}$.
\\ If the excitations $M_{1}$ and $M_{2}$ are in the bulk and $M_{3}$ is in an adjacent boundary or defect line, then we again have two ways of fusing the excitations illustrated by Figure~\ref{figure:BoundaryAssociativity}. \begin{figure}[H] \centering \scalebox{0.25}{\input{BoundaryAssociativity.pdf_tex}} \caption{The two ways of fusing bulk excitations $M_1,M_2$ and a boundary excitation $M_3$.} \label{figure:BoundaryAssociativity} \end{figure} The two $D(H_{b})$-modules $(M_{1} \oo M_{2}) \oo_{F} M_{3}$ and $M_{1} \oo_{F} (M_{2}\oo_{F} M_{3})$ are isomorphic with an isomorphism given by the action of the twist $F$ from~\eqref{eq:TwistCoherenceData} on $M_{1}$ and $M_{2}$. The two procedures in Figure~\ref{figure:BoundaryAssociativity} should therefore coincide up to a twist acting on $M_{1}$ and $M_{2}$. The following proposition shows that the transport operator $T_{\rho}$ indeed fulfills this associativity condition: \begin{proposition}[Associativity] \label{proposition:TransportAssociativity} Let $\gamma:s_{0}\to s_{1}, \rho:s_{1}\to s_{2}$ be permissible paths satisfying the conditions of Lemma~\ref{lemma:HolonomyExtension}. Denote by $F$ the twist at $s_{2}$ and by $G$ the twist at $s_{1}$ and let either $F=G$ or $G$ trivial. Then we have \begin{align} T_{\rho\circ\gamma}\circ T_{\rho} &= T_{\rho}\circ T_{\gamma}\quad&\text{if $F=G$.} \label{eq:TransportAssociativitySameRegion} \\ T_{\rho\circ\gamma}\circ T_{\rho} &= T_{\rho}\circ T_{\gamma}\circ BA_{b,s_{0}}^{F^{(-2)}} \circ BA_{b,s_{1}}^{F^{(-1)}} \quad&\text{if $G$ trivial.} \label{eq:TransportAssociativityBulkToDefect} \end{align} \end{proposition} \begin{proof} For $F=G$ we show the claim by direct computation \begin{align*} T_{\rho}\circ T_{\gamma} &\overset{\eqref{eq:TransportOperatorBulkRight}}{=} T_{\rho}\circ BA_{b,s_{0}}^{\lambda} \circ \Hol_{b,\gamma}^{\smallint \vartriangleleft S( F^{(-2)})} \circ BA_{b,s_{1}}^{F^{(-1)}} \overset{\ref{proposition:OperatorsCommute}}{=} BA_{b,s_{0}}^{\lambda} \circ T_{\rho} \circ \Hol_{b,\gamma}^{\smallint \vartriangleleft S(F^{(-2)})}\circ BA_{b,s_{1}}^{F^{(-1)}} \\ &\overset{\eqref{eq:HolonomyStretching}}{=} BA_{b,s_{0}}^{\lambda} \circ \Hol_{b,\rho\circ\gamma}^{\smallint \vartriangleleft S(F^{(-2)})}\circ BA_{b,s_{2}}^{F^{(-1)}} \circ T_{\rho} \overset{\eqref{eq:TransportOperatorBulkRight}}{=} T_{\rho\circ\gamma} \circ T_{\rho} \end{align*} For $G$ trivial we first assume that $\gamma$ starts to the left of $s_{0}$ and compute \begin{align} T_{\gamma}\circ BA_{b,s_{0}}^{F^{(-2)}} \circ BA_{b,s_{1}}^{F^{(-1)}} &\overset{\eqref{eq:TransportOperatorBulkRight}}{=} BA_{b,s_{0}}^{\lambda} \circ \Hol_{b,\gamma}^{ \smallint} \circ BA_{b,s_{0}}^{F^{(-2)}} \circ BA_{b,s_{1}}^{F^{(-1)}} \nonumber\\ &\overset{\eqref{eq:HolonomyCommutatorsStartLeftDefect}}{=} BA_{b,s_{0}}^{\lambda F^{(-2)}_{(2)}}\circ \Hol_{b,\gamma}^{ S(F^{(-2)}_{(1)}) \vartriangleright \smallint} \circ BA_{b,s_{1}}^{F^{(-1)}} \nonumber\\ &= BA_{b,s_{0}}^{\lambda} \circ \Hol_{b,\gamma}^{ S(F^{(-2)}) \vartriangleright \smallint} \circ BA_{b,s_{1}}^{F^{(-1)}} \nonumber\\ &\overset{(*)}{=} BA_{b,s_{0}}^{\lambda} \circ \Hol_{b,\gamma}^{ \smallint \vartriangleleft S(F^{(-2)})} \circ BA_{b,s_{1}}^{F^{(-1)}} \label{eq:H1} \end{align} where we used the identity $ h \vartriangleright \smallint = \smallint \vartriangleleft h $ for $h\in D(H_{b})$ in $(*)$. 
If $\gamma$ starts to the right of $s_{0}$, the identity~\eqref{eq:H1} follows analogously, but we have to use~\eqref{eq:HolonomyCommutatorsStartRightDefect} instead of~\eqref{eq:HolonomyCommutatorsStartLeftDefect}. Inserting~\eqref{eq:H1} into the right hand side of~\eqref{eq:TransportAssociativityBulkToDefect}, we obtain the second expression in the proof of the case $F=G$. Proceeding identically, we then obtain~\eqref{eq:TransportAssociativityBulkToDefect}. \end{proof} When fusing two excitations $M_{1},M_{2}$ with $T_{\rho}$ into an excitation $M_{1} \oo_{F} M_{2}$, the order of $M_1$ and $M_2$ in the tensor product depends on whether $\rho:s_{1}\to s_{2}$ ends to the left or the right of $s_{2}$ (cf. Proposition~\ref{proposition:TransportOperatorTensorProduct}). The two $D(H_{b})$-modules $M_{1} \oo M_{2}$ and $M_{2} \oo M_{1}$ are related by the braiding of $D(H_{b})\mathrm{-Mod}$. The following proposition shows that the transport operator satisfies condition~(T\ref{T2}) from Section \ref{section:TranslationKitaev}. In other words, for every permissible path $\rho: s_1\to s_2^R$ in a bulk region, there is a canonical path $\rho': s_1\to s_2^L$, obtained by composing $\rho$ with a face path, such that the maps $T_{\rho}$ and $T_{\rho'}$ are also related by that braiding. If $s_{2}$ is a defect or boundary site, then they are instead related by a twisted braiding. \begin{proposition}[Braiding] Let $\rho:s_{1}^{L}\to s_{2}^{R}$ be a permissible path in $b$, $f$ the face path of $s_{2}$, $F$ the twist at $s_{2}$ and $R_{F}\in D(H_{b}) \oo D(H_{b})$ the twisted $R$-matrix of $D(H_{b})$ from \eqref{eq:TwistedRMatrix}. Then we have \begin{align} T_{f^{-1}\circ\rho} = T_{\rho}\circ BA_{b,s_{1}}^{R_{F}^{(2)}}\circ BA_{b,s_{2}}^{R_{F}^{(1)}} \label{eq:TransportOperatorBraidingBulk} \end{align} \end{proposition} \begin{proof} We denote the $R$-matrix of $D(H_{b})$ by $R=\varepsilon \oo x \oo X \oo 1$.
We then show the claim by direct computation: \begin{align*} T_{\rho}\circ BA_{b,s_{1}}^{R_{F}^{(2)}}\circ BA_{b,s_{2}}^{R_{F}^{(1)}} &\overset{\eqref{eq:TransportOperatorBulkRight}}{=} BA_{b,s_{1}}^{\lambda} \Hol_{b,\rho}^{ \smallint \vartriangleleft S(F^{(-2)})} BA_{b,s_{2}}^{F^{(-1)}}BA_{b,s_{1}}^{R_{F}^{(2)}} BA_{b,s_{2}}^{R_{F}^{(1)}} \\ &\overset{\eqref{eq:HolonomyCommutatorsStartLeftDefect}}= BA_{b,s_{1}}^{\lambda R^{(2)}_{F,(2)}} \Hol_{b,\rho}^{ S(R_{F,(1)}^{(2)}) \vartriangleright \smallint \vartriangleleft S(F^{(-2)})} BA_{b,s_{2}}^{F^{(-1)}R_{F}^{(1)}} \\ &\overset{\eqref{eq:HaarIntegral}}= BA_{b,s_{1}}^{\lambda} \Hol_{b,\rho}^{ S(R_F^{(2)}) \vartriangleright \smallint \vartriangleleft S(F^{(-2)})} BA_{b,s_{2}}^{F^{(-1)}R_{F}^{(1)}} \\ &\overset{(*)}= BA_{b,s_{1}}^{\lambda} \Hol_{b,\rho}^{ \smallint \vartriangleleft S(F^{(-2)} R_{F}^{(2)})} BA_{b,s_{2}}^{F^{(-1)}R_{F}^{(1)}} \\ &\overset{\eqref{eq:TwistedRMatrix}}{=} BA_{b,s_{1}}^{\lambda} \Hol_{b,\rho}^{ \smallint \vartriangleleft S(R^{(2)} F^{(-1)})} BA_{b,s_{2}}^{R^{(1)} F^{(-2)}} \\ &= BA_{b,s_{1}}^{\lambda} \Hol_{b,\rho}^{ \smallint \vartriangleleft S(R^{(2)} F^{(-1)})} BA_{b,s_{2}}^{R^{(1)}} BA_{b,s_{2}}^{F^{(-2)}} \\ &\overset{\eqref{eq:Rmatrix}}= BA_{b,s_{ 1}}^{\lambda} \Hol_{b,\rho}^{ \smallint \vartriangleleft S((\varepsilon \oo x) F^{(-1)})} BA_{b,s_{2}}^{X \oo 1} BA_{b,s_{2}}^{F^{(-2)}} \\ &\overset{\eqref{eq:FaceOperatorBulk}}{=} BA_{b,s_{1}}^{\lambda} \Hol_{b,\rho}^{ \smallint \vartriangleleft S( (\varepsilon \oo x) F^{(-1)})} \Hol_{b,f}^{X \oo 1} BA_{b,s_{2}}^{F^{(-2)}} \\ &\overset{\eqref{eq:CoregularRightAction}}= \langle {\smallint}_{(1)} , S(F^{(-1)}) S(\varepsilon \oo x) \rangle BA_{b,s_{1}}^{\lambda} \Hol_{b,\rho}^{ \smallint_{(2)} } \Hol_{b,f}^{1 \oo X} BA_{b,s_{2}}^{F^{(-2)}} \\ &\overset{(**)}= \langle {\smallint}_{(1)} , S(F^{(-1)}) \rangle BA_{b,s_{1}}^{\lambda} \Hol_{b,\rho}^{ \smallint_{(3)} } \Hol_{b,f}^{\smallint_{(2)}} BA_{b,s_{2}}^{F^{(-2)}} \\ &\overset{\eqref{eq:HolonomyRecursionLeftJoinDefect}}= BA_{b,s_{1}}^{\lambda} \Hol_{b,f^{-1}\circ\rho}^{ \smallint \vartriangleleft S(F^{(-1)})} BA_{b,s_{2}}^{F^{(-2)}} \\ &=T_{f^{-1}\circ\rho}, \end{align*} where we used the identity $ h \vartriangleright \smallint = \smallint \vartriangleleft h $ for $h\in D(H_{b})$ in $(*)$ and in $(**)$ we used the identity \begin{align*} \Hol_{b,f}^{\beta} = \langle h,\varepsilon \rangle \Hol_{b,f}^{ 1 \oo \alpha} = \langle h \oo \beta , \varepsilon \oo x \rangle \Hol_{b,f}^{1 \oo X} \end{align*} for the face path $f$ and $\beta:=h \oo \alpha\in D(H_{b})^{*}$ that follows because $x$ and $X$ stand for dual bases. \end{proof} \subsection{Transparent defects} \label{subsec:transparent} In this section we show that the Kitaev model without defects arises as a special case of the model with defects, if one considers defects labeled with trivial defect data, called \emph{transparent defects} in the following. These are defects between two bulk regions labeled with the same Hopf algebra $H$, that are decorated with trivial defect data, namely the twist from Example \ref{example:Drinfel'dQuadrupleTwist} that relates the Drinfel'd double of a factorizable Hopf algebra $K$ to the tensor product Hopf algebra $K\oo K$. 
\begin{definition} \label{definition:TransparentDefect} A \emph{transparent defect} is a defect $d$ between bulk regions $b_{dL},b_{dR}$ such that \begin{compactenum} \item the bulk regions $b_{dL},b_{dR}$ are labeled with the same Hopf algebra $H=H_{b_{dL}}=H_{b_{dR}}$, \item the defect $d$ is labeled with the twist of $D(H) \oo D(H)$ from Example~\ref{example:Drinfel'dQuadrupleTwist} for $K=D(H)$. \end{compactenum} \end{definition} We now show that the extended space of the model with a transparent defect and of the model without the defects are related by a module homomorphism for the action defined by any site of the graph except the defect sites. For this, we consider the Kitaev models with defects and boundaries related by removing a defect line $d$ labeled with transparent defect data. Removing such a transparent defect line involves two steps: (i) modifying the underlying ribbon graph $\Gamma$ to obtain a graph $\Gamma'$, (ii) applying a linear map $\mathcal R:\HSpace\to \HSpace'$ from the extended space $\HSpace $ for $\Gamma$ to the extended space $\HSpace'$ of $\Gamma'$. The modified graph $\Gamma'$ is the ribbon graph with defects and boundaries obtained by removing the defect line $d$ and all the edges of the associated cyclic subgraph $\Gamma_{d}$, but not its vertices, and by identifying the bulk regions $b_{dL}$ and $b_{dR}$. We consider the linear map $\mathcal R: \HSpace\to \HSpace'$ that acts on the tensor factors of $H \oo H$ for edges of $d$ by \begin{align}\label{eq:rdef} m\oo n \mapsto \smallint\left( mS(n) \right) \end{align} and as the identity map on the tensor factors associated to other edges, see Figure~\ref{fig:removingdefect}. \begin{figure}[H] \begin{center} \begin{tikzpicture}[scale=.7] \begin{scope}[shift={(0,0)}] \draw[line width=1.5pt, color=red, ->,>=stealth] (-2,0)--(-1,0) node[anchor=south]{$a\oo b$}; \draw[line width=1.5pt, color=red,->,>=stealth] (-1,0)--(2,0) node[anchor=south]{$c\oo d$}; \draw[line width=1.5pt, color=red, ->,>=stealth] (2,0)--(5,0) node[anchor=south]{$p\oo q$}; \draw[line width=1.5pt, color=red,] (5,0)--(6,0); \draw[line width=1pt, color=black] (0,-3)--(0,3); \draw[line width=1pt, color=black] (4,-3)--(4,3); \draw[line width=1pt, color=black] (-1,2)--(5,2); \draw[line width=1pt, color=black] (-1,-2)--(5,-2); \draw[color=red, fill=red] (0,0) circle (.2); \draw[color=red, fill=red] (4,0) circle (.2); \draw[color=black, fill=black] (0,2) circle (.2); \draw[color=black, fill=black] (0,-2) circle (.2); \draw[color=black, fill=black] (4,2) circle (.2); \draw[color=black, fill=black] (4,-2) circle (.2); \end{scope} \draw[line width=1pt, color=black,->,>=stealth] (7,0)--(9,0); \node at (8,0)[anchor=south]{$R$}; \begin{scope}[shift={(13,0)}] \node at (-1,0) [anchor=east, color=red]{$\int(aS(b))$}; \node at (2,0) [color=red]{$\int (cS(d))$}; \node at (5,0) [anchor=west, color=red]{$\int (pS(q))$}; \draw[line width=1pt, color=black] (0,-3)--(0,3); \draw[line width=1pt, color=black] (4,-3)--(4,3); \draw[line width=1pt, color=black] (-1,2)--(5,2); \draw[line width=1pt, color=black] (-1,-2)--(5,-2); \draw[color=black, fill=black] (0,0) circle (.2); \draw[color=black, fill=black] (4,0) circle (.2); \draw[color=black, fill=black] (0,2) circle (.2); \draw[color=black, fill=black] (0,-2) circle (.2); \draw[color=black, fill=black] (4,2) circle (.2); \draw[color=black, fill=black] (4,-2) circle (.2); \end{scope} \end{tikzpicture} \end{center} \caption{Removing a defect} \label{fig:removingdefect} \end{figure} It is clear from this definition that 
$\mathcal{R}$ commutes with the holonomies along all paths $\rho$ in $D(\Gamma)$ that do not traverse edges of $\Gamma_{d}$. In particular, $\mathcal{R}$ is a module homomorphism with respect to the $D(H_b)$-module structures and $D(H_{b_{dL}})\oo D(H_{b_{dR}})$-module structures from Proposition \ref{proposition:OperatorActionDefect} that are associated with sites that are disjoint to all sites in $d$. We now consider a pair of defect sites $(s_{L},s_{R})$ in $d$. Removing the defect turns $(s_{L},s_{R})$ into a single site $s \in \Gamma'$, as shown in Figures~\ref{fig:vertextransparentdefect} and Figure~\ref{fig:facevertexbulk}. The pair $(s_{L},s_{R})$ is associated with the $D(H)$-action on $\HSpace$ from Proposition \ref{proposition:OperatorActionDefect}, 2.~defined by \begin{align}\label{eq:sitepair} BA_{d,s_{L},s_{R}}^{t_{(1)} \oo t_{(2)}}:\HSpace\to\HSpace \quad\text{ for $t\in D(H)$}, \end{align} see Figure~\ref{fig:vertextransparentdefect} and Figure~\ref{fig:facetransparentdefect} for a concrete example. The bulk site $s\in \Gamma'$ is associated with $D(H)$-action on $\HSpace'$ from from Proposition \ref{proposition:OperatorActionDefect}, 1.~defined by \begin{align}\label{eq:singlesite} BA_{s,b}^{t}:\HSpace'\to\HSpace' \quad\text{ for $t\in D(H)$}, \end{align} for a concrete example, see Figure~\ref{fig:facevertexbulk}. It turns out that the map $\mathcal R:\HSpace\to\HSpace'$ from \eqref{eq:rdef} is a module homomorphism, when $\HSpace$ and $\HSpace'$ are equipped with these two actions. \begin{figure}[H] \begin{center} \begin{tikzpicture}[scale=.7] \begin{scope}[shift={(0,0)},scale=1.3] \draw[line width=1.5pt, color=red, ->,>=stealth] (-2,0)--(-1,0) node[anchor=south]{$a\oo b\quad$}; \draw[line width=1.5pt, color=red,->,>=stealth] (-1,0)--(2,0) node[anchor=south]{$\quad c\oo d$}; \draw[line width=1.5pt, color=red, ->,>=stealth] (2,0)--(5,0); \draw[line width=1pt, color=black,->,>=stealth] (0,-3)--(0,-1); \draw[line width=1pt, color=black] (0,-1)node[anchor=east]{$f$}--(0,1)node[anchor=east]{$e$}; \draw[line width=1pt, color=black,->,>=stealth] (0,3)--(0,1); \draw[line width=1pt, color=black] (4,-3)--(4,3); \draw[line width=1pt, color=black] (-1,2)--(5,2); \draw[line width=1pt, color=black] (-1,-2)--(5,-2); \draw[line width=1pt, color=black] (0,0)--(.5,.5) node[anchor=west]{$s_L$}; \draw[line width=1pt, color=black] (0,0)--(.5,-.5) node[anchor=west]{$s_R$}; \draw[color=red, fill=red] (0,0) circle (.15); \draw[color=red, fill=red] (4,0) circle (.15); \draw[color=black, fill=black] (0,2) circle (.15); \draw[color=black, fill=black] (0,-2) circle (.15); \draw[color=black, fill=black] (4,2) circle (.15); \draw[color=black, fill=black] (4,-2) circle (.15); \end{scope} \draw[line width=1pt, color=black,->,>=stealth] (7,0)--(9,0); \node at (8,0)[anchor=south]{$BA^{\epsilon\oo h}_{d,s_L,s_R}$}; \begin{scope}[shift={(13,0)}, scale=1.3] \draw[line width=1.5pt, color=red, ->,>=stealth] (-2,0)--(-1,0) node[anchor=south]{$h_{(2)}a\oo h_{(5)}b\qquad$}; \draw[line width=1.5pt, color=red,->,>=stealth] (-1,0)--(2,0) node[anchor=south]{$\; c S(h_{(3)})\oo d S(h_{(4)})$}; \draw[line width=1.5pt, color=red, ->,>=stealth] (2,0)--(5,0); \draw[line width=1pt, color=black,->,>=stealth] (0,-3)--(0,-1); \draw[line width=1pt, color=black] (0,-1)node[anchor=east]{$h_{(6)}f$}--(0,1)node[anchor=east]{$h_{(1)}e$}; \draw[line width=1pt, color=black,->,>=stealth] (0,3)--(0,1); \draw[line width=1pt, color=black] (4,-3)--(4,3); \draw[line width=1pt, color=black] (-1,2)--(5,2); \draw[line width=1pt, color=black] 
(-1,-2)--(5,-2); \draw[color=red, fill=red] (0,0) circle (.15); \draw[color=red, fill=red] (4,0) circle (.15); \draw[color=black, fill=black] (0,2) circle (.15); \draw[color=black, fill=black] (0,-2) circle (.15); \draw[color=black, fill=black] (4,2) circle (.15); \draw[color=black, fill=black] (4,-2) circle (.15); \end{scope} \end{tikzpicture} \end{center} \caption{$H$-action defined by a vertex and face operator at a transparent defect} \label{fig:vertextransparentdefect} \end{figure} \begin{figure}[H] \begin{center} \begin{tikzpicture}[scale=.7] \begin{scope}[shift={(0,0)},scale=1.3] \draw[line width=1.5pt, color=red, ->,>=stealth] (-1,0)--(-.5,0); \draw[line width=1.5pt, color=red,->,>=stealth] (-.5,0)--(2,0) node[anchor=south]{$\quad c\oo d$}; \draw[line width=1.5pt, color=red, ->,>=stealth] (2,0)--(5,0); \draw[line width=1pt, color=black,->,>=stealth] (0,-3)--(0,-1); \draw[line width=1pt, color=black] (0,-1)node[anchor=east]{$f$}--(0,1)node[anchor=east]{$e$}; \draw[line width=1pt, color=black,->,>=stealth] (0,3)--(0,1); \draw[line width=1pt, color=black,->,>=stealth] (4,3)--(4,1) node[anchor=west]{$k$}; \draw[line width=1pt, color=black,->,>=stealth] (4,1)--(4,-1)node[anchor=west]{$m$}; \draw[line width=1pt, color=black,] (4,-1)--(4,-3); \draw[line width=1pt, color=black, ->,>=stealth] (-1,2)--(2,2) node[anchor=north]{$g$}; \draw[line width=1pt, color=black] (2,2)--(5,2); \draw[line width=1pt, color=black, ->,>=stealth] (5,-2)--(2,-2) node[anchor=south]{$n$}; \draw[line width=1pt, color=black] (-1,-2)--(2,-2); \draw[line width=1pt, color=black] (0,0)--(.5,.5) node[anchor=west]{$s_L$}; \draw[line width=1pt, color=black] (0,0)--(.5,-.5) node[anchor=west]{$s_R$}; \draw[color=red, fill=red] (0,0) circle (.15); \draw[color=red, fill=red] (4,0) circle (.15); \draw[color=black, fill=black] (0,2) circle (.15); \draw[color=black, fill=black] (0,-2) circle (.15); \draw[color=black, fill=black] (4,2) circle (.15); \draw[color=black, fill=black] (4,-2) circle (.15); \end{scope} \draw[line width=1pt, color=black,->,>=stealth] (7,0)--(9,0); \node at (8,0)[anchor=south]{$BA^{\alpha\oo 1}_{d,s_L,s_R}$}; \begin{scope}[shift={(13,0)}, scale=1.3] \draw[line width=1.5pt, color=red, ->,>=stealth] (-1,0)--(-.5,0); \draw[line width=1.5pt, color=red,->,>=stealth] (-.5,0)--(2,0) node[anchor=south]{$\quad c_{(2)}\oo d_{(1)}$}; \draw[line width=1.5pt, color=red, ->,>=stealth] (2,0)--(5,0); \draw[line width=1pt, color=black,->,>=stealth] (0,-3)--(0,-1); \draw[line width=1pt, color=black] (0,-1)node[anchor=east]{$f_{(1)}$}--(0,1)node[anchor=east]{$e_{(2)}$}; \draw[line width=1pt, color=black,->,>=stealth] (0,3)--(0,1); \draw[line width=1pt, color=black,->,>=stealth] (4,3)--(4,1) node[anchor=west]{$k_{(1)}$}; \draw[line width=1pt, color=black,->,>=stealth] (4,1)--(4,-1)node[anchor=west]{$m_{(1)}$}; \draw[line width=1pt, color=black,] (4,-1)--(4,-3); \draw[line width=1pt, color=black, ->,>=stealth] (-1,2)--(2,2) node[anchor=north]{$g_{(1)}$}; \draw[line width=1pt, color=black] (2,2)--(5,2); \draw[line width=1pt, color=black, ->,>=stealth] (5,-2)--(2,-2) node[anchor=south]{$n_{(1)}$}; \draw[line width=1pt, color=black] (-1,-2)--(2,-2); \draw[color=red, fill=red] (0,0) circle (.15); \draw[color=red, fill=red] (4,0) circle (.15); \draw[color=black, fill=black] (0,2) circle (.15); \draw[color=black, fill=black] (0,-2) circle (.15); \draw[color=black, fill=black] (4,2) circle (.15); \draw[color=black, fill=black] (4,-2) circle (.15); \node at (2,-3.5)[color=black]{$\langle \alpha, 
f_{(2)}n_{(2)}m_{(2)}d_{(2)}S(c_{(1)})k_{(2)}g_{(2)}S(e_{(1)})$}; \end{scope} \end{tikzpicture} \end{center} \caption{$H^*$-action defined by a vertex and face operator at a transparent defect.} \label{fig:facetransparentdefect} \end{figure} \begin{figure}[H] \begin{center} \begin{tikzpicture}[scale=.7] \begin{scope}[shift={(0,0)},scale=1.3] \draw[line width=1pt, color=black,->,>=stealth] (0,-3)--(0,-1); \draw[line width=1pt, color=black] (0,-1)node[anchor=east]{$f$}--(0,1)node[anchor=east]{$e$}; \draw[line width=1pt, color=black,->,>=stealth] (0,3)--(0,1); \draw[line width=1pt, color=black,->,>=stealth] (4,3)--(4,1) node[anchor=west]{$k$}; \draw[line width=1pt, color=black,->,>=stealth] (4,1)--(4,-1)node[anchor=west]{$m$}; \draw[line width=1pt, color=black,] (4,-1)--(4,-3); \draw[line width=1pt, color=black, ->,>=stealth] (-1,2)--(2,2) node[anchor=north]{$g$}; \draw[line width=1pt, color=black] (2,2)--(5,2); \draw[line width=1pt, color=black, ->,>=stealth] (5,-2)--(2,-2) node[anchor=south]{$n$}; \draw[line width=1pt, color=black] (-1,-2)--(2,-2); \draw[line width=1pt, color=black] (0,0)--(.5,0) node[anchor=west]{$s$}; \draw[color=black, fill=black] (0,0) circle (.15); \draw[color=black, fill=black] (4,0) circle (.15); \draw[color=black, fill=black] (0,2) circle (.15); \draw[color=black, fill=black] (0,-2) circle (.15); \draw[color=black, fill=black] (4,2) circle (.15); \draw[color=black, fill=black] (4,-2) circle (.15); \node at (-1,0) [anchor=east, color=red]{$\int(aS(b))$}; \node at (2,0) [color=red]{$\int (cS(d))$}; \end{scope} \draw[line width=1pt, color=black,->,>=stealth] (7,0)--(9,0); \node at (8,0)[anchor=south]{$BA^{\alpha\oo h}_{s}$}; \begin{scope}[shift={(13,0)}, scale=1.3] \draw[line width=1pt, color=black,->,>=stealth] (0,-3)--(0,-1); \draw[line width=1pt, color=black] (0,-1)node[anchor=east]{$h_{(3)}f_{(1)}$}--(0,1)node[anchor=east]{$h_{(2)}e_{(2)}$}; \draw[line width=1pt, color=black,->,>=stealth] (0,3)--(0,1); \draw[line width=1pt, color=black,->,>=stealth] (4,3)--(4,1) node[anchor=west]{$k_{(1)}$}; \draw[line width=1pt, color=black,->,>=stealth] (4,1)--(4,-1)node[anchor=west]{$m_{(1)}$}; \draw[line width=1pt, color=black,] (4,-1)--(4,-3); \draw[line width=1pt, color=black, ->,>=stealth] (-1,2)--(2,2) node[anchor=north]{$g_{(1)}$}; \draw[line width=1pt, color=black] (2,2)--(5,2); \draw[line width=1pt, color=black, ->,>=stealth] (5,-2)--(2,-2) node[anchor=south]{$n_{(1)}$}; \draw[line width=1pt, color=black] (-1,-2)--(2,-2); \draw[color=black, fill=black] (0,0) circle (.15); \draw[color=black, fill=black] (4,0) circle (.15); \draw[color=black, fill=black] (0,2) circle (.15); \draw[color=black, fill=black] (0,-2) circle (.15); \draw[color=black, fill=black] (4,2) circle (.15); \draw[color=black, fill=black] (4,-2) circle (.15); \node at (2, -3.5)[color=black]{$\langle \alpha, h_{(4)}f_{(2)}n_{(2)}m_{(2)}k_{(2)}g_{(2)}S(e_{(1)})S(h_{(1)})$}; \node at (-1,0) [anchor=east, color=red]{$\int(aS(b))$}; \node at (2,0) [color=red]{$\int (cS(d))$}; \end{scope} \end{tikzpicture} \end{center} \caption{$D(H)$-action defined by a vertex and face operator in the bulk after removing the defect line.} \label{fig:facevertexbulk} \end{figure} \begin{proposition} \label{proposition:RemoveTransparentHomomorphism} The map $\mathcal{R}:\HSpace\to\HSpace'$ is a $D(H)$-module homomorphism with respect to the $D(H)$-actions defined by \eqref{eq:sitepair} and \eqref{eq:singlesite}: \begin{align*} \mathcal R\circ BA^{t_{(1)}\oo t_{(2)}}_{d,s_L,s_R}=BA^t_s\circ \mathcal R. 
\end{align*} \end{proposition} \begin{proof} For $t=\alpha \oo h\in D(H)$ we have \begin{align} BA_{d,s_{L},s_{R}}^{t_{(1)} \oo t_{(2)}} &= BA_{b_{dL},s_{L}}^{t_{(1)} } \circ BA_{b_{dR},s_{R}}^{t_{(2)}} = B_{b_{dL},s_{L}}^{\alpha_{(2)}}\circ A_{b_{dL},s_{L}}^{h_{(1)}}\circ B_{b_{dR},s_{R}}^{\alpha_{(1)}} \circ A_{b_{dR},s_{R}}^{h_{(2)}} \nonumber \\ &\overset{\ref{proposition:OperatorsCommute}}{=} B_{b_{dL},s_{L}}^{\alpha_{(2)}}\circ B_{b_{dR},s_{R}}^{\alpha_{(1)}} \circ A_{b_{dL},s_{L}}^{h_{(1)}} \circ A_{b_{dR},s_{R}}^{h_{(2)}} \label{eq:TransparentDefectAction} \end{align} We now show the claim for the example from Figure~\ref{fig:removingdefect}. We start by computing $\mathcal R\circ BA_{d,s_{L},s_{R}}^{t_{(1)} \oo t_{(2)}} $. Using the formulas from Figure~\ref{fig:vertextransparentdefect} and \ref{fig:facetransparentdefect} we obtain: \begin{align*} &BA_{d,s_{L},s_{R}}^{t_{(1)} \oo t_{(2)}} \left( a \oo b \oo c \oo d \oo e \oo f \oo g \oo k \oo m \oo n\right) \\ &\; \overset{\eqref{eq:TransparentDefectAction}}{=} B_{b_{dL},s_{L}}^{\alpha_{(2)}}\circ B_{b_{dR},s_{R}}^{\alpha_{(1)}} \circ A_{b_{dL},s_{L}}^{h_{(1)}} \circ A_{b_{dR},s_{R}}^{h_{(2)}} \left( a \oo b \oo c \oo d \oo e \oo f \oo g \oo k \oo m \oo n\right) \\ &\overset{\ref{fig:vertextransparentdefect}}{=} B_{b_{dL},s_{L}}^{\alpha_{(2)}}\circ B_{b_{dR},s_{R}}^{\alpha_{(1)}} ( h_{(2)}a \oo h_{(5)}b \oo c_{(2)}S(h_{(3)}) \oo d_{(1)}S(h_{(4)}) \\ &\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\oo h_{(1)}e_{(2)} \oo h_{(6)} f_{(1)} \oo g_{(1)} \oo k_{(1)} \oo m_{(1)} \oo n_{(1)} ) \\& \overset{\ref{fig:facetransparentdefect}}= \langle \alpha ,h_{(10)} f_{(2)}n_{(2)}m_{(2)}d_{(2)}S(h_{(6)})h_{(5)} S(c_{(1)})k_{(2)}g_{(2)}S(e_{(1)}) S(h_{(1)}) \rangle \\ &\qquad h_{(3)}a \oo h_{(8)}b \oo c_{(2)}S(h_{(4)}) \oo d_{(1)}S(h_{(7)}) \oo h_{(2)}e_{(2)} \oo h_{(9)} f_{(1)} \oo g_{(1)} \oo k_{(1)} \oo m_{(1)} \oo n_{(1)} \\ &\;=\langle \alpha ,h_{(8)} f_{(2)}n_{(2)}m_{(2)}d_{(2)}S(c_{(1)})k_{(2)}g_{(2)}S(e_{(1)}) S(h_{(1)}) \rangle \\ &\qquad h_{(3)}a \oo h_{(6)}b \oo c_{(2)}S(h_{(4)}) \oo d_{(1)}S(h_{(5)}) \oo h_{(2)}e_{(2)} \oo h_{(7)} f_{(1)} \oo g_{(1)} \oo k_{(1)} \oo m_{(1)} \oo n_{(1)} \end{align*} Applying the map $\mathcal{R}$ to this expression yields \begin{align} &\mathcal{R}\circ BA_{d,s_{L},s_{R}}^{t_{(1)} \oo t_{(2)}} \left( a \oo b \oo c \oo d \oo e \oo f \oo g \oo k \oo m \oo n\right)\nonumber \\ \nonumber &\;= \langle \alpha ,h_{(8)} f_{(2)}n_{(2)}m_{(2)}d_{(2)}S(c_{(1)})k_{(2)}g_{(2)}S(e_{(1)}) S(h_{(1)}) \rangle \cdot\langle \smallint , h_{(3)}a S( h_{(6)}b) \rangle \\ \nonumber &\qquad \langle \smallint , c_{(2)}S(h_{(4)}) h_{(5)}S(d_{(1)}) \rangle h_{(2)}e_{(2)} \oo h_{(7)} f_{(1)} \oo g_{(1)} \oo k_{(1)} \oo m_{(1)} \oo n_{(1)} \\ \nonumber &\stackrel{(*)}= \langle \alpha ,h_{(6)} f_{(2)}n_{(2)}m_{(2)}d_{(2)}S(c_{(1)})k_{(2)}g_{(2)}S(e_{(1)}) S(h_{(1)}) \rangle \cdot\langle \smallint , h_{(3)}a S(b) S( h_{(4)}) \rangle \\ \nonumber &\qquad\langle \smallint , c_{(2)}S(d_{(1)}) \rangle h_{(2)}e_{(2)} \oo h_{(5)} f_{(1)} \oo g_{(1)} \oo k_{(1)} \oo m_{(1)} \oo n_{(1)} \\ \nonumber &\stackrel{(**)}= \langle \alpha ,h_{(4)} f_{(2)}n_{(2)}m_{(2)}d_{(2)}S(c_{(1)})k_{(2)}g_{(2)}S(e_{(1)}) S(h_{(1)}) \rangle \cdot\langle \smallint , a S(b) \rangle \\ \nonumber &\qquad\langle \smallint , c_{(2)}S(d_{(1)}) \rangle h_{(2)}e_{(2)} \oo h_{(3)} f_{(1)} \oo g_{(1)} \oo k_{(1)} \oo m_{(1)} \oo n_{(1)} \\ \nonumber &\stackrel{(***)}= \langle \alpha ,h_{(4)} f_{(2)}n_{(2)}m_{(2)}k_{(2)}g_{(2)}S(e_{(1)}) S(h_{(1)}) \rangle 
\cdot\langle \smallint , a S(b) \rangle \\ &\qquad\langle \smallint , cS(d) \rangle h_{(2)}e_{(2)} \oo h_{(3)} f_{(1)} \oo g_{(1)} \oo k_{(1)} \oo m_{(1)} \oo n_{(1)}. \label{eq:RemoveDefectVertexFaceOp} \end{align} Here, we used in $(*)$ the defining property of the antipode $S$, in $(**)$ the cyclicity property $\langle \smallint , pq \rangle = \langle \smallint , qp \rangle $ of the Haar integral $\smallint$ together with the properties of the antipode, and in $(***)$ the identity $ \langle \smallint , p_{(2)} \rangle p_{(1)} = \langle \smallint , p \rangle 1 $ for the Haar integral $\smallint$ applied to $p=cS(d)$. The result of applying the operator $BA_s^t\circ \mathcal R$ instead is given in Figure~\ref{fig:facevertexbulk}. Comparing~\eqref{eq:RemoveDefectVertexFaceOp} to this result, we find that the two expressions are identical. This shows that $\mathcal{R}$ is a $D(H)$-module homomorphism in this example. The proof for a pair of defect sites in a general graph is analogous. In the general case, we may have a different number of edges at the vertex and the two faces of $s_{L}$ and $s_{R}$. Nevertheless, the two defect edges at the vertex are still labeled with elements $a \oo b, c \oo d\in H \oo H$. Applying $BA_{d,s_{L},s_{R}}^{t_{(1)} \oo t_{(2)}}$, we obtain the following term \begin{align*} &BA_{d,s_{L},s_{R}}^{t_{(1)} \oo t_{(2)}} \left( a \oo b \oo c \oo d \oo \cdots \right) \\&\quad=\langle \alpha , h_{(\mathrm{max})} \cdots d_{(2)}S(c_{(1)})\cdots S(h_{(1)}) \rangle \cdot h_{(n)}a \oo h_{(n+3)}b \oo c_{(2)}S(h_{(n+1)}) \oo d_{(1)}S(h_{(n+2)}) \oo \cdots \end{align*} where $n\in \mathbb{N}$ depends on the number of edges at the vertex of $(s_{L},s_{R})$ and $\cdots$ stands for terms coming from the other edges at the faces and vertices. Applying $\mathcal{R}$ and proceeding analogously to the example, the terms coming from the defect edges cancel. The result again coincides with the one obtained by computing $BA_s^t\circ \mathcal R$. \end{proof} In this article we have constructed a Kitaev model with topological boundaries and defects which satisfies the axioms~(D\ref{D1}) to~(T\ref{T2}) from Section~\ref{section:TranslationKitaev}, which are the counterparts of the conditions postulated in~\cite{FSV} for a Turaev-Viro TQFT with topological boundaries and defects. The vertex and face operators at a site define a representation of the Drinfel'd double $D(H)$ on the extended space $\HSpace$. This in turn allows us to define excitations, which we can generate, move, fuse and braid using (twisted) holonomies and the transport operator. The rules governing these operations are the Hopf algebraic counterpart of the categorical rules for Turaev-Viro TQFTs from~\cite{FSV}. The last result shows how we regain the Kitaev model without defects from a model that only has transparent defects. Our construction suggests multiple questions for further research. The relation between the model with transparent defects and the model without defects suggests a definition for the protected space of our model. It is plausible that this would define a topological invariant and, for a model with only transparent defects, coincide with the protected space of the Kitaev model without defects. It would also be interesting to give an additional extension of our model with codimension-two defects between different defect lines and to investigate the passage of excitations through defect lines.
It would also be interesting to compare our models with Kitaev models with other, not necessarily topological, types of defects, such as the ones in~\cite{BD,KK} or the defects based on bicomodule algebras in~\cite{K}. While some of these models admit more general defect data, the interaction of defects with excitations is not investigated there, and the works \cite{KK,K} do not derive transport, fusion or braiding operators that allow one to investigate the behavior of excitations. Finally, it would be interesting to compare our construction with models inspired by TQFTs with defects that investigate mapping class group actions, such as~\cite{FSS}. \newpage \begin{thebibliography}{999} \bibitem[AEGN]{AE} Aljadeff, E., Etingof, P., Gelaki, S., \& Nikshych, D. (2002). On twisting of finite-dimensional Hopf algebras. Journal of Algebra, 256(2), 484-501. \bibitem[BA]{BA} Buerschaper, O., \& Aguado, M. (2009). Mapping Kitaev's quantum double lattice models to Levin and Wen's string-net models. Physical Review B, 80(15), 155136. \bibitem[BCKA]{BCKA} Buerschaper, O., Christandl, M., Kong, L., \& Aguado, M. (2013). Electric-magnetic duality of lattice systems with topological order. Nuclear Physics B, 876(2), 619-636. \bibitem[BD]{BD} Bombin, H., \& Martin-Delgado, M. A. (2008). Family of non-Abelian Kitaev models on a lattice: Topological condensation and confinement. Physical Review B, 78(11), 115421. \bibitem[BK10]{BK10} Balsam, B. (2010). Turaev-Viro invariants as an extended TQFT III. arXiv preprint arXiv:1012.0560. \bibitem[BK12]{BK12} Balsam, B., \& Kirillov Jr, A. (2012). Kitaev's lattice model and Turaev-Viro TQFTs. arXiv preprint arXiv:1206.2308. \bibitem[BL]{BL} Bonnington, C. P., \& Little, C. H. C. (1995). The Foundations of Topological Graph Theory. Springer, New York, NY. \bibitem[BMCA]{BMCA} Buerschaper, O., Mombelli, J. M., Christandl, M., \& Aguado, M. (2013). A hierarchy of topological tensor network states. Journal of Mathematical Physics, 54(1), 012201. \bibitem[BW96]{BW} Barrett, J., \& Westbury, B. (1996). Invariants of piecewise-linear 3-manifolds. Transactions of the American Mathematical Society, 348(10), 3997-4022. \bibitem[Ch]{Ch} Chang, L. (2014). Kitaev models based on unitary quantum groupoids. Journal of Mathematical Physics, 55(4), 041703. \bibitem[EGNO]{EGNO} Etingof, P., Gelaki, S., Nikshych, D., \& Ostrik, V. (2016). Tensor categories (Vol. 205). American Mathematical Soc. \bibitem[FSS]{FSS} Fuchs, J., Schaumann, G., \& Schweigert, C. (2019). A modular functor from state sums for finite tensor categories and their bimodules. ZMP-HH/19-22, Hamburger Beitr\"age zur Mathematik Nr. 812. \bibitem[FSV]{FSV} Fuchs, J., Schweigert, C., \& Valentino, A. (2013). Bicategories for boundary conditions and for surface defects in 3-d TFT. Communications in Mathematical Physics, 321(2), 543-575. \bibitem[JS]{JS} Joyal, A., \& Street, R. (1991). An introduction to Tannaka duality and quantum groups. In Category theory (pp. 413-492). Springer, Berlin, Heidelberg. \bibitem[Ka]{Ka} Kassel, C. (2012). Quantum groups (Vol. 155). Springer Science \& Business Media. \bibitem[Ki]{Ki} Kitaev, A. Y. (2003). Fault-tolerant quantum computation by anyons. Annals of Physics, 303(1), 2-30. \bibitem[KK]{KK} Kitaev, A., \& Kong, L. (2012). Models for gapped boundaries and domain walls. Communications in Mathematical Physics, 313(2), 351-373. \bibitem[KKR]{KKR} Koenig, R., Kuperberg, G., \& Reichardt, B. W. (2010). Quantum computation with Turaev-Viro codes. Annals of Physics, 325(12), 2707-2749.
\bibitem[KN]{Kn} Knapp, A. W. (2007). Advanced algebra. Springer Science \& Business Media. \bibitem[Ko]{K} Koppen, V. (2020). Defects in Kitaev models and bicomodule algebras. ZMP-HH/20-1, Hamburger Beitr\"age zur Mathematik Nr. 819. \bibitem[Kr]{Kr} Kirillov Jr., A. (2011). String-net model of Turaev-Viro invariants. arXiv preprint arXiv:1106.6033. \bibitem[KT]{KT} Kassel, C., \& Turaev, V. (2008). Braid groups (Vol. 247). Springer Science \& Business Media. \bibitem[KMM]{KMM} Koppen, V., Martins, J. F., \& Martin, P. P. (2021). Exactly solvable models for 2+1D topological phases derived from crossed modules of semisimple Hopf algebras. arXiv preprint arXiv:2104.02766. \bibitem[LW]{LW} Levin, M. A., \& Wen, X. G. (2005). String-net condensation: A physical mechanism for topological phases. Physical Review B, 71(4), 045110. \bibitem[LZ]{LZ} Lando, S. K., \& Zvonkin, A. K. (2013). Graphs on surfaces and their applications (Vol. 141). Springer Science \& Business Media. \bibitem[Ma]{Ma} Majid, S. (2000). Foundations of quantum group theory. Cambridge University Press. \bibitem[Me]{Me} Meusburger, C. (2017). Kitaev lattice models as a Hopf algebra gauge theory. Communications in Mathematical Physics, 353(1), 413-468. \bibitem[Mo]{Mo} Montgomery, S. (1993). Hopf algebras and their actions on rings (No. 82). American Mathematical Soc. \bibitem[R]{R} Radford, D. E. (2011). Hopf algebras (Vol. 49). World Scientific. \bibitem[RT]{RT} Reshetikhin, N. Y., \& Turaev, V. G. (1990). Ribbon graphs and their invariants derived from quantum groups. Communications in Mathematical Physics, 127(1), 1-26. \bibitem[Sc92]{S} Schauenburg, P. (1992). Tannaka duality for arbitrary Hopf algebras (No. 66). R. Fischer. \bibitem[TV]{TV} Turaev, V. G., \& Viro, O. Y. (1992). State sum invariants of 3-manifolds and quantum 6j-symbols. Topology, 31(4), 865-902. \bibitem[U]{U} Ulbrich, K. H. (1990). On Hopf algebras and rigid monoidal categories. Israel Journal of Mathematics, 72(1-2), 252-256. \end{thebibliography} \end{document}
2205.15200v5
http://arxiv.org/abs/2205.15200v5
Markov selections and Feller properties of nonlinear diffusions
\documentclass{amsart} \usepackage[margin=2.8cm]{geometry} \linespread{1.025} \usepackage{amssymb} \usepackage{graphicx} \usepackage[mathscr]{euscript} \usepackage{enumerate} \usepackage{xspace} \usepackage{color} \usepackage{enumerate} \usepackage{enumitem} \usepackage{amsthm} \usepackage{dsfont} \usepackage{mathrsfs} \usepackage{bbm} \usepackage[colorlinks=true, linkcolor = blue, citecolor = blue]{hyperref} \usepackage[numbers]{natbib} \usepackage[backgroundcolor=white,bordercolor=red]{todonotes} \DeclareMathAlphabet{\mathpzc}{OT1}{pzc}{m}{it} \usepackage[nameinlink]{cleveref} \numberwithin{equation}{section} \begin{document} \theoremstyle{plain} \newtheorem{theorem}{Theorem}[section] \newtheorem{lemma}[theorem]{Lemma} \newtheorem{example}[theorem]{Example} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{definition}[theorem]{Definition} \newtheorem{Ass}[theorem]{Assumption} \newtheorem{condition}[theorem]{Condition} \theoremstyle{definition} \newtheorem{remark}[theorem]{Remark} \newtheorem{SA}[theorem]{Standing Assumption} \newcommand{\of}{[\hspace{-0.06cm}[} \newcommand{\gs}{]\hspace{-0.06cm}]} \newcommand\llambda{{\mathchoice {\lambda\mkern-4.5mu{\raisebox{.4ex}{\scriptsize$\backslash$}}} {\lambda\mkern-4.83mu{\raisebox{.4ex}{\scriptsize$\backslash$}}} {\lambda\mkern-4.5mu{\raisebox{.2ex}{\footnotesize$\scriptscriptstyle\backslash$}}} {\lambda\mkern-5.0mu{\raisebox{.2ex}{\tiny$\scriptscriptstyle\backslash$}}}}} \newcommand{\1}{\mathds{1}} \newcommand{\F}{\mathbf{F}} \newcommand{\G}{\mathbf{G}} \newcommand{\B}{\mathbf{B}} \newcommand{\M}{\mathcal{M}} \newcommand{\la}{\langle} \newcommand{\ra}{\rangle} \newcommand{\lle}{\langle\hspace{-0.085cm}\langle} \newcommand{\rre}{\rangle\hspace{-0.085cm}\rangle} \newcommand{\blle}{\Big\langle\hspace{-0.155cm}\Big\langle} \newcommand{\brre}{\Big\rangle\hspace{-0.155cm}\Big\rangle} \newcommand{\X}{\mathsf{X}} \newcommand{\bx}{\mathsf{x}} \newcommand{\bX}{\mathsf{X}} \newcommand{\tr}{\operatorname{tr}} \newcommand{\N}{{\mathbb{N}}} \newcommand{\cadlag}{c\`adl\`ag } \newcommand{\on}{\operatorname} \newcommand{\oP}{\overline{P}} \newcommand{\oQ}{\overline{Q}} \newcommand{\oO}{\mathcal{O}} \newcommand{\D}{D(\mathbb{R}_+; \mathbb{R})} \renewcommand{\epsilon}{\varepsilon} \newcommand{\fPs}{\mathfrak{P}_{\textup{sem}}} \newcommand{\fPas}{\mathfrak{P}^{\textup{ac}}_{\textup{sem}}} \newcommand{\rrarrow}{\twoheadrightarrow} \newcommand{\cC}{\mathcal{C}} \newcommand{\cK}{\mathcal{K}} \newcommand{\cH}{\mathcal{H}} \newcommand{\cD}{\mathcal{D}} \newcommand{\cE}{\mathcal{E}} \newcommand{\cR}{\mathcal{R}} \newcommand{\cQ}{\mathcal{Q}} \newcommand{\cF}{\mathcal{F}} \newcommand{\bth}{\overset{\leftarrow}\theta} \renewcommand{\th}{\theta} \newcommand{\bR}{\mathbb{R}} \newcommand{\nnabla}{\nabla} \newcommand{\f}{\mathfrak{f}} \newcommand{\g}{\mathfrak{g}} \newcommand{\oconv}{\overline{\operatorname{conv}}\hspace{0.1cm}} \newcommand{\usa}{\on{usa}} \newcommand{\usc}{\textit{USC}} \newcommand{\uc}{\textit{UC}} \newcommand{\lip}{\textit{Lip}} \newcommand{\C}{\mathsf{C}} \newcommand{\ou}{\overline{u}} \newcommand{\ua}{\underline{a}} \newcommand{\uu}{\underline{u}} \newcommand{\p}{\mathsf{P}} \newcommand{\cU}{\mathcal{U}} \renewcommand{\emptyset}{\varnothing} \allowdisplaybreaks \makeatletter \@namedef{subjclassname@2020}{ \textup{2020} Mathematics Subject Classification} \makeatother \title[Markov Selections and Feller Properties of nonlinear Diffusions]{Markov Selections and Feller Properties \\ of nonlinear 
Diffusions} \author[D. Criens]{David Criens} \author[L. Niemann]{Lars Niemann} \address{Albert-Ludwigs University of Freiburg, Ernst-Zermelo-Str. 1, 79104 Freiburg, Germany} \email{[email protected]} \email{[email protected]} \keywords{ nonlinear diffusion; nonlinear semimartingales; nonlinear Markov processes; sublinear expectation; sublinear semigroup; nonlinear expectation; partial differential equation; viscosity solution; semimartingale characteristics; Knightian uncertainty} \subjclass[2020]{47H20, 49L25, 60G53, 60G65, 60J60} \thanks{We thank two anonymous referees for many comments and suggestions that helped us to improve the paper.} \thanks{DC acknowledges financial support from the DFG project SCHM 2160/15-1 and LN acknowledges financial support from the DFG project SCHM 2160/13-1.} \date{\today} \maketitle \begin{abstract} In this paper we study a family of nonlinear (conditional) expectations that can be understood as a diffusion with uncertain local characteristics. Here, the differential characteristics are prescribed by a set-valued function. We establish its Feller properties and examine how to linearize the associated sublinear Markovian semigroup. In particular, we observe a novel smoothing effect of sublinear semigroups in frameworks which carry enough randomness. Furthermore, we link the value function corresponding to the semigroup to a nonlinear Kolmogorov equation. This provides a connection to the so-called Nisio semigroup. \end{abstract} \section{Introduction} A \emph{nonlinear diffusion}, or \emph{nonlinear continuous Markov process}, is a family of sublinear expectations \( \{\cE^x \colon x \in \bR\} \) on the Wiener space \( C(\bR_+; \bR) \) with \( \cE^x \circ X_0^{-1} = \delta_x \) for each \( x \in \bR \) such that the Markov property \begin{equation} \label{eq: markov property} \cE^x(\cE^{X_t}(\psi (X_s))) = \cE^x(\psi (X_{t+s}) ), \quad x \in \bR, \ s,t \in \bR_+, \end{equation} holds. Here, \(\psi\) runs through a collection of suitable test functions and \( X \) denotes the canonical process on \( C(\bR_+; \bR) \). Building upon the pioneering work of Peng \cite{peng2007g, peng2008multi} on the \(G\)-Brownian motion, nonlinear Markov processes have been intensively studied in recent years, both from the perspective of processes under uncertainty \cite{fadina2019affine, hu2021g, neufeld2017nonlinear}, as well as sublinear Markovian semigroups \cite{denk2020semigroup,GNR22, hol16,K21, NR}. Using the techniques developed in \cite{NVH}, a general framework for constructing nonlinear Markov processes was established in \cite{hol16}. To be more precise, for given \( x \in \bR\), the sublinear expectation \( \cE^x \) has the form \( \cE^x = \sup_{P \in \cR(x)} E^P \) with a collection \( \cR(x) \) of semimartingale laws \(P\) on the path space, with initial distribution \( \delta_x \) and absolutely continuous semimartingale characteristics \((B^{P}, C^{P})\), where the differential characteristics \((dB^{P} /d\llambda, dC^{P}/d\llambda)\) are prescribed in a Markovian way. In this paper, we parameterize drift and quadratic variation by a compact parameter space \(F\) and two functions \(b \colon F \times \bR \to \bR\) and \(a \colon F \times \bR \to \bR_+\) such that \[ \cR (x) := \big\{ P \in \fPas \colon P \circ X_0^{-1} = \delta_{x},\ (\llambda \otimes P)\text{-a.e. } (dB^{P} /d\llambda, dC^{P}/d\llambda) \in \Theta(X) \big\}, \quad x \in \bR, \] where \[ \Theta (x) := \big\{(b (f, x), a (f, x)) \colon f \in F \big\}, \quad x \in \bR. 
\] As in the theory of (linear) Markov processes, there is a strong link to semigroups. Indeed, the Markov property \eqref{eq: markov property} ensures the semigroup property \( T_t T_s = T_{s+t}, s,t \in \bR_+ \), where the sublinear operators \( T_t, t \in \bR_+, \) are defined by \begin{equation} \label{eq: def semigroup} T_t(\psi)(x) := \cE^x(\psi(X_t)) = \sup_{P \in \cR(x)} E^P \big[ \psi(X_t) \big] \end{equation} for suitable functions \(\psi\). Using the general theory of \cite{ElKa15, NVH}, the operators \(T_t, t \in \mathbb{R}_+\), are well-defined on the cone of upper semianalytic functions. The purpose of this article is to study two aspects of nonlinear diffusions. First, we examine the \emph{Feller property} of its associated semigroup and second, we investigate how to \emph{linearize} a nonlinear diffusion, respectively its associated semigroup. Let us explain our contributions in more detail. In the case of nonlinear L\'evy processes, i.e., where the set of possible local characteristics is independent of time and path, Equation \eqref{eq: def semigroup} takes the form \begin{equation} \label{eq: levy semigroup} T_t(\psi)(x) = \sup_{P \in \cR(0)} E^P \big[ \psi(x + X_t) \big] = \cE^0(\psi(x + X_t)), \end{equation} and the additive structure in \eqref{eq: levy semigroup} gives access to the \emph{\(C_b\)--Feller property} of \( (T_t)_{t \in \bR_+} \), i.e., \( T_t(C_b(\bR; \bR)) \) \(\subset C_b(\bR; \bR)\) for all \(t \in \mathbb{R}_+\). However, in general, this property seems to be hard to verify, see \cite[Remark~4.43]{hol16}, \cite[Remark 3.4]{K19} and \cite[Remark 5.4]{K21} for comments. By virtue of Berge's maximum theorem, it is a natural idea to deduce regularity properties of the sublinear semigroup \((T_t)_{t \in \mathbb{R}_+}\) from the corresponding properties of the set-valued mapping \(x \mapsto \cR(x)\). The strategy to deduce certain regularity properties from related properties of a correspondence is not new. For instance, it has been used in the seminal paper \cite{nicole1987compactification} to obtain conditions for upper and lower semicontinuity of a value function in a relaxed framework for controlled diffusions. In our setting, we prove that \(x \mapsto \cR(x)\) is upper hemicontinuous and compact-valued in case \(b\) and \(a\) are continuous and of linear growth, and the set \(\Theta(x)\) is convex for every \(x \in \bR\). This result establishes the \emph{\(\usc_b\)--Feller property} of \( (T_t)_{t \in \bR_+} \), i.e., \( T_t( \usc_b(\bR; \bR)) \subset \usc_b(\bR; \bR) \) for all~\(t \in \mathbb{R}_+\). Lower hemicontinuity of the correspondence \(x \mapsto \cR(x)\) appears to be more difficult to establish. In particular, the example from \cite{SV} for the non-existence of a Feller selection from the set of solutions to a non-wellposed martingale problem shows that our conditions for the upper hemicontinuity of \(x \mapsto \mathcal{R} (x)\) are not sufficient for its lower hemicontinuity. There are indications in the literature that additional Lipschitz conditions on \(b\) and \(a\) might suffice for lower hemicontinuity. Indeed, such a result was established in \cite{nicole1987compactification} for the relaxed framework of controlled diffusions. After this paper was submitted, local Lipschitz conditions for lower hemicontinuity of a fully path-dependent nonlinear continuous semimartingale framework have been proved in an update of our paper~\cite{CN22}. 
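For orientation, we record an elementary consequence of the additive structure in \eqref{eq: levy semigroup}; this observation is only meant as an illustration and is not used in the sequel. If \(\psi\) is bounded and uniformly continuous with modulus of continuity \(\omega_\psi\), then, using that \(|\sup_P a_P - \sup_P b_P| \leq \sup_P |a_P - b_P|\) for bounded families of reals,
\[
|T_t(\psi)(x) - T_t(\psi)(y)| \leq \sup_{P \in \cR(0)} E^P \big[ |\psi(x + X_t) - \psi(y + X_t)| \big] \leq \omega_\psi(|x - y|), \quad x, y \in \bR,
\]
so that, in the L\'evy-type situation, \(T_t\) maps bounded uniformly continuous functions to bounded uniformly continuous functions for trivial reasons. No such elementary argument seems to be available once the set of possible characteristics genuinely depends on the state, which is the situation studied in this paper.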
In this paper we present a new approach to show Feller properties of \( (T_t)_{t \in \bR_+} \) by means of a \emph{Feller selection principle}. Let us detail our ideas. As the correspondence \( x \mapsto \cR (x) \) is compact-valued, for every upper semicontinuous function \( \psi \colon C(\bR_+; \bR) \to \bR \), and every \( x \in \bR \), there exists a measure \(P_x \) in the set of maximizers \( \cR^*(x) \) of~\eqref{eq: def semigroup}. Building on ideas of Krylov \cite{krylov1973selection} about Markovian selection, and leaning on the techniques from \cite{nicole1987compactification, hausmann86, SV}, we show that for every bounded and upper semicontinuous \( \psi \colon \bR \to \bR\) and every \(t > 0\), there exists a time inhomogeneous \emph{strong Markov selection} \(\{P_{(s, x)} \colon (s, x) \in \bR_+ \times \bR\}\) such that~\(P_{(0, x)} \in \cR (x)\) and \begin{equation*} T_t (\psi) (x) = E^{P_{(0, x)}} \big[ \psi (X_t) \big] . \end{equation*} Under the additional ellipticity assumption \(a > 0\), using some fundamental results of Stroock and Varadhan \cite{SV}, we prove that the strong Markov family \(\{P_{(t, x)} \colon (t, x) \in \bR_+ \times \bR\}\) is even a (time inhomogeneous) strong Feller family. In particular, this Feller selection principle shows that the semigroup \( (T_t)_{t \in \bR_+} \) has the \emph{strong \(\usc_b\)--Feller property}, i.e., \(T_t (\usc_b (\mathbb{R}; \mathbb{R})) \subset C_b (\mathbb{R}; \mathbb{R})\) for all \(t > 0\). Under a uniform ellipticity and boundedness assumption, it even follows that \((T_t)_{t \in \bR_+}\) has the {\em uniform strong Feller property} in the sense that \(T_t\) for \(t > 0\) maps bounded upper semicontinuous functions to bounded uniformly continuous functions. To the best of our knowledge, \emph{smoothing effects} of this specific form were not reported before in the context of nonlinear Markov processes. Related, but generally different, smoothing effects are known for viscosity solutions to parabolic Hamilton--Jacobi--Bellman (HJB) (or more general nonlinear) PDEs, see, e.g., \cite{Cra02,kry18, kry17} and the references therein. Under suitable conditions, in the HJB case a continuous terminal function leads to viscosity solutions of class \(C^{1 + \alpha}_{\on{loc}}\) (cf. \cite{Cra02} for details on this result and the notation). In our situation, plugging a possibly {\em discontinuous} function into a sublinear semigroup leads to a {\em continuous} function. We emphasise that our idea of first proving the existence of a strong Markov selection and then verifying its (strong) Feller property extends to higher dimensional situations. Let us also relate our result to the recent local Lipschitz conditions from \cite{CN22}. It is well-known in the literature on stochastic differential equations (SDEs) that (local) Lipschitz conditions imply pathwise uniqueness (even in the presence of random coefficients, see, e.g., \cite{jacod79}). Indeed, the proof from \cite{CN22} for lower hemicontinuity relies in a crucial manner on the strong existence and pathwise uniqueness of SDEs with random coefficients. We are not aware of ellipticity conditions for existence and uniqueness of SDEs with random coefficients. Therefore, we think that a new strategy as proposed in this paper is useful. Sublinear semigroups can also be constructed by analytic methods. A general approach leading to the so-called {\em Nisio semigroup} and a corresponding viscosity theory was recently established in the paper \cite{NR}. 
The framework from \cite{NR} allows for general state spaces, and provides conditions for Feller properties on spaces of weighted continuous functions. If the weight function vanishes at infinity, this includes the \(C_b\)--Feller property. Further, the \(C_b\)--Feller property can also be derived in case the weight function is bounded from below and the Nisio semigroup is continuous from above (in a suitable sense). For the case of convolution semigroups, corresponding to the L\'evy framework, it has been shown in \cite{K21} that \((T_t)_{t \in \bR_+}\) coincides with the Nisio semigroup. This relation is based on the link between the semigroup and its generator. We investigate this for our framework. More precisely, the \(C_b\)--Feller property allows us to identify the so-called value function \([0, T] \times \mathbb{R} \ni (t, x) \mapsto v (t, x) := \cE^x(\psi(X_{T-t})) \) as a bounded viscosity solution to the nonlinear Kolmogorov type PDE \begin{equation} \label{eq: intro PDE} \begin{cases} \partial_t v (t, x) + G (t, x, v) = 0, & \text{for } (t, x) \in [0, T) \times \mathbb{R}, \\ v (T, x) = \psi (x), & \text{for } x \in \bR, \end{cases} \end{equation} where \begin{align*} G(t, x, \phi) := \sup \Big\{b (f, x) \partial_x \phi (t,x) + \tfrac{1}{2} a (f, x) \partial^2_x\phi (t,x) \colon f \in F \Big\}. \end{align*} Under suitable Lipschitz and boundedness conditions, we can use uniqueness results for the generator equation \eqref{eq: intro PDE} to show that the semigroup \((T_t)_{t \in \bR_+}\) from \eqref{eq: def semigroup} coincides with the Nisio semigroup from \cite{NR} on the space \(\uc_b (\bR; \bR)\) of bounded uniformly continuous functions from \(\bR\) into \(\bR\). In particular, this relation shows that \((T_t)_{t\in \bR_+}\) is a sublinear semigroup on \(\uc_b(\bR; \bR)\) under these conditions. As a referee has pointed out, under Lipschitz conditions, one can prove that the upper and lower envelopes of the value function are viscosity sub- and supersolutions, respectively. A (strong) comparison result (see, e.g., \cite{Pham}) then implies that the value function is already the unique bounded viscosity solution. In particular, its continuity is established en passant. We also detail this (mainly) analytic approach in this paper. \smallskip Let us now explain the idea of \emph{linearization} of a sublinear Markovian semigroup. In the presence of uncertainty, it is not possible to choose a single family \( \{ P_x \colon x \in \bR \} \) such that \begin{equation*} T_t (\psi) (x) = E^{P_x} \big[ \psi (X_t) \big] \end{equation*} for all functions \( \psi \) and \(t > 0\). However, under some structural assumptions, for example in the case without drift, we are able to construct a time homogeneous \emph{strong Feller family} \( \{ P^*_x \colon x \in \bR \} \) such that \begin{equation} \label{eq: intro convex} \cE^x(\psi(X_t)) = E^{P^*_x} \big[ \psi(X_t) \big] , \end{equation} for \emph{all} convex functions \( \psi \colon \bR \to \bR \) of polynomial growth, and all \( t \in \bR_+ \). We derive \eqref{eq: intro convex} by means of convex stochastic ordering, where we adapt techniques from \cite{hajek85}. Similarly, we present a linearization result for the class of increasing Borel functions \(\psi \colon C(\mathbb{R}_+;\mathbb{R}) \to \mathbb{R}\) in the case of certain volatility. Finally, this linearization allows us to simplify the PDE \eqref{eq: intro PDE}.
More precisely, we prove that for convex \(\psi \) of polynomial growth, the function \( (t, x) \mapsto \cE^x(\psi(X_{T - t})) \) is the unique viscosity solution (of polynomial growth) to the \emph{linear}~PDE \begin{equation} \label{eq: intro linear PDE} \begin{cases} \partial_t u (t, x) + \frac{1}{2}a^*(x) \partial_x^2 u(t,x) = 0, & \text{for } (t, x) \in [0, T) \times \mathbb{R}, \\ u (T, x) = \psi (x), & \text{for } x \in \bR, \end{cases} \end{equation} where \(a^*(x) := \sup\{ a(f,x) \colon f \in F \} \). Additionally, this linearization allows us to deduce that \( (t, x) \mapsto \cE^x(\psi(X_{T - t})) \) is the unique \emph{classical solution} of \eqref{eq: intro linear PDE}. \smallskip This paper is structured as follows: in Section \ref{subsec: setting} we introduce our setting. Section \ref{subsec: markovian semigroups} is devoted to the construction of sublinear Markovian semigroups and their Feller properties. Section \ref{subsec: linearization} shows how to linearize a sublinear Markovian semigroup, while Section \ref{subsec: viscosity} links the nonlinear expectation to the nonlinear Kolmogorov equation \eqref{eq: intro PDE}. Section \ref{sec: nisio} investigates the link to the Nisio semigroup. Section \ref{sec: regularity r} establishes the required regularity of the set-valued mapping \( \cR \). The proofs for our main results are given in the remaining sections. More precisely, the Feller properties of sublinear Markovian semigroups are shown in Section \ref{sec: proof main result}, the selection principles are proved in Section~\ref{sec: feller selection}, the linearization is proved in Section~\ref{sec: uniform selection} and the PDE connection is proved in Section~\ref{sec: viscosity property}. In Section~\ref{sec: pf nisio} the proofs for the relation to the Nisio semigroup are given. \section{Main Results} \subsection{The Setting}\label{subsec: setting} Define $\Omega$ to be the space of continuous functions \(\mathbb{R}_+ \to \mathbb{R}\) endowed with the local uniform topology. The canonical process on $\Omega$ is denoted by \(X\), i.e., \(X_t (\omega) = \omega (t)\) for \(\omega \in \Omega\) and \(t \in \mathbb{R}_+\). It is well-known that \(\mathcal{F} := \mathcal{B}(\Omega) = \sigma (X_t, t \in \bR_+)\). We define $\F := (\mathcal{F}_t)_{t \in \bR_+}$ as the canonical filtration generated by $X$, i.e., \(\mathcal{F}_t := \sigma (X_s, s \in [0, t])\) for \(t \in \mathbb{R}_+\). Notice that we do not make the filtration \(\F\) right-continuous. The set of probability measures on \((\Omega, \mathcal{F})\) is denoted by \(\mathfrak{P}(\Omega)\) and endowed with the usual topology of convergence in distribution. Let \(F\) be a Polish space and let \(b \colon F \times \mathbb{R} \to \mathbb{R}\) and \(a \colon F \times \mathbb{R} \to \mathbb{R}_+\) be Borel functions. We define the correspondence, i.e., the set-valued mapping, \(\Theta \colon \bR \twoheadrightarrow \mathbb{R} \times \mathbb{R}_+\) by \[ \Theta (x) := \big\{(b (f, x), a (f, x)) \colon f \in F \big\}. \] \begin{SA} \label{SA: compact} \(F\) is compact. \end{SA} \begin{SA} \label{SA: meas gr} \(\Theta\) has a measurable graph, i.e., the graph \[ \on{gr} \Theta = \big\{ (x, b, a) \in \bR \times \mathbb{R} \times \mathbb{R}_+ \colon (b, a) \in \Theta (x) \big\} \] is Borel. \end{SA} \begin{remark} By virtue of \cite[Lemma 2.8]{CN22}, if Standing Assumption \ref{SA: compact} is in force, Standing Assumption~\ref{SA: meas gr} holds once \(b\) and \(a\) are continuous in their first variables. 
\end{remark} We call a real-valued continuous process \(Y = (Y_t)_{t \geq 0}\) a (continuous) \emph{semimartingale after a time \(t^* \in \mathbb{R}_+\)} if the process \(Y_{\cdot + t^*} = (Y_{t + t^*})_{t \geq 0}\) is a semimartingale for its natural right-continuous filtration. Notice that considering the right-continuous version of the filtration comes without loss of generality (see \cite[Proposition~2.2]{neufeld2014measurability}). The law of a semimartingale after \(t^*\) is said to be a \emph{semimartingale law after \(t^*\)} and the set of all such laws is denoted by \(\fPs (t^*)\). Notice also that \(P \in \fPs(t^*)\) if and only if the coordinate process is a semimartingale after \(t^*\). For \(P \in \fPs (t^*)\) we denote the semimartingale characteristics of the shifted coordinate process \(X_{\cdot + t^*}\) by \((B^P_{\cdot + t^*}, C^P_{\cdot + t^*})\). Moreover, we set \[ \fPas (t^*) := \big\{ P \in \fPs (t^*) \colon P\text{-a.s. } (B^P_{\cdot + t^*}, C^P_{\cdot + t^*}) \ll \llambda \big\}, \quad \fPas := \fPas(0), \] where \(\llambda\) denotes the Lebesgue measure. For \( x \in \bR \), we define \[ \cR (x) := \big\{ P \in \fPas \colon P \circ X_0^{-1} = \delta_{x},\ (\llambda \otimes P)\text{-a.e. } (dB^{P} /d\llambda, dC^{P}/d\llambda) \in \Theta(X) \big\}. \] While the correspondence \( \cR \) is the main object of interest for our study, it is convenient to introduce another correspondence \( \cC \). For \((t,\omega) \in \of 0, \infty\of \,:= \bR_+ \times \Omega\), we define \begin{align*} \cC(t,\omega) := \big\{ P \in \mathfrak{P}_{\text{sem}}^{\text{ac}}(t)\colon P(&X = \omega \text{ on } [0, t]) = 1, \\ &(\llambda \otimes P)\text{-a.e. } (dB^P_{\cdot + t} /d\llambda, dC^P_{\cdot + t}/d\llambda) \in \Theta (X_{\cdot + t}) \big\}. \end{align*} Note that \( \cC(0,x) = \cR(x) \) for every \( x \in \bR\). \begin{SA} \label{SA: non empty} \(\cC (t, \omega) \not = \emptyset\) for all \((t, \omega) \in \of 0, \infty\of\). \end{SA} \begin{remark} By virtue of \cite[Lemma 2.10]{CN22}, Standing Assumption \ref{SA: non empty} holds under continuity and linear growth conditions on \(b\) and \(a\). In particular, Standing Assumption \ref{SA: non empty} is implied by the Conditions \ref{cond: LG} and \ref{cond: continuity} below. \end{remark} \subsection{Sublinear Markovian Semigroups} \label{subsec: markovian semigroups} For each \( x \in \bR \), we define the sublinear operator \( \cE^x \) on the convex cone of upper semianalytic functions \(\psi \colon \Omega \to \bR\) by \( \cE^x(\psi) := \sup_{P \in \cR(x)} E^P[ \psi ] \). For every \( x \in \bR \), we have by construction that \( \cE^x(\psi(X_0)) = \psi(x) \) for every upper semianalytic function \(\psi \colon \bR \to \bR\). \begin{definition} Let \( \mathcal{H} \) be a convex cone of functions \( f \colon \bR \to \bR \) containing all constant functions. A family of sublinear operators \( T_t \colon \mathcal{H} \to \mathcal{H}, \ t \in \bR_+,\) is called a \emph{sublinear Markovian semigroup} on \( \mathcal{H} \) if it satisfies the following properties: \begin{enumerate} \item[\textup{(i)}] \( (T_t)_{t \in \bR_+} \) has the semigroup property, i.e., \( T_s T_t = T_{s+t} \) for all \(s, t \in \bR_+ \) and \( T_0 = \on{id} \), \item[\textup{(ii)}] \( T_t \) is monotone for each \( t \in \bR_+\), i.e., \( f, g \in \mathcal{H} \) with \( f \leq g \) implies \(T_t (f) \leq T_t (g) \), \item[\textup{(iii)}] \( T_t \) preserves constants for each \( t \in \bR_+\), i.e., \( T_t(c) = c \) for each \( c \in \bR \).
\end{enumerate} \end{definition} The following proposition should be compared to \cite[Lemma 4.32]{hol16}. Note that the framework of \cite{ElKa15}, which is also used in our article, allows for more flexibility regarding initial values in \( \cR \) compared to \cite{hol16}. Indeed, as \cite{hol16} relies on the results of \cite{NVH}, it needs to introduce, for every \( x \in \bR\), the space \( \Omega_x := \{ \omega \in \Omega \colon \omega (0) = x\} \) to capture the initial value. In consequence, the sublinear expectation \( \cE^x \) constructed in \cite{hol16} is only defined on \( \Omega_x \), which requires more notational care. \vspace{1em} Denote, for \(t \in \mathbb{R}_+\), the shift operator \(\theta_t \colon \Omega \to \Omega\) by \(\theta_t (\omega) := \omega(\cdot + t)\) for all \(\omega \in \Omega\). \begin{proposition} \label{prop: markov property} For every upper semianalytic function \( \psi \colon \Omega \to \bR \), the equality \[ \cE^x( \psi \circ \theta_t) = \cE^x ( \cE^{X_t} (\psi)) \] holds for every \((t, x) \in \bR_+ \times \bR\). \end{proposition} \begin{proof} For every \( (t, \omega) \in \of 0, \infty \of \), we define \[ \mathcal{E}_t (\psi) (\omega) := \sup_{P \in \cC (t, \omega)} E^P \big[ \psi\big]. \] Now, we get from \cite[Corollary 7.3]{CN22} that \[ \cE_t(\psi \circ \theta_t)(\omega) = \sup_{P \in \cR(\omega(t))} E^{P}\big[\psi( \omega(t) + X - X_0) \big] = \sup_{P \in \cR (\omega(t))} E^P \big[ \psi\big]. \] Hence, \begin{align} \label{eq: equality markov shift} \cE_t(\psi \circ \theta_t)(\omega) = \cE^{\omega(t)}(\psi). \end{align} Finally, the dynamic programming principle (\cite[Theorem 3.1]{CN22}) yields \[ \cE^x(\psi \circ \theta_t ) = \cE^x( \cE_t( \psi \circ \theta_t)) = \cE^x( \cE^{X_t}(\psi)). \] The proof is complete. \end{proof} We point out that Proposition \ref{prop: markov property} confirms the intuition that the coordinate process is a nonlinear Markov process under the family \( \{\cE^x \colon x \in \bR\} \), as it implies the equality \[ \cE^x(\psi(X_{s+t})) = \cE^x( \cE^{X_t}( \psi (X_s))) \] for every upper semianalytic function \(\psi \colon \bR \to \mathbb{R}\), \(s,t \in \bR_+ \) and \(x \in \bR\). Using Proposition~\ref{prop: markov property}, the following proposition is a restatement of \cite[Remark 4.33]{hol16} for our framework. \begin{proposition} The family of operators \( (T_t)_{t \in \bR_+} \) given by \[ T_t ( \psi )(x) := \cE^x(\psi(X_t)), \quad (t, x) \in \bR_+ \times \bR,\] defines a sublinear Markovian semigroup on the set of bounded upper semianalytic functions. \end{proposition} \begin{remark} \label{rem: nonMarkov laws} It is worth pointing out that \(\cR(x)\) contains {\em non-}Markovian laws. This is already the case in the L\'evy situation where \(b (f, x) \equiv b (f)\) and \(a (f, x) \equiv a (f)\). To give a concrete example, consider \(F = [\underline{a}, \overline{a}]\) for \(\underline{a} < \overline{a}\), \(b = 0\) and \(a (f, x) = f\). Then, \(\{\cE^x \colon x \in \bR\}\) corresponds to a \(G\)-Brownian motion with \(G\)-function \(G (x) = (\overline{a} x^+ - \underline{a} x^-) / 2\).
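As a quick illustration (a sanity check that is not needed later), take \(\psi(y) = y^2\): since \(b = 0\), every \(P \in \cR(x)\) makes the coordinate process a square-integrable martingale with \(E^P[X_t^2] = x^2 + E^P[C^P_t] \leq x^2 + \overline{a}\, t\), with equality for the law of \(x + \sqrt{\overline{a}}\, W\), where \(W\) is a standard Brownian motion. Hence \(\cE^x(X_t^2) = x^2 + \overline{a}\, t\) and, by the same reasoning, \(\cE^x(-X_t^2) = -x^2 - \underline{a}\, t\).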
In this case, the set \(\cR (x)\) contains also laws of processes of the type \[ d Y_t = \sigma (t, Y) \, d W_t, \quad Y_0 = x, \] where \(\sigma \colon \of 0, \infty\of \, \to \mathbb{R}\) is an arbitrary predictable functional that is continuous on \(\of 0, \infty\of\) and such that \(\underline{a} \leq \sigma^2 \leq \overline{a}\).\footnote{The continuity and boundedness assumptions on \(\sigma\) entail the existence of \(Y\) by Skorokhod's existence theorem.} Such processes are non-Markovian in general. \end{remark} In the following, we investigate the semigroup property of \((T_t)_{t \in \mathbb{R}_+}\) on convex cones consisting of more regular functions. \begin{definition} We say that the sublinear Markovian semigroup \((T_t)_{t \in \mathbb{R}_+}\) has the \begin{enumerate} \item[\textup{(a)}] \emph{\(\usc_b\)--Feller property} if it is a sublinear Markovian semigroup on the space \(\usc_b(\mathbb{R}; \mathbb{R})\) of bounded upper semicontinuous functions from \(\mathbb{R}\) into \(\mathbb{R}\); \item[\textup{(b)}] \emph{\(C_b\)--Feller property} if it is a sublinear Markovian semigroup on the space \(C_b(\mathbb{R}; \mathbb{R})\) of bounded continuous functions from \(\mathbb{R}\) into \(\mathbb{R}\); \item[\textup{(c)}] \emph{\(\uc_b\)--Feller property} if it is a sublinear Markovian semigroup on the space \(\uc_b(\mathbb{R}; \mathbb{R})\) of bounded uniformly continuous\footnote{Of course, uniform continuity refers to the Euclidean metric.} functions from \(\mathbb{R}\) into \(\mathbb{R}\); \item[\textup{(d)}] \emph{\(C_0\)--Feller property} if it is a sublinear Markovian semigroup on the space \(C_0(\mathbb{R}; \mathbb{R})\) of continuous functions from \(\mathbb{R}\) into \(\mathbb{R}\) which are vanishing at infinity; \item[\textup{(e)}] \emph{strong \(\usc_b\)--Feller property} if \(T_t (\usc_b (\mathbb{R}; \mathbb{R})) \subset C_b (\mathbb{R}; \mathbb{R})\) for all \(t > 0\). \item[\textup{(f)}] \emph{uniform strong \(\usc_b\)--Feller property} if \(T_t (\usc_b (\mathbb{R}; \mathbb{R})) \subset \uc_b (\mathbb{R}; \mathbb{R})\) for all \(t > 0\). \end{enumerate} \end{definition} \begin{remark} \label{rem: schilling} As observed in \cite{schilling98}, in case of linear semigroups, the \(\usc_b\)--Feller property is equivalent to the \(C_b\)--Feller property. Indeed, this follows simply from the fact that \(\usc_b (\mathbb{R}; \mathbb{R}) = - \textit{LSC}_b (\mathbb{R}; \mathbb{R})\), where the latter denotes the space of bounded lower semicontinuous functions. \end{remark} To formulate our main results we need to introduce some conditions. \begin{condition}[Convexity] \label{cond: convexity} For every \(x \in \mathbb{R}\), the set \(\Theta(x)\) is convex. \end{condition} \begin{condition}[Linear Growth] \label{cond: LG} There exists a constant \(\C > 0\) such that \[ |b (f, x)|^2 + |a (f, x)| \leq \C\, ( 1 + |x|^2 ) \] for all \(f \in F\) and \(x \in \mathbb{R}\). \end{condition} \begin{condition}[Boundedness]\label{cond: bdd} \(\sup\, \{ | b (f, x) | + a (f, x) \colon (f, x) \in F \times \bR \} < \infty\). \end{condition} \begin{condition}[Continuity] \label{cond: continuity} \(b\) and \(a\) are continuous. \end{condition} \begin{condition}[Local Lipschitz Continuity] \label{cond: loc Lip} For every \(N > 0\), there exists a constant \(\C = \C (N) > 0\) such that \[ |b (f, x) - b(f, y)| + | \sqrt{a} (f, x) - \sqrt{a} (f, y)| \leq \C \, |x - y|, \] for all \(f \in F\) and \(x, y \in [- N, N]\). 
\end{condition} \begin{condition}[Ellipticity] \label{cond: ellipticity} \(a > 0\). \end{condition} \begin{condition}[Uniform Ellipticity] \label{cond: uniform ellipticity} \(\inf\, \{ a (f, x) \colon (f, x) \in F \times \bR\} > 0\). \end{condition} \begin{theorem} \label{thm: new very main} Suppose throughout that the Conditions \ref{cond: convexity}, \ref{cond: LG} and \ref{cond: continuity} hold. \begin{enumerate} \item[\textup{(i)}] The sublinear Markovian semigroup \((T_t)_{t \in \mathbb{R}_+}\) has the \(\usc_b\)--Feller property. \item[\textup{(ii)}] Assume that Condition~\ref{cond: loc Lip} holds. Then, \((T_t)_{t \in \bR_+}\) has the \(C_b\)--Feller property. \item[\textup{(iii)}] Assume that Condition~\ref{cond: ellipticity} holds. Then, \((T_t)_{t \in \mathbb{R}_+}\) has the \(C_0\) and the strong \(\usc_b\)--Feller properties. \item[\textup{(iv)}] Assume that the Conditions~\ref{cond: bdd} and \ref{cond: uniform ellipticity} hold. Then, \((T_t)_{t \in \bR_+}\) has the uniform strong \(\usc_b\)--Feller property. \end{enumerate} \end{theorem} \begin{remark} \label{rem: no uniqueness} Let us briefly discuss the relation of Theorem \ref{thm: new very main} to the setting where \(a (f, x) = a(x)\) and \(b(f, x) = b(x)\) or, equivalently, where \(F\) is a singleton. It is important to notice that even in this case the semigroup \((T_t)_{t \in \mathbb{R}_+}\) is \emph{not necessarily} linear. Indeed, the nonlinearity stems from the possibility that the martingale problem associated to the coefficients \(b\) and \(a\) might not be well-posed. Furthermore, the example in \cite[Exercise 12.4.2]{SV} shows that the Conditions \ref{cond: convexity}, \ref{cond: LG} and \ref{cond: continuity} are \emph{not} sufficient for the \(C_b\)--Feller property, nor for the \(C_0\)--Feller property of \((T_t)_{t \in \mathbb{R}_+}\). In particular, this means that neither the local Lipschitz nor the ellipticity assumption \(a > 0\) can be dropped in Theorem \ref{thm: new very main} (ii) and (iii). \end{remark} For a nonlinear framework \emph{with jumps}, the \(\usc_b\)--Feller property was proved in \cite[Proposition~4.36, Theorem 4.41, Lemma 4.42]{hol16} under uniform boundedness and global Lipschitz conditions.\footnote{In the presence of jumps, there appears to be a gap in the proof of \cite[Lemma~4.42]{hol16}, as the map \(\omega \mapsto \omega (t)\) is not upper semicontinuous on the Skorokhod space of \cadlag functions. Indeed, by linearity, upper semicontinuity would already imply continuity, which is not the case.} Theorem \ref{thm: new very main} shows that in our framework one can weaken these assumptions substantially. The \(C_b\)--Feller property is of fundamental importance for the relation between nonlinear processes and semigroups. In Section \ref{subsec: viscosity} it enables us to derive a new existence and uniqueness result and a stochastic representation for certain Kolmogorov type PDEs. To the best of our knowledge, the uniform strong and the strong \(\usc_b\)--Feller properties of nonlinear Markov processes have not been investigated before. We highlight that both provide a smoothing effect which seems to be new in the literature on nonlinear Markov processes. Let us now discuss the proofs of Theorem \ref{thm: new very main}. We start with (iii) and (iv), i.e., the \(C_0\) and (uniform) strong \(\usc_b\)--Feller properties, which we prove simultaneously. The first main tool for our proof is the following \emph{strong Markov selection principle}, which we believe to be of interest in its own right.
Before we can state our result we need more notation and terminology. For a probability measure \(P\) on \((\Omega, \mathcal{F})\), a kernel \(\Omega \ni \omega \mapsto Q_\omega \in \mathfrak{P}(\Omega)\), and a finite stopping time \(\tau\), we define the pasting measure \[ (P \otimes_\tau Q) (A) := \iint \1_A (\omega \otimes_{\tau(\omega)} \omega') Q_\omega (d \omega') P(d \omega) \] for all \(A \in \cF\), where \[ \omega \otimes_t \omega' := \omega \1_{[ 0, t)} + (\omega (t) + \omega' - \omega' (t))\1_{[t, \infty)}. \] \begin{definition}[Time inhomogeneous Markov Family] A family \(\{P_{(s, x)} \colon (s, x) \in \bR_+ \times \bR\} \subset \mathfrak{P}(\Omega)\) is said to be a \emph{strong Markov family} if \((t, x) \mapsto P_{(t, x)}\) is Borel and the strong Markov property holds, i.e., for every \((s, x) \in \bR_+ \times \bR\) and every finite stopping time \(\tau \geq s\), \[ P_{(s, x)} (\,\cdot\, | \cF_\tau) (\omega) = \omega \otimes_{\tau (\omega)} P_{(\tau (\omega), \omega (\tau (\omega)))} \] for \(P_{(s, x)}\)-a.a. \(\omega \in \Omega\). \end{definition} Further, we introduce a correspondence \(\mathcal{K} \colon \bR_+ \times \bR \twoheadrightarrow \mathfrak{P}(\Omega)\) by \[ \cK (t, x) := \cC (t, \bx), \] where \(\bx \in \Omega\) is the constant function \(\bx (s) = x\) for all \(s \in \bR_+\). \begin{theorem}[Strong Markov Selection Principle] \label{theo: strong Markov selection} Suppose that the Conditions \ref{cond: convexity}, \ref{cond: LG} and \ref{cond: continuity} hold. For every \(\psi \in \usc_b(\bR; \mathbb{R})\) and every \(t > 0\), there exists a strong Markov family \(\{P_{(s, x)} \colon (s, x) \in \bR_+ \times \mathbb{R}\}\) such that, for all \((s, x)\in \bR_+ \times \mathbb{R}\), \(P_{(s, x)} \in \cK (s, x)\) and \[ E^{P_{ (s, x) }} \big[ \psi (X_t) \big] = \sup_{P \in \cK (s, x)} E^P \big[ \psi (X_t) \big]. \] In particular, for every \(x \in \bR\), \[ T_t (\psi) (x) = E^{P_{(0, x)}} \big[ \psi (X_t) \big]. \] \end{theorem} In general, the set \(\cK\) contains non-Markovian laws (see Remark~\ref{rem: nonMarkov laws}). In this regard, it is interesting that the supremum of \(P \mapsto E^P [ \psi (X_t) ]\) over \((s, x) \mapsto \cK (s, x)\) is attained at a strong Markov family. We emphasise that the strong Markov selection depends on the input function \(\psi\) and the time \(t > 0\). At first glance, it appears that abstract selection theorems only provide measurability in the initial value, as results on continuous selection, like Michael's theorem \cite[Theorem 3.2]{michael}, seem not applicable. Under Condition~\ref{cond: ellipticity}, the system carries enough randomness to conclude additional regularity properties. More precisely, if \(a\) is elliptic, we prove in Theorem \ref{theo: selection is Feller} below that every strong Markov selection is already a \(C_0\) and strong Feller selection in the sense explained now. \begin{definition} [Feller Properties of time inhomogeneous Markov Families] \quad \begin{enumerate} \item[\textup{(i)}] We say that a strong Markov family \(\{P_{ (s, x)} \colon (s, x) \in \bR_+ \times \bR\}\) has the \emph{\(C_b\)--Feller property} if, for every \(t > 0\) and every \(\phi \in C_b (\bR; \bR)\), the map \([0, t) \times \bR \ni (s, x) \mapsto E^{P_{(s, x)}} [ \phi (X_t)]\) is continuous. 
\item[\textup{(ii)}] We say that a strong Markov family \(\{P_{ (s, x)} \colon (s, x) \in \bR_+ \times \bR\}\) has the \emph{\(C_0\)--Feller property} if, for every \(0 \leq s < t\) and every continuous \(\phi \colon \bR \to \bR\) which is vanishing at infinity, the map \(x \mapsto E^{P_{(s, x)}} [ \phi (X_t)]\) is continuous and vanishing at infinity. \item[\textup{(iii)}] We say that a strong Markov family \(\{P_{ (s, x)} \colon (s, x) \in \bR_+ \times \bR\}\) has the \emph{strong Feller property} if, for every \(t > 0\) and every bounded Borel function \(\phi \colon \bR \to \bR\), the map \([0, t) \times \bR \ni (s, x) \mapsto E^{P_{(s, x)}}[ \phi (X_t) ]\) is continuous. \item[\textup{(iv)}] We say that a strong Markov family \(\{P_{ (s, x)} \colon (s, x) \in \bR_+ \times \bR\}\) has the \emph{uniform strong Feller property} if, for every \(t > 0\) and every bounded Borel function \(\phi \colon \bR \to \bR\), the map \([0, t - h] \times \bR \ni (s, x) \mapsto E^{P_{(s, x)}}[ \phi (X_t) ]\) is uniformly continuous for every \(h \in (0, t)\). \end{enumerate} \end{definition} Clearly, the uniform strong Feller property entails the strong Feller property, which itself implies the \(C_b\)--Feller property. \begin{theorem}[Feller Selection Principle] \label{theo: Feller selection} Suppose that the Conditions \ref{cond: convexity}, \ref{cond: LG}, \ref{cond: continuity} and \ref{cond: ellipticity} hold. For every \(\psi \in \usc_b(\bR; \mathbb{R})\) and every \(t > 0\), there exists a \(C_0\) and strong Feller family \(\{P_{(s, x)} \colon (s, x) \in \bR_+ \times \mathbb{R}\}\) such that, for all \((s, x)\in \bR_+ \times \mathbb{R}\), \(P_{(s, x)} \in \cK (s, x)\) and \[ E^{P_{ (s, x) }} \big[ \psi (X_t) \big] = \sup_{P \in \cK (s, x)} E^P \big[ \psi (X_t) \big]. \] In particular, for all \(x \in \bR\), \[ T_t (\psi) (x) = E^{P_{(0, x)}} \big[ \psi (X_t) \big]. \] Moreover, if the Conditions~\ref{cond: bdd} and \ref{cond: uniform ellipticity} hold in addition, then \(\{P_{(s, x)} \colon (s, x) \in \bR_+ \times \mathbb{R}\}\) is also a uniform strong Feller family. \end{theorem} The Feller selection principle from Theorem \ref{theo: Feller selection} immediately implies the \(C_0\) and (uniform) strong \(\usc_b\)--Feller properties of the sublinear Markovian semigroup \((T_t)_{t \in \mathbb{R}_+}\), i.e., it proves Theorem~\ref{thm: new very main} (iii) and (iv). \begin{remark} \label{rem: feller selection not possible} We highlight the following interesting observation: continuity and linear growth conditions on \(b\) and \(a\) suffice to get the \(\usc_b\)--Feller property in the nonlinear setting but these assumptions do not suffice to select a \( \usc_b\)--Feller family (equivalently, a \(C_b\)--Feller family by Remark~\ref{rem: schilling}), see \cite[Exercise~12.4.2]{SV} for a counterexample. In particular, the counterexample shows that Theorem~\ref{theo: Feller selection} \emph{fails} without the ellipticity assumption on~\(a\). \end{remark} It turns out that for some classes of input functions \(\psi\) it is possible to select (via an explicit construction) a (time homogeneous) strong Feller family which is \emph{uniform} in \(\psi\) and \(t\). Such a uniform selection principle can be viewed as a \emph{linearization} of the sublinear expectation~\(\mathcal{E}\). We discuss this topic in Section~\ref{subsec: linearization} below. Next, we comment on the proof of the \(\usc_b\)--Feller property. 
Recall that for this part of Theorem~\ref{thm: new very main} we do not impose local Lipschitz or ellipticity assumptions. In our proof we use the general theory of correspondences which, from our point of view, provides a rather simple presentation of the argument. More precisely, since \[ T_t (\psi) (x) = \sup_{P \in \cR(x)} E^P \big[ \psi (X_t) \big], \] the \(\usc_b\)--Feller property follows from Berge's maximum theorem once the correspondence \(x \mapsto \cR (x)\) is upper hemicontinuous with compact values. We have the following general result: \begin{theorem} \label{thm: main r} Suppose that the Conditions \ref{cond: convexity}, \ref{cond: LG} and \ref{cond: continuity} hold. Then, the correspondence \( x \mapsto \cR (x) \) is upper hemicontinuous with compact values. \end{theorem} \begin{remark} \label{rem: lower hemicontinuity} It is natural to ask whether it is possible to use Theorem \ref{theo: Feller selection} to prove the continuity of the correspondence \(x \mapsto \cR(x)\) and conversely, whether one can establish continuity of \(x \mapsto \cR(x)\) to deduce the \(C_b\)--Feller property from Theorem \ref{theo: Feller selection}. By virtue of the generalized version of Berge's maximum theorem \cite[Theorem~1]{berge}, Theorem \ref{theo: Feller selection} implies that the correspondence \[ x \mapsto \big \{ y \in \bR \colon \exists P \in \cR(x) \text{ such that } y \leq E^P[\psi (X_t)] \big \}, \quad \psi \in C_b(\bR; \bR),\ t > 0, \] is lower hemicontinuous. This, however, seems not to give access to the lower hemicontinuity of \( \cR \). One particular example where continuity of \(\cR\) is rather straightforward to verify is the framework of nonlinear L{\'e}vy processes from \cite{neufeld2017nonlinear}, reduced to our path-continuous setting. That is, the case where the correspondence \( \Theta \) is independent of time and path, i.e., \[ \cR(x) = \big\{ P \in \fPas \colon P \circ X_0^{-1} = \delta_{x}, \ (\llambda \otimes P)\text{-a.e. } (dB^{P} /d\llambda, dC^{P}/d\llambda) \in \Theta \big\}, \] and the convex and compact set \( \Theta \subset \mathbb{R} \times \mathbb{R}_+ \) represents the set of possible means and variances. We refer to \cite[Appendix A]{CN22} for details. It is, however, worth mentioning that compared to Theorem~\ref{theo: Feller selection}, continuity of \( \cR \) is not sufficient to deduce the \(C_0\) or the (uniform) strong \( \usc_b\)--Feller property. In general, lower hemicontinuity appears to be difficult to verify due to its relation to martingale problems with possibly non-regular coefficients, see \cite[Remark~4.43]{hol16}, \cite[Remark~3.4]{K19} and \cite[Remark~5.4]{K21} for comments in this direction. In an update of our paper \cite{CN22}, which appeared after the present paper was submitted, we proved lower hemicontinuity of a time- and path-dependent correspondence related to nonlinear continuous semimartingales. The proof from \cite{CN22} relies in a crucial manner on strong existence and pathwise uniqueness properties of SDEs with random coefficients that are implied by the local Lipschitz and linear growth conditions. As already mentioned in the introduction, we are not aware of such existence and uniqueness results under ellipticity assumptions. The following is a restatement of \cite[Theorem~4.7]{CN22} tailored to our Markovian situation. \begin{theorem} \label{theo: lower hemi} Suppose that the Conditions~\ref{cond: convexity}, \ref{cond: LG}, \ref{cond: continuity} and \ref{cond: loc Lip} hold.
Then, the correspondence \( x \mapsto \cR (x) \) is lower hemicontinuous. \end{theorem} Notice that part (ii) from Theorem~\ref{thm: new very main} follows from part (i) of the same theorem, Theorem~\ref{theo: lower hemi} and Berge's maximum theorem (\cite[Lemma~17.29]{hitchi}). \end{remark} \subsection{Linearization} \label{subsec: linearization} We now present a uniform strong Feller selection principle for two types of nonlinear diffusions and certain classes of input functions. \begin{condition} [Continuity in Control] \label{cond: continuity in control} For every \(x \in \mathbb{R}\), \(f \mapsto a(f, x)\) is continuous. \end{condition} \begin{condition} [Local H\"older Continuity in Space] \label{cond: local holder} For every \(M > 0\) there exists a constant \(\C = \C(M) > 0\) such that \[ | \sqrt{a} (f, x) - \sqrt{a} (f, y) | \leq \C\, | x - y |^{1/2} \] for all \(f \in F\) and \(x,y \in [-M, M]\). \end{condition} Let \(\mathbb{G}_{cx}\) be the set of all convex functions \(\psi \colon \mathbb{R} \to \mathbb{R}\) such that \begin{equation} \label{eq: df G_cx} \exists \C = \C (\psi) > 0, \ m = m(\psi) \in \mathbb{N}\colon \qquad \forall x \in \mathbb{R} \quad |\psi (x)| \leq \C (1 + |x|^m). \end{equation} \begin{remark} Thanks to Lemma \ref{lem: maximal inequality} below, under Condition \ref{cond: LG}, for every \(P \in \cR(x), T \in \bR_+\) and \(\psi \in \mathbb{G}_{cx}\), it holds that \(\psi (X_T) \in L^1(P)\). \end{remark} Recall that a (time homogeneous) strong Markov family \(\{P_x \colon x \in \mathbb{R}\}\) is said to be \emph{strongly Feller} if \(x \mapsto E^{P_x} [ \psi (X_t) ]\) is continuous for every \(t > 0\) and every bounded Borel function \(\psi \colon \mathbb{R} \to \mathbb{R}\). \begin{theorem}[Uniform Strong Feller Selection Principle] \label{theo: UFSP} Suppose that the Conditions \ref{cond: LG}, \ref{cond: ellipticity}, \ref{cond: continuity in control} and \ref{cond: local holder} hold. Furthermore, suppose that \(b \equiv 0\). Then, there exists a strong Feller family \(\{P^*_x \colon x \in \mathbb{R}\}\) such that, for all \(x \in \mathbb{R}\), \(P^*_x \in \cR (x)\) and \begin{align}\label{eq: USFSP} \mathcal{E}^x ( \psi(X_T) ) = E^{P^*_x} \big[ \psi (X_T) \big] \end{align} for all times \(T \in \bR_+\) and \(\psi \in \mathbb{G}_{cx}\) as defined in \eqref{eq: df G_cx}. Moreover, for each \(x \in \mathbb{R}\), \(P^*_x\) is the unique law of a solution process to the SDE \[ d Y_t = \sqrt{a^*} (Y_t) d W_t, \quad Y_0 = x, \] where \(W\) is a one-dimensional Brownian motion and \(a^* (y) := \sup \{ a (f, y) \colon f \in F\}\) for \(y \in \mathbb{R}\). \end{theorem} The key idea behind Theorem \ref{theo: UFSP} is a stochastic order property. Namely, as is intuitively clear, ordered diffusion coefficients imply a convex stochastic order for one-dimensional distributions. Such a result traces back to the paper \cite{hajek85} whose ideas we also adapt in the proof of Theorem~\ref{theo: UFSP}. Having said all this, it is not hard to believe that a similar result can be proved for the class of continuous increasing functions and nonlinear diffusions with volatility certainty. We think that this application is also of independent interest and therefore we give a precise statement. \begin{condition} [Continuity of Drift] \label{cond: continuity drift} \(b\) is continuous. 
\end{condition} \begin{condition} [Certainty, Ellipticity and H\"older Continuity of Volatility] \label{cond: holder} There exists a \(1/2\)-H\"older continuous function \(a^* \colon \mathbb{R} \to (0, \infty)\) such that \(a (f, x) = a^* (x)\) for all \(f \in F\) and \(x \in \mathbb{R}\). \end{condition} \begin{theorem}[Uniform Strong Feller Selection Principle] \label{theo: UFSP 2} Suppose that the Conditions \ref{cond: LG}, \ref{cond: continuity drift} and \ref{cond: holder} hold. Then, there exists a strong Feller family \(\{P^*_x \colon x \in \mathbb{R}\}\) such that, for all \(x \in \mathbb{R}\), \(P^*_x \in \cR (x)\) and \begin{align}\label{eq: USFSP 2} \mathcal{E}^x ( \psi ) = E^{P^*_x} \big[ \psi \big] \end{align} for all bounded increasing (for the pointwise order) Borel functions \(\psi \colon \Omega \to \mathbb{R}\). Moreover, for each \(x \in \mathbb{R}\), \(P^*_x\) is the unique law of a solution process to the SDE \[ d Y_t = b^* (Y_t) dt + \sqrt{a^*} (Y_t) d W_t, \quad Y_0 = x, \] where \(W\) is a one-dimensional Brownian motion and \(b^* (y) := \sup \{ b (f, y) \colon f \in F\}\) for \(y \in \mathbb{R}\). \end{theorem} Similarly to the linear case, nonlinear Markovian semigroups have a close relation to solutions of certain PDEs comparable to Kolmogorov's equation. In the following section we make this relation more precise. In particular, we discuss the connection of the sublinear Markovian semigroup to its \emph{pointwise generator}. \subsection{A nonlinear Kolmogorov Equation} \label{subsec: viscosity} Let us start with a formal introduction to the class of nonlinear PDEs under consideration. We fix a finite time horizon \(T > 0\). For \((t, x,\phi) \in \mathbb{R}_+ \times \mathbb{R} \times C^{1, 2}(\mathbb{R}_+ \times \mathbb{R}; \bR)\), we define \begin{align*} G(t, x, \phi) := \sup \Big\{b (f, x) \partial_x \phi (t,x) + \tfrac{1}{2} a (f, x) \partial^2_x\phi (t,x) \colon f \in F \Big\}. \end{align*} In our paper \cite{CN22} we proved, under suitable assumptions on \( b\) and \(a\), that the value function \[ v (t, x) := \sup_{P \in \cR (x)} E^P \big[ \psi (X_{T - t})\big], \quad (t, x) \in [0, T] \times \mathbb{R}, \] is a \emph{weak-sense viscosity solution} to the nonlinear Kolmogorov type partial differential equation \begin{equation} \label{eq: PDE} \begin{cases} \partial_t v (t, x) + G (t, x, v) = 0, & \text{for } (t, x) \in [0, T) \times \mathbb{R}, \\ v (T, x) = \psi (x), & \text{for } x \in \bR, \end{cases} \end{equation} where \(\psi \in C_b(\mathbb{R}; \mathbb{R})\). Recall that a function \(u \colon [0, T] \times \bR \to \mathbb{R}\) is said to be a \emph{weak-sense viscosity subsolution} to \eqref{eq: PDE} if the following two properties hold: \begin{enumerate} \item[\textup{(a)}] \(u(T, \cdot) \leq \psi\); \item[\textup{(b)}] \( \partial_t \phi (t, x) + G (t, x, \phi) \geq 0 \) for all \(\phi \in C^{1, 2}([0, T] \times \bR; \bR)\) such that \(\phi \geq u\) and \(\phi (t, x) = u(t, x)\) for some \((t, x) \in [0, T) \times \bR \). \end{enumerate} A \emph{weak-sense viscosity supersolution} is obtained by reversing the inequalities. Further, \(u\) is called a \emph{weak-sense viscosity solution} if it is a weak-sense viscosity sub- and supersolution. Furthermore, \( u \) is called a \emph{viscosity subsolution} if it is both a weak-sense viscosity subsolution and upper semicontinuous. The notions of viscosity supersolution and viscosity solution are defined accordingly.
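For orientation, we note, purely as an illustration, that in the \(G\)-Brownian motion setting of Remark~\ref{rem: nonMarkov laws}, i.e., \(b = 0\), \(a(f, x) = f\) and \(F = [\underline{a}, \overline{a}]\), the nonlinearity reduces to
\[
G(t, x, \phi) = \sup_{f \in [\underline{a}, \overline{a}]} \tfrac{1}{2} f \, \partial_x^2 \phi(t, x) = \tfrac{1}{2} \big( \overline{a}\, (\partial^2_x \phi(t, x))^+ - \underline{a}\, (\partial^2_x \phi(t, x))^- \big),
\]
so that \eqref{eq: PDE} becomes the classical \(G\)-heat equation associated to the \(G\)-function from Remark~\ref{rem: nonMarkov laws}.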
Using Theorem~\ref{thm: new very main}, we are able to present conditions for \(v\) to be a viscosity solution (with regularity). We emphasise that, in some of these cases, no Lipschitz regularity of the coefficients is required. \begin{theorem} \label{thm: new viscosity no unique} Suppose that the Conditions \ref{cond: convexity}, \ref{cond: LG} and \ref{cond: continuity} hold. Further, assume either Condition~\ref{cond: loc Lip} or \ref{cond: ellipticity}. Then, the value function \(v\) is a viscosity solution to the nonlinear PDE \eqref{eq: PDE}. \end{theorem} When the local Lipschitz condition is strengthened to a global one, classical comparison results (see, e.g., \cite{hol16,nisio,Pham}) imply that the value function is the unique bounded viscosity solution to the PDE~\eqref{eq: PDE}. As a referee has pointed out, such a uniqueness result can also be proved by viscosity methods, i.e., without probabilistic arguments for the continuity of the value function. In particular, the continuity of the value function can be established en passant. To detail the strategy, denote the upper and lower envelopes of \(v\) by \(v^*\) and \(v_*\), i.e., we set \[ v^* (t, x) := \limsup_{ (s, y) \to (t, x) } v (s, y), \qquad v_* (t, x) := \liminf_{ (s, y) \to (t, x) } v (s, y). \] Notice that \(v^*\) is upper semicontinuous while \(v_*\) is lower semicontinuous. The key observation is provided by the following lemma. \begin{condition}[Lipschitz Continuity in Space] \label{cond: Lipschitz continuity} There exists a constant \(\C > 0\) such that \[ |b(f, x) - b(f, y)| + |\sqrt{a} (f, x) - \sqrt{a} (f, y)| \leq \C\, |x - y|, \] for all \(f \in F\) and \(x, y \in \mathbb{R}\). \end{condition} \begin{lemma} \label{lem: en upper lower} Suppose that the Conditions~\ref{cond: LG}, \ref{cond: continuity} and \ref{cond: Lipschitz continuity} hold. Then, \(v^*\) is a viscosity subsolution and \(v_*\) is a viscosity supersolution to the nonlinear PDE \eqref{eq: PDE}. \end{lemma} Under the hypothesis of Lemma~\ref{lem: en upper lower}, a classical (strong) comparison result as given by \cite[Theorem~4.4.5]{Pham} yields that \(v^* \leq v_*\), which entails that \(v = v^* = v_*\). It follows that \(v\) is a viscosity solution to~\eqref{eq: PDE}, in fact the unique bounded viscosity solution (again by the comparison result). In summary, we have proved the following uniqueness result. \begin{theorem} \label{theo: viscosity with ell} Suppose that the Conditions \ref{cond: LG}, \ref{cond: continuity} and \ref{cond: Lipschitz continuity} hold. Then, the value function \(v\) is the unique bounded viscosity solution to the nonlinear PDE \eqref{eq: PDE}. \end{theorem} Notice that Theorem~\ref{theo: viscosity with ell} does not require the convexity Condition~\ref{cond: convexity}. \begin{remark} For a sublinear Markovian semigroup \( (T_t)_{t \in \bR_+} \) on the convex cone \( \cH \), its \emph{pointwise infinitesimal generator} \( A \colon \cD(A) \to \cH \) is defined by \begin{align*} A(\phi)(x) & := \lim_{t \to 0} \frac{T_t(\phi)(x) - \phi(x)}{t}, \quad x \in \bR, \ \phi \in \cD(A), \\ \cD(A) &:= \Big\{ \phi \in \cH \colon \exists g \in \cH \text{ such that } \lim_{t \to 0} \frac{T_t(\phi)(x) - \phi(x)}{t} = g(x) \ \ \forall x \in \bR \Big\}.
\end{align*} For the sublinear Markovian semigroup \( (T_t)_{t \in \bR_+} \) associated to a nonlinear diffusion, following the proof of \cite[Lemmata~7.9, 7.12]{CN22} shows that, under Conditions \ref{cond: convexity}, \ref{cond: LG} and \ref{cond: continuity}, the inclusion \( C^2_b(\bR; \bR) \subset \cD(A) \) holds with \[ A (\phi) (x) = \sup \Big\{b (f, x) \phi' (x) + \tfrac{1}{2} a (f, x) \phi'' (x) \colon f \in F \Big\}, \quad x \in \bR, \] for \( \phi \in C^2_b(\bR; \bR) \). Hence, the Theorems \ref{thm: new viscosity no unique} and \ref{theo: viscosity with ell} link the sublinear semigroup \((T_t)_{t \in \bR_+}\) to its (pointwise) generator. \end{remark}
In the following, we show that for convex input functions of polynomial growth the value function solves a \emph{linear} PDE in a unique manner. More precisely, we prove that for \(\psi \in \mathbb{G}_{cx}\) the value function \(v\) is the unique viscosity and classical solution to the linear PDE \begin{equation} \label{eq: linear PDE} \begin{cases} \partial_t u (t, x) + \frac{1}{2}a^*(x) \partial_x^2 u(t,x) = 0, & \text{for } (t, x) \in [0, T) \times \mathbb{R}, \\ u (T, x) = \psi (x), & \text{for } x \in \bR, \end{cases} \end{equation} where \(a^*(x) := \sup\{ a(f,x) \colon f \in F \} \).
\begin{condition}[Local Lipschitz Continuity in Space] \label{cond: local Lipschitz continuity} For every \(M > 0\) there exists a constant \(\C = \C(M) > 0\) such that \[ |\sqrt{a} (f, x) - \sqrt{a} (f, y)| \leq \C |x - y|, \] for all \(f \in F\) and \(x, y \in [- M, M]\). \end{condition}
We say that a function \(g \colon [0, T] \times \mathbb{R} \to \mathbb{R}\) is of {\em \(m\)-polynomial growth} if there exists a constant \(\C > 0\) such that \(|g (t, x)| \leq \C (1 + |x|^m)\) for all \((t, x) \in [0, T] \times \mathbb{R}\).
\begin{corollary} \label{coro: uni con cx} Suppose that the Conditions \ref{cond: LG}, \ref{cond: ellipticity}, \ref{cond: continuity in control} and \ref{cond: local Lipschitz continuity} hold. Furthermore, suppose that \(b \equiv 0\). If \( \psi \in \mathbb{G}_{cx} \) is of \(m\)-polynomial growth, then the value function \(v\) is the unique viscosity and the unique classical solution of \(m\)-polynomial growth to the linear PDE \eqref{eq: linear PDE}. \end{corollary}
\begin{proof} Let \( \psi \in \mathbb{G}_{cx}\) be of \(m\)-polynomial growth. Notice that \(\sqrt{a^*}\) is locally Lipschitz continuous and of linear growth thanks to the Conditions \ref{cond: LG} and \ref{cond: local Lipschitz continuity}. For \(x \in \mathbb{R}\), denote by \(P^*_x\) the unique law of a solution process to the SDE \[ d Y_t = \sqrt{a^*} (Y_t) d W_t, \quad Y_0 = x, \] where \(W\) is a one-dimensional Brownian motion. Of course, the existence and uniqueness of \(P^*_x\) is classical (see, e.g., Chapter 5 in \cite{KaraShre}). As the Conditions \ref{cond: LG} and \ref{cond: local Lipschitz continuity} imply Condition \ref{cond: local holder}, Theorem \ref{theo: UFSP} implies \[ v(t,x) = E^{P^*_x} \big[ \psi(X_{T-t}) \big], \] for every \( (t,x) \in [0,T] \times \bR \). Thanks to this observation, the viscosity part of the corollary follows from \cite[Theorem 1]{FK}. As \(v\) is continuous by the previous considerations, it further follows from \cite[Theorem~2.7]{JaTy06} that \(v\) is also a classical solution to \eqref{eq: linear PDE} and finally, the uniqueness among classical solutions of \(m\)-polynomial growth follows from \cite[Corollary 6.4.4]{friedman75}. The proof is complete.
\end{proof}
\subsection{Relation to Nisio semigroups and the \(\uc_b\)--Feller property} \label{sec: nisio}
Another approach to sublinear semigroups is discussed in the recent paper \cite{NR}. There, the authors start with a family \((S^\lambda)_{\lambda \in \Lambda}\) of linear semigroups on the space \(\uc_\kappa(\bR;\bR)\) of weighted uniformly continuous functions, where the weight function \(\kappa \colon \bR \to (0, \infty)\) is assumed to be bounded and continuous.\footnote{The paper \cite{NR} allows for more general state spaces, but for the sake of comparison we focus on the one-dimensional case.} They present general conditions for the {\em upper semigroup envelope} \((\mathscr{S}_t)_{t \in \bR_+}\) (sometimes also called {\em Nisio semigroup}) of the family \((S^\lambda)_{\lambda \in \Lambda}\) to be a sublinear semigroup on \(\uc_\kappa(\bR;\bR)\). Furthermore, they establish a viscosity theory for their framework. Using this theory, certain Nisio semigroups can be related to the sublinear semigroup \((T_t)_{t \in \bR_+}\) that is studied in this paper. In this section, choosing \(\kappa =1\), we relate the Nisio semigroup from \cite{NR} to nonlinear diffusions with bounded Lipschitz continuous coefficients. The connection to the Nisio semigroup also provides conditions for the \(\uc_b\)--Feller property of the semigroup \((T_t)_{t \in \bR_+}\) that are different from those in Theorem~\ref{thm: new very main}. The \(\uc_b\)--Feller property was already observed in \cite{denk2020semigroup,K19} for nonlinear L\'evy processes. The space of bounded Lipschitz continuous functions from \(\bR\) into \(\bR\) is denoted by \(\lip_b (\bR; \bR)\) and the corresponding Lipschitz norm is denoted by~\(\|\cdot\|_{\lip}\).
\begin{remark} In case the weight function \(\kappa\) vanishes at infinity, the space \(\uc_\kappa(\bR;\bR)\) has the explicit description \[ \uc_\kappa(\bR;\bR) = \big\{ u \in C(\bR; \bR) \colon u \kappa \in C_0(\bR;\bR) \big\}, \] cf. \cite[Remark~5.3 (b)]{NR}. Notice that in this case \(C_b(\bR; \bR) \subset \uc_\kappa(\bR;\bR)\) and consequently, the Nisio semigroup \((\mathscr{S}_t)_{t \in \bR_+}\) has the \(C_b\)--Feller property. In particular, \cite[Section 6.3]{NR} provides examples of nonlinear Markov processes whose associated Nisio semigroups enjoy the \(C_b\)--Feller property and that go beyond the L\'evy case discussed in \cite{denk2020semigroup,K19}. If the weight function is bounded from below, then \(\uc_\kappa(\bR;\bR)\) coincides with the space \(\uc_b(\bR;\bR)\) of bounded uniformly continuous functions. Extension of the semigroup to \(C_b(\bR;\bR)\) is then ensured by continuity from above on \(\lip_b(\bR;\bR)\), cf. \cite[Remark~5.3 (c)]{NR}. Under boundedness assumptions, this is verified for the class of nonlinear L\'evy processes in \cite[Example~7.2]{NR}, see also \cite[Proposition~2.8]{denk2020semigroup} and \cite[Proposition~4.10]{K21}. Throughout the paper \cite{NR}, the following assumption is imposed on the family of semigroups \((S^\lambda)_{\lambda \in \Lambda}\): \begin{align} \label{eq: A2 from NR} \exists\, \alpha, \beta \in \bR \colon \qquad \|S^\lambda_t (u)\|_\kappa \leq e^{\alpha t} \|u\|_\kappa, \qquad \|S^\lambda_t (u)\|_{\lip} \leq e^{\beta t} \|u\|_{\lip} \end{align} for all \(u \in \lip_b (\bR; \bR)\), \(\lambda \in \Lambda\) and \(t \in \bR_+\). This makes it possible to propagate the \(\uc_\kappa\)--Feller property of \(S^\lambda\), \(\lambda \in \Lambda\), to the Nisio semigroup \((\mathscr{S}_t)_{t \in \bR_+}\).
Example \ref{ex: A2 fails} below shows that the second part of \eqref{eq: A2 from NR} fails for semigroups related to SDEs under mere continuity and linear growth assumptions on the drift and volatility coefficients. Below, we verify \eqref{eq: A2 from NR} for a family of semigroups related to SDEs under Lipschitz and H\"older conditions. It is worth mentioning that the paper \cite{NR} establishes a stochastic representation for Nisio semigroups that are continuous from above on \(\lip_b(\bR;\bR)\). To the best of our knowledge, it is not known in general that this representation coincides with \((T_t)_{t \in \bR_+}\). For the L\'evy case, i.e., sublinear Markovian convolution semigroups, this was shown in \cite[Theorem~6.4]{K21} by identifying the associated generators and using uniqueness of the corresponding evolution equation in the viscosity sense. Using this approach, we verify below that \((T_t)_{t \in \bR_+}\) agrees with the Nisio semigroup on \(\uc_b(\bR;\bR)\) under Lipschitz and boundedness conditions. In particular, this gives access to the \(\uc_b\)--Feller property of \((T_t)_{t \in \bR_+}\). \end{remark}
\smallskip
In order to define the Nisio semigroup, we will impose the following condition.
\begin{condition} \label{cond: fix f cond} For every \(f \in F\), the map \(x \mapsto b(f, x)\) is of linear growth, and the map \(x \mapsto \sqrt{a} (f, x)\) is of linear growth and locally H\"older continuous with exponent \(1 / 2\). Furthermore, there exists a constant \(\beta > 0\) such that \[ | b(f, x) - b (f, y) | \leq \beta\, | x- y| \] for all \(f \in F\) and \(x, y \in \bR\). \end{condition}
Let \(\mathbb{B} = (\Sigma, \mathcal{A}, (\mathcal{A}_t)_{t \in \bR_+}, P)\) be a filtered probability space that supports a one-dimensional standard Brownian motion \(W\). In case Condition~\ref{cond: fix f cond} holds, for every \((f, x) \in F \times \bR\), there exists a continuous adapted process \(Y^{f, x}\) on the stochastic basis \(\mathbb{B}\) with dynamics \[ d Y_t^{f, x} = b (f, Y^{f, x}_t) dt + \sqrt{a} (f, Y^{f, x}_t) d W_t, \quad Y^{f, x}_0 = x, \] cf. \cite[Chapter~5]{KaraShre} or \cite[Chapter~IX]{RY}. In particular, martingale problem arguments (see, e.g., \cite[Theorem~5.4.20, Remark~5.4.21]{KaraShre}) show that, for each \(f \in F\), the family \(\{P \circ (Y^{f, x})^{-1} \colon x \in \bR\}\) is a strong Markov family. Hence, the operators \[ S^f_t (u) (x) := E^P \big[ u (Y^{f, x}_t) \big], \quad u \in \uc_b (\bR; \bR), \] satisfy the semigroup property \(S^f_{t + s} = S^f_t S^f_s\) for \(s, t \in \bR_+\). In fact, as the following lemma shows, each \((S^f_t)_{t \in \mathbb{R}_+}\) is a linear semigroup on \(\uc_b (\bR; \bR)\), whose (pointwise) generator \(A^f\) satisfies \[ A^f (u)(x) = b(f,x) u'(x) + \tfrac{1}{2} a(f,x) u''(x), \quad u \in C^2_b(\bR; \bR). \]
\begin{lemma} \label{lem: Sf semigroup} Suppose that Condition~\ref{cond: fix f cond} holds. Then, for every \(f \in F\), the family \((S^f_t)_{t \in \mathbb{R}_+}\) is a linear semigroup on \(\uc_b (\bR; \bR)\) (that is also monotone and continuous from below in the sense of \cite[Definition~1.1]{NR}). \end{lemma}
Following \cite{NR}, we define the nonlinear operator \[ \mathcal{J}_t := \sup_{f \in F} S^f_t, \quad t \in \bR_+. \] Let \(\Pi_t\) be the set of finite partitions \(0 = t_0 < \dots < t_m = t\) of the interval \([0, t]\). For a partition \(\pi = \{t_0, \dots, t_m\}\), we set \[ \mathcal{J}_\pi := \mathcal{J}_{t_1 - t_0} \cdots \mathcal{J}_{t_m - t_{m - 1}}.
\] Finally, we define \[ \mathscr{S}_t := \sup_{\pi \in \Pi_t} \mathcal{J}_{\pi}, \quad t \in \bR_+. \] The family \((\mathscr{S}_t)_{t \in \bR_+}\) is called the {\em Nisio semigroup} associated to \(\{ (S^f_t)_{t \in \bR_+} \colon f \in F\}\). The next result shows that \((\mathscr{S}_t)_{t \in \bR_+}\) deserves to be called ``semigroup''. \begin{proposition} \label{prop: first NR} Suppose that Condition~\ref{cond: fix f cond} holds. \begin{enumerate} \item[\textup{(i)}] \((\mathscr{S}_t)_{t \in \bR_+}\) is a sublinear Markovian semigroup on \(\uc_b (\bR; \bR)\) that is continuous from below, i.e., for every \(t \in \bR_+\) and any sequence \((u^n)_{n = 0}^\infty \subset \uc_b (\bR; \bR)\) with \(u^n \nearrow u^0\) pointwise it holds that \(\mathscr{S}_t (u^n) \nearrow \mathscr{S}_t (u^0)\) pointwise as \(n \to \infty\). \end{enumerate} Assume in addition that Condition~\ref{cond: bdd} holds. \begin{enumerate} \item[\textup{(ii)}] \((\mathscr{S}_t)_{t \in \bR_+}\) is strongly continuous, i.e., \(t \mapsto \mathscr{S}_t (u)\) is continuous from \(\bR_+\) into \(\uc_b (\bR; \bR)\) for every \(u \in \uc_b (\bR; \bR)\). \item[\textup{(iii)}] \((\mathscr{S}_t)_{t \in \bR_+}\) is continuous from above on \(\lip_b (\bR; \bR)\), i.e., for every \(t \in \bR_+\) and any sequence \((u^n)_{n = 1}^\infty \subset \lip_b (\bR; \bR)\) with \(u^n \searrow 0\) pointwise it holds that \(\mathscr{S}_t (u^n) \searrow 0\) pointwise as \(n \to \infty\). \end{enumerate} \end{proposition} Continuity from below and above of the semigroup \((\mathscr{S}_t)_{t \in \bR_+}\) allows us to extend the operators \(\mathscr{S}_t\) to \(C_b(\bR;\bR)\). We record this in the following proposition. \begin{proposition}\label{prop: extension} Suppose that the Conditions~\ref{cond: bdd} and \ref{cond: fix f cond} hold. There exists a unique sublinear Markovian semigroup \((\widehat{\mathscr{S}}_t)_{t \in \bR_+}\) on \(C_b(\bR;\bR)\) that is continuous from above and such that \(\widehat{\mathscr{S}}_t =\mathscr{S}_t\) on \(\uc_b(\bR; \bR)\). \end{proposition} Recall that a standing assumption (when the growth function \(\kappa\) is taken to be constantly one, as we do here) in the paper \cite{NR} is the following: there are constants \(\alpha, \beta \in \bR\) such that \begin{align*} \|S^f_t (u)\|_\infty \leq e^{\alpha t} \|u\|_\infty, \qquad \|S^f_t (u)\|_{\lip} \leq e^{\beta t} \|u\|_{\lip} \end{align*} for all \(u \in \lip_b (\bR; \bR)\), \(f \in F\) and \(t \in \bR_+\). In the proof of Proposition~\ref{prop: first NR}, we show that this assumption holds under Condition~\ref{cond: fix f cond}. The following example shows that the second estimate fails under mere continuity and linear growth conditions on the coefficients. \begin{example} \label{ex: A2 fails} Let \(B\) be a one-dimensional standard Brownian motion starting in zero. The semigroup \[ S_t (u) (x) := E \big[ u (Y^x_t) \big], \quad Y^x := (B + x^{1/3})^3, \] does not satisfy \(\|S_t (u)\|_{\lip} \leq e^{\beta t}\|u\|_{\lip}\) for all \(u \in \lip_b(\bR; \bR)\) and \(t \in \bR_+\). Indeed, this follows\footnote{The argument is indirect: If \(\|S_1 (u)\|_{\lip} \leq e^{\beta} \|u\|_{\lip}\) holds for all \(u \in \lip_b(\bR; \bR)\), then the same inequality must also hold for \(u = \on{id}\).} from the observation that \[ S_1 ( \on{id})(x) = E \big[ (B_1 + x^{1/3})^3 \big] = x + 3 x^{1/3}. 
\] Furthermore, It\^o's formula yields that \[ d Y^x_t = 3 (Y^x_t)^{1/3} dt + 3 (Y^x_t)^{2/3} d B_t, \quad Y^x_0 = x, \] which shows that the generator of \((S_t)_{t \in \bR_+}\) satisfies \[ A (u) (x) = 3 x^{1/3} u' (x) + \tfrac{9}{2} x^{4/3} u'' (x), \quad u \in C^2_b (\bR; \bR). \] It is interesting to note that the class \(C^2_b (\bR; \bR)\) does not suffice to characterize the generator uniquely (cf. \cite[Exercise~5.2.17]{KaraShre}). \end{example} By Theorem~\ref{thm: new viscosity no unique} and results from \cite[Section~4]{NR}, both the Nisio semigroup \((\mathscr{S}_t)_{t \in \bR_+}\) and our semigroup \((T_t)_{t \in \bR_+}\) can be related to the same \emph{generator equation} \begin{align} \label{eq: real generator eq} \begin{cases} \partial_t u (t, x) = \sup_{f \in F} A^f(u) (t, x) = G (t, x, u), & \text{for } (t, x) \in [0, T) \times \bR,\\ u (T, x) = \psi (x), & \text{for } x \in \mathbb{R}. \end{cases} \end{align} Under an additional global Lipschitz condition that ensures uniqueness for \eqref{eq: real generator eq}, see Theorem~\ref{theo: viscosity with ell} above, we can prove that \((\mathscr{S}_t)_{t \in \bR_+} = (T_t)_{t \in \bR_+}\). \begin{theorem} \label{thm: nisio} Suppose that the Conditions~\ref{cond: bdd}, \ref{cond: continuity} and \ref{cond: Lipschitz continuity} hold. Then, \((\mathscr{S}_t)_{t \in \bR_+} = (T_t)_{t \in \bR_+}\) on \(\uc_b (\bR; \bR)\). In particular, \((T_t)_{t \in \bR_+}\) has the \(\uc_b\)--Feller property. \end{theorem} The semigroup \((T_t)_{t \in \bR_+}\) is continuous from above on \(C_b(\bR; \bR)\) by \cite[Proposition~3.5]{denk2018} and Proposition~\ref{prop: compactness} below. Hence, combining Theorem \ref{thm: nisio} with Theorem~\ref{theo: viscosity with ell} and Proposition~\ref{prop: extension}, we obtain the following corollary. \begin{corollary} Suppose that the Conditions~\ref{cond: bdd}, \ref{cond: continuity} and \ref{cond: Lipschitz continuity} hold. Then, \((T_t)_{t \in \bR_+}\) is the unique extension of \((\mathscr{S}_t)_{t \in \bR_+}\) to \(C_b(\bR;\bR)\) that is continuous from above. \end{corollary} When arguing based on the generator equation, it might be difficult to relate our framework to the one from \cite{NR} without suitable regularity conditions on the coefficients \(b\) and \(a\). In the examples from \cite[Section~6.3]{NR}, \(b\) and \(a\) satisfy global Lipschitz conditions (comparable to Condition~\ref{cond: Lipschitz continuity}). \section{The Regularity of \( \cR \)} \label{sec: regularity r} In this section we prove that \(\cR\) is upper hemicontinuous with compact values. \subsection{Some Preparations} We start with a few properties of the correspondence \(\Theta\). \begin{lemma} \label{lem: continuity theta} Suppose that Condition \ref{cond: continuity} holds. Then, the correspondence \(x \mapsto \Theta (x)\) is compact-valued and continuous. \end{lemma} The previous lemma is a direct consequence of the following general observation. \begin{lemma} \label{lem: continuity abstract} Let \(F, E \) and \( D \) be topological spaces. If \( g \colon F \times E \to D \) is continuous, and \(F \) is compact, then the correspondence \( \varphi \colon E \twoheadrightarrow D \) defined by \( \varphi(x) := g(F,x) \) is compact-valued and continuous. \end{lemma} \begin{proof} By construction, \( \varphi \) has compact values. 
Regarding the continuity, note that \( \varphi \) is the composition of the correspondence \( E \ni x \mapsto F \times \{ x \} \) and the (single-valued) correspondence \( F \times E \ni (f,x) \mapsto \{ g(f,x) \} \). While the latter correspondence is continuous due to continuity of \( g \), the former is continuous being the finite product of compact-valued continuous correspondences, cf. \cite[Theorem 17.28]{hitchi}. Thus, continuity of \( \varphi \) follows from \cite[Theorem 17.23]{hitchi}. \end{proof} The next lemma is an auxiliary result regarding a large class of continuous correspondences. \begin{lemma} \label{lem: continuity interval} Let \( E \) be a topological space, and let \(f, g \colon E \to \mathbb{R} \) be continuous functions with \( f(x) \leq g(x) \) for every \( x \in E\). Then, the correspondence \( E \ni x \mapsto [f(x), g(x)] \) is continuous with compact values. \end{lemma} \begin{proof} By \cite[Theorem 17.15]{hitchi}, continuity of the correspondence \( E \ni x \mapsto [f(x), g(x)] \) is equivalent to continuity of the function \( E \ni x \mapsto [f(x), g(x)] \in \mathcal{K}(\bR)\), where \( \mathcal{K}(\bR) \), the collection of nonempty compact subsets of \( \bR\), is equipped with the Hausdorff metric \( d_H\). As \[ d_H([a,b], [c,d]) = \max \big\{ | a - c|, |b-d| \big\}, \] for every \(a \leq b\), \(c \leq d \), continuity of \( E \ni x \mapsto [f(x), g(x)] \in \mathcal{K}(\bR)\) follows from continuity of \(f \) and~\(g \). \end{proof} For a subset \(G\) of a locally convex space we denote by \(\oconv G\) the closure of the convex hull generated by \(G\). \begin{lemma} \label{lem: continuity result for theta} Let \(D\) be a locally convex space and let \(\varphi \colon \mathbb{R}_+ \twoheadrightarrow D\) be an upper hemicontinuous correspondence with convex and compact values such that \(\oconv \varphi([t, t + 1])\) is compact. Then, for every \(t \in \mathbb{R}_+\), we have \[ (a_n)_{n \in \mathbb{N}} \subset (0, 1], \ a_n \to 0\ \Longrightarrow \ \bigcap_{m \in \mathbb{N}} \oconv \varphi ([t, t + a_m]) \subset \varphi (t). \] \end{lemma} \begin{proof} Notice that \( [0,1] \ni s \mapsto \psi (s) := \varphi ([t, t + s])\) is upper hemicontinuous as a composition of the continuous (Lemma \ref{lem: continuity interval}) correspondence \(s \mapsto [t, t + s] \) and the upper hemicontinuous correspondence~\(\varphi\), see \cite[Theorem 17.23]{hitchi}. By our assumption, \(\oconv \psi (s)\) is compact, being a closed subset of the compact set \(\oconv \varphi([t, t + 1])\), and we deduce from \cite[Theorem~17.35]{hitchi} that \(s \mapsto \phi (s) := \oconv \psi(s)\) is upper hemicontinuous. Take \(x \in \bigcap_{m \in \mathbb{N}} \phi (a_m)\). Then, for each \(m \in \mathbb{N}\), \((a_m, x) \in \on{gr} \phi\) and hence, by \cite[Theorem~17.16]{hitchi}, as \(\phi\) is compact-valued, the upper hemicontinuity and \(a_m \to 0\) imply that \(x \in \phi(0)\). Finally, observing that \(\phi (0) = \varphi(t)\) completes the proof. \end{proof} \begin{lemma} \label{lem: oconv inclusion theta} Suppose that the Conditions \ref{cond: convexity} and \ref{cond: continuity} hold. Then, \[ \bigcap_{m \in \mathbb{N}} \oconv \Theta (\omega([t, t + 1/m])) \subset \Theta (\omega(t)) \] for all \((t, \omega) \in \of 0, \infty\of\). \end{lemma} \begin{proof} By Lemma \ref{lem: continuity theta} and Condition \ref{cond: convexity}, \(t \mapsto \Theta (\omega (t))\) is continuous with compact and convex values. 
Furthermore, by the continuity of \(b\) and \(a\), i.e., Condition \ref{cond: continuity}, the set \(\Theta (\omega([t, t + 1]))\) is compact. Consequently, as in completely metrizable locally convex spaces the closed convex hull of a compact set is itself compact (\cite[Theorem 5.35]{hitchi}), we conclude that \(\oconv \Theta ( \omega([t, t + 1]))\) is compact. Finally, the claim follows from Lemma~\ref{lem: continuity result for theta}. \end{proof}
\subsection{\(\cR\) is compact-valued}
We start with a first auxiliary observation. For \(M > 0\) and \(\omega \in \Omega\), define \[ \tau_M (\omega) := \inf \{t \geq 0 \colon |\omega (t)| \geq M\} \wedge M. \] Furthermore, for \(\omega = (\omega^{(1)}, \omega^{(2)}) \in \Omega \times \Omega\), we set \[ \zeta_M (\omega) := \sup \Big\{ \frac{|\omega^{(2)} (t \wedge \tau_M (\omega^{(1)})) - \omega^{(2)}(s \wedge \tau_M(\omega^{(1)}))|}{t - s} \colon 0 \leq s < t\Big\}. \]
\begin{lemma} \label{lem: Lipschitz Contant} Let \(P\) be a Borel probability measure on \(\Omega \times \Omega\). There exists a set \(D \subset \mathbb{R}_+\) with countable complement such that for every \(M \in D\) there exists a \(P\)-null set \(N = N(M)\) such that \(\zeta_M\) is lower semicontinuous at all \(\omega \not \in N\). \end{lemma}
\begin{proof} Due to \cite[Lemma 11.1.2]{SV}, for all but countably many \(M \in \mathbb{R}_+\), there exists a \(P\)-null set \(N = N(M)\) such that \(\omega \mapsto \tau_M (\omega^{(1)})\) is continuous at all \(\omega \not \in N\). Take such an \(M \in \mathbb{R}_+\) and \(\omega \not \in N\). Furthermore, let \((\omega_n)_{n \in \mathbb{N}} \subset \Omega \times \Omega\) be such that \(\omega_n \to \omega\). Then, \begin{align*} \zeta_M (\omega) &= \sup \Big\{ \liminf_{n \to \infty} \frac{| \omega_n^{(2)} (t \wedge \tau_M(\omega^{(1)}_n)) - \omega^{(2)}_n (s \wedge \tau_M (\omega^{(1)}_n))|}{t - s} \colon 0 \leq s < t\Big\} \leq \liminf_{n \to \infty} \zeta_M (\omega_n). \end{align*} The proof is complete. \end{proof}
The next lemma follows similarly to the proof (of the second part) of \cite[Lemma~7.4]{CN22}, see also \cite[Problem~5.3.15]{KaraShre}. We skip the details for brevity.
\begin{lemma} \label{lem: maximal inequality} Suppose that Condition \ref{cond: LG} holds. For every bounded set \(K \subset \mathbb{R}\) and \(T, m > 0\), there exists a constant \(\C > 0\) such that, for all \(s, t \in [0, T]\), \[ \sup_{x \in K} \sup_{P \in \cR(x)} E^P \Big[ \sup_{r \in [0, T]} |X_r|^m \Big] < \infty, \qquad \sup_{x \in K} \sup_{P \in \cR(x)} E^P \big[ |X_{t} - X_{s}|^m \big] \leq \C |t - s|^{m/2}. \] \end{lemma}
The next results extend \cite[Theorem 4.41]{hol16} and \cite[Theorem 2.5]{neufeld} beyond the case where \(b\) and \(a\) are uniformly bounded and globally Lipschitz continuous.
\begin{proposition} \label{prop: closedness} Suppose that the Conditions \ref{cond: convexity}, \ref{cond: LG} and \ref{cond: continuity} hold. The set \begin{align*} \mathfrak{P}(\Theta) := \big\{ P \in \fPas \colon P &\circ X_0^{-1} \in \{\delta_x \colon x \in \bR\}, \ (\llambda \otimes P)\text{-a.e. } (dB^{P} /d\llambda, dC^{P}/d\llambda) \in \Theta(X) \big\} \end{align*} is closed in \(\mathfrak{P}(\Omega)\). \end{proposition}
\begin{proof} Let \((P^n)_{n \in \mathbb{N}} \subset \mathfrak{P}(\Theta)\) be such that \(P^n \to P\) weakly. By definition of \(\mathfrak{P}(\Theta)\), for every \(n \in \mathbb{N}\), there exists a point \(x^n \in \mathbb{R}\) such that \(P^n \circ X_0^{-1} = \delta_{x^n}\) and hence, \(P^n \in \cR(x^n)\).
Since \(P^n \to P\) and \(\{\delta_x \colon x \in \mathbb{R}\}\) is closed (\cite[Theorem 15.8]{hitchi}), there exists a \(x^0 \in \mathbb{R}\) such that \(P \circ X^{-1}_0 = \delta_{x^0}\). In particular, \(x^n \to x^0\) and the set \(U := \{x^n \colon n \in \mathbb{N}\}\) is bounded. It remains to prove that \(P\in \fPas\) with differential characteristics in \(\Theta\). The proof of this is split into four steps. In order to execute our program, we need a last bit of auxiliary notation. For each \(n \in \mathbb{N}\), denote the \(P^n\)-characteristics of \(X\) by \((B^n, C^n)\). Define \(\Omega^* := \Omega \times \Omega \times \Omega\) and denote the coordinate process on \(\Omega^*\) by \(Y = (Y^{(1)}, Y^{(2)}, Y^{(3)})\). Further, set \(\cF^* := \sigma (Y_s, s \geq 0)\) and let \(\F^* = (\cF^*_s)_{s \geq 0}\) be the right-continuous filtration generated by \(Y\). \emph{Step 1.} We start by showing that \(\{P^n \circ (X, B^n, C^n)^{-1} \colon n \in \mathbb{N}\}\) is tight on the space \((\Omega^*, \cF^*)\). Since \(P^n \to P\), it suffices to prove tightness of \(\{P^n \circ (B^n, C^n)^{-1} \colon n \in \mathbb{N}\}\). We use Aldous' tightness criterion (\cite[Theorem~VI.4.5]{JS}), i.e., we show the following two conditions: \begin{enumerate} \item[(a)] for every \(N, \varepsilon > 0\) there exists a \(K \in \mathbb{R}_+\) such that \[ \sup_{n \in \mathbb{N}} P^n \Big( \sup_{s \in [0, N]} |B^n_s| + \sup_{s \in [0, N]} |C^n_s| \geq K \Big) \leq \varepsilon; \] \item[(b)] for every \(N, \varepsilon > 0\), \[ \lim_{\theta \searrow 0} \limsup_{n \to \infty} \sup \big\{P^n (|B^n_T - B^n_S| + |C^n_T - C^n_S| \geq \varepsilon) \big\} = 0, \] where the \(\sup\) is taken over all stopping times \(S, T \leq N\) such that \(S \leq T \leq S + \theta\). \end{enumerate} For a moment, let us fix \(N > 0\). Thanks to Lemma \ref{lem: maximal inequality}, recalling that \(P^n \in \cR(x^n)\) and that \(U = \{x^n \colon n \in \mathbb{N}\}\) is bounded, we have \begin{align} \label{eq: second moment bound} \sup_{n \in \mathbb{N}} E^{P^n} \Big[ \sup_{s \in [0, N]} |X_s|^2 \Big] \leq \sup_{x \in U} \sup_{P \in \cR (x)} E^P \Big[ \sup_{s \in [0, N]} |X_s|^2 \Big] < \infty. \end{align} Now, by the definition of \(\Theta\) and the linear growth assumption (Condition \ref{cond: LG}), we get that \(P^n\)-a.s. \[ \sup_{s \in [0, N]} |B^n_s| + \sup_{s \in [0, N]} |C^n_s| \leq \C \Big( 1 + \sup_{s \in [0, N]} |X_s|^2 \Big), \] where the constant \(\C>0\) might depend on \(N\) but is independent of \(n\). By virtue of \eqref{eq: second moment bound}, this bound immediately yields (a). For (b), take two stopping times \(S, T \leq N\) such that \(S \leq T \leq S + \theta\) for some \(\theta > 0\). Then, using again the definition of \(\Theta\) and the linear growth assumptions, we get \(P^n\)-a.s. \[ |B^n_T - B^n_S| + |C^n_T - C^n_S| \leq \C (T - S) \Big( 1 + \sup_{s \in [0, N]} |X_s|^2 \Big) \leq \C \theta \Big( 1 + \sup_{s \in [0, N]} |X_s|^2 \Big), \] which yields (b) by virtue of \eqref{eq: second moment bound}. We conclude that \(\{P^n \circ (X, B^n, C^n)^{-1} \colon n \in \mathbb{N}\}\) is tight. Up to passing to a subsequence, from now on we assume that \(P^n \circ (X, B^n, C^n)^{-1} \to Q\) weakly, where \(Q\) is a probability measure on \((\Omega^*, \cF^*)\). \emph{Step 2.} Next, we show that \(Y^{(2)}\) and \(Y^{(3)}\) are \(Q\)-a.s. locally absolutely continuous. 
Thanks to Lemma~\ref{lem: Lipschitz Contant}, there exists a dense set \(D \subset \mathbb{R}_+\) such that, for every \(M \in D\), the map \(\zeta_M\) is \(Q \circ (Y^{(1)}, Y^{(2)})^{-1}\)-a.s. lower semicontinuous. By virtue of Condition \ref{cond: LG} and the definition of \(\tau_M\), for every \(M \in D\) there exists a constant \(\C = \C (M) > 0\) such that \(P^n(\zeta_M (X, B^n) \leq \C) = 1\) for all \(n \in \mathbb{N}\). As \(\zeta_M\) is \(Q \circ (Y^{(1)}, Y^{(2)})^{-1}\)-a.s. lower semicontinuous, \cite[Example 17, p. 73]{pollard} yields that \[ 0 = \liminf_{n \to \infty} P^n (\zeta_M (X, B^n) > \C) \geq Q (\zeta_M (Y^{(1)}, Y^{(2)}) > \C). \] Further, since \(D\) is dense in \(\mathbb{R}_+\), we conclude that \(Q\)-a.s. \(Y^{(2)}\) is locally Lipschitz continuous, i.e., in particular locally absolutely continuous. Similarly, we get that \(Y^{(3)}\) is \(Q\)-a.s. locally Lipschitz and hence, locally absolutely continuous. \emph{Step 3.} We define a map \( \Phi \colon \Omega^* \to \Omega \) by \(\Phi (\omega^{(1)}, \omega^{(2)}, \omega^{(3)}) := \omega^{(1)}\). Clearly, we have \(Q \circ \Phi^{-1} = P\) and \(Y^{(1)} = X \circ \Phi\). In this step, we prove that \((\llambda \otimes Q)\)-a.e. \((dY^{(2)} /d \llambda, dY^{(3)}/ d \llambda) \in \Theta(Y^{(1)})\). For a moment, let us fix \(m \in \mathbb{N}\). By virtue of \cite[Corollary 8, p. 48]{diestel}, \(P^n\)-a.s. for \(\llambda\)-a.a. \(t \in \mathbb{R}_+\), we have \begin{equation}\label{eq: P as inclusion theta} \begin{split} m (B^n_{t + 1/m} - B^n_t, C^n_{t + 1/m} - C^n_t) &\in \oconv ( dB^n / d \llambda, d C^n / d \llambda) ([ t, t + 1/m ]) \\&\subset \oconv \Theta (X([t, t + 1/m])). \end{split} \end{equation} By Skorokhod's coupling theorem, with little abuse of notation, there exist random variables \[(X^0, B^0, C^0), (X^1, B^1, C^1), (X^2, B^2, C^2), \dots\] defined on some probability space \((\Sigma, \mathcal{G}, R)\) such that \((X^0, B^0, C^0)\) has distribution \(Q\), \((X^n, B^n, C^n)\) has distribution \(P^n\circ (X, B^n, C^n)^{-1}\) and \(R\)-a.s. \((X^n, B^n, C^n) \to (X^0, B^0, C^0)\) in the local uniform topology. We deduce from Lemma \ref{lem: continuity abstract} that the correspondence \(\omega \mapsto \Theta (\omega([t, t + 1 /m]))\) is continuous for every \(t \in \bR_+\). Furthermore, for every \(t \in \bR_+\), as \(\oconv \Theta (\omega([t, t + 1/m]))\) is compact (by \cite[Theorem 5.35]{hitchi}) for every \(\omega \in \Omega\), it follows from \cite[Theorem 17.35]{hitchi} that the correspondence \(\omega \mapsto \oconv \Theta (\omega([t, t + 1/m]))\) is upper hemicontinuous and compact-valued. Thus, by virtue of \eqref{eq: P as inclusion theta} and \cite[Theorem 17.20]{hitchi}, we get, \(R\)-a.s. for \(\llambda\)-a.a. \(t \in \bR_+\), that \[ m (B^0_{t + 1/m} - B^0_t, C^0_{t + 1/m} - C^0_t) \in \oconv \Theta ( X^0([t, t + 1/m])). \] Notice that \((\llambda \otimes R)\)-a.e. \[ (d B^0 / d \llambda, d C^0 / d \llambda) = \lim_{m \to \infty} m (B^0_{\cdot + 1/m} - B^0_\cdot, C^0_{\cdot + 1/m} - C^0_\cdot). \] Now, using Lemma \ref{lem: oconv inclusion theta}, we conclude that \(R\)-a.s. for \(\llambda\)-a.a. \(t \in \mathbb{R}_+\) \[ (d B^0 / d \llambda, d C^0 / d \llambda) (t) \in \bigcap_{m \in \mathbb{N}} \oconv \Theta (X^0([t, t + 1/m])) \subset \Theta (X^0_t). \] This shows that \( (\llambda \otimes Q)\)-a.e. \((dY^{(2)} /d \llambda, dY^{(3)}/ d \llambda) \in \Theta (Y^{(1)})\). 
\emph{Step 4.} In the final step of the proof, we show that \(P \in \fPas\) and we relate \((Y^{(2)}, Y^{(3)})\) to the \(P\)-semimartingale characteristics of the coordinate process. Thanks to \cite[Lemma 11.1.2]{SV}, there exists a dense set \(D \subset \bR_+\) such that \(\tau_M \circ \Phi\) is \(Q\)-a.s. continuous for all \(M \in D\). Take some \(M \in D\). Since \(P^n \in \fPas\), it follows from the definition of the first characteristic that the process \(X_{\cdot \wedge \tau_M} - B^n_{\cdot \wedge \tau_M}\) is a local \(P^n\)-\(\F_+\)-martingale. Furthermore, by the definition of the stopping time \(\tau_M\) and the linear growth assumption (Condition \ref{cond: LG}), we see that \(X_{\cdot \wedge \tau_M} - B^n_{\cdot \wedge \tau_M}\) is \(P^n\)-a.s. bounded by a constant independent of \(n\), which, in particular, implies that it is a true \(P^n\)-\(\F_+\)-martingale. Now, it follows from \cite[Proposition~IX.1.4]{JS} that \(Y^{(1)}_{\cdot \wedge \tau_M \circ \Phi} - Y^{(2)}_{\cdot \wedge \tau_M \circ \Phi}\) is a \(Q\)-\(\F^*\)-martingale. Recalling that \(Y^{(2)}\) is \(Q\)-a.s. locally absolutely continuous by Step 2, this means that \(Y^{(1)}\) is a \(Q\)-\(\F^*\)-semimartingale with first characteristic \(Y^{(2)}\). Similarly, we see that the second characteristic is given by \(Y^{(3)}\). Finally, we need to relate these observations to the probability measure \(P\) and the filtration \(\F_+\). We denote by \(A^{p, \Phi^{-1}(\F_+)}\) the dual predictable projection of some process \(A\), defined on \((\Omega^*, \cF^*)\), to the filtration \(\Phi^{-1}(\F_+)\). Recall from \cite[Lemma 10.42]{jacod79} that, for every \(t \in \bR_+\), a random variable \(Z\) on \((\Omega^*, \cF^*)\) is \(\Phi^{-1}(\cF_{t+})\)-measurable if and only if it is \(\cF^*_t\)-measurable and \(Z (\omega^{(1)}, \omega^{(2)}, \omega^{(3)})\) does not depend on \((\omega^{(2)}, \omega^{(3)})\). Thanks to Stricker's theorem (see, e.g., \cite[Lemma~2.7]{jacod80}), \(Y^{(1)}\) is a \(Q\)-\(\Phi^{-1} (\F_+)\)-semimartingale. Notice that each \(\tau_M \circ \Phi\) is a \(\Phi^{-1}(\F_+)\)-stopping time and recall from Step 3 that \((\llambda \otimes Q)\)-a.e. \((d Y^{(2)}/ d \llambda, d Y^{(3)}/ d \llambda) \in \Theta(Y^{(1)})\). Hence, by definition of \(\tau_M\) and the linear growth assumption, for every \(M \in D\), we have \[ E^Q \big[ \on{Var} (Y^{(2)})_{\tau_M \circ \Phi} \big] + E^Q \big[ \on{Var}(Y^{(3)})_{\tau_M \circ \Phi} \big] = E^Q \Big[ \int_0^{\tau_M} \Big(\Big| \frac{d Y^{(2)}}{d \llambda} \Big| + \Big| \frac{d Y^{(3)}}{d \llambda} \Big| \Big) d \llambda \Big] < \infty, \] where \(\on{Var} (\cdot)\) denotes the variation process. By virtue of this, we get from \cite[Proposition 9.24]{jacod79} that the \(Q\)-\(\Phi^{-1}(\F_+)\)-characteristics of \(Y^{(1)}\) are given by \(((Y^{(2)})^{p, \Phi^{-1}(\F_+)}, (Y^{(3)})^{p, \Phi^{-1}(\F_+)})\). Hence, thanks to Lemma~\ref{lem: jacod restatements} below, the coordinate process \(X\) is a \(P\)-\(\F_+\)-semimartingale whose characteristics \((B^P, C^P)\) satisfy \(Q\)-a.s. \[(B^P, C^P) \circ \Phi = ((Y^{(2)})^{p, \Phi^{-1}(\F_+)}, (Y^{(3)})^{p, \Phi^{-1}(\F_+)}).\] Consequently, we deduce from the Steps~2 and 3, and \cite[Theorem~5.25]{HWY}, that \(P\)-a.s. 
\((B^P, C^P) \ll \llambda\) and \begin{align*} (\llambda \otimes P) \big( (d B^P / d \llambda&, d C^P / d \llambda) \not \in \Theta(X) \big) \\&= (\llambda \otimes Q \circ \Phi^{-1}) \big( (d B^P / d \llambda, d C^P / d \llambda) \not \in \Theta(X) \big) \\&= (\llambda \otimes Q) \big( E^Q [(d Y^{(2)} / d \llambda, d Y^{(3)} / d \llambda) | \Phi^{-1} (\F_+)_-] \not \in \Theta(Y^{(1)})\big) = 0, \end{align*} where we use \cite[Corollary 8, p. 48]{diestel} for the final equality. This means that \(P \in \mathfrak{P}(\Theta) \) and the proof is complete. \end{proof}
\begin{proposition} \label{prop: compactness} Suppose that the Conditions \ref{cond: convexity}, \ref{cond: LG} and \ref{cond: continuity} hold. For any compact set \(K \subset \mathbb{R}\), the set \begin{align*} \cR^\circ := \big\{ P \in \fPas \colon P &\circ X_0^{-1} \in \{\delta_x \colon x \in K\}, \ (\llambda \otimes P)\text{-a.e. } (dB^{P} /d\llambda, dC^{P}/d\llambda) \in \Theta(X) \big\} \end{align*} is compact in \(\mathfrak{P}(\Omega)\). \end{proposition}
\begin{proof} Thanks to \cite[Lemma 7.4]{CN22}, we already know that \(\cR^\circ\) is relatively compact. We note that \(\cR^\circ \) is closed, being the intersection of the closed sets \( \mathfrak{P}(\Theta)\) and \[ \big\{ P \in \mathfrak{P}(\Omega) \colon P \circ X_0^{-1} \in \{ \delta_x \colon x \in K \} \big \}. \] While \( \mathfrak{P}(\Theta)\) is closed by Proposition \ref{prop: closedness}, the latter set is closed as \(K\) is closed (\cite[Theorem 15.8]{hitchi}). This completes the proof. \end{proof}
\subsection{Upper Hemicontinuity of \(\mathcal{R}\)}
\begin{proposition} \label{prop: upper hemicontinuous} Suppose that the Conditions \ref{cond: convexity}, \ref{cond: LG} and \ref{cond: continuity} hold. Then, the correspondence \( \cR \) is upper hemicontinuous. \end{proposition}
\begin{proof} This follows as a special case of Proposition \ref{prop: K upper hemi and compact} below. \end{proof}
\subsection{Proof of Theorem \ref{thm: main r}}
By Proposition \ref{prop: compactness}, \( \cR \) is compact-valued, while Proposition \ref{prop: upper hemicontinuous} provides upper hemicontinuity of \( \cR \). \qed
\section{Proof of Theorem \ref{thm: new very main}} \label{sec: proof main result}
First of all, parts (iii) and (iv), i.e., the \(C_0\) and the (uniform) strong \(\usc_b\)--Feller properties, follow directly from the Feller selection principle given by Theorem \ref{theo: Feller selection}. We now discuss part (i), i.e., the \(\usc_b\)--Feller property. Let \( \psi \in \usc_b(\mathbb{R}; \mathbb{R})\). Notice that \( \of 0, \infty \of \hspace{0.1cm} \ni (t, \omega) \mapsto \psi(\omega(t)) \) is upper semicontinuous and bounded. Thus, thanks to \cite[Theorem~8.10.61]{bogachev}, the map \begin{align} \label{eq: joint continuity semigroup proof} \mathbb{R}_+ \times \mathfrak{P}(\Omega) \ni (t, P) \mapsto E^{P}\big[\psi(X_{ t}) \big]\end{align} is upper semicontinuous, too. By Theorem \ref{thm: main r}, the compact-valued correspondence \( \bR_+ \times \bR \ni (t,x) \mapsto \{t\} \times \cR(x) \) is upper hemicontinuous, being the finite product of upper hemicontinuous correspondences with compact values, cf. \cite[Theorem 17.28]{hitchi}. Thus, upper semicontinuity of \( (t,x) \mapsto \cE^x(\psi(X_t)) \) follows from the upper semicontinuity of \eqref{eq: joint continuity semigroup proof} and (a version of) Berge's maximum theorem as given by \cite[Lemma 17.30]{hitchi}. This completes the proof of (i).
Part (ii), i.e., the \(C_b\)--Feller property, follows along the same lines when additionally Theorem~\ref{theo: lower hemi} and \cite[Lemma~17.29]{hitchi} are taken into consideration. We omit the details for brevity. \qed \section{Markov and Feller Selection Principles: Proof of Theorems \ref{theo: strong Markov selection} and \ref{theo: Feller selection} } \label{sec: feller selection} The proof of the strong Markov selection principle, given by Theorem \ref{theo: strong Markov selection}, is based on some fundamental ideas of Krylov \cite{krylov1973selection} for Markovian selection as worked out in the monograph \cite{SV} of Stroock and Varadhan, see also \cite{nicole1987compactification,hausmann86}. The main technical steps in the argument are to establish stability under conditioning and pasting of a certain sequence of correspondences. The proof of the Feller selection principle, given by Theorem \ref{theo: Feller selection}, is based on the observation that any strong Markov selection is already a (uniform) strong Feller and \(C_0\)--Feller selection in case the system carries enough randomness, which is ensured by our (uniform) ellipticity condition. \subsection{Proof of the Markov Selection Principle: Theorem \ref{theo: strong Markov selection}} This section is split into two parts. We start with some properties of the correspondence \(\cK\) and then finalize the proof in the second part. \subsubsection{Preparations} The following lemma is a restatement of a path-continuous version of \cite[Lemma~III.3.38, Theorem~III.3.40]{JS}. \begin{lemma} \label{lem: JS convex} Let \(P, Q \in \fPs := \fPs (0)\) and denote the characteristics of the coordinate process by \((B^P, C^P)\) and \((B^Q, C^Q)\), respectively. Further, take \(\alpha \in (0, 1)\) and set \[ R := \alpha P + (1 - \alpha) Q. \] Then, \(P \ll R, Q \ll R\) and there are versions of the Radon--Nikodym density processes \(dP/dR |_{\mathcal{F}_\cdot} = Z^P\) and \(dQ/dR |_{\mathcal{F}_\cdot} = Z^Q\) such that identically \begin{align}\label{eq: ZP ZQ convex combi} \alpha Z^P + (1 - \alpha) Z^Q = 1, \quad 0 \leq Z^P \leq 1/\alpha, \quad 0 \leq Z^Q \leq 1/(1 - \alpha). \end{align} Moreover, \(R \in \fPs\) and the \(R\)-characteristics \((B^R, C^R)\) of the coordinate process satisfy \[ d B^R = \alpha Z^P d B^P + (1 - \alpha) Z^Q d B^Q, \qquad d C^R = \alpha Z^P d C^P + (1 - \alpha) Z^Q d C^Q. \] \end{lemma} The following lemma is a restatement of \cite[Lemma 2.9 (a)]{jacod80} for a path-continuous setting. \begin{lemma} \label{lem: jacod restatements} Take two filtered probability spaces \(\B^* = (\Omega^*, \mathcal{F}^*, \F^* = (\mathcal{F}^*_t)_{t \geq 0}, P^*)\) and \(\B' = (\Omega', \mathcal{F}', \F' = (\mathcal{F}'_t)_{t \geq 0}, P')\) with right-continuous filtrations and the property that there is a map \(\phi \colon \Omega' \to \Omega^*\) such that \( \phi^{-1} (\mathcal{F}^*) \subset \mathcal{F}',P^* = P' \circ \phi^{-1}\) and \(\phi^{-1} (\mathcal{F}^*_t) = \mathcal{F}'_t\) for all \(t \in \mathbb{R}_+\). Then, \(X^*\) is a continuous semimartingale on \(\B^*\) if and only if \(X' = X^* \circ \phi\) is a continuous semimartingale on \(\B'\). Moreover, \((B^*, C^*)\) are the characteristics of \(X^*\) if and only if \((B^* \circ \phi, C^* \circ \phi)\) are the characteristics of \(X' = X^* \circ \phi\). \end{lemma} For \(t \in \mathbb{R}_+\), we define \(\gamma_t \colon \Omega \to \Omega\) by \(\gamma_t (\omega) := \omega ( (\cdot - t)^+ ) \) for \(\omega \in \Omega\). 
Moreover, for \(P \in \mathfrak{P}(\Omega)\) and \(t \in \mathbb{R}_+\), we set \[P_t := P \circ \theta_t^{-1}, \qquad P^t := P \circ \gamma_t^{-1}.\]
\begin{lemma} \label{lem: p^t cont} The maps \((t, P) \mapsto P_t\) and \((t, P) \mapsto P^t\) are continuous. \end{lemma}
\begin{proof} Notice that \((t, \omega) \mapsto \theta_t (\omega)\) and \((t, \omega) \mapsto \gamma_t (\omega)\) are continuous by the Arzel\`a--Ascoli theorem. Now, the claim follows from \cite[Theorem 8.10.61]{bogachev}. \end{proof}
\begin{lemma} \label{lem: implication c^*} For every \((t, \omega) \in \of 0, \infty\of\), \(P \in \cC(t, \omega)\) implies \(P_t \in \cK (0, \omega(t))\). \end{lemma}
\begin{proof} Let \((t, \omega) \in \of 0, \infty\of\) and take \(P \in \cC(t, \omega)\). Obviously, \( P_t \circ X_0^{-1} = \delta_{\omega(t)} \) and, thanks to Lemma~\ref{lem: jacod restatements}, we also get \(P_t \in \fPas \) and \( (\llambda \otimes P_t)\text{-a.e. } (dB^{P_t} /d\llambda, dC^{P_t}/d\llambda) \in \Theta(X), \) which proves \( P_t \in \cK(0,\omega(t)) \). \end{proof}
\begin{lemma} \label{lem: p^t} For every \( (t, x) \in \bR_+ \times \bR\), we have \( P \in \cK(0,x) \) if and only if \( P^t \in \cK(t,x) \). \end{lemma}
\begin{proof} Let \(x \in \bR\) and \( P \in \cK(0,x) \). As \( \gamma_t^{-1} (\{ X = x \text{ on } [0,t] \} ) = \{ X_0 = x \}, \) we have \( P^t( X = x \text{ on } [0,t]) = 1\). Next, it follows from Lemma \ref{lem: jacod restatements} that \(P^t \in \fPas (t) \) and \[ (\llambda \otimes P^t)\text{-a.e. } (dB^{P^t}_{\cdot + t} /d\llambda, dC^{P^t}_{\cdot + t}/d\llambda) \in \Theta (X_{\cdot + t}), \] which proves \( P^t \in \cK(t,x) \). Conversely, take \(P^t \in \cK(t,x) \). Due to the identity \( \theta_t \circ \gamma_t = \on{id} \), we have \( P = (P^t)_t \). Thus, \( P \circ X_0^{-1} = \delta_x \), and applying Lemma \ref{lem: jacod restatements} once more, we conclude that~\(P \in \cK(0,x) \). The proof is complete. \end{proof}
\begin{lemma} \label{lem: k idenity} For all \((t, x) \in \bR_+ \times \bR\), we have \(\cK (t, x) = \{P^t \colon P \in \cK(0, x)\}\). \end{lemma}
\begin{proof} Take \((t, x) \in \bR_+ \times \bR\). Lemma \ref{lem: p^t} yields the inclusion \(\{P^t \colon P \in \cK(0, x)\} \subset \cK(t, x)\). Conversely, take \(P \in \cK(t, x)\). As \( P( X = x \text{ on } [0,t]) = 1 \), the equality \( (P_t)^t = P \) holds. Now, Lemma~\ref{lem: implication c^*} yields that \(P_t \in \cK(0, x)\) and hence, we get the inclusion \(\cK(t, x) \subset \{P^t \colon P \in \cK(0, x)\}\). \end{proof}
\begin{proposition} \label{prop: K upper hemi and compact} The correspondence \((t, x) \mapsto \cK(t, x)\) is upper hemicontinuous with nonempty and compact values. \end{proposition}
\begin{proof} As \( x \mapsto \cK(0, x) \) has nonempty compact values by Proposition \ref{prop: compactness} and Standing Assumption~\ref{SA: non empty}, Lemmata~\ref{lem: p^t cont} and \ref{lem: k idenity} yield that the same is true for \((t, x) \mapsto \cK (t, x)\). It remains to show that \(\cK\) is upper hemicontinuous. Let \( F \subset \mathfrak{P}(\Omega) \) be closed. We need to show that \( \cK^l(F) = \{ (t, x) \in \bR_+ \times \bR \colon \cK(t, x) \cap F \neq \emptyset \} \) is closed. Suppose that the sequence \( (t^n, x^n)_{n \in \mathbb{N}} \subset \cK^l(F) \) converges to \((t, x) \in \bR_+ \times \bR\). For each \(n \in \mathbb{N} \), there exists a probability measure \( P^n \in \cK (t^n, x^n) \cap F \).
Thanks to Proposition \ref{prop: compactness}, the set \begin{align*} \cR^\circ := \big\{ P \in \fPas \colon P &\circ X_0^{-1} \in \{\delta_{x^n}, \delta_{x} \colon n \in \mathbb{N}\},\ (\llambda \otimes P)\text{-a.e. } (dB^{P} /d\llambda, dC^{P}/d\llambda) \in \Theta(X) \big\} \end{align*} is compact. Hence, by Lemma \ref{lem: p^t cont}, so is the set \[ \cK^\circ := \{ P^t \colon (t, P) \in \{t^n, t \colon n \in \mathbb{N}\} \times \cR^\circ \}. \] Thus, by virtue of Lemma \ref{lem: k idenity}, we conclude that \(\{P^n \colon n \in \mathbb{N}\} \subset \cK^\circ\) is relatively compact. Hence, passing to a subsequence if necessary, we can assume that \( P^n \to P \) weakly for some \( P \in \cK^\circ \cap F\). Notice that, for every \(\varepsilon \in (0, t)\), the set \(\{|X_s - x| \leq \varepsilon \text{ for all } s \in [0, t - \varepsilon]\} \subset \Omega\) is closed. Consequently, by the Portmanteau theorem, for every \(\varepsilon \in (0, t)\), we get \[ 1 = \limsup_{n \to \infty} P^n( |X_s - x| \leq \varepsilon \text{ for all } s \in [0, t - \varepsilon] ) \leq P( |X_s - x| \leq \varepsilon \text{ for all } s \in [0, t - \varepsilon] ). \] Consequently, \(P( X = x \text{ on } [0, t] ) = 1\), which implies that \(P = (P_t)^t\). By Lemmata \ref{lem: p^t cont} and \ref{lem: implication c^*}, we have \((P^n)_{t^n} \in \cK(0, x^n)\) and \((P^n)_{t^n} \to P_t\) weakly. Further, since \(P_t \circ X_0^{-1} = \delta_x\), Proposition~\ref{prop: closedness} yields that \(P_t \in \cK(0, x)\). Thus, by Lemma \ref{lem: p^t}, \(P \in \cK^\circ \cap F \cap \cK(t, x) = \cK(t, x) \cap F,\) which implies \((t, x) \in \cK^l(F) \). We conclude that \(\cK\) is upper hemicontinuous. \end{proof} \begin{lemma} \label{lem: r^* compact} The correspondence \( (t, x) \mapsto \cK(t, x) \) has convex values. \end{lemma} \begin{proof} By virtue of Lemma \ref{lem: k idenity}, it suffices to prove that \(\cK (0, x)\) is convex for every fixed \(x \in \mathbb{R}\). Indeed, for every \(P, Q \in \cK(t, x)\) and \(\alpha \in (0, 1)\), there are probability measures \(\oP, \oQ \in \cK(0, x)\) such that \(\oP^t = P\) and \(\oQ^t = Q\). Then, \(\alpha P + (1 - \alpha) Q = (\alpha \oP + (1 - \alpha) \oQ)^t\) and consequently, from Lemma~\ref{lem: p^t}, we get \(\alpha P + (1 - \alpha) Q \in \cK(t, x)\) once \(\alpha \oP + (1 - \alpha) \oQ \in \cK(0, x)\). We now prove the convexity of \(\cK(0, x)\). Take \(P, Q \in \cK (0, x)\) and \(\alpha \in (0, 1)\). Furthermore, set \(R := \alpha P + (1 - \alpha) Q\). It is easy to see that \( R \circ X^{-1}_0 = \delta_x. \) By Lemma~\ref{lem: JS convex}, using also its notation, \(R \in \fPas\) and the Lebesgue densities \((b^R, a^R)\) of the \(R\)-characteristics of the coordinate process are given by \[ b^R = \alpha Z^P b^P + (1 - \alpha) Z^Q b^Q, \qquad a^R = \alpha Z^P a^P + (1 - \alpha) Z^Q a^Q. \] Since \(P \in \cK (0, x)\), we have \[ \iint Z^P \1_{\{(b^P, a^P) \hspace{0.05cm}\not \in \hspace{0.05cm} \Theta(X), \hspace{0.05cm} Z^P \hspace{0.05cm} >\hspace{0.05cm} 0\}} d (\llambda \otimes R) = (\llambda \otimes P) ( (b^P, a^P) \not \in \Theta(X), Z^P > 0) = 0. \] Thus, \((\llambda \otimes R)\)-a.e. \[\1_{\{ (b^P, a^P) \hspace{0.05cm}\not \in \hspace{0.05cm} \Theta(X), \hspace{0.05cm} Z^P > 0\}} = 0.\] Similarly, we obtain that \((\llambda \otimes R)\)-a.e. 
\[\1_{\{ (b^Q, a^Q) \hspace{0.05cm}\not \in \hspace{0.05cm} \Theta(X), \hspace{0.05cm} Z^Q > 0\}} = 0.\] Consequently, recalling that \(\{Z^P = 0, Z^Q = 0\} = \emptyset\), by virtue of \eqref{eq: ZP ZQ convex combi}, and using that \(\Theta\) is convex-valued (Condition~\ref{cond: convexity}), we get \begin{align*} (\llambda \otimes R) &\big( (b^R, a^R) \not \in \Theta(X) \big) \\&=(\llambda \otimes R) \big( (b^R, a^R) \not \in \Theta(X), (b^P, a^P) \in \Theta(X), Z^P > 0, (b^Q, a^Q) \in \Theta(X), Z^Q > 0 \big) \\& \qquad \qquad + (\llambda \otimes R) \big( (b^R, a^R) \not \in \Theta(X), (b^P, a^P) \in \Theta(X), Z^P > 0, Z^Q = 0 \big) \\& \qquad \qquad + (\llambda \otimes R) \big( (b^R, a^R) \not \in \Theta(X), Z^P = 0, (b^Q, a^Q) \in \Theta(X), Z^Q > 0 \big) \\& = 0. \end{align*} We conclude that \(R \in \cK(0, x)\). The proof is complete. \end{proof}
\begin{lemma} \label{lem: iwie Markov} Let \(Q \in \mathfrak{P}(\Omega)\) and take \(t \in \bR_+\) and \(\omega, \alpha \in \Omega\) such that \(\omega (t) = \alpha (t)\). Then, \[ \delta_\alpha \otimes_t Q \in \cC(t, \alpha) \quad \Longleftrightarrow \quad \delta_\omega \otimes_t Q \in \cC (t, \omega). \] \end{lemma}
\begin{proof} Set \(\oQ := \delta_\alpha \otimes_t Q\) and \(\oP := \delta_\omega \otimes_t Q\). Suppose that \( \oQ \in \cC(t,\alpha) \). Thanks to Lemma \ref{lem: implication c^*}, we have \(\oQ_t \in \cC(0, \omega (t))\). Since \(\oQ_t = \oP_t\), we also have \(\oP_t \in \cC(0, \omega (t))\). Thus, Lemma \ref{lem: jacod restatements} yields that \(\oP \in \fPas (t)\) and \((\oP \otimes \llambda)\)-a.e. \[ (d B^{\oP}_{\cdot + t}/d \llambda, d C^{\oP}_{\cdot + t}/ d \llambda) \in \Theta ( X \circ \theta_t ) = \Theta (X_{\cdot + t}), \] which implies \( \oP \in \cC(t, \omega) \). The converse implication follows by symmetry. \end{proof}
\begin{definition} A correspondence \(\cU \colon \bR_+ \times \bR \twoheadrightarrow \mathfrak{P}(\Omega)\) is said to be \begin{enumerate} \item[\textup{(i)}] \emph{stable under conditioning} if for any \((t, x) \in \bR_+ \times \bR\), any stopping time \(\tau\) with \(t \leq \tau < \infty\), and any \(P \in \cU(t, x)\), there exists a \(P\)-null set \(N \in \cF_\tau\) such that \(\delta_{\omega (\tau(\omega))} \otimes_{\tau (\omega)} P (\cdot | \mathcal{F}_\tau) (\omega) \in \cU(\tau (\omega), \omega (\tau (\omega)))\) for all \(\omega \not \in N\); \item[\textup{(ii)}] \emph{stable under pasting} if for any \((t, x) \in \bR_+ \times \bR\), any stopping time \(\tau\) with \(t \leq \tau < \infty\), any \(P \in \cU(t, x)\) and any \(\mathcal{F}_\tau\)-measurable map \(\Omega \ni \omega \mapsto Q_\omega \in \mathfrak{P}(\Omega)\) the following implication holds: \[ P\text{-a.a. } \omega \in \Omega \quad \delta_{\omega (\tau(\omega))} \otimes_{\tau (\omega)} Q_\omega \in \cU (\tau (\omega), \omega (\tau(\omega)))\quad \Longrightarrow \quad P \otimes_\tau Q \in \cU(t, x). \] \end{enumerate} \end{definition}
\begin{lemma} \label{lem: K stable under both} The correspondence \(\cK\) is stable under conditioning and pasting. \end{lemma}
\begin{proof} Stability under conditioning follows from \cite[Corollary~6.12]{CN22} and Lemma~\ref{lem: iwie Markov}, and stability under pasting follows from Lemma \ref{lem: iwie Markov} and \cite[Lemma~6.17]{CN22}.
\end{proof}
Recall from \cite[Definition 18.1]{hitchi} that a correspondence \(\mathcal{U} \colon \bR_+ \times \bR \twoheadrightarrow \mathfrak{P}(\Omega)\) is called \emph{measurable} if the lower inverse \(\{ (t, x) \in \bR_+ \times \bR \colon \mathcal{U} (t, x) \cap F \not = \emptyset\}\) is Borel for every closed set \(F \subset \mathfrak{P}(\Omega)\).
\begin{lemma} \label{lem: U to U*} Suppose that \(\cU \colon \bR_+ \times \bR \twoheadrightarrow \mathfrak{P}(\Omega)\) is a measurable correspondence with nonempty and compact values such that, for all \((t, x) \in \bR_+ \times \bR\) and \(P \in \cU (t, x)\), \(P (X_s = x\text{ for all } s \in [0, t])= 1\). Suppose further that \(\cU\) is stable under conditioning and pasting. Then, for any \(\phi \in \usc_b (\bR; \bR)\), the correspondence \[ \cU^* (t, x) := \Big\{ P \in \cU (t, x) \colon E^P \big[ \phi (X_T) \big] = \sup_{Q \in \cU (t, x)} E^Q \big[ \phi (X_T) \big] \Big\} \] is also measurable with nonempty and compact values and it is stable under conditioning and pasting. Further, if \(\cU\) has convex values, then so does \(\cU^*\). \end{lemma}
\begin{proof} We adapt the proof of \cite[Lemma 12.2.2]{SV}, see also the proof of \cite[Lemma~3.4 (a, d)]{hausmann86}. As \(\phi\) is assumed to be upper semicontinuous, \cite[Theorem 2.43]{hitchi} implies that \( \cU^* \) has nonempty and compact values. Moreover, \cite[Theorem~18.10]{hitchi} and \cite[Lemma~12.1.7]{SV} imply that \(\cU^*\) is measurable. The final claim for the convexity is obvious. It remains to show that \(\cU^*\) is stable under conditioning and pasting. Take \((t, x) \in \bR_+ \times \bR, P \in \cU^* (t, x)\) and let \(\tau\) be a stopping time such that \(t \leq \tau < \infty\). We define \begin{align*} N &:= \big\{ \omega \in \Omega \colon P_\omega := \delta_{\omega (\tau (\omega))} \otimes_{\tau (\omega)} P (\cdot | \cF_\tau) (\omega) \not \in \cU (\tau (\omega), \omega (\tau (\omega)))\big\}, \\ A &:= \big\{ \omega \in \Omega \backslash N \colon P_\omega \not \in \cU^* (\tau (\omega), \omega (\tau (\omega)))\big\}. \end{align*} As \(\cU\) is stable under conditioning, we have \(P(N) = 0\). By \cite[Lemma 12.1.9]{SV}, \(N, A \in \cF_\tau\). As we already know that \(\cU^*\) is measurable, by virtue of \cite[Theorem 12.1.10]{SV}, there exists a measurable map \((s, y) \mapsto R (s, y)\) such that \(R (s, y) \in \cU^* (s, y)\). We set \(R_\omega := R (\tau (\omega), \omega (\tau (\omega)))\), for \(\omega \in \Omega\), and note that \(\omega \mapsto R_\omega\) is \(\cF_\tau\)-measurable. Further, we set \[ Q_\omega := \begin{cases} R_\omega, & \omega \in N \cup A,\\ P_\omega, & \omega \not \in N \cup A. \end{cases} \] By definition of \(R\) and \(N\), \(Q_\omega \in \cU (\tau(\omega), \omega (\tau (\omega)))\) for all \(\omega \in \Omega\).
As \(\cU\) is stable under pasting, we have \(P \otimes_{\tau} Q\in \cU(t, x)\) and we obtain \begin{align*} \sup_{Q^* \in \cU (t, x)} &E^{Q^*} \big[ \phi (X_T) \big] \\&\geq E^{P \otimes_\tau Q} \big[ \phi (X_T) \big] \\&= \int_{N\cup A} E^{\delta_\omega \otimes_{\tau (\omega)} R_\omega} \big[ \phi (X_T) \big] P (d \omega) + E^P \big[ \1_{N^c \cap A^c}E^P \big[ \phi (X_T) | \cF_\tau \big] \big] \\&= \int_{A} \big[ E^{\delta_\omega \otimes_{\tau (\omega)} R_\omega} \big[ \phi (X_T) \big] - E^{\delta_\omega \otimes_{\tau (\omega)} P_\omega} \big[ \phi (X_T) \big] \big] P (d \omega) + \sup_{Q^* \in \cU (t, x)} E^{Q^*} \big[ \phi (X_T) \big] \\&= \int_{A} \big[ E^{R_\omega} \big[ \phi (X_T) \big] - E^{P_\omega} \big[ \phi (X_T) \big] \big] P (d \omega) + \sup_{Q^* \in \cU (t, x)} E^{Q^*} \big[ \phi (X_T) \big]. \end{align*} As \(E^{R_\omega} \big[ \phi (X_T) \big] > E^{P_\omega} \big[ \phi (X_T) \big]\) for all \(\omega \in A\), we conclude that \(P (A) = 0\). This proves that \(\cU^*\) is stable under conditioning. Next, we prove stability under pasting. Let \((t, x) \in \bR_+ \times \bR\), take a stopping time \(\tau\) with \(t \leq \tau < \infty\), a probability measure \(P \in \cU^*(t, x)\) and an \(\mathcal{F}_\tau\)-measurable map \(\Omega \ni \omega \mapsto Q_\omega \in \mathfrak{P}(\Omega)\) such that, for \(P\)-a.a. \(\omega \in \Omega\), \(\delta_{\omega (\tau(\omega))} \otimes_{\tau (\omega)} Q_\omega \in \cU^* (\tau (\omega), \omega (\tau (\omega)))\). As \(\cU\) is stable under pasting, we have \(P \otimes_\tau Q \in \cU (t, x)\). Further, recall that \(\delta_{\omega (\tau (\omega))} \otimes_{\tau (\omega)} P (\cdot | \cF_\tau) (\omega)\in \cU (\tau (\omega), \omega (\tau (\omega)))\) for \(P\)-a.a. \(\omega \in \Omega\), as \(\cU\) is stable under conditioning. Thus, we get \begin{align*} \sup_{Q^* \in \cU (t, x)} E^{Q^*} \big[ \phi (X_T) \big] &\geq E^{P \otimes_\tau Q} \big[ \phi (X_T) \big] \\&= \int E^{\delta_\omega \otimes_{\tau (\omega)} Q_\omega} \big[ \phi (X_T) \big] P (d \omega) \\&= \int E^{\delta_{\omega (\tau (\omega))} \otimes_{\tau (\omega)} Q_\omega} \big[ \phi (X_T) \big] \1_{\{\tau (\omega) < T\}}P (d \omega) + E^P \big[ \phi (X_T) \1_{\{T \leq \tau\}}\big] \\&= \int \sup_{Q^* \in \cU (\tau (\omega), \omega (\tau (\omega)))} E^{Q^*} \big[ \phi (X_T) \big] \1_{\{\tau (\omega) < T\}} P (d \omega) + E^P \big[ \phi (X_T) \1_{\{T \leq \tau\}}\big] \\&\geq \int E^{\delta_{\omega (\tau (\omega))} \otimes_{\tau (\omega)} P (\cdot | \cF_\tau)(\omega)} \big[ \phi (X_T) \big] \1_{\{\tau (\omega) < T\}} P (d \omega) + E^P \big[ \phi (X_T) \1_{\{T \leq \tau\}}\big] \\&= E^P \big[ E^P \big[ \phi (X_T) | \cF_\tau \big] \1_{\{\tau < T\}} \big] + E^P \big[ \phi (X_T) \1_{\{T \leq \tau\}}\big] \\&= E^P \big[ \phi (X_T) \big] \\&= \sup_{Q^* \in \cU (t, x)} E^{Q^*} \big[ \phi (X_T) \big]. \end{align*} This implies that \(P \otimes_\tau Q\in \cU^* (t, x)\). The proof is complete. \end{proof} \subsubsection{Proof of Theorem \ref{theo: strong Markov selection}} We adapt the proofs of \cite[Theorems 6.2.3 and 12.2.3]{SV}, cf. also the proofs of \cite[Proposition 6.6]{nicole1987compactification} and \cite[Proposition 3.2]{hausmann86}. Fix a finite time horizon \(T > 0\) and a function \(\psi \in \usc_b(\bR; \bR)\). Let \(\{\sigma_n \colon n \in \mathbb{N}\}\) be a dense subset of \((0, \infty)\) and let \(\{\phi_n \colon n \in \mathbb{N}\}\) be a dense subset of \(C_c (\bR)\). 
Furthermore, let \((\lambda_N, f_N)_{N \in \mathbb{N}}\) be an enumeration of \(\{(\sigma_m, \phi_n) \colon n, m \in \mathbb{N}\}\). For \((t, x) \in \bR_+ \times \bR\), define inductively \[ \cK^*_0 (t, x) := \Big\{ P \in \cK (t, x)\colon E^P \big[ \psi (X_T) \big] = \sup_{Q \in \cK(t, x)} E^Q \big[ \psi (X_T) \big] \Big\} \] and \[ \cK^*_{N + 1} (t, x) := \Big\{ P \in \cK^*_N (t, x) \colon E^P \big[ f_{N + 1} (X_{\lambda_{N + 1}}) \big] = \sup_{Q \in \cK^*_N (t, x)} E^Q \big[ f_{N + 1} (X_{\lambda_{N + 1}}) \big] \Big\}, \quad N \in \mathbb{Z}_+. \] Moreover, we set \[\cK^*_\infty (t, x) := \bigcap_{N = 0}^\infty \cK^*_N (t, x).\] Thanks to Proposition \ref{prop: K upper hemi and compact} and Lemmata \ref{lem: r^* compact} and \ref{lem: K stable under both}, the correspondence \(\cK\) is measurable with nonempty convex and compact values and it is further stable under conditioning and pasting. Thus, by Lemma \ref{lem: U to U*}, the same is true for \(\cK^*_0\) and, by induction, also for every \(\cK^*_N, N \in \mathbb{N}\). As (arbitrary) intersections of convex and compact sets are themselves convex and compact, \(\cK^*_\infty\) has convex and compact values. Further, by Cantor's intersection theorem, \(\cK^*_\infty\) has nonempty values, and, by \cite[Lemma~18.4]{hitchi}, \(\cK^*_\infty\) is measurable. Moreover, it is clear that \(\cK^*_\infty\) is stable under conditioning, as this is the case for every \(\cK^*_N, N \in \mathbb{Z}_+\). We now show that \(\cK^*_\infty\) is singleton-valued. Take \(P, Q \in \cK^*_\infty (t, x)\) for some \((t, x) \in \bR_+ \times \bR\). By definition of \(\cK^*_\infty\), we have \[ E^P \big[ f_N (X_{\lambda_N}) \big] = E^Q \big[ f_N (X_{\lambda_N})\big], \quad N \in \mathbb{N}. \] This implies that \(P \circ X_s^{-1} = Q \circ X_s^{-1}\) for all \(s \in \bR_+\). Next, we prove that \[ E^P \Big[ \prod_{k = 1}^n g_k (X_{t_k}) \Big] = E^Q \Big[ \prod_{k = 1}^n g_k (X_{t_k}) \Big] \] for all \(g_1, \dots, g_n \in C_b (\bR; \bR), t \leq t_1 < t_2 < \dots < t_n < \infty\) and \(n \in \mathbb{N}\). We use induction on \(n\). For \(n = 1\) the claim is implied by the equality \(P \circ X_s^{-1} = Q \circ X_s^{-1}\) for all \(s \in \bR_+\). Suppose that the claim holds for \(n \in \mathbb{N}\) and take test functions \(g_1, \dots, g_{n + 1} \in C_b (\bR; \bR)\) and times \(t \leq t_1 < \dots < t_{n + 1} < \infty\). We define \[ \mathcal{G}_n := \sigma (X_{t_k}, k = 1, \dots, n). \] Since \[ E^P \Big[ \prod_{k = 1}^{n + 1} g_k (X_{t_k}) \Big] = E^P \Big[ E^P \big[ g_{n + 1} (X_{t_{n + 1}}) | \mathcal{G}_n \big] \prod_{k = 1}^n g_k (X_{t_k}) \Big], \] it suffices to show that \(P\)-a.s. \[ E^P \big[ g_{n + 1} (X_{t_{n + 1}}) | \mathcal{G}_n \big] = E^Q \big[ g_{n + 1} (X_{t_{n + 1}}) | \mathcal{G}_n \big]. \] As \(\cK^*_\infty\) is stable under conditioning, there exists a null set \(N_1 \in \cF_{t_n}\) such that \(\delta_{\omega (t_n)} \otimes_{t_n} P (\cdot | \cF_{t_n}) (\omega) \in \cK^*_\infty (t_n, \omega (t_n))\) for all \(\omega \not \in N_1\).
Notice that, by the tower rule, there exists a \(P\)-null set \(N_2 \in \mathcal{G}_n\) such that, for all \(\omega \not \in N_2\) and all \(A \in \cF\), \begin{equation} \label{eq: condi} \begin{split} \int \delta_{\omega' (t_n)} \otimes_{t_n} P (A | \cF_{t_n}) (\omega') P (d \omega' | \mathcal{G}_n) (\omega) &= \iint \1_A (\omega' (t_n) \otimes_{t_n} \alpha) P (d \alpha | \cF_{t_n}) (\omega' ) P (d \omega' | \mathcal{G}_n) (\omega) \\&= \iint \1_A (\omega (t_n) \otimes_{t_n} \alpha) P (d \alpha | \cF_{t_n}) (\omega' ) P (d \omega' | \mathcal{G}_n) (\omega) \\&= \int \1_A (\omega (t_n) \otimes_{t_n} \omega') P (d \omega' | \mathcal{G}_n) (\omega) \\&= (\delta_{\omega (t_n)} \otimes_{t_n} P (\cdot | \mathcal{G}_n) (\omega)) (A). \end{split} \end{equation} Let \(N_3 := \{P (N_1 | \mathcal{G}_n) > 0\} \in \mathcal{G}_n\). Clearly, \(E^P [ P (N_1| \mathcal{G}_n)] = P(N_1) = 0\), which implies that \(P (N_3) = 0\). Take \(\omega \not \in N_2 \cup N_3\). As \(\cK^*_\infty\) has convex and compact values and \(\delta_{\omega' (t_n)} \otimes_{t_n} P (\cdot | \cF_{t_n}) (\omega') \in \cK^*_\infty (t_n, \omega' (t_n))\) for all \(\omega' \not \in N_1\), we have \[ \int \delta_{\omega' (t_n)} \otimes_{t_n} P (A | \cF_{t_n}) (\omega') P (d \omega' | \mathcal{G}_n) (\omega) \in \cK^*_\infty (t_n, \omega (t_n)). \] Consequently, by virtue of \eqref{eq: condi}, we conclude that \(\delta_{\omega (t_n)} \otimes_{t_n} P (\cdot | \mathcal{G}_n) (\omega) \in \cK^*_\infty (t_n, \omega (t_n))\). Similarly, there exists a \(Q\)-null set \(N_4 \in \mathcal{G}_n\) such that \(\delta_{\omega (t_n)} \otimes_{t_n} Q (\cdot | \mathcal{G}_n) (\omega) \in \cK^*_\infty (t_n, \omega (t_n))\) for all \(\omega \not \in N_4\). Set \(N := N_2 \cup N_3 \cup N_4\). As \(P = Q\) on \(\mathcal{G}_n\), we get that \(P (N) = 0\). For all \(\omega \not \in N\), the base case of the induction implies that \begin{align*} E^P\big[ g_{n + 1} (X_{t_{n + 1}}) | \mathcal{G}_n\big] (\omega) &= E^{\delta_{\omega (t_n)} \otimes_{t_n} P (\cdot | \mathcal{G}_n)(\omega)} \big[ g_{n + 1} (X_{t_{n + 1}})\big] \\&= E^{\delta_{\omega (t_n)} \otimes_{t_n} Q (\cdot | \mathcal{G}_n)(\omega)} \big[ g_{n + 1} (X_{t_{n + 1}})\big] \\&= E^Q\big[ g_{n + 1} (X_{t_{n + 1}}) | \mathcal{G}_n\big] (\omega). \end{align*} The induction step is complete and hence, \(P = Q\). We have proved that \(\cK^*_\infty\) is singleton-valued and we write \(\cK^*_\infty (s, y) = \{P_{(s, y)}\}\). By the measurability of \(\cK^*_\infty\), the map \((s, y) \mapsto P_{(s,y)}\) is measurable. It remains to show the strong Markov property of the family \(\{P_{(s, y)} \colon (s, y) \in \bR_+ \times \bR\}\). Take \((s, y) \in \bR_+ \times \bR\). As \(\cK^*_\infty\) is stable under conditioning, for every finite stopping time \(\tau \geq s\), there exists a \(P_{(s, y)}\)-null set \(N\) such that, for all \(\omega \not \in N\), \[ \delta_{\omega (\tau (\omega))} \otimes_{\tau (\omega)} P_{(s, y)} (\cdot | \cF_{\tau})(\omega) \in \cK^*_\infty (\tau (\omega), \omega (\tau (\omega))) = \{P_{(\tau (\omega), \omega (\tau (\omega)))}\}.\] This yields, for all \(\omega \not \in N\), that \[ P_{(s, y)} (\cdot | \cF_\tau)(\omega) = \delta_\omega \otimes_{\tau (\omega)} \big[ \delta_{\omega (\tau (\omega))} \otimes_{\tau (\omega)} P_{(s, y)} (\cdot | \cF_{\tau})(\omega)\big] = \delta_\omega \otimes_{\tau (\omega)} P_{(\tau (\omega), \omega (\tau (\omega)))}. \] This is the strong Markov property and consequently, the proof is complete.
\qed \begin{remark} Notice that the strong Markov property of the selection \(\{P_{(s, y)} \colon (s, y) \in \bR_+ \times \bR\}\) follows solely from the stability under conditioning property of \(\cK^*_\infty\). We emphasise that the stability under pasting property of each \(\cK^*_N, N \in \mathbb{Z}_+,\) is crucial for its proof. Indeed, in Lemma \ref{lem: U to U*}, the fact that \(\cU\) is stable under pasting has been used to establish that \(\cU^*\) is stable under conditioning. \end{remark} \subsection{Proof of the Strong Feller Selection Principle: Theorem \ref{theo: Feller selection}} We start with the following partial extension of Lemma \ref{lem: maximal inequality}. \begin{lemma}\label{lem: moment bound abvanced} Suppose that Condition \ref{cond: LG} holds. Let \(T, m > 0\) and let \(K \subset \bR\) be bounded. Then, \[ \sup_{s \in [0, T]} \sup_{x \in K} \sup_{P \in \cK (s, x)} E^P \Big[ \sup_{r \in [0, T]} | X_r |^m \Big] < \infty. \] \end{lemma} \begin{proof} For every \((s, x) \in [0, T] \times \bR\), Lemma \ref{lem: k idenity} yields that \[ \sup_{P \in \cK (s, x)} E^P \Big[ \sup_{r \in [0, T]} | X_r |^m \Big] = \sup_{P \in \cK (0, x)} E^{P^s}\Big[ \sup_{r \in [0, T]} | X_r |^m \Big] \leq \sup_{P \in \cK (0, x)} E^P \Big[ \sup_{r \in [0, T]} | X_r |^m \Big]. \] Now, the claim follows from Lemma \ref{lem: maximal inequality}. \end{proof} \begin{theorem} \label{theo: selection is Feller} Suppose that the Conditions \ref{cond: LG}, \ref{cond: continuity} and \ref{cond: ellipticity} hold. Let \(\p := \{P_{ (t, x)} \colon (t, x) \in \bR_+ \times \bR\}\) be a strong Markov family such that \(P_{(t, x)} \in \cK (t, x)\) for all \((t, x) \in \bR_+ \times \bR\). Then, \(\p\) has the strong Feller and the \(C_0\)--Feller property. If in addition the Conditions \ref{cond: bdd} and \ref{cond: uniform ellipticity} hold, then \(\p\) has the uniform strong Feller property. \end{theorem} \begin{proof} First of all, thanks to the fundamental results \cite[Theorems 6.24, 7.14 (iii) and 7.16 (i)]{cinlar80} about Markovian It\^o semimartingales, there are two Borel functions \(\mu \colon \bR_+ \times \bR \to \bR\) and \(\sigma^2 \colon \bR_+ \times \bR \to \bR_+\) such that, for every \((s, x) \in \bR_+ \times \bR\), \(P_{(s, x)}\)-a.s. for \(\llambda\)-a.a. \(t \in \bR_+\) \[ b^P_{t + s} = \mu (t + s, X_t), \quad a^P_{t + s} = \sigma^2 (t + s, X_t). \] By virtue of the Conditions \ref{cond: LG}, \ref{cond: continuity} and \ref{cond: ellipticity}, we can w.l.o.g. assume that \(b\) and \(\sigma^2\) are locally bounded and that \(\sigma^2\) is locally bounded away from zero. For \(M > 0\), we set \begin{align*} \mu_M (t, x) &:= \begin{cases} \mu (t, x), & (t, x) \in [0, M] \times [-M, M], \\ 0, & \text{otherwise}, \end{cases} \\ \sigma^2_M (t, x) &:= \begin{cases} \sigma^2 (t, x), & (t, x) \in [0, M] \times [-M, M], \\ \sigma^2 (t \wedge M, x_0), & \text{otherwise}, \end{cases} \end{align*} where \(x_0 \in \bR\) is an arbitrary, but fixed, reference point. Furthermore, we define \[ \rho_M^s := \inf \{t \geq s \colon |X_t| \geq M\} \wedge M, \quad s \in \bR_+, M > 0. 
\] Recall from \cite{SV} that a probability measure \(P\) on \((\Omega, \cF)\) is said to be a \emph{solution to the martingale problem for \((\mu_M, \sigma^2_M)\) starting from \((s, x) \in \bR_+ \times \bR\)} if \(P( X_t = x \text{ for all } t \in [0, s]) = 1\) and the processes \[ f (X_t) - \int_s^t \big[ \mu (r, X_r) f' (X_r) + \tfrac{1}{2} \sigma^2 (r, X_r) f'' (X_r) \big] dr \colon \ t \geq s, \ \ f \in C^\infty_c (\bR; \bR), \] are \(P\)-martingales. Due to \cite[Exercise 7.3.3]{SV} (see also \cite[Theorem 7.1.6]{SV}), for every starting value \((s, x) \in \bR_+ \times \bR\), there exists a unique solution \(P^M_{(s, x)}\) to the martingale problem for \((\mu_M, \sigma^2_M)\) starting from \((s, x)\). Furthermore, by \cite[Corollary 10.1.2]{SV}, we have \(P_{(s, x)} = P^M_{(s, x)}\) on \(\cF_{\rho^s_M}\) for all \(M > 0\) and \((s, x) \in \bR_+ \times \bR\). Next, take \(T, \varepsilon > 0\) and fix a bounded Borel function \(\phi \colon \bR\to \bR\). W.l.o.g., we assume that \(| \phi | \leq 1\). Take \((t^n, x^n)_{n \in \mathbb{Z}_+} \in [0, T) \times \bR\) such that \((t^n, x^n) \to (t^0, x^0)\). For \(M > T\), by Lemma \ref{lem: moment bound abvanced}, there exists a constant \(C > 0\), which is independent of \(M\), such that, for all \(n \in \mathbb{Z}_+\), \[ P_{(t^n, x^n)} ( \rho^{t^n}_M \leq T) = P_{(t^n, x^n)} \Big( \sup_{s \in [t^n, T]} |X_s| \geq M \Big) \leq \frac{C}{M}. \] We take \(M > T\) large enough such that \[ \sup_{n \in \mathbb{Z}_+} P_{(t^n, x^n)} ( \rho^{t^n}_M \leq T) \leq \varepsilon. \] Thanks to \cite[Exercise 7.3.5]{SV} (see also \cite[Theorem 7.1.9, Exercise 7.3.3]{SV}), there exists an \(N \in \mathbb{N}\), which in particular depends on \(M\), such that, for all \(n \geq N\), \[ \big| E^{P^M_{(t^n, x^n)}} \big[ \phi (X_T) \big] - E^{P^M_{(t^0, x^0)}}\big[ \phi (X_T) \big] \big| \leq \varepsilon. \] Now, for all \(n \geq N\), we obtain \begin{align*} \big| E^{P_{(t^n, x^n)}} \big[ \phi (X_T) \big] &- E^{P_{(t^0, x^0)}}\big[ \phi (X_T) \big] \big| \\&\leq \big| E^{P^M_{(t^n, x^n)}} \big[ \phi (X_T) \1_{\{T < \rho^{t_n}_M\}} \big] - E^{P^M_{(t^0, x^0)}}\big[ \phi (X_T) \1_{\{T < \rho^{t^0}_M\}} \big] \big| \\&\hspace{4.925cm} + P_{(t^n, x^n)} ( \rho^{t^n}_M \leq T) + P_{ (t^0, x^0)} (\rho^{t^0}_M \leq T) \\&\leq \big| E^{P^M_{(t^n, x^n)}} \big[ \phi (X_T) \1_{\{T < \rho^{t_n}_M\}} \big] - E^{P^M_{(t^0, x^0)}}\big[ \phi (X_T) \1_{\{T < \rho^{t^0}_M\}} \big] \big| +2 \varepsilon \\&\leq \big| E^{P^M_{(t^n, x^n)}} \big[ \phi (X_T) \big] - E^{P^M_{(t^0, x^0)}}\big[ \phi (X_T) \big] \big| \\&\hspace{4.925cm} + P^M_{(t^n, x^n)} ( \rho^{t^n}_M \leq T) + P^M_{ (t^0, x^0)} (\rho^{t^0}_M \leq T) +2 \varepsilon \\&\leq 3 \varepsilon + P_{(t^n, x^n)} ( \rho^{t^n}_M \leq T) + P_{ (t^0, x^0)} (\rho^{t^0}_M \leq T) \leq 5 \varepsilon, \end{align*} where we use that \(\mathcal{F}_T \cap \{T < \rho^s_M\} \subset \cF_{\rho^s_M}, \{\rho^s_M \leq T\} \in \cF_{\rho^s_M}\) and that \(P_{(s, x)} = P^M_{(s, x)}\) on \(\cF_{\rho^s_M}\). This proves the strong Feller property of \(\p=\{P_{(s, x)} \colon (s, x) \in \bR_+ \times \bR\}\). Take \(0 \leq s < T\) and let \(\phi \colon \bR \to \bR\) be a continuous function vanishing at infinity such that \(|\phi| \leq 1\). By the strong Feller property, the map \(x \mapsto E^{P_{(s, x)}} [ \phi (X_T) ]\) is continuous. We now adapt the proof of \cite[Theorem~1]{criens20SPA} to conclude the \(C_0\)--Feller property. Fix \(\varepsilon > 0\). 
As \(\phi\) vanishes at infinity, there exists an \(M = M (\varepsilon) > 0\) such that \(|\phi (y)| \leq \varepsilon\) for all \(|y| > M\). We obtain \begin{align} \label{eq: ineq to show C0} E^{P_{(s, x)}} \big[ \phi (X_T) \big] &\leq \varepsilon + P_{(s, x)} ( |X_T| \leq M ). \end{align} In the following we establish an estimate for the second term. Set \(V (y) := 1 / (1 + y^2)\) for \(y \in \bR\) and let \((b^{P_{(s, x)}}_{\cdot + s}, a^{P_{(s, x)}}_{\cdot + s})\) be the Lebesgue densities of the \(P_{(s, x)}\)-characteristics of the shifted coordinate process \(X_{\cdot + s}\). Then, by Condition \ref{cond: LG}, we get \((\llambda \otimes P_{(s, x)})\)-a.e. \begin{align*} b^{P_{(s,x)}}_{\cdot + s} V' (X_{\cdot + s}) + \frac{a^{P_{(s, x)}}_{\cdot + s} V'' (X_{\cdot + s})}{2} &= \frac{- 2 b^{P_{(s, x)}}_{\cdot + s} X_{\cdot + s}}{(1 + X_{\cdot + s}^2)^2} + \frac{a^{P_{(s, x)}}_{\cdot + s}}{2} \Big( \frac{8 X_{\cdot + s}^2}{(1 + X_{\cdot + s}^2)^3} - \frac{2}{(1 + X_{\cdot + s}^2)^2}\Big) \\&\leq \C \Big( \frac{|b^{P_{(s, x)}}_{\cdot + s}| |X_{\cdot + s}|}{(1 + X_{\cdot + s}^2)^2} + \frac{a^{P_{(s, x)}}_{\cdot + s}}{(1 + X_{\cdot + s}^2)^2}\Big) \\&\leq \C \Big( \frac{ |X_{\cdot + s}| + X_{\cdot + s}^2}{(1 + X_{\cdot + s}^2)^2} + \frac{1}{1 + X_{\cdot + s}^2} \Big) \\&\leq \frac{\C}{1 + X_{\cdot + s}^2} = \C V(X), \end{align*} where the constant \(\C > 0\) depends only on the linear growth constant from Condition \ref{cond: LG}. In the above computation, \(\C\) might have changed from line to line. In the following, let \(\C > 0\) be the constant from the last inequality. By It\^o's formula, the process \[ e^{- \C \cdot} V (X_{\cdot + s}) - \int_0^\cdot e^{- \C r} \Big( - \C V (X_{r + s}) + b^{P_{(s, x)}}_{r + s} V' (X_{r + s}) + \frac{a^{P_{(s, x)}}_{r + s} V'' (X_{r + s})}{2} \Big) dr \] is a local \(P_{(s, x)}\)-martingale. Thus, as \[ \int_0^\cdot e^{- \C r} \Big( - \C V (X_{r + s}) + b^{P_{(s, x)}}_{r + s} V' (X_{r + s}) + \frac{a^{P_{(s, x)}}_{r + s} V'' (X_{r + s})}{2} \Big) dr \] is a decreasing process, \(e^{- \C \hspace{0.025cm} \cdot} V (X_{\cdot + s})\) is a local \(P_{(s, x)}\)-supermartingale and hence, as it is bounded, a true \(P_{(s, x)}\)-supermartingale. By Chebyshev's inequality and the supermartingale property, we obtain \begin{align*} P_{(s, x)} ( |X_T| \leq M ) &= P_{(s, x)} ( V (X_T) \geq V (M) ) \\&\leq \frac{ E^{P_{(s, x)}} \big[ V (X_T) \big] }{ V (M) } \\&\leq \frac{e^{\C T} V (x)}{V (M)} \longrightarrow 0 \text{ as } |x| \to \infty. \end{align*} Using this observation and \eqref{eq: ineq to show C0}, we obtain the existence of a compact set \(K = K(\varepsilon) \subset \bR\) such that \[ E^{P_{(s, x )}}\big[ \phi (X_T) \big] \leq 2 \varepsilon \] for all \(x \not \in K\). We conclude that \(x \mapsto E^{P_{(s, x)}} [ \phi (X_T) ]\) vanishes at infinity and therefore, that \(\p=\{P_{(s, x)} \colon (s, x) \in \bR_+ \times \bR\}\) has the \(C_0\)--Feller property. Finally, we assume additionally that the Conditions~\ref{cond: bdd} and \ref{cond: uniform ellipticity} hold. Then, \(\mu\) and \(\sigma^2\) are both bounded and \(\sigma^2\) is uniformly bounded away from zero. Thanks to these observations, it follows from \cite[Exercise~7.3.5]{SV} (see also \cite[Theorem~7.1.9, Exercise~7.3.3]{SV}), that \(\p\) is a uniform strong Feller family. The proof is complete. \end{proof} \begin{proof}[Proof of Theorem \ref{theo: Feller selection}] The theorem follows directly from the Theorems \ref{theo: strong Markov selection} and \ref{theo: selection is Feller}.
\end{proof} \section{Uniform Feller Selection Principles: Proof of Theorems \ref{theo: UFSP} and \ref{theo: UFSP 2}} \label{sec: uniform selection} \subsection{Proof of Theorem \ref{theo: UFSP}} The following lemma can be seen as a version of \cite[Theorem~3]{hajek85} where a global Lipschitz assumption is replaced by a local H\"older and ellipticity assumption. The idea of proof is the same, i.e., we use a time change argument and the optional sampling theorem. \begin{lemma} \label{lem: convex order} Suppose that \(P\in \mathfrak{P}(\Omega)\) is such that \(X\) is a continuous local \(P\)-martingale starting at \(x_0\) with quadratic variation process \(\int_0^\cdot a_s^P ds\) such that \((\llambda \otimes P)\)-a.e. \(a^P > 0\). Let \(a \colon \mathbb{R} \to (0, \infty)\) be such that \(\sqrt{a}\) is locally H\"older continuous with exponent \(1/2\) and \(a (x) \leq \C(1 + |x|^2)\) for all \(x \in \mathbb{R}\). Suppose that \(P\)-a.s. \(a^P_t \leq a(X_t)\) for \(\llambda\)-a.a. \(t \in \mathbb{R}_+\). Then, the SDE \begin{align}\label{eq: SDE comparison} d Y_t = \sqrt{a} (Y_t) d W_t, \quad Y_0 = x_0, \end{align} satisfies strong existence and pathwise uniqueness and we denote its unique law by \(Q\).\footnote{Uniqueness in law follows from pathwise uniqueness by the Yamada--Watanabe theorem (\cite[Proposition 5.3.20]{KaraShre}).} Moreover, for every convex function \(\psi \colon \mathbb{R} \to \mathbb{R}\) of polynomial growth, i.e., such that \[ | \psi (x) | \leq \C(1 + |x|^m) \text{ for some } m \in \mathbb{N}, \] and time \(T \in \bR_+\), we have \[ E^P \big[ \psi (X_T) \big] \leq E^Q \big[ \psi (X_T)\big]. \] \end{lemma} \begin{proof} The fact that the SDE \eqref{eq: SDE comparison} satisfies strong existence and pathwise uniqueness is classical (see, e.g., \cite[Corollary 5.5.10, Remark 5.5.11]{KaraShre}). We now prove the second claim. Clearly, it suffices to consider \(T > 0\). Define \[ L_t := \begin{cases} \int_0^t \frac{a^P_s ds}{a(X_s)},& t \leq 2T, \\ L_{2T} + (t - 2T),& t \geq 2T.\end{cases} \] By our assumptions, \(P\)-a.s. \(L\) is continuous, strictly increasing, finite and \(L_t \leq t\) for all \(t \in \mathbb{R}_+\). Furthermore, \(P\)-a.s. \(L_t \to \infty\) as \(t \to \infty\). Denote the right inverse of \(L\) by \(S\), i.e., define \[S_t := \inf \{s \geq 0 \colon L_s > t\}\] for \(t \in \mathbb{R}_+\). By the above properties of \(L\), it is well-known (\cite[p. 180]{RY}) that \(P\)-a.s. \(S\) is also continuous, strictly increasing and finite, and furthermore, \(S_t \geq t\) for all \(t \in \mathbb{R}_+\). Using standard rules for Stieltjes integrals (see, e.g., \cite[Proposition V.1.4]{RY}), we obtain that \(P\)-a.s. for all \(t \in [0, L_T]\) \[ S_t = \int_0^{S_t} \frac{a (X_s)}{a^P_s} d L_s = \int_0^t \frac{a (X_{S_s})}{a^P_{S_s}} d L_{S_s} = \int_0^t \frac{a (X_{S_s})}{a^P_{S_s}} ds. \] In other words, \(P\)-a.s. \[ \1_{[0, L_T]} (t) d S_t = \1_{[0, L_T]}(t) \frac{a (X_{S_t})}{a^P_{S_t}} dt. \] By \cite[Proposition V.1.5]{RY}, the time changed process \(X_S\) is a continuous local \(P\)-martingale (for a time changed filtration) such that, \(P\)-a.s. for all \(t \in [0, L_T]\), we have \[ \langle X_S, X_S \rangle_{t} = \langle X, X\rangle_{S_{t}} = \int^{S_{t}}_0 a^P_s ds = \int_0^t a^P_{S_s} d S_s = \int_0^t a (X_{S_s}) ds. 
\] Thus, it is classical (\cite[Proposition 5.4.6]{KaraShre}) that, possibly on a standard extension of the underlying probability space, there exists a one-dimensional Brownian motion \(W\) such that \[ X_{S_{\cdot \wedge L_T}} = x_0 + \int_0^{\cdot \wedge L_T} \sqrt{a}(X_{S_s}) dW_s. \] With little abuse of notation, we denote the underlying probability measure still by \(P\). Thanks to the strong existence property of the SDE \eqref{eq: SDE comparison}, there exists a continuous adapted process \(Y\) such that \[ Y = x_0 + \int_0^\cdot \sqrt{a} (Y_s) d W_s. \] As the SDE \eqref{eq: SDE comparison} satisfies pathwise uniqueness, it follows from \cite[Lemma 3]{criens20SPA} that \(P\)-a.s. \(X_{S_{ \cdot \wedge L_T}} = Y_{\cdot \wedge L_T}\). Notice that, by the linear growth assumption on \(\sqrt{a}\), the process \(Y\) is a \(P\)-martingale. Indeed, this follows readily from a second moment bound (see, e.g., \cite[Problem 5.3.15]{KaraShre}), which implies integrability of the quadratic variation process. We are in the position to complete the proof. Let \(\psi\) be a convex function of polynomial growth. Using again the linear growth assumption, we have polynomial moment bounds (see, e.g., \cite[Problem 5.3.15]{KaraShre}) which imply that \(\psi (Y_t) \in L^1(P)\) for all \(t \in \mathbb{R}_+\). Consequently, \(\psi (Y)\) is a \(P\)-submartingale. As \(P\)-a.s. \(L_T \leq T\), using the optional sampling theorem, we finally obtain that \[ E^P \big[ \psi (X_T) \big] = E^P \big[ \psi (X_{S_{L_T}})\big] = E^P \big[ \psi (Y_{L_T}) \big] \leq E^P \big[ \psi (Y_T)\big] = E^Q \big[ \psi (X_T)\big]. \] The proof is complete. \end{proof} \begin{proof}[Proof of Theorem \ref{theo: UFSP}] We set \[ a^* (x) := \sup \big\{ a (f, x) \colon f \in F \big\} \] for \(x \in \mathbb{R}\). Notice that \(a^*\) is locally H\"older continuous with exponent \(1/2\) thanks to Condition \ref{cond: local holder}. Furthermore, as \(a > 0\) by Condition \ref{cond: ellipticity}, compactness of \(F\) and continuity of \(a\) in the control variable (Condition \ref{cond: continuity in control}) show that \(a^* > 0\). Finally, \(\sqrt{a^*}\) is of linear growth by Condition \ref{cond: LG}. For every \(x\in \mathbb{R}\), let \(P^*_x\) be the unique law of a solution process to the SDE \[ d Y_t = \sqrt{a^*} (Y_t) d W_t, \quad Y_0 = x, \] where \(W\) is a one-dimensional Brownian motion. The existence of \(P^*_x\) is classical (or follows from Lemma~\ref{lem: convex order}). Moreover, \cite[Corollary 10.1.4, Theorem 10.2.2]{SV} yield that \(\{P^*_x \colon x \in \mathbb{R}\}\) is a strong Feller family. It is left to prove the formula \eqref{eq: USFSP}. Take \(\psi \in \mathbb{G}_{cx}\) and \(T \in \bR_+\). Now, Lemma~\ref{lem: convex order} yields that \[ \mathcal{E}^x (\psi (X_T)) \leq E^{P^*_x} \big[ \psi (X_T) \big]. \] As \(P^*_x \in \cC(0, x)\), we also have \[ E^{P^*_x} \big[ \psi (X_T) \big] \leq \mathcal{E}^x (\psi (X_T)). \] Putting these pieces together yields the formula \eqref{eq: USFSP} and hence, the proof is complete. \end{proof} \subsection{Proof of Theorem \ref{theo: UFSP 2}} We set \[ b^* (x) := \sup \big\{ b (f, x) \colon f \in F \big\} \] for \(x \in \mathbb{R}\). As \(F\) is compact and \(b\) is continuous, \(b^*\) is continuous by Berge's maximum theorem (\cite[Theorem 17.31]{hitchi}). Moreover, \(b^*\) and \(a^*\) are of linear growth by Condition \ref{cond: LG}. 
Consequently, taking Condition \ref{cond: holder} into consideration, \cite[Theorem 4.53]{engelbert1991strong} and \cite[Theorem 10.2.2]{SV} yield that the SDE \begin{align} \label{eq: SDE comparison 2} d Y_t = b^* (Y_t) dt + \sqrt{a^*} (Y_t) d W_t \end{align} satisfies weak existence and pathwise uniqueness. Let \(P^*_x\) be the unique law of a solution process starting at \(x \in \mathbb{R}\). Then, by \cite[Corollary 10.1.4, Theorem 10.2.2]{SV}, the family \(\{P^*_x \colon x \in \mathbb{R}\}\) is strongly Feller. Let \(\psi \colon \Omega \to \mathbb{R}\) be a bounded increasing Borel function. By construction, we have \[ E^{P^*_x} \big[ \psi \big] \leq \mathcal{E}^x (\psi), \quad x \in \mathbb{R}. \] Take \(x \in \mathbb{R}\) and \(P \in \cR( x )\). Thanks to \cite[Theorem VI.1.1]{ikeda2014stochastic}, possibly on a standard extension of \((\Omega, \mathcal{F}, \F, P)\), there exists a solution process \(Y\) to the SDE \eqref{eq: SDE comparison 2} with \(Y_0 = x\) such that a.s. \(X_t \leq Y_t\) for all \(t \in \mathbb{R}_+\). Clearly, this yields that \[ E^P \big[ \psi \big] \leq E^{P^*_x} \big[ \psi \big], \] and taking the \(\sup\) over all \(P \in \cR(x)\) finally gives \[ \mathcal{E}^x (\psi) \leq E^{P^*_x} \big[ \psi \big]. \] The proof is complete. \qed \section{A nonlinear Kolmogorov Equation: Proof of Theorems \ref{thm: new viscosity no unique} and \ref{theo: viscosity with ell}} \label{sec: viscosity property} \subsection{Proof of Theorem \ref{thm: new viscosity no unique}} It follows from \cite[Theorem 4.3]{CN22} that \( v \) is a weak sense viscosity solution to \eqref{eq: PDE}. Hence, it suffices to show continuity of \([0, T] \times \mathbb{R} \ni (t,x) \mapsto \cE^x(\psi(X_{T-t})) \). Due to Theorem~\ref{thm: new very main}, the map \(x \mapsto \cE^x (\psi (X_{T- t}))\) is continuous for every \(t \in [0, T]\). We now show that, for any compact set~\(K \subset \mathbb{R}\), \[ \sup_{x \in K} \big| \cE^x (\psi (X_{T - t})) - \cE^x (\psi (X_{T - s}))\big| \to 0 \text{ as } s \to t. \] Clearly, by the triangle inequality, this then implies continuity of \([0, T] \times \mathbb{R} \ni (t,x) \mapsto \cE^x(\psi(X_{T-t})) \). Take a compact set \(K \subset \mathbb{R}\), \(r, \varepsilon > 0\) and \(s, t \in [0, T]\). In the following \(\C > 0\) denotes a generic constant which is independent of \(r, \varepsilon, s, t\). By Lemma \ref{lem: maximal inequality}, we get \begin{align*} \sup_{x \in K} \sup_{P \in \cR(x)} P ( |X_{T - t}| > r \text{ or } |X_{T - s}| > r) \leq \tfrac{\C}{r}. 
\end{align*} Using Lemma \ref{lem: maximal inequality} again, we further obtain that \begin{align*} \sup_{x \in K} \big| \cE^x & (\psi (X_{T - t})) - \cE^x (\psi (X_{T - s}))\big| \\ & \leq \sup_{x \in K} \sup_{P \in \cR(x)} E^P \big[ | \psi (X_{T - t}) - \psi (X_{T - s})| \big] \\ &\leq \sup_{x \in K} \sup_{P \in \cR(x)} E^P \big[ | \psi (X_{T - t}) - \psi (X_{T - s})| \1_{\{ |X_{T - t} - X_{T - s}| \leq \varepsilon\} \hspace{0.05cm}\cap \hspace{0.05cm} \{ |X_{T - t}| > r \text{ or } |X_{T - s}| > r \}}\big] \\&\hspace{2cm} + \sup_{x \in K} \sup_{P \in \cR(x)} E^P \big[ | \psi (X_{T - t}) - \psi (X_{T - s})| \1_{\{ |X_{T - t} - X_{T - s}| \leq \varepsilon,\ |X_{T - t}| \leq r,\ |X_{T - s}| \leq r \}}\big] \\&\hspace{2cm} +\sup_{x \in K} \sup_{P \in \cR(x)} E^P \big[ | \psi (X_{T - t}) - \psi (X_{T - s})| \1_{\{ |X_{T - t} - X_{T - s}| > \varepsilon \}}\big] \\&\leq \frac{2 \C \|\psi\|_\infty}{r} + \sup \big\{ |\psi (z) - \psi(y)| \colon |z-y| \leq \varepsilon, |z| \leq r, |y| \leq r \big\} \\&\hspace{2cm}+ 2 \|\psi\|_\infty \sup_{x \in K} \sup_{P \in \cR(x)} P(|X_{T - t} - X_{T - s}| > \varepsilon) \\&\leq \frac{2 \C \|\psi\|_\infty}{r} + \sup \big\{ |\psi (z) - \psi(y)| \colon |z-y| \leq \varepsilon, |z| \leq r, |y| \leq r \big\} + \frac{ \C \|\psi\|_\infty}{\varepsilon} |t - s|^{1/2}. \end{align*} Notice that the middle term converges to zero as \(\varepsilon \to 0\), since continuous functions are uniformly continuous on compact sets. Thus, choosing first \(r\) large enough and then \(\varepsilon\) small enough, we can make \[ \sup_{x \in K} \big| \cE^x (\psi (X_{T - t})) - \cE^x (\psi (X_{T - s}))\big| \] arbitrarily small when \(s \to t\). This yields the claim. \qed \subsection{Proof of Lemma~\ref{lem: en upper lower}} \label{sec: pf en upper lower} We only detail the proof for the subsolution property of \(v^*\). The supersolution property of \(v_*\) will follow in the same spirit. Let \(\phi \in C^{2, 3}_b([0, T] \times \bR; \bR)\) such that \(\phi \geq v^*\) and \(\phi (t, x) = v^*(t, x)\) for some \((t, x) \in [0, T) \times \bR \). Notice that we use a test function of higher regularity than in our definition of ``viscosity subsolution''. This is without loss of generality, cf. \cite[Lemma~2.4, Remark~2.5]{hol16}. There exists a sequence \((t^n, x^n)_{n = 1}^\infty \subset [0, T) \times \bR\) such that \((t^n, x^n) \to (t, x)\) and \[ v^* (t, x) = \lim_{n \to \infty} v (t^n, x^n). \] We take an arbitrary \(u \in (0, T - t)\). By the dynamic programming principle (\cite[Theorem 3.1]{CN22}), for every \(n \in \mathbb{N}\), we obtain that \begin{equation} \label{eq: main ev upper} \begin{split} 0 &= \sup_{P \in \cR (x^n)} E^P \big[ v (t^n + u, X_u) \big] - v (t^n, x^n) \\&\leq \sup_{P \in \cR (x^n)} E^P \big[ v^* (t^n + u, X_u) \big] - v^* (t, x) + v^* (t, x) - v (t^n, x^n) \\&= \sup_{P \in \cR (x^n)} E^P \big[ v^* (t^n + u, X_u) \big] - \phi (t, x) + v^* (t, x) - v (t^n, x^n) \\&\leq \sup_{P \in \cR (x^n)} E^P \big[ \phi (t^n + u, X_u) \big] - \phi (t^n, x^n) + \phi (t^n, x^n) - \phi (t, x) + v^* (t, x) - v (t^n, x^n). \end{split} \end{equation} Now, we use an argument as in the proof of \cite[Proposition~5.4]{neufeld2017nonlinear}. Take \(P \in \cR(x^n)\). It\^o's formula yields that \(P\)-a.s. \begin{align*} \phi (t^n + u, X_u) - \phi (t^n, x^n) = \int_0^u \big[ \partial_t \phi (t^n + s, X_s) + b^P_s \partial_x \phi (t^n + s, X_s) + \tfrac{1}{2} c^P_s & \partial^2_x \phi (t^n + s, X_s) \big] ds \\&+ \text{local \(P\)-martingale}. 
\end{align*} Thanks to the linear growth Condition~\ref{cond: LG} and Lemma~\ref{lem: maximal inequality}, it follows that the local \(P\)-martingale part is actually a true \(P\)-martingale. Hence, we obtain that \begin{align*} E^P \big[ \phi (t^n &+ u, X_u) - \phi (t^n, x^n) \big] \\&= \int_0^u E^P \big[ \partial_t \phi (t^n + s, X_s) + b^P_s \partial_x \phi (t^n + s, X_s) + \tfrac{1}{2} c^P_s \partial^2_x \phi (t^n + s, X_s) \big] ds \\&=: I_u. \end{align*} As \(\phi \in C^{2, 3}_b ([0, T] \times \bR; \bR)\), the derivatives \(\partial_t \phi, \partial_x \phi\) and \(\partial^2_x \phi\) are (globally) Lipschitz continuous. Hence, we obtain that \begin{align*} \int_0^u \partial_t \phi (t^n + s , X_s) ds &\leq u \partial_t \phi (t^n, x^n) + \int_0^u | \partial_t \phi (t^n + s , X_s) - \partial_t \phi (t^n, x^n) | ds \\&\leq u \partial_t \phi (t^n, x^n) + \C \int_0^u \big( s + |X_s - x^n| \big) ds \\&= u \partial_t \phi (t^n, x^n) + \C u^2 + \C \int_0^u|X_s - x^n| ds. \end{align*} By Lemma~\ref{lem: maximal inequality}, this implies that \begin{align*} E^P \Big[ \int_0^u \partial_t \phi (t^n + s , X_s) ds \Big] &\leq u \partial_t \phi (t^n, x^n) + \C u^2 + \C \int_0^u E^P \big[ |X_s - x^n| \big] ds \\&\leq u \partial_t \phi (t^n, x^n) + \C u^2 + \C \int_0^u s^{1/2} ds \\&\leq u \partial_t \phi (t^n, x^n) + \C u^{3/2}. \end{align*} Similarly, using also the linear growth Condition~\ref{cond: LG}, we obtain that \begin{align*} E^P \Big[ \int_0^u \big| b^P_s \big| \, \big| \partial_x &\phi (t^n + s, X_s) - \partial_x \phi (t^n, x^n) \big| ds \Big] \\&\leq E^P \Big[ \int_0^u \C \big( s + |X_s - x^n| \big) \big(1 + |X_s| \big) ds \Big] \\&\leq E^P \Big[ \int_0^u \C \big( s + |X_s - x^n| \big) \big(1 + |X_s - x^n| \big) ds \Big] \\&\leq \C u^{3/2}, \end{align*} and that \[ E^P \Big[ \int_0^u \big| c^P_s \big| \, \big| \partial_x^2 \phi (t^n + s, X_s) - \partial_x^2 \phi (t^n, x^n) \big| ds \Big] \leq \C u^{3/2}. \] In summary, we conclude that \begin{align*} I_u \leq \C u^{3/2} + u \partial_t \phi (t^n, x^n) + \int_0^u E^P \big[ G^n (X_s) \big] ds, \end{align*} where \[ G^n (x) := \sup \Big\{ b (f, x) \partial_x \phi (t^n, x^n) + \tfrac{1}{2} a (f, x) \partial^2_x \phi (t^n, x^n) \colon f \in F \Big\}. \] Using Condition \ref{cond: Lipschitz continuity}, we obtain that \begin{align*} G^n (x) \leq G (t^n, x^n, \phi) + C |x - x^n|. \end{align*} Hence, using again Lemma \ref{lem: maximal inequality}, it follows that \begin{align*} I_u \leq \C u^{3/2} + u \partial_t \phi (t^n, x^n) + u G (t^n, x^n, \phi) + \C u^{3/2}. \end{align*} Thanks to Condition~\ref{cond: continuity} and Berge's maximum theorem, the map \((s, y) \mapsto G (s, y, \phi)\) is continuous. Recalling \eqref{eq: main ev upper} and taking the limit \(n \to \infty\), we obtain that \begin{align*} 0 &\leq \C u^{3/2} + u \partial_t \phi (t^n, x^n) + u G (t^n, x^n, \phi) + \phi (t^n, x^n) - \phi (t, x) + v^* (t, x) - v (t^n, x^n) \\&\to \C u^{3/2} + u \partial_t \phi (t, x) + u G (t, x, \phi). \end{align*} Dividing the last term by \(u\) and then letting \(u \searrow 0\), we finally conclude that \begin{align*} 0 \leq \partial_t \phi (t, x) + G (t, x, \phi). \end{align*} We proved that \(v^*\) is a viscosity subsolution to \eqref{eq: PDE}. \qed \section{Relation to the Nisio Semigroup} \label{sec: pf nisio} \subsection{Proof of Lemma~\ref{lem: Sf semigroup}} We first establish an auxiliary result. 
Recall that \(Y^{f, x}\) are continuous processes with dynamics \[ d Y_t^{f, x} = b (f, Y^{f, x}_t) dt + \sqrt{a} (f, Y^{f, x}_t) d W_t, \quad Y^{f, x}_0 = x, \] which are defined on the same probability space w.r.t. the same Brownian motion \(W\). \begin{lemma} \label{lem: A1} Assume that Condition~\ref{cond: fix f cond} holds and let \(\beta\) be the uniform (in the \(F\)-variable) Lipschitz constant of the drift coefficient. Then, for every \(f \in F, t \in \bR_+\) and \(x, y \in \bR\), \[ E^P \big[ | Y^{f, x}_t - Y^{f, y}_t | \big] \leq |x - y| \, e^{\beta t}. \] \end{lemma} \begin{proof} Take \(f \in F, x, y \in \bR\) and set \[ Z := Y^{f, x} - Y^{f, y} = x - y + \int_0^\cdot \big( b (f, Y^{f, x}_t) - b (f, Y^{f, y}_t) \big) dt + \int_0^\cdot \big( \sqrt{a} (f, Y^{f, x}_t) - \sqrt{a} (f, Y^{f, y}_t) \big) dW_t. \] Fix \(M > 0\), set \[ T_M := \inf \big \{t \geq 0 \colon |Y^{f, x}_t| \vee |Y^{f, y}_t| \geq M \big \}. \] For every \(t > 0\), we have \(P\)-a.s. \begin{align*} \int_0^{t \wedge T_M} \frac{\1_{\{ 0 < Z_s \leq \varepsilon\}} d [ Z, Z ]_s}{|Z_s|} &\leq \int_0^{t \wedge T_M} \frac{( \sqrt{a} (f, Y^{f, x}_s) - \sqrt{a} (f, Y^{f, y}_s) )^2 ds }{|Z_s|} \leq \C t < \infty. \end{align*} Hence, \cite[Lemma~IX.3.3]{RY} yields that \(P\)-a.s. \(L^0_{\cdot \wedge T_M} (Z) = 0\), where \(L^0(Z)\) denotes the semimartingale local time of \(Z\) in zero. As \(P\)-a.s. \(T_M \nearrow \infty\) with \(M \to \infty\), this implies that \(P\)-a.s. \(L^0 (Z) = 0\). Now, we get from Tanaka's formula (\cite[Theorem~VI.1.2]{RY}) that \(P\)-a.s. \[ |Z| = |x - y| + \int_0^\cdot \on{sgn} (Z_s) d Z_s. \] Using that the local martingale part of the stochastic integral is a true martingale (which follows from the linear growth part of Condition~\ref{cond: fix f cond}), we obtain, for every \(t \in \bR_+\), that \begin{align*} E^P \big[ |Z_t| \big] &\leq |x - y| + \int_0^t E^P \big[ |b (f, Y^{f, x}_s) - b (f, Y^{f, y}_s) | \big] ds \\&\leq |x - y| + \int_0^t \beta E^P \big[ |Z_s| \big] ds. \end{align*} Gronwall's lemma yields that \(E^P [ |Z_t|] \leq |x - y| e^{\beta t}\). This is the claim. \end{proof} \begin{proof}[Proof of Lemma~\ref{lem: Sf semigroup}] As we already know that \((S^f_t)_{t \in \bR_+}\) has the semigroup property, it suffices to prove that \(S^f_t\) maps \(\uc_b (\bR; \bR)\) into \(\uc_b (\bR; \bR)\). Take \(u \in \uc_b (\bR; \bR)\) and \(\varepsilon > 0\). By the definition of uniform continuity, there exists a constant \(\delta = \delta (\varepsilon) > 0\) such that \[ x, y \in \bR,\, |x - y| < \delta \ \Longrightarrow \ | u (x) - u (y) | < \varepsilon / 2. \] For all \(x, y \in \bR\) and \(t > 0\), Lemma~\ref{lem: A1} yields that \begin{align*} \big| S^{f}_t (u) (x) - S^f_t (u) (y) \big| &\leq E \big[ \big| u (Y^{f, x}_t) - u (Y^{f, y}_t) \big| \big] \\&\leq \varepsilon / 2 + 2 \|u\|_\infty P (| Y^{f, x}_t - Y^{f, y}_t | \geq \delta) \\&\leq \varepsilon / 2 + |x - y| \, 2 \|u\|_\infty e^{\beta t}/ \delta. \end{align*} Therefore, there exists a \(\delta' = \delta' (\varepsilon) > 0\) such that \[ x, y \in \bR,\, |x - y| < \delta' \ \Longrightarrow \ \big| S^{f}_t (u) (x) - S^f_t (u) (y) \big| < \varepsilon. \] This means that \(S^f_t (u) \in \uc_b (\bR; \bR)\) and hence, completes the proof. \end{proof} \subsection{Proof of Proposition~\ref{prop: first NR}} (i). We have to check the assumptions (A1) and (A2) from \cite{NR}. Then, the claim follows from \cite[Theorem~2.5]{NR}. 
In our setting, Condition~\ref{cond: fix f cond} implies these standing assumptions by virtue of the Lemmata~\ref{lem: Sf semigroup} and \ref{lem: A1}. \smallskip (ii). We denote the space of all bounded twice differentiable uniformly continuous functions from \(\bR\) into \(\bR\) with bounded and uniformly continuous derivatives by \(\uc_b^{\,2} (\bR; \bR)\). Take a function \(u \in \uc_b^{\, 2} (\bR; \bR)\) and set \[ L_u := \sup \big\{ \big|b (f, x) u' (x) + \tfrac{1}{2} a ( f, x ) u'' (x)\big| \colon f \in F, x \in \bR \big\}. \] Thanks to Condition~\ref{cond: bdd}, \(L_u < \infty\). Take \(x \in \bR\). By It\^o's formula, we obtain that \[ \big| E^P \big[ u (Y^{f, x}_t) \big] - u (x) \big| \leq t L_u \] for all \(t \in \bR_+\). Hence, as \(\uc^{\, 2}_b (\bR; \bR)\) is dense in \(\uc_b(\bR; \bR)\) for the uniform topology, it follows from \cite[Proposition~3.5]{NR} that \((\mathscr{S}_t)_{t \in \bR_+}\) is strongly continuous. The proof is complete. \smallskip (iii). Let \(C^2_0 (\bR; \bR)\) be the space of all twice continuously differentiable functions \(g \colon \bR \to \bR\) such that \(g, g', g'' \in C_0 (\bR; \bR)\). We set \[ D (A^f) := \Big\{ u \in \uc_b (\bR; \bR) \colon \exists g \in \uc_b (\bR; \bR) \text{ s.t. } \lim_{h \searrow 0} \Big\| \frac{S^f_h (u) - u}{h} - g \Big\|_\infty = 0 \Big\}. \] It is well-known that each \((S^f_t)_{t \in \bR_+}\) is a strongly continuous semigroup on \(C_0(\bR; \bR)\), cf. \cite[Theorem~32.11]{Kallenberg}. Hence, \(C^2_0 (\bR; \bR) \subset \bigcap_{f \in F} D (A^f)\) by \cite[Proposition~VII.1.7]{RY}. Now, thanks to \cite[Proposition~5.4]{NR}, the claim follows once we prove that for every \(y \in \bR\) and \(\delta > 0\) there exists a function \(\phi = \phi_{y, \delta} \in C_0^2 (\bR; \bR)\) such that \(\phi (y) = 1, 0 \leq \phi \leq 1\) and \(\sup_{f \in F} \|A^f (\phi) \|_\infty \leq \delta\). Fix \(y \in \bR\) and \(\delta > 0\). For \(R > 0\), set \[ \phi (x) := \frac{R}{R + (x - y)^2}. \] Clearly, \(\phi (y) = 1\) and \(\phi \in C_0 (\bR; [0, 1])\). Furthermore, as \begin{align*} \phi' (x) &= \frac{- 2 R (x - y)}{(R + (x - y)^2)^2}, \\ \phi'' (x) &= \frac{8 R (x - y)^2}{(R + (x - y)^2)^3} - \frac{2R}{(R + (x - y)^2)^2}, \end{align*} we have \(\phi \in C^2_0 (\bR; \bR)\). Using Condition~\ref{cond: bdd}, we obtain that \begin{align*} | A^f (\phi) (x) | &\leq \C \Big[ \frac{R |x - y|}{(R + (x - y)^2)^2} + \frac{8 R (x - y)^2}{(R + (x - y)^2)^3} + \frac{2}{R} \Big] \\&\leq \C \Big[ \frac{32}{27 R} + \frac{3 \sqrt{3}}{16 \sqrt{R}} + \frac{2}{R} \Big], \end{align*} where we use that \(z \mapsto R |z|/(R + z^2)^2\) attains its maximum at \(z^2 = R/3\) and \(z \mapsto 8 R z^2/ (R + z^2)^3\) attains its maximum at \(z^2 = R/2\). Now, we can choose \(R = R(\delta)\) large enough such that \(\sup_{f \in F} \|A^f ( \phi) \|_\infty \leq \delta\). The proof is complete. \qed \subsection{Proof of Proposition~\ref{prop: extension}} Thanks to Proposition \ref{prop: first NR}, it follows from \cite[Remark~5.4 (b, c)]{denk2018} that, for every \(t \in \bR_+\), there exists a unique sublinear operator \(\widehat{\mathscr{S}}_t \colon C_b (\bR; \bR) \to C_b (\bR; \bR)\) such that \(\mathscr{S}_t ( \phi) = \widehat{\mathscr{S}}_t (\phi)\) for all \(\phi \in \uc_b(\bR; \bR)\) that is continuous from above on \(C_b(\bR;\bR)\); see also \cite[Remark~5.3~(c)]{NR}. Moreover, \cite[Remark~5.4 (d)]{denk2018} ensures the semigroup property of \((\widehat{\mathscr{S}}_t)_{t \in \bR_+}\). 
This completes the proof.\qed \subsection{Proof of Theorem~\ref{thm: nisio}} {\em Step 1.} Let \(\lip^2_b (\bR; \bR)\) be the space of all bounded Lipschitz continuous and twice continuously differentiable functions from \(\bR\) into \(\bR\) with bounded and Lipschitz continuous first and second derivatives. First, we prove that \(\lip^2_b(\bR; \bR) \subset \bigcap_{f \in F} D (A^f)\). Take \(u \in \lip^2_b (\bR; \bR)\). Recall the following fact: If \(g_1\) and \(g_2\) are bounded and Lipschitz continuous with Lipschitz constants \(L_1\) and \(L_2\), then \(g_1 g_2\) is also Lipschitz continuous with Lipschitz constant \(\|g_1\|_\infty L_2 + \|g_2\|_\infty L_1\). Using this fact and the Conditions~\ref{cond: bdd} and \ref{cond: Lipschitz continuity}, we obtain the existence of a constant \(\C = \C_u\), that is in particular independent of \(f\), such that, for all \(x, y \in \bR\), \begin{align} \label{eq: A lip} | A^f (u)(x) - A^f (u) (y) | \leq \C |x - y|. \end{align} Now, for \(h \in (0, 1)\) and \(x \in \bR\), It\^o's formula, the Lipschitz bound \eqref{eq: A lip}, the Burkholder--Davis--Gundy inequality and Condition~\ref{cond: bdd} yield that \begin{align*} \Big|\frac{S^f_h (u) (x) - u(x)}{h} - A^f (u) (x)\Big| &= \Big| \frac{1}{h} E^P \Big[ \int_0^h (A^f (u) (Y^{f, x}_s) - A^f (u) (x)) ds \Big]\Big| \\&\leq \frac{\C}{h} \int_0^h E^P \big[ | Y^{f, x}_s - x |^2 \big]^{1/2} ds \\&\leq \frac{\C}{h} \int_0^h s^{1/2} ds = \C h^{1/2}. \end{align*} This proves that \(u \in \bigcap_{f \in F} D(A^f)\). \smallskip {\em Step 2.} Next, we prove that \begin{align} \label{eq: prop 4.2 NR} \sup_{f \in F} \| S^f_h (A^f (u)) - A^f (u) \|_\infty \to 0, \quad h \searrow 0, \end{align} for all \(u \in \lip^2_b (\bR; \bR)\). For every \(h \in (0, 1)\), using \eqref{eq: A lip}, the boundedness of \(b\) and \(a\) and the Burkholder--Davis--Gundy inequality, we obtain that \begin{align*} \big| E^P \big[ A^f (u) (Y^{f, x}_h) \big] - A^f (u) (x) \big| &\leq \C E^P \big[ | Y^{f, x}_h - x| \big] \leq \C h^{1/2}. \end{align*} We stress that the constant \(\C\) does not depend on \(f, x\) and \(h\). Hence, \eqref{eq: prop 4.2 NR} holds. \smallskip {\em Step 3.} Recalling that \((\mathscr{S}_t)_{t\in \bR_+}\) is strongly continuous by Proposition~\ref{prop: first NR}, and thanks to the Steps~1 and 2, and the boundedness Condition~\ref{cond: bdd}, it follows from \cite[Proposition~4.2]{NR} that the class \(\lip^{2}_b (\bR; \bR)\) is contained in the domain \(\mathcal{D}\) of the generator of \((\mathscr{S}_t)_{t\in \bR_+}\) that is defined on p. 4414 in \cite{NR}. Using this observation and \cite[Remark~4.4, Theorem~4.5]{NR}, we get, for every \(u_0 \in \uc_b (\bR; \bR)\) and every \(T > 0\), that the map \([0, T] \ni t \mapsto \mathscr{S}_{T - t} (u_0)\) is a viscosity solution to the PDE \[ \partial_t u + \sup_{f \in F} A^f (u) = 0 \text{ on } [0, T) \times \bR, \quad u (T, \cdot\,) = u_0, \] where the class of test functions is now \(\lip^{1, 2}_b ( [0, T) \times \bR )\). It is well-known (see, e.g., \cite[Lemma~2.4,~Remark 2.5]{hol16} or \cite[p. 4419]{NR}) that the class \(\lip^{1, 2}_b ( [0, T) \times \bR )\) leads to the same definition of viscosity solution as the standard class \(C^{1, 2}_b ([0, T) \times \bR)\). Hence, it follows from Theorem~\ref{theo: viscosity with ell} that \(\mathscr{S}_{T - t} (u_0) = T_{T - t} (u_0)\) for all \(t \in [0, T]\). 
As \(u_0 \in \uc_b (\bR; \bR)\) and \(T > 0\) were arbitrary, we conclude that \((\mathscr{S}_t)_{t\in \bR_+} = (T_t)_{t \in \bR_+}\) on \(\uc_b(\bR; \bR)\). The proof is complete. \qed \begin{thebibliography}{1} \bibitem{hitchi} C.~D.~Aliprantis and K.~B.~Border. \newblock {\em Infinite Dimensional Analysis: A Hitchhiker's Guide}. \newblock Springer Berlin Heidelberg, 3rd ed., 2006. \bibitem{FK} C.~Beck, M.~Hutzenthaler and A.~Jentzen. \newblock On nonlinear Feynman--Kac formulas for viscosity solutions of semilinear parabolic partial differential equations. \newblock {\em Stochastics and Dynamics}, 21(8):2150048, 2021. \bibitem{bogachev} V.~I.~Bogachev. \newblock {\em Measure Theory}. \newblock Springer Berlin Heidelberg, 2007. \bibitem{cinlar80} E.~\c{C}inlar, J.~Jacod, P.~Protter and M.~J.~Sharpe. \newblock Semimartingales and Markov Processes. \newblock {\em Zeitschrift f\"ur Wahrscheinlichkeitstheorie und verwandte Gebiete}, 54:161--219, 1980. \bibitem{Cra02} M.~G.~Crandall, M.~Kocan and A.~\'Swiech. \newblock \(L^p\)-Theory for fully nonlinear uniformly parabolic equations. \newblock {\em Communications in Partial Differential Equations}, 25:1997-2053, 2000. \bibitem{criens20SPA} D.~Criens. \newblock Lyapunov criteria for the Feller–Dynkin property of martingale problems. \newblock {\em Stochastic Processes and their Applications}, 130:2693--2736, 2020. \bibitem{CN22} D.~Criens and L.~Niemann. \newblock Nonlinear continuous semimartingales. \newblock arXiv:2204.07823v4, 2023. \bibitem{berge} R.J.~Deneckere and L.~M.~Lawrence. \newblock A generalized theorem of the maximum. \newblock {\em Economic Theory}, 3(1):99--107, 1993. \bibitem{denk2018} R.~Denk, M.~Kupper, and M.~Nendel. \newblock Kolmogorov-type and general extension results for nonlinear expectations. \newblock {\em Banach Journal of Mathematical Analysis}, 12(3):515--540, 2018. \bibitem{denk2020semigroup} R.~Denk, M.~Kupper, and M.~Nendel. \newblock A semigroup approach to nonlinear L{\'e}vy processes. \newblock {\em Stochastic Processes and their Applications}, 130:1616--1642, 2020. \bibitem{diestel} J.~Diestel and J.~J.~Uhl, Jr. \newblock {\em Vector Measures}. \newblock American Mathematical Society, 1977. \bibitem{nicole1987compactification} N.~El Karoui, D. Nguyen and M. Jeanblanc-Picqu{\'e}. \newblock Compactification methods in the control of degenerate diffusions: existence of an optimal control. \newblock {\em Stochastics}, 20(3):169--219, 1987. \bibitem{ElKa15} N.~El Karoui and X.~Tan. \newblock Capacities, measurable selection and dynamic programming part II: application in stochastic control problems. \newblock arXiv:1310.3364v2, 2015. \bibitem{engelbert1991strong} H.~J.~Engelbert and W.~Schmidt. \newblock Strong Markov Continuous Local Martingales and Solutions of One-Dimensional Stochastic Differential Equations (Part III). \newblock {\em Mathematische Nachrichten}, 151(1):149--197, 1991. \bibitem{fadina2019affine} T.~Fadina, A.~Neufeld, and T.~Schmidt. \newblock Affine processes under parameter uncertainty. \newblock {\em Probability, Uncertainty and Quantitative Risk}, 4(5), 2019. \bibitem{friedman75} A.~Friedman. \newblock {\em Stochastic Differential Equations and Applications Volume 1}. \newblock Academic Press Inc., 1975. \bibitem{GNR22} B.~Goldys, M.~Nendel and M.~R{\"o}ckner. \newblock Operator semigroups in the mixed topology and the infinitesimal description of Markov processes. \newblock arXiv:2204.07484, 2022. \bibitem{hajek85} B.~Hajek. \newblock Mean stochastic comparison of diffusions. 
\newblock {\em Zeitschrift f\"ur Wahrscheinlichkeitstheorie und verwandte Gebiete}, 68:315--329, 1985. \bibitem{hausmann86} U.~G.~Haussmann. \newblock Existence of optimal Markovian controls for degenerate diffusions. \newblock In: Christopeit, N., Helmes, K., Kohlmann, M. (eds.) {\em Stochastic Differential Systems}, Lecture Notes in Control and Information Sciences, vol.~78, Springer Berlin Heidelberg, 1986. \bibitem{HWY} S.-W.~He, J.-G.~Wang and J.-A.~Yan. \newblock {\em Semimartingale Theory and Stochastic Calculus}. \newblock Routledge, 1992. \bibitem{hol16} J.~Hollender. \newblock {\em L{\'e}vy-Type Processes under Uncertainty and Related Nonlocal Equations}. \newblock PhD thesis, TU Dresden, 2016. \bibitem{hu2021g} M.~Hu and S.~Peng. \newblock G-Lévy processes under sublinear expectations. \newblock {\em Probability, Uncertainty and Quantitative Risk}, 6(1), 2021. \bibitem{ikeda2014stochastic} N.~Ikeda and S.~Watanabe. \newblock {\em Stochastic Differential Equations and Diffusion Processes}. \newblock Elsevier, 2nd ed., 2014. \bibitem{jacod79} J.~Jacod. \newblock {\em Calcul stochastique et probl\`emes de martingales}. \newblock Springer Berlin Heidelberg New York, 1979. \bibitem{jacod80} J.~Jacod. \newblock Weak and strong solutions of stochastic differential equations. \newblock {\em Stochastics}, 3:171--191, 1980. \bibitem{JS} J.~Jacod and A.~N.~Shiryaev. \newblock {\em Limit Theorems for Stochastic Processes}. \newblock Springer Berlin Heidelberg, 2nd ed., 2003. \bibitem{JaTy06} S.~Janson and J.~Tysk. \newblock Feynman--Kac formulas for Black--Scholes-type operators. \newblock {\em Bulletin of the London Mathematical Society}, 38:269--282, 2006. \bibitem{Kallenberg} O.~Kallenberg. \newblock {\em Foundations of Modern Probability}. \newblock Springer Nature Switzerland, 3rd edition, 2021. \bibitem{KaraShre} I.~Karatzas and S.~E.~Shreve. \newblock {\em Brownian Motion and Stochastic Calculus}. \newblock Springer New York, 2nd edition, 1991. \bibitem{krylov1973selection} N.~V.~Krylov. \newblock On the selection of a Markov process from a system of processes and the construction of quasi-diffusion processes. \newblock {\em Mathematics of the USSR-Izvestiya}, 7(3):691--708, 1973. \bibitem{kry18} N.~V.~Krylov. \newblock \(C^{1 + \alpha}\)-Regularity of viscosity solutions of general nonlinear parabolic equations. \newblock {\em Journal of Mathematical Sciences}, 232(4):403--427, 2018. \bibitem{kry17} N.~V.~Krylov. \newblock {\em Sobolev Viscosity Solutions for Fully Nonlinear Elliptic and Parabolic Equations}. \newblock American Mathematical Society, 2018. \bibitem{K19} F.~K\"uhn. \newblock Viscosity solutions to Hamilton--Jacobi--Bellman equations associated with sublinear L{\'e}vy(-type) processes. \newblock {\em ALEA}, 16:531--559, 2019. \bibitem{K21} F.~K\"uhn. \newblock On infinitesimal generators of sublinear Markov semigroups. \newblock {\em Osaka Journal of Mathematics}, 58(3):487--508, 2021. \bibitem{neufeld} C.~Liu and A.~Neufeld. \newblock Compactness criterion for semimartingale laws and semimartingale optimal transport. \newblock {\em Transactions of the American Mathematical Society}, 372(1):187--231, 2019. \bibitem{michael} E.~Michael. \newblock Continuous selections I. \newblock {\em Annals of Mathematics}, 63(2):361--382, 1956. \bibitem{NR} M.~Nendel and M.~R{\"o}ckner. \newblock Upper envelopes of families of Feller semigroups and viscosity solutions to a class of nonlinear Cauchy problems.
\newblock {\em SIAM Journal on Control and Optimization}, 59(6):4400--4428, 2021. \bibitem{neufeld2014measurability} A.~Neufeld and M.~Nutz. \newblock Measurability of semimartingale characteristics with respect to the probability law. \newblock {\em Stochastic Processes and their Applications}, 124:3819--3845, 2014. \bibitem{neufeld2017nonlinear} A.~Neufeld and M.~Nutz. \newblock Nonlinear L{\'e}vy processes and their characteristics. \newblock {\em Transactions of the American Mathematical Society}, 369:69--95, 2017. \bibitem{nisio} M.~Nisio. \newblock {\em Stochastic Control Theory}. \newblock Springer Japan, 2nd ed., 2015. \bibitem{NVH} M.~Nutz and R.~van Handel. \newblock Constructing sublinear expectations on path space. \newblock {\em Stochastic Processes and their Applications}, 123(8):3100--3121, 2013. \bibitem{peng2007g} S.~Peng. \newblock G-expectation, G-Brownian motion and related stochastic calculus of It{\^o} type. \newblock In F.~E.~Benth et al., editors, {\em Stochastic Analysis and Applications: The Abel Symposium 2005}, pages 541--567, Springer Berlin Heidelberg, 2007. \bibitem{peng2008multi} S.~Peng. \newblock Multi-dimensional G-Brownian motion and related stochastic calculus under G-expectation. \newblock {\em Stochastic Processes and their Applications}, 118:2223--2253, 2008. \bibitem{Pham} H.~Pham. \newblock {\em Continuous-time Stochastic Control and Optimization with Financial Applications}. \newblock Springer Berlin Heidelberg, 2009. \bibitem{pollard} D.~Pollard. \newblock {\em Convergence of Stochastic Processes}. \newblock Springer New York, 1984. \bibitem{RY} D.~Revuz and M.~Yor. \newblock {\em Continuous Martingales and Brownian Motion}. \newblock Springer Berlin Heidelberg, 3rd edition, 1999. \bibitem{schilling98} R.~L.~Schilling. \newblock Conservativeness and extensions of Feller semigroups. \newblock {\em Positivity}, 2:239--256, 1998. \bibitem{SV} D.~W.~Stroock and S.~R.~S.~Varadhan. \newblock {\em Multidimensional Diffusion Processes}. \newblock Springer Berlin Heidelberg, reprint of 1997 ed., 2006. \end{thebibliography} \end{document}
2205.15189v1
http://arxiv.org/abs/2205.15189v1
Independence number of intersection graphs of axis-parallel segments
\documentclass[11pt]{article} \usepackage[margin=1in]{geometry} \usepackage{libertine} \usepackage{datetime} \usepackage{multirow} \usepackage{booktabs} \usepackage{enumitem} \usepackage{wrapfig} \usepackage{subcaption} \usepackage[T1]{fontenc} \usepackage[utf8]{inputenc} \usepackage{pgfplots} \usepackage{amsmath, amssymb} \usepackage{amsthm} \usepackage{color} \usepackage{cases} \usepackage{xparse} \usepackage{xargs} \usepackage{appendix} \usepackage[boxed,commentsnumbered,noend]{algorithm2e} \usepackage{algpseudocode} \usepackage{verbatim, xspace} \usepackage{tikz} \usepackage{mathtools} \usepackage{hyperref} \usepackage[capitalize,noabbrev,nameinlink]{cleveref} \usepackage{pgfplots} \usepackage{xcolor} \newcommand{\citet}[1]{\cite{#1}} \SetAlgorithmName{Isolation scheme}{isolation scheme}{List of isolaiton schemes} \usepackage[colorinlistoftodos,prependcaption,textsize=tiny]{todonotes} \newcommandx{\unsure}[2][1=]{\todo[linecolor=green,backgroundcolor=green!25,bordercolor=green,#1]{\normalsize #2}} \newcommandx{\improvement}[2][1=]{\todo[inline,linecolor=blue,backgroundcolor=blue!05,bordercolor=blue,#1]{\normalsize #2}} \newcommandx{\info}[2][1=]{\todo[linecolor=yellow,backgroundcolor=yellow!25,bordercolor=yellow,#1]{#2}} \newcommandx{\floatmodel}[2][1=]{\todo[inline,linecolor=red,backgroundcolor=yellow!25,bordercolor=yellow,#1]{#2}} \newcommandx{\thiswillnotshow}[2][1=]{\todo[disable,#1]{#2}} \newcommandx{\karol}[2][1=]{\todo[inline,linecolor=blue,backgroundcolor=blue!25,bordercolor=blue,caption={\normalsize \textbf{Karol}},#1]{\normalsize #2}} \newcommandx{\jana}[2][1=]{\todo[inline,linecolor=red,backgroundcolor=red!25,bordercolor=red,caption={\normalsize \textbf{Jana}},#1]{\normalsize #2}} \newcommandx{\michal}[2][1=]{\todo[inline,linecolor=gray,backgroundcolor=red!25,bordercolor=red,caption={\normalsize \textbf{Micha\l{}}},#1]{\normalsize #2}} \newcommandx{\marco}[2][1=]{\todo[inline,linecolor=green,backgroundcolor=green!25,bordercolor=green,caption={\normalsize \textbf{Marco}},#1]{\normalsize #2}} \crefformat{page}{#2page~#1#3}\Crefformat{page}{#2Page~#1#3}\crefformat{equation}{#2(#1)#3}\Crefformat{equation}{#2(#1)#3}\crefformat{enumi}{#2(#1)#3} \Crefformat{enumi}{#2(#1)#3} \newtheorem{theorem}{Theorem}\newtheorem{lemma}{Lemma} \newtheorem{conjecture}{Conjecture}\newtheorem{proposition}{Proposition} \newtheorem{observation}{Observation} \newtheorem{corollary}{Corollary} \newtheorem{claim}{Claim} \newtheorem{example}{Example} \newtheorem{remark}{Remark} \newtheorem{definition}{Definition} \newcommand{\IH}[1]{\smallskip \noindent \fbox{ \begin{minipage}{\textwidth} \paragraph{Induction hypothesis.} {#1} \end{minipage}}} \newcommand{\Prb}{\mathbb{P}} \newcommand{\bnd}{\partial} \newcommand\myatop[2]{\genfrac{}{}{0pt}{}{#1}{#2}} \newcommand{\floor}[1]{\left\lfloor #1 \right\rfloor} \newcommand{\ceil}[1]{\left\lceil #1 \right\rceil} \newcommand{\half}{\frac{1}{2}} \newcommand{\sm}{\setminus} \newcommand{\eps}{\varepsilon} \newcommand{\receps}{\frac{1}{\eps}} \newcommand{\Oh}{\mathcal{O}} \newcommand{\Os}{\Oh^{\star}} \newcommand{\Otilde}{\widetilde{\Oh}} \newcommand{\Ot}{\Otilde} \newcommand{\symdiff}{\bigtriangleup} \newcommand{\tOh}{\Otilde} \newcommand{\hh}{\mathcal{H}} \newcommand{\ff}{\mathcal{F}} \newcommand{\nat}{\mathbb{N}} \newcommand{\N}{\mathbb{N}} \newcommand{\Z}{\mathbb{Z}} \newcommand{\Ff}{\mathcal{F}} \newcommand{\Rr}{\mathcal{R}} \newcommand{\Ss}{S} \newcommand{\Cc}{\mathcal{C}} \newcommand{\Mm}{\mathcal{M}} \newcommand{\Zz}{\mathcal{Z}} \newcommand{\Gg}{\mathcal{G}} 
\newcommand{\Pp}{\mathcal{P}} \newcommand{\Hh}{\mathcal{H}} \newcommand{\Bb}{\mathcal{B}} \newcommand{\Ii}{\mathcal{I}} \newcommand{\Jj}{\mathcal{J}} \newcommand{\real}{\mathbb{R}_{+}} \newcommand{\pow}{\mathrm{pow}} \newcommand{\Zp}{\mathbb{Z}_p} \newcommand{\poly}{\mathrm{poly}} \newcommand{\polylog}{\mathrm{polylog}} \newcommand{\Mod}[1]{\ (\mathrm{mod}\ #1)} \newcommand{\Sgn}[1]{\mathrm{sgn}(#1)} \newcommand{\prob}[2]{\mathbb{P}_{#2}\left[ #1 \right]} \newcommand{\Ex}[1]{\mathbb{E}\left[ #1 \right]} \newcommand{\een}{\textbf{1}} \newcommand{\ol}{\overline} \newcommand{\CMSOtwo}{\mathsf{CMSO}_2} \newcommand{\angles}[1]{\langle {#1} \rangle} \newcommand{\graphs}{\Gg_\mathrm{seg}} \renewcommand{\leq}{\leqslant} \renewcommand{\geq}{\geqslant} \renewcommand{\le}{\leqslant} \renewcommand{\ge}{\geqslant} \DeclareMathOperator*{\argmin}{arg\,min} \DeclareMathOperator*{\argmax}{arg\,max} \newcommand{\defproblem}[3]{ \vspace{2mm} \vspace{1mm} \noindent\fbox{ \begin{minipage}{0.95\textwidth} #1 \\ {\bf{Input:}} #2 \\ {\bf{Task:}} #3 \end{minipage} } \vspace{2mm} } \title{Independence number of intersection graphs of axis-parallel segments} \date{} \author{ Marco Caoduro\footnote{Laboratoire G-SCOP, Univ. Grenoble Alpes, France, \textsf{[email protected]}.} \and Jana Cslovjecsek\footnote{EPFL, Switzerland, \textsf{[email protected]}.} \and Micha\l{} Pilipczuk\footnote{Institute of Informatics, University of Warsaw, Poland, \textsf{[email protected]}. This work is a part of the project BOBR that has received funding from the European Research Council (ERC) under the European Union's Horizon 2020 research and innovation programme (grant agreement No 948057).} \and Karol W\k{e}grzycki\footnote{Saarland University and Max Planck Institute for Informatics, Saarbr\"ucken, Germany, \textsf{[email protected]}. This work is part of the project TIPEA that has received funding from the European Research Council (ERC) under the European Union's Horizon 2020 research and innovation programme (grant agreement No 850979).} } \begin{document} \maketitle \thispagestyle{empty} \input{chapters/abstract} \begin{picture}(0,0) \put(462,-370) {\hbox{\includegraphics[width=40px]{img/logo-erc.jpg}}} \put(452,-430) {\hbox{\includegraphics[width=60px]{img/logo-eu.pdf}}} \end{picture} \clearpage \setcounter{page}{1} \input{chapters/intro2} \input{chapters/algorithm} \input{chapters/section1} \input{chapters/acknowledgement} \bibliographystyle{abbrv} \bibliography{bib} \end{document} \begin{abstract} We prove that for any triangle-free intersection graph of $n$ axis-parallel segments in the plane, the independence number $\alpha$ of this graph is at least $\alpha \ge n/4 + \Omega(\sqrt{n})$. We complement this with a construction of a graph in this class satisfying $\alpha \le n/4 + c \sqrt{n}$ for an absolute constant $c$, which demonstrates the optimality of our~result. \end{abstract} \section{Introduction} For a graph $G$, the {\em{independence number}} $\alpha(G)$ is the maximum size of an independent set in $G$. Both lower and upper bounds on the independence number were intensively studied in various graph classes. In this paper, we study the independence number in classes of geometric intersection graphs. For a family of geometric objects $\Ss$ in the plane, the intersection graph $G(\Ss)$ has vertex set $\Ss$ and two objects are considered adjacent if they intersect. Naturally, the independence number $\alpha(\Ss)$ is defined as the maximum size of a subset of objects that are pairwise disjoint. 
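As a toy illustration of these notions (the concrete segments below are chosen only for this example and are not used in the sequel), consider the four axis-parallel segments
\[
h_1 = [0,3]\times\{1\}, \qquad h_2 = [0,3]\times\{2\}, \qquad v_1 = \{1\}\times[0,3], \qquad v_2 = \{2\}\times[0,3].
\]
Each horizontal segment crosses each vertical segment in exactly one point, the two horizontal segments are disjoint, and so are the two vertical ones; in particular, no three segments pass through a common point. The intersection graph of this family is a cycle on four vertices, and its independence number equals $2$, as witnessed by the pair $\{h_1, h_2\}$; thus here $\alpha = n/2$ for $n = 4$.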
A simple lower bound on the independence number can be often obtained by studying the \emph{chromatic number} $\chi(G)$ --- the minimum number of colors needed to properly color the vertices of $G$ --- and using the obvious inequality $\alpha(G) \ge n/\chi(G)$. This strategy does not alway provide optimum lower bounds, which will be also the case in this work. Specifically, we consider intersection graphs of axis-parallel segments in the plane where no three segments intersect at a single point. For simplicity, we will denote this class of graphs by $\graphs$. Observe that we have $\chi(G)\le 4$ for every $G \in \graphs$, because we can use two colors to properly color the horizontal segments, and another two for the vertical segments. Hence, if $G\in \graphs$ has $n$ vertices, then $\alpha(G)\ge n/4$. Our two main results, presented below, prove that this simple lower bound can be always improved by an additive term of the order $\sqrt{n}$, but no further improvement is possible. \begin{theorem}\label{thm:lower bound} Let $G$ be a graph in $\graphs$ with $n$ vertices. Then the independence number of $G$ is at least \begin{displaymath} \alpha(G) \ge \frac{n}{4} + c_1\sqrt{n}, \end{displaymath} for some absolute constant $c_1$. \end{theorem} \begin{theorem} \label{thm:upper_bound} For any $n \in \nat$ there exists a graph $G$ in $\graphs$ on $n$ vertices with independence number \begin{displaymath} \alpha(G) \le \frac{n}{4} + c_2 \sqrt{n}, \end{displaymath} for some absolute constant $c_2$. \end{theorem} \paragraph*{Consequences.} The independence number is often studied in relation to the clique covering number. A \emph{clique} in a graph is a set of pairwise adjacent vertices, and the \emph{clique covering number} $\theta(G)$ of a graph $G$ is defined as the minimal size of a partition of the vertex set of $G$ into cliques. For any graph $G$, the clique covering number $\theta(G)$ is a natural upper bound on the independence number $\alpha(G)$. Indeed, an independent set contains at most one vertex from each clique. This implies that for any graph $G$ the ratio $\theta(G)/\alpha(G)$ is at least one. Giving an upper bound on this ratio is a question that was studied for several classes of intersection graphs (\textit{e}.\textit{g}.\ \cite{1985_Gyarfas}, \cite{2008_Kim}). In this topic, the main open question concerns the relation between the independence number and the clique covering number in intersection graphs of axis-parallel rectangles. \begin{conjecture}[Wegner \cite{1965_Wegner}, 1965] \label{conj:wegner} Let $G$ be the intersection graph of a set of axis-parallel rectangles in the plane. Then \[\theta(G) \leq 2\alpha(G) - 1.\] \end{conjecture} For an intersection graph $G$ of axis-parallel rectangles the best known bound on the clique covering number is $\theta(G) = \Oh(\alpha(G)\log^2(\log(\alpha(G))))$ by Correa et. al.~\cite{2015_Correa}. In particular, no linear upper bounds are known. Even, obtaining lower bounds on the maximal ratio $\theta/\alpha$ was until recently an elusive task. For nearly thirty years after Wegner formulated his conjecture, the largest known ratio remained $3/2$, obtained by taking five axis-parallel rectangles forming a cycle. In 1993, Fon-Der-Flaass and Kostochka presented a family of axis-parallel rectangles with clique cover number 5 and independence number 3~\cite{1993_FonDerFlaass}. 
Only in 2015, Jel\'inek constructed families of rectangles with ratio $\theta/\alpha$ arbitrarily close to 2, showing that the constant of $2$ in Wegner's conjecture cannot be improved\footnote{The former construction is attributed to Jel\'inek in \cite[Ackowledgment]{2015_Correa}. }. One consequence of our results is a proof that the ratio $2$ in Wegner's conjecture cannot be improved even in the highly restricted case of axis-parallel segments, even with the assumption of triangle-freeness. More precisely, we have the following corollary. \begin{corollary} \label{cor:ratio} For any $\eps>0$, there exists a graph $G$ in $\graphs$ such that \[\theta(G)\ge (2-\eps)\alpha(G).\] \end{corollary} \Cref{cor:ratio} is a consequence of the full version of \cref{thm:upper_bound} (see Section~\ref{sec:upper_bound}). \Cref{cor:ratio} can be further strengthened to the fractional setting, implying a lower bound on the integrality gap of the standard LP relaxation of the independent set problem. Namely, consider the {\em{fractional independence number}} of a graph $G$, denoted $\alpha^\star(G)$, which is defined similarly to $\alpha(G)$, but every vertex $u$ can be included in the solution with a fractional multiplicity $x_u\in [0,1]$, and the constraints are that $x_u+x_v\leq 1$ for every edge $uv$ of $G$. Similarly, in the {\em{fractional clique cover number}} $\theta^\star(G)$ every clique $K$ in $G$ can be included in the cover with a fractional multiplicity $y_K\in [0,1]$, and the constraints are that $\sum_{K\colon v\in K} y_K\geq 1$ for every vertex $v$. In triangle-free graphs the linear programs defining $\alpha^\star(G)$ and $\theta^\star(G)$ are dual to each other, hence $$\alpha(G)\leq \alpha^\star(G) = \theta^\star(G)\leq \theta(G)\qquad\textrm{for every triangle-free }G.$$ The proof of \Cref{cor:ratio}, based on the full version of \cref{thm:upper_bound}, actually gives the following. \begin{corollary}\label{cor:LP} For any $\eps>0$, there exists a graph $G$ in $\graphs$ such that \[\alpha^\star(G)\ge (2-\eps)\alpha(G).\] Consequently, the integrality gap of the standard LP relaxation of the maximum independent set problem in graphs from $\graphs$ is not smaller than $2$. \end{corollary} We note that recently, G\'alvez et al. gave a polynomial-time $(2+\eps)$-approximation algorithm for the maximum independent set problem in intersection graphs of axis-parallel rectangles~\cite{GalvezKMMPW22-arxiv,GalvezKMMPW22}. Thus, \cref{cor:LP} shows that one cannot improve upon the approximation ratio of $2$ by only relying on the standard LP relaxation, even in the case of axis-parallel segments. Note that in this case, obtaining a $2$-approximation algorithm is very easy: restricting attention to either horizontal or vertical segments reduces the problem to the setting of interval graphs, where it is polynomial-time solvable. \section{The lower bound: proof of \cref{thm:lower bound}} \label{sec:lower_bound} \newcommand{\ply}{\mathrm{ply}} \newcommand{\lhor}{\ell_{\mathrm{horizontal}}} \newcommand{\lver}{\ell_{\mathrm{vertical}}} \newcommand{\leven}{\ell_{\mathrm{even}}} \newcommand{\lodd}{\ell_{\mathrm{odd}}} \newcommand{\seven}{s_{\mathrm{even}}} \newcommand{\sodd}{s_{\mathrm{odd}}} \newcommand{\hor}{\mathrm{horizontal}} \newcommand{\ver}{\mathrm{vertical}} The goal of this section is to prove \cref{thm:lower bound}. For this, we examine a graph $G\in \graphs$, and we exhibit three different independent sets in $G$ by constructing three different subsets of disjoint segments. 
A trade-off between these three independent sets then results in a lower bound. The set of geometric objects $S$ is called a \emph{representation} of its intersection graph $G(S)$ (note that a graph can have multiple representations). Our proof starts with some observations on the possible sets of segments representing a graph in the class $\graphs$. Let $G$ be a graph in $\graphs$ with $n$ vertices and let $\Ss$ be a representation of $G$. Thus, $\Ss$ consists of axis-parallel segments, no three of which meet at one point. We may assume that in $\Ss$ every two parallel segments that intersect meet at a single point, called the \emph{meeting point}. If two segments do not meet at a single point, we can choose any common point and shorten both segments up to this common point. Since no three segments of $\Ss$ meet at one point, all intersections are preserved and the modified set of segments is still a representation of $G$. Further, we may assume that if two orthogonal segments intersect, their intersection point lies in the interiors of both of them. Indeed, otherwise we could slightly extend one or both of these segments around the meeting point. Finally, we may assume that the segments of $\Ss$ lie on a grid of size $\lhor\times\lver$ so that the segments lying on the same grid line induce a path in the intersection graph. Indeed, if on a single grid line the segments induce a disjoint union of several paths, then we can move these paths slightly so that they are realized on separate grid lines. A representation $\Ss$ of $G$ with the properties described above is called \emph{favorable}. To give constructions for the subsets of pairwise disjoint segments in a favorable representation $\Ss$, we first need some notation. Suppose the $\lhor\times\lver$ grid has $\leven$ grid lines with an even number of segments and $\lodd$ grid lines with an odd number of segments. In total there are $\seven$ segments which lie on a grid line with an even number of segments and $\sodd$ segments which lie on a grid line with an odd number of segments. The maximum number of segments lying on a single grid line is $t$. The following three lemmas correspond each to a different set of pairwise disjoint segments in $\Ss$. In all three lemmas, we assume $\Ss$ to be a favorable representation of a graph in $\graphs$ with $n$ vertices. \begin{lemma}\label{lem:odd technique} There exists a subset of $\Ss$ consisting of $\frac{n}{4} + \frac{\lodd}{4}$ pairwise disjoint segments. \end{lemma} \begin{lemma}\label{lem:line technique} There exists a subset of $\Ss$ consisting of $\frac{n}{4} + \frac{t}{4}$ pairwise disjoint segments. \end{lemma} \begin{lemma}\label{lem:even technique} There exists a subset of $\Ss$ consisting of $\frac{n}{4} + \frac{\sqrt{2\seven}}{4} - \frac{\lodd}{4}$ pairwise disjoint segments. \end{lemma} Before proving these lemmas, we use them to conclude \cref{thm:lower bound}. \begin{proof}[Proof of \cref{thm:lower bound}] Let $G$ be a graph in the class $\graphs$ with $n$ vertices and let $\Ss$ be a favorable representation of $G$. A subset of pairwise disjoint segments in $\Ss$ corresponds to an independent set in $G$ of the same size. We distinguish three cases. 
Fix a constant $c>\sqrt{2}$, whose value will be chosen at the end of the proof. If $\lodd\ge\sqrt{n}/c$, then by \cref{lem:odd technique} $G$ has an independent set of size at least \[\frac{n}{4} + \frac{1}{4c}\cdot \sqrt{n}.\] If $\lodd\le\sqrt{n}/c$ and $\seven \ge 2n/c^2$, then by \cref{lem:even technique} $G$ has an independent set of size at least \begin{align*} \frac{n}{4} + \frac{\sqrt{2\seven}}{4} - \frac{\lodd}{4}&\ge \frac{n}{4} + \frac{\sqrt{4n}}{4c} - \frac{\sqrt{n}}{4c}\\ &\ge \frac{n}{4} + \frac{1}{4c}\cdot \sqrt{n}. \end{align*} If $\lodd\le\sqrt{n}/c$ and $\seven \le 2n/c^2$, we get $\sodd \ge n(1-2/c^2)$ using $\seven+\sodd=n$. In particular $\sodd>0$, so $\lodd\ge1$ and the maximum number of segments $t$ lying on a single line is at least \[ t \ge \frac{\sodd}{\lodd} \ge \frac{n(1-2/c^2)}{\sqrt{n}/c} = \frac{c^2-2}{c}\cdot \sqrt{n}. \] By \cref{lem:line technique} we get an independent set of $G$ of size at least \[\frac{n}{4} + \frac{c^2-2}{4c}\cdot \sqrt{n}.\] Setting $c=\sqrt{3}$ gives the desired result: there is always an independent set of size at least $\frac{n}{4}+\frac{1}{4\sqrt{3}}\cdot \sqrt{n}$. \end{proof} It remains to prove the three lemmas. \begin{proof}[Proof of \cref{lem:odd technique}] This construction exploits grid lines with an odd number of segments on them. For each grid line, select every second segment lying on that line, starting from the leftmost. If the grid line has an even number of segments, exactly half of the segments are selected. If the grid line has an odd number of segments, the number of selected segments is half rounded up. This corresponds to selecting exactly half of all the segments and adding $1/2$ for each grid line with an odd number of segments. In total, \[\frac{n}{2}+\frac{\lodd}{2}\] segments are selected. By construction, two selected segments can only intersect if one of them is horizontal and the other one is vertical. The selection can therefore be partitioned into horizontal and vertical segments, with both parts consisting of pairwise disjoint segments. By the pigeonhole principle, one of the two parts contains at least half of the selected segments. \end{proof} \begin{proof}[Proof of \cref{lem:line technique}] This construction exploits a single grid line with many segments on it. Let $g_\hor$ be a horizontal grid line with the maximum number of segments $t_\hor$ lying on it. Let $s_\ver$ be the total number of vertical segments. Let $\Ss_\hor$ be the set consisting of all segments lying on $g_\hor$ and all vertical segments. Analogously define $g_\ver$, $t_\ver$, $s_\hor$, and $\Ss_\ver$. Now we choose the larger set among $\Ss_\ver$ and $\Ss_\hor$. The size of this set is \begin{align*} \max\{s_\hor + t_\ver,s_\ver + t_\hor\} &\ge \frac{s_\hor + t_\ver + s_\ver + t_\hor}{2}\\ &\ge \frac{n + t}{2}. \end{align*} For the second inequality, we use the equalities $s_\hor+s_\ver = n$ and $t=\max\{t_\hor,t_\ver\}$. We now observe that the intersection graphs of both sets $\Ss_\ver$ and $\Ss_\hor$ are bipartite. Indeed, any cycle in the intersection graph has to contain at least two horizontal segments lying on two different horizontal grid lines, and two vertical segments lying on two different vertical grid lines. But $\Ss_\hor$ contains horizontal segments from only one horizontal grid line, while $\Ss_\ver$ contains vertical segments from only one vertical grid line. In a bipartite graph the vertices can be partitioned into two independent sets $A$ and $B$, one of which contains at least half of the vertices. Hence, the larger of the sets $\Ss_\ver$ and $\Ss_\hor$ contains an independent set of size at least $\frac{n+t}{4}$.
\end{proof} The proof of \cref{lem:even technique} heavily depends on the following classic theorem of Erd\H{o}s and Szekeres, here rephrased in the plane setting. We say that a sequence of points in the plane is {\em{non-decreasing}} if both their first and second coordinates are non-decreasing along the sequence; it is {\em{non-increasing}} if the first coordinate is non-decreasing along the sequence while the second is non-increasing. \begin{theorem}[Erd\H{o}s, Szekeres \cite{erdos1935combinatorial}]\label{thm:erdos szekeres} Given $n$ distinct points on the plane, it is always possible to choose at least $\sqrt{n}$ of them and arrange into a sequence so that this sequence is either non-increasing or non-decreasing. \end{theorem} \begin{proof}[Proof of \cref{lem:even technique}] The construction exploits grid lines with an even number of segments. With the help of \cref{thm:erdos szekeres} we first construct a polyline that cuts through the segments. Then we use this polyline to define two sets of pairwise disjoint segments in $\Ss$, one of which has the desired size. Recall that meeting points are the points in which two parallel segments intersect. A meeting point on a grid line naturally partitions the segments lying on this line into two parts: those to the left of it and to the right of it (for horizontal lines), or those above it and below it (for vertical lines). Call a meeting point a \emph{candidate point} if both those parts have odd cardinalities. Note that thus, candidate points only occur on grid lines with an even number of segments. Further, in total there are $\seven/2$ candidate points. By \cref{thm:erdos szekeres}, there exists either a non-increasing or a non-decreasing sequence of $\sqrt{\seven/2}$ candidate points. Suppose without loss of generality that the sequence is non-increasing and of maximum possible length. We call \emph{cutting points} the candidate points in the sequence and we use $C$ to denote the number of cutting points. Observe that $C \ge \sqrt{\seven/2}$. For every two consecutive cutting points, connect them with a segment. Then consider two half-lines with negative inclinations, one ending at the first cutting point and one starting at the last cutting point. This gives a polyline intersecting all vertical and horizontal grid lines. We call this path the \emph{cut}. \begin{figure}[ht] \centering \includegraphics[scale=0.7]{draws/Blue_Orange.pdf} \caption{Selection of the two independent sets in the proof of \cref{lem:even technique}. Crosses are the meeting points, disks are the candidate points, and gray disks are the cut points. The black dotted line is the cut. Dashed blue and solid orange segments are those chosen to the sets $\Ss_\mathrm{blue}$ and $\Ss_\mathrm{orange}$, respectively.} \label{fig:even-technique} \end{figure} Using the cut, we construct two sets of segments $\Ss_\mathrm{blue}$ and $\Ss_\mathrm{orange}$; see \cref{fig:even-technique}. \subparagraph*{Construction of $\Ss_\mathrm{blue}$:} The set $\Ss_\mathrm{blue}$ is constructed as follows. For each vertical grid line, start from the segment with the lowest endpoint and choose every second segment with the upper endpoint on the cut or below. Next, for each horizontal grid line, start from the segment with the right-most endpoint and choose every second segment with the left endpoint on the cut or to the right. \subparagraph*{Construction of $\Ss_\mathrm{orange}$:} The set $\Ss_\mathrm{orange}$ is symmetrically to $\Ss_\mathrm{blue}$. 
Namely, for each vertical grid line, start from the segment with the highest endpoint and choose every second segment with the lower endpoint on the cut or above. For each horizontal grid line, start from the segment with the left-most endpoint and choose every second segment with the right endpoint on the cut or to the left. If the sequence were non-decreasing, the choice strategy for horizontal segments would be inverted between $\Ss_\mathrm{blue}$ and $\Ss_\mathrm{orange}$. We argue that the segments of $\Ss_\mathrm{blue}$ are pairwise disjoint. Note that the segments lying in the bottom-left side of the cut are vertical and pairwise disjoint by the construction, while those lying in the top-right side of the cut are horizontal and pairwise disjoint. So it remains to argue that there is no pair of a vertical segment and a horizontal segment from $\Ss_\mathrm{blue}$ that would intersect at a point lying on the cut. Recall that since the representation is favorable, this intersection point would lie in the interiors of both segments. This would imply that either the vertical segment would have the top endpoint strictly above the cut, or the horizontal segment would have the left endpoint strictly to the left of the cut. This contradicts the construction of $\Ss_\mathrm{blue}$. A symmetric argument shows that the segments of $\Ss_\mathrm{orange}$ are pairwise disjoint as~well. It remains to show that $\Ss_\mathrm{blue} \cup \Ss_\mathrm{orange}$ has at least $\frac{n}{2} + \frac{\sqrt{2\seven}}{2} - \frac{\lodd}{2}$ segments. Consider a grid line with an even number of segments. For each candidate point on this line which is not a cutting point, exactly one of the two segments meeting at this candidate point is in $\Ss_\mathrm{blue} \cup \Ss_\mathrm{orange}$. However, for each cutting point on this line, both segments meeting at this point are included in $\Ss_\mathrm{blue} \cup \Ss_\mathrm{orange}$, as there is an odd number of segments on either side. This means that on each such grid line, the total number of segments included in $\Ss_\mathrm{blue} \cup \Ss_\mathrm{orange}$ is exactly half of all the segments, plus one segment for each cutting point on the grid line. Consider now a grid line with an odd number of segments. The sets $\Ss_\mathrm{blue}$ and $\Ss_\mathrm{orange}$ contain every second segment starting from the outermost ones. Without the cut, this would include half of the segments lying on the line rounded up. Since there is an odd number of segments on the grid line, the cut crosses it only at one point. So at most one segment is removed from $\Ss_\mathrm{blue} \cup \Ss_\mathrm{orange}$ due to this. This means that among the segments lying on the line, at least half rounded down is included in $\Ss_\mathrm{blue} \cup \Ss_\mathrm{orange}$. Hence we lose at most $1/2$ of a segment for each odd grid line. Together, this gives that $\Ss_\mathrm{blue} \cup \Ss_\mathrm{orange}$ contains at least \[\frac{\seven}{2} + C + \frac{\sodd}{2} - \frac{\lodd}{2} \ge \frac{n}{2} + \frac{\sqrt{2\seven}}{2} - \frac{\lodd}{2}\] segments. By choosing the larger of the two sets, we obtain an independent set of the desired size. \end{proof} \section{The upper bound: proof of \cref{thm:upper_bound}} \label{sec:upper_bound} In this section, we construct families of axis-parallel segments whose intersection graphs satisfy the requirements of \cref{thm:upper_bound}. In fact, we prove the following stronger statement.
\begin{theorem}[Full version of Theorem~\ref{thm:upper_bound}]\label{thm:ub-full} For any integer $k \geq 1$, there exists a graph $G_k$ in $\graphs$ on $4k^2$ vertices with clique covering number $\theta(G_k)=2k^2$, fractional independence number $\alpha^\star(G_k)=2k^2$, and independence number \[\alpha(G_k) = k^2 + 3k - 2.\] \end{theorem} Note that \Cref{cor:ratio,cor:LP} follow from \cref{thm:ub-full} by considering $G=G_k$ for $k$ large enough depending on $1/\eps$. The remainder of this section is devoted to the proof of \cref{thm:ub-full}. Fix an integer $k\geq 1$. We construct a set of $4k^2$ axis-parallel segments $\Mm_k$. The set $\Mm_k$ will consist of $k$ sets with $4k$ segments each; these sets will be called {\em{$k$-boxes}}. A \emph{$k$-box} is a set of $4k$ axis-parallel segments distributed on $k$ horizontal and $k$ vertical lines, each with exactly two segments on it. For every line, the two segments on this line intersect at a single point, which we call their \emph{meeting point}. In the construction of a $k$-box, the meeting points are arranged in a diagonal from the top left to the bottom right, see the case $k=6$ in \cref{fig:k_box}. The \emph{up segments} (resp. \emph{down segments}) of a $k$-box are the segments lying vertically above (resp. below) a meeting point. Similarly, we define the \emph{left} and \emph{right segments} of a $k$-box. \begin{figure}[ht] \centering \includegraphics[scale =0.6]{draws/k_box.pdf} \caption{A 6-box. The meeting points are represented with crosses. Thus, every line contains two segments of the box, whose only intersection is the meeting point on this line.} \label{fig:k_box} \end{figure} To construct $\Mm_k$, consider a large square and place $k$ different $k$-boxes $\{\Bb_i\}_{i=1}^k$ along its diagonal from the bottom left to the top right. Then, prolong each segment away from the meeting point until it touches a side of the square, see \cref{fig:Michal_family}. The construction results in the set $\Mm_k$ consisting of $4k^2$ segments. We note that $\Mm_k$ is a favorable representation of its intersection graph in the sense introduced in \cref{sec:lower_bound}. Also, perhaps not surprisingly, the construction is inspired by a tight example for the Erd\H{o}s-Szekeres Theorem (\cref{thm:erdos szekeres}), so that it proves tightness of the bound provided by \cref{lem:even technique}. \begin{figure}[ht] \centering \includegraphics[scale =0.6]{draws/M_family.pdf} \caption{The set $\Mm_3$. The meeting points are represented by crosses. The dashed lines indicate the sides of the large square and of the $k$-boxes.} \label{fig:Michal_family} \end{figure} We are left with verifying the asserted properties of $\Mm_k$. First, we introduce some notation and definitions. Let $\Ii$ be a set of pairwise disjoint segments in $\Mm_k$. A $k$-box $\Bb_i$ of $\Mm_k$ is said to be \emph{interesting} for $\Ii$ if $\Bb_i \cap \Ii$ contains either at least one down segment and one right segment, or at least one up segment and one left segment. Otherwise, the $k$-box is \emph{boring} for $\Ii$. Distinguishing between interesting and boring boxes allows more precise estimates on the maximum possible cardinality of $\Ii$. In the next two lemmas, we consider $\Ii$ to be a set of pairwise disjoint segments in $\Mm_k$. \begin{lemma}\label{lem:boring} For any $k$-box $\Bb$ in $\Mm_k$, $|\Bb \cap \Ii| \leq 2k$. Moreover, if $\Bb$ is boring for $\Ii$, then $|\Bb \cap \Ii| \leq k+1$. 
\end{lemma} \begin{proof} The first statement holds because $\Ii$ contains at most one segment per line, and there are $2k$ lines in a box: $k$ vertical and $k$ horizontal. Assume now that $\Bb$ is a box that is boring for $\Ii$. Enumerate the up and down segments of $\Bb$ from left to right as $U = \{u_1, \ldots, u_k\}$ and $D = \{d_1, \ldots, d_k\}$, and the right and left segments from top to bottom as $R =\{r_1, \ldots, r_k\}$ and $L =\{l_1, \ldots, l_k\}$; see \cref{fig:k_box}. If all segments of $\Bb\cap \Ii$ are pairwise parallel (that is, they are either all vertical or all horizontal), then $|\Bb \cap \Ii| \leq k$ since $\Ii$ can contain only one segment per line. Then, there are two cases left to check: either $\Bb\cap\Ii$ contains only up and right segments, or only down and left segments. Observe that $U \cup R$ can be partitioned into $k+1$ parts as follows: $u_1$ and $r_k$ are in singleton parts, and we have $k-1$ pairs of intersecting segments $ \{u_{i+1},r_i\}_{i=1}^{k-1}$. Similarly, $D \cup L$ can be partitioned into $k$ pairs of intersecting segments $ \{d_i,l_i\}_{i=1}^k$. The independent set $\Ii$ can contain at most one segment from each part of these partitions. Hence, $|\Bb \cap \Ii| \leq k+1$ in both cases. \end{proof} \begin{lemma}\label{lem:interesting} There are at most two boxes that are interesting for $\Ii$. \end{lemma} \begin{proof} We show that there is at most one interesting box with at least one up and one left segment included in $\Ii$. Then a symmetric argument shows that there is at most one interesting box with at least one down and one right segment included in $\Ii$, implying that there are at most two interesting boxes in total. For the sake of contradiction, assume that $\Mm_k$ contains two distinct interesting boxes $\Bb, \Bb'$ of the first kind. Then, either an up segment of $\Bb \cap \Ii$ intersects a left segment of $\Bb' \cap \Ii$, or vice-versa. This contradicts the fact that segments of $\Ii$ are pairwise disjoint. \end{proof} With the lemmas in place, we are in a position to finish the proof of \cref{thm:ub-full}. Let $G_k$ be the intersection graph of $\Mm_k$. By construction, the set $\Mm_k$ consists of $4k^2$ axis-parallel segments and $G_k$ is in $\graphs$. First, we compute the clique covering number and the fractional independence number of $G_k$. Observe that $G_k$ is triangle-free, hence every clique in $G_k$ is of size at most $2$. It follows that every clique covering of $G_k$ is of size at least $\frac{|\Mm_k|}{2}=2k^2$, that is, $\theta(G_k)\geq 2k^2$. Moreover, this bound is attained: the $2k^2$ pairs of segments sharing a meeting point partition $\Mm_k$ into cliques, so $\theta(G_k)= 2k^2$. On the other hand, taking every vertex of $G_k$ with multiplicity $1/2$ gives a fractional independent set of size $\frac{|\Mm_k|}{2}=2k^2$, implying that $\alpha^\star(G_k)\geq 2k^2$. Since $\theta(H)\geq \alpha^\star(H)$ for every triangle-free graph $H$, we conclude that $$\theta(G_k)=\alpha^\star(G_k)=2k^2.$$ It remains to prove that $\alpha(G_{k})= k^2 + 3k - 2$. We give a set of pairwise disjoint segments in $\Mm_k$, corresponding to an independent set in $G_k$. This shows that $\alpha(G_k) \geq k^2 + 3k - 2$. The set of segments in $\Mm_k$ consists of: (i) the left and up segments of $\Bb_1$, and (ii) the right and down segments of $\Bb_2$, and (iii) the right segments and the topmost up segment of $\Bb_i$, for each $3 \leq i\leq k$. This is a set of pairwise disjoint segments in $\Mm_k$ and it contains $2\cdot (2k) + (k-2)(k+1) = k^2 + 3k - 2$ segments.
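For instance, for $k=2$ the range in (iii) is empty and the chosen set consists of the four left and up segments of $\Bb_1$ together with the four right and down segments of $\Bb_2$, in total $2\cdot(2\cdot2)=8=2^2+3\cdot2-2$ segments, in accordance with the general count.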
To show that $\alpha(G_k) \leq k^2 + 3k - 2$ we apply \cref{lem:boring} and \cref{lem:interesting} to obtain, for any set $\Ii$ of pairwise disjoint segments in $\Mm_k$, that $$ | \Ii | = |\Mm_k \cap \Ii | = \sum_{i=1}^k |\Bb_i \cap \Ii| \leq 2\cdot (2k) + (k-2)(k+1) = k^2 + 3k -2.$$ This concludes the proof of \cref{thm:ub-full}. \paragraph*{Acknowledgements.} The results presented in this paper were obtained during the trimester on Discrete Optimization at Hausdorff Research Institute for Mathematics (HIM) in Bonn, Germany. We are thankful for the possibility of working in the stimulating and creative research environment at HIM.
2205.15105v1
http://arxiv.org/abs/2205.15105v1
Center and Lie algebra of outer derivations for algebras of differential operators associated to hyperplane arrangements
\documentclass[fleqn,reqno]{amsart} \usepackage[a4paper,twoside=false]{geometry} \usepackage{lmodern} \usepackage[defaultsans]{lato} \usepackage[utf8]{inputenc} \usepackage{mathtools} \usepackage{amsmath,amssymb,amsfonts} \usepackage{stmaryrd} \usepackage{enumitem} \usepackage{microtype} \usepackage[dvipsnames]{xcolor} \usepackage[colorlinks, allcolors = blue]{hyperref} \usepackage{varioref} \usepackage{leading} \leading{14pt} \usepackage{tikz} \usetikzlibrary{cd,babel} \usetikzlibrary{matrix,calc,arrows} \makeatletter \AtBeginDocument{\expandafter\let\csname[\endcsname\relax \expandafter\let\csname]\endcsname\relax \expandafter\DeclareRobustCommand\csname[\expandafter\endcsname\expandafter{ \csname begin\endcsname{equation}}\expandafter\DeclareRobustCommand\csname]\expandafter\endcsname\expandafter{ \csname end\endcsname{equation} }} \mathtoolsset{showonlyrefs,showmanualtags} \let\cir\relax \usepackage[initials, alphabetic, msc-links]{amsrefs} \newtheorem{Theorem}{Theorem}[section] \newtheorem{Definition}[Theorem]{Definition} \newtheorem{Proposition}[Theorem]{Proposition} \newtheorem{Lemma}[Theorem]{Lemma} \newtheorem{Corollary}[Theorem]{Corollary} \newtheorem{TheoremIntro}{Theorem} \renewcommand{\theTheoremIntro}{\Alph{TheoremIntro}} \theoremstyle{remark} \newtheorem{Example}[Theorem]{Example} \newtheorem{Remark}[Theorem]{Remark} \newlist{thmlist}{enumerate}{1} \setlist[thmlist]{ nolistsep, ref={\mdseries\textup{(\emph{\roman*})}}, label={\mdseries\textup{(\emph{\roman*})}}, } \newcommand\thmitem[1]{\textup{(\emph{\romannumeral #1})}} \newlist{tfae}{enumerate}{1} \setlist[tfae]{ nolistsep, ref={\mdseries\textup{(\emph{\alph*})}}, label={\mdseries\textup{(\emph{\alph*})}}, before={\advance\mathindent\leftmargin}} \makeatletter \newcommand\tfaeitem[1]{{\textup{(\emph{\@alph #1})}}} \newcommand\implication[2]{$\mbox{\tfaeitem{#1}}\Rightarrow\mbox{\tfaeitem{#2}}$} \makeatother \newcommand\claim[1]{ \begin{minipage}{.8\displaywidth} \itshape #1 \end{minipage}} \DeclareMathOperator{\im}{Im} \DeclareMathOperator{\gr}{gr} \DeclareMathOperator{\rank}{rank} \DeclareMathOperator{\Ext}{Ext} \DeclareMathOperator{\Hom}{Hom} \DeclareMathOperator{\Der}{Der} \DeclareMathOperator{\Aut}{Aut} \DeclareMathOperator{\End}{End} \DeclareMathOperator{\Diff}{Diff} \DeclareMathOperator{\coker}{coker} \DeclareMathOperator{\Gr}{Gr} \DeclareMathOperator{\GL}{GL} \DeclarePairedDelimiter\paren{\lparen}{\rparen} \DeclarePairedDelimiter\abs{\lvert}{\rvert} \DeclarePairedDelimiter\norm{\lVert}{\rVert} \DeclarePairedDelimiter\lin{\langle}{\rangle} \DeclarePairedDelimiter\Lin{\langle\!\langle}{\rangle\!\rangle} \newcommand\Bimod[2]{{}_{#1}\mathsf{Mod}_{#2}} \newcommand\lmod[1]{{}_{#1}\mathsf{Mod}} \newcommand\rmod[1]{\mathsf{Mod}_{#1}} \newcommand\pres[1]{\prescript{}{#1}} \newcommand\note[1]{\marginpar{\sffamily\footnotesize\raggedright\color{red}{#1}}} \newcommand\nset[1]{\llbracket#1\rrbracket} \newcommand\todo[1]{\textcolor{red}{#1}} \newcommand\something[1]{\fbox{$#1$}\,} \newcommand\NN{\mathbb{N}} \newcommand\ZZ{\mathbb{Z}} \newcommand\RR{\mathbb{R}} \newcommand\CC{\mathbb{C}} \renewcommand\epsilon{\varepsilon} \newcommand\id{\mathrm{id}} \let\*\bullet \newcommand\g{\mathfrak{g}} \renewcommand\SS{\mathfrak{S}} \newcommand\HH{H\!H} \newcommand\kk{\Bbbk} \newcommand\Y[1]{C_S^{#1}(L,N)} \newcommand\X{\mathfrak{X}} \newcommand\D{\Diff} \newcommand\A{\mathcal{A}} \renewcommand\AA{\mathbb A} \newcommand\B{\mathcal{B}} \renewcommand\emptyset{\varnothing} \newcommand\epi{\twoheadrightarrow} \newcommand\inc{\hookrightarrow} 
\newcommand\hey{\marginpar{\todo{!}}} \newcommand\zz{\hat{z}} \newcommand\xx{\hat{x}} \newcommand\yy{\hat{y}} \newcommand\hh{\hat{h}} \newcommand\ee{\hat{e}} \newcommand\EE{\hat{E}} \newcommand\DD{\hat{D}} \newcommand\dd{\hat\delta} \renewcommand\P{\mathbf P} \DeclareMathOperator{\Out}{OutDer} \newcommand\citar{[\textcolor{blue}{\textsf{?}}]\marginpar{\textcolor{blue}{\textsf{ref}}}~} \setitemize{nolistsep, listparindent=\parindent} \title[The Hochschild cohomology~$H(S,U)$]{ Center and Lie algebra of outer derivations for algebras of differential operators associated to hyperplane arrangements} \author{Francisco Kordon} \author{Thierry Lambre} \address{ CONICET and Instituto Balseiro, Universidad Nacional de Cuyo – CNEA. Av.\,Bustillo 9500, San Carlos de Bariloche, R8402AGP, R\'io Negro, Argentina} \email{[email protected]} \address{ Laboratoire de Mathématiques Blaise Pascal, UMR6620 CNRS, Université Clermont Auvergne, Campus des Cézeaux, 3 place Vasarely, 63178 Aubière cedex, France} \email{[email protected]} \date{\today} \begin{document} \begin{abstract} We compute the center and the Lie algebra of outer derivations of a familiy of algebras of differential operators associated to hyperplane arrangements of the affine space $\mathbb A^3$. The results are completed for $4$-braid arrangements and for reflection arrangements associated to the wreath product of a cyclic group with the symmetric group $\SS_3$. To achieve this we use tools from homological algebra and Lie--Rinehart algebras of differential operators. \end{abstract} \maketitle \section*{Introduction} Let $V$ be a finite-dimensional $\kk$-vector space over a field $\kk$ of characteristic zero, $S$ the algebra of coordinates on $V$ and $\A$ a central hyperplane arrangement in $V$. We assume throughout the article that $\A$ is a free arrangement in the sense given by K.\,Saito in~\cite{saito}: we require that the Lie algebra~$\Der\A$ of derivations of $S$ tangent to $\A$ is a free $S$-module. The algebra $\Diff\A$ of differential operators tangent to~$\A$, as seen by F.\,J.\,Calderon Moreno in~\cite{calderon} and by M.\,Suárez--Álvarez in~\cite{differential-arrangements}, is the subalgebra of~$\End(S)$ generated by $\Der\A$ and $S$. Our results concern the center and the Lie algebra of outer derivations of $\Diff\A$. The first and simplest example of a free arrangement is the case of a central line arrangement in $V=\kk^2$. This case is studied by the first author and M.\,Suárez-Álvarez in ~\cite{ksa} when there are at least $5$ lines: a description of the Hochschild cohomology~$\HH^\*\left( \Diff\A \right)$, including its cup product and Gerstenhaber bracket, is given explicitly in detail through a calculation independent of the methods that we now use. The second and most important family of examples is that of the braid arrangements $\B_n$, given for $n\geq2$ by the hyperplanes $H_{ij}=\{x\in\kk^n :x_i=x_j\}$ with $i\neq j$: these arrangements are free and have served historically as a proxy to obtain general results, for instance in V.\,I.\,Arnold's classical article~\cite{arnold}. 
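For instance, for $n=3$ the braid arrangement $\B_3$ consists of the three hyperplanes $x_1=x_2$, $x_1=x_3$ and $x_2=x_3$ in $\kk^3$, with defining polynomial
\[
(x_2-x_1)(x_3-x_1)(x_3-x_2).
\]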
In virtue of the freeness of $\A$ the algebra $\Diff\A$ is isomorphic to the enveloping algebra of a Lie--Rinehart algebra~$(S,L)$ ---see L.\,Narvaez Macarro's~\cite{narvaez} and the first author's thesis~\cite{tesis}--- and then the spectral sequence introduced by both authors in~\cite{kola} permits the computation of~$\HH^\bullet(\Diff\A)$ in terms of the Hochschild cohomology $H^\*(S,\Diff\A)$ of~$S$ with values on $\Diff\A$ and the Lie--Rinehart cohomology of~$L$. This was successfully applied to arrangements of three lines in~\cite{kola}, and, ultimately, to $\A = \B_3$ ---see Corollary~\ref{coro:hhB3}. The homological approach described above allows us to compute the center of $\Diff\A$ under the hypothesis that the Saito's matrix of the arrangement~$\A$ is triangular: more generally, we can state this result resorting to the hypothesis of triangularizability of Lie--Rinehart algebras that we give in Definition~\ref{def:conditions}. \begin{TheoremIntro}[Theorem~\ref{thm:hh0}] Let $(S,L)$ be a triangularizable Lie--Rinehart algebra with enveloping algebra $U$. The center of $U$ is $\kk$. \end{TheoremIntro} Let $\A_r$, $r\geq1$, be the arrangement in $\CC^3$ defined by $0 = xyz(z^r-y^r)(z^r-x^r)(y^r-x^r)$. This arrangement is~$\B_4$ when $r=1$. When $r\geq2$, it is the reflection arrangement of the wreath product of the cyclic group of order $r$ and the symmetric group~$\SS_3$. The homological method yields the following result. \begin{TheoremIntro}[Corollary~\ref{coro:abelian}] Let $r\geq1$. For each hyperplane $H$ in $\A_r$ let $f_H$ be a linear form with kernel $H$ and $\partial_H$ the derivation of $\Diff\A_r$ determined by \[ \begin{cases*} \partial_H(g) = 0 & if $g\in \kk[x_1,x_2,x_3]$; \\ \partial_H(\theta) = \theta(f_H)/f_H & if $\theta\in\Der\A_r$. \end{cases*} \] The Lie algebra of outer derivations of $\Diff\A_r$ together with the commutator is an abelian Lie algebra of dimension~$3r+3$, the numbers of hyperplanes of $\A_r$, and is generated by the classes of the derivations $\partial_H$ with $H\in\A_r$. \end{TheoremIntro} In the pursuit of $\HH^\*(U)$ a key step is the computation of $H^\*(S,U)$. We succeeded in its calculation when $\*=0,1$ for a family of Lie--Rinehart algebras that generalizes $\Der\A_r$. The result in Corollary~\ref{coro:H1:result:graded} relates $H^1(S,U)$ to the cokernel of the Saito's matrix --- this is an important object of the theory with a rich algebraic structure studied, for instance, by M.\,Granger, D.\,Mond and M.\,Schulze in~\cite{schulze}. There are several ways in which the calculations performed in this article can be continued. In particular, following the methods of J.\,Alev and M.\,Chamarie in~\cite{AC} our findings on the algebra of outer derivations of $\Diff\A$ can lead to a description of $\Aut(\Diff\A)$ as in \cite{ksa}*{\S7} and M.\,Suárez-Álvarez and Q.\,Vivas'~\cite{SAV}. \bigskip The first author is currently a CONICET postdoctoral fellow and received support from BID PICT 2019-00099. We thank the Universit\'e Clermont Auvergne for hosting the first author in a postdoctoral position at the Laboratoire de Math\'ematiques Blaise Pascal during the year 2019-2020. \bigskip Unadorned $\Hom$ and $\End$ are taken over $\kk$. The set of natural numbers ~$\NN$ is that of nonnegative integers. If $n$ and $m$ are positive integers, we denote by $\nset{n,m}$ the set of integers $k$ such that $n\leq k\leq m$, and $\nset{m} \coloneqq \nset{1,m}$. 
\section{Generalities} \subsection{Hyperplane arrangements} \begin{Definition} A central \emph{hyperplane arrangement} $\A$ in a finite dimensional vector space $V$ is a finite set $\{H_1,\ldots,H_\ell\}$ of subspaces of codimension 1. Choosing a basis of~$V$ we may identify the algebra $S(V^*)$ of coordinates of~$V$ with $S=\kk[x_1,\ldots,x_n]$: for each $i\in\nset{\ell}$ let $\lambda_i\in S$ be a linear form with kernel $H_i$. Up to a nonzero scalar, the \emph{defining polynomial} $Q=\lambda_1\cdots\lambda_\ell\in S $ depends only on~$\A$. \end{Definition} \begin{Definition} The set of \emph{derivations tangent to the arrangement} $\A$ is \[\label{eq:arrangements:derivations} \Der \A \coloneqq \left\{ \theta\in\Der(S)~: \text{$\lambda_i$ divides $\theta (\lambda_i)$ for every $i\in\nset{\ell}$ } \right\}. \] This is a Lie subalgebra and a sub-$S$-module of the Lie algebra of derivations $\Der(S)$ of $S$. \end{Definition} \begin{Definition} The arrangement $\A$ is \emph{free} if $\Der\A$ is a free $S$-module. \end{Definition} \begin{Theorem}[Saito's criterion,~\cite{saito}*{Theorem 1.8.ii}] \label{thm:saito} A family of $n$ derivations $ (\theta_1,\ldots,\theta_n )$ in $\Der\A$, where $n=\dim V$, is an $S$-basis of $\Der\A$ if and only if the determinant of \emph{Saito's matrix} \[\label{eq:saitomatrix} M(\theta_1,\ldots,\theta_n) \coloneqq \begin{pmatrix} \theta_1(x_1) & \cdots & \theta_1(x_n) \\ \vdots & ~ & \vdots \\ \theta_n(x_1) & \cdots & \theta_n(x_n) \end{pmatrix} \] is a nonzero scalar multiple of $Q$. \end{Theorem} The notion of freeness connects arrangements of hyperplanes with commutative algebra, algebraic geometry and combinatorics. While not a property of generic hyperplane arrangements, many of the motivating examples of hyperplane arrangements are free. Saito's criterion in Theorem~\ref{thm:saito} is perhaps the most practical way to prove freeness, though there are other methods to establish this condition ---see A.\,Bigatti, E.\,Palezzato and M.\,Torielli's~\cite{bigatti} for a discussion on the state of the art. \begin{Example}\label{ex:braids:notilde} Let $n\geq2$ and let $E=\AA^n$ be the affine space with coordinate ring $S=\kk[x_1,\ldots,x_n]$. The braid arrangement $\B_n$ in $E$ has hyperplanes $H_{ij}$ with equation $x_i-x_j = 0$, $1\leq i < j \leq n$, so that the defining polynomial is $Q = \prod_{1\leq i < j \leq n}\left( x_j-x_i \right)$. Consider the derivations $\theta_1,\ldots,\theta_n$ of $S$ defined for $k\in\nset{n}$ by \begin{align}\label{eq:braids:notilde:basis} & \theta_{1}(x_k) = 1, && \theta_i(x_k) = (x_k-x_1)\ldots(x_k-x_{i-1}) \qquad\text{if $i\geq2$}. \end{align} These derivations satisfy $(x_k-x_j) \mid \theta_i(x_k-x_j)$ for any $i,j,k\in\nset{n}$ and therefore belong to $\Der\B_{n}$. Saito's matrix $\left( \theta_i(x_k) \right)$ is triangular and its determinant is $Q$. By Saito's Criterion, $\Der\B_{n}$ is a free $S$-module with basis $\left\{ \theta_1,\ldots,\theta_n \right\}$. \end{Example} \begin{Example}\label{ex:braids:tilde} Let $n\geq1$ and $E=\AA^{n+1}$ be the affine space with coordinate ring $S=\kk[x_0,x_1,\ldots,x_n]$. As in Example~\ref{ex:braids:notilde} above, the arrangement $\B_{n+1}$ in $E$ has equation $\prod_{0\leq i < j\leq n}(x_i-x_j)=0$. Consider the subspace $V=\left\{ 0 \right\}\times\AA^n$ of $E$, defined by the equation $ x_0 = 0$, and the hyperplanes $\tilde H_{ij}$ of $V$ defined by $x_i-x_j=0$ for $1\leq i < j \leq n$.
We call $\tilde\B_n$ the arrangement in $V$ formed by these hyperplanes together with the coordinate hyperplanes $x_k=0$, $k\in\nset{n}$, which are the traces on $V$ of the hyperplanes $H_{0k}$ of $\B_{n+1}$, so that $\tilde\B_n$ is defined by the equation $x_1\ldots x_n\prod_{1\leq i < j\leq n}(x_j-x_i)=0$. The derivations $\alpha_1,\ldots,\alpha_n$ of $\kk[x_1,\ldots,x_n]$, the coordinate ring of $V$, defined for $k\in\nset{n}$~by \begin{align*} \alpha_1(x_k) = x_k, && \alpha_i(x_k) = \begin{cases*} 0 & if $i>k$; \\ x_k\prod_{j< i}(x_k-x_j) & if $i\leq k$ \end{cases*} \quad\text{if $i\geq2$} \end{align*} belong to $\Der\tilde\B_n$. Thanks to Saito's Criterion, $(\alpha_1,\ldots,\alpha_n)$ is a basis of $\Der\tilde\B_n$. \end{Example} \begin{Example} Let $V$ be a finite-dimensional vector space. We say that $\sigma\in\GL(V)$ is a pseudo-reflection if $\sigma$ is of finite order and fixes a hyperplane $H_\sigma$ of $V$, and it is a reflection if this order is~$2$. A finite subgroup $G$ of the group of automorphisms of $V$ is a \emph{(pseudo-) reflection group} if it is generated by (pseudo-) reflections, and the set of reflecting hyperplanes~$\A(G)$ of a reflection group $G$ is the \emph{reflection arrangement} of~$G$. It is a result by H.\,Terao in~\cite{terao} that every reflection arrangement over $\kk = \CC$ is free. Consider the $n$th braid arrangement $\B_n$ of Example~\ref{ex:braids:notilde}: identifying the reflection with respect to the plane $x_i-x_j=0$ with the permutation $(ij)\in\SS_n$ we see that $\B_n=\A(\SS_n)$. \end{Example} \begin{Example}\label{ex:wreath} Let $r,n\geq1$ and consider the arrangement $\A_r^n$ in $V=\kk^n$ defined by \[\label{eq:Q:wreath} 0 = x_1\ldots x_n\prod_{1\leq i<j\leq n}(x_j^r-x_i^r). \] Taking $r=1$ we see that $\A_1^n=\tilde\B_n$ for every $n$. When $r\geq2$, let $G=C_r\wr\SS_n$ be the wreath product of the cyclic group $C_r$ of order $r$ and the symmetric group $\SS_n$. We see that $\A_r^n$ is the reflection arrangement of $G$, that is, $\A_r^n = \A(C_r\wr\SS_n)$. There is a well-known basis of $\Der\A_r^n$ in \cite{OT}*{\S B} that consists of the derivations $\theta_1,\ldots,\theta_n$ of $S=\kk[x_1,\ldots,x_n]$ defined for $1\leq k,m\leq n$ by \( \theta_m(x_k) = x_k^{(m-1)r+1}. \) Consider the derivations $\alpha_1,\ldots,\alpha_n$ of $S$ defined for $1\leq k\leq n$ and $2\leq m\leq n$ by \begin{align}\label{eq:symmetric} & \alpha_1(x_k) = x_k, &&\alpha_m(x_k) = x_k\prod_{i=1}^{m-1}(x_k^r - x_i^r). \end{align} These derivations belong to $\Der\A_r^n$: evidently $\alpha_1=\theta_1$, and if $m\geq2$ then \[ \alpha_m = \theta_m - s_1\theta_{m-1} + \ldots +(-1)^{m-1}s_{m-1}\theta_1, \] where $s_j = \sum_{1\leq i_1<\ldots<i_{j}\leq m-1}x_{i_1}^r\cdots x_{i_j}^r$ is the $j$th elementary symmetric polynomial in the variables $x_1^r,\ldots,x_{m-1}^r$ for $1\leq j\leq m-1$. Indeed, for $1\leq k\leq n$, \begin{align*} \MoveEqLeft \left( \theta_m - s_1\theta_{m-1} + \ldots +(-1)^{m-1}s_{m-1}\theta_1 \right) (x_k) \\ & = x_k^{(m-1)r+1} - s_1x_k^{(m-2)r+1} + \ldots + (-1)^{m-1}s_{m-1}x_k = x_k\prod_{i=1}^{m-1}(x_k^r - x_i^r), \end{align*} which equals $\alpha_m(x_k)$. Saito's matrix $\left(\alpha_m(x_k) \right)$ is triangular and its determinant is \[ \prod_{k=1}^n\alpha_k(x_k) = \prod_{k=1}^n x_k\prod_{i=1}^{k-1}(x_k^r - x_i^r) = x_1\ldots x_n\prod_{1\leq i<k\leq n}(x_k^r-x_i^r). \] It follows from Saito's criterion that $\alpha_1,\ldots,\alpha_n$ is a basis of $\Der\A_r^n$. \end{Example} \begin{Example}\label{ex:Ar} Let $r\geq1$. The arrangement $\A_r\coloneqq\A_r^3$ is defined by the nullity of \[ Q(\A_r) \coloneqq x_1x_2x_3(x_2^r-x_1^r)(x_3^r-x_1^r)(x_3^r-x_2^r).
\] The basis of $\Der\A_{r}$ in~\eqref{eq:symmetric} consist in this case of the derivations $\alpha_1,\alpha_2,\alpha_3$ of $S= \kk[x_1,x_2,x_3]$ with Saito's matrix \[ \begin{pmatrix} x_1 & x_2 & x_3 \\ 0 & x_2(x_2^r-x_1^r) & x_3(x_3^r-x_1^r) \\ 0 & 0 & x_3(x_3^r-x_2^r)(x_3^r-x_1^r) \end{pmatrix} \] \end{Example} \subsection{Lie--Rinehart algebras}\label{subsec:l-r} \begin{Definition} Let $S$ and $(L,[-,-])$ be, respectively, a commutative and a Lie algebra endowed with a morphism of Lie algebras $L\to\Der(S)$ that we write $\alpha\mapsto\alpha_S$ and a left $S$-module structure on $L$ which we simply denote by juxtaposition. We say that the pair $(S,L)$ is a \emph{Lie--Rinehart algebra}, or that $L$ is a Lie--Rinehart algebra over $S$, if the equalities \begin{align} &(s\alpha )_S(t) = s\alpha_S(t), &&[\alpha,s\beta] = s[\alpha,\beta] + \alpha_S(s)\beta \end{align} hold whenever $s,t\in S$ and $\alpha, \beta \in L$. \end{Definition} If $S$ is a commutative algebra and $L$ is a Lie-subalgebra of the Lie algebra of derivations $\Der S $ that is at the same time an $S$-submodule then $L$ is an Lie--Rinehart algebra over $S$. This applies to our situation of interest: \begin{Proposition} Let $\A$ be a hyperplane arrangement in a vector space $V$. The Lie algebra of derivations $\Der\A$ of $\A$ is a Lie--Rinehart algebra over the algebra of coordinates of $V$. \end{Proposition} \begin{Definition}\label{def:conditions} Let $n\geq1$, $S=\kk[x_1,\ldots,x_n]$ and $L$ be a subset of derivations of $S$ such that $(S,L)$ is a Lie-Rinehart algebra.\begin{thmlist} \item We call $L$ \emph{triangularizable} if $L$ is a free $S$-module that admits a basis given by derivations $\alpha_1,\ldots,\alpha_n$ satisfying the two conditions \begin{align*} &\alpha_i(x_j) = 0 \quad\text{if $i>j$,} &&\alpha_1(x_1)\cdots\alpha_n(x_n)\neq0. \end{align*} \item We say that $L$ satisfies the \emph{Bézout condition} if in addition for each $k$ in $\nset{n-1}$, the element $\alpha_k(x_k)$ of $S$ is coprime with the determinant of the matrix \[ \begin{pmatrix} \alpha_k(x_{k+1}) & \alpha_{k+1}(x_{k+1}) & 0 & \cdots & 0 \\ \vdots & \vdots & \ddots & \ddots & \vdots \\ \alpha_k(x_{n-2}) & \alpha_{k+1}(x_{n-2}) & \cdots & \alpha_{n-2}(x_{n-2}) & 0 \\ \alpha_{k}(x_{n-1}) & \alpha_{k+1}(x_{n-1}) &\cdots &\cdots & \alpha_{n-1}(x_{n-1}) \\ \alpha_{k}(x_{n}) &\alpha_{k+1}(x_{n}) & \cdots& \cdots & \alpha_{n-1}(x_{n}) \end{pmatrix} \] \end{thmlist} \end{Definition} \begin{Example}\label{ex:braids:2conditions} For any $n\geq2$ the Lie--Rinehart algebra $\Der\B_n$ is triangular and satisfies the Bézout condition with the basis~$\left\{ \theta_1,\ldots,\theta_n \right\}$ given in Example~\ref{ex:braids:notilde}. The same goes to $\Der\tilde\B_n$ with the basis given in Example~\ref{ex:braids:tilde}. \end{Example} \begin{Example}\label{ex:wreath:triangular} Let $r,n\geq1$. The Lie--Rinehart algebra associated to $\A(C_r\wr\SS_n)$ is triangularizable, as follows immediately from Example~\ref{ex:wreath}. \end{Example} \begin{Example} Let $r\geq1$. The arrangement $\A_r=\A(C_r\wr\SS_3)$ from Example~\ref{ex:Ar} is triangularizable thanks to Example~\ref{ex:wreath}. 
Moreover, it satisfies the Bézout condition: indeed, $\alpha_2(x_2) = x_2 (x_2^r-x_1^r)$ is coprime with $\alpha_2(x_3) = x_3 (x_3^r-x_1^r)$, and the determinant \[ \det \begin{pmatrix} \alpha_1(x_2) & \alpha_1(x_3) \\ \alpha_2(x_2) & \alpha_2(x_3) \end{pmatrix} = \det \begin{pmatrix} x_2 & x_3 \\ x_2 (x_2^r-x_1^r) & x_3 (x_3^r-x_1^r) \end{pmatrix} = x_2 x_3 \left( x_3^r-x_2^r \right) \] is coprime with $\alpha_1(x_1) = x_1 $. \end{Example} \subsection{Differential operators associated to an arrangement} Remember from J.\,C.\,McConnell and J.\,C.\,Robson's~\cite{MR}*{\S 15} that the algebra~$\Diff S$ of differential operators on $S=\kk[x_1,\ldots,x_n]$ is the subalgebra of $\End S$ generated by $\Der S$ and the set of maps given by left multiplication by elements of $S$. Recall as well from~\cite{MR}*{\S5} that if $R$ is an algebra and $I\subset R$ is a right ideal, the largest subalgebra $\mathbb I_R(I)$ of $R$ that contains $I$ as an ideal ---the \emph{idealizer} of $I$ in $R$--- is~$\{r\in R:rI\subset I\}$. \begin{Definition} Let $\A$ be a central arrangement of hyperplanes with defining polynomial~$Q$. The \emph{algebra of differential operators tangent to the arrangement}~$\A$ is \[ \Diff(\A) = \bigcap_{n\geq1} \mathbb I_{\Diff(S)}(Q^n\Diff (S)). \] \end{Definition} As seen in \cite{calderon} for $\kk=\CC$ or in~\cite{differential-arrangements} for $\kk$ of characteristic zero, if $\A$ is free then the algebra~$\Diff\A$ coincides with the sub-associative algebra of~$\End(S)$ generated by $\Der\A$ and the set of maps given by left multiplication by elements of $S$. \begin{Example} The arrangement $\A = \tilde\B_2$ in $\kk^2$ with equation $0=xy(y-x)$ admits, by~\cite{kola}*{\S5}, a presentation of $\Diff\A$ adapted from~\cite{ksa}: the two derivations \begin{align*} &E=x\partial_x + y\partial_y, && D = y(y-x)\partial_y \end{align*} of $\kk[x,y]$ form a basis of $\Der\A$, and the algebra~$\Diff\A$ is generated by the symbols $x$, $y$, $D$ and $E$ subject to the relations \begin{align} & [y,x] = 0, \\ & [D,x] = 0, && [D,y] = y(y-x), \\ & [E,x] = x, && [E,y] = y, && [E,D] = D. \end{align} \end{Example} Given a Lie-Rinehart algebra $(S,L)$, a \emph{Lie--Rinehart module} ---or $(S,L)$-module--- is a vector space $M$ which is at the same time an $S$-module and an $L$-Lie module in such a way that if $s\in S$, $\alpha\in L$ and $m\in M$ then \begin{align}\label{eq:L-Rmodule} &\left( s\alpha \right)\cdot m = s\cdot(\alpha\cdot m), &&\alpha\cdot (s\cdot m) = (s\alpha)\cdot m + \alpha_S(s)\cdot m. \end{align} \begin{Theorem}[\cite{hueb}*{\S1}] Let $(S,L)$ be a Lie-Rinehart algebra. \begin{thmlist} \item There exists an associative algebra $U=U(S,L)$, the \emph{universal enveloping algebra of $(S,L)$}, endowed with a morphism of algebras $i:S\to U$ and a morphism of Lie algebras $j:L\to U$ satisfying, for $s\in S$ and $\alpha\in L$, \begin{align}\label{eq:universal} &i(s)j(\alpha) = j(s\alpha), && j(\alpha)i(s) -i(s)j(\alpha) = i(\alpha_S(s)). \end{align} \item The algebra $U$ is universal with these properties. \item The category of $U$-modules is isomorphic to the category of $(S,L)$-modules. \end{thmlist} \end{Theorem} \begin{Example}\label{ex:weyl} If $S = \kk[x_1 , \ldots , x_n ]$ then the full Lie algebra of derivations $L = \Der S$ is a Lie–Rinehart algebra and its enveloping algebra is isomorphic to the algebra of differential operators $\Diff(S) = A_n$, the $n$th Weyl algebra. 
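Concretely, for $n=1$, writing $x=x_1$, the enveloping algebra $U(\kk[x],\Der\kk[x])$ is generated by $x$ and $\partial=\tfrac{d}{dx}$ subject to the single relation
\[
[\partial,x]=1,
\]
which is the usual presentation of the first Weyl algebra $A_1$.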
\end{Example} The following result ---\cite{narvaez}*{\S12} when $\kk=\CC$ and~\cite{tesis}*{Theorem 2.19} for $\kk$ of characteristic zero--- is our motivation to consider Lie--Rinehart algebras in the algebraic aspects of hyperplane arrangements. \begin{Theorem} \label{thm:diff-LR} Let $\A$ be a free hyperplane arrangement on a vector space~$V$ and let $S$ be the algebra of coordinate functions on $V$. There is a canonical isomorphism of algebras \[ U(S,\Der\A)\cong\D(\A). \] \end{Theorem} \begin{Proposition}\label{prop:braidshh} For $n\geq1$ there is an isomorphism of algebras \[\label{eq:diffBn} \Diff \B_{n+1} \cong A_1 \otimes \Diff\tilde\B_n. \] \end{Proposition} \begin{proof} Let $n\geq1$, $S= \kk[x_0,x_1,\ldots,x_n]$, $T= \kk[y_1,\ldots,y_n]$ and observe that the unique morphism of algebras $\kk[z]\otimes T\to S$ given by $z\mapsto x_0$ and $y_k\mapsto x_k-x_0$ if $k\geq1$ is an isomorphism ---we are identifying $z$ with $z\otimes 1$ and $y_k$ with $1\otimes y_k$. The derivations in the basis of $\Der\B_{n+1}$ given in Example~\ref{ex:braids:notilde}, with the variables ordered as $x_0,x_1,\ldots,x_n$, induce derivations $\tilde\theta_1,\ldots,\tilde\theta_{n+1}$ on $\kk[z]\otimes T$. For $1\leq i\leq n+1$ and $1\leq k \leq n$ these derivations satisfy \begin{align*} & \tilde \theta_{1} : \begin{cases*} z\mapsto 1 ; \\ y_k \mapsto 0 ; \end{cases*} && \tilde \theta_{2} : \begin{cases*} z\mapsto 0 ; \\ y_k \mapsto y_k; \end{cases*} && \tilde \theta_{i} : \begin{cases*} z\mapsto 0 ; \\ y_k \mapsto y_k \prod_{j=1}^{i-2}(y_k-y_j) \end{cases*} \quad\text{if $i\geq3$.} \end{align*} In this way the Lie--Rinehart algebra $(S,\Der\B_{n+1})$ decomposes as the product of the Lie--Rinehart algebras $(\kk[z],\Der\kk[z])$ and $(T,\Der\tilde\B_n)$: the derivation $\tilde\theta_1$ corresponds to $\tfrac{d}{dz}$, and the derivations $\tilde\theta_{i+1}$ with $i\geq1$ correspond to the $\alpha_i$'s in Example~\ref{ex:braids:tilde}. It follows that the enveloping algebra of the Lie--Rinehart pair $(S,\Der\B_{n+1})$ is isomorphic to the tensor product $U(\kk[z],\Der\kk[z])\otimes U(T,\Der\tilde\B_n)$. The result is now a consequence of Theorem~\ref{thm:diff-LR} and Example~\ref{ex:weyl}. \end{proof} \begin{Definition}\label{def:ort} Let $n\geq1$, $ S=\kk[x_1,\ldots,x_n]$ and $L$ a triangularizable Lie--Rinehart algebra over $S$ with basis $(\alpha_1,\ldots,\alpha_n)$. We say that $L$ satisfies the \emph{orthogonality condition} if there exists a family $(u_1,\ldots,u_n)$ of elements of $U$ and $f_k^i\in S$ for $1\leq k \leq n$ and $1\leq i\leq n-1$ such that \begin{align*} & u_k = \alpha_n + \sum_{i=1}^{n-1}f_k^i\alpha_i, && [u_k,x_l]=0 \quad\text{if $k\neq l$.} \end{align*} \end{Definition} \begin{Example}\label{ex:braids:orthogonality} Consider for $n\geq2$ the Lie--Rinehart algebra $\Der\B_n$ from Example~\ref{ex:braids:notilde}. The family $( u_1,\ldots,u_n )$ of elements of~$U$ defined for $k\in\nset{n}$ by \[ u_k = \sum_{i=k}^n(-1)^{n-i}\prod_{j=i+1}^n\left( x_j-x_k \right)\theta_i \] is such that $[u_k,x_l]=0$ whenever $l\neq k$, whence the orthogonality condition is satisfied. The Lie--Rinehart algebra $\Der\tilde\B_n$ from Example~\ref{ex:braids:tilde} also satisfies this condition with a similar choice of orthogonal~elements. \end{Example} Let $r\geq1$ and $\A_r=\A(C_r\wr\SS_3)$. Let $S=\kk[x_1,x_2,x_3]$ and $L=\Der\A_{r}$ be the Lie-Rinehart algebra associated to $\A_{r}$. The derivations $\left\{ \alpha_1,\alpha_2,\alpha_3 \right\}$ given in Example~\ref{ex:Ar} make $L$ a triangularizable Lie--Rinehart algebra that satisfies the Bézout condition. We identify the universal enveloping algebra of $L$ with $\Diff\A_r$.
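\begin{Remark} Before verifying the orthogonality condition for $\A_r$, let us illustrate it in the smallest braid case. Take $n=2$ and $L=\Der\B_2$ with the basis $(\theta_1,\theta_2)$ of Example~\ref{ex:braids:notilde}, so that $\theta_1(x_1)=\theta_1(x_2)=1$, $\theta_2(x_1)=0$ and $\theta_2(x_2)=x_2-x_1$. The family of Example~\ref{ex:braids:orthogonality} is then $u_1=\theta_2-(x_2-x_1)\theta_1$ and $u_2=\theta_2$, and indeed
\[
[u_1,x_2]=\theta_2(x_2)-(x_2-x_1)\theta_1(x_2)=(x_2-x_1)-(x_2-x_1)=0,
\qquad
[u_2,x_1]=\theta_2(x_1)=0.
\]
\end{Remark}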
\begin{Proposition}\label{prop:Ar:basis} The Lie--Rinehart algebra associated to $\A_r$ together with the family $\left\{ u_1,u_2,u_3\right\}$ of elements of $\Diff\A_r$ defined by \begin{align*} & u_1 = \alpha_3 -(x_3^r-x_1^r)\alpha_2 + (x_3^r-x_1^r)(x_2^r-x_1^r)\alpha_1, && u_2 = \alpha_3 - ( x_3^r-x_2^r )\alpha_2, && u_3 = \alpha_3 \end{align*} satisfies the orthogonality condition. \end{Proposition} \begin{proof} The condition $[u_k,x_l]=0$ if $k,l\in\nset{3}$ and $l\neq k$ holds true whenever $l<k$, since $u_k$ only involves the derivations $\alpha_i$ with $i\geq k$ and $\alpha_i(x_l)=0$ for $l<i$, so we suppose that $l>k$. If $k=3$ there is nothing to see; the case $k=2$ amounts to the verification that \begin{align*} [u_2,x_3] & =\alpha_3(x_3) - (x_3^r-x_2^r)\alpha_2(x_3) \\ &= x_3(x_3^r-x_2^r)(x_3^r-x_1^r)- ( x_3^r-x_2^r ) x_3(x_3^r-x_1^r) = 0 \end{align*} and for $k=1$ we have \begin{align*} [u_1,x_2] & = \alpha_3(x_2) -(x_3^r-x_1^r)\alpha_2(x_2) + (x_3^r-x_1^r)(x_2^r-x_1^r)\alpha_1(x_2) \\ & = -(x_3^r-x_1^r)x_2(x_2^r-x_1^r) + (x_3^r-x_1^r)(x_2^r-x_1^r)x_2 = 0 \shortintertext{and} [u_1,x_3] & = \alpha_3(x_3) -(x_3^r-x_1^r)\alpha_2(x_3) + (x_3^r-x_1^r)(x_2^r-x_1^r)\alpha_1(x_3) \\ & = x_3(x_3^r-x_2^r)(x_3^r-x_1^r) -(x_3^r-x_1^r)x_3(x_3^r-x_1^r) + (x_3^r-x_1^r)(x_2^r-x_1^r)x_3 = 0. \end{align*} \end{proof} \subsection{Cohomology} Given an associative algebra $A$ the (associative) enveloping algebra $A^e$ is the vector space $A\otimes A$ endowed with the product~$\cdot$ defined by $\left( a_1\otimes a_2 \right)\cdot \left( b_1\otimes b_2 \right)= a_1b_1\otimes b_2a_2$, so that the category of left $A^e$-modules is equivalent to that of $A$-bimodules. The \emph{Hochschild cohomology} of $A$ with values on an $A^e$-module $M$ is \[ H^\*(A,M)\coloneqq \Ext_{A^e}^\*(A,M). \] When $M=A$ we write $\HH^\*(A)\coloneqq H^\*(A,M)$. C.\,Weibel's book~\cite{weibel} may serve as a general reference on this subject. \begin{Definition}[\cite{rinehart}]\label{def:cohom:l-r} Let $(S,L)$ be a Lie--Rinehart algebra with enveloping algebra $U$ and let $N$ be an $U$-module. The \emph{Lie--Rinehart cohomology of~$(S,L)$ with values on $N$} is \[ H_S^\*(L,N)\coloneqq\Ext^\*_{U}(S,N). \] \end{Definition} \begin{Remark}[\cite{rinehart}]\label{rk:ch-ei} In the setting of Definition~\ref{def:cohom:l-r} above, suppose that $L$ is $S$-projective and let $\Lambda_S^\*L$ denote the exterior algebra of $L$ over $S$. The complex $\Hom_S(\Lambda_S^\bullet L,N)$ with Chevalley--Eilenberg differentials computes~$H_S^\*(L,N)$. \end{Remark} \begin{Theorem}[\cite{kola}]\label{thm:spectral} Let $(S,L)$ be a Lie--Rinehart algebra with enveloping algebra~$U$, and suppose that $L$ is an $S$-projective module. There exist a $U$-module structure on $H^\*(S,U)$ and a first-quadrant spectral sequence $E_\*$ converging to $\HH^\bullet(U)$ with second page \[ E_2^{p,q} = H_S^p(L,H^q (S,U)). \] \end{Theorem} \begin{Proposition}\label{prop:Diff:HH} There are isomorphisms $\HH^\*(\Diff\B_{n+1})\cong\HH^\*(\Diff\tilde\B_n)$ for any $n\geq1$. \end{Proposition} \begin{proof} This is a consequence of applying the K\"unneth formula for Hochschild cohomology as in H.\,Cartan and S.\,Eilenberg's~\cite{cartan-eilenberg}*{XI.3.I} to the isomorphism $ \Diff \B_{n+1} \cong A_1 \otimes \Diff\tilde\B_n$ in Proposition~\ref{prop:braidshh} and the observation that $\HH^0(A_1)\cong \kk$ and $\HH^i(A_1)=0$ if $i\neq 0$. \end{proof} \begin{Corollary}\label{coro:hhB3} The Hilbert series of the Hochschild cohomology of $\Diff\B_3$ is \[ h(t)=1 + 3t + 6t^2 + 4t^3.
\] \end{Corollary} \begin{proof} Proposition~\ref{prop:Diff:HH} particularizes to $\HH^\*(\Diff\B_3)\cong\HH^\*(\Diff\tilde\B_2)$, and then \cite{kola}*{Corollary 5.8} reads \( h_{\HH^*(\Diff\B_3)} = h_{\HH^*(\Diff\tilde\B_2)} = 1 + 3t + 6t^2 + 4t^3. \) \end{proof} \section{Combinatorics of the Koszul complex} We let $n\geq1$ and assume throughout this section that $(S,L)$ is a Lie--Rinehart algebra with $S=\kk[x_1,\ldots,x_n]$ and $L$ a free $S$-module with basis $\left( \alpha_1,\ldots,\alpha_n \right)$. Let $U=U(S,L)$ be its Lie--Rinehart enveloping algebra. To compute the Hochschild cohomology of $S$ we use the Koszul resolution of $S$ available in~\cite{weibel}*{\S 4.5}. \begin{Lemma}\label{lem:koszul} Let $W$ be the subspace of $S$ with basis $(x_1,\ldots,x_n)$. The complex $P_\*=S^e\otimes\Lambda^\* W$ with differentials $b_\* :P_\*\to P_{\*-1}$ defined for $s, t\in S$, $q\in\nset{n}$ and $1\leq i_1<\dots<i_{q}\leq n$ by \begin{align*} &b_q(s|t\otimes x_{i_1}\wedge \dots\wedge x_{i_q}) = \sum_{j=1}^q(-1)^{j+1} [(sx_{i_j}|t) -(s|x_{i_j}t) ]\otimes x_{i_1}\wedge\dots\wedge\check x_{i_j}\wedge\dots\wedge x_{i_q} \end{align*} and augmentation $\varepsilon : S^e\to S $ given by $\varepsilon(s|t) = st$ is a resolution of $S$ by free $S^e$-modules. The notation is the usual one: the symbol $|$ denotes the tensor product inside $S^e$ and $\check x_{i_j}$ means that $x_{i_j}$ is omitted. \end{Lemma} Through a classical adjunction, the complex $\Hom_{S^e}(P_\*,U)$ is isomorphic to \[\label{eq:koszul-adjunction} \Hom(\Lambda^\*W,U) \cong U\otimes \Hom(\Lambda^\*W,\kk) \eqqcolon \X^\*. \] We compute the Hochschild cohomology $H^\bullet(S,U)$ from the complex $(\X^\*,d^\*)$. For each $q$ in $\nset{0,n}$ the basis \( \{ \xx_{k_1}\wedge\ldots\wedge\xx_{k_q} : 1\leq k_1<\ldots<k_q\leq n\} \) of $\Hom(\Lambda^qW,\kk)$ dual to the basis $\{x_{k_1}\wedge\ldots\wedge x_{k_q}\}$ of $\Lambda^qW$ induces a basis of $\X^q$ as a $U$-module. Write $\alpha^I\coloneqq \alpha_{n}^{i_{n}}\ldots\alpha_{1}^{i_1}$ for each $n$-tuple of nonnegative integers $I=(i_{n},\dots,i_{1})$, and call $\abs{I} = i_{n}+\ldots+i_{1}$ the \emph{order} of $I$. A result \textit{à la} Poincaré-Birkhoff-Witt in~\cite{rinehart}*{\S3} assures that the set \[\label{eq:pbw} \left\{ \alpha^I : I\in\NN^n\right\} \] is an $S$-basis of $U$. Moreover, $U$ is a filtered algebra, with filtration $(F_pU:p\geq0)$ given by \emph{the order of differential operators}: \( F_pU = \langle f\alpha^I : f\in S, \abs I\leq p \rangle \) for each~$p\geq0$. \begin{Proposition}\label{prop:X-PBW} Let $q\in\{0,\ldots,n\}$. \begin{thmlist} \item The set formed by \( \alpha^I\xx_{k_1}\wedge\cdots\wedge\xx_{k_q} \) with $I\in\NN^n$ and $1\leq k_1<\ldots<k_q\leq n$ is an $S$-basis of $\X^q$. \item There is a filtration $(F_p\X^q:p\geq0)$ of vector spaces on $\X^q$ determined for each $p\geq0$ by \[\label{eq:filtU} F_p\X^q = \langle f\alpha^I\xx_{k_1}\wedge\cdots\wedge\xx_{k_q} : f\in S, 1\leq k_1<\ldots<k_q\leq n, I\in\NN^n \text{ such that } \abs I\leq p \rangle. \] \end{thmlist} \end{Proposition} \begin{proof} In view of \eqref{eq:koszul-adjunction}, for each $q$ the $U$-module $\X^q$ admits $ \left\{ \xx_{k_1}\wedge\cdots\wedge\xx_{k_q} : 1\leq k_1<\ldots<k_q\leq n \right\} $ as a basis. The claim follows from this and the $S$-basis of $U$ in~\eqref{eq:pbw} above. 
\end{proof} The differentials $d^q:\X^q\to\X^{q+1}$ induced by $b_\* :P_\*\to P_{\*-1}$ satisfy for $q=0,1$ \begin{align*} & d^0 : u \mapsto \sum_{k=1}^n[u,x_k]\xx_k, && d^1 : \sum_{k=1}^nu_k\xx_k \mapsto \sum_{1\leq k<l\leq n} \left( [u_k,x_l]-[u_l,x_k] \right)\xx_k\wedge\xx_l. \end{align*} Given $m\in\nset{n}$ we denote by $e_m$ the $n$-tuple whose components are all zero except for the~$(n-m)$th, where there is a~$1$. \begin{Lemma}\label{lem:axkalphai} Let $a = \sum_{\abs{I}=p}f^I\alpha^I$ for $f^I\in S$ with $I\in\NN^n$. If $k\in\nset{n}$ and $J=(j_{n},\ldots,j_{1})\in\NN^n$ has order $p-1$ then the component of $[a,x_k]$ in $\alpha^J$ is \[ \sum_{m=1}^{n} (j_m+1)\alpha_m(x_k)f^{J+e_m}. \] \end{Lemma} \begin{proof} If $I=(i_{n},\ldots,i_{1})\in\NN^n$ has order $p$ then \begin{align*} [f^I\alpha^I,x_k] &\equiv i_{n}f^I\alpha_{n}(x_k)\alpha^{I-e_{n}} + \ldots + i_{1}f^{I}\alpha_{1}(x_k)\alpha^{I-e_{1}} \mod F_{p-2}U. \end{align*} If there exists a monomial in this expression belonging to $S\alpha^J$ then there exists $m\in\nset{n}$ such that $I-e_m=J$. This happens when the component of $[f^I\alpha^I,x_k]$ in $\alpha^J$ is \[ i_m\alpha_m(x_k)f^{I} = (j_m+1)\alpha_m(x_k)f^{J+e_m}, \] and therefore \( [a,x_k] \equiv \sum_{\abs J = p-1} \sum_{m=1}^{n} (j_m+1)\alpha_m(x_k)f^{J+e_m} \alpha^J \) modulo $F_{p-2}U$. \end{proof} For $g^1,\ldots,g^n\in S $ and $f^1=(f^1_1,\ldots,f^1_n),\ldots,f^n=(f^n_1,\ldots,f^n_n)\in S^{\times n }$ we let \begin{align}\label{eq:OmegaF1H0} & \Omega^0(g^{n},\ldots,g^{1}) \coloneqq \sum_{i=1}^{n}g^i\alpha_i \in F_1U, && \Omega^1(f^{n},\ldots,f^{1}) \coloneqq \sum_{l=1}^n\sum_{i=1}^{n}f_k^i\alpha_i\xx_k \in F_1\X^1. \end{align} \begin{Proposition}\label{prop:differentials} Let $p\geq0$, $u\in F_pU$ and $\omega\in F_p\X^1$. \begin{thmlist} \item\label{prop:differentials0} If $\{f^I : I\in \NN^n, \abs I = p\}\subset S $ is such that $ u \equiv \sum_{\abs{I} =p} f^I\alpha^I \mod F_{p-1}U $~then \[ d^0(u) \equiv \sum_{\abs{J}=p-1} d^0 \left( \Omega^0\left( (j_{n}+1)f^{J+e_{n}},(j_{n-1}+1)f^{J+e_{n-1}},\ldots,(j_{1}+1)f^{J+e_{1}} \right) \right) \alpha^J \] modulo $F_{p-2}\X^1$. \item\label{prop:differentials1} If $ \omega \equiv \sum_{l=1}^n\sum_{\abs{I} = p} f_l^I\alpha^I \xx_i \mod F_{p-1}\X^1 $ for $\left\{f^I_l : I\in\NN^n , \abs I = p, l\in\nset{n}\right\}\subset S $ then \[ d^1(\omega) \equiv \sum_{\abs{J}=p-1} d^1 \left( \Omega^1\left( (j_{n}+1)f^{J+e_{n}},(j_{n-1}+1)f^{J+e_{n-1}},\ldots,(j_{1}+1)f^{J+e_{1}} \right) \right) \alpha^J \] modulo $F_{p-2}\X^2$. \end{thmlist} \end{Proposition} \begin{proof} To prove~\ref{prop:differentials0} it suffices to see that the desired equality holds in each coefficient of the $S$-basis $\left( \alpha^J\xx_k : J\in\NN^n, k\in\nset{n} \right)$ of $\X^1$ given in Proposition~\ref{prop:X-PBW}. Let then $J=(j_{n},\ldots,j_{1})\in\NN^n$ of order $p-1$ and $k\in\nset{n}$. Thanks to Lemma~\ref{lem:axkalphai} the component in $\alpha^J\xx_k$ of $d^0(u)=\sum_{l=1}^n[u,x_l]\xx_l$~is \[\label{eq:differentials0-omegap} \sum_{m=1}^{n} (j_m+1)\alpha_m(x_k)f^{J+e_m}. \] On the other hand, given $f^{n},\ldots,f^{1}\in S$ a direct calculation shows that the component in $\xx_k$ of $d^0\left( \Omega^0\left(f^{n},\ldots,f^{1}\right) \right)$ is \( \sum_{i={1}}^{n}f^i\alpha_i(x_k). \) It follows that the component of \[ d^0\left( \Omega^0\left( (j_{n} + 1)f^{J+e_{n}}, \ldots, (j_{1} +1)f^{J+e_{1}} \right) \right) \] in $\xx_k$ is equal to~\eqref{eq:differentials0-omegap}, which is tantamount to what we wanted to see. 
The proof of~\ref{prop:differentials1} is completely analogous. \end{proof} \section{Cohomologies in degree zero and centers} \label{sec:h0} In this section $(S,L)$ is a triangularizable Lie algebra: $S=\kk[x_1,\ldots,x_n]$ for some $n\geq1$ and $L$ is a sub-$S$-module of derivations of $S$ with a basis given by derivations $\alpha_1,\ldots,\alpha_n$ that satisfy $\alpha_i(x_j) = 0$ if $i>j$ and $\alpha_i(x_i)\neq0$ for every $i\in\nset{n}$. Let $U$ be the enveloping algebra of $(S,L)$. \subsection{The cohomology of $S$ with values on $U$} The Hochschild cohomology $H^\*(S,U)$ is the cohomology of the complex~$(\X^\*,d^\*)$ of~\eqref{eq:koszul-adjunction}. \begin{Lemma}\label{lem:h0key} The restriction of $d^0:\X^0\to\X^1$ to $F_1\X^0$ has kernel $F_0\X^0$. \end{Lemma} \begin{proof} It is evident that $F_0\X^0=S$ is contained in~$\ker d^0$. Let $u\in F_1U$ and $f^{1},\ldots,f^{n}\in S$ such that $ u\equiv \sum_{i=1}^{n}f^i\alpha_i$ modulo $S$. We examine the equations $d^0(u)(1|x_l|1)=0$, that is, $[u,x_l]=0$ for each $1\leq l\leq n$. We first observe that \[ 0 = [u,x_1] = \sum_{i=1}^{n}f^i\alpha_i(x_1) = f^{1}\alpha_{1}(x_1), \] and then $f^1=0$. Proceeding inductively on $k$, we assume that $ u= \sum_{i = k }^{n}f^i\alpha_i$ and compute \[ 0 = [u,x_k] = f^{k}\alpha_{k}(x_k) + \ldots + f^{n}\alpha_{n}(x_k) = f^k\alpha_k(x_k). \] We deduce that $f^{k}=0$ and conclude that $u\in S$. \end{proof} \begin{Proposition}\label{prop:h0key} If $p>0$ and $u\in F_pU$ are such that $d^0(u) \equiv 0$ modulo $F_{p-2}\X^1$ then $u\in F_{p-1}U$. \end{Proposition} \begin{proof} Let $\{ f^I : I\in\NN^n, \abs I = p \}\subset S$ be such that $u\equiv \sum_{\abs I=p} f^I\alpha^I$ modulo $F_{p-1}U$: thanks to Proposition~\ref{prop:differentials} we have that \[ d^0(u) \equiv \sum_{\abs{J=(j_{n},\ldots,j_{1})}=p-1} d \left( \Omega^0\left( (j_{n}+1)f^{J+e_{n}},\ldots,(j_{1}+1)f^{J+e_{1}} \right) \right) \alpha^J \mod F_{p-2}\X^1. \] We deduce that \( 0 = d^0 \left( \Omega^0\left( (j_{n}+1)f^{J+e_{n}},\ldots,(j_{1}+1)f^{J+e_{1}} \right) \right) \) for each $J$ with $\abs J = p-1$, provided that $p-1\geq0$. Thanks to Lemma~\ref{lem:h0key} we deduce that $0 =f^{J+e_{N-2}}=\ldots=f^{J+e_{-1}}$. Since we can write every $I\in\NN$ with $\abs I = p$ as $I = J + e_m$ for some $m\in\nset{n}$ we conclude that if $ p-1\geq0$ then $f^I = 0$ for every $I$ with $\abs I =p$. \end{proof} \begin{Proposition}\label{prop:h0} The inclusion $S\inc U=\X^0$ induces an isomorphism of graded $U$-modules $H^0(S,U) = S$ \end{Proposition} \begin{proof} Let us write $u = u_0+\ldots+ u_p$ with $u_q\in F_{q}U\setminus F_{q-1}U$ and $p\geq0$ maximal among those $q$ such that $u_q\neq 0$. As $d^0(u) = 0$ and $d^0(u_q)\in F_{q-1}\X^1$ for every $q\in\nset{0,p}$ we have that $d(u_p) \equiv 0 \mod F_{p-2}X^1$, and we may use Proposition~\ref{prop:h0key} to see that if $p>0$ then $u_p=0$. We conclude then that $p=0$, so that actually $u\in S$. We obtain the result with the evident observation that every element of $S$ is a $0$-cocycle in $\X^\*$. \end{proof} \subsection{The cohomology of~$U$} Our recent calculation of $H^0(S,U)$ leaves us just one step away from the zeroth Hochschild cohomology space of~$U$. \begin{Theorem}\label{thm:hh0} Let $(S,L)$ be a triangularizable Lie--Rinehart algebra with enveloping algebra~$U$. There is an isomorphism of vector spaces $\HH^0(U)\cong \kk$. 
\end{Theorem} \begin{proof} As a consequence of the immediate degeneracy of the spectral sequence of Theorem~\ref{thm:spectral} there is an isomorphism of vector spaces $\HH^0(U) \cong H_S^0(L,H^0(S,U))$. In view of Proposition~\ref{prop:h0}, this isomorphism amounts to \[ \HH^0(U) \cong H_S^0(L,S) \cong \{ f\in S : \alpha_i(f) = 0\text{ if $i\in\nset{n}$}\} \] Since $\alpha_i(f)=\sum_{j=1}^n \alpha_i(x_j)\partial_jf$ for $f\in S$, the condition that $\alpha_i(f) = 0$ if $i\in\nset{n}$ means that $(\partial_1f,\ldots,\partial_nf)$ belongs to the kernel of the Saito's matrix $M=\left( \alpha_i(x_j) \right)_{i,j=1}^n$. As this matrix is triangular and its determinant is nonzero, the condition $\alpha_i(f)=0$ for all $i\in\nset{n}$ is equivalent to $\partial_jf=0$ for all $j\in\nset{n}$, which is to say that $f\in\kk$. \end{proof} \begin{Corollary} Let $r,n\geq1$. The centers of $\Diff\left( \A(C_r\wr\SS_n) \right)$ and of $\Diff\B_n$ are~$\kk$. \end{Corollary} \begin{proof} The algebras considered have been shown to satisfy the hypotheses of Theorem~\ref{thm:hh0} in Examples~\ref{ex:braids:2conditions} and~\ref{ex:wreath}. \end{proof} \section{The first cohomology space \texorpdfstring{$H^1(S,U)$}{H1}} \label{sec:H1SU} We now restrict our attention to the case in which $n=3$. Let then $(S,L)$ be a Lie-Rinehart algebra with $S=\kk[x_1,x_2,x_3]$ and $L$ the free $S$-module generated by the subset of derivations $\{\alpha_1,\alpha_2,\alpha_3\}$ in $\Der S$. We suppose that $(S,L)$ is triangularizable, this is, $\alpha_i(x_j) = 0$ if $i>j$ and $\alpha_1(x_1)\alpha_2(x_2)\alpha_3(x_3)\neq0$, and that $(S,L)$ satisfies the Bézout condition: \begin{itemize} \item the polynomials $\alpha_2(x_2)$ and $\alpha_2(x_3)$ are coprime; \item the polynomials $\alpha_1(x_1)$ and \( \det \begin{psmallmatrix} \alpha_1(x_2) & \alpha_1(x_3) \\ \alpha_2(x_2) & \alpha_2(x_2) \end{psmallmatrix} \) are coprime. \end{itemize} \begin{Lemma}\label{lem:A3:H1SU:F1} Let $\left\{f_l^i: i\in\left\{ 1,2 \right\}, l\in\nset{3}\right\}\subset S$ and write $\omega=\sum_{l=1}^3\left( f_l^2\alpha_2 + f_l^1\alpha_1 \right)\xx_l\in\X^1$. If $\omega$ is a cocycle then there exist unique elements $g_{11},g_{12},g_{22}$ of $S$ such that $g_{11}\alpha_1(x_1) = f_1^1$, $g_{12}\alpha_1(x_1)=f_1^2$ and $g_{22}\alpha_2(x_2) = f_2^2-g_{12}\alpha_1(x_2)$. These elements satisfy \[ \omega \equiv d(\tfrac{1}{2}g_{11}\alpha_1^2 + g_{12}\alpha_2\alpha_1 +\tfrac{1}{2}g_{22}\alpha_2^2 ) \mod F_0\X^1. \] \end{Lemma} \begin{proof} The components in $\xx_1\wedge\xx_2$ and $\xx_1\wedge\xx_3$ of $d\omega=0$ tell us that $\alpha_1(x_j)f_1^1 +\alpha_2(x_j)f_1^j =\alpha_1(x_1)f_j^1 $ for $j\in\{2,3\}$. We can arrange these two equations as \[\label{eq:exA3:1-j} \begin{pmatrix} \alpha_1(x_2) & \alpha_2(x_2) \\ \alpha_1(x_3) & \alpha_2(x_3) \end{pmatrix} \begin{pmatrix} f_1^{1} \\ f_1^{2} \end{pmatrix} = \alpha_1(x_1 ) \begin{pmatrix} f_{2}^{1} \\ f_{3}^{1} \end{pmatrix} \] and then Cramer's rule tells us that if $i\in\{1,2\}$ then \( f_1^i\det \tilde M = \alpha_1(x_1) \det \tilde M_i, \) where $\tilde M$ is the matrix on the left hand of \eqref{eq:exA3:1-j} and $\tilde M_i$ is the matrix obtained by replacing the $i$th column of $\tilde M$ by $ \begin{psmallmatrix} f_{2}^{1} \\ f_{3}^{1} \end{psmallmatrix}. $ It follows that $\alpha_1(x_1)$ divides $f_1^i$ ---because it is coprime with $\det \tilde M$ in view of the Bézout hypothesis--- and then there exist $g_{11}$ and $g_{12}$ in $S$ such that $g_{1i}\alpha_1(x_1)=f_1^i$. 
Let $u_1\coloneqq \tfrac{1}{2}g_{11}\alpha_1^2 + g_{12}\alpha_2\alpha_1$ and $\tilde\omega \coloneqq \omega - d(u_1)$, and write $\tilde\omega=\sum_{l=1}^3\left( \tilde f_l^2\alpha_2 + \tilde f_l^1\alpha_1 \right)\xx_l$.~Since \begin{align} d(u_1):x_1 & \mapsto [u,x_1] \equiv g_{11}\alpha(x_1)\alpha_1 + g_{12}\alpha_1(x_1)\alpha_2 \mod S \\ &\hphantom{\mapsto [u,x_1]} \;= f_1^2\alpha_1 + f_1^2\alpha_2 , \\ x_2 & \mapsto [u,x_2] \equiv g_{11}\alpha_1(x_2)\alpha_1 + g_{12}\alpha_2(x_2)\alpha_1 + g_{12}\alpha_1(x_2)\alpha_2 \mod S \label{eq:exA3:du1:2} \end{align} we have $\tilde f_1^1=\tilde f_1^2=0$. Now, the equation $d\tilde\omega=0$ in $\xx_1\wedge\xx_2$ and $\xx_1\wedge\xx_3$ tells us, as in~\eqref{eq:exA3:1-j}, that $\tilde f_2^1=\tilde f_3^1=0$, and in $\xx_2\wedge\xx_3$ that $ \alpha_2(x_2)\tilde f_3^2 = \alpha_2(x_3)\tilde f_2^2 $. Thanks to the Bézout condition there exists $g_{22}\in S$ such that $g_{22}\alpha_2(x_2) = \tilde f_2^2$; in view of~\eqref{eq:exA3:du1:2}, $\tilde f_2^2$ is equal to $f_2^2-g_{12}\alpha_1(x_2)$. Put $u_2 \coloneqq \tfrac{1}{2} g_{22}\alpha_2^2$. We see that $d(u_2)(x_1)=0$ and that \begin{align} \MoveEqLeft d(u_2)(x_2) =[u_2,x_2] \equiv g_{22}\alpha_2(x_2) \alpha_2 \mod S \\ &= \tilde f_2^2\alpha_2. \end{align} The difference $\bar\omega\coloneqq\tilde\omega-d(u_2)$ is therefore a coboundary with no component modulo $S$ in $\xx_1$ nor in $\xx_2$, so we can write $\bar\omega \equiv \left( f^1\alpha_1 + f^2\alpha_2 \right)\xx_3\mod F_0\X^1$. Now, the equations that come from $\bar\omega$ being a coboundary are $0=f^1\alpha_1(x_1)$ in $\xx_1\wedge\xx_3$, from which $f^1=0$, and $0=f^2\alpha_2(x_2)$ in $\xx_2\wedge\xx_3$, whence finally $\bar\omega\in F_0\X^1$. We have in this way obtained that $\omega \equiv d(u_1+u_2)\mod F_0\X^1$, as desired. \end{proof} \begin{Proposition}\label{prop:A3:H1SU:Fp} Let $p\geq0$ and $\omega\in F_p\X^1$, and let $\left\{ f^{(i_3,i_2,i_1)}_l : l\in\nset{3}, i_3,i_2,i_1\geq0 \right\}\subset S $ such~that \[\label{eq:exA3:omega:p} \omega \equiv \sum_{l=1}^3\sum_{i_1+i_2+i_3= p} f_l^{(i_3,i_2,i_1)}\alpha^{(i_3,i_2,i_1)} \xx_l \mod F_{p-1}\X^1. \] If $\omega$ is a cocycle and $f^{(p,0,0)}_1=f^{(p,0,0)}_2=f^{(p,0,0)}_3=0$ then $\omega\in F_{p-1}\X^1$. \end{Proposition} \begin{proof} Let us prove by descending induction on $i$ from $p$ to $0$ that \[\label{claim:exA3:H1SU:Fp:induct} \claim{the cocycle $\omega$ is cohomologous modulo $F_{p-1}\X^1$ to a cocycle of the form~\eqref{eq:exA3:omega:p} with $f^{(i_{3},i_2,i_{1})}_l=0$ if $l\in\nset{3}$ and $i_{3} \geq i$. } \] Our hypotheses give us the truth of~\eqref{claim:exA3:H1SU:Fp:induct} for $i=p$. Suppose now that~\eqref{claim:exA3:H1SU:Fp:induct} is true for $p,\ldots,i$ and assume, without loss of generality, that $\omega$ \emph{is} of the form~\eqref{eq:exA3:omega:p} with $f^{(i_{3},i_2,i_{1})}_l=0$ if $l\in\nset{3}$, $i_1+i_2+i_3= p$ and~$i_{3} \geq i$. \begin{Lemma}\label{lem:A3:H1SU:Fp:induct2} Let $q\in\{0,\dots,p-i+1\}$. The cocycle $\omega$ is cohomologous modulo $F_{p-1}\X^1$ to a cocycle of the form~\eqref{eq:exA3:omega:p} with $f^{(i-1,p-i+1,0)}_l=\ldots=f^{(i-1,p-i+1-q,q)}_l=0$ and $f^{(i_{3},i_2,i_{1})}_l=0$ if $i_1+i_2+i_3= p$ and $i_{3} \geq i$ for every $l\in\nset{3}$. \end{Lemma} The auxiliary result above implies at once the truth of the inductive step of the proof of~\eqref{claim:exA3:H1SU:Fp:induct}, thus demonstrating Proposition~\ref{prop:A3:H1SU:Fp}. \end{proof} \begin{proof}[Proof of Lemma~\ref{lem:A3:H1SU:Fp:induct2}] Suppose that $q=0$. 
Equation $d\omega=0$ in its component $(i-1,p-i,0)$ reads, thanks to~Proposition~\ref{prop:differentials}, \[ 0 = d\left( \Omega^1(if^{(i,p-i,0)},(p-i+1)f^{(i-1,p-i+1,0)},f^{(i-1,p-i,1)}) \right) \] and the inductive hypothesis~\eqref{claim:exA3:H1SU:Fp:induct} tells us that $f^{(i,p-i,0)}=0$. Applying now Lemma~\ref{lem:A3:H1SU:F1} in we obtain that there are $g_{11},g_{12},g_{22} \in S $ such that \begin{align} & g_{11}\alpha_1(x_1) = f^{(i-1,p-i,1)}_1, \qquad g_{12}\alpha_1(x_1) =(p-i+1)f^{(i-1,p-i+1,0)}_1, \\ & g_{22}\alpha_2(x_2) = (p-i+1)f^{(i-1,p-i+1,0)}_2-g_{12}\alpha_1(x_2) \end{align} Let $v = (\tfrac{1}{2} g_{11}\alpha_1^2 + \tfrac{1}{(p-i+1)} g_{12}\alpha_2\alpha_1)\alpha^{(i-1,p-i,0)}$ and write $\tilde\omega=\omega-d(v)$, so that there exists $\left\{ \tilde f_l^{(i_3,i_2,i_1)} \right\}\subset S$ such that \[ \tilde\omega \equiv \sum_{l=3}^n\sum_{i_1+i_2+i_3= p} \tilde f_l^{(i_3,i_2,i_1)}\alpha^{(i_3,i_2,i_1)} \xx_l \mod F_{p-1}\X^1. \] Recall that $d(v):x_l \mapsto [v,x_l]$ for $l\in\nset{3}$. Since \begin{align} \MoveEqLeft[2] [v,x_1] \equiv (g_{11}\alpha_1(x_1)\alpha_1 + \tfrac{1}{(p-i+1)}g_{12}\alpha_1(x_1)\alpha_2)\alpha^{(i-1,p-i,0)} \mod F_{p-1}U \\ & = f^{(i-1,p-i,1)}_1\alpha^{(i-1,p-i,1)} + f^{(i-1,p-i+1,0)}_1\alpha^{(i-1,p-i+1,0)}, \end{align} we have that $\tilde f^{(i-1,p-i,1)}_1 =\tilde f^{(i-1,p-i+1,0)}_1=0$. Moreover, as $[v,x_2],[v,x_3]\in \bigoplus_{ i_3< i}S\alpha^{(i_3,i_2,i_1)}$ the coefficients $\tilde f_l^{(i_3,i_2,i_1)}$ are equal to $f_l^{(i_3,i_2,i_1)}$ and therefore to zero if $i_1+i_2+i_3= p$, $i_3\geq i$ and $l\in\nset{3}$. We now look at equation $d\tilde\omega=0$, again in its coefficient of $\alpha^{(i-1,p-i,0)}$ to obtain that \( 0 = d\left( \Omega^1(0,(p-i+1)\tilde f^{(i-1,p-i+1,0)},\tilde f^{(i-1,p-i,1)}) \right). \) This equation in its component in $\xx_1\wedge\xx_2$ tells us, thanks to~\eqref{eq:exA3:1-j}, that $\tilde f^{(i-1,p-i,1)}_2=\tilde f^{(i-1,p-i,1)}_3=0$. On the other hand, applying~Lemma~\ref{lem:A3:H1SU:F1} we get $g\in S$ such that $g\alpha_2(x_2) = (p-i+1)\tilde f^{(i-1,p-i+1,0)}_2$. Let now $\lambda = 1/(p-i+2)(p-i+1)$ and $\tilde v =\lambda g \alpha^{(i-1,p-i+2,0)}$. Since $[\tilde v,x_1]=0$, \begin{align} \MoveEqLeft[2] [\tilde v,x_2] \equiv \lambda g (p-i+2)\alpha_2(x_2)\alpha^{(i-1,p-i+1,0)} \mod F_{p-1}U \\ & = \lambda (p-i+1)\tilde f^{(i-1,p-i+1,0)}_2(p-i+2)\alpha^{(i-1,p-i+1,0)} \\ & =\tilde f^{(i-1,p-i+1,0)}_2\alpha^{(i-1,p-i+1,0)} \end{align} and $[\tilde v,x_3] \in \bigoplus_{ i_3<i}S\alpha^{(i_3,i_2,i_1)}$, the difference $\tilde\omega - d(\tilde v)$ is a cohomologous modulo $F_{p-1}\X^1$ to a cocycle $\eta = \sum_{l=3}^n\sum_{i_1+i_2+i_3= p} h_l^{(i_3,i_2,i_1)}\alpha^{(i_3,i_2,i_1)} \xx_l$ with $h^{(i_3,i_2,i_1)}_l=0$ if $i_1+i_2+i_3=p$, $i_3\geq i $ and $l\in\nset{3}$ and $h^{(i-1,p-i,1)}_l = h^{(i-1,p-i+1,0)}_l=0$ if $l\in\{1,2\}$. Applying~Lemma~\ref{lem:A3:H1SU:F1} one final time we obtain $\tilde g_{11},\tilde g_{12},\tilde g_{22} \in S $ such that $\tilde g_{11}\alpha_1(x_1) =(p-i+1) h^{(i-1,p-i+1,0)}_1$, $\tilde g_{12}\alpha_1(x_1)=h^{(i-1,p-i,1)}_1$ and $\tilde g_{22}\alpha_2(x_2) = (p-i+1)h^{(i-1,p-i,1)}_2-\tilde g_{12}\alpha_1(x_2)$ ---and therefore $\tilde g_{11}$, $\tilde g_{12}$ and $\tilde g_{22}$ must be equal to~$0$--- and that satisfy \[ \Omega^1(0,(p-i+1)h^{(i-1,p-i+1,0)},h^{(i-1,p-i,1)}) \equiv d(\tfrac{1}{2}\tilde g_{11}\alpha_1^2 + \tilde g_{12}\alpha_2\alpha_1 + \tfrac{1}{2}\tilde g_{22}\alpha_2^2 ) \mod F_0\X^1. 
\] It follows that $h^{(i-1,p-i+1,0)}=h^{(i-1,p-i,1)}=0$, and therefore that $\eta\equiv 0\mod F_{p-1}\X^1$. This finishes the proof of the base step of~Lemma~\ref{lem:A3:H1SU:Fp:induct2}. \bigskip We finally deal with the inductive step of~Lemma~\ref{lem:A3:H1SU:Fp:induct2}. Let $q$, $i$ and $\omega$ be as in the statement. The component in $\alpha^{(i-1,p-i-q,q)}$ of equation $d\omega =0$ yields \[ 0 = d\left( \Omega^1\left( if^{(i,p-i-q,q)},(p-i-q+1)f^{(i-1,p-i-q+1,q)},(q+1)f^{(i-1,p-i-q,q+1)} \right) \right). \] Now, our inductive hypotheses of~\eqref{claim:exA3:H1SU:Fp:induct} and of Lemma~\ref{lem:A3:H1SU:Fp:induct2} tell us, respectively, that $f^{(i,p-i-q,q)}=0$ and that $f^{(i-1,p-i-q+1,q)}=0$, and therefore our equation above reduces to \[ 0 = d\left( \Omega^1\left( 0,0,f^{(i-1,p-i-q,q+1)} \right) \right). \] Applying to this situation~Lemma~\ref{lem:A3:H1SU:F1} we obtain $g\in S$ such that $g\alpha_1(x_l) = f^{(i-1,p-i-q,q+1)}_l$ for $l\in\nset{3}$. Let $v=\tfrac{1}{(q+2)} g\alpha^{(i-1,p-i-q,q+2)}$ and write $\tilde\omega=\omega-d(v)$: let $\left\{ f_l^{(i_3,i_2,i_1)} \right\}\subset S$ such that \( \tilde\omega \equiv \sum_{l=3}^n\sum_{i_1+i_2+i_3= p} \tilde f_l^{(i_3,i_2,i_1)}\alpha^{(i_3,i_2,i_1)} \xx_l \) modulo $ F_{p-1}\X^1$. As \begin{align*} &[v,x_1] \equiv g\alpha_1(x_1)\alpha^{(i-1,p-i-q,q+1)} = f^{(i-1,p-i-q,q+1)}_1\alpha^{(i-1,p-i-q,q+1)} \mod F_{p-1}U \intertext{and if $j\in\left\{ 2,3 \right\}$ then } &[v,x_j] \in S\alpha^{(i-2,p-i-q,q+2)} \oplus S\alpha^{(i-1,p-i-q-1,q+2)} \oplus S\alpha^{(i-1,p-i-q,q+1)} \oplus F_{p-1}U, \end{align*} we obtain that $\tilde f_l^{(i_3,i_2,i_1)}=0$ whenever $i_3\geq i$ and $\tilde f^{(i-1,p-i+1,0)}_l=\ldots=\tilde f^{(i-1,p-i+1-q,q)}_l=0$ for every $l\in\nset{3}$ and, in addition, that $\tilde f^{(i-1,p-i-q,q+1)}_1=0$. As a consequence of this, the component in $\alpha^{(i-1,p-i-q,q)}$ of equation $d\tilde\omega =0$ reduces to \( 0 = d\left( \Omega^1\left( 0,0,\tilde f^{(i-1,p-i-q,q+1)} \right) \right). \) The element $\tilde g$ that is provided for this situation by~Lemma~\ref{lem:A3:H1SU:F1} satisfies $\tilde g\alpha_1(x_l) =\tilde f^{(i-1,p-i-q,q+1)}_l$ for $l\in\nset{3}$: it follows that $g=0$ and hence $\tilde f^{(i-1,p-i-q,q+1)}_l=0$ for $l\in\nset{3}$ and $\tilde\omega\equiv 0\mod F_{p-1}\X^1$. This finishes the proof of Lemma~\ref{lem:A3:H1SU:Fp:induct2}. \end{proof} From this point on we demand to $(S,L)$ that in addition it satisfy the orthogonality condition: that there be a family $u_1,u_2,u_3$ of elements of $U$ that can be written as $u_k = \alpha_3 + h_k^2\alpha_2 + h_k^1\alpha_1$ for some $\left\{ h_k^i : k\in\nset{3}, i\in\nset{2}\right\}\subset S$ and such that $[u_k,x_l]=0$ whenever $k\neq l$. The idea is that that we can add to any cocycle in $F_p\X^1$ an $S$-linear combination of $u_k^p\xx_k$ to remove its components in the maximum power of $\alpha_3$ and in this way obtain a cocycle that falls in the hypotheses of Proposition~\ref{prop:A3:H1SU:Fp}. \begin{Corollary}\label{coro:A3:H1generators} Let $\{u_1,u_2,u_3\}$ be the family that makes $(S,L)$ satisfy the orthogonality condition. \begin{thmlist} \item The cochains $\eta^p_k=u_k^p\xx_k\in F_p\X^1$ defined for~$p\geq0$ and~$k\in\nset{3}$ are cocycles. \item\label{coro:A3:H1generators:these} Every cocycle in $\X^1$ is cohomologous to one in the $S$-submodule of $\X^1$ generated by $\left\{ \eta_k^p : k\in\nset{3}, p\geq0 \right\}$. 
\end{thmlist} \end{Corollary} \begin{proof} Let us denote by $Z^1$ the $S$-module generated by $\left\{ \eta_l^p : l\in\nset{3}, p\geq0 \right\}$. We prove by induction on $p\geq0$ that if $\omega\in F_p\X^1$ is a cocycle then there exist $z\in Z^1$ and $u\in U$ such that $ \omega = d^0(u) + z$. We first observe that $F_0(\X^1)=F_0(Z^1)$ because $\xx_l = \eta_l^0$, and then for $p=0$ we have that $\omega\in F_0(\X^1)\subset Z^1$. Assume now that $p>0$ and let $\left\{ f^I_l : l\in\nset{3}, I\in\NN^3 \right\}\subset S $ such that \( \omega \equiv \sum_{l=1}^3\sum_{\abs{I} = p} f_l^I\alpha^I \xx_l \mod F_{p-1}\X^1. \) Defining $z = \sum_{l=1}^3 f_l^{(p,0,0)}\eta_l^p$ we see that the cocycle \( \tilde\omega \coloneqq \omega - z \) has its components in $\alpha^{p}\xx_1,\alpha^{p}\xx_2,\alpha^{p}\xx_3$ equal to zero, and applying Proposition~\ref{prop:A3:H1SU:Fp} we deduce that $\tilde\omega$ is a coboundary modulo $F_{p-1}\X^1$: let $u\in U$ and $\omega'\in F_{p-1}\X^1$ be such that $\tilde\omega = d^0(u) +\omega'$. The inductive hypothesis tells us that there exist $u'\in U$ and $z'\in Z^1$ such that $\omega' = d^0(u') + z'$, and thus $\omega = \tilde\omega + z = d^0(u+u') +(z+z')$, as we wanted. \end{proof} \begin{Proposition} Let $p\geq0$. \begin{thmlist} \item Let $\omega\in F_p\X^1$ be a cocycle, so that there exist $\{f_1,f_2,f_3\}\subset S$ and $u\in U$ such that $\omega \equiv \sum_{l=1}^3f_l\eta^p_l + du$ modulo $F_{p-1}\X^1$. The cocycle $\omega$ is equivalent to a coboundary modulo $F_{p-1}\X^1$ if and only if $\sum_{l=1}^3f_l\xx_l$ is a coboundary. \item The unique $S$-linear map $\gamma_p : F_0\X^1 \to F_pX^1$ such that $\xx_l\mapsto\eta_l^p$ if $1\leq l\leq 3$ induces an isomorphism of $S$-modules \[ F_{p}H^1(S,U) / F_{p-1}H^1(S,U) \cong F_0H^1(S,U). \] \end{thmlist} \end{Proposition} \begin{proof} Suppose that $\omega_0 = \sum_{l=1}^nf_l\xx_l$ is a coboundary and let $v\in U$ such that $d^0(v) = \omega_0$. Thanks to Proposition~\ref{prop:h0key} we may assume that $v\in F_1\X^1$ and write $v\equiv g^{3}\alpha_3+g^2\alpha_2+g^1\alpha_1\mod S$ for some $g^{3},g^2,g^{1}\in S$. In view of Proposition~\ref{prop:differentials} there exist $f_0^{I} \in F_0\X^1$ such that we may write \begin{align*} \MoveEqLeft d^0\left( \frac{g^{3}}{p+1}\alpha_{3}^{p+1} + g^2\alpha_{3}^p\alpha_{2} + g^1\alpha_{3}^p\alpha_{1} \right) \equiv d^0\left(v \right)\alpha^p_{3} + \sum_{i_3<p} f_0^{(i_{3},i_2,i_{1})} \alpha^{(i_{3},i_2,i_{1})} \mod F_{p-1}\X^1. \end{align*} It follows that the difference \( \omega - d^0\left( \frac{g^{3}}{p+1}\alpha_{3}^{p+1} + g^2\alpha_{3}^p\alpha_{2} + g^1\alpha_{3}^p\alpha_{1} \right) \) is a cochain whose components in $\alpha_{3}^p\xx_1,\alpha_3^p\xx_2,\alpha_{3}^p\xx_N$ are zero. Applying Proposition~\ref{prop:A3:H1SU:Fp} we see that $\omega$ is equivalent to a coboundary modulo $F_{p-1}\X^1$. Reciprocally, let $u\in U$ such that $d^0(u)= \omega$. Thanks to Proposition~\ref{prop:h0key} we know that $u\in F_{p+1}U$: let us write $u \equiv \sum_{\abs K = p+1}h^{K} \alpha^K$ with $\{h^{K}: K\in\NN^3, \abs K = p+1\}\subset S$. Taking into account Proposition~\ref{prop:differentials} again we see that \[ d^0(u) \equiv d\left( \Omega^0\left( (p+1)h^{(p+1,0,0)},h^{(p,1,0)},h^{(p,0,1)} \right) \right) \alpha^p_{3} + \sum_{i_{3}<p} f_0^{(i_{3},i_2,i_{1})} \alpha^{(i_{3},i_2,i_{1})} \] modulo $F_{p-1}\X^1$ for some $f_0^{I} \in F_0\X^1$. 
The equality of this to $\omega$ implies, looking at the components in $\alpha_{3}^p\xx_1,\alpha_3^p\xx_2,\alpha_{3}^p\xx_3$, that \( d\left( \Omega^0\left( (p+1)h^{(p+1,0,0)},h^{(p,1,0)},h^{(p,0,1)} \right) \right) = \sum_{l=1}^3 f_l\xx_l. \) This completes the proof of the first item. Now, the truth of the first item implies two things: first, that the composition $F_0\X^1\to F_p\X^1/F_{p-1}\X^1$ of $\gamma_p$ with the projection to the quotient descends to cohomology and, second, that the map induced in cohomology by this composition is a monomorphism. It is also surjective thanks to Corollary~\ref{coro:A3:H1generators}\ref{coro:A3:H1generators:these}. \end{proof} Recall that the filtered $S$-module $F_\*H^1(S,U)$ has a graded associated $S$-module $\Gr_\*H^1(S,U) = \bigoplus_{p\geq0} \Gr_pH^1(S,U)$ given by $\Gr_pH^1(S,U) \coloneqq F_pH^1(S,U)/F_{p-1}H^1(S,U)$. We have just seen that $\Gr_pH^1(S,U)$ is isomorphic as an $S$-module to $F_0H^1(S,U)$ for any $p\geq0$: we claim that we can make it an isomorphism of \emph{graded} $S$-modules. Given $p\geq0$, the map $\gamma_p : F_0\X^1 \to F_pX^1$ induces an isomorphism of $S$-modules $F_0H^1(S,U)\cong \Gr_pH^1(S,U)$ that shifts the polynomial degree in $3(p-1)$: indeed, for each $l\in\nset{3}$ the class of $\eta_l\in\X^1$, which has polynomial degree $2$, is sent to the class of $\eta_l^p$, which has polynomial degree $3p-1$. On the other hand, the morphism of $S$-modules $\gamma:F_0(H^1(S,U))\otimes \kk[\alpha_3]\to\Gr H^1(S,U)$ such that $[\eta_l]\otimes \alpha_3^p \mapsto [\eta_l^p]$ for $l\in\nset{3}$ and $p\geq0$ does respect the graduation and is an isomorphism because so is each $\gamma_p$. In addition to this, we observe that \[ F_0(H^1(S,U)) = \frac{S\otimes\lin{\xx_1,\xx_2,\xx_3}} {Sd^0(\alpha_{1})+Sd^0(\alpha_2)+Sd^0(\alpha_{3})} \cong \coker M. \] We summarize our findings in the following statement. \begin{Corollary}\label{coro:H1:result:graded} Let $S=\kk[x_1,x_2,x_3]$ and $L$ a free $S$-submodule of $\Der S$ generated by derivations $\alpha_1,\alpha_2,\alpha_3$ in such a way that $(S,L)$ is a triangularizable Lie--Rinehart algebra that satisfies the Bézout and orthogonality conditions. Let~$U$ be the Lie–Rinehart enveloping algebra of~$L$. There is an isomorphism of $S$-graded modules \begin{align*} H^1(S,U)\cong \coker M \otimes \kk[\alpha_3], \end{align*} where $M$ is the Saito's matrix of $(S,L)$. \end{Corollary} Recall that the cokernel of $M$ has a rich algebraic structure --- see M.\,Granger, D.\,Mond and M.\,Schulze's~\cite{schulze}. \section{Computation of $\HH^1(U)$} \label{sec:hh1} The spectral sequence in Theorem~\ref{thm:spectral}, regardless of its degeneracy, gives us an strategy to obtain the first Hochschild cohomology space $\HH^\*(U)$ of the enveloping algebra $U$ of a Lie--Rinehart algebra $(S,L)$: indeed, $\HH^1(U)$ is isomophic to $H^1_S(L,H^0(S,U))\oplus H^0_S(L,H^1(S,U))$. In Sections~\ref{sec:h0} and~\ref{sec:H1SU} we computed $H^0(S,U)$ and $H^1(S,U)$ when $(S,L)$ is as in Corollary~\ref{coro:H1:result:graded}, which is the conclusion of Section~\ref{sec:H1SU}. In this section we describe their $L$-module structure and use it to compute their respective Lie--Rinehart cohomology spaces for the case in which $(S,L)$ is associated to a hyperplane arrangement of the form $\A_r=\A(C_r\wr\SS_3)$ as in Example~\ref{ex:Ar}. \subsection{The $L$-module structure on \texorpdfstring{$H^\*(S,U)$}{H(S,U)}} \label{subsec:H1SU:actionofL} Let $(S,L)$ be a Lie-Rinehart pair with enveloping algebra $U$. 
Let us describe the construction in~\cite{kola} that gives an $L$-module structure to the Hochschild cohomology $H^\*(S,U)$ of $S$ with values on $U$. Fix $\alpha\in L$ and an $S^e$-projective resolution $\varepsilon:P_\*\to S$. Let $\alpha_\*$ be an $\alpha^e_S$-lifting of $\alpha_S:S\to S $ to~$P_\*$, that is, a morphism of complexes $\alpha_\*=(\alpha_{q}:P_q\to P_q)_{q\geq0}$ such that $\varepsilon\circ\alpha_0= \alpha_S\circ \varepsilon$ and for each $q\geq0$, $s$, $t\in S$ and $p\in P_q$ \[ \alpha_q( ( s\otimes t)\cdot p) =\left( \alpha_S(s)\otimes t + s\otimes\alpha_S(t) \right)\cdot p +(s\otimes t)\cdot p. \] The endomorphism $\alpha_\*^\sharp$ of $\Hom_{S^e}(P_\*,U)$, defined for each $q\geq0$ to be \begin{align}\label{eq:alphasharp} \alpha^\sharp_q (\phi): p \mapsto [\alpha,\phi(p)] - \phi\circ\alpha_q (p) \quad\text{whenever $\phi\in\Hom_{S^e}(P_q,U)$ and $p\in P_q$,} \end{align} allows us to define the map~$\nabla_\alpha^\*:H^\*(S,U)\to H^\*(S,U)$ as the unique graded endomorphism such that \[\label{eq:alphanabla} \nabla_\alpha^q([\phi]) = [\alpha_q^\sharp(\phi)], \] where [-] denotes class in cohomology. The final result is that $\alpha\mapsto \nabla^q_\alpha$ defines an $L$-module structure on~$H^q(S,U)$ for each $q\geq0$. \subsection{The liftings} From now on we work on the Lie--Rinehart algebra associated to $\A_r$ and put $E\coloneqq \alpha_1$, $D\coloneqq \alpha_2$ and $C\coloneqq \alpha_3$. The commuting relations in $L$ are determined by the rules \[\label{eq:A_r:commuting} \begin{aligned} & [E,C] = (2r+1)C, &&[E,D] = (r+1)D, \\ & [D,C] = r(x_3^r+x_2^r-x_1^r), \end{aligned} \] as a straightforward calculation shows. \begin{Proposition}\label{prop:liftings} The rules \begin{align}\label{eq:Dlifting} & D_1(1|x_1|1) = 0, \\ & D_1(1|x_k|1) = \sum_{s+t=r}x_k^s|x_k|x_k^t -\sum_{s+t=r-1}x_k^s|x_1|x_1^tx_k - x_1^r|x_k|1 \quad\text{if $k=2,3$} \end{align} define a $D^e$-lifting of $D:S\to S$. \end{Proposition} \begin{proof} It is evident that $d_1\circ D_1$ and $D_0\circ d_1$ coincide at $1|x_1|1$; if $k=2,3$ then $d_1\circ D_1(1|x_k|1) $ is \[ \!\begin{multlined}[.85\displaywidth] \sum_{s+t=r}x_k^s(x_k|1-1|x_k)x_k^t -\sum_{s+t=r-1}x_k^s(x_1|1 - 1 |x_1) x_1^tx_k - x_1^rx_k|1 + x_1^r|x_k \\ = \left( x_k^{r+1} - x_kx_1^r \right)|1 - 1|\left( x_k^{r+1} - x_kx_1^r \right), \end{multlined} \] which equals $D_0\circ d_1(1|x_k|1) = D_0(x_k|1-1|x_k) = D(x_k)|1-1|D(x_k)$ because $x_k(x_k^r-x_1^r)$ is~$D(x_k)$. \end{proof} \begin{Proposition}\label{prop:Dsharp1} For every $p\geq0$ we have that \begin{align} \label{eq:Dsharp1} &D_1^\sharp(\eta_1^p) \equiv pr(x_3^r+x_2^r-x_1^r)\eta_1^p + rx_1^{r-1}x_2\eta_2^p + rx_1^{r-1}x_3\eta_3^p\mod F_{p-1}\X^1 \mod \im d^0 \\ &D_1^\sharp ( \eta_2^p) \equiv \left( (1-p)x_1^r +(p-r-1)x_2^r + px_3^r \right) \eta_2^p \mod F_{p-1}\X^1, \\ & D_1^\sharp ( \eta_3^p) \equiv \left( (1-p)x_1^r+px_2^r + (p-r-1)x_3^r \right) \eta_3^p \mod F_{p-1}\X^1. \end{align} \end{Proposition} \begin{proof} Recall from Corollary~\ref{coro:A3:H1generators} the cocycle $\eta_l^p=u_l^p\xx_l$ for each $l\in\nset{3}$ that is such that $u_l$ commutes with every $x_j$ with $j\neq l$. The commuting relations~\eqref{eq:A_r:commuting} in $L$ give \begin{align} [D,u_3] &= [D,C] = r(x_3^r+x_2^r-x_1^r)C = r(x_3^r+x_2^r-x_1^r)u_3 \shortintertext{and} [D,u_2] &= [D,C-(x_3^r-x_2^r)D] \\ & = r(x_3^r+x_2^r-x_1^r)C - \left(rx_3^{r}(x_3^r-x_1^r) - rx_2^{r}(x_2^r-x_1^r)\right)D \\ &= r(x_3^r+x_2^r-x_1^r)\left( C-(x_3^r-x_2^r)D \right) =r(x_3^r+x_2^r-x_1^r)u_2. 
\end{align} It follows that if $l=2,3$ then $[D,u_l^p]\equiv p(x_3^r+x_2^r-x_1^r)u_l^p$ modulo $F_{p-1}U$. We can now compute \begin{align*} \MoveEqLeft D_1^\sharp(\eta_l^p)(1|x_1|1) = [D,\eta_l^p(1|x_1|1)] -\eta_l^p\circ D_1(1|x_1|1) = [D,0] - \eta_l^p(0) = 0 ; \\ \MoveEqLeft D_1^\sharp(\eta_l^p)(1|x_l|1) = [D,\eta_l^p(1|x_l|1)] -\eta_l^p\circ D_1(1|x_l|1) \\ &= [D,u_l^p] - \eta_l^p \left( \sum_{s+t=r}x_l^s|x_l|x_l^t -\sum_{s+t=r-1}x_l^s|x_1|x_1^tx_l - x_1^r|x_l|1 \right) \\ &\equiv p(x_3^r+x_2^r-x_1^r)u_l^p - \left( (r+1)x_l^r - x_1^r \right) u_l^p \mod F_{p-1}U \intertext{and for $m\neq 1,l$} \MoveEqLeft D_1^\sharp(\eta_l^p)(1|x_m|1) = [D,\eta_l^p(1|x_m|1)] - \eta_l^p\circ D_1(1|x_m|1) \\ &= [D,0] - \eta_l^p \left( \sum_{s+t=r}x_m^s|x_m|x_m^t -\sum_{s+t=r-1}x_1^s|x_1|x_1^tx_m - x_1^r|x_m|1 \right) = 0. \end{align*} With this information at hand we are able to see that \begin{align*} &D_1^\sharp ( \eta_2^p) \equiv \left( (1-p)x_1^r +(p-r-1)x_2^r + px_3^r \right) \eta_2^p \mod F_{p-1}\X^1, \\ & D_1^\sharp ( \eta_3^p) \equiv \left( (1-p)x_1^r+px_2^r + (p-r-1)x_3^r \right) \eta_3^p \mod F_{p-1}\X^1. \end{align*} Let us now consider the action of $D$ on $\eta_1^p$. To begin with, we have \begin{align} \MoveEqLeft[2] [D,u_1^p] \equiv pu_1^{p-1}[D,u_1] \mod F_{p-1}U \\ &= pu_1^{p-1}[D,(C- (x_3^r-x_1^r)D + (x_3^r-x_1^r)(x_2^r-x_1^r)E)] \\ &\!\begin{multlined}[.8\displaywidth] = pu_1^{p-1} \Big( r(x_3^r+x_2^r-x_1^r)C - r x_3^r(x_3^r-x_1^r)D \\ + D\left( (x_3^r-x_1^r)(x_2^r-x_1^r) \right)E - (r+1)(x_3^r-x_1^r)(x_2^r-x_1^r)D \Big) \end{multlined} \end{align} and we observe that \( D_1^\sharp(\eta_1^p)(1|x_1|1) = [D,u_1^p] -\eta_1^p(D_1(1|x_1|1)) = [D,u_1^p]. \) On the other hand, if $m\in\{2,3\}$ then \begin{align*} \MoveEqLeft D_1^\sharp(\eta_1^p)(1|x_m|1) = [D,\eta_1^p(1|x_m|1)] -\eta_1^p(D_1(1|x_m|1)) \\ &= [D,0] - \eta_1^p \left( \sum_{s+t=r}x_m^s|x_m|x_m^t - \sum_{s+t=r-1}x_1^s|x_1|x_1^tx_m - x_1^r|x_m|1 \right) \\ &\equiv rx_1^{r-1}x_mu_1^p. \end{align*} From these computations we see that the cocycle \[ D_1^\sharp(\eta_1^p) - pr(x_3^r+x_2^r-x_1^r)\eta_1^p - rx_1^{r-1}x_2\eta_2^p - rx_1^{r-1}x_3\eta_3^p \] has component zero in $C^p\xx_1$, $C^p\xx_2$ and $C^p\xx_3$, and then Proposition~\ref{prop:A3:H1SU:Fp} tells us that $ D_1^\sharp(\eta_1^p)$ is cohomologous modulo $ F_{p-1}\X^1$ to $pr(x_3^r+x_2^r-x_1^r)\eta_1^p + rx_1^{r-1}x_2\eta_2^p + rx_1^{r-1}x_3\eta_3^p$. \end{proof} \subsection{Invariants of $H^1(S,U)$ by the action of $L$} We already have explicit descriptions of $H^1(S,U)$, in Section~\ref{sec:H1SU}, and of the action of $L$ thereon, in Subsection~\ref{subsec:H1SU:actionofL} above: the next step is to calculate the intersection of the kernels of the actions of $E$, $D$ and $C$ on $H^1(S,U)$. \begin{Proposition}\label{prop:H0(H1)} $H^0_S(L,H^1(S,U))=0$. \end{Proposition} \begin{proof} Recall that the polynomial grading on $S$ induces a grading on $S$, on $U$ and on the cohomology $H^\*(S,U)$. Since the derivation $E$ induces the linear endomorphism $\nabla_E^1$ of $H^1(S,U)$ that sends the class of an homogeneous element $a$ of degree $\abs{a}$ to the class of $\abs{a}a$ it follows that \( \ker \nabla^1_E = H^1(S,U)_0, \) where $H^1(S,U)_0$ is the subspace of $ H^1(S,U)$ formed by elements of degree zero. Remember that if $k\in\nset{3}$ then $\abs{u_k} = \abs{\alpha_3} = 2r$, and therefore $\abs{\eta_k} = 2r-1$. 
In view of our calculation in Corollary~\ref{coro:H1:result:graded} this means that \[ \ker \nabla^1_E = H^1(S,U)_0 \cong \begin{cases*} \frac{ S_1\otimes\lin{\xx_1,\xx_2,\xx_3} }{\kk(x_1\xx+x_2\xx_2+x_3\xx_3) } \oplus \lin{\eta_1,\eta_2,\eta_3} & if $r=1$; \\ \frac{ S_1\otimes\lin{\xx_1,\xx_2,\xx_3} }{\kk(x_1\xx+x_2\xx_2+x_3\xx_3) } & if $r\geq2$. \end{cases*} \] We begin by supposing that $r\geq2$. We observe that if $f_1,f_2,f_3\in S_1$ then \begin{align*} \MoveEqLeft[2] D_1^\sharp \left( \sum f_i\xx_i \right) = \sum \left( D(f_i)\xx_i + f_iD_1^\sharp(\xx_i) \right) \\ &\!\begin{multlined}[.8\displaywidth] = \sum D(f_i)\xx_i + f_1r\left( x_1^{r-1}x_2\xx_2 + x_1^{r-1}x_3\xx_3 \right) + f_2 (x_1^r-(r+1)x_2^r)\xx_2 \\ + f_3 (x_1^r-(r+1)x_3^r)\xx_3 \end{multlined} \\ &\!\begin{multlined}[.8\displaywidth] = D(f_1) \xx_1 + \left( D(f_2) + f_1rx_1^{r-1}x_2 + f_2(x_1^r-(r+1)x_2^r)\right)\xx_2 \\ + \left( D(f_3) + f_1rx_1^{r-1}x_3 + f_3(x_1^r-(r+1)x_3^r)\right)\xx_3 \end{multlined} \end{align*} belongs to the homogeneous component of degree $r$ of $F_0H^1(S,U)$, which is precisely \[ \left( \frac{S\otimes\lin{\xx_1,\xx_2,\xx_3}} {Sd^0(E)+Sd^0(D)+Sd^0(C)} \right)_r = \frac{S_{r+1}\otimes\lin{\xx_1,\xx_2,\xx_3}} {S_rd^0(E)+\kk d^0(D)}. \] It follows that if \( \nabla_D^1\left( [ \sum f_i\xx_i ] \right) = \left[D_1^\sharp \left( \sum f_i\xx_i \right)\right] \) is zero in cohomology there must exist $g\in S_r$ and $\mu\in\kk$ such that \[\label{eq:H1SU-noDetas} D_1^\sharp \left( \sum f_i\xx_i \right) = g(x_1\xx_1 + x_2\xx_2+ x_3\xx_3 ) + \mu\left( x_2(x_2^r-x_1^r)\xx_2 - x_3(x_3^r-x_1^r)\xx_3 \right). \] Let us write $f_i = f_{i,1}x_1 + f_{i,2}x_2 + f_{i,3}x_3$ with $f_{i,j}\in\kk$ for $i,j\in\nset{3}$. Up to the addition of coboundary that is a scalar multiple of $d^0(E) = x_1\xx_1+x_2\xx_2+x_3\xx_3$ we may suppose that $f_{1,1}=0$. In $\xx_1$ we have $D(f_1) =gx_1$, or, in other words, \[ f_{1,2}(x_2^{r+1}-x_1x_2^r) + f_{1,3}(x_3^{r+1}-x_1x_3^r) = gx_1. \] The components in $x_2^{r+1} $ and in $x_3^{r+1}$ of this equality read $f_{1,2}= 0$ and $f_{1,3}=0$: this implies that $g=0$, and of course that $f_1=0$. Next, equation~\eqref{eq:H1SU-noDetas} in $\xx_2$ yields the equality in $S_{r+1} $ \[ D(f_2) + f_2(x_1^r-(r+1)x_2^r) =\mu( x_2(x_2^r-x_1^r)). \] In $x_1^{r+1}$ and $x_3^{r+1}$ we have $f_{2,1}=0$ and $f_{2,3}=0$, and what remains is $ -rf_{2,2}x_2^{r+1}=\mu( x_2(x_2^r-x_1^r)$. It follows that $\mu=0$ and therefore $f_2=0$; analogously, $f_3=0$. We conclude that $\ker\nabla_D^1\vert_{H^1(S,U)_0}=0$ when $r\geq2$. \bigskip Let us now suppose that $r=1$ and compute the kernel of the restriction of $\nabla^1_D$ to $ H^1(S,U)_0 $. Let then $f_1,f_2,f_3\in S $ and $\lambda_1,\lambda_2,\lambda_3$ be such that $\nabla_D^1\left( [ \sum f_i\xx_i + \lambda_i\eta_i] \right)$ is zero in cohomology. 
Since \[ \!\begin{multlined}[.85\displaywidth] H^1(S,U)_1 \cong \frac{ S_2\otimes\lin{\xx_1,\xx_2,\xx_3} } {S_1(x_1\xx+x_2\xx_2+x_3\xx_3) + \kk(x_2(x_2-x_1)\xx_2+x_3(x_3-x_1)\xx_3)} \\ \oplus \frac{ S_1 \lin{\eta_1,\eta_2,\eta_3}}{ \kk(x_1\eta_1+x_2\eta_2 + x_3\eta_3) } \oplus \lin{\eta_1^2,\eta_2^2,\eta_3^2} \end{multlined} \] there exist $\mu_1,\mu_2\in\kk$ and $g\in S_1$ such that \[\label{eq:H1SU-Detas} \!\begin{multlined}[.9\displaywidth] D_1^\sharp \left( \sum f_i\xx_i + \lambda_i\eta_i \right) = g(x_1\xx+x_2\xx_2+x_3\xx_3) + \mu_2( x_2(x_2-x_1)\xx_2+x_3(x_3-x_1)\xx_3) \\ + \mu_1(x_1\eta_1+x_2\eta_2 + x_3\eta_3) \end{multlined} \] We know from Proposition~\ref{prop:Dsharp1} that modulo $S$ $D_1^\sharp(\eta_1) \equiv (x_3+x_2-x_1)\eta_1+x_2\eta_2+x_3\eta_3$, $D_1^\sharp(\eta_2) \equiv (-x_2+x_3)\eta_2$ and $D_1^\sharp(\eta_3) \equiv (x_2-x_3)\eta_3$ and since $D_1^\sharp(\sum S\xx_i)\subset S$ the equality~\eqref{eq:H1SU-Detas} implies that \[ \!\begin{multlined}[.9\displaywidth] \lambda_1(x_3+x_2-x_1)\eta_1 + \left( \lambda_1x_2 + \lambda_2 (-x_2+x_3)\right)\eta_2 + \left( \lambda_1 x_3 + \lambda_3 (x_2-x_3) \right)\eta_3 \\ = \mu_1(x_1\eta_1+x_2\eta_2 + x_3\eta_3) \end{multlined} \] This is an equality in $\bigoplus_{i=1}^3S_1\eta_i$. In $S_1\eta_3$ we have \( \lambda_3x_2 + (\lambda_1-\lambda_3)x_3 = \mu_1x_3, \) so $\lambda_3 = 0$ and $\lambda_1 = \mu_1$. In $S_1\eta_2$ an analogous argument shows that $\lambda_2=0$ and $\lambda_1 = \mu_1$ again, and finally in $S_1\eta_1$ we have \[ \lambda_1(x_3+x_2-x_1)=\lambda_1x_1. \] It follows that $\lambda_1=\mu_1=0$. Consider now what is left of~\eqref{eq:H1SU-Detas}: it is precisely~\eqref{eq:H1SU-noDetas} replacing $r$ by $1$. The same argument, therefore, allows us to see that $\ker\nabla_D^1\vert_{H^1(S,U)_0}=0$ when $r=1$. \bigskip We conclude that \( H^0_S(L,H^0(S,L))\subset \ker\left( \nabla_D^1 :H^1(S,U)_0\to H^1(S,U)_1 \right) = 0, \) from which $H^0_S(L,H^1(S,L))=0$ independently of $r\geq1$. \end{proof} \begin{Corollary}\label{coro:Ar:HH1} Let $r\geq1$ and $A_r=\A(C_r\wr\SS_3)$. If $(S,L)$ is its associated Lie--Rinehart algebra and $U$ its enveloping algebra then $\HH^1(U)\cong H^1_S(L,S)$. In particular, the dimension of $\HH^1(U)$ is $3r+3$, the number of hyperplanes of $\A_r$. \end{Corollary} \begin{proof} Thanks to Theorem~\ref{thm:spectral} $\HH^1(U)\cong H^1_S(L,H^0(S,U))\oplus H^0_S(L,H^1(S,U))$; Proposition~\ref{prop:h0} tells us that $H^0(S,U)=S$ and Proposition~\ref{prop:H0(H1)} above that the second summand is zero. \end{proof} Let $f\in S_1$ be a linear form whose kernel is one of the hyperplanes in $\A_3$. It is a direct verification that there is a unique derivation $\partial_f:U\to U$ such that \[ \begin{cases*} \partial_f(g) = 0 & if $g\in S$; \\ \partial_f(\theta) = \theta(f)/f & if $\theta\in\Der\A_r$. \end{cases*} \] Fix as well $\kk=\mathbb C$ and factorize the defining polynomial $Q(\A_r) = x_1x_2x_3\prod_{1\leq i<j\leq 3}(x_j^r-x_i^r)$ as \[\label{eq:Ar:factorized} Q(\A_r) = x_1x_2x_3\prod_{j=0}^{r-1}(x_2-e^{2j\pi i/r}x_1 ) (x_3-e^{2j\pi i/r}x_1) (x_3-e^{2j\pi i/r}x_2) \] \begin{Corollary}\label{coro:abelian} The Lie algebra of outer derivations of $\Diff\A_r$ together with the commutator is an abelian Lie algebra of dimension $3r+3$ generated by the classes of the derivations $\partial_f$ with $f$ in a linear factor of~\eqref{eq:Ar:factorized}. 
\end{Corollary} \begin{proof} We claim that the classes of $\partial_f$, with $f$ one of the linear factors in~\eqref{eq:Ar:factorized}, are linearly independent in $\Out(U)$. Indeed, let $u\in U$ and $\lambda_f\in\kk$ be such that \[\label{eq:gerst} \sum\lambda_f\partial_f(v)=[u,v] \qquad\text{for every $v\in U$.} \] Evaluating~\eqref{eq:gerst} on each $v=g\in S$ we obtain that the left side vanishes and therefore $u\in H^0(S,U)$, which is equal to $S$ in view of Proposition~\ref{prop:h0}. Write $u=\sum_{j\geq0}u_j$ with $u_j\in S_j$. Evaluating now~\eqref{eq:gerst} on $E$ we obtain that $\sum_{f\in\AA}\lambda_f=-\sum_{j\geq0}ju_j$. In each homogeneous component $S_j$ with $j\neq0$ we have $ju_j=0$ and therefore $u\in S_0=\kk$ and, when $j=0$, $\sum_f\lambda_f=0$. Evaluating the left hand side of~\eqref{eq:gerst} on $C$ gives $\sum_f\lambda_f\partial_f(C)$. Now, if $\partial_f(C) = C(f)/f = \partial_3(f)C(x_3)/f$ is nonzero then $\partial_3(f)\neq0$ and thus $f$ is a factor of $C(x_3)$: let us, then, factor $C(x_3)$ by $x_3$ and $f_{l,j}=x_3-e^{2j\pi i/r}x_l$ for $l=1,2$ and $j\in\nset{0,r-1}$, and in this way reformulate the evaluation of~\eqref{eq:gerst} at $C$ as the nullity of \[ \sum_{f\in\AA}\partial_3(f)C(x_3)/f = \lambda_{x_3}(x_3^r-x_2^r)(x_3^r-x_1^r) + \sum_{l=1,2}\sum_{j=0}^{r-1} \lambda_{f_{l,j}}x_3(x_3^r-x_2^r)(x_3^r-x_1^r)/f_{l,j}. \] Fix now $l\in\nset{2}$ and $j\in\nset{0,r-1}$ and apply the morphism of algebras $\epsilon_{l,j}:S\to\kk[x_1,x_2]$ that sends $x_3$ to $e^{2k\pi i/r}x_l$: since $\epsilon_{l,j}\left( (x_3^r-x_{l'}^r)/f_{j',l'} \right)=0$ whenever $l\neq l'$ and $j\neq j'$ we obtain that \[ \epsilon_{l,j} : \sum_{f\in\AA}\partial_3(f)C(x_3)/f \mapsto \lambda_{f_{l,j}}x_3(x_3^r-x_2^r)(x_3^r-x_1^r)/f_{l,j}. \] As the expression at which we evaluated $\epsilon_{l,j}$ was zero, it follows that $\lambda_{f_{l,j}}=0$ and, immediately, that also $\lambda_{x_3}=0$. We observe that the indexes that survive in the sum $\sum\lambda_f\partial_f$ are $x_1$, $x_2$ and $f_j=x_2-e^{2j\pi i/r}x_1$ with $j\in\nset{0,r-1}$; evaluating at $D$ we obtain \[ \sum\lambda_f\partial_f (D) = \lambda_{x_2}(x_2^r-x_1^r) + \sum_{j=0}^{r-1} \lambda_{f_j}x_2(x_2^r-x_1^r)/f_j. \] Reasoning as above we get that $\lambda_{x_2} = \lambda_{f_j}=0$ for every $j$. Recalling now that $\sum_{f\in\AA}\lambda_f=0$ we see that $\lambda_{x_1}=0$ as well. The classes of $\partial_f$ with $f$ a linear factor in~\eqref{eq:Ar:factorized} span $\Out U$ because the dimension of $\Out U\cong\HH^1(U)$ is, thanks to Corollary~\ref{coro:Ar:HH1}, precisely $\abs\A$. The composition $\partial_f\circ\partial_g:U\to U$ is evidently equal to zero for any $f,g\in\AA$, as a straightforward calculation shows, and therefore the Lie algebra structure in $\Out U$~vanishes. \end{proof} \begin{bibdiv} \begin{biblist} \bib{AC}{article}{ author={Alev, J.}, author={Chamarie, M.}, title={D\'{e}rivations et automorphismes de quelques alg\`ebres quantiques}, language={French}, journal={Comm. Algebra}, volume={20}, date={1992}, number={6}, pages={1787--1802}, issn={0092-7872}, review={\MR{1162608}}, doi={10.1080/00927879208824431}, }\bib{arnold}{article}{ author={Arnol'd, Vladimir I.}, title={The cohomology ring of the colored braid group}, journal={Mathematical Notes}, volume={5}, number={2}, pages={138--140}, year={1969}, publisher={Springer} } \bib{bigatti}{article}{ author={Bigatti, Anna Maria}, author={Palezzato, Elisa}, author={Torielli, Michele}, title={New characterizations of freeness for hyperplane arrangements}, journal={J. 
Algebraic Combin.}, volume={51}, date={2020}, number={2}, pages={297--315}, issn={0925-9899}, review={\MR{4069344}}, doi={10.1007/s10801-019-00876-9}, } \bib{calderon}{article}{ AUTHOR = {Calder\'on-Moreno, Francisco J.}, TITLE = {Logarithmic differential operators and logarithmic de {R}ham complexes relative to a free divisor}, JOURNAL = {Ann. Sci. \'Ecole Norm. Sup. (4)}, VOLUME = {32}, year = {1999}, NUMBER = {5}, PAGES = {701--714}, ISSN = {0012-9593}, DOI = {10.1016/S0012-9593(01)80004-5}, URL = {https://doi.org/10.1016/S0012-9593(01)80004-5}, } \bib{cartan-eilenberg}{book}{ AUTHOR = {Cartan, Henri}, author = {Eilenberg, Samuel}, TITLE = {Homological algebra}, PUBLISHER = {Princeton University Press, Princeton, N. J.}, year = {1956}, PAGES = {xv+390}, } \bib{schulze}{article}{ title={Partial normalizations of Coxeter arrangements and discriminants}, author={Granger, Michel}, author={Mond, David}, author={Schulze, Mathias}, eprint ={arXiv:1108.0718}, year={2011} } \bib{hueb}{article}{ AUTHOR = {Huebschmann, Johannes}, TITLE = {Poisson cohomology and quantization}, JOURNAL = {J. Reine Angew. Math.}, VOLUME = {408}, year = {1990}, PAGES = {57--113}, ISSN = {0075-4102}, DOI = {10.1515/crll.1990.408.57}, URL = {https://doi.org/10.1515/crll.1990.408.57}, } \bib{tesis}{thesis}{ author={Kordon, Francisco}, title={Hochschild cohomology of algebras of differential operators associated with hyperplane arrangements}, school={Universidad de Buenos Aires, Facultad de Ciencias Exactas y Naturales}, year={2019}, type={Doctoral thesis}, } \bib{kola}{article}{ author={Kordon, Francisco}, author={Lambre, Thierry}, title={Lie-Rinehart and Hochschild cohomology for algebras of differential operators}, journal={J. Pure Appl. Algebra}, volume={225}, date={2021}, number={1}, pages={Paper No. 106456, 28}, issn={0022-4049}, review={\MR{4123254}}, doi={10.1016/j.jpaa.2020.106456}, } \bib{ksa}{article}{ author={Kordon, Francisco}, author={Suárez-Álvarez, Mariano}, title={Hochschild cohomology of algebras of differential operators tangent to a central arrangement of lines}, date={2018}, eprint={arXiv:1807.10372}, note={Accepted for publication by Documenta Mathematica} } \bib{th-pa}{article}{ author={Lambre, Thierry}, author={Le Meur, Patrick}, title={Duality for differential operators of Lie--Rinehart algebras}, journal={Pacific J. Math.}, volume={297}, date={2018}, number={2}, pages={405--454}, issn={0030-8730}, review={\MR{3893434}}, doi={10.2140/pjm.2018.297.405}, } \bib{MR}{book}{ author={McConnell, J. C.}, author={Robson, J. C.}, title={Noncommutative Noetherian rings}, series={Graduate Studies in Mathematics}, volume={30}, edition={Revised edition}, note={With the cooperation of L. W. Small}, publisher={American Mathematical Society, Providence, RI}, date={2001}, pages={xx+636}, isbn={0-8218-2169-5}, review={\MR{1811901 (2001i:16039)}}, } \bib{narvaez}{article}{ AUTHOR = {Narv\'aez Macarro, L.}, TITLE = {Linearity conditions on the {J}acobian ideal and logarithmic-meromorphic comparison for free divisors}, BOOKTITLE = {Singularities {I}}, SERIES = {Contemp. Math.}, VOLUME = {474}, PAGES = {245--269}, PUBLISHER = {Amer. Math. Soc., Providence, RI}, year = {2008}, DOI = {10.1090/conm/474/09259}, URL = {https://doi.org/10.1090/conm/474/09259}, } \bib{OS}{article}{ AUTHOR = {Orlik, Peter}, AUTHOR = {Solomon, Louis}, TITLE = {Combinatorics and topology of complements of hyperplanes}, JOURNAL = {Invent. 
Math.}, VOLUME = {56}, year = {1980}, NUMBER = {2}, PAGES = {167--189}, ISSN = {0020-9910}, DOI = {10.1007/BF01392549}, URL = {https://doi.org/10.1007/BF01392549}, } \bib{OT}{book}{ AUTHOR = {Orlik, Peter}, AUTHOR = {Terao, Hiroaki}, TITLE = {Arrangements of hyperplanes}, SERIES = {Grundlehren der Mathematischen Wissenschaften }, VOLUME = {300}, PUBLISHER = {Springer-Verlag, Berlin}, Year = {1992}, PAGES = {xviii+325}, ISBN = {3-540-55259-6}, DOI = {10.1007/978-3-662-02772-1}, URL = {http://dx.doi.org/10.1007/978-3-662-02772-1}, } \bib{rinehart}{article}{ ISSN = {00029947}, URL = {http://www.jstor.org/stable/1993603}, author = {Rinehart, George S.}, journal = {Transactions of the American Mathematical Society}, number = {2}, pages = {195-222}, publisher = {American Mathematical Society}, title = {Differential Forms on General Commutative Algebras}, volume = {108}, year = {1963} } \bib{saito}{article}{ author={Saito, Kyoji}, title={Theory of logarithmic differential forms and logarithmic vector fields}, journal={J. Fac. Sci. Univ. Tokyo Sect. IA Math.}, volume={27}, date={1980}, number={2}, pages={265--291}, issn={0040-8980}, review={\MR{586450}}, } \bib{differential-arrangements}{article}{ author={Suárez-Álvarez, Mariano}, title={The algebra of differential operators tangent to a hyperplane arrangement}, date={2018}, eprint={arXiv:1806.05410}, } \bib{SAV}{article}{ author={Su\'{a}rez-Alvarez, Mariano}, author={Vivas, Quimey}, title={Automorphisms and isomorphisms of quantum generalized Weyl algebras}, journal={J. Algebra}, volume={424}, date={2015}, pages={540--552}, issn={0021-8693}, review={\MR{3293233}}, doi={10.1016/j.jalgebra.2014.08.045}, } \bib{terao}{article}{ author={Terao, Hiroaki}, title={Free arrangements of hyperplanes and unitary reflection groups}, journal={Proc. Japan Acad. Ser. A Math. Sci.}, volume={56}, date={1980}, number={8}, pages={389--392}, issn={0386-2194}, review={\MR{596011}}, } \bib{weibel}{book}{ author = {Weibel, C.}, title = {An introduction to Homological algebra}, date = {1994}, publisher = {Cambridge University Press}, } \bib{wy}{article}{ AUTHOR = {Wiens, Jonathan}, author = {Yuzvinsky, Sergey}, TITLE = {De {R}ham cohomology of logarithmic forms on arrangements of hyperplanes}, JOURNAL = {Trans. Amer. Math. Soc.}, VOLUME = {349}, year = {1997}, NUMBER = {4}, PAGES = {1653--1662}, ISSN = {0002-9947}, DOI = {10.1090/S0002-9947-97-01894-1}, URL = {https://doi.org/10.1090/S0002-9947-97-01894-1}, } \end{biblist} \end{bibdiv} \end{document}
2205.15085v1
http://arxiv.org/abs/2205.15085v1
Local Systems, Algebraic Foliations and Fibrations
\documentclass[12pt]{amsart} \usepackage{amsmath} \usepackage{amsfonts} \usepackage{amsthm} \usepackage{amssymb} \usepackage{amscd} \usepackage[all]{xy} \usepackage{enumerate} \usepackage{hyperref} \usepackage{stackrel} \usepackage{graphicx} \textheight22truecm \textwidth17truecm \oddsidemargin-0.5truecm \evensidemargin-0.5truecm \keywords{} \subjclass[2010]{} \pagestyle{myheadings} \newcommand*{\ext}{\mathcal{E}\kern -.7pt xt} \theoremstyle{plain} \newtheorem{thm}{Theorem}[section] \newtheorem{thml}{Theorem} \renewcommand*{\thethml}{[\Alph{thml}]} \newtheorem{mainthm}[thm]{Main Theorem} \newtheorem{prop}[thm]{Proposition} \newtheorem{prope}[thm]{Property} \newtheorem{cor}[thm]{Corollary} \newtheorem{rem}[thm]{Remark} \newtheorem{lem}[thm]{Lemma} \newtheorem{cla}[thm]{Claim} \newtheorem*{clann}{Claim} \newtheorem{empthm}[thm]{} \newtheorem{op}[thm]{Operation} \theoremstyle{definition} \newtheorem{defn}[thm]{Definition} \newtheorem{empdefn}[thm]{} \newtheorem{case}[thm]{Case division} \newtheorem{conj}[thm]{Conjecture} \newtheorem{prob}[thm]{Problem} \newtheorem{probs}[thm]{Problems} \newtheorem{que}[thm]{Question} \newtheorem{expl}[thm]{Example} \newtheorem{assum}[thm]{Assumption} \newtheorem{mainassum}[thm]{Main Assumption} \newtheorem{nota}[thm]{Notation} \newtheorem{const}[thm]{Construction} \newtheorem{conpro}[thm]{Construction and Proposition} \newtheorem{conv}[thm]{Convention} \newtheorem{setup}[thm]{Set-up} \newtheorem*{ackn}{Acknowledgment} \newtheorem{fig}[thm]{Figure} \newtheorem{rmk}[thm]{Remark} \newcommand{\sA}{\mathcal{A}} \newcommand{\sB}{\mathcal{B}} \newcommand{\sC}{\mathcal{C}} \newcommand{\sD}{\mathcal{D}} \newcommand{\sE}{\mathcal{E}} \newcommand{\sF}{\mathcal{F}} \newcommand{\sG}{\mathcal{G}} \newcommand{\sH}{\mathcal{H}} \newcommand{\sI}{\mathcal{I}} \newcommand{\sJ}{\mathcal{J}} \newcommand{\sK}{\mathcal{K}} \newcommand{\sL}{\mathcal{L}} \newcommand{\sN}{\mathcal{N}} \newcommand{\sM}{\mathcal{M}} \newcommand{\sO}{\mathcal{O}} \newcommand{\sP}{\mathcal{P}} \newcommand{\sQ}{\mathcal{Q}} \newcommand{\sR}{\mathcal{R}} \newcommand{\sS}{\mathcal{S}} \newcommand{\sT}{\mathcal{T}} \newcommand{\sU}{\mathcal{U}} \newcommand{\sV}{\mathcal{V}} \newcommand{\sW}{\mathcal{W}} \newcommand{\sX}{\mathcal{X}} \newcommand{\sY}{\mathcal{Y}} \newcommand{\sZ}{\mathcal{Z}} \newcommand{\tA}{{\widetilde{A}}} \newcommand{\mA}{\mathbb{A}} \newcommand{\mC}{\mathbb{C}} \newcommand{\mD}{\mathbb{D}} \newcommand{\mF}{\mathbb{F}} \newcommand{\mG}{\mathbb{G}} \newcommand{\mH}{\mathbb{H}} \newcommand{\mL}{\mathbb{L}} \newcommand{\mN}{\mathbb{N}} \newcommand{\mP}{\mathbb{P}} \newcommand{\mQ}{\mathbb{Q}} \newcommand{\mZ}{\mathbb{Z}} \newcommand{\mR}{\mathbb{R}} \newcommand{\mW}{\mathbb{W}} \newcommand{\Ima}{\mathrm{Im}\,} \newcommand{\Ker}{\mathrm{Ker}\,} \newcommand{\Kod}{\mathrm{Kod}\,} \newcommand{\Alb}{\mathrm{Alb}\,} \newcommand{\ap}{\mathrm{ap}} \newcommand{\Bs}{\mathrm{Bs}\,} \newcommand{\Chow}{\mathrm{Chow}\,} \newcommand{\CP}{\mathrm{CP}} \newcommand{\Div}{\mathrm{Div}\,} \newcommand{\expdim}{\mathrm{expdim}\,} \newcommand{\ord}{\mathrm{ord}\,} \newcommand{\Aut}{\mathrm{Aut}\,} \newcommand{\Hilb}{\mathrm{Hilb}} \newcommand{\Hom}{\mathrm{Hom}} \newcommand{\sHom}{\mathcal{H}{\!}om\,} \newcommand{\Lie}{\mathrm{Lie}\,} \newcommand{\mult}{\mathrm{mult}} \newcommand{\Pic}{\mathrm{Pic}\,} \newcommand{\Spec}{\mathrm{Spec}\,} \newcommand{\Proj}{\mathrm{Proj}\,} \newcommand{\Rhom}{{\mathbb{R}\mathcal{H}{\!}om}\,} \newcommand{\aw}{\mathrm{aw}} \newcommand{\exc}{\mathrm{exc}\,} 
\newcommand{\emb}{\mathrm{emb\text{-}dim}} \newcommand{\codim}{\mathrm{codim}\,} \newcommand{\OG}{\mathrm{OG}} \newcommand{\Sing}{\mathrm{Sing}\,} \newcommand{\Supp}{\mathrm{Supp}\,} \newcommand{\SL}{\mathrm{SL}\,} \newcommand{\Reg}{\mathrm{Reg}\,} \newcommand{\rank}{\mathrm{rank}\,} \newcommand{\VSP}{\mathrm{VSP}\,} \newcommand{\B}{B} \newcommand{\Q}{Q} \newcommand{\PGL}{\mathrm{PGL}} \usepackage{color} \newcommand{\ThfC}[2]{\mathrm{\theta_{#1}^{#2}}} \newcommand{\Ab}{\mathrm{A}} \newcommand{\AD}{\mathcal{D}} \newcommand{\AL}{\mathcal{L}} \newcommand{\LATT}{\Lambda} \newcommand{\VSA}{V} \newcommand{\ConnHom}{f} \newcommand{\bigslant}[2]{{\raisebox{.2em}{$#1$}\left/\raisebox{-.2em}{$#2$}\right.}} \newcommand{\leftexp}[2]{{\vphantom{#2}}^{#1}{#2}} \newcommand{\traspose}[1]{\leftexp{t}{#1}} \newcommand{\IMh}[1]{\Im m \ #1} \newcommand{\Z}{\mZ} \newcommand{\C}{\mC} \newcommand{\R}{\mR} \numberwithin{equation}{section} \newcommand{\beba} {\begin{equation}\begin{array}{rcl}} \newcommand{\eaee} {\end{array}\end{equation}} \makeatletter \def\l@section{\@tocline{1}{0pt}{1pc}{}{}} \def\l@subsection{\@tocline{2}{0pt}{1pc}{4.6em}{}} \def\l@subsubsection{\@tocline{3}{0pt}{1pc}{7.6em}{}} \renewcommand{\tocsection}[3]{ \indentlabel{\@ifnotempty{#2}{\makebox[2.3em][l]{ \ignorespaces#1 #2.\hfill}}}#3} \renewcommand{\tocsubsection}[3]{ \indentlabel{\@ifnotempty{#2}{\hspace*{2.3em}\makebox[2.3em][l]{ \ignorespaces#1 #2.\hfill}}}#3} \renewcommand{\tocsubsubsection}[3]{ \indentlabel{\@ifnotempty{#2}{\hspace*{4.6em}\makebox[3em][l]{ \ignorespaces#1 #2.\hfill}}}#3} \makeatother \setcounter{tocdepth}{4} \title{Local systems, algebraic foliations, and fibrations} \keywords{Semistable Fibrations, Foliations, Local systems, Castelnuovo-de Franchis Theorem, MRC fibration, Iitaka fibration} \subjclass[2020]{14D06, 14E05, 14M22, 32M25} \author{Luca Rizzi} \address{Luca Rizzi\\ IBS Center for Complex Geometry, 55 EXPO-ro, Yuseong-gu, Daejeon, 34126, South Korea, \texttt{[email protected]}} \author{Francesco Zucconi} \address{Francesco Zucconi\\Department of Mathematics, Computer Science and Physics \\ Universit\`a di Udine\\ Udine, 33100\\ Italia \texttt{[email protected]}} \begin{document} \markboth{}{} \maketitle \begin{abstract} Given a semistable fibration $f\colon X\to B$ we introduce a correspondence between foliations $\sF$ on $X$ and local systems $\mL$ on $B$. Building up on this correspondence we find conditions that give maximal rationally connected fibrations in terms of data on the foliation. We prove the Castelnuovo-de Franchis theorem in the case of $p$-forms and we apply it to show when, under some natural conditions, a line subbundle of the sheaf of $p$-forms induces the Iitaka fibration. \end{abstract} \section{Introduction} Let $f\colon X\to B$ be a (semistable) fibration between a smooth complex $n$-dimensional algebraic variety $X$ and a smooth curve $B$. Its relative tangent sheaf is a standard example of algebraically integrable foliation, and so are its algebraically integrable sub-foliations. On the other hand in \cite{RZ4} we have associated to $f\colon X\to B$ the local systems $\mD_{X}^k$ on $B$ given by the $k$-forms on the fibers of $f$ which are locally liftable to closed holomorphic $k$-forms on $X$, $k=1,\cdots , n-1$. In this paper we merge these two dual approaches. Moreover we consider again $\mD_{X}^1$ but in light of the Castelnuovo-de Franchis theorem. 
\subsection{Foliations and local systems of $1$-forms} The relative tangent sheaf $T_{X/B}$ is the foliation given by the kernel of the differential map $T_X\to f^*T_B$. On the dual side, the local system $\mD_{X}^1$ is defined by the exact sequence \begin{equation} \label{seqintro} 0\to\omega_B\to f_{*}\Omega_{X,d}^1\to \mD_{X}^1\to 0 \end{equation} where $\Omega_{X,d}^1$ is the sheaf of $d$-closed holomorphic $1$-forms on $X$: $\mD_{X}^1$ has been studied in depth in \cite{PT} when $n=2$. See also Section \ref{sez2} for the necessary background on foliations of vector fields and local systems of relative differential forms. \subsubsection{The correspondence} In Section \ref{sez3} we show that each sub-foliation $\sF\subseteq T_{X/B}$ gives a local system $\mL_\sF\leq\mD_{X}^1$, and vice versa every local system $\mL\leq\mD_{X}^1$ gives a foliation $\sF_\mL\subseteq T_{X/B}$, hence we have a correspondence: \begin{equation} \label{corrispondeprima} \big\{\text{foliations } \sF\subseteq T_{X/B}\big\}\stackrel[\beta]{\alpha}{\rightleftarrows} \big\{\text{local systems } \mL\leq \mD_{X}^1\big\} \end{equation} defined by $\alpha(\sF)=\mL_{\sF}$ and $\beta(\mL)=\sF_{\mL}$. In Proposition \ref{uguaglianzalocalsys} we show that in general these maps are not inverses of each other. \subsubsection{The factorizing variety} In Section \ref{sez4} we solve the following problem. Let $Y$ be a normal $m$-dimensional variety which factors $f\colon X\to B$ as follows: \begin{equation} \xymatrix{ X\ar[r]^{h}& Y\ar[r]^{g}& B } \end{equation} where $g\circ h=f$ and $h,g$ are fibrations. It is not difficult to see that in this case we have a sub local system $\mD_Y^1\leq\mD_{X}^1$ whose wedges satisfy some natural properties determined by the dimension of $Y$ and of the $g$-fibers. We ask to what extent we can obtain the converse in terms of local systems. More precisely, is it true that a local system $\mL<\mD_X^1$ which mimics the properties of $\mD_Y^1$ gives back a variety $Y$ which factors $f$ as $X\to Y\to B$? It turns out that this problem can be tackled in the framework presented in \cite{RZ4}. The wedge product naturally gives maps $\bigwedge^{i} \mL\to \mD_X^i$. The key definition is the following: \begin{defn} We say that $\mL$ is {\it{of Castelnuovo-type of order $m$}} if $\rank\mL \geq m+1$, $\bigwedge^{m} \mL\to \mD_X^m$ is zero while $\bigwedge^{m-1} \mL\to \mD_X^{m-1}$ is injective on decomposable elements. \end{defn} \noindent This leads us to show: \begin{thml} \label{thmA} Let $f\colon X\to B$ be a semistable fibration and $\mL\leq\mD_X$ a local system of Castelnuovo-type of order $m$. Then up to a covering $\widetilde{B}\to B$ and base change $\tilde{f}\colon \widetilde{X}\to \widetilde{B}$ there exist a normal $m$-dimensional complex space $\widetilde{Y}$ and holomorphic maps $\tilde{g}\colon\widetilde{Y}\to\widetilde{B}$ and $\tilde{h}\colon \widetilde{X}\to\widetilde{Y}$ such that $\tilde{f}=\tilde{g}\circ \tilde{h}$. Furthermore if $\sF_{\mL}$ is the foliation associated to $\mL$ then it is algebraically integrable. \end{thml} \noindent See Corollary \ref{castel1}. Theorem \ref{thmA} can also be generalized to the case of a flag of local systems $\mL_{s}<\dots<\mL_{2}<\mL_{1}<\mD_{X}^1$, see Corollary \ref{flag}.
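To fix ideas we spell out, purely as an illustration of the definition and not as a statement used in the sequel, the smallest meaningful case $m=2$: since $\bigwedge^{1}\mL\to\mD_X^1$ is just the inclusion, the Castelnuovo-type condition reduces to
$$
\rank\mL\geq 3 \qquad\text{and}\qquad \sigma\wedge\tau=0 \ \text{in }\mD_X^2 \ \text{for all local sections } \sigma,\tau \text{ of } \mL .
$$
In this case Theorem \ref{thmA} produces, after a covering of $B$, a surface $\widetilde{Y}$ fibred over $\widetilde{B}$ through which the pulled-back fibration factors, and the general fiber of $\tilde{f}$ is mapped onto a curve in $\widetilde{Y}$; this can be read as a relative counterpart of the classical Castelnuovo-de Franchis picture recalled in Section \ref{sez4}.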
\subsubsection{Local system versus algebraically integrable foliation} On the other hand it is well-known that an algebraically integrable foliation $\sF$ contained in $T_{X/B}$ makes it possible to recover an intermediate variety $Y$ up to birational equivalence; see for example \cite[Lemma 4.12]{L}, recalled in Lemma \ref{lasic}. We show the following comparison theorem: \begin{thml} \label{thmB} Let $f\colon X\to B$ be a semistable fibration and $\mL$ a Castelnuovo-type local system of order $m$. Then $\mL$ gives a normal variety $Y'$ and complex spaces $\widetilde{Y}$ and $\widehat{Y}$, all of dimension $m$, which fit into the following diagram \begin{equation} \xymatrix{ \widetilde{X}\ar[d]\ar[r]& X\ar[dd]& X'\ar[d]\ar[l]& \widehat{X}\ar[l]\ar[d]\ar@/_1.5pc/[lll]\\ \widetilde{Y}\ar[d]& & Y'\ar[d]& \widehat{Y}\ar[d]\ar@{-->}[l]\ar@{-->}@/_1.5pc/[lll]\\ \widetilde{B}\ar[r]& B& B\ar@{=}[l]& \widetilde{B}\ar[l]\ar@{=}@/^1.3pc/[lll] } \end{equation} \newline where $\widetilde{B}\to B$ is a covering, $\widehat{X}\to \widetilde{X}$ is a (generically) degree 1 map, $\widehat{Y}\dashrightarrow Y'$ is a dominant meromorphic map and $\widehat{Y}\dashrightarrow \widetilde{Y}$ is a bimeromorphic map. \end{thml} \noindent See Theorem \ref{dxsx}. This means that a local system of Castelnuovo-type recovers the intermediate variety either up to (possibly non finite) \'etale cover or up to birational morphism. In Theorems \ref{thmA} and \ref{thmB} the algebraicity of $\widetilde{X}, \widehat{X},\widetilde{Y},\widehat{Y}$ highly depends on specific assumptions. In any case Theorem \ref{thmB} should be read in the light of the theory of towers of varieties, see \cite{P}. Finally the notion of local system of Castelnuovo-type gives us a nice theorem to study some of the fixed points of the above correspondence, that is to find conditions on a local system $\mL\leq \mD_X$ or on an algebraically integrable foliation $\sF\subseteq T_{X/B}$ such that $\alpha(\beta(\mL))=\mL$ or $\beta(\alpha(\sF))=\sF$; see Theorem \ref{fissi}. \subsection{Maximal Rationally Connected fibrations} Our point of view can give an answer to some problems concerning rationally connected varieties. First we recall that a fixed ample polarization $\alpha$ on $X$ defines the $\alpha$-slope $\mu(\sQ)$ of any torsion-free coherent sheaf on $X$ as $\mu(\sQ):=\frac{\deg\sQ}{\rank \sQ}$. Let $$ 0= \sF_0 \subsetneq \sF_1 \subsetneq\dots\subsetneq \sF_r =T_{X/B} $$ be the $\alpha$-Harder-Narasimhan filtration of the relative tangent sheaf $T_{X/B}$ and call $\sQ_i$ the quotients $\sF_i/\sF_{i-1}$. We set $$ i_{\max}:=\max\big(\{0<i\leq r\mid \mu(\sQ_i)>0\}\cup \{0\}\big). $$ If $i_{\max} >0$, it is well known that for every index $0< i\leq i_{\max}$, $\sF_i$ is an algebraically integrable foliation with rationally connected general leaf, see \cite[Proposition 3.8, Corollary 3.10]{K}. In particular for such $i$ we obtain rational maps $X\dashrightarrow W_i$ with rationally connected general fibers. We recall in Definition \ref{definitionMRC} the definition of an MRC-fibration, see also \cite[Sect. IV.5]{Kol}, \cite[Sect. 5]{D} and \cite[Thm. IV.5.5]{Kol} for the functoriality of the MRC-fibration. See also \cite{SCT}. In \cite{K}, Question 3.12 poses the problem of the characterization of MRC-fibrations. In other words it is an open problem to understand when the rational map $X\dashrightarrow W_{i_{\max}}$ is an MRC-fibration over the base $B$.
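The following elementary illustration of the index $i_{\max}$ is not taken from \cite{K} and is included only to fix the notation: since the slopes of the Harder-Narasimhan quotients are strictly decreasing, the indices with $\mu(\sQ_i)>0$ form an initial segment of $\{1,\dots,r\}$. For instance, if $r=3$ and
$$
\mu(\sQ_1)>\mu(\sQ_2)>0>\mu(\sQ_3),
$$
then $i_{\max}=2$, and $\sF_2$ is the largest term of the filtration to which the cited result of \cite{K} applies.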
The study of $\sF_{i_{\max}}$ together with the local system $\mD_{X}^1$ allows us to give a solution of this problem, again in the case when this local system is of Castelnuovo-type, see Theorem \ref{mrc}. \begin{thml} \label{thmC} Let $f\colon X\to B$ be a semistable fibration and $$ 0= \sF_0 \subsetneq \sF_1 \subsetneq\dots\subsetneq \sF_r =T_{X/B} $$ the $\alpha$-polarized Harder-Narasimhan filtration of $T_{X/B}$. Assume that $\mD_{X}^1$ is of Castelnuovo-type of order $m$. If the foliation $\sF_{i_{\max}}$ has generic rank $n-m$, then $X\dashrightarrow W_{i_{\max}}$ is the relative MRC fibration of $f$. \end{thml} \subsection{Generalisation of the Castelnuovo-de Franchis theorem} The Castelnuovo-de Franchis theorem plays an important role in algebraic geometry. This theorem, together its generalisations, essentially concerns 1-forms; in Section \ref{sez6} we show an extension to p-forms. Let $\omega_1,\dots, \omega_l \in H^0 (X,\Omega^p_X)$, $l\geq p+1$, be linearly independent $p$-forms such that $\omega_i\wedge\omega_j=0$ (as an element of $\bigwedge^2\Omega^p_X$ and not of $\Omega_X^{2p}$) for any choice of $i,j=1,\dots, l$. These forms generate a subsheaf of $\Omega^p_X$ generically of rank $1$. We denote it by $\sL$. Note that the quotients $\omega_i/\omega_j$ define a non-trivial global meromorphic function on $X$ for every $i\neq j$, $i,j=1,\dots, l$. By taking the differential $d (\omega_i/\omega_j)$ we then get global meromorphic $1$-forms on $X$. We ask that there exist $p$ of these meromorphic differential forms $d (\omega_i/\omega_j)$ that do not wedge to zero; if this is the case we call the subset $\{\omega_1,\dots, \omega_l \}\subset H^0 (X,\Omega^p_X)$ $p$-strict, see Definition \ref{definizionestrict}. For this new setting, this condition is analogous to the strictness condition considered in \cite[Definition 2.1 and 2.2]{Ca2}, see also \cite[Definition 4.4]{RZ4}. \begin{thml} \label{thmD} Let $X$ be an $n$-dimensional smooth variety and let $\{\omega_1,\dots, \omega_l \}\subset H^0 (X,\Omega^p_X)$ be a $p$-strict set. Then there exists a rational map $f\colon X\dashrightarrow Y$ over a $p$-dimensional smooth variety $Y$ such that the $\omega_i$ are pullback of some meromorphic $p$-forms $\eta_i$ on $Y$, $\omega_i=f^*\eta_i$, where $i=1,\dots , l$. \end{thml} \noindent See Theorem \ref{cast2}. If $p=1$ then the rational map $f\colon X\dashrightarrow Y$ turns out to be a morphism, if we allow $Y$ to be normal. If $p\geq 2$ this is not always the case. \subsection{Applications of the generalised Castelnuovo-de Franchis theorem: Iitaka fibrations} We can apply Theorem \ref{thmD} to the case of Iitaka fibrations arising from subbundles $\sL$ of $\Omega^p_X$. We recall that if $X$ is a smooth projective variety of Kodaira dimension $\Kod X\geq 0$, then by a well-known construction of Iitaka, see cf. \cite{F}, \cite{Laz}, there is a birational morphism $u_\infty \colon X_\infty \to X$ from a smooth projective variety $X_\infty$, and a contraction $\phi_\infty\colon X_\infty\to Y_\infty$ onto a projective variety $Y_\infty$ such that a (very) general fiber $F$ of $\phi_\infty\colon X_\infty\to Y_\infty$ is smooth with Kodaira dimension zero, and $\dim Y_\infty$ is equal to $\Kod X$. The map $\phi_\infty\colon X_\infty\to Y_\infty$ is unique up to birational equivalence and it is referred to as an Iitaka fibration of $X$. 
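A standard example, recalled here only for orientation: if $X=C\times E$ with $C$ a smooth curve of genus at least $2$ and $E$ an elliptic curve, then $\Kod X=1$ and the projection
$$
X=C\times E\longrightarrow C
$$
is already an Iitaka fibration; its fibers are copies of $E$, hence of Kodaira dimension zero, and $\dim C=1=\Kod X$.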
In the case of a line bundle $\sL=\sO_X(L)$ such that $\bigoplus H^0(X,nL)$ is a finitely generated $\mathbb C$-algebra, the dimension of ${\rm{Proj}} \bigoplus_{n\in\mathbb N}H^0(X,nL)$ is called Kodaira dimension of $\sL$ and it is denoted by $\Kod \sL$. \begin{thml} \label{thmE} Let $p\in\mathbb N$ and $1\leq p\leq n$. Let $X$ be a smooth variety of dimension $n$. If $\sL\hookrightarrow \Omega^p_X$ is an invertible subsheaf which is globally generated by a $p$-strict subset and if $\Kod X=\Kod \sL=p$, then the Stein factorization of $\varphi_{|\sL|}\colon X\to \mP(H^0(X,\sL)^\vee)$ induces the Iitaka fibration. \end{thml} See Theorem \ref{Iitakauno}. Finally we present another application of Theorem \ref{thmD}. We stress that this application is only conjectural. Indeed it asserts that the same conclusion of Theorem \ref{thmE} can be obtained even in the case where $\sL$ is not globally generated but assuming that there exists $a,b\in \mathbb N$ such that $aK_X-bL$ is nef where $\sL=\sO_X(L)$; see Theorem \ref{Iitakadue}. We maintain it in this paper because it can really be considered as an evidence for abundance conjecture. \begin{ackn} The first author has been supported by JSPS-Japan Society for the Promotion of Science (Postdoctoral Research Fellowship, The University of Tokyo) and the IBS Center for Complex Geometry. The second author has been supported by the grant DIMA Geometry PRIDZUCC and by PRIN 2017 Prot. 2017JTLHJR \lq\lq Geometric, algebraic and analytic methods in arithmetics\rq\rq. \end{ackn} \section{Setting: fibrations, foliations, local systems} \label{sez2} Let $X$ be a smooth complex $n$-dimensional variety and $B$ a smooth complex curve. In this paper we consider semistable fibrations $f\colon X\to B$; we denote by $X_b=f^{-1}(b)$ the fiber over a point $b\in B$ and assume that all the fibers $X_b$ are either smooth or reduced and normal crossing divisors. We recall that $\omega_{X/B}:=f^*\omega_B^\vee\otimes \omega_X$ is the relative dualizing sheaf and $\Omega^1_{X/B}$ is the sheaf of relative differentials defined by the short exact sequence \begin{equation} \label{relativo} 0\to f^*\omega_B\to \Omega^1_X\to \Omega^1_{X/B}\to 0. \end{equation} We also recall that in this setting $\omega_{X/B}$ is locally free while $\Omega^1_{X/B}$ and its wedges $\Omega^k_{X/B}=\bigwedge^k \Omega^1_{X/B}$ are in principle only torsion free, see: c.f. \cite[Section 2]{RZ4}. The direct images $f_*\Omega^k_{X/B}$ are torsion free on the curve $B$ and hence also locally free. \subsection{Foliations} Let's start by recalling the definition of foliation that we will use in this paper. \begin{defn} A foliation is a saturated subsheaf $\sF\subseteq T_X$ which is closed under the Lie bracket, i.e. $[\sF,\sF]\subseteq \sF$. The singularity locus of a foliation is the subset of $X$ on which $\sF$ is not locally free, and it has codimension at least 2. A leaf of $\sF$ is the maximal connected, locally closed submanifold $L$ such that $T_L =\sF|_L$. We also recall that a foliation $\sF$ is called algebraically integrable if its leaves are algebraic. \end{defn} Of course from the fibration $f\colon X\to B$ we have the foliation induced by the relative tangent sheaf, that is the kernel of the differential map. 
This kernel is usually denoted by $T_{X/B}$ and it fits in the following exact sequence, dual to (\ref{relativo}), \begin{equation} \label{tanrel} 0\to T_{X/B}\to T_X\to f^*T_B\to N\to 0 \end{equation} where $N=\ext^1(\Omega^1_{X/B}, \sO_X)$ is a torsion sheaf supported on the critical locus of $f$. In this case we say that the foliation is induced by the fibration. Of course foliations induced by fibrations are algebraically integrable. Algebraically integrable foliations give in some sense a converse of this construction by the following result, see for example \cite[Lemma 4.12]{L}. \begin{lem} \label{lasic} Let $X$ be a smooth projective variety and let $\sF$ be an algebraically integrable foliation on $X$. Then there is a unique irreducible closed subvariety $W$ of $\textnormal{Chow}(X)$ whose general point parametrizes the closure of a general leaf of $\sF$. In other words, if $U \subseteq W \times X$ is the universal cycle with projections $\pi\colon U\to W$ and $e \colon U \to X$, then $e$ is birational and $e(\pi^{-1}(w))\subseteq X$ is the closure of a leaf of $\sF$ for a general point $w\in W$. \begin{equation} \xymatrix{ U\ar[d]_{e}\ar[r]^{\pi}&W\\ X& } \end{equation} Then there exists a foliation $\widehat{\sF}$ on the normalisation $\nu\colon U^\nu\to U$ induced by $\pi\circ\nu$ and which coincides with $\sF$ on $(e\circ \nu)^{-1}(X^\circ)$, where $X^\circ$ is a big open subset of $X$. \end{lem} We also recall the definition of algebraic and transcendental part of a foliation. Let $\sF$ be a foliation on $X$. There exist a normal variety $Y$, unique up to birational equivalence, a dominant rational map with connected fibers $\varphi\colon X\dashrightarrow Y$, and a foliation $\sG$ on $Y$ such that the following conditions hold. \begin{enumerate} \item $\sG$ is purely transcendental, i.e., there is no positive-dimensional algebraic subvariety through a general point of $Y$ that is tangent to $\sG$. \item $\sF$ is the pull-back of $\sG$ via $\varphi$. This means the following. Let $X^\circ\subset X$ and $Y^\circ\subset Y$ be smooth open subsets such that $\varphi$ restricts to a smooth morphism $\varphi^\circ$. Then $\sF|_{X^\circ}=(d\varphi^\circ)^{-1}\sG|_{Y^\circ}$. \end{enumerate} \begin{defn} \label{algfol} The foliation $\sF^a$ induced by $\varphi$ is called the algebraic part of $\sF$ while $\sG$ is its transcendental part. \end{defn} See for example \cite{AD}. \subsection{Local systems} The local systems on $B$ associated to $f$ are defined as follows. Consider again Sequence (\ref{relativo}) and its wedges \begin{equation} 0\to f^*\omega_B\otimes \Omega^{k-1}_{X/B}\to \Omega^k_X\to \Omega^{k}_{X/B}\to 0 \end{equation} for $k=1,\dots,n-1$. By pushforward we can write \begin{equation} \label{seqhodge} 0\to \omega_B\otimes f_*\Omega^{k-1}_{X/B}\to f_*\Omega^k_X\to f_*\Omega^{k}_{X/B}\to R^1f_*\Omega^{k-1}_{X/B}\otimes \omega_B\to \dots \end{equation} and we take the corresponding sub-sequence of de Rham closed holomorphic forms as follows \begin{equation} \label{diag} \xymatrix{ 0\ar[r] &\omega_B\otimes f_*\Omega_{X/B}^{k-1}\ar[r]\ar@{=}[d]& f_*\Omega_{X,d}^{k}\ar[r]\ar@{^{(}->}[d]& f_*\Omega^{k}_{X/B,d_{X/B}}\ar[r]\ar@{^{(}->}[d]& \dots\\ 0\ar[r] &\omega_B\otimes f_*\Omega_{X/B}^{k-1}\ar[r]& f_*\Omega_{X}^{k}\ar[r]& f_*\Omega^{k}_{X/B}\ar[r]& \dots\\ } \end{equation} This gives the following definition: \begin{defn} \label{locsys} We call $\mD^k_X$, for $k=1,\dots,n-1$, the image of the map $f_*\Omega_{X,d}^{k}\to f_*\Omega^{k}_{X/B,d_{X/B}}$.
\end{defn} The $\mD^k_X$ are indeed local systems. In \cite{RZ4} we have proved this result for $\mD^1_X$ and $\mD^{n-1}_X$. The proof for $2\leq k\leq n-2$ is similar, hence it will be omitted here \begin{prop} $\mD^k_X$ is a local system on $B$ for $k=1,\dots,n-1$. \end{prop} \begin{proof} See \cite[Lemma 3.4 and 3.6]{RZ4}. \end{proof} Recall that by the famous Fujita's decomposition theorem, see \cite{Fu}, \cite{Fu2}, it holds that: $$ f_*\omega_{X/B}=\sU\oplus\sA $$ where $\sU$ is a unitary flat vector bundle and $\sA$ is an ample one. By the correspondence between unitary flat vector bundles and local systems, cf. \cite{De}, Fujita's decomposition gives naturally a local system $\mathbb U$ on $B$. There is a vast literature on this topic; see cf. \cite{BZ1}, \cite{BZ2}, \cite{CD1}, \cite{CD2}, \cite{CD3}, \cite{CK}. In \cite{RZ4} we have shed some light on the higher dimensional geometry associated to $\mathbb U$ and $\mD_{X}^1$ and their respective monodromies. In particular we have shown that $\mD_{X}^{n-1}=\mathbb U$ is the local system of relative top forms on the fibers. In this paper we are mostly concerned with $\mD^1_X$. We denote $\mD_X:=\mD^1_X$ for simplicity, and we point out that, by Definition \ref{locsys}, it fits into the following short exact sequence \begin{equation} \label{dx} 0\to \omega_B\to f_*\Omega^1_{X,d}\to \mD_X\to 0. \end{equation} \section{Natural correspondence between foliations and local systems} \label{sez3} The above discussion shows that in the case of a fibration $f\colon X\to B$, we can define an associated foliation and a local system. In this section we define a precise correspondence between these objects. \subsection{Local systems defined by foliations} Take $\sF\subseteq T_{X/B}\subset T_X$ a foliation on $X$ and consider the exact sequence of its inclusion in the tangent sheaf \begin{equation} 0\to \sF\to T_X\to \sK\to 0 \end{equation} where $\sK$ is torsion free because by definition $\sF$ is saturated. Actually since $X$ is smooth and hence $T_X$ is locally free, $\sF$ is reflexive. By taking the dual we obtain an exact sequence \begin{equation} \label{uno} 0\to \sK^\vee\to \Omega_X^1\to \sF^\vee\to M\to 0 \end{equation} where $M$ is supported on the singular locus of the singular fibers. We call $\sQ$ the cokernel \begin{equation} 0\to \sK^\vee\to \Omega_X^1\to \sQ\to 0 \end{equation} and pushing forward via $f_*$ we have \begin{equation} 0\to f_*\sK^\vee\to f_*\Omega_X^1\to f_*\sQ\to \dots \end{equation} Now it is straightforward to see from the condition $\sF\subseteq T_{X/B}$ that the inclusion of vector bundles $\omega_B\subset f_*\Omega_X^1$ factors through $f_*\sK^\vee$ \begin{equation} \xymatrix{ &\omega_B\ar@{^{(}->}[d]\ar@{^{(}->}[rd]&&&\\ 0\ar[r]&f_*\sK^\vee\ar[r]&f_*\Omega_X^1\ar[r]&f_*\sQ^\vee\ar[r]&\dots } \end{equation} Restricting this diagram to de Rham closed forms $f_*\Omega_{X,d}^1$ we obtain the commutative triangle \begin{equation} \xymatrix{ \omega_B\ar@{^{(}->}[d]\ar@{^{(}->}[rd]&\\ f_*\sK_d^\vee\ar[r]&f_*\Omega_{X,d}^1 } \end{equation} where $f_*\sK_d^\vee$ indicates the intersection of $f_*\sK^\vee$ with the sheaf of de Rham closed forms. The cokernel of the diagonal arrow is, by Sequence (\ref{dx}), the local system $\mD_X$, hence the cokernel of $\omega_B\hookrightarrow f_*\sK_d^\vee$ is a local subsystem of $\mD_X$ which we denote by $\mL_\sF$, and we call the local system obtained from the foliation $\sF$. 
By definition the following sequence is exact: \begin{equation} \label{kappa} 0\to \omega_B\to f_*\sK_d^\vee\to \mL_\sF\to 0. \end{equation} Actually $\mD_X/ \mL_\sF$ is also a local system and we have an exact sequence \begin{equation} \label{esattalocalsys} 0\to \mL_\sF\to \mD_X\to \mD_X/ \mL_\sF\to 0. \end{equation} In this paper we will work mainly with the local system $\mL_\sF$, but of course one could equivalently consider the cokernel $\mD_X/ \mL_\sF$. \begin{expl} \label{casibanali} We look at the two extreme cases $\sF=0$ and $\sF=T_{X/B}$. For $\sF=0$, we have that $\mL_\sF=\mD_{X}$ and Sequence \ref{esattalocalsys} is $$ 0\to \mD_{X}\to \mD_{X}\to 0\to 0. $$ On the other hand if $\sF=T_{X/B}$ we have $\mL_\sF=0$. In fact by (\ref{tanrel}) we have the exact sequence $$ 0\to \sK\to f^*T_B\to N\to 0. $$ Dualizing we have the inclusion $f^*\omega_B\to \sK^\vee$ which fits into the diagram \begin{equation} \xymatrix{ 0\ar[r]&f^*\omega_B\ar@{^{(}->}[d]\ar[r]&\Omega_X^1\ar@{=}[d]\ar[r]&\Omega^1_{X/B}\ar[d]\ar[r]&0\\ 0\ar[r]&\sK^\vee\ar[r]&\Omega_X^1\ar[r]&\sQ^\vee\ar[r]&0 } \end{equation} By the commutativity of the diagram the co-kernel of the injection $f^*\omega_B\to \sK^\vee$ is also the kernel of $\Omega^1_{X/B}\to \sQ^\vee$ hence it is zero because it should be a torsion sheaf inside the torsion free sheaf $\Omega^1_{X/B}$ (recall that $f$ is semistable). This means that $f^*\omega_B\cong\sK^\vee$ and we easily have the result. Sequence \ref{esattalocalsys} is $$ 0\to 0\to \mD_{X}\to \mD_{X}\to 0. $$ \end{expl} \begin{rmk} Note that so far we have not made any particular assumptions on the foliation $\sF$; we show now that in this framework it is not restrictive to consider algebraically integrable foliations. In fact take $\sF$ an arbitrary foliation and call $Y$ as in Definition \ref{algfol} where the dominant rational map $\varphi\colon X\dashrightarrow Y$ defines the algebraic and the transcendental part of $\sF$. In our setting we also have a morphism $Y\to B$ and, since $\sF^a\subseteq \sF\subseteq T_{X/B}\subset T_X$, we can associate a local system $\mL_{\sF^a}$ to the algebraic part $\sF^a$. It is not difficult to see that the inclusion $\sF^a\subseteq \sF$ is reversed at the level of local systems as $\mL_{\sF}\leq \mL_{\sF^a}$. This construction shows that, even in the case of a general foliation, we can always consider the local system associated to its algebraic part and reduce to the algebraic case. Therefore in this paper we will be mostly concerned with algebraically integrable foliations. \end{rmk} \subsubsection{The extrinsic construction of the local system $\mL_\sF$} The above construction of $\mL_{\sF}$ is fundamentally intrinsic. We present now a more extrinsic alternative construction, which will be very useful in the following. Take $\sF\subseteq T_{X/B}\subset T_X$ an algebraically integrable foliation. The following diagram \begin{equation} \label{diagfol} \xymatrix{ U\ar[d]_{e}\ar@/_2pc/[dd]_{\tilde{f}}\ar[r]^{\pi}&W\ar[ddl]\\ X\ar[d]_{f}&\\ B } \end{equation} follows by the one of Lemma \ref{lasic} in the relative setting. Possibly by blowing up we assume that $U$ and $W$ are smooth; hence note that $W$ is no longer necessarily in $\textnormal{Chow}(X)$. Call $\tilde{f}$ the composition $f\circ e$ and $\tilde{\sF}=T_{U/W}$. Note that $\tilde{\sF}\subseteq T_{U/B}$ hence $\tilde{f}$ factors through $W$. Now consider the sequence of the relative tangent sheaf of $\pi$: \begin{equation} 0\to \tilde{\sF}\to T_U\to \pi^*T_W\to N\to 0. 
\end{equation} We call $\sL$ the image of $T_U\to \pi^*T_W$ \begin{equation} \label{due} 0\to \tilde{\sF}\to T_U\to \sL\to 0 \end{equation} Taking the dual of $T_U\to \sL$ and the direct image via $\tilde{f}_*$ gives \begin{equation} \tilde{f}_*\sL^\vee\to \tilde{f}_*\Omega^1_U \end{equation} Now since $e$ is a birational morphism, we have that $\tilde{f}_*\Omega^1_U\cong {f}_*\Omega^1_X$, hence taking the closed forms as before we obtain \begin{equation} \label{ll} \xymatrix{ \omega_B\ar@{^{(}->}[d]\ar@{^{(}->}[rd]&\\ \tilde{f}_*\sL_d^\vee\ar[r]&f_*\Omega_{X,d}^1\cong \tilde{f}_*\Omega^1_{U,d}. } \end{equation} and the cokernel of the vertical map gives a sublocal system of $\mD_X$. By analogy we call $\mD_W$ this local system but we immediately prove that this construction agrees with the one seen before. We state the following easy Lemma for later reference. \begin{lem} \label{lemma} If we have a commutative diagram of sheaves as follows \begin{equation} \xymatrix{ 0\ar[r]&\sA\ar[r]&\sB\ar[r]&\sC \ar[r]&0\\ 0\ar[r]&\sA'\ar[r]&\sB\ar[r]\ar@{=}[u]&\sC'\ar@{^{(}->}[u]\ar[r]&0 } \end{equation} then $\sA\cong \sA'$, hence also $\sC\cong \sC'$. \end{lem} \begin{proof} It is immediate that we have an injective morphism $\sA'\hookrightarrow \sA$. The cokernel of this map is by commutativity isomorphic to the kernel of $\sC'\hookrightarrow \sC$, hence zero and we have proved the desired result. \end{proof} \begin{prop} \label{stesso} The local system $\mD_W$ coincides with $\mL_\sF$. \end{prop} \begin{proof} We compare the two constructions as follows. Recall that $\mL_{\sF}$ is defined by taking the pushforward via $f$ of the exact Sequence (\ref{uno}) \begin{equation} 0\to \sK^\vee\to \Omega_X^1\to \sF^\vee\to M\to 0 \end{equation} and considering the quotient of ${f}_*\sK^\vee_d$ with kernel $\omega_B$ as in (\ref{kappa}). On the other hand for $\mD_W$ we dualize Sequence (\ref{due}) to obtain \begin{equation}\label{primaoccorrenza} 0\to \sL^\vee\to \Omega^1_{U}\to \tilde{\sF}^\vee\to M'\to 0 \end{equation} and we proceed exactly as before after taking the pushforward via $\tilde{f}$. On $B$ we can compare the two sequences obtained after taking the direct image \begin{equation} \xymatrix{ 0\ar[r]&f_*\sK^\vee\ar[r]&f_*\Omega^1_{X}\ar[r]&f_*\sF^\vee\ar[r]&\dots\\ 0\ar[r]&\tilde{f}_*\sL^\vee\ar[r]&\tilde{f}_*\Omega^1_{U}\ar[r]\ar@{=}[u]&\tilde{f}_*\tilde{\sF}^\vee\ar[r]&\dots } \end{equation} The equality comes from the fact that $e$ is birational and $\Omega^1_X$ and $e_*\Omega^1_U$ coincide on a open subset of $X$ with complement of codimension at least 2, see also \cite[Exercise 5.3 page 419]{H} in the case of surfaces. Similarly we also have a map $\tilde{f}_*\tilde{\sF}^\vee\to f_*\sF^\vee$ as follows. If we consider $A\subset B$ an open subset, every section in $\Gamma(A,\tilde{f}_*\tilde{\sF}^\vee)$ is a section in $\Gamma(\tilde{f}^{-1}(A),\tilde{\sF}^\vee)$. Now $\tilde{\sF}^\vee$ and $\sF^\vee$ coincide on an open subset in $U$ of the form $e^{-1}(X^0)$, where $X^0$ is an open subset with complement of codimension at least 2 in $X$. Hence our section gives a section of $\sF^\vee$ on the intersection $X^0\cap f^{-1}(A)$. Since $\sF^\vee$ is reflexive, this gives a section of $\Gamma(f^{-1}(A),{\sF}^\vee)=\Gamma(A,{f}_*{\sF}^\vee)$ by the Hartogs principle. 
This map is injective hence by Lemma \ref{lemma} the above diagram can be completed as follows \begin{equation} \xymatrix{ 0\ar[r]&f_*\sK^\vee\ar[r]&f_*\Omega^1_{X}\ar[r]&f_*\sF^\vee\ar[r]&\dots\\ 0\ar[r]&\tilde{f}_*\sL^\vee\ar[r]\ar@{=}[u]&\tilde{f}_*\Omega^1_{U}\ar[r]\ar@{=}[u]&\tilde{f}_*\tilde{\sF}^\vee\ar[r]\ar[u]&\dots } \end{equation} The identity $f_*\sK^\vee=\tilde{f}_*\sL^\vee$ immediately gives the thesis by taking the de Rham closed forms and the quotient of kernel $\omega_B$. \end{proof} \subsection{Foliations defined by local systems} \label{localtofoliation} We can reverse the point of view to obtain a foliation $\sF\subseteq T_{X/B}$ starting from a local system $\mL\leq\mD_{X}$. By the exact Sequence \ref{dx} we obtain the following diagram \begin{equation} \label{ss} \xymatrix{ 0\ar[r]&\omega_B\ar[r]&f_*\Omega^1_{X,d}\ar[r]&\mD_{X}\ar[r]&0\\ 0\ar[r]&\omega_B\ar[r]\ar@{=}[u]&\sS\ar[r]\ar@{^{(}->}[u]&\mL\ar[r]\ar@{^{(}->}[u]&0 } \end{equation} which defines $\sS$ as a subsheaf of $f_*\Omega^1_{X,d}$. From the natural map $f^*f_*\Omega_{X}^1\to \Omega_{X}^1$ we then get the map $\eta\colon f^{-1}\sS\to \Omega_{X,d}^1\hookrightarrow\Omega_{X}^1$. The image sheaf $\sS_f$ is a subsheaf of $\Omega_{X}^1$. Taking the saturation of the subsheaf of $T_{X/B}$ given by the vector fields vanishing on $\sS_f$ we get a foliation $\sF_{\mL}$. More precisely call $\sF'$ \begin{equation} \label{defF} \sF'(U):=\{v\in T_{X/B}(U)\mid \forall x\in U, \exists U_x\text{ with }x\in U_x\subset U\text{ such that } \iota_v s=0 \text{ for every }s\in \sS_f(U_x) \} \end{equation} where as usual $\iota$ denotes the contraction. Note that actually $\sF'$ is a sheaf since it inherits its sheaf properties from $T_{X/B}$. We show that it is closed under Lie bracket. Let $v,w$ be sections of $\sF'$, then $$ \iota_{[v,w]} s = \mathcal{L}_v \iota_w s- \iota_w \mathcal{L}_v s=- \iota_w \mathcal{L}_v s $$ where $\sL$ is the Lie derivative. Since also $$ \mathcal{L}_v s=\iota_v ds+d(\iota_v s)=0 $$ we have the desired result since $s\in \sS_f\subset \Omega^1_{X,d}$ is closed. The saturation $\sF_{\mL}$ of $\sF'$ is then a foliation on $X$. Indeed $\sF_{\mL}$ and $\sF'$ coincide on an open dense subset $X^0$ of $X$. Hence if we take $v\in \Gamma(\sF_{\mL},V)$ a section of $\sF_{\mL}$ on an open subset $V$, then the map given by the Lie bracket $$ [v,-]\colon \sF_{\mL}|_V\to T_X/\sF_{\mL}|_V $$ is zero on $X^0\cap V$. By the very definition of saturation, $T_X/\sF_{\mL}$ is torsion free, this implies that the above map is identically zero, hence $\sF_{\mL}$ is also closed under Lie bracket. Finally we note that $\sF_{\mL}$ is not necessarily an algebraically integrable foliation. \subsection{The Correspondence} By the above constructions we have two maps between the set of foliations contained in $T_{X/B}$ and the set of local systems contained in $\mD_{X}$ \begin{equation} \label{corrisponde} \big\{\text{foliations } \sF\subseteq T_{X/B}\big\}\stackrel[\beta]{\alpha}{\rightleftarrows} \big\{\text{local systems } \mL\leq \mD_{X}\big\} \end{equation} defined by $\alpha(\sF)=\mL_{\sF}$ and $\beta(\mL)=\sF_{\mL}$. These maps are not in general inverse of each other; for example it may very well be that different foliations give the same local system. The following proposition is an example of this and will also be useful later. \begin{prop} \label{uguaglianzalocalsys} Take $\sF\subseteq T_{X/B}\subset T_X$ be an algebraically integrable foliation and $\pi$ the associated map as in Diagram (\ref{diagfol}). 
If the general fiber $F$ of $\pi$ is regular, that is $h^0(F,\Omega^1_F)=0$, then $\mL_\sF=\mD_X$. \end{prop} \begin{proof} By Proposition \ref{stesso}, we use the interpretation of the local system as $\mD_W$, that is we consider the exact sequence (\ref{primaoccorrenza}) \begin{equation} \label{seqlemma} 0\to \sL^\vee\to \Omega^1_{U}\to \tilde{\sF}^\vee\to M'\to 0. \end{equation} It will be enough to show that $\tilde{f}_*\sL^\vee\cong\tilde{f}_*\Omega^1_{U}$. One inclusion is trivial; to prove the opposite, take a section $s\in \Gamma(A,\tilde{f}_*\Omega^1_{U})$ on an open $A\subset B$. This is of course a section in $\Gamma(\tilde{f}^{-1}(A),\Omega^1_{U})$ and we show that it goes to zero in $\Gamma(\tilde{f}^{-1}(A),\tilde{\sF}^\vee)$. Note that $\tilde{\sF}^\vee$ is the double dual of $\Omega_{U/W}^1$ since Sequence \ref{seqlemma} is obtained by dualizing twice $$ 0\to \pi^*\Omega^1_W\to \Omega^1_U\to \Omega^1_{U/W}\to 0. $$ Hence $\tilde{\sF}^\vee$ is torsion free and, restricted to the general fiber, coincides with the sheaf of 1-forms on such a fiber. The section $s$ restricts to a global 1-form on the general fiber, hence vanishes by hypothesis. Since $\tilde{\sF}^\vee$ is torsion free we conclude that $s$ is zero in $\Gamma(\tilde{f}^{-1}(A),\tilde{\sF}^\vee)$, therefore $s\in \Gamma(A,\tilde{f}_*\sL^\vee)$ and this concludes the proof. \end{proof} In the setting of this Proposition, we immediately see that the local systems $\mL_\sF=\alpha(\sF)$ and $\mL_{0}=\alpha(0)$ are both equal to $\mD_X$ (see Example \ref{casibanali}) even if the foliation $\sF$ is different from the trivial foliation. Nevertheless in the next sections we will give some description of the fixed points of this correspondence, that is foliations $\sF$ with $\beta(\alpha(\sF))=\sF$ and local systems $\mL$ with $\alpha(\beta(\mL))=\mL$. \section{Towers of fibrations} \label{sez4} In this section we consider towers of semistable fibrations over a smooth curve $B$ and we study the relation with local systems $\mL$ and foliations $\sF$. \subsection{2-Towers} Let $X$ and $Y$ be two smooth algebraic varieties of dimension $n$ and $m$ respectively. Let $f\colon X\to B$ be a semistable fibration and $h\colon X\to Y$, $g\colon Y\to B$ fibrations such that $f=g\circ h$. That is we are considering the following situation \begin{equation} \xymatrix{ X\ar[d]^{h}\ar@/_2pc/[dd]_{f}\\ Y\ar[d]^{g}\\ B } \end{equation} that we call a $2$-Tower. By Section \ref{sez2}, we have the local systems $\mD_X$ and $\mD_Y$ which we recall are defined by the exact sequences \begin{equation} \label{dx1} 0\to \omega_B\to f_*\Omega^1_{X,d}\to \mD_X\to 0 \end{equation} and \begin{equation} \label{dy} 0\to \omega_B\to g_*\Omega^1_{Y,d}\to \mD_Y\to 0 \end{equation} respectively. The first easy relation between $\mD_X$ and $\mD_Y$ is given by the following proposition. \begin{prop} In our setting, we have an inclusion of local systems $\mD_Y\leq\mD_X$. \end{prop} \begin{proof} By the inclusion of sheaves $g_*\Omega^1_{Y,d}\subseteq f_*\Omega^1_{X,d}$ given by pullback via $h$, we immediately have the claim by comparing the above sequences (\ref{dx1}) and (\ref{dy}) \begin{equation} \xymatrix{ 0\ar[r]&\omega_B\ar[r]\ar@{=}[d]&f_*\Omega^1_{X,d}\ar[r]&\mD_X\ar[r]&0\\ 0\ar[r]&\omega_B\ar[r]&g_*\Omega^1_{Y,d}\ar[r]\ar@{^{(}->}[u]&\mD_Y\ar[r]\ar@{^{(}->}[u]&0 } \end{equation} \end{proof} Both Sequences (\ref{dx1}) and (\ref{dy}) split by \cite[Lemma 2.2]{RZ4} and the splitting on (\ref{dx1}) can be chosen in agreement with the one on (\ref{dy}).
A natural question is determining to what extent the local system $\mD_Y$ recovers the variety $Y$. More precisely, if we have a semistable fibration $f\colon X\to B$ and a local system $\mL\leq\mD_X$, under which hypothesis on $\mL$ and in what sense can we recover the variety $Y$ and the morphisms $g$ and $h$? To approach this problem we need the famous classical theory of Castelnuovo and de Franchis. We briefly recall that this result states that if $S$ is a smooth complex surface with two linearly independent one forms $\eta_1,\eta_2$ such that $\eta_1\wedge\eta_2=0$, then there exists a morphism onto a smooth curve $p\colon S\to C$ and furthermore $\eta_1,\eta_2$ are pullback via $p$ of one forms on $C$. This result has been later generalized for higher dimensional varieties by Catanese \cite[Theorem 1.14]{Ca2} and Ran \cite[Prop II.1]{Ran}. We also have proved a generalization and a relative version of this result in \cite[Theorem 5.6 and Theorem 5.8]{RZ4}. The idea behind all these generalizations is the following. Let $X$ be an $n$-dimensional smooth variety and $\omega_1,\dots, \omega_l \in H^0 (X,\Omega^1_X)$ linearly independent 1-forms such that $\omega_{j_1}\wedge\dots\wedge \omega_{j_{k+1}}= 0$ for every $j_1,\dots,j_{k+1}$ and that no collection of $k$ linearly independent forms in the span of $\omega_1,\dots, \omega_{j_{k+1}}$ wedges to zero. Then there exists a holomorphic map $p\colon X\to Y$ over a $k$-dimensional normal variety $Y$ such that $\omega_i\in p^*H^0(Y,\Omega^1_Y)$. The crucial point in the proof is that these global 1-forms naturally define a foliation on $X$ whose leaves are closed on a good open set $X\setminus \Sigma$, with $\codim \Sigma\geq 2$. Furthermore on the universal covering $X_U\to X$ these forms are exact, $\omega_i=dF_i$, and the functions $F_i$ define a holomorphic map $\phi \colon X_U\to \mC^l$ constant on the leaves of the foliation. The action of the fundamental group of $X$ on the image of $\phi$ is induced by the effect of the deck transformations on the $F_i$ by \begin{equation} \label{action} F_i(\gamma x)=F_i(x)+c_\gamma \end{equation} Hence the action of $\phi(X_U)$ is properly discontinuous and we get $Y$ as the normalization of the quotient. Now note that given our local system $\mD_{X}$, the wedge product naturally gives a map $\bigwedge^{i} \mD_X\to \mD_X^i$ because the wedge of closed forms is a closed form. Considering $\mL\leq\mD_X$ as above we naturally have the restrictions $\bigwedge^{i} \mL\to \mD_X^i$ and inspired by the Castelnuovo-de Franchis result we give the following definition \begin{defn} \label{deflocsys} We say that $\mL\leq\mD_X$ is an {\it{order $m$ Castelnuovo generated}} local system if it is generated under the monodromy action by a vector space $V\leq\Gamma(A,\mL)$ on a contractible open set $A\subset B$ such that $\dim V \geq m+1$, the map $\bigwedge^{m} V\to \mD_X^m$ is zero while $\bigwedge^{m-1} V\to \mD_X^{m-1}$ is injective on decomposable elements. We say that $\mL$ is {\it{of Castelnuovo-type of order $m$}} if $\rank\mL \geq m+1$, $\bigwedge^{m} \mL\to \mD_X^m$ is zero while $\bigwedge^{m-1} \mL\to \mD_X^{m-1}$ is injective on decomposable elements. \end{defn} We will need the following Lemma which has its own interest. \begin{lem} \label{tecnico} If $s_1,\dots,s_{m+1}$ are 1-forms on $X$ such that the wedge product of every $m$-uple is an $m$-form which vanishes when restricted to the fibers of $f$, then the $m+1$-wedge product $s_1\wedge\dots\wedge s_{m+1}$ is zero on $X$. 
\end{lem} \begin{proof} It is enough to prove this vanishing on a suitable open subset of $X$. Consider local coordinates $x_1,\dots,x_{n-1},t$, with $t$ being the variable on the base $B$. The wedge product $s_1\wedge\dots\wedge s_{m+1}$ is an $(m+1)$-form locally given by the $(m+1)\times (m+1)$ minors of the $(m+1)\times n$ matrix obtained from the local expressions of the $s_i$. The hypothesis that all the possible $m$-fold wedge products are zero when restricted to the fibers means that all the $m\times m$ minors in which $dt$ does not appear are zero. From this it easily follows that all the $(m+1)\times (m+1)$ minors are zero. \end{proof} The first result is the following \begin{thm} \label{castel} Let $f\colon X\to B$ be a semistable fibration and $\mL\leq\mD_X$ an order $m$ Castelnuovo generated local system. Then up to a base change $\tilde{f}\colon \widetilde{X}\to \widetilde{B}$ there exist a normal $m$-dimensional complex space $\widetilde{Y}$ and holomorphic maps $\tilde{g}\colon\widetilde{Y}\to\widetilde{B}$ and $\tilde{h}\colon \widetilde{X}\to\widetilde{Y}$ such that $\tilde{f}=\tilde{g}\circ \tilde{h}$. \end{thm} \begin{proof} To the local system $\mL$ we associate its monodromy group as follows. The action $\rho$ of the fundamental group $\pi_1(B, b)$ on the stalk of $\mD_X$ restricts to an action $\rho_\mL$ on the stalk of $\mL$. Denote $\ker \rho_\mL$ by $H_\mL$ and consider $\widetilde{B}\to B$ the covering classified by $H_\mL$. Of course $\tilde{f}\colon \widetilde{X}\to \widetilde{B}$ denotes the associated base change. The inverse image of the local system $\mL$ on $\widetilde{B}$ is trivial and so the liftings of the sections of $\mL$, obtained as recalled above by \cite[Lemma 2.2]{RZ4}, are global 1-forms in $\tilde{f}_*\Omega^{1}_{\widetilde{X},d}$, in particular $V\leq H^0(\widetilde{B},\tilde{f}_*\Omega^{1}_{\widetilde{X},d})$. The condition of triviality of $\bigwedge^{m} V\to \mD_X^m$ holds also on $\widetilde{X}$. Hence, as we have seen by Lemma \ref{tecnico}, we have the vanishing of $\bigwedge^{m+1} V\to\tilde{f}_*\Omega^{m+1}_{\widetilde{X},d}$ and we get a set of closed $1$-forms on $\widetilde{X}$ such that every possible $(m+1)$-fold wedge is zero. On the other hand, by the hypothesis on $\bigwedge^{m-1} V\to \mD_X^{m-1}$, no collection of $m-1$ linearly independent forms wedges to zero. Hence we have two cases: either no collection of $m$ linearly independent forms wedges to zero or there are at least $m$ of these forms with zero wedge. In the first case by the Castelnuovo-de Franchis Theorem \cite[Theorem 1.14]{Ca2} there exists a foliation on $\widetilde{X}$ defined by the elements of $V$ which gives, as recalled above, a normal $m$-dimensional complex space $\widetilde{Y}$ and a morphism $\tilde{h}\colon \widetilde{X}\to \widetilde{Y}$ such that all these global 1-forms on $\widetilde{X}$ are pullbacks via $\tilde{h}$ of 1-forms on $\widetilde{Y}$. Note that since the covering may not be finite, $\widetilde{B}$ and $\widetilde{X}$ may not be compact. Hence it is necessary that our 1-forms are closed so that they still define an integrable foliation. To conclude our proof in this case we need to show that $\tilde{f}$ factors via $\tilde{h}$. It is enough to consider the morphism $$ \widetilde{X}\times\widetilde{B}\xrightarrow{\tilde{h}\times\text{id}}\widetilde{Y}\times\widetilde{B}\xrightarrow{p}\widetilde{B} $$ where $p$ is the projection. Then $ \widetilde{X}$ is isomorphic to the incidence variety $I\subset \widetilde{X}\times\widetilde{B}$.
Furthermore note that the hypotheses on $\bigwedge^{m} V\to \mD_X^m$ and $\bigwedge^{m-1} V\to \mD_X^{m-1}$ ensure that if $\widetilde{F}$ is a fiber of $\tilde{f}$ then $\tilde{h}(\widetilde{F})$ is exactly an $(m-1)$-dimensional subvariety of $\widetilde{Y}$. In particular for $y\in \tilde{h}(\widetilde{F})$ we have that $\tilde{h}_{|\widetilde{F}}^{-1}(y)\subseteq \tilde{h}^{-1}(y)$ have the same dimension. Since the fibers of $\tilde{h}$ are connected, we have that $\tilde{h}_{|\widetilde{F}}^{-1}(y)= \tilde{h}^{-1}(y)$ and $\widetilde{Y}$ is isomorphic to the image $J:=(\tilde{h}\times\text{id})(I)$; we define $\tilde{g}$ as the restriction of $p$ to $J$. In the second case, that is when there are at least $m$ forms among the liftings of $V$ with zero wedge, we can also apply the Castelnuovo-de Franchis Theorem \cite[Theorem 1.14]{Ca2}. In this case however we obtain a map $\tilde{h}'$ to an $(m-1)$-dimensional variety $\widetilde{Z}$. By our hypothesis on $\bigwedge^{m-1} V\to \mD_X^{m-1}$ it immediately follows that the restriction $\tilde{h}'_{|\widetilde{F}}$ is surjective onto $\widetilde{Z}$. Hence we define $\widetilde{Y}$ through the Stein factorization of $\tilde{h}'\times\text{id}$ onto $\widetilde{Z}\times \widetilde{B}$. Note that $\tilde{g}\colon \widetilde{Y}\to \widetilde{B}$ is immediately induced by the projection on $\widetilde{B}$. \end{proof} Clearly the condition on $\bigwedge^{m-1} V\to \mD_{X}^{m-1}$ fixes the dimension of $\widetilde{Y}$ to be exactly $m$. Removing this condition we obtain the following corollary: \begin{cor} Let $f\colon X\to B$ be a semistable fibration and $\mL\leq\mD_X$ a local system generated by $V$ with $\dim V \geq m+1$ and such that the map $\bigwedge^{m} V\to \mD_X^m$ is zero. Then up to a base change $\tilde{f}\colon \widetilde{X}\to \widetilde{B}$ there exist a normal complex space $\widetilde{Y}$ of dimension $\leq m$ and morphisms $\tilde{g}$ and $\tilde{h}$ such that $\tilde{f}=\tilde{g}\circ \tilde{h}$. \end{cor} \begin{rmk} \label{algebrico} As highlighted in the proof of Theorem \ref{castel}, the monodromy group $G_\mL=\pi_1(B,b)/H_\mL$ may not be finite; in this case $ \widetilde{X}, \widetilde{Y},\widetilde{B}$ are not algebraic varieties but only complex spaces. On the other hand note that the fibers $\widetilde{F}$ are algebraic and so are their images $\tilde{h}(\widetilde{F})$ (by \cite[Theorem 5.6]{RZ4} they are actually varieties of general type) and the holomorphic map $ \tilde{h}_{|\widetilde{F}}$ is a morphism in the algebraic category. In particular $T_{\widetilde{X}/\widetilde{Y}}$ is an algebraically integrable foliation. If the monodromy group $G_\mL=\pi_1(B,b)/H_\mL$ is finite then everything is in the algebraic category. \end{rmk} \begin{rmk} Note that if we call as before $\phi \colon X_U\to \mC^l$ the map constant on the leaves of the foliation induced by the elements of the vector space $V$ given in Definition \ref{deflocsys}, we have in principle that only $\pi_1(\widetilde{X})$ acts on the image of $\phi$. Furthermore this action is properly discontinuous. In fact since $V$ is not necessarily closed under the monodromy action, it follows that if $\sigma\in V$ it may happen that $g(\sigma)\notin V$ for some $g\in \pi_1(B)$. So even if it is true that $\sigma=dF$ and $g(\sigma)=dH$ on the universal covering $X_U$, $g(\sigma)$ is not necessarily zero on the foliation and $H$ is not necessarily constant on the leaves.
Hence (\ref{action}) becomes \begin{equation} \label{noazione} F(\gamma x)=H(x)+c_\gamma \end{equation} since $d (F\circ\gamma) =d (\gamma^* F)=\gamma^*d F=g(\sigma)=dH$. Equation (\ref{noazione}) does not define an action on the image of $\phi$ for an element $\gamma\in \pi_1({X})$ corresponding to $g$ via $\pi_1(X)\to \pi_1(B)$. On the other hand if $\gamma'\in \pi_1(\widetilde{X})< \pi_1({X})$ corresponds to $g'\in \pi_1(\widetilde{B})$ then the action of $\gamma'$ is properly discontinuous as in the usual case. See Figure \ref{fig1}. \end{rmk} \begin{figure}[h!] \includegraphics[scale=.9]{dis.pdf} \caption{The paths $\gamma$ and $\gamma'$ on, from left to right, $X_U$, $\widetilde{X}$, and $X$.} \label{fig1} \end{figure} \begin{rmk} On the other hand, in the case where $\mL$ is of Castelnuovo type this does not happen and we have an action of $\pi_1(X)$ on $\phi(X_U)$, but not necessarily properly discontinuous. Indeed, if $\sigma\in \Gamma(A,\mL)$ is a local section, $g(\sigma)$ vanishes on the foliation and $H$ is constant on the leaves. So if the element $\gamma'$ is in the fundamental group $\pi_1(\widetilde{X})< \pi_1({X})$ then the action is the same as in (\ref{action}) and it is properly discontinuous. On the other hand, for $\gamma$ not in $\pi_1(\widetilde{X})$, we can only say that it is given by an affinity of $\mC^l$, not necessarily a translation, and it may not be properly discontinuous (this affinity is determined by the matrix of the monodromy action of $g$ on the stalk of the local system $\mL$). \end{rmk} In the case where $\mL$ is of Castelnuovo type we can refine Theorem \ref{castel} as follows. \begin{cor} \label{castel1} Let $f\colon X\to B$ be a semistable fibration and $\mL\leq \mD_X$ a local system of Castelnuovo-type of order $m$. Then up to a base change $\tilde{f}\colon \widetilde{X}\to \widetilde{B}$ there exist a normal $m$-dimensional complex space $\widetilde{Y}$ and holomorphic maps $\tilde{g}$ and $\tilde{h}$ as in Theorem \ref{castel} such that $\tilde{f}=\tilde{g}\circ \tilde{h}$. Furthermore if $\sF_{\mL}$ is the foliation associated to $\mL$ then it is algebraically integrable. \end{cor} \begin{proof} The existence of $\widetilde{Y}$ follows from Theorem \ref{castel} because of course Castelnuovo type implies Castelnuovo generated. Hence we concentrate on the algebraic integrability of $\sF_{\mL}$. If we denote by $p\colon\widetilde{X}\to X$ the covering then we have the inclusion $T_{\widetilde{X}/\widetilde{Y}}\subseteq p^*\sF_{\mL}$ on an open subset. Furthermore in this setting they are foliations of the same rank $n-m$. Hence it is not difficult to see that $T_{\widetilde{X}/\widetilde{Y}}=p^*\sF_{\mL}$. By Remark \ref{algebrico}, $T_{\widetilde{X}/\widetilde{Y}}$ is algebraically integrable, hence $\sF_{\mL}$ is also algebraically integrable. Note that this argument does not apply in the setting of Theorem \ref{castel}, where $T_{\widetilde{X}/\widetilde{Y}}$ is the foliation given by $V$ and hence $p^*\sF_{\mL}$ can in principle be of rank strictly smaller than that of $T_{\widetilde{X}/\widetilde{Y}}$. \end{proof} By these results, a local system $\mL$ of Castelnuovo-type makes it possible to reconstruct an intermediate variety up to a covering of the base $B$. On the other hand by Lemma \ref{lasic} we know that algebraically integrable foliations also give a factorization of $f$ up to birational morphism, as in Diagram \ref{diagfol}. Hence we now correlate these two approaches.
\begin{thm} \label{dxsx} Let $f\colon X\to B$ be a semistable fibration and $\mL$ a Castelnuovo-type local system. Then $\mL$ gives normal variety $Y'$ and complex spaces $\widetilde{Y}$, and $\widehat{Y}$, all of the same dimension, which fit into the following diagram \begin{equation} \label{diagdxsx} \xymatrix{ \widetilde{X}\ar[d]\ar[r]& X\ar[dd]& X'\ar[d]\ar[l]& \widehat{X}\ar[l]\ar[d]\ar@/_1.5pc/[lll]\\ \widetilde{Y}\ar[d]& & Y'\ar[d]& \widehat{Y}\ar[d]\ar@{-->}[l]\ar@{-->}@/_1.5pc/[lll]\\ \widetilde{B}\ar[r]& B& B\ar@{=}[l]& \widetilde{B}\ar[l]\ar@{=}@/^1.3pc/[lll] } \end{equation} \newline where $\widetilde{B}\to B$ is a covering, $\widehat{X}\to \widetilde{X}$ is a (generically) degree 1 map, $\widehat{Y}\dashrightarrow Y'$ is a dominant meromorphic map and $\widehat{Y}\dashrightarrow \widetilde{Y}$ is a bimeromorphic map. \end{thm} \begin{proof} Denote $\sF:=\sF_\mL$ the foliation associated to $\mL$. Applying Corollary \ref{castel1} we know that $\sF$ is algebraically integrable and that we have a covering $\widetilde{B}\to B$ of $B$ and a normal complex space $\widetilde{Y}$ which factors the base change of $\widetilde{X}\to \widetilde{B}$. We can also apply Lemma \ref{lasic} to the foliation $\sF$ and this will give a birational morphism $X'\to X$ and a variety $Y'$ which factors the fibration $X'\to B$. That is, putting these two together we have the diagram \begin{equation} \label{diagdd} \xymatrix{ \widetilde{X}\ar[d]\ar[r]& X\ar[dd]& X'\ar[d]\ar[l]\\ \widetilde{Y}\ar[d]& & Y'\ar[d]\\ \widetilde{B}\ar[r]& B& B\ar@{=}[l] } \end{equation} Now apply again Theorem \ref{castel} to the same local system $\mL$ but this time looking at the right side of Diagram (\ref{diagdd}). We find again our covering $\widetilde{B}\to B$ and the pullback $\widehat{X}\to \widetilde{B}$ factors via a normal complex space $\widehat{Y}$. \begin{equation} \xymatrix{ \widetilde{X}\ar[d]\ar[r]& X\ar[dd]& X'\ar[d]\ar[l]& \widehat{X}\ar[d]\ar[l]\\ \widetilde{Y}\ar[d]& & Y'\ar[d]& \widehat{Y}\ar[d]\\ \widetilde{B}\ar[r]& B& B\ar@{=}[l]& \widetilde{B}\ar[l] } \end{equation} Noticing that $\widetilde{X}=X\times_B\widetilde{B}$ and $\widehat{X}=X'\times_B\widetilde{B}$, from the birational morphism $X'\to X$ we immediately obtain a generically degree 1 morphism $\widehat{X}\to \widetilde{X}$. Finally since both $\widehat{Y}$ and $\widetilde{Y}$ are obtained by the application of Theorem \ref{castel}, we also have a bimeromorphic map $\widehat{Y}\dashrightarrow \widetilde{Y}$. On the other hand it is clear that we have just a meromorphic map, not necessarily holomorphic, between $\widehat{Y}$ and $Y'$. The following diagram sums up the situation and gives the final result. \begin{equation} \xymatrix{ \widetilde{X}\ar[d]\ar[r]& X\ar[dd]& X'\ar[d]\ar[l]& \widehat{X}\ar[l]\ar[d]\ar@/_1.5pc/[lll]\\ \widetilde{Y}\ar[d]& & Y'\ar[d]& \widehat{Y}\ar[d]\ar@{-->}[l]\ar@{-->}@/_1.5pc/[lll]\\ \widetilde{B}\ar[r]& B& B\ar@{=}[l]& \widetilde{B}\ar[l]\ar@{=}@/^1.3pc/[lll] } \end{equation} \end{proof} Using the same notation as before, note that the map $\widehat{Y}\dashrightarrow Y'$ is basically given by the quotient of the non properly discontinuous action of $\pi_1(X)$ on $\phi(X_U)$. Motivated by this result we give the following definition \begin{defn}\label{casttypef} We say that an algebraically integrable foliation $\sF$ of rank $n-m$ on $X$ is of Castelnuovo-type if the associated local system $\mL_\sF$ is of Castelnuovo-type of order $m$. 
\end{defn} \subsection{$s$-Towers} We have seen that a 2-Tower $X\to Y\to B$ gives an inclusion of local systems on $B$, $\mD_Y\leq\mD_X$. Viceversa Theorem \ref{castel} and Corollary \ref{castel1} say that an inclusion of local systems $\mL\leq\mD_X$ gives, under suitable hypothesis on $\mL$, an intermediate variety $\widetilde{Y}$ which factors the pullback of the fibration $\widetilde{X}\to \widetilde{B}$. In the same way it is clear that given an $s$-Tower \begin{equation} X\to Y_1\to Y_2\to \dots \to Y_s\to B \end{equation} we have a flag of local subsystems on $B$ \begin{equation} \mD_{Y_s}\leq\dots\leq\mD_{Y_2}\leq\mD_{Y_1}\leq\mD_{X} \end{equation} The following result gives the viceversa \begin{cor} \label{flag} Let $X\to B$ be a semistable fibration and $\mL_{s}<\dots<\mL_{2}<\mL_{1}<\mD_{X}$ a flag of local systems on $B$ such that \begin{equation} \bigwedge^{m_i}\mL_i\to \mD_X^{m_i} \end{equation} is zero for every $i$ while the quotient $\mL_i/\mL_{i+1}$ has rank at least $m_i$ and every $m_i-1$-uple of sections in $\mL_i\setminus\mL_{i+1}$ does not wedge to zero in $\mD_X^{m_i-1}$. Then there exists a covering $\widetilde{B}\to B$ and a factorization of the pullback $\widetilde{X}\to\widetilde{B}$ as \begin{equation} \xymatrix{ &\widetilde{X}\ar[ld]\ar[dd]\ar[ddddrr]&&\\ \widetilde{Y_1}\ar@{-->}[rd]\ar[ddddrr]&&&\\ &\widetilde{Y_2}\ar@{-->}[rd]\ar[dddr]&&\\ &&\ddots\ar@{-->}[rd]&\\ &&&\widetilde{Y_s}\ar[dl]\\ &&\widetilde{B}& } \end{equation} where the $\widetilde{Y_i}$ are normal complex spaces with meromorphic maps between them. \end{cor} \begin{proof} We consider the covering $\widetilde{B}\to B$ which trivializes $\mL_1$, hence all the $\mL_i$. By the hypothesis on the local systems $\mL_i$ we can easily find using Theorem \ref{castel} spaces $\widetilde{Y_i}$ such that $\widetilde{X}\to\widetilde{B}$ factors as $\widetilde{X}\to\widetilde{Y_i}\to\widetilde{B}$. As the last step we note that on the smooth part of every $\widetilde{Y_i}$ we can define a morphism to $\widetilde{Y}_{i+1}$. This comes from the fact that $\mL_{i+i}<\mL_i$, hence over smooth points of $\widetilde{Y_i}$ we have that $T_{\widetilde{X}/\widetilde{Y}_{i}}\subset T_{\widetilde{X}/\widetilde{Y}_{i+1}}$, hence in this open set every fiber of $\widetilde{X}\to \widetilde{Y_i}$ is contained in a fiber of $\widetilde{X}\to\widetilde{Y}_{i+1}$. \end{proof} \section{Foliations-Local systems: on the fixed points of the correspondence.} This section is dedicated to the study of the fixed points of the correspondence highlighted in (\ref{corrisponde}). We show that foliations and local systems of Castelnuovo type are indeed fixed points. \begin{thm} \label{fissi} Let $\mL\leq \mD_X$ and $\sF\subseteq T_{X/B}$ be respectively a local system and an algebraically integrable foliation of Castelnuovo type of order $m$. Furthermore assume that $\mL$ is maximal with the property that $\bigwedge^{m} \mL\to \mD_X^m$ is zero. Then they are fixed points of the correspondence, that is $\alpha(\beta(\mL))=\mL$ and $\beta(\alpha(\sF))=\sF$. \end{thm} \begin{proof} We divide the proof in two parts, first for the local system and then for the foliation. \textit{The local system.} Take $\mL$ as in the statement. First note that the foliation $\beta(\mL)$ is algebraically integrable by Corollary \ref{castel1}. 
Hence $\beta(\mL)$ gives a diagram \begin{equation} \xymatrix{ U\ar[d]_{e}\ar[r]^{\pi}&W\ar[ddl]\\ X\ar[d]_{f}&\\ B } \end{equation} with $\dim W=m$, as in (\ref{diagfol}), and $\alpha(\beta(\mL))=\mD_W$ in the notation of Section \ref{sez3}. Hence by the maximality hypothesis it will be enough to show that $\mL\leq\mD_W$. Now recall that $\mL$ and $\mD_W$ by definition fit into the exact sequences \begin{equation} \xymatrix{ 0\ar[r]&\omega_B\ar[r]&\tilde{f}_*\sL^\vee_{d}\ar[r]&\mD_{W}\ar[r]&0\\ 0\ar[r]&\omega_B\ar[r]\ar@{=}[u]&\sS\ar[r]&\mL\ar[r]&0 } \end{equation} where $\tilde{f}=f\circ e$; see Diagrams (\ref{ll}) and (\ref{ss}). So our problem is reduced to showing that $\sS\subseteq \tilde{f}_*\sL^\vee_{d}\subseteq f_*\Omega_{X,d}^1$. It is not difficult to see that this is true, since the foliation $\beta(\mL)$ is defined via (\ref{defF}), coincides with $T_{U/W}$ on an open set $X^0\subset X$ such that ${\rm{codim}}(X\setminus X^0)\geq 2$, and $\sL$ is defined by \begin{equation} 0\to T_{U/W}\to T_U\to \sL\to 0, \end{equation} see Sequence (\ref{due}). \textit{The foliation.} Take $\sF$ as in the statement. Once again we know that by definition $\alpha(\sF)=\mD_{W}$, hence $\beta(\alpha(\sF))$ is obtained as in (\ref{defF}) with $\sS=\tilde{f}_*\sL^\vee_{d}$. It immediately follows that $\sF\subseteq \beta(\alpha(\sF))$ on a good open subset, hence they coincide since they are foliations of the same rank $n-m$. \end{proof} \begin{rmk} Note that by the above Theorem \ref{fissi}, Theorem \ref{dxsx} can also be stated in terms of foliations; that is, if $\sF$ is a foliation of Castelnuovo-type then it uniquely corresponds to the local system $\mL_{\sF}$ and they give the varieties $Y',\widetilde{Y},\widehat{Y}$ as in Diagram (\ref{diagdxsx}). \end{rmk} \begin{rmk}Note that, conversely, it is not true that a local system $\mL$ with $\alpha(\beta(\mL))=\mL$, and minimal with this property, is of Castelnuovo type, or even Castelnuovo generated. In fact, even assuming that $\sF_{\mL}$ is algebraically integrable, it follows that $\alpha(\beta(\mL))=\alpha(\sF_{\mL})=\mD_W$ with the same notation as in the previous proof. Hence if we assume that $\mL$ is a fixed point then we certainly have $\mL=\mD_{W}$. Now denote by $m$ the dimension of the variety $W$. Then $\bigwedge^{m} \mL\to \mD_X^m$ is zero for dimensional reasons, but we cannot be sure that $\bigwedge^{m-1}\mL\to \mD_X^{m-1}$ is injective on decomposable elements. Of course there exist $V'<\Gamma(A,\mL)$ and an integer $m'<m$ such that $\bigwedge^{m'} V'\to \mD_X^{m'}$ is zero and $\bigwedge^{m'-1} V'\to \mD_X^{m'-1}$ is injective on decomposable elements. But the local system generated by $V'$ might not be a fixed point, because $\dim V'$ is not necessarily $\geq m'+1$, hence it is not of Castelnuovo type (not even Castelnuovo generated) and Theorem \ref{fissi} does not apply (the associated foliation may not be algebraically integrable). \end{rmk} \begin{prob} For the converse, find conditions that imply the algebraicity of $\sF_{\mL}$, at least for those $\mL$ such that $\alpha(\beta(\mL))=\mL$. We plan to study this problem in the near future. \end{prob} \section{MRC fibrations and Castelnuovo-type local systems} \label{sez5} In this section we use local systems and foliations side by side to study some problems on Harder-Narasimhan filtrations and rational connectedness. We recall the key definitions and ideas. Let $X$ be a smooth projective variety and $\alpha$ an ample class.
Given a coherent torsion-free sheaf $\sE$ on $X$, the slope $\mu(\sE)$ with respect to the class $\alpha$ is defined as the ratio $$ \mu(\sE)=\frac{\deg \sE}{\rank \sE} $$ where $\deg\sE=\sE\cdot \alpha^{n-1}.$ A torsion-free sheaf $\sE$ is said to be $\mu$–stable (respectively, $\mu$–semistable) if $\mu(\sF) < \mu(\sE)$ (respectively, $\mu(\sF) \leq \mu(\sE)$) for every proper coherent torsion-free subsheaf $\sF \subset \sE$. We recall the well-known Harder-Narasimhan filtration of $\sE$. \begin{thm} Let $X$ be a smooth projective variety and let $\sE$ be a torsion-free coherent sheaf of positive rank on $X$. Then there exists a unique Harder-Narasimhan filtration, that is, a filtration $0= \sE_0 \subsetneq \sE_1 \subsetneq\dots\subsetneq \sE_r =\sE$ where each quotient $\sQ_i := \sE_i/\sE_{i-1}$ is torsion-free, $\mu$-semistable, and where the sequence of slopes $\mu(\sQ_i)$ is strictly decreasing. \end{thm} We recall that a variety is rationally connected if every two general points can be connected by a rational curve. It is well known that if we consider the Harder-Narasimhan filtration of the tangent sheaf $T_X$, $$ 0= \sF_0 \subsetneq \sF_1 \subsetneq\dots\subsetneq \sF_r =T_X $$ and we set $$ i_{\max}=\max\bigl(\{0<i<r\mid \mu(\sQ_i)>0\}\cup \{0\}\bigr), $$ then, assuming $i_{\max} >0$, for every index $0< i\leq i_{\max}$ the sheaf $\sF_i$ is an algebraically integrable foliation with rationally connected general leaf. In particular we have the following diagram of rational maps \begin{equation} \label{hnmrc} \xymatrix{ &X\ar@{-->}^{\pi_1}[ld]\ar@{-->}^{\pi_2}[dd]\ar@{-->}^{\pi_{i_{\max }}}[ddddrr]&&\\ W_1\ar@{-->}[rd]&&&\\ &W_2\ar@{-->}[rd]&&\\ &&\ddots\ar@{-->}[rd]&\\ &&&W_{i_{\max}} } \end{equation} This means that the closure of the general fiber of the map $\pi_i$ is a rationally connected variety, and it makes sense to study when $\pi_{i_{\max}}$ is a \emph{maximal} rationally connected fibration. This problem is posed, for example, in \cite{K}. The definition of maximal rationally connected fibration (usually abbreviated by MRC) is the following. \begin{defn}\label{definitionMRC} A dominant rational map $X\dashrightarrow Z$ is MRC if there exist an open subset $X_0$ of $X$ and an open subset $Z_0$ of $Z$ such that \begin{itemize} \item[a)] the induced map $X_0\to Z_0$ is a proper morphism, \item[b)] a general fiber is irreducible and rationally connected, \item[c)] all rational curves which meet a general fiber $F$ are contained in $F$. \end{itemize} \end{defn} It is known that, given a variety $X$, an MRC fibration $X\dashrightarrow Z$ always exists and is unique up to birational equivalence, see for example \cite[Theorem 4.8]{L}. We also have a notion of relative MRC fibration as follows. \begin{defn} Let $f \colon X \to Y$ be a morphism between smooth projective varieties. A relative MRC fibration is a dominant rational map $ \pi\colon X\dashrightarrow Z$ to a smooth projective variety $Z$ over $ Y$ such that for a general point $y\in Y$, the induced map $\pi_y\colon X_y \dashrightarrow Z_y$ is an MRC fibration. \end{defn} This also always exists, see \cite[Theorem 4.20]{L}. In our relative setting $X\to B$, note that, since $B$ is a curve, the two notions coincide if the genus of $B$ is $g(B)>0$.
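Before turning to the relative Harder-Narasimhan filtration in general, we record the simplest illustrative case; this toy example is standard and is included only for orientation.
\begin{rmk}
Consider the product fibration $f\colon X=\mP^1\times B\to B$ over a smooth curve $B$, with $\alpha$ any ample class on $X$. The relative tangent sheaf $T_{X/B}\cong \mathrm{pr}_1^*T_{\mP^1}\cong \mathrm{pr}_1^*\sO_{\mP^1}(2)$ is a line bundle of positive degree with respect to $\alpha$, so its Harder-Narasimhan filtration is simply $0\subsetneq T_{X/B}$, with $i_{\max}=1$ and $\sF_{i_{\max}}=T_{X/B}$. This foliation is algebraically integrable, its leaves are the fibers $\mP^1\times\{b\}$, which are rationally connected, and the corresponding map $\pi_{i_{\max}}$ is $f$ itself, which is indeed the relative MRC fibration (and the MRC fibration of $X$ when $g(B)>0$).
\end{rmk}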
We consider the Harder-Narasimhan filtration of the relative tangent sheaf $T_{X/B}$: $$ 0= \sF_0 \subsetneq \sF_1 \subsetneq\dots\subsetneq \sF_r =T_{X/B}. $$ The $W_i$, constructed as above in Diagram (\ref{hnmrc}), now fit into a diagram as follows \begin{equation} \label{hnmrcB} \xymatrix{ X\ar@{-->}_{\pi_1}[rd]\ar[dddd]\ar@{-->}^{\pi_{i_{\max}}}@/^1.5pc/[dddrrr]&&&\\ &W_1\ar@{-->}[rd]\ar[dddl]&&\\ &&\ddots\ar@{-->}[rd]&\\ &&&W_{i_{\max}}\ar[dlll]\\ B } \end{equation} While in general it is not known when $\pi_{i_{\max}}$ is an MRC fibration, in our case, helped by the study of the local systems, we can prove the following. \begin{thm} \label{mrc} Let $f\colon X\to B$ be a semistable fibration and $$ 0= \sF_0 \subsetneq \sF_1 \subsetneq\dots\subsetneq \sF_r =T_{X/B} $$ the Harder-Narasimhan filtration of the relative tangent sheaf of $f$ determined by a suitable polarization. Assume that $\mD_{X}$ is of Castelnuovo-type of order $m$. If the foliation $\sF_{i_{\max}}$ has generic rank $n-m$, then $X\dashrightarrow W_{i_{\max}}$ is the relative MRC fibration of $f$. Furthermore if $B$ is not $\mP^1$, $X\dashrightarrow W_{i_{\max}}$ is the MRC fibration of $X$. \end{thm} \begin{proof} We know that the relative MRC fibration $X\dashrightarrow W$ exists. By the universal property we have a map $W_{i_{\max}}\dashrightarrow W$ and we want to see that it is birational. Call as before $\sF_{i_{\max}}$ and $\sF$ the foliations associated to $X\dashrightarrow W_{i_{\max}}$ and $X\dashrightarrow W$ respectively. Since the general fiber of these maps is rationally connected, and in particular regular, by Proposition \ref{uguaglianzalocalsys} we easily deduce that the associated local systems coincide, that is $$ \mD_{X}=\mL_\sF=\mL_{\sF_{i_{\max}}}. $$ By the hypothesis on the rank of $\sF_{i_{\max}}$ we know that $\dim W_{i_{\max}}=m$. On the other hand we have just proved that $\mD_{X}=\mL_\sF$, hence we can apply Theorem \ref{dxsx} to this local system. Note that $W$ plays the role of $Y'$ in this theorem, hence we obtain a meromorphic map $\widehat{W}\dashrightarrow W$ with $\dim W=\dim\widehat{W}$ and a bimeromorphic map $\widehat{W}\dashrightarrow \widetilde{W}$, where $\widetilde{W}$ has dimension $m$ by Theorem \ref{castel}. This shows that $\dim W= \dim W_{i_{\max}}$ and this concludes the proof. Alternatively, note that $\sF_{i_{\max}}$ is of Castelnuovo type according to Definition \ref{casttypef}, hence by Theorem \ref{fissi} it is a fixed point of the correspondence. We conclude that $\sF\subseteq \beta(\alpha(\sF))=\beta(\mD_{X}) =\beta(\alpha(\sF_{i_{\max}}))=\sF_{i_{\max}}$. From the rational map $W_{i_{\max}}\dashrightarrow W$ we also have the opposite inclusion, hence $\sF_{i_{\max}}=\sF$ and $W_{i_{\max}}$ is birational to $W$. \end{proof} \section{A Castelnuovo-de Franchis theorem for $p$-forms} \label{sez6} The generalizations of the Castelnuovo-de Franchis theorem mentioned in the previous sections only consider $1$-forms; in this section we study a more general case that involves $p$-forms. \subsection{New notion of strictness and Castelnuovo-de Franchis theorem for $p$-forms} We need a new setup. Let $X$ be an $n$-dimensional smooth variety and let $\omega_1,\dots, \omega_l \in H^0 (X,\Omega^p_X)$, $l\geq p+1$, be linearly independent $p$-forms such that $\omega_i\wedge\omega_j=0$ (as an element of $\bigwedge^2\Omega^p_X$) for any choice of $i,j$.
If this is the case then obviously the $\omega_i$ generate a subsheaf of $\Omega^p_X$ generically of rank 1; more concretely, the quotients $\omega_i/\omega_j$ define global meromorphic functions on $X$. By taking the differential $d (\omega_i/\omega_j)$ we then get global meromorphic 1-forms on $X$, and the condition we are interested in involves these 1-forms as follows. \begin{defn}\label{definizionestrict} We say that the set $\{\omega_1,\dots, \omega_l\}$ as above is $p$-strict if there are $p$ of these meromorphic differential forms $d (\omega_i/\omega_j)$ that do not wedge to zero. We say that the vector subspace generated by $\{\omega_1,\dots, \omega_l\}$ is a $p$-strict vector subspace. \end{defn} The choice of the term strict in this definition comes from the fact that this condition is analogous to the strictness condition considered in \cite[Definition 2.1 and 2.2]{Ca2}, \cite[Definition 4.4]{RZ4}. \begin{thm} \label{cast2} Let $X$ be an $n$-dimensional smooth variety and let $\{\omega_1,\dots, \omega_l \}\subset H^0 (X,\Omega^p_X)$ be a $p$-strict set. Then there exists a rational map $f\colon X\dashrightarrow Y$ to a $p$-dimensional smooth variety $Y$ such that the $\omega_i$ are pullbacks of meromorphic $p$-forms $\eta_i$ on $Y$, that is $\omega_i=f^*\eta_i$ for $i=1,\dots , l$. \end{thm} \begin{proof} Each $p$-form $\omega_i$ determines by contraction a homomorphism $$ T_X\to \Omega^{p-1}_X $$ and we denote by $\sF'$ the intersection of these kernels. Since the forms $\omega_i$ are global holomorphic forms, hence closed, $\sF'$ is closed under Lie bracket. As we showed in Subsection \ref{localtofoliation}, the saturation $\sF$ of $\sF'$ is a foliation and it turns out to be reflexive (a saturated subsheaf of a reflexive sheaf is reflexive), and locally free outside a set $S$ of codimension at least 2. We then consider $\mC(\sF)$, the field of meromorphic functions on $X$ which are constant on the leaves of $\sF$. We take a smooth birational model $Y$ for $\mC(\sF)$. From $\mC(\sF)\subset \mC(X)$ we get a rational map $$ f\colon X\dashrightarrow Y. $$ Since $\sF$ is generically of rank $n-p$, $\dim Y\leq p$; we will prove that the dimension of $Y$ is exactly $p$. We consider the good open set $U$ where $f$ is a holomorphic submersion and not all the $\omega_i$ vanish. By our hypothesis of strictness of the $\omega_i$, there exist a point $x\in U$ and $p$ meromorphic functions $g_i$ defined as $$ g_i=\omega_{s_i}/\omega_{t_i} $$ for $i=1,\dots,p$ and certain $\omega_{s_i}, \omega_{t_i}$ such that $$ dg_1\wedge dg_2\wedge\dots\wedge dg_p\neq 0 $$ at $x$. Now consider, without loss of generality, the relation $g_1=\omega_{s_1}/\omega_{t_1}$, which, written as $\omega_{t_1}g_1=\omega_{s_1}$, gives $$ \omega_{t_1}\wedge dg_1=0 $$ by the fact that the $\omega_i$ are all closed. This, together with the hypothesis that $\omega_i\wedge\omega_j=0$ for every $i,j$, implies that on a suitable open subset the meromorphic section of $\bigwedge^2\Omega^p_X$ given by $$ \omega_{t_1}\wedge \bigl(dg_1\wedge dg_2\wedge\dots\wedge dg_p\bigr) $$ is zero (here of course we use that $dg_1\wedge dg_2\wedge\dots\wedge dg_p\neq0$). From $\omega_i\wedge\omega_j=0$ it follows that, again on an appropriate open subset of $X$, $$ \omega_i=f_i\, dg_1\wedge dg_2\wedge\dots\wedge dg_p $$ for $i=1,\dots,l$. Since the forms $dg_1\wedge\dots\wedge \widehat{dg_j}\wedge\dots\wedge dg_p$ are independent, the 1-forms $dg_i$ vanish on the elements of $\sF$, hence the $g_i$ are locally constant on the leaves of the foliation.
By the fact that the $g_i$ are global meromorphic it follows that they are also constant on the closure of the leaves and hence they are elements $g_i\in \mC(\sF)$. The same is true for the $f_i$, since $d\omega_i=0$, hence $f_i\in \mC(\sF)$. This exactly means that the forms $\omega_i$ are pullback of meromorphic forms on $Y$ and since the wedge $dg_1\wedge dg_2\wedge\dots\wedge dg_p$ is not zero, we have that $\dim Y=p$ and the leaves are closed on a good open subset of $X$. \end{proof} \begin{rmk} We point out the main difference with the classical case of 1-forms, that is when $p=1$. In this case, if $\omega$ is a holomorphic 1-form on $X$, pullback on a suitable open subset of a meromorphic 1-form $\eta$ on $Y$, by a local computation it turns out that $\eta$ must also be holomorphic, as the pullback can not get rid of the poles of $\eta$. In the general case however, that is $p\geq2$, it may very well happen that the pullback of a meromorphic form on $Y$ is holomorphic on $X$, as the following local example easily shows. Let consider $p=2$ and a map locally given in coordinates $x_1,x_2,x_3$ by $$ f(x_1,x_2,x_3)=(x_1,x_1x_2). $$ Denoting $y_1=x_1$ and $y_2=x_1x_2$ it immediately follows that the pullback of the meromorphic form $\frac{1}{y_1}dy_1\wedge dy_2$ is holomorphic: $$ f^*\frac{1}{y_1}dy_1\wedge dy_2=dx_1\wedge dx_2. $$ \end{rmk} Of course in some cases it may still very well be that $f\colon X\dashrightarrow Y$ is actually a morphism. Denote by $\sL\hookrightarrow \Omega_X^p$ the subsheaf generated by the $p$-forms $\omega_i$. Under the hypotheses of the Castelnuovo theorem \ref{cast2} we have that $\sL$ has generic rank 1. Its double dual $\sL^{\vee\vee}$ is also of generic rank 1 but it is also reflexive, therefore it is an invertible sheaf by \cite[Proposition 1.9]{har}. \subsection{Application to the Iitaka Fibration} We show that under suitable hypothesis, the map given by the Castelnuovo Theorem \ref{cast2} is the Iitaka fibration. We start with a lemma which has its own interest. \begin{lem} \label{lemmasuL} If the p-forms $\omega_i$ satisfy the hypothesis of Theorem \ref{cast2} and furthermore they globally generate $\sL$, then $f\colon X\to Y$ is a holomorphic map onto a normal p-dimensional variety and furthermore the $\omega_i$ are pullback of global top forms on $Y$. \end{lem} \begin{proof} The foliation $\sF$ defined as in the proof of Theorem \ref{cast2}, that is as the kernel of these sections, is locally free. Furthermore the normal sheaf $N$ to the foliation given by \begin{equation} 0\to \sF\to T_X\to N\to 0, \end{equation} is also locally free. In this setting it is well known that the foliation is induced by a morphism $f\colon X\to Y$ onto a normal variety $Y$, for example see: \cite[Section 6]{Dr} and the therein quoted bibliography. We have the following diagram \begin{equation} \xymatrix{ 0\ar[r]&\sF\ar@{=}[d]\ar[r]&T_X\ar@{=}[d]\ar[r]&N\ar@{^{(}->}[d]\ar[r]&0\\ 0\ar[r]&T_{X/Y}\ar[r]&T_X\ar[r]&f^*T_Y\ar[r]&K\ar[r]&0 } \end{equation} In particular in the exact sequence \begin{equation} 0\to N\to f^*T_Y\to K\to 0 \end{equation} $K$ is supported on a locus of codimension at least 2, by our hypothesis on the $\omega_i$'s. By dualisation it follows that: $$ \det N^\vee\cong f^*\omega_Y^{\vee\vee}. $$ Now it is not difficult to see that $\det N^\vee\cong \sL$. In fact the $\omega_i$'s, which generate $\sL$, are of course in $\det N^\vee$. Viceversa. 
Every section of $\det N^\vee$ vanishes on $\sF$, hence it lies in the subsheaf generated by the $\omega_i$'s, again by the fact that $\sF$ is defined as the kernel of these sections. Moreover, for every point $x$ of $X$ there is at least one $\omega_i$ which is nonzero at $x$. Hence we have $$ \sL\cong\det N^\vee\cong f^*\omega_Y^{\vee\vee}. $$ We consider the spaces of global sections and by the projection formula we have: $$ H^0(X,\sL)=H^0(X,f^*\omega_Y^{\vee\vee})=H^0(Y,\omega_Y^{\vee\vee})=H^0(Y,\omega_Y). $$ Hence the $\omega_i$ are pullbacks of top forms on $Y$. \end{proof} \begin{thm}\label{Iitakauno} Let $X$ be a smooth variety of dimension $n$. Assume that $\sL$ is globally generated by a $p$-strict subset. If the Kodaira dimension $\Kod X=\Kod \sL=p<n$ then the Stein factorization of $\varphi_{|\sL|}\colon X\to \mP(H^0(X,\sL))$ induces the $K_X$-Iitaka fibration. \end{thm} \begin{proof} By the previous Lemma \ref{lemmasuL} and by \cite[Theorem 2.1.33]{Laz} we have the diagram \begin{equation} \xymatrix{ &Y\ar[dl]&X\ar_{\phi_k}@{-->}[dd]\ar^{f}[l]&X_\infty\ar_{\phi_\infty}[dd]\ar^{u_\infty}[l]\ar_{f_\infty}@/_1.4pc/[ll]\\ \mP(H^0(X,\sL))&&&\\ &&Z_k&Z_\infty\ar_{\nu_k}[l] } \end{equation} where $\phi_k$ is the map given by $|K_X|$ and $\phi_\infty$ is the $K_X$-Iitaka fibration. By Lemma \ref{lemmasuL}, we know that $\sL=f^*\omega_Y^{\vee\vee}$, hence $\Kod Y=\Kod\sL=p$ and $Y$ is of general type. The very general fiber $X_z$ of $\phi_\infty$ is of Kodaira dimension 0 and we obtain a morphism $$ f_\infty|_{X_z}\colon X_z\to Y $$ from a variety of Kodaira dimension 0 to a variety of general type. There are two cases. Assume $\dim f_\infty(X_z)>0$. Then $f_\infty(X_z)$ must be a variety of general type if $z$ is a general point in $Z_\infty$. This is a contradiction since $\Kod X_z=0$. Hence $f_\infty(X_z)$ is zero-dimensional for general $z\in Z_\infty$. Since the general fibers of both $f_\infty$ and $\phi_\infty$ are irreducible we obtain a map $Z_\infty\to Y$, see the Rigidity Lemma in \cite[Lemma 1.6]{KM}. By the uniqueness of the Iitaka fibration up to birational modification we obtain \begin{equation} \xymatrix{ &Y&X\ar_{\phi_k}@{-->}[d]\ar^{f}[l]&X_\infty\ar_{\phi_\infty}[d]\ar^{u_\infty}[l]\ar_{f_\infty}@/_1.4pc/[ll]\\ &&Z_k&Z_\infty\ar_{\nu_k}[l]\ar@/^2.5pc/@{-->}[ull] } \end{equation} hence $f_\infty\colon X_\infty \to Y$ is the Iitaka fibration. \end{proof} Before stating our last theorem we point out to the reader that it is only conjectural, since for it to hold we need to assume the following: \begin{enumerate} \item Let $X$ be a nonsingular projective variety over $Z$. If $K_X$ is pseudoeffective$/Z$ then $X/Z$ has a minimal model. Otherwise it has a Mori fiber space$/ Z$. \item Let $U/Z$ be a normal projective variety with terminal singularities and $\mathbb Q$-Cartier canonical divisor. If $K_U$ is nef$/Z$ then it is semiample, that is, it is the pull-back of a divisor that is ample$/Z$. \end{enumerate} The two assumptions put together are referred to as the good minimal model conjecture; cf.~\cite{T}. \begin{thm}\label{Iitakadue} Let $X$ be a smooth variety of dimension $n$. Let $\{\omega_1,\dots, \omega_m\} \subset H^0 (X,\Omega^p_X)$ be a $p$-strict subset and let $\sL\hookrightarrow \Omega_X^p$ be the sub-line bundle generically generated by them. Assume that: \begin{enumerate} \item $\bigoplus_{s\in\mathbb N} H^0(X, sL)$ is a finitely generated $\mathbb C$-algebra, where $\sL=\sO_X(L)$; \item $0\leq\Kod X=\Kod \sL=p<n$; \item $aK_X-bL$ is nef for some $a,b\in\mathbb N_{>0}$.
\end{enumerate} If the good minimal model conjecture holds for terminal projective varieties with zero Kodaira dimension up to dimension $n-p$, then the Iitaka fibration of $\sL$ is the $K_X$-Iitaka fibration. \end{thm} \begin{proof} Let $Y:={\rm{Proj}} \bigoplus_{n\in\mathbb N}H^0(X,nL)$. By \cite[Chapter III Section 1.2]{B} we can assume that there exists $l\in\mathbb N$ such that $\bigoplus_{n\in\mathbb N}H^0(X,nL)$ is generated by $H^0(X,lL)$, and so the natural map $\phi\colon X\dashrightarrow Y$ is induced by $\phi_{\mid lL\mid}\colon X\dashrightarrow\mathbb P(H^0(X,lL)^\vee)$. Since $aK_X-bL$ is nef, $l\cdot(aK_X-bL)$ is also nef. Hence, up to replacing $L$ by $l\cdot bL$, from now on we can assume that there exists $r\in\mathbb N$ such that $rK_X-L$ is nef and that $H^0(X,L)$ generates $\bigoplus_{n\in\mathbb N}H^0(X,nL)$. By uniqueness of the Iitaka fibration up to birational modifications we obtain that the rational map $f\colon X\dashrightarrow Y\hookrightarrow \mathbb P(H^0(X,lL)^\vee)$ induced by $\phi_{\mid lL\mid}$ is birationally equivalent to a fixed algebraic fiber space $\phi_\infty\colon X_\infty\to Y_\infty$ where $X_\infty$ is smooth, $Y_\infty$ is normal and as above we obtain \begin{equation} \xymatrix{ &&X\ar_{f}@{-->}[d]&X_\infty\ar_{\phi_\infty}[d]\ar^{u_\infty}[l]\\ &&Y&Y_\infty\ar_{\nu_k}[l] } \end{equation} \noindent where $\phi_\infty\colon X_\infty \to Y_\infty$ is the Iitaka fibration for $L$. By construction $u_\infty^{*}(rK_X-L)$ is nef. We set $L_\infty:=u_\infty^{*}L$ and also we set: $$ \mid L_\infty\mid=\mid M^L_\infty\mid + F_L $$ where $F_L$ is the fixed part of $\mid L_\infty\mid$, $M^L_\infty$ is base-point free (b.p.f.), and it is the pull-back of a divisor from $Y_\infty$. Since the Iitaka fibration is only defined up to birational equivalence we can also set $$ \mid u_\infty^{*}(rK_X)\mid=\mid M\mid +F_{K_X} $$ where $\mid M\mid$ is b.p.f. and $F_{K_X}$ is the fixed part of $\mid u_\infty^{*}(rK_X)\mid$. Up to repeating the first step for a sufficiently high multiple $l\cdot(u_\infty^{*}(rK_X)-L_\infty)$ we can assume that $u_\infty^{*}(rK_X)-L_\infty$ is nef and that the morphism $\phi_{\mid M\mid}\colon X_\infty\to \mathbb P(H^0(X,M)^\vee)$ factors through a normal variety $Z_r\hookrightarrow \mathbb P(H^0(X,M)^\vee)$ via a morphism $\psi_r\colon X_\infty\to Z_r$ which has connected fibers of dimension $n-p$ and such that there exists the following commutative diagram \begin{equation} \xymatrix{ &Y_\infty&X_\infty\ar_{\psi_r}@{-->}[d]\ar^{\phi_\infty}[l]&X^1_\infty\ar_{\psi^1_\infty}[d]\ar^{v_\infty}[l]\ar_{f_\infty}@/_1.4pc/[ll]\\ &&Z_r&Z_\infty\ar_{\nu_r}[l]\ar@/^2.5pc/@{-->}[ull] } \end{equation} where $\psi^1_\infty\colon X^1_\infty \to Z_\infty$ is the Iitaka fibration of $u_\infty^*(K_X)$ or, which is the same, the Iitaka fibration of $K_{X^1_\infty}$. In particular we can also assume $X^1_\infty $ and $Z_\infty$ to be smooth. We claim that $v_\infty^*(u_\infty^{*}(rK_X)-M^L_\infty)$ is nef on the general movable curve $C_\infty$ contained inside the general fiber of $\psi^1_\infty\colon X^1_\infty \to Z_\infty$. Indeed if $$ v_\infty^*(u_\infty^{*}(rK_X)-M^L_\infty)\cdot C_\infty<0, $$ then letting $C:=v_{\infty*}C_\infty$ it holds that $$ (u_\infty^{*}(rK_X)-M^L_\infty)\cdot C<0. $$ On the other hand $$ (u_\infty^{*}(rK_X)-L_\infty)\cdot C\geq 0, $$ hence $$ -F_L\cdot C>0. $$ This means that $F_L\cdot C<0$, that is, $C$ is inside the support of $F_L$: a contradiction.
By the same proof it also holds that: \begin{equation}\label{stessaprova} (rK_{X^1_\infty}-v_\infty^* M^L_\infty)\cdot C_\infty\geq 0 \end{equation} By hypothesis the fibers of $\psi^1_\infty$ have dimension $n-p$ and by construction of the Iitaka fibration the general one has Kodaira dimension $0$. Since we have assumed that the good minimal model conjecture holds for terminal projective varieties with zero Kodaira dimension up to dimension $n-p$, we can run a MMP over $Z_\infty$ in order to find a terminal variety $X^2$ and a birational map $\tau^{-1}\colon X^1_\infty\dashrightarrow X^2$ such that \begin{equation} \xymatrix{ &&X^1_\infty\ar_{\psi^1_\infty}[d]&X^2\ar_{\psi^2}[d]\ar^{\tau}@{-->}[l]\\ &&Z_\infty&Z_\infty\ar_{=}[l] } \end{equation} and $K_{X^2}$ is $\psi^2$-nef. Moreover due to the fact that $\psi_\infty\colon X^1_\infty \to Z_\infty$ is up to birational equivalence we can assume that $\tau^{-1}\colon X^1_\infty\dashrightarrow X^2$ is actually a birational morphism. We stress that $\psi^2\colon X^2\to Z_\infty$ is a surjective morphism with connected fibers between normal projective varieties. Moreover $K_{X^2}$ is a $\psi^2$-nef $\mathbb Q$-Cartier divisor. Since we have assumed that the abundance conjecture holds for varieties of vanishing Kodaira dimension up to dimension $n-p$ we find that the canonical of the general fiber is torsion. By \cite[Lemma 3.4]{T} there exist birational morphisms $\rho\colon X^2_\infty\to X^2$, $\pi\colon Z^2_\infty\to Z_\infty$, a $\mathbb Q$-Cartier divisor $G$ in $Z^2_\infty$ and an equidimensional morphism $\psi^2_\infty \colon X^2_\infty\to Z^2_\infty$ such that $\rho^*(K_{X^2})=(\psi^2_\infty)^*(G)$. We build the following commutative diagram: \begin{equation}\label{grandediagramma} \xymatrix{ Y&X\ar@{-->}[l]&&X^1_\infty\ar@{=}[ld]\ar^{\tau^{-1}}[d]&X^3_\infty\ar^{\mu}[d]\ar_{\delta}[l]\\ Y_\infty\ar[u]&X_\infty\ar_-{\psi_r}@{-->}[d]\ar^{u_\infty}[u]\ar^{\phi_\infty}[l]&X^1_\infty\ar_-{\nu_\infty}[l]\ar^{\psi^1_\infty}[d]&X^2\ar_-{\tau}@{-->}[l]\ar^-{\psi^2}[dl]&X^2_\infty\ar^{\rho}[l]\ar^{\psi^2_\infty}[d]\\ &Z^1&Z_\infty\ar[l]&&Z^2_\infty\ar^{\pi}[ll] } \end{equation} where we have resolved $\phi_\infty \circ v_\infty \circ\tau\circ\rho\colon X^2_\infty \dashrightarrow Y_\infty$ and again by the fact that Iitaka construction is up to a birational maps we can assume that there exist birational morphisms $\delta\colon X^3_\infty\to X^1_\infty$, $\mu\colon X^3_\infty\to X^2_\infty$ such that \begin{equation}\label{prestocommuta} \xymatrix{ &&X^1_\infty\ar_{\tau^{-1}}[d]&X^3_\infty\ar_{\mu}[d]\ar^{\delta}[l]\\ &&X^2&X^2_\infty\ar_{\rho}[l] } \end{equation} is commutative and $f_\infty=\phi_\infty\circ\nu_\infty\circ\delta\colon X^3_\infty\to Y_\infty$ is a morphism with connected fiber. Now a generic movable curve $C_\infty$ inside the general fiber of $\psi^1_\infty\colon X^1_\infty \to Z_\infty$ is transformed in a generic movable curve $C^2_\infty$ inside the general fiber of $\psi^2_\infty\colon X^2_\infty \to Z^2_\infty$. We set: \begin{equation}\label{semipresto}C_\infty=(\tau^{-1})^{*}C^{(2)} \end{equation} where $C^{(2)}$ is a generic movable curve inside the general fiber of $\psi^2\colon X^2 \to Z_\infty$. By construction $K_{X^2}=\tau^{-1}_{*}K_{X^1_\infty}$ since $X^2$ is terminal and $\tau^{-1}$ is a morphism. 
Hence: \begin{equation}\label{presto} (rK_{X^2}-(\tau^{-1})_{*}v_\infty^* M^L_\infty )\cdot C^{(2)}=(\tau^{-1}_{*}(rK_{X^1_\infty}-v_\infty^* M^L_\infty)) \cdot C^{(2)} \end{equation} By the projection formula applied to the morphism $\tau^{-1}$ it holds that \begin{equation}\label{prestopresto} (\tau^{-1}_{*}(rK_{X^1_\infty}-v_\infty^* M^L_\infty)) \cdot C^{(2)}=(rK_{X^1_\infty}-v_\infty^* M^L_\infty)\cdot (\tau^{-1})^{*}C^{(2)} \end{equation} By substitution using equation \ref{semipresto} \begin{equation}\label{prestoprestopresto} (rK_{X^1_\infty}-v_\infty^* M^L_\infty)\cdot (\tau^{-1})^{*}C^{(2)}=(rK_{X^1_\infty}-v_\infty^* M^L_\infty)\cdot C_\infty \end{equation} Since by equation \ref{stessaprova} we know that $(rK_{X^1_\infty}-v_\infty^* M^L_\infty)\cdot C_\infty\geq 0$, we see by equations \ref{presto}, \ref{prestopresto} and \ref{prestoprestopresto} that \begin{equation}\label{nontardi} (rK_{X^2}-(\tau^{-1})_{*}v_\infty^* M^L_\infty )\cdot C^{(2)}\geq 0 \end{equation} Finally let $C^{(2)}_{\infty}$ be a generic movable curve inside the general fiber of $\psi^2_\infty\colon X^2_\infty \to Z^2_\infty$. It holds that $$ \rho^{*}(rK_{X^2}-(\tau^{-1})_{*}v_\infty^* M^L_\infty)\cdot C^{(2)}_{\infty}\geq 0. $$ On the other hand, since $\rho^{*}(K_{X^2})=(\psi^2_\infty)^*(G)$ it holds that $$\rho^{*}(rK_{X^2})\cdot C^{(2)}_{\infty}=0.$$ Hence \begin{equation}\label{eccoilpresto} -\rho^{*}((\tau^{-1})_{*}v_\infty^* M^L_\infty)\cdot C^{(2)}_{\infty}\geq 0 \end{equation} Since $M^L_\infty$ is movable, by equation \ref{eccoilpresto} it holds that \begin{equation}\label{eccoilprestopresto} \rho^{*}((\tau^{-1})_{*}v_\infty^* M^L_\infty)\cdot C^{(2)}_{\infty}=0 \end{equation} By the commutative diagram \ref{prestocommuta} we obtain: \begin{equation}\label{finepresto} \delta^* (v_\infty^* M^L_\infty)\cdot \mu^{*} C^{(2)}_{\infty}=0 \end{equation} Equation \ref{finepresto} shows that the general fiber of $\psi^2_\infty\circ\mu\colon X^3_\infty\to Z^2_\infty$ is the general fiber of $f_\infty\colon X^3_\infty\to Y_\infty$, and we conclude by the rigidity lemma. \end{proof} \begin{thebibliography}{Muk04} \bibitem[AD]{AD} C. Araujo, S. Druel, \emph{Characterization of generic projective space bundles and algebraicity of foliations}, Commentarii Mathematici Helvetici, European Mathematical Society, 94 (2019), Issue 4, 833--853. \bibitem[BZ1]{BZ1} M. \'A. Barja, F. Zucconi, \emph{A note on a conjecture of Xiao}, J. Math. Soc. Japan 52 (2000), no. 3, 633--635. \bibitem[BZ2]{BZ2} M. \'A. Barja, F. Zucconi, \emph{On the slope of fibred surfaces}, Nagoya Math. J. 164 (2001), 103--131. \bibitem[B]{B} N. Bourbaki, \emph{Commutative algebra. Chapters 1–7}, translated from the French, reprint of the 1989 English translation, Elements of Mathematics (Berlin), Springer-Verlag, Berlin, 1998. \bibitem[Ca]{Ca2} F. Catanese, \emph{Moduli and classification of irregular Kaehler manifolds (and algebraic varieties) with Albanese general type fibrations}, Invent. Math. 104 (1991), no. 2, 263--289. \bibitem[CD1]{CD1} F. Catanese, M. Dettweiler, \emph{Answer to a question by Fujita on Variation of Hodge Structures}, Higher Dimensional Algebraic Geometry: In honour of Professor Yujiro Kawamata's sixtieth birthday, Mathematical Society of Japan, Tokyo, Japan, (2017), 73--102. \bibitem[CD2]{CD2} F. Catanese, M. Dettweiler, \emph{The direct image of the relative dualizing sheaf needs not be semiample}, C. R. Math. Acad. Sci. Paris 352 (2014), no. 3, 241--244. \bibitem[CD3]{CD3} F. Catanese, M.
Dettweiler, \emph{Vector bundles on curves coming from variation of Hodge structures}, Internat. J. Math. 27 (2016), no. 7, 1640001, 25 pp. \bibitem[CK]{CK} F. Catanese, Y. Kawamata, \emph{Fujita decomposition over higher dimensional base}, European Journal of Mathematics 5 (2019), 720--728. \bibitem[D]{D} O. Debarre, \emph{Higher-dimensional algebraic geometry}, Universitext, Springer-Verlag, New York, 2001. \bibitem[De]{De} P. Deligne, \emph{Equations Diff\'erentielles \`a Points Singuliers R\'eguliers}, LNM 163, Springer-Verlag, Berlin, Heidelberg, New York, 1970. \bibitem[Dr]{Dr} S. Druel, \emph{Codimension 1 foliations with numerically trivial canonical class on singular spaces}, Duke Math. J. 170 (2021), no. 1, 95--203. \bibitem[F]{F} O. Fujino, \emph{Iitaka Conjecture. An introduction}, SpringerBriefs in Mathematics, Springer, Singapore (2020). \bibitem[Fu1]{Fu} T. Fujita, \emph{On K\"ahler fiber spaces over curves}, J. Math. Soc. Japan 30 (1978), no. 4, 779--794. \bibitem[Fu2]{Fu2} T. Fujita, \emph{The sheaf of relative canonical forms of a K\"ahler fiber space over a curve}, Proc. Japan Acad. Ser. A Math. Sci. 54 (1978), no. 7, 183--184. \bibitem[H]{H} R. Hartshorne, \emph{Algebraic Geometry}, Graduate Texts in Mathematics, Vol. 52, Springer-Verlag, New York-Heidelberg, 1977. \bibitem[H2]{har} R. Hartshorne, \emph{Stable reflexive sheaves}, Math. Ann. 254 (1980), no. 2, 121--176. \bibitem[KM]{KM} J. Kollár, S. Mori, \emph{Birational Geometry of Algebraic Varieties}, Cambridge Tracts in Mathematics, 134, Cambridge University Press, (1998). \bibitem[K]{K} S. Kebekus, \emph{Uniruledness Criteria and Applications}, in: Bogomolov F., Hassett B., Tschinkel Y. (eds) Birational Geometry, Rational Curves, and Arithmetic, Springer, New York, NY (2013). \bibitem[Kol]{Kol} J. Kollár, \emph{Rational curves on algebraic varieties}, volume 32 of \emph{Ergebnisse der Mathematik und ihrer Grenzgebiete. 3. Folge. A Series of Modern Surveys in Mathematics}, Springer-Verlag, Berlin, 1996. \bibitem[Laz]{Laz} R. Lazarsfeld, \emph{Positivity in algebraic geometry. I–II}, Ergebnisse der Mathematik und ihrer Grenzgebiete. 3. Folge, Springer-Verlag, Berlin, 2004. \bibitem[L]{L} V. Lazić, \emph{Selected Topics in Algebraic Geometry-Foliations}, notes available at \url{https://www.uni-saarland.de/fileadmin/upload/lehrstuhl/lazic/Skripten/foliation.pdf}, (2017). \bibitem[P]{P} Z. Patakfalvi, \emph{Arakelov-Parshin rigidity of towers of curve fibrations}, Math. Z. 278 (2014), no. 3--4, 859--892. \bibitem[PT]{PT} G.P. Pirola, S. Torelli, \emph{Massey Products and Fujita decompositions on fibrations of curves}, Collectanea Mathematica, 71 (2020), 39--61. \bibitem[Ran]{Ran} Z. Ran, \emph{On subvarieties of abelian varieties}, Invent. Math. 62 (1981), no. 3, 459--479. \bibitem[RZ]{RZ4} L. Rizzi, F. Zucconi, {\em Fujita decomposition and Massey product for fibered varieties}, \url{https://arxiv.org/abs/2007.01473}, (2020). \bibitem[SCT]{SCT} L. Solá Conde, M. Toma, \emph{Maximal rationally connected fibrations and movable curves}, Annales de l'Institut Fourier, Volume 59 (2009), no. 6, pp. 2359--2369. \bibitem[T]{T} B. Taji, \emph{Birational positivity in dimension $4$}, Annales de l'Inst. Fourier, Tome 64, Issue 1, (2014), 203--216. \end{thebibliography} \end{document} \textbf{UP TO HERE; FROM HERE ON, MATERIAL FOR FUTURE WORK} \section{Generalized Adjoint theorem} Let $X$ be a smooth $n$-dimensional variety and $f\colon \sX\to B$ a deformation of $X$ over a smooth 1-dimensional disk $B$.
The associated conormal exact sequence on $X$ \begin{equation} 0\to \sO_X\to \Omega^1_{\sX|X}\to \Omega^1_X\to 0 \end{equation} corresponds to an element $\xi\in \text{Ext}^1(\Omega^1_X,\sO_X)$. The classical Adjoint theorem gives conditions on the splitting of this exact sequence based on the study of differential 1-forms. With the new version of the Castelnuovo-de Franchis theorem proved in the previous section we can get similar results thanks to the study of $n$-forms on $X$. \begin{thm} Let $X$ and $\xi\in \textnormal{Ext}^1(\Omega^1_X,\sO_X)$ be as above. Let $\omega_1,\dots, \omega_l \in H^0 (X,\Omega^n_X)$ be linearly independent $n$-forms such that there exist meromorphically strict closed liftings $\widetilde{\omega}_1,\dots, \widetilde{\omega}_l\in H^0 (X,\Omega^n_{\sX|X})$ such that $\widetilde{\omega}_i\wedge \widetilde{\omega}_j=0$ for every $i,j$. Then $\xi$ is in the kernel of \begin{equation} \textnormal{Ext}^1(\Omega^1_X,\sO_X)\to \textnormal{Ext}^1(\Omega^1_X(-D),\sO_X) \end{equation} where $D$ is the divisor on $X$ which is the fixed part of the sections $\omega_i$. \end{thm} \begin{proof} First note that the extension $\xi$, which we recall is associated to \begin{equation} \label{seq} 0\to \sO_X\to \Omega^1_{\sX|X}\to \Omega^1_X\to 0, \end{equation} via the isomorphism $\text{Ext}^1(\Omega^1_X,\sO_X)\cong\text{Ext}^1(\Omega^n_X,\Omega^{n-1}_X)$ also parametrizes the sequence \begin{equation} \label{seq2} 0\to \Omega^{n-1}_X\to \Omega^n_{\sX|X}\to \Omega^n_X\to 0, \end{equation} the wedge of (\ref{seq}). Hence we will study (\ref{seq2}). The properties of the $\widetilde{\omega}_i$ of course allow us to apply the Castelnuovo-type theorem [CITE]. Note that even if $\sX$ is not compact, the fact that we require that the $\widetilde{\omega}_i$ are closed is enough for the argument in Theorem [CITE] to work. Hence we have a rational map $h\colon \sX\dashrightarrow Z$ to an $n$-dimensional variety $Z$. On a suitable maximal open subset $U\subset \sX$ we have the sheaf $h^*\Omega^n_Z$ and we take its restriction to $X$, denoted by $(h^*\Omega^n_Z)_{|X}$. We define the sheaf $\sG$ on $X$ as \begin{equation} \sG(V):=\{s\in\Omega^n_{\sX|X}(V)\mid s_{|U\cap V}\in (h^*\Omega^n_Z)_{|X}(U\cap V) \}. \end{equation} Of course by [CITE] we have that $\widetilde{\omega}_i\in H^0(X,\sG)$. The sheaf $\sG$ fits into Sequence (\ref{seq2}) as \begin{equation} \xymatrix{ 0\ar[r]&\Omega^{n-1}_X\ar[r]&\Omega^n_{\sX|X}\ar[r]&\Omega^n_X\ar[r]&0\\ &&\sG\ar@{^{(}->}[u]\ar[ur]&& } \end{equation} Since $h$ induces a generically finite rational map between $X$ and $Z$, the diagonal map is injective, since its kernel would be a torsion subsheaf of the locally free sheaf $\Omega^{n-1}_X$. On the other hand it is surjective onto $\Omega^n_X(-D')\otimes \sI_S$, where $D'<D$ is an effective divisor and $S$ a locus of codimension at least 2 in $X$. By taking the double dual of $\sG$, we have that $\sG^{\vee\vee}\cong \Omega^n_X(-D')$ and this gives a splitting of the exact sequence \begin{equation} \label{seq3} 0\to \Omega^{n-1}_X\to \sE\to \Omega^n_X(-D')\to 0, \end{equation} which fits into the diagram \begin{equation} \xymatrix{ 0\ar[r]&\Omega^{n-1}_X\ar[r]&\Omega^n_{\sX|X}\ar[r]&\Omega^n_X\ar[r]&0\\ 0\ar[r]&\Omega^{n-1}_X\ar[r]\ar@{=}[u]&\sE\ar[r]\ar@{^{(}->}[u]&\Omega^n_X(-D')\ar[r]\ar@{^{(}->}[u]&0. } \end{equation} This means that $\xi$ is in the kernel of the map $$ \text{Ext}^1(\Omega^1_X,\sO_X)\cong\text{Ext}^1(\Omega^n_X,\Omega^{n-1}_X)\to\text{Ext}^1(\Omega^n_X(-D'),\Omega^{n-1}_X).
$$ We note that Sequence (\ref{seq3}) tensored by $\sO_X(-(n-1)D')$ gives \begin{equation} 0\to \Omega^{n-1}_X(-(n-1)D')\to \sE(-(n-1)D')\to \Omega^n_X(-nD')\to 0, \end{equation} which is exactly the top wedge of a sequence \begin{equation} 0\to \sO_X\to \sE'\to \Omega^1_X(-D')\to 0. \end{equation} In particular this means that $\text{Ext}^1(\Omega^1_X(-D'),\sO_X)\cong\text{Ext}^1(\Omega^n_X(-D'),\Omega^{n-1}_X)$. Therefore $\xi$ is in the kernel of $$ \text{Ext}^1(\Omega^1_X,\sO_X)\cong\text{Ext}^1(\Omega^n_X,\Omega^{n-1}_X)\to\text{Ext}^1(\Omega^n_X(-D'),\Omega^{n-1}_X)\cong \text{Ext}^1(\Omega^1_X(-D'),\sO_X) $$ and since $D'<D$ we obtain our claim. \end{proof} \begin{rmk} Unramified double cover. \end{rmk} \begin{rmk} Of course, since $D'<D$ this Theorem actually shows a slightly stronger conclusion, that is, $\xi$ is in the kernel of $$ \text{Ext}^1(\Omega^1_X,\sO_X)\to\text{Ext}^1(\Omega^1_X(-D'),\sO_X), $$ but it seems difficult a priori to control the divisor $D'$, which depends on the locus where the map $X\dashrightarrow Z$ is not defined and also on its ramification. On the other hand it is very natural to control $D$ by the analysis of the zero loci of the $\omega_i$. \end{rmk} \subsection{Note} \begin{rmk} COMPARE WITH FABRIZIO / Remember that hypothesis 1) is fiberwise. \end{rmk} \begin{rmk} One of the two hypotheses is not verified. \end{rmk} Note that it is immediate by the definition of algebraic and transcendental part that $\mL^a_{\sF}$ is the maximal local system associated to $Y$, see Remark \ref{massimalita}. WATCH OUT FOR THE RANK; PERHAPS IT SHOULD BE MADE EXPLICIT. THINK FOR A MOMENT ABOUT THE FACT THAT $Y$ IS OF ALBANESE GENERAL TYPE/GENERAL TYPE. This gives an exact sequence of local systems \begin{equation} 0\to \mL_{\sF}\to \mL_{\sF}^a\to \mL_{\sF}^a/\mL_{\sF}\to 0. \end{equation} TO BE CHECKED CAREFULLY. The idea is that since the local systems are unitary, the sequence splits because we can always take the orthogonal.
2205.15082v1
http://arxiv.org/abs/2205.15082v1
The zero-noise limit of SDEs with $L^\infty$ drift
\documentclass[a4paper,reqno]{amsart} \usepackage[utf8]{inputenc} \usepackage{amsmath,amssymb,amsthm,amsfonts} \usepackage{bbm} \usepackage{euscript} \usepackage{enumitem} \usepackage{nicefrac} \usepackage{mathtools} \theoremstyle{plain} \newtheorem{theorem}{Theorem} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{conjecture}[theorem]{Conjecture} \newtheorem{proposition}[theorem]{Proposition} \theoremstyle{definition} \newtheorem{definition}[theorem]{Definition} \newtheorem{remark}[theorem]{Remark} \newtheorem{example}[theorem]{Example} \numberwithin{theorem}{section} \numberwithin{equation}{section} \newcommand{\mbR}{{\mathbb R}} \newcommand{\mbN}{{\mathbb N}} \newcommand{\mbQ}{{\mathbb Q}} \newcommand{\mbZ}{{\mathbb Z}} \newcommand{\cB}{{\mathcal B}} \newcommand{\cF}{{\mathcal F}} \newcommand{\cK}{{\mathcal K}} \newcommand{\cI}{{\mathcal I}} \newcommand{\cH}{{\mathcal H}} \newcommand{\ind}{\mathbbm{1}} \newcommand{\abs}[1]{\lvert #1 \rvert} \newcommand{\drift}{a} \newcommand{\sign}{\mathop{\rm sign}} \newcommand{\vf}{\varphi} \newcommand{\Vf}{\Phi} \newcommand{\vk}{\varkappa} \newcommand{\ve}{\varepsilon} \renewcommand{\lg}{\langle} \newcommand{\rg}{\rangle} \newcommand{\pt}{\partial} \renewcommand{\Pr}{{\mathbb{P}}} \newcommand{\Exp}{{\mathbb{E}}} \newcommand{\Var}{\mathrm{Var}} \renewcommand{\leq}{\leqslant} \renewcommand{\le}{\leqslant} \renewcommand{\geq}{\geqslant} \renewcommand{\ge}{\geqslant} \DeclareMathOperator{\wlim}{wlim} \newcommand{\from}{\colon} \newcommand{\Lip}{{\mathrm{Lip}}} \newcommand{\nqquad}{\hspace{-2em}} \title{The zero-noise limit of SDEs with \(L^\infty\) drift} \author[U. S. Fjordholm]{Ulrik Skre Fjordholm} \author[M. Musch]{Markus Musch} \address{Department of Mathematics, University of Oslo, PO Box 1053 Blindern, N-316 Oslo, Norway} \author[A. Pilipenko]{Andrey Pilipenko} \address{Institute of Mathematics, National Academy of Sciences of Ukraine, Tereshchenkivska str. 3, 01601, Kiev, Ukraine} \begin{document} \begin{abstract} We study the zero-noise limit for autonomous, one-dimensional ordinary differential equations with discontinuous right-hand sides. Although the deterministic equation might have infinitely many solutions, we show, under rather general conditions, that the sequence of stochastically perturbed solutions converges to a unique distribution on classical solutions of the deterministic equation. We provide several tools for computing this limit distribution. \end{abstract} \maketitle \section{Introduction} Consider a scalar, autonomous ordinary differential equation (ODE) of the form \begin{equation}\label{eq:ode} \begin{split} \frac{dX}{dt}(t) &= \drift(X(t)) \qquad \text{for } t > 0, \\ X(0) &= 0 \end{split} \end{equation} where \( \drift\from\mbR \rightarrow \mbR \) is Borel measurable. (The initial data $X(0)=0$ can be translated to an arbitrary point $x_0\in\mbR$, if needed.) If the drift $a$ is non-smooth then uniqueness of solutions might fail --- this is the \emph{Peano phenomenon}. To distinguish physically reasonable solutions from non-physical ones, we add stochastic noise to the equation, with the aim of letting the noise go to zero. Thus, we consider a stochastic differential equation \begin{equation}\label{eq:ode_pert} \begin{split} dX_\ve(t) &= \drift(X_\ve(t)) dt + \ve dW(t), \\ X_\ve(0) &= 0. \end{split} \end{equation} where \( W(t) \) is a one-dimensional Brownian motion on a given probability space \( (\Omega, \cF, \Pr )\), and \( \ve > 0 \). 
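As a purely illustrative aside (not part of the analysis below), the selection phenomenon we are after can already be previewed numerically by direct simulation of \eqref{eq:ode_pert}. The following minimal sketch uses the Euler--Maruyama scheme; the concrete drift $a(x)=\sign(x)\sqrt{\abs{x}}$ and all helper names are assumptions made only for this demonstration.
\begin{verbatim}
# Illustrative sketch only: Euler--Maruyama simulation of
#   dX_eps = a(X_eps) dt + eps dW,   X_eps(0) = 0,
# with the toy drift a(x) = sign(x)*sqrt(|x|) (an assumption for this demo).
import numpy as np

def euler_maruyama(a, eps, T=1.0, n_steps=10_000, n_paths=200, seed=0):
    """Return the endpoints X_eps(T) of n_paths simulated solutions."""
    rng = np.random.default_rng(seed)
    dt = T / n_steps
    X = np.zeros(n_paths)
    for _ in range(n_steps):
        X = X + a(X) * dt + eps * rng.normal(0.0, np.sqrt(dt), size=n_paths)
    return X

if __name__ == "__main__":
    a = lambda x: np.sign(x) * np.sqrt(np.abs(x))
    for eps in (0.5, 0.1, 0.02):
        XT = euler_maruyama(a, eps)
        # For this drift the extremal deterministic solutions are +-(t/2)^2, so
        # as eps -> 0 the endpoints X_eps(1) should cluster near +-0.25.
        print(eps, float(np.mean(np.abs(XT))), float(np.mean(XT > 0)))
\end{verbatim}
This is only a sanity check; the rigorous statements are the theorems given below.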
By the Zvonkin--Veretennikov theorem \cite{Veretennikov1981,Zvonkin1974}, equation \eqref{eq:ode_pert} has a unique strong solution. In this paper we consider the following problem: \begin{quotation} \emph{Identify the limit $\lim_{\ve\to0} X_\ve$, and show that it satisfies \eqref{eq:ode}.} \end{quotation} Somewhat informally, the challenges are: \begin{itemize} \item determining whether the sequence $\{X_\ve\}_\ve$ (or a subsequence) converges, and in what sense; \item identifying the limit(s), either by a closed form expression or some defining property; \item proving that the limit solves \eqref{eq:ode} by passing to the limit in the (possibly discontinuous) term $a(X_\ve)$. \end{itemize} The problem originated in the 1981 paper by Veretennikov \cite{Veretennikov1981b}, and was treated extensively in the 1982 paper by Bafico and Baldi \cite{BaficoBaldi1982}. Little work has been done on this problem since then, despite its great interest. The original work of Bafico and Baldi dealt with the Peano phenomenon for an autonomous ordinary differential equation. They considered continuous drifts which are zero at some point and are not Lipschitz continuous on at least one side of the origin. In their paper they show that the $\ve\to0$ limit of the probability measure that represents the solution of the stochastic equation is concentrated on at most two trajectories. Further, they compute explicitly some limit probability measures for specific drifts. Unfortunately, since the result of Bafico and Baldi relies on the direct computation of the solution of an elliptic PDE, it only works in one dimension. In one dimension this elliptic PDE reduces to a second-order boundary value problem for which an explicit solution can be computed. Therefore, there is little hope that this approach will also work in higher dimensions. The only other work from the previous century known to us is the 1994 paper by Mathieu \cite{Mathieu1994}. In 2001 Gradinaru, Herrmann and Roynette published a paper \cite{GradinaruHerrmannRoynette2001} which showed some of the results of Bafico and Baldi using a large deviations approach. Herrmann did some more work on small-noise limits later on together with Tugaut \cite{HerrmannTugaut2010, HerrmannTugaut2012, HerrmannTugaut2014}. Yet another approach to Bafico and Baldi's original problem was presented by Delarue and Flandoli in \cite{DelarueFlandoli2014}. They apply a careful argument based on exit times. Notably, it also works in arbitrary dimension, but with a very specific right-hand side, in contrast to the original assumption of a general continuous function; see also Trevisan \cite{Trevisian13}. We also point out the recent paper by Delarue and Maurelli \cite{DelarueMaurelli2020}, where multidimensional gradient dynamics with H\"older-type coefficients are perturbed by a small Wiener noise. The 2008 paper by Buckdahn, Ouknine and Quincampoix \cite{BuckdahnOuknineQuincampoix2008} shows that the zero-noise limit is concentrated on the set of all Filippov solutions of \eqref{eq:ode}. Since this set is potentially very large, this result is of limited use to us. Even less work has been done on zero-noise limits for partial differential equations. To the best of our knowledge, the only paper published so far is Attanasio and Flandoli's note on the linear transport equation \cite{AttanasioFlandoli2009}.
A new approach was proposed by Pilipenko and Proske when the drift in \eqref{eq:ode} has H\"older-type asymptotics in a neighborhood of $x=0$ and the perturbation is a self-similar noise \cite{PilipenkoProske2015}. They used space-time scaling and reduce a solution of the small-noise problem to a study of long time behaviour of a stochastic differential equation with a {\it fixed} noise. This approach can be generalized to multidimensional case and multiplicative Levy-noise perturbations \cite{PilipenkoProske2018, KulikPilipenko2020, PavlyukevichPilipenko2020, PilipenkoProske2021}. \subsection{Uniqueness of classical solutions} If the drift $a=a(x)$ is continuous then the question of existence and uniqueness of solutions of \eqref{eq:ode} is well established. If $a$ is {continuous} then it's known since Peano that there always exists at least one solution (at least for small times). Binding \cite{Binding1979} found that the solution is unique {if and only if} $a$ satisfies a so-called Osgood condition at all zeros $x_0$ of $a$: \begin{equation}\label{eq:osgood_cond} \int_{x_0-\delta}^{x_0} \frac{1}{a(z)\wedge0}\,dz= -\infty,\qquad \int_{x_0}^{x_0+\delta} \frac{1}{a(z)\vee0}\,dz = +\infty \end{equation} for all $\delta\in(0,\delta_0)$ for some $\delta_0>0$. (Here and in the remainder we denote \(\alpha \wedge \beta\coloneqq\min(\alpha,\beta)\) and $\alpha\vee\beta\coloneqq\max(\alpha,\beta)$.) The unique solution starting at $x$ is then given by \begin{equation}\label{eq:deterministicsolution} X(t;x) = \begin{cases} x & \text{if } a(x)=0 \\ A^{-1}(t) & \text{if } a(x)\neq0 \end{cases} \end{equation} (at least for small $t$), where $A(y)\coloneqq\int_{x}^y 1/\drift(z)\, dz$ and $A^{-1}$ is its inverse function. If $a$ is discontinuous --- say, $a\in L^\infty(\mbR)$ --- then the question of existence and uniqueness is much more delicate. The paper \cite{Fjordholm2018} gives necessary and sufficient conditions for the uniqueness of \emph{Filippov solutions} of \eqref{eq:ode}. We remark here that the extension to Filippov solutions might lead to non-uniqueness, even when the classical solution is unique. To see this, let $E\subset\mbR$ be measure-dense, i.e.~a set for which both $U\cap E$ and $U\setminus E$ have positive Lebesgue measure for any nonempty, open set $U\subset\mbR$ (see \cite{Rud83} for the construction of such a set), and let $a=1+\ind_E$. Then \eqref{eq:deterministicsolution} is the unique classical solution for any starting point $x\in\mbR$, whereas any function satisfying $\frac{d}{dt}X(t)\in[1,2]$ for a.e.~$t>0$ will be a Filippov solution. We will show that even in cases such as this one, the stochastically perturbed solutions converge to the classical solution, and not just any Filippov solution, as was shown in \cite{BuckdahnOuknineQuincampoix2008}. \subsection{Main result} We aim to prove that the distribution of solutions $X_\ve$ of \eqref{eq:ode_pert} converges to a distribution concentrated on either a single solution of the deterministic equation \eqref{eq:ode}, or two ``extremal'' solutions. Based on the discussion in the previous section, we can divide the argument into cases depending on whether $a$ is positive, negative or changes sign in a neighbourhood, and in each case, whether an Osgood-type condition such as \eqref{eq:osgood_cond} holds. The case of negative drift is clearly analogous to a positive drift, so we will merely state the results for negative drift, without proof. 
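Purely as an illustration of how the candidate limit trajectories can be obtained from the drift alone (the toy drift and helper names below are our own assumptions, not part of the results), one can approximate $A^{-1}$ from \eqref{eq:deterministicsolution} numerically by quadrature and interpolation; the same recipe produces the extremal solutions $\psi_\pm$ defined in \eqref{eq:maximalsolutions} below.
\begin{verbatim}
# Minimal sketch: approximate psi_+(t) = A_+^{-1}(t), where A_+(x) = int_0^x dz/a(z),
# by quadrature and interpolation.  The drift below is a toy assumption.
import numpy as np

def extremal_solution(a, x_max=1.0, n=200_000):
    """Return (t, psi) with psi[j] ~ psi_+(t[j]) on the range [0, A_+(x_max)]."""
    x = np.linspace(0.0, x_max, n + 1)
    xm = 0.5 * (x[1:] + x[:-1])                # midpoints avoid the zero of a at x = 0
    A = np.concatenate(([0.0], np.cumsum(np.diff(x) / a(xm))))   # A[k] ~ A_+(x[k])
    t = np.linspace(0.0, A[-1], 500)
    return t, np.interp(t, A, x)               # invert the strictly increasing A_+

if __name__ == "__main__":
    a = lambda x: np.sqrt(x)                   # satisfies int_0^r dz/a(z) < infinity
    t, psi = extremal_solution(a)
    # analytic check: here A_+(x) = 2*sqrt(x), hence psi_+(t) = (t/2)^2
    print(float(np.max(np.abs(psi - (t / 2.0) ** 2))))  # small quadrature error
\end{verbatim}
The same inversion applied on the negative half-line produces $\psi_-$.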
Under the sole assumption $a\in L^\infty(\mbR)$, the sequence $\{X_\ve\}_\ve$ is weakly relatively compact in $C([0,T])$, for any $T>0$. (Indeed, by \eqref{eq:ode_pert}, $X_\ve-\ve W$ is uniformly Lipschitz, and $\ve W\overset{P}{\to}0$ as $\ve\to0$. See e.g.~\cite{Billingsley1999} for the full argument.) Hence, the problems are to characterize the distributional limit of any convergent subsequence, to determine whether the entire sequence converges (i.e., to determine whether the limit is unique), and to determine whether the sense of convergence can be strengthened. Without loss of generality we will assume that the process starts at $x=0$. If $a(0)=0$ but $a$ does \textit{not} satisfy the Osgood condition \eqref{eq:osgood_cond} at $x=0$, then both $\psi_-$ and $\psi_+$ are classical solutions of \eqref{eq:ode} (along with infinitely many other solutions), where \begin{equation}\label{eq:maximalsolutions} \psi_\pm(t) \coloneqq A_\pm^{-1}(t), \qquad \text{where } A_\pm(x) \coloneqq \int_0^x \frac{1}{a(z)}\,dz \text{ for } x\in\mbR_\pm. \end{equation} Generally, the functions $\psi_\pm$ are defined in a neighborhood of 0. We have assumed that $a$ is bounded, so $\psi_\pm$ cannot blow up in finite time, but they can reach singular points $R_\pm$ where $A_\pm$ blow up. If $t_\pm\in(0,\infty]$ are the times when $\psi_\pm(t_\pm)=R_\pm$ then we set $\psi_\pm(t)\equiv R_\pm$ for all $t\geq t_\pm$. We aim to prove that the distribution of $X_\ve$ converges to a distribution concentrated on the two solutions $\psi_-,\ \psi_+$, and to determine the weighting of these two solutions. \begin{theorem}\label{thm:ZeroNoisePositiveDrift111} Let $a\in L^\infty(\mbR)$ satisfy $a\geq 0$ a.e.~in $(-\delta_0, \delta_0)$ for some $\delta_0>0$, and \begin{equation}\label{eq:osgoodOnesided} \int_{0}^{\delta_0} \frac{1}{a(z)} dz<\infty. \end{equation} Then, for any $T>0$, $X_\ve$ converges uniformly in probability to $\psi_+$: \begin{equation}\label{eq:C2} \big\|X_\ve-\psi_+ \big\|_{C([0,T])} \overset{P} \to 0 \qquad\text{as } \ve\to0. \end{equation} An analogous result holds for \emph{negative} drifts, with obvious modifications. \end{theorem} The proof of Theorem \ref{thm:ZeroNoisePositiveDrift111} for strictly positive drifts $a$ is given in Section \ref{sec:positive_drift}, while the general case is considered in Section \ref{section:finalOfTheorem1.1}. The final theorem also applies to signed drifts: \begin{theorem}\label{thm:ZeroNoiseRepulsive} Let $a\in L^\infty(\mbR)$ satisfy \begin{equation}\label{eq:osgoodrepulsive} -\int_{\alpha}^{0} \frac{1}{a(z)\wedge0}\, dz<\infty, \qquad \int_{0}^{\beta} \frac{1}{a(z)\vee 0}\, dz<\infty \end{equation} for some $\alpha<0<\beta$ (compare with \eqref{eq:osgood_cond}). Let $\{\ve_k\}_k$ be some sequence satisfying $\ve_k>0$ and $\lim_{k\to\infty}\ve_k=0$, and define \begin{equation}\label{eq:weights} p_k \coloneqq \frac{s_{\ve_k}(0)-s_{\ve_k}(\alpha)}{s_{\ve_k}(\beta)- s_{\ve_k}(\alpha)} \in [0,1], \qquad s_\ve(r) \coloneqq \int_0^r \exp\Bigl(-\frac{2}{\ve^2}\int_0^z a(u)\,du\Bigr)\,dz. \end{equation} Then $\{P_{\ve_k}\}_k$, where $P_\ve$ denotes the distribution of $X_\ve$, is weakly convergent if $\{p_k\}_k$ converges. Defining $p\coloneqq \lim_{k}p_k$ and $P\coloneqq\wlim_k P_{\ve_k}$, we have \begin{equation}\label{eq:limitMeasure} P = (1-p)\delta_{\psi_-} + p\delta_{\psi_+}. \end{equation} \end{theorem} The proof is given in Section \ref{sec:repulsive}, where we also provide tools for computing $p$. \subsection{Outline of the paper} We now give an outline of the rest of this manuscript.
In Section \ref{sec:technical_results} we give several technical results on convergence of SDEs with respect to perturbations of the drift; the relation between the solution and its exit time; and the distribution of the solution of an SDE. The goal of Section \ref{sec:positive_drift} is to prove Theorem \ref{thm:ZeroNoisePositiveDrift111} in the case where $a>0$, and in Section \ref{section:finalOfTheorem1.1} we extend to the case $a\geq0$. In Section \ref{sec:repulsive} we prove Theorem \ref{thm:ZeroNoiseRepulsive} and provide several results on sufficient conditions for convergence. Finally, we give some examples in Section \ref{sec:examples}. \section{Technical results}\label{sec:technical_results} In this section we list a few technical results. The first two results are comparison principles. In order to prove them we use approximations by SDEs with smooth coefficients and the classical results on comparison. Since we do not suppose that the drift is smooth or even continuous, the results are not standard. \begin{theorem}\label{thm:convergenceSDE_Thm} Let $\{\drift_n\from \mbR \rightarrow \mbR \}_{n\geq0}$ be uniformly bounded measurable functions such that $\drift_n \to \drift_0$ pointwise a.e.~as $n\to\infty$. Let $X_n$ be a solution to the SDE \[ X_n (t )= x_n + \int_0^t \drift_n (X_n (s )) ds + W(t),\qquad t\in[0,T]. \] Then $\{X_n\}_n$ converges uniformly in probability: \[ \bigl\|X_n(t)-X_0(t)\bigr\|_{C([0,T])} \overset{P}\to 0 \qquad \text{as } n\to\infty. \] \end{theorem} For a proof, see e.g.~\cite[Theorem~2.1]{Pilipenko2013}. \begin{theorem}\label{thm:comparisonThm} Let \( \drift_1, \drift_2\from \mbR \rightarrow \mbR \) be locally bounded measurable functions satisfying \( \drift_1 \leq \drift_2\) and let $x_1\leq x_2$. Let \( X_1, X_2 \) be solutions to the equations \begin{align*} X_i (t )= x_i + \int_0^t \drift_i (X_i (s)) ds + W(t), \qquad i=1,2. \end{align*} Then \[ X_1 (t )\leq X_2 (t )\qquad \forall\ t \geq 0 \] with probability 1. \end{theorem} The proof is given in Appendix \ref{app:comparisonprinciple}. \begin{lemma}\label{lem:timeinversion} Let $\{f_n\}_{n\geq 1}\subset C([0,T])$ be a uniformly convergent sequence of non-random continuous functions and let $f_0\in C([0,T])$ be a strictly increasing function. Set $\tau^x_n\coloneqq\inf\bigl\{t\geq 0 : f_n(t)=x\bigr\}$ for every $n\geq 0$, and assume that \[ \tau^x_n \to\tau^x_0 \qquad \text{for every } x\in \big(f_0(0), f_0(T)\bigr)\cap\mbQ. \] Then \[ f_n\to f_0 \qquad \text{in } C([0,T]) \text{ as } n\to\infty. \] \end{lemma} \begin{proof} Let $\mathcal{T}\coloneqq f_0^{-1}(\mbQ)$, and note that this is a dense subset of $[0,T]$, since $f_0^{-1}$ is continuous. Let $t\in\mathcal{T}$ be arbitrary and let $x\coloneqq f_0(t)\in\mbQ$. By assumptions of the lemma we have $t=\tau_0^x=\lim_{n\to\infty}\tau_n^x.$ Moreover, since $f_n(\tau^x_n)=x$ for sufficiently large $n$, we have \begin{equation}\label{eq:240} f_0(t)=x=\lim_{n\to\infty}f_n(\tau^x_n) = \lim_{n\to\infty} f_n(t), \end{equation} the last step following from the fact that $f_n$ converges uniformly and $\tau^x_n\to \tau^x_0=t$ as $n\to\infty$. Thus, $\{f_n\}_n$ converges pointwise to $f_0$ on a dense subset of $[0,T]$. But $\{f_n\}_n$ is uniformly convergent by assumption, so necessarily $f_n\to f_0$ uniformly. \end{proof} \begin{corollary}\label{cor:ConvergenceOfPaths} Let $\{\xi_n\}_{n\geq 1} $ be a sequence of continuous stochastic processes $\xi_n\from[0,\infty)\to\mbR$ that is locally uniformly convergent with probability $1$. 
Let $\xi_0$ be a strictly increasing continuous process satisfying $\xi_0(0)=0$ and $\lim_{t\to\infty}\xi_0(t)=\infty$. Set $\tau_n^x\coloneqq\inf\{t\geq 0 : \xi_n(t)\geq x\}$ and assume that \[ \tau_n^x \overset{P}\to\tau_0^x \qquad \text{for every } x\in[0,\infty)\cap\mbQ. \] Then \[ \xi_n \to \xi_0 \qquad \text{locally uniformly with probability }1. \] \end{corollary} \begin{proof} Enumerate the positive rational numbers as $\mbQ\cap (0,\infty)=\{x_n\}_n$. Select a sequence $\{n^1_k\}_k$ such that \[ \lim_{k\to\infty}\tau^{x_1}_{n^1_k} = \tau^{x_1}_0 \qquad \text{$\Pr$-a.s.} \] Then select a sub-subsequence $\{n^2_k\}_k$ of $\{n^1_k\}_k$ such that \[ \lim_{k\to\infty}\tau^{x_2}_{n^2_k} = \tau^{x_2}_0 \qquad \text{$\Pr$-a.s.,} \] and so on. Then \[ \Pr\Bigl(\forall\ j\in\mbN \quad \lim_{k\to\infty}\tau^{x_j}_{n^k_k} = \tau^{x_j}_0 \Bigr) = 1. \] From Lemma \ref{lem:timeinversion} it follows that \[ \Pr\Bigl(\lim_{k\to\infty}\xi_{n^k_k}=\xi_0 \quad \text{uniformly in }[0,T]\Bigr)=1 \] for any $T>0$. This yields the result. \end{proof} Assume that $\drift, \sigma\from \mbR\to\mbR$ are bounded measurable functions, $\sigma$ is separated from zero. It is well known that the stochastic differential equation \[ d\xi(t) = \drift(\xi(t))dt+ \sigma(\xi(t)) dW(t), \qquad t\geq 0, \] has a unique (weak) solution, which is a continuous strong Markov process, i.e., $\xi$ is a diffusion process. Denote $L\coloneqq\drift(x)\frac{d}{dx}+\frac{1}2\sigma^2(x) \frac{d^2}{dx^2}$ and let $s$ and $m$ be a scale function and a speed measure of $\xi,$ see details in \cite[Chapter VII]{RevuzYor1999}. Define the hitting time of $\xi$ as $\tau^y\coloneqq\inf\{t\geq 0 : \xi(t) =y\}$. Recall that $s$ and $m$ are well-defined up to constants, and $s$ is a non-degenerate $L$-harmonic function, i.e., \begin{equation}\label{eq:Lharmonic} L s=0, \end{equation} in particular \begin{equation}\label{eq:eq_scale} s(x)\coloneqq\int_{y_1}^x\exp\left(-\int_{y_2}^y\frac{2 a(z)}{\sigma(z)^2}dz\right) dy, \end{equation} and \begin{equation}\label{eq:463} m(dy)=\frac{2}{s'(y)\sigma(y)^2}dy \end{equation} for any choices of $y_1, y_2,$ see \cite[Chapter VII, Exercise 3.20]{RevuzYor1999}. \begin{theorem}\label{thm:exit_time} Let $x_1<x_2$ be arbitrary. \begin{enumerate}[leftmargin=*,label=(\roman*)] \item \cite[Chapter VII, Proposition 3.2 and Exercise 3.20]{RevuzYor1999} \label{thm:exit_time1} \begin{align*} \Pr^{x}\big(\tau^{x_1}\wedge \tau^{x_2}<\infty\big)=1 \qquad &\forall\ x\in[x_1,x_2] \\ \intertext{and} \Pr^{x}\bigl(\tau^{x_1}< \tau^{x_2}\bigr)=\frac{s(x_2)-s(x)}{s(x_2)-s(x_1)} \qquad &\forall\ x\in[x_1,x_2], \end{align*} \item \label{thm:exit_time3}\cite[Chapter VII, Corollary 3.8]{RevuzYor1999} For any $I=(x_1,x_2) $, $x\in I$ and for any non-negative measurable function $f$ we have \begin{equation}\label{eq:194} \Exp^x\biggl(\int_0^{\tau^{x_1}\wedge \tau^{x_2}} \!\!f(\xi(t)) dt\biggr) = \int_{x_1}^{x_2} \!G(x,y) f(y) m(dy), \end{equation} where $G=G_I$ is a symmetric function such that \[ G_I(x,y)=\frac{(s(x)-s(x_1))(s(x_2)-s(y))}{s(x_2)-s(x_1)}, \qquad x_1\leq x\leq y\leq x_2. \] \end{enumerate} \end{theorem} \begin{remark}\label{rem:harmonic_functions}~ \begin{enumerate}[leftmargin=*,label=(\textit{\roman*})] \item The function $\tilde u(x)\coloneqq\Exp^x\Bigl(\int_0^{\tau^{x_1}\wedge \tau^{x_2}} f(\xi(t)) dt\Bigr)$ from the left-hand side of \eqref{eq:194} is a solution to \[ \begin{cases} L \tilde u(x) =-f(x), & x\in(x_1,x_2)\\ \tilde u(x_1)=\tilde u(x_2)=0. 
\end{cases} \] The function $G$ from \eqref{eq:194} is the corresponding Green function, in the sense that $\tilde{u}(x)$ can be written as the right-hand side of \eqref{eq:194}. \item \label{thm:exit_time2} If we take $f(x)=1$ in \eqref{eq:194}, then we get a formula for the expectation of the exit time $u(x)\coloneqq\Exp^x(\tau^{x_1}\wedge \tau^{x_2})$, $x\in[x_1,x_2]$. In particular, \[u(x)=-\int_{x_1}^x2\Phi(y)\int_{x_1}^y \frac{dz}{\sigma(z)^2\Phi(z)}dy+ \int_{x_1}^{x_2}2\Phi(y)\int_{x_1}^y \frac{dz}{\sigma(z)^2\Phi(z)}dy \frac{\int_{x_1}^{x}\Phi(y)dy}{\int_{x_1}^{x_2}\Phi(y)dy},\] where $\Phi(x)=\exp\left(-\int_{x_1}^x\frac{2 \drift(z)}{\sigma(z)^2}dz\right).$ \end{enumerate} \end{remark} Finally, the following result will be quite useful when taking limits {$\sigma=\sigma_\ve(x)\coloneqq\ve\to0$} in terms such as $s$ and $u$ above. \begin{lemma}\label{lem:approxidentity} Let $\alpha<\beta$ and $\ve\neq0$, let $f,g\in L^1((\alpha,\beta))$ with $f>0$ almost everywhere, and let \begin{equation*} g_\ve(y)\coloneqq\int_{y}^{\beta}\exp\left(-\int_{y}^z \frac{f(u)}{\ve^2}\,du\right)\frac{f(z)}{\ve^2}g(z)\,dz, \qquad y\in[\alpha,\beta]. \end{equation*} Then $g_\ve \to g$ as $\ve\to 0$ in $L^1((\alpha,\beta))$ and pointwise a.e.~ in $y\in(\alpha,\beta)$. The same is true if \begin{equation*} g_\ve(y)\coloneqq\int_{\alpha}^{y}\exp\left(-\int_z^y \frac{f(u)}{\ve^2}\,du\right)\frac{f(z)}{\ve^2}g(z)\,dz, \qquad y\in[\alpha,\beta]. \end{equation*} \end{lemma} The proof is given in Appendix \ref{app:comparisonprinciple}. Note that this lemma provides a positive answer to the question raised by Bafico and Baldi in \cite[Remark~b~in~Section~6]{BaficoBaldi1982} on whether \cite[Proposition 3.3]{BaficoBaldi1982} still holds under the sole assumption of \( \int_0^r 1/a(z)dz < + \infty \). \section{Positive drifts}\label{sec:positive_drift} This section is dedicated to the proof of Theorem \ref{thm:ZeroNoisePositiveDrift111}. In order to prove the theorem, we first prove the following: \begin{theorem}\label{thm:ZeroNoiseUnifPositive} Let $a\in L^\infty(\mbR)$ and assume that there exist positive constants $\delta_0,c_->0$ such that \begin{equation}\label{eq:assumption_c_pm} a(x)\geq c_- \quad \text{for a.e. } x\in(-\delta_0,\infty). \end{equation} Then we have the uniform convergence in probability \begin{equation}\label{eq:result} \|X_\ve- \psi_+\|_{C([0,T])}\overset{P}\to 0 \quad \text{as } \ve\to0 \text{ for all }T>0. \end{equation} \end{theorem} \begin{proof}[Proof of Theorem \ref{thm:ZeroNoiseUnifPositive}] The proof consists of these steps: \begin{enumerate}[label=\arabic*.] \item Show weak relative compactness of $\{X_\ve\}_\ve$.\item Show that $\bar X_0$ is strictly increasing, where $\bar X_0$ is a limit point of $\{X_\ve\}_\ve$. \item Reduce to proving convergence of the hitting times $\tau^\ve\to\tau$, see Lemma \ref{lem:timeinversion}. \end{enumerate} \medskip\noindent \textit{Step 1:} For any $T>0$ the family $\{X_\ve\}_{\ve\in (0,1]}$ is weakly relatively compact in $C([0,T])$ (see e.g.~\cite{Billingsley1999}). Since $\psi_+$ is non-random, the convergence statement \eqref{eq:result} is equivalent to the weak convergence \[ X_\ve\Rightarrow \psi_+ \qquad \text{ in } C([0,T]) \text{ as } \ve\to0 . \] for any $T>0$. To prove the latter, it suffices to verify that if $\{X_{\ve_k}\}_k$ is any convergent subsequence, then $\psi_+$ is its limit. \medskip \noindent \textit{Step 2:} Assume that $X_{\ve_k}\Rightarrow \bar X_0$ as $k\to\infty$. 
Since \[ X_{\ve_k}(t)=\int_0^t \drift(X_{\ve_k}(s))\, ds+\ve_k W(t) \qquad \forall\ t\in[0,T], \] and $\ve_k W \overset{P}{\to} 0$, Slutsky's theorem implies that also \begin{equation}\label{eq:Lip} \int_0^\cdot \drift(X_{\ve_k}(s))\, ds \Rightarrow \bar X_0 \qquad \text{in }C([0,T]). \end{equation} By Skorokhod's representation theorem \cite[Theorem 1.6.7]{Billingsley1999}, we may assume that the convergence in \eqref{eq:Lip} happens almost surely. Since $c_-\leq a \leq c_+$ (for some $c_+>0$), we conclude that \[ c_-\leq \frac{\bar X_0(t_2)-\bar X_0(t_1)}{t_2-t_1} \leq c_+ \qquad \forall\ t_1,t_2\in[0,T], \text{ almost surely.} \] In particular, $\bar{X}_0$ is strictly increasing. \medbreak \noindent \textit{Step 3:} Notice that assumption \eqref{eq:assumption_c_pm} implies that $\lim_{t\to\infty}\psi_+(t)=+\infty.$ Define \[ \tau_\ve^x\coloneqq\inf\{t\geq 0\,:\, X_\ve(t)=x\}, \qquad \tau_0^x \coloneqq \inf\{t\geq 0 \,:\, \psi_+(t)=x\} = A(x) \] where $A(x)\coloneqq \int_0^x a(z)^{-1}\,dz$ (cf.~\eqref{eq:deterministicsolution}). By Corollary \ref{cor:ConvergenceOfPaths} it is enough to show convergence in probability of $\tau_\ve$: \begin{equation}\label{eq:conv_hitting} \tau_\ve^x\overset{P}\to A(x) \qquad\text{as }\ve\to0 \text{ for every } x\in\mbQ\cap [0,\infty). \end{equation} To check \eqref{eq:conv_hitting} it is sufficient to verify that \begin{subequations} \begin{alignat}{2} &\lim_{\ve\to0} \Exp(\tau_\ve^x) = A(x) &\qquad&\text{for any } x\in\mbQ\cap [0,\infty), \label{eq:conv_hitting_expectation} \\ &\lim_{\ve\to0}\Var(\tau_\ve^x)= 0 &&\text{for any } x\in\mbQ\cap [0,\infty). \label{eq:conv_hitting_variance} \end{alignat} \end{subequations} We prove these properties under less restrictive conditions on $a$, given in the lemma below. \begin{lemma}\label{lem:properties_of_time} Let $R,\delta>0$ and let $a\in L^\infty(\mbR)$ satisfy $a > 0$ a.e.~in $(-\delta,R)$. Assume that the Osgood-type condition \begin{equation}\label{eq:positivedriftcondition} \int_{0}^R \frac{1}{a(z)}\, dz<\infty \end{equation} is satisfied. Denote $A(r)\coloneqq\int_0^r a(z)^{-1}\,dz$ for $r\in[0,R]$. Then \begin{subequations} \begin{alignat}{2} &\lim_{\ve\to0}\Pr^x\big(\tau^{-\delta}_\ve>\tau^{R}_\ve\big)=1 &&\forall \ 0\leq x\leq R, \label{eq:ProbabilityFirstExit} \\ &\lim_{\ve\to0}\Exp^x\big(\tau^{-\delta}_\ve\wedge \tau^r_\ve\big) = A(r) {-A(x)} &\qquad& \forall\ 0\leq x<r\leq R. \label{eq:ExpectedTrajectory} \\ \intertext{{Moreover, if $a(x)\geq c_-$ for $x\in(-\infty,-\delta)$ for some constant $c_->0$, then also}} & {\lim_{\ve\to0}\Exp^0 ( \tau^r_\ve) =A(r)} &&\forall\ 0<r\leq R, \label{eq:ConvergenceOfExpectationsExits} \\ \intertext{ and if $a(x)\geq c_->0$ for all $ x\in\mbR$, then} &{\lim_{\ve\to0}\Var^0( \tau^r_\ve) =0} &&\forall\ 0<r\leq R. \label{eq:VanishingVariance} \end{alignat} \end{subequations} \end{lemma} We finalize the proof of Theorem \ref{thm:ZeroNoiseUnifPositive} and then prove the claims of Lemma \ref{lem:properties_of_time} separately. Define the function \[ \tilde a(x):=\begin{cases} a(x) & \text{if } x>-\delta,\\ c_- & \text{if } x\leq -\delta,\end{cases} \] and denote the solution to the corresponding stochastic differential equation by $\tilde X_\ve$. It follows from Lemma \ref{lem:properties_of_time} that \[ \|\tilde X_\ve- \psi_+\|_{C([0,T])}\overset{P}\to 0 \qquad \text{as } \ve\to0 \text{ for all }T>0. 
\]
Uniqueness of the solution yields $\Pr\bigl(\tilde X_\ve(t)= X_\ve(t) \text{ for } t\leq \tau_\ve^{-\delta}\bigr)=1.$ It is easy to see that $\Pr(\tau_\ve^{-\delta}=\infty)\to1 $ as $\ve\to0.$ This completes the proof of Theorem \ref{thm:ZeroNoiseUnifPositive}.
\end{proof}

\begin{proof}[Proof of \eqref{eq:ProbabilityFirstExit} in Lemma \ref{lem:properties_of_time}]
By Theorem \ref{thm:exit_time}\ref{thm:exit_time1}, we can write
\[
\Pr^x(\tau^r_\ve<\tau^{-\delta}_\ve) = \frac{s_\ve(x)}{s_\ve(r)} \geq \frac{s_\ve(0)}{s_\ve(r)}
\]
for every $x\in[0,r]$, where (cf.~\eqref{eq:eq_scale})
\begin{equation}\label{eq:scalefunction}
s_\ve(x)\coloneqq\int_{-\delta}^xe^{-B(y)/\ve^2}\, dy, \qquad B(y) \coloneqq 2\int_{-\delta}^y a(z) dz.
\end{equation}
We have
\begin{equation}\label{eq:scale-function-estimate}
s_\ve(0) = \int_{-\delta}^0 e^{-B(y)/\ve^2}\,dy \geq \delta e^{-B(0)/\ve^2}
\end{equation}
since $B$ is nondecreasing. For sufficiently small $\ve>0$ we can find $y_\ve>0$ such that $B(y_\ve)=B(0)+\ve$. Note that $y_\ve\to0$ as $\ve\to0$. Again using the fact that $B$ is nondecreasing, we can estimate
\begin{align*}
s_\ve(r) &= s_\ve(0)+\int_0^r e^{-B(y)/\ve^2}\,dy \leq s_\ve(0) + y_\ve e^{-B(0)/\ve^2} + (r-y_\ve)e^{-B(y_\ve)/\ve^2} \\
&\leq e^{-B(0)/\ve^2}\Bigl(s_\ve(0)e^{B(0)/\ve^2} + y_\ve + re^{-1/\ve}\Bigr).
\end{align*}
Using \eqref{eq:scale-function-estimate}, we get
\[
\Pr^x(\tau^r_\ve<\tau^{-\delta}_\ve) \geq \frac{s_\ve(0)e^{B(0)/\ve^2}}{s_\ve(0)e^{B(0)/\ve^2} + y_\ve+re^{-1/\ve}} \geq \frac{\delta}{\delta + y_\ve+re^{-1/\ve}}.
\]
Since $y_\ve+re^{-1/\ve}\to0$ as $\ve\to0$, we conclude that $\Pr^x(\tau^r_\ve<\tau^{-\delta}_\ve)\to1$ as $\ve\to0$.
\end{proof}

\begin{proof}[Proof of \eqref{eq:ExpectedTrajectory} in Lemma \ref{lem:properties_of_time}]
We will show that for any $r\in(0,R]$ and $x\in[0,r]$, we have $\lim_{\ve\to0} \Exp^x\big(\tau^{-\delta}_\ve \wedge \tau^r_\ve\big) = \int_x^r\drift(z)^{-1}dz.$ It follows from Theorem \ref{thm:exit_time}\ref{thm:exit_time3}\ with $x_1=-\delta$, $x_2=r$, $f\equiv1$, $s= s_\ve$ (cf.~\eqref{eq:scalefunction}) and $m=m_\ve$ (cf.~\eqref{eq:463}) that for any $\delta>0$ and $x\in[0,r]$,
\begin{equation}\label{eq:668}
\begin{aligned}
&\Exp^x\big(\tau^{-\delta}_\ve \wedge \tau^{r}_\ve\big) = \int_{-\delta}^r G_\ve(x,y)\,m_\ve(dy) \\
&= \int_{-\delta}^x G_\ve(y,x)\,m_\ve(dy)+\int_x^r G_\ve(x,y)\,m_\ve(dy) \\
&= \int_{-\delta}^x \frac{s_\ve(y)(s_\ve(r)-s_\ve(x))}{s_\ve(r)}\, m_\ve(dy)+ \int_x^r \frac{s_\ve(x)(s_\ve(r)-s_\ve(y))}{s_\ve(r)}\,m_\ve(dy) \\
&= \int_{-\delta}^x \underbrace{\frac{s_\ve(y)}{s_\ve(r)}}_{\eqqcolon\, p_\ve(y)} (s_\ve(r)-s_\ve(x))\, m_\ve(dy) + \underbrace{\frac{s_\ve(x)}{s_\ve(r)}}_{=\,p_\ve(x)} \int_x^r (s_\ve(r)-s_\ve(y))\, m_\ve(dy) \\
&= \int_{-\delta}^x p_\ve(y)\left[ \int_x^r\exp\left(-\int_{-\delta}^z\frac{2 \drift(u)}{\ve^2}du\right) dz\right] \frac{2}{\ve^2} \exp\left(\int_{-\delta}^y\frac{2 \drift(z)}{\ve^2}dz\right) dy \\
&\quad + p_\ve(x)\int_x^r\left[ \int_{y}^r\exp\left(-\int_{-\delta}^z\frac{2 \drift(u)}{\ve^2}du\right) dz \right] \frac{2}{\ve^2} \exp\left(\int_{-\delta}^y\frac{2 \drift(z)}{\ve^2}dz\right) dy \\
&= \int_{-\delta}^xp_\ve(y) \int_x^r\exp\left(-\int_y^z\frac{2 \drift(u)}{\ve^2}du\right)\frac{2}{\ve^2} \,dz dy \\
&\quad + p_\ve(x)\int_x^r\int_{y}^r\exp\left(-\int_y^z\frac{2 \drift(u)}{\ve^2}du\right)\frac{2}{\ve^2} \,dzdy \\
&= { \int_{-\delta}^xp_\ve(y) \int_y^r\exp\left(-\int_y^z\frac{2 \drift(u)}{\ve^2}du\right) \frac{2 \drift(z)}{\ve^2} \frac{\ind_{(x,r)}(z)}{ \drift(z)} \,dz dy }\\
&\quad +
p_\ve(x)\int_x^r\int_{y}^r\exp\left(-\int_y^z\frac{2 \drift(u)}{\ve^2}du\right) \frac{2 \drift(z)}{\ve^2} \frac{1}{ \drift(z)} \,dzdy \\ &= I_\ve + \mathit{II}_\ve. \end{aligned} \end{equation} By Theorem \ref{thm:exit_time}\ref{thm:exit_time1} we have $p_\ve(x) = \Pr^x(\tau_\ve^{-\delta}>\tau_\ve^r)$, and \eqref{eq:ProbabilityFirstExit} in Lemma \ref{lem:properties_of_time} implies that $\lim_{\ve\to0}p_\ve(x)=1$ for every $x\in[0,r]$. Letting $f(z)=2a(z)$ and $g(z) = \frac{1}{\drift(z)}\ind_{(x,r)}(z)$ for $z\in[0,r]$, we see that the $z$-integral in $\mathit{II}_\ve$ can be written as \[ {\int_y^r\exp\left(-\int_y^z\frac{f(u)}{\ve^2}du\right)\frac{f(z)}{\ve^2}g(z) \,dz.} \] Note that $f,g\in L^1([0,r])$, by \eqref{eq:positivedriftcondition}. Thus, we can apply Lemma \ref{lem:approxidentity} with $\alpha=0$, $\beta=r$ to get \[ g_\ve(y)\coloneqq\int_y^r\exp\left(-\int_y^u\frac{2 \drift(z)}{\ve^2}dz\right)\frac{2}{\ve^2} \,du \to g(y) \] in $L^1([0,r])$ and pointwise a.e.\ as $\ve\to0$, so that \[ \mathit{II}_\ve \to \int_x^r g(y)\,dy = \int_x^r\frac{1}{a(y)}\,dy. \] A similar manipulation will hold for $I_\ve$, with the same functions $f$ and $g$, yielding \[ I_\ve \to \int_{-\delta}^x \frac{1}{a(y)}\ind_{(x,r)}(y)\,dy = 0. \] Putting these together gives \[ \lim_{\ve\to0}\Exp^x\big(\tau^{-\delta}_\ve \wedge \tau^{r}_\ve\big) = \lim_{\ve\to0} I_\ve+\mathit{II}_\ve = \int_x^r \frac{1}{a(y)}\,dy. \] This concludes the proof. \end{proof} \begin{proof}[Proof of \eqref{eq:ConvergenceOfExpectationsExits} in Lemma \ref{lem:properties_of_time}] {For any $x\in[0,r)$, note that $\lim_{\delta\to+\infty} \Exp^x(\tau^{-\delta}_\ve\wedge \tau^r_\ve)=\Exp^x(\tau^r_\ve)$. Using \eqref{eq:668} and the assumption $a\geq c_->0$ it is easy to obtain the uniform estimates for expectations and to see that $\lim_{\ve\to0} \Exp^0(\tau^r_\ve)= A(r).$} \end{proof} \begin{proof}[Proof of \eqref{eq:VanishingVariance} in Lemma \ref{lem:properties_of_time}] Let $X_\ve$ solve \eqref{eq:ode_pert} and define $Y_\ve(t) = \ve^{-2}X_\ve(\ve^2t)$. Substitution into \eqref{eq:ode_pert} then gives \begin{equation}\label{eq:scaledSDE} Y_\ve(t) = \int_0^t \drift\big(\ve^2 Y_\ve(s)\big)\,ds + B(t) \end{equation} where $B(t)=\ve^{-1}W(\ve^2t)$ is another Brownian motion. Applying the same scaling to $\tau$, we see that if $\pi^n_\ve$ is the exit time of $Y_\ve$ from $(-\infty,n]$ then $\pi^n_\ve = \ve^{-2}\tau^{\ve^2n}_\ve$. To this end, fix $x>0$, let $n=\ve^{-2} x$ (assumed for simplicity to be an integer) and define the increments $\zeta^1_\ve=\pi^1_\ve$, $\zeta^2_\ve=\pi^2_\ve-\pi^1_\ve$, $\dots$, $\zeta^n_\ve = \pi^n_\ve-\pi^{n-1}_\ve$. The strong Markov property ensures that $\zeta^1_\ve,\dots,\zeta^n_\ve$ are independent random variables. Hence, \begin{align*} \Var(\tau^x_\ve) &= \ve^4\Var(\pi^n_\ve) = \ve^4\Var\Biggl(\sum_{k=1}^n\zeta^k_\ve\Biggr) \\ &= \ve^4\sum_{k=1}^n\Var(\zeta^k_\ve). \end{align*} Hence, if we can bound $\Var(\zeta^k_\ve)$ by a constant independent of $\ve$, then $\Var(\tau^x_\ve) \leq \ve^4Cn = C x \ve^2 \to 0$, and we are done. To this end, note first the naive estimate $\Var(\zeta^k_\ve)\leq \Exp((\zeta^k_\ve)^2)$. Next, we invoke the comparison principle Theorem \ref{thm:comparisonThm} between $Y_\ve$ and \[ Z_\ve(t)\coloneqq\int_0^t c_-\,dt+B(t) = c_-t+B(t), \] yielding $Z_\ve(t)\leq Y_\ve(t)$ for all $t\geq0$, almost surely. 
Hence, $\pi^n_\ve \leq \tilde{\pi}^n_\ve$, where $\tilde{\pi}^n_\ve$ is the exit time of $Z_\ve$, and correspondingly, $\zeta^k_\ve\leq \tilde{\zeta}^k_\ve$ for $k=1,\dots,n$. Since $(\tilde{\zeta}^k_\ve)_{k=1}^n$ are identically distributed, we get \[ \Exp\big((\zeta^k_\ve)^2\big) \leq \Exp\big((\tilde{\zeta}^k_\ve)^2\big) = \Exp\big((\tilde{\zeta}^1_\ve)^2\big) = \Exp\big((\tilde{\pi}^1_\ve)^2\big). \] To estimate the latter, we have (letting $p_t = \mathrm{Law}(B_t) = \frac{1}{\sqrt{2\pi t}}e^{-|\cdot|^2/(2t)}$) \begin{align*} \Pr\big(\tilde{\pi}^1_\ve > t\big) &= \Pr\big(\tilde{\pi}^1_\ve > t,\ c_-t+B_t<1\big) + \underbrace{\Pr\big(\tilde{\pi}^1_\ve > t,\ c_-t+B_t \ge 1\big)}_{=\;0} \\ &\leq \Pr\big(c_-t+B_t<1\big) = \Pr\big(B_t<1-c_-t\big) \\ &= \int_{-\infty}^{1-c_-t} \frac{1}{\sqrt{2\pi t}}\exp\biggl(-\frac{|x|^2}{2t}\biggr)\,dx \\ &= \frac{1}{\sqrt{2\pi}}\int_{-\infty}^{(1-c_-t)/\sqrt{t}} \exp\biggl(-\frac{|y|^2}{2}\biggr)\,dy. \end{align*} It follows that \[ \Exp((\tilde{\pi}^1_\ve)^2) = \int_0^\infty 2t \Pr(\tilde{\pi}^1_\ve > t)\,dt \leq \frac{1}{\sqrt{2\pi}}\int_0^\infty 2t\int_{-\infty}^{(1-c_-t)/\sqrt{t}} \exp\left(-\frac{|y|^2}{2}\right)\,dy\,dt < \infty, \] and we are done. \end{proof} Using the above theorem and standard comparison principles, we extend the result to drifts satisfying an Osgood-type condition: \begin{lemma}\label{lem:ZeroNoiseOsgood} Let $a\in L^\infty(\mbR)$ satisfy $a>0$ a.e.~in $(-\delta_0,\infty)$ for some $\delta_0>0$. Assume that for all $R>0$, \[ \int_{0}^R \frac{1}{a(z)} dz<\infty. \] Then, for any $T>0$, $X_\ve$ converges to $\psi_+$: \begin{equation}\label{eq:C22} \big\|X_\ve-\psi_+\big\|_{C([0,T])} \overset{P} \to 0 \qquad\text{as } \ve\to0 \text{ for all } T>0 \end{equation} (where $\psi_+$ is the maximal solution \eqref{eq:maximalsolutions}). \end{lemma} \begin{proof} As in the proof of Theorem \ref{thm:ZeroNoiseUnifPositive} we know that $\{X_\ve\}_\ve$ is weakly relatively compact, so it has some weakly convergent subsequence $\{X_{\ve_k}\}_k$. Due to Skorokhod's representation theorem \cite[Theorem 1.6.7]{Billingsley1999} there exists a sequence of copies $\tilde X_{\ve_k}$ of $X_{\ve_k}$ that satisfy the corresponding SDEs with Wiener processes $B_{\ve_k}$ and such that $\{\tilde X_{\ve_k}\}_k$ converges almost surely to some continuous non-decreasing process $\tilde X$: \begin{equation}\label{eq:conv_tilde} \Pr\Bigl(\lim_{k\to\infty} \|\tilde X_{\ve_k}-\tilde X\|_{C([0,T])}=0 \quad \forall\ T>0\Bigr)=1. \end{equation} {The limit process is non-decreasing, so without loss of generality we may assume that function $\drift$ is such that $\drift(x)=c_-$ for all $x\in(-\infty,-\delta_0),$ where $c_->0$ is a constant.} Define \( \drift_n \coloneqq \drift + \nicefrac{1}{n} \), {let $\tilde X_{n,\ve}$ be the corresponding stochastic process and let $X_n$ denote the solution of the corresponding deterministic problem}. It holds for all \( n \in \mbN \) that \( \drift_n \geq \nicefrac{1}{n} \), thus the result above holds for \( \drift_n \). Let $\pi^x$, $\pi^x_{\ve_k}$, $\pi^x_{n,\ve_k}$, $\tau^x_n$ and $\tau^x$ be the hitting times of $\tilde X$, $\tilde X_{\ve_k}$, $\tilde X_{n,\ve_k}$, $X_n$ and $\psi_+$, respectively. 
By the comparison principle Theorem \ref{thm:comparisonThm}, we know that \begin{equation}\label{eq:ineq_limits1} \tilde X_{n,\ve_k} \geq \tilde X_{\ve_k}, \qquad \text{or equivalently,} \qquad \pi^x_{n,\ve_k} \leq \pi^x_{\ve_k} \; \forall\ x \end{equation} {(cf.~Lemma~\ref{lem:timeinversion}).} It follows from Theorem \ref{thm:ZeroNoiseUnifPositive} that $\tilde X_{n,\ve_k}\to X_n$ a.s.~as $k\to\infty$, which together with \eqref{eq:conv_tilde} and \eqref{eq:ineq_limits1} implies \begin{equation}\label{eq:ineq_limits2} X_n \geq \tilde X, \qquad\text{or equivalently,}\qquad \tau^x_{n} \leq \pi^x\;\forall\ x. \end{equation} The lower semi-continuity of a hitting time with respect to its process also implies that $\pi^x\leq \liminf_{k\to\infty} \pi^x_{\ve_k}$ a.s. for any $x\geq 0$. Hence, for any $x\geq 0$, \begin{align*} A(x)&=\lim_{n\to\infty}A_n(x) = \lim_{n\to\infty} \tau_n^x \leq \Exp(\pi^x) \\ &\leq \Exp\Bigl(\liminf_{k\to\infty} \pi_{\ve_k}^x\Bigr) \leq \liminf_{k\to\infty} \Exp\bigl(\pi_{\ve_k}^x\bigr) = A(x), \end{align*} the last equality following from \eqref{eq:ExpectedTrajectory} in Lemma \ref{lem:properties_of_time}. Hence, $\Exp(\pi^x)=A(x)$ for all $x\geq0$, and since $\pi^x\geq\tau_n^x\to A(x)$ as $n\to\infty$, we conclude that $\pi^x=A(x)$ almost surely for every $x\geq0$, so Corollary \ref{cor:ConvergenceOfPaths} implies that $\tilde X=A^{-1}=\psi_+$ almost surely. Since $\psi_+$ is non-random, we have the uniform convergence in probability \[ \Pr\biggl(\lim_{k\to\infty}\|X_{\ve_k}- \psi_+\|_{C([0,T])}=0 \quad\forall\ T>0\biggr)=1. \] And finally, since the limit $\psi_+$ is unique, we can conclude that the entire sequence $\{X_\ve\}_\ve$ converges. \end{proof} We are now ready to prove Theorem \ref{thm:ZeroNoisePositiveDrift111} under the additional condition that $a>0$ a.e.~in $(-\delta_0,0)$: \begin{proof}[Proof of Theorem \ref{thm:ZeroNoisePositiveDrift111} for positive $a$] The case when $ \int_{0}^{R} \frac{dx}{a(x)\vee0}<\infty $ for any $R>0$ (and hence, in particular, $a>0$ a.e.~in $(-\delta_0,\infty)$) has been considered in Lemma \ref{lem:ZeroNoiseOsgood}. Thus, we can assume that there is some $R>0$ such that $a>0$ a.e.~on $(-\delta_0,R)$, and for any (small) $\delta>0$, \begin{equation}\label{eq:osgoodblowup} \int_0^{R-\delta} \frac{dx}{a(x)}<\infty \quad\text{but}\quad \int_0^{R+\delta} \frac{dx}{a(x)\vee 0}=\infty. \end{equation} Recall that \[ \psi_+(x)= \begin{cases} A^{-1}(x),& x\in[0,A(R)),\\ R, & x\geq A(R). \end{cases} \] (Note that $A(R)$ may be equal to $\infty.$) The proof of the theorem consists of the following steps: \begin{enumerate}[label=\arabic*.] \item Prove the theorem for the stopped process $X_\ve(\cdot\wedge\tau^R_\ve)$ \item Prove the theorem for nonnegative drifts \item Extend to possibly negative drifts. \end{enumerate} \noindent\textit{Step 1.} Set $\widehat a_m(x)\coloneqq a(x)\ind_{x\leq R-\nicefrac{1}{m}}+\ind_{x>R-\nicefrac1m}$ for $m\in\mbN$, and note that $\widehat a_m$ satisfies the conditions of Lemma \ref{lem:ZeroNoiseOsgood}. Let $\widehat{X}_{m,\ve} $ denote the solution to the corresponding SDE, $\widehat{X}_{m} $ its limit, and $\widehat{\tau}_{m,\ve}^x,\ \widehat{\tau}_{m }^x$ the corresponding hitting times. It follows from the uniqueness of a solution that \[ \Pr\Bigl( \widehat{\tau}_{m,\ve}^{R-\nicefrac1m}=\widehat{\tau}_\ve^{R-\nicefrac1m}\Bigr)=1 \quad\text{and}\quad \Pr\Bigl(\widehat{X}_{m,\ve}(t) = X_{\ve}(t) \quad\forall\ t\leq \widehat{\tau}_\ve^{R-\nicefrac1m}\Bigr)=1. 
\] Thus, by Lemma \ref{lem:ZeroNoiseOsgood}, \begin{equation}\label{eq:605} \begin{split} \sup_{t\in[0,T]}\big|X_{\ve}\bigl(t\wedge \widehat{\tau}_\ve^{R-\nicefrac{1}{m}}\bigr)- A^{-1}\big(t\wedge \widehat{\tau}_\ve^{R-\nicefrac{1}{m}}\big)\big| &\overset{P} \to 0 \qquad\text{as } \ve\to0 \text{ for all } T>0, \\ \sup_{t\in[0,T]}\big|\widehat{X}_{m,\ve}\bigl(t\wedge \widehat{\tau}_\ve^{R-\nicefrac{1}{m}}\bigr)- A^{-1}\bigl(t\wedge \widehat{\tau}_\ve^{R-\nicefrac1m}\bigr)\big| &\overset{P} \to 0 \qquad\text{as } \ve\to0 \text{ for all } T>0, \end{split} \end{equation} for every $m\in\mbN$. Let $\overline X_0$ be a limit point of $\{X_\ve\}_\ve$ and $X_{\ve_k}\Rightarrow \overline X_0$ as $k\to\infty.$ It follows from \eqref{eq:605} that $\overline X_0(\cdot\wedge \tau^{R-\nicefrac1m}_m) = A^{-1}(\cdot\wedge \tau^{R-\nicefrac1m}_m )$, and since $m$ is arbitrary, we have $\overline{X}_0(\cdot\wedge \tau^{R} ) = A^{-1}(\cdot\wedge \tau^{R} )$, that is, $\overline X_0(\cdot\wedge\tau^R) = \psi_+(\cdot\wedge\tau^R)$. In particular, the entire sequence of stopped processes converges, by uniqueness of the limit. \medskip\noindent\textit{Step 2.} Assume next, in addition to \eqref{eq:osgoodblowup}, that $a\geq0$ a.e.~in $\mbR$. Any limit point of $\{X_\ve\}_\ve$ is a non-decreasing process, so to prove the theorem it suffices to verify that for any $\delta>0$ and $M>0$ \[ \limsup_{k\to\infty}\Pr \bigl( \tau^{R+\delta}_{\ve_k}<M\bigr)=0 \] Set $a_n\coloneqq a+\nicefrac{1}{n}$ and let $ X_{n,\ve}$ denote the solution to the corresponding SDE. It follows from comparison Theorem \ref{thm:comparisonThm} that for any $M>0$ \[ \limsup_{k\to\infty}\Pr\bigl(\tau^{R+\delta}_{\ve_k}<M\bigr)\leq \liminf_{n\to\infty}\limsup_{k\to\infty}\Pr\bigl(\tau^{R+\delta}_{n,\ve_k}<M\bigr). \] Theorem \ref{thm:ZeroNoiseUnifPositive} implies that $\lim_{\ve\to0} X_{n,\ve}=X_n=A^{-1}_n,$ so the right hand side of the above inequality equals zero for any $M$. This concludes the proof if $a$ is non-negative everywhere. \medskip\noindent\textit{Step 3.} In the case that $a$ takes negative values, we consider the processes $X_\ve^+$ satisfying the corresponding SDEs with drift $a^+(x)\coloneqq a(x)\vee 0$. We have already proved in Step 2 that \begin{alignat*}{2} \bigl\|X_\ve^+-\psi_+\bigr\|_{C([0,T])} \overset{P}\to 0 && \text{as }\ve\to0 \;\forall\ T>0 \\ \intertext{(since $a^+$ has the same deterministic solution $\psi_+$ as $a$ does), and in Step 1 that} \bigl\|X_\ve\big(\cdot\wedge \tau^R_0\big)-\psi_+\bigr\|_{C([0,T])} \overset{P}\to 0 &\qquad& \text{as }\ve\to0\;\forall\ T>0. \end{alignat*} Theorem \ref{thm:comparisonThm} yields $X_\ve^+(t)\geq X_\ve(t)$. Therefore, any (subsequential) limit of $\{X_\ve^+\}_\ve$ is greater than or equal to a limit of $\{ X_\ve\}_\ve$, and if $\bar X_0$ is a limit point of $\{X_\ve\}_\ve$ then \[ \Pr\Bigl(\bar X_0(t) = \psi_+(t) \ \forall\ t\leq\tau^R_0 \text{ and } \bar{X}_0(t) \leq R \ \forall\ t>\tau^R_0\Bigr) =1. \] On the other hand, it can be seen that any limit point $\bar X_0$ of $\{X_\ve\}_\ve$ satisfies \[ \Pr\Bigl(\exists\ t\geq \tau^0_R : \bar X_0(t)<R\Bigr)=0. \] Thus we have equality, $\bar X_0(t)=\psi_+(t)$ for all $t\geq 0 $ almost surely. This concludes the proof for the case $a(x)>0$ for $x\in(-\delta_0,0)$. The case $a(x)\geq 0$ for $x\in(-\delta_0,0)$ will be considered in \S\ref{section:finalOfTheorem1.1}. 
\end{proof} \section{Velocity with a change in sign}\label{sec:repulsive} In this section we consider the repulsive case and prove Theorem \ref{thm:ZeroNoiseRepulsive}. We also provide several tools for computing the zero noise probability distribution. \subsection{Convergence in the repulsive case} \begin{lemma}\label{lem:osgoodrepulsive} Let $\alpha<0<\beta$, assume that $a\in L^\infty(\mbR)$ satisfies the ``repulsive Osgood condition'' \eqref{eq:osgoodrepulsive}, and define $p_\ve$ by \begin{equation}\label{eq:weightdef} p_\ve \coloneqq \frac{- s_\ve(\alpha)}{s_\ve(\beta)- s_\ve(\alpha)}, \qquad s_\ve(r) \coloneqq \int_0^r e^{-B(z)/\ve^2} \,dz, \qquad B(z)\coloneqq 2\int_0^z a(u)\,du. \end{equation} Then \[ \limsup_{\ve\to0}\Exp^0\big(\tau_{\ve}^\alpha\wedge \tau_{\ve}^\beta\big) \leq \int_\alpha^\beta \frac{1}{|a(x)|}\,dx < \infty. \] If $p_{\ve_k}\to p$ as $k\to\infty$, then \[ \Exp^0\big(\tau_{\ve_k}^\alpha\wedge \tau_{\ve_k}^\beta\big) \to {(1-p)}\int_\alpha^0 \frac{-1}{a(z)}\,dz + {p}\int_0^\beta \frac{1}{a(z)}\,dz \qquad \text{as }k\to\infty. \] \end{lemma} \begin{proof} {By \eqref{eq:Lharmonic}, \eqref{eq:463}, and \eqref{eq:194} with $f=1$} we can write {\begin{align*} &\Exp^0\big(\tau_{\ve}^\alpha\wedge \tau_{\ve}^\beta\big) = \int_\alpha^0 \frac{(s_\ve(y)-s_\ve(\alpha))(s_\ve(\beta)-s_\ve(0))}{s_\ve(\beta)-s_\ve(\alpha)}\frac{2e^{B(y)/\ve^2}}{\ve^2}\,dy \\ &\qquad +\int_0^\beta \frac{(s_\ve(0)-s_\ve(\alpha))(s_\ve(\beta)-s_\ve(y))}{s_\ve(\beta)-s_\ve(\alpha)}\frac{2e^{B(y)/\ve^2}}{\ve^2}\,dy \\ &\quad= {(1-p_\ve)} \int_\alpha^0 (s_\ve(y)-s_\ve(\alpha))\frac{2e^{B(y)/\ve^2}}{\ve^2}\,dy + {p_\ve}\int_0^\beta (s_\ve(\beta)-s_\ve(y))\frac{2e^{B(y)/\ve^2}}{\ve^2}\,dy \\ &\quad= {(1-p_\ve) \int_\alpha^0\int_\alpha^y \frac{2e^{(B(y)-B(z))/\ve^2}}{\ve^2}\,dz\,dy+ p_\ve \int_0^\beta\int_y^\beta\frac{2e^{(B(y)-B(z))/\ve^2}}{\ve^2}\,dz\,dy}\\ &\quad= (1-p_\ve) \int_\alpha^0\int_\alpha^y \frac{2\exp\Bigl({\textstyle -\int_z^y \frac{2a(u)}{\ve^2} du}\Bigr)}{\ve^2}\,dz\,dy \\ &\qquad +p_\ve \int_0^\beta\int_y^\beta\frac{2\exp\Bigl({\textstyle -\int_z^y \frac{2a(u)}{\ve^2} du}\Bigr)}{\ve^2}\,dz\,dy\\ &\quad= (1-p_\ve) \int_\alpha^0\int_\alpha^y \exp\Bigl({\textstyle-\int_z^y \frac{2a(u)}{\ve^2} du}\Bigr) \frac{2 a(z)}{\ve^2}\frac{1}{a(z)}\,dz\,dy \\ &\qquad +p_\ve \int_0^\beta\int_y^\beta \exp\Bigl({\textstyle-\int_z^y \frac{2a(u)}{\ve^2} du}\Bigr) \frac{2 a(z)}{\ve^2}\frac{1}{a(z)}\,dz\,dy. \end{align*}} Setting $f(z)=2\sign(z)a(z)$ and $g(z)=\frac{1}{a(z)}$ in Lemma \ref{lem:approxidentity}, we find that the above two integrals with $\ve=\ve_k$ converge to \[ \int_\alpha^0 \frac{-1}{a(z)}\,dz \qquad\text{and}\qquad \int_0^\beta\frac{1}{a(z)}\,dz \] respectively, as $k\to\infty$. This concludes the proof. \end{proof} We can now prove the main theorem in the repulsive case. \begin{proof}[Proof of Theorem \ref{thm:ZeroNoiseRepulsive}] Let $X_{\ve_k'}$ be any weakly convergent subsequence of $\{X_{\ve_k}\}_k$, and let $\tau_{\ve_k'}$ and $\tau$ be the hitting times of $X_{\ve_k'}$ and its limit, respectively. By Lemma \ref{lem:osgoodrepulsive} we have for any $\alpha<0<\beta$ \[ \Exp^0(\tau^\alpha\wedge\tau^\beta)\leq \liminf_{k\to\infty}\Exp^0\bigl(\tau^\alpha_{\ve_k}\wedge\tau^\beta_{\ve_k}\bigr) = {(1-p)A(\alpha)+ pA(\beta)}. \] Consequently, $\Pr^0\bigl(\tau^\alpha\wedge\tau^\beta=\infty\bigr)=0$, so $\Pr^0(\tau^\alpha<\tau^\beta)=\lim_{k\to\infty}\Pr^0(\tau^\alpha_{\ve_k'}<\tau^\beta_{\ve_k'})={1-p}$ and $\Pr^0(\tau^\alpha>\tau^\beta)={p}$. 
Using Theorem \ref{thm:ZeroNoisePositiveDrift111} and the strong Markov property, the probability of convergence once the process escapes $(\alpha,\beta)$ at $x=\beta$ is one:
\[
\lim_{k\to\infty}\Pr^0\Bigl(\bigl\|X_{\ve_k'}(\cdot-\tau^\beta)-\psi_+(\cdot-A(\beta))\bigr\|_{C([0,T])}\leq\delta \bigm| \tau^\alpha>\tau^\beta \Bigr) = 1,
\]
for any sufficiently small $\delta>0$, and likewise for those paths escaping at $x=\alpha$. Passing $\alpha,\beta\to0$ yields
\begin{align*}
&\lim_{\delta\to0}\lim_{k\to\infty}\Pr^0\Bigl(\|X_{\ve_k'}-\psi_-\|_{C([0,T])}\leq\delta\Bigr) = {1-p}, \\
&\lim_{\delta\to0}\lim_{k\to\infty}\Pr^0\Bigl(\|X_{\ve_k'}-\psi_+\|_{C([0,T])}\leq\delta\Bigr) = {p}.
\end{align*}
Since this is true for any weakly convergent subsequence $\{X_{\ve_k'}\}_k$, and the limit is unique, the entire sequence $\{X_{\ve_k}\}_k$ must converge.
\end{proof}

\subsection{Probabilities in the repulsive case}
{Theorem \ref{thm:ZeroNoiseRepulsive} gives a concrete condition for convergence of the sequence of perturbed solutions, as well as a characterization of the limit distribution. In this section we give an explicit expression for the probabilities in the limit distribution, and an equivalent condition for convergence.}

Consider the integral
\[
B(x)\coloneqq 2\int_0^x a(y)\,dy
\]
and denote $B_\pm = B\bigr|_{\mbR_\pm}$. {Select any $\alpha<0$ and $\beta>0$ such that the function $\mu\from[0,\beta)\to(\alpha,0]$ defined by $\mu=B_-^{-1}\circ B_+$ is well-defined --- that is,
\[
B_+(x) = B_-(\mu(x)), \quad \forall\ x\in [0,\beta).
\]
Clearly, $B_\pm$ are Lipschitz continuous. Since $a$ is strictly positive (negative) a.e.~for $x>0$ ($x<0$), the inverses of $B_\pm$ are absolutely continuous (see e.g.~\cite[Exercise 5.8.52]{Bogachev2007}), so $\mu$ is also absolutely continuous. We now rewrite the probability of choosing the left/right extremal solutions $\psi_\pm$ in terms of $\mu$.}

\begin{theorem}\label{thm:limitprobs}
Let $a\in L^\infty(\mbR)$ satisfy \eqref{eq:osgoodrepulsive} and let $\mu\from[0,\beta)\to(\alpha,0]$ be as above. Then $\{p_\ve\}_\ve$ converges if either the derivative $\mu'(0)$ exists, or if $\mu'(0)=-\infty$. In either case, we have
\begin{subequations}\label{eq:limit_prob}
\begin{equation}\label{eq:limit_prob1}
\lim_{\ve\to0}p_\ve = {\frac{-\mu'(0)}{1-\mu'(0)}}.
\end{equation}
Moreover, the derivative $\mu'(0)$ exists if and only if the limit $\lim_{u\downarrow0}\frac{B_-^{-1}(u)}{B_+^{-1}(u)}$ exists, and we have the equality:
\begin{equation} \label{eq:limit_prob2}
\mu'(0)=\lim_{u\downarrow0}\frac{B_-^{-1}(u)}{B_+^{-1}(u)}.
\end{equation}
\end{subequations}
\end{theorem}

To prove the theorem we will need the following lemmas:

\begin{lemma}\label{lem:limits}
Let $\alpha<0<\beta$. Define $p_\ve$ as in \eqref{eq:weightdef} and $p_\ve'$ similarly, where $\alpha,\beta$ are replaced by any $\alpha'<0<\beta'.$ Then $\lim_{\ve\to0}p_\ve'/p_\ve = 1$. In particular, $p_{\ve_k}$ converges to some $p$ as $k\to\infty$ if and only if $p_{\ve_k}'$ converges to $p$.
\end{lemma}

The proof follows from the following observation: Since $z\mapsto B(z)$ is strictly increasing in $|z|$, for any positive $r_1< r_2$ or negative $r_1>r_2$,
\[
\lim_{\ve\to0}\frac{\int_{r_1}^{r_2} e^{-B(z)/\ve^2} \,dz}{\int_0^{r_1} e^{-B(z)/\ve^2} \,dz}=0.
\] {Next, we prove a technical lemma:} \begin{lemma}\label{lem:approxidentity2} Let $0<a \in L^\infty([0,\beta])$ and $f\in L^1(\mbR)$, and for $\ve>0$ and $x\in[0,\beta)$ define \begin{gather*} B(x) = 2\int_0^x a(y)\,dy, \qquad \nu_\ve(x) = e^{-B(x)/\ve^2}\ind_{[0,\beta]}(x), \\ \bar{\nu}_\ve = \int_0^\beta \nu_\ve(y)\,dy, \qquad f_\ve(x) = \frac{1}{\bar{\nu}_\ve}\int_0^\beta f(x+y)\nu_\ve(y)\,dy. \end{gather*} Then $f_\ve(x) \to f(x)$ as $\ve\to0$ if and only if $x$ is a Lebesgue point of $f$. \end{lemma} \begin{proof} {Let $x\in[0,\beta)$. For $s\in(0,\beta-x)$, let \[ F(s) = \int_0^s |f(x+y)-f(x)|\,dy, \qquad C_s = \sup_{y\in(0,s)}\tfrac{F(y)}{y}. \] Then $C_s \to 0$ as $s\to0$ if and only if $x$ is a Lebesgue point.} We estimate \begin{align*} |f_\ve(x)-f(x)| &= \frac{1}{\bar\nu_\ve}\biggl|\int_0^\beta (f(x+y)-f(x))\nu_\ve(y)\,dy\biggr| \\ &\leq \underbrace{\frac{1}{\bar{\nu}_\ve}\int_0^s |f(x+y)-f(x)|\nu_\ve(y)\,dy}_{=\,I_1} + \underbrace{\frac{1}{\bar{\nu}_\ve}\int_s^\beta |f(x+y)-f(x)|\nu_\ve(y)\,dy}_{=\,I_2}. \end{align*} For the first term we integrate by parts several times to get \begin{align*} I_1 &= F(s)\frac{\nu_\ve(s)}{\bar\nu_\ve} - \frac{1}{\bar\nu_\ve}\int_0^s F(y)\nu_\ve'(y)\,dy \leq F(s)\frac{\nu_\ve(s)}{\bar\nu_\ve} - \frac{C_s}{\bar\nu_\ve}\int_0^s y\nu_\ve'(y)\,dy \\ &= F(s)\frac{\nu_\ve(s)}{\bar\nu_\ve} - \frac{C_s}{\bar\nu_\ve} s\nu_\ve(s) + \frac{C_s}{\bar\nu_\ve}\int_0^s \nu_\ve(y)\,dy \\ &\leq F(s)\frac{\nu_\ve(s)}{\bar\nu_\ve} + \frac{C_s}{\bar\nu_\ve}\int_0^\beta \nu_\ve(y)\,dy\\ &= F(s)\frac{\nu_\ve(s)}{\bar\nu_\ve} + C_s. \end{align*} For the second term we estimate \begin{align*} I_2 &\leq 2\|f\|_{L^1} \frac{\nu_\ve(s)}{\bar\nu_\ve}. \end{align*} If we can find $s=s_\ve$ such that both $s_\ve\to0$ and $\frac{\nu_\ve(s_\ve)}{\bar\nu_\ve} \to 0$ as $\ve\to0$, then both $I_1$ and $I_2$ vanish in the $\ve\to0$ limit, and we can conclude the result. Below we explain the existence of such a choice. Since $B$ is increasing and Lipschitz continuous, with $B(0)=0$ and $\|B\|_\Lip \leq 2\|a\|_{L^\infty}<\infty$, there is some $\kappa<s$ satisfying $B(\kappa)=\tfrac12 B(s)$, and $\kappa\geq \frac{1}{2\|B\|_\Lip}B(s)$. Moreover, since $\nu_\ve$ is decreasing we have \[ \bar\nu_\ve = \int_0^\beta \nu_\ve(y)\,dy \geq \kappa\nu_\ve(\kappa) = \kappa e^{-B(\kappa)/\ve^2} = \kappa e^{-B(s)/(2\ve^2)}, \] so \[ \frac{\nu_\ve(s)}{\bar\nu_\ve} \leq \frac{1}{\kappa}e^{-B(s)/(2\ve^2)} \leq 2\|B\|_\Lip \frac{e^{-B(s)/(2\ve^2)}}{B(s)}. \] Now choose $s=s_\ve$ such that $B(s_\ve) = \ve$. (Such a number exists for sufficiently small $\ve>0$.) Then $s_\ve\to0$ as $\ve\to0$, and \[ \frac{\nu_\ve(s)}{\bar\nu_\ve} \leq 2\|B\|_\Lip \frac{e^{-1/(2\ve)}}{\ve} \to 0 \] as $\ve\to0$. This finishes the proof. \end{proof} \begin{proof}[Proof of Theorem \ref{thm:limitprobs}] We have \[ p_\ve = \frac{{-s_\ve(\alpha)}}{s_\ve(\beta)-s_\ve(\alpha)} = {\frac{-\frac{s_\ve(\alpha)}{s_\ve(\beta)}}{1-\frac{s_\ve(\alpha)}{s_\ve(\beta)}}}. \] By Lemma~\ref{lem:limits} we may assume $\mu(\beta)=\alpha$, so \begin{align*} s_\ve(\alpha) &= \int_0^\alpha e^{-B(\mu^{-1}(x))/\ve^2}\,dx = \int_0^{\beta}e^{-B(x)/\ve^2}\mu'(x)\,dx. \end{align*} Thus, \[ \frac{s_\ve(\alpha)}{s_\ve(\beta)} = \frac{1}{\bar\nu_\ve}\int_0^\beta \nu_\ve(x)\mu'(x)\,dx \] where \[ \nu_\ve(x) = e^{-B(x)/\ve^2}, \qquad \bar\nu_\ve = \int_0^\beta e^{-B(z)/\ve^2}\,dz. 
\]
From Lemma \ref{lem:approxidentity2} with $f(x)\coloneqq \mu'(x)$ it now follows that $p_\ve$ converges if either $0$ is a Lebesgue point for $\mu'$, or $\lim_{x\to0}\mu'(x)={-\infty}$. In the former case, we notice that $0$ is a Lebesgue point for $\mu'$ if the following limit exists:
\[
{\lim_{h\downarrow 0}}\frac{\int_0^h \mu'(z) \,dz}{h}= {\lim_{h\downarrow 0}}\frac{ \mu(h) -\mu(0)}{h}.
\]
The right hand side of the last equation is the usual definition of the derivative. To prove \eqref{eq:limit_prob2} notice that
\[
\lim_{h\downarrow 0}\frac{ \mu(h) -\mu(0)}{h}= \lim_{h\downarrow 0}\frac{ \mu(h)}{h}= \lim_{h\downarrow 0}\frac{B_-^{-1}\circ B_+(h)}{h} =\lim_{u\downarrow 0}\frac{B_-^{-1}(u)}{B_+^{-1}(u)}.
\]
\end{proof}

\subsection{Repulsive, regularly varying drifts}
Although Theorem \ref{thm:limitprobs} provides an explicit expression \eqref{eq:limit_prob} of the limit probabilities, the limit \eqref{eq:limit_prob2} might be difficult to evaluate in practice. It is clearly easier to study existence of the limits
\begin{equation}\label{eq:equiv_a_B}
\lim_{x\downarrow0} \frac{a(-x)}{a(x)}
\end{equation}
or
\begin{equation}\label{eq:equiv_a_B1}
\lim_{x\downarrow0} \frac{B(-x)}{B(x)}
\end{equation}
than that for the inverse functions in \eqref{eq:limit_prob2}. We will show that the limit in \eqref{eq:limit_prob} can easily be calculated using \eqref{eq:equiv_a_B} or \eqref{eq:equiv_a_B1} if $a$ or $B$ are regularly varying at $0$.

Recall that a positive, measurable function $f\colon [0,\infty)\to(0,\infty)$ is \emph{regularly varying} of index $\rho$ at $+\infty$ if $\lim_{x\to\infty}\frac{f(\lambda x)}{f(x)}=\lambda^\rho$ for all $\lambda>0$. It is regularly varying of index $\rho$ at $0$ if the function $x\mapsto f(1/x)$ is a regularly varying function of index $-\rho$ at $+\infty$. The set of regularly varying functions of index $\rho$ (at $+\infty$) is denoted by $R_\rho.$ It is well known that if $f\in R_\rho$, then $f(x)=x^\rho \ell(x)$ for some slowly varying function $\ell$, i.e.~some $\ell\from[0,\infty)\to(0,\infty)$ for which $\lim_{x\to\infty}\frac{\ell(\lambda x)}{\ell(x)}=1$ for all $\lambda>0$. We first consider the case when $B$ is regularly varying, and then the case when $a$ is. Note that the latter implies the former, but not {vice versa}.

\begin{proposition}\label{prop:B_regvar}
Assume that the functions $x\mapsto B_\pm(\pm x)$ are regularly varying of index $\rho>0$ at 0, and that the limit $c\coloneqq \lim_{x\downarrow0} B_-(-x)/B_+(x)$ exists (or equals $\infty$). Then $\{p_\ve\}_{\ve>0}$ converges, and
\begin{equation}\label{key}
p \coloneqq \lim_{\ve\to0} p_{\ve} = \frac{{c^{-1/\rho}}}{1+c^{-1/\rho}}.
\end{equation}
If the functions $x\mapsto B_\pm(\pm x)$ are regularly varying of different indices $\rho_\pm,$ then
\[
p \coloneqq \lim_{\ve\to0} p_{\ve} = \begin{cases} 1 & \rho_+<\rho_-\\ 0 & \rho_+>\rho_-. \end{cases}
\]
\end{proposition}

\begin{proof}
It follows from \cite[Exercise~14, p.~190]{BTG} that if $f_1, f_2\colon(0,\infty)\to(0,\infty)$ are non-decreasing, regularly varying functions at $0$ of index $\rho>0$, then
\[
\lim_{x\to0}\frac{f_1(x)}{f_2(x)}=1 \qquad \text{if and only if} \qquad \lim_{x\to0}\frac{f_1^{-1}(x)}{f_2^{-1}(x)}=1,
\]
where $f_1^{-1}, f_2^{-1}$ are inverse functions. Write now $f_1(x)=B_-(-x)$, $f_2(x)=c B_+(x)$. Then
\begin{equation}\label{eq:ratiolimit}
\lim_{x\to 0}\frac{f_1(x)}{f_2(x)}=1.
\end{equation}
The inverse function for $x\mapsto c B_+(x)$ is $x\mapsto B_+^{-1}(x/c),$ and $B_+^{-1}$ is regularly varying of index $1/\rho$ (see \cite[Theorem 1.5.12]{BTG}), so
\[
B_+^{-1}(x/c)= (x/c)^{1/\rho}\ell_1(x/c)\sim (x/c)^{1/\rho}\ell_1(x)= c^{-1/\rho} B_+^{-1}(x)\qquad \text{as } x\to 0
\]
(where $\ell_1$ is slowly varying at $0$ and $\sim$ denotes asymptotic equivalence). Hence, \eqref{eq:ratiolimit} yields
\begin{equation}\label{eq:ratiolimitinv}
\lim_{x\to0}\frac{B_-^{-1}(x)}{B_+^{-1}(x)}=c^{-1/\rho}.
\end{equation}
The same computation can be easily performed in reverse, so \eqref{eq:ratiolimit} and \eqref{eq:ratiolimitinv} are equivalent, and the result now follows from Theorem \ref{thm:limitprobs} if $B_\pm$ are of the same index. If $x\mapsto B_\pm(\pm x)$ are regularly varying of different indices $ {\rho_\pm},$ then the inverse functions are regularly varying functions of indices $\frac{1}{\rho_\pm},$ and the result is obvious.
\end{proof}

\begin{proposition}\label{prop:a_regvar}
Assume that both $x\mapsto a(\pm x)$ (for $x\geq0$) are regularly varying at $0$ with index $\rho>0$, and that the limit $c\coloneqq \lim_{x\downarrow0} \frac{-a(-x)}{a(x)}$ exists. Then $\{p_\ve\}_{\ve>0}$ converges, and
\[
p\coloneqq \lim_{\ve\to0}p_\ve = \frac{{c^{-1/(1+\rho)}}}{1+c^{-1/(1+\rho)}}.
\]
If the functions $x\mapsto a(\pm x)$ are regularly varying of different indices $\rho_\pm,$ then
\[
p \coloneqq \lim_{\ve\to0} p_{\ve} = \begin{cases} 1 & \text{if } \rho_+<\rho_-\\ 0 & \text{if } \rho_+>\rho_-. \end{cases}
\]
\end{proposition}

\begin{proof}
It follows from the Karamata theorem, see \cite[Theorem 1.6.1]{BTG}, that for $x>0$,
\begin{align*}
B(x)&= 2\int_0^x a(y) \, dy= 2\int_{1/x}^\infty a(1/z)z^{-2} \, dz = 2\int_{1/x}^\infty \ell(z)z^{-2-\rho} \, dz \\
&\sim \frac{2 \ell(1/x) x^{1+\rho}}{1+\rho} \sim \frac{2x a(x)}{1+\rho}
\end{align*}
as $x\to0$, and likewise for $x<0$. Thus, $x\mapsto B(\pm x)$ are regularly varying of index $1+\rho$. In particular,
\[
\lim_{x\downarrow0}\frac{B(-x)}{B(x)}=\lim_{x\downarrow0}\frac{-a(-x)}{a(x)}=c,
\]
so we can now apply Proposition \ref{prop:B_regvar} with $1+\rho$ in place of $\rho$ and get the desired result. The case when $a(\pm x)$ are regularly varying with different indices can be considered similarly, cf. Proposition \ref{prop:B_regvar}.
\end{proof}

Finally, we provide a result which simplifies the computation of the limit distribution for severely oscillating drifts.

\begin{proposition}\label{prop:oscillatingdrift}
Let $a\from\mbR\to\mbR$ satisfy $xa(x)\geq0$ for all $x\in\mbR$, and assume that it is of the form
\[
a(x) = b(x) + |x|^\gamma g(\tfrac1x),
\]
where $\gamma>0$, $b$ is regularly varying at $0$ of order $\rho<\gamma+1$, and $g\in L^\infty(\mbR)$ is such that its antiderivative $G(x)=\int_0^x g(y)\,dy$ also lies in $L^\infty(\mbR)$. Assume also that the limit $c\coloneqq \lim_{x\downarrow0} \frac{-b(-x)}{b(x)}$ exists. Then $\{p_\ve\}_{\ve>0}$ converges, and
\[
p\coloneqq \lim_{\ve\to0}p_\ve = \frac{{c^{-1/(1+\rho)}}}{1+c^{-1/(1+\rho)}}.
\]
\end{proposition}

\begin{proof}
We claim first that
\[
\int_0^x y^\gamma g(\tfrac1y)\,dy = O(x^{2+\gamma}) = o(x^{1+\rho}) \qquad \text{as } x\downarrow0.
\]
Indeed,
\begin{align*}
\biggl|\int_0^x y^\gamma g(\tfrac1y)\, dy\biggr| &= \biggl|\int_{1/x}^\infty z^{-2-\gamma} g(z)\, dz\biggr| \\
&= \biggl|-x^{2+\gamma}G(\tfrac1x) + (\gamma+2)\int_{1/x}^\infty z^{-3-\gamma}G(z)\,dz\biggr| \\
&\leq x^{2+\gamma}\|G\|_{L^\infty} + (\gamma+2)\|G\|_{L^\infty}\int_{1/x}^\infty z^{-3-\gamma}\,dz \\
&= 2x^{2+\gamma}\|G\|_{L^\infty}.
\end{align*}
It follows that the antiderivative $B(x)=2\int_0^x a(y)\,dy$ equals a regularly varying function of order $1+\rho$, plus a term of order $o(x^{1+\rho})$. Following the same procedure as in the proof of Proposition \ref{prop:a_regvar} yields the desired result.
\end{proof}

\section{Proof of Theorem \ref{thm:ZeroNoisePositiveDrift111}}\label{section:finalOfTheorem1.1}
We have already proved the theorem in Section \ref{sec:positive_drift} if $a>0$ a.e.~in a small neighborhood of 0. We will only prove the result for $a$ such that $a\geq 0$ for negative $x$ and $\int_0^{R}\frac{dy}{a(y)\vee 0}<\infty$ for all $R>0$. The general case, i.e., $a(x)\geq 0$ for a.e.~$x\in(-\delta_0,0)$ and $\int_0^{\delta_0}\frac{dy}{a(y)}<\infty$, is considered similarly to the reasoning in Section \ref{sec:positive_drift}.

{It follows from the comparison theorem that for any $x>0$ we have the inequality $X_\ve(t)\leq X_\ve^x(t)$ for $t\geq 0$ with probability 1, where $X_\ve^x$ is a solution of \eqref{eq:ode_pert} that started from $x$, $X_\ve^x(0)=x.$ Since $a$ is a.e.~positive on $(0,x)$, we have already seen that $\{X_\ve^x(t)\}_{\ve}$ converges to $\psi_+(\psi_+^{-1}(x)+t)$ as $\ve\to0.$ Thus, any limit point of $\{X_\ve(t)\}_\ve$ must be less than or equal to $\psi_+(\psi_+^{-1}(x)+t)$ for any $x>0$, almost surely. Therefore, any limit point of $\{X_\ve(t)\}_\ve$ does not exceed $\psi_+(t).$}

Define the function
\[
a_n(x):=\begin{cases} a(x) & \text{if } x\geq 0 \\ -\tfrac1n a(-\tfrac{x}{n}) & \text{if } x<0, \end{cases}
\]
and denote the corresponding solutions to stochastic differential equations by $X_{n,\ve}(t).$ Let us apply Theorem \ref{thm:limitprobs} to the sequence $\{X_{n,\ve}\}_\ve.$ Calculate the limit \eqref{eq:limit_prob2}:
\[
B_{n,+}(x)=B_+(x), \qquad B_{n,-}(-x)=\frac{2}{n}\int_0^{x} a(y/n)\, dy=B_+(x/n) \qquad \text{for } x\geq0.
\]
Thus,
\[
(B_{n,-})^{-1}(u)=n (B_{n,+})^{-1}(u), \qquad \lim_{u\downarrow0}\frac{(B_{n,-})^{-1}(u)}{(B_{n,+})^{-1}(u)}=n,
\]
and we get convergence
\[
P_{X_{n,\ve}}\Rightarrow \frac{1}{n+1}\delta_{-n\psi_+(n^{-2}t)}+\frac{n}{n+1}\delta_{\psi_+(t)} \qquad \text{as }\ve\to0.
\]
By the comparison theorem we have the inequality $X_{n,\ve}(t)\leq X_\ve(t), t\geq 0$ with probability 1. Therefore, any limit point of $\{X_\ve\}_\ve$ equals $\psi_+$ with probability at least $\frac{n}{n+1}.$ Since $n$ is arbitrary, we conclude that the limit of $\{X_\ve\}_\ve$ exists and equals $\psi_+$ almost surely. The limit is non-random, so we have convergence in probability, as in \eqref{eq:C2}. This finishes the proof of Theorem \ref{thm:ZeroNoisePositiveDrift111}.

\section{Examples}\label{sec:examples}

\begin{example}\label{ex:1}
For some fixed $\rho\in (0,1)$ we consider the function
\[
a(x)\coloneqq \sign(x)|x|^\rho \bigl(1+ \tfrac{1}{2}\phi\bigl(\tfrac{1}{x}\bigr)\bigr) \qquad \text{where } \phi(y)\coloneqq\sum_{n\in\mbZ}\bigl(\ind_{[2n-1,2n)}(y) - \ind_{[2n,2n+1)}(y)\bigr),
\]
defined for all $x\neq0$. Using Proposition \ref{prop:oscillatingdrift} with $b(x)=\sign(x)|x|^\rho$, $\gamma=\rho$ and $g(y)=\tfrac12\sign(y)\phi(y)$, we get $c=1$, and that $p_\ve \to \frac{1}{2}$.
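For completeness, let us briefly verify the hypotheses of Proposition \ref{prop:oscillatingdrift} in this example. The function $b(x)=\sign(x)|x|^\rho$ is regularly varying at $0$ of order $\rho<\gamma+1=\rho+1$, and $c=\lim_{x\downarrow0}\frac{-b(-x)}{b(x)}=1$. Moreover, $|g|\leq\tfrac12$, and since $\phi$ takes the values $+1$ and $-1$ on consecutive unit intervals, the antiderivative of $g$ satisfies
\[
|G(y)| = \Bigl|\int_0^y g(u)\,du\Bigr| \leq \tfrac12 \qquad \text{for all } y\in\mbR,
\]
so $g,G\in L^\infty(\mbR)$. Finally, $1+\tfrac12\phi(\tfrac1x)\in[\tfrac12,\tfrac32]$, so $xa(x)\geq0$ for all $x\neq0$.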
We also see that $a$ satisfies the repulsive condition \eqref{eq:osgoodrepulsive} of Theorem \ref{thm:ZeroNoiseRepulsive}, so we conclude that
\[
P_\ve \Rightarrow \tfrac12\delta_{\psi_-} + \tfrac12\delta_{\psi_+} \qquad\text{as } \ve\to0
\]
where $\psi_\pm$ are the maximal classical solutions.

Figure \ref{fig:Example51} shows an ensemble of approximate solutions for the above drift. We used noise sizes \(\varepsilon = \frac{3^{-i}}{e},\ i=2, \dots, 9\), and computed 150 samples of the solution with the Euler--Maruyama scheme with a step size $\Delta t=2.5\times 10^{-3}$ up to time $t=0.5$. The left-hand figure shows all sample paths (vertical axis) as a function of time (horizontal axis), where larger \(\varepsilon\) were given lighter shades of grey. The sample paths with the smallest \( \varepsilon \) are depicted in red. The right-hand figure shows the cumulative distribution function of the samples at the final time \( t = 0.5 \) using the smallest value for \( \varepsilon \). We can clearly see that the limiting distribution concentrates on the extremal solutions $\psi_-,\psi_+$, each with probability $\tfrac12$.
\begin{figure}
\includegraphics[width=0.49\linewidth]{./Example_5_1_plot.png}
\includegraphics[width=0.49\linewidth]{./Example_5_1_histogram.png}
\caption{Sample paths (left) and cumulative distribution function (right) for Example \ref{ex:1}.}\label{fig:Example51}
\end{figure}
\end{example}

\begin{example}\label{ex:2}
Let $a(x)=x^\beta$, $x>0,$ where $\beta\in (0,1).$ We claim that we can continuously extend $a$ to the set $(-\infty,0]$ such that
\begin{enumerate}[label=(\alph*)]
\item $-a(-x)\leq a(x)<0$ for all $x<0$;
\item $\int_{-1}^0 \frac{1}{a(x)}dx=-\infty,$ i.e., the Osgood condition is not satisfied to the left of zero;
\item $P_{X_\ve}\Rightarrow \tfrac12 \delta_{\psi_+}+\tfrac12 \delta_{0}$ as $\ve\to0,$ i.e., the limit process moves like the maximal positive solution $\psi_+(t)=((1-\beta)t)^{\frac{1}{1-\beta}}, t\geq 0$ with probability $\tfrac12$, and stays at 0 forever with probability $\tfrac12$.
\end{enumerate}
This example is not covered by the theory in the previous sections, and should therefore be read as a demonstration of the complex behaviours that can occur in the zero noise limit. Note also that the zero-noise limit is \textit{not} only concentrated on the maximal solution $\psi_+$, but also on the trivial solution $\psi_-\equiv0$.

Before we construct the extension, let us provide some simple preliminary analysis. If a function $a\from\mbR\to \mbR$ satisfies the linear growth condition, then the family $\{X_\ve\}_\ve$ is weakly relatively compact. If additionally the function $a$ is continuous, then any limit point of $\{X_\ve\}_\ve$ satisfies \eqref{eq:ode}. Both conditions (a) and (b) yield that any solution to \eqref{eq:ode}, and hence any limit point of $\{X_\ve\}_\ve$, has the form
\begin{equation}\label{eq:limit_sol}
X_0(t)= \begin{cases} 0, & t\leq \tau\\ ((1-\beta)(t-\tau))^{\frac{1}{1-\beta}},& t> \tau, \end{cases}
\end{equation}
where $\tau\in[0,\infty].$ Our aim is to find an extension of $a$ such that
\begin{equation}\label{eq:tau_probab}
\Pr(\tau=0)= \Pr(\tau=\infty)=\tfrac12
\end{equation}
for any limit point $X_0$ having representation \eqref{eq:limit_sol}.
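Splittings such as \eqref{eq:tau_probab} can also be explored numerically with the same type of Euler--Maruyama experiment that produced Figure \ref{fig:Example51} and Figure \ref{fig:Example52} below. The following minimal sketch (ours, for illustration only) shows the structure of such a computation; the drift $a(x)=\sign(x)|x|^{1/2}$, the noise levels and the random seed are illustrative assumptions, while the step size $\Delta t=2.5\times10^{-3}$, the horizon $t=0.5$ and the $150$ samples match the description of Example \ref{ex:1}.
\begin{verbatim}
import numpy as np

def drift(x, rho=0.5):
    # Illustrative repulsive drift a(x) = sign(x)|x|^rho; an assumption
    # for this sketch, not the exact drift used for the figures.
    return np.sign(x) * np.abs(x) ** rho

def euler_maruyama(eps, T=0.5, dt=2.5e-3, n_samples=150, seed=0):
    # Simulate n_samples paths of  dX = a(X) dt + eps dW,  X(0) = 0,
    # on [0, T] with the Euler--Maruyama scheme.
    rng = np.random.default_rng(seed)
    n_steps = int(round(T / dt))
    x = np.zeros(n_samples)
    for _ in range(n_steps):
        dw = rng.normal(0.0, np.sqrt(dt), size=n_samples)
        x = x + drift(x) * dt + eps * dw
    return x  # positions at time T

for eps in [0.3, 0.1, 0.03, 0.01]:      # illustrative noise levels
    xT = euler_maruyama(eps)
    # As eps decreases, the empirical law at time T concentrates near the
    # two extremal values psi_-(T) < 0 < psi_+(T), roughly half on each side.
    print(eps, np.mean(xT > 0), xT.min(), xT.max())
\end{verbatim}
For the present example one would replace the illustrative drift by the extension of $a$ constructed below.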
Let $A=\cup_{k\geq 1}[-\frac{1}{2^k}, -\frac{1}{2^k}+\frac{1}{4^k}].$ Set \begin{align*} \tilde a(x) &\coloneqq \sign(x) a(|x|)\ind_{x\notin A}= \begin{cases} x^\beta, & x> 0\\ -|x|^\beta,& x\leq 0,\ x\notin A \\ 0,& x\leq 0,\ x\in A, \end{cases} \\ \bar a(x) &\coloneqq \sign(x) a(|x|)= \sign(x) |x|^\beta= \begin{cases} x^\beta, & x> 0\\ -|x|^\beta ,& x\leq 0. \end{cases} \end{align*} Define $a$ on $(-\infty,0)$ to be any negative, continuous function such that $\int_{-\delta}^0 \frac{1}{a(x)}dx=-\infty$ for any $\delta>0$, and \[ \bar a(x)\leq a(x) \leq \tilde a(x) \text{ for all } x\in(-\infty,0). \] It is clear that there exists a function $a$ satisfying these properties. Introduce the transformed process \[ Y_\ve(t)\coloneqq \ve^{\frac{-2}{1+\beta}} X_\ve\bigl(\ve^{\frac{2(1-\beta)}{1+\beta}}t\bigr). \] It can be seen (see \cite{PilipenkoProske2018} for a more general case) that \begin{equation}\label{eq:1525} d Y_\ve(t)= a_\ve( Y_\ve(t))dt + d w_\ve(t), \end{equation} where $w_\ve(t)= \ve^{\frac{-(1-\beta)}{1+\beta}} w\bigl(\ve^{\frac{2(1-\beta)}{1+\beta}}t\bigr)$ is a Wiener process, and \begin{equation} \label{eq:1526} a_\ve(y)= \ve^{\frac{-2\beta}{1+\beta}}a\bigl(\ve^{\frac{ 2}{1+\beta}}y\bigr). \end{equation} Notice that $a_\ve(y)=a(y)$ for all $y\in (0,\infty)$ and for all $y<0$ such that $\ve^{\frac{ 2}{1+\beta}}y\notin A$. For all other $y<0$ we have the inequality $-|y|^\beta\leq a(y)<0$, by the choice of the function $a$. We have convergence $ a_\ve(y)\to \bar a(y)=\sign(y) |y|^\beta$ in Lebesgue measure on any interval $y\in[-R,R]$. Observe also that \[ \int_0^x a_\ve(y) dy \geq \int_0^x \hat a(y) dy \qquad \forall \ve>0,\ \forall x<0 \] where \[ \hat a(x)= \begin{cases} 0,\ & x\in [-\tfrac32\cdot 2^n, -2^n ] \text{ for some } n\in\mbZ \\ -|x|^\beta & \text{otherwise.} \end{cases} \] In particular, the last estimate yields \[ \sup_{\ve\in(0,1]}\lim_{R\to+\infty} \int_{-\infty}^{-R} \exp\biggl(-2\int_0^x a_\ve(y)\,dy\biggr) dx =0. \] Set \[ \sigma^X_{\ve}(p)\coloneqq\inf\{t\geq 0 : X_\ve(t)=p\}, \] \[ \sigma^Y_{\ve}(p)\coloneqq\inf\{t\geq 0 : Y_\ve(t)=p\} . \] The observations above and formulas of Theorem \ref{thm:exit_time} yield that for any $R>0$, and for any sequences $\{R^\pm_\ve\}$ such that $\lim_{\ve\to0}R^\pm_\ve=\pm\infty$ we have \begin{align*} &\nqquad\lim_{\ve\to0}\Pr\Bigl(\sigma^Y_{\ve}(R) < \sigma^Y_{\ve}(-R) \mid Y_\ve(0)=0\Bigr) = \lim_{\ve\to0}\Pr\Big( \sigma^Y_{\ve}(-R)< \sigma^Y_{\ve}(R) \mid Y_\ve(0)=0\Big) \\ ={}&\lim_{\ve\to0}\Pr\Big( \sigma^Y_{\ve}(R^+_\ve)< \sigma^Y_{\ve}(R^-_\ve) \mid Y_\ve(0)=0\Big) = \lim_{\ve\to0}\Pr\Big( \sigma^Y_{\ve}(R^-_\ve)< \sigma^Y_{\ve}(R^+_\ve) \mid Y_\ve(0)=0\Big)\\ ={}&\tfrac12. \end{align*} Hence, for any $\delta^\pm>0$ we have \begin{equation}\begin{aligned}\label{eq:1566} &\nqquad\lim_{\ve\to0}\Pr\Big( \sigma^X_{\ve}\big(R \ve^{\frac{2}{1+\beta}}\big) < \sigma^X_{\ve}\bigl(-R \ve^{\frac{2}{1+\beta}}\big) \mid X_\ve(0)=0\Big)\\ ={}& \lim_{\ve\to0}\Pr\Big( \sigma^X_{\ve}\bigl(-R \ve^{\frac{2}{1+\beta}}\big)< \sigma^X_{\ve}\big(R \ve^{\frac{2}{1+\beta}}\big) \mid X_\ve(0)=0\Big) \\ ={}&\lim_{\ve\to0}\Pr\Big(\sigma^X_{\ve}(\delta^+)< \sigma^X_{\ve}(\delta^-) \mid X_\ve(0)=0\Big) = \lim_{\ve\to0}\Pr\Big(\sigma^X_{\ve}(\delta^-)< \sigma^X_{\ve}(\delta^+) \mid X_\ve(0)=0\Big)\\ ={}&\tfrac12. 
\end{aligned} \end{equation} Hence, if $X_0$ is a limit point of $\{X_\ve\}$ having representation \eqref{eq:limit_sol}, then $\Pr(\tau=\infty)\geq \tfrac12.$ It also follows from Theorem \ref{thm:exit_time} that for any $R>0$ \[ \sup_{\ve>0}\Exp \Big(\sigma^Y_{\ve}(R)\wedge \sigma^Y_{\ve}(-R) \mid Y_\ve(0)=0\Big)<\infty. \] Thus, \begin{equation}\label{eq:1575} \sigma^X_{\ve}\big(R \ve^{\frac{2}{1+\beta}}\big)\wedge \sigma^X_{\ve}\bigl(-R \ve^{\frac{2}{1+\beta}}\big) \overset{\Pr}\to 0 \qquad \text{as } \ve\to0 \end{equation} if $X_{\ve}(0)=0.$ Let $\bar X_\ve $ be a solution to \[ d \bar X_\ve(t) =\bar a\big(\bar X_\ve(t)\big)dt +\ve d w(t) \] and define \[ \bar Y_\ve(t)\coloneqq \ve^{\frac{-2}{1+\beta}}\bar X_\ve\bigl(\ve^{\frac{2(1-\beta)}{1+\beta}}t\bigr). \] Then (cf.~\eqref{eq:1525}, \eqref{eq:1526}) \[ d \bar Y_\ve(t)= \bar a(\bar Y_\ve(t))dt + d w_\ve(t). \] In particular, if $\bar X_\ve(0)= R \ve^{\frac{2}{1+\beta}}$ for all $\ve>0,$ where $R$ is a constant, then all processes $\bar Y_\ve$ have the same distribution independent of $\ve.$ Notice that for any $R>0$, \begin{equation}\label{eq:1597} \Pr\Bigl(X_\ve(t) = \bar X_\ve(t), \ t\in [0, \sigma^X_{\ve}(0) ] \mid X_\ve(0)= \bar X_\ve(0)= R \ve^{\frac{2}{1+\beta}}\Bigr) =1. \end{equation} and \begin{equation}\begin{aligned}\label{eq:1600} p_R &\coloneqq \Pr\Bigl(\sigma^X_{\ve}(0)=\infty \mid X_\ve(0)= R \ve^{\frac{2}{1+\beta}}\Bigr) \\ &= \Pr\Bigl(X_\ve(t)>0, t\geq 0 \mid X_\ve(0)= R \ve^{\frac{2}{1+\beta}}\Bigr) \\ &=\Pr\Bigl(\bar X_\ve(t)>0, t\geq 0 \mid \bar X_\ve(0)= R \ve^{\frac{2}{1+\beta}}\Bigr) \\ &= \Pr\Bigl(\bar Y_\ve(t)>0 \mid \bar Y_\ve(0)= R\Bigr)\to 1 \qquad \text{as } R\to\infty. \end{aligned}\end{equation} It follows from \cite{PilipenkoProske2018} that if $\bar X_\ve(0)= R \ve^{\frac{2}{1+\beta}}, \ve>0$, then \begin{equation} \label{eq:1611} \bar X_\ve \Rightarrow p_R\delta_{\psi_+}+(1-p_R)\delta_{\psi_-} \qquad \text{as } \ve\to0. \end{equation} Hence, \eqref{eq:1566}, \eqref{eq:1575}, \eqref{eq:1597}, \eqref{eq:1600}, and \eqref{eq:1611} yield that for any limit point $X_0$ of $\{X_\ve\}$ we have $\Pr(\tau=0)\geq \tfrac12.$ This concludes the proof of the convergence $P_{X_\ve}\Rightarrow \tfrac12 \delta_{\psi_+}+\tfrac12 \delta_{0}$ as $\ve\to0.$ Figure \ref{fig:Example52} shows the same type of simulation as in Example \ref{ex:1}. From the figure it is clear that for small $\ve$, the samples split in two groups of equal size, one moving along $\psi_+$ and the other remaining around the origin. As the noise decreases, the left-going samples concentrate around the trivial solution $X\equiv0$. \begin{figure} \includegraphics[width=0.49\linewidth]{./Example_5_2_plot.png} \includegraphics[width=0.49\linewidth]{./Example_5_2_histogram.png} \caption{Sample paths (left) and cumulative distribution function (right) for Example \ref{ex:2}.}\label{fig:Example52} \end{figure} \end{example} \appendix \section{Appendix}\label{app:comparisonprinciple} \begin{proof}[Proof of Theorem \ref{thm:comparisonThm}] For a sequence of numbers $0<\ve_n\to0$, let $a_{i,n} = a_i*\omega_{\ve_n}$, where $\omega_\ve(z) = \ve^{-1}\omega\big(z\ve^{-1}\big)$ and $\omega\in C_c^\infty(\mbR)$ is a nonnegative mollifier. Let $X_{i,n}$ be the unique solution of \begin{equation} dX_{i,n} = \drift_{i,n}( X_{i,n}) dt + dW, \qquad i = 1,2,\ n\in\mbN. \label{eq:comparison} \end{equation} For the smoothened drift functions it still holds \( \drift_{1,n} \leq \drift_{2,n}\). 
Therefore, it follows from the classical comparison theorem (see e.g.~\cite{IkedaWatanabe1981}) that $X_{1,n} \leq X_{2,n}$. The application of Theorem \ref{thm:convergenceSDE_Thm} completes the proof in the case when $a_1, a_2\in L^\infty(\mbR)$. If $a_1, a_2$ are only locally bounded, then we approximate $X_1,X_2$ by solutions to SDEs with drifts $a_{i,M}\coloneqq a_i\ind_{[-M,M]}$. It follows from \cite[Remark 3b, p.~145]{Zvonkin1974} that \[ \Pr\bigl(X_i(t)=X_{i,M}(t) \;\forall\ t\leq \tau_{i,M}\bigr)=1, \qquad i=1,2, \] where $\tau_{i,M}= \inf\{t\geq 0 : |X_i(t)|\geq M\}.$ We have already proved that $X_{1,M}(t)\leq X_{2,M}(t)$ almost surely. This completes the proof of the theorem. \end{proof} \begin{proof}[Proof of Lemma \ref{lem:approxidentity}] We assume that $f$ is positive; the negative case follows similarly. Denote $B(z)\coloneqq\int_{\alpha}^z f(u)\,du$. Then $B$ is absolutely continuous and invertible, and since $B'(z)=f(z)>0$ for a.e.~$z$, the inverse $B^{-1}$ is also absolutely continuous (see e.g.~\cite[Exercise 5.8.52]{Bogachev2007}). Hence, we can write \begin{align*} g_\ve(y) &= \int_{y}^\beta e^{-(B(z)-B(y))/\ve^2}\frac{B'(z)}{\ve^2} g(z)\,dz \\ &= \int_{B(y)}^{B(\beta)} \frac{e^{-(v-B(y))/\ve^2}}{\ve^2}g\big(B^{-1}(v)\big)\,dv \end{align*} (where we made the change of variables $v=B(z)$). The function $[0,\infty)\ni v \mapsto \frac{e^{-v/\ve^2}}{\ve^2}$ is an approximate identity and therefore \[ g_\ve(y) \to g(B^{-1}(B(y))) = g(y) \qquad \text{as } \ve \to 0 \] in $L^1((\alpha,\beta))$, and pointwise whenever $v=B(y)$ is a Lebesgue point for $v \mapsto g\big(B^{-1}(v)\big)$; see e.g.~\cite[Theorems 8.14, 8.15]{Folland1999}. But $B$ and $B^{-1}$ are absolutely continuous, so these points coincide with the Lebesgue points for $g$. \end{proof} \section*{Acknowledgements} U.~S.~Fjordholm was partially supported by the Research Council of Norway project \textit{INICE}, project no.~301538. A.~Pilipenko acknowledges the support of the National Research Foundation of Ukraine (project 2020.02/0014 ``Asymptotic regimes of perturbed random walks: on the edge of modern and classical probability'') and the Senter for internasjonalisering av utdanning (SIU), within the project Norway--Ukrainian Cooperation in Mathematical Education, project number CPEA-LT-2016/10139. \begin{thebibliography}{99}
\bibitem{AttanasioFlandoli2009} S.~Attanasio and F.~Flandoli. \newblock Zero-noise solutions of linear transport equations without uniqueness: an example. \newblock {\em Comptes Rendus Mathematique}, 347(13-14):753--756, 2009.
\bibitem{BaficoBaldi1982} R.~Bafico and P.~Baldi. \newblock {Small random perturbations of Peano phenomena}. \newblock {\em Stochastics}, 6(3-4):272--292, 1982.
\bibitem{Billingsley1999} P.~Billingsley. \newblock {\em Convergence of Probability Measures}. \newblock Wiley Series in Probability and Statistics. John Wiley \& Sons, Inc., 2nd edition, 1999.
\bibitem{Binding1979} P.~Binding. \newblock The differential equation $\dot{x} = f \circ x$. \newblock {\em Journal of Differential Equations}, 31(2):183--199, 1979.
\bibitem{BTG} N.~H. Bingham, C.~M. Goldie, and J.~L. Teugels. \newblock {\em Regular Variation}. \newblock Number~27 in Encyclopedia of Mathematics and its Applications. Cambridge University Press, 1989.
\bibitem{Bogachev2007} V.~I. Bogachev. \newblock {\em Measure Theory Volume I}. \newblock Springer-Verlag, 2007.
\bibitem{BuckdahnOuknineQuincampoix2008} R.~Buckdahn, Y.~Ouknine, and M.~Quincampoix. \newblock On limiting values of stochastic differential equations with small noise intensity tending to zero.
\newblock {\em Bulletin des Sciences Mathematiques}, 133(3):229--237, 2009.
\bibitem{DelarueFlandoli2014} F.~Delarue and F.~Flandoli. \newblock {The transition point in the zero noise limit for a 1D Peano example}. \newblock {\em Discrete \& Continuous Dynamical Systems - A}, 34(10):4071--4083, 2014.
\bibitem{DelarueMaurelli2020} F.~Delarue and M.~Maurelli. \newblock Zero noise limit for multidimensional SDEs driven by a pointy gradient. \newblock {\em arXiv preprint}, arXiv:1909.08702, 2019.
\bibitem{Fjordholm2018} U.~S. Fjordholm. \newblock Sharp uniqueness conditions for one-dimensional, autonomous ordinary differential equations. \newblock {\em Comptes Rendus Mathematique}, 356(9):916--921, 2018.
\bibitem{Folland1999} G.~B. Folland. \newblock {\em Real Analysis}. \newblock Pure and Applied Mathematics. John Wiley \& Sons, Inc., 2nd edition, 1999.
\bibitem{GradinaruHerrmannRoynette2001} M.~Gradinaru, S.~Herrmann, and B.~Roynette. \newblock A singular large deviations phenomenon. \newblock {\em Annales de l'Institut Henri Poincar\'e Probabilit\'es et Statistiques}, 37(5):555--580, 2001.
\bibitem{HerrmannTugaut2010} S.~Herrmann and J.~Tugaut. \newblock Stationary measures for self-stabilizing processes: asymptotic analysis in the small noise limit. \newblock {\em Electronic Journal of Probability}, 15(69):2087--2116, 2010.
\bibitem{HerrmannTugaut2012} S.~Herrmann and J.~Tugaut. \newblock Self-stabilizing processes: Uniqueness problem for stationary measures and convergence rate in the small noise limit. \newblock {\em ESAIM: Probability and Statistics}, 16:277--305, 2012.
\bibitem{HerrmannTugaut2014} S.~Herrmann and J.~Tugaut. \newblock Mean-field limit versus small-noise limit for some interacting particle systems. \newblock {\em arXiv e-prints}, arXiv:1409.1159, Sep 2014.
\bibitem{IkedaWatanabe1981} N.~Ikeda and S.~Watanabe. \newblock {\em Stochastic Differential Equations and Diffusion Processes}. \newblock Number~24 in North-Holland Mathematical Library. North-Holland Publishing Company, 1981.
\bibitem{KulikPilipenko2020} A.~Kulik and A.~Pilipenko. \newblock On regularization by a small noise of multidimensional ODEs with non-Lipschitz coefficients. \newblock {\em Ukrainian Mathematical Journal}, 72:1445--1481, 2021. https://doi.org/10.1007/s11253-021-01865-7
\bibitem{Mathieu1994} P.~Mathieu. \newblock Zero white noise limit through Dirichlet forms, with application to diffusions in a random medium. \newblock {\em Probability Theory and Related Fields}, 99:549--580, 1994.
\bibitem{PavlyukevichPilipenko2020} I.~Pavlyukevich and A.~Pilipenko. \newblock Generalized Peano problem with L\'evy noise. \newblock {\em Electronic Communications in Probability}, 25, 2020.
\bibitem{Pilipenko2013} A.~Pilipenko. \newblock On strong existence and continuous dependence for solutions of one-dimensional stochastic equations with additive L\'evy noise. \newblock {\em Theory of Stochastic Processes}, 18 (34)(2):77--82, 2012.
\bibitem{PilipenkoProske2015} A.~Pilipenko and F.~N. Proske. \newblock On a selection problem for small noise perturbation in multidimensional case. \newblock {\em Stochastics and Dynamics}, 18(6), 2018.
\bibitem{PilipenkoProske2018} A.~Pilipenko and F.~N. Proske. \newblock On perturbations of an ODE with non-Lipschitz coefficients by a small self-similar noise. \newblock {\em Statistics \& Probability Letters}, 132:62--73, 2018.
\bibitem{PilipenkoProske2021} A.~Pilipenko and F.~N. Proske. \newblock Small noise perturbations in multidimensional case. \newblock {\em arXiv preprint}, arXiv:2106.09935, 2021.
\bibitem{RevuzYor1999} D.~Revuz and M.~Yor.
\newblock {\em Continuous Martingales and Brownian Motion}. \newblock Number 293 in A Series of Comprehensive Studies in Mathematics. Springer-Verlag, 3rd edition, 3rd corrected printing, 2005.
\bibitem{Rud83} W.~Rudin. \newblock {Well-Distributed Measurable Sets}. \newblock {\em The American Mathematical Monthly}, 90(1):41--42, 1983.
\bibitem{Trevisian13} D.~Trevisan. \newblock Zero noise limits using local times. \newblock {\em Electronic Communications in Probability}, 18(31):1--7, 2013.
\bibitem{Veretennikov1981b} A.~Yu.~Veretennikov. \newblock Approximation of ordinary differential equations by stochastic differential equations. \newblock {\em Matematicheskie Zametki}, 33(6):929--932, 1981. Translation in {\em Mathematical Notes of the Academy of Sciences of the USSR}, 33:476--477, 1983.
\bibitem{Veretennikov1981} A.~Yu.~Veretennikov. \newblock On strong solutions and explicit formulas for solutions of stochastic integral equations. \newblock {\em Mathematics of the USSR-Sbornik}, 39(3):387--403, 1981.
\bibitem{Zvonkin1974} A.~K. Zvonkin. \newblock A transformation of the phase space of a diffusion process that removes the drift. \newblock {\em Mathematics of the USSR-Sbornik}, 22(1):129--149, 1974. \end{thebibliography} \end{document}
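For completeness, the passage from $d X_\ve(t)= a(X_\ve(t))\,dt + \ve\, dw(t)$ to \eqref{eq:1525}--\eqref{eq:1526} can be checked directly; the following sketch uses only the substitution defining $Y_\ve$ and the Brownian scaling property. Write $c_\ve \coloneqq \ve^{\frac{2(1-\beta)}{1+\beta}}$, so that $Y_\ve(t)= \ve^{\frac{-2}{1+\beta}} X_\ve(c_\ve t)$ and
\[
d Y_\ve(t)= \ve^{\frac{-2}{1+\beta}} c_\ve\, a\bigl(X_\ve(c_\ve t)\bigr)\,dt + \ve^{\frac{-2}{1+\beta}}\,\ve\, dw(c_\ve t).
\]
In the drift term, $\ve^{\frac{-2}{1+\beta}} c_\ve = \ve^{\frac{-2\beta}{1+\beta}}$ and $X_\ve(c_\ve t)= \ve^{\frac{2}{1+\beta}} Y_\ve(t)$, so this term equals $a_\ve(Y_\ve(t))\,dt$ with $a_\ve$ as in \eqref{eq:1526}. In the noise term, $\ve^{\frac{-2}{1+\beta}}\,\ve = \ve^{\frac{\beta-1}{1+\beta}} = c_\ve^{-1/2}$, and $w_\ve(t)= c_\ve^{-1/2} w(c_\ve t)$ is again a standard Wiener process by Brownian scaling, so this term equals $d w_\ve(t)$, which gives \eqref{eq:1525}. Moreover, for $y>0$,
\[
a_\ve(y)= \ve^{\frac{-2\beta}{1+\beta}} \bigl(\ve^{\frac{2}{1+\beta}} y\bigr)^{\beta} = y^{\beta} = a(y),
\]
in agreement with the observation after \eqref{eq:1526} that $a_\ve$ coincides with $a$ on $(0,\infty)$.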
2205.15728v1
http://arxiv.org/abs/2205.15728v1
Multiplicative Maps on Generalized n-matrix Rings
\documentclass[12]{article} \pagestyle{plain} \usepackage{amsmath,amssymb,amsthm,color} \usepackage{times,fancyhdr} \usepackage{graphicx} \usepackage{geometry} \usepackage{titlesec} \usepackage{cite} \usepackage{amssymb} \usepackage{amsmath} \usepackage{amsfonts} \usepackage{amsthm,amscd} \usepackage{latexsym} \usepackage{comment} \renewcommand{\baselinestretch}{1.2} \setlength{\textwidth}{16.5cm} \setlength{\textheight}{22cm} \newtheorem{thm}{Theorem}[section] \newtheorem{lemma}{Lemma}[section] \newtheorem{prop}{Proposition}[section] \newtheorem{cor}{Corollary}[section] \newtheorem{defn}{Definition}[section] \newtheorem{rem}{Remark}[section] \newtheorem{cla}{Claim}[section] \newcommand{\GF}{\mathbb{F}} \newcommand{\GL}{\mathbb{L}} \def\R{{\mathfrak R}\, } \def\M{{\mathfrak M}\, } \def\T{{\mathfrak T}\, } \def\G{{\mathfrak G}\, } \def\Z{{\mathfrak Z}\, } \def\ci{\begin{color}{red}\,} \def\cf{\end{color}\,} \def\proofname{\bf Proof} \begin{document} \begin{center}{\bf \LARGE Multiplicative Maps on Generalized $n$-matrix Rings}\\ \vspace{.2in} {\bf Bruno L. M. Ferreira}\\ {\it Federal University of Technology,\\ Professora Laura Pacheco Bastos Avenue, 800,\\ 85053-510, Guarapuava, Brazil.}\\ e-mail: [email protected]\\ and\\ {\bf Aisha Jabeen}\\ {\it Department of Applied Sciences \& Humanities,\\ Jamia Millia Islamia,\\ New Delhi-110025, India.}\\ e-mail: [email protected]\\ \end{center} \begin{abstract} Let $\mathfrak{R}$ and $\mathfrak{R}'$ be two associative rings (not necessarily with the identity elements). A bijective map $\varphi$ of $\mathfrak{R}$ onto $\mathfrak{R}'$ is called a \textit{$m$-multiplicative isomorphism} if {$\varphi (x_{1} \cdots x_{m}) = \varphi(x_{1}) \cdots \varphi(x_{m})$} for all $x_{1}, \cdots ,x_{m}\in \mathfrak{R}.$ In this article, we establish a condition on generalized $n$-matrix rings, that assures that multiplicative maps are additive on generalized $n$-matrix rings under certain restrictions. And then, we apply our result for study of $m$-multiplicative isomorphism and $m$-multiplicative derivation on generalized $n$-matrix rings. \end{abstract} \noindent {\bf 2010 Mathematics Subject Classification.} 16W99, 47B47, 47L35. \\ {\bf Keyword:} $m$-multiplicative maps, $m$-multiplicative derivations, generalized $n-$matrix rings, additivity. \section{Introduction} Let $\mathfrak{R}$ and $\mathfrak{R}'$ be two associative rings (not necessarily with the identity elements). We denote by $\mathfrak{Z}(\mathfrak{R})$ the center of $\mathfrak{R}.$ A bijective map $\varphi$ of $\mathfrak{R}$ onto $\mathfrak{R}'$ is called a \textit{$m$-multiplicative isomorphism} if\\ \centerline{$\varphi (x_{1} \cdots x_{m}) = \varphi(x_{1}) \cdots \varphi(x_{m})$}\\ for all $x_{1}, \cdots ,x_{m}\in \mathfrak{R}.$ In particular, if $m = 2$ then $\varphi$ is called a \textit{multiplicative isomorphism}. Similarly, a map $d$ of $\mathfrak{R}$ is called a \textit{$m$-multiplicative derivation} if\\ \centerline{$d(x_{1} \cdots x_{m}) = \sum _{i=1}^{m} x_{1} \cdots d(x_{i}) \cdots x_{m}$}\\ for all $x_{1}, \cdots ,x_{m}\in \mathfrak{R}.$ If $d(xy)=d(x)y + xd(y)$ for all $x, y\in \mathfrak{R}$, we just say that $d$ is a {\it multiplicative derivation} of $\mathfrak{R}$. \par In last few decades, the multiplicative mappings on rings and algebras has been studied by many authors \cite{Mart, Wang, Lu02, LuXie06, ChengJing08, LiXiao11}. Martindale \cite{Mart} established a condition on a ring such that multiplicative bijective mappings on this ring are all additive. 
In particular, every multiplicative bijective mapping from a prime ring containing a nontrivial idempotent onto an arbitrary ring is additive. Lu \cite{Lu02} studied multiplicative isomorphisms of subalgebras of nest algebras which contain all finite rank operators but might contain no idempotents and proved that these multiplicative mappings are automatically additive and linear or conjugate linear. Further, Wang in \cite{Wangc, Wang} considered the additivity of multiplicative maps on rings with idempotents and triangular rings respectively. Recently, in order to generalize the result in \cite{Wang} first author \cite{Ferreira}, defined a class of ring called triangular $n$-matrix ring and studied the additivity of multiplicative maps on that class of rings. In view of above discussed literature, in this article we discuss the additivity of multiplicative maps on a more general class of rings called generalized $n$-matrix rings. \par We adopt and follow the same structure of the article and demonstration presented in \cite{Ferreira}, in order to preserve the author ideas and to highlight the generalization of the triangular $n$-matrix results to the generalized $n$-matrix results. \begin{defn}\label{pri} Let $\R_1, \R_2, \cdots, \R_n$ be rings and $\M_{ij}$ $(\R_i, \R_j)$-bimodules with $\M_{ii} = \R_i$ for all $i, j \in \left\{1, \ldots, n\right\}$. Let $\varphi_{ijk}: \M_{ij} \otimes_{\R_j} \M_{jk} \longrightarrow \M_{ik}$ be $(\R_i, \R_k)$-bimodules homomorphisms with $\varphi_{iij}: \R_i \otimes_{\R_i} \M_{ij} \longrightarrow \M_{ij}$ and $\varphi_{ijj}: \M_{ij} \otimes_{\R_j} \R_j \longrightarrow \M_{ij}$ the canonical isomorphisms for all $i, j, k \in \left\{1, \ldots, n\right\}$. Write $a \circ b = \varphi_{ijk}(a \otimes b)$ for $a \in \M_{ij},$ $b \in \M_{jk}.$ We consider \begin{enumerate} \item[{\it (i)}] $\M_{ij}$ is faithful as a left $\R_i$-module and faithful as a right $\R_j$-module with $i\neq j,$ \item[{\it (ii)}] if $m_{ij} \in \M_{ij}$ is such that $\R_i m_{ij} \R_j = 0$ then $m_{ij} = 0$ with $i\neq j.$ \end{enumerate} Let \begin{eqnarray*} \G = \left\{\left( \begin{array}{cccc} r_{11} & m_{12} & \ldots & m_{1n}\\ m_{21}& r_{22} & \ldots & m_{2n}\\ \vdots & \vdots & \ddots & \vdots\\ m_{n1} & m_{n2} & \ldots & r_{nn}\\ \end{array} \right)_{n \times n}~ : ~\underbrace{ r_{ii} \in \R_{i} ~(= \M_{ii}), ~ m_{ij} \in \M_{ij}}_{(i, j \in \left\{1, \ldots, n\right\})} \right\}\end{eqnarray*} be the set of all $n \times n$ matrices $[m_{ij}]$ with the $(i, j)$-entry $m_{ij} \in \M_{ij}$ for all $i,j \in \left\{1, \ldots , n\right\}$. Observe that, with the obvious matrix operations of addition and multiplication, $\G$ is a ring iff $a \circ (b \circ c) = (a \circ b) \circ c$ for all $a \in \M_{ik}$, $b \in \M_{kl}$ and $c \in \M_{lj}$ for all $i, j, k, l \in \left\{1, \ldots, n\right\}$. When $\G$ is a ring, it is called a \textit{generalized $n-$matrix ring}. \end{defn} Note that if $n = 2,$ then we have the generalized matrix ring. We denote by $ \bigoplus^{n}_{i = 1} r_{ii}$ the element $$\left(\begin{array}{cccc} r_{11} & & & \\ & r_{22} & & \\ & & \ddots & \\ & & & r_{nn}\\ \end{array}\right)$$ in $\G.$ \pagestyle{fancy} \fancyhead{} \fancyhead[EC]{B. L. M. 
Ferreira} \fancyhead[EL,OR]{\thepage} \fancyhead[OC]{Multiplicative Maps on Generalized $n$-matrix Rings} \fancyfoot{} \renewcommand\headrulewidth{0.5pt} Set $\G_{ij}= \left\{\left(m_{kt}\right):~ m_{kt} = \left\{{ \begin{matrix} m_{ij}, & \textrm{if}~(k,t)=(i,j)\\ 0, & \textrm{if}~(k,t)\neq (i,j)\end{matrix}}, ~i, j \in \left\{1, \ldots, n\right\} \right\}.\right.$ Then we can write $\displaystyle \G = \bigoplus_{ i, j \in \left\{1, \ldots , n\right\}}\G_{ij}.$ Henceforth the element $a_{ij}$ belongs $\G_{ij}$ and the corresponding elements are in $\R_1, \cdots, \R_n$ or $\M_{ij}.$ By a direct calculation $a_{ij}a_{kl} = 0$ if $j \neq k.$ We define natural projections $\pi_{\R_{i}} : \G \longrightarrow \R_{i}$ $(1\leq i\leq n)$ by $$\left(\begin{array}{cccc} r_{11} & m_{12} & \ldots & m_{1n}\\ m_{21} & r_{22} & \ldots & m_{2n}\\ \vdots & \vdots & \ddots & \vdots\\ m_{n1 }& m_{n2} & \ddots & r_{nn}\\ \end{array}\right)\longmapsto r_{ii}.$$ The following result is a characterization of center of generalized $n$-matrix ring. \begin{prop}\label{seg} Let $\G$ be a generalized $n-$matrix ring. The center of $\G$ is \\ \centerline{$\mathfrak{Z}(\G) = \left\{ \bigoplus_{i=1}^{n} r_{ii} ~\Big|~ r_{ii}m_{ij} = m_{ij}r_{jj} \mbox{ for all } m_{ij} \in \M_{ij}, ~i \neq j\right\}.$}\\ Furthermore, $\mathfrak{\Z}(\G)_{ii} \cong \pi_{\R_i}(\mathfrak{Z}(\G))\subseteq \mathfrak{\Z}(\R_i)$, and there exists a unique ring isomorphism $\tau^j_{i}$ from $\pi_{\R_i}(\Z(\G))$ to $\pi_{\R_j}(\Z(\G))$ $i \neq j$ such that $r_{ii}m_{ij} = m_{ij}\tau^j_{i}(r_{ii})$ for all $m_{ij} \in \M_{ij}.$ \end{prop} \begin{proof} Let $S = \left\{ \bigoplus_{i=1}^{n} r_{ii} ~\Big|~ r_{ii}m_{ij} = m_{ij}r_{jj} \mbox{ for all } m_{ij} \in \M_{ij}, ~i \neq j\right\}.$ By a direct calculation we have that if $r_{ii} \in \Z(\R_i)$ and $r_{ii}m_{ij} = m_{ij}r_{jj}$ for every $m_{ij} \in \M_{ij}$ for all $ i \neq j $, then $ \bigoplus_{i=1}^{n} r_{ii} \in \Z(\G)$; that is, $ \left( \bigoplus_{i=1}^{n} \Z(\R_i) \right)\cap S \subseteq \Z(\G).$ To prove that $S = \Z(\G),$ we must show that $\Z(\G) \subseteq S$ and $S \subseteq \bigoplus_{i=1}^{n} \Z(\R_i).$\\ Suppose that $x = \left(\begin{array}{cccc} r_{11} & m_{12} & \ldots & m_{1n}\\ m_{21} & r_{22} & \ldots & m_{2n}\\ \vdots& \vdots & \ddots & \vdots\\ m_{n1} & m_{n2} & \ddots & r_{nn}\\ \end{array}\right) \in \Z(\G).$ Since $x\big( \bigoplus_{i=1}^{n} a_{ii}\big) = \big( \bigoplus_{i=1}^{n} a_{ii}\big)x$ for all $a_{ii} \in \R_{i},$ we have $a_{ii}m_{ij} = m_{ij}a_{jj}$ for $i \neq j$. Making $a_{jj} = 0$ we conclude $a_{ii}m_{ij} = 0$ for all $a_{ii} \in \R_{i}$ and so $m_{ij} = 0$ for all $i \neq j$ which implies that $x= \bigoplus_{i=1}^{n} r_{ii}$. 
Moreover, for any $m_{ij} \in \M_{ij}$ as $$x \left(\begin{array}{cccccccc} 0 & \ldots & 0 & \ldots & 0 & \cdots & 0\\ \vdots & \ddots & \vdots & & \vdots & & \vdots\\ 0 & \ldots & 0 & \ldots & m_{ij}& \ldots & 0\\ \vdots & &\vdots & \ddots & \vdots & & \vdots\\ 0 &\ldots & 0&\ldots & 0 & \ldots & 0 \\ \vdots & &\vdots & & \vdots & \ddots & \vdots \\ 0 & \ldots & 0 & \ldots & 0 & \ldots & 0 \end{array}\right) =\left(\begin{array}{cccccccc} 0 & \ldots & 0 & \ldots & 0 & \cdots & 0\\ \vdots & \ddots & \vdots & & \vdots & & \vdots\\ 0 & \ldots & 0 & \ldots & m_{ij}& \ldots & 0\\ \vdots & &\vdots & \ddots & \vdots & & \vdots\\ 0 &\ldots & 0&\ldots & 0 & \ldots & 0 \\ \vdots & &\vdots & & \vdots & \ddots & \vdots \\ 0 & \ldots & 0 & \ldots & 0 & \ldots & 0 \end{array}\right)x,$$ then $r_{ii}m_{ij} = m_{ij}r_{jj}$ for all $i \neq j$ which results in $\Z(\G) \subseteq S$. Now suppose $ x=\bigoplus_{i=1}^{n} r_{ii} \in S.$ Then for any $a_{ii} \in \R_i$ $(i=1, \cdots ,n-1),$ we have $(r_{ii}a_{ii} - a_{ii}r_{ii})m_{ij} = r_{ii}(a_{ii}m_{ij}) - a_{ii}(r_{ii}m_{ij}) = (a_{ii}m_{ij})r_{jj} - a_{ii}(m_{ij}r_{jj}) = 0$ for all $m_{ij} \in \M_{ij}$ $(i \neq j)$ and hence $r_{ii}a_{ii} - a_{ii}r_{ii} = 0$ as $\M_{ij}$ is left faithful $\R_i$-module. Now for $i = n$ we have $m_{in}(r_{nn}a_{nn} - a_{nn}r_{nn}) = m_{in}(r_{nn}a_{nn}) - m_{in}(a_{nn}r_{nn}) =(m_{in}r_{nn})a_{nn} - (m_{in}a_{nn})r_{nn}= (r_{ii}m_{in})a_{nn} - r_{ii}(m_{in}a_{nn}) = 0$ and hence $r_{nn}a_{nn} - a_{nn}r_{nn} = 0$ as $\M_{in}$ is right faithful $\R_n$-module. Therefore $r_{ii} \in \Z(\R_i),$ $i = 1, \cdots, n$. Hence, $ S \subseteq \bigoplus_{i=1}^{n} \Z(\R_i).$ \par The fact that $\pi_{\R_i}(\Z(\G)) \subseteq \Z(\R_i)$ for $i = 1 , \cdots , n$ are direct consequences of $ \Z(\G) = S\subseteq \bigoplus_{i=1}^{n} \Z(\R_i).$ Now we prove the existence of the ring isomorphism $\tau^j_i : \pi_{\R_i}(\Z(\G)) \longrightarrow \pi_{\R_j}(\Z(\G))$ for $i \neq j$. For this, let us consider a pair of indices $(i, j)$ such that $ i \neq j$. For any $ r=\bigoplus_{k=1}^{n} r_{kk} \in \Z(\G)$ let us define $\tau ^j_i(r_{ii})=r_{jj}$. The application is well defined because if $s= \bigoplus_{k=1}^{n} s_{kk} \in \Z(\G)$ is such that $s_{ii} = r_{ii}$, then we have $m_{ij}r_{jj} = r_{ii}m_{ij} = s_{ii}m_{ij}=m_{ij}s_{jj}$ for all $m_{ij} \in \M_{ij}$. Since $\M_{ij}$ is right faithful $\R_j$-module, we conclude that $r_{jj} = s_{jj}$. Therefore, for any $r_{ii} \in \pi_{\R_i}(\Z(\G)),$ there exists a unique $r_{jj} \in \pi_{\R_j}(\Z(\G)),$ denoted by $\tau ^j_i(r_{ii})$. It is easy to see that $\tau^j_i$ is bijective. Moreover, for any $r_{ii}, s _{ii} \in \pi_{\R_i}(\Z(\G))$ we have $m_{ij}\tau ^j_i(r_{ii} + s_{ii})=(r_{ii} + s_{ii})m_{ij} =m_{ij}(r_{jj} + s_{jj})=m_{ij}\big(\tau^j_i(r_{ii}) + \tau^j_i(s_{ii})\big)$ and $m_{ij}\tau^j_i(r_{ii}s_{ii}) = (r_{ii}s_{ii})m_{ij} = r_{ii}(s_{ii}m_{ij}) = (s_{ii}m_{ij})\tau^j_i(r_{ii}) = s_{ii}\big(m_{ij}\tau^j_i(r_{ii})\big) = m_{ij}\big( \tau^j_i(r_{ii})\tau^j_i(s_{ii})\big)$. Thus $\tau^j_i(r_{ii} + s_{ii}) = \tau^j_i(r_{ii}) + \tau^j_i(s_{ii})$ and $\tau^j_i(r_{ii}s_{ii}) = \tau^j_i(r_{ii})\tau^j_i (s_{ii})$ and so $\tau^j_i$ is a ring isomorphism. \end{proof} \begin{prop}\label{ter} Let $\G$ be a generalized $n-$matrix ring and $ i \neq j$ such that: \begin{enumerate} \item[\it (i)] $a_{ii}\R_i = 0$ implies $a_{ii} = 0$ for $a_{ii} \in \R_i$; \item[\it (ii)] $\R_j b_{jj} = 0$ implies $b_{jj} = 0$ for all $b_{jj} \in \R_j$. 
\end{enumerate} Then $u \G = 0$ or $\G u = 0$ implies $u =0$ for $u \in \G$. \end{prop} \begin{proof} First, let us observe that if $i \neq j$ and $\R_i a_{ii} = 0,$ then we have $\R_i a_{ii}m_{ij}\R_{j} = 0$, for all $m_{ij} \in \M_{ij}$, which implies $a_{ii}m_{ij} = 0$ by condition {\it (ii)} of the Definition \ref{pri}. It follows that $a_{ii}\M_{ij} = 0$ resulting in $a_{ii} = 0$. Hence, suppose $ u = \bigoplus_{i, j \in \left\{1, \ldots, n \right\}} u_{ij}$, with $u_{ij} \in \G_{ij}$, satisfying $u\G = 0$. Then $u_{kk}\R_k = 0$ which yields $u_{kk} = 0$ for $k = 1, \cdots, n-1$, by condition {\it (i)}. Now for $k = n$, $u_{nn}\R_n = 0,$ we have $\R_{i}m_{in}u_{nn}\R_{n}= 0$, for all $m_{in} \in \M_{in}$, which implies $m_{in}u_{nn} = 0$ by condition {\it (ii)} of the Definition \ref{pri}. It follows that $\M_{in}u_{nn} = 0$ which implies $u_{nn} = 0$. Thus $u_{ij}\R_j = 0$ and then $u_{ij} = 0$ by condition {\it (ii)} of the Definition \ref{pri}. Therefore $u = 0$. Similarly, we prove that if $\G u = 0$ then $u=0$. \end{proof} \section{The Main Theorem} Follows our main result which has the purpose of generalizing Theorem $2.1$ in \cite{Ferreira}. Our main result reads as follows. \begin{thm}\label{t11} Let $B : \G \times \G \longrightarrow \G$ be a biadditive map such that: \begin{enumerate} \item[{\it (i)}] $B(\G_{pp},\G_{qq})\subseteq \G_{pp}\cap \G_{qq}$; $B(\G_{pp},\G_{rs})\in \G_{rs}$ and $B(\G_{rs},\G_{pp})\in \G_{rs}$; $B(\G_{pq},\G_{rs})=0$; \item[{\it (ii)}] if $B(\bigoplus_{1\leq p\neq q\leq n} c_{pq}, \G_{nn}) = 0$ or $B(\bigoplus _{1\leq r<n} \G_{rr},\bigoplus_{1\leq p\neq q\leq n} c_{pq}) = 0$, then $\bigoplus_{1\leq p\neq q\leq n} c_{pq} = 0$; \item[{\it (iii)}] $B(\G_{nn}, a_{nn}) = 0$ implies $a_{nn} = 0$; \item[{\it (iv)}] if $B(\bigoplus_{p=1}^{n} c_{pp},\G_{rs}) = B(\G_{rs},\bigoplus_{p=1}^{n} c_{pp}) = 0$ for all $1\leq r\neq s\leq n$, then $\bigoplus_{p=1}^{n-1} c_{pp} \oplus (-c_{nn}) \in \Z(\G)$; \item[{\it (v)}] $B(c_{pp},d_{pp}) = B(d_{pp},c_{pp})$ and $B(c_{pp},d_{pp})d_{pn}d_{nn} = d_{pp}d_{pn}B(c_{nn},d_{nn})$ for all $c=\bigoplus_{p=1}^{n} c_{pp} \in \Z(\G)$; \item[{\it (vi)}] $B\big(c_{rr},B(c_{kl},c_{nn})\big) = B\big(B(c_{rr},c_{kl}), c_{nn}\big)$. \end{enumerate} Suppose $f: \G \times \G \longrightarrow \G$ a map satisfying the following conditions: \begin{enumerate} \item[\it (vii)] $f(\G,0) = f(0,\G) = 0$; \item[\it (viii)] $B\big(f(x,y),z\big) = f\big(B(x,z),B(y,z)\big)$; \item[\it (ix)] $B\big(z,f(x,y)\big) = f\big(B(z,x),B(z,y)\big)$ \end{enumerate} for all $x,y,z \in \G$. Then $f = 0$. \end{thm} \begin{proof} Following the ideas of Ferreira in \cite{Ferreira} we divide the proof into the four cases. Then, let us consider arbitrary elements $x_{kl}, u_{kl}, a_{kl} \in \G_{kl}$ $( k, l \in \left\{1, \ldots, n\right\})$.\\ \noindent \textit{First case.} In this first case the reader should keep in mind that we want to show $$f \big(\sum _{1\leq i< n} x_{ii}, \sum _{1\leq j\neq k\leq n} x_{jk}\big)=0.$$ From the hypotheses of the theorem, we have \begin{eqnarray*} B\left(f \big(\sum _{1\leq i< n} x_{ii}, \linebreak \sum _{1\leq j\neq k\leq n} x_{jk}\big),a_{nn}\right) &=& f\big(B\left(\sum _{1\leq i< n} x_{ii}, a_{nn}\right), B\left(\sum _{1\leq j\neq k\leq n} x_{jk}, a_{nn}\right)\big) \\ &=& f\big(0, B\left(\sum _{1\leq j\neq k\leq n} x_{jk}, a_{nn}\right)\big) \\&=& 0. 
\end{eqnarray*} Now by condition $(i)$, this implies that $$ B\left(\sum _{1\leq p, q\leq n} f (\sum _{1\leq i< n} x_{ii}, \sum _{1\leq j\neq k\leq n} x_{jk})_{pq}, a_{nn}\right)=0.$$ Since $$\displaystyle B\left(\sum _{1\leq p< n}f(\sum _{1\leq i< n} x_{ii}, \sum _{1\leq j\neq k\leq n} x_{jk})_{pp}, a_{nn}\right) = 0,$$ $$\displaystyle B\left(\sum _{1\leq p\neq q\leq n} \linebreak f (\sum _{1\leq i< n} x_{ii}, \sum _{1\leq j\neq k\leq n} x_{jk})_{pq}, a_{nn}\right)\in \bigoplus _{1\leq p\neq q\leq n}\G_{pq}$$ and $$ B\left(f (\sum _{1\leq i< n} x_{ii},\linebreak \sum _{1\leq j\neq k\leq n} x_{jk})_{nn}, a_{nn}\right) \in \G_{nn},$$ then $$ \sum _{1\leq p\neq q\leq n}f(\sum _{1\leq i< n} x_{ii}, \sum _{1\leq j\neq k\leq n} x_{jk})_{pq} = 0\mbox{~by~ condition ~(ii)~}.$$ Next, we have \begin{eqnarray*} B\left(a_{nn}, f (\sum _{1\leq i< n} x_{ii}, \sum _{1\leq j\neq k\leq n} x_{jk})\right) &=& f\left(B(a_{nn}, \sum _{1\leq i< n} x_{ii}), B(a_{nn}, \sum _{1\leq j \neq k\leq n} x_{jk})\right)\\ &=& f\left(0, B(a_{nn}, \sum _{1\leq j\neq k\leq n} x_{jk})\right) \\&=& 0 \end{eqnarray*} which implies $$\sum _{1\leq p, q\leq n} B\left(a_{nn},f (\sum _{1\leq i< n} x_{ii}, \sum _{1\leq j\neq k\leq n} x_{jk})_{pq}\right)=0.$$ It follows that $$B\left(a_{nn}, \sum _{1\leq p< n}f(\sum _{1\leq i< n} x_{ii}, \sum _{1\leq j\neq k\leq n} x_{jk})_{pp}\right) = 0,$$ $$B\left(a_{nn}, \sum _{1\leq p\neq q\leq n}f (\sum _{1\leq i< n} x_{ii}, \sum _{1\leq j\neq k\leq n} x_{jk})_{pq}\right)\in \bigoplus _{1\leq p\neq q\leq n}\G_{pq}$$ and $$B\left(a_{nn}, f (\sum _{1\leq i< n} x_{ii}, \sum _{1\leq j\neq k\leq n} x_{jk})_{nn}\right) \in \G_{nn}.$$ Hence, $$B\left(a_{nn}, f (\sum _{1\leq i< n} x_{ii},\linebreak \sum _{1\leq j\neq k\leq n} x_{jk})_{nn}\right)=0$$ which yields $$ f (\sum _{1\leq i< n} x_{ii}, \sum _{1\leq j\neq k\leq n} x_{jk})_{nn} = 0$$ by condition $(iii)$. Yet, we have \begin{eqnarray*} B\left(\sum _{1\leq p<n} f(\sum _{1\leq i< n} x_{ii}, \sum _{1\leq j\neq k\leq n} x_{jk})_{pp}, a_{rs}\right) &=& B\left(f (\sum _{1\leq i< n} x_{ii}, \sum _{1\leq j\neq k\leq n} x_{jk}), a_{rs}\right) \\ &=& f\left(B(\sum _{1\leq i< n} x_{ii}, a_{rs}), B(\sum _{1\leq j\neq k\leq n} x_{jk}, a_{rs})\right)\\ &=&f\left(B(\sum _{1\leq i< n} x_{ii}, a_{rs}),0\right)\\ &=&0 \end{eqnarray*} and \begin{eqnarray*} B\left(a_{rs}, \sum _{1\leq p<n} f (\sum _{1\leq i< n} x_{ii}, \sum _{1\leq j\neq k\leq n} x_{jk})_{pp}\right) &=& B\left(a_{rs}, f (\sum _{1\leq i< n} x_{ii}, \sum _{1\leq j\neq k\leq n} x_{jk})\right) \\ &=& f\left(B(a_{rs}, \sum _{1\leq i< n} x_{ii}), B(a_{rs}, \sum _{1\leq j\neq k\leq n} x_{jk}) \right)\\ &=& f\left(B(a_{rs}, \sum _{1\leq i< n} x_{ii}), 0 \right) \\ &=& 0. \end{eqnarray*} It follows that $\displaystyle \sum _{1\leq p<n} f (\sum _{1\leq i< n} x_{ii}, \sum _{1\leq j\neq k\leq n} x_{jk})_{pp}+ 0 \in \Z(\G)$ and so $$ \sum _{1\leq p<n} f (\sum _{1\leq i< n} x_{ii}, \sum _{1\leq j\neq k\leq n} x_{jk})_{pp} = 0$$ by Proposition \ref{seg}. Consequently, we have $ f \left(\sum _{1\leq i< n} x_{ii}, \sum _{1\leq j\neq k\leq n} x_{jk}\right)=0.$ \\ \noindent \textit{Second case. 
} In the second case it must be borne in mind that we want to show $$f (\sum _{1\leq i\neq j\leq n} x_{ij},\sum _{1\leq k\neq l\leq n} y_{kl})=0.$$ From the hypotheses of the theorem ,we have \begin{eqnarray*} B\left(\sum _{1\leq p, q\leq n} f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl})_{pq}, a_{rs}\right) &=&B\left(f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl}), a_{rs}\right)\\ &=& f \left(B(\sum _{1\leq i\neq j\leq n} x_{ij}, a_{rs}), B(\sum _{1\leq k\neq l\leq n} y_{kl}, a_{rs})\right)\\ &=& f(0,0)\\ &=&0. \end{eqnarray*} Since $\displaystyle B\left(\sum _{1\leq p\neq q\leq n}f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl})_{pq}, a_{rs}\right)=0$, then $$ \centerline{$\displaystyle B\big(\sum _{1\leq p\leq n} f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl})_{pp}, a_{rs}\big)=0$.} $$ Smilarly, we prove that $$ B\left(a_{rs},\sum _{1\leq p\leq n} f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl})_{pp}\right)=0.$$ By condition $(iv),$ it follows that \begin{eqnarray}\label{centro} &&\sum _{1\leq p< n} f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl})_{pp} + \left(- f(\sum _{1\leq i\neq j\leq n}x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl})_{nn}\right) \in \Z(\G). \end{eqnarray} Now, we observe that \begin{eqnarray*} B\left(f (\sum _{1\leq i\neq j\leq n} x_{ij},\sum _{1\leq k\neq l\leq n} y_{kl}), a_{nn}\right) &=&f\big(B\left(\sum _{1\leq i\neq j\leq n} x_{ij},a_{nn}\right),B\left(\sum _{1\leq k\neq l\leq n} y_{kl},a_{nn}\right)\big)\\ &=&f\big(\sum _{1\leq i\neq j\leq n} B(x_{ij},a_{nn}),\sum _{1\leq k\neq l\leq n}B( y_{kl},a_{nn})\big). \end{eqnarray*} With (\ref{centro}), this implies that $$ \sum _{1\leq p< n} B\left(f (\sum _{1\leq i\neq j\leq n}\linebreak x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl}),a_{nn}\right)_{pp}+\big(- B\left(f (\sum _{1\leq i\neq j\leq n} x_{ij},\sum _{1\leq k\neq l\leq n} y_{kl}),a_{nn}\right)_{nn}\big)\in \Z(\G).$$ Since $\displaystyle B\left(f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl}), a_{nn}\right)\in \bigoplus _{1\leq p\neq q\leq n} \G_{pq}\bigoplus \G_{nn}$ then\\ $\displaystyle \sum _{1\leq p< n} B\left(f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n}y_{kl}),a_{nn}\right)_{pp}=0$ which results in $$\displaystyle B\left(f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl}),a_{nn}\right)_{nn}=0 \mbox{~by ~Proposition ~\ref{seg}}.$$ Hence $\displaystyle B\left(f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl}),a_{nn}\right)\in \bigoplus _{1\leq p\neq q\leq n} \G_{pq}$. 
It follows that \begin{eqnarray*} \lefteqn{B\left(a_{rr}, B\left(f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl}), a_{nn}\right)\right)}\\ &=&B\left(a_{rr},f \big(B\left(\sum _{1\leq i\neq j\leq n} x_{ij}, a_{nn}\right), B\left(\sum _{1\leq k\neq l\leq n} y_{kl}, a_{nn}\right)\big)\right) \\ &=&f \big(B\left(a_{rr}, B\left(\sum _{1\leq i\neq j\leq n} x_{ij}, a_{nn}\right)\right), B\left(a_{rr}, B\left(\sum _{1\leq k\neq l\leq n} y_{kl}, a_{nn}\right)\right)\big) \\ &=&f \big(B\left(a_{rr}, B\left(\sum _{1\leq i\neq j\leq n} x_{ij}, a_{nn}\right)\right), B\left(B\left(a_{rr}, \sum _{1\leq k\neq l\leq n} y_{kl}\right), a_{nn}\right)\big) \\ &=&f\big(B\left(a_{rr}, a_{nn} + B\left(\sum _{1\leq i\neq j\leq n} x_{ij}, a_{nn}\right)\right), B\left(B\left(a_{rr}, \sum _{1\leq k\neq l\leq n} y_{kl}\right), a_{nn} + B\left(\sum _{1\leq i\neq j\leq n} x_{ij}, a_{nn}\right)\right)\big)\\ &=&B\left(f\big(a_{rr}, \linebreak B\left(a_{rr}, \sum _{1\leq k\neq l\leq n} y_{kl}\right)\big), a_{nn} + B\left(\sum _{1\leq i\neq j\leq n} x_{ij}, a_{nn}\right)\right) \\ &=& B\left(0, a_{nn} +B\left(\sum _{1\leq i\neq j\leq n} x_{ij}, a_{nn}\right)\right)\\ &=&0 \end{eqnarray*} by first case, for all $1\leq r<n$. \par So $\displaystyle B\left(f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl}), a_{nn}\right)= 0$, by condition $(ii)$. It follows that \begin{eqnarray*} \lefteqn{\sum _{1\leq p\leq n} B\left(f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl})_{pp}, a_{nn}\right)}\\ &&+\sum _{1\leq p\neq q\leq n} B\left(f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl})_{pq}, a_{nn}\right)=0 \end{eqnarray*} which yields $$B\left(\sum _{1\leq p\neq q\leq n}f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl})_{pq}, a_{nn}\right)=0$$ and so $$\sum _{1\leq p\neq q\leq n}f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl})_{pq}=0 \mbox{ ~by~ condition ~(ii).} $$ Hence, \begin{eqnarray*} B\left(a_{nn},f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl})_{nn}\right) &=&B\left(a_{nn},f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl})\right)\\ &=& f\big(B\left(a_{nn},\sum _{1\leq i\neq j\leq n} x_{ij}\right),B\left(a_{nn},\sum _{1\leq k\neq l\leq n} y_{kl}\right)\big) \end{eqnarray*} and by (\ref{centro}) above we have \begin{eqnarray*} \lefteqn{\sum _{1\leq p<n} B\left(a_{nn},f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl})_{nn}\right)_{pp}}\\ && +\big(-B\left(a_{nn},f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl})_{nn}\right)_{nn}\big)\in \Z(\G). \end{eqnarray*} Since $$B\left(a_{nn},f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl})_{nn}\right)\in \G_{nn}$$ then we have $$\sum _{1\leq p<n} B\left(a_{nn},f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl})_{nn}\right)_{pp}=0$$ and so \begin{eqnarray*} B\left(a_{nn},f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl})_{nn}\right)&=&B\left(a_{nn},f (\sum _{1\leq i\neq j\leq n} x_{ij},\sum _{1\leq k\neq l\leq n} y_{kl})_{nn}\right)_{nn}=0, \end{eqnarray*} by Proposition \ref{seg}. It follows that $\displaystyle f (\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl})_{nn}=0$, by condition $(iii)$, which implies $$\displaystyle \sum _{1\leq p< n}f(\sum _{1\leq i\neq j\leq n} x_{ij}, \sum _{1\leq k\neq l\leq n} y_{kl})_{pp}=0,$$ by (\ref{centro}). 
Consequently, we have $$\displaystyle f (\sum _{1\leq i\neq j\leq n} x_{ij},\sum _{1\leq k\neq l\leq n} y_{kl})=0.$$ \\ \noindent \textit{Third case.} Here, in the third case, we are interested in checking $$f\big(\sum _{1\leq p< n} x_{pp} + \sum _{1\leq p\neq q\leq n} x_{pq}, \sum _{1\leq k< n} u_{kk} + \sum _{1\leq k\neq l\leq n} u_{kl}\big)=0.$$ In view of second case, we Observe that \begin{eqnarray*} \lefteqn{B\left(f\big(\sum _{1\leq p< n} x_{pp} + \sum _{1\leq p\neq q\leq n} x_{pq}, \sum _{1\leq k< n} u_{kk} + \sum _{1\leq k\neq l\leq n} u_{kl}\big), a_{rs}\right)}\\ &=&f (B\left(\sum _{1\leq p< n} x_{pp} + \sum _{1\leq p\neq q\leq n} x_{pq}, a_{rs}\right), B\left(\sum _{1\leq k< n} u_{kk} + \sum _{1\leq k\neq l\leq n} u_{kl}, a_{rs}\right))\\ &=& f\big(\sum _{1\leq p<n}B(x_{pp}, a_{rs}),\sum _{1\leq k<n} B(u_{kk}, a_{rs})\big)\\&=& 0. \end{eqnarray*} It follows that $$\sum _{1\leq t\leq n} B\left(f \big(\sum _{1\leq p< n} x_{pp} + \sum _{1\leq p\neq q\leq n} x_{pq}, \sum _{1\leq k< n} u_{kk} + \sum _{1\leq k\neq l\leq n} u_{kl}\big)_{tt}, a_{rs}\right)= 0.$$ Similarly, we have $$\sum _{1\leq t\leq n} B\left(a_{rs},f\big( \sum _{1\leq p< n} x_{pp} + \sum _{1\leq p\neq q\leq n} x_{pq}, \sum _{1\leq k< n} u_{kk} + \sum _{1\leq k\neq l\leq n} u_{kl}\big)_{tt}\right)= 0.$$ It follows that \begin{eqnarray*} \lefteqn{\sum _{1\leq t< n} f\big(\sum _{1\leq p< n} x_{pp} + \sum _{1\leq p\neq q\leq n} x_{pq}, \sum _{1\leq k< n} u_{kk} + \sum _{1\leq k\neq l\leq n} u_{kl}\big)_{tt}}\\ &&+ \big(- f \big(\sum _{1\leq p< n} x_{pp} + \sum _{1\leq p\neq q\leq n} x_{pq}, \sum _{1\leq k< n} u_{kk} + \sum _{1\leq k\neq l\leq n} u_{kl}\big)_{nn}\in \Z(\G) \end{eqnarray*} by condition $(iv)$. But \begin{eqnarray*} \lefteqn{B\left(f\big(\sum _{1\leq p< n} x_{pp} + \sum _{1\leq p\neq q\leq n} x_{pq}, \sum _{1\leq k< n} u_{kk} + \sum _{1\leq k\neq l\leq n} u_{kl}\big), a_{nn}\right)}\\ &=&f \big(B\left(\sum _{1\leq p< n} x_{pp} + \sum _{1\leq p\neq q\leq n} x_{pq}, a_{nn}\right), B\left(\sum _{1\leq k< n} u_{kk} + \sum _{1\leq k\neq l\leq n} u_{kl}, a_{nn}\right)\big)\\ &=&f \big(B\left(\sum _{1\leq p\neq q\leq n} x_{pq}, a_{nn}\right), B\left(\sum _{1\leq k\neq l\leq n} u_{kl}, a_{nn}\right)\big)\\ &=&f \big(\sum_{1\leq p\neq q\leq n} B\left(x_{pq}, a_{nn}\right), \sum _{1\leq k\neq l\leq n} B\left(u_{kl}, a_{nn}\right)\big)\\&=&0 \end{eqnarray*} by second case. As a result, we have $$\displaystyle \sum _{1\leq r\neq s\leq n}f\big(\sum _{1\leq p< n} x_{pp} + \sum _{1\leq p\neq q\leq n} x_{pq}, \sum _{1\leq k< n} u_{kk} + \sum _{1\leq k\neq l\leq n} u_{kl}\big)_{rs}=0 \mbox{~by~ condition ~(ii).}$$ Hence from the second case \begin{eqnarray*} \lefteqn{B\left(a_{nn},f\big(\sum _{1\leq p< n} x_{pp} + \sum _{1\leq p\neq q\leq n} x_{pq}, \sum _{1\leq k< n} u_{kk} + \sum _{1\leq k\neq l\leq n} u_{kl}\big)\right)}\\ &=&f \big(B\left(a_{nn},\sum _{1\leq p< n} x_{pp} + \sum _{1\leq p\neq q\leq n} x_{pq}\right), B\left(a_{nn},\sum _{1\leq k< n} u_{kk} + \sum _{1\leq k\neq l\leq n} u_{kl}\right)\big)\\ &=&f \big(B\left(a_{nn},\sum _{1\leq p\neq q\leq n} x_{pq}\right), B\left(a_{nn},\sum _{1\leq k\neq l\leq n} u_{kl}\right)\big)\\ &=&f \big(\sum _{1\leq p\neq q\leq n} B\big(a_{nn},x_{pq}\big), \sum _{1\leq k\neq l\leq n} B\big(a_{nn},u_{kl}\big)\big)\\&=&0. 
\end{eqnarray*} This implies $$\displaystyle B\left(a_{nn},f\big(\sum _{1\leq p< n} x_{pp} + \sum _{1\leq p\neq q\leq n} x_{pq}, \sum _{1\leq k< n} u_{kk} + \sum _{1\leq k\neq l\leq n} u_{kl}\big)_{nn}\right)=0.$$ Thus $$\displaystyle f\big(\sum _{1\leq p< n} x_{pp} + \sum _{1\leq p\neq q\leq n} x_{pq}, \sum _{1\leq k< n} u_{kk} + \sum _{1\leq k\neq l\leq n} u_{kl}\big)_{nn}=0$$ implying $$\displaystyle \sum _{1\leq t< n} f\big(\sum _{1\leq p< n} x_{pp} + \sum _{1\leq p\neq q\leq n} x_{pq}, \sum _{1\leq k< n} u_{kk} + \sum _{1\leq k\neq l\leq n} u_{kl}\big)_{tt}=0$$ by condition $(iii)$. Therefore, $$\displaystyle f\big(\sum _{1\leq p< n} x_{pp} + \sum _{1\leq p\neq q\leq n} x_{pq}, \sum _{1\leq k< n} u_{kk} + \sum _{1\leq k\neq l\leq n} u_{kl}\big)=0.$$\\ \noindent \textit{Fourth case.} Finally in the last case we show that $f = 0$.\\ Since $\displaystyle B\left(\sum_{1 \leq p , q \leq n}x_{pq}, y_{rs}\right) \subseteq \G_{rs}$ we have $ B(f (x, u), a_{rs}) = f (B(x, a_{rs}), B(u, a_{rs})) = 0.$ Then by second case, we obtain $$\displaystyle B\left(\sum_{1\leq p \leq n}f (x, u)_{pp} , a_{rs}\right) = 0.$$ Similarly, we have $$\displaystyle B\left(a_{rs}, \sum_{1\leq p \leq n}f (x, u)_{pp}\right) = 0.$$ It follows from condition $(iv)$ that $\displaystyle \sum_{1\leq p< n}f (x, u)_{pp}+(-f(x,u)_{nn}) \in \Z(\G)$. \par Now as $\displaystyle B\left(\sum_{1 \leq r< n}y_{rr}, y\right) \subseteq \sum_{1 \leq r< n}\G_{rr}+\sum_{1 \leq r\neq s \leq n}\G_{rs}$ then by third case, we have $$\displaystyle B\left(\sum_{1 \leq r< n}a_{rr},f (x, u)\right) = f \big(B\left(\sum_{1 \leq r< n}a_{rr},x\right), B\left(\sum_{1 \leq r< n}a_{rr},u\right)\big) = 0.$$ It follows that $\displaystyle B\left(\sum_{1 \leq r< n}a_{rr},\sum_{1 \leq r< n}f (x, u)_{rr}+\sum_{1 \leq r\neq s\leq n}f(x,u)_{rs}\right)=0$ implying \begin{enumerate} \item[ (1)] $\displaystyle B\left(\sum_{1 \leq r< n}a_{rr},\sum_{1 \leq r< n}f (x, u)_{rr}\right)=0$, \item[ (2)] $\displaystyle B\left(\sum_{1 \leq r< n}a_{rr},\sum_{1 \leq r\neq s\leq n}f(x,u)_{rs}\right)=0$. \end{enumerate} By identity $(1)$ above we have $\displaystyle \sum_{1 \leq r< n}B\big(a_{rr},f (x, u)_{rr}\big)=0$ resulting $B\big(a_{rr},f (x, u)_{rr}\big)=0$ for all $1 \leq r< n$. We deduce \begin{eqnarray*} 0&=&B\big(a_{rr},f (x, u)_{rr}\big)a_{rn}a_{nn}\\ &=&B\big(f (x, u)_{rr},a_{rr}\big)a_{rn}a_{nn}\\ &=&a_{rr}a_{rn}B\big(-f(x, u)_{nn},a_{nn}\big)\\ &=&a_{rr}a_{rn}B\big(a_{nn},-f(x, u)_{nn}\big) \end{eqnarray*} for all $r<n$, by condition $(v)$. It follows that $B\big(a_{nn},f(x, u)_{nn}\big)=0$ which implies $f(x, u)_{nn}=0$, by condition $(iii)$. Thus, we have $\displaystyle \sum_{1\leq p< n}f (x, u)_{pp}=0$. Now, by identity $(2)$, we have $\displaystyle \sum_{1 \leq r\neq s\leq n}f(x,u)_{rs}=0$ by condition $(ii)$. Hence, we conclude that $f=0$. \end{proof} \begin{cor}\label{util} Let $\G$ be a generalized $n-$matrix ring such that \begin{enumerate} \item[\it (i)] For $a_{ii} \in \R_i$, if $a_{ii}\R_i$ = 0, then $a_{ii} = 0$; \item[\it (ii)] For $b_{jj} \in \R_j,$ if $\R_j b_{jj} = 0,$ then $b_{jj} = 0,$ \end{enumerate} where $1 \leq i \neq j\leq n.$ Let $k$ be a positive integer. 
If a map $f : \G \times \G \longrightarrow \G$ satisfies \begin{enumerate} \item[\it (i)] $f (\G, 0) = f (0, \G) = 0;$ \item[\it (ii)] $f (x, y)z_1z_2 \cdots z_k = f (xz_1z_2 \cdots z_k, yz_1z_2 \cdots z_k);$ \item[\it (iii)] $z_1z_2 \cdots z_kf (x, y) = f (z_1z_2 \cdots z_k x, z_1z_2 \cdots z_k y),$ \end{enumerate} for all $x, y, z_1, z_2, \cdots , z_k \in \G,$ then $f = 0.$ \end{cor} \begin{proof} We first claim that $f (x, y)z = f (xz, yz)$ and $zf (x, y) = f (zx, zy)$ for all $x, y, z \in \G.$ Indeed, since $$f (x, y)(zz_1)z_2 \cdots z_k = f (xzz_1z_2 \cdots z_k, yzz_1z_2 \cdots z_k) = f (xz, yz)z_1z_2 \cdots z_k,$$ that is, $(f (x, y)z - f (xz, yz))\G^k = 0.$ Hence $f (x, y)z = f (xz, yz)$ by Proposition \ref{ter}. Analogously, $zf (x, y) = f (zx, zy).$ Define $B : \G \times \G \longrightarrow \G$ by $B(x, y) = xy.$ It is easy to check that $B$ and $f$ satisfy the all conditions of Theorem \ref{t11}. Hence $f = 0.$ \end{proof} \section{Applications} \begin{thm} Let $\G$ be a generalized $n-$matrix ring such that \begin{enumerate} \item[\it (i)] For $a_{ii} \in \R_i$, if $a_{ii}\R_i$ = 0, then $a_{ii} = 0$; \item[\it (ii)] For $b_{jj} \in \R_j,$ if $\R_j b_{jj} = 0,$ then $b_{jj} = 0,$ \end{enumerate} where $1 \leq i \neq j\leq n.$ Then every $m-$multiplicative isomorphism from $\G$ onto a ring $\R$ is additive. \end{thm} \begin{proof} Suppose that $\varphi$ is a $m-$multiplicative isomorphism from $\G$ onto a ring $\R.$ Since $\varphi$ is onto, $\varphi(x) = 0$ for some $x \in \G.$ Then $\varphi(0) = \varphi(0 \cdots 0x) = \varphi(0) \cdots \varphi(0) \varphi(x) = \varphi(0) \cdots \varphi(0)0 = 0$ and so $\varphi^{-1}(0) = 0.$ Let us check that the conditions of the Corollary \ref{util} are satisfied. For every $x, y \in \G$ we define $f (x, y) = \varphi^{-1}(\varphi(x + y) - \varphi(x) - \varphi(y)), $ we see that $f (x, 0) = f (0, x) = 0$ for all $x \in \G.$ It is easy to check that $\varphi^{-1}$ is also a $m$-multiplicative isomorphism. Thus, for any $u_1, \cdots , u_{m-1} \in \G,$ we have \begin{eqnarray*} f (x, y)u_1 \cdots u_{m-1}&=& \varphi^{-1}(\varphi(x + y) - \varphi(x) - \varphi(y) ) \varphi^{-1}(\varphi(u_1)) \cdots \varphi^{-1}(\varphi(u_{m-1}))\\&=& \varphi^{-1}((\varphi(x + y) - \varphi(x) - \varphi(y))\varphi(u_1) \cdots \varphi(u_{m-1})) \\&=& f (xu_1 \cdots u_{m-1}, yu_1 \cdots u_{m-1}). \end{eqnarray*} Similarly we have $u_1 \cdots u_{m-1}f (x, y) = f (u_1 \cdots u_{m-1}x, u_1 \cdots u_{m-1}y)$. Therefore by Corollary \ref{util}, $f = 0.$ That is, $\varphi(x + y) = \varphi(x) + \varphi(y)$ for all $x, y \in \G.$ \end{proof} \begin{thm} Let $\G$ be a generalized $n-$matrix ring such that \begin{enumerate} \item[\it (i)] For $a_{ii} \in \R_i$, if $a_{ii}\R_i$ = 0, then $a_{ii} = 0$; \item[\it (ii)] For $b_{jj} \in \R_j,$ if $\R_j b_{jj} = 0,$ then $b_{jj} = 0,$ \end{enumerate} where $1 \leq i \neq j\leq n.$ Then any $m-$multiplicative derivation d of $\G$ is additive. \end{thm} \begin{proof} We define $f (x, y) = d(x + y) - d(x) - d(y)$, for any $x, y \in \G$. Hence $f$ defined in this way satisfy the conditions of Corollary \ref{util}. Therefore $f = 0$ and so $d(x + y) = d(x) + d(y).$ \end{proof} \begin{thebibliography}{99} \bibitem{ChengJing08} Cheng, X. H. and Jing, W. (2008) Additivity of maps on triangular algebras, {\it Electron. J. Linear Algebra} {17} 597-615. \bibitem{Daif} Daif, M. (1991) When is a multiplicative derivation additive?, {\it Internat. J Math. and Math. Sci.} { 14}, 615-618. \bibitem{Ferreira} Ferreira, B. L. M. 
(2014) Multiplicative maps on triangular $n$-matrix rings, {\it Internat. J. Math., Game Theory, and Algebra} { 23}, 1-14. \bibitem{LiXiao11} Li, Y. and Xiao, Z. (2011) Additivity of maps on generalized matrix algebras, {\it Electron. J. Linear Algebra} {22}, 743-757. \bibitem{Lu02} Lu, F. (2002) Multiplicative mappings of operator algebras, {\it Linear Algebra Appl.} {347}, 283-291. \bibitem{LuXie06} Lu, F. Y. and Xie, J. H. (2006) Multiplicative mappings of rings, {\it Acta Math Sinica} {22}, 1017-1020. \bibitem{Mart} Martindale III, W. S. (1969) When are multiplicative mappings additive?, {\it Proc. Amer. Math. Soc.} { 21}, 695-698. \bibitem{Zou} Tang, G. and Zhou, Y. (2013) A class of formal matrix rings, {\it Linear Algebra and its Applications} { 438}, 4672-4688. \bibitem{Wangc} Wang, Y. (2009) The additivity of multiplicative maps on rings, {\it Communications in Algebra} { 37}, 2351-2356. \bibitem{Wang} Wang, Y. (2011) Additivity of multiplicative maps on triangular rings, {\it Linear Algebra and its Applications} { 434}, 625-635. \end{thebibliography} \end{document}
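For a concrete instance of Definition \ref{pri} and Proposition \ref{seg}, included only as an illustration of a routine special case, take $n = 2$ and $\R_1 = \R_2 = \M_{12} = \M_{21} = \GF$ a field, with every $\varphi_{ijk}$ given by multiplication in $\GF$. Then conditions {\it (i)} and {\it (ii)} of Definition \ref{pri} hold trivially, $\G$ is the full matrix ring $M_2(\GF)$, and Proposition \ref{seg} yields
\[
\Z(\G) = \left\{ \begin{pmatrix} r & 0\\ 0 & r \end{pmatrix} : r \in \GF \right\},
\]
the scalar matrices, with $\tau^2_1$ the identity map of $\GF$. The bimodule structure is essential here: on a bare ring a multiplicative bijection need not be additive, as the map $x \mapsto x^3$ of $\mathbb{R}$ onto itself shows (it is bijective and satisfies $(xy)^3 = x^3 y^3$, while $(x+y)^3 \neq x^3 + y^3$ in general), so hypotheses such as those of Theorem \ref{t11} and Corollary \ref{util} cannot be dispensed with.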
2205.15032v4
http://arxiv.org/abs/2205.15032v4
Structure of non-negative posets of Dynkin type $\mathbb{A}_n$
\documentclass[a4paper,12pt]{article} \usepackage[top=2.5cm, bottom=2.5cm, left=2.5cm, right=2.5cm]{geometry} \def\cleverefoptions{capitalize} \usepackage{amsmath} \usepackage{amsthm} \usepackage{amssymb} \usepackage[colorlinks=true,citecolor=black,linkcolor=black,urlcolor=blue]{hyperref} \usepackage{cleveref} \usepackage[utf8]{inputenc} \usepackage[T1]{fontenc} \usepackage[dvipsnames]{xcolor} \usepackage{mathtools} \usepackage{hyphsubst} \HyphSubstLet{english}{usenglishmax} \usepackage{longtable} \usepackage{booktabs} \usepackage{dashrule} \usepackage[width=.85\textwidth]{caption} \usepackage[inline]{enumitem} \usepackage{siunitx} \usepackage{tikz} \usetikzlibrary{matrix,decorations.pathmorphing,decorations.markings,decorations.pathreplacing,arrows.meta} \usepackage{stackengine} \usepackage{adjustbox} \usepackage{float} \usepackage{pgfplots} \pgfplotsset{compat=1.5} \usepackage{lscape} \usepackage{xifthen} \makeatletter \pgfdeclaredecoration{complete sines}{initial} { \state{initial}[ width=+0pt, next state=sine, persistent precomputation={\pgfmathsetmacro\matchinglength{ \pgfdecoratedinputsegmentlength / int(\pgfdecoratedinputsegmentlength/\pgfdecorationsegmentlength)} \setlength{\pgfdecorationsegmentlength}{\matchinglength pt} }] {} \state{sine}[width=\pgfdecorationsegmentlength]{ \pgfpathsine{\pgfpoint{0.25\pgfdecorationsegmentlength}{0.5\pgfdecorationsegmentamplitude}} \pgfpathcosine{\pgfpoint{0.25\pgfdecorationsegmentlength}{-0.5\pgfdecorationsegmentamplitude}} \pgfpathsine{\pgfpoint{0.25\pgfdecorationsegmentlength}{-0.5\pgfdecorationsegmentamplitude}} \pgfpathcosine{\pgfpoint{0.25\pgfdecorationsegmentlength}{0.5\pgfdecorationsegmentamplitude}} } \state{final}{} } \makeatletter \makeatletter \DeclareRobustCommand{\cev}[1]{ {\mathpalette\do@cev{#1}}} \newcommand{\do@cev}[2]{ \vbox{\offinterlineskip \sbox\z@{$\m@th#1 x$} \ialign{##\cr \hidewidth\reflectbox{$\m@th#1\vec{}\mkern4mu$}\hidewidth\cr \noalign{\kern-\ht\z@} $\m@th#1#2$\cr } }} \makeatother \frenchspacing \def\ov#1{\overline{#1}} \def\wh#1{\widehat{#1}} \def\wt#1{\widetilde{#1}} \newcommand{\whd}[1]{\stackon[-6.5pt]{$\dot {#1}$}{$\widehat{}$}} \def\mod{\mathrm{mod}} \DeclareMathSymbol{\shortminus}{\mathbin}{AMSa}{"39} \newcommand{\ab}{\allowbreak} \newcommand{\eqdef}{\coloneqq} \renewcommand{\AA}{\mathbb{A}} \newcommand{\bAA}{\mathbf{A}} \newcommand{\DD}{\mathbb{D}} \newcommand{\EE}{\mathbb{E}} \newcommand{\ZZ}{\mathbb{Z}} \newcommand{\NN}{\mathbb{N}} \newcommand{\QQ}{\mathbb{Q}} \newcommand{\RR}{\mathbb{R}} \newcommand{\PP}{\mathbb{P}} \newcommand{\CCC}{\mathbb{C}} \newcommand{\MM}{\mathbb{M}} \newcommand{\CA}{\mathcal{A}} \newcommand{\CD}{\mathcal{D}} \newcommand{\CH}{\mathcal{H}} \newcommand{\CF}{\mathcal{F}} \newcommand{\CN}{\mathcal{N}} \newcommand{\bh}{\mathbf{h}} \DeclareMathOperator{\crk}{\mathbf{crk}} \DeclareMathOperator{\specc}{{\mathbf{specc}}} \newcommand{\sgn}{\mathrm{sgn}} \newcommand{\Dyn}{\mathrm{Dyn}} \newcommand{\Gl}{\mathrm{Gl}} \newcommand{\Ad}{\mathrm{Ad}} \DeclareMathOperator{\Ker}{Ker} \providecommand{\tikzsetnextfilename}[1]{}\providecommand{\tikzexternalenable}{}\providecommand{\tikzexternaldisable}{}\newcommand{\mppms}{\scalebox{0.75}[1]{$\scriptstyle -$}} \newcommand{\mppps}{\raisebox{.5pt}{\scalebox{0.75}{$\scriptstyle +$}}} \newcommand{\mppmss}{\scalebox{0.75}[1]{$\scriptscriptstyle -$}} \newcommand{\mpppss}{\raisebox{.375pt}{\scalebox{0.75}{$\scriptscriptstyle +$}}} \newcommand{\grafcrkzA}[2]{\tikzsetnextfilename{grafcrkzA_#1_#2} \begin{tikzpicture}[baseline=(current bounding box),label 
distance=-2pt, xscale=#1, yscale=#2] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:{\mscript{1}}] (n3) at (2, 0.5) {}; \node (n4) at (3, 0.5) {{\mscript{}}}; \node (n5) at (4, 0.5) {{\mscript{}}}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:{\mscript{p-1}}] (n6) at (5, 0.5) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:{\mscript{p}}] (n7) at (6, 0.5) {}; \node[inner sep=0pt, minimum size=3.5pt, label=above:{\mscript{r+1}}] (n8) at (7, 0.5) {$*$}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:{\mscript{p+1}}] (n11) at (2.5 , 0) {}; \node (n12) at (3.5 , 0) {{\mscript{}}}; \node (n13) at (4.5 , 0) {{\mscript{}}}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:{\mscript{r-1}}] (n14) at (5.5 , 0) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:{\mscript{r}}] (n15) at (6.5 , 0) {}; \foreach \x/\y in {6/7, 7/8, 14/15} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {4/5, 12/13} \draw [dotted, -, shorten <= -2.50pt, shorten >= -2.50pt] (n\x) to (n\y); \foreach \x/\y in {3/4, 11/12} \draw [-stealth, shorten <= 2.50pt, shorten >= -2.50pt] (n\x) to (n\y); \foreach \x/\y in {5/6, 13/14} \draw [-stealth, shorten <= -2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-stealth, shorten <= 1.5pt, shorten >= 0.50pt] (n15) to (n8); \end{tikzpicture} } \newcommand{\grafcrkzDi}[2]{\tikzsetnextfilename{grafcrkzDi_#1_#2} \begin{tikzpicture}[baseline=(current bounding box),label distance=-2pt, xscale=#1, yscale=#2] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:{\mscript{2}}] (n4) at (2, 0.5) {}; \node (n5) at (3, 0.5) {{\mscript{}}}; \node (n6) at (4, 0.5) {{\mscript{}}}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:{\mscript{r-1}}] (n7) at (5, 0.5) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:{\mscript{\phantom{1-}r\phantom{-1}}}] (n8) at (6, 0.5) {}; \node[inner sep=0pt, minimum size=3.5pt, label=above:{\mscript{r+1}}] (n9) at (7, 0.5) {$*$}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=left:{\mscript{1}}] (n1) at (5.5 , 0) {}; \foreach \x/\y in { 6/7, 7/8, 8/9} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [dotted, -, shorten <= -2.50pt, shorten >= -2.50pt] (n5) to (n6); \draw [-stealth, shorten <= 2.50pt, shorten >= -2.50pt] (n4) to (n5); \draw [-stealth, shorten <= -2.50pt, shorten >= 2.50pt] (n6) to (n7); \draw [-stealth, shorten <= 1.5pt, shorten >= 2.00pt] (n1) to (n8); \end{tikzpicture} } \newcommand{\grafcrkzDii}[2]{\tikzsetnextfilename{grafcrkzDii_#1_#2} \begin{tikzpicture}[baseline=(current bounding box),label distance=-2pt, xscale=#1, yscale=#2] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:{\mscript{1}}] (n1) at (2, 0.5) {}; \node (n4) at (3, 0.5) {{\mscript{}}}; \node (n5) at (4, 0.5) {{\mscript{}}}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:{\mscript{p-1}}] (n6) at (5, 0.5) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:{\mscript{p}}] (n7) at (6, 0.5) {}; \node[inner sep=0pt, minimum size=3.5pt, label=above:{\mscript{r+1}}] (n8) at (7, 0.5) {$*$}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:{\mscript{p+1}}] (n11) at (2.5 , 0) {}; \node (n12) at (3.5 , 0) {{\mscript{}}}; \node (n13) at (4.5 , 0) {{\mscript{}}}; \node[circle, fill=black, 
inner sep=0pt, minimum size=3.5pt, label=below:{\mscript{r-1}}] (n14) at (5.5 , 0) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:{\mscript{r}}] (n15) at (6.5 , 0) {}; \foreach \x/\y in {6/7, 7/8, 14/15} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {4/5, 12/13} \draw [dotted, -, shorten <= -2.50pt, shorten >= -2.50pt] (n\x) to (n\y); \foreach \x/\y in {1/4, 11/12} \draw [-stealth, shorten <= 2.50pt, shorten >= -2.50pt] (n\x) to (n\y); \foreach \x/\y in {5/6, 13/14} \draw [-stealth, shorten <= -2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-stealth, shorten <= 1.5pt, shorten >= 0.50pt] (n15) to (n8); \draw [-stealth,shorten <= 2.50pt,shorten >= 2.50pt] ([yshift=-2.5pt]n1.east) to ([yshift=3.5pt]n15.west); \end{tikzpicture} } \newcommand{\grafcrkzDiii}[2]{\tikzsetnextfilename{grafcrkzDiii_#1_#2} \begin{tikzpicture}[baseline=(current bounding box),label distance=-2pt, xscale=#1, yscale=#2] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:{\mscript{s\phantom{1}}}] (n1) at (2, 0.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label={[xshift=-0.6ex]left:{\mscript{s\mppps 1}}}] (n2) at (3, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:{\mscript{s\mppps 2}}] (n3) at (3, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label={[xshift=0.6ex]above:{\mscript{s\mppps 3}}}] (n4) at (4, 0.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptstyle 1$] (n5) at (0, 0.50) {}; \node (n6) at (1, 0.50) {}; \node (n7) at (5, 0.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:{\mscript{\phantom{1}p\phantom{1}}}] (n8) at (6, 0.50) {}; \node[inner sep=0pt, minimum size=3.5pt, label=above:{\mscript{r+1}}] (n9) at (7, 0.50) {$*$}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:{\mscript{\phantom{1}r\phantom{1}}}] (n10) at (6.50, 0 ) {}; \node (n11) at (5.50, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:{\mscript{p\mppps 1}}] (n12) at (4.50, 0 ) {}; \foreach \x/\y in {6/1, 11/10} \draw [-, densely dotted, gray!90, -, shorten <= -2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-, densely dotted, gray!90, -, shorten <= 2.50pt, shorten >= -2.50pt] (n4) to (n7); \foreach \x/\y in {1/2, 1/3, 2/4, 3/4} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {5/6, 12/11} \draw [-stealth, shorten <= 2.50pt, shorten >= -2.50pt] (n\x) to (n\y); \draw [-stealth, shorten <= -2.50pt, shorten >= 2.50pt] (n7) to (n8); \draw [-stealth, shorten <= 1.50pt, shorten >= 1.00pt] (n10) to (n9); \draw [-stealth, shorten <= 1.50pt, shorten >= 1.00pt] (n8) to (n9); \end{tikzpicture} } \newcommand{\ringv}[6]{\begin{adjustbox}{margin=0.5ex}\tikzsetnextfilename{ringv_#1_#2_#3_#4_#5_#6}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt, xscale=0.95, yscale=0.95] \foreach \n/\x/\y in {1/0.95106/0.30902, 2/0/1.0000, 3/-0.95106/0.30902, 4/-0.58779/-0.80902, 5/0.58779/-0.80902} \node[draw, fill=green!30, inner sep=0pt, minimum size=3.5pt] (n\n) at (\x, \y) {}; \draw[gray,dashed,line width=0.7] (0,0) circle [radius=0.80902]; \ifthenelse{#2=0}{ \draw[-{Stealth[scale=1.5,open,fill=white]}, shorten <= .50pt, shorten >= .50pt] (n1) to (n2);\node[circle, draw, fill=white, inner sep=0pt, minimum size=5.5pt,line width=0.7pt] at (0.47553, 0.65451) {}; }{ \draw [{Stealth[scale=1.5]}-, 
shorten <= .50pt, shorten >= .50pt] (n1) to (n2);\node[circle, fill, inner sep=0pt, minimum size=5.5pt] at (0.47553, 0.65451) {}; } \ifthenelse{#3=0}{ \draw[-{Stealth[scale=1.5,open,fill=white]}, shorten <= .50pt, shorten >= .50pt] (n2) to (n3);\node[circle, draw, fill=white, inner sep=0pt, minimum size=5.5pt,line width=0.7pt] at (-0.47553, 0.65451) {}; }{ \draw [{Stealth[scale=1.5]}-, shorten <= .50pt, shorten >= .50pt] (n2) to (n3);\node[circle, fill, inner sep=0pt, minimum size=5.5pt] at (-0.47553, 0.65451) {}; } \ifthenelse{#4=0}{ \draw[-{Stealth[scale=1.5,open,fill=white]}, shorten <= .50pt, shorten >= .50pt] (n3) to (n4);\node[circle, draw, fill=white, inner sep=0pt, minimum size=5.5pt,line width=0.7pt] at (-0.76942, -0.25000) {}; }{ \draw [{Stealth[scale=1.5]}-, shorten <= .50pt, shorten >= .50pt] (n3) to (n4);\node[circle, fill, inner sep=0pt, minimum size=5.5pt] at (-0.76942, -0.25000) {}; } \ifthenelse{#5=0}{ \draw[-{Stealth[scale=1.5,open,fill=white]}, shorten <= .50pt, shorten >= .50pt] (n4) to (n5);\node[circle, draw, fill=white, inner sep=0pt, minimum size=5.5pt,line width=0.7pt] at (0, -0.80902) {}; }{ \draw [{Stealth[scale=1.5]}-, shorten <= .50pt, shorten >= .50pt] (n4) to (n5);\node[circle, fill, inner sep=0pt, minimum size=5.5pt] at (0, -0.80902) {}; } \ifthenelse{#6=0}{ \draw[-{Stealth[scale=1.5,open,fill=white]}, shorten <= .50pt, shorten >= .50pt] (n5) to (n1);\node[circle, draw, fill=white, inner sep=0pt, minimum size=5.5pt,line width=0.7pt] at (0.76942, -0.25000) {}; }{ \draw [{Stealth[scale=1.5]}-, shorten <= .50pt, shorten >= .50pt] (n5) to (n1);\node[circle, fill, inner sep=0pt, minimum size=5.5pt] at (0.76942, -0.25000) {}; } \end{tikzpicture}\end{adjustbox}}\newcommand{\ringvi}[7] {\begin{adjustbox}{margin=0.5ex}\tikzsetnextfilename{ringvi_#1_#2_#3_#4_#5_#6_#7}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)}, label distance=-2pt, xscale=0.95, yscale=0.95] \foreach \n/\x/\y in {1/1.0000/0, 2/0.50000/0.86602, 3/-0.50000/0.86602, 4/-1.0000/0, 5/-0.50000/-0.86602, 6/0.50000/-0.86602} \node[draw, fill=green!30, inner sep=0pt, minimum size=3.5pt] (n\n) at (\x, \y) {}; \draw[gray,dashed,line width=0.7] (0,0) circle [radius=0.86602]; \ifthenelse{#2=0}{ \draw[-{Stealth[scale=1.5,open,fill=white]}, shorten <= .50pt, shorten >= .50pt] (n1) to (n2);\node[circle, draw, fill=white, inner sep=0pt, minimum size=5.5pt,line width=0.7pt] at (0.75000, 0.43301) {}; }{ \draw [{Stealth[scale=1.5]}-, shorten <= .50pt, shorten >= .50pt] (n1) to (n2);\node[circle, fill, inner sep=0pt, minimum size=5.5pt] at (0.75000, 0.43301) {}; } \ifthenelse{#3=0}{ \draw[-{Stealth[scale=1.5,open,fill=white]}, shorten <= .50pt, shorten >= .50pt] (n2) to (n3);\node[circle, draw, fill=white, inner sep=0pt, minimum size=5.5pt,line width=0.7pt] at (0, 0.86602) {}; }{ \draw [{Stealth[scale=1.5]}-, shorten <= .50pt, shorten >= .50pt] (n2) to (n3);\node[circle, fill, inner sep=0pt, minimum size=5.5pt] at (0, 0.86602) {}; } \ifthenelse{#4=0}{ \draw[-{Stealth[scale=1.5,open,fill=white]}, shorten <= .50pt, shorten >= .50pt] (n3) to (n4);\node[circle, draw, fill=white, inner sep=0pt, minimum size=5.5pt,line width=0.7pt] at (-0.75000, 0.43301) {}; }{ \draw [{Stealth[scale=1.5]}-, shorten <= .50pt, shorten >= .50pt] (n3) to (n4);\node[circle, fill, inner sep=0pt, minimum size=5.5pt] at (-0.75000, 0.43301) {}; } \ifthenelse{#5=0}{ \draw[-{Stealth[scale=1.5,open,fill=white]}, shorten <= .50pt, shorten >= .50pt] (n4) to (n5);\node[circle, draw, fill=white, inner sep=0pt, minimum size=5.5pt,line 
width=0.7pt] at (-0.75000, -0.43301) {}; }{ \draw [{Stealth[scale=1.5]}-, shorten <= .50pt, shorten >= .50pt] (n4) to (n5);\node[circle, fill, inner sep=0pt, minimum size=5.5pt] at (-0.75000, -0.43301) {}; } \ifthenelse{#6=0}{ \draw[-{Stealth[scale=1.5,open,fill=white]}, shorten <= .50pt, shorten >= .50pt] (n5) to (n6);\node[circle, draw, fill=white, inner sep=0pt, minimum size=5.5pt,line width=0.7pt] at (0.000, -0.86602) {}; }{ \draw [{Stealth[scale=1.5]}-, shorten <= .50pt, shorten >= .50pt] (n5) to (n6);\node[circle, fill, inner sep=0pt, minimum size=5.5pt] at (0.000, -0.86602) {}; } \ifthenelse{#7=0}{ \draw[-{Stealth[scale=1.5,open,fill=white]}, shorten <= .50pt, shorten >= .50pt] (n6) to (n1);\node[circle, draw, fill=white, inner sep=0pt, minimum size=5.5pt,line width=0.7pt] at (0.75000, -0.43301) {}; }{ \draw [{Stealth[scale=1.5]}-, shorten <= .50pt, shorten >= .50pt] (n6) to (n1);\node[circle, fill, inner sep=0pt, minimum size=5.5pt] at (0.75000, -0.43301) {}; } \end{tikzpicture}\end{adjustbox}}\newcommand{\mscript}[1]{{$\scriptscriptstyle #1$}} \newcommand{\grapheAn}[2]{\tikzexternaldisable\begin{tikzpicture}[label distance=-2pt, xscale=#1, yscale=#2] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{1}] (n1) at (0, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{2}] (n2) at (1, 0 ) {}; \node (n3) at (2, 0 ) {\mscript{}}; \node (n4) at (3, 0 ) {\mscript{}}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{n-1}] (n5) at (4, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{n}] (n6) at (5, 0 ) {}; \draw [-, shorten <= -2.50pt, shorten >= 2.50pt] (n4) to (n5); \draw [dotted, -, shorten <= -2.50pt, shorten >= -2.50pt] (n3) to (n4); \foreach \x/\y in {1/2, 5/6} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-, shorten <= 2.50pt, shorten >= -2.50pt] (n2) to (n3); \end{tikzpicture}\tikzexternalenable} \newcommand{\grapheDn}[2]{\tikzexternaldisable\begin{tikzpicture}[label distance=-2pt, xscale=#1, yscale=#2] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{1}] (n1) at (0, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:\mscript{2}] (n2) at (1, 0.6) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above right:\mscript{3}] (n3) at (1, 0 ) {}; \node (n4) at (2, 0 ) {\mscript{}}; \node (n5) at (3, 0 ) {\mscript{}}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{n-1}] (n6) at (4, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{n}] (n7) at (5, 0 ) {}; \draw [-, shorten <= -2.50pt, shorten >= 2.50pt] (n5) to (n6); \draw [dotted, -, shorten <= -2.50pt, shorten >= -2.50pt] (n4) to (n5); \foreach \x/\y in {1/3, 2/3, 6/7} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-, shorten <= 2.50pt, shorten >= -2.50pt] (n3) to (n4); \end{tikzpicture}\tikzexternalenable} \newcommand{\grapheEsix}[2]{\tikzexternaldisable\begin{tikzpicture}[label distance=-2pt, xscale=#1, yscale=#2] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{1}] (n1) at (0, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{2}] (n2) at (1, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above left:\mscript{3}] (n3) at (2, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, 
label=right:\mscript{4}] (n4) at (2, 0.6 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{5}] (n5) at (3, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{6}] (n6) at (4, 0 ) {}; \foreach \x/\y in {1/2, 2/3, 3/5, 4/3, 5/6} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture}\tikzexternalenable} \newcommand{\grapheEseven}[2]{\tikzexternaldisable\begin{tikzpicture}[label distance=-2pt, xscale=#1, yscale=#2] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{1}] (n1) at (0, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{2}] (n2) at (1, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{3}] (n3) at (2, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above right:\mscript{4}] (n4) at (3, 0) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:\mscript{5}] (n5) at (3, 0.6 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{6}] (n6) at (4, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{7}] (n7) at (5, 0 ) {}; \foreach \x/\y in {1/2, 2/3, 3/4, 4/5, 4/6, 6/7} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture}\tikzexternalenable} \newcommand{\grapheEeight}[2]{\tikzexternaldisable\begin{tikzpicture}[label distance=-2pt, xscale=#1, yscale=#2] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{1}] (n1) at (0, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{2}] (n2) at (1, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above right:\mscript{3}] (n3) at (2, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:\mscript{4}] (n4) at (2, 0.6) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{5}] (n5) at (3, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{6}] (n6) at (4, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{7}] (n7) at (5, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{8}] (n8) at (6, 0 ) {}; \foreach \x/\y in {1/2, 2/3, 3/5, 4/3, 5/6, 6/7, 7/8} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture}\tikzexternalenable} \newcommand{\grafPosDfiveCrkII}[2]{\tikzsetnextfilename{grafPosDfiveCrkII_#1_#2} \begin{tikzpicture}[baseline=(current bounding box),label distance=-2pt, xscale=#1, yscale=#2] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 1$] (n1) at (0, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 2$] (n2) at (1, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 3$] (n3) at (2, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 4$] (n4) at (0, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 5$] (n5) at (1, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 6$] (n6) at (2, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 7$] (n7) at (3, 0 ) {}; \node (n8) at (4, 
0 ) {$\scriptscriptstyle \phantom{8}$}; \foreach \x/\y in {1/6, 3/6} \draw [dashed, -, shorten <= 1.50pt, shorten >= 1.50pt] (n\x) to (n\y); \foreach \x/\y in {1/2, 1/4, 2/5, 4/5, 5/6, 6/7} \draw [-, shorten <= 1.50pt, shorten >= 1.50pt] (n\x) to (n\y); \end{tikzpicture} } \newcommand{\grafPosDsixCrkII}[2]{\tikzsetnextfilename{grafPosDsixCrkII_#1_#2}\tikzsetnextfilename{grafPosDsixCrkII} \begin{tikzpicture}[baseline=(current bounding box),label distance=-2pt, xscale=#1, yscale=#2] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 1$] (n1) at (2, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 2$] (n2) at (1, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 3$] (n3) at (3, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 4$] (n4) at (0, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 5$] (n5) at (1, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 6$] (n6) at (2, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 7$] (n7) at (3, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 8$] (n8) at (4, 0 ) {}; \foreach \x/\y in {1/7, 3/7} \draw [dashed, -, shorten <= 1.50pt, shorten >= 1.50pt] (n\x) to (n\y); \foreach \x/\y in {2/5, 1/5, 4/5, 5/6, 6/7, 7/8} \draw [-, shorten <= 1.50pt, shorten >= 1.50pt] (n\x) to (n\y); \end{tikzpicture} } \newcommand{\grapheRAn}[2]{\tikzexternaldisable\begin{tikzpicture}[label distance=-2pt, xscale=#1, yscale=#2] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{1}] (n1) at (0 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above right:\mscript{2}] (n2) at (1 , 0 ) {}; \node (n3) at (2 , 0 ) {\mscript{}}; \node (n4) at (3 , 0 ) {\mscript{}}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{n+1}] (n5) at (2.5, 0.6) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above left:\mscript{n-1}] (n6) at (4 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{n}] (n7) at (5 , 0 ) {}; \foreach \x/\y in {1/5, 5/7} \draw [bend left=7.0, -, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-, shorten <= -2.50pt, shorten >= 2.50pt] (n4) to (n6); \draw [dotted, -, shorten <= -2.50pt, shorten >= -2.50pt] (n3) to (n4); \foreach \x/\y in {1/2, 6/7} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-, shorten <= 2.50pt, shorten >= -2.50pt] (n2) to (n3); \end{tikzpicture}\tikzexternalenable} \newcommand{\grapheRDn}[2]{\tikzexternaldisable\begin{tikzpicture}[label distance=-2pt, xscale=#1, yscale=#2] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{1}] (n1) at (0 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:\mscript{2}] (n2) at (1 , 0.6) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above left:\mscript{3}] (n3) at (1 , 0 ) {}; \node (n4) at (2 , 0 ) {\mscript{}}; \node (n5) at (3 , 0 ) {\mscript{}}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:\mscript{n+1}] (n6) at (4 , 0.6) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above left:\mscript{n-1}] (n7) at (4 
, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{n}] (n8) at (5 , 0 ) {}; \draw [-, shorten <= -2.50pt, shorten >= 2.50pt] (n5) to (n7); \draw [dotted, -, shorten <= -2.50pt, shorten >= -2.50pt] (n4) to (n5); \foreach \x/\y in {1/3, 2/3, 6/7, 7/8} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-, shorten <= 2.50pt, shorten >= -2.50pt] (n3) to (n4); \end{tikzpicture}\tikzexternalenable} \newcommand{\grapheREsix}[2]{\tikzexternaldisable\begin{tikzpicture}[label distance=-2pt, xscale=#1, yscale=#2] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{1}] (n1) at (0 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{2}] (n2) at (1 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above right:\mscript{3}] (n3) at (2 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:\mscript{4}] (n4) at (2 , 0.6) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:\mscript{5}] (n5) at (2 , 1.2) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{6}] (n6) at (3 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{7}] (n7) at (4 , 0 ) {}; \foreach \x/\y in {1/2, 2/3, 3/6, 4/3, 5/4, 6/7} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture}\tikzexternalenable} \newcommand{\grapheREseven}[2]{\tikzexternaldisable\begin{tikzpicture}[label distance=-2pt, xscale=#1, yscale=#2] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{1}] (n1) at (0 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{2}] (n2) at (1 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above right:\mscript{3}] (n3) at (2 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above right:\mscript{4}] (n4) at (3 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:\mscript{5}] (n5) at (3 , 0.6) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{6}] (n6) at (4 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{7}] (n7) at (5 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{8}] (n8) at (6 , 0 ) {}; \foreach \x/\y in {1/2, 2/3, 3/4, 4/6, 5/4, 6/7, 7/8} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture}\tikzexternalenable} \newcommand{\grapheREeight}[2]{\tikzexternaldisable\begin{tikzpicture}[label distance=-2pt, xscale=#1, yscale=#2] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{1}] (n1) at (0 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{2}] (n2) at (1 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above right:\mscript{3}] (n3) at (2 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:\mscript{4}] (n4) at (2 , 0.6) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{5}] (n5) at (3 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{6}] (n6) at (4 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{7}] (n7) at (5 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, 
label=above:\mscript{8}] (n8) at (6 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:\mscript{9}] (n9) at (7 , 0 ) {}; \foreach \x/\y in {1/2, 2/3, 3/5, 4/3, 5/6, 6/7, 7/8, 8/9} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture}\tikzexternalenable} \makeatletter \let\c@figure\c@equation \let\c@table\c@equation \let\c@algorithm\c@equation \let\ftype@table\ftype@figure \makeatother \newtheorem{theorem}[equation]{Theorem} \newtheorem{lemma}[equation]{Lemma} \newtheorem{corollary}[equation]{Corollary} \newtheorem{proposition}[equation]{Proposition} \newtheorem{conjecture}[equation]{Conjecture} \newtheorem{open}[equation]{Open problem} \newtheorem{fact}[equation]{Fact} \theoremstyle{definition} \newtheorem{definition}[equation]{Definition} \newtheorem{problem}[equation]{Problem} \newtheorem{remark}[equation]{Remark} \newtheorem{example}[equation]{Example} \numberwithin{equation}{section} \numberwithin{table}{section} \numberwithin{figure}{section} \title{Structure of non-negative posets of Dynkin type $\AA_n$} \author{Marcin G\k{a}siorek\\ \small Faculty of Mathematics and Computer Science\\[-0.8ex] \small Nicolaus Copernicus University\\[-0.8ex] \small ul. Chopina 12/18, 87-100 Toru\'n, Poland\\ \small\tt [email protected]} \begin{document} \maketitle \begin{abstract} A poset $I=(\{1,\ldots, n\}, \leq_I)$ is called \textit{non-negative} if the symmetric Gram matrix $G_I:=\frac{1}{2}(C_I + C_I^{tr})\in\MM_n(\RR)$ is positive semi-definite, where $C_I\in\MM_n(\ZZ)$ is the $(0,1)$-matrix encoding the relation $\leq_I$. Every such a connected poset $I$, up to the $\ZZ$-congruence of the $G_I$ matrix, is determined by a unique simply-laced Dynkin diagram $\Dyn_I\in\{\AA_m, \DD_m,\EE_6,\EE_7,\EE_8\}$. We show that $\Dyn_I=\AA_m$ implies that the matrix $G_I$ is of rank $n$ or $n-1$. Moreover, we depict explicit shapes of Hasse digraphs $\CH(I)$ of all such posets~$I$ and devise formulae for their number.\medskip \noindent\textbf{Mathematics Subject Classifications:} 05C50, 06A07, 06A11, 15A63, 05C30 \end{abstract} \section{Introduction}\label{sec:intro} By a finite partially ordered set (\textit{poset}) $I$ of \textit{size} $n$ we mean a pair $I=(V, \leq_I)$, where $V\eqdef \{1,\ldots, n\}$ and \(\leq_I\,\subseteq V\times V\) is a reflexive, antisymmetric and transitive binary relation. Every poset $I$ is uniquely determined by its \textit{incidence matrix} \begin{equation}\label{df:incmat} C_{I} = [c_{ij}] \in\MM_{n}(\ZZ),\textnormal{ where } c_{ij} = 1 \textnormal{ if } i \leq_I j\textnormal{ and } c_{ij} = 0\textnormal{ otherwise}, \end{equation} i.e., a square $(0,1)$-matrix that encodes the relation \(\leq_I\). It is known that various mathematical classification problems can be solved by a reduction to the classification of indecomposable $K$-linear representations ($K$~is a field) of finite digraphs or matrix representations of finite posets, see~\cite{Si92}. Inspired by these results, here we study posets that are non-negative in the following sense. A poset $I$\ is defined to be \textit{non-negative} of \textit{rank $m$} if its \textit{symmetric Gram matrix} $G_I\eqdef\tfrac{1}{2}(C_I+C_I^{tr})\in\MM_n(\RR)$ is positive semi-definite of rank~$m$. Non-negative posets are classified by means of signed simple graphs as follows. 
One associates with a poset $I=(V, \leq_I)$\ the signed graph $\Delta_I=(V,E,\sgn)$ with the set of edges $E=\{\{i,j\};\ i<_I j \textnormal{ or } j <_I i\}$ and the sign function $\sgn(e)\eqdef1$ for every edge (i.e., signed graph with \textit{positive} edges only), see~\cite{SimZaj_intmms} and \Cref{rmk:graphbigraph}. In particular, $I$ is called connected, if $\Delta_I$ is connected. We note that $\Delta_I$ is uniquely determined by its adjacency matrix $\Ad_{\Delta_I}\eqdef 2(G_I-\mathrm{id}_n)$, where $\mathrm{id}_n\in\MM_n(\ZZ)$ is an identity matrix. Analogously as in the case of posets, a signed graph $\Delta$ is defined to be \textit{non-negative} of rank $m$ if its \textit{symmetric Gram matrix} $G_\Delta\eqdef \frac{1}{2}\Ad_\Delta + \mathrm{id}_n$ is positive semi-definite of rank $m$. Following \cite{simsonCoxeterGramClassificationPositive2013}, we call two signed graphs $\Delta_1$ and $\Delta_2$ \textit{weakly Gram $\ZZ$-congruent} if $G_{\Delta_1}$ and $G_{\Delta_2}$ are \textit{$\ZZ$-congruent}, i.e., $G_{\Delta_2}=B^{tr}G_{\Delta_1}B$ for some $B\in\Gl_n(\ZZ)\eqdef\{A\in\MM_n(\ZZ);\,\det A=\pm 1\}$. It is easy to check that this relation preserves definiteness and rank. We recall from \cite{simsonSymbolicAlgorithmsComputing2016} and~\cite{zajacStructureLoopfreeNonnegative2019} that every connected non-negative signed simple graph $\Delta$ of rank $m=n-r$ is weakly Gram $\ZZ$-congruent with the canonical $r$-vertex extension of simply laced Dynkin diagram $\Dyn_\Delta \in \{\AA_m,\ab \DD_m,\ab \EE_6,\ab \EE_7,\ab \EE_8\}$, called the \textit{Dynkin type} of~$\Delta$. In particular, every \textit{positive} (i.e.,~of rank~$n$) connected $\Delta$ is weakly Gram $\ZZ$-congruent with a unique simply-laced Dynkin diagram $\Dyn_\Delta$ of Table \ref{tbl:Dynkin_diagrams}. \begin{longtable}{@{}r@{\,}l@{\,}l@{\quad}r@{\,}l@{}} $\AA_n\colon$ & \grapheAn{0.80}{1} & $\scriptstyle (n\geq 1);$\\[0.2cm] $\DD_n\colon$ & \grapheDn{0.80}{1} & $\scriptstyle (n\geq 1);$ & $\EE_6\colon$ & \grapheEsix{0.80}{1}\\[0.2cm] $\EE_7\colon$ & \grapheEseven{0.80}{1} & & $\EE_8\colon$ & \grapheEeight{0.80}{1}\\[0.2cm] \caption{Simply-laced Dynkin diagrams}\label{tbl:Dynkin_diagrams} \end{longtable} \noindent Analogously, every \textit{principal} (i.e.,~of rank~$n-1$) connected bigraph $\Delta$ is weakly Gram $\ZZ$-congruent with $\widetilde{\mathrm{D}}\mathrm{yn}_\Delta \in \{\widetilde{\AA}_n,\ab \widetilde{\DD}_n,\ab \widetilde{\EE}_6,\ab \widetilde{\EE}_7,\ab \widetilde{\EE}_8\}$ diagram of Table \ref{tbl:Euklid_diag}, which is a one point extension of a diagram of \Cref{tbl:Dynkin_diagrams}. \begin{longtable}{@{}r@{$\colon$}l@{\ \ \ }r@{$\colon$}l@{}} $\widetilde{\AA}_n$ & \grapheRAn{0.80}{1}\ {$\scriptstyle (n\geq 1)$;}\vspace{-0.3cm} \\ $\widetilde{\DD}_n$ & \grapheRDn{0.80}{1}\ {$\scriptstyle (n\geq 4)$;} & $\widetilde{\EE}_6$ & \grapheREsix{0.80}{1} \\ $\widetilde{\EE}_7$ & \grapheREseven{0.80}{1} & $\widetilde{\EE}_8$ & \grapheREeight{0.80}{1}\\[0.2cm] \caption{Simply-laced Euclidean diagrams}\label{tbl:Euklid_diag} \end{longtable}\vspace*{-2ex} \begin{remark}\label{rmk:graphbigraph} We are using the following notations, see \cite{barotQuadraticFormsCombinatorics2019,simsonCoxeterGramClassificationPositive2013,simsonSymbolicAlgorithmsComputing2016,SimZaj_intmms}. 
\begin{enumerate}[label={\textnormal{(\alph*)}},wide] \item\label{rmk:graphbigraph:graphasbigraph} A simple graph $G=(V,E)$ is viewed as the signed graph $\Delta_G=(V,E,\sgn)$ with a sign function $\sgn(e)\eqdef-1$ for every $e\in E$, i.e., a signed graph with \textit{negative} edges only. \item\label{rmk:graphbigraph:bigraphdraw} We denote \textit{positive} edges by dotted lines and \textit{negative} edges as full~ones, see~\cite{barotQuadraticFormsCombinatorics2019,simsonCoxeterGramClassificationPositive2013}. \end{enumerate} \end{remark} By setting $\Dyn_I\eqdef \Dyn_{\Delta_I}$ one associates a Dynkin diagram with an arbitrary connected non-negative poset~$I$. In the present work, we give a complete description of connected non-negative posets $I=(V,\leq_I)$ of Dynkin type $\Dyn_I=\AA_m$ in terms of their \textit{Hasse digraphs} $\CH(I)$, where $\CH(I)$ is the transitive reduction of the acyclic digraph $\CD(I)=(V, A_I)$, with $i\to j\in A_I$ iff $i<_I j$ (see also Definition~\ref{df:hassedigraph}). The main result of the manuscript is the following theorem that establishes the correspondence between combinatorial and algebraic properties of non-negative posets of Dynkin type $\AA_m$.\pagebreak \begin{theorem}\label{thm:a:main} Assume that $I$ is a connected poset of size $n$ and $\CH(I)$ is its Hasse digraph. \begin{enumerate}[label=\normalfont{(\alph*)}] \item\label{thm:a:main:posit} $I$ is non-negative of Dynkin type $\Dyn_I=\AA_n$ if and only if $\ov \CH(I)$ is a path graph. \item\label{thm:a:main:princ} $I$ is non-negative of Dynkin type $\Dyn_I=\AA_{n-1}$ if and only if $\ov\CH(I)$ is a cycle graph and $\CH(I)$ has at least two sinks. \item\label{thm:a:main:crkbiggeri} If $I$ is non-negative of Dynkin type $\Dyn_I=\AA_{m}$, then $m\in \{n,n-1\}$. \end{enumerate} \end{theorem} In particular, we confirm Conjecture 6.4 stated in~\cite{gasiorekAlgorithmicCoxeterSpectral2020} by showing that in the case of connected non-negative posets of Dynkin type $\AA_m$, there is a one-to-one correspondence between positive posets and connected digraphs whose underlying graph is a path. We give a similar description of principal posets: there is a one-to-one correspondence between such posets and connected digraphs with at least two sinks, whose underlying graph is a cycle. We show that this characterization is complete: there are no connected non-negative posets of Dynkin type $\AA_m$ with $m<n-1$. Moreover, using the results of Theorem~\ref{thm:a:main}, we devise a formula for the number of all, up to isomorphism, connected non-negative posets of Dynkin type $\AA_m$. \begin{theorem}\label{thm:typeanum} Let $Nneg(n,\AA)$ be the number of all connected non-negative posets $I$ of size $n\geq1$ and Dynkin type $\Dyn_I=\AA_{m}$, counted up to isomorphism. Then \begin{equation}\label{thm:typeanum:eq} Nneg(n, \AA)= \frac{1}{2n} \sum_{d\mid n}\big(2^{\frac{n}{d}}\varphi(d)\big) + \big\lfloor 2^{n - 2} + 2^{\lceil\frac{n}{2}-2\rceil} - \tfrac{n+1}{2}\big\rfloor, \end{equation} where $\varphi$ is Euler's totient function. \end{theorem} \section{Preliminaries} Throughout, we mainly use the terminology and notation introduced in~\cite{gasiorekOnepeakPosetsPositive2012,gasiorekAlgorithmicStudyNonnegative2015,Si92,SimZaj_intmms} (in regard to posets), \cite{barotQuadraticFormsCombinatorics2019,simsonIncidenceCoalgebrasIntervally2009,simsonCoxeterGramClassificationPositive2013,simsonSymbolicAlgorithmsComputing2016} (quadratic forms), and~\cite{diestelGraphTheory2017} (graph theory).
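Before fixing the remaining notation, we record a small computational illustration of Theorem~\ref{thm:a:main}; it is not used anywhere in the sequel. The following Python sketch is ours and purely illustrative (the function name and the input conventions do not come from the cited literature): given the arcs of the Hasse digraph $\CH(I)$ of a connected poset $I$ on $\{1,\ldots,n\}$, it tests the two combinatorial criteria of the theorem.
\begin{verbatim}
# Illustrative sketch (not part of the formal development): decides, from the
# arcs of the Hasse digraph H(I) of a *connected* poset I on {1,...,n}, which
# case of the main classification theorem applies.
def dynkin_type_A(n, arcs):
    deg = {v: 0 for v in range(1, n + 1)}   # degree in the underlying graph
    out = {v: 0 for v in range(1, n + 1)}   # out-degree, used to detect sinks
    for x, y in arcs:
        deg[x] += 1; deg[y] += 1; out[x] += 1
    degs = sorted(deg.values())
    sinks = sum(1 for v in out if out[v] == 0)
    if n == 1 and not arcs:                 # a single point is a path graph
        return "A_n (positive)"
    if len(arcs) == n - 1 and degs == [1, 1] + [2] * (n - 2):
        return "A_n (positive)"             # underlying graph is a path
    if len(arcs) == n and degs == [2] * n and sinks >= 2:
        return "A_{n-1} (principal)"        # cycle with at least two sinks
    return None                             # the poset is not of Dynkin type A
\end{verbatim}
For instance, for the alternating orientation of a square, i.e., the Hasse digraph with arcs $1\to 2$, $3\to 2$, $3\to 4$ and $1\to 4$, the sketch reports the principal case, in accordance with Theorem~\ref{thm:a:main}\ref{thm:a:main:princ}.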
In what follows, by $\NN\subseteq\ZZ\subseteq\RR$ we denote the set of non-negative integers, the ring of integers, and the field of real numbers, respectively. We use row notation for vectors $v=[v_1,\ldots,v_n]$ and write $v^{tr}$ to denote column vectors. Two (directed) graphs $G=(V,E)$ and $G'=(V',E')$ are called \textit{isomorphic}, denoted $G\simeq G'$, if there exists a bijection $f\colon V\to V'$ that preserves edges (arcs, respectively). By the degree $\deg_G(v)$ of a vertex $v\in V$ we mean the number of edges incident with $v$. We call $G$ a \textit{path graph} if $|V|=1$ and $|E|=0$ or \mbox{$G\simeq\,P(u,v)\eqdef \, u\scriptstyle \bullet\,\rule[1.5pt]{22pt}{0.4pt}\,\bullet\,\rule[1.5pt]{22pt}{0.4pt}\,\,\hdashrule[1.5pt]{12pt}{0.4pt}{1pt}\,\rule[1.5pt]{22pt}{0.4pt}\,\bullet \displaystyle v$}, where $u\neq v$. We call $G$ a \textit{cycle} if $|G|\geq 3$ and $G\simeq\!\!\!\!\smash{\tikzsetnextfilename{cyclepic_graph}\begin{tikzpicture}[baseline=(n11.base),label distance=-2pt,xscale=0.65, yscale=0.74] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label={[name=n11]left:\phantom{$|$}}] (n1) at (0, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n2) at (1, 0 ) {}; \node (n3) at (2, 0 ) {$ $}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n4) at (5, 0 ) {}; \node (n5) at (3, 0 ) {$ $}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n6) at (4, 0 ) {}; \draw[shorten <= 2.50pt, shorten >= 2.50pt] (n1) .. controls (0.2,0.45) and (4.8,0.45) .. (n4); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, -, shorten <= -2.50pt, shorten >= -2.50pt] (n3) to (n5); \foreach \x/\y in {1/2, 6/4} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-, shorten <= 2.50pt, shorten >= -2.50pt] (n2) to (n3); \draw [-, shorten <= -2.50pt, shorten >= 2.50pt] (n5) to (n6); \end{tikzpicture}}$. A graph $G$ is \textit{connected} if $P(u, v)\subseteq G$ for every $u\neq v\in V$. By the \textit{underlying graph} $\ov \CD$ of a digraph $\CD$ we mean the graph obtained from $\CD$ by forgetting the orientation of its arcs. A digraph $\CD$ is connected if $\ov \CD$ is connected. A connected graph $G$ is called a \textit{tree} if $G$ does not contain any cycle. A digraph $\CD$ is called \textit{acyclic} if it contains no subdigraph isomorphic to\!\!\!\! \smash{\tikzsetnextfilename{cyclepic_orient}\begin{tikzpicture}[baseline=(n11.base),label distance=-2pt,xscale=0.65, yscale=0.74] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label={[name=n11]left:\phantom{$|$}}] (n1) at (0 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n2) at (1 , 0 ) {}; \node (n3) at (2 , 0 ) {$ $}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n4) at (5 , 0 ) {}; \node (n5) at (3 , 0 ) {$ $}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n6) at (4 , 0 ) {}; \draw[<-,shorten <= 2.50pt, shorten >= 3.50pt] (n1) .. controls (0.8,0.4) and (4.,0.4) .. (n4); \draw [line width=1.2pt, shorten <= 2.50pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, -, shorten <= .50pt, shorten >= -4.50pt] (n3) to (n5); \foreach \x/\y in {1/2, 6/4} \draw [->, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [->, shorten <= 2.50pt, shorten >= -2.50pt] (n2) to (n3); \draw [->, shorten <= -2.50pt, shorten >= 2.50pt] (n5) to (n6); \end{tikzpicture}}. We call a vertex $v$ of a digraph $\CD=(V, A)$ a \textit{source} (minimum) if it is not a target of any arc $\alpha\in A$.
Analogously, we call $v\in \CD$ a \textit{sink} (maximum) if it is not a source of any arc.\smallskip Given two elements $x,y\in V$ of a poset $I=(V,\leq_I)$ we write: \begin{itemize} \item $x <_I y$ when $x\leq_I y$ and $x\neq y$; \item $x\lessdot_I y$ when $y$ covers $x$, i.e., $x<_I y$ and there is no $z\in V$ such that $x<_I z<_I y$. \end{itemize} Moreover, by $N_I(x)\eqdef\{z\in I;\, x \lessdot_{I} z\}\cup \{z\in I;\, z \lessdot_{I} x\}$ we denote the set of elements that either cover $x$ or are covered by $x$ in $I$. \begin{definition}\label{df:hassedigraph} The \textit{Hasse digraph} of a poset $I=(V,\leq_I)$ is the simple directed graph $\CH(I)=(V,A)$ with the set of arcs defined as follows: $x\to y\in A$ iff $x\lessdot_I y$. \end{definition} We call a poset $I$ \textit{connected} if the graph $\ov \CH(I)\eqdef \ov{\CH(I)}$ is connected (equivalently, when the signed graph $\Delta_I$ is connected), and we note that every minimal (maximal) element in $I$ corresponds to a source (sink) in the Hasse digraph $\CH(I)$. We say that $I$ is a \textit{one-peak poset} if $I$ has exactly one maximal element. Every finite acyclic digraph $\CD=(V,A)$ defines the poset $I_\CD\eqdef(V, \leq_\CD)$, where \mbox{$a \leq_\CD b$} if $a=b$ or there is an oriented path $\vec P(a,b)\eqdef\, a\scriptstyle \bullet \raisebox{-1.5pt}{\parbox{25pt}{\rightarrowfill}} \bullet \raisebox{-1.5pt}{\parbox{25pt}{\rightarrowfill}} \,\hdashrule[1.5pt]{12pt}{0.4pt}{1pt} \raisebox{-1.5pt}{\parbox{25pt}{\rightarrowfill}} \bullet \displaystyle b\subseteq \CD$. We note that $\CH(I_\CD)\neq \CD$ in general, see Example~\ref{ex:definitions}. By $I^{(k_1,\ldots,k_r)}\eqdef I\setminus \{k_1,\ldots,k_r\}$ we denote the induced subposet of $I$ whose set of elements equals $\{1,\ldots,n\}\setminus\{k_1,\ldots,k_r\}$.\smallskip Following~\cite{gasiorekOnepeakPosetsPositive2012,simsonCoxeterGramClassificationPositive2013,SimZaj_intmms}, we associate with a poset $I$ of size $n$: \begin{itemize} \item the unit quadratic form $q_I\colon\ZZ^n\to\ZZ$ defined by the formula \begin{equation}\label{eq:quadratic_form} q_I(x):=\sum_{\mathclap{i\in\{1,\ldots,n\}}}x_i^2 + \sum_{\mathclap{i\,<_I\, j}}x_i x_j = x\cdot G_I\cdot x^{tr}, \end{equation} \item and its kernel \begin{equation}\label{eq:kernel} \Ker q_I \eqdef \{v \in\ZZ^n;\ q_I(v)=0\}\subseteq\ZZ^n, \end{equation} \end{itemize} where $G_I\in\MM_n(\RR)$ is the symmetric Gram matrix of $I$. It is known that a poset $I$ is non-negative of rank $m$ if and only if the quadratic form $q_I$ is positive semi-definite (i.e., $q_I(v)\geq 0$ for every $v\in\ZZ^n$) and its kernel $\Ker q_I\subseteq \ZZ^n$ is a free abelian subgroup of rank $n-m$, see~\cite{simsonCoxeterGramClassificationPositive2013}. We call a non-negative poset $I$ \textit{positive} if $m=n$, \textit{principal} if $m=n-1$, and \textit{indefinite} if its symmetric Gram matrix $G_I$ is not positive/negative semidefinite.\smallskip \begin{remark}\label{rmk:indef} A poset $I$ is indefinite if and only if there exist vectors $v,w\in\ZZ^n$ such that $q_I(v)>0$ and $q_I(w)<0$. Since $q_I([1,0,\ldots,0])=1>0$ for every poset $I$, to show that a given poset $I$ is indefinite, it suffices to show that $q_I(w)<0$ for some $w\in\ZZ^n$. \end{remark} \begin{example}\label{ex:definitions} To illustrate the definitions, we consider the digraph $\CD=(\{1,2,3,4\}, \{{2\to 1},\ab 2\to 3, 2\to 4,\ab 1\to 3, 4\to 3\})$.
We have $I_\CD=(\{1,2,3,4\}, \{2 \leq_\CD 1, 2 \leq_\CD 3, 2 \leq_\CD 4, 1 \leq_\CD 3, 4 \leq_\CD 3\})$, {\newcommand{\mnum}[1]{\vbox to 11pt {\vfil\hbox to 9pt{\hfill$\scriptstyle #1$}\vfil }}\begin{align*} \CD&=\tikzsetnextfilename{expic_1}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt,xscale=0.66, yscale=0.6] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=left:$\scriptscriptstyle 1$] (n1) at (1, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 2$] (n2) at (0, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 3$] (n3) at (2, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptscriptstyle 4$] (n4) at (1, 0 ) {}; \foreach \x/\y in {1/3, 2/1, 2/4, 4/3} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-stealth, shorten <= 2.00pt, shorten >= 2.10pt] (n2) to (n3); \end{tikzpicture}, & \CH(I_\CD)&=\tikzsetnextfilename{expic_2}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt, xscale=0.66, yscale=0.6] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=left:$\scriptscriptstyle 1$] (n1) at (1, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 2$] (n2) at (0, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 3$] (n3) at (2, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptscriptstyle 4$] (n4) at (1, 0 ) {}; \foreach \x/\y in {1/3, 2/1, 2/4, 4/3} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture}, & \Delta_{I_\CD}&= \tikzsetnextfilename{expic_3}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt, xscale=0.66, yscale=0.6] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=left:$\scriptscriptstyle 1$] (n1) at (1, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 2$] (n2) at (0, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 3$] (n3) at (2, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptscriptstyle 4$] (n4) at (1, 0 ) {}; \foreach \x/\y in {1/3, 2/1, 2/3, 2/4, 4/3} \draw [-, densely dashed, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture},\\ G_{I_\CD}&=\begin{bsmallmatrix*}[r] \mnum{1} & \mnum{\frac{1}{2}} & \mnum{\frac{1}{2}} & \mnum{0}\\ \mnum{\frac{1}{2}} & \mnum{1} & \mnum{\frac{1}{2}} & \mnum{\frac{1}{2}}\\ \mnum{\frac{1}{2}} & \mnum{\frac{1}{2}} & \mnum{1} & \mnum{\frac{1}{2}}\\ \mnum{0} & \mnum{\frac{1}{2}} & \mnum{\frac{1}{2}} & \mnum{1} \end{bsmallmatrix*}=G_{\Delta_{I_\CD}}, & C_{I_\CD}&=\begin{bsmallmatrix*}[r] \mnum{1} & \mnum{0} & \mnum{1} & \mnum{0}\\ \mnum{1} & \mnum{1} & \mnum{1} & \mnum{1}\\ \mnum{0} & \mnum{0} & \mnum{1} & \mnum{0}\\ \mnum{0} & \mnum{0} & \mnum{1} & \mnum{1} \end{bsmallmatrix*}, & \Ad_{I_\CD}&=\begin{bsmallmatrix*}[r] \mnum{0} & \mnum{1} & \mnum{1} & \mnum{0}\\ \mnum{1} & \mnum{0} & \mnum{1} & \mnum{1}\\ \mnum{1} & \mnum{1} & \mnum{0} & \mnum{1}\\ \mnum{0} & \mnum{1} & \mnum{1} & \mnum{0} \end{bsmallmatrix*}, \end{align*}}where we denote the positive edges of a signed graph by dotted lines. 
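The matrices above can also be obtained mechanically. The following Python sketch (added here purely as an illustration; it assumes the \texttt{numpy} library) builds $C_{I_\CD}$ from the arcs of $\CD$ by taking the reflexive-transitive closure, and then recovers the symmetric Gram matrix $G_{I_\CD}$.
\begin{verbatim}
# Illustrative cross-check (assumes numpy); not part of the original example.
import numpy as np

arcs = [(2, 1), (2, 3), (2, 4), (1, 3), (4, 3)]   # arcs of the digraph D above
n = 4
C = np.eye(n, dtype=int)                          # reflexivity: i <= i
for x, y in arcs:
    C[x - 1, y - 1] = 1
for k in range(n):                                # Warshall transitive closure
    for i in range(n):
        for j in range(n):
            if C[i, k] and C[k, j]:
                C[i, j] = 1
G = (C + C.T) / 2                                 # symmetric Gram matrix
print(C)                                          # reproduces C_{I_D} above
print(np.linalg.matrix_rank(G))                   # prints 4
print(np.all(np.linalg.eigvalsh(G) > 0))          # prints True
\end{verbatim}
In particular, the sketch confirms that $G_{I_\CD}$ has rank $4$ and is positive definite, in agreement with the computation of $\Ker q_{I_\CD}$ below.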
Moreover: \begin{itemize} \item $2$ is minimal in $I_\CD$ and $3$ is maximal in $I_\CD$, equivalently: $2$ is a source in $\CH(I_\CD)$ and $3$ is a sink in $\CH(I_\CD)$, \item $q_{I_\CD}(x)= x_{1}^{2} + x_{2}^{2} + x_{3}^{2} + x_{4}^{2} + x_{1}x_{2} + x_{2}x_{3} + x_{3}x_{4} + x_{1}x_{3} + x_{2}x_{4}= \left(x_{1} \!+\! \tfrac{1}{2}x_{2} \!+\! \tfrac{1}{2}x_{3}\right)^{2} \!+\! \tfrac{3}{4}\! \left(\tfrac{1}{3}x_{2} \!+\! x_{3} \!+\! \frac{2}{3} x_{4}\right)^{2} \!+\! \tfrac{2}{3}\! \left(x_{2} \!+\! \tfrac{1}{2}x_{4}\right)^{2} \!+\! \tfrac{1}{2}x_{4}^{2}$, \item $\Ker q_{I_\CD}=\{0\}\subseteq\ZZ^4$ and poset $I_\CD$ is non-negative of rank $4$, i.e., positive. \end{itemize}\pagebreak Since $G_{\DD_4}=B^{tr}G_{\Delta_{I_\CD}}B$, where \begin{center} \newcommand{\mnum}[1]{\vbox to 11pt {\vfil\hbox to 13pt{\hfill$\scriptstyle #1$}\vfil }} $\DD_4=$ \tikzsetnextfilename{expic_4}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt, xscale=0.66, yscale=0.6] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 1$] (n1) at (0, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptscriptstyle 2$] (n2) at (1, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above right:$\scriptscriptstyle 3$] (n3) at (1, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 4$] (n4) at (2, 0 ) {}; \foreach \x/\y in {1/3, 2/3, 3/4} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture},\quad $G_{\DD_4}=\begin{bsmallmatrix*}[r] \mnum{1} & \mnum{0} & \mnum{-\frac{1}{2}} & \mnum{0}\\ \mnum{0} & \mnum{1} & \mnum{-\frac{1}{2}} & \mnum{0}\\ \mnum{-\frac{1}{2}} & \mnum{-\frac{1}{2}} & \mnum{1} & \mnum{-\frac{1}{2}}\\ \mnum{0} & \mnum{0} & \mnum{-\frac{1}{2}} & \mnum{1} \end{bsmallmatrix*}=\frac{1}{2}\Ad_{\DD_4} + \mathrm{id}_n$ and $B=\begin{bsmallmatrix*}[r] \mnum{0} & \mnum{0} & \mnum{-1} & \mnum{0}\\ \mnum{0} & \mnum{0} & \mnum{-1} & \mnum{0}\\ \mnum{-1} & \mnum{-1} & \mnum{0} & \mnum{-1}\\ \mnum{0} & \mnum{0} & \mnum{-1} & \mnum{0} \end{bsmallmatrix*}$, \end{center} we conclude that $\Dyn_{I_\CD}=\DD_4$. To finish the example, we note that elements of the adjacency matrix $\Ad_{\DD_4}$ are negative because we view graph $\DD_4$ as a signed graph. \end{example} We recall from Section~\ref{sec:intro}, that the Dynkin type of a connected non-negative poset $I$ of size $n$ and rank $m$ is such a simply-laced Dynkin diagram $\Dyn_I\in\{\AA_m,\ab \DD_m,\ab \EE_6,\ab \EE_7,\ab \EE_8\},$ that the signed graph $\Delta_I$ is weakly Gram $\ZZ$-congruent with the canonical $r$-vertex extension of $\Dyn_I$, where $r=n-m$. Equivalently, Dynkin type can be defined without referring to canonical $r$-vertex extensions, see~\cite{gasiorekAlgorithmicStudyNonnegative2015,simsonSymbolicAlgorithmsComputing2016,zajacStructureLoopfreeNonnegative2019,zajacPolynomialTimeInflation2020} and \cite{barotQuadraticFormsCombinatorics2019}. Although such a definition is more technical, it yields better insight into the combinatorial structure of non-negative posets. First, we need the following fact. \begin{fact}\label{fact:specialzbasis} Assume that $I$ is a connected non-negative poset of size $n$ and rank $m$, $r=n-m$, and $q_I\colon\ZZ^n\to\ZZ$~\eqref{eq:quadratic_form} is the quadratic form associated with $I$. 
\begin{enumerate}[label=\normalfont{(\alph*)}] \item\label{fact:specialzbasis:existance} There exists a basis $h^{k_1},\ldots, h^{k_r}$ of the free abelian group $\Ker q_I\subseteq \ZZ^n$ such that $h^{k_i}_{k_i} = 1$ and $h^{k_i}_{k_j} = 0$ for $1 \leq i,j \leq r$ and $i \neq j$, where $1 \leq k_1 < \ldots < k_r \leq n$. \item\label{fact:specialzbasis:subbigraph} $I^{(a_1,\ldots,a_s)}$ is a connected and non-negative poset of size $n-s$ and rank $m$, for every $\{a_1,\ldots,a_s\}\subseteq \{k_1,\ldots,k_r\}$ and $1\leq s\leq r$. \end{enumerate} \end{fact} \begin{proof} Apply \cite[Lemma 2.7]{zajacPolynomialTimeInflation2020} and \cite[Theorem 2.1]{zajacStructureLoopfreeNonnegative2019} to the bigraph $\Delta_I$. \end{proof} The Dynkin type of a connected non-negative poset $I$ is defined to be the Dynkin diagram $\Dyn_I \in \{\AA_m,\DD_m,\EE_6,\EE_7,\EE_8\}$ that determines $I$ uniquely, up to a weak Gram $\ZZ$-congruence. \begin{definition}\label{df:Dynkin_type} Assume that $I$ is a connected non-negative poset of rank $m$ and size $n$. The Dynkin type $\Dyn_I \in \{\AA_m,\DD_m,\EE_6,\EE_7,\EE_8\}$ is the unique simply-laced Dynkin diagram, viewed as a signed graph, such that \[ \Delta_{I'} \sim_\ZZ \Dyn_I, \] where $I'\eqdef I$ if $m=n$ and $I'\eqdef I^{(k_1,\ldots,k_r)}$ (see \Cref{fact:specialzbasis}\ref{fact:specialzbasis:subbigraph}) otherwise.\smallskip \end{definition} We note that the Dynkin type $\Dyn_I$ can be calculated using the inflation algorithm~\cite[Algorithm 3.1]{simsonCoxeterGramClassificationPositive2013} applied to the bigraph $\Delta_{I'}$. {\tikzexternaldisable\newcommand{\mxs}{0.9} \begin{longtable}{@{}r@{\,}l@{\,}l@{\quad}r@{\,}l@{}} ${}_{p}\AA^*_{r}\colon$ & \grafcrkzA{\mxs}{1} & & $\wh\DD^*_{p} \diamond \AA_{r-p}\colon$ & \grafcrkzDii{\mxs}{1}\\[.6cm] $\DD^*_{r}\colon$ & \grafcrkzDi{\mxs}{1} & & ${}_{s}\DD^*_{p} \diamond \AA_{r-p}\colon$ & \grafcrkzDiii{0.7}{0.8}\\[-.1cm] \caption{One-peak positive posets of Dynkin type $\AA_{r+1}$ and $\DD_{r+1}$}\label{tbl:onepeak_posit} \end{longtable}\tikzexternalenable} The aim of this manuscript is to give a full structural characterization of connected non-negative posets $I$ of Dynkin type $\Dyn_I=\AA_m$. We note that such a result is known in the case of one-peak positive and principal posets, see \cite[Theorem 5.2]{gasiorekOnepeakPosetsPositive2012} and \cite[Theorem 3.5]{gasiorekCoxeterTypeClassification2019}. In particular, we have the following. \begin{theorem}\label{thm:onepeak_posit} A one-peak poset $I$ of size $n$ is positive if and only if its Hasse digraph $\CH(I)$ is isomorphic~to: \begin{enumerate}[label=\normalfont{(\alph*)}] \item the digraph ${}_{p}\AA^*_{n-1}$ \textnormal{(}in this case the Dynkin type equals $\Dyn_I=\AA_{n}$\textnormal{);} \item one of the digraphs $\wh\DD^*_{p} \diamond \AA_{n-p-1}$, $\DD^*_{n-1}$ or ${}_{s}\DD^*_{p} \diamond \AA_{n-p-1}$ \textnormal{(}$\Dyn_I=\DD_{n}$\textnormal{);} \item one of the digraphs $\PP_1,\ldots,\PP_{16}$ \textnormal{(}$\Dyn_I=\EE_{6}$\textnormal{)}, $\PP_{17},\ldots,\PP_{72}$ \textnormal{(}$\Dyn_I=\EE_{7}$\textnormal{)}, $\PP_{73},\ldots,\PP_{193}$ \textnormal{(}$\Dyn_I=\EE_{8}$\textnormal{)} presented in \cite[Tables 6.1–6.3]{gasiorekOnepeakPosetsPositive2012}.\vspace*{-2ex} \end{enumerate} \end{theorem} \section{Hanging path in a Hasse digraph} In this section, we formalize a very useful observation: changing the orientation of the arcs on a ``hanging path'' in the Hasse digraph $\CH(I)$ changes neither the definiteness nor the rank of a poset $I$.
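As a quick numerical sanity check of this observation (again purely illustrative and not part of the formal development; it assumes the \texttt{numpy} library and reuses the reflexive-transitive closure construction of Example~\ref{ex:definitions}), one can compare the symmetric Gram matrices of a poset and of the poset obtained by reversing the arrows on such a hanging path.
\begin{verbatim}
# Illustrative sanity check (assumes numpy): reversing the arcs on a hanging
# path of the Hasse digraph changes neither the rank nor the definiteness of
# the symmetric Gram matrix; cf. the lemma below.
import numpy as np

def gram(n, arcs):                       # G_I of the poset generated by arcs
    C = np.eye(n, dtype=int)
    for x, y in arcs:
        C[x - 1, y - 1] = 1
    for k in range(n):                   # transitive closure
        for i in range(n):
            for j in range(n):
                if C[i, k] and C[k, j]:
                    C[i, j] = 1
    return (C + C.T) / 2

I_arcs  = [(1, 2), (3, 2), (3, 4), (4, 5)]   # hanging path 3 -> 4 -> 5
dI_arcs = [(1, 2), (3, 2), (4, 3), (5, 4)]   # the same path, arcs reversed
for arcs in (I_arcs, dI_arcs):
    G = gram(5, arcs)
    print(np.linalg.matrix_rank(G), np.all(np.linalg.eigvalsh(G) > -1e-9))
# both runs print: 5 True  (rank 5 and positive semi-definite)
\end{verbatim}
This behaviour is exactly what Lemma~\ref{lemma:preflection} below asserts in general.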
Inspired by the ideas of~\cite[Prop. 16.15]{Si92} and~\cite{gasiorekOnepeakPosetsPositive2012}, we introduce the following definition. \begin{definition} Let $I_p\subseteq I$ be a connected subposet of a poset $I$ and $p$ be a point of $I_p$. The subposet $I_p$ is called \textit{$p$-anchored path} if: \begin{enumerate}[label=\normalfont{(\alph*)}] \item for every $a\in I_p^{(p)}$ we have $N_I(a)\subseteq I_p$ and $|N_I(a)|\in\{1,2\}$, \item $|N_I(p)\cap I_p|=1$. \end{enumerate} \end{definition} The following picture illustrates the definition of a $p$-anchored path. \begin{center} $\ov\CH(I)=$ \tikzsetnextfilename{anchpth_1}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt, xscale=0.48, yscale=0.4] \coordinate (n1) at (4, 3 ) {}; \coordinate (n3) at (4, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt] (n11) at (3.4 , 2.5 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt] (n2) at (3.4 , 1.9 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt] (n31) at (3.4 , 0.4 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptstyle p$] (n4) at (5.50, 1.50) {}; \coordinate (n5) at (0, 3 ) {}; \coordinate (n6) at (0, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n7) at (7, 1.50) {}; \node (n8) at (8.50, 1.50) {}; \node (n9) at (10, 1.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n10) at (11.50, 1.50) {}; \draw[decoration={complete sines,segment length=3mm, amplitude=1mm},decorate] (n1) -- (n5); \draw (n5) -- (n6); \draw (n1) -- (n3); \draw[decoration={complete sines,segment length=3mm, amplitude=1mm,mirror},decorate] (n3) -- (n6); \foreach \x/\y in {4/7, 7/8, 9/10} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {11/4, 2/4, 31/4} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 3\pgflinewidth, -, shorten <= 2.50pt, shorten >= 2.50pt] (n2) to (n31); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, -, shorten <= -2.50pt, shorten >= -2.50pt] (n8) to (n9); \draw [decorate,decoration={brace,amplitude=5pt,mirror,raise=3ex}] (5.20, 1.50) -- (11.80, 1.50) node[midway,yshift=-6ex]{$p$-anchored path $I_p$}; \draw [decorate,decoration={brace,amplitude=5pt,mirror,raise=2ex}] (11.80, 1.50) -- (6.70, 1.50) node[midway,yshift=6ex]{$\ov\CH(I^{(p)}_p)$}; \end{tikzpicture} \end{center} \begin{definition} Let $I_p\subseteq I$ be a $p$-anchored path of a poset $I$. The $I_p$-reflection $\delta_{I_p} I$ is the poset defined by the Hasse digraph $\CH(\delta_{I_p}I)$ obtained from $\CH(I)$ by reversing all the arcs in the subdigraph $\CH(I_p)\subseteq \CH(I)$. \end{definition} We call $I_p\subseteq I$ an \textit{inward} ($\mkern-2mu$\textit{outward}) $p$-anchored path if $p\in I_p$ is a unique maximal (minimal) point in $I_p$. For example, given an outward $p$-anchored path $I_p\subseteq I$ consisting of $\{p,s_1,\dots,s_k\}$ elements, we have the following. 
\begin{center} $\CH(I)=$ \tikzsetnextfilename{anchpth_2}\begin{tikzpicture}[baseline=(n22.base),label distance=-2pt, xscale=0.44, yscale=0.4] \coordinate (n1) at (4, 3 ) {}; \coordinate (n3) at (4, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt] (n11) at (3.4 , 2.5 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt] (n2) at (3.4 , 1.9 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt] (n31) at (3.4 , 0.4 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptstyle p$] (n4) at (5.50, 1.50) {}; \coordinate (n22) at (0, 1.2) {}; \coordinate (n5) at (0, 3 ) {}; \coordinate (n6) at (0, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptstyle s_1$] (n7) at (7, 1.50) {}; \node (n8) at (8.50, 1.50) {}; \node (n9) at (10, 1.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptstyle s_k$] (n10) at (11.50, 1.50) {}; \draw[decoration={complete sines,segment length=3mm, amplitude=1mm},decorate] (n1) -- (n5); \draw (n5) -- (n6); \draw (n1) -- (n3); \draw[decoration={complete sines,segment length=3mm, amplitude=1mm,mirror},decorate] (n3) -- (n6); \foreach \x/\y in {4/7, 7/8, 9/10} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {11/4, 2/4, 31/4} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 3\pgflinewidth, -, shorten <= 2.50pt, shorten >= 2.50pt] (n2) to (n31); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, -, shorten <= -2.50pt, shorten >= -2.50pt] (n8) to (n9); \draw [decorate,decoration={brace,amplitude=5pt,mirror,raise=3ex}] (5.20, 1.50) -- (11.80, 1.50) node[midway,yshift=-6ex]{$\mathclap{\textnormal{outward }p\textnormal{-anchored path } I_p}$}; \end{tikzpicture} \tikzsetnextfilename{pic_ar1}\begin{tikzpicture}[baseline={([yshift=-9.5pt]current bounding box)}, label distance=-2pt,xscale=0.35, yscale=0.5] \coordinate (n1) at (0, 1.50); \coordinate (n2) at (2.50, 1.50); \draw [|-stealth] (n1) to node[above=-2.0pt, pos=0.5] {$\scriptscriptstyle \delta_{I_p} I$} (n2); \end{tikzpicture} \tikzsetnextfilename{anchpth_3}\begin{tikzpicture}[baseline=(n22.base),label distance=-2pt, xscale=0.44, yscale=0.4] \coordinate (n1) at (4, 3 ) {}; \coordinate (n3) at (4, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt] (n11) at (3.4 , 2.5 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt] (n2) at (3.4 , 1.9 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt] (n31) at (3.4 , 0.4 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptstyle p$] (n4) at (5.50, 1.50) {}; \coordinate (n22) at (0, 1.2) {}; \coordinate (n5) at (0, 3 ) {}; \coordinate (n6) at (0, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptstyle s_1$] (n7) at (7, 1.50) {}; \node (n8) at (8.50, 1.50) {}; \node (n9) at (10, 1.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptstyle s_k$] (n10) at (11.50, 1.50) {}; \draw[decoration={complete sines,segment length=3mm, amplitude=1mm},decorate] (n1) -- (n5); \draw (n5) -- (n6); \draw (n1) -- (n3); \draw[decoration={complete sines,segment length=3mm, amplitude=1mm,mirror},decorate] (n3) -- (n6); \foreach \x/\y in {4/7, 7/8, 9/10} \draw [stealth-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {11/4, 2/4, 31/4} \draw [-, shorten <= 2.50pt, shorten >= 
2.50pt] (n\x) to (n\y); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 3\pgflinewidth, -, shorten <= 2.50pt, shorten >= 2.50pt] (n2) to (n31); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, -, shorten <= -2.50pt, shorten >= -2.50pt] (n8) to (n9); \draw [decorate,decoration={brace,amplitude=5pt,mirror,raise=3ex}] (5.20, 1.50) -- (11.80, 1.50) node[midway,yshift=-6ex]{$\mathclap{\textnormal{inward }p\textnormal{-anchored path } I_p^{op}}$}; \end{tikzpicture}$=\CH(\delta_{I_p} I)$ \end{center} The $I_p$-reflection $I\mapsto\delta_{I_p} I$, a combinatorial operation defined at the digraph level, has an algebraic interpretation. We need one more definition from~\cite{simsonCoxeterGramClassificationPositive2013} to state it formally. \begin{definition} Two finite posets $I$ and $J$ are called \textit{strong Gram $\ZZ$-congruent}, written $I\approx_\ZZ J$, if there exists a matrix $B\in\Gl_n(\ZZ)$ such that $C_J=B^{tr} C_I B$. \end{definition} It is straightforward to check that strong Gram $\ZZ$-congruence of posets implies a weak one, whereas the converse implication is not true in general~\cite{gasiorekAlgorithmicStudyNonnegative2015}, although it holds in the case of one-peak positive~\cite{gasiorekOnepeakPosetsPositive2012} and principal~\cite{gasiorekCoxeterTypeClassification2019} posets. The reader is referred to~\cite{gasiorekCongruenceRationalMatrices2023} for further discussion of $\ZZ$-congruence and its applications.\smallskip \begin{lemma}\label{lemma:preflection} If $I_p\subseteq I$ is an inward or outward $p$-anchored path, then $I\approx_\ZZ \delta_{I_p} I$. In par\-ticular, $I$ is non-negative of rank $m$ if and only if $\delta_{I_p} I$ is non-negative of rank~$m$. \end{lemma} \begin{proof} First, we note that the strong Gram $\ZZ$-congruence of posets implies a weak Gram $\ZZ$-congruence. Since congruent matrices have the same definiteness and rank, it suffices to show that $I\approx_\ZZ \delta_{I_p} I$.\smallskip Let $I_p\subseteq I$ be an inward $p$-anchored path of a poset $I$ and let $J\eqdef\delta_{I_p} I$. Then: \begin{itemize} \item $J_p\eqdef I_p^{op}$ is an outward $p$-anchored path in $J$, \item $I\setminus I^{(p)}_p = J\setminus J^{(p)}_p$.
\end{itemize} Without loss of generality, we may assume that Hasse digraphs $\CH(I)$ and $\CH(J)$ have the forms \begin{center} $\CH(I)=$ \tikzsetnextfilename{anchpth_4}\begin{tikzpicture}[baseline=(n22.base),label distance=-2pt,xscale=0.48, yscale=0.4] \coordinate (n1) at (4, 3 ) {}; \coordinate (n3) at (4, 0 ) {}; \coordinate (n22) at (0, 1.2) {}; \coordinate (n113) at (5.9, 1.5 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt] (n11) at (3.4 , 2.5 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt] (n2) at (3.4 , 1.9 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt] (n31) at (3.4 , 0.4 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptstyle \phantom{+}\!\!\!p$] (n4) at (5.50, 1.50) {}; \coordinate (n5) at (0, 3 ) {}; \coordinate (n6) at (0, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptstyle p+1$] (n7) at (7, 1.50) {}; \node (n8) at (8.50, 1.50) {}; \node (n9) at (10, 1.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label={below:$\scriptstyle p+k-1=n$}] (n10) at (11.50, 1.50) {}; \draw[decoration={complete sines,segment length=3mm, amplitude=1mm},decorate] (n1) -- (n5); \draw (n5) -- (n6); \draw[rounded corners] (n1) -- (n113) -- (n3); \draw[decoration={complete sines,segment length=3mm, amplitude=1mm},decorate] (n6) -- (n3); \path (n5) to node[pos=0.5,xshift=-1ex] {{$I\setminus I^{(p)}_p$}} (n3); \foreach \x/\y in {4/7, 7/8, 9/10} \draw [stealth-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {11/4, 2/4, 31/4} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 3\pgflinewidth, -, shorten <= 2.50pt, shorten >= 2.50pt] (n2) to (n31); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, -, shorten <= -2.50pt, shorten >= -2.50pt] (n8) to (n9); \draw [decorate,decoration={brace,amplitude=5pt,mirror,raise=3ex}] (5.20, 1.50) -- (11.80, 1.50) node[midway,yshift=-6ex]{$I_p$}; \end{tikzpicture} $\CH(J)=$ \tikzsetnextfilename{anchpth_5}\begin{tikzpicture}[baseline=(n22.base),label distance=-2pt,xscale=0.48, yscale=0.4] \coordinate (n1) at (4, 3 ) {}; \coordinate (n3) at (4, 0 ) {}; \coordinate (n22) at (0, 1.2) {}; \coordinate (n113) at (5.9, 1.5 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt] (n11) at (3.4 , 2.5 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt] (n2) at (3.4 , 1.9 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt] (n31) at (3.4 , 0.4 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptstyle \phantom{+}\!\!\!p$] (n4) at (5.50, 1.50) {}; \coordinate (n5) at (0, 3 ) {}; \coordinate (n6) at (0, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptstyle p+1$] (n7) at (7, 1.50) {}; \node (n8) at (8.50, 1.50) {}; \node (n9) at (10, 1.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label={below:$\scriptstyle p+k-1=n$}] (n10) at (11.50, 1.50) {}; \draw[decoration={complete sines,segment length=3mm, amplitude=1mm},decorate] (n1) -- (n5); \draw (n5) -- (n6); \draw[rounded corners] (n1) -- (n113) -- (n3); \draw[decoration={complete sines,segment length=3mm, amplitude=1mm},decorate] (n6) -- (n3); \path (n5) to node[pos=0.5,xshift=-1ex] {{$J\setminus J^{(p)}_p$}} (n3); \foreach \x/\y in {4/7, 7/8, 9/10} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {11/4, 2/4, 31/4} 
\draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 3\pgflinewidth, -, shorten <= 2.50pt, shorten >= 2.50pt] (n2) to (n31); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, -, shorten <= -2.50pt, shorten >= -2.50pt] (n8) to (n9); \draw [decorate,decoration={brace,amplitude=5pt,mirror,raise=3ex}] (5.20, 1.50) -- (11.80, 1.50) node[midway,yshift=-6ex]{$J_p=I_p^{op}$}; \end{tikzpicture} \end{center} and the incidence matrices of posets $I$ and $J$ are as follows: \begin{center} $C_{I}=$ \tikzsetnextfilename{matrix_1}\begin{tikzpicture} [baseline=(n6.base), every node/.style={outer sep=0pt,inner sep=0pt},every left delimiter/.style={xshift=1pt}, every right delimiter/.style={xshift=-1pt}] \matrix (m1) [matrix of nodes, ampersand replacement=\&, nodes={minimum height=1.6em,minimum width=1.6em,text depth=-.25ex,text height=0.6ex,inner xsep=0.0pt,inner ysep=0.0pt, execute at begin node=$\scriptscriptstyle, execute at end node=$}, column sep={0pt,between borders},row sep={0pt,between borders}, left delimiter=\lbrack, right delimiter=\rbrack] { |(n1)| \& \& |(n22)| \& |(n3)| c_{1,p} \& |(n14)| \& \& |(n23)| \\ \& |(n8)| \& \& \& \& \& \\ \& \& |(n4)| \& |(n2)| c_{p\mppms 1,p} \& \& \& |(n24)| \\ |(n6)| c_{p,1} \& \& |(n5)| c_{p, p\mppms 1} \& |(n7)| 1 \& |(n16)| 0 \& \& |(n15)| 0\\ |(n19)| c_{p,1} \& \& |(n18)| c_{p, p\mppms 1} \& |(n17)| 1 \& |(n9)| 1 \& \& |(n25)| \\ \& \& \& \& \& |(n10)| \& \\ |(n21)| c_{p,1} \& \& |(n20)| c_{p, p\mppms 1} \& |(n13)| 1 \& |(n12)| 1 \& \& |(n11)| 1\\ }; \path (n1) to node[pos=0.5, scale=1.5] {{$\scriptstyle C_{I\setminus I_p}$}} (n4); \draw[-] ([xshift=-2.4pt]n3.north west) to ([xshift=-2.4pt]n13.south west); \draw[-] ([xshift=2.4pt]n13.south east) to ([xshift=2.4pt]n3.north east); \draw[-] (n6.south west) to (n15.south east); \draw[-] (n15.north east) to (n6.north west); \path (n3.north east) to node[pos=0.5, scale=2] {{\scriptsize $0$}} (n15.north east); \path (n12.south west) to node[pos=0.7, scale=1.5] {{\scriptsize $0$}} (n15.south east); \foreach \x/\y in {6/5, 9/11, 12/11, 16/15, 19/18, 21/20} \draw[line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth] (n\x) to (n\y); \foreach \x/\y in {9/12, 17/13} \draw[line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, shorten <= 2pt] (n\x) to (n\y); \foreach \x/\y in {3/2, 18/20, 19/21} \draw[line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, shorten <= 3pt] (n\x) to (n\y); \foreach \i/\j in {1/1, 22/p-1, 3/\phantom{+}p\phantom{+}, 14/p+1, 23/n} \node[gray, scale=0.7] at ([yshift=2mm]n\i.north) {$\scriptstyle \j$}; \foreach \i/\j in {23/1, 24/p-1, 15/p, 25/p+1, 11/n} \node[gray, scale=0.7,text width=1.5em,anchor=west] at ([xshift=3mm]n\i.east) {$\scriptstyle \j$}; \end{tikzpicture},\quad $C_{J}=$ \tikzsetnextfilename{matrix_2}\begin{tikzpicture} [baseline=(n6.base), every node/.style={outer sep=0pt,inner sep=0pt},every left delimiter/.style={xshift=1pt}, every right delimiter/.style={xshift=-1pt}] \matrix (m1) [matrix of nodes, ampersand replacement=\&, nodes={minimum height=1.6em,minimum width=1.6em,text depth=-.25ex,text height=0.6ex,inner xsep=0.0pt,inner ysep=0.0pt, execute at begin node=$\scriptscriptstyle, execute at end node=$},column sep={0pt,between borders}, row sep={0pt,between borders}, left delimiter=\lbrack, right delimiter=\rbrack] { |(n1)| \& \& |(n21)| \& |(n3)| c_{1,p} \& |(n18)| c_{1,p} \& \& |(n17)| c_{1,p} \\ \& |(n8)| \& \& 
\& \& \& \\ \& \& |(n4)| \& |(n2)| c_{p\mppms 1,p} \& |(n20)| c_{p\mppms 1,p} \& \& |(n19)| c_{p\mppms 1,p}\\ |(n6)| c_{p,1} \& \& |(n5)| c_{p, p\mppms 1} \& |(n7)| 1 \& |(n15)| 1 \& \& |(n14)| 1 \\ \& \& \& |(n16)| 0 \& |(n9)| 1 \& \& |(n12)| 1 \\ \& \& \& \& \& |(n10)| \& \\ \& \& \& |(n13)| 0 \& \& \& |(n11)| 1 \\ }; \path (n1) to node[pos=0.5, scale=1.5] {{$\scriptstyle C_{J\setminus J_p}$}} (n4); \draw[-] ([xshift=-2.4pt]n3.north west) to ([xshift=-2.4pt]n13.south west); \draw[-] ([xshift=2.4pt]n13.south east) to ([xshift=2.4pt]n3.north east); \draw[-] (n6.south west) to (n14.south east); \draw[-] (n14.north east) to (n6.north west); \path (n6.south west) to node[pos=0.5, scale=2] {{\scriptsize $0$}} (n13.south west); \path (n13.south east) to node[pos=0.30000000000000004, scale=1.5] {{\scriptsize $0$}} (n12.north east); \foreach \x/\y in {6/5, 9/11, 12/11, 15/14, 18/17, 20/19} \draw[line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth] (n\x) to (n\y); \foreach \x/\y in {9/12, 16/13} \draw[line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, shorten <= 2pt] (n\x) to (n\y); \foreach \x/\y in {3/2, 17/19, 18/20} \draw[line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, shorten <= 3pt] (n\x) to (n\y); \foreach \i/\j in {1/1, 21/p-1, 3/\phantom{+}p\phantom{+}, 18/p+1, 17/n} \node[gray, scale=0.7] at ([yshift=2mm]n\i.north) {$\scriptstyle \j$}; \foreach \i/\j in {17/1, 19/p-1, 14/p, 12/p+1, 11/n} \node[gray, scale=0.7,text width=1.5em,anchor=west] at ([xshift=3mm]n\i.east) {$\scriptstyle \j$}; \end{tikzpicture}. \end{center} It is straightforward to check that $C_{J} = B^{tr} C_{I} B$, where \begin{equation}\label{lemma:preflection:bmat} B\eqdef\!\! \tikzsetnextfilename{matrix_3}\begin{tikzpicture} [baseline=(n6.base), every node/.style={outer sep=0pt,inner sep=0pt},every left delimiter/.style={xshift=1pt}, every right delimiter/.style={xshift=-1pt}] \matrix (m1) [matrix of nodes, ampersand replacement=\&, nodes={minimum height=1.2em,minimum width=1.2em,text depth=-.25ex,text height=0.6ex,inner xsep=0.0pt,inner ysep=0.0pt, execute at begin node=$\scriptscriptstyle, execute at end node=$},column sep={0pt,between borders}, row sep={0pt,between borders}, left delimiter=\lbrack, right delimiter=\rbrack] { |(n1)| 1 \& \& |(n17)| \& |(n3)| 0 \& |(n13)| \& \& |(n18)| \\ \& |(n8)| \& \& \& \& \& \\ |(n21)| \& \& |(n4)| 1 \& |(n2)| 0 \& \& \& |(n20)| \\ |(n6)| 0 \& \& |(n5)| 0 \& |(n7)| 1 \& |(n15)| \phantom{-}1 \& \& |(n14)| \phantom{-}1\\ \& \& \& |(n16)| 0 \& \& \& |(n9)| -1 \\ \& \& \& \& \& |(n10)| \& \\ \& \& \& |(n12)| 0 \& |(n11)| -1 \& \& |(n19)| \\ }; \draw[-] (n3.north west) to (n12.south west); \draw[-] (n12.south east) to (n3.north east); \draw[-] (n6.south west) to (n14.south east); \draw[-] (n14.north east) to (n6.north west); \draw[line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, shorten <= 2pt, shorten >= -3pt] (n15) to (n14); \path (n3.north east) to node[pos=0.5, scale=2] {{\scriptsize $0$}} (n14.north east); \path (n6.south west) to node[pos=0.5, scale=2] {{\scriptsize $0$}} (n12.south west); \path (n21.south west) to node[pos=0.7, scale=1.2] {{\scriptsize $0$}} (n17.north east); \path (n17.north east) to node[pos=0.7, scale=1.2] {{\scriptsize $0$}} (n21.south west); \foreach \x/\y in {1/4, 9/11} \draw[line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, shorten <= 2pt] (n\x) to (n\y); \foreach \x/\y in {3/2, 6/5, 16/12} \draw[line width=1.2pt, line cap=round, dash 
pattern=on 0pt off 5\pgflinewidth, shorten <= -1pt, shorten >= -2pt] (n\x) to (n\y); \foreach \x/\y in {7/19, 19/7} \path (n\x.south east) to node[pos=0.7, scale=1.2] {{\scriptsize $0$}} (n\y.south east); \foreach \i/\j in {1/1, 17/p-1, 3/\phantom{+}p\phantom{+}, 13/p+1, 18/n} \node[gray, scale=0.7] at ([yshift=2mm]n\i.north) {$\scriptstyle \j$}; \foreach \i/\j in {18/1, 20/p-1, 14/p, 9/p+1, 19/n} \node[gray, scale=0.7,text width=1.5em,anchor=west] at ([xshift=3mm]n\i.east) {$\scriptstyle \j$}; \end{tikzpicture}\in\Gl_n(\ZZ) \end{equation} is an involutory matrix. It follows that the posets $I$ and $J$ are strong Gram $\ZZ$-congruent and \[ \delta_{J_p}J=I\approx_\ZZ J=\delta_{I_p}I. \] Now assume that $I_p\subseteq I$ is an outward $p$-anchored path of a poset $I$. Using arguments analogous to the previous case, one easily shows that the matrix $B$ defines the strong Gram $\ZZ$-congruence $I\approx_\ZZ \delta_{I_p}I$. \end{proof}
\begin{remark}\label{rmk:arbitrary_path_refl} The matrix $B\in\Gl_n(\ZZ)$ given in \eqref{lemma:preflection:bmat} defines the strong Gram $\ZZ$-congruence $I\approx_\ZZ \delta_{I_p}I$ under the assumption that $I_p\subseteq I$ is an inward or outward $p$-anchored path numbered consecutively, i.e., $I_p=p\,\rule[1.5pt]{22pt}{0.4pt}\, p\!+\!1\,\rule[1.5pt]{22pt}{0.4pt} \ldots\rule[1.5pt]{22pt}{0.4pt}\, n$. Assume now that $I_p$ is numbered arbitrarily, i.e., $I_p=p\,\rule[1.5pt]{22pt}{0.4pt}\, s_1\,\rule[1.5pt]{22pt}{0.4pt} \ldots\rule[1.5pt]{22pt}{0.4pt}\, s_k$, where $k=|I_p|-1$, and consider the permutation $\pi\colon \{s_1,\ldots, s_k\}\to\{s_1,\ldots, s_k\}$ with $\pi(s_i)\eqdef s_{k-i+1}$. One checks that, for the matrix $B_{I_p}\in\Gl_n(\ZZ)$ composed of the columns $b_1,\ldots, b_n$ with \[ b_j=\begin{cases} e_p-e_{\pi(s_i)}, & \textnormal{if }j=s_i\in\{s_1, \ldots, s_k\},\\ e_j, &\textnormal{otherwise}, \end{cases} \] where $e_i$ is the $i$-th standard basis vector in $\ZZ^n$, we have $C_{\delta_{I_p}I} = B_{I_p}^{tr} C_I B_{I_p}$; that is, the matrix $B_{I_p}$ defines the strong Gram $\ZZ$-congruence $I\approx_\ZZ \delta_{I_p}I$. \end{remark}
Interchanging an inward $p$-anchored path with an outward one (an operation defined at the level of Hasse digraphs) yields a strong Gram $\ZZ$-congruence of posets (defined at the level of incidence matrices). This fact is easily generalized to arbitrary, not necessarily inward or outward, $p$-anchored paths. \begin{corollary}\label{corr:ahchoredpath} Let $I_p\subseteq I$ be an arbitrary $p$-anchored path of a poset $I$. \begin{enumerate}[label=\normalfont{(\alph*)}] \item\label{corr:ahchoredpath:bilinear:out} $I\approx_\ZZ J$, where $J$ is a poset with an outward $p$-anchored path $J_p$ such that $I\setminus I^{(p)}_p = J\setminus J^{(p)}_p$ and $\ov\CH(J_p)=\ov\CH(I_p)$.
\begin{center} $\CH(I)=$ \tikzsetnextfilename{apth_1}\begin{tikzpicture}[baseline=(n22.base),label distance=-2pt, xscale=0.44, yscale=0.4] \coordinate (n1) at (4, 3 ) {}; \coordinate (n3) at (4, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt] (n11) at (3.4 , 2.5 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt] (n2) at (3.4 , 1.9 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt] (n31) at (3.4 , 0.4 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptstyle p$] (n4) at (5.50, 1.50) {}; \coordinate (n22) at (0, 1.2) {}; \coordinate (n5) at (0.5, 3 ) {}; \coordinate (n6) at (0.5, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptstyle s_1$] (n7) at (7, 1.50) {}; \node (n8) at (8.50, 1.50) {}; \node (n9) at (10, 1.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptstyle s_k$] (n10) at (11.50, 1.50) {}; \draw[decoration={complete sines,segment length=3mm, amplitude=1mm},decorate] (n1) -- (n5); \draw (n5) -- (n6); \draw (n1) -- (n3); \draw[decoration={complete sines,segment length=3mm, amplitude=1mm,mirror},decorate] (n3) -- (n6); \foreach \x/\y in {4/7, 7/8, 9/10} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {11/4, 2/4, 31/4} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 3\pgflinewidth, -, shorten <= 2.50pt, shorten >= 2.50pt] (n2) to (n31); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, -, shorten <= -2.50pt, shorten >= -2.50pt] (n8) to (n9); \draw [decorate,decoration={brace,amplitude=5pt,mirror,raise=3ex}] (5.20, 1.50) -- (11.80, 1.50) node[midway,yshift=-6ex]{$p$-anchored path $I_p$}; \draw [decorate,decoration={brace,amplitude=5pt,mirror,raise=2ex}] (11.80, 1.50) -- (5.20, 1.50) node[midway,yshift=6ex]{arbitrary orientation}; \end{tikzpicture} \hfill $\CH(J)=$ \tikzsetnextfilename{apth_2}\begin{tikzpicture}[baseline=(n22.base),label distance=-2pt, xscale=0.44, yscale=0.4] \coordinate (n1) at (4, 3 ) {}; \coordinate (n3) at (4, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt] (n11) at (3.4 , 2.5 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt] (n2) at (3.4 , 1.9 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt] (n31) at (3.4 , 0.4 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptstyle p$] (n4) at (5.50, 1.50) {}; \coordinate (n22) at (0, 1.2) {}; \coordinate (n5) at (0.5, 3 ) {}; \coordinate (n6) at (0.5, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptstyle s_1$] (n7) at (7, 1.50) {}; \node (n8) at (8.50, 1.50) {}; \node (n9) at (10, 1.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptstyle s_k$] (n10) at (11.50, 1.50) {}; \draw[decoration={complete sines,segment length=3mm, amplitude=1mm},decorate] (n1) -- (n5); \draw (n5) -- (n6); \draw (n1) -- (n3); \draw[decoration={complete sines,segment length=3mm, amplitude=1mm,mirror},decorate] (n3) -- (n6); \foreach \x/\y in {4/7, 7/8, 9/10} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {11/4, 2/4, 31/4} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 3\pgflinewidth, -, shorten <= 2.50pt, shorten >= 2.50pt] (n2) to (n31); \draw [line width=1.2pt, line cap=round, dash pattern=on 
0pt off 5\pgflinewidth, -, shorten <= -2.50pt, shorten >= -2.50pt] (n8) to (n9); \draw [decorate,decoration={brace,amplitude=5pt,mirror,raise=3ex}] (5.20, 1.50) -- (11.80, 1.50) node[midway,yshift=-6ex]{outward $p$-anchored path $J_p$}; \end{tikzpicture} \end{center} \item\label{corr:ahchoredpath:anyorient} For every orientation of arcs in $\CH(I_p)\subseteq\CH(I)$, the resulting poset $\wt I$ is non-negative of rank $m$ if and only if $I$ is non-negative of rank~$m$. \end{enumerate} \end{corollary} \begin{proof} It is easy to see that for every orientation of edges of the path graph $\ov\CH(I_{p})$, there exists a series of $I'_{p'}$-reflections, where $p'\in I'_{p'}\subseteq I_p$, that carries the $p$-anchored path $I_p$ into outward $p$-anchored path $J_p$, therefore~\ref{corr:ahchoredpath:bilinear:out} follows by Lemma~\ref{lemma:preflection}. Since~\ref{corr:ahchoredpath:anyorient} follows from~\ref{corr:ahchoredpath:bilinear:out}, the proof is finished. \end{proof} Summing up, changing the orientation of arcs in a $p$-anchored path does not change the non-negativity nor the rank. \begin{example} Consider the following triple of posets: $I$, $J$ and $J'$. \begin{center} \hfill $\CH(I)=$ \tikzsetnextfilename{ex16_1}\begin{tikzpicture}[baseline=(n7.base),label distance=-2pt,xscale=0.65, yscale=0.74] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 1$] (n1) at (4, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 2$] (n2) at (1, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 3$] (n3) at (3, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 4$] (n4) at (0, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 5$] (n5) at (2, 0 ) {}; \node (n7) at (3, 0 ) {$\mathclap{\phantom{7}}$}; \foreach \x/\y in {1/3, 4/2, 5/2, 5/3} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture} \hfill $\CH(J)=$ \tikzsetnextfilename{ex16_2}\begin{tikzpicture}[baseline=(n7.base),label distance=-2pt,xscale=0.65, yscale=0.74] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 1$] (n1) at (4, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 2$] (n2) at (1, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 3$] (n3) at (3, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 4$] (n4) at (0, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 5$] (n5) at (2, 0 ) {}; \node (n7) at (3, 0 ) {$\mathclap{\phantom{7}}$}; \foreach \x/\y in {2/5, 3/1, 4/2, 5/3} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture} \hfill $\CH(J')=$ \tikzsetnextfilename{ex16_3}\begin{tikzpicture}[baseline=(n7.base),label distance=-2pt,xscale=0.65, yscale=0.74] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 1$] (n1) at (4, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 2$] (n2) at (1, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 3$] (n3) at (3, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 
4$] (n4) at (0, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 5$] (n5) at (2, 0 ) {}; \node (n7) at (3, 0 ) {$\mathclap{\phantom{7}}$}; \foreach \x/\y in {1/3, 2/4, 3/5, 5/2} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture} \hfill \mbox{} \end{center} We have $I\approx_\ZZ J$ and $I\approx_\ZZ J'$, since $J\! =\! \delta_{\{2, 5,3,1\}} \delta_{\{5,3,1\}} \delta_{\{3,1\}} I$ and $J'\! =\! \delta_{\{3, 5, 2, 4\}} \delta_{\{5, 2, 4\}} \delta_{\{2, 4\}} I$. Moreover, using the description given in \Cref{lemma:preflection} and Remark~\ref{rmk:arbitrary_path_refl} we get the equality $C_{J}=B_1^{tr}C_IB_1$, where \begin{equation*} B_1= \begin{bsmallmatrix*}[r] \shortminus 1 & \phantom{\shortminus}0 & \phantom{\shortminus}0 & \phantom{\shortminus}0 & \phantom{\shortminus}0\\ 0 & 1 & 0 & 0 & 0\\ 1 & 0 & 1 & 0 & 0\\ 0 & 0 & 0 & 1 & 0\\ 0 & 0 & 0 & 0 & 1 \end{bsmallmatrix*} \begin{bsmallmatrix*}[r] 0 & \phantom{\shortminus}0 & \shortminus 1 &\phantom{\shortminus} 0 &\phantom{\shortminus} 0\\ 0 & 1 & 0 & 0 & 0\\ \shortminus 1 & 0 & 0 & 0 & 0\\ 0 & 0 & 0 & 1 & 0\\ 1 & 0 & 1 & 0 & 1 \end{bsmallmatrix*} \begin{bsmallmatrix*}[r] 0 &\phantom{\shortminus} 0 & 0 &\phantom{\shortminus} 0 & \shortminus 1\\ 1 & 1 & 1 & 0 & 1\\ 0 & 0 & \shortminus 1 & 0 & 0\\ 0 & 0 & 0 & 1 & 0\\ \shortminus 1 & 0 & 0 & 0 & 0 \end{bsmallmatrix*} = \begin{bsmallmatrix*}[r] 0 &\phantom{\shortminus} 0 & \shortminus 1 &\phantom{\shortminus} 0 & 0\\ 1 & 1 & 1 & 0 & 1\\ 0 & 0 & 1 & 0 & 1\\ 0 & 0 & 0 & 1 & 0\\ \shortminus 1 & 0 & \shortminus 1 & 0 & \shortminus 1 \end{bsmallmatrix*}. \end{equation*} Analogously, one can calculate such a matrix $B_2\in\Gl_5(\ZZ)$, that $C_{J'}=B_2^{tr}C_IB_2$. \end{example} \begin{remark}\label{remark:hassepath:anyorient} In view of \Cref{corr:ahchoredpath}\ref{corr:ahchoredpath:anyorient}, we usually omit the orientation of the edges in $p$-anchored paths when presenting Hasse digraphs of finite posets. \end{remark} \section{Main results} The main result of this work is a complete description of connected non-negative posets $I$ of Dynkin type $\Dyn_I=\AA_m$ in terms of their Hasse digraphs $\CH(I)$. First, we show that \Cref{thm:a:main} holds for ``trees'', i.e., posets $I$ with graph $\ov \CH(I)$ being a tree. \begin{lemma}\label{lemma:trees} If $I=(V,\leq_I)$ is such a connected poset of size $n$ that the graph $\ov\CH(I)$ is a tree, then exactly one of the following conditions holds. \begin{enumerate}[label=\normalfont{(\alph*)}] \item\label{lemma:trees:posit} The poset $I$ is non-negative of rank $n$ and $\ov \CH(I)$ is isomorphic to a simply-laced Dynkin diagram $\Dyn_I\in\{\AA_n, \DD_n, \EE_6, \EE_7, \EE_8 \}$ of Table~\ref{tbl:Dynkin_diagrams}. \item\label{lemma:trees:nneg} The poset $I$ is non-negative of rank $n-1$ and $\ov \CH(I)$ is isomorphic to a simply-laced Euclidean diagram $\widetilde{\mathrm{D}}\mathrm{yn}_I \in \{\widetilde{\DD}_n,\ab \widetilde{\EE}_6,\ab \widetilde{\EE}_7,\ab \widetilde{\EE}_8\}$ of Table~\ref{tbl:Euklid_diag}. \item\label{lemma:trees:indef} The poset $I$ is indefinite, i.e., the symmetric Gram matrix $G_I$ is indefinite. \end{enumerate} \end{lemma} \begin{proof} Assume that $I$ is such a connected poset that the graph $\ov\CH(I)$ is a tree, where $\CH(I)=(V, A)$ is the Hasse digraph of $I$, and let $C_I\in\MM_n(\ZZ)$ be its incidence matrix \eqref{df:incmat}. 
By \cite[Proposition 2.12]{simsonIncidenceCoalgebrasIntervally2009}, the matrix $C_{I}^{-1}=[c_{ab}]\in\MM_n(\ZZ)$ has coefficients \begin{equation*} c_{ab}=\begin{cases} \phantom{-}1,\textnormal{ iff }a=b,\\ -1,\textnormal{ iff } a\to b \in A,\\ \phantom{-}0, \textnormal{ otherwise}, \end{cases} \end{equation*} i.e., the matrix $C_I^{-1}$ uniquely encodes $\CH(I)$. It is straightforward to check that \begin{equation}\label{eq:digraph_euler_q} q_{\CH(I)}(x)\eqdef \tfrac{1}{2}x(C_{I}^{-1} + C_{I}^{-tr}) x^{tr} = \sum_{i\in V} x_i^2 - \sum_{a\to b\,\in A} x_{a}x_{b} \end{equation} is the \textit{Euler quadratic form} of $\CH(I)$ in the sense of~\cite[Section VII.4]{ASS}. Moreover, we have \[ C_I^{tr} G_{\CH(I)} C_I=\tfrac{1}{2} C_I^{tr} (C_{I}^{-1} + C_{I}^{-tr}) C_I = \tfrac{1}{2}(C_I^{tr}+C_I) = G_I, \] where $G_{\CH(I)}\eqdef \tfrac{1}{2}(C_{I}^{-1} + C_{I}^{-tr})$ is the symmetric Gram matrix of the Euler quadratic form $q_{\CH(I)}$ \eqref{eq:digraph_euler_q}. Hence, the lemma follows by~\cite[Corollary 2.4]{gasiorekOnepeakPosetsPositive2012} and~\cite[Proposition VII.4.5]{ASS}. \end{proof}
One of the key observations used in the proof of \Cref{thm:a:main} is that the graph $\ov\CH(I)$, where $I$ is a connected non-negative poset of Dynkin type $\Dyn_I=\AA_m$, has no vertices of degree larger than $2$. In the following theorem, we give a characterization of such posets. \begin{theorem}\label{thm:digraphdegmax2} Assume that $\CD=(V,A)$, where $V=\{1,\ldots,n\}$, is a connected acyclic digraph such that $\deg_{\ov\CD}(v)\leq 2$ for every $v\in V$. Exactly one of the following conditions holds. \begin{enumerate}[label=\normalfont{(\alph*)}] \item\label{thm:digraphdegmax2:path} $\ov \CD\simeq \AA_n$ and $\CD$ is the Hasse digraph of the positive poset $I_\CD$ with $\Dyn_{I_\CD}=\AA_n$. \item\label{thm:digraphdegmax2:cycle} $\ov \CD$ is a cycle graph. Moreover: \begin{enumerate}[label=\normalfont{(b\arabic*)}, leftmargin=4ex] \item\label{thm:digraphdegmax2:cycle:onesink} $\CD$ has exactly one sink and \begin{enumerate}[label=\normalfont{(\roman*)}, leftmargin=1ex] \item\label{thm:digraphdegmax2:cycle:onesink:posita} $\Dyn_{I_\CD}=\AA_n$, $I_\CD$ is positive, $ \CD\simeq\!\!\!
\smash{\tikzsetnextfilename{th20_p1}\begin{tikzpicture}[baseline={([yshift=-10.75pt]current bounding box)},label distance=-2pt,xscale=0.65, yscale=0.74] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=left:$\scriptscriptstyle 1$] (n1) at (0, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 2$] (n2) at (1, 0.50) {}; \node (n3) at (2, 0.50) {$\scriptscriptstyle $}; \node (n4) at (3, 0.50) {$\scriptscriptstyle $}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle n\mppmss 2$] (n5) at (4, 0.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle n\mppmss 1$] (n6) at (5, 0.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptstyle n$] (n7) at (6, 0 ) {}; \foreach \x/\y in {1/2, 1/7, 5/6, 6/7} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-stealth, shorten <= 2.50pt, shorten >= -2.50pt] (n2) to (n3); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, -, shorten <= -1.00pt, shorten >= -2.50pt] (n3) to (n4); \draw [-stealth, shorten <= -2.50pt, shorten >= 2.50pt] (n4) to (n5); \end{tikzpicture}}$\!, and $\CH(I_\CD)\neq \CD$, \item\label{thm:digraphdegmax2:cycle:onesink:positd} $\Dyn_{I_\CD}=\DD_n$, $I_\CD$ is positive, $\CD\simeq$\!\!\! \tikzsetnextfilename{th20_p2}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt,xscale=0.65, yscale=0.74] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=left:$\scriptscriptstyle 1$] (n1) at (0, 0.25 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 2$] (n2) at (1, 0.50) {}; \node (n3) at (2, 0.50) {$\scriptscriptstyle $}; \node (n4) at (3, 0.50) {$\scriptscriptstyle $}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle n\mppmss 3$] (n5) at (4, 0.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle n\mppmss 2$] (n6) at (5, 0.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptstyle n$] (n7) at (6, 0.25 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle n\mppmss 1$] (n8) at (3, 0 ) {}; \foreach \x/\y in {1/2, 1/8, 5/6, 6/7, 8/7} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-stealth, shorten <= 2.50pt, shorten >= -2.50pt] (n2) to (n3); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, -, shorten <= -1.00pt, shorten >= -2.50pt] (n3) to (n4); \draw [-stealth, shorten <= -2.50pt, shorten >= 2.50pt] (n4) to (n5); \end{tikzpicture}\!, and $\CH(I_\CD)= \CD$, \item\label{thm:digraphdegmax2:cycle:onesink:posite} $\Dyn_{I_\CD}=\EE_\star$, $\CH(I_\CD)=\CD$ and \begin{enumerate}[label=\textnormal{\textbullet}, leftmargin=1ex] \item $I_\CD$ is positive, $\CD\simeq$\!\!\! 
\tikzsetnextfilename{th20_p3}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)}, label distance=-2pt,xscale=0.55, yscale=0.54] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 1$] (n1) at (0, 0.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 2$] (n2) at (1, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 3$] (n3) at (2, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 4$] (n4) at (1, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 5$] (n5) at (2, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 6$] (n6) at (3, 0.50) {}; \foreach \x/\y in {1/2, 1/4, 2/3, 3/6, 4/5, 5/6} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture} or\ \ $\CD\simeq$\!\!\! \tikzsetnextfilename{th20_p4}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)}, label distance=-2pt,xscale=0.55, yscale=0.54] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 1$] (n1) at (0, 0.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 2$] (n2) at (1, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 3$] (n3) at (2, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 4$] (n4) at (3, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 5$] (n5) at (1.50, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 6$] (n6) at (2.50, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 7$] (n7) at (4, 0.50) {}; \foreach \x/\y in {1/2, 1/5, 2/3, 3/4, 4/7, 5/6, 6/7} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture} or \ $\CD\simeq$\!\!\! \tikzsetnextfilename{th20_p5}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)}, label distance=-2pt,xscale=0.55, yscale=0.54] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 1$] (n1) at (0, 0.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 2$] (n2) at (1, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 3$] (n3) at (2, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 4$] (n4) at (3, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 5$] (n5) at (4, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 6$] (n6) at (2, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 7$] (n7) at (3, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 8$] (n8) at (5, 0.50) {}; \foreach \x/\y in {1/2, 1/6, 2/3, 3/4, 4/5, 5/8, 6/7, 7/8} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture} with $\Dyn_{I_\CD}=\EE_6$, $\EE_7$ and $\EE_8$, respectively; \item $I_\CD$ is principal, $\CD\simeq$\!\!\! 
\tikzsetnextfilename{th20_p6}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)}, label distance=-2pt,xscale=0.55, yscale=0.54] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 1$] (n1) at (0, 0.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 2$] (n2) at (1, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 3$] (n3) at (2, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 4$] (n4) at (3, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 5$] (n5) at (1, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 6$] (n6) at (2, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 7$] (n7) at (3, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 8$] (n8) at (4, 0.50) {}; \foreach \x/\y in {1/2, 1/5, 2/3, 3/4, 4/8, 5/6, 6/7, 7/8} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture} or \ $\CD\simeq$\!\!\! \tikzsetnextfilename{th20_p7}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)}, label distance=-2pt,xscale=0.55, yscale=0.54] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 1$] (n1) at (0, 0.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 2$] (n2) at (1, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 3$] (n3) at (2, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 4$] (n4) at (3, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 5$] (n5) at (4, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 6$] (n6) at (5, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 7$] (n7) at (2.50, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 8$] (n8) at (3.50, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 9$] (n9) at (6, 0.50) {}; \foreach \x/\y in {1/2, 1/7, 2/3, 3/4, 4/5, 5/6, 6/9, 7/8, 8/9} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture}\\ with $\Dyn_{I_\CD}=\EE_7$ and $\EE_8$, respectively; \end{enumerate} \item\label{thm:digraphdegmax2:cycle:onesink:indef} otherwise, the poset $I_\CD$ is indefinite. \end{enumerate} \item\label{thm:digraphdegmax2:cycle:multsink} $\CD$ has more than one sink, $I_\CD$ is principal with $\Dyn_{I_\CD}=\AA_{n-1}$ and $\CH(I_\CD)=\CD$. \end{enumerate} \end{enumerate} \end{theorem}
\begin{proof} If $\CD$ is a connected acyclic digraph such that $\deg_{\ov\CD}(v)\leq 2$ for every $v\in V$, then $\ov \CD$ is either a cycle or a path graph.\smallskip \ref{thm:digraphdegmax2:path} If $\ov \CD$ is a path graph, then clearly $\ov\CD$ is a tree and \ref{thm:digraphdegmax2:path} follows by \Cref{lemma:trees}\ref{lemma:trees:posit}.\smallskip \ref{thm:digraphdegmax2:cycle} Assume that $\ov \CD$ is a cycle. It is easy to see that $\CD$ is composed of $2k$ oriented paths and has exactly $k$ sources and $k$ sinks, where $0\neq k\in\NN$.
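For instance, for $k=2$ the smallest such digraph is the alternately oriented $4$-cycle
\[
\CD\colon\quad 1\to 2\leftarrow 3\to 4\leftarrow 1,
\]
which is composed of $2k=4$ oriented paths (single arcs), with sources $1$, $3$ and sinks $2$, $4$.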
First, we assume that $k=1$. \ref{thm:digraphdegmax2:cycle:onesink} Since $\CD$ has exactly one sink, it is composed of two oriented paths, and we have \begin{equation}\label{eq:digraph_Apr} \CD \simeq \CA_{p,r}\ \ \eqdef \tikzsetnextfilename{th20prf_p1}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt,xscale=0.65, yscale=0.74] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=left:$\scriptscriptstyle 1$] (n1) at (0, 0.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 2$] (n2) at (1, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle p\mpppss 1$] (n3) at (1.50, 0 ) {}; \node (n4) at (2, 1 ) {$\scriptscriptstyle $}; \node (n5) at (2.50, 0 ) {$\scriptscriptstyle $}; \node (n6) at (3, 1 ) {$\scriptscriptstyle $}; \node (n7) at (3.50, 0 ) {$\scriptscriptstyle $}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle p\mppmss 1$] (n8) at (4, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle p$] (n9) at (5, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle p\mpppss r\mppmss 1$] (n10) at (4.50, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:{$\scriptstyle p\mppps r=n$}] (n11) at (6, 0.50) {}; \foreach \x/\y in {1/2, 1/3, 8/9, 9/11, 10/11} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {2/4, 3/5} \draw [-stealth, shorten <= 2.50pt, shorten >= -2.50pt] (n\x) to (n\y); \foreach \x/\y in {4/6, 5/7} \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, -, shorten <= -1.00pt, shorten >= -2.50pt] (n\x) to (n\y); \foreach \x/\y in {6/8, 7/10} \draw [-stealth, shorten <= -2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture} \end{equation} where $1\leq r\leq p$ and $p+r=n\geq 3$. We note that $I_\CD$ is a one-peak poset and recall that, up to the isomorphism of Hasse digraphs, all such positive and principal posets are classified in~\cite[Theorem 5.2]{gasiorekOnepeakPosetsPositive2012} and~\cite[Theorem~3.5]{gasiorekAlgorithmicStudyNonnegative2015} (see Theorem~\ref{thm:onepeak_posit} for the positive case). This classification shows that the following cases are possible. \ref{thm:digraphdegmax2:cycle:onesink:posita} For $r=1$ (i.e., $\CD \simeq \CA_{n-1,1}$) we have $\CH(I_\CD)={}_{0}\AA^*_{n-1}\neq \CD$ and $\Dyn_{I_\CD}=\AA_n$; \ref{thm:digraphdegmax2:cycle:onesink:positd} for $r=2$ (i.e., $\CD \simeq \CA_{n-2,2}$) we have $\CH(I_\CD)=\wh\DD^*_{n-2}\diamond \AA_{1} = \CD$ and $\Dyn_{I_\CD}=\DD_n$, hence thesis follows by~\cite[Theorem 5.2]{gasiorekOnepeakPosetsPositive2012}. \ref{thm:digraphdegmax2:cycle:onesink:posite} If $r>2$, then exactly $5$ digraphs of the shape~\eqref{eq:digraph_Apr} define non-negative posets: \begin{itemize} \item $\CA_{3,3}\simeq \PP_{5}$, $\CA_{4,3}\simeq \PP_{24}$ and $\CA_{5,3}\simeq \PP_{93}$ are Hasse digraphs of a positive poset of the Dynkin type $\EE_6$, $\EE_7$ and $\EE_8$, respectively, see~\cite[Tables 6.1–6.3]{gasiorekOnepeakPosetsPositive2012}; \item $\CA_{4,4}$ and $\CA_{6,3}$ are Hasse digraphs of a principal poset of the Dynkin type $\EE_7$ and $\EE_8$, respectively, see~\cite{gasiorekCoxeterTypeClassification2019}. \end{itemize} In each of the remaining cases, the poset $I_{\CA_{p,r}}$ contains one of the subposets: $I_{\CA_{7,3}}$ or $I_{\CA_{5,4}}$. 
\begin{center} $\CA_{7,3}=$ \tikzsetnextfilename{th20prf_p22}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt,xscale=0.65, yscale=0.74] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 1$] (n1) at (0, 0.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 2$] (n2) at (1, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 3$] (n3) at (2, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 4$] (n4) at (3, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 5$] (n5) at (4, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 6$] (n6) at (5, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 7$] (n7) at (6, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 8$] (n8) at (3, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 9$] (n9) at (4, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 10$] (n10) at (7, 0.50) {}; \foreach \x/\y in {1/2, 1/8, 2/3, 3/4, 4/5, 5/6, 6/7, 7/10, 8/9, 9/10} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture}\qquad $\CA_{5,4}=$ \tikzsetnextfilename{th20prf_p23}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt,xscale=0.65, yscale=0.74] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 1$] (n1) at (0, 0.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 2$] (n2) at (1, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 3$] (n3) at (2, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 4$] (n4) at (3, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 5$] (n5) at (4, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 6$] (n6) at (1.50, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 7$] (n7) at (2.50, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 8$] (n8) at (3.50, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 9$] (n9) at (5, 0.50) {}; \foreach \x/\y in {1/2, 1/6, 2/3, 3/4, 4/5, 5/9, 6/7, 7/8, 8/9} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture} \end{center} Since \begin{itemize} \item $q_{\CA_{7,3}}([11,\ab -3,\ab -3,\ab -3,\ab -3,\ab -3,\ab -3,\ab -7,\ab -7,\ab 10])=-5$ and \item $q_{\CA_{5,4}}([11,\ab -4,\ab -4,\ab -4,\ab -4,\ab -5,\ab -5,\ab -5,\ab 9])=-9$, \end{itemize} these posets are indefinite and \ref{thm:digraphdegmax2:cycle:onesink:indef} follows (see Remark~\ref{rmk:indef}).\pagebreak \ref{thm:digraphdegmax2:cycle:multsink} Assume that $\ov \CD$ is a cycle that is composed of $s\eqdef 2k>2$ oriented paths. 
Without loss of generality, we can assume that \begin{center} $\ov \CD\ \ \simeq$ \tikzsetnextfilename{th20prf_p2}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt,xscale=0.64, yscale=0.64] \draw[draw, fill=cyan!10](0,1) circle (19pt); \draw[draw, fill=cyan!10](5,2) circle (17pt); \draw[draw, fill=cyan!10](9,1) circle (19pt); \draw[draw, fill=cyan!10](5,0) circle (17pt); \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=left:{$\scriptscriptstyle 1=r_1$}] (n1) at (0, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above left:$\scriptscriptstyle 2$] (n2) at (1, 2 ) {}; \node (n3) at (2, 2 ) {$\scriptscriptstyle $}; \node (n4) at (3, 2 ) {$\scriptscriptstyle $}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle r_2\mppmss 1$] (n5) at (4, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle r2$] (n6) at (5, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle r_2\mpppss 1$] (n7) at (6, 2 ) {}; \node (n8) at (7, 2 ) {$\scriptscriptstyle $}; \node (n9) at (8, 2 ) {$\scriptscriptstyle $}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptscriptstyle r_t$] (n10) at (9, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below right:$\scriptscriptstyle r_t\mpppss 1$] (n11) at (8, 0 ) {}; \node (n12) at (7, 0 ) {$\scriptscriptstyle $}; \node (n13) at (6, 0 ) {$\scriptscriptstyle $}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle n\mppmss 1$] (n14) at (2, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below left:$\scriptscriptstyle n$] (n15) at (1, 0 ) {}; \node (n16) at (3, 0 ) {}; \node (n17) at (4, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle r_s$] (n18) at (5, 0 ) {}; \foreach \x/\y in {1/2, 5/6, 6/7, 10/11, 14/15, 15/1} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {2/3, 7/8, 11/12, 18/17} \draw [-, shorten <= 2.50pt, shorten >= -2.50pt] (n\x) to (n\y); \foreach \x/\y in {3/4, 8/9, 12/13, 17/16} \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, -, shorten <= -1.00pt, shorten >= -2.50pt] (n\x) to (n\y); \foreach \x/\y in {4/5, 9/10, 13/18, 16/14} \draw [-, shorten <= -2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture} \end{center} where every $r_1,\ldots,r_s\in \{1,\ldots, n \}$ is either a source or a sink and \begin{itemize} \item $1 = r_1 < r_2 < \cdots < r_t<\cdots < r_s$, \item $\{1,\ldots,r_2\}$, $\{r_2,\ldots,r_3\}$, $\ldots$\,, $\{r_{s-1},\ldots,r_s\},\{r_s,\ldots,n, 1\}\simeq\, \bullet \raisebox{-1.5pt}{\parbox{25pt}{\rightarrowfill}} \,\hdashrule[1.5pt]{12pt}{0.4pt}{1pt} \raisebox{-1.5pt}{\parbox{25pt}{\rightarrowfill}} \bullet$. \end{itemize} Since the quadratic form $q_{I_\CD}\colon \ZZ^n\to \ZZ$ \eqref{eq:quadratic_form} is given by the formula: {\allowdisplaybreaks\begin{align*} q_{I_\CD}(x) =& \sum_{i} x_i^2 \,\,+ \sum_{{1\leq t < s}} \Big(\sum_{r_t\leq i<j \leq r_{t+1}} x_i x_j\Big) + \sum_{r_s\leq i<j \leq n} x_i x_j + x_1(x_{r_s}+\cdots+ x_n)\\ =& \sum_{i\not\in\{r_1,\ldots,r_s \}} \frac{1}{2}x_i^2 + \frac{1}{2} \sum_{1\leq t < s} \Big(\sum_{r_t\leq i\leq r_{t+1}} x_i\Big)^2 + \frac{1}{2}(x_{r_s} + \cdots + x_{n} + x_1)^2, \end{align*}}the poset $I_\CD$ is non-negative. 
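For instance, for the alternately oriented $4$-cycle $1\to 2\leftarrow 3\to 4\leftarrow 1$, where every vertex is a source or a sink (so the sum over $i\not\in\{r_1,\ldots,r_s\}$ is empty), this decomposition specializes to
\begin{align*}
q_{I_\CD}(x) &= x_1^2+x_2^2+x_3^2+x_4^2 + x_1x_2 + x_2x_3 + x_3x_4 + x_4x_1\\
&= \tfrac{1}{2}(x_1+x_2)^2 + \tfrac{1}{2}(x_2+x_3)^2 + \tfrac{1}{2}(x_3+x_4)^2 + \tfrac{1}{2}(x_4+x_1)^2 \geq 0.
\end{align*}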
Consider the vector $h_\CD=[h_1,\ldots,h_n]\in\ZZ^n$, where $h_i=0$ if $i\not\in\{r_1,\ldots,r_s \}$, while $h_{r_i}=1$ if $i$ is odd and $h_{r_i}=-1$ if $i$ is even. That is, $h_\CD\in\ZZ^n$ has $s=2k$ non-zero coordinates (equal $1$ and $-1$ alternately). It is straightforward to check that \[ q_{I_\CD}(h_\CD) = \tfrac{1}{2}(h_{r_1}+h_{r_2})^2+\cdots+\tfrac{1}{2}(h_{r_{s-1}}+h_{r_s})^2 +\tfrac{1}{2}(h_{r_s}+h_1)^2=0, \] i.e., $I_\CD$ is not positive and the first coordinate of the vector $h_\CD\in\Ker q_{I_\CD}$ equals $h_1=1$. Since $\smash{\ov \CD^{(1)}}\simeq \AA_{n-1}$, by~\ref{thm:digraphdegmax2:path} the poset $I_{\CD^{(1)}}\subset I_\CD$ is positive and $\Dyn_{I_{\CD^{(1)}}}\!=\AA_{n-1}$. Hence, we conclude that $I_\CD$ is principal of Dynkin type $\Dyn_{I_\CD}\!=\AA_{n-1}$, see \Cref{df:Dynkin_type}. \end{proof}
One way to prove that a particular poset $I$ is indefinite is to show that it contains a subposet $J\subseteq I$ that is indefinite (we use this argument in the proof of Theorem~\ref{thm:digraphdegmax2}\ref{thm:digraphdegmax2:cycle:onesink:posite}). The following lemma presents a list of indefinite posets used further in the paper. \begin{lemma}\label{lemma:posindef} If $I$ is a finite poset whose Hasse digraph $\CH(I)$ is isomorphic to one of the digraphs $\CF_1,\ldots,\CF_7$ given in \Cref{tbl:indefposets}, then $I$ is indefinite.\vspace*{-0.3ex} {\newcommand{\tsep}{\,} \begin{longtable}{@{}c@{\tsep}c@{\tsep}c@{\tsep}c@{\tsep}c@{\tsep}c@{\tsep}c@{\tsep}c@{\tsep}c@{\tsep}c@{\tsep}c@{\tsep}c@{}} \multicolumn{3}{@{\tsep}c@{\tsep}}{$\CF_1\colon\!\!\!\!$ \tikzsetnextfilename{indef_f1}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt, xscale=0.65, yscale=1.0] \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptscriptstyle \mppmss 2$] (n1) at (1, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 2$] (n2) at (0, 0.50) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptscriptstyle \mppmss 2$] (n3) at (1, 1 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label={[xshift=-0.2ex]above right:$\scriptscriptstyle 2$}] (n4) at (2, 0.50) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle \mppmss 1$] (n5) at (3, 0.50) {}; \foreach \x/\y in {2/1, 2/3, 4/1, 4/3} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n4) to (n5); \end{tikzpicture}} & \multicolumn{3}{@{\tsep}c@{\tsep}}{$\CF_2\colon\!\!\!\!$ \tikzsetnextfilename{indef_f2}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt, xscale=0.65, yscale=1.00] \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle \mppmss 4$] (n1) at (3, 0.50) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle \mppmss 4$] (n2) at (2, 0.50) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle \mppmss 6$] (n3) at (0, 0.50) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptscriptstyle 7$] (n4) at (1, 1 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptscriptstyle 5$] (n5) at (1, 0 ) {}; \foreach \x/\y in {4/2, 4/3, 5/2, 5/3} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n2) to (n1); \end{tikzpicture}} &
\multicolumn{3}{@{\tsep}c@{\tsep}}{$\CF_3\colon\!\!\!\!$ \tikzsetnextfilename{indef_f3}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt, xscale=0.65, yscale=0.5] \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label={[xshift=0.2ex]below:$\scriptscriptstyle \mppmss 20$}] (n1) at (4, 1 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 4$] (n2) at (3, 1 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 4$] (n3) at (2, 1 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 4$] (n4) at (1, 1 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 4$] (n5) at (0, 1 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 5$] (n6) at (3, 2 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 5$] (n7) at (2, 2 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 5$] (n8) at (1, 2 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptscriptstyle 9$] (n9) at (3, 0 ) {}; \foreach \x/\y in {1/2, 1/6, 1/9, 2/3, 3/4, 4/5, 6/7, 7/8} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture}} & \multicolumn{3}{@{\tsep}l@{\tsep}}{$\CF_4\colon\!\!\!\!$ \tikzsetnextfilename{indef_f4}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt, xscale=0.65, yscale=0.5] \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label={[xshift=0.1ex]below:$\scriptscriptstyle \mppmss 21$}] (n1) at (6, 1 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 3$] (n2) at (5, 1 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 3$] (n3) at (4, 1 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 3$] (n4) at (3, 1 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 3$] (n5) at (2, 1 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 3$] (n6) at (1, 1 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 3$] (n7) at (0, 1 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 7$] (n8) at (5, 2 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 7$] (n9) at (4, 2 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptscriptstyle 10$] (n10) at (5, 0 ) {}; \foreach \x/\y in {1/2, 1/8, 1/10, 2/3, 3/4, 4/5, 5/6, 6/7, 8/9} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture}}\\[-0.3ex] \multicolumn{4}{@{\tsep}c@{\tsep}}{$\CF_5\colon\!\!\!\!$ \tikzsetnextfilename{indef_f5}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt, xscale=0.52, yscale=0.5] \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle \mppmss 7$] (n1) at (4, 1 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 5$] (n2) at (0, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle \mppmss 2$] (n3) at (1, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle \mppmss 2$] (n4) at (2, 0 ) {}; \node[circle, 
draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle \mppmss 2$] (n5) at (3, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle \mppmss 2$] (n6) at (4, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle \mppmss 2$] (n7) at (5, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle \mppmss 2$] (n8) at (6, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 5$] (n9) at (7, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 4$] (n10) at (8, 0 ) {}; \foreach \x/\y in {1/9, 2/1} \draw [bend left=15.0, -stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {2/3, 3/4, 4/5, 5/6, 6/7, 7/8, 8/9} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n9) to (n10); \end{tikzpicture}} & \multicolumn{4}{@{\tsep}c@{\tsep}}{$\CF_6\colon\!\!\!\!$ \tikzsetnextfilename{indef_f6}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt, xscale=0.52, yscale=0.5] \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle \mppmss 7$] (n1) at (4, 1 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 5$] (n2) at (0, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 5$] (n3) at (1, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle \mppmss 2$] (n4) at (2, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle \mppmss 2$] (n5) at (3, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle \mppmss 2$] (n6) at (4, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle \mppmss 2$] (n7) at (5, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle \mppmss 2$] (n8) at (6, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle \mppmss 2$] (n9) at (7, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 4$] (n10) at (8, 0 ) {}; \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n2) to (n3); \foreach \x/\y in {3/4, 4/5, 5/6, 6/7, 7/8, 8/9, 9/10} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {1/10, 3/1} \draw [bend left=15.0, -stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture}} & \multicolumn{4}{@{\tsep}l@{\tsep}}{$\CF_7\colon\!\!\!\!$ \tikzsetnextfilename{indef_f7}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt, xscale=0.52, yscale=0.5] \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle \mppmss 7$] (n1) at (0, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 2$] (n2) at (1, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 2$] (n3) at (2, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 2$] (n4) at (3, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 2$] (n5) at (4, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, 
label=below:$\scriptscriptstyle 2$] (n6) at (5, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 2$] (n7) at (6, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle \mppmss 7$] (n8) at (7, 0 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 5$] (n9) at (3, 1 ) {}; \node[circle, draw, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 4$] (n10) at (4, 1 ) {}; \foreach \x/\y in {1/9, 10/8} \draw [bend left=15.0, -stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {1/2, 2/3, 3/4, 4/5, 5/6, 6/7, 7/8, 9/10} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture}}\\[-1ex] \caption{Indefinite posets}\label{tbl:indefposets} \end{longtable}} \end{lemma}
\begin{proof} \vspace*{-0.5ex}First, we recall that an `unoriented edge' in a diagram means that both arrow orientations are permissible, see \Cref{remark:hassepath:anyorient}. In other words, the diagrams $\CF_1,\ldots,\CF_{7}$ describe $777$ different, non-isomorphic posets. By \Cref{corr:ahchoredpath}\ref{corr:ahchoredpath:anyorient}, without loss of generality, we may assume that all unoriented edges in the diagrams presented in \Cref{tbl:indefposets} are oriented ``from left to right''. That is, the arcs in the Hasse digraph $\vec \CF_i$, where $i\in\{1,\ldots,7\}$, are oriented in such a way that: \begin{itemize} \item $\smash{\vec \CF_1}$ has three sinks, \item $\smash{\vec \CF_2}$ has two sinks and \item $\smash{\vec \CF_3,\ldots,\vec \CF_{7}}$ have exactly one sink. \end{itemize} It is straightforward to check that $q_{I_i}(v^i)<0$, where $I_i\eqdef I_{\vec \CF_i}$ is the poset defined by the digraph $\smash{\vec \CF_i\in\{\vec \CF_1, \ldots,\vec \CF_{7} \}}$ and $v^i\in\ZZ^n$ is the integer vector whose coordinates are given as the labels of the vertices of the diagram $\CF_i$ in \Cref{tbl:indefposets}. Therefore, by Remark \ref{rmk:indef}, we conclude that the poset $I_i$ is indefinite. Since isomorphic posets are weakly Gram $\ZZ$-congruent (the congruence is defined by a permutation matrix), it follows that every poset whose Hasse digraph is isomorphic to $\CF_i$ is indefinite. \end{proof}
We need one more technical result to prove \Cref{thm:a:main}. In \Cref{lemma:pathext} we describe connected posets $I$ with the following property: for some $p\in \{1,\ldots,n\}$ the graph $\ov\CH(I^{(p)})$ is isomorphic to a path graph. \begin{lemma}\label{lemma:pathext} Assume that $I$ is a connected poset such that, for some $p\in \{1,\ldots,n\}$, the graph $\ov\CH(I^{(p)})$ is isomorphic to a path graph. Exactly one of the following conditions holds.
\begin{enumerate}[label=\normalfont{(\alph*)}] \item\label{lemma:pathext:posit} $I$ is positive and: \begin{enumerate}[label=\normalfont{(a\arabic*)}, leftmargin=3ex] \item\label{lemma:pathext:posit:a} $\Dyn_I=\AA_n$ with $\CH(I)\simeq \CA_n=$\tikzsetnextfilename{pth_an}\begin{tikzpicture}[baseline=(n7.base),label distance=-2pt,xscale=0.64, yscale=0.64] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 1$] (n1) at (0, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 2$] (n2) at (1, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 3$] (n4) at (2, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle n\mppmss 1$] (n5) at (5, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle n$] (n6) at (6, 0 ) {}; \node (n7) at (3, 0 ) {$\mathclap{\phantom{7}}$}; \node (n8) at (4, 0 ) {}; \foreach \x/\y in {1/2, 2/4, 5/6} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-, shorten <= 2.50pt, shorten >= -2.50pt] (n4) to (n7); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, -, shorten <= -0.50pt, shorten >= -2.50pt] (n7) to (n8); \draw [-, shorten <= -2.50pt, shorten >= 2.50pt] (n8) to (n5); \end{tikzpicture}; \item\label{lemma:pathext:posit:d} $\Dyn_I=\DD_n$ and the Hasse digraph $\CH(I)\simeq \CD_I$ has a shape $\CD_I\in\{\CD_n^{[1]},\CD_{n,s}^{[2]},\CD_{n,s}^{[3]}\}$, \begin{center} \newcommand{\mxs}{0.59} \newcommand{\mys}{0.6} $\CD_n^{[1]}=$\!\!\! \tikzsetnextfilename{d1n}\begin{tikzpicture}[baseline=(nb.base),label distance=-2pt,xscale=\mxs, yscale=\mys] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 1$] (n1) at (0, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 2$] (n2) at (1, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptscriptstyle 3$] (n3) at (1, 1 ) {}; \node (n4) at (2, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle n\mppmss 1$] (n5) at (4, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle n$] (n6) at (5, 0 ) {}; \node (n8) at (3, 0 ) {}; \node (nb) at (0.50, 0.50) {\phantom{7}}; \foreach \x/\y in {1/2, 2/3, 5/6} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-, shorten <= 2.50pt, shorten >= -2.50pt] (n2) to (n4); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 4\pgflinewidth, -, shorten <= -0.50pt, shorten >= -2.50pt] (n4) to (n8); \draw [-, shorten <= -2.50pt, shorten >= 2.50pt] (n8) to (n5); \end{tikzpicture} $\CD_{n,s}^{[2]}=$\!\!\! 
\tikzsetnextfilename{d2n}\begin{tikzpicture}[baseline=(n6.base),label distance=-2pt, xscale=\mxs, yscale=\mys] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle s$] (n1) at (3, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptscriptstyle s\mpppss 1$] (n2) at (4, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptscriptstyle s\mpppss 2$] (n3) at (4, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 1$] (n5) at (1, 1 ) {}; \node (n6) at (2, 1 ) {$\mathclap{\phantom{7}}$}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below right:$\scriptscriptstyle s\mpppss 3$] (n4) at (5, 1 ) {}; \node (n9) at (6, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle n$] (n10) at (7, 1 ) {}; \foreach \x/\y in {1/2, 1/3, 2/4, 3/4} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-, shorten <= 2.50pt, shorten >= -2.50pt] (n5) to (n6); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 4\pgflinewidth, -, shorten <= -1.5pt, shorten >= -.50pt] (n6) to (n1); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 4\pgflinewidth, -, shorten <= 2.pt, shorten >= -1.50pt] (n4) to (n9); \foreach \x/\y in { 9/10} \draw [-, shorten <= -2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture} $\CD_{n,s}^{[3]}=$\!\!\!\!\! \tikzsetnextfilename{d3n}\begin{tikzpicture}[baseline=(nb.base),label distance=-2pt, xscale=\mxs, yscale=\mys] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 1$] (n1) at (0, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle 2$] (n2) at (1, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle s$] (n3) at (4, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle n$] (n4) at (5, 1 ) {}; \node (n5) at (2, 1 ) {}; \node (n6) at (3, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle s\mpppss 1$] (n7) at (0, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle s\mpppss 2$] (n8) at (1, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle n\mppmss 1$] (n9) at (4, 0 ) {}; \node (n10) at (2, 0 ) {}; \node (n11) at (3, 0 ) {}; \node (nb) at (0.50, .50) {\phantom{7}}; \foreach \x/\y in {1/2, 3/4, 9/4} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-stealth, shorten <= 2.50pt, shorten >= -2.50pt] (n2) to (n5); \foreach \x/\y in {5/6, 10/11} \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 4\pgflinewidth, -, shorten <= -0.50pt, shorten >= -2.50pt] (n\x) to (n\y); \draw [-stealth, shorten <= -2.50pt, shorten >= 2.50pt] (n6) to (n3); \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n7) to (n8); \draw [-, shorten <= 2.50pt, shorten >= -2.50pt] (n8) to (n10); \draw [-, shorten <= -2.50pt, shorten >= 2.50pt] (n11) to (n9); \draw [-stealth, shorten <= 3.50pt, shorten >= 1.90pt] ([yshift=-0.5]n1.south east) to ([yshift=1.5]n9.north west); \end{tikzpicture} \end{center} where $n\geq 4$ for $\CD_{I}=\CD_n^{[1]}$; $n\geq 4$, $s\geq 1$ for $\CD_{n,s}^{[2]}$, and $n\geq 5$, $s\geq 2$ for $\CD_{n,s}^{[3]}$. 
\item\label{lemma:pathext:posit:e} $\Dyn_I=\EE_n$, where $n\in\{6,7,8\}$, and the Hasse digraph $\CH(I)$, up to isomorphism, is one of $498$ digraphs \textnormal{[}$86$~up to orientation of hanging paths, see \Cref{corr:ahchoredpath}\ref{corr:ahchoredpath:anyorient}\textnormal{]}, i.e., there are $38, 145, 315$ \textnormal{[}$11, 30, 45$\textnormal{]} digraphs with $\Dyn_I=\EE_6, \EE_7$ and $\EE_8$, respectively. In particular, Hasse digraphs $\CH(I)$ of all posets $I$ with $\Dyn_I=\EE_6$ are depicted below. \begin{center} {\newcommand{\mxscale}{0.65} \newcommand{\myscale}{0.55} \hfill \tikzsetnextfilename{p_e6_1}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt,xscale=\mxscale, yscale=\myscale] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n1) at (0, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n2) at (1, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n3) at (1, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n4) at (1, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n5) at (2, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n6) at (2, 1 ) {}; \foreach \x/\y in {1/2, 1/3, 1/4, 2/5, 3/6} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture} \hfill \tikzsetnextfilename{p_e6_2}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt,xscale=\mxscale, yscale=\myscale] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n1) at (0, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n2) at (1, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n3) at (1, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n4) at (2, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n5) at (2, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n6) at (2, 0 ) {}; \foreach \x/\y in {1/2, 1/3, 2/5, 3/5} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {2/4, 3/6} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture} \hfill \tikzsetnextfilename{p_e6_3}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt,xscale=\mxscale, yscale=\myscale] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n1) at (0, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n2) at (1, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n3) at (1, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n4) at (1, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n5) at (2, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n6) at (0, 2 ) {}; \foreach \x/\y in {1/2, 1/3, 2/5, 3/5, 6/3} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n1) to (n4); \end{tikzpicture} \hfill \tikzsetnextfilename{p_e6_4}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt,xscale=\mxscale, yscale=\myscale] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n1) at (0, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n2) at (1, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n3) at (2, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n4) at (3, 1 ) {}; 
\node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n5) at (1, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n6) at (2, 0 ) {}; \foreach \x/\y in {1/2, 1/5, 2/3, 5/3, 5/6} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n3) to (n4); \end{tikzpicture} \hfill \tikzsetnextfilename{p_e6_5}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt,xscale=\mxscale, yscale=\myscale] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n1) at (0, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n2) at (1, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n3) at (1, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n4) at (1, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n5) at (2, 1.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n6) at (2, 0.50) {}; \foreach \x/\y in {1/2, 1/3, 1/4, 2/5, 3/5, 3/6, 4/6} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture} \hfill \tikzsetnextfilename{p_e6_6}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt,xscale=\mxscale, yscale=\myscale] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n1) at (0, 1.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n2) at (0, 0.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n3) at (1, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n4) at (1, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n5) at (1, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n6) at (2, 1 ) {}; \foreach \x/\y in {1/3, 1/4, 2/4, 2/5, 3/6, 4/6, 5/6} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture}\hfill\mbox{} \\[0.34cm] \hfill \tikzsetnextfilename{p_e6_7}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt,xscale=\mxscale, yscale=\myscale] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n1) at (0, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n2) at (1, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n3) at (2, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n4) at (3, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n5) at (1.50, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n6) at (1, 0 ) {}; \foreach \x/\y in {1/2, 1/5, 2/3, 3/4, 5/4} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n1) to (n6); \end{tikzpicture} \hfill \tikzsetnextfilename{p_e6_8}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt,xscale=\mxscale, yscale=\myscale] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n1) at (0, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n2) at (1, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n3) at (2, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n4) at (3, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n5) at (1.50, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n6) at (2, 0 ) {}; \foreach \x/\y in {1/2, 1/5, 2/3, 3/4, 5/4} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] 
(n\x) to (n\y); \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n2) to (n6); \end{tikzpicture} \hfill \tikzsetnextfilename{p_e6_9}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt,xscale=\mxscale, yscale=\myscale] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n1) at (0, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n2) at (1, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n3) at (2, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n4) at (3, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n5) at (1.50, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n6) at (1, 0 ) {}; \foreach \x/\y in {1/2, 1/5, 2/3, 3/4, 5/4} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n3) to (n6); \end{tikzpicture} \hfill \tikzsetnextfilename{p_e6_10}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt,xscale=\mxscale, yscale=\myscale] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n1) at (0, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n2) at (1, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n3) at (2, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n4) at (3, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n5) at (1.50, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n6) at (2, 0 ) {}; \foreach \x/\y in {1/2, 1/5, 2/3, 3/4, 5/4} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n4) to (n6); \end{tikzpicture}\hfill \tikzsetnextfilename{p_e6_11}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt,xscale=\mxscale, yscale=\myscale] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n1) at (0, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n2) at (1, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n3) at (2, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n4) at (3, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n5) at (1, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n6) at (2, 0 ) {}; \foreach \x/\y in {1/2, 1/5, 2/3, 3/4, 5/6, 6/4} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture}\hfill\mbox{} }\end{center} \end{enumerate} We note that, up to isomorphism, the first Hasse digraph describes exactly $20$ posets and the second exactly $3$ posets. \item\label{lemma:pathext:princ} $I$ is principal and: \begin{enumerate}[label=\normalfont{(b\arabic*)}, leftmargin=3ex] \item\label{lemma:pathext:princ:a} $\Dyn_I=\AA_{n-1}$, $\ov\CH(I)$ is a cycle graph and $\CH(I)$ has at least two sinks. \item\label{lemma:pathext:princ:e} $\Dyn_I=\EE_{n-1}$, where $n\in\{8,9\}$, and the Hasse digraph $\CH(I)$, up to isomorphism, is one of $850$ \textnormal{digraphs [}$98$~up to orientation of hanging paths\textnormal{]}, i.e., there are exactly $185, 665$ \textnormal{[}$36,62$\textnormal{]} digraphs with $\Dyn_I= \EE_7$ and $\EE_8$, respectively. \end{enumerate} \item\label{lemma:pathext:indef} $I$ is indefinite.\pagebreak \end{enumerate} \end{lemma} \begin{proof} Assume that $I$ is a connected poset of size $n$. 
By \Cref{thm:onepeak_posit} and \Cref{corr:ahchoredpath}\ref{corr:ahchoredpath:anyorient}, we know that posets $I$ with $\ov\CH(I)$ isomorphic to $\CA_n,\ab\CD_n^{[1]},\ab\CD_{n,s}^{[2]},\ab\CD_{n,s}^{[3]}$ are positive, with $\Dyn_I=\AA_n$ if $\ov\CH(I)\simeq \CA_n$ and $\Dyn_I=\DD_n$ otherwise. Furthermore, \Cref{thm:digraphdegmax2}\ref{thm:digraphdegmax2:cycle:multsink} asserts that posets $I$ with $\ov \CH(I)$ isomorphic to a cycle graph and $\CH(I)$ having at least two sinks are principal with $\Dyn_I=\AA_{n-1}$.\medskip The proof is divided into two parts. First, we prove the thesis by analyzing all posets having at most $11$ elements. Then, using induction, we prove it for posets $I$ of size $|I|>11$.\smallskip \textbf{Part $1^\circ$} It is easy to see that all connected posets $I$ of size $n\leq 3$ satisfy the assumptions: in this case $\CH(I)\in\{\!\! \tikzsetnextfilename{main_prf_1}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt,xscale=0.65, yscale=0.74] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=left:$\scriptstyle 1$] (n1) at (0 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptstyle 2$] (n2) at (1 , 0 ) {}; \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n1) to (n2); \end{tikzpicture}\!\!,\! \tikzsetnextfilename{main_prf_2}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt,xscale=0.65, yscale=0.74] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=left:$\scriptstyle 1$] (n1) at (0 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptstyle 2$] (n2) at (1 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptstyle 3$] (n3) at (2.35, 0 ) {}; \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n1) to (n2); \draw [-stealth, shorten <= 2.50pt, shorten >= 9.00pt] (n3) to (n2); \end{tikzpicture}\!\!,\! \tikzsetnextfilename{main_prf_3}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt,xscale=0.65, yscale=0.74] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=left:$\scriptstyle 1$] (n1) at (0 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptstyle 2$] (n2) at (1 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptstyle 3$] (n3) at (2.35, 0 ) {}; \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n2) to (n1); \draw [-stealth, shorten <= 9.00pt, shorten >= 2.50pt] (n2) to (n3); \end{tikzpicture}\!\!,\! \tikzsetnextfilename{main_prf_4}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt,xscale=0.65, yscale=0.74] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=left:$\scriptstyle 1$] (n1) at (0 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptstyle 2$] (n2) at (1 , 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptstyle 3$] (n3) at (2.35, 0 ) {}; \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n1) to (n2); \draw [-stealth, shorten <= 9.00pt, shorten >= 2.50pt] (n2) to (n3); \end{tikzpicture}\!\! \}$. Since $\ov\CH(I^{(1)})$ is isomorphic to a path graph, $\Dyn_I=\AA_{n}$ and the thesis follows. Now, using Computer Algebra System (e.g. 
SageMath or Maple), we compute all (up to isomorphism) posets $I$ of size at most $11$ using a suitably modified version of~\cite[Algorithm 7.1]{gasiorekOnepeakPosetsPositive2012} (see also~\cite{brinkmannPosets16Points2002} for a different approach). There are exactly $\num{49519383}$ [$\num{46485488}$ connected] posets $I$ of size $4\leq |I| \leq 11$. Moreover, $\num{58723}$ [$\num{58198}$ connected] posets $I$ have the graph $\ov\CH(I^{(p)})$ isomorphic to a path graph, for some $p\in I$. In particular, there are $\num{46749427}$ [$\num{43944974}$ connected] non-isomorphic posets of size $11$. In $\num{39335}$ [$\num{39079}$] cases, for some $p\in \{1,\ldots,11\}$, the graph $\ov\CH(I^{(p)})$ is isomorphic to a path graph and, up to isomorphism, there are exactly: \begin{itemize} \item $256$ disconnected positive posets $I$ with $\CH(I)\simeq \, \scriptstyle \bullet\hspace{22pt}\bullet\,\rule[1.5pt]{22pt}{0.4pt}\,\bullet\,\rule[1.5pt]{22pt}{0.4pt}\,\,\hdashrule[1.5pt]{12pt}{0.4pt}{1pt}\,\rule[1.5pt]{22pt}{0.4pt}\,\bullet$, \item $\num[group-minimum-digits = 4]{2575}$ connected positive posets $I$, of which $528$, $768$, $1024$ and $255$ have their Hasse digraph $\CH(I)$ isomorphic to $\CA_n$, $\CD_n^{[1]}$, $\CD_{n,s}^{[2]}$, and $\CD_{n,s}^{[3]}$, respectively; \item $88$ connected principal posets $I$, where $\ov\CH(I)$ is a cycle graph and $\CH(I)$ has at least two sinks; \item $\num{36416}$ connected indefinite posets. \end{itemize} A more precise analysis of connected posets $I$ is given in the following table. {\sisetup{group-minimum-digits=4}\begin{longtable}{lrrrrrrrrrrr}\toprule & & \multicolumn{5}{c}{positive} & \multicolumn{2}{c}{principal} & indefinite \\\cmidrule(lr){3-7}\cmidrule(lr){8-9}\cmidrule(ll){10-12} $n$& $\# I$ & $\CA_n$ & $\CD_n^{[1]}$ & $\CD_{n,s}^{[2]}$ & $\CD_{n,s}^{[3]}$ & $\EE_n$ & $\AA_{n-1}$ & $\EE_{n-1}$ & $\# I$ \\\midrule $4$ & $10$ & $4$ & $4$ & $1$ & & & $1$ & & \\ $5$ & $34$ & $10$ & $12$ & $4$ & $3$ & & $1$ & & $4$\\ $6$ & $129$ & $16$ & $24$ & $12$ & $7$ & $38$ & $5$ & & $27$\\ $7$ & $413$ & $36$ & $48$ & $32$ & $15$ & $145$ & $6$ & & $131$\\ $8$ & $\num{1369}$ & $64$ & $96$ & $80$ & $31$ & $315$ & $17$ & $185$ & $581$\\ $9$ & $\num{4184}$ & $136$ & $192$ & $192$ & $63$ & & $25$ & $665$ & $\num{2911}$\\ $10$ & $\num{12980}$ & $256$ & $384$ & $448$ & $127$ & & $56$ & & $\num{11709}$\\ $11$ & $\num{39079}$ & $528$ & $768$ & $\num{1024}$ & $255$ & & $88$ & & $\num{36416}$\\ \bottomrule \end{longtable}} \noindent This computer-assisted analysis completes the proof for posets $I$ of size $|I|\leq 11$.\smallskip \textbf{Part $2^\circ$} We proceed by induction. Assume that $I$ is a finite connected poset of size $|I|=n>11$ such that the graph $\ov \CH(I^{(p)})$ is isomorphic to a path graph for some $p\in \{1,\ldots,n\}$, and that the thesis holds for posets of size $n-1$. To prove the inductive step, we show that $\CH(I)$ has one of the forms described in \ref{lemma:pathext:posit:a}, \ref{lemma:pathext:posit:d} and \ref{lemma:pathext:princ:a}, or that $I$ is indefinite as in \ref{lemma:pathext:indef}. 
Without loss of generality, we may assume that $p=1$ and $\deg_{\ov\CH(I^{(1)})}(n)=1$, i.e., \vspace*{-1ex} \begin{align*} \ov\CH(I^{(1)})\simeq P(2,n) &= \!\!\!\tikzsetnextfilename{main_prf_5}\begin{tikzpicture}[baseline=(n2.base),label distance=-2pt] \matrix [matrix of nodes, ampersand replacement=\&, nodes={minimum height=1.3em,minimum width=1.3em, text depth=0ex,text height=1ex, execute at begin node=$, execute at end node=$} , column sep={15pt,between borders}, row sep={10pt,between borders}] { |(n2)|2 \& |(n3)|3 \& |(n4)| \& |(n5)| \& |(n6)|n-1 \& |(n7)|n \\ }; \foreach \x/\y in {2/3, 3/4, 5/6, 6/7} \draw [-, shorten <= -2.50pt, shorten >= -2.50pt] (n\x) to (n\y); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, -, shorten <= -2.50pt, shorten >= -2.50pt] (n4) to (n5); \end{tikzpicture}\!\!\!\! \simeq \CA_{n-1}.\\[-1ex] \intertext{\vspace*{-1ex}Consider the poset $J\eqdef I^{(n)}$.}\\[-1.2cm] \intertext{\indent(A) If $J$ is not connected, then the element $n$ is an articulation point in the graph $\ov\CH(J)$. Since degree of $n$ in $\ov\CH(I^{(1)})$ equals one, we conclude that $J$ has two connected components: $\{2,\ldots,n-1\}$ and $\{1\}$. Moreover, the graph $\ov\CH(I)$ have the shape} \ov\CH(I) &\simeq \!\!\!\tikzsetnextfilename{main_prf_6}\begin{tikzpicture}[baseline=(n2.base),label distance=-2pt] \matrix [matrix of nodes, ampersand replacement=\&, nodes={minimum height=1.3em,minimum width=1.3em,text depth=0ex,text height=1ex, execute at begin node=$, execute at end node=$},column sep={15pt,between borders}, row sep={10pt,between borders}] { |(n2)|2 \& |(n3)|3 \& |(n4)| \& |(n5)| \& |(n6)|n-1 \& |(n7)|n \& |(n1)|1 \\ }; \foreach \x/\y in {2/3, 3/4, 5/6, 6/7, 7/1} \draw [-, shorten <= -2.50pt, shorten >= -2.50pt] (n\x) to (n\y); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, -, shorten <= -2.50pt, shorten >= -2.50pt] (n4) to (n5); \draw [thick,decoration={brace,raise=0.35cm},decorate] (n2.west) -- (n7.east) node [pos=0.5,anchor=north,yshift=0.65cm] {$\smash{\scriptstyle \ov \CH(I^{(p)})}$}; \draw[draw=black, dashed, fill=green, fill opacity=0.2, rounded corners]([xshift=1pt]n1.north west)--([xshift=-1pt]n1.north east)--([xshift=-1pt,yshift=2pt]n1.south east)--([xshift=1pt,yshift=2pt]n1.south west)--cycle; \draw[draw=black, dashed, fill=green, fill opacity=0.2, rounded corners]([xshift=1pt]n2.north west)--([xshift=-1pt]n6.north east)--([xshift=-1pt,yshift=2pt]n6.south east)--([xshift=1pt,yshift=2pt]n2.south west)--cycle; \end{tikzpicture}\!\!\!\! \simeq \CA_n, \end{align*} where the elements of the poset $J$ are highlighted. Hence the thesis follows.\smallskip (B) Assume that $J$ is connected. Since $J^{(1)}=I^{(1,n)}$ is such a poset of size $n-1$, that the graph $\ov\CH(J^{(1)})$ is isomorphic to a path graph, by the inductive hypothesis one of the following conditions holds: \begin{enumerate}[label=\normalfont{(\roman*)}, itemsep=0ex, topsep=1ex, partopsep=1ex, leftmargin=10ex] \item $\CH(J)\simeq \CD_{J}$, where $\CD_{J}\in\{\CA_n,\CD_n^{[1]},\CD_{n,s}^{[2]},\CD_{n,s}^{[3]}\}$, as described in \ref{lemma:pathext:posit:a} and \ref{lemma:pathext:posit:d}; \item $\ov \CH(J)$ is a cycle graph and $\CH(J)$ has at least two sinks, as described in \ref{lemma:pathext:princ:a}; \item $J$ is indefinite, as described in \ref{lemma:pathext:indef}. \end{enumerate} We analyze these cases one by one. 
Since the digraphs $\CH(I)$ and $\CH(J)$ are connected, $\ov\CH(I^{(1)})\simeq \CA_{n-1}$ and $\deg_{\ov\CH(I^{(1)})}(n)=1$, we conclude that the degree of the vertex $n$ in the graph $\ov\CH(I)$ equals two, if elements $1$ and $n$ are in relation, and one otherwise.\smallskip (i) Assume that $\CH(J)\simeq \CD_{J}\in\{\CA_n,\ab\CD_n^{[1]}, \ab\CD_{n,s}^{[2]},\ab\CD_{n,s}^{[3]}\}$. We have the following: \begin{enumerate}[label=\normalfont{($\arabic*^\circ$)},wide,labelindent=0pt] \item\label{lemma:pathext:prf:b:i:i} if $\CD_J=\CA_{n-1}$, then either $\CH(I)\simeq \CD_I\in\{\CA_n, \CD_n^{[1]}, \CD_{n,s'}^{[3]}\}$, where $2\leq s'\leq n-2$, or $\CH(I)$ has at least two sinks and $\ov\CH(I)$ is a cycle graph; \item\label{lemma:pathext:prf:b:i:ii} if $\CD_J=\CD_{n-1}^{[1]}$, then either $\CH(I)\simeq \CD_I\in\{\CD_n^{[1]},\CD_{n,1}^{[2]},\CD_{n,n-3}^{[2]}, \CD_{n,2}^{[3]}\}$, or $I$ is indefinite, as it contains (as a subposet) some $\CF\in\{\CF_1,\ldots, \CF_6\}$; \item\label{lemma:pathext:prf:b:i:iii} if $\CD_J=\CD_{n-1,s}^{[2]}$, then either $\CH(I)\simeq \CD_{n,s'}^{[2]}$, where $1\leq s'\leq n-3$, or $I$ is indefinite, as it contains (as a subposet) some $\CF\in\{\CF_3, \ldots, \CF_6\}$; \item\label{lemma:pathext:prf:b:i:iiii} if $\CD_J=\CD_{n-1, s}^{[3]}$, then either $\CH(I)\simeq \CD_{n,s'}^{[3]}$, where $2\leq s'< n-1$, or $I$ is indefinite, as it contains (as a subposet) some $\CF\in\{\CF_2, \ldots, \CF_7\}$. \end{enumerate} We describe the case \ref{lemma:pathext:prf:b:i:iii} in detail. One has to consider all such finite posets $I$, that $\ov\CH(I^{(1)})\simeq P(2,n)$, $\CH(I^{(n)})\simeq \CD_{n-1,s}^{[2]}$ and $\deg_{\ov\CH(I^{(1)})}(n)=1$. These are the only possibilities:\medskip \noindent ($3^\circ a$) $\CH(I)\simeq \CD_{n-1,s}^{[2]}$ and $\CH(I)$\ has one of the following shapes:\medskip \tikzsetnextfilename{fig3a3}\noindent\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt, xscale=0.68, yscale=0.58] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=left:$\scriptscriptstyle 1$] (n1) at (3, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle n$] (n2) at (0, 1 ) {}; \node (n3) at (1, 1 ) {$\scriptscriptstyle $}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n4) at (2, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle $] (n5) at (4, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle $] (n6) at (3, 0 ) {}; \draw [-, shorten <= 2.50pt, shorten >= -2.50pt] (n2) to (n3); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 3\pgflinewidth, -, shorten <= -2.0pt, shorten >= 2.0pt] (n3) to (n4); \foreach \x/\y in {1/5, 4/1, 4/6, 6/5} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture}, \qquad \tikzsetnextfilename{fig3a1}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt, xscale=0.68, yscale=0.58] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=left:$\scriptscriptstyle 1$] (n1) at (3, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle $] (n2) at (0, 1 ) {}; \node (n3) at (1, 1 ) {$\scriptscriptstyle $}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n4) at (2, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n5) at (4, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum 
size=3.5pt] (n6) at (3, 0 ) {}; \node (n7) at (5, 1 ) {$\scriptscriptstyle $}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle n$] (n8) at (6, 1 ) {}; \draw [-, shorten <= 2.50pt, shorten >= -2.50pt] (n2) to (n3); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 3\pgflinewidth, -, shorten <= -2.0pt, shorten >= 2.0pt] (n3) to (n4); \foreach \x/\y in {1/5, 4/1, 4/6, 6/5} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 3\pgflinewidth, -, shorten <= 3.5pt, shorten >= -1.5pt] (n5) to (n7); \draw [-, shorten <= -2.50pt, shorten >= 2.50pt] (n7) to (n8); \end{tikzpicture},\qquad \tikzsetnextfilename{fig3a2}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt, xscale=0.68, yscale=0.58] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=left:$\scriptscriptstyle 1$] (n1) at (3, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n4) at (2, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n5) at (4, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt] (n6) at (3, 0 ) {}; \node (n7) at (5, 1 ) {$\scriptscriptstyle $}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle n$] (n8) at (6, 1 ) {}; \foreach \x/\y in {1/5, 4/1, 4/6, 6/5} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 3\pgflinewidth, -, shorten <= 3.5pt, shorten >= -1.5pt] (n5) to (n7); \draw [-, shorten <= -2.50pt, shorten >= 2.50pt] (n7) to (n8); \end{tikzpicture};\bigskip \noindent ($3^\circ b$) $I$ is indefinite, since $\CF_3\subseteq I$:\medskip \noindent \tikzsetnextfilename{fig3b}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt, xscale=0.55, yscale=0.55] \node[circle, fill=Red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle n\phantom{\mppmss }$] (n1) at (11, 2 ) {}; \node[circle, fill=Red, inner sep=0pt, minimum size=3.5pt] (n2) at (10, 2 ) {}; \node[circle, fill=, gray!90, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle $] (n3) at (9, 2 ) {}; \node[circle, fill=, gray!90, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle $] (n4) at (8, 1 ) {}; \node[circle, fill=, Red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 1$] (n5) at (9, 1 ) {}; \node[circle, fill=Red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle $] (n6) at (9, 0 ) {}; \node[circle, fill=Red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle $] (n7) at (10, 0 ) {}; \node[circle, fill=Red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle $] (n8) at (11, 0 ) {}; \node[circle, fill=Red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle $] (n9) at (12, 0 ) {}; \node[circle, fill=Red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle $] (n10) at (13, 0 ) {}; \node[circle, fill=Red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle $] (n11) at (14, 0 ) {}; \node[circle, fill=, gray!90, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle $] (n12) at (15, 0 ) {}; \node[circle, fill=, gray!90, inner sep=0pt, minimum size=3.5pt] (n13) at (16, 0 ) {}; \foreach \x/\y in {2/3, 3/4, 5/4, 6/4} \draw [-, gray!90, stealth-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); 
\draw [-, , gray!90, -, shorten <= 1pt, shorten >= 1.0pt] (n13) to (n12); \foreach \x/\y in {1/2, 1/5, 7/5, 7/6} \draw [stealth-, line width=1.2pt, Red,shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {8/7, 9/8, 10/9, 11/10} \draw [-, line width=1.2pt, Red, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [line width=1.2pt, gray!90, line cap=round, dash pattern=on 0pt off 3\pgflinewidth, -, shorten <= 2.0pt, shorten >= 1.0pt] (n12) to (n11); \end{tikzpicture},\ \tikzsetnextfilename{fig3b2} \begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt, xscale=0.55, yscale=0.55] \node[circle, fill=Red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle n\phantom{\mppmss }$] (n1) at (11, 2 ) {}; \node[circle, fill=Red, inner sep=0pt, minimum size=3.5pt] (n2) at (10, 2 ) {}; \node[circle, fill=, gray!90, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle $] (n3) at (9, 2 ) {}; \node[circle, fill=, gray!90, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle $] (n4) at (8, 1 ) {}; \node[circle, fill=, Red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 1$] (n5) at (9, 1 ) {}; \node[circle, fill=Red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle $] (n6) at (9, 0 ) {}; \node[circle, fill=Red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle $] (n7) at (10, 0 ) {}; \node[circle, fill=Red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle $] (n8) at (11, 0 ) {}; \node[circle, fill=Red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle $] (n9) at (12, 0 ) {}; \node[circle, fill=Red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle $] (n10) at (13, 0 ) {}; \node[circle, fill=Red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle $] (n11) at (14, 0 ) {}; \node[circle, fill=, gray!90, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle $] (n12) at (15, 0 ) {}; \node[circle, fill=, gray!90, inner sep=0pt, minimum size=3.5pt] (n13) at (16, 0 ) {}; \foreach \x/\y in {3/2, 4/3, 4/5, 4/6} \draw [-, gray!90, stealth-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-, , gray!90, -, shorten <= 1pt, shorten >= 1.0pt] (n13) to (n12); \foreach \x/\y in {2/1, 5/1, 5/7, 6/7} \draw [stealth-, line width=1.2pt, Red,shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {8/7, 9/8, 10/9, 11/10} \draw [-, line width=1.2pt, Red, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [line width=1.2pt, gray!90, line cap=round, dash pattern=on 0pt off 3\pgflinewidth, -, shorten <= 2.0pt, shorten >= 1.0pt] (n12) to (n11); \end{tikzpicture},\ \tikzsetnextfilename{fig3b3}\begin{tikzpicture}[baseline=(n1.base),label distance=-2pt,xscale=0.55, yscale=0.54] \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n1) at (0 , 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 1$] (n2) at (1 , 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n3) at (1 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n4) at (2 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n5) at (3 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n6) at (4 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n7) at (5 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle n$] (n8) at 
(5 , 2 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n9) at (4 , 2 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n10) at (3 , 2 ) {}; \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n11) at (1 , 2 ) {}; \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n12) at (2 , 2 ) {}; \foreach \x/\y in {1/2, 1/3, 1/11, 11/12, 12/10} \draw [gray!90, solid, -stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {2/4, 3/4, 9/8, 10/9} \draw [very thick, red, -stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [very thick, red, -stealth, shorten <= 2.50pt, shorten >= 3.50pt] (n2) to ([yshift=-5]n8); \foreach \x/\y in {4/5, 5/6, 6/7} \draw [very thick, red, -, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture},\ \tikzsetnextfilename{fig3b4}\begin{tikzpicture}[baseline=(n1.base),label distance=-2pt,xscale=0.55, yscale=0.54] \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n1) at (0 , 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 1$] (n2) at (1 , 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n3) at (1 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n4) at (2 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n5) at (3 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n6) at (4 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n7) at (5 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle n$] (n8) at (5 , 2 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n9) at (4 , 2 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n10) at (3 , 2 ) {}; \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n11) at (1 , 2 ) {}; \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n12) at (2 , 2 ) {}; \foreach \x/\y in {2/1, 3/1, 10/12, 11/1, 12/11} \draw [gray!90, solid, -stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {4/2, 4/3, 8/9, 9/10} \draw [very thick, red, -stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [very thick, red, -stealth, shorten <= 2.50pt, shorten >= 3.50pt] (n8) to ([yshift=-5]n2); \foreach \x/\y in {4/5, 5/6, 6/7} \draw [very thick, red, -, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture};\bigskip \noindent ($3^\circ c$) $I$ is indefinite, since $\CF_4\subseteq I$:\medskip \noindent \tikzsetnextfilename{fig3d11}\begin{tikzpicture}[baseline=(n1.base),label distance=-2pt,xscale=0.45, yscale=0.5] \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n1) at (0 , 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 1$] (n2) at (1 , 2 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n3) at (1 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n4) at (2 , 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n5) at (3 , 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n6) at (4 , 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n7) at (5 , 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle n$] (n8) at (2.5 , 2 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n9) at (6 , 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum 
size=3.5pt] (n10) at (7 , 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n11) at (8 , 1 ) {}; \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n12) at (9 , 1 ) {}; \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n13) at (10 , 1 ) {}; \foreach \x/\y in {2/1, 3/1} \draw [gray!90, solid, -stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 3\pgflinewidth, gray!90, -, shorten <= 2.20pt, shorten >= 1.50pt] (n11) to (n12); \draw [gray!90, solid, -, shorten <= 2.50pt, shorten >= 2.50pt] (n12) to (n13); \foreach \x/\y in {4/2, 4/3} \draw [very thick, red, -stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {4/5, 5/6, 6/7, 7/9, 9/10, 10/11} \draw [very thick, red, -, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [very thick, red, -stealth, shorten <= 2.50pt, shorten >= 1.50pt] (n8) to (n2); \end{tikzpicture},\ \tikzsetnextfilename{fig3d22}\begin{tikzpicture}[baseline=(n1.base),label distance=-2pt,xscale=0.45, yscale=0.5] \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n1) at (0 , 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 1$] (n2) at (1 , 2 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n3) at (1 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n4) at (2 , 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n5) at (3 , 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n6) at (4 , 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n7) at (5 , 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle n$] (n8) at (2.5 , 2 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n9) at (6 , 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n10) at (7 , 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n11) at (8 , 1 ) {}; \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n12) at (9 , 1 ) {}; \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n13) at (10 , 1 ) {}; \foreach \x/\y in {1/2, 1/3} \draw [gray!90, solid, -stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 3\pgflinewidth, gray!90, -, shorten <= 2.20pt, shorten >= 1.50pt] (n11) to (n12); \draw [gray!90, solid, -, shorten <= 2.50pt, shorten >= 2.50pt] (n12) to (n13); \foreach \x/\y in {2/4, 3/4} \draw [very thick, red, -stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {4/5, 5/6, 6/7, 7/9, 9/10, 10/11} \draw [very thick, red, -, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [very thick, red, -stealth, shorten <= 2.50pt, shorten >= 1.50pt] (n2) to (n8); \end{tikzpicture},\ \tikzsetnextfilename{fig3d33}\begin{tikzpicture}[baseline=(n1.base),label distance=-2pt,xscale=0.51, yscale=0.54] \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n1) at (0 , 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n2) at (1 , 2 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n3) at (2 , 2 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n4) at (3 , 2 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n5) at (4 , 2 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 1$] (n6) at (1 , 
1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n7) at (2 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n8) at (3 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n9) at (4 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n10) at (5 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle n$] (n11) at (5.50, 1 ) {}; \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n12) at (1 , 0 ) {}; \foreach \x/\y in {2/1, 6/1, 12/1} \draw [gray!90, solid, -stealth, shorten <= 1.50pt, shorten >= 1.50pt] (n\x) to (n\y); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 3\pgflinewidth, gray!90, -, shorten <= 1.50pt, shorten >= 1.50pt] (n12) to (n7); \foreach \x/\y in {3/2, 4/3, 5/4, 3/6, 11/6, 8/7, 9/8, 10/9, 11/10} \draw [very thick, red, -stealth, shorten <= 1.50pt, shorten >= 1.50pt] (n\x) to (n\y); \end{tikzpicture},\ \tikzsetnextfilename{fig3d44}\begin{tikzpicture}[baseline=(n1.base),label distance=-2pt,xscale=0.51, yscale=0.54] \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n1) at (0 , 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n2) at (1 , 2 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n3) at (2 , 2 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n4) at (3 , 2 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n5) at (4 , 2 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 1$] (n6) at (1 , 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n7) at (2 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n8) at (3 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n9) at (4 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n10) at (5 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle n$] (n11) at (5.50, 1 ) {}; \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n12) at (1 , 0 ) {}; \foreach \x/\y in {1/2, 1/6, 1/12} \draw [gray!90, solid, -stealth, shorten <= 1.50pt, shorten >= 1.50pt] (n\x) to (n\y); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 3\pgflinewidth, gray!90, -, shorten <= 1.50pt, shorten >= 1.50pt] (n12) to (n7); \foreach \x/\y in {2/3, 3/4, 4/5, 6/3, 6/11, 7/8, 8/9, 9/10, 10/11} \draw [very thick, red, -stealth, shorten <= 1.50pt, shorten >= 1.50pt] (n\x) to (n\y); \end{tikzpicture};\bigskip \noindent ($3^\circ d$) $I$ is indefinite as $\CF_5\subseteq I$ or $\CF_6\subseteq I$, respectively: \medskip \noindent \tikzsetnextfilename{fig4d51}\begin{tikzpicture}[baseline=(n1.base),label distance=-2pt,xscale=0.55, yscale=0.54] \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n1) at (7.50, 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n2) at (6.50, 2 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 1$] (n3) at (6.50, 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n4) at (6.50, 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n5) at (5.50, 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n6) at (4.50, 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n7) at (3.50, 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n8) at (2.50, 0 ) {}; \node[circle, fill=red, inner sep=0pt, 
minimum size=3.5pt] (n9) at (1.50, 0 ) {}; \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n10) at (5.50, 2 ) {}; \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n11) at (4.50, 2 ) {}; \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n12) at (0.50, 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle n$] (n13) at (0 , 1 ) {}; \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n14) at (3.50, 2 ) {}; \foreach \x/\y in {11/10, 12/9} \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 3\pgflinewidth, gray!90, -, shorten <= 2.20pt, shorten >= 1.50pt] (n\x) to (n\y); \foreach \x/\y in {10/2, 10/3, 13/12, 14/11} \draw [gray!90, solid, -stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [bend left=10.0, very thick, red, dotted, -stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n13) to (n9); \foreach \x/\y in {2/1, 3/1, 4/1, 5/4, 6/5, 7/6, 8/7, 9/8, 13/3} \draw [very thick, red, -stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture}\qquad or \qquad \tikzsetnextfilename{fig4d52}\begin{tikzpicture}[baseline=(n1.base),label distance=-2pt,xscale=0.55, yscale=0.54] \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n1) at (0 , 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n2) at (1 , 2 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle 1$] (n3) at (1 , 1 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n4) at (1 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n5) at (2 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n6) at (3 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n7) at (4 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n8) at (5 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt] (n9) at (6 , 0 ) {}; \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n10) at (2 , 2 ) {}; \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n11) at (3 , 2 ) {}; \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n12) at (7 , 0 ) {}; \node[circle, fill=red, inner sep=0pt, minimum size=3.5pt, label=above:$\scriptscriptstyle n$] (n13) at (8 , 1 ) {}; \node[circle, fill=gray!90, inner sep=0pt, minimum size=3.5pt] (n14) at (4 , 2 ) {}; \foreach \x/\y in {9/12, 10/11} \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 3\pgflinewidth, gray!90, -, shorten <= 2.20pt, shorten >= 1.50pt] (n\x) to (n\y); \foreach \x/\y in {2/10, 3/10, 11/14, 12/13} \draw [gray!90, solid, -stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [bend left=10.0, very thick, red, dotted, -stealth, shorten <= 2.50pt, shorten >= 5.50pt] (n9) to ([yshift=-2.6]n13); \foreach \x/\y in {1/2, 1/3, 1/4, 3/13, 4/5, 5/6, 6/7, 7/8, 8/9} \draw [very thick, red, -stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture}.\bigskip The cases \ref{lemma:pathext:prf:b:i:i}, \ref{lemma:pathext:prf:b:i:ii} and \ref{lemma:pathext:prf:b:i:iiii} follow by similar arguments. Details are left to the reader.\medskip (ii) Assume that $J$ is principal, i.e., $\ov \CH(J)$ is a cycle graph and $\CH(J)$ has at least two sinks. There are two possibilities: degree of vertex $n$ in the graph $\ov\CH(I)$ equals either one or two. 
In the first case, the poset $I$ is indefinite, as it contains a subposet $\CF_1$, $\CF_2$, $\CF_3$ or $\CF_4$. In the second case, $\ov \CH(I)$ is a cycle graph and $\CH(I)$ has the same number of sinks as $\CH(J)$. Therefore, by \Cref{thm:digraphdegmax2}\ref{thm:digraphdegmax2:cycle:multsink}, $I$ is principal and statement \ref{lemma:pathext:princ} follows.\medskip (iii) Since the poset $J\subset I$ is indefinite, the poset $I$ is also indefinite. \end{proof} \begin{center} \textbf{Proof of \Cref{thm:a:main}} \end{center} Now we have all the necessary tools to prove the main result of this work. \begin{proof}[Proof of \Cref{thm:a:main}] Let $I=(V,\leq_I)$ be a finite connected non-negative poset of size $n$, rank $m$ and Dynkin type $\Dyn_I=\AA_m$.\smallskip \ref{thm:a:main:posit} Our aim is to show that $m=n$ if and only if \[ \ov\CH(I)\simeq P(1,n)=1 \,\rule[2.5pt]{22pt}{0.4pt}\,2\,\rule[2.5pt]{22pt}{0.4pt}\, \hdashrule[2.5pt]{12pt}{0.4pt}{1pt}\,\rule[2.5pt]{22pt}{.4pt}\,n. \] Since the implication ``$\Leftarrow$'' is a consequence of \Cref{lemma:trees}\ref{lemma:trees:posit}, it is sufficient to prove ``$\Rightarrow$''. First, we show that for every vertex $v\in V$ we have $\deg_{\ov \CH(I)}(v)\leq 2$. We proceed by contradiction. Assume that there exists a vertex $v$ of degree at least $3$. If that is the case, there exists a subposet $J\subseteq I$ whose Hasse digraph $\CH(J)$ has the form \begin{center} $\CH(J)\colon$ \tikzsetnextfilename{scnd_prf_1}\begin{tikzpicture}[baseline={([yshift=-2.75pt]current bounding box)},label distance=-2pt,xscale=0.65, yscale=0.74] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle v$] (n1) at (1, 0.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=left:$\scriptscriptstyle $] (n2) at (2, 0.50) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=left:$\scriptscriptstyle $] (n3) at (0, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=left:$\scriptscriptstyle $] (n4) at (0, 0 ) {}; \foreach \x/\y in {1/2, 3/1, 4/1} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \end{tikzpicture}. \end{center} By \Cref{lemma:trees}\ref{lemma:trees:posit}, $\Dyn_{J}=\DD_4$. Since $J\subseteq I$, \cite[Proposition 2.25]{barotQuadraticFormsCombinatorics2019} yields $\Dyn_I\in\{\DD_n, \EE_n\}$, contrary to our assumptions. We conclude that $\deg_{\ov \CH(I)}(v)\leq 2$ for every vertex $v\in V$ and, in view of \Cref{thm:digraphdegmax2}, statement \ref{thm:a:main:posit} follows.\medskip \ref{thm:a:main:princ} We need to show that $m=n-1$ if and only if $\ov \CH(I)$ is a cycle graph and $\CH(I)$ has at least two sinks. To prove ``$\Rightarrow$'', assume that $I$ is a connected principal poset of the Dynkin type $\Dyn_I=\AA_{m}$. By definition, there exists $k\in I$ such that the poset $J\eqdef I^{(k)}$ is positive of the Dynkin type $\Dyn_J=\AA_{m}$. By~\ref{thm:a:main:posit} we know that $\ov\CH(J)\simeq P(1,n-1)$; thus, by \Cref{lemma:pathext}\ref{lemma:pathext:princ}, $\ov \CH(I)$ is a cycle graph, $\CH(I)$ has at least two sinks, and ``$\Rightarrow$'' follows. 
``$\Leftarrow$'' By \Cref{thm:digraphdegmax2}\ref{thm:digraphdegmax2:cycle:multsink}, every poset $I$ with $\ov \CH(I)$ being a cycle graph and $\CH(I)$ having at least two sinks is principal of the Dynkin type $\Dyn_I=\AA_{n-1}$.\medskip \ref{thm:a:main:crkbiggeri} Our aim is to show that the assumption $\Dyn_I=\AA_{m}$ yields $m\in\{n, n-1\}$, i.e., $I$ is either positive or principal. The proof is divided into two parts. First, we show that $m\neq n-2$. Then, using this result, we show that $m> n-2$.\smallskip \textbf{Part $1^\circ$} [$m\neq n-2$] Assume, by contradiction, that $I$ is a connected non-negative poset of rank $n-2$ and Dynkin type $\Dyn_I=\AA_{n-2}$. Since there are no such posets of size $n\leq16$ (see \cite[Corollary 4.4(b)]{gasiorekAlgorithmicStudyNonnegative2015}), without loss of generality, we can assume that $n>16$. By \Cref{fact:specialzbasis}\ref{fact:specialzbasis:existance}, there exists such a basis $h^{k_1}, h^{k_2}$ of the free abelian group $\Ker q_I\subseteq \ZZ^n$, that $h^{k_1}_{k_1} = h^{k_2}_{k_2} = 1$ and $h^{k_1}_{k_2} = h^{k_2}_{k_1} = 0$ where \mbox{$1 \leq k_1<k_2 \leq n$}. Consider the posets $J_1\eqdef I^{(k_1)}$ and $J_2\eqdef I^{(k_2)}$. Since $\smash{J_1^{(k_2)}=J_2^{(k_1)}=I^{(k_1,k_2)}}$, the posets $J_1$ and $J_2$ are connected principal of Dynkin type $\AA_{m}$, see \Cref{fact:specialzbasis}\ref{fact:specialzbasis:subbigraph} and \Cref{df:Dynkin_type}. It follows that $\ov \CH(J_1)$ and $\ov \CH(J_2)$ are cycle graphs and the Hasse digraphs $\CH(J_1)$ and $\CH(J_2)$ have at least two sinks, see~\ref{thm:a:main:princ}. That is, one of the following conditions holds for the poset $I$: \begin{enumerate}[label=\normalfont{(\roman*)}, itemsep=2.5ex] \item\label{thm:a:main:crkbiggeri:prf:i} $\ov\CH(I)$ is a cycle graph and $\CH(I)$ has at least two sinks; \item\label{thm:a:main:crkbiggeri:prf:ii} $\CH(I)$ has at least two sinks and is of the shape $\smash{\CH(I)\colon \tikzsetnextfilename{scnd_prf_2}\begin{tikzpicture}[baseline=(n5.base),label distance=-2pt,xscale=0.55, yscale=0.51] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$ $] (n1) at (0 , 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$ $] (n2) at (1 , 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$ $] (n3) at (2 , 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label={[yshift=0.2ex]right:$\scriptscriptstyle k_1$}] (n4) at (3 , 3 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=left:$\scriptscriptstyle k_2$] (n5) at (3 , 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$ $] (n6) at (4 , 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$ $] (n7) at (5 , 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$ $] (n8) at (6 , 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$ $] (n9) at (6 , 0.4 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$ $] (n10) at (5 , 0.4 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$ $] (n11) at (4 , 0.4 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$ $] (n12) at (2 , 0.4 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$ $] (n13) at (1 , 0.4 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$ $] (n14) at (0 , 0.4 ) {}; \foreach \x/\y in {1/2, 2/3, 
6/7, 7/8, 8/9, 9/10, 10/11, 12/13, 13/14, 14/1} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {3/4, 3/5, 4/6, 5/6} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, -, shorten <= -1.50pt, shorten >= 2.50pt] (n11) to (n12); \end{tikzpicture}}$; \item\label{thm:a:main:crkbiggeri:prf:iii} $I$ contains an indefinite subposet $\CF_1$, $\CF_2$, $\CF_3$ or $\CF_4$. \end{enumerate} In the case~\ref{thm:a:main:crkbiggeri:prf:i}, the poset $I$ is principal, i.e., $m=n-1$, which contradicts the assumption. Now, we show that the same goes for~\ref{thm:a:main:crkbiggeri:prf:ii}. Without loss of generality, we may assume that the Hasse digraph $\CH(I)$ has the following form \begin{center} $\CH(I)\colon$ \tikzsetnextfilename{scnd_prf_3}\begin{tikzpicture}[baseline={([yshift=-7pt]current bounding box)},label distance=-2pt,xscale=0.64, yscale=0.64] \draw[draw, fill=cyan!10](0,1) circle (19pt); \draw[draw, fill=cyan!10](5,2) circle (17pt); \draw[draw, fill=cyan!10](9,1) circle (19pt); \draw[draw, fill=cyan!10](5,0) circle (17pt); \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=above left:$\scriptscriptstyle j$] (n1) at (1, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=left:$\scriptscriptstyle j\mpppss 1$] (n2) at (2, 3 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:$\scriptscriptstyle j\mpppss 2$] (n3) at (2, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label={[xshift=-0.7ex]above right:$\scriptscriptstyle j\mpppss 3$}] (n4) at (3, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=left:{$\scriptscriptstyle 1=r_1$}] (n5) at (0, 1 ) {}; \node (n6) at (4, 2 ) {$\scriptscriptstyle $}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle \phantom{\mpppss }r_2\phantom{\mpppss }$] (n7) at (5, 2 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle r_2\mpppss 1$] (n8) at (6, 2 ) {}; \node (n9) at (7, 2 ) {$\scriptscriptstyle $}; \node (n10) at (8, 2 ) {$\scriptscriptstyle $}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle r_t$] (n11) at (9, 1 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below right:$\scriptscriptstyle r_t\mpppss 1$] (n12) at (8, 0 ) {}; \node (n13) at (7, 0 ) {$\scriptscriptstyle $}; \node (n14) at (6, 0 ) {$\scriptscriptstyle $}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle r_s$] (n15) at (5, 0 ) {}; \node (n16) at (4, 0 ) {$\scriptscriptstyle $}; \node (n17) at (3, 0 ) {$\scriptscriptstyle $}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle n\mppmss 1$] (n18) at (2, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=below:$\scriptscriptstyle n\phantom{\mppmss }$] (n19) at (1, 0 ) {}; \foreach \x/\y in {1/2, 1/3, 2/4, 3/4} \draw [-stealth, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, -, shorten <= 4.50pt, shorten >= 4.50pt] (n5) to (n1); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, -, shorten <= -2.50pt, shorten >= -2.50pt] (n9) to (n10); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, -, shorten <= 2.50pt, 
shorten >= -2.50pt] (n4) to (n6); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, -, shorten <= -3.50pt, shorten >= -3.50pt] (n14) to (n13); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, -, shorten <= .10pt, shorten >= -2.50pt] (n17) to (n16); \foreach \x/\y in {6/7, 10/11, 16/15} \draw [-, shorten <= -2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \foreach \x/\y in {5/19, 7/8, 8/9, 12/11, 13/12, 15/14, 19/18} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-, shorten <= 2.50pt, shorten >= -2.50pt] (n18) to (n17); \end{tikzpicture} \end{center} where: \begin{itemize} \item every $r_1,\ldots,r_s\in \{1,\ldots, n\}\setminus \{j+1,j+2\}$ is either a source or a sink, \item $1 = r_1 < r_2 < \cdots < r_t<\cdots < r_s$ where $s>2$ is an even number, \item subdigraphs $\{1,\ldots,j,j+1,j+3,\ldots r_2\}$, $\{1,\ldots, j,j+2, j+3,\ldots,r_2\}$, $\{r_s,\ldots,n, 1\}$ and $\{r_{t-1},\ldots,r_{t}\}$, where $3\leq t\leq s$, have exactly one sink. \end{itemize} Since the quadratic form $q_{I}\colon \ZZ^n\to \ZZ$ \eqref{eq:quadratic_form} is given by the formula: {\allowdisplaybreaks\begin{align*} q_{I}(x) =&\sum_{\mathclap{1\leq i\leq n}} x_i^2 \,\,+ \sum_{\substack{1\leq i < r_2 \\ i\neq j+1}} x_i \sum_{\mathclap{i<k\leq r_2}}x_k + x_{j+1} \sum_{\mathclap{j+3\leq k \leq r_2}}x_k + \sum_{{2\leq t < s}}\left(\sum_{r_t\leq i<k \leq r_{t+1}} x_i x_k\right) \\ &+\sum_{r_s\leq i<k \leq n} x_i x_k + x_1(x_{r_s}\!+\cdots+ x_n)\\ =& \sum_{i\not\in\{r_1,\ldots,r_s,j+1,j+2\}} \frac{1}{2}x_i^2 + \frac{1}{2}(x_{j+1}-x_{j+2})^2 + \frac{1}{2} \sum_{1\leq t < s} \left(\sum_{r_t\leq i\leq r_{t+1}} x_i\right)^2\\ & +\frac{1}{2}(x_{r_s} + \cdots + x_{n} + x_1)^2, \end{align*}}then $q_I(v)\geq 0$ for every $v\in\ZZ^n$ and $I$ is non-negative. Consider the non-zero vector $h=[h_1,\ldots,h_n]\in\ZZ^n$, where $h_{r_i}=1$ if $i$ is odd, $-1$ if $i$ is even, and $h_k=0$ for $k\neq r_i$. It is straightforward to check that \[ q_I(h) = \tfrac{1}{2}(h_{r_1}+h_{r_2})^2+\cdots+\tfrac{1}{2}(h_{r_{s-1}}+h_{r_s})^2 +\tfrac{1}{2}(h_{r_s}+h_1)^2=0, \] i.e., the poset $I$ is not positive and $h\in\Ker q_I$. Since the vector $h$ has the first coordinate equal $h_1=1$ and $I^{(1)}\simeq\CD_{n-r,s}^{[2]}$ is a positive poset of Dynkin type $\DD_{n-1}$ (see \Cref{lemma:pathext}\ref{lemma:pathext:posit:d} and \Cref{df:Dynkin_type}), it follows that $I$ is principal of Dynkin type $\DD_{n-1}$. This contradicts the assumption that $\Dyn_I=\AA_{n-2}$.\smallskip To finish this part of the proof, we note that every poset $I$ that is not described in~\ref{thm:a:main:crkbiggeri:prf:i} or~\ref{thm:a:main:crkbiggeri:prf:ii} contains (as a subposet) one of the posets $\CF_1$, $\CF_2$, $\CF_3$ or $\CF_4$ presented in \Cref{tbl:indefposets}, hence is indefinite. This follows by the standard case-by-case inspection, as described in the proof of \Cref{lemma:pathext}. Details are left to the reader.\medskip \textbf{Part $2^\circ$} [$m> n-2$] Let $I$ be a connected non-negative poset of rank $m$ and Dynkin type $\Dyn_I=\AA_{m}$. We show that the assumption $m\in\{1,\ldots,n-3\}$ yields a contradiction. It follows from \Cref{fact:specialzbasis}\ref{fact:specialzbasis:existance} that there exists such a basis $h^{k_1},\ldots, h^{k_r}$ of the free abelian group $\Ker q_I\subseteq \ZZ^n$, that $h^{k_i}_{k_i} = 1$ and $h^{k_i}_{k_j} = 0$, for $1 \leq i,j \leq r$ and $i \neq j$, where $r=n-m$ and $1 \leq k_1 < \ldots < k_r \leq n$. 
Moreover, by \Cref{fact:specialzbasis}\ref{fact:specialzbasis:subbigraph}, the poset $J\eqdef I^{(k_3,\ldots,k_r)}$ is connected non-negative of size $n'=m+2$ and rank $m=n'-2$. Since $J^{(k_1,k_2)} = I^{(k_1,\ldots,k_r)}\sim_\ZZ\AA_m$, it follows that $\Dyn_J=\AA_{n'-2}$ which yields a contradiction with \textbf{Part $1^\circ$}. \end{proof} \section{Enumeration of \texorpdfstring{$\AA_n$}{An} Dynkin type non-negative posets} We finish the manuscript by giving explicit formulae~\eqref{fact:digrphnum:path:eq} and~\eqref{fact:digrphnum:cycle:eq} for the number of all possible orientations of the path and cycle graphs, up to isomorphism of \textit{unlabeled} digraphs. We apply these results to devise the formula~\eqref{thm:typeanum:eq} for the number of non-negative Dynkin type $\AA_m$ posets of size~$n$. \begin{fact}\label{fact:digrphnum:path} There are exactly \begin{equation}\label{fact:digrphnum:path:eq} ONum(P_n)= \begin{cases} 2^{\frac{n - 3}{2}} + 2^{n - 2}, & \textnormal{if $n\geq 1$ is odd,}\\ 2^{n-2}, & \textnormal{if $n\geq 2$ is even},\\ \end{cases} \end{equation} digraphs $D$ with $\ov D\simeq P_n\eqdef 1 \,\rule[2.5pt]{22pt}{0.4pt}\,2\,\rule[2.5pt]{22pt}{0.4pt}\, \hdashrule[2.5pt]{12pt}{0.4pt}{1pt}\,\rule[2.5pt]{22pt}{.4pt}\,n$, up to the isomorphism. \end{fact} \begin{proof} Here we follow arguments given in the proof of~\cite[Proposition 6.7]{gasiorekAlgorithmicCoxeterSpectral2020}. To calculate the number of non-isomorphic orientations among all $2^{n-1}$ possible orientations of edges of $P_n$, we consider two cases. \begin{enumerate}[label=\normalfont{(\roman*)},wide] \item\label{fact:digrphnum:path:prf:i} First, assume that $|I|=n\geq 2$ is an even number. In this case, every digraph $I$ has exactly two representatives among $2^{n-1}$ edge orientations: one drawn ``from the left'' and the other ``from the right'' (i.e., symmetric along the path). Therefore, the number $ONum(P_n)$ of all such non-isomorphic digraphs equals $2^{n-2}$. \item\label{fact:digrphnum:path:prf:ii} Now we assume that $|I|=n\geq 1$ is an odd number. If $n=1$, then, up to isomorphism, there exists exactly $1=2^{-1}+2^{-1}$ digraph. Otherwise, $n\geq 3$ and among all $2^{n-1}$ edge orientations: \begin{itemize} \item digraphs that are ``symmetric'' along the path have exactly one representation, and \item the rest of digraphs have exactly two representations, analogously as in~\ref{fact:digrphnum:path:prf:i}. \end{itemize} It is straightforward to check that there are $2^\frac{n-1}{2}$ ``symmetric'' path digraphs, therefore in this case we obtain \[ ONum(P_n)=\frac{2^{n-1}-2^\frac{n-1}{2}}{2}+2^\frac{n-1}{2}=2^{\frac{n - 3}{2}} + 2^{n - 2}.\qedhere \] \end{enumerate} \end{proof} \begin{remark} The formula~\eqref{fact:digrphnum:path:eq} describes the number of various combinatorial objects. For example the number of linear oriented trees with $n$ arrows or unique symmetrical triangle quilt patterns along the diagonal of an $n\times n$ square, see~\cite[OEIS sequence A051437]{oeis_A051437}. \end{remark} By \Cref{thm:a:main}\ref{thm:a:main:posit}, the Hasse digraph $\CH(I)$ of every positive connected poset $I$ of Dynkin type $\Dyn_I=\AA_n$ is an oriented path graph, as suggested in~\cite[Conjecture 6.4]{gasiorekAlgorithmicCoxeterSpectral2020}. Hence, \Cref{fact:digrphnum:path} gives an exact formula for the number of all, up to the poset isomorphism, such connected posets $I$ (see also \cite[Proposition 6.7]{gasiorekAlgorithmicCoxeterSpectral2020}). 
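For small $n$, formula~\eqref{fact:digrphnum:path:eq} is also easy to confirm by exhaustive enumeration. The following Python sketch (an independent numerical check only; it is not used in any of the proofs) identifies each of the $2^{n-1}$ edge orientations of $P_n$ with the orientation obtained by reversing the path, which is the only non-trivial symmetry of $P_n$, and counts the resulting classes.
\begin{verbatim}
# Brute-force check of the formula for ONum(P_n): enumerate all 2^(n-1) edge
# orientations of the path 1 - 2 - ... - n and identify each orientation with
# the one obtained by reversing the path.
from itertools import product

def onum_path_bruteforce(n):
    seen = set()
    for w in product((0, 1), repeat=n - 1):      # w[i] = 1 iff the i-th edge points "to the right"
        rev = tuple(1 - b for b in reversed(w))  # the same orientation read from the other end
        seen.add(min(w, rev))                    # canonical representative of the class
    return len(seen)

def onum_path_formula(n):
    if n % 2 == 1:
        return 1 if n == 1 else 2 ** ((n - 3) // 2) + 2 ** (n - 2)
    return 2 ** (n - 2)

for n in range(1, 13):
    assert onum_path_bruteforce(n) == onum_path_formula(n)
\end{verbatim}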
\begin{corollary}\label{cor:posit:num:poset} Given $n\geq 1$, the total number of all finite non-isomorphic connected positive posets $I$ of Dynkin type $\AA_n$ and size $n$ equals $Nneg(n,\AA_n) \eqdef ONum(P_n)$. \end{corollary} Similarly, the description given in \Cref{thm:a:main}\ref{thm:a:main:princ} makes it possible to count all connected principal posets $I$ of Dynkin type $\AA_{n-1}$. First, we need to know the exact number of all, up to isomorphism, orientations of the cycle graph $C_n$. \begin{fact}\label{fact:digrphnum:cycle} Let $C_n \eqdef$\tikzsetnextfilename{cycle_cn}\begin{tikzpicture}[baseline=(n11.base),label distance=-2pt,xscale=0.65, yscale=0.74] \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label={[name=n11]left:$1$}] (n1) at (0, 0 ) {}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label={[yshift=-0.3ex]above:$\scriptscriptstyle 2$}] (n2) at (1, 0 ) {}; \node (n3) at (2, 0 ) {$ $}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label=right:$n$] (n4) at (5, 0 ) {}; \node (n5) at (3, 0 ) {$ $}; \node[circle, fill=black, inner sep=0pt, minimum size=3.5pt, label={[yshift=-0.5ex]above:$\scriptscriptstyle n\mppmss 1$}] (n6) at (4, 0 ) {}; \draw[shorten <= 2.50pt, shorten >= 2.50pt] (n1) .. controls (0.2,0.6) and (4.8,0.6) .. (n4); \draw [line width=1.2pt, line cap=round, dash pattern=on 0pt off 5\pgflinewidth, -, shorten <= -2.50pt, shorten >= -2.50pt] (n3) to (n5); \foreach \x/\y in {1/2, 6/4} \draw [-, shorten <= 2.50pt, shorten >= 2.50pt] (n\x) to (n\y); \draw [-, shorten <= 2.50pt, shorten >= -2.50pt] (n2) to (n3); \draw [-, shorten <= -2.50pt, shorten >= 2.50pt] (n5) to (n6); \end{tikzpicture}be the cycle graph on $n\geq 3$ vertices. The number $ONum(C_n)$ of digraphs $\CD$ with $\ov \CD=C_ n$, up to the isomorphism, equals \begin{equation}\label{fact:digrphnum:cycle:eq} ONum(C_n)= \begin{cases} \frac{1}{2n} \sum_{d\mid n}\left(2^{\frac{n}{d}}\varphi(d)\right), & \textnormal{if $n\geq 3$ is odd,}\\[0.1cm] \frac{1}{2n} \sum_{d\mid n}\left(2^{\frac{n}{d}}\varphi(d)\right)+ 2^{\frac{n}{2}-2}, & \textnormal{if $n\geq 4$ is even},\\ \end{cases} \end{equation} where $\varphi$ is the Euler's totient function. \end{fact} \begin{proof} Assume that $\CD$ is such a digraph that $\ov \CD=C_n$. Without loss of generality, we may assume that $\CD$ is depicted in the circle layout (on the plane), and its arrows are labeled with two colors: \begin{itemize} \item \textit{black}: if the arrow is clockwise oriented, and \item \textit{white}: if the arrow is counterclockwise oriented. \end{itemize} That is, every $\CD$ can be viewed as a binary combinatorial necklace $\CN_2(n)$. For example, for $n=5$ there exist $32$ orientations of edges of the cycle $C_5$ that yield exactly $8$ different binary necklaces of length $5$ shown in \Cref{tab:cycle_nekl_5}. 
\begin{longtable}{|@{}c@{}c@{}|@{}c@{}c@{}|}\hline \ringv{a}{0}{0}{0}{0}{0} & \ringv{b}{1}{1}{1}{1}{1} & \ringv{c}{0}{0}{0}{0}{1} & \ringv{d}{0}{1}{1}{1}{1}\\\hline \ringv{e}{0}{0}{0}{1}{1} & \ringv{f}{0}{0}{1}{1}{1} & \ringv{g}{0}{0}{1}{0}{1} & \ringv{h}{0}{1}{0}{1}{1}\\\hline \caption{Binary combinatorial necklaces of length $5$}\label{tab:cycle_nekl_5} \end{longtable} Moreover, up to digraph isomorphism, every $\CD$ has in this case exactly two representations among the necklaces: a ``clockwise'' and an ``anticlockwise'' one, as shown in \Cref{tab:cycle_nekl_5} (isomorphic digraphs are gathered in boxes).\pagebreak On the other hand, if $|\CD|=n\geq 4$ is an even number, certain digraphs have exactly one representation among necklaces. As an illustration, let us consider the $n=6$ case. \begin{longtable}{@{}|@{}c@{}c@{}|@{}c@{}c@{}|@{}c@{}c@{}c@{}}\cline{1-6} \ringvi{a}{0}{0}{0}{0}{0}{0} & \ringvi{b}{1}{1}{1}{1}{1}{1} & \ringvi{c}{0}{0}{0}{0}{0}{1} & \ringvi{d}{0}{1}{1}{1}{1}{1} & \ringvi{e}{0}{0}{0}{0}{1}{1} & \multicolumn{1}{@{}c@{}|@{}}{\ringvi{f}{0}{0}{1}{1}{1}{1}} & \ringvi{i}{0}{0}{0}{1}{1}{1}\\\cline{1-6} \ringvi{g}{0}{0}{0}{1}{0}{1} & \ringvi{h}{0}{1}{0}{1}{1}{1} & \ringvi{j}{0}{0}{1}{0}{0}{1} & \ringvi{k}{0}{1}{1}{0}{1}{1} & \ringvi{l}{0}{0}{1}{0}{1}{1} & \ringvi{m}{0}{1}{1}{0}{1}{0} & \ringvi{n}{0}{1}{0}{1}{0}{1}\\\cline{1-4} \caption{Binary combinatorial necklaces of length $6$}\label{tab:cycle_nekl_6} \end{longtable} Every such ``rotationally symmetric'' digraph is uniquely determined by a directed path graph of length $\frac{n}{2} + 1$ and, by \Cref{fact:digrphnum:path}, there are exactly $2^{\frac{n}{2}-1}$ such digraphs. Now we show that every isomorphism $f\colon\{1,\ldots,n\}\to\{1,\ldots,n\}$ of digraphs $\CD_1$ and $\CD_2$ with $\ov \CD_1=\ov \CD_2=C_n$ is a ``clockwise'' or an ``anticlockwise'' \textit{rotation}. Fix a vertex $v_1\in \CD_1$ and consider the sequence $v_1,v_2,\ldots,v_n$ of vertices, where $v_{i+1}$ is a ``clockwise'' neighbour of $v_i$ (i.e., the digraph $\CD_1$ contains either the \textit{black} arrow $v_i\to v_{i+1}$ or the \textit{white} arrow $v_i\gets v_{i+1}$). One of two possibilities holds: $f(v_2)$ is either a ``clockwise'' neighbour of $f(v_{1})$ or an ``anticlockwise'' one. If the first possibility holds, then $f(v_{i+1})$ is a ``clockwise'' neighbour of $f(v_i)$ for every $2\leq i < n$, hence the isomorphism $f$ encodes a ``clockwise'' rotation. On the other hand, the assumption that $f(v_2)$ is an ``anticlockwise'' neighbour of $f(v_{1})$ implies that $f(v_{i+1})$ is an ``anticlockwise'' neighbour of $f(v_i)$, i.e., $f$ encodes an ``anticlockwise'' rotation.\smallskip Summing up, if $|\CD|=n\geq 3$ is an odd number, the digraph $\CD$ has exactly two representatives among binary necklaces $\CN_2(n)$: one ``clockwise'' and the other ``anticlockwise''. Since $|\CN_2(n)|=\frac{1}{n} \sum_{d\mid n}\left(2^{\frac{n}{d}}\varphi(d)\right)$, see~\cite{riordanCombinatorialSignificanceTheorem1957}, the first part of the equality~\eqref{fact:digrphnum:cycle:eq} follows. Assume now that $|\CD|=n\geq 4$ is an even number. In the quantity $|\CN_2(n)|/2$ every ``rotationally symmetric'' digraph is counted only with weight $\tfrac{1}{2}$, so half of them (i.e., $2^{\frac{n}{2}-2}$ digraphs) have to be added back. Hence, $ONum(C_n)=\frac{1}{2n} \sum_{d\mid n}\left(2^{\frac{n}{d}}\varphi(d)\right)+ 2^{\frac{n}{2}-2}$ in this case.
\end{proof} In the proof of \Cref{fact:digrphnum:cycle} we show that the number of cyclic graphs with oriented edges, up to the symmetry of the dihedral group, coincides with the number of such digraphs, up to isomorphism of unlabeled digraphs. \begin{remark} The formula~\eqref{fact:digrphnum:cycle:eq} is described in~\cite[OEIS sequence A053656]{oeis_A053656} and, among others, counts the number of minimal fibrations of a bidirectional $n$-cycle over the $2$-bouquet (up to precompositions with automorphisms of the $n$-cycle), see~\cite{boldiFibrationsGraphs2002}. \end{remark} \begin{corollary}\label{cor:cycle_pos:dag_dyna:num} Let $n\geq 3$ be an integer. Then, up to isomorphism, there exists exactly: \begin{enumerate}[label=\normalfont{(\alph*)}] \item\label{cor:cycle_pos:dag_dyna:num:cycle} $ONum(C_n)-1$ directed acyclic graphs $\CD$ whose underlying graph is $\ov \CD=C_n$, \item\label{cor:cycle_pos:dag_dyna:num:poset} $Nneg(n,\AA_{n-1})=ONum(C_n)-\lceil\frac{n+1}{2}\rceil$ principal posets $I$ of Dynkin type $\AA_{n-1}$, \end{enumerate} where $ONum(C_n)$ is given by the formula \eqref{fact:digrphnum:cycle:eq}. \end{corollary} \begin{proof} Since, up to isomorphism, there exists exactly one cyclic orientation of the $C_n$ graph, \ref{cor:cycle_pos:dag_dyna:num:cycle} follows directly from \Cref{fact:digrphnum:cycle}.\pagebreak To prove~\ref{cor:cycle_pos:dag_dyna:num:poset}, we note that by \Cref{thm:a:main}\ref{thm:a:main:princ} it is sufficient to count all oriented cycles that have at least two sinks. Since, among all possible orientations of a cycle, there are: \begin{itemize} \item $\lfloor\frac{n}{2}\rfloor$ cycles with exactly one sink, \item $1$ oriented cycle \end{itemize} and $\lfloor\frac{n}{2}\rfloor+1=\lceil\frac{n+1}{2}\rceil$, the statement~\ref{cor:cycle_pos:dag_dyna:num:poset} follows from \Cref{fact:digrphnum:cycle}. \end{proof} \begin{center} \textbf{Proof of \Cref{thm:typeanum}} \end{center} Now, we can devise an exact formula for the total number of non-negative posets of size $n$ and Dynkin type $\AA_m$. \begin{proof}[Proof of \Cref{thm:typeanum}] We note that $Nneg(n,\AA)=Nneg(n,\AA_n)+Nneg(n,\AA_{n-1})$ by \Cref{thm:a:main}, hence by \Cref{cor:posit:num:poset} and \Cref{cor:cycle_pos:dag_dyna:num}\ref{cor:cycle_pos:dag_dyna:num:poset}, for $n\ge 3$ we have \begin{equation*} Nneg(n,\AA)= \begin{cases} \frac{1}{2n} \sum_{d\mid n}\left(2^{\frac{n}{d}}\varphi(d)\right)+ 2^{n - 2} + 2^{\frac{n - 3}{2}}-\lceil\frac{n+1}{2}\rceil, & \textnormal{if $n\geq 3$ is odd,}\\ \frac{1}{2n} \sum_{d\mid n}\left(2^{\frac{n}{d}}\varphi(d)\right) + 2^{n-2} + 2^{\frac{n}{2}-2}-\lceil\frac{n+1}{2}\rceil, & \textnormal{if $n\geq 4$ is even}.\\ \end{cases}\end{equation*} Since $\frac{n - 3}{2} = \lceil\frac{n}{2}-2\rceil$ for odd values of $n$ and $Nneg(1,\AA)=Nneg(2,\AA) = 1$, it follows that \begin{equation*} Nneg(n,\AA)=\frac{1}{2n} \sum_{d\mid n}\big(2^{\frac{n}{d}}\varphi(d)\big) + \big\lfloor 2^{n - 2} + 2^{\lceil\frac{n}{2}-2\rceil} - \tfrac{n+1}{2}\big\rfloor \end{equation*} for any $n\geq 1$. \end{proof} Summing up, we get the following asymptotic description of connected non-negative posets $I$ of Dynkin type $\AA_m$. \begin{corollary} Let $Nneg(n,\AA_{n-1})$ be the number of principal posets $I$ of Dynkin type $\AA_{n-1}$ and $Nneg(n,\AA)$ be the number of all non-negative posets $I$ of size $n$ and Dynkin type $\AA_{m}$. 
Then \begin{enumerate}[label=\normalfont{(\alph*)}] \item $\lim_{n\to \infty} \frac{Nneg(n+1,\AA)}{Nneg(n,\AA)}=2$ and $Nneg(n,\AA)\approx 2^{n-2}$, \item the number of connected non-negative posets of Dynkin type $\AA_{m}$ grows exponentially, \item $\lim_{n\to\infty}\frac{Nneg(n,\AA_{n-1})}{Nneg(n,\AA)}=0$, hence almost all such posets are positive. \end{enumerate} \end{corollary} \begin{proof} Apply \Cref{cor:cycle_pos:dag_dyna:num}\ref{cor:cycle_pos:dag_dyna:num:poset} and \Cref{thm:typeanum}. \end{proof} \begin{landscape} \begin{figure}[H] \centering \tikzsetnextfilename{plot} \begin{tikzpicture} \definecolor{chocolate2267451}{RGB}{226,74,51} \definecolor{dimgray85}{RGB}{85,85,85} \definecolor{gainsboro229}{RGB}{229,229,229} \definecolor{lightgray204}{RGB}{204,204,204} \definecolor{steelblue52138189}{RGB}{52,138,189} \begin{axis}[ axis background/.style={fill=gainsboro229}, axis line style={white}, height=10cm, legend cell align={left}, legend style={ fill opacity=0.8, draw opacity=1, text opacity=1, at={(0.03,0.97)}, anchor=north west, draw=lightgray204 }, log basis y={10}, tick align=outside, tick pos=left, width=15cm, x grid style={white}, xlabel={Poset size}, xmajorgrids, xmin=-3.9, xmax=103.9, xtick style={color=dimgray85}, y grid style={white}, ylabel={Number of non isomorphic non-negative posets}, ymajorgrids, ymin=0.0346740460021202, ymax=4.56988275953839e+30, ymode=log, ytick style={color=dimgray85}, ytick={1e-06,0.01,100,1000000,10000000000,100000000000000,1e+18,1e+22,1e+26,1e+30,1e+34,1e+38}, yticklabels={ \(\displaystyle {10^{-6}}\), \(\displaystyle {10^{-2}}\), \(\displaystyle {10^{2}}\), \(\displaystyle {10^{6}}\), \(\displaystyle {10^{10}}\), \(\displaystyle {10^{14}}\), \(\displaystyle {10^{18}}\), \(\displaystyle {10^{22}}\), \(\displaystyle {10^{26}}\), \(\displaystyle {10^{30}}\), \(\displaystyle {10^{34}}\), \(\displaystyle {10^{38}}\) } ] \addplot [thick, chocolate2267451] table { 1 1 2 1 3 3 4 4 5 10 6 16 7 36 8 64 9 136 10 256 11 528 12 1024 13 2080 14 4096 15 8256 16 16384 17 32896 18 65536 19 131328 20 262144 21 524800 22 1048576 23 2098176 24 4194304 25 8390656 26 16777216 27 33558528 28 67108864 29 134225920 30 268435456 31 536887296 32 1073741824 33 2147516416 34 4294967296 35 8590000128 36 17179869184 37 34359869440 38 68719476736 39 137439215616 40 274877906944 41 549756338176 42 1099511627776 43 2199024304128 44 4398046511104 45 8796095119360 46 17592186044416 47 35184376283136 48 70368744177664 49 140737496743936 50 281474976710656 51 562949970198528 52 1.12589990684262e+15 53 2.25179984723968e+15 54 4.5035996273705e+15 55 9.00719932184986e+15 56 1.8014398509482e+16 57 3.60287971531817e+16 58 7.20575940379279e+16 59 1.44115188344291e+17 60 2.88230376151712e+17 61 5.76460752840294e+17 62 1.15292150460685e+18 63 2.30584301028744e+18 64 4.61168601842739e+18 65 9.22337203900226e+18 66 1.84467440737096e+19 67 3.68934881517141e+19 68 7.37869762948382e+19 69 1.47573952598266e+20 70 2.95147905179353e+20 71 5.90295810375886e+20 72 1.18059162071741e+21 73 2.36118324146918e+21 74 4.72236648286965e+21 75 9.44473296580801e+21 76 1.88894659314786e+22 77 3.77789318630946e+22 78 7.55578637259143e+22 79 1.51115727452104e+23 80 3.02231454903657e+23 81 6.04462909807864e+23 82 1.20892581961463e+24 83 2.41785163923036e+24 84 4.83570327845852e+24 85 9.67140655691923e+24 86 1.93428131138341e+25 87 3.86856262276725e+25 88 7.73712524553363e+25 89 1.54742504910681e+26 90 3.09485009821345e+26 91 6.18970019642708e+26 92 1.23794003928538e+27 93 2.4758800785708e+27 94 
4.95176015714152e+27 95 9.90352031428311e+27 96 1.98070406285661e+28 97 3.96140812571323e+28 98 7.92281625142643e+28 99 1.58456325028529e+29 }; \addlegendentry{positive} \addplot [thick, densely dashed, steelblue52138189] table { 1 0 2 0 3 0 4 1 5 1 6 5 7 6 8 17 9 25 10 56 11 88 12 185 13 309 14 615 15 1088 16 2113 17 3847 18 7419 19 13788 20 26489 21 49929 22 95873 23 182350 24 350637 25 671079 26 1292748 27 2485520 28 4797871 29 9256381 30 17904460 31 34636818 32 67126265 33 130150571 34 252679814 35 490853398 36 954506467 37 1857283137 38 3616952517 39 7048151652 40 13744170619 41 26817356755 42 52358246192 43 102280151400 44 199912301315 45 390937468385 46 764879842415 47 1497207322906 48 2932035377905 49 5744387279793 50 11259007792596 51 22076468764166 52 43303859993497 53 84973577874889 54 166800021000937 55 327534518354268 56 643371444844535 57 1.26416831646405e+15 58 2.48474476084341e+15 59 4.88526061274085e+15 60 9.60767948245851e+15 61 1.89003525345384e+16 62 3.71910168318295e+16 63 7.32013653718966e+16 64 1.44115189183153e+17 65 2.83796062672455e+17 66 5.58992246870488e+17 67 1.1012981536543e+18 68 2.17020518956359e+18 69 4.27750587216466e+18 70 8.43279729967401e+18 71 1.66280509960199e+19 72 3.27942117042521e+19 73 6.46899518201321e+19 74 1.27631526599333e+20 75 2.51859545753048e+20 76 4.97091208793648e+20 77 9.81270957479407e+20 78 1.93738112131825e+21 79 3.82571461903364e+21 80 7.55578637287318e+21 81 1.49250101186991e+22 82 2.948599560092e+22 83 5.82614852826327e+22 84 1.15135792345376e+23 85 2.27562507221577e+23 86 4.4983286311467e+23 87 8.89324740865934e+23 88 1.75843755580759e+24 89 3.47735966091399e+24 90 6.87744466270555e+24 91 1.36037366954437e+25 92 2.69117399844828e+25 93 5.32447328724895e+25 94 1.05356599088153e+26 95 2.08495164511222e+26 96 4.12646679761865e+26 97 8.16785180559426e+26 98 1.61690127580146e+27 99 3.20113787936422e+27 }; \addlegendentry{principal} \end{axis} \end{tikzpicture} \captionsetup{width=.9\linewidth} \caption{Logarithmic scale plot of the number of connected non-negative posets $I$ of Dynkin type $\Dyn_I=\AA_m$} \label{fig:coxgrow} \end{figure} {\newcommand{\msep}{\,\,\,\,} \begin{longtable}{lr@{\msep}r@{\msep}r@{\msep}r@{\msep}r@{\msep}r@{\msep}r@{\msep}r@{\msep}r@{\msep}r@{\msep}r@{\msep}r@{\msep}r@{\msep}r@{\msep}r@{\msep}r@{\msep}r@{\msep}r@{\msep}r@{\msep}r}\toprule $n$ & $1$ & $2$ & $3$ & $4$ & $5$ & $6$ & $7$ & $8$ & $9$ & $10$ & $11$ & $12$ & $13$ & $14$ & $15$ & $16$ & $17$ & $18$ & $19$ & $20$\\\midrule positive & $\num{1}$ & $\num{1}$ & $\num{3}$ & $\num{4}$ & $\num{10}$ & $\num{16}$ & $\num{36}$ & $\num{64}$ & $\num{136}$ & $\num{256}$ & $\num{528}$ & $\num{1024}$ & $\num{2080}$ & $\num{4096}$ & $\num{8256}$ & $\num{16384}$ & $\num{32896}$ & $\num{65536}$ & $\num{131328}$ & $\num{262144}$\\ principal & $\num{0}$ & $\num{0}$ & $\num{0}$ & $\num{1}$ & $\num{1}$ & $\num{5}$ & $\num{6}$ & $\num{17}$ & $\num{25}$ & $\num{56}$ & $\num{88}$ & $\num{185}$ & $\num{309}$ & $\num{615}$ & $\num{1088}$ & $\num{2113}$ & $\num{3847}$ & $\num{7419}$ & $\num{13788}$ & $\num{26489}$\\\midrule all & $\num{1}$ & $\num{1}$ & $\num{3}$ & $\num{5}$ & $\num{11}$ & $\num{21}$ & $\num{42}$ & $\num{81}$ & $\num{161}$ & $\num{312}$ & $\num{616}$ & $\num{1209}$ & $\num{2389}$ & $\num{4711}$ & $\num{9344}$ & $\num{18497}$ & $\num{36743}$ & $\num{72955}$ & $\num{145116}$ & $\num{288633}$\\\bottomrule \captionsetup{width=.9\linewidth} \caption{Number of connected non-negative posets $I$ of Dynkin type $\Dyn_I=\AA_m$ and size $1\leq |I|\leq 20$} 
\end{longtable}}\end{landscape} \section{Future work}\label{sec:conclusions} In the present work, we give a complete description of connected non-negative posets $I$ of Dynkin type $\Dyn_I=\AA_m$ and, in particular, we show that $m\in\{n,n-1\}$. Computer experiments suggest that there is an upper bound for the rank of $\EE_n$ type posets as well. \begin{conjecture}\label{conj:typeE} If $I$ is a Dynkin type $\EE_m$ non-negative connected poset, then $m\geq n-3$. \end{conjecture} The conjecture yields $|I|\leq 11$, and consequently, we get the following. \begin{conjecture} If $I$ is a non-negative connected poset of size $n>11$ and rank $m<n-1$, then $\Dyn_I=\DD_m$. \end{conjecture} In other words, checking the Dynkin type of a connected non-negative poset $I$ that has at least $n\geq 12$ elements is straightforward (compare with \cite{makurackiQuadraticAlgorithmCompute2020} and \cite{zajacPolynomialTimeInflation2020}). \begin{proposition} If \Cref{conj:typeE} holds, the Dynkin type of a connected non-negative poset $I$ of size $n\geq 12$ and rank $m$, encoded in the form of the adjacency list of the Hasse digraph~$\CH(I)$, can be calculated in $O(n)$. Moreover, assuming that this adjacency list is sorted by degrees of vertices, $\Dyn_I$ can be calculated in $O(1)$. \end{proposition} \begin{proof} First, we note that the assumptions yield $\Dyn_I\in\{\AA_m, \DD_m\}$. Moreover, $\Dyn_I=\AA_m$ if and only if one of the following conditions hold: \begin{enumerate}[label=\normalfont{(\roman*)}] \item $\deg_{\ov \CH(I)}(v)=2$ for all $v\in \CH(I)$, or \item $\deg_{\ov \CH(I)}(v)=2$ for all but two $v_i\in \CH(I)$ with $\deg_{\ov \CH(I)}(v_i)=1$, \end{enumerate} see \Cref{thm:a:main}. In the pessimistic case, to verify these conditions, one has to examine the degrees of all $n$ vertices, thus we have $O(n)$ complexity. In the case of the adjacency list sorted by degrees of vertices, this can be simplified to checking degrees of at most two vertices, which yields $O(1)$ complexity. \end{proof} Nevertheless, this description does not give any insights into the structure of $\DD_m$ type non-negative connected posets. \begin{open} Give a structural description of Hasse digraphs of $\DD_m$ type non-negative connected posets. \end{open} \begin{thebibliography}{99} \bibitem{ASS} I.~Assem, A.~Skowro{\'n}ski, and D.~Simson. \newblock {\em Elements of the {{Representation Theory}} of {{Associative Algebras}}: {{Techniques}} of {{Representation Theory}}}, volume~65 of {\em London {{Math}}. {{Soc}}. {{Student Texts}}}. \newblock {Cambridge University Press}, {Cambridge}, 2006. \bibitem{barotQuadraticFormsCombinatorics2019} M.~Barot, J.~A. Jim{\'e}nez~Gonz{\'a}lez, and J.-A. {de la Pe{\~n}a}. \newblock {\em Quadratic {{Forms}}: {{Combinatorics}} and {{Numerical Results}}}, volume~25 of {\em Algebra and {{Applications}}}. \newblock {Springer International Publishing}, {Cham}, 2019. \bibitem{boldiFibrationsGraphs2002} P.~Boldi and S.~Vigna. \newblock Fibrations of graphs. \newblock {\em Discrete Math.}, 243(1-3):21--66, 2002. \bibitem{brinkmannPosets16Points2002} G.~Brinkmann and B.~D. McKay. \newblock Posets on up to 16 points. \newblock {\em Order}, 19(2):147--179, 2002. \bibitem{diestelGraphTheory2017} R.~Diestel. \newblock {\em Graph {{Theory}}}, volume 173 of {\em Graduate {{Texts}} in {{Mathematics}}}. \newblock {Springer Berlin Heidelberg}, {Berlin, Heidelberg}, 2017. \bibitem{gasiorekCongruenceRationalMatrices2023} M.~G{\k{a}}siorek. \newblock Congruence of rational matrices defined by an integer matrix. 
\newblock {\em Appl. Math. Comput.}, 440:127639, 2023. \bibitem{gasiorekCoxeterTypeClassification2019} M.~G{\k{a}}siorek. \newblock A {{Coxeter}} type classification of one-peak principal posets. \newblock {\em Linear Algebra Appl.}, 582:197--217, 2019. \bibitem{gasiorekAlgorithmicCoxeterSpectral2020} M.~G{\k{a}}siorek. \newblock On algorithmic {{Coxeter}} spectral analysis of positive posets. \newblock {\em Appl. Math. Comput.}, 386:125507, 2020. \bibitem{gasiorekOnepeakPosetsPositive2012} M.~G{\k{a}}siorek and D.~Simson. \newblock One-peak posets with positive quadratic {{Tits}} form, their mesh translation quivers of roots, and programming in {{Maple}} and {{Python}}. \newblock {\em Linear Algebra Appl.}, 436(7):2240--2272, 2012. \bibitem{gasiorekAlgorithmicStudyNonnegative2015} M.~G{\k{a}}siorek and K.~Zaj{\k{a}}c. \newblock On algorithmic study of non-negative posets of corank at most two and their {{Coxeter}}-{{Dynkin}} types. \newblock {\em Fundam. Inform.}, 139(4):347--367, 2015. \bibitem{makurackiQuadraticAlgorithmCompute2020} B.~Makuracki and A.~Mr{\'o}z. \newblock Quadratic algorithm to compute the {{Dynkin}} type of a positive definite quasi-{{Cartan}} matrix. \newblock {\em Math. Comp.}, 90(327):389--412, 2020. \bibitem{oeis_A051437} {OEIS Foundation Inc.} \newblock The on-line encyclopedia of integer sequences, 2021. \newblock Available from: \url{https://oeis.org/A051437}. \bibitem{oeis_A053656} {OEIS Foundation Inc.} \newblock The on-line encyclopedia of integer sequences, 2021. \newblock Available from: \url{https://oeis.org/A053656}. \bibitem{riordanCombinatorialSignificanceTheorem1957} J.~Riordan. \newblock The combinatorial significance of a theorem of {{P\'olya}}. \newblock {\em J. Soc. Ind. Appl. Math.}, 5(4):225--237, 1957. \bibitem{Si92} D.~Simson. \newblock {\em Linear representations of partially ordered sets and vector space categories}, volume~4 of {\em Algebra, logic and applications}. \newblock {Gordon and Breach Science Publishers}, {Montreux}, 1992. \bibitem{simsonIncidenceCoalgebrasIntervally2009} D.~Simson. \newblock Incidence coalgebras of intervally finite posets, their integral quadratic forms and comodule categories. \newblock {\em Colloq. Math.}, 115(2):259--295, 2009. \bibitem{simsonCoxeterGramClassificationPositive2013} D.~Simson. \newblock A {{Coxeter}}-{{Gram}} classification of positive simply laced edge-bipartite graphs. \newblock {\em SIAM J. Discrete Math.}, 27(2):827--854, 2013. \bibitem{simsonSymbolicAlgorithmsComputing2016} D.~Simson. \newblock Symbolic algorithms computing {{Gram}} congruences in the Coxeter spectral classification of edge-bipartite graphs, {{I}}. {{A Gram}} classification. \newblock {\em Fundam. Inform.}, 145(1):19--48, 2016. \bibitem{SimZaj_intmms} D.~Simson and K.~Zaj\k{a}c. \newblock A framework for {C}oxeter spectral classification of finite posets and their mesh geometries of roots. \newblock {\em Int. J. Math. Math. Sci.}, Article ID 743734, 22~pages, 2013. \bibitem{zajacStructureLoopfreeNonnegative2019} K.~Zaj{\k{a}}c. \newblock On the structure of loop-free non-negative edge-bipartite graphs. \newblock {\em Linear Algebra Appl.}, 579:262--283, 2019. \bibitem{zajacPolynomialTimeInflation2020} K.~Zaj{\k{a}}c. \newblock On polynomial time inflation algorithm for loop-free non-negative edge-bipartite graphs. \newblock {\em Discrete Appl. Math.}, 283:28--43, 2020. \end{thebibliography} \end{document}
2205.15013v5
http://arxiv.org/abs/2205.15013v5
Computation of q-Binomial Coefficients with the $P(n,m)$ Integer Partition Function
\documentclass{article} \usepackage{amsmath} \usepackage{amsthm} \usepackage{alltt} \usepackage{hyperref} \usepackage{algorithm} \usepackage{algpseudocode} \usepackage{graphicx} \setlength{\oddsidemargin}{1.0cm} \setlength{\evensidemargin}{1.0cm} \setlength{\textwidth}{14.7cm} \numberwithin{equation}{section} \newtheorem{theorem}{Theorem}[section] \newtheorem{lemma}{Lemma}[section] \newtheorem{definition}{Definition}[section] \newtheorem{corollary}{Corollary}[section] \newcommand{\gaussian}[2] {\genfrac{(}{)}{0pt}{}{#1}{#2}_{\textstyle q}} \newcommand{\dblsum}[3] { \mathop{\sum\sum}_{ \genfrac{}{}{0pt}{}{\scriptstyle #1=0\ #2=0} {\scriptstyle #3}}^{\infty\ \ \infty} } \title{Computation of q-Binomial Coefficients\\ with the $P(n,m)$ Integer Partition Function} \author{M.J. Kronenburg} \date{} \begin{document} \maketitle \begin{abstract} Using $P(n,m)$, the number of integer partitions of $n$ into exactly $m$ parts, which was the subject of an earlier paper, $P(n,m,p)$, the number of integer partitions of $n$ into exactly $m$ parts with each part at most $p$, can be computed in $O(n^2)$, and the q-binomial coefficient can be computed in $O(n^3)$. Using the definition of the q-binomial coefficient, some properties of the q-binomial coefficient and $P(n,m,p)$ are derived. The q-multinomial coefficient can be computed as a product of q-binomial coefficients. A formula for $Q(n,m,p)$, the number of integer partitions of $n$ into exactly $m$ distinct parts with each part at most $p$, is given. Some formulas for the number of integer partitions with each part between a minimum and a maximum are derived. A computer algebra program is listed implementing these algorithms using the computer algebra program of the earlier paper. \end{abstract} \noindent \textbf{Keywords}: q-binomial coefficient, integer partition function.\\ \textbf{MSC 2010}: 05A17 11B65 11P81 \section{Definitions and Basic Identities} Let the coefficient of a power series be defined as: \begin{equation} [q^n] \sum_{k=0}^{\infty} a_k q^k = a_n \end{equation} Let $P(n)$ be the number of integer partitions of $n$, and let $P(n,m)$ be the number of integer partitions of $n$ into exactly $m$ parts. Let $P(n,m,p)$ be the number of integer partitions of $n$ into exactly $m$ parts with each part at most $p$, and let $P^*(n,m,p)$ be the number of integer partitions of $n$ into at most $m$ parts with each part at most $p$, which is the number of Ferrer diagrams that fit in a $m$ by $p$ rectangle: \begin{equation}\label{pnmpsum} P^*(n,m,p) = \sum_{k=0}^m P(n,k,p) \end{equation} Let the following definition of the q-binomial coefficient, also called the Gaussian polynomial, be given. \begin{definition} The q-binomial coefficient is defined by \cite{A84,AAR}: \begin{equation}\label{gaussdef} \gaussian{m+p}{m} = \prod_{j=1}^m \frac{1-q^{p+j}}{1-q^j} \end{equation} \end{definition} The q-binomial coefficient is the generating function of $P^*(n,m,p)$ \cite{A84}: \begin{equation}\label{pnmpgen} P^*(n,m,p) = [q^n] \gaussian{m+p}{m} \end{equation} The q-binomial coefficient is a product of cyclotomic polynomials \cite{knuth}. \section{Properties of q-Binomial Coefficients and $P(n,m,p)$} Some identities of the q-binomial coefficient are proved from its definition, and from these some properties of $P^*(n,m,p)$ and $P(n,m,p)$ are derived. 
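Before doing so, we note that definition (\ref{gaussdef}) and the combinatorial interpretation (\ref{pnmpgen}) are easy to test numerically for small parameters. The following Python sketch (an illustration only, independent of the Mathematica program listed at the end of this paper) expands the defining product as a truncated integer power series and compares the coefficients with a brute force count of the Ferrer diagrams that fit in an $m$ by $p$ rectangle.
\begin{verbatim}
# Check of (1.3)-(1.4): the coefficient of q^n in the Gaussian polynomial
# ((m+p) choose m)_q equals the number of partitions of n into at most m parts,
# each part at most p.  Truncated integer power series; illustration only.
def poly_mul(a, b, deg):
    c = [0] * (deg + 1)
    for i, ai in enumerate(a):
        if ai:
            for j, bj in enumerate(b):
                if i + j <= deg:
                    c[i + j] += ai * bj
    return c

def gaussian_poly(m, p):
    deg = m * p                    # the Gaussian polynomial has degree m*p
    poly = [1] + [0] * deg
    for j in range(1, m + 1):
        num = [0] * (deg + 1)      # factor 1 - q^(p+j), truncated
        num[0] = 1
        if p + j <= deg:
            num[p + j] = -1
        geo = [1 if k % j == 0 else 0 for k in range(deg + 1)]  # 1/(1-q^j), truncated
        poly = poly_mul(poly_mul(poly, num, deg), geo, deg)
    return poly

def pstar_bruteforce(n, m, p):
    """Count partitions of n into at most m parts, each part at most p."""
    def count(remaining, largest, parts_left):
        if remaining == 0:
            return 1
        if parts_left == 0 or largest == 0:
            return 0
        return sum(count(remaining - part, part, parts_left - 1)
                   for part in range(min(largest, remaining), 0, -1))
    return count(n, p, m)

assert gaussian_poly(2, 2) == [1, 1, 2, 1, 1]     # ((4) choose (2))_q = 1+q+2q^2+q^3+q^4
for m in range(5):
    for p in range(5):
        assert gaussian_poly(m, p) == [pstar_bruteforce(n, m, p) for n in range(m * p + 1)]
\end{verbatim}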
\begin{theorem} \begin{equation} \gaussian{m+p}{m} = \gaussian{m+p-1}{m-1} + q^m \gaussian{m+p-1}{m} \end{equation} \end{theorem} \begin{proof} \begin{equation} \prod_{j=1}^m\frac{1-q^{p+j}}{1-q^j} = \prod_{j=1}^{m-1}\frac{1-q^{p+j}}{1-q^j} + q^m \prod_{j=1}^m\frac{1-q^{p+j-1}}{1-q^j} \end{equation} \begin{equation} \prod_{j=1}^m(1-q^{p+j}) = (1-q^m) \prod_{j=1}^{m-1}(1-q^{p+j}) + q^m \prod_{j=0}^{m-1}(1-q^{p+j}) \end{equation} \begin{equation} 1 = \frac{1-q^m}{1-q^{m+p}} + q^m \frac{1-q^p}{1-q^{m+p}} \end{equation} \begin{equation} 1-q^{m+p} = 1-q^m+q^m(1-q^p) \end{equation} \end{proof} \begin{theorem} \begin{equation} P^*(n,m,p) = P^*(n,m-1,p) + P^*(n-m,m,p-1) \end{equation} \end{theorem} \begin{proof} Using the previous theorem: \begin{equation} \begin{split} P^*(n,m,p) & = [q^n]\gaussian{m+p}{m} = [q^n]\gaussian{m+p-1}{m-1} + [q^{n-m}]\gaussian{m+p-1}{m} \\ & = P^*(n,m-1,p) + P^*(n-m,m,p-1) \\ \end{split} \end{equation} \end{proof} From this theorem and identity (\ref{pnmpsum}) follows: \begin{equation}\label{pnmpdif} P^*(n,m,p) - P^*(n,m-1,p) = P(n,m,p) = P^*(n-m,m,p-1) \end{equation} or equivalently: \begin{equation}\label{pnmpdef} P^*(n,m,p) = P(n+m,m,p+1) \end{equation} From this theorem and this identity follows: \begin{equation} P(n,m,p) = P(n-1,m-1,p) + P(n-m,m,p-1) \end{equation} \begin{theorem} \begin{equation} \gaussian{m+p}{m} = \gaussian{m+p-1}{m} + q^p \gaussian{m+p-1}{m-1} \end{equation} \end{theorem} \begin{proof} \begin{equation} \prod_{j=1}^m\frac{1-q^{p+j}}{1-q^j} = \prod_{j=1}^m\frac{1-q^{p+j-1}}{1-q^j} + q^p \prod_{j=1}^{m-1}\frac{1-q^{p+j}}{1-q^j} \end{equation} \begin{equation} \prod_{j=1}^m(1-q^{p+j}) = \prod_{j=0}^{m-1}(1-q^{p+j}) + q^p(1-q^m) \prod_{j=1}^{m-1}(1-q^{p+j}) \end{equation} \begin{equation} 1 = \frac{1-q^p}{1-q^{m+p}} + q^p \frac{1-q^m}{1-q^{m+p}} \end{equation} \begin{equation} 1-q^{m+p} = 1-q^p+q^p(1-q^m) \end{equation} \end{proof} \begin{theorem} \begin{equation} P^*(n,m,p) = P^*(n,m,p-1) + P^*(n-p,m-1,p) \end{equation} \end{theorem} \begin{proof} Using the previous theorem: \begin{equation} \begin{split} P^*(n,m,p) & = [q^n]\gaussian{m+p}{m} = [q^n]\gaussian{m+p-1}{m} + [q^{n-p}]\gaussian{m+p-1}{m-1} \\ & = P^*(n,m,p-1) + P^*(n-p,m-1,p) \\ \end{split} \end{equation} \end{proof} Using (\ref{pnmpdef}): \begin{equation} P(n,m,p) = P(n,m,p-1) + P(n-p,m-1,p) \end{equation} The following theorem is a symmetry identity: \begin{theorem}\label{gaussym} \begin{equation} \gaussian{m+p}{m} = \gaussian{m+p}{p} \end{equation} \end{theorem} \begin{proof} \begin{equation} \prod_{j=1}^m \frac{1-q^{p+j}}{1-q^j} = \prod_{j=1}^p \frac{1-q^{m+j}}{1-q^j} \end{equation} Using cross multiplication: \begin{equation} \prod_{j=1}^p(1-q^j)\prod_{j=1}^m(1-q^{p+j}) = \prod_{j=1}^m(1-q^j)\prod_{j=1}^p(1-q^{m+j}) = \prod_{j=1}^{m+p}(1-q^j) \end{equation} \end{proof} From this theorem follows: \begin{equation} P^*(n,m,p) = P^*(n,p,m) \end{equation} and using (\ref{pnmpdef}): \begin{equation} P(n,m,p) = P(n-m+p-1,p-1,m+1) \end{equation} Using (\ref{pnmpsum}) and (\ref{pnmpdif}): \begin{equation} P^*(n,m,p) = \sum_{k=0}^m P^*(n-k,k,p-1) \end{equation} Combining this identity with (\ref{pnmpgen}) and using theorem \ref{gaussym}: \begin{equation} \gaussian{m+p}{p} = \sum_{k=0}^m q^k \gaussian{p+k-1}{p-1} \end{equation} which is identity (3.3.9) in \cite{A84}. 
Taking $m=n$ in (\ref{pnmpsum}) and (\ref{pnmpdef}) and conjugation of Ferrer diagrams: \begin{equation}\label{pnnp} P(2n,n,p+1) = P(n+p,p) \end{equation} and taking $p=n$: \begin{equation} P(n) = P(2n,n) = P(2n,n,n+1) \end{equation} The partitions of $P(n,m,p)-P(n,m,p-1)$ have at least one part equal to $p$, and therefore by conjugation of Ferrer diagrams: \begin{equation} P(n,m,p) - P(n,m,p-1) = P(n,p,m) - P(n,p,m-1) \end{equation} This identity can also be derived from the other identities.\\ Using (\ref{pnmpsum}) and (5.7) in \cite{AE04}: \begin{equation} \sum_{m=0}^n P(n,m,p) = P^*(n,n,p) = [q^n] \frac{1}{\prod_{j=1}^p(1-q^j)} \end{equation} \begin{theorem} \begin{equation} \sum_{m=0}^n (-1)^m P(n,m,p) = [q^n] \frac{1}{\prod_{j=1}^p(1+q^j)} \end{equation} \end{theorem} \begin{proof} Using (\ref{pnmpdif}) and (\ref{pnmpgen}): \begin{equation} \begin{split} & \sum_{m=0}^n (-1)^m P(n,m,p) = \sum_{m=0}^n (-1)^m P^*(n-m,m,p-1) \\ & = \sum_{m=0}^n (-1)^m [q^{n-m}] \gaussian{m+p-1}{m} = [q^n] \sum_{m=0}^n (-1)^m q^m \gaussian{m+p-1}{m} \\ \end{split} \end{equation} Using the negative q-binomial theorem \cite{wiki1}: \begin{equation} \sum_{m=0}^{\infty} \gaussian{m+p-1}{m} t^m = \frac{1}{\prod_{j=0}^{p-1} (1-q^jt)} \end{equation} Taking $t=-q$ and the sum up to $n$, because only the coefficient $[q^n]$ is needed, gives the theorem. \end{proof} \section{The q-Multinomial Coefficient} Let $(m_i)_{i=1}^s$ be a sequence of $s$ nonnegative integers, and let $n$ be given by: \begin{equation} n = \sum_{i=1}^s m_i \end{equation} The q-multinomial coefficient is a product of q-binomial coefficients \cite{CP,wiki2}: \begin{equation} \gaussian{n}{m_1\cdots m_s} = \prod_{i=1}^s \gaussian{\sum_{j=1}^i m_j}{m_i} = \prod_{i=1}^s \gaussian{n-\sum_{j=1}^{i-1}m_j}{m_i} \end{equation} \section{Computation of $P(n,m,p)$ with $P(n,m)$} Let the coefficient $a_k^{(m,p)}$ be defined as: \begin{equation} a_k^{(m,p)} = [q^k] \prod_{j=1}^m (1-q^{p+j}) \end{equation} These coefficients can be computed by multiplying out the product, which up to $k=n-m$ is $O(m(n-m))=O(n^2)$. Using (\ref{pnmpgen}) and (\ref{pnmpdif}): \begin{equation} \begin{split} P(n,m,p) & = P^*(n-m,m,p-1) = [q^{n-m}] \prod_{j=1}^m \frac{1-q^{p+j-1}}{1-q^j} = [q^{n-m}] \sum_{k=0}^{n-m} a_k^{(m,p-1)} \frac{q^k}{\prod_{j=1}^m(1-q^j)} \\ & = \sum_{k=0}^{n-m} a_k^{(m,p-1)} [q^{n-k}] \frac{q^m}{\prod_{j=1}^m(1-q^j)} = \sum_{k=0}^{n-m} a_k^{(m,p-1)} P(n-k,m) \\ \end{split} \end{equation} The list of the $n-m+1$ values of $P(m,m)$ to $P(n,m)$ can be computed using the algorithm in \cite{MK} which is also $O(n^2)$, and therefore this algorithm computes $P(n,m,p)$ in $O(n^2)$. For computing $P^*(n,m,p)$ (\ref{pnmpdef}) can be used. \section{Computation of q-Binomial Coefficients} From definition (\ref{gaussdef}) the q-binomial coefficients are: \begin{equation} \begin{split} [q^n] \prod_{j=1}^m \frac{1-q^{p+j}}{1-q^j} & = [q^n] \sum_{k=0}^n a_k^{(m,p)} \frac{q^k}{\prod_{j=1}^m(1-q^j)} = \sum_{k=0}^n a_k^{(m,p)} [q^{n+m-k}] \frac{q^m}{\prod_{j=1}^m(1-q^j)} \\ & = \sum_{k=0}^n a_k^{(m,p)} P(n+m-k,m) \\ \end{split} \end{equation} Because $P^*(n,m,p)=0$ when $n>mp$ and (\ref{pnmpgen}), the coefficients $[q^n]$ are nonzero if and only if $0\leq n\leq mp$. The product coefficients $a_k^{(m,p)}$ can therefore be computed in $O(m^2p)$, and the list of $mp+1$ values of $P(m,m)$ to $P(mp+m,m)$ can also be computed in $O(m^2p)$ \cite{MK}. 
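As a concrete illustration of this convolution scheme, the following Python sketch (ours, for exposition only; the Mathematica implementation actually used in this paper is listed in the computer algebra program below) computes the product coefficients $a_k^{(m,p)}$, obtains the values $P(n,m)$ from the classical recurrence $P(n,m)=P(n-1,m-1)+P(n-m,m)$ rather than from the algorithm of \cite{MK}, and combines them exactly as in the convolution sums above.
\begin{verbatim}
# Illustrative Python version of the convolution scheme for P(n,m,p) and for the
# q-binomial coefficients, built from a_k^{(m,p)} and from P(n,m).
from functools import lru_cache

@lru_cache(maxsize=None)
def P(n, m):
    """P(n,m): partitions of n into exactly m parts (classical recurrence)."""
    if n == 0 and m == 0:
        return 1
    if n <= 0 or m <= 0 or n < m:
        return 0
    return P(n - 1, m - 1) + P(n - m, m)

def product_coefficients(m, p, deg):
    """a_k^{(m,p)} = [q^k] prod_{j=1}^m (1 - q^(p+j)), for k = 0..deg."""
    a = [0] * (deg + 1)
    a[0] = 1
    for j in range(1, m + 1):
        b = a[:]                          # multiply by (1 - q^(p+j))
        for k in range(p + j, deg + 1):
            b[k] -= a[k - (p + j)]
        a = b
    return a

def P_bounded(n, m, p):
    """P(n,m,p) = sum_k a_k^{(m,p-1)} P(n-k,m): exactly m parts, each at most p."""
    top = max(n - m, 0)
    a = product_coefficients(m, p - 1, top)
    return sum(a[k] * P(n - k, m) for k in range(top + 1))

def q_binomial_coefficients(m, p):
    """Coefficients of ((m+p) choose m)_q via [q^n] = sum_k a_k^{(m,p)} P(n+m-k,m)."""
    deg = m * p
    a = product_coefficients(m, p, deg)
    return [sum(a[k] * P(n + m - k, m) for k in range(n + 1)) for n in range(deg + 1)]

assert q_binomial_coefficients(2, 2) == [1, 1, 2, 1, 1]   # 1 + q + 2q^2 + q^3 + q^4
coeffs = q_binomial_coefficients(3, 4)
assert coeffs == coeffs[::-1] and sum(coeffs) == 35       # palindromic, value 35 at q = 1
assert P_bounded(7, 3, 4) == 3                            # 4+2+1, 3+3+1, 3+2+2
\end{verbatim}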
The sums are convolutions which can be done with \texttt{ListConvolve}, and therefore this algorithm computes the q-binomial coefficients in $O(m^2p)$. Because of symmetry theorem \ref{gaussym}, $m$ and $p$ can be interchanged when $m>p$, which makes the algorithm $O(\textrm{min}(m^2p,p^2m))$. Using a change of variables: \begin{equation} \gaussian{n}{m} = \gaussian{m+n-m}{m} \end{equation} The algorithm for computing this q-binomial coefficient is $O(\textrm{min}(m^2(n-m),(n-m)^2m))$. From this follows that when $m$ or $n-m$ is constant, then the algorithm is $O(n)$, and when $m=cn$ for some constant $c$, then the algorithm is $O(n^3)$. Because $P^*(n,m,p)=P^*(mp-n,m,p)$ only $P^*(n,m,p)$ for $0\leq n\leq \lceil mp/2\rceil$ needs to be computed, which makes the algorithm about two times faster. For comparison of results with the computer algebra program below an alternative algorithm using cyclotomic polynomials is given. \begin{verbatim} QBinomialAlternative[n_,m_]:=Block[{result={1},temp}, Do[Which[Floor[n/k]-Floor[m/k]-Floor[(n-m)/k]==1, temp=CoefficientList[Cyclotomic[k,q],q]; result=ListConvolve[result,temp,{1,-1},0]],{k,n}]; result] \end{verbatim} Computations show that this alternative algorithm is $O(n^4)$. \section{A Formula for $Q(n,m,p)$} Let $Q(n,m,p)$ be the number of integer partitions of $n$ into exactly $m$ distinct parts with each part at most $p$. \begin{theorem}\label{qnmp} \begin{equation} Q(n,m,p) = P(n-m(m-1)/2,m,p-m+1) \end{equation} \end{theorem} \begin{proof} The proof is with Ferrer diagrams and the "staircase" argument. Let a normal partition be a partition into $m$ parts, and let a distinct partition be a partition into $m$ distinct parts. Let the parts of a Ferrer diagram with $m$ parts be indexed from small to large by $s=1\cdots m$. Each distinct partition of $n$ contains a "staircase" partition with parts $s-1$ and a total size of $m(m-1)/2$, and subtracting this from such a partition gives a normal partition of $n-m(m-1)/2$, and the largest part is decreased by $m-1$. Vice versa adding the "staircase" partition to a normal partition of $n$ gives a distinct partition of $n+m(m-1)/2$, and the largest part is increased by $m-1$. When the parts of the distinct partition are at most $p$, then the parts of the corresponding normal partition are at most $p-(m-1)$. Because of this $1-1$ correspondence between the Ferrer diagrams of these two types of partitions the identity is valid. \end{proof} \section{Partitions with Each Part Between $p_{\rm min}$ and $p_{\rm max}$} Let $P^\#(n,p_{\min},p_{\max})$ be the number of partitions of $n$ with each part between $p_{\min}$ and $p_{\max}$, and let $Q^\#(n,p_{\min},p_{\max})$ be the number of partitions of $n$ into distinct parts with each part between $p_{\min}$ and $p_{\max}$, and let $P(n,m,p_{\min},p_{\max})$ be the number of partitions of $n$ into exactly $m$ parts with each part between $p_{\min}$ and $p_{\max}$, and let $Q(n,m,p_{\min},p_{\max})$ be the number of partitions of $n$ into exactly $m$ distinct parts with each part between $p_{\min}$ and $p_{\max}$. 
These functions are related by: \begin{equation} P^\#(n,p_{\min},p_{\max}) = \sum_{m=0}^{\lfloor n/p_{\min}\rfloor} P(n,m,p_{\min},p_{\max}) \end{equation} \begin{equation} Q^\#(n,p_{\min},p_{\max}) = \sum_{m=0}^{\lfloor n/p_{\min}\rfloor} Q(n,m,p_{\min},p_{\max}) \end{equation} Because the Ferrer diagrams of the partitions in $P(n,m,p_{\min},p_{\max})$ and $Q(n,m,p_{\min},p_{\max})$ all have a block of $m(p_{\min}-1)$ filled, the following relations are given: \begin{equation}\label{pminmaxdef} P(n,m,p_{\min},p_{\max}) = P(n-m p_{\min}+m,m,p_{\max}-p_{\min}+1) \end{equation} \begin{equation}\label{qminmaxdef} Q(n,m,p_{\min},p_{\max}) = Q(n-m p_{\min}+m,m,p_{\max}-p_{\min}+1) \end{equation} \begin{theorem}\label{pnmpminpmax} \begin{equation} P(n,m,p_{\min},p_{\max}) = \sum_{l=0}^{\min(m,p_{\min})} (-1)^l \sum_{k=l(l+1)/2}^{n-m+l} P(k-l(l-1)/2,l,p_{\min}-l)P(n-k,m-l,p_{\max}) \end{equation} \end{theorem} \begin{proof} From (5.11) in \cite{AE04}: \begin{equation} \sum_{n=0}^{\infty}\sum_{m=0}^{\infty} P(n,m,p_{\min},p_{\max}) q^nz^m = \frac{1}{\prod_{j=p_{\min}}^{p_{\max}}(1-zq^j)} = \frac{\prod_{j=1}^{p_{\min}-1}(1-zq^j)}{\prod_{j=1}^{p_{\max}}(1-zq^j)} \end{equation} From (5.9) and (5.11) in \cite{AE04}: \begin{equation} \prod_{j=1}^{p_{\min}-1}(1-zq^j) = \sum_{n=0}^{\infty}\sum_{m=0}^{\infty} Q(n,m,p_{\min}-1) (-1)^m z^mq^n \end{equation} \begin{equation} \frac{1}{\prod_{j=1}^{p_{\max}}(1-zq^j)} = \sum_{n=0}^{\infty}\sum_{m=0}^{\infty} P(n,m,p_{\max}) z^mq^n \end{equation} \begin{equation} \begin{split} & \sum_{n=0}^{\infty}\sum_{m=0}^{\infty} P(n,m,p_{\min},p_{\max}) q^nz^m \\ & = ( \sum_{n=0}^{\infty}\sum_{m=0}^{\infty} Q(n,m,p_{\min}-1) (-1)^m q^nz^m ) ( \sum_{n=0}^{\infty}\sum_{m=0}^{\infty} P(n,m,p_{\max}) q^nz^m ) \\ & = \sum_{n_1=0}^{\infty}\sum_{n_2=0}^{\infty}\sum_{m_1=0}^{\infty}\sum_{m_2=0}^{\infty} Q(n_1,m_1,p_{\min}-1) P(n_2,m_2,p_{\max}) (-1)^{m_1} q^{n_1+n_2} z^{m_1+m_2} \\ \end{split} \end{equation} The coefficients on both sides must be equal, so $n_1+n_2=n$ and $m_1+m_2=m$, which is equivalent to $n_2=n-n_1$ and $m_2=m-m_1$: \begin{equation}\label{pnmpminpmaxeq} P(n,m,p_{\min},p_{\max}) = \sum_{l=0}^m (-1)^l \sum_{k=l}^{n-m+l} Q(k,l,p_{\min}-1) P(n-k,m-l,p_{\max}) \end{equation} Application of theorem \ref{qnmp} to this identity gives this theorem. 
\end{proof} The following is a special case of $P(n,m,p_{\min},p_{\max})$: \begin{equation} P(n,m,p,p) = \begin{cases} 1 & \textrm{\rm if $mp=n$} \\ 0 & \textrm{\rm otherwise} \\ \end{cases} \end{equation} \begin{theorem}\label{qnmpminpmax} Let $P(n,m,p_{\min},p_{\max})$ be the formula of the previous theorem: \begin{equation} Q(n,m,p_{\min},p_{\max}) = (-1)^m P(n,m,p_{\max}+1,p_{\min}-1) \end{equation} \end{theorem} \begin{proof} \begin{equation} \sum_{n=0}^{\infty}\sum_{m=0}^{\infty} Q(n,m,p_{\min},p_{\max}) q^nz^m = \prod_{j=p_{\min}}^{p_{\max}} (1+zq^j) = \frac{\prod_{j=1}^{p_{\max}}(1+zq^j)}{\prod_{j=1}^{p_{\min}-1}(1+zq^j)} \end{equation} \begin{equation} \prod_{j=1}^{p_{\max}}(1+zq^j) = \sum_{n=0}^{\infty}\sum_{m=0}^{\infty} Q(n,m,p_{\max}) z^mq^n \end{equation} \begin{equation} \frac{1}{\prod_{j=1}^{p_{\min}-1}(1+zq^j)} = \sum_{n=0}^{\infty}\sum_{m=0}^{\infty} P(n,m,p_{\min}-1) (-1)^m z^mq^n \end{equation} \begin{equation} \begin{split} & \sum_{n=0}^{\infty}\sum_{m=0}^{\infty} Q(n,m,p_{\min},p_{\max}) q^nz^m \\ & = ( \sum_{n=0}^{\infty}\sum_{m=0}^{\infty} Q(n,m,p_{\max}) q^nz^m ) ( \sum_{n=0}^{\infty}\sum_{m=0}^{\infty} P(n,m,p_{\min}-1) (-1)^m q^nz^m ) \\ & = \sum_{n_1=0}^{\infty}\sum_{n_2=0}^{\infty}\sum_{m_1=0}^{\infty}\sum_{m_2=0}^{\infty} Q(n_1,m_1,p_{\max}) P(n_2,m_2,p_{\min}-1) (-1)^{m_2} q^{n_1+n_2} z^{m_1+m_2} \\ \end{split} \end{equation} The coefficients on both sides must be equal, so $n_1+n_2=n$ and $m_1+m_2=m$, which is equivalent to $n_2=n-n_1$ and $m_2=m-m_1$: \begin{equation}\label{qnmpminpmaxeq} Q(n,m,p_{\min},p_{\max}) = (-1)^m \sum_{l=0}^m (-1)^l \sum_{k=l}^{n-m+l} Q(k,l,p_{\max}) P(n-k,m-l,p_{\min}-1) \end{equation} Comparing this with (\ref{pnmpminpmaxeq}) in theorem \ref{pnmpminpmax} gives this theorem. \end{proof} From the generating functions in \cite{AE04} follows: \begin{equation} \begin{split} & P^\#(n,p_{\min},p_{\max}) = [q^n] \frac{1}{\prod_{j=p_{\min}}^{p_{\max}}(1-q^j)} = [q^n] \frac{\prod_{j=1}^{p_{\min}-1}(1-q^j)}{\prod_{j=1}^{p_{\max}}(1-q^j)} \\ & = [q^n] \sum_{k=0}^n q^k a_k^{(p_{\min}-1,0)} \frac{1}{\prod_{j=1}^{p_{\max}}(1-q^j)} = \sum_{k=0}^n a_k^{(p_{\min}-1,0)} [q^{p_{\max}+n-k}] \frac{q^{p_{\max}}}{\prod_{j=1}^{p_{\max}}(1-q^j)} \\ & = \sum_{k=0}^n a_k^{(p_{\min}-1,0)} P(p_{\max}+n-k,p_{\max}) \\ \end{split} \end{equation} \begin{equation} Q^\#(n,p_{\min},p_{\max}) = [q^n] \prod_{j=p_{\rm min}}^{p_{\max}} (1+q^j) = [q^n] \prod_{j=1}^{p_{\max}-p_{\min}+1} (1+q^{p_{\min}-1+j}) \end{equation} The following are special cases of $P^\#(n,p_{\min},p_{\max})$: \begin{equation} P^\#(n,1,m) = P(n+m,m) \end{equation} \begin{equation} P^\#(n,p,p) = \begin{cases} 1 & \textrm{\rm if $p$ divides $n$} \\ 0 & \textrm{\rm otherwise} \\ \end{cases} \end{equation} The following is lemma (5) in \cite{A83}: \begin{lemma}\label{mylemma} \begin{equation} \sum_{k=1}^m q^k \prod_{j=1}^{k-1} (1-q^j) = 1 - \prod_{j=1}^m (1-q^j) \end{equation} \end{lemma} \begin{proof} The lemma is true for $m=0$, and using induction on $m$, when it is true for $m$, then for $m+1$: \begin{equation} \begin{split} & \sum_{k=1}^{m+1} q^k \prod_{j=1}^{k-1} (1-q^j) = q^{m+1} \prod_{j=1}^m (1-q^j) + \sum_{k=1}^m q^k \prod_{j=1}^{k-1} (1-q^j) \\ & = q^{m+1} \prod_{j=1}^m (1-q^j) + 1 - \prod_{j=1}^m (1-q^j) = 1 - (1-q^{m+1}) \prod_{j=1}^m (1-q^j) = 1 - \prod_{j=1}^{m+1} (1-q^j) \\ \end{split} \end{equation} \end{proof} Dividing this lemma by $\prod_{j=1}^m(1-q^j)$ and taking the coefficient $[q^n]$: \begin{equation} [q^n] \sum_{k=1}^m q^k \frac{1}{\prod_{j=k}^m(1-q^j)} = \sum_{k=1}^m 
[q^{n-k}] \frac{1}{\prod_{j=k}^m(1-q^j)} = [q^n] \frac{1}{\prod_{j=1}^m(1-q^j)} - [q^n] 1 \end{equation} and using $P(0,p,p)=1$ gives the following identity:\\ For integer $m\leq n$: \begin{equation} \sum_{k=1}^{\min(m,\lfloor n/2\rfloor)} P^\#(n-k,k,m) = P^\#(n,1,m) - \delta_{n,m} \end{equation} Taking $m=n$ and using $P^\#(n,1,n)=P(n)$: \begin{equation} \sum_{k=1}^{\lfloor n/2\rfloor} P^\#(n-k,k,n) = P(n) - 1 \end{equation} The following was proved as lemma 1.1 in \cite{MK}: \begin{equation} \sum_{k=0}^m q^k \prod_{j=k+1}^m (1-q^j) = 1 \end{equation} Dividing this lemma by $\prod_{j=1}^m(1-q^j)$ and taking the coefficient $[q^n]$:\\ For integer $m\leq n$: \begin{equation} \sum_{k=1}^m P^\#(n-k,1,k) = P^\#(n,1,m) \end{equation} Taking $m=n$ and using $P^\#(n,1,n)=P(n)$: \begin{equation} \sum_{k=1}^n P^\#(n-k,1,k) = P(n) \end{equation} \section{The q-Binomial Coefficient for Negative Arguments} From another paper \cite{MK2} the following was proved:\\ For integer $n\geq 0$ and integer $k$: \begin{equation} \gaussian{n}{k} = 0 \textrm{~if~} k<0 \textrm{~or~} k>n \end{equation} and from that paper theorem 2.4 gives:\\ For negative integer $n$ and integer $k$: \begin{equation} \gaussian{n}{k} = \begin{cases} \displaystyle (-1)^k q^{nk-k(k-1)/2} \gaussian{-n+k-1}{k} & \text{if $k\geq 0$} \\ \displaystyle (-1)^{n-k} q^{(n-k)(n+k+1)/2} \gaussian{-k-1}{n-k} & \text{if $k\leq n$} \\ 0 & \text{otherwise} \\ \end{cases} \end{equation} \section{Computer Algebra Program} The following Mathematica\textsuperscript{\textregistered} functions are listed in the computer algebra program below.\\ \texttt{PartitionsPList[n,pmin,pmax]}\\ Gives a list of the $n$ numbers $P(1,p_{\min},p_{\max})..P(n,p_{\min},p_{\max})$, where $P(n,p_{\min},p_{\max})$ is the number of partitions of $n$ with each part between $p_{\min}$ and $p_{\max}$. This algorithm is $O(n^2)$.\\ \texttt{PartitionsQList[n,pmin,pmax]}\\ Gives a list of the $n$ numbers $Q(1,p_{\min},p_{\max})..Q(n,p_{\min},p_{\max})$, where $Q(n,p_{\min},p_{\max})$ is the number of partitions of $n$ into distinct parts with each part between $p_{\min}$ and $p_{\max}$. This algorithm is $O(n^2)$.\\ \texttt{PartitionsInPartsP[n,m,p]}\\ Gives the number of partitions of $n$ into exactly $m$ parts with each part at most $p$. This algorithm is $O(n^2)$.\\ \texttt{PartitionsInPartsQ[n,m,p]}\\ Gives the number of partitions of $n$ into exactly $m$ distinct parts with each part at most $p$, using the formula $Q(n,m,p)=P(n-m(m-1)/2,m,p-m+1)$. This algorithm is $O(n^2)$.\\ \texttt{PartitionsInPartsPList[n,m,p]}\\ Gives a list of $n-m+1$ numbers of $P(m,m,p)$..$P(n,m,p)$. This algorithm is $O(n^2)$.\\ \texttt{PartitionsInPartsQList[n,m,p]}\\ Gives a list of the $n-m(m+1)/2+1$ numbers $Q(m(m+1)/2,m,p)..Q(n,m,p)$, using the formula $Q(n,m,p)=P(n-m(m-1)/2,m,p-m+1)$. This algorithm is $O(n^2)$.\\ \texttt{QBinomialCoefficients[n,m,q]}\\ Gives the q-binomial coefficient $\binom{n}{m}_q$ as a polynomial in $q$, where $n$ and $m$ are integers and $q$ is a symbol. This algorithm is $O(n^3)$.\\ \texttt{QMultinomialCoefficients[mlist,q]}\\ Gives the q-multinomial coefficient $\binom{n}{m_1\cdots m_s}_q$ as a polynomial in $q$, where $s$ is the length of the list \texttt{mlist} containing the integers $m_1\cdots m_s$, and where $n=\sum_{i=1}^sm_i$, and where $q$ is a symbol.\\ Below is the listing of a Mathematica\textsuperscript{\textregistered} program that can be copied into a notebook, using the package taken from at least version 3 of the earlier paper \cite{MK}. 
The notebook must be saved in the directory of the package file. \begin{verbatim} SetDirectory[NotebookDirectory[]]; << "PartitionsInParts.m" partprod[n_,m_,p_,s_]:=Block[{prod=ConstantArray[0,n+1]},prod[[1]]=1; Do[prod[[Range[p+k+1,n+1]]]+=s prod[[Range[1,n-p-k+1]]],{k,Min[m,n-p]}]; prod] PartitionsPList[n_Integer?Positive,pmin_Integer?Positive, pmax_Integer?Positive]:=If[pmax<pmin,{}, Block[{prods,parts},prods=partprod[n,pmin-1,0,-1]; parts=PartitionsInPartsPList[n+pmax,pmax]; ListConvolve[prods,parts,{1,1},0][[Range[2,n+1]]]]] PartitionsQList[n_Integer?Positive,pmin_Integer?Positive, pmax_Integer?Positive]:=If[pmax<pmin,{}, partprod[n,pmax-pmin+1,pmin-1,1][[Range[2,n+1]]]] PartitionsInPartsP[n_Integer?NonNegative,m_Integer?NonNegative, p_Integer?NonNegative]:=If[n<m,0, Block[{prods,parts,result},prods=partprod[n-m,m,p-1,-1]; parts=PartitionsInPartsPList[n,m];result=0; Do[result+=prods[[k+1]]parts[[n-m-k+1]],{k,0,n-m}];result]] PartitionsInPartsQ[n_Integer?NonNegative,m_Integer?NonNegative, p_Integer?NonNegative]:=If[n-m(m-1)/2<m||p<m,0, PartitionsInPartsP[n-m(m-1)/2,m,p-m+1]] PartitionsInPartsPList[n_Integer?NonNegative,m_Integer?NonNegative, p_Integer?NonNegative]:=If[n<m,{}, Block[{prods,parts},prods=partprod[n-m,m,p-1,-1]; parts=PartitionsInPartsPList[n,m];ListConvolve[prods,parts,{1,1},0]]] PartitionsInPartsQList[n_Integer?NonNegative,m_Integer?NonNegative, p_Integer?NonNegative]:=If[n-m(m-1)/2<m||p<m,{}, PartitionsInPartsPList[n-m(m-1)/2,m,p-m+1]] QBinomialCompute[N_Integer,M_Integer,q_Symbol,doq_?BooleanQ]:= Block[{m=M,p=N-M,result,prods,parts,ceil},Which[m>p,m=p;p=M]; ceil=Ceiling[(m p+1)/2];prods=partprod[ceil-1,m,p,-1]; parts=PartitionsInPartsPList[ceil+m-1,m]; result=PadRight[ListConvolve[prods,parts,{1,1},0],m p+1]; result[[Range[ceil+1,m p+1]]]=result[[Range[m p-ceil+1,1,-1]]]; If[doq,q^Range[0,Length[result]-1].result,result]] QBinomialCoefficients[n_Integer,k_Integer,q_Symbol]:= If[(n>=0&&(k<0||k>n))||(n<0&&n<k<0),0,If[n>=0,QBinomialCompute[n,k,q,True], If[k>=0,(-1)^k q^(n k-k(k-1)/2)QBinomialCompute[-n+k-1,k,q,True], (-1)^(n-k)q^((n-k)(n+k+1)/2)QBinomialCompute[-k-1,n-k,q,True]]]] MListQ[alist_List]:=(alist!={}&&VectorQ[alist,IntegerQ]) QMultinomialCoefficients[mlist_List?MListQ,q_Symbol]:= If[!VectorQ[mlist,NonNegative],0, Block[{length=Length[mlist],bprod={1},msum=mlist[[1]],qbin}, Do[msum+=mlist[[k]];qbin=QBinomialCompute[msum,mlist[[k]],q,False]; bprod=ListConvolve[bprod,qbin,{1,-1},0],{k,2,length}]; q^Range[0,Length[bprod]-1].bprod]] \end{verbatim} \pdfbookmark[0]{References}{} \begin{thebibliography}{99} \bibitem{A83} G.E. Andrews, Euler's Pentagonal Number Theorem, \textit{Math. Mag.} 56~(1983)~279-284. \bibitem{A84} G.E. Andrews, \textit{The Theory of Partitions}, Cambridge University Press, 1984. \bibitem{AE04} G.E. Andrews, K. Eriksson, \textit{Integer Partitions}, Cambridge University Press, 2004. \bibitem{AAR} G.E. Andrews, R. Askey, R. Roy, \textit{Special Functions}, Cambridge University Press, 1999. \bibitem{CP} M. B\'{o}na, \textit{Combinatorics of Permutations}, 2nd ed., Taylor \& Francis, 2012. \bibitem{knuth} D.E. Knuth, H.S. Wilf, The Power of a Prime That Divides a Generalized Binomial Coefficient, Chapter 36 in D.E. Knuth, \textit{Selected Papers on Discrete Mathematics}, CSLI Publications, 2001. \bibitem{MK} M.J. Kronenburg, Computation of P(n,m), the Number of Integer Partitions of n into Exactly m Parts, \href{https://arxiv.org/abs/2205.04988}{{\tt arXiv:2205.04988}}{\tt~[math.NT]} \bibitem{MK2} M.J. 
Kronenburg, The q-Binomial Coefficient for Negative Arguments and Some q-Binomial Summation Identities, \href{https://arxiv.org/abs/2211.08256}{{\tt arXiv:2211.08256}}{\tt~[math.CO]} \bibitem{WPPP} E.W. Weisstein, \textit{q-Binomial Coefficient}. From Mathworld - A Wolfram Web Resource. \href{https://mathworld.wolfram.com/q-BinomialCoefficient.html} {{\tt https://mathworld.wolfram.com/q-BinomialCoefficient.html}} \bibitem{wiki1} Wikipedia, \textit{Gaussian binomial coefficient},\\ \href{https://en.wikipedia.org/wiki/Gaussian\_binomial\_coefficient} {{\tt https://en.wikipedia.org/wiki/Gaussian\_binomial\_coefficient}} \bibitem{wiki2} Wikipedia, \textit{Multinomial Theorem},\\ \href{https://en.wikipedia.org/wiki/Multinomial\_theorem} {{\tt https://en.wikipedia.org/wiki/Multinomial\_theorem}} \bibitem{W03} S. Wolfram, \textit{The Mathematica Book}, 5th ed., Wolfram Media, 2003. \end{thebibliography} \end{document}
2205.14924v2
http://arxiv.org/abs/2205.14924v2
Uniform approximation problems of expanding Markov maps
\documentclass[12pt]{amsart} \usepackage{amssymb,mathrsfs,bm,amsmath,amsthm,color,mathtools} \usepackage{graphicx,caption} \usepackage{float} \usepackage{enumerate} \usepackage[centering]{geometry} \geometry{a4paper,text={6in,9in}} \geometry{papersize={22cm,27cm},text={17cm,21cm}} \parskip1ex \linespread{1.1} \theoremstyle{plain} \newtheorem{thm}{Theorem}[section] \newtheorem{defn}{Definition}[section] \newtheorem{conj}{Conjecture} \newtheorem{prob}{Problem} \newtheorem{prop}[thm]{Proposition} \newtheorem{cor}[thm]{Corollary} \newtheorem{lem}[thm]{Lemma} \newtheorem{exmp}{Example} \theoremstyle{remark} \newtheorem{rem}{Remark} \numberwithin{equation}{section} \DeclareMathOperator{\hdim}{\dim_H} \renewcommand{\Im}{\operatorname{Im}} \renewcommand{\Re}{\operatorname{Re}} \newcommand{\dist}{\mathrm{dist}} \newcommand{\Q}{\mathbb Q} \newcommand{\N}{\mathbb N} \newcommand{\uk}{\mathcal U^\kappa(x)} \newcommand{\bk}{\mathcal B^\kappa(x)} \newcommand{\ud}{\underline d} \newcommand{\od}{\overline d} \newcommand{\ur}{\underline{R}} \newcommand{\ovr}{\overline{R}} \newcommand{\mi}{\mathcal M_{\mathrm{inv}}} \newcommand{\R}{\mathbb R} \newcommand{\Z}{\mathbb Z} \newcommand{\E}{\mathbb E} \newcommand{\uph}{\mu_\phi} \begin{document} \title{Uniform approximation problems of expanding Markov maps} \author{Yubin He} \address{Department of Mathematics, South China University of Technology, Guangzhou, Guangdong 510641, P.~R.\ China} \email{[email protected]} \author{Lingmin Liao} \address{School of Mathematics and Statistics, Wuhan University, Wuhan, Hubei 430072, P.~R.\ China} \email{[email protected]} \subjclass[2010]{28A80} \keywords{Uniform Diophantine approximation, Markov map, Hausdorff dimension, Gibbs measure, multifractal spectrum} \begin{abstract} Let $ T:[0,1]\to[0,1] $ be an expanding Markov map with a finite partition. Let $ \uph $ be the invariant Gibbs measure associated with a H\"older continuous potential $ \phi $. In this paper, we investigate the size of the uniform approximation set \[\uk:=\{y\in[0,1]:\forall N\gg1,~\exists n\le N, \text{ such that }|T^nx-y|<N^{-\kappa}\},\] where $ \kappa>0 $ and $ x\in[0,1] $. The critical value of $ \kappa $ such that $ \hdim\uk=1 $ for $ \uph $-a.e.\,$ x $ is proven to be $ 1/\alpha_{\max} $, where $ \alpha_{\max}=-\int \phi\,d\mu_{\max}/\int\log|T'|\,d\mu_{\max} $ and $ \mu_{\max} $ is the Gibbs measure associated with the potential $ -\log|T'| $. Moreover, when $ \kappa>1/\alpha_{\max} $, we show that for $ \uph $-a.e.\,$ x $, the Hausdorff dimension of $ \uk $ agrees with the multifractal spectrum of $ \uph $. \end{abstract} \maketitle \section{Introduction and motivation} Denote by $ \|\cdot\| $ the distance to the nearest integer. The famous Dirichlet Theorem asserts that for any real numbers $ x\in[0,1] $ and $ N\ge 1 $, there exists a positive integer $ n $ such that \begin{equation}\label{eq:Dirichlet's theorem} \|nx\|<\frac{1}{N}\quad\text{and}\quad n\le N. \end{equation} As a corollary, for any $ x\in[0,1] $, there exist infinitely many positive integers $ n $ such that \begin{equation}\label{eq:Dirichlet's corollary} \|nx\|\le\frac{1}{n}. \end{equation} Let $ (\{nx\})_{n\ge 0} $ be the orbit of $0$ of the irrational rotation by an irrational number $ x $, where $ \{nx\} $ is the fractional part of $ nx $. From the dynamical perspective, Dirichlet Theorem and its corollary describe the rate at which $ 0 $ is approximated by the orbit $ (\{nx\})_{n\ge 0} $ in a uniform and asymptotic way, respectively. 
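For readers who like to experiment, the statement~\eqref{eq:Dirichlet's theorem} is easy to test numerically; the following Python lines (a quick sanity check only, with $x=\sqrt 2-1$ as an arbitrary irrational test point) search for a suitable $n$ by brute force.
\begin{verbatim}
# Numerical sanity check of Dirichlet's theorem: for every N >= 1 there is some
# 1 <= n <= N with ||n x|| < 1/N, where ||.|| is the distance to the nearest integer.
import math

def dist_to_nearest_integer(t):
    return abs(t - round(t))

x = math.sqrt(2) - 1          # an arbitrary irrational test point
for N in range(1, 2001):
    assert any(dist_to_nearest_integer(n * x) < 1.0 / N for n in range(1, N + 1))
\end{verbatim}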
In general, one can study the Hausdorff dimension of the set of points which are approximated by the orbit $ (\{nx\})_{n\ge 0} $ with a faster speed. For the asymptotic approximation, Bugeaud~\cite{Bug03} and, independently, Schmeling and Troubetzkoy~\cite{ScTr03} proved that \[\hdim \{y\in[0,1]:\|nx-y\|<n^{-\kappa}\text{ for infinitely many }n\}=\frac{1}{\kappa},\] where $ \hdim $ stands for the Hausdorff dimension. The corresponding uniform approximation problem was recently studied by Kim and Liao~\cite{KimLi19} who obtained the Hausdorff dimension of the set \[\{y\in[0,1]:\forall N\gg1,~\exists n\le N, \text{ such that }\|nx-y\|<N^{-\kappa}\}.\] Naturally, one wonders about the analogous results when the orbit $ (\{nx\})_{n\ge 0} $ is replaced by an orbit $ (T^nx)_{n\ge 0} $ of a general dynamical system $ ([0,1], T) $. For any $ \kappa>0 $, Fan, Schmeling and Troubetzkoy~\cite{FaScTr13} considered the set \[\mathcal L^{\kappa}(x):=\{y\in[0,1]:|T^nx-y|<n^{-\kappa}\text{ for infinitely many }n\}\] of points that are asymptotically approximated by the orbit $ (T^nx)_{n\ge 0} $ with a given speed $ n^{-\kappa} $, where $ T $ is the doubling map. It seems difficult to investigate the size of $ \mathcal L^{\kappa}(x) $ when $ x $ is not a dyadic rational, as the distribution of $ (T^nx)_{n\ge 0} $ is not as well-studied as that of $ (\{nx\})_{n\ge 0} $; see for example~\cite{AlBe98} for more details about the distribution of $ (\{nx\})_{n\ge 0} $. However, from the viewpoint of ergodic theory, Fan, Schmeling and Troubetzkoy~\cite{FaScTr13} obtained the Hausdorff dimension of $ \mathcal L^{\kappa}(x) $ for $ \uph $-a.e.\,$ x $, where $ \uph $ is the Gibbs measure associated with a H\"older continuous potential $ \phi $. They found that the size of $ \mathcal L^{\kappa}(x) $ is closely related to the local dimension of $ \uph $ and to the first hitting time for shrinking targets. In their paper~\cite{LiaoSe13}, Liao and Seuret extended the results of~\cite{FaScTr13} to Markov maps. Later, Persson and Rams~\cite{PerRams17} considered more general piecewise expanding interval maps, and proved results similar to those of~\cite{FaScTr13,LiaoSe13}. These studies are also closely related to the metric theory of random covering sets; see~\cite{BaFa05, Dvo56,Fan02,JoSt08,Seu18,She72,Tan15} and references therein. As a counterpart of the dynamically defined asymptotic approximation set $ \mathcal L^{\kappa}(x) $, we would like to study the corresponding uniform approximation set $ \uk $ defined as \[\begin{split} \uk:&=\{y\in[0,1]:\forall N\gg1,~\exists n\le N, \text{ such that }|T^nx-y|<N^{-\kappa}\}\\ &=\bigcup_{i=1}^\infty\bigcap_{N= i}^\infty\bigcup_{n=1}^N B(T^nx, N^{-\kappa}), \end{split}\] where $ B(x,r) $ is the open ball of center $ x $ and radius $ r $, and $ T $ is a Markov map (see Definition~\ref{d:Markov map} below). As in the studies on $ \mathcal L^\kappa(x) $, we are interested in the sizes (Lebesgue measure and Hausdorff dimension) of $ \uk $. By a simple argument, one can check that $ \uk\setminus\{T^nx\}_{n\ge 0}\subset \mathcal L^\kappa(x) $: if $ y\in\uk\setminus\{T^nx\}_{n\ge 0} $, then for every large $ N $ there is some $ n_N\le N $ with $ |T^{n_N}x-y|<N^{-\kappa}\le n_N^{-\kappa} $, and since $ y $ is not on the orbit, the integers $ n_N $ cannot stay bounded. Thus, trivially one has $ \lambda(\uk) \le \lambda(\mathcal L^\kappa(x))$ and $ \hdim \uk\le\hdim\mathcal L^\kappa(x) $. Here, $ \lambda $ denotes the Lebesgue measure on $ [0,1] $. Our first result asserts that for any $ \kappa>0 $, the Lebesgue measure and the Hausdorff dimension of $ \uk $ are constant almost surely with respect to a $ T $-invariant ergodic measure.
\begin{thm}\label{t:sub} Let $ T $ be a Markov map on $ [0,1] $ and $ \nu $ be a $ T $-invariant ergodic measure. Then for any $ \kappa>0 $, both $ \lambda(\uk) $ and $ \hdim\uk $ are constant $ \nu $-almost surely. \end{thm} To further describe the size of $\uk$ for almost all points, we impose a stronger condition, the same as that of Fan, Schmeling and Troubetzkoy~\cite{FaScTr13} and Liao and Seuret~\cite{LiaoSe13}, that $ \nu $ is a Gibbs measure. Precisely, let $\phi$ be a H\"older continuous potential and $ \uph $ be the associated Gibbs measure. Let $ \bk:=[0,1]\setminus \uk $. We remark that the sets $\uk$ are decreasing (the sets $\bk$ are increasing) with respect to $\kappa$. Then, we want to ask, for $\mu_\phi$-almost all points $x$, how the sizes of $\uk$ and $\bk$ change with respect to $\kappa$. We thus would like to answer the following questions. \begin{enumerate}[(Q1)] \item When is $\uk=[0,1]$ for $\mu_\phi$-a.e.\,$x$? What is the critical value \[ \kappa_{\phi}:=\sup\{\kappa\geq 0: \uk=[0,1] \ \text{for $\mu_\phi$-a.e.} \,x\}? \] \item When is $\lambda(\uk)=1$ for $\mu_\phi$-a.e.\,$x$? What is the critical value \[ \kappa_{\phi}^\lambda:=\sup\{\kappa \geq 0: \lambda(\uk)=1 \ \text{for $\mu_\phi$-a.e.} \,x\}? \] \item What are the Hausdorff dimensions of $\uk$ and $\bk$ for $ \uph $-a.e.\,$ x $? What is the critical value \[ \kappa_{\phi}^H:=\sup\{\kappa \geq 0: \hdim(\uk)=0 \ \text{for $\mu_\phi$-a.e.} \,x\}? \] \end{enumerate} In this paper, we answer these questions when $ T $ is an expanding Markov map of the interval $ [0,1] $ with a finite partition---a Markov map, for short. \begin{defn}[Markov map]\label{d:Markov map} A transformation $ T:[0,1]\to[0,1] $ is an expanding Markov map with a finite partition provided that there is a partition of $ [0,1] $ into subintervals $ I(i)=(a_i, a_{i+1}) $ for $ i=0,\dots,Q-1 $ with endpoints $ 0=a_0<a_1<\cdots<a_Q=1 $ satisfying the following properties. \begin{enumerate}[\upshape(1)] \item There is an integer $ n_0 $ and a real number $ \rho $ such that $ |(T^{n_0})'|\ge\rho>1 $. \item $ T $ is strictly monotonic and can be extended to a $ C^2 $ function on each $ \overline{I(i)} $. \item If $ I(j)\cap T\big(I(k)\big)\ne\emptyset $, then $ I(j)\subset T\big(I(k)\big) $. \item There is an integer $ R $ such that $ I(j)\subset \cup_{n=1}^R T^n\big(I(k)\big) $ for every $ k $, $ j $. \item For every $ k\in\{0,1,\dots,Q-1\} $, $ \sup_{(x,y,z)\in I(k)^3}\big(|T''(x)|/|T'(y)||T'(z)|\big)<\infty $. \end{enumerate} \end{defn} For a probability measure $ \nu $ on $[0,1]$ and a point $ y\in[0,1] $, we set \[\ud_\nu(y):=\liminf_{r\to 0}\frac{\log\nu\big(B(y,r)\big)}{\log r}\quad\text{and}\quad\od_\nu(y):=\limsup_{r\to 0}\frac{\log\nu\big(B(y,r)\big)}{\log r},\] which are called, respectively, the lower and upper local dimensions of $ \nu $ at $y$. When $ \ud_\nu(y)=\od_\nu(y) $, their common value is denoted by $ d_\nu(y) $, and is simply called the local dimension of $ \nu $ at $ y $. Let $ D_\nu $ be the multifractal spectrum of $ \nu $ defined by \[D_\nu(s):= \hdim\{y\in[0,1]:d_\nu(y)=s\} \quad \forall s\in\mathbb{R}.\] Let $ T $ be a Markov map as in Definition \ref{d:Markov map} and let $ \phi $ be a H\"older continuous potential. Denote by $ \mi $ the set of $ T $-invariant probability measures on $ [0,1] $ and by $ \mu_{\max} $ the Gibbs measure associated with the potential $ -\log|T'| $.
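Before introducing the relevant exponents, we record a simple illustration of these notions; it is meant only for orientation and is consistent with Example~\ref{ex:example 1} below. For the Lebesgue measure $ \lambda $ on $ [0,1] $, one has $ r\le\lambda\big(B(y,r)\big)\le 2r $ for every $ y\in[0,1] $ and every $ 0<r<1/2 $, hence \[d_\lambda(y)=\lim_{r\to 0}\frac{\log\lambda\big(B(y,r)\big)}{\log r}=1\quad\text{for every }y\in[0,1].\] Consequently, $ D_\lambda(1)=1 $, while $ D_\lambda(s)=0 $ for every $ s\ne 1 $ since the corresponding level sets are empty; in this sense $ \lambda $ is monofractal.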
Define \begin{align} \alpha_-:&=\min_{\nu\in\mi}\frac{-\int\phi \,d\nu}{\int\log|T'|\,d\nu},\label{eq:alpha-}\\ \alpha_{\max}:&=\frac{-\int\phi \, d\mu_{\max}}{\int\log|T'|\, d\mu_{\max}},\label{eq:alphamax}\\ \alpha_+:&=\max_{\nu\in\mi}\frac{-\int\phi \, d\nu}{\int\log|T'|\, d\nu}.\label{eq:alpha+} \end{align} By definition, it holds that $ \alpha_-\le \alpha_{\max}\le\alpha_+ $. Indeed, the quantities $ \alpha_- $, $ \alpha_{\max} $ and $ \alpha_+ $ depend on $ T $ and $ \phi $. However, for simplicity, we leave out the dependence unless the context requires specification. The following main theorem tells us that the three critical values demanded in questions (Q1)-(Q3) are $ \alpha_+ $, $ \alpha_{\max} $ and $ \alpha_-$, correspondingly. \begin{thm}\label{t:main} Let $ T $ be a Markov map. Let $ \phi $ be a H\"older continuous potential and $ \uph $ be the associated Gibbs measure. \begin{enumerate}[\upshape(1)] \item The critical value $ \kappa_{\phi} $ is $ \alpha_+ $. Namely, for $ \uph $-a.e.\,$ x $, \ $ \uk=[0,1] $ if $ 1/\kappa>\alpha_+ $, and $ \uk\ne[0,1] $ if $ 1/\kappa<\alpha_+ $. \item The critical value $ \kappa_{\phi}^\lambda $ is $ \alpha_{\max} $. Moreover, for $ \mu_\phi $-a.e.\,$ x $, \[\lambda\big(\uk\big)=1-\lambda\big(\bk\big)=\begin{cases} 0\quad&\text{if }1/\kappa\in(0,\alpha_{\max}),\\ 1\quad&\text{if }1/\kappa\in(\alpha_{\max},+\infty). \end{cases}\] \item The critical value $ \kappa_{\phi}^H $ is $ \alpha_{-} $. Moreover, for $ \mu_\phi $-a.e.\,$ x $, \[\hdim\uk=\begin{cases} D_{\mu_\phi}(1/\kappa) &\text{if }1/\kappa\in(0,\alpha_{\max}]\setminus\{\alpha_-\},\\ 1&\text{if }1/\kappa\in(\alpha_{\max},+\infty). \end{cases}\] \item For $ \mu_\phi $-a.e.\,$ x $, \[\hdim\bk=\begin{cases} 1&\text{if }1/\kappa\in(0,\alpha_{\max}),\\ D_{\mu_\phi}(1/\kappa) &\text{if }1/\kappa\in[\alpha_{\max},+\infty)\setminus \{\alpha_+\}. \end{cases}\] \end{enumerate} \end{thm} \begin{rem} It is worth noting that the multifractal spectrum $ D_{\uph}(s) $ vanishes if $ s\notin[\alpha_-,\alpha_+] $. So if $ 1/\kappa<\alpha_- $, then $ \hdim\uk=0 $ for $ \uph $-a.e.\,$ x $. \end{rem} \begin{rem} The cases $ 1/\kappa=\alpha_- $ and $ \alpha_+ $ are not covered by Theorem \ref{t:main}. However, if the multifractal spectrum $ D_{\uph} $ is continuous at $ \alpha_- $ (respectively $ \alpha_+ $), we get that $ \hdim\mathcal U^{\alpha_-}(x)=0 $ (respectively $ \hdim\mathcal B^{\alpha_+}(x)=0 $) for $ \uph $-a.e.\,$ x $. The situation becomes more subtle if $ D_{\uph}(\cdot) $ is discontinuous at $ \alpha_- $ (respectively $ \alpha_+ $). Our methods do not work for obtaining the value of $ \hdim\mathcal U^{\alpha_-}(x) $ (respectively $ \hdim\mathcal B^{\alpha_+}(x)$) for $ \uph $-a.e.\,$ x $. \end{rem} Let $ \hdim \nu $ be the dimension of the Borel probability measure $ \nu $ defined by \[\hdim \nu=\inf\{\hdim E:E\text{ is Borel set of }[0,1]\text{ and }\nu(E)>0\}.\] \begin{rem} As already discussed above, $ \uk\setminus\{T^nx\}_{n\ge 0}\subset \mathcal L^\kappa(x) $, one may wonder whether the sets $ \uk $ and $ \mathcal L^\kappa(x) $ are essentially different. More precisely, is it possible that $ \hdim\uk $ is strictly less than $ \hdim\mathcal L^\kappa(x) $? Theorem~\ref{t:main} affirmatively answers this question. Compared with the asymptotic approximation set $ \mathcal L^{\kappa}(x) $, the structure of the uniform approximation set $ \uk $ does have a notable feature. 
When $ 1/\kappa\in(0,\hdim\uph)\setminus\{\alpha_-\} $, the map $ 1/\kappa\mapsto\hdim\uk $ agrees with the multifractal spectrum $ D_{\uph}(1/\kappa) $, while the map $ 1/\kappa\mapsto\hdim\mathcal L^{\kappa}(x) $ is the linear function $ f(1/\kappa)=1/\kappa $ independent of the multifractal spectrum. Therefore, $ \hdim\uk<\hdim\mathcal L^\kappa(x) $. See Figure 1 for an illustration. \end{rem} \begin{figure}[H] \centering \includegraphics[height=10cm, width=15cm]{Graph} \caption*{\footnotesize Figure 1: The multifractal spectrum of $ \uph $ and the maps $ 1/\kappa\mapsto\hdim\bk $, $ 1/\kappa\mapsto\hdim\uk $ and $ 1/\kappa\mapsto\hdim\mathcal L^\kappa(x) $.} \end{figure} \begin{rem} For the asymptotic approximation set $ \mathcal L^\kappa(x) $, the most difficult part lies in establishing the lower bound for $ \hdim\mathcal L^\kappa(x)$ when $ 1/\kappa<\hdim\uph $, for which a multifractal mass transference principle for Gibbs measure is applied, see \cite[\S 8]{FaScTr13}, \cite[\S 5.2]{LiaoSe13} and~\cite[\S 6]{PerRams17}. Specifically, since $ \uph(\mathcal L^{\delta}(x))=1 $ for all $ 1/\delta>\hdim\uph $, the multifractal mass transference principle guarantees the lower bound $ \hdim \mathcal L^{\kappa}(x)\ge (\hdim\uph)\delta/\kappa $ for all $ 1/\kappa<\hdim\uph $. By letting $ 1/\delta $ monotonically decrease to $ \hdim\uph $ along a sequence $ (\delta_n) $, we get immediately the expected lower bound $ \hdim \mathcal L^{\kappa}(x)\ge 1/\kappa $ for all $ 1/\kappa<\hdim\uph $. However, recent progresses in uniform approximation~\cite{BuLi16,KimLi19,KoLiPe21,ZhWu20} indicate that there is no mass transference principle for uniform approximation set. Therefore, we can not expect that $ \hdim\uk $ decreases linearly with respect to $ 1/\kappa $ as $ \hdim\mathcal L^\kappa(x) $ does. The main new ingredient of this paper is the difficult upper bound for $ \hdim\uk $ when $ 1/\kappa<\hdim\uph $. To overcome the difficulty, we fully develop and combine the methods in~\cite{FaScTr13} and~\cite{KoLiPe21}. \end{rem} To illustrate our main theorem, let us give some examples. \begin{exmp}\label{ex:example 1} Suppose that $ T $ is the doubling map and $ \uph:=\lambda $ is the Lebesgue measure. Applying Theorem~\ref{t:main}, we have that for $ \lambda $-a.e.\,$ x $, \[\hdim\uk=\begin{cases} 0 &\text{if }1/\kappa\in(0,1),\\ 1&\text{if }1/\kappa\in(1,+\infty). \end{cases}\] The Lebesgue measure is monofractal and hence the corresponding multifractal spectrum $ D_\lambda $ is discontinuous at $ 1 $. Theorem~\ref{t:main} fails to provide any metric statement for the set $ \mathcal U^1(x) $ for $ \lambda $-a.e.\,$ x $. However, we can conclude that $ \mathcal U^1(x) $ is a Lebesgue null set for $ \lambda $-a.e.\,$ x $ from Fubini Theorem and a zero-one law established in~\cite[Theorem 2.1]{HKKP21}. Further, by Theorem~\ref{t:sub}, $ \hdim\mathcal U^1(x) $ is constant for $ \lambda $-a.e.\,$ x $. \end{exmp} Some sets similar to $ \mathcal U^1(x) $ in Example~\ref{ex:example 1} have recently been studied by Koivusalo, Liao and Persson~\cite{KoLiPe21}. In their paper, instead of the orbit $ (T^nx)_{n\ge 0} $, they investigated the sets of points uniformly approximated by an independent and identically distributed sequence $ (\omega_n)_{n\ge 1} $. 
Specifically, they showed that with probability one, the lower bound of the Hausdorff dimension of the set \[\begin{split} \{y\in[0,1]:\forall N\gg1,~\exists n\le N, \text{ such that }|\omega_n-y|<1/N\} \end{split}\] is larger than $ 0.2177444298485995 $ (\cite[Theorem 5]{KoLiPe21}). \begin{exmp} Let $ p\in(1/2,1) $. Suppose that $ T $ is the doubling map on $ [0,1] $ and $ \mu_p $ is the $ (p,1-p) $ Bernoulli measure. It is known that the multifractal spectrum $ D_{\mu_p} $ is continuous on $ (0,+\infty) $ and attains its unique maximal value $ 1 $ at $ -\log_2 \big(p(1-p)\big)/2 $. Theorem~\ref{t:main} then gives that for $ \mu_p $-a.e.\,$ x $, \[\hdim\uk=\begin{dcases} D_{\mu_p}(1/\kappa) &\text{if }1/\kappa\in\bigg(0,\frac{-\log_2 \big(p(1-p)\big)}{2}\bigg),\\ 1&\text{if }1/\kappa\in\bigg[\frac{-\log_2 \big(p(1-p)\big)}{2},+\infty\bigg). \end{dcases}\] \end{exmp} Our paper is organized as follows. We start in Section 2 with some preparations on Markov map, and then apply ergodic theory to give a proof of Theorem~\ref{t:sub}. Section 3 contains some recalls on multifractal analysis and a variational principle which are essential in the proof of Theorem~\ref{t:main}. Section 4 describes some relations among hitting time, approximating rate and local dimension of $ \uph $. From these relations we then derive items (1), (2) and (4) of Theorem~\ref{t:main} in Section 5.1, as well as the lower bound of $ \hdim\uk $ in Section 5.2. In the same Section 5.2, we establish the upper bound of $ \hdim\uk $, which is arguably the most substantial part. \section{Basic definitions and the proof of Theorem~\ref{t:sub}} \subsection{Covering of $ [0,1] $ by basic intervals} Let $ T $ be a Markov map as defined in Definition~\ref{d:Markov map}. For each $ (i_1i_2\cdots i_n)\in\{0,1,\dots,Q-1\}^n $, we call \[I(i_1i_2\cdots i_n):=I(i_1)\cap T^{-1}\big(I(i_2)\big)\cap\dots\cap T^{-n+1}\big(I(i_n)\big)\] a basic interval of generation $ n $. It is possible that $ I(i_1i_2\cdots i_n) $ is empty for some $ (i_1i_2\cdots i_n)\in\{0,1,\dots,Q-1\}^n $. The collection of non-empty basic intervals of a given generation $ n $ will be denoted by $ \Sigma_n $. For any $ x\in[0,1] $, we denote $ I_n(x) $ the unique basic interval $ I\in\Sigma_n $ containing $ x $. By the definition of Markov map, we obtain the following bounded distortion property on basic intervals: there is a constant $ L>1 $ such that for any $ x\in[0,1] $, \begin{equation}\label{eq:bdp} \text{for any }n\ge 1,\quad L^{-1}|(T^n)'(x)|^{-1}\le |I_n(x)|\le L|(T^n)'(x)|^{-1}, \end{equation} where $ |I| $ is the length of the interval $ I $. Consequently, we can find two constants $ 1<L_1<L_2 $ such that \begin{equation}\label{eq:length of basic interval} \text{for every } I\in \Sigma_n,\quad L_2^{-n}\le |I|\le L_1^{-n}. \end{equation} \subsection{Proof of Theorem~\ref{t:sub}} Let us start with a simple but crucial observation. \begin{lem}\label{l:invariant} Let $ T $ be a Markov map. For any $ \kappa>0 $, we have $ \uk\setminus\{Tx\}\subset \mathcal U^{\kappa}(Tx) $. \end{lem} \begin{proof} Let $ y\in\uk\setminus\{Tx\} $ and let $ i $ be the smallest integer satisfying $ y\notin B(Tx, 2^{-i\kappa}) $. By the definition of $ \uk $, for any integer $ N> 2^i $ large enough, there exists $ 1\le n\le N $ such that $ y\in B(T^nx, N^{-\kappa}) $. Moreover, the condition $ N> 2^i $ implies that $ n\ne1 $, hence $ y\in B\big(T^{n-1}(Tx),N^{-\kappa}\big) $. This gives that $ y\in\mathcal U^\kappa(Tx) $. 
Therefore, $ \uk\setminus\{Tx\}\subset \mathcal U^{\kappa}(Tx) $. \end{proof} Recall that a $ T $-invariant measure $ \nu $ is ergodic if and only if any $ T $-invariant function is constant almost surely. The proof of Theorem~\ref{t:sub} falls naturally into two parts. We first deal with the Lebesgue measure part. \begin{proof}[Proof of Theorem~\ref{t:sub}: Lebesgue measure part] For any $ \kappa>0 $, define the function $ g_\kappa:x\mapsto\lambda(\uk) $. We claim that $ g_\kappa $ is measurable. In fact, it suffices to observe that \[g_\kappa(x)=\lim_{i\to \infty}\lim_{m\to\infty}\lambda\bigg(\bigcap_{N=i}^m\bigcup_{n=1}^NB(T^nx,N^{-\kappa})\bigg)\] and that, by the piecewise continuity of $ T^n $ ($ n\ge 1 $), the set \[\bigg\{x\in[0,1]:\lambda\bigg(\bigcap_{N=i}^m\bigcup_{n=1}^NB(T^nx,N^{-\kappa})\bigg)>t\bigg\}\] is measurable for any $ t\in\R $. By Lemma~\ref{l:invariant}, we see that $ \lambda(\mathcal U^\kappa(Tx))\ge \lambda(\uk) $, or equivalently, $ g_\kappa(Tx)\ge g_\kappa(x) $. Since $ g_\kappa $ is measurable and $ 0\le g_\kappa\le 1 $, the invariance of $ \nu $ gives $ \int (g_\kappa\circ T-g_\kappa)\,d\nu=0 $; as $ g_\kappa\circ T\ge g_\kappa $, it follows that $ g_\kappa $ is invariant with respect to $ \nu $, that is, $ g_\kappa(Tx)=g_\kappa(x) $ for $ \nu $-a.e.\,$ x $. By the ergodicity of $ \nu $, $ g_\kappa $ is constant almost surely. \end{proof} \begin{proof}[Proof of Theorem~\ref{t:sub}: Hausdorff dimension part] Fix $ \kappa>0 $ and define the function $ f_\kappa:x\mapsto\hdim\uk $. Again by Lemma~\ref{l:invariant}, we see that $ f_\kappa(Tx)\ge f_\kappa(x) $. Proceeding in the same way as in the Lebesgue measure part yields that $ f_\kappa $ is constant almost surely provided that $ f_\kappa $ is measurable. To show that $ f_\kappa $ is measurable, it suffices to prove that for any $ t>0 $, the set \[\begin{split} A(t):=\{x\in[0,1]:f_\kappa(x)<t\}=\bigg\{x\in[0,1]:\hdim\bigg(\bigcup_{i=1}^\infty\bigcap_{N= i}^\infty\bigcup_{n=1}^NB(T^nx, N^{-\kappa})\bigg)<t\bigg\} \end{split}\] is measurable. Throughout the proof of this part, we will assume that the ball $ B(T^nx, N^{-\kappa}) $ is closed. This assumption makes the compactness argument below work, and it does not change the Hausdorff dimension of $ \uk $. By the definition of Hausdorff dimension, a point $ x\in A(t) $ if and only if there exists $ h\in \N $ such that \[\mathcal H^{t-\frac{1}{h}}\bigg(\bigcup_{i=1}^\infty\bigcap_{N= i}^\infty\bigcup_{n=1}^NB(T^nx, N^{-\kappa})\bigg)=0,\] or equivalently for all $ i\ge 1 $, \begin{equation}\label{eq:ht-1/j=0} \mathcal H^{t-\frac{1}{h}}\bigg(\bigcap_{N= i}^\infty\bigcup_{n=1}^NB(T^nx, N^{-\kappa})\bigg)=0. \end{equation} By the definition of Hausdorff measure,~\eqref{eq:ht-1/j=0} holds if and only if for any $ j,k\in\N $, \[\mathcal H_{\frac{1}{j}}^{t-\frac{1}{h}}\bigg(\bigcap_{N= i}^\infty\bigcup_{n=1}^NB(T^nx, N^{-\kappa})\bigg)<\frac{1}{k}.\] Hence, we see that \[\begin{split} A(t)=\bigcup_{h=1}^\infty\bigcap_{i=1}^\infty\bigcap_{j=1}^\infty\bigcap_{k=1}^\infty\bigg\{ x\in [0,1]:\mathcal H_{\frac{1}{j}}^{t-\frac{1}{h}}\bigg(\bigcap_{N= i}^\infty\bigcup_{n=1}^NB(T^nx, N^{-\kappa})\bigg)<\frac{1}{k}\bigg\}=:\bigcup_{h=1}^\infty\bigcap_{i=1}^\infty\bigcap_{j=1}^\infty\bigcap_{k=1}^\infty B_{h,i,j,k}. \end{split}\] If $ x\in B_{h,i,j,k} $, then there is a countable open cover $ \{U_p\}_{p\ge 1} $ with $ 0<|U_p|<1/j $ satisfying \begin{equation}\label{eq:subset Up} \bigcap_{N= i}^\infty\bigcup_{n=1}^NB(T^nx, N^{-\kappa})\subset \bigcup_{p=1}^\infty U_p\quad \text{and}\quad\sum_{p=1}^\infty |U_p|^{t-\frac{1}{h}}<\frac{1}{k}.
\end{equation} The set $ \bigcap_{N= i}^\infty\bigcup_{n=1}^NB(T^nx, N^{-\kappa}) $ can be viewed as the intersection of a family of decreasing compact sets $ \big\{\bigcap_{N= i}^l\bigcup_{n=1}^NB(T^nx, N^{-\kappa})\big\}_{l\ge i} $. Hence there exists $ l_0\ge i $ satisfying \[\bigcap_{N= i}^{l_0}\bigcup_{n=1}^NB(T^nx, N^{-\kappa})\subset\bigcup_{p=1}^\infty U_p,\] which implies \[\mathcal H_{\frac{1}{j}}^{t-\frac{1}{h}}\bigg(\bigcap_{N= i}^{l_0}\bigcup_{n=1}^NB(T^nx, N^{-\kappa})\bigg)<\frac1k.\] We then deduce that \begin{equation}\label{eq:Bijkl subset Cijklm} B_{h,i,j,k}\subset\bigcup_{l=i}^{\infty}\bigg\{ x\in [0,1]:\mathcal H_{\frac{1}{j}}^{t-\frac{1}{h}}\bigg(\bigcap_{N= i}^l\bigcup_{n=1}^NB(T^nx, N^{-\kappa})\bigg)<\frac1k\bigg\}=:\bigcup_{l=i}^{\infty}C_{h,i,j,k,l}. \end{equation} If $ x\in C_{h,i,j,k,l} $ for some $ l\ge i $, then \[\mathcal H_{\frac{1}{j}}^{t-\frac{1}{h}}\bigg(\bigcap_{N= i}^\infty\bigcup_{n=1}^NB(T^nx, N^{-\kappa})\bigg)\le\mathcal H_{\frac{1}{j}}^{t-\frac{1}{h}}\bigg(\bigcap_{N= i}^l\bigcup_{n=1}^NB(T^nx, N^{-\kappa})\bigg)<\frac{1}{k}.\] Hence $ x\in B_{h,i,j,k} $ and the reverse inclusion of~\eqref{eq:Bijkl subset Cijklm} is proved. Notice that $ T,T^2,\dots,T^l $ are continuous on every basic interval of generation greater than $ l $. For any $ x\in C_{h,i,j,k,l} $, denote $$ \mathcal S(x):=\bigcap_{N= i}^l\bigcup_{n=1}^NB(T^nx, N^{-\kappa}). $$ There is an open cover $ (V_p)_{p\ge 1} $ of $ \mathcal S(x) $ with $ 0<|V_p|<1/j $ and $ \sum_p |V_p|^{t-1/h}<1/k $. Since $ \mathcal S(x) $ is compact, we see that the distance $ \delta $ between $ \mathcal S(x) $ and the complement of $ \bigcup_{p\ge 1} V_p $ is positive. By the continuity of $ T,T^2,\dots,T^l $, there is some $ l_1:=l_1(\delta)>l $, for which if $ y\in I_{l_1}(x) $, then $ \mathcal S(y) $ is contained in a $ \delta/2 $-neighborhood of $ \mathcal S(x) $. Thus $ \mathcal S(y) $ can also be covered by $ \bigcup_{p\ge 1} V_p $, and $ I_{l_1}(x)\subset C_{h,i,j,k,l} $. Finally, $ C_{h,i,j,k,l} $ is a union of some basic intervals, which is measurable. Now, combining the two inclusions obtained above, we have \[A(t)=\bigcup_{h=1}^\infty\bigcap_{i=1}^\infty\bigcap_{j=1}^\infty\bigcap_{k=1}^\infty\bigcup_{l=i}^{\infty}C_{h,i,j,k,l},\] which is a Borel measurable set. \end{proof} \section{Multifractal properties of Gibbs measures}\label{s:Multifractal properties} In this section, we review some standard facts on multifractal properties of Gibbs measures. \begin{defn}\label{d:Gibbs measure} A Gibbs measure $ \uph $ associated with a potential $ \phi $ is a probability measure satisfying the following: there exists a constant $ \gamma>0 $ such that \[\text{for any basic interval }I\in\Sigma_n, \quad\gamma^{-1}\le\frac{\uph(I)}{e^{S_n\phi(x)-nP(\phi)}}\le\gamma,\quad\text{for every }x\in I,\] where $ S_n\phi(x)=\phi(x)+\cdots+\phi(T^{n-1}x) $ is the $ n $th Birkhoff sum of $ \phi $ at $ x $, and $ P(\phi) $ is the topological pressure of $\phi$ defined by \[P(\phi)=\lim_{n\to\infty}\frac{1}{n}\log\sum_{I\in\Sigma_n}\sup_{x\in I}e^{S_n\phi(x)}.\] \end{defn} The following theorem ensures the existence and uniqueness of the invariant Gibbs measure. \begin{thm}[\cite{Bal00,Wal78}] Let $ T:[0,1]\to[0,1] $ be a Markov map. Then for any H\"older continuous function $ \phi $, there exists a unique $ T $-invariant Gibbs measure $ \uph $ associated with $ \phi $. Further, $ \uph $ is ergodic. \end{thm} The Gibbs measure $ \uph $ also satisfies the quasi-Bernoulli property (see~\cite[Lemma 4.1]{LiaoSe13}), i.e.
for any $ n>k\ge 1 $, for any basic interval $ I(i_1\cdots i_n)\in\Sigma_{n} $, the following holds \begin{equation}\label{eq:quasi-Bernoulli} \gamma^{-3}\uph(I')\uph(I'')\le\uph(I)=\uph(I'\cap T^{-k}I'')\le \gamma^3\uph(I')\uph(I''), \end{equation} where $ I'=I(i_1\cdots i_k)\in\Sigma_k $ and $ I''=I(i_{k+1}\cdots i_n)\in\Sigma_{n-k} $. It follows immediately that \begin{equation}\label{eq:quasi-Bernoulli consequence} \text{for any }m\ge k,\quad\uph(I'\cap T^{-m}U)\le \gamma^3\uph(I')\uph(U), \end{equation} where $ U $ is an open set in $ [0,1] $. We adopt the convention that $ \phi $ is normalized, i.e. $ P(\phi)=0 $. If it is not the case, we can replace $\phi$ by $ \phi-P(\phi) $. Now, let us recall some standard facts on multifractal analysis which aims at studying the multifractal spectrum $ D_{\uph} $. Some multifractal analysis results were summarised in~\cite{LiaoSe13} and we present them as follows. The proofs can also be found in the references~\cite{BaPeS97,BrMiP92, CoLeP87, PeWe97,Ran89, Sim94}. \begin{thm}[{\cite[Theorem 2.5]{LiaoSe13}}]\label{t:multifractal analysis} Let $ T $ be a Markov map. Let $ \phi $ be a H\"older continuous potential and $ \uph $ be the associated Gibbs measure. Then, the following hold. \begin{enumerate}[\upshape(1)] \item The function $ D_{\uph} $ of $ \uph $ is a concave real-analytic map on the interval $ (\alpha_-,\alpha_+) $, where $ \alpha_- $ and $ \alpha_+ $ are defined in~\eqref{eq:alpha-} and~\eqref{eq:alpha+}, respectively. \item The spectrum $ D_{\uph} $ reaches its maximum value $ 1 $ at $ \alpha_{\max} $ defined in~\eqref{eq:alphamax}. \item The graph of $ D_{\uph} $ and the first bisector intersect at a unique point which is $ (\hdim\uph,\hdim\uph) $. Moreover, $ \hdim\uph $ satisfies \[\hdim\uph=\frac{-\int\phi\, d\uph}{\int\log|T'|\, d\uph}.\] \end{enumerate} \end{thm} \begin{prop}[{\cite[\S 2.3]{LiaoSe13}}]\label{p:topological pressure equals to 0} For every $ q\in\R $, there is a unique real number $ \eta_\phi(q) $ such that the topological pressure $ P\big(-\eta_\phi(q)\log |T'|+q\phi\big) $ equals to $ 0 $. Further, $ \eta_\phi(q) $ is real-analytic and concave. \end{prop} \begin{rem}\label{r:Gibbs measure} For simplicity, we denote by $ \mu_q $ the $ T $-invariant Gibbs measure associated with the potential $ -\eta_\phi(q)\log |T'|+q\phi $. Certainly, $ \eta_\phi(0)=1 $ and the corresponding measure $ \mu_0 $ is associated with the potential $ -\log|T'| $. By the bounded distortion property~\eqref{eq:bdp}, the Gibbs measure $ \mu_0 $, coinciding with $ \mu_{\max} $, is strongly equivalent to the Lebesgue measure $ \lambda $. \end{rem} For every $ q\in\R $, we introduce the exponent \begin{equation}\label{eq:alpha(q)} \alpha(q)=\frac{-\int\phi\, d\mu_q}{\int\log|T'|\, d\mu_q}. \end{equation} \begin{prop}[{\cite[\S 2.3]{LiaoSe13}}]\label{p:proposition of muq and alpha(q)} Let $ \mu_q $ and $ \alpha(q) $ be as above. The following statements hold. \begin{enumerate}[\upshape(1)] \item The Gibbs measure $ \mu_q $ is supported by the level set $ \{y:d_{\uph}(y)=\alpha(q)\} $ and $ D_{\uph}\big(\alpha(q)\big)=\hdim\mu_q=\eta_\phi(q)+q\alpha(q) $. \item The map $ \alpha(q) $ is decreasing, and \begin{align*} \lim_{q\to+\infty}\alpha(q)=\alpha_-,&\quad\lim_{q\to-\infty}\alpha(q)=\alpha_+,\notag\\ \alpha(1)=\hdim\uph,&\ \ \quad\alpha(0)=\alpha_{\max}.\label{eq:alpha(0)} \end{align*} \item The inverse of $ \alpha(q) $ exists, and is denoted by $ q(\alpha) $. 
Moreover, $ q(\alpha)<0 $ if $ \alpha\in (\alpha_{\max},\alpha_+) $, and $ q(\alpha)\ge 0 $ if $ \alpha\in (\alpha_-,\alpha_{\max}] $. \end{enumerate} \end{prop} For a probability measure $ \nu $ on $[0,1]$ and a point $ y\in[0,1] $, define the lower and upper Markov pointwise dimensions respectively by \[\underline{M}_{\nu}(y):=\liminf_{n\to\infty}\frac{\log\nu\big(I_n(y)\big)}{\log |I_n(y)|},\quad \overline{M}_{\nu}(y):=\limsup_{n\to\infty}\frac{\log\nu\big(I_n(y)\big)}{\log |I_n(y)|}.\] When $ \underline{M}_{\nu}(y)=\overline{M}_{\nu}(y) $, their common value is denoted by $ M_\nu(y) $. By~\eqref{eq:bdp} and~\eqref{eq:length of basic interval}, we have \[\od_{\nu}(y)\le \overline{M}_{\nu}(y),\] which implies the inclusions \begin{equation}\label{eq:local<upper<Markov dimension} \{y:d_{\nu}(y)=s\} \subset\{y:\ud_{\nu}(y)\ge s\}\subset\{y:\od_{\nu}(y)\ge s\}\subset\{y:\overline{M}_{\nu}(y)\ge s\}. \end{equation} By the Gibbs property of $ \uph $ and the bounded distortion property on basic intervals~\eqref{eq:bdp}, the definitions of Markov pointwise dimensions can be reformulated as \begin{equation} \overline{M}_{\uph}(y)=\limsup_{n\to \infty}\frac{S_n\phi(y)}{S_n(-\log |T'|)(y)}\quad\text{and}\quad M_{\uph}(y)=\lim_{n\to \infty}\frac{S_n\phi(y)}{S_n(-\log |T'|)(y)}. \end{equation} This allows us to derive the following lemma, which is an alternative version of a proposition due to Jenkinson~\cite[Proposition 2.1]{Jen06}. We omit its proof since the argument is similar. \begin{lem}\label{l:>alpha+ empty} Let $ T $ be a Markov map. Let $ \phi $ be a H\"older continuous potential and let $ \uph $ be the corresponding Gibbs measure. Then, \[\sup_{y\in [0,1]}\overline{M}_{\uph}(y)=\sup_{y\colon M_{\uph}(y)\text{ exists}}M_{\uph}(y)=\max_{\nu\in \mi }\frac{-\int\phi \, d\nu}{\int\log|T'|\, d\nu}=\alpha_+.\] In particular, for any $ s>\alpha_+ $, \[\{y:d_{\uph}(y)=s\}=\{y:\od_{\uph}(y)\ge s\}=\emptyset.\] \end{lem} We finish the section with a variational principle. \begin{lem}\label{l:dimension spectrum} Let $ T $ be a Markov map. Let $ \phi $ be a H\"older continuous potential and $ \uph $ be the associated Gibbs measure. \begin{enumerate}[\upshape(1)] \item For every $ s<\alpha_{\max} $, $$ \hdim\{y:\ud_{\uph}(y)\le s\}=\hdim\{y:\od_{\uph}(y)\le s\}=D_{\uph}(s). $$ \item For every $ s\in(\alpha_{\max},+\infty)\setminus \{\alpha_+\} $, $$ \hdim\{y:\ud_{\uph}(y)\ge s\}=\hdim\{y:\od_{\uph}(y)\ge s\}=D_{\uph}(s). $$ \end{enumerate} \end{lem} \begin{proof} (1) We point out that the following inclusions hold \begin{equation*} \{y:d_{\uph}(y)=s\}\subset\{y:\od_{\uph}(y)\le s\}\subset\{y:\ud_{\uph}(y)\le s\}. \end{equation*} In~\cite[Proposition 2.8]{LiaoSe13}, the leftmost set and the rightmost set were shown to have the same Hausdorff dimension. This together with the above inclusions completes the proof of the first point of the lemma. (2) When $ T $ is the doubling map, the statement was formulated by Fan, Schmeling and Troubetzkoy~\cite[Theorem 3.3]{FaScTr13}. Our proof follows their idea closely; we include it for completeness. By Lemma~\ref{l:>alpha+ empty}, we can assume without loss of generality that $ s<\alpha_+ $. The inclusions in~\eqref{eq:local<upper<Markov dimension} imply the following inequalities: \[\hdim\{y:d_{\uph}(y)=s\} \le \hdim\{y:\od_{\uph}(y)\ge s\}\le\hdim\{y:\overline{M}_{\uph}(y)\ge s\}.\] We turn to prove the reverse inequalities.
By Proposition~\ref{p:proposition of muq and alpha(q)} and the condition $ s>\alpha_{\max} $, there exists a real number $ q_s:=q(s)<0 $ such that \[s=\frac{-\int\phi\, d\mu_{q_s}}{\int\log|T'|\, d\mu_{q_s}} \quad\text{ and }\quad D_{\uph}(s)=\hdim\mu_{q_s}=\eta_\phi(q_s)+q_ss,\] where $ \mu_{q_s} $ is the Gibbs measure associated with the potential $ -\eta_\phi(q_s)\log|T'|+q_s\phi $. Now let $ y $ be any point such that $ \overline{M}_{\uph}(y)\ge s $. By Proposition~\ref{p:topological pressure equals to 0}, the topological pressure $ P(-\eta_\phi(q_s)\log|T'|+q_s\phi) $ is $ 0 $. Then we can apply the Gibbs property of $ \mu_{q_s} $ and~\eqref{eq:bdp} to yield \[\begin{split} \underline{M}_{\mu_{q_s}}(y)&=\liminf_{n\to\infty}\frac{\log e^{S_n(-\eta_\phi(q_s)\log|T'|+q_s\phi)(y)}}{\log|I_n(y)|}\\ &=\liminf_{n\to\infty}\bigg(\frac{-\eta_\phi(q_s)\log|(T^n)'(y)|}{\log|I_n(y)|}+q_s\cdot\frac{\log e^{S_n\phi(y)}}{\log|I_n(y)|}\bigg)\\ &=\eta_\phi(q_s)+q_s\cdot\limsup_{n\to\infty}\frac{\log\uph \big(I_n(y)\big)}{\log|I_n(y)|}\\ &=\eta_\phi(q_s)+q_s \overline{M}_{\uph}(y)\\ &\le \eta_\phi(q_s)+q_ss=D_{\uph}(s), \end{split}\] where the inequality holds because $ q_s<0 $. Finally, Billingsley's Lemma~\cite[Lemma 1.4.1]{BiPe17} gives \[\hdim\{y:\overline{M}_{\uph}(y)\ge s\}\le\hdim\{y:\underline{M}_{\mu_{q_s}}(y)\le D_{\uph}(s)\}\le D_{\uph}(s)=\hdim\{y:d_{\uph}(y)=s\}.\qedhere \] \end{proof} \section{Covering questions related to hitting time and local dimension} In Section~\ref{ss:covering hitting time} below, we reformulate the uniform approximation set $ \uk $ in terms of hitting time. Thereafter, we relate the first hitting time for shrinking balls to the local dimensions in Section~\ref{ss:hitting time local dimension}. \subsection{Covering questions and hitting time}\label{ss:covering hitting time} \ Denote $ \mathcal O^+(x):=\{T^nx:n\ge 1\} $. \begin{defn} For every $ x,y\in[0,1] $ and $ r>0 $, we define the first hitting time of the orbit of $ x $ into the ball $ B(y,r) $ by \[\tau_r(x,y):=\inf\{n\ge 1:T^nx\in B(y,r)\}.\] \end{defn} Set \[\ur(x,y):=\liminf_{r\to 0}\frac{\log\tau_r(x,y)}{-\log r}\quad\text{and}\quad\ovr(x,y):=\limsup_{r\to 0}\frac{\log\tau_r(x,y)}{-\log r}.\] For convenience, when $ \mathcal O^+(x)\cap B(y,r)=\emptyset $, we set $ \tau_r(x,y)=\infty $ and $ \ur(x,y)=\ovr(x,y)=\infty $. If $ \ur(x,y)=\ovr(x,y) $, we denote the common value by $ R(x,y) $. For any ball $ B\subset [0,1] $, we define the first hitting time $ \tau(x, B) $ by \[\tau(x, B):=\inf \{n\ge 1:T^nx\in B\}.\] Similarly, we set $ \tau(x, B)=\infty $ when $ \mathcal O^+(x)\cap B=\emptyset $. The following lemma exhibits a relation between $ \uk $ and hitting time. \begin{lem}\label{l:described by hitting time} For any $ \kappa>0 $, we have \begin{align*} \bigg\{ y\in[0,1]:\ovr(x,y)>\frac{1}{\kappa} \bigg\}&\subset\bk\subset\bigg\{ y\in[0,1]:\ovr(x,y)\ge\frac{1}{\kappa} \bigg\},\\ \bigg\{ y\in[0,1]:\ovr(x,y)<\frac{1}{\kappa} \bigg\}&\subset\uk\subset\bigg\{ y\in[0,1]:\ovr(x,y)\le\frac{1}{\kappa} \bigg\}. \end{align*} \end{lem} \begin{proof} The top left and bottom right inclusions imply one another. Let us prove the bottom right inclusion. Suppose that $ y\in \uk $. Then for all large enough $ N $ there is an $ n\le N $ such that $ T^nx \in B(y,N^{-\kappa})$. Thus $ \tau_{N^{-\kappa}}(x,y)\le N $ for all $ N $ large enough, which implies $ \ovr(x,y)\le 1/\kappa $. The top right and bottom left inclusions imply one another. So, it remains to prove the bottom left inclusion. 
Consider $ y $ such that $ \ovr(x,y)<1/\kappa $. If $ y\in\mathcal O^+(x) $ with $ y=T^{n_0}x $ for some $ n_0\ge 1 $, then the system \[|T^nx-y|=|T^nx-T^{n_0}x|<N^{-\kappa}\quad\text{and}\quad 1\le n\le N\] always has a trivial solution $ n=n_0 $ for all $ N\ge n_0 $. Therefore $ y\in \uk $. Now assume that $ y\notin\mathcal O^+(x) $. By the definition of $ \ovr(x,y) $, there is a positive real number $ r_0<1 $ such that \[\tau_r(x,y)<r^{-1/\kappa}, \quad\text{for all }0<r<r_0.\] Denote $ n_r:=\tau_r(x,y) $, for all $ 0<r<r_0 $. Since $ y\notin \mathcal O^+(x) $, the family of positive integers $ \{n_r:0<r<r_0\} $ is unbounded. For each $ N> r_0^{-1/\kappa} $, denote $ t:=N^{-\kappa} $. The definition of $ n_{t} $ implies that \[T^{n_{t}}x\in B(y,t)=B(y,N^{-\kappa}).\] We conclude $ y\in\uk $ by noting that $ n_{t}<t^{-1/\kappa}=N $. \end{proof} \subsection{Relation between hitting time and local dimension}\label{ss:hitting time local dimension} As Lemma~\ref{l:described by hitting time} shows, we need to study the hitting time $ \ovr(x,y) $ with respect to the Gibbs measure $ \uph $. We will prove that the hitting time is related to the local dimension when the measure is exponential mixing. \begin{defn} A $ T $-invariant measure $ \nu $ is exponential mixing if there exist two constants $ C>0 $ and $ 0<\beta<1 $ such that for any ball $ A $ and any Borel measurable set $ B $, \begin{equation}\label{eq:exponential mixing} |\nu(A\cap T^{-n}B)-\nu(A)\nu(B)|\le C\beta^n\nu(B). \end{equation} \end{defn} \begin{thm}[\cite{Bal00, LiSaV98,PaPo90, Rue04}]\label{t:exponential mixing} The $ T $-invariant Gibbs measure $ \uph $ associated with a H\"older continuous potential $ \phi $ of a Markov map $ T $ is exponential mixing. \end{thm} The exponential mixing property allows us to apply the following theorem, which describes a relation between the hitting time and the local dimension of an invariant measure. \begin{thm}[\cite{Gal07}] Let $ (X,T,\nu) $ be a measure-theoretic dynamical system. If $ \nu $ is superpolynomial mixing and if $ d_\nu(y) $ exists, then for $ \nu $-a.e.\,$ x $, we have \[R(x,y)=d_\nu(y).\] \end{thm} It should be noticed that the superpolynomial mixing property is much weaker than the exponential mixing property. Now, we turn to the study of the Markov map $ T $ on the interval $ [0,1] $. An application of Fubini's theorem yields the following corollary. \begin{cor}[{\cite[Corollary 3.8]{LiaoSe13}}]\label{c:hitting time and local dimension} Let $ T $ be a Markov map. Let $ \uph $ and $ \mu_\psi $ be two $ T $-invariant Gibbs probability measures on $ [0,1] $ associated with H\"older potentials $ \phi $ and $ \psi $, respectively. Then, \[\text{for }\uph\times\mu_\psi\text{-a.e.\,}(x,y),\quad R(x,y)=d_{\uph}(y)=\frac{-\int\phi\, d\mu_\psi}{\int\log|T'|\, d\mu_\psi}.\] \end{cor} \section{Studies of $ \bk $ and $ \uk $} \subsection{The study of $ \bk $} In this subsection, we are going to prove Theorem~\ref{t:main} except item (3). Let us start with the lower bound for $ \hdim\bk $. \begin{lem}\label{l:lower bound for hdimfk} Let $ T $ be a Markov map. Let $ \phi $ be a H\"older continuous potential and let $ \uph $ be the corresponding Gibbs measure. For any $ \kappa>0 $, the following hold. \begin{enumerate}[\upshape(a)] \item If $ 1/\kappa\in(0,\alpha_{\max}) $, then $ \lambda\big(\bk\big)=1 $ for $ \uph $-a.e.\,$ x $. \item If $ 1/\kappa\in[\alpha_{\max},+\infty)\setminus\{\alpha_+\} $, then $ \hdim\bk\ge D_{\uph}(1/\kappa) $ for $ \uph $-a.e.\,$ x $.
\end{enumerate} \end{lem} \begin{proof} (a) Let $ 1/\kappa\in(0,\alpha_{\max}) $. As already observed in Section~\ref{s:Multifractal properties}, the Gibbs measure $ \mu_0 $ associated with $ -\log|T'| $ is strongly equivalent to the Lebesgue measure $ \lambda $. Thus, a set $ F $ has full $ \mu_0 $-measure if and only if $ F $ has full $ \lambda $-measure. Corollary~\ref{c:hitting time and local dimension} implies that \[\text{for }\uph\times\mu_0\text{-}a.e.\,(x,y),\quad R(x,y)=d_{\uph}(y)=\frac{-\int\phi\, d\mu_0}{\int\log|T'|\, d\mu_0}=\alpha_{\max}.\] By Fubini's theorem, for $ \uph $-a.e.\,$ x $, the set $ \{y:R(x,y)=d_{\uph}(y)=\alpha_{\max}\} $ has full $ \mu_0 $-measure. Then for $ \uph $-a.e.\,$ x $, we have \[\mu_0\big(\{ y:\ovr(x,y)>1/\kappa \}\big)\ge \mu_0\big(\{y:R(x,y)=d_{\uph}(y)=\alpha_{\max}\}\big)=1.\] By Lemma~\ref{l:described by hitting time}, we arrive at the conclusion. (b) By Lemma~\ref{l:>alpha+ empty}, the level set $ \{y:d_{\uph}(y)=1/\kappa\} $ is empty if $ 1/\kappa>\alpha_+ $. Thus $ D_{\uph}(1/\kappa)=0 $, and therefore $ \hdim\bk\ge 0=D_{\uph}(1/\kappa) $ trivially holds for all $ 1/\kappa>\alpha_+ $. Let $ 1/\kappa\in[\alpha_{\max},\alpha_+) $. We can suppose that $ \alpha_{\max}\ne \alpha_+ $, since otherwise $ [\alpha_{\max},\alpha_+)=\emptyset $ and there is nothing to prove. For any $ s\in (1/\kappa,\alpha_+) $, by Proposition~\ref{p:proposition of muq and alpha(q)}, there exists a real number $ q_s:=q(s) $ such that \[s=\frac{-\int\phi\, d\mu_{q_s}}{\int\log|T'|\, d\mu_{q_s}}\quad\text{and}\quad\mu_{q_s}(\{y:d_{\uph}(y)=s\})=1.\] Applying Corollary~\ref{c:hitting time and local dimension}, we obtain \[\text{for }\uph\times\mu_{q_s}\text{-}a.e.\,(x,y),\quad R(x,y)=d_{\uph}(y)=\frac{-\int\phi\, d\mu_{q_s}}{\int\log|T'|\, d\mu_{q_s}}=s.\] It follows from Fubini's theorem that, for $ \uph $-a.e.\,$ x $, the set $ \{y:R(x,y)=d_{\uph}(y)=s\} $ has full $ \mu_{q_s} $-measure. Consequently, for $ \uph $-a.e.\,$ x $, \[\begin{split} \hdim\{y:\ovr(x,y)>1/\kappa\}&\ge \hdim\{y:R(x,y)=d_{\uph}(y)=s\}\\ &\ge \hdim \mu_{q_s}=D_{\uph}(s). \end{split}\] We conclude by noting that $ s\in (1/\kappa,\alpha_+) $ is arbitrary and $ D_{\uph} $ is continuous on $ [\alpha_{\max},\alpha_+) $. \end{proof} We are left to determine the upper bound of $ \hdim\bk $. The following four lemmas were initially proved by Fan, Schmeling and Troubetzkoy~\cite{FaScTr13} for the doubling map, and later by Liao and Seuret~\cite{LiaoSe13} in the context of Markov maps. We follow their ideas and establish more general results. In Lemmas~\ref{l:multi-relation}--\ref{l:hitting time subset local dimension}, we will not assume that $ T $ is a Markov map. \begin{lem}\label{l:multi-relation} Let $ T $ be a map on $ [0,1] $ and $ \nu $ be a $ T $-invariant exponential mixing measure. Let $ A_1,A_2,\dots,A_k $ be $ k $ subsets of $ [0,1] $ such that each $ A_i $ is a union of at most $ m $ disjoint balls. Then, for any integer $ d\ge 1 $, \[\prod_{i=1}^{k}\bigg(1-\frac{mC\beta^d}{\nu(A_i)}\bigg)\le \frac{\nu(A_1\cap T^{-d}A_2\cap\cdots\cap T^{-d(k-1)}A_k)}{\nu(A_1)\nu(A_2)\cdots\nu(A_k)}\le \prod_{i=1}^{k}\bigg(1+\frac{mC\beta^d}{\nu(A_i)}\bigg),\] where $ C $ and $ \beta $ are the constants appearing in~\eqref{eq:exponential mixing}.
\end{lem} \begin{proof} Since each $ A_i $ is a union of at most $ m $ disjoint balls, the exponential mixing property of $ \nu $ gives that, for every $ d\ge 1 $, \begin{equation}\label{eq:exponential mixing multi} |\nu(A_i\cap T^{-d}B)-\nu(A_i)\nu(B)|\le mC\beta^d\nu(B), \end{equation} where $ B $ is a Borel measurable set. In particular, defining \[B_i=A_i\cap T^{-d}A_{i+1}\cap\cdots\cap T^{-d(k-i)}A_k,\] we get, for any $ i<k $, \[|\nu(A_i\cap T^{-d}(B_{i+1}))-\nu(A_i)\nu(B_{i+1})|\le mC\beta^d\nu(B_{i+1}).\] The above inequality can be written as \begin{equation*} 1-\frac{mC\beta^d}{\nu(A_i)}\le \frac{\nu(A_i\cap T^{-d}B_{i+1})}{\nu(A_i)\nu(B_{i+1})}\le 1+\frac{mC\beta^d}{\nu(A_i)}. \end{equation*} Multiplying over all $ i<k $ and using the identity \[B_{i+1}=A_{i+1}\cap T^{-d}B_{i+2},\] we have \[\prod_{i=1}^{k}\bigg(1-\frac{mC\beta^d}{\nu(A_i)}\bigg)\le \frac{\nu(A_1\cap T^{-d}A_2\cap\cdots\cap T^{-d(k-1)}A_k)}{\nu(A_1)\nu(A_2)\cdots\nu(A_k)}\le \prod_{i=1}^{k}\bigg(1+\frac{mC\beta^d}{\nu(A_i)}\bigg).\qedhere\] \end{proof} The following lemma illustrates that, for an exponential mixing measure, balls with small local dimension are hit with high probability. \begin{lem}\label{l:big hitting probability} Let $ T $ be a map on $ [0,1] $ and $ \nu $ be a $ T $-invariant exponential mixing measure. Let $ h $ and $ \epsilon $ be two positive real numbers. For each $ n\in\N $, consider $ N\le 2^n $ distinct balls $ B_1,\dots,B_N $ satisfying $ |B_i|=2^{-n} $ and $ \nu(B_i)\ge 2^{-n(h-\epsilon)} $ for all $ 1\le i\le N $. Set \[\mathcal C_{n,N,h}=\{x\in[0,1]:\exists 1\le i\le N\text{ such that }\tau(x,B_i)\ge 2^{nh}\}.\] Then there exists an integer $ n_h\in\N $ independent of $ N $ such that \[\text{for every }n\ge n_h,\quad \nu(\mathcal C_{n,N,h})\le 2^{-n}.\] \end{lem} \begin{proof} For each $ i\le N $, let \[\Delta_{i}:=\{x\in[0,1]:\forall k\le 2^{nh}, T^kx\notin B_i \}.\] Obviously we have $ \mathcal C_{n,N,h}=\bigcup_{i=1}^N\Delta_{i} $, so it suffices to bound from above each $ \nu(\Delta_{i}) $. Pick an integer $ \omega $ such that $ \omega>\log_{\beta^{-1}}2^{h} $. Let $ k=[2^{nh}/(\omega n)] $ be the integer part of $ 2^{nh}/(\omega n) $. Then \[\Delta_{i}\subset\bigcap_{j=1}^{k}\{x\in[0,1]: T^{j\omega n}x\notin B_i\}=\bigcap_{j=1}^{k}T^{-j\omega n}B_i^c.\] Since $ \omega>\log_{\beta^{-1}}2^{h}\ $, there is an $ n_h $ large enough such that for any $ n\ge n_h $, \begin{equation}\label{eq:condition on nh} 2C\beta^{\omega n}<2^{-nh-1}\le \nu(B_i)/2 \end{equation} and \begin{equation}\label{eq:condition 2 on nh} 2^{n+1} \exp\bigg(\frac{-2^{n\epsilon}}{2\omega n}\bigg)\le 2^{-n}. \end{equation} Now applying Lemma~\ref{l:multi-relation} with $ d=\omega n $, $ A_l=B_i^c $ for all $ l\le k $ and $ m=2 $ (each $ B_i^c $ is a union of at most two intervals), together with the $ T $-invariance of $ \nu $, we conclude from~\eqref{eq:condition on nh} that \begin{align*} \nu(\Delta_{i})\le\nu(\cap_{j=1}^{k}T^{-j\omega n}B_i^c) &\le (\nu(B_i^c)+2C\beta^{\omega n})^{k}\\ &\le (1-\nu(B_i)/2)^{k}\\ &\le (1-2^{-n(h-\epsilon)-1})^{ 2^{nh}/(\omega n)-1}\\ &=(1-2^{-n(h-\epsilon)-1})^{-1} \exp\bigg(\frac{2^{nh}\log(1-2^{-n(h-\epsilon)-1})}{\omega n}\bigg)\\ &\le 2\exp\bigg(\frac{-2^{n\epsilon}}{2\omega n}\bigg). \end{align*} By~\eqref{eq:condition 2 on nh}, \[\nu(\mathcal C_{n,N,h})\le \sum_{i=1}^{N}\nu(\Delta_{i})\le 2^{n+1} \exp\bigg(\frac{-2^{n\epsilon}}{2\omega n}\bigg)\le 2^{-n}.\qedhere\] \end{proof} Let us recall that $ \{y:\ovr(x,y)\ge s\} $ is a random set depending on the point $ x $, while $ \{y:\od_{\uph}(y)\ge s\} $ is independent of $ x $.
The following lemma reveals a connection between the random set $ \{y:\ovr(x,y)\ge s\} $ and the deterministic set $ \{y:\od_{\uph}(y)\ge s\} $. \begin{lem}\label{l:hitting time subset local dimension} Let $ T $ be a map on $ [0,1] $ and $ \nu $ be a $ T $-invariant exponential mixing measure. Let $ s\ge 0 $. Then for $ \nu $-a.e.\,$ x $, \[\{y:\ovr(x,y)\ge s\}\subset\{y:\od_{\nu}(y)\ge s\}.\] \end{lem} \begin{proof} The case $ s=0 $ is obvious. We therefore assume $ s>0 $. For any integer $ n\ge 1 $ and any $ \epsilon>0 $, let \[\mathcal R_{n,s,\epsilon}(x)=\big\{y:\tau\big(x,B(y,2^{-n+1})\big)\ge 2^{n(s-\epsilon)}\big\},\quad\mathcal E_{n,s,\epsilon}=\big\{y:\nu\big(B(y,2^{-n})\big)\le 2^{-n(s-2\epsilon)}\big\}.\] By definition, $ y\in \{y:\ovr(x,y)\ge s\} $ if and only if for any $ \epsilon>0 $, there exist infinitely many integers $ n $ such that \[\frac{\log \tau(x,B(y,2^{-n+1}))}{\log 2^n}\ge s-\epsilon.\] Hence, we have \begin{equation}\label{eq:ovr>s=} \{y:\ovr(x,y)\ge s\}=\bigcap_{\epsilon>0}\limsup_{n\to\infty}\mathcal R_{n,s,\epsilon}(x). \end{equation} Similarly, \[\{y:\od_{\nu}(y)\ge s\}=\bigcap_{\epsilon>0}\limsup_{n\to\infty}\mathcal E_{n,s,\epsilon}.\] Thus, it is sufficient to prove that, for $ \nu $-a.e.\,$ x $, there exists some integer $ n(x) $ such that \begin{equation}\label{eq:Rnse subset Ense} \text{for all }n\ge n(x),\quad\mathcal R_{n,s,\epsilon}(x)\subset \mathcal E_{n,s,\epsilon}, \end{equation} or equivalently, \begin{equation}\label{eq:Rnsec subset Ensec} \text{for all }n\ge n(x),\quad\mathcal E^c_{n,s,\epsilon}\subset \mathcal R^c_{n,s,\epsilon}(x). \end{equation} Notice that $ \mathcal E_{n,s,\epsilon}^c $ can be covered by $ N\le 2^n $ balls with center in $ \mathcal E_{n,s,\epsilon}^c $ and radius $ 2^{-n} $. Let $ \mathcal F_{n,s,\epsilon}:=\{B_1,B_2,\dots,B_N\} $ be the collection of these balls. By definition, we have $ \nu(B_i)\ge 2^{-n(s-2\epsilon)} $. Applying Lemma~\ref{l:big hitting probability} to the collection $ \mathcal F_{n,s,\epsilon} $ of balls and to $ h=s-\epsilon $, we see that \[\sum_{n\ge n_h}\nu(\{x:\exists B\in\mathcal F_{n,s,\epsilon} \text{ such that }\tau(x,B)\ge 2^{n(s-\epsilon)}\})\le \sum_{n\ge n_h}2^{-n}<\infty.\] By the Borel-Cantelli Lemma, for $ \nu $-a.e.\,$ x $, there exists an integer $ n(x) $ such that \[\text{for all }n\ge n(x),\text{ for all }B\in \mathcal F_{n,s,\epsilon},\quad \tau(x, B)< 2^{n(s-\epsilon)}.\] If $ y\in B $ for some $ B\in \mathcal F_{n,s,\epsilon} $ and $ n\ge n(x) $, then $ B\subset B(y,2^{-n+1}) $, which implies that $ \tau\big(x, B(y,2^{-n+1})\big)\le\tau(x,B)< 2^{n(s-\epsilon)} $. We then deduce that $ B $ is included in $ \mathcal R^c_{n,s,\epsilon}(x) $. This yields $ \mathcal E_{n,s,\epsilon}^c\subset \mathcal R^c_{n,s,\epsilon}(x) $, which is what we want. \end{proof} \begin{rem} With the notation in Lemma~\ref{l:hitting time subset local dimension}, proceeding with the same argument as~\eqref{eq:ovr>s=}, we have \[ \{y:\ur(x,y)\ge s\}=\bigcap_{\epsilon>0}\liminf_{n\to\infty}\mathcal R_{n,s,\epsilon}(x)\quad \text{and}\quad\{y:\ud_{\nu}(y)\ge s\}=\bigcap_{\epsilon>0}\liminf_{n\to\infty}\mathcal E_{n,s,\epsilon}.\] It then follows from~\eqref{eq:Rnse subset Ense} that for $ \nu $-a.e. $ x $, \[\{y:\ur(x,y)\ge s\}\subset\{y:\ud_{\nu}(y)\ge s\}.\] \end{rem} Applying Lemma~\ref{l:hitting time subset local dimension} to the Gibbs measure $ \uph $, we get the following upper bound. \begin{lem}\label{l:upper bound for hdimfk} Let $ T $ be a Markov map. Let $ \phi $ be a H\"older continuous potential and $ \uph $ be the associated Gibbs measure.
If $ 1/\kappa\ge\alpha_{\max} $, then for $ \uph $-a.e.\,$ x $, \[\hdim\bk\le D_{\uph}(1/\kappa).\] Moreover, if $ 1/\kappa>\alpha_+ $, then for $ \uph $-a.e.\,$ x $, \[\bk=\emptyset.\] \end{lem} \begin{proof} Recall that Lemma~\ref{l:described by hitting time} asserts that \[\bk\subset\{y:\ovr(x,y)\ge 1/\kappa\}\cup\mathcal O^+(x).\] A direct application of Lemmas~\ref{l:dimension spectrum} and~\ref{l:hitting time subset local dimension} yields the first conclusion. The second conclusion follows from Lemmas~\ref{l:>alpha+ empty} and~\ref{l:described by hitting time}. \end{proof} Collecting the results obtained in this subsection, we can prove Theorem~\ref{t:main} except item (3). \begin{proof}[Proof of items (1), (2) and (4) of Theorem~\ref{t:main}] Combining Lemmas~\ref{l:lower bound for hdimfk} and~\ref{l:upper bound for hdimfk}, we get the desired result. \end{proof} \subsection{The study of $ \uk $} In this subsection, we prove the remaining part of Theorem~\ref{t:main}, that is, item (3). We begin with the lower bound of $ \hdim\uk $, which may be proved in much the same way as Lemma~\ref{l:lower bound for hdimfk}. \begin{lem} Let $ T $ be a Markov map. Let $ \phi $ be a H\"older continuous potential and let $ \uph $ be the corresponding Gibbs measure. \begin{enumerate}[\upshape(a)] \item If $ 1/\kappa\in(0,\alpha_{\max}]\setminus\{\alpha_-\} $, then $ \hdim\uk\ge D_{\uph}(1/\kappa) $ for $ \uph $-a.e.\,$ x $. \item If $ 1/\kappa\in(\alpha_{\max},+\infty) $, then $ \hdim\uk=1 $ for $ \uph $-a.e.\,$ x $. \end{enumerate} \end{lem} \begin{proof} (a) By Lemma~\ref{l:dimension spectrum}, the Hausdorff dimension of the level set $ \{y:d_{\uph}(y)=1/\kappa\} $ is zero if $ 1/\kappa<\alpha_- $. Therefore $ \hdim\uk\ge 0=D_{\uph}(1/\kappa) $. The remaining case $ 1/\kappa\in(\alpha_-,\alpha_{\max}] $ holds for the same reason as in Lemma~\ref{l:lower bound for hdimfk} (b). (b) Since a full Lebesgue measure statement implies a full Hausdorff dimension statement, it follows from item (2) of Theorem~\ref{t:main} that $ \hdim\uk= 1 $ when $ 1/\kappa\in(\alpha_{\max},+\infty) $. \end{proof} It remains to show the upper bound of $ \hdim\uk $ when $ 1/\kappa\le\alpha_{\max} $. The proof combines the methods developed in~\cite[\S 7]{FaScTr13} and~\cite[Theorem 8]{KoLiPe21}. Heuristically, the larger the local dimension of a point is, the less likely it is to be hit. \begin{lem}\label{l:upper bound of hdimuk} Let $ T $ be a Markov map. Let $ \phi $ be a H\"older continuous potential and $ \uph $ be the associated Gibbs measure. If $ 1/\kappa\le\alpha_{\max} $, then for $ \uph $-a.e.\,$ x $, \[\hdim\uk\le D_{\uph}(1/\kappa).\] \end{lem} \begin{proof} The proof will be divided into two steps. Step 1. Given any $ a>1/\kappa $, we are going to prove that \begin{equation}\label{eq:ukcap} \hdim \big(\uk\cap\{y:\ud_{\uph}(y)>a\}\big)=0\quad\text{for }\mu_\phi\text{-}a.e.\,x. \end{equation} Suppose now that~\eqref{eq:ukcap} is established. Let $ (a_m)_{m\ge 1} $ be a monotonically decreasing sequence of real numbers converging to $ 1/\kappa $. Applying~\eqref{eq:ukcap} to each $ a_m $ yields a full $ \uph $-measure set corresponding to $ a_m $.
Then by taking the intersection of these countable full $ \uph $-measure sets, we conclude from the countable stability of Hausdorff dimension that \[\hdim \big(\uk\cap\{y:\ud_{\uph}(y)>1/\kappa\}\big)=0\quad\text{for }\mu_\phi\text{-}a.e.\,x.\] As a result, by Lemma~\ref{l:dimension spectrum}, for $ \uph $-a.e.\,$ x $, \begin{align*} \hdim \uk&=\hdim \big(\uk\cap\big(\{y:\underline{d}_{\mu_\phi}(y)\le 1/\kappa\}\cup \{y:\ud_{\uph}(y)>1/\kappa\}\big)\big)\notag\\ &=\hdim \big(\uk\cap\{y:\underline{d}_{\mu_\phi}(y)\le 1/\kappa\}\big)\notag\\ &\le\hdim\{y:\underline{d}_{\mu_\phi}(y)\le 1/\kappa\}\label{eq:upper bound of hdimuk}=D_{\uph}(1/\kappa). \end{align*} This clearly yields the lemma. Choose $ b\in(1/\kappa, a) $. Put $ A_n:=\{y:\mu_\phi(B(y, r))<r^b\text{ for all }r<2^{-n}\} $. By the definition of $ \ud_{\uph}(y) $, we have \[\{y:\ud_{\uph}(y)>a\}\subset\bigcup_{n=1}^\infty A_n.\] Thus,~\eqref{eq:ukcap} is reduced to show that for any $ n\ge 1 $, \begin{equation}\label{eq:ukcapan} \hdim (\uk\cap A_n)=0\quad\text{for }\mu_\phi\text{-}a.e.\,x. \end{equation} Step 2. The next objective is to prove~\eqref{eq:ukcapan}. Fix $ n\ge 1 $. Let $ \epsilon>0 $ be arbitrary. Choose a large integer $ l\ge n $ with \begin{equation}\label{eq:condition l} 12\times 2^{-\kappa l}<2^{-n}\quad\text{and}\quad \gamma^312^b2^{(1-b\kappa)l}< \epsilon, \end{equation} where the constant $ \gamma $ is defined in~\eqref{eq:quasi-Bernoulli}. Let $ \theta_j=[\kappa j\log_{L_1}2]+1 $, where $ L_1 $ is given in~\eqref{eq:length of basic interval}. Then, by~\eqref{eq:length of basic interval} the length of each basic interval of generation $ \theta_j $ is smaller than $ 2^{-\kappa j} $. Define \[\mathcal I_{j}(x):=\bigcup_{J\in\Sigma_{\theta_j}\colon d(I_{\theta_j}(x),J)<2^{-\kappa j}}J,\] where $ d(\cdot,\cdot) $ is the Euclidean metric. Clearly $ \mathcal I_j(x) $ covers the ball $ B(x, 2^{-\kappa j}) $ and is contained in $ B(x, 3\times2^{-\kappa j}) $. Moreover, if $ I_{\theta_j}(x)=I_{\theta_j}(y) $, then $ \mathcal I_j(x)=\mathcal I_j(y) $. With the notation $ \mathcal I_j(x) $, we consider the set \[G_{l,i}(x)= A_n\cap\bigg(\bigcap_{j=l}^i\bigcup_{k=1}^{2^j}\mathcal{I}_{j}(T^kx)\bigg).\] The advantage of using $ \mathcal I_j(x) $ rather than $ B(x, 2^{-\kappa j}) $ is that the map $ x\mapsto G_{l,i}(x) $ is constant on each basic interval of generation $ 2^i+\theta_i $. We are going to construct inductively a cover of $ G_{l,i}(x) $ by the family $ \big\{B(T^kx, 3\times2^{-\kappa i}):k\in S_i(x)\big\} $ of balls, where $ S_i(x)\subset \{1,2,\dots,2^i\} $. For $ i=l $, we let $ S_l(x)\subset\{1,2,\dots, 2^l\} $ consist of those $ k\le 2^l $ such that $ \mathcal I_l(T^kx) $ intersects $ A_n $. Suppose now that $ S_i(x) $ has been defined. We define $ S_{i+1}(x) $ to consist of those $ k\le 2^{i+1} $ such that $ \mathcal I_{i+1}(T^kx) $ intersects $ G_{l,i}(x) $. Then the family $ \big\{B(T^kx, 3\times2^{-\kappa (i+1)}):k\in S_{i+1}(x)\big\} $ of balls forms a cover of $ G_{l,i+1}(x) $, and the construction is completed. With the aid of the notation $ \mathcal I_{i+1}(T^kx) $, one can verify that $ x\mapsto S_{i+1}(x) $ is constant on each basic interval of generation $ 2^{i+1}+\theta_{i+1} $. Let $ N_{i+1}(x):=\sharp S_{i+1}(x) $, then $ x\mapsto N_{i+1}(x) $ is also constant on each basic interval of generation $ 2^{i+1}+\theta_{i+1} $. In order to establish~\eqref{eq:ukcapan}, we need to estimate $ N_{i+1}(x) $. 
For those $ k\in S_{i+1}(x)\cap\{1,2,\dots,2^i\} $, since $ \mathcal I_{i+1}(T^kx)\subset\mathcal I_i(T^kx) $ and $ G_{l,i}(x)\subset G_{l,i-1}(x) $, we must have that $ \mathcal I_i(T^kx) $ intersects $ G_{l,i-1}(x) $, hence $ k\in S_i(x) $. On the other hand, since $ \mathcal I_{i+1}(T^kx) $ is contained in $ B(T^kx,3\times2^{-\kappa (i+1)}) $, if $ \mathcal I_{i+1}(T^kx) $ has non-empty intersection with $ G_{l,i}(x) $, then the distance between $ T^kx $ and $ G_{l,i}(x) $ is less than $ 3\times2^{-\kappa(i+1)} $. In particular, \begin{equation}\label{eq:Gi(x)} T^kx \in\big\{y: d\big(y, G_{l,i}(x)\big)<3\times2^{-\kappa(i+1)}\big\}\subset \bigcup_{J\in\Sigma_{\theta_i}\colon d(J,G_{l,i}(x))<3\times2^{-\kappa (i+1)}}J. \end{equation} Denote the right-hand side union by $ \hat G_{l,i}(x) $. The set $ \hat G_{l,i}(x) $ is nothing but the union of cylinders of level $ \theta_i $ whose distance from $ G_{l,i}(x) $ is less than $ 3\times 2^{-\kappa(i+1)} $. Thus, by the fact that $ x\mapsto G_{l,i}(x) $ is constant on each basic interval of generation $ 2^i+\theta_i $, we have \begin{equation}\label{eq:Gi(x)=Gi(y)} \hat G_{l,i}(x)=\hat G_{l,i}(y),\quad \text{whenever}\quad I_{2^i+\theta_i}(x)=I_{2^i+\theta_i}(y) . \end{equation} According to the above discussion, it holds that \[N_{i+1}(x)\le N_i(x)+M_{i+1}(x),\] where $ M_{i+1}(x) $ is the number of $ 2^i<k\le 2^{i+1} $ for which $ T^k x $ belongs to $ \hat G_{l,i}(x) $. The function $ M_{i+1}(x) $ can further be written as: \begin{equation*} M_{i+1}(x)=\sum_{k=2^i+1}^{2^{i+1}}\chi_{\hat G_{l,i}(x)}(T^kx)=\sum_{k=2^i+1}^{2^i+\theta_i}\chi_{\hat G_{l,i}(x)}(T^kx)+\sum_{k=2^i+\theta_i+1}^{2^{i+1}}\chi_{\hat G_{l,i}(x)}(T^kx). \end{equation*} It then follows from the locally constant property~\eqref{eq:Gi(x)=Gi(y)} of $ \hat G_{l,i}(x) $ that \begin{align} \notag\int M_{i+1}(x) d\uph(x)&=\sum_{k=2^i+1}^{2^i+\theta_i}\int \chi_{\hat G_{l,i}(x)}(T^kx)d\uph(x)+\sum_{k=2^i+\theta_i+1}^{2^{i+1}}\int \chi_{\hat G_{l,i}(x)}(T^kx)d\uph(x)\\ &\le \theta_i+\sum_{J\in\Sigma_{2^i+\theta_i}}\sum_{k=2^i+\theta_i+1}^{2^{i+1}}\int \chi_J(x) \chi_{\hat G_{l,i}(x)}(T^kx)d\uph(x)\notag\\ &=\theta_i+\sum_{J\in\Sigma_{2^i+\theta_i}}\sum_{k=2^i+\theta_i+1}^{2^{i+1}}\int \chi_J(x) \chi_{\hat G_{l,i}(x_J)}(T^kx)d\uph(x),\label{eq:sumsum} \end{align} where $ x_J $ is any fixed point of $ J $. Now the task is to deal with the right-hand side summation. We deduce from the quasi-Bernoulli property of $ \uph $~\eqref{eq:quasi-Bernoulli consequence} that for each $ k>2^i+\theta_i $, \begin{equation}\label{eq:int<CJG} \int \chi_J(x) \chi_{\hat G_{l,i}(x_J)}(T^kx)d\uph(x)=\uph\big(J\cap T^{-k}\big(\hat G_{l,i}(x_J)\big)\big)\le \gamma^3\uph(J)\uph\big(\hat{G}_{l,i}(x_J)\big). \end{equation} Since $ G_{l,i}(x_J) $ can be covered by the family $ \big\{B(T^kx_J, 3\times2^{-\kappa i}):k\in S_i(x_J)\big\} $ of balls, by~\eqref{eq:Gi(x)} the family $ \mathcal F_i(x_J):=\big\{B(T^kx_J, 6\times2^{-\kappa i}):k\in S_i(x_J)\big\} $ of enlarged balls forms a cover of $ \hat{G}_{l,i}(x_J) $. Observe that each enlarged ball $ B\in \mathcal F_i(x_J) $ intersects $ A_n $, thus $ B\subset B(y,12\times 2^{-\kappa i}) $ for some $ y\in A_n $. Then by the definition of $ A_n $ and~\eqref{eq:condition l}, \[\uph(B)\le \uph(B(y,12\times 2^{-\kappa i}))\le12^b2^{-b\kappa i}.\] Accordingly, \begin{equation}\label{eq:uphhatG} \uph\big(\hat{G}_{l,i}(x_J)\big)\le 12^b2^{-b\kappa i}N_i(x_J).
\end{equation} Recall that $ x\mapsto N_i(x) $ is constant on each basic interval of generation $ 2^i+\theta_i $. Applying the upper bound~\eqref{eq:uphhatG} on $ \uph\big(\hat{G}_{l,i}(x_J)\big) $ to~\eqref{eq:int<CJG}, and then substituting~\eqref{eq:int<CJG} into~\eqref{eq:sumsum}, we have \begin{align*} \notag\int M_{i+1}(x) d\uph(x) &\le \theta_i+\sum_{J\in\Sigma_{2^i+\theta_i}}\sum_{k=2^i+\theta_i+1}^{2^{i+1}}\gamma^3\uph\big(\hat{G}_{l,i}(x_J)\big)\uph(J)\\ &\le \theta_i+\sum_{J\in\Sigma_{2^i+\theta_i}}\sum_{k=2^i+\theta_i+1}^{2^{i+1}}\gamma^312^b2^{-b\kappa i}N_i(x_J)\uph(J)\\ &= \theta_i+\gamma^312^b2^{-b\kappa i}(2^i-\theta_i)\int N_i(x)d\uph(x)\\ &\le \theta_i+\epsilon\int N_i(x)d\uph(x), \end{align*} where the last inequality follows from~\eqref{eq:condition l}. Since $ N_{i+1}(x)\le N_i(x)+M_{i+1}(x) $, we have \begin{equation}\label{eq:intNi+1x} \int N_{i+1}(x)d\uph(x)\le \theta_i+(1+\epsilon)\int N_i(x)d\uph(x). \end{equation} Note that~\eqref{eq:intNi+1x} holds for all $ i\ge l $ and $ N_l(x)\le 2^l $. Then \[\begin{split} \int N_{i+1}(x)d\uph(x)&\le \sum_{k=l}^{i}(1+\epsilon)^{i-k}\theta_k+(1+\epsilon)^{i-l+1}\int N_l(x)d\uph(x)\\ &<(1+\epsilon)^i(i\theta_i+2^l). \end{split}\] By Markov's inequality, \[\begin{split} \uph\big(\big\{x:N_{i+1}(x)\ge (1+\epsilon)^{2i}(i\theta_i+2^l)\big\}\big)&\le \uph\bigg(\bigg\{x:N_{i+1}(x)\ge (1+\epsilon)^i\int N_{i+1}(x)d\uph(x)\bigg\}\bigg)\\ &\le (1+\epsilon)^{-i}, \end{split}\] which is summable over $ i $. Hence, for $ \uph $-a.e.\,$ x $, there is an $ i_0(x) $ such that \begin{equation}\label{eq:Ni+1(x)le (1+epsilon)} N_{i+1}(x)\le (1+\epsilon)^{2i}(i\theta_i+2^l) \end{equation} holds for all $ i\ge i_0(x) $. Denote by $ F_{l,\epsilon} $ the full measure set on which the formula~\eqref{eq:Ni+1(x)le (1+epsilon)} holds. Let $ x\in F_{l,\epsilon} $. Then such an $ i_0(x) $ exists. With $ i\ge i_0(x) $, we may cover the set \[G_l(x)=A_n\cap\bigg(\bigcap_{j=l}^\infty\bigcup_{k=1}^{2^j}\mathcal I_j(T^kx)\bigg)\] by $ N_i(x) $ balls of radius $ 3\times2^{-\kappa i} $. Since $ \theta_i=[\kappa i\log_{L_1}2]+1\le ci $ for some $ c>0 $, we have \begin{equation}\label{eq:upper bound of Gl(x)} \hdim G_l(x)\le\limsup_{i\to\infty}\frac{\log N_i(x)}{\log 2^{\kappa i}}\le\limsup_{i\to\infty}\frac{\log \big((1+\epsilon)^{2i}(i\theta_i+2^l)\big)}{\log 2^{\kappa i}}=\frac{2\log (1+\epsilon)}{\kappa\log2}. \end{equation} Let $ (\epsilon_m)_{m\ge 1} $ be a monotonically decreasing sequence of real numbers converging to $ 0 $. For each $ \epsilon_m $, choose an integer $ l_m $ satisfying~\eqref{eq:condition l}, but with $ \epsilon $ replaced by $ \epsilon_m $. 
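Such a choice of $ l_m $ is always possible: since $ b\in(1/\kappa,a) $ gives $ b\kappa>1 $, we have
\[12\times 2^{-\kappa l}\to 0\quad\text{and}\quad \gamma^3 12^b 2^{(1-b\kappa)l}\to 0\quad\text{as }l\to\infty,\]
so both inequalities in~\eqref{eq:condition l} (with $ \epsilon $ replaced by $ \epsilon_m $) hold for every sufficiently large integer $ l_m\ge n $.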
For every $ m\ge 1 $, by the same reason as~\eqref{eq:upper bound of Gl(x)}, there exists a full $ \uph $-measure set $ F_{l_m,\epsilon_m} $ such that \[\text{for all }x\in F_{l_m,\epsilon_m},\quad\hdim G_{l_m}(x)\le\frac{2\log (1+\epsilon_m)}{\kappa\log2}.\] By taking the intersection of the countable full $ \uph $-measure sets $ (F_{l_m,\epsilon_m})_{m\ge 1} $, and using the fact that $ G_l(x) $ is increasing in $ l $, we obtain that for $ \uph $-a.e.\,$ x $, \[\text{for any }l\ge 1,\quad\hdim G_l(x)\le\lim_{m\to \infty}\hdim G_{l_m}(x)=0.\] We conclude~\eqref{eq:ukcapan} by noting that \[\uk\cap A_n\subset\bigcup_{l\ge 1}G_l(x).\qedhere\] \end{proof} \begin{rem} Recall that Lemma~\ref{l:hitting time subset local dimension} exhibits a relation between $ \uk $ and hitting time: \[ \{ y:\ovr(x,y)<1/\kappa\}\setminus\mathcal O^+(x)\subset\uk\subset\{ y:\ovr(x,y)\le1/\kappa\}.\] With this relation in mind, it is natural to investigate the size of the level sets \[\{y:R(x,y)=1/\kappa\},\quad \kappa\in(0,\infty).\] Lemmas~\ref{l:hitting time subset local dimension} and~\ref{l:upper bound of hdimuk} together with the inclusions \[\{y:R(x,y)=1/\kappa\}\subset\{y:\ovr(x,y)\le 1/\kappa\}\quad\text{and}\quad \{y:R(x,y)=1/\kappa\}\subset\{y:\ovr(x,y)\ge 1/\kappa\}\] give the upper bound for Hausdorff dimension: \[\hdim\{y:R(x,y)=1/\kappa\}\le D_{\uph}(1/\kappa),\quad\text{for }\uph\text{-}a.e.\, x. \] The lower bound, coinciding with the upper bound, can be proved by the same argument as Lemma~\ref{l:lower bound for hdimfk}. Thus for $ \uph $-a.e.\,$ x $, \[\hdim\{y:R(x,y)=1/\kappa\}=D_{\uph}(1/\kappa),\quad\text{if }1/\kappa\notin \{\alpha_-,\alpha_+\}. \] \end{rem} {\bf Acknowledgements} The authors would like to thank Tomas Persson for pointing out that the Lebesgue measure statement in Example 1 can be concluded from the reference~\cite{HKKP21} by a Fubini based argument. \begin{thebibliography}{10} \bibitem{AlBe98} P. Alessandri and V. Berth\'e. Three distance theorems and combinatorics on words. {\em Enseignement Math.} 44 (1998), 103--132. \bibitem{Bal00} V. Baladi. \newblock {\em Positive Transfer Operators and Decay of Correlations}. \newblock (Advanced Series in Nonlinear Dynamics, 16). \newblock World Scientific, River Edge, NJ, 2000. \bibitem{BaFa05} J. Barral and A. H. Fan. Covering numbers of different points in Dvoretzky covering. {\em Bull. Sci. Math. Fr.} 129 (2005), 275--317. \bibitem{BaPeS97} L. Barreira, Y. Pesin and J. Schmeling. On a general concept of multifractality: multifractal spectra for dimensions, entropies, and Lyapunov exponents. Multifractal rigidity. {\em Chaos} 7 (1997), 27--38. \bibitem{BiPe17} C. J. Bishop and Y. Peres. {\em Fractals in probability and analysis.} Cambridge Studies in Advanced Mathematics, 162. Cambridge University Press, Cambridge, 2017. \bibitem{Bow75} R. Bowen. {\em Equilibrium States and the Ergodic Theory of Anosov Diffeomorphisms.} Springer, Berlin, 1975. \bibitem{BrMiP92} G. Brown, G. Michon and J. Peyri\`ere. On the multifractal analysis of measures. {\em J. Stat. Phys.} 66 (1992), 775--790. \bibitem{Bug03} Y. Bugeaud. \newblock A note on inhomogeneous Diophantine approximation. \newblock {\em Glasgow Math. J.} 45 (2003), 105--110. \bibitem{BuLi16} Y. Bugeaud and L. Liao. Uniform Diophantine approximation related to $ b $-ary and $ \beta $-expansions. {\em Ergod. Th. $ \& $ Dynam. Sys.} 36 (2016), 1--22. \bibitem{CoLeP87} P. Collet, J. Lebowitz and A. Porzio. The dimension spectrum of some dynamical systems. {\em J. Stat. 
Phys.} 47 (1987), 609--644. \bibitem{Dvo56} A. Dvoretzky. On covering the circle by randomly placed arcs. {\em Pro. Nat. Acad. Sci. USA} 42 (1956), 199--203. \bibitem{Fan02} A. H. Fan. How many intervals cover a point in Dvoretzky covering? {\em Israel J. Math.} 131 (2002), 157--184. \bibitem{FaScTr13} A. H. Fan, J. Schmeling and S. Troubetzkoy. \newblock A multifractal mass transference principle for Gibbs measures with applications to dynamical Diophantine approximation. \newblock {\em Proc. London Math. Soc.} 107 (2013), 1173--1219. \bibitem{Gal07} S. Galatolo. Dimension and hitting time in rapidly mixing systems. {\em Math. Res. Lett.} 14 (2007), 797--805. \bibitem{HKKP21} M. Holland, M. Kirsebom, P. Kunde and T. Persson. Dichotomy results for eventually always hitting time statistics and almost sure growth of extremes. arXiv:2109.06314, Sep 2021. \bibitem{Jen06} O. Jenkinson. Ergodic optimization. {\em Discrete Contin. Dyn. Syst.} 15 (2006), 197--224. \bibitem{JoSt08} J. Jonasson and J. Steif. Dynamical models for circle covering: Brownian motion and Poisson updating. {\em Ann. Probab.} 36 (2008), 739--764. \bibitem{KimLi19} D. H. Kim and L. Liao. \newblock Dirichlet uniformly well-approximated numbers. \newblock {\em Int. Math. Res. Not.} 24 (2019), 7691--7732. \bibitem{KoLiPe21} H. Koivusalo, L. Liao and T. Persson. \newblock Uniform random covering problems. \newblock {\em Int. Math. Res. Not.} pages 1--27, October 2021. \newblock Published online. DOI: 10.1093/imrn/rnab272. \bibitem{LiaoSe13} L. Liao and S. Seuret. \newblock Diophantine approximation by orbits of expanding Markov maps. \newblock {\em Ergod. Th. $ \& $ Dynam. Sys.} 33 (2013), 585--608. \bibitem{LiSaV98} C. Liverani, B. Saussol and S. Vaienti. \newblock Conformal measure and decay of correlation for covering weighted systems. \newblock {\em Ergod. Th. $ \& $ Dynam. Sys.} 18 (1998), 1399--1420. \bibitem{PaPo90} \newblock W. Parry and M. Pollicott. Zeta functions and the periodic orbit structure of hyperbolic dynamics. {\em Ast\'erisque} 1990, 187--188. \bibitem{PerRams17} T. Persson and M. Rams. \newblock On shrinking targets for piecewise expanding interval maps. \newblock {\em Ergod. Th. $ \& $ Dynam. Sys.} 37 (2017), 646--663. \bibitem{PeWe97} Y. Pesin and H. Weiss. The multifractal analysis of Gibbs measures: motivation, mathematical foundation, and examples. {\em Chaos} 7 (1997), 89--106. \bibitem{Ran89} D. A. Rand. The singularity spectrum $ f(\alpha) $ for cookie-cutters. {\em Ergod. Th. $ \& $ Dynam. Sys.} 9 (1989), 527--541. \bibitem{Rue04} D. Ruelle. {\em Thermodynamic formalism. The Mathematical Structures of Equilibrium Statistical Mechanics}. Cambridge University Press, Cambridge, 2nd edition, 2004. \bibitem{ScTr03} J. Schmeling and S. Troubetzkoy. \newblock Inhomogeneous Diophantine approximation and angular recurrence properties of the billiard flow in certain polygons. {\em Mat. Sb.} 194(2) (2003) 129--144; translation in {\em Mat. Sb.} 194(2) (2003), 295--309. \bibitem{Seu18} S. Seuret. Inhomogeneous random coverings of topological Markov shifts. {\em Math. Proc. Camb. Phil. Soc.} 165 (2018), 341--357. \bibitem{She72} L. Shepp. Covering the circle with random arcs. {\em Israel J. Math.} 11 (1972), 328--345. \bibitem{Sim94} D. Simpelaere. Dimension spectrum of Axiom A diffeomorphisms. II. Gibbs measures. {\em J. Stat. Phys.} 76 (1994), 1359--1375. \bibitem{Tan15} J. M. Tang. Random coverings of the circle with i.i.d. centers. {\em Sci. China Math.} 55 (2015), 1257--1268. \bibitem{Wal78} P. Walters. 
Invariant measures and equilibrium states for some mappings which expand distances. {\em Trans. Amer. Math. Soc.} 236 (1978), 121--153. \bibitem{ZhWu20} L. Zheng and M. Wu. Uniform recurrence properties for beta-transformation. {\em Nonlinearity} 33 (2020), 4590--4612. \end{thebibliography} \end{document}
2205.14827v2
http://arxiv.org/abs/2205.14827v2
Gaeta resolutions and strange duality over rational surfaces
\documentclass[12pt,reqno]{amsart} \usepackage{amsmath} \usepackage{amsfonts} \usepackage{amscd} \usepackage{amssymb} \usepackage{amsthm} \usepackage{mathdots} \usepackage{mathtools} \usepackage{multicol} \usepackage{inputenc} \usepackage{caption} \usepackage{subcaption} \usepackage{float} \numberwithin{equation}{section} \usepackage{MnSymbol} \usepackage[linktocpage=true]{hyperref} \usepackage{mathrsfs} \usepackage{tikz,tikz-cd, color} \usepackage{adjustbox} \usepackage{appendix} \usetikzlibrary{matrix} \usetikzlibrary{decorations.pathmorphing} \tikzset{ symbol/.style={ draw=none, every to/.append style={ edge node={node [sloped, allow upside down, auto=false]{$#1$}}} } } \usepackage{stmaryrd} \usepackage{enumerate} \usepackage{xcolor} \usepackage[all]{xy} \topmargin-0.1in \textwidth6.4in \textheight8.6in \oddsidemargin=0.2in \evensidemargin=0.2in \newcommand{\info}[1]{\vspace{5 mm}\par \noindent \marginpar{\textsc{Info}} \framebox{\begin{minipage}[c]{0.95 \textwidth} \tt #1 \end{minipage}}\vspace{5 mm}\par} \newcommand{\todo}[1]{\vspace{5 mm}\par \noindent \marginpar{\textsc{ToDo}} \framebox{\begin{minipage}[c]{0.95 \textwidth} \tt #1 \end{minipage}}\vspace{5 mm}\par} \newtheorem{thm}{Theorem}[section] \newtheorem{lem}[thm]{Lemma} \newtheorem{prop}[thm]{Proposition} \newtheorem{cor}[thm]{Corollary} \newtheorem{conj}[thm]{Conjecture} \theoremstyle{definition} \newtheorem{defn}[thm]{Definition} \newtheorem{exmp}[thm]{Example} \theoremstyle{remark} \newtheorem{rem}[thm]{Remark} \newcommand{\oo}{\mathcal{O}} \newcommand{\bp}{\mathbb{P}} \newcommand{\bc}{\mathbb{C}} \newcommand{\Hom}{{\rm Hom}} \newcommand{\lHom}{\mathcal{H}om} \newcommand{\Ext}{{\rm Ext}} \newcommand{\lExt}{\mathcal{E}xt} \newcommand{\ext}{{\rm ext}} \newcommand{\Quot}{{\rm Quot}} \newcommand{\svn}{S_{V,n}} \newcommand{\rk}{{\rm rk\,}} \newcommand{\ch}{{\rm ch}} \newcommand{\im}{{\rm im\,}} \newcommand{\id}{{\rm id}} \newcommand{\vir}{{\rm vir}} \newcommand{\ozk}{{\mathcal{O}_{\mathscr{Z}}}} \newcommand{\tr}{{\rm tr}} \newcommand{\ad}{{\rm ad\,}} \newcommand{\codim}{{\rm codim}} \newcommand{\R}{{\rm R}} \newcommand{\ev}{{\rm ev}} \newcommand{\E}{\mathcal{E}} \newcommand{\dE}{{\vphantom{\E}}^{\vee}\!\E} \newcommand{\basefield}{{k}} \newcommand{\hilbl}{^{[\ell]}} \newcommand{\syml}{^{(\ell)}} \newcommand{\mumax}{{\mu_\max}} \newcommand{\mumin}{{\mu_\min}} \newcommand{\rcone}{{\hat{R}}} \DeclareMathOperator{\gr}{gr} \DeclareMathOperator{\SD}{SD} \DeclareMathOperator{\pt}{pt} \DeclareMathOperator{\Aut}{Aut} \DeclareMathOperator{\coker}{coker} \DeclareMathOperator{\db}{D^b} \DeclareMathOperator{\td}{td} \DeclareMathOperator{\TOR}{\mathcal{T}or} \DeclareMathOperator{\K}{K} \DeclareMathOperator{\Pic}{Pic} \DeclareMathOperator{\GL}{GL} \DeclareMathOperator{\Char}{Char} \DeclareMathOperator{\sstable}{ss} \DeclareMathOperator{\stable}{s} \newcommand{\rightarrowdbl}{\rightarrow\mathrel{\mkern-14mu}\rightarrow} \newcommand{\xrightarrowdbl}[2][]{ \xrightarrow[#1]{#2}\mathrel{\mkern-14mu}\rightarrow } \author[]{Thomas Goller} \address{The College of New Jersey, NJ, USA} \email{[email protected]} \author[]{Yinbang Lin} \address{Tongji University, Shanghai, China} \email{yinbang\textunderscore [email protected]} \title[Gaeta resolutions and strange duality]{Gaeta resolutions and strange duality\\ over rational surfaces} \subjclass[2020]{Primary: 14D20, 14F06; Secondary: 14F08, 14J26} \keywords{Exceptional sequence; Gaeta resolution; Moduli of sheaves; Strange duality; Quot scheme} \begin{document} \begin{abstract} Over the projective plane 
and at most two-step blowups of Hirzebruch surfaces, where there are strong full exceptional sequences of line bundles, we obtain foundational results about Gaeta resolutions of coherent sheaves by these line bundles. Under appropriate conditions, we show the locus of semistable sheaves not admitting Gaeta resolutions has codimension at least 2. We then study Le Potier's strange duality conjecture. Over these surfaces, for two orthogonal numerical classes where one has rank one and the other has sufficiently positive first Chern class, we show that the strange morphism is injective. The main step in the proof is to use Gaeta resolutions to show that certain relevant Quot schemes are finite and reduced, allowing them to be enumerated using the authors' previous paper. \end{abstract} \maketitle \section{Introduction} In the moduli theory of sheaves over complex algebraic surfaces, there is a famous conjecture by Le Potier \cite{LP05}, called the {\em strange duality conjecture}, which relates the global sections of two determinant line bundles on certain pairs of moduli spaces of sheaves. Known results over rational surfaces are mostly in the cases where one of the moduli spaces parametrizes pure dimension 1 sheaves, see e.g. \cite{Dan02,Abe10,Yua21}. On other surfaces, the conjecture requires different formulations, see e.g. \cite{BolMarOpr17}. In an attempt to provide a unified treatment of the conjecture over rational surfaces, the first author, Bertram, and Johnson \cite{BerGolJoh16} proposed to use Grothendieck's {\em Quot schemes} \cite{Gro60}, following Marian and Oprea's ideas \cite{MarOpr07,MarOpr07b} over curves. A key tool in the study of Quot schemes over $\mathbb{P}^2$ in \cite{BerGolJoh16} is {\em Gaeta resolutions} of coherent sheaves in terms of the {\em strong full exceptional sequence} of line bundles $(\oo(-2),\oo(-1),\oo)$. This leads us to the study of exceptional sequences and Gaeta resolutions over rational surfaces. A strong full exceptional sequence, if it exists, completely captures the derived category in an explicit way, by theorems of Baer \cite{Bae88} and Bondal \cite{Bon89}. While coherent sheaves can always be resolved by locally free sheaves by the Hilbert Syzygy Theorem, we can bring the resolution under better control if the locally free sheaves are taken from an exceptional sequence. Resolutions built from such exceptional sequences, called Gaeta resolutions, have been applied toward a variety of problems in the study of sheaves on rational surfaces \cite{Dre86,LeP94,CosHui18weakBN,CosHui20,coskun_existence_2019}. Though the existence of strong full exceptional sequences of line bundles in general is an open question, the answer is affirmative over a rational surface that can be obtained from a Hirzebruch surface $\mathbb{F}_e$ by blowing up at most two sets of points \cite{hille-perling-exceptional11}, which we call a \emph{two-step blowup of $\mathbb{F}_e$}. Over $\mathbb{P}^2$ or a two-step blowup of $\mathbb{F}_e$, we choose a particular strong full exceptional sequence, determine when a sheaf admits Gaeta resolutions, and study general properties of such sheaves. We then apply Gaeta resolutions to the study of strange duality, proving the injectivity of the strange morphism in some cases. One of the key points in the proof is to show that relevant Quot schemes are finite and reduced, which we accomplish using Gaeta resolutions. A parallel statement over $\mathbb{P}^2$ was proved in \cite{BerGolJoh16}. 
Another crucial point is to enumerate the length of the finite Quot scheme, which was settled in our previous paper \cite{GolLin22} via the study of the moduli space of limit stable pairs \cite{Lin18}. \medskip We now set up the study. Let $S$ be a smooth projective algebraic surface over an algebraically closed field $k$ of characteristic $0$, with a strong full exceptional sequence of line bundles \[(\E_1, \E_2,\dots, \E_n).\] Given a coherent sheaf $F$ on $S$, we would like to find a resolution of $F$ of the form\footnote{To avoid clumsy notation, we drop the direct sum symbol from the exponents. If we want to denote tensor products, we will use $\otimes$.} \begin{equation} 0 \to \E_1^{a_1} \oplus \cdots \oplus \E_d^{a_d} \to \E_{d+1}^{a_{d+1}} \oplus \cdots \oplus \E_{n}^{a_{n}} \to F \to 0, \end{equation} which is called a {Gaeta resolution}. If a Gaeta resolution exists, the exponents $a_1,\dots,a_n$ are uniquely determined by the numerical class of $F$. One of our main technical results is a criterion to determine when a Gaeta resolution exists. We state here the criterion for a two-step blowup of $\mathbb{F}_e$, where we know there are strong full exceptional sequences of line bundles, see \S~\ref{ss:ex-sfes}. Let $S_0$ denote the set of blown-up points in the first step, with corresponding exceptional divisors $E_i$ for $i \in S_0$. The second set of blown-up points, which lie on these exceptional divisors, is denoted $S_1$, and the corresponding exceptional divisors are $E_j$ for $j \in S_1$. Using the exceptional sequence \S~\ref{ss:ex-sfes}(c), we get the following result. \begin{prop}[Proposition~\ref{prop:special-GTR-criterion}(b)]\label{prop:crit-blowup} On a two-step blowup of $\mathbb{F}_e$, a torsion-free sheaf $F$ has a Gaeta resolution if and only if \begin{enumerate}[(i)] \item $H^p(F)$ vanishes for $p \ne 0$; \item $H^p(F(D))$ vanishes for $p \ne 1$ and $D$ the divisors $-A+\sum_{i \in S_0} E_i$, $-B+\sum_{i \in S_0} E_i$, and $-A-B+\sum_{i \in S_0} E_i + \sum_{j \in S_0} E_j$; and \item $H^1(F(E_i))=0$ and $H^1(F(E_j)) = 0$ for all $i \in S_0$ and $j \in S_1$. \end{enumerate} \end{prop} Section~\ref{sect:gtr} contains a similar statement for $\bp^2$, which is known. We will apply Gaeta resolutions towards moduli problems, using the connections to prioritary sheaves. We impose a mild technical condition on $X$ a two-step blowup of $\mathbb{F}_e$, which we call {\em admissibility} (Definition~\ref{defn:adm-blowup}). We use $A$ to denote both the class of the fibers of the ruling $\mathbb{F}_e\to \mathbb{P}^1$ and the pullback of this class to $X$. We denote a numerical class $f$ in the Grothendieck group $\K(X)$ by \[f=(r,L,\chi),\] where $r$ is the rank, $L$ is the first Chern class (equivalent to the determinant line bundle), and $\chi$ is the Euler characteristic. We have \begin{prop}\label{prop:unirat-large-L} Let $X$ be an admissible blowup of $\mathbb{F}_e$ and $H$ be a polarization such that $H\cdot(K_X+A)<0$. For a numerical class $f\in \K(X)$ with fixed rank $r>0$ and fixed $\chi\geqslant 0$, suppose the first Chern class $L$ is sufficiently positive. Then a general $H$-semistable sheaf of class $f$ admits Gaeta resolutions. \end{prop} We mostly use Gieseker stability and use either ``$H$-semistable'' or ``semistable''. If we want to use slope stability, we will specify. 
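Note that fixing the class $f=(r,L,\chi)$ is equivalent to fixing the rank together with the first and second Chern classes: by Riemann--Roch on the rational surface $X$, where $\chi(\oo_X)=1$,
\[\chi = r + \tfrac{1}{2}\,L\cdot(L-K_X) - c_2(f).\]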
For the precise meaning of being sufficiently positive in Proposition \ref{prop:unirat-large-L}, see the conditions in Proposition~\ref{prop:chern-has-gr}(a) as well as Proposition \ref{prop:cokernel-properties}(b.ii). There is another statement, Proposition~\ref{prop:unirat-large-c2}, in which the rank and first Chern class are fixed, which asserts that if the discriminant is sufficiently large then general semistable sheaves admit Gaeta resolution up to a twist by a line bundle. In each case, we can immediately deduce that the moduli space $M(f)$ is unirational, which is known \cite{Bal87}. By imposing stronger conditions on the numerical class $f$ and the polarization $H$, we prove a refinement of Proposition~\ref{prop:unirat-large-L}: \begin{thm}\label{thm:moduli-nongr-codim2} Let $X$ be an admissible blowup. Assume the class $f$ is of rank $r\geqslant 2$, admits Gaeta resolutions in which the exponents $a_i$ are strictly positive and satisfy (\ref{eq:conditions-exponents}, \ref{eq:condition-alpha4}), that the polarization $H$ is general and satifies (\ref{eq:H-pos-lin-combo}, \ref{eq:conditions-on-H}), and that the discriminant of $f$ is sufficiently large in the sense of (\ref{eq:discriminant-bound}). Then the closed subset $Z\subset M(f)$ of S-equivalence classes of semistable sheaves whose Jordan-H\"older gradings do not admit Gaeta resolutions has codimension $\geqslant 2$ in $M(f)$. \end{thm} If $X=\mathbb{F}_e$, the conditions (\ref{eq:conditions-exponents}, \ref{eq:H-pos-lin-combo}, \ref{eq:conditions-on-H}) are vacuous. Using Proposition~\ref{prop:unirat-large-L} and Theorem~\ref{thm:moduli-nongr-codim2}, we deduce \begin{cor}\label{cor:gen-stable-prop}Assuming the conditions from Proposition \ref{prop:unirat-large-L} and appropriate conditions from Proposition \ref{prop:cokernel-properties}(b), a general sheaf in $M(f)$ is torsion-free, is locally-free if $r \geqslant 2$, satisfies the cohomological vanishing conditions in Proposition \ref{prop:special-GTR-criterion} (b.i-iii), and is globally generated if $\chi \geqslant r+2$. Assuming the stronger conditions from Theorem \ref{thm:moduli-nongr-codim2}, the same properties hold away from a locus of codimension $\geqslant 2$ in $M(f)$ (with locally free replaced by torsion-free). \end{cor} For the proof of Theorem~\ref{thm:moduli-nongr-codim2}, we will need the following statement which is of general interest. \begin{thm}\label{thm:yoshioka} Suppose $S$ is a rational surface other than $\mathbb{P}^2$ and $S\to \mathbb{P}^1$ is a morphism where a general fiber $D$ is isomorphic to $\mathbb{P}^1$. Let $H$ be a general ample divisor such that $H\cdot (K_S+2D)<0$. Assume there is a slope stable vector bundle with rank $r\geqslant 2$, first Chern class $c_1$, and second Chern class $c_2-1$ over $S$. Then \[\Pic M(r,c_1,c_2)\cong \mathbb{Z}\oplus \Pic S.\] \end{thm} We will apply Gaeta resolutions towards the study of the strange duality conjecture. Let $S$ be $\mathbb{P}^2$ or $X$ an admissible blowup of $\mathbb{F}_e$. In the Grothendieck group $\K(S)$, let \[\sigma = (r,L,r\ell) \mbox{ for } r \geqslant 2 \qquad\mbox{and}\qquad \rho = (1,0,1-\ell) \mbox{ for } \ell \geqslant 1 \] be two numerical classes. Notice that they are orthogonal: $\chi(\sigma\cdot \rho)=0$. Let $H$ denote the hyperplane class on $\bp^2$ or a polarization satisfying (\ref{eq:H-pos-lin-combo}, \ref{eq:conditions-on-H}) on $X$. 
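The orthogonality can be checked directly: Riemann--Roch gives, for classes $v_i=(r_i,L_i,\chi_i)$ in $\K(S)$,
\[\chi(v_1\cdot v_2)=r_1\chi_2+r_2\chi_1+L_1\cdot L_2-r_1r_2\,\chi(\oo_S),\]
and since $S$ is rational, $\chi(\oo_S)=1$, so
\[\chi(\sigma\cdot\rho)=r(1-\ell)+r\ell+L\cdot 0-r=0.\]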
Let $M(\sigma)$ and $M(\rho)$ denote the moduli spaces of $H$-semistable sheaves, where $M(\rho)$ is isomorphic to the Hilbert scheme $S^{[\ell]}$ of points. Let $\mathscr{Z}\subset S^{[\ell]}\times S$ be the universal subscheme and $I_{\mathscr{Z}}$ its ideal sheaf. For a coherent sheaf $W$ of class $\sigma$, consider the determinant line bundle \begin{equation*} \Theta_\sigma:=\det\left(p_{!}\left(I_{\mathscr{Z}}\stackrel{L}{\otimes} {q}^*W\right)\right)^{*} \end{equation*} on $S^{[\ell]}$, where ${p}$ and ${q}$ are the projections from $S^{[\ell]}\times S$ to the first and second factors, respectively. There is also a similar line bundle $\Theta_\rho$ on $M(\sigma)$. On $M(\sigma)\times M(\rho)$, the line bundle $\Theta_{\sigma,\rho}\cong \Theta_\rho\boxtimes \Theta_\sigma$ has a canonical section, which induces the {\em strange morphism} \[\operatorname{SD}_{\sigma,\rho}\colon H^0(S^{[\ell]}, \Theta_\sigma)^*\to H^0(M(\sigma),\Theta_\rho).\] The strange duality conjecture says that $\operatorname{SD}_{\sigma,\rho}$ is an isomorphism. For a more general setup, see \S~\ref{subsect:strange-mor}. In this context, we prove the following result in support of the strange duality conjecture. In the statement, for $V$ a vector bundle on $S$, we write \begin{equation}\label{eq:taut-bundle} V^{[\ell]}={p}_{*}(\oo_{\mathscr{Z}}\otimes {q}^* V). \end{equation} \begin{thm}\label{thm:sd-injective} Let $S$ be $\bp^2$ or $X$ an admissible blowup of $\mathbb{F}_e$, and $\sigma$, $\rho$, and $H$ as above. If $L$ is sufficiently positive, then: \begin{enumerate}[(a)] \item The rank of the strange morphism is bounded below by $\int_{S^{[\ell]}} c_{2\ell}({V}^{[\ell]})$, for $V$ a vector bundle with numerical class $\sigma + \rho$; \item The strange morphism $\mathrm{SD}_{\sigma,\rho}$ is injective. \end{enumerate} \end{thm} We sketch the proof. Let $V$ be a vector bundle of class $\sigma + \rho$ that admits a general Gaeta resolution and consider quotients of $V^*$ of class $\rho$. Then the Quot scheme has expected dimension $0$. If it is finite and reduced, a simple argument shows that its length provides a lower bound for $\operatorname{SD}_{\sigma,\rho}$. According to \cite{GolLin22}, in this case its length is $\int_{S^{[\ell]}}c_{2\ell}(V^{[\ell]})$. On the other hand, Theorem~\ref{numbers-match} relates this top Chern class to $\chi(S^{[\ell]}, \Theta_\sigma)$, and the determinant line bundle $\Theta_{\sigma}$ has no higher cohomology when $L$ is sufficiently positive, which finishes the proof. Thus, the crucial point is to establish that the Quot scheme is finite and reduced, which we prove by considering the relative Quot scheme over the space of Gaeta resolutions and calculating the dimension of the relative Quot scheme. The following theorem summarizes the key results related to the Quot scheme. \begin{thm}\label{thm:finite-quot-scheme} Let $S$, $\sigma$, $\rho$, and $H$ be as in Theorem~\ref{thm:sd-injective}, and $V$ be a vector bundle of class $\sigma+\rho$ that admits a general Gaeta resolution. If $L$ is sufficiently positive, then: \begin{enumerate}[(a)] \item The Quot scheme $\Quot(V^*,\rho)$ parametrizing quotient sheaves of $V^*$ with numerical class $\rho$ is finite and reduced; \item For every point $[V^* \twoheadrightarrow F]$ of $\Quot(V^*,\rho)$, $F$ is an ideal sheaf $I_Z$ for general $Z \in S^{[\ell]}$ and the kernel is semistable; \item The length of $ \Quot(V^*,\rho)$ is $\int_{S^{[\ell]}} c_{2\ell}({V}^{[\ell]})$. 
\end{enumerate} \end{thm} In \cite{BerGolJoh16}, parts (a) and (b) were proved over $\mathbb{P}^2$ and calculations were made that informed Johnson's expectation that the counting formula (c) should be true for del Pezzo surfaces \cite{Joh18}. The formula (c) was proved in \cite{GolLin22} by the authors of the current paper, for a general smooth regular projective surface, assuming that the Quot scheme is finite and reduced. The positivity conditions on $L$ in these theorems, which are stronger than for Proposition \ref{prop:unirat-large-L}, are summarized in the appendix. Theorem \ref{thm:finite-quot-scheme} requires (\ref{eq:first-three-conditions}, \ref{eq:sd-discriminant-bound}), and Theorem \ref{thm:sd-injective} requires (\ref{eq:fifth-condition}) as well. \medskip We organize the paper as follows. In \S~\ref{sect:hirzebruch}, we review basic facts about divisors and line bundles on Hirzebruch surfaces and their blowups. In \S~\ref{sect:exc-sequence}, we review exceptional sequences in the derived category. In \S~\ref{sect:gtr}, we obtain criteria for the existence of Gaeta resolutions, including Proposition~\ref{prop:crit-blowup}, and classify the numerical classes of sheaves admitting Gaeta resolutions. In \S~\ref{sect:prop-gr}, we prove some general properties of such sheaves and relate them to prioritary sheaves. In \S~\ref{sec:stability}, we discuss connections to semistable sheaves and prove Theorem~\ref{thm:moduli-nongr-codim2}. In \S~\ref{sect:sd}, we set up the strange morphism and prove Theorem~\ref{thm:sd-injective}. In \S~\ref{sect:finite-quot}, we prove Theorem~\ref{thm:finite-quot-scheme}. Finally, the appendix contains a summary of the positivity conditions on $L$ required in the proofs of Theorems \ref{thm:sd-injective} and \ref{thm:finite-quot-scheme}. {\em Acknowledgment. }YL would like to thank Alina Marian and Dragos Oprea for helpful correspondences. TG would like to thank Lothar G\"{o}ttsche for providing updates on his work with Anton Mellit. YL is supported by grants from the Fundamental Research Funds for the Central Universities and Applied Basic Research Programs of Science and Technology Commission Foundation of Shanghai Municipality. \section{Hirzebruch surfaces and blowups}\label{sect:hirzebruch} We review some basic results on divisors and cohomology of line bundles on Hirzebruch surfaces and their blow-ups. Along the way, we introduce two technical assumptions (\ref{cond:avoid-b}) and (\ref{cond:avoid-fiber-dir}). The first is not a restriction, while the second is a mild condition. \subsection{Divisors on blowups of Hirzebruch surfaces} Let $\mathbb{F}_e$ denote the Hirzebruch surface $\bp(\oo_{\bp^1} \oplus \oo_{\bp^1}(e))$ with $e\geqslant 0$, which is a rational surface that is ruled over $\bp^1$. Note that $\mathbb{F}_0 \cong \bp^1 \times \bp^1$. Letting $A$ denote the class of a fiber and $B$ the 0-section with self-intersection $-e$, $A$ and $B$ generate the effective cone of $\mathbb{F}_e$, and $A^2=0$, $A \cdot B = 1$, $B^2 = -e$. The canonical divisor is $K_{\mathbb{F}_e}=-(e+2)A-2B$. The divisor classes $eA+B$ and $A$ generate the nef cone. To simplify notation, let \[C=eA+B.\] The linear system $|A|$ induces the morphism to $\bp^1$ giving the ruling, while $|C|$ induces a morphism $\mathbb{F}_e \to \bp^{e+1}$; if $e=0$, this is the other ruling of $\bp^1 \times \bp^1$, while if $e > 0$, this contracts $B$ to a point and maps the fibers of the ruling to distinct lines through that point in $\bp^{e+1}$. 
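As a quick numerical check, $C\cdot A=1$, $C\cdot B=e\,(A\cdot B)+B^2=0$ and $C^2=e$, consistent with $|C|$ contracting $B$; moreover, Riemann--Roch gives
\[\chi(\oo(C))=1+\tfrac{1}{2}\,C\cdot(C-K_{\mathbb{F}_e})=e+2,\]
and since $\oo(C)$ has no higher cohomology (see \S~\ref{subsect:coh-line-bdl}), the target of the morphism induced by $|C|$ is indeed $\bp^{e+1}$.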
In particular, suppose $x,y$ are distinct points on $\mathbb{F}_e$, where we allow $y$ to be infinitely near to $x$. Then $|A|$ separates $x,y$ unless $x,y$ are distinct points on the same fiber or $y$ corresponds to the tangent direction along the fiber at $x$. Similarly, $|C|$ separates $x,y$ unless $x,y$ are distinct points on $B$ or $x \in B$ and $y$ is the tangent direction along $B$ at $x$. \begin{rem}\label{rem:blowups} The blowup of $\bp^2$ at any point is isomorphic to $\mathbb{F}_1$. The blowup of $\bp^1 \times \bp^1$ at any point is isomorphic to the blowup of $\mathbb{F}_1$ at a point not on $B$. For $e>0$, the blowup of $\mathbb{F}_e$ at a point on $B$ is isomorphic to the blowup of $\mathbb{F}_{e+1}$ at a point not on $B$. Thus, when considering the surfaces that arise from blowing up $\bp^2$ or Hirzebruch surfaces, it suffices to consider blowups of $\mathbb{F}_e$ for $e>0$ where the blown-up points are not on $B$ \cite[p.519]{GriHar78}. So, quite often, in the case $e>0$, we impose the condition that \begin{equation}\label{cond:avoid-b} \text{the blowup avoids $B$} \end{equation} in the sense that none of the blown-up points $p_1,\dots,p_s$ is on $B$. \end{rem} Let $X$ be obtained from a sequence of blowups \[ X=X_t \to X_{t-1} \to \cdots \to X_1 \to X_0 = \mathbb{F}_e, \] where $b_i \colon X_i \to X_{i-1}$ is the $i$th blowup at a point $p_i\in X_{i-1}$. Assume that the indices $\{1,\dots,t\}$ can be partitioned into two sets \[S_0=\{1,\dots,s\} \quad \mbox{and} \quad S_1 = \{s+1,\dots,t\}\] such that the $b_i$ within each of the sets $\{b_1,\dots,b_s\}$ and $\{b_{s+1},\dots,b_t\}$ commute. In other words, $X$ can be obtained from $\mathbb{F}_e$ by up to two blowups, each possibly at multiple points. By Remark \ref{rem:blowups}, it suffices to consider the case $e>0$ and that the blowup avoids $B$. We define a partial ordering on the set $\{ p_1,\dots,p_t \}$ by $p_j \succ p_i$ if $p_j$ is on the exceptional divisor of $b_i$, and say that the height of $p_i$ is 0 if $p_i$ is minimal with respect to $\succ$, while otherwise $p_i$ has height 1. We can consider points of height 0 as lying on $\mathbb{F}_e$, while if $p_j \succ p_i$ then $p_j$ is infinitely near to $p_i$ and can be viewed as a tangent direction at $p_i$ on $\mathbb{F}_e$. For simplicity, we choose the partition $S_0$ and $S_1$ such that \begin{align*} i \in S_0 &\quad \mbox{ iff } p_i \mbox{ has height 0, while }\\ j \in S_1 &\quad \mbox{ iff } p_j \mbox{ has height }1. \end{align*} Thus, we think of $X$ as being obtained from $\mathbb{F}_e$ by first blowing up a collection of points $\{p_1,\dots,p_s \}$ on $\mathbb{F}_e$ and then blowing up a collection of points $\{ p_{s+1},\dots,p_t \}$ on the exceptional divisors of the first blowup. Let $E_i$ denote the total transform in $X$ of the exceptional divisor of $b_i$. We abuse notation by writing $A,B$ for the pullbacks of the divisors $A,B$ on $\mathbb{F}_e$. The Picard group of $X$ is generated by $A,B,E_1,\dots,E_t$, with the following intersections: \[ A^2=0, \quad A \cdot B=1, \quad B^2=-e, \quad A \cdot E_i=0, \quad B \cdot E_i=0, \quad E_i \cdot E_j=-\delta_{i,j}. \] Here, $\delta$ is the Kronecker delta function. The canonical divisor is \[ K_X = -(e+2)A-2B + \sum_{i=1}^t E_i. \] Let $\tilde{E}_i$ denote the strict transform of $E_i$. If $p_j$ has height 1, then $\tilde{E}_j = E_j$, while if $p_i$ has height 0, then $\tilde{E}_i = E_i - \sum_{j \colon p_j \succ p_i} E_j$. Note that $\tilde{E}_i^2 = -1 - \#\{ j \colon p_j \succ p_i \}$. 
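Indeed, writing $m_i=\#\{j\colon p_j\succ p_i\}$ and using the intersection numbers above,
\[\tilde{E}_i^2=\Big(E_i-\sum_{j\colon p_j\succ p_i}E_j\Big)^{2}=E_i^2+\sum_{j\colon p_j\succ p_i}E_j^2=-1-m_i,\]
since the cross terms $E_i\cdot E_j$ with $i\ne j$ all vanish; combined with $K_X\cdot\tilde{E}_i=-1+m_i$, the adjunction formula gives $\tilde{E}_i\cdot(\tilde{E}_i+K_X)=-2$, consistent with $\tilde{E}_i\cong\bp^1$.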
The following classification of base loci of certain linear systems on $X$ will be useful. When describing the linear systems below, we write a divisor in parentheses, as in $(D)$, to indicate that it is a fixed part of the linear system. \begin{lem}\label{lem:LS_base_locus} Suppose $D$ on $X$ is the pullback of an effective divisor on $\mathbb{F}_e$. Then if $p_j \succ p_i$, \[ |D-E_j| = |D-E_i| + (E_i-E_j). \] \end{lem} \begin{proof} Tensoring the short exact sequence \[ 0 \to \oo(-E_i+E_j) \to \oo \to \oo_{(E_i-E_j)} \to 0 \] by $\oo(D-E_j)$ and taking cohomology, we get an exact sequence \[ 0 \to H^0(\oo(D-E_i)) \to H^0(\oo(D-E_j)) \to H^0(\oo(D-E_j)|_{(E_i-E_j)}). \] As $\tilde{E}_i \cdot (D-E_j) = -\tilde{E}_i \cdot E_j = -1$, we see that $H^0(\oo(D-E_j)|_{(E_i-E_j)}) = 0$ as $(E_i-E_j)$ is a connected (possibly reducible) curve and every section of this line bundle must be 0 on the component $\tilde{E}_i$. \end{proof} \begin{rem}\label{rem:basepoints} If $D$ is a divisor on $\mathbb{F}_e$ and $p_i$ is a point of height 0, then the curves in the linear series $|D-E_i|$ on $X$ are in bijection with curves in $|D|$ on $\mathbb{F}_e$ that contain $p_i$. Similarly, if $p_j \succ p_i$, then curves in $|D-E_i-E_j|$ on $X$ are in bijection with curves in $|D|$ on $\mathbb{F}_e$ that contain $p_i$ and have tangent direction $p_j$ at $p_i$. The curves in the linear system on $X$ are obtained as pullbacks of the corresponding curves on $\mathbb{F}_e$, with one copy of the appropriate exceptional divisors removed. \end{rem} The linear systems on $X$ in the following examples will play an important role. We assume that $e>0$ and that the blowup avoids $B$. \begin{exmp}\label{ex:LS_fiber} If $j \in S_1$, let $i \in S_0$ denote the index such that $p_j \succ p_i$. Then \[ |A-E_j|=(A-E_i) + (E_i-E_j). \] Moreover, if $p_j$ is the tangent direction along the fiber $A$ at $p_i$, then \[ |A-E_j|=(A-E_i-E_j) + (E_i) \] The curve $(A-E_i)$ can be obtained by considering $b_i$ to be the first blown-up point, taking the strict transform of the unique fiber $A$ containing $p_i$ under $b_i$, and then taking the pullback of that strict transform under the remaining blowups, which may be reducible if other $p_i$ lie on that fiber or are infinitely near to points on that fiber. In particular, if $p_j$ is the tangent direction along the fiber $A$ at $p_j$, then $p_j$ lies on $(A-E_i)$, and taking the strict transform with respect to $b_{j}$ yields the curve $(A-E_i-E_j)$. \end{exmp} \begin{exmp}\label{ex:LS_section} For $j \in S_1$, let $i \in S_0$ denote the index such that $p_j \succ p_i$. Then \[ |C-E_j| = |C-E_i| + (E_i-E_j), \] where $|C-E_i|$ is basepoint-free. This follows from the fact that $|C|$ separates points on $\mathbb{F}_e$ (including infinitely near points) as long as the points are not contained on $B$, and by assumption the blown-up points are not on $B$. \end{exmp} \begin{exmp}\label{ex:LS_ample} For $i \in S_0$ and $j \in S_1$, the base locus of $|C+A-E_i-E_j|$ can be described as follows: \begin{enumerate}[(a)] \item If $p_j \succ p_i$, then $|C+A-E_i-E_j|$ is basepoint-free unless $p_j$ is the tangent direction along the fiber $A$ containing $p_i$, in which case \[ |C+A-E_i-E_j| = |C| + (A-E_i-E_j). \] \item If $p_j \not \succ p_i$, let $i' \in S_0$ denote the index such that $p_j \succ p_{i'}$. 
Then \[ |C+A-E_i-E_j|=|C+A-E_i-E_{i'}| + (E_{i'}-E_j), \] and $|C+A-E_i-E_{i'}|$ is basepoint-free unless $p_i$ and $p_{i'}$ lie on the same fiber $A$, in which case \[ |C+A-E_i-E_j|=|C| + (A-E_i-E_{i'}) + (E_{i'}-E_j). \] \end{enumerate} We explain the two parts. For (b), the linear system contains the union of $(A-E_i)$ and a curve in $|C-E_{i'}|$. As the latter is basepoint-free, the only possible basepoints are on $(A-E_i)$. By the same argument with the roles of $i$ and $i'$ reversed, we see that the linear system is basepoint-free unless $p_i$ and $p_{i'}$ lie on the same fiber. For (a), we note that if the linear system has a basepoint $p$, which we may assume is a point of $\mathbb{F}_e$, then the linear system of curves in $|C+A|$ that contain $p_i$ and $p$ must have $p_j$ as an (infinitely near) basepoint, which, by a similar argument as the one for (b), implies that $p_i$ and $p$ lie on the same fiber and $p_j$ is the tangent direction along that fiber. \end{exmp} Some of the calculations in later sections are simplified if the linear systems $|C+A-E_i-E_j|$ for $p_j \succ p_i$ are basepoint-free. For this purpose, we will often assume that \begin{equation}\label{cond:avoid-fiber-dir}\mbox{the blowup avoids fiber directions}\end{equation} in the sense that $p_{s+1},\dots,p_t$ are distinct from the point where the strict transform of the fiber $A$ containing $p_i$ meets the exceptional divisor of $b_i$, for all $i \in S_0$. We summarize the assumptions on $X$ in the following definition: \begin{defn}\label{defn:adm-blowup} The rational surface $X$ is an {\em admissible blowup} of $\mathbb{F}_e$ if it is an at most two-step blowup and the following conditions hold: \begin{itemize} \item if $e=0$, then $S_0=S_1 = \emptyset$; \item if $e>0$, then the blowup avoids $B$ and avoids fiber directions. \end{itemize} \end{defn} We emphasize that $S_0$ or $S_1$ can be empty. In particular, the definition includes $\mathbb{F}_e$. Then, by the above discussion and particularly Example \ref{ex:LS_ample}, we have shown that if $X$ is admissible, then every divisor in the set \begin{equation} \label{eq:bpf-divisors} \mathcal{D} = \{A,C\} \cup \{ C-E_i \mid i \in S_0 \} \cup \{C+A-E_i-E_j \mid p_j \succ p_i \} \end{equation} is basepoint-free. This leads to the following result. \begin{prop}\label{prop:very-ample} Suppose $X$ is an admissible blowup of $\mathbb{F}_e$. Suppose $L$ is the line bundle associated to a positive integral linear combination of all divisors in the set \[ \mathcal{D} = \{A,C\} \cup \{ C-E_i \mid i \in S_0 \} \cup \{C+A-E_i-E_j \mid p_j \succ p_i \}. \] Then $L$ is very ample. \end{prop} \begin{proof} The linear system associated to $L$ contains unions of divisors in the linear systems associated to the divisors in $\mathcal{D}$, so since these divisors are all basepoint-free, it suffices to show that the divisors in $D$ collectively separate points and tangents on $X$ in the following sense: \begin{enumerate}[1)] \item For any distinct points $q_1,q_2 \in X$, there is a divisor $D$ linearly equivalent to a divisor in $\mathcal{D}$ such that $q_1 \in D$ and $q_2 \notin D$; \item For any $q \in X$, there are divisors $D_1,D_2$ that contain $q$, are each linearly equivalent to a divisor in $\mathcal{D}$, and whose images in $\mathfrak{m}_q/\mathfrak{m}_q^2$ are linearly independent. 
\end{enumerate} For 1), $|C|$ can be used to separate points on the complement of $B \cup \bigcup_{i \in S_0} E_i$ since it is very ample there, $|A|$ can separate two points on $B$, $|C-E_i|$ can separate two points on $\tilde{E}_i$, and $|C+A-E_i-E_j|$ separates any two points on $E_j$. Separating points on different exceptional curves or a point on the exceptional locus from a point on the complement is similarly easy. For 2), as $C$ is very ample on the complement of $B \cup \bigcup_{i \in S_0} E_i$, it suffices to consider the cases $q \in D$, where $D$ is $B$, $\tilde{E}_i$ for $i \in S_0$, or $E_j$ for $j \in S_1$. In each case, it suffices to choose $D_1$ transversal to $D$ at $q$ and $D_2$ which is a union of $D$ with another divisor that does not contain $q$. These divisors can be chosen to be general in the following linear subsystems: \begin{table}[H] \centering \begin{tabular}{c|c|c} & $D_1$ & $D_2$ \\ \hline $q \in B$ & $|A|$ & $(B) + |eA| \subset |C|$ \\ $q \in E_i \setminus \bigcup_{j \colon p_j \succ p_i} E_j$ & $|C-E_i|$ & $(E_i) + |C-E_i| \subset |C|$ \\ $q \in E_j$ & $|C+A-E_i-E_j|$ & $(E_j) + |C-E_i-E_j| \subset |C-E_i|$ \end{tabular} \end{table} \noindent This completes the proof. \end{proof} As very ample is equivalent to 1-very ample and $m$-very ampleness is additive under tensor products \cite{HTT05}, we immediately see that if $L$ is the line bundle associated to a positive integral linear combination of all divisors in $\mathcal{D}$ in which the weight of each divisor is $\geqslant m$, then $L$ is $m$-very ample. \subsection{Cohomology of line bundles}\label{subsect:coh-line-bdl} First, we summarize how to calculate the cohomology groups of line bundles on the Hirzebruch surface $\mathbb{F}_e$, following \cite{CosHui18weakBN}. By Hirzebruch-Riemann-Roch, \[\chi(\oo(aA+bB))=(a+1)(b+1)-\frac{1}{2}eb(b+1).\] Since the effective cone of $\mathbb{F}_e$ is generated by $A$ and $B$, \[H^0(\oo(aA+bB))\not=0 \quad \mbox{if and only if}\quad a,b\geqslant 0.\] Then Serre duality implies that \[H^2(\oo(aA+bB))\not=0\quad \mbox{if and only if}\quad a\leqslant -(e+2)\mbox{ and }b\leqslant -2.\] It suffices to assume that $b\geqslant -1$, as other cases can then be obtained via Serre duality. In this case, as $h^2$ vanishes and the Euler characteristic is known, it suffices to calculate $h^0$, which can be done as follows: \begin{enumerate}[(a)] \item $h^i(\oo(aA-B))=0$, for all $i$ and $a$. \item $h^0(\oo(aA))=a+1$ if $a\geqslant -1$ and $0$ otherwise. \item Let $b\geqslant 1$. If $a \geqslant be-1$, then \[h^0(\oo(aA+bB))=\chi(\oo(aA+bB)), \] while if $a \leqslant be-2$, then the equality \[h^0(\oo(aA+bB)=h^0(\oo(aA+(b-1)B))\] allows $h^0$ to be determined by induction on $b$. \end{enumerate} In particular, we deduce the following: \begin{lem}\label{lem:lb-van-higher-cohom} Let $L = \oo_{\mathbb{F}_e}(aA + bB)$. Then \begin{enumerate}[(a)] \item $H^2(L)=0$ if and only if $b \geqslant -1$ or $a \geqslant -1-e$; \item $H^1(L) = 0$ if $b=-1$, if $b=0$ and $a \geqslant -1$, or if $b \geqslant 1$ and $a \geqslant be-1$. \end{enumerate} \end{lem} In order to use these calculations on $\mathbb{F}_e$ to obtain information about the cohomology of line bundles on $X$ a two-step blowup of $\mathbb{F}_e$, we use the following general result. \begin{lem}\label{lem:line-bundle-pullback}Let $\pi\colon \tilde{Y}\to Y$ be a blowup of a smooth projective surface at distinct (possibly infinitely near) points. For a line bundle $L$ on $Y$, $H^i(\tilde{Y},\pi^*L)\cong H^i(Y, L)$. 
\end{lem} \begin{proof} According to \cite[V. Proposition 3.4]{Har77}, $\pi_*\oo_{\tilde{Y}}\cong \oo_Y$ and $R^i\pi_*\oo_{\tilde{Y}}=0$ for $i>0$. Then $\pi_*\pi^*L\cong L$ and $R^i\pi_*(\pi^*L)=0$ for $i>0$ by the projection formula. The spectral sequence $H^i(Y,R^j\pi_*(\pi^*L))\Rightarrow H^{i+j}(\tilde{Y},\pi^*L)$ gives the result. \end{proof} Then, letting $X$ denote a two-step blowup of $\mathbb{F}_e$, we have: \begin{lem}\label{lem:reduce-higher-cohom-blowup} Let $L$ be a line bundle on $X$ such that $L|_{E_i} \cong \oo_{E_i}$ for some $1 \leqslant i \leqslant t$. Then \begin{enumerate}[(a)] \item $H^p(L(E_i)) \cong H^p(L)$ for all $p$; \item $H^2(L(-E_i)) \cong H^2(L)$; \item If the base locus of $|L|$ does not contain $E_i$, then $H^1(L(-E_i)) \cong H^1(L)$. \end{enumerate} \end{lem} \begin{proof} For (a), consider the short exact sequence \[ 0 \to L \to L(E_i) \to L(E_i)|_{E_i} \cong \oo_{E_i}(-1) \to 0. \] Since $\oo_{E_i}(-1) \cong \oo_{\bp^1}(-1)$ has no cohomology, we get the result. For (b) and (c), consider the short exact sequence \[ 0 \to L(-E_i) \to L \to L|_{E_i} \cong \oo_{E_i} \to 0. \] Since $H^2(\oo_{E_i})=0$, we immediately obtain (b). If the base locus of $|L|$ does not contain $E_i$, then $H^0(L) \to H^0(\oo_{E_i}) \cong k$ is surjective, which gives the result since $H^1(\oo_{E_i})=0$. \end{proof} \section{Exceptional sequences}\label{sect:exc-sequence} We review basic facts about Hom functors and exceptional sequences in the bounded derived category of a smooth projective variety $Y$ over $k$. In particular, we discuss how to replace a general complex with a complex built from a strong full exceptional sequence $\mathfrak{E}$, which we call an $\mathfrak{E}${\em -complex}. \subsection{Hom functors} If $A^{\bullet}$ and $B^{\bullet}$ are bounded complexes of coherent sheaves, then $\Hom^{\bullet}(A^{\bullet},B^{\bullet})$ is the complex of vector spaces defined by \[ \Hom^i(A^{\bullet},B^{\bullet}) = \bigoplus_q \Hom(A^q,B^{q+i}) \quad \text{and} \quad d(f) = d_B \circ f - (-1)^i f \circ d_A. \] The degree-0 cohomology of this complex is the vector space of chain maps $A^{\bullet} \to B^{\bullet}$ modulo chain homotopy. This complex is especially useful when it computes the derived functor \[ R\Hom(A^{\bullet},B^{\bullet}) = \bigoplus_{j \in \mathbb{Z}} \Hom(A^{\bullet},B^{\bullet}[j]), \] as in the lemma below. The graded summands $R^j\Hom(A^{\bullet},B^{\bullet}) = \Hom(A^{\bullet},B^{\bullet}[j])$ are denoted $\Ext^j(A^{\bullet},B^{\bullet})$. In the case when $A^{\bullet}$ and $B^{\bullet}$ are sheaves $A$ and $B$ in degree 0, \[ R\Hom(A,B) = \bigoplus_{j\geqslant 0} \Ext^j(A,B)[-j] \] consists of the usual $\mathrm{Ext}^j$ groups for sheaves in each degree $j$. Similarly, $\lHom^{\bullet}(A^{\bullet},B^{\bullet})$ is defined by \[ \lHom^i(A^{\bullet},B^{\bullet}) = \bigoplus_q \lHom(A^q,B^{q+i}) \quad \text{and} \quad d(f) = d_B \circ f - (-1)^i f \circ d_A. \] If either ${A^\bullet}$ or $B^{\bullet}$ is a complex of locally free sheaves, then $\lHom^{\bullet}(A^{\bullet},B^{\bullet})$ represents $R\lHom(A^{\bullet},B^{\bullet})$. We also recall a few facts about derived functors. The cohomology groups of $R \Gamma$, often denoted $\mathbb{H}^i$, are called hypercohomology, and hypercohomology of a sheaf is just sheaf cohomology. The derived functor $R \Hom$ has the property that $R^i\Hom(A^{\bullet},B^{\bullet}) = \Hom(A^{\bullet},B^{\bullet}[i])$. 
Moreover, $R \Gamma R\lHom = R(\Gamma \circ \lHom) = R\Hom$ and there is a spectral sequence \[ E_1^{p,q}= H^p(A^q) \implies \mathbb{H}^{p+q}(A^{\bullet}). \] See \cite{Huy06} for more details. \begin{lem} \label{lem:chain-maps} If $A^{\bullet}$ and $B^{\bullet}$ are any complexes composed of locally free sheaves such that all higher Exts between $A^i$ and $B^j$ vanish for all $i,j$, then $\Hom^{\bullet}(A^{\bullet},B^{\bullet})$ computes $R\Hom(A^{\bullet},B^{\bullet})$. In particular, $\Hom(A^{\bullet},B^{\bullet})$ is the space of chain maps $A^{\bullet} \to B^{\bullet}$ modulo chain homotopy. \end{lem} \begin{proof} We calculate $R\Hom(A^{\bullet},B^{\bullet})$ as follows. First, we represent $R\lHom(A^{\bullet},B^{\bullet})$ by $\lHom(A^{\bullet},B^{\bullet})$. Then, since $A^i$ and $B^j$ have no higher Exts between them, $\lHom(A^i,B^j)$ has no higher cohomology, so by the above spectral sequence we can calculate $R\Gamma R\lHom(A^{\bullet},B^{\bullet})$ simply as $\Gamma \lHom(A^{\bullet},B^{\bullet}) = \Hom^{\bullet}(A^{\bullet},B^{\bullet})$. Thus, the complex $\Hom^{\bullet}(A^{\bullet},B^{\bullet})$ represents $R\Hom(A^{\bullet},B^{\bullet})$, which gives the result. \end{proof} \subsection{Exceptional sequences} The material reviewed in this section can be found in \cite{GK04}. \begin{defn} An object $\mathcal{E}\in \db(Y)$ is {\em exceptional} if \[\Hom(\E,\E[\ell])=\begin{cases} \basefield, &\ell=0 \\ 0, & \text{otherwise}.\end{cases}\] An {\em exceptional sequence} is a sequence $(\E_1,\dots,\E_n)$ of exceptional objects such that \[\Hom(\E_i,\E_j[\ell])= 0, \mbox{ for } i>j \mbox{ and all } \ell.\] It is {\em strong} if in addition \[\Hom(\E_i,\E_j[\ell])=0, \mbox{ for all }i,j \mbox{ and }\ell\not=0.\] It is {\em full} if $\{\E_i\}_{i=1}^n$ generates $\db(Y)$ as a triangulated category. \end{defn} Let $\mathfrak{E}=(\E_1,\dots,\E_{n})$ be a strong full exceptional sequence of locally free sheaves on $Y$. The full triangulated subcategories $\langle \E_i \rangle$ generated by individual $\E_i$ yield a semi-orthogonal decomposition of $D^b(Y)$. Thus, for each object $T$ in $D^b(Y)$, there is a diagram of morphisms \begin{equation}\label{eq:filtration}\xymatrix{ & C_{n} \ar[dl] && C_{n-1} \ar[dl] & \cdots & C_2 \ar[dl] && C_1 \ar[dl] \\ T=T_n \ar[rr] && T_{n-1} \ar[rr] \ar[ul]_{[1]} && \cdots \ar[rr] \ar[ul]_{[1]} && T_1 \ar[rr] \ar[ul]_{[1]} && T_0 \cong 0 \ar[ul]_{[1]} }\end{equation} in which each triangle $C_i \to T_i \to T_{i-1}$ is distinguished and $C_i$ is in $\langle \E_i \rangle$. Each $T_{i-1}$ can be constructed as the left mutation of $T_i$ through $\E_i$, namely as the cone \[ R\Hom(\E_i,T_i) \otimes \E_i \to T_i \to T_{i-1}. \] We call $C_i = R\Hom(\E_i,T_i) \otimes \E_i$ the \emph{factors} of $T$ with respect to the exceptional sequence. The diagram is functorial and in particular, the factors of $T$ are unique up to isomorphism. Using the diagram, the factors of $T$ can be assembled to produce a complex isomorphic to $T$. We define an \emph{$\mathfrak{E}$-complex} to be a bounded complex $A^{\bullet}$ such that each $A^i$ is a direct sum of sheaves in the exceptional sequence. \begin{lem}\label{lem:E-cpx} $T$ is isomorphic to an $\mathfrak{E}$-complex whose sheaves are the same as in the complex $\bigoplus_{i=1}^n C_i$ (but with different maps). \end{lem} \begin{proof} We prove that each $T_i$ is isomorphic to an $\mathfrak{E}$-complex by induction on $i$. Assume $T_{i-1}$ is isomorphic to an $\mathfrak{E}$-complex $A^{\bullet}$. 
By the previous lemma, the morphism $T_{i-1}[-1] \to C_i$ can be represented by a chain map $A^{\bullet}[-1] \to C_i$, whose mapping cone is an $\mathfrak{E}$-complex whose sheaves are the same as $A^{\bullet} \oplus C_i$ and which represents $T_i$. \end{proof} \begin{exmp} Suppose $n=3$ and $T$ is a sheaf in degree 0 that has a resolution $0 \to \E_1^{a_1} \to \E_2^{a_2} \oplus \E_3^{a_3} \to T \to 0$. Then the diagram (\ref{eq:filtration}) can be realized as \[\xymatrix{ & \E_3^{a_3} \ar[dl] && \E_2^{a_2} \ar[dl] & & \E_1^{a_1}[1] \ar[dl] \\ T \cong [\E_1^{a_1} \to \E_2^{a_2} \oplus \E_3^{a_3}] \ar[rr] && [\E_1^{a_1} \to \E_2^{a_2}] \ar[rr] \ar[ul]_{[1]} && \E_1^{a_1}[1] \ar[rr] \ar[ul]_{[1]} && 0 \ar[ul]_{[1]} }\] \end{exmp} The proof gives an inductive algorithm for assembling the factors of $T$ into an $\mathfrak{E}$-complex isomorphic to $T$. We say that an $\mathfrak{E}$-complex $A^{\bullet}$ is \emph{minimal} if, among all $\mathfrak{E}$-complexes isomorphic to $A^{\bullet}$, the total number of sheaves in the complex is as small as possible. If each $C_i$ is represented by the minimal $\mathfrak{E}$-complex described above, then the complex obtained from this algorithm is minimal as well, and we call it the \emph{minimal $\mathfrak{E}$-complex} of $T$. By Lemma \ref{lem:chain-maps}, it is unique up to quasi-isomorphism. Using $\mathfrak{E}$-complexes to represent objects in the derived category is useful because the sheaves in an exceptional sequence have no higher Exts between them, so Lemma $\ref{lem:chain-maps}$ implies that any morphism between two objects represented by $\mathfrak{E}$-complexes can be realized as a chain map between the $\mathfrak{E}$-complexes. There is a direct way to identify the factors of $T$ by making use of the dual of the exceptional sequence. The (left) \emph{dual} of a full exceptional sequence $(\E_1,\dots,\E_{n})$ is a full exceptional sequence $(\dE_n,\dots,\dE_1)$ with the property that \begin{equation}\label{eq:dual-excl-seq} \Hom(\dE_i,\E_j[\ell]) = \begin{cases} k, & \text{if $\ell=0$ and $i=j$;} \\ 0, & \text{otherwise.} \end{cases} \end{equation} The dual sequence always exists, can be constructed from $(\E_1,\dots,\E_n)$ by mutations, and is characterized up to isomorphism by (\ref{eq:dual-excl-seq}). \begin{lem}\label{lem:cohom-discernible} Suppose $(\E_1,\dots,\E_n)$ is a strong full exceptional sequence and $(\dE_n,\dots,\dE_1)$ is its dual. Then the factors $C_i$ of an object $T$ satisfy \[ C_i \cong R\Hom(\dE_i,T) \otimes \E_i. \] \end{lem} \begin{proof} Let $A^{\bullet}$ be the minimal $\mathfrak{E}$-complex of $T$, whose sheaves are the same as $\bigoplus_i C_i$. Then, as the higher Exts between $\dE_i$ and the sheaves in $(\E_1,\dots,\E_n)$ vanish, $\Hom^{\bullet}(\dE_i,A^{\bullet})$ computes $R\Hom(\dE_i,T)$. Thus, by property (\ref{eq:dual-excl-seq}), \[ R\Hom(\dE_i,T) \cong \Hom^{\bullet}(\dE_i,C_i) \cong R\Hom(\E_i,T_i), \] and tensoring by $\E_i$ gives the result. \end{proof} \subsection{Main examples of strong full exceptional sequences}\label{ss:ex-sfes} In later sections, we will focus on the following choices of strong full exceptional sequences of line bundles on $\bp^2$, Hirzebruch surfaces, and, more generally, two-step blowups of Hirzebruch surfaces. \begin{enumerate}[(a)] \item On $\bp^2$, the exceptional sequence $(\oo(-2),\oo(-1),\oo)$ is strong and full (see \cite[Corollary 8.29, Exercise 8.32]{Huy06} for a more general result on $\bp^n$). 
\item On $\mathbb{F}_e$, the exceptional sequence $(\oo(-C-A),\oo(-C),\oo(-A),\oo)$ is strong and full (\cite{Orl92projective-bundles}, \cite{hille-perling-exceptional11} Proposition 5.2). Note that the exceptional and strong properties can easily be checked using Lemma \ref{lem:lb-van-higher-cohom}. \item On $X$ a two-step blowup of $\mathbb{F}_e$, consider the exceptional sequence \begin{multline}\label{sequence-blowup} \oo(-C-A),\oo(-C-A+E_{s+1}),\dots, \oo(-C-A+E_{t}) ,\\ \oo(-C),\oo(-A), \oo(-E_1),\dots , \oo(-E_s),\oo. \end{multline} This sequence is obtained from the sequence on $\mathbb{F}_e$ in (b) by standard augmentations, so it is strong and full (\cite{hille-perling-exceptional11} Theorem 5.8). The exceptional and strong properties can easily be checked by using Lemma \ref{lem:reduce-higher-cohom-blowup} to reduce to calculations on $\mathbb{F}_e$. Note that this example specializes to (b) by allowing the set of blown-up points to be empty. \end{enumerate} The dual sequences are as follows and can be verified by checking (\ref{eq:dual-excl-seq}): \begin{itemize} \item On $\bp^2$, the dual exceptional sequence is \[ \oo, T(-1)[-1], \oo(1)[-2], \] where $T$ is the tangent sheaf, which can be checked by Bott's formula \cite{Bot57}. \item On $\mathbb{F}_e$, the dual exceptional sequence is \[ \oo, \oo(A)[-1], \oo(B)[-1], \oo(A+B)[-2], \] which follows from the calculations in \S~\ref{subsect:coh-line-bdl}. \item On the two-step blowup of $\mathbb{F}_e$, the dual exceptional sequence is \begin{align*} &\oo, \oo_{E_s}[-1],\dots,\oo_{E_1}[-1], \oo(A - \sum_{i \in S_0} E_i)[-1], \oo(B - \sum_{i \in S_0} E_i)[-1], \\ & \qquad \qquad \qquad \oo_{E_t}[-2],\dots,\oo_{E_{s+1}}[-2],\oo(A+B - \sum_{i \in S_0} E_i - \sum_{j \in S_1} E_j)[-2]. \end{align*} This can be seen by using the fact that $\oo(-C-2A-B + \sum_{i \in S_0} E_i + \sum_{j \in S_1} E_j) \cong K_X$, the short exact sequences $0 \to \oo(-E_i) \to \oo \to \oo_{E_i} \to 0$, and Lemma \ref{lem:reduce-higher-cohom-blowup}(a) to reduce to the calculations on $\mathbb{F}_e$. \end{itemize} \section{Gaeta resolutions}\label{sect:gtr} We study two-step resolutions of coherent sheaves by the exceptional sheaves in the previous section. We call such resolutions {\em Gaeta resolutions}. We provide a general criterion and a criterion specialized for rational surfaces that detect when a sheaf admits a Gaeta resolution. We classify numerical classes over two-step blowups of $\mathbb{F}_e$ of sheaves admitting Gaeta resolutions, including the case of allowing a twist by a line bundle on $\mathbb{F}_e$. These results lay the foundation for our applications of Gaeta resolutions in later sections. \subsection{Definition of Gaeta resolutions}\label{subsect:def-gaeta} Let $\mathfrak{E}=(\E_1,\dots,\E_n)$ be a strong full exceptional sequence on a smooth projective variety $Y$ over $k$. We are particularly interested in minimal $\mathfrak{E}$-complexes of the following form. \begin{defn}\label{defn:gaeta} For a coherent sheaf $F$, a resolution of $F$ of the form \[ 0 \to \E_1^{a_1} \oplus \cdots \oplus \E_d^{a_d} \to \E_{d+1}^{a_{d+1}} \oplus \cdots \oplus \E_{n}^{a_{n}} \to F \to 0 \] is a \emph{Gaeta resolution}. If the minimal $\mathfrak{E}$-complex of $F$ is of this form, then we say that $F$ \emph{admits a Gaeta resolution}. The non-negative integers $a_1,\dots,a_n$ are called the \emph{exponents} of the Gaeta resolution. \end{defn} Clearly, the exponents $a_i$ determine the numerical class of $F$. 
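For example, on $\bp^2$ with the sequence $(\E_1,\E_2,\E_3)=(\oo(-2),\oo(-1),\oo)$ of \S~\ref{ss:ex-sfes}(a), a Gaeta resolution with exponents $(a_1,a_2,a_3)=(2,4,0)$, i.e.
\[0\to\oo(-2)^{2}\to\oo(-1)^{4}\to F\to 0,\]
forces $\rk F=4-2=2$, $c_1(F)=-4+2\cdot 2=0$ and $\chi(F)=0$, hence $c_2(F)=2$; one can check that the cokernel of a general such map is a locally free sheaf with exactly these invariants.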
Conversely, the class of $F$ determines the exponents inductively using semi-orthogonality as \[ a_i = \begin{cases} \chi(\E_i,F) - \sum_{j=i+1}^n a_j \hom(\E_i,\E_j) & \text{for $i > d$;} \\ -\chi(F,\E_i) - \sum_{j = 1}^{i-1} a_j \hom(\E_j,\E_i) & \text{for $i \leqslant d$}. \end{cases} \] The dual sequence $(\dE_n,\dots,\dE_1)$ can be used to obtain a criterion for when a sheaf admits a Gaeta resolution. \begin{prop}[General criterion] \label{prop:general-GTR-criterion} Let $F$ be a coherent sheaf. Then $F$ admits a Gaeta resolution if and only if \[ \Hom(\dE_i[j],F) \] vanishes for all $i$ and $j$ except possibly for \begin{align*} &\Hom(\dE_i,F), \quad d+1 \leqslant i \leqslant n; \\ &\Hom(\dE_i[1],F), \quad 1 \leqslant i \leqslant d. \end{align*} Moreover, if $F$ admits a Gaeta resolution, then the exponents are \[ a_i = \begin{cases} \hom(\dE_i,F), & d+1 \leqslant i \leqslant n; \\ \hom(\dE_i[1],F), & 1 \leqslant i \leqslant d. \end{cases} \] \end{prop} \begin{proof} By Lemma \ref{lem:cohom-discernible} and (\ref{eq:dual-excl-seq}), the condition on $\Hom(\dE_i[j],F)$ is equivalent to $C_i$ being a direct sum of copies of $\E_i$ in degree 0 if $d+1 \leqslant i \leqslant n$ and in degree $-1$ if $1 \leqslant i \leqslant d$. By Lemma \ref{lem:E-cpx}, this proves the first statement. The second statement also follows from the calculation of the $C_i$. \end{proof} \subsection{Gaeta resolutions on rational surfaces} In the context of the strong full exceptional sequences in \S~\ref{ss:ex-sfes}, we will focus on the following Gaeta resolutions. \begin{exmp}\label{exmp:gaeta} \begin{enumerate}[(a)] \item On $\bp^2$, we consider Gaeta resolutions of the form \begin{align}\label{gtr-P2} 0 \to \oo(-2)^{\alpha_1} \to \oo(-1)^{\alpha_2} \oplus \oo^{\alpha_3} \to & F \to 0. \end{align} \item On $X$ a two-step blowup of $\mathbb{F}_e$, we consider Gaeta resolutions of the form \begin{align}\label{gtr-blowup} 0 \to \oo(-C-A)^{\alpha_1} &\oplus \bigoplus_{j \in S_1} \oo(-C-A+E_j)^{\gamma_j} \nonumber \\ &\to \oo(-C)^{\alpha_2} \oplus \oo(-A)^{\alpha_3} \oplus \bigoplus_{i \in S_0} \oo(-E_i)^{\gamma_i} \oplus \oo^{\alpha_4} \to F \to 0. \end{align} \end{enumerate} Note that in the case when the set of blown-up points is empty, (\ref{gtr-blowup}) specializes to the Gaeta resolutions on $\mathbb{F}_e$ that were considered in \cite{CosHui18weakBN}. \end{exmp} For these examples, by applying the general criterion for having a Gaeta resolution (Proposition \ref{prop:general-GTR-criterion}) with the explicit dual sequences in \S~\ref{ss:ex-sfes}, we deduce a more explicit criterion. \begin{prop}\label{prop:special-GTR-criterion} \begin{enumerate}[(a)] \item On $\bp^2$, a sheaf $F$ admits a Gaeta resolution of the form (\ref{gtr-P2}) if and only if \[ \text{$H^p(F)=0$ for $p \ne 0$, \quad $H^p(F(-1)) = 0$ for $p \ne 1$, \quad and \quad $\Hom(T(-1),F)=0$.} \] \item On $X$ a two-step blowup of $\mathbb{F}_e$, a torsion-free sheaf $F$ admits a Gaeta resolution of the form (\ref{gtr-blowup}) if and only if \begin{enumerate}[(i)] \item $H^p(F)$ vanishes for $p \ne 0$; \item $H^p(F(D))$ vanishes for $p \ne 1$ and $D$ the divisors $-A+\sum_{i \in S_0} E_i$, $-B+\sum_{i \in S_0} E_i$, and $-A-B+\sum_{i \in S_0} E_i + \sum_{j \in S_1} E_j$; and \item $H^1(F(E_i))=0$ and $H^1(F(E_j)) = 0$ for all $i \in S_0$ and $j \in S_1$.
\end{enumerate} \end{enumerate} \end{prop} \begin{proof} For (a), Proposition \ref{prop:general-GTR-criterion} includes the first two conditions as well as the condition that $\Ext^p(T(-1),F)$ vanishes for $p=0,2$. Applying $\Hom(-,F)$ to the short exact sequence $0 \to T(-1) \to \oo(1)^3 \to \oo(2) \to 0$ and using the vanishing of $H^2(F(-1))$ shows that the first two conditions already guarantee $\Ext^2(T(-1),F)=0$. For (b), Proposition \ref{prop:general-GTR-criterion} includes (i) and (ii) as well as the condition that $\Ext^p(\oo_{E_i},F)$ vanishes for $p=0$ and $p=2$ for all $i \in S_0 \cup S_1$. The vanishing $\Hom(\oo_{E_i},F)=0$ is guaranteed since $F$ is torsion-free, while applying $\Hom(-,F)$ to the short exact sequence $0 \to \oo(-E_i) \to \oo \to \oo_{E_i} \to 0$ and using $H^1(F)=H^2(F)=0$ shows that $\Ext^1(\oo(-E_i),F) \cong \Ext^2(\oo_{E_i},F)$, hence $H^1(F(E_i))=0$ is an equivalent condition. \end{proof} \subsection{Chern characters and Gaeta resolutions}\label{ss:chern-characters} Recall that the exponents in the Gaeta resolutions are determined by the numerical class of $F$. The results in this subsection classify numerical classes that arise as cokernels of Gaeta resolutions. First, we review some useful numerical invariants. On a smooth projective surface $S$ over $k$, if $F$ is a coherent sheaf of positive rank $r$, first Chern class $c_1$, and second Chern character $\ch_2$, set \[ \nu = \frac{c_1}{r} \qquad \text{and} \qquad \Delta = \frac{1}{2} \nu^2 - \frac{\ch_2}{r}, \] which are called the $\emph{total slope}$ and $\emph{discriminant}$, respectively, of $F$. A simple calculation shows that the discriminant of a line bundle is 0 and the discriminant of $F$ is unchanged when $F$ is tensored by a line bundle. Using these invariants, the second Chern class of $F$ can be written as \[ c_2 = \binom{r}{2} \nu^2 + r \Delta. \] Set $P(\nu) = \chi(\oo_S)+\frac{1}{2} \nu(\nu-K_S)$. Then by Riemann-Roch the Euler characteristic can be written as \[ \chi(F) = r(P(\nu)-\Delta), \] and similarly, if $F_1,F_2$ are sheaves and $r_i$, $\nu_i$, $\Delta_i$ are the rank, total slope, and discriminant of $F_i$, then the Euler pairing is \begin{equation}\label{eq:euler-pair} \chi(F_1,F_2) = r_1 r_2(P(\nu_2-\nu_1) - \Delta_1 - \Delta_2). \end{equation} On $X$ the two-step blowup of a Hirzebruch surface, writing $c_1 = \alpha A + \beta B - \sum_{i \in S_0} \gamma_i E_i - \sum_{j \in S_1} \gamma_j E_j$, we compute \[ P(\nu) = \left( \frac{\alpha}{r}+1- \frac{e \beta}{2r} \right) \left( \frac{\beta}{r}+1 \right) - \frac{1}{2} \sum_{i \in S_0 \cup S_1} \frac{\gamma_i}{r}\left( \frac{\gamma_i}{r}+1 \right). \] We write the numerical class of a sheaf as a triple $(r,c_1,\chi)$ in which $r$ is a non-negative integer, $c_1$ is an integral divisor class, and $\chi$ is an integer. We say that a numerical class $f = (r,c_1,\chi)$ \emph{admits Gaeta resolutions} if there is a sheaf $F$ of rank $r$, first Chern class $c_1$, and Euler characteristic $\chi$, such that $F$ admits a Gaeta resolution. For a sheaf $F$ of class $f$ and a line bundle $L$, we denote the class of $F\otimes L$ as $f(L)$, and we write $c_2(f)$, $\nu(f)$ and $\Delta(f)$ for the second Chern class, total slope, and discriminant of $F$, which depend only on $f$. \begin{prop}\label{prop:chern-has-gr} On $X$ a two-step blowup of $\mathbb{F}_e$, consider the numerical class \[ f=\Big(r,\alpha A + \beta B - \sum_{i \in S_0 \cup S_1} \gamma_i E_i, \chi \Big) \] of positive rank. 
Then \begin{enumerate}[(a)] \item $f$ admits Gaeta resolutions (\ref{gtr-blowup}) if and only if $\gamma_i$, $\gamma_j$, $\alpha_4:=\chi$, and the following three integers are all $\geqslant 0$: \begin{align*} \alpha_1 &:= -\chi\Big(f\Big(-A-B+ \sum_{i \in S_0} E_i + \sum_{j \in S_1} E_j\Big)\Big) \\ \alpha_2 &:= -\chi\Big(f\Big(-B+\sum_{i \in S_0} E_i\Big)\Big) \\ \alpha_3 &:= -\chi\Big(f\Big(-A+\sum_{i \in S_0} E_i\Big)\Big) \label{eq:chi-ineq-blowup} \end{align*} \item Assume $\gamma_i \geqslant 0$ and $\gamma_j \geqslant 0$. If the discriminant $\Delta(f)$ is sufficiently large, then there is a line bundle $L$ pulled back from $\mathbb{F}_e$ such that $f(L)$ admits Gaeta resolutions. \end{enumerate} \end{prop} \begin{proof} For (a), assuming $f$ admits Gaeta resolutions, by comparing first Chern classes, the exponent of $\oo(-C-A+E_j)$ must be $\gamma_j$ and the exponent of $\oo(-E_i)$ must be $\gamma_i$. The remaining exponents can be easily calculated. Conversely, the inequalities show that we can define the exponents in the same way, and a simple calculation shows that the numerical class of the cokernel must be $f$. For (b), consider the numerical class $f' = \Big(r, \alpha A + \beta B, \chi + \sum_{i \in S_0} \gamma_i \Big)$ on $\mathbb{F}_e$. An elementary calculation shows that \[ \Delta(f') = \Delta(f) + \sum_{i \in S_0} \binom{\gamma_i/r}{2} + \sum_{j \in S_1} \binom{(\gamma_j/r)+1}{2}. \] Let $M = 1 + \max(\sum_{i \in S_0} \gamma_i, \sum_{j \in S_1} \gamma_j)/r$. Since $\Delta(f') \geqslant \Delta(f) \gg M$, the following lemma ensures that we can choose a line bundle $L$ on $\mathbb{F}_e$ such that $f'(L)$ admits Gaeta resolutions \[ \oo(-C-A)^{\alpha_1'} \to \oo(-C)^{\alpha_2'} \oplus \oo(-A)^{\alpha_3'} \oplus \oo^{\alpha_4'} \] in which $\alpha_4' = \chi(f'(L)) \geqslant rM = r + \max\Big(\sum_{i \in S_0} \gamma_i, \sum_{j \in S_1} \gamma_j\Big)$. Here, the inequality follows from the following lemma. Hence, $\alpha_1' \geqslant \max(\sum_{i \in S_0} \gamma_i,\sum_{j \in S_1} \gamma_j)$ as well by comparing ranks. Then a simple calculation shows that the cokernels of Gaeta resolutions \[ \oo(-C-A)^{\alpha_1' - \sum_j \gamma_j} \oplus \bigoplus_{j \in S_1} \oo(-C-A+E_j)^{\gamma_j} \to \oo(-C)^{\alpha_2'} \oplus \oo(-A)^{\alpha_3'} \oplus \bigoplus_{i \in S_0} \oo(-E_i)^{\gamma_i} \oplus \oo^{\alpha_4'-\sum_i \gamma_i} \] have numerical class $f(L)$. \end{proof} \begin{lem} On $\mathbb{F}_e$, fix a rank $r > 0$ and a first Chern class $c_1$ and consider the numerical class $f = (r,c_1,\chi)$. Let $M$ be a positive real number. Then there are constants $C_1,C_2,C_3$ depending only on $e$ such that for all $\chi$ such that \[ \Delta(f) \geqslant \frac{e+2}{2}M^2 + C_1 M^{3/2} + C_2 M + C_3, \] there is a line bundle $L$ such that $\chi(f(L)) \geqslant rM$ and the class $f(L)$ admits Gaeta resolutions. \end{lem} \begin{proof} We use a setup similar to \cite[Lemma 4.5]{CosHui20}. Consider the curve $Q \colon \chi(f(L_{x,y}))=0$ in the $xy$-plane, where $L_{x,y}$ is the (in general non-integral) line bundle \[ L_{x,y} = xB + yA -\nu(f) + \frac{1}{2} K_{\mathbb{F}_e} . \] Set $\Delta = \Delta(f)$. By Riemann-Roch, \[ \frac{\chi(f(L_{x,y}))}{r} = x\left(y-\frac{e}{2}x \right)-\Delta, \] so $Q$ is the hyperbola $\Delta = x \left(y-\frac{e}{2}x \right)$, or, as a function of $x$, $y = Q(x) = \frac{\Delta}{x} + \frac{e}{2}x$. Let $\Lambda$ denote the lattice in the plane of points such that $L_{x,y}$ is integral, which is a shift of the standard integral lattice. 
We say that a point $(x,y) \in \Lambda$ is \emph{minimal} if \begin{itemize} \item $(x,y)$ is on or above the upper branch $Q_1$ of $Q$, and \item $(x-1,y)$ and $(x,y-1)$ are both on or below $Q_1$. \end{itemize} The minimal points exactly correspond to the line bundles $L_{x,y}$ such that $f(L_{x,y})$ admits Gaeta resolutions, according to Proposition~\ref{prop:chern-has-gr}(a). We need to find a minimal point $(x,y)$ such that $\chi(f(L_{x,y}))/r \geqslant M$. For this, consider the line $y=(e+1)x$, which intersects $Q_1$ at the point \[ (x',y') = \left(\sqrt{2\Delta/(e+2)}, (e+1)\sqrt{2 \Delta / (e+2)}\right). \] The tangent line to $Q_1$ at this point has equation $y = -x + \sqrt{2(e+2)\Delta}$ and $Q_1$ lies above the tangent line. See Figure 1. \begin{figure}[h] \begin{minipage}{.5\textwidth} \centering \includegraphics[height=3in]{Figure1.png} \captionof{figure}{The hyperbola $Q_1$} \label{fig:1} \end{minipage}\begin{minipage}{.5\textwidth} \centering \includegraphics[height=3in]{Figure2.png} \captionof{figure}{Example of minimal points near $(x',y')$} \label{fig:2} \end{minipage} \end{figure} Let $(x_0,y_0)$ be the unique minimal point such that $\epsilon_x := x'-x_0$ satisfies $0 \leqslant \epsilon_x < 1$; this point lies between $Q_1$ and the shift $Q_1 + (0,1)$. Then $\epsilon_y := y_0 - y'$ satisfies $0 \leqslant \epsilon_y \leqslant 2$. Let $\epsilon := y_0 - Q(x_0)$ denote the vertical distance from $(x_0,y_0)$ to $Q_1$, which satisfies $0 \leqslant \epsilon < 1$. Then $\chi(f(L_{x_0,y_0}))/r = \epsilon x_0$. If $\epsilon x_0 \geqslant M$, then $L_{x_0,y_0}$ gives the result, so assume on the contrary that $\epsilon < M/{x_0}$. Then let $m$ be a positive integer $<x_0$ and consider the lattice point $(x_m,y_m) = (x_0 - m, y_0 + m)$. We wish to find $m$ as small as possible such that $Q_1$ lies above $(x_m,y_m)$, as then the point $(x_m,y_m+1)$ will be minimal and $y_m+1 - Q(x_m)$ will be close to 1. See Figure 2 for an example in which this is achieved with $m=4$. To find $m$ such that $Q_1$ lies above $(x_m,y_m)$, the rise in $Q$ between $x_0$ and $x_0-m$ should exceed $m+\epsilon$, namely \begin{align*} Q(x_0-m) - Q(x_0) - m - \epsilon = \frac{m \Delta}{x_0^2}\left( 1 + \frac{m}{x_0} + \left(\frac{m}{x_0}\right)^2 + \cdots \right) - \frac{e+2}{2}m - \epsilon \end{align*} should be positive. As this quantity exceeds the approximation obtained by truncating the geometric series at the first two terms, and as $x_0 \leqslant x'$, it suffices to take $m$ such that \[ \frac{m\Delta}{(x')^2}\left(1+\frac{m}{x_0} \right)-\frac{e+2}{2}m - \epsilon \geqslant 0, \] which yields $m \geqslant \sqrt{2\epsilon x_0/(e+2)}$. Setting $m_0 = \lceil \sqrt{2\epsilon x_0/(e+2)} \rceil$, we then have \begin{align*} \frac{\chi(f(L_{x_{m_0},y_{m_0}+1}))}{r} &= (1+\epsilon) x_0 - \epsilon_x(e+1)m_0 - \epsilon_y m_0 - \frac{e+2}{2}m_0^2 - m_0, \end{align*} and for this to be $\geqslant M$ we need \[ (1+\epsilon) x_0 \geqslant \frac{e+2}{2}m_0^2 + (\epsilon_x(e+1)+\epsilon_y+1)m_0 + M. \] Let $\delta = m_0 - \sqrt{2 \epsilon x_0/(e+2)}$, which satisfies $0 \leqslant \delta < 1$. Then the inequality simplifies to \[ x_0 \geqslant (\delta(e+2)+\epsilon_x(e+1)+\epsilon_y+1)\sqrt{2\epsilon x_0/(e+2)} + (\epsilon_x(e+1)+\epsilon_y+1)\delta + \frac{e+2}{2}\delta^2 + M.
\] Replacing $x_0$ by $x'-\epsilon_x$, $\epsilon x_0$ by its upper bound $M$, $\epsilon_x$, $\epsilon_y$, and $\delta$ by their upper bounds, and solving for $\Delta$, we get a sufficient bound for $\Delta$: \[ \Delta \geqslant \frac{e+2}{2}M^2 + C_1 M^{3/2} + C_2 M + C_3 \] for constants $C_1,C_2,C_3$ that depend only on $e$. \end{proof} \section{Properties of sheaves with general Gaeta resolutions} \label{sect:prop-gr} Given an exceptional sequence $(\E_1,\E_2,\dots,\E_n)$ from \S~\ref{ss:ex-sfes} with $d$ chosen as in Example \ref{exmp:gaeta} and a sequence of non-negative integers $\vec{a} = (a_1,\dots,a_n)$, consider the vector space \[ H_{\vec{a},d}=\Hom(\E_1^{a_1} \oplus \cdots \oplus \E_d^{a_d},\E_{d+1}^{a_{d+1}} \oplus \cdots \oplus \E_n^{a_n}). \] We let $\rcone \subset H_{\vec{a},d}$ denote the open subset of injective maps \[ \phi \colon \E_1^{a_1} \oplus \cdots \oplus \E_d^{a_d} \to \E_{d+1}^{a_{d+1}} \oplus \cdots \oplus \E_n^{a_n}, \] which is non-empty if and only if $r := a_{d+1} + \cdots + a_n - a_1 - \cdots - a_d \geqslant 0$. In this case, we set \begin{equation*} F_\phi:=\coker \phi, \end{equation*} let $f$ denote the numerical class of these cokernels, which have rank $r$, and call the following projectivization the \emph{space of Gaeta resolutions}: \begin{equation}\label{eq:res-space} R_f := \bp \rcone \subset \bp(H_{\vec{a},d}). \end{equation} Then $F_{\phi}$ satisfies various cohomology vanishing conditions (Proposition \ref{prop:special-GTR-criterion}). The purpose of this section is to prove additional properties of sheaves admitting {\em general} Gaeta resolutions, including the prioritary condition and a weak Brill-Noether result. \subsection{Basic properties} We begin by proving some basic properties of sheaves admitting a general Gaeta resolution. \begin{prop}\label{prop:cokernel-properties} {\ }\begin{enumerate}[(a)] \item On $\bp^2$ and $\mathbb{F}_e$, we have the following for general $\phi$: \begin{enumerate}[(i)] \item If $r = 0$, then $F_{\phi}$ is a torsion sheaf supported on the vanishing locus of the determinant of $\phi$ (true even if $\phi$ is not general). \item If $r = 1$, then $F_\phi$ is torsion-free. \item If $r \geqslant 2$, then $F_\phi$ is locally free. \item If $a_n \geqslant r+2$, then $F_\phi$ is globally generated. \end{enumerate} \item On $X$ an admissible blowup of $\mathbb{F}_e$, using the notation in (\ref{gtr-blowup}), the same cases are true if we assume, for each $i \in S_0$ such that $\{j \colon p_j \succ p_i \}$ is nonempty: \begin{align*} \gamma_i &\geqslant \sum_{j \colon p_j \succ p_i} \gamma_j &\text{for (ii)}, \\ \gamma_i &\geqslant 1 + \sum_{j \colon p_j \succ p_i} \gamma_j &\text{for (iii)}, \\ \gamma_i &\geqslant r - \alpha_4 + \sum_{j \colon p_j \succ p_i} \gamma_j &\text{for (iv)}. \end{align*} \end{enumerate} \end{prop} The key to the proof of this proposition is a Bertini-type statement concerning the codimension of the locus on which a general map between vector bundles $\mathcal{A}$ and $\mathcal{B}$ drops rank. The case when $\mathcal{B} \otimes \mathcal{A}^*$ is globally generated is well known. \begin{prop}\label{prop:Bertini-type} On a smooth projective variety $Y$, consider maps $\phi \colon \mathcal{A} \to \mathcal{B}$, where $\mathcal{A}$, $\mathcal{B}$ are fixed vector bundles of ranks $a,b$ such that $a \leqslant b$. Let \[ Z \subset Y \times \bp \Hom(\mathcal{A},\mathcal{B}) \] denote the locus of pairs $(y,\phi)$ such that the rank of $\phi$ at $y$ is $<a$.
\begin{enumerate}[(a)] \item Suppose $\mathcal{A}^* \otimes \mathcal{B}$ is globally generated. Then the codimension of $Z$ in $Y \times \bp \Hom(\mathcal{A},\mathcal{B})$ is $b-a+1$. \item Suppose there are non-trivial decompositions $\mathcal{A} = \mathcal{A}_1 \oplus \mathcal{A}_2$ and $\mathcal{B}=\mathcal{B}_1 \oplus \mathcal{B}_2$ such that each $\mathcal{A}_i^* \otimes \mathcal{B}_j$ is globally generated except that $H^0(\mathcal{A}_2^* \otimes \mathcal{B}_1)=0$. Set $a_i = \mathrm{rk}(\mathcal{A}_i)$ and $b_j = \mathrm{rk}(\mathcal{B}_j)$. If $b_2 < a_2$, then $Z = Y \times \bp \Hom(\mathcal{A},\mathcal{B})$. If $b_2 \geqslant a_2$, then the codimension of $Z$ in $Y \times \bp \Hom(\mathcal{A},\mathcal{B})$ is $\min(b_2-a_2,b-a)+1$. \end{enumerate} \end{prop} \begin{proof} Part (a) is a special case of \cite[Teorema 2.8]{ottaviani1995varieta} (see also \cite[Proposition 2.6]{Hui16-interpolation}) and can be proved as follows. Consider the map of global sections \[ \pi \colon \Hom(\mathcal{A},\mathcal{B}) \otimes \oo_Y \to \lHom(\mathcal{A},\mathcal{B}), \] which is surjective since $\mathcal{A}^* \otimes \mathcal{B}$ is globally generated. This map induces a map of projective bundles \[ \ev \colon Y \times \bp \Hom(\mathcal{A},\mathcal{B}) \to \bp \lHom(\mathcal{A},\mathcal{B}), \qquad (y,\phi \colon \mathcal{A} \to \mathcal{B}) \mapsto (y,\phi|_y \colon \mathcal{A}|_y \to \mathcal{B}|_y). \] Let $\Sigma$ denote the locus in the target of points $(y,\phi_y \colon \mathcal{A}|_y \to \mathcal{B}|_y)$ such that $\phi_y \colon \mathcal{A}|_y \to \mathcal{B}|_y$ drops rank, and let $Z = \ev^{-1}(\Sigma)$. As $\Sigma$ has codimension $b-a+1$ in each $\bp \Hom(\mathcal{A}|_y,\mathcal{B}|_y)$ and $\pi|_y$ is surjective, $Z$ has codimension $b-a+1$ in $Y \times \bp \Hom(\mathcal{A},\mathcal{B})$. We prove (b) by adapting this argument. In this case, $\pi$ is not surjective, so the codimension of $Z$ may drop if the image of $\ev$ is not transversal to $\Sigma$. At each point $y$, fixing bases, $\phi|_y$ is a $b \times a$ matrix of the form \[ \begin{bmatrix} M & 0 \\ N & P \end{bmatrix} \] in which $M,N,P$ are general if $\phi$ is general. Since $b \geqslant a$, this matrix drops rank if and only if the columns are linearly dependent. If $b_2 < a_2$, the columns of $P$ are always linearly dependent, while if $b_2 \geqslant a_2$, there are two cases to consider in which the columns are linearly dependent: \begin{enumerate}[(i)] \item The columns of $P$ are linearly dependent. This occurs in codimension $1+b_2-a_2$ in the space of such block matrices. \item The columns of $P$ are linearly independent. Then the linear dependence involves a column $\vec{c}$ of $\begin{bmatrix} M \\ N \end{bmatrix}$, hence $\vec{c}$ is in the span of the remaining columns, which occurs in codimension $1+b-a$. (For such $\phi$, $\Sigma$ intersects the image of $\ev$ transversally.) \end{enumerate} Thus, $Z$ has codimension $1+\min(b_2-a_2,b-a)$ in $Y \times \bp\Hom(\mathcal{A},\mathcal{B})$. \end{proof} \begin{proof}[Proof of Proposition \ref{prop:cokernel-properties}] For (a), as each $\E_i \otimes \E_j^*$ for $j \leqslant d$ and $i > d$ is globally generated, Proposition \ref{prop:Bertini-type} (a) implies that for general $\phi$ the locus where $\phi$ drops rank is either empty or has codimension $r+1$, which proves $(i)$ and $(iii)$. Then $(ii)$ follows from the fact that $F_\phi$ cannot have zero-dimensional torsion as it has a two-step resolution by locally free sheaves. 
For (iv), there is a commutative diagram \[\xymatrix{ && \oo^{a_n} \ar[d] \ar@{=}[r] & \oo^{a_n} \ar[d] \\ 0 \ar[r] & \E_1^{a_1} \ar[r]^-{\phi} & \E_2^{a_2} \oplus \cdots \oplus \E_{n-1}^{a_{n-1}} \oplus \oo^{a_n} \ar[r] & F_{\phi} \ar[r] & 0 }\] with exact rows, hence $\oo^{a_n} \to F_\phi$ is surjective if and only if the induced map \[ \E_1^{a_1} \to \E_2^{a_2} \oplus \cdots \oplus \E_{n-1}^{a_{n-1}} \] is surjective. This is a map between a bundle of rank $a_1$ and a bundle of rank \[ a_2 + \cdots + a_{n-1} = a_1 + r - a_n \leqslant a_1 - 2, \] hence by dualizing the same Bertini-type statement, a general such map is surjective on all fibers. For (b), set \begin{align*} \mathcal{F}&= \oo(-C-A)^{\alpha_1} \oplus \bigoplus_{j \in S_1} \oo(-C-A+E_j)^{\gamma_j}\quad \mbox{and}\\ \mathcal{G}&= \oo(-C)^{\alpha_2} \oplus \oo(-A)^{\alpha_3} \oplus \bigoplus_{i \in S_0} \oo(-E_i)^{\gamma_i} \oplus \oo^{\alpha_4}. \end{align*} The complication is that for each $j \in S_1$, the global sections of the line bundle \[ L \otimes \oo(C+A-E_j) \] vanish on $E_i - E_j$, where $i$ is the index such that $p_i \prec p_j$ and $L$ is any line bundle in $\mathcal{G}$ except for $\oo(-E_i)$. Still, we can adapt the proof of the Bertini-type statement as follows. Consider \[ \pi \colon \Hom(\mathcal{F},\mathcal{G}) \otimes \oo_X \to \lHom(\mathcal{F},\mathcal{G}), \] which is not surjective on each $E_i-E_j$, and the induced map \[ \ev \colon X \times \bp \Hom(\mathcal{F},\mathcal{G}) \to \bp \lHom(\mathcal{F},\mathcal{G}), \qquad (y,\phi \colon \mathcal{F} \to \mathcal{G}) \mapsto (y,\phi|_y \colon \mathcal{F}|_y \to \mathcal{G}|_y). \] Let $\Sigma$ denote the locus in the target where the linear maps drop rank and $Z=\ev^{-1}(\Sigma)$. At points $p$ in the open complement $U$ of the exceptional locus $\bigsqcup_{i \in S_0} E_i$, $\pi|_p$ is surjective, hence the codimension of $Z|_U$ in $U \times \bp \Hom(\mathcal{F},\mathcal{G})$ is $r+1$. On each $\tilde{E}_i$, we apply Proposition \ref{prop:Bertini-type} with \[ Y=\tilde{E}_i, \quad \mathcal{A}_2 = \bigoplus_{j \colon p_j \succ p_i} \oo(-C-A+E_j)^{\gamma_j}|_{\tilde{E}_i}, \quad \mathcal{B}_2 = \oo(-E_i)^{\gamma_i}|_{\tilde{E}_i} \] and $\mathcal{A}_1,\mathcal{B}_1$ the restrictions of the remaining line bundles. Assuming $\gamma_i \geqslant \sum_{j \colon p_j \succ p_i} \gamma_j$ and using the fact that the global sections of each line bundle summand of $\mathcal{A}^* \otimes \mathcal{B}$ lift to $X$, we deduce that the codimension of $Z|_{\tilde{E}_i}$ in $\tilde{E}_i \times \bp \Hom(\mathcal{F},\mathcal{G})$ is \[ \min(\gamma_i - \sum_{j \colon p_j \succ p_i} \gamma_j, r)+1. \] On $E_j$, a similar argument with \[ Y = {E}_j, \quad \mathcal{A}_2 = \bigoplus_{j' \ne j \colon p_{j'} \succ p_i} \oo(-C-A+E_{j'})^{\gamma_{j'}}|_{E_j}, \quad \mathcal{B}_2 = \oo(-E_i)^{\gamma_i}|_{E_j} \] shows that $Z|_{E_j}$ has codimension \[ \min(\gamma_i + \gamma_j - \sum_{j \colon p_j \succ p_i} \gamma_j, r) + 1 \] in $E_j \times \bp\Hom(\mathcal{F},\mathcal{G})$. Altogether, since $\tilde{E}_i$ and $E_j$ have codimension 1 in $X$, we deduce that the general fiber of $Z$ over $\bp \Hom(\mathcal{F},\mathcal{G})$ is empty or has codimension at least \[ \min(r,\gamma_i-\sum_{j \colon p_j \succ p_i}\gamma_j + 1) + 1 \] in $X$. As this codimension is $\geqslant 2$ in the case $r=1$ and $\geqslant 3$ in the case $r \geqslant 2$ assuming $\gamma_i \geqslant 1 + \sum_{j \colon p_j \succ p_i} \gamma_j$ for all $i \in S_0$, we get the result for (b.ii) and (b.iii). 
For (b.iv), we dualize and use a similar argument. \end{proof} \subsection{Prioritary sheaves}\label{sect:prioritary} We relate sheaves which admit general Gaeta resolutions to prioritary sheaves, which will facilitate the study of stability in \S~\ref{sec:stability}. We begin by reviewing the prioritary condition. \begin{defn} Let $S$ be a smooth surface and $D$ be a divisor on it. A coherent sheaf $F$ on $S$ is $D$-{\em prioritary} if it is torsion-free and $\Ext^2(F,F(-D))=0$. If $S$ is a Hirzebruch surface or its blowup, $A$-prioritary sheaves are simply called {\em prioritary sheaves}. \end{defn} We will need the following lemma (\cite[Lemma 3.1]{coskun_existence_2019}) comparing prioritary conditions with respect to different divisors. \begin{lem}\label{lem:prioritary-div} Let $S$ be a smooth surface, $D_1$ and $D_2$ be two divisors such that $D_1\geqslant D_2$. Then $D_1$-prioritary sheaves are $D_2$-prioritary. \end{lem} Now let $X$ denote an admissible blowup of $\mathbb{F}_e$ and $f$ be a fixed class of positive rank admitting Gaeta resolutions. \begin{prop}\label{prop:prioritary} For a divisor $D$, consider the locus \[ \{ \phi \in R_f \mid \text{$F_{\phi}$ is torsion-free and not $D$-prioritary} \}. \] If $D = A$, then the locus is empty. If $D = A+C$, then the locus is empty if $e=0$ and otherwise has codimension $\geqslant \sum_{i \in S_0} \gamma_i + \alpha_4 - r + 1$ in $R_f$. \end{prop} \begin{proof} We begin by proving the second statement. Let $F=F_\phi$ and take $D = C+A$. We need to study $\Ext^2(F,F(-D)) \cong \Hom(F(-K-D),F)^\vee$. We twist the Gaeta resolution of $F$ by $\oo(-K-D)$ and then apply $\Hom(-,F)$, obtaining the exact sequence \begin{align*} 0\to &\Hom(F(-K-D),F) \\ \to &H^0(F(C+K+D))^{\alpha_2} \oplus H^0(F(A+K+D))^{\alpha_3}\\ &\oplus \bigoplus_{i \in S_0} H^0(F(E_{i}+K+D))^{\gamma_{i}} \oplus H^0(F(K+D))^{\alpha_4} \\ \xrightarrow{h} & H^0(F(C+A+K+D))^{\alpha_1} \oplus \bigoplus_{j \in S_1} H^0(F(C+A+K+D-E_{j}))^{\gamma_{j}}. \end{align*} A calculation using the Gaeta resolution for $F$ shows that $H^0(F(K+D))$ vanishes as, after tensoring by $K+D$, the line bundles in degree 0 have no global sections and the line bundles in degree $-1$ have no $H^1$: \[ H^1(\oo(-A-C+K+D)) \cong H^1(\oo)^{\vee} \cong 0 \] and, for $j \in S_1$, \[ H^1(\oo(-A-C+E_j + K + D)) \cong H^1(\oo(-E_j))^{\vee} \cong 0. \] Similarly, for $i \in S_0$, we get $H^0(F(E_i+K+D)) \cong 0$ since $H^1(\oo(-E_i)) \cong 0$ and $H^1(\oo(-E_i - E_j)) \cong 0$. Thus, the map $h$ in the exact sequence reduces to the map obtained by applying $\Hom(-,F(K+D))$ to the map \[ \oo(-C-A)^{\alpha_1} \oplus \bigoplus_{j \in S_1} \oo(-C-A+E_{j})^{\gamma_{j}} \xrightarrow{\psi} \oo(-C)^{\alpha_2} \oplus \oo(-A)^{\alpha_3} \] from the Gaeta resolution. If this map is surjective, then $h$ must be injective. The locus in $\bp(H_{\vec{a},d})$ of $\phi$ such that $\psi$ is not surjective has codimension $\geqslant \alpha_1 + \sum_{j \in S_1} \gamma_j - \alpha_2 - \alpha_3 + 1 = \sum_{i \in S_0} \gamma_i + \alpha_4 -r + 1$, where the equality follows by comparing ranks in (\ref{gtr-blowup}), giving the desired estimate. If $e=0$, then $B=C$ and $S_0$ and $S_1$ are empty since $X$ is admissible, from which it follows that $H^0(F(C+K+D)) \cong H^0(F(A+K+D)) \cong 0$. The first statement follows from a similar argument with $D=A$ by checking that $H^0(F(C+K+D))$, $H^0(F(A+K+D))$, $H^0(F(E_i+K+D))$ for $i \in S_0$, and $H^0(F(K+D))$ all vanish. \end{proof} The case $e=0$ (where $X=\mathbb{F}_0$) is in \cite[Proposition 2.20]{Ped21}.
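For instance, when the set of blown-up points is empty, so that $X = \mathbb{F}_e$ with $e > 0$ and $S_0 = S_1 = \emptyset$, the bound in Proposition~\ref{prop:prioritary} for $D = C+A$ specializes to a codimension of at least $\alpha_4 - r + 1$, where $\alpha_4 = \chi$ is the Euler characteristic of $f$ by Proposition~\ref{prop:chern-has-gr}(a).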
\begin{prop}\label{prop:prioritary-complete} Fix exponents such that $r \geqslant 1$, the condition in Proposition~\ref{prop:cokernel-properties}(b.ii) holds, and $\sum_{i \in S_0} \gamma_i + \alpha_4 \geqslant r$. Then the open family of Gaeta resolutions whose cokernels are torsion-free and $(C+A)$-prioritary is a complete family of $(C+A)$-prioritary sheaves. \end{prop} By Lemma \ref{lem:prioritary-div}, the same statement holds with $C+A$ replaced by any divisor $D \in \mathcal{D}$, where $\mathcal{D}$ is defined in (\ref{eq:bpf-divisors}). The inequality $\sum_{i \in S_0} \gamma_i + \alpha_4 \geqslant r$ is not needed for the case $D=A$. \begin{proof} If $F$ has a general Gaeta resolution, then Proposition~\ref{prop:cokernel-properties}(b) implies that $F$ is torsion-free, and Proposition~\ref{prop:prioritary} implies that $F$ is $(C+A)$-prioritary. Thus, the argument proving \cite[Proposition 3.6]{CosHui18weakBN} applies here: the sequence being strong full exceptional implies the Kodaira-Spencer map is surjective. \end{proof} \subsection{Weak Brill-Noether result} \label{weak-brill-noether} Let $S$ be $\bp^2$ or $X$ an admissible blowup of $\mathbb{F}_e$. Consider Gaeta resolutions $\E_1^{a_1} \oplus \cdots \oplus \E_d^{a_d} \to \E_{d+1}^{a_{d+1}} \oplus \cdots \oplus \E_n^{a_n}$ on $S$ of the types in Example~\ref{exmp:gaeta}. For integers $\ell \geqslant 1$ and $r \geqslant 1$, set $a_n = \ell r$ and assume the remaining exponents $a_1,\dots,a_{n-1} \geqslant 0$ satisfy \begin{equation}\label{eq:WBN-exponents} a_1 + \cdots + a_d = a_{d+1} + \cdots + a_{n-1} + (\ell-1)r. \end{equation} On $X$, we further assume \begin{equation}\label{eq:WBN-gammas} \gamma_i \geqslant \sum_{j \colon p_j \succ p_i} \gamma_j, \qquad \text{for all $i \in S_0$}. \end{equation} Consider a general map \[ \phi\colon \E_1^{a_1} \oplus \cdots \oplus \E_d^{a_d} \to \E_{d+1}^{a_{d+1}} \oplus \cdots \oplus \E_{n-1}^{a_{n-1}} \oplus \oo_X^{\ell r}. \] By Proposition~\ref{prop:cokernel-properties}, $\phi$ is injective and the cokernel $F_{\phi}$ is torsion-free of rank $r$. Furthermore, it has vanishing higher cohomology, and \[\oo_S^{\ell r} \cong H^0(F_{\phi}) \otimes \oo_S.\] Then, if $Z$ is a zero-dimensional subscheme of length $\ell$, $\chi(F_{\phi}\otimes I_Z)=0$ and we have the following weak Brill-Noether result. \begin{prop}\label{orth-ker-quot} Let $S$ denote $\bp^2$ or $X$ an admissible blowup of $\mathbb{F}_e$. Suppose the sequence of exponents $(a_1,\dots,a_n)$ satisfies $a_n = \ell r$ and (\ref{eq:WBN-exponents}) for some $r,\ell \geqslant 1$, as well as (\ref{eq:WBN-gammas}) in the case $S=X$. If $\phi$ is general and $Z \in S^{[\ell]}$ is general, then $F_{\phi} \otimes I_Z$ has vanishing cohomology in all degrees. \end{prop} \begin{proof} As the vanishing of the cohomology of $F_{\phi} \otimes I_Z$ is an open condition on families of $\phi$ and of $Z$, it suffices to prove the claim for a single choice of $\phi$. We construct $\phi$ as the direct sum of $r$ maps $\{ \phi_m \}_{1 \leqslant m \leqslant r}$, each of which is a general map of the form \[ \E_1^{a_1'} \oplus \cdots \oplus \E_d^{a_d'} \to \E_{d+1}^{a_{d+1}'} \oplus \cdots \oplus \E_{n-1}^{a_{n-1}'} \oplus \oo_X^\ell, \] where the exponents, which depend on $m$, are non-negative and satisfy \[ a_1' + \cdots + a_d' = a_{d+1}' + \cdots + a_{n-1}' + \ell - 1. 
\] On $X$, we need the additional condition that the exponent of $\oo(-E_i)$ is at least as large as the sum of the exponents of $\oo(-C-A+E_j)$ for $j \in S_1$ such that $p_j \succ p_i$, which can be ensured due to (\ref{eq:WBN-gammas}). Then, by Proposition \ref{prop:cokernel-properties}, the cokernel of $\phi$ is of the form $F_{\phi} \cong \bigoplus_{m=1}^r L_m \otimes I_{Z_m'}$, where, for each $m$, $L_m$ is a line bundle with vanishing higher cohomology, $Z_m'$ is a 0-dimensional subscheme, vanishing on $Z_m'$ imposes independent conditions on $H^0(L_m)$, and $H^0(L_m \otimes I_{Z_m'})$ is $\ell$-dimensional. Choose distinct points $Z = \{ q_1,\dots,q_{\ell} \}$ inductively so that each $q_u$ avoids the base loci of the linear systems of curves in $|L_m|$ that vanish on $Z_m' \sqcup \{ q_1,\dots,q_{u-1} \}$. Then $F_{\phi} \otimes I_Z \cong \bigoplus_{m=1}^r L_m \otimes I_{Z_m' \sqcup Z}$ has no cohomology. \end{proof} \subsection{No sections vanishing on curves} The following result will be used to apply \cite{GolLin22} in the study of finite Quot schemes in \S~\ref{sect:finite-quot}. \begin{prop}\label{prop:no-sec-van-curves} Suppose $\phi$ is a general Gaeta resolution and $F_\phi$ is the cokernel. \begin{enumerate}[(a)] \item On $\mathbb{P}^2$ or $\mathbb{F}_e$, $F_\phi$ has no sections vanishing on curves. \item On $X$ an admissible blowup of $\mathbb{F}_e$, assume \begin{enumerate}[(i)] \item $\gamma_j \geqslant \alpha_4-r$ for all $j \in S_1$, \item $\gamma_i \geqslant \sum_{j \in S_1 \colon p_j \succ p_i} \gamma_j + \max(0,\alpha_4-r)$ for all $i \in S_0$. \end{enumerate} Then $F_\phi$ has no sections vanishing on curves. \end{enumerate} \end{prop} \begin{proof} Let $F=F_\phi$. It suffices to prove that $H^0(F(-D))=0$ for all minimal nonzero effective divisors $D$. Twisting the Gaeta resolution by $-D$ and taking cohomology, we get a long exact sequence \[ 0 \to H^0(F(-D)) \to H^1(\E_1(-D)^{a_1} \oplus \cdots \oplus \E_d(-D)^{a_d}) \xrightarrow{\tilde{\phi}} H^1(\E_{d+1}(-D)^{a_{d+1}} \oplus \cdots \oplus \E_{n}(-D)^{a_{n}}). \] We need to show that the induced map $\tilde{\phi}$ is injective when $\phi$ is general. (a) On $\bp^2$, the only minimal effective divisor is the hyperplane class $H$, and $H^1(\oo(-3))$ vanishes, so injectivity of $\tilde{\phi}$ is trivial. Similarly, on $\mathbb{F}_e$, the minimal effective divisors are $A$ and $B$, and an easy calculation using \S~\ref{subsect:coh-line-bdl} shows that $\oo(-C-2A)$ and $\oo(-C-A-B)$ both have vanishing $H^1$. (b) Now consider the sequences on the two-step blowup of $\mathbb{F}_e$. We argue by cases. \underline{\emph{Case 1:}} $D$ is minimal nonzero effective not equal to any $\tilde{E}_i$ for $i \in S_0$ or $E_j$ for $j \in S_1$. In this case, as above, we show that the domain of $\tilde{\phi}$ vanishes. Fix a curve $Y$ in the linear equivalence class of $D$. As $D$ is minimal effective, $Y$ must be connected. As $C+A$ is ample on $\mathbb{F}_e$ and $D$ is not contained in $\bigcup E_i$, there is a curve $Y'$ in the linear series $|C+A|$ that is connected, does not contain $Y$, and intersects $Y$. Then $Y \cup Y'$ is a connected curve in the linear series $|C+A+D|$, so $H^1(\oo(-C-A-D)) = 0$ by Lemma~\ref{lem:conn-curve}. We use a similar argument to show the vanishing of $H^1(\oo(-C-A-E_j-D))$ for $j \in S_1$. Let $i \in S_0$ be the index such that $p_j \succ p_i$. 
By Lemma~\ref{lem:LS_base_locus}, \[ |C+A-E_j| = |C+A-E_i| + (E_i-E_j), \] where $|C+A-E_i|$ is basepoint-free (Example~\ref{ex:LS_section}) and contains connected curves that intersect $\tilde{E}_i$, so the union of such a curve and $(E_i-E_j)$ is connected. Thus, for $D$ minimal effective not equal to any $\tilde{E}_i$ or $E_j$, $H^1(\oo(-C-A+E_j-D))=0$ (and this vanishing holds for $D=E_j$ as well). \underline{\emph{Case 2:}} $D = E_j$ for $j \in S_1$. Note that if $L$ is a line bundle with no cohomology whose restriction to $E_j$ is trivial, then taking cohomology of the short exact sequence \[ 0 \to L(-E_j) \to L \to L|_{E_j} \to 0 \] yields an isomorphism $H^1(L(-E_j)) \cong H^0(\oo_{E_j})$. Moreover, because $H^1(\oo(-E_j))=0$ and $H^1(\oo(-C-A+E_j-E_j))=0$, we can view $\tilde{\phi}$ as the map obtained from \[ \oo(-C-A)^{\alpha_1} \oplus \bigoplus_{j' \in S_1 \setminus \{j\}} \oo(-C-A+E_{j'})^{\gamma_{j'}} \to \oo(-C)^{\alpha_2} \oplus \oo(-A)^{\alpha_3} \oplus \bigoplus_{i \in S_0} \oo(-E_i)^{\gamma_i} \] by restricting to $E_j$ and then taking the induced map on global sections. As the restriction to $E_j$ of each of these exceptional sheaves is trivial, $\tilde{\phi}$ is of the form \[ \tilde{\phi} \colon \basefield^{\alpha_1} \oplus \bigoplus_{j' \in S_1 \setminus \{j\}} \basefield^{\gamma_{j'}} \to \basefield^{\alpha_2} \oplus \basefield^{\alpha_3} \oplus \bigoplus_{i \in S_0} \basefield^{\gamma_i}. \] Thus, a necessary condition for $\tilde{\phi}$ to be injective is $\alpha_1 - \gamma_j + \sum_{j' \in S_1} \gamma_{j'} \leqslant \alpha_2 + \alpha_3 + \sum_{i \in S_0} \gamma_i$, or equivalently, as $\alpha_1 + \sum_{j' \in S_1} \gamma_{j'} + r = \alpha_2 + \alpha_3 + \sum_{i \in S_0} \gamma_i + \alpha_4$, \[ \gamma_j \geqslant \alpha_4 - r. \] To find additional sufficient conditions for $\tilde{\phi}$ to be injective, we observe that certain blocks of $\tilde{\phi}$ are injective when $\phi$ is general. Let $i$ denote the index such that $p_j \succ p_i$. Then: \begin{itemize} \item The block $\bigoplus_{j' \ne j \colon p_{j'} \succ p_i} \basefield^{\gamma_{j'}} \to \basefield^{\gamma_i}$ is injective for general $\phi$ if \[ \sum_{j' \ne j \colon p_{j'} \succ p_i} \gamma_{j'} \leqslant \gamma_i \] because the linear series $|C+A-E_i-E_{j'}|$ is basepoint-free. In fact, this inequality is necessary because $E_j$ is in the base locus of $|A-E_{j'}|$, $|eA+B-E_{j'}|$, and $|C+A-E_{i'}-E_{j'}|$ for all $j' \ne j$ such that $p_{j'} \succ p_i$ and all $i' \in S_0 \setminus \{i\}$, hence all other blocks involving these $\basefield^{\gamma_{j'}}$ are 0. \item For a similar reason, for each $i' \in S_0 \setminus \{i\}$, $\bigoplus_{j' \colon p_{j'} \succ p_{i'}} \basefield^{\gamma_{j'}} \to \basefield^{\gamma_{i'}}$ is injective for general $\phi$ if \[ \sum_{j' \colon p_{j'} \succ p_{i'}} \gamma_{j'} \leqslant \gamma_{i'} \] (though this inequality may not be necessary for $\tilde{\phi}$ to be injective). \end{itemize} As all blocks corresponding to $\basefield^{\alpha_1}$ are general if $\phi$ is general because the linear systems $|A|$, $|C|$, and $|C+A-E_{i'}|$ for $i' \in S_0$ are all basepoint-free, these inequalities suffice to ensure that $\tilde{\phi}$ is injective when $\phi$ is general. \underline{\emph{Case 3:}} $D = \tilde{E}_i$ for $i \in S_0$. Similar to the previous case, if $L$ is a line bundle on $X$ with no cohomology whose restriction to $\tilde{E}_i$ is trivial, then \[ H^1(L(-\tilde{E}_i)) \cong H^0(L|_{\tilde{E}_i}), \] while $H^1(\oo(-\tilde{E}_i))=0$.
Thus, $\tilde{\phi}$ can be viewed as the map obtained by restricting \begin{align*} \oo(-C-A)^{\alpha_1} \oplus \bigoplus_{j \in S_1} &\oo(-C-A+E_{j})^{\gamma_{j}} \tag{$*$} \\ & \to \oo(-C)^{\alpha_2} \oplus \oo(-A)^{\alpha_3} \oplus \bigoplus_{i' \in S_0 \setminus \{i\}} \oo(-E_{i'})^{\gamma_{i'}} \oplus \oo(-E_i)^{\gamma_i} \end{align*} to $\tilde{E}_i$ and then taking the induced map on global sections. The restriction of each of the exceptional bundles in $(*)$ to $\tilde{E}_i$ is $\oo_{\tilde{E}_i}$, except for $\oo(-E_i)|_{\tilde{E}_i} \cong \oo_{\tilde{E}_i}(1)$ and $\oo(-C-A+E_j)|_{\tilde{E}_i} \cong \oo_{\tilde{E}_i}(1)$ for $p_j \succ p_i$, each of which has a two-dimensional space of global sections. Thus, the restriction of ($*$) to $\tilde{E}_i$ is \[ \oo_{\tilde{E}_i}^{\alpha_1} \oplus \bigoplus_{j \colon p_j \not \succ p_i} \oo_{\tilde{E}_i}^{\gamma_j} \oplus \bigoplus_{j \colon p_j \succ p_i} \oo_{\tilde{E}_i}(1)^{\gamma_j} \to \oo_{\tilde{E}_i}^{\alpha_2} \oplus \oo_{\tilde{E}_i}^{\alpha_3} \oplus \bigoplus_{i' \in S_0 \setminus \{i\}} \oo_{\tilde{E}_i}^{\gamma_{i'}} \oplus \oo_{\tilde{E}_i}(1)^{\gamma_i} \] and hence $\tilde{\phi}$ is of the form \[ \tilde{\phi} \colon \basefield^{\alpha_1} \oplus \bigoplus_{j \colon p_j \not \succ p_i} \basefield^{\gamma_j} \oplus \bigoplus_{j \colon p_j \succ p_i} H^0(\oo_{\tilde{E}_i}(1))^{\gamma_j} \to \basefield^{\alpha_2} \oplus \basefield^{\alpha_3} \oplus \bigoplus_{i' \in S_0 \setminus \{i\}} \basefield^{\gamma_{i'}} \oplus H^0(\oo_{\tilde{E}_i}(1))^{\gamma_i}. \] A necessary inequality for $\tilde{\phi}$ to be injective is $\alpha_1 + \sum_{j \in S_1} \gamma_j + \sum_{j \in S_1 \colon p_j \succ p_i} \gamma_j \leqslant \alpha_2 + \alpha_3 + \sum_{i' \in S_0} \gamma_{i'} + \gamma_i$, or equivalently, \[ \gamma_i \geqslant \alpha_4 - r + \sum_{j \colon p_j \succ p_i} \gamma_j. \] As in the previous case, we obtain sufficient conditions for $\tilde{\phi}$ to be injective when $\phi$ is general by looking at various blocks. \begin{itemize} \item The block $\bigoplus_{j \colon p_j \succ p_i} H^0(\oo_{\tilde{E}_i}(1))^{\gamma_j} \to H^0(\oo_{\tilde{E}_i}(1))^{\gamma_i}$ is injective for general $\phi$ if \[ \gamma_i \geqslant \sum_{j \colon p_j \succ p_i} \gamma_j \] because the linear system $|C+A-E_i-E_j|$ is basepoint-free. As there are no nonzero maps $\oo_{\tilde{E}_i}(1) \to \oo_{\tilde{E}_i}$, this condition is necessary for $\tilde{\phi}$ to be injective as all other blocks involving $\bigoplus_{j \colon p_j \succ p_i} H^0(\oo_{\tilde{E}_i}(1))^{\gamma_j}$ are 0. \item For $i' \in S_0 \setminus \{i\}$, the block $\bigoplus_{j \colon p_j \succ p_{i'}} \basefield^{\gamma_j} \to \basefield^{\gamma_{i'}}$ is injective for general $\phi$ if \[ \gamma_{i'} \geqslant \sum_{j \colon p_j \succ p_{i'}} \gamma_j \] (but this condition may not be necessary). \end{itemize} The blocks involving $\basefield^{\alpha_1}$ are all general for general $\phi$ as the linear systems $|A|$, $|C|$, $|C+A-E_{i'}|$ for $i' \ne i$ are all basepoint-free and the curves in $|C+A|$ containing $p_i$ have no fixed tangent direction at $p_i$, so the above inequalities are sufficient. \end{proof} \begin{lem}\label{lem:conn-curve} Let $D$ be a nonzero effective divisor on a rational surface and $|D|$ denote its linear series. The following are equivalent: \begin{enumerate}[(a)] \item $|D|$ contains a connected curve; \item Every curve in $|D|$ is connected; \item $H^1(\oo(-D))=0$. 
\end{enumerate} \end{lem} \begin{proof} Let $Y$ be a curve in the linear equivalence class of $D$ and consider the corresponding short exact sequence \[ 0 \to \oo(-D) \to \oo \to \oo_Y \to 0. \] Taking cohomology and using $H^1(\oo)=0$, we see that $H^1(\oo(-D))=0$ if and only if $H^0(\oo) \to H^0(\oo_Y)$ is an isomorphism, which is true if and only if $H^0(\oo_Y)$ is one-dimensional, which holds exactly when $Y$ is connected. (If $D$ is ample, $(c)$ also follows from Kodaira vanishing.) \end{proof} \section{Gaeta resolutions and stability}\label{sec:stability} For $X$ an admissible blowup, we study the connection between the existence of Gaeta resolutions and stability of a sheaf, which allows Gaeta resolutions to be applied in the study of moduli problems. First, we describe conditions ensuring that general stable sheaves in $M(f)$ admit Gaeta resolutions (Proposition~\ref{prop:unirat-large-L}). Then, by imposing stronger conditions on $f$ and on the polarization $H$, we show that the locus of maps in the resolution space whose cokernels are unstable has codimension $\geqslant 2$ (Proposition~\ref{prop:bad-locus-res}), as well as the parallel statement in the moduli space that the locus of sheaves not admitting Gaeta resolutions has codimension $\geqslant 2$ (Theorem~\ref{thm:moduli-nongr-codim2}). In particular, the latter results imply that $M(f)$ is non-empty and that general stable sheaves away from a locus of codimension $\geqslant 2$ satisfy various nice properties (Corollary~\ref{cor:gen-stable-prop}). We discussed prioritary conditions in the previous section. One of the motivations to consider the prioritary condition is the following statement, which is essentially contained in the proof of \cite[Theorem 1]{Wal98}. \begin{lem}\label{lem:stable-is-priotary} Over a smooth projective surface $S$, if $D$ is a divisor and $H$ is a polarization such that $H\cdot (K_S+D)<0$, then any $H$-semistable torsion-free sheaf is $D$-prioritary. \end{lem} \begin{proof} If $F$ is torsion-free and $H$-semistable, then by Serre duality, $\Ext^2(F,F(-D)) \cong \Hom(F,F(K_S+D))^\vee \cong 0$ by semistability and the fact that $H \cdot (K_S+D)<0$. \end{proof} As a warm-up, we prove Proposition~\ref{prop:unirat-large-L}. Recall that $X$ is an admissible blowup of $\mathbb{F}_e$, $H$ satisfies $H \cdot (K_X+A) < 0$, and $f \in K(X)$ is a class of rank $r > 0$, Euler characteristic $\chi \geqslant 0$, and first Chern class satisfying the inequalities in Propositions \ref{prop:chern-has-gr}(a) and \ref{prop:cokernel-properties}(b.ii). \begin{proof}[Proof of Proposition \ref{prop:unirat-large-L}] When non-empty, the moduli space $M(f)$ is irreducible (\cite[Theorem 1]{Wal98}), using the fact that the stack of prioritary sheaves is irreducible. Since semistability is an open property in families, Propositions~\ref{prop:chern-has-gr}(a), \ref{prop:prioritary-complete}, and Lemma~\ref{lem:stable-is-priotary} show that a general semistable sheaf in $M(f)$ has a Gaeta resolution. \end{proof} Using Proposition~\ref{prop:chern-has-gr}(b), we obtain a formulation similar to Proposition \ref{prop:unirat-large-L}. \begin{prop}\label{prop:unirat-large-c2} Let $X$ and $H$ be as in Proposition~\ref{prop:unirat-large-L}. Assume the class $f$ has fixed rank $r>0$ and first Chern class, and its discriminant is sufficiently large. Then there is a line bundle $L$ pulled back from $\mathbb{F}_e$ such that for general semistable sheaves $F$ of class $f$, $F \otimes L$ admits a Gaeta resolution.
\end{prop} Since the space $R_f$ of Gaeta resolutions is rational, we immediately deduce the following special cases of \cite[Theorem 2.2]{Bal87}. \begin{cor}\label{cor:unirational} For $X$, $H$, and $f \in K(X)$ as in Propositions \ref{prop:unirat-large-L} or \ref{prop:unirat-large-c2}, $M(f)$ is unirational if it is nonempty. \end{cor} Over Hirzebruch surfaces, similar results were proved in \cite[Theorem 2.10, Proposition 4.4]{CosHui20}. The rest of this section is dedicated to the proof of Theorem~\ref{thm:moduli-nongr-codim2}, which requires additional technical conditions on the exponents of the Gaeta resolutions and on the polarization. Before stating these conditions, we set some notation. For simplicity, we write the Gaeta resolutions of the form (\ref{gtr-blowup}) on $X$ as \[ 0 \to \E_1^{a_1} \oplus \cdots \oplus \E_d^{a_d} \xrightarrow{\phi} \E_{d+1}^{a_{d+1}} \oplus \cdots \oplus \E_{n}^{a_{n}} \to F_{\phi} \to 0. \] Let $\rcone \subset H_{\vec{a},d}$ denote the locus of injective maps $\phi$ as in \S~\ref{sect:prop-gr}. Let $p$ and $q$ denote the projections from $\rcone \times X$ to the first and second factors, respectively. Let $\mathbb{F}$ denote the cokernel of the tautological map over $\rcone \times X$: \begin{align}\label{eq:univ-res} 0 \to \bigoplus_{i=1}^d q^*\E_i^{a_i} \to \bigoplus_{j=d+1}^n q^*\E_j^{a_j} \to \mathbb{F}\to 0. \end{align} Because it is the family of cokernels of injective maps, $\mathbb{F}$ is flat over $\rcone$. The technical conditions on the exponents are as follows. We assume \begin{equation}\label{eq:conditions-exponents} r \geqslant 2 \quad \text{and} \quad \gamma_i \geqslant \sum_{j \colon p_j \succ p_i} \gamma_j + 1 \; \quad \forall i \in S_0 \end{equation} to ensure that the complement of $\rcone$ in $H_{\vec{a},d}$ has codimension $\geqslant 2$ and that the cokernels $F_\phi$ are torsion-free away from a locus of codimension $\geqslant 2$ in $\rcone$ (the proof is similar to the proof of Proposition \ref{prop:cokernel-properties}, but we consider the image of the locus $Z$ in Proposition \ref{prop:Bertini-type} in $\bp \Hom(\mathcal{A},\mathcal{B})$). We make the additional assumption \begin{equation}\label{eq:condition-alpha4} \sum_{i \in S_0} \gamma_i + \alpha_4 \geqslant r+1, \end{equation} which ensures that the $F_\phi$ are $(C+A)$-prioritary away from a locus of codimension $\geqslant 2$ in $\rcone$ (Proposition \ref{prop:prioritary}); we also require all of the exponents $a_i$ to be strictly positive and the discriminant of the sheaves $F_\phi$ to be sufficiently large in the sense of (\ref{eq:discriminant-bound}). To state the conditions on the ample divisor $H$, which we assume is general, we write \[H=uA+vC-\sum_{i \in S_0 \cup S_1} d_i E_i \] for rational numbers $u,v,d_i > 0$\footnote{As scaling $H$ does not affect stability, these weights could be taken as integers as well.}. We assume that \begin{equation}\label{eq:H-pos-lin-combo} u > \sum_{j \in S_1} d_j \mbox{ and } v > \sum_{i \in S_0} d_i > \sum_{j \in S_1} d_j, \end{equation} namely that $H$ is a positive linear combination of all divisors in $\mathcal{D}$, where $\mathcal{D}$ is defined in (\ref{eq:bpf-divisors}). Note that by Proposition \ref{prop:very-ample}, (\ref{eq:H-pos-lin-combo}) implies that $H$ is ample. This condition implies that \[H\cdot (K_X+D)<0 \quad \text{for all } D\in \mathcal{D},\] as well as the condition $H \cdot (K_X+2A) < 0$ that appears in Theorem \ref{thm:yoshioka}.
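For instance, if $X$ is the blowup of $\mathbb{F}_1$ at a single point, so that $S_0 = \{1\}$ and $S_1 = \emptyset$ (assuming the point is chosen so that the blowup is admissible), then $H = A + C - \tfrac{1}{2}E_1$ satisfies (\ref{eq:H-pos-lin-combo}): here $u = v = 1$ and $d_1 = \tfrac{1}{2}$, so the two inequalities read $1 > 0$ and $1 > \tfrac{1}{2} > 0$.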
Moreover, we assume that no $d_i$ can be too close to $v$ in the sense that \begin{equation}\label{eq:conditions-on-H} \frac{d_i}{v} \leqslant \frac{\lambda}{\lambda+1} \quad \text{and} \quad \frac{d_i}{v} \leqslant \sqrt{\frac{2\lambda}{t}} \quad \text{for all $i$, where $\lambda = \frac{u}{v} + \frac{e}{2}$}. \end{equation} \begin{rem} Our arguments can be extended to allow $\mathcal{D}$ to include other divisors whose linear systems are basepoint-free with general member isomorphic to $\bp^1$, for instance other divisors of the form $C+A- \sum_{i \in I} E_i$ for $I \subset \{1,\dots,t\}$ such that $\# I \leqslant e+2$, which could expand the range of $H$ depending on the configuration of the blown-up points. \end{rem} \subsection{Locus of unstable sheaves in the space of Gaeta resolutions}\label{subsect:bad-locus-res} In this subsection we prove the following proposition. \begin{prop}\label{prop:bad-locus-res} Assume that $\vec{a}$ and $H$ satisfy the conditions (\ref{eq:conditions-exponents}-\ref{eq:conditions-on-H}). Then the complement of $\{\phi\in \rcone \mid F_\phi=\coker \phi \mbox{ is $H$-semistable}\}$ in $H_{\vec{a},d}$ has codimension $\geqslant 2$. In particular, the moduli space $M(f)$ is non-empty. \end{prop} As observed above, the complement of $\rcone$ has codimension at least $2$ in $H_{\vec{a},d}$, as does the locus of $\phi$ such that $F_\phi$ is not torsion-free. Moreover, by Proposition \ref{prop:prioritary}, the locus of $\phi$ such that $F_{\phi}$ is not $(C+A)$-prioritary also has codimension $\geqslant 2$ in $H_{\vec{a},d}$, so it suffices to show that the locus of $\phi$ such that $F_\phi$ is torsion-free, $(C+A)$-prioritary, and unstable has codimension at least 2. We will do this by showing that various Harder-Narasimhan strata have codimension $\geqslant 2$. For $\phi\in \rcone$ whose cokernel $F_\phi$ is $(C+A)$-prioritary and unstable, let \[0=G_0\subsetneqq G_1\subsetneqq \cdots \subsetneqq G_\ell =F_\phi\] be the Harder-Narasimhan filtration of $F_\phi$ with respect to Gieseker stability, let $\gr_i=G_i/G_{i-1}$ denote the graded pieces and let $r_i$, $\nu_i$, and $\Delta_i$ denote the rank, total slope, and discriminant of $\gr_i$. Then $\nu_1 \cdot H \geqslant \cdots \geqslant \nu_\ell \cdot H$ and $r_i \geqslant 1$ and $\Delta_i \geqslant 0$ for all $i$. \begin{lem}\label{large-hn} The following subset of $\rcone$ has codimension at least $2$: \begin{equation*} \left\{ \phi\in \rcone \, \middle\vert \begin{array}{l} F_\phi \mbox{ is unstable and $(C+A)$-prioritary};\\ \mbox{$(\nu_i-\nu_j)\cdot D>2$ for some $i<j$ and some $D \in \mathcal{D} \cup \{C+A\}$} \end{array} \right\}.\end{equation*} \end{lem} \begin{proof} All $D \in \mathcal{D} \cup \{C+A\}$ are basepoint-free, so Bertini's Theorem implies that general divisors in the corresponding linear systems are nonsingular. Moreover, these general divisors are isomorphic to $\mathbb{P}^1$. Let $D$ be general in the linear system $|D|$ so that it avoids the singularities of $F_{\phi}$, and consider the restrictions to $D$ of the cokernels $F_{\phi'}$ for $\phi'$ in a neighborhood of $\phi$, which are locally free on $D$. As $(C+A)$-prioritary implies $D$-prioritary, $F_\phi$ is $D$-prioritary, so the Kodaira-Spencer map \[\operatorname{T}_\phi \rcone\to \Ext_X^1(F_\phi,F_\phi)\to \Ext^1_D(F_\phi|_D,F_\phi|_D)\] is surjective, according to \cite[Corollary 15.4.4]{LeP97} or \cite[Proposition 2.6]{CosHui20}. The restrictions to $D$ provide a complete family of vector bundles.
On the other hand, the inequality implies that over $D$, $\mu_{\max{}} (F_\phi|_D)-\mu_{\min{}} (F_\phi|_D)>2$. Thus, the subset has codimension at least 2, according to \cite[Corollary 15.4.3]{LeP97}. \end{proof} The last step is to show that the locus of $\phi$ in $\rcone$ satisfying the following two conditions has codimension $\geqslant 2$: \begin{enumerate}[(a)] \item $F_\phi$ is unstable and $(C+A)$-prioritary; \item For all $i<j$, the inequality $(\nu_i-\nu_j)\cdot D\leqslant 2$ holds for all $D \in \mathcal{D} \cup \{C+A\}$. \end{enumerate} We will do this for each locally-closed stratum of this locus of fixed Harder-Narasimhan type, namely for an integer $\ell \geqslant 2$ and polynomials $P_1,\dots,P_\ell$, we let $Y_{P_1,\dots,P_\ell}$ denote the locus of $\phi$ in $\rcone$ such that (a) and (b) hold and the Harder-Narasimhan filtration of $F_{\phi}$ has length $\ell$ and the Hilbert polynomial of $\gr_i$ is $P_i$. Our strategy for showing the codimension of $Y_{P_1,\dots,P_\ell}$ in $\rcone$ is $\geqslant 2$ is based on similar ideas for $\mathbb{P}^2$ in \cite[Chapter 15]{LP05}. We begin with the following observation: \begin{lem}\label{lem:gr-ext} \begin{enumerate}[(i)] \item For $i<j$, $\Hom(\gr_i,\gr_j)=0$. \item Under the conditions (a) and (b) above, $\Ext^2(\gr_i,\gr_j)=0$ for all $i$ and $j$. \end{enumerate} \end{lem} \begin{proof}Part (i) follows from semistability. For (ii), (b) implies that when $i<j$, $(\nu_i - \nu_j + K) \cdot D \leqslant 0$ for all $D \in \mathcal{D}$ and $(\nu_i-\nu_j+K) \cdot (C+A) \leqslant -e-2 < 0$. As $H$ is a positive linear combination of all divisors in $\mathcal{D}$, letting $m$ denote the minimum of the weights of $A$ and $C$, $H$ can be written as $m(C+A)$ plus a non-negative linear combination of divisors in $\mathcal{D}$. Thus, $(\nu_i - \nu_j + K)\cdot H<0$ and this inequality is also true when $i \geqslant j$ since (\ref{eq:H-pos-lin-combo}) implies $H \cdot K < 0$. Thus, $\Ext^2(\gr_i,\gr_j)\cong\Hom(\gr_j,\gr_i\otimes K)^\vee=0 $ for all $i$ and $j$. \end{proof} Let $\operatorname{Flag} = \operatorname{Flag}(\mathbb{F}/\rcone;P_1,\dots,P_\ell) \to \rcone$ be the relative flag scheme of filtrations whose grading $\gr_i$ has Hilbert polynomial $P_i$. Given $\phi \in Y_{P_1,\dots,P_\ell}$ and a point $p=(\phi,(G_1,\dots,G_\ell))$ of the fiber over $\phi$, there is an exact sequence \begin{equation}\label{sequence-tangent} 0\to \Ext^0_+(F_\phi,F_\phi)\to \operatorname{T}_p\operatorname{Flag}\to \operatorname{T}_\phi \rcone\xrightarrow[]{\omega_+}\Ext^1_+(F_\phi,F_\phi), \end{equation} \cite[Proposition 15.4.1]{LeP97} realizing the vertical tangent space as the group $\Ext^0_+(F_\phi,F_\phi)$ and the normal space of $Y_{P_1,\dots,P_\ell}$ in $\rcone$ at $\phi$ as the image of $\omega_{+}$. Here the groups $\Ext^i_+$ are defined with respect to the filtration of $F_{\phi}$, and $\omega_+$ is the composite map \begin{equation}\label{eq:omega-plus}\operatorname{T}_\phi \rcone \xrightarrow{\operatorname{KS}} \Ext^1(F_\phi,F_\phi)\xrightarrow{h_+} \Ext^1_+(F_\phi,F_\phi).\end{equation} The Kodaira-Spencer map $\operatorname{KS}$ is surjective since $\rcone$ parametrizes a complete family of prioritary sheaves. The idea is to show that $h_+$ is also surjective and that $\ext^1_+(F_\phi,F_\phi) \geqslant 2$, which imply that $Y_{P_1,\dots,P_\ell}$ has codimension $\geqslant 2$ in $\rcone$. For foundational material on the groups $\Ext^i_\pm$, see \cite{DreLeP-85}. 
There is a canonical exact sequence \[\dots \to \Ext^{1}(F_\phi,F_\phi)\xrightarrow{h_+} \Ext^{1}_+(F_\phi,F_\phi)\to \Ext^{2}_-(F_\phi,F_\phi)\to \dots \] showing that surjectivity of $h_+$ is guaranteed by the vanishing of $\Ext^2_-(F_\phi,F_\phi)$. This group can be calculated using the spectral sequence \begin{equation*} E_1^{p,q}=\begin{cases} 0 & \text{if $p<0$,} \\ \bigoplus_i\Ext^{p+q}(\gr_i, \gr_{i-p} ) & \text{otherwise,} \end{cases} \end{equation*} converging to $\Ext^{p+q}_-(F_\phi,F_\phi)$. By Lemma~\ref{lem:gr-ext}(ii), $E_1^{p,q}$ vanishes when $p+q=2$, hence $\Ext_-^2(F_\phi,F_\phi)=0$, so $h_+$ is surjective. To calculate $\Ext_+^1(F_\phi,F_\phi)$, we use the spectral sequence \begin{equation*} E_1^{p,q}=\begin{cases} \bigoplus_i\Ext^{p+q}(\gr_i, \gr_{i-p} ) & \text{if $p<0$,} \\ 0 & \text{otherwise,} \end{cases} \end{equation*} converging to $\Ext^{p+q}_+(F_\phi,F_\phi)$. By Lemma~\ref{lem:gr-ext}, $\Ext_+^0(F_\phi,F_\phi)= \Ext_+^2(F_\phi,F_\phi)=0$ and the spectral sequence degenerates on the first page, yielding \begin{equation*} \Ext^1_+(F_\phi,F_\phi)\cong \bigoplus_{i< j}\Ext^1(\gr_i,\gr_j). \end{equation*} Using (\ref{eq:euler-pair}), we thus calculate \begin{equation}\label{eq:ext^1_+} \ext_+^1(F_\phi,F_\phi)=\sum_{i<j}\ext^1(\gr_i,\gr_j)=-\sum_{i<j}\chi(\gr_i,\gr_j) = \sum_{i<j} r_i r_j(\Delta_i + \Delta_j - P(\nu_j - \nu_i)), \end{equation} where $P$ is the polynomial defined in \S~\ref{ss:chern-characters}. To finish the proof of the proposition, we will show that $\ext^1_+(F_\phi,F_\phi) \geqslant 2$ given the conditions $(\nu_i-\nu_j) \cdot D \leqslant 2$ for all $D \in \mathcal{D} \cup \{C+A\}$ and $(\nu_i-\nu_j) \cdot H \geqslant 0$ for all $i<j$. As $\chi(\gr_i,\gr_j) \leqslant 0$ for each $i<j$, we see that \[ -\sum_{i < j} \chi(\gr_i,\gr_j) \geqslant -\chi(\gr_1,\oplus_{1 < j} \gr_j), \] which allows us to reduce to the case where the Harder-Narasimhan filtration has length 2. To see that the conditions (a) and (b) still hold, note that $\nu(\oplus_{1 < j} \gr_j) = \sum_{1 < j} r_j \nu_j/\sum_{1 < j} r_j$ is a weighted average of the $\nu_j$, hence we have $(\nu_1 - \nu(\oplus_{1 < j} \gr_j)) \cdot H \geqslant 0$ and $(\nu_1 - \nu(\oplus_{1 < j} \gr_j)) \cdot D \leqslant 2$ for all $D \in \mathcal{D} \cup \{C+A\}$. As we know $\Delta_1 \geqslant 0$ but no such inequality is guaranteed for the discriminant of $\oplus_{1 < j} \gr_j$, we need $\sum_{1 < j} r_j \geqslant r_1$ to apply the lemma below; if this does not hold, then a similar setup using $\oplus_{i < \ell} \gr_i$ and $\gr_\ell$, which satisfies $\sum_{i < \ell} r_i > r_\ell$ and $\Delta_\ell \geqslant 0$, meets the conditions of the lemma. Thus, it suffices to prove the following: \begin{lem} Assume the condition (\ref{eq:conditions-on-H}) on $H$. Suppose the class $(r,\nu,\Delta)$ is the sum of classes $(r_1,\nu_1,\Delta_1)$ and $(r_2,\nu_2,\Delta_2)$ of positive rank with the property that $(\nu_1 - \nu_2) \cdot D \leqslant 2$ for $D = A,C,A+C$, that $(\nu_1 - \nu_2) \cdot H \geqslant 0$, that $\Delta_1 \geqslant 0$ if $r_2 \geqslant r_1$, and that $\Delta_2 \geqslant 0$ if $r_1 \geqslant r_2$. Then the condition \begin{equation}\label{eq:discriminant-bound} \Delta \geqslant \frac{(\lambda+1)^2}{4\lambda} + \frac{t}{8} + \frac{1}{r}, \qquad \text{where $\lambda = \frac{u}{v} + \frac{e}{2}$}, \end{equation} is sufficient to ensure that $r_1 r_2(\Delta_1 + \Delta_2 - P(\nu_2 - \nu_1)) \geqslant 2$.
\end{lem} \begin{proof} Note that $r_1 + r_2 = r$, $r_1 \nu_1 + r_2 \nu_2 = r \nu$, and $r_1 \Delta_1 + r_2 \Delta_2 = r \Delta + \frac{r_1 r_2}{2r} (\nu_2 - \nu_1)^2$. Using this, in the case $r_2 \geqslant r_1$, we write \begin{align*} r_1 r_2(\Delta_1 + \Delta_2 - P(\nu_2 - \nu_1)) &= r_1(r_1 \Delta_1 + r_2 \Delta_2) + (r_2-r_1)r_1 \Delta_1 - r_1 r_2 P(\nu_2 - \nu_1) \\ & = (r_2-r_1)r_1 \Delta_1 + r_1 r \Delta - r_1 r_2(\tfrac{r_2}{2r} (\nu_2 - \nu_1)^2 - \tfrac{1}{2}(\nu_2-\nu_1) \cdot K + 1). \end{align*} As $\Delta_1 \geqslant 0$, it suffices to find an upper bound for $\frac{r_2}{2r} (\nu_2 - \nu_1)^2 - \frac{1}{2}(\nu_2 - \nu_1) \cdot K + 1$. Setting $\xi = r/r_2$ and writing \[\nu_1 - \nu_2 = aA + bB - \sum_i e_i E_i,\] this equals \[ \xi^{-1} \left( (a-\xi - \frac{e}{2}b)(b-\xi) - \frac{1}{2} \sum_i e_i(e_i-\xi) \right) + 1 - \xi. \] The lemma below shows that given (\ref{eq:conditions-on-H}), an upper bound is \[ \xi^{-1} \left( \frac{\xi^2(\lambda+1)^2}{4\lambda} + \frac{t \xi^2}{8} \right) + 1 - \xi, \] where $\lambda = \tfrac{u}{v} + \tfrac{e}{2} > 0$ since if $e=0$ then $H$ ample ensures that $u,v>0$. This yields the bound \[ r_1 r_2(\Delta_1 + \Delta_2 - P(\nu_2 - \nu_1)) \geqslant r_1 r \Delta - r_1 r_2 \left( \frac{\xi (\lambda+1)^2}{4\lambda} + \frac{t \xi}{8} + 1 - \xi \right), \] and, as $r_2 \xi = r$, we can guarantee that the right side is $\geqslant 2$ by assuming $\Delta$ satisfies (\ref{eq:discriminant-bound}). The argument in the case $r_1 \geqslant r_2$ is similar. \end{proof} Before stating and proving the lemma below, we introduce some useful notation. Set $\lambda = \frac{u}{v} + \frac{e}{2}$ as above and consider the change of variables \[ J = a + \frac{u}{v}b. \] The conditions $(\nu_1 - \nu_2) \cdot D \leqslant 2$ can be written as $a \leqslant 2$, $b \leqslant 2$, $a+b \leqslant 2$, and the condition $(\nu_1 - \nu_2) \cdot H \geqslant 0$ is $J \geqslant \sum_i \tfrac{d_i}{v} e_i$. Moreover, thinking of $J$ as fixed, \[ (a-\xi-\frac{e}{2}b)(b-\xi) = -\lambda b^2 + (J+\xi(\lambda-1))b - \xi(J-\xi) \] is a quadratic function with maximum value \begin{equation}\label{eq:parabola-max} \frac{(J-\xi(\lambda+1))^2}{4\lambda} \qquad \text{occurring at} \quad b=\frac{J+\xi(\lambda-1)}{2\lambda}. \end{equation} In particular, assuming $J \geqslant 0$ and the constraints $a \leqslant 2$, $b \leqslant 2$, and $a+b \leqslant 2$, an upper bound is obtained by taking $J=0$, as the constraints imply $J \leqslant 2 \max\{ u/v,1 \}$ and this upper bound for $J$ yields a smaller value since $\max\{u/v,1\} < \xi(\lambda+1)$. \begin{lem} Assume $H$ satisfies (\ref{eq:conditions-on-H}). Given the constraints $J \geqslant \sum_i \tfrac{d_i}{v} e_i$, $a \leqslant 2$, $b \leqslant 2$, and $a+b \leqslant 2$, we have \[ (a-\xi - \frac{e}{2}b)(b-\xi) - \frac{1}{2} \sum_{i=1}^t e_i(e_i-\xi) \leqslant \frac{\xi^2(\lambda+1)^2}{4\lambda} + \frac{t \xi^2}{8}. \] \end{lem} \begin{proof} For $J \geqslant 0$, the result follows by bounding the first term on the left side by setting $J=0$ in (\ref{eq:parabola-max}), as well as the fact that $-\frac{1}{2}\sum_i e_i(e_i-\xi) \leqslant t\xi^2/8$ as the maximum value of $-e_i(e_i-\xi)$ is $\xi^2/4$. Now suppose that $J < 0$. Then $\sum_i \tfrac{d_i}{v} e_i \leqslant J$ is also negative. By (\ref{eq:parabola-max}), \[ (a-\xi - \frac{e}{2}b)(b-\xi) \leqslant \frac{(J-\xi(\lambda+1))^2}{4\lambda} = \frac{\xi^2(\lambda+1)^2}{4\lambda} + \frac{\xi(\lambda+1) (-J)}{2\lambda} + \frac{J^2}{4\lambda}. 
\] Using the inequality $-J \leqslant \sum \tfrac{d_i}{v}(-e_i) \leqslant -\sum_{i \in I} \tfrac{d_i}{v} e_i$, where $I \subset \{1,\dots,t\}$ is the subset of indices for which $e_i$ is negative, as well as the Cauchy-Schwarz inequality to deduce $(\sum_{i \in I} \frac{d_i}{v} e_i)^2 \leqslant \#I \sum_{i \in I} (\frac{d_i}{v})^2 e_i^2$, the two terms involving $J$ are bounded above by \begin{equation}\label{eq:terms-involving-J} -\frac{\xi(\lambda+1)}{2\lambda} \sum_{i \in I} \frac{d_i}{v} e_i + \frac{\#I}{4\lambda} \sum_{i \in I} \left(\frac{d_i}{v}\right)^2 e_i^2. \end{equation} Bounding each $-e_i(e_i-\xi)$ where $e_i \geqslant 0$ by its maximum value $\xi^2/4$, we see that $-\frac{1}{2} \sum_i e_i(e_i-\xi)$ is bounded above by \begin{equation}\label{eq:exceptional-term} \frac{\xi}{2} \sum_{i \in I} e_i -\frac{1}{2} \sum_{i \in I} e_i^2 + \frac{(t-\#I) \xi^2}{8}. \end{equation} As $e_i < 0$ for $i \in I$, the conditions on $d_i/v$ in (\ref{eq:conditions-on-H}) imply that the sum of (\ref{eq:terms-involving-J}) and (\ref{eq:exceptional-term}) is bounded by $(t-\#I)\xi^2/8$, which completes the proof. \end{proof} By a similar argument, we can show the following statement, whose proof will only be sketched. The statement is similar to \cite[Corollary 15.4.6]{LeP97}, but the proof is more involved since the Picard group is not as simple as $\mathbb{Z}$. \begin{lem}\label{lem:strictly-ss} Suppose $H$ is an ample divisor satisfying (\ref{eq:H-pos-lin-combo}) and (\ref{eq:conditions-on-H}). Consider a complete family $\{F_y\}_{y\in Y}$ of semistable sheaves of a fixed class on $X$, parametrized by a smooth algebraic variety $Y$. When the discriminant is large, say as in (\ref{eq:discriminant-bound}), the set of points $y\in Y$ such that $F_y$ is strictly semistable forms a closed subset of codimension $\geqslant 2$. \end{lem} \begin{proof} For $y\in Y$ such that $F_y$ is strictly semistable, consider one of its Jordan-H\"older filtrations and let $\gr_i$ be the corresponding sub-quotients and $\nu_i=\nu(\gr_i)$. The proof of Lemma~\ref{large-hn} also shows that the set \begin{equation*} \left\{ y\in Y \middle\vert \begin{array}{l} F_y \mbox{ is strictly semistable and }\\ \mbox{$(\nu_i-\nu_j)\cdot D>2$ for some $i,j$ and some $D \in \mathcal{D} \cup \{C+A\}$} \end{array} \right\}\end{equation*} has codimension $\geqslant 2$. We next consider $y\in Y$ such that (a) $F_y$ is strictly semistable and (b) $(\nu_i-\nu_j)\cdot D\leqslant 2$, $\forall D\in \mathcal{D} \cup \{C+A\}$. Note that $\Ext^2(\gr_i,\gr_j)\cong \Hom(\gr_j,\gr_i\otimes K_X)^\vee=0$, for all $i,j$. Thus, $\Ext^2_-(F_y,F_y)=0$, which is calculated with respect to the fixed Jordan-H\"older filtration. Consider the relative flag scheme $\operatorname{Flag}$ of filtrations of the same type as the Jordan-H\"older filtration. We have \[\operatorname{T}_p\operatorname{Flag}\to \operatorname{T}_yY\twoheadrightarrow{}\Ext_+^1(F_y,F_y).\] Moreover, the vanishing of $\Ext^2(\gr_i,\gr_j)$ implies $\Ext^2_+(F_y,F_y)=0$. The codimension of the set of $y\in Y$ satisfying conditions (a) and (b) is bounded below by $\ext_+^1(F_y,F_y)\geqslant \sum_{i<j}-\chi(\gr_i,\gr_j)\geqslant 2$. \end{proof} Using this lemma, we can immediately strengthen Proposition~\ref{prop:bad-locus-res} by replacing ``semistable'' with ``stable''. \subsection{Locus of semistable sheaves not admitting Gaeta resolutions} This subsection is devoted to the proof of Theorem~\ref{thm:moduli-nongr-codim2}. First, we check that the subset $Z$ is indeed closed.
As in the construction of the moduli space using geometric invariant theory \cite{MumFogKir94}, let $\Quot(\oo_X(-m)^{\oplus N}, f)$ be the Quot scheme such that $M(f)$ is a good quotient of the semistable locus $\Quot^{\sstable}(\oo_X(-m)^{\oplus N}, f)$ with respect to the action by $\GL(N,\mathbb{C})$. According to Proposition~\ref{prop:special-GTR-criterion} and upper semicontinuity, the subset of quotient sheaves that do not admit Gaeta resolutions is closed and invariant under the action of $\GL(N,\mathbb{C})$. Under the good quotient map, the image of this subset is closed and is exactly $Z$. \footnote{If the Jordan-H\"older grading of a semistable sheaf admits a Gaeta resolution, the sheaf does as well.} Let \[G=\prod_{i=1}^n\GL(a_i,\mathbb{C}) ,\] which acts on $\rcone$ (\cite[\S~4.3]{Ped21}). Let $\bar{G}=G/\mathbb{C}^*(\id,\dots,\id)$. There is an induced action of $\bar{G}$ on $\rcone$. The universal cokernel (\ref{eq:univ-res}) induces a map \begin{align}\label{eq:don-f} \lambda_{\mathbb{F}}\colon \K(X)\to \Pic^{{G}}(\rcone), \quad w\mapsto \det\left(p_!\left([\mathbb{F}]\cdot q^*w\right)\right). \end{align} which will be shown to be an isomorphism. Since $\rcone$ is an open subset in a vector space and its complement has codimension $\geqslant 2$, $\Pic^{G}(\rcone)$ is isomorphic to the character group $\Char(G)\cong \mathbb{Z}^n$ and $\Pic^{\Bar{G}}(\rcone)\cong \Char(\Bar{G})\cong \mathbb{Z}^{n-1}$. The character groups can be explicitly described as follows: \begin{align*} \mathbb{Z}^n&\xrightarrow{\cong} \Char(G),\\ (x_1,\dots,x_n)&\mapsto [(M_1,\dots, M_n)\mapsto \prod _{i=1}^n\det(M_i)^{x_i}], \end{align*} and under this isomorphism, \begin{align}\label{eq:char-quotient-gp} \Char(\Bar{G})=\left\{(x_1,\dots,x_n)\in \mathbb{Z}^n \,\middle\vert \,\sum_{i=1}^n a_i x_i=0\right\}. \end{align} \begin{prop} The map $\lambda_{\mathbb{F}}$ in (\ref{eq:don-f}) is an isomorphism and it induces an isomorphism $f^\perp \xrightarrow{\cong} \Pic^{\Bar{G}}(\rcone)$. \end{prop} This is similar to \cite[Lemma 18.5.1]{LeP97} and \cite[Proposition 4.2]{Ped21}. \begin{proof} We calculate $\lambda_{\mathbb{F}}$ using the isomorphism $\Pic^{G}(\rcone)\cong \Char(G)\cong \mathbb{Z}^n$. In $\K(X)$, for $j=1,\dots,n$, let $\mathbf{e}_j$ be the class $[\E_j^\vee]$ of the dual bundle. Then the $\mathbf{e}_j$ form a basis of $\K(X)$. Let $V_j$ be a complex vector space of dimension $a_j$ for $j=1,\dots,n$. We can calculate $\lambda_{\mathbb{F}}(\mathbf{e}_j)$ using the $G$-equivariant short exact sequence (\ref{eq:univ-res}): \begin{align*} \lambda_{\mathbb{F}}(\mathbf{e}_j) =\det\left(-\sum_{j\leqslant i\leqslant d}[\oo_\rcone \otimes V_i\otimes \Hom(\E_j,\E_i)]+\sum_{\max\{j,d+1\}\leqslant i\leqslant n}[\oo_\rcone\otimes V_i\otimes \Hom(\E_j,\E_i)]\right). \end{align*} The map $\lambda_{\mathbb{F}}$ takes the following matrix form: \[ \left[\operatorname{sgn}\left(i-d-\frac{1}{2}\right)\chi(\E_j,\E_i)\right]_{1\leqslant i,j\leqslant n}. \] Since the matrix is lower triangular with $\pm 1$ on the diagonal, $\lambda_{\mathbb{F}}$ is an isomorphism. For $w=\sum_i w_i\mathbf{e}_i\in \K(X)$, $w\in f^\perp$ if and only if $\sum_i w_i\chi (\E_i,f)=0$, if and only if $\lambda_\mathbb{F}(w)\in \Char(\Bar{G})$. \end{proof} Let $\rcone^{\sstable}\subset \rcone$ denote the subset of cokernels which are semistable. According to Proposition~\ref{prop:bad-locus-res}, $\rcone \setminus \rcone^{\sstable}$ has codimension $\geqslant 2$ in $\rcone$. 
Let $\rcone^{\stable}\subset \rcone^{\sstable}$ denote the subset of cokernels which are stable. The coarse moduli property provides a map \[\pi\colon \rcone^{\stable} \to M(f),\] which factors through $U=M(f)\setminus Z$. According to Lemma~\ref{lem:strictly-ss}, the restriction maps induce isomorphisms $\Pic^{{G}}(\rcone)\cong \Pic^{{G}}(\rcone^{\stable})$ and $\Pic^{\Bar{G}}(\rcone)\cong \Pic^{\Bar{G}}(\rcone^{\stable})$. The Donaldson map $\lambda_M\colon f^\perp \to \Pic (M(f))$ is an isomorphism according to Theorem~\ref{thm:yoshioka}, which will be proved at the end of the subsection. By the functoriality of the determinant line bundle construction, we have the following commutative diagram: \begin{equation*} \begin{tikzcd} f^\perp \ar[rr,"\lambda_{\mathbb{F}}","\cong"below] \ar[d,"\lambda_M"left,"\cong"right] & & \Pic^{\Bar{G}}(\rcone)\ar[d,"\cong"]\\ \Pic(M(f)) \ar[r,two heads] & \Pic(U) \ar[r] & \Pic^{\Bar{G}}(\rcone^{\stable}). \end{tikzcd} \end{equation*} It is clear from the diagram that the restriction map $\Pic(M(f))\to \Pic(U)$ is also injective. Since the polarization is general, $M(f)$ is locally factorial (\cite[Corollary 3.4]{Yos96}). Therefore, $Z$ has codimension $\geqslant 2$ in $M(f)$. We have proven Theorem~\ref{thm:moduli-nongr-codim2}. \begin{proof}[Proof of Corollary \ref{cor:gen-stable-prop}] The result follows directly from Propositions \ref{prop:unirat-large-L}, \ref{prop:special-GTR-criterion}(b), \ref{prop:cokernel-properties}(b), and Theorem~\ref{thm:moduli-nongr-codim2}. \end{proof} It remains to prove Theorem~\ref{thm:yoshioka}. \begin{proof}[Proof of Theorem~\ref{thm:yoshioka}]Let $f=(r,c_1,c_2)\in \K(S)$ be the corresponding class. According to \cite[Corollary 3.4]{Yos96}, there is a surjective map $\mathbb{Z}\oplus \Pic S\cong f^\perp \twoheadrightarrow \Pic M(f)$. On the other hand, under our assumption on $c_2$, $\Pic M(f)$ contains a subgroup $\mathbb{Z}\oplus \Pic S$, as shown in \cite[Example 8.1.6]{HuyLeh10}. Since $\Pic M(f)$ is thus a quotient of the free abelian group $f^\perp$ of finite rank and contains a free subgroup of the same rank, the surjection above must be an isomorphism. \end{proof} \section{Strange duality}\label{sect:sd} In this section, we review Le Potier's strange duality conjecture for rational surfaces over $\mathbb{C}$ and use our study of Gaeta resolutions to prove Theorem \ref{thm:sd-injective}, which states that the strange morphism is injective in various cases on $\mathbb{P}^2$ and on $X$, an admissible two-step blowup of $\mathbb{F}_e$. The argument is similar to what was shown on $\bp^2$ in \cite{BerGolJoh16}. We assume Theorem~\ref{thm:finite-quot-scheme}, which will be proved in \S~\ref{sect:finite-quot}. \subsection{Strange morphism}\label{subsect:strange-mor} Let $S$ be a smooth projective rational surface over $\mathbb{C}$ and $H$ be an ample divisor. Let $\sigma$ and $\rho$ denote two classes in the Grothendieck group $\K(S)$. On $\K(S)$, there is a pairing given by $\chi(\sigma\cdot\rho)$. Let $M(\sigma)$ and $M(\rho)$ be the moduli spaces of $H$-semistable sheaves of class $\sigma$ and $\rho$ respectively. For the moment, suppose there are no strictly semistable sheaves, namely $M(\sigma)=M^{\operatorname{s}}(\sigma)$, and there is a universal family $\mathcal{W}$ over $M(\sigma)\times S$. Let $F$ be a sheaf of class $\rho$; then we have a determinant line bundle on $M(\sigma)$, \begin{equation*} \Theta_\rho:=\det\left({p}_{!}\left(\mathcal{W}\stackrel{L}{\otimes} {q}^*F\right)\right)^{*}. \end{equation*} Here, ${p}$ and ${q}$ are projections from $M(\sigma)\times S$ to the first and second factors respectively.
The isomorphism class of $\Theta_\rho$ does not depend on $F$ but only on its class in $\K(S)$, so we are justified in using the subscript $\rho$. Two universal families may differ by a line bundle pulled back from $M(\sigma)$, but if we assume $\chi(\sigma\cdot \rho)=0$, then they will provide isomorphic determinant line bundles on the moduli space. Thus, from now on, we assume $\chi(\sigma \cdot \rho)=0$, so that $\Theta_\rho$ is independent of the choice of $\mathcal{W}$. Even if there does not exist a universal family, we can still define $\Theta_\rho$ by carrying out the construction on the Quot scheme coming from the GIT construction, where there is a universal family, and then showing that it descends to $M(\sigma)$. If there are strictly semistable sheaves of class $\sigma$, we need to further require $\rho\in \{1,h,h^2\}^{\perp \perp}$ for $h=[\oo_H]\in \K(S)$; see \cite[(2.9)]{LP92}. These conditions will be satisfied in our setting. Similarly, we can construct a determinant line bundle \[\Theta_\sigma\to M(\rho).\] Orthogonal classes $\sigma$ and $\rho$ are {\it candidates for strange duality} if the moduli spaces $M(\sigma)$ and $M(\rho)$ are non-empty, and if the following conditions on pairs $(W,F) \in M(\sigma) \times M(\rho)$ are satisfied: \begin{enumerate}[(a)]\label{cond:sd-candidates} \item $h^2(W \otimes F) = 0$ and $\TOR^1(W,F)=\TOR^2(W,F)=0$ for all $(W,F)$ away from a codimension $\geqslant 2$ subset in $M(\sigma)\times M(\rho)$, and \item $h^0(W \otimes F)=0$ for some $(W,F)$. \end{enumerate} Under these conditions, there is a line bundle \[\Theta_{\sigma,\rho}\to M(\sigma)\times M(\rho)\] with a canonical section whose zero locus is given by \[\{\, (W,F) \mid h^0(W \otimes F) > 0 \,\} \subset M(\sigma) \times M(\rho), \] (see \cite[Proposition 9]{LP05}). The see-saw theorem implies that \[\Theta_{\sigma,\rho}\cong \Theta_\rho\boxtimes \Theta_\sigma,\] see \cite[Lemme 8]{LP05}. Then, using the K\"{u}nneth formula, the canonical section of $\Theta_{\sigma,\rho}$ induces a linear map \[\operatorname{SD}_{\sigma,\rho}: H^0(M(\rho),\Theta_{\sigma})^* \rightarrow H^0(M(\sigma),\Theta_\rho)\] that is well-defined up to a non-zero scalar. Following Le Potier, we call this the {\em strange morphism}. \begin{conj}[Le Potier] If $\mathrm{SD}_{\sigma,\rho}$ is nonzero, then it is an isomorphism. \end{conj} We focus on the case when $S$ is $\bp^2$ or $X$, an admissible blowup of $\mathbb{F}_e$. For the former, we take $H$ to be the hyperplane class, while for the latter, we assume $H$ is general and satisfies (\ref{eq:H-pos-lin-combo}, \ref{eq:conditions-on-H}). Let \[ \rho = (1,0,1-\ell) \] be the numerical class of an ideal sheaf of $\ell \geqslant 1$ points, so that $M(\rho) = S^{[\ell]}$ is the Hilbert scheme of points on $S$. Clearly $\rho\in \{1,h,h^2\}^{\perp \perp}$. Every numerical class orthogonal to $\rho$ has the form \[ \sigma = (r,L,\chi=r\ell). \] We assume that $r$ and $\ell$ are fixed, that $r \geqslant 2$, and that \[ \text{$L$ is sufficiently positive}. \] The condition on $r$ ensures that general sheaves in $M(\sigma)$ are locally free; it is also no restriction in the study of strange duality, since strange duality is known over all surfaces when $\sigma$ and $\rho$ both have rank one. Positivity assumptions on $L$ are required to apply many results in this paper, and we assume here that $L$ is positive enough that the hypotheses of every result we need are met.
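For orientation, we sketch why orthogonality to $\rho$ pins down the Euler characteristic in the normalization $\sigma=(r,L,\chi=r\ell)$ above; the computation is only illustrative, and it assumes a representative $W$ of class $\sigma$ that is locally free near $Z$, so that the Tor terms vanish and $\chi(W\otimes\oo_Z)=r\ell$. Writing $[I_Z]=[\oo_S]-[\oo_Z]$ in $\K(S)$, we have \[ \chi(\sigma\cdot\rho)=\chi(W\otimes I_Z)=\chi(W)-\chi(W\otimes\oo_Z)=\chi-r\ell, \] so the orthogonality condition $\chi(\sigma\cdot\rho)=0$ is exactly the requirement $\chi=r\ell$.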
A precise statement of the positivity conditions we impose on $L$ can be found in the appendix. In particular, since $L$ is sufficiently positive, $\sigma$ admits Gaeta resolutions by Proposition~\ref{prop:chern-has-gr}(a), the discriminant condition (\ref{eq:discriminant-bound}) holds, and $M(\sigma)$ is non-empty by Proposition \ref{prop:bad-locus-res}, so general sheaves in $M(\sigma)$ admit Gaeta resolutions by Proposition~\ref{prop:unirat-large-L} or Theorem \ref{thm:moduli-nongr-codim2}. In this situation, conditions (a) and (b) on p.~\pageref{cond:sd-candidates} are satisfied. Let $W\in M(\sigma)$ and $I_Z\in M(\rho)$ be such that $\operatorname{sing} W\cap Z=\emptyset$, which holds away from codimension $r+1$. Under this condition $\TOR_i(W,I_Z)=0$ for $i=1,2$. Furthermore, \[h^2(W\otimes I_Z)=h^2(W)=\hom(W,K_S)\] vanishes by semistability. Condition (b) also holds by Proposition~\ref{orth-ker-quot}. Thus, $\sigma$ and $\rho$ are candidates for strange duality, and we will study the strange morphism \[\SD_{\sigma,\rho}\colon H^0(S^{[\ell]},\Theta_\sigma)^*\to H^0(M(\sigma),\Theta_\rho).\] We begin with the determinant line bundle $\Theta_\sigma$ on the Hilbert scheme of points. \subsection{Determinant line bundles on the Hilbert scheme of points}\label{ss:det-line-bundles} We first review some general results about line bundles on the Hilbert scheme of points on surfaces. The Hilbert scheme of points, $S^{[\ell]}$, is a resolution of singularities of the symmetric product $S^{(\ell)}$. The resolution, which we denote by $\pi\colon S^{[\ell]}\to S\syml$, is called the {\em Hilbert-Chow morphism}. Given a line bundle $M$ on $S$, let $M^{\boxtimes \ell}$ be $\otimes_{i=1}^\ell \operatorname{pr}_i^*M$ on the $\ell$-fold product $S^\ell=S\times \cdots \times S$. There is an $\frak{S}_\ell$-action on $S^\ell$ such that $M^{\boxtimes \ell}$ is $\frak{S}_\ell$-equivariant. The line bundle $M^{\boxtimes \ell}$ descends onto $S^{(\ell)}$, giving a line bundle $M\syml$. We denote its pullback to $S\hilbl$ as \[M_\ell=\pi^* M\syml.\] Via this construction, we can view $\Pic S$ as a subgroup of $\Pic S\hilbl$, by sending $M$ to $M_\ell$. Fogarty~\cite{Fog73} showed that under this inclusion \[\Pic S\hilbl\cong \Pic S\oplus \mathbb{Z}\frac{E}{2}.\] Here, $E$ is the exceptional divisor of the Hilbert-Chow morphism, which parametrizes non-reduced subschemes. Furthermore, if $M$ is ample, then $M_\ell$ is nef, and the canonical divisor on $S^{[\ell]}$ is $K_{S^{[\ell]}} = (K_S)_{\ell}$. The determinant line bundle on $S^{[\ell]}$ induced by $\sigma$ is \[ \Theta_{\sigma} = L_{\ell} - \frac{r}{2} E. \] By the Kodaira vanishing theorem, if $\Theta_\sigma-K_{S^{[\ell]}}=L_{\ell}-(K_{S})_{\ell}-\frac{r}{2}E$ is ample on $S^{[\ell]}$, then $\Theta_\sigma$ has no higher cohomology. Results of Beltrametti, Sommese, Catanese, and G\"{o}ttsche \cite{BelSom88,CatGot90} show that if $M$ is a line bundle on $S$, then $M_{\ell} - \frac{1}{2} E$ is nef if $M$ is ($\ell-1$)-very ample and very ample if $M$ is $\ell$-very ample. Thus, we deduce the following: \begin{lem} \label{lem:high-coh-theta-0} Suppose $L$ is sufficiently positive, for instance $L -K_S$ is the tensor product of an $\ell$-very ample line bundle and $r-1$ ($\ell$-1)-very ample line bundles. Then $\Theta_{\sigma}$ has vanishing higher cohomology. \end{lem} Thus, the vanishing of the higher cohomology of $\Theta_\sigma$ follows from the assumption that $L$ is sufficiently positive. 
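For the reader's convenience, we also sketch how the decomposition in Lemma~\ref{lem:high-coh-theta-0} enters; the factors $M^{(0)},\dots,M^{(r-1)}$ below are notation introduced only for this sketch. Writing $L-K_S=M^{(0)}\otimes M^{(1)}\otimes\cdots\otimes M^{(r-1)}$ with $M^{(0)}$ $\ell$-very ample and $M^{(1)},\dots,M^{(r-1)}$ $(\ell-1)$-very ample, and using that $M\mapsto M_\ell$ is additive and $K_{S^{[\ell]}}=(K_S)_\ell$, we obtain \[ \Theta_\sigma-K_{S^{[\ell]}}=(L-K_S)_\ell-\frac{r}{2}E=\Big(M^{(0)}_\ell-\tfrac{1}{2}E\Big)+\sum_{i=1}^{r-1}\Big(M^{(i)}_\ell-\tfrac{1}{2}E\Big), \] where the first summand is very ample and the remaining summands are nef by \cite{BelSom88,CatGot90}; hence $\Theta_\sigma-K_{S^{[\ell]}}$ is ample and the Kodaira vanishing theorem applies.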
A precise statement of a sufficient condition on $L$ is (\ref{eq:last-condition}) in the appendix. \subsection{Injectivity of the strange morphism} To prove Theorem \ref{thm:sd-injective}, we will make use of a Quot scheme argument. As above, let $\sigma = (r,L,r\ell)$ and $\rho = (1,0,1-\ell)$ be orthogonal classes, where $\ell \geqslant 1$ and $r \geqslant 2$ are fixed, so sheaves of class $\sigma$ are expected to be locally free, and we assume $L$ is sufficiently positive in the sense explained in the appendix. Consider the class \[v = \sigma + \rho.\] The first part of the argument is to show that if $V$ is a vector bundle of class $v$ that admits a general Gaeta resolution, then $\Quot(V^*,\rho)$ is finite and reduced, which will be proved in \S~\ref{sect:finite-quot}. The strategy is to show that the relative Quot scheme over the space of Gaeta resolutions has relative dimension 0. This is known for $\bp^2$ by \cite{BerGolJoh16}, so we write the argument for $X$, an admissible blowup of $\mathbb{F}_e$, though it works for $\bp^2$ as well. Since $\sigma$ admits Gaeta resolutions, an easy calculation using Proposition \ref{prop:chern-has-gr}(a) shows that $v$ also admits Gaeta resolutions, with the same exponents $\gamma_i$ and $\gamma_j$ and with $\alpha_1,\alpha_2,\alpha_3$ each larger by $\ell$ and $\alpha_4$ smaller by $\ell-1$. The starting point is the following result. \begin{lem}\label{lem:isolated-quotient} There is a vector bundle $V$ of class $v$ such that $V$ has a Gaeta resolution and $\Quot(V^*,\rho)$ contains an isolated point. \end{lem} \begin{proof} Choose a vector bundle $W$ of class $\sigma$ that admits a Gaeta resolution and a general ideal sheaf $I_Z$ of class $\rho$ such that $H^0(W \otimes I_Z) = 0$, which is possible by Proposition \ref{orth-ker-quot}. Since $h^0(W) = r\ell \geqslant \ell$ and $Z$ is general, we can choose a quotient $W \to \oo_Z$ such that $H^0(W) \to H^0(\oo_Z)$ is surjective. Let $J$ denote the kernel of $W \to \oo_Z$. Since $L$ is sufficiently positive, $\Ext^1(J,\oo)$ is large (for instance, (\ref{eq:f-admits-gr}) suffices). Then a general extension $V$ of $J$ by $\oo$ is locally free (see the proof of \cite[Lemma 5.9]{BerGolJoh16}) and there are short exact sequences \begin{align*} &0 \to J \to W \to \oo_Z \to 0, \\ &0\to \oo \to V \to J \to 0, \end{align*} which together give a long exact sequence $0 \to \oo \to V \to W \to \oo_Z \to 0$ whose dual \[ 0 \to W^* \to V^* \to I_Z \to 0 \] is a point of $\Quot(V^*,\rho)$. As the tangent space at this point is $\Hom(W^*,I_Z) \cong H^0(W \otimes I_Z)=0$, this is an isolated point of $\Quot(V^*,\rho)$. Thus, all that remains is to show that $V$ admits a Gaeta resolution. We do this in two steps using Proposition~\ref{prop:special-GTR-criterion}, by first showing that $J$ admits a Gaeta resolution. First, we see that $H^p(J)=0$ for $p=1,2$ using the corresponding vanishings for $W$ and the fact that $H^0(W) \to H^0(\oo_Z)$ is surjective. Similarly, $H^1(J(E_i))=0$ for all $i\in S_0 \cup S_1$, as the induced map $H^0(W(E_i)) \to H^0(\oo_Z(E_i))$ is still surjective. Finally, the vanishing of $H^p(J(D))$ for $p \ne 1$ and $D$ one of the divisors appearing in Proposition~\ref{prop:special-GTR-criterion} (b.ii) follows from the same vanishing for $W$ and the vanishing of the higher cohomology of $\oo_Z$. Second, since $H^p(\oo)=0$ for $p=1,2$, $H^p(\oo_{E_i})=0$. Using the vanishings for $J$, we obtain that $H^p(V)=0$ for $p=1,2$ and that $H^1(V(E_i))=0$ for all $i \in S_0 \cup S_1$. 
For each divisor $D$ appearing in Proposition~\ref{prop:special-GTR-criterion} (b.ii), $H^p(\oo(D))=0$ for all $p$, so $H^p(V(D))\cong H^p(J(D))$ and this vanishes for $p \ne 1$. Therefore, $V$ also admits a Gaeta resolution. \end{proof} The lemma establishes that the relative Quot scheme contains a point with vanishing relative tangent space, which can be deformed to give an open set with this property. The main technical task is to prove that the relative Quot scheme cannot have any other components of the same dimension, which is carried out in the next section for $S = X$, and which was done for $\bp^2$ in \cite{BerGolJoh16}. The proof requires strong positivity assumptions on $L$. The second part of the argument is to count the points of $\Quot(V^*,\rho)$ and compare the result to $h^0(S^{[\ell]},\Theta_\sigma)$. In previous work \cite{GolLin22}, we showed that if the Quot scheme is finite and reduced, then the number of points is \[ \# \Quot(V^*,\rho) = \int_{S^{[\ell]}} c_{2\ell}({V}^{[\ell]}), \] where ${V}^{[\ell]}$ is the tautological vector bundle defined as in (\ref{eq:taut-bundle}), whose rank is $(r+1)\ell$ and whose fiber at a point $[I_Z] \in S^{[\ell]}$ is $H^0(V \otimes \oo_Z)$. By the following theorem, this top Chern class calculates the Euler characteristic of the determinant line bundle: \begin{thm}\label{numbers-match} Let $S$ be a smooth projective surface with $\chi(\oo_S)=1$. Then \[ \int_{S^{[\ell]}} c_{2\ell}({V}^{[\ell]}) = \chi(S^{[\ell]},\Theta_{\sigma}). \] \end{thm} Over Enriques surfaces, this statement was proved by Marian-Oprea-Pandharipande \cite[Proposition 3.2]{MarOprPan22}. In general, it was conjectured by Johnson and shown to be equivalent to another conjecture (\cite[Conjecture 1.3, Theorem 4.1]{Joh18}). The second conjecture was proven by combining the works of Marian-Oprea-Pandharipande \cite{MarOprPan19,MarOprPan22} and G\"ottsche-Mellit \cite{GotMel22}. See for example \cite[Corollary 1.2]{GotMel22}. Now, using the fact that, for $L$ sufficiently positive, $\Theta_\sigma$ has vanishing higher cohomology, we can deduce the injectivity of the strange morphism. \begin{proof}[Proof of Theorem \ref{thm:sd-injective}] Above, we checked the conditions for the strange morphism $\mathrm{SD}_{\sigma,\rho}$ to be well-defined. Let $V$ be a vector bundle of class $\sigma + \rho$ that admits a general Gaeta resolution. By Theorem \ref{thm:finite-quot-scheme}, $\Quot(V^*,\rho)$ is finite and reduced and the points of $\Quot(V^*,\rho)$ are short exact sequences \[ 0 \to W_i^* \to V^* \to I_{Z_i} \to 0 \] for sheaves $W_i$ of class $\sigma$ that are locally free and semistable. Since the Quot scheme is finite and reduced, the tangent space $\Hom(W_i^*,I_{Z_i}) \cong H^0(W_i \otimes I_{Z_i})$ is 0, while if $i \ne j$ then $\Hom(W_i^*,I_{Z_j}) \ne 0$ follows from semistability, as otherwise the induced map $W_i^* \to V^* \to I_{Z_j}$ would be zero, hence $W_i^* \to V^*$ would factor through $W_j^* \to V^*$, yielding an equality $W_i^*=W_j^*$ as subsheaves of $V^*$, identifying the two points of the Quot scheme. It follows that the hyperplanes in $H^0(M(\rho),\Theta_{\sigma})$ determined by the points $I_{Z_i}$ map under $\mathrm{SD}_{\sigma,\rho}$ to linearly independent lines $\Theta_{Z_i} \in \bp H^0(M(\sigma),\Theta_{\rho})$.
Thus, the rank of $\mathrm{SD}_{\sigma,\rho}$ is at least \[ \# \Quot(V^*,\rho) = \int_{S^{[\ell]}} c_{2\ell}({V}^{[\ell]}) = \chi(S^{[\ell]},\Theta_{\sigma}) = h^0(S^{[\ell]},\Theta_{\sigma}), \] namely $\mathrm{SD}_{\sigma,\rho}$ is injective. \end{proof} \section{Finite Quot schemes}\label{sect:finite-quot} This section is devoted to the proof of Theorem~\ref{thm:finite-quot-scheme}. We will show that for $X$ an admissible blowup of $\mathbb{F}_e$ over $k$, under appropriate conditions, Quot schemes are indeed finite and reduced. This is an extension of the corresponding result in \cite{BerGolJoh16} for $\bp^2$. We then apply our previous work \cite{GolLin22} to enumerate the finite Quot scheme. We first sketch the ideas. For a vector bundle $V$ admitting a Gaeta resolution, instead of directly studying ideal sheaf quotients $V^* \twoheadrightarrow I_Z$, we replace $V^*$ by the dual of the Gaeta resolution of $V$ and $I_Z$ by the canonical map $\oo\twoheadrightarrow \oo_Z$. Namely, we consider commutative diagrams of the form (\ref{quot-comm-diag}), which we can view as a family over an open subset $R$ of the resolution space $R_v$ (\ref{eq:dual-res-sp}). We prove in \S~\ref{subsect:dim-count} that the main component of the family has the same dimension as $R$, hence we deduce that an open subscheme of the main component is isomorphic to a relative Quot scheme over an open subset of $R$. The fibers of the relative Quot scheme have dimension 0, and generically the relative Zariski tangent space has dimension $0$. We conclude that when $V$ is general with appropriate numerical constraints, the Quot scheme is finite and reduced. We prove Theorem~\ref{thm:finite-quot-scheme} in \S~\ref{subsect:proof-finite-quot}, except for leaving the technical dimension count argument for \S~\ref{subsect:dim-count}. \subsection{Finite Quot schemes}\label{subsect:proof-finite-quot} Over $X$ an admissible blowup of $\mathbb{F}_e$, as in the previous section, let $\rho=(1,0,1-\ell)$ be the class of an ideal sheaf of $\ell$ points for some fixed $\ell \geqslant 1$ and $\sigma = (r,L,\chi=r\ell)$ be an orthogonal class for some fixed $r \geqslant 2$ and $L$ satisfying the positivity conditions in the appendix. In particular, the need for the positivity condition (\ref{eq:first-three-conditions}) will be seen in the proof of Proposition \ref{dim-comm-diag}. Then $v = \sigma + \rho$ admits Gaeta resolutions, with exponents denoted $\alpha_1,\{ \gamma_j \}_{j \in S_1}, \alpha_2,\alpha_3,\{ \gamma_i \}_{i \in S_0}, \alpha_4$ as in (\ref{gtr-blowup}). Since $L$ is sufficiently positive, all these exponents are large except for $\alpha_4 = (r-1)\ell + 1$. \subsubsection{Morphisms vs. chain maps} Consider a vector bundle $V$ of class $v$ that admits a general Gaeta resolution. Then $V$ is locally free, so its dual has a resolution \begin{equation}\label{eq:dual-gr} 0 \to V^* \to \Lambda \xrightarrow{\varphi} \Omega \to 0, \end{equation} where \begin{align*}\Lambda &= \oo^{\alpha_4} \oplus \bigoplus_{i \in S_0} \oo(E_i)^{\gamma_i} \oplus \oo(A)^{\alpha_3} \oplus \oo(C)^{\alpha_2}, \mbox{ and }\\ \Omega &= \bigoplus_{j \in S_1} \oo(C+A-E_j)^{\gamma_j} \oplus \oo(C+A)^{\alpha_1}. \end{align*} We wish to study quotients of $V^*$ of class $\rho$, which are expected to be ideal sheaves of points. Instead of considering maps $V^* \twoheadrightarrow I_Z$, we replace them by chain maps of complexes using (\ref{eq:dual-gr}) and the short exact sequence $0 \to I_Z \to \oo \xrightarrow{1_Z} \oo_Z \to 0$. 
Namely, we consider commutative diagrams \begin{equation}\label{quot-comm-diag} \xymatrix{ \Lambda \ar[r]^\varphi \ar[d]_\pi & \Omega \ar[d]^\psi \\ \oo \ar[r]^{1_Z} & \oo_Z }\end{equation} where $\varphi$ is surjective and $Z$ has length $\ell$. Letting $R \subset R_v$ denote the open subset of the space of Gaeta resolutions for which the cokernel $V$ is locally free, dualization yields an inclusion \begin{equation}\label{eq:dual-res-sp} R \subset \mathbb{P}\Hom(\Lambda,\Omega)=:\mathbb{P} \end{equation} and a universal sequence \begin{equation}\label{univ-dual-gtr} 0\to \mathcal{V}^*\to \pi_X^*\Lambda\to \pi_R^*\mathcal{O}_{R}(1) \otimes \pi_X^*\Omega \to 0, \end{equation} where $\pi_R$ and $\pi_X$ denote the projections from $R\times X$ to the factors. Let $\Omega^{*[\ell]}$ be the Fourier-Mukai transform of $\Omega^*$ over $X^{[\ell]}$ and \[ \Xi = \{ (\varphi,\psi,\pi) \mid \psi \circ \varphi = 1_Z \circ \pi \mbox{ up to a non-zero scalar}\} \subset \bp \times \mathbb{P}(\Omega^{*[\ell]}) \times \bp^{\alpha_4-1} \] be the locus of diagrams (\ref{quot-comm-diag}), which are {\em commutative}, with the reduced induced scheme structure. We will show in Proposition~\ref{dim-comm-diag} that \[\dim \Xi=\dim \mathbb{P}.\] \subsubsection{Relative Quot schemes} On the other hand, we consider the relative Quot scheme $\Quot_{\pi_R}:=\Quot_{\pi_R}(\mathcal{V}^*, \rho)$. Letting \[ U_Q\subset \Quot_{\pi_R} \] denote the subset where the relative Zariski tangent space has dimension $0$, $U_Q$ is open (by upper semicontinuity), non-empty (by Lemma \ref{lem:isolated-quotient}), and smooth. We claim that quotients in $U_Q$ can only be ideal sheaves of points. A sheaf $F$ of class $\rho$ could take the following forms: $F$ is isomorphic to an ideal sheaf $I_Z$, $F$ contains dimension $0$ torsion, or $F$ contains dimension $1$ torsion. The second case cannot occur as it violates our assumption on the relative tangent space. In the third case, let $T$ denote the torsion subsheaf of $F$, so that $F/T$ is torsion free. Then the quotient $F\twoheadrightarrow F/T$ provides a nonzero morphism $V^*\to \oo(-D)$ where $D$ is a non-trivial effective curve given by $c_1(T)$. But this cannot happen under the conditions in Proposition~\ref{prop:no-sec-van-curves}, which are guaranteed when $L$ is sufficiently positive, see the appendix. \subsubsection{Finite Quot schemes} We next define a regular map \[\iota\colon U_Q \to \Xi\] over $\mathbb{P}$ by associating to each quotient $V^* \twoheadrightarrow I_Z$ a diagram of the form (\ref{quot-comm-diag}). It is enough to define it on the level of functor of points. Furthermore, it is enough to consider morphisms from an affine scheme. Let $S$ be the spectrum of some $k$-algebra. A morphism $s\colon S\to \Quot_{\pi_R}$ is equivalent to a family of quotients $s_X^*\mathcal{V}^*\to \mathcal{I}_\mathscr{Z}$ over $S\times X$. Here, $s_X=s\times \id_X$ and $\mathcal{I}_\mathscr{Z}$ is the ideal sheaf of a subscheme $\mathscr{Z}\subset S\times X$. We denote the projection maps on $S \times X$ by $\pi_S$ and $\pi_X$. Applying the functor $\lHom_{\pi_S}(-, \oo_{S\times X})$ to the pull-back of the sequence (\ref{univ-dual-gtr}) via $s_X$, we have the following exact sequence \begin{align*} 0&\to \lHom_{\pi_S}(\pi_X^*\Omega, \oo_{S\times X})\to \lHom_{\pi_S}(\pi_X^*\Lambda, \oo_{S\times X})\to\lHom_{\pi_S}(s_X^*\mathcal{V}^*, \oo_{S\times X}) \\ &\to \lExt^1_{\pi_S}(\pi_X^*\Omega, \oo_{S\times X}). 
\end{align*} By our choice of $\Omega$, the first term and the last term are zero. Therefore, the family $s_X^*\mathcal{V}^*\to \mathcal{I}_\mathscr{Z}$ can be completed to a commutative diagram \begin{equation*} \begin{tikzcd} 0 \arrow{r} & s_X^*\mathcal{V}^* \arrow{r}\arrow{d} & \pi_X^*\Lambda \arrow{r}\arrow{d} & \pi_X^*\Omega\arrow{r}\arrow{d} &0\\ 0 \arrow{r} & \mathcal{I}_\mathscr{Z} \arrow{r} & \oo_{S\times X} \arrow{r} & \oo_{\mathscr{Z}} \arrow{r} & 0 \end{tikzcd} \end{equation*} Then the square on the right provides a morphism $S\to \Xi$. Clearly, the square uniquely determines the left-most vertical morphism. We have obtained an injective morphism $\iota\colon U_Q\to \Xi$ whose image is contained in the unique component with the maximal dimension. The complement $\Xi\setminus \im \iota$ of the image has dimension $<\dim \mathbb{P}$. Then $U\subset R$, the complement of the image of $\Xi\setminus \im \iota\to R$, is non-empty and open in $R$. For each sheaf $V$ parametrized by $U$, $V^*$ has an isolated quotient. By the definition of $U$, each fiber of $\Xi$ over $[V] \in U$, which parametrizes all non-zero maps of the form $V^* \to I_Z$, is entirely contained in the image of $\iota$. In particular, the maps have to be surjective. Therefore, $\iota$ induces an isomorphism $U_Q|_U\to \Xi|_U$. We have thus proved Theorem \ref{thm:finite-quot-scheme}(a) for every sheaf $V$ parametrized by $U$. \subsubsection{Genericity of kernels and cokernels} We have seen that Quot schemes for $V$ in the nonempty open set $U \subset R_v$ are finite and reduced and that the quotient sheaves are all ideal sheaves $I_Z$. Moreover, in the proof of Lemma \ref{lem:isolated-quotient}, the isolated point $[V^* \twoheadrightarrow I_Z]$ of $\Quot(V^*,\rho)$ has the property that $Z$ is general, hence we can shrink $U$ further if necessary to ensure all the ideal sheaves arising as quotients are general. Similarly, by Proposition~\ref{prop:unirat-large-L}, general sheaves in $M(\sigma)$ admit Gaeta resolutions, and since $M(\sigma)$ is nonempty, we may choose $W$ in the proof of Lemma \ref{lem:isolated-quotient} to be semistable, hence the isolated quotient $[V^* \twoheadrightarrow I_Z]$ has kernel $W^*$, which is also semistable. As semistability is an open condition in families, and as the relative Quot scheme can be viewed as a family of sheaves with invariants $\sigma^*$, there is a non-empty open set of quotients in the relative Quot scheme for which the kernel is semistable. Shrinking $U$ if necessary, we may assume that all kernels of the finite Quot schemes are semistable. This proves Theorem \ref{thm:finite-quot-scheme}(b). \subsubsection{Length of finite Quot schemes} According to \cite[Proposition 1.2]{GolLin22}, when the Quot scheme $\Quot(V^*,\rho)$ is finite and reduced, it is isomorphic to the moduli space $S(V^*,\rho)$ of limit stable pairs, which consist of torsion free sheaves $F$ of class $\rho$ together with nonzero morphisms $V^* \to F$. Then the virtual fundamental class of $S(V^*,\rho)$ agrees with the fundamental class, and its degree is as stated in Theorem \ref{thm:finite-quot-scheme}(c), by \cite[Theorem 1.1]{GolLin22}. We refer the reader to \cite{Lin18} for foundational material on the moduli space of limit stable pairs. \subsection{Dimension count}\label{subsect:dim-count} We prove that $\dim \Xi = \dim \bp$ by counting diagrams of the form (\ref{quot-comm-diag}) for fixed $\pi$ (nonzero) and $\psi$ (surjective).
For fixed $\pi$ and $\psi$, the locus of $\varphi$ is $(\psi_*)^{-1}(1_Z \circ \pi)$ in the exact sequence \[ 0 \to \Hom(\Lambda,\ker \psi) \to \Hom(\Lambda,\Omega) \xrightarrow{\psi_*} \Hom(\Lambda,\oo_Z) \to \Ext^1(\Lambda,\ker \psi) \to 0. \] This locus, if it is non-empty, is an affine space in $\mathbb{P} = \mathbb{P}\Hom(\Lambda,\Omega)$ that is isomorphic to $\Hom(\Lambda,\ker \psi)$. Since $\pi$ is fixed, we may assume it is nonzero only on the first $\oo$-summand of $\Lambda$. For the locus to be non-empty, we need $1_Z$ to be in the image of the middle map of \[ 0 \to \Hom(\oo,\ker \psi) \to \Hom(\oo,\Omega) \to \Hom(\oo,\oo_Z) \to \Ext^1(\oo,\ker \psi) \to 0. \] Thus, for the purpose of a dimension count we may assume that the image of $\Hom(\oo,\Omega) \to \Hom(\oo,\oo_Z)$ contains $1_Z$. The idea is to control the dimension of $\Ext^1(\Lambda,\ker \psi)$. In particular, if $\Ext^1(\Lambda,\ker \psi) = 0$, which is true for general $\psi$ as we show below, then the dimension of $(\psi_*)^{-1}(1_Z \circ \pi)$ is $\hom(\Lambda,\Omega) - \hom(\Lambda,\oo_Z)$. Adding to this the dimension of the parameter spaces for $\pi$ and $\psi$, we get \begin{equation}\label{eq:dim-comm-diag} \hom(\Lambda,\Omega) - \hom(\Lambda,\oo_Z) + (\alpha_4 - 1) + (2\ell + \hom(\Omega,\oo_Z) - 1). \end{equation} Using the fact that $\hom(\Lambda,\oo_Z) = \ell \, \mathrm{rk}(\Lambda)$, $\hom(\Omega,\oo_Z) = \ell \, \mathrm{rk}(\Omega)$, $\rk(\Lambda) - \rk(\Omega) = r+1$, and $\alpha_4 = (r-1)\ell + 1$, we see that (\ref{eq:dim-comm-diag}) equals $\hom(\Lambda,\Omega) - 1$, which equals $\dim \bp$. Thus, the main technical difficulty is to control the dimension of $\Ext^1(\Lambda,\ker \psi)$ in the case when $\psi$ is not general. \begin{prop}\label{dim-comm-diag} Let \[ M = \max\{\,m(\ell + r + 1 - m) \mid 1 \leqslant m \leqslant \ell \,\} \] and assume \[ \alpha_2,\alpha_3 \geqslant M, \quad \gamma_j \geqslant M+\ell \mbox{ for }j \in S_1, \quad \text{and} \quad \gamma_i \geqslant M + \ell+ \sum_{j \colon p_j \succ p_i} \gamma_j\mbox{ for }i \in S_0. \] Then the space $\Xi$ of commutative diagrams has the expected dimension, namely $\dim \Xi=\dim \mathbb{P}$. Furthermore, there is only one component with this dimension, while other components have strictly smaller dimension. \end{prop} \begin{proof} Choose an open set $U \subset X$ containing $Z$ and trivializations on $U$ of $\oo(C+A)$ and $\oo(C+A-E_j)$ for $j \in S_1$. The components of the map $\psi \colon \Omega \to \oo_Z$ generate subspaces of $\Hom(\oo(C+A),\oo_Z)$ and $\Hom(\oo(C+A-E_j),\oo_Z)$, which we identify with subspaces $T, T_{s+1},\dots, T_{t}$ of $H^0(\oo_Z)$ using the trivializations on $U$. For $D = 0,E_1,\dots,E_s,A,$ or $C$, we calculate bounds on $\ext^1(\oo(D),\ker \psi)$ by considering the dimension of the image of \[ \Hom(\oo(D),\Omega) \to \Hom(\oo(D),\oo_Z). \] Picking a trivialization of $\oo(D)$ on $U$ (shrinking $U$ if necessary), this image is the image of the multiplication map \[ m_{D} \colon \left( H^0(\oo(C+A-D)) \otimes T \right) \oplus \bigoplus_{j \in S_1} \left(H^0(\oo(C+A-E_j-D)) \otimes T_j \right) \to H^0(\oo_Z) \] defined by $(\phi \otimes x,\sum \phi_j \otimes x_j) \mapsto \phi|_Z \cdot x + \sum \phi_j|_Z \cdot x_j$. As $|C+A-D|$ is basepoint-free, there is a section $\phi_D \in H^0(\oo(C+A-D))$ that is nowhere zero on the support of $Z$, hence multiplying by $\phi_D$ is injective, which implies that the image of $H^0(\oo(C+A-D)) \otimes T$ has dimension $\geqslant \dim T$.
Similarly, as $|C+A-E_i-E_j|$ is basepoint-free when $p_j \succ p_i$ (Example~\ref{ex:LS_ample}), we can choose $\phi_j \in H^0(\oo(C+A-E_i-E_j))$ that is nowhere zero on the support of $Z$, so the image of $H^0(\oo(C+A-E_i-E_j)) \otimes T_j$ has dimension $\geqslant \dim T_j$. Define the stratum \[ W_{\lambda,\{ \lambda_j \}_{j \in S_1}, \{ \lambda_i \}_{i \in S_0}} \subset \Hom(\Omega,\oo_Z) \] by the conditions that $\dim T = \ell-\lambda$, $\dim T_j = \ell - \lambda_j$ for $j \in S_1$, and the sum $\phi_{E_i}|_Z \cdot T + \sum_{j \colon p_j \succ p_i} \phi_j|_Z \cdot T_j$ has dimension $\ell-\lambda_i$ for $i \in S_0$. By the above, the stratum is empty unless $0 \leqslant \lambda_i \leqslant \lambda,\lambda_j \leqslant \ell$ when $p_j \succ p_i$. For $\psi$ in this stratum, the image of $m_D$ has dimension $\geqslant \ell-\lambda$ and the image of $m_{E_i}$ has dimension $\geqslant \ell-\lambda_i$. Thus, $\ext^1(\oo(D),\ker \psi) \leqslant \lambda$ and $\ext^1(\oo(E_i),\ker \psi) \leqslant \lambda_i$, which yields the estimate \[ \ext^1(\Lambda,\ker \psi) \leqslant \lambda(\alpha_2 + \alpha_3 + \alpha_4) + \sum_{i \in S_0} \lambda_i \gamma_i = \lambda\Big(\alpha_1 + \sum_{j \in S_1} \gamma_j + r + 1\Big) - \sum_{i \in S_0}(\lambda-\lambda_i)\gamma_i. \] For strata with $\lambda=0$, we also have $\lambda_i=0$, so $\ext^1(\Lambda,\ker \psi)=0$ and the dimension of the space of commutative diagrams for $\psi$ in the union of such strata is equal to the expected dimension. Thus, it suffices to show that for strata with $\lambda > 0$, the codimension of the stratum in $\Hom(\Omega,\oo_Z)$ is at least as large as $\ext^1(\Lambda,\ker \psi)$; to this end, we compare the above estimate for $\ext^1(\Lambda,\ker \psi)$ to the codimension of the stratum in $\Hom(\Omega,\oo_Z)$. A map $\psi$ in this stratum can be obtained by first choosing a subspace $T_i \subset H^0(\oo_Z)$ of dimension $\ell-\lambda_i$, then choosing subspaces $T \subset (\phi_{E_i}|_Z)^{-1} \cdot T_i$ and $T_j \subset (\phi_j|_Z)^{-1} \cdot T_i$ of dimensions $\ell-\lambda$ and $\ell-\lambda_j$, and finally using $T,T_j$ to define the map $\psi$. Comparing a dimension count based on this description to $\hom(\Omega,\oo_Z)$, we see that the stratum has codimension \[ \lambda(\alpha_1+\lambda-\ell) + \sum_{j \in S_1} \lambda_j (\gamma_j + \lambda_j - \ell) + \sum_{i \in S_0} \lambda_i \left( \lambda_i - \lambda + \sum_{j \colon p_j \succ p_i} (\ell-\lambda_j) \right). \] The difference between the upper bound for $\ext^1(\Lambda,\ker \psi)$ and this codimension is \[ \Delta = \lambda(\ell + r+1-\lambda) + \sum_{i \in S_0} \sum_{j \colon p_j \succ p_i} (\lambda_j-\lambda_i)(\ell-\lambda_j) - \sum_{j \in S_1} (\lambda_j-\lambda) \gamma_j - \sum_{i \in S_0}(\lambda - \lambda_i)(\gamma_i-\lambda_i). \] Since $\gamma_i \geqslant M+\ell + \sum_{j \colon p_j \succ p_i} \gamma_j$, we obtain \[ \Delta \leqslant \lambda(\ell + r+1-\lambda) - \sum_{i \in S_0} \sum_{j \colon p_j \succ p_i} (\lambda_j-\lambda_i)(\gamma_j+\lambda_j-\ell) - \sum_{i \in S_0} (\lambda-\lambda_i)(M+\ell-\lambda_i). \] As $\alpha_2,\alpha_3,\gamma_i \geqslant M \geqslant \lambda(\ell+r+1-\lambda)$, we may assume that the image of $m_D$ for $D=A,C$ is exactly $\phi_D|_Z \cdot T$ and that the image of $m_{E_i}$ has dimension exactly $\ell-\lambda_i$, as otherwise we can improve the upper bound for $\ext^1(\Lambda,\ker\psi)$ by subtracting $M$, leading to a new $\Delta$ that is non-positive.
Similarly, as $M+\ell-\lambda_i$ and $\gamma_j+\lambda_j-\ell$ are each $\geqslant M$, $\Delta$ is non-positive unless $\lambda_i=\lambda_j=\lambda$. Thus, all that remains is the case when $\phi_{E_i}|_Z \cdot T = \phi_j|_Z \cdot T_j$ for all $i,j$ such that $p_j \succ p_i$ and the image of each $m_D$ has dimension $\ell-\lambda$. In other words, up to multiplication by units, $T$ is stable under multiplication by rational functions $\zeta$ in $H^0(\oo(C+A-D))$ for $D = A,C,E_i$ and $H^0(\oo(C+A-E_i-E_j))$ for $p_j \succ p_i$. This final case cannot happen. Since $1_Z$ is in the image of $\Hom(\oo,\Omega) \to \Hom(\oo,\oo_Z)$, $T$ must contain an element $\alpha$ that restricts to a unit at each point of the support of $Z$. But the linear systems $|A|$, $|C|$, $|C+A-E_i|$ for $i \in S_0$, and $|C+A-E_i-E_j|$ for $p_j \succ p_i$ collectively separate points and tangents on $X$ (compare with the proof of Proposition \ref{prop:very-ample}), hence multiplying $\alpha$ by the functions $\zeta$ generates all of $H^0(\oo_Z)$, so the only way $T$ can be stable under multiplication by all $\zeta$ is if $T = H^0(\oo_Z)$. This cannot happen as $\lambda > 0$. We finish the proof by arguing that there is a unique component with the maximal dimension. As shown above, over a general point $(\psi,\pi)\in \mathbb{P}\left(\Omega^{*[\ell]} \right) \times \mathbb{P}^{\alpha_4-1}$, the fiber of $\Xi$ is an affine space in $\bp$ isomorphic to $\Hom(\Lambda,\ker \psi)$. The fiber of any component with maximal dimension must contain a non-empty open set in this affine space for dimension reasons, but since any two non-empty open sets in an affine space must intersect, there can only be one such component. \end{proof} \section*{Appendix: Positivity Conditions} \renewcommand{\theequation}{A.\arabic{equation}} \setcounter{equation}{0} \renewcommand\theHequation{A.\arabic{equation}} We rephrase the conditions on the exponents of Gaeta resolutions in Proposition \ref{prop:chern-has-gr}(a) to explain the connection with the positivity of the first Chern class. We then summarize the inequalities required for the proof of Theorems \ref{thm:sd-injective} and \ref{thm:finite-quot-scheme}. Given the numerical class \[ f=\Big(r,\alpha A + \delta C - \sum_{i \in S_0} \gamma_i E_i - \sum_{j \in S_1} \gamma_j E_j,\chi \Big) \] on $X$, an admissible blowup of $\mathbb{F}_e$, the Euler characteristics in Proposition \ref{prop:chern-has-gr}(a) can be written more explicitly as \begin{align*} \alpha_1 &= \delta + \alpha + r - \chi - \sum_{i \in S_0} \gamma_i - \sum_{j \in S_1} \gamma_j; \\ \alpha_2 &= \alpha + r - \chi - \sum_{i \in S_0} \gamma_i; \\ \alpha_3 &= \delta + r - \chi - \sum_{i \in S_0} \gamma_i. \end{align*} Thus, rephrasing Proposition \ref{prop:chern-has-gr}(a) in the case when $\chi \geqslant r$ and $\gamma_i \geqslant \sum_{j \colon p_j \succ p_i} \gamma_j$ for all $i$, we can say that $f$ admits Gaeta resolutions if and only if $\gamma_i,\gamma_j,\chi$ are all $\geqslant 0$ and \begin{equation}\label{eq:f-admits-gr} \alpha, \delta \geqslant \sum_{i \in S_0} \gamma_i + \chi - r. \end{equation} Now, as in \S~\ref{sect:sd}, consider the orthogonal classes $\rho = (1,0,1-\ell)$ and \[ \sigma = \Big(r, L = \alpha A + \delta C - \sum_{i \in S_0} \gamma_i E_i - \sum_{j \in S_1} \gamma_j E_j, \chi = r\ell \Big), \] where $\ell \geqslant 1$ and $r \geqslant 2$ are fixed, and set $v = \sigma + \rho$. The assumption that $L$ is sufficiently positive in \S~\ref{sect:sd} includes three conditions.
Let \[ M = \max\{\,m(\ell + r + 1 - m) \mid 1 \leqslant m \leqslant \ell \,\} \] The first set of conditions used in the proof of Theorem \ref{thm:sd-injective} is: \begin{align}\label{eq:first-three-conditions} \begin{split} \gamma_j &\geqslant M \qquad \qquad \quad \quad \: \: \text{for all $j \in S_1$}; \\ \gamma_i &\geqslant M + \sum_{j \colon p_j \succ p_i} \gamma_j \qquad \: \text{for all $i \in S_0$}; \\ \alpha,\delta &\geqslant M + \sum_{i \in S_0} \gamma_i + r(\ell - 1). \end{split} \end{align} These conditions ensure that $\sigma$ and $v$ admit Gaeta resolutions (Proposition \ref{prop:chern-has-gr}(a)), general Gaeta resolutions for $\sigma$ and $v$ are locally free (Proposition \ref{prop:cokernel-properties}), general Gaeta resolutions for $v$ have no sections vanishing on curves (Proposition \ref{prop:no-sec-van-curves}(b)), Weak Brill-Noether holds (Proposition \ref{orth-ker-quot}), and $\dim \Xi = \dim \bp$ (Proposition \ref{dim-comm-diag}). The inequalities (\ref{eq:first-three-conditions}) imply that $L$ is $M$-very ample, as $L$ can be expressed as a tensor product of $M$ very ample line bundles by Proposition \ref{prop:very-ample} and $m$-very ampleness is additive under tensor products (\cite{HTT05}). The second condition is the discriminant condition (\ref{eq:discriminant-bound}), which in this context is \begin{equation}\label{eq:sd-discriminant-bound} P\left(\frac{1}{r}L\right) \geqslant \frac{(\lambda+1)^2}{4\lambda} + \frac{t}{8} + \frac{1}{r} + \ell, \qquad \text{where $\lambda = \frac{u}{v} + \frac{e}{2}$}. \end{equation} The last condition is that \begin{equation}\label{eq:fifth-condition} \text{$\Theta_\sigma = L_\ell - \frac{r}{2}E$ has vanishing higher cohomology}, \end{equation} which by Proposition \ref{prop:very-ample} and Lemma \ref{lem:high-coh-theta-0} can be ensured by the following inequalities: \begin{align}\label{eq:last-condition} \begin{split} \gamma_j &\geqslant r(\ell-1) \hspace{4cm} \text{for all $j \in S_1$}; \\ \gamma_i &\geqslant r(\ell-1) + \sum_{j \colon p_j \succ p_i} (\gamma_j+1) \hspace{1.32cm} \text{for all $i \in S_0$}; \\ \delta &\geqslant r(\ell-1)-1 + \sum_{i \in S_0} (\gamma_i+1); \\ \alpha &\geqslant r(\ell-1)-1 +e + \sum_{i \in S_0} (\gamma_i+1). \end{split} \end{align} The sufficiency of these conditions follows from computing $L-K_X$ and observing that it can be decomposed as the tensor product of an $\ell$-very ample line bundle and $r-1$ $(\ell-1)$-very ample line bundles. For $e,s,t$ not too large relative to $\ell$ and $r$, (\ref{eq:last-condition}) is implied by (\ref{eq:first-three-conditions}). \bibliography{gaetabib}{} \bibliographystyle{alpha} \end{document}
2205.14811v1
http://arxiv.org/abs/2205.14811v1
Last-iterate convergence analysis of stochastic momentum methods for neural networks
\documentclass[preprint,12pt]{elsarticle} \usepackage[utf8]{inputenc} \usepackage{amssymb} \usepackage{amsmath} \usepackage{amsthm} \usepackage{arydshln} \usepackage{titletoc} \usepackage{titlesec} \usepackage{subfigure} \usepackage{epsfig} \usepackage{graphicx,amsmath} \usepackage{algpseudocode} \usepackage{algorithmicx,algorithm} \usepackage{colortbl} \usepackage{tabularx} \usepackage{booktabs} \usepackage{threeparttable} \usepackage{arydshln} \usepackage{multirow} \usepackage{pgfplots} \begin{document} \begin{frontmatter} \newtheorem{proposition}{Proposition}[section] \newtheorem{theorem}{Theorem}[section] \newtheorem{definition}{Definition}[section] \newtheorem{lemma}{Lemma}[section] \newtheorem{remark}{Remark}[section] \newtheorem{assumption}{Assumption}[section] \newtheorem{example}{Example}[section] \newtheorem{corollary}{Corollary}[section] \title{Last-iterate convergence analysis of stochastic momentum methods for neural networks\tnoteref{t1}} \tnotetext[t1]{This work was funded in part by the National Natural Science Foundation of China (No. 62176051), in part by National Key R\&D Program of China (No. 2020YFA0714102), and in part by the Fundamental Research Funds for the Central Universities of China. (No. 2412020FZ024).} \author[nenu]{Dongpo Xu\fnref{label2}} \ead{[email protected]} \fntext[label2]{The authors contributed equally to this work.} \author[nenu]{Jinlan Liu\fnref{label2}} \author[chsc]{Yinghua Lu\corref{cor1}} \ead{[email protected]} \author[kasnenu]{Jun Kong\corref{cor1}} \ead{[email protected]} \author[danilo]{Danilo P. Mandic} \ead{[email protected]} \cortext[cor1]{Corresponding authors} \address[nenu]{School of Mathematics and Statistics, Northeast Normal University, Changchun 130024, China} \address[chsc]{Institute for Intelligent Elderly Care, Changchun Humanities and Sciences College, Changchun 130117, China} \address[kasnenu]{Key Laboratory of Applied Statistics of MOE, Northeast Normal University, Changchun 130024, China} \address[danilo]{Department of Electrical and Electronic Engineering, Imperial College London, SW7 2AZ London, UK} \begin{abstract} The stochastic momentum method is a commonly used acceleration technique for solving large-scale stochastic optimization problems in artificial neural networks. Current convergence results of stochastic momentum methods under non-convex stochastic settings mostly discuss convergence in terms of the random output and minimum output. To this end, we address the convergence of the last iterate output (called \textit{last-iterate convergence}) of the stochastic momentum methods for non-convex stochastic optimization problems, in a way conformal with traditional optimization theory. We prove the last-iterate convergence of the stochastic momentum methods under a unified framework, covering both stochastic heavy ball momentum and stochastic Nesterov accelerated gradient momentum. The momentum factors can be fixed to be constant, rather than time-varying coefficients in existing analyses. Finally, the last-iterate convergence of the stochastic momentum methods is verified on the benchmark MNIST and CIFAR-10 datasets. \end{abstract} \begin{keyword} Neural Networks; Last-iterate convergence; Stochastic momentum method; Heavy ball momentum; Nesterov accelerated gradient momentum; Non-convex optimization. 
\end{keyword} \end{frontmatter} \section{Introduction} We consider non-convex stochastic optimization problems of the form \begin{equation}\label{eq:optfprobmcons} \min_{x\in\mathbb{R}^d}f(x):=\mathbb{E}_{\xi}[\ell(x,\xi)], \end{equation} where $\ell$ is a smooth non-convex function and $\mathbb{E}_{\xi}[\cdot]$ denotes the statistical expectation with respect to the random variable $\xi$. Optimization problems of this form arise naturally in machine learning, where $x\in\mathbb{R}^d$ are the parameters of neural networks \cite{LeCun,Schmidhuber,Mandicbk}, $\ell$ represents the loss on individual training examples or mini-batches, and $f$ is the full training objective function. One of the most popular algorithms for solving such optimization problems is stochastic gradient descent (SGD) \cite{Robbins,Luo22}. The advantages of SGD for large-scale stochastic optimization and the related issues of tradeoffs between computational and statistical efficiency were highlighted in \cite{Bottou07}. Starting from $x_1\in\mathbb{R}^d$, SGD updates the parameters of the network model until convergence \begin{equation} x_{t+1}=x_{t}-\eta_tg_t, \end{equation} where $\eta_t>0$ is the stepsize and $g_t$ is a noisy gradient satisfying $\mathbb{E}_t[g_t]=\nabla f(x_t)$, i.e., an unbiased estimator of the full gradient $\nabla f(x_t)$. Classical convergence analysis of SGD has established that, in order for $\lim_{t\to\infty}\|\nabla f(x_t)\|=0$, the stepsizes should satisfy \begin{equation}\label{eq:etacondi} \sum_{t=1}^\infty\eta_t=\infty,\quad\quad \sum_{t=1}^\infty\eta_t^2<\infty\;. \end{equation} However, the basic SGD (also called vanilla SGD) suffers from slow convergence, and a variety of techniques have therefore been introduced to improve its convergence speed, including adaptive stepsize methods \cite{Duchi,Tieleman, Xu21}, momentum acceleration methods \cite{Nesterov,Polyak,GhadimiE} and adaptive gradient methods \cite{Kingma, Luo, Xu22}. Among these algorithms, momentum methods are particularly desirable, since they require only slightly more computation per iteration. Heavy Ball (HB) \cite{Polyak,GhadimiE} and Nesterov Accelerated Gradient (NAG) \cite{Nesterov} are the two most popular momentum methods. In the non-convex stochastic optimization setting, existing convergence results for the momentum methods usually have the following form \cite{Ghadimi, FZou,YYan} \begin{equation} \lim_{T\to\infty}\min_{t\in[T]}\mathbb{E}\|\nabla f(x_t)\|^2=0, \end{equation} or \begin{equation} \lim_{T\to\infty}\mathbb{E}\|\nabla f(x_\tau)\|^2=0, \end{equation} where $x_\tau$ is an iterate randomly chosen from $\{x_1, x_2$, $\cdots,x_T\}$ under some probability distribution. The most common choice is the uniform distribution, in which case $\lim_{T\to\infty}\frac{1}{T}\sum_{t=1}^T\mathbb{E}\|\nabla f(x_t)\|^2=0$. Note that these two convergence properties are weaker than the usual convergence, given by \begin{equation} \lim_{t\to\infty}\mathbb{E}\|\nabla f(x_t)\|^2=0, \end{equation} where the convergence of the last iterate of the momentum methods is referred to as \textit{last-iterate convergence}. In this work, we revisit the momentum methods with the aim of answering two basic questions: \begin{itemize} \item[1)] Can the stochastic momentum methods achieve last-iterate convergence in the non-convex setting? \item[2)] Can the momentum coefficient in the convergence analysis be fixed to a constant, as is commonly done in practice?
\end{itemize} Our analysis provides affirmative answers to both questions, with the main contributions of this work summarized as follows: \begin{itemize} \item[$\bullet$] Last-iterate convergence of the stochastic momentum methods is proven for the \textit{first} time in the non-convex setting, without a bounded-weight assumption. This theoretically supports the common practice of choosing the last iterate, rather than the minimum or a random selection, both of which rely on storage during the iterations \cite{FZou, YYan, ChenX, Zou}. \item[$\bullet$] The convergence condition on the momentum coefficient is relaxed to allow a fixed constant, rather than the time-varying coefficients (e.g., $\sum_{t=1}^\infty\mu_t^2<\infty$ or even more complicated) required under a bounded-weight assumption \cite{Wangj,Zhangnm}, which mirrors the actual implementations of the stochastic momentum method in deep learning libraries \cite{Stevens,Zaccone}. \item[$\bullet$] Numerical experiments support the theoretical findings of the last-iterate convergence of the stochastic momentum methods, and show that the stochastic momentum methods have good convergence performance on the benchmark datasets, while exhibiting good robustness with respect to different interpolation factors. \end{itemize} The rest of this paper is organized as follows. Section 2 introduces a unified view of the stochastic momentum methods. The main convergence results with the rigorous proofs are presented in Section 3. Experimental results are reported in Section 4. Finally, we conclude this work.\\ \noindent{\bf Notations.} We use $\mathbb{E}[\cdot]$ to denote the statistical expectation, and $\mathbb{E}_t[\cdot]$ to denote the conditional expectation with respect to $g_{t}$ conditioned on the previous $g_1,g_2,\cdots,g_{t-1}$; $[T]$ denotes the set $\{1,2,\cdots,T\}$, while the norm $\|x\|$ stands for $\|x\|_2$ if not otherwise specified. \section{Stochastic unified momentum methods} We next study the stochastic HB and NAG using a unified formulation. The stochastic HB (SHB) is characterized by the iteration \begin{equation} {\rm SHB}: x_{t+1}=x_{t}-\eta_tg_{t}+\mu(x_{t}-x_{{t-1}}), \end{equation} with $x_0=x_1\in\mathbb{R}^d$, where $\mu$ is the momentum constant and $\eta_t$ is the step size. By introducing $m_t=x_{t+1}-x_{t}$ with $m_0=0$, the above update becomes \begin{equation} {\rm SHB}:\quad \begin{cases} m_{t}=\mu m_{t-1}-\eta_tg_{t}\\ x_{t+1}=x_{t}+ m_{t} \end{cases}. \end{equation} The update of stochastic NAG (SNAG) is given by \begin{equation} {\rm SNAG}:\quad \begin{cases} y_{t+1}=x_{t}-\eta_tg_{t}\\ x_{t+1}=y_{t+1}+\mu (y_{t+1}-y_t) \end{cases}, \end{equation} with $x_1=y_1\in\mathbb{R}^d$. By introducing $m_t=y_{t+1}-y_{t}$ with $m_0=0$, the iterate of SNAG can be equivalently written as \begin{equation} {\rm SNAG}:\quad \begin{cases} m_{t}=\mu m_{t-1}-\eta_tg_{t}\\ x_{t+1}=x_{t}-\eta_tg_{t}+\mu m_t \end{cases}. \end{equation} Then, SHB and SNAG can be rewritten in the following stochastic unified momentum (SUM) form \begin{equation}\label{eq:mupdaterl} {\rm SUM}:\quad \begin{cases} m_{t}=\mu m_{t-1}-\eta_tg_{t}\\ x_{t+1}=x_{t}-\lambda\eta_t g_{t}+(1-\tilde{\lambda}) m_{t} \end{cases}, \end{equation} where $\mu\in[0,1)$ is the momentum constant, $\lambda\geq0$ is an interpolation factor and $\tilde{\lambda}:=(1-\mu)\lambda$. When $\lambda=0$, we have SHB; when $\lambda=1$, we have SNAG. To ensure that $1-\tilde{\lambda}$ is non-negative, $\lambda$ should be selected from the interval $[0,1/(1-\mu)]$. \begin{remark} Zou et al.
\cite{FZou} unified SHB and SNAG as a two-step iterative scheme, given by \begin{equation}\label{eq:zoumupdaterl} m_{t}=\mu m_{t-1}-\eta_tg_{t},\quad x_{t+1}=x_{t}+m_t+\lambda\mu(m_t-m_{t-1}), \end{equation} with $m_0=0$. Note that \eqref{eq:mupdaterl} and \eqref{eq:zoumupdaterl} are completely equivalent in a mathematical sense. However, there is a subtle difference in the actual implementation. The iteration in \eqref{eq:mupdaterl} only needs the knowledge of $m_t$ at time $t$, while the iteration in \eqref{eq:zoumupdaterl} requires both $m_{t-1}$ and $m_t$ at the same time. In addition, the convergence of the iteration in \eqref{eq:zoumupdaterl} is characterized by $$\lim_{T\to\infty}\frac{1}{T}\sum_{t=1}^T\left(\mathbb{E}\|\nabla f(x_t)\|^{4/3}\right)^{3/2}=0,$$ which is weaker than the last-iterate convergence $\lim_{t\to\infty}\mathbb{E}\|\nabla f(x_t)\|^2=0$. \end{remark} \begin{algorithm} \caption{SUM with the last-iterate output}\label{alg:sum1} {\bf Input:} Initial point $x_1\in \mathbb{R}^d$, step size $\{\eta_t\}_{t\geq1}$ and $\mu\in[0,1)$ \begin{algorithmic}[1] \State Set $m_0=0$ and $\lambda\in[0,1/(1-\mu)]$ \For{$t=1,2,...,T$} \State Get an unbiased noisy gradient $g_t$ \State $m_{t}=\mu m_{t-1}-\eta_tg_{t}$ \State $x_{t+1}=x_{t}-\lambda\eta_t g_{t}+(1-\lambda+\lambda\mu) m_{t}$ \EndFor \end{algorithmic} {\bf Output:} The last iterate $x_T$. \end{algorithm} \begin{remark} Yan et al. \cite{YYan} unified SHB and SNAG as a three-step iterative scheme as follows \begin{equation}\label{eq:yanmupdaterl} \begin{split} &y_{t+1}=x_t-\eta_tg_t,\quad \tilde{y}_{t+1}=x_t-\lambda\eta_tg_t,\\ &x_{t+1}=y_{t+1}+\mu(\tilde{y}_{t+1}-\tilde{y}_{t}), \end{split} \end{equation} with $\tilde{y}_1=x_1$. Note that \eqref{eq:mupdaterl} is slightly more economical than \eqref{eq:yanmupdaterl}, in terms of computation and storage. In addition, the convergence form of the iteration in \eqref{eq:yanmupdaterl} is $\lim_{T\to\infty}\min_{t\in[T]}\mathbb{E}\|\nabla f(x_t)\|^2=0$, which is a simple corollary of last-iterate convergence $\lim_{t\to\infty}\mathbb{E}\|\nabla f(x_t)\|^2=0$. \end{remark} Next, we introduce three different forms of the SUM method in terms of their convergence behaviour. Algorithm 1 corresponds to last-iterate convergence, Algorithm 2 corresponds to random selection convergence \cite{FZou}, while Algorithm 3 corresponds to the minimum gradient convergence \cite{YYan}. \begin{algorithm} \caption{SUM with random output} {\bf Input:} Initial point $x_1 \in \mathbb{R}^d$, step size $\{\eta_t\}_{t\geq1}$ and $\mu\in[0,1)$. \begin{algorithmic}[1] \State Set $\mathcal{S}=\emptyset$, $m_0=0$ and $\lambda\in[0,1/(1-\mu)]$ \For{$t=1,2,...,T$} \State $\mathcal{S}=\mathcal{S}\cup\{x_{t}\}$ \State Get an unbiased noisy gradient $g_t$ \State $m_{t}=\mu m_{t-1}-\eta_tg_{t}$ \State $x_{t+1}=x_{t}+m_t+\lambda\mu (m_{t}-m_{t-1})$ \EndFor \end{algorithmic} {\bf Output:} Choose $x_\tau$ from the set $\mathcal{S}$ with some probability distribution. \end{algorithm} \begin{algorithm}[t] \caption{SUM with minimum output} {\bf Input:} Initial point $x_1 \in \mathbb{R}^d$, step size $\{\eta_t\}_{t\geq1}$ and $\mu\in[0,1)$. 
\begin{algorithmic}[1] \State Set $x_{\tau}=\tilde{y}_1=x_1$, $v=\infty$ and $\lambda\in[0,1/(1-\mu)]$ \For{$t=1,2,...,T$} \State Calculate the full gradient $\nabla f(x_{t})$ \If{$\|\nabla f(x_{t})\|<v$} \State $x_{\tau} =x_{t}$, $v=\|\nabla f(x_{t})\|$ \EndIf \State Get an unbiased noisy gradient $g_t$ \State $y_{t+1}=x_t-\eta_tg_t$ \State $\tilde{y}_{t+1}=x_t-\lambda\eta_t g_t$ \State $x_{t+1}=y_{t+1}+\mu(\tilde{y}_{t+1}-\tilde{y}_{t})$ \EndFor \end{algorithmic} {\bf Output:} The minimum iterate $x_{\tau}$ that satisfies $\tau=\arg\min_{t\in[T]} \|\nabla f(x_{t})\|$. \end{algorithm} \begin{remark} It is worth mentioning that the three SUM methods are mathematically equivalent, but they may differ slightly in the actual implementation. The main difference lies in the output mode. Algorithm 1 and Algorithm 2 are basically the same regarding computational efficiency, but all updated parameters of Algorithm 2 need to be stored in memory. In addition, it is difficult to obtain the optimal parameters through the random selection of parameters in the actual implementation. Algorithm 3 makes it easy to obtain the optimal parameters, but the computational cost of the full gradient in each iteration is too high for it to be adopted in practice. If the last-iterate convergence of Algorithm 1 can be guaranteed, then Algorithm 1 with the last iterate is undoubtedly the best tradeoff scheme, because it has advantages in both computation and storage. \end{remark} \section{Main results} \subsection{Technical lemmas} We now provide some lemmas, which are pivotal in the proof of last-iterate convergence of the proposed SUM method. \begin{lemma}\label{lem:abcabp} Let $\{ a_n\}_{n\geq1}$, $\{ b_n\}_{n\geq1}$ and $\{\tilde{a}_n\}_{n\geq1}$ be three non-negative real sequences such that $\sum_{n=1}^\infty a_n=\infty$, $\sum_{n=1}^\infty a_nb_n^p<\infty$, $\lim_{n\to\infty}{a_n}/{\tilde{a}_n}=1$, and $|b_{n+1}-b_n|\leq C \tilde{a}_nb_n^{p-\epsilon}$ for some positive constants $C$, $p$ and $\epsilon\in[0,p]$. Then, we have \begin{equation} \lim_{n\to\infty} b_n=0\,. \end{equation} \begin{proof} Since $\sum_{n=1}^\infty a_nb_n^p$ converges, we necessarily have $\liminf_{n\to\infty} b_n=0$; otherwise, since $\sum_{n=1}^\infty a_n$ diverges, the series $\sum_{n=1}^\infty a_nb_n^p$ would also diverge. We now proceed to establish the proof by contradiction, and assume that $\limsup_{n\to\infty} b_n\geq \nu >0$. Let $\{m_k\}_{k\geq1}$, $\{n_k\}_{k\geq1}$ be sequences of indices such that $m_k<n_k<m_{k+1}$, \begin{equation}\label{eq:3epsilon} \frac{\nu}{2}<b_n,\quad \text{ for $m_k\leq n<n_k$}\,, \end{equation} \begin{equation}\label{eq:3epsilonleq} b_n\leq\frac{\nu}{2},\quad \text{ for $n_k\leq n<m_{k+1}$}\,. \end{equation} Since $\sum_{n=1}^\infty a_nb_n^p$ converges and $\lim_{n\to\infty}{a_n}/{\tilde{a}_n}=1$, there exists a sufficiently large $\tilde{k}$ such that \begin{equation}\label{eq:sertail} \sum_{j=m_{\tilde{k}}}^\infty \tilde{a}_jb_j^p<\frac{1}{2C}\left(\frac{\nu}{2}\right)^{\epsilon+1}\,.
\end{equation} Then for all $k\geq \tilde{k}$ and all $n$ with $m_k\leq n\leq n_k-1$, we have \begin{equation}\label{eq:bnkbneps3} \begin{split} |b_{n_k}-b_n|&\leq\sum_{j=n}^{n_k-1}|b_{j+1}-b_j|\leq C\sum_{j=n}^{n_k-1}\tilde{a}_jb_j^{p-\epsilon}\\ &\leq C\left(\frac{\nu}{2}\right)^{-\epsilon}\sum_{j=n}^{n_k-1}\tilde{a}_jb_j^p\\ &\leq C\left(\frac{\nu}{2}\right)^{-\epsilon}\frac{1}{2C}\left(\frac{\nu}{2}\right)^{\epsilon+1}=\frac{\nu}{4}\,, \end{split} \end{equation} where the last two inequalities follow from \eqref{eq:3epsilon} and \eqref{eq:sertail}. Thus, \begin{equation} b_n\leq b_{n_k}+\frac{\nu}{4}\leq\frac{3\nu}{4},\quad \forall k\geq \tilde{k}, m_k\leq n\leq n_k-1\,. \end{equation} Upon combining the previous inequality with \eqref{eq:3epsilonleq}, we have $b_n\leq 3\nu/4,\quad \forall n\geq m_{\tilde{k}}$. This contradicts $\lim \sup_{n\to\infty} b_n\geq \nu >0$. Therefore, $\lim_{n\to\infty} b_n=0$. \end{proof} \end{lemma} From Lemma \ref{lem:abcabp}, we obtain the following corollary. \begin{corollary}\label{cor:abcabp} Let $\{ a_n\}_{n\geq1}$, $\{ b_n\}_{n\geq1}$ and $\{\tilde{a}_n\}_{n\geq1}$ be three non-negative real sequences such that $\sum_{n=1}^\infty a_n=\infty$, $\sum_{n=1}^\infty a_nb_n^2<\infty$, $\lim_{n\to\infty}{a_n}/{\tilde{a}_n}=1$, and $|b_{n+1}-b_n|\leq C \tilde{a}_n$ for a positive constant $C$. Then, we have \begin{equation} \lim_{n\to\infty} b_n=0\,. \end{equation} \end{corollary} The following lemma follows from Lemma 1 in \cite{Bertsekas}, and greatly simplifies the proof of the last-iterate convergence of the proposed SUM method. \begin{lemma}[\cite{Bertsekas}]\label{lem:xyz} Let $Y_t$, $W_t$, and $Z_t$ be three sequences such that $W_t$ is nonnegative for all $t$. Also, assume that \begin{equation} Y_{t+1}\leq Y_t-W_t+Z_t,\quad t=0, 1, \cdots, \end{equation} and that the series $\sum_{t=0}^TZ_t$ converges as $T\to\infty$. Then, either $Y_t\to-\infty$ or else $Y_t$ converges to a finite value and $\sum_{t=0}^\infty W_t<\infty$. \end{lemma} \subsection{Last-iterate convergence of the SUM methods} The following lemma is used to determine the convergence of the series of parameter increments. \begin{lemma}\label{lem:smbound} Assume that $\mathbb{E}\| g_t \|^2\leq G^2$. If the stepsize sequence satisfies \eqref{eq:etacondi}, then we have \begin{equation} \sum_{t=1}^\infty\mathbb{E}\|x_{t+1}-x_t\|^2<\infty,\quad \sum_{t=1}^\infty \left(\sum_{k=1}^{t-1}\mu ^{t-k}\mathbb{E}\|m_{k}\|^2\right)<\infty\,. \end{equation} \end{lemma} \begin{proof} From \eqref{eq:mupdaterl}, we have \begin{equation}\label{eq:mtisumbag} m_{t}=-\sum_{k=1}^t\mu ^{t-k}\eta_kg_{k}=-\sum_{k=0}^{t-1}\mu ^k\eta_{t-k}g_{t-k}\,, \end{equation} and \begin{equation}\label{eq:mtisumbagst} \begin{split} \|x_{t+1}-x_t\|^2&\leq\|-\lambda\eta_t g_{t}+(1-\tilde{\lambda}) m_{t}\|^2\\ &\leq2\lambda^2\eta_t^2 \|g_{t}\|^2+2(1-\tilde{\lambda})^2\| m_{t}\|^2\,. \end{split} \end{equation} By Jensen's inequality \cite{Linz}, we get \begin{equation} \begin{split} \|m_{t}\|^2&\leq\left(\sum_{k=1}^t\mu ^{t-k}\eta_k\|g_{k}\|\right)^2\leq\left(\sum_{k=1}^t\mu ^{t-k}\right)\sum_{k=1}^t\mu ^{t-k}\eta_k^2\|g_{k}\|^2\,. 
\end{split} \end{equation} Upon taking the total expectation, we have \begin{equation}\label{eq:emt2g2} \begin{split} \mathbb{E}[\|m_{t}\|^2]&\leq\left(\sum_{k=1}^t\mu ^{t-k}\right)\sum_{k=1}^t\mu ^{t-k}\eta_k^2\mathbb{E}[\|g_{k}\|^2]\leq \frac{G^2}{(1-\mu )}\sum_{k=1}^t\mu ^{t-k}\eta_k^2\,, \end{split} \end{equation} and \begin{equation} \begin{split} \mathbb{E}[\|x_{t+1}-x_t\|^2]&\leq2\lambda^2\eta_t^2 \mathbb{E}[\|g_{t}\|^2]+2(1-\tilde{\lambda})^2\mathbb{E}[\| m_{t}\|^2]\\ &\leq2\lambda^2 G^2\eta_t^2 +2(1-\tilde{\lambda})^2\mathbb{E}[\| m_{t}\|^2]\,. \end{split} \end{equation} From the stepsize conditions \eqref{eq:etacondi}, we arrive at \begin{equation} \sum_{t=1}^{\infty}\eta_t^2<\infty, \quad\quad \sum_{t=1}^{\infty}\mu ^{t-1}<\infty,\quad 0<\mu <1\,. \end{equation} Since $\sum_{k=1}^{t}\eta_k^2\mu ^{t-k}$ is the Cauchy product of the previous two positive convergent series \cite{Rudin}, this yields \begin{equation}\label{eq:sumsumetauk} \sum_{t=1}^\infty \left(\sum_{k=1}^{t-1}\eta_k^2\mu ^{t-k}\right)<\infty\,. \end{equation} Upon combining \eqref{eq:emt2g2}--\eqref{eq:sumsumetauk}, we finally have \begin{equation} \sum_{t=1}^\infty\mathbb{E}[\|m_{t}\|^2]<\infty,\quad \sum_{t=1}^\infty\mathbb{E}[\|x_{t+1}-x_t\|^2]<\infty, \end{equation} and in a similar manner \begin{equation} \sum_{t=1}^\infty \left(\sum_{k=1}^{t-1}\mu ^{t-k}\mathbb{E}[\|m_{k}\|^2]\right)<\infty\,. \end{equation} This completes the proof. \end{proof} The following lemma quantifies the consistency between the direction of the momentum $m_t$ and the direction of the negative full gradient. \begin{lemma}\label{lem:fxsinequ} Assume that $\nabla f$ is $L$-Lipschitz continuous and $\mathbb{E}\| g_t \|^2\leq G^2$. Then for $x_t$ generated by SUM (Algorithm \ref{alg:sum1}), we have the following bound \begin{equation} \begin{split} \mathbb{E}\left[\nabla f(x_t)^Tm_{t}\right]&\leq -\sum_{k=1}^{t}\mu ^{t-k}\eta_k\mathbb{E}[\|\nabla f(x_k)\|^2]\\ &\quad +2L\sum_{k=1}^{t-1}\mu ^{t-k}\mathbb{E}\left\|m_{k}\right\|^2 +L\lambda^2G^2\sum_{k=1}^{t-1}\mu ^{t-k}\eta_k^2\,. \end{split} \end{equation} \end{lemma} \begin{proof} From \eqref{eq:mupdaterl}, we have \begin{equation} \begin{split} \nabla f(x_t)^Tm_{t}&=\mu \nabla f(x_t)^Tm_{t-1}-\eta_t\nabla f(x_t)^Tg_{t}\,. \end{split} \end{equation} Upon taking the conditional expectation with respect to $g_{t}$ conditioned on the previous $g_1,g_2,\ldots,g_{t-1}$ and using $\mathbb{E}_t[g_t]=\nabla f(x_t)$, we arrive at \begin{equation}\label{eq:nabfsss} \begin{split} \mathbb{E}_t[\nabla f(x_t)^Tm_{t}]&=\mu \nabla f(x_t)^Tm_{t-1}-\eta_t\nabla f(x_t)^T\mathbb{E}_t[g_{t}]\\ &=\mu \nabla f(x_t)^Tm_{t-1}-\eta_t\|\nabla f(x_t)\|^2\,. \end{split} \end{equation} Here, the first equality follows from the fact that once we know $g_1,g_2,\ldots,g_{t-1}$, the values of $x_{t}$ and $m_{t-1}$ are no longer random. By \eqref{eq:mupdaterl}, we obtain \begin{equation} \begin{split} \|x_{t+1}-x_t\|^2&\leq\|-\lambda \eta_t g_{t}+(1-\tilde{\lambda})m_{t}\|^2\\ &\leq2\lambda^2\eta_t^2 \|g_{t}\|^2+2(1-\tilde{\lambda})^2\| m_{t}\|^2\,.
\end{split} \end{equation} By the $L$-Lipschitz continuity of $\nabla f$ and the Cauchy--Schwarz inequality \cite{Linz}, we have \begin{equation} \begin{split} [\nabla &f(x_t)-\nabla f(x_{t-1})]^Tm_{t-1}\\ &\leq\|\nabla f(x_t)-\nabla f(x_{t-1})\|\|m_{t-1}\|\\ &\leq L\|x_{t}-x_{t-1}\|\|m_{t-1}\|\leq \frac{L}{2}\|x_{t}-x_{t-1}\|^2+\frac{L}{2}\|m_{t-1}\|^2\\ &\leq L\lambda^2\eta_{t-1}^2 \|g_{t-1}\|^2+L(1-\tilde{\lambda})^2\| m_{t-1}\|^2+\frac{L}{2}\|m_{t-1}\|^2\\ &\leq L\lambda^2\eta_{t-1}^2 \|g_{t-1}\|^2+2L\|m_{t-1}\|^2\,, \end{split} \end{equation} where the last inequality follows from the fact that $\tilde{\lambda}\in[0,1]$. Next, using the previous inequality, the estimate \eqref{eq:nabfsss} becomes \begin{equation} \begin{split} &\mathbb{E}_t[\nabla f(x_t)^Tm_{t}]=\mu \nabla f(x_t)^Tm_{t-1}-\eta_t\|\nabla f(x_t)\|^2\\ &\leq\mu \nabla f(x_{t-1})^Tm_{t-1}\!-\!\eta_t\|\nabla f(x_t)\|^2+\mu [\nabla f(x_t)-\nabla f(x_{t-1})]^Tm_{t-1}\\ &\leq\mu \nabla f(x_{t-1})^Tm_{t-1}\!-\!\eta_t\|\nabla f(x_t)\|^2 +\mu L\lambda^2\eta_{t-1}^2 \|g_{t-1}\|^2+2\mu L\|m_{t-1}\|^2. \end{split} \end{equation} Upon taking the total expectation, we arrive at \begin{equation} \begin{split} \mathbb{E}[\nabla f(x_t)^Tm_{t}]&\leq\mu \mathbb{E}[\nabla f(x_{t-1})^Tm_{t-1}]-\eta_t\mathbb{E}[\|\nabla f(x_t)\|^2]\\ &\quad +\mu L\lambda^2\eta_{t-1}^2 \mathbb{E}[\|g_{t-1}\|^2]+2\mu L\mathbb{E}[\|m_{t-1}\|^2]\\ &\leq\mu \mathbb{E}[\nabla f(x_{t-1})^Tm_{t-1}]-\eta_t\mathbb{E}[\|\nabla f(x_t)\|^2]\\ &\quad +\mu L\lambda^2 G^2\eta_{t-1}^2+2\mu L\mathbb{E}[\|m_{t-1}\|^2]\,. \end{split} \end{equation} It is now straightforward to establish by induction that \begin{equation} \begin{split} \mathbb{E}\left[\nabla f(x_t)^Tm_{t}\right]&\leq -\sum_{k=1}^{t}\mu ^{t-k}\eta_k\mathbb{E}[\|\nabla f(x_k)\|^2]\\ &\quad +2L\sum_{k=1}^{t-1}\mu ^{t-k}\mathbb{E}\left\|m_{k}\right\|^2 +L\lambda^2G^2\sum_{k=1}^{t-1}\mu ^{t-k}\eta_k^2\,. \end{split} \end{equation} This completes the proof. \end{proof} Based on the above lemmas, we obtain the main convergence theorem of the SUM method in this paper. \begin{theorem}[Last-iterate Convergence]\label{thm:invconv} Let $x_t$ be the sequence obtained from Algorithm 1 with a stepsize sequence satisfying \eqref{eq:etacondi}. Assume that $f$ is lower-bounded by $f^*$, $\nabla f$ is $L$-Lipschitz continuous, $\mathbb{E}\| g_t \|^2\leq G^2$ and $\lim_{t\to\infty}\eta_{t-1}/\eta_t=1$. Then, we have \begin{equation} \lim_{t\to\infty}\mathbb{E}[\|\nabla f(x_t)\|]=0,\quad \lim_{t\to\infty}\mathbb{E}[{f(x_t)}]=F^*\,, \end{equation} where $F^*$ is a finite constant. \end{theorem} \begin{proof} By the $L$-Lipschitz continuity of $\nabla f$ and the descent lemma in \cite{Nesterovbk}, we have \begin{equation} \begin{split} f(x_{t+1})\leq f(x_t)+\nabla f(x_t)^T (x_{t+1}-x_t)+\frac{L}{2}\|x_{t+1}-x_t\|^2\,. \end{split} \end{equation} Recalling \eqref{eq:mupdaterl}, the previous inequality becomes \begin{equation} \begin{split} &f(x_{t+1})\leq f(x_t)+\nabla f(x_t)^T (x_{t+1}-x_t)+\frac{L}{2}\|x_{t+1}-x_t\|^2\\ &= f(x_t)-\lambda \eta_t\nabla f(x_t)^Tg_t+(1-\tilde{\lambda})\nabla f(x_t)^Tm_t+\frac{L}{2}\|x_{t+1}-x_t\|^2\,.
\end{split} \end{equation} Upon taking the conditional expectation with respect to $g_{t}$ conditioned on the previous $g_1,g_2,\ldots,g_{t-1}$ and using $\mathbb{E}_t[g_t]=\nabla f(x_t)$, we arrive at \begin{equation}\label{eq:econcfffg} \begin{split} \mathbb{E}_t[f(x_{t+1})]&\leq f(x_t)-\lambda\eta_t\nabla f(x_t)^T\mathbb{E}_t[g_t]\\ &\quad+ (1-\tilde{\lambda})\mathbb{E}_t[\nabla f(x_t)^Tm_t]+\frac{L}{2}\mathbb{E}_t[\|x_{t+1}-x_t\|^2]\\ &= f(x_t)-\lambda\eta_t\|\nabla f(x_t)\|^2\\ &\quad+ (1-\tilde{\lambda})\mathbb{E}_t[\nabla f(x_t)^Tm_t]+\frac{L}{2}\mathbb{E}_t[\|x_{t+1}-x_t\|^2]\,, \end{split} \end{equation} where the first equality follows from the fact that once we know $g_1,g_2,\ldots,g_{t-1}$, the value of $x_{t}$ is no longer random. Applying the law of total expectation \cite{Linz}, we take the total expectation on both sides of \eqref{eq:econcfffg}, and arrive at \begin{equation}\label{eq:efeftt1} \begin{split} &\mathbb{E}[f(x_{t+1})]\leq \mathbb{E}[f(x_t)]-\lambda\eta_t\mathbb{E}[\|\nabla f(x_t)\|^2]\\ &\quad+ (1-\tilde{\lambda})\mathbb{E}[\nabla f(x_t)^Tm_t]+\frac{L}{2}\mathbb{E}[\|x_{t+1}-x_t\|^2]\,. \end{split} \end{equation} Upon inserting the result from Lemma \ref{lem:fxsinequ} into \eqref{eq:efeftt1}, we have \begin{equation} \begin{split} \mathbb{E}[f(x_{t+1})]&\leq\mathbb{E}[f(x_t)]-\lambda\eta_t\mathbb{E}[\|\nabla f(x_t)\|^2]\\ &-(1-\tilde{\lambda})\sum_{k=1}^{t}\mu ^{t-k}\eta_k\mathbb{E}[\|\nabla f(x_k)\|^2]\\ &+2(1-\tilde{\lambda}) L\sum_{k=1}^{t-1}\mu ^{t-k}\mathbb{E}\left\|m_{k}\right\|^2\\ &+(1-\tilde{\lambda}) L\lambda^2G^2\sum_{k=1}^{t-1}\mu ^{t-k}\eta_k^2+\frac{L}{2}\mathbb{E}[\|x_{t+1}-x_t\|^2]\,. \end{split} \end{equation} From Lemma \ref{lem:smbound}, we know that \begin{equation} \sum_{t=1}^\infty\mathbb{E}\|x_{t+1}-x_t\|^2<\infty ,\quad \sum_{t=1}^\infty\left(\sum_{k=1}^{t-1}\mu ^{t-k}\eta_k^2\right)<\infty\,, \end{equation} and \begin{equation} \sum_{t=1}^\infty \left(\sum_{k=1}^{t-1}\mu ^{t-k}\mathbb{E}\|m_{k}\|^2\right)<\infty\,. \end{equation} Thus, the conditions of Lemma \ref{lem:xyz} are satisfied for $Y_t=\mathbb{E}[f(x_t)]$, $W_t=\lambda\eta_t\mathbb{E}[\|\nabla f(x_t)\|^2]{+}(1-\tilde{\lambda})\sum_{k=1}^{t}\mu ^{t-k}\eta_k\mathbb{E}[\|\nabla f(x_k)\|^2]$ and $Z_t=\frac{L}{2}\mathbb{E}[\|x_{t+1}-x_t\|^2]+2(1-\tilde{\lambda}) L\sum_{k=1}^{t-1}\mu ^{t-k}\mathbb{E}\left\|m_{k}\right\|^2+(1-\tilde{\lambda}) L\lambda^2G^2\sum_{k=1}^{t-1}\mu ^{t-k}\eta_k^2$. By Lemma \ref{lem:xyz}, we have \begin{equation} \lim_{t\to\infty}Y_t=\lim_{t\to\infty}\mathbb{E}[{f(x_t)}]=F^*\,, \end{equation} where $F^*$ is a finite constant and \begin{equation}\label{eq:etakektozero} \begin{split} \sum_{t=1}^\infty\Bigl(\lambda\eta_t\mathbb{E}[\|\nabla f(x_t)\|^2]+(1-\tilde{\lambda})\sum_{k=1}^{t}\mu ^{t-k}\eta_k\mathbb{E}[\|\nabla f(x_k)\|^2]\Bigr)<\infty\,. \end{split} \end{equation} Note that \begin{equation}\label{eq:beltaphefx} \begin{split} \sum_{t=1}^T\sum_{k=1}^{t}\mu ^{t-k}\eta_k\mathbb{E}[\|\nabla f(x_k)\|^2]&=\sum_{k=1}^T\eta_k\mathbb{E}[\|\nabla f(x_k)\|^2]\sum_{t=k}^{T}\mu ^{t-k}\\ &\geq \sum_{k=1}^T\eta_k\mathbb{E}[\|\nabla f(x_k)\|^2]\,, \end{split} \end{equation} where the last inequality follows from the fact that $\sum_{t=k}^{T}\mu ^{t-k}\geq 1$. We can now combine \eqref{eq:etakektozero} and \eqref{eq:beltaphefx} to yield \begin{equation} \sum_{t=1}^\infty\left((1-\tilde{\lambda}+\lambda)\eta_t\mathbb{E}[\|\nabla f(x_t)\|^2]\right)<\infty\,.
\end{equation} Note that $\tilde{\lambda}=(1-\mu)\lambda$, so that $1-\tilde{\lambda}+\lambda=1+\mu\lambda\geq1$ is a non-zero constant. From Jensen's inequality \cite{Linz}, it follows that \begin{equation}\label{eq:alphnabfxk} \sum_{t=1}^\infty\eta_t\big(\mathbb{E}[\|\nabla f(x_t)\|]\big)^2\leq\sum_{t=1}^\infty\eta_t\mathbb{E}[\|\nabla f(x_t)\|^2]<\infty\,, \end{equation} while by invoking \eqref{eq:mupdaterl} and \eqref{eq:mtisumbag}, we have \begin{equation} \begin{split} \|x_{t+1}-x_t\|&=\|-\lambda\eta_t g_{t}+(1-\tilde{\lambda}) m_{t}\|\leq\lambda\eta_t\|g_{t}\|+(1-\tilde{\lambda})\| m_{t}\|\\ &\leq\lambda\eta_t\|g_{t}\|+(1-\tilde{\lambda})\sum_{k=1}^t\mu ^{t-k}\eta_k\|g_{k}\|\,. \end{split} \end{equation} Upon taking the total expectation, noting that $\lambda\in[0,1/(1-\mu)]$, we finally obtain \begin{equation}\label{eq:mtbtkakgk} \begin{split} &\mathbb{E}[\|x_{t+1}-x_t\|]\leq\lambda\eta_t\mathbb{E}[\|g_{t}\|]+(1-\tilde{\lambda})\sum_{k=1}^t\mu ^{t-k}\eta_k\mathbb{E}[\|g_{k}\|]\\ &\leq \frac{2G}{1-\mu}\left(\frac{\eta_t}{2}+\frac{(1-\mu)\sum_{k=1}^t\mu ^{t-k}\eta_k}{2}\right)=\frac{2G}{(1-\mu)}\frac{\eta_t+\tilde{\eta}_t}{2}\,, \end{split} \end{equation} where $\tilde{\eta}_t=(1-\mu)\sum_{k=1}^t\mu ^{t-k}\eta_k$. By the Stolz--Ces\`aro theorem for real sequences \cite{Rudin}, we next have \begin{equation} \begin{split} \lim_{t\to\infty}\frac{\eta_t}{\tilde{\eta}_t}&=\frac{1}{1-\mu} \lim_{t\to\infty}\frac{\eta_t/\mu^t}{\eta_t/\mu^t+\eta_{t-1}/\mu^{t-1}+\cdots+\eta_1/\mu}\\ &=\frac{1}{1-\mu}\lim_{t\to\infty}\frac{\eta_t/\mu^t-\eta_{t-1}/\mu^{t-1}}{\eta_t/\mu^t}\\ &=\frac{1}{1-\mu}\lim_{t\to\infty}\left(1-\mu\frac{\eta_{t-1}}{\eta_{t}}\right)=1\,, \end{split} \end{equation} where the last equality follows from the condition $\lim_{t\to\infty}\eta_{t-1}/\eta_t=1$. By the $L$-Lipschitz continuity of $\nabla f$, we have \begin{equation} \begin{split} \big|\|\nabla f(x_{t+1})\|-\|\nabla f(x_t)\|\big|&\leq\|\nabla f(x_{t+1})-\nabla f(x_t)\|\\ &\leq L\|x_{t+1}-x_t\|\,. \end{split} \end{equation} Upon taking the total expectation and using Jensen's inequality \cite{Linz}, we arrive at \begin{equation}\label{eq:ffalpsamm} \begin{split} \Big|\mathbb{E}[\|\nabla f(x_{t+1})\|]-\mathbb{E}[\|\nabla f(x_t)\|]\Big|&\leq \mathbb{E}\left[\Bigl|\|\nabla f(x_{t+1})\|-\|\nabla f(x_t)\|\Bigr|\right]\\ &\leq L\mathbb{E}[\| x_{t+1}-x_t\|]\\ &\leq\frac{2LG}{(1-\mu)}\frac{\eta_t+\tilde{\eta}_t}{2}\,. \end{split} \end{equation} It follows from \eqref{eq:alphnabfxk} and \eqref{eq:ffalpsamm} that the conditions of Corollary \ref{cor:abcabp} are satisfied for $a_t=\eta_t$, $\tilde{a}_t=(\eta_t+\tilde{\eta}_t)/{2}$ and $b_t=\mathbb{E}[\|\nabla f(x_t)\|]$. Therefore, \begin{equation} \lim_{t\to\infty}\mathbb{E}[\|\nabla f(x_t)\|]=0\,. \end{equation} This completes the proof. \end{proof} \begin{remark} It should be noted that there is a large class of non-increasing sequences that satisfy the step size conditions of Theorem \ref{thm:invconv}, including, but not limited to, \begin{equation}\label{eq:noncrsstepsize} \eta_t= \begin{cases} \alpha,\quad &t\leq K \\ \alpha/(t-K)^p,\quad &t> K \end{cases},\quad p\in(1/2,1]\,, \end{equation} where $\alpha$ and $K$ are two positive constants. \end{remark} The following result is an immediate corollary of Theorem \ref{thm:invconv}. \begin{corollary} Suppose that the conditions in Theorem \ref{thm:invconv} hold.
Then, \begin{equation} \lim_{T\to\infty}\min_{t\in[T]}\mathbb{E}\|\nabla f(x_t)\|^2=0,\quad \lim_{T\to\infty}\frac{1}{T}\sum_{t=1}^T\mathbb{E}\|\nabla f(x_t)\|^2=0\,. \end{equation} \end{corollary} \section{Experiments} This section validates the convergence theory of the proposed SUM method for training neural networks, and for all the tested algorithms, we take the sequence of step sizes according to \eqref{eq:noncrsstepsize}. By convention, the last-iterate convergence of the SUM method is evaluated via the training loss and the test accuracy versus the number of epochs. For a fair comparison, every algorithm used the same amount of training data, the same initial step size $\alpha$, and the same initial weights. We ran all the experiments on a server with an AMD Ryzen TR 2990WX CPU, 64GB RAM, and one NVIDIA GTX-2080TI GPU, using publicly accessible code\footnote{https://github.com/kuangliu/pytorch-cifar/} with Python 3.7.9 and PyTorch 1.8.1. \subsection{Datasets} \noindent {\bf MNIST Dataset:} The MNIST dataset \cite{Lenet} contains 60,000 training samples and 10,000 test samples of the handwritten digits from 0 to 9. The images are grayscale, $28\times28$ pixels, and centered to reduce preprocessing. \noindent {\bf CIFAR-10 Dataset:} The CIFAR-10 dataset \cite{CIFAR} consists of 60,000 $32\times32$ color images drawn from 10 classes, which is divided into a training set with 50,000 images and a test set with 10,000 images. \subsection{Results of LeNet on MNIST dataset} \begin{figure}[htbp] \centering \subfigure[Training loss]{\includegraphics[width=2.6in]{mnist-train-loss.pdf}} \hspace{-0.2in} \subfigure[Test accuracy]{\includegraphics[width=2.6in]{mnist-test-acc.pdf}} \hspace{-0.2in} \caption{Results of LeNet-5 on MNIST. The continuous lines and shadow areas represent the mean and standard deviation from 5 random initializations, respectively. } \label{fig:mnist} \end{figure} We first trained LeNet \cite{Lenet} on the MNIST dataset using the SUM method with the initial step size $\alpha=0.01$ and the momentum constant $\mu=0.9$. In particular, we compared SUM with the interpolation factor $\lambda\in\{0, 0.5, 1.0,$ $5.0, 10.0\}$, where $\lambda=0$ corresponds to SHB, $\lambda=1$ corresponds to SNAG, and $\lambda=10$ is the maximum admissible value $1/(1-\mu)$ since the momentum constant was $\mu = 0.9$. LeNet consists of 2 convolutional (Conv) and max-pooling layers and 3 fully connected (FC) layers, and the activation functions of the hidden layers are taken as ELU functions \cite{Calinbook}. The training was on mini-batches with 128 images per batch for 50 epochs, a total of $T=23,450$ iterations\footnote{The number of iterations is equal to the number of training samples divided by the batch size, and then multiplied by the number of epochs.}, and $K$ is set to $0.9T$. The performance of the SUM method with different interpolation factors is shown in Figure \ref{fig:mnist}. We can see that all the variants of the SUM method had similar performance in terms of the training loss, which is consistent with the result in Theorem \ref{thm:invconv}. On the test set, we found that SUM with the minimum value $\lambda=0.0$ yields a higher test accuracy than the other variants, while SUM with the maximum value $\lambda=10.0$ exhibits larger oscillations. An interesting phenomenon is that the non-increasing step size (recall \eqref{eq:noncrsstepsize}) has a smoothing effect on the tail of the test accuracy curves.
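For illustration, the SUM update \eqref{eq:mupdaterl} of Algorithm \ref{alg:sum1} and the step size schedule \eqref{eq:noncrsstepsize} admit the following minimal PyTorch-style sketch. The function names, the per-parameter buffer dictionary, and the default values below are purely illustrative, and this sketch is not the code used to produce the reported results.
\begin{verbatim}
import torch

def sum_step(params, grads, buffers, eta_t, mu=0.9, lam=1.0):
    # One SUM iteration (Algorithm 1):
    #   m_t     = mu * m_{t-1} - eta_t * g_t
    #   x_{t+1} = x_t - lam * eta_t * g_t + (1 - lam + lam * mu) * m_t
    # lam = 0 recovers SHB and lam = 1 recovers SNAG.
    with torch.no_grad():
        for p, g in zip(params, grads):
            m = buffers.setdefault(id(p), torch.zeros_like(p))
            m.mul_(mu).add_(g, alpha=-eta_t)
            p.add_(g, alpha=-lam * eta_t)
            p.add_(m, alpha=1.0 - lam + lam * mu)

def step_size(t, alpha=0.01, K=21105, p=1.0):
    # Non-increasing schedule: eta_t = alpha for t <= K and
    # alpha / (t - K)^p afterwards, with p in (1/2, 1];
    # here K = 0.9 * T with T = 23,450 iterations (MNIST setup).
    return alpha if t <= K else alpha / (t - K) ** p

# Usage inside a training loop, after loss.backward():
#   grads = [q.grad for q in model.parameters()]
#   sum_step(list(model.parameters()), grads, buffers, step_size(t))
\end{verbatim}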
\subsection{Results on CIFAR-10 dataset} \begin{figure}[htbp] \centering \subfigure[Training loss]{\includegraphics[width=2.6in]{cifar-10-train-loss.pdf}} \hspace{-0.2in} \subfigure[Test accuracy]{\includegraphics[width=2.6in]{cifar-10-test-acc.pdf}} \caption{Results of ResNet-18 on CIFAR-10. The continuous lines and shadow areas represent the mean and standard deviation from 5 random initializations, respectively.} \label{fig:cifar} \end{figure} We next verify the convergence of the SUM method on CIFAR-10 by using ResNet-18 \cite{Hekm}. ResNet-18 contains a Conv layer, 4 convolution blocks with [2,2,2,2] layers, and one FC layer, and the activation functions of the hidden layers are taken as ELU functions \cite{Calinbook}. The batch size is 128. The training stage lasts for 100 epochs, a total of $T=39,100$ iterations, and $K=0.9T$. We fix the momentum constant $\mu=0.9$ and the initial step size $\alpha=0.1$. We now study the influence of $\lambda$ on the convergence of the SUM method by selecting $\lambda$ from the set $\{0, 0.5, 1.0, 5.0, 10.0\}$. Figure \ref{fig:cifar} illustrates the performance of SUM with different $\lambda$ on the CIFAR-10 dataset. From Figure \ref{fig:cifar}(a), we can observe that the convergence of the SUM method can be guaranteed on the training set, as long as the interpolation factor $\lambda\in[0,1/(1-\mu)]$, which coincides with the convergence results in Theorem \ref{thm:invconv}. Figure \ref{fig:cifar}(b) shows that the test accuracies of SUM ($\lambda=0.5$ and $\lambda=5.0$) are comparable, followed by SUM ($\lambda=0.0$ and $\lambda=1.0$), and that SUM ($\lambda=10.0$) exhibits larger oscillations, similar to the case of the MNIST dataset. It should be noted that, for theoretical consistency, the training loss was calculated after each epoch on the full training set (not on mini-batches), and so it is reasonable that the training loss curves are monotonically decreasing. \section{Conclusions} We have addressed the last-iterate convergence of the stochastic momentum methods in a unified framework, covering both SHB and SNAG. For rigour, this has been achieved: \textit{i)} in a non-convex optimization setting, \textit{ii)} under the condition that the momentum coefficient is constant, and \textit{iii)} without any assumption on the boundedness of the weight norm. Moreover, we have established that the existing minimum convergence and random selection convergence are both corollaries of last-iterate convergence, thus providing a new perspective on the understanding of the convergence of the SUM method. We have also experimentally tested the convergence of SUM on the benchmark datasets. Experimental results verify the theoretical analysis. \begin{thebibliography}{00} \bibitem{LeCun} Y. LeCun, Y. Bengio, G. Hinton, Deep learning, Nature 521 (2015) 436-444. \bibitem{Schmidhuber} J. Schmidhuber, Deep learning in neural networks: An overview, Neural Netw. 61 (2015) 85-117. \bibitem{Mandicbk} D. Mandic, J. Chambers, Recurrent Neural Networks for Prediction: Architectures, Learning Algorithms and Stability, Wiley, New York, NY, USA, 2001. \bibitem{Robbins} H. Robbins, S. Monro, A stochastic approximation method, Ann. Math. Statistics 22 (3) (1951) 400-407. \bibitem{Luo22} J. Luo, J. Liu, D. Xu, H. Zhang, SGD-r$\alpha$: A real-time $\alpha$-suffix averaging method for SGD with biased gradient estimates, Neurocomputing 487 (2022) 1-8. \bibitem{Bottou07} L. Bottou, O.
Bousquet, The tradeoffs of large scale learning, in: Proc. Adv. Neural Inf. Process. Syst. 2007, pp. 161-168. \bibitem{Duchi} J. Duchi, E. Hazan, Y. Singer, Adaptive subgradient methods for online learning and stochastic optimization, J. Mach. Learn. Res. 12 (2011) 2121-2159. \bibitem{Tieleman} T. Tieleman, G. Hinton, Lecture 6.5-RMSProp: Divide the gradient by a running average of its recent magnitude. COURSERA: Neural Netw. Mach. Learn. 4 (2012) 26-31. \bibitem{Xu21} D. Xu, S. Zhang, H. Zhang, D. Mandic, Convergence of the RMSProp deep learning method with penalty for nonconvex optimization, Neural Netw. 139 (2021) 17-23. \bibitem{Nesterov} Y. Nesterov, A method for solving the convex programming problem with convergence rate O($1/k^2$), in: Dokl. Akad. Nauk SSSR 1983, pp. 543-547. \bibitem{Polyak} B. Polyak, Some methods of speeding up the convergence of iteration methods, USSR Comp. Math. Math. Phys. 4 (5) (1964) 1-17. \bibitem{GhadimiE} E. Ghadimi, H. Feyzmahdavian, M. Johansson, Global convergence of the heavy-ball method for convex optimization, in: Europ. Cont. Conf. 2015, pp. 310-315. \bibitem{Kingma} D. Kingma, J. Ba, Adam: A method for stochastic optimization, in: Int. Conf. Learn. Repres. 2015, pp. 1-15. \bibitem{Luo} L. Luo, Y. Xiong, Y. Liu, Y. Sun, Adaptive gradient methods with dynamic bound of learning rate, in: Int. Conf. Learn. Repres. 2019, pp. 1-21. \bibitem{Xu22} J. Liu, J. Kong, D. Xu, M. Qi, Y. Lu, Convergence analysis of AdaBound with relaxed bound functions for non-convex optimization, Neural Netw. 145 (2022) 300-307. \bibitem{Ghadimi} S. Ghadimi, G. Lan, Accelerated gradient methods for nonconvex nonlinear and stochastic programming, Math. Program. 156 (1-2) (2016) 59-99. \bibitem{FZou} F. Zou, L. Shen, Z. Jie, J. Sun, W. Liu, Weighted AdaGrad with unified momentum, ArXiv preprint arXiv: 1808.03408, 2018. \bibitem{YYan} Y. Yan, T. Yang, Z. Li, Q. Lin, Y. Yang, A unified analysis of stochastic momentum methods for deep learning, in: Proc. Int. Joint Conf. Artif. Intell. 2018, pp. 2955-2961. \bibitem{ChenX} X. Chen, S. Liu, R. Sun, M. Hong, On the convergence of a class of Adam-type algorithms for non-convex optimization, in: Int. Conf. Learn. Repres. 2019, pp. 1-30. \bibitem{Zou} F. Zou, L. Shen, Z. Jie, W. Zhang, W. Liu, A sufficient condition for convergences of Adam and RMSProp, in: Proc. IEEE Conf. Comp. Vis. Patt. Recogn. 2019, pp. 11127-11135. \bibitem{Wangj} J. Wang, J. Yang, W. Wu, Convergence of cyclic and almost-cyclic learning with momentum for feedforward neural networks, IEEE Trans. Neural Netw. 22 (8) (2011) 1297-1306. \bibitem{Zhangnm} N. Zhang, An online gradient method with momentum for two-layer feedforward neural networks, Appl. Math. Comput. 212 (2) (2009) 488-498. \bibitem{Stevens} E. Stevens, L. Antiga, T. Viehmann, Deep Learning with PyTorch, Manning Publications, 2020. \bibitem{Zaccone} G. Zaccone, M. R. Karim, A. Menshawy, Deep Learning with TensorFlow, Packt Publishing Ltd, 2017. \bibitem{Bertsekas} D. Bertsekas, J. Tsitsiklis, Gradient convergence in gradient methods with errors, SIAM J. Optim. 10 (3) (2000) 627-642. \bibitem{Linz} Z. Lin, Z. Bai, Probability Inequalities, Springer Science and Business Media, 2011. \bibitem{Rudin} W. Rudin, Principles of Mathematical Analysis, 2nd ed. New York: McGraw-Hill, 1964. \bibitem{Nesterovbk} Y. Nesterov, Introductory Lectures on Convex Optimization: A Basic Course, Springer Science and Business Media, 2013. \bibitem{Lenet} Y. LeCun, L. Bottou, Y. Bengio, P. 
Haffner, Gradient-based learning applied to document recognition, Proc. IEEE. 86 (11) (1998) 2278-2324. \bibitem{CIFAR} A. Krizhevsky, Learning multiple layers of features from tiny images, Master's thesis, University of Toronto, 2009. \bibitem{Calinbook} O. Calin, Deep Learning Architectures: A Mathematical Approach, Springer, 2020. \bibitem{Hekm} K. He, X. Zhang, S. Ren, J. Sun, Deep residual learning for image recognition, in: Proc. IEEE Conf. Comp. Vis. Patt. Recogn. 2016, pp. 770-778. \bibitem{Goodfellow}I. Goodfellow, Y. Bengio, A. Courville, Deep Learning, Cambridge, MA: MIT Press, 2016. \end{thebibliography} \end{document}
2205.14771v3
http://arxiv.org/abs/2205.14771v3
Selective symplectic homology with applications to contact non-squeezing
\documentclass[a4paper,12pt]{extarticle} \renewcommand{\itshape}{\slshape} \usepackage{latexsym} \usepackage{amscd} \usepackage{graphics} \usepackage{amsmath} \usepackage{amssymb} \usepackage{bbold} \usepackage{mathrsfs} \usepackage{amsthm} \usepackage{xcolor} \usepackage{accents} \usepackage{enumerate} \usepackage{url} \usepackage{tikz-cd} \usetikzlibrary{decorations.pathreplacing} \usepackage{marginnote} \usepackage{hyperref} \usepackage{multicol,tikz} \usetikzlibrary{calc} \usepackage{marvosym} \usepackage{newpxtext} \usepackage[euler-digits]{eulervm} \theoremstyle{plain} \newtheorem{theorem}{\sc Theorem}[section] \makeatletter \newcommand{\settheoremtag}[1]{ \let\oldthetheorem\thetheorem \renewcommand{\thetheorem}{#1} \g@addto@macro\endtheorem{ \addtocounter{theorem}{0} \global\let\thetheorem\oldthetheorem} } \newtheorem{prop}[theorem]{\sc Proposition} \newtheorem{lem}[theorem]{\sc Lemma} \newtheorem{cor}[theorem]{\sc Corollary} \theoremstyle{definition} \newtheorem{defn}[theorem]{\sc Definition} \newtheorem{rem}[theorem]{\sc Remark} \newtheorem{qu}[theorem]{\sc Problem} \newtheorem{ex}[theorem]{\sc Example} \renewcommand{\qedsymbol}{\rule{0.55em}{0.55em}} \DeclareMathOperator{\im}{im} \DeclareMathOperator{\coker}{coker} \DeclareMathOperator{\R}{\mathbb{R}} \newcommand{\abs}[1]{\left\lvert#1\right\rvert} \newcommand{\norm}[1]{\left\lVert#1\right\rVert} \newcommand{\op}[1]{\operatorname{#1}} \renewcommand{\hat}[1]{\widehat{#1}} \numberwithin{equation}{section} \renewcommand{\emptyset}{\varnothing} \title{Selective symplectic homology with applications to contact non-squeezing} \author{Igor Uljarevi\'c} \date{June 2, 2023} \usepackage{biblatex} \addbibresource{document.bib} \begin{document} \maketitle \begin{abstract} We prove a contact non-squeezing phenomenon on homotopy spheres that are fillable by Liouville domains with large symplectic homology: there exists a smoothly embedded ball in such a sphere that cannot be made arbitrarily small by a contact isotopy. These homotopy spheres include examples that are diffeomorphic to standard spheres and whose contact structures are homotopic to standard contact structures. As the main tool, we construct a new version of symplectic homology, called \emph{selective symplectic homology}, that is associated to a Liouville domain and an open subset of its boundary. The selective symplectic homology is obtained as the direct limit of Floer homology groups for Hamiltonians whose slopes tend to $+\infty$ on the open subset but remain close to 0 and positive on the rest of the boundary. \end{abstract} \section{Introduction} One of the driving questions in contact geometry is how much it differs from smooth topology. How far does it go beyond topology? Does it, for instance, remember not only the shape but also the size of an object? In the absence of a natural measure, the size in contact geometry can conveniently be addressed via contact (non-)squeezing. We say that a subset $\Omega_a$ of a contact manifold $\Sigma$ can be contactly squeezed into a subset $\Omega_b\subset \Sigma$ if, and only if, there exists a contact isotopy $\varphi_t:\Sigma\to\Sigma, \: t\in[0,1]$ such that $\varphi_0=\op{id}$ and such that $\overline{\varphi_1(\Omega_a)}\subset \Omega_b$. The most basic examples of contact manifolds are pessimistic as far as contact geometry and size are concerned. 
Namely, every bounded subset of the standard $\R^{2n+1}$ (considered with the contact form $dz +\sum_{j=1}^n \left( x_jdy_j - y_j dx_j\right)$) can be contactly squeezed into an arbitrarily small ball. This is because the map \[ \R^{2n+1}\to\R^{2n+1}\quad:\quad (x,y,z)\mapsto \left(k\cdot x, k\cdot y, k^2\cdot z\right) \] is a contactomorphism for all $k\in\R^+$ (it pulls the contact form back to $k^2$ times itself). Consequently, every subset of a contact manifold whose closure is contained in a contact Darboux chart can be contactly squeezed into any non-empty open subset. In other words, contact geometry does not remember the size on a small scale. Somewhat surprisingly, this is not true on a large scale in general. In the next theorem, $B(R)$ denotes the ball of radius $R$. \begin{theorem}[Eliashberg-Kim-Polterovich, Chiu]\label{thm:EKP} The subset $\hat{B}(R) := B(R)\times\mathbb{S}^1$ of $\mathbb{C}^n\times \mathbb{S}^1$ can be contactly squeezed into itself via a compactly supported contact isotopy if, and only if, $R<1$. \end{theorem} This remarkable phenomenon, which may be seen as a manifestation of the Heisenberg uncertainty principle, was first observed by Eliashberg, Kim, and Polterovich \cite{eliashberg2006geometry}. They proved the case where either $R<1$ or $R\in\mathbb{N}.$ Chiu \cite{chiu2017nonsqueezing} extended their result to radii that are not necessarily integers. Fraser \cite{fraser2016contact} presented an alternative proof of the case of non-integer radii that is more in line with the techniques used in \cite{eliashberg2006geometry}. (Fraser actually proved the following formally stronger statement: there does not exist a compactly supported contactomorphism of $\mathbb{C}^n\times\mathbb{S}^1$ that maps the closure of $\hat{B}(R)$ into $\hat{B}(R)$ if $R\geqslant 1.$ It seems not to be known whether the group of compactly supported contactomorphisms of $\mathbb{C}^n\times\mathbb{S}^1$ is connected.) Using generating functions, Sandon reproved the case of integer radii \cite{sandon2011contact}. Contact non-squeezing results are rare. Apart from Theorem~\ref{thm:EKP}, there are only a few results about contact non-squeezing \cite{eliashberg2006geometry,albers2018orderability,allais2021contact,de2019orderability}, and they all concern subsets of the form $ U\times\mathbb{S}^1$ in the prequantization of a Liouville manifold. The present paper provides examples of contact manifolds that are diffeomorphic to standard spheres and that exhibit non-trivial contact non-squeezing phenomena. The following theorem is the first example of contact non-squeezing for a contractible subset, namely an embedded standard smooth ball. \begin{theorem}\label{thm:Ustilovskyspheres} Let $\Sigma$ be an Ustilovsky sphere. Then, there exist two embedded closed balls $B_1, B_2\subset \Sigma$ of dimension equal to $\dim \Sigma$ such that $B_1$ cannot be contactly squeezed into $B_2$. \end{theorem} An Ustilovsky sphere is the $(4m+1)$-dimensional Brieskorn manifold \[ \left\{ z=(z_0,\ldots, z_{2m+1})\in\mathbb{C}^{2m+2}\:|\: z_0^p + z_1^2 +\cdots + z_{2m+1}^2=0\:\&\: \abs{z}=1 \right\}\] associated with natural numbers $m, p\in\mathbb{N}$ with $p\equiv \pm 1 \pmod{8}$.
The Ustilovsky sphere is endowed with the contact structure given by the contact form \[\alpha_p:= \frac{i p}{8}\cdot \left( z_0d\overline{z}_0-\overline{z}_0dz_0 \right) + \frac{i}{4}\cdot \sum_{j=1}^{2m+1}\left( z_jd\overline{z}_j-\overline{z}_jdz_j \right).\] These Brieskorn manifolds were used by Ustilovsky \cite{ustilovsky1999infinitely} to prove the existence of infinitely many exotic contact structures on the standard sphere that have the same homotopy type as the standard contact structure. The strength of Theorem~\ref{thm:Ustilovskyspheres} lies in the topological simplicity of the objects used. A closed ball embedded in a smooth manifold can always be smoothly squeezed into an arbitrarily small (non-empty) open subset. Moreover, the obstruction to contact squeezing in Theorem~\ref{thm:Ustilovskyspheres} does not lie in the homotopy properties of the contact distribution. Namely, the contact distribution of an Ustilovsky sphere for $p\equiv 1 \pmod{2(2m)!}$ is homotopic to the standard contact distribution on the sphere, and the contact non-squeezing on the standard contact sphere is trivial. A consequence of Theorem~\ref{thm:Ustilovskyspheres} is a contact non-squeezing on $\R^{4m+1}$ endowed with a non-standard contact structure. \begin{cor}\label{cor:nonsqR} Let $m\in\mathbb{N}$. Then, there exist a contact structure $\xi$ on $\R^{4m+1}$ and an embedded $(4m+1)$-dimensional closed ball $B\subset \R^{4m+1}$ such that $B$ cannot be squeezed into an arbitrary open non-empty subset by a compactly supported contact isotopy of $\left(\R^{4m+1}, \xi\right)$. \end{cor} The exotic $\R^{4m+1}$ in Corollary~\ref{cor:nonsqR} is obtained by removing a point from an Ustilovsky sphere. In fact, the contact non-squeezing implies that $(\R^{4m+1}, \xi)$ constructed in this way (although tight) is not contactomorphic to the standard $\R^{4m+1}$. A more general result was proven by Fauteux-Chapleau and Helfer \cite{fauteux2021exotic} using a variant of contact homology: there exist infinitely many pairwise non-contactomorphic tight contact structures on $\R^{2n+1}$ if $n>1$. Theorem~\ref{thm:Ustilovskyspheres} is a consequence of the following theorem about homotopy spheres that bound Liouville domains with large symplectic homology. \begin{theorem}\label{thm:homologyspheres} Let $n> 2$ be a natural number and let $W$ be a $2n$-dimensional Liouville domain such that $\dim SH_\ast(W) > \sum_{j=1}^{2n} \dim H_j(W;\mathbb{Z}_2)$ and such that $\partial W$ is a homotopy sphere. Then, there exist two embedded closed balls $B_1, B_2\subset \partial W$ of dimension $2n-1$ such that $B_1$ cannot be contactly squeezed into $B_2$. \end{theorem} The smooth non-squeezing problem for a homotopy sphere is trivial: every non-dense subset of a homotopy sphere can be smoothly squeezed into an arbitrary non-empty open subset. This is due to the existence of Morse functions with precisely two critical points on homotopy spheres. A smooth squeezing can be realized by the gradient flow of such a Morse function. Plenty of examples of Liouville domains that satisfy the conditions of Theorem~\ref{thm:homologyspheres} can be found among Brieskorn varieties. The Brieskorn variety $V(a_0,\ldots, a_m)$ is a Stein domain whose boundary is contactomorphic to the Brieskorn manifold $\Sigma(a_0,\ldots, a_m)$.
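Here, in parallel with the description of the Ustilovsky spheres above, $\Sigma(a_0,\ldots, a_m)$ stands for the link \[ \Sigma(a_0,\ldots, a_m)= \left\{ z=(z_0,\ldots, z_{m})\in\mathbb{C}^{m+1}\:|\: z_0^{a_0} + \cdots + z_{m}^{a_m}=0\:\&\: \abs{z}=1 \right\}.\]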
Brieskorn \cite[Satz~1]{brieskorn1966beispiele} proved a simple necessary and sufficient condition (conjectured by Milnor) for a Brieskorn manifold to be homeomorphic to a sphere (see also \cite[Proposition~3.6]{kwon2016brieskorn}). Many of the corresponding Brieskorn varieties have infinite-dimensional symplectic homology, for instance $V(3,2,2,\ldots,2)$. Thus, Theorem~\ref{thm:homologyspheres} also implies that there exists a non-trivial contact non-squeezing on the Kervaire spheres, i.e. on $\Sigma(3,2,\ldots, 2)$ for an odd number of 2's. Our non-squeezing results are obtained using a novel version of symplectic homology, called \emph{selective symplectic homology}, that is introduced in the present paper. It resembles the relative symplectic cohomology of Varolgunes \cite{varolgunes2021mayer}, although the relative symplectic (co)homology and the selective symplectic homology are not quite the same. The selective symplectic homology, $SH_\ast^\Omega(W)$, is associated to a Liouville domain $W$ and an open subset $\Omega\subset \partial W$ of its boundary. Informally, $SH_\ast^{\Omega}(W)$ is defined as the Floer homology for a Hamiltonian on $W$ that is equal to $+\infty$ on $\Omega$ and to 0 on $\partial W\setminus \Omega$ (whereas, in this simplified view, the symplectic homology corresponds to a Hamiltonian that is equal to $+\infty$ everywhere on $\partial W$). The precise definition of the selective symplectic homology is given in Section~\ref{sec:SSH} below. \sloppy The selective symplectic homology is related to the symplectic (co)homology of a Liouville sector that was introduced in \cite{ganatra2020covariantly} by Ganatra, Pardon, and Shende. As described in detail in \cite{ganatra2020covariantly}, every Liouville sector can be obtained from a Liouville manifold $X$ by removing the image of a stop. The notion of a stop on a Liouville manifold $X$ was defined by Sylvan \cite{sylvan2019partially} as a proper, codimension-0 embedding $\sigma: F\times\mathbb{C}_{\op{Re}<0}\to X$, where $F$ is a Liouville manifold, such that $\sigma^\ast \lambda_X= \lambda_F + \lambda_{\mathbb{C}} + df$, for a compactly supported $f$. Here, $ \lambda_X, \lambda_F, \lambda_{\mathbb{C}}$ are the Liouville forms on $X$, $F$, and $\mathbb{C}_{\op{Re}<0}$, respectively. We now compare the selective symplectic homology $SH_\ast^\Omega(W)$ and the symplectic homology $SH_\ast(X, \partial X)$, where $X= \hat{W}\setminus\op{im}\sigma$ is the Liouville sector obtained by removing a stop $\sigma$ from the completion $\hat{W}$, and $\Omega$ is the interior of the set $\partial W \setminus \op{im} \sigma$. Both $SH_\ast^\Omega(W)$ and $SH_\ast(X, \partial X)$ are, informally speaking, Floer homologies for a Hamiltonian whose slope tends to infinity over $\Omega$. However, as opposed to $SH_\ast(X,\partial X)$, the selective symplectic homology $SH_\ast^\Omega(W)$ takes into account $\op{im} \sigma \cap W$, i.e. the part of the stop that lies outside of the conical end $\partial W\times(1,+\infty)$. Additionally, in the selective symplectic homology theory, there are no restrictions on $\Omega$: it can be any open subset, not necessarily the one obtained by removing a stop. On the technical side, $SH_\ast(X,\partial X)$ and $SH_\ast^\Omega(W)$ differ in the way the compactness issue is resolved.
The symplectic homology of a Liouville sector is based on compactness arguments by Groman \cite{groman2015floer}, whereas the selective symplectic homology relies on a version of the Alexandrov maximum principle \cite[Theorem~9.1]{gilbarg1977elliptic}, \cite[Appendix~A]{abbondandolo2009estimates}, \cite{merry2019maximum}. It is an interesting question under what conditions $SH_\ast^\Omega(W)$ and $SH_\ast(X, \partial X)$ actually coincide. In simple terms, the non-squeezing results of the present paper are obtained by proving that a set $\Omega_b\subset \partial W$ with big selective symplectic homology cannot be contactly squeezed into a subset $\Omega_a\subset \partial W$ with $SH_\ast^{\Omega_a}(W)$ small (see Theorem~\ref{thm:ranknonsqueezing} on page~\pageref{thm:ranknonsqueezing}). The computation of the selective symplectic homology is somewhat challenging even in the simplest non-trivial cases. The key computations in the paper are that of $SH_\ast^D(W)$ where $D\subset\partial W$ is a contact Darboux chart, and that of $SH^{\partial W\setminus D}_\ast(W)$. We prove that $SH_\ast^D( W)$ is isomorphic to $SH_\ast^\emptyset(W)$ by analysing the dynamics of a suitably chosen family of contact Hamiltonians that are supported in $D$ (see Theorem~\ref{thm:sshdarboux} on page~\pageref{thm:sshdarboux}). On the other hand, by utilizing the existence of a contractible loop of contactomorphisms that is positive over $D$, one can prove that $SH^{\partial W\setminus D}_\ast (W)$ is big if $SH_\ast(W)$ is big itself (see Section~\ref{sec:immaterial}). The proof is indirect and not quite straightforward. This proof also requires a feature of Floer homology for contact Hamiltonians that could be of interest in its own right and that has not appeared in the literature so far. Namely, there exists a collection of isomorphisms $\mathcal{B}(\sigma): HF_\ast(h)\to HF_\ast(h\# f)$ (one isomorphism for each admissible $h$) furnished by a family $\sigma$ of contactomorphisms of $\partial W$ that is indexed by a disc. In the formula above, $f$ is the contact Hamiltonian that generates the ``boundary loop'' of $\sigma$, and $h\#f$ is the contact Hamiltonian of the contact isotopy $\varphi^h_t\circ\varphi^f_t$. In addition, the isomorphisms $ \mathcal{B}(\sigma)$ give rise to an automorphism of the symplectic homology $SH_\ast(W)$. \begin{rem} For the sake of simplicity, this paper defines the selective symplectic homology $SH_\ast^\Omega(W)$ in the framework of Liouville domains. The theory can actually be developed whenever $W$ is a symplectic manifold with contact type boundary such that the symplectic homology $SH_\ast(W)$ is well defined. This is the case, for instance, if $W$ is a weakly+ monotone \cite{hofer1995floer} symplectic manifold with a convex end. Theorem~\ref{thm:homologyspheres} and Theorem~\ref{thm:ranknonsqueezing} on page~\pageref{thm:ranknonsqueezing} are valid in this more general setting. \end{rem} What follows is a brief description of the main properties of the selective symplectic homology. \subsection{Empty set} The selective symplectic homology of the empty set is isomorphic, up to a shift in grading, to the singular homology of the Liouville domain relative to its boundary: \[ SH_\ast^{\emptyset}(W)\cong H_{\ast+ n} (W,\partial W; \mathbb{Z}_2),\] where $2n=\dim W$. This is a straightforward consequence of the formal definition of the selective symplectic homology (Definition~\ref{def:SSH} on page \pageref{def:SSH}).
Namely, it follows directly that $SH_\ast^\emptyset(W)$ is isomorphic to the Floer homology $HF_\ast(H)$ for a Hamiltonian $H_t:\hat{W}\to\R$ whose slope $\varepsilon>0$ is sufficiently small (smaller than any positive period of a closed Reeb orbit on $\partial W$). For such a Hamiltonian $H$, it is known (by a standard argument involving the isomorphism of the Floer and Morse homologies for a $C^2$ small Morse function) that $HF_\ast(H)$ recovers $H_{\ast+n}(W,\partial W;\mathbb{Z}_2)$. \subsection{Canonical identification}\label{sec:canid} Although not reflected in the notation, the group $SH_\ast^{\Omega}(W)$ depends only on the completion $\hat{W}$ and an open subset of the \emph{ideal contact boundary} of $\hat{W}$ (defined in \cite[page~1643]{eliashberg2006geometry}). More precisely, $ SH_\ast^{\Omega}(W)= SH^{\Omega_f}_\ast(W^f),$ whenever the pairs $(W, \Omega)$ and $(W^f, \Omega_f)$ are $\lambda$-related in the sense of the following definition. \begin{defn}\label{def:lambdarel} Let $(M,\lambda)$ be a Liouville manifold. Let $\Sigma_1,\Sigma_2\subset M$ be two hypersurfaces in $M$ that are transverse to the Liouville vector field. The subsets $\Omega_1\subset \Sigma_1$ and $\Omega_2\subset \Sigma_2$ are said to be $\lambda$-related if each trajectory of the Liouville vector field either intersects both $\Omega_1$ and $\Omega_2$ or neither of them. \end{defn} \subsection{Continuation maps} To a pair $\Omega_a\subset \Omega_b$ of open subsets of $\partial W$, one can associate a morphism \[\Phi=\Phi_{\Omega_a}^{\Omega_b} : SH_\ast^{\Omega_a}(W)\to SH_\ast^{\Omega_b}(W),\] called the \emph{continuation map}. The groups $SH_\ast^\Omega(W)$ together with the continuation maps form a directed system of groups indexed by open subsets of $\partial W$. In other words, $\Phi_{\Omega}^\Omega$ is equal to the identity and $\Phi_{\Omega_b}^{\Omega_c}\circ \Phi_{\Omega_a}^{\Omega_b}=\Phi_{\Omega_a}^{\Omega_c}$. \subsection{Behaviour under direct limits} Let $\Omega_k\subset \partial W$, $k\in\mathbb{N}$, be an increasing sequence of open subsets, i.e. $\Omega_k\subset \Omega_{k+1}$ for all $k\in\mathbb{N}$. Denote $\Omega:=\bigcup_{k=1}^{\infty} \Omega_k$. Then, the map \[ \underset{k}{\lim_{\longrightarrow}}\: SH_\ast^{\Omega_k}(W) \to SH_\ast^{\Omega}(W), \] furnished by continuation maps is an isomorphism. The direct limit is taken with respect to continuation maps. \subsection{Conjugation isomorphisms}\label{sec:conjugationiso} The conjugation isomorphism \[\mathcal{C}(\psi) : SH_\ast^{\Omega_a}(W)\to SH_\ast^{\Omega_b}(W)\] is associated with a symplectomorphism $\psi:\hat{W}\to\hat{W}$, defined on the completion of $W$, that preserves the Liouville form outside of a compact set. With any such symplectomorphism $\psi$, one can associate a unique contactomorphism $\varphi:\partial W\to\partial W$, called the \emph{ideal restriction}, such that \[\psi(x,r)= \left( \varphi(x), f(x)\cdot r \right)\] for $r\in\R^+$ large enough and for a certain positive function $f:\partial W\to \R^+$. The set $\Omega_b$ is the image of $\Omega_a$ under the contactomorphism $\varphi^{-1}:\partial W\to\partial W$. That is, the conjugation isomorphism has the form \[\mathcal{C}(\psi) : SH_\ast^{\Omega}(W)\to SH_\ast^{\varphi^{-1}(\Omega)}(W),\] where $\varphi$ is the ideal restriction of $\psi$.
As a consequence, the groups $SH^{\Omega}_\ast(W)$ and $SH^{\varphi(\Omega)}_\ast(W)$ are isomorphic whenever the contactomorphism $\varphi$ is the ideal restriction of some symplectomorphism $\psi:\hat{W}\to\hat{W}$ (that preserves the Liouville form outside of a compact set). If a contactomorphism of $\partial W$ is contact isotopic to the identity, then it is equal to the ideal restriction of some symplectomorphism of $\hat{W}$. Hence, if $\Omega_a, \Omega_b\subset \partial W$ are two contact isotopic open subsets (i.e. there exists a contact isotopy $\varphi_t: \partial W\to \partial W$ such that $\varphi_0=\op{id}$ and such that $\varphi_1(\Omega_a)=\Omega_b$), then the groups $SH_\ast^{\Omega_a}(W)$ and $SH_\ast^{\Omega_b}(W)$ are isomorphic. The conjugation isomorphisms behave well with respect to the continuation maps, as asserted by the next theorem. \begin{theorem}\label{thm:conjVSsont} Let $W$ be a Liouville domain, let $\psi:\hat{W}\to\hat{W}$ be a symplectomorphism that preserves the Liouville form outside of a compact set, and let $\varphi:\partial W\to\partial W$ be the ideal restriction of $\psi$. Let $\Omega_a\subset \Omega_b\subset \partial W$ be open subsets. Then, the following diagram, consisting of conjugation isomorphisms and continuation maps, commutes \[\begin{tikzcd} SH_\ast^{\Omega_a}(W) \arrow{r}{\mathcal{C}(\psi)}\arrow{d}{\Phi}& SH_\ast^{\varphi^{-1}(\Omega_a)}(W)\arrow{d}{\Phi}\\ SH_\ast^{\Omega_b}(W) \arrow{r}{\mathcal{C}(\psi)}& SH_\ast^{\varphi^{-1}(\Omega_b)}(W). \end{tikzcd}\] \end{theorem} \subsection*{Applications} The selective symplectic homology is envisioned as a tool for studying contact geometry and dynamics of Liouville fillable contact manifolds. The present paper shows how it can be used to prove contact non-squeezing type of results. This is illustrated by the following abstract observation. \begin{theorem}\label{thm:ranknonsqueezing} Let $W$ be a Liouville domain and let $\Omega_a, \Omega_b\subset \partial W$ be open subsets. If the rank of the continuation map $SH_\ast^{\Omega_b}(W)\to SH_\ast(W)$ is (strictly) greater than the rank of the continuation map $SH_\ast^{\Omega_a}(W)\to SH_\ast(W),$ then $\Omega_b$ cannot be contactly squeezed into $\Omega_a$. \end{theorem} The theory of selective symplectic homology has rich algebraic structure that is beyond the scope of the present paper. For instance, \begin{enumerate} \item one can construct a persistent module associated to an open subset of a contact manifold, \item topological quantum field theory operations are well defined on $SH_\ast^\Omega(W),$ \item it is possible to define transfer morphisms for selective symplectic homology in analogy to Viterbo's transfer morphisms for symplectic homology, \item there exist positive selective symplectic homology, $\mathbb{S}^1$-equivariant selective symplectic homology, positive $\mathbb{S}^1$-equivariant selective symplectic homology... \end{enumerate} \subsection*{The structure of the paper} The paper is organized as follows. Section~\ref{sec:prelim} recalls the definition of Liouville domains and construction of the Hamiltonian-loop Floer homology. Sections~\ref{sec:SSH} - \ref{sec:conjugationisomorphisms} define the selective symplectic homology and derive its properties. Sections~\ref{sec:darboux} - \ref{sec:main} contain proofs of the applications to the contact non-squeezing and necessary computations. Section~\ref{sec:pathiso} discusses isomorphisms of contact Floer homology induced by families of contactomorphisms indexed by a disc. 
\subsection*{Acknowledgements} I would like to thank Paul Biran and Leonid Polterovich for their interest in this work and for valuable suggestions. This research was supported by the Science Fund of the Republic of Serbia, grant no.~7749891, Graphical Languages - GWORDS. \section{Preliminaries}\label{sec:prelim} \subsection{Liouville manifolds} This section recalls the notions of a Liouville domain and a Liouville manifold of finite type. Liouville manifolds (of finite type) play the role of an ambient space in this paper. The selective symplectic homology is built from objects on a Liouville manifold of finite type. \begin{defn} A Liouville manifold of finite type is an open manifold $M$ together with a 1-form $\lambda$ on it such that the following conditions hold. \begin{enumerate} \item The 2-form $d\lambda$ is a symplectic form on $M.$ \item \sloppy There exist a contact manifold $\Sigma$ with a contact form $\alpha$ and a codimension-0 embedding $ \iota : \Sigma\times\R^+\to M $ such that $M\setminus \iota(\Sigma\times\R^+)$ is a compact set, and such that $\iota^\ast \lambda=r\cdot \alpha,$ where $r$ stands for the $\R^+$ coordinate. \end{enumerate} \end{defn} We will refer to the map $\iota$ as a \emph{conical end} of the Liouville manifold $M.$ With slight abuse of terminology, the set $\iota(\Sigma\times \R^+)$ will also be called \emph{conical end}. A conical end is not unique. The Liouville vector field, $X_\lambda,$ of the Liouville manifold $(M, \lambda)$ of finite type is the complete vector field defined by $d\lambda(X_\lambda, \cdot)=\lambda.$ If $\Sigma\subset M$ is a closed hypersurface that is transverse to the Liouville vector field $X_\lambda,$ then $\left.\lambda\right|_{\Sigma}$ is a contact form on $\Sigma$ and there exists a unique codimension-0 embedding $ \iota_\Sigma: \Sigma\times\R^+\to M $ such that $\iota_\Sigma(x,1)=x$ and such that $\iota_\Sigma^\ast\lambda= r\cdot \left.\lambda\right|_{\Sigma}$. The notion of a Liouville manifold of finite type is closely related to that of a Liouville domain. \begin{defn} A Liouville domain is a compact manifold $W$ (with boundary) together with a 1-form $\lambda$ such that \begin{enumerate} \item $d\lambda$ is a symplectic form on $W,$ \item the Liouville vector field $X_\lambda$ points transversely outwards at the boundary. \end{enumerate} \end{defn} The Liouville vector field on a Liouville domain $(W,\lambda)$ is not complete. The completion of the Liouville domain is the Liouville manifold $(\hat{W},\hat{\lambda})$ of finite type obtained by extending the integral curves of the vector field $X_\lambda$ towards $+\infty.$ Explicitly, as a topological space, \[\hat{W}\quad:=\quad W\quad\cup_{\partial}\quad (\partial W)\times [1,+\infty).\] The manifolds $(\partial W)\times [1,+\infty)$ and $W$ are glued along the boundary via the map \[\partial W\times\{1\}\to\partial W\quad:\quad (x,1)\mapsto x. \] The completion $\hat{W}$ is endowed with the unique smooth structure such that the natural inclusions $W\hookrightarrow \hat{W}$ and $\partial W\times [1, +\infty)\hookrightarrow \hat{W}$ are smooth embeddings, and such that the vector field $X_\lambda$ extends smoothly to $\partial W\times [1,+\infty)$ by the vector field $r\partial_r.$ (Here, we tacitly identified $\partial W\times [1,+\infty)$ and $W$ with their images under the natural inclusions.) 
The 1-form $\hat{\lambda}$ is obtained by extending the 1-form $\lambda$ to $\partial W\times[1,+\infty)$ by $r\cdot \left.\lambda\right|_{\partial W}.$ The completion of a Liouville domain is a Liouville manifold of finite type and, conversely, every Liouville manifold of finite type is the completion of some Liouville domain. Let $M$ be a Liouville manifold of finite type, let $W\subset M$ be a codimension-0 Liouville subdomain, and let $f:\partial W\to\R^+$ be a smooth function. The completion $\hat{W}$ can be seen as a subset of $M$. Throughout the paper, $W^f$ denotes the subset of $M$ defined by \[W^f:=\hat{W}\setminus\iota_{\partial W}\big(\{f(x)\cdot r>1\}\big).\] Here, $\{f(x)\cdot r>1\}$ stands for $\left\{(x,r)\in\partial W\times \R^+\:|\: f(x)\cdot r>1\right\}$. The set $W^f$ is a codimension-0 Liouville subdomain in its own right, and the completions of $W$ and $W^f$ can be identified. \subsection{Floer theory} In this section, we recall the definition of the Floer homology for a contact Hamiltonian, $HF_\ast(W,h).$ A contact Hamiltonian is called admissible if it does not have any 1-periodic orbits and if it is 1-periodic in the time variable. The group $HF_\ast(W,h)$ is associated to a Liouville domain $(W,\lambda)$ and to an admissible contact Hamiltonian $h_t:\partial W\to \R$ that is defined on the boundary of $W.$ The Floer homology for contact Hamiltonians was introduced in \cite{merry2019maximum} by Merry and the author. It relies heavily on the Hamiltonian loop Floer homology \cite{floer1989symplectic} and symplectic homology \cite{floer1994symplectic,floer1994applications,cieliebak1995symplectic,cieliebak1996applications,viterbo1999functors,viterbo2018functors}, especially the version of symplectic homology by Viterbo \cite{viterbo1999functors}. \subsubsection{Auxiliary data} Let $(W,\lambda)$ be a Liouville domain, and let $h_t:\partial W\to \R$ be an admissible contact Hamiltonian. The group $HF_\ast(W, h)$ is defined as the Hamiltonian loop Floer homology, $HF_\ast(H,J),$ associated to a Hamiltonian $H$ and an almost complex structure $J.$ Both $H$ and $J$ are objects on the completion $\hat{W}=:M$ of the Liouville domain $W.$ Before stating the precise conditions that $H$ and $J$ are assumed to satisfy, we define the set $\mathcal{J}(\Sigma, \alpha)$ of almost complex structures of \emph{SFT type}. Let $\Sigma$ be a contact manifold with a contact form $\alpha$. The set $\mathcal{J}(\Sigma, \alpha)$ (or simply $\mathcal{J}(\Sigma)$ when the contact form is clear from the context) is the set of almost complex structures $J$ on the symplectization $\Sigma\times\R^+$ such that \begin{itemize} \item $J$ is invariant under the $\R^+$ action on $\Sigma\times\R^+$, \item $J(r\partial_r)= R_\alpha$, where $R_\alpha$ is the Reeb vector field on $\Sigma$ with respect to the contact form $\alpha$, \item the contact distribution $\xi:=\ker \alpha $ is invariant under $J$ and $\left.J\right|_{\xi}$ is a compatible complex structure on the symplectic vector bundle $(\xi, d\alpha)\to \Sigma$. \end{itemize} The list of the conditions for $(H,J)$ follows. \begin{enumerate} \item (Conditions on the conical end). There exist a positive number $a\in\R^+$ and a constant $c\in\R$ such that \[H_t\circ\iota_{\partial W}(x,r)= r\cdot h(x) + c,\] for all $t\in\R$ and $(x,r)\in\partial W\times[a,+\infty),$ and such that $\iota_{\partial W}^\ast J_t$ coincides with an element of $\mathcal{J}(\partial W)$ on $\partial W\times [a,+\infty)$ for all $t\in\R$.
Here, $\iota_{\partial W}: \partial W\times\R^+\to M$ is the conical end of $M$ associated to $\partial W.$ \item (One-periodicity). For all $t\in\R,$ $H_{t+1}=H_t$ and $J_{t+1}=J_t.$ \item ($d\hat{\lambda}$-compatibility). $d\hat{\lambda}(\cdot, J_t\cdot)$ is a Riemannian metric on $M$ for all $t\in\R.$ \end{enumerate} The pair $(H,J)$ that satisfies the conditions above is called \emph{Floer data} (for the contact Hamiltonian $h$ and the Liouville domain $(W,\lambda)$). Floer data $(H,J)$ is called \emph{regular} if, additionally, the following two conditions hold. \begin{enumerate} \setcounter{enumi}{3} \item (Non-degeneracy). The linear map \[ d\phi^H_1(x)-\op{id}\quad:\quad T_xM\to T_xM \] is invertible for all fixed points $x$ of $\phi_1^H.$ \item (Regularity). The linearized operator of the Floer equation \[ u:\R\times (\R/\mathbb{Z})\to M,\quad \partial_s u+ J_t(u)(\partial_t u- X_{H_t}(u))=0 \] is surjective. \end{enumerate} \subsubsection{Floer complex} Let $(H,J)$ be regular Floer data. The Floer complex, $CF_\ast(H,J),$ is built upon the contractible 1-periodic orbits of the Hamiltonian $H$. For every 1-periodic orbit $\gamma$ of the Hamiltonian $H,$ there exists a fixed point $x$ of $\phi^H_1$ such that $\gamma(t)=\phi^H_t(x).$ The degree, $\deg\gamma=\deg_H\gamma,$ of a contractible 1-periodic orbit $\gamma=\phi^H_\cdot(x)$ of the Hamiltonian $H$ is defined to be the negative Conley-Zehnder index of the path of symplectic matrices that is obtained from $d\phi^H_t(x)$ by trivializing $TM$ along a disc that is bounded by $\gamma$ (see \cite{salamon1999lectures} for details concerning the Conley-Zehnder index). Different choices of the capping disc can lead to different values of the degree; however, they all differ by an even multiple of the minimal Chern number \[N:=\min \left\{ c_1(u)>0\:|\: u:\mathbb{S}^2\to M \right\}.\] Therefore, $\deg \gamma$ is well defined as an element of $\mathbb{Z}_{2N}$ (but not as an element of $\mathbb{Z},$ in general). The Floer chain complex as a group is defined by \[CF_k(H,J):=\bigoplus_{\deg \gamma=k} \mathbb{Z}_2\left\langle\gamma\right\rangle.\] Since the Floer data $(H,J)$ is regular, the set $\mathcal{M}(H,J, \gamma^-, \gamma^+)$ of the solutions $u:\R\times(\R/\mathbb{Z})\to M$ of the Floer equation \[ \partial_s u + J_t(u)(\partial_t u - X_{H_t}(u))=0\] that join two 1-periodic orbits $\gamma^-$ and $\gamma^+$ of $H$ (i.e. $\displaystyle \lim_{s\to\pm\infty} u(s,t)=\gamma^\pm(t)$) is a finite dimensional manifold (components of which might have different dimensions). There is a natural $\R$-action on $\mathcal{M}(H,J, \gamma^-, \gamma^+)$ given by \[ \R\:\times\: \mathcal{M}(H,J, \gamma^-, \gamma^+)\quad\to\quad \mathcal{M}(H,J, \gamma^-, \gamma^+)\quad :\quad (a, u)\mapsto u(\cdot +a, \cdot). \] The quotient \[\tilde{\mathcal{M}}(H,J,\gamma^-,\gamma^+):=\mathcal{M}(H,J,\gamma^-,\gamma^+)/\mathbb{R}\] of $\mathcal{M}(H,J,\gamma^-,\gamma^+)$ by this action is also a finite dimensional manifold.
Denote by $n(\gamma^-, \gamma^+)=n(H,J, \gamma^-, \gamma^+)\in\mathbb{Z}_2$ the parity of the number of 0-dimensional components of $\tilde{\mathcal{M}}(H,J,\gamma^-,\gamma^+).$ The boundary map \[\partial : CF_{k+1}(H,J)\to CF_k(H,J)\] is defined on the generators by \begin{equation}\label{eq:boundary}\partial \left\langle \gamma\right\rangle:=\sum_{\tilde{\gamma}} n(\gamma,\tilde{\gamma})\left\langle \tilde{\gamma} \right\rangle.\end{equation} \sloppy If $\deg\gamma\not=\deg\tilde{\gamma}+1$, there are no 0-dimensional components of $\tilde{\mathcal{M}}(H,J,\gamma,\tilde{\gamma})$, and therefore, $n(\gamma,\tilde{\gamma})=0.$ Hence, the sum in \eqref{eq:boundary} can be taken only over $\tilde{\gamma}$ that satisfy $\op{deg}\tilde{\gamma}=\op{deg}\gamma-1$. The homology of the chain complex $CF_\ast(H,J)$ is denoted by $HF_\ast(H,J).$ \subsubsection{Continuation maps} Continuation maps compare Floer homologies for different choices of Floer data. They are associated to generic monotone homotopies of Floer data that join two given instances of Floer data. We refer to these homotopies as continuation data. Let $(H^-, J^-)$ and $(H^+, J^+)$ be regular Floer data. The continuation data from $(H^-, J^-)$ to $(H^+, J^+)$ is a pair $(\{H_{s,t}\}, \{J_{s,t}\})$ that consists of an $s$-dependent Hamiltonian $H_{s,t}:M\to\R$ and a family $J_{s,t}$ of almost complex structures on $M$ such that the following conditions hold: \begin{enumerate} \item (Homotopy of Floer data). For all $s\in\R,$ the pair $(H_{s,\cdot}, J_{s,\cdot})$ is Floer data (not necessarily regular) for some contact Hamiltonian. \item (Monotonicity). There exists $a\in\R^+$ such that $\partial_s H_{s,t}(x)\geqslant0,$ for all $s,t\in\R$ and $x\in\iota_{\partial W}(\partial W\times [a,+\infty)).$ \item ($s$-independence at the ends). There exists $b\in\R^+$ such that $H_{s,t}(x)= H^{\pm}_t(x),$ for all $t\in \R$ and $x\in M$, if $\pm s\in [b,+\infty)$. \end{enumerate} Continuation data $(\{H_{s,t}\},\{J_{s,t}\})$ is called \emph{regular} if the linearized operator of the $s$-dependent Floer equation \[ u:\R\times (\R/\mathbb{Z})\to M,\quad \partial_s u+ J_{s,t}(u)(\partial_t u- X_{H_{s,t}}(u))=0 \] is surjective. Given regular continuation data $(\{H_{s,t}\}, \{J_{s,t}\})$ from $(H^-, J^-)$ to $(H^+, J^+)$ and 1-periodic orbits $\gamma^-$ and $\gamma^+$ of $H^-$ and $H^+,$ respectively, the set of the solutions $u:\R\times(\R/\mathbb{Z})\to M$ of the problem \begin{align*} & \partial_s u + J_{s,t} (u) (\partial_t u - X_{H_{s,t}}(u))=0,\\ & \lim_{s\to\pm\infty} u(s,t)= \gamma^\pm(t) \end{align*} is a finite dimensional manifold. Its 0-dimensional part is compact, and therefore, a finite set. Denote by $m(\gamma^-,\gamma^+)$ the number modulo 2 of the 0-dimensional components of this manifold. The continuation map \[\Phi= \Phi(\{H_{s,t}\}, \{J_{s,t}\})\quad:\quad CF_\ast(H^-, J^-)\to CF_\ast(H^+, J^+)\] is the chain map defined on the generators by \[\Phi(\gamma^-):=\sum_{\gamma^+} m(\gamma^-, \gamma^+)\left\langle \gamma^+\right\rangle.\] The map $HF_\ast(H^-, J^-)\to HF_\ast(H^+, J^+)$ induced by a continuation map on the homology level (this map is also called a \emph{continuation map}) does not depend on the choice of continuation data from $(H^-, J^-)$ to $(H^+, J^+).$ The groups $HF_\ast(H,J)$ together with the continuation maps form a directed system of groups.
As a consequence, the groups $HF_\ast(H,J)$ and $HF_\ast(H', J')$ are canonically isomorphic whenever $(H,J)$ and $(H',J')$ are (regular) Floer data for the same admissible contact Hamiltonian. Therefore, the Floer homology $HF_\ast(h)= HF_\ast(W,h)$ for an admissible contact Hamiltonian $h_t:\partial W\to\R$ is well defined. The continuation maps carry over to Floer homology for contact Hamiltonians. Due to the ``monotonicity'' condition for the continuation data, the continuation map $HF_\ast(h)\to HF_\ast(h')$ is not well defined unless $h_t,h'_t:\partial W\to\R$ are admissible contact Hamiltonians such that $h\leqslant h',$ pointwise. For a positive smooth function $f:\partial W\to \R^+$, the completions of the Liouville domains $W$ and $W^f$ can be naturally identified. If a Hamiltonian $H: \hat{W}= \hat{W^f}\to \R$ has slope equal to $h$ with respect to the Liouville domain $W^f$, then it has slope equal to $f\cdot h$ with respect to the Liouville domain $W$. Therefore, the groups $HF_\ast(W^f, h)$ and $HF_\ast(W, f\cdot h)$ are canonically isomorphic. Here, we tacitly identified $\partial W$ and $\partial W^f$ via the contactomorphism furnished by the Liouville vector field, and regarded $h$ as a function on both $\partial W$ and $\partial W^f$. \section{Selective symplectic homology}\label{sec:SSH} This section formally defines the selective symplectic homology $SH_\ast^{\Omega}(W)$. To this end, two sets of smooth functions on $\partial W$ are introduced: $\mathcal{H}_\Omega(\partial W)$ and $\Pi(h)$. The set $\mathcal{H}_\Omega(\partial W)$ consists of certain non-negative smooth functions on $\partial W$, and $\Pi(h)$ is a set associated to $h\in \mathcal{H}_\Omega(\partial W)$ that can be thought of as the set of perturbations. \begin{defn}\label{def:Hasigma} Let $\Sigma$ be a closed contact manifold with a contact form $\alpha,$ and let $\Omega\subset \Sigma$ be an open subset. Denote by $\mathcal{H}_\Omega(\Sigma)= \mathcal{H}_\Omega(\Sigma,\alpha)$ the set of smooth ($C^\infty$) autonomous contact Hamiltonians $h:\Sigma\to[0,+\infty)$ such that \begin{enumerate} \item $ \op{supp} h\subset \Omega$,\label{cond:van} \item $dY^h(p)=0$ for all $p\in \Sigma$ such that $h(p)=0$, \item the 1-periodic orbits of $h$ are constant. \end{enumerate} \end{defn} In the definition above, $Y^h$ denotes the contact vector field of the contact Hamiltonian $h$. More precisely, the vector field $Y^h$ is determined by the following relations \begin{align*} & \alpha(Y^h)=-h,\\ & d\alpha(Y^h, \cdot)= dh- dh(R)\cdot \alpha, \end{align*} where $R$ stands for the Reeb vector field with respect to $\alpha$. The condition $dY^h(p)=0$ holds for $p\in h^{-1}(0)$ if, for instance, the Hessian of $h$ is equal to 0 at the point $p$. The set $\mathcal{H}_\Omega(\Sigma)$ is non-empty. \begin{defn}\label{def:Pih} Let $\Sigma$ be a closed contact manifold with a contact form $\alpha,$ let $\Omega\subset \Sigma$ be an open subset, and let $h\in\mathcal{H}_\Omega(\Sigma).$ Denote by $\Pi(h)$ the set of smooth positive functions $f:\Sigma\to\R^+$ such that the contact Hamiltonian $h+f$ has no 1-periodic orbits. \end{defn} The next proposition implies that $\Pi(h)$ is non-empty for $h\in\mathcal{H}_\Omega(\Sigma)$. It is also used in the proof of Lemma~\ref{lem:invlimstab} below. \begin{prop}\label{prop:no1open} Let $\Sigma$ be a closed contact manifold with a contact form.
Let $h:\Sigma\to\R$ be a contact Hamiltonian such that $h$ has no non-constant 1-periodic orbits, and such that $dY^h(p)=0$ for all $p\in\Sigma$ at which the vector field $Y^h$ vanishes. Then, there exists a $C^2$ neighbourhood of $h$ in $C^\infty(\Sigma)$ such that the flow of $g$ has no non-constant 1-periodic orbits for all $g$ in that neighbourhood. \end{prop} \begin{proof} Assume the contrary. Then, there exist a sequence of contact Hamiltonians $h_k$ and a sequence $x_k\in\Sigma$ such that $h_k\to h$ in $C^2$ topology, such that $x_k\to x_0,$ and such that $t\mapsto \varphi_t^{h_k}(x_k)$ is a non-constant 1-periodic orbit of $h_k.$ This implies that $t\mapsto \varphi_t^h(x_0)$ is a 1-periodic orbit of $h,$ and therefore, has to be constant. By assumptions, $dY^h(x_0)=0.$ The map $C^\infty(\Sigma)\to\mathfrak{X}(\Sigma)$ that assigns the contact vector field to a contact Hamiltonian is continuous with respect to $C^2$ topology on $C^\infty(\Sigma)$ and $C^1$ topology on $\mathfrak{X}(\Sigma)$. Consequently (since $h_k\to h$ in $C^2$ topology), $Y^{h_k}\to Y^h$ in $C^1$ topology. Therefore, for each $L>0,$ there exists a neighbourhood $U\subset \Sigma$ of $x_0$ and $N\in\mathbb{N}$ such that $\left. Y^{h_k}\right|_{U}$ is Lipschitz with Lipschitz constant $L$ for all $k\geqslant N.$ For $k$ big enough, the loop $t\mapsto \varphi_t^{h_k}(x_k)$ is contained in the neighbourhood $U.$ This contradicts \cite{yorke1969periods}, which asserts that a non-constant periodic orbit of a vector field with Lipschitz constant $L$ has period at least $2\pi/L$; hence, for $L$ small enough, there are no non-constant 1-periodic orbits of $h_k$ contained in $U.$ \end{proof} The following definition introduces the selective symplectic homology. \begin{defn}\label{def:SSH} Let $W$ be a Liouville domain, and let $\Omega\subset \partial W$ be an open subset of the boundary $\Sigma:=\partial W.$ The \emph{selective symplectic homology} with respect to $\Omega$ is defined to be \[ SH_\ast^\Omega(W):=\underset{h\in\mathcal{H}_\Omega(\Sigma)}{\lim_{\longrightarrow}}\:\:\underset{f\in\Pi(h)}{\lim_{\longleftarrow}}\: HF_\ast(h+f). \] The limits are taken with respect to the continuation maps. \end{defn} Given $h\in\mathcal{H}_\Omega(\Sigma),$ Proposition~\ref{prop:no1open} implies that for $f:\Sigma\to\R^+$ smooth and small enough (with respect to the $C^2$ topology), the contact Hamiltonian $h+f$ has no 1-periodic orbits. As a consequence, the groups $HF_\ast(h+f_1)$ and $HF_\ast(h+f_2)$ are canonically isomorphic for $f_1$ and $f_2$ sufficiently small. In other words, the inverse limit \[\underset{f\in\Pi(h)}{\lim_{\longleftarrow}} HF_\ast (h+f)\] stabilizes for $h\in\mathcal{H}_\Omega(\partial W)$. This is proven in the next lemma. \begin{lem}\label{lem:invlimstab} Let $W$ be a Liouville domain, let $\Omega\subset \partial W$ be an open subset, and let $h\in\mathcal{H}_\Omega(\partial W)$. Then, there exists an open convex neighbourhood $U$ of 0 (seen as a constant function on $\partial W$) in $C^2$ topology such that the natural map \[\underset{f\in\Pi(h)}{\lim_{\longleftarrow}} HF_\ast (h+f) \to HF_\ast(h+g) \] is an isomorphism for all $g\in C^\infty(\partial W, \R^+)\cap U$. \end{lem} \begin{proof} Proposition~\ref{prop:no1open} implies that there exists a convex $C^2$ neighbourhood $U$ of the constant function $\partial W\to \R: p\mapsto 0$ such that $h+ f$ has no non-constant 1-periodic orbits if $f\in U$. Since $h+f$ is positive for a positive function $f\in U$, it does not have any constant orbits either (the corresponding vector field is nowhere 0). Hence, $h+f$ has no 1-periodic orbits for all positive functions $f:\partial W\to \R^+$ from $U$.
This, in particular, implies $ \mathcal{O}:=C^\infty(\partial W, \R^+)\cap U \subset \Pi(h).$ The set $\mathcal{O}$ is also convex. Therefore, $(1-s)\cdot f_a + s\cdot f_b\in\mathcal{O}$ for all $f_a, f_b\in\mathcal{O}$ and $s\in[0,1]$. If, additionally, $f_a\leqslant f_b$, then $h+ (1-s)\cdot f_a + s\cdot f_b$ is an increasing family (in $s$-variable) of admissible contact Hamiltonians. Theorem~1.3 from \cite{uljarevic2022hamiltonian} asserts that the continuation map $HF_\ast(h+f_a)\to HF_\ast(h+f_b)$ is an isomorphism in this case. This implies the claim of the lemma. \end{proof} The set $U$ from Lemma~\ref{lem:invlimstab} is not unique. For technical reasons, it is useful to choose one specific such set (we will denote it by $\mathcal{U}(h)$)\label{p:U} for a given contact Hamiltonian $h\in\mathcal{H}_\Omega(\partial W)$. The construction of $\mathcal{U}(h)$ follows. Let $\psi_j: V_j\to\partial W$ be charts on $\partial W$ and let $K_j\subset \psi_j(V_j)$ be compact subsets, $j\in\{1,\ldots, m\}$, such that $\bigcup_{j=1}^m K_j=\partial W$. Denote by $\norm{\cdot}_{C^2}$ the norm on $C^\infty(\partial W, \R)$ defined by \[\norm{f}_{C^2}:= \underset{i\in\{0,1,2\}}{\max_{j\in\{1,\ldots, m\}}}\max_{\psi_j^{-1}(K_j)} \norm{D^i(f\circ\psi_j)}. \] The norm $\norm{\cdot}_{C^2}$ induces the $C^2$ topology on $C^\infty(\partial W, \R)$. Denote by $\mathcal{B}(\varrho)\subset C^\infty(\partial W, \R)$ the open ball with respect to $\norm{\cdot}_{C^2}$ centered at 0 of radius $\varrho$. Define $\mathcal{U}(h)$ as the union of the balls $\mathcal{B}(\varrho)$ that have the following property: the contact Hamiltonian $h+f$ has no non-constant 1-periodic orbits for all $f\in\mathcal{B}(\varrho)$. The set $\mathcal{U}(h)$ is open as the union of open subsets. It is convex as the union of nested convex sets. And, it is non-empty by Proposition~\ref{prop:no1open}. The subset of $\mathcal{U}(h)$ consisting of strictly positive functions is denoted by $\mathcal{O}(h)$, i.e. $\mathcal{O}(h):= \mathcal{U}(h)\cap C^\infty(\partial W, \R^+).$\label{p:O} \section{Behaviour under direct limits} The next theorem asserts that the selective symplectic homology behaves well with respect to direct limits. \begin{theorem}\label{thm:limitsh} Let $(W,\lambda)$ be a Liouville domain, and let $\Omega_1,\Omega_2,\ldots$ be a sequence of open subsets of $\partial W$ such that $\Omega_k\subset \Omega_{k+1}$ for all $k\in\mathbb{N}.$ Denote $\Omega:=\bigcup_{k}\Omega_k.$ Then, the map \begin{align*} & \mathfrak{P} : \underset{k}{\lim_{\longrightarrow}}\: SH_\ast^{\Omega_k}(W)\to SH_\ast^\Omega(W), \end{align*} furnished by continuation maps, is an isomorphism. \end{theorem} \begin{proof} Let $h$ be an arbitrary contact Hamiltonian in $\mathcal{H}_\Omega(\partial W)$. Since $\op{supp} h$ is a compact subset of $\Omega$, and since $\bigcup\Omega_k=\Omega$, there exists $k\in\mathbb{N}$ such that $\op{supp} h\subset \Omega_k$. For such a $k$, we have $h\in\mathcal{H}_{\Omega_k}(\partial W)$. In other words, $\bigcup_k \mathcal{H}_{\Omega_k}(\partial W)= \mathcal{H}_\Omega(\partial W)$. The theorem now follows from the next abstract lemma. \end{proof} The following lemma was used in the proof of Theorem~\ref{thm:limitsh}. \begin{lem} Let $(P,\leqslant)$ be a directed set and let $P_1\subset P_2\subset\cdots\subset P$ be subsets of $P$ such that $(P_j,\leqslant)$ is a directed set for all $j\in \mathbb{N}$, and such that $\bigcup_j P_j= P$. Let $\{G_a\}_{a\in P}$ be a directed system over $P$.
Then, there exists a canonical isomorphism \[\underset{j}{\lim_{\longrightarrow}}\:\underset{a\in P_j}{\lim_{\longrightarrow}}\: G_a\:\to\: \underset{a\in P}{\lim_{\longrightarrow}}\: G_a.\] \end{lem} \begin{proof} Denote by $f_a^b:G_a\to G_b$, $a\leqslant b$ the morphisms of the directed system $\{G_a\}$. Denote by \[\phi_a^j: G_a\to\underset{b\in P_j}{\lim_{\longrightarrow}} G_b\] the canonical map, defined if $a\in P_j$. Since $\phi_b^j\circ f_a^b=\phi_a^j$ whenever $a\leqslant b$ and $a,b\in P_j$, the morphisms $\{\phi_a^j\}_{a\in P_i}$ induce a morphism \[F_i^j: \underset{a\in P_i}{\lim_{\longrightarrow}} G_a \to \underset{a\in P_j}{\lim_{\longrightarrow}} G_a \] for positive integers $i\leqslant j$. The morphisms $\{F_i^j\}_{i\leqslant j}$ make $\displaystyle \left\{\underset{a\in P_j}{\lim_{\longrightarrow}} G_a\right\}_{j\in\mathbb{N}}$ into a directed system indexed by $(\mathbb{N}, \leqslant)$. Denote by \[\Phi_j: \underset{a\in P_j}{\lim_{\longrightarrow}} G_a \to \underset{j\in \mathbb{N}}{\lim_{\longrightarrow}}\: \underset{a\in P_j}{\lim_{\longrightarrow}} G_a\] the canonical map. We will prove the lemma by showing that $\displaystyle \underset{j\in \mathbb{N}}{\lim_{\longrightarrow}} \underset{a\in P_j}{\lim_{\longrightarrow}} G_a$ together with the maps $\Phi_j\circ\phi_a^j$, $a\in P$ satisfies the universal property of the direct limit. Let $\left(Y, \{\psi_a\}_{a\in P}\right)$ be a target, i.e. $\{\psi_a: G_a\to Y\}_a$ is a collection of morphisms that satisfy $\psi_b\circ f_a^b=\psi_a$ for all $a,b\in P$ such that $a\leqslant b$. Since $\left(Y, \{\psi_a\}_{a\in P_j}\right)$ is a target for the directed system $\{G_a\}_{a\in P_j}$, the universal property of the direct limit implies that there exists a unique morphism \[\Psi_j: \underset{a\in P_j}{\lim_{\longrightarrow}} G_a\to Y\] such that $\Psi_j\circ \phi_a^j= \psi_a$ for all $a\in P_j$. The uniqueness part implies $\Psi_j\circ F_i^j=\Psi_i$ for positive integers $i\leqslant j$, i.e. the morphisms $\Psi_j$ are compatible with the directed system. By applying the universal property again, we conclude that there exists a unique morphism \[\Psi : \underset{j}{\lim_{\longrightarrow}}\:\underset{a\in P_j}{\lim_{\longrightarrow}}\: G_a\to Y \] such that $\Psi\circ\Phi_j=\Psi_j$. Since \[\Psi\circ\Phi_j\circ\phi_a^j= \Psi_j\circ \phi_a^j=\psi_a,\] this finishes the proof. \end{proof} \section{Conjugation isomorphisms}\label{sec:conjugationisomorphisms} Let $(M,\lambda)$ be a Liouville manifold of finite type. The group of symplectomorphisms $\psi :M\to M$ that preserve the Liouville form outside of a compact subset is denoted by $\op{Symp}^\ast(M,\lambda)$. If $M=\hat{W}$ is the completion of a Liouville domain $(W, \lambda)$, then for $\psi\in \op{Symp}^\ast(M, \lambda)$ there exist a contactomorphism $\varphi:\partial W \to\partial W$ and a positive smooth function $f:\partial W\to\R^+$ such that \[ \psi(x,r)= (\varphi(x), r\cdot f(x)), \] for $x\in\partial W$ and $r\in\R^+$ large enough. The contactomorphism $\varphi$ is called the \emph{ideal restriction} of $\psi$. To an element $\psi\in\op{Symp}^\ast(M, \lambda)$, one can associate isomorphisms, called \emph{conjugation isomorphisms}, \begin{align*} & \mathcal{C}(\psi) : HF_\ast(H,J) \to HF_\ast(\psi^\ast H, \psi^\ast J), \end{align*} where $(H,J)$ is regular Floer data. The isomorphisms $\mathcal{C}(\psi)$ are defined on the generators by \[\gamma\mapsto \psi^\ast \gamma =\psi^{-1}\circ \gamma.\] Already on the chain level, they are isomorphisms and they commute with the continuation maps.
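One way to see that $\psi^\ast\gamma:=\psi^{-1}\circ\gamma$ is again a 1-periodic orbit is the usual naturality of Hamiltonian vector fields under symplectomorphisms. Writing $\psi^\ast H:=H\circ\psi$ and using, say, the sign convention $d\hat{\lambda}(X_{H_t},\cdot)=dH_t$ (the opposite convention works verbatim), one has \[ d\hat{\lambda}\big((d\psi)^{-1}X_{H_t}\circ\psi,\,\cdot\,\big)=\psi^\ast\big(d\hat{\lambda}(X_{H_t},\cdot)\big)=\psi^\ast dH_t= d(H_t\circ\psi), \] since $\psi$ preserves $d\hat{\lambda}$. Hence $X_{H_t\circ\psi}=(d\psi)^{-1}X_{H_t}\circ\psi$, and $\gamma$ solves $\dot{\gamma}=X_{H_t}(\gamma)$ if, and only if, $\psi^{-1}\circ\gamma$ solves the corresponding equation for $\psi^\ast H$.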
\begin{prop} Let $(M,\lambda)$ be the completion of a Liouville domain $(W, \lambda)$, let $\psi\in\op{Symp}^\ast(M,\lambda)$, and let $\varphi:\partial W\to \partial W$ be the ideal restriction of $\psi$. Then, the conjugation isomorphisms with respect to $\psi$ give rise to isomorphisms (called the same) \begin{align*} &\mathcal{C}(\psi) : SH_\ast^{\Omega}(W)\to SH_\ast^{\varphi^{-1}(\Omega)}(W), \end{align*} for every open subset $\Omega\subset \partial W$. \end{prop} \begin{proof} Let $h\in\mathcal{H}_\Omega(\partial W)$, let $f\in \Pi(h)$, and let $(H,J)$ be Floer data for $W$ and for the contact Hamiltonian $h+f$. The Floer data $(\psi^\ast H, \psi^\ast J)$ corresponds to the contact Hamiltonian $g\cdot (h+f)\circ \varphi$, where $g:\partial W\to \R^+$ is a certain positive smooth function. Moreover, $g\cdot h\circ\varphi \in \mathcal{H}_{\varphi^{-1}(\Omega)}(\partial W)$ and $g\cdot f\circ \varphi \in \Pi(g\cdot h\circ\varphi).$ Since the conjugation isomorphisms commute with the continuation maps and since the relations above hold, the conjugation isomorphisms give rise to an isomorphism \[\mathcal{C}(\psi) : SH_\ast^{\Omega}(W) \to SH_\ast^{\varphi^{-1}(\Omega)}(W).\] \end{proof} Now, the proof of Theorem~\ref{thm:conjVSsont} from the introduction follows directly. \settheoremtag{\ref{thm:conjVSsont}} \begin{theorem} Let $W$ be a Liouville domain, let $\psi:\hat{W}\to\hat{W}$ be a symplectomorphism that preserves the Liouville form outside of a compact set, and let $\varphi:\partial W\to\partial W$ be the ideal restriction of $\psi$. Let $\Omega_a\subset \Omega_b\subset \partial W$ be open subsets. Then, the following diagram, consisting of conjugation isomorphisms and continuation maps, commutes \[\begin{tikzcd} SH_\ast^{\Omega_a}(W) \arrow{r}{\mathcal{C}(\psi)}\arrow{d}{}& SH_\ast^{\varphi^{-1}(\Omega_a)}(W)\arrow{d}{}\\ SH_\ast^{\Omega_b}(W) \arrow{r}{\mathcal{C}(\psi)}& SH_\ast^{\varphi^{-1}(\Omega_b)}(W). \end{tikzcd}\] \end{theorem} \begin{proof} The proof follows directly from the commutativity of the conjugation isomorphisms and the continuation maps on the level of $HF_\ast(H,J)$. \end{proof} \section{Selective symplectic homology for a Darboux chart}\label{sec:darboux} This section proves that sufficiently small open subsets on the boundary of a Liouville domain have finite dimensional selective symplectic homology. Let $a_1, \ldots, a_n, b\in\R^+$. The contact polydisc $P=P(a_1,\ldots, a_n, b)$ is a subset of the standard contact $\R^{2n+1}$ (endowed with the contact form $dz + \sum_{j=1}^n(x_jdy_j -y_jdx_j)$) that is given by \[P:= \left\{ (x,y,z)\in\R^n\times\R^n\times\R\:|\: z^2\leqslant b^2\:\&\: (\forall j\in\{1,\ldots, n\})\: x_j^2+y_j^2\leqslant a_j^2 \right\}.\] \begin{theorem}\label{thm:sshdarboux} Let $W$ be a Liouville domain and let $P\subset \partial W$ be a contact polydisc in a Darboux chart. Then, the continuation map \[SH_\ast^{\emptyset}(W)\to SH_\ast^{\op{int}P}(W)\] is an isomorphism. \end{theorem} The next lemma is used in the proof of Theorem~\ref{thm:sshdarboux}. \begin{lem}\label{lem:bump} Let $\alpha := dz + \sum_{j=1}^n (x_j dy_j - y_j dx_j)$ be the standard contact form on $\R^{2n+1}$. Denote by $(r_j, \theta_j)$ polar coordinates in the $(x_j, y_j)$-plane, $j=1,\ldots, n$.
Let $h:\R^{2n+1}\to [0,+\infty)$ be a contact Hamiltonian of the form \[h(r, \theta, z):= \varepsilon + g(z)\cdot \prod_{j=1}^n f_j(r_j),\] where $\varepsilon\in\R^+$, $g:\R\to [0,+\infty)$ is a smooth function, and $f_j:[0,+\infty)\to [0,+\infty)$ is a (not necessarily strictly) decreasing smooth function, $j=1,\ldots, n$. Then, the $z$-coordinate strictly decreases along the trajectories of the contact Hamiltonian $h$ (with respect to the contact form $\alpha$). \end{lem} \begin{proof} Let $Y^h$ be the vector field of the contact Hamiltonian $h$, i.e. the vector field that satisfies $\alpha(Y^h)=- h$ and $d\alpha(Y^h, \cdot)= dh - dh(\partial_z)\cdot \alpha$. Then, \[ dz(Y^h)= -\varepsilon + g(z)\cdot \left( -\prod_{k=1}^n f_k(r_k) +\frac{1}{2}\cdot \sum_{j=1}^n \left( r_j\cdot f'_j(r_j)\cdot \prod_{k\not=j} f_k(r_k) \right) \right). \] In particular, $dz(Y^h(p))\leqslant -\varepsilon$ for all $p\in\R^{2n+1}$. Let $\gamma:I\to \R^{2n+1}$ be a trajectory of the contact Hamiltonian $h$. Then, \[\frac{d}{dt}\left(z(\gamma(t)) \right)= dz(Y^h(\gamma(t)))\leqslant -\varepsilon.\] Consequently, the function $t\mapsto z(\gamma(t))$ is strictly decreasing. \end{proof} \begin{proof}[Proof of Theorem~\ref{thm:sshdarboux}] By assumptions, there exists a Darboux chart $\psi:O\to \R^{2n+1}$, $O\subset \partial W$, such that $\psi(P)= P(a_1, \ldots, a_n, b)$ for some $a_1,\ldots, a_n, b\in\R^+$. Since $P(a_1, \ldots, a_n, b)$ is compact and $\psi(O)$ open, there exist $b', a_1',\ldots, a_n'\in\R^+$ such that \[P(a_1, \ldots, a_n, b)\subset \op{int} P(a_1', \ldots, a_n', b')\subset \psi(O).\] In particular, $b<b'$. Denote $\varepsilon_1 := b'-b$. Let $h\in\mathcal{H}_{\op{int} P}(\partial W)$ be such that \begin{equation}\label{eq:productlike} h\circ \psi^{-1} (r, \theta, z) = g(z)\cdot \prod_{j=1}^n f_j(r_j)\end{equation} for some smooth function $g:\R\to[0,+\infty)$ and some smooth decreasing functions $f_j:[0,+\infty)\to[0, +\infty)$, $j=1, \ldots, n$ such that $\op{supp} g \subset (-b, b) $ and $\op{supp} f_j\subset [0, a_j)$. Let $\varepsilon_0\in\R^+$ be such that there are no closed Reeb orbits on $\partial W$ of period less than or equal to $\varepsilon_0$. Now, we show that the contact Hamiltonian $h+\varepsilon$ has no 1-periodic orbits if $0<\varepsilon<\min\{\varepsilon_0, \varepsilon_1\}$. This implies $\varepsilon\in \mathcal{O}(h)$ if $0<\varepsilon<\min\{\varepsilon_0, \varepsilon_1\}$. Let $\gamma:\R\to \partial W$ be a trajectory of the contact Hamiltonian $h+\varepsilon$. If $\gamma$ does not intersect $P$, then $\gamma$ is also a trajectory of the reparametrized Reeb flow $t\mapsto \varphi_{-\varepsilon\cdot t}$. Since $\varepsilon<\varepsilon_0$, this implies that $\gamma$ is not 1-periodic. Assume, now, that $\gamma$ does intersect $P$. If $\gamma$ is entirely contained in $O$, then Lemma~\ref{lem:bump} implies that $\gamma$ is not 1-periodic. If $\gamma$ is not entirely contained in $O$, then (by Lemma~\ref{lem:bump}) $\gamma$ intersects $\psi^{-1}\left( \R^{2n}\times[b, b'] \right)$. On $\psi^{-1}\left( \R^{2n}\times[b, b'] \right)$, the contact Hamiltonian $h+\varepsilon$ is equal to $\varepsilon$ and $\gamma(t)$ is equal to $\psi^{-1}(x,y, z-\varepsilon t)$ for some $(x,y,z)\in\R^{2n+1}$. In particular, $\gamma$ ``spends'' at least $\frac{b'-b}{\varepsilon}$ time passing through $\psi^{-1}\left( \R^{2n}\times[b, b'] \right)$. Since \[\frac{b'-b}{\varepsilon}> \frac{b'-b}{\varepsilon_1}=1,\] $\gamma$ cannot be 1-periodic.
The same argument shows that the contact Hamiltonian $h^s:= s\cdot h+ \varepsilon$ has no 1-periodic orbits for all $s\in[0,1]$. Additionally, $\partial_sh^s\geqslant 0$. Therefore, the continuation map \[HF_\ast(\varepsilon)=HF_\ast(h^0)\to HF_\ast(h^1)= HF_\ast(h+\varepsilon)\] is an isomorphism \cite[Theorem~1.3]{uljarevic2022hamiltonian}. Since for every $\tilde{h}\in\mathcal{H}_{\op{int} P}(\partial W)$ there exists $h\in \mathcal{H}_{\op{int} P}(\partial W)$ of the form \eqref{eq:productlike} such that $\tilde{h}\leqslant h$, the theorem follows. \end{proof} \section{Immaterial transverse circles and selective symplectic homology of their complements}\label{sec:immaterial} This section provides non-trivial examples where the selective symplectic homology is ``large''. We start by defining \emph{immaterial} subsets of contact manifolds. \begin{defn} A subset $A$ of a contact manifold $\Sigma$ is called \emph{immaterial} if there exists a contractible loop $\varphi_t:\Sigma\to \Sigma$ of contactomorphisms such that its contact Hamiltonian $h_t:\Sigma\to\R$ (with respect to some contact form on $\Sigma$) is positive on $A$, i.e. such that it satisfies \[(\forall x\in A)(\forall t\in\R)\quad h_t(x)>0.\] \end{defn} If a compact subset $A$ of a contact manifold $\Sigma$ is immaterial, then there exists a contractible loop of contactomorphisms on $\Sigma$ whose contact Hamiltonian is arbitrarily large on $A$. In fact, this property of a compact subset $A$ is equivalent to $A$ being immaterial. \begin{lem} A compact subset $A$ of a contact manifold $\Sigma$ is immaterial if, and only if, for every $a\in\R^+$ there exists a contractible loop of contactomorphisms on $\Sigma$ such that its contact Hamiltonian $h_t:\Sigma\to \R$ satisfies \[(\forall x\in A)(\forall t\in\R)\quad h_t(x)\geqslant a.\] \end{lem} \begin{proof} Let $a\in\R^+$ be an arbitrarily large positive number and let $A$ be a compact immaterial subset of a contact manifold $\Sigma$. Then, there exists a contractible loop $\varphi:\Sigma\to\Sigma$ of contactomorphisms such that its contact Hamiltonian $h_t:\Sigma\to\R$ satisfies \[(\forall x\in A)(\forall t\in\R)\quad h_t(x)>0.\] Denote $m:= \min_{x\in A, t\in\R} h_t(x)>0$. Let $k\in\mathbb{N}$ be such that $k\cdot m> a$. Denote by $h^k_t:\Sigma\to\R$ the contact Hamiltonian defined by \mbox{$h^k_t(x):=k\cdot h_{kt}(x)$}. The contact Hamiltonian $h^k$ furnishes a loop of contactomorphisms that is obtained by concatenating $\varphi$ to itself $k$ times. In particular, $h^k$ generates a contractible loop of contactomorphisms. By construction \[(\forall x\in A)(\forall t\in \R)\quad h^k_t(x)\geqslant k\cdot m>a.\] This proves one direction of the lemma. The other direction is obvious. \end{proof} The next lemma implies that a singleton (i.e. a set consisting of a single point) is immaterial in every contact manifold of dimension greater than 3. By continuity, every point in a contact manifold of dimension greater than 3 has an immaterial neighbourhood. \begin{lem}\label{lem:ptnegl} Let $\Sigma$ be a contact manifold of dimension $2n+1 > 3$. Then, there exists a contractible loop $\varphi_t:\Sigma\to \Sigma$ of contactomorphisms such that its contact Hamiltonian is positive at some point (for all times $t$). \end{lem} \begin{proof} Let $\mathbb{S}^{2n+1}$ be the standard contact sphere seen as the unit sphere in $\mathbb{C}^{n+1}$ centered at the origin. The unitary matrices act on $\mathbb{S}^{2n+1}$ as contactomorphisms. 
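Indeed, writing the standard contact form on the unit sphere as $\alpha_{\mathrm{std}}=\frac{i}{2}\sum_{j}\left(z_j\,d\overline{z}_j-\overline{z}_j\,dz_j\right)$, which is the restriction of $\sum_j(x_jdy_j-y_jdx_j)$ (any other standard normalization behaves in the same way), unitarity of $A\in U(n+1)$ gives \[ A^\ast\Big(\sum_{j} z_j\,d\overline{z}_j\Big)=\sum_{k,l}\Big(\sum_{j}\overline{A_{jl}}\,A_{jk}\Big)z_k\,d\overline{z}_l=\sum_{k} z_k\,d\overline{z}_k, \] so $A^\ast\alpha_{\mathrm{std}}=\alpha_{\mathrm{std}}$ and $A$ restricts to a (strict) contactomorphism of $\mathbb{S}^{2n+1}$.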
Let $\psi_t:\mathbb{S}^{2n+1}\to \mathbb{S}^{2n+1}$ be the contact circle action given by \[ \psi_t(z):= \left( z_1, \ldots, z_{n-1}, e^{2\pi i t} z_n, e^{-2\pi i t} z_{n+1} \right). \] The loop \[t\mapsto \left[\begin{matrix} e^{2\pi i t} & 0\\ 0 & e^{-2\pi i t} \end{matrix}\right]\] is contractible in the unitary group $U(2)$. Hence, there exists a smooth $s$-family $A^s$, $s\in[0,1]$, of loops in $U(2)$ such that \[A^1(t)= \left[\begin{matrix} e^{2\pi i t} & 0\\ 0 & e^{-2\pi i t} \end{matrix}\right]\] and such that $A^0(t)= \left[\begin{matrix}1&0\\ 0&1\end{matrix}\right]$ for all $t$. Denote $\psi^s_t(z):=\left[ \begin{matrix} \mathbb{1}_{n-1} & \\ & A^s(t) \end{matrix}\right] z$. For all $s\in[0,1]$, $\psi^s$ is a loop of contactomorphisms of $\mathbb{S}^{2n+1}$ and $\psi_t^0=\op{id}$, $\psi_t^1=\psi_t$. Therefore, $\psi_t$ is a contractible loop of contactomorphisms. Denote by $h^s_t:\mathbb{S}^{2n+1}\to \mathbb{R}$ the contact Hamiltonian of $\psi^s_t$ and $h:=h^1$. Explicitly, $h(z_1,\ldots, z_{n+1})= 2\pi\cdot \left(\abs{z_{n+1}}^2-\abs{z_n}^2\right)$. In particular, $h$ is po\-si\-tive at the point $(0,\ldots, 0,1)$. Denote $V(r):=\left\{ z\in\mathbb{S}^{2n+1}\:|\: \abs{z_1}> 1-r \right\}$ and let $\varepsilon\in (0,1)$. Let $\mu: \mathbb{S}^{2n+1}\to[0,1]$ be a smooth cut-off function such that $\mu(x)=0$ for $x$ in a neighbourhood of $p:=(1,0,\ldots, 0)$ and such that $\mu(x)=1$ for $x\in\mathbb{S}^{2n+1}\setminus V(\frac{\varepsilon}{2})$. Let $f_t^s(x):= \mu(x)\cdot h^s_t(x)$. By the construction of $\mu$ and since $V(r)$ is invariant under $\psi^s_t$ for all $r,s$, and $t$, the contactomorphism $\varphi_1^{f^s}$ is compactly supported in $V(\varepsilon)$ for all $s$. Let $g^s_t:\mathbb{S}^{2n+1}\to\R$, $s\in[0,1]$ be the contact Hamiltonian that generates $t\mapsto \varphi_1^{f^{t\cdot s}}$, i.e. $\varphi_t^{g^s}= \varphi_1^{f^{t\cdot s}}$. Denote $g:=g^1.$ The map $\varphi^{f^1}_t\circ(\varphi_t^g)^{-1}$ is a loop of contactomorphisms. Its contact Hamiltonian $e_t:\mathbb{S}^{2n+1}\to\R$ is equal to 0 in a neighbourhood of $p$ and coincides with $f^1$ in $\mathbb{S}^{2n+1}\setminus V(\varepsilon)$. Consequently (since $f^1$ and $h$ coincide in $\mathbb{S}^{2n+1}\setminus V(\varepsilon)$), the contact Hamiltonians $e$ and $h$ coincide in $\mathbb{S}^{2n+1}\setminus V(\varepsilon)$. This implies that $\varphi^{f^1}_t\circ(\varphi_t^g)^{-1}$ is a loop of contactomorphisms of $\mathbb{S}^{2n+1}$ that are compactly supported in the complement of a neighbourhood of $p$. Additionally, this implies that there exists $q\in\mathbb{S}^{2n+1}\setminus V(\varepsilon)$ such that $e_t(q)=h(q)>0$ for all $t$. The loop $\varphi_t^e=\varphi^{f^1}_t\circ(\varphi_t^g)^{-1}$ is contractible via the homotopy $\left\{\varphi^{f^s}_t\circ(\varphi_t^{g^s})^{-1}\right\}_{s\in[0,1]}$ that is also compactly supported in the complement of a neighbourhood of $p$. Since $\mathbb{S}^{2n+1}\setminus \{p\}$ is contactomorphic to the standard $\R^{2n+1}$ and since every contact manifold has a contact Darboux chart around each of its points, the lemma follows. \end{proof} The following theorem implies that the complement of an immaterial circle has infinite dimensional selective symplectic homology under some additional assumptions. \begin{theorem}\label{thm:compnegl} Let $W$ be a Liouville domain and let $\Gamma\subset \partial W$ be an immaterial embedded circle that is transverse to the contact distribution. Denote $\Omega:=\partial W\setminus \Gamma$. 
Then, the rank of the continuation map $SH_\ast^\Omega(W)\to SH_\ast(W)$ is equal to $\dim SH_\ast(W)$. \end{theorem} \begin{proof} This proof assumes results of Section~\ref{sec:pathiso}. For an admissible contact Hamiltonian $h_t:\partial W\to \R$, denote by $r(h)=r(W, h)$ the rank of the canonical map $HF_\ast(h)\to SH_\ast(W)$. It is enough to prove that for every admissible $\ell\in\R$ there exist $h\in\mathcal{H}_\Omega(\partial W)$ and $\varepsilon\in\mathcal{O}(h)$ such that $r(\ell)\leqslant r(h+\varepsilon)$. Denote by $\alpha$ the contact form on $\partial W$ (the restriction of the Liouville form). Without loss of generality (see Theorem~2.5.15 and Example~2.5.16 in \cite{geiges2008introduction}), we may assume that there exists an open neighbourhood $U\subset \partial W$ of $\Gamma$ and an embedding $\psi: U\to \mathbb{C}^n\times\mathbb{S}^1$ such that $\psi(\Gamma)= \{0\}\times\mathbb{S}^1$ and such that \[\alpha=\psi^\ast\left( d\theta + \frac{i}{2}\sum_{j=1}^n (z_jd\overline{z}_j-\overline{z}_jdz_j)\right).\] Here, $z=(z_1,\ldots, z_n)\in\mathbb{C}^n$ and $\theta\in\mathbb{S}^1$. Let $\ell\in\R$ be an arbitrary admissible (constant) slope. Since $\Gamma$ is immaterial, there exists a contractible loop of contactomorphisms $\varphi^f_t:\partial W\to\partial W$ (which we see as a 1-periodic $\R$-family of contactomorphisms) such that its contact Hamiltonian $f_t:\partial W\to\R$ satisfies $\min_{x\in\Gamma, t\in\R} f_t(x)\geqslant 2\ell$. Denote $m:=\min_{x\in\partial W, t\in\R} f_t(x)$. Let $h\in\mathcal{H}_{\Omega}(\partial W)$ be a strict contact Hamiltonian (i.e. its flow preserves the contact form $\alpha$) such that $h(x)\geqslant \ell- m$ for $x$ in the set $ \left\{ x\in\partial W\:|\: \min_{t\in\R} f_t(x)\leqslant \ell \right\}.$ The contact Hamiltonian $h$ can be constructed as follows. Since the function $x\mapsto\min_{t\in\R} f_t(x)$ is continuous, the set $S:=\{x\in\partial W\:|\: \min_{t\in\R} f_t(x)\leqslant\ell\}$ is closed. Therefore, there exists a ball $B(r)\subset \mathbb{C}^n$ centered at the origin with sufficiently small radius $r$ such that $\overline{B(r)}\times\mathbb{S}^1\subset \psi(U \setminus S)$. Now, we choose $h$ to be equal to a constant greater than $\ell-m$ on $\partial W\setminus \psi^{-1}\left( \overline{B(r)}\times \mathbb{S}^1 \right)$ and such that $h\circ\psi^{-1}(z, \theta)= \overline{h}(\abs{z_1}^2+\cdots+ \abs{z_n}^2)$ for $\abs{z}<r$ and for some smooth function $\overline{h}: [0,+\infty)\to [0,+\infty)$ that is equal to 0 near 0. Generically, $h$ has no non-constant 1-periodic orbits. Let $\varepsilon\in\R^+$ be a sufficiently small positive number such that $\varepsilon\in\mathcal{O}(h)$ and denote $h^\varepsilon:= h+\varepsilon.$ Let $g:=h^\varepsilon\# f$ be the contact Hamiltonian that generates the contact isotopy $\varphi_t^{h^\varepsilon}\circ\varphi_t^f$, i.e. \[ g_t(x) := h^{\varepsilon}(x) + f_t\circ \left(\varphi^{h^\varepsilon}_t\right)^{-1}(x). \] (In the last formula, we used that $h^\varepsilon$ is a strict contact Hamiltonian.) If $h^\varepsilon (x) < \ell-m$, then (since $h^\varepsilon$ is autonomous and strict) \mbox{$h^\varepsilon\circ\left(\varphi_t^{h^\varepsilon}\right)^{-1}(x)<\ell-m$} for all $t$. Consequently (by the choice of $h$), $\min_{s\in\R} f_s\circ \left( \varphi_t^{h^\varepsilon}\right)^{-1}(x)> \ell$. This implies $g_t(x)\geqslant \ell$ for all $x\in\partial W$ and $t\in\R$.
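In more detail, using $h^\varepsilon\geqslant\varepsilon$, the estimate $f_t\circ\big(\varphi^{h^\varepsilon}_t\big)^{-1}(x)>\ell$ obtained above for the first case, and $f_t\geqslant m$ for the second case, one gets \[ g_t(x)=h^\varepsilon(x)+f_t\circ\big(\varphi^{h^\varepsilon}_t\big)^{-1}(x)\;\geqslant\; \begin{cases} \varepsilon+\ell & \text{if } h^\varepsilon(x)<\ell-m,\\ (\ell-m)+m & \text{if } h^\varepsilon(x)\geqslant\ell-m, \end{cases} \] and both lower bounds are at least $\ell$.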
Since $\varphi^f$ is a contractible loop of contactomorphisms, there exists a smooth homotopy $\sigma$ from the constant loop $t\mapsto \op{id}$ to $\varphi^f$. By Section~\ref{sec:pathiso}, there exist isomorphisms $\mathcal{B}(\varphi^f,\sigma) : HF_\ast(h^\varepsilon)\to HF_\ast(h^\varepsilon\# f)$ and $\mathcal{B}(\varphi^f,\sigma) : SH_\ast(W)\to SH_\ast(W)$ such that the following diagram, whose vertical arrows represent the continuation maps, commutes: \[\begin{tikzcd} SH_\ast(W) \arrow{r}{\mathcal{B}(\varphi^f,\sigma)}& SH_\ast(W)\\ HF_\ast(h^\varepsilon) \arrow{u}\arrow{r}{\mathcal{B}(\varphi^f,\sigma)}& HF_\ast(h^\varepsilon\#f).\arrow{u} \end{tikzcd}\] Consequently, $r(h^\varepsilon)=r(h^\varepsilon\# f)= r(g)$. Since $g\geqslant \ell$, we have $r(g)\geqslant r(\ell).$ This further implies $r(h+\varepsilon)= r(h^\varepsilon)\geqslant r(\ell)$ and the proof is finished. \end{proof} \section{Applications to contact non-squeezing}\label{sec:main} We start with a proof of Theorem~\ref{thm:ranknonsqueezing} from the introduction. \settheoremtag{\ref{thm:ranknonsqueezing}} \begin{theorem} Let $W$ be a Liouville domain and let $\Omega_a, \Omega_b\subset \partial W$ be open subsets. If the rank of the continuation map $SH_\ast^{\Omega_b}(W)\to SH_\ast(W)$ is (strictly) greater than the rank of the continuation map $SH_\ast^{\Omega_a}(W)\to SH_\ast(W),$ then $\Omega_b$ cannot be contactly squeezed into $\Omega_a$. \end{theorem} \begin{proof} Denote by $r(\Omega)\in\mathbb{N}\cup\{0,\infty\}$ the rank of the continuation map $SH_\ast^\Omega(W)\to SH_\ast(W).$ Assume the contrary, i.e. that there exist open subsets $\Omega_a, \Omega_b\subset\partial W$ with $r(\Omega_a)<r(\Omega_b)$ and a contact isotopy $\varphi_t:\partial W\to\partial W$, $t\in[0,1]$ such that $\varphi_0=\op{id}$ and such that $\varphi_1(\Omega_b)\subset\Omega_a$. By Section~\ref{sec:conjugationiso} (and Section~\ref{sec:conjugationisomorphisms}), $r(\varphi_1(\Omega_b))= r(\Omega_b)$. Since $\varphi_1(\Omega_b)\subset\Omega_a$, the continuation map $SH_\ast^{\varphi_1(\Omega_b)}(W)\to SH_\ast(W)$ factors through the continuation map $SH_\ast^{\Omega_a}(W)\to SH_\ast(W).$ Hence, $r(\Omega_b)= r(\varphi_1(\Omega_b))\leqslant r(\Omega_a).$ This contradicts the assumption $r(\Omega_a)<r(\Omega_b)$. \end{proof} \settheoremtag{\ref{thm:homologyspheres}} \begin{theorem} Let $n > 2$ be a natural number and let $W$ be a $2n$-dimensional Liouville domain such that $\dim SH_\ast(W)> \sum_{j=0}^{2n} \dim H_j(W;\mathbb{Z}_2)$ and such that $\partial W$ is a homotopy sphere. Then, there exist two embedded closed balls $B_1, B_2\subset \partial W$ of dimension $2n-1$ such that $B_1$ cannot be contactly squeezed into $B_2$. \end{theorem} \begin{proof} Denote $\Sigma:=\partial W$, and denote by $r(\Omega)\in\mathbb{N}\cup\{0,\infty\}$ the rank of the continuation map $SH_\ast^{\Omega}(W)\to SH_\ast(W)$ for an open subset $\Omega\subset\Sigma$. \textbf{Step~1} (A subset with a small rank). Since $SH_\ast^\emptyset(W)$ is isomorphic to $H_\ast(W, \partial W; \mathbb{Z}_2)$, Theorem~\ref{thm:sshdarboux} implies that there exists a non-empty open subset $\Omega\subset \Sigma$ such that $r(\Omega)\leqslant \sum_{j=0}^{2n} \dim H_j(W,\partial W; \mathbb{Z}_2)= \sum_{j=0}^{2n} \dim H_j(W;\mathbb{Z}_2)$. \textbf{Step~2} (A subset with a large rank). This step proves that for every $c\in \R$ with $c\leqslant \dim SH_\ast(W)$, there exists an open non-dense subset $U\subset\Sigma$ such that $r(U)\geqslant c$. Assume the contrary, i.e.
that there exists $c\in\R$ such that $r(U)< c$ for every open non-dense subset $U\subset \Sigma$. By Lemma~\ref{lem:ptnegl}, there exists a sufficiently small contact Darboux chart on $\Sigma$ that is immaterial. Let $\Gamma$ be an embedded circle in that chart that is transverse to the contact distribution. Then, $\Gamma$ is immaterial as well. Consequently (by Theorem~\ref{thm:compnegl}), the continuation map $\Phi: SH_\ast^{\Sigma\setminus\Gamma}(W)\to SH_\ast(W)$ has rank equal to $\dim SH_\ast(W)$, i.e. $r(\Sigma\setminus\Gamma)=\dim SH_\ast(W)$. Hence, there exist $a_1, \ldots, a_k\in SH_\ast^{\Sigma\setminus\Gamma}(W)$, with $k\geqslant c$, such that $\Phi(a_1), \ldots, \Phi(a_k)$ are linearly independent. Let $U_1, U_2, \ldots $ be an increasing family of open non-dense subsets of $\Sigma$ such that $\bigcup_{j=1}^\infty U_j=\Sigma\setminus\Gamma$. Since, by Theorem~\ref{thm:limitsh}, continuation maps furnish an isomorphism \[\underset{j}{\lim_{\longrightarrow}} SH_\ast^{U_j}(W)\to SH_\ast^{\Sigma\setminus\Gamma}(W),\] there exist $m\in\mathbb{N}$ and $b_1,\ldots, b_k \in SH^{U_m}_\ast(W)$ such that $b_1, \ldots, b_k$ are mapped to $a_1, \ldots, a_k$ via the continuation map $SH_\ast^{U_m}(W)\to SH_\ast^{\Sigma\setminus\Gamma}(W).$ The images of $b_1, \ldots, b_k$ under the continuation map $SH_\ast^{U_m}(W)\to SH_\ast(W)$ are equal to $\Phi(a_1), \ldots, \Phi(a_k)$, and therefore, are linearly independent. Hence, $r(U_m)\geqslant k \geqslant c.$ This contradicts the assumption and finishes Step~2. \textbf{Step~3} (The final details). This step finishes the proof. Let $\Omega, U\subset \Sigma$ be open non-empty subsets such that $U$ is non-dense, and such that $r(\Omega)<r(U)$. Step~1 and Step~2 prove the existence of such sets $\Omega$ and $U$. Let $a\in \Omega$ and $b\in \Sigma\setminus\{a\}\setminus\overline{U}$ be two points. Since $\Sigma$ is a homotopy sphere, there exists a Morse function $f:\Sigma\to \R$ that attains its minimum at $a$ and its maximum at $b$ and that has no other critical points. The existence of such a function $f$ is guaranteed by the results of Smale \cite{smale1956generalized,smale1962structure,smale1962structure5} and Cerf \cite{cerf1968diffeomorphismes} (see also \cite[Proposition~2.2]{saeki2006morse}). The Morse theory implies that $\Sigma_t:= f^{-1} \big( (-\infty, t]\big)$ is the standard $(2n-1)$-dimensional closed ball smoothly embedded into $\Sigma$ for all $t\in (f(a), f(b))$ (see, for instance, \cite{banyaga2013lectures}). For $s\in (f(a), f(b))$ sufficiently close to $f(a)$, $\Sigma_s\subset \Omega$. Similarly, for $\ell\in (f(a), f(b))$ sufficiently close to $f(b)$, $\op{int} \Sigma_\ell\supset U$. Since $r(\Omega)< r(U)$, by Theorem~\ref{thm:ranknonsqueezing}, $U$ cannot be contactly squeezed into $\Omega$. Hence, $\Sigma_\ell$ cannot be contactly squeezed into $\Sigma_s$ and the proof is finished (one can take $B_2:=\Sigma_s$ and $B_1:=\Sigma_\ell$). \end{proof} Now, we prove the contact non-squeezing for the Ustilovsky spheres. \settheoremtag{\ref{thm:Ustilovskyspheres}} \begin{theorem} Let $\Sigma$ be an Ustilovsky sphere. Then, there exist two embedded closed balls $B_1, B_2\subset \Sigma$ of dimension equal to $\dim \Sigma$ such that $B_1$ cannot be contactly squeezed into $B_2$. 
\end{theorem} \begin{proof} In view of Theorem~\ref{thm:homologyspheres}, it is enough to show that the symplectic homology of the Brieskorn variety \[ W:=\left\{ z=(z_0,\ldots, z_{2m+1})\in\mathbb{C}^{2m+2}\:|\: z_0^p + z_1^2 +\cdots + z_{2m+1}^2=\varepsilon\:\&\: \abs{z}\leqslant1 \right\}\] is infinite dimensional. Here, $m,p\in\mathbb{N}$ are natural numbers with $p\equiv \pm 1\pmod{8}$ and $\varepsilon\in\R^+$ is sufficiently small. The Brieskorn variety $W$ is a Liouville domain whose boundary is contactomorphic to an Ustilovsky sphere, and every Ustilovsky sphere is contactomorphic to $\partial W$ for some choice of $m$ and $p$. Let $\Sigma_k$ be the sequence of manifolds such that $\Sigma_k=\partial W$ if $p\mid k$ (we use this notation for ``$k$ is divisible by $p$'') and such that \[\Sigma_k:=\left\{z=(z_1,\ldots, z_{2m+1})\in\mathbb{C}^{2m+1}\:|\: z_1^2+\cdots+ z_{2m+1}^2=0\:\&\: \abs{z}=1\right\}\] if $p\nmid k$. Denote \[s_k:= \frac{k}{p}\cdot \left( (4m - 2)\cdot p + 4 \right) - 2m\] if $p\mid k$, and \[s_k:= (4m-2)\cdot k + 2\cdot \left\lfloor\frac{2k}{p}\right\rfloor -2m +2\] otherwise. Theorem~B.11 from \cite{kwon2016brieskorn} implies that there exists a spectral sequence $E_{k,\ell}^r$ that converges to $SH_\ast(W)$ such that its first page is given by \[ E^1_{k,\ell}=\left\{ \begin{matrix} H_{k+\ell-s_k}(\Sigma_k;\mathbb{Z}_2) &\text{if } k>0,\\ H_{\ell+ 2m+1}(W, \partial W; \mathbb{Z}_2) &\text{if } k=0,\\ 0 &\text{if } k<0. \end{matrix}\right. \] The terms of $E^1$ can be explicitly computed. If $p\nmid k$, then $\Sigma_k$ is diffeomorphic to the unit cotangent bundle $S^\ast\mathbb{S}^{2m}$ of the sphere. Otherwise, $\Sigma_k$ is diffeomorphic to $\mathbb{S}^{4m+1}$, because $p\equiv \pm 1\pmod{8}$ \cite{brieskorn1966beispiele}. Therefore, \[H_j(\Sigma_k; \mathbb{Z}_2)\cong \left\{ \begin{matrix}\mathbb{Z}_2 & \text{if } j\in\{0, 2m-1, 2m, 4m-1\},\\ 0 & \text{otherwise} \end{matrix}\right.\] if $p\nmid k$, and \[H_j(\Sigma_k; \mathbb{Z}_2)\cong \left\{ \begin{matrix}\mathbb{Z}_2 & \text{if } j\in\{0,4m+1\}, \\ 0 & \text{otherwise} \end{matrix}\right.\] if $p\mid k$. The Brieskorn variety $W$ is homotopy equivalent to the bouquet of $p-1$ spheres of dimension $2m+1$ \cite[Theorem~6.5]{milnor2016singular}. Therefore, \[ H_j(W, \partial W; \mathbb{Z}_2) \cong \left\{\begin{matrix} \mathbb{Z}_2 & \text{if } j=4m+2,\\ \mathbb{Z}_2^{p-1} & \text{if } j=2m+1,\\ 0 & \text{otherwise.}\end{matrix} \right. \] Figure~\ref{fig:spectralsequence} on page~\pageref{fig:spectralsequence} shows the page $E^1_{k,\ell}$ for $p=7$ and $m=1.$ For $k\in\mathbb{N}\cup \{0\}$, denote by $Q(k)$ the unique number in $\mathbb{Z}$ such that $E^{1}_{k, Q(k)}\not=0$ and such that $E^1_{k,\ell}=0$ for all $\ell>Q(k)$. Similarly, for $k\in\mathbb{N}\cup \{0\}$, denote by $q(k)$ the unique number in $\mathbb{Z}$ such that $E^{1}_{k, q(k)}\not=0$ and such that $E^1_{k,\ell}=0$ for all $\ell<q(k)$. Explicitly, \begin{align*} & Q(k)=\left\{\begin{matrix} 2m+1 &\text{if } k=0,\\ 4m-1+s_k-k & \text{if } p\nmid k,\\ 4m+1+s_k-k & \text{if }p\mid k, \end{matrix}\right.\\ & q(k)=\left\{\begin{matrix} 0 & \text{if } k=0,\\ s_k-k & \text{for } k\in\mathbb{N}. \end{matrix}\right. \end{align*} We will show that the element in $E^1_{ap-1, Q(ap-1)}$ ``survives'' in $SH_\ast(W)$ for $a\in\mathbb{N}$. (In Figure~\ref{fig:spectralsequence}, the fields $(ap-1, Q(ap-1))$, $a=1,2$ are emphasized by thicker edges.) Both sequences $Q(k)$ and $q(k)$ are strictly increasing in $k$.
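For the reader's convenience, here is the arithmetic behind the two differences used below. For $a\in\mathbb{N}$ and $p\geqslant 3$ one has $p\nmid ap\pm1$, $\left\lfloor 2(ap-1)/p\right\rfloor=2a-1$ and $\left\lfloor 2(ap+1)/p\right\rfloor=2a$, so the formulas above give \begin{align*} Q(ap-1)&=4m-1+s_{ap-1}-(ap-1)=(4m-3)\cdot ap+4a-2m+2,\\ q(ap)&=s_{ap}-ap=(4m-3)\cdot ap+4a-2m,\\ q(ap+1)&=s_{ap+1}-(ap+1)=(4m-3)\cdot ap+4a+2m-1. \end{align*} In particular, $q(ap+1)-Q(ap-1)=4m-3$ and $Q(ap-1)-q(ap)=2$.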
Since $Q(k)$ is strictly increasing, the element in $E^1_{ap-1, Q(ap-1)}$ cannot be ``killed'' by an element from $E^1_{k,\ell}$ if $k\leqslant ap-1$. Since \[q(ap+1)-Q(ap-1)= 4m-3\geqslant 1,\] the element in $E^1_{ap-1, Q(ap-1)}$ cannot be ``killed'' by an element from $E^1_{k,\ell}$ if $k\geqslant ap+1$. Finally, since $Q(ap-1)-q(ap)=2$, the non-zero groups $E_{ap, \ell}^1$ are the ones for $\ell= Q(ap-1)-2$ and $\ell=Q(ap-1)+4m-1$. In particular, $E^1_{ap, Q(ap-1)}=0$. Therefore, $E^\infty_{ap-1, Q(ap-1)}\not=0$ for all $a\in\mathbb{N}$. This implies $\dim SH_\ast(W)=\infty$ and the proof is finished. \end{proof} \begin{figure} \centering \begin{tikzpicture}[scale=0.5] \foreach \i in {0,1,..., 16}{ \draw[very thin, gray] (\i, 0)--(\i, 27); } \foreach \i in {0,...,27}{ \draw[very thin, gray] (0,\i)--(16, \i); } \node[below] at (0.5, 0) {$0$}; \node[below] at (7.5, 0) {$7$}; \node[below] at (14.5, 0) {$14$}; \node[left] at (0, 0.5) {$0$}; \node[left] at (0, 10.5) {$10$}; \node[left] at (0, 20.5) {$20$}; \node at (0.5, 0.5) {6}; \node at (0.5, 3.5) {1}; \node at (1.5, 1.5) {1}; \node at (1.5, 2.5) {1}; \node at (1.5, 3.5) {1}; \node at (1.5, 4.5) {1}; \node at (2.5, 2.5) {1}; \node at (2.5, 3.5) {1}; \node at (2.5, 4.5) {1}; \node at (2.5, 5.5) {1}; \node at (3.5, 3.5) {1}; \node at (3.5, 4.5) {1}; \node at (3.5, 5.5) {1}; \node at (3.5, 6.5) {1}; \node at (4.5, 6.5) {1}; \node at (4.5, 7.5) {1}; \node at (4.5, 8.5) {1}; \node at (4.5, 9.5) {1}; \node at (5.5, 7.5) {1}; \node at (5.5, 8.5) {1}; \node at (5.5, 9.5) {1}; \node at (5.5, 10.5) {1}; \node at (6.5, 8.5) {1}; \node at (6.5, 9.5) {1}; \node at (6.5, 10.5) {1}; \node at (6.5, 11.5) {1}; \node at (7.5, 9.5) {1}; \node at (7.5, 14.5) {1}; \node at (8.5, 12.5) {1}; \node at (8.5, 13.5) {1}; \node at (8.5, 14.5) {1}; \node at (8.5, 15.5) {1}; \node at (9.5, 13.5) {1}; \node at (9.5, 14.5) {1}; \node at (9.5, 15.5) {1}; \node at (9.5, 16.5) {1}; \node at (10.5, 14.5) {1}; \node at (10.5, 15.5) {1}; \node at (10.5, 16.5) {1}; \node at (10.5, 17.5) {1}; \node at (11.5, 17.5) {1}; \node at (11.5, 18.5) {1}; \node at (11.5, 19.5) {1}; \node at (11.5, 20.5) {1}; \node at (12.5, 18.5) {1}; \node at (12.5, 19.5) {1}; \node at (12.5, 20.5) {1}; \node at (12.5, 21.5) {1}; \node at (13.5, 19.5) {1}; \node at (13.5, 20.5) {1}; \node at (13.5, 21.5) {1}; \node at (13.5, 22.5) {1}; \node at (14.5, 20.5) {1}; \node at (14.5, 25.5) {1}; \node at (15.5, 23.5) {1}; \node at (15.5, 24.5) {1}; \node at (15.5, 25.5) {1}; \node at (15.5, 26.5) {1}; \draw[very thick] (6,11)--(7,11)--(7,12)--(6,12)--(6,11); \draw[very thick] (13,22)--(14,22)--(14,23)--(13,23)--(13, 22); \end{tikzpicture} \caption{The first page of the spectral sequence from the proof of Theorem~\ref{thm:Ustilovskyspheres} for $p=7$ and $m=1$. The number in the field $(k,\ell)$ represents $\dim E^1_{k,\ell}$. Empty fields are assumed to contain zeros. } \label{fig:spectralsequence} \end{figure} Finally, we prove Corollary~\ref{cor:nonsqR}. \settheoremtag{\ref{cor:nonsqR}} \begin{cor} Let $m\in\mathbb{N}$. Then, there exist a contact structure $\xi$ on $\R^{4m+1}$ and an embedded closed ball $B\subset \R^{4m+1}$ of dimension $4m+1$ such that $B$ cannot be contactly squeezed into an arbitrary non-empty open subset by a compactly supported contact isotopy of $\left(\R^{4m+1}, \xi\right)$. 
\end{cor} \begin{proof} Let $S$ be a $(4m+1)$-dimensional Ustilovsky sphere and let $B_1, B_2\subset S$ be two embedded closed balls such that $B_1\cup B_2\not= S$ and such that $B_2$ cannot be contactly squeezed into $B_1.$ Let $p\in S\setminus (B_1\cup B_2)$. The Ustilovsky sphere $S$ is diffeomorphic to the standard sphere. Hence, there exists a diffeomorphism $\psi: \R^{4m+1}\to S\setminus\{p\}.$ Let $\xi$ be the pullback of the contact structure on $S$ via $\psi$. Now, we prove that $\xi$ and $B:=\psi^{-1}(B_2)$ satisfy the conditions of the corollary. Assume the contrary. Then, there exists a compactly supported contact isotopy $\varphi_t: \R^{4m+1}\to\R^{4m+1}$ such that $\varphi_0=\op{id}$ and such that $\varphi_1(B)\subset\op{int} \left(\psi^{-1}(B_1)\right)$. Since $\varphi$ is a compactly supported isotopy, $\psi\circ\varphi_t\circ\psi^{-1}$ extends to a contact isotopy on $S$. This contact isotopy squeezes $B_2$ into $B_1$. This is a contradiction that finishes the proof. \end{proof} \section{Isomorphisms furnished by paths of admissible contact Hamiltonians}\label{sec:pathiso} In this section, we construct an isomorphism \[\mathcal{B}(\{h^a\}): HF_\ast(h^0)\to HF_\ast(h^1) \] associated with a smooth family $h^a_t:\partial W\to \R$, $a\in[0,1]$, of admissible contact Hamiltonians on the boundary of a Liouville domain $W$. As a particular instance of this construction, we associate an isomorphism \[\mathcal{B}(\varphi^f, \sigma) : HF_\ast(h)\to HF_\ast(h\#f)\] with a contractible loop $\varphi^f_t:\partial W\to\partial W$ of contactomorphisms and a homotopy $\sigma$ from the constant loop $t\mapsto\op{id}$ to $\varphi^f.$ Denote by $\mathfrak{S}$ the set of admissible contact Hamiltonians $h_t:\partial W\to \R$ on the boundary $\partial W$ of a Liouville domain $W$. The set $\mathfrak{S}$ is open in the space of smooth functions $\partial W\times\mathbb{S}^1\to\R$ with respect to the $C^2$ topology. Let $\norm{\cdot}_{C^2}$ be a norm inducing the $C^2$-topology. Denote by $\mathcal{F}$ the family of open balls $B_R(\eta)$ with respect to $\norm{\cdot}_{C^2}$ that satisfy $B_{9R}(\eta)\subset \mathfrak{S}$. For $\mathcal{O}=B_R(\eta)\in\mathcal{F}$, denote $\tilde{\mathcal{O}}:=B_{3R}(\eta)$. \begin{defn} Let $h^a$, $a\in[0,1]$, be a smooth family of admissible contact Hamiltonians on the boundary of a Liouville domain. The isomorphism \[ \mathcal{B}(\{h^a\}) : HF_\ast(h^0)\to HF_\ast(h^1)\] is defined in the following way: \begin{enumerate} \item Choose finitely many sets $\mathcal{O}_1, \ldots, \mathcal{O}_m\in \mathcal{F}$ such that $h^a\in \bigcup_{j=1}^m \mathcal{O}_j$ for all $a\in[0,1]$. \item Choose $0=a_0< a_1<\cdots< a_k=1$ such that for each $j=0,\ldots, k-1$ there exists $\ell_j\in\{1,\ldots, m\}$ such that $h^a\in \mathcal{O}_{\ell_j}$ for all $a\in[a_j, a_{j+1}]$. \item Choose $g^0,\ldots, g^{k-1}\in\mathfrak{S}$ such that $g^j\in \tilde{\mathcal{O}}_{\ell_j}$ and $g^j\leqslant h^{a_j}, h^{a_{j+1}}$ for $j=0,\ldots, k-1$. \item Denote by \begin{align*} & \Phi^j : HF_\ast(g^j)\to HF_\ast(h^{a_j}) \\ & \Psi^j : HF_\ast(g^j)\to HF_\ast(h^{a_{j+1}}) \end{align*} the continuation maps (they are isomorphisms because the contact Hamiltonians $(1-s)\cdot g^j + s\cdot h^{a_j} $ and $(1-s)\cdot g^j + s\cdot h^{a_{j+1}} $ are admissible and increasing with respect to the $s$-variable, see \cite[Theorem~1.3]{uljarevic2022hamiltonian}), $j=0,\ldots, k-1$.
\item Define \[\mathcal{B}(\{h^a\}):= \Psi^{k-1}\circ \left( \Phi^{k-1} \right)^{-1}\circ\cdots \circ \Psi^1\circ \left(\Phi^1\right)^{-1}\circ \Psi^0\circ \left(\Phi^0\right)^{-1}.\] \end{enumerate} \end{defn} The isomorphism $\mathcal{B}(\{h^a\})$ does not depend on the additional choices. Moreover, if $\{h^a\}$ and $\{f^a\}$, $a\in[0,1]$, are two smooth families of admissible contact Hamiltonians such that $h^a\leqslant f^a$ for all $a\in[0,1]$, then the following diagram commutes \[\begin{tikzcd} HF_\ast(h^0) \arrow{r}{\mathcal{B}(\{h^a\})}\arrow{d}& HF_\ast(h^1)\arrow{d}\\ HF_\ast(f^0) \arrow{r}{\mathcal{B}(\{f^a\})}& HF_\ast(f^1). \end{tikzcd}\] In the diagram, the vertical arrows represent the continuation maps. Now, we associate an isomorphism $ \mathcal{B}(\varphi^f,\sigma): HF_\ast(h)\to HF_\ast(h\#f)$ with a contractible (smooth) loop $\varphi^f_t:\partial W\to\partial W$ of contactomorphisms and a smooth homotopy $\sigma^a_t:\partial W\to\partial W$ from the constant loop based at the identity to $\varphi^f$. We see the homotopy $\sigma^a_t$ as a smooth $\R\times[0,1]$-family of contactomorphisms that is 1-periodic in the $t\in\R$ variable and such that the following conditions hold: \begin{enumerate} \item $ \sigma_t^0=\op{id} $ for all $t\in\R$, \item $\sigma_0^a=\sigma_1^a=\op{id}$ for all $a\in[0,1]$, \item $ \sigma_t^1=\varphi_t^f$ for all $ t\in\R$. \end{enumerate} For every admissible contact Hamiltonian $h\in\mathfrak{S}$, the homotopy $\sigma$ furnishes a smooth family $\eta^a:=h\#f^a$ of admissible contact Hamiltonians. Here, $f^a$ denotes the contact Hamiltonian of the contact isotopy $t\mapsto \sigma^a_t$. Define $ \mathcal{B}(\varphi^f,\sigma):= \mathcal{B}(\{\eta^a\})$. Now we show that $\sigma$ also induces an isomorphism $SH_\ast(W)\to SH_\ast(W)$ that behaves well with respect to the canonical maps $HF_\ast(h)\to SH_\ast(W)$. For a contact Hamiltonian $h$, denote $\op{osc}(h):=\max_{x,t} h_t(x)- \min_{x,t} h_t(x) $ and denote by $\kappa_t^h$ the smooth function determined by $(\varphi_t^h)^\ast\alpha=\kappa_t^h\cdot \alpha$, where $\alpha$ is the contact form. If $h,g$ are admissible contact Hamiltonians such that \[g-h\geqslant \max_{a\in[0,1]} \op{osc}(f^a\cdot \kappa^h),\] then $g\# f^a\geqslant h\#f^a$ for all $a\in[0,1]$. Consequently, the following diagram commutes \[\begin{tikzcd} HF_\ast(h) \arrow{r}{\mathcal{B}(\varphi^f,\sigma)}\arrow{d}& HF_\ast(h\#f)\arrow{d}\\ HF_\ast(g) \arrow{r}{\mathcal{B}(\varphi^f,\sigma)}& HF_\ast(g\#f). \end{tikzcd}\] In the diagram, the vertical arrows represent the continuation maps. Hence, there exists an isomorphism $\mathcal{B}(\varphi^f,\sigma): SH_\ast(W)\to SH_\ast(W)$ such that the diagram \[\begin{tikzcd} SH_\ast(W) \arrow{r}{\mathcal{B}(\varphi^f,\sigma)}& SH_\ast(W)\\ HF_\ast(h) \arrow{u}\arrow{r}{\mathcal{B}(\varphi^f,\sigma)}& HF_\ast(h\#f),\arrow{u} \end{tikzcd}\] whose vertical arrows are canonical morphisms, commutes for every admissible contact Hamiltonian $h$. \printbibliography \end{document}
2205.14745v2
http://arxiv.org/abs/2205.14745v2
Almost Witt Vectors
\documentclass{article} \usepackage{amsmath} \usepackage{amsthm} \usepackage{amsfonts} \usepackage{amssymb} \usepackage{tikz-cd} \usepackage[numbers]{natbib} \usepackage{mathrsfs} \usepackage{lipsum,hyperref} \usepackage{enumerate} \hypersetup{colorlinks=true, linkcolor=black,urlcolor=black, citecolor=black} \let\amsamp=& \makeatletter \newcommand{\colim@}[2]{ \vtop{\m@th\ialign{##\cr \hfil$#1\operator@font colim$\hfil\cr \noalign{\nointerlineskip\kern1.5\ex@}#2\cr \noalign{\nointerlineskip\kern-\ex@}\cr}}} \newcommand{\colim}{ \mathop{\mathpalette\colim@{\rightarrowfill@\scriptscriptstyle}}\nmlimits@ } \renewcommand{\varprojlim}{ \mathop{\mathpalette\varlim@{\leftarrowfill@\scriptscriptstyle}}\nmlimits@ } \renewcommand{\varinjlim}{ \mathop{\mathpalette\varlim@{\rightarrowfill@\scriptscriptstyle}}\nmlimits@ } \makeatother \newtheorem{thm}{Theorem}[section] \newtheorem{lem}[thm]{Lemma} \newtheorem{prop}[thm]{Proposition} \newtheorem{cor}[thm]{Corollary} \newtheorem{definition}[thm]{Definition} \theoremstyle{remark} \newtheorem{rem}[thm]{Remark} \newtheorem{exmp}{Example} \newtheoremstyle{gabber} {6pt} {0pt} {\normalfont} {} {\normalfont} {.} {0.5em} {} \theoremstyle{gabber} \newtheorem{sect}[thm]{} \def\ackn{\par\textbf{Acknowledgements.} \ignorespaces} \def\endackn{} \newcommand{\obj}{\text{obj}} \newcommand{\mor}{\text{Mor}} \newcommand{\im}{\text{Im}} \newcommand{\restr}[2]{\left . #1 \right |_{#2}} \newcommand{\aclo}[1]{\overline{#1}} \newcommand{\comp}[1]{\widehat{#1}} \renewcommand{\ker}{\text{Ker}} \newcommand{\id}{\text{id}} \newcommand{\ideal}[1]{\mathfrak{#1}} \renewcommand{\lim}{\varprojlim} \newcommand{\inj}{\colim} \newcommand{\rnorm}[2]{\text{N}_{#1/#2}} \newcommand{\perf}{\text{perf}} \newcommand{\gal}[1]{\text{Gal}(#1)} \newcommand{\nf}[2]{\textbf{#1}_{#2}} \newcommand{\nfp}[2]{\textbf{#1}_{#2}^{+}} \newcommand{\nfpe}[2]{\widetilde{\textbf{#1}}_{#2}} \newcommand{\nfppe}[2]{\widetilde{\textbf{#1}_{#2}}^{+}} \newcommand{\norm}[1]{\left | #1 \right |} \newcommand{\Frac}[1]{\text{Frac}(#1)} \renewcommand{\b}[2]{\textbf{#1}_{#2}} \renewcommand{\bf}[1]{\textbf{#1}} \newcommand{\unram}[1]{{#1}^{\text{nr}}} \renewcommand{\cal}[1]{\mathcal{#1}} \renewcommand{\mapsto}{\longmapsto} \renewcommand{\to}{\longrightarrow} \newcommand{\spec}{\text{Spec}} \newcommand{\coker}{\text{Coker}} \newcommand{\tor}[4]{\text{Tor}_{#1}^{#2}(#3, #4)} \newcommand{\ext}[4]{\text{Ext}_{#2}^{#1}(#3, #4)} \newcommand{\alext}[4]{\text{alExt}_{#2}^{#1}(#3, #4)} \renewcommand{\hom}[3]{\text{Hom}_{#1}(#2, #3)} \newcommand{\alhom}[3]{\text{alHom}_{#1}(#2, #3)} \renewcommand{\cal}[1]{\mathcal{#1}} \newcommand{\ann}[2]{\text{Ann}_{#2}\left(#1\right)} \renewcommand{\tilde}[1]{\widetilde{#1}} \newcommand{\rele}[2]{\stackrel{#1}{#2}} \usepackage{authblk} \numberwithin{equation}{subsection} \title{Almost Witt vectors} \author{Ivan Zelich} \date{} \begin{document} \maketitle \begin{abstract} Our main goal in this paper is to prove results for Witt vectors in the almost category. We finish with an application to \textit{almost purity}, Theorem~\ref{thm:almpurchar0}. \end{abstract} \tableofcontents \newpage \section{Introduction} \begin{sect}Almost mathematics originated from the work of Faltings \cite{gerd} who took inspiration from Tate's work on ramification theory \cite{tatepdiv}. Faltings considered rings such $A_0 := \mathbb{Z}_p[T_1,...,T_n]$ and ramified extensions $A_k := \mathbb{Z}_p[p^{1/p^{k}}, T_1^{1/p^{k}}, ..., T_n^{1/p^{k}}]$. 
If we take a finite normal $A_0$-algebra $B_0$, supposing that it is finite \'{e}tale after inverting $p$, then in general $B_0$ could have some ramification in the special fibre above $p$. Motivated by Zariski-Nagata purity, Faltings then considered the localised situation: $A_0':=\mathbb{Z}_p[T_1^{\pm 1},...,T_n^{\pm 1}]$ and $B_0'$ a finite normal algebra over $A_0'$ that is finite \'{e}tale along the generic fibre. After constructing $A'_k$ and $B'_k$ analogously, the point is that the infinite extension $B'_{\infty} := \cup_{k} B'_k$ will be almost unramified over $A'_{\infty}:= \cup_k A'_k$ with respect to $(p^{1/p^{\infty}})$, and thus almost finite \'{e}tale. This result has since been called an `almost purity' theorem, but in the way that we have framed it, it could also be seen as a variant of Abhyankar's lemma for wild ramification, in the sense that adjoining enough $p^{\text{th}}$-power roots kills \textit{almost all} ramification.\end{sect} \begin{sect}Let us now recall some main constructs for almost mathematics. First, one fixes an almost setting $(R, \ideal{m})$ consisting of a ring $R$ with an ideal $\ideal{m} \subset R$ where $\ideal{m}^2 = \ideal{m}$.\footnote{One seemingly artificial condition that can be given to $\ideal{m}$ is that $\tilde{\ideal{m}} := \ideal{m} \otimes_R \ideal{m}$ be an $R$-flat module, and as shown in \cite[Remark 2.1.4]{gabram}, this condition is preserved by base-change. However, the results of this thesis do not rely on this hypothesis\textemdash see \ref{dercatalmmod} below. Condition (B) holds under this flatness condition \cite[Proposition 2.1.7]{gabram}.} In many situations, for any integer $k>1$, the $k^{\text{th}}$ power elements of $\ideal{m}$ generate $\ideal{m}$, which is called Condition (B) in \cite{gabram}\textemdash we will assume $\ideal{m}$ has this property. An $R$-module $N$ is called \textit{almost zero} if $\ideal{m}N = 0$, and one checks that this property is closed under extensions, thus ensuring that the subcategory of almost zero modules forms a Serre subcategory. Briefly, if we have an exact sequence: \[\begin{tikzcd} 0 \arrow[r]& M_1 \arrow[r]& M \arrow[r]& M_2 \arrow[r] & 0 \end{tikzcd}\] and $M$ is almost zero, then it's clear that $M_1$ and $M_2$ are almost zero too. For the converse, $\ideal{m}\cdot M$ lies in $M_1$ since $M_2$ is almost zero, so $\ideal{m} \cdot M = \ideal{m}^2 \cdot M \subset \ideal{m} \cdot M_1= 0$ since $M_1$ is almost zero.\\ \indent We call a morphism $f: M\to N$ an \textit{almost isomorphism} if $\ideal{m} \cdot \ker(f) = \ideal{m} \cdot \coker(f) =0$. Set $\tilde{\ideal{m}}:= \ideal{m} \otimes_R \ideal{m}$. \end{sect} \begin{lem}\label{lem:almzero} \begin{enumerate}[(i)] \item An $R$-module $M$ is almost zero if and only if $\ideal{m} \otimes_R M \simeq 0$. \item $f: M \to N$ is an almost isomorphism if and only if the induced morphism $ \tilde{\ideal{m}} \otimes_R M \to \tilde{\ideal{m}} \otimes_R N$ is an isomorphism. \end{enumerate} \end{lem} \begin{proof}See~\cite[Remark 2.1.4]{gabram}. \end{proof} \begin{rem}\label{rem:iter} We note that the natural inclusion $\ideal{m} \hookrightarrow R$ is an almost isomorphism since the kernel and cokernel are trivially killed by $\ideal{m}$. As a direct result of (ii), it follows that $\ideal{m} \otimes_R \tilde{\ideal{m}} \to \tilde{\ideal{m}}$ is an isomorphism\textemdash in other words, repeated tensoring $\ideal{m} \otimes_R \ideal{m} \otimes_R ...$ stops after the first iteration.
Crucially, $\tilde{\ideal{m}} \otimes_R \tilde{\ideal{m}} \simeq \tilde{\ideal{m}}$. \end{rem} \begin{sect}Formally, the almost category $R^a\text{-Mod}$ is then the quotient of $R\text{-Mod}$ by the Serre subcategory of almost zero modules, which is an abelian category itself that is equipped with a localisation functor $(-)^a: R\text{-Mod} \to R^a\text{-Mod}$ \cite[02MN]{stacks}. One typically constructs $R^a\text{-Mod}$ as the localised category at the multiplicative system of \textit{almost isomorphisms}, and with this characterisation, we may understand the morphisms in $R^a\text{-Mod}$ via a calculus of fractions.\end{sect} \begin{lem}\label{lem:initalm} We have $\hom{R^a}{M^a}{N^a} = \hom{R}{\tilde{\ideal{m}} \otimes_R M}{N}$. \end{lem} \begin{proof} See~\cite[2.2.2]{gabram}. \end{proof} Combining this with Lemma~\ref{lem:almzero}, we see that if $f: M \to N$ is an almost isomorphism, then the induced morphism $f^a: M^a \to N^a$ is an isomorphism in the almost category. Moreover, an isomorphism $f: M^a \to N^a$ in $R^a\text{-mod}$ can be represented uniquely by a morphism $\tilde{\ideal{m}} \otimes_R M \to \tilde{\ideal{m}} \otimes_R N$ in $R\text{-mod}$.\\ \indent There are two adjoints to $(-)^a$: for an almost module $M^a$ in $R^a\text{-mod}$, consider the following functors: \begin{itemize} \item the functor of \textit{almost elements} $(-)_*: R^a\text{-mod} \to R\text{-mod}$ which takes $M^a \mapsto \hom{R}{\tilde{\ideal{m}}}{M}$. \item The functor $(-)_!: R^a\text{-mod} \to R\text{-mod}$ which takes $M^a \mapsto \tilde{\ideal{m}} \otimes_R M_*$. \end{itemize} These are, respectively, the right and left adjoints of $(-)^a$, and the corresponding counit (resp. unit) is a natural isomorphism in $R^a\text{-mod}$. \\ \indent There is a lot more foundational material for almost mathematics that can be found in \cite{gabram}, and we refer the reader there for more details. It may be useful for the reader to note, while reading \cite[2.2]{gabram}, that due to Remark~\ref{rem:iter}, $\tilde{\ideal{m}}$ acts as a unit object on the essential image of $(-)_*$, so the theory of abelian tensor categories applies here, concretely. We will not discuss these foundations in this paper further, other than explaining some key concepts. \begin{sect}\label{fibalm} In particular, we would like a formalism for base-change in almost mathematics, which we can achieve by defining the following category $\cal{B}$ of `almost set-ups': objects are almost set-ups $(R, \ideal{m})$, and morphisms $(R, \ideal{m}_R) \to (S, \ideal{m}_S)$ between two objects are ring homomorphisms $f: R \to S$ such that $\ideal{m}_S = f(\ideal{m}_R) \cdot S$.\footnote{This latter condition is precisely what will ensure that the almost structures after extension of scalars are compatible.} We have fibered and cofibered categories $\cal{B}\text{-Mod} \to \cal{B}$, where objects in the former category are pairs $((R, \ideal{m}_R), M)$ with $M$ an $R$-module and morphisms between $((R, \ideal{m}_R), M)$ and $((S, \ideal{m}_S), N)$ are pairs $(f,g)$ where $f: (R, \ideal{m}_R) \to (S, \ideal{m}_S)$ is a morphism in $\cal{B}$ and $g: M \to N$ is $f$-linear. We also write $\cal{B}\text{-Alg}$ and $\cal{B}\text{-Mon}$ for the categories of algebras and of non-unital monoids of $\cal{B}$, which are fibered/cofibered over $\cal{B}$. \indent The almost isomorphisms in the fibers of $\cal{B}\text{-Mod} \to \cal{B}$ give a multiplicative system $\Sigma$ in $\cal{B}\text{-Mod}$.
This allows us to form the localised category $\cal{B}^a\text{-Mod} := \Sigma^{-1}(\cal{B}\text{-Mod})$. The fibers of this localised category over the objects $(R, \ideal{m})$ are precisely the categories of almost modules $R^a\text{-Mod}$, and similar assertions hold when restricting to $\cal{B}\text{-Alg}$ and $\cal{B}\text{-Mon}$. One can form adjoints to $(-)^a$ whose restrictions to the fibres induce the previously considered left and right adjoints $(-)_{!}$ and $(-)_*$.\\ \indent We can consider the category $\cal{B}/A$ defined to be the full subcategory of objects $(B, \ideal{m})$ where $B$ is an $A$-algebra. When $(R, \ideal{m})$ is an object of $\cal{B}/\mathbb{F}_p$, we can define a \textit{Frobenius} endomorphism $\Phi_R: (R,\ideal{m}) \to (R, \ideal{m})$ (which is a morphism in $\cal{B}$ as $\ideal{m}$ satisfies Condition (B)). For any $B \in \cal{B}$-$\text{Alg}/\mathbb{F}_p$ (resp. $\cal{B}$-$\text{Mon}/\mathbb{F}_p$), the Frobenius map induces a morphism $\Phi_B: B \to B$ over $\Phi_R$. The collection of Frobenius maps gives us a natural transformation from the identity functor of $\cal{B}\text{-Alg}/\mathbb{F}_p$ (resp. $\cal{B}\text{-Mon}/\mathbb{F}_p$) to itself that induces a natural transformation on the identity functor of $\cal{B}^a\text{-Alg}/\mathbb{F}_p$ (resp. $\cal{B}^a\text{-Mon}/\mathbb{F}_p$). Using pull-back functors, any object $B$ of $\cal{B}\text{-Alg}$ over $R$ defines new objects $B_{(m)}$ of $\cal{B}\text{-Alg}$ $(m \in \mathbb{N})$ over $R$, where $B_{(m)} := (\Phi_R^m)_{*}(B)$.\end{sect} \begin{sect}The main outcome of this thesis was to find an alternative proof of almost purity for perfectoid valuation rings of rank one, without relying too heavily on analytic context, ultimately in the hope of extending the proof to the general case. Throughout this discussion, we fix a perfectoid ring $R$ (in mixed characteristic) with an element $\varpi \in R$ admitting a compatible system of $p$-power roots \cite[Lemma 3.2]{perfectoid}; let us recall what we mean by this:\end{sect} \begin{definition} A complete, non-discrete valuation ring $R$ of rank $1$ is a \textit{perfectoid valuation ring} if the Frobenius $\Phi:R/pR \to R/pR$ is surjective. \end{definition} \noindent Using the theory of perfectoid spaces, one can prove almost purity for arbitrary perfectoid rings via descent to perfectoid valuation rings of rank one, using the fact that the fibered category of finite \'{e}tale covers of adic spaces forms a stack~\cite[Corollary 15.7.26]{gabram2}, as done in Scholze's thesis \cite{perfectoid}. Let us finally state the theorem we want to prove: \begin{thm}[Almost purity in Characteristic 0]\label{thminto:almpurchar0} Fix a perfectoid valuation ring $R$ of rank one. Let $S$ be a finitely presented $R$-algebra, and suppose $S[1/p]$ is a finite \'{e}tale algebra over $R[1/p]$. Then $\cal{O}_S$, the integral closure of $R$ in $S[1/p]$, is almost finitely presented \'{e}tale over $R$. \end{thm} \noindent Our approach is to reframe the usual `untilting' functor from characteristic $p$ to characteristic $0$ via finite length Witt vectors. In particular, we prove the following (see Theorem~\ref{thm:wittdescent2}): \begin{thm}\label{thminto:wittdescent} Let $f: A \to B$ be a weakly \'{e}tale flat morphism of $R^a$-algebras. Then, $W_n(f): W_n(A) \to W_n(B)$ is weakly \'{e}tale. \end{thm} \noindent See Definition~\ref{def:weaketale} for the appropriate definitions.
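For orientation, we record the basic example the reader may keep in mind throughout; it is included only as an illustration and plays no logical role in what follows. \begin{exmp} Let $R$ be the $p$-adic completion of $\mathbb{Z}_p[p^{1/p^{\infty}}] = \bigcup_{k} \mathbb{Z}_p[p^{1/p^{k}}]$, with $\varpi := p$ together with its chosen compatible $p$-power roots $p^{1/p^{k}}$. One checks that $R$ is a complete, non-discrete valuation ring of rank one whose Frobenius is surjective modulo $p$, i.e. a perfectoid valuation ring in the sense of the definition above, and the associated almost set-up is \[ \ideal{m} := \left(\varpi^{1/p^{\infty}}\right) = \bigcup_{k \ge 0} p^{1/p^{k}}R. \] Here $\ideal{m}^2 = \ideal{m}$, since $p^{1/p^{k}} = \big(p^{1/p^{k+1}}\big)^{p} \in \ideal{m}^2$, and $\ideal{m}$ satisfies Condition (B): for any integer $j>1$ one has $p^{1/p^{k}} = \big(p^{1/p^{k+\ell}}\big)^{j}\cdot p^{1/p^{k}-j/p^{k+\ell}}$ whenever $p^{\ell} \ge j$, so $\ideal{m}$ is generated by $j^{\text{th}}$ powers of its elements. \end{exmp}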
Armed with this theorem, the following result isn't difficult (see Lemma~\ref{lem:absabh}): \begin{thm} Let $\overline{R}$ be the absolute closure of a perfectoid valuation ring $R$ of rank one. Then $\overline{R}/p^n$ is an almost weakly \'{e}tale $(R/p^n, (\varpi^{1/p^{\infty}}))$-algebra for all $n \ge 1$. \end{thm} \noindent Almost purity then follows via descent arguments.\\ \begin{ackn} The author is incredibly grateful to Dr James Borger and Dr Lance Gurney for the insightful discussions, support, and supervision. The author was funded by an Australian Government Research Training Program Scholarship (2020-2021) during which this paper was completed for partial fulfillment of an MPhil degree. \end{ackn} \section{Technical results} \subsection{Derived categories of almost modules}\label{dercatalmmod} Here we shall extend some results from \cite[Section 2.4]{gabram} by removing the hypothesis that $\tilde{\ideal{m}}$ be a flat $R$-module. We note also that more is accomplished towards this aim in~\cite[14.1]{gabram2}. \begin{definition} Let $A$ be an $R^a$-algebra, and $M$ an $A$-module. \begin{enumerate}[(i)] \item We say that $M$ is flat (resp. faithfully flat) if the functor $N \mapsto M \otimes_A N$, from the category of $A$-modules to itself, is exact (resp. exact and faithful). \item We say that $M$ is almost projective if the functor $N \mapsto \alhom{A}{M}{N}$ is exact. \end{enumerate} \end{definition} \begin{lem}\label{lem:abcatprop} Let $A$ be an $R^a$-algebra. \begin{enumerate}[(i)] \item $A\text{-Mod}$ and $A\text{-Alg}$ are both complete and cocomplete, with exact colimits. \item $(-)^a: A_*\text{-Mod} \to A\text{-Mod}$ preserves flat (resp. faithfully flat) $A$-modules, and sends projective objects to almost projective objects. \end{enumerate} \end{lem} \begin{proof} (i): We note that $A_*\text{-Mod}$ and $A_*\text{-Alg}$ are both complete and cocomplete. We show that $A\text{-Mod}$ is cocomplete, and the other assertions follow similarly. Suppose $I$ is a small indexing category, $M(-)$ an $I$-diagram of $A$-modules, and set $M := \inj_{i \in I} M(i)_*$; we see that $M^a \simeq \inj_{i \in I} M(i)$, since $(-)^a$ commutes with colimits (and limits) and the natural map $(M(i)_*)^a \to M(i)$ is an isomorphism.\\ \indent Note that this argument says that colimits are left exact since $(-)_*$ is; so by repeating the same argument with $(-)_{!}$ instead, we get that colimits are right exact too, whence the assertion.\\ \indent (ii): Suppose that $M$ is a flat $A_*$-module, i.e. the endofunctor $M \otimes_{A_*} -$ is exact. Since the aforementioned functor preserves the Serre subcategory of almost zero $A_*$-modules, $M^a \otimes_A -$ will also be an exact endofunctor on $A\text{-Mod}$. For the faithfully flat assertion, we need only show that if $M^a \otimes_{A} N \simeq 0$ then $N \simeq 0$. We're given that $M \otimes_{A_*} N_*$ is almost zero, so that $(M \otimes_{A_*} N_*) \otimes_{A_*} \ideal{m}A_* \simeq 0$. By faithful flatness, $N_* \otimes_{A_*} \ideal{m}A_* \simeq 0$, so that $N_*$ is almost zero, or indeed $N \simeq (N_*)^a \simeq 0$. The assertion for projective objects is clear by definition. \end{proof} Now let $B$ be any $R$-algebra; for any interval $I \subset \mathbb{N}$, we may extend $(-)^a$ termwise to a functor: \[(-)^a: C^I(B\text{-Mod}) \to C^I(B^a\text{-Mod}).\] We may consider the class of maps $\Sigma$ as those for which the modules $H^i(\text{Cone}(\phi))$ are almost zero.
Then by the exactness of $(-)^a$, we see that it descends to a functor: \[(-)^a: \Sigma^{-1}D^I(B\text{-Mod}) \to D^I(B^a\text{-Mod}).\] We're interested in having a right/left adjoint on these derived categories: \begin{lem}\label{lem:deralmloc} The functor $(-)^a: \Sigma^{-1}D^I(B\text{-Mod}) \to D^I(B^a\text{-Mod})$ is an equivalence of categories with quasi-inverse $(-)_{!}$ or $(-)_*$. \end{lem} \begin{proof} Extend $(-)_{*}$ termwise to a functor $(-)_{*}: C^I(B^a\text{-Mod}) \to C^I(B\text{-Mod})$. We note then that by definition $(-)_{*}$ does indeed descend to a functor $D^I(B^a\text{-Mod}) \to \Sigma^{-1} D^I(B\text{-Mod})$ such that $(-)_{*}^a: D^I(B^a\text{-Mod}) \to D^I(B^a\text{-Mod})$ is naturally isomorphic to the identity.\footnote{It is not necessarily true that $(-)_{*}$ is a functor with target $D^I(B\text{-Mod})$, because it is only left-exact in general.} Now by construction $(-)^a_{*}: \Sigma^{-1} D^I(B\text{-Mod}) \to \Sigma^{-1} D^I(B\text{-Mod})$ is also naturally isomorphic to the identity, as desired. The same argument applies for $(-)_!$. \end{proof} By Lemma~\ref{lem:abcatprop}, we see that $A\text{-Mod}$ satisfies the AB5 axiom for abelian categories, and as the category has a generator, namely $A$, we can conclude that $A\text{-Mod}$ has enough injectives. From the same lemma, we may also conclude that $A\text{-Mod}$ has enough flat/almost projective objects.\\ \indent Given an $A$-module $M$, we can derive the functors $M\otimes_A -$ (resp. $\alhom{A}{M}{-}$, resp. $\alhom{A}{-}{N}$) by taking flat (resp. almost projective, resp. injective) resolutions. Indeed, bounded above exact complexes of flat (resp. almost projective) modules are acyclic for the functor $M \otimes_A -$ (resp. $\alhom{A}{-}{N}$), which enables us to compute the derived functors as usual. Let us sketch why in the case of $\alhom{A}{-}{N}$: suppose $P_{\bullet}$ is a bounded above exact complex of almost projective objects, and $J^{\bullet}$ an injective resolution for $N$. Note that, as discussed (see Lemma~\ref{lem:initalm}), for any $A$-module $N$ and a complex $\mathscr{C}$ of $A_*$-modules, $\alhom{A}{\mathscr{C}^a}{N} =\hom{A_*}{\mathscr{C}}{N_*}^a$. Thus, by considering when $H^i(\mathscr{C})^a\simeq 0$ and using that $(-)_{*}$ preserves injectives, we see that $\alhom{A}{-}{J^i}$ is exact. Considering $C$, the double complex $C_{ij}:=\alhom{A}{P_{i}}{J^j}$, we see firstly that $\text{Tot}(C)$ is quasi-isomorphic to the complex $\alhom{A}{P_{\bullet}}{N}$ by the vertically filtered spectral sequence, and secondly that $\text{Tot}(C)$ is quasi-isomorphic to $0$ by the horizontally filtered spectral sequence. Thus, the class of almost projective objects is `adapted' to $\alhom{A}{-}{N}$, in the sense of \cite[4.3]{algebragel}, and similarly the flat objects will be adapted to $M \otimes_A -$. \cite[Theorem 4.8]{algebragel} or \cite[Theorem 10.5.9]{weibel} gives us a way of deriving these functors.\\ \indent Combining Lemmas~\ref{lem:abcatprop} and \ref{lem:deralmloc}, we see that for $A$-modules $S,T$: \[\tor{n}{A}{S}{T} = \tor{n}{A_*}{S_*}{T_*}^a, \; \; \alext{n}{A}{S}{T} = \ext{n}{A_*}{S_*}{T_*}^a,\] where alExt is the derived functor of alHom. Thus, an $A$-module $M$ is flat/almost projective if and only if the higher Tor/Ext groups for the pair $(M_*,N_*)$ are almost zero, for any $A$-module $N$. \begin{rem} We briefly remark that, due to the fact that filtered colimits of flat objects are flat, Tor commutes with filtered colimits.
\end{rem} \subsection{Flatness criteria} We begin with a generalisation of a usual criterion for flatness in the almost category. In what follows, we will use the fact that any almost module can be written as a colimit of its finitely generated submodules, which follows from Lemma~\ref{lem:abcatprop} and the corresponding statement for modules. We fix throughout this section an almost set-up $(R, \ideal{m})$. \begin{thm}\label{thm:finflatness} Let $M$ be an $R^a$-module; $M$ is $R^a$-flat if and only if, for every finitely generated ideal $I \subset R^a$, the natural morphism $I \otimes_{R^a} M \to M$ is a monomorphism. \end{thm} \begin{proof} The `only if' direction is clear. For the converse, note that as tensoring commutes with colimits and colimits are exact, we may assume that the hypothesis holds for any ideal $I \subset R^a$. Given an exact sequence of $R^a$-modules $\mathscr{E}:=0 \to N_1 \to N_2 \to N_3 \to 0$, we need to show that $\mathscr{E} \otimes_{R^a} M$ is exact, for which it suffices to show that $\tor{1}{R^a}{N_3}{M}=0$. Now, write $N_3$ as a colimit of its finitely generated submodules; as Tor commutes with colimits, we reduce to the case when $N_3 = (R^{a})^{n}/L$ where $L \subset (R^a)^n$ is an $R^a$-submodule. Next, consider the following diagram: \[ \begin{tikzcd} 0 \arrow[r] & (R^{a})^{n-1} \arrow[r] & (R^a)^n \arrow[r] & R^a \arrow[r] & 0\\ 0 \arrow[r] & L \times_{(R^a)^n} (R^{a})^{n-1} \arrow[r] \arrow[hookrightarrow]{u} & L \arrow[r] \arrow[hookrightarrow]{u} & I' \arrow[r] \arrow[hookrightarrow]{u} & 0 \end{tikzcd}\] Here, $I'$ is the image of $L$ under the projection $(R^a)^n \to R^a$ onto the last coordinate. After applying $-\otimes_{R^a} M$ to the diagram, we see that the bottom row remains right exact, and the top row remains exact as everything is free. By induction on $n$, we may assume that the left vertical arrow remains a monomorphism after tensoring, and by the hypothesis so does the right vertical arrow. The snake lemma (or a short diagram chase) then shows that $L \otimes_{R^a} M \to (R^a)^n \otimes_{R^a} M$ is a monomorphism, i.e. $\tor{1}{R^a}{N_3}{M}=0$. \end{proof} \begin{sect}\label{colimflat}Let $I$ be a (small) filtered category, and $F:I \to R^a\text{-Alg}$ a functor. Set $A_i := F(i)$, and for each $i \in I$, let $M_i, N_i$ be $A_i$-modules. Set $A:= \colim_{i} A_i, M := \colim_i M_i$ and $N := \colim_i N_i$. \end{sect} \begin{lem}\label{lem:colimflat} In the situation of \ref{colimflat}: \begin{enumerate}[(i)] \item We have $\colim_{i} N_i \otimes_{A_i} M_i \simeq N \otimes_{A} M$. \item If $M_i$ is $A_i$-flat, then $M$ is $A$-flat. \end{enumerate} \end{lem} \begin{proof} (i): By applying $(-)_{*}$ to both sides we reduce to the statement for ordinary modules via the same argument as in Lemma~\ref{lem:abcatprop} (i).\\ \indent (ii): Let $I \subset A$ be a finitely generated ideal. There is some index $i$ and a finitely generated ideal $I' \subset A_i$ such that $I=I'A$; by Theorem~\ref{thm:finflatness}, we need to show that $I \otimes_{A} M \to M$ is a monomorphism. Note that $I'A_j \otimes_{A_j} M_j \to M_j$ is a monomorphism for each $j \ge i$ (since $M_j$ is $A_j$-flat), so by passing to the colimit and using (i), we get that $I \otimes_{A} M \to M$ is a monomorphism by the exactness of filtered colimits. \end{proof} \begin{cor}\label{cor:colimwet} Let $A$ be an $R^a$-algebra and let $B_i$ be a filtered system of weakly \'{e}tale algebras over $A$. Then, $B := \colim_{i} B_i$ is weakly \'{e}tale over $A$. \end{cor} \begin{proof} Clearly, $A \to B$ is flat (by Lemma~\ref{lem:colimflat} (ii)), so we need to show that it is weakly unramified. Indeed, we need to ensure $B \otimes_{A} B \to B$ is flat; we note $\colim_{i}B_i \otimes_{A} B_i \simeq B \otimes_A B$ via Lemma~\ref{lem:colimflat} (i). Then by Lemma~\ref{lem:colimflat} (ii), we're done.
\end{proof} We have the following almost version of the local flatness criterion, which we state more generally as follows: \begin{thm}\label{thm:genlocflat} Let $A$ be an $R^a$-algebra. Suppose that we are given morphisms $A \to A_i$ of (not necessarily distinct) $R^{a}$-algebras so that for every $A$-module $M$, one has a filtration $0 = F_{0}(M) \subset F_{1}(M) \subset ... \subset F_{n}(M) = M$ such that each graded piece $\text{gr}_{i}(M) := F_{i+1}(M)/F_i(M)$ is an $A_{i}$-module. Then the following are equivalent: \begin{enumerate}[(i)] \item $M$ is $A$-flat \item $M_i := M \otimes_{A} A_i$ is $A_i$-flat and $\tor{1}{A}{A_i}{M} = 0$ for each $i=1,...,n-1$ \end{enumerate} \end{thm} \begin{proof} We first begin with a lemma. \begin{lem}\label{lem:filtorext} In the situation above, to check that $\tor{1}{A}{M}{N}=0$ (or $\alext{1}{A}{M}{N}=0$) for arbitrary $A$-modules $N$, it is sufficient to do so when $N$ is an $A_i$-module, for any index $i=1,...,n-1$. \end{lem} \begin{proof} By the supposition, we know that each module $N$ admits a filtration $F_{i}(N)$ such that $\text{gr}_{i}(N)$ is an $A_i$-module. Assuming $\tor{1}{A}{M}{K}=0$ for any $A_i$-module $K$, we conclude by using Tor sequences applied to the exact sequences: \[\begin{tikzcd} 0 \arrow[r] & F_{i}(N) \arrow[r]& F_{i+1}(N) \arrow[r] & \text{gr}_{i}(N) \arrow[r] & 0 \end{tikzcd}\] that $\tor{1}{A}{M}{F_{i}(N)}=0 \Rightarrow \tor{1}{A}{M}{F_{i+1}(N)}=0$. By definition, $F_{n}(N) := N$ and $F_{1}(N)$ is an $A_0$-module, so starting from $\tor{1}{A}{M}{F_{1}(N)}=0$ and inducting upwards we eventually get that $\tor{1}{A}{M}{N} = \tor{1}{A}{M}{F_{n}(N)} = 0$, as desired. The statement for alExt follows with minor changes. \end{proof} To get the result, we utilise the Tor base change spectral sequence; for each index $i=1,...,n-1$, and $A_i$-modules $N$: \[E^{2}_{pq} := \tor{p}{A_i}{\tor{q}{A}{M}{A_i}}{N} \Rightarrow \tor{p+q}{A}{M}{N}.\] Now, by assumption, $E^2_{p,1}=0$ for all $p$ (as $\tor{1}{A}{A_i}{M}=0$) and $E^2_{p,0}=0$ for all $p\geq 1$ (as $M_i$ is $A_i$-flat); in particular $E^2_{0,1}=E^2_{1,0}=0$, which implies that $\tor{1}{A}{M}{N}=0$ for any $A_i$-module $N$. By the lemma, this is enough to prove the theorem (the implication (i)$\Rightarrow$(ii) being immediate). \end{proof} \begin{cor}[Local flatness criterion]\label{cor:almlocflat} Suppose $A$ is an $R^a$-algebra, $M$ an $A$-module, and $I \subset A$ a nilpotent ideal. The following are equivalent: \begin{enumerate}[(i)] \item $M$ is $A$-flat \item $M_0:= M/IM$ is $A_0 := A/I$-flat and $I^k/I^{k+1} \otimes_{A_0} M_0 \to I^kM/I^{k+1}M$ is an isomorphism for each $k\ge 1$. \end{enumerate} \end{cor} \begin{proof} We prove the non-obvious direction. Indeed, the previous theorem applies by considering the filtration $F_{n-k}(M) := I^kM$. So, we only need to show that $\tor{1}{A}{A/I}{M}=0$. Consider the exact sequences: \[\alpha_k := \begin{tikzcd} 0 \rar & \tor{1}{A}{A/I^k}{M} \rar & I^k \otimes_A M \rar & I^kM \rar & 0 \end{tikzcd}\] Applying the snake lemma to $\alpha_{k+1} \to \alpha_k$, one obtains that \[\coker\left(\tor{1}{A}{A/I^{k+1}}{M} \to \tor{1}{A}{A/I^k}{M}\right) = 0.\] Since $I$ is nilpotent, descending induction gives that $\tor{1}{A}{A/I}{M} = 0$. \end{proof} \subsection{Almost homological algebra} Here we collect some results on homological algebra in the almost category following \cite[Chapter 2.4]{gabram}. We note that Lemma~\ref{lem:deralmloc} and our subsequent results on deriving alHom and $\otimes$ in the almost category do not require the condition that $\tilde{\ideal{m}}$ be flat, but merely that $\ideal{m}^2=\ideal{m}$, in contrast to \textit{loc. cit}. \begin{prop} Let $M$ be an $A$-module.
\begin{enumerate}[(i)] \item $M$ is almost finitely generated if and only if for every finitely generated ideal $\ideal{m}_0 \subset \ideal{m}$ there exists a finitely generated submodule $M_0 \subset M$ such that $\ideal{m}_0M \subset M_0$. \item The following are equivalent: \begin{enumerate}[(a)] \item $M$ is almost finitely presented \item for arbitrary $\epsilon, \delta \in \ideal{m}$, there exist positive integers $n = n(\epsilon), m = m(\epsilon)$ and a three term complex \[\begin{tikzcd} A^m \arrow[r, "\psi_{\epsilon}"] & A^n \arrow[r, "\phi_{\epsilon}"] & M \end{tikzcd}\] with $\epsilon \cdot \coker(\phi_{\epsilon})=0$ and $\delta \cdot \ker(\phi_{\epsilon}) \subset \im(\psi_{\epsilon})$. \item For every finitely generated ideal $\ideal{m}_0 \subset \ideal{m}$, there is a complex \[\begin{tikzcd} A^m \arrow[r, "\psi"] & A^n \arrow[r, "\phi"] & M \end{tikzcd}\] with $\ideal{m}_0 \cdot \coker(\phi)=0$ and $\ideal{m}_0 \cdot \ker(\phi) \subset \im(\psi)$. \end{enumerate} \end{enumerate} \end{prop} \begin{proof} See \cite[Proposition 2.3.10]{gabram}. \end{proof} \begin{lem} Let $M$ be an almost finitely generated $A$-module and $B$ a flat $A$-algebra. Then $\text{Ann}_B(B \otimes_A M) \simeq B \otimes_A \text{Ann}_A(M)$. \end{lem} \begin{proof} See~\cite[Lemma 2.4.6]{gabram}. \end{proof} \begin{lem}\label{lem:almprojsplit} Let $M$ be an almost finitely generated $A$-module, $A$ an $R^a$-algebra. Then $M$ is almost projective if and only if, for arbitrary $\epsilon \in \ideal{m}$, there exists $n(\epsilon) \in \mathbb{N}$ and $A$-linear morphisms \[\begin{tikzcd}M \arrow[r, "u_{\epsilon}"] & A^{n(\epsilon)} \arrow[r, "v_{\epsilon}"] & M \end{tikzcd}\] such that $v_{\epsilon} \circ u_{\epsilon} = \epsilon \cdot 1_{M}$. \end{lem} \begin{proof} See~\cite[Lemma 2.4.15]{gabram}. \end{proof} We have the following: \begin{thm}\label{thm:presented} Let $A$ be an $R^a$-algebra. \begin{enumerate}[(i)] \item Every almost finitely generated projective $A$-module is almost finitely presented. \item Every almost finitely presented flat $A$-module is almost projective. \end{enumerate} \end{thm} \begin{proof} See~\cite[Proposition 2.4.18]{gabram}. \end{proof} We finish with some definitions: \begin{definition}\label{def:weaketale} Let $\phi: A \to B$ be a morphism of $R^a$-algebras. \begin{enumerate} \item We say that $\phi$ is flat (resp. faithfully flat, resp. almost projective) if $B$ is a flat (resp. faithfully flat, resp. almost projective) $A$-module. \item We say that $\phi$ is almost finite (resp. finite) if $B$ is almost finitely generated (resp. finitely generated) as an $A$-module. \item We say that $\phi$ is weakly unramified (resp. unramified) if $B$ is a flat (resp. almost projective) $B \otimes_A B$-module (via the multiplication morphism $\mu_{B/A}$). \item $\phi$ is weakly \'{e}tale (resp. \'{e}tale) if it is flat and weakly unramified (resp. unramified). \end{enumerate} \end{definition} \begin{prop}\label{prop:assortlem} Let $\phi: A \to B$ and $\psi: B \to C$ be morphisms of almost algebras. \begin{enumerate}[(i)] \item (Base change) Let $A \to A'$ be any morphism of $R^a$-algebras; if $\phi$ is flat (resp. almost projective, resp. faithfully flat, resp. almost finite, resp. weakly unramified, resp. unramified, resp. weakly \'{e}tale, resp. \'{e}tale), then the same holds for $\phi \otimes_A 1_{A'}$. \item (Composition) If both $\phi, \psi$ are flat (resp. almost projective ...), then so is $\psi \circ \phi$. \item If $\phi$ is flat, and $\psi \circ \phi$ is faithfully flat, then $\phi$ is faithfully flat.
\item If $\phi$ is weakly unramified and $\psi \circ \phi$ is flat (resp. weakly \'{e}tale), then $\psi$ is flat (resp. weakly \'{e}tale). \item If $\phi$ is unramified and $\psi \circ \phi$ is \'{e}tale, then $\psi$ is \'{e}tale. \item $\phi$ is faithfully flat if and only if it is a monomorphism and $B/A$ is a flat $A$-module. \item If $\phi$ is almost finite and weakly unramified, then $\phi$ is unramified. \item If $\psi$ is faithfully flat and $\psi \circ \phi$ is flat (resp. weakly unramified), then $\phi$ is flat (resp. weakly unramified). \end{enumerate} \end{prop} \begin{proof} We detail the proof of (viii) as we will need this later on; for the rest, see \cite[Lemma 3.1.2]{gabram}.\\ \indent (viii): If $\psi \circ \phi$ is flat, then we conclude by noting $-\otimes_A C = (-\otimes_A B)\otimes_{B} C$ and using the faithful flatness of $\psi$. As a consequence, we have that if $D$ is an $A$-algebra such that $A \to D$ is faithfully flat, then $B \to C$ is flat if and only if $B \otimes_A D \to C \otimes_A D$ is flat (we use (i) also). \\ \indent Assume $A \to C$ is weakly unramified, that is, that $C \otimes_A C \to C$ is flat. Note that showing $B \otimes_A B \to B$ is flat is equivalent to showing that $B \otimes_A C \to C$ is flat, but by (i) and our supposition, we know that each map in the composition $B \otimes_A C \to C \otimes_A C \to C$ is flat, hence we conclude by (ii). \end{proof} The following expected outcome of the definitions is crucial for applications to \textit{almost purity}; see Theorem~\ref{thm:almpurcharp}. \begin{prop} A morphism $\phi: A \to B$ is unramified if and only if there exists an almost element $e_{B/A} \in (B \otimes_A B)_*$ such that \begin{enumerate}[(i)] \item $e_{B/A}^2 = e_{B/A}$ \item $(\mu_{B/A})_*(e_{B/A})=1_{B}$ \item $x \cdot e_{B/A} = 0 $ for all $x \in I_{B_*/A_*}$ \end{enumerate} \end{prop} \begin{proof} See \cite[Proposition 3.1.4]{gabram}. \end{proof} In what follows, we will say that $B$ is almost finitely presented \'{e}tale if $B$ is an almost finitely presented $A$-module in addition to being \'{e}tale over $A$. \begin{thm}\label{thm:frobpushout} Let $f: A \to B$ be a weakly \'{e}tale morphism of $\mathbb{F}_p$-almost algebras. Then the diagram: \[\begin{tikzcd} B \arrow[r, "\Phi_B"] & B\\ A \arrow[u, "f"] \arrow[r, "\Phi_A"] & A \arrow[u, "f"] \end{tikzcd}\] is a push-out. In other words, the canonical morphism $g: B \otimes_{A, \Phi_A} A \to B_{\Phi_B}$ is an isomorphism of $A$-modules. \end{thm} \begin{proof} One replicates the proof of~\cite[Theorem 3.5.13]{gabram}, noting that it doesn't need the flatness condition on $\tilde{\ideal{m}}$ that was assumed from the beginning of Section 3.5, but rather only that $\ideal{m}$ satisfies Condition (B), as we have assumed. \end{proof} \subsection{Descent} In what follows, we have collected various results from \cite{gabram} that are necessary for us. Apart from some slight modifications in various proofs, the main contribution is a more linear exposition.\\ \indent Given a morphism of almost $R^a$-algebras $A \to B$, we would like to ask under what conditions information about an $A$-module $M$ can be extracted from $M_B := M\otimes_A B$ as a $B$-module. We have the following: \begin{thm}\label{thm:descent1} Suppose that the morphism $A \to B$ satisfies the following property ($*$): there exists an integer $m\ge 0$ such that, for any $A$-module $N$, $\text{Ann}_{A}(N_B)^m \subset \text{Ann}_{A}(N)$.
Then, for an $A$-module $M$: \begin{enumerate}[(i)] \item If $M_B$ is an almost finitely generated $B$-module, then $M$ is an almost finitely generated $A$-module. \item If $\tor{1}{A}{B}{M}=0$ and $M_B$ is almost finitely presented over $B$, then $M$ is almost finitely presented over $A$. \end{enumerate} \end{thm} The condition (*) imposed on $A \to B$ at first glance might seem rather unnatural, but notice that $M \otimes_A B \simeq 0 \Rightarrow A = \text{Ann}_{A}(M_B)^m \subseteq \text{Ann}_{A}(M) \subseteq A$ so $M \simeq 0$. Since we're in the almost setting, the condition (*) could be viewed as a modification of the condition that $M_B \simeq 0 \Longleftrightarrow M \simeq 0$ to deal with the `limiting' arguments that arise with almost finitely generated/almost finitely presented objects. The above discussion leads to: \begin{cor}\label{cor:ffdescent} Let $A \to B$ be faithfully flat and let $M$ be an $A$-module. \begin{enumerate}[(i)] \item If $M_B$ is almost finitely generated or almost finitely presented over $B$, then $M$ has the same property over $A$. \item If $M_B$ is almost finitely generated projective over $B$, then $M$ has the same property over $A$. \item If, in addition, $B$ is almost finitely presented as an $A$-module, then if $M_B$ is almost projective, $M$ has the same property over $A$. \end{enumerate} \end{cor} \begin{proof} \begin{enumerate}[(i)] \item Follows from Theorem~\ref{thm:descent1} and the previous discussion. \item From Theorem~\ref{thm:presented}, one concludes that $M_B$ is almost finitely presented, so from (i), $M$ is too. Moreover, $M$ is flat, since flatness descends along the faithfully flat morphism $A \to B$; so by (ii) of the same theorem, $M$ is almost projective. \item We know that $B$ is therefore almost finitely generated projective, so \cite[Lemma 2.4.31]{gabram} applies, which says that $B \otimes_A \alhom{A}{M}{N} \to \alhom{B}{B\otimes_A M}{ B \otimes_A N}$ is an isomorphism. Due to the faithful flatness of $B$, the proposition is evident. \end{enumerate} \end{proof} For morphisms satisfying the descent condition (*) we have the following bounds: \begin{lem}\label{lem:boundkercoker} Assume that condition (*) holds. For any $A$-linear morphism $\phi: M \to N$, set $\phi_B := \phi \otimes_A 1_B$; then: \begin{enumerate}[(i)] \item $\text{Ann}_A(\coker(\phi_B))^m \subset \text{Ann}_A(\coker(\phi))$. \item $(\text{Ann}_A(\ker(\phi_B)) \cdot \text{Ann}_A(\tor{1}{A}{B}{N}) \cdot \text{Ann}_A(\coker(\phi)))^m \subset \text{Ann}_A(\ker(\phi))$ \end{enumerate} \end{lem} \begin{proof} Our proof here is essentially part of the proof given in \cite[Lemma 3.2.23]{gabram}, but we include it for convenience.\\ \indent Let $\mathscr{C}$ be the complex $N \stackrel{\phi}{\longleftarrow} M$ concentrated in degrees $0$ and $1$ (with $M$ placed in degree $1$) and consider the complex $\mathscr{C} \otimes_A B$.
We have two converging spectral sequences: \[E_{p,q}^{2,h} := \tor{q}{A}{H_{p}(\mathscr{C})}{B} \Rightarrow H_{p+q}(\text{Tot}(\mathscr{C} \otimes_A B)),\] \[ E_{p,q}^{1,v} := \tor{q}{A}{\mathscr{C}_p}{B} \Rightarrow H_{p+q}(\text{Tot}(\mathscr{C} \otimes_A B)).\] We immediately derive that $\coker(\phi_B)=E_{0,0}^{\infty,v} = E_{0,0}^{\infty,h} = \coker(\phi)\otimes_A B$ and the two exact sequences: \[\tor{2}{A}{\coker(\phi)}{B} \to \ker(\phi) \otimes_A B \to H_1(\text{Tot}(\mathscr{C} \otimes_A B)),\] \[\coker(\tor{1}{A}{M}{B} \to \tor{1}{A}{N}{B}) \to H_1(\text{Tot}(\mathscr{C} \otimes_A B)) \to \ker(\phi_B).\] So we see that $\text{Ann}_A(\coker(\phi_B))^m = \text{Ann}_A(\coker(\phi)\otimes_A B)^m \subset \text{Ann}_A(\coker(\phi))$ and from the two exact sequences: \[(\text{Ann}_A(\ker(\phi_B)) \cdot \text{Ann}_A(\tor{1}{A}{B}{N}) \cdot \text{Ann}_A(\coker(\phi)))^m \subset \text{Ann}_A(\ker(\phi) \otimes_A B)^m \subset \text{Ann}_A(\ker(\phi)).\] \end{proof} Moreover, (*) comes up rather naturally in usual ring theory, and we discuss the almost variant: \begin{lem}\label{lem:nilpotent} Let $\phi: A \to B$ be a morphism of almost algebras that is finite with nilpotent kernel. Then, $\phi$ has property (*). \end{lem} \begin{proof} This is essentially a reworded version of \cite[Lemma 3.2.23]{gabram}. First we recall the lemma used there (\cite[Lemma 3.2.21]{gabram}): \begin{lem} Let $R$ be a ring, $M$ a finitely generated $R$-module such that $\text{Ann}_R(M)$ is a nilpotent ideal. Then $R$ admits a filtration $0 = J_0 \subset ... \subset J_m = R$ such that each $J_{i+1}/J_i$ is a quotient of a direct sum of copies of $M$. \end{lem} We want to apply the lemma upon applying $(-)_*$ to the given situation, but it isn't necessarily true that $B_*$ is still a finitely generated $A_*$-module; nevertheless, we may find a finitely generated $A_*$-module $Q$ such that $\ideal{m} \cdot B_* \subset Q \subset B_*$ and such that $Q$ satisfies the condition of the lemma (see Remark~\ref{rem:initrem} (i) and (iv)). We thus know that there is a filtration $0 = J_0 \subset ... \subset J_m = A_*$ such that each quotient $J_{i+1}/J_i$ is a quotient of a direct sum of copies of $Q$. So, as $Q \hookrightarrow B_*$ is an almost isomorphism: \[\text{Ann}_{A}(M \otimes_A B) = \text{Ann}_{A}(M \otimes_A Q^a) \subset \text{Ann}_A(M \otimes_A (J_{i+1}/J_{i})^a)\] for each $i=0,1,...,m-1$, and therefore: \[\text{Ann}_{A}(M \otimes_A B)^m \subset \prod_{i=0}^{m-1}\text{Ann}_A(M \otimes_A (J_{i+1}/J_{i})^a)\subset \text{Ann}_A(M).\] \end{proof} \begin{cor}\label{cor:nildescent} Assume, in addition to the hypotheses of Lemma~\ref{lem:nilpotent}, that $A \to B$ is an epimorphism, and let $M$ be a flat $A$-module. Then $M_B$ being almost projective over $B$ implies that $M$ is almost projective over $A$. \end{cor} \begin{proof} We must show that $\alext{p}{A}{M}{N} =0$ for all $p \ge 1$ and all $A$-modules $N$. Let $I$ be the kernel of $A \to B$; by the assumption, $I$ is nilpotent, so by usual devissage arguments one can reduce to the case that $IN=0$ and therefore $N$ is a $B$-module. We then have a spectral sequence:\footnote{Here, $M$ is an $A$-module and $N$ is a $B$-module.} \[\alext{p}{B}{\tor{q}{A}{M}{B}}{N} \Rightarrow \alext{p+q}{A}{M}{N},\] which gives us the isomorphism $\alext{p}{B}{M \otimes_A B}{N} \simeq \alext{p}{A}{M}{N}$, so the result follows. \end{proof} \begin{proof}[Proof of Theorem~\ref{thm:descent1}] See~\cite[Lemma 3.2.25]{gabram}.
\end{proof} While Theorem~\ref{thm:descent1} provides a very general framework for when we can descend almost finite and almost finitely presented modules, we would like a set-up that descends almost projective modules. Consider the following set-up: \[\begin{tikzcd} A_0 \arrow[r, "f_2"] \arrow[d, "f_1"]& A_2 \arrow[d, "g_2"] \\ A_1 \arrow[r, "g_1"] & A_3 \end{tikzcd}\] such that the square is cartesian and one of the morphisms $g_1, g_2$ is an epimorphism. Geometrically, one may view $\spec{A_0}$ as being formed by gluing $\spec{A_2}$ and $\spec{A_1}$ along a closed subscheme of one of the two; we call the diagram a \textit{gluing diagram}. Note that we have a corresponding essentially commutative diagram: \begin{equation}\label{eq:descenteq}\begin{tikzcd} A_0\text{-Mod} \arrow[r, "f_{2*}"] \arrow[d, "f_{1*}"]& A_2\text{-Mod} \arrow[d, "g_{2*}"] \\ A_1\text{-Mod} \arrow[r, "g_{1*}"] & A_3\text{-Mod} \end{tikzcd}\end{equation} where the subscripts denote the extension-of-scalars functors. The theorem we want to prove is the following (\cite[Proposition 3.4.21]{gabram}): \begin{thm}\label{thm:descent2} The above diagram is $2$-cartesian on the subcategories of almost projective modules and of flat modules. \end{thm} We define the category of $\cal{D}$-modules as the $2$-fibre product $\cal{D}\text{-Mod} := A_{1}\text{-Mod} \times_{A_3\text{-Mod}} A_2\text{-Mod}$. In particular, the objects of this category are triples $(M_1,M_2, \xi)$ where $M_i$ is an $A_i$-module ($i=1,2$) and $\xi: A_3 \otimes_{A_1} M_1 \to A_{3} \otimes_{A_2} M_2$ is an $A_3$-linear isomorphism. We have a natural functor: \[\pi: A_0\text{-Mod} \to \cal{D}\text{-Mod}\] which sends a module $M_0 \in A_0\text{-Mod}$ to the triple $(A_1 \otimes_{A_0} M_0, A_2 \otimes_{A_0} M_0, \xi_{M_0})$ where $\xi_{M_0}: A_3 \otimes_{A_1}(A_1 \otimes_{A_0} M_0) \to A_3 \otimes_{A_2}(A_2 \otimes_{A_0} M_0)$ is the natural isomorphism arising from the commutativity $g_{1} \circ f_{1} = g_2 \circ f_2$. It is clear what this functor does on morphisms. We set $\cal{D}\text{-Mod}_{\text{fl}} := A_{1}\text{-Mod}_{\text{fl}} \times_{A_3\text{-Mod}_{\text{fl}}} A_2\text{-Mod}_{\text{fl}}$ and $\cal{D}\text{-Mod}_{\text{apr}} := A_{1}\text{-Mod}_{\text{apr}} \times_{A_3\text{-Mod}_{\text{apr}}} A_2\text{-Mod}_{\text{apr}}$ for the fibered product of the sub-categories of flat and almost projective modules, respectively.\\ \indent We can also construct a functor going in the other way. Indeed, given an object $(M_1, M_2, \xi)$ in $\cal{D}\text{-Mod}$, set $M_3 := A_3 \otimes_{A_2} M_2$; we have natural morphisms $M_2 \to M_3$ and $M_1 \to A_3 \otimes_{A_1} M_1 \stackrel{\xi}{\to} M_3$, so we can form the fibered product $T(M_1, M_2, \xi) := M_1 \times_{M_3} M_2$. One thus obtains a functor $T: \cal{D}\text{-Mod} \to A_0\text{-Mod}$ that is right adjoint to $\pi$, which follows from the universal property of fibered products and the natural isomorphism $\hom{A}{M}{N} \simeq \hom{B}{B \otimes_A M}{N}$ where $A \to B$ is a morphism of (almost) algebras, $M$ is an $A$-module and $N$ is a $B$-module. We set $\varepsilon: 1_{A_0\text{-Mod}} \to T \circ \pi$ and $\eta: \pi \circ T \to 1_{\cal{D}\text{-Mod}}$ to be the unit and counit of the adjunction, respectively. The next lemma is a general property of adjunctions.
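Before stating it, we record the simplest instance of the above construction as a consistency check (it is not needed in what follows): since the gluing square is cartesian, one has $T(\pi(A_0)) = A_1 \times_{A_3} A_2 = A_0$, so the unit $\varepsilon_{A_0}$ is an isomorphism; this is consistent with Lemma~\ref{lem:descprop1} below, as $\tor{1}{A_0}{A_0}{A_3} = 0$.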
\begin{lem}\label{lem:adj} The functor $\pi$ induces an equivalence of full subcategories: \[\{X \in \text{Ob}(A_{0}\text{-Mod}) | \varepsilon_X \text{ is an isomorphism}\} \to \{Y \in \text{Ob}(\cal{D}\text{-Mod}) | \eta_Y \text{ is an isomorphism}\}.\] \end{lem} \begin{lem}\label{lem:descprop1} Let $M$ be any $A_0$-module. Then, $\varepsilon_M$ is an epimorphism with kernel $\im(\tor{1}{A_0}{M}{A_3} \to M)$. Thus if $\tor{1}{A_0}{M}{A_3}=0$, then $\varepsilon_M$ is an isomorphism. \end{lem} \begin{proof} See \cite[Lemma 3.4.10]{gabram}. \end{proof} \begin{lem}\label{lem:descprop2} $\eta_{(M_1,M_2,\xi)}$ is an isomorphism for all objects $(M_1,M_2, \xi)$ in $\cal{D}$-Mod. \end{lem} \begin{proof} See \cite[Lemma 3.4.14]{gabram}. \end{proof} Our first goal is to prove Theorem~\ref{thm:descent2} in the case of flat modules. We discuss the next series of lemmas in preparation for this. \begin{rem}\label{rem:flatdescover} For us, Lemmas~\ref{lem:descprop1} and \ref{lem:descprop2} will be very important; roughly speaking, they say that we can identify a flat module $M$ over $A_0$ via its `parts', and vice versa. Combining this with Lemma~\ref{lem:adj}, we reduce proving Theorem~\ref{thm:descent2}, in the case of flat modules, to showing that $T$ restricts to a functor $\cal{D}\text{-Mod}_{\text{fl}} \to A_0\text{-Mod}_{\text{fl}}$; likewise for almost projective modules. \end{rem} \begin{lem}\label{lem:descreltor} Let $M$ be any $A_0$-module and $n\ge 1$ an integer. The following conditions are equivalent: \begin{enumerate}[(i)] \item $\tor{j}{A_0}{M}{A_i} = 0$ for every $j \le n$ and $i=1,2,3.$ \item $\tor{j}{A_i}{A_{i} \otimes_{A_0} M}{A_3}=0$ for every $j \le n$ and $i=1,2$. \end{enumerate} \end{lem} \begin{proof} See~\cite[Lemma 3.4.15]{gabram}. \end{proof} The next lemma, \cite[Lemma 3.4.18]{gabram}, is crucial, and we reproduce the proof for the convenience of the reader. \begin{lem}\label{lem:flatdesc} Let $M$ be any $A_0$-module. We have: \begin{enumerate}[(i)] \item The map $A_0 \to A_1 \times A_2$ fulfils condition (*), i.e. $\text{Ann}_{A_0}( M \otimes_{A_0} A_1) \cdot \text{Ann}_{A_0}(M \otimes_{A_0} A_2) \subset \text{Ann}_{A_0}(M)$. \item $M$ admits a three-step filtration $0 \subset F_0(M) \subset F_1(M) \subset F_2(M) = M$ such that $F_0(M)$ and $\text{gr}_2(M)$ are $A_2$-modules and $\text{gr}_1(M)$ is an $A_1$-module. \item If $(A_1 \times A_2) \otimes_{A_0} M$ is flat over $A_1 \times A_2$, then $M$ is flat over $A_0$. \end{enumerate} \end{lem} \begin{proof}Fix $I$ to be the common kernel of $A_1 \to A_3$ and $A_0 \to A_2$. \begin{enumerate}[(i)] \item We note first that $\text{Ann}_{A_0}(M \otimes_{A_0} A_2) \cdot M \subset IM$. Next, the composition $I \otimes_{A_1} (A_{1} \otimes_{A_0} M) \to I \otimes_{A_0} M \to IM$ is an epimorphism, and thus $\text{Ann}_{A_0}( M \otimes_{A_0} A_1) \cdot IM = 0$. Hence, $\text{Ann}_{A_0}( M \otimes_{A_0} A_1) \cdot \text{Ann}_{A_0}(M \otimes_{A_0} A_2) \subset \text{Ann}_{A_0}(M)$, so $\text{Ann}_{A_0}( M \otimes_{A_0} (A_1 \times A_2))^2 \subset \text{Ann}_{A_0}(M)$. \item Set $F_0(M) := \ker(\varepsilon_M)$. By Lemma~\ref{lem:descprop1}, we derive that $F_0(M) \simeq \im(\tor{1}{A_0}{M}{A_3} \to M)$, which is killed by $I$ and therefore is an $A_0/I \simeq A_2$-module. We then set $F_1(M) := \varepsilon_M^{-1}(M \otimes_{A_0} A_1)$ and it is clear that this results in a valid filtration.
\item We are given that $M \otimes_{A_0} A_i$ is flat over $A_i$ for $i=1,2$, and also, by Lemma~\ref{lem:descreltor}, that $\tor{1}{A_0}{M}{A_i} = 0$ for $i=1,2,3$. The result now follows from Theorem~\ref{thm:genlocflat}. \end{enumerate} \end{proof} \begin{proof}[Proof of Theorem~\ref{thm:descent2}] In the case of flat modules, combine Remark~\ref{rem:flatdescover} and Lemma~\ref{lem:flatdesc}. Again, by Remark~\ref{rem:flatdescover}, we reduce to proving that if $A_i \otimes_{A_0} M$ is almost projective over $A_i$ for $i=1,2$, then $M$ is almost projective over $A_0$. We already know that $M$ is flat over $A_0$, so for any $A_i$-module $N$, we have $\alext{1}{A_i}{A_i \otimes_{A_0} M}{N} = \alext{1}{A_0}{M}{N}$, as one can check by using the spectral sequence: \[\alext{p}{B}{\tor{q}{A}{M}{B}}{N} \Rightarrow \alext{p+q}{A}{M}{N}.\] This extends to all $A_0$-modules $N$ via Lemma~\ref{lem:filtorext}, so we are done. \end{proof} \begin{cor}\label{cor:descent3} In the situation of (\ref{eq:descenteq}), the diagram is $2$-cartesian upon restricting to the subcategories $\text{Alg}_{\text{fl}}$, $\text{\'{E}t}$ (i.e. \'{e}tale algebras), $\text{w.\'{E}t}$ (i.e. weakly \'{e}tale algebras), $\text{Alg}_{\text{afgfl}}$ (i.e. almost finitely generated flat algebras), $\text{Alg}_{\text{afpfl}}$ (i.e. almost finitely presented flat algebras), and $\text{\'{E}t}_{\text{afp}}$ (i.e. almost finitely presented \'{e}tale algebras\textemdash these we will call the finite \'{e}tale algebras in the almost category). \end{cor} \begin{proof} The first three follow from Theorem~\ref{thm:descent2} upon ensuring that $\pi$ and $T$ take algebras to algebras. Indeed, this gives the claim for $\text{Alg}_{\text{fl}}$; for the other two, one further utilises that $\mu_{B_i/A_i} = \mu_{B_0/A_0} \otimes_{A_0} 1_{A_i}$, where $B_0$ is an $A_0$-algebra and $B_i := B_0 \otimes_{A_0} A_i$. For $\text{Alg}_{\text{afgfl}}$, one concludes via Theorem~\ref{thm:descent1} and Lemma~\ref{lem:flatdesc} (i). For $\text{Alg}_{\text{afpfl}}$, suppose $M$ is an $A_0$-algebra such that $A_i \otimes_{A_0} M$ is both flat and almost finitely presented over $A_i$ for $i=1,2$. We know that $M$ is at the very least flat, but by Theorem~\ref{thm:descent1} and Lemma~\ref{lem:flatdesc} (i), this is enough to conclude that it is also almost finitely presented. The case of $\text{\'{E}t}_{\text{afp}}$ follows similarly. \end{proof} \section{Almost Witt vectors and purity} Suppose we are given an almost set-up $(R, \ideal{m})$ that satisfies Condition (B). The first question that we hope to answer concerns the lifting of almost set-ups to finite length Witt vectors.\footnote{The author has been told that some of these results have been published by Gabber and Ramero in their sequel, \textit{Foundations of Almost Ring Theory}. To avoid duplication, in proofs where there was minor overlap we have chosen to refer the reader to their book.} To fix notation, we set $W_0(A) := A$ as our indexing convention, $\omega_i$ to be the Witt polynomial of degree $p^i$, and $F$ the Witt-vector Frobenius. Before proceeding, we begin with a lemma. \begin{lem}\label{lem:powersubset} Let $R$ be a ring with an ideal $I \subset R$ satisfying Condition (B). Suppose that we have a subset $S \subset I$ of elements for which $I = \sum_{x \in S} Rx$, and that $S$ is both additively and multiplicatively closed. Then, $I = \sum_{x \in S} Rx^k$ for any $k > 1$. \end{lem} \begin{proof} One replicates the argument given in \cite[Claim 2.1.9]{gabram}.
\end{proof} \begin{lem}\label{lem:nilpotentlift} Let $f: S \to R$ be a surjective map of rings with nilpotent kernel. Then there is a unique ideal $\ideal{m}_S \subset S$ lifting $\ideal{m}$ such that $\ideal{m}_S^2 = \ideal{m}_S$. Furthermore, $\ideal{m}_S$ satisfies Condition (B) if $\ideal{m}$ does. \end{lem} \begin{proof} See~\cite[Lemma 14.7.1]{gabram2}. \end{proof} The following theorem will be used to define a `unique' almost set-up on finite length Witt vectors. \begin{thm}\label{thm:wittlift} Let $(R, \ideal{m})$ be an almost set-up, with $\ideal{m}$ satisfying Condition (B). Then there exist ideals $\ideal{m}_n \subset W_n(R)$ such that: \begin{enumerate}[(i)] \item $\ideal{m}_n^2 = \ideal{m}_n$, and hence $(W_n(R), \ideal{m}_n)$ is again an almost set-up. \item $\omega_n(\ideal{m}_n)R = \ideal{m}$ and $\text{pr}_n(\ideal{m}_n) = \ideal{m}_{n-1}$ where $\text{pr}_n: W_n(R) \to W_{n-1}(R)$ is the canonical projection. \end{enumerate} \end{thm} We have two maps $\omega_n: W_{n}(R) \to R$ and $\text{pr}_{n}: W_n(R) \twoheadrightarrow W_{n-1}(R)$. Denote by $\alpha_n$ the induced map $W_n(R) \to W_{n-1}(R) \times R$, and let $\overline{W_n}(R):= \im(\alpha_n)$. We have a gluing diagram: \begin{equation}\begin{tikzcd} \overline{W_n}(R) \arrow[r, "\text{pr}_n"] \arrow[d, "\omega'_n"] & W_{n-1}(R)\arrow[d, "\overline{\omega_n}"] \\ R \arrow[r, twoheadrightarrow, "\pi"] &R/p^n \end{tikzcd}\end{equation} One may check that the kernel $I_R := \ker(\alpha_n)$ is square zero, i.e.\ $I_R^2 = 0$ (\cite[Proposition 8.1 (b)]{borger2015basic}). Our first step towards proving Theorem~\ref{thm:wittlift} is to lift $\ideal{m}$ to $\overline{W_n}(R)$. To do so, we work more broadly. \begin{thm}\label{thm:gluinglift} Suppose we are given a gluing diagram: \[\begin{tikzcd} A_0 \arrow[r, "f_1"] \arrow[d, "f_2"] & A_1 \arrow[d, "g_1"]\\ A_2 \arrow[r, "g_2"] & A_3 \end{tikzcd}\] with $g_2$ (and hence $f_1$) surjective, and ideals $\ideal{m}_1 \subset A_1$, $\ideal{m}_2 \subset A_2$ such that they agree over $A_3$, i.e.\ $g_1(\ideal{m}_1) A_3 = g_2(\ideal{m}_2)$. Then there exists a unique ideal $\ideal{m} \subset A_0$ with the following properties: \begin{enumerate}[(i)] \item $\ideal{m}^2 = \ideal{m}$, and hence $(A_0, \ideal{m})$ is an almost set-up. \item $f_1(\ideal{m})A_1 = \ideal{m}_1$ and $f_2(\ideal{m})A_2 = \ideal{m}_2$. \end{enumerate} Furthermore, if both $\ideal{m}_1$ and $\ideal{m}_2$ satisfy Condition (B), then so does $\ideal{m}$. \end{thm} \begin{proof} For the proofs we refer the reader to \cite[Proposition 14.7.5]{gabram2}. We also present a slightly different approach to the last statement: it suffices to show that the ideals $\ideal{m}'_{k} \subset \ideal{m}$ generated by the $k^{\text{th}}$-powers of $\ideal{m}$ satisfy condition (ii). It is indeed clear that $f_1(\ideal{m}'_{k}) = \ideal{m}_1$ because $\ideal{m}_1$ has Condition (B). Since $f_2(\ideal{m})A_2 = \ideal{m}_2$, by Lemma~\ref{lem:powersubset}, it is also clear that $f_2(\ideal{m}'_k)A_2 = \ideal{m}_2$. \end{proof} \begin{proof}[Proof of Theorem~\ref{thm:wittlift}]We proceed by induction on $n$, where the base case $n=0$ is tautological. Now, assuming the existence of such a lift $\ideal{m}_{n-1} \subset W_{n-1}(R)$, we shall construct $\ideal{m}_n$.
Recall the gluing diagram: \[\begin{tikzcd} \overline{W_n}(R) \arrow[r, "\text{pr}_n'"] \arrow[d, "\omega'_n"] & W_{n-1}(R)\arrow[d, "\overline{\omega_n}"] \\ R \arrow[r, twoheadrightarrow, "\pi"] &R/p^n \end{tikzcd}\] We want to show that $\pi(\ideal{m}) R/p^n = \overline{\omega_n}(\ideal{m}_{n-1}) R/p^n$. Their reductions modulo $p$ are equal because $\ideal{m}_{n-1}$ satisfies Condition (B). Since both ideals are idempotent, and $p$ is nilpotent in $R/p^n$, we conclude equality in $R/p^n$ by Lemma~\ref{lem:nilpotentlift}.\\ \indent We can then apply Theorem~\ref{thm:gluinglift} to obtain an ideal $\overline{\ideal{m}_{n}}$ such that $\omega_n(\overline{\ideal{m}_n})R = \ideal{m}$ and $\text{pr}_n(\overline{\ideal{m}_n}) = \ideal{m}_{n-1}$. Since $I_R$ is nilpotent, the ideal $\overline{\ideal{m}_n} \subset \overline{W_n}(R)$ lifts to an ideal $\ideal{m}_n \subset W_n(R)$ satisfying the desired conditions. \end{proof} All in all, given an almost set-up $(R, \ideal{m})$, we have constructed almost set-ups $(W_n(R), \ideal{m}_n)$. We want to show that $W_n$ preserves almost isomorphisms. Given $R$-algebras $A, B$ and an almost isomorphism $f: A \to B$, we hope the kernel and cokernel of $W_nf$ are killed by $\ideal{m}_{n}$. For the kernel, by induction, this reduces to $V_n(\ker(f))/V_{n+1}(\ker(f)) \simeq \ker(f)$ being killed by $\omega_n(\ideal{m}_n) = \ideal{m}$; clear. A similar argument applies for the cokernel, and therefore, $W_n$ becomes a well defined endofunctor in the category of $\cal{B}^a$-Alg (notation of \ref{fibalm}). We also note that $W_n(A) = W_n(A_*)^a$, which will be used throughout this section without further notice. \begin{rem}If we were primarily interested in ideals such as $\ideal{m} = (f^{1/p^{\infty}})$, then the whole discussion above simplifies greatly. \end{rem} \indent Condition (B) gives $\omega_i(\ideal{m}_n) A = \ideal{m}$ and $F(\ideal{m}_n) A= \ideal{m}$, so the morphisms are defined in $\cal{B}$ and give natural transformations between $W_n(-)$ and the identity, $W_{n-1}(-)$ respectively. These results allow us to consider these morphisms in the almost category, and they behave as expected. For a morphism of rings $f: A \to B$ and a $B$-module $M$, we will use the notation $f_*(M)$ for when we want to view $M$ as an $A$-module. For an arbitrary $R^a$-algebra $A$, set $A_0:=A/pA$, and for $\mathbb{F}_p$-algebras $A_0 \to B_0$, let $\Phi_{B_0/A_0}$ denote the relative Frobenius. For an $A$-algebra $B$, set $V_n(B):=V_n(B_*)^a$, where it is straight forward to see the isomorphism $V_i(A)/V_{i+1}(A) \simeq \omega_{i*}(A)$. \begin{thm}\label{thm:wittdescent} Let $A \to B$ be a flat morphism of $R^a$-algebras, and suppose that $\Phi_{B_0/A_0}$ is an isomorphism. Then, $W_n(f): W_n(A) \to W_n(B)$ is flat and $\omega_{i*}(B) \simeq W_n(B) \otimes_{W_n(A)} \omega_{i*}(A)$ for each $0 \le i \le n$. \end{thm} Let us examine the immediate effects of this: \begin{cor} Suppose $f: A \to B$ is weakly \'{e}tale, then $W_n(B) \otimes_{W_n(A)} W_n(C) \simeq W_n(B \otimes_A C)$ for $C$ an $A$-algebra, and $W_n(f): W_n(A) \to W_n(B)$ is weakly \'{e}tale. \end{cor} \begin{proof} Note since $A \to B$ is weakly \'{e}tale, therefore so is $A_0 \to B_0$, and in particular $\Phi_{B_0/A_0}$ is an isomorphism. 
To prove the first proposition, by considering the exact sequences $0 \to V_{i+1}(C) \to V_i(C) \to w_{i*}(C) \to 0$, we reduce by induction to proving the following isomorphism of $W_n(A)$-modules: \[W_n(B) \otimes_{W_n(A)} \omega_{i*}(C) \simeq \omega_{i*}(B) \otimes_{\omega_{i*}(A)} \omega_{i*}(C).\] Noting that $\omega_{i*}(C)$ is an $\omega_{i*}(A)$-module, by base-change this reduces to the second part of Theorem~\ref{thm:wittdescent}. To prove $W_n(f)$ is weakly \'{e}tale, we have that the multiplication map $\mu_B: B \otimes_A B \to B$ is flat, but in particular also weakly \'{e}tale too. Therefore by the first part of this proposition, $W_n(\mu_B): W_n(B) \otimes_{W_n(A)} W_n(B) \to W_n(B)$ is flat, and the conclusion follows. \end{proof} \begin{cor}\label{cor:wittpushoutfrob} Assume the conditions of Theorem~\ref{thm:wittdescent}; the diagram: \[\begin{tikzcd}W_n(A) \arrow[r, "F"] \arrow[d, "W_n f"] & W_{n-1}(A) \arrow[d, "W_{n-1}f"] \\ W_n(B) \arrow[r, "F"] & W_{n-1}(B) \end{tikzcd}\] is cocartesian i.e. there exists an isomorphism $W_n(B) \otimes_{W_n(A)} W_{n-1}(A) \to W_{n-1}(B)$ of $W_{n-1}(A)$-modules. \end{cor} \begin{proof} We proceed via induction on $n$; the base case where $n=1$ asserts the isomorphism $W_1(B) \otimes_{W_1(A), F} A$, which reduces to the second part of Theorem~\ref{thm:wittdescent} by noticing that $F$ and $w_1$ coincide as maps from $W_1(A)$ to $A$. Now, consider the exact sequence of $W_{n+1}(A)$-modules: \[\begin{tikzcd} 0 \arrow[r] & V_{n}(A) \arrow[r] & W_{n}(A) \arrow[r, "\text{pr}_n"] & W_{n-1}(A) \arrow[r] & 0 \end{tikzcd}\] The action on the middle module is via $F: W_{n+1}(A) \to W_{n}(A)$, and the action on the right module is via $F \circ \text{pr}_{n+1} = \text{pr}_{n} \circ F$. The induced action on the ideal $V_n(A)$ is by $\omega_n \circ F = \omega_{n+1}$, so we identify $V_n(A) \simeq \omega_{n+1*}(A)$ as $W_{n+1}(A)$-modules. Now, we may tensor the sequence above with the flat $W_{n+1}(A)$-module $W_{n+1}(B)$ to obtain the exact sequence: \[\begin{tikzcd} 0 \arrow[r] & V_{n}(B) \arrow[r] & W_{n+1}(B) \otimes_{W_{n+1}(A), F} W_{n}(A) \arrow[r, "\text{pr}_n"] & W_{n-1}(B) \arrow[r] & 0 \end{tikzcd}\] It then directly follows that the natural map $W_{n+1}(B) \otimes_{W_{n+1}(A), F} W_{n}(A) \to W_{n}(B)$ is an isomorphism. \end{proof} The relative Frobenius condition appearing in Theorem~\ref{thm:wittdescent} is not usually seen in the literature. As one can see through the proof of Theorem~\ref{thm:wittdescent2}, the main obstruction to proving that $W_n$ preserves flat algebras is showing the claim $w_{n*}(A) \otimes_{W_{n}(A)} W_n(B) \simeq w_{n*}(B)$. When $p=0$ in $A$, this simply reduces to the relative Frobenius condition. Our proof of Theorem~\ref{thm:wittdescent} actually shows that the claim is independent of proving flatness, mostly due to the following lemma. \begin{lem}\label{lem:nilflatiso} Let $f: M \to N$ be a morphism of $R$-modules, with $N$ being flat. Let $S$ be an $R$-algebra satisfying Condition (*). If $f \otimes_R S$ is an isomorphism, then so is $f$. \end{lem} \begin{proof} We begin with the following fact: if an $R$-module $M$ satisfies $M\otimes_R S \simeq 0$ then $M \simeq 0$. Indeed, one obtains that $R=\text{Ann}_R(M \otimes_R S)^m \subseteq \text{Ann}_R(M) \subseteq R$ for some $m \in \mathbb{N}_{>0}$, so $M \simeq 0$.\\ \indent Using this fact, in combination with the right exactness of tensoring, one concludes that $\coker(f) \simeq 0$, or in other words, that $f$ is an epimorphism. 
Thus, we have a short exact sequence $0 \to \ker(f) \to M \to N \to 0$. Applying $-\otimes_R S$ and using Tor sequences, we see that $0 \to \ker(f) \otimes_R S \to M\otimes_R S \to N \otimes_R S \to 0$ is exact, so we know that $\ker(f) \otimes_R S \simeq 0$. Using the fact at the beginning, one concludes that $\ker(f) \simeq 0$ and, thusly, that $f$ is an isomorphism. \end{proof} \begin{lem}\label{lem:annwitt} Suppose that $\Phi_{B_0/A_0}$ is an epimorphism and $B$ is flat over $A$. Then, $I_A \otimes_{W_n(A)} W_n(B) \to I_B$ is an epimorphism.\footnote{Recall that $I_R:=\ker(W_n(R) \to \overline{W_n}(R))$, and note that $I_R \simeq \text{Ann}_{R}(p^n)$} \footnote{The author would like to thank Dr James Borger for helping write their initial proof in a more readable way.} \end{lem} \begin{proof} Note that we have an isomorphism $\text{Ann}_{A}(p^n) \otimes_A B \stackrel{\text{ev}}{\to} \text{Ann}_B(p^n)$ as $B$ is flat over $A$; hence $I_A \otimes_A B \simeq I_B$. As $I_A \otimes_A-$is right exact, it suffices to show that $w_{n*}(A) \otimes_{W_n(A)} W_n(B) \to B$ is an epimorphism of $A$-modules. \\ \indent Define a bifunctor by the rule $- \otimes W_n(-) := ((-)_* \otimes_{\mathbb{Z}} W((-)_*))^a$. Consider the morphism $\phi_{n}: A \otimes W_n(B)) \to B$ which takes $a \otimes b \mapsto aw_n(b)$ for almost elements $a \in A_*, b \in W_n(B_*)$. We have a factoring $A \otimes W_n(B) \twoheadrightarrow w_{n*}(A) \otimes_{W_n(A)} W_n(B) \to B $ and therefore it suffices to show that $\phi_n$ is an epimorphism.\\ \indent We proceed by induction on $n$; the case when $n=0$ is tautological. Consider the diagram with exact rows: \[\begin{tikzcd} A \otimes W_{n-1}(B) \arrow[r, "\text{id}_A\otimes V"] \arrow[d, "\phi_{n-1}"]& A \otimes W_n(B) \arrow[r] \arrow[d, "\phi_{n}"] & B \otimes A \arrow[r] \arrow[d, "b \otimes a \mapsto b^{p^n} a"]& 0\\ B \arrow[r, "p"] & B \arrow[r] & B/pB \arrow[r] & 0 \end{tikzcd}\] Commutativity of the left-hand square follows from the identity $w_n \circ V = pw_{n-1}$. The left-down arrow is an epimorphism by induction, and the right-down arrow is an epimorphism by the assumption on the relative Frobenius. We then conclude the middle-down arrow is an epimorphism, as desired. \end{proof} \begin{proof}[Proof of Theorem~\ref{thm:wittdescent}] Recall the gluing diagram: \begin{equation}\label{eq:wittdescent}\begin{tikzcd} \overline{W_n}(A) \arrow[r, "\text{pr}_n'"] \arrow[d, "\omega'_n"] & W_{n-1}(A)\arrow[d, "\overline{\omega_n}"] \\ A \arrow[r, twoheadrightarrow, "\pi"] & A/p^n \end{tikzcd}\end{equation} Let $I_A := \ker(\alpha_n)$, which may be characterised as the $W_n(A)$-module $\text{Ann}_{\omega_{n*}(A)}(p^n)$. As an ideal of $W_n(A)$, one may check that $I_A^2=0$.\\ We will prove the statement that $W_{n}(B)$ is flat over $W_n(A)$ and $\omega_{n*}(A) \otimes_{W_n(A)} W_n(B) \simeq \omega_{n*}(B)$ via induction on $n$; the base case when $n=0$ is tautological. Assume the statement is true for $k=n-1$, we will show it then for $n$.\\ \noindent\textit{Claim:} $(W_{n-1}(B), B)$ is a gluing datum for Diagram~\ref{eq:wittdescent}.\\ We must show that $W_{n-1}(B) \otimes_{W_{n-1}(A)} \overline{\omega_n}_{*}(A/p^n) \to \overline{\omega_n}_{*}(B/p^n)$ is an isomorphism. Since $B/p^n$ is a flat $A/p^n$-module, and $p$ is nilpotent in $A/p^n$, we are in the situation of Lemma~\ref{lem:nilflatiso} (see Lemma~\ref{lem:nilpotent}) where we reduce to showing the isomorphism after tensoring with $- \otimes_{\mathbb{Z}} \mathbb{F}_p$. 
Thus, we need to show that $W_{n-1}(B) \otimes_{W_{n-1}(A)} \overline{\omega_n}_{*}(A_0) \simeq B_0$. The map $W_{n-1}(A) \stackrel{\overline{\omega_n}}{\to} A_0$ factors via $W_{n-1}(A) \stackrel{\omega_{n-1}}{\to} A \stackrel{\pi}{\to} A_0 \stackrel{\Phi_{A_0}}{\to}A_0$, so we have: \begin{align*}W_{n-1}(B) \otimes_{W_{n-1}(A)} \overline{\omega_n}_{*}(A_0) &\simeq (W_{n-1}(B) \otimes_{W_{n-1}(A)} \omega_{n-1*}(A)) \otimes_{A} \overline{\pi}_{*}(A_0) \otimes_{A_0} \Phi_{A_0*}(A_0)\\ &\simeq \omega_{n-1*}(B_0) \otimes_{A_0} \Phi_{A_0*}(A_0) \simeq \overline{\omega_{n}}_*(B_0), \end{align*} where the last isomorphism followed from our hypothesis on $\Phi_{B_0/A_0}$. $\Box$\\ Hence, according to our claim, setting $A_1 := W_{n-1}(A), A_2 := A, A_3 := A/p^n$ and $\xi: W_{n-1}(B) \otimes_{W_{n-1}(A)} A/p^n \simeq B \otimes_{A} A/p^n$ in Equation~\ref{eq:descenteq}, we can form the $\overline{W}_n(A)$-module $T(W_{n-1}(B), B, \xi) = \overline{W_n}(B)$. From Lemma~\ref{lem:descprop2} we immediately derive that $\overline{W_n}(B) \otimes_{\overline{W_n}(A)}w'_{n*}(A) \simeq w'_{n*}(B)$ and that $\overline{W}_n(B)$ is a flat $\overline{W}_n(A)$-module.\\ \indent By utilising Lemma~\ref{lem:annwitt}, we obtain that $I_A W_n(B) \simeq I_B$. Therefore, $W_n(B)\otimes_{W_n(A)} \overline{W_n}(A) \simeq \overline{W_n}(B)$. Since $\omega_n = \alpha_n \circ \omega'_n$, we have: \[W_n(B) \otimes_{W_n(A)} \omega_{n*}(A) \simeq (W_n(B) \otimes_{W_n(A)} \overline{W_n}(A)) \otimes_{\overline{W_n}(A)} \omega'_{n*}(A) \simeq \omega_{n*}(B).\] Lastly, to verify that $W_n(B)$ is flat over $W_n(A)$, we appeal to Corollary~\ref{cor:almlocflat}, where it suffices to show that $I_A \otimes_{W_n(A)} W_n(B) \simeq I_B$, or indeed that \[\text{Ann}_{\omega_{n*}(A)}(p^n) \otimes_{W_n(A)} W_n(B) \simeq \text{Ann}_{\omega_{n*}(B)}(p^n).\] But of course, since $I_A^2 = 0$, it is an $\overline{W_n}(A)$-module, so we simply must check that: \[\text{Ann}_{\omega'_{n*}(A)}(p^n) \otimes_{\overline{W_n}(A)} \overline{W_n}(B) \simeq \text{Ann}_{\omega'_{n*}(B)}(p^n)\] which follows from the flatness of $\overline{W_n}(B)$ over $\overline{W_n}(A)$. This completes the induction step. We are only left to show that $\omega_{i*}(A) \otimes_{W_n(A)} W_n(B) \simeq \omega_{i*}(B)$. But this follows from the isomorphism $\omega_{i*}(A) \otimes_{W_n(A)} W_n(B) \simeq \omega_{i*}(A) \otimes_{W_{i}(A)} W_{i}(B) $ and induction. \end{proof} \begin{thm}\label{thm:wittdescent2} Assuming the conditions of Theorem~\ref{thm:wittdescent}, if we also suppose that $A \to B$ is almost finitely presented, almost finitely generated, or almost projective, then so is $W_n(A) \to W_n(B)$. \end{thm} \begin{proof} We know that the map $W_n(A) \twoheadrightarrow \overline{W_n}(A)$ is an epimorphism with nilpotent kernel and thus satisfies Condition (*) in Theorem~\ref{thm:descent1}. By utilising Theorem~\ref{thm:wittdescent} together with Theorem~\ref{thm:descent1} and Corollary~\ref{cor:nildescent}, we reduce the statement to $\overline{W_n}(B)$ over $\overline{W_n}(A)$. By utilising Equation~\ref{eq:wittdescent}, Theorem~\ref{thm:wittdescent}, Theorem~\ref{thm:descent2} its Corollary~\ref{cor:descent3}, we reduce the statement to $W_{n-1}(B)$ as a $W_{n-1}(A)$-module, then by induction to $n=0$ where that statement is tautological. \end{proof} Finally, we get: \begin{cor}\label{cor:wittmain} If $A \to B$ is weakly \'{e}tale (resp. \'{e}tale, almost finite \'{e}tale, almost finitely presented \'{e}tale), then $W_n(A) \to W_n(B)$ is weakly \'{e}tale (resp. 
\'{e}tale, almost finite \'{e}tale, almost finitely presented \'{e}tale). \end{cor} \begin{proof} By Theorem~\ref{thm:wittdescent2}, it is sufficient to show this for when $A \to B$ is (weakly) \'{e}tale. All we need to show is that $W_n(B)$ is (weakly) unramified over $W_n(A)$, which is to say, for the multiplication map $\mu_{W_n(B)/W_n(A)}: W_n(B) \otimes_{W_n(A)} W_n(B) \to W_n(B)$, $W_n(B)$ is required to be an (flat) almost projective $W_n(B) \otimes_{W_n(A)} W_n(B)$-module. But this is clear, because we know that $W_n(B) \otimes_{W_n(A)} W_n(B) \simeq W_n(B\otimes_A B)$, and $B \otimes_A B \to B$ is (flat) almost projective, so Theorem~\ref{thm:wittdescent2} gives the result. \end{proof} We end this section with a modest application. In what follows, we will use the adjective `almost' for a property of modules or algebras in the usual category to be a statement for their almostification. We say a ring $A$ is Witt perfect if $F: W_n(A) \to W_{n-1}(A)$ is surjective. \begin{cor} If $A \to B$ is (almost) weakly \'{e}tale and $A$ is Witt perfect, then $B$ is almost Witt perfect. In the situation when $\ideal{m} = (p^{1/p^{\infty}})$, if $B$ is further assumed to be integrally closed in $B[1/p]$, then $B$ is Witt perfect. \end{cor} \begin{proof} The first part follows directly from Corollary~\ref{cor:wittpushoutfrob}. It now suffices to show the second part. Assuming that $\ideal{m} = (p^{1/p^{\infty}})$, and that $B$ is integrally closed in $B[1/p]$, we must show that $F: W_n(B) \to W_{n-1}(B)$ is surjective. At the very least, we know that it is almost surjective, i.e. the cokernel is killed by $(p^{1/p^{\infty}})$. We now refer to \cite{kedlaya}, where proving the surjectivity of $F: W_{n}(B) \to W_{n-1}(B)$ is equivalent to doing it when $n=1$. Set $c=1-1/p$. For any $b \in B$, we may write $p^{1/p^2}b = b_0^p + pb_1$, and similarly $p^{1/p}b_1 = b_2^p + pb_3$, giving \[p^{1/p^2}b = b_0^p +p^{c}b_2^p + p^{1 + c}b_3 = x_0'^{p} + p^{1+c/p}x_1\] where $x_0' = b_0 + p^{c/p}b_2$ and $x_1 \in B$. We then obtain that $x_0=x_0'/p^{1/p^3}$ solves the polynomial: \[X^p - p^{1 + \frac{c}{p}-\frac{1}{p^2}}x_1 - b \in B[X].\] Since $B$ is integrally closed in $B[1/p]$, we then obtain that $x_0 \in B$, and that $b-x_0^p \in pB$. \end{proof} \begin{rem} \begin{enumerate}[(i)] \item One can check that the condition that $A \to B$ is almost weakly \'{e}tale in the previous corollary could be relaxed to flat and $\phi_{B_0/A_0}$ being an almost isomorphism; see Theorem~\ref{thm:wittdescent}. \item It would be interesting to know whether the previous corollary could be specialised to perfectoid rings, the latter notion for our purposes being that the projection $\lim_{F} W_{n}(A) \to A$ has principal kernel in addition to $A$ being Witt-perfect. \end{enumerate} \end{rem} \subsection{Almost purity} We first deal with almost purity in characteristic $p$ following \cite{gabram}. Before getting into the discussion, let us consider a ring $R$, a non-zero divisor $t$ such that $\ideal{m} = (t^{1/p^{\infty}}) \subset R$. Note that $\ideal{m}^2 = \ideal{m}$, for the reason that $\ideal{m} \subset \ideal{m}^p$, and in this case, $\ideal{m}$ is flat as it is a colimit of free modules i.e. $\ideal{m} = \inj_{i} t^{1/p^{i}} R$ with the coresponding transition maps. 
Thus, $\tilde{\ideal{m}} \simeq \ideal{m}$, and we note that for an almost $R$-module $M^a$ such that $M$ is $t$-torsion-free: \begin{align*}(M^a)_{*} = \hom{R}{\ideal{m}}{M} = \hom{R}{\inj_{i} t^{1/p^{i}}R}{M} &= \lim_i \hom{R}{t^{1/p^{i}}R}{M} \\ &= \{m \in M[1/t]: t^{1/p^i}m \in M, \forall i \ge 0\}. \end{align*} \begin{thm}[Theorem 3.5.28, Gabber-Ramero]\label{thm:almpurcharp} Let $R$ be a perfect $\mathbb{F}_p$-algebra, and fix an almost set-up $\ideal{m} = (t^{1/p^{\infty}})$. We write $R^{a}\text{-f\'{E}t}$ for the category of almost finitely presented \'{e}tale $R^a$-algebras. The functor \[F: R^{a}\text{-f\'{E}t} \to R[1/t]\text{-f\'{E}t}, \; \; \; A \mapsto A_{*}[1/t],\] is an equivalence of categories. \end{thm} We invite the reader to consult the proof given in \cite{gabram}, from which it should be clear that the existence of coperfection in characteristic $p$, which helps to `almost' characterise the integral closure of $R$ in $B$, combined with the existence of Frobenius, makes almost purity not too difficult in this context. Neither of these exists in characteristic $0$, at least in the strict sense. One take-away from the work of Bhatt and Scholze in their paper on prismatic cohomology \cite{prism} is that coperfection should be replaced by a suitable `perfectoidisation' in characteristic $0$, where one ends up with a perfectoid ring instead of a perfect ring. The Frobenius in characteristic $p$ could then be accessed in characteristic $0$ via Frobenius lifts and delta rings. All in all, the ideas in characteristic $p$ seem to, rather magically, apply more broadly.\\ Let us now prove the main theorem of this paper. Fix a perfectoid valuation ring $R$ of rank one with an element $\varpi$ admitting a compatible system of $p$-power roots. \begin{thm}[Almost purity in characteristic $0$]\label{thm:almpurchar0} Let $S$ be a finitely presented $R$-algebra, and suppose $S[1/p]$ is a finite \'{e}tale algebra over $R[1/p]$. Then $\cal{O}_S$, the integral closure of $R$ in $S[1/p]$, is almost finitely presented \'{e}tale over $(R,(\varpi^{1/p^{\infty}}))$. \end{thm} To proceed, we begin with a few lemmas. Recall that $\lim_{F} W_n(R) \twoheadrightarrow R$ has principally generated kernel~\cite[Definition 16.3.1]{gabram2}, which we denote by $\varepsilon$. \begin{lem}\label{lem:absabh} Define $R^{\flat}:= \lim_{\Phi} R/p = (\lim_{F} W_{n}(R))/p$, and let $\overline{R^{\flat}}$ be the absolute integral closure of $R^{\flat}$. Then, $\overline{R^{\flat}}$ is almost weakly \'{e}tale over $R^{\flat}$ and $\overline{R}:=W(\overline{R^{\flat}})/\varepsilon$ is absolutely integrally closed over $R$. Hence, $\overline{R}/p^n$ is almost weakly \'{e}tale over $R/p^n$ for each $n \ge 1$. \end{lem} \begin{proof} By Theorem~\ref{thm:almpurcharp}, we know that $\overline{R^{\flat}}$ is a colimit of almost weakly \'{e}tale algebras, and therefore is almost weakly \'{e}tale (Corollary~\ref{cor:colimwet}). For the second claim, we refer the reader to \cite[Proposition 3.8]{perfectoid}. For the third claim, we utilise Corollary~\ref{cor:wittmain} to conclude that $\overline{R}/p^n \simeq W_n(\overline{R^{\flat}})/\varepsilon$ is almost weakly \'{e}tale over $W_n(R^{\flat})/\varepsilon \simeq R/p^n$. \end{proof} \begin{lem} $\cal{O}_S$ is uniformly almost finitely presented with uniform rank $[S[1/p]:R[1/p]]$. \end{lem} \begin{proof} See~\cite[Chap 6.3.6]{gabram}.
\end{proof} \begin{proof}[Proof of Theorem~\ref{thm:almpurchar0}] First, we know that $\cal{O}_S$ is a Pr\"{u}fer domain, so $\overline{R}$ is faithfully flat over $\cal{O}_S$ due to the fact that $\overline{R}$ is torsion-free and integral over $\cal{O}_S$.\footnote{For our purposes, a Pr\"{u}fer domain is a commutative domain whose localisations at all prime ideals are valuation rings; see~\cite[Chapter VI, Proposition 8.7.9]{nicb}. One can then see that flat modules over a Pr\"{u}fer domain are equivalent to torsion-free ones by localising at the maximal ideals and using this characterisation for valuation rings.} Therefore, for each $n \ge 1$, we conclude that $\overline{R}/p^n$ is faithfully flat over $\cal{O}_S/p^n$. By (viii) of Proposition~\ref{prop:assortlem}, we get that $\cal{O}_S/p^n$ is almost weakly \'{e}tale over $R/p^n$ for each $n$, and, combining this with the previous lemma and Theorem~\ref{thm:presented}, we conclude that $\cal{O}_S/p^n$ is almost finitely presented \'{e}tale over $R/p^n$ for each $n\ge 1$, from which one obtains, via \cite[Theorem 5.3.27]{gabram}, that $\cal{O}_S$ is almost finitely presented \'{e}tale over $R$, as desired. \end{proof} \bibliography{bip} \bibliographystyle{plain} \end{document}
2205.15313v1
http://arxiv.org/abs/2205.15313v1
Shalika models for general linear groups
\documentclass[12pt,a4paper]{amsart} \usepackage{amsmath} \usepackage{hyperref} \usepackage[all]{xy} \usepackage{amsxtra} \usepackage[usenames]{color} \usepackage{youngtab} \usepackage{amscd} \usepackage{amsthm} \usepackage{amsfonts} \usepackage{amssymb} \usepackage{mathrsfs} \usepackage{mathdots} \usepackage{mathtools} \usepackage{enumerate} \usepackage{extarrows} \usepackage{multirow} \usepackage{epsfig,float,graphicx,verbatim} \usepackage[T1]{fontenc} \usepackage{stackrel} \usepackage{MnSymbol}\usepackage{wasysym}\usepackage{babel} \oddsidemargin=0cm \evensidemargin=0cm \baselineskip 18pt \textwidth 16cm \sloppy \theoremstyle{plain} \global\long\def\frk#1{\mathfrak{#1}}\newtheorem*{theorem*}{Theorem} \newtheorem*{remark*}{Remark} \newtheorem*{example*}{Example} \newtheorem{lemma}{Lemma}[section] \newtheorem{proposition}[lemma]{Proposition} \newtheorem{remark}[lemma]{Remark} \newtheorem{example}[lemma]{Example} \newtheorem{theorem}[lemma]{Theorem} \newtheorem{definition}[lemma]{Definition} \newtheorem{notation}[lemma]{Notation} \newtheorem{corollary}[lemma]{Corollary} \newtheorem{construction}[lemma]{Construction} \newtheorem{cond}[lemma]{Condition} \newtheorem{case}{Case} \newtheorem{conjecture}{Conjecture} \newtheorem*{conjecture*}{Conjecture} \newtheorem{thm}[lemma]{Theorem} \newtheorem{prop}[lemma]{Proposition} \newtheorem{lem}[lemma]{Lemma} \newtheorem{defn}[lemma]{Definition} \newtheorem{notn}[lemma]{Notation} \newtheorem{cor}[lemma]{Corollary} \newtheorem{exm}[lemma]{Example} \newtheorem{exc}[lemma]{Exercise} \newtheorem{conj}[lemma]{Conjecture} \newtheorem{rem}[lemma]{Remark} \newtheorem{conc}[lemma]{Conclusion} \newtheorem{introtheorem}{Theorem} \renewcommand{\theintrotheorem}{\Alph{introtheorem}} \newtheorem{introcorollary}[introtheorem]{Corollary} \newtheorem{introthm}[introtheorem]{Theorem} \newtheorem{introlem}[introtheorem]{Lemma} \newtheorem{introremark}[introtheorem]{Remark} \newcommand{\s}{\sigma} \makeatletter \def\keywordsa{\xdef\@thefnmark{}\@footnotetext} \makeatother \makeatletter \newtheorem{claim}{\protect\claimname} \theoremstyle{remark} \makeatother \providecommand{\claimname}{Claim} \providecommand{\lemmaname}{Lemma} \providecommand{\notationname}{Notation} \providecommand{\propositionname}{Proposition} \providecommand{\remarkname}{Remark} \title[Uneven Shalika models for $\GL_{n+m}$]{Uneven Shalika models for general linear groups} \author{Itay Naor} \date{\today} \begin{document} \global\long\def\C#1{\mathbb{C}^{#1}}\global\long\def\P#1{\mathbb{P}^{#1}}\global\long\def\Pr{\mathbb{P}}\global\long\def\E#1{\mathbb{E}\left[#1\right]}\global\long\def\R#1{\mathbb{R}^{#1}} \global\long\def\lbar#1{\bar{\ell}_{#1}}\global\long\def\xbar{\bar{p}}\global\long\def\norm#1{\left\lVert #1\right\rVert } \global\long\def\Xhat{\hat{X}}\global\long\def\homcor#1#2#3#4{[#1:#2:#3:#4]}\global\long\def\isom{\mathbb{\cong}}\global\long\def\Perp{^{\perp}}\global\long\def\F{\mathbb{F}}\global\long\def\l{\ell}\global\long\def\a{\alpha}\global\long\def\d{\delta}\global\long\def\b{\beta}\global\long\def\s{\sigma}\global\long\def\lb{\lambda}\global\long\def\pinv{^{+}} 
\global\long\def\del{\partial}\global\long\def\ra{\rightarrow}\global\long\def\da{\downarrow}\global\long\def\mt{\mapsto}\global\long\def\r{\rho}\global\long\def\e{\varepsilon}\global\long\def\vphi{\varphi}\global\long\def\qb{\overline{\mathbb{Q}}}\global\long\def\lbd{\lambda}\global\long\def\x{\chi}\global\long\def\Om{\Omega}\global\long\def\lhat#1{\widehat{#1}}\global\long\def\xh{\widetilde{\x}}\global\long\def\cA{\mathcal{A}}\global\long\def\fa{\forall}\global\long\def\cH{\mathcal{H}}\global\long\def\ca#1{\mathcal{#1}}\global\long\def\la{\leftarrow}\newcommand{\reals}{\mathbb{R}} \global\long\def\ast{\textasteriskcentered} \newcommand{\LL}{\mathbb{L}} \newcommand{\sign}{\mathrm{sign}} \newcommand{\half}{\frac{1}{2}} \newcommand{\argmin}[1]{\underset{#1}{\mathrm{argmin}}} \newcommand{\argmax}[1]{\underset{#1}{\mathrm{argmax}}} \newcommand{\summ}{\displaystyle \sum} \newcommand{\intt}{\displaystyle\int} \newcommand{\var}{\text{Var}} \newcommand{\nchoosek}[2]{\left(\begin{array}{*{20}c}#1\\#2\end{array}\right)} \newcommand{\sfO}{\mathsf{O}} \newcommand{\ba}{\mathbf{a}} \newcommand{\be}{\mathbf{e}} \newcommand{\bx}{\mathbf{x}} \newcommand{\bw}{\mathbf{w}} \newcommand{\bg}{\mathbf{g}} \newcommand{\bb}{\mathbf{b}} \newcommand{\bu}{\mathbf{u}} \newcommand{\bv}{\mathbf{v}} \newcommand{\bz}{\mathbf{z}} \newcommand{\br}{\mathbf{r}} \newcommand{\bc}{\mathbf{c}} \newcommand{\bd}{\mathbf{d}} \newcommand{\bh}{\mathbf{h}} \newcommand{\by}{\mathbf{y}} \newcommand{\bn}{\mathbf{n}} \newcommand{\bs}{\mathbf{s}} \newcommand{\bq}{\mathbf{q}} \newcommand{\bmu}{\boldsymbol{\mu}} \newcommand{\balpha}{\boldsymbol{\alpha}} \newcommand{\bbeta}{\boldsymbol{\beta}} \newcommand{\btau}{\boldsymbol{\tau}} \newcommand{\bxi}{\boldsymbol{\xi}} \newcommand{\blambda}{\boldsymbol{\lambda}} \newcommand{\bepsilon}{\boldsymbol{\epsilon}} \newcommand{\bsigma}{\boldsymbol{\sigma}} \newcommand{\btheta}{\boldsymbol{\theta}} \newcommand{\bomega}{\boldsymbol{\omega}} \newcommand{\Lcal}{\mathcal{L}} \newcommand{\Ocal}{\mathcal{O}} \newcommand{\Acal}{\mathcal{A}} \newcommand{\Gcal}{\mathcal{G}} \newcommand{\Ccal}{\mathcal{C}} \newcommand{\Xcal}{\mathcal{X}} \newcommand{\Jcal}{\mathcal{J}} \newcommand{\Dcal}{\mathcal{D}} \newcommand{\Fcal}{\mathcal{F}} \newcommand{\Hcal}{\mathcal{H}} \newcommand{\Rcal}{\mathcal{R}} \newcommand{\Ncal}{\mathcal{N}} \newcommand{\Scal}{\mathcal{S}} \newcommand{\Pcal}{\mathcal{P}} \newcommand{\Qcal}{\mathcal{Q}} \newcommand{\Wcal}{\mathcal{W}} \newcommand{\Ld}{\tilde{L}} \newcommand{\uloss}{\ell^\star} \newcommand{\loss}{\mathcal{L}} \newcommand{\losst}{\ell_t} \newcommand{\inner}[1]{\langle#1\rangle} \renewcommand{\comment}[1]{\textcolor{red}{\textbf{#1}}} \newcommand{\vol}{\texttt{Vol}} \newcommand{\secref}[1]{Sec.~\ref{#1}} \newcommand{\subsecref}[1]{Subsection~\ref{#1}} gref}[1]{Fig.~\ref{#1}} \renewcommand{\eqref}[1]{Eq.~(\ref{#1})} \newcommand{\lemref}[1]{Lemma~\ref{#1}} \newcommand{\corollaryref}[1]{Corollary~\ref{#1}} \newcommand{\thmref}[1]{Thm.~\ref{#1}} \newcommand{\propref}[1]{Proposition~\ref{#1}} \newcommand{\appref}[1]{Appendix~\ref{#1}} \newcommand{\note}[1]{\textcolor{red}{{#1}}} \global\long\def\linf#1{\underset{#1\rightarrow\infty}{\lim}}\global\long\def\li#1#2{\underset{#1\rightarrow#2}{\lim}}\global\long\def\spnr#1#2{\mathrm{span}(#1,...,#2)}\global\long\def\spn{\mathrm{span}}\global\long\def\im{\mathrm{Im}}\global\long\def\argmin#1{\underset{#1}{\text{argmin}}} 
\global\long\def\mins#1#2{\underset{#1}{\min}\left(#2\right)}\global\long\def\maxs#1#2{\underset{#1}{\max}\left(#2\right)}\global\long\def\summ#1{\underset{#1}{\sum}}\global\long\def\intg#1{\underset{#1}{\int}}\global\long\def\sumud#1#2{\stackrel[#1]{#2}{\sum}}\global\long\def\piud#1#2{\stackrel[#1]{#2}{\Pi}}\global\long\def\intud#1#2{\stackrel[#1]{#2}{\int}}\global\long\def\cups#1#2{\stackrel[#1]{#2}{\cup}}\global\long\def\dsum#1#2{\stackrel[#1]{#2}{\oplus}} \global\long\def\rom#1{\mathrm{#1}}\global\long\def\un#1#2{\underset{#2}{#1}} \global\long\def\crt#1#2{#1\times#2}\global\long\def\bb#1{\mathbb{#1}}\global\long\def\rahat#1{\overrightarrow{#1}}\global\long\def\cPr#1#2{\mathbb{P}(#1|#2)}\global\long\def\pder#1#2{\frac{\partial#1}{\partial#2}}\newcommand{\Ind}{\operatorname{Ind}} \newcommand{\Irr}{\operatorname{Irr}} \newcommand{\ind}{\operatorname{ind}} \newcommand{\Hom}{\operatorname{Hom}} \newcommand{\tr}{\operatorname{tr}} \newcommand{\diag}{\operatorname{diag}} \newcommand{\GL}{\operatorname{GL}} \newcommand{\Mat}{\operatorname{Mat}} \global\long\def\cal#1{\mathcal{#1}} \begin{abstract} We define a generalization of Shalika models for $\GL_{n+m}(F)$ and prove that they are multiplicity-free, where $F$ is either a non-Archimedean local field or a finite field and $n,m$ are any natural numbers. In particular, we give a new proof for the case of $n=m$. We also show that the Bernstein-Zelevinsky product of an irreducible representation of $\GL_n(F)$ and the trivial representation of $\GL_m(F)$ is multiplicity-free. We relate the two results by a conjecture about twisted parabolic induction of Gelfand pairs. \end{abstract} \maketitle \keywordsa{ 2020 \emph{Mathematics Subject Classification.} Primary 20G05; Secondary 46F10, 22E50, 20C33, 22E50.\\ \emph{Key words and phrases.} Multiplicity one, Gelfand pair, finite group, l-group, linear group, distribution.} \tableofcontents \section{Introduction} \subsection{Motivation} In representation theory we are often interested to know when induced representations are multiplicity-free. In other words, we want to know when a representation induced from a representation of a subgroup contains at most one copy of any irreducible representation. Therefore we define: \begin{definition} Let $H$ be a subgroup of a group $G$ and $\psi$ a character of $H$. We say that the triple $(G,H,\psi)$ is a twisted Gelfand pair if $\Ind_{H}^{G}\psi$ is multiplicity-free. In the case of a trivial character we say that $(G,H)$ is a Gelfand pair. \end{definition} The interest in multiplicity-free representations lies in the fact that every irreducible subrepresentation has a canonical embedding in it. For this reason multiplicity free representations are also called models. Gelfand pairs have many applications, among them in the study of representations of symmetric groups (for example in \cite{sym}) and number theory (e.g. \cite{NT}). Throughout this paper, $F$ is either a finite field or a non-Archimedean local field. Let $G_{n+m}=\GL_{n+m}(F)$ be the general linear group over $F$ for $n,m\in \bb{N}$. A classical Shalika model is the representation $\Ind_{H_{n,n}}^{G_{2n}}\psi$ where: \[ H_{n,n}=\left\{ \left[\begin{array}{cc} g & u\\ & g \end{array}\right]\mid g\in \GL_{n},u\in \Mat_{n}\right\} \] and $\psi$ is a generic character of: \[ U_{n,n}=\left\{ \left[\begin{array}{cc} I_{n} & u\\ & I_{n} \end{array}\right]\mid u\in \Mat_{n}\right\} \] extended trivially to $H_{n,n}$. 
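The following example, included purely for orientation, unwinds the definition in the smallest case. \begin{example*} Take $n=1$ and fix a non-trivial additive character $\psi_{0}$ of $F$. Then \[ H_{1,1}=\left\{ \left[\begin{array}{cc} g & u\\ & g \end{array}\right]\mid g\in F^{\times},u\in F\right\} \cong F^{\times}\times F, \] and the character $\psi$ obtained by extending $u\mapsto\psi_{0}(u)$ trivially across the diagonal part is given explicitly by $\psi\left(\left[\begin{array}{cc} g & u\\ & g \end{array}\right]\right)=\psi_{0}(u/g)$. The classical Shalika model in this case is simply $\Ind_{H_{1,1}}^{\GL_{2}(F)}\psi$. \end{example*}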
Jacquet and Rallis have proved in \cite{JacRal} that classical Shalika models are multiplicity-free for p-adic fields. That is, in this case $(G_{2n},H_{n,n},\psi)$ is a twisted Gelfand pair. We define a subgroup $H_{n,m}< G_{n+m}$ generalizing the Shalika subgroup $H_{n,n}< G_{2n}$ and investigate a generalization of Shalika models. We show that the induction of a generic character of $H_{n,m}$ to $G_{n+m}$ is multiplicity-free. We call this induced representation an \textit{uneven Shalika model}. The main motivation for the second Gelfand pair studied in this work is a conjecture about twisted parabolic induction of Gelfand pairs. To describe the conjecture we consider a reductive group $G$ and assume that $P=LU$ is a Levi decomposition of a parabolic subgroup of $G$, where $L$ and $U$ are the corresponding Levi and unipotent subgroups respectively. Inspired by parabolic induction of spherical pairs, it has been conjectured that: \begin{conjecture}\label{con:dim} (Aizenbud-Gourevitch-Sayag) Let $H_0$ be a subgroup of $L$ and $\psi$ a character of $H_0U$ whose restriction to $U$ is generic. If $(L,H_0,\psi|_{H_0})$ is a twisted Gelfand pair then $(G,H_0U,\psi)$ is a twisted Gelfand pair. \end{conjecture} Embedding $\GL_{n}\times \GL_{m}$ as a Levi subgroup in $\GL_{n+m}(F)$, we will see that the conjecture ties the triple $(G_{n+m},H_{n,m},\psi)$ to a pair in $\GL_{n}\times \GL_{m}$ which we describe precisely in the next subsection. We prove in this paper that this pair is a Gelfand pair as well. \subsection{Main results} In this work we prove the Gelfand property for two pairs inspired by Shalika models. Let $G:=G_{n+m}$ and recall that $F$ is either a finite field or a non-Archimedean local field. \subsubsection{$\textsc{Uneven Shalika models}$} We define a generalization of Shalika models for $m\neq n$. Assume without loss of generality $n\leq m$. Let $P_{n,n,m-n}$ be the standard parabolic subgroup corresponding to the partition $\{n,n,m-n\}$, and let $P_{n,n,m-n}=LU$ be its Levi decomposition. Denote \[ H_{0}:=\{\diag(g,g,h) \mid g\in \GL_{n}(F),h\in \GL_{m-n}(F)\}<L \] and define the Shalika subgroup to be $H=H_{n,m}:=H_{0}U<P_{n,n,m-n}$. Choose a non-trivial character $\psi_{0}$ of the additive group of $F$. Let $\psi$ be any character of $H$ that satisfies \[ \psi(\left[\begin{array}{ccc} I_{n} & u & a\\ & I_{n} & b\\ & & I_{m-n} \end{array}\right]):=\psi_{0}(tr(u)) \] for all $a,b\in \Mat_{n,m-n}$ and $u\in \Mat_{n}$. We call any such character a twisted Shalika character. In the special case of $\psi|_{H_{0}}\equiv1$ we call $\psi$ a regular (non-twisted) Shalika character. \begin{introtheorem} \label{thm:main}The pair $(G_{n+m},H_{n,m},\psi)$ is a twisted Gelfand pair. Explicitly, \[ \dim\Hom_{G}(\pi,\Ind_{H}^{G}\psi)\leq1 \] for any $\pi\in \Irr(G_{n+m})$. \end{introtheorem} Note that in the case of $n=m$ we get the uniqueness of classical Shalika models. Our proof of theorem \ref{thm:main} is geometric in nature. \subsubsection{$\textsc{A Gelfand pair in } \GL_{n}(F)\times \GL_{m+n}(F)$} We now describe another Gelfand pair. We choose a standard parabolic subgroup corresponding to the partition $\{n,m\}$ of $n+m$: \[P=P_{n,m}:=\left\{ \left[\begin{array}{cc} g_{1} & u\\ & g_{2} \end{array}\right] \mid g_{1}\in \GL_{n}(F),g_{2}\in \GL_{m}(F),u\in \Mat_{n,m}(F)\right\}< G \] and define: \[\Delta P=\Delta P_{n,m}:=\left\{ (g_{1},\left[\begin{array}{cc} g_{1} & u\\ & g_{2} \end{array}\right])\in \GL_{n}(F) \times P_{n,m} \right\}. 
\] and let $\chi$ be a character of $\Delta P$ which is trivial on $\{(I_n,\left[\begin{array}{cc} I_n & u\\ & I_m \end{array}\right])\mid u\in \Mat_{n,m}\}.$ We will deduce the following theorem from Theorem \ref{thm:main}. \begin{introtheorem} \label{thm:summer}The pair $(\GL_n(F) \times G,\Delta P,\chi)$ is a Gelfand pair. \end{introtheorem} In the case of $\chi\equiv1$, the theorem can be formulated in another way. We denote the Bernstein-Zelevinsky product, introduced in \cite{BZ-prod}, by $\times$. \begin{introtheorem} \label{thm:erez-version}If $\pi$ is an irreducible representation of $\GL_{n}(F)$ and $1_m$ is the trivial representation of $\GL_{m}(F)$, then $\pi\times1_m$ is multiplicity-free. \end{introtheorem} \subsection{Related work} Some special cases of the theorems are already known. \subsubsection{$\textsc{Shalika models}$} The uniqueness of classical Shalika models (the $m=n$ case) was first proven for p-adic fields in \cite{JacRal} by deducing it from the uniqueness of linear models. The uniqueness of Shalika models in the case of Archimedean local fields was proven in \cite{shalikaR} by embedding them in linear models as well. In \cite{Nien} Nien outlined a direct proof using a geometric technique similar to the one in the current paper, but there is a gap in Lemma 3.9. This is explained in Appendix \ref{app:nien}. In particular, the present paper fills this gap. In the case $m=n$, the uniqueness property was proven for twisted Shalika models in \cite{ChenSun} by embedding them in twisted linear models. The generalization to $n\neq m$, as well as the proof for finite fields, are novel. Several generalizations of Shalika models to other groups have been studied. For example, the uniqueness of Shalika models for $SO(4n)$ was shown in \cite{JiQi}. \subsubsection{$\textsc{A Gelfand pair in } \GL_{n}\times \GL_{m+n}$} In the case of a non-Archimedean local field, Theorem \ref{thm:erez-version} is a special case of \cite{erez}. It is shown there that $\text{soc}(\pi\times\tau)$ is actually irreducible for any $\square$-irreducible $\tau$, and in particular for any irreducible unitary representation $\tau$. The proof in \cite{erez} uses the classification of representations of $\GL_{n}(F)$ for a non-Archimedean field. We use a more elementary method to show that $\text{soc}(\pi\times 1)$ is multiplicity-free. Our proof works over finite fields as well. Note that Theorem \ref{thm:summer} is weaker than the claim that $(\GL_{n+m},\GL_{n})$ is a Gelfand pair. This pair is indeed a Gelfand pair in the case of $n=1$ for any local field (see \cite{k1ch0}, \cite{k1chp} and \cite{k1arch}), but fails in the case of a finite field already for $n=m=1$. Another result generalizing Theorem \ref{thm:summer} is true for $n=1,2$. Here $L'$ denotes the Levi part of $P_{n,m}$. In \cite{dimaiz} it is proven that the pair $(L'\times G, \widetilde{\Delta P})$ is a Gelfand pair, where $\widetilde{\Delta P}=\{(X,Y)\in L'\times P \mid X$ is the Levi part of $Y\}$. Yaron Brodsky proved a result similar to Theorem \ref{thm:summer} for permutation groups. He showed that $(S_{n+m}\times S_n, \Delta S)$ is a Gelfand pair, where $\Delta S=\{(\s,\s') \mid \s\in S_{n+m}\text{ which preserves $[n]$ and $\s'=\s|_{[n]}$}\}$.
In Subsection \ref{sec:dec} we prove several useful lemmas that we use in Subsections \ref{sec:dense} and \ref{sec:red} to reduce the problem to a simpler problem presented in Subsection \ref{sec:genset}. Finally, in Subsection \ref{sec:simprob} we solve this simpler problem. \subsection{Acknowledgements} I wish to thank Avraham Aizenbud, Tomer Novikov, Lior Silberberg and Oksana Yanshyna for helpful discussions and suggestions. I would like to express my gratitude to my advisor Dmitry Gourevitch. The author was supported in part by the BSF grant N 2019724 and by a fellowship endowed by Mr. David Lopatie. \section{Preliminaries}\label{sec:pre} \subsection{Notations} We will use the terminology introduced in \cite{BZ} by Bernstein and Zelevinsky regarding representations of $l$-groups. \begin{itemize} \item Let $X$ be an $l$-space. Write $S(X)$ for the space of compactly supported locally constant $\bb{C}$-valued functions on $X$. These functions are called Schwartz functions. The linear functionals in the dual space $S^{*}(X)$ are called distributions. \item For a representation $(\pi,G,V)$ of an $l$-group, the subset of smooth vectors (i.e. vectors in $V$ whose stabiliser is open) is denoted by $V^{\infty}$. A representation is called smooth if $V^\infty =V$. The representation is admissible if for any open subgroup $H< G$ the subspace of $H$-invariant vectors $V^H$ is finite-dimensional. \item If $(\pi,G,V)$ is a representation and $\chi$ is a character of a subgroup $H$, we define the subspace of invariants $V^{H,\chi}:=\{v\in V \mid \pi(h)v=\chi(h)v \ \forall h\in H\}$ and the space of coinvariants $V_{H,\x}:=V/\text{span}\{\pi(h)v-\chi(h)v \mid h\in H,v\in V\}$. \item Denote by $e_{i}$ and $E_{ij}$ elements of the standard bases of $F^{N}$ and $\Mat_{N,M}$; the dimensions $M,N$ will be clear from context. \item For a set $S\subset \bb R$ and scalars $\alpha,\beta\in\bb {R}$ we denote $\alpha S+\beta=\left\{ \alpha s+\beta \mid s\in S\right\}$. We denote $[n]:=\left\{1,...,n\right\}$ as well. \item Write $$w_{k}=\left[\begin{array}{ccc} 0 & & 1\\ & \iddots\\ 1 & & 0 \end{array}\right]\in \GL_{k}.$$ \item The permutation matrices in this paper are in row representation. That is, to a permutation $\s\in S_n$ corresponds the matrix $Q\in \GL_n$ satisfying $Q_{i,\downarrow}=e_{\s(i)}$ for all $i$. \item For a permutation $p\in S_{n}$ and a set $S\subset[n]$, we denote $p(S)=\{p(i)\mid i\in S\}$. \item Let $G_{x}$ and $[x]$ denote the stabiliser and orbit of a group $G$ acting on a space $X$ at the point $x\in X$. We write $x_1\sim x_2$ if they are in the same orbit or coset. \item Given a matrix $A\in \Mat_{n}$ and a partition $n=n_{1}+...+n_{k}$, the partition of $A$ into blocks according to $\{n_{1},...,n_{k}\}$ is the partition where the block in the position $(i,j)$ has $n_{i}$ rows and $n_{j}$ columns. Accordingly, the standard parabolic subgroup corresponding to this partition, denoted by $P_{n_1,...,n_k}$, is the subgroup of upper triangular block matrices with diagonal blocks of dimensions $n_i\times n_i$. \item For a set $S$ we write $FG(S)$ for the free group on the elements of $S$. \end{itemize} \subsection{The Gelfand-Kazhdan criterion}\label{sec:GK} In this section, $G$ is a general group (not necessarily $\GL_{n+m}(F)$) and $H$ is a subgroup. As before, we say that a representation $(\pi,G,V)$ is multiplicity-free if $\dim\Hom_{G}(\rho,\pi)\leq1$ for any $\rho\in\Irr(G)$.
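For a finite group, multiplicity-freeness can be tested directly from the character table using Frobenius reciprocity: $(G,H,\psi)$ is a twisted Gelfand pair if and only if $\langle\Ind_{H}^{G}\psi,\chi\rangle\leq1$ for every irreducible character $\chi$ of $G$. The following small computational sketch (in Python; it is purely illustrative and is not used anywhere in this paper) verifies in this way that $(S_{3},S_{2})$ is a Gelfand pair.
\begin{verbatim}
# Toy illustration only: over a finite group, (G, H) is a Gelfand pair iff
# <Ind_H^G 1, chi> <= 1 for every irreducible character chi of G.  We check
# this for G = S_3 and H = S_2 = {e, (1 2)} using the hard-coded character
# table of S_3 on its conjugacy classes: {e}, transpositions, 3-cycles.
from fractions import Fraction

class_sizes = [1, 3, 2]          # sizes of the three conjugacy classes of S_3
irreducible_chars = {
    "trivial":  [1,  1,  1],
    "sign":     [1, -1,  1],
    "standard": [2,  0, -1],
}
# Character of Ind_{S_2}^{S_3}(1): the permutation character of S_3 acting on
# the three cosets S_3/S_2, i.e. the number of fixed points on {1, 2, 3}.
induced_char = [3, 1, 0]

order = sum(class_sizes)         # |S_3| = 6
for name, chi in irreducible_chars.items():
    mult = sum(Fraction(s * a * b, order)
               for s, a, b in zip(class_sizes, induced_char, chi))
    print(name, mult)            # multiplicities 1, 0, 1 -- all at most 1
\end{verbatim}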
A map $\tau:G \to G$ is called an anti-involution if $\tau\circ\tau=\text{id}_{G}$ and $\tau(gg')=\tau(g')\tau(g)$ for any $g,g'\in G$. The Gelfand-Kazhdan criterion is an important tool for identifying Gelfand pairs. We will state the versions of the criterion we will need. We first deal with the case of a finite group. Let $\psi$ be a character of $H$ and $\tau$ an anti-involution. Write $H':=\tau(H)$ and $\widetilde{H}:=H'\times H$. Define an action of $\widetilde{H}$ on $G$ by $ (h_{1},h_{2}).g=h_{1}gh_{2}^{-1} $ for $h_{1}\in H'$, $h_{2}\in H$ and $g\in G$. \begin{definition} We say that $[g]$ is $\psi$-admissible if $\psi(\tau(h_{1})h_{2}^{-1})=1$ for all $(h_{1},h_{2})\in \widetilde{H}_{g}$ and that $[g]$ is $\psi$-$\tau$-invariant if there exist $(h_{1},h_{2})\in \widetilde{H}$ with $(h_{1},h_{2}).g=\tau(g)$ and $\psi(\tau(h_{1})h_{2}^{-1})=1$. \end{definition} Now we can formulate Gelfand's trick for finite groups, which is justified by the classical argument described in \cite{aiz}. \begin{thm}\label{thm:GKfinite} Let $G$ be a finite group and $H< G$ be a subgroup. If there exists an anti-involution $\tau$ of $G$ and a character $\psi$ of $H$ such that every $\psi$-admissible coset in $\tau(H) \backslash G / H$ is $\psi$-$\tau$-invariant, then $(G,H,\psi)$ is a twisted Gelfand pair. \end{thm} Now we move to the non-Archimedean case. Assume that $G$ is an $l$-group, $H$ is a subgroup and $\tau$ is an anti-involution. Define $H'$ and $\widetilde{H}$ as before. We define the action of $\widetilde{H}$ on $G$ in the same way as in the finite case. On $S(G)$ and $S^*(G)$ we define: \begin{equation*} \begin{gathered} ((h_{1},h_{2}).f)(g)=\psi(\tau(h_{1})^{-1}h_{2})f(h_{1}^{-1}gh_{2}),\\ ((h_{1},h_{2}).T)(f)=T((h_{1}^{-1},h_{2}^{-1}).f) \end{gathered} \end{equation*} for $h_{1}\in H'$, $h_{2}\in H$,$g\in G, f\in S(G)$ and $T\in S^*(G)$. We define $\psi$-admissibility and $\psi$-$\tau$-invariance in the same way as in the finite case. We need the following result. \begin{theorem}[Gelfand-Kazhdan Criterion] \label{thm:GK-orig}\cite[Theorem 4]{GK} Let $\pi\in\Irr(G)$. If every distribution in $S^*(G)$ invariant to the action of $\widetilde{H}$ defined above is also $\tau$ invariant, then: \[\dim \Hom _G ( \pi,Ind_H^G\psi) \cdot\dim \Hom _G (\widetilde{\pi},Ind_{H'}^G(\psi\circ \tau))\leq 1.\] \end{theorem} This result is true for any $l$-group $G$. From now until the end of the section we assume $G=\GL_{n_1}(F)\times...\times \GL_{n_k}(F)$ where $F$ is a non-Archimedean field. We can deduce a stronger result in this case under certain conditions on the anti-involution $\tau$. For $(g_1,...,g_k)\in G$ we define $(g_1,...,g_k)^t=(g_1^t,...,g_k^t)$. \begin{theorem}[Application of Gelfand-Kazhdan Criterion] \label{thm:GKcrit} Let $(\pi,V)$ be an irreducible representation of $G$. Assume that there exist some fixed $a,b\in G$ such that for all $g\in G$ and $h'\in H'$ hold $\tau(g)=ag^ta^{-1}$, $b\tau(h'^{-1})b^{-1}\in H$ and $\psi(b\tau(h'^{-1})b^{-1})=\psi(\tau(h'))$. If every distribution invariant to the action of $\widetilde{H}$ defined above is also $\tau$ invariant, then: \[\dim \Hom _G ( \pi,Ind_H^G\psi)\leq 1.\] \end{theorem} \begin{proof} The proof is standard and uses the fact that $\widetilde{\pi}\isom \pi\circ \kappa$ (see \cite[Theorem 2]{GK}). \end{proof} The following lemma is a useful tool for verification of the condition of the Gelfand-Kazhdan Criterion. Let $U$ be a unipotent subgroup of $G$ and denote $U':=\tau(U)$. 
We take any character of $\widetilde H$ which coincides with $\psi$ on $U'\times U\cap \widetilde{H}$ and denote it by $\psi_u$. For this lemma we also need to assume that $H$ is an algebraic subgroup of $G$. We also define an automorphism $\iota$ of $G\times G$ by $\iota(g_1,g_2)=(\tau(g_2^{-1}),\tau(g_1^{-1}))$ and denote $\widetilde{\psi}(h_1,h_2):=\psi(\tau(h_{1})h_{2}^{-1})$. \begin{lem}\label{lem:tauinv} Assume that every $\psi_u$-admissible double coset in $H'\backslash G/H$ is $\psi$-$\tau$-invariant. Then every distribution $T\in S^*(G)$ which is $\widetilde{H}$ invariant (in relation to the action defined above) is also $\tau$ invariant. \end{lem} \begin{proof} To get this result from \cite[Theorem 6.10]{BZ}, we have to show that: \begin{enumerate} \item The action of $\widetilde{H}$ on $G$ is constructible (i.e. the graph of the action of $\widetilde H$ on $G$ is a finite union of locally closed subsets). \item For any $(h_1,h_2)\in \widetilde{H}$ there exist $(h'_1,h'_2)\in \widetilde{H}$ such that $(h_1,h_2).\tau(g)=\tau((h'_1,h'_2).g)$ for all $g \in G$. \item There exists $n_0\in\bb{N}$ and $\tilde h\in \widetilde{H}$ such that $\tau^{n_0}$ induces the same action on $G$ as $\tilde h$. \item If $Y=H'\gamma H$ is an orbit and $T\in S^*(Y)$ is a non zero $\widetilde{H}$-invariant distribution on it, then $Y$ and $T$ are $\tau$ invariant. \end{enumerate} We now verify the conditions. Firstly, (1) follows from the fact that the action is algebraic and \cite[Theorem A in 6.15]{BZ}. For (2) note that $\tilde h.\tau(g)=\tau(\iota(\tilde h).g)$ for any $\tilde h\in \widetilde{H},g\in G$. (3) is clear since $\tau^2=id$. To show (4), note that $Y\isom \widetilde{H}/\widetilde{H}_\gamma$ (recall that $\widetilde{H}_\gamma$ is the stabilizer). We note that $\widetilde{\psi}\circ\iota(\tilde h)=\widetilde{\psi}(\tilde h)$ for any $\tilde h\in \widetilde{H},g\in G$. Consider the space of Schwarz functions $S(Y)$ with the standard action. We have $ind_{\widetilde{H}_\gamma}^{\widetilde{H}}(1)\isom S(Y)$ by the definition of the compact induction functor $ind$. Therefore by Frobenius reciprocity: \[ T\in \Hom_{\widetilde{H}} (S(Y), \widetilde{\psi})\isom \Hom_{\widetilde{H}_\gamma}(\Delta_{\widetilde{H}}/\Delta_{\widetilde{H}_\gamma},\widetilde{\psi}) \] Since $T\neq 0$, we must have $\widetilde{\psi}=\Delta_{\widetilde{H}}/\Delta_{\widetilde{H}_\gamma}$ on ${\widetilde{H}_\gamma}$. Since algebraic characters are trivial on unipotent subgroup and modular characters of algebraic groups are a composition of an algebraic character and an absolute value, the character $\Delta_{\widetilde{H}}/\Delta_{\widetilde{H}_\gamma}$ is trivial on $U'\times U$. We deduce that $Y$ is $\psi_u$-admissible. By our assumption, there exists $\tilde h_0\in \widetilde H$ with $\tilde h_0.\gamma=\tau(\gamma)$ and $\widetilde{\psi}(\tilde h_0)=1$. Therefore, $Y$ is $\tau$-invariant. If $T$ is not $\tau$ invariant then $T_1:=T-\tau(T)$ is not zero and is $\widetilde{H}$-invariant as well. We note that $\tau(T_1)=-T_1$. Now, by \cite[6.12]{BZ} $T_1$ is proportional to the distribution \[ T_0(f)=\int_{\widetilde{H}/\widetilde{H}_g}f(\tilde h.g)\widetilde{\psi}^{-1}(\tilde h)d\tilde h \] where the measure $d\tilde h$ is a left invariant Haar measure on $\widetilde{H}/\widetilde{H}_g$. 
Then: \begin{align*} -T_0(f)&=\tau T_0(f)=\int_{\widetilde{H}/\widetilde{H}_g}f(\tau(\tilde h.g)) \widetilde{\psi}^{-1}(\tilde h)d\tilde h =\int_{\widetilde{H}/\widetilde{H}_g}f(\iota(\tilde h).\tau(g))\widetilde{\psi}^{-1}(\tilde h)d\tilde h\\ &=\int_{\widetilde{H}/\widetilde{H}_g}f(\iota(\tilde h)\tilde h_0.g)\widetilde{\psi}^{-1}(\tilde h)d\tilde h =c\int_{\widetilde{H}/\widetilde{H}_g}f(\tilde k.g)\widetilde{\psi}^{-1}(\tilde k)\widetilde{\psi}(\tilde h_0)d\tilde k=cT_0(f) \end{align*} using the change of variables $\tilde k=\iota(\tilde h) \tilde h_0$ and the fact that $\widetilde{\psi}(\tilde h_0)=1$. The constant $c$ is the modulus of the automorphism $\iota$, which is positive. Thus $T_0(f)=0$ for all $f\in S(G)$. Therefore $T$ is $\tau$ invariant. \end{proof} Before we move to the proof, let us remark that we can replace the condition of $\psi$-admissibility in the finite case with $\psi_u$-admissibility, as the same argument still works. \section{Proof of Theorem \ref{thm:main}}\label{sec:main} Recall that we chose $G=\GL_{n+m}$ and $$H=\left\{\left[\begin{array}{ccc} g_1 & u & a\\ & g_1 & b\\ & & g_2 \end{array}\right]\mid g_1\in \GL_{n}, g_2\in \GL_{m-n}, a,b\in \Mat_{n,m-n}, u\in \Mat_{n}\right\}.$$ We choose an anti-involution: \[ \tau(g)=\left(\begin{array}{cc} w_{2n}\\ & I_{m-n} \end{array}\right)g^{t}\left(\begin{array}{cc} w_{2n}\\ & I_{m-n} \end{array}\right) \] and consider the cosets $H^{'}\backslash G/ H$ where $H'=H_{n,m}':=\tau(H)<G$. Denote $\widetilde{H}:=H'\times H$. Recall that $\psi$ is a generic character of $H$. We define a character $\psi_u$ of $ H$ by setting $\psi_u=\psi$ on the unipotent subgroup corresponding to $P_{n,n,m-n}$ and $\psi_u|_{H_0}\equiv1$. In what follows we will prove the following proposition. \begin{prop}[Geometric statement] \label{prop:adm-inv}Every $\psi_u$-admissible double coset in $H^{'}\backslash G/H$ is $\psi$-$\tau$-invariant. \end{prop} From now on, whenever we say that a coset is admissible we mean that it is $\psi_u$-admissible unless otherwise specified. \begin{proof}[Proof of Theorem \ref{thm:main} assuming Proposition \ref{prop:adm-inv}] The conditions of Lemma \ref{lem:tauinv} are satisfied, so it only remains to verify that the anti-involution we chose satisfies the conditions in Theorem \ref{thm:GKcrit}. Choose $a=\diag(w_{2n}, I_{m-n})$ and $b=\diag(I_n,-I_n, I_{m-n}).$ A routine calculation shows that $\tau(g)=ag^ta^{-1}$ and in addition $b\tau(h'^{-1})b^{-1}\in H$ and $\psi(b\tau(h'^{-1})b^{-1})=\psi\circ\tau(h')$ for all $g\in G, h'\in H'$. Theorem \ref{thm:main} now follows from Lemma \ref{lem:tauinv} and Theorem \ref{thm:GKcrit} (and Theorem \ref{thm:GKfinite} with the aforementioned altered form of Proposition \ref{prop:adm-inv}). \end{proof} The proof of the geometric statement consists of several steps. We divide the cosets in $H'\backslash G/H$ into two subsets. In Subsection \ref{sec:dense} we prove directly for the cosets in the first subset that every admissible coset is $\psi$-$\tau$-invariant. The union of these cosets is dense in $G$. In order to solve the problem for the second subset, we define in Subsection \ref{sec:genset} a similar problem which implies the claim for the remaining cosets. \subsection{Deduction of Theorem \ref{thm:summer} from Theorem \ref{thm:main}}\label{sec:ded} In this subsection we show how the uniqueness of uneven Shalika models implies Theorem \ref{thm:summer}.
First, we note that $\Delta P_{n,m}$ can be embedded as $$H_1:=\left\{\diag(g_1,\left[\begin{matrix} g_1 & u \\ & g_2 \end{matrix}\right])\right\}$$ in $G_{n+m}$. Let $P_{n,m}=L'U'$ be the Levi decomposition of the parabolic subgroup corresponding to $\{n,m\}$. Theorem \ref{thm:summer} states that $(L',H_1,\psi|_{H_1})$ is a Gelfand pair. Assume to the contrary that $\dim \Hom_{L'}(\pi,\Ind_{H_1}^{L'}(\psi|_{H_1}))>1$ for some $\pi \in \Irr(L')$. Let $T_1,T_2$ be two linearly independent homomorphisms in $\Hom_{L'}(\pi,\Ind_{H_1}^{L'}(\psi|_{H_1}))$. Consider the map $T_1+T_2:\pi\oplus\pi\ra \Ind_{H_1}^{L'}(\psi|_{H_1})$ defined by $(v_1,v_2)\mt T_1(v_1)+T_2(v_2)$. Since the projections of the kernel to each of the coordinates are $L'$-invariant and $\pi$ is irreducible, they are either $0$ or $\pi$. We consider all the possible cases regarding the projections of $\ker (T_1+T_2)$. If both are $\pi$, we get an isomorphism that maps $v\in\pi$ to a $u\in\pi$ such that $T_1(v)+T_2(u)=0$. Using Schur's lemma we deduce that $T_1$ and $T_2$ are linearly dependent, which contradicts our assumption. If exactly one of the projections is $\pi$ we get that one of the maps is zero, which is again a contradiction. Therefore both projections are $0$ and the map $T_1+T_2$ must be injective. We have shown that $\pi\oplus\pi$ is embedded in $\Ind_{H_1}^{L'}(\psi|_{H_1})$. Tensoring with $\psi_u$ and applying the induction functor, we get that $\Ind_{L'U'}^G(\pi\otimes\psi_u)\oplus\Ind_{L'U'}^G(\pi\otimes\psi_u)$ is embedded in $\Ind_{L'U'}^G(\Ind_{H_1}^{L'}(\psi|_{H_1})\otimes\psi_u)=\Ind_{H}^G(\psi)$, showing that $\Ind_{H}^G(\psi)$ is not multiplicity free, in contradiction to Theorem \ref{thm:main}. \qed \subsection{Decomposing representatives as a direct sum}\label{sec:dec} In the reduction of Proposition \ref{prop:adm-inv} to the aforementioned similar problem, we will need to decompose matrices and solve the problem separately for each part. In order to do so, we define a notion of a direct sum of matrices and give conditions regarding its compatibility with admissibility and $\psi$-$\tau$-invariance. For any subset $S\subseteq[n]$ we will denote by $S_{i}$ the $i$-th smallest element of $S$. Define an operator $$E_{S_{1},S_{2}}(A)=\summ{\mathclap{i\leq|S_{1}|,j\leq|S_{2}|}}A_{ij}E_{(S_1)_{i},(S_2)_{j}}$$ for $S_{1},S_{2}\subset[n]$ and $A\in \Mat_{|S_{1}|,|S_{2}|}$ (for clarification, $E_{(S_1)_{i},(S_2)_{j}}$ is the standard basis matrix with $1$ at the row and column corresponding to the $i$-th smallest element of $S_1$ and the $j$-th smallest element of $S_2$, respectively). \begin{lem} \label{lem:Eprops}For any $S_{1},S_{2},S_{3}\subset[n]$ the following multiplication rules hold for the operator $E$: \[ E_{S_{1},S_{2}}(A)E_{S_{2},S_{3}}(B)=E_{S_{1},S_{3}}(AB) \] \[ E_{S_{1},S_{2}}(A)E_{S_{2}^{C},S_{3}}(B)=0. \]In addition, \[ E_{S_{1},S_{2}}(A)^{t}=E_{S_{2},S_{1}}(A^{t}), \] and if $A,B$ are invertible: \[ (E_{S_{1},S_{1}}(A)+E_{S_{1}^{C},S_{1}^{C}}(B))^{-1}=E_{S_{1},S_{1}}(A^{-1})+E_{S_{1}^{C},S_{1}^{C}}(B^{-1}) \] \end{lem} \begin{proof} The proof is a simple calculation which we omit. It is based on the fact that $E_{ij}E_{k\l}=\d_{jk}E_{i\l}$. \end{proof} We now define a direct sum of matrices by embedding the matrices into a larger matrix according to two index subsets.
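Before giving the definition, we record a quick numerical sanity check of the identities in Lemma \ref{lem:Eprops}. The following Python/NumPy sketch is purely illustrative; the index sets and matrices are arbitrary choices (and indices are $0$-based in the code).
\begin{verbatim}
import numpy as np

def E(S1, S2, A, n):
    # Embed the |S1| x |S2| matrix A into an n x n matrix, placing
    # A[i, j] at the i-th smallest index of S1 and j-th smallest of S2.
    M = np.zeros((n, n))
    for i, r in enumerate(sorted(S1)):
        for j, c in enumerate(sorted(S2)):
            M[r, c] = A[i, j]
    return M

n = 6
S1, S2, S3 = [0, 2, 5], [1, 3, 4], [0, 1, 2]
A, B = np.random.rand(3, 3), np.random.rand(3, 3)
S2c = [k for k in range(n) if k not in S2]   # complement of S2

assert np.allclose(E(S1, S2, A, n) @ E(S2, S3, B, n), E(S1, S3, A @ B, n))
assert np.allclose(E(S1, S2, A, n) @ E(S2c, S3, B, n), 0)
assert np.allclose(E(S1, S2, A, n).T, E(S2, S1, A.T, n))
\end{verbatim}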
\begin{defn} For two subsets $S_{1},S_{2}\subset[N]$ and matrices $A\in \Mat_{|S_{1}|,|S_{2}|},B\in \Mat_{N-|S_{1}|,N-|S_{2}|}$, we define their welding according to $S_1, S_2$ to be: \[ A\oplus_{S_{1},S_{2}}B:=E_{S_{1},S_{2}}(A)+E_{S_{1}^{C},S_{2}^{C}}(B) \] and, for: $$h_{1}=(h_{1}^{1},h_{1}^{2})\in \Mat_{|S_{1}|}\times \Mat_{|S_{2}|}$$ $$h_{2}=(h_{2}^{1},h_{2}^{2})\in \Mat_{N-|S_{1}|}\times \Mat_{N-|S_{2}|},$$ denote: \[h_{1}\oplus_{S_{1},S_{2}}h_{2}:=(h_{1}^{1}\oplus_{S_{1},S_{1}}h_{2}^{1},h_{1}^{2}\oplus_{S_{2},S_{2}}h_{2}^{2}).\] \end{defn} \begin{rem} Note that for any $S_{1},S_{2},S_{3}\subset[n]$ we have \begin{align*} (A\oplus_{S_{1},S_{2}}B)(C\oplus_{S_{2},S_{3}}D) & =(AC\oplus_{S_{1},S_{3}}BD)\\ (h_{1}\oplus_{S_{1},S_{2}}h_{2})(k_{1}\oplus_{S_{1},S_{2}}k_{2}) & =(h_{1}^{1}k_{1}^{1}\oplus_{S_{1},S_{1}}h_{2}^{1}k_{2}^{1},h_{1}^{2}k_{1}^{2}\oplus_{S_{2},S_{2}}h_{2}^{2}k_{2}^{2})\\ & =h_{1}k_{1}\oplus_{S_{1},S_{2}}h_{2}k_{2} \end{align*} for any $A,B,C,D$ matrices of appropriate dimensions and $h_{i},k_{i}$ tuples of matrices of dimensions as dictated by the definition of $\oplus$. \end{rem} We first show that the action of welding together two elements of $\widetilde{H}_{n,k}$ is consistent with the definitions of the characters $\psi$ and the involution $\tau$. \begin{lem} \label{lem:char-comp}Let $S\subset[n+m]$ and denote $S_{0}:=S\cap[n]$ and $\overline{S}=S\backslash[2n]-2n$. We assume $S_0=S\cap\{n+1,...,2n\}-n$. If $h_{1}\in H_{|S_{0}|,|S_{0}|+|\overline{S}|}$, $h_{2}\in H_{n-|S_{0}|,m-|S_{0}|-|\overline{S}|}$ then $h:=h_{1}\oplus_{S,S}h_{2}$ is in $H_{n,m}$ and $\psi(h)=\psi(h_{1})\psi(h_{2})$. It still holds if we replace $H$ with $H'$ (with the character $\psi\circ\tau$), and if we replace $\psi$ with $\psi_u$. \end{lem} \begin{proof} We note that, writing for $i=1,2$: \[ h_{i}=\left[\begin{array}{ccc} g_{i} & u_{i} & a_{i}\\ & g_{i} & b_{i}\\ & & k_{i} \end{array}\right] \] then \begin{align} E_{S,S}(h_{1}) & =\left[\begin{array}{ccc} E_{S_{0},S_{0}}(g_{1}) & E_{S_{0},S_{0}}(u_{1}) & E_{S_{0},\overline{S}}(a_{1})\\ & E_{S_{0},S_{0}}(g_{1}) & E_{S_{0},\overline{S}}(b_{1})\\ & & E_{\overline{S},\overline{S}}(k_{1}) \end{array}\right].\label{eq:Eh} \end{align} and $E_{S^{C},S^{C}}(h_{2})$ has a similar form. Therefore $h$ has the appropriate block structure and it is easy to verify that $h$ satisfies the other requirements in the definition of $H$. Hence $h\in H_{n,m}$. Moreover, \begin{align*} \psi_u(h) & =\psi_u(E_{S,S}(h_{1})+E_{S^{C},S^{C}}(h_{2}))\\ & =\psi_{0}\left(\tr((E_{S_{0},S_{0}}(u_{1})+E_{S_{0}^{C},S_{0}^{C}}(u_{2}))(E_{S_{0},S_{0}}(g_{1}^{-1})+E_{S_{0}^{C},S_{0}^{C}}(g_{2}^{-1})))\right)\\ & =\psi_{0}(\tr(E_{S_{0},S_{0}}(u_{1}g_{1}^{-1})+E_{S_{0}^{C},S_{0}^{C}}(u_{2}g_{2}^{-1})))\\ & =\psi(h_{1})\psi(h_{2}). \end{align*} Showing the property for $\psi$ is very similar, as is proving it for $H'$. \end{proof} \begin{lem} \label{lem:inv-comp}Assume that $S_{2}\cap[2n]=2n+1-S_{1}\cap[2n]$ and $S_{1}\backslash[2n]=S_{2}\backslash[2n]$. Then $\tau(A\oplus_{S_{1},S_{2}}B)=\tau(A)\oplus_{S_{1},S_{2}}\tau(B)$. \end{lem} \begin{proof} We note that for example: \begin{align*} \tau(E_{S_{1},S_{2}}(A))= & \diag(w_{2n},I_{m-n})(E_{S_{1},S_{2}}(A))^{t}\diag(w_{2n},I_{m-n})\\ = & \diag(w_{2n},I_{m-n})(E_{S_{2},S_{1}}(A^{t}))\diag(w_{2n},I_{m-n})\\ =&E_{S_{1},S_{2}}(\diag(w_{|S_{2}\cap[2n]|},I_{m-n})A^{t}\diag(w_{|S_{2}\cap[2n]|},I_{m-n})\\=&E_{S_{1},S_{2}}(\tau(A)). \end{align*} and use the linearity of $\tau$. 
\end{proof} We are now ready to relate the admissibility and $\psi$-$\tau$-invariance of $A$, $B$ and $A\oplus_{S_{1},S_{2}}B$. \begin{lem} \label{lem:ind-sym-G}Assume that $S_{1}$ and $S_{2}$ satisfy the conditions in Lemmas \ref{lem:char-comp} and \ref{lem:inv-comp}. Then: (i) If $A\oplus_{S_{1},S_{2}}B$ is admissible then $A$ and $B$ are admissible. (ii) If $A$ and $B$ are both $\psi$-$\tau$-invariant then $A\oplus_{S_{1},S_{2}}B$ is $\psi$-$\tau$-invariant. \end{lem} \begin{proof} Denote $x:=A\oplus_{S_{1},S_{2}}B$, $\widetilde{\psi}(h_1,h_2)=\psi(\tau(h_{1})h_{2}^{-1})$ and the same for $\widetilde{\psi}_u$. (i) Assume that $h_{1}\in \widetilde{H}_{A}$ and $h_{2}\in \widetilde{H}_{B}$. Using Lemmas \ref{lem:char-comp} and \ref{lem:Eprops} we get that $h_{1}\oplus_{S_{1},S_{2}}(I,I), (I,I)\oplus_{S_{1},S_{2}}h_{2}\in \widetilde{H}_{x}$. By Lemma \ref{lem:char-comp}, $\widetilde{\psi}_u(h_{1}\oplus_{S_{1},S_{2}}(I,I))=\widetilde{\psi}_u(h_{1})$, and by the admissibility of $x$ the left-hand side equals $1$; the same holds for $h_{2}$. Therefore, the $\psi_u$-admissibility of $x$ implies the $\psi_u$-admissibility of $A$ and $B$. The proof for $\psi$-admissibility is identical. (ii) Similarly, if $h_{1}.A=\tau(A)$ and $h_{2}.B=\tau(B)$ with $\widetilde{\psi}(h_{1})=\widetilde{\psi}(h_{2})=1$ then $(h_{1}\oplus_{S_{1},S_{2}}h_{2}).x=\tau(x)$ and $\widetilde{\psi}(h_{1}\oplus_{S_{1},S_{2}}h_{2})=\widetilde{\psi}(h_{1})\widetilde{\psi}(h_{2})=1$. Therefore, if $A$ and $B$ are both $\psi$-$\tau$-invariant then $x$ is $\psi$-$\tau$-invariant. \end{proof} In practice, we are going to choose $A=I_{k}$ for some $k$ and use Lemma \ref{lem:ind-sym-G} to reduce to the case $k=0$. In other words, under the conditions of the last lemma, in order to show that $x=E_{S_{1},S_{2}}(I_{k})+E_{S_{1}^{C},S_{2}^{C}}(B)$ is $\psi$-$\tau$-invariant whenever it is admissible, it is enough to show that $B$ is $\psi$-$\tau$-invariant whenever it is admissible. Lastly, we show how for certain matrices the problem of showing that a coset in $G=\GL_{n+m}$ is admissible or $\psi$-$\tau$-invariant is equivalent to showing these properties for a matrix in $\Mat_{2n}$. Define an operator $C:\Mat_{n+m}\ra \Mat_{2n}$ which maps a matrix to its $2n\times 2n$ top-left block, essentially cutting the matrix. We also extend the action of $\widetilde{H}$ on $\GL_{n+m}$ to an action on $\Mat_{n+m}$ and define admissibility and $\psi$-$\tau$-invariance in the same way as before. \begin{lemma}\label{lem:redclas} Let $\frac n2\leq k\leq n$ and $t\leq \min \{m-n,k\}$. The coset of a matrix of the form \[ \eta_{A_0,B_0,A'_0,B'_0} = \left[\begin{array}{cc|cc|cc} 0_k & & & A_0 & & A'_0\\ & I_{n-k} & & \\ \hline & & I_{n-k} & \\ B_0 & & & 0_k\\ \hline & & & & I_{m-n-t}\\ & & & B'_0 & &0_t \end{array}\right]\in \GL_{n+m} \] is admissible ($\psi$-$\tau$-invariant) in $\GL_{n+m}$ if and only if $C(\eta)$ is admissible ($\psi$-$\tau$-invariant) in $\Mat_{2n}$ with respect to the action of $\widetilde{H}_{n,n}$. \end{lemma} \begin{proof} We prove the claim regarding $\psi$-$\tau$-invariance. The `only if' direction is obvious. The strategy of this proof is to reduce the claim to representatives with a relatively simple form. We define: \begin{defn} We say that $\eta_{A,B,A',B'}$ is in reduced form if there exist $S_{1},S_{2}\subseteq[k]$ of size $k-t$ and a permutation matrix $P$ such that $A=E_{S_{1},S_{2}}(P)$, $A'=E_{S_{1}^{C},[t]}(I_{t})$ and $B'=E_{[t],S_{2}^{t}}(I_{t})$. \end{defn} We claim that the coset of $\eta_{A_{0},B_{0},A'_{0},B'_{0}}$ has a representative in reduced form.
To see that, note that for any matrices $$a=\left[\begin{array}{c|c} * & 0_{n-k,2k-n}\\ \hline * & * \end{array}\right],b=\left[\begin{array}{c|c} * & 0_{2k-n,n-k}\\ \hline * & * \end{array}\right]\in \GL_{k}$$ we can find $B_{1},A'_{1},B'_{1}$ such that $\eta_{A_{0},B_{0},A'_{0},B'_{0}}\sim\eta_{aA_{0}b,B_{1},A'_{1},B'_{1}}$. We choose $a,b$ such that $aA_{0}b=E_{S_{1},S_{2}}(P)$ for some $S_{1},S_{2}\subseteq[k]$ and a permutation matrix $P$. Acting with an appropriate element in $\widetilde{H}$ we get an element in reduced form. Note that $|S_{i}|=k-t$ follows now from the fact that the matrix is invertible. Since $\eta_{1}\sim\eta_{2}$ implies $C(\eta_{1})\sim C(\eta_{2})$ and the $\psi$-$\tau$-invariance property doesn't depend on the representative, it is enough to show the claim for matrices in reduced form. Assume that $\eta$ is in reduced form and $\tilde h.C(\eta)=\tau(C(\eta))$ with $\widetilde\psi(\tilde h)=1$. Then $C(\tau(\eta))=C((\tilde h \oplus_{[2n],[2n]}(I_{m-n},I_{m-n})).\eta)$. Using this property, one can find $\tilde{h}'\in\widetilde{H}_{n,m}$ which coincides with $\tilde h \oplus_{[2n],[2n]}(I_{m-n},I_{m-n})$ on the top left $2n\times2n$ block and satisfies $\tau(\eta)=\tilde{h}.\eta$. This matrix shows that $\eta$ is $\psi$-$\tau$-invariant. The proof of the part regarding admissibility is very similar. \end{proof} \subsection{Verifying Proposition \ref{prop:adm-inv} on a dense subset}\label{sec:dense} In this subsection we choose a set of representatives of the cosets $H'\backslash G/H$ and then divide the representatives into two types. We prove the geometric statement (Proposition \ref{prop:adm-inv}) for representatives of the first type. We will deal with the second type in later subsections. First, recall that $H=H_0U$ and $H_0\isom \GL_n\times \GL_{m-n}$. Therefore, there exist characters $\psi_1,\psi_2$ of $F^\times$ such that \[\psi(\diag(g,g,h))=\psi_1(\det(g))\psi_2(\det(h))\] for any $g\in \GL_n$ and $h\in \GL_{m-n}$. We denote: \[d(A,B)=\left[\begin{array}{ccc} A & B & 0\\ 0& A&0\\ 0& 0 & I \end{array}\right]\] and $\Delta(A):=d(A,0)$ and for $A,B\in \Mat_{n}$. Write $\s_{k_1,k_2,t,s}$ for \[ \left[\begin{array}{ccc|ccc|ccc} & & & & w_{k_1} & \\ & I_{n-t-k_1} & & & & \\ & & & & & & & & I_{t}\\ \hline & & & I_{n-k_2-s} & & \\ w_{k_2} & & & & & \\ & & & & & & & I_{s}\\ \hline & & & & & & I_{m-n-s-t}\\ & & & & & I_{k_2-k_1+s}\\ & & I_{k_1-k_2+t} & & & \end{array}\right] \] where \begin{align*} (k_1,k_2,t,s)\in\Omega:=\left\{(k_1,k_2,t,s) \mid \begin{array}{l} 0\leq k_1,k_2\leq n;\,0\leq s,t;\,k_2\leq t+k_1\leq n;\\k_1\leq s+k_2\leq n; \,s+t\leq m-n \end{array} \right\}. \end{align*} \begin{lemma} \label{claim:reps}Denote: \[ \gamma_{Y,Z,k_1,k_2,t,s} :=\diag(Y,I_{m})\s_{k_1,k_2,t,s}\diag(I_{n},Z,I_{m-n}). \] Then the following is a complete set of representatives of $H^{'}\backslash G/H$: \begin{align*} \{ \gamma_{Y,Z,k_1,k_2,t,s}\mid Y,Z\in \GL_{n},(k_1,k_2,t,s)\in\Omega\} \end{align*} \end{lemma} \begin{proof} Let $Q_1,Q_2$ be the standard parabolic subgroups corresponding to the partitions $\{n,n,m-n\}$ and $\{m-n,n,n\}$. Denote by $W_{n+m}$ and $W_{Q_i}$ the Weyl group of $G_{n+m}$ and the one corresponding to $Q_i$, for $i=1,2$. By the relative Bruhat decomposition, there exists a bijection $Q_1\backslash G/Q_2\longleftrightarrow W_{Q_1}\backslash W_{n+m}/W_{Q_2}$. 
The latter has the following complete set of representatives: \begin{align*} \{\left[\begin{array}{ccc|ccc|ccc} & & & & & & I_{m-n-s-t}\\ & & & & & I_{s+k_2-k_1}\\ & & I_{t+k_1-k_2} & & & \\ \hline & & & & w_{k_1} & \\ & I_{n-t-k_1} & & & & \\ & & & & & & & & I_{t}\\ \hline & & & I_{n-k_2-s} & & \\ w_{k_2} & & & & & \\ & & & & & & & I_{s}\\ \end{array}\right] \} \end{align*} with $\mid (k_1,k_2,t,s)\in\Omega$. Using $Q_1=\{\diag(I_{n},Z,I_{m-n}) \mid Z\in \GL_{n}\}\cdot H$ and \[ \left(\begin{array}{ccc} & & I_{n}\\ I_{n}\\ & I_{m-n} \end{array}\right)H'\cdot\{\diag(Y,I_{n},I_{m-n}) \mid Y\in \GL_{n}\}\left(\begin{array}{ccc} & I_{n}\\ & & I_{m-n}\\ I_{n} \end{array}\right)=Q_2 \] we get the result. \end{proof} Fix a representative $\gamma:=\gamma_{Y,Z,k_1,k_2,t,s}$. Write $$Y=\left(\begin{array}{ccc} Y_{1} & Y_{2} & Y_3\\ Y_{4} & Y_{5} & Y_6\\ Y_{7} & Y_{8} & Y_9 \end{array}\right), Z=\left(\begin{array}{ccc} Z_{1} & Z_{2} & Z_3\\ Z_{4} & Z_{5} & Z_6\\ Z_{7} & Z_{8} & Z_9 \end{array}\right)$$ with $Y_{4}\in \Mat_{k_2},Z_{4}\in \Mat_{k_1, k_2},Z_{9}\in \Mat_{s+k_2-k_1,t+k_1-k_2}$ and $Y_{9}\in \Mat_{s, t+k_1-k_2}$. The representative has the form: \[ \gamma=\left[\begin{array}{ccc|ccc|ccc} & Y_{2} & & C_1 & C_2 & C_3 & & & Y_{3}\\ & Y_{5} & & C_4 & C_5 & C_6 & & & Y_{6}\\ & Y_{8} & & C_7 & C_8 & C_9 & & & Y_{9}\\ \hline & & & Z_{1} & Z_{2} & Z_{3}\\ w_{k_2} & & & & & \\ & & & & & & & I_{s}\\ \hline & & & & & & I_{m-n-s-t}\\ & & & Z_{7} & Z_{8} & Z_{9}\\ & & I_{t+k_1-k_2} & & & \end{array}\right] \] with some blocks $C_i$ of the appropriate dimensions. We need two lemmas to rule out some non-admissible cosets. \begin{lem} \label{lem:col_t} Assume that $[\gamma]$ is admissible. \begin{enumerate}[(i)] \item Let $1\leq i\leq n+m$. If the first $n$ elements in $[\gamma]_{i,\ra}$ are zero then the elements in places $2n+1-t-k_1+k_2,...,2n$ in the row are zero as well. \item Similarly, Let $1\leq j\leq n+m$. If the column $[\gamma]_{\da,j}$ is zero in the indices $n+1,...,2n$ then it is zero in the indices $n-s+1,...,n$ as well. \end{enumerate} \end{lem} \begin{proof} \begin{enumerate}[(i)] \item For any $1\leq j\leq k_1-k_2+t$, write $A=I_{n+m}+E_{m+n-t-k_1+k_2+j,i}$ and $B=I_{n+m}-\sumud{\l=n+1}{n+m}\gamma_{i,\l}E_{n-t-k_1+k_2+j,\l}$. Observe that $A\in H'$, $B\in H$ and $A\gamma B=\gamma$. From admissibility we get that $\psi_u(A)=1=\psi_u(B)^{-1}=(\psi_{0}(-\gamma_{i,2n-t-k_1+k_2+j}))^{-1}$, i.e. $\gamma_{i,2n-t-k_1+k_2+j}=0$. \item Let $1\leq i\leq s$. Write $B=I_{n+m}+E_{j,n+m-s-t+i}$ and \[ A=I_{n+m}-\sum_{\mathclap{\l\in[n]\cup 2n+[m-n]}}\gamma_{\l,j}E_{\l,2n-s+i} .\] Again $A\in H'$, $B\in H$ and $A\gamma B=\gamma$. Therefore $\psi_u(A)=1=\psi_u(B)^{-1}=(\psi_{0}(\gamma_{n-s+i,j}))^{-1}$ and $\gamma_{n-s+i,j}=0$.\qedhere \end{enumerate} \end{proof} We have an immidiate corollary from the last lemma. \begin{cor}\label{cor:van} If $[\gamma]$ is admissible then: \begin{enumerate}[(i)] \item $Z_3$, $Z_9$, $Y_8$ and $Y_9$ are zero (unconditionally). \item $Y_2=0$ implies $C_3=0$. \item $Y_8=0$ implies $C_9=0$. \item $Z_2=0$ implies $C_8=0$. \end{enumerate} \end{cor} We rule out more cosets using the following lemma. This Lemma is adapted from \cite[Lemma 3.6]{Nien}. \begin{lemma} \label{claim:Y2=Z2}Assume that $[\gamma]$ is admissible. Then $Y_{2}=Z_{2}$. \end{lemma} \begin{proof} Let $$\tilde{X}:=\left(\begin{array}{ccc} & 0_{k_1,k_2}\\ X\\ & & 0_{t, s} \end{array}\right)\in \Mat_{n}$$ for any $X\in \Mat_{n-t-k_1,n-k_2-s}$. 
We have $ d(I_{n}, Y\tilde{X})\gamma d(I_{n},-\tilde{X}Z) =\gamma $ and by admissibility $$\tr(Y\tilde{X})=\tr(Y_2{X})=\tr(\tilde{X}Z)=\tr({X}Z_2)$$ for all $X\in \Mat_{n-t-k_1,n-k_2-s}$. \end{proof} We will finish the case of $Y_2=0$ in a later subsection. For now, we state the following proposition. \begin{prop}\label{prop:Y2} Assume $n-t-k_1,n-k_2-s>0$. If the coset of $\gamma$ is admissible and $Y_2=Z_2=0$, then it is $\psi$-$\tau$-invariant. \end{prop} We are now ready to prove Proposition \ref{prop:adm-inv} assuming Proposition \ref{prop:Y2}. \begin{proof}[Proof of Proposition \ref{prop:adm-inv} assuming Proposition \ref{prop:Y2}] We prove it by induction on $n$ (recall that we assume $n\leq m$). \textbf{Induction base}: The case of $n=0$ is easy since $H^{0,m}=H'^{0,m}=\GL_{m}(F)$, hence there is only one coset. \textbf{Induction step}: Assume that $n\geq1$. Using the notation from above, take a representative $\gamma:=\gamma_{Y,Z,k_1,k_2,t,s}$. We first deal with the case of $n-t-k_1=0$. Use Lemma \ref{lem:col_t} to deduce that columns $2n-t-k_1+k_2+1,...,2n$ must be zero. That contradicts $\gamma\in G$ unless $k_1+t-k_2=0$, which implies $k_2=n$ and $s=0$. By Lemma \ref{lem:redclas} in this case it is enough to show that the coset of the top left $2n\times 2n$ sub-matrix is $\psi$-$\tau$-invariant. This restriction has the form \[ C(\gamma)=\left[\begin{array}{cc} & A\\ w_{n} \end{array}\right]. \] Using \cite{sim}, we choose a matrix $X\in \GL_n$ such that $Xw_nAX^{-1}=A^tw_n$ we get that $$\Delta(w_nXw_n)C(\gamma)\Delta(X^{-1})=C(\gamma),$$ showing that $C(\gamma)$ is $\psi$-$\tau$-invariant. Therefore by Lemma \ref{lem:redclas}, $\gamma$ is $\psi$-$\tau$-invariant well, solving the case of $n-t-k_1=0$. Similarly, $n-k_2-s=0$ implies $n=k_1$ and is solved in the same fashion. We return to the general case and assume $n-t-k_1,n-k_2-s>0$. We have solved the case of $Y_{2}\neq Z_{2}$ in Lemma \ref{claim:Y2=Z2} and postpone the treatment of the case of $Y_{2}=Z_{2}=0$ (see Proposition \ref{prop:Y2}). It remains to solve the case of $Y_{2}=Z_{2}$ and $\text{rank} Y_{2}>0$. By Corollary \ref{cor:van}, $Y_{8}=0$ and $Z_{3}=0$. Choose matrices $A\in \GL_{n-k_2-s},B\in \GL_{n-t-k_1}$ such that \[AY_{2}B=\left[\begin{array}{cc} 0_{1, n-t-k_1-1} & 1\\ * & 0_{n-k_2-s-1,1} \end{array}\right]=:Y_{2}'.\] Denote $\gamma'=\Delta(A,I_{k+s})\gamma\Delta(I_{k},B,I_{t})$, $Y_{5}'=Y_{5}B$ and $Z_{1}'=AZ_{1}$. Choose matrices $C\in \Mat_{k,n-k-s},D\in \Mat_{n-k-t,k}$ such that last column of $Y_{5}'-CY_{2}'$ and the first row of $Z_{1}-Y_{2}'D$ are zero. Multiplying $\gamma'$ by $\Delta(\left[\begin{array}{ccc} I_{n-k_2-s}\\ -C & I_{k_2}\\ & & I_{s} \end{array}\right])$ on the left and by $\Delta(\left[\begin{array}{ccc} I_{k_2}\\ -D & I_{n-t-k_1}\\ & & I_{t+k_1-k_2} \end{array}\right])$ on the right we get: \[ \gamma\sim\left[\begin{array}{cccc|cccc|ccc} & & 1 & & * & * & * & * & & & *\\ & * & & & * & * & * & * & & & *\\ & * & & & * & * & * & * & & & *\\ & * & & & * & * & * & * & & & *\\ \hline & & & & & & 1 & \\ & & & & * & * & & *\\ w_{k_2} & & & & & & & \\ & & & & & & & & & I_{s}\\ \hline & & & & & & & & I_{m-n-s-t}\\ & & & & * & * & * & *\\ & & & I_{t-k_1-k_2} & & & & \end{array}\right]. \] Therefore: \[ \gamma\sim\left[\begin{array}{ccc|ccc|ccc} & & 1 & & & \\ M_{1} & & & M_{2} & M_{3}& & & & M_{4}\\ \hline & & & & &1 \\ M_{5} & & & M_{6} & 0&\\ & & & & & & & I_{s}\\ \hline & & & & & & I_{m-n-s-t}\\ & & & M_{7} & M_{8}&\\ & I_{t-k_1-k_2} & & & & \end{array}\right]. 
\] for some matrices $M_i$ of appropriate dimensions. Define: \[ \hat{\gamma}:=\left[\begin{array}{cc|cc|ccc} M_{1} & & M_{2} & M_{3} & & & M_{4}\\ \hline M_{5} & & M_{6} & 0\\ & & & & & I_{s}\\ \hline & & & & I_{m-n-s-t}\\ & & M_{7} & M_{8}\\ & I_{t} & & \end{array}\right]\in G_{n+m-2}. \] By Lemma \ref{lem:ind-sym-G} , $\hat \gamma$ is admissible. The induction hypothesis implies that $\hat \gamma$ is $\psi$-$\tau$-invariant, and using Lemma \ref{lem:ind-sym-G} again we deduce that $\gamma$ is $\psi$-$\tau$-invariant.\end{proof} \subsection{An alternative geometric statement}\label{sec:genset} We now describe a problem similar to Proposition \ref{prop:adm-inv}. In Subsection \ref{sec:red} we show by induction that it implies Proposition \ref{prop:Y2}. In Subsection \ref{sec:simprob} we prove the similar problem itself also by induction. Let $n\in\bb N$ and choose $k\in\bb N$ with $2k\leq n$. For a matrix $A\in \Mat_{n}$ we denote: \[ A=\left[\begin{array}{ccc} A_{1} & A_{2} & A_{3}\\ A_{4} & A_{5} & A_{6}\\ A_{7} & A_{8} & A_{9} \end{array}\right] \] with $A_{3},A_{5}\in \Mat_{k}$. Let $X:=X_{n,k}=\{x\in \Mat_{n} \mid x_{3}=0\}$. Let $P_{1},P_{2}$ be the block lower triangular subgroups of $\GL_{n}$ corresponding to the partitions $\{k,k,n-2k\}$ and $\{n-2k,k,k\}$ respectively. We enumerate the $9$ blocks of elements in each of them similarly. Define \[ T:=T_{n,k}=\{(a,b)\in P_{1}\times P_{2} \mid a_{1}=a_{5},b_{5}=b_{9},a_{9}=b_{1}\}. \] We define an action of $T$ on $X_{n,k}$: $(a,b).x=axb^{-1}$ and choose an involution $\s(x)=w_{n}x^{t}w_{n}$. We choose below a generating set $T=<E_{i}>$. Using it, we denote $F_{n,k}:=FG(\{E_{i}\})$. We define a map $\nu_{x}:F_{n,k}\ra\widetilde{H}_{n+k,n+k}$ which will translate a sequence of actions of $T$ on $X$ to an action of $\widetilde{H}$ on $G$. For an $x\in X$ we define functions $f_{x},f_x^u:F_{n,k}\ra F$ by $f_{x}(w)=\psi(\nu_{x}(w))$ and $f^u_{x}(w)=\psi_u(\nu_{x}(w))$. These functions send an action on $X$ to the value of the characters $\psi$ and $\psi_u$ on the corresponding action on $G$. \begin{defn} We say that an $x\in X$ is $*$-admissible if $x_{2}=x_{6}$ and for any $w\in F_{n,k}$ satisfying $w.x=x$ we have $f_{x}^u(w)=1$. We say that $x$ is $*$-invariant if there exist $w\in F_{n,k}$ such that $\sigma(x)=w.x$ and $f_{x}(w)=1$. \end{defn} Using these definitions, we state an new geometric statement. \begin{lem} \label{lem:Alternative-geometric-statement}(Alternative geometric statement) If $x$ is $*$-admissible then it is $*$-invariant. \end{lem} In the next subsection we show that Lemma \ref{lem:Alternative-geometric-statement} implies Proposition \ref{prop:Y2} and Subsection \ref{sec:simprob} it we prove Lemma \ref{lem:Alternative-geometric-statement}. In the reminder of this subsection, we define the map $\nu_x$ explicitly and prove a few properties of $\nu_x$, $f_x$ and $f_x^u$. We define for $X,Z\in \GL_{k}$ , $Y\in \GL_{n-2k}$, $a\in \Mat_{k},b,c^{t}\in \Mat_{n-2k,k}$: \begin{align*} E_{1}(X,Y,Z) & =(\diag(X,X,Y),\diag(Y,Z,Z))\\ E_{2}(a,b) & =\left[\begin{array}{ccc} I\\ a & I\\ b & & I \end{array}\right],E_{3}(b)=\left[\begin{array}{ccc} I\\ & I\\ & b & I \end{array}\right] \end{align*} and identify $E_{i}$ with $(E_{i},I_{n})\in T$ for $i=2,3$. Similarly, we define $E_{4}(c)=\s(E_{3}(\s(c)))$ and $E_{5}(a,c)=\s(E_{2}(\s(a),\s(c)))$ and embed them in the second coordinate of $T$. 
Clearly, \begin{align*} T= & <E_{1}(X,Y,Z),E_{2}(a,b),E{}_{3}(b),E_{4}(c),E_{5}(a,c) \mid \\ & X,Z\in \GL_{k},Y\in \GL_{n-2k},a\in \Mat_{k},b,c^{t}\in \Mat_{n-2k,k}>. \end{align*} We define a map $\nu_{A}:F_{n,k}\ra\widetilde{H}_{n+k,n+k}$ by setting: \[ \nu_{A}(E_{1}(X,Y,Z))=(\Delta(\diag(X,X,Y,Z)),\Delta(\diag(X,Y,Z,Z))) \] \[ \nu_{A}(E_{2}(a,b))=\left(\Delta\left(\begin{array}{cccc} I\\ a & I\\ b & & I\\ & & & I \end{array}\right),d\left(I_{n},\left[\begin{array}{cc} a\\ b\\ & 0_{k,n-k} \end{array}\right]\right)\right) \] and finally, writing: \[ \alpha_{A,b}:=\left[\begin{array}{cc} A_{1}b\\ A_{4}b\\ A_{7}b+bA_{4}b\\ & 0_{k,n-k} \end{array}\right] ,\] we define: \[ \nu_{A}(E_{3}(b))=\left(d\left(\left[\begin{array}{cccc} I\\ & I\\ & b & I\\ & & & I \end{array}\right],\alpha_{A,b}\right),\Delta\left(\begin{array}{ccc} I\\ b & I\\ & & I_{2k}\\ \end{array}\right)\right). \] To explain why we have defined the map in this way, we consider the map $$A\mt \eta_A:=\left[\begin{array}{cc|cc} & & & A\\ & I_{k}\\ \hline & & I_{k}\\ I_{n} & \end{array}\right].$$ We note that in each case $\eta_{E_{i}A}=\nu_{A}(E_{i})\eta_{A}$. One can extend the definition to $E_{4}$ and $E_{5}$ by applying $\s$ on both sides of $\eta_{E_{i}A}=\nu_{A}(E_{i})\eta_{A}$ for $i=2,3$ to get elements of $\widetilde{H}$ which satisfy this identity for $i=4,5$. We extend the definition to $F_{n,k}$ by $\nu_{x}(w_{1}w_{2})=\nu_{w_{2}x}(w_{1})\nu_{x}(w_{2})$ and deduce that: \begin{lemma} \label{lem:nueta} For any $w\in F_{n,k}$ holds $\eta_{w.A}=\nu_{A}(w)\eta_{A}$. \end{lemma} We will use this property in the next subsection to relate $*$-admissibility and $*$-invariance of $A$ to admissibility and $\psi$-$\tau$-invariance of $\eta_A$. We note that: \begin{align*} f_{x}: & E_{1}(X,Y,Z)\mt\psi_{1}(\frac{\det(X)}{\det(Z)}),E_{2}(a,b)\mt\psi_{0}(\tr(-a)),\\ & E_{3}(b)\mt\psi_{0}(\tr(x_{1}b)),E_{4}(c)\mt\psi_{0}(\tr(-cx_{9})),E_{5}(a,c)\mt\psi_{0}(\tr(a)) \end{align*} and that $f_{x}(w_{1}w_{2})=f_{w_{2}x}(w_{1})f_{x}(w_{2})$ for any $w_{i}\in F_{n,k}$. The same holds for $f_x^u$ apart from the value on $E_{1}(X,Y,Z)$, which is always $1$. We remark that the reason we act on $X_{n,k}$ with a free group instead of acting directly with $T_{n,k}$ is that the group $T_{n,k}$ has more relations than the corresponding actions in $\widetilde H$ preventing us from translating actions of $T_{n,k}$ to actions of $\widetilde H$ directly. We also need a version of Lemmas \ref{lem:char-comp}-- \ref{lem:ind-sym-G} for this problem. \begin{lem} \label{lem:char-comp-T}Assume that $S_{1},S_{2}\subset[n]$ satisfy the following conditions: \begin{itemize} \item $S_{1}\cap[k]=S_{0}$ and $S_{1}\cap\{k+1,...,2k\}=S_{0}+k$ for some $S_{0}\subset[k]$. \item $n+1-S_{2}\backslash[n-2k]=S_{1}\cap[2k]$. \item $S_{1}\backslash[2k]-2k=S_{2}\cap[n-2k]:=\overline{S}$. \end{itemize} Define a homomorphism $\lambda:F_{2|S_{0}|+|\overline{S}|,|S_{0}|}\times F_{n-2|S_{0}|-|\overline{S}|,k-|S_{0}|}\ra F_{n,k}$ by defining \begin{align*} (E_{i},I_{n-2|S_{0}|-|\overline{S}|}) & \mt E_{i}\oplus_{S_{1},S_{2}}I_{n-2|S_{0}|-|\overline{S}|}\\ (I_{2|S_{0}|+|\overline{S}|},E_{i}) & \mt I_{2|S_{0}|+|\overline{S}|}\oplus_{S_{1},S_{2}}E_{i} \end{align*} and extending in the usual way. Then $f_{A\oplus_{S_{1},S_{2}}B}(\lambda(w_{1},w_{2}))=f_{A}(w_{1})f_{B}(w_{2})$ for all $w_{1}\in F_{2|S_{0}|+|\overline{S}|,|S_{0}|}$, $w_{2}\in F_{n-2|S_{0}|-|\overline{S}|,k-|S_{0}|}$ and matrices $A, B$ of appropriate dimensions. The same holds for $f^u$. 
\end{lem} \begin{lem} \label{lem:inv-comp-T} Assume that $p\in \GL_{n-2k}$ is a permutation matrix which preserves the order of $\overline{S}$ and $\overline{S}^{C}:=[n-2k]\backslash \overline{S}$ (explicitly, if $i<j\in\overline{S}$ or $i<j\in\overline{S}^{C}$ then $p(i)<p(j)$) and denote $p_{1}:=I_{2k}\oplus_{[2k],[2k]}p$ and $ p_{2}:=p\oplus_{[n-2k],[n-2k]}I_{2k}$. Assume also that $n+1-S_{2}=p_{1}S_{1}$ and $n+1-S_{1}=p_{2}S_{2}$. Then $\s(A\oplus_{S_{1},S_{2}}B)=E_{1}(I_{k},p,I_{k})(\s(A)\oplus_{S_{1},S_{2}}\s(B))$. \end{lem} \begin{lem} \label{lem:ind-sym-T}Assume that $S_{1}$ and $S_{2}$ satisfy the conditions in Lemmas \ref{lem:char-comp-T} and \ref{lem:inv-comp-T} for some permutation $p\in \GL_{n-2k}$. If $A\oplus_{S_{1},S_{2}}B$ is $*$-admissible then $A$ and $B$ are $*$-admissible and if $A$ and $B$ are both $*$-invariant then $A\oplus_{S_{1},S_{2}}B$ is $*$-invariant. \end{lem} The proofs of these lemmas are very similar to the proofs of Lemmas \ref{lem:char-comp}--\ref{lem:ind-sym-G}. \subsection{Reduction of Proposition \ref{prop:Y2} to Lemma \ref{lem:Alternative-geometric-statement}}\label{sec:red} We now explain how the alternative geometric statement implies the claim for the representatives we have not solved yet. Recall that in Lemma \ref{claim:reps} and the discussion following it we defined representatives $\gamma=\gamma_{Y,Z,k_1,k_2,t,s}$ and divided $Y$ and $Z$ to blocks $Y_i,Z_i$ with $1\leq i\leq 9$. Using this notation, we remind the reader that the cosets we haven't solved yet are those with $Y_2=Z_2=0$. \begin{lemma} \label{claim:Y2=0}Assume that $[\gamma]$ is admissible and $Y_{2}=Z_{2}=0$. Then $s=t+k_1-k_2=0$. \end{lemma} \begin{proof} By Corollary \ref{cor:van}, $Y_{8}=0$. Since $\gamma$ is invertible, the columns of $Y_{5}$ must be linearly independent. We deduce that $k_2\geq n-t-k_1$. Choose a matrix $A\in \GL_{k_2}$ s.t. $AY_{5}=\left[\begin{array}{c} 0\\ I_{n-t-k_1} \end{array}\right]$. Multiplying $\gamma$ by $\Delta(I_{n-k_2-s},A,I_{s})$ on the left we get that: \[ \gamma\sim\left[\begin{array}{ccc|ccc|ccc} & 0 & & * & * & * & & & *\\ & I_{n-t-k_1} & & * & * & * & & & *\\ & 0_{s,n-t-k_1} & & * & * & * & & & *\\ \hline & & & Z_{1} & 0 & Z_{3}\\ Aw_{k_2} & & & & & \\ & & & & & & & I_{s}\\ \hline & & & & & & I_{m-n-s-t}\\ & & & Z_{7} & Z_{8} & Z_{9}\\ & & I_{t+k_1-k_2} & & & \end{array}\right] \] By Corollary \ref{cor:van}, $Z_3=0$. Similarly we deduce $k_2\geq n-k_2-s$ from the linear independence of the rows of $Z_1$ and choose $B\in \GL_{k_2}$ such that $Z_{1}B=\left[\begin{array}{cc} I_{n-k_2-s} & 0\end{array}\right]$. Applying Lemma \ref{lem:col_t} and acting with an element of $\widetilde{H}$, we get \[ \gamma\sim\left[\begin{array}{ccc|ccc|ccc} & 0 & & 0 & * & 0 & & & *\\ & I_{n-t-k_1} & & 0 & 0 & 0 & & & 0\\ & 0 & & 0 & 0 & 0_{s,t+k_1-k_2} & & & 0\\ \hline & & & I_{n-k_2-s} & 0 & 0\\ Aw_{k_2}B & & & & & \\ & & & & & & & I_{s}\\ \hline & & & & & & I_{m-n-s-t}\\ & & & 0 & * & 0\\ & & I_{t+k_1-k_2} & & & \end{array}\right]. \] Since we are in $\GL_{n+m}$, we must have $t+k_1-k_2=s=0$. \end{proof} Denote $k:=k_2=t+k_1$ and $e:=2k-n\geq 0$. Note that in the last lemma we have got that under the conditions of Proposition \ref{prop:Y2} we can assume that $\gamma$ has the form \[ \gamma = \left[\begin{array}{cc|cc|cc} & & & A & & A'\\ & I_{n-k} & & \\ \hline & & I_{n-k} & \\ B & & & \\ \hline & & & & I_{m-n-t}\\ & & & B' & & \end{array}\right] \] with some $B\in \GL_k$, $A\in \Mat_k$ and $A',B'^t\in \Mat_{k,t}$. 
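To make this block pattern concrete, the following Python/NumPy sketch (with arbitrarily chosen sizes $n=3$, $m=5$, $k=2$, $t=1$ and sample blocks; purely illustrative) assembles a matrix of exactly this shape and confirms that it lies in $\GL_{n+m}$:
\begin{verbatim}
import numpy as np

# Illustrative sizes satisfying n/2 <= k <= n and t <= min(m - n, k):
n, m, k, t = 3, 5, 2, 1
A  = np.array([[1., 2.], [3., 4.]])   # A  in Mat_k
B  = np.eye(k)                        # B  in GL_k
Ap = np.array([[1.], [0.]])           # A' in Mat_{k,t}
Bp = np.array([[0., 1.]])             # B' in Mat_{t,k}

Z = np.zeros
gamma = np.block([
  [Z((k, k)), Z((k, n-k)), Z((k, n-k)), A, Z((k, m-n-t)), Ap],
  [Z((n-k, k)), np.eye(n-k), Z((n-k, n-k)), Z((n-k, k)), Z((n-k, m-n-t)), Z((n-k, t))],
  [Z((n-k, k)), Z((n-k, n-k)), np.eye(n-k), Z((n-k, k)), Z((n-k, m-n-t)), Z((n-k, t))],
  [B, Z((k, n-k)), Z((k, n-k)), Z((k, k)), Z((k, m-n-t)), Z((k, t))],
  [Z((m-n-t, k)), Z((m-n-t, n-k)), Z((m-n-t, n-k)), Z((m-n-t, k)), np.eye(m-n-t), Z((m-n-t, t))],
  [Z((t, k)), Z((t, n-k)), Z((t, n-k)), Bp, Z((t, m-n-t)), Z((t, t))],
])
assert gamma.shape == (n + m, n + m)
assert abs(np.linalg.det(gamma)) > 1e-9   # gamma is invertible
\end{verbatim}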
By Lemma \ref{lem:redclas}, it is enough to prove the claim for cosets in $\Mat_{2n}$ which contain a representative of the form \[ \eta_{A,B}=\left[\begin{array}{cc|cc} & & & A\\ & I_{n-k}\\ \hline & & I_{n-k}\\ B & \end{array}\right]\in \Mat_{2n} \] with $B\in \GL_k$. An easy computation shows that if \[ B'=\left[\begin{array}{cc} X_1 & X_2\\ & X_3 \end{array}\right]B\left[\begin{array}{cc} X'_1 & X'_2\\ & X'_3 \end{array}\right]. \] with $X_1,X_3'\in \GL_{n-k}$ and $X_1,X_3'\in \GL_{e}$ then there exist $A'\in \Mat_{k}$ such that $\eta_{A,B}\sim\eta_{A',B'}$. Therefore we can assume \begin{equation*}\label{eq:B} B=\left[\begin{array}{cc|cc} 0 & I_{\l} & 0 & 0\\ 0 & 0 & I_{e-\l} & 0\\ \hline 0 & 0 & 0 & I_{\l}\\ I_{r} & 0 & 0 & 0 \end{array}\right] \end{equation*} for some $r\leq n-k$ and $\l:=n-k-r\leq e$. Write: \[ A=\left[\begin{array}{cc} A_{1} & A_{2}\\ A_{3} & A_{4}\\ \end{array}\right],\, B^{-1}=\left[\begin{array}{cc} B'_{1} & B'_{2}\\ B'_{3} & B'_{4}\\ \end{array}\right] \] with $A_{2},B'_{2}\in \Mat_{n-k}$. Then \[ \Delta(\left[\begin{array}{ccc} I_{n-k} & & \\ & I_{e} & \\ X & & I_{n-k} \end{array}\right] )\cdot\eta_{A,B}\cdot d(I_n, \left[\begin{array}{ccc} -B'_2X & & \\ -B'_4X & & \\ & -XA_{1} & -XA_{2} \end{array}\right] )=\eta_{A,B}. \] Therefore if $[\eta_{A,B}]$ is admissible, $\tr(-B'_2X-XA_{2})=0$ for all $X\in \Mat_{n-k}$. We deduce that $A_2=-B'_2=\left[\begin{array}{cc} 0 & -I_{r}\\ 0 & 0 \end{array}\right]$. We get that $\eta_{A,B}$ is equivalent to a matrix of the form: \[ \left[\begin{array}{ccc|ccc} & & & & & I_{r}\\ & & & & *\\ & & I_{n-k}\\ \hline & & & I_{n-k}\\ & * & \\ -I_{r} & & \end{array}\right] \] on which we use Lemma \ref{lem:ind-sym-G} with $S_1=S_2=\{[r],n-r+[2r],2n+1-[r]\}$ to get that we can assume $r=0$. We have reduced the problem to representatives of the form: \[ \eta_{A}:=\left[\begin{array}{cc|cc} & & & A\\ & I_{\l}\\ \hline & & I_{\l}\\ I_k & \end{array}\right] \] with $A\in X^{k,\l}$ (note that $\l=n-k\leq e$). Write $$A=\left[\begin{array}{ccc} A_{1} & A_{2} & 0_{\l}\\ A_{4} & A_{5} & A_{6}\\ A_{7} & A_{8} & A_{9} \end{array}\right].$$ with $A_5\in \Mat_\l$. Let $X\in \Mat_{\l}$. We denote: $$M_1=d(\left[\begin{array}{cccc} I_{\l}\\ & I_{\l}\\ & & I_{k-2\l}\\ & -X & & I_{\l} \end{array}\right],\left[\begin{array}{cc} -A_{2}X\\ -A_{5}X\\ -A_{8}X\\ & 0_{\l,k}\\ \end{array}\right])$$ $$M_2=d(\left[\begin{array}{cccc} I_{\l}\\ & I_{k-2\l}\\ X & & I_{\l}\\ & & & I_{\l} \end{array}\right],\left[\begin{array}{cccc} \\ \\ \\ XA_{5}X & XA_{4} & XA_{5} & XA_{6} \end{array}\right])$$ and observe that $ M_1\eta_{A}M_2=\eta_{A} $ for any $X\in \Mat_{\l}$. As before, we deduce $A_{2}=A_{6}$. Assume that $[\eta_{A}]$ is admissible. We want to show that $\eta_A$ is $\psi$-$\tau$-invariant. Assume that $w.A=A$ for some $w\in F_{n,k}$ with $f^u_A(w)=1$. Then, using Lemma \ref{lem:nueta}, $\eta_{w.A}=\nu_{A}(w)\eta_{A}=\eta_{A}$ and by the admissibility of $[\eta_{A}]$ we deduce that $\psi_u(\nu_{A}(w))=f_{A}^u(w)=1$. Together with $A_2=A_6$ we deduce that $A$ is $*$-admissible. Using Lemma \ref{lem:Alternative-geometric-statement} which we prove in the next Subsection we get that there exist $w'\in F_{n,k}$ such that $\sigma(A)=w'.A$ and $f_{A}(w')=1$. Therefore $\tau(\eta_{A})=\eta_{\s(A)}=\eta_{w'.A}=\nu_{A}(w')\eta_{A}$ with $\psi(\nu_{A}(w'))=f_{A}(w')=1$. This concludes the case of $Y_2=0$. \subsection{Proof of Lemma \ref{lem:Alternative-geometric-statement}}\label{sec:simprob} We prove the alternative geometric statement by induction on $n$. 
If $k=0$ then $A$ degenerates to the block $A_{7}$. We choose $Y\in \GL_{n}$ such that $YAY^{-1}=w_{n}A^{t}w_{n}$ as we did in the proof of Proposition \ref{prop:adm-inv}. Since $(Y,Y)\in T_{n,0}$ has a trivial associated character, $A$ is $*$-invariant. Assume that $k>0$ and $A=\left[\begin{array}{c|c|c} A_{1} & A_{2} & 0\\ \hline A_{4} & A_{5} & A_{6}\\ \hline A_{7} & A_{8} & A_{9} \end{array}\right]$ is $*$-admissible. Then $A_{2}=A_{6}$. Find $X,Z$ such that $XA_{2}Z^{-1}=\left[\begin{array}{cc} 0 & I_{d}\\ 0 & 0 \end{array}\right]$ for some $d\in\bb N$. We act on $A$ with $E_{1}(X,I_{n-2k},Z)$ and then with suitable $E_{2},E_{5}$ to get an element of the form: \[ A':=\left[\begin{array}{c|cc|cc} 0 & 0 & I_{d} & 0 & 0\\ A'_{1} & 0 & 0 & 0 & 0\\ \hline 0 & 0 & 0 & 0 & I_{d}\\ A'_{4} & A'_{5} & 0 & 0 & 0\\ \hline A'_{7} & A'_{8} & 0 & A'_{9} & 0 \end{array}\right] \] We now apply Lemma \ref{lem:ind-sym-T} on $S_{1}=[d]\cup k+[d]$, $S_{2}=n+1-S_{1}$ and trivial permutation $p$ to deduce that we can assume $d=0$. For any matrix $X\in \Mat_{k}$ such that $X'_1A=0$ we have $E_{2}(X,0)A=A$, hence $\tr(X)=0$. Consequently the rows of $A'_{1}$ are linearly independent. In particular $k\leq n-2k$. Similarly, the columns of $A'_{9}$ are linearly independent as well. Acting with an appropriate element of $T$ we get: \[ A'\sim\left[\begin{array}{cc|c|c} I_{k} & & \\ \hline & B_{4} & B_{5}\\ \hline & B_{7}^{1} & B_{8}^{1} & B_{9}^{1}\\ & B_{7}^{2} & B_{8}^{2} & B_{9}^{2} \end{array}\right]:=B \] with $B_{9}^{1}\in \Mat_{k}$. For any $X\in \Mat_{k}$, acting with $E_{2}(B_{5}X,0)E_{4}(\left[\begin{array}{cc} X & 0\end{array}\right])$ stabilises $B$ and has a character $\psi_{0}(\tr(-B_{5}X-X B_{9}^{1}))$. Therefore $B_{5}=-B_{9}^{1}$. Choose $U,V\in \GL_{k}$ such that $UB_{5}V=\left[\begin{array}{cc} 0 & -I_{d}\\ 0 & 0 \end{array}\right]$ for some $d$. Then \begin{align*} B\sim & E_{1}(U,\diag(U,I_{n-3k}),V)B=\left[\begin{array}{cc|c|c} I_{k} & 0 & 0 & 0\\ \hline 0 & UB_{4} & UB_{5}V & 0\\ \hline 0 & B_{7}^{1} & B_{8}^{1} & -UB_{5}V\\ 0 & B_{7}^{2} & B_{8}^{2} & B_{9}^{2}V \end{array}\right]\\ \sim & \left[\begin{array}{cccc|cc|cc} I_{d} & 0 & 0 & 0 & 0 & 0 & 0 & 0\\ 0 & I_{k-d} & 0 & 0 & 0 & 0 & 0 & 0\\ \hline 0 & 0 & 0 & 0 & 0 & -I_{d} & 0 & 0\\ 0 & 0 & C_{1} & C_{2} & 0 & 0 & 0 & 0\\ \hline 0 & 0 & 0 & 0 & 0 & 0 & 0 & I_{d}\\ 0 & 0 & C_{4} & C_{5} & C_{6} & 0 & 0 & 0\\ 0 & 0 & C_{7} & C_{8} & C_{9} & 0 & 0 & 0\\ 0 & 0 & 0 & 0 & 0 & 0 & I_{k-d} & 0 \end{array}\right]:=B' \end{align*} with $C_{2},C_{6}\in \Mat_{k-d}$. We apply Lemma \ref{lem:ind-sym-T} on $B'$ with: \begin{align*} S_{1}=&[d]\cup k+[d]\cup2k+[d],\, S_{2}=[d]\cup n-k+1-[d]\cup n+1-[d] ,\,\\p=&\left[\begin{array}{cc} & I_{n-2k-d}\\ I_{d} \end{array}\right]\end{align*} to get that it is enough to solve the problem for $d=0$. Therefore, we have \[ B'=\left[\begin{array}{ccc|c|c} I_{k} & 0 & 0 & 0 & 0\\ \hline 0 & C_{1} & C_{2} & 0 & 0\\ \hline 0 & C_{4} & C_{5} & C_{6} & 0\\ 0 & C_{7} & C_{8} & C_{9} & 0\\ 0 & 0 & 0 & 0 & I_{k} \end{array}\right]. \] For any $X\in \Mat_{k}$ we write $\beta_{C,X}=\left[\begin{array}{ccc} -C_{5}X & C_{4} & C_{5}\end{array}\right]$. We note that: \[ t:=E_{2}(C_{2}X,\left[\begin{array}{c} C_{5}\\ C_{8}\\ 0 \end{array}\right]X)E_{5}(X C_{6},X\beta_{C,X})E_{1}(I_{k},\left[\begin{array}{ccc} I\\ & I\\ X & & I \end{array}\right],I_{k}) \] stabilises $B'$ and has the character $f_{B'}^u(t)=\psi_{0}(\tr(X C_{6}-C_{2}X))$. Therefore $C_{2}=C_{6}$. 
For $C\in X^{n-2k,k}$ denote $\rho_{C}=\left[\begin{array}{ccc} I_{k}\\ & C\\ & & I_{k} \end{array}\right]\in X^{n,k}$. We define a map $\phi_{C}:F_{n-2k,k}\ra F_{n,k}$ by defining on generators: \begin{align*} \phi_{C}(E_{1}(X,Y,Z)) & =E_{1}(X,\diag(X,Y,Z),Z)\\ \phi_{C}(E_{2}(a,b)) & =E_{3}(\left[\begin{array}{c} a\\ b\\ 0 \end{array}\right])\\ \phi_{C}(E_{3}(b)) & =E_{2}(C_{1}b,\left[\begin{array}{c} C_{4}b\\ C_{7}b+bC_{4}b\\ 0 \end{array}\right])E_{1}(I,\left[\begin{array}{ccc} I\\ b & I\\ & & I \end{array}\right],I) \end{align*} and similarly for $E_{4},E_{5}$, as we did in the definition of $\nu$ above. We extend to $F_{n-2k,k}$ as before by $\phi_{C}(w_{1}w_{2})=\phi_{w_{2}C}(w_{1})\phi_{C}(w_{2})$. A simple calculation shows that $\rho_{w.C}=\phi_{C}(w)\rho_{C}$, $f_{\r_{C}}(\phi_{C}(w))=f_{C}(w)$ and $f_{\r_{C}}^u(\phi_{C}(w))=f_{C}^u(w)$ for any word $w\in F_{n-2k,k}$. We know that $\rho_{C}$ is $*$-admissible and that $C_{2}=C_{6}$. If $w.C=C$ for some $w\in F_{n-2k,k}$ then $\r_{C}=\phi_{C}(w)\r_{C}$ and $f_{C}^u(w)=f_{\r_{C}}^u(\phi_{C}(w))=1$. Therefore $C$ is $*$-admissible. By the induction hypothesis there exists $w'\in F_{n-2k,k}$ such that $w'.C=\s(w')$ and $f_{C}(w')=1$. Then $$\s(\r_{C})=\r_{\s(C)}=\r_{w.C}=\phi_{C}(w')\s(\r_{C})$$ and $f_{\r_{C}}(\phi_{C}(w'))=1$, showing that $\r_C$ is $*$-invariant. \qed \appendix \section{A gap in the proof of Lemma 3.9 in \texorpdfstring{\cite{Nien}}{[15]}}\label{app:nien} In this appendix we use the notation in \cite{Nien}. We will only state the gap and refer the reader to the relevant definitions in Lemma 3.9 there. In the proof of Lemma 3.9, it is claimed that \begin{align} s_{2} \in S_{n}\label{eq:s2} \end{align} if and only if \begin{align}\label{eq:weak} \quad Q\left(\begin{array}{l} Y_{1}\\ Y_{2} \end{array}\right)=\left(\begin{array}{c} R_{4}g_{3}C_{1}\\ p \end{array}\right),\quad\left(Y_{3},Y_{4}\right)R^{-1}=\left(p,Q_{4}g_{1}D_{1}\right). \end{align} However, this is not necessarily true. A computation shows that Condition (\ref{eq:s2}) also implies \begin{equation} B_{4}r_{2}C_{2}=0.\label{eq:add-cond} \end{equation} As a concrete example of this issue, choosing: \[ n=6,k=4,n'=1,R=\left(\begin{array}{cc|cc} & & & 1\\ & 1\\ \hline 1 & \\ & & 1 \end{array}\right),Q=\left(\begin{array}{cc|cc} & & & 1\\ & 1\\ \hline & & 1\\ -1 & \end{array}\right) \] and writing $g_{3}=\left(\begin{array}{cc} r_{2}^{1} & r_{2}^{2}\\ 0 & 0 \end{array}\right)$, we get that (\ref{eq:add-cond}) implies $r_{2}^{1}=0$. In contrast, (\ref{eq:weak}) has a solution for any $r_2^1\in \Mat_{n''}$. To demonstrate how it affects the rest of the proof, later it is shown that for any $g_{3}$ satisfying (\ref{eq:weak}) holds $$\left(\begin{array}{cc} r_{2}^{1} & r_{2}^{2}\\ 0 & 0 \end{array}\right)\left(\begin{array}{cc} V_{1}T_{2}-1\, & 0\\ V_{2}T_{2} & 0 \end{array}\right)=0$$ and then it is deduced there that $V_{1}T_{2}=1$. This deduction is not possible using (\ref{eq:s2}) since (\ref{eq:s2}) implies (\ref{eq:add-cond}) and in particular $r_{2}^{1}=0$. \begin{thebibliography}{Nien} \bibitem{aiz}Aizenbud, A. (2020). The Gelfand-Kazhdan criterion as a necessary and sufficient criterion. arXiv preprint arXiv:2009.04230. \bibitem{k1chp}Aizenbud, A., Avni, N. and Gourevitch, D. (2012). Spherical pairs over close local fields. Commentarii Mathematici Helvetici, 87(4), 929-962. \bibitem{dimaiz}Aizenbud, A. and Gourevitch, D. (2012). Multiplicity free Jacquet modules. Canadian Mathematical Bulletin, 55(4), 673-688. \bibitem{k1arch}Aizenbud, A. 
and Gourevitch, D. (2009). Multiplicity one theorem for $({\rm GL} _ {n+ 1}({\mathbb {R}}),{\rm GL} _ {n}({\mathbb {R}})) $. Selecta Mathematica, 15(2), 271-294. \bibitem{k1ch0} Aizenbud, A., Gourevitch, D., Rallis, S. and Schiffmann, G. (2010). Multiplicity one theorems. Annals of Mathematics, 1407-1434. \bibitem{shalikaR}Aizenbud, A., Gourevitch, D. and Jacquet, H. (2009). Uniqueness of Shalika functionals: the Archimedean case. Pacific journal of mathematics, 243(2), 201-212. \bibitem{BZ-prod} Bernstein, J. and Zelevinsky, A. V. (1977). Induced representations of reductive ${\mathfrak {p}} $-adic groups. I. In Annales scientifiques de l'École normale supérieure (Vol. 10, No. 4, pp. 441-472). \bibitem{BZ} Bernstein, J. and Zelevinski, A. V. (1976). Representations of the group GL(n,F) where F is a non-Archimedean local field. Uspekhi Matematicheskikh Nauk, 31(3), 5-70. \bibitem{ChenSun} Chen, F. and Sun, B. (2020). Uniqueness of twisted linear periods and twisted Shalika periods. Science China Mathematics, 63(1), 1-22. \bibitem{erez} Lapid, E. and Minguez, A. (2018). Geometric conditions for $\square$-irreducibility of certain representations of the general linear group over a non-archimedean local field. Advances in Mathematics, 339, 113-190. \bibitem{GK}Gelfand, I. M. and Kajdan, D. A. (1975). Representations of the group GL(n,K) where K is a local field. In Lie groups and their representations (pp. 95-118). \bibitem{NT}Gross, B. H. (1991). Some applications of Gelfand pairs to number theory. Bulletin (New Series) of the American Mathematical Society, 24(2), 277-301. \bibitem{JacRal}Jacquet, H. and Rallis, S. (1996). Uniqueness of linear periods. Compositio Mathematica, 102(1), 65-123. \bibitem{JiQi}Jiang, D. and Qin, Y. (2007). Residues of Eisenstein series and generalized Shalika models for SO4n. J. Ramanujan Math. Soc, 22(2), 1-33. \bibitem{Nien}Nien, C. (2009). Uniqueness of Shalika Models. Canadian Journal of Mathematics, 61(6), 1325-1340. \bibitem{sym}Okounkov, A. and Vershik, A. (1996). A new approach to representation theory of symmetric groups. Selecta Mathematica New Series, 2(4), 581-606. \bibitem{sim}Taussky, O. and Zassenhaus, H. (1959). On the similarity transformation between a matrix and its transpose. Pacific Journal of Mathematics, 9(3), 893-896. \end{thebibliography} {{ \bigskip \footnotesize Itay Naor, \textsc{Faculty of Mathematics and Computer Science, The Weizmann Institute of Science, POB 26, Rehovot 76100, ISRAEL}\par\nopagebreak \textit{E-mail address}: \texttt{[email protected]} }} \end{document}
2205.14689v1
http://arxiv.org/abs/2205.14689v1
Integral solutions of certain Diophantine equation in quadratic fields
\documentclass[12pt, a4paper,reqno]{amsart} \vsize=21.1truecm \hsize=15.2truecm \vskip.1in \usepackage{amsmath,amsfonts,amssymb} \newenvironment{dedication} {\vspace{0ex}\begin{quotation}\begin{center}\begin{em}} {\par\end{em}\end{center}\end{quotation}} \theoremstyle{plain} \newtheorem{theorem}{Theorem} \newtheorem{lemma}{Lemma} \newtheorem{corollary}{Corollary}[section] \newtheorem{proposition}{Proposition}[section] \theoremstyle{definition} \newtheorem{eg}{Example} \theoremstyle{remark} \newtheorem{remark}{Remark} \renewcommand{\Re}{{\mathrm Re \,}} \renewcommand{\Im}{{\mathrm Im \,}} \numberwithin{equation}{section} \numberwithin{lemma}{section} \numberwithin{theorem}{section} \usepackage{amsmath} \usepackage{amsfonts} \usepackage{amssymb} \usepackage{amssymb, amsmath, amsthm} \usepackage[breaklinks]{hyperref} \newtheorem{exa}{Example} \newtheorem*{rem}{Remark} \usepackage{graphicx} \usepackage{amsthm} \newtheorem{definition}{Definition} \begin{document} \title[A family of elliptic curves]{Integral solutions of certain Diophantine equation in quadratic fields} \author{Richa Sharma} \address{Richa Sharma @Kerala School of Mathematics, Kozhikode-673571, Kerala, India} \email{[email protected]} \keywords{Elliptic curves, Diophantine equation} \subjclass[2010]{11D25, 11D41, 11G05} \maketitle \begin{abstract} \noindent Let $K= \mathbb{Q}(\sqrt{d})$ be a quadratic field and $\mathcal{O}_{K}$ be its ring of integers. We study the solvability of the Diophantine equation $r + s + t = rst = 2$ in $\mathcal{O}_{K}$. We prove that this system is solvable in $\mathcal{O}_{K}$ only for $d= -7, -1, 17$ and $101$. \end{abstract} \section{\textbf{Introduction}} In 1960, Cassels \cite{Cassels} proved that the system of equations \begin{equation} \label{a} r + s + t = r s t = 1, \end{equation} is not solvable in rational numbers $r,s$ and $t$. Later, in 1982, Small \cite{Charles} studied the solutions of \eqref{a} in the rings $\mathbb{Z}/m\mathbb{Z}$ and in the finite fields $\mathbb{F}_{q}$ where $q = p^{n}$ with $p$ a prime and $n \ge 1$. Further, in 1987, Mollin et al. \cite{Mollin} considered \eqref{a} in the ring of integers of $K=\mathbb{Q}(\sqrt{d})$ and proved that solutions with $r,s$ and $t$ units in $\mathcal{O}_K$ exist if and only if $d=-1,2$ or $5$. Bremner \cite{Cubic, Quartic}, in a series of two papers, determined all cubic and quartic fields whose ring of integers contains a solution to \eqref{a}. Later, in 1999, Chakraborty and Kulkarni \cite{Kalyan} also studied \eqref{a} in the ring of integers of quadratic fields, reproducing the findings of Mollin et al. \cite{Mollin} for the original system by a different technique. Extending the study further, we consider the equation \begin{equation} \label{1} r + s + t = rst = 2. \end{equation} The case in which the sum and the product of the numbers equal $1$ is of natural interest, whereas the analogous question for other natural numbers is equally curious. The method adopted here may not be suitable for a general $n$ in place of $2$, since for each particular $n$ the system gives rise to a particular elliptic curve, which may have a different torsion subgroup and rank. The next case, i.e. when the sum and the product equal $3$, is discussed in the last section.
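For instance, the triple $\bigl(1,\tfrac{1-\sqrt{-7}}{2},\tfrac{1+\sqrt{-7}}{2}\bigr)$, which reappears in the proof of Theorem \ref{thm1} below, already satisfies \eqref{1}. A quick symbolic check (a Python/SymPy sketch, purely illustrative) is:
\begin{verbatim}
from sympy import sqrt, simplify, I

# Solution for d = -7 (listed in the proof of the main theorem below):
r, s, t = 1, (1 - sqrt(-7)) / 2, (1 + sqrt(-7)) / 2
assert simplify(r + s + t - 2) == 0 and simplify(r * s * t - 2) == 0

# Solution for d = -1, with r = 2:
assert simplify(2 + I - I - 2) == 0 and simplify(2 * I * (-I) - 2) == 0
\end{verbatim}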
To begin with, we perform a suitable change of variables and transform \eqref{1} into an elliptic curve with the Weierstrass form \begin{equation} \label{2} E_{297}: Y^2=X^3+135 X+297 \end{equation} and then study $E_{297}$ in the ring of integers of $K = \mathbb{Q}(\sqrt{d})$.\begin{remark} We transform \eqref{1} into an elliptic curve \eqref{2} to show that one of $r,s,t$ has to belong to $\mathbb{Q}$ (shown in \S3). \end{remark} System \eqref{1} gives rise to the quadratic equation (whose roots are $s$ and $t$) $$ x^{2}-(2-r)x+\frac{2}{r}=0,~r \neq 0, $$ with discriminant \begin{equation} \label{r} \Delta = \frac{r(r^3-4r^2+4r-8)}{r^{2}}. \end{equation} At first glance there are infinitely many choices for the quadratic fields, contributed by each $r$ of the above form, where the system could have solutions. The main result of this article is that the only possibilities are $r = \pm 1, 2$ and $-8$. Thus \eqref{1} is solvable only in $K=\mathbb{Q}(\sqrt{d})$ with $d = -7, -1, 17$ and $101$. Also, the solutions are given explicitly. Throughout this article we denote `the point at infinity' of an elliptic curve by ${\mathcal{O}}$. Now we state the main result of the paper. \begin{theorem} \label{thm1} Let $ K = \mathbb{Q}(\sqrt{d})$ be a quadratic field and $\mathcal{O}_{K}$ denote its ring of integers. Then the system $$ r + s + t = rst = 2 $$ has no solution in $\mathcal{O}_K$ except for $d = -7, -1, 17$ and $ 101$. \end{theorem} In \S 4 we discuss the rank of $E_{297}$ over $\mathbb{Q}$ and over the quadratic fields of interest. \section{Preliminaries} In this section we mention some results which are needed for the proof of Theorem \ref{thm1}. First we state a basic result from algebraic number theory.\\ \begin{theorem}\label{rs1} Let $K=\mathbb{Q}(\sqrt{d})$ with $d$ a square-free integer. Then $$ \mathcal{O}_K=\begin{cases} \mathbb{Z}[\frac{1+\sqrt{d}}{2}] {\ \text{ if }\ d\equiv 1\pmod 4,}\\ \mathbb{Z}[\sqrt{d}]~~ {\ \text{ if }\ d\equiv 2, 3\pmod 4.} \end{cases} $$ \end{theorem} We study the solutions of a family of elliptic curves defined over $\mathbb{Q}$ in the ring of integers of a quadratic field. Let $K= \mathbb{Q}(\sqrt{d})$ and let $s'$ be the conjugate of an element $s\in K$ over $\mathbb{Q}$. Further, let $R = \mathcal{O}_{K}[S^{-1}]$, where $S$ is some finite set of primes in $\mathcal{O}_{K}$. Thus $ \mathcal{O}_{K} \subset \mathcal{O}_{K}[S^{-1}] \subset K$. \\ Laska \cite{Laska} considered the equation (for $r \in \mathbb{Z}$ and $r \neq 0$) $$ \Gamma_{r}:~ y^2 = x^3 -r, $$ which defines the Weierstrass form of an elliptic curve (call it $E_{r}$) over $\mathbb{Q}$. For an elliptic curve $E$ over $\mathbb{Q}$ the ``trace map'' $$ \sigma:E(K) \longrightarrow E(\mathbb{Q}) $$ is given by $$ \sigma(\mathcal{P}) = \mathcal{P} \oplus \mathcal{P}'. $$ Here $\mathcal{P}'$ is the conjugate of the element $\mathcal{P} \in E(K)$ arising from the conjugation in $K$ and $\oplus$ is the usual elliptic curve addition. Laska considered the ``trace map'' for $\Gamma_r$ and called it $$ \sigma_{r,R}: \Gamma_{r}(R) \rightarrow \Gamma_{r}(\mathbb{Q}) \cup \{\mathcal{O}\}. $$ If $r,R$ are fixed we simply write $\sigma$ instead of $\sigma_{r,R}$. The aim was to study $\sigma^{-1}(P)$ for a given $P \in \Gamma_{r}(\mathbb{Q}) \cup \{\mathcal{O}\}$. He divided this inverse image set into two parts and called them `exceptional' and `non-exceptional' respectively.
These two sets are described as follows:\\ $\bullet$~an element $\mathcal{P} = (s,t) \in \Gamma_{r}(R)$ with $s \neq s'$ is called an exceptional solution of $\Gamma_{r}$ in $R$;\\ $\bullet$~the non-exceptional solutions of $\Gamma_{r}$ in $R$ are those contained in $\sigma^{-1}(P)$ for a given $P \in \Gamma_{r}(\mathbb{Q}) \cup \{\mathcal{O}\}$. If $\mathcal{P}=(s,t) \in \Gamma_{r}(R)$ is a non-exceptional solution then $s=s'$, and from the Weierstrass equation that implies $t= \pm t'$. Thus if $P = \mathcal{O}$, then all candidates in $\sigma^{-1}(P)$ are non-exceptional. More precisely, one can show that \begin{eqnarray} && \sigma^{-1}(\mathcal{O})= \{(z^{-1}p, z^{-2}q\theta) : (p, q) \in \Gamma_{z^{3}r}(R \cap \mathbb{Q}), \nonumber\\ &&\hspace*{20mm} p \in z(R \cap \mathbb{Q}), q \in z^2(R \cap \mathbb{Q})\} \end{eqnarray} where for simplicity it is assumed that $\theta^{-1} \in R$. If $P \neq \mathcal{O}$, then the non-exceptional solutions $\mathcal{P}$ contained in $\sigma^{-1}(P)$ are exactly given by the conditions $$ \mathcal{P} \in \Gamma_{r}(R \cap \mathbb{Q}), \mathcal{P} \oplus \mathcal{P} = P. $$ Thus the non-exceptional solutions of $\Gamma_{r}$ in $R$ which are contained in $\sigma^{-1}(P)$ are obtained by solving the equations $\Gamma_{z^{3}r}$, respectively $\Gamma_{r}$, in $R \cap \mathbb{Q}$. Hence either $\mathcal{P} \in \Gamma_{r}(\mathbb{Q})$ or $\mathcal{P} \oplus \Bar{\mathcal{P}} = \mathcal{O}$. Here we substitute $\Gamma_{r}$ by $E_{297}$ and $R$ by $\mathcal{O}_{K}$ to study the solutions of \eqref{1} in $\mathcal{O}_{K}$ by pulling back the elements of $E_{297}(\mathbb{Q})$ using the above-mentioned technique.\section{Proof of Theorem \ref{thm1}} \begin{proof} Let us first transform the system \eqref{1} to the desired Weierstrass form $E_{297}$. The system is \begin{equation*} r + s + t = rst = 2. \end{equation*} Substituting the value of $t$ from the product equation and simplifying, it becomes \begin{equation*} s + r + \frac{2}{rs} = 2. \end{equation*} Now substituting $r = - 2/x$ and $ s = -y/x $ in the last equation gives \begin{equation} \label{c} y^{2} + 2y + 2xy = x^{3}. \end{equation} Putting $x = x_{1} - \frac{1}{2}$ and $ y = \frac{y_{1}}{2} - x_{1} - \frac{1}{2}$ in \eqref{c} removes the $xy$ term: \begin{equation} \label{d} y_{1}^{2} = 4x_{1}^{3} - 2 x_{1}^{2} + 7 x_{1} + \frac{1}{2}. \end{equation} Further substituting $ x_{1}= x_{2} + \frac{1}{6} $ and $y_{1}=y_{2}$ removes the $x_{1}^{2}$ term from \eqref{d}: \begin{equation} \label{e} y_{2}^{2}= 4x_{2}^{3} + \frac{20}{3} x_{2} + \frac{44}{27}. \end{equation} Now again putting $y_{2}= Y_{1}/27$ and $x_{2} = X_{1}/9$ gives \begin{equation} \label{f} Y_{1}^{2} = 4X_{1}^{3} + 540 X_{1} + 1188. \end{equation} Finally, substituting $Y_{1}=2Y$ and $X_{1}=X$ gives the required Weierstrass form \begin{equation} \label{g} E_{297}: Y^2=X^3+135 X+297. \end{equation} Here with the help of SAGE \cite{Ss45} we can conclude that $$ E_{297}(\mathbb{Q})_{tors} \cong \mathbb{Z}_{3} = \left\lbrace {\mathcal{O},(3,\pm{27}) }\right\rbrace $$ and the $\mathbb{Q}$-rank of $E_{297}$ is zero (we give a mathematical proof of this fact in \S4). Thus $\mathcal{O}$ and $(3,\pm{27})$ are the only $\mathbb{Q}$-rational points of \eqref{g}. It is not difficult to see that the inverse transformation $$ r = 18/(3-X) ~~\mbox{and}~~ s = (Y - 3X - 18)/(3(3 - X)) $$ allows us to pass from \eqref{g} to \eqref{1}. Before proceeding to the final leg of the proof of Theorem \ref{thm1}, we record a short numerical sanity check of the above change of variables, and then make a couple of claims and give their proofs.
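The following Python/SymPy sketch (purely illustrative; the variable names are ours) composes the substitutions above and confirms that \eqref{c} is carried, up to the overall factor $1/729$, to \eqref{g}, and that $(3,27)$ indeed lies on $E_{297}$.
\begin{verbatim}
from sympy import symbols, Rational, expand

X, Y = symbols('X Y')

# Composition of the substitutions:
#   x = x1 - 1/2, x1 = x2 + 1/6, x2 = X1/9, X1 = X   =>  x = X/9 - 1/3
#   y = y1/2 - x1 - 1/2, y1 = y2 = Y1/27, Y1 = 2Y    =>  y = Y/27 - x - 1
x_sub = X / 9 - Rational(1, 3)
y_sub = Y / 27 - x_sub - 1

lhs = y_sub**2 + 2*y_sub + 2*x_sub*y_sub - x_sub**3   # equation (c)
assert expand(729*lhs - (Y**2 - (X**3 + 135*X + 297))) == 0

assert 27**2 == 3**3 + 135*3 + 297   # (3, 27) lies on E_297
\end{verbatim}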
\noindent {\textbf{Claim 1}}: One of $r,s,t$ satisfying \eqref{1} must belong to $\mathbb{Q}$. \begin{proof}[Proof of Claim 1] Let $\mathcal{P}\in E_{297}(K)$ be the point corresponding to a solution $(r,s,t)$ of \eqref{1}. Two cases need to be considered. Case I: $\mathcal{P}$ is non-exceptional. In this case either $\mathcal{P} \in E_{297}(\mathbb{Q})$ or $\mathcal{P} \oplus \Bar{\mathcal{P}} = \mathcal{O}$. If $\mathcal{P} \in E_{297}(\mathbb{Q})$ then this implies $r \in \mathbb{Q}$. Let $\mathcal{P} \oplus \Bar{\mathcal{P}} = \mathcal{O}$ and $\mathcal{P} =(a + b \sqrt{d}, k+l \sqrt{d})$. As $\mathcal{P}$ is non-exceptional, $b = 0$, and since $\mathcal{P} \oplus \Bar{\mathcal{P}} = \mathcal{O}$ we also get $k=0$. Thus $\mathcal{P}= (a, l \sqrt{d})$ and in this case too $$ r = 18/(3-a) \in \mathbb{Q}. $$ Case II: Now if $\mathcal{P}$ is exceptional then $\mathcal{P} + \Bar{\mathcal{P}} = (3, \pm 27)$. The curve \eqref{g} has exactly three rational points and the non-trivial ones are of order $3$. Let us call them $\mathcal{O}, \Omega$ and $2 \Omega$. If $\mathcal{P} + \Bar{\mathcal{P}} = \Omega$, then clearly $\mathcal{P} + \Omega$ is non-exceptional, since $$ (\overline{\mathcal{P} + \Omega} )+ \mathcal{P} + \Omega = \overline{\mathcal{P}} + \mathcal{P} + 2\Omega = 3 \Omega = \mathcal{O}. $$ Now if $\mathcal{P} + \overline{\mathcal{P}} = 2\Omega$, as before we can show that $\mathcal{P} + 2 \Omega$ is also non-exceptional. As the claim is valid for non-exceptional elements, it is true for $\mathcal{P} + \Omega$ and $\mathcal{P} + 2 \Omega$. Hence it is true for $\mathcal{P}$ itself. \end{proof} \noindent Hence without loss of generality we assume that $r \in \mathbb{Q}$. \noindent {\textbf{Claim 2}}: The only possibilities for $r$ satisfying the system $$ r + s + t = rst = 2 $$ in $\mathcal{O}_K$ are $\pm 1, 2$ and $-8$. \begin{proof}[Proof of Claim 2] As $r \in \mathbb{Q}$ and we are looking for solutions in $\mathcal{O}_{K}$, we have $r \in \mathbb{Z}$. Three possibilities need to be considered: \begin{itemize} \item If $r= \pm 1$: in this case solutions exist. \item If $r$ is odd (and $r\neq\pm1$): in this case the denominator in \eqref{r} is a multiple of an odd number, but by Theorem \ref{rs1} the only denominators occurring in $\mathcal{O}_{K}$ are $1$ and $2$. So in this case there does not exist any solution in $\mathcal{O}_{K}$. \item If $r$ is even: in this case, save for $r=2$ and $-8$, the denominator in \eqref{r} is a multiple of $2$, and in some other cases the denominator is $1$ but $d \equiv 1 \pmod 4$. Thus again by Theorem \ref{rs1}, in this case too we do not (except for $r= 2, -8$) get any solution in $\mathcal{O}_{K}$. \end{itemize} Thus, except for the values $r = \pm 1, 2$ and $-8$, this system of equations is not solvable in the ring of integers of any quadratic field. \end{proof} \noindent We are now in a position to complete the proof of Theorem \ref{thm1}. We deal with the four possibilities for $r$ separately. \noindent When $r = 1$, from \eqref{1} we have $s+t = 1$ and $st = 2$. Thus we get $$ (s, t) = (\frac{1-\sqrt{-7}}{2},\frac{1+\sqrt{-7}}{2}) ~\mbox{and}~ (\frac{1+\sqrt{-7}}{2},\frac{1-\sqrt{-7}}{2}). $$ When $r = -1$, we have $s + t = 3$ and $st = -2$. In this case $$ (s, t) = (\frac{3-\sqrt{17}}{2},\frac{3+\sqrt{17}}{2}) ~\mbox{and}~ (\frac{3+\sqrt{17}}{2},\frac{3-\sqrt{17}}{2}). $$ Similarly, when $r=2$ and $-8$, we get $$ (s,t)= (i,-i),\ (-i,i),\ (\frac{10+\sqrt{101}}{2}, \frac{10-\sqrt{101}}{2}) $$ $$ \mbox{and}~ (\frac{10-\sqrt{101}}{2}, \frac{10+\sqrt{101}}{2}).
$$ To conclude, $$ (1,\frac{1-\sqrt{-7}}{2},\frac{1+\sqrt{-7}}{2}), (1,\frac{1+\sqrt{-7}}{2},\frac{1-\sqrt{-7}}{2}), $$ $$ (-1,\frac{3-\sqrt{17}}{2},\frac{3+\sqrt{17}}{2}), (-1, \frac{3+\sqrt{17}}{2},\frac{3-\sqrt{17}}{2}), $$ $$ (-8,\frac{10+\sqrt{101}}{2}, \frac{10-\sqrt{101}}{2}), (-8,\frac{10-\sqrt{101}}{2}, \frac{10+\sqrt{101}}{2}), $$ $$ (2,i,-i)~\mbox{and}~ (2,-i,i) $$ are the only solutions of \eqref{b} in $\mathcal{O}_K$. \end{proof} \section{Rank of $E_{297}$} In this section we discuss the rank of $E_{297}$ over $\mathbb{Q}$ and over $\mathbb{Q}(\sqrt{d})$ for $d=-7, -1, 17$ and $101$. \begin{lemma} The rank of $E_{297}(\mathbb{Q})$ is zero. \end{lemma} \begin{proof} Suppose, if possible, that the rank of $E_{297}(\mathbb{Q})$ is non-zero, so that \eqref{g} has infinitely many rational solutions. Let $(X,Y) = (\frac{x_{1}}{x_{2}}, \frac{y_{1}}{y_{2}})$, with $(x_{1},x_{2})=(y_{1},y_{2})=1$, be any such solution. Putting these values of $X$ and $Y$ in \eqref{g}, we obtain \begin{equation} \label{h} y_{1}^{2}x_{2}^{3} = x_{1}^{3} y_{2}^{2} + 135 x_{1} y_{2}^{2} x_{2}^{2} + 297 y_{2}^{2}x_{2}^{3}. \end{equation} Suppose first that $y_{2}$ has a prime divisor $p$, and write $y_{2} = p^{\alpha} y_{22}$, where $\alpha \in \mathbb{Z}$, $\alpha \geq 1$ and $(p,y_{22}) =1$. Substituting this value of $y_{2}$ in \eqref{h}, the right-hand side of \eqref{h} is divisible by $p$, and therefore $p \mid y_{1}x_{2}$, which implies either $p \mid y_{1}$ or $p \mid x_{2}$. If $p \mid y_{1}$, then since $p \mid y_{2}$ we get a contradiction to the fact that $(y_{1}, y_{2}) = 1$. Thus $p \mid x_{2}$, and we write $x_{2}= p^{\beta} x_{22}$, where $\beta \in \mathbb{Z}$, $\beta \geq 1$ and $(p,x_{22}) =1$. Now putting these values of $y_{2}$ and $x_{2}$ in \eqref{h} gives \begin{equation} \label{I} p^{3\beta} x_{22}^{3} y_{1}^{2} = p^{2\alpha} y_{22}^{2} x_{1}^{3} + 135 x_{1} p^{2\alpha + 2\beta} y_{22}^{2} x_{22}^{2} + 297 p^{2\alpha + 3\beta} y_{22}^{2}x_{22}^{3}. \end{equation} Three cases can occur.\\ Case I. Suppose $3\beta > 2\alpha$. In this case, \begin{equation} \label{J} p^{3\beta - 2\alpha} x_{22}^{3} y_{1}^{2} = y_{22}^{2} x_{1}^{3} + 135 x_{1} p^{2\beta} y_{22}^{2} x_{22}^{2} + 297 p^{3\beta} y_{22}^{2}x_{22}^{3}. \end{equation} From \eqref{J} it follows that $$ p \mid y_{22} x_{1}, $$ which forces $p \mid x_{1}$ as $(p, y_{22})= 1$; this contradicts the fact that $(x_{1}, x_{2}) = 1$. \\ Case II. Suppose $3\beta < 2\alpha$. In that case $p \mid y_{1}$, which again contradicts the fact that $(y_{1}, y_{2}) = 1$. \\ Case III. Suppose $3\beta = 2\alpha$. This case is analogous to Case I. Hence no such prime $p$ can exist, so $y_{2} = \pm 1$. The same argument applied to \eqref{h} shows that a prime divisor of $x_{2}$ would divide $x_{1}$, so $x_{2} = \pm 1$ as well; that is, every rational point of \eqref{g} is integral. Since an elliptic curve in Weierstrass form has only finitely many integral points by Siegel's theorem (see \cite{S92}), this contradicts the assumption that \eqref{g} has infinitely many rational points. Thus the rank of $E_{297}(\mathbb{Q})$ defined by the equation \eqref{g} is zero. \end{proof} \begin{lemma} The rank of $E_{297}(\mathbb{Q}(\sqrt{d}))$ is $1$ for $d=-7, -1, 17$, and is $2$ for $d=101$. \end{lemma} \begin{proof} Let $E/K$ be an elliptic curve and let $d \in K^{*}$ be such that $L = K(\sqrt{d})$ is a quadratic extension. Let $E_{d}/K$ be the quadratic twist of $E/K$ by $d$. Then by \cite{S92}, \begin{equation}\label{rs} \text{rank}~E(L) = \text{rank}~E(K) + \text{rank}~ E_{d}(K). \end{equation} Using SAGE \cite{Ss45} (and recalling that rank $E_{297}(\mathbb{Q}) = 0$), we find that rank $E_{-7\cdot 297}(\mathbb{Q}) = 1$, rank $E_{-1\cdot 297}(\mathbb{Q}) = 1$, rank $E_{17\cdot 297}(\mathbb{Q}) = 1$ and rank $E_{101\cdot 297}(\mathbb{Q}) = 2$.
Now using \eqref{rs} we obtain $$ \text{rank}~E_{297}(\mathbb{Q}(\sqrt{-7})) = \text{rank}~E_{297}(\mathbb{Q}(\sqrt{-1})) = \text{rank}~E_{297}(\mathbb{Q}(\sqrt{17})) = 1 $$ $$ \mbox{and}~~ \text{rank}~E_{297}(\mathbb{Q}(\sqrt{101})) = 2. $$ \end{proof} \section{Concluding remarks} We have shown that the system $$ r + s + t = rst = 2 $$ has no solution in $\mathcal{O}_K$ except when $d = -7,-1,17$ and $101$, and in these cases the solutions are given explicitly. It would be of interest to consider the next case, i.e., \begin{equation} \label{q} r + s + t = rst = 3. \end{equation} A suitable change of variables transforms \eqref{q} into an elliptic curve with Weierstrass form $$ E_{13122}: y^2 = x^3 + 3645 x - 13122. $$ The torsion of $E_{13122}$ over $\mathbb{Q}$ is isomorphic to $\mathbb{Z}_{3}$ and its rank is zero (using SAGE \cite{Ss45}). Following an analogous argument, one can conclude that, except for $d = -2, -1, 7, 10$ and $13$, the system \eqref{q} has no solutions in the ring of integers of $K$, and the solutions can be given explicitly. Other individual cases can be treated analogously. \section{Acknowledgement} A part of this manuscript was completed while the author was visiting Prof. Sanoli Gun at the Institute of Mathematical Sciences (IMSc), Chennai. She is grateful to Prof. Gun for her support and encouragement. It is a pleasure to thank Prof. Michel Waldschmidt for his valuable comments and suggestions during the visit to IMSc. Last but not least, the serene ambience of the Kerala School of Mathematics (KSoM) has been a constant source of energy and of great help to this research. \begin{thebibliography}{25} \bibitem{Cubic} A. Bremner, The equation $xyz=x+y+z=1$ in integers of a cubic field, {\it Manuscripta Math.}, {\bf 65(4)} (1989), 479--487. \bibitem{Quartic} A. Bremner, The equation $xyz=x+y+z=1$ in integers of a quartic field, {\it Acta Arith.}, {\bf 57(4)} (1991), 375--385. \bibitem{Charles} C. Small, On the equation $xyz = x + y + z = 1$, {\it Amer. Math. Monthly}, {\bf 89} (1982), 736--749. \bibitem{Cassels} J. W. S. Cassels, On a diophantine equation, {\it Acta Arith.}, {\bf 6} (1960), 47--52. \bibitem{S92} J. H. Silverman, The Arithmetic of Elliptic Curves, {\it Grad. Texts in Math.}, {\bf 106}, Springer, 1986. \bibitem{Kalyan} K. Chakraborty, M. Kulkarni, Solutions of cubic equations in quadratic fields, {\it Acta Arith.}, {\bf 89(1)} (1999), 37--43. \bibitem{Laska} M. Laska, Solving the equation $x^3-y^2 = r$ in number fields, {\it J. Reine Angew. Math.}, {\bf 333} (1981), 73--85. \bibitem{Mollin} R. A. Mollin, C. Small, K. Varadarajan and P. G. Walsh, On unit solutions of the equation $xyz = x + y + z$ in the ring of integers of a quadratic field, {\it Acta Arith.}, {\bf 48} (1987), 341--345. \bibitem{Ss45} SAGE software, version 4.5.3, \url{http://www.sagemath.org}. \end{thebibliography} \end{document}
2205.14680v5
http://arxiv.org/abs/2205.14680v5
The strong chromatic index of 1-planar graphs
\documentclass[ final ]{dmtcs-episciences} \usepackage[utf8]{inputenc} \usepackage{subfigure} \usepackage{enumerate} \newtheorem{question}{Question} \newtheorem{theorem}{Theorem} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{definition}[theorem]{Definition} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{conjecture}{Conjecture} \usepackage[square, comma, sort&compress, numbers]{natbib} \author[Yiqiao Wang et. al]{Yiqiao Wang\affiliationmark{1}\thanks{Research supported partially by NSFC (Nos.\,12071048; 12161141006)} \and Ning Song\affiliationmark{2} \and Jianfeng Wang\affiliationmark{2}\thanks{Research supported partially by NSFC (Nos.\,11971274)} \and Weifan Wang\affiliationmark{2}\thanks{Research supported partially by NSFC (Nos.\,12031018; 12226303); Corresponding author. Email: [email protected]}} \title[The strong chromatic index of $1$-planar graphs]{The strong chromatic index of $1$-planar graphs} \affiliation{ Faculty of Science, Beijing University of Technology, Beijing, China\\ School of Mathematics and Statistics, Shandong University of Technology, Zibo, China} \keywords{Strong edge coloring, strong chromatic index, maximum average degree, 1-planar graph, matching.} \begin{document} \publicationdata{vol.25.1 }{2023}{11}{10.46298/dmtcs.9631}{2022-05-31; 2022-05-31; 2022-11-24}{2023-03-08} \maketitle \begin{abstract} The chromatic index $\chi'(G)$ of a graph $G$ is the smallest $k$ for which $G$ admits an edge $k$-coloring such that any two adjacent edges have distinct colors. The strong chromatic index $\chi'_s(G)$ of $G$ is the smallest $k$ such that $G$ has an edge $k$-coloring with the condition that any two edges at distance at most 2 receive distinct colors. A graph is 1-planar if it can be drawn in the plane so that each edge is crossed by at most one other edge. In this paper, we show that every graph $G$ with maximum average degree $\bar{d}(G)$ has $\chi'_{s}(G)\le (2\bar{d}(G)-1)\chi'(G)$. As a corollary, we prove that every 1-planar graph $G$ with maximum degree $\Delta$ has $\chi'_{\rm s}(G)\le 14\Delta$, which improves a result, due to Bensmail et al., which says that $\chi'_{\rm s}(G)\le 24\Delta$ if $\Delta\ge 56$. \end{abstract} \section{Introduction} \label{sec:in} Only simple graphs are considered in this paper unless otherwise stated. Let $G$ be a graph with vertex set $V(G)$, edge set $E(G)$, minimum degree $\delta(G)$, and maximum degree $\Delta(G)$ (for short, $\Delta$), respectively. A vertex $v$ is called a $k$-{\em vertex} if the degree $d_G(v)$ of $v$ is $k$. The {\em girth} $g(G)$ of a graph $G$ is the length of a shortest cycle in $G$. The {\em maximum average degree $\bar{d}(G)$} of a graph $G$ is defined as follows: $$\bar{d}(G)=\max\{\frac {2|E(H)|} {|V(H)|}\ |\ H \subseteq G\}.$$ A {\em proper edge $k$-coloring} of a graph $G$ is a mapping $\phi: E(G) \to \{1,2,\ldots ,k\}$ such that $\phi(e)\ne \phi(e')$ for any two adjacent edges $e$ and $e'$. The {\em chromatic index} $\chi'(G)$ of $G$ is the smallest $k$ such that $G$ has a proper edge $k$-coloring. The coloring $\phi$ is called {\em strong} if any two edges at distance at most two get distinct colors. Equivalently, each color class is an induced matching. The {\em strong chromatic index}, denoted $\chi'_{\rm s}(G)$, of $G$ is the smallest integer $k$ such that $G$ has a strong edge $k$-coloring. Strong edge coloring of graphs was introduced by Fouquet and Jolivet \cite{fou}. 
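To make the definition concrete, the following small Python script (ours, purely illustrative and not part of the paper) computes $\chi'_{\rm s}$ of a tiny graph by brute force, using the equivalent formulation that any two edges at distance at most two must receive distinct colors; in the $5$-cycle every pair of edges is within distance two, so $\chi'_{\rm s}(C_5)=5$ while $\chi'(C_5)=3$.
\begin{verbatim}
# Brute-force chi'_s of a tiny graph (ours, purely illustrative): two edges
# "conflict" if they are at distance at most 2, i.e. they share a vertex or
# some third edge touches both of them.
from itertools import combinations, product

def conflicts(edges):
    E = [frozenset(e) for e in edges]
    share = lambda a, b: bool(a & b)
    bad = set()
    for i, j in combinations(range(len(E)), 2):
        if share(E[i], E[j]) or any(share(E[i], g) and share(E[j], g)
                                    for k, g in enumerate(E) if k not in (i, j)):
            bad.add((i, j))
    return bad

def strong_chromatic_index(edges):
    """Smallest k admitting a strong edge k-coloring (tiny graphs only)."""
    bad, m = conflicts(edges), len(edges)
    for k in range(1, m + 1):
        if any(all(col[i] != col[j] for i, j in bad)
               for col in product(range(k), repeat=m)):
            return k
    return m

# Every two edges of the 5-cycle are within distance 2, so chi'_s(C_5) = 5,
# whereas the ordinary chromatic index chi'(C_5) is only 3.
c5 = [(0, 1), (1, 2), (2, 3), (3, 4), (4, 0)]
print(strong_chromatic_index(c5))   # -> 5
\end{verbatim}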
It holds trivially that $\chi'_s(G)\ge \chi'(G)\ge \Delta$ for any graph $G$. In 1985, during a seminar in Prague, Erd\H{o}s and Ne${\rm \breve{s}}$et${\rm \breve{r}}$il put forward the following conjecture: \begin{conjecture} For a simple graph $G$, \[ \chi'_s(G) \le \left\{ \begin{array}{ll} 1.25 \Delta^2, & \mbox{{\rm if} $\Delta$ {\rm is even;}}\\ 1.25\Delta^2-0.5\Delta+0.25, & \mbox{{\rm if} $\Delta$ {\rm is odd.}} \end{array}\right. \] \end{conjecture} Erd\H{o}s and Ne\v{s}et\v{r}il provided a construction showing that Conjecture 1 is tight if it were true. Using probabilistic method, Molloy and Reed \cite{mol} showed that $\chi'_s(G)\le 1.998\Delta^2$ for any graph $G$ with sufficiently large $\Delta$. This result was gradually improved to that $\chi'_s(G)\le 1.93\Delta^2$ in \cite{bru}, to that $\chi'_s(G)\le 1.835\Delta^2$ in \cite{bon}, and to that $\chi'_s(G)\le 1.772\Delta^2$ in \cite{hur}. Andersen \cite{and} and independently Hor$\acute{\rm a}$k et al.\,\cite{hor} confirmed Conjecture 1 for graphs with $\Delta=3$. If $\Delta=4$, then Conjecture 1 asserts that $\chi'_s(G)\le 20$. However, the currently best known upper bound is 21 for this case, see \cite{huang}. A graph $G$ is $d$-{\em degenerate} if each subgraph of $G$ contains a vertex of degree at most $d$. Chang and Narayanan \cite{chan1} showed that $\chi'_{\rm s}(G)\le 10\Delta-10$ for a 2-degenerate graph $G$. For a general $k$-degenerate graph $G$, it was shown that $\chi'_{\rm s}(G)\le (4k-2)\Delta-k(2k-1)+1$ in \cite{yu}, $\chi'_{\rm s}(G)\le (4k-1)\Delta-k(2k+1)+1$ in \cite{dbs}, and $\chi'_{\rm s}(G)\le (4k-2)\Delta- 2k^2+1$ in \cite{wan}. Suppose that $G$ is a planar graph. Faudree et al.\,\cite{fau} first gave an elegant proof for the result that $\chi'_{\rm s}(G)\le 4\Delta+4$, and constructed a class of planar graphs $G$ with $\Delta\ge 2$ such that $\chi'_{\rm s}(G)=4\Delta-4$. For the class of special planar graphs, some better results have been obtained. It was shown in \cite{hud} that $\chi'_{\rm s}(G)\le 3\Delta$ if $g(G)\ge 7$, and in \cite{bh} that $\chi'_{\rm s}(G)\le 3\Delta+1$ if $g(G)\ge 6$. Kostochka et al.\,\cite{kos} showed that if $\Delta=3$ then $\chi'_{\rm s}(G)\le 9$. Hocquard et al.\,\cite{hoc} showed that every outerplanar graph $G$ with $\Delta\ge 3$ has $\chi'_{\rm s}(G)\le 3\Delta-3$. Wang et al.\,\cite{yiqiao} showed that every $K_4$-minor-free graph $G$ with $\Delta\ge 3$ has $\chi'_{\rm s}(G)\le 3\Delta-2$. Moreover, all upper bounds $9, 3\Delta-3,3\Delta-2$ given in the above results are best possible. A $1$-{\em planar graph} is a graph that can be drawn in the plane such that each edge crosses at most one other edge. A number of interesting results about structures and parameters of 1-planar graphs have been obtained in recent years. Fabrici and Madaras \cite{fa} proved that every 1-planar graph $G$ has $|E(G)|\le 4|V(G)|-8$, which implies that $\delta(G)\le 7$, and constructed 7-regular 1-planar graphs. Borodin \cite{borodin} showed that every 1-planar graph is vertex 6-colorable. Wang and Lih \cite{wang} proved that the vertex-face total graph of a plane graph, which is a class of special 1-planar graphs, is vertex 7-choosable. Zhang and Wu \cite{zhang} studied the edge coloring of 1-planar graphs and showed that every 1-planar graph $G$ with $\Delta\ge 10$ satisfies $\chi'(G)=\Delta$. Bensmail et al.\,\cite{ben} investigated the strong edge coloring of 1-planar graphs and proved that every 1-planar graph $G$ has $\chi'_{\rm s}(G)\le \max\{18\Delta+330, 24\Delta-6\}$. 
This implies that if $\Delta\ge 56$, then $\chi'_{\rm s}(G)\le 24\Delta-6$. In this paper we will improve this result by showing that every 1-planar graph $G$ has $\chi'_{\rm s}(G)\le 14\Delta$. To obtain this result, we establish a connection between the strong chromatic index and maximum average degree of a graph. More precisely, we will show that $\chi'_{\rm s}(G)\le (2\bar{d}(G)-1)(\Delta+1)$ for any simple graph $G$. \section{Preliminary} In this section, we summarize some known results, which will be used later. A {\em proper $k$-coloring} of a graph $G$ is a mapping $\phi: V(G)\to \{1,2,\ldots,k\}$ such that $\phi(u)\ne \phi(v)$ for any two adjacent vertices $u$ and $v$. The {\em chromatic number}, denoted $\chi(G)$, of $G$ is the least $k$ such that $G$ has a proper $k$-coloring. Using a greedy algorithm, the following conclusion holds automatically. \begin{lemma}\label{vertex-cloring-1} If $G$ is a $d$-degenerate graph, then $\chi(G)\le d+1$. \end{lemma} As stated before, Borodin \cite{borodin} showed the following sharp result: \begin{theorem}\label{vertex-coloring-2} {\rm (\cite{borodin})} Every $1$-planar graph $G$ has $\chi(G)\le 6$. \end{theorem} Given a graph $G$, it is trivial that $\chi'(G)\ge \Delta$. On the other hand, the celebrated Vizing Theorem \cite{vizing} asserts: \begin{theorem}\label{edge-coloring-1} {\rm (\cite{vizing})} Every simple graph $G$ has $ \chi'(G)\le \Delta+1$. \end{theorem} A simple graph $G$ is of {\em Class I} if $\chi'(G)=\Delta$, and of {\em Class II} if $\chi'(G)=\Delta+1$. As early as in 1916, K$\ddot{\rm o}$nig \cite{konig} showed that bipartite graphs are of Class I. \begin{theorem}\label{konig} {\rm (\cite{konig})} If $G$ is a bipartite graph, then $\chi'(G)=\Delta$. \end{theorem} Sanders and Zhao \cite{sander}, and Zhang \cite{zlm} independently, showed that planar graphs with maximum degree at least seven are of Class I. \begin{theorem}\label{edge-coloring-2}{\rm (\cite{sander,zlm})} Every planar graph $G$ with $\Delta\ge 7$ has $\chi'(G)=\Delta$. \end{theorem} Another result, due to Zhang and Wu \cite{zhang}, claims that 1-planar graphs with maximum degree at least ten are of Class I. \begin{theorem}\label{edge-coloring-3} {\rm (\cite{zhang})} Every $1$-planar graph $G$ with $\Delta\ge 10$ has $\chi'(G)=\Delta$. \end{theorem} Zhou \cite{zhou} observed an interesting relation between the degeneracy and chromatic index of a graph. \begin{theorem}\label{zhou} {\rm (\cite{zhou})} If $G$ is a $k$-degenerate graph with $\Delta\ge 2k$, then $\chi'(G)=\Delta$. \end{theorem} \section{Contracting matchings in a graph} Let $G$ be a simple graph. An edge $e$ of $G$ is said to be {\em contracted} if it is deleted and its end-vertices are identified. An edge subset $M$ of $G$ is called a {\em matching} if no two edges in $M$ are adjacent in $G$. Specifically, a matching $M$ is called {\em strong} if no two edges in $M$ are adjacent to a common edge. This is equivalent to saying that $G[V(M)]=M$. Thus, a strong matching is also called an {\em induced matching}. Determining the chromatic index $\chi'(G)$ of a graph $G$ is certainly equivalent to finding the least $k$ such that $E(G)$ can be partitioned into $k$ edge-disjoint matchings, and determining the strong chromatic index $\chi'_{\rm s}(G)$ of a graph $G$ is equivalent to finding the least $k$ such that $E(G)$ can be partitioned into $k$ edge-disjoint strong matchings. 
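Lemma \ref{vertex-cloring-1} is used repeatedly below. The following minimal Python sketch (ours, not taken from the paper) illustrates the greedy argument behind it: repeatedly delete a vertex of minimum degree to obtain a degeneracy ordering, and then color the vertices greedily in the reverse order, so that each vertex has at most $d$ already-colored neighbors when its color is chosen.
\begin{verbatim}
# A minimal sketch (ours) of the greedy coloring argument: a d-degenerate
# graph admits a proper coloring with at most d+1 colors.
def degeneracy_ordering(adj):
    """adj: dict vertex -> set of neighbours; returns (order, degeneracy)."""
    rem = {v: set(ns) for v, ns in adj.items()}   # work on a copy
    order, d = [], 0
    while rem:
        v = min(rem, key=lambda u: len(rem[u]))
        d = max(d, len(rem[v]))
        order.append(v)
        for u in rem[v]:
            rem[u].discard(v)
        del rem[v]
    return order, d

def greedy_coloring(adj):
    order, d = degeneracy_ordering(adj)
    color = {}
    for v in reversed(order):   # at most d neighbours of v are already colored
        used = {color[u] for u in adj[v] if u in color}
        color[v] = next(c for c in range(d + 1) if c not in used)
    return color, d

# The 5-cycle is 2-degenerate, so the procedure uses at most 3 colors.
c5 = {i: {(i - 1) % 5, (i + 1) % 5} for i in range(5)}
cols, d = greedy_coloring(c5)
print(d, len(set(cols.values())))   # -> 2 3
\end{verbatim}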
In what follows, an edge $k$-coloring of $G$ with the color classes $E_1,E_2,\ldots,E_k$ will be denoted by $(E_1,E_2,\ldots,E_k)$. Given a graph $G$ and a matching $M$ of $G$, let $G_M$ denote the graph obtained from $G$ by contracting each edge in $M$. Note that $G_M$ may contain multi-edges, but no loops, even if $G$ is simple. Let $a\ge 1$ and $b\ge 0$ be integers. A graph $G$ is said to be {\em $(a,b)$-graph} if every subgraph $G'$ of $G$ (including itself) has $|E(G')|\le a|V(G')|-b$. \begin{theorem} \label{them1} Let $G$ be a $(a,b)$-graph with $a\ge 1$ and $b\ge 0$. Let $M$ be a matching of $G$. Then $G_M$ is a $(2a-1,b)$-graph. \end{theorem} \begin{proof} \ Let $H$ be any subgraph of $G_M$. Assume that $V(H)=V_1\cup V_2$, where $V_1$ is the set of vertices in $G_M$ which are formed by contracting some edges in $M$, and $V_2=V(H)\setminus V_1$, say, $V_1=\{x_1,x_2,\ldots,x_{n_1}\}$, and $V_2=\{y_1,y_2,\ldots,y_{n_2}\}$. Then $|V(H)|=n_1+n_2$. Splitting each vertex $x_i\in V_1$ into two vertices $u_i$ and $v_i$ and restoring corresponding incident edges for $u_i$ and $v_i$ in $G$, we get a subgraph $G'$ of $G$ with $$V(G')=\{u_1,u_2,\ldots,u_{n_1}; v_1,v_2,\ldots,v_{n_1};y_1,y_2,\ldots,y_{n_2}\}$$ and $$E(G')=E(H)\cup M',$$ \noindent where $$M'=\{u_1v_1,u_2v_2,\ldots,u_{n_1}v_{n_1}\} \subseteq M.$$ It is easy to compute that $|V(G')|=2n_1+n_2$ and $|E(G')|=|E(H)|+n_1$. By the assumption, $|E(G')|\le a|V(G')|-b$. Since $a\ge 1$, we have $2a-1\ge a$. Consequently, \begin{eqnarray*} |E(H)|&=& |E(G')|-n_1\\ &\le& a|V(G')|-b-n_1\\ &=& a(2n_1+n_2)-b-n_1\\ &=& (2a-1)n_1+an_2-b\\ &\le& (2a-1)(n_1+n_2)-b\\ &=& (2a-1)|V(H)|-b. \end{eqnarray*} This shows that $G_M$ is a $(2a-1,b)$-graph. \end{proof} \begin{corollary}\label{coro-4a-3} Let $G$ be a $(a,b)$-graph with $a,b\ge 1$. Let $M$ be a matching of $G$. Then $G_M$ is $(4a-3)$-degenerate. \end{corollary} \begin{proof} \ It suffices to verify that $\delta(H)\le 4a-3$ for any $H\subseteq G_M$. Suppose to the contrary that $\delta(H)\ge 4a-2$. Since $b\ge 1$, Theorem \ref{them1} and the Handshaking Theorem imply that $(4a-2)|V(H)|\le \delta(H)|V(H)|\le \sum\limits_{v\in V(H)}d_H(v)=2|E(H)|\le 2((2a-1)|V(H)|-b) = (4a-2) |V(H)|-2b<(4a-2)|V(H)|$. This leads to a contradiction. \end{proof} Similarly, we obtain the following consequence: \begin{corollary}\label{coro-4a-2} Let $G$ be a $(a,0)$-graph with $a\ge 1$. Let $M$ be a matching of $G$. Then $G_M$ is $(4a-2)$-degenerate. \end{corollary} A matching $M$ of a graph $G$ is said to be {\em partitioned} into $q$ strong matchings of $G$ if $M=M_1\cup M_2\cup \cdots \cup M_q$ and $M_i\cap M_j=\emptyset$ for $i\ne j$ such that each $M_i$ is a strong matching of $G$. Let $\rho_G(M)$ denote the least $q$ such that $M$ is partitioned into $q$ strong matchings. By definition, $1\le \rho_G(M)\le |M|$. The following result is highly inspired from a result of \cite{fau} on the strong chromatic index of planar graphs. For the sake of completeness, we here give the detailed proof. \begin{lemma}\label{lem11}\ Let $G$ be a graph and $M$ be a matching of $G$. Then $\rho_G(M)\le \chi(G_M)$. \end{lemma} \begin{proof} \ Let $V(G_M)=S_1\cup S_2$, where $S_1$ is the set of vertices in $G_M$ formed from $G$ by contracting edges in $M$ and $S_2=V(G)\setminus V(M)$. Set $k=\chi(G_M)$. Then $G_M$ admits a proper $k$-coloring $\phi: V(G_M)\to \{1,2,\ldots,k\}$. For $1\le i\le k$, let $V_i$ denote the set of vertices in $G_M$ with the color $i$. 
In $G$, for $1\le i\le k$, let \ \ \ \ \ \ \ $E^*_i=\{e\in M\,|\,e\ {\rm is\ contracted\ to\ some\ vertex}\ v_e\in S_1\ {\rm with}\ \phi(v_e)=i\}.$ Let $e_1,e_2\in E^*_i$ be any two edges. Since $e_1,e_2\in M$, $e_1$ and $e_2$ are not adjacent in $G$. We claim that no edge $e\in E(G)$ is simultaneously adjacent to both $e_1$ and $e_2$. Assume to the contrary, there exists $e=xy\in E(G)$ adjacent to $e_1$ and $e_2$. Without loss of generality, we may suppose that $e_1=xx'$ and $e_2=yy'$. Let $v_{e_1}$ and $v_{e_2}$ denote the corresponding vertices of $e_1$ and $e_2$ in $S_1$, respectively. Indeed, $x$ is $v_{e_1}$, and $y$ is $v_{e_2}$. Since $xy\notin M$, it follows that $xy\in E(G_M)$ and thus $x$ is adjacent to $y$ in $G_M$. By the definition of $\phi$, $\phi(x)\ne \phi(y)$. Let $\phi(x)=p$ and $\phi(y)=q$. Then $e_1\in E^*_p$ and $e_2\in E^*_q$ with $p\ne q$, which contradicts the assumption that $e_1,e_2\in E^*_i$. So, each of $E_1^*,E^*_2,\ldots,E^*_k$ is a strong matching of $G$. This confirms that $\rho_G(M)\le k=\chi(G_M)$. \end{proof} \section{Strong chromatic index} In this section, we will discuss the strong edge coloring of some graphs by using the previous preliminary results. \subsection{An upper bound} We first establish an upper bound of strong chromatic index for a general graph $G$, which reveals a relation between the strong chromatic index, chromatic index and maximum average degree of $G$. \begin{lemma}\label{average} Let $H$ be a subgraph of a graph $G$. Then $|E(H)|\le \frac 12\bar{d}(G)|V(H)|$. \end{lemma} \begin{proof}\ For any subgraph $H\subseteq G$, it follows from the definition of $\bar{d}(G)$ that $\frac {2|E(H)|}{|V(H)|}\le \bar{d}(G)$. Consequently, $|E(H)|\le \frac 12\bar{d}(G)|V(H)|$. \end{proof} \begin{theorem}\label{them3} Every graph $G$ has $\chi'_{\rm s}(G)\le (2\bar{d}(G)-1)\chi'(G)$. \end{theorem} \begin{proof} Let $k=\chi'(G)$. Then $G$ has an edge $k$-coloring $(E_1,E_2,\ldots,E_k)$, where each $E_i$ is a matching of $G$. Let $G_i$ be the graph obtained from $G$ by contracting each of edges in $E_i$. By Lemma \ref{average} and Corollary \ref{coro-4a-2}, $G_i$ is $(2\bar{d}(G)-2)$-degenerate. By Lemma \ref{vertex-cloring-1}, $\chi(G_i)\le 2\bar{d}(G)-1$. By Lemma \ref{lem11}, $\chi'_{\rm s}(G)\le (2\bar{d}(G)-1)k=(2\bar{d}(G)-1)\chi'(G)$. \end{proof} By Theorems \ref{edge-coloring-1}, \ref{konig} and \ref{them3}, the following two corollaries hold automatically. \begin{corollary}\label{coro-mav-1} Every graph $G$ has $\chi'_{\rm s}(G)\le (2\bar{d}(G)-1)(\Delta+1)$. \end{corollary} \begin{corollary}\label{coro-mav-2} Every bipartite graph $G$ has $\chi'_{\rm s}(G)\le (2\bar{d}(G)-1)\Delta$. \end{corollary} \begin{corollary}\label{coro-mav-3} If $G$ is a graph with $\Delta\ge 2\bar{d}(G)$, then $\chi'_{\rm s}(G)\le (2\bar{d}(G)-1)\Delta$. \end{corollary} \begin{proof} \ Since $G$ is $\bar{d}(G)$-degenerate and $\Delta\ge 2\bar{d}(G)$, Theorem \ref{zhou} asserts that $\chi'(G)=\Delta$. By Theorem \ref{them3}, $\chi'_{\rm s}(G)\le (2\bar{d}(G)-1)\Delta$. \end{proof} \subsection{1-planar graphs} Recently, Liu et al.\,\cite{liu} investigated the existence of light edges in a 1-planar graph with minimum degree at least three. For our purpose, we here list one of their results as follows: \begin{theorem}\label{7-7} {\rm (\cite{liu})} Every $1$-planar graph $G$ with $\delta(G)=7$ contains two adjacent $7$-vertices. 
\end{theorem} With a greedy coloring procedure, it can be constructively shown that the strong chromatic index of a simple graph $G$ is at most $2\Delta(\Delta-1)+1$. \begin{theorem}\label{1-planar}\ If $G$ is a $1$-planar graph, then $\chi'_{\rm s}(G)\le 14\Delta$. \end{theorem} \begin{proof} \ The proof is split into the following cases, depending on the size of $\Delta$. \begin{enumerate}[{Case }1:] \item \ $\Delta\le 7$. It is easy to check that $2\Delta(\Delta-1)+1\le 14\Delta$ and henceforth the result follows. \item \ $\Delta=8$. Since $G$ is $7$-degenerate, it follows from the result of \cite{wan} that $\chi'_{\rm s}(G)\le (4\times 7-2)\Delta-2\times 7^2+1=26\Delta-97=111<112=14\Delta$. \item \ $\Delta=9$. The proof is given by induction on the number of edges in $G$. If $|E(G)|\le 14\Delta=126$, the result holds trivially, since we may color all edges of $G$ with distinct colors. Let $G$ be a 1-planar graph with $\Delta=9$ and $|E(G)|> 126$. Without loss of generality, assume that $G$ is connected, hence $\delta(G)\ge 1$. We have to consider two subcases as follows. \begin{enumerate}[{Case }3.1:] \item\ $\delta(G)\le 6$. Let $u\in V(G)$ with $d_G(u)=\delta(G)\ge 1$. Let $u_0,u_1,\ldots,u_{s-1}$ denote the neighbors of $u$ in a cyclic order, where $1\le s=\delta(G)\le 6$. For $0\le i\le s-1$, let $x_i^1,x_i^2,\ldots,x_i^{p_i}$ denote the neighbors of $u_i$ other than $u$. Consider the graph $H=G-u$. Then $H$ is a 1-planar graph with $\Delta(H)\le 9$ and $|E(H)|<|E(G)|$. By the induction hypothesis or Cases 1 and 2, $H$ admits a strong edge coloring $\phi$ using the color set $C=\{1,2,\ldots,126\}$. For a vertex $v\in V(H)$, let $C(v)$ denote the set of colors assigned to the edges incident with $v$. For $i=0,1,\ldots,s-1$, define a list $L(uu_i)$ of available colors for the edge $uu_i$ as follows: $$L(uu_i)=C- \bigcup\limits_{0\le j\le s-1; \ j\ne i} C(u_j) - \bigcup\limits_{1\le t\le p_i} C(x_i^{t}).$$ It is easy to calculate that \begin{eqnarray*} |L(uu_i)| &\ge & |C|- |\bigcup\limits_{0\le j\le s-1; \ j\ne i} C(u_j)|-|\bigcup\limits_{1\le t\le p_i} C(x_i^{t})|\\ &\ge& 126-(s-1)(\Delta-1)-(\Delta-1)\Delta\\ &\ge& 126-(6-1)\times (9-1)-(9-1)\times 9\\ &=&14. \end{eqnarray*} Based on $\phi$, we color $uu_0$ with a color $a_0\in L(uu_0)$, $uu_1$ with a color $a_1\in L(uu_1)\setminus \{a_0\}$, $\cdots,$ $uu_{s-1}$ with a color $a_{s-1}\in L(uu_{s-1})\setminus \{a_0,a_1,\ldots,a_{s-2}\}$. It is easy to testify that $\phi$ is extended to whole graph $G$. \item \ $\delta(G)=7$. By Theorem \ref{7-7}, $G$ contains two adjacent 7-vertices. Let $u$ be a 7-vertex of $G$ with neighbors $u_0,u_1,\ldots,u_6$ such that $d_G(u_0)=7$ and $d_G(u_i)\le 9$ for $i=1,2,\ldots,6$. Similarly to Case 3.1, for $0\le i\le 6$, let $x_i^1,x_i^2,\ldots,x_i^{p_i}$ denote the neighbors of $u_i$ other than $u$. Note that $p_0=6$ and $p_i\le 8$ for $i\ge 1$. Let $H=G-u$, which has a strong edge coloring $\phi$ using the color set $C=\{1,2,\ldots,126\}$, by the induction hypothesis or Cases 1 and 2. For each $0\le i\le 6$, we define similarly a list $L(uu_i)$ of available colors. It is easy to check that \begin{eqnarray*} |L(uu_0)| &\ge & |C|- |\bigcup\limits_{1\le j\le 6} C(u_j)|-|\bigcup\limits_{1\le t\le 6} C(x_0^{t})|\\ &\ge& 126-6 (\Delta-1)-6\Delta\\ &=&24. \end{eqnarray*} For $1\le i\le 6$, \begin{eqnarray*} |L(uu_i)| &\ge & |C|- |C(u_0)|- |\bigcup\limits_{1\le j\le 6;\ j\ne i} C(u_j)|-|\bigcup\limits_{1\le t\le {p_i}} C(x_i^{t})|\\ &\ge& 126-6-5(\Delta-1)-8\Delta\\ &=&8. 
\end{eqnarray*} Based on $\phi$, we color $uu_0$ with a color $a_0\in L(uu_0)$, $uu_1$ with a color $a_1\in L(uu_1)\setminus \{a_0\}$, $\cdots,$ $uu_{6}$ with a color $a_{6}\in L(uu_{6})\setminus \{a_0,a_1,\ldots,a_{5}\}$. It is easy to confirm that $\phi$ is extended to $G$. \end{enumerate} \item \ $\Delta\ge 10$. By Theorem \ref{edge-coloring-3}, $G$ is of Class I. Let $(E_1,E_2,\ldots,E_{\Delta})$ be an edge $\Delta$-coloring of $G$, where each $E_i$ is a matching of $G$. Let $G_i$ be the graph obtained from $G$ by contracting each edge in $E_i$. Note that each subgraph $H$ of $G$ is 1-planar and therefore $|E(H)|\le 4|V(H)|-8$. Taking $a=4$ and $b=8$ in Corollary \ref{coro-4a-3}, we deduce that $G_i$ is $13$-degenerate. By Lemma \ref{vertex-cloring-1}, $\chi(G_i)\le 14$. Therefore $\chi'_{\rm s}(G)\le 14\Delta$. \end{enumerate} \end{proof} \subsection{Special 1-planar graphs} Suppose that $G$ is a 1-planar graph which is drawn in the plane so that each edge is crossed by at most one other edge. Let $E'$ and $E''$ denote the set of non-crossing edges and crossing edges of $G$, respectively. Let $H_1=G[E']$ and $H_2=G[E'']$. That is, $H_1$ and $H_2$ are the subgraphs of $G$ induced by non-crossing edges and crossing edges, respectively. \begin{theorem}\label{special} Let $G$ be a $1$-planar graph. Then $\chi'_{\rm s}(G)\le 6\chi'(H_1)+14\chi'(H_2)$. \end{theorem} \begin{proof} \ Let $k_1=\chi'(H_1)$ and $k_2=\chi'(H_2)$. Then $\chi'(G)\le k_1+k_2$. Let $(E_1,E_2,\ldots,E_{k_1})$ be an edge $k_1$-coloring of $H_1$, and $(F_1,F_2,\ldots,F_{k_2})$ be an edge $k_2$-coloring of $H_2$. Then each of $E_i$'s and $F_j$'s is a matching in $G$. So, $(E_1,E_2,\ldots,E_{k_1},F_1,F_2,\ldots,F_{k_2})$ is an edge $(k_1+k_2)$-coloring of $G$. Similarly to the proof of Case 4 in Theorem \ref{1-planar}, every $F_j$ can be partitioned into 14 strong matchings of $G$. Moreover, for each $1\le i\le k_1$, because $G_{E_i}$ is a 1-planar graph, we derive that $\chi(G_{E_i})\le 6$ by Theorem \ref{vertex-coloring-2}. By Lemma \ref{lem11}, $E_i$ can be partitioned into 6 strong matchings. Consequently, $\chi'_{\rm s}(G)\le 6k_1+14k_2$. \end{proof} An {\em IC-planar graph} is a $1$-planar graph such that two pairs of crossing edges have no common end-vertices. Equivalently, each vertex of this kind of 1-planar graph is incident with at most one crossing edge. It is easy to verify that every IC-planar graph $G$ has $|E(G)|\le 3.25|V(G)|-6$ and this bound is attainable. Kr\'{a}l and Stacho \cite{kral} showed that every IC-planar graph is vertex 5-colorable. Yang et al.\,\cite{ywwl} showed that every IC-planar graph is vertex 6-choosable. Furthermore, Dvo\v{r}\'ak et al.\,\cite{dv} proved that every graph drawn in the plane so that the distance between every pair of crossings is at least 15 is 5-choosable. Using Theorem \ref{special}, we can establish the smaller upper bound for the strong chromatic index of IC-planar graphs. \begin{theorem}\label{IC} Every IC-planar graph $G$ has $\chi'_{\rm s}(G)\le 6\Delta+20$. \end{theorem} \begin{proof} \ If $\Delta\le 5$, then it is easy to obtain that $\chi'_{\rm s}(G)\le 2\Delta(\Delta-1)+1\le 6\Delta+20$ and therefore the theorem holds. So assume that $\Delta\ge 6$. Let $H_1$ and $H_2$ denote the graphs induced by non-crossing edges and crossing edges of $G$, respectively. Since no two crossing-edges of $G$ are adjacent, $H_2$ is a matching of $G$. Thus, $\chi'(H_2)\le 1$. Note that $H_1$ is a planar graph with $\Delta(H_1)\le \Delta$. 
If $\Delta(H_1)\ge 7$, then $\chi'(H_1)=\Delta(H_1)\le \Delta$ by Theorem \ref{edge-coloring-2}. So, by Theorem \ref{special}, $\chi'_{\rm s}(G)\le 6\chi'(H_1)+14\chi'(H_2)\le 6\Delta+14$. Otherwise, we have to consider two cases as follows: $\bullet$\ $\Delta(H_1)=6$. Then $6\le \Delta\le 7$. By Theorem \ref{edge-coloring-1}, $\chi'(H_1)\le 7$. By Theorem \ref{special}, $\chi'_{\rm s}(G)\le 6\chi'(H_1)+14\chi'(H_2)\le 6\times 7+14= 56\le 6\Delta+20$. $\bullet$\ $\Delta(H_1)=5$. Then $\Delta=6$ by the assumption. By Theorem \ref{edge-coloring-1}, $\chi'(H_1)\le 6$. By Theorem \ref{special}, $\chi'_{\rm s}(G)\le 6\chi'(H_1)+14\chi'(H_2)\le 6\times 6+14= 50=6\Delta+20$. \end{proof} A 1-planar graph $G$ is called {\em optimal} if $|E(G)|=4|V(G)|-8$. A {\em plane quadrangulation} is a plane graph such that each face of $G$ is of degree 4. It is not hard to show that a 3-connected plane quadrangulation is a bipartite plane graph with minimum degree 3. Suzuki \cite{suz} showed that every simple optimal 1-planar graph $G$ can be obtained from a 3-connected plane quadrangulation by adding a pair of crossing edges to each face of $G$. So an optimal 1-planar graph is an Eulerian graph, i.e., each vertex is of even degree. It was shown in \cite{len} that every optimal 1-planar graph $G$ can be edge-partitioned into two planar graphs $G_1$ and $G_2$ such that $\Delta(G_2)\le 4$. \begin{theorem}\label{optimal} Every optimal 1-planar graph $G$ has $\chi'_{\rm s}(G)\le 10\Delta+14$. \end{theorem} \begin{proof} \ Let $G$ be an optimal 1-planar graph. Let $H_1$ and $H_2$ denote the graphs induced by non-crossing edges and crossing edges of $G$, respectively. Then $G=H_1\cup H_2$, where $H_1$ is a bipartite plane graph. For each vertex $v\in V(G)$, it is easy to see that $d_{H_1}(v)=d_{H_2}(v)=\frac 12 d_G(v)$; in particular, we have $\Delta(H_1)=\Delta(H_2)=\frac {\Delta}2$. Since $H_1$ is bipartite, $\chi'(H_1)=\Delta(H_1)=\frac {\Delta}2$ by Theorem \ref{konig}. By Theorem \ref{edge-coloring-1}, $\chi'(H_2)\le \Delta(H_2)+1=\frac {\Delta}2+1$. By Theorem \ref{special}, $\chi'_{\rm s}(G)\le 6\chi'(H_1)+14\chi'(H_2)\le 6 \times \frac {\Delta}2+14\times (\frac {\Delta}2+1) = 10\Delta+14$. \end{proof} \section{Concluding remarks} In this paper, we show that the strong chromatic index of every 1-planar graph is at most $14\Delta$. As for the lower bound of strong chromatic index, Bensmail et al.\,\cite{ben} showed that for each $\Delta\ge 5$, there exist 1-planar graphs with strong chromatic index $6\Delta-12$. Based on these facts, we put forward the following: \medskip \noindent{\bf Question 1.}\ {\em What is the least constant $c_1$ such that every $1$-planar graph $G$ satisfies $\chi'_{\rm s}(G)\le c_1\Delta\,?$} \medskip The foregoing discussion asserts that $6\le c_1\le 14$. We think that it is very difficult to reduce further the value of $c_1$ by employing the method used in this paper. This paper also involves the strong edge coloring of some special 1-planar graphs such as IC-planar graphs and optimal 1-planar graphs. In particular, we show that the strong chromatic index of every IC-planar graph is at most $6\Delta+20$. For $\Delta\ge 4$, by attaching $\Delta-4$ new pendant vertices to each vertex of the complete graph $K_5$, we get a graph $H_{\Delta}$. Since $K_5$ is an IC-planar graph, so is $H_{\Delta}$. It is easy to inspect that any two edges of $H_{\Delta}$ lie in a path of length 2 or 3. So it follows that $\chi'_{\rm s}(H_{\Delta})=|E(H_{\Delta})|=10+5(\Delta-4)=5\Delta-10$. 
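This computation is easy to confirm mechanically: the short Python script below (ours, illustrative only) builds $H_\Delta$ and checks that every pair of its edges is at distance at most two, so that all $5\Delta-10$ edges must indeed receive pairwise distinct colors in a strong edge coloring.
\begin{verbatim}
# A quick mechanical check (ours) of the lower-bound construction H_Delta:
# K_5 with Delta - 4 pendant vertices attached to each of its five vertices.
from itertools import combinations

def build_H(Delta):
    edges = [frozenset(e) for e in combinations(range(5), 2)]   # K_5
    nxt = 5
    for v in range(5):
        for _ in range(Delta - 4):                # pendant edges at v
            edges.append(frozenset({v, nxt}))
            nxt += 1
    return edges

def all_pairs_within_distance_two(edges):
    share = lambda a, b: bool(a & b)
    return all(share(e, f) or any(share(e, g) and share(f, g)
                                  for g in edges if g not in (e, f))
               for e, f in combinations(edges, 2))

for Delta in (4, 5, 6, 7):
    E = build_H(Delta)
    assert len(E) == 5 * Delta - 10               # |E(H_Delta)| = 5*Delta - 10
    print(Delta, len(E), all_pairs_within_distance_two(E))   # -> True
\end{verbatim}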
\medskip \noindent{\bf Question 2.}\ {\em What is the least constant $c_2$ such that every IC-planar graph $G$ satisfies $\chi'_{\rm s}(G)\le c_2\Delta\,?$} \medskip Notice that $5\le c_2\le 6$. We conjecture that $c_2=5$. \begin{thebibliography}{s1} \bibitem{and} L.D. Andersen. The strong chromatic index of a cubic graph is at most 1. {\it Discrete Math.,} 108: 231--252, 1992. \bibitem{bh} J. Bensmail, A. Harutyunyan, H. Hocquard, P. Valicov. Strong edge-colouring of sparse planar graphs. {\it Discrete Appl. Math.,} 179: 229--234, 2014. \bibitem{ben} J. Bensmail, F. Dross, H. Hocquard, E. Sopena. From light edges to strong edge-colouring of $1$-planar graphs. {\it Discrete Math. Theoret. Comput. Sci.,} 22(1): Paper No.\,2, 8 pp., 2020--2021. \bibitem{bon} M. Bonamy, T. Perrett, L. Postle. Colouring graphs with sparse neighbourhoods: Bounds and applications. {\it J. Combin. Theory Ser. B,} 155: 278--317, 2022. \bibitem{borodin} O.V. Borodin. A new proof of the $6$ color theorem. {\it J. Graph Theory,} 19: 507--521, 1995. \bibitem{bru} H. Bruhn, F. Joos. A stronger bound for the strong chromatic index. {\it Combin. Probab. Comput.,} 27: 21--43, 2018. \bibitem{chan1} G.J. Chang, N. Narayanan. Strong chromatic index of 2-degenerate graphs. {\it J. Graph Theory,} 73: 119--126, 2013. \bibitem{dbs} M. D\c{e}bski, J. Grytczuk, M. \'{S}leszy\'{n}ska-Nowak. The strong chromatic index of sparse graphs. {\it Inform. Process. Lett.,} 115: 326--330, 2015. \bibitem{dv} Z. Dvo\v{r}\'ak, B. Lidick\'y, B. Mohar. $5$-choosability of graphs with crossings far apart. {\it J. Combin. Theory Ser. B,} 123: 54--96, 2017. \bibitem{fa} I. Fabrici, T. Madaras. The structure of 1-planar graphs. {\it Discrete Math.,} 307: 854--865, 2007. \bibitem{fau} R.J. Faudree, A. Gy\'arf\'as, R.H. Schelp, Zs. Tuza. The strong chromatic index of graphs. {\it Ars Combin.,} 29(B): 205--211, 1990. \bibitem{fou} J.L. Fouquet, J.L. Jolivet. Strong edge-colourings of graphs and applications to multi-$k$-gons. {\it Ars Combin.,} 16(A): 141--150, 1983. \bibitem{hoc} H. Hocquard, P. Ochem, P. Valicov. Strong edge-colouring and induced matchings. {\it Inform. Process. Lett.,} 113: 836--843, 2013. \bibitem{hor} P. Hor\'{a}k, Q. He, W.T. Trotter. Induced matchings in cubic graphs. {\it J. Graph Theory,} 17: 151--160, 1993. \bibitem{huang} M. Huang, M. Santana, G. Yu. Strong chromatic index of graphs with maximum degree four. {\it Electron. J. Combin.,} 25(3): Paper No. 3.31, 24 pp., 2018. \bibitem{hud} D. Hud\'{a}k, B. Lu\v{z}ar, R. Sot\'{a}k, R. \v{S}krekovski. Strong edge-colouring of planar graphs. {\it Discrete Math.,} 324: 41--49, 2014. \bibitem{hur} E. Hurley, R. de Joannis de Verclos, R.J. Kang. An improved procedure for colouring graphs of bounded local density. arXiv:2007.07874 [math.CO], https://arxiv.org/abs/2007.07874. \bibitem{konig} D. K$\ddot{\rm o}$nig. $\ddot{\rm U}$ber graphen ihre anwendung auf determinantentheorie und mengenlehre. {\it Math. Ann.,} 77: 453--465, 1916. \bibitem{kos} A.V. Kostochka, X. Li, W. Ruksasakchai, M. Santana, T. Wang, G. Yu. Strong chromatic index of subcubic planar multigraphs. {\it European J. Combin.,} 51: 380--397, 2016. \bibitem{kral} D. Kr\'al, L. Stacho. Coloring plane graphs with independent crossings. {\it J. Graph Theory,} 64: 184--205, 2010. \bibitem{len} W.J. Lenhart, G. Liotta, F. Montecchiani. On partitioning the edges of 1-plane graphs. {\it Theoret. Comput. Sci.,} 662: 59--65, 2017. \bibitem{liu} J. Liu, Y. Wang, W. Wang. Light edges in 1-planar graphs. {\it J. 
Graph Theory,} 101: 746--768, 2022. \bibitem{mol} M. Molloy, B. Reed. A bound on the strong chromatic index of a graph. {\it J. Combin. Theory Ser. B,} 69: 103--109, 1997. \bibitem{sander} D.P. Sanders, Y. Zhao. Planar graphs of maximum degree seven are class I. {\it J. Combin. Theory Ser. B,} 83: 201--212, 2001. \bibitem{suz} Y. Suzuki. Re-embedding of maximal 1-planar graphs. {\it SIAM J. Discrete Math.,} 24: 1527--1540, 2010. \bibitem{vizing} V.G. Vizing. On an estimate of the chromatic index of a $p$-graph,. {\it Diskret. Anal.,} 3: 25--30, 1964. \bibitem{wan} T. Wang. Strong chromatic index of $k$-degenerate graphs. {\it Discrete Math.,} 330: 17--19, 2014. \bibitem{wang} W. Wang, K.-W. Lih. Coupled choosability of plane graphs. {\it J. Graph Theory,} 58: 27--44, 2008. \bibitem{yiqiao} Y. Wang, P. Wang, W. Wang. Strong chromatic index of $K_4$-minor free graphs. {\it Inform. Process. Lett.,} 129: 53--56, 2018. \bibitem{ywwl} W. Yang, Y. Wang, W. Wang, K.-W. Lih. IC-planar graphs are $6$-choosable. {\it SIAM J. Discrete Math.,} 35: 1729--1745, 2021. \bibitem{yu} G. Yu. Strong edge-colorings for $k$-degenerate graphs. {\it Graphs Combin.,} 31: 1815--1818, 2015. \bibitem{zlm} L. Zhang. Every planar graph with maximum degree $7$ is of class 1. {\it Graphs Combin.,} 16: 467--495, 2000. \bibitem{zhang} X. Zhang, J. Wu. On edge colorings of $1$-planar graphs. {\it Inform. Process. Lett.,} 111: 124--128, 2011. \bibitem{zhou} G. Zhou. A note on graphs of class I. {\it Discrete Math.,} 262: 339--345, 2003. \end{thebibliography} \end{document}
2205.14662v1
http://arxiv.org/abs/2205.14662v1
No-Regret Learning in Network Stochastic Zero-Sum Games
\documentclass{article} \usepackage{amsmath,amsthm,amssymb,float,graphicx,geometry} \usepackage{bbm,bbding,amssymb,pifont,mathrsfs,amsfonts,graphicx,subfigure,mathtools,color}\usepackage{amscd,array,enumerate,dsfont,texdraw,tikz,multicol,bbm,authblk} \usepackage{algorithmic} \newtheorem{algorithm}{Algorithm} \usepackage{algorithm} \usepackage{footnote} \usepackage{lipsum} \usepackage{amsmath} \usepackage{cases} \usepackage{fancyhdr} \usepackage{CJK} \usepackage{bm} \usepackage{framed} \usepackage{listings} \usepackage{multirow} \allowdisplaybreaks[4] \usetikzlibrary{shapes.geometric, arrows} \newtheorem{assumption}{Assumption} \newtheorem{lem}{Lemma} \newtheorem{thm}{Theorem} \newtheorem{Def}{Definition} \newtheorem{Col}{Corollary} \newtheorem{remark}{Remark} \newtheorem{example}{Example} \newcommand{\blue}[1]{{\color{blue}#1}} \newcommand{\red}[1]{{\color{red}#1}} \lstset{numbers=left, numberstyle=\tiny, keywordstyle=\color{blue}, commentstyle=\color[cmyk]{1,0,1,0}, frame=single, escapeinside=``, extendedchars=false, xleftmargin=2em,xrightmargin=2em, aboveskip=1em, basicstyle=\footnotesize\tt, tabsize=4, showspaces=false } \linespread{1.2} \pagestyle{fancy} \lhead{} \chead{} \rhead{} \lfoot{} \cfoot{\thepage} \rfoot{} \renewcommand{\headrulewidth}{0.4pt} \renewcommand{\footrulewidth}{0pt} \newcommand{\sihao}{\fontsize{14.1pt}{\baselineskip}\selectfont} \renewcommand{\normalsize}{\fontsize{11pt}{\baselineskip}\selectfont} \geometry{left=3.17cm,right=3.17cm,top=2.54cm,bottom=2.54cm} \tikzstyle{startstop} = [rectangle, rounded corners, minimum width = 2cm, minimum height=1cm,text centered, draw = black, fill = red!40] \tikzstyle{io} = [rectangle, rounded corners, minimum width = 2cm, minimum height=1cm,text centered, draw = black, fill = blue!40] \tikzstyle{process} = [rectangle, rounded corners, minimum width = 2cm, minimum height=1cm,text centered, draw = black, fill = yellow!50] \tikzstyle{decision} = [rectangle, rounded corners, minimum width = 2cm, minimum height=1cm,text centered, draw = black, fill = green!40] \tikzstyle{arrow} = [->,>=stealth] \title{No-Regret Learning in Network Stochastic Zero-Sum Games} \author[a,b]{Shijie Huang} \author[c]{Jinlong Lei} \author[c,a]{Yiguang Hong} \affil[a]{Key Laboratory of Systems and Control, Academy of Mathematics and Systems Science, Chinese Academy of Sciences} \affil[b]{School of Mathematical Sciences, University of Chinese Academy of Sciences} \affil[c]{Department of Control Science and Engineering \& Shanghai Research Institute for Intelligent Autonomous Systems, Tongji University} \renewcommand*{\Affilfont}{\small\it} \renewcommand\Authands{ and } \date{} \begin{document} \maketitle \definecolor{shadecolor}{rgb}{0.9,0.9,0.9} \begin{abstract} No-regret learning has been widely used to compute a Nash equilibrium in two-person zero-sum games. However, there is still a lack of regret analysis for network stochastic zero-sum games, where players competing in two subnetworks only have access to some local information, and the cost functions include uncertainty. Such a game model can be found in security games, when a group of inspectors work together to detect a group of evaders. In this paper, we propose a distributed stochastic mirror descent (D-SMD) method, and establish the regret bounds $O(\sqrt{T})$ and $O(\log T)$ in the expected sense for convex-concave and strongly convex-strongly concave costs, respectively. Our bounds match those of the best known first-order online optimization algorithms. 
We then prove the convergence of the time-averaged iterates of D-SMD to the set of Nash equilibria. Finally, we show that the actual iterates of D-SMD almost surely converge to the Nash equilibrium in the strictly convex-strictly concave setting. \end{abstract} \section{INTRODUCTION} Two-person zero-sum games \cite{mv:53} are ubiquitous and well-researched topics in economics, convex optimization and robust optimization \cite{ben:09}. They are related to a variety of artificial intelligence problems, such as boosting \cite{fs:96}, generative adversarial networks (GAN) \cite{gp:14}, and poker games \cite{bb:15,ms:17}. So far, researchers have mainly focused on computing a Nash equilibrium (NE) \cite{n:51} and made significant progress. Specifically, no-regret learning has proved to be an extremely versatile tool in this direction. For instance, some typical no-regret algorithms such as follow the regularized leader, mirror descent (MD) and its variants \cite{rs:13a,ahk:12,z:17}, have become popular for finding a NE of a two-person zero-sum game. More recently, these algorithms have paved the way to designing algorithms with faster rates for both regret and convergence to a NE \cite{rs:13b,ddk:11,kh:18}. However, those algorithms rely on having access to complete information of the players. In practice, we may encounter the class of network games such as network zero-sum games, where the players are partitioned into two subnetworks and each player only has access to local information \cite{gharesifard2013distributed,lh:15}. The security game involving a group of evaders and a group of inspectors \cite{cc:16} could be an example. Additionally, many game problems in machine learning, such as GAN and model-based reinforcement learning \cite{rmk:20}, are complicated by uncertainty. Such problems may be modeled by stochastic Nash games, where the cost functions are expectation-valued. In this paper we consider no-regret learning in network stochastic zero-sum games. Compared with two-person zero-sum games, no-regret learning in network stochastic zero-sum games is more challenging. First of all, one needs to define a regret different from the classical regret due to the absence of complete information, which also brings difficulties to the regret analysis. Although the time-averaged iterates of no-regret learning algorithms are guaranteed to converge to a NE in two-person zero-sum games \cite{rs:13b}, whether such a property can be extended to a network stochastic zero-sum game remains unexplored. In addition, each player cannot accurately evaluate its (sub)gradient since the cost functions are expectation-valued. As a consequence, convergence analysis in a network stochastic zero-sum game may require different techniques. \subsection{Contributions} In this work, we propose a distributed stochastic mirror descent (D-SMD) method for network stochastic zero-sum games, and establish the theoretical results regarding the regret bounds and convergence guarantees. We elaborate on our contributions below. \begin{itemize} \item {\it Regret bounds of D-SMD}: We show that D-SMD achieves the regret bounds of $O(\sqrt{T})$ and $O(\log T)$ in the convex-concave and strongly convex-strongly concave cases, respectively. Despite the influence of the network parameters, our results match the regret order of MD in the convex and strongly convex cases \cite{s:11,ss:07}. 
\item {\it Convergence guarantees of D-SMD}: We establish the mean convergence of the time-averaged iterates to the set of Nash equilibria in the convex-concave and strongly convex-strongly concave settings, and provide the convergence rates. In addition, we also prove that the iterates of D-SMD converge to the unique NE with probability one in the strictly convex-strictly concave case. \end{itemize} For the sake of readability, we have moved all the omitted proofs to the appendix. \subsection{Related Work} We briefly review two kinds of related works: no-regret learning in games and NE seeking in zero-sum games. {\bf No-regret learning in games.} No-regret algorithms for two-person games were first proposed by \cite{h:57} and \cite{b:56}, and further studied in the work of \cite{fv:99}. In \cite{fs:99}, the regret bound of the multiplicative weights algorithm was established, which immediately yields $O(T^{-\frac{1}{2}})$ convergence rate. To obtain a faster algorithm, Nesterov's excessive gap technique was adopted to exhibit a near-optimal algorithm with convergence rate $O(\frac{(\ln T)^{3/2}}{T})$ in \cite{ddk:11}. Later a simpler no-regret framework with rate $O(\frac{\ln T}{T})$ based on the optimistic MD was proposed by \cite{rs:13b} and this rate was further improved to $O(\frac{1}{T})$ by \cite{kh:18}. In addition, no-regret learning was also widely studied in zero-sum extensive-form games, due to the success of CFR framework \cite{zj07} and its variants \cite{lw:09,t:14} in solving the game of limit Texas hold'em \cite{bb:15}. Recently, no-regret learning was extended to other form of games. \cite{hst:15} considered no-regret learning and its outcomes in Bayesian games, while \cite{sbk:19} proposed a no-regret algorithm for unknown games with correlated payoffs. \cite{mz:19} studied the last-iterate convergence of a no-regret algorithm to a NE of variationally stable games and \cite{l:20} further proved the finite-time last-iterate convergence rate of online gradient descent learning in cococercive games. {\bf NE seeking in zero-sum games.} There is a large amount of work on computing a NE of zero-sum games (or saddle point problems). We recall some works that focus on games with continuous strategy sets. \cite{hs:06} established the convergence of continuous-time best response dynamics to the NE set. \cite{nj:09} proposed a stochastic MD scheme to solve a convex-concave saddle point problem. Later on, \cite{clo:14} presented a stochastic accelerated primal-dual method with optimal convergence rate. In \cite{pb:16} and \cite{ykh:20}, the authors studied the computation of saddle points in strongly convex-strongly concave and non-convex-non-concave settings, respectively. Moreover, \cite{lei2020synchronous} considered the Nash equilibrium computation for general-sum stochastic Nash games. \cite{b:21} designed a primal-dual algorithm for constrained markov decision process (equivalent to a saddle point problem) and utilized regret analysis to prove zero constraint violation. In a different line of research, \cite{srivastava2013distributed} considered the case when a network of cooperative agents need to solve a zero-sum game and proposed a distributed Bregman-divergence algorithm to compute a NE. \cite{gharesifard2013distributed} introduced a continuous-time distributed dynamics for a more general framework of network zero-sum games, where two network of agents are involved in a zero-sum game. 
\cite{lh:15} further extended the framework to time-varying networks and designed a distributed projected subgradient descent algorithm. \section{Preliminaries \& Problem Formulation} {\bf Notations.} For a matrix $A = [a_{ij}]$, $a_{ij}$ denotes the element in the $i$th row and $j$th column. For a function $f(x_1,\dots,x_N)$, we use $\partial_i f$ to denote the subdifferential of $f$ with respect to $x_i$. Given a norm $\|\cdot\|$ on $\mathbb{R}^n$, $\|y\|_{\ast}:= \sup_{\|x\|\le 1}\langle y,x\rangle$ denotes the dual norm. A digraph is characterized by $\mathcal{G} = (\mathcal{V},\mathcal{E})$, where $\mathcal{V} = \{1,\dots,n\}$ is the set of nodes and $\mathcal{E}\subset\mathcal{V}\times\mathcal{V}$ is the set of edges. A path from $i_1$ to $i_p$ is an alternating sequence of edges $(i_1,i_2),(i_2,i_3),\dots, (i_{p-1},i_p)$ in the digraph with distinct nodes $i_m\in\mathcal{V}$, $\forall m: 1\le m\le p$. A digraph is strongly connected if there is a path between any pair of distinct nodes. \subsection{Two-Network Stochastic Zero-Sum Game} {\bf Two-Person Zero-Sum Game and Nash Equilibrium.} We recall the definition of a two-person zero-sum game and the sufficient conditions to ensure the existence of a Nash equilibrium. \begin{Def} A two-person zero-sum game consists of two players who select strategies from nonempty sets $X_1$ and $X_2$, respectively. Players observe a cost function $U_1: X_1\times X_2\to\mathbb{R}$ and $U_2: X_1\times X_2\to\mathbb{R}$ that satisfy $U_1(x_1,x_2) + U_2(x_1,x_2) = 0$ for all $(x_1,x_2)\in X_1\times X_2$. \end{Def} Define $U:= U_1 = -U_2$ as the cost function of the game. The most widely used solution concept in non-cooperative games is that of a Nash equilibrium (NE), which is formally defined as follows. \begin{Def} A strategy profile $x^{\ast} = (x_1^{\ast},x_2^{\ast})$ is a Nash equilibrium (NE) of a two-person zero-sum game if $U(x_1^{\ast},x_2)\le U(x_1^{\ast},x_2^{\ast})\le U(x_1,x_2^{\ast})$ for all $(x_1,x_2)\in X_1\times X_2$. \end{Def} \begin{thm}[Existence of NE \cite{gharesifard2013distributed}] Suppose that the strategy sets $X_1$ and $X_2$ are compact and convex. If the cost function $U$ is continuous and convex-concave (convex in $x_1$, concave in $x_2$) over $X_1\times X_2$, then there exists a NE for the considered two-person zero-sum game. \end{thm} {\bf A Two-Network Zero-Sum Game} \cite{gharesifard2013distributed,lh:15} is a generalized two-person zero-sum game, defined by a tuple $(\{\Sigma_1,\Sigma_2\},X_1\times X_2,U)$. The two players $\Sigma_1$ and $\Sigma_2$ are directed networks composed of $n_1$ agents and $n_2$ agents. For $l = 1,2$, $X_l\subset\mathbb{R}^{m_l}$, denoting the strategy set of $\Sigma_l$, is assumed to be compact and convex. The cost function $U: X_1\times X_2\to\mathbb{R}$ is defined by \[U(x_1,x_2) = \frac{1}{n_1}\sum_{i=1}^{n_1}f_{1,i}(x_1,x_2) = -\frac{1}{n_2}\sum_{j=1}^{n_2}f_{2,j}(x_1,x_2),\] where $f_{1,i}$ is a convex-concave continuous cost function associated with agent $i$ in $\Sigma_1$ and $f_{2,j}$ is a concave-convex continuous cost function associated with agent $j$ in $\Sigma_2$. The networks have no global decision-making capability and each agent only knows its own cost function. Within the same network, neighboring agents can exchange information. Moreover, the interaction between the two networks is specified by a bipartite network $\Sigma_{12}$, which means that each network can also obtain information about the other network through $\Sigma_{12}$. 
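As a toy instantiation (ours, not from the paper) of this cost structure, the following NumPy snippet builds bilinear local costs $f_{1,i}(x_1,x_2)=x_1^{\top}A_ix_2$ and $f_{2,j}(x_1,x_2)=-x_1^{\top}B_jx_2$ and checks the zero-sum condition $\frac{1}{n_1}\sum_i f_{1,i} = -\frac{1}{n_2}\sum_j f_{2,j}$, which holds as soon as the averages of the $A_i$ and of the $B_j$ coincide; all matrices here are arbitrary illustrative choices.
\begin{verbatim}
# Toy instantiation (ours) of the two-network cost structure with bilinear
# local costs; the matrices below are arbitrary illustrative choices.
import numpy as np

rng = np.random.default_rng(1)
n1, n2, d = 3, 2, 4
A = rng.standard_normal((n1, d, d))               # f_{1,i}(x1,x2) =  x1^T A_i x2
B = rng.standard_normal((n2, d, d))               # f_{2,j}(x1,x2) = -x1^T B_j x2
B[-1] = n2 * A.mean(axis=0) - B[:-1].sum(axis=0)  # force mean(B) = mean(A)

x1, x2 = rng.random(d), rng.random(d)
U1 = np.mean([x1 @ A[i] @ x2 for i in range(n1)])        #  (1/n1) sum_i f_{1,i}
U2 = -np.mean([-(x1 @ B[j] @ x2) for j in range(n2)])    # -(1/n2) sum_j f_{2,j}
assert np.isclose(U1, U2)                          # zero-sum condition holds
print("U(x1, x2) =", U1)
\end{verbatim}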
The goal of the agents in $\Sigma_1$ ($\Sigma_2$) is to collaboratively minimize (maximize) the cost function $U$ based on local information. More precisely, $\Sigma_1$, $\Sigma_2$ and $\Sigma_{12}$ are described by three directed graph sequences $\mathcal{G}_1(t) = (\mathcal{V}_1,\mathcal{E}_1(t))$, $\mathcal{G}_2(t) = (\mathcal{V}_2,\mathcal{E}_2(t))$ and $\mathcal{G}_{12}(t) = (\mathcal{V}_1\cup\mathcal{V}_2,\mathcal{E}_{12}(t))$, where $\{\mathcal{G}_1(t)\}$ and $\{\mathcal{G}_2(t)\}$ are uniformly jointly strongly connected\footnote{Namely, there exists an integer $B_l\ge 1$ such that the union graph $(\mathcal{V}_l,\bigcup_{t=k}^{k+B_l-1}\mathcal{E}_l(t))$ is strongly connected for $k\ge 0$.}. The communication between agents are modeled by mixing matrices $W_1(t)$, $W_2(t)$ and $W_{12}(t)$, which satisfy (i) for each $l\in\{1,2\}$, $w_{l,ij}(t)\ge\eta$ with $0 < \eta < 1$ when $(j,i)\in\mathcal{E}_l(t)$, and $w_{l,ij}(t) = 0$ otherwise; $w_{12,ij}(t) > 0$ only if $(i,j)\in\mathcal{E}_{12}(t)$; (ii) for each $i,j\in\mathcal{V}_l$, $\sum_{j = 1}^{n_l}w_{l,ij}(t) = \sum_{i = 1}^{n_l}w_{l,ij}(t) = 1$; (iii) for each $i\in\mathcal{V}_l$, $\sum_{j = 1}^{n_{3-l}}w_{12,ij}(t) = 1$. Agent $i\in\mathcal{V}_l$ can only communicate directly with its neighbors $\mathcal{N}_l^i(t) := \{j\mid(j,i)\in\mathcal{E}_l(t)\}$ and $\mathcal{N}_{12,l}^i(t) \triangleq \{j\mid (j,i)\in\mathcal{E}_{12}(t)\}$. \cite{no:10} proved the following result. \begin{lem}\label{lem_graph} Let $\Phi_l(t,s) = W_l(t)W_l(t-1)\cdots W_l(s)$ ($l = 1,2$) be the transition matrices. Then for all $t,s$ with $t\ge s\ge 0$, we have \[\bigg|[\Phi_l(t,s)]_{ij} - \frac{1}{n_l}\bigg| \le \Gamma_l\theta_l^{t-s},\quad l = 1,2\] where $\Gamma_l = (1 - \eta/4n_l^2)^{-2}$ and $\theta_l = (1 - \eta/4n_l^2)^{1/B_l}$. \end{lem} {\bf Two-Network Stochastic Zero-Sum Game.} Consider a stochastic generalization of a two-network zero-sum game, where the cost function of each agent is expectation-valued. To be specific, we assume that $f_{l,i}$ is the expected value of a stochastic mapping $\psi_{l,i}: X_1\times X_2\times\mathbb{R}^d\to\mathbb{R}$, i.e., \[f_{l,i}(x_1,x_2):=\mathbb{E}[\psi_{l,i}(x_1,x_2;\xi(\omega))],\quad l\in\{1,2\}\] where the expectation is taken with respect to the random vector $\xi: \Omega\to\mathbb{R}^d$ defined on a probability space $(\Omega,\mathcal{F},\mathbb{P})$. Such stochastic models represent a natural extension of two-network zero-sum games and find their applicability when the evaluation of the deterministic cost function is corrupted by errors. However, deterministic methods cannot be used to solve a two-network stochastic zero-sum game directly since generally the expectation cannot be evaluated efficiently or the underlying distribution $\mathbb{P}$ is unknown. This characteristic also makes the analysis of algorithm performance more complicated. \subsection{No-Regret Learning} In a no-regret learning framework \cite{z:03}, for $l = 1,2$, each agent $i\in\mathcal{V}_l$ plays repeatedly against the agents in $\Sigma_{3-l}$ by making a sequence of decisions from $X_l$. At each round $t = 1,\dots, T$ of a learning process, each agent $i$ in $\Sigma_l$ selects a strategy $x_{l,i}(t)\in X_l$ based on the available information, and receives a cost $f_{1,i}(x_{1,i}(t),u_{2,i}(t))$, where $u_{2,i}(t) \triangleq \sum_{j\in\mathcal{N}_{12,1}^i(t)}w_{12,ij}(t)x_{2,j}(t)$ is the weighted information received from its neighbors $\mathcal{N}_{12,1}^i(t):=\{j\in\mathcal{V}_2|(j,i)\in\mathcal{E}_{12}(t)\}$. 
Since $x_{1,i}(t)$ and $u_{2,i}(t)$ are generated with noisy information, we consider a notion different from the classical regret, called pseudo regret \cite[Section 2.1.2]{m:19}.
\begin{Def}
The pseudo regret of $\Sigma_1$ associated with agent $i$ cumulated up to time $T$ is defined as
\begin{align}
\bar{R}_1^{(i)}(T) &= \mathbb{E}\left[\sum_{t=1}^TU(x_{1,i}(t),u_{2,i}(t))\right]\notag\\
&\quad - \min_{x_1\in X_1}\mathbb{E}\left[\sum_{t=1}^TU(x_1,u_{2,i}(t))\right].\label{regret_def}
\end{align}
\end{Def}
Intuitively, $\bar{R}_1^{(i)}(T)$ represents the maximum expected gain agent $i\in\mathcal{V}_1$ could have achieved by playing the single best fixed strategy, had the estimated sequence of $\Sigma_2$'s strategies $\{u_{2,i}(t)\}_{t=1}^T$ and the cost functions been known in hindsight. An algorithm is referred to as no-regret for network $\Sigma_1$ if for all $i$, $\bar{R}_1^{(i)}(T)/T\to 0$ as $T\to\infty$.

\section{Proposed D-SMD Algorithm}
Our algorithm uses the notion of {\it prox-mapping}. For $l = 1,2$, $x,p\in X_l$, let the Bregman divergence be defined by
\begin{equation}\label{breg_def}
D_{\psi_l}(x,p) := \psi_l(x) - \psi_l(p) - \langle\nabla\psi_l(p),x - p\rangle,
\end{equation}
where $\psi_l$ is a $1$-strongly convex differentiable regularizer on $X_l$. Then the Bregman divergence generates an associated prox-mapping defined as
\begin{equation}\label{prox_mapping}
P_x^l(y) := \arg\min_{x'\in X_l}\{\langle y,x - x'\rangle + D_{\psi_l}(x',x)\}.
\end{equation}
Two typical examples of prox-mappings include the Euclidean projection and multiplicative weights \cite{s:11}.
\begin{example}\label{exm1}
Let $\psi_l(x) = \frac{1}{2}\|x\|_2^2$. Then the associated prox-mapping is
\[P_x^l(y) = \arg\min_{x'\in X_l}\|x' - x - y\|^2.\]
\end{example}
\begin{example}\label{exm2}
Let $X_l$ be a $d_l$-dimensional simplex and $\psi_l(x) = \sum_{j=1}^{d_l}x_j\log x_j$ be the entropic regularizer. The induced prox-mapping becomes the well-known multiplicative weights rule \cite{fs:99}
\[P_x^l(y) = \frac{(x_j\exp(y_j))_{j=1}^{d_l}}{\sum_{j=1}^{d_l}x_j\exp(y_j)}.\]
\end{example}
The distributed stochastic mirror descent algorithm proceeds as follows. In the $t$-th iteration, each agent replaces its local estimates of the states of $\Sigma_1$ and $\Sigma_2$ with the weighted averages of its neighbors' estimates in $\Sigma_1$ and $\Sigma_2$, respectively. Then it calculates a sampled subgradient of its local cost at the replaced estimates, and updates its estimate by a prox-mapping. The complete algorithm is summarized in Algorithm \ref{alg1}.
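To make the prox-mappings of Examples \ref{exm1} and \ref{exm2} and the per-agent update of Algorithm \ref{alg1} concrete, the following sketch implements the multiplicative weights rule in closed form and, as a simplification introduced only for illustration, the Euclidean prox-mapping specialized to box constraints; it then performs one D-SMD iteration for all agents of one network. Variable names and the per-agent subgradient oracles are our own choices, not part of the formal description.
\begin{lstlisting}[language=Python]
# Illustrative sketch of the prox-mappings and of one D-SMD iteration for the
# agents of a single network (names and oracles are hypothetical).
import numpy as np

def prox_entropic(x, y):
    # Multiplicative weights rule (Example 2): P_x(y) = x*exp(y) / sum(x*exp(y)).
    z = x * np.exp(y - np.max(y))            # max-shift for numerical stability
    return z / z.sum()

def prox_euclidean_box(x, y, lo=0.0, hi=1.0):
    # Euclidean prox-mapping (Example 1), specialised here to a box [lo,hi]^d
    # purely for illustration: argmin_{x'} ||x' - x - y||^2.
    return np.clip(x + y, lo, hi)

def dsmd_step(x_net, x_other, W, W12, subgrads, alpha, prox):
    # One iteration of the update described above: average over neighbours in
    # the own network (v) and over the other network (u), then take a prox
    # step along the negative sampled subgradient.  subgrads[i](v_i, u_i)
    # returns a sampled subgradient for agent i.
    v = W @ x_net
    u = W12 @ x_other
    return np.stack([prox(v[i], -alpha * subgrads[i](v[i], u[i]))
                     for i in range(x_net.shape[0])])
\end{lstlisting}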
\begin{algorithm}[tb]
\caption{Distributed Stochastic Mirror Descent (D-SMD)}
\label{alg1}
\textbf{Input}: Non-increasing nonnegative step-size sequence $\{\alpha(t)\}_{t\ge 0}$, mixing matrices $\{W_1(t)\}_{t\ge 0}$, $\{W_2(t)\}_{t\ge 0}$ and $\{W_{12}(t)\}_{t\ge 0}$\\
\textbf{Initialize}: $x_{l,i}(0)\in X_l$ for each $i \in\mathcal{V}_l$ and $l=1,2$
\begin{algorithmic}[1]
\REPEAT
\FOR {network $l=1,2$}
\FOR {agent $i = 1,\dots,n_l$}
\STATE Calculate weighted average of neighbors in $\Sigma_l$ and $\Sigma_{3-l}$:\\
$v_{l,i}(t) = \sum_{j=1}^{n_l}w_{l,ij}(t)x_{l,j}(t)$\\
$u_{3-l,i}(t) = \sum_{j=1}^{n_{3-l}}w_{12,ij}(t)x_{3-l,j}(t)$\\
\STATE Receive a sampled subgradient $\hat{g}_{l,i}(t)\in\partial_l\psi_{l,i}(v_{l,i}(t),u_{3-l,i}(t);\xi_{l,i}(t))$, where the terms $\xi_{l,i}(t)$ are independent and identically distributed realizations of the random variable $\xi$.\\
\STATE Update local estimates $x_{l,i}(t+1)$:\\
$x_{l,i}(t+1) = P_{v_{l,i}(t)}^l(-\alpha(t)\hat{g}_{l,i}(t))$
\ENDFOR
\ENDFOR
\UNTIL Convergence
\end{algorithmic}
\end{algorithm}
\begin{remark}
To compute a NE of network stochastic zero-sum games, the stochastic mirror descent method for convex-concave saddle point problems \cite{nj:09} requires global information about the network. In contrast, in our algorithm each agent merely communicates its decisions with its neighbors to update its estimates.
\end{remark}
Let $\mathcal{F}_t := \sigma\{x_{l,i}(0),\xi_{l,i}(s), l = 1,2, i\in\mathcal{V}_l, 0\le s\le t-1\}$ denote the $\sigma$-algebra generated by all the information up to time $t-1$. Then, by Algorithm \ref{alg1}, $x_{l,i}(t)$, $v_{l,i}(t)$ and $u_{l,i}(t)$ are adapted to $\mathcal{F}_t$. Denote by $g_{1,i}(t)\in\partial_1f_{1,i}(v_{1,i}(t),u_{2,i}(t))$ and $g_{2,i}(t)\in\partial_2f_{2,i}(u_{1,i}(t),v_{2,i}(t))$ the subgradients of $f_{1,i}$ and $f_{2,i}$ evaluated at $(v_{1,i}(t),u_{2,i}(t))$ and $(u_{1,i}(t),v_{2,i}(t))$, respectively. In the following, we state three assumptions, which are standard and widely used in stochastic approximation and distributed optimization \cite{nj:09,srivastava2013distributed}.
\begin{assumption}\label{asm1}
For $l = 1,2$, $i\in\mathcal{V}_l$, the cost function $f_{l,i}(\cdot,\cdot)$ is Lipschitz continuous over $X_1\times X_2$, i.e., there exists a constant $L > 0$ such that, for all $x_1,x_1'\in X_1$ and $x_2,x_2'\in X_2$,
$$|f_{l,i}(x_1,x_2) - f_{l,i}(x_1',x_2')|\le L(\|x_1 - x_1'\| + \|x_2 - x_2'\|).$$
\end{assumption}
\begin{assumption}\label{asm2}
There exists $\nu_l > 0$ such that for $l = 1,2$, and each $i\in\mathcal{V}_l$,
\[\mathbb{E}[\hat{g}_{l,i}(t)|\mathcal{F}_t] = g_{l,i}(t)\ \text{and}\ \ \mathbb{E}[\|g_{l,i}(t) - \hat{g}_{l,i}(t)\|_{\ast}^2|\mathcal{F}_t]\le \nu_l^2.\]
\end{assumption}
\begin{assumption}\label{asm3}
For $l = 1,2$, the Bregman divergence $D_{\psi_l}(x,y)$ is convex in $y$ and satisfies\footnote{The regularizers mentioned in Examples \ref{exm1} and \ref{exm2} both satisfy this condition, which is called Bregman reciprocity \cite{mz:19}.}
\[x_k\to x\quad\Rightarrow \quad D_{\psi_l}(x_k,x)\to 0.\]
\end{assumption}

\section{Guarantees on Regret}
In this section, we establish regret bounds of D-SMD in the convex-concave and strongly convex-strongly concave settings.
\subsection{Convex-Concave Case}
This part presents a regret bound that holds for all $T$ for D-SMD when the cost function $f_{1,i}(\cdot,\cdot)$ is convex-concave for all $i\in\mathcal{V}_1$. Theorem \ref{thm1} provides a general bound for any choice of (non-increasing) step-size sequence $\{\alpha(t)\}_{t=1}^T$.
It will then be the basis for Corollary \ref{col1}, which gives a way to select the algorithm parameters to achieve a sublinear regret.
\begin{thm}\label{thm1}
Let the cost function $f_{1,i}(\cdot,\cdot)$ be convex-concave and Assumptions \ref{asm1}-\ref{asm3} hold. Then the pseudo regret of D-SMD defined by \eqref{regret_def} is bounded by
\begin{align}
\bar{R}_1^{(i)}(T)&\le \sum_{t=1}^T\sum_{l=1}^2(L+\nu_l)(9L + \nu_l)\alpha(t-1)\notag\\
&\quad + 4L\sum_{t=1}^T\sum_{l=1}^2n_l\Gamma_l(L + \nu_l)\sum_{s=1}^{t-1}\theta_l^{t-1-s}\alpha(s-1) \notag\\
&\quad + 4L\sum_{t=1}^T\sum_{l=1}^2n_l\Gamma_l\theta_l^{t-1}\Lambda_l + \frac{R_1^2}{\alpha(T)}\label{regret_bound_final}
\end{align}
where $R_1^2:=\max\{D_{\psi_1}(x_1,x_1^{'}): x_1,x_1^{'}\in X_1\}$ is the Bregman diameter of $X_1$, $\Gamma_l$ and $\theta_l$ are the constants from Lemma \ref{lem_graph}, and $\Lambda_l:=\max_{i\in\mathcal{V}_l}\|x_{l,i}(0)\|$.
\end{thm}
The constants in the regret bound depend on the Lipschitz constant, the connectivity of the communication networks and the sampling error of the subgradients. Compared to the regret bound of centralized online mirror descent with step-sizes $\{\alpha(t)\}_{t\ge 1}$ \cite{s:11}, Theorem \ref{thm1} shows that D-SMD suffers from an additional term $4L\sum_{t=1}^T\sum_{l=1}^2\big(\frac{n_l\Gamma_l(L + \nu_l)}{1-\theta_l}\alpha(t-1) + n_l\Gamma_l\theta_l^{t-1}\Lambda_l\big)$, which is caused by the incomplete information of the agents. An immediate corollary is that D-SMD achieves a sublinear regret when $\alpha(t) = t^{-(\frac{1}{2} + \epsilon)}$, where $\epsilon\in [0,1/2)$. Specifically, if we set $\alpha(t) = 1/\sqrt{t}$, it is easy to check that $\bar{R}_1^{(i)}(T) = O(\sqrt{T})$, matching the optimal regret order for convex objectives \cite{h:16}. We formally state the result in the following corollary.
\begin{Col}\label{col1}
Let the conditions stated in Theorem \ref{thm1} hold. Then, for $\epsilon\in[0,\frac{1}{2})$, Algorithm \ref{alg1} with step-size sequence $\alpha(t) = t^{-(\frac{1}{2} + \epsilon)}$ yields a pseudo regret of order
\[\bar{R}_1^{(i)}(T) = O(T^{\frac{1}{2} + \epsilon}).\]
\end{Col}
\begin{proof}
By exchanging the order of summation and bounding the resulting geometric series, for each $l = 1,2$,
\begin{align}
\sum_{t=1}^T\sum_{s=1}^{t-1}\theta_l^{t-1-s}\alpha(s-1)&= \sum_{s=1}^{T-1}\alpha(s-1)\sum_{t=s+1}^{T}\theta_l^{t-1-s}\label{exchange_sum}\\
&\le \frac{1}{1 - \theta_l}\sum_{t=1}^T\alpha(t-1).\notag
\end{align}
Thus, we obtain the result by noting that $\frac{1}{\alpha(T)} = T^{\frac{1}{2} + \epsilon}$ and $\sum_{t=1}^T\alpha(t-1) = O(T^{\frac{1}{2} - \epsilon})$.
\end{proof}
Corollary \ref{col1} shows that the time-averaged pseudo regret $\bar{R}_1^{(i)}(T)/T$ of each agent vanishes at rate $O(T^{-\frac{1}{2} + \epsilon})$, which indicates that agents can learn the best fixed strategy in hindsight using only local information about the network.
\subsection{Strongly Convex-Strongly Concave Case}
\cite{ss:07} showed that by using a mirror descent algorithm, the regret bound can be improved to $O(\log T)$ for online optimization problems with generalized strongly convex losses. In this section, we extend this idea to network stochastic zero-sum games and establish a regret bound of order $O(\log T)$ for D-SMD. In the following, we give the formal definition of a generalized strongly convex function.
\begin{Def}[\cite{ss:07}]\label{strong_def}
A function $f$ is $\eta$-strongly convex over $X$ with respect to a convex and differentiable function $\psi$ if for all $x,y\in X$,
\[f(x) - f(y) - \langle x-y,\lambda\rangle\ge \eta D_{\psi}(x,y),\quad \forall\lambda\in\partial f(y).\]
\end{Def}
As in \cite{ss:07}, we also need to select a specific step-size sequence depending on the strong convexity coefficient. We formulate the regret bound in this case in Theorem \ref{thm1_2}.
\begin{thm}\label{thm1_2}
Let the cost function $f_{1,i}(x_1,x_2)$, $i\in\mathcal{V}_1$, be $\eta$-strongly convex in $x_1\in X_1$ with respect to $\psi_1$ for any $x_2\in X_2$, and let Assumptions \ref{asm1}-\ref{asm3} hold. If $\alpha(t) = \frac{1}{\eta (t+1)}$, then the pseudo regret of D-SMD defined by \eqref{regret_def} is bounded by
\begin{align}
\bar{R}_1^{(i)}(T)&\le \sum_{l=1}^2(L + \nu_l)\Big(9L + \nu_l + \frac{4Ln_l\Gamma_l}{1 - \theta_l}\Big)(1 + \log(T))\notag\\
&\quad + 4L\sum_{l=1}^2\frac{n_l\Gamma_l\Lambda_l\alpha(0)}{1 - \theta_l}.\label{regret_bound_final_1}
\end{align}
\end{thm}
Comparing this result with Theorem \ref{thm1}, we see that the generalized strong convexity eliminates the term $\frac{R_1^2}{\alpha(T)}$, which is the main factor restricting the regret order in the convex-concave setting.

\section{Guarantees on Convergence}
In general, \cite{bh:08} proved that a no-regret learning algorithm converges to a coarse correlated equilibrium, which is a relaxation of the NE. In this section, we are interested in the convergence of D-SMD to a NE.
\subsection{Convex-Concave Case}
For a two-person zero-sum game, we can directly derive the convergence rate of the time-averaged strategy profile $(\sum_{t=1}^Tx_1(t)/T,\sum_{t=1}^Tx_2(t)/T)$ from the established regret bound \cite{rs:13b}. However, we cannot apply this result to the two-network stochastic zero-sum game, since each network does not have access to the actual state of its adversarial network. More precisely, the difficulty is that the pseudo regret is defined through the weighted estimates $u_{l,i}(t)$, and hence the cumulative cost of $\Sigma_1$ (i.e., $\mathbb{E}\left[\sum_{t=1}^TU(x_{1,i}(t),u_{2,i}(t))\right]$) does not offset the cumulative cost of $\Sigma_2$ (i.e., $\mathbb{E}\left[\sum_{t=1}^T-U(u_{1,i}(t),x_{2,i}(t))\right]$). We now consider the following time-averaged iterates
\[\hat{x}_{l,i}(t) = \frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{s=0}^{t-1}\alpha(s)x_{l,i}(s),\quad t\ge 1,\ l=1,2.\]
In order to measure the approximation quality of the averaged sequence, we define the gap function
\begin{align}
&\quad\delta(\hat{x}_{1,i}(t),\hat{x}_{2,j}(t))\notag\\
&:= \max_{x_2\in X_2}U(\hat{x}_{1,i}(t),x_2) - \min_{x_1\in X_1}U(x_1,\hat{x}_{2,j}(t)).\label{gap_function}
\end{align}
Our goal is to present an expected bound for this gap function.
\begin{thm}\label{thm5}
Let $f_{1,i}(\cdot,\cdot)$ be convex-concave and $f_{2,j}(\cdot,\cdot)$ be concave-convex for all $i\in\mathcal{V}_1$, $j\in\mathcal{V}_2$. Suppose that Assumptions \ref{asm1}-\ref{asm3} hold. Let $\{x_{1,i}(s)\}_{0\le s\le t-1}$ and $\{x_{2,j}(s)\}_{0\le s\le t-1}$ be the sequences generated by D-SMD.
Then \begin{align*} \mathbb{E}\left[\delta(\hat{x}_{1,i}(t),\hat{x}_{2,j}(t))\right]&\le \frac{M_1 + M_2\sum_{s=0}^{t-1}\alpha^2(s)}{\sum_{s=0}^{t-1}\alpha(s)}, \end{align*} where $M_1:= \sum_{l=1}^2\left(\frac{4Ln_l\Gamma_l\Lambda_l\alpha(0)}{1-\theta_l} + 2R_l^2\right)$ and $M_2 := \sum_{l=1}^2\Big(4L(L + \nu_l)\left(\frac{n_l\Gamma_l}{1 - \theta_l} + 2\right) + (L + \nu_l)^2 + \nu_l^2/2\Big)$. \end{thm} We remark that the gap measure in Theorem \ref{thm5} is also used to describe the generalization property of an empirical solution in stochastic saddle point problems \cite{ly:21,zh:21}. It was shown by \cite{ah:21} that $(\hat{x}_{1,i}(t),\hat{x}_{2,j}(t))$ is an $\epsilon$-equilibrium of the two-network stochastic zero-sum game when $\delta(\hat{x}_{1,i}(t),\hat{x}_{2,j}(t))\le \epsilon$. Our result matches the error bound of SMD for saddle point problems \cite{nj:09} with the constants $M_1$ and $M_2$ affected by the structure of the networks. Since the NE is unique when the cost function is strongly convex-strongly concave, we may transform the expected error bound of Theorem \ref{thm5} into the classical mean-squared error of $(\hat{x}_{1,i}(t),\hat{x}_{2,j}(t))$ in this case. \begin{Col}\label{col2} Suppose that $U(\cdot,\cdot)$ is $\mu$-strongly convex-strongly concave and Assumptions \ref{asm1}-\ref{asm3} hold, and let $\{x_{1,i}(s)\}_{0\le s\le t-1}$ and $\{x_{2,j}(s)\}_{0\le s\le t-1}$ be generated by D-SMD. If $(x_1^{\ast},x_2^{\ast})$ denotes the NE, then \begin{align*} &\quad\mathbb{E}[\|\hat{x}_{1,i}(t) - x_1^{\ast}\|^2 +\|\hat{x}_{2,j}(t) - x_2^{\ast}\|^2]\\ &\le \frac{2}{\mu}\frac{M_1 + M_2\sum_{s=0}^{t-1}\alpha^2(s)}{\sum_{s=0}^{t-1}\alpha(s)}, \end{align*} where $M_1$ and $M_2$ are as defined in Theorem \ref{thm5}. \end{Col} This corollary implies that when $\{\alpha(t)\}_{t\ge 0}$ satisfies $\sum_{t=0}^{\infty}\alpha(t) = \infty$ and $\sum_{t=0}^{\infty}\alpha^2(t) < \infty$, $(\hat{x}_{1,i}(t),\hat{x}_{2,j}(t))$ converges in mean square to the unique NE for all $i\in\mathcal{V}_1$, $j\in\mathcal{V}_2$. \subsection{Strictly Convex-Strictly Concave Case} We now study the almost sure convergence of the strategy profile generated by D-SMD in the strictly convex-strictly concave setting. This usually requires an analysis tool quite different from that of the time-averaged sequence \cite{mz:19}. \begin{thm}\label{thm2} Let $U(\cdot,\cdot)$ be strictly convex-strictly concave and Assumptions \ref{asm1}-\ref{asm3} hold. If the step-size sequence satisfies $\sum_{t=1}^{\infty}\alpha(t) = \infty$ and $\sum_{t=1}^{\infty}\alpha^2(t) < \infty$, then D-SMD almost surely converges to the unique NE, denoted by $x^{\ast} = (x_1^{\ast},x_2^{\ast})$, i.e., with probability $1$, \[\lim_{t\to\infty}x_{1,i}(t) = x_1^{\ast},\quad \lim_{t\to\infty}x_{2,j}(t) = x_2^{\ast},\ \forall i\in\mathcal{V}_1, j\in\mathcal{V}_2.\] \end{thm} We conclude from Corollary \ref{col1} and Theorem \ref{thm2} that for strongly convex-strongly concave network stochastic zero-sum games, D-SMD is a no-regret learning process that converges to the NE when $\sum_{t=0}^{\infty}\alpha(t) = \infty$ and $\sum_{t=0}^{\infty}\alpha^2(t) < \infty$. Moreover, this result generalizes Theorem 6 of \cite{srivastava2013distributed}, which is only applicable to a deterministic distributed saddle point problem. 
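For completeness, the following sketch shows how the weighted time-averages $\hat{x}_{l,i}(t)$ used above would be formed from a stored trajectory, together with a step-size choice $\alpha(t) = (t+1)^{-p}$, $p\in(\frac{1}{2},1]$, satisfying $\sum_{t}\alpha(t) = \infty$ and $\sum_{t}\alpha^2(t) < \infty$; the trajectory here is a random placeholder rather than an output of D-SMD.
\begin{lstlisting}[language=Python]
# Illustrative sketch: forming the weighted time-averages hat_x(t) from a
# stored trajectory, with a step size satisfying sum alpha = inf and
# sum alpha^2 < inf.  The trajectory below is a random placeholder.
import numpy as np

def ergodic_average(xs, alphas):
    # hat_x(t) = ( sum_s alpha(s) x(s) ) / ( sum_s alpha(s) )
    xs, alphas = np.asarray(xs), np.asarray(alphas)
    return (alphas[:, None] * xs).sum(axis=0) / alphas.sum()

t = 200
alphas = (np.arange(t) + 1.0) ** (-0.75)     # alpha(s) = (s+1)^{-3/4}
xs = np.random.default_rng(1).random((t, 3)) # placeholder iterates x(0..t-1)
print(ergodic_average(xs, alphas))
\end{lstlisting}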
\section{Numerical Results}
In this section, we conduct numerical experiments for a network version of two-person stochastic matrix games \cite{zh:21} to evaluate the performance of the proposed D-SMD algorithm in the convex-concave and strongly convex-strongly concave cases. In both cases, we set the number of players in each network to $N = 12$ and let each player have $K=20$ actions to choose from. We focus on studying the influence of the step-size sequence and the network topology on the regret and convergence of D-SMD. Specifically, we fix the structure of $\Sigma_2$ and consider three types of graphs with different degrees of connectivity (Cycle graph $<$ Random graph $<$ Complete graph) for $\Sigma_1$ in our simulations.
\begin{itemize}
\item {\it Cycle graph} has a single cycle and each node has exactly two immediate neighbors.
\item {\it Random graph} is constructed by connecting nodes randomly; each edge is included in the graph with probability $0.7$, independently of every other edge.
\item {\it Complete graph} is constructed by connecting all node pairs.
\end{itemize}
\subsection{Convex-Concave Case}
Consider the following network stochastic zero-sum game
\[\min_{x_1\in\Delta_K}\max_{x_2\in\Delta_K}U(x_1,x_2) :=\frac{1}{N}\sum_{i=1}^Nx_1^T\mathbb{E}_{\xi}[A_{\xi}^i]x_2\]
where $x_1$ and $x_2$ are the mixed strategies of players in $\Sigma_1$ and $\Sigma_2$, respectively, which belong to the simplex $\Delta_K:=\{z\in\mathbb{R}^K: z\ge 0, \textbf{1}^Tz = 1\}$, and $A_{\xi}^i$ is the stochastic cost matrix of player $i\in\mathcal{V}_1$. Let $\{x_{1,i}(t)\}_{t\ge 0}$ and $\{x_{2,i}(t)\}_{t\ge 0}$ be the outputs of D-SMD. Recalling the gap function \eqref{gap_function}, we use its average over all players, defined as $\bar{\delta}(t):= \frac{1}{N^2}\sum_{i=1}^N\sum_{j=1}^N\delta(\hat{x}_{1,i}(t),\hat{x}_{2,j}(t))$, to demonstrate the convergence of D-SMD. In the first experiment, we run D-SMD from $t=1$ to $t=500$ for different learning rates $\alpha(t) = t^{-\frac{1}{2}}, t^{-\frac{2}{3}}, t^{-\frac{3}{4}}$ and estimate the expected average gap $\mathbb{E}[\bar{\delta}(t)]$ and the time-averaged pseudo regret $\bar{R}_1^{(i)}(t)/t$ by averaging across $50$ sample paths. The empirical results are shown in Figure \ref{fig1}, which indicates that D-SMD achieves a sublinear regret bound and that a slower learning rate yields both a better regret rate and a faster convergence rate. These observations are consistent with our theoretical results in the convex-concave case. In the second experiment, we use the learning rate $\alpha(t) = 1/\sqrt{t}$ and compare the expected average gap and average pseudo regret under different network topologies in Figure \ref{fig2}, which demonstrates that the network topology has only a slight influence on the regret rate and convergence rate.
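For this bilinear game the gap function \eqref{gap_function} admits a closed form, since a linear function attains its extrema over the simplex at vertices: with $\bar{A} := \frac{1}{N}\sum_{i=1}^N\mathbb{E}_{\xi}[A_{\xi}^i]$, we have $\delta(\hat{x}_1,\hat{x}_2) = \max_{p}(\bar{A}^{\top}\hat{x}_1)_p - \min_{p}(\bar{A}\hat{x}_2)_p$. The following sketch, with a synthetic cost matrix rather than the data used in our experiments, evaluates this quantity; averaging over player pairs gives $\bar{\delta}(t)$.
\begin{lstlisting}[language=Python]
# Illustrative sketch with a synthetic cost matrix: the gap function for the
# bilinear game on the simplex reduces to a max/min over coordinates.
import numpy as np

rng = np.random.default_rng(0)
N, K = 12, 20
A_mean = rng.standard_normal((N, K, K))      # stand-ins for E_xi[A_xi^i]
A_bar = A_mean.mean(axis=0)                  # U(x1, x2) = x1^T A_bar x2

def gap(x1_hat, x2_hat):
    # delta = max_{x2 in simplex} U(x1_hat, x2) - min_{x1 in simplex} U(x1, x2_hat)
    return (A_bar.T @ x1_hat).max() - (A_bar @ x2_hat).min()

x1_hat = np.full(K, 1.0 / K)                 # e.g. uniform mixed strategies
x2_hat = np.full(K, 1.0 / K)
print(gap(x1_hat, x2_hat))                   # average over pairs gives bar-delta
\end{lstlisting}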
\begin{figure}[h]
\centering
\subfigure[]{\includegraphics[width = 0.48\columnwidth]{regret_convex_stepsize_type2-eps-converted-to}}
\subfigure[]{\includegraphics[width = 0.48\columnwidth]{convergence_convex_stepsize_1008-eps-converted-to}}
\caption{Average pseudo regret and expected average gap of D-SMD under different learning rates in the convex-concave case}
\label{fig1}
\end{figure}
\begin{figure}[h]
\centering
\subfigure[]{\includegraphics[width = 0.48\columnwidth]{regret_convex_network_0904_2-eps-converted-to}}
\subfigure[]{\includegraphics[width = 0.48\columnwidth]{convergence_convex_network_1008-eps-converted-to}}
\caption{Average pseudo regret and expected average gap of D-SMD under different network topologies in the convex-concave case}
\label{fig2}
\end{figure}
\subsection{Strongly Convex-Strongly Concave Case}
To investigate the performance of D-SMD in the strongly convex-strongly concave case, we consider the regularized network stochastic zero-sum game with the cost function
\begin{align*}
U(x_1,x_2) &:= \sum_{p=1}^Kx_1^{(p)}\log x_1^{(p)} + \frac{1}{N}\sum_{i=1}^Nx_1^T\mathbb{E}_{\xi}[A_{\xi}^i]x_2\\
&\quad - \sum_{p=1}^Kx_2^{(p)}\log x_2^{(p)}.
\end{align*}
Notice that $U(x_1,x_2)$ is $1$-strongly convex-strongly concave with respect to the regularizer $\psi(x) = \sum_{p=1}^Kx^{(p)}\log x^{(p)}$. We use the dual averaging algorithm proposed in \cite{m:19} to compute the Nash equilibrium, denoted by $(x_1^{\ast},x_2^{\ast})$. Due to the uniqueness of the NE in the strongly convex-strongly concave case, we consider the average absolute error $\bar{\delta}'(t):=\frac{1}{N}\sum_{i=1}^N\left(\|x_{1,i}(t) - x_1^{\ast}\| + \|x_{2,i}(t) - x_2^{\ast}\|\right)$ to illustrate the convergence of the sequences $\{(x_{1,i}(t),x_{2,i}(t))\}_{t\ge 0}$. The time-averaged pseudo regret $\bar{R}_1^{(i)}(t)/t$ and the expected absolute error $\mathbb{E}[\bar{\delta}'(t)]$ under different learning rates are plotted in Figure \ref{fig3}, and the performance under different network topologies with learning rate $\alpha(t) = 1/t$ is displayed in Figure \ref{fig4}. D-SMD attains a better regret rate in the strongly convex-strongly concave case, and the influence of the network topology and the learning rate is similar to that in the convex-concave case.
\begin{figure}[h]
\centering
\subfigure[]{\includegraphics[width = 0.48\columnwidth]{regret_strongly_stepsize_type2-eps-converted-to}}
\subfigure[]{\includegraphics[width = 0.48\columnwidth]{convergence_strongly_stepsize_type2-eps-converted-to}}
\caption{Average pseudo regret and expected absolute error of D-SMD under different learning rates in the strongly convex-strongly concave case}
\label{fig3}
\end{figure}
\begin{figure}[h]
\centering
\subfigure[]{\includegraphics[width = 0.48\columnwidth]{regret_strongly_netwrok_type2-eps-converted-to}}
\subfigure[]{\includegraphics[width = 0.48\columnwidth]{convergence_strongly_network_type2-eps-converted-to}}
\caption{Average pseudo regret and expected absolute error of D-SMD under different network topologies in the strongly convex-strongly concave case}
\label{fig4}
\end{figure}
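The $1$-strong convexity of the regularized cost above with respect to the entropic regularizer can also be checked directly: for fixed $x_2$, the map $x_1\mapsto U(x_1,x_2)$ equals $\psi(x_1)$ plus a linear term, so the inequality in Definition \ref{strong_def} holds with $\eta=1$ (in fact with equality in $x_1$). The following is a small numerical sketch with a synthetic mean cost matrix, not our experimental data; all names are our own.
\begin{lstlisting}[language=Python]
# Numerical sanity check (synthetic data): for fixed x2 the regularised cost
# equals the entropic regulariser plus a linear term, so Definition 3 holds
# with eta = 1.
import numpy as np

rng = np.random.default_rng(0)
K = 20
A_bar = rng.standard_normal((K, K))          # stand-in for the mean cost matrix

def psi(x):                                  # entropic regulariser
    return np.sum(x * np.log(x))

def bregman(x, y):                           # D_psi(x, y)
    return psi(x) - psi(y) - np.dot(np.log(y) + 1.0, x - y)

def f1(x1, x2):                              # x1 -> U(x1, x2), up to a constant
    return psi(x1) + x1 @ A_bar @ x2

def simplex_point(k):
    z = rng.random(k)
    return z / z.sum()

x, y, x2 = simplex_point(K), simplex_point(K), simplex_point(K)
grad_y = np.log(y) + 1.0 + A_bar @ x2        # gradient of f1(., x2) at y
lhs = f1(x, x2) - f1(y, x2) - np.dot(grad_y, x - y)
print(lhs >= bregman(x, y) - 1e-12)          # True: eta = 1 in Definition 3
\end{lstlisting}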
\section{Conclusion and Future Work}
In this paper, we proposed distributed stochastic mirror descent (D-SMD) to extend no-regret learning in two-person zero-sum games to network stochastic zero-sum games. In contrast to the previous works on Nash equilibrium seeking for network zero-sum games, we not only derived the convergence of D-SMD to the set of Nash equilibria, but also established regret bounds of D-SMD for convex-concave and strongly convex-strongly concave costs. The theoretical results were empirically verified by experiments on solving network stochastic matrix games. It is of interest to study the convergence rate of the actual iterates of D-SMD in the strongly convex-strongly concave case. In addition, another interesting topic is to develop an optimistic variant of our algorithm as in \cite{ddk:11} to improve the regret rate and obtain last-iterate convergence in the merely convex-concave case.

\section*{Appendix}
\begin{lem}\cite[Lemma B.2, Proposition B.3]{ml:19}\label{lem1}
Let $\psi$ be a continuously differentiable $\sigma$-strongly convex function on $\mathcal{X}$. Then, for all $x,y,z\in\mathcal{X}$, the Bregman divergence defined by \eqref{breg_def} satisfies
\begin{align}
D_{\psi}(y,x) - D_{\psi}(y,z) - D_{\psi}(z,x) &= \langle\nabla\psi(z) - \nabla\psi(x),y-z\rangle,\label{breg_prop1}\\
D_{\psi}(x,y)&\ge \frac{\sigma}{2}\|x - y\|^2.\label{breg_prop2}
\end{align}
Moreover, let $x^{+} = P_x(v)$ for $v\in\mathcal{X}^{\ast}$, where $\mathcal{X}^{\ast}$ is the dual space of $\mathcal{X}$ and $P_x(v) := \arg\min_{x'\in \mathcal{X}}\{\langle v,x - x'\rangle + D_{\psi}(x',x)\}$. Then
\begin{equation}
D_{\psi}(y,x^{+}) \le D_{\psi}(y,x) + \langle v,x - y\rangle + \frac{1}{2\sigma}\|v\|_{\ast}^2,
\end{equation}
where $\|v\|_{\ast}:= \sup\{\langle v,x\rangle: x\in\mathcal{X},\|x\|\le 1\}$ denotes the dual norm on $\mathcal{X}$.
\end{lem}
\begin{lem}\cite{rs:71}\label{lem3}
Let $\{X_t\}$, $\{Y_t\}$ and $\{Z_t\}$ be sequences of non-negative random variables with $\sum_{t=0}^{\infty}Z_t < \infty$ almost surely and let $\{\mathcal{F}_t\}$ be a filtration such that $\mathcal{F}_t\subset\mathcal{F}_{t+1}$. If $X_t$, $Y_t$, $Z_t$ are adapted to $\{\mathcal{F}_t\}$ and
\[\mathbb{E}[Y_{t+1}\mid\mathcal{F}_t] \le Y_t - X_t + Z_t,\]
then, almost surely, $\sum_{t=0}^{\infty}X_t < \infty$ and $Y_t$ converges to a non-negative random variable $Y$.
\end{lem}
\begin{lem}\label{lem2}
Let Assumptions \ref{asm1}-\ref{asm2} hold. Suppose that $x_{l,i}(t)$, $v_{l,i}(t)$ and $u_{l,i}(t)$ are generated by Algorithm \ref{alg1}. Then, for each $l\in\{1,2\}$ and $i\in\mathcal{V}_l$,
\begin{align}
\mathbb{E}[\|x_{l,i}(t) - \bar{x}_{l}(t)\|] &\le H_l(t),\label{lem_bound_2}\\
\mathbb{E}[\|\bar{x}_{l}(t) - v_{l,i}(t)\|] &\le H_l(t),\label{lem_bound_3}\\
\mathbb{E}[\|\bar{x}_{l}(t) - u_{l,i}(t)\|] &\le H_l(t),\label{lem_bound_4}
\end{align}
where $\bar{x}_{l}(t) := \frac{1}{n_l}\sum_{j=1}^{n_l}x_{l,j}(t)$ denotes the average state of network $\Sigma_l$,
\begin{align}
H_l(t) &= n_l\Gamma_l\theta_l^{t-1}\Lambda_l + 2(L + \nu_l)\alpha(t-1) + n_l\Gamma_l(L + \nu_l)\sum_{s=1}^{t-1}\theta_l^{t-1-s}\alpha(s-1),\label{H_l}
\end{align}
and $\Lambda_l\triangleq \max_{i\in\mathcal{V}_l}\|x_{l,i}(0)\|$.
\end{lem}
\begin{proof}
By the definition of $v_{l,i}(t)$, we write the iterates as follows
\begin{align*}
x_{l,i}(t) &= v_{l,i}(t-1) - (v_{l,i}(t-1) - x_{l,i}(t))\notag\\
&= \sum_{j=1}^{n_l}[\Phi_l(t-1,0)]_{ij}x_{l,j}(0) + \sum_{s=1}^{t-1}\sum_{j=1}^{n_l}[\Phi_l(t-1,s)]_{ij}d_{l,j}(s-1) + d_{l,i}(t-1),
\end{align*}
where $d_{l,i}(t-1) = x_{l,i}(t) - v_{l,i}(t-1)$. By using the doubly stochastic property of $W_l(t)$, we derive
\begin{equation*}
\bar{x}_l(t) = \frac{1}{n_l}\sum_{j=1}^{n_l}x_{l,j}(0) + \frac{1}{n_l}\sum_{s=1}^t\sum_{j=1}^{n_l}d_{l,j}(s-1).
\end{equation*}
Therefore,
\begin{align}
\|x_{l,i}(t) - \bar{x}_l(t)\|&\le \sum_{j=1}^{n_l}\Big|[\Phi_l(t-1,0)]_{ij} - \frac{1}{n_l}\Big|\,\|x_{l,j}(0)\|\notag\\
&\quad + \sum_{s=1}^{t-1}\sum_{j=1}^{n_l}\Big|[\Phi_l(t-1,s)]_{ij} - \frac{1}{n_l}\Big|\,\|d_{l,j}(s-1)\| + \Big\|\frac{1}{n_l}\sum_{j=1}^{n_l}d_{l,j}(t-1) - d_{l,i}(t-1)\Big\|.\label{bound_3}
\end{align}
Thus, we only need to bound the term $\|d_{l,i}(t)\|$. Recalling the definition of $x_{l,i}(t+1)$ and \eqref{prox_mapping}, we have
\begin{equation}\label{def_x}
x_{l,i}(t+1) = \arg\min_{x'\in X_l}\{\langle \alpha(t)\hat{g}_{l,i}(t),x' - v_{l,i}(t)\rangle + D_{\psi_l}(x',v_{l,i}(t))\}.
\end{equation}
From the first-order optimality condition, we derive that for all $x_l\in X_l$,
\begin{equation}\label{proj}
\langle \nabla \psi_l(x_{l,i}(t+1)) - \nabla \psi_l(v_{l,i}(t)) + \alpha(t)\hat{g}_{l,i}(t), x_{l,i}(t+1) - x_l\rangle \le 0.
\end{equation}
Setting $x_l = v_{l,i}(t)$ implies
\begin{align*}
\alpha(t)\|\hat{g}_{l,i}(t)\|_{\ast}\|d_{l,i}(t)\| &\ge \langle\alpha(t)\hat{g}_{l,i}(t), v_{l,i}(t) - x_{l,i}(t+1)\rangle\\
&\ge \langle \nabla \psi_l(x_{l,i}(t+1)) - \nabla \psi_l(v_{l,i}(t)), x_{l,i}(t+1) - v_{l,i}(t)\rangle\\
&\ge \|d_{l,i}(t)\|^2,
\end{align*}
where the last inequality follows by the strong convexity of $\psi_l$. Therefore,
\begin{equation}\label{error_bound}
\|d_{l,i}(t)\|\le \alpha(t)\|\hat{g}_{l,i}(t)\|_{\ast}.
\end{equation}
It follows from Assumption \ref{asm1} that $\|g_{l,i}(t)\|_{\ast}\le L$. By H\"{o}lder's inequality and the bounded second moment condition of Assumption \ref{asm2}, we further obtain
\begin{equation}\label{sample_gradient_bound}
\mathbb{E}[\|\hat{g}_{l,i}(t)\|_{\ast}^2|\mathcal{F}_t]\le (L + \nu_l)^2.
\end{equation}
Note that $\sqrt{x}$ is a concave function. Using Jensen's inequality,
\[\mathbb{E}[\|\hat{g}_{l,i}(t)\|_{\ast}|\mathcal{F}_t]\le \sqrt{\mathbb{E}[\|\hat{g}_{l,i}(t)\|_{\ast}^2|\mathcal{F}_t]}\le L + \nu_l.\]
By the law of iterated expectations, $\mathbb{E}[\|\hat{g}_{l,i}(t)\|_{\ast}]\le L + \nu_l$. This together with \eqref{error_bound} produces
$$\mathbb{E}[\|d_{l,i}(t)\|]\le (L + \nu_l)\alpha(t).$$
Then, by Lemma \ref{lem_graph} and taking the expectation in \eqref{bound_3}, we derive \eqref{lem_bound_2}. Furthermore, by the convexity of $\|\cdot\|$ and $\sum_{j=1}^{n_l}w_{l,ij}(t) = 1$, we obtain
\begin{align*}
\mathbb{E}[\|v_{l,i}(t) - \bar{x}_{l}(t)\|] & = \mathbb{E}\left[\left\|\sum_{j=1}^{n_l}w_{l,ij}(t)x_{l,j}(t) - \bar{x}_{l}(t)\right\|\right] \le \sum_{j=1}^{n_l}w_{l,ij}(t)\mathbb{E}[\|x_{l,j}(t) - \bar{x}_{l}(t)\|]\le H_l(t).
\end{align*}
Thus, \eqref{lem_bound_3} holds. In a similar way, by using $\sum_{j=1}^{n_l}w_{12,ij}(t) = 1$, we obtain \eqref{lem_bound_4}.
\end{proof}
\noindent{\bf Proof of Theorem 2}. By \eqref{proj} and using Lemma \ref{lem1}, we obtain that, for all $x_1\in X_1$,
\begin{align}
\langle \alpha(t)\hat{g}_{1,i}(t),x_{1,i}(t+1) - x_1\rangle &\le \langle \nabla\psi_1(v_{1,i}(t)) - \nabla\psi_1(x_{1,i}(t+1)),x_{1,i}(t+1) - x_1\rangle\notag\\
&= D_{\psi_1}(x_1, v_{1,i}(t)) - D_{\psi_1}(x_1, x_{1,i}(t+1)) - D_{\psi_1}(x_{1,i}(t+1), v_{1,i}(t))\notag\\
&\le D_{\psi_1}(x_1, v_{1,i}(t)) - D_{\psi_1}(x_1, x_{1,i}(t+1)).\label{thm2-1}
\end{align}
Note by Assumption \ref{asm3} that
\begin{align*}
\sum_{t=1}^T\frac{1}{\alpha(t)}\sum_{i=1}^{n_1}D_{\psi_1}(x_1,v_{1,i}(t+1))&\le \sum_{t=1}^T\frac{1}{\alpha(t)}\sum_{i=1}^{n_1}\sum_{j=1}^{n_1}w_{1,ij}(t)D_{\psi_1}(x_1,x_{1,j}(t+1))\\
& = \sum_{t=1}^T\frac{1}{\alpha(t)}\sum_{i=1}^{n_1}D_{\psi_1}(x_1,x_{1,i}(t+1)).
\end{align*} Thus, dividing $\alpha(t)$ from both sides of \eqref{thm2-1} and taking a summation for $i=1,\dots,n_1$ and $t=1,\dots,T$, we derive \begin{align} &\quad\sum_{t=1}^T\sum_{i=1}^{n_1}\langle \hat{g}_{1,i}(t), x_{1,i}(t+1) - x_1\rangle \notag\\ &\le \sum_{i=1}^{n_1}\sum_{t=1}^T\frac{1}{\alpha(t)}[D_{\psi_1}(x_1, v_{1,i}(t)) - D_{\psi_1}(x_1, v_{1,i}(t+1))] \label{inner_bound_1}\\ &\le \sum_{i=1}^{n_1}\left[\left(\frac{1}{\alpha(1)}\right)D_{\psi_1}(x_1, v_{1,i}(1)) + \sum_{t=2}^TD_{\psi_1}(x_1, v_{1,i}(t))\left(\frac{1}{\alpha(t)} - \frac{1}{\alpha(t-1)}\right)\right]\label{bound_2}\\ &\le \frac{n_1R_1^2}{\alpha(T)},\notag \end{align} where the last inequality follows from the definition of $R_1^2$ and the non-increasing of $\alpha(t)$. This together with \eqref{error_bound} and the definition $d_{l,i}(t) = x_{l,i}(t+1) - v_{l,i}(t)$ yields \begin{align} &\quad\mathbb{E}\left[\sum_{t=1}^T\sum_{i=1}^{n_1}\langle \hat{g}_{1,i}(t), v_{1,i}(t) - x_1\rangle\right]\notag\\ &= \sum_{t=1}^T\sum_{i=1}^{n_1}\mathbb{E}[\langle \hat{g}_{1,i}(t), x_{1,i}(t+1) - x_1\rangle] + \sum_{t=1}^T\sum_{i=1}^{n_1}\mathbb{E}[\langle \hat{g}_{1,i}(t), v_{1,i}(t) - x_{1,i}(t+1)\rangle]\notag\\ &\le \frac{n_1R_1^2}{\alpha(T)} + \sum_{t=1}^T\sum_{i=1}^{n_1}\alpha(t)\mathbb{E}[\|\hat{g}_{1,i}(t)\|_{\ast}^2]\le \frac{n_1R_1^2}{\alpha(T)} + n_1(L+\nu_1)^2\sum_{t=1}^T\alpha(t) ,\label{upper_bound_1} \end{align} where the last inequality follows from \eqref{sample_gradient_bound}. Since $v_{1,i}(t)$ is adapted to $\mathcal{F}_t$, by Assumption \ref{asm2}, \begin{equation}\label{condition_unbias} \mathbb{E}[\langle g_{1,i}(t) - \hat{g}_{1,i}(t),v_{1,i}(t) - x_1\rangle|\mathcal{F}_{t}] = 0. \end{equation} Therefore, \begin{equation}\label{unbias} \mathbb{E}[\langle g_{1,i}(t) - \hat{g}_{1,i}(t),v_{1,i}(t) - x_1\rangle] = 0. \end{equation} As a combination of \eqref{upper_bound_1} and \eqref{unbias}, we conclude \begin{equation}\label{upper_bound} \mathbb{E}\left[\sum_{t=1}^T\sum_{i=1}^{n_1}\langle g_{1,i}(t), v_{1,i}(t) - x_1\rangle\right] \le \frac{n_1R_1^2}{\alpha(T)} + n_1(L+\nu_1)^2\sum_{t=1}^T\alpha(t). \end{equation} Next, we establish a lower bound for $\sum_{i=1}^{n_1}\langle g_{1,i}(t), v_{1,i}(t) - x_1\rangle$. Due to the convexity of $f_{1,i}(\cdot,\cdot)$ with respect to the first element, \begin{align} \sum_{i=1}^{n_1}\langle g_{1,i}(t), v_{1,i}(t) - x_1\rangle &\ge \sum_{i=1}^{n_1}[f_{1,i}(v_{1,i}(t),u_{2,i}(t)) - f_{1,i}(x_1,u_{2,i}(t))].\label{lower_bound_1} \end{align} On the other hand, by adding and subtracting some terms, we get \begin{align} &\quad n_1(U(\bar{x}_1(t),\bar{x}_2(t)) - U(x_1,\bar{x}_2(t)))\notag\\ &= \sum_{i=1}^{n_1}[f_{1,i}(\bar{x}_1(t),\bar{x}_2(t)) - f_{1,i}(x_{1,i}(t),\bar{x}_2(t)) + f_{1,i}(x_{1,i}(t),\bar{x}_2(t)) - f_{1,i}(v_{1,i}(t),\bar{x}_2(t))\notag\\ &\quad + f_{1,i}(v_{1,i}(t),\bar{x}_{2}(t)) - f_{1,i}(v_{1,i}(t),u_{2,i}(t)) + f_{1,i}(v_{1,i}(t),u_{2,i}(t)) - f_{1,i}(x_1,u_{2,i}(t))\notag\\ &\quad + f_{1,i}(x_1,u_{2,i}(t)) - f_{1,i}(x_1,\bar{x}_2(t))]\notag\\ &\le \sum_{i=1}^{n_1}[L(\|x_{1,i}(t) - \bar{x}_1(t)\| + \|x_{1,i}(t) - v_{1,i}(t)\|) + 2L\|u_{2,i}(t) - \bar{x}_2(t)\|\notag\\ &\quad + f_{1,i}(v_{1,i}(t),u_{2,i}(t)) - f_{1,i}(x_1,u_{2,i}(t))],\label{add_substract} \end{align} where the last inequality follows from the Lipschitz continuity of $f_{1,i}$. 
Plugging the above inequality to \eqref{lower_bound_1}, we derive \begin{align} \sum_{i=1}^{n_1}\langle g_{1,i}(t), v_{1,i}(t) - x_1\rangle &\ge n_1(U(\bar{x}_1(t),\bar{x}_2(t)) - U(x_1,\bar{x}_2(t)))\notag\\ &\quad - \sum_{i=1}^{n_1}[L(\|x_{1,i}(t) - \bar{x}_1(t)\| + \|x_{1,i}(t) - v_{1,i}(t)\|) + 2L\|u_{2,i}(t) - \bar{x}_2(t)\|].\label{lower_bound_2} \end{align} It remains to connect this lower bound and $\bar{R}_1^{(i)}(T)$. Notice that \begin{align} &\quad U(\bar{x}_1(t),\bar{x}_2(t)) - U(x_1,\bar{x}_2(t))\notag\\ &= U(\bar{x}_1(t),\bar{x}_2(t)) - U(\bar{x}_1(t),u_{2,i}(t)) + U(\bar{x}_1(t),u_{2,i}(t)) - U(x_{1,i}(t),u_{2,i}(t))\notag\\ &\quad + U(x_{1,i}(t),u_{2,i}(t)) - U(x_1,u_{2,i}(t)) + U(x_1,u_{2,i}(t)) - U(x_1,\bar{x}_2(t))\notag\\ &\ge U(x_{1,i}(t),u_{2,i}(t)) - U(x_1,u_{2,i}(t)) - L\|x_{1,i}(t) - \bar{x}_1(t)\|\notag\\ &\quad - 2L\|u_{2,i}(t) - \bar{x}_2(t)\|.\label{lower_bound_3} \end{align} Recall from the definition of $\bar{R}_1^{(i)}(T)$ that \begin{align*} \max_{x_1\in X_1}\mathbb{E}\left[\sum_{t=1}^T(U(x_{1,i}(t),u_{2,i}(t)) - U(x_1,u_{2,i}(t)))\right]&= \bar{R}_1^{(i)}(T). \end{align*} By taking expectation on both sides of \eqref{lower_bound_2}-\eqref{lower_bound_3} and making a summation from $t=1$ to $t=T$, we obtain \begin{align} \max_{x_1\in X_1}\mathbb{E}\left[\sum_{t=1}^T\sum_{i=1}^{n_1}\langle g_{1,i}(t), v_{1,i}(t) - x_1\rangle\right] &\ge n_1\bar{R}_1^{(i)}(T) - n_1L\sum_{t=1}^T\mathbb{E}[\|x_{1,i}(t) - \bar{x}_1(t)\| + 2\|u_{2,i}(t) - \bar{x}_2(t)\|]\notag\\ &\quad -L\sum_{t=1}^T\sum_{i=1}^{n_1}\mathbb{E}\Big[\|x_{1,i}(t) - \bar{x}_1(t)\| + \|x_{1,i}(t) - v_{1,i}(t)\|\notag\\ &\quad + 2\|u_{2,i}(t) - \bar{x}_2(t)\|\Big]\label{lower_bound} \end{align} Note by Lemma \ref{lem2} and the elementary inequality $\|a + b\|\le \|a\| + \|b\|$ that $\mathbb{E}[\|x_{1,i}(t) - v_{1,i}(t)\|]\le 2H_1(t)$. Thus, combining \eqref{lower_bound} with \eqref{upper_bound}, we derive \begin{align*} \bar{R}_1^{(i)}(T)&\le \frac{R_1^2}{\alpha(T)} + (L+\nu_1)^2\sum_{t=1}^T\alpha(t) + 4L\sum_{t=1}^T(H_1(t) + H_2(t)). \end{align*} This together with \eqref{H_l} produces \eqref{regret_bound_final}. \noindent{\bf Proof of Theorem 3}. Taking the same idea as in the proof of Theorem \ref{thm1}, we first establish an upper bound for $\mathbb{E}\left[\sum_{t=1}^T\sum_{i=1}^{n_1}\langle g_{1,i}(t),v_{1,i}(t) - x_1\rangle\right]$. Setting $\alpha(t) = \frac{1}{\eta (t+1)}$ in \eqref{bound_2}, we obtain \[\sum_{t=1}^T\sum_{i=1}^{n_1}\langle \hat{g}_{1,i}(t), x_{1,i}(t+1) - x_1\rangle\le \sum_{t=1}^T\sum_{i=1}^{n_1}\eta D_{\psi_1}(x_1,v_{1,i}(t)).\] Similar to the procedure of obtaining \eqref{upper_bound_1}, we use \eqref{error_bound} to derive \begin{equation*} \sum_{t=1}^T\sum_{i=1}^{n_1}\langle \hat{g}_{1,i}(t),v_{1,i}(t) - x_1\rangle\le \sum_{t=1}^T\sum_{i=1}^{n_1}\left[\eta D_{\psi_1}(x_1,v_{1,i}(t)) + \alpha(t)\|\hat{g}_{1,i}(t)\|_{\ast}^2\right]. \end{equation*} It then follows from \eqref{unbias} that \begin{align} \mathbb{E}\left[\sum_{t=1}^T\sum_{i=1}^{n_1}\langle g_{1,i}(t),v_{1,i}(t) - x_1\rangle\right]&\le \sum_{t=1}^T\sum_{i=1}^{n_1}\left(\eta \mathbb{E}[D_{\psi_1}(x_1,v_{1,i}(t))] + \alpha(t)\mathbb{E}[\|\hat{g}_{1,i}(t)\|_{\ast}^2]\right).\label{strongly_upper_bound} \end{align} Since $f_{1,i}$ is strongly convex with respect to $\psi_1$, by Definition \ref{strong_def}, we have \begin{align*} \sum_{i=1}^{n_1}\langle g_{1,i}(t), v_{1,i}(t) - x_1\rangle - \eta D_{\psi_1}(x_1,v_{1,i}(t)) &\ge \sum_{i=1}^{n_1}[f_{1,i}(v_{1,i}(t),u_{2,i}(t)) - f_{1,i}(x_1,u_{2,i}(t))]. 
\end{align*} Then, by using the analysis procedure similar to that of deriving \eqref{lower_bound}, we get \begin{align} \max_{x_1\in X_1}\mathbb{E}\left[\sum_{t=1}^T\sum_{i=1}^{n_1}\langle g_{1,i}(t), v_{1,i}(t) - x_1\rangle\right] &\ge n_1\bar{R}_1^{(i)}(T) - n_1L\sum_{t=1}^T\mathbb{E}[\|x_{1,i}(t) - \bar{x}_1(t)\| + 2\|u_{2,i}(t) - \bar{x}_2(t)\|]\notag\\ &\quad -L\sum_{t=1}^T\sum_{i=1}^{n_1}\mathbb{E}\Big[\|x_{1,i}(t) - \bar{x}_1(t)\| + \|x_{1,i}(t) - v_{1,i}(t)\|\notag\\ &\quad + 2\|u_{2,i}(t) - \bar{x}_2(t)\|\Big] + \sum_{t=1}^T\sum_{i=1}^{n_1}\eta \mathbb{E}[D_{\psi_1}(x_1,v_{1,i}(t))].\label{strongly_lower_bound} \end{align} Combining \eqref{strongly_upper_bound}-\eqref{strongly_lower_bound} with Lemma \ref{lem2}, we get \begin{align*} \bar{R}_1^{(i)}(T)&\le 4L\sum_{t=1}^T\sum_{l=1}^2\left(n_l(L +\nu_l)\Gamma_l\sum_{s=1}^{t-1}\theta_l^{t-1-s}\alpha(s-1) + 2(L + \nu_l)\alpha(t-1)\right) \notag\\ &\quad + 4L\sum_{t=1}^T\sum_{l=1}^2n_l\Gamma_l\theta_l^{t-1}\Lambda_l + \sum_{t=1}^T\alpha(t)(L+\nu_1)^2. \end{align*} Since $\sum_{t=1}^T\frac{1}{t}\le 1 + \int_{1}^T\frac{1}{t}dt = 1 + \log T$, the above relation together with \eqref{exchange_sum} yields \eqref{regret_bound_final_1}. \noindent{\bf Proof of Theorem 4}. According to \eqref{inner_bound_1} and the definition of $R_l^2$, we have that, for all $l\in\{1,2\}$ and $x_l\in X_l$, \[\sum_{s=0}^{t-1}\sum_{i=1}^{n_l}\langle \alpha(s)\hat{g}_{l,i}(s),x_{l,i}(s+1) - x_l\rangle\le n_lR_l^2.\] Furthermore, by using a decomposition similar to \eqref{upper_bound_1}, we derive the following upper bound \begin{equation}\label{ergo_upper_bound_1} \sum_{s=0}^{t-1}\sum_{i=1}^{n_l}\mathbb{E}[\langle \alpha(s)\hat{g}_{l,i}(s), v_{l,i}(s) - x_l\rangle] \le n_lR_l^2 + \sum_{s=0}^{t-1}\sum_{i=1}^{n_l}\alpha^2(s)\mathbb{E}[\|\hat{g}_{l,i}(s)\|_{\ast}^2]. \end{equation} Construct an auxiliary sequence $\{\hat{v}_{l,i}(t)\}_{t\ge 0}$ by letting $\hat{v}_{l,i}(0) = x_{l,i}(0)$ and \[\hat{v}_{l,i}(t) = P_{\hat{v}_{l,i}(t-1)}^l(\alpha(t)(g_{l,i}(t) - \hat{g}_{l,i}(t))),\ \forall t\ge 1\] where $P_{\cdot}^l(\cdot)$ is the prox-mapping defined in \eqref{prox_mapping}. Then, by Assumption \ref{asm2} and Lemma 6.1 of \cite{nj:09}, \begin{align} \sum_{s=0}^{t-1}\sum_{i=1}^{n_l}\langle \alpha(s)(g_{l,i}(s) - \hat{g}_{l,i}(s)), \hat{v}_{l,i}(s) - x_l\rangle &\le n_lR_l^2 + \frac{n_l\nu_l^2}{2}\sum_{s=0}^{t-1}\alpha^2(s).\label{ergo_upper_bound_2} \end{align} Since $v_{l,i}(s)$ and $\hat{v}_{l,i}(s)$ are adapted to $\mathcal{F}_s$, it follows from an analysis similar to \eqref{unbias} that \begin{align} \mathbb{E}\left[\sum_{s=0}^{t-1}\sum_{i=1}^{n_l}\langle \alpha(s)(g_{l,i}(s) - \hat{g}_{l,i}(s)), v_{l,i}(s) - \hat{v}_{l,i}(s)\rangle\right] &= 0.\label{ergo_upper_bound_3} \end{align} As a combination of \eqref{ergo_upper_bound_1}-\eqref{ergo_upper_bound_3}, we obtain \begin{equation}\label{ergo_upper_bound} \mathbb{E}\left[\max_{x_l\in X_l}\sum_{s=0}^{t-1}\sum_{i=1}^{n_l}\langle \alpha(s)\hat{g}_{l,i}(s), v_{l,i}(s) - x_l\rangle\right] \le 2n_lR_l^2 + n_l\left((L + \nu_l)^2 + \frac{1}{2}\nu_l^2\right)\sum_{s=0}^{t-1}\alpha^2(s). 
\end{equation}
On the other hand, it follows from the convexity of $f_{1,i}(\cdot,x_2)$ that
\begin{align}
\sum_{i=1}^{n_1}\langle \alpha(s)g_{1,i}(s), v_{1,i}(s) - x_1\rangle &\ge \sum_{i=1}^{n_1}\alpha(s)[f_{1,i}(v_{1,i}(s),u_{2,i}(s)) - f_{1,i}(x_1,u_{2,i}(s))].\label{ergo_lower_bound_1}
\end{align}
A derivation similar to \eqref{add_substract} yields
\begin{align}
\sum_{i=1}^{n_1}f_{1,i}(v_{1,i}(s),u_{2,i}(s))&\ge n_1U(\bar{x}_1(s),\bar{x}_2(s)) - L\sum_{i=1}^{n_1}(\|u_{2,i}(s) - \bar{x}_2(s)\| + \|v_{1,i}(s) - \bar{x}_1(s)\|).
\end{align}
Note that
\begin{align}
-\sum_{i=1}^{n_1}f_{1,i}(x_1,u_{2,i}(s)) &= -\sum_{i=1}^{n_1}f_{1,i}(x_1,u_{2,i}(s)) + \sum_{i=1}^{n_1}f_{1,i}(x_1,\bar{x}_{2}(s))- \sum_{i=1}^{n_1}f_{1,i}(x_1,\bar{x}_{2}(s))\notag\\
&\ge -L\sum_{i=1}^{n_1}\|u_{2,i}(s) - \bar{x}_2(s)\|- n_1U(x_1,\bar{x}_2(s)).\label{ergo_lower_bound_2}
\end{align}
By the concavity of $U(x_1,\cdot)$,
\begin{align}
U(x_1,\hat{x}_{2,j}(t)) &\ge \frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{s=0}^{t-1}\alpha(s)U(x_1,x_{2,j}(s)) \notag\\
&= \frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{s=0}^{t-1}\alpha(s)[U(x_1,x_{2,j}(s)) - U(x_1,\bar{x}_2(s)) + U(x_1,\bar{x}_2(s))]\notag\\
&\ge \frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{s=0}^{t-1}\alpha(s)[-L\|x_{2,j}(s) - \bar{x}_2(s)\| + U(x_1,\bar{x}_2(s))].\notag
\end{align}
Therefore, combining \eqref{ergo_lower_bound_1}-\eqref{ergo_lower_bound_2} and taking an ergodic average, we obtain
\begin{align}
&\quad\frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{s=0}^{t-1}\frac{1}{n_1}\sum_{i=1}^{n_1}\langle \alpha(s)g_{1,i}(s), v_{1,i}(s) - x_1\rangle\notag\\
&\ge \frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{s=0}^{t-1}\alpha(s)U(\bar{x}_1(s),\bar{x}_2(s)) - U(x_1,\hat{x}_{2,j}(t))\notag\\
&\quad - \frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{s=0}^{t-1}\alpha(s)\left[\frac{1}{n_1}\sum_{i=1}^{n_1}(L(2\|u_{2,i}(s) - \bar{x}_2(s)\| + \|v_{1,i}(s) - \bar{x}_1(s)\|)) + L\|x_{2,j}(s) - \bar{x}_2(s)\|\right].\label{final_lower_bound_1}
\end{align}
In a similar way, we show that for all $x_2\in X_2$,
\begin{align}
&\quad\frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{s=0}^{t-1}\frac{1}{n_2}\sum_{i=1}^{n_2}\langle \alpha(s)g_{2,i}(s), v_{2,i}(s) - x_2\rangle\notag\\
&\ge -\frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{s=0}^{t-1}\alpha(s)U(\bar{x}_1(s),\bar{x}_2(s)) + U(\hat{x}_{1,i}(t),x_2)\notag\\
&\quad - \frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{s=0}^{t-1}\alpha(s)\left[\frac{1}{n_2}\sum_{i=1}^{n_2}(L(2\|u_{1,i}(s) - \bar{x}_1(s)\| + \|v_{2,i}(s) - \bar{x}_2(s)\|)) + L\|x_{1,i}(s) - \bar{x}_1(s)\|\right].\label{final_lower_bound_2}
\end{align}
Adding \eqref{final_lower_bound_1}-\eqref{final_lower_bound_2} and utilizing \eqref{ergo_upper_bound}, we get
\begin{align*}
&\quad\mathbb{E}\left[\max_{x_2\in X_2}U(\hat{x}_{1,i}(t),x_2) - \min_{x_1\in X_1}U(x_1,\hat{x}_{2,j}(t))\right]\\
&\le \frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{l=1}^2\left(2R_l^2 + \left((L + \nu_l)^2 + \frac{\nu_l^2}{2}\right)\sum_{s=0}^{t-1}\alpha^2(s)\right)\\
&\quad + \frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{s=0}^{t-1}4L\alpha(s)(H_1(s) + H_2(s)).
\end{align*}
Thus, the conclusion follows from Lemma \ref{lem2}.

\noindent{\bf Proof of Corollary \ref{col2}}. Let $(x_1^{\ast},x_2^{\ast})$ be the NE and note that
\begin{equation}\label{gap_lower_bound}
\mathbb{E}\left[\max_{x_2\in X_2}U(\hat{x}_{1,i}(t),x_2) - \min_{x_1\in X_1}U(x_1,\hat{x}_{2,j}(t))\right] \ge \mathbb{E}\left[U(\hat{x}_{1,i}(t),x_2^{\ast}) - U(x_1^{\ast},\hat{x}_{2,j}(t))\right].
\end{equation} We further have the decomposition \begin{align*} \mathbb{E}\left[U(\hat{x}_{1,i}(t),x_2^{\ast}) - U(x_1^{\ast},\hat{x}_{2,j}(t))\right] &= \mathbb{E}\left[U(\hat{x}_{1,i}(t),x_2^{\ast}) - U(x_1^{\ast},x_2^{\ast}) + U(x_1^{\ast},x_2^{\ast}) - U(x_1^{\ast},\hat{x}_{2,j}(t))\right]. \end{align*} Since $U(\cdot,\cdot)$ is $\mu$-strongly convex-strongly concave, by the definition of NE, we obtain \[U(\hat{x}_{1,i}(t),x_2^{\ast}) - U(x_1^{\ast},x_2^{\ast})\ge \langle \partial_1U(x_1^{\ast},x_2^{\ast}),\hat{x}_{1,i}(t) - x_1^{\ast}\rangle + \frac{\mu}{2}\|\hat{x}_{1,i}(t) - x_1^{\ast}\|^2\ge \frac{\mu}{2}\|\hat{x}_{1,i}(t) - x_1^{\ast}\|^2.\] In a similar way, \[U(x_1^{\ast},x_2^{\ast}) - U(x_1^{\ast},\hat{x}_{2,j}(t)) \ge \frac{\mu}{2}\|\hat{x}_{2,j}(t) - x_2^{\ast}\|^2.\] Therefore, \[\mathbb{E}\left[U(\hat{x}_{1,i}(t),x_2^{\ast}) - U(x_1^{\ast},\hat{x}_{2,j}(t))\right]\ge \mathbb{E}\left[\frac{\mu}{2}\|\hat{x}_{1,i}(t) - x_1^{\ast}\|^2 + \frac{\mu}{2}\|\hat{x}_{2,j}(t) - x_2^{\ast}\|^2\right].\] By Theorem \ref{thm5} and \eqref{gap_lower_bound}, we get the conclusion. \noindent{\bf Proof of Theorem 5}. Applying Lemma \ref{lem1} to \eqref{def_x}, we get that for $l = 1,2$, \begin{align} D_{\psi_l}(x_l,x_{l,i}(t+1))&\le D_{\psi_l}(x_l,v_{l,i}(t)) + \langle \alpha(t)\hat{g}_{l,i}(t),x_l - v_{l,i}(t)\rangle + \frac{1}{2}\alpha^2(t)\|\hat{g}_{l,i}(t)\|_{\ast}^2.\label{recursive} \end{align} By Assumption \ref{asm3} and $\sum_{i=1}^{n_l}w_{l,ij}(t) = 1$, $\sum_{i=1}^{n_l}D_{\psi_l}(x_l,v_{l,i}(t))\le \sum_{i=1}^{n_l}\sum_{j=1}^{n_l}w_{l,ij}(t)D_{\psi_l}(x_l,x_{l,j}(t)) = \sum_{i=1}^{n_l}D_{\psi_l}(x_l,x_{l,i}(t))$. It then follows from \eqref{recursive} that \begin{align} \sum_{i=1}^{n_l}D_{\psi_l}(x_l,x_{l,i}(t+1))&\le \sum_{i=1}^{n_l}D_{\psi_l}(x_l,x_{l,i}(t)) + \sum_{i=1}^{n_l}\langle \alpha(t)\hat{g}_{l,i}(t),x_l - v_{l,i}(t)\rangle + \frac{1}{2}\alpha^2(t)\sum_{i=1}^{n_l}\|\hat{g}_{l,i}(t)\|_{\ast}^2\notag \end{align} Plugging \eqref{lower_bound_2} to this relation, we obtain \begin{align} \frac{1}{n_1}\sum_{i=1}^{n_1}D_{\psi_1}(x_1, x_{1,i}(t+1)) &\le \frac{1}{n_1}\sum_{i=1}^{n_1}D_{\psi_1}(x_1, x_{1,i}(t)) + \alpha(t)(U(x_1,\bar{x}_2(t)) - U(\bar{x}_1(t),\bar{x}_2(t)))\notag\\ &\quad + \alpha(t)L\frac{1}{n_1}\sum_{i=1}^{n_1}(\|x_{1,i}(t) - \bar{x}_1(t)\| + \|v_{1,i}(t) - x_{1,i}(t)\| + 2\|u_{2,i}(t) - \bar{x}_2(t)\|)\notag\\ &\quad + \alpha^2(t)\frac{(L + \nu_1)^2}{2} + \alpha(t)\frac{1}{n_1}\sum_{i=1}^{n_1}\langle\hat{g}_{1,i}(t) - g_{1,i}(t),x_1 - v_{1,i}(t)\rangle. \label{network_1} \end{align} Similar to \eqref{lower_bound_2}, we also derive a lower bound for $\sum_{i=1}^{n_2}\langle\alpha(t)g_{2,i}(t),x_2 - v_{2,i}(t)\rangle$. Furthermore, \begin{align} \frac{1}{n_2}\sum_{i=1}^{n_2}D_{\psi_2}(x_2, x_{2,i}(t+1)) &\le \frac{1}{n_2}\sum_{i=1}^{n_2}D_{\psi_2}(x_2, x_{2,i}(t)) + \alpha(t)(U(\bar{x}_1(t),\bar{x}_2(t)) - U(\bar{x}_1(t),x_2))\notag\\ &\quad + \alpha(t)L\frac{1}{n_2}\sum_{i=1}^{n_2}(\|x_{2,i}(t) - \bar{x}_2(t)\| + \|v_{2,i}(t) - x_{2,i}(t)\| + 2\|u_{1,i}(t) - \bar{x}_1(t)\|)\notag\\ &\quad + \alpha^2(t)\frac{(L + \nu_2)^2}{2} + \alpha(t)\frac{1}{n_2}\sum_{i=1}^{n_2}\langle\hat{g}_{2,i}(t) - g_{2,i}(t),x_2 - v_{2,i}(t)\rangle. \label{network_2} \end{align} Let $(x_1,x_2) = (x_1^{\ast},x_2^{\ast})$ be the NE and consider the following Lyapunov function \begin{equation}\label{def-v} V(t,x_1^{\ast},x_2^{\ast}) = \frac{1}{n_1}\sum_{i=1}^{n_1}D_{\psi_1}(x_1^{\ast},x_{1,i}(t)) + \frac{1}{n_2}\sum_{i=1}^{n_2}D_{\psi_2}(x_2^{\ast},x_{2,i}(t)). 
\end{equation}
Recall that $v_{l,i}(t)$, $\bar{x}_l(t)$ and $u_{l,i}(t)$ are adapted to $\mathcal{F}_t$. By adding \eqref{network_1} and \eqref{network_2}, taking conditional expectation given $\mathcal{F}_t$, and using \eqref{condition_unbias}, we obtain
\begin{align}
\mathbb{E}[V(t+1,x_1^{\ast},x_2^{\ast})|\mathcal{F}_{t}] &\le V(t,x_1^{\ast},x_2^{\ast}) - \alpha(t)(U(\bar{x}_1(t), x_2^{\ast}) - U(x_1^{\ast},\bar{x}_2(t)))\notag\\
&\quad + \alpha^2(t)\left(\frac{(L + \nu_1)^2}{2} + \frac{(L + \nu_2)^2}{2}\right)\notag\\
&\quad + \alpha(t)L\sum_{l=1}^2\frac{1}{n_l}\sum_{i=1}^{n_l}e_{l,i}(t),\label{V_iter}
\end{align}
where $e_{l,i}(t) = \|x_{l,i}(t) - \bar{x}_l(t)\| + \|v_{l,i}(t) - x_{l,i}(t)\| + 2\|u_{3-l,i}(t) - \bar{x}_{3-l}(t)\|$.\\
By the definition of $H_l(t)$ in \eqref{H_l} and exchanging the order of summation, we obtain
\[\sum_{t=1}^T\alpha(t)H_l(t)\le\frac{n_l\Gamma_l\Lambda_l\alpha(0)}{1 - \theta_l} + 2(L+\nu_l)\sum_{t=1}^T\alpha^2(t-1) + \frac{n_l\Gamma_l(L + \nu_l)}{1 - \theta_l}\sum_{t=1}^T\alpha^2(t-1).\]
Therefore, it follows from $\sum_{t=1}^{\infty}\alpha^2(t) < \infty$ that $\sum_{t=1}^{\infty}\alpha(t)H_l(t) < \infty$. We further obtain $\sum_{t=1}^{\infty}\alpha(t)\mathbb{E}[e_{l,i}(t)] < \infty$ by using Lemma \ref{lem2}. By the monotone convergence theorem,
\[\mathbb{E}\left[\sum_{t=1}^{\infty}\alpha(t)e_{l,i}(t)\right] = \sum_{t=1}^{\infty}\alpha(t)\mathbb{E}[e_{l,i}(t)] < \infty.\]
Thus, $\sum_{t=1}^{\infty}\alpha(t)e_{l,i}(t) < \infty$ with probability $1$. Meanwhile, note by the definition of NE that
\begin{equation}\label{U_diff}
U(\bar{x}_1(t), x_2^{\ast})\ge U(x_1^{\ast},x_2^{\ast})\ge U(x_1^{\ast},\bar{x}_2(t)).
\end{equation}
By Lemma \ref{lem3}, $V(t,x_1^{\ast},x_2^{\ast})$ converges to a non-negative random variable with probability $1$ and
\[0\le\sum_{t=0}^{\infty}\alpha(t)(U(\bar{x}_1(t),x_2^{\ast}) - U(x_1^{\ast},\bar{x}_2(t))) < \infty,\ \text{a.s.}\]
Also, we have
\[0\le \sum_{t=0}^{\infty}\alpha(t)\|x_{l,i}(t) - \bar{x}_l(t)\| < \infty,\ \text{a.s.}\]
Therefore, by $\sum_{t=0}^{\infty}\alpha(t) = \infty$, there exists a subsequence $\{t_r\}$ such that with probability $1$,
\[\lim_{r\to\infty}U(x_1^{\ast},\bar{x}_2(t_r)) = U(x_1^{\ast},x_2^{\ast}) = \lim_{r\to\infty}U(\bar{x}_1(t_r),x_2^{\ast}),\]
and for all $i\in\mathcal{V}_1$, $j\in\mathcal{V}_2$,
\begin{equation}\label{limit_error}
\lim_{r\to\infty}x_{1,i}(t_r) = \lim_{r\to\infty}\bar{x}_1(t_r),\quad \lim_{r\to\infty}x_{2,j}(t_r) = \lim_{r\to\infty}\bar{x}_2(t_r).
\end{equation}
The bounded sequence $\{(\bar{x}_1(t_r),\bar{x}_2(t_r))\}$ has a convergent subsequence, and without loss of generality, we let it be indexed by the same index set $\{t_r,r = 1,2,\dots\}$. By the strict convexity-concavity of $U$, the NE is unique. Thus, according to the continuity of $U(\cdot,\cdot)$, $\bar{x}_1(t_r)\to x_1^{\ast}$ and $\bar{x}_2(t_r)\to x_2^{\ast}$ with probability $1$. Using \eqref{limit_error}, we further obtain $x_{1,i}(t_r)\to x_1^{\ast}$ and $x_{2,j}(t_r)\to x_2^{\ast}$. Therefore, by Assumption \ref{asm3} and the convergence of $V(t,x_1^{\ast},x_2^{\ast})$, $V(t,x_1^{\ast},x_2^{\ast})\to 0$ with probability $1$. Then, by \eqref{breg_prop2} and \eqref{def-v}, $x_{1,i}(t)\to x_1^{\ast}$ and $x_{2,j}(t)\to x_2^{\ast}$ with probability $1$.
\bibliographystyle{IEEEtran}
\bibliography{aistats2022.bib}
\end{document}
\cite{lh:15} further extended the framework to time-varying networks and designed a distributed projected subgradient descent algorithm. \section{Preliminaries \& Problem Formulation} {\bf Notations.} For a matrix $A = [a_{ij}]$, $a_{ij}$ denotes the element in the $i$th row and $j$th column. For a function $f(x_1,\dots,x_N)$, we use $\partial_i f$ to denote the subdifferential of $f$ with respect to $x_i$. Given a norm $\|\cdot\|$ on $\mathbb{R}^n$, $\|y\|_{\ast}:= \sup_{\|x\|\le 1}\langle y,x\rangle$ denotes the dual norm. A digraph is characterized by $\mathcal{G} = (\mathcal{V},\mathcal{E})$, where $\mathcal{V} = \{1,\dots,n\}$ is the set of nodes and $\mathcal{E}\subset\mathcal{V}\times\mathcal{V}$ is the set of edges. A path from $i_1$ to $i_p$ is an alternating sequence of edges $(i_1,i_2),(i_2,i_3),\dots, (i_{p-1},i_p)$ in the digraph with distinct nodes $i_m\in\mathcal{V}$, $\forall m: 1\le m\le p$. A digraph is strongly connected if there is a path between any pair of distinct nodes. \subsection{Two-Network Stochastic Zero-Sum Game} {\bf Two-Person Zero-Sum Game and Nash Equilibrium.} We recall the definition of a two-person zero-sum game and the sufficient conditions to ensure the existence of a Nash equilibrium. \begin{Def} A two-person zero-sum game consists of two players who select strategies from nonempty sets $X_1$ and $X_2$, respectively. Players observe a cost function $U_1: X_1\times X_2\to\mathbb{R}$ and $U_2: X_1\times X_2\to\mathbb{R}$ that satisfy $U_1(x_1,x_2) + U_2(x_1,x_2) = 0$ for all $(x_1,x_2)\in X_1\times X_2$. \end{Def} Define $U:= U_1 = -U_2$ as the cost function of the game. The most widely used solution concept in non-cooperative games is that of a Nash equilibrium (NE), which is formally defined as follows. \begin{Def} A strategy profile $x^{\ast} = (x_1^{\ast},x_2^{\ast})$ is a Nash equilibrium (NE) of a two-person zero-sum game if $U(x_1^{\ast},x_2)\le U(x_1^{\ast},x_2^{\ast})\le U(x_1,x_2^{\ast})$ for all $(x_1,x_2)\in X_1\times X_2$. \end{Def} \begin{thm}[Existence of NE \cite{gharesifard2013distributed}] Suppose that the strategy sets $X_1$ and $X_2$ are compact and convex. If the cost function $U$ is continuous and convex-concave (convex in $x_1$, concave in $x_2$) over $X_1\times X_2$, then there exists a NE for the considered two-person zero-sum game. \end{thm} {\bf A Two-Network Zero-Sum Game} \cite{gharesifard2013distributed,lh:15} is a generalized two-person zero-sum game, defined by a tuple $(\{\Sigma_1,\Sigma_2\},X_1\times X_2,U)$. The two players $\Sigma_1$ and $\Sigma_2$ are directed networks composed of $n_1$ agents and $n_2$ agents. For $l = 1,2$, $X_l\subset\mathbb{R}^{m_l}$, denoting the strategy set of $\Sigma_l$, is assumed to be compact and convex. The cost function $U: X_1\times X_2\to\mathbb{R}$ is defined by \[U(x_1,x_2) = \frac{1}{n_1}\sum_{i=1}^{n_1}f_{1,i}(x_1,x_2) = -\frac{1}{n_2}\sum_{j=1}^{n_2}f_{2,j}(x_1,x_2),\] where $f_{1,i}$ is a convex-concave continuous cost function associated with agent $i$ in $\Sigma_1$ and $f_{2,j}$ is a concave-convex continuous cost function associated with agent $j$ in $\Sigma_2$. The networks have no global decision-making capability and each agent only knows its own cost function. Within the same network, neighboring agents can exchange information. Moreover, the interaction between the two networks is specified by a bipartite network $\Sigma_{12}$, which means that each network can also obtain information about the other network through $\Sigma_{12}$. 
The goal of the agents in $\Sigma_1$ ($\Sigma_2$) is to collaboratively minimize (maximize) the cost function $U$ based on local information. More precisely, $\Sigma_1$, $\Sigma_2$ and $\Sigma_{12}$ are described by three directed graph sequences $\mathcal{G}_1(t) = (\mathcal{V}_1,\mathcal{E}_1(t))$, $\mathcal{G}_2(t) = (\mathcal{V}_2,\mathcal{E}_2(t))$ and $\mathcal{G}_{12}(t) = (\mathcal{V}_1\cup\mathcal{V}_2,\mathcal{E}_{12}(t))$, where $\{\mathcal{G}_1(t)\}$ and $\{\mathcal{G}_2(t)\}$ are uniformly jointly strongly connected\footnote{Namely, there exists an integer $B_l\ge 1$ such that the union graph $(\mathcal{V}_l,\bigcup_{t=k}^{k+B_l-1}\mathcal{E}_l(t))$ is strongly connected for $k\ge 0$.}. The communication between agents are modeled by mixing matrices $W_1(t)$, $W_2(t)$ and $W_{12}(t)$, which satisfy (i) for each $l\in\{1,2\}$, $w_{l,ij}(t)\ge\eta$ with $0 < \eta < 1$ when $(j,i)\in\mathcal{E}_l(t)$, and $w_{l,ij}(t) = 0$ otherwise; $w_{12,ij}(t) > 0$ only if $(i,j)\in\mathcal{E}_{12}(t)$; (ii) for each $i,j\in\mathcal{V}_l$, $\sum_{j = 1}^{n_l}w_{l,ij}(t) = \sum_{i = 1}^{n_l}w_{l,ij}(t) = 1$; (iii) for each $i\in\mathcal{V}_l$, $\sum_{j = 1}^{n_{3-l}}w_{12,ij}(t) = 1$. Agent $i\in\mathcal{V}_l$ can only communicate directly with its neighbors $\mathcal{N}_l^i(t) := \{j\mid(j,i)\in\mathcal{E}_l(t)\}$ and $\mathcal{N}_{12,l}^i(t) \triangleq \{j\mid (j,i)\in\mathcal{E}_{12}(t)\}$. \cite{no:10} proved the following result. \begin{lem}\label{lem_graph} Let $\Phi_l(t,s) = W_l(t)W_l(t-1)\cdots W_l(s)$ ($l = 1,2$) be the transition matrices. Then for all $t,s$ with $t\ge s\ge 0$, we have \[\bigg|[\Phi_l(t,s)]_{ij} - \frac{1}{n_l}\bigg| \le \Gamma_l\theta_l^{t-s},\quad l = 1,2\] where $\Gamma_l = (1 - \eta/4n_l^2)^{-2}$ and $\theta_l = (1 - \eta/4n_l^2)^{1/B_l}$. \end{lem} {\bf Two-Network Stochastic Zero-Sum Game.} Consider a stochastic generalization of a two-network zero-sum game, where the cost function of each agent is expectation-valued. To be specific, we assume that $f_{l,i}$ is the expected value of a stochastic mapping $\psi_{l,i}: X_1\times X_2\times\mathbb{R}^d\to\mathbb{R}$, i.e., \[f_{l,i}(x_1,x_2):=\mathbb{E}[\psi_{l,i}(x_1,x_2;\xi(\omega))],\quad l\in\{1,2\}\] where the expectation is taken with respect to the random vector $\xi: \Omega\to\mathbb{R}^d$ defined on a probability space $(\Omega,\mathcal{F},\mathbb{P})$. Such stochastic models represent a natural extension of two-network zero-sum games and find their applicability when the evaluation of the deterministic cost function is corrupted by errors. However, deterministic methods cannot be used to solve a two-network stochastic zero-sum game directly since generally the expectation cannot be evaluated efficiently or the underlying distribution $\mathbb{P}$ is unknown. This characteristic also makes the analysis of algorithm performance more complicated. \subsection{No-Regret Learning} In a no-regret learning framework \cite{z:03}, for $l = 1,2$, each agent $i\in\mathcal{V}_l$ plays repeatedly against the agents in $\Sigma_{3-l}$ by making a sequence of decisions from $X_l$. At each round $t = 1,\dots, T$ of a learning process, each agent $i$ in $\Sigma_l$ selects a strategy $x_{l,i}(t)\in X_l$ based on the available information, and receives a cost $f_{1,i}(x_{1,i}(t),u_{2,i}(t))$, where $u_{2,i}(t) \triangleq \sum_{j\in\mathcal{N}_{12,1}^i(t)}w_{12,ij}(t)x_{2,j}(t)$ is the weighted information received from its neighbors $\mathcal{N}_{12,1}^i(t):=\{j\in\mathcal{V}_2|(j,i)\in\mathcal{E}_{12}(t)\}$. 
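As a concrete illustration of mixing matrices satisfying the double stochasticity condition (ii) above, one may take Metropolis-type weights whenever the communication graph at a given time is undirected. The following sketch (our own illustration in Python/NumPy, covering only this symmetric special case; the $0/1$ adjacency matrix \texttt{adj} is an assumed input) constructs such a matrix.
\begin{verbatim}
import numpy as np

def metropolis_weights(adj):
    # adj: symmetric 0/1 adjacency matrix (no self-loops) of an undirected graph.
    # Off-diagonal weights are 1/(1 + max(deg_i, deg_j)) on edges, zero otherwise;
    # the diagonal absorbs the remaining mass, so every row and column sums to one.
    n = adj.shape[0]
    deg = adj.sum(axis=1)
    W = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            if i != j and adj[i, j] > 0:
                W[i, j] = 1.0 / (1.0 + max(deg[i], deg[j]))
        W[i, i] = 1.0 - W[i].sum()
    return W
\end{verbatim}
For the directed, time-varying graphs allowed above, any weights fulfilling conditions (i)--(iii) can be used instead; this sketch is not part of the model.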
Since $x_{1,i}(t)$ and $u_{2,i}(t)$ are generated with noisy information, we consider a notion different from the classical regret, called the pseudo regret \cite[Section 2.1.2]{m:19}. \begin{Def} The pseudo regret of $\Sigma_1$ associated with agent $i$ accumulated up to time $T$ is defined as \begin{align} \quad\bar{R}_1^{(i)}(T) &= \mathbb{E}\left[\sum_{t=1}^TU(x_{1,i}(t),u_{2,i}(t))\right]\notag\\ &\quad - \min_{x_1\in X_1}\mathbb{E}\left[\sum_{t=1}^TU(x_1,u_{2,i}(t))\right].\label{regret_def} \end{align} \end{Def} Intuitively, $\bar{R}_1^{(i)}(T)$ represents the maximum expected gain agent $i\in\Sigma_1$ could have achieved by playing the single best fixed strategy, had the estimated sequence of $\Sigma_2$'s strategies $\{u_{2,i}(t)\}_{t=1}^T$ and the cost functions been known in hindsight. An algorithm is referred to as no-regret for network $\Sigma_1$ if for all $i$, $\bar{R}_1^{(i)}(T)/T\to 0$ as $T\to\infty$. \section{Proposed D-SMD Algorithm} Our algorithm uses the notion of {\it prox-mapping}. For $l = 1,2$, $x,p\in X_l$, let the Bregman divergence be defined by \begin{equation}\label{breg_def} D_{\psi_l}(x,p) := \psi_l(x) - \psi_l(p) - \langle\nabla\psi_l(p),x - p\rangle, \end{equation} where $\psi_l$ is a $1$-strongly convex differentiable regularizer on $X_l$. The Bregman divergence then generates an associated prox-mapping defined as \begin{equation}\label{prox_mapping} P_x^l(y) := \arg\min_{x'\in X_l}\{\langle y,x - x'\rangle + D_{\psi_l}(x',x)\}. \end{equation} Two typical examples of prox-mappings are the Euclidean projection and the multiplicative weights rule \cite{s:11}. \begin{example}\label{exm1} Let $\psi_l(x) = \frac{1}{2}\|x\|_2^2$. Then the associated prox-mapping is \[P_x^l(y) = \arg\min_{x'\in X_l}\|x' - x - y\|^2.\] \end{example} \begin{example}\label{exm2} Let $X_l$ be the $d_l$-dimensional simplex and $\psi_l(x) = \sum_{j=1}^{d_l}x_j\log x_j$ be the entropic regularizer. The induced prox-mapping becomes the well-known multiplicative weights rule \cite{fs:99} \[P_x^l(y) = \frac{(x_j\exp(y_j))_{j=1}^{d_l}}{\sum_{j=1}^{d_l}x_j\exp(y_j)}.\] \end{example} The distributed stochastic mirror descent algorithm proceeds as follows. In the $t$-th iteration, each agent replaces its local estimates of the states of $\Sigma_1$ and $\Sigma_2$ with the weighted averages of its neighbors' estimates in $\Sigma_1$ and $\Sigma_2$, respectively. Then it calculates a sampled subgradient of its local cost at these averaged estimates, and updates its estimate by a prox-mapping. The complete algorithm is summarized in Algorithm \ref{alg1}.
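To make the update of Algorithm \ref{alg1} concrete, the following sketch (a minimal illustration in Python/NumPy under our own assumptions, not the authors' implementation) combines the entropic prox-mapping of Example \ref{exm2} with one local D-SMD step on the simplex; the neighbor strategies, mixing weights and the sampled subgradient oracle \texttt{grad\_oracle} are assumed to be supplied by the surrounding simulation.
\begin{verbatim}
import numpy as np

def entropic_prox(x, y):
    # Prox-mapping for the entropic regularizer (multiplicative weights):
    # P_x(y) = (x_j * exp(y_j))_j / sum_j x_j * exp(y_j).
    z = x * np.exp(y - y.max())      # constant shift for numerical stability
    return z / z.sum()

def dsmd_local_step(x_own, w_own, x_other, w_other, grad_oracle, alpha):
    # One update of a single agent (the inner loop of Algorithm 1, sketched).
    # x_own, x_other: arrays of neighbours' strategies (one row per neighbour)
    # w_own, w_other: the corresponding mixing weights, each summing to one
    # grad_oracle:    callable returning a sampled subgradient at (v, u)
    v = w_own @ x_own                # weighted average inside the agent's network
    u = w_other @ x_other            # weighted estimate of the opposing network
    g_hat = grad_oracle(v, u)        # noisy subgradient of the local cost
    return entropic_prox(v, -alpha * g_hat)
\end{verbatim}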
\begin{algorithm}[tb] \caption{Distributed Stochastic Mirror Descent (D-SMD)} \label{alg1} \textbf{Input}: Non-increasing nonnegative step-size sequence $\{\alpha(t)\}_{t\ge 0}$, mixing matrices $\{W_1(t)\}_{t\ge 0}$, $\{W_2(t)\}_{t\ge 0}$ and $\{W_{12}(t)\}_{t\ge 0}$\\ \textbf{Initialize}: $x_{l,i}(0)\in X_l$ for each $i \in\mathcal{V}_l$ and $l=1,2$ \begin{algorithmic}[1] \REPEAT \FOR {network $l=1,2$} \FOR {agent $i = 1,\dots,n_l$} \STATE Calculate the weighted averages of the neighbors in $\Sigma_l$ and $\Sigma_{3-l}$:\\ $v_{l,i}(t) = \sum_{j=1}^{n_l}w_{l,ij}(t)x_{l,j}(t)$\\ $u_{3-l,i}(t) = \sum_{j=1}^{n_{3-l}}w_{12,ij}(t)x_{3-l,j}(t)$\\ \STATE Receive a sampled subgradient $\hat{g}_{l,i}(t)\in\partial_l\psi_{l,i}(v_{l,i}(t),u_{3-l,i}(t);\xi_{l,i}(t))$, where the terms $\xi_{l,i}(t)$ are independent and identically distributed realizations of the random variable $\xi$.\\ \STATE Update the local estimate $x_{l,i}(t+1)$:\\ $x_{l,i}(t+1) = P_{v_{l,i}(t)}^l(-\alpha(t)\hat{g}_{l,i}(t))$ \ENDFOR \ENDFOR \UNTIL Convergence \end{algorithmic} \end{algorithm} \begin{remark} To compute a NE of network stochastic zero-sum games, the stochastic mirror descent method for convex-concave saddle point problems \cite{nj:09} requires global information about the network. In our algorithm, by contrast, each agent merely communicates its decisions with its neighbors to update its estimates. \end{remark} Let $\mathcal{F}_t := \sigma\{x_{l,i}(0),\xi_{l,i}(s), l = 1,2, i\in\mathcal{V}_l, 0\le s\le t-1\}$ denote the $\sigma$-algebra generated by all the information up to time $t-1$. Then, by Algorithm \ref{alg1}, $x_{l,i}(t)$, $v_{l,i}(t)$ and $u_{l,i}(t)$ are adapted to $\mathcal{F}_t$. Denote by $g_{1,i}(t)\in\partial_1f_{1,i}(v_{1,i}(t),u_{2,i}(t))$ and $g_{2,i}(t)\in\partial_2f_{2,i}(u_{1,i}(t),v_{2,i}(t))$ the subgradients of $f_{1,i}$ and $f_{2,i}$ evaluated at $v_{1,i}(t)$ and $v_{2,i}(t)$. In the following, we state three assumptions, which are standard and widely used in stochastic approximation and distributed optimization \cite{nj:09,srivastava2013distributed}. \begin{assumption}\label{asm1} For $l = 1,2$, $i\in\mathcal{V}_l$, the cost function $f_{l,i}(\cdot,\cdot)$ is Lipschitz continuous over $X_1\times X_2$, i.e., there exists a constant $L > 0$ such that, for all $x_1,x_1'\in X_1$ and $x_2,x_2'\in X_2$, $$|f_{l,i}(x_1,x_2) - f_{l,i}(x_1',x_2')|\le L(\|x_1 - x_1'\| + \|x_2 - x_2'\|).$$ \end{assumption} \begin{assumption}\label{asm2} There exists $\nu_l > 0$ such that for $l = 1,2$, and each $i\in\mathcal{V}_l$, \[\mathbb{E}[\hat{g}_{l,i}(t)|\mathcal{F}_t] = g_{l,i}(t)\ \text{and}\ \ \mathbb{E}[\|g_{l,i}(t) - \hat{g}_{l,i}(t)\|_{\ast}^2|\mathcal{F}_t]\le \nu_l^2.\] \end{assumption} \begin{assumption}\label{asm3} For $l = 1,2$, the Bregman divergence $D_{\psi_l}(x,y)$ is convex in $y$ and satisfies \[x_k\to x\quad\Rightarrow \quad D_{\psi_l}(x_k,x)\to 0\footnote{The regularizers mentioned in Examples \ref{exm1} and \ref{exm2} both satisfy this condition, which is called Bregman reciprocity \cite{mz:19}.}.\] \end{assumption} \section{Guarantees on Regret} In this section, we establish regret bounds of D-SMD in the convex-concave and strongly convex-strongly concave settings. \subsection{Convex-Concave Case} This part presents a regret bound for D-SMD that holds at any time $T$ when the cost function $f_{1,i}(\cdot,\cdot)$ is convex-concave for all $i\in\mathcal{V}_1$. Theorem \ref{thm1} provides a general bound for any choice of (non-increasing) step-size sequence $\{\alpha(t)\}_{t=1}^T$.
It will then be the basis for Corollary \ref{col1}, which gives a way to select the algorithm parameters to achieve a sublinear regret. \begin{thm}\label{thm1} Let the cost function $f_{1,i}(\cdot,\cdot)$ be convex-concave and Assumptions \ref{asm1}-\ref{asm3} hold. Then the pseudo regret of D-SMD defined by \eqref{regret_def} is bounded by \begin{align} \bar{R}_1^{(i)}(T)&\le \sum_{t=1}^T\sum_{l=1}^2(L+\nu_l)(9L + \nu_l)\alpha(t-1)\notag\\ &\quad + 4L\sum_{t=1}^T\sum_{l=1}^2n_l\Gamma_l(L + \nu_l)\sum_{s=1}^{t-1}\theta_l^{t-1-s}\alpha(s-1) \notag\\ &\quad + 4L\sum_{t=1}^T\sum_{l=1}^2n_l\Gamma_l\theta_l^{t-1}\Lambda_l + \frac{R_1^2}{\alpha(T)}\label{regret_bound_final} \end{align} where $R_1^2:=\max\{D_{\psi_1}(x_1,x_1^{'}): x_1,x_1^{'}\in X_1\}$ is the diameter of $X_1$. \end{thm} The constants of the regret bound depend on the Lipschitz constants, the connectivity of the communication networks and the sampling error of the subgradients. Compared to the regret bound of the centralized online mirror descent with step-sizes $\{\alpha(t)\}_{t\ge 1}$ \cite{s:11}, Theorem \ref{thm1} shows that D-SMD suffers from an additional term $4\sum_{t=1}^T\sum_{l=1}^2(L\frac{n_l(L + \nu_l)\Gamma_l}{1-\theta_l} + n_l\Gamma_l\theta_l^{t-1}\Lambda_l)$, which is caused by the incomplete information of the agents. An immediate corollary is that D-SMD achieves a sublinear regret when $\alpha(t) = t^{-(\frac{1}{2} + \epsilon)}$, where $\epsilon\in [0,1/2)$. Specifically, if we set $\alpha(t) = 1/\sqrt{t}$, it is easy to check that $\bar{R}_1^{(i)}(T) = O(\sqrt{T})$, matching the optimal regret order for convex objectives \cite{h:16}. We formally state the result in the following corollary. \begin{Col}\label{col1} Let the conditions stated in Theorem \ref{thm1} hold. Then, for $\epsilon\in[0,\frac{1}{2})$, Algorithm \ref{alg1} with step-size sequence $\alpha(t) = t^{-(\frac{1}{2} + \epsilon)}$ yields a pseudo regret of order \[\bar{R}_1^{(i)}(T) \le O(T^{\frac{1}{2} + \epsilon}).\] \end{Col} \begin{proof} By exchanging the order of summation, for each $l = 1,2$, \begin{align} \sum_{t=1}^T\sum_{s=1}^{t-1}\theta_l^{t-1-s}\alpha(s-1)&\le \sum_{t=1}^T\sum_{s=0}^{T-1}\theta_l^s\alpha(t-1)\label{exchange_sum}\\ &\le \frac{1}{1 - \theta_l}\sum_{t=1}^T\alpha(t-1).\notag \end{align} Thus, we obtain the result by noting that $\frac{1}{\alpha(T)} = T^{\frac{1}{2} + \epsilon}$ and $\sum_{t=1}^T\alpha(t-1)\le T^{\frac{1}{2} - \epsilon}$. \end{proof} Corollary 1 shows that the average pseudo regret of each local agent vanishes as $O(T^{-\frac{1}{2} + \epsilon})$, which indicates that agents can learn the optimal offline strategy merely using local information about the network. \subsection{Strongly Convex-Strongly Concave Case} \cite{ss:07} showed that by using a mirror descent algorithm, the regret bound can be improved to $O(\log T)$ for online optimization problem with generalized strongly convex losses. In this section, we extend this idea to network stochastic zero-sum games and establish a regret bound of order $O(\log T)$ for our D-SMD. In the following, we give the formal definition of the generalized strongly convex function. 
\begin{Def}[\cite{ss:07}]\label{strong_def} A function $f$ is $\eta$-strongly convex over $X$ with respect to a convex and differentiable function $\psi$ if for all $x,y\in X$, \[f(x) - f(y) - \langle x-y,\lambda\rangle\ge \eta D_{\psi}(x,y)\quad \forall\lambda\in\partial f(y).\] \end{Def} As in \cite{ss:07}, we also need to select a specific step-size sequence depending on the strong convexity coefficient. We formulate the regret bound in this case in Theorem \ref{thm1_2}. \begin{thm}\label{thm1_2} Let the cost function $f_{1,i}(x_1,x_2)$, $i\in\mathcal{V}_1$, be $\eta$-strongly convex in $x_1\in X_1$ with respect to $\psi_1$ for any $x_2\in X_2$, and let Assumptions \ref{asm1}-\ref{asm3} hold. If $\alpha(t) = \frac{1}{\eta (t+1)}$, then the pseudo regret of D-SMD defined by \eqref{regret_def} is bounded by \begin{align} \bar{R}_1^{(i)}(T)&\le \sum_{l=1}^2(L + \nu_l)\Big(9L + \nu_l + \frac{4Ln_l\Gamma_l}{1 - \theta_l}\Big)(1 + \log(T))\notag\\ &\quad + 4L\sum_{l=1}^2\frac{n_l\Gamma_l\Lambda_l\alpha(0)}{1 - \theta_l}.\label{regret_bound_final_1} \end{align} \end{thm} Comparing this result with Theorem \ref{thm1}, we see that the generalized strong convexity eliminates the term $\frac{R_1^2}{\alpha(T)}$, which is the main factor restricting the regret order in the convex-concave setting. \section{Guarantees on Convergence} In general, \cite{bh:08} proved that a no-regret learning algorithm converges to a coarse correlated equilibrium, which is a relaxation of a NE. In this section, we are interested in the convergence of D-SMD to a NE. \subsection{Convex-Concave Case} For a two-person zero-sum game, we can directly derive the convergence rate of the time-averaged strategy profile $(\sum_{t=1}^Tx_1(t)/T,\sum_{t=1}^Tx_2(t)/T)$ from the established regret bound \cite{rs:13b}. However, we cannot apply this result to the two-network stochastic zero-sum game, since each network does not have access to the actual state of the opposing network. More precisely, the difficulty is that the pseudo regret is defined through the weighted estimates $u_{l,i}(t)$, and hence the cumulative cost of $\Sigma_1$ (i.e., $\mathbb{E}\left[\sum_{t=1}^TU(x_{1,i}(t),u_{2,i}(t))\right]$) cannot be offset against the cumulative cost of $\Sigma_2$ (i.e., $\mathbb{E}\left[\sum_{t=1}^T-U(u_{1,i}(t),x_{2,i}(t))\right]$). We now consider the following time-averaged iterates \[\hat{x}_{l,i}(t) = \frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{s=0}^{t-1}\alpha(s)x_{l,i}(s)\quad\text{for}\ t\ge 1,\ l=1,2.\] In order to measure the approximation quality of the averaged sequence, we define the gap function \begin{align} &\quad\delta(\hat{x}_{1,i}(t),\hat{x}_{2,j}(t))\notag\\ &:= \max_{x_2\in X_2}U(\hat{x}_{1,i}(t),x_2) - \min_{x_1\in X_1}U(x_1,\hat{x}_{2,j}(t)).\label{gap_function} \end{align} Our goal is to present an expected bound for this gap function. \begin{thm}\label{thm5} Let $f_{1,i}(\cdot,\cdot)$ be convex-concave and $f_{2,j}(\cdot,\cdot)$ be concave-convex for all $i\in\mathcal{V}_1$, $j\in\mathcal{V}_2$. Suppose that Assumptions \ref{asm1}-\ref{asm3} hold. Let $\{x_{1,i}(s)\}_{0\le s\le t-1}$ and $\{x_{2,j}(s)\}_{0\le s\le t-1}$ be the sequences generated by D-SMD.
Then \begin{align*} \mathbb{E}\left[\delta(\hat{x}_{1,i}(t),\hat{x}_{2,j}(t))\right]&\le \frac{M_1 + M_2\sum_{s=0}^{t-1}\alpha^2(s)}{\sum_{s=0}^{t-1}\alpha(s)}, \end{align*} where $M_1:= \sum_{l=1}^2\left(\frac{4Ln_l\Gamma_l\Lambda_l\alpha(0)}{1-\theta_l} + 2R_l^2\right)$ and $M_2 := \sum_{l=1}^2\Big(4L(L + \nu_l)\left(\frac{n_l\Gamma_l}{1 - \theta_l} + 2\right) + (L + \nu_l)^2 + \nu_l^2/2\Big)$. \end{thm} We remark that the gap measure in Theorem \ref{thm5} is also used to describe the generalization property of an empirical solution in stochastic saddle point problems \cite{ly:21,zh:21}. It was shown by \cite{ah:21} that $(\hat{x}_{1,i}(t),\hat{x}_{2,j}(t))$ is an $\epsilon$-equilibrium of the two-network stochastic zero-sum game when $\delta(\hat{x}_{1,i}(t),\hat{x}_{2,j}(t))\le \epsilon$. Our result matches the error bound of SMD for saddle point problems \cite{nj:09} with the constants $M_1$ and $M_2$ affected by the structure of the networks. Since the NE is unique when the cost function is strongly convex-strongly concave, we may transform the expected error bound of Theorem \ref{thm5} into the classical mean-squared error of $(\hat{x}_{1,i}(t),\hat{x}_{2,j}(t))$ in this case. \begin{Col}\label{col2} Suppose that $U(\cdot,\cdot)$ is $\mu$-strongly convex-strongly concave and Assumptions \ref{asm1}-\ref{asm3} hold, and let $\{x_{1,i}(s)\}_{0\le s\le t-1}$ and $\{x_{2,j}(s)\}_{0\le s\le t-1}$ be generated by D-SMD. If $(x_1^{\ast},x_2^{\ast})$ denotes the NE, then \begin{align*} &\quad\mathbb{E}[\|\hat{x}_{1,i}(t) - x_1^{\ast}\|^2 +\|\hat{x}_{2,j}(t) - x_2^{\ast}\|^2]\\ &\le \frac{2}{\mu}\frac{M_1 + M_2\sum_{s=0}^{t-1}\alpha^2(s)}{\sum_{s=0}^{t-1}\alpha(s)}, \end{align*} where $M_1$ and $M_2$ are as defined in Theorem \ref{thm5}. \end{Col} This corollary implies that when $\{\alpha(t)\}_{t\ge 0}$ satisfies $\sum_{t=0}^{\infty}\alpha(t) = \infty$ and $\sum_{t=0}^{\infty}\alpha^2(t) < \infty$, $(\hat{x}_{1,i}(t),\hat{x}_{2,j}(t))$ converges in mean square to the unique NE for all $i\in\mathcal{V}_1$, $j\in\mathcal{V}_2$. \subsection{Strictly Convex-Strictly Concave Case} We now study the almost sure convergence of the strategy profile generated by D-SMD in the strictly convex-strictly concave setting. This usually requires an analysis tool quite different from that of the time-averaged sequence \cite{mz:19}. \begin{thm}\label{thm2} Let $U(\cdot,\cdot)$ be strictly convex-strictly concave and Assumptions \ref{asm1}-\ref{asm3} hold. If the step-size sequence satisfies $\sum_{t=1}^{\infty}\alpha(t) = \infty$ and $\sum_{t=1}^{\infty}\alpha^2(t) < \infty$, then D-SMD almost surely converges to the unique NE, denoted by $x^{\ast} = (x_1^{\ast},x_2^{\ast})$, i.e., with probability $1$, \[\lim_{t\to\infty}x_{1,i}(t) = x_1^{\ast},\quad \lim_{t\to\infty}x_{2,j}(t) = x_2^{\ast},\ \forall i\in\mathcal{V}_1, j\in\mathcal{V}_2.\] \end{thm} We conclude from Corollary \ref{col1} and Theorem \ref{thm2} that for strongly convex-strongly concave network stochastic zero-sum games, D-SMD is a no-regret learning process that converges to the NE when $\sum_{t=0}^{\infty}\alpha(t) = \infty$ and $\sum_{t=0}^{\infty}\alpha^2(t) < \infty$. Moreover, this result generalizes Theorem 6 of \cite{srivastava2013distributed}, which is only applicable to a deterministic distributed saddle point problem. 
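The time-averaged iterates $\hat{x}_{l,i}(t)$ appearing in Theorem \ref{thm5} and Corollary \ref{col2} can be maintained online without storing the whole trajectory. A minimal sketch (our own illustration in Python/NumPy, not part of the analysis above) keeps a running weighted numerator and denominator:
\begin{verbatim}
import numpy as np

class WeightedAverage:
    # Running weighted average  x_hat(t) = sum_s alpha(s) x(s) / sum_s alpha(s).
    def __init__(self, dim):
        self.num = np.zeros(dim)    # running numerator   sum_s alpha(s) * x(s)
        self.den = 0.0              # running denominator sum_s alpha(s)

    def update(self, x, alpha):
        self.num += alpha * x
        self.den += alpha
        return self.num / self.den  # current time-averaged iterate
\end{verbatim}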
\section{Numerical Results} In this section, we conduct numerical experiments on a network version of two-person stochastic matrix games \cite{zh:21} to evaluate the performance of the proposed D-SMD algorithm in the convex-concave and strongly convex-strongly concave cases. For both cases, we set the number of players in each network to $N = 12$ and let each player have $K=20$ actions to choose from. We focus on studying the influence of the step-size sequence and the network topology on the regret bound and convergence of D-SMD. Specifically, we fix the structure of $\Sigma_2$ and consider three types of graphs with different degrees of connectivity (Cycle graph $<$ Random graph $<$ Complete graph) for $\Sigma_1$ in our simulations. \begin{itemize} \item The {\it cycle graph} has a single cycle and each node has exactly two immediate neighbors. \item The {\it random graph} is constructed by connecting nodes randomly; each edge is included in the graph with probability $0.7$, independently of every other edge. \item The {\it complete graph} is constructed by connecting all node pairs. \end{itemize} \subsection{Convex-Concave Case} Consider the following network stochastic zero-sum game \[\min_{x_1\in\Delta_K}\max_{x_2\in\Delta_K}U(x_1,x_2) :=\frac{1}{N}\sum_{i=1}^Nx_1^T\mathbb{E}_{\xi}[A_{\xi}^i]x_2\] where $x_1$ and $x_2$ are the mixed strategies of players in $\Sigma_1$ and $\Sigma_2$, respectively, which belong to the simplex $\Delta_K:=\{z\in\mathbb{R}^K: z\ge 0, \textbf{1}^Tz = 1\}$, and $A_{\xi}^i$ is the stochastic cost matrix of player $i\in\Sigma_1$. Let $\{x_{1,i}(t)\}_{t\ge 0}$ and $\{x_{2,i}(t)\}_{t\ge 0}$ be the outputs of D-SMD. Recalling the gap function \eqref{gap_function}, we use its average with respect to all players, defined as $\bar{\delta}(t):= \frac{1}{N^2}\sum_{i=1}^N\sum_{j=1}^N\delta(\hat{x}_{1,i}(t),\hat{x}_{2,j}(t))$, to demonstrate the convergence of D-SMD. In the first experiment, we run D-SMD from $t=1$ to $t=500$ for the learning rates $\alpha(t) = t^{-\frac{1}{2}}, t^{-\frac{2}{3}}, t^{-\frac{3}{4}}$ and estimate the expected average gap $\mathbb{E}[\bar{\delta}(t)]$ and the time-averaged pseudo regret $\bar{R}_1^{(i)}(t)/t$ by averaging across $50$ sample paths. The empirical results are shown in Figure \ref{fig1}, which shows that D-SMD achieves a sublinear regret bound and that a slower learning rate yields a better regret rate and a faster convergence rate. These observations are consistent with our theoretical results in the convex-concave case. In the second experiment, we use the learning rate $\alpha(t) = 1/\sqrt{t}$ and compare the expected average gap and average pseudo regret for different network topologies in Figure \ref{fig2}, which demonstrates that the network topology has only a slight influence on the regret rate and the convergence rate.
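For the bilinear game above, the gap function \eqref{gap_function} admits a closed form: with $A:=\frac{1}{N}\sum_{i=1}^N\mathbb{E}_{\xi}[A_{\xi}^i]$, the quantity $\max_{x_2\in\Delta_K}x_1^TAx_2$ is the largest entry of $x_1^TA$ and $\min_{x_1\in\Delta_K}x_1^TAx_2$ is the smallest entry of $Ax_2$. The following sketch (our own illustration in Python/NumPy; the matrix $A$ and the time-averaged strategies are assumed to be given) shows how the average gap $\bar{\delta}(t)$ can be evaluated.
\begin{verbatim}
import numpy as np

def gap(A, x1, x2):
    # delta(x1, x2) = max_{y in simplex} x1^T A y - min_{x in simplex} x^T A x2
    return np.max(x1 @ A) - np.min(A @ x2)

def average_gap(A, X1_hat, X2_hat):
    # Average of the gap over all pairs of agents (bar-delta).
    # A:      K x K matrix (1/N) * sum_i E[A_xi^i]
    # X1_hat: N x K array of time-averaged strategies of Sigma_1's agents
    # X2_hat: N x K array of time-averaged strategies of Sigma_2's agents
    return np.mean([gap(A, x1, x2) for x1 in X1_hat for x2 in X2_hat])
\end{verbatim}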
\begin{figure}[h] \centering \subfigure[]{\includegraphics[width = 0.48\columnwidth]{regret_convex_stepsize_type2-eps-converted-to}} \subfigure[]{\includegraphics[width = 0.48\columnwidth]{convergence_convex_stepsize_1008-eps-converted-to}} \caption{Average pseudo regret and expected average gap of D-SMD under different learning rates in the convex-concave case} \label{fig1} \end{figure} \begin{figure}[h] \centering \subfigure[]{\includegraphics[width = 0.48\columnwidth]{regret_convex_network_0904_2-eps-converted-to}} \subfigure[]{\includegraphics[width = 0.48\columnwidth]{convergence_convex_network_1008-eps-converted-to}} \caption{Average pseudo regret and expected average gap of D-SMD under different network topologies in the convex-concave case} \label{fig2} \end{figure} \subsection{Strongly Convex-Strongly Concave Case} To investigate the performance of D-SMD in the strongly convex-strongly concave case, we consider the regularized network stochastic zero-sum game with the cost function defined as follows \begin{align*} U(x_1,x_2) &:= \sum_{p=1}^Kx_1^{(p)}\log x_1^{(p)} + \frac{1}{N}\sum_{i=1}^Nx_1^T\mathbb{E}_{\xi}[A_{\xi}^i]x_2\\ &\quad - \sum_{p=1}^Kx_2^{(p)}\log x_2^{(p)}. \end{align*} Notice that $U(x_1,x_2)$ is $1$-strongly convex-strongly concave with respect to the regularizer $\psi(x) = \sum_{p=1}^Kx^{(p)}\log x^{(p)}$. We use a dual averaging algorithm proposed in \cite{m:19} to compute the Nash equilibrium, denoted by $(x_1^{\ast},x_2^{\ast})$. Due to the uniqueness of the NE in the strongly convex-strongly concave case, we consider the average absolute error $\bar{\delta}'(t):=\frac{1}{N}\sum_{i=1}^N\big(\|x_{1,i}(t) - x_1^{\ast}\| + \|x_{2,i}(t) - x_2^{\ast}\|\big)$ to illustrate the convergence of the sequences $\{(x_{1,i}(t),x_{2,i}(t))\}_{t\ge 0}$. The time-averaged pseudo regret $\bar{R}_1^{(i)}(t)/t$ and the expected absolute error $\mathbb{E}[\bar{\delta}'(t)]$ under different learning rates are plotted in Figure \ref{fig3}, and the performance under different network topologies with learning rate $\alpha(t) = 1/t$ is displayed in Figure \ref{fig4}. D-SMD produces a better regret rate in the strongly convex-strongly concave case, and the influence of the network topology and the learning rate is similar to that in the convex-concave case. \begin{figure}[h] \centering \subfigure[]{\includegraphics[width = 0.48\columnwidth]{regret_strongly_stepsize_type2-eps-converted-to}} \subfigure[]{\includegraphics[width = 0.48\columnwidth]{convergence_strongly_stepsize_type2-eps-converted-to}} \caption{Average pseudo regret and expected absolute error of D-SMD under different learning rates in the strongly convex-strongly concave case} \label{fig3} \end{figure} \begin{figure}[h] \centering \subfigure[]{\includegraphics[width = 0.48\columnwidth]{regret_strongly_netwrok_type2-eps-converted-to}} \subfigure[]{\includegraphics[width = 0.48\columnwidth]{convergence_strongly_network_type2-eps-converted-to}} \caption{Average pseudo regret and expected absolute error of D-SMD under different network topologies in the strongly convex-strongly concave case} \label{fig4} \end{figure} \section{Conclusion and Future Work} In this paper, we proposed distributed stochastic mirror descent (D-SMD) to extend no-regret learning in two-person zero-sum games to network stochastic zero-sum games.
In contrast to the previous works on Nash equilibrium seeking for network zero-sum games, we not only derived the convergence of D-SMD to the set of Nash equilibria, but also established regret bounds of D-SMD for convex-concave and strongly convex-strongly concave costs. The theoretical results were empirically verified by experiments on solving network stochastic matrix games. It is of interest to study the convergence rate of the actual iterates of D-SMD in the strongly convex-strongly concave case. In addition, another interesting topic is to develop an optimistic variant of our algorithm as in \cite{ddk:11} to improve the regret rate and obtain the last-iteration convergence in merely convex-concave case. \section*{Appendix} \begin{lem}\cite[Lemma B.2, Proposition B.3]{ml:19}\label{lem1} Let $\psi$ be a continuously differentiable $\sigma$-strongly convex function on $\mathcal{X}$. Then, for all $x,y,z\in\mathcal{X}$, the Bregman divergence defined by \eqref{breg_def} satisfies \begin{align} D_{\psi}(y,x) - D_{\psi}(y,z) - D_{\psi}(z,x) &= \langle\nabla\psi(z) - \nabla\psi(x),y-z\rangle,\label{breg_prop1}\\ D_{\psi}(x,y)&\ge \frac{\sigma}{2}\|x - y\|^2\label{breg_prop2} \end{align} Moreover, let $x^{+} = P_x(v)$ for $v\in\mathcal{X}^{\ast}$, where $\mathcal{X}^{\ast}$ is the dual space of $\mathcal{X}$ and $P_x(v) := \arg\min_{x'\in \mathcal{X}}\{\langle v,x - x'\rangle + D_{\psi}(x',x)\}$. Then \begin{equation} D_{\psi}(y,x^{+}) \le D_{\psi}(y,x) + \langle v,x - y\rangle + \frac{1}{2\sigma}\|v\|_{\ast}^2, \end{equation} where $\|v\|_{\ast}:= \sup\{\langle v,x\rangle: x\in\mathcal{X},\|x\|\le 1\}$ denotes the dual norm on $\mathcal{X}$. \end{lem} \begin{lem}\cite{rs:71}\label{lem3} Let $\{X_t\}$, $\{Y_t\}$ and $\{Z_t\}$ be sequences of non-negative random variables with $\sum_{t=0}^{\infty}Z_t < \infty$ almost surely and let $\{\mathcal{F}_t\}$ be a filtration such that $\mathcal{F}_t\subset\mathcal{F}_{t+1}$. If $X_t$, $Y_t$, $Z_t$ are adapted to $\{\mathcal{F}_t\}$ and \[\mathbb{E}[Y_{t+1}\mid\mathcal{F}_t] \le Y_t - X_t + Z_t,\] then, almost surely, $\sum_{t=0}^{\infty}X_t < \infty$ and $Y_t$ converges to a non-negative random variable $Y$. \end{lem} \begin{lem}\label{lem2} Let Assumptions \ref{asm1}-\ref{asm2} hold. Suppose that $x_{l,i}(t)$, $v_{l,i}(t)$ and $u_{l,i}(t)$ are generated by Algorithm \ref{alg1}. Then, for each $l\in\{1,2\}$ and $i\in\mathcal{V}_l$, \begin{align} E[\|x_{l,i}(t) - \bar{x}_{l}(t)\|] &\le H_l(t),\label{lem_bound_2}\\ E[\|\bar{x}_{l}(t) - v_{l,i}(t)\|] &\le H_l(t),\label{lem_bound_3}\\ E[\|\bar{x}_{l}(t) - u_{l,i}(t)\|] &\le H_l(t),\label{lem_bound_4} \end{align} where \begin{align} H_l(t) &= n_l\Gamma_l\theta_l^{t-1}\Lambda_l + 2(L + \nu_l)\alpha(t-1) + n_l\Gamma_l(L + \nu_l)\sum_{s=1}^{t-1}\theta_l^{t-1-s}\alpha(s-1),\label{H_l} \end{align} and $\Lambda_l\triangleq \max_{i\in\mathcal{V}_l}\|x_{l,i}(0)\|$. \end{lem} \begin{proof} By the definition of $v_{l,i}(t)$, we write the iterates as follows \begin{align*} x_{l,i}(t) &= v_{l,i}(t-1) - (v_{l,i}(t-1) - x_{l,i}(t))\notag\\ &= \sum_{j=1}^{n_l}[\Phi_l(t-1,0)]_{ij}x_{l,j}(0) + \sum_{s=1}^{t-1}\sum_{j=1}^{n_l}[\Phi_l(t-1,s)]_{ij}d_{l,j}(s-1) + d_{l,i}(t-1), \end{align*} where $d_{l,i}(t-1) = x_{l,i}(t) - v_{l,i}(t-1)$. By using the doubly stochastic property of $W_l(t)$, we derive \begin{equation*} \bar{x}_l(t) = \frac{1}{n_l}\sum_{j=1}^{n_l}x_{l,j}(0) + \frac{1}{n_l}\sum_{s=1}^t\sum_{j=1}^{n_l}d_{l,j}(s-1). 
\end{equation*} Therefore, \begin{align} \|x_{l,i}(t) - \bar{x}_l(t)\|&\le \sum_{j=1}^{n_l}\|[\Phi_l(t-1,0)]_{ij} - \frac{1}{n_l}\|\|x_{l,j}(0)\|\notag\\ &\quad + \sum_{s=1}^{t-1}\sum_{j=1}^{n_l}\|[\Phi_l(t-1,s)]_{ij} - \frac{1}{n_l}\|\|d_{l,j}(s-1)\| + \|\frac{1}{n_l}\sum_{j=1}^{n_l}d_{l,j}(t-1) - d_{l,i}(t-1)\|.\label{bound_3} \end{align} Thus, we only need to bound the term $\|d_{l,i}(t)\|$. Recalling the definition of $x_{l,i}(t+1)$ and \eqref{prox_mapping}, we have \begin{equation}\label{def_x} x_{l,i}(t+1) = \arg\min_{x'\in X_l}\{\langle \alpha(t)\hat{g}_{l,i}(t),x' - v_{l,i}(t)\rangle + D_{\psi_l}(x',v_{l,i}(t))\}. \end{equation} From the first-order optimality condition, we derive that for all $x_l\in X_l$, \begin{equation}\label{proj} \langle \nabla \psi_l(x_{l,i}(t+1)) - \nabla \psi_l(v_{l,i}(t)) + \alpha(t)\hat{g}_{l,i}(t), x_{l,i}(t+1) - x_l\rangle \le 0. \end{equation} Setting $x_l = v_{l,i}(t)$ implies \begin{align*} \alpha(t)\|\hat{g}_{l,i}(t)\|_{\ast}\|d_{l,i}(t)\| &\ge \langle\alpha(t)\hat{g}_{l,i}(t), v_{l,i}(t) - x_{l,i}(t+1)\rangle\\ &\ge \langle \nabla \psi_l(x_{l,i}(t+1)) - \nabla \psi_l(v_{l,i}(t)), x_{l,i}(t+1) - v_{l,i}(t)\rangle\\ &\ge \|d_{l,i}(t)\|^2, \end{align*} where the last inequality follows by the strong convexity of $\psi_l$. Therefore, \begin{equation}\label{error_bound} \|d_{l,i}(t)\|\le \alpha(t)\|\hat{g}_{l,i}(t)\|_{\ast}. \end{equation} It follows from Assumption \ref{asm1} that $\|g_{l,i}(t)\|_{\ast}\le L$. By H\"{o}lder's inequality and the bounded second moment condition of Assumption \ref{asm2}, we further achieve \begin{equation}\label{sample_gradient_bound} \mathbb{E}[\|\hat{g}_{l,i}(t)\|_{\ast}^2|\mathcal{F}_t]\le (L + \nu_l)^2. \end{equation} Note that $\sqrt{x}$ is a concave function. Using Jensen's inequality, \[\mathbb{E}[\|\hat{g}_{l,i}(t)\|_{\ast}|\mathcal{F}_t]\le \sqrt{\mathbb{E}[\|\hat{g}_{l,i}(t)\|_{\ast}^2|\mathcal{F}_t]}\le L + \nu_l.\] According to the iterated expectation rule, $\mathbb{E}[\|\hat{g}_{l,i}(t)\|_{\ast}]\le L + \nu_l$. This together with \eqref{error_bound} produces $$\mathbb{E}[\|d_{l,i}(t)\|]\le (L + \nu_l)\alpha(t).$$ Then, by Lemma \ref{lem_graph} and taking the expectation in \eqref{bound_3}, we derive \eqref{lem_bound_2}. Furthermore, by the convexity of $\|\cdot\|$ and $\sum_{j=1}^{n_l}w_{l,ij}(t) = 1$, we obtain \begin{align*} \mathbb{E}[\|v_{l,i}(t) - \bar{x}_{l}(t)\|] & = \mathbb{E}\left[\left\|\sum_{j=1}^{n_l}w_{l,ij}(t)x_{l,j}(t) - \bar{x}_{l}(t)\right\|\right] \le \sum_{j=1}^{n_l}w_{l,ij}(t)\mathbb{E}[\|x_{l,j}(t) - \bar{x}_{l}(t)\|]\le H_l(t). \end{align*} Thus, \eqref{lem_bound_3} holds. In a similar way, by using $\sum_{j=1}^{n_l}w_{12,ij}(t) = 1$, we obtain \eqref{lem_bound_4}. \end{proof} \noindent{\bf Proof of Theorem 2}. By \eqref{proj} and using Lemma \ref{lem1}, we obtain that, for all $x_1\in\mathcal{X}_1$, \begin{align} \langle \alpha(t)\hat{g}_{1,i}(t),x_{1,i}(t+1) - x_1\rangle &\le \langle \nabla\psi_1(v_{1,i}(t)) - \nabla\psi_1(x_{1,i}(t+1)),x_{1,i}(t+1) - x_1\rangle\notag\\ &= D_{\psi_1}(x_1, v_{1,i}(t)) - D_{\psi_1}(x_1, x_{1,i}(t+1)) - D_{\psi_1}(x_{1,i}(t+1), v_{1,i}(t))\notag\\ &\le D_{\psi_1}(x_1, v_{1,i}(t)) - D_{\psi_1}(x_1, x_{1,i}(t+1)).\label{thm2-1} \end{align} Note by Assumption \ref{asm3} that \begin{align*} \sum_{t=1}^T\frac{1}{\alpha(t)}\sum_{i=1}^{n_1}D_{\psi_1}(x_1,v_{1,i}(t+1))&\le \sum_{t=1}^T\frac{1}{\alpha(t)}\sum_{i=1}^{n_1}\sum_{j=1}^{n_1}w_{1,ij}(t)D_{\psi_1}(x_1,x_{1,j}(t+1))\\ & = \sum_{t=1}^T\frac{1}{\alpha(t)}\sum_{i=1}^{n_1}D_{\psi_1}(x_1,x_{1,i}(t+1)). 
\end{align*} Thus, dividing $\alpha(t)$ from both sides of \eqref{thm2-1} and taking a summation for $i=1,\dots,n_1$ and $t=1,\dots,T$, we derive \begin{align} &\quad\sum_{t=1}^T\sum_{i=1}^{n_1}\langle \hat{g}_{1,i}(t), x_{1,i}(t+1) - x_1\rangle \notag\\ &\le \sum_{i=1}^{n_1}\sum_{t=1}^T\frac{1}{\alpha(t)}[D_{\psi_1}(x_1, v_{1,i}(t)) - D_{\psi_1}(x_1, v_{1,i}(t+1))] \label{inner_bound_1}\\ &\le \sum_{i=1}^{n_1}\left[\left(\frac{1}{\alpha(1)}\right)D_{\psi_1}(x_1, v_{1,i}(1)) + \sum_{t=2}^TD_{\psi_1}(x_1, v_{1,i}(t))\left(\frac{1}{\alpha(t)} - \frac{1}{\alpha(t-1)}\right)\right]\label{bound_2}\\ &\le \frac{n_1R_1^2}{\alpha(T)},\notag \end{align} where the last inequality follows from the definition of $R_1^2$ and the non-increasing of $\alpha(t)$. This together with \eqref{error_bound} and the definition $d_{l,i}(t) = x_{l,i}(t+1) - v_{l,i}(t)$ yields \begin{align} &\quad\mathbb{E}\left[\sum_{t=1}^T\sum_{i=1}^{n_1}\langle \hat{g}_{1,i}(t), v_{1,i}(t) - x_1\rangle\right]\notag\\ &= \sum_{t=1}^T\sum_{i=1}^{n_1}\mathbb{E}[\langle \hat{g}_{1,i}(t), x_{1,i}(t+1) - x_1\rangle] + \sum_{t=1}^T\sum_{i=1}^{n_1}\mathbb{E}[\langle \hat{g}_{1,i}(t), v_{1,i}(t) - x_{1,i}(t+1)\rangle]\notag\\ &\le \frac{n_1R_1^2}{\alpha(T)} + \sum_{t=1}^T\sum_{i=1}^{n_1}\alpha(t)\mathbb{E}[\|\hat{g}_{1,i}(t)\|_{\ast}^2]\le \frac{n_1R_1^2}{\alpha(T)} + n_1(L+\nu_1)^2\sum_{t=1}^T\alpha(t) ,\label{upper_bound_1} \end{align} where the last inequality follows from \eqref{sample_gradient_bound}. Since $v_{1,i}(t)$ is adapted to $\mathcal{F}_t$, by Assumption \ref{asm2}, \begin{equation}\label{condition_unbias} \mathbb{E}[\langle g_{1,i}(t) - \hat{g}_{1,i}(t),v_{1,i}(t) - x_1\rangle|\mathcal{F}_{t}] = 0. \end{equation} Therefore, \begin{equation}\label{unbias} \mathbb{E}[\langle g_{1,i}(t) - \hat{g}_{1,i}(t),v_{1,i}(t) - x_1\rangle] = 0. \end{equation} As a combination of \eqref{upper_bound_1} and \eqref{unbias}, we conclude \begin{equation}\label{upper_bound} \mathbb{E}\left[\sum_{t=1}^T\sum_{i=1}^{n_1}\langle g_{1,i}(t), v_{1,i}(t) - x_1\rangle\right] \le \frac{n_1R_1^2}{\alpha(T)} + n_1(L+\nu_1)^2\sum_{t=1}^T\alpha(t). \end{equation} Next, we establish a lower bound for $\sum_{i=1}^{n_1}\langle g_{1,i}(t), v_{1,i}(t) - x_1\rangle$. Due to the convexity of $f_{1,i}(\cdot,\cdot)$ with respect to the first element, \begin{align} \sum_{i=1}^{n_1}\langle g_{1,i}(t), v_{1,i}(t) - x_1\rangle &\ge \sum_{i=1}^{n_1}[f_{1,i}(v_{1,i}(t),u_{2,i}(t)) - f_{1,i}(x_1,u_{2,i}(t))].\label{lower_bound_1} \end{align} On the other hand, by adding and subtracting some terms, we get \begin{align} &\quad n_1(U(\bar{x}_1(t),\bar{x}_2(t)) - U(x_1,\bar{x}_2(t)))\notag\\ &= \sum_{i=1}^{n_1}[f_{1,i}(\bar{x}_1(t),\bar{x}_2(t)) - f_{1,i}(x_{1,i}(t),\bar{x}_2(t)) + f_{1,i}(x_{1,i}(t),\bar{x}_2(t)) - f_{1,i}(v_{1,i}(t),\bar{x}_2(t))\notag\\ &\quad + f_{1,i}(v_{1,i}(t),\bar{x}_{2}(t)) - f_{1,i}(v_{1,i}(t),u_{2,i}(t)) + f_{1,i}(v_{1,i}(t),u_{2,i}(t)) - f_{1,i}(x_1,u_{2,i}(t))\notag\\ &\quad + f_{1,i}(x_1,u_{2,i}(t)) - f_{1,i}(x_1,\bar{x}_2(t))]\notag\\ &\le \sum_{i=1}^{n_1}[L(\|x_{1,i}(t) - \bar{x}_1(t)\| + \|x_{1,i}(t) - v_{1,i}(t)\|) + 2L\|u_{2,i}(t) - \bar{x}_2(t)\|\notag\\ &\quad + f_{1,i}(v_{1,i}(t),u_{2,i}(t)) - f_{1,i}(x_1,u_{2,i}(t))],\label{add_substract} \end{align} where the last inequality follows from the Lipschitz continuity of $f_{1,i}$. 
Plugging the above inequality to \eqref{lower_bound_1}, we derive \begin{align} \sum_{i=1}^{n_1}\langle g_{1,i}(t), v_{1,i}(t) - x_1\rangle &\ge n_1(U(\bar{x}_1(t),\bar{x}_2(t)) - U(x_1,\bar{x}_2(t)))\notag\\ &\quad - \sum_{i=1}^{n_1}[L(\|x_{1,i}(t) - \bar{x}_1(t)\| + \|x_{1,i}(t) - v_{1,i}(t)\|) + 2L\|u_{2,i}(t) - \bar{x}_2(t)\|].\label{lower_bound_2} \end{align} It remains to connect this lower bound and $\bar{R}_1^{(i)}(T)$. Notice that \begin{align} &\quad U(\bar{x}_1(t),\bar{x}_2(t)) - U(x_1,\bar{x}_2(t))\notag\\ &= U(\bar{x}_1(t),\bar{x}_2(t)) - U(\bar{x}_1(t),u_{2,i}(t)) + U(\bar{x}_1(t),u_{2,i}(t)) - U(x_{1,i}(t),u_{2,i}(t))\notag\\ &\quad + U(x_{1,i}(t),u_{2,i}(t)) - U(x_1,u_{2,i}(t)) + U(x_1,u_{2,i}(t)) - U(x_1,\bar{x}_2(t))\notag\\ &\ge U(x_{1,i}(t),u_{2,i}(t)) - U(x_1,u_{2,i}(t)) - L\|x_{1,i}(t) - \bar{x}_1(t)\|\notag\\ &\quad - 2L\|u_{2,i}(t) - \bar{x}_2(t)\|.\label{lower_bound_3} \end{align} Recall from the definition of $\bar{R}_1^{(i)}(T)$ that \begin{align*} \max_{x_1\in X_1}\mathbb{E}\left[\sum_{t=1}^T(U(x_{1,i}(t),u_{2,i}(t)) - U(x_1,u_{2,i}(t)))\right]&= \bar{R}_1^{(i)}(T). \end{align*} By taking expectation on both sides of \eqref{lower_bound_2}-\eqref{lower_bound_3} and making a summation from $t=1$ to $t=T$, we obtain \begin{align} \max_{x_1\in X_1}\mathbb{E}\left[\sum_{t=1}^T\sum_{i=1}^{n_1}\langle g_{1,i}(t), v_{1,i}(t) - x_1\rangle\right] &\ge n_1\bar{R}_1^{(i)}(T) - n_1L\sum_{t=1}^T\mathbb{E}[\|x_{1,i}(t) - \bar{x}_1(t)\| + 2\|u_{2,i}(t) - \bar{x}_2(t)\|]\notag\\ &\quad -L\sum_{t=1}^T\sum_{i=1}^{n_1}\mathbb{E}\Big[\|x_{1,i}(t) - \bar{x}_1(t)\| + \|x_{1,i}(t) - v_{1,i}(t)\|\notag\\ &\quad + 2\|u_{2,i}(t) - \bar{x}_2(t)\|\Big]\label{lower_bound} \end{align} Note by Lemma \ref{lem2} and the elementary inequality $\|a + b\|\le \|a\| + \|b\|$ that $\mathbb{E}[\|x_{1,i}(t) - v_{1,i}(t)\|]\le 2H_1(t)$. Thus, combining \eqref{lower_bound} with \eqref{upper_bound}, we derive \begin{align*} \bar{R}_1^{(i)}(T)&\le \frac{R_1^2}{\alpha(T)} + (L+\nu_1)^2\sum_{t=1}^T\alpha(t) + 4L\sum_{t=1}^T(H_1(t) + H_2(t)). \end{align*} This together with \eqref{H_l} produces \eqref{regret_bound_final}. \noindent{\bf Proof of Theorem 3}. Taking the same idea as in the proof of Theorem \ref{thm1}, we first establish an upper bound for $\mathbb{E}\left[\sum_{t=1}^T\sum_{i=1}^{n_1}\langle g_{1,i}(t),v_{1,i}(t) - x_1\rangle\right]$. Setting $\alpha(t) = \frac{1}{\eta (t+1)}$ in \eqref{bound_2}, we obtain \[\sum_{t=1}^T\sum_{i=1}^{n_1}\langle \hat{g}_{1,i}(t), x_{1,i}(t+1) - x_1\rangle\le \sum_{t=1}^T\sum_{i=1}^{n_1}\eta D_{\psi_1}(x_1,v_{1,i}(t)).\] Similar to the procedure of obtaining \eqref{upper_bound_1}, we use \eqref{error_bound} to derive \begin{equation*} \sum_{t=1}^T\sum_{i=1}^{n_1}\langle \hat{g}_{1,i}(t),v_{1,i}(t) - x_1\rangle\le \sum_{t=1}^T\sum_{i=1}^{n_1}\left[\eta D_{\psi_1}(x_1,v_{1,i}(t)) + \alpha(t)\|\hat{g}_{1,i}(t)\|_{\ast}^2\right]. \end{equation*} It then follows from \eqref{unbias} that \begin{align} \mathbb{E}\left[\sum_{t=1}^T\sum_{i=1}^{n_1}\langle g_{1,i}(t),v_{1,i}(t) - x_1\rangle\right]&\le \sum_{t=1}^T\sum_{i=1}^{n_1}\left(\eta \mathbb{E}[D_{\psi_1}(x_1,v_{1,i}(t))] + \alpha(t)\mathbb{E}[\|\hat{g}_{1,i}(t)\|_{\ast}^2]\right).\label{strongly_upper_bound} \end{align} Since $f_{1,i}$ is strongly convex with respect to $\psi_1$, by Definition \ref{strong_def}, we have \begin{align*} \sum_{i=1}^{n_1}\langle g_{1,i}(t), v_{1,i}(t) - x_1\rangle - \eta D_{\psi_1}(x_1,v_{1,i}(t)) &\ge \sum_{i=1}^{n_1}[f_{1,i}(v_{1,i}(t),u_{2,i}(t)) - f_{1,i}(x_1,u_{2,i}(t))]. 
\end{align*} Then, by using the analysis procedure similar to that of deriving \eqref{lower_bound}, we get \begin{align} \max_{x_1\in X_1}\mathbb{E}\left[\sum_{t=1}^T\sum_{i=1}^{n_1}\langle g_{1,i}(t), v_{1,i}(t) - x_1\rangle\right] &\ge n_1\bar{R}_1^{(i)}(T) - n_1L\sum_{t=1}^T\mathbb{E}[\|x_{1,i}(t) - \bar{x}_1(t)\| + 2\|u_{2,i}(t) - \bar{x}_2(t)\|]\notag\\ &\quad -L\sum_{t=1}^T\sum_{i=1}^{n_1}\mathbb{E}\Big[\|x_{1,i}(t) - \bar{x}_1(t)\| + \|x_{1,i}(t) - v_{1,i}(t)\|\notag\\ &\quad + 2\|u_{2,i}(t) - \bar{x}_2(t)\|\Big] + \sum_{t=1}^T\sum_{i=1}^{n_1}\eta \mathbb{E}[D_{\psi_1}(x_1,v_{1,i}(t))].\label{strongly_lower_bound} \end{align} Combining \eqref{strongly_upper_bound}-\eqref{strongly_lower_bound} with Lemma \ref{lem2}, we get \begin{align*} \bar{R}_1^{(i)}(T)&\le 4L\sum_{t=1}^T\sum_{l=1}^2\left(n_l(L +\nu_l)\Gamma_l\sum_{s=1}^{t-1}\theta_l^{t-1-s}\alpha(s-1) + 2(L + \nu_l)\alpha(t-1)\right) \notag\\ &\quad + 4L\sum_{t=1}^T\sum_{l=1}^2n_l\Gamma_l\theta_l^{t-1}\Lambda_l + \sum_{t=1}^T\alpha(t)(L+\nu_1)^2. \end{align*} Since $\sum_{t=1}^T\frac{1}{t}\le 1 + \int_{1}^T\frac{1}{t}dt = 1 + \log T$, the above relation together with \eqref{exchange_sum} yields \eqref{regret_bound_final_1}. \noindent{\bf Proof of Theorem 4}. According to \eqref{inner_bound_1} and the definition of $R_l^2$, we have that, for all $l\in\{1,2\}$ and $x_l\in X_l$, \[\sum_{s=0}^{t-1}\sum_{i=1}^{n_l}\langle \alpha(s)\hat{g}_{l,i}(s),x_{l,i}(s+1) - x_l\rangle\le n_lR_l^2.\] Furthermore, by using a decomposition similar to \eqref{upper_bound_1}, we derive the following upper bound \begin{equation}\label{ergo_upper_bound_1} \sum_{s=0}^{t-1}\sum_{i=1}^{n_l}\mathbb{E}[\langle \alpha(s)\hat{g}_{l,i}(s), v_{l,i}(s) - x_l\rangle] \le n_lR_l^2 + \sum_{s=0}^{t-1}\sum_{i=1}^{n_l}\alpha^2(s)\mathbb{E}[\|\hat{g}_{l,i}(s)\|_{\ast}^2]. \end{equation} Construct an auxiliary sequence $\{\hat{v}_{l,i}(t)\}_{t\ge 0}$ by letting $\hat{v}_{l,i}(0) = x_{l,i}(0)$ and \[\hat{v}_{l,i}(t) = P_{\hat{v}_{l,i}(t-1)}^l(\alpha(t)(g_{l,i}(t) - \hat{g}_{l,i}(t))),\ \forall t\ge 1\] where $P_{\cdot}^l(\cdot)$ is the prox-mapping defined in \eqref{prox_mapping}. Then, by Assumption \ref{asm2} and Lemma 6.1 of \cite{nj:09}, \begin{align} \sum_{s=0}^{t-1}\sum_{i=1}^{n_l}\langle \alpha(s)(g_{l,i}(s) - \hat{g}_{l,i}(s)), \hat{v}_{l,i}(s) - x_l\rangle &\le n_lR_l^2 + \frac{n_l\nu_l^2}{2}\sum_{s=0}^{t-1}\alpha^2(s).\label{ergo_upper_bound_2} \end{align} Since $v_{l,i}(s)$ and $\hat{v}_{l,i}(s)$ are adapted to $\mathcal{F}_s$, it follows from an analysis similar to \eqref{unbias} that \begin{align} \mathbb{E}\left[\sum_{s=0}^{t-1}\sum_{i=1}^{n_l}\langle \alpha(s)(g_{l,i}(s) - \hat{g}_{l,i}(s)), v_{l,i}(s) - \hat{v}_{l,i}(s)\rangle\right] &= 0.\label{ergo_upper_bound_3} \end{align} As a combination of \eqref{ergo_upper_bound_1}-\eqref{ergo_upper_bound_3}, we obtain \begin{equation}\label{ergo_upper_bound} \mathbb{E}\left[\max_{x_l\in X_l}\sum_{s=0}^{t-1}\sum_{i=1}^{n_l}\langle \alpha(s)\hat{g}_{l,i}(s), v_{l,i}(s) - x_l\rangle\right] \le 2n_lR_l^2 + n_l\left((L + \nu_l)^2 + \frac{1}{2}\nu_l^2\right)\sum_{s=0}^{t-1}\alpha^2(s). 
\end{equation} On the other hand, it follows from the convexity of $f_{1,i}(\cdot,x_2)$ that \begin{align} \sum_{i=1}^{n_1}\langle \alpha(s)g_{1,i}(s), v_{1,i}(s) - x_1\rangle &\ge \sum_{i=1}^{n_1}\alpha(s)[f_{1,i}(v_{1,i}(s),u_{2,i}(s)) - f_{1,i}(x_1,u_{2,i}(s))].\label{ergo_lower_bound_1} \end{align} Derivation similar to \eqref{add_substract} yields \begin{align} \sum_{i=1}^{n_1}f_{1,i}(v_{1,i}(s),u_{2,i}(s))&\ge n_1U(\bar{x}_1(s),\bar{x}_2(s)) - L\sum_{i=1}^{n_1}(\|u_{2,i}(s) - \bar{x}_2(s)\| + \|v_{1,i}(s) - \bar{x}_1(s)\|) \end{align} Note that \begin{align} -\sum_{i=1}^{n_1}f_{1,i}(x_1,u_{2,i}(s)) &= -\sum_{i=1}^{n_1}f_{1,i}(x_1,u_{2,i}(s)) + \sum_{i=1}^{n_1}f_{1,i}(x_1,\bar{x}_{2}(s))- \sum_{i=1}^{n_1}f_{1,i}(x_1,\bar{x}_{2}(s))\notag\\ &\ge -L\sum_{i=1}^{n_1}\|u_{2,i}(s) - \bar{x}_2(s)\|- n_1U(x_1,\bar{x}_2(s)).\label{ergo_lower_bound_2} \end{align} By the concavity of $U(x_1,\cdot)$, \begin{align} U(x_1,\hat{x}_{2,j}(t)) &\ge \frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{s=0}^{t-1}\alpha(s)U(x_1,x_{2,j}(s)) \notag\\ &= \frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{s=0}^{t-1}\alpha(s)[U(x_1,x_{2,j}(s)) - U(x_1,\bar{x}_2(s)) + U(x_1,\bar{x}_2(s))]\notag\\ &\ge \frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{s=0}^{t-1}\alpha(s)[-L\|x_{2,j}(s) - \bar{x}_2(s)\| + U(x_1,\bar{x}_2(s))].\notag \end{align} Therefore, combining \eqref{ergo_lower_bound_1}-\eqref{ergo_lower_bound_2} and taking an ergodic average, we obtain \begin{align} &\quad\frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{s=0}^{t-1}\alpha(s)\frac{1}{n_1}\sum_{i=1}^{n_1}\langle \alpha(s)g_{1,i}(s), v_{1,i}(s) - x_1\rangle\notag\\ &\ge \frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{s=0}^{t-1}\alpha(s)U(\bar{x}_1(s),\bar{x}_2(s)) - U(x_1,\hat{x}_{2,j}(t))\notag\\ &\quad - \frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{s=0}^{t-1}\alpha(s)\left[\frac{1}{n_1}\sum_{i=1}^{n_1}(L(2\|u_{2,i}(s) - \bar{x}_2(s)\| + \|v_{1,i}(s) - \bar{x}_1(s)\|)) + L\|x_{2,j}(s) - \bar{x}_2(s)\|\right].\label{final_lower_bound_1} \end{align} In a similar way, we show that for all $x_2\in X_2$, \begin{align} &\quad\frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{s=0}^{t-1}\alpha(s)\frac{1}{n_2}\sum_{i=1}^{n_2}\langle \alpha(s)g_{2,i}(s), v_{2,i}(s) - x_2\rangle\notag\\ &\ge -\frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{s=0}^{t-1}\alpha(s)U(\bar{x}_1(s),\bar{x}_2(s)) + U(\hat{x}_{1,i}(t),x_2)\notag\\ &\quad - \frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{s=0}^{t-1}\alpha(s)\left[\frac{1}{n_2}\sum_{i=1}^{n_2}(L(2\|u_{1,i}(s) - \bar{x}_1(s)\| + \|v_{2,i}(s) - \bar{x}_2(s)\|)) + L\|x_{1,i}(s) - \bar{x}_1(s)\|\right].\label{final_lower_bound_2} \end{align} Adding \eqref{final_lower_bound_1}-\eqref{final_lower_bound_2} and utilizing \eqref{ergo_upper_bound}, we get \begin{align*} &\quad\mathbb{E}\left[\max_{x_2\in X_2}U(\hat{x}_{1,i}(t),x_2) - \min_{x_1\in X_1}U(x_1,\hat{x}_{2,i}(t))\right]\\ &\le \frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{l=1}^2\left(2R_l^2 + \left((L + \nu_l)^2 + \frac{\nu_l^2}{2}\right)\sum_{s=0}^{t-1}\alpha^2(s)\right)\\ &\quad + \frac{1}{\sum_{s=0}^{t-1}\alpha(s)}\sum_{s=0}^{t-1}4L\alpha(s)(H_1(s) + H_2(s)). \end{align*} Thus, the conclusion follows from Lemma \ref{lem2}. \noindent{\bf Proof of Corollary \ref{col2}}. Let $(x_1^{\ast},x_2^{\ast})$ be the NE and note that \begin{equation}\label{gap_lower_bound} \mathbb{E}\left[\max_{x_2\in X_2}U(\hat{x}_{1,i}(t),x_2) - \min_{x_1\in X_1}U(x_1,\hat{x}_{2,j}(t))\right] \ge \mathbb{E}\left[U(\hat{x}_{1,i}(t),x_2^{\ast}) - U(x_1^{\ast},\hat{x}_{2,j}(t))\right]. 
\end{equation} We further have the decomposition \begin{align*} \mathbb{E}\left[U(\hat{x}_{1,i}(t),x_2^{\ast}) - U(x_1^{\ast},\hat{x}_{2,j}(t))\right] &= \mathbb{E}\left[U(\hat{x}_{1,i}(t),x_2^{\ast}) - U(x_1^{\ast},x_2^{\ast}) + U(x_1^{\ast},x_2^{\ast}) - U(x_1^{\ast},\hat{x}_{2,j}(t))\right]. \end{align*} Since $U(\cdot,\cdot)$ is $\mu$-strongly convex-strongly concave, by the definition of NE, we obtain \[U(\hat{x}_{1,i}(t),x_2^{\ast}) - U(x_1^{\ast},x_2^{\ast})\ge \langle \partial_1U(x_1^{\ast},x_2^{\ast}),\hat{x}_{1,i}(t) - x_1^{\ast}\rangle + \frac{\mu}{2}\|\hat{x}_{1,i}(t) - x_1^{\ast}\|^2\ge \frac{\mu}{2}\|\hat{x}_{1,i}(t) - x_1^{\ast}\|^2.\] In a similar way, \[U(x_1^{\ast},x_2^{\ast}) - U(x_1^{\ast},\hat{x}_{2,j}(t)) \ge \frac{\mu}{2}\|\hat{x}_{2,j}(t) - x_2^{\ast}\|^2.\] Therefore, \[\mathbb{E}\left[U(\hat{x}_{1,i}(t),x_2^{\ast}) - U(x_1^{\ast},\hat{x}_{2,j}(t))\right]\ge \mathbb{E}\left[\frac{\mu}{2}\|\hat{x}_{1,i}(t) - x_1^{\ast}\|^2 + \frac{\mu}{2}\|\hat{x}_{2,j}(t) - x_2^{\ast}\|^2\right].\] By Theorem \ref{thm5} and \eqref{gap_lower_bound}, we get the conclusion. \noindent{\bf Proof of Theorem 5}. Applying Lemma \ref{lem1} to \eqref{def_x}, we get that for $l = 1,2$, \begin{align} D_{\psi_l}(x_l,x_{l,i}(t+1))&\le D_{\psi_l}(x_l,v_{l,i}(t)) + \langle \alpha(t)\hat{g}_{l,i}(t),x_l - v_{l,i}(t)\rangle + \frac{1}{2}\alpha^2(t)\|\hat{g}_{l,i}(t)\|_{\ast}^2.\label{recursive} \end{align} By Assumption \ref{asm3} and $\sum_{i=1}^{n_l}w_{l,ij}(t) = 1$, $\sum_{i=1}^{n_l}D_{\psi_l}(x_l,v_{l,i}(t))\le \sum_{i=1}^{n_l}\sum_{j=1}^{n_l}w_{l,ij}(t)D_{\psi_l}(x_l,x_{l,j}(t)) = \sum_{i=1}^{n_l}D_{\psi_l}(x_l,x_{l,i}(t))$. It then follows from \eqref{recursive} that \begin{align} \sum_{i=1}^{n_l}D_{\psi_l}(x_l,x_{l,i}(t+1))&\le \sum_{i=1}^{n_l}D_{\psi_l}(x_l,x_{l,i}(t)) + \sum_{i=1}^{n_l}\langle \alpha(t)\hat{g}_{l,i}(t),x_l - v_{l,i}(t)\rangle + \frac{1}{2}\alpha^2(t)\sum_{i=1}^{n_l}\|\hat{g}_{l,i}(t)\|_{\ast}^2\notag \end{align} Plugging \eqref{lower_bound_2} to this relation, we obtain \begin{align} \frac{1}{n_1}\sum_{i=1}^{n_1}D_{\psi_1}(x_1, x_{1,i}(t+1)) &\le \frac{1}{n_1}\sum_{i=1}^{n_1}D_{\psi_1}(x_1, x_{1,i}(t)) + \alpha(t)(U(x_1,\bar{x}_2(t)) - U(\bar{x}_1(t),\bar{x}_2(t)))\notag\\ &\quad + \alpha(t)L\frac{1}{n_1}\sum_{i=1}^{n_1}(\|x_{1,i}(t) - \bar{x}_1(t)\| + \|v_{1,i}(t) - x_{1,i}(t)\| + 2\|u_{2,i}(t) - \bar{x}_2(t)\|)\notag\\ &\quad + \alpha^2(t)\frac{(L + \nu_1)^2}{2} + \alpha(t)\frac{1}{n_1}\sum_{i=1}^{n_1}\langle\hat{g}_{1,i}(t) - g_{1,i}(t),x_1 - v_{1,i}(t)\rangle. \label{network_1} \end{align} Similar to \eqref{lower_bound_2}, we also derive a lower bound for $\sum_{i=1}^{n_2}\langle\alpha(t)g_{2,i}(t),x_2 - v_{2,i}(t)\rangle$. Furthermore, \begin{align} \frac{1}{n_2}\sum_{i=1}^{n_2}D_{\psi_2}(x_2, x_{2,i}(t+1)) &\le \frac{1}{n_2}\sum_{i=1}^{n_2}D_{\psi_2}(x_2, x_{2,i}(t)) + \alpha(t)(U(\bar{x}_1(t),\bar{x}_2(t)) - U(\bar{x}_1(t),x_2))\notag\\ &\quad + \alpha(t)L\frac{1}{n_2}\sum_{i=1}^{n_2}(\|x_{2,i}(t) - \bar{x}_2(t)\| + \|v_{2,i}(t) - x_{2,i}(t)\| + 2\|u_{1,i}(t) - \bar{x}_1(t)\|)\notag\\ &\quad + \alpha^2(t)\frac{(L + \nu_2)^2}{2} + \alpha(t)\frac{1}{n_2}\sum_{i=1}^{n_2}\langle\hat{g}_{2,i}(t) - g_{2,i}(t),x_2 - v_{2,i}(t)\rangle. \label{network_2} \end{align} Let $(x_1,x_2) = (x_1^{\ast},x_2^{\ast})$ be the NE and consider the following Lyapunov function \begin{equation}\label{def-v} V(t,x_1^{\ast},x_2^{\ast}) = \frac{1}{n_1}\sum_{i=1}^{n_1}D_{\psi_1}(x_1^{\ast},x_{1,i}(t)) + \frac{1}{n_2}\sum_{i=1}^{n_2}D_{\psi_2}(x_2^{\ast},x_{2,i}(t)). 
\end{equation} Recall that $v_{l,i}(t)$, $\bar{x}_l(t)$ and $u_{l,i}(t)$ are adapted to $\mathcal{F}_t$. By adding \eqref{network_1} and \eqref{network_2}, taking conditional expectation on $\mathcal{F}_t$, and using \eqref{condition_unbias}, we obtain \begin{align} \mathbb{E}[V(t+1,x_1^{\ast},x_2^{\ast})|\mathcal{F}_{t}] &\le V(t,x_1^{\ast},x_2^{\ast}) - \alpha(t)(U(\bar{x}_1(t), x_2^{\ast}) - U(x_1^{\ast},\bar{x}_2(t)))\notag\\ &\quad + \alpha^2(t)\left(\frac{(L + \nu_1)^2}{2} + \frac{(L + \nu_2)^2}{2}\right)\notag\\ &\quad + \alpha(t)L\sum_{l=1}^2\frac{1}{n_l}\sum_{i=1}^{n_l}e_{l,i}(t),\label{V_iter} \end{align} where $e_{l,i}(t) = \|x_{l,i}(t) - \bar{x}_l(t)\| + \|v_{l,i}(t) - x_{l,i}(t)\| + 2\|u_{3-l,i}(t) - \bar{x}_{3-l}(t)\|$.\\ By the definition of $H_l(t)$ in \eqref{H_l} and exchanging the order of summation, we obtain \[\sum_{t=1}^T\alpha(t)H_l(t)\le\frac{n_l\Gamma_l\Lambda_l\alpha(0)}{1 - \theta_l} + 2(L+\nu_l)\sum_{t=1}^T\alpha^2(t-1) + \frac{n_l\Gamma_l(L + \nu_l)}{1 - \theta_l}\sum_{t=1}^T\alpha^2(t-1).\] Therefore, it follows from $\sum_{t=1}^{\infty}\alpha^2(t) < \infty$ that $\sum_{t=1}^{\infty}\alpha(t)H_l(t) < \infty$. We further obtain $\sum_{t=1}^{\infty}\alpha(t)\mathbb{E}[e_{l,i}(t)] < \infty$ by using Lemma \ref{lem2}. By the monotone convergence theorem, \[\mathbb{E}[\sum_{t=1}^{\infty}\alpha(t)e_{l,i}(t)] = \sum_{t=1}^{\infty}\alpha(t)\mathbb{E}[e_{1,i}(t)] < \infty.\] Thus, $\sum_{t=1}^{\infty}\alpha(t)e_{l,i}(t) < \infty$ with probability $1$. Meanwhile, note by the definition of NE that \begin{equation}\label{U_diff} U(\bar{x}_1(t), x_2^{\ast})\ge U(x_1^{\ast},x_2^{\ast})\ge U(x_1^{\ast},\bar{x}_2(t)). \end{equation} By Lemma \ref{lem3}, $V(t,x_1^{\ast},x_2^{\ast})$ converges to a non-negative random variable with probability $1$ and \[0\le\sum_{t=0}^{\infty}\alpha(t)(U(\bar{x}_1(t),x_2^{\ast}) - U(x_1^{\ast},\bar{x}_2(t))) < \infty,\ a.s..\] Also, we have \[0\le \sum_{t=0}^{\infty}\alpha(t)\|x_{l,i}(t) - \bar{x}_l(t)\| < \infty,\ a.s.\] Therefore, by $\sum_{t=0}^{\infty}\alpha(t) = \infty$, there exists a subsequence $\{t_r\}$ such that with probability $1$, \[\lim_{r\to\infty}U(x_1^{\ast},\bar{x}_2(t_r)) = U(x_1^{\ast},x_2^{\ast}) = \lim_{r\to\infty}U(\bar{x}_1(t_r),x_2^{\ast}),\] and for all $i\in\mathcal{V}_1$, $j\in\mathcal{V}_2$, \begin{equation}\label{limit_error} \lim_{r\to\infty}x_{1,i}(t_r) = \lim_{r\to\infty}\bar{x}_1(t_r),\quad \lim_{r\to\infty}x_{2,j}(t_r) = \lim_{r\to\infty}\bar{x}_2(t_r). \end{equation} The bounded sequence $\{(\bar{x}_1(t_r),\bar{x}_2(t_r))\}$ has a convergent subsequence, and without loss of generality, we let it be indexed by the same index set $\{t_r,r = 1,2,\dots\}$. By the strict convexity-concavity of $U$, the NE is unique. Thus, according to the continuity of $U(\cdot,\cdot)$, $\bar{x}_1(t_r)\to x_1^{\ast}$ and $\bar{x}_2(t_r)\to x_2^{\ast}$ with probability $1$. Using \eqref{limit_error}, we further obtain $x_{1,i}(t_r)\to x_1^{\ast}$ and $x_{2,j}(t_r)\to x_2^{\ast}$. Therefore, by Assumption \ref{asm3} and the convergence of $V(t,x_1^{\ast},x_2^{\ast})$, $V(t,x_1^{\ast},x_2^{\ast})\to 0$ with probability $1$. Then, by \eqref{breg_prop2} and \eqref{def-v}, $x_{1,i}(t)\to x_1^{\ast}$ and $x_{2,j}(t)\to x_2^{\ast}$ with probability $1$. \bibliographystyle{IEEEtran} \bibliography{aistats2022.bib} \end{document}
2205.14658v1
http://arxiv.org/abs/2205.14658v1
Stability of measure solutions to a generalized Boltzmann equation with collisions of a random number of particles
\documentclass[leqno,a4paper, 12pt]{article} \usepackage{amsmath,amssymb,amsthm} \usepackage[polish,english]{babel} \usepackage{polski} \usepackage[cp1250]{inputenc} \usepackage[T1]{fontenc} \usepackage{makeidx} \pagestyle{empty} \setlength{\oddsidemargin}{-0.5cm} \setlength{\evensidemargin}{0.8cm} \setlength{\textheight}{25.0cm} \setlength{\textwidth}{17.0cm} \setlength{\topmargin}{-1.5cm} \newtheorem{rem}{Remark}[section] \newtheorem{coll}{Corollary}[section] \newtheorem{thm}{Theorem}[section] \newtheorem{lem}{Lemma}[section] \newtheorem{exmp}{Example}[section] \newtheorem{defin}{Definition}[section] \newtheorem{propos}{Proposition}[section] \newtheorem{pf}{Proof} \pagestyle{plain} \newcommand{\N}{\mathbb{N}} \newcommand{\Q}{\mathbb{Q}} \newcommand{\R}{\mathbb{R}} \newcommand{\C}{\mathbb{C}} \newcommand{\Om}{\Omega} \newcommand{\om}{\omega} \newcommand{\F}{\mathcal{F}} \newcommand{\M}{\mathcal{M}} \newcommand{\B}{\mathcal{B}} \newcommand{\bP}{\mathbb{P}} \newcommand{\D}{\mathcal{D}} \newcommand{\E}{\mathbb{E}} \newcommand{\tr}{\textrm} \newcommand{\DD}{\mathscr{D}} \newcommand{\Z}{\mathcal{Z}} \newcommand{\law}{\mathcal{L}} \newcommand{\dd}{\textrm{d}} \newcommand{\pp}{(P^t)_{t\in\R_+}} \newcommand{\m}{\mathcal{M}_{sig}} \newcommand{\s}{\mu_1-\mu_2} \newcommand{\sig}{\mu_+-\mu_-} \newcommand{\pt}{(P_t)_{t\in T}} \newcommand{\PT}{P^{t_0}(\nu-\mu)} \newcommand{\con}{\nu\ast\mu} \newcommand{\1}{\mathbb{1}} \newcommand{\splot}{P_{\ast n}} \newcommand{\cono}{\nu_0\ast\mu} \newcommand{\length}{\operatorname{length}} \newcommand{\ve}{\varepsilon} \makeindex \begin{document} \section*{\centerline{Stability of measure solutions to a generalized Boltzmann} \linebreak \centerline{equation with collisions of a random number of particles $^{\diamond}$}} \vskip5mm \centerline{Henryk Gacki$^{a}$ and \L ukasz Stettner$^{b, \star}$} \vskip4mm \centerline{$^{a}${\small{\it Faculty of Science and Technology, University of Silesia in Katowice, Bankowa 14,}}} \centerline{\small{\it 40-007 Katowice, Poland}} \vskip2mm \centerline{$^{b}${\small{\it Institute of Mathematics Polish Academy of Sciences, \'{S}niadeckich 8,}}} \centerline{\small{\it 00-656 Warsaw, Poland }} \vskip0.8cm \section*{Abstract} In the paper we study a measure version of the evolutionary nonlinear Boltzmann-type equation in which we admit collisions of a random number of particles. We first consider a stationary model and use two methods to find its fixed points: the first based on the Zolotarev seminorm and the second on the Kantorovich-Rubinstein maximum principle. Then a dynamic version of the Boltzmann-type equation is considered and its asymptotic stability is shown. \\ {\it Keywords}: Boltzmann equation, collisions of a random number of particles, Zolotarev seminorm, \linebreak Kantorovich-Rubinstein maximum principle\\ {\it 2010 MSC}: 82B31, 82B21. \section{{\bf Introduction}}\label{sec1Int} In the paper we consider a nonlinear evolutionary measure-valued Boltzmann-type equation of the form \begin{equation}\label{s4.w1.11adodm2} \frac{d\psi}{dt} + \psi =\bP\,\psi \qquad\text{for}\qquad t \geq 0 \end{equation} where the operator $\bP$ maps $\mathcal{M}_{1}(\mathbb R_{+})$, the space of probability measures on $\mathbb{R}_{+}=[0,\infty)$, into itself. We are looking for $ \psi : \mathbb{R}_{+} \rightarrow \mathcal{M}_{sig}(\mathbb{R}_{+})$ with $\psi_0\in\mathcal{M}_1(\mathbb{R}_{+})$, where $\mathcal{M}_{sig}(\mathbb{R}_{+})$ (or shortly $\mathcal{M}_{sig}$) is the space of all signed measures on $\mathbb{R}_{+}$.
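Let us note that a time-independent solution $\psi_*\in\mathcal{M}_{1}(\mathbb R_{+})$ of \eqref{s4.w1.11adodm2} satisfies
\[
\psi_* = \bP\,\psi_*,
\]
so the stationary model mentioned in the abstract amounts to finding the fixed points of the operator $\bP$.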
Equation \eqref{s4.w1.11adodm2} is a generalized version of the equation considered in \cite{tjon wu} (see also section 8.9 in \cite{LM}, or \cite{bob} for the motivation), \begin{equation}\label{8.9.2} \frac{\partial u(t,x)}{\partial t}+ u(t,x)=\int\limits_{x}\limits^{\infty}\frac{dy}{y}\int\limits_{0}\limits^{y}u(t, y-z)u(t,z)dz:=P\,u(x) \qquad t\geq 0,\qquad x\geq 0, \end{equation} which describes energy changes subject to the collision operator $ P\,u(x)$ and which was obtained ------------------------------------------------------------------------------------------------------------------------ {} $^{\diamond}$ {\small{Research supported by National Science Center, Poland by grant UMO- $2016/23/B/ST1/00479$}} {} $^{\star}$ {\small{Corresponding author}} {} {\small{{\it Email addresses}: [email protected] (Henryk Gacki), [email protected] (ukasz Stettner)}} \noindent from Boltzmann equation corresponding to a spatially homogeneous gas with no external forces using Abel transformation. To be more precise, in the theory of dilute gases Boltzmann equation in the general form ${ dF(t,x,v)\over dt}=C(F(t,x,v))$ gives us an information about time, position and velocity of particles of the dilute gas. This equation is a base for many mathematical models of colliding particles. In particular, for a spatially homogeneous gas we come to the equation \eqref{8.9.2} with additional conditions saying that its solution $u$, for fixed $t$, is a density with first moment equal to $1$, which in turn corresponds to the conservation law of mass and energy. The operator $Pu$ is a density function of the random variable $\eta (\xi_{1} + \xi_{2})$ where random variables $\eta$, $\xi_{1}$ and $\xi_2$ are independent and $\eta$ is uniformly distributed, while $\xi_1$, $\xi_2$ have the same density function $u$. The assumption that $\eta$ has uniform distribution on $[0,1]$ is quite restrictive and there are no physical reasons to assume that the distribution of energy of particles can be described only by its density (is absolutely continuous). Moreover collision of two particles maybe replaced by collision of a random number of particles. This is a reason that in what follows we shall consider a measure valued version \eqref{s4.w1.11adodm2} of the equation \eqref{8.9.2}. Let \begin{equation}\label{s4.w1.20adodm} D := \big\{\mu\in \mathcal{M}_{1}(\mathbb R_{+}) : m_{1}(\mu) = 1 \big\},\quad\text{with}\quad m_{1}(\mu) = \int\limits_{0}\limits^{\infty}\,x\mu(dx), \end{equation} and denote by $\bar{D}$ a weak closure of $D$, which is of the form \begin{equation}\label{s4.w1.20adod} \bar{D} := \big\{\mu\in \mathcal{M}_{1}(\mathbb R_{+}) : m_{1}(\mu) \leq 1 \big\}. \end{equation} The operator $P$ defined in \eqref{8.9.2} describes collision of two particles. In what follows we shall consider a general situation of collision of a random number of particles. To describe the collision operator in this case we start from recalling the convolution operator of order $n$ and the linear operator $P_{\varphi}$, which is related to multiplication of random variables. For every $n \in \mathbb{N}$ let $P_{*n}:\mathcal{M}_{sig} \rightarrow \mathcal{M}_{sig}$, be given by the formula \begin{equation}\label{equ63} P_{*1} \mu :=\mu, \quad P_{*(n+1)} \mu:=\mu * P_{*n}\mu \quad\text{for}\quad \mu\in \mathcal{M}_{sig}. \end{equation} It is easy to verify that $P_{*n}(\mathcal{M}_{1}(\mathbb R_{+}))\subset \mathcal{M}_{1}(\mathbb R_{+})$ for every $n\in \mathbb{N}$. 
Moreover, $P_{*n}|_{\mathcal{M}_{1}(\mathbb R_{+})}$ has a simple probabilistic interpretation: if $\xi_1,...,\xi_n$ are independent random variables with the same probability distribution $\mu$, then $P_{*n}\mu$ is the probability distribution of $\xi_1+...+\xi_n$. \\ The second class of operators we are going to study is related to multiplication of random variables. Formal definition is as follows: given $\mu,v \in \mathcal{M}_{sig}$, we define product $u \circ v$ by \begin{equation} (\mu \circ v)(A):= \int_{\mathbb{R}_+} \int_{\mathbb{R}_+} \mathbf{1}_{A} (xy) \mu(dx)v(dy) \quad\text{for}\quad A\in {\mathcal{B}_{\mathbb{R}_+}}. \end{equation} and \begin{equation}\label{equ58} \big \langle f,\mu \circ v \big \rangle = \int_{\mathbb{R}_+} \int_{\mathbb{R}_+} f(xy) \mu(dx)v(dy) \end{equation} for every Borel measurable $f:\mathbb{R}_+ \rightarrow \mathbb{R}$ such that $(x,y) \mapsto f(xy)$ is integrable with respect to the product of $|\mu|$ and $|v|$. For fixed $\varphi \in \mathcal{M}_{1}$ define \begin{equation}\label{equ62} P_{\varphi}\mu := \phi \circ \mu \quad\text{for}\quad \mu\in \mathcal{M}_{sig}. \end{equation} Similarly as in the case of convolution it follows that $P_\varphi (\mathcal{M}_1) \subset \mathcal{M}_1$. For $\mu\in \mathcal{M}_1$ the measure $ P_\varphi \mu$ has an immediate probabilistic interpretation: if $\varphi$ and $\mu$ are probability distributions of random variables $\xi$ and $\eta$ respectively, then $P_{\varphi}\mu$ is the probability distribution of the product $\xi \eta$. We introduce now definition of more general version of $P$ allowing infinite number of collisions: \begin{equation}\label{equ61} \bP:=\sum_{i=1}^\infty \alpha_i P_{\varphi_i} P_{*_i}=\sum_{i=1}^\infty \alpha_i \bP_i, \end{equation} where we have $\bP_i:=P_{\varphi_i} P_{*_i}$, $\sum\limits_{i=1}\limits^\infty \alpha_i=1$, $\alpha_i\geq 0$, $\varphi_i \in \mathcal{M}_1 $ and $m_1 (\varphi_i)=1/i$ \linebreak and the limit of the series is considered in the weak topology sense that is, $\sum\limits_{i=1}\limits^n \alpha_i P_{\varphi_i} P_{*_i}\Rightarrow \sum\limits_{i=1}\limits^\infty \alpha_i P_{\varphi_i} P_{*_i}$, which means $\sum\limits_{i=1}\limits^n \alpha_i P_{\varphi_i} P_{*_i}(f)\to \sum\limits_{i=1}\limits^\infty \alpha_i \bP_i(f)$ as $n\to \infty$, for any continuous bounded function $f$ defined on $\mathbb{R}$. From (\ref{equ61}) it follows that $\bP \mathcal{M}_1 \subset \mathcal{M}_1 $. Using (\ref{equ63}) and (\ref{equ62}) it is easy to verify that for $\mu \in D$, \begin{equation} m_1 (P_{*i} \mu)=i \quad\text{and}\quad m_1 (P_{\varphi_i} \mu) =1/i. \end{equation} Given $\mu \in \mathcal{M}_1$ the value of $\bP\mu$ can be considered as the probability distribution of a random variable $\zeta$ defined as \begin{equation}\label{s4.w1.6adodm} \zeta:=\eta_\tau (\sum_{j=1}^\tau \xi_{\tau j}), \end{equation} where we have sequences of independent random variables $\eta_i$, $\xi_{ij}$, $j=1,2,\ldots,$ and $\tau$, such that $\xi_{ij}$ have the same probability distribution $\mu$ for $j=1,2,\ldots,$, random variables $\eta_i$ have the probability distribution $\varphi_i$ and random variable $\tau$ takes values in the set $\left\{1,2,\ldots\right\}$ such that $P\left\{\tau=j\right\}=\alpha_j$. Physically this means that the number of colliding particles is random and energies of particles before a collision are of independent quantities and that a particle after collision of $i$-th particles takes the $\eta_i$ part of the sum of the energies of the colliding particles. 
We can also define $\tilde{\eta}_i:=i \eta_i$ and consider \begin{equation}\label{defzeta} \zeta:=\tilde{\eta}_\tau ({1\over \tau}\sum_{j=1}^\tau \xi_{ \tau j}) \end{equation} and write \begin{equation}\label{impf} \bP=\sum_{i=1}^\infty \alpha_i \tilde{P}_{\varphi_i} \tilde{P}_{*_i} \end{equation} where $\tilde{P}_{\varphi_i}={P}_{\tilde{\varphi}_i}$ with $\tilde{\varphi}_i$ being the probability distribution of $i \eta_i$, and $ \tilde{P}_{*_i}\mu$ is the probability distribution of ${\xi_{i1}+\ldots + \xi_{ii} \over i}$. Measure valued solutions to the Boltzmann-type equations with different collision operator and even in multidimensional case were studied in a number of papers see e.g. \cite{lm}, \cite{abcl} and \cite{Mor}. In the paper \cite{lm} existence and stability of measure solutions to the spatially homogeneous Boltzmann equations that have polynomial and exponential moment production properties is shown. In the paper \cite{abcl} existence and uniqueness of measure solutions to one dimensional Boltzmann dissipative equation and then their asymtotics is considered. In this paper first stationary (steady state) equation for a specific collision operator is studied and then dynamic fixed point theorem is used. Asymptotics of solutions to the Boltzmann equation with infinite energy to so called self similar solutions was studied in \cite{bc}. Asymptotic property of self similar solutions to the Boltzmann Equation for Maxwell molecules was then shown in \cite{bct}. Long time behaviour of the solutions to the nonlinear Boltzmann equation for spacially uniform freely cooling inelastic Maxwell molecules was studied in \cite{bict}. Stability of Boltzmann equation with external potential was also considered in \cite{W} and in the case of exterior problem in \cite{Y}. Solutions of the Boltzmann equation with collision of $N$ particles and their limit behaviour when $n\to \infty$ over finite time horizon were studied in \cite{ap}. In \cite{bcg}, the N-particle model, which includes multi-particle interactions was considered. It is shown that under certain natural assumptions we obtain a class of equations which can be considered as the most general Maxwell-type model. In \cite{Rudzwol} an individual based model describing phenotypic evolution in hermaphroditic populations which includes random and assortative mating of individuals is introduced. By increasing the number of individuals to infinity a nonlinear transport equation is obtained, which describes the evolution of phenotypic probability distribution. The main result of the paper is a theorem on asymptotic stability of trait (which concerns the model with more general operator $P$) with respect to Fortet-Mourier metric. Stability problems of the Boltzmann-type equation \eqref{s4.w1.11adodm2} with operator $P$ corresponding to collision of two particles was studied in the paper \cite{1 lasota traple}. The case with infinite number of particles was considered in \cite{20 lasota traple} using Zolotariev seminorm approach. Properties of stationary solutions corresponding two collision of two particles were studied in \cite{lastr}. In this paper we study stability of solutions to one dimensional Boltzmann-type equation \eqref{s4.w1.11adodm2} with operator $P$ of the form $\bP$ defined in \eqref{equ61}. 
We show that if this equation has a stationary solution $\mu^*$, such that its support covers $\mathbb{R}_+$, then taking into account positivity of solutions to \eqref{s4.w1.11adodm2}-\eqref{equ61} we have its asymptotical stability in Kantorovich - Wasserstein metric to $\mu^*$. We consider first stationary equation and look for fixed points of the operator $\bP$. For this purpose we adopt two methods to show the existence of fixed point of $\bP$ with the first moment equal to $1$. The first method is based on Zolotarev seminorm and in some sense simplifies the method used in \cite{20 lasota traple}. The second method is based on Kantorovich-Rubinstein maximum principle and generalizes the results of \cite{7} and then also of \cite{1 lasota traple} to the case of a random number of colliding particles. We also show several characteristics of fixed points of $\bP$. Results on the stationary equation are then used to study stability of the dynamic Boltzmann equation. The novelty of the paper is that we consider the Boltzmann-type equation \eqref{s4.w1.11adodm2} with random unbounded number of colliding particles and show its stability in Kantorovich - Wasserstein metric using probabilistic methods, generalizing former results of \cite{1 lasota traple}, \cite{20 lasota traple} and \cite{7}. To improve readability of the paper an appendix is added to the paper where some important results, which are used in the paper, are formulated and their proofs are sketched. \section{Properties of the operator $\bP$} We study first several properties $\bP$. \begin{propos} \label{P1} Operator $\bP$ transforms the set $D$ or $\bar{D}$ into itself.\linebreak It is continuous with respect to weak topology in $\mathcal{M}_{1}(\mathbb R_{+})$ i.e. whenever \linebreak $\mathcal{M}_{1}(\mathbb R_{+})\ni\mu_n\Rightarrow \mu$ we have $\bP \mu_n\Rightarrow \bP \mu$ as $n\to \infty$. Furthermore whenever $m_r(\varphi_i)=\int\limits_{\mathbb R_+} x^r \varphi_i(dx)<\infty$ and $m_r(\mu)=\int\limits_{\mathbb R_+} x^r \mu(dx)<\infty$ for $r\geq 1$, $i=1,2,\ldots$ then \begin{equation}\label{rmom} m_r(\bP\mu)\leq\sum_{i=1}^\infty \alpha_i m_r(\varphi_i) i^r m_r(\mu). \end{equation} \end{propos} \begin{proof} Note first that for each $\mu\in \mathcal{M}_{1}(\mathbb R_{+})$ we have that $P_{\varphi_i} P_{*_i}\mu \in \mathcal{M}_{1}(\mathbb R_{+})$ for $i=1,2,\ldots$ and therefore $\bP\mu \in \mathcal{M}_{1}(\mathbb R_{+})$. Moreover for $\mu\in D$ \begin{equation} m_1(P_{\varphi_i}P_{*_i}\mu)=\int_0^\infty \ldots \int_0^\infty x(y_1+y_2+\ldots+y_i)\varphi_i(dx)\mu(dy_1)\ldots\mu(dy_i)={1\over i}i=1 \end{equation} so that $P_{\varphi_i}P_{*_i}\mu\in D$ and consequently $\bP:D \mapsto D$. Similarly $\bP:\bar{D} \mapsto \bar{D}$. \linebreak For a given continuous bounded function $f:\mathbb R_{+}\to \mathbb{R}$ and $\mathcal{M}_{1}(\mathbb R_{+})\ni\mu_n\Rightarrow \mu$, as $n\to \infty$ we have \begin{eqnarray}\label{conv1} && P_{\varphi_i} P_{*_i}\mu_n(f)=\int_0^\infty \ldots \int_0^\infty f(x(y_1+y_2+\ldots+y_i))\varphi_i(dx)\mu_n(dy_1)\ldots\mu_n(dy_i)\to \nonumber \\ &&\int_0^\infty \ldots \int_0^\infty f(x(y_1+y_2+\ldots+y_i))\varphi_i(dx)\mu(dy_1)\ldots\mu(dy_i)=P_{\varphi_i} P_{*_i}\mu(f) \end{eqnarray} as $n\to \infty$. 
In fact, from continuity of $$ (y_1,y_2,\ldots,y_i) \to \int_0^\infty f(x(y_1+y_2+\ldots+y_i))\varphi_i(dx), $$ and weak convergence of the measures $\mu_n(dy_1)\ldots\mu_n(dy_i)\Rightarrow \mu(dy_1)\ldots\mu(dy_i)$, using \eqref{conv1} we immediately obtain that $\bP \mu_n\Rightarrow \bP \mu$ which is desired continuity property. Now using $\zeta$ defined in \eqref{defzeta}, independency of random variables, as well as convexity we obtain \begin{eqnarray} m_r(\bP\mu)&=&\int\limits_{\mathbb R_+} x^r \bP\mu(dx)=E\left[\zeta^r\right]=\sum_{i=1}^\infty \alpha_i E\left[\eta^r_i\right] i^r E\left[\left({1\over i}\sum_{j=1}^i \xi_{ \tau j}\right)^r\right] \nonumber \\ &\leq& \sum_{i=1}^\infty \alpha_i m_r(\eta_i) i^r m_r(\mu), \end{eqnarray} which completes the proof. \end{proof} We comment below the formula \eqref{rmom} \begin{rem} Note that since $m_1(\varphi_i)={1\over i}$ we have $\int\limits_{\mathbb R_+} x \varphi_i(dx)\leq \left(\int\limits_{\mathbb R_+} x^r \varphi_i(dx)\right)^{{1\over r}}$ and consequently $m_r(\varphi_i)\geq {1\over i^r}$, so that in general we may not have that $\sum\limits_{i=1}\limits^\infty \alpha_i m_r(\varphi_i) i^r < \infty$. This sum is finite however when for example we know that for a sufficiently large $i$ we have that $\alpha_i m_r(\varphi_i)\leq {1\over i^{\beta}}$ with $\beta>1+r$. Finiteness of the sum above shall play an important role in the approach to study fixed points of $\bP$ with the use of Zolotariev seminorm (see section 3). \end{rem} In what follows we are interested to find a fix point of the operator $\bP$ in the set $D$. It is clear clear $\mu=\delta_0$ is a fixed point of $\bP$ in $\bar{D}$. Typical way to find a fixed point is to consider iterations $\bP\mu$ for $\mu\in D$ and since the measures $\left\{\bP\mu,\bP^2\mu, \ldots\right\}$ are tight, expect a limit to be a fixed point. However even when such iteration converges in a weak topology, its limit will be in $\bar{D}$ and it is not clear that such limit will be a fixed point. Note furthermore that when we have more than one particle, the operator $\bP$ is nonlinear and we can not use several techniques typical for linear operators. \begin{rem} Notice that fixed point is not necessary in $D$ since the set $D$ is not closed in the weak topology. In fact, when $\mu_n\left\{{1\over n}\right\}={n\over n+1}$ and $\mu_n\left\{{n}\right\}={1\over n+1}$, then $\mu_n\in D$ and $\mu_n\Rightarrow \mu:=\delta_0$ so that total mass of $\mu$ is concentrated at $0$, and $\int_0^\infty x \mu(dx)=0$. Similary for $0< \alpha <1$, letting $\mu_n(1-\alpha)=1-{1\over n}$, $\mu_n(1)={1-\alpha \over n}$ and $\mu_n(n)={\alpha \over n}$ we clearly have that $\mu_n\in D$ and $\mu_n\Rightarrow \delta_{1-\alpha}$, as $n\to \infty$. One can show that the closure $\bar{D}$ of the set $D$ in the weak topology consists of all probability measures $\nu \in \mathcal{M}_{1}(\mathbb R_{+})$ such that $m_1(\nu)\leq 1$. Consider the following example: $P\left\{\varphi_i=0\right\}={n\over n+1}$, $P\left\{\varphi_i=(n+1){1\over i}\right\}={1\over n+1}$ and $\mu([\Delta,\infty))=1$ with $m_1(\mu)=1$, $\Delta >0$ and fixed integer $n\geq 1$. Then the support of $\bP\mu$ consists of $0$ and the second part contained in $[\Delta({n+1\over i}), \infty)$ and inductively the support of $\bP^k\mu$ contains $0$ and its second part is contained in $[\Delta({(n+1)\over i})^{k}, \infty)$, for positive integer $k$. 
Since $\bP^k\mu(0)={n\over n+1}$ it is clear that $\bP^k\mu\Rightarrow \delta_0$, as $k\to \infty$, so that limit of $\bP^k\mu$, which is also a fixed point of $\bP$, is not in $D$. On the other hand, when $P\left\{\varphi_i=\delta\right\}={n\over (n+1)-i\delta}$ and $P\left\{\varphi_i=(n+1){1\over i}\right\}={1-i\delta\over (n+1)-i\delta}$ with $i\delta<1$ and $\mu([\Delta,\infty))=1$ with $m_1(\mu)=1$, $\Delta >0$ we have that $\cup_k supp(\bP^k\mu)$ is dense in $[0,\infty)$. When support of $\varphi_i$ contains a sequence of positive real numbers converging to $0$, then $\cup_{k} supp(\bP^k\mu)$ is dense in $[0,\infty)$ no matter what $\mu\in D$ is chosen. Assume additionally that ${1\over i}\in supp \varphi_i$ for each $i=1,2,\ldots$. Then $supp(\bP^k\mu)\subset supp(\bP^{k+1}\mu)$, so that we have an increasing sequence of closed sets $supp(\bP^k\mu)$ which cover the interval $(0,\infty)$. \end{rem} Next two Propositions show properties of the fixed point of $\bP$. Let $\Lambda:=\left\{i: \alpha_i>0 \right\}$ \begin{propos} Let $\mu\in {D}$ be a fixed point of $\bP$. When there are at least two elements of $\Lambda$ and $\tilde{\varphi}_i=\delta_1$ for $i \in \Lambda$ then either $\mu=\delta_1$ or $m_\beta(\mu)=\infty$ for any $\beta>1$. When $\mu=\delta_1$ then $\tilde{\varphi}_i=\delta_1$ for $i \in \Lambda$. \end{propos} \begin{proof} Using \eqref{impf} we have that the law of $\tilde{\eta}_\tau({1\over \tau}\sum_{j=1}^\tau \xi_{\tau j})$, when the law of $\xi_{ij}$ is $\mu$, is also $\mu$. Then for $\beta>1$ we have \begin{equation}\label{imineq0} E\left\{\tilde{\eta}_\tau^\beta({1\over \tau}\sum_{j=1}^\tau \xi_{\tau j})^\beta\right\}=\sum_{i=1}^\infty \alpha_i m_\beta(\tilde{\varphi}_i) E\left\{({1\over i}\sum_{j=1}^i \xi_{i j})^\beta \right\} = m_\beta(\mu). \end{equation} Notice now that by strict convexity we have for $\beta>1$ that $\left({1\over i} \sum\limits_{j=1}^i \xi_{i j}\right)^\beta < {1\over i} \sum\limits_{j=1}^i \xi_{i j}^\beta$ with equality only when $\xi_{i j}=\xi_{i j'}$ for $j'\in \left\{1,2,\ldots,i\right\}$. Therefore when there are at least one element in $\Lambda$ before $i\in \Lambda$ and $\tilde{\varphi}_j=\delta_1$ for $j \in \Lambda$ we have \begin{equation}\label{imineq1} E\left\{({1\over i}\sum_{j=1}^i \tilde{\eta}_i\xi_{i j})^\beta \right\} < \sum_{j=1}^i {1\over i}E\left\{\tilde{\eta}_i^\beta\xi_{ij}^\beta\right\}=m_\beta(\tilde{\eta}_i) m_\beta(\mu)=m_\beta(\mu) \end{equation} with strict inequality whenever $m_\beta(\mu)<\infty$. Since $\xi_{i j}$ and $\xi_{i j'}$ are independent for $j'\neq j$, they should be deterministic and because $m_1(\mu)=1$ we have that $\mu=\delta_1$. When $\mu=\delta_1$ by \eqref{imineq0} and \eqref{imineq1} we have \begin{equation} E\left\{\tilde{\eta}_\tau^\beta({1\over \tau}\sum_{j=1}^\tau \xi_{\tau j})^\beta\right\}=m_\beta(\tilde{\varphi}_i) m_\beta(\mu)=m_\beta(\mu) \end{equation} which implies that $m_\beta(\tilde{\varphi}_i)=1=m_1(\tilde{\varphi}_i)$, which is again true only when $\tilde{\varphi}_i=\delta_1$. \end{proof} In the next Proposition we adopt some arguments of Proposition 2.1 of \cite{lastr} \begin{propos}\label{supp} Assume that for $1\neq k\in \Lambda$ there are $q_1, q_2\in supp \varphi_k$ such that $q_1>{1\over k}$, $q_2<{1\over k}$. Under the above assumptions, when $\mu^*$ is a fixed point of $\bP$, its support is either $\left\{0\right\}$ or $[0,\infty)$. \end{propos} \begin{proof} Notice that the support of each $\varphi_i$ contains an element not greater that ${1\over i}$. 
Therefore taking into account that $q_2<{1\over k}$ we have that when $0\neq r\in supp \mu^*$ then the support of $\bP \mu^*$ contains an element not greater than $$\sum\limits_{i=1, i\neq k}\limits^\infty \alpha_i r+\alpha_k q_2kr=\sum\limits_{i=1}\limits^\infty \alpha_i r+\alpha_k(q_2-{1\over k})kr=r(1-(1-kq_2)\alpha_k)).$$ Since $(1-(1-kq_2)\alpha_k)<1$ iterating the operator $\bP$ we obtain that $0\in supp \mu^*$. Therefore to show that $supp \mu^*$ in the case when $supp \mu^*\neq \left\{0\right\}$ covers whole interval $[0,\infty)$ it suffices to show that for any $r\in (0,\infty)\cap supp \mu^*$ we have that $A:=supp \left\{\bP_k^i\mu^*, \ for \ i=1,2,\ldots\right\}$ is dense in $[0,r]$. For a given $\varepsilon>0$ we can find positive integer $m$ such that $\left({1\over k}\right)^m\leq {\varepsilon \over r k q_1}$ and $(kq_2)^m\leq 1$. Then we can find positive integer $n$ such that $(kq_1)^{n-1}(kq_2)^m\leq ({1 \over k})^mkq _1\leq 1$ and $(kq_1)^{n}(kq_2)^m > 1$. Consequently we have that $q_2^m (kq_1)^n\leq {\varepsilon\over r}$ and $k^m q_2^m (kq_1)^n \geq 1$. Therefore $iq_2^m (kq_1)^nr\in A$ for $i=1,2,\ldots,k^m$, and $q_2^m (kq_1)^nr\leq \varepsilon$ while $k^mq_2^m (kq_1)^nr\geq r$. Since $\varepsilon$ can be chosen arbitrarily small and $A$ is closed we have that $[0,\infty)\subset A$, which we obtain taking into account that together with $r$ the values $kq_1^i$ are in $supp \mu^*$. \end{proof} \begin{rem} In some cases there is only one fixed point of $\bP$ which is $\delta_0\notin D$. Given probability measure $\mu$ on $\mathbb R_{+}$ consider its characteristic function $\psi(t)=\int e^{itx}\mu(dx)$. If $\mu$ is a fixed point of $\bP$ then in the case when $\Lambda=\left\{2\right\}$, $\psi$ satisfies the following function equation \begin{equation}\label{eqqq} \psi(t)=\int\limits_{\mathbb R_{+}} \left[\psi(tz)\right]^2\varphi_2(dz) \end{equation} Assume $\varphi_2={1\over 2} \delta_0 + {1\over 2} \delta_1$. Then \eqref{eqqq} takes the form \begin{equation} \psi(t)={1\over 2} + {1\over 2} \left[\psi(t)\right]^2 \end{equation} The solutions to this quadratic equation are constant functions $\psi(t)$ and since $\psi(0)=1$ the only solution which is a characteristic function is $\psi(t)\equiv 1$, which corresponds to $\mu=\delta_0$. Consequently we don't have fixed point of $\bP$ in the set $D$. \end{rem} In the space $ {\mathcal M}_1(\mathbb R_{+}) $ consider {\it Kantorovich - Wasserstein metric} (see \cite{1 hutchinson}, Definition 4.3.1 or \cite{Bogachev}) given by the formula \begin{equation}\label{subsec2:1.2} \| \mu_1 - \mu_2 \|_{\mathcal K} = \sup \{ | \mu_1(f) - \mu_2(f) | : \ f \in {\mathcal K} \} \qquad\text{for}\qquad \mu_1, \mu_2\in {\mathcal M}_{1}(\mathbb R_{+}), \end{equation} where ${\mathcal K}$ is the set of functions $ f: \mathbb R_{+} \to R $ which satisfy the condition \begin{equation*} \ | f(x) - f(y) | \le |x-y| \quad \text{\rm for} \quad x, y \in \mathbb R_{+} . \end{equation*} We recall now another, Forter-Mourier metric $\|\cdot \|_{\mathcal F}$ in $ {\mathcal M}_1(\mathbb R_{+}) $ \begin{equation} \| \mu_1 - \mu_2 \|_{\mathcal F} = \sup \{ | \mu_1(f) - \mu_2(f) | : \ f \in {\mathcal F} \} \qquad\text{for}\qquad \mu_1, \mu_2\in {\mathcal M}_{1}(\mathbb R_{+}), \end{equation} where ${\mathcal F}$ consists of functions $f$ from ${\mathcal K}$ such that $\|f\|_{sup}=\sup\limits_{x\in \mathbb R_{+}}|f(x)|\leq 1$. 
Almost immediately we have that $\| f \|_{\mathcal F}\leq \| f \|_{\mathcal K}$ for $\mu\in {\mathcal M}_{1}(\mathbb R_{+})$ which means that Kantorovich - Wasserstein metric is stronger than Fortet Mourier metric. Notice however that the convergence of probability measures in Fortet Mourier metric is equivalent to weak convergence (see \cite{ethier kurtz} or \cite{Bogachev}). We have \begin{lem} The operator $\bP$ is nonexpansive in $\bar{D}$ in Kantorovich - Wasserstein metric. \end{lem} \begin{proof} We are going to show that for each $i=1,2,\ldots$ the operators $\bP_i$ are nonexpansive in Kantorovich - Wasserstein metric. In fact, for $f\in {\mathcal K}$ we and $\mu\in \bar{D}$ we have \begin{equation} \bP_i\mu(f)=\int\limits_{\mathbb R_{+}} \ldots \int\limits_{\mathbb R_{+}} f(r(x_1+x_2+\ldots+x_i))\varphi_i(dr)\mu(dx_1)\mu(dx_2)\ldots \mu(dx_i). \end{equation} Define for $\nu\in \bar{D}$ \begin{eqnarray} &&\tilde{f}(x):=\int\limits_{\mathbb R_{+}} \ldots \int\limits_{\mathbb R_{+}}f(r(x+x_2+\ldots+x_i)){\varphi}_i(dr)(\mu(dx_2)\ldots \mu(dx_i)+ \nonumber \\ && \nu(dx_2)\mu(dx_3)\ldots \mu(dx_i)+\nu(dx_2)\nu(dx_3)\mu(dx_4)\ldots \mu(dx_i)+\ldots + \nonumber \\ && \nu(dx_2)\nu(dx_3)\ldots \nu(dx_i)). \end{eqnarray} Then $\tilde{f}\in {\mathcal K}$ (since $m_1({\varphi_i})={1\over i}$). Furthermore \begin{equation} \bP_i\mu(f)-\bP_i\nu(f)=\mu(\tilde{f})-\nu(\tilde{f}). \end{equation} Hence $\|\bP_i\mu - \bP_i\nu \|_{\mathcal K}\leq \|\mu-\nu\|_{\mathcal K}$ and consequently \begin{equation} \|\bP\mu-\bP\nu\|_{\mathcal K}\leq \sum_{i=1}^\infty \alpha^i \|\bP_i\mu - \bP_i\nu \|_{\mathcal K}\leq \|\mu-\nu\|_{\mathcal K}, \end{equation} which completes the proof. \end{proof} \begin{rem} One can notice that operator $\bP$ is not nonexpansive in Fortet Mourier metric. \end{rem} We also have \begin{coll}\label{cornon} Operator $\bP$ transforms $D$ into itself and is defined also as a limit of $\sum\limits_{i=1}\limits^{n} \alpha_i \bP_i$ in Kantorovich - Wasserstein metric. Furthermore $D$ is convex and closed in Kantorovich - Wasserstein metric. \end{coll} \begin{proof} Notice that for $\mu\in D$ we have $\left(\sum\limits_{i=1}\limits^n \alpha_i\right)^{-1} \sum\limits_{i=1}\limits^n \alpha_i \bP_i\mu\Rightarrow \bP\mu$ as $n\to \infty$ and $$m_1(\left(\sum_{i=1}^n \alpha_i\right)^{-1} \sum_{i=1}^n \alpha_i \bP_i\mu)=1=m_1(\bP\mu).$$ Therefore by the second part of Theorem \ref{unif} we have that $\bP\mu$ is a limit of $\sum\limits_{i=1}\limits^n \alpha_i \bP_i\mu$ in Kantorovich - Wasserstein metric. Convexity of $D$ is obvious. Closedness of $D$ in Kantorovich - Wasserstein metric follows almost immediately from the following arguments: convergence in Kantorovich - Wasserstein metric implies weak convergence and therefore the limit is a probability measure. Furthermore convergence in Kantorovich - Wasserstein metric implies convergence of the first moments. \end{proof} \section{Fixed point of $\bP$ using Zolotariev seminorm} In this section we shall introduce Zolotariev seminorm. Namely for $\mu\in \mathcal{M}_{sig}(\mathbb{R}_{+})$ and $r\in (1,2)$ define \begin{equation} \|\mu\|_r:=\sup\left\{\mu(f): f\in \mathcal{F}_r\right\} \end{equation} where $\mathcal{F}_r$ consists of differentiable functions $f: \mathbb R_{+} \to R $, which satisfy the condition \begin{equation*} \ | f'(x) - f'(y) | \le |x-y|^{r-1} \quad \text{\rm for} \quad x, y \in \mathbb R_{+} . \end{equation*} One can notice that when $f\in \mathcal{F}_r$ then also function $f(x)+\alpha+\beta x$ is in $\mathcal{F}_r$. 
Therefore $\|\mu\|_r=\infty$ whenever $\mu(\mathbb{R}_{+})\neq0$ or $m_1(\mu)\neq 0$. The following properties of Zolotariev seminorm will be used later on \begin{lem} Assume that \begin{equation}\label{ass1} \sum_{i=1}^\infty \alpha_i m_r(\phi_i) i^r<\infty \end{equation} then for $\mu, \nu\in D$, whenever $m_r(\mu)<\infty$ and $m_r(\nu)<\infty$ for $i=1,2,\ldots$, we have \begin{equation}\label{prop1} {1\over r}|m_r(\mu)-m_r(\nu)| \leq \|\mu-\nu\|_r\leq {1 \over r} |m_r|(\mu-\nu):={1\over r}\int\limits_{\mathbb{R}_{+}} x^r |\mu-\nu|(dx), \end{equation} \begin{equation}\label{prop2} \|P_{*_i}\mu-P_{*_i}\nu\|_r\leq i \|\mu-\nu\|_r, \end{equation} \begin{equation}\label{prop3} \| P_{\varphi_i}P_{*_i}\mu- P_{\varphi_i} P_{*_i}\nu\|_r\leq m_r(\varphi_i) i \|\mu-\nu\|_r, \end{equation} \begin{equation}\label{prop4} \|\bP\mu-\bP\nu\|_r\leq \sum_{i=1}^\infty \alpha_i m_r(\phi_i) i \|\mu-\nu\|_r. \end{equation} \end{lem} \begin{proof} When $f\in \mathcal{F}_r$ then $f(x)=f(0)+\int_0^1 x f'(tx)dt$. Therefore \begin{eqnarray} \mu(f)-\nu(f) &=& \int\limits_{\mathbb{R}_{+}}\int\limits_0\limits^1 x f'(tx)\mu(dx)-\int\limits_{\mathbb{R}_{+}}\int\limits_0\limits^1 x f'(tx)dt\nu(dx) \nonumber \\ &=& \int\limits_{\mathbb{R}_{+}}\int\limits_0\limits^1 x(f'(tx)-f'(0))dt\mu(dx)-\int_{\mathbb{R}_{+}}\int_0^1 x (f'(tx)-f'(0))dt\nu(dx) \nonumber \\ &\leq & \int\limits_{\mathbb{R}_{+}}\int\limits_0\limits^1 x|f'(tx)-f'(0)|dt |\mu(dx)-\nu(dx)| \nonumber \\ &=&\int\limits_{\mathbb{R}_{+}}\int\limits_0\limits^1 x |t x|^{r-1}dt |\mu(dx)-\nu(dx)|={1\over r} |m_r|(\mu-\nu), \end{eqnarray} which completes the proof of the second part of \eqref{prop1}. For $f(x)={1\over r}x^r$ we have that $f\in \mathcal{F}_r$ and then \begin{equation} \|\mu-\nu\|_r\geq {1\over r} |\int\limits_{\mathbb{R}_{+}} x^r\mu(dx) - \int\limits_{\mathbb{R}_{+}} x^r \nu(dx)|\geq {1\over r} |m_r(\mu)-m_r(\nu)|. \end{equation} Therefore we have \eqref{prop1}. To prove \eqref{prop2} notice that $P_{*_i}\mu-P_{*_i}\nu=\sum\limits_{j=1}\limits^i \mu_j *\mu - \sum\limits_{j=1}\limits^i \mu_j *\nu$ where $\mu_j=(P_{*_{i-j}}\mu)*(P_{*_{j-1}}\nu)$ with $P_{*_0}\mu=P_{*_0}\nu=\delta_0$. When $f\in\mathcal{F}_r$ then $\bar{f}(y)=\int \limits_{\mathbb{R}_{+}}f(x+y)\mu_j(dx)$ is in $\mathcal{F}_r$. Therefore \begin{eqnarray} \|\mu_j*(\mu-\nu)\|_r&=&\sup_{f\in {\mathcal{F}_r}}|\int\limits_{\mathbb{R}_{+}}\int\limits_{\mathbb{R}_{+}} f((x+y)\mu_j(dx)(\mu-\nu)(dy)| \nonumber \\ &=&\sup_{f\in {\mathcal{F}_r}}|\int\limits_{\mathbb{R}_{+}} \bar{f}(y)(\mu-\nu)(dy)|\leq \|\mu-\nu\|_r, \end{eqnarray} from which \eqref{prop2} easily follows. Now \begin{eqnarray} \| P_{\varphi_i}P_{*_i}\mu- P_{\varphi_i} P_{*_i}\nu\|_r &=&\sup_{f\in {\mathcal{F}_r}} \left(\int\limits_{\mathbb{R}_{+}}\int\limits_{\mathbb{R}_{+}} f(zx)\varphi_i(dz)P_{*_i}\mu(dx)- \int\limits_{\mathbb{R}_{+}}\int\limits_{\mathbb{R}_{+}} f(zx)\varphi_i(dz)P_{*_i}\nu(dx)\right) \nonumber \\ &\leq & m_r(\varphi_i)\sup_{f\in {\mathcal{F}_r}} \left( \int\limits_{\mathbb{R}_{+}} \tilde{f}(x)P_{*_i}\mu(dx)- \int\limits_{\mathbb{R}_{+}} \tilde{f}(x)P_{*_i}\nu(dx)\right) \nonumber \\ &\leq& m_r(\varphi_i) \|P_{*_i}\mu-P_{*_i}\nu\|_r, \end{eqnarray} where $\tilde{f}(x)={1\over m_r(\varphi_i)}\int\limits_{\mathbb{R}_{+}}f(zx)\varphi_i(dx)$ is an element of $\mathcal{F}_r$, provided that $f\in \mathcal{F}_r$. The estimation \eqref{prop3} follows now directly from \eqref{prop2}. From \eqref{prop3} we immediately obtain \eqref{prop4}. \end{proof} The main result of this section is the existence of a fixed point of $\bP$. 
We use the same assumptions as in the paper \cite{20 lasota traple}, where similar result was obtained. Our proof is different and shows another properties of the fixed point of $\bP$. It is formulated as follows \begin{thm}\label{fixedpoint} Assume that for some $r\in (1,2)$ we have $m_r(\varphi_i)< {1\over i}$ for all $i \in \Lambda$, \eqref{ass1} is satisfied and there is $\mu \in D$ such that $m_r(\mu)<\infty$. Then $\bP^n\mu$ converges in Kantorovich - Wasserstein metric to a unique $\mu^*\in D$, which is a fixed point of $\bP$ such that $m_r(\mu^*)<\infty$. \end{thm} \begin{proof} Under \eqref{ass1} using Proposition \ref{P1} we have that $m_r(\bP^n\mu)<\infty$ for $n=1,2,\ldots$. By \eqref{prop4} we have that $\|\bP^{j+1}\mu-\bP^j\mu\|_r\leq \lambda^j |\bP\mu-\mu\|_r$, where $\lambda=\sum\limits_{i=1}\limits^\infty \alpha_i m_r(\phi_i) i$. Therefore \begin{equation} \|\bP^n\mu-\mu\|_r=\|\sum\limits_{j=1}\limits^n \bP^{j}\mu-\bP^{j-1}\mu\|_r\leq \sum\limits_{j=1}\limits^n \lambda^j \|\bP\mu-\mu\|_r, \end{equation} where $\bP^0\mu=\mu$. Since by \eqref{prop1} \begin{equation} \|\bP^n\mu-\mu\|_r\geq {1\over r}|m_r(\bP^n\mu)-m_r(\mu)|, \end{equation} we therefore have that for $n=1,2,\ldots$ \begin{equation}\label{rmom} m_r(\bP^n\mu)\leq {r\over 1-\lambda}\|\bP\mu-\mu\|_r+m_r(\mu):=\kappa < \infty. \end{equation} The sequence of probability measures $\left\{\mu,\bP\mu,\bP^2\mu,\ldots\right\}$ is tight and therefore there is $\mu^*$ and subsequence $(n_k)$ such that $\bP^{n_k}\mu\Rightarrow \mu^*$ as $k\to \infty$. By \eqref{rmom} and Corollary \ref{impcor} we have that $\|\bP^{n_k}\mu -\mu^*\|_{\mathcal{K}}\to 0$ as $k\to \infty$. Consequently $\mu^*\in D$. Since for $K>0$ \begin{equation} \int\limits_{\mathbb{R}_{+}}(x^r\wedge K)\bP^{n_k}\mu(dx)\leq m_r(\bP^{n_k}\mu)\leq \kappa \end{equation} and therefore letting $k\to \infty$ we have that $\int_{\mathbb{R}_{+}}(x^r\wedge K)\mu^*(dx)\leq \kappa$, which by Fatou lemma gives that $m_r(\mu^*)\leq \kappa$. Now \begin{equation}\label{rozw1} \|\bP^{n_{k}}\bP\mu-\bP^{n_{k}}\mu\|_{\mathcal{K}}=\|\bP\bP^{n_{k}}\mu-\bP^{n_{k}}\mu\|_{\mathcal{K}}\to \|\bP\mu^*-\mu^*\|_{\mathcal{K}}:=\beta \end{equation} and \begin{eqnarray}\label{rozw2} &&\|\bP \bP \mu^* - \bP\mu^*\|_{\mathcal{K}}=\lim_{k\to \infty} \|\bP \bP \bP^{n_{k}}\mu - \bP\bP^{n_{k}}\mu\|_{\mathcal{K}}\geq \nonumber \\ &&\lim_{k\to \infty} \|\bP \bP^{n_{k+1}}\mu - \bP^{n_{k+1}}\mu\|_{\mathcal{K}}=\|\bP\mu^*-\mu^*\|_{\mathcal{K}}=\beta, \end{eqnarray} so that taking into account that $\|\bP \bP \mu^* - \bP\mu^*\|_{\mathcal{K}}\leq \|\bP\mu^*-\mu^*\|_{\mathcal{K}}$ we obtain that $\|\bP \bP \mu^* - \bP\mu^*\|_{\mathcal{K}} = \|\bP\mu^*-\mu^*\|_{\mathcal{K}}$. By small modification of \eqref{rozw1} and \eqref{rozw2} we obtain that for any $n=0,1,\ldots$ \begin{equation} \|\bP^{n+1} \mu^* - \bP^n\mu^*\|_{\mathcal{K}} = \|\bP\mu^*-\mu^*\|_{\mathcal{K}}. \end{equation} On the other hand by \eqref{prop4} we have that $\|\bP^{n+1} \mu^* - \bP^n\mu^*\|_r\leq \lambda^n \|\bP\mu^*-\mu^*\|_r$. Therefore $\lim\limits_{n\to \infty}\|\bP^{n+1} \mu^* - \bP^n\mu^*\|_r=0$. By Theorem \ref{Rio} we also have that $\lim\limits_{n\to \infty}\|\bP^{n+1} \mu^* - \bP^n\mu^*\|_{\mathcal{K}}=0$. Therefore $\|\bP\mu^*-\mu^*\|_{\mathcal{K}}=0$ and $\mu^*$ is a fixed point of $\bP$. 
If there is another weak limit $\nu^*$ of subsequence of $\bP^n\mu$, then $\|\bP^n\mu^*-\bP^n\nu^*\|_r\to 0$, as $n\to \infty$ and by Theorem \ref{Rio} again we have that $\|\mu^*-\nu^*\|_{\mathcal{K}}=\lim\limits_{n\to \infty}\|\bP^{n+1} \mu^* - \bP^n\mu^*\|_{\mathcal{K}}=0$. Consequently any weak limit of a subsequence of $\bP^n\mu$ is equal to $\mu^*$, which means that $\bP^n\mu\Rightarrow \mu^*$ and the convergence also holds in Kantorovich - Wasserstein metric. \end{proof} \begin{rem} We may not exclude the case in which we have another fixed point $\nu^*\in D$ of $\bP$ such that $m_r(\nu^*)=\infty$ for each $r>1$. Notice furthermore that in the case when $\varphi_i$ is uniformly distributed over the interval $[0,{1\over i}]$ then we have $m_r(\varphi_i)={1\over r+1} \left({2\over i}\right)^r<{1\over i}$ for all $r\in (1,2)$ and $i\in \Lambda\setminus\left\{1\right\}$, so that if \eqref{ass1} is satisfied and $1\notin \Lambda$, by the above theorem we have existence of a unique fixed point of $\bP$ in $D$ with finite $r$-th moment. \end{rem} \section{Contraction property of the operator $\bP$} We have the following generalization of Theorem 5.2.2 of \cite{7} \begin{thm}\label{thm71} Assume for some $i\in \Lambda$ we have that $0$ is an accumulation point of $\varphi_i$, where $m_1(\varphi_i)={1\over i}$. Then for $\mu,\nu\in {D}$ such that $\mu\neq \nu$ and \begin{equation}\label{equ75} supp(P_{*(i-1)}(\mu+\nu))=\mathbb{R}_+ \end{equation} we have \begin{equation} \|\bP_i \mu-\bP_i \nu\|_{\mathcal{K}} < \|\mu-\nu\|_{{\mathcal{K}}} \end{equation} and consequently \begin{equation} \|\bP \mu-\bP \nu\|_{\mathcal{K}} < \|\mu-\nu\|_{\mathcal{K}}. \end{equation} \end{thm} \begin{proof} Recall that $\bP_i={P}_{\varphi_i} {P}_{*_i}$ and assume that $\|\bP_i \mu-\bP_i \nu\|_{\mathcal{K}} = \|\mu-\nu\|_{\mathcal{K}}$. By Theorem \ref{A2} there is $f_0\in \mathcal{K}$ such that \begin{equation}\label{=} \|\bP_i \mu-\bP_i \nu\|_{\mathcal{K}}=\langle f_0,\bP_i \mu-\bP_i \nu\rangle. \end{equation} Then \begin{eqnarray} &&\|\mu-\nu\|_{{\mathcal{K}}}=\int_{{\mathbb{R}_+}^{i+1}}f_0((x_1+x_2+\ldots+x_i)r){\varphi}_i(dr) \nonumber \\ && \left[\mu(dx_1)\mu(dx_2)\ldots \mu(dx_i)-\nu(dx_1)\nu(dx_2)\ldots \nu(dx_i)\right]=\langle f_1,\mu-\nu \rangle, \end{eqnarray} where \begin{eqnarray} &&f_1(x)=\int_{{\mathbb{R}_+}^{i}}f_0((x+x_2+\ldots+x_i)r){\varphi}_i(dr) \left[\mu(dx_2)\ldots\mu(dx_i)+ \right. \nonumber \\ && \nu(dx_2)\mu(dx_3)\ldots\mu(dx_i)+\nu(dx_2)\nu(dx_3)\mu(dx_4)\ldots \mu(dx_i)+ \ldots + \nonumber \\ &&\left. \nu(dx_2)\nu(dx_3)\nu(dx_4)\ldots \mu(dx_i) + \nu(dx_2)\nu(dx_3)\nu(dx_4)\ldots \nu(dx_i)\right]. \end{eqnarray} Clearly, using again the fact that $m_1(\varphi_i)={1\over i}$ we have that ${f}_1\in \mathcal{K}$. By Theorem \ref{A2} there are two points $x_1, x_2 \in \mathbb{R}_+$ such that $x_1<x_2$ and $|{f}_1(x_2)-{f}_1(x_1)|=x_2-x_1$. Since ${f}_1$ is nonexpansive (Lipschitz with constant less or equal to $1$) we have that ${f}_1(x)=\theta x+\sigma$, for $x\in (x_1,x_2)$ with $|\theta|=1$. Therefore $|{f}_1(x_1+\varepsilon)-{f}_1(x_1)|=\varepsilon$ for $\varepsilon\in(0,x_2-x_1)$. Replacing $f_0$ by $-f_0$ we may assume that ${f}_1(x_1+\varepsilon)-{f}_1(x_1)=\varepsilon$ for $\varepsilon\in(0,x_2-x_1)$. We are going now to show that for $x\in \mathbb{R}_+$ \begin{equation}\label{id} f_0(x)=x+c \end{equation} with a constant $c\in \mathbb{R}$. Consider now $u_1,u_2 \in \mathbb{R}_+$ such that $u_1<u_2$. 
We want to show that then \begin{equation}\label{contr1} f_0(u_2)-f_0(u_1)\geq u_2-u_1, \end{equation} which by nonexpansiveness of $f_0$ implies that $f_0(u_2)-f_0(u_1)=u_2-u_1$ and therefore $f_0$ is of the form \eqref{id}. Assume conversely that $f_0(u_2)-f_0(u_1)<u_2-u_1$. Since $f_0$ as a Lipschitzian mapping is almost everywhere differentiable there is $\bar{u}\in (u_1,u_2)$ such that $f_0'(\bar{u})<1$ and for $\delta\in (0,\delta_0)$ we have \begin{equation} {f_0(\bar{u}+\delta)-f_0(\bar{u}) \over \delta} <1. \end{equation} Define \begin{equation}\label{equal0} h(y_2,\ldots,y_i,r,\varepsilon)={f_0((x_1+\varepsilon+y_2+\ldots+y_i)r)-f_0((x_1+y_2+\ldots+y_i)r) \over \varepsilon r}. \end{equation} By definition of $f_1$ for $\varepsilon\in (0,x_2-x_1)$ we have \begin{eqnarray}\label{equal1} && 1={{f}_1(x_1+\varepsilon) - {f}_1(x_1) \over \ve}= \int_{(\mathbb{R}_+)^{i-1}} \int_{\mathbb{R}_+} h(y_2,\ldots,y_i,r,\ve)r {\phi}_i(dr) \left[\mu(dy_2) \right. \nonumber \\ &&\ldots\mu(dy_i)+ \nu(dy_2)\mu(dy_3)\ldots\mu(dy_i)+\nu(dy_2)\nu(dy_3)\mu(dy_4)\ldots \mu(dy_i)+ \nonumber \\ && \left. \ldots +\nu(dy_2)\nu(dy_3)\nu(dy_4)\ldots \mu(dy_i) + \nu(dy_2)\nu(dy_3)\nu(dy_4)\ldots \nu(dy_i)\right]:= \nonumber \\ &&\int_{(\mathbb{R}_+)^{i}} h(y_2,\ldots,y_i,r,\ve)q(dy_2,\ldots,dy_i,dr), \end{eqnarray} where we define implicitly probability measure $q$. Since $0$ is an accumulation point of $supp \phi$ there is $\bar{r}\in supp {\phi}$ such that $x_1 \bar{r} < \bar{u}$. Then there is $(\bar{y}_2,\bar{y}_3,\ldots,\bar{y}_i)\in supp(P_{*(i-1)}(\mu+\nu))$ such that \begin{equation}\label{equal2} \bar{u}-x_1\bar{r}=(\bar{y}_2+\bar{y}_3+\ldots,\bar{y}_i)\bar{r}. \end{equation} Consequently for every $\bar{\ve}\in (0,x_2-x_1)$ such that $\bar{\ve}\bar{r}<\delta_0$ we have \begin{equation} h(\bar{y}_2+\bar{y}_3+\ldots,\bar{y}_i,\bar{r},\bar{\ve})={f_0(\bar{u}+\bar{\ve} \bar{r})-f_0(\bar{u}) \over \bar{\ve}\bar{r}}<1. \end{equation} Since $h\leq 1$ by continuity of $h$ and full support of $P_{*(i-1)}(\mu+\nu)$ we have that \begin{equation} \int_{(\mathbb{R}_+)^{i}} h(y_2,\ldots,y_i,r,\ve)q(dy_2,\ldots,dy_i,dr)<1, \end{equation} a contradiction to \eqref{equal1}. Therefore we have equality in \eqref{contr1}. Consequently $f_0(x)=x+c$ for a constant $c$. Since $\bP_i\mu$ and $\bP_i\nu\in D$ we therefore have $\langle f_0,\bP_i\mu-\bP_i\nu\rangle=m_1(\bP_i\mu)-m_1(\bP_i\nu)=0$ and by \eqref{=} $$\|\bP_i\mu-\bP_i\nu\|_{\mathcal{K}}=\|\mu-\nu\|_{\mathcal{K}}=0,$$ which contradicts the fact that $\mu\neq \nu$. \end{proof} \begin{rem} Condition $supp(P_{*(i-1)}(\mu+\nu))=\mathbb{R}_+$ is not very restrictive. It holds in particular when $supp(\mu+\nu)=\mathbb{R}_+$ or when $supp \mu=\mathbb{R}_+$. \end{rem} We have the following consequences of Theorem \ref{thm71} \begin{coll}\label{c1} If for some $i \in \Lambda$ we have that $0$ is an accumulation point of $\phi_i$ and $\mu^*$ is a weak accumulation point of $\bP^n\mu$ for $\mu \in {D}$ i.e. there is a sequence $(n_k)$ such that $\bP^{n_k}\mu\Rightarrow \mu^*$, as $k\to \infty$ and $supp \mu^*=\mathbb{R}_+$, $\mu^*\in D$ then $\mu^*$ is a fixed point of $\bP$. \end{coll} \begin{proof} Notice that $m_1(\bP^{n_k}\mu)=m_1(\mu^*)$ so that by Theorem \ref{unif} we have that $\|\bP^{n_k}\mu-\mu^*\|_{\mathcal{K}}\to 0$ as $k\to \infty$. 
Therefore \begin{equation} \|\bP^{n_{k+1}}\bP\mu-\bP^{n_{k+1}}\mu\|_{\mathcal{K}}\leq \|\bP^{n_{k}+1}\bP\mu-\bP^{n_{k}+1}\mu\|_{\mathcal{K}}\leq \|\bP^{n_{k}}\bP\mu-\bP^{n_{k}}\mu\|_{\mathcal{K}} \leq \|\bP\mu-\mu\|_{\mathcal{K}}, \end{equation} so that there is a limit $\lim_{k\to \infty} \|\bP^{n_{k}}\bP\mu-\bP^{n_{k}}\mu\|_{\mathcal{F}_0}:=\beta$ and by continuity \begin{equation} \|\bP^{n_{k}}\bP\mu-\bP^{n_{k}}\mu\|_{\mathcal{K}}=\|\bP\bP^{n_{k}}\mu-\bP^{n_{k}}\mu\|_{\mathcal{K}}\to \|\bP\mu^*-\mu^*\|_{\mathcal{K}}=\beta. \end{equation} If $\bP\mu^*\neq \mu^*$ and assumptions of Corollary are satisfied then by Theorem \ref{thm71} we have that $\|\bP \bP \mu^* - \bP\mu^*\|_{\mathcal{K}}<\|\bP\mu^*-\mu^*\|_{\mathcal{K}}=\beta$, while \begin{eqnarray} &&\|\bP \bP \mu^* - \bP\mu^*\|_{\mathcal{K}}=\lim_{k\to \infty} \|\bP \bP \bP^{n_{k}}\mu - \bP\bP^{n_{k}}\mu\|_{\mathcal{K}}\geq \nonumber \\ &&\lim_{k\to \infty} \|\bP \bP^{n_{k+1}}\mu - \bP^{n_{k+1}}\mu\|_{\mathcal{K}}=\|\bP\mu^*-\mu^*\|_{\mathcal{K}}=\beta \end{eqnarray} and we have a contradiction. Therefore $\bP\mu^* = \mu^*$. \end{proof} \begin{coll}\label{c2} If for some $i \in \Lambda$ we have that $0$ is an accumulation point of $\phi_i$ and $\mu^*\in {D}$ is a fixed point of $\bP$, then there are no other fixed point $\nu^*\in {D}$. Consequently for $\nu\in D$ we have that any weakly convergent subsequence $\bP^{n_k} \nu$ either converges to $\mu^*$, as $n\to \infty$, or to a measure $\tilde{\mu}\in \bar{D}\setminus D$. \end{coll} \begin{proof} By Proposition \ref{supp} since $\mu^*\in {D}$ we have that $supp \mu^*= \mathbb{R}_+$. Then we use again Theorem \ref{thm71}. Namely when $\nu^*\in {D}$ is another fixed point of $\bP$ we have \begin{equation} \|\mu^*-\nu^*\|_{\mathcal{K}}=\|\bP\mu^*-\bP\nu^*\|_{\mathcal{K}}<\|\mu^*-\nu^*\|_{\mathcal{K}}, \end{equation} which is a contradiction. Any sequence $(\bP^n\nu)$ is compact in Fortet Mourier metric and its subsequence converges to a measure $\tilde{\mu}$, and the convergence is also in Kantorovich Wasserstein metric whenever $m_1(\mu)=1$. In the last case we have $\tilde{\mu}=\mu^*$ by uniqueness of fixed point of $\bP$ in $D$. \end{proof} Using Theorem \ref{contr*} we can now strengthen the last Corollary. \begin{coll}\label{thm72} Assume there is $i\in \Lambda$ such that $0$ be an accumulation point of $\varphi_i$. Assume that $\mu^*\in {D}$ is a fix point of $\bP$. Then for $\nu\in {D}$ for any limit of weakly convergent subsequence $\bP^{n_k} \nu$ which belongs to $D$ is also in Kantorovich Wasserstein metric. Furthermore if the sequence $n\to \bP^n\nu$ is uniformly integrable we have \begin{equation}\label{conv*} \lim_{n\to \infty}\|\bP^n \nu-\mu^*\|_{\mathcal{K}}=0. \end{equation} \end{coll} \begin{proof} As above using Proposition \ref{supp}, since $\mu^*\in {D}$, we have that $supp \mu^*= \mathbb{R}_+$. The first part easily follows from Corollary \ref{c2}. It remains to notice only that uniform integrability of $n\to \bP^n\nu$ together with weak compactness implies compactness of $n\to \bP^n\nu$ in Kantorovich Wasserstein metric. Since any subsequence converges to the same limit in Kantorovich Wasserstein metric we have \eqref{conv*}. 
\end{proof} \section{Asymptotic stability of the nonlinear Boltzmann-type equation } Consider now the following equation in the space of signed measures \begin{equation}\label{Bo1} \frac{d\psi}{dt} + \psi = \bP\,\psi \qquad\text{for}\qquad t \geq 0 \end{equation} with initial condition \begin{equation}\label{Bo2} \psi\,(0) = \psi_{0}, \end{equation} where $\psi_0\in \mathcal{M}_1(\mathbb{R}_{+})$ and $ \psi : \mathbb{R}_{+} \rightarrow \mathcal{M}_{sig}(\mathbb{R}_{+})$. In this section we show that the equation (\ref{s4.w1.11adodm2}) may by considered in a convex closed subset $D$ of a vector space of signed measures $\mathcal{M}_{sig}(\mathbb{R}_{+})$. This approach seems to be quite natural and it is related to the classical results concerning the semigroups and differential equations on convex subsets of Banach spaces (see \cite{crandall}, \cite{20 lasota traple}). For details see Appendix. We finish the paper with sufficient conditions for the asymptotic stability of solutions of the equation \eqref{Bo1} with respect to Kantorovich--Wasserstein metric. Equation \eqref{Bo1} together with the initial condition \eqref{Bo2} may be considered in a convex subset ${D}$ of the vector space of finite signed measures $\mathcal{M}_{sig}$. \begin{coll}\label{cola} Assume $\varphi_i \in \mathcal{M}_1, i\in \{1, 2, ...,\} $ is such that $m_1(\varphi_i)={1\over i}$, $\bP$ is given by \eqref{equ61} with $\sum\limits_{i=1}\limits^\infty \alpha_i=1$, $\alpha_i\geq 0$. Then for every $ \psi_{0} \in D$ there exists a unique solution $\psi$ of problem \eqref{Bo1}, \eqref{Bo2} taking values in ${D}$. \end{coll} The solutions of \eqref{Bo1}, \eqref{Bo2} generate a semigroup of Markov operators $(P^{t})_{t\geq 0}$ on D given by \begin{equation}\label{s4.1a19} {P}^{t}\,u_{0} = u(t) \qquad\text{for}\qquad t\in {\mathbb R}_{+},\qquad u_{0}\in {D}. \end{equation} Now using Theorem \ref{thm71} we can easily derive the following result \begin{thm}\label{thm72} Let $\bP$ be operator given by \eqref{equ61}. Moreover, let $(\varphi_1,\varphi_2,...)$ be a sequence of probability measures such that $m_1(\varphi)={1\over i}$ and $\alpha_i\geq 0$ be a sequence of nonnegative numbers such that $\sum\limits_{i=1}\limits^\infty \alpha_i=1$. Assume that $0$ is an accumulation point of $supp\varphi_i$ for some $i\in \Lambda$. If $\bP$ has a fixed point $\mu^* \in D $ then \begin{equation}\label{s4.1a20} \lim_{t\to\infty} ||\psi (t)- \mu^*||_{\mathcal{K}} = 0 \end{equation} for every sequentially compact (in Kantorovich Wasserstein metric) solution $ \psi $ of \eqref{Bo1}, \eqref{Bo2}. \end{thm} \begin{proof} First we have to prove that $(P^{t})_{t\geq 0}$ is nonexpansive on $D$ with respect to Kantorovich-Wasserstein metric. For this purpose let $\eta_{0}, \vartheta_{0} \in D$ be given. For $t\in {\mathbb R}_{+}$ define \begin{equation} \upsilon(t) = P^{t}\,\eta_{0} - P^{t}\,\vartheta_{0}. \end{equation} Using \eqref{s4.1a18}, Corollary \ref{cornon} and \eqref{s4.1a19} it is easy to see that \begin{equation} v(t)= e^{-t} v_0 + \int\limits_{0}\limits^{t} e^{-(t-s)} (\bP(P^s \eta _0) - \bP(P^s \vartheta_0) ) ds \quad\textnormal{for} \quad t\in \mathbb{R}_+. \end{equation} From Corollary \ref{cola} it follows immediately that \begin{equation} ||v(t)||_{\mathcal{K}} \leq e^{-t} ||v(0)||_{\mathcal{K}} + \int\limits_{0}\limits^{t} e^{-(t-s)} ||v(s)||_{\mathcal{K}} ds \quad\textnormal{for} \quad t\in \mathbb{R}_+. 
\end{equation} Consequently \begin{equation} f(t) \leq ||v(0)||_{\mathcal{K}} + \int\limits_{0}\limits^{t} f(s) ds \quad\textnormal{for} \quad t\in \mathbb{R}_+, \end{equation} where $f(t)=e^t ||v(t)||_{\mathcal{K}} $. From the Gronwall inequality it follows that \begin{equation} f(t) \leq e^{t} ||v(0)||_{\mathcal{K}} \end{equation} This is equivalent to the fact that $(P^t)_{\geq t}$ is nonexpansive on $D$ with respect to Kantorovich - Wasserstein metric. Notice that $\mu^*$ as a fixed point of $\bP$ is a stationary solution to the equation \eqref{Bo1} i.e. $P^t \mu^*=\mu^*$. To complete the proof it is sufficient to verify condition \eqref{strict} of Theorem \ref{contr*}. From (\ref{s4.1a18}) and Proposition \ref{supp} and Theorem \ref{thm71} it follows immediately that for $\psi_{0}\in D$ and $t > 0$ \begin{eqnarray}\label{rown} &&\|P^{t}\psi_{0} - \mu^*\|_{\mathcal{K}} \leq e^{-t}\,\|\,\psi_{0} - \mu^*\|_{\mathcal{K}} + \int\limits_{0}\limits^{t}\,e^{-(t-s)}\,\|\bP P^s\psi_{0} - \bP\mu^*\|_{\mathcal{K}}\, ds < \nonumber \\ &&e^{-t}\,\|\,\psi_{0} - \mu^*\|_{\mathcal{K}} + (1 - e^{-t})\,\|P^{s}\psi_{0} - \mu^*\|_{\mathcal{K}}\leq \|\,\psi_{0} - \mu^*\|_{\mathcal{K}}. \end{eqnarray} By Theorem \ref{contr*} we immediately obtain \eqref{s4.1a20}. \end{proof} We shall now study nonlinear Boltzmann equation \eqref{Bo1} using Zolotariev seminorm following the results of \cite{20 lasota traple}. Consider time discretized version of \eqref{Bo1} with discretization step $h\in (0,1)$ \begin{equation}\label{Bo3} \frac{d\psi_{h}}{dt}(d_h(t)) + \psi_{h}(d_h(t)) = \bP\,\psi_{h} (d_h(t)) \qquad\text{for}\qquad t \geq 0 \end{equation} with initial condition \begin{equation}\label{Bo4} \psi_h\,(0) = \psi_{0}, \end{equation} where $\psi_0\in \mathcal{M}_1(\mathbb{R}_{+})$ and $d_h(t)=nh$ for $t\in [nh,(n+1)h)$. Then \begin{equation} \psi_h((n+1)h)=(1-h)\psi_h(nh)+h \bP\psi_h(nh):=\bP_h\psi_h(nh). \end{equation} Notice that fixed point of the operator $\bP$ is also a fixed point of $\bP_h$ and vice versa. We have \begin{lem} Under assumptions of Theorem \ref{fixedpoint} we have that \begin{itemize} \item[(i)] {when $m_r(\mu)<\infty$ then $m_r(\bP_h^n \mu)<\infty$ for any positive integer $n$ and $\mu\in \mathcal{M}_{1}(\mathbb R_{+})$}, \item[(ii)] {$\|\bP_h \mu - \bP_h \nu\|_r\leq \lambda_h \|mu-\nu\|_r$ for $\mu,\nu\in \mathcal{M}_{1}(\mathbb R_{+})$ such that $m_r(\mu)<\infty$, $m_r(\nu)<\infty$ with $\lambda_h=1-h(1-\lambda)$, where $\lambda=\sum\limits_{i=1}\limits^\infty \alpha_i m_r(\varphi_i) i$}, \item[(iii)] {$\|\bP_h^n\mu-\mu^*\|_{\mathcal{K}} \leq 2^{1+{1\over r}}\lambda_h^{{n\over r}} \left(\|\mu-\mu^*\|_r\right)^{1\over r}\leq K $ with $\mu^*\in D$ being the unique fixed point of $\bP$ and $K={1\over r} 2^{1+{1\over r}}(m_r(\mu)+m_r(\mu^*))$.} \end{itemize} \end{lem} \begin{proof} Under \eqref{ass1} using Proposition \ref{P1} we have that $m_r(\bP\mu)<\infty$ and therefore also $m_r(\bP_h \mu)<\infty$. Then (i) follows by induction. (ii) follows directly from the definition of $\bP$ and \eqref{prop4}. For fixed point $\mu^*$ of $\bP$, which is also a fixed point of $\bP_h$ and $\mu\in \mathcal{M}_{1}(\mathbb R_{+})$ such that $m_r(\mu)<\infty$ we have \begin{equation} \|\bP_h^n \mu\|_r \leq \lambda_h^n\|\mu-\nu\|_r. 
\end{equation} Therefore using Theorem \ref{Rio} and then \eqref{prop1} we obtain \begin{equation} \|\bP_h^n \mu - \mu^*\|_{\mathcal{K}}\leq 2^{1+{1\over r}} \lambda_h^{n\over r} (\|\mu-\mu^*\|_{\mathcal{K}})^{1\over r}\leq K \end{equation} with $K={1\over r} 2^{1+{1\over r}}(m_r(\mu)+m_r(\mu^*))$. \end{proof} We recall now Lemma 3 of \cite{20 lasota traple}. \begin{lem}\label{Las1} Under the assumptions of Theorem \ref{fixedpoint}, when $\mu=\psi_0$ is such that $m_r(\mu)<\infty$, we have \begin{equation} \|\psi_h(t)-\psi(t)\|_{\mathcal{K}}\leq 4K h (e^{2t}-1). \end{equation} \end{lem} We can now formulate \begin{thm} Under assumptions of Theorem \ref{fixedpoint} when $\mu=\psi_0$ is such that $m_r(\mu)<\infty$ we have that \begin{equation} \|\psi(t)-\mu^*\|_{\mathcal{K}}\leq K e^{-{t\over r}(1-\lambda)}. \end{equation} \end{thm} \begin{proof} We follow the arguments of the proof of Theorem 1 of \cite{20 lasota traple}. Fix $t>0$ and for a given $\varepsilon>0$ find positive integer $n$ such that ${t\over n}4K(e^{2t}-1)\leq \varepsilon$ Then by Lemma \ref{Las1} \begin{equation}\label{Las2} \|\psi(t)-\mu^*\|_{\mathcal{K}}\leq \|\psi(t)-\psi_h(t)\|_{\mathcal{K}} + \|\psi_h(t)-\mu^*\|_{\mathcal{K}}\leq \varepsilon + K(1-{t\over n}(1-\lambda))^{n\over r}. \end{equation} Since $1-x\leq e^{-x}$ for $x\geq 0$ we have that $(1-{t\over n}(1-\lambda))^{n\over r}\leq e^{-{t\over r}(1-\lambda)}$ and the claim follows from \eqref{Las2} taking into account that $\varepsilon$ could be chosen arbitrarily small. \end{proof} \section{Appendix} On a given complete metric space $(E,\rho)$ consider a continuous operator $T$ or continuous semigroup $(T_t)$, for $t\geq 0$ transforming $(E,\rho)$ into itself. Denote by $\omega(x)$ the set of all limiting points of the trajectory $n\to T^nx$ or $t\to T_tx$ respectively. We say that $n\to T^nx$ or $t\to T_tx$ is sequentially compact if from every sequence $T^{n_k}x$, $T_{t_{n_k}}x$ respectively, one could choose a convergent subsequence. Let ${\mathcal Z}$ be the set of all $x$ such that the trajectory $t \to T_tx$ ($n\to T^nx$) is sequentially compact. We shall assume that ${\mathcal Z}$ is a nonempty set and let $\Omega=\bigcup\limits_{\mu\in {\mathcal Z}} \omega(\mu)$. We have the following result formulated for semigroup $T_t$, which naturally holds for continuous operator $T$ \begin{thm} \label{contr*}(see Theorem 5.1.2 of \cite{7}) Assume that $T_t$ is nonexpansive i.e. \begin{equation} \rho(T_tx,T_ty)\leq \rho(x,y) \end{equation} for $t\geq 0$ and there is $x^*\in \Omega$ such that for every $x\in \Omega$, $x\neq x^*$ there is $t(x)$ such that \begin{equation}\label{strict} \rho(T_{t(x)}x,T_{t(x)}x^*)<\rho(x,x^*). \end{equation} Then for $z\in {\mathcal Z}$ we have \begin{equation} \lim_{t\to \infty}\rho(T_tz,x^*)=0. \end{equation} \end{thm} \begin{defin}\label{uninte} Sequence of probability measures $\mu_n$ defined on $\mathbb{R}_+$ is uniformly integrable when \begin{equation}\label{unint1} \sup_n \int\limits_M\limits^\infty x \mu_n(dx) \to 0, \end{equation} whenever $M\to \infty$. \end{defin} \begin{thm}\label{unif} Assume that for sequence of probability measures $\mu_n$ defined on $\mathbb{R}_+$ we have that $m_1(\mu_n)<\infty$ and $\mu_n\Rightarrow \mu$, as $n\to \infty$. Then $m_1(\mu_n)\to m_1(\mu)$ if and only if measures $\mu_n$ are uniformly integrable. 
Furthermore for sequence of probability measures $\mu_n$ defined on $\mathbb{R}_+$ such that $m_1(\mu_n)<\infty$ and we have that convergence $\mu_n\Rightarrow \mu$, as $n\to \infty$, together with convergence of $m_1(\mu_n)\to m_1(\mu)$ is equivalent to convergence $\|\mu_n-\mu\|_{\mathcal{K}}\to 0$. \end{thm} \begin{proof} By Skorokhod theorem (25.6 of \cite{bil}) there is a probability space $(\Omega,F,P)$ and nonnegative random variables $X_n$, $X$ with laws $\mu_n$ and $\mu$ respectively such that $X_n(\omega)\to X(\omega)$ for each $\omega \in \Omega$. Uniform integrability of $\mu_n$ is equivalent to uniform integrability of $X_n$. By Theorem II T21 of \cite{Meyer} uniform integrability of $X_n$ is equivalent to the convergence $m_1(\mu_n)\to m_1(\mu)$. To prove the last statement of Theorem notice that when $\|\mu_n-\mu\|_{\mathcal{K}}\to 0$ we have also $\|\mu_n-\mu\|_{\mathcal{F}}\to 0$, so that $\mu_n\Rightarrow \mu$ and $m_1(\mu_n)\to m_1(\mu)$. Assume now that $\mu_n\Rightarrow \mu$, as $n\to \infty$ and $m_1(\mu_n)\to m_1(\mu)$. Then $X_n$ defined above converges to $X$ in $L^1(P)$ norm. In particular for any function $f$ with Lipschitz constant not greater than $1$ we have \begin{equation} |\mu_n(f)-\mu(f)|=|E\left[f(X_n)\right]-E\left[f(X)\right]|\leq E\left[|f(X_n)-f(X)|\right]\leq E|X_n-X|\to 0 \end{equation} as $n\to \infty$, which means that we have also convergence in $\|\cdot\|_{\mathcal{K}}$ norm, which completes the proof. \end{proof} \begin{rem} The result above in not unexpected. For given measure $\mu\in D$ define $\bar{\mu}(A):=\int\limits_A x\mu(dx)$ for Borel measurable set $A$. Then compactness of the closure of the sequence $\left\{\bar{\mu}_n\in D\right\}$ is by Theorem 6.2 of \cite{1 billingsley} equivalent to the tightness of measures $\left\{\bar{\mu}_n\right\}$, which is equivalent to \eqref{unint1}. \end{rem} \begin{coll}\label{impcor} Whenever $D\ni \mu_n\Rightarrow \mu$ and $\sup_n m_\beta(\mu_n)<\infty$ for some $\beta>1$ we have \linebreak $\|\mu_n-\mu\|_{\mathcal{K}}\to 0$ as $n\to \infty$. \end{coll} \begin{proof} It is clear that \begin{equation} \sup_n \int\limits_M\limits^\infty x \mu_n(dx)\leq \sup_n \Big(\int\limits_0\limits^\infty x^\beta \mu_n(dx)\Big)^{1\over \beta} \Big(\int\limits_0\limits^\infty 1_{x\geq M}\mu_n(dx)\Big)^{\beta-1\over \beta}. \end{equation} Now $\int\limits_0\limits^\infty 1_{x\geq M}\mu_n(dx)\leq {1\over M}$, so that $\mu_n$ is uniformly integrable and it remains to use Theorem \ref{unif}.\linebreak \end{proof} Before we formulate next theorem we define metric $d_r$ in the space of probability measures defined on $\mathbb{R}_+$ with finite $r$-th moments, where $r\in [1,2)$. Namely for probability measures $\mu$ and $\nu$ such that $m_r(\mu)<\infty$ and $m_r(\nu)<\infty$ let \begin{equation} d_r(\mu,\nu):=\inf\left\{\left(E(|X-Y|^r)\right)^{1\over r}\right\}, \end{equation} where infimum taken over probability measures $P$ on $\mathbb{R}_+^2$ such that their marginals are $\mu$ and $\nu$ respectively. We have \begin{thm}\label{Rio} For $\mu,\nu\in D$ such that $m_r(\mu)<\infty$ and $m_r(\nu)<\infty$ with $r\in (1,2)$ we have \begin{equation}\label{Rioin} \|\mu-\nu\|_{\mathcal{K}}\leq 2(2\|\mu-\nu\|_r)^{1\over r}. \end{equation} \end{thm} \begin{proof} By \cite{Rio} we have that $d_r(\mu,\nu)\leq 2(2\|\mu-\nu\|_r)^{1\over r}$. Clearly $d_1(\mu,\nu)\leq d_r(\mu,\nu)$. Since by Theorem 20.1 of \cite{Dudley} $d_1(\mu,\nu)=\|\mu-\nu\|_{\mathcal{K}}$ we obtain \eqref{Rioin}. 
\end{proof} We now recall the Kantorovich-Rubinstein maximum principle for our metric $\|\cdot\|_{\mathcal{K}}$; see Corollary 6.2 of \cite{2 rachev}. \begin{thm}\label{A2} For probability measures $\mu,\nu$ defined on $\mathbb{R}_+$ there exists $f_0\in {\mathcal{K}}$ such that \begin{equation}\label{equality} \|\mu-\nu\|_{\mathcal{K}}=\langle f_0,\mu-\nu\rangle. \end{equation} Moreover, when $f_0\in {\mathcal{K}}$ satisfies \eqref{equality} for measures $\mu\neq \nu$ defined on $\mathbb{R}_+$, then there are two distinct points $x_1,x_2\in \mathbb{R}_+$ such that $|f_0(x_1)-f_0(x_2)|=|x_1-x_2|$. \end{thm} Finally, we recall some known results related to ordinary differential equations in Banach spaces. For details see \cite{crandall}. Let $( E, \|\cdot\|)$ be a Banach space and let $\tilde{D}$ be a closed, convex, nonempty subset of $E$. In the space $E$ we consider {\it an evolutionary differential equation}\index{evolutionary differential equation} \begin{equation}\label{s4.1a15} \frac{du}{dt}= - u + \tilde{P}\,u\qquad\text{for}\qquad t\in {\mathbb R}_{+} \end{equation} with the initial condition \begin{equation}\label{s4.1a16} u(0)=u_{0}, \qquad u_{0}\in \tilde{D}, \end{equation} where $\tilde{P} : \tilde{D} \to \tilde D$ is a given operator. A function $u: {\mathbb R}_{+} \to E$ is called {\it a solution to the problem} (\ref{s4.1a15}), (\ref{s4.1a16}) if it is strongly differentiable on ${\mathbb R}_{+}$, $u(t)\in \tilde{D}$ for all $t\in {\mathbb R}_{+}$, and $u$ satisfies relations (\ref{s4.1a15}), (\ref{s4.1a16}). We have \begin{thm}\label{sec4.twierdzenie1.3} Assume that the operator $\tilde{P}: \tilde{D}\to \tilde{D}$ satisfies the Lipschitz condition \begin{equation}\label{s4.1a17} \|\tilde{P}\,v - \tilde{P}\,w\| \leq l\,\|v - w\|\qquad\textnormal{for}\qquad v, w\in \tilde{D}, \end{equation} where $l$ is a nonnegative constant. Then for every $u_{0}\in \tilde{D}$ there exists a unique solution $u$ to the problem (\ref{s4.1a15}), (\ref{s4.1a16}). \end{thm} The standard proof of Theorem \ref{sec4.twierdzenie1.3} is based on the fact that a function $u: \mathbb {R}_{+}\to \tilde{D}$ is a solution to (\ref{s4.1a15}), (\ref{s4.1a16}) if and only if it is continuous and satisfies the integral equation \begin{equation}\label{s4.1a18} u(t) = e^{-t}\,u_{0} + \int\limits_{0}\limits^{t}\,e^{-(t-s)}\,\tilde{P}\,u(s)\,ds \qquad\text{for}\qquad t\in {\mathbb R}_{+}. \end{equation} Due to the completeness of $\tilde{D}$, the integral on the right-hand side is well defined and equation (\ref{s4.1a18}) may be solved by the method of successive approximations. Observe that, thanks to the properties of $\tilde{D}$, for every $u_{0}\in \tilde{D}$ and for every continuous function $ u : {\mathbb {R}}_{+} \to \tilde{D}$ the right-hand side of (\ref{s4.1a18}) is also a function with values in $\tilde{D}$. The solutions of (\ref{s4.1a18}) generate a semigroup of operators $(\tilde{P}^{\ t})_{t\geq 0}$ on $\tilde{D}$ given by the formula \begin{equation}\label{s4.1a19} \tilde{P}^{\ t}\,u_{0} = u(t) \qquad\text{for}\qquad t\in {\mathbb R}_{+},\qquad u_{0}\in \tilde{D}. \end{equation} \addcontentsline{toc}{section}{References} \begin{thebibliography}{00} \bibitem{abcl} R. Alonso, V. Bagland, Y. Cheng and B. Lods, \emph{One-Dimensional Dissipative Boltzmann Equation: Measure Solutions, Cooling Rate, and Self-Similar Profile}, SIAM J. Math. Anal. 50:1 (2018), 1278--1321. \bibitem{ap} I. Ampatzoglou and N.
Pavlovic, \emph{A Rigorous Derivation of a Ternary Boltzmann Equation for a Classical System of Particles}, arXiv:1903.04279v2. \bibitem{bil} H. P. Billingsley, \emph{Probability and Measure\/}, John Wiley, New York, 1986. \bibitem{1 billingsley} H. P. Billingsley, \emph{Convergence of Probability Measures\/}, John Wiley, New York, 1968. \bibitem{bict} M. Bisi, J.A. Carillo and G. Toscani, \emph{Decay Rates in Probability Metrics Towards Homogeneous Cooling States for the Inelastic Maxwell Model}, Journal of Statistical Physics 124 (2006), 625--653. \bibitem{bob} A.V. Bobylev, A. V. 1976. \emph{Exact solutions of the Boltzmann equations}, Sov. Phys. Dokl. 20 (1976), 822--824. \bibitem{bc} A. V. Bobylev and C. Cercignani, \emph{Self-Similar Solutions of the Boltzman Equation and Their Applications}, Journal of Statistical Physics 106 (2002), 1039--1071. \bibitem{bcg} A.V. Bobylev, C. Cercignani and I. M. Gamba, \emph{On the self-similar asymptotics for generalized non-linear kinetic Maxwell models}, Commun. Mathematical Physics 291(2009), 599--644. \bibitem{bct} A.V. Bobylev, C. Cercignani and G. Toscani, \emph{Proof of an Asymptotic Property of Self-Similar Solutions of the Boltzmann Equation for Granular Materials}, Journal of Statistical Physics 111 (2003), 403--417. \bibitem{Bogachev} V.I. Bogachev and A.V. Koleshnikov, \emph{\em The Monge-Kantorovich problem: achievements, connections, and perspectives \/}, Russian Math. Surveys 67:5 (2012), 3--110. \bibitem{crandall} M. G. Crandall, \emph{\em Differential equations on convex sets\/}, J. Math. Soc. Japan {22} (1970), 443--455. \bibitem{Dudley} R. M. Dudley, \emph{Probabilities and Metrics\/}, Aarhaus Universitet, Aarhaus 1976. \bibitem{ethier kurtz} S. Ethier and T. Kurtz, Markov Processes, \emph{Characterization and Convergence\/}, John Wiley and Sons, New York 1986. \bibitem{7} H. Gacki, {\em Applications of the Kantorovich--Rubinstein maximum principle in the theory of Markov semigroups\/}, Dissertationes Math., 448 (2007), 1--59. \bibitem{1 hutchinson} J. Hutchinson, {\em Fractals and self--similarity\/}, Indiana Univ. J. 30 (1981), 713--747. \bibitem{Y} X. Jia, \emph{Exterior problem for the Boltzmann equation with temperature difference: asymptotic stability of steady solutions}, J. Differential Equations 262 (2017), 3642--3688. \bibitem{LM} A. Lasota and M.C. Mackey, \emph{Chaos, Fractals and Noise. Stochastic Aspects of Dynamics}, Springer, 1994. \bibitem{1 lasota traple} A. Lasota and J. Traple, \emph{Kantorovich--Rubinstein maximum principle in the theory of the Tjon--Wu equation}, J.~Differential Equations {159} (1999), 578--596. \bibitem{20 lasota traple} A. Lasota and J. Traple, \emph{Asymptotic stability of differential equations on convex sets}, J. Dynam. Differential Equations {15} (2003), 335--355. \bibitem{lastr} A. Lasota and J. Traple, \emph{Properties of stationary solutions of a generalized Tjon--Wu equation}, J.~Math. Anal. Appl. {335} (2007), 669 -- 682. \bibitem{Mor} Y. Morimoto, S. Wang and T. Yang, \emph{Measure Valued Solutions to the Spatially Homogeneous Boltzmann Equation Without Angular Cutoff}, J Stat Phys (2016) 165:866--906. \bibitem{lm} X. Lu and C. Mouchot, \emph{On Measure Solutions of the Boltzmann equation, Part I: Moment Production and Stability Estimates}, arXiv: 1102.0373v2. \bibitem{Meyer} P. A. Meyer, \emph{Probability and Potentials}, Blaidell, 1966. \bibitem{2 rachev} S. T. Rachev,\emph{ Probability Metrics and the Stability of Stochastic Models}, Wiley, New York, 1991. \bibitem{Rio} E. 
Rio, \emph{Distances minimales et distances id\'eales}, C. R. Acad. Sci. Paris 326, S\'erie I, Probabilit\'es, (1998) 1127--1130. \bibitem{Rudzwol} R. Rudnicki and P. Zwolenski, \emph{Model of phenotypic evolution in hermaphroditic populations}, J. Math. Biol. 70 (2015), 1295--1321. \bibitem{tjon wu} J. A. Tjon and T. T. Wu, \emph{Numerical aspects of the approach to a Maxwellian equation}, Phys. Rev. A {19} (1979), 883--888. \bibitem{W} G. Wang, Y. Wang, \emph{Global stability of Boltzmann equation with large external potential for a class of large oscillation data}, Journal of Differential Equations, 267 (2019), 3610--3645. \end{thebibliography} \end{document}
\documentclass[11pt,a4paper]{article} \usepackage{cite} \usepackage{tikz}\usetikzlibrary{matrix,decorations.pathreplacing,positioning} \usepackage{hyperref} \usepackage{amsmath,amsthm,amssymb,bm,relsize} \usepackage{setspace} \setstretch{1.03} \usepackage{multirow} \usepackage{array} \newcolumntype{x}[1]{>{\centering\arraybackslash\hspace{0pt}}p{#1}} \usepackage{wasysym} \usepackage{caption} \usepackage{subcaption} \usepackage{enumitem} \setitemize{itemsep=-1pt} \setenumerate{itemsep=-1pt} \usepackage{titling} \setlength{\droptitle}{-1.4cm} \usepackage[margin=2.6cm]{geometry} \usepackage{pgfplots} \pgfplotsset{width=10cm,compat=1.9} \definecolor{myg}{RGB}{220,220,220} \theoremstyle{definition} \newtheorem{theorem}{Theorem}[section] \newtheorem{corollary}[theorem]{Corollary} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{conjecture}[theorem]{Conjecture} \newtheorem{definition}[theorem]{Definition} \newtheorem{example}[theorem]{Example} \usepackage{etoolbox}\AtEndEnvironment{example}{\null\hfill$\blacktriangle$} \newtheorem{notation}[theorem]{Notation} \newtheorem{remark}[theorem]{Remark} \newtheorem{remarks}[theorem]{Remarks} \newtheorem{theoremapp}{Theorem}[section] \newtheorem{corollaryapp}[theoremapp]{Corollary} \newtheorem{propositionapp}[theoremapp]{Proposition} \newtheorem{lemmaapp}[theoremapp]{Lemma} \newtheorem{definitionapp}[theoremapp]{Definition} \newtheorem{claim}{Claim} \renewcommand*{\theclaim}{\Alph{claim}} \newcommand*{\myproofname}{Proof of the claim} \newenvironment{clproof}[1][\myproofname]{\begin{proof}[#1]\renewcommand*{\qedsymbol}{\(\clubsuit\)}}{\end{proof}} \usepackage{caption} \newcommand\mdoubleplus{\mathbin{+\mkern-10mu+}} \newcommand{\numberset}{\mathbb} \newcommand{\N}{\numberset{N}} \newcommand{\Z}{\numberset{Z}} \newcommand{\Q}{\numberset{Q}} \newcommand{\R}{\numberset{R}} \newcommand{\C}{\numberset{C}} \newcommand{\K}{\numberset{K}} \newcommand{\F}{\numberset{F}} \newcommand{\Mat}{\mbox{Mat}} \newcommand{\mS}{\mathcal{S}} \newcommand{\mC}{\mathcal{C}} \newcommand{\mP}{\mathcal{P}} \newcommand{\mG}{\mathcal{G}} \newcommand{\mL}{\mathcal{L}} \newcommand{\mA}{\mathcal{A}} \newcommand{\mN}{\mathcal{N}} \newcommand{\mB}{\mathcal{B}} \newcommand{\mF}{\mathcal{F}} \newcommand{\mM}{\mathcal{M}} \newcommand{\mD}{\mathcal{D}} \newcommand{\mU}{\mathcal{U}} \newcommand{\mQ}{\mathcal{Q}} \newcommand{\mX}{\mathcal{X}} \newcommand{\mY}{\mathcal{Y}} \newcommand{\dH}{d^\textnormal{H}} \newcommand{\sh}{d_\textnormal{Sh}} \newcommand{\dstar}{d_\star} \newcommand{\sT}{\{T\}} \newcommand{\st}{\, : \,} \newcommand{\hdH}{d_{\hat{\textnormal{H}}}} \newcommand{\hde}{\hat{d}_\textnormal{e}} \newcommand{\we}{\omega_\textnormal{e}} \newcommand{\mE}{\mathcal{E}} \renewcommand{\mG}{\mathcal{G}} \newcommand{\mV}{\mathcal{V}} \newcommand{\mR}{\mathcal{R}} \newcommand{\mJ}{\mathcal{J}} \newcommand{\gm}{\gamma^{\textnormal{mc}}} \newcommand{\mZ}{\mathcal{Z}} \newcommand{\mK}{\mathcal{K}} \newcommand{\mH}{\mathcal{H}} \newcommand{\inn}{\textnormal{in}} \newcommand{\out}{\textnormal{out}} \newcommand{\Id}{\textnormal{Id}} \newcommand{\rest}{\textnormal{cp}} \newcommand{\Enc}{\textnormal{Enc}} \newcommand{\Dec}{\textnormal{Dec}} \newcommand{\lin}{\textnormal{lin}} \newcommand{\CA}{\textnormal{C}_1} \newcommand{\CC}{\textnormal{C}} \newcommand{\CAz}{\textnormal{C}_0} \newcommand{\CAzr}{\textnormal{C}_{0,\rest}} \newcommand{\reg}{\mR_1} \newcommand{\regz}{\mR_0} \newcommand{\regzr}{\mR_{0,\rest}} \newcommand{\HH}{\textnormal{H}} 
\newcommand{\adv}{\textnormal{\textbf{A}}} \newcommand{\dto}{\dashrightarrow} \newcommand{\ALB}[1]{\textcolor{red}{#1}} \newcommand{\ALLISON}[1]{\textcolor{blue}{#1}} \newcommand{\ABK}[1]{\textcolor{cyan}{#1}} \newcommand{\red}[1]{\textcolor{red}{#1}} \newcommand{\blue}[1]{\textcolor{blue}{#1}} \newcommand{\bd}[1]{{\bf #1}} \newcommand{\bfS}{\bf S} \newcommand{\bfT}{\bf T} \newcommand{\mincut}{\textnormal{min-cut}} \newcommand{\vertprod}{\renewcommand{\arraystretch}{0.3}\begin{array}{c}\times\\\raisebox{1ex}{\vdots}\\\times\end{array}} \newcommand{\concat}{\RHD} \newcommand\HP[3]{\HH_{#1,#2}\langle #3 \rangle} \newcommand\restHP[3]{\overline{\HH}_{#1,#2}\langle #3 \rangle} \newcommand\HB[3]{\HH_{\bm{#1},\bm{#2}}\langle \bm{#3} \rangle} \newcommand\AP[3]{\adv_{#1,#2}\langle #3 \rangle} \newcommand\AB[3]{\adv_{\bm{#1},\bm{#2}}\langle \bm{#3} \rangle} \newcommand\HHB[2]{\HH_{\bm{#1}}\langle \bm{#2} \rangle} \newcommand\HHP[2]{\HH_{#1}\langle #2 \rangle} \newlength{\mynodespace} \setlength{\mynodespace}{6.5em} \newcommand{\degin}{\partial^-} \newcommand{\degout}{\partial^+} \newtheorem{family}{Family} \renewcommand*{\thefamily}{\Alph{family}} \title{\textbf{\huge Network Decoding}} \usepackage{authblk} \author[1]{Allison Beemer} \affil[1]{Department of Mathematics, University of Wisconsin-Eau Claire, U.S.A.} \author[2]{Altan B. K\i l\i\c{c}\thanks{A. B. K. is supported by the Dutch Research Council through grant VI.Vidi.203.045.}} \author[3]{Alberto Ravagnani\thanks{A. R. is supported by the Dutch Research Council through grants VI.Vidi.203.045, OCENW.KLEIN.539, and by the Royal Academy of Arts and Sciences of the Netherlands.}} \affil[2,3]{Department of Mathematics and Computer Science, Eindhoven University of Technology, the Netherlands} \date{} \begin{document} \maketitle \begin{abstract} We consider the problem of error control in a coded, multicast network, focusing on the scenario where the errors can occur only on a \textit{proper subset} of the network edges. We model this problem via an adversarial noise, presenting a formal framework and a series of techniques to obtain upper and lower bounds on the network's (1-shot) capacity, improving on the best currently known results. In particular, we show that traditional cut-set bounds are not tight in general in the presence of a restricted adversary, and that the non-tightness of these is caused precisely by the restrictions imposed on the noise (and not, as one may expect, by the alphabet size). We also show that, in sharp contrast with the typical situation within network coding, capacity cannot be achieved in general by combining linear network coding with end-to-end channel coding, not even when the underlying network has a single source and a single terminal. We finally illustrate how network \textit{decoding} techniques are necessary to achieve capacity in the scenarios we examine, exhibiting capacity-achieving schemes and lower bounds for various classes of networks.\unskip\parfillskip 0pt \par \end{abstract} \bigskip \section*{Introduction} Global propagation of interconnected devices and the ubiquity of communication demands in unsecured settings signify the importance of a unified understanding of the limits of communications in networks. 
The correction of errors modeled by an adversarial noise has been studied in a number of previous works, with results ranging from those concerning network capacity to specific code design (see~\cite{YC06,CY06, YY07, byzantine, M07, YNY07, YYZ08,MANIAC, RK18,kosut14,Zhang,nutmanlangberg} among many others). In this paper, we focus on the effects of a small, and yet crucial, modification of previous models, where a malicious actor can access and alter transmissions across a \textit{proper} subset of edges within a network. We show that not only does this modification disrupt the sharpness of known results on network capacity, but that more specialized network coding (in fact, network \textit{decoding}) becomes necessary to achieve the capacity. We adopt the setting of a communication network whose inputs are drawn from a finite alphabet and whose internal (hereafter referred to as intermediate) nodes may process incoming information before forwarding (this is known as network \textit{coding}; see e.g.~\cite{ahlswede,linearNC,koettermedard}). We phrase our work in terms of adversarial noise, but we note that our treatment truly addresses \textit{worst-case} errors, also providing guarantees in networks where there may be random noise, or a combination of random and adversarial noise. We assume that the adversary is omniscient in the sense that they may design their attacks given full knowledge of the network topology, of the symbols sent along all its edges, and of the operations performed at the intermediate nodes. Again, in contrast to most previous work in the area, we concentrate on networks with noise occurring only on a proper subset of the network edges: for example, an adversary who may see all transmitted symbols, but has limited access to edges in the network when actively corrupting symbols. We examine the {1-shot capacity} of adversarial networks with these restricted adversaries, which roughly measures the number of alphabet symbols that can be sent with zero error during a single transmission round. The case of multi-shot capacity, where more than one transmission round is considered, is also interesting, but will involve a separate treatment and different techniques from our work here. Compellingly, the simple act of restricting possible error locations fundamentally alters the problem of computing the 1-shot capacity, as well as the manner in which this capacity may be achieved. This is discussed in further detail below. This paper expands upon (and disrupts) the groundwork laid in \cite{RK18}, where a combinatorial framework for adversarial networks and a generalized method for porting point-to-point coding-theoretic results to the network setting are established. The work in \cite{RK18} makes a start on addressing the case of restricted adversaries, also unifying the best-known upper bounds on~1-shot capacity for such networks. These upper bounds fall under the category of cut-set bounds, where an edge-cut is taken between network source(s) and terminal(s) and a bound on capacity is derived from an induced channel that takes only the cut-set edges into account. We note that Cai and Yeung previously gave generalizations of the Hamming, Singleton, and Gibert-Varshamov bounds to the network setting using edge-cuts in \cite{YC06,CY06}. The work in \cite{RK18} allows for any point-to-point bound to be ported via an edge-cut. 
In the case of random noise in a single-source, single-terminal network, it is well-known that the cut-set upper bound on capacity given by the Max-Flow Min-Cut Theorem may be achieved simply by forwarding information at the intermediate network nodes; see e.g.~\cite{elgamalkim}. It has also been shown that in the scenarios of multiple terminals or where a malicious adversary may choose among \textit{all} network edges, cut-set bounds may be achieved via \textit{linear} operations~(over the relevant finite field) at intermediate nodes (combined with rank-metric codes in the presence of a malicious adversary); see~\cite{SKK,MANIAC,epfl2}. In prior, preliminary work in this area \cite{beemer2021curious}, we demonstrated a minimal example of a network, which we called the Diamond Network, with a restricted adversary, where both (1) the Singleton cut-set bound (the best cut-set bound for this network) cannot be achieved, and (2) the actual 1-shot capacity cannot be achieved using linear operations at intermediate nodes. In fact, this example of network requires network \textit{decoding} in order to achieve capacity. Via an extension of the Diamond Network, termed the {Mirrored Diamond Network}, we found that it is possible with restricted adversaries for the 1-shot capacity to meet the Singleton cut-set bound, but still be forced to use non-linear network codes to achieve this rate. Generally speaking, the act of limiting the location of adversarial action is enough to put an end to the tightness of the best currently known cut-set bounds, and also to destroy the ability to achieve capacity with linear network codes in combination with end-to-end coding/decoding. This paper is the first stepping stone aimed at understanding the effects of restricting an adversary to a proper subset of the networks' edges, and how to compute~(1-shot) capacities of such adversarial networks. The paper is organized as follows. Sections \ref{sec:motiv} and \ref{sec:channel} introduce our problem set-up and notation. Section \ref{sec:diamond} is devoted to the Diamond Network mentioned above, which gives the smallest example illustrating our results. In Subsection \ref{sec:info} we present an information-theoretic view of our problem for the case of random rather than adversarial noise, in order to generate further intuition. To address the more general problem at hand, we will later in the paper~(in Section \ref{sec:double-cut-bd}) present a technique showing that the capacity of any network is upper bounded by the capacity of an induced, much less complicated network. The induced network utilized borrows from the original idea of cut-sets, but instead of studying the information flow through a single edge-cut, it considers the information flow between \textit{two} edge-cuts. We call the result we obtain as an application of this idea the Double-Cut-Set Bound. The less-complicated networks induced by the bound's statement are introduced in Section \ref{sec:net-2-and-3} and the collection of families we study throughout the remainder of the paper are introduced in Subsection \ref{sec:families}. In Section \ref{sec:upper}, we propose new combinatorial techniques to obtain upper bounds for the 1-shot capacities of these families, and in Section \ref{sec:2level_lower}, we establish lower bounds. Section \ref{sec:linear} shows that there is a strong separation between linear and non-linear capacities, solidifying the necessity of network decoding in this setting. 
Finally, Section \ref{sec:open} is devoted to conclusions, a discussion of open problems, and possible future works. \section{Problem Statement and Motivation} \label{sec:motiv} We focus on the typical scenario studied within the context of network coding; see~\cite{ahlswede,CY06,YC06,randomHo,linearNC,KK1,koettermedard,epfl1,SKK,WSK,YY07,YNY07,YangYeung2,jaggi2005,RK18} among many others. Namely, one source of information attempts to transmit information packets to a collection of terminals through a network of intermediate nodes. The packets are elements of a finite alphabet $\mA$ and the network is acyclic and delay-free. We are interested in solving the multicast problem (that is, each terminal demands all packets) under the assumption that an omniscient adversary acts on the network edges according to some restrictions. These concepts will be formalized later in the paper; see Section~\ref{sec:channel}. The goal of this section is to illustrate the motivation behind our paper through an example. Consider the single-source, two-terminal network $\mN$ depicted in Figure \ref{fig:ie1}. We want to compute the number of alphabet packets that the source $S$ can transmit to the terminal $T$ in a single transmission round, called the \textit{1-shot capacity}. \begin{figure}[h!] \centering \begin{tikzpicture} \tikzset{vertex/.style = {shape=circle,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{nnode/.style = {shape=circle,fill=myg,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{edge/.style = {->,> = stealth}} \tikzset{dedge/.style = {densely dotted,->,> = stealth}} \tikzset{ddedge/.style = {dashed,->,> = stealth}} \node[vertex] (S1) {$S$}; \node[shape=coordinate,right=\mynodespace of S1] (K) {}; \node[nnode,above=0.6\mynodespace of K] (V1) {$V_1$}; \node[nnode,below=0.6\mynodespace of K] (V2) {$V_2$}; \node[nnode,right=\mynodespace of K] (V3) {$V_3$}; \node[nnode,right=\mynodespace of V3] (V4) {$V_4$}; \node[vertex,right=3\mynodespace of V1 ] (T1) {$T_1$}; \node[vertex,right=3\mynodespace of V2] (T2) {$T_2$}; \draw[edge,bend left=15] (S1) to node[fill=white, inner sep=3pt]{\small $e_1$} (V1); \draw[edge,bend right=15] (S1) to node[fill=white, inner sep=3pt]{\small $e_2$} (V1); \draw[edge,bend left=15] (S1) to node[fill=white, inner sep=3pt]{\small $e_3$} (V2); \draw[edge,bend right=15] (S1) to node[fill=white, inner sep=3pt]{\small $e_4$} (V2); \draw[edge,bend left=0] (V1) to node[fill=white, inner sep=3pt]{\small $e_6$} (V3); \draw[edge,bend left=0] (V4) to node[fill=white, inner sep=3pt]{\small $e_{10}$} (T1); \draw[edge,bend left=0] (V4) to node[fill=white, inner sep=3pt]{\small $e_{11}$} (T2); \draw[edge,bend left=0] (V1) to node[fill=white, inner sep=3pt]{\small $e_{5}$} (T1); \draw[edge,bend left=0] (V2) to node[fill=white, inner sep=3pt]{\small $e_{8}$} (T2); \draw[edge,bend left=0] (V2) to node[fill=white, inner sep=3pt]{\small $e_7$} (V3); \draw[edge,bend left=0] (V3) to node[fill=white, inner sep=3pt]{\small $e_{9}$} (V4); \end{tikzpicture} \caption{An example of a network.\label{fig:ie1}} \end{figure} If no adversary acts on the network, then the traditional cut-set bounds are sharp, if the network alphabet is sufficiently large; see~\cite{ahlswede,linearNC,koettermedard}. Since the (edge) min-cut between $S$ and any $T \in \{T_1,T_2\}$ is~2, no more than 2 packets can be sent in a single transmission round. 
Furthermore, a strategy that achieves this bound is obtained by routing packet~1 across paths $e_1 \to e_5$ and $e_4 \to e_8$, and packet 2 across paths $e_2 \to e_6 \to e_9 \to e_{10}$ and $e_3 \to e_7 \to e_9 \to e_{11}$. This paper focuses on an adversarial model. That is, a malicious ``outside actor'' can corrupt up to $t$ information packets sent via the edges. Note that the term adversary has no cryptographic meaning in our setting and it simply models the situation where \textit{any} pattern of~$t$ errors needs to be corrected. Now suppose that the network $\mN$ is vulnerable, with an adversary able to change the value of up to $t=1$ of the network edges. Figure \ref{fig:ie2} represents the same network as Figure~\ref{fig:ie1}, but with vulnerable (dashed) edges. This scenario has been extensively investigated within network coding with possibly multiple terminals and multiple sources; see for instance~\cite{MANIAC,RK18,YC06,CY06,SKK,KK1}. In particular, it follows from the Network Singleton Bound of \cite{CY06,YC06,RK18} that the network has capacity 0, meaning that the largest unambiguous code has size 1 (the terminology will be formalized later). \begin{figure}[h!] \centering \begin{tikzpicture} \tikzset{vertex/.style = {shape=circle,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{nnode/.style = {shape=circle,fill=myg,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{edge/.style = {->,> = stealth}} \tikzset{dedge/.style = {densely dotted,->,> = stealth}} \tikzset{ddedge/.style = {dashed,->,> = stealth}} \node[vertex] (S1) {$S$}; \node[shape=coordinate,right=\mynodespace of S1] (K) {}; \node[nnode,above=0.6\mynodespace of K] (V1) {$V_1$}; \node[nnode,below=0.6\mynodespace of K] (V2) {$V_2$}; \node[nnode,right=\mynodespace of K] (V3) {$V_3$}; \node[nnode,right=\mynodespace of V3] (V4) {$V_4$}; \node[vertex,right=3\mynodespace of V1 ] (T1) {$T_1$}; \node[vertex,right=3\mynodespace of V2] (T2) {$T_2$}; \draw[ddedge,bend left=15] (S1) to node[fill=white, inner sep=3pt]{\small $e_1$} (V1); \draw[ddedge,bend right=15] (S1) to node[fill=white, inner sep=3pt]{\small $e_2$} (V1); \draw[ddedge,bend left=15] (S1) to node[fill=white, inner sep=3pt]{\small $e_3$} (V2); \draw[ddedge,bend right=15] (S1) to node[fill=white, inner sep=3pt]{\small $e_4$} (V2); \draw[ddedge,bend left=0] (V1) to node[fill=white, inner sep=3pt]{\small $e_6$} (V3); \draw[ddedge,bend left=0] (V4) to node[fill=white, inner sep=3pt]{\small $e_{10}$} (T1); \draw[ddedge,bend left=0] (V4) to node[fill=white, inner sep=3pt]{\small $e_{11}$} (T2); \draw[ddedge,bend left=0] (V1) to node[fill=white, inner sep=3pt]{\small $e_{5}$} (T1); \draw[ddedge,bend left=0] (V2) to node[fill=white, inner sep=3pt]{\small $e_{8}$} (T2); \draw[ddedge,bend left=0] (V2) to node[fill=white, inner sep=3pt]{\small $e_7$} (V3); \draw[ddedge,bend left=0] (V3) to node[fill=white, inner sep=3pt]{\small $e_{9}$} (V4); \end{tikzpicture} \caption{\label{fig:ie2} The network of Figure \ref{fig:ie1}, where all edges are vulnerable (dashed).} \end{figure} We recall that in the case of a network with multiple sources, multiple terminals, and an adversary able to corrupt up to $t$ of the network edges, the capacity region was computed in~\cite{MANIAC,RK18,epfl2}. In the scenario just described, a capacity-achieving scheme can be obtained by combining linear network coding with rank-metric end-to-end coding. We will comment on this again in Theorem \ref{thm:mcm}. \begin{figure}[h!] 
\centering \begin{tikzpicture} \tikzset{vertex/.style = {shape=circle,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{nnode/.style = {shape=circle,fill=myg,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{edge/.style = {->,> = stealth}} \tikzset{dedge/.style = {densely dotted,->,> = stealth}} \tikzset{ddedge/.style = {dashed,->,> = stealth}} \node[vertex] (S1) {$S$}; \node[shape=coordinate,right=\mynodespace of S1] (K) {}; \node[nnode,above=0.6\mynodespace of K] (V1) {$V_1$}; \node[nnode,below=0.6\mynodespace of K] (V2) {$V_2$}; \node[nnode,right=\mynodespace of K] (V3) {$V_3$}; \node[nnode,right=\mynodespace of V3] (V4) {$V_4$}; \node[vertex,right=3\mynodespace of V1 ] (T1) {$T_1$}; \node[vertex,right=3\mynodespace of V2] (T2) {$T_2$}; \draw[ddedge,bend left=15] (S1) to node[fill=white, inner sep=3pt]{\small $e_1$} (V1); \draw[ddedge,bend right=15] (S1) to node[fill=white, inner sep=3pt]{\small $e_2$} (V1); \draw[ddedge,bend left=15] (S1) to node[fill=white, inner sep=3pt]{\small $e_3$} (V2); \draw[ddedge,bend right=15] (S1) to node[fill=white, inner sep=3pt]{\small $e_4$} (V2); \draw[ddedge,bend left=0] (V1) to node[fill=white, inner sep=3pt]{\small $e_6$} (V3); \draw[edge,bend left=0] (V4) to node[fill=white, inner sep=3pt]{\small $e_{10}$} (T1); \draw[edge,bend left=0] (V4) to node[fill=white, inner sep=3pt]{\small $e_{11}$} (T2); \draw[edge,bend left=0] (V1) to node[fill=white, inner sep=3pt]{\small $e_{5}$} (T1); \draw[edge,bend left=0] (V2) to node[fill=white, inner sep=3pt]{\small $e_{8}$} (T2); \draw[ddedge,bend left=0] (V2) to node[fill=white, inner sep=3pt]{\small $e_7$} (V3); \draw[ddedge,bend left=0] (V3) to node[fill=white, inner sep=3pt]{\small $e_{9}$} (V4); \end{tikzpicture} \caption{{The network of Figure \ref{fig:ie1}, where only the dashed edges are vulnerable.}\label{fig:introex1}} \end{figure} We now turn to the scenario that motivates this paper. The network remains vulnerable, but this time the adversary can only corrupt at most one of the \textit{dashed} edges in Figure~\ref{fig:introex1}; the solid edges are \textit{not} vulnerable. Our main question is the same (what is the largest number of alphabet packets that can be transmitted?), but the answer is less obvious/known than before. By restricting the adversary to operate on a proper subset of the edges, one expects the capacity to increase with respect to the ``unrestricted'' situation. Therefore a natural question is whether the rate of one packet in a single channel use can be achieved. As we will show in Theorem~\ref{computC}, this is not possible: instead, the partially vulnerable network has capacity $\log_{|\mA|} (|\mA|-1)$, where~$\mA$ denotes the network alphabet. As we will see, capacity can be achieved by making nodes $V_1$, $V_2$ and $V_3$ partially \textit{decode} received information (which explains the title of our paper). This is in sharp contrast with the case of an unrestricted adversary, where capacity can be achieved with end-to-end encoding/decoding. The goal of this paper is to develop the theoretical framework needed to study networks that are partially vulnerable to adversarial noise, comparing results and strategies with those currently available in contexts that are to date much better understood. \section{Channels and Networks} \label{sec:channel} In this section we include some preliminary definitions and results that will be needed throughout the entire paper. 
This will also allow us to establish the notation and to state the problems we will investigate in rigorous mathematical language. This section is divided into two subsections. The first is devoted to arbitrary~(adversarial) channels, while in the second we focus our attention on communication networks and their capacities. \subsection{Adversarial Channels} \label{subsect:channels} In our treatment, we will use the definition of (adversarial) channels proposed in~\cite{RK18}, based on the notion of~\textit{fan-out sets}. This concept essentially dates back to Shannon's fundamental paper on the zero-error capacity of a channel~\cite{shannon_zero}. Under this approach, a channel is fully specified by the collection of symbols $y$ that can be received when a given symbol $x$ is transmitted. Considering transition probabilities does not make sense in this model since the noise is assumed to be of an ``adversarial'' nature. The latter assumption conveniently models all those scenarios where \textit{any} error pattern of a given type must be corrected and no positive probability of unsuccessful decoding is tolerated. This is the scenario considered (often implicitly) in standard network coding references such as~\cite{SKK,YC06,CY06,KK1} among many others. We will briefly consider a probabilistic regime in Subsection~\ref{sec:info} with the goal of forming intuition about certain information theory phenomena we will observe. \begin{definition} \label{dd1} A (\textbf{discrete}, \textbf{adversarial}) \textbf{channel} is a map $\Omega: \mX \to 2^{\mY} \setminus \{\emptyset\}$, where~$\mX$ and~$\mY$ are finite non-empty sets called the \textbf{input} and \textbf{output alphabets} respectively. The notation for such a channel is $\Omega: \mX \dashrightarrow \mY$. We say that $\Omega$ is \textbf{deterministic} if $|\Omega(x)|=1$ for all $x \in \mX$. We call~$\Omega(x)$ the \textbf{fan-out set} of $x$. \end{definition} Note that a deterministic channel $\Omega: \mX \dashrightarrow \mY$ can be naturally identified with a function~$\mX \to \mY$, which we denote by $\Omega$ as well. \begin{definition} \label{dd2} A \textbf{(outer) code} for a channel $\Omega: \mX \dashrightarrow \mY$ is a non-empty subset $\mC \subseteq \mX$. We say that~$\mC$ is \textbf{unambiguous} if $\Omega(x) \cap \Omega(x') =\emptyset$ for all $x, x' \in \mC$ with $x \neq x'$. \end{definition} The base-two logarithm of the largest size of an unambiguous code for a given channel is its~\textit{1-shot capacity}. In the graph theory representation of channels proposed by Shannon in~\cite{shannon_zero}, the~1-shot capacity coincides with the base-two logarithm of the largest cardinality of an independent set in the corresponding graph. We refer to~\cite[Section~II]{RK18} for a more detailed discussion. \begin{definition} \label{def:future} The (\textbf{$1$-shot}) \textbf{capacity} of a channel $\Omega:\mX \dashrightarrow \mY$ is the real number $$\CC_1(\Omega)=\max\left\{\log_2 |\mC| \; : \; \mC \subseteq \mX \mbox{ is an unambiguous code for $\Omega$}\right\}.$$ \end{definition} We give an example to illustrate the previous definitions. \begin{example} Let $\mX=\mY=\{0,1,2,3,4,5,6,7\}$. 
Define a channel $\Omega: \mX \dto \mY$ by setting $$\Omega(x)= \begin{cases} \{0,2\} & \text{if} \ \ x = 0, \\ \{0,1,4,6\} & \text{if} \ \ x = 1, \\ \{2,3,5\} & \text{if} \ \ x = 2, \\ \{2,3,4,7\} & \text{if} \ \ x = 3, \end{cases} \qquad \qquad \Omega(x)= \begin{cases} \{2,3,4,6\} & \text{if} \ \ x = 4, \\ \{0,1,5\} & \text{if} \ \ x = 5, \\ \{6\} & \text{if} \ \ x = 6, \\ \{0,1,5,7\} & \text{if} \ \ x = 7. \\ \end{cases}$$ Clearly, $\Omega$ is not deterministic. It can be checked that the only unambiguous code for $\Omega$ of size~3 is~$\mC=\{3,5,6\}$, and that there are no unambiguous codes of size 4. Therefore we have~$\CC_1(\Omega)=\log_2 3$. \end{example} In this paper we focus solely on the 1-shot capacity of channels. While other capacity notions can be considered as well (e.g., the \textit{zero-error capacity}), in the networking context these are significantly more technical to treat than the 1-shot capacity, especially when focusing on restricted noise. We therefore omit them in this first paper on network decoding and leave them as a future research direction; see Section \ref{sec:open}. We next describe how channels can be compared and combined with each other, referring to~\cite{RK18} for a more complete treatment. \begin{definition} \label{deffiner} Let $\Omega_1,\Omega_2 : \mX \dashrightarrow \mY$ be channels. We say that $\Omega_1$ is \textbf{finer} than $\Omega_2$ (or that~$\Omega_2$ is \textbf{coarser} than $\Omega_1$) if $\Omega_1(x) \subseteq \Omega_2(x)$ for all $x \in \mX$. The notation is $\Omega_1 \le \Omega_2$. \end{definition} Finer channels have larger capacity, as the following simple result states. \begin{proposition} \label{prop:finer} Let $\Omega_1,\Omega_2 : \mX \dashrightarrow \mY$ be channels with $\Omega_1 \le \Omega_2$. We have $\CC_1(\Omega_1) \ge \CC_1(\Omega_2)$. \end{proposition} Channels with compatible output/input alphabets can be concatenated with each other via the following construction. \begin{definition} Let $\Omega_1:\mX_1 \dashrightarrow \mY_1$ and $\Omega_2:\mX_2 \dashrightarrow \mY_2$ be channels, with $\mY_1 \subseteq \mX_2$. The \textbf{concatenation} of $\Omega_1$ and $\Omega_2$ is the channel $\Omega_1 \blacktriangleright \Omega_2 : \mX_1 \dashrightarrow \mY_2$ defined by $$(\Omega_1 \blacktriangleright \Omega_2)(x):= \bigcup_{y \in \Omega_1(x)} \Omega_2(y).$$ \end{definition} The concatenation of channels is associative in the following precise sense. \begin{proposition} \label{prop:11} Let $\Omega_i:\mX_i \dashrightarrow \mY_i$ be channels, for $i \in \{1,2,3\}$, with $\mY_i \subseteq \mX_{i+1}$ for $i \in \{1,2\}$. We have $(\Omega_1 \blacktriangleright \Omega_2) \blacktriangleright \Omega_3 = \Omega_1 \blacktriangleright (\Omega_2 \blacktriangleright \Omega_3)$. \end{proposition} The previous result allows us to write expressions such as $\Omega_1 \blacktriangleright \Omega_2 \blacktriangleright \Omega_3$ without parentheses, when all concatenations are defined. We conclude this subsection with a discrete version of the \textit{data processing inequality} from classical information theory; see e.g.~\cite[Section 2.8]{coverthomas}. \begin{proposition} \label{dpi} Let $\Omega_1:\mX_1 \dashrightarrow \mY_1$ and $\Omega_2:\mX_2 \dashrightarrow \mY_2$ be channels, with $\mY_1 \subseteq \mX_2$. We have~$\CC_1(\Omega_1 \blacktriangleright \Omega_2) \le \min\{\CC_1(\Omega_1), \, \CC_1(\Omega_2)\}$.
\end{proposition} \subsection{Networks and Their Capacities} In this subsection we formally define communication networks, network codes, and the channels they induce. Our approach is inspired by~\cite{RK18}, even though the notation used in this paper differs slightly. We omit some of the details and refer the interested reader directly to~\cite{RK18}. \begin{definition} \label{def:network} A (\textbf{single-source}) \textbf{network} is a 4-tuple $\mN=(\mV,\mE, S, \bfT)$ where: \begin{enumerate}[label=(\Alph*)] \item $(\mV,\mE)$ is a finite, directed, acyclic multigraph, \item $S \in \mV$ is the \textbf{source}, \item ${\bf T} \subseteq \mV$ is the set of \textbf{terminals} or \textbf{sinks}, \end{enumerate} Note that we allow multiple parallel directed edges. We also assume that the following hold. \begin{enumerate}[label=(\Alph*)] \setcounter{enumi}{3} \item $|{\bf T}| \ge 1$, $S \notin {\bf T}$. \item \label{prnE} For any $T \in {\bf T}$, there exists a directed path from $S$ to $T$. \item The source does not have incoming edges, and terminals do not have outgoing edges. \label{prnF} \item For every vertex $V \in \mV \setminus (\{S\} \cup \bfT)$, there exists a directed path from $S$ to $V$ and a directed path from $V$ to $T$ for some $T \in {\bf T}$. \label{prnG} \end{enumerate} The elements of $\mV$ are called \textbf{vertices} or \textbf{nodes}. The elements of $\mV \setminus (\{S\} \cup {\bf T})$ are called \textbf{intermediate} nodes. A (\textbf{network}) \textbf{alphabet} is a finite set $\mA$ with $|\mA| \ge 2$. The elements of~$\mA$ are called \textbf{symbols} or \textbf{packets}. We say that $\mN$ is a \textbf{single-terminal} network if $|\bfT| = 1$. \end{definition} The network alphabet is interpreted as the set of symbols that can be sent over the edges of the network. \begin{notation} \label{not:fixN} Throughout the paper, $\mN=(\mV,\mE,S,{\bf T})$ will always denote a network and $\mA$ an alphabet, as in Definition~\ref{def:network}, unless otherwise stated. We let $$\mincut_\mN(V,V')$$ be the minimum cardinality of an edge-cut (a set of edges that cuts the connection) between vertices $V,V' \in \mV$. We denote the set of incoming and outgoing edges of $V \in \mV$ by $\inn(V)$ and~$\out(V)$, respectively, and their cardinalities by $\degin(V)$ and $\degout(V)$. These cardinalities are called the \textbf{in-degree} and \textbf{out-degree} of the vertex $V$, respectively. \end{notation} The following concepts will be crucial in our approach. \begin{definition} \label{def:prece} The edges of a network $\mN=(\mV,\mE, S, \bfT)$ can be partially ordered as follows. For $e,e' \in \mE$, we say that $e$ \textbf{precedes} $e'$ if there exists a directed path in $\mN$ that starts with $e$ and ends with~$e'$. The notation is $e \preccurlyeq e'$. \end{definition} \begin{notation} \label{not:ext} Following the notation of Definition~\ref{def:prece}, it is well known in graph theory that the partial order on $\mE$ can be extended to a (not necessarily unique) total order. By definition, such an extension $\le$ satisfies the following property: $e \preccurlyeq e'$ implies $e \le e'$. Throughout the paper we will assume that such an order extension has been fixed in the various networks and we denote it by $\le$ (none of the results of this paper depend on the particular choice of the total order). Moreover, we illustrate the chosen total order via the labeling of the edges; see, for example, Figure~\ref{fig:ie1}. 
\end{notation} In our model, the intermediate nodes of a network process incoming packets according to prescribed functions. We do not assume any restrictions on these functions. In particular, even when $\mA$ is a linear space over a given finite field, we do \textit{not} require the functions to be linear over the underlying field. This is in strong contrast with the most common approach taken in the context of network coding; see, for instance, \cite{YNY07,Zhang,randomHo,randomlocations,ho2008,linearNC}. In fact, as we will argue in Section~\ref{sec:linear}, using non-linear network codes (e.g. \textit{decoding} at intermediate nodes) is often \textit{needed} to achieve capacity in the scenarios studied in this paper. \begin{definition} \label{def:nc} Let $\mN=(\mV,\mE, S, \bfT)$ be a network and $\mA$ an alphabet. A \textbf{network code} for $(\mN,\mA)$ is a family $\mF=\{\mF_V \st V \in \mV \setminus (\{S\} \cup {\bf T})\}$ of functions, where $$\mF_V : \mA^{\degin(V)} \to \mA^{\degout(V)} \quad \mbox{for all $V \in \mV \setminus (\{S\} \cup {\bf T})$}.$$ \end{definition} A network code $\mF$ fully specifies how the intermediate nodes of a network process information packets. Note that the interpretation of each function $\mF_V$ is unique precisely thanks to the choice of the total order $\le$; see Notation~\ref{not:ext}. \begin{example} \label{ex:adv} Consider the network depicted in Figure~\ref{easyex}, consisting of one source, one terminal, and one intermediate node. The edges are ordered according to their indices. \begin{figure}[h!] \centering \begin{tikzpicture} \tikzset{vertex/.style = {shape=circle,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{nnode/.style = {shape=circle,fill=myg,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{edge/.style = {->,> = stealth}} \tikzset{dedge/.style = {densely dotted,->,> = stealth}} \tikzset{ddedge/.style = {dashed,->,> = stealth}} \node[vertex] (S1) {$S$}; \node[shape=coordinate,right=\mynodespace*0.5 of S1] (K) {}; \node[nnode,right=\mynodespace of K] (V) {$V$}; \node[vertex,right=\mynodespace*0.5 of V3] (T) {$T$}; \draw[edge,bend left=25] (S1) to node[fill=white, inner sep=3pt]{\small $e_1$} (V); \draw[edge,bend right=25] (S1) to node[fill=white, inner sep=3pt]{\small $e_3$} (V); \draw[edge,bend right=0] (S1) to node[fill=white, inner sep=3pt]{\small $e_2$} (V); \draw[edge,bend left=0] (V) to node[fill=white, inner sep=3pt]{\small $e_{4}$} (T); \end{tikzpicture} \caption{Network for Example~\ref{ex:adv}.\label{easyex}} \end{figure} The way vertex $V$ processes information is fully specified by a function $\mF_V:\mA^3 \to \mA$, thanks to the choice of the total order. For example, if $\mA=\F_5$ and $\mF_V(x_1,x_2,x_3)=x_1+2x_2+3x_3$ for all~$(x_1,x_2,x_3) \in \mA^3$, then vertex $V$ sends over edge $e_4$ the field element obtained by summing the field element collected on edge $e_1$ with twice the field element collected on edge $e_2$ and three times the field element collected on edge $e_3$. \end{example} This paper focuses on networks $\mN=(\mV,\mE, S, \bfT)$ affected by \textit{potentially restricted} adversarial noise. More precisely, we assume that at most $t$ of the alphabet symbols on a given edge set $\mU \subseteq \mE$ can be changed into any other alphabet symbols. We are interested in computing the largest number of packets that can be multicasted to all terminals in a single channel use. As already mentioned, the notion of error probability does not make sense in this context, since the noise is adversarial in nature. 
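Although it is not part of the formal development, the channel formalism of Subsection~\ref{subsect:channels} is easy to prototype. The following Python sketch (an illustration only; the helper names are our own and not part of the model) stores a channel as a dictionary of fan-out sets and computes its 1-shot capacity by exhaustive search over unambiguous codes. Applied to the eight-symbol channel of the example in Subsection~\ref{subsect:channels}, it recovers the value $\log_2 3$ and the maximum unambiguous code $\{3,5,6\}$.
\begin{verbatim}
from itertools import combinations
from math import log2

# Fan-out sets of the eight-symbol example channel: a discrete
# adversarial channel is fully specified by the map x -> Omega(x).
OMEGA = {
    0: {0, 2},       1: {0, 1, 4, 6},
    2: {2, 3, 5},    3: {2, 3, 4, 7},
    4: {2, 3, 4, 6}, 5: {0, 1, 5},
    6: {6},          7: {0, 1, 5, 7},
}

def is_unambiguous(code, omega):
    """A code is unambiguous iff its fan-out sets are pairwise disjoint."""
    return all(omega[x].isdisjoint(omega[y]) for x, y in combinations(code, 2))

def one_shot_capacity(omega):
    """log2 of the largest unambiguous code, found by exhaustive search."""
    best = max(
        (c for r in range(1, len(omega) + 1)
             for c in combinations(omega, r)
             if is_unambiguous(c, omega)),
        key=len,
    )
    return log2(len(best)), best

print(one_shot_capacity(OMEGA))   # expected: (log2(3), (3, 5, 6))
\end{verbatim}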
We can make the described problem rigorous with the aid of the following notation, which connects networks and network codes to the notion of (adversarial) channels introduced in Subsection~\ref{subsect:channels}. \begin{notation} \label{not:netch} Let $\mN=(\mV,\mE, S, \bfT)$ be a network, $\mA$ an alphabet, $T \in \bd{T}$ a terminal, $\mF$ a network code for $(\mN,\mA)$, $\mU \subseteq \mE$ an edge set, and $t \ge 0$ an integer. We denote by $$\Omega[\mN, \mA, \mF, S \to T,\mU,t] : \mA^{\degout(S)} \dashrightarrow \mA^{\degin(T)}$$ the channel representing the transfer from $S$ to terminal $T \in \bd{T}$, when the network code $\mF$ is used by the vertices and at most $t$ packets from the edges in $\mU$ are corrupted. In this context, we call $t$ the \textbf{adversarial power}. \end{notation} The following example illustrates how to formally describe the channel introduced in Notation~\ref{not:netch}. \begin{example} \label{ex:ad} Let $\mN$ be the network in Figure~\ref{fig:ad} and $\mA$ be an alphabet. We consider an adversary capable of corrupting up to one of the dashed edges, that is, one of the edges in~$\mU =\{e_1,e_2,e_3\}$. Let $\mF_{V_1}: \mA \to \mA$ be the identity function and let $\mF_{V_2}: \mA \to \mA$ be a function returning a constant value $a \in \mA$. \begin{figure}[htbh] \centering \begin{tikzpicture} \tikzset{vertex/.style = {shape=circle,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{nnode/.style = {shape=circle,fill=myg,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{edge/.style = {->,> = stealth}} \tikzset{dedge/.style = {densely dotted,->,> = stealth}} \tikzset{ddedge/.style = {dashed,->,> = stealth}} \node[vertex] (S1) {$S$}; \node[shape=coordinate,right=\mynodespace of S1] (K) {}; \node[nnode,above=0.5\mynodespace of K] (V1) {$V_1$}; \node[nnode,below=0.5\mynodespace of K] (V2) {$V_2$}; \node[vertex,right=\mynodespace of K] (T) {$T$}; \draw[ddedge,bend left=0] (S1) to node[sloped,fill=white, inner sep=1pt]{\small $e_1$} (V1); \draw[ddedge,bend left=0] (S1) to node[sloped,fill=white, inner sep=1pt]{\small $e_2$} (T); \draw[ddedge,bend right=0] (S1) to node[sloped,fill=white, inner sep=1pt]{\small $e_3$} (V2); \draw[edge,bend left=0] (V1) to node[sloped,fill=white, inner sep=1pt]{\small $e_4$} (T); \draw[edge,bend left=0] (V2) to node[sloped,fill=white, inner sep=1pt]{\small $e_{5}$} (T); \end{tikzpicture} \caption{{{Network for Example~\ref{ex:ad}.}}}\label{fig:ad} \end{figure} This scenario is fully modeled by the channel $\Omega[\mN, \mA, \mF, S \to T,\mU,1] : \mA^{3} \dashrightarrow \mA^{3}$, which we now describe. For $x=(x_1,x_2,x_3) \in \mA^3$, we have that $\Omega[\mN, \mA, \mF, S \to T,\mU,1](x)$ is the set of all alphabet vectors $y=(y_1,y_2,a) \in \mA^3$ for which $\dH((y_1,y_2),(x_1,x_2)) \le 1$, where~$\dH$ denotes the Hamming distance; see~\cite{macwilliams1977theory}. \end{example} We are finally ready to give a rigorous definition for the 1-shot capacity of a network. This is the main quantity we are concerned with in this paper. \begin{definition} \label{def:capacities} Let $\mN=(\mV,\mE, S, \bfT)$ be a network, $\mA$ an alphabet, $\mU \subseteq \mE$ an edge set, and~$t \ge 0$ an integer.
The (\textbf{1-shot}) \textbf{capacity} of $(\mN,\mA,\mU,t)$ is the largest real number~$\kappa$ for which there exists an \textbf{outer code} $$\mC \subseteq \mA^{\degout(S)}$$ and a network code $\mF$ for~$(\mN,\mA)$ with $\kappa=\log_{|\mA|}(|\mC|)$ such that $\mC$ is unambiguous for each channel $\Omega[\mN,\mA,\mF,S \to T,\mU,t]$, $T \in \bd{T}$. The notation for this largest $\kappa$ is $$\CC_1(\mN,\mA,\mU,t).$$ The elements of the outer code $\mC$ are called \textbf{codewords}. \end{definition} The following bound, which is not sharp in general, is an immediate consequence of the definitions. \begin{proposition} \label{prop:aux} Following the notations of Definition \ref{def:future} and Definition~\ref{def:capacities}, we have $$\CC_1(\mN,\mA,\mU,t) \le \, \min_T \, \max_\mF \, \CC_1(\Omega[\mN,\mA,\mF,S \to T, \mU,t]),$$ where the minimum is taken over all network terminals $T \in \bfT$ and the maximum is taken over all network codes $\mF$ for $(\mN,\mA)$. \end{proposition} The main goal of this paper is to initiate the study of the quantity $\CC_1(\mN,\mA,\mU,t)$ for an arbitrary tuple $(\mN,\mA,\mU,t)$, where $t \ge 0$ is an integer, $\mA$ is an alphabet and $\mU$ is a \textit{proper} subset of the edges of the network $\mN$. The main difference between this work and previous work in the same field lies precisely in the restriction of the noise to the set $\mU$. Recall moreover that when all edges are vulnerable, i.e., when $\mE=\mU$, the problem of computing~$\CC_1(\mN,\mA,\mE,t)$ can be completely solved by combining cut-set bounds with \textit{linear} network coding and rank-metric codes (for the achievability). More precisely, the following hold. \begin{theorem}[\text{see \cite{SKK}}] \label{thm:mcm} Let $\mN=(\mV,\mE, S, \bfT)$ be a network, $\mA$ an alphabet, and $t \ge 0$ an integer. Let $\mu=\min_{T \in \bfT} \mincut_{\mN}(S,T)$. Suppose that~$\mA=\F_{q^m}$, with~$m \ge \mu$ and $q$ sufficiently large ($q \ge |{\bf T}|-1$ suffices). Then $$\CC_1(\mN,\mA,\mE,t) = \max\{0, \, \mu-2t\}.$$ \end{theorem} Moreover, it has been proven in \cite{SKK} that the capacity value $\max\{0, \, \mu-2t\}$ can be attained by taking as a network code $\mF$ a collection of $\F_q$-linear functions. In the case of an adversary having access to only a proper subset of the network edges, the generalization of Theorem \ref{thm:mcm} can be derived and is stated as follows. \begin{theorem}[Generalized Network Singleton Bound; see~\cite{RK18}] \label{sbound} Let $\mN=(\mV,\mE, S, \bfT)$ be a network, $\mA$ an alphabet,~$\mU \subseteq \mE$, and $t \ge 0$ an integer. We have \begin{equation*} \CC_1(\mN,\mA,\mU,t) \le \min_{T \in \bfT} \, \min_{\mE'} \left( |\mE'\setminus \mU| + \max\{0,|\mE' \cap \mU|-2t\} \right), \end{equation*} where $\mE' \subseteq \mE$ ranges over edge-cuts between $S$ and $T$. \end{theorem} Next, we give an example to illustrate Definition \ref{def:capacities} that also makes use of the Generalized Network Singleton Bound of Theorem \ref{sbound}. \begin{example} Consider the network $\mN$ depicted in Figure \ref{fig:ad}. We will show that $$\CC_1(\mN,\mA,\mU,t)=1.$$ We choose the network code to consist of identity functions $\mF_{V_1}$ and $\mF_{V_2}$. Let $\mC$ be the $3$-times repetition code, that is, $\mC=\{(x,x,x) \mid x \in \mA\}.$ Since at most $1$ symbol from the edges of $\mU$ is corrupted, it can easily be seen that $\mC$ is unambiguous for the channel $\Omega[\mN,\mA,\mF,S \to T,\mU,1]$. Since~$|\mC|=|\mA|$, this shows that $\CC_1(\mN,\mA,\mU,t) \ge 1$. 
Choosing $\mE'=\{e_1,e_2,e_3\}$ in Theorem~\ref{sbound}, we have $\CC_1(\mN,\mA,\mU,t) \le 1$, yielding the desired result. \end{example} \section{The Curious Case of the Diamond Network} \label{sec:diamond} This section is devoted to the smallest example of network that illustrates the problem we focus on in this paper. We call it the \textbf{Diamond Network}, due to its shape, and denote it by~$\mathfrak{A}_1$. The choice for the notation will become clear in Subsection~\ref{sec:families}, where we will introduce a family of networks (Family~\ref{fam:a}) of which the Diamond Network is the ``first'' member. The Diamond Network is depicted in Figure~\ref{fig:diamond}. \begin{figure}[htbh] \centering \centering \begin{tikzpicture} \tikzset{vertex/.style = {shape=circle,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{nnode/.style = {shape=circle,fill=myg,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{edge/.style = {->,> = stealth}} \tikzset{dedge/.style = {densely dotted,->,> = stealth}} \tikzset{ddedge/.style = {dashed,->,> = stealth}} \node[vertex] (S1) {$S$}; \node[shape=coordinate,right=\mynodespace of S1] (K) {}; \node[nnode,above=0.35\mynodespace of K] (V1) {$V_1$}; \node[nnode,below=0.35\mynodespace of K] (V2) {$V_2$}; \node[vertex,right=\mynodespace of K] (T) {$T$}; \draw[edge,bend left=0] (S1) to node[sloped,fill=white, inner sep=1pt]{\small $e_1$} (V1); \draw[edge,bend left=15] (S1) to node[sloped,fill=white, inner sep=1pt]{\small $e_2$} (V2); \draw[edge,bend right=15] (S1) to node[sloped,fill=white, inner sep=1pt]{\small $e_3$} (V2); \draw[edge,bend left=0] (V1) to node[sloped,fill=white, inner sep=1pt]{\small $e_4$} (T); \draw[edge,bend left=0] (V2) to node[sloped,fill=white, inner sep=1pt]{\small $e_{5}$} (T); \end{tikzpicture} \caption{The Diamond Network $\mathfrak{A}_1$.} \label{fig:diamond} \end{figure} \subsection{The Capacity of the Diamond Network} We are interested in computing the capacity of the Diamond Network $\mathfrak{A}_1$ of Figure~\ref{fig:diamond} when $\mA$ is an arbitrary alphabet, $\mU=\{e_1,e_2,e_3\}$ is the set of vulnerable edges, and $t=1$. Previously, the best known upper bound for the capacity of $\mathfrak{A}_1$ in this context was \begin{equation} \label{boundDN} \CC_1(\mathfrak{A}_1,\mA,\mU,1) \le 1, \end{equation} which follows from Theorem~\ref{sbound}. It was however shown in the preliminary work~\cite{beemer2021curious} that the upper bound in~\eqref{boundDN} is not tight, regardless of the choice of alphabet $\mA$. More precisely, the following hold. \begin{theorem} \label{thm:diamond_cap} For the Diamond Network $\mathfrak{A}_1$ of Figure \ref{fig:diamond}, any alphabet $\mA$, and $\mU=\{e_1,e_2,e_3\}$, we have $$\CC_1(\mathfrak{A}_1,\mA,\mU,1) = \log_{|\mA|} \, (|\mA|-1).$$ \end{theorem} An intuitive idea of why the capacity of the Diamond Network is strictly less than one can be seen by considering that information arriving at the terminal through $e_4$ is completely useless without information arriving through $e_5$. The opposite is also true. Thus, we must have some cooperation between the two different ``routes'' in order to achieve a positive capacity. Unfortunately, because $V_2$ has one more incoming than outgoing edge, the cooperation implicitly suggested by the Generalized Network Singleton Bound of Theorem~\ref{sbound} is impossible: a repetition code sent across $e_1$, $e_2$, and $e_3$ will fall short of guaranteed correction at the terminal. Cooperation is still possible, but it will come at a cost. 
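The discussion above can be checked mechanically for the smallest alphabet. The following Python sketch (our own illustration, with hypothetical function names) enumerates every network code and every two-word outer code for the Diamond Network over a binary alphabet, with $\mU=\{e_1,e_2,e_3\}$ and $t=1$, and finds no unambiguous choice; this is consistent with Theorem~\ref{thm:diamond_cap}, which gives capacity $\log_2(2-1)=0$ in this case.
\begin{verbatim}
from itertools import product, combinations

A = (0, 1)   # binary network alphabet

def fanout(x, F1, F2):
    """Terminal observations on (e4, e5) when at most one of the symbols
    sent on e1, e2, e3 is corrupted; F1, F2 act at V1 and V2."""
    outs = {(F1[x[0]], F2[x[1], x[2]])}        # error-free transmission
    for i in range(3):                          # adversary hits e_{i+1}
        for a in A:
            y = list(x); y[i] = a
            outs.add((F1[y[0]], F2[y[1], y[2]]))
    return outs

def rate_one_scheme_exists():
    """Search all network codes and all two-word outer codes."""
    for f1 in product(A, repeat=2):             # F1 : A -> A at V1
        F1 = dict(zip(A, f1))
        for f2 in product(A, repeat=4):         # F2 : A^2 -> A at V2
            F2 = dict(zip(product(A, repeat=2), f2))
            for c, d in combinations(product(A, repeat=3), 2):
                if fanout(c, F1, F2).isdisjoint(fanout(d, F1, F2)):
                    return True
    return False

print(rate_one_scheme_exists())   # expected: False
\end{verbatim}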
To see achievability of Theorem \ref{thm:diamond_cap}, consider sending a repetition code across $e_1$, $e_2$, and $e_3$. Intermediate node~$V_1$ forwards its received symbol; $V_2$ forwards if the two incoming symbols match, and sends a special reserved~``alarm'' symbol if they do not. The terminal looks to see whether the alarm symbol was sent across~$e_5$. If so, it trusts $e_4$. If not, it trusts $e_5$. The (necessary) sacrifice of one alphabet symbol to be used as an alarm, or locator of the adversary, results in a rate~of $\log_{|\mA|} (|\mA|-1)$. A proof that this is the best rate possible was first presented in~\cite{beemer2021curious}, and the result is also shown in a new, more general way in Section \ref{sec:upper}. Interestingly, when we add an additional edge to the Diamond Network, resulting in the so-called \textbf{Mirrored Diamond Network} $\mathfrak{D}_1$ of Figure~\ref{fig:mirrored}, the Generalized Network Singleton Bound of Theorem~\ref{sbound} becomes tight (with the analogous adversarial action). More precisely, the following holds. It should be noted that the case of adding an extra incoming edge to $V_2$ of Figure \ref{fig:diamond} is covered by Corollary \ref{cor:conf}. \begin{theorem} For the Mirrored Diamond Network $\mathfrak{D}_1$ of Figure \ref{fig:mirrored}, any network alphabet $\mA$, and~$\mU=\{e_1,e_2,e_3,e_4\}$, we have $$\CC_1(\mathfrak{D}_1,\mA,\mU,1) = 1.$$ \end{theorem} By Theorem~\ref{sbound}, the previous result may be shown by simply exhibiting an explicit scheme achieving the upper bound of 1. We send a repetition code across $e_1$, $e_2$, $e_3$, and $e_4$. Each of~$V_1$ and~$V_2$ forwards if the two incoming symbols match, and sends a reserved alarm symbol if they do not. The terminal trusts the edge without the alarm symbol; if both send the alarm symbol, the terminal decodes to that symbol. Notice that we again make use of an alarm symbol, but that this symbol could also be sent as easily as any other alphabet symbol. This marks a striking difference from the alarm symbol used in the achievability of the Diamond Network capacity, which is instead sacrificed. 
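The alarm-symbol scheme is equally easy to check mechanically. The sketch below (again only an illustration, with a five-symbol alphabet chosen for concreteness and helper names of our own) simulates the Diamond Network scheme and verifies that every message other than the alarm symbol is decoded correctly under every admissible corruption of $e_1$, $e_2$ or $e_3$; the scheme for the Mirrored Diamond Network can be verified in the same way.
\begin{verbatim}
from itertools import product

A = list(range(5))       # a five-symbol alphabet, for illustration only
ALARM = 4                # reserved alarm symbol
MESSAGES = A[:-1]        # the remaining |A| - 1 symbols carry information

def diamond(m, hit=None, value=None):
    """Run the repetition/alarm scheme on message m, optionally replacing
    the symbol on edge e_hit (hit in {1, 2, 3}) by `value`."""
    e = {1: m, 2: m, 3: m}                    # symbols sent by S
    if hit is not None:
        e[hit] = value
    e4 = e[1]                                 # V1 forwards
    e5 = e[2] if e[2] == e[3] else ALARM      # V2 forwards or raises alarm
    return e4 if e5 == ALARM else e5          # terminal's decision rule

corruptions = [(None, None)] + list(product((1, 2, 3), A))
assert all(diamond(m, *c) == m for m in MESSAGES for c in corruptions)
print("all messages decoded correctly under any single error on e1, e2, e3")
\end{verbatim}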
\begin{figure}[htbp]
\centering
\begin{tikzpicture}
\tikzset{vertex/.style = {shape=circle,draw,inner sep=0pt,minimum size=1.9em}}
\tikzset{nnode/.style = {shape=circle,fill=myg,draw,inner sep=0pt,minimum size=1.9em}}
\tikzset{edge/.style = {->,> = stealth}}
\tikzset{dedge/.style = {densely dotted,->,> = stealth}}
\tikzset{ddedge/.style = {dashed,->,> = stealth}}
\node[vertex] (S1) {$S$};
\node[shape=coordinate,right=\mynodespace*1.3 of S1] (K) {};
\node[nnode,above=0.45\mynodespace of K] (V1) {$V_1$};
\node[nnode,below=0.45\mynodespace of K] (V2) {$V_2$};
\node[vertex,right=\mynodespace of K] (T) {$T$};
\draw[edge,bend left=15] (S1) to node[sloped,fill=white, inner sep=1pt]{\small $e_1$} (V1);
\draw[edge,bend right=15] (S1) to node[sloped,fill=white, inner sep=1pt]{\small $e_2$} (V1);
\draw[edge,bend left=15] (S1) to node[sloped,fill=white, inner sep=1pt]{\small $e_3$} (V2);
\draw[edge,bend right=15] (S1) to node[sloped,fill=white, inner sep=1pt]{\small $e_4$} (V2);
\draw[edge,bend left=0] (V1) to node[sloped,fill=white, inner sep=1pt]{\small $e_{5}$} (T);
\draw[edge,bend left=0] (V2) to node[sloped,fill=white, inner sep=1pt]{\small $e_{6}$} (T);
\end{tikzpicture}
\caption{The Mirrored Diamond Network $\mathfrak{D}_1$.} \label{fig:mirrored}
\end{figure}

The example of the Mirrored Diamond Network of Figure \ref{fig:mirrored} indicates that the \textit{bottleneck} vertex $V_2$ of the Diamond Network of Figure \ref{fig:diamond} does not tell the whole story about whether the Generalized Network Singleton Bound is achievable. Instead, something more subtle is occurring with the manner in which information ``streams'' are split within the network. One may naturally wonder about the impact of adding additional edges to $V_1$ and/or $V_2$; we leave an exploration of a variety of such families to later sections. We next look at the scenario of random noise to build further intuition.

\subsection{Information Theory Intuition}
\label{sec:info}

Partial information-theoretic intuition for why the Generalized Network Singleton Bound of Theorem \ref{sbound} cannot be achieved in some cases when an adversary is restricted to a particular portion of the network can be gained by considering random (rather than adversarial) noise. In this subsection, we will briefly consider the standard information-theoretic definition of capacity. That is, capacity will be defined as the supremum of rates for which an asymptotically vanishing decoding error probability (as opposed to zero error) is achievable. We do not include all the fundamental information theory definitions, instead referring the reader to e.g.~\cite{coverthomas} for more details.

\begin{example} \label{ex:info_thy} Consider a unicast network with a single terminal and one intermediate node as illustrated in Figure \ref{fig:inf-thy-1a}. Suppose that the first three edges of the network experience random, binary symmetric noise. That is, in standard information theory notation and terminology, each dashed edge indicates a Binary Symmetric Channel with transition probability $p$, denoted BSC($p$), while the two edges from the intermediate node to the terminal are noiseless. In the sequel, we also let $$H(p)=-p\log_2(p)-(1-p)\log_2(1-p)$$ denote the binary entropy function associated with the transition probability~$p$.
\begin{figure}[hbtp]
\centering
\begin{subfigure}{.4\textwidth}
\centering
\begin{tikzpicture}
\tikzset{vertex/.style = {shape=circle,draw,inner sep=0pt,minimum size=1.9em}}
\tikzset{nnode/.style = {shape=circle,fill=myg,draw,inner sep=0pt,minimum size=1.9em}}
\tikzset{edge/.style = {->,> = stealth}}
\tikzset{dedge/.style = {densely dotted,->,> = stealth}}
\tikzset{ddedge/.style = {dashed,->,> = stealth}}
\node[vertex] (S1) {$S$};
\node[nnode,right=\mynodespace*0.8 of S1] (V1) {$V$};
\node[vertex,right=\mynodespace*0.6 of V1] (T) {$T$};
\draw[ddedge,bend left=0] (S1) to node[sloped,fill=white, inner sep=1pt]{\small $e_2$} (V1);
\draw[ddedge,bend left=25] (S1) to node[sloped,fill=white, inner sep=1pt]{\small $e_1$} (V1);
\draw[ddedge,bend right=25] (S1) to node[sloped,fill=white, inner sep=1pt]{\small $e_3$} (V1);
\draw[edge,bend left=15] (V1) to node[sloped,fill=white, inner sep=1pt]{\small $e_4$} (T);
\draw[edge,bend right=15] (V1) to node[sloped,fill=white, inner sep=1pt]{\small $e_{5}$} (T);
\end{tikzpicture}
\caption{\label{fig:inf-thy-1a} Dashed edges act as BSC($p$)s. The capacity of each is then $1-H(p)$, while solid edges each have capacity~$1$.}
\end{subfigure}
\hspace{0.1\textwidth}
\begin{subfigure}{.4\textwidth}
\centering
\begin{tikzpicture}
\tikzset{vertex/.style = {shape=circle,draw,inner sep=0pt,minimum size=1.9em}}
\tikzset{nnode/.style = {shape=circle,fill=myg,draw,inner sep=0pt,minimum size=1.9em}}
\tikzset{edge/.style = {->,> = stealth}}
\tikzset{dedge/.style = {densely dotted,->,> = stealth}}
\tikzset{ddedge/.style = {dashed,->,> = stealth}}
\node[vertex] (S1) {$S$};
\node[nnode,right=\mynodespace of S1] (V1) {$V$};
\node[vertex,right=\mynodespace*0.6 of V1] (T) {$T$};
\draw[edge,bend left=0] (S1) to node[sloped,fill=white, inner sep=1pt]{\footnotesize $3-3H(p)$} (V1);
\draw[edge,bend left=0] (V1) to node[sloped,fill=white, inner sep=1pt]{\footnotesize $2$} (T);
\end{tikzpicture}
\caption{\label{fig:inf-thy-1b} The capacity of each (collapsed) edge is labeled.}
\end{subfigure}
\caption{\label{fig:inf-thy-1} A network with multi-edges, along with its simplified version with collapsed multi-edges labeled with their capacities.}
\end{figure}

Each of the multi-edge-sets $\{e_1,e_2,e_3\}$ and $\{e_4,e_5\}$ can then be considered as collapsed to a single edge of capacity $3(1-H(p))$ and $2$, respectively (see Figure \ref{fig:inf-thy-1b}). Recall that the Max-Flow Min-Cut Theorem (see e.g. \cite[Theorem 15.2]{elgamalkim}) states that the capacity of the network is equal to the minimum over all edge-cuts of the network of the sum of the capacities of the edges in the cut. Thus, the capacity of our network is equal to $\min\{2,3-3H(p)\}$. Next, we split the intermediate node into two nodes, as in Figure \ref{fig:inf-thy-2}. Again making use of the Max-Flow Min-Cut Theorem, the new network's capacity is equal to $$\min\{1,1-H(p)\}+\min\{1,2(1-H(p))\}=1-H(p)+\min\{1,2-2H(p)\}.$$ One can easily determine that this value is upper bounded by the capacity of the network in Figure~\ref{fig:inf-thy-1} for all $0\leq p\leq 0.5$. Furthermore, when $0< H(p) < 0.5$ (i.e., when $p$ is positive and less than approximately $0.11$), this inequality is strict. In other words, splitting the intermediate node reduces capacity for an interval of small transition probabilities.
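These two capacity expressions are easy to evaluate numerically. The following short Python sketch (purely illustrative; the function names are ours) computes both and locates the interval of transition probabilities on which the split network is strictly worse.

\begin{verbatim}
import math

def H(p):
    # binary entropy function
    if p in (0.0, 1.0):
        return 0.0
    return -p*math.log2(p) - (1 - p)*math.log2(1 - p)

def cap_original(p):
    # original network, after collapsing multi-edges: min{2, 3(1 - H(p))}
    return min(2.0, 3.0*(1.0 - H(p)))

def cap_split(p):
    # split intermediate node: min{1, 1 - H(p)} + min{1, 2(1 - H(p))}
    return min(1.0, 1.0 - H(p)) + min(1.0, 2.0*(1.0 - H(p)))

gap = [p/1000 for p in range(501)
       if cap_original(p/1000) > cap_split(p/1000) + 1e-12]
print(min(gap), max(gap))   # about 0.001 and 0.11: gap is nonzero for 0 < H(p) < 1/2
\end{verbatim}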
\begin{figure}[hbtp]
\centering
\begin{tikzpicture}
\tikzset{vertex/.style = {shape=circle,draw,inner sep=0pt,minimum size=1.9em}}
\tikzset{nnode/.style = {shape=circle,fill=myg,draw,inner sep=0pt,minimum size=1.9em}}
\tikzset{edge/.style = {->,> = stealth}}
\tikzset{dedge/.style = {densely dotted,->,> = stealth}}
\tikzset{ddedge/.style = {dashed,->,> = stealth}}
\node[vertex] (S1) {$S$};
\node[shape=coordinate,right=\mynodespace of S1] (K) {};
\node[nnode,above=0.25\mynodespace of K] (V1) {$V_1$};
\node[nnode,below=0.25\mynodespace of K] (V2) {$V_2$};
\node[vertex,right=\mynodespace of K] (T) {$T$};
\draw[ddedge,bend left=0] (S1) to node[sloped,fill=white, inner sep=1pt]{\small $e_1$} (V1);
\draw[ddedge,bend left=15] (S1) to node[sloped,fill=white, inner sep=1pt]{\small $e_2$} (V2);
\draw[ddedge,bend right=15] (S1) to node[sloped,fill=white, inner sep=1pt]{\small $e_3$} (V2);
\draw[edge,bend left=0] (V1) to node[sloped,fill=white, inner sep=1pt]{\small $e_4$} (T);
\draw[edge,bend left=0] (V2) to node[sloped,fill=white, inner sep=1pt]{\small $e_{5}$} (T);
\end{tikzpicture}
\caption{\label{fig:inf-thy-2} Vertex $V$ of Figure \ref{fig:inf-thy-1a} is split into two intermediate nodes.}
\end{figure}

As a first natural generalization, suppose the network of Figure \ref{fig:inf-thy-1} had $n$ edges from the source to the intermediate node and $n-1$ edges from the intermediate node to the terminal (so that Figures~\ref{fig:inf-thy-1} and~\ref{fig:inf-thy-2} depict the case $n=3$), and that the~(extension of the) network shown in Figure~\ref{fig:inf-thy-2} peeled off just a single edge from each layer, resulting in~$\degin(V_1)=\degout(V_1)=1$, $\degin(V_2)=n-1$, and $\degout(V_2)=n-2$. Then the capacity gap between the first and second networks would be non-zero for all $0<H(p)<1/(n-1)$. This gap is illustrated for $n \in \{3,5,7\}$ in Figure~\ref{fig:inf-thy-3}. Denote by ``Scenario 1'' the original network for the given value of~$n$, and by ``Scenario 2'' the corresponding network with split intermediate node. In Section~\ref{sec:families} we return to this generalization with adversarial as opposed to random noise; there, it is termed Family~\ref{ex:s}.
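The same computation extends to this first generalization. The sketch below (again purely illustrative, with our own function names) confirms numerically that the capacity gap vanishes once $H(p)$ reaches $1/(n-1)$, in agreement with Figure~\ref{fig:inf-thy-3}.

\begin{verbatim}
import math

def H(p):
    return 0.0 if p in (0.0, 1.0) else -p*math.log2(p) - (1 - p)*math.log2(1 - p)

def scenario1(n, p):
    # single intermediate node: n noisy in-edges, n-1 noiseless out-edges
    return min(n - 1.0, n*(1.0 - H(p)))

def scenario2(n, p):
    # split node: V1 keeps one edge per layer, V2 keeps the remaining ones
    return min(1.0, 1.0 - H(p)) + min(n - 2.0, (n - 1.0)*(1.0 - H(p)))

for n in (3, 5, 7):
    gap = [p/1000 for p in range(1, 500)
           if scenario1(n, p/1000) > scenario2(n, p/1000) + 1e-12]
    print(n, max(gap), H(max(gap)))   # last entropy value is just below 1/(n-1)
\end{verbatim}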
\begin{figure}[h] \centering \begin{tikzpicture}[scale=0.9] \begin{axis}[legend style={at={(0.6,0.93)}, anchor = north west}, legend cell align={left}, width=13cm,height=8cm, xlabel={$p$}, ylabel={Network capacity}, xmin=0, xmax=0.5, ymin=0, ymax=7, xtick={0,0.11,0.2,0.3, 0.4, 0.5}, ytick={1,2,3,4,5,6,7}, ymajorgrids=true, grid style=dashed, every axis plot/.append style={thick}, yticklabel style={/pgf/number format/fixed} ] \addplot[color=red,style={ultra thick}] coordinates { ( 0.0, 2 ) ( 0.01, 2.0 ) ( 0.02, 2.0 ) ( 0.03, 2.0 ) ( 0.04, 2.0 ) ( 0.05, 2.0 ) ( 0.06, 2.0 ) ( 0.07, 1.902229047 ) ( 0.08, 1.793462429 ) ( 0.09, 1.690590549 ) ( 0.1, 1.593013219 ) ( 0.11, 1.500252126 ) ( 0.12, 1.411917404 ) ( 0.13, 1.327685445 ) ( 0.14, 1.247283565 ) ( 0.15, 1.170479086 ) ( 0.16, 1.097071336 ) ( 0.17, 1.026885664 ) ( 0.18, 0.959768863 ) ( 0.19, 0.89558562 ) ( 0.2, 0.834215715 ) ( 0.21, 0.77555178 ) ( 0.22, 0.719497491 ) ( 0.23, 0.665966089 ) ( 0.24, 0.614879162 ) ( 0.25, 0.566165627 ) ( 0.26, 0.519760883 ) ( 0.27, 0.475606091 ) ( 0.28, 0.433647568 ) ( 0.29, 0.393836261 ) ( 0.3, 0.356127302 ) ( 0.31, 0.320479625 ) ( 0.32, 0.286855627 ) ( 0.33, 0.255220882 ) ( 0.34, 0.225543885 ) ( 0.35000000000000003, 0.197795834 ) ( 0.36, 0.171950432 ) ( 0.37, 0.147983722 ) ( 0.38, 0.125873933 ) ( 0.39, 0.105601354 ) ( 0.4, 0.087148217 ) ( 0.41000000000000003, 0.070498594 ) ( 0.42, 0.055638315 ) ( 0.43, 0.042554888 ) ( 0.44, 0.031237436 ) ( 0.45, 0.021676638 ) ( 0.46, 0.013864684 ) ( 0.47000000000000003, 0.007795234 ) ( 0.48, 0.003463392 ) ( 0.49, 0.000865675 ) ( 0.5, 0.0 ) }; \addplot[color=green,dashed,style={ultra thick}] coordinates {( 0.0, 2 ) ( 0.01, 1.919206864 ) ( 0.02, 1.858559457 ) ( 0.03, 1.805608142 ) ( 0.04, 1.757707811 ) ( 0.05, 1.713603043 ) ( 0.06, 1.672555081 ) ( 0.07, 1.634076349 ) ( 0.08, 1.59782081 ) ( 0.09, 1.563530183 ) ( 0.1, 1.531004406 ) ( 0.11, 1.500084042 ) ( 0.12, 1.411917404 ) ( 0.13, 1.327685445 ) ( 0.14, 1.247283565 ) ( 0.15, 1.170479086 ) ( 0.16, 1.097071336 ) ( 0.17, 1.026885664 ) ( 0.18, 0.959768863 ) ( 0.19, 0.89558562 ) ( 0.2, 0.834215715 ) ( 0.21, 0.77555178 ) ( 0.22, 0.719497491 ) ( 0.23, 0.665966089 ) ( 0.24, 0.614879162 ) ( 0.25, 0.566165627 ) ( 0.26, 0.519760883 ) ( 0.27, 0.475606091 ) ( 0.28, 0.433647568 ) ( 0.29, 0.393836261 ) ( 0.3, 0.356127302 ) ( 0.31, 0.320479625 ) ( 0.32, 0.286855627 ) ( 0.33, 0.255220882 ) ( 0.34, 0.225543885 ) ( 0.35000000000000003, 0.197795834 ) ( 0.36, 0.171950432 ) ( 0.37, 0.147983722 ) ( 0.38, 0.125873933 ) ( 0.39, 0.105601354 ) ( 0.4, 0.087148217 ) ( 0.41000000000000003, 0.070498594 ) ( 0.42, 0.055638315 ) ( 0.43, 0.042554888 ) ( 0.44, 0.031237436 ) ( 0.45, 0.021676638 ) ( 0.46, 0.013864684 ) ( 0.47000000000000003, 0.007795234 ) ( 0.48, 0.003463392 ) ( 0.49, 0.000865675 ) ( 0.5, 0.0 ) }; \addplot[color=blue,style={ultra thick}] coordinates { ( 0.0, 4 ) ( 0.01, 4.0 ) ( 0.02, 4.0 ) ( 0.03, 4.0 ) ( 0.04, 3.788539055 ) ( 0.05, 3.568015214 ) ( 0.06, 3.362775404 ) ( 0.07, 3.170381745 ) ( 0.08, 2.989104049 ) ( 0.09, 2.817650915 ) ( 0.1, 2.655022032 ) ( 0.11, 2.500420209 ) ( 0.12, 2.353195674 ) ( 0.13, 2.212809075 ) ( 0.14, 2.078805942 ) ( 0.15, 1.950798476 ) ( 0.16, 1.828452227 ) ( 0.17, 1.711476106 ) ( 0.18, 1.599614771 ) ( 0.19, 1.492642701 ) ( 0.2, 1.390359526 ) ( 0.21, 1.2925863 ) ( 0.22, 1.199162485 ) ( 0.23, 1.109943482 ) ( 0.24, 1.024798603 ) ( 0.25, 0.943609378 ) ( 0.26, 0.866268138 ) ( 0.27, 0.792676819 ) ( 0.28, 0.722745947 ) ( 0.29, 0.656393768 ) ( 0.3, 0.593545504 ) ( 0.31, 0.534132708 ) ( 0.32, 0.478092711 ) ( 0.33, 0.425368136 ) ( 0.34, 
0.375906475 ) ( 0.35000000000000003, 0.329659723 ) ( 0.36, 0.286584054 ) ( 0.37, 0.246639537 ) ( 0.38, 0.209789889 ) ( 0.39, 0.176002257 ) ( 0.4, 0.145247028 ) ( 0.41000000000000003, 0.117497656 ) ( 0.42, 0.092730525 ) ( 0.43, 0.070924814 ) ( 0.44, 0.052062394 ) ( 0.45, 0.03612773 ) ( 0.46, 0.023107806 ) ( 0.47000000000000003, 0.012992057 ) ( 0.48, 0.00577232 ) ( 0.49, 0.001442791 ) ( 0.5, 0.0 ) }; \addplot[color=cyan,dashed,style={ultra thick}] coordinates {( 0.0, 4 ) ( 0.01, 3.919206864 ) ( 0.02, 3.858559457 ) ( 0.03, 3.805608142 ) ( 0.04, 3.757707811 ) ( 0.05, 3.568015214 ) ( 0.06, 3.362775404 ) ( 0.07, 3.170381745 ) ( 0.08, 2.989104049 ) ( 0.09, 2.817650915 ) ( 0.1, 2.655022032 ) ( 0.11, 2.500420209 ) ( 0.12, 2.353195674 ) ( 0.13, 2.212809075 ) ( 0.14, 2.078805942 ) ( 0.15, 1.950798476 ) ( 0.16, 1.828452227 ) ( 0.17, 1.711476106 ) ( 0.18, 1.599614771 ) ( 0.19, 1.492642701 ) ( 0.2, 1.390359526 ) ( 0.21, 1.2925863 ) ( 0.22, 1.199162485 ) ( 0.23, 1.109943482 ) ( 0.24, 1.024798603 ) ( 0.25, 0.943609378 ) ( 0.26, 0.866268138 ) ( 0.27, 0.792676819 ) ( 0.28, 0.722745947 ) ( 0.29, 0.656393768 ) ( 0.3, 0.593545504 ) ( 0.31, 0.534132708 ) ( 0.32, 0.478092711 ) ( 0.33, 0.425368136 ) ( 0.34, 0.375906475 ) ( 0.35000000000000003, 0.329659723 ) ( 0.36, 0.286584054 ) ( 0.37, 0.246639537 ) ( 0.38, 0.209789889 ) ( 0.39, 0.176002257 ) ( 0.4, 0.145247028 ) ( 0.41000000000000003, 0.117497656 ) ( 0.42, 0.092730525 ) ( 0.43, 0.070924814 ) ( 0.44, 0.052062394 ) ( 0.45, 0.03612773 ) ( 0.46, 0.023107806 ) ( 0.47000000000000003, 0.012992057 ) ( 0.48, 0.00577232 ) ( 0.49, 0.001442791 ) ( 0.5, 0.0 ) }; \addplot[color=magenta,style={ultra thick}] coordinates {( 0.0, 6 ) ( 0.01, 6.0 ) ( 0.02, 6.0 ) ( 0.03, 5.639256995 ) ( 0.04, 5.303954676 ) ( 0.05, 4.9952213 ) ( 0.06, 4.707885566 ) ( 0.07, 4.438534444 ) ( 0.08, 4.184745669 ) ( 0.09, 3.944711281 ) ( 0.1, 3.717030845 ) ( 0.11, 3.500588293 ) ( 0.12, 3.294473943 ) ( 0.13, 3.097932705 ) ( 0.14, 2.910328319 ) ( 0.15, 2.731117867 ) ( 0.16, 2.559833118 ) ( 0.17, 2.396066549 ) ( 0.18, 2.23946068 ) ( 0.19, 2.089699781 ) ( 0.2, 1.946503336 ) ( 0.21, 1.80962082 ) ( 0.22, 1.678827479 ) ( 0.23, 1.553920875 ) ( 0.24, 1.434718044 ) ( 0.25, 1.321053129 ) ( 0.26, 1.212775393 ) ( 0.27, 1.109747547 ) ( 0.28, 1.011844326 ) ( 0.29, 0.918951276 ) ( 0.3, 0.830963705 ) ( 0.31, 0.747785791 ) ( 0.32, 0.669329796 ) ( 0.33, 0.595515391 ) ( 0.34, 0.526269065 ) ( 0.35000000000000003, 0.461523612 ) ( 0.36, 0.401217675 ) ( 0.37, 0.345295351 ) ( 0.38, 0.293705844 ) ( 0.39, 0.24640316 ) ( 0.4, 0.203345839 ) ( 0.41000000000000003, 0.164496719 ) ( 0.42, 0.129822735 ) ( 0.43, 0.09929474 ) ( 0.44, 0.072887351 ) ( 0.45, 0.050578822 ) ( 0.46, 0.032350928 ) ( 0.47000000000000003, 0.01818888 ) ( 0.48, 0.008081248 ) ( 0.49, 0.002019908 ) ( 0.5, 0.0 ) }; \addplot[color=orange,dashed,style={ultra thick}] coordinates {( 0.0, 6 ) ( 0.01, 5.919206864 ) ( 0.02, 5.858559457 ) ( 0.03, 5.639256995 ) ( 0.04, 5.303954676 ) ( 0.05, 4.9952213 ) ( 0.06, 4.707885566 ) ( 0.07, 4.438534444 ) ( 0.08, 4.184745669 ) ( 0.09, 3.944711281 ) ( 0.1, 3.717030845 ) ( 0.11, 3.500588293 ) ( 0.12, 3.294473943 ) ( 0.13, 3.097932705 ) ( 0.14, 2.910328319 ) ( 0.15, 2.731117867 ) ( 0.16, 2.559833118 ) ( 0.17, 2.396066549 ) ( 0.18, 2.23946068 ) ( 0.19, 2.089699781 ) ( 0.2, 1.946503336 ) ( 0.21, 1.80962082 ) ( 0.22, 1.678827479 ) ( 0.23, 1.553920875 ) ( 0.24, 1.434718044 ) ( 0.25, 1.321053129 ) ( 0.26, 1.212775393 ) ( 0.27, 1.109747547 ) ( 0.28, 1.011844326 ) ( 0.29, 0.918951276 ) ( 0.3, 0.830963705 ) ( 0.31, 0.747785791 ) ( 0.32, 0.669329796 
) ( 0.33, 0.595515391 ) ( 0.34, 0.526269065 ) ( 0.35000000000000003, 0.461523612 ) ( 0.36, 0.401217675 ) ( 0.37, 0.345295351 ) ( 0.38, 0.293705844 ) ( 0.39, 0.24640316 ) ( 0.4, 0.203345839 ) ( 0.41000000000000003, 0.164496719 ) ( 0.42, 0.129822735 ) ( 0.43, 0.09929474 ) ( 0.44, 0.072887351 ) ( 0.45, 0.050578822 ) ( 0.46, 0.032350928 ) ( 0.47000000000000003, 0.01818888 ) ( 0.48, 0.008081248 ) ( 0.49, 0.002019908 ) ( 0.5, 0.0 ) }; \legend{\small{Scenario 1, $n=3$}, \small{Scenario 2, $n=3$},\small{Scenario 1, $n=5$},\small{Scenario 2, $n=5$},\small{Scenario 1, $n=7$},\small{Scenario 2, $n=7$}} \end{axis} \end{tikzpicture} \caption{\label{fig:inf-thy-3} Capacity gaps between the first generalized networks for $n \in \{3,5,7\}$.} \end{figure} A second possible generalization is as follows: suppose the network of Figure \ref{fig:inf-thy-1} had $3n$ edges from source to intermediate node, and $2n$ edges from intermediate node to terminal, and that the network of Figure \ref{fig:inf-thy-2} peeled off $n$ edges from each layer so that~$\degin(V_1)=\degout(V_1)=n$, $\degin(V_2)=2n$, and $\degout(V_2)=n$. Interestingly, the capacity gap between the first and second networks would be non-zero for all $0<H(p)<0.5$, regardless of the value of $n$. This is illustrated for~$n \in \{3,5,7\}$ in Figure~\ref{fig:inf-thy-4}. Denote by ``Scenario 1'' the original network for the given value of~$n$, and by ``Scenario 2'' the corresponding network with split intermediate node. In Section~\ref{sec:families} we return to this generalization with adversarial as opposed to random noise; there, it is termed Family~\ref{fam:a}. \end{example} \begin{figure}[h!] \centering \begin{tikzpicture}[scale=0.9] \begin{axis}[legend style={at={(0.6,0.93)}, anchor = north west}, legend cell align={left}, width=13cm,height=8cm, xlabel={$p$}, ylabel={Network capacity}, xmin=0, xmax=0.5, ymin=0, ymax=16, xtick={0,0.11,0.2,0.3, 0.4, 0.5}, ytick={2,4,6,8,10,12,14,16}, ymajorgrids=true, grid style=dashed, every axis plot/.append style={thick}, yticklabel style={/pgf/number format/fixed} ] \addplot[color=red,style={ultra thick}] coordinates { ( 0.0, 6 ) ( 0.01, 6.0 ) ( 0.02, 6.0 ) ( 0.03, 6.0 ) ( 0.04, 6.0 ) ( 0.05, 6.0 ) ( 0.06, 6.0 ) ( 0.07, 5.706687142 ) ( 0.08, 5.380387288 ) ( 0.09, 5.071771646 ) ( 0.1, 4.779039658 ) ( 0.11, 4.500756377 ) ( 0.12, 4.235752212 ) ( 0.13, 3.983056335 ) ( 0.14, 3.741850695 ) ( 0.15, 3.511437258 ) ( 0.16, 3.291214008 ) ( 0.17, 3.080656991 ) ( 0.18, 2.879306588 ) ( 0.19, 2.686756861 ) ( 0.2, 2.502647146 ) ( 0.21, 2.326655341 ) ( 0.22, 2.158492473 ) ( 0.23, 1.997898268 ) ( 0.24, 1.844637486 ) ( 0.25, 1.69849688 ) ( 0.26, 1.559282648 ) ( 0.27, 1.426818274 ) ( 0.28, 1.300942705 ) ( 0.29, 1.181508783 ) ( 0.3, 1.068381907 ) ( 0.31, 0.961438875 ) ( 0.32, 0.86056688 ) ( 0.33, 0.765662645 ) ( 0.34, 0.676631655 ) ( 0.35000000000000003, 0.593387502 ) ( 0.36, 0.515851297 ) ( 0.37, 0.443951166 ) ( 0.38, 0.3776218 ) ( 0.39, 0.316804063 ) ( 0.4, 0.26144465 ) ( 0.41000000000000003, 0.211495781 ) ( 0.42, 0.166914945 ) ( 0.43, 0.127664665 ) ( 0.44, 0.093712309 ) ( 0.45, 0.065029914 ) ( 0.46, 0.041594051 ) ( 0.47000000000000003, 0.023385703 ) ( 0.48, 0.010390176 ) ( 0.49, 0.002597024 ) ( 0.5, 0.0 ) }; \addplot[color=green,dashed,style={ultra thick}] coordinates { ( 0.0, 6 ) ( 0.01, 5.757620592 ) ( 0.02, 5.575678372 ) ( 0.03, 5.416824427 ) ( 0.04, 5.273123433 ) ( 0.05, 5.140809129 ) ( 0.06, 5.017665243 ) ( 0.07, 4.902229047 ) ( 0.08, 4.793462429 ) ( 0.09, 4.690590549 ) ( 0.1, 4.593013219 ) ( 0.11, 4.500252126 ) ( 0.12, 
4.235752212 ) ( 0.13, 3.983056335 ) ( 0.14, 3.741850695 ) ( 0.15, 3.511437258 ) ( 0.16, 3.291214008 ) ( 0.17, 3.080656991 ) ( 0.18, 2.879306588 ) ( 0.19, 2.686756861 ) ( 0.2, 2.502647146 ) ( 0.21, 2.326655341 ) ( 0.22, 2.158492473 ) ( 0.23, 1.997898268 ) ( 0.24, 1.844637486 ) ( 0.25, 1.69849688 ) ( 0.26, 1.559282648 ) ( 0.27, 1.426818274 ) ( 0.28, 1.300942705 ) ( 0.29, 1.181508783 ) ( 0.3, 1.068381907 ) ( 0.31, 0.961438875 ) ( 0.32, 0.86056688 ) ( 0.33, 0.765662645 ) ( 0.34, 0.676631655 ) ( 0.35000000000000003, 0.593387502 ) ( 0.36, 0.515851297 ) ( 0.37, 0.443951166 ) ( 0.38, 0.3776218 ) ( 0.39, 0.316804063 ) ( 0.4, 0.26144465 ) ( 0.41000000000000003, 0.211495781 ) ( 0.42, 0.166914945 ) ( 0.43, 0.127664665 ) ( 0.44, 0.093712309 ) ( 0.45, 0.065029914 ) ( 0.46, 0.041594051 ) ( 0.47000000000000003, 0.023385703 ) ( 0.48, 0.010390176 ) ( 0.49, 0.002597024 ) ( 0.5, 0.0 ) }; \addplot[color=blue,style={ultra thick}] coordinates { ( 0.0, 10 ) ( 0.01, 10.0 ) ( 0.02, 10.0 ) ( 0.03, 10.0 ) ( 0.04, 10.0 ) ( 0.05, 10.0 ) ( 0.06, 10.0 ) ( 0.07, 9.511145236 ) ( 0.08, 8.967312147 ) ( 0.09, 8.452952744 ) ( 0.1, 7.965066096 ) ( 0.11, 7.501260628 ) ( 0.12, 7.059587021 ) ( 0.13, 6.638427225 ) ( 0.14, 6.236417825 ) ( 0.15, 5.852395429 ) ( 0.16, 5.48535668 ) ( 0.17, 5.134428319 ) ( 0.18, 4.798844314 ) ( 0.19, 4.477928102 ) ( 0.2, 4.171078577 ) ( 0.21, 3.877758901 ) ( 0.22, 3.597487456 ) ( 0.23, 3.329830447 ) ( 0.24, 3.074395809 ) ( 0.25, 2.830828133 ) ( 0.26, 2.598804413 ) ( 0.27, 2.378030457 ) ( 0.28, 2.168237842 ) ( 0.29, 1.969181305 ) ( 0.3, 1.780636512 ) ( 0.31, 1.602398124 ) ( 0.32, 1.434278134 ) ( 0.33, 1.276104408 ) ( 0.34, 1.127719425 ) ( 0.35000000000000003, 0.988979169 ) ( 0.36, 0.859752161 ) ( 0.37, 0.73991861 ) ( 0.38, 0.629369667 ) ( 0.39, 0.528006772 ) ( 0.4, 0.435741083 ) ( 0.41000000000000003, 0.352492969 ) ( 0.42, 0.278191574 ) ( 0.43, 0.212774442 ) ( 0.44, 0.156187182 ) ( 0.45, 0.10838319 ) ( 0.46, 0.069323418 ) ( 0.47000000000000003, 0.038976171 ) ( 0.48, 0.01731696 ) ( 0.49, 0.004328374 ) ( 0.5, 0.0 ) }; \addplot[color=cyan,dashed,style={ultra thick}] coordinates { ( 0.0, 10 ) ( 0.01, 9.596034321 ) ( 0.02, 9.292797287 ) ( 0.03, 9.028040711 ) ( 0.04, 8.788539055 ) ( 0.05, 8.568015214 ) ( 0.06, 8.362775404 ) ( 0.07, 8.170381745 ) ( 0.08, 7.989104049 ) ( 0.09, 7.817650915 ) ( 0.1, 7.655022032 ) ( 0.11, 7.500420209 ) ( 0.12, 7.059587021 ) ( 0.13, 6.638427225 ) ( 0.14, 6.236417825 ) ( 0.15, 5.852395429 ) ( 0.16, 5.48535668 ) ( 0.17, 5.134428319 ) ( 0.18, 4.798844314 ) ( 0.19, 4.477928102 ) ( 0.2, 4.171078577 ) ( 0.21, 3.877758901 ) ( 0.22, 3.597487456 ) ( 0.23, 3.329830447 ) ( 0.24, 3.074395809 ) ( 0.25, 2.830828133 ) ( 0.26, 2.598804413 ) ( 0.27, 2.378030457 ) ( 0.28, 2.168237842 ) ( 0.29, 1.969181305 ) ( 0.3, 1.780636512 ) ( 0.31, 1.602398124 ) ( 0.32, 1.434278134 ) ( 0.33, 1.276104408 ) ( 0.34, 1.127719425 ) ( 0.35000000000000003, 0.988979169 ) ( 0.36, 0.859752161 ) ( 0.37, 0.73991861 ) ( 0.38, 0.629369667 ) ( 0.39, 0.528006772 ) ( 0.4, 0.435741083 ) ( 0.41000000000000003, 0.352492969 ) ( 0.42, 0.278191574 ) ( 0.43, 0.212774442 ) ( 0.44, 0.156187182 ) ( 0.45, 0.10838319 ) ( 0.46, 0.069323418 ) ( 0.47000000000000003, 0.038976171 ) ( 0.48, 0.01731696 ) ( 0.49, 0.004328374 ) ( 0.5, 0.0 ) }; \addplot[color=magenta,style={ultra thick}] coordinates { ( 0.0, 14 ) ( 0.01, 14.0 ) ( 0.02, 14.0 ) ( 0.03, 14.0 ) ( 0.04, 14.0 ) ( 0.05, 14.0 ) ( 0.06, 14.0 ) ( 0.07, 13.31560333 ) ( 0.08, 12.55423701 ) ( 0.09, 11.83413384 ) ( 0.1, 11.15109253 ) ( 0.11, 10.50176488 ) ( 0.12, 9.883421829 ) ( 0.13, 9.293798114 ) 
( 0.14, 8.730984956 ) ( 0.15, 8.193353601 ) ( 0.16, 7.679499353 ) ( 0.17, 7.188199646 ) ( 0.18, 6.71838204 ) ( 0.19, 6.269099342 ) ( 0.2, 5.839510007 ) ( 0.21, 5.428862461 ) ( 0.22, 5.036482438 ) ( 0.23, 4.661762626 ) ( 0.24, 4.304154133 ) ( 0.25, 3.963159386 ) ( 0.26, 3.638326178 ) ( 0.27, 3.32924264 ) ( 0.28, 3.035532978 ) ( 0.29, 2.756853827 ) ( 0.3, 2.492891116 ) ( 0.31, 2.243357374 ) ( 0.32, 2.007989388 ) ( 0.33, 1.786546172 ) ( 0.34, 1.578807196 ) ( 0.35000000000000003, 1.384570837 ) ( 0.36, 1.203653026 ) ( 0.37, 1.035886054 ) ( 0.38, 0.881117533 ) ( 0.39, 0.739209481 ) ( 0.4, 0.610037516 ) ( 0.41000000000000003, 0.493490156 ) ( 0.42, 0.389468204 ) ( 0.43, 0.297884219 ) ( 0.44, 0.218662054 ) ( 0.45, 0.151736466 ) ( 0.46, 0.097052785 ) ( 0.47000000000000003, 0.05456664 ) ( 0.48, 0.024243744 ) ( 0.49, 0.006059723 ) ( 0.5, 0.0 ) }; \addplot[color=orange,dashed,style={ultra thick}] coordinates { ( 0.0, 14 ) ( 0.01, 13.43444805 ) ( 0.02, 13.0099162 ) ( 0.03, 12.639257 ) ( 0.04, 12.30395468 ) ( 0.05, 11.9952213 ) ( 0.06, 11.70788557 ) ( 0.07, 11.43853444 ) ( 0.08, 11.18474567 ) ( 0.09, 10.94471128 ) ( 0.1, 10.71703084 ) ( 0.11, 10.50058829 ) ( 0.12, 9.883421829 ) ( 0.13, 9.293798114 ) ( 0.14, 8.730984956 ) ( 0.15, 8.193353601 ) ( 0.16, 7.679499353 ) ( 0.17, 7.188199646 ) ( 0.18, 6.71838204 ) ( 0.19, 6.269099342 ) ( 0.2, 5.839510007 ) ( 0.21, 5.428862461 ) ( 0.22, 5.036482438 ) ( 0.23, 4.661762626 ) ( 0.24, 4.304154133 ) ( 0.25, 3.963159386 ) ( 0.26, 3.638326178 ) ( 0.27, 3.32924264 ) ( 0.28, 3.035532978 ) ( 0.29, 2.756853827 ) ( 0.3, 2.492891116 ) ( 0.31, 2.243357374 ) ( 0.32, 2.007989388 ) ( 0.33, 1.786546172 ) ( 0.34, 1.578807196 ) ( 0.35000000000000003, 1.384570837 ) ( 0.36, 1.203653026 ) ( 0.37, 1.035886054 ) ( 0.38, 0.881117533 ) ( 0.39, 0.739209481 ) ( 0.4, 0.610037516 ) ( 0.41000000000000003, 0.493490156 ) ( 0.42, 0.389468204 ) ( 0.43, 0.297884219 ) ( 0.44, 0.218662054 ) ( 0.45, 0.151736466 ) ( 0.46, 0.097052785 ) ( 0.47000000000000003, 0.05456664 ) ( 0.48, 0.024243744 ) ( 0.49, 0.006059723 ) ( 0.5, 0.0 ) }; \legend{\small{Scenario 1, $n=3$}, \small{Scenario 2, $n=3$},\small{Scenario 1, $n=5$},\small{Scenario 2, $n=5$},\small{Scenario 1, $n=7$},\small{Scenario 2, $n=7$}} \end{axis} \end{tikzpicture} \caption{\label{fig:inf-thy-4} Capacity gaps between the second generalized networks for $n \in \{3,5,7\}$.} \end{figure} In both generalizations, we see that the intermediate node split in Example \ref{ex:info_thy} prevents edges $\{e_{1},e_{2},e_{3}\}$ from cooperating to send messages when the BSC transition probability is small: in the random noise scenario, the difference is due to a lower-capacity mixed-vulnerability edge-cut being present in the split network. This gap is mirrored by the gap in the 1-shot capacity we observe in adversarial networks with restricted adversaries, though the reason for the gap differs. In the case of a restricted adversary on these two networks who may corrupt up to one vulnerable edge (with no random noise), the Generalized Network Singleton Bound of Theorem~\ref{sbound} is achievable for the network in Scenario 1, while it is not achievable for the network in Scenario~2 (see Theorem~\ref{thm:diamond_cap}). For higher adversarial power, as for higher transition probability in the case of random noise, the two have matching capacities. In the case of adversarial noise, the difference in capacities for limited adversarial power is due to something beyond edge-cut differences, which are already baked into the Generalized Network Singleton Bound. 
Capacities with restricted adversaries are no longer additive, and so we must preserve the split structure by looking beyond both the Max-Flow Min-Cut Theorem and the Generalized Network Singleton Bound in order to establish improved upper bounds on capacity. \section{Networks with Two and Three Levels} \label{sec:net-2-and-3} In this section we focus on families of networks having 2 or 3 levels, a property which is defined formally below. Throughout this section, we assume basic graph theory knowledge; see e.g.~\cite{west2001introduction}. We show that one can upper bound the capacity of a special class of 3-level networks by the capacity of a corresponding 2-level network. We then define five families of 2-level networks, which will be key players of this paper and whose capacities will be computed or estimated in later sections. In Section \ref{sec:double-cut-bd} we will show how the results of this section can be ``ported'' to arbitrary networks using a general method that describes the information transfer from one edge-set to another; see in particular the Double-Cut-Set Bound of Theorem~\ref{thm:dcsb}. All of this will also allow us to compute the capacity of the network that opened the paper. \subsection{$m$-Level Networks} \begin{definition} \label{def:n-level} Let $\mN=(\mV,\mE,S,\mathbf{T})$ be a network and let $V,V' \in \mV$. We say that $V'$ \textbf{covers}~$V$ if $(V,V') \in \mE$. We call $\mN$ an \textbf{$m$-level} network if $\mV$ can be partitioned into $m+1$ sets $\mV_{0},\ldots,\mV_{m}$ such that $\mV_{0}=\{S\}$, $\mV_{m}=\mathbf{T}$, and each node in $\mV_{k}$, for $k\in \{1,...,m-1\}$, is only covered by elements of $\mV_{k+1}$ and only covers elements of $\mV_{k-1}$. We call $\mV_k$ the \textbf{$k$-th layer} of $\mN$. \end{definition} Notice that in an $m$-level network, any path from $S$ to any $T\in \mathbf{T}$ is of length $m$. Moreover, the value of $m$ and the layers $\mV_k$, for $k \in \{0,...,m\}$, in Definition~\ref{def:n-level} are uniquely determined by the network~$\mN$. Many of the results of this paper rely on particular classes of 2-level and~3-level networks, which we now define. \begin{definition} \label{def:special_3level} \label{def:special_2level} A 2-level network is \textbf{simple} if it has a single terminal. A 3-level network is \textbf{simple} if it has a single terminal, each intermediate node at distance 1 from the source has in-degree equal to~1, and each intermediate node at distance~1 from the terminal has out-degree equal to~1. \end{definition} In order to denote 2- and 3-level networks more compactly, we will utilize (simplified) adjacency matrices of the bipartite subgraphs induced by subsequent node layers. First we present the most general notation for an $m$-level network, then discuss the particular cases of~2- and~3-level networks. \begin{notation} \label{notmtx} Let $\mN_m=(\mV,\mE,S,\bfT)$ be an $m$-level network and let $\mV_0,...,\mV_m$ be as in Definition~\ref{def:n-level} (the subscript in $\mN_m$ has the sole function of stressing the number of levels). Fix an enumeration of the elements of each $\mV_k$, $k \in \{0,...,m\}$. We denote by $\smash{M^{m,k}}$ the matrix representing the graph induced by the nodes in layers $k-1$ and $k$ of $\mN_m$, for $k\in \{1,...,m\}$. Specifically, $\smash{M^{m,k}}$ has dimensions $\smash{|\mV_{k-1}|\times |\mV_{k}|}$, and $\smash{M^{m,k}_{ij}=\ell}$ if and only if there are $\ell$ edges from node $i$ of $\mV_{k-1}$ to node $j$ of $\mV_{k}$. 
We can then denote the network by $\smash{(M^{m,1},M^{m,2},\ldots,M^{m,m})}$. It is easy to check that $\mN_m$ is uniquely determined by this representation. In a 2-level network, we have two adjacency matrices $\smash{M^{2,1}}$ and $\smash{M^{2,2}}$, and in a 3-level network we have three adjacency matrices $\smash{M^{3,1}}$ and $\smash{M^{3,2}}$, and $\smash{M^{3,3}}$. Notice that in a simple~3-level network, $\smash{M^{3,1}}$ and $\smash{M^{3,3}}$ will always be all-ones vectors, and so we may drop them from the notation. With a slight abuse of notation, we will denote $\smash{M^{2,2}}$ as a row vector in a simple~2-level network (instead of as a column vector). \end{notation} We give two examples to illustrate the previous notation. \begin{example} \label{ex:simplified} Consider the Diamond Network of Section \ref{sec:diamond}; see Figure~\ref{fig:diamond}. Following Notation~\ref{notmtx}, we may represent this network as $\smash{([1,2],[1,1]^\top)}$. Because the network is simple, we may abuse notation and simplify this to $\smash{([1,2],[1,1])}$. Similarly, the Mirrored Diamond Network (see Figure~\ref{fig:mirrored}) may be represented as $\smash{([2,2],[1,1])}$. \end{example} \begin{example} \label{ex:Hexagon} Consider the 3-level network shown in Figure \ref{fig:Hexagon}. Following Notation~\ref{notmtx}, we may represent this network as \[ \left(\begin{bmatrix} 1 & 1 & 1 & 1 & 1 & 1 \end{bmatrix}, \begin{bmatrix} 1 & 1 & 0 & 0 \\ 1 & 1 & 0 & 0 \\ 1 & 1 & 0 & 0 \\ 1 & 1 & 0 & 0 \\ 0 & 0 & 1 & 1 \\ 0 & 0 & 1 & 1 \end{bmatrix}, \begin{bmatrix} 1 \\ 1 \\ 1 \\ 1 \end{bmatrix}\right) .\] More simply, the simple 3-level network may be represented using only the center matrix. \end{example} \begin{figure}[htbp] \centering \scalebox{0.90}{ \begin{tikzpicture} \tikzset{vertex/.style = {shape=circle,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{nnode/.style = {shape=circle,fill=myg,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{edge/.style = {->,> = stealth}} \tikzset{dedge/.style = {densely dotted,->,> = stealth}} \tikzset{ddedge/.style = {dashed,->,> = stealth}} \node[vertex] (S1) {$S$}; \node[shape=coordinate,right=\mynodespace of S1] (K) {}; \node[nnode,above=1.2\mynodespace of K] (V1) {$V_1$}; \node[nnode,above=0.7\mynodespace of K] (V2) {$V_2$}; \node[nnode,above=0.3\mynodespace of K] (V3) {$V_3$}; \node[nnode,below=0\mynodespace of K] (V4) {$V_4$}; \node[nnode,below=0.4\mynodespace of K] (V5) {$V_5$}; \node[nnode,below=0.8\mynodespace of K] (V6) {$V_6$}; \node[vertex,right=3.5\mynodespace of S1] (T) {$T$}; \node[nnode,right=1.3\mynodespace of V1 ] (V7) {$V_7$}; \node[nnode,right=1.3\mynodespace of V3] (V8) {$V_8$}; \node[nnode,right=1.3\mynodespace of V4] (V9) {$V_9$}; \node[nnode,right=1.3\mynodespace of V6] (V10) {$V_{10}$}; \draw[ddedge,bend left=0] (S1) to node[sloped,fill=white, inner sep=0pt]{} (V1); \draw[ddedge,bend right=0] (S1) to node[sloped,fill=white, inner sep=0pt]{} (V2); \draw[ddedge,bend left=0] (S1) to node[sloped,fill=white, inner sep=0pt]{} (V3); \draw[ddedge,bend right=0] (S1) to node[sloped,fill=white, inner sep=0pt]{} (V4); \draw[ddedge,bend right=0] (S1) to node[sloped,fill=white, inner sep=0pt]{} (V5); \draw[ddedge,bend right=0] (S1) to node[sloped,fill=white, inner sep=0pt]{} (V6); \draw[edge,bend left=0] (V1) to node{} (V7); \draw[edge,bend left=0] (V1) to node{} (V8); \draw[edge,bend left=0] (V2) to node{} (V7); \draw[edge,bend left=0] (V2) to node{} (V8); \draw[edge,bend left=0] (V3) to node{} (V7); \draw[edge,bend left=0] (V3) to node{} (V8); 
\draw[edge,bend left=0] (V4) to node{} (V7); \draw[edge,bend left=0] (V4) to node{} (V8); \draw[edge,bend right=0] (V5) to node{} (V9); \draw[edge,bend right=0] (V5) to node{} (V10); \draw[edge,bend right=0] (V6) to node{} (V9); \draw[edge,bend right=0] (V6) to node{} (V10); \draw[edge,bend left=0] (V7) to node{} (T); \draw[edge,bend left=0] (V8) to node{} (T); \draw[edge,bend left=0] (V9) to node{} (T); \draw[edge,bend left=0] (V10) to node{} (T); \end{tikzpicture} } \caption{Network for Examples~\ref{ex:Hexagon} and \ref{ex:vulne}. \label{fig:Hexagon}} \end{figure} \subsection{Reduction from 3- to 2-Level Networks} \label{sec:3to2reduc} In this subsection we describe a procedure to obtain a simple 2-level network from a simple~3-level network. In Section \ref{sec:double-cut-bd} we will show that, under certain assumptions, the capacity of any network can be upper bounded by the capacity of a simple 3-level network constructed from it. Using the procedure described in this subsection, we will be able to upper bound the capacity of an arbitrary network with that of an \textit{induced} simple 2-level network (obtaining sharp bounds in some cases). Let $\mN_3$ be a simple 3-level network defined by matrix $\smash{M^{3,2}}$, along with all-ones matrices~$\smash{M^{3,1}}$~and~$\smash{M^{3,3}}$ (see Notation~\ref{notmtx}). We construct a simple 2-level network $\mN_2$, defined via~$\smash{M^{2,1}}$ and~$\smash{M^{2,2}}$ as follows. Consider the bipartite graph~$\smash{G^{3,2}}$ corresponding to adjacency matrix~$\smash{M^{3,2}}$; if~$\smash{G^{3,2}}$ has $\ell$ connected components, then let~$\smash{M^{2,1}}$ and~$\smash{M^{2,2}}$ both have dimensions~$1\times \ell$ (where we are considering the simplified representation for a simple 2-level network; see Example~\ref{ex:simplified}). Let~$\smash{M^{2,1}_{1i}=a}$ if and only if the $i$th connected component of~$\smash{G^{3,2}}$ has~$a$ vertices in~$\mV_{1}$, and let~$\smash{M^{2,2}_{1i}=b}$ if and only if the $i$th connected component of~$\smash{G^{3,2}}$ has~$b$ vertices in~$\mV_{2}$. Observe that the sum of the entries of~$\smash{M^{2,1}}$ is equal to the sum of the entries of~$\smash{M^{3,1}}$, and similarly with~$\smash{M^{2,2}}$ and~$\smash{M^{3,3}}$. \begin{definition}\label{def:associated} We call the network $\mN_2$ constructed above the 2-level network \textbf{associated} with the 3-level network $\mN_3$. \end{definition} \begin{example} Consider the network of Figure \ref{fig:Hexagon}. The corresponding 2-level network is depicted in Figure~\ref{fig:Hexagon-2level}. 
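In the compact notation of Notation~\ref{notmtx}, the construction can be carried out explicitly: the bipartite graph $G^{3,2}$ of the network in Figure~\ref{fig:Hexagon} has two connected components, containing $4$ and $2$ vertices of $\mV_1$ and $2$ and $2$ vertices of $\mV_2$, respectively, so the associated 2-level network is $([4,2],[2,2])$.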
\end{example} \begin{figure}[htbp] \centering \begin{tikzpicture} \tikzset{vertex/.style = {shape=circle,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{nnode/.style = {shape=circle,fill=myg,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{edge/.style = {->,> = stealth}} \tikzset{dedge/.style = {densely dotted,->,> = stealth}} \tikzset{ddedge/.style = {dashed,->,> = stealth}} \node[vertex] (S1) {$S$}; \node[shape=coordinate,right=\mynodespace of S1] (K) {}; \node[nnode,above=0.5\mynodespace of K] (V1) {$V_1$}; \node[nnode,below=0.5\mynodespace of K] (V2) {$V_2$}; \node[vertex,right=2\mynodespace of S1] (T) {$T$}; \draw[ddedge,bend left=10] (S1) to node[sloped,fill=white, inner sep=0pt]{$e_2$} (V1); \draw[ddedge,bend right=10] (S1) to node[sloped,fill=white, inner sep=0pt]{$e_3$} (V1); \draw[ddedge,bend left=30] (S1) to node[sloped,fill=white, inner sep=0pt]{$e_1$} (V1); \draw[ddedge,bend right=30] (S1) to node[sloped,fill=white, inner sep=0pt]{$e_4$} (V1); \draw[edge,bend right=15] (V1) to node[sloped,fill=white, inner sep=0pt]{$e_8$} (T); \draw[edge,bend left=15] (V1) to node[sloped,fill=white, inner sep=0pt]{$e_7$} (T); \draw[ddedge,bend right=15] (S1) to node[sloped,fill=white, inner sep=0pt]{$e_6$} (V2); \draw[ddedge,bend left=15] (S1) to node[sloped, fill=white, inner sep=0pt]{$e_5$} (V2); \draw[edge,bend right=15] (V2) to node[sloped,fill=white, inner sep=0pt]{$e_{10}$} (T); \draw[edge,bend left=15] (V2) to node[sloped,fill=white, inner sep=0pt]{$e_9$} (T); \end{tikzpicture} \caption{{{The simple 2-level network corresponding to the (simple, 3-level) network of Example~\ref{ex:Hexagon}.}}}\label{fig:Hexagon-2level} \end{figure} While this deterministic process results in a unique simple 2-level network given a simple~3-level network, there may be multiple 3-level networks that result in the same 2-level network. This however does not affect the following statement, which gives an upper bound for the capacity of a 3-level network in terms of the capacity of the corresponding 2-level network, when the vulnerable edges are in both cases those directly connected with the source. While the argument extends to more generalized choices of the vulnerable edges, in this paper we concentrate on this simplified scenario for ease of exposition. \begin{theorem} \label{thm:channel} Let $\mN_3$ be a simple 3-level network, and let $\mN_2$ be the simple 2-level network associated to it. Let $\mU_3$ and $\mU_2$ be the set of edges directly connected to the sources of $\mN_3$ and~$\mN_2$, respectively. Then for all network alphabets $\mA$ and for all $t \ge 0$ we have \[\CC_1(\mN_3,\mA,\mU_3,t)\leq \CC_1(\mN_2,\mA,\mU_2,t).\] \end{theorem} \begin{example} \label{ex:vulne} Consider the network of Figure \ref{fig:Hexagon} and the corresponding 2-level network in Figure~\ref{fig:Hexagon-2level}. Suppose that the vulnerable edges in both networks are those directly connected to the source, and in both cases we allow up to $t$ corrupted edges. Then Theorem~\ref{thm:channel} implies that the capacity of the network of Figure \ref{fig:Hexagon} is upper bounded by the capacity of the network of Figure \ref{fig:Hexagon-2level}. \end{example} \begin{proof}[Proof of Theorem~\ref{thm:channel}] Let $\mC_3$ be an outer code and $\mF_3$ be a network code for $(\mN_3,\mA)$ such that~$\mC_3$ is unambiguous for the channel $\Omega[\mN_3,\mA,\mF_3,S \to T,\mU_3,t]$. 
Let $\smash{M^{3,2}}$ be the matrix defining $\smash{\mN_3}$, and let $\smash{G^{3,2}}$ denote the bipartite graph with (simplified) adjacency matrix $\smash{M^{3,2}}$. Let $\smash{V^3_{ij}}$ denote the $j$th node in the right part of the $i$th connected component of~$\smash{G^{3,2}}$, and let $\smash{\mF_{V^3_{ij}}}$ denote the function at $\smash{V^3_{ij}}$ defined by $\smash{\mF_3}$. Let the neighborhood of $\smash{V^3_{ij}}$ in~$\smash{G^{3,2}}$ contain the (ordered) set of vertices \[\smash{V^3_{ij1},\, V^3_{ij2}, \, \ldots,\, V^3_{ij\degin(V^3_{ij})}},\] where $\smash{\degin(V^3_{ij})}$ is the in-degree of $\smash{V^3_{ij}}$. Then, for each $\smash{1\leq k\leq \degin(V^3_{ij})}$, denote the network code function at $\smash{V^3_{ijk}}$ by $\smash{\mF_{V^3_{ijk}}}$. Notice that every $\smash{\mF_{V^3_{ijk}}}$ is a function with domain $\mA$ and codomain~$\mA^{\degout(V^3_{ijk})}$, while each function $\smash{\mF_{V^3_{ij}}}$ has domain $\mA^{\degin(V^3_{ij})}$ and codomain $\mA$. Note that every node in the left part of~$\smash{G^{3,2}}$ has a label $V^{3}_{ijk}$ for some indices $i$, $j$, and $k$ due to assumption~\ref{prnG} of Definition~\ref{def:network}. Each such node can have multiple labels~$\smash{V^3_{ijk}}$ and~$\smash{V^3_{ij'k'}}$ where $(j,k)\neq (j',k')$; of course, we stipulate that $$\mF_{V^3_{ijk}}=\mF_{V^3_{ij'k'}}.$$

We claim there exists $\mF_2$ such that $\mC_3$ is also unambiguous for $\Omega[\mN_2,\mA,\mF_2,S \to T,\mU_2,t]$. Indeed, define $\mF_2$ for each intermediate node $V_i$ in $\mN_2$ that corresponds to connected component~$i$ of~$G^{3,2}$ as an appropriate composition of functions at nodes in $\mV_1$ and $\mV_2$ of the 3-level network. More technically, we define $\mF_{V_i}$ as follows (here the product symbols denote the Cartesian product):
\begin{align*} \mF_{V_i} : \mA^{\degin(V_i)} &\to \mA^{\degout(V_i)},\\ x&\mapsto \prod_{j=1}^{\degout(V_i)} \mF_{V^3_{ij}}\left(\prod_{k=1}^{\degin(V^3_{ij})} \mF_{V^3_{ijk}}(x_{ijk})\vert_{V_{ij}^3}\right), \end{align*}
where $x_{ijk}$ is the coordinate of the vector $x$ corresponding to node $\smash{V^{3}_{ijk}}$ in the left part of the~$i$th connected component of $\smash{G^{3,2}}$, and $\smash{\mF_{V^3_{ijk}}(x_{ijk})\vert_{V_{ij}^3}}$ is the restriction of $\smash{\mF_{V^3_{ijk}}(x_{ijk})}$ to the coordinate corresponding to $\smash{V_{ij}^3}$.

We claim that the fan-out set of any $x\in \mC_3$ over the channel $\Omega[\mN_2,\mA,\mF_2,S \to T,\mU_2,t]$ is exactly equal to the fan-out set of $x$ over the channel $\Omega[\mN_3,\mA,\mF_3,S \to T,\mU_3,t]$. This follows directly from the definitions of $\mF_2$ and $\mF_3$, and the fact that both networks are corrupted on up to $t$ edges of their first layers. Suppose then, by way of contradiction, that $\mC_3$ is not unambiguous for $\Omega[\mN_2,\mA,\mF_2,S \to T,\mU_2,t]$. That is, there exist $x,x'\in \mC_3$ such that~$x\neq x'$ but the intersection of the fan-out sets of $x$ and $x'$ is nonempty. Since the two fan-out sets coincide channel by channel, $\mC_3$ was then not unambiguous for $\Omega[\mN_3,\mA,\mF_3,S \to T,\mU_3,t]$ to begin with, a contradiction. We conclude that $\mC_3$ is unambiguous for~$\Omega[\mN_2,\mA,\mF_2,S \to T,\mU_2,t]$. \end{proof}

The proof above contains rather heavy notation and terminology. We therefore illustrate it with an example.
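Before turning to the example, we remark that the construction of $\mF_2$ from $\mF_3$ is nothing more than a nested composition of the node functions. The following Python sketch (purely illustrative; the data structures and names are ours) mirrors the displayed definition of $\mF_{V_i}$, with a dictionary lookup playing the role of the restriction $\cdot\vert_{V^3_{ij}}$.

\begin{verbatim}
# Illustrative sketch of the composition defining F_{V_i} (names are ours).
# left_fun[u]  : function A -> dict {right-part neighbour w of u : symbol}
# right_fun[w] : function taking a tuple of symbols (one per in-edge) -> symbol
# left_of[w]   : ordered list of left-part neighbours of the right-part node w
def compose_component(right_nodes, left_of, left_fun, right_fun):
    def F_Vi(x):               # x: dict {left-part node u : received symbol}
        out = []
        for w in right_nodes:  # one output symbol per right-part node V^3_{ij}
            inputs = tuple(left_fun[u](x[u])[w] for u in left_of[w])
            out.append(right_fun[w](inputs))
        return tuple(out)
    return F_Vi
\end{verbatim}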
\begin{figure}[htbp]
\centering
\scalebox{0.90}{
\begin{tikzpicture}
\tikzset{vertex/.style = {shape=circle,draw,inner sep=0pt,minimum size=1.9em}}
\tikzset{nnode/.style = {shape=circle,fill=myg,draw,inner sep=0pt,minimum size=1.9em}}
\tikzset{edge/.style = {->,> = stealth}}
\tikzset{dedge/.style = {densely dotted,->,> = stealth}}
\tikzset{ddedge/.style = {dashed,->,> = stealth}}
\node[vertex] (S1) {$S$};
\node[shape=coordinate,right=\mynodespace of S1] (K) {};
\node[nnode,above=1.2\mynodespace of K] (V1) {$V_{111}^3$};
\node[nnode,above=0.7\mynodespace of K] (V2) {$V_{112}^3$};
\node[nnode,above=0.3\mynodespace of K] (V3) {$V_{113}^3$};
\node[nnode,below=0\mynodespace of K] (V4) {$V_{114}^3$};
\node[nnode,below=0.4\mynodespace of K] (V5) {$V_{211}^3$};
\node[nnode,below=0.8\mynodespace of K] (V6) {$V_{212}^3$};
\node[vertex,right=3.5\mynodespace of S1] (T) {$T$};
\node[nnode,right=1.3\mynodespace of V1 ] (V7) {$V_{11}^3$};
\node[nnode,right=1.3\mynodespace of V3] (V8) {$V_{12}^3$};
\node[nnode,right=1.3\mynodespace of V4] (V9) {$V_{21}^3$};
\node[nnode,right=1.3\mynodespace of V6] (V10) {$V_{22}^3$};
\draw[ddedge,bend left=0] (S1) to node[sloped,fill=white, inner sep=1pt]{$x_{111}$} (V1);
\draw[ddedge,bend right=0] (S1) to node[sloped,fill=white, inner sep=1pt]{$x_{112}$} (V2);
\draw[ddedge,bend left=0] (S1) to node[sloped,fill=white, inner sep=1pt]{$x_{113}$} (V3);
\draw[ddedge,bend right=0] (S1) to node[sloped,fill=white, inner sep=1pt]{$x_{114}$} (V4);
\draw[ddedge,bend right=0] (S1) to node[sloped,fill=white, inner sep=1pt]{$x_{211}$} (V5);
\draw[ddedge,bend right=0] (S1) to node[sloped,fill=white, inner sep=1pt]{$x_{212}$} (V6);
\draw[edge,bend left=0] (V1) to node{} (V7);
\draw[edge,bend left=0] (V1) to node{} (V8);
\draw[edge,bend left=0] (V2) to node{} (V7);
\draw[edge,bend left=0] (V2) to node{} (V8);
\draw[edge,bend left=0] (V3) to node{} (V7);
\draw[edge,bend left=0] (V3) to node{} (V8);
\draw[edge,bend left=0] (V4) to node{} (V7);
\draw[edge,bend left=0] (V4) to node{} (V8);
\draw[edge,bend right=0] (V5) to node{} (V9);
\draw[edge,bend right=0] (V5) to node{} (V10);
\draw[edge,bend right=0] (V6) to node{} (V9);
\draw[edge,bend right=0] (V6) to node{} (V10);
\draw[edge,bend left=0] (V7) to node{} (T);
\draw[edge,bend left=0] (V8) to node{} (T);
\draw[edge,bend left=0] (V9) to node{} (T);
\draw[edge,bend left=0] (V10) to node{} (T);
\end{tikzpicture}
}
\caption{Network for Example \ref{ex:hex}. \label{fig:Hex}}
\end{figure}

\begin{example} \label{ex:hex} Consider the network $\mN_3$ of Figure~\ref{fig:Hex}, with the vertex labeling shown there. Suppose the capacity is achieved by a network code $$\{\mF_{V_{111}^3}, \, \mF_{V_{112}^3}, \, \mF_{V_{113}^3}, \, \mF_{V_{114}^3}, \, \mF_{V_{211}^3}, \, \mF_{V_{212}^3}, \, \mF_{V_{11}^3}, \, \mF_{V_{12}^3}, \, \mF_{V_{21}^3}, \, \mF_{V_{22}^3}\}$$ for $(\mN_3,\mA)$ and by an outer code $\mC_3\subseteq \mA^6$ unambiguous for the channel~$\Omega[\mN_3,\mA,\mF_3,S \to T,\mU_3,t]$. Let $x=(x_{111},x_{112},x_{113},x_{114},x_{211},x_{212}) \in \mC_3$ and consider the scheme in Figure \ref{fig:Hex}.
The way functions $\mF_{V_1}$ and $\mF_{V_2}$ are defined in the proof of Theorem~\ref{thm:channel} gives that the alphabet symbols
\begin{gather*} \mF_{V_{11}^3}\big(\mF_{V_{111}^3}(x_{111})\vert_{V_{11}^3},\mF_{V_{112}^3}(x_{112})\vert_{V_{11}^3},\mF_{V_{113}^3}(x_{113})\vert_{V_{11}^3},\mF_{V_{114}^3}(x_{114})\vert_{V_{11}^3}\big), \\ \mF_{V_{12}^3}\big(\mF_{V_{111}^3}(x_{111})\vert_{V_{12}^3},\mF_{V_{112}^3}(x_{112})\vert_{V_{12}^3},\mF_{V_{113}^3}(x_{113})\vert_{V_{12}^3},\mF_{V_{114}^3}(x_{114})\vert_{V_{12}^3}\big), \\ \mF_{V_{21}^3}\big(\mF_{V_{211}^3}(x_{211})\vert_{V_{21}^3},\mF_{V_{212}^3}(x_{212})\vert_{V_{21}^3}\big), \mbox{ and } \\ \mF_{V_{22}^3}\big(\mF_{V_{211}^3}(x_{211})\vert_{V_{22}^3},\mF_{V_{212}^3}(x_{212})\vert_{V_{22}^3}\big) \end{gather*}
are carried over the edges $e_7$, $e_8$, $e_9$, and $e_{10}$, respectively, in Figure \ref{fig:Hexagon-2level}. Observe that the fan-out set of any $x\in \mC_3$ over the channel $\Omega[\mN_2,\mA,\mF_2,S \to T,\mU_2,t]$ is exactly equal to the fan-out set of $x$ over the channel $\Omega[\mN_3,\mA,\mF_3,S \to T,\mU_3,t]$, as desired. \end{example}

\subsection{Some Families of Simple 2-Level Networks}
\label{sec:families}

In this subsection, we introduce five families of simple 2-level networks. Thanks to Theorem~\ref{thm:channel}, any upper bound for the capacities of these networks translates into an upper bound for the capacities of the 3-level networks associated with them; see Definition~\ref{def:associated}. The five families introduced in this subsection should be regarded as the ``building blocks'' of the theory developed in this paper, since in Section~\ref{sec:double-cut-bd} we will show how to use them to obtain upper bounds for the capacities of larger networks. We focus our attention on the scenario where the adversary acts on the edges directly connected to the source $S$, which we denote by $\mU_S$ throughout this section. The families we introduce will be studied in detail in later sections, but are collected here for ease of reference. Each family is parametrized by a positive integer (denoted by $t$ or $s$ for reasons that will become clear below).

\begin{family} \label{fam:a} Define the simple 2-level networks $$\mathfrak{A}_t=([t,2t],[t,t]), \quad t \ge 1.$$ Note that they reduce to the Diamond Network of Section~\ref{sec:diamond} for $t=1$. The Generalized Network Singleton Bound of Theorem~\ref{sbound} reads $\CC_1(\mathfrak{A}_t,\mA,\mU_S,t) \le t$ for any alphabet $\mA$. Results related to this family can be found in Theorem \ref{thm:meta} and Proposition \ref{prop:atleasta}. \end{family}

\begin{family} \label{ex:s} \label{fam:b} Define the simple 2-level networks $$\mathfrak{B}_s=([1,s+1],[1,s]), \quad s \ge 1.$$ The case where $s=1$ yields the Diamond Network of Section~\ref{sec:diamond}. The Generalized Network Singleton Bound of Theorem~\ref{sbound} for $t=1$ reads $\CC_1(\mathfrak{B}_s,\mA,\mU_S,1) \le s$ for any alphabet $\mA$. Note that for this family we will always take $t=1$, which explains our choice of using a different index,~$s$, for the family members. Results related to this family can be found in Theorem \ref{thm:notmet} and Corollary \ref{cor:sbs}. \end{family}

\begin{family} \label{ex:u} \label{fam:c} Define the simple 2-level networks $$\mathfrak{C}_t=([t,t+1],[t,t]), \quad t \ge 2.$$ The case $t=1$ coincides with the network $\mathfrak{A}_1$ of Family \ref{fam:a} and is thus formally excluded here, for a reason we will explain in Remark \ref{rem:exclude}.
The Generalized Network Singleton Bound of Theorem~\ref{sbound} reads $\CC_1(\mathfrak{C}_t,\mA,\mU_S,t) \le 1$ for any alphabet $\mA$. Our result related to this family can be found in Theorem \ref{thm:metc}. \end{family} \begin{family} \label{fam:d} Define the simple 2-level networks $$\mathfrak{D}_t=([2t,2t],[1,1]), \quad t \ge 1.$$ The case where $t=1$ yields the Mirrored Diamond Network of Section~\ref{sec:diamond}. The Generalized Network Singleton Bound of Theorem~\ref{sbound} reads $\CC_1(\mathfrak{D}_t,\mA,\mU_S,t) \le 1$ for any alphabet $\mA$. Results related to this family can be found in Theorems \ref{thm:metd} and \ref{thm:linmirr}. \end{family} \begin{family} \label{fam:e} Define the simple 2-level networks $$\mathfrak{E}_t=([t,t+1],[1,1]), \quad t \ge 1.$$ The case where $t=1$ yields the Diamond Network of Section~\ref{sec:diamond}. The Generalized Network Singleton Bound of Theorem~\ref{sbound} reads $\CC_1(\mathfrak{E}_t,\mA,\mU_S,t) \le 1$ for any alphabet $\mA$. Results related to this family can be found in Theorems \ref{thm:mete} and \ref{thm:8.4}. \end{family} As we will see, the results of this section and of the next show that the Generalized Network Singleton Bound of Theorem~\ref{sbound} is \textit{never} sharp for Families~\ref{fam:a},~\ref{fam:b}, and~\ref{fam:e}, \textit{no matter} what the alphabet is. The bound is, however, sharp for Families~\ref{fam:c} and~\ref{fam:d} under the assumption that the alphabet is a sufficiently large finite field, as we will show in Section~\ref{sec:2level_lower}. \section{Simple 2-level Networks: Upper Bounds} \label{sec:upper} In this section we present upper bounds on the capacity of simple 2-level networks based on a variety of techniques. We then apply these bounds to the families introduced in Subsection~\ref{sec:families}. We start by establishing the notation that we will follow in the sequel. \begin{notation} \label{not:s5} Throughout this section, $n \ge 2$ is an integer and $$\mN=(\mV,\mE,S,\{T\})=([a_1,\ldots,a_n],[b_1,\ldots,b_n])$$ is a simple 2-level network; see Definition~\ref{def:special_2level}. We denote by $\mU_S \subseteq \mE$ the set of edges directly connected to the source $S$ and let $\mA$ be a network alphabet. We denote the intermediate nodes of $\mN$ by $V_1,\ldots,V_n$, which correspond to the structural parameters $a_1, \ldots, a_n$ and $b_1, \ldots, b_n$. If~$\mF$ is a network code for $(\mN,\mA)$, then we simply write $\mF_i$ for $\smash{\mF_{V_i}}$. Observe moreover that the network code $\mF$ can be ``globally'' interpreted as a function $$\mF: \mA^{a_1+\ldots +a_n} \to \mA^{b_1+\ldots +b_n},$$ although of course allowed functions $\mF$ are restricted by the topology of the underlying 2-level network. \end{notation} We start by spelling out the Generalized Network Singleton Bound of Theorem~\ref{sbound} specifically for simple 2-level networks. \begin{corollary}[Generalized Network Singleton Bound for simple 2-level networks] \label{cor:sing} Following Notation~\ref{not:s5}, for all $t \ge 0$ we have \[\CC_1(\mN,\mA,\mU_S,t) \leq \min_{P_1\sqcup P_2=\{1,\ldots,n\}}\left(\sum_{i\in P_1} b_i+\max\left\{0,\sum_{i\in P_2} a_i - 2t\right\}\right),\] where the minimum is taken over all 2-partitions $P_1, P_2$ of the set $\{1,\ldots,n\}$. \end{corollary} The upper bounds we derive in this section use a ``mix'' of projection and packing arguments. We therefore continue by reminding the reader of the notions of the Hamming metric \textit{ball} and~\textit{shell}. 
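As a brief aside before recalling these notions, we note that the minimum in Corollary~\ref{cor:sing} ranges over the $2^n$ two-partitions of $\{1,\ldots,n\}$ and can be evaluated by brute force. The following sketch (purely illustrative; the function name is ours) recovers, for example, the bounds quoted above for Families~\ref{fam:a}, \ref{fam:d}, and~\ref{fam:e}.

\begin{verbatim}
from itertools import product

def singleton_bound(a, b, t):
    # bound of the Corollary for the simple 2-level network ([a_1..a_n],[b_1..b_n])
    n = len(a)
    best = float("inf")
    for mask in product([0, 1], repeat=n):      # mask[i] == 1 means i is in P_1
        p1 = sum(b[i] for i in range(n) if mask[i])
        p2 = max(0, sum(a[i] for i in range(n) if not mask[i]) - 2*t)
        best = min(best, p1 + p2)
    return best

t = 4
print(singleton_bound([t, 2*t], [t, t], t))     # Family A: prints t
print(singleton_bound([2*t, 2*t], [1, 1], t))   # Family D: prints 1
print(singleton_bound([t, t + 1], [1, 1], t))   # Family E: prints 1
\end{verbatim}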
\begin{notation} \label{not:ballsetc} Given an outer code $\smash{\mC \subseteq \mA^{a_1+a_2+\ldots+a_n}}$, we let $\smash{\pi_i(\mC)}$ be the projection of $\mC$ onto the $a_i$ coordinates corresponding to the edges to intermediate node $V_{i}$ of the simple 2-level network $\mN$. For example, $\smash{\pi_1(\mC)}$ is the projection onto the first $a_1$ coordinates of the codeword. Moreover, for a given~$x \in \mC$, we denote by $B^\HH_t(x)$ the \textbf{Hamming ball} of radius $t$ with center~$x$, and by $S^\HH_t(x)$ the \textbf{shell} of that ball. In symbols, $$B^\HH_t(x)=\{y \in \mA^{a_1+a_2+\ldots+a_n} \st d^\HH(x,y) \le t\}, \qquad S^\HH_t(x)=\{y \in B^\HH_t(x) \st d^\HH(x,y) = t\},$$ where $d^\HH$ denotes the usual Hamming distance. \end{notation}

The next observation focuses on the case $n=2$ to illustrate an idea that will be generalized in Remark \ref{rem:id} below.

\begin{remark} \label{rem:packing} If $n=2$, then for all $t \ge 0$ an outer code $\mC \subseteq \mA^{a_1+a_2}$ is unambiguous for the channel $\Omega[\mN,\mA,\mF,S\to T,\mU_S,t]$ if and only if $$\mF(x+e)=(\mF_1(\pi_1(x+e)), \mF_2(\pi_2(x+e))) \neq (\mF_1(\pi_1(x'+e')), \mF_2(\pi_2(x'+e'))) =\mF(x'+e')$$ for all $x,x' \in \mC$ with $x \neq x'$ and for all $e,e' \in \mA^{a_1+a_2}$ of Hamming weight at most $t$. Since this means that the sets $\mF\left(B^\HH_t(x)\right)$, for $x \in \mC$, are pairwise disjoint, a packing argument yields \begin{equation} \label{eqn:packing_2} \sum_{x \in \mC} |\mF\left(B^\HH_t(x)\right)| \le {|\mA|}^{b_1+b_2}. \end{equation} \end{remark}

We will work towards extending the packing bound idea outlined in Remark~\ref{rem:packing} to higher numbers of intermediate nodes using the properties of simple 2-level networks. We start with the following result.

\begin{lemma} \label{lem:id} Following Notation~\ref{not:s5}, suppose $n=2$ and $b_1 \ge a_1$. Let $t \ge 0$ and suppose that~$\mC \subseteq \mA^{a_1+a_2}$ is unambiguous for the channel $\Omega[\mN,\mA,\mF,S\to T,\mU_S,t]$. Let $\mF'_1:\mA^{a_1} \to \mA^{b_1}$ be any injective map. Then $\mC$ is unambiguous for the channel $\Omega[\mN,\mA,\{\mF_1',\mF_2\},S\to T,\mU_S,t]$ as well. \end{lemma}

\begin{proof} Let $\mF=\{\mF_1,\mF_2\}$ and $\mF'=\{\mF'_1,\mF_2\}$. Towards a contradiction and using Remark~\ref{rem:packing}, suppose that there are $x,x' \in \mC$ with $x \neq x'$ and $e,e' \in \mA^{a_1+a_2}$ of Hamming weight at most $t$ with~$\mF'(x+e)=\mF'(x'+e')$. This implies $$(\mF'_1(\pi_1(x+e)), \mF_2(\pi_2(x+e)))= (\mF'_1(\pi_1(x'+e')), \mF_2(\pi_2(x'+e'))).$$ Since $\mF'_1$ is injective, we have $\pi_1(x+e) = \pi_1(x'+e')$ and thus $$(\mF_1(\pi_1(x+e)), \mF_2(\pi_2(x+e)))= (\mF_1(\pi_1(x'+e')), \mF_2(\pi_2(x'+e'))).$$ That is, $\mF(x+e)=\mF(x'+e')$, contradicting the unambiguity of $\mC$ for the channel $\Omega[\mN,\mA,\mF,S\to T,\mU_S,t]$. \end{proof}

\begin{remark} \label{rem:id} Lemma \ref{lem:id} extends easily to an arbitrary number of intermediate nodes, showing that whenever~$b_i \ge a_i$ for some $i$, without loss of generality we can assume $b_i=a_i$ and take the corresponding function $\mF_i$ to be the identity (ignoring extraneous outgoing edges). In other words, the capacity obtained by maximizing over all possible choices of $\mF$ is the same as the capacity obtained by maximizing over the $\mF$'s where $\mF_i$ is equal to the identity whenever $b_i \ge a_i$ (again, where some edges can be disregarded). This will simplify the analysis of the network families we study.
In particular, we can reduce the study of a simple 2-level network~$\mN=([a_1,...,a_n],[b_1,...,b_n])$ as in Notation~\ref{not:s5} to the study of $$\mN'=([a_1,\ldots,a_r,a_{r+1},\ldots,a_n],[a_1,\ldots,a_r,b_{r+1},\ldots,b_n]),$$ where, up to a permutation of the vertices and edges, \begin{equation*} r=\max\{i \st a_j \le b_j \mbox{ for all } 1 \le j \le i\}. \end{equation*} \end{remark}

The next result combines the observation of Remark~\ref{rem:id} with a packing argument to derive an upper bound on the capacity of certain simple 2-level networks.

\begin{theorem}[First Packing Bound] \label{thm:down} Following Notation~\ref{not:s5}, suppose that $a_i \le b_i$ for all $1\leq i \leq r$. Let $t \ge 0$ and let $\mC$ be an unambiguous code for $\Omega[\mN,\mA,\mF,S \to T,\mU_S,t]$. Then $$\sum_{\substack{t_1,\ldots,t_r \ge 0 \\ t_1+\ldots+t_r \le t}} \, \prod_{i=1}^r\binom{a_i}{t_i}(|\mA|-1)^{t_i} \, \sum_{x \in \mC} \, \prod_{j=r+1}^n \left|\mF_j\left(B^\HH_{t-(t_1+\ldots+t_r)}(\pi_j(x))\right)\right| \le |\mA|^{b_1+b_2+\ldots +b_n}.$$ \end{theorem}

\begin{proof} Let $\mF=\{\mF_1,\ldots,\mF_r,\ldots,\mF_n\}$ be a network code for $(\mN,\mA)$, where the first $r$ functions correspond to the pairs~$(a_1,b_1),\ldots,(a_r,b_r)$. By Lemma \ref{lem:id} and Remark \ref{rem:id}, we may assume without loss of generality that $\mF_i$ is the identity map for $1 \le i \le r$, ignoring the extraneous outgoing edges. For $x \in \mC$, we have $$B^\HH_t(x) = \bigsqcup_{t_1+\ldots +t_n \le t} \left[ S^\HH_{t_1}(\pi_1(x)) \times \cdots \times S^\HH_{t_n}(\pi_n(x)) \right],$$ where $\sqcup$ emphasizes that the union is disjoint. Then,
{\small \begin{align*} \mF(B^\HH_t(x)) &= \bigcup_{t_1+\ldots +t_n \le t} \mF \left[S^\HH_{t_1}(\pi_1(x)) \times \cdots \times S^\HH_{t_n}(\pi_n(x)) \right]\\ &= \bigcup_{t_1+\ldots +t_n \le t} \left[S^\HH_{t_1}(\pi_1(x)) \times \cdots \times S^\HH_{t_r}(\pi_r(x)) \times \mF_{r+1}\bigl(S^\HH_{t_{r+1}}(\pi_{r+1}(x))\bigr) \times \cdots \times \mF_{n}\bigl(S^\HH_{t_{n}}(\pi_{n}(x))\bigr) \right]\\ &= \bigcup_{t_1 + \ldots +t_r \le t} \ \bigcup_{t_{r+1}+\ldots+t_n\le t - (t_1 + \ldots +t_r)} \Bigl[ S^\HH_{t_1}(\pi_1(x)) \times \cdots \times S^\HH_{t_r}(\pi_r(x)) \, \times \\ & \qquad \qquad \qquad \mF_{r+1}\bigl(S^\HH_{t_{r+1}}(\pi_{r+1}(x))\bigr) \times \cdots \times \mF_{n}\bigl(S^\HH_{t_{n}}(\pi_{n}(x))\bigr)\Bigr]. \end{align*} }
The first union in the last equality is disjoint due to~$\mF_1,\ldots,\mF_r$ being the identity and the fact that we are considering the shells. So,
{\small \begin{align*} \mF(B^\HH_t(x)) &= \bigsqcup_{t_1+\ldots+t_r \le t} \Bigl[S^\HH_{t_1}(\pi_1(x)) \times \cdots \times S^\HH_{t_r}(\pi_r(x)) \times \\ & \qquad \qquad \qquad \bigcup_{t_{r+1}+\ldots+t_n \le t - (t_1 + \ldots + t_r)}\biggl( \mF_{r+1}\bigl(S^\HH_{t_{r+1}}(\pi_{r+1}(x))\bigr) \times \cdots \times \mF_{n}\bigl(S^\HH_{t_{n}}(\pi_{n}(x))\bigr)\biggr)\Bigr]. \end{align*} }
By taking cardinalities in the previous identity we obtain \[ |\mF(B^\HH_t(x))|= \sum_{ t_1 + \ldots +t_r \le t} \, \prod_{i=1}^r\binom{a_i}{t_i}(|\mA|-1)^{t_i} \prod_{j=r+1}^n |\mF_j(B^\HH_{t-(t_1 + \ldots + t_r)}(\pi_j(x)))|,\] where the terms in the second product come from considering that all shells up to the shell of radius $t-(t_1 + \ldots + t_r)$ will be included for each projection. Summing over $x \in \mC$, exchanging summations, and using the same argument as in \eqref{eqn:packing_2}, we obtain the desired result. \end{proof}

In this paper, we are mainly interested in the case $(n,r)=(2,1)$ of the previous result.
For convenience of the reader, we state this as a corollary of Theorem~\ref{thm:down}. \begin{corollary} \label{cor:ub} Following Notation~\ref{not:s5}, suppose $n=2$ and $a_1\le b_1$. Let $t \ge 1$ and let $\mC$ be an unambiguous code for the channel $\Omega[\mN,\mA,\mF,S \to T,\mU_S,t]$. Then $$\sum_{t_1=0}^t \binom{a_1}{t_1}(|\mA|-1)^{t_1} \sum_{x \in \mC} \left|\mF_2\left(B^\HH_{t-t_1}(\pi_2(x))\right)\right| \le |\mA|^{b_1+b_2}.$$ \end{corollary} We next apply the bound of Theorem~\ref{thm:down} (in the form of Corollary~\ref{cor:ub}) to some of the network families introduced in Subsection~\ref{sec:families}. Combined with some additional observations, Theorem~\ref{thm:down} shows that the Generalized Network Singleton Bound of Corollary~\ref{cor:sing} is in general not achievable, no matter what the network alphabet~$\mA$ is. This is in strong contrast with what is commonly observed in classical coding theory, where the Singleton Bound is the sharpest available bound (and is in fact attained) when the alphabet is sufficiently large. \begin{theorem} \label{thm:notmet} Let $\mathfrak{B}_s=(\mV,\mE,S,\{T\})$ be a member of Family~\ref{ex:s}. Let $\mA$ be any network alphabet and let $\mU_S$ be the set of edges of $\mathfrak{B}_s$ directly connected to $S$. We have $$\CC_1(\mathfrak{B}_s,\mA,\mU_S,1) <s.$$ In particular, the Generalized Network Singleton Bound of Corollary~\ref{cor:sing} is not met. \end{theorem} \begin{proof} Let $\mF$ be a network code for the pair $(\mathfrak{B}_s,\mA)$ and let $q:=|\mA|$ to simplify the notation. Suppose that $\mC$ is an unambiguous code for the channel $\Omega[\mathfrak{B}_s,\mA,\mF,S \to T,\mU_S,1]$. We want to show that $|\mC|<q^s$. Corollary \ref{cor:ub} gives \begin{equation} \label{touse} (q-1) \cdot |\mC| + \sum_{x \in \mC} \left|\mF_2\left(B^\HH_1(\pi_2(x))\right)\right| \le q^{s+1}. \end{equation} Suppose towards a contradiction that $|\mC|=q^s$. We claim that there exists a codeword $x \in \mC$ for which the cardinality of $$\left\{\mF_2(\pi_2(z)) \st \dH(\pi_2(x),\pi_2(z))\leq 1,\ z\in \mA^{s+2}\right\}$$ is at least 2. To see this, we start by observing that $\mF_2$ restricted to ${\pi_2(\mC)}$ must be injective, as otherwise the fan-out sets of at least two codewords would intersect non-trivially, making~$\mC$ ambiguous for the channel $\Omega[\mathfrak{B}_s,\mA,\mF,S \to T,\mU_S,1]$. Therefore it is now enough to show that~$B^\HH_1(\pi_2(x)) \cap B^\HH_1(\pi_2(y)) \neq \emptyset$ for some distinct $x,y\in\mC$, which would imply that the cardinality of one among $$\left\{\mF_2(\pi_2(z)) \st \dH(\pi_2(x),\pi_2(z))\leq 1,\ z\in \mA^{s+2}\right\}, \ \left\{\mF_2(\pi_2(z)) \st \dH(\pi_2(y),\pi_2(z))\leq 1,\ z\in \mA^{s+2}\right\}$$ is at least 2, since $\mF_2(\pi_2(x)) \neq \mF_2(\pi_2(y))$. Observe that $|B^\HH_1(\pi_2(x))| = (s+1)(q-1)+1$ for any $x \in \mC$, and that $$\sum_{x \in \mC} |B^\HH_1(\pi_2(x))| = q^s(s+1)(q-1)+q^s = q^{s+1}(s+1) - sq^s > q^{s+1}= |\mA^{a_2}|,$$ where we use the fact that $q > 1$. If $B^\HH_1(\pi_2(x)) \cap B^\HH_1(\pi_2(y)) = \emptyset$ for all distinct $x,y\in\mC$, then~$\sum_{x \in \mC} |B^\HH_1(\pi_2(x))| \le |\mA^{a_2}|$, a contradiction. Therefore $$B^\HH_1(\pi_2(x)) \cap B^\HH_1(\pi_2(y)) \neq \emptyset,$$ proving our claim. We finally combine this claim with the inequality in~\eqref{touse}, obtaining $$q|\mC|+1=(q-1)|\mC| + 2 +(|\mC|-1) \le (q-1) \, |\mC| + \sum_{x \in \mC} |\mF_2(B^\HH_1(\pi_2(x)))| \le q^{s+1}.$$ Since $|\mC|=q^s$, this reads $q^{s+1}+1 \le q^{s+1}$, a contradiction. Therefore $|\mC| < q^s$, establishing the theorem.
\end{proof} \begin{remark} While we cannot compute the exact capacity of the networks of Family~\ref{fam:b}, preliminary experimental results and case-by-case analyses seem to indicate that \begin{equation} \label{ourc} \CC_1(\mathfrak{B}_s,\mA,\mU_S,1) = \log_{|\mA|} \left(\frac{|\mA|^s + |\mA|}{2}-1\right), \end{equation} which would be consistent with Corollary \ref{cor:sbs}. At the time of writing, proving (or disproving) the equality in~\eqref{ourc} remains an open problem. \end{remark} Notice that Theorem \ref{thm:down} uses a packing argument ``downstream'' of the intermediate nodes in a simple 2-level network. Next, we work toward an upper bound on capacity that utilizes a packing argument ``upstream'' of the intermediate nodes. Later we will show that the ``upstream'' packing argument acts similarly to the Hamming Bound from classical coding theory and can sometimes give better results in the networking context. We start with the following lemma, which will be the starting point for the Second Packing Bound below. \begin{lemma} \label{lem:goodup} We follow Notation~\ref{not:s5} and let $t \ge 0$ be an integer. Let $\mC$ be an outer code for $(\mN,\mA)$. Then $\mC$ is unambiguous for the channel $\Omega[\mN,\mA,\mF,S\to T,\mU_S,t]$ if and only if~$\mF^{-1}(\mF(B^\HH_t(x))) \cap \mF^{-1}(\mF(B^\HH_t(x'))) = \emptyset$ for all distinct $x,x' \in \mC$. \end{lemma} \begin{proof} Suppose $\mF^{-1}(\mF(B^\HH_t(x))) \cap \mF^{-1}(\mF(B^\HH_t(x'))) \neq \emptyset$ for some distinct $x,x'\in\mC$, and let $y$ lie in that intersection. This implies $$\mF(y) \in \mF(B^\HH_t(x)) \cap \mF(B^\HH_t(x')) = \Omega(x) \cap \Omega(x').$$ In other words, $\mC$ is not unambiguous for the channel $\Omega[\mN,\mA,\mF,S\to T,\mU_S,t]$. For the other direction, it is straightforward to see that disjointness of the preimages implies disjointness of the fan-out sets. \end{proof} Following the notation of Lemma \ref{lem:goodup}, for $n=2$ and an unambiguous $\mC$ for the channel~$\Omega[\mN,\mA,\mF,S\to T,\mU_S,t]$ we obtain the ``packing'' result $$\sum_{x \in \mC} |\mF^{-1}(\mF(B^\HH_t(x)))| \le |\mA|^{a_1 + a_2}.$$ By extending this idea to more than two intermediate nodes, one obtains the following upper bound. \begin{theorem}[Second Packing Bound] \label{thm:up} Following Notation~\ref{not:s5}, suppose that $a_i \le b_i$ for all $1\leq i \leq r$. Let $t \ge 0$ and let $\mC$ be an unambiguous code for $\Omega[\mN,\mA,\mF,S \to T,\mU_S,t]$. Then $$\sum_{\substack{t_1,\ldots,t_r \ge 0 \\ t_1+\ldots+t_r \le t}}\prod_{i=1}^r\binom{a_i}{t_i}(|\mA|-1)^{t_i} \sum_{x \in \mC} \prod_{j=r+1}^n |\mF_j^{-1}(\mF_j(B^\HH_{t-(t_1+\ldots+t_r)}(\pi_j(x))))| \le |\mA|^{a_1+a_2+\cdots + a_n}.$$ \end{theorem} The proof of the previous result follows steps similar to those in the proof of Theorem~\ref{thm:down}, and we omit it here. As we did for Theorem~\ref{thm:down}, we also spell out the case~$(n,r)=(2,1)$ of Theorem \ref{thm:up}. \begin{corollary} \label{down_n2} Following Notation~\ref{not:s5}, suppose $n=2$ and $a_1\le b_1$. Let $t \ge 1$ and let $\mC$ be an unambiguous code for the channel $\Omega[\mN,\mA,\mF,S \to T,\mU_S,t]$. Then $$\sum_{t_1=0}^t \binom{a_1}{t_1}(|\mA|-1)^{t_1} \sum_{x \in \mC} |\mF_2^{-1}(\mF_2(B^\HH_{t-t_1}(\pi_2(x))))| \le |\mA|^{a_1 + a_2}.$$ \end{corollary} \begin{remark} Although Theorem~\ref{thm:up} acts similarly to the Hamming Bound from classical coding theory, we find it significantly more difficult to apply than our Theorem~\ref{thm:down} in the networking context.
The reason behind this lies in the fact that, in general, \begin{equation} \label{nono}\mF^{-1}(\mF(B^\HH_t(x))) \neq \mF_1^{-1}(\mF_1(\pi_1(B^\HH_t(x)))) \times \mF_2^{-1}(\mF_2(\pi_2(B^\HH_t(x)))). \end{equation} This makes it challenging to evaluate the quantities in the statement of Theorem~\ref{thm:up}, even in the case $n=2$ (Corollary~\ref{down_n2}). We give an example to illustrate the inequality in~\eqref{nono}. Consider the Diamond Network $\mathfrak{A}_1$ with $\mA=\F_3$; see Section~\ref{sec:diamond} and Subsection~\ref{sec:families}. We know from Theorem~\ref{thm:diamond_cap} that its capacity is $\log_3 2$. A capacity-achieving pair $(\mF,\mC)$ is given by~$\mC=\{(1,1,1),(2,2,2)\}$ and $\mF=\{\mF_1,\mF_2\}$, where $\mF_1(a) = a$ for all $a \in \mA$ and $$\mF_2(x,y)= \begin{cases} 1 & \text{if} \ \ x = y = 1 \\ 2 & \text{if} \ \ x = y = 2 \\ 0 & \text{otherwise.} \\ \end{cases}$$ However, \begin{align*} \mF^{-1}(\mF(B^\HH_1((1,1,1)))) &=\mF^{-1}(\{(0,1),(1,1),(2,1),(1,0)\}) \\ &\neq \mF_1^{-1}(\{0,1,2\}) \times \mF_2^{-1}(\{0,1\})\\ &=\mF_1^{-1}(\mF_1(B^\HH_1(1)))\times \mF_2^{-1}(\mF_2(B^\HH_1(1,1))). \end{align*} \end{remark} We next present an upper bound on the capacity of the networks of Family~\ref{fam:e}, showing that the Generalized Network Singleton Bound of Corollary~\ref{cor:sing} is not met for this family, no matter what the alphabet size is. Notice that the number of indices $i$ for which $a_{i}\leq b_{i}$ for a network of Family~\ref{fam:e} is 0 whenever~$t>1$. In particular, the strategy behind the proofs of Theorems~\ref{thm:down} and~\ref{thm:up} is of no help in this case. \begin{theorem} \label{thm:mete} Let $\mathfrak{E}_t=(\mV,\mE,S,\{T\})$ be a member of Family~\ref{fam:e}. Let $\mA$ be any network alphabet and let $\mU_S$ be the set of edges of $\mathfrak{E}_t$ directly connected to $S$. We have $$\CC_1(\mathfrak{E}_t,\mA,\mU_S,t) <1.$$ In particular, the Generalized Network Singleton Bound of Corollary~\ref{cor:sing} is not met. \end{theorem} \begin{proof} Let $q:=|\mA|$. Suppose by way of contradiction that there exists an unambiguous code~$\mC$ of size $q$ for $\Omega[\mathfrak{E}_{t},\mA,\mF,S \to T,\mU_S,t]$, for some network code $\mF=\{\mF_{1},\mF_{2}\}$. Since $\mC$ is unambiguous, it must be the case that $\mC$ has minimum distance at least $2t+1$, making $\mC$ an MDS code of length and minimum distance $2t+1$. Thus, $\pi_1(\mC)$ must contain $q$ distinct elements. We also observe that the restriction of $\mF_1$ to $\pi_1(\mC)$ must be injective, since otherwise the intersection of fan-out sets would be nonempty for some pair of codewords in $\mC$. Therefore we have that $\mF_1(\pi_1(\mC))=\mA$. Putting all of the above together, we conclude that there must exist $x, y \in \mC$ and $e\in \mA^{2t+1}$ such that $x\neq y$, $\mF_1(\pi_1(x))=\mF_1(\pi_1(e))$, and $d^\HH(\pi_1(e),\pi_1(y))\le t-1$. We thus have \begin{align*} x'&:=(x_1,\ldots,x_t,x_{t+1},y_{t+2},y_{t+3},\ldots,y_{2t+1}) \in B^\HH_t(x),\\ y'&:=(e_1,\ldots,e_t,x_{t+1},y_{t+2},y_{t+3},\ldots,y_{2t+1}) \in B^\HH_t(y). \end{align*} Finally, observe that $(\mF_1(\pi_1(x')),\mF_2(\pi_2(x')))=(\mF_1(\pi_1(y')),\mF_2(\pi_2(y')))\in \Omega(x)\cap \Omega(y)$, a contradiction. \end{proof} We now turn to the networks of Family~\ref{fam:a}, which are probably the most natural generalization of the Diamond Network of Section~\ref{sec:diamond}.
We show that none of the members of Family~\ref{fam:a} meet the Generalized Network Singleton Bound of Corollary~\ref{cor:sing} with equality, no matter what the underlying alphabet is. \begin{theorem} \label{thm:meta} Let $\mathfrak{A}_t=(\mV,\mE,S,\{T\})$ be a member of Family~\ref{fam:a}. Let $\mA$ be any network alphabet and let $\mU_S$ be the set of edges of $\mathfrak{A}_t$ directly connected to $S$. We have $$\CC_1(\mathfrak{A}_t,\mA,\mU_S,t) <t.$$ In particular, the Generalized Network Singleton Bound of Corollary~\ref{cor:sing} is not met. \end{theorem} \begin{proof} Let $q:=|\mA|$. Towards a contradiction, assume that the rate $t$ is achievable. That is, there exists an unambiguous code $\mC$ of size $q^t$ for $\Omega[\mathfrak{A}_{t},\mA,\mF,S \to T,\mU_S,t]$, where $\mF=\{\mF_{1},\mF_{2}\}$ is a network code for $(\mathfrak{A}_t,\mA)$. Note that $|\pi_2(\mC)|=|\mC|$ and that the restriction of $\mF_2$ to $\pi_2(\mC)$ is injective, as otherwise the intersection of fan-out sets would be nonempty for some pair of codewords in $\mC$, as one can easily check. We claim that there must exist two distinct codewords $x,y \in \mC$ such that~$B^\HH_t(\pi_2(x)) \cap B^\HH_t(\pi_2(y)) \neq \emptyset$. Observe that \begin{align} \sum_{x \in \mC} |B^\HH_t(\pi_2(x))| &= q^t\left(\sum_{k=0}^t \binom{2t}{k}(q-1)^k\right) \nonumber \\ &> q^t\left(\sum_{k=0}^t \binom{t}{k}(q-1)^k\right) \nonumber \\ &= q^{2t}, \label{eqn:factorial} \end{align} where Equation \eqref{eqn:factorial} follows from the Binomial Theorem. If $B^\HH_t(\pi_2(x)) \cap B^\HH_t(\pi_2(y)) = \emptyset$ for all distinct $x,y\in\mC$, then we would have $$\sum_{x \in \mC} |B^\HH_t(\pi_2(x))| = \left| \bigcup_{x \in \mC} B_t^\HH(\pi_2(x)) \right| \le q^{2t}.$$ Therefore it must be the case that some such intersection is nonempty. In other words, there exist some distinct $x,y\in \mC$ and $e \in \mA^{2t}$ such that $e \in B^\HH_t(\pi_2(x)) \cap B^\HH_t(\pi_2(y))$. By the fact that the restriction of $\mF_2$ to $\pi_2(\mC)$ is injective and its codomain has size~$q^t=|\pi_2(\mC)|$, the restriction of $\mF_2$ to $\pi_2(\mC)$ must be surjective as well. Thus $\mF_2(e) = \mF_2(\pi_2(a))$ for some $a \in \mC$. If $a=x$, then $\mF_2(e)=\mF_2(\pi_2(x))$ and we have~$(\mF_1(\pi_1(y)),\mF_2(e))=(\mF_1(\pi_1(y)),\mF_2(\pi_2(x))) \in \Omega(x) \cap \Omega(y)$, the intersection of the fan-out sets, which is a contradiction to $\mC$ being unambiguous. A similar argument holds if $a=y$. Otherwise, we still have $(\mF_1(\pi_1(x)),\mF_2(\pi_2(a))) \in \Omega(x) \cap \Omega(a),$ a contradiction. We conclude that there cannot be an unambiguous code of size $q^t$. \end{proof} We conclude this section by mentioning that the networks of both Family \ref{ex:u} and Family \ref{fam:d} do achieve the Generalized Network Singleton Bound of Corollary~\ref{cor:sing}: both capacities will be examined in the next section. \section{Simple 2-Level Networks: Lower Bounds} \label{sec:2level_lower} We devote this section to deriving lower bounds on the capacity of simple 2-level networks. In connection with Theorem \ref{thm:channel}, we note that a rate may be achievable for a simple 2-level network and yet not achievable for every corresponding simple 3-level network. We start by establishing the notation for this section. \begin{notation} Throughout this section we follow Notation~\ref{not:s5}. 
In particular, we work with simple 2-level networks $\mN=(\mV,\mE,S,\{T\})=([a_1,a_2,\ldots,a_n],[b_1,b_2,\ldots,b_n])$, denoting by $\mU_S$ the set of edges directly connected to the source $S$. For any $t \ge 1$, we partition the vertices of such a network into the following (disjoint and possibly empty) sets: \begin{align*} I_{1}(\mN,t)&=\{i \st a_i\geq b_i+2t\},\\ I_{2}(\mN,t)&=\{i \st a_i\leq b_i\},\\ I_{3}(\mN,t)&=\{i \st b_i + 1\leq a_i \leq b_i+2t-1\}. \end{align*} \end{notation} \begin{theorem} \label{thm:lowbound} Suppose that $\mA$ is a sufficiently large finite field. Let $t \ge 1$ and define the set~$\Tilde{I}_{3}(\mN,t) =\{i \in I_{3}(\mN,t) \st a_i > 2t\}$. Then, \begin{equation*} \CC_1(\mN,\mA,\mU_S,t)\geq \sum_{i \in I_1(\mN,t)} b_i + \max\left\{X,Y \right\}, \end{equation*} where \begin{align*} X &= \sum_{i\in \Tilde{I}_3(\mN,t)} (a_i-2t) + \max\left\{0,\left(\sum_{i\in I_2(\mN,t)} a_i\right) - 2t\right\}, \\ Y &= \max\left\{0,\left(\sum_{i\in I_2(\mN,t)} a_i + \sum_{i\in I_3(\mN,t)} b_i\right)- 2t\right\}. \end{align*} \end{theorem} \begin{proof} We will construct a network code $\mF = \{\mF_1,\ldots,\mF_n\}$ for~$(\mN,\mA)$ and an unambiguous outer code in various steps, indicating how each intermediate node operates. The alphabet~$\mA$ should be a large enough finite field that admits MDS codes with parameters as described below. For each $i \in I_1(\mN,t)$, let~$\mF_i$ be a minimum distance decoder for an MDS code with parameters $\left[b_i+2t,\ b_i,\ 2t+1\right]$ that is sent along the first $b_i +2t$ edges incoming to $V_i$. The source sends arbitrary symbols along the extraneous $a_i - (b_i+2t)$ edges and these will be disregarded by $V_i$. Via the intermediate nodes indexed by $I_1(\mN,t)$, we have thus achieved $\sum_{i\in I_{1}(\mN,t)} b_i$ information symbols decodable by the terminal. In the remainder of the proof we will show that either an extra $X$ symbols or an extra $Y$ symbols~(i.e., an extra $\max\{X,Y\}$ symbols in general) can be successfully transmitted. We show how the two quantities $X$ and $Y$ can be achieved separately, so that we may choose the better option of the two and achieve the result. \begin{enumerate} \item For each $i \in \Tilde{I}_3(\mN,t)$, let $\mF_i$ be a minimum distance decoder for an MDS code with parameters $\left[a_i,\ a_i-2t,\ 2t+1\right]$ that sends its output across the first $a_i -2t$ outgoing edges from $V_i$. Arbitrary symbols are sent along the remaining outgoing edges and they will be ignored at the destination. We disregard all intermediate nodes with indices from the set $I_3(\mN,t)\setminus \Tilde{I}_3(\mN,t)$ as they will not contribute to the lower bound in this scheme. The symbols sent through the vertices indexed by ${I_2}(\mN,t)$ will be globally encoded via an MDS code with parameters $$\left[\sum_{i\in I_{2}(\mN,t)} a_i,\ \left(\sum_{i\in I_{2}(\mN,t)} a_i\right) - 2t,\ 2t+1\right].$$ The intermediate nodes indexed by ${I_2}(\mN,t)$ will simply forward the incoming symbols along the first $a_i$ outgoing edges, sending arbitrary symbols along the other $b_i - a_i$ outgoing edges. Symbols sent on these outgoing edges will be disregarded at the destination. If~$\sum_{i\in I_{2}(\mN,t)} a_i \leq 2t$, we instead ignore the vertices indexed by $I_2(\mN,t)$ in this scheme. In conclusion, via the intermediate nodes indexed by $I_2(\mN,t)\cup I_3(\mN,t)$, we have added $X$ information symbols decodable by the terminal. 
\item For $i \in I_2(\mN,t) \cup I_3(\mN,t)$, let each $\mF_i$ simply forward up to $b_i$ received symbols as follows. If $i\in I_2(\mN,t)$, then $a_i$ symbols will be forwarded along the first $a_i$ outgoing edges and arbitrary symbols will be sent along the other $b_i - a_i$ outgoing edges. These edges will be disregarded at the destination. If $i\in I_3(\mN,t)$, then $b_i$ symbols sent over the first~$b_i$ edges incoming to $V_i$ will be forwarded. The source $S$ will send arbitrary symbols along the other $a_i - b_i$ incoming edges, which will, however, be ignored. On the concatenation of all (non-arbitrary) coordinates entering intermediate nodes with index in $I_2(\mN,t)\cup I_3(\mN,t)$, the outer code is chosen to be an MDS code with parameters $$\left[\sum_{i\in I_{2}(\mN,t)} a_i + \sum_{i\in I_{3}(\mN,t)} b_i,\ \left(\sum_{i\in I_{2}(\mN,t)} a_i + \sum_{i\in I_{3}(\mN,t)} b_i\right) -2t ,\ 2t+1\right].$$ The MDS code is then decoded at the terminal. If $\sum_{i\in I_{2}(\mN,t)} a_i + \sum_{i\in I_{3}(\mN,t)} b_i \leq 2t$, we ignore the coordinates corresponding to $I_2(\mN,t)\cup I_3(\mN,t)$ in this scheme. In conclusion, via the intermediate nodes in $I_2(\mN,t)\cup I_3(\mN,t)$ we have added $Y$ information symbols decodable at the terminal. \end{enumerate} This concludes the proof. \end{proof} We note that Theorem \ref{thm:lowbound} does not always yield a positive value, even for networks where the capacity is positive. For example, for the member $\mathfrak{A}_2$ of Family \ref{fam:a}, Theorem \ref{thm:lowbound} gives a lower bound of 0 for the capacity. However, the following result shows that $\CC_1(\mathfrak{A}_2,\mA,\mU_S,2)$ is, in fact, positive. \begin{proposition} \label{prop:atleasta} Let $\mathfrak{A}_2=(\mV,\mE,S,\{T\})$ be the member of Family~\ref{fam:a} with $t=2$. Let $\mA$ be any network alphabet and let $\mU_S$ be the set of edges of $\mathfrak{A}_2$ directly connected to $S$. We have $$\CC_1(\mathfrak{A}_2,\mA,\mU_S,2) \ge 1.$$ \end{proposition} \begin{proof} Fix two distinct alphabet symbols $*, *'\in \mA$. The source $S$ encodes each element of~$\mA$ using a six-times repetition code. Vertex~$V_1$ simply forwards the received symbols, while vertex~$V_2$ proceeds as follows. If, on the four incoming edges, an alphabet symbol appears at least three times, $V_2$ forwards that symbol on both outgoing edges. Otherwise $V_2$ outputs~$*$ on one edge, and~$*'$ on the other. At the destination, if the incoming symbols from~$V_{2}$ match, then the terminal $T$ decodes to that symbol. Otherwise, it decodes to the alphabet symbol sent from~$V_{1}$ (in this case both errors must have occurred on the edges entering $V_2$, so the two symbols forwarded by $V_1$ agree and are correct). All symbols from $\mA$ can be sent with this scheme, including $*$ and $*'$, giving a capacity of at least $1$. \end{proof} We also give the following less sophisticated lower bound on the capacity of simple 2-level networks. The idea behind the proof is to construct a subnetwork of the original one where a lower bound for the capacity can be easily established. \begin{proposition} \label{prop:lin} Suppose that $\mA$ is a sufficiently large finite field. For all $t \ge 0$ we have \begin{equation} \label{albe} \CC_1(\mN,\mA,\mU_S,t) \ge \max\left\{0, \, \sum_{i=1}^n\min\{a_i,b_i\} -2t\right\}. \end{equation} \end{proposition} \begin{proof} We will construct a network $\mN'=(\mV,\mE',S,\{T\})$ such that $$\CC_1(\mN,\mA,\mU_S,t) \ge \CC_1(\mN',\mA,\mU'_S,t),$$ where $\mU'_S$ is the set of edges directly connected to $S$ in the new network $\mN'$.
For $i \in I_2(\mN,t)$, remove the first $b_i - a_i$ outgoing edges from $V_i.$ Similarly, for each~$i \in \{1,\ldots,n\} \setminus I_2(\mN,t)$, remove the first $a_i-b_i$ incoming edges to $V_i$. Denote the new network obtained in this way by~$\mN'$ and observe that all intermediate nodes $V_i'$ in~$\mN'$ have $\degin(V_i')=\degout(V_i')$. This implies that~$I_2(\mN',t)=\{1,\ldots,n\}.$ Since removing edges cannot increase the capacity (technically, this is a consequence of Proposition~\ref{prop:finer}), we have $\CC_1(\mN,\mA,\mU_S,t) \ge \CC_1(\mN',\mA,\mU'_S,t).$ We will give a coding scheme for $\mN'$ that achieves the right-hand side of~\eqref{albe} to conclude the proof. Consider the edge-cut $$\mE^*= \bigcup_{i=1}^{n} \inn(V_{i}').$$ By the definition of $\mN'$, we have $|\mE^*| = \sum_{i=1}^n\min\{a_i,b_i\}$. Since the left-hand side of~\eqref{albe} is always non-negative, we shall assume $|\mE^*| > 2t$ without loss of generality. Under this assumption, we will prove that the rate $|\mE^*| -2t$ is achievable. We choose the outer code $\mC$ to be an MDS code with parameters $[|\mE^*|,|\mE^*|-2t,2t+1]$ over the finite field~$\mA$. All of the intermediate nodes forward incoming packets (the number of outgoing edges will now allow this). The terminal receives a codeword of length $|\mE^*|$ and decodes it according to the chosen MDS code. \end{proof} \begin{remark} The lower bound of Theorem~\ref{thm:lowbound} is at least as large as the one of Proposition~\ref{prop:lin}, and it can be strictly larger. For example, consider the simple 2-level network $\mN=([2,5,6],[2,2,2])$. By the Generalized Network Singleton Bound of Corollary~\ref{cor:sing}, we have $\CC_1(\mN,\mA,\mU_S,2) \le 4$. Theorem \ref{thm:lowbound} gives a lower bound of $3$ for $\CC_1(\mN,\mA,\mU_S,2)$, while Proposition \ref{prop:lin} gives a lower bound of 2 for the same quantity. \end{remark} As an application of Theorem~\ref{thm:lowbound} we provide a family of simple 2-level networks where the Generalized Network Singleton Bound of Corollary~\ref{cor:sing} is always met with equality. \begin{corollary}[see \cite{beemer2021curious}] \label{cor:conf} Suppose that $\mA$ is a sufficiently large finite field. Let $t \ge 0$ and suppose that $I_3(\mN,t) = \emptyset$. Then the Generalized Network Singleton Bound of Corollary~\ref{cor:sing} is achievable. \end{corollary} \begin{proof} Since $I_3(\mN,t) = \emptyset$, the set $\Tilde{I}_3(\mN,t)$ defined in Theorem \ref{thm:lowbound} is also empty. Therefore Theorem~\ref{thm:lowbound} gives \begin{equation} \label{eqn:eql-sb} \CC_1(\mN,\mA,\mU_S,t)\geq \sum_{i\in I_{1}(\mN,t)}b_i+\max\left\{0,\left(\sum_{i\in I_{2}(\mN,t)} a_i\right) -2t\right\}. \end{equation} Choosing $P_1 = I_1(\mN,t)$ and $P_2 = I_2(\mN,t)$ in Corollary~\ref{cor:sing} gives us that the Generalized Network Singleton (upper) Bound is equal to the right-hand side of \eqref{eqn:eql-sb}. \end{proof} We include an example illustrating how Corollary~\ref{cor:conf} is applied in practice. \begin{example} Take $\mN=([12,8,2,2,1],[5,2,4,3,1])$ and let $\mA$ be a sufficiently large finite field. We want to compute $$\CC_1(\mN,\mA,\mU_S,3).$$ We have $I_1(\mN,3) = \{1,2\}$, $I_2(\mN,3) = \{3,4,5\}$, and $I_3(\mN,3)=\emptyset$. The intermediate nodes indexed by $i \in I_1(\mN,3)$ use local MDS decoders of dimension $b_i$ to retrieve $\sum_{i\in I_{1}(\mN,3)}b_i$ information symbols. That is, $V_1$ uses an $[11,5,7]$ MDS decoder and $V_2$ uses an $[8,2,7]$ MDS decoder.
The intermediate nodes indexed by $I_2(\mN,3)$ are supposed to cooperate with each other and can be seen as one intermediate node with $5$ incoming and $8$ outgoing edges. In the notation of the proof of Corollary~\ref{cor:conf}, we have~$\sum_{i\in I_{2}(\mN,3)} a_i < 2t$. In this case, the vertices indexed by $I_{2}(\mN,3)$ are disregarded. In total, the Generalized Network Singleton Bound of Corollary~\ref{cor:sing}, whose value is 7, is met with equality. \end{example} We finally turn to the networks of Families~\ref{fam:c} and~\ref{fam:d}, which we did not treat in Section~\ref{sec:upper}. The next two results compute the capacities of these families and prove, in particular, that the Generalized Network Singleton Bound of Corollary~\ref{cor:sing} is attained. This, along with the upper bounds of Section~\ref{sec:upper} for Families~\ref{fam:a}, \ref{ex:s}, and~\ref{fam:e}, demonstrates that emptiness of $I_3(\mN,t)$ is far from a complete characterization of the achievability of the Generalized Network Singleton Bound~(i.e., Corollary \ref{cor:conf} is not biconditional); see Remark \ref{rem:i3crit}. \begin{theorem} \label{thm:metc} Let $\mathfrak{C}_t=(\mV,\mE,S,\{T\})$ be a member of Family~\ref{fam:c}. Let $\mA$ be any network alphabet and let $\mU_S$ be the set of edges of $\mathfrak{C}_t$ directly connected to $S$. We have $$\CC_1(\mathfrak{C}_t,\mA,\mU_S,t) = 1.$$ In particular, the Generalized Network Singleton Bound of Corollary~\ref{cor:sing} is met with equality. \end{theorem} \begin{proof} We let $q:=|\mA|$ for ease of notation (but note that $q$ does not need to be a prime power). We will construct a network code $\mF=\{\mF_1,\mF_2\}$ for $(\mathfrak{C}_t,\mA)$ and an unambiguous outer code~$\mC$ of size~$q$ for the channel $\Omega[\mathfrak{C}_{t},\mA,\mF,S \to T,\mU_S,t]$. Recall that $t \ge 2$ by the very definition of Family~\ref{fam:c}. \underline{Case 1}: $q=t=2$. \ Let $\mA = \{a,b\}.$ Encode each element of $\mA$ using a 5-times repetition code. It can be checked that the pair $(\mC,\mF)$ achieves the desired capacity, where~$\mC=\{(a,a,a,a,a),(b,b,b,b,b)\}$ and $\mF=\{\mF_1,\mF_2\}$ is defined as follows. $\mF_1(v) = v$ for all~$v \in \mA^2$ and $$\mF_2(w)= \begin{cases} (a,a) & \text{if} \ \ w = (a,a,a), \\ (b,b) & \text{if} \ \ w = (b,b,b), \\ (a,b) & \text{if} \ \ w \in S^\HH_{1}((a,a,a)), \\ (b,a) & \text{if} \ \ w \in S^\HH_{1}((b,b,b)). \\ \end{cases}$$ \underline{Case 2}: $\max\{q,t\} \ge 3$. \ Encode each element of $\mA$ using a $(2t+1)$-times repetition code, so that the codewords of $\mC$ are given by $c_{1},c_{2},\ldots,c_{q}$. The intermediate function $\mF_1$ simply forwards its received symbols. We will next define the function $\mF_2$. Notice that there are $q\left(\lfloor t/2 \rfloor+1\right)$ shells $S^\HH_{i}(\pi_{2}(c_{j}))$ for $i\in \{ 0,1,\ldots, \lfloor t/2 \rfloor\}$ and $j\in \{1,\ldots,q\}$. Define $$D= \mA^{t+1}\setminus \bigcup_{j\in \{1,\ldots,q\}}B^\HH_{\lfloor t/2 \rfloor}(\pi_{2}(c_{j})),$$ where $\pi_2$ is defined in Notation~\ref{not:ballsetc}. That is, $D$ is the set of words that do not belong to any ball of radius $\lfloor t/2 \rfloor$ centered at the projection of a codeword, so that the union of $D$ with the collection of the shells described above is all of $\mA^{t+1}$. Let $q':=q\left(\lfloor t/2 \rfloor+1\right)+1$. Since $\max\{q,t\} \ge 3$, we have \begin{equation*} q'<q^{t}.
\end{equation*} Therefore, we may define $\mF_2$ to be such that the sets $D$ and $S^\HH_{i}(\pi_{2}(c_{j}))$ for $i\in \{ 0,1,\ldots, \lfloor {t/2} \rfloor\}$ and $j\in \{1,\ldots, q\}$ each map to a single, \textit{distinct} element of $\mA^{t}$. Decoding at the terminal is accomplished as follows. Suppose that the terminal receives the pair $(x,y)\in \mA^{t} \times \mA^{t}$. First, the set $\mF_2^{-1}(y)$ is computed. If $\mF_2^{-1}(y) = D$, then the terminal decodes to the majority of the coordinates in $x$; this is guaranteed to be the transmitted symbol by the definition of $D$, since receiving an element of $D$ means that at least $\lfloor t/2 \rfloor+1$ errors occurred on the edges entering $V_2$, and hence fewer than $t/2$ of the $t$ symbols in $x$ were corrupted. If $\mF_2^{-1}(y) =\{\pi_{2}(c_{j})\}$ for some $j$, the terminal decodes to the repeated symbol in $c_j$. Finally, if $\mF_2^{-1}(y) = S^\HH_{i}(\pi_{2}(c_{j}))$ for $i\in \{1,\ldots, \lfloor t/2 \rfloor\}$, then it is not clear to the decoder if $i$ symbols were corrupted incoming to~$V_{2}$ (and up to $t-i$ symbols incoming to~$V_{1}$), or if at least $(t+1)-i$ symbols were corrupted incoming to~$V_{2}$ (and hence at most $i-1$ incoming to~$V_{1}$). To differentiate the possibilities, the terminal looks at $x$. If at least $i$ of the symbols of $x$ are consistent with~$c_{j}$, then we must be in the first scenario (recall $i\leq \lfloor t/2\rfloor$), so the terminal decodes to $c_{j}$. Otherwise, at most $i-1$ of the symbols incoming to~$V_{1}$ were changed, and hence the majority of the symbols in $x$ corresponds to the transmitted codeword. In this case, the terminal decodes to the majority of symbols in $x$. \end{proof} We propose an example that illustrates the scheme in the proof of Theorem \ref{thm:metc}. \begin{example} We will show that $\CC_1(\mathfrak{C}_4,\mA,\mU_S,4) = 1$ when $|\mA|=2$; see Family~\ref{fam:c} for the notation. Let $\mA=\{a,b\}$. Following the proof of Theorem \ref{thm:metc}, we consider the repetition code with codewords $c_1=(a,a,a,a,a,a,a,a,a)$ and~$c_2=(b,b,b,b,b,b,b,b,b)$. Observe that~$D=\emptyset$. We illustrate the decoding of $c_1$ case-by-case. Since the alphabet size is equal to~2, the analysis of the $\binom{9}{4}$ possible actions of the adversary can be reduced to the following~5 basic cases (which also cover the favorable scenario where the adversary might not use their full power). \begin{enumerate} \item $(a,a,a,a,a,a,a,a,a)$ is changed into $(a,a,a,a,a,b,b,b,b)$ by the adversary. Since we have $(a,b,b,b,b) \in S^\HH_1(\pi_2(c_2))$ and none of the coordinates of $(a,a,a,a)$ is $b$, the terminal decodes to $c_1$. \item $(a,a,a,a,a,a,a,a,a)$ is changed into $(a,a,a,b,a,a,b,b,b)$ by the adversary. Since we have~$(a,a,b,b,b) \in S^\HH_2(\pi_2(c_2))$ and only one of the coordinates of $(a,a,a,b)$ is $b$, the terminal decodes to $c_1$. \item $(a,a,a,a,a,a,a,a,a)$ is changed into $(a,a,b,b,a,a,a,b,b)$ by the adversary. Since we have~$(a,a,a,b,b) \in S^\HH_2(\pi_2(c_1))$ and at least 2 of the coordinates of $(a,a,b,b)$ are $a$, the terminal decodes to $c_1$. \item $(a,a,a,a,a,a,a,a,a)$ is changed into $(a,b,b,b,a,a,a,a,b)$ by the adversary. Since we have~$(a,a,a,a,b) \in S^\HH_1(\pi_2(c_1))$ and at least 1 of the coordinates of $(a,b,b,b)$ is $a$, the terminal decodes to $c_1$. \item $(a,a,a,a,a,a,a,a,a)$ is changed into $(b,b,b,b,a,a,a,a,a)$ by the adversary. Since we have~$(a,a,a,a,a) = \pi_2(c_1)$, the terminal decodes to $c_1$. \end{enumerate} This shows that, \textit{no matter} what the action of the adversary is, one alphabet symbol can always be transmitted unambiguously.
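The case analysis above can also be checked exhaustively by computer. The following Python sketch is an illustration only: for simplicity, $\mF_2$ is represented as returning the index of the set containing its input (rather than a distinct element of $\mA^{t}$, which is equivalent for decoding purposes), and, since $|\mA|=2$, every error is a bit flip. The sketch verifies that all actions of the adversary on at most $4$ of the $9$ coordinates are decoded correctly.
\begin{verbatim}
from itertools import combinations

a_sym, b_sym, t = 0, 1, 4
c1, c2 = (a_sym,) * 9, (b_sym,) * 9            # the two codewords, 2t+1 = 9 symbols
codewords = [c1, c2]

def dist(u, v):
    return sum(x != y for x, y in zip(u, v))

def F2(w):
    """Index of the set containing w: a shell S_i(pi_2(c_j)) or the set D."""
    for j, c in enumerate(codewords):
        d = dist(w, c[t:])                     # pi_2 = the last t+1 coordinates
        if d <= t // 2:
            return ('shell', d, j)
    return 'D'                                 # empty when t = 4 and |A| = 2

def decode(x, label):
    majority = max(set(x), key=x.count)
    if label == 'D':
        return majority
    _, i, j = label
    if i == 0 or sum(s == codewords[j][0] for s in x) >= i:
        return codewords[j][0]                 # first scenario: decode to c_j
    return majority                            # second scenario: majority of x

ok = True
for c in codewords:
    for k in range(t + 1):                     # the adversary corrupts at most t = 4
        for positions in combinations(range(9), k):
            w = list(c)
            for p in positions:
                w[p] ^= 1                      # over a binary alphabet an error is a flip
            ok &= decode(tuple(w[:t]), F2(tuple(w[t:]))) == c[0]
print("all corruption patterns decoded correctly:", ok)   # prints: True
\end{verbatim}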
\end{example} \begin{remark} \label{rem:exclude} The case $t=1$ is excluded from the definition of Family \ref{fam:c} because, by Theorem~\ref{thm:diamond_cap}, the Generalized Network Singleton Bound of Corollary~\ref{cor:sing} is not achievable in that case. Since that case has already been studied, excluding it allows Family~\ref{fam:c} to consist entirely of networks that achieve the bound of Corollary~\ref{cor:sing}, as proven in Theorem~\ref{thm:metc}. \end{remark} We now turn to the networks of Family~\ref{fam:d}, the only family introduced in Subsection \ref{sec:families} that we have not yet considered. \begin{theorem} \label{thm:metd} Let $\mathfrak{D}_t=(\mV,\mE,S,\{T\})$ be a member of Family~\ref{fam:d}. Let $\mA$ be any network alphabet and let $\mU_S$ be the set of edges of $\mathfrak{D}_t$ directly connected to $S$. We have $$\CC_1(\mathfrak{D}_t,\mA,\mU_S,t)= 1.$$ In particular, the Generalized Network Singleton Bound of Corollary~\ref{cor:sing} is met with equality. \end{theorem} \begin{proof} Fix an alphabet symbol $* \in \mA$. The source $S$ encodes each element of $\mA$ using a~$4t$-times repetition code. The vertices $V_1$ and $V_2$ each implement a majority-vote decoder, unless no symbol occurs strictly more often than every other over the incoming edges (i.e., there is a tie for the most frequent symbol). In that case, the vertex outputs $*$. At the destination, if the incoming symbols match, then the terminal decodes to that symbol. Otherwise, exactly one of the two received symbols equals $*$ (a tie can occur at only one of the two vertices, since it requires all $t$ errors to be concentrated there), and the terminal decodes to the alphabet symbol that is not equal to $*$. All symbols from~$\mA$ can be sent with this scheme, including $*$, giving a capacity of at least $1$ and establishing the theorem. \end{proof} \begin{remark} \label{rem:i3crit} Let $\mN = ([a_1,a_2],[b_1,b_2])$ be a simple 2-level network with $n=2$ intermediate nodes. Let~$\mA$ be a sufficiently large finite field. \begin{table}[!ht] \begin{center} \renewcommand{\arraystretch}{1.4} \begin{tabular}{|x{4.5cm}| x{4.5cm}| x{4.5cm}|} \hline Size of $I_3(\mN,t)$ & Corollary \ref{cor:sing} is met & Corollary \ref{cor:sing} is not met \\ [0.5ex] \hline\hline 0 & \text{always} & \text{never} (Corollary \ref{cor:conf}) \\ \hline 1 & $\mathfrak{C}_2$ (Theorem \ref{thm:metc}) & $\mathfrak{A}_2$ (Theorem \ref{thm:meta}) \\ \hline 2 & $\mathfrak{D}_2$ (Theorem \ref{thm:metd}) & $\mathfrak{E}_2$ (Theorem \ref{thm:mete}) \\ \hline \end{tabular} \end{center} \caption{On the 1-shot capacity of simple 2-level networks with 2 intermediate nodes.\label{tablele}} \end{table} We present Table \ref{tablele} to illustrate that the size of $I_3(\mN,t)$ cannot be considered a criterion for the achievability of the Generalized Network Singleton Bound of Corollary~\ref{cor:sing}. \end{remark} \section{The Double-Cut-Set Bound and Applications} \label{sec:double-cut-bd} In this section we illustrate how the results on 2-level and 3-level networks derived throughout the paper can be combined with each other and applied to study an arbitrarily large and complex network $\mN$. We already stressed in Section~\ref{sec:motiv} that known cut-set bounds are not sharp in general when considering a \textit{restricted} adversary (whereas they are sharp, under certain assumptions, when the adversary is not restricted; see Theorem~\ref{thm:mcm}). The main idea behind the approach taken in this section is to consider \textit{pairs} of edge-cuts, rather than a single one, and study the ``information flow'' between the two.
This allows one to better capture the adversary's restrictions and to incorporate them into explicit upper bounds for the capacity of the underlying network $\mN$. All of this leads to our Double-Cut-Set Bound below; see Theorem~\ref{thm:dcsb}. In turn, Theorem~\ref{thm:dcsb} can be used to derive an upper bound for the capacity of $\mN$ in terms of the capacity of an \textit{induced} 3-level network. This brings the study of~3-level networks and their reduction to 2-level networks into the game; see Sections~\ref{sec:net-2-and-3} and~\ref{sec:upper}. A concrete application of the machinery developed in this section will be illustrated later in Example~\ref{ex:tulipA}, where we will go back to our opening example of Section~\ref{sec:motiv} and rigorously compute its capacity. Another network is studied in Example~\ref{ex:second}. We begin by introducing supplementary definitions and notation specific to this section. \begin{definition} \label{def:imme} Let $\mN=(\mV,\mE,\mS,\bfT)$ be a network and let $\mE_1, \mE_2 \subseteq \mE$ be non-empty edge sets. We say that $\mE_1$ \textbf{precedes} $\mE_2$ if every path from $S$ to an edge of $\mE_2$ contains an edge of $\mE_1$. In this situation, for $e \in \mE_2$ and $e' \in \mE_1$, we say that $e'$ is an \textbf{immediate predecessor of $e$ in~$\mE_1$} if $e' \preccurlyeq e$ and there is no $e'' \in \mE_1$ with $e' \preccurlyeq e'' \preccurlyeq e$ and $e' \neq e''$. \end{definition} We illustrate the previous notions with an example. \begin{example} \label{ex:imme} Consider the network $\mN$ and the edge sets $\mE_1$ and $\mE_2$ in Figure~\ref{tulipsolved} below. Then~$\mE_1$ precedes~$\mE_2$. We have $e_2 \preccurlyeq e_{10}$ and $e_9 \preccurlyeq e_{10}$. Moreover, $e_9$ is an immediate predecessor of $e_{10}$ in $\mE_1$, while $e_2$ is not. \end{example} \begin{figure}[h!] \centering \begin{tikzpicture} \draw[blue, line width=1.5pt] (1,2) .. controls (2,1) .. (2,-0) ; \draw[blue, line width=1.5pt] (6.3,1) -- (7.3,-1) ; \draw[red, line width=1.5pt] (9.5,2.5) .. controls (9.2,1) .. 
(10,0); \tikzset{vertex/.style = {shape=circle,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{nnode/.style = {shape=circle,fill=myg,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{edge/.style = {->,> = stealth}} \tikzset{dedge/.style = {densely dotted,->,> = stealth}} \tikzset{ddedge/.style = {dashed,->,> = stealth}} \node[vertex] (S1) {$S$}; \node[shape=coordinate,right=\mynodespace of S1] (K) {}; \node[nnode,above=0.6\mynodespace of K] (V1) {$V_1$}; \node[nnode,below=0.6\mynodespace of K] (V2) {$V_2$}; \node[nnode,right=\mynodespace of K] (V3) {$V_3$}; \node[nnode,right=\mynodespace of V3] (V4) {$V_4$}; \node[vertex,right=3\mynodespace of V1 ] (T1) {$T_1$}; \node[vertex,right=3\mynodespace of V2] (T2) {$T_2$}; \draw[ddedge,bend left=15] (S1) to node[fill=white, inner sep=3pt]{\small $e_1$} (V1); \draw[ddedge,bend right=15] (S1) to node[fill=white, inner sep=3pt]{\small $e_2$} (V1); \draw[ddedge,bend left=15] (S1) to node[fill=white, inner sep=3pt]{\small $e_3$} (V2); \draw[ddedge,bend right=15] (S1) to node[fill=white, inner sep=3pt]{\small $e_4$} (V2); \draw[ddedge,bend left=0] (V1) to node[fill=white, inner sep=3pt]{\small $e_6$} (V3); \draw[edge,bend left=0] (V4) to node[fill=white, inner sep=3pt]{\small $e_{10}$} (T1); \draw[edge,bend left=0] (V4) to node[fill=white, inner sep=3pt]{\small $e_{11}$} (T2); \draw[edge,bend left=0] (V1) to node[fill=white, inner sep=3pt]{\small $e_{5}$} (T1); \draw[edge,bend left=0] (V2) to node[fill=white, inner sep=3pt]{\small $e_{8}$} (T2); \draw[ddedge,bend left=0] (V2) to node[fill=white, inner sep=3pt]{\small $e_7$} (V3); \draw[ddedge,bend left=0] (V3) to node[fill=white, inner sep=3pt]{\small $e_{9}$} (V4); \node[text=blue] (E1) at (2.3,0.0) {$\mE_1$}; \node[text=blue] (E11) at (7.6,-1) {$\mE_1$}; \node[text=red] (E2) at (10.3,0.1) {$\mE_2$}; \end{tikzpicture} \caption{{{Network $\mN$ for Examples~\ref{ex:imme}, \ref{extransf}, and ~\ref{ex:tulipA}}}. \label{tulipsolved}} \end{figure} The following notion of channel will be crucial in our approach. It was formally defined in~\cite{RK18} using a recursive procedure. \begin{notation} \label{notat:specialtransfer} Let $\mN$, $\mE_1$ and $\mE_2$ be as in Definition~\ref{def:imme}. If $\mA$ is a network alphabet, $\mF$ is a network code for $(\mN,\mA)$, $\mU \subseteq \mE$, and $t \ge 0$, then we denote by \begin{equation} \label{chtd} \Omega[\mN,\mA,\mF,\mE_1 \to \mE_2,\mU \cap \mE_1,t]: \mA^{|\mE_1|} \dashrightarrow \mA^{|\mE_2|} \end{equation} the channel that describes the transfer from the edges of $\mE_1$ to those of $\mE_2$, when an adversary can corrupt up to $t$ edges from $\mU \cap \mE_1$. \end{notation} In this paper, we will not formally recall the definition of the channel introduced in Notation~\ref{notat:specialtransfer}. We refer to~\cite[page 205]{RK18} for further details. We will however illustrate the channel with an example. \begin{example} \label{extransf} Consider again the network $\mN$ and the edge sets $\mE_1$ and $\mE_2$ in Figure~\ref{tulipsolved}. Let~$\mU=\{e_1,e_2,e_3,e_4,e_6,e_7,e_9\}$ be the set of dashed (vulnerable) edges in the figure. Let $\mA$ be a network alphabet and let~$\mF$ be a network code for $(\mN,\mA)$. We want to describe the channel in~\eqref{chtd} for $t=1$, which we denote by $\Omega: \mA^3 \dashrightarrow \mA^2$ for convenience. We have $$\Omega(x_1,x_2,x_3)=\{(\mF_{V_1}(y_1,y_2),\mF_{V_4}(y_9)) \mid y=(y_1,y_2,y_9) \in \mA^3, \, \dH(x,y) \le 1\},$$ where $\dH$ is the Hamming distance on $\mA^3$. 
Note that the value of each edge of $\mE_2$ depends only on the values of the edges that are its immediate predecessors in $\mE_1$. For example, when computing the values that $e_{10}$ can take, the channel only considers the values that $e_9$ can take, even though both $e_1$ and $e_2$ precede $e_{10}$. This follows from the definition of~\eqref{chtd} proposed in~\cite{RK18}, which we adopt here. \end{example} \begin{remark} \label{rmk:immediate} Note that we do not require the edge-cuts $\mE_1$ and $\mE_2$ to be minimal or \textit{antichain} cuts (i.e., cuts where any two different edges cannot be compared with respect to the order $\preccurlyeq$). Furthermore, the channel~$\Omega[\mN,\mA,\mF,\mE_1 \to \mE_2,\mU \cap \mE_1,t]$ only takes into account, for each edge of $\mE_2$, its immediate predecessors in $\mE_1$. In other words, the channel~$\Omega[\mN,\mA,\mF,\mE_1 \to \mE_2,\mU \cap \mE_1,t]$ expresses the value of each edge of $\mE_2$ as a \textit{function} of the values of its immediate predecessors in $\mE_1$. \end{remark} We are now ready to state the main result of this section. \begin{theorem}[Double-Cut-Set Bound] \label{thm:dcsb} Let $\mN=(\mV,\mE, S, \bfT)$ be a network, $\mA$ a network alphabet, $\mU \subseteq \mE$ a set of edges and $t \ge 0$. Let $T \in \bd{T}$ and let $\mE_1$ and $\mE_2$ be edge-cuts between~$S$ and~$T$ with the property that~$\mE_1$ precedes~$\mE_2$. We have $$\CC_1(\mN,\mA,\mU,t) \le \max_{\mF} \, \CC_1(\Omega[\mN,\mA,\mF,\mE_1 \to \mE_2,\mU \cap \mE_1,t]),$$ where the maximum is taken over all the network codes $\mF$ for $(\mN,\mA)$. \end{theorem} \begin{proof} Fix a network code $\mF$ for $(\mN,\mA)$. We consider the (fictitious) scenario where up to $t$ errors can occur only on the edges from $\mU \cap \mE_1$. This scenario is modeled by the concatenation of channels \begin{multline} \label{con} \Omega[\mN,\mA,\mF,\out(S) \to \mE_1,\mU \cap \out(S),0] \, \blacktriangleright \, \Omega[\mN,\mA,\mF, \mE_1 \to \mE_2,\mU \cap \mE_1,t] \\ \blacktriangleright \, \Omega[\mN,\mA,\mF,\mE_2 \to \inn(T),\mU \cap \mE_2,0], \end{multline} where the three channels in~\eqref{con} are of the type introduced in Notation~\ref{notat:specialtransfer}. Note moreover that the first and the last channels in~\eqref{con} are deterministic (see Definition~\ref{dd1}), as we consider an adversarial power of 0. They describe the transfer from the source to $\mE_1$ and from $\mE_2$ to $T$, respectively. We set $\hat{\Omega}:=\Omega[\mN,\mA,\mF,\out(S) \to \mE_1,\mU \cap \out(S),0]$ and $\overline{\Omega}:=\Omega[\mN,\mA,\mF,\mE_2 \to \inn(T),\mU \cap \mE_2,0]$ to simplify the notation throughout the proof. The channel $\Omega[\mN,\mA,\mF,S \to T,\mU,t]$ is coarser (Definition~\ref{deffiner}) than the channel in~\eqref{con}, since in the latter the errors can only occur on a subset of $\mU$. In symbols, using Proposition~\ref{prop:11} we have \begin{equation*} \Omega[\mN,\mA,\mF,S \to T,\mU,t] \, \ge \, \hat{\Omega} \, \blacktriangleright \, \Omega[\mN,\mA,\mF, \mE_1 \to \mE_2,\mU \cap \mE_1,t] \, \blacktriangleright \, \overline{\Omega}. \end{equation*} By Propositions~\ref{prop:finer} and~\ref{dpi}, this implies that \begin{equation} \label{almost} \CC_1(\Omega[\mN,\mA,\mF,S \to T,\mU,t]) \le \CC_1(\Omega[\mN,\mA,\mF,\mE_1 \to \mE_2, \mU \cap \mE_1,t]). \end{equation} Since~\eqref{almost} holds for any $\mF$, Proposition~\ref{prop:aux} finally gives \begin{equation*} \label{mmm} \CC_1(\mN,\mA,\mU,t) \le \max_{\mF} \, \CC_1(\Omega[\mN,\mA,\mF,\mE_1 \to \mE_2,\mU \cap \mE_1,t]), \end{equation*} concluding the proof.
\end{proof} Our next step is to make the Double-Cut-Set Bound of Theorem~\ref{thm:dcsb} more explicit and~``easy'' to apply. More in detail, we now explain how Theorem~\ref{thm:dcsb} can be used to construct a simple~3-level network from a larger (possibly more complex) network $\mN$, whose capacity is an upper bound for the capacity of $\mN$. This strategy reduces the problem of computing an upper bound for the capacity of $\mN$ to that of estimating the capacity of the corresponding simple~3-level network. In turn, Subsection~\ref{sec:3to2reduc} often reduces the latter problem to that of computing an upper bound for a simple 2-level network, a problem we have studied extensively throughout the paper. \begin{corollary} \label{cor:tothree} Let $\mN=(\mV,\mE, S, \bfT)$ be a network, $\mA$ a network alphabet, $\mU \subseteq \mE$ a set of edges and $t \ge 0$. Let $T \in \bd{T}$ and let $\mE_1$ and $\mE_2$ be edge-cuts between~$S$ and~$T$ with the property that~$\mE_1$ precedes~$\mE_2$. Consider a simple 3-level network $\mN'$ with source $S$, terminal~$T$, and vertex layers~$\mV_1$ and~$\mV_2$. The vertices of~$\mV_1$ are in bijection with the edges of $\mE_1$ and the vertices of~$\mV_2$ with the edges of $\mE_2$. A vertex $V \in \mV_1$ is connected to vertex $V' \in \mV_2$ if and only if the edge of~$\mE_1$ corresponding to $V$ is an immediate predecessor of the edge of~$\mE_2$ corresponding to $V'$; see Definition~\ref{def:imme}. Denote by~$\mE'_S$ the edges directly connected with the source of $\mN'$, which we identify with the edges of~$\mE_1$ (consistently with how we identified these with the vertices in $\mV_1$). We then have $$\CC_1(\mN,\mA,\mU,t) \le \CC_1(\mN',\mA, \mU \cap \mE'_S,t).$$ \end{corollary} Before proving Corollary~\ref{cor:tothree}, we show how to apply it to the opening example of this paper. This will give us a sharp {upper} bound for its capacity, as we will show. \begin{example} \label{ex:tulipA} Consider the network $\mN$ of Figure~\ref{fig:introex1}, where the adversary can corrupt at most~$t=1$ of the dashed edges in $\mU=\{e_1,e_2,e_3,e_4,e_6,e_7,e_9\}$. We focus on terminal $T_1$ (a similar approach can be taken for $T_2$, since the network is symmetric) and consider the two edge-cuts~$\mE_1$ and $\mE_2$ depicted in Figure~\ref{tulipsolved}. Clearly, $\mE_1$ precedes $\mE_2$. Following Corollary~\ref{cor:tothree}, we construct a simple 3-level network $\mN'$ with source $S$, terminal~$T$, and vertex sets $\mV_1$ and $\mV_2$ of cardinalities 3 and 2, respectively. We depict the final outcome in Figure~\ref{fig:3lev}, where we label the vertices and some of the edges according to the edges of $\mN$ they are in bijection with. 
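The bipartite structure of $\mN'$ can be computed mechanically from Definition~\ref{def:imme}. The following Python sketch is an illustration only; the edge list is transcribed from Figure~\ref{tulipsolved}, and the helper functions are ad-hoc. It determines, for each edge of $\mE_2$, its immediate predecessors in $\mE_1$, recovering the connections of the induced network depicted in Figure~\ref{fig:3lev} below.
\begin{verbatim}
from functools import lru_cache

# Edges of the network N in the figure above, as name: (tail, head).
edges = {
    'e1': ('S', 'V1'), 'e2': ('S', 'V1'), 'e3': ('S', 'V2'), 'e4': ('S', 'V2'),
    'e5': ('V1', 'T1'), 'e6': ('V1', 'V3'), 'e7': ('V2', 'V3'), 'e8': ('V2', 'T2'),
    'e9': ('V3', 'V4'), 'e10': ('V4', 'T1'), 'e11': ('V4', 'T2'),
}
E1, E2 = ['e1', 'e2', 'e9'], ['e5', 'e10']

succ = {}
for (u, v) in edges.values():
    succ.setdefault(u, set()).add(v)

@lru_cache(maxsize=None)
def reaches(u, v):                      # is there a directed path from u to v?
    return u == v or any(reaches(w, v) for w in succ.get(u, ()))

def precedes(e, f):                     # e precedes f in the order on edges
    return e == f or reaches(edges[e][1], edges[f][0])

def immediate_predecessors(f):
    preds = [e for e in E1 if precedes(e, f)]
    return [e for e in preds
            if not any(g != e and precedes(e, g) and precedes(g, f) for g in preds)]

for f in E2:
    print(f, '<-', immediate_predecessors(f))
# e5  <- ['e1', 'e2']   (the two layer-1 vertices connected to the vertex for e5)
# e10 <- ['e9']         (only the layer-1 vertex for e9 is connected to that for e10)
\end{verbatim}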
\begin{figure}[htbp] \centering \scalebox{0.90}{ \begin{tikzpicture} \tikzset{vertex/.style = {shape=circle,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{nnode/.style = {shape=circle,fill=myg,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{edge/.style = {->,> = stealth}} \tikzset{dedge/.style = {densely dotted,->,> = stealth}} \tikzset{ddedge/.style = {dashed,->,> = stealth}} \node[vertex] (S1) {$S$}; \node[shape=coordinate,right=\mynodespace of S1] (K) {}; \node[nnode,above=0.5\mynodespace of K] (V1) {$V_1$}; \node[nnode,right=-0.13\mynodespace of K] (V2) {$V_2$}; \node[nnode,below=0.5\mynodespace of K] (V9) {$V_9$}; \node[nnode,right=0.9\mynodespace of V1] (V5) {$V_5$}; \node[nnode,right=0.9\mynodespace of V9 ] (V10) {$V_{10}$}; \node[vertex,right=1.8\mynodespace of V2] (T) {$T$}; \draw[edge,bend left=0] (S1) to node[fill=white, inner sep=0pt]{$e_1$} (V1); \draw[edge,bend right=0] (S1) to node[fill=white, inner sep=0pt]{$e_2$} (V2); \draw[edge,bend left=0] (S1) to node[fill=white, inner sep=0pt]{$e_9$} (V9); \draw[edge,bend left=0] (V1) to node{} (V5); \draw[edge,bend left=0] (V2) to node{} (V5); \draw[edge,bend left=0] (V9) to node{} (V10); \draw[edge,bend left=0] (V5) to node[fill=white, inner sep=0pt]{$e_5$} (T); \draw[edge,bend left=0] (V10) to node[fill=white, inner sep=0pt]{$e_{10}$} (T); \end{tikzpicture} } \caption{The 3-level network $\mN'$ induced by the network $\mN$ of Figure~\ref{tulipsolved}. \label{fig:3lev}} \end{figure} The next step is to make the edges of $\mU \cap \mE'_S=\{e_1,e_2,e_9\}$ vulnerable and consider an adversary capable of corrupting at most $t=1$ of them. We thus consider the network in Figure~\ref{fig:3levB} after renumbering the edges and vertices. \begin{figure}[htbp] \centering \scalebox{0.90}{ \begin{tikzpicture} \tikzset{vertex/.style = {shape=circle,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{nnode/.style = {shape=circle,fill=myg,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{edge/.style = {->,> = stealth}} \tikzset{dedge/.style = {densely dotted,->,> = stealth}} \tikzset{ddedge/.style = {dashed,->,> = stealth}} \node[vertex] (S1) {$S$}; \node[shape=coordinate,right=\mynodespace of S1] (K) {}; \node[nnode,above=0.5\mynodespace of K] (V1) {$V_1$}; \node[nnode,right=-0.13\mynodespace of K] (V2) {$V_2$}; \node[nnode,below=0.5\mynodespace of K] (V9) {$V_3$}; \node[nnode,right=0.9\mynodespace of V1] (V5) {$V_4$}; \node[nnode,right=0.9\mynodespace of V9 ] (V10) {$V_{5}$}; \node[vertex,right=1.8\mynodespace of V2] (T) {$T$}; \draw[ddedge,bend left=0] (S1) to node[fill=white, inner sep=0pt]{$e_1$} (V1); \draw[ddedge,bend right=0] (S1) to node[fill=white, inner sep=0pt]{$e_2$} (V2); \draw[ddedge,bend left=0] (S1) to node[fill=white, inner sep=0pt]{$e_3$} (V9); \draw[edge,bend left=0] (V1) to node{} (V5); \draw[edge,bend left=0] (V2) to node{} (V5); \draw[edge,bend left=0] (V9) to node{} (V10); \draw[edge,bend left=0] (V5) to node{} (T); \draw[edge,bend left=0] (V10) to node{} (T); \end{tikzpicture} } \caption{The 3-level network $\mN'$ induced by the network $\mN$ of Figure~\ref{tulipsolved} where the vulnerable edges are dashed. \label{fig:3levB}} \end{figure} We finally apply the procedure described in Subsection~\ref{sec:3to2reduc} to obtain a 2-level network from~$\mN'$, whose capacity is an upper bound for that of~$\mN'$. It is easy to check that the 2-level network obtained from the network in Figure~\ref{fig:3levB} is precisely the Diamond Network $\mathfrak{A}_1$ introduced in Section~\ref{sec:diamond}; see Figure~\ref{fig:diamond}. 
Therefore by combining Theorem~\ref{thm:diamond_cap}, Theorem~\ref{thm:channel}, and Corollary~\ref{cor:tothree}, we obtain \begin{equation} \label{finalestimate} \CC_1(\mN,\mA,\mU,1) \le \log_{|\mA|}(|\mA|-1). \end{equation} In Theorem~\ref{computC} below, we will prove that the above bound is met with equality. In particular, the procedure described in this example to obtain the bound in~\eqref{finalestimate} is sharp, and actually leads to the exact capacity value of the opening example network from Section \ref{sec:motiv}. \end{example} \begin{proof}[Proof of Corollary~\ref{cor:tothree}] We will prove that \begin{equation*} \CC_1(\Omega[\mN,\mA,\mF,\mE_1 \to \mE_2,\mU \cap \mE_1,t]) \le \CC_1(\mN',\mA,\mU\cap \mE'_S,t) \end{equation*} for every network code $\mF$ for $(\mN,\mA)$, which in turn establishes the corollary thanks to Theorem~\ref{thm:dcsb}. We fix $\mF$ and consider the auxiliary channel $\Omega:=\Omega[\mN,\mA,\mF,\mE_1 \to \mE_2,\mU \cap \mE_1,0]$, which is deterministic. By Remark~\ref{rmk:immediate}, the channel $\Omega$ expresses the value of each edge of $\mE_2$ as a function of the values of its immediate predecessors in $\mE_1$. By the construction of $\mN'$, there exists a network code $\mF'$ (which depends on $\mF$) for $(\mN',\mA)$ with the property that \begin{equation} \label{cc1} \Omega=\Omega[\mN',\mA,\mF',\mE'_S \to \inn(T),\mU \cap \mE'_S,0], \end{equation} where the edges of $\mE_1$ and $\mE_2$ are identified with those of $\mE'_S$ and $\inn(T)$ in $\mN'$ as explained in the statement. Now observe that the channel $\Omega[\mN,\mA,\mF,\mE_1 \to \mE_2,\mU \cap \mE_1,t]$ can be written as the concatenation \begin{equation} \label{cc2} \Omega[\mN,\mA,\mF,\mE_1 \to \mE_2,\mU \cap \mE_1,t] = \Omega[\mN,\mA,\mF,\mE_1 \to \mE_1,\mU \cap \mE_1,t] \blacktriangleright \Omega, \end{equation} where the first channel in the concatenation simply describes the action of the adversary on the edges of $\mU \cap \mE_1$ (in the terminology of~\cite{RK18}, the channel is called of \textit{Hamming type}; see~\cite[Sections~III and~V]{RK18}). By combining~\eqref{cc1} with~\eqref{cc2} and using the identifications between $\mE_1$ and $\mE_S'$, we can write \begin{align} \Omega[\mN',\mA,\mF',\mE'_S \to \inn(T),\mU \cap \mE'_S,t] &= \Omega[\mN',\mA,\mF',\mE'_S \to \mE'_S,\mU \cap \mE'_S,t] \nonumber \\ & \qquad \qquad \qquad \qquad \quad \blacktriangleright \Omega[\mN',\mA,\mF',\mE'_S \to \inn(T),\mU \cap \mE'_S,0] \nonumber \\ &= \Omega[\mN,\mA,\mF,\mE_1 \to \mE_1,\mU \cap \mE_1,t] \blacktriangleright \Omega \nonumber \\ &=\Omega[\mN,\mA,\mF,\mE_1 \to \mE_2, \mU \cap \mE_1,t]. \label{lll} \end{align} Note that, by definition, $\CC_1(\mN',\mA,\mU \cap \mE'_S,t) \ge \CC_1(\Omega[\mN',\mA,\mF',\mE'_S \to \inn(T),\mU \cap \mE'_S,t])$, which, combined with~\eqref{lll}, leads to $$\CC_1(\mN',\mA,\mU \cap \mE'_S,t) \ge \CC_1(\Omega[\mN,\mA,\mF,\mE_1 \to \mE_2,\mU \cap \mE_1,t]).$$ Since $\mF$ was an arbitrary network code for $(\mN,\mA)$, this is precisely what we wanted to show, concluding the proof of the corollary. \end{proof} Next, we give a capacity-achieving scheme for the network depicted in Figure \ref{fig:introex1}, proving that the estimate in~\eqref{finalestimate} is sharp. \begin{theorem} \label{computC} Let $\mN$ and $\mU$ be as in Example~\ref{ex:tulipA}; see also Figure~\ref{fig:introex1}. 
Then for all network alphabets $\mA$ we have $$\CC_1(\mN,\mA,\mU,1) = \log_{|\mA|}(|\mA|-1).$$ \end{theorem} \begin{proof} The fact that $\CC_1(\mN,\mA,\mU,1) \le \log_{|\mA|}(|\mA|-1)$ has already been shown in Example~\ref{ex:tulipA} when illustrating how to apply Corollary~\ref{cor:tothree}. We will give a scheme that achieves the desired capacity value. Reserve an alphabet symbol $* \in \mA$. The source $S$ emits any symbol from~$\mA \setminus \{*\}$ via a 4-times repetition code. Vertices $V_1$ and $V_2$ proceed as follows: If the symbols on their incoming edges are equal, they forward that symbol; otherwise they output $*$. Vertex~$V_3$ proceeds as follows: If one of the two received symbols is different from $*$, then it forwards that symbol. If both received symbols are different from $*$, then it outputs $*$ over $e_9$. The vertex $V_4$ just forwards. Decoding is done as follows. $T_1$ and $T_2$ look at the edges~$e_5$ and~$e_8$, respectively. If they do not receive $*$ over those edges, they trust the received symbol. If one of them is~$*$, then the corresponding terminal trusts the outgoing edge from $V_4$. For example, if $e_5$ carries $*$, then $T_1$ trusts $e_{10}.$ It is not difficult to see that this scheme defines a network code~$\mF$ for~$(\mN,\mA)$ and an unambiguous outer code $\mC$ of cardinality $|\mA|-1$, establishing the theorem. \end{proof} \begin{figure}[h!] \centering \begin{tikzpicture} \draw[blue, line width=1.5pt] (4.2,4) .. controls (4,2.2) and (1.8,-1) .. (5,-2); \draw[red, line width=1.5pt] (11,3) .. controls (11,2) and (11,1) .. (12,0); \tikzset{vertex/.style = {shape=circle,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{nnode/.style = {shape=circle,fill=myg,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{edge/.style = {->,> = stealth}} \tikzset{dedge/.style = {densely dotted,->,> = stealth}} \tikzset{ddedge/.style = {dashed,->,> = stealth}} \node[vertex] (S1) {$S$}; \node[shape=coordinate,right=\mynodespace of S1] (K) {}; \node[nnode,above=0.6\mynodespace of K] (V1) {$V_1$}; \node[nnode,below=0.6\mynodespace of K] (V2) {$V_2$}; \node[nnode,right=0.9\mynodespace of K] (V3) {$V_3$}; \node[nnode,right=0.9\mynodespace of V3] (V4) {$V_4$}; \node[vertex,right=3.4\mynodespace of V1 ] (T1) {$T_1$}; \node[vertex,right=3.4\mynodespace of V2] (T2) {$T_2$}; \draw[edge,bend left=15] (S1) to node[fill=white, inner sep=3pt]{\small $e_2$} (V1); \draw[edge,bend right=15] (S1) to node[fill=white, inner sep=3pt]{\small $e_3$} (V1); \draw[edge,bend left=15] (S1) to node[fill=white, inner sep=3pt]{\small $e_4$} (V2); \draw[edge,bend right=15] (S1) to node[fill=white, inner sep=3pt]{\small $e_5$} (V2); \draw[ddedge,bend left=15] (V1) to node[fill=white, inner sep=3pt]{\small $e_7$} (V3); \draw[ddedge,bend right=15] (V1) to node[fill=white, inner sep=3pt]{\small $e_8$} (V3); \draw[edge,bend left=20] (V4) to node[fill=white, inner sep=3pt]{\small $e_{14}$} (T1); \draw[edge,bend left=0] (V4) to node[fill=white, inner sep=3pt]{\small $e_{15}$} (T1); \draw[edge,bend right=20] (V4) to node[fill=white, inner sep=3pt]{\small $e_{16}$} (T1); \draw[edge,bend left=20] (V4) to node[fill=white, inner sep=3pt]{\small $e_{17}$} (T2); \draw[edge,bend right=0] (V4) to node[fill=white, inner sep=3pt]{\small $e_{18}$} (T2); \draw[edge,bend right=20] (V4) to node[fill=white, inner sep=3pt]{\small $e_{19}$} (T2); \draw[ddedge,bend left=15] (V2) to node[fill=white, inner sep=3pt]{\small $e_9$} (V3); \draw[ddedge,bend right=15] (V2) to node[fill=white, inner sep=3pt]{\small $e_{10}$} (V3); 
\draw[edge,bend left=27] (V3) to node[fill=white, inner sep=3pt]{\small $e_{11}$} (V4); \draw[edge,bend left=0] (V3) to node[fill=white, inner sep=3pt]{\small $e_{12}$} (V4); \draw[edge,bend right=27] (V3) to node[fill=white, inner sep=3pt]{\small $e_{13}$} (V4); \draw[ddedge,out=80,in=165] (S1) to node[fill=white, inner sep=3pt]{\small $e_1$} (T1); \draw[ddedge,out=-80,in=-165] (S1) to node[fill=white, inner sep=3pt]{\small $e_{6}$} (T2); \node[text=blue] (E11) at (5.3,-2) {$\mE_1$}; \node[text=red] (E2) at (12.3,-0.17) {$\mE_2$}; \end{tikzpicture} \caption{{{Network $\mN$ for Example \ref{ex:second}.}}\label{fig:secondex}} \end{figure} We conclude this section by illustrating with another example how the results of this paper can be combined and applied to derive upper bounds for the capacity of a large network. \begin{example} \label{ex:second} Consider the network $\mN$ and the edge sets $\mE_1$ and $\mE_2$ depicted in Figure \ref{fig:secondex}. Both $\mE_1$ and $\mE_2$ are edge-cuts between $S$ and $T_1$. Moreover, $\mE_1$ precedes~$\mE_2$. We start by observing that if there is no adversary present, then the capacity of the network~$\mN$ of Figure \ref{fig:secondex} is at most 4 since the min-cut between $S$ and any terminal~$T \in \{T_1,T_2\}$ is 4. It is straightforward to design a strategy that achieves this rate. When the adversary is allowed to change \textit{any} of the network edges, then Theorem \ref{thm:mcm} gives that the capacity is equal to 2, under certain assumptions on the alphabet. Now consider an adversary able to corrupt at most~$t=1$ of the edges from the set $\mU=\{e_1,e_7,e_8,e_9,e_{10}\}$, which are dashed in Figure~\ref{fig:secondex}. In this situation, the capacity expectation increases from the fully vulnerable case, and the Generalized Network Singleton Bound of Theorem \ref{sbound} predicts 3 as the largest achievable rate. Using the results of this paper, we will show that a rate of 3 is actually not achievable. Following Corollary~\ref{cor:tothree}, we construct a simple~3-level network $\mN'$ induced from $\mN$. We depict the final outcome in Figure~\ref{fig:3rc}. 
\begin{figure}[htbp] \centering \scalebox{0.90}{ \begin{tikzpicture} \tikzset{vertex/.style = {shape=circle,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{nnode/.style = {shape=circle,fill=myg,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{edge/.style = {->,> = stealth}} \tikzset{dedge/.style = {densely dotted,->,> = stealth}} \tikzset{ddedge/.style = {dashed,->,> = stealth}} \node[vertex] (S1) {$S$}; \node[shape=coordinate,right=\mynodespace of S1] (K) {}; \node[nnode,above=0.8\mynodespace of K] (V1) {$V_1$}; \node[nnode,above=0.3\mynodespace of K] (V2) {$V_2$}; \node[nnode,right=-0.13\mynodespace of K] (V3) {$V_3$}; \node[nnode,below=0.3\mynodespace of K] (V4) {$V_4$}; \node[nnode,below=0.8\mynodespace of K] (V5) {$V_5$}; \node[nnode,right=0.9\mynodespace of V1] (V6) {$V_6$}; \node[nnode,right=0.9\mynodespace of V3 ] (V7) {$V_{7}$}; \node[nnode,right=0.9\mynodespace of V4] (V8) {$V_8$}; \node[nnode,right=0.9\mynodespace of V5 ] (V9) {$V_{9}$}; \node[vertex,right=1.8\mynodespace of V3] (T) {$T$}; \draw[ddedge,bend left=0] (S1) to node{} (V1); \draw[ddedge,bend right=0] (S1) to node{} (V2); \draw[ddedge,bend left=0] (S1) to node{} (V3); \draw[ddedge,bend right=0] (S1) to node{} (V4); \draw[ddedge,bend left=0] (S1) to node{} (V5); \draw[edge,bend left=0] (V1) to node{} (V6); \draw[edge,bend left=0] (V2) to node[]{} (V7); \draw[edge,bend left=0] (V2) to node[]{} (V8); \draw[edge,bend left=0] (V2) to node[]{} (V9); \draw[edge,bend left=0] (V3) to node[]{} (V7); \draw[edge,bend left=0] (V3) to node[]{} (V8); \draw[edge,bend left=0] (V3) to node[]{} (V9); \draw[edge,bend left=0] (V4) to node[]{} (V7); \draw[edge,bend left=0] (V4) to node[]{} (V8); \draw[edge,bend left=0] (V4) to node[]{} (V9); \draw[edge,bend left=0] (V5) to node[]{} (V7); \draw[edge,bend left=0] (V5) to node[]{} (V8); \draw[edge,bend left=0] (V5) to node[]{} (V9); \draw[edge,bend left=0] (V6) to node{} (T); \draw[edge,bend left=0] (V7) to node[]{} (T); \draw[edge,bend left=0] (V8) to node{} (T); \draw[edge,bend left=0] (V9) to node[]{} (T); \end{tikzpicture} } \caption{The 3-level network $\mN'$ induced by the network $\mN$ of Figure~\ref{fig:secondex}. Vulnerable edges are dashed. \label{fig:3rc}} \end{figure} Lastly, we apply the procedure described in Subsection~\ref{sec:3to2reduc} to obtain a 2-level network from~$\mN'$, whose capacity will be an upper bound for that of~$\mN'$. It can easily be seen that the~2-level network obtained is precisely the network $\mathfrak{B}_3$ of Family \ref{fam:b} introduced in Section~\ref{sec:families}. This is depicted in Figure \ref{fig:2rc}. Therefore, by combining Theorem~\ref{thm:channel}, Theorem \ref{thm:notmet} and Corollary~\ref{cor:tothree}, we finally obtain $$\CC_1(\mN,\mA,\mU,1) < 3.$$ At the time of writing this paper we cannot give an exact expression for the value of $\CC_1(\mN,\mA,\mU,1)$ for an arbitrary alphabet $\mA$. This remains an open problem. 
\end{example} \begin{figure}[htbp] \centering \begin{tikzpicture} \tikzset{vertex/.style = {shape=circle,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{nnode/.style = {shape=circle,fill=myg,draw,inner sep=0pt,minimum size=1.9em}} \tikzset{edge/.style = {->,> = stealth}} \tikzset{dedge/.style = {densely dotted,->,> = stealth}} \tikzset{ddedge/.style = {dashed,->,> = stealth}} \node[vertex] (S1) {$S$}; \node[shape=coordinate,right=\mynodespace of S1] (K) {}; \node[nnode,above=0.5\mynodespace of K] (V1) {$V_1$}; \node[nnode,below=0.5\mynodespace of K] (V2) {$V_2$}; \node[vertex,right=2\mynodespace of S1] (T) {$T$}; \draw[ddedge,bend left=0] (S1) to node[]{} (V1); \draw[ddedge,bend left=30] (S1) to node[]{} (V2); \draw[ddedge,bend left=10] (S1) to node[]{} (V2); \draw[ddedge,bend right=10] (S1) to node[]{} (V2); \draw[ddedge,bend right=30] (S1) to node[]{} (V2); \draw[edge,bend right=0] (V1) to node[]{} (T); \draw[edge,bend left=20] (V2) to node[]{} (T); \draw[edge,bend left=0] (V2) to node[]{} (T); \draw[edge,bend right=20] (V2) to node[]{} (T); \end{tikzpicture} \caption{{{The simple 2-level network associated to the network of Figure~\ref{fig:3rc}.}}}\label{fig:2rc} \end{figure} \section{Linear Capacity} \label{sec:linear} As mentioned in Sections \ref{sec:motiv} and \ref{sec:channel}, in the presence of an ``unrestricted'' adversarial noise the~(1-shot) capacity of a network can be achieved by combining a rank-metric (outer) code with a~\textit{linear} network code; see~\cite{SKK,MANIAC,RK18,KK1}. In words, this means that the intermediate nodes of the network focus on spreading information, while decoding is performed in an end-to-end fashion. In this section, we show that the strategy outlined above is far from being optimal when the adversary is restricted to operate on a proper subset of the network edges. In fact, we establish some strong separation results between the capacity (as defined in Section~\ref{sec:channel}) and the ``linear'' capacity of a network, which we define by imposing that the intermediate nodes combine packets linearly. This indicates that implementing network \textit{decoding} becomes indeed necessary to achieve capacity in the scenario where the adversary is restricted. The following definitions make the concept of linear capacity rigorous. \begin{definition} Let $\mN=(\mV,\mE, S, \bfT)$ be a network and $\mA$ an alphabet. Consider a \textbf{network code} $\mF$ for $(\mN,\mA)$ as in Definition \ref{def:nc}. We say that $\mF$ is a \textbf{linear} network code if $\mA$ is a finite field and each function~$\mF_V$ is $\mA$-linear. \end{definition} We can now define the linear version of the 1-shot capacity of an adversarial network, i.e., the analogue of Definition~\ref{def:capacities}. \begin{definition} \label{def:lin_capacities} Let $\mN=(\mV,\mE, S, \bfT)$ be a network, $\mA$ a finite field, $\mU \subseteq \mE$ an edge set, and~$t \ge 0$ an integer. The (\textbf{1-shot}) \textbf{linear capacity} of $(\mN,\mA,\mU,t)$ is the largest real number~$\kappa$ for which there exists an \textbf{outer code} $$\mC \subseteq \mA^{\degout(S)}$$ and a linear network code $\mF$ for~$(\mN,\mA)$ with $\kappa=\log_{|\mA|}(|\mC|)$ such that $\mC$ is unambiguous for each channel $\Omega[\mN,\mA,\mF,S \to T,\mU,t]$, $T \in \bd{T}$. The notation for such largest $\kappa$ is $$\CC^\lin_1(\mN,\mA,\mU,t).$$ \end{definition} Note that in the definition of linear capacity we do not require $\mC$ to be a linear code, but only that the network code $\mF$ is linear. 
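Before moving on, let us make the unambiguity requirement in Definition~\ref{def:lin_capacities} (shared with Definition~\ref{def:capacities}) concrete. The following Python sketch is purely illustrative: it brute-forces the condition that the fan-out sets of distinct codewords be pairwise disjoint on a toy channel chosen in the spirit of the networks of Section~\ref{sec:families} (it is not claimed to coincide with any of them). The source has four vulnerable outgoing edges, two feeding each of two relay nodes, each relay forwards one symbol to the terminal, and the adversary may corrupt at most one source edge. With linear relays over $\mathbb{F}_5$ even two codewords can be confused, in line with the phenomenon formalized in Theorem~\ref{thm:linmirr} below, whereas a simple nonlinear ``compare and flag'' strategy supports an unambiguous outer code with $|\mA|-1$ codewords.
\begin{verbatim}
A = list(range(5))      # toy alphabet, here the field F_5 = {0,...,4}
STAR = 0                # reserved flag symbol (a choice made for this sketch)

def fanout(x, relay1, relay2):
    # All terminal observations when at most one of the four source edges
    # is corrupted; relay_i maps its two incoming symbols to one output.
    received = [list(x)]                        # no corruption
    for i in range(4):                          # corrupt edge i to any value
        for v in A:
            y = list(x); y[i] = v
            received.append(y)
    return {(relay1(y[0], y[1]), relay2(y[2], y[3])) for y in received}

def unambiguous(code, relay1, relay2):
    # A code is unambiguous iff the fan-out sets of distinct codewords are disjoint.
    fans = {x: fanout(x, relay1, relay2) for x in code}
    return all(fans[x].isdisjoint(fans[z])
               for x in code for z in code if x < z)

lin = lambda u, v: (u + v) % 5                  # linear relay over F_5
nonlin = lambda u, v: u if u == v else STAR     # forward if equal, else flag

rep = [(a, a, a, a) for a in A if a != STAR]    # repetition code, flag reserved
print(unambiguous(rep, nonlin, nonlin))               # True:  |A|-1 messages survive
print(unambiguous([(1, 1, 1, 1), (2, 2, 2, 2)], lin, lin))   # False: ambiguity
\end{verbatim}
The second check mirrors, on this toy instance, the pair-construction argument used in the proof of Theorem~\ref{thm:linmirr}.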
The first result of this section shows that the linear capacity of any member of Family~\ref{fam:d} is zero. This is in sharp contrast with Theorem~\ref{thm:metd}.
\begin{theorem} \label{thm:linmirr}
Let $\mathfrak{D}_t=(\mV,\mE,S,\{T\})$ be a member of Family~\ref{fam:d}. Let $\mA$ be any finite field and let $\mU_S$ be the set of edges of $\mathfrak{D}_t$ directly connected to $S$. We have $$\CC^\lin_1(\mathfrak{D}_t,\mA,\mU_S,t)= 0.$$ In particular, the linear capacity of the Mirrored Diamond Network of Figure~\ref{fig:mirrored} is zero.
\end{theorem}
\begin{proof}
Let $q:=|\mA|$. Fix any linear network code $\mF=\{\mF_1,\mF_2\}$ for $(\mathfrak{D}_t,\mA)$ and let $\mC$ be an unambiguous code for the channel $\Omega[\mathfrak{D}_t,\mA,\mF,S \to T,\mU_S,t]$. Suppose that $|\mC| \ge 2$, let~$x,a \in \mC$ with $x \neq a$, and write $$x = (x_1,\ldots,x_{2t},x_{2t+1},\ldots,x_{4t}), \quad a = (a_1,\ldots,a_{2t},a_{2t+1},\ldots,a_{4t}),$$ and $$\mF_1(u_1,\ldots,u_{2t}) = \sum_{i=1}^{2t}\lambda_iu_i, \quad \mF_2(u_{2t+1},\ldots,u_{4t}) = \sum_{i=2t+1}^{4t}\lambda_iu_i,$$ where $\lambda_r \in \mathbb{F}_q$ for $1 \le r \le 4t$ and $u \in \mA^{4t}$. We let $\Omega := \Omega[\mathfrak{D}_t,\mA,\mF,S \to T,\mU_S,t]$ to simplify the notation throughout the remainder of the proof. We start by observing that $\lambda_1,\ldots,\lambda_{2t}$ cannot all be 0. Similarly, $\lambda_{2t+1},\ldots,\lambda_{4t}$ cannot all be 0 (it is easy to see that the adversary can cause ambiguity otherwise). Therefore we shall assume $\lambda_1 \ne 0$ and $\lambda_{4t} \ne 0$ without loss of generality. We will now construct vectors $y,b \in \mA^{4t}$ such that $\dH(x,y) = \dH(a,b)= 1$. Concretely, let
\begin{itemize}
\item $y_i = x_i \mbox{ for } 1\le i \le 4t-1$,
\item $y_{4t} = a_{4t} + \sum_{i=2t+1}^{4t-1} \lambda_{4t}^{-1}\lambda_i(a_i-x_i)$,
\item $b_{1} = x_{1} + \sum_{i=2}^{2t} \lambda_{1}^{-1}\lambda_i(x_i-a_i)$,
\item $b_i = a_i \mbox{ for } 2\le i \le 4t$.
\end{itemize}
It follows from the definitions that $\dH(x,y) = \dH(a,b)= 1$ and that $$z_x:=\left(\sum_{r=1}^{2t} \lambda_ry_r,\ \sum_{r=2t+1}^{4t} \lambda_ry_r \right) \in \Omega(x), \qquad z_a:=\left(\sum_{r=1}^{2t} \lambda_rb_r,\ \sum_{r=2t+1}^{4t} \lambda_rb_r\right) \in \Omega(a).$$ However, one easily checks that $z_x=z_a$, which implies that $\Omega(x) \cap \Omega(a) \neq \emptyset$ and contradicts the assumption that $\mC$ is unambiguous. Hence $|\mC| \le 1$, which establishes the theorem.
\end{proof}
By proceeding as in the proof of Theorem~\ref{thm:linmirr}, one can check that the linear capacity of any member of Family~\ref{fam:e} is zero as well. This can also be established by observing that $\mathfrak{E}_t$ is a ``subnetwork'' of $\mathfrak{D}_t$ for all $t$.
\begin{theorem} \label{thm:8.4}
Let $\mathfrak{E}_t=(\mV,\mE,S,\{T\})$ be a member of Family~\ref{fam:e}. Let $\mA$ be any finite field and let $\mU_S$ be the set of edges of $\mathfrak{E}_t$ directly connected to $S$. We have $$\CC^\lin_1(\mathfrak{E}_t,\mA,\mU_S,t)= 0.$$ In particular, the linear capacity of the Diamond Network of Section~\ref{sec:diamond} is zero.
\end{theorem}
We conclude this section by observing that the proof of Proposition \ref{prop:lin} actually uses a linear network code. In particular, the following holds.
\begin{proposition} \label{cor:ll}
Let $\mN=([a_1,\ldots,a_n],[b_1,\ldots,b_n])=(\mV,\mE,S,\{T\})$ be a simple 2-level network, $\mA$ a sufficiently large finite field, and $t \ge 0$ an integer. Let $\mU_S$ denote the set of edges directly connected to $S$.
Then $$\CC^\lin_1(\mN,\mA,\mU_S,t) \ge \max\left\{0,\sum_{i=1}^n\min\{a_i,b_i\} -2t\right\}.$$ \end{proposition} Finally, by combining Proposition \ref{cor:ll} and Theorem \ref{thm:notmet}, we obtain the following result on the capacities of the members of Family~\ref{fam:b}. \begin{corollary} \label{cor:sbs} Let $\mathfrak{B}_s=(\mV,\mE,S,\{T\})$ be a member of Family~\ref{fam:b}. Let $\mA$ be a sufficiently large finite field and let $\mU_S$ be the set of edges of $\mathfrak{B}_s$ directly connected to $S$. We have $$s >\CC_1(\mathfrak{B}_s,\mA,\mU_S,1) \ge \CC^\lin_1(\mathfrak{B}_s,\mA,\mU_S,1) \ge s-1.$$ \end{corollary} \section{Conclusions and Future Research Directions} \label{sec:open} In this paper, we considered the 1-shot capacity of multicast networks affected by adversarial noise restricted to a proper subset of vulnerable edges. We introduced a formal framework to study these networks based on the notions of adversarial channels and fan-out sets. We defined five families of 2-level networks that play the role of fundamental stepping stones in our developed theory, and derived upper and lower bounds for the capacities of these families. We also showed that upper bounds for 2-level and 3-level networks can be ported to arbitrarily large networks via a Double-Cut-Set Bound. Finally, we analyzed the capacity of certain partially vulnerable networks under the assumption that the intermediate nodes combine packets linearly. The results presented in this paper show that classical approaches to estimate or achieve capacity in multicast communication networks affected by an unrestricted adversary (cut-set bounds, linear network coding, rank-metric codes) are far from being optimal when the adversary is restricted to operate on a proper subset of the network edges. Moreover, the non-optimality comes precisely from limiting the adversary to operate on a certain region of the network, which in turn forces the intermediate nodes to partially \textit{decode} information before forwarding it towards the terminal. This is in strong contrast with the typical scenario within network coding, where capacity can be achieved in an \textit{end-to-end} fashion using (random) linear network coding combined with a rank-metric code. We conclude this paper by mentioning three research directions that naturally originate from our work. \begin{enumerate} \item It remains an open problem to compute the capacities of three of the five fundamental families of networks we introduced in Subsection~\ref{sec:families} for arbitrary values of the parameters. We believe that this problem is challenging and requires developing new coding theory methods of combinatorial flavor that extend traditional packing arguments. \item A very natural continuation of this paper lies in the study of the scenario where a network can be used multiple times for communication, which we excluded from this first treatment. The multi-shot scenario is modeled by the \textit{power channel} construction; see~\cite{RK18,shannon_zero}. \item Most of our results and techniques extend to networks having multiple sources. This is another research direction that arises from this paper very naturally. \end{enumerate} \bigskip \bibliographystyle{abbrv} \bibliography{ADV} \end{document}
2205.14604v1
http://arxiv.org/abs/2205.14604v1
Increasing rate of weighted product of partial quotients in continued fractions
\documentclass[12pt]{elsarticle} \usepackage{amssymb} \usepackage{amsthm} \usepackage{enumerate} \usepackage{amsmath,amssymb,amsfonts,amsthm,fancyhdr} \usepackage[colorlinks,linkcolor=black,anchorcolor=black,citecolor=black,CJKbookmarks=True]{hyperref} \newdefinition{rem}{Remark}[section] \newdefinition{theorem}{Theorem}[section] \newdefinition{corollary}{Corollary}[section] \newdefinition{definition}{Definition}[section] \newdefinition{lemma}{Lemma}[section] \newdefinition{prop}{Proposition}[section] \numberwithin{equation}{section} \def\bc{\begin{center}} \def\ec{\end{center}} \def\be{\begin{equation}} \def\ee{\end{equation}} \def\F{\mathbb F} \def\N{\mathbb N} \def\P{\texttt P} \def\Q{\mathbb Q} \def\G{\mathbb G} \def\R{\mathbb R} \def\Z{\mathbb Z} \def\B{\mathbf1} \newcommand\hdim{\dim_{\mathrm H}} \def\vep{\varepsilon} \def\para{\parallel} \def\dps{\displaystyle} \def\B{\mathscr{B}} \topmargin -1.5cm \textwidth 16.5cm \textheight 23.6cm \oddsidemargin 0pt \begin{document} \begin{frontmatter} \title{Increasing rate of weighted product of partial quotients in continued fractions} \author[a]{Ayreena~Bakhtawar}\ead{[email protected]} \author[b]{Jing Feng\corref{cor1}}\ead{[email protected]} \address[a]{School of Mathematics and Statistics, University of New South Wales, Sydney NSW 2052, Australia.} \address[b]{School of Mathematics and Statistics, Huazhong University of Science and Technology, Wuhan, 430074 PR China and LAMA UMR 8050, CNRS, Universit\'e Paris-Est Cr\'eteil, 61 Avenue du G\'en\'eral de Gaulle, 94010 Cr\'eteil Cedex, France } \cortext[cor1]{Corresponding author.} \begin{abstract}\par Let $[a_1(x),a_2(x),\cdots,a_n(x),\cdots]$ be the continued fraction expansion of $x\in[0,1)$. In this paper, we study the increasing rate of the weighted product $a^{t_0}_n(x)a^{t_1}_{n+1}(x)\cdots a^{t_m}_{n+m}(x)$ ,where $t_i\in \mathbb{R}_+\ (0\leq i \leq m)$ are weights. More precisely, let $\varphi:\mathbb{N}\to\mathbb{R}_+$ be a function with $\varphi(n)/n\to \infty$ as $n\to \infty$. For any $(t_0,\cdots,t_m)\in \mathbb{R}^{m+1}_+$ with $t_i\geq 0$ and at least one $t_i\neq0 \ (0\leq i\leq m)$, the Hausdorff dimension of the set $$\underline{E}(\{t_i\}_{i=0}^m,\varphi)=\left\{x\in[0,1):\liminf\limits_{n\to \infty}\dfrac{\log \left(a^{t_0}_n(x)a^{t_1}_{n+1}(x)\cdots a^{t_m}_{n+m}(x)\right)}{\varphi(n)}=1\right\}$$ is obtained. Under the condition that $(t_0,\cdots,t_m)\in \mathbb{R}^{m+1}_+$ with $0<t_0\leq t_1\leq \cdots \leq t_m$, we also obtain the Hausdorff dimension of the set \begin{equation*} \overline{E}(\{t_i\}_{i=0}^m,\varphi)=\left\{x\in[0,1):\limsup\limits_{n\to \infty}\dfrac{\log \left(a^{t_0}_n(x)a^{t_1}_{n+1}(x)\cdots a^{t_m}_{n+m}(x)\right)}{\varphi(n)}=1\right\}. \end{equation*} \end{abstract} \begin{keyword} Continued fractions, Hausdorff dimension, Product of partial quotients \MSC[2010] Primary 11K50, Secondary 37E05, 28A80 \end{keyword} \end{frontmatter} \section{Introduction} Each irrational number $x\in[0,1)$ admits a unique continued fraction expansion of the from \begin{equation}\label{cf11} x=\frac{1}{a_{1}(x)+\displaystyle{\frac{1}{a_{2}(x)+\displaystyle{\frac{1}{ a_{3}(x)+{\ddots }}}}}},\end{equation} where for each $n\geq 1$, the positive integers $a_{n}(x)$ are known as the partial quotients of $x.$ The partial quotients can be generated by using the Gauss transformation $T:[0,1)\rightarrow [0,1)$ defined as \begin{equation} \label{Gmp} T(0):=0 \ {\rm} \ {\rm and} \ T(x)=\frac{1}{x} \ ({\rm mod} \ 1), \text{ for } x\in (0,1). 
\end{equation}
In fact, let $a_{1}(x)=\big\lfloor \frac{1}{x}\big\rfloor $ (where $\lfloor \cdot \rfloor$ denotes the integral part of a real number). Then $a_{n}(x)=\big\lfloor \frac{1}{T^{n-1}(x)}\big\rfloor$ for $n\geq 2$. Sometimes \eqref{cf11} is written as $x= [a_{1}(x),a_{2}(x),a_{3}(x),\ldots ].$ Further, the $n$-th convergent $p_n(x)/q_n(x)$ of $x$ is defined by $p_n(x)/q_n(x)=[a_{1}(x),a_{2}(x),\ldots, a_n(x)].$
The metrical aspect of the theory of continued fractions has been very well studied due to its close connections with Diophantine approximation. For example, for any $\tau>0$ the famous Jarn\'{i}k-Besicovitch set
\begin{equation*}
{J}_\tau:=\left\{ x\in \lbrack 0,1): \left|x-\frac pq\right| <\frac{1}{q^{\tau+2}} \ \ \mathrm{for\ infinitely\ many\ }(p,q)\in \mathbb{Z} \times \mathbb{N}\right\},
\end{equation*}
can be described by using continued fractions. In fact,
\begin{equation}\label{Jset}
{J}_\tau=\left\{ x\in [ 0,1):a_{n+1}(x)\geq q^{\tau}_{n}(x)\ \ \mathrm{for\ infinitely\ many\ }n\in \mathbb{N}\right\}.
\end{equation}
For further details about this connection we refer to \cite{Go_41}. Thus the growth rate of the partial quotients reveals how well a real number can be approximated by rationals. The well-known Borel-Bernstein Theorem \cite{Be_12,Bo_12} states that for Lebesgue almost all $x\in[0,1)$, the inequality $a_{n}(x)\geq\varphi(n)$ holds for at most finitely many $n$ or for infinitely many $n$ according to the convergence or divergence of the series $\sum_{n=1}^{\infty}{1}/{\varphi(n)}$, respectively. However, for a rapidly growing function ${\varphi}$, the Borel-Bernstein Theorem gives no information beyond the fact that the corresponding set has Lebesgue measure zero. To distinguish the sizes of sets of Lebesgue measure zero, Hausdorff dimension is an appropriate tool and has gained much importance in the metrical theory of continued fractions. Jarn\'{i}k \cite{Ja_32} proved that the set of real numbers with bounded partial quotients has full Hausdorff dimension. Later on, Good \cite{Go_41} showed that the Hausdorff dimension of the set of numbers whose partial quotients tend to infinity is one half. After that, a lot of work has been done in the direction of improving the Borel-Bernstein Theorem; for example, the Hausdorff dimension of sets defined by imposing various conditions on the partial quotients $a_n(x)$ has been obtained in \cite{FaLiWaWu_13,FaLiWaWu_09,FaMaSo_21,Luczak,LiRa_016,LiRa_16,WaWu_008}.
Motivation for studying the growth rate of the products of consecutive partial quotients arose from the works of Davenport-Schmidt \cite{DaSc_70} and Kleinbock-Wadleigh \cite{KlWa_18}, where they considered improvements to Dirichlet's theorem. Let $\psi :[t_{0},\infty )\rightarrow \mathbb{R}_{+}$ be a monotonically decreasing function, where $t_{0}\geq 1$ is fixed. Denote by $D(\psi )$ the set of all real numbers $x$ for which the system
\begin{equation*}
|qx-p|\leq \psi (t)\ \ \text{and} \ \ |q|<t
\end{equation*}
has a nontrivial integer solution for all large enough $t$. A real number $x\in D(\psi )$ (resp. $x\in D(\psi )^{c}$) will be referred to as a \emph{$\psi$-Dirichlet improvable} (resp. \emph{$\psi$-Dirichlet non-improvable}) number. The starting point for the work of Davenport-Schmidt \cite{DaSc_70} and Kleinbock-Wadleigh {\protect\cite[Lemma 2.2]{KlWa_18}} is the observation that Dirichlet improvability is equivalent to a condition on the growth rate of the products of two consecutive partial quotients.
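Before turning to the precise form of this observation, we record a small computational illustration of the basic objects introduced above. The following Python sketch is purely illustrative (floating-point evaluation of the Gauss map is reliable only for the first several iterates): it computes the partial quotients $a_n(x)=\big\lfloor 1/T^{n-1}(x)\big\rfloor$ and the convergents $p_n(x)/q_n(x)=[a_1(x),\ldots,a_n(x)]$, and checks the standard approximation bound $|x-p_n(x)/q_n(x)|<1/q_n(x)^2$ for $x=\sqrt{2}-1=[2,2,2,\ldots]$.
\begin{verbatim}
from fractions import Fraction
from math import floor, sqrt

def partial_quotients(x, n):
    # First n partial quotients of x in (0,1), via the Gauss map T(x) = 1/x (mod 1).
    a = []
    for _ in range(n):
        a.append(floor(1 / x))
        x = 1 / x - floor(1 / x)          # apply T
    return a

def convergent(a):
    # p_n/q_n = [a_1, ..., a_n], computed as an exact fraction from the inside out.
    value = Fraction(0)
    for ai in reversed(a):
        value = 1 / (Fraction(ai) + value)
    return value

x = sqrt(2) - 1                            # continued fraction expansion [2, 2, 2, ...]
a = partial_quotients(x, 10)
print(a)                                   # [2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
for n in range(1, 6):
    pq = convergent(a[:n])
    print(n, pq, abs(x - pq) < 1 / pq.denominator ** 2)   # the bound holds for each n
\end{verbatim}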
Precisely, they observed that \begin{align*} x\in D(\psi) &\Longleftrightarrow |q_{n-1}x-p_{n-1}|\le \psi(q_n),\text{ for all } n \text{ large } \\ &\Longleftrightarrow [a_{n+1}, a_{n+2},\cdots]\cdot [a_n, a_{n-1},\cdots, a_1]\ge ((q_n\psi(q_n))^{-1}-1), \text{ for all } n \text{ large. } \end{align*} Then \begin{multline*} \Big\{x\in [0,1)\colon a_n(x)a_{n+1}(x)\ge ((q_n(x)\psi(q_n(x)))^{-1}-1)^{-1} \ {\text{for i.m.}}\ n\in \N\Big\}\subset D^{\mathsf{c}}(\psi)\\ \subset \Big\{x\in [0,1)\colon a_n(x)a_{n+1}(x)\ge 4^{-1}((q_n(x)\psi(q_n(x)))^{-1}-1)^{-1}\ {\text{for i.m.}}\ n\in \N\Big\}, \end{multline*} where i.m. stands for infinitely many. In other words, a real number $x\in [0, 1)\setminus\mathbb{Q}$ is $\psi$-Dirichlet improvable if and only if the products of consecutive partial quotients of $x$ do not grow quickly. We refer the reader to \cite{Ba_20,BaHuKlWa_22,FeXu_21,HuaWu_19,HuWuXu_19} for more metrical results related with the set of Dirichlet non-improvable numbers. As a consequence of Borel-Bernstein Theorem, for almost all $x\in[0,1)$ there exists a subsequence of partial quotients tending to infinity with a linear speed. In other words, for Lebesgue almost every $x\in[0,1)$ \begin{equation*} \limsup_{n\to\infty}\frac{\log a_{n}(x)}{\log n}=1. \end{equation*} Taking inspirations from the study of the growth rate of the products of consecutive partial quotients for the real numbers, in this paper we consider the growth rate of the products of the consecutive weighted partial quotients. More precisely, by \cite[Theorem 1.4]{BaHuKlWa_22}, we have for Lebesgue almost all $x\in[0,1)$ \begin{equation}\label{ES2} \limsup_{n\to\infty}\frac{\log a^{t_0}_{n}(x)a^{t_1}_{n+1}(x)\cdots a^{t_{m}}_{n+m}(x)}{\log n^{t_{\max}}}=1, \end{equation} where $t_{\max}=\max\{t_i:0\leq i\leq m\}$. This paper is concerned with Hausdorff dimension of some exceptional sets of \eqref{ES2}. Let $\varphi: \mathbb{N}\to \mathbb{R}_+$ be a function satisfying $\varphi(n)/n\to \infty$ as $n\to\infty $ and let $t_i\in \mathbb{R}_+\ (0\leq i \leq m)$. Define the sets \begin{equation*} \overline{E}(\{t_i\}_{i=0}^m,\varphi)=\left\{x\in[0,1):\limsup\limits_{n\to \infty}\dfrac{\log \left(a^{t_0}_n(x)a^{t_1}_{n+1}(x)\cdots a^{t_m}_{n+m}(x)\right)}{\varphi(n)}=1\right\}, \end{equation*} and \begin{equation*} \underline{E}(\{t_i\}_{i=0}^m,\varphi)=\left\{x\in[0,1):\liminf\limits_{n\to \infty}\dfrac{\log \left(a^{t_0}_n(x)a^{t_1}_{n+1}(x)\cdots a^{t_m}_{n+m}(x)\right)}{\varphi(n)}=1\right\}. \end{equation*} The study of the level sets about the growth rate of $\{a_n(x)a_{n+1}(x) : n \geq 1\}$ relative to that of $\{q_n(x) : n \geq 1\}$ was discussed in \cite{HuaWu_19}. Let $m\geq2$ be an integer and $\Psi: \mathbb{N}\to \mathbb{R}_{+}$ be a positive function. The Lebesgue measure and the Hausdorff dimension of the set \begin{equation}\label{em} \left\{x\in[0, 1): {a_{n}(x)\cdots a_{n+m}(x)}\geq \Psi(n) \ \text{for infinitely many} \ n\in \N \right\}, \end{equation} have been comprehensively determined by Huang-Wu-Xu \cite{HuWuXu_19}. Very recently the results of \cite{HuWuXu_19} were generalized by Bakhtawar-Hussain-Kleinbock-Wang \cite{BaHuKlWa_22} to a weighted generalization of the set \eqref{em}. For more details we refer the reader to \cite{BaHuKlWa_22,HuWuXu_19}. Our main results are as follows. \begin{theorem}\label{foralln} Let $\varphi: \mathbb{N}\to \mathbb{R}_+$ be a function satisfying $\varphi(n)/n\to \infty$ as $n\to\infty $. 
Write \begin{equation*} \log B=\limsup\limits_{n\rightarrow \infty} \frac{ \log \varphi(n)}{n}. \end{equation*} Assume $B\in[1,\infty]$. Then for any $(t_0,\cdots,t_m)\in \mathbb{R}^{m+1}_+$ with $t_i\geq 0$ and at least one $t_i\neq0 \ (0\leq i\leq m)$, \begin{equation*} \hdim \underline{E}(\{t_i\}_{i=0}^m,\varphi)=\frac{1}{1+B}. \end{equation*} \end{theorem} \begin{theorem}\label{forinfinitelymanyn} Let $\varphi: \mathbb{N}\to \mathbb{R}_+$ be a function satisfying $\varphi(n)/n\to \infty$ as $n\to\infty $. Write \begin{equation*} \log b=\liminf\limits_{n\rightarrow \infty} \frac{ \log \varphi(n)}{n}. \end{equation*} Assume $b\in[1,\infty]$. Then for any $(t_0,t_1,\cdots,t_m)\in \mathbb{R}_+^{m+1}$ with $0<t_0\leq t_1\leq \cdots \leq t_m$, we have \begin{equation*} \hdim \overline{E}(\{t_i\}_{i=0}^m,\varphi)=\frac{1}{1+b}. \end{equation*} \end{theorem} \begin{rem} Note that in Theorem \ref{forinfinitelymanyn} we are only able to treat the case when the sequence $\{t_{i}\}_{i=0}^{m}$ is nondecreasing. We would like to drop this monotonic condition. Indeed, our method for the upper bound is true for all sequences $\{t_{i}\}_{i=0}^{m}$. However, when dealing with the lower bound, the sequence $\{c_n\}_{n\geq1}$ we construct (see the proof for details) might not be bounded away from 0 once we drop the monotonic condition, which is important in constructing a suitable subset of $\overline{E}(\{t_i\}_{i=0}^m,\varphi)$. \end{rem} \section{Preliminaries} In this section, we fix some notations and recall some known results in theory of continued fraction expansions. For an irrational number $x\in[0,1)$, recall $a_n(x) $ is the $n$-th partial quotient of $x$ in its continued fraction expansion. The sequences $\{p_n(x)\}_{n\geq1},$ $\{q_n(x)\}_{n\geq1}$ are the numerator and denominator of the $n$-th convergent of $x$. It is well-known that $\{p_n(x)\}_{n\geq1}$ and $\{q_n(x)\}_{n\geq1}$ can be obtain by the following recursive relations (see \cite{Kh_63}): \begin{equation}\label{sequence-qn} \begin{cases} &p_n(x)=a_n(x)p_{n-1}(x)+p_{n-2}(x),\ \text{for any $n \geq $ 1 };\\ &q_n(x)=a_n(x)q_{n-1}(x)+q_{n-2}(x),\ \text{for any $n \geq $ 1 }, \end{cases} \end{equation} with the conventions $p_{-1}=1,\ q_{-1}=0, \ p_0=0$ and $q_0=1$. For any $n$-tuple $(a_1,\cdots,a_n)\in \mathbb{N}^n$ with $n\geq 1$, we call $$I_n(a_1,\cdots,a_n)=\big\{x\in[0,1)\colon a_1(x)=a_1,\ a_2(x)=a_2, \cdots ,a_n(x)=a_n\big\},$$ a cylinder of order $n$. Note that $p_n(x)$ and $q_n(x)$ are determined by the first $n$ partial quotients of $x$. So all points in $I_n(a_1,\cdots,a_n)$ determine the same $p_n(x)$ and $q_n(x)$. Hence for simplicity, if there is no confusion, we write $a_n$, $p_n$ and $q_n$ to denote $a_n(x)$, $p_n(x)$ and $q_n(x)$ for $x\in I_n(a_1,\cdots,a_n)$ respectively. The following lemma is a collection of basic facts on continued fractions which can be found in the book of Khintchine \cite{Kh_63}. \begin{lemma} For any $(a_1,\cdots,a_n)\in \mathbb{N}^n$, let $q_n$ and $p_n$ be given recursively by \eqref{sequence-qn}. 
Then (1)\begin{equation*} I_n(a_1,\cdots,a_n)=\begin{cases} &\Big[\dfrac{p_n}{q_n},\dfrac{p_n+p_{n-1}}{q_n+q_{n-1}}\Big), \text{ if } n \text{ is even },\\ &\Big[\dfrac{p_n+p_{n-1}}{q_n+q_{n-1}},\dfrac{p_n}{q_n}\Big), \text{ if } n \text{ is odd }; \end{cases} \end{equation*} (2) $q_n\geq 2^{\frac{n-1}{2}},\ \prod_{k=1}^na_k\leq q_n \leq 2^n\prod_{k=1}^na_k;$ (3) \begin{equation*} \frac{1}{3a_{n+1}q_{n}^{2}}\,<\,\Big|x-\frac{p_{n}}{q_{n}}\Big|=\frac{1}{ q_n(q_{n+1}+T^{n+1}(x)q_n)}<\,\frac{1}{a_{n+1}q_{n}^{2}}, \end{equation*} and for any $n\geq1$ the derivative of $T^{n}$ is given by \begin{equation*} (T^{n})^{\prime }(x)=\frac{(-1)^{n}}{(xq_{n-1}-p_{n-1})^{2}}. \end{equation*} \end{lemma} The next theorem, known as Legendre's Theorem, connects 1-dimensional Diophantine approximation with continued fractions. \begin{theorem}[Legendre]\label{leg} Let $\frac{p}{q}$ be an irreducible rational number. Then \begin{equation*} \label{Legendre} \Big|x-\frac pq\Big|<\frac1{2q^2}\Longrightarrow \frac pq=\frac{p_n(x)}{ q_n(x)},\quad \mathrm{for\ some \ } n\geq 1. \end{equation*} \end{theorem} According to Legendre's theorem if an irrational $x$ is ``well" approximated by a rational $p/q$, then this rational must be a convergent of $x$. So, the continued fraction expansions is a quick and efficient tool for finding good rational approximations to real numbers. For more basic properties of continued fraction expansions, one can refer to \cite{Kh_63}. We also give some auxiliary results on the Hausdorff dimension theory of continued fractions that will be used later. \begin{lemma}[\cite{FaLiWaWu_13}]\label{anyset} Let $\{s_n\}_{n\geq1}$ be a sequence of positive integers tending to infinity, then for any positive integer number $N\geq2$, \begin{equation*} \begin{aligned} &\hdim \big\{x\in[0,1): s_n\leq a_n(x)< Ns_n,\ \text{for all}\ n \geq 1\big\}\\ =&\liminf_{n\to\infty}\frac{\log (s_1s_2\cdots s_n)}{2\log (s_1s_2\cdots s_n)+\log s_{n+1}}\\ =&\frac{1}{2+\limsup\limits_{n\to\infty}\frac{\log s_{n+1}}{\log (s_1s_2\cdots s_n)}}. \end{aligned} \end{equation*} \end{lemma} \begin{lemma} [\cite{FeWULiTs_97,Luczak}]\label{LUZARK RESULT} For any $a,c>1$, \begin{equation*} \begin{aligned} &\hdim \left\{x\in[0,1):a_n(x)\geq c^{a^n},\ \text{for all} \ n\geq1\right\}\\ =&\hdim \left\{x\in[0,1):a_n(x)\geq c^{a^n},\ \text{for infinitely many}\ n\in\mathbb{N}\right\}\\ =&\frac{1}{1+a}. \end{aligned} \end{equation*} \end{lemma} Applying Lemma \ref{LUZARK RESULT}, we obtain the follwing corollary which will be useful for the upper bound estimation on $\hdim \underline{E}(\{t_i\}_{i=0}^m,\varphi).$ \begin{corollary}\label{Luzarks cor} For any $a,c>1$ and $(t_0,\cdots,t_m)\in \mathbb{R}^{m+1}_+$ with $t_i\geq 0$ and at least one $t_i\neq0 \ (0\leq i\leq m)$, \begin{equation*} \begin{aligned} &\hdim \left\{x\in[0,1):a^{t_0}_n(x)a^{t_1}_{n+1}(x)\cdots a^{t_m}_{n+m}(x)\geq c^{a^n},\ \text{for all} \ n\geq1\right\}\\ =&\hdim \left\{x\in[0,1):a^{t_0}_n(x)a^{t_1}_{n+1}(x)\cdots a^{t_m}_{n+m}(x)\geq c^{a^n},\ \text{for infinitely many}\ n\geq1\right\}\\ =&\frac{1}{1+a}. \end{aligned} \end{equation*} \end{corollary} \begin{proof}Denote $k=\min\{0\leq i\leq m:\ t_{i}\neq0\}$. 
It is clear that for some $t_{j}\neq0 \ (0\leq j\leq m),$ \begin{equation*} \begin{aligned} &\left\{x\in[0,1):a^{t_k}_{n+k}(x)\geq c^{a^n},\ \text{for all} \ n\geq1\right\} \\ \subset&\left\{x\in[0,1):a^{t_0}_n(x)a^{t_1}_{n+1}(x)\cdots a^{t_m}_{n+m}(x)\geq c^{a^n},\ \text{ for all} \ n\geq1\right\}\\ \subset& \left\{x\in[0,1):a^{t_0}_n(x)a^{t_1}_{n+1}(x)\cdots a^{t_m}_{n+m}(x)\geq c^{a^n},\ \text{ for infinitely many} \ n\geq1\right\}\\ \subset&\left\{x\in[0,1):a^{t_{j}}_{n+j}(x)\geq c^{\frac{a^n}{m+1}},\ \text{ for infinitely many}\ n\geq1\right\}. \end{aligned} \end{equation*} From Lemma \ref{LUZARK RESULT}, we deduce that for some $t_{j}\neq0\ (0\leq j\leq m),$ \begin{equation*} \begin{aligned} &\hdim \left\{x\in[0,1):a^{t_{k}}_{n+k}(x)\geq c^{a^n},\ \text{ for all} \ n\geq1\right\}\\ =&\hdim \left\{x\in[0,1):a^{t_{j}}_{n+j}(x)\geq c^{a^n},\ \text{for infinitely many}\ n\in\mathbb{N}\right\}\\ =&\frac{1}{1+a}. \end{aligned} \end{equation*} Then the desired results are obtained. \end{proof} \section{Proof of theorem \ref{foralln}} Let $\varphi: \mathbb{N}\to \mathbb{R}_+$ be a positive function with $\varphi(n)\rightarrow\infty$ as $n\rightarrow \infty$. For any $(t_0,\cdots,t_m)\in \mathbb{R}^{m+1}_+$ with $t_i\geq 0$ and at least one $t_i\neq0 \ (0\leq i\leq m)$, we introduce the sets \begin{align*} ND(\varphi)=\left\{x\in[0,1):a^{t_0}_n(x)a^{t_1}_{n+1}(x)\cdots a^{t_m}_{n+m}(x)\geq \varphi(n),\text{ for all }\ n \geq 1\right\}, \end{align*} and \begin{align*} {ND}^{\prime}(\varphi)=\left\{x\in[0,1):a^{t_0}_n(x)a^{t_1}_{n+1}(x)\cdots a^{t_m}_{n+m}(x)\geq \varphi(n), \text{ for $n$ large enough }\right\}. \end{align*} In order to prove Theorem \ref{foralln}, we first give a complete characterization on the size of the sets $ND(\varphi)$ and ${ND}^{\prime}(\varphi)$ in terms of Hausdorff dimension. \begin{prop}\label{PQ set}For any $(t_0,\cdots,t_m)\in \mathbb{R}^{m+1}_+$ with $t_i\geq 0$ and at least one $t_i\neq0~ (0\leq i\leq m)$, \begin{equation*} \hdim ND(\varphi)=\hdim {ND}^{\prime}(\varphi)=\frac{1}{1+A},\ \text{where}\ \log A=\limsup\limits_{n\rightarrow \infty} \frac{\log \log \varphi(n)}{n}. \end{equation*} \end{prop} We remark that recently Zhang (\cite{Zh_20}) obtained the Hausdorff dimension results of $ND(\varphi)$ and ${ND}^{\prime}(\varphi)$ for the special case $t_{0}=t_{1}=1$, $t_{i}=0\ (i\geq2).$ \subsection{ Proof of Proposition \ref{PQ set} }\ \\ To prove Proposition \ref{PQ set}, we start with the following lemma. \begin{lemma} \label{a_ninfinity} For any $(t_0,\cdots,t_m)\in \mathbb{R}^{m+1}_+$ with $t_i\geq 0$ and at least one $t_i\neq0~(0\leq i\leq m)$, \begin{equation*} \hdim \left\{x\in[0,1): a^{t_0}_n(x)a^{t_1}_{n+1}(x)\cdots a^{t_m}_{n+m}(x)\rightarrow \infty, \ \text{as}\ n\rightarrow \infty\right\}=\frac 1 2. \end{equation*} \end{lemma} \begin{proof} Denote by $C(\infty)$ the set above and $k=\min\{0\leq i\leq m:\ t_{i}\neq0\}$. It is evident that \begin{equation*} \begin{aligned} &\left\{x\in[0,1): a^{t_k}_{n+k}(x)\rightarrow \infty, \ \text{as}\ n\rightarrow \infty\right\}\\ \subset& \left\{x\in[0,1): a^{t_0}_n(x)a^{t_1}_{n+1}(x)\cdots a^{t_m}_{n+m}(x)\rightarrow \infty, \ \text{as}\ n\rightarrow \infty\right\}. \end{aligned} \end{equation*} So, $\hdim C(\infty)\geq \frac 1 2.$ In the following, we give the upper bound for $\hdim C(\infty)$. \textbf{Step \uppercase\expandafter{\romannumeral1}.} We find a cover for $C(\infty)$. 
For any $M>0$, \begin{equation*} \begin{aligned} C(\infty)&\subset \left\{x\in[0,1): a^{t_0}_n(x)a^{t_1}_{n+1}(x)\cdots a^{t_m}_{n+m}(x)\geq M, \ \text{for $n$ large enough}\right\}\\ &=\bigcup_{N=1}^\infty \left\{x\in[0,1): a^{t_0}_n(x)a^{t_1}_{n+1}(x)\cdots a^{t_m}_{n+m}(x)\geq M, \ \text{for}\ n\geq N\right\}\\ &:=\bigcup_{N=1}^\infty E_M(N). \end{aligned} \end{equation*} It is clear that $\hdim C(\infty)\leq\hdim E_M(1)$, since $\hdim C(\infty)\leq\hdim \sup\limits_{N\geq1}E_M(N)$, and by \cite[Lemma 1]{Go_41}, we have $\hdim E_M(N)=\hdim E_M(1)$ for any $N\geq 1$. So it is sufficient to estimate the upper bound for $E_M(1)$. For any $n\geq1$, set $$D_n(M)=\left\{(a_1,\cdots,a_n)\in\mathbb{N}^n:\ a^{t_0}_ka^{t_1}_{k+1}\cdots a^{t_m}_{k+m}\geq M, \ \text{for}\ 1\leq k \leq n-m\right\}.$$ Hence, \begin{equation} \begin{aligned}\label{cover} E_M(1)\subset &\bigcup_{(a_1,\cdots,a_{n})\in D_{n}(M)}I_n(a_1,\cdots,a_n). \end{aligned} \end{equation} \medskip \textbf{Step \uppercase\expandafter{\romannumeral2}.} We construct a family of Bernoulli measures $\{\mu_t\}_{t>1}$ on $[0, 1)$. For each $t>1$ and any $(a_1,\cdots,a_n)\in \mathbb{N}^n,$ put $$\mu_t(I_n(a_1,\cdots,a_n))=e^{-nP(t)-t\Sigma_{j=1}^n\log a_j},$$ where $e^{P(t)}=\sum_{k=1}^\infty k^{-t}.$ It is easy to see that $$\sum_{a_{n+1}}\mu_t(I_n(a_1,\cdots,a_n,a_{n+1}))=\mu_t(I_n(a_1,\cdots,a_n))$$ and $$\sum_{(a_1,\cdots,a_n)\in\mathbb{N}^n}\mu_t(I_n(a_1,\cdots,a_n))=1.$$ So the measures $\{\mu_t\}_{t>1}$ are well defined by Kolmogorov's consistency theorem. Fix $s>\frac 1 2$ and set $t=s+\frac1 2>1$. Choose $M$ sufficiently large such that \begin{equation}\label{bernoulli-estimation} \left(s-\frac{1}{2}\right)\cdot\frac{\log M^{t^{-1}_{\max}}}{2m}\geq P(s+\frac{1}{2}), \end{equation} where $t_{\max}=\max\{t_i:0\leq i\leq m\}$. We claim that for any $(a_1,\cdots,a_n)\in D_n(M)$, \begin{equation}\label{estimate for q_n} q_n^{-2s}\leq\mu_{s+\frac 1 2}(I_n(a_1,\cdots,a_n)). \end{equation} More precisely, for any $(a_1,\cdots,a_n)\in D_n(M)$, by the fact that for $1\leq l\leq n-m$, $$a^{t_0}_la^{t_1}_{l+1}\cdots a^{t_m}_{l+m}\geq M,$$ then for $1\leq l\leq n-m$, $$a_la_{l+1}\cdots a_{l+m}\geq M^{t^{-1}_{\max}},$$ where $t_{\max}=\max\{t_i:0\leq i\leq m\}$. Then we have \begin{equation*} e^{-2s\sum_{j=1}^n\log a_j}\leq e^{-(s+\frac 1 2)\sum_{j=1}^n\log a_j-(s-\frac 1 2) \lfloor \frac {n}{ m}\rfloor \log M^{t^{-1}_{\max}}}. \end{equation*} Thus, by $q_n\geq \prod\limits_{i=1}^na_i$ and then \eqref{bernoulli-estimation}, we get \begin{equation*} q_n^{-2s}\leq e^{-2s\sum_{j=1}^n\log a_j}\leq e^{-(s+\frac 1 2)\sum_{j=1}^n\log a_j-nP(s+\frac 1 2)} \end{equation*} Therefore, by \eqref{cover} and \eqref{estimate for q_n}, \begin{equation*} \begin{aligned} \mathcal{H}^{s}(E_M(1))&\leq \liminf_{n\to \infty}\sum_{(a_1,\cdots,a_{n})\in D_{n}(M)}{\big|I_n(a_1,\cdots,a_{n})\big|}^s\\ &\leq \liminf_{n\to \infty}\sum_{(a_1,\cdots,a_{n})\in D_{n}(M)}\frac{1}{q_n^{2s}}\\ &\leq\liminf_{n\to \infty}\sum_{(a_1,\cdots,a_{n})\in D_{n}(M)}\mu_{s+\frac{1}{2}}(I_{n}(a_1,\cdots,a_{n}))=1. \end{aligned} \end{equation*} Hence $\hdim E_M(1)\leq s$, and then $\hdim C(\infty)\leq s$. Consequently, $\hdim C(\infty)\leq \frac 1 2$ by the arbitrariness of $s>\frac 1 2$. This completes the proof of Lemma \ref{a_ninfinity}. \end{proof} Now we are ready to prove Proposition \ref{PQ set}.\\ \textbf{\noindent\text{Proof of Proposition \ref{PQ set}:}} We see that $\hdim {ND}^{\prime}(\varphi)=\hdim ND(\varphi).$ The proof is divided into two cases according to $A=1$ or $A>1$. 
\emph{(1)} If $A=1,$ then for any $\epsilon>0$, $\varphi(n)\leq e^{ {(1+\epsilon)}^n}\ \text{holds for $n$ large enough},$ and we have $$\big\{x\in [0,1): \ a_n(x)\geq e^{ {(1+\epsilon)}^n}\ \text{for $n$ large enough}\big\}\subset {ND}^{\prime}(\varphi).$$ By Lemma \ref{LUZARK RESULT}, we obtain $$\hdim{ND}^{\prime}(\varphi)\geq \frac{1}{2}.$$ On the other hand, $$ND(\varphi)\subset \left\{x\in[0,1): a^{t_0}_n(x)a^{t_1}_{n+1}(x)\cdots a^{t_m}_{n+m}(x)\rightarrow \infty, \ \text{as}\ n\rightarrow \infty\right\}.$$ Thus, $$\hdim ND(\varphi)\leq \frac 1 2 .$$ \emph{(2)} If $A>1$, by the definition of limsup, for any $\epsilon>0$, \begin{equation*} \begin{cases}&\varphi(n)\geq e^{{(A-\epsilon)}^n },\; \text{for infinitely many} \ n \in\mathbb{N},\\ &\varphi(n)\leq e^{{(A+\epsilon)}^n },\; \text{for all sufficiently large}\;n. \end{cases} \end{equation*} Therefore $$\left\{x\in[0,1):a^{t_0}_n(x)a^{t_1}_{n+1}(x)\cdots a^{t_m}_{n+m}(x)\geq e^{ {(A+\epsilon)}^n}, \ \text{for any $n\geq1$}\right\}\subset {ND}^{\prime}(\varphi).$$ Applying Corollary \ref{Luzarks cor}, we obtain \begin{equation*}\hdim{ND}^{\prime}(\varphi)\geq \frac{1}{1+A+\epsilon}. \end{equation*} By the arbritrary of $\epsilon$, we have $$\hdim{ND}^{\prime}(\varphi)\geq\frac{1}{1+A}.$$ On the other hand, $$ND(\varphi)\subset\left\{x\in[0,1):a^{t_0}_n(x)a^{t_1}_{n+1}(x)\cdots a^{t_m}_{n+m}(x)\geq e^{ {(A-\epsilon)}^n}, \ \text{for infinitely many}\ n\in \mathbb{N}\right\}.$$ From Corollary \ref{Luzarks cor}, we obtain \begin{equation*}\hdim ND(\varphi)\leq \frac{1}{1+A-\epsilon}. \end{equation*} Taking $\epsilon\to0$, we conclude $$\hdim ND(\varphi)\leq \frac{1}{1+A}.$$ \qed \ \\ Let us give a proof of Theorem \ref{foralln}. \subsection{ Proof of Theorem \ref{foralln} }\ \textbf{Upper bound:} For $x\in\underline{E}(\{t_i\}_{i=0}^m,\varphi)$, for any $\epsilon>0$, we have $$a^{t_0}_n(x)a^{t_1}_{n+1}(x)\cdots a^{t_m}_{n+m}\geq e^{(1-\epsilon)\varphi(n)}, \ \text{for $n$ large enough}.$$ Then it follows from Proposition \ref{PQ set} that \begin{equation*} \hdim \underline{E}(\{t_i\}_{i=0}^m,\varphi)\leq\frac{1}{1+B}. \end{equation*} \textbf{Lower bound:} It is trivial for $B=\infty$, so we only need to consider the case $1\leq B<\infty$. We construct a suitable Cantor subset of $\underline{E}(\{t_i\}_{i=0}^m,\varphi)$ in two steps. \textbf{Step \uppercase\expandafter{\romannumeral1}.} Since $ \log B=\limsup\limits_{n\rightarrow \infty} \frac{ \log \varphi(n)}{n },$ for any $\epsilon>0$, we have $\varphi(n)\leq {(B+\frac \epsilon 2)}^n$ for $n$ large enough. Hence, \begin{equation*} \varphi(n){(B+\epsilon)}^{j-n}\leq {(B+\frac \epsilon 2)}^n{(B+\epsilon)}^{j-n}\to 0, \ \text{as} \ n\to \infty. \end{equation*} We define a sequence $\{L_j\}_{j\geq 1}$: For $j,k\geq1$, let \begin{equation*} c_{j,k}=\begin{cases}&\exp({\varphi(k)}), \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ 1\leq k \leq j;\\ & \exp(\varphi(k){(B+\epsilon)}^{j-k}), \ k\geq j+1. \end{cases} \end{equation*} Define $L_j=\sup\limits_{k\geq 1}\{c_{j,k}\}$. Clearly, \begin{equation}\label{L j+1-L_j} L_j\leq L_{j+1}\leq L_{j}^{B+\epsilon}\ \text{and}\ L_j\geq e^{\varphi(j)}\ \text{for any}\ j\geq 1. \end{equation} By the first part of \eqref{L j+1-L_j}, \begin{equation*} \log L_{j+1}-\log L_j\leq (B+\epsilon-1)\log L_j. \end{equation*} Hence \begin{equation}\label{for limsup set} \log L_{n+1}-\log L_1\leq (B+\epsilon-1)\sum\limits_{j=1}^n\log L_j. \end{equation} We claim that \begin{equation}\label{L j+1-L_j-2} \liminf_{n\to\infty}\dfrac{\log L_n}{\varphi(n)}=1. 
\end{equation}
In fact, on the one hand, in view of the second part of \eqref{L j+1-L_j}, we see at once that $$\liminf_{n\to\infty}\dfrac{\log L_n}{\varphi(n)}\geq1.$$ For the opposite inequality, let $t_j:=\min\{k\geq1: c_{j,k}=L_j\}$. Notice that for many consecutive values of $j$, the number $t_j$ will be the same. More precisely, if $t_j<j$, $t_{t_j}=t_{t_j+1}=\cdots=t_j;$ if $t_j\geq j$, $t_{j}=t_{j+1}=\cdots=t_{t_j}.$ Let $\{l_i\}$ be the sequence of all the numbers $t_{t_j}$, arranged in strictly increasing order. Then we obtain $L_{l_i}=\exp{\varphi(l_i)}$ and thus $$\liminf_{n\to\infty}\dfrac{\log L_n}{\varphi(n)}\leq\liminf_{i\to\infty}\dfrac{\log L_{l_i}}{\varphi(l_i)}=1.$$
Let
\begin{equation*}
Z:=\liminf\limits_{n\to \infty}\dfrac{\log \left(L_n^{t_0}L_{n+1}^{t_1}\cdots L_{n+m}^{t_{m}}\right)}{\varphi(n)}.
\end{equation*}
We claim that
\begin{equation*}
t_k\leq Z<\infty,
\end{equation*}
where $k=\min\{0\leq i\leq m:\ t_{i}\neq0\}$. In fact, by the first part of \eqref{L j+1-L_j} and \eqref{L j+1-L_j-2}, we can check that
\begin{equation*}
Z\geq \liminf\limits_{n\to\infty}\dfrac{\log L_{n+k}^{t_k}}{\varphi(n)}\geq\liminf\limits_{n\to\infty}\dfrac{\log L_n^{t_k}}{\varphi(n)}= t_k.
\end{equation*}
On the other hand,
\begin{equation*}
\begin{aligned}
\liminf_{n\to\infty}\dfrac{\log \left(L_n^{t_0}L_{n+1}^{t_1}\cdots L_{n+m}^{t_{m}}\right)}{\varphi(n)}&\overset{\eqref{L j+1-L_j}}\leq \liminf\limits_{n\to \infty}\dfrac{\left(t_0+t_1(B+\epsilon)+\cdots+t_{m}{(B+\epsilon)}^m\right)\log L_n}{\varphi(n)}\\
&\overset{\eqref{L j+1-L_j-2}}= t_0+t_1(B+\epsilon)+\cdots+t_{m}{(B+\epsilon)}^m<\infty.
\end{aligned}
\end{equation*}
\textbf{Step \uppercase\expandafter{\romannumeral2}.} We use the sequence $\{L_j\}_{j\geq1}$ and $Z$ to construct a subset of $\underline{E}(\{t_i\}_{i=0}^m,\varphi).$ Define
\begin{equation*}
E(\{L_n\}_{n\geq1})=\left\{x\in[0,1):\lfloor L^{\frac 1 Z}_n\rfloor\leq a_n(x)<2\lfloor L^{\frac 1 Z}_n\rfloor,\ \text{for any}\ n\geq1\right\}.
\end{equation*}
Then
\begin{equation*}
\begin{aligned}
\liminf\limits_{n\to\infty}\dfrac{\log \left(a^{t_0}_na^{t_1}_{n+1}\cdots a^{t_{m}}_{n+m}\right)}{\varphi(n)}&= \liminf\limits_{n\to\infty}\dfrac{\log \left(L^{\frac{t_0}{Z}}_nL^{\frac{t_1}{Z}}_{n+1}\cdots L^{\frac{t_{m}}{Z}}_{n+m}\right)}{\varphi(n)}\\
&=\frac{1}{Z}\liminf\limits_{n\to\infty}\dfrac{\log \left(L^{t_0}_nL^{t_1}_{n+1}\cdots L^{t_{m}}_{n+m}\right)}{\varphi(n)}=1.
\end{aligned}
\end{equation*}
Hence $E(\{L_n\}_{n\geq1})\subset\underline{E}(\{t_i\}_{i=0}^m,\varphi).$ Since $\varphi(n)/n\to \infty$ as $n\to \infty$, by the second part of \eqref{L j+1-L_j}, we see that
\begin{equation*}
\lim\limits_{n\to\infty}\dfrac{\log (L_1L_2\cdots L_n)}{n}=\infty.
\end{equation*}
By Lemma \ref{anyset}, we obtain
\begin{equation*}
\hdim \underline{E}(\{t_i\}_{i=0}^m,\varphi)\geq \hdim E(\{L_n\}_{n\geq1})=\dfrac{1}{2+\limsup\limits_{n\to\infty}\frac{\log L_{n+1}}{\log (L_1L_2\cdots L_n)}}\overset{\eqref{for limsup set}}\geq \frac{1}{B+1+\epsilon}.
\end{equation*}
Taking $\epsilon\to0$, we conclude $$\hdim \underline{E}(\{t_i\}_{i=0}^m,\varphi)\geq \frac{1}{B+1}.$$ \qed
\section{Proof of Theorem \ref{forinfinitelymanyn}}
In this section, we give a proof of Theorem \ref{forinfinitelymanyn}. We adopt the strategies in \cite{LiRa_16}. The proof of the theorem splits into two parts: finding the upper bound and the lower bound separately.
\textbf{Upper bound:} For $x\in\overline{E}(\{t_i\}_{i=0}^m,\varphi)$, for any $\epsilon>0$, there exist infinitely many $n$ such that $$a^{t_0}_n(x)a^{t_1}_{n+1}(x)\cdots a^{t_m}_{n+m}\geq e^{(1-\epsilon)\varphi(n)}.$$ Then by \cite[Theorem 1.5]{BaHuKlWa_22}, \begin{equation*} \hdim \overline{E}_{m}(\{t_i\}_{i=0}^m,\varphi)\leq\frac{1}{1+b}. \end{equation*} \textbf{Lower bound:} We construct a suitable Cantor subset of $\overline{E}(\{t_i\}_{i=0}^m,\varphi)$ in two steps. \textbf{Step \uppercase\expandafter{\romannumeral1}.} We will construct a sequence $\{c_n\}_{n\geq1}$ of positive real numbers such that $$\limsup\limits_{n\to\infty}\dfrac{\log\left(c^{t_0}_nc^{t_1}_{n+1}\cdots c^{t_m}_{n+m}\right)}{\varphi(n)}=1,$$ and $$\limsup\limits_{n\to\infty}\dfrac{\log c_{n+1}}{\log\left(c_1c_2\cdots c_n\right)}\leq b+\epsilon-1.$$ For all $n\in \mathbb{N}$, let $\Phi(n)=\min\limits_{k\geq n} \varphi(k).$ Since $\varphi(n)\to \infty$, as $n\to \infty$, $\Phi(n)$ is well defined. Thus, $\Phi(n)\leq \varphi(n),$ $\Phi(n)\leq \Phi(n+1)$ for all $n\in \mathbb{N}$. We claim that \begin{equation*} \Phi(n)= \varphi(n), \ \text{infinitely many}\ n\in \mathbb{N}. \end{equation*} If not, there exists $N\in\mathbb{N}$ such that for any $n\geq N$, $\Phi(n)< \varphi(n).$ Then for $n\geq N$, $\Phi(n)< \min\limits_{k\geq n}\varphi(k),$ which contradicts to the definition of $\Phi(n)$. We define a sequence $\{c_n\}_{n\geq1}$ as follows: \begin{equation}\label{sequencecn} \begin{aligned} &c_1=c_2=\cdots=c_m=1,\ c^{t_m}_{m+1}=e^{\Phi(1)},\\ &c^{t_m}_{n+m}=\min\left\{\dfrac{e^{\Phi(n)}}{c^{t_0}_{n}c^{t_1}_{n+1}\cdots c^{t_{m-1}}_{n+m-1}},{(c_1c_2\cdots c_{n+m-1})}^{t_m(b+\epsilon-1)}\right\},\ \text{for}\ n\geq 2. \end{aligned} \end{equation} Since $(t_0,t_1,\cdots,t_m)\in \mathbb{R}_+^{m+1}$ with $0<t_0\leq t_1\leq \cdots \leq t_m$ and $\Phi$ is nondecreasing, we have $c_n\geq 1$ for all $n\geq1$. Thus \begin{equation}\label{limsupb-1} \limsup\limits_{n\to\infty}\frac{\log c_{n+1}}{\log (c_1c_2\cdots c_n)}\leq \limsup\limits_{n\to\infty}\frac{\log {(c_1c_2\cdots c_n)}^{b+\epsilon-1}}{\log (c_1c_2\cdots c_n)}=b+\epsilon-1. \end{equation} We also claim that \begin{equation}\label{sequencec-n} c^{t_m}_{n+m}=\frac{e^{\varphi(n)}}{c^{t_0}_{n}c^{t_1}_{n+1}\cdots c^{t_{m-1}}_{n+m-1}} \ \text{for infinitely many}\ n. \end{equation} In order to prove \eqref{sequencec-n}, we first show that \begin{equation} \label{sequencec-n-1} c^{t_m}_{n+m}=\frac{e^{\Phi(n)}}{c^{t_0}_{n}c^{t_1}_{n+1}\cdots c^{t_{m-1}}_{n+m-1}} \ \text{for infinitely many}\ n. \end{equation} If not, there exists $N\in\mathbb{N}$ such that for any $n\geq N$, \begin{equation}\label{sequence-2} \begin{cases} &c_{n+m}={\left(c_1c_2\cdots c_{n+m-1}\right)}^{b+\epsilon-1}\\ &\frac{e^{\Phi(n)}}{c^{t_0}_{n}c^{t_1}_{n+1}\cdots c^{t_{m-1}}_{n+m-1}} >{\left(c_1c_2\cdots c_{n+m-1}\right)}^{t_m(b+\epsilon-1)}. \end{cases} \end{equation} Then \begin{equation}\label{sequence-3} \begin{cases} &c_{n+m}=c^{b+\epsilon}_{n+m-1}\\ &e^{\Phi(n)} >{\left(c_1c_2\cdots c_{n+m-1}\right)}^{b+\epsilon-1}\dot c^{t_0}_{n}c^{t_1}_{n+1}\cdots c^{t_{m-1}}_{n+m-1}>{\left(c_1c_2\cdots c_{n+m-1}\right)}^{t_m(b+\epsilon-1)}. 
\end{cases}
\end{equation}
Therefore, by \eqref{sequence-2} and \eqref{sequence-3},
\begin{equation}\label{sequence-4}
\begin{aligned}
\prod_{k=1}^nc_k=&\left(\prod_{k=1}^{N+m-1}c_k\right)\cdot c_{N+m}\cdot c_{N+m+1}\cdots c_n\\
=&\left(\prod_{k=1}^{N+m-1}c_k\right)\cdot {\left(\prod_{k=1}^{N+m-1}c_k\right)}^{b+\epsilon-1}\cdot c_{N+m+1}\cdots c_n\\
=&\left(\prod_{k=1}^{N+m-1}c_k\right)\cdot {\left(\prod_{k=1}^{N+m-1}c_k\right)}^{b+\epsilon-1}\cdot{\left(\prod_{k=1}^{N+m-1}c_k\right)}^{(b+\epsilon-1)(b+\epsilon)}\\
\cdots&{\left(\prod_{k=1}^{N+m-1}c_k\right)}^{(b+\epsilon-1){(b+\epsilon)}^{n-N-m}}\\
=&{\left(\prod_{k=1}^{N+m-1}c_k\right)}^{{(b+\epsilon)}^{n-N-m+1}}.
\end{aligned}
\end{equation}
Combining \eqref{sequence-3} with \eqref{sequence-4}, we obtain
\begin{equation*}
\begin{aligned}
\liminf\limits_{n\to\infty}\frac{\log\Phi(n+1)}{n+1} &\geq\liminf\limits_{n\to\infty}\dfrac{\log \log {\left(c_1c_2 \cdots c_{n+m}\right)}^{t_m(b+\epsilon-1)}}{n+1}\\
&=\liminf\limits_{n\to\infty}\dfrac{\log \log {\left(\prod\limits_{k=1}^{N+m-1}c_k\right)}^{t_m(b+\epsilon-1){(b+\epsilon)}^{n-N+1}}}{n+1}=\log(b+\epsilon).
\end{aligned}
\end{equation*}
Then
\begin{equation*}
\liminf\limits_{n\to \infty}\frac{\log\varphi(n+1)}{n+1}\geq \liminf\limits_{n\to \infty}\frac{\log\Phi(n+1)}{n+1}\geq\log (b+\epsilon)>\log b,
\end{equation*}
which contradicts $\log b=\liminf\limits_{n\to \infty}\frac{\log\varphi(n)}{n}.$
Now we begin to prove \eqref{sequencec-n}. If the equality \eqref{sequencec-n-1} holds for some $n$ such that $\Phi(n)\neq\varphi(n)$, then $\Phi(n)=\Phi(n+1)$, and the equality \eqref{sequencec-n-1} holds for $n+1$, since
\begin{equation*}
\begin{aligned}
&\frac{e^{\Phi(n+1)}}{c^{t_0}_{n+1}c^{t_1}_{n+2}\cdots c^{t_{m-1}}_{n+m}}=\frac{e^{\Phi(n)}}{c^{t_0}_{n+1}c^{t_1}_{n+2}\cdots c^{t_{m-1}}_{n+m}}=c^{t_0}_{n}c^{t_1-t_0}_{n+1}\cdots c^{t_m-t_{m-1}}_{n+m}\\
\leq&{ \left(c_1c_2\cdots c_{n-1}\right)}^{t_0(b+\epsilon-1)} {\left(c_1c_2\cdots c_{n}\right)}^{(t_1-t_0)(b+\epsilon-1)}\\
&\cdots {\left(c_1c_2\cdots c_{n+m-1}\right)}^{(t_m-t_{m-1})(b+\epsilon-1)}\\
=&{(c_1c_2\cdots c_{n-1})}^{t_m(b+\epsilon-1)}c_n^{(t_m-t_0)(b+\epsilon-1)}\cdots c_{n+m-1}^{(t_m-t_{m-1})(b+\epsilon-1)}\\
<&{(c_1c_2\cdots c_{n+m-1})}^{t_m(b+\epsilon-1)}.
\end{aligned}
\end{equation*}
By the fact that $\Phi(n)=\varphi(n)$ for infinitely many $n\in\mathbb{N}$, we can repeat this argument until we reach some $n+k$ such that $\Phi(n+k)=\varphi(n+k)$. Then the desired result is obtained.
Combining \eqref{sequencecn} with \eqref{sequencec-n}, we have
\begin{equation}\label{sequence-5}
\limsup\limits_{n\to\infty}\dfrac{\log\left(c^{t_0}_nc^{t_1}_{n+1}\cdots c^{t_m}_{n+m}\right)}{\varphi(n)}=1.
\end{equation}
\textbf{Step \uppercase\expandafter{\romannumeral2}.} We use the sequence $\{c_n\}_{n\geq1}$ to construct a subset of $\overline{E}(\{t_i\}_{i=0}^m,\varphi).$ Since $\varphi(n)/n\to \infty$ as $n\to \infty$, we can choose an increasing sequence $\{n_k\}_{k=1}^{\infty}$ such that for each $k\geq 1$
\begin{equation*}
\frac{\varphi(n)}{n}\geq k^2,\ \text{when} \ n\geq n_k.
\end{equation*}
Let $\alpha_n=2$ if $1\leq n<n_1$ and $$\alpha_n=k+1,\ \text{when}\ n_k\leq n<n_{k+1}.$$ For any $n\geq 1$, there exists $k(n)$ such that $n_{k(n)}\leq n+m<n_{k(n)+1}$.
Then \begin{equation}\label{sequence-alpha1} \lim\limits_{n\to \infty}\dfrac{\log\left(\alpha^{t_0}_n\alpha^{t_1}_{n+1}\cdots\alpha^{t_m}_{n+m}\right)}{\varphi(n)}\leq \lim\limits_{n\to\infty}\dfrac{(t_0+\cdots+t_m)\log(k(n)+1)}{n{k(n)}^2}=0, \end{equation} and \begin{equation}\label{sequence-alpha2} \lim\limits_{n\to\infty}\dfrac{\log \alpha_{n+1}}{\log(\alpha_1\alpha_2\cdots\alpha_n)}\leq \lim\limits_{n\to\infty}\dfrac{\log(n+1)}{n\log2}=0. \end{equation} For any $n\geq1,$ take $s_n=c_n+\alpha_n$. Then we have $s_n\to\infty$ as $n\to\infty$. Define \begin{equation*} E(\{s_n\}_{n\geq1})=\left\{x\in[0,1):\lfloor s_n\rfloor\leq a_n(x)<2\lfloor s_n\rfloor,\ \text{for any}\ n\geq1\right\}. \end{equation*} Since $c_n\geq 1$ and $\alpha_n\geq2$ for all $n\geq1$, we can check that for any $n\geq1$ $$\log c_n\leq \log s_n\leq \log c_n +2\log \alpha_n.$$ Combining \eqref{sequence-5}, \eqref{sequence-alpha1}, \eqref{sequence-alpha2}, we get \begin{equation*} \limsup\limits_{n\to\infty}\dfrac{\log \left(s^{t_0}_ns^{t_1}_{n+1}\cdots s^{t_m}_{n+m}\right)}{\varphi(n)}=1. \end{equation*} So $E(\{s_n\}_{n\geq1})\subset\overline{E}(\{t_i\}_{i=0}^m,\varphi).$ Applying Lemma \ref{anyset}, we obtain \begin{equation*} \hdim \overline{E}(\{t_i\}_{i=0}^m,\varphi)\geq \hdim E(\{s_n\}_{n\geq1})=\dfrac{1}{2+\limsup\limits_{n\to\infty}\frac{\log s_{n+1}}{\log (s_1s_2\cdots s_n)}}\overset{\eqref{limsupb-1}}\geq \frac{1}{b+1+\epsilon}. \end{equation*} Therefore, $$\hdim\overline{E}(\{t_i\}_{i=0}^m,\varphi)\geq \frac{1}{b+1}.$$ \section*{Acknowledgements} A. Bakhtawar is supported by the Australian Research Council Discovery Project (ARC Grant DP180100201) and J. Feng is supported by the National Natural Science Foundation of China (NSFC Grant No. 11901204). J. Feng would like to thank China Scholarship Council financial support (No. 202106160053). The authors are grateful to Professor Lingmin Liao for helpful discussions. \section*{References} \begin{thebibliography}{10} \bibitem{Ba_20} A.~Bakhtawar. \newblock Hausdorff dimension for the set of points connected with the generalized {J}arn\'{\i}k-{B}esicovitch set. \newblock {\em J. Aust. Math. Soc.}, 112(1):1--29, 2022. \bibitem{BaHuKlWa_22} A.~{Bakhtawar}, M.~{Hussain}, D.~{Kleinbock}, and B.-W.~{Wang}. \newblock Metrical properties for the weighted products of multiple partial quotients in continued fractions. \newblock {\em Pre-Print: https://arxiv.org/abs/2202.11212}, 2022. \bibitem{Be_12} F.~Bernstein. \newblock \"{U}ber eine {A}nwendung der {M}engenlehre auf ein aus der {T}heorie der s\"{a}kularen {S}t\"{o}rungen herr\"{u}hrendes {P}roblem. \newblock {\em Math. Ann.}, 71(3):417--439, 1911. \bibitem{Bo_12} E.~Borel. \newblock Sur un probl\`eme de probabilit\'{e}s relatif aux fractions continues. \newblock {\em Math. Ann.}, 72(4):578--584, 1912. \bibitem{DaSc_70} H.~Davenport and W.-M. Schmidt. \newblock Dirichlet's theorem on diophantine approximation. \newblock In {\em Symposia {M}athematica, {V}ol. {IV} ({INDAM}, {R}ome, 1968/69)}, pages 113--132. Academic Press, London, 1970. \bibitem{FaLiWaWu_13} A.~Fan, L.-M.~Liao, B.-W.~Wang, and J.~Wu. \newblock On the fast {K}hintchine spectrum in continued fractions. \newblock {\em Monatsh. Math.}, 171(3-4):329--340, 2013. \bibitem{FaLiWaWu_09} A.-H. Fan, L.-M. Liao, B.-W. Wang, and J.~Wu. \newblock On {K}hintchine exponents and {L}yapunov exponents of continued fractions. \newblock {\em Ergodic Theory Dynam. Systems}, 29(1):73--109, 2009. \bibitem{FaMaSo_21} L.-L.~Fang, J.~Ma, and K.-K.~Song. 
\newblock Some exceptional sets of {B}orel-{B}ernstein theorem in continued fractions. \newblock {\em Ramanujan J.}, 56(3):891--909, 2021. \bibitem{FeWULiTs_97} D.-J. Feng, J.~Wu, J.-C. Liang, and S.~Tseng. \newblock Appendix to the paper by {T. } {{\L}uczak}---a simple proof of the lower bound: ``{O}n the fractional dimension of sets of continued fractions''. \newblock {\em Mathematika}, 44(1):54-55, 1997. \bibitem{FeXu_21} J.~Feng and J.~Xu. \newblock Sets of {D}irichlet non-improvable numbers with certain order in the theory of continued fractions. \newblock {\em Nonlinearity}, 34(3):1598--1611, 2021. \bibitem{Go_41} I.-J. Good. \newblock The fractional dimensional theory of continued fractions. \newblock {\em Proc. Cambridge Philos. Soc.}, 37:199--228, 1941. \bibitem{HuaWu_19} L.-L.~Huang and J.~Wu. \newblock Uniformly non-improvable {D}irichlet set via continued fractions. \newblock {\em Proc. Amer. Math. Soc.}, 147(11):4617--4624, 2019. \bibitem{HuWuXu_19} L.-L.~Huang, J.~Wu, and J.~Xu. \newblock Metric properties of the product of consecutive partial quotients in continued fractions. \newblock {\em Israel J. Math.}, 238(2):901--943, 2020. \bibitem{Ja_32} V.~Jarnik. \newblock Zur {T}heorie der diophantischen {A}pproximationen. \newblock {\em Monatsh. Math. Phys.}, 39(1):403--438, 1932. \bibitem{Kh_63} A.-Y. Khintchine. \newblock Continued Fractions. \newblock {\em University of Chicago Press, Chicago, London, } 1964. \bibitem{KlWa_18} D.~Kleinbock and N.~Wadleigh. \newblock A zero-one law for improvements to {D}irichlet's {T}heorem. \newblock {\em Proc. Amer. Math. Soc.}, 146(5):1833--1844, 2018. \bibitem{Luczak} T.~\L uczak. \newblock On the fractional dimension of sets of continued fractions. \newblock {\em Mathematika}, 44(1):50--53, 1997. \bibitem{LiRa_016} L.-M.~Liao and M.~Rams. \newblock Subexponentially increasing sums of partial quotients in continued fraction expansions. \newblock {\em Math. Proc. Cambridge Philos. Soc.}, 160(3):401--412, 2016. \bibitem{LiRa_16} L.-M.~Liao and M.~Rams. \newblock Upper and lower fast {K}hintchine spectra in continued fractions. \newblock {\em Monatsh. Math.}, 180(1):65--81, 2016. \bibitem{WaWu_008} B.-W. Wang and J.~Wu. \newblock Hausdorff dimension of certain sets arising in continued fraction expansions. \newblock {\em Adv. Math.}, 218(5):1319--1339, 2008. \bibitem{Zh_20} L.-L.~Zhang. \newblock Set of extremely Dirichlet non-improvable points. \newblock {\em Fractals}, 28(02):2050034, 2020. \end{thebibliography} \end{document}
2205.14555v1
http://arxiv.org/abs/2205.14555v1
Two New Piggybacking Designs with Lower Repair Bandwidth
\documentclass[journal,draftcls,onecolumn,12pt,twoside]{IEEEtran} \usepackage[T1]{fontenc} \usepackage{times} \usepackage{amsmath} \usepackage{amssymb} \usepackage{amsthm} \usepackage{color} \usepackage{algorithm} \usepackage[noend]{algorithmic} \usepackage{graphicx} \usepackage{subfigure} \usepackage{multirow} \usepackage[bookmarks=false,colorlinks=false,pdfborder={0 0 0}]{hyperref} \usepackage{cite} \usepackage{bm} \usepackage{arydshln} \usepackage{mathtools} \usepackage{microtype} \usepackage{subfigure} \usepackage{float} \usepackage[figuresright]{rotating} \usepackage{threeparttable} \usepackage{booktabs} \usepackage{color} \newcommand{\sS}{\mathsf{S}} \newcommand{\sT}{\mathsf{T}} \newcommand{\sIn}{\mathsf{In}} \newcommand{\sOut}{\mathsf{Out}} \newcommand{\bE}{\mathbf{E}} \newcommand{\bI}{\mathbf{I}} \newcommand{\sfa}{\mathsf{a}} \newcommand{\sfb}{\mathsf{b}} \newcommand{\sumset}[3]{\sum_{#2}^{#3}\hspace{-2.9mm}{\scriptstyle {#1}}\hspace{1.9mm}} \newcommand{\sumsett}[3]{\hspace{4.7mm}{\scriptstyle {#1}}\hspace{-4.2mm}\sum_{#2}^{#3}} \newtheorem{theorem}{Theorem} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{defn}{Definition} \long\def\symbolfootnote[#1]#2{\begingroup \def\thefootnote{\fnsymbol{footnote}}\footnote[#1]{#2}\endgroup} \renewcommand{\paragraph}[1]{{\bf #1}} \long\def\symbolfootnote[#1]#2{\begingroup \def\thefootnote{\fnsymbol{footnote}}\footnote[#1]{#2}\endgroup} \ifodd1\newcommand{\rev}[1]{{\color{red}#1}}\newcommand{\com}[1]{\textbf{\color{blue} (COMMENT: #1)}} \begin{document} \title{Two New Piggybacking Designs with Lower Repair Bandwidth} \author{Zhengyi Jiang, Hanxu Hou, Yunghsiang S. Han, Patrick P. C. Lee, Bo Bai, and Zhongyi Huang } \maketitle \begin{abstract}\symbolfootnote[0]{ Zhengyi Jiang and Zhongyi Huang are with the Department of Mathematics Sciences, Tsinghua University (E-mail: [email protected], [email protected]). Hanxu Hou and Bo Bai are with Theory Lab, Central Research Institute, 2012 Labs, Huawei Technology Co. Ltd. (E-mail: [email protected], [email protected]). Yunghsiang S. Han is with the Shenzhen Institute for Advanced Study, University of Electronic Science and Technology of China~(E-mail: [email protected]). Patrick P. C. Lee is with the Department of Computer Science and Engineering, The Chinese University of Hong Kong (E-mail: [email protected]). This work was partially supported by the National Key R\&D Program of China (No. 2020YFA0712300), National Natural Science Foundation of China (No. 62071121, No.12025104, No.11871298), Research Grants Council of HKSAR (AoE/P-404/18), Innovation and Technology Fund (ITS/315/18FX). } Piggybacking codes are a special class of MDS array codes that can achieve small repair bandwidth with small sub-packetization by first creating some instances of an $(n,k)$ MDS code, such as a Reed-Solomon (RS) code, and then designing the piggyback function. In this paper, we propose a new piggybacking coding design which designs the piggyback function over some instances of both $(n,k)$ MDS code and $(n,k')$ MDS code, when $k\geq k'$. We show that our new piggybacking design can significantly reduce the repair bandwidth for single-node failures. When $k=k'$, we design a piggybacking code that is MDS code and we show that the designed code has lower repair bandwidth for single-node failures than all existing piggybacking codes when the number of parity node $r=n-k\geq8$ and the sub-packetization $\alpha<r$. 
Moreover, we propose another class of piggybacking codes by designing $n$ piggyback functions over some instances of an $(n,k)$ MDS code and adding the $n$ piggyback functions into $n$ newly created empty entries that contain no data symbols. We show that our code can significantly reduce the repair bandwidth for single-node failures at a cost of slightly more storage overhead. In addition, we show that our code can recover any $r+1$ node failures for some parameters. We also show that our code has lower repair bandwidth than locally repairable codes (LRCs) under the same fault-tolerance and redundancy for some parameters. \end{abstract} \begin{IEEEkeywords} Piggybacking, MDS array code, repair bandwidth, storage overhead, sub-packetization, fault tolerance \end{IEEEkeywords} \IEEEpeerreviewmaketitle \section{Introduction} \label{sec:intro} {\em Maximum distance separable (MDS)} array codes are widely employed in distributed storage systems, since they provide the maximum data reliability for a given amount of storage overhead. An $(n,k,\alpha)$ MDS array code encodes a data file of $k\alpha$ {\em data symbols} into $n\alpha$ {\em coded symbols} with each of the $n$ nodes storing $\alpha$ symbols, such that any $k$ out of $n$ nodes can retrieve all $k\alpha$ data symbols, where $k < n$ and $\alpha\geq 1$. The number of symbols stored in each node, i.e., the size of $\alpha$, is called the {\em sub-packetization level}. We usually employ \emph{systematic codes} in practical storage systems, so that the $k\alpha$ data symbols are directly stored in the system and can be retrieved without performing any decoding operation. Note that Reed-Solomon (RS) codes \cite{reed1960} are typical MDS codes with $\alpha=1$. In modern distributed storage systems, node failures are common and single-node failures occur more frequently than multi-node failures \cite{ford2010}. When a single node fails, it is important to repair the failed node with the {\em repair bandwidth} (i.e., the total number of symbols downloaded from the surviving nodes) as small as possible. It is shown in \cite{dimakis2010} that we need to download at least $\frac{\alpha}{n-k}$ symbols from each of the $n-1$ surviving nodes in repairing one single-node failure. MDS array codes with minimum repair bandwidth for any single-node failure are called {\em minimum storage regenerating} (MSR) codes. There are many constructions of MSR codes that achieve the minimum repair bandwidth in the literature \cite{rashmi2011,tamo2013,hou2016,2017Explicit,li2018,2018A,hou2019a,hou2019b}. However, the sub-packetization level $\alpha$ of high-code-rate (i.e., $\frac{k}{n}>0.5$) MSR codes \cite{2018A} is exponential in the parameters $n$ and $k$. A natural question is whether we can design new MDS array codes with both sub-packetization and repair bandwidth as small as possible. Piggybacking codes \cite{2014A,2017Piggybacking} are a special class of MDS array codes that have small sub-packetization and small repair bandwidth. The essential idea behind the piggybacking codes \cite{2017Piggybacking} is as follows: by creating $\alpha$ instances of an $(n,k)$ RS code and adding carefully designed linear combinations of some symbols, the so-called piggyback functions, from one instance to the others, we can reduce the repair bandwidth of a single-node failure. Some further studies of piggybacking codes are in \cite{2014Sytematic,2018Repair,2019AnEfficient,2016A,2021piggyback,2021piggybacking}. The existing piggybacking codes are designed based on some instances of an $(n,k)$ RS code.
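To illustrate the idea with a toy example of our own (an illustration in the spirit of \cite{2017Piggybacking}, not a construction from that work), take two instances of a $(4,2)$ MDS code over $\mathbb{F}_5$ with data symbols $(a_1,a_2)$ and $(b_1,b_2)$, where node $i$ stores the $i$-th symbol of each instance:
\[
(a_1,\ b_1),\quad (a_2,\ b_2),\quad (a_1+a_2,\ b_1+b_2),\quad (a_1+2a_2,\ b_1+2b_2),
\]
and replace the last symbol by the piggybacked symbol $b_1+2b_2+a_1$. Any two nodes still determine all four data symbols. To repair node 1, we download $b_2$ and $b_1+b_2$ to recover $b_1$ (and hence $b_1+2b_2$), and then download $b_1+2b_2+a_1$ to recover $a_1$; only $3$ symbols are downloaded instead of the $k\alpha=4$ symbols required by the naive RS repair.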
The motivation of this paper is to significantly reduce the repair bandwidth by designing new piggybacking codes. In this paper, we propose new piggybacking codes by first creating some instances of both an $(n,k)$ MDS code and an $(n,k')$ MDS code with $k\geq k'$, and then designing piggyback functions that can significantly reduce the repair bandwidth for single-node failures. \subsection{Contributions} Our main contributions are as follows. \begin{itemize} \item First, we propose a new type of piggybacking coding design that is built on both an $(n,k)$ MDS code and an $(n,k')$ MDS code, where $k\geq k'$. We give an efficient repair method for any single-node failure of our piggybacking coding design and present an upper bound on the repair bandwidth. When $k>k'$, our codes are non-MDS codes, and we show that they have much lower repair bandwidth than the existing piggybacking codes at a cost of slightly more storage overhead. The essential reason for the repair bandwidth reduction of our codes is that we have a larger design space than the existing piggybacking codes. \item Second, when $k=k'$, we design new piggybacking codes that are MDS codes based on the proposed design. We show that the proposed piggybacking codes with $k=k'$ have lower repair bandwidth than the existing piggybacking codes when $r=n-k\geq 8$ and the sub-packetization is less than $r$. \item Third, we design another class of piggybacking codes by designing $n$ piggyback functions and adding them into $n$ newly created empty entries that contain no data symbols. We show that these piggybacking codes can tolerate any $r+1$ node failures under some conditions. We also show that our codes have lower repair bandwidth than both Azure-LRC \cite{huang2012} and optimal-LRC \cite{2014optimal} under the same fault-tolerance and the same storage overhead for some parameters. \end{itemize} \subsection{Related Works} Many works aim to reduce the repair bandwidth of erasure codes; we discuss them as follows. \subsubsection{Piggybacking Codes} Rashmi \emph{et al.} present the seminal work on piggybacking codes \cite{2014A,2017Piggybacking} that can reduce the repair bandwidth for any single data node failure with small sub-packetization. Another class of piggybacking codes, called REPB, is proposed in \cite{2018Repair} to achieve lower repair bandwidth for any single data node failure than the codes in \cite{2017Piggybacking}. Note that the piggybacking codes in \cite{2017Piggybacking,2018Repair} only have small repair bandwidth for single data node failures, but not for parity nodes. Some follow-up works \cite{2019AnEfficient,2021piggyback,2021piggybacking} design new piggybacking codes to obtain small repair bandwidth for both data nodes and parity nodes. Specifically, when $r=n-k\leq10$ and the sub-packetization is $r-1+\sqrt{r-1}$, OOP codes \cite{2019AnEfficient} have the lowest repair bandwidth for any single-node failure among the existing piggybacking codes; when $r\geq10$ and the sub-packetization is $r$, the codes in \cite{2021piggybacking} have the lowest repair bandwidth for any single-node failure among the existing piggybacking codes. Note that all the existing piggybacking codes are designed over some instances of an $(n,k)$ MDS code. In this paper, we design new piggybacking codes, which are non-MDS codes built on some instances of both an $(n,k)$ MDS code and an $(n,k')$ MDS code with $k>k'$, that have much lower repair bandwidth for single-node failures at a cost of slightly larger storage overhead.
\subsubsection{MDS Array Codes} Minimum storage regenerating (MSR) codes \cite{dimakis2010} are a class of MDS array codes with minimum repair bandwidth for a single-node failure. Some exact-repair constructions of MSR codes are investigated in \cite{rashmi2011,shah2012,tamo2013,hou2016,ye2017,li2018,hou2019a,hou2019b}. The sub-packetization of high-code-rate MSR codes \cite{tamo2013,ye2017,li2018,hou2019a,hou2019b} increases exponentially with the parameters $n$ and $k$. Some MDS array codes have been proposed \cite{corbett2004row,blaum1995evenodd,Hou2018A,xu1999x,2018MDS,2021A} to achieve small repair bandwidth under the condition of small sub-packetization; however, they either only have small repair bandwidth for data nodes \cite{corbett2004row,blaum1995evenodd,hou2018d,Hou2018A,xu1999x} or require large field sizes \cite{2018MDS,2021A}. \subsubsection{Locally Repairable Codes} Locally repairable codes (LRCs) \cite{huang2012,2014Locally} are non-MDS codes that can achieve small repair bandwidth for any single-node failure with sub-packetization $\alpha=1$ by adding some local parity symbols. Consider the $(n,k,g)$ Azure-LRC \cite{huang2012} that is employed in Windows Azure storage systems: we first create $n-k-g$ global parity symbols by encoding all $k$ data symbols, then divide the $k$ data symbols into $g$ groups and create one local parity symbol for each group, where $k$ is a multiple of $g$. In the $(n,k,g)$ Azure-LRC, we can repair any one symbol except the $n-k-g$ global parity symbols by locally downloading the other $k/g$ symbols in its group. Optimal-LRC \cite{2014optimal,2019How,2020Improved,2020On} is another family of LRCs that can locally repair any one symbol (including the global parity symbols). One drawback of optimal-LRC is that the existing constructions \cite{2014optimal,2019How,2020Improved,2020On} cannot support all parameters and require a sufficiently large underlying field size. In this paper, we also propose new piggybacking codes, which are non-MDS codes, by designing $n$ piggyback functions and adding them into $n$ newly created empty entries that contain no data symbols, and we show that these piggybacking codes have lower repair bandwidth than Azure-LRC \cite{huang2012} and optimal-LRC under the same storage overhead and fault-tolerance, for some parameters. The remainder of this paper is organized as follows. Section \ref{sec:2} presents two piggybacking coding designs. Section \ref{sec:3} shows new piggybacking codes with $k=k'$ based on the first design. Section \ref{sec:4} shows another class of new piggybacking codes based on the second design. Section \ref{sec:com} evaluates the repair bandwidth of our piggybacking codes and the related codes. Section \ref{sec:con} concludes the paper. \section{Two Piggybacking Designs} \label{sec:2} In this section, we first present two piggybacking designs and then consider the repair bandwidth of any single-node failure for the proposed piggybacking codes. \subsection{Two Piggybacking Designs} \label{sec:2.1} Our two piggybacking designs can be represented by an $n\times (s+1)$ array, where $s$ is a positive integer with $s+1\le n$ and the $s+1$ symbols in each row are stored in one node. We label the $n$ rows from 1 to $n$ and the $s+1$ columns from 1 to $s+1$; the symbols in row $i$ are stored in node $i$. In the following, we present our first piggybacking design.
In the first piggybacking design, we first create $s$ instances of an $(n,k)$ MDS code plus one instance of an $(n,k')$ MDS code, and then design the piggyback functions, where $k\geq k'>0$. We describe the detailed structure of the design as follows. \begin{enumerate}[] \item First, we create $s+1$ instances of MDS codes over the finite field $\mathbb{F}_q$: the first $s$ columns are codewords of an $(n,k)$ MDS code and the last column is a codeword of an $(n,k')$ MDS code, where $k'=k-h$, $h\in\{0,1,\ldots,k-1\}$ and $s-n+k+2\leq h$. Let $\{ \mathbf{a_i}=( a_{i,1},a_{i,2},\ldots,a_{i,k} )^T \}_{i=1}^{s}$ be the $sk$ data symbols in the first $s$ columns and $( a_{i,1},a_{i,2},\ldots,a_{i,k},\mathbf{P}_1^T\mathbf{a_i},$ $\ldots, \mathbf{P}_r^T\mathbf{a_i})^T$ be codeword $i$ of the $(n,k)$ MDS code, where $i=1,2,\ldots,s$, $\mathbf{P}_j^T=(\eta^{j-1},\eta^{2(j-1)},\ldots,\eta^{k(j-1)})$ with $j=1,2,\ldots,r$, $r=n-k$, and $\eta$ is a primitive element of $\mathbb{F}_q$. Let $\mathbf{b}=( b_{1},b_{2},\ldots,b_{k'} )^T$ be the $k'=k-h$ data symbols in the last column and $( b_{1},b_{2},\ldots,b_{k'},\mathbf{Q}_1^T\mathbf{b},\ldots, \mathbf{Q}_{h+r}^T\mathbf{b})^T$ be a codeword of the $(n,k')$ MDS code, where $\mathbf{Q}_j^T=(\eta^{j-1},\eta^{2(j-1)},\ldots,\eta^{k'(j-1)})$ with $j=1,2,\ldots,h+r$. Note that the total number of data symbols in this code is $sk+k'$. \item Second, we add the {\em piggyback functions} of the symbols in the first $s$ columns to the parity symbols in the last column, in order to reduce the repair bandwidth. We divide the piggyback functions into two types: $(i)$ piggyback functions of the symbols in the first $k'+1$ rows of the first $s$ columns; $(ii)$ piggyback functions of the symbols in the last $r+h-1$ rows of the first $s$ columns. Fig. \ref{fig.1} shows the structure of the two types of piggyback functions. For the first type of piggyback functions, we add symbol $a_{i,j}$ (the symbol in row $j$ and column $i$) to the parity symbol $\mathbf{Q}_{2+(((j-1)s+i-1)\bmod(h+r-1))}^T\mathbf{b}$ (the symbol in row $k-h+2+(((j-1)s+i-1)\bmod(h+r-1))$ in the last column), where $i\in\{1,2,\ldots,s\}$ and $j\in\{1,2,\ldots,k-h+1\}$. For the second type of piggyback functions, we add the symbol in row $j$ and column $i$ with $i\in\{1,2,\ldots,s\}$ and $j\in\{k-h+2,\ldots,k+r\}$ to the parity symbol $\mathbf{Q}_{t_{i,j}}^T\mathbf{b}$ (the symbol in row $k-h+t_{i,j}$ in the last column), where \begin{equation} t_{i,j}=\left\{\begin{matrix} i+j-k+h, \text{ if }\ i+j\leq n\\ i+j-n+1, \text{ if }\ i+j>n \end{matrix}\right.. \label{eq:tij1} \end{equation} \end{enumerate} The first piggybacking design described above is denoted by $\mathcal{C}(n,k,s,k')$. When $h=0$, we have $k=k'$ and the created $s+1$ instances are all codewords of the $(n,k)$ MDS code. We will show the repair bandwidth in Section \ref{sec:3}. We present the second piggybacking design as follows. We create $s$ instances (in the first $s$ columns) of an $(n,k)$ MDS code over the finite field $\mathbb{F}_q$ and one additional empty column of length $n$; i.e., there is no data symbol in the last column, and all the $n=k+r$ entries in the last column are piggyback functions. We design the $k+r$ piggyback functions in the last column as follows. For $i\in\{1,2,\ldots,s\}$ and $j\in\{1,2,\ldots,k+r\}$, we add the symbol in row $j$ and column $i$ to the symbol in row $\hat{t}_{i,j}$ in the last column, where \begin{equation} \hat{t}_{i,j}=\left\{\begin{matrix} i+j, \text{ if }\ i+j\leq n\\ i+j-n, \text{ if }\ i+j>n \end{matrix}\right..
\label{eq:tij2} \end{equation} We denote the second piggybacking design by $\mathcal{C}(n,k,s,k'=0)$, where the last parameter $k'=0$ indicates that there is no data symbol in the last column. We will discuss the repair bandwidth in Section~\ref{sec:4}. \begin{figure}[htpb] \centering \includegraphics[width=0.70\linewidth]{1} \caption{The structure of the first piggybacking design $\mathcal{C}(n,k,s,k')$, where $k'>0$.} \label{fig.1} \end{figure} Recall that in our first piggybacking design, the number of parity symbols in the last column to which piggyback functions are added is $h+r-1$, and $h\geq s-r+2$, so that any two symbols used in computing the two types of piggyback functions are from different nodes. Since \begin{align*} k-h+t_{i,j}=\left\{\begin{matrix} k-h+i+j-k+h=i+j>j, \text{ when }\ i+j\leq n\\ k-h+i+j-n+1<j, \text{ when }\ i+j>n \end{matrix}\right., \end{align*} the symbol in row $j$ with $j\in\{k-h+2,k-h+3,\ldots,k+r\}$ and column $i$ with $i\in\{1,2,\ldots,s\}$ is not added to the symbol in row $j$ and column $s+1$ in computing the second type of piggyback functions. In our second piggybacking design, since \begin{align*} \hat{t}_{i,j}=\left\{\begin{matrix} i+j>j, \text{ when }\ i+j\leq n\\ i+j-n<j, \text{ when }\ i+j>n \end{matrix}\right., \end{align*} the symbol in row $j$ with $j\in\{1,2,\ldots,k+r\}$ and column $i$ with $i\in\{1,2,\ldots,s\}$ is not added to the symbol in row $j$ and column $s+1$ in computing the piggyback functions. It is easy to see that, for the first piggybacking design $\mathcal{C}(n,k,s,k')$, all data symbols can be retrieved from any $k$ nodes (when $k'=k$ this is exactly the MDS property): from any $k$ nodes (rows) we can retrieve all the symbols in the first $s$ columns; by computing all the piggyback functions and subtracting them from the corresponding parity symbols, we can then retrieve all the symbols in the last column. Fig.~\ref{fig.2} shows an example of $\mathcal{C} (8,6,1,3)$. \begin{figure} \centering \includegraphics[width=0.5\linewidth]{2} \caption{An example of $\mathcal{C} (n,k,s,k')$, where $(n,k,s,k')=(8,6,1,3)$.} \label{fig.2} \end{figure} Note that the piggyback functions of the second piggybacking design are different from those of the first piggybacking design. In the rest of this section, we present the repair method for the first piggybacking design; the repair method for the second piggybacking design is given in Section \ref{sec:4}. For $i\in\{2,3,\ldots,h+r\}$, let $p_{i-1}$ be the piggyback function added to the parity symbol $\mathbf{Q}_{i}^T\mathbf{b}$ and let $n_{i-1}$ be the number of symbols summed in the piggyback function $p_{i-1}$. According to the design of the piggyback functions, we have two sets of symbols that are used in computing the $h+r-1$ piggyback functions. The first set contains $s(k-h+1)$ symbols (in the first $k-h+1$ rows and in the first $s$ columns) and the second set contains $s(h+r-1)$ symbols (in the last $h+r-1$ rows and in the first $s$ columns). We have that the total number of symbols used in computing the $h+r-1$ piggyback functions is $s(k+r)$, i.e., \begin{eqnarray} &&\sum_{i=1}^{h+r-1}n_i=s(k+r).\label{eq1} \end{eqnarray} In our first piggybacking design, the number of symbols used in computing each piggyback function is given in the next lemma; the short sketch below can be used to tabulate these counts numerically.
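The following is a minimal Python sketch (our own illustration of the index bookkeeping above, not part of the construction) that enumerates, for every symbol in the first $s$ columns, the piggyback function it is added to, and tabulates the counts $n_\tau$. It assumes the row/column conventions introduced above, with $k'=k-h$.
\begin{verbatim}
# Enumerate the piggyback assignment of the first design C(n, k, s, k'=k-h).
# Rows are 1..n, columns are 1..s; the counts n_tau are returned as a list.

def piggyback_index(n, k, s, h, i, j):
    """Index tau of the piggyback function p_tau that receives the symbol in
    row j and column i (both 1-based)."""
    r = n - k
    if j <= k - h + 1:                      # first type of piggyback functions
        return 1 + ((j - 1) * s + i - 1) % (h + r - 1)
    t = i + j - k + h if i + j <= n else i + j - n + 1   # Eq. (1)
    return t - 1                            # p_{t-1} is added in row k-h+t

def piggyback_counts(n, k, s, h):
    """Return [n_1, ..., n_{h+r-1}]."""
    r = n - k
    counts = [0] * (h + r - 1)
    for i in range(1, s + 1):
        for j in range(1, k + r + 1):
            counts[piggyback_index(n, k, s, h, i, j) - 1] += 1
    return counts

# Example of Fig. 2: C(8, 6, 1, 3), i.e. h = 3 and r = 2.
print(piggyback_counts(8, 6, 1, 3))   # -> [2, 2, 2, 2]; 2+2+2+2 = s(k+r) = 8
\end{verbatim}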
\begin{lemma} In the first piggybacking design $\mathcal{C}(n,k,s,k')$ with $k'>0$, the number of symbols used in computing the piggyback function $p_{\tau}$ is \begin{eqnarray} &&n_\tau= s+\left \lceil \frac{s(k-h+1)}{h+r-1} \right \rceil, \forall 1\leq \tau\leq (k-h+1)s-\lfloor \frac{(k-h+1)s}{h+r-1}\rfloor (h+r-1)\nonumber\\ &&n_\tau= s+\left \lfloor \frac{s(k-h+1)}{h+r-1} \right \rfloor, \forall (k-h+1)s-\lfloor \frac{(k-h+1)s}{h+r-1}\rfloor (h+r-1)< \tau< h+r.\label{eq2} \end{eqnarray} \end{lemma} \begin{proof} In the design of the piggyback functions, we add the symbol in row $j$ and column $i$ for $i\in\{1,2,\ldots,s\}$ and $j\in\{1,2,\ldots,k-h+1\}$ (the $(k-h+1)s$ symbols in the first set) to the symbol in row $k-h+2+(((j-1)s+i-1)\bmod(h+r-1))$ (piggyback function $p_{1+(((j-1)s+i-1)\bmod(h+r-1))}$) in the last column. Therefore, the symbol in row $j$ and column $i$ is added to $p_1$ exactly when $(j-1)s+i-1$ is a multiple of $h+r-1$, for $i\in\{1,2,\ldots,s\}$ and $j\in\{1,2,\ldots,k-h+1\}$. Note that \[ \{(j-1)s+i-1|i=1,2,\ldots,s,j=1,2,\ldots,k-h+1\}=\{0,1,\ldots,(k-h+1)s-1\}, \] so the symbols of the first set added to $p_1$ are exactly those whose index $m=(j-1)s+i-1\in\{0,1,\ldots,(k-h+1)s-1\}$ is a multiple of $h+r-1$. The number of symbols in the first set used in computing $p_1$ is therefore $\lceil \frac{(k-h+1)s}{h+r-1}\rceil$. More generally, given an integer $\tau$ with $1\leq \tau\leq h+r-1$, the symbols of the first set added to $p_{\tau}$ are exactly those whose index $m\in\{0,1,\ldots,(k-h+1)s-1\}$ satisfies $m\equiv\tau-1\pmod{h+r-1}$. Therefore, the number of symbols in the first set used in computing $p_{\tau}$ is $\lceil \frac{(k-h+1)s}{h+r-1}\rceil$ if $1\leq \tau\leq (k-h+1)s-\lfloor \frac{(k-h+1)s}{h+r-1}\rfloor (h+r-1)$ and $\lfloor \frac{(k-h+1)s}{h+r-1}\rfloor$ if $h+r-1\geq \tau\geq (k-h+1)s-\lfloor \frac{(k-h+1)s}{h+r-1}\rfloor (h+r-1)+1$. For the $(h+r-1)s$ symbols in the second set, we add the symbol in row $j$ and column $i$ with $i\in\{1,2,\ldots,s\}$ and $j\in\{k-h+2,\ldots,k+r\}$ to the symbol in row $k-h+t_{i,j}$ (piggyback function {$p_{t_{i,j}-1}$}) in the last column, where $t_{i,j}$ is given in Eq. \eqref{eq:tij1}. Consider the piggyback function $p_1$, i.e., $t_{i,j}=2$. When $i=1$, according to Eq. \eqref{eq:tij1}, we obtain $t_{i,j}=2$ only when $j=k+r$ for $j\in\{k-h+2,\ldots,k+r\}$. When $i=2$, according to Eq. \eqref{eq:tij1}, we obtain $t_{i,j}=2$ only when $j=k+r-1$ for $j\in\{k-h+2,\ldots,k+r\}$. Similarly, for any $i$ with $i\in\{1,2,\ldots,s\}$, we obtain $t_{i,j}=2$ only when $j=k+r+1-i$ for $j\in\{k-h+2,\ldots,k+r\}$. Since $h\geq s-r+2$, we have $j=k+r+1-i\geq k+r+1-s>k-h+2$, which is within $\{k-h+2,\ldots,k+r\}$. In other words, for any $i$ with $i\in\{1,2,\ldots,s\}$, we can find one and only one $j$ with $j\in\{k-h+2,\ldots,k+r\}$ such that $t_{i,j}=2$. The number of symbols in the second set used in computing $p_{1}$ is $s$. Similarly, we can show that the number of symbols in the second set used in computing $p_{\tau}$ is $s$ for all $\tau=1,2,\ldots,h+r-1$.
Therefore, the total number of symbols used in computing $p_{\tau}$ is $n_{\tau}=s+\lceil \frac{(k-h+1)s}{h+r-1}\rceil$ for $\tau=1,2,\ldots,(k-h+1)s-\lfloor \frac{(k-h+1)s}{h+r-1}\rfloor (h+r-1)$ and $n_{\tau}=s+\lfloor \frac{(k-h+1)s}{h+r-1}\rfloor$ for $\tau=(k-h+1)s-\lfloor \frac{(k-h+1)s}{h+r-1}\rfloor (h+r-1)+1,\ldots,h+r-1$. \end{proof} The next lemma shows that any two symbols in the same row of the first $s$ columns are used in computing two different piggyback functions. \begin{lemma} In the first piggybacking design, if $s+2\leq h+r$, then the symbol in row $j$ and column $i_1$ and the symbol in row $j$ and column $i_2$ are used in computing two different piggyback functions, for any $j\in\{1,2,\ldots,k+r\}$ and $i_1\neq i_2\in\{1,2,\ldots,s\}$. \label{lm:dif-piggy} \end{lemma} \begin{proof} When $j\in\{1,2,\ldots,k-h+1\}$, we add the symbol in row $j$ and column $i_1$ to the symbol in row $k-h+2+(((j-1)s+i_1-1)\bmod(h+r-1))$ in the last column. Similarly, the symbol in row $j$ and column $i_2$ is added to the symbol in row $k-h+2+(((j-1)s+i_2-1)\bmod(h+r-1))$ in the last column. Suppose that the two symbols in row $j$ and columns $i_1,i_2$ are added to the same piggyback function. Then $((j-1)s+i_1-1)\bmod(h+r-1)=((j-1)s+i_2-1)\bmod(h+r-1)$, i.e., $i_1\equiv i_2\pmod{h+r-1}$, which contradicts $i_1\neq i_2\in\{1,2,\ldots,s\}$ and $s+2\leq h+r$. When $j\in\{k-h+2,k-h+3,\ldots,k+r\}$, we add the two symbols in row $j$ column $i_1$ and row $j$ column $i_2$ to the symbols in the last column in row $k-h+t_{i_1,j}$ and row $k-h+t_{i_2,j}$, respectively, where $i_1\neq i_2\in\{1,2,\ldots,s\}$ and $$t_{i,j}=\left\{\begin{matrix} i+j-k+h, \text{ if }\ i+j\leq n\\ i+j-n+1, \text{ if }\ i+j>n \end{matrix}\right..$$ Suppose that the two symbols in row $j$ and columns $i_1,i_2$ are added to the same piggyback function. Then $t_{i_1,j}=t_{i_2,j}$. If $i_1+j\leq n$ and $i_2+j\leq n$, we have $i_1=i_2$, which contradicts $i_1\neq i_2$. If $i_1+j\leq n$ and $i_2+j> n$, we have $i_2=i_1+h+r-1$, which contradicts $i_1\neq i_2\in\{1,2,\ldots,s\}$ and $s+2\leq h+r$. Similarly, we can obtain a contradiction if $i_1+j> n$ and $i_2+j\leq n$. If $i_1+j> n$ and $i_2+j> n$, we have $i_1=i_2$, which contradicts $i_1\neq i_2$. Therefore, in our first piggybacking design, any two symbols in the same row are not used in computing the same piggyback function. \end{proof} \subsection{Repair Process} \label{sec:2.2} In the first piggybacking design, suppose that node $f$ fails, where $f\in\{1,2,\ldots,k+r\}$. We present the repair procedure of node $f$ as follows. We first consider $f\in\{1,2,\ldots,k-h+1\}$. Each of the first $s$ symbols $\{ a_{1,f},a_{2,f},\ldots,a_{s,f} \}$ stored in node $f$ is used in computing one piggyback function, and we denote the piggyback function associated with symbol $a_{i,f}$ by $p_{t_{i,f}}$, where $i=1,2,\ldots,s$ and $t_{i,f}\in\{1,2,\ldots,h+r-1\}$. We download $k-h$ symbols in the last column from nodes $\{1,2,\ldots,k-h+1\}\setminus\{f\}$ to recover the $s+1$ symbols $b_{f},\mathbf{Q}_{t_{1,f}+1}^T\mathbf{b},\mathbf{Q}_{t_{2,f}+1}^T\mathbf{b}, \ldots,\mathbf{Q}_{t_{s,f}+1}^T\mathbf{b}$ when $f\in\{1,2,\ldots,k-h\}$, or $\mathbf{Q}_{1}^T\mathbf{b},\mathbf{Q}_{t_{1,f}+1}^T\mathbf{b},\mathbf{Q}_{t_{2,f}+1}^T\mathbf{b}, \ldots,\mathbf{Q}_{t_{s,f}+1}^T\mathbf{b}$ when $f=k-h+1$, according to the MDS property of the last instance.
By Lemma \ref{lm:dif-piggy}, any two symbols in the same row are used in computing two different piggyback functions. The piggyback function $p_{t_{i,f}}$ is computed from $n_{t_{i,f}}$ symbols, one of which is $a_{i,f}$, while the other $n_{t_{i,f}}-1$ symbols are not stored in node $f$ (row $f$). Therefore, we can repair the symbol $a_{i,f}$ in node $f$ by downloading the parity symbol $\mathbf{Q}_{t_{i,f}+1}^T\mathbf{b}+p_{t_{i,f}}$ and the $n_{t_{i,f}}-1$ symbols other than $a_{i,f}$ that are used to compute $p_{t_{i,f}}$, where $i=1,2,\ldots,s$. The repair bandwidth is $k-h+\sum_{i=1}^{s}n_{t_{i,f}}$ symbols. When $f\in\{k-h+2,k-h+3,\ldots,n\}$, each of the first $s$ symbols stored in node $f$ is used in computing one piggyback function, and we denote the piggyback function associated with the symbol in row $f$ and column $i$ by $p_{t_{i,f}}$, where $i\in\{1,2,\ldots,s\}$ and $t_{i,f}\in\{1,2,\ldots,h+r-1\}$. We download $k-h$ symbols in the last column from nodes $\{1,2,\ldots,k-h\}$ to recover the $s+1$ symbols $\mathbf{Q}_{f-k+h}^T\mathbf{b}, \mathbf{Q}_{t_{1,f}+1}^T\mathbf{b},\mathbf{Q}_{t_{2,f}+1}^T\mathbf{b},\ldots, \mathbf{Q}_{t_{s,f}+1}^T\mathbf{b}$, according to the MDS property of the last instance. Recall that no symbol in row $f$ of the first $s$ columns is used in computing the piggyback function stored in row $f$. We can therefore recover the last symbol $\mathbf{Q}_{f-k+h}^T\mathbf{b}+p_{f-k+h-1}$ stored in node $f$ by downloading the $n_{f-k+h-1}$ symbols that are used to compute the piggyback function $p_{f-k+h-1}$. Recall also that any two symbols in the same row are used in computing two different piggyback functions by Lemma \ref{lm:dif-piggy}. The piggyback function $p_{t_{i,f}}$ is computed from $n_{t_{i,f}}$ symbols, one of which is in row $f$ and column $i$, while the other $n_{t_{i,f}}-1$ symbols are not stored in node $f$ (row $f$). We can repair the symbol in row $f$ and column $i$, for $i\in\{1,2,\ldots,s\}$, by downloading the symbol $\mathbf{Q}_{t_{i,f}+1}^T\mathbf{b}+p_{t_{i,f}}$ and the $n_{t_{i,f}}-1$ symbols other than the symbol in row $f$ and column $i$ that are used to compute $p_{t_{i,f}}$. The repair bandwidth is $k-h+n_{f-k+h-1}+\sum_{i=1}^{s}n_{t_{i,f}}$ symbols. Consider the repair method of the code $\mathcal{C} (8,6,1,3)$ in Fig. \ref{fig.2}. Suppose that node 1 fails. We can first download the 3 symbols $b_2,b_3,\mathbf{Q}_{1}^T\mathbf{b}$ to obtain the two symbols $b_1,\mathbf{Q}_{2}^T\mathbf{b}$, according to the MDS property. Then, we download the following 2 symbols \[ \mathbf{Q}_{2}^T\mathbf{b}+a_{1,1}+\mathbf{P}_{2}^T\mathbf{a}_1,\mathbf{P}_{2}^T\mathbf{a}_1 \] to recover $a_{1,1}$. The repair bandwidth of node 1 is 5 symbols. Similarly, we can show that the repair bandwidth of any single-node failure among nodes 2 to 4 is 5 symbols. Suppose that node 5 fails. We can download the 3 symbols $b_1,b_2,b_{3}$ to obtain $\mathbf{Q}_{2}^T\mathbf{b},\mathbf{Q}_{3}^T\mathbf{b}$, according to the MDS property. Then, we download the 2 symbols $a_{1,1},\mathbf{P}_{2}^T\mathbf{a}_1$ to recover $\mathbf{Q}_{2}^T\mathbf{b}+p_1$. Finally, we download the 2 symbols $\mathbf{Q}_{3}^T\mathbf{b}+p_2,a_{1,2}$ to recover $a_{1,5}$. The repair bandwidth of node 5 is 7 symbols. Similarly, we can show that the repair bandwidth of any single-node failure among nodes 6 to 8 is 7 symbols.
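As a quick numerical check of the repair procedure above, the following hedged sketch (our own illustration; it reuses piggyback_index() and piggyback_counts() from the sketch in Section \ref{sec:2.1}) computes the single-node repair bandwidth $k-h+\sum_{i=1}^{s}n_{t_{i,f}}$ (plus $n_{f-k+h-1}$ for the last $h+r-1$ nodes) and reproduces the 5- and 7-symbol repair bandwidths of the $\mathcal{C}(8,6,1,3)$ example.
\begin{verbatim}
def repair_bandwidth(n, k, s, h, f):
    """Symbols downloaded to repair node f (1-based) in C(n, k, s, k'=k-h)."""
    counts = piggyback_counts(n, k, s, h)
    bw = k - h                              # decode the (n,k') instance
    if f > k - h + 1:
        bw += counts[f - (k - h) - 2]       # rebuild the piggybacked parity, n_{f-k+h-1}
    for i in range(1, s + 1):
        # one piggybacked parity plus the other n_t - 1 symbols of p_t
        bw += counts[piggyback_index(n, k, s, h, i, f) - 1]
    return bw

print([repair_bandwidth(8, 6, 1, 3, f) for f in range(1, 9)])
# -> [5, 5, 5, 5, 7, 7, 7, 7]
\end{verbatim}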
\subsection{Average Repair Bandwidth Ratio of Code $\mathcal{C} (n,k,s,k'), k'>0$} \label{sec:2.3} Define the {\em average repair bandwidth} of data nodes (or parity nodes or all nodes) as the ratio of the summation of the repair bandwidth over the $k$ data nodes (or the $r$ parity nodes or all $n$ nodes) to the number of data nodes $k$ (or the number of parity nodes $r$ or the number of all nodes $n$). Define the {\em average repair bandwidth ratio} of data nodes (or parity nodes or all nodes) as the ratio of the average repair bandwidth of the $k$ data nodes (or the $r$ parity nodes or all $n$ nodes) to the number of data symbols. In the following, we present an upper bound on the average repair bandwidth ratio of all $n$ nodes, denoted by $\gamma^{all}$, for the proposed codes $\mathcal{C} (n,k,s,k')$ when $k'>0$. \begin{theorem} \label{th1} When $k'>0$, the average repair bandwidth ratio of all $n$ nodes, $\gamma^{all}$, of codes $\mathcal{C} (n,k,s,k')$, is upper bounded by \begin{eqnarray} \gamma^{all}&\leq&\frac{(u+s)^2(h+r-1)}{(k+r)(sk+k-h)}+\frac{k-h+s}{sk+k-h},\nonumber \end{eqnarray} where $u=\left \lceil \frac{s(k-h+1)}{h+r-1} \right \rceil$. \end{theorem} \begin{proof} Suppose that node $f$ fails, where $f\in\{1,2,\ldots,n\}$. We count the repair bandwidth of node $f$ as follows. Recall that the symbol in row $f$ and column $i$ is used to compute the piggyback function $p_{t_{i,f}}$, where $f\in\{1,2,\ldots,n\}$ and $i\in\{1,2,\ldots,s\}$. Recall also that the number of symbols in the sum computing the piggyback function $p_{t_{i,f}}$ is $n_{t_{i,f}}$. When $f\in\{1,2,\ldots,k-h+1\}$, according to the repair method in Section \ref{sec:2.2}, the repair bandwidth of node $f$ is $(k-h+\sum_{i=1}^{s}n_{t_{i,f}})$ symbols. When $f\in\{k-h+2,\ldots,n\}$, according to the repair method in Section \ref{sec:2.2}, the repair bandwidth of node $f$ is $(k-h+n_{f-k+h-1}+\sum_{i=1}^{s}n_{t_{i,f}})$ symbols. The summation of the repair bandwidth over the $n$ nodes is \begin{eqnarray} &&\sum_{f=1}^{k-h+1}(k-h+\sum_{i=1}^{s}n_{t_{i,f}})+ \sum_{f=k-h+2}^{k+r}(k-h+n_{f-k+h-1}+\sum_{i=1}^{s}n_{t_{i,f}})\nonumber\\ =&&(k+r)(k-h)+\sum_{f=1}^{k+r}(\sum_{i=1}^{s}n_{t_{i,f}})+\sum_{f=k-h+2}^{k+r}n_{f-k+h-1}.\label{eq:rep-sum} \end{eqnarray} Next, we show that \begin{equation} \sum_{f=1}^{k+r}(\sum_{i=1}^{s}n_{t_{i,f}})=\sum_{i=1}^{h+r-1}n_i^2. \label{eq:rep-sum1} \end{equation} Note that $\sum_{f=1}^{k+r}(\sum_{i=1}^{s}n_{t_{i,f}})$ is the summation of the repair bandwidth over the $(k+r)s$ symbols in the first $s$ columns. The $(k+r)s$ symbols are used to compute the $h+r-1$ piggyback functions and each symbol is used in only one piggyback function. For $i=1,2,\ldots,h+r-1$, the piggyback function $p_i$ is the summation of $n_i$ symbols in the first $s$ columns and can recover any one of these $n_i$ symbols with repair bandwidth $n_i$ symbols. Therefore, the summation of the repair bandwidth over the $n_i$ symbols used in computing $p_i$ is $n_i^2$. In other words, the summation of the repair bandwidth over the $(k+r)s$ symbols in the first $s$ columns is the summation of the repair bandwidth over all the $(k+r)s$ symbols used in computing all $h+r-1$ piggyback functions, i.e., Eq. \eqref{eq:rep-sum1} holds. By Eq. \eqref{eq1}, we have $\sum_{f=k-h+2}^{n}n_{f-k+h-1}=\sum_{i=1}^{h+r-1}n_i=s(k+r)$. By Eq. \eqref{eq2}, we have $n_i\leq u+s, \forall i\in\{1,2,\ldots,h+r-1\}$, where $u=\left \lceil \frac{s(k-h+1)}{h+r-1} \right \rceil$.
According to Eq. \eqref{eq:rep-sum} and Eq. \eqref{eq:rep-sum1}, we have \begin{eqnarray} \gamma^{all}&=&\frac{(k+r)(k-h)+\sum_{i=1}^{h+r-1}n_i^2}{(k+r)(sk+k-h)} +\frac{\sum_{f=k-h+2}^{n}n_{f-k+h-1}}{(k+r)(sk+k-h)}\nonumber\\ &=&\frac{(k+r)(k-h+s)+\sum_{i=1}^{h+r-1}n_i^2}{(k+r)(sk+k-h)}\nonumber\\ &\leq&\frac{k-h+s}{sk+k-h}+\frac{(u+s)^2(h+r-1)}{(k+r)(sk+k-h)}.\nonumber \end{eqnarray} \end{proof} Define the {\em storage overhead} to be the ratio of the total number of symbols stored in the $n$ nodes to the total number of data symbols. We have that the storage overhead $s^*$ of codes $\mathcal{C}(n,k,s,k')$ satisfies \begin{eqnarray} &&\frac{k+r}{k}\leq s^*=\frac{(s+1)(k+r)}{sk+k-h}\leq\frac{(s+1)(k+r)}{sk}=(\frac{s+1}{s})\cdot\frac{k+r}{k}.\nonumber \end{eqnarray} \section{Piggybacking Codes $\mathcal{C}(n,k,s,k'=k)$} \label{sec:3} In this section, we consider the special case of codes $\mathcal{C}(n,k,s,k')$ with $k'=k$. When $k'=k$, we have $s\leq r-2$, the created $s+1$ instances are all codewords of the $(n,k)$ MDS code, and the codes $\mathcal{C}(n,k,s,k'=k)$ are MDS codes. The structure of $\mathcal{C}(n,k,s,k'=k)$ is shown in Fig. \ref{fig.3}. \begin{figure}[htpb] \centering \includegraphics[width=0.60\linewidth]{3} \caption{The design of code $\mathcal{C}(n,k,s,k'=k),s\leq r-2$.} \label{fig.3} \end{figure} In $\mathcal{C}(n,k,s,k'=k)$, we have $r-1$ piggyback functions $\{p_i\}_{i=1}^{r-1}$, and each piggyback function $p_i$ is a linear combination of $n_i$ symbols that are located in the first $s$ columns of the $n\times (s+1)$ array, where $i\in\{1,2,\ldots,r-1\}$. According to Eq. \eqref{eq1}, we have \begin{eqnarray} &&\sum_{i=1}^{r-1}n_i=s(k+r).\label{eq7} \end{eqnarray} The average repair bandwidth ratio of all nodes of $\mathcal{C}(n,k,s,k'=k)$ is given in the next theorem. \begin{theorem} \label{th2} The lower bound and the upper bound of the average repair bandwidth ratio of all nodes $\gamma^{all}_{0}$ of $\mathcal{C}(n,k,s,k'=k)$ are \begin{eqnarray} &&\gamma^{all}_{0,min}=\frac{k+s}{(s+1)k}+\frac{s^2(k+r)}{(r-1)(s+1)k} \text{ and }\label{eq8}\\ &&\gamma^{all}_{0,max}=\gamma^{all}_{0,min}+\frac{r-1}{4k(k+r)(s+1)},\label{eq9} \end{eqnarray} respectively. \end{theorem} \begin{proof} By Eq. \eqref{eq:rep-sum}, the summation of the repair bandwidth over the $n$ nodes is \begin{eqnarray} &&(k+r)k+\sum_{i=1}^{r-1}n_i^2+\sum_{i=1}^{r-1}n_{i}.\nonumber \end{eqnarray} By Eq. \eqref{eq7}, we have \begin{eqnarray} \gamma^{all}_0&=&\frac{(k+r)k+\sum_{i=1}^{r-1}n_i^2+\sum_{i=1}^{r-1}n_{i}}{(k+r)(s+1)k}\nonumber\\ &=&\frac{(k+r)(k+s)+\sum_{i=1}^{r-1}n_i^2}{(k+r)(s+1)k}\nonumber\\ &=&\frac{(k+r)(k+s)+\frac{(\sum_{i=1}^{r-1}n_{i})^2+\sum_{i<j}(n_i-n_j)^2}{r-1}}{(k+r)(s+1)k}.\nonumber \end{eqnarray} Note that $\sum_{i<j}(n_i-n_j)^2=t(r-1-t)$ by Eq. \eqref{eq2}, where $t=s(k+1)-\left \lfloor \frac{s(k+1)}{r-1} \right \rfloor(r-1)$. According to Eq.
\eqref{eq7}, we have \begin{eqnarray} \gamma^{all}_0&=&\frac{(k+r)(k+s)+\frac{(\sum_{i=1}^{r-1}n_{i})^2+t(r-1-t)}{r-1}}{(k+r)(s+1)k}\nonumber\\ &=&\frac{k+s}{(s+1)k}+\frac{s^2(k+r)}{(r-1)(s+1)k}+\frac{t(r-1-t)}{(k+r)(s+1)(r-1)k}.\nonumber \end{eqnarray} By the arithmetic-geometric mean inequality, we have $0\leq t(r-1-t)\leq\frac{(r-1)^2}{4}$, and we can further obtain that \begin{eqnarray} &&\frac{k+s}{(s+1)k}+\frac{s^2(k+r)}{(r-1)(s+1)k}\leq\gamma^{all}_{0}\nonumber\\ &&\leq\frac{k+s}{(s+1)k}+\frac{s^2(k+r)}{(r-1)(s+1)k}+\frac{r-1}{4k(k+r)(s+1)}.\nonumber \end{eqnarray} \end{proof} According to Theorem \ref{th2}, the difference between the lower bound and the upper bound of the average repair bandwidth ratio satisfies \begin{eqnarray} |\gamma^{all}_{0}-\gamma^{all}_{0,min}|&\leq&|\gamma^{all}_{0,min}-\gamma^{all}_{0,max}|= \frac{r-1}{4k(k+r)(s+1)}\leq\frac{r-1}{8k(k+r)}.\label{eq10} \end{eqnarray} When $r\ll k$, the difference between $\gamma^{all}_{0}$ and $\gamma^{all}_{0,min}$ can be ignored. In this regime, we characterize the choice of $s$ that minimizes the repair bandwidth of $\mathcal{C}(n,k,s,k'=k)$ as follows. \begin{corollary} \label{col3} Let $r\ll k$ and $k\rightarrow +\infty$. Then the minimum value of the average repair bandwidth ratio $\gamma^{all}_{0}$ of $\mathcal{C}(n,k,s,k'=k)$ is achieved when $s=\sqrt{r}-1$. \end{corollary} \begin{proof} When $r\ll k$ and $k\rightarrow +\infty$, we have $\lim_{k\rightarrow +\infty}\gamma_{0}^{all} =\lim_{k\rightarrow +\infty}\gamma_{0,min}^{all}$ by Eq. \eqref{eq10}, and by Eq. \eqref{eq8}, we can further obtain that \begin{eqnarray} \lim_{k\rightarrow +\infty}\gamma_{0}^{all} =\lim_{k\rightarrow +\infty}\gamma_{0,min}^{all} =\frac{s^2}{(r-1)(s+1)}+\frac{1}{s+1}.\label{eq11} \end{eqnarray} Let $g(s)$ denote the right-hand side of Eq. \eqref{eq11}. We can compute that \begin{eqnarray} &&\frac{\partial g(s)}{\partial s}=\frac{(s+1)^2-r}{(s+1)^2(r-1)}.\nonumber \end{eqnarray} If $s>\sqrt{r}-1$, then $\frac{\partial g(s)}{\partial s}>0$; if $s<\sqrt{r}-1$, then $\frac{\partial g(s)}{\partial s}<0$; if $s=\sqrt{r}-1$, then $\frac{\partial g(s)}{\partial s}=0$. Therefore, when $s=\sqrt{r}-1$, $\lim_{k\rightarrow +\infty}\gamma_{0}^{all}$ achieves the minimum value. \end{proof} Since $s$ should be a positive integer, we can let $s=\left \lfloor \sqrt{r}-1 \right \rfloor$ or $s=\left \lceil \sqrt{r}-1 \right \rceil$ and choose the one that gives the smaller average repair bandwidth ratio $\gamma^{all}_{0}$ for $\mathcal{C}(n,k,s,k'=k)$. \begin{figure} \centering \includegraphics[width=0.55\linewidth]{4} \caption{An example of code $\mathcal{C}(n,k,s,k'=k)$, where $(n,k,s)=(20,14,1)$.} \label{fig.4} \end{figure} Consider a specific example of code $\mathcal{C} (n=20,k=14,s=1,k'=14)$ in Fig. \ref{fig.4}. Suppose that node 1 fails. We can first download the 14 symbols $a_{2,2},a_{2,3},\ldots,a_{2,14},\mathbf{P}_{1}^T\mathbf{a_2}$ to obtain the two symbols $a_{2,1},\boldsymbol{P_2^Ta_2}$, according to the MDS property. Then, we download the following 4 symbols \[ \boldsymbol{P_2^Ta_2}+(a_{1,1}+a_{1,6}+a_{1,11}+\boldsymbol{P_6^Ta_1}), a_{1,6},a_{1,11},\mathbf{P}_{6}^T\mathbf{a}_1 \] to recover $a_{1,1}$. The repair bandwidth of node 1 is 18 symbols. Similarly, we can show that the repair bandwidth of any single-node failure among nodes 2 to 15 is 18 symbols.
Suppose that node 16 fails. We can download the 14 symbols $a_{2,1},a_{2,2},\ldots,a_{2,14}$ to obtain $\mathbf{P}_{2}^T\mathbf{a_{2}},\mathbf{P}_{3}^T\mathbf{a_2}$, according to the MDS property. Then, we download the 4 symbols $\boldsymbol{P_3^Ta_2}+(a_{1,2}+a_{1,7}+a_{1,12}+\boldsymbol{P_2^Ta_1}),a_{1,2},a_{1,7},a_{1,12}$ to recover $\mathbf{P}_{2}^T\mathbf{a_1}$. Finally, we download the 4 symbols $a_{1,1},a_{1,6},a_{1,11},\boldsymbol{P_6^Ta_1}$ to recover $\mathbf{P}_{2}^T\mathbf{a}_2+p_1$. The repair bandwidth of node 16 is 22 symbols. Similarly, we can show that the repair bandwidth of any single-node failure among nodes 17 to 20 is 22 symbols. Therefore, in this example, the average repair bandwidth ratio of all nodes is $\frac{15\times18+5\times22}{20\times28}\approx 0.68$. \section{Piggybacking Codes $\mathcal{C}(n,k,s,k'=0)$} \label{sec:4} In this section, we consider the special case of codes $\mathcal{C}(n,k,s,k'=0)$ with $n\geq s+1$ based on the second piggybacking design. Recall that there is no data symbol in the last column and we add the $n$ piggyback functions in the last column. Here, for $i\in\{1,2,\dots,n\}$, we use $p_i$ to represent the piggyback function in the last column in row (node) $i$. Fig. \ref{fig.5} shows the structure of codes $\mathcal{C}(n,k,s,k'=0)$. \begin{figure}[htpb] \centering \includegraphics[width=0.55\linewidth]{5} \caption{The structure of codes $\mathcal{C}(n,k,s,k'=0)$.} \label{fig.5} \end{figure} For notational convenience, we denote the parity symbol $\mathbf{P}_j^T\mathbf{a}_i$ by $a_{i,k+j}$ in the following, where $1\leq j\leq r, 1\leq i\leq s$. Given an integer $x$ with $-s+1\leq x\leq k+r+s$, we define $\overline{x}$ by \[ \overline{x}=\left\{\begin{matrix} x+k+r, \text{ if }\ -s+1\leq x\leq0\\ x, \text{ if }\ 1\leq x\leq k+r\\ x-k-r, \text{ if }\ k+r+1\leq x\leq k+r+s \end{matrix}\right.. \] According to the design of the piggyback functions in Section \ref{sec:2.1}, the symbol $a_{i,j}$ is used to compute the piggyback function $p_{\overline{i+j}}$ for $i\in\{1,2,\ldots,s\}$ and $j\in\{1,2,\ldots,n\}$. Therefore, the piggyback function is $p_j=\sum_{i=1}^{s}a_{i,\overline{j-i}}$ for $1\leq j\leq k+r$. For any $j\in\{1,2,\ldots,k+r\}$ and $i_1\neq i_2\in\{1,2,\ldots,s\}$, the two symbols in row $j$, columns $i_1$ and $i_2$ are added to two different piggyback functions $p_{\overline{i_1+j}}$ and $p_{\overline{i_2+j}}$, respectively, since $\overline{i_1+j}\neq \overline{i_2+j}$ for $n\geq s+1$. The next theorem shows the repair bandwidth of codes $\mathcal{C}(n,k,s,k'=0)$. \begin{theorem} \label{th4} The repair bandwidth of any single-node failure of codes $\mathcal{C}(n,k,s,k'=0)$ is $s+s^2$ symbols. \end{theorem} \begin{proof} Suppose that node $f$ fails, where $f\in \{1,2,\ldots,n\}$. Similar to the repair method in Section \ref{sec:2.2}, we can first repair the piggyback function $p_f$ (the symbol in row $f$ and column $s+1$) by downloading the $s$ symbols $\{a_{j,\overline{f-j}}\}_{j=1}^s$. Note that the symbol $a_{j,f}$ is used to compute the piggyback function $p_{\overline{j+f}}$ for $j\in\{1,2,\ldots,s\}$; we can recover the symbol $a_{j,f}$ by downloading $p_{\overline{j+f}}$ and the other $s-1$ symbols used in computing $p_{\overline{j+f}}$. Therefore, the repair bandwidth of node $f$ is $s+s^2$ symbols. \end{proof} By Theorem \ref{th4}, the average repair bandwidth ratio of $\mathcal{C}(n,k,s,k'=0)$ is $\frac{s+1}{k}$.
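To make the second design and Theorem \ref{th4} concrete, the following is a minimal Python sketch (our own illustration, not the authors' implementation) that lists the symbols downloaded to repair one node of $\mathcal{C}(n,k,s,k'=0)$, using the piggyback map $p_j=\sum_{i=1}^{s}a_{i,\overline{j-i}}$; for $\mathcal{C}(7,5,2,k'=0)$ (the example of Fig. \ref{fig.7} later in this section) it counts the $s+s^2=6$ downloaded symbols.
\begin{verbatim}
def bar(x, n):
    """Wrap an index into {1, ..., n} (the bar notation of this section)."""
    return (x - 1) % n + 1

def repair_downloads(n, s, f):
    """Symbols downloaded to repair node f in C(n,k,s,k'=0); a symbol is
    labelled ('a', i, j) for row j of instance i, and ('p', j) for the
    piggyback function stored at node j."""
    downloads = []
    # Step 1: rebuild p_f from the s symbols it sums (none of them is in node f).
    downloads += [('a', i, bar(f - i, n)) for i in range(1, s + 1)]
    # Step 2: a_{i,f} is summed into p_{bar(i+f)}; download that piggyback
    # function and the other s-1 symbols it sums.
    for i in range(1, s + 1):
        m = bar(i + f, n)
        downloads.append(('p', m))
        downloads += [('a', t, bar(m - t, n)) for t in range(1, s + 1) if t != i]
    return downloads

print(len(repair_downloads(7, 2, 1)))   # -> 6, i.e. s + s^2
\end{verbatim}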
The storage overhead of $\mathcal{C}(n,k,s,k'=0)$ is $$\frac{(s+1)(k+r)}{sk}=(1+\frac{1}{s})\cdot\frac{k+r}{k}.$$ We show in the next theorem that our codes $\mathcal{C}(n,k,s,k'=0)$ can recover any $r+1$ node failures under some condition. \begin{theorem} \label{th5} If $k>(s-1)(r+1)+1$, then the codes $\mathcal{C}(n,k,s,k'=0)$ can recover any $r+1$ node failures. \end{theorem} \begin{proof} Suppose that $r+1$ nodes $f_1,f_2,\ldots,f_{r+1}$ fail, where $1\leq f_1<f_2<\cdots<f_{r+1}\leq k+r$. In the following, we present a repair method to recover the $r+1$ failed nodes. For $i\in\{1,2,\ldots,r\}$, let $t_i$ be the number of surviving nodes between two failed nodes $f_i$ and $f_{i+1}$, and let $t_{r+1}$ be the number of surviving nodes between nodes $f_{r+1}$ and $k+r$ plus the number of surviving nodes between nodes 1 and $f_1$, i.e., $t_i=f_{i+1}-f_i-1$ and $t_{r+1}=k+r-f_{r+1}+f_1-1$. It is easy to see that \begin{eqnarray} &&\sum_{i=1}^{r+1}t_i=k-1.\nonumber \end{eqnarray} Let $t_{\max}=\max\{t_1,t_{2},\ldots,t_{r+1}\}$ and, without loss of generality, assume that $t_{\max}=t_j$ with $j\in\{1,2,\ldots,r+1\}$. We have $t_j=t_{\max}\geq t_i$ for $i=1,2,\ldots,r+1$ and \begin{eqnarray} &&(r+1)t_j\geq\sum_{i=1}^{r+1}t_i=k-1>(s-1)(r+1),\nonumber \end{eqnarray} where the last inequality follows from $k>(s-1)(r+1)+1$. Therefore, we obtain that $t_j\geq s$. We are now ready to describe the repair method for the $r+1$ failed nodes. We can first repair one symbol stored in the failed node $f_j$; some of the symbols that are used to repair this symbol are shown in Fig. \ref{fig.6}. Recall that the symbol $p_{\overline{f_j+s}}$ is located in column $s+1$ of node $\overline{f_j+s}$. We claim that node $\overline{f_j+s}$ is not a failed node, since $t_j\geq s$. Recall also that $p_{\overline{f_j+s}}=a_{s,f_j}+\sum_{i=1}^{s-1}a_{i,\overline{f_j+s-i}}$. We claim that node $\overline{f_j+s-i}$ is not a failed node, for all $1\leq i\leq s-1$, since $t_{j}\geq s$. Therefore, we can download the $s$ symbols $p_{\overline{f_j+s}}, \{a_{i,\overline{f_j+s-i}}\}_{i=1}^{s-1}$ to recover the symbol $a_{s,f_j}$. Once the symbol $a_{s,f_j}$ is recovered, we can recover the other $r$ failed symbols $\{a_{s,f_i}\}_{i=1,2,\ldots,j-1,j+1,\ldots,r+1}$ in column $s$ in nodes $f_1,\ldots,f_{j-1},f_{j+1},\ldots,f_{r+1}$, according to the MDS property of the MDS codes. \begin{figure}[htpb] \centering \includegraphics[width=0.7\linewidth]{6} \caption{Piggyback functions of the codes $\mathcal{C}(n,k,s,k'=0)$, where one piggyback function and the corresponding symbols that are used to compute the piggyback function are with the same color.} \label{fig.6} \end{figure} Next, we present a repair method to recover the symbol $a_{s-1,f_j}$. First, recall that \[ p_{\overline{f_j+s-1}}=a_{s-1,f_j}+a_{s,\overline{f_j-1}}+\sum_{i=1}^{s-2}a_{i,\overline{f_j+s-1-i}}, \] node $\overline{f_j+s-1-i}$ is not a failed node for all $1\leq i\leq s-2$, and the symbol $a_{s,\overline{f_j-1}}$ has already been recovered. Therefore, we can recover $a_{s-1,f_j}$ by downloading the following $s$ symbols \[ p_{\overline{f_j+s-1}}, a_{s,\overline{f_j-1}}, \{a_{i,\overline{f_j+s-1-i}}\}_{i=1}^{s-2}. \] Once the symbol $a_{s-1,f_j}$ is recovered, we can recover the other $r$ failed symbols $\{a_{s-1,f_i}\}_{i=1,2,\ldots,j-1,j+1,\ldots,r+1}$ in column $s-1$ in nodes $f_1,\ldots,f_{j-1},f_{j+1},\ldots,f_{r+1}$, according to the MDS property of the MDS codes.
Similarly, we can recover $a_{s-\ell,f_j}$ by downloading the following $s$ symbols \[ p_{\overline{f_j+s-\ell}}, \{a_{s-\ell+i,\overline{f_j-i}}\}_{i=1}^{\ell}, \{a_{s-\ell-i,\overline{f_j+i}}\}_{i=1}^{s-\ell-1}, \] and further recover the other $r$ failed symbols $\{a_{s-\ell,f_i}\}_{i=1,2,\ldots,j-1,j+1,\ldots,r+1}$ in column $s-\ell$ in nodes $f_1,\ldots,f_{j-1},f_{j+1},\ldots,f_{r+1}$, according to the MDS property of the MDS codes, where $\ell=1,2,\ldots,s-1$. We have thus recovered the $(r+1)s$ symbols stored in the first $s$ columns of the $r+1$ failed nodes, and we can then recover the remaining $r+1$ symbols (the piggyback functions) in the last column of the $r+1$ failed nodes. \end{proof} Recall that an $(n,k,g)$ Azure-LRC code \cite{huang2012} first computes $n-k-g$ global parity symbols by encoding all the $k$ data symbols, then divides the $k$ data symbols into $g$ groups each with $\frac{k}{g}$ symbols (suppose that $k$ is a multiple of $g$) and computes one local parity symbol for each group. Each of the obtained $n$ symbols is stored in one node, and an $(n,k,g)$ Azure-LRC code can tolerate any $n-k-g+1$ node failures. In the following theorem, we show that the proposed codes $\mathcal{C}(n-g,k,s,k'=0)$ have lower repair bandwidth than the $(n,k,g)$ Azure-LRC code \cite{huang2012} under the condition that both the fault-tolerance and the storage overhead of the two codes are the same. \begin{theorem} \label{th6} If $2g>n-k+1$ and $n^2-k^2<kg\cdot (n-k-g+1)$, then the proposed codes $\mathcal{C}(n-g,k,\frac{n-g}{g},k'=0)$ (suppose that $\frac{n-g}{g}$ is an integer) have strictly lower repair bandwidth than the $(n,k,g)$ Azure-LRC code, under the condition that both the fault-tolerance and the storage overhead of the two codes are the same. \end{theorem} \begin{proof} Recall that the storage overhead of the $(n,k,g)$ Azure-LRC code is $\frac{n}{k}$ and the fault-tolerance of the $(n,k,g)$ Azure-LRC code is $n-k-g+1$. We should determine the parameter $s$ for $\mathcal{C}(n-g,k,s,k'=0)$ such that the storage overhead is $\frac{n}{k}$ and the fault-tolerance is $n-k-g+1$. When $s=\frac{n-g}{g}$, we have that the storage overhead of code $\mathcal{C}(n-g,k,s,k'=0)$ is \begin{eqnarray} \frac{(s+1)\cdot(k+r)}{sk}=\frac{(\frac{n-g}{g}+1)\cdot(n-g)}{(\frac{n-g}{g})\cdot k}=\frac{n}{k},\nonumber \end{eqnarray} which is equal to the storage overhead of the $(n,k,g)$ Azure-LRC code. By the assumption $2g>n-k+1$, we can obtain that \begin{align*} (s-1)(r+1)+1=(\frac{n-g}{g}-1)(n-k-g+1)+1<(\frac{n-g}{g}-1) g+1=n-2g+1<k. \end{align*} Therefore, the condition in Theorem \ref{th5} is satisfied, the fault-tolerance of our code $\mathcal{C}(n-g,k,\frac{n-g}{g},k'=0)$ is $r+1=n-k-g+1$, and the code length of $\mathcal{C}(n-g,k,\frac{n-g}{g},k'=0)$ is $n-g$. In the following, we show that the average repair bandwidth ratio of all nodes of our $\mathcal{C}(n-g,k,\frac{n-g}{g},k'=0)$ is strictly less than that of the $(n,k,g)$ Azure-LRC code. Let the average repair bandwidth ratio of all nodes of the $(n,k,g)$ Azure-LRC code and $\mathcal{C}(n-g,k,\frac{n-g}{g},k'=0)$ be $\gamma_{1}$ and $\gamma_{2}$, respectively. In the $(n,k,g)$ Azure-LRC code, we can repair any symbol in a group by downloading the other $\frac{k}{g}$ symbols in the group and repair any global parity symbol by downloading the $k$ data symbols.
The average repair bandwidth ratio of all nodes of the $(n,k,g)$ Azure-LRC code is \begin{eqnarray} &&\gamma_{1}=\frac{(k+g)\cdot \frac{k}{g}+(n-k-g)\cdot k}{nk}=\frac{(n-k-g+1)\cdot g+k}{ng}.\nonumber \end{eqnarray} According to Theorem \ref{th4}, the average repair bandwidth ratio of all nodes of $\mathcal{C}(n-g,k,\frac{n-g}{g},k'=0)$ is $\frac{s+1}{k}$. We have that \begin{eqnarray} &&\gamma_{2}<\gamma_{1}\Leftrightarrow\frac{s+1}{k}<\frac{(n-k-g+1)\cdot g+k}{ng}\nonumber\\ &&\Leftrightarrow\frac{\frac{n-g}{g}+1}{k}<\frac{(n-k-g+1)\cdot g+k}{ng}\nonumber\\ &&\Leftrightarrow n^2-k^2<kg\cdot (n-k-g+1).\nonumber \end{eqnarray} By the assumption, we have that $\gamma_{2}<\gamma_{1}$. \end{proof} By Theorem \ref{th6}, when its conditions are satisfied, the storage overhead of $\mathcal{C}(n-g,k,\frac{n-g}{g},k'=0)$ and the $(n,k,g)$ Azure-LRC code are the same, and $\mathcal{C}(n-g,k,\frac{n-g}{g},k'=0)$ has strictly lower repair bandwidth than the $(n,k,g)$ Azure-LRC code. Since the storage overhead and the repair bandwidth of $\mathcal{C}(n,k+g,\frac{n}{g},k'=0)$ are strictly less than those of $\mathcal{C}(n-g,k,\frac{n-g}{g},k'=0)$, we thus obtain that the proposed codes $\mathcal{C}(n,k+g,\frac{n}{g},k'=0)$ have better performance than the $(n,k,g)$ Azure-LRC code in terms of both storage overhead and repair bandwidth, when $2g>n-k+1$ and $n^2-k^2<kg\cdot (n-k-g+1)$. Note that, with $\mathcal{C}(n,k+g,\frac{n}{g},k'=0)$, one can repair any single-node failure by accessing $\frac{2n}{g}$ helper nodes; however, one only needs to access $\frac{k}{g}$ helper nodes in repairing any symbol in a group and $k$ helper nodes in repairing any global parity symbol with the $(n,k,g)$ Azure-LRC code. Recall that an $(n,k,g)$ optimal-LRC first encodes the $k$ data symbols to obtain $n-k-g$ global parity symbols, then divides the $n-g$ symbols (including the $k$ data symbols and the $n-k-g$ global parity symbols) into $g$ groups each with $(n-g)/g$ symbols and encodes one local parity symbol for each group, where $n-g$ is a multiple of $g$. Each of the obtained $n$ symbols is stored in one node, and an $(n,k,g)$ optimal-LRC code can tolerate any $n-k-g+1$ node failures. The repair bandwidth of any single-node failure of optimal-LRC is $(n-g)/g$ symbols. The next theorem shows that our codes $\mathcal{C}(n,k+g,\frac{n-g}{g},k'=0)$ have better performance than the $(n,k,g)$ optimal-LRC code, in terms of both storage overhead and repair bandwidth. \begin{theorem} \label{th7} If $2g>n-k+1$, then the proposed codes $\mathcal{C}(n,k+g,\frac{n-g}{g},k'=0)$ (suppose that $\frac{n-g}{g}$ is an integer) have strictly lower storage overhead and lower repair bandwidth than the $(n,k,g)$ optimal-LRC code, under the same fault-tolerance capability. \end{theorem} \begin{proof} When $s=\frac{n-g}{g}$, the storage overhead of $\mathcal{C}(n,k+g,\frac{n-g}{g},k'=0)$ is \begin{align*} \frac{(s+1)\cdot n}{s\cdot(k+g)}=\frac{(\frac{n-g}{g}+1)\cdot n}{(\frac{n-g}{g})\cdot (k+g)}=\frac{n^2}{(n-g)\cdot(k+g)}, \end{align*} while the storage overhead of the $(n,k,g)$ optimal-LRC is $\frac{n}{k}$. We have that \begin{align*} \frac{n^2}{(n-g)\cdot(k+g)}<\frac{n}{k}\Leftrightarrow nk<(n-g)(k+g)\Leftrightarrow k+g<n. \end{align*} The last inequality is obviously true. Therefore, $\mathcal{C}(n,k+g,\frac{n-g}{g},k'=0)$ has strictly lower storage overhead than the $(n,k,g)$ optimal-LRC. By the assumption $2g>n-k+1$, we can obtain that \begin{align*} (s-1)(r+1)+1=(\frac{n-g}{g}-1)(n-k-g+1)+1<(\frac{n-g}{g}-1) g+1=n-2g+1<k+g.
\end{align*} The condition in Theorem \ref{th5} is thus satisfied, and the fault-tolerance of $\mathcal{C}(n,k+g,\frac{n-g}{g},k'=0)$ is $r+1=n-k-g+1$, which is equal to the fault-tolerance of the $(n,k,g)$ optimal-LRC code. Recall that the average repair bandwidth ratios of all nodes of the $(n,k,g)$ optimal-LRC and $\mathcal{C}(n,k+g,\frac{n-g}{g},k'=0)$ are $\frac{n-g}{k\cdot g}$ and $\frac{s+1}{k+g}$, respectively. We have that \begin{align*} \frac{s+1}{k+g}<\frac{n-g}{k\cdot g}\Leftrightarrow\frac{(\frac{n-g}{g})+1}{k+g}<\frac{n-g}{k\cdot g}\Leftrightarrow nk<(n-g)(k+g)\Leftrightarrow k+g<n. \end{align*} The last inequality is obviously true. Therefore, $\mathcal{C}(n,k+g,\frac{n-g}{g},k'=0)$ has strictly lower repair bandwidth than the $(n,k,g)$ optimal-LRC. \end{proof} \begin{figure} \centering \includegraphics[width=0.50\linewidth]{7} \caption{An example of code $\mathcal{C}(n=7,k=5,s=2,k'=0)$.} \label{fig.7} \end{figure} Consider the code $\mathcal{C}(n=7,k=5,s=2,k'=0)$ which is shown in Fig. \ref{fig.7}. Suppose that node 1 fails. We can first download the 2 symbols $\mathbf{P}_{2}^T\mathbf{a}_1,\mathbf{P}_{1}^T\mathbf{a}_2$ to obtain the piggyback function $p_1=\mathbf{P}_{2}^T\mathbf{a}_1+\mathbf{P}_{1}^T\mathbf{a}_2$, then download the 2 symbols $a_{1,1}+\mathbf{P}_{2}^T\mathbf{a}_2, \mathbf{P}_{2}^T\mathbf{a}_2$ to recover $a_{1,1}$, and finally download the 2 symbols $a_{1,2}+a_{2,1},a_{1,2}$ to recover $a_{2,1}$. The repair bandwidth of node 1 is 6 symbols. Similarly, the repair bandwidth of any single-node failure is 6 symbols. We claim that the code $\mathcal{C}(n=7,k=5,s=2,k'=0)$ can tolerate any $r+1=3$ node failures. Suppose that nodes 2, 4 and 6 fail. The numbers of surviving nodes between consecutive failed nodes are $t_1=1$, $t_2=1$ and $t_3=2$. We can first repair the symbol $\mathbf{P}_{1}^T\mathbf{a}_2$ in node 6 by downloading the symbols $\mathbf{P}_{2}^T\mathbf{a}_1+\mathbf{P}_{1}^T\mathbf{a}_2, \mathbf{P}_{2}^T\mathbf{a}_1$. Since the symbol $\mathbf{P}_{1}^T\mathbf{a}_2$ has been recovered, according to the MDS property of the second column, we can recover $a_{2,2},a_{2,4}$. Then, we can download the symbols $a_{2,5},\mathbf{P}_{1}^T\mathbf{a}_1+a_{2,5}$ to recover $\mathbf{P}_{1}^T\mathbf{a}_1$. Finally, since $\mathbf{P}_{1}^T\mathbf{a}_1$ has been repaired, we can recover $a_{1,2},a_{1,4}$ by the MDS property of the first column. Up to now, we have recovered the first two symbols in each of the three failed nodes. Since the third symbol in each of the three failed nodes is a piggyback function, we can recover it by reading the corresponding symbols in the first two columns. The repair method for any three failed nodes is similar to the above repair method. \section{Comparison} \label{sec:com} In this section, we evaluate the repair bandwidth of our piggybacking codes $\mathcal{C}(n,k,s,k')$ and other related codes, such as the existing piggybacking codes \cite{2017Piggybacking,2021piggyback} and the $(n,k,g)$ Azure-LRC code \cite{huang2012}, under the same fault-tolerance and storage overhead. \subsection{Codes $\mathcal{C}(n,k,s,k'=k)$ VS Piggybacking Codes} \label{sec:com1} Recall that OOP codes \cite{2019AnEfficient} have the lowest repair bandwidth for any single-node failure among the existing piggybacking codes when $r\leq10$ and the sub-packetization is $r-1+\lfloor \sqrt{r-1}\rfloor$ or $r-1+\lceil \sqrt{r-1}\rceil$, while the codes in \cite{2021piggybacking} have the lowest repair bandwidth for any single-node failure among the existing piggybacking codes when $r\geq10$ and the sub-packetization is $r$.
REPB codes \cite{2018Repair} have small repair bandwidth for any single data node failure with sub-packetization usually less than $r$. Moreover, the codes in \cite{2021piggyback} have lower repair bandwidth than the existing piggybacking codes when $r\geq10$ and the sub-packetization is less than $r$. There are two constructions in \cite{2021piggyback}; the first construction has larger repair bandwidth than the second one. We choose the codes in \cite{2018Repair,2021piggybacking} and the second construction in \cite{2021piggyback} as the main comparison. Let $\mathcal{C}_{1}$ be the second piggybacking codes in \cite{2021piggyback} and $\mathcal{C}_{2}$ be the codes in \cite{2021piggybacking}. Fig. \ref{fig.8} shows the average repair bandwidth ratio of all nodes for codes $\mathcal{C}_{1}$, $\mathcal{C}_{2}$, REPB codes \cite{2018Repair} and the proposed codes $\mathcal{C}(n,k,s,k'=k)$, where $r=8,9$ and $k=10,11,\ldots,100$. Note that the sub-packetization of REPB codes and $\mathcal{C}_{1}$ is not given explicitly and is usually less than $r$. In Fig. \ref{fig.8}, we choose the lower bound of the repair bandwidth of REPB codes and $\mathcal{C}_{1}$. The sub-packetization of $\mathcal{C}_{2}$ is $r$. The results in Fig. \ref{fig.8} demonstrate that the proposed codes $\mathcal{C}(n,k,s,k'=k)$ have the lowest average repair bandwidth ratio of all nodes among the existing piggybacking codes, when the sub-packetization level is less than $r$ and $k\geq 30$. Note that the sub-packetization of our codes $\mathcal{C}(n,k,s,k'=k)$ is approximately $\sqrt{r}$, which is lower than that of $\mathcal{C}_{2}$. \begin{figure}[htpb] \centering \subfigure[$r=8$]{ \includegraphics[width=7cm]{add1}} \subfigure[$r=9$]{ \includegraphics[width=7cm]{add2}} \caption{Average repair bandwidth ratio of all nodes for codes $\mathcal{C}(n,k,s,k'=k)$, REPB, $\mathcal{C}_{1}$ and $\mathcal{C}_{2}$, where $r=8$ and $k=10,11,\ldots,100$ in $(a)$, $r=9$ and $k=10,11,\ldots,100$ in $(b)$.} \label{fig.8} \end{figure} \subsection{Codes $\mathcal{C}(n,k,s,k'=k-sr-1)$ VS Piggybacking Codes} Recall that OOP codes \cite{2019AnEfficient} have the lowest repair bandwidth for any single-node failure among the existing piggybacking codes when $r\leq10$, where the sub-packetization of OOP codes is $r-1+\lfloor \sqrt{r-1}\rfloor$ or $r-1+\lceil \sqrt{r-1}\rceil$, and the minimum value of the average repair bandwidth ratio of all nodes $\gamma_{OOP}^{all}$ is \begin{eqnarray} &&\gamma_{OOP}^{all}=\frac{1}{k+r}(k\cdot \frac{2\sqrt{r-1}+1}{2\sqrt{r-1}+r}+r\cdot(\frac{\sqrt{r-1}}{r}+\frac{1}{r}+\frac{(r-1)^2-\sqrt{(r-1)^3}}{kr})).\label{eq0} \end{eqnarray} In the following, we show that our codes $\mathcal{C}(n,k,s,k'=k-sr-1)$ have strictly lower repair bandwidth than OOP codes and thus lower repair bandwidth than all the existing piggybacking codes when $r\leq10$. \begin{lemma} \label{col1} Let $\gamma_{1}^{all}$ be the average repair bandwidth ratio of all nodes of $\mathcal{C}(n,k,s,k'=k-sr-1)$. When $2\leq r\ll k$ and $2+\sqrt{r-1}\leq s$, we have \begin{eqnarray} &&\lim_{k\rightarrow +\infty}\gamma_{1}^{all}< \lim_{k\rightarrow +\infty}\gamma_{OOP}^{all}.\nonumber \end{eqnarray} \end{lemma} \begin{proof} According to Theorem \ref{th1}, since $k-k'=h=sr+1$, we have \begin{eqnarray} \lim_{k\rightarrow +\infty}\gamma_{1}^{all}&\leq &\frac{s^2}{(sr+r)(s+1)}+\frac{1}{s+1}\nonumber\\&=&\frac{s^2}{(s+1)^2}\cdot\frac{1}{r}+\frac{1}{s+1}<\frac{1}{r}+\frac{1}{s+1}.\nonumber \end{eqnarray} From Eq.
\eqref{eq0}, when $r\geq2$, we have \begin{eqnarray} (\underset{k\rightarrow+\infty}{lim}\gamma_{OOP}^{all})-\frac{1}{r} &=&\frac{2\sqrt{r-1}+1}{2\sqrt{r-1}+r}-\frac{1}{r}\nonumber\\ &=&\frac{2\sqrt{r-1}}{2\sqrt{r-1}+r}\cdot\frac{r-1}{r}\nonumber\\ &\geq&\frac{2\sqrt{r-1}}{2\sqrt{r-1}+r}\cdot\frac{1}{2}\nonumber\\ &=&\frac{\sqrt{r-1}}{2\sqrt{r-1}+r}.\label{eq5} \end{eqnarray} When $s\geq\sqrt{r-1}+2$ and $r\geq2$, we have \begin{eqnarray} s&\geq&\sqrt{r-1}+2\nonumber\\ &=&(\sqrt{r-1}+1)+1\nonumber\\ &=&(\frac{r-1}{\sqrt{r-1}}+1)+1\nonumber\\ &\geq&(\frac{r-1}{\sqrt{r-1}}+1)+\frac{1}{\sqrt{r-1}}\nonumber\\ &=&\frac{r+\sqrt{r-1}}{\sqrt{r-1}}.\label{eq6} \end{eqnarray} From Eq. \eqref{eq6} and Eq. \eqref{eq5}, we have \begin{eqnarray} \frac{1}{s+1}&\leq&(\frac{r+\sqrt{r-1}}{\sqrt{r-1}}+1)^{-1}\nonumber\\ &=&\frac{\sqrt{r-1}}{2\sqrt{r-1}+r}\nonumber\\ &\leq&(\underset{k\rightarrow+\infty}{lim}\gamma_{OOP}^{all})-\frac{1}{r}.\nonumber \end{eqnarray} Therefore, we have \begin{eqnarray} &&\underset{k\rightarrow +\infty}{lim}\gamma_{1}^{all}<\frac{1}{r}+\frac{1}{s+1}\leq \underset{k\rightarrow +\infty}{lim}\gamma_{OOP}^{all}.\nonumber \end{eqnarray} \end{proof} It is easy to see that the storage overhead of $\mathcal{C}(n,k,s,k'=k-sr-1)$ ranges from $\frac{k+r}{k}$ to $\frac{k}{k-r}\cdot\frac{k+r}{k}$. According to Lemma \ref{col1}, our codes $\mathcal{C}(n,k,s,k'=k-sr+1)$ have strictly less repair bandwidth than all the existing piggybacking codes when $r\ll k$, at a cost of slightly more storage overhead. \subsection{The proposed Codes VS LRC} \label{sec:com2} Next, we evaluate the repair bandwidth of our codes $\mathcal{C}(n,k+g,\frac{n}{g},k'=0)$, codes $\mathcal{C}(n,k+g,\frac{n-g}{g},k'=0)$, Azure-LRC \cite{huang2012} and optimal-LRC. According to Theorem \ref{th6}, the repair bandwidth of our $\mathcal{C}(n,k+g,\frac{n}{g},k'=0)$ is strictly less than that of $(n,k,g)$ Azure-LRC \cite{huang2012}. Fig. \ref{fig.9} shows the average repair bandwidth ratio of all nodes for $\mathcal{C}(n,k+g,\frac{n}{g},k'=0)$ and $(n,k,g)$ Azure-LRC when the fault-tolerance is 8 and $n=100$. The results demonstrate that $\mathcal{C}(n,k+g,\frac{n}{g},k'=0)$ have strictly less repair bandwidth than $(n,k,g)$ Azure-LRC. Moreover, our $\mathcal{C}(n,k+g,\frac{n}{g},k'=0)$ have less storage overhead than $(n,k,g)$ Azure-LRC. For example, $\mathcal{C}(n,k+g,\frac{n}{g},k'=0)$ have 44.92\% less repair bandwidth and 6.16\% less storage overhead than $(n,k,g)$ Azure-LRC when $(n,k,g)=(100,73,20)$. \begin{figure}[htpb] \centering \includegraphics[width=0.50\linewidth]{P-L1} \caption{Average repair bandwidth ratio of all nodes for codes $\mathcal{C}(n,k+g,\frac{n}{g},k'=0)$ and $(n,k,g)$ Azure-LRC, where $10\leq g\leq20$, fault-tolerance is 8, and $n=100$.} \label{fig.9} \end{figure} \begin{figure} \centering \includegraphics[width=0.50\linewidth]{lrc} \caption{Average repair bandwidth ratio of all nodes for codes $\mathcal{C}(n,k+g,\frac{n-g}{g},k'=0)$ and $(n,k,g)$ optimal-LRC under the same fault-tolerance and code length $n=100$, where $10\leq g\leq20$.} \label{fig.10} \end{figure} According to Theorem \ref{th7}, the repair bandwidth of our $\mathcal{C}(n,k+g,\frac{n-g}{g},k'=0)$ is strictly less than that of $(n,k,g)$ optimal-LRC. Fig. \ref{fig.10} shows the average repair bandwidth ratio of all nodes for $\mathcal{C}(n,k+g,\frac{n-g}{g},k'=0)$ and $(n,k,g)$ optimal-LRC under the same fault-tolerance and $n=100$. 
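As an informal numerical sanity check of this comparison (the following script and its parameter choices are ours and are not part of the evaluation), one can directly evaluate the two closed-form average repair bandwidth ratios used in the proof of Theorem \ref{th7}: $\frac{s+1}{k+g}$ with $s=\frac{n-g}{g}$ for $\mathcal{C}(n,k+g,\frac{n-g}{g},k'=0)$, and $\frac{n-g}{k\cdot g}$ for the $(n,k,g)$ optimal-LRC. Below we fix the fault tolerance at $8$ as in Fig. \ref{fig.9}; the exact parameters behind Fig. \ref{fig.10} may differ.
\begin{verbatim}
# Hedged sketch (ours): compare the closed-form average repair bandwidth
# ratios from the proof of Theorem th7 for n = 100, fault tolerance 8.
# The actual construction presumably also needs s = (n-g)/g to be an
# integer; here we only compare the two formulas.
n = 100
for g in range(10, 21):
    k = n - g - 7                       # fault tolerance n - k - g + 1 = 8
    s = (n - g) / g
    ratio_proposed = (s + 1) / (k + g)  # C(n, k+g, (n-g)/g, k'=0)
    ratio_lrc = (n - g) / (k * g)       # (n, k, g) optimal-LRC
    print(g, k, round(ratio_proposed, 4), round(ratio_lrc, 4),
          ratio_proposed < ratio_lrc)
\end{verbatim}
For every $g$ in this sweep the proposed ratio is the smaller one, consistent with the inequality $k+g<n$ established in the proof of Theorem \ref{th7}.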
The results in Fig. \ref{fig.10} demonstrate that $\mathcal{C}(n,k+g,\frac{n-g}{g},k'=0)$ has strictly less repair bandwidth than the $(n,k,g)$ optimal-LRC. \section{Conclusion} \label{sec:con} In this paper, we propose two new piggybacking coding designs. Based on the first design, we propose one class of piggybacking codes that are MDS codes and have lower repair bandwidth than the existing piggybacking codes when $r\geq8$ and the sub-packetization is $\alpha<r$. Based on the second design, we propose another class of piggybacking codes that are non-MDS codes and achieve a better tradeoff between storage overhead and repair bandwidth, compared with Azure-LRC and optimal-LRC for some parameters. One direction for future work is to generalize the piggybacking coding design to codewords of more than two different MDS codes. Another is to obtain the condition under which $\mathcal{C}(n,k,s,k'=0)$ can recover any $r+2$ failures. \ifCLASSOPTIONcaptionsoff \newpage \fi \bibliographystyle{IEEEtran} \bibliography{CNC-v1} \end{document}
2205.14541v1
http://arxiv.org/abs/2205.14541v1
$\ell^{\infty}$ Poisson invariance principles from two classical Poisson limit theorems and extension to non-stationary independent sequences
\documentclass{amsart} \usepackage{amssymb} \usepackage{amsfonts} \usepackage{amsmath} \usepackage[legalpaper,bookmarks=true,colorlinks=true,linkcolor=blue,citecolor=blue]{hyperref} \usepackage{graphicx}\setcounter{MaxMatrixCols}{30} \usepackage{fancyhdr} \usepackage{color} \usepackage[mathlines]{lineno} \usepackage{lscape} \usepackage{epsfig} \usepackage{natbib} \usepackage{geometry} \usepackage{tgbonum} \usepackage[]{listings} \fontfamily{qcr}\selectfont \newtheorem{theorem}{Theorem} \theoremstyle{plain} \newtheorem{acknowledgement}{Acknowledgement} \newtheorem{algorithm}{Algorithm} \newtheorem{axiom}{Axiom} \newtheorem{case}{Case} \newtheorem{claim}{Claim} \newtheorem{conclusion}{Conclusion} \newtheorem{condition}{Condition} \newtheorem{conjecture}{Conjecture} \newtheorem{corollary}{Corollary} \newtheorem{criterion}{Criterion} \newtheorem{definition}{Definition} \newtheorem{example}{Example} \newtheorem{exercise}{Exercise} \newtheorem{lemma}{Lemma} \newtheorem{notation}{Notation} \newtheorem{problem}{Problem} \newtheorem{proposition}{Proposition} \newtheorem{remark}{Remark} \newtheorem{solution}{Solution} \newtheorem{summary}{Summary} \numberwithin{equation}{section} \newcommand{\Bin}{\bigskip \noindent} \newcommand{\Bi}{\bigskip} \newcommand{\Ni}{\noindent} \begin{document} \Large \title[$\ell^{\infty}$ Poisson invariance principles from two Poisson limit theorems and extension]{ $\ell^{\infty}$ Poisson invariance principles from two classical Poisson limit theorems and extension to non-stationary independent sequences} \author{Gane Samb Lo} \author{Aladji Babacar Niang}\author{Amadou Ball} \begin{abstract} The simple Lévy Poisson process and scaled forms are explicitly constructed from partial sums of independent and identically distributed random variables and from sums of non-stationary independent random variables. For the latter, the weak limits are scaled Poisson processes. 
The method proposed here prepares generalizations to dependent data, to associated data in the first place.\\ \noindent $^{\dag}$ Gane Samb Lo.\\ LERSTAD, Gaston Berger University, Saint-Louis, S\'en\'egal (main affiliation).\newline LSTA, Pierre and Marie Curie University, Paris VI, France.\newline AUST - African University of Sciences and Technology, Abuja, Nigeria\\ Imhotep Mathematical Center (IMC), imhotepsciences.org\\ [email protected], [email protected], [email protected]\\ Permanent address : 1178 Evanston Dr NW T3P 0J9, Calgary, Alberta, Canada.\\ \noindent $^{\dag\dag}$ Aladji Babacar Niang\\ LERSTAD, Gaston Berger University, Saint-Louis, S\'en\'egal.\\ Email: [email protected], [email protected]\\ Imhotep Mathematical Center (IMC), imhotepsciences.org\\ \noindent $^{\dag\dag\dag}$ Amadou Ball\\ LERSTAD, Gaston Berger University, Saint-Louis, S\'en\'egal (main affiliation).\newline [email protected]\\ \noindent \textbf{Keywords}: central limit theorem for arrays of random variables; infinitely divisible laws; simple and scaled Poisson L\'evy processes; weak convergence of stochastic processes in the space of bounded functions on $(0,1)$.\\ \Ni\textbf{AMS 2010 Mathematics Subject Classification}: 60F17; 60E07. \end{abstract} \maketitle \newpage \noindent \textbf{R\'{e}sum\'{e}.} Les processus poissonniens de L\'evy simples ou r\'e-\'echelonn\'es sont construits explicitement \`a partir de sommes partielles de suites de variables al\'eatoires ind\'ependantes et identiquement distribu\'ees et de suites non stationnaires ind\'ependantes. Pour ces derni\`eres, les lois limites faibles sont des processus de Poisson r\'e-\'echelonn\'es. Cette \'etude pr\'epare le terrain \`a des g\'en\'eralisations aux donn\'ees d\'ependantes, aux variables associ\'ees dans un premier temps.\\ \Ni \textbf{The authors}.\\ \Ni \textbf{Aladji Babacar Niang}, M.Sc., is preparing a Ph.D. dissertation under the supervision of the fourth author at Gaston Berger University, SENEGAL.\\ \Ni \textbf{Gane Samb Lo}, Ph.D. and Doctorate in Sciences, is a full professor at Gaston Berger University of Saint-Louis (SENEGAL) and at the African University of Science and Technology (AUST), NIGERIA. He is affiliated with LSTA, Pierre et Marie Curie University, FRANCE. He is the founder of the Imhotep Mathematical Center.\\ \Ni \textbf{Ch\'erif Mactar Mamadou Traor\'e}, M.Sc., is preparing a Ph.D. dissertation under the supervision of the second author at Gaston Berger University, SENEGAL.\\ \Ni \textbf{Amadou Ball}, M.Sc., has prepared an M.Sc. dissertation under the supervision of the second author at Gaston Berger University, SENEGAL. Most of the work of this paper was done in his dissertation.\\ \section{Introduction} \Ni In this paper, we provide invariance principles, also called functional limit theorems, associated with Poisson weak limits, for two classical results in probability theory and also for their recent generalizations. Let us begin by describing the central limit theorems for which functional laws have to be established.
\subsection{Poisson weak limits} \label{sec_01_ss_01} \Ni The approximation of a sequence of binomial probability laws $\left(\mathcal{B}(n,p_{n})\right)_{n\geq 1}$ associated to a sequence of random variables $(Z_n)_{n\geq 1}$ [such that the sequence of probabilities $(p_n)_{n\geq 1}$ converges to zero and $np_n \rightarrow \lambda >0$ as $n\rightarrow +\infty$] by a Poisson law $\mathcal{P}(\lambda)$ is a classical and easy-to-prove result in probability theory. This approximation is very important in some real-life situations, especially in the absence of powerful computers. In this simple case, the distribution of each $Z_n$ is a convolution product of $n$ independent and identically distributed (\textit{iid}) Bernoulli laws $\mathcal{B}(p_n)$ associated to the random variables $\left\{(X_{j,n})_{1\leq j \leq n}, \ n\geq 1\right\}$, \textit{i.e.}, $Z_n=X_{1,n}+X_{2,n}+\cdots+X_{n,n}$. A parallel theory also exists for sums of corrected geometric laws. When we depart from the \textit{iid} assumption, the problem may become harder, and rapidly so, even if the independence assumption is retained. The situation becomes more interesting if the random variables $X_{j,n}$ are non-stationary and dependent. Generalizations of these results have been given recently in \cite{apwl-nnLo-ins}. It happens that asymptotic laws of sums of random variables are closely related to invariance principles or functional weak limits, which in turn may lead to L\'evy stochastic processes, which are important in many areas of application, in mathematical finance for example.\\ \Bin In that view, the aforementioned Asymptotic Poisson Weak Limits (\textit{APWL}) should be associated with Invariance Principles (\textit{IP}) or Functional Limit Theorems (\textit{FLT}) leading to scaled L\'evy processes, both for the classical cases and for the extended non-stationary approach. Among L\'evy processes, two are iconic: Brownian motions and Poisson processes.\\ \Bin So our aim is to establish \textit{FLT}'s for the extended Poisson weak limit laws in \cite{apwl-nnLo-ins} and, along the way, to re-establish \textit{FLT}'s for the two classical cases, since statements of the latter can hardly be found in the most common literature. \\ \Ni To give more details on our objectives, we recall the \textit{APWL} under consideration in Subsection \ref{sec_01_ss_02}. Next we make a quick introduction to \textit{FLT}'s in the space $\ell^{\infty}(0,1)$ in Subsection \ref{sec_01_ss_03}, with the main tools to be used there. \\ \subsection{The Asymptotic Poisson Weak Laws} \label{sec_01_ss_02} \Ni We recall the two results for stationary Bernoulli and corrected geometric sequences of random variables. \begin{proposition} \label{piidBinomial} Let $(X_{n})_{n\geq 0}$ be a sequence of random variables in some probability space $(\Omega,\mathcal{A}, \mathbb{P})$ such that:\\ \Ni 1) $\forall n\geq 1 $, $X_n \sim \mathcal{B}(n,p_{n})$;\\ \Ni 2) $p_{n} \rightarrow 0$ and $np_{n} \rightarrow \lambda \in \mathbb{R}_{+}\setminus\{0\}$ as $n\rightarrow + \infty$.\\ \Ni Then $$ X_{n} \rightsquigarrow \mathcal{P}(\lambda).
$$ \end{proposition} \Bin Next, we have: \\ \begin{proposition}\label{niidBinomial} Let $(X_{n})_{n\geq 0}$ be a sequence of random variables in some probability space $(\Omega,\mathcal{A}, \mathbb{P})$ such that: \\ \Ni 1) $\forall n\geq 1 $, $X_n \sim \mathcal{N}\mathcal{B}(n,p_{n})$, $n\geq 1 $; \\ \Ni 2) $(1-p_{n}) \rightarrow 0 $ and $ n(1-p_{n}) \rightarrow \lambda \in \mathbb{R}_{+}\setminus \{0\} $ as $n\rightarrow + \infty$.\\ \Ni Then $$ X_{n}-n \rightsquigarrow \mathcal{P}(\lambda). $$ \end{proposition} \Bin We also recall the two generalizations of the above mentioned results for non-stationary Bernoulli and corrected geometric sequences of random variables (see \cite{apwl-nnLo-ins}). \begin{theorem}\label{pbinomialINS} Let $X=\biggr\{ \{X_{k,n}, \ 1 \leq k \leq k_n=k(n)\}, \ n\geq 1\biggr\}$ be an array by-row-independent Bernoulli random variables, that is:\\ \Ni (1) $\forall n\geq 1$, $\forall 1\leq k \leq k(n)$, $X_{k,n}\sim \mathcal{B}(p_{k,n})$, with $0<p_{k,n}<1$ and:\\ \Ni (2) $\sup_{1\leq k \leq k(n)} p_{k,n} \rightarrow 0$;\\ \Ni (3) $\sum_{1\leq k \leq k(n)} p_{k,n} \rightarrow \lambda \in ]0, \ +\infty[$.\\ \Ni Then we have $$ S_n[X]:=\sum_{k=1}^{k(n)} X_{k,n} \rightsquigarrow \mathcal{P}(\lambda). $$ \end{theorem} \Bin Next: \\ \begin{theorem}\label{nbinomialINS} Let $X=\biggr\{ \{X_{k,n}, \ 1 \leq k \leq k_n=k(n)\}, \ n\geq 1\biggr\}$ be an array by-row-independent corrected Geometric random variables, that is: \\ \Ni (1) $\forall n\geq 1$, $\forall 1\leq k \leq k(n)$, $X_{k,n}\sim \mathcal{G}^{\ast}(p_{k,n})$, with $0<p_{k,n}=1-q_{k,n}<1$ and:\\ \Ni (2) $\sup_{1\leq k \leq k(n)} q_{k,n} \rightarrow 0$;\\ \Ni (3) $\sum_{1\leq k \leq k(n)} q_{k,n} \rightarrow \lambda \in ]0, \ +\infty[$.\\ \Ni Then we have $$ S_n[X]:=\sum_{k=1}^{k(n)} X_{k,n} \rightsquigarrow \mathcal{P}(\lambda). $$ \end{theorem} \Bin Now, let us do a quick introduction on simple functional laws.\\ \subsection{A quick introduction to the \textit{FLT} in $\ell^{\infty}(0,1)$} \label{sec_01_ss_03} \noindent The general principle of invariance principles is the following. Let $X_{1}$, $X_{2}$,$\cdots$ be a sequence of real-valued centered random variables, defined on the same probability space, with finite variances, that is $\sigma_i^2=\mathbb{E}\left\vert X_{i}\right\vert ^{2}<\infty$. For each $n\geq 1$, set $$ s_n^2=\sigma_1^2+\cdots + \sigma_n^2 $$ \bigskip\noindent and \begin{equation*} S_{n}=X_{1}+\cdots+X_{n}. \end{equation*} \bigskip\noindent For $0\leq t\leq 1$ and $n\geq 1$, put \begin{equation*} Y_{n}(t)=\frac{S_{\left[ nt\right] }}{s_n}, \end{equation*} \bigskip\noindent where, for any real $u$, $[u]$ stands for the integer part of $u$, which is the greatest integer less or equal to $u$.\\ \noindent In the \textit{iid} case with variance one, $s_n^2=n$ and the sequence $\{Y_{n}(t), \ 0\leq t \leq 1\}$ is studied in the space $D(0,1)$ of functions of the first kind endowed with the Skorohod metric. 
It may be proved that \begin{equation} \{Y_{n}(t), \ 0\leq t \leq 1\} \rightsquigarrow \{W(t), \ 0\leq t \leq 1\}, \ as \ n\rightarrow +\infty, \label{FLT} \end{equation} \bigskip\noindent meaning that $\{Y_{n}(t), \ 0\leq t \leq 1\}$ weakly converges to a Wiener process (Brownian motion) $\{W(t), \ 0\leq t \leq 1\}$ in the sense of the Skorohod topology.\\ \noindent This result is the starting point of a considerable body of research seeking extensions of it, labeled as \textit{functional laws} or \textit{invariance principles}.\\ \noindent \textbf{In each part of this document}, the notation given in this introduction will be used and, when needed, adapted.\\ \Ni For a long time, the weak law \eqref{FLT} was proved in the space $\mathcal{C}(0,1)$ of continuous functions on $(0,1)$ or in the space $\mathcal{D}(0,1)$ of functions defined on $(0,1)$ having left and right limits at each point and at most a countable number of discontinuity points. The spaces $\mathcal{C}(0,1)$ and $\mathcal{D}(0,1)$, when endowed with the supremum norm and the Skorohod metric respectively, are complete and separable (Polish) spaces. Most importantly, suprema of stochastic processes in them are measurable. However, handling such weak limits in them, especially in $\mathcal{D}(0,1)$, is very hard. To find a less complicated way, the space $\ell^{\infty}(T)$ of bounded functions equipped with the supremum norm is used, where $T$ is some set; here we use $T=[a,b]$ and frequently $T=[0,\ 1]$. To avoid the measurability problem, exterior and interior integrals with respect to a probability measure, and exterior and interior probabilities, are introduced. A very complete account of weak convergence in $\ell^{\infty}(0,1)$ is available in \cite{vaart}.\\ \Ni Now, we can state our precise objective, which is to prove that, for all four theorems above, we have \textit{FLT}'s of the form \begin{equation} \{Y_{n}(t), \ 0\leq t \leq 1\} \rightsquigarrow \{N(\lambda a(t)), \ 0\leq t \leq 1\}, \ in \ \ell^{\infty}(0,1) \ as \ n\rightarrow +\infty, \label{FLT2} \end{equation} \Bin where $N(\lambda a(\circ))$ is a scaled Poisson process of intensity $\lambda$, with $a(t)\equiv t$ for the first two theorems.\\ \Ni Finally, before we give our results, let us describe the method of finding \textit{FLT}'s in $\ell^{\infty}(0,1)$.\\ \subsection{General tools for weak laws in $\ell^{\infty}(0,1)$} \Ni The invariance principle results are proved by first establishing the finite-dimensional convergence to a given stochastic process and second, by showing that the sequence of stochastic processes is asymptotically tight. We will use the following theorem. \\ \begin{theorem} \label{workingTool_01} Let $a$ and $b$ be two real numbers such that $a<b$ and $T=[a,b]$. For the sequence $(Y_{n})_{n\geq 1} \subset \ell^{\infty} \left([a,b]\right)$ to converge to a tight stochastic process $W \in \ell^{\infty}\left([a,b]\right)$, it is sufficient that:\\ \noindent (a) the finite dimensional margins of $(Y_{n})_{n\geq 1}$ converge to those of $W$,\\ \noindent and\\ \noindent (b) $(Y_{n})_{n\geq 1}$ is asymptotically tight.
\end{theorem} \bigskip \noindent The asymptotically tightness is characterized as follows: Given that each margin $(Y_{n}(t))_{n\geq 1}$, $t \in T$, is asymptotically tight, the stochastic process $(Y_{n})_{n\geq 1}$ is tight if and only if there exists a semi-metric $\rho$ on $T=[a,b]$ such that $(T,\rho)$ is totally bounded and for all $\eta>0$, \begin{equation} \lim_{\delta \downarrow 0} \limsup_{n\rightarrow +\infty} \mathbb{P}^\ast\left(\sup_{\rho(t,s)<\delta} \left|Y_n(t)-Y_n(s)\right|\geq \eta\right)=0. \ \ \label{ip_tightness_charac} \end{equation} \bigskip\noindent (See \cite{vaart} or \cite{ips-wcib-ang}, Chapter 3). \\ \Ni However in most cases where $T$ is a bounded interval of $\mathbb{R}$, we try to have that tightness in the simple case where $\rho(s,t)=|t-s|$. \\ \section{\textit{FLT} related \textit{APWL}} \label{sec_02} \Ni Let us consider the stationary case first. We have the two following theorems.\\ \begin{theorem} \label{FLT-piidBinomial} Let $(X_{n})_{n\geq 0}$ be a sequence of random variables in some probability space $(\Omega,\mathcal{A}, \mathbb{P})$ such that:\\ \Ni 1) $\forall n\geq 1 $, $X_n=X_{1,n} + \cdots + X_{n,n}$, with each $X_{i,n} \sim \mathcal{B}(p_{n})$, \ $1\leq i\leq n$;\\ \Ni 2) $p_{n} \rightarrow 0$ and $np_{n} \rightarrow \lambda \in \mathbb{R}_{+}\setminus\{0\}$ as $n\rightarrow + \infty$.\\ \Ni Let us consider the sequence of stochastic processes $$ \biggr\{ \left\{Y_n(t), \ 0\leq t \leq 1\right\}, \ n\geq 1 \biggr\}=\biggr\{ \left\{X_{[nt]}, \ 0\leq t \leq 1\right\}, \ n\geq 1 \biggr\} $$ \Bin with $X_{[nt]}=0$ for $0\leq nt <1$. Then we have \begin{equation} Y_{n}(\circ) \rightsquigarrow N(\circ) \ in \ \ell^{\infty}(0,1), \label{FLT2SB} \end{equation} \Bin where $N(\circ)$ is a Poisson process of intensity $\lambda$. \end{theorem} \Bin We also have: \\ \begin{theorem}\label{FLT-niidBinomial} Let $(X_{n})_{n\geq 0}$ be a sequence of random variables in some probability space $(\Omega,\mathcal{A}, \mathbb{P})$ such that: \\ \Ni 1) $\forall n\geq 1 $, $X_n=X_{1,n} + \cdots + X_{n,n}$, with each $X_{i,n} \sim \mathcal{G}(p_{n})$, \ $1\leq i\leq n$;\\ \Ni 2) $(1-p_{n}) \rightarrow 0 $ and $ n(1-p_{n}) \rightarrow \lambda \in \mathbb{R}_{+}\setminus \{0\} $ as $n\rightarrow + \infty$.\\ \Ni Let us denote $Z_n=X_n-n$, $n\geq 1$ and consider the sequence of stochastic processes $$ \biggr\{ \left\{Y_n(t), \ 0\leq t \leq 1\right\}, \ n\geq 1 \biggr\}=\biggr\{ \left\{Z_{[nt]}, \ 0\leq t \leq 1\right\}, \ n\geq 1 \biggr\}, $$ \Bin with $Z_{[nt]}=0$ for $0\leq nt <1$. Then we have \begin{equation} Y_{n}(\circ) \rightsquigarrow N(\circ) \ in \ \ell^{\infty}(0,1), \label{FLT2SG} \end{equation} \Bin where $N(\circ)$ is a Poisson process of intensity $\lambda$. \end{theorem} \Bin \textbf{Proofs of theorems}.\\ \Ni \textbf{Proof of Theorem \ref{FLT-piidBinomial}}. We are going to apply Theorem \ref{workingTool_01}. Let us proceed to two steps.\\ \Ni \textbf{Step 1}. Let us begin by the finite distribution convergences. Since, we have for any $0\leq t \leq 1$, $$ Y_n(t)=X_{[nt]}=X_{1,n}+\cdots+ X_{[nt],n} \sim \mathcal{B}([nt],p_n), \ for \ t\geq 1/n, $$ \Bin we have, for $n\geq 1$ big enough, $$ [nt]p_n=(np_n) ([nt]/n) \to \lambda t $$ \Bin and by Proposition \ref{piidBinomial}, $Y_n(t) \rightsquigarrow \mathcal{P}(\lambda t)$. Now let $k\geq 0$ and $0=t_0<t_1<\cdots<t_k$. Let $n\geq 1$ large enough (say $n\geq n_0$) to ensure that all $nt_j\geq 1$, $1\leq j \leq k$ and $[nt_1]<\cdots<[nt_k]$. 
Let us define $$ Z_n=\left(Y_n(t_1), \ Y_n(t_2)-Y_n(t_1),\ldots,Y_n(t_k)-Y_n(t_{k-1})\right), \ n\geq n_0. $$ \Bin It is clear that:\\ \Ni (1) $Z_n$ has independent components; \\ \Ni (2) For each $1\leq j\leq k$, we have $$ Z_n(t_j)=Y_n(t_j)-Y_n(t_{j-1})=X_{[nt_{j-1}]+1}+\cdots+X_{[nt_{j}]}, $$ \Bin and the number of terms in $Z_n(t_j)$ is $m_n(j)=[nt_{j}]-[nt_{j-1}]$ and satisfies $m_n(j)p_n \rightarrow \lambda(t_j-t_{j-1})$. Hence, by Proposition \ref{piidBinomial}, $$ Z_n(t_j) \rightsquigarrow \mathcal{P}\left(\lambda (t_j-t_{j-1})\right).\\ $$ \Bin So, $Z_n=(Z_{n}(t_1), \cdots, Z_{n}(t_k))$ has independent components which weakly converge to marginal laws. By the general Slutsky rule, $Z_n$ weakly converges to the product law of those marginal laws, that is $$ Z_n \rightsquigarrow (Z_{t_1}, \cdots, Z_{t_k}), $$ \Bin where the $Z_{t_1}$, $\cdots$, $Z_{t_k}$ are independent and $Z_{t_j} \sim \mathcal{P}(\lambda (t_{j}-t_{j-1}))$. So, by summing the increments back, we obtain that $(Y_n(t_1),\cdots,Y_n(t_k))$ converges to $(N(t_1,\lambda), \cdots, N(t_k,\lambda))$, where $N(\circ,\lambda)$ is a simple Poisson process. $\square$\\ \Ni \textbf{Step 2}. Let $\delta>0$ and let, for $n\geq 1$, $$ A_n(\delta)= \sup_{(s,t)\in [0,1]^2, \ |s-t|<\delta} |Y_n(s)-Y_n(t)|. $$ \Bin We take $\delta \in ]0,1[$, so that $m(n,\delta):=\left[n(\delta+1/n)\right]$ is small compared to $n$. Here we will exploit the fact that any $Y_n(t)$ is a sum of nonnegative random variables. We have the following facts.\\ \Ni (a) For any $(s,t)\in [0,1]^2$ such that $|s-t|<\delta$, $|Y_n(s)-Y_n(t)|$ is of the form $X_{j,n}+\cdots+X_{j+h,n}$, where $h\leq m(n,\delta)-1$.\\ \Ni (b) So, for each fixed $j \in \{1,\cdots,n-m(n,\delta)+1\}$, the quantity $X_{j,n}+\cdots+X_{j+h,n}$, with $h\leq m(n,\delta)-1$, is bounded by $$ Z_{j,n}:=X_{j,n}+\cdots+X_{j+m(n,\delta)-1,n}. $$ \Bin Let us denote $N=n-m(n,\delta)+1$. For $j\leq N$, the quantity $X_{j,n}+\cdots+X_{j+h,n}$, with $h\leq m(n,\delta)-1$, is still bounded by $Z_N:=\sup_{1\leq j \leq N} Z_{j,n}$.\\ \Ni (c) Finally, we have $$ A_n(\delta)\leq \sup_{1\leq j \leq N} Z_{j,n}. $$ \Ni We set $B_h=(\sup_{1\leq j \leq N} Z_{j,n} = Z_{h,n})$, $1\leq h \leq N$. It is clear that $$ \Omega = \bigcup_{1\leq h \leq N} B_h=\sum_{1\leq h \leq N} B^{\prime}_h, $$ \Bin with $B^{\prime}_1=B_1$, $B^{\prime}_2=B_1^cB_2$, $B^{\prime}_h=B_1^c \cdots B_{h-1}^c B_h$ for $h\geq 2$. We get, for any $\eta>0$, \begin{eqnarray*} \mathbb{P}(A_n(\delta) >\eta)&\leq& \mathbb{P}\left(\sup_{1\leq j \leq N} Z_{j,n}>\eta\right)\\ &=&\sum_{h=1}^{N} \mathbb{P}\left(\left\{ \sup_{1\leq j \leq N} Z_{j,n} >\eta\right\} \cap B^{\prime}_h\right)\\ &=& \sum_{h=1}^{N} \mathbb{P}\left(\left\{ \sup_{1\leq j \leq N} Z_{j,n} >\eta\right\} / B^{\prime}_h\right) \ \mathbb{P}( B^{\prime}_h)\\ &=& \sum_{h=1}^{N} \mathbb{P}(\{Z_{h,n} >\eta\}) \ \mathbb{P}( B^{\prime}_h)\\ &\leq & \frac{1}{\eta} \sum_{h=1}^{N} \mathbb{E}(Z_{h,n}) \ \mathbb{P}( B^{\prime}_h)\\ &=& \frac{m(n,\delta)p_n}{\eta} \sum_{h=1}^{N} \mathbb{P}( B^{\prime}_h)\\ &=& \frac{m(n,\delta)p_n}{\eta}= \frac{\delta (\lambda+o(1))}{\eta}. \end{eqnarray*} \Bin We conclude that, for any $\eta>0$, $$ \lim_{\delta\downarrow 0} \limsup_{n\rightarrow +\infty} \mathbb{P}\biggl(\biggl(\sup_{(s,t)\in [0,1]^2, \ |s-t|<\delta} |Y_n(s)-Y_n(t)|\biggr) > \eta\biggr)=0. $$ \Bin $\square$\\ \Bin \textbf{Proof of Theorem \ref{FLT-niidBinomial}}. That proof closely follows that of Theorem \ref{FLT-piidBinomial}, and there is no need to give the details. $\blacksquare$
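\Bin \textbf{A numerical illustration}. Although it is not part of the proofs above, the following small simulation sketch (the script and the parameter choices are ours) illustrates the marginal convergence used in Step 1 of the proof of Theorem \ref{FLT-piidBinomial}: with $p_n=\lambda/n$, the empirical law of $Y_n(1/2)$, which is binomial $\mathcal{B}([n/2],p_n)$, is compared with the Poisson law of parameter $\lambda/2$.\\
\begin{verbatim}
import numpy as np
from math import exp, factorial

# Hedged illustration (ours) of Theorem FLT-piidBinomial: for p_n = lam/n,
# Y_n(t) = X_1 + ... + X_[nt] is Binomial([nt], p_n) and should be close
# to Poisson(lam * t) when n is large.
rng = np.random.default_rng(0)
lam, n, trials, t = 3.0, 2000, 200000, 0.5
m = int(n * t)                                   # [nt]
samples = rng.binomial(m, lam / n, size=trials)  # draws of Y_n(t)
for j in range(9):
    empirical = float((samples == j).mean())
    poisson = exp(-lam * t) * (lam * t) ** j / factorial(j)
    print(j, round(empirical, 4), round(poisson, 4))
\end{verbatim}
\Bin The two columns of probabilities agree to within the Monte Carlo error, as predicted by Proposition \ref{piidBinomial} applied to the marginal $Y_n(t)$.\\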
\section{\textit{FLT} for non-stationary Bernoulli or corrected geometric laws} \label{sec_03} \Bin To get clues on how to extend the above results to a non-stationary scheme, we may draw some facts from the hypotheses of Theorem \ref{FLT-piidBinomial}, for example. There, we had $k_n(t)=[nt]$, $0\leq t\leq 1$, $n\geq 1$. For $0\leq s <t$, $$ Y_n(t)-Y_n(s)=X_{k_n(s)+1,n}+\cdots + X_{k_n(t),n} \sim \mathcal{B}(k_n(t)-k_n(s),p_n). $$ \Bin For $a(t)=t$, \ $0\leq t \leq 1$, by setting $\Delta a(s,t)=a(t)-a(s)$, $$ \mathbb{E}(Y_n(t)-Y_n(s))=(k_n(t)-k_n(s))p_n=: \Delta p_n(s,t) \rightarrow \lambda \Delta a(s,t)=\lambda (t-s), $$ \Bin uniformly in $(s,t) \in [0,1]^2$. This simple analysis suggests generalizing the above \textit{FLT} in the following way: require a sequence of functions $(k_n(t))_{0\leq t \leq 1}$, $n\geq 1$, which are non-decreasing with $k_n(0)=0$ and $k_n(1)=n$, and a uniformly right-continuous and non-decreasing function $a(t)$ of $t\in [0,1]$, with $a(0)=1-a(1)=0$, such that for $0\leq s <t$, $$ \lim_{n\rightarrow +\infty} \sup_{(s,t) \in [0,1]^2} \left|\biggl(p_{k_n(s)+1,n} + \cdots + p_{k_n(t),n}\biggr)- \lambda \Delta a(s,t)\right|=0 $$ \Bin and $$ \lim_{\delta \rightarrow 0} \sup_{(s,t) \in [0,1]^2, |s-t|<\delta} \Delta a(s,t)=0. $$ \Bin Exploiting these ideas leads to the two \textit{FLT}'s below for non-stationary data.\\ \subsection{Non-stationary sequences of Bernoulli random variables} \label{sec_02_ss_01} \begin{theorem} \label{FLT-pBinomialNS} Let $X=\biggr\{ \{X_{k,n}, \ 1 \leq k \leq k_n=k(n)\}, \ n\geq 1\biggr\}$ (with $k_n\rightarrow +\infty$ as $n\rightarrow +\infty$) be an array of by-row-independent Bernoulli random variables, that is: \\ \Ni (1) $\forall n\geq 1$, $\forall 1\leq k \leq k(n)$, $X_{k,n}\sim \mathcal{B}(p_{k,n})$, with $0<p_{k,n}<1$.\\ \Bin Suppose that we have the following other assumptions.\\ \Ni (2) $\sup_{1\leq k \leq k(n)} p_{k,n} \rightarrow 0$;\\ \Ni (3) There exist:\\ \Bin (3a) a sequence of non-decreasing functions $(k_n(t))_{0\leq t \leq 1}$, such that $k_n(0)=0$ and $k_n(1)=k_n$, $n\geq 1$ (satisfying: for any $0\leq s <t$, we have $k_n(s)<k_n(t)$ for $n$ large enough) \\ \Ni and: \\ \Ni (3b) a uniformly right-continuous and non-decreasing function $a(t)$ of $t\in [0,1]$ with $a(0)=1-a(1)=0$, \textit{i.e.}, $$ \lim_{\delta \rightarrow 0} \sup_{t \in [0,1[} \Delta a(t,t+\delta)=0; \ \ (F3a) $$ \Bin with $\Delta a(s,t)=a(t)-a(s)$ for $0\leq s <t$. \\ \Ni Moreover, for $$ \Delta p_n(s,t)=p_{k_n(s)+1,n} + \cdots + p_{k_n(t),n}, $$ \Bin we have $$ \limsup_{n\rightarrow +\infty} \sup_{(s,t) \in [0,1]^2} \left| \Delta p_n(s,t) - \lambda \Delta a(s,t)\right|=0 \ \ (F3b) $$ \Bin and, for $\delta>0$ (small enough) and for $m(n,\delta)=k_n(t+\delta)-k_n(t)$, $$ \limsup_{n\rightarrow +\infty} \sup_{1\leq j \leq k_n-m(n,\delta)+1} \left|p_{j,n}+\cdots+p_{j+m(n,\delta)-1,n} - \lambda \delta\right|=0. \ \ (F3c) $$ \Bin Then the sequence of stochastic processes $$ \biggr\{ \left\{Y_n(t), \ 0\leq t \leq 1\right\}, \ n\geq 1 \biggr\}=\biggr\{ \left\{X_{1,n}+\cdots+X_{k_n(t),n}, \ 0\leq t \leq 1\right\}, \ n\geq 1 \biggr\} $$ \Bin weakly converges in $\ell^{\infty}(0,1)$ to the scaled Poisson process $N(a(\circ),\lambda)$ of intensity $\lambda>0$. \end{theorem} \Bin \textbf{Proof of Theorem \ref{FLT-pBinomialNS}}. Let us proceed in two steps.\\ \Ni \textbf{Step 1}. Finite-dimensional convergence. Let $r>1$ and $0=t_0<t_1<\cdots<t_r\leq 1$. Let, for $1\leq j \leq r$, $$ Z_{j,n}=Y_n(t_j)-Y_n(t_{j-1})=X_{k_n(t_{j-1})+1,n}+\cdots+X_{k_n(t_{j}),n}.
$$ \Bin Since all the assumptions of Theorem \ref{pbinomialINS} hold for each $Z_{j,n}$, we have $$ \forall j \in \{1,\cdots,r\}, \ Z_{j,n} \rightsquigarrow \mathcal{P}(\lambda \Delta a(t_{j-1},t_j)). $$ \Bin So, $Z_n=(Z_{1,n}, \cdots, Z_{r,n})$ has independent components which weakly converge to marginal laws. By the general Slutsky rule, $Z_n$ weakly converges to the product law of those marginal laws, that is, $$ Z_n \rightsquigarrow (Z_1, \cdots, Z_r), $$ \Bin where the $Z_1$, $\cdots$, $Z_r$ are independent and $Z_j \sim \mathcal{P}(\lambda \Delta a(t_{j-1},t_j))$. So, by summing the increments back, we obtain that $(Y_n(t_1),\cdots,Y_n(t_r))$ converges to $(N(a(t_1),\lambda), \cdots, N(a(t_r),\lambda))$, where $N(a(\circ),\lambda)$ is a Poisson process scaled by $a(\circ)$.\\ \Ni \textbf{Step 2}. We closely follow the proof of Theorem \ref{FLT-piidBinomial} and re-use its notations. Let $\delta>0$ and let, for $n\geq 1$, $$ A_n(\delta)= \sup_{(s,t)\in [0,1]^2, \ |s-t|<\delta} |Y_n(s)-Y_n(t)|. $$ \Bin We have:\\ \Ni (a) For any $(s,t)\in [0,1]^2$ such that $|s-t|<\delta$, $|Y_n(s)-Y_n(t)|$ is of the form $X_{j,n}+\cdots+X_{j+h,n}$, where $$ h\leq m(n,\delta)-1, \ \ with \ \ m(n,\delta)=k_n(t+\delta)-k_n(t). $$ \Bin (b) So, for each fixed $j \in \{1,\cdots, k_n-m(n,\delta)+1\}$, the quantity $X_{j,n}+\cdots+X_{j+h,n}$, with $h\leq m(n,\delta)-1$, is bounded by $$ Z_{j,n}:=X_{j,n}+\cdots+X_{j+m(n,\delta)-1,n}. $$ \Bin Let us denote $N=k_n-m(n,\delta)+1$. For $j\leq N$, the quantity $X_{j,n}+\cdots+X_{j+h,n}$, with $h\leq m(n,\delta)-1$, is still bounded by $Z_N:=\sup_{1\leq j \leq N} Z_{j,n}$.\\ \Ni (c) Finally, we have $$ A_n(\delta)\leq \sup_{1\leq j \leq N} Z_{j,n}. $$ \Bin We set $B_h=(\sup_{1\leq j \leq N} Z_{j,n} = Z_{h,n})$, $1\leq h \leq N$. Surely, we have $$ \Omega = \bigcup_{1\leq h \leq N} B_h=\sum_{1\leq h \leq N} B^{\prime}_h $$ \Bin with $B^{\prime}_1=B_1$, $B^{\prime}_2=B_1^cB_2$, $B^{\prime}_h=B_1^c \cdots B_{h-1}^c B_h$ for $h\geq 2$. We get, for any $\eta>0$, \begin{eqnarray*} \mathbb{P}(A_n(\delta) >\eta)&\leq& \mathbb{P}\left(\sup_{1\leq j \leq N} Z_{j,n}>\eta\right)\\ &=&\sum_{h=1}^{N} \mathbb{P}\left(\left\{ \sup_{1\leq j \leq N} Z_{j,n} >\eta\right\} \cap B^{\prime}_h\right)\\ &=& \sum_{h=1}^{N} \mathbb{P}\left(\left\{ \sup_{1\leq j \leq N} Z_{j,n} >\eta\right\} / B^{\prime}_h\right) \ \mathbb{P}( B^{\prime}_h)\\ &=& \sum_{h=1}^{N} \mathbb{P}(\{Z_{h,n} >\eta\}) \ \mathbb{P}( B^{\prime}_h)\\ &\leq & \frac{1}{\eta} \sum_{h=1}^{N} \mathbb{E}(Z_{h,n}) \ \mathbb{P}( B^{\prime}_h)\\ &=& \frac{1}{\eta} \sum_{h=1}^{N} \biggl(p_{h,n}+\cdots+p_{h+m(n,\delta)-1,n}\biggr) \ \mathbb{P}( B^{\prime}_h) \ \ (L26)\\ &=& \frac{\lambda \Delta a(t,t+\delta)}{\eta} \sum_{h=1}^{N} \ \mathbb{P}( B^{\prime}_h)\\ &=& \frac{\lambda \Delta a(t,t+\delta)}{\eta}, \end{eqnarray*} \Bin where we applied Assumption (F3c) in Line (L26). By applying Assumption (F3a), we get, for any $\eta>0$, $$ \lim_{\delta\downarrow 0} \limsup_{n\rightarrow +\infty} \mathbb{P}\biggl(\biggl(\sup_{(s,t)\in [0,1]^2, \ |s-t|<\delta} |Y_n(s)-Y_n(t)|\biggr) > \eta\biggr)=0. $$ \Bin $\square$\\ \subsection{Non-stationary sequences of corrected geometric variables} \label{sec_02_ss_02} \Ni The following \textit{FLT} is also valid for sums of corrected geometric random variables. The proof will be omitted because it is very similar to the proof of Theorem \ref{FLT-pBinomialNS}.
\\ \begin{theorem} \label{FLT-nBinomialNS} Let $X=\biggr\{ \{X_{k,n}, \ 1 \leq k \leq k_n=k(n)\}, \ n\geq 1\biggr\}$ (with $k_n\rightarrow +\infty$ as $n\rightarrow +\infty$) be an array of by-row-independent corrected geometric random variables, that is: \\ \Ni (1) $\forall n\geq 1$, $\forall 1\leq k \leq k(n)$, $X_{k,n}\sim \mathcal{G}^{\ast}(p_{k,n})$, with $0<p_{k,n}=1-q_{k,n}<1$.\\ \Ni Suppose that we have the following other assumptions.\\ \Ni (2) $\sup_{1\leq k \leq k(n)} q_{k,n} \rightarrow 0$.\\ \Ni (3) There exist:\\ \Bin (3a) a sequence of non-decreasing functions $(k_n(t))_{0\leq t \leq 1}$ such that $k_n(0)=0$ and $k_n(1)=k_n$, $n\geq 1$ (satisfying: for any $0\leq s <t$, we have $k_n(s)<k_n(t)$ for $n$ large enough) \\ \Ni and: \\ \Ni (3b) a uniformly right-continuous and non-decreasing function $a(t)$ of $t\in [0,1]$ with $a(0)=1-a(1)=0$, \textit{i.e.}, $$ \lim_{\delta \rightarrow 0} \sup_{t \in [0,1]} \Delta a(t,t+\delta)=0 \ \ (G3a) $$ \Bin with $\Delta a(s,t)=a(t)-a(s)$ for $0\leq s <t$. Moreover, by setting $$ \Delta q_n(s,t)=q_{k_n(s)+1,n} + \cdots + q_{k_n(t),n}, $$ \Bin we have $$ \limsup_{n\rightarrow +\infty} \sup_{(s,t) \in [0,1]^2} \left| \Delta q_n(s,t) - \lambda \Delta a(s,t)\right|=0 \ \ (G3b) $$ \Bin and, for $\delta>0$ (small enough) and for $m(n,\delta)=k_n(t+\delta)-k_n(t)$, $$ \limsup_{n\rightarrow +\infty} \sup_{1\leq j \leq k_n-m(n,\delta)+1} \left|q_{j,n}+\cdots+q_{j+m(n,\delta)-1,n} - \lambda \delta\right|=0. \ \ (G3c) $$ \Bin Then the sequence of stochastic processes $$ \biggr\{ \left\{Y_n(t), \ 0\leq t \leq 1\right\}, \ n\geq 1 \biggr\}=\biggr\{ \left\{X_{1,n}+\cdots+X_{k_n(t),n}, \ 0\leq t \leq 1\right\}, \ n\geq 1 \biggr\} $$ \Bin weakly converges in $\ell^{\infty}(0,1)$ to the scaled Poisson process $N(a(\circ),\lambda)$ of intensity $\lambda>0$. \\ \end{theorem} \subsection{Examples} \label{sec-example} \Ni Let us provide some examples for which the assumptions of Theorems \ref{FLT-pBinomialNS} and \ref{FLT-nBinomialNS}, specifically (F3b), (F3c), (G3b) and (G3c), hold. Actually, we illustrate the conditions of the first theorem only.\\ \Ni \textbf{1. Stationary case}. We already said that (F3b) and (F3c) hold in the stationary case with $k_n=n$, $k_n(t)=[tk_n]$, $p_{k,n}=p_n$ and $a(t)=t$. This still holds even if $k_n$ is an arbitrary sequence converging to $+\infty$.\\ \Ni \textbf{2. Checking (F3c)}. It seems that (F3c) is more difficult to obtain. The example used to realize (F3b) in Point 3 below does not seem to make (F3c) hold.\\ \Ni Actually, (F3c) broadly means that the partial sums $p_{j,n}+\cdots+p_{j+\ell,n}$, for $\ell>0$, are nearly stationary in $j$. They would be exactly stationary if they were all equal to $p_{1,n}+\cdots+p_{1+\ell,n}$. In other words, (F3c) means that the partial sums $p_{j,n}+\cdots+p_{j+\ell,n}$ asymptotically behave like $p_{1,n}+\cdots+p_{1+\ell,n}$, independently of $j$, as $n\rightarrow +\infty$, meaning that they are asymptotically stationary.\\ \Ni Let us give some examples of such sequences.\\ \Ni \textbf{3. A non-trivial example for which (F3b) holds}. Let us consider an array $\{\{X_{k,n}, \ 1\leq k\leq \ell(n)\}, \ n\geq 1\}$ with $\ell(n)\rightarrow +\infty$. Let $\varepsilon \in ]0,1[$. Let $(k_n)_{n\geq 1}$ be a sequence converging to $+\infty$ such that $[k_n^{1+\varepsilon}]\leq \ell(n)$. Let us define $$ k_n(t)=\left[k_n^{\varepsilon+t}\right], \ n\geq 1, \ t\in [0,1], $$ \Bin and, with $\gamma$ standing for the Euler--Mascheroni constant, $$ b_n=\sum_{1\leq k \leq k_n} k^{-1}=\log k_n + \gamma + o(1).
$$ \Bin Now, suppose that for any $1<k\leq K_n=\left[ k_n^{1+\varepsilon}\right]$, $$ p_{k,n}=\frac{\lambda_n}{k b_n}, $$ \Bin where $\lambda_n\leq k_n^2 b_n$ and $\lambda_n \rightarrow \lambda$. For $0\leq s <t\leq 1$, for $n\geq 1$, we have \begin{equation} \Delta p_n(s,t)=\frac{\lambda_n}{b_n} \left\{\frac{1}{k_n(s)+1}+\cdots+\frac{1}{k_n(t)}\right\} =\frac{\lambda_n (\log k_n)}{b_n} \frac{\log k_n(t) - \log k_n(s) +o(1)}{\log k_n}. \label{eqExamp1} \end{equation} \Bin By setting $a_n=k_n^{\varepsilon}-1$ (for $n$ large enough), by direct computations, we may check \begin{equation} \sup_{(s,t)\in ]0,1]^2} \left|\frac{\lambda_n\left(\log k_n(t) - \log k_n(s)\right)}{\log k_n}-\lambda(t-s) \right|\leq \log(1+a_n^{-1}), \ n\geq 1. \label{eqExamp2} \end{equation} \Bin The combination of Formulas \eqref{eqExamp1} and \eqref{eqExamp2} leads to (F3b).\\ \Ni \textbf{4. Non trivial examples for (F3c) holds}. \\ \Ni (a) Let $(\varepsilon_n)_{n\geq 1}$ a sequence of real numbers converging to zero and bounded by $\lambda/2>0$. Let us set, for $n\geq 1$, $k_n(t)=[k_nt]$, $k_n<n$ and $$ p_{k,n}=\frac{\lambda + (-1)^k \varepsilon_n}{n}, \ 1\leq k \leq k_n. $$ \Bin For any $j \in [1,k_n]$, for any $\ell \in [1, \ n-k_n]$, $$ p_{j,n}+\cdots+p_{j+\ell,n} \in \left\{ \frac{\lambda (\ell+1) - \varepsilon_{n}}{n}, \frac{\lambda (\ell+1)}{n}, \frac{\lambda (\ell+1) + \varepsilon_{n}}{n}\right\} $$ \Bin and hence the partial sums $p_{j+1,n}+\cdots+p_{j+\ell,n}$ are stationary.\\ \Ni (b) Let us take $k_n=n$ and $$ b_n=\left(1 + \frac{1}{k_n+1}\right)^{-1} + \left(1 + \frac{1}{k_n+2}\right)^{-1}+\cdots + \left(1 + \frac{1}{k_n+k_n}\right)^{-1} $$ \Bin and $$ p_{k,n}=\frac{\lambda_n}{b_n\left(1 + \frac{1}{k+k_n}\right)}, \ 1\leq k \leq k_n, $$ \Bin where $\lambda_n<b_n$ and $\lambda_n\rightarrow \lambda$. \\ \Ni We will show later that $b_n=k_n(1+o(1))$. We have $$ p_{j,n}+\cdots+p_{j+m(n)-1,n}= \frac{\lambda_n}{b_n} \times \left\{\frac{k_n+j}{(k_n+1)+j} + \cdots + \frac{k_n+j+m(n)-1}{k_n+j+m(n)}\right\}. $$ \Bin We denote $\Delta p_n(j)=p_{j,n}+\cdots+p_{j+m(n)-1}$. Let us bound $$ A_{j,n}=\frac{k_n+j}{(k_n+1)+j} + \cdots + \frac{k_n+j+m(n)-1}{k_n+j+m(n)}. $$ \Bin The function $x \mapsto h(x)=(k_n+x)/(k_n+1+x)$ is non-decreasing and concave and so, the comparison of the integral $\int_{j}^{j+m(n)-1} h(x) \ dx$ and the series is as follows: $$ A_{j,n} - \frac{k_n+j+m(n)-1}{k_n+j+m(n)} \leq \int_{j}^{j+m(n)-1} \frac{k_n+x}{k_n+1+x} \ dx\leq A_{j,n} - \frac{k_n+j}{k_n+j+1}. $$ \Bin But $$ \int_{j}^{j+m(n)-1} \frac{k_n+1+x}{k_n+x} \ dx=\int_{j}^{j+m(n)-1} \left(1 + \frac{1}{k_n+x}\right) \ dx $$ \Bin and by using the primitive $$ \left(x + \log (k_n+x)\right) \ of \ \left(1 + \frac{1}{k_n+x}\right), $$ \Bin we get \begin{eqnarray} A_{j,n} - \frac{k_n+j+m(n)-1}{k_n+j+m(n)} &\leq& (m(n)-1) + \log \frac{k_n+j+m(n)-1}{k_n+j} \notag\textbf{}\\ &\leq& A_{j,n} - \frac{k_n+j}{k_n+j+1}. \label{bound10} \end{eqnarray} \Bin But all the terms, except $A_{j,n}$, when divided by $b_n \sim k_n$, go to zero uniformly in $j$. Let show this for one them, the middle term for example, $$ \frac{1}{k_n} \log \frac{k_n-1}{2k_n} \leq B_{j,n}=\frac{1}{k_n} \log \frac{k_n+j+m(n)-1}{k_n+j} \leq \frac{1}{k_n} \log \frac{3k_n-1}{k_n} $$ \Bin which shows that $\sup_{1\leq j \leq k_n} B_{j,n} \rightarrow 0$. By using the same remarks, ideas and methods, we have $$ b_n-\frac{2 k_n}{2k_n+1} \leq \int_{1}^{k_n} \left(1 - \frac{1}{k_n+x+1}\right) \ dx \leq b_n-\frac{k_n+1}{k_n+2}. 
$$ \Bin The integral is $(k_n-1) - \log( (2k_n+1)/(k_n+2))$ and so $b_n=k_n (1+o(1))$. Now, Formula \eqref{bound10} becomes $$ \frac{A_{j,n}}{b_n}+o(1) \leq \frac{m(n)}{k_n} (1+o(1)) + o(1)\leq \frac{A_{j,n}}{b_n}+o(1), $$ \Bin where all the small $o's$ are uniform in $j$. We already know that $$ \left|\frac{m(n)}{k_n}-(t-s)\right|\leq \frac{1}{k_n}. $$ \Bin The combination of all these facts gives $$ \sup_{1\leq j \leq k_n} \left|\Delta p_n(j) - \lambda (t-s)\right| \rightarrow 0 $$ \Bin and so Condition (F3c) holds. $\blacksquare$ \section{Conclusion} \Ni We saw how a very simple result in probability that can be proved by moment generating function can become sophisticated for sums of independent but non-stationary random variables. In this paper, we treated the question in the general frame of the weak laws in $\ell^{\infty}(0,1)$ and learned how to deal with it in a general frame to have new results. The most important thing is the construction of the frame that will allow extensions for non-stationary and dependent data, for associated data in the first place.\\ \Ni \textbf{Acknowledgment}. The authors wish to thank the members of Imhotep Mathematical Center (IMC) for their comments. \begin{thebibliography}{99} \bibitem[Gut (2005)]{gutt} Gut, A. (2005). \textit{Probability : A Graduate Course}. Springer Science+Business Media, Inc. ISBN 0-387-22833-0. \bibitem[Lo\`eve (1977)]{loeve} Lo\`{e}ve, M.(1977). \textit{Probability Theory I}. Springer-Verlag. New-York. \bibitem[Feller (1968a)]{feller1} Feller W.(1968) \textit{An introduction to Probability Theory and its Applications. Volume I}. Third Editions. John Wiley \& Sons Inc., New-York. \bibitem[Feller (1968b)]{feller2} Feller W.(1968) \textit{An introduction to Probability Theory and its Applications. Volume II}. Third Editions. John Wiley \& Sons Inc., New-York. \bibitem[Lo (2018)]{ips-wcia-ang} Lo, G.S.(2018). Weak Convergence (IA). Sequences of random vectors. SPAS Books Series.(2016). Doi : 10.16929/sbs/2016.0001. \bibitem[Lo (2016)]{gslo-noteCW2016} Lo, G.S.(2016). A remark on Asymptotic Tightness in the $\ell^{\infty}([a,b]$ space. \textit{Arxiv} : 1405.6342 \bibitem[van der Vaart and Wellner (1996)]{vaart} van der Vaart A. W. and Wellner J. A.(1996). \textit{Weak Convergence and Empirical Processes With Applications to Statistics}. Springer, New-York. \bibitem[Billingsley (1968)]{billingsley} Billingsley, P.(1968). \textit{Convergence of Probability measures}. John Wiley, New-York. \bibitem[Lo \textit{et al.} (2018)]{ips-wcib-ang} Lo G.S., Kpanzou T.A.(2018), Seck C.T.(2018) \textit{Weak Convergence (IB) - General Theory and Onvergence of Bounded Path Stochastic Processes}. SPAS Book Series, Calgary, Alberta, Saint-Louis (Senegal). \bibitem[Lo et al. (2021)]{apwl-nnLo-ins} Lo G.S. Niang A.B., Ngom N. and Gning G.(2021). Extension of two classical Poisson limit laws to non-stationary independent sequences. Preprint. \end{thebibliography} \end{document}
2205.14491v1
http://arxiv.org/abs/2205.14491v1
Distribution symmetry of toral eigenfunctions
\documentclass[12pt]{amsart} \usepackage{latexsym, amsmath,amssymb} \usepackage{amsthm} \usepackage{hyperref} \usepackage{xcolor} \hypersetup{ colorlinks, linkcolor={red!50!black}, citecolor={blue!50!black}, urlcolor={blue!80!black} } \usepackage{graphicx} \newtheorem*{theorem*}{Theorem} \newcommand{\cal}{\mathcal} \providecommand{\abs}[1]{\lvert#1\rvert} \providecommand{\norm}[1]{\lVert#1\rVert} \numberwithin{equation}{section} \newcommand{\half}{{\textstyle \frac 12}} \newcommand{\acal}{\mathcal{A}} \newcommand{\bcal}{\mathcal{B}} \newcommand{\ccal}{\mathcal{C}} \newcommand{\dcal}{\mathcal{D}} \newcommand{\ecal}{\mathcal{E}} \newcommand{\fcal}{\mathcal{F}} \newcommand{\gcal}{\mathcal{G}} \newcommand{\hcal}{\mathcal{H}} \newcommand{\ical}{\mathcal{I}} \newcommand{\jcal}{\mathcal{J}} \newcommand{\kcal}{\mathcal{K}} \newcommand{\lcal}{\mathcal{L}} \newcommand{\mcal}{\mathcal{M}} \newcommand{\ncal}{\mathcal{N}} \newcommand{\ocal}{\mathcal{O}} \newcommand{\pcal}{\mathcal{P}} \newcommand{\qcal}{\mathcal{Q}} \newcommand{\rcal}{\mathcal{R}} \newcommand{\scal}{\mathcal{S}} \newcommand{\tcal}{\mathcal{T}} \newcommand{\ucal}{\mathcal{U}} \newcommand{\vcal}{\mathcal{V}} \newcommand{\Q}{{\mathbb Q}} \newtheorem{theo}{{\sc Theorem}}[section] \newcommand{\wcal}{\mathcal{W}} \newcommand{\zcal}{\mathcal{Z}} \newtheorem{defin}{{\sc Definition}} \newtheorem{mainprop}{{\sc Proposition}} \newtheorem{lem}[theo]{{\sc Lemma}} \newtheorem{conj}{\sc Conjecture} \newtheorem{mainconj}{\sc Conjecture} \newenvironment{example}{\medskip\noindent{\it Example:\/} }{\medskip} \newenvironment{rem}{\medskip\noindent{\it Remark:\/} }{\medskip} \newtheorem{defn}[theo]{{\sc Definition}} \newenvironment{claim}{\medskip\noindent{\it Claim:\/} }{\medskip} \def\Xint#1{\mathchoice {\XXint\displaystyle\textstyle{#1}}{\XXint\textstyle\scriptstyle{#1}}{\XXint\scriptstyle\scriptscriptstyle{#1}}{\XXint\scriptscriptstyle\scriptscriptstyle{#1}}\!\int} \def\XXint#1#2#3{{\setbox0=\hbox{$#1{#2#3}{% \int}$ } \vcenter{\hbox{$#2#3$ }}\kern-.6\wd0}} \def\ddashint{\Xint=} \def\dashint{\Xint-} \setlength\evensidemargin{.5in} \setlength\textheight{44cc} \setlength\textwidth{30cc} \setlength\topmargin{0in} \setlength\parskip{5pt} \renewcommand{\epsilon}{\varepsilon} \newtheorem{theorem}{Theorem} \renewcommand{\thetheorem}{\arabic{section}.\arabic{theorem}} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{corr}[theorem]{Corollary} \newtheorem{prop}[theorem]{Prop} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{deff}[theorem]{Definition} \newtheorem{remark}[theorem]{Remark} \newtheorem{conjecture}[theorem]{Conjecture} \newcommand{\bth}{\begin{theorem}} \newcommand{\ble}{\begin{lemma}} \newcommand{\bcor}{\begin{corr}} \newcommand{\ltrt}{{L^2({\Bbb R}^3)}} \newcommand{\ltoo}{{L^2({\mathbb{R}}^3\backslash\mathcal{K})}} \newcommand{\bdeff}{\begin{deff}} \newcommand{\lirt}{{L^\infty({\Bbb R}^3)}} \newcommand{\lioo}{{L^{\infty}({\mathbb{R}}^3 \backslash \mathcal{K})}} \newcommand{\bprop}{\begin{proposition}} \newcommand{\ele}{\end{lemma}} \newcommand{\ecor}{\end{corr}} \newcommand{\edeff}{\end{deff}} \numberwithin{theorem}{section} \newcommand{\ii}{i} \newcommand{\eprop}{\end{proposition}} \newcommand{\rlnu}{{R_{\lambda}^{\nu}}} \newcommand{\trlnu}{{\tilde{R}_{\lambda}^{\nu}}} \newcommand{\tlnu}{{T_{\lambda}^{\nu}}} \newcommand{\ttlnu}{{\tilde{T}_{\lambda}^{\nu}}} \newcommand{\slnu}{{S_{\lambda}^{\nu}}} \newcommand{\slnut}{{}^t\!\slnu} \newcommand{\cd}{\, \cdot\, } \newcommand{\mlnu}{{m_{\lambda}^{\nu}}} \newcommand{\psilnu}{\psi_{\lambda}^{\nu}} 
\newcommand{\xilnu}{\xi_{\lambda}^{\nu}} \newcommand{\nlnu}{N_{\lambda}^{\nu}} \newcommand{\nl}{N_{\lambda}} \newcommand{\Rn}{{\mathbb R}^n} \newcommand{\jump}{{}} \newcommand{\la}{\lambda} \newcommand{\st}{{\Bbb R}^{1+3}_+} \newcommand{\eps}{\varepsilon} \newcommand{\e}{\varepsilon} \renewcommand{\l}{\lambda} \newcommand{\loc}{{\text{\rm loc}}} \newcommand{\comp}{{\text{\rm comp}}} \newcommand{\Coi}{C^\infty_0} \newcommand{\supp}{\text{supp }} \renewcommand{\Pi}{\varPi} \renewcommand{\Re}{\rm{Re} \,} \renewcommand{\Im}{\rm{Im} \,} \renewcommand{\epsilon}{\varepsilon} \newcommand{\sgn}{{\text {sgn}}} \newcommand{\Gmid}{\Gamma_{\text{mid}}} \newcommand{\Rt}{{\Bbb R}^3} \newcommand{\Mdel}{{{\cal M}}^\alpha} \newcommand{\dist}{{{\rm dist}}} \newcommand{\Adel}{{{\cal A}}_\delta} \newcommand{\Kob}{{\cal K}} \newcommand{\Dia}{\overline{\Bbb E}^{1+3}} \newcommand{\Diap}{\overline{\Bbb E}^{1+3}_+} \newcommand{\Cyl}{{\Bbb E}^{1+3}} \newcommand{\Cylp}{{\Bbb E}^{1+3}_+} \newcommand{\Penrose}{{\cal P}} \newcommand{\Rplus}{{\Bbb R}_+} \newcommand{\parital}{\partial} \newcommand{\tidle}{\tilde} \newcommand{\grad}{\text{grad}\,} \newcommand{\ob}{{\mathcal K}} \newcommand{\R}{{\mathbb R}} \newcommand{\Z}{{\mathbb Z}} \newcommand{\subheading}[1]{{\bf #1}} \newcommand{\1}{{\rm 1\hspace*{-0.4ex}\rule{0.1ex}{1.52ex}\hspace*{0.2ex}}} \begin{document} \title[Distribution symmetry of toral eigenfunctions] {Distribution symmetry\\ of toral eigenfunctions} \author[A. D. Mart\'inez]{\'Angel D. Mart\'inez} \address{Albacete, Fields Ontario Postdoctoral Fellow, University of Toronto Mississauga, Canad\'a} \email{[email protected]} \author[F. Torres de Lizaur]{Francisco Torres de Lizaur} \address{Sevilla, Fields Ontario Postdoctoral Fellow, University of Toronto, Canad\'a} \email{[email protected]} \maketitle \begin{abstract} In this paper we study a number of conjectures on the behavior of the value distribution of eigenfunctions. On the two dimensional torus we observe that the symmetry conjecture holds in the strongest possible sense. On the other hand we provide a counterexample for higher dimensional tori, which relies on a computer assisted argument. Moreover we prove a theorem on the distribution symmetry of a certain class of trigonometric polynomials that might be of independent interest. \end{abstract} \section{Introduction} The importance of Laplace eigenfunctions might be underpinned by the fact that they are analogous to the trigonometrical polynomials in the classical harmonic analysis. How far the analogy can be drawn lies at the heart of many conjectures on their behavior. For instance, Yau conjectured that the nodal set of Laplace eigenfunctions, $-\Delta_g\psi_{\lambda}=\lambda\psi_{\lambda}$, on a smooth Riemannian manifold $M$ of dimension $n$, has $(n-1)$-dimensional Hausdorff measure comparable to $\lambda^{1/2}$. In the case of eigenfunctions on the torus this is easily seen to be the case as an application of the fundamental theorem of calculus and an elementary geometric argument. For real-analytic manifolds, the proof of Yau's conjectue was given by Donnelly and Fefferman in \cite{DF}. Recently, Logunov proved the lower bound in the smooth category and improved the exponential upper bound of Hardt and Simon to a polynomial bound (cf. \cite{Lo, Lo2}). It is beyond the scope of this paper to provide a comprehensive introduction to the subject and we refer the reader to the extensive literature (see e.g \cite{Z} and references therein). 
In view of the predictions of the random wave conjectures of quantum chaos \cite{B, JNT, KRud}, it is interesting to investigate the relationship between positive and negative parts of real eigenfunctions on Riemannian manifolds. In the aforementioned seminal work, Donnelly and Fefferman also proved \begin{theorem*}[Corollary 7.10 in \cite{DF}] There exists a constant $C>0$ such that \[\frac{1}{C}\leq \frac{\operatorname{vol}(\{x\in M:\psi_{\lambda}(x)>0\})}{\operatorname{vol}(\{x\in M:\psi_{\lambda}(x)<0\})}\leq C.\] \end{theorem*} The constant depends on the manifold but not on the eigenvalue. In the case of the sphere, this quasi-symmetry result was conjectured in \cite{ArG}. In the case of surfaces, a different proof was found by Nadirashvili in \cite{N}; local versions have appeared for instance in the work of Nazarov, Polterovich and Sodin \cite{NPS}. For smooth Riemannian manifolds the analogous quasi-symmetry statement remains open (even in two dimensions). In connection with this, it has even been conjectured that \begin{conjecture}[Symmetry]\label{conj1} The limit \[ \frac{\operatorname{vol}(\{x\in M:\psi_{\lambda}(x)>0\})}{\operatorname{vol}(\{x\in M:\psi_{\lambda}(x)<0\})}\rightarrow 1\] holds as $\lambda$ grows to infinity. \end{conjecture} A heuristic justification of the above, in the case of metrics of negative curvature, seems to be provided by Berry's conjecture, but Conjecture \ref{conj1} is actually believed to hold for any manifold \cite{LoSB}. In this paper we will disprove Conjecture \ref{conj1} in the case of the $n$-dimensional tori with $n\geq 3$, although it will be proved to hold in $\mathbb{T}^2$ (cf. Theorem \ref{cx} and Theorem \ref{sign} below, respectively). It is not clear to the authors whether other two dimensional manifolds will satisfy the conjecture. Tori provide a particularly interesting geometry in which to study the behaviour of eigenfunctions, with subtle connections to number theory (cf. \cite{J, RL, RW}). In a forthcoming work the authors will prove that, for the two dimensional sphere, the conjecture holds for a basis of eigenfunctions (this is a weaker statement that trivially holds for $n$-dimensional tori regardless of the dimension). Another well-known result explores the ratio of global extrema: \begin{theorem*}[Nadirashvili, \cite{N}] There exists a constant $C>0$ such that \[\frac{1}{C}\leq \frac{\|\psi_{\lambda}\chi_{\{\psi_{\lambda}>0\}}\|_{\infty}}{\|\psi_{\lambda}\chi_{\{\psi_{\lambda}<0\}}\|_{\infty}} \leq C.\] \end{theorem*} In general the constant cannot be taken to be one (which would be optimal), as shown by Jakobson and Nadirashvili using zonal spherical harmonics in \cite{JN}. This theorem was extended by Jakobson and Nadirashvili to general $L^p$ norms, $1\leq p<\infty$ (loc. cit.). The exact value of the constant for the sphere $\mathbb{S}^n$ was considered in a work of Armitage \cite{Ar}. In \cite{JNT} Jakobson, Nadirashvili and Toth claim: \textit{it is unclear whether } \begin{conjecture}\label{conj2} \textit{$\|\psi_{\lambda}\chi_{\{\psi_{\lambda}>0\}}\|_{p}/\|\psi_{\lambda}\chi_{\{\psi_{\lambda}<0\}}\|_{p} \rightarrow 1$ as $\lambda\rightarrow \infty$ for $1<p<\infty$ on a given manifold.} \end{conjecture} One of the by-products of the results in this paper will be to answer this affirmatively for a wide class of two dimensional tori.
It is quite likely that our counterexamples (Theorem \ref{cx}) to Conjecture \ref{conj1} will also disprove this in the case of higher dimensional tori (for instance, it is easy to observe that they do disprove the endpoint $p=\infty$), but we will not pursue that question in this paper. \section{Statements of results} The first observation of this paper will be the following \begin{theorem}[Sign equidistribution]\label{sign} Given a non constant real eigenfunction $\psi$ of the flat two dimensional torus, the following identity holds \[\operatorname{vol}(\{x\in\mathbb{T}^2:\psi(x)>0\})=\operatorname{vol}(\{x\in\mathbb{T}^2:\psi(x)<0\}).\] \end{theorem} This is stronger than Conjecture \ref{conj1} and provides the first example for which it holds (to the best of the authors' knowledge). Let us observe in passing, though, that one can show the symmetry conjecture holds for a canonical basis of eigenfunctions for the Dirichlet problem on a ball using Stokes' approximations of the zeroes of Bessel functions (cf. \cite{Watson}, p. 505). This might provide an idea of the intrinsic analytic difficulties even in particular well-known cases. The following also holds: \begin{theorem}[Global extrema of eigenfunctions]\label{maxmin} Given a non constant real eigenfunction $\psi$ of the flat two dimensional torus, the following identity holds \[\max_{x\in\mathbb{T}^2}\psi(x)=-\min_{x\in\mathbb{T}^2}\psi(x).\] Or, stated differently, the absolute values of the maximum and the minimum coincide. \end{theorem} This fact is in clear contrast with the result of Armitage on the sphere \cite{Ar}, where equality is shown to fail. Both observations suggest some symmetry of the distribution function and will be consequences of the following more general \begin{theorem}[Distributional symmetry]\label{main} Given a non constant real eigenfunction $\psi$ of the flat two dimensional torus, the distribution function \[ d\lambda(s)=d\operatorname{vol}(\{x\in\mathbb{T}^2:\psi(x)>s\}) \] is symmetric around $s=0$. \end{theorem} This might be compared with the recent work of Klartag \cite{Klartag}. \begin{corr}[$L^p$ norm symmetry]\label{lp} Under the same hypothesis, the following holds \[\int_{\{\psi>0\}}|\psi(x)|^pdx=\int_{\{\psi<0\}}|\psi(x)|^pdx.\] \end{corr} This answers affirmatively the question raised by Jakobson, Nadirashvili and Toth (i.e., Conjecture \ref{conj2} above) in $\mathbb{T}^2$. Notice that all these results are neither probabilistic nor semiclassical, which is in contrast with part of the recent literature. At the same time, these observations together with the general conjectures stated above suggest their truth in higher dimensions as well. However, this is not the case (cf. Theorem \ref{cx}). We will next describe a distribution symmetry result in all dimensions, but only for a special class of trigonometric polynomials which does not include all eigenfunctions; this will be later complemented with counterexamples exhibiting its sharpness. To state it, we introduce the following class of trigonometric polynomials: \[f(x)=\sum_{i=1}^n(a_i\sin(2\pi \nu_i\cdot x)+b_i\cos(2\pi \nu_i\cdot x))\] where $x\in\mathbb{T}^n$, $a_i,b_i\in\mathbb{R}$ and the $\nu_i\in\mathbb{Z}^n$ are linearly independent vectors. We shall denote this class of functions by $\mathcal{S}(\mathbb{T}^n)$.
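Before stating the main result for this class, let us mention an informal numerical illustration (it is not used in any proof, and the particular frequencies and coefficients below are our own arbitrary choice): for a sample member of $\mathcal{S}(\mathbb{T}^2)$ with linearly independent frequencies $\nu_1=(1,2)$ and $\nu_2=(3,-1)$, a grid estimate of the volumes of the sets $\{f>0\}$ and $\{f<0\}$ returns two values close to $1/2$, in line with the symmetry results stated below.
\begin{verbatim}
import numpy as np

# Hedged sketch (ours): estimate vol({f > 0}) and vol({f < 0}) on a grid
# for one sample member of S(T^2); the frequencies (1, 2) and (3, -1) are
# linearly independent, the coefficients are arbitrary.
N = 1500
x, y = np.meshgrid(np.arange(N) / N, np.arange(N) / N, indexing="ij")
f = (0.8 * np.sin(2 * np.pi * (x + 2 * y))
     + 1.3 * np.cos(2 * np.pi * (3 * x - y)))
print("vol(f>0) ~", float((f > 0).mean()), " vol(f<0) ~", float((f < 0).mean()))
\end{verbatim}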
The main result we obtain for this class is \begin{theorem}[Distributional symmetry of functions in $\mathcal{S}$]\label{main2} The distribution function $d\lambda$ of a non constant function $f\in\mathcal{S}(\mathbb{T}^n)$ is symmetric around $s=0$. In particular, it satisfies sign equidistribution and its global extrema coincide in absolute value. \end{theorem} Notice that this is neither contained in nor implied by our previous results. It complements Theorem \ref{main} in dimension two, and analogous results regarding sign equidistribution, global extrema and $L^p$ norm symmetry follow for this class as well. The linear independence hypothesis cannot be dropped in any dimension, as the example \[f(x)=\sin(x)+\cos(2x)\] and small perturbations of it readily show. A more subtle counterexample within the class of eigenfunctions also exists, showing that, in general, Conjecture \ref{conj1} does not hold true. \begin{theorem}\label{cx} The function $g(x,y,z)$ given by \[\sin(x+y)-\cos(y-z)-\sin(x+z)\] satisfies $-\Delta g=2g$ in $\mathbb{T}^3$ and it is negative for at least $52\%$ of the volume. \end{theorem} The proof of this is a computer assisted argument requiring about a billion computations. The paper is organized as follows. In the next section we provide a proof of Theorem \ref{main}, of which Theorems \ref{sign}, \ref{maxmin} and Corollary \ref{lp} are immediate consequences. The proof method applies to more general two dimensional tori. In Section \ref{higher} we show that it does not apply to higher dimensional tori. In Section \ref{mainsection} we give three proofs of Theorem \ref{main2} on trigonometric polynomials in the class $\mathcal{S}$. They have different flavors, although two of them are essentially equivalent. Finally, we devote Section \ref{sectioncx} to explaining how to perform the computer assisted proof of Theorem \ref{cx}. \section{Proof of Theorem \ref{main}} We will divide the proof into a number of cases depending on the eigenvalue. This is based on the following trichotomy: since an eigenvalue must be a sum of two squares, say, \[\lambda=n^2+m^2\,,\] the following combinations of parities arise naturally: \begin{itemize} \item[(a)] Both $n$ and $m$ are even iff $\lambda\equiv 0\mod 4$. \item[(b)] The integers $n$ and $m$ have different parities iff $\lambda\equiv 1 \mod 4$. \item[(c)] Both $n$ and $m$ are odd iff $\lambda\equiv 2\mod 4$. \end{itemize} Notice that the case $\lambda\equiv 3\mod 4$ is not possible in dimension two. The first two cases are easy to handle. For instance, (a) reduces to the other two. Indeed, if $\psi(x)$ is an eigenfunction with eigenvalue $\lambda\equiv 0\mod 4$, it is easy to observe that $\psi(x/2)$ will be an eigenfunction with eigenvalue $\lambda/4$, and the proportion of area where it is positive or negative is preserved. We might apply this procedure until we hit case (b) or (c). In case (b), we claim that the eigenfunction satisfies the following identity \[\psi(x,y)=-\psi\left(x+\frac{1}{2},y+\frac{1}{2}\right)\] which proves that the set where it is positive is a translation of the set where it is negative and vice versa. Since translations preserve volume, the result follows. To see the claim, observe that we can express \begin{equation}\label{*} \psi(x,y)=\sum_{\nu} a_{\nu}\exp(2\pi i \nu_1x+2\pi i\nu_2y) \end{equation} where the sum extends over the set of $\nu\in\mathbb{Z}^2$ such that $\nu_1^2+\nu_2^2=\lambda$.
Since $\lambda\equiv 1\mod 4$ forces $\nu_1+\nu_2$ to be odd for every frequency appearing in the sum, summing this identity over all frequencies yields the claimed functional identity and finishes the proof in case (b). Finally, in case (c) both coordinates of every frequency are odd, so it is enough to translate by $\frac{1}{2}$ in one of the variables to get the same kind of functional identity. This concludes the proof: the translational symmetry is an involution and an isometry, and therefore it interchanges the sets with different sign, the global extrema, and the level sets in general. \section{Extensions of the argument and obstructions}\label{higher} The same method of proof applies to other situations as well. For instance, Theorem \ref{main} can be generalized to the following \begin{theorem}[Symmetry] Let $\Lambda \subset \mathbb{R}^{2}$ be a lattice and construct the torus $T=\mathbb{R}^2/(2 \pi \Lambda)$. Given a non-constant real eigenfunction $\psi$ on $T$, the distribution function $d\lambda$ is symmetric around $s=0$. \end{theorem} The proof is a straightforward generalization of the one presented above, taking into account that the eigenfunctions are combinations of $\exp(i k \cdot x)$ with $k=n v_1+ m v_2$, where $(v_1, v_2)$ are the generators of the dual lattice $\Lambda^{*}$. Indeed, case (b) is dealt with using the translation $(x,y)\mapsto(x,y)+u$, with $u$ being the unique vector such that $v_1 \cdot u= v_2 \cdot u= \frac{1}{2}$, and the rest of the argument follows mutatis mutandis. We leave further details to the reader. Likewise, the method of proof immediately provides \begin{theorem}\label{oddfrequencies} Distributional symmetry holds for real eigenfunctions in $\mathbb{T}^3$ with eigenvalue $\lambda\not\equiv 2\mod 4$, and for functions on any torus $\mathbb{T}^n$ supported on odd frequencies. \end{theorem} Notice that the latter statement is more general, as it does not restrict to eigenfunctions. One might be led to believe that the exceptions to the theorem could be handled using a different argument. This is false, as the counterexample constructed in Theorem \ref{cx} shows. As the proof of this will be computer-assisted, we believe it is suitable to provide the reader beforehand with an elementary argument showing that there is no semi-integral translation $T:x\mapsto x+\frac{1}{2}v$ with $v\in\mathbb{Z}^3$ such that the identity $\psi(x)=-\psi(Tx)$ holds, obstructing the extension of the arguments above. In fact, this observation played a crucial role in finding the counterexample itself. Indeed, suppose that the Fourier transform of $\psi$ is supported at least in the frequency vectors $\nu_1^*=(1,1,0)$, $\nu_2^*=(1,-1,0)$, $\nu_3^*=(0,1,1)$, $\nu_4^*=(0,1,-1)$, $\nu_5^*=(1,0,1)$ and $\nu_6^*=(1,0,-1)$. It is evident that one needs the scalar product of any of these with $v=(v_1,v_2,v_3)$ to be an odd number, which forces at least one of the entries of $v$ to be odd. By symmetry it is enough to suppose that $v_1$ is odd. Suppose now that $v_2$ is odd as well. Then the scalar products of such a $v$ with the vectors $(1,1,0)$ and $(1,-1,0)$ are even, a contradiction, so $v_2$ must be even. The same argument applies to $v_3$. But this is again a contradiction, since the scalar product with $(0,1,1)$ would then be even. \section{Proof of Theorem \ref{main2}}\label{mainsection} We present three proofs: one analytic, one geometric and one purely algebraic. We find it appropriate to present the analytic proof, although it is not the most elementary, for a couple of reasons.
First, because it might be of interest in itself, and we hope the reader might find applications of the argument to different problems. Second, it was our first proof and the way we discovered the result originally. \subsection{Analytic proof}\label{anproof} The proof will be based on the following \begin{lemma}\label{MS} Any continuous odd function can be uniformly approximated by linear combinations of $x^{2k+1}$ for $k=0,1,\ldots$ on any symmetric interval $[-L,L]$. \end{lemma} The proof can be found in the appendix and is based on a simple application of the M\"untz-Sz\'asz theorem (cf. \cite{FN}, p. 114). We now fix $f\in\mathcal{S}$ and set $L:=\|f\|_{\infty}$. Let us approximate the function $\textrm{sign}(x)$ by a continuous odd function $\sigma(x)$ such that $|\sigma(x)|\leq 1$, $\sigma(x)=\textrm{sign}(x)$ for $|x|>\eta$, and $\sigma$ may differ from $\textrm{sign}$ only for $|x|\leq \eta$. This implies \[\int_{\mathbb{T}^n}\textrm{sign}(f(x))\,dx=\int_{\mathbb{T}^n}\sigma(f(x))\,dx+O(\operatorname{vol}(\{x:|f(x)|<\eta\})).\] The latter set is a thin neighbourhood of the nodal set $Z(f)$ of $f$. In fact, by the continuity of $f$ and compactness it follows that for any $\delta>0$ there exists $\eta=\eta(\delta,f,\lambda)>0$ such that \[\{|f(x)|\leq\eta\}\subseteq Z(f)+B_{\delta},\] the volume of which can be shown to be $o(1)$ as $\eta$ tends to zero. Let us now estimate the integral on the right hand side. Given any $\epsilon>0$, an application of Lemma \ref{MS} shows that \[\int_{\mathbb{T}^n}\sigma(f(x))\,dx=\int_{\mathbb{T}^n}\sum_{k=0}^Na_k f(x)^{2k+1}\,dx+O(\epsilon).\] The proof will end if we can prove that each integral \[\int_{\mathbb{T}^n}f(x)^{2k+1}\,dx\] vanishes. Indeed, in such a case one may let $\epsilon, \eta\rightarrow 0$ and, putting together both estimates, we would be done. To complete the proof, recall that $f$ can be expressed as in equation \ref{*}, now with the sum running over the frequencies $\pm\nu_1,\ldots,\pm\nu_n$. Unfolding the $(2k+1)$-fold product, the only terms that survive are those for which the frequencies satisfy \begin{equation}\label{identity} 0=\sum_{j=1}^{2k+1}\nu_{i(j)}=\sum_{i=1}^nA_i\nu_i, \end{equation} where each $\nu_{i(j)}\in\{\pm\nu_1,\ldots,\pm\nu_n\}$ and the $A_i$ are integers. This cannot happen: since the number of factors $2k+1$ is odd, not all of the $A_i$ can vanish (if they all did, each frequency would appear an even number of times, making the total number of factors even), and a nontrivial relation $\sum_{i=1}^nA_i\nu_i=0$ contradicts the linear independence of the $\nu_i$. This concludes the proof of the sign equidistribution analogue for trigonometric polynomials in the class $\mathcal{S}$. Let us point out before continuing that the proof breaks down in general, as the linear combination \[\nu^*_1-\nu_4^*-\nu_5^*=(1,1,0)-(0,1,-1)-(1,0,1)=0\] shows (cf. Section \ref{higher}, last paragraph, for the definition of the $\nu^*_i$). The same method of proof provides the generalization \begin{proposition} For any $k\in\mathbb{N}$ the identity \[\int_{\mathbb{T}^n}f(x)^{2k}\operatorname{sign}(f(x))\,dx=0\] holds. \end{proposition} We have proved the case $k=0$ above and leave the details of the general case to the reader. This implies the following identity: \[\int_0^Ls^{2k}\,d\operatorname{vol}(\{f(x)>s\})=\int_0^Ls^{2k}\,d\operatorname{vol}(\{f(x)<-s\}).\] This, and a straightforward variation of the uniqueness results known for the moment problem\footnote{One only needs to replace the application of Weierstrass' theorem in Chapter II, Section 6 of \cite{W} by a version of the M\"untz-Sz\'asz theorem.}, imply that the functions \[s\mapsto\operatorname{vol}(\{f(x)>s\})\quad\textrm{ and }\quad s\mapsto\operatorname{vol}(\{-f(x)>s\})\] coincide for $s>0$, which concludes the proof. \subsection{Geometric proof} Without loss of generality let $\{\nu_i\}_{i=1}^n$ be a basis of $\mathbb{R}^n$. It is possible to change to the canonical basis by a linear map whose entries are rational.
This induces a transformation on the torus which distorts the volume uniformly (the same distortion at every point). As a consequence, the distribution functions of our function and of the function in the new coordinates are proportional. The latter satisfies the distribution symmetry conjecture, since there is a translation $T$ such that $g(Tx)=-g(x)$. \subsection{Algebraic proof} Given $f$ expressed as in equation \ref{*}, let $A$ be the $n \times n$ matrix whose $i$th column is given by the frequency vector $\nu_i$; equivalently, let $A_{ji}:=(\nu_{i})_j$. Observe that, denoting by $\{e_i\}_{i=1}^{n}$ the standard basis in $\mathbb{R}^n$, we have $A e_i=\nu_i$. Since the frequencies $\{\nu_i\}_{i=1}^{n}$ are linearly independent, the matrix $A$ has an inverse $A^{-1}$. In general, the determinant of $A$ will be (in absolute value) bigger than $1$, and thus the entries of $A^{-1}$ will not be integers but rational numbers (cf. the previous subsection). Denote by $(A^{-1})^{t}$ its transpose. For a sufficiently large $N$, the matrix $B:=N (A^{-1})^{t}$ has integer entries and defines a map $\Phi: \mathbb{T}^{n} \rightarrow \mathbb{T}^{n}$. It is easy to see that this map is a covering with degree equal to the determinant of $B$ (in absolute value); locally, it is a diffeomorphism with constant Jacobian, i.e., with uniform volume distortion. Thus the distribution function of $f$ is proportional to that of the function \[ g(x):=f(B x)=\sum_{i=1}^{n} \big(a_i \sin(2\pi B^{t}\nu_i \cdot x)+b_i \cos (2\pi B^{t}\nu_i \cdot x)\big). \] On the other hand, $B^{t}\nu_i=N e_i$, so \[ g(x)=\sum_{i=1}^{n} \big(a_i \sin (2 \pi N x_i)+b_i \cos (2 \pi N x_i)\big)\,. \] Therefore $g$ has a translational antisymmetry $g(x+w)=-g(x)$ for the vector $w=\frac{1}{2N}(1, 1,\ldots,1)^{t}$. Since translations preserve volume, we have distribution symmetry for $g$, and hence also for $f$. A more succinct variant of the previous argument, without the geometric interpretation, is the following. With the previous notations, define \[ u:=\frac{1}{2}\sum_{i=1}^{n} (A^{-1})^{t} e_{i} \,. \] This vector satisfies, for any $i=1,\ldots,n$, the identity $\nu_i \cdot u=\frac{1}{2}$. Thus we conclude that the function $f$ is antisymmetric under translation by $u$, i.e., \[ f(x+u)= \sum_{i=1}^{n} \big(a_i \sin(2 \pi \nu_i\cdot x+\pi)+b_i \cos (2 \pi \nu_i\cdot x+\pi)\big)=-f(x) \,, \] and the result follows. \section{Proof of Theorem \ref{cx}}\label{sectioncx} After rescaling the torus to $[0,1]^3$, we can estimate numerically from below the volume of the set of $(x,y,z)\in[0,1]^3$ such that \[g(x,y,z)=\sin(2\pi(x+y))-\cos(2\pi(y-z))-\sin(2\pi(x+z))<0.\] Indeed, it is easy to check that the gradient of $g$ is uniformly bounded by $6\pi$; this shows that the difference $g(x)-g(y)$ is bounded in absolute value by $e=6\sqrt{3}\pi L$ for any point $y$ within a cube of side length $L$ centered at $x$. This allows us to reduce the problem to a counting argument, as it will be enough to find the proportion of cubes where $g$ is negative. As a consequence, if the mesh length $L$ of a grid $G$ is small enough, the volume where $g$ is negative is bounded from below by the proportion of the centers of cubes $x\in G$ such that \[g(x)<-e.\] The above argument provides a criterion to bound this proportion from below, which can be checked with computer assistance on a large grid. However, the machine will commit rounding errors when computing the elementary function $g$ at any $x\in G$. To address this issue rigorously we will check the condition \[\bar{g}(x)<-1.1e,\] where we denote by $\bar{g}(x)$ the machine output of the calculation of $g(x)$; a schematic version of the resulting counting routine is given below.
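The following is a minimal sketch of such a counting routine, included only for the reader's orientation; it is not the program actually used for the verification. The grid size $N=2^{7}$ and the margin factor $1.1$ are the ones discussed below, while the variable names are illustrative and the sketch does not implement the rigorous treatment of rounding and of the approximation of $\pi$ described in the next paragraphs.
\begin{verbatim}
// Illustrative sketch of the grid-counting routine (not the original program).
// It counts the cube centers where the computed value of g lies below -1.1*e,
// which, by the argument in the text, certifies that g < 0 on the whole cube.
#include <cmath>
#include <cstdio>

int main() {
    const long   N  = 128;            // 2^7 grid points per coordinate
    const double L  = 1.0 / N;        // mesh side length
    const double PI = 3.14159265358979323846;
    const double e  = 6.0 * std::sqrt(3.0) * PI * L;  // variation bound on a cube

    long M = 0;                       // centers certified to have g < 0
    for (long i = 0; i < N; ++i)
      for (long j = 0; j < N; ++j)
        for (long k = 0; k < N; ++k) {
          const double x = (i + 0.5) * L, y = (j + 0.5) * L, z = (k + 0.5) * L;
          const double g = std::sin(2*PI*(x+y)) - std::cos(2*PI*(y-z))
                         - std::sin(2*PI*(x+z));
          if (g < -1.1 * e) ++M;      // the margin absorbs rounding errors
        }
    std::printf("certified negative proportion: %ld / %ld\n", M, N*N*N);
    return 0;
}
\end{verbatim}
The exact count produced by such a sketch depends on the grid convention and on the floating point environment, so it should be read only as an illustration of the counting step, not as a substitute for the error analysis that follows.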
The computation needed to check the inequality $\bar{g}(x)<-1.1e$ requires computing $g$ at every $x\in G$ with a certain accuracy, which, say, should be smaller than $0.05e$. As a consequence, for any $y$ in the cube centered at $x$, \[g(y)=g(y)-g(x)+g(x)-\bar{g}(x)+\bar{g}(x)<e+0.05e-1.1e<0.\] Of course, we are forced to use an approximation of $\pi$; let the approximation be $\bar{\pi}$. The mean value theorem implies that the error committed when we compute each trigonometric function (at any fixed $x\in[0,1]^3$) is bounded by $4\cdot |\pi-\bar{\pi}|$. (Let us remark that the mean value theorem is applied with respect to the variable $\bar{\pi}$.) In our computations we can use an approximation $\bar{\pi}$ with accuracy $10^{-20}$. This adds an error, corresponding to the computation of the three trigonometric functions with $\bar{\pi}$, which altogether is bounded by $12\cdot 10^{-20}$. The error committed when computing each of the three trigonometric functions appearing in \[g'(x,y,z)=\sin(2\bar{\pi}(x+y))-\cos(2\bar{\pi}(y-z))-\sin(2\bar{\pi}(x+z))\] in C++ is bounded by, at most, $3\cdot 10^{-6}$. Finally, the computer will need to add these three numbers, which adds a rounding error of at most $10^{-6}$ twice. The computation will be reliable if $11\cdot 10^{-6}<0.05e$ holds. Let us emphasize that this is a crude estimate on the error, and using higher precision arithmetic one can do much better; but, as we will see below, for this problem we will not need to go deeper, since all we are approximating is the sum of three trigonometric functions. A simple algorithm storing in a variable $M$ the number of points $x\in G$ that satisfy the condition \[\bar{g}(x)<-1.1e\] allows us to compute the proportion $M/N^3$. Due to the large number of computations required, this is achieved through a computer-assisted approach. We supplemented this argument with an algorithm implemented in C++, which in the particular case of a grid with $N^3=(2^{7})^3$ points gives \[\frac{M}{N^3}=\frac{1123200}{2097152},\] that is, roughly $53.5\%$ of the points $x\in G$ satisfy $\bar{g}(x)<-1.1e<0$. In this case the mesh side length is $L=1/N=2^{-7}$, which implies that the maximal variation $e$ within each box centered at those grid points is bounded above by $0.26$. As a consequence, the proportion of such points provides a lower bound for the volume of the set where $g(x)<0$, proving the result. Similarly one can conclude that the volume of the set of positivity is at least $34.3\%$. This is far from giving a sharp estimate. Using about a billion nodes instead ($(2^{10})^3$ nodes) one can get the lower estimates $59.3\%$ for the set of points where $g$ is negative and $39.1\%$ for the set where it is positive. This time the uncertainty of sign reduces to less than $2\%$ of the total volume. The precision of any machine for the required calculation is within $10^{-6}$, an accuracy that makes the above argument reliable and allows us to avoid a more rigorous study of the cumulative errors the algorithm might have committed. \section{Appendix} \textsc{Proof of Lemma \ref{MS}:} The M\"untz-Sz\'asz theorem implies that any continuous function $f$ on $[0,1]$ can be uniformly approximated by linear combinations of $1$ and $x^{2k+1}$ with $k=0,1,\ldots$. We may use a rescaling to assume $L=1$ without loss of generality. The oddness in our case implies that $f(0)=0$, from which it follows that for any $\epsilon$ there exist coefficients $c_0,a_0,\ldots,a_N$ such that \[f(x)=c_0+\sum_{k=0}^N a_k x^{2k+1}+O(\epsilon)\] uniformly on $[0,1]$, and evaluating at $x=0$ it follows that $c_0=O(\epsilon)$ too, so we may discard it at no cost. The rest follows by odd symmetry.
\section{\textbf{Acknowledgments}} The authors are grateful to the Fields Institute Hydrodynamics Program and the University of Toronto for their support. F.T.L. gratefully acknowledges support from the Max-Planck Institute for Mathematics. The authors would like to thank the referees for their suggestions. \begin{thebibliography}{10} \bibitem{Ar} Armitage, D., {\em Spherical extrema of harmonic polynomials}, J. London Math. Soc. (2), 19 (1979), pp. 451-456. \bibitem{ArG} Armitage, D. H.; Gardiner, S. J., {\em The sign of a harmonic function near zero}, Recent progress in multivariate approximation, Witten-Bommerholz, 200. Internat. Ser. Numer. Math., 137, pp. 31-32. \bibitem{B} Berry, M., {\em Regular and irregular semiclassical wavefunctions}, J. Phys. A 10 (1977), no. 12, pp. 2083-2091. \bibitem{DF} Donnelly, H.; Fefferman, Ch., {\em Nodal sets of eigenfunctions on Riemannian manifolds}, Invent. Math. 93 (1988), no. 1, pp. 161-183. \bibitem{HS} Hardt, R.; Simon, L., {\em Nodal sets for solutions of elliptic equations}, J. Diff. Geom. 30 (1989), pp. 505-522. \bibitem{J} Jakobson, D., {\em Quantum limits on flat tori}, Ann. of Math. (2) 145 (1997), no. 2, pp. 235-266. \bibitem{JN} Jakobson, D.; Nadirashvili, N., {\em Quasi-symmetry of $L^p$ norms of eigenfunctions}, Communications in Analysis and Geometry 10 (2002), no. 2, pp. 397-408. \bibitem{JNT} Jakobson, D.; Nadirashvili, N.; Toth, J., {\em Geometric properties of eigenfunctions}, Russian Mathematical Surveys 56, no. 6. \bibitem{Klartag} Klartag, B., {\em Unimodal value distribution of Laplace eigenfunctions and a monotonicity formula}, Geometriae Dedicata 208 (2020), pp. 13-29. \bibitem{KRud} Kurlberg, P.; Rudnick, Z., {\em Value distribution for eigenfunctions of desymmetrized quantum maps}, International Mathematics Research Notices (2001), no. 18, pp. 985-1002. \bibitem{Kr} Kr\"oger, P., {\em On the ranges of eigenfunctions on compact manifolds}, Bull. London Math. Soc. 30 (1998), pp. 651-655. \bibitem{RL} Lester, S.; Rudnick, Z., {\em Small scale equidistribution of eigenfunctions on the torus}, Comm. Math. Phys. 350 (2017), no. 1, pp. 279-300. \bibitem{Lo} Logunov, A., {\em Nodal sets of Laplace eigenfunctions: proof of Nadirashvili's conjecture and of the lower bound in Yau's conjecture}, Ann. of Math. 187 (2018), no. 1, pp. 241-262. \bibitem{Lo2} Logunov, A., {\em Nodal sets of Laplace eigenfunctions: polynomial upper estimates of the Hausdorff measure}, Ann. of Math. 187 (2018), no. 1, pp. 221-239. \bibitem{LoSB} Logunov, A., {\em Geometry of nodal sets of Laplace eigenfunctions}, talk at the Workshop ``Analysis, Dynamics, Geometry and Probability'', 2020-03-06. Available at \url{http://scgp.stonybrook.edu/video_portal/video.php?id=4473} \bibitem{FN} Feinerman, R. P.; Newman, D. J., {\em Polynomial approximation}, Waverly Press, 1974. \bibitem{N} Nadirashvili, N., {\em Metric properties of eigenfunctions of the Laplace operator on manifolds}, Ann. Inst. Fourier 41 (1991), pp. 259-265. \bibitem{NPS} Nazarov, F.; Polterovich, L.; Sodin, M., {\em Sign and area in nodal geometry of Laplace eigenfunctions}, American Journal of Mathematics 127 (2005), no. 4, pp. 879-910. \bibitem{RW} Rudnick, Z.; Wigman, I., {\em Nodal intersections for random eigenfunctions on the torus}, Amer. J. Math. 138 (2016), no. 6, pp. 1605-1644. \bibitem{Y} Yau, S.-T., {\em Seminar on Differential Geometry}, Ann. of Math. Stud., vol. 102, Princeton University Press, Princeton, NJ, 1982. \bibitem{Watson} Watson, G.
N., {\em A treatise on the theory of Bessel functions}, Cambridge University Press, 1922. \bibitem{W} Widder, D. V., {\em The Laplace transform}, Princeton University Press, 1946. \bibitem{Z} Zelditch, S., {\em Eigenfunctions and nodal sets}, Surveys in Differential Geometry, vol. 18 (2013), pp. 237-308. \end{thebibliography} \end{document}
2205.14385v7
http://arxiv.org/abs/2205.14385v7
Isometric direct limits of bidual Banach spaces
\documentclass{amsart} \usepackage{graphicx} \usepackage{amsmath} \usepackage{amssymb} \usepackage{amsthm} \usepackage{enumitem} \usepackage{tikz-cd} \newtheorem{theorem}{Theorem}[section] \newtheorem{proposition}[theorem]{Proposition} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{example}[theorem]{Example} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{hypothesis}[theorem]{Hypothesis} \begin{document} \author{Sebastian Gwizdek} \address{Faculty of Applied Mathematics, AGH University of Science and Technology, al. Mickiewicza 30, 30-059, Krak\'ow, Poland} \email{[email protected]} \title{Isometric direct limits of bidual Banach spaces} \keywords{direct limit, inverse limit, Banach space, Banach algebra, uniform algebra, Corona theorem, Arens Product, Gleason part, representing measure} \subjclass[2020]{32A38, 32A65, 32A70, 46A13, 46J10, 46J15, 46M15, 46M40} \begin{abstract} Sequences of $n$-th order bidual Banach spaces, called tower systems, and their direct and inverse limits are considered. Motivated by recent applications to the corona problem, we introduce two functors, \textrm{Dir} and \textrm{Inv}, assigning to Banach spaces (and to bounded linear operators) new Banach spaces and operators. Of particular interest is the enormous ``tower space'' built over the space of continuous functions. We prove that the action of \textrm{Dir} preserves direct sum decompositions. This functor also preserves spectra of operators, as well as their Fredholmness and compactness properties. An application of these functors to the problem of the location of supports of representing measures for function algebras is outlined in the last section. \end{abstract} \maketitle \section{Introduction} Direct (or \emph{inductive}) limits have been used with success in many branches of mathematics. From the theory of distributions to quite recent works \cite{AW}, \cite{WF}, to name a few, such limits have been studied in various categories. Here we present a construction of two functors in $\normalfont\textbf{Ban}_1$, the category of Banach spaces with linear contractions as morphisms. The first functor assigns to a given Banach space the direct limit of the appropriate tower system, and to a bounded linear mapping a bounded operator acting between the corresponding direct limits. The second functor produces inverse limits of such systems. In the case of uniform Banach algebras the first functor proved useful in studying the corona problem for a special class of strongly starlike strictly pseudoconvex domains \cite{KR}. This problem has a long history, dating back to its 1941 formulation by Shizuo Kakutani and its solution in the case of the unit disc in 1962 by Lennart Carleson. The problem remained open for regular domains in the higher dimensional case. The recent solution in \cite{KR} established the corona theorem for strictly pseudoconvex domains which are also strongly starlike. In \cite{SG} the starlikeness condition has even been eliminated. One of the key results in \cite{KR}, concerning supports of representing measures, was obtained after taking the direct limit of a sequence of consecutive biduals of a uniform algebra. This construction suggested the introduction of our functors in the broad framework of Banach spaces. The paper is organized as follows. In Section 2 we construct the functors \textrm{Dir} and \textrm{Inv}, which turn out to be adjoint in the sense described in Theorem 2.9. The action of both functors preserves direct sum decompositions, as shown in Theorem 2.6 and Corollary 2.10.
In Theorems 2.11 and 2.12 we show that these functors preserve spectra of bounded operators and the property of being a Fredholm operator, respectively. The final theorem of Section 2 shows that both \textrm{Dir} and \textrm{Inv} preserve compactness of operators under an additional hypothesis; namely, we assume that the underlying Banach space possesses the approximation property (which ensures that compact operators are norm limits of finite rank operators). Section 3 contains an application of these functors to a recent result of \cite{KR} on supports of representing measures for certain uniform algebras. \section{Tower Functors} Whenever we speak of direct or inverse limits of Banach spaces, we mean limits in the category $\normalfont\textbf{Ban}_1$. This category consists of Banach spaces as objects and contractive (i.e. of norm less than or equal to 1) linear mappings as morphisms. It is well known that each direct (resp. inverse) system in $\normalfont\textbf{Ban}_1$ has a direct (resp. inverse) limit (see \cite{Se}, \S 11.8.2). Let $\left\{X_i\colon i\in I \right\}$ be a family of objects of $\normalfont\textbf{Ban}_1$ indexed by a directed set $\langle I,\le\rangle$ and assume that $f_{ij}\colon X_i\to X_j$ are morphisms for all $i\le j$ such that \begin{enumerate}[label={\textup{(\roman*)}}, widest=iii, leftmargin=*] \item $f_{ii}$ is the identity of $X_i$, \item $f_{ik}=f_{jk}\circ f_{ij}$ for all $i\le j\le k$. \end{enumerate} Then the pair $\langle X_i,f_{ij}\rangle$ is called a \textit{direct system} over $I$. Here we will consider only \emph{isometric direct systems}, which means that all the maps $f_{ij}$ are isometries, and the index set will be $I=\mathbb N$, the set of natural numbers. On the disjoint union $\bigsqcup_{i\in \mathbb N} X_i:= \bigcup_i(X_i\times \{i\})$ we define the equivalence relation $(x_i,i)\sim (x_j,j)$ if for $k=\max\{i,j\}$ we have $f_{ik}(x_i)=f_{jk}(x_j)$. (Here $x_i\in X_i$ and $x_j\in X_j$.) In particular, $(x_i,i)\sim (f_{ij}(x_i),j)$ for all $i\le j$. Sometimes we shall write $x_i\sim x_j$ instead of $(x_i,i)\sim (x_j,j)$ to simplify the notation. On the set $\bigsqcup_{i\in I} X_i\big/_\sim$ we consider the standard quotient vector space structure. Since our system is isometric, the norm of $[(x_i,i)]\in\bigsqcup_{i\in I} X_i\big/_\sim$ may be defined by $\Vert [(x_i,i)]\Vert :=\Vert x_i\Vert$; it does not depend on the choice of representative of the equivalence class. The \textit{direct limit} of the isometric direct system $\langle X_i,f_{ij}\rangle$, denoted by $\displaystyle\lim_{\longrightarrow} X_i$, is the completion of $\bigsqcup_{i\in I} X_i\big/_\sim$ in this norm. The canonical mappings $f_{i,\infty}\colon X_i\to\displaystyle\lim_{\longrightarrow} X_i$, sending each element to its equivalence class, are isometric morphisms in $\normalfont\textbf{Ban}_1$ under the above operations. For a Banach space $X$ let $X^{*}$ denote its dual. We define $X_0:=X$ and $X_{n+1}:=X_{n}^{**}$ for $n=0,1,2,\ldots$. The family $\left\{X_n\colon n\in\mathbb{N}\right\}$ of iterated biduals of the Banach space $X$ will be called a \textit{tower system}. For any ``base space'' $X$ we denote by $\kappa_{n,n+1}$ the canonical embedding of $X_n$ into its bidual $X_{n+1}$. For $n\le m$ we define $\kappa_{n,m}\colon X_n\to X_m$ by \begin{equation*} \kappa_{n,m}:=\kappa_{m-1,m}\circ \kappa_{m-2,m-1}\circ\ldots\circ \kappa_{n, n+1}, \ \textrm{if} \ n<m \end{equation*} and $\kappa_{n,n}$ is defined to be the identity mapping.
Since each $\kappa_{n,m}$ is an isometry, we obtain an isometric direct system $\langle X_n,\kappa_{n,m}\rangle$. Given a second Banach space $Y$, denote by $Y_n$ the related tower system, with canonical embeddings denoted also by $\kappa_{n,m}$ and $\kappa_{n,\infty}$. For a bounded linear operator $T\in B(X,Y)$ we denote by $T^*$ its adjoint, i.e. the mapping $T^*: Y^*\ni \varphi \mapsto \varphi\circ T \in X^*$. Define $\mathcal{J}_0(T):=T$ and $\mathcal{J}_{n+1}(T):=\mathcal{J}_{n}(T)^{**}$ for $n=0,1,2,\ldots$, so that each $\mathcal{J}_{n}(T)\in B(X_n, Y_n)$ with $\|\mathcal{J}_{n}(T)\|=\|T\|$. Define $\displaystyle\lim_{\longrightarrow}\mathcal{J}_{n}(T)\colon\lim_{\longrightarrow} X_n\to\lim_{\longrightarrow} Y_n$ first on $\bigsqcup_{i\in I} X_i\big/_\sim$ by \begin{equation} \lim_{\longrightarrow} \mathcal{J}_{n}(T) \left([(x_n,n)]\right):=[(\mathcal{J}_{n}(T)(x_n),n)] \ \textrm{for} \ x_n\in X_n, \end{equation} and then extend it by continuity to the whole space (denoting this extension by the same symbol $\displaystyle\lim_{\longrightarrow}\mathcal{J}_{n}(T)$). Finally, we define the functor $\textrm{Dir}\colon\normalfont\textbf{Ban}\to\normalfont\textbf{Ban}$ as follows: \begin{equation*} \textrm{Dir}\colon X\mapsto\lim_{\longrightarrow} X_n, \end{equation*} \begin{equation*} \textrm{Dir}\colon T\mapsto\lim_{\longrightarrow} \mathcal{J}_{n}(T). \end{equation*} The following lemma shows that the mapping $\displaystyle\lim_{\longrightarrow} \mathcal{J}_{n}(T)$ is well defined. \begin{lemma} If $x_n\in X_n$ for some $n\ge 0$ then \begin{equation*} (\mathcal{J}_{n+1}(T)\circ\kappa_{n,n+1})(x_n)=(\kappa_{n,n+1}\circ \mathcal{J}_{n}(T))(x_n). \end{equation*} \end{lemma} \begin{proof} For any $\gamma\in Y_n^*$ we have \begin{equation*} \begin{split} \langle (\kappa_{n,n+1}\circ \mathcal{J}_{n}(T))(x_n), \gamma \rangle = \langle \gamma, \mathcal{J}_{n}(T)(x_n) \rangle = \langle \mathcal{J}_{n}(T)^{*}(\gamma), x_n\rangle = \\ =\langle \kappa_{n,n+1}(x_n), \mathcal{J}_{n}(T)^{*}(\gamma) \rangle = \langle (\mathcal{J}_{n+1}(T)\circ\kappa_{n,n+1})(x_n), \gamma \rangle. \end{split} \end{equation*} \end{proof} \begin{proposition} $\normalfont\textrm{Dir}$ is a covariant functor from $\normalfont\textbf{Ban}$ to $\normalfont\textbf{Ban}$. \end{proposition} \begin{proof} First we check that $\displaystyle{ \lim_{\longrightarrow} }\mathcal{J}_{n}(T)$ is well defined. If $[(x_n,n)]= [(x_{n+1},n+1)]$, i.e. $x_n\sim x_{n+1}$, then $x_{n+1}=\kappa_{n,n+1}(x_n)$. From Lemma 2.1 we obtain the equality \begin{equation*} \mathcal{J}_{n+1}(T)(x_{n+1})=(\mathcal{J}_{n+1}(T)\circ\kappa_{n,n+1})(x_n)=(\kappa_{n,n+1}\circ \mathcal{J}_{n}(T) )(x_n). \end{equation*} This implies $\mathcal{J}_{n+1}(T)(x_{n+1})\sim \mathcal{J}_{n}(T)(x_n)$. For arbitrary $n\le m$, if $x_n\sim x_m$ then $\kappa_{n,m-1}(x_n)\sim x_m$. Hence $\mathcal{J}_{m}(T)(x_m)\sim \mathcal{J}_{m-1}(T)(\kappa_{n,m-1}(x_n))$, from which we obtain the equality \begin{equation*} \mathcal{J}_{m}(T)(x_{m})=\kappa_{m-1,m}(\mathcal{J}_{m-1}(T)(\kappa_{n,m-1}(x_n))). \end{equation*} Now $\kappa_{n,m-1}(x_n)\sim\kappa_{n,m-2}(x_n)$, so that \begin{equation*} \mathcal{J}_{m-1}(T)(\kappa_{n,m-1}(x_n))\sim \mathcal{J}_{m-2}(T)(\kappa_{n,m-2}(x_n)). \end{equation*} Hence \begin{equation*} \mathcal{J}_{m}(T)(x_{m})=(\kappa_{m-1,m}\circ\kappa_{m-2,m-1})(\mathcal{J}_{m-2}(T)(\kappa_{n,m-2}(x_n))).
\end{equation*} Repeating the above reasoning yields \begin{equation*} \begin{split} \mathcal{J}_{m}(T)(x_{m})=(\kappa_{m-1,m}\circ\ldots\circ\kappa_{n+1,n+2})(\mathcal{J}_{n+1}(T)(\kappa_{n,n+1}(x_n)))= \\ =(\kappa_{m-1,m}\circ\ldots\circ\kappa_{n,n+1})(\mathcal{J}_{n}(T)(x_n))=\kappa_{n,m}(\mathcal{J}_{n}(T)(x_n)). \end{split} \end{equation*} Hence we obtain that $\mathcal{J}_{n}(T)(x_{n})\sim \mathcal{J}_{m}(T)(x_m)$. Take any $T\in B(X,Y)$ and $S \in B(Y,Z)$. For $x_n\in X_n$ we have \begin{equation*} \begin{split} \lim_{\longrightarrow} (\mathcal{J}_{n}(S)\circ \mathcal{J}_{n}(T)) \left([(x_n,n)]\right)=[(\mathcal{J}_{n}(S)(\mathcal{J}_{n}(T)(x_n)),n)]= \\ =\lim_{\longrightarrow} \mathcal{J}_{n}(S) \left([(\mathcal{J}_{n}(T)(x_n),n)]\right)=\lim_{\longrightarrow} \mathcal{J}_{n}(S) \left(\lim_{\longrightarrow}\mathcal{J}_{n}(T)([(x_n,n)])\right). \end{split} \end{equation*} Hence $\textrm{Dir}(S\circ T)=\textrm{Dir}(S)\circ \textrm{Dir}(T)$. Clearly $\textrm{Dir}(T)$ is bounded and linear. Finally, the functor $\textrm{Dir}$ preserves identity mappings (by an obvious calculation). \end{proof} One can easily notice that the mapping $\textrm{Dir}\colon X\mapsto\displaystyle\lim_{\longrightarrow} X_n$ is in general not injective. Indeed, for any Banach space $X$ we have $\textrm{Dir}(X)=\textrm{Dir}(X^{**})$ up to isomorphism. In the case when the Banach space $X$ is reflexive we even have the equality $\textrm{Dir}(X)=X$. The following example shows that $\textrm{Dir}(X)$ can be small even for non-reflexive $X$. \begin{example} \normalfont Let $\mathfrak{J}$ denote the \textit{James space}. The space $\mathfrak{J}$ is a separable Banach space of codimension one in its bidual, i.e., $\dim\mathfrak{J}^{**}\big/\kappa_{0,1}(\mathfrak{J})=1$. Actually, $\mathfrak{J}$ is isometrically isomorphic to $\mathfrak{J}^{**}$. More information on this space can be found in \cite{M}. Since the subspace $ \kappa_{0,1}(\mathfrak{J})$ is complemented in $\mathfrak{J}^{**}$, there are elements $e_1\in\mathfrak{J}^{**},\dots, e_n\in \mathfrak J_n,\ldots$ such that the tower system $\left\{\mathfrak{J}_n\right\}$ of $\mathfrak{J}$ satisfies, up to isomorphism, the equalities \begin{equation*} \mathfrak{J}_1=\mathfrak{J}^{**}=\mathfrak{J}\oplus\mathbb{C}e_1, \end{equation*} \begin{equation*} \mathfrak{J}_2=\mathfrak{J}_1^{**}=\mathfrak{J}_1\oplus\mathbb{C}e_2=\mathfrak{J}\oplus\mathbb{C}e_1\oplus\mathbb{C}e_2, \end{equation*} \begin{equation*} \mathfrak{J}_n=\mathfrak{J}\oplus\mathbb{C}e_1\oplus\ldots\oplus\mathbb{C}e_n. \end{equation*} Taking the direct limit we obtain \begin{equation*} \textrm{Dir}(\mathfrak{J})=\mathfrak{J}\oplus\mathfrak{R}, \end{equation*} where $\mathfrak{R}$ denotes the completion of the linear span of the vectors $\left\{e_k\colon k\ge 1\right\}$, in which the norm of a vector $\lambda_1e_1+\ldots +\lambda_n e_n$, $\lambda_k\in\mathbb{C}$, is calculated in the space $\mathfrak{J}_n$, $n\ge 1$. Hence $\textrm{Dir}(\mathfrak{J})$ is a separable space. \end{example} In the second example the tower space will be large, far from separable. \begin{example} \normalfont Let $K$ be a compact, infinite Hausdorff space and let $\mathcal{F}_1$ be a maximal singular family of regular Borel probability measures on $K$ (which can be chosen arbitrarily). Here singularity means that $\mu\perp\nu$ for any $\mu\neq \nu$, $\mu,\nu\in \mathcal F_1$. Let $\left\{\mathcal{C}_n\right\}$ denote the tower system of $C(K)$.
Define $U_{\mathcal{F}_1}$ to be the disjoint topological union of the spectra $\Phi_\mu$ of the Banach algebras $L^{\infty}(K,\mu)$ ($\Phi_\mu$ is the set of all non-zero linear and multiplicative functionals on $L^{\infty}(K,\mu)$). Then each $\Phi_\mu$ is a compact and open subspace of $U_{\mathcal{F}_1}$, and by \cite[Theorem 5.4.4]{D} we have the equality \begin{equation*} \mathcal{C}_1=C(K)^{**}=C(\beta U_{\mathcal{F}_1}), \end{equation*} where $\beta$ denotes the Stone-\v{C}ech compactification. Also for $n\ge 2$ we obtain \begin{equation*} \mathcal{C}_n=C(\beta U_{\mathcal{F}_n}). \end{equation*} Here $\mathcal{F}_n$ is a maximal singular family of regular Borel probability measures on $\beta U_{\mathcal{F}_{n-1}}$ containing $\kappa_{n-1,n}(\mathcal{F}_{n-1})$. The mapping $\kappa_{n-1,n}$ is an isometry, hence from the singularity relation $\mu\perp\nu$ it follows that $\kappa_{n-1,n}(\mu)\perp\kappa_{n-1,n}(\nu)$; here we use the fact that $\mu\perp\nu$ if and only if $\Vert\mu\pm\nu\Vert=\Vert\mu\Vert +\Vert\nu\Vert$. Hence $\kappa_{n-1,n}(\mathcal{F}_{n-1})$ is a family of singular measures. Taking the direct limit we get the equality \begin{equation*} \textrm{Dir}(C(K))=C\left(\beta \left(\bigsqcup U_{\mathcal{F}_n}\right)\right), \end{equation*} where $\bigsqcup U_{\mathcal{F}_n}$ denotes the topological disjoint union of the spaces $U_{\mathcal{F}_n}$, in which each $\Phi_\mu$ is a compact and open set for every $\mu\in\mathcal{F}_n$, $n\in\mathbb{N}$. \end{example} Our next theorem will show that the action of the functor $\textrm{Dir}$ preserves finite direct sum decompositions. But first we need the following lemma. For $T\in B(X):= B(X,X)$ we denote by $\mathcal{N}(T)$ and $\mathcal{R}(T)$ its kernel and range, respectively. \begin{lemma} If $P\in B(X)$ is a projection, then $P^{**}\in B(X^{**})$ is a projection onto $\mathcal{R}(P^{**})=\mathcal{R}(P)^{**}$ and such that $\mathcal{N}(P^{**})=\mathcal{N}(P)^{**}$. \end{lemma} \begin{proof} From the multiplicative property of taking adjoints we see that $(P^*)^2=P^*$, i.e. $P^{*}\in B(X^{*})$ is a projection and so is $P^{**}\in B(X^{**})$. It remains to show that $\mathcal{R}(P^{**})=\mathcal{R}(P)^{**}$ and $\mathcal{N}(P^{**})=\mathcal{N}(P)^{**}$. First we shall prove that $\kappa_{0,1}(\mathcal{R}(P))\subset\mathcal{R}(P^{**})$ and $\kappa_{0,1}(\mathcal{N}(P))\subset\mathcal{N}(P^{**})$. Take any $y\in\mathcal{R}(P)$ of the form $y=Px$ for some $x\in X$. Then for any $\phi\in X^{*}$ we obtain the equalities \begin{equation*} \langle P^{**}(\kappa_{0,1}(x)) ,\phi\rangle=\langle\kappa_{0,1}(x), P^{*}\phi\rangle=\langle P^{*}\phi, x\rangle=\langle \phi, Px\rangle=\langle\phi, y\rangle=\langle\kappa_{0,1}(y), \phi\rangle. \end{equation*} Hence $\kappa_{0,1}(\mathcal{R}(P))\subset\mathcal{R}(P^{**})$. Similarly, for any $x\in\mathcal{N}(P)$ and $\phi\in X^{*}$ we have \begin{equation*} \langle P^{**}(\kappa_{0,1}(x)) ,\phi\rangle=\langle\kappa_{0,1}(x), P^{*}\phi\rangle=\langle P^{*}\phi, x\rangle=\langle \phi, Px\rangle=0 \end{equation*} and $\kappa_{0,1}(\mathcal{N}(P))\subset\mathcal{N}(P^{**})$. The sets $\mathcal{R}(P^{**})$ and $\mathcal{N}(P^{**})$ are weak-* closed in $X^{**}$. By the Goldstine theorem the canonical images of $\mathcal{R}(P)$ and $\mathcal{N}(P)$ are weak-* dense in $\mathcal{R}(P)^{**}$ and $\mathcal{N}(P)^{**}$, respectively. As a consequence we obtain the inclusions $\mathcal{R}(P)^{**}\subset\mathcal{R}(P^{**})$ and $\mathcal{N}(P)^{**}\subset\mathcal{N}(P^{**})$.
The operator $P$ is a projection, so we have the decomposition $X=\mathcal{R}(P)\oplus\mathcal{N}(P)$. It follows that \begin{equation*} X^{**}=\mathcal{R}(P)^{**}\oplus\mathcal{N}(P)^{**}=\mathcal{R}(P^{**})\oplus\mathcal{N}(P^{**}). \end{equation*} Take any $\phi\in\mathcal{R}(P^{**})$. Then $\phi$ has a unique decomposition of the form $\phi=\phi_1+\phi_2$, where $\phi_1\in\mathcal{R}(P)^{**}$ and $\phi_2\in\mathcal{N}(P)^{**}$. From the already proved inclusions we obtain $\phi_1\in\mathcal{R}(P^{**})$ and $\phi_2\in\mathcal{N}(P^{**})$. Hence $\phi_2=\phi-\phi_1\in\mathcal{R}(P^{**})$. However $\mathcal{R}(P^{**})\cap\mathcal{N}(P^{**})=\left\{0\right\}$, so that $\phi=\phi_1\in\mathcal{R}(P)^{**}$. This proves the inclusion $\mathcal{R}(P^{**})\subset\mathcal{R}(P)^{**}$. The proof that $\mathcal{N}(P^{**})\subset\mathcal{N}(P)^{**}$ is analogous. \end{proof} We are ready to prove our next theorem. \begin{theorem} Let $X$ be a Banach space. If $X=U\oplus V$ for some closed subspaces $U$, $V$ of $X$ then $\normalfont\textrm{Dir}(X)=\textrm{Dir}(U)\oplus\textrm{Dir}(V)$. \end{theorem} \begin{proof} Let $P\in B(X)$ be a projection such that $\mathcal{R}(P)=U$ and $\mathcal{N}(P)=V$. Then $\textrm{Dir}(P)\in B(\textrm{Dir}(X))$ and \begin{equation*} \textrm{Dir}(P)=\textrm{Dir}(P^2)=\textrm{Dir}(P)^2. \end{equation*} Hence $\textrm{Dir}(P)$ is a bounded projection. In order to prove that $\mathcal{R}(\textrm{Dir}(P))=\textrm{Dir}(U)$ and $\mathcal{N}(\textrm{Dir}(P))=\textrm{Dir}(V)$ we first show that $\textrm{Dir}(U)\subset\mathcal{R}(\textrm{Dir}(P))$. Let $\left\{U_n\right\}$ denote the tower system of $U$. Take any $[(y_n,n)]\in\bigsqcup U_n\big/_\sim$. From the previous lemma we deduce the equalities \begin{equation*} \bigsqcup U_n\big/_\sim=\bigsqcup \left(\textrm{Dir}(P)(X_n \big/_\sim)\right)=\textrm{Dir}(P)\left(\bigsqcup X_n\big/_\sim\right). \end{equation*} It follows that $[(y_n,n)]\in\mathcal{R}(\textrm{Dir}(P))$. The set $\bigsqcup U_n\big/_\sim$ is dense in $\textrm{Dir}(U)$ and the space $\mathcal{R}(\textrm{Dir}(P))$ is closed. Hence $\textrm{Dir}(U)\subset\mathcal{R}(\textrm{Dir}(P))$. Conversely, from the continuity of $\textrm{Dir}(P)$ we have that \begin{equation*} \mathcal{R}(\textrm{Dir}(P))=\textrm{Dir}(P)\left(\overline{\bigsqcup X_n\big/_\sim}\right)\subset\overline{\textrm{Dir}(P)\left(\bigsqcup X_n\big/_\sim\right)}=\textrm{Dir}(U). \end{equation*} Hence $\mathcal{R}(\textrm{Dir}(P))=\textrm{Dir}(U)$. Now we show that $\textrm{Dir}(V)\subset\mathcal{N}(\textrm{Dir}(P))$. Let $\left\{V_n\right\}$ denote the tower system of $V$. Take any $[(x_n,n)]\in\bigsqcup V_n\big/_\sim$. Using the lemma preceding this theorem we obtain the equalities \begin{equation*} \bigsqcup V_n\big/_\sim=\bigsqcup \left(\mathcal{N}(\textrm{Dir}(P))\cap\left(X_n \big/_\sim\right)\right)=\mathcal{N}(\textrm{Dir}(P))\cap\left(\bigsqcup X_n\big/_\sim\right). \end{equation*} As a consequence $[(x_n,n)]\in\mathcal{N}(\textrm{Dir}(P))$. The set $\bigsqcup V_n\big/_\sim$ is dense in $\textrm{Dir}(V)$ and the space $\mathcal{N}(\textrm{Dir}(P))$ is closed, so we obtain the inclusion $\textrm{Dir}(V)\subset\mathcal{N}(\textrm{Dir}(P))$.
Conversely, using the continuity of $\textrm{Dir}(P)$ we get the relations \begin{equation*} \begin{split} \mathcal{N}(\textrm{Dir}(P))=\textrm{Dir}(I-P)\left(\overline{\bigsqcup X_n\big/_\sim}\right)\subset\overline{\textrm{Dir}(I-P)\left(\bigsqcup X_n\big/_\sim\right)}= \\ =\overline{\mathcal{N}(\textrm{Dir}(P))\cap\left(\bigsqcup X_n\big/_\sim\right)}=\overline{\bigsqcup V_n\big/_\sim}=\textrm{Dir}(V). \end{split} \end{equation*} Hence $\mathcal{N}(\textrm{Dir}(P))=\textrm{Dir}(V)$ and the proof is finished. \end{proof} Let us now recall the definition of a natural transformation of functors. Let $\mathcal{C}$ and $\mathcal{C'}$ be categories. A \textit{natural transformation} of functors $F$, $G\colon\mathcal{C}\to \mathcal{C'}$ is a family $\eta=(\eta_X)$ of mappings assigned to all objects $X$ in $\mathcal{C}$, such that \begin{enumerate}[label={\textup{(\roman*)}}, widest=iii, leftmargin=*] \item each $\eta_X\colon F(X)\to G(X)$ is a morphism in $\mathcal{C'}$ for any object $X$ in $\mathcal{C}$, \item for every morphism $f\colon X\to Y$ in $\mathcal{C}$ the following equality holds \begin{equation*} \eta_Y\circ F(f)=G(f)\circ\eta_X. \end{equation*} \end{enumerate} It is well known that there exists a natural transformation from the identity functor on vector spaces to the double dual functor. It turns out that the same holds for the functor $\normalfont\textrm{Dir}$. \begin{proposition} There exists a natural transformation of the identity functor on $\normalfont\textbf{Ban}$ to the functor $\normalfont\textrm{Dir}$. \end{proposition} \begin{proof} Let $X$ be a Banach space. We define $\eta_X\colon X\to \displaystyle\lim_{\longrightarrow} X_n$ by \begin{equation*} \eta_X(x):= [(x,0)]. \end{equation*} For any Banach space $X$ the mapping $\eta_X$ is an isometry. It suffices to show that, given any $T\in B(X,Y)$, the diagram below commutes. \[ \begin{tikzcd}[row sep=4em, column sep=4em] \displaystyle\lim_{\longrightarrow} X_n \arrow[r, "\displaystyle\lim_{\longrightarrow} \mathcal{J}_{n}(T)"] & \displaystyle\lim_{\longrightarrow} Y_n \\ X \arrow[r, "T"'] \arrow[u, "\eta_X"] & Y \arrow[u, "\eta_Y"'] \end{tikzcd} \] For $x\in X$ the following equalities hold \begin{equation*} \begin{split} \left(\lim_{\longrightarrow} \mathcal{J}_{n}(T)\circ\eta_X\right)(x)=\lim_{\longrightarrow} \mathcal{J}_{n}(T)([(x,0)])= \\ =[(T(x),0)]=\eta_Y(T(x))=\left(\eta_Y\circ T\right)(x). \end{split} \end{equation*} Hence the result follows. \end{proof} We also have an inverse system $\langle X_n^*,\kappa_{n,m}^*\rangle$. Its inverse limit is the set \begin{equation*} \lim_{\longleftarrow} X_n^*=\left\{\phi=(\phi_n)_{n\in\mathbb{N}}\in\prod_{n=0}^{\infty}X_n^*\colon \phi_n=\kappa_{n,m}^*(\phi_m), \ \ n\le m, \ \Vert \phi\Vert=\sup_{n}\Vert \phi_n\Vert <\infty\right\}. \end{equation*} It should be noted that for non-reflexive $X$ the morphisms $\kappa_{n,m}^*$ are not isometric, but they are contractive instead. We define the mapping $\displaystyle\lim_{\longleftarrow} \mathcal{J}_{n}(T)^*\colon\lim_{\longleftarrow} Y_n^*\to\lim_{\longleftarrow} X_n^*$ as follows \begin{equation*} \lim_{\longleftarrow} \mathcal{J}_{n}(T)^*\left(\phi\right)(n):=\mathcal{J}_{n}(T)^*\left(\phi_n\right) \ \textrm{for} \ \phi=(\phi_n)_{n\in\mathbb{N}}\in\lim_{\longleftarrow} Y_n^*.
\end{equation*} Finally, we define the functor $\textrm{Inv}\colon\normalfont\textbf{Ban}\to\normalfont\textbf{Ban}$ by \begin{equation*} \textrm{Inv}\colon X\mapsto\lim_{\longleftarrow} X_n^*, \end{equation*} \begin{equation*} \textrm{Inv}\colon T\mapsto\lim_{\longleftarrow} \mathcal{J}_{n}(T)^*. \end{equation*} \begin{proposition} The functor $\normalfont\textrm{Inv}$ is a contravariant functor from $\normalfont\textbf{Ban}$ to $\normalfont\textbf{Ban}$. \end{proposition} \begin{proof} Take any $T\in B(X,Y)$ and $S\in B(Y,Z)$. For $\phi=(\phi_n)_{n\in\mathbb{N}}\in\displaystyle\lim_{\longleftarrow} Z_n^*$ the following equalities hold \begin{equation*} \begin{split} \lim_{\longleftarrow} \left(\mathcal{J}_{n}(S)\circ \mathcal{J}_{n}(T)\right)^*\left(\phi\right)(n)=\left(\mathcal{J}_{n}(S)\circ \mathcal{J}_{n}(T)\right)^*(\phi_n)= \\ =\left(\mathcal{J}_{n}(T)^*\circ \mathcal{J}_{n}(S)^*\right)(\phi_n)=\mathcal{J}_{n}(T)^*\left(\lim_{\longleftarrow} \mathcal{J}_{n}(S)^*\left(\phi\right)(n)\right)= \\ =\lim_{\longleftarrow} \mathcal{J}_{n}(T)^* \left(\lim_{\longleftarrow} \mathcal{J}_{n}(S)^*\left(\phi\right)(n)\right). \end{split} \end{equation*} Whence we obtain $\textrm{Inv}(S\circ T)=\textrm{Inv}(T)\circ \textrm{Inv}(S)$. It is clear that the mapping $\textrm{Inv}(T)$ is linear. We have to prove that $\textrm{Inv}(T)$ is a bounded operator. For $\phi=(\phi_n)_{n\in\mathbb{N}}\in\displaystyle\lim_{\longleftarrow} Y_n^*$ we have \begin{equation*} \Vert\textrm{Inv}(T)(\phi)(n)\Vert=\Vert \mathcal{J}_{n}(T)^*(\phi_n)\Vert\le\Vert \mathcal{J}_{n}(T)^*\Vert\cdot\Vert \phi_n\Vert\le\Vert T\Vert\cdot\Vert\phi\Vert. \end{equation*} Hence $\Vert\textrm{Inv}(T)\Vert\le\Vert T\Vert$. Finally, for any $\phi=(\phi_n)_{n\in\mathbb{N}}\in\displaystyle\lim_{\longleftarrow} X_n^*$ we have \begin{equation*} \begin{split} \textrm{Inv}(\textrm{id}_X)(\phi)(n)=\lim_{\longleftarrow} \mathcal{J}_{n}(\textrm{id}_X)^*(\phi)(n)=\mathcal{J}_{n}(\textrm{id}_X)^*(\phi_n)= \\ =\textrm{id}_{X^*_n}(\phi_n)=\phi_n=\phi(n)=\textrm{id}_{\textrm{Inv}(X)}(\phi)(n). \end{split} \end{equation*} This shows that the functor $\textrm{Inv}$ preserves identity mappings. \end{proof} We have the following relation between these two functors. \begin{theorem} The functor $\normalfont\textrm{Inv}$ is adjoint to $\normalfont\textrm{Dir}$ in the following sense: \begin{enumerate}[label={\textup{(\roman*)}}, widest=iii, leftmargin=*] \item $\normalfont\textrm{Dir}(X)^*=\textrm{Inv}(X)$ for any Banach space $X$, \item $\normalfont\textrm{Dir}(T)^*=\textrm{Inv}(T)$ for any $T\in B(X,Y)$. \end{enumerate} \end{theorem} \begin{proof} To prove the first part of this theorem we have to show that \begin{equation*} \left(\lim_{\longrightarrow} X_n\right)^*=\lim_{\longleftarrow} X_n^*, \end{equation*} up to an isometric isomorphism. Take any $\phi\in\displaystyle\left(\lim_{\longrightarrow} X_n\right)^*$. Define $\phi_n:=\phi\circ\kappa_{n,\infty}$, where $\kappa_{n,\infty}\colon X_n\to\displaystyle\lim_{\longrightarrow} X_n$ is the canonical mapping sending each element to the corresponding equivalence class. We claim that $\Phi(\phi):=(\phi_n)_{n\in\mathbb{N}}$ is an element of $\displaystyle\lim_{\longleftarrow} X_n^*$.
For any $\gamma\in X_n$ the following equalities hold \begin{equation*} \begin{split} \langle\kappa_{n,n+1}^*(\phi_{n+1}), \gamma\rangle=\langle\phi_{n+1}, \kappa_{n,n+1}(\gamma)\rangle=\langle\phi\circ\kappa_{n+1,\infty}, \kappa_{n,n+1}(\gamma)\rangle= \\ =\langle\phi, \left(\kappa_{n+1,\infty}\circ\kappa_{n,n+1}\right)(\gamma)\rangle=\langle\phi, \kappa_{n,\infty}(\gamma)\rangle=\langle\phi\circ\kappa_{n,\infty}, \gamma\rangle=\langle\phi_n, \gamma\rangle. \end{split} \end{equation*} Hence $\phi_n=\kappa_{n,n+1}^*\left(\phi_{n+1}\right)$ for any $n\in\mathbb{N}$. For arbitrary $n\le m$ we have \begin{equation*} \begin{split} \kappa_{n,m}^*(\phi_m)=\left(\kappa_{m-1,m}\circ\ldots\circ\kappa_{n,n+1}\right)^*(\phi_m)=\left(\kappa_{n,n+1}^*\circ\ldots\circ\kappa_{m-1,m}^*\right)(\phi_m)= \\ =\left(\kappa_{n,n+1}^*\circ\ldots\circ\kappa_{m-2,m-1}^*\right)(\phi_{m-1})=\ldots =\phi_n. \end{split} \end{equation*} Also, for any $n\in\mathbb{N}$ the following inequalities are satisfied \begin{equation*} \Vert\phi_n\Vert=\Vert\phi\circ\kappa_{n,\infty}\Vert\le\Vert\phi\Vert\cdot\Vert\kappa_{n,\infty}\Vert\le\Vert\phi\Vert, \end{equation*} so that $\Vert\Phi (\phi)\Vert\le\Vert\phi\Vert$. Whence $\Phi(\phi)\in\displaystyle\lim_{\longleftarrow} X_n^*$. Now take any $\psi=(\psi_n)_{n\in\mathbb{N}}\in\displaystyle\lim_{\longleftarrow} X_n^*$. Define the mapping $\Psi(\psi)$ first on a dense subset of $\displaystyle\lim_{\longrightarrow} X_n$ by \begin{equation} \Psi(\psi)\left([(x_n,n)]\right):=\psi_n(x_n). \end{equation} We prove that this mapping is well defined. Take $x_n\in X_n$ and $x_m\in X_m$ with $n\le m$ such that $x_n\sim x_m$. Hence $x_m=\kappa_{n,m}(x_n)$ and \begin{equation*} \langle\psi_m, x_m\rangle=\langle\psi_m, \kappa_{n,m}(x_n)\rangle=\langle\kappa_{n,m}^*(\psi_m),x_n\rangle=\langle\psi_n, x_n\rangle. \end{equation*} This shows that the mapping (2) is well defined. We prove that $\Psi(\psi)$ is a bounded linear functional. For any $x_n\in X_n$, $x_m\in X_m$ with $n\le m$ and $\alpha\in\mathbb{C}$ we have \begin{equation*} \Psi(\psi)\left(\alpha\cdot[(x_n,n)]\right)=\Psi(\psi)\left([(\alpha x_n,n)]\right)=\psi_n(\alpha x_n)=\alpha\psi_n(x_n)=\alpha\Psi(\psi)\left([(x_n,n)]\right); \end{equation*} also, there exists $k\ge m\ge n$ such that \begin{equation*} \begin{split} \Psi(\psi)\left([(x_n,n)]+[(x_m,m)]\right)=\Psi(\psi)\left([(\kappa_{n,k}(x_n)+\kappa_{m,k}(x_m),k)]\right)= \\ =\psi_k(\kappa_{n,k}(x_n)+\kappa_{m,k}(x_m))=\left(\psi_k\circ\kappa_{n,k}\right)(x_n)+\left(\psi_k\circ\kappa_{m,k}\right)(x_m)= \\ =\left(\kappa_{n,k}^*(\psi_k)\right)(x_n)+\left(\kappa_{m,k}^*(\psi_k)\right)(x_m)=\psi_n(x_n)+\psi_m(x_m)= \\ =\Psi(\psi)\left([(x_n,n)]\right)+\Psi(\psi)\left([(x_m,m)]\right). \end{split} \end{equation*} Hence $\Psi(\psi)$ is linear. For $x_n\in X_n$ we have \begin{equation*} \Vert\Psi(\psi)\left([(x_n,n)]\right)\Vert=\Vert\psi_n(x_n)\Vert\le\Vert\psi_n\Vert\cdot\Vert x_n\Vert\le\Vert\psi\Vert\cdot\Vert x_n\Vert. \end{equation*} Whence $\Psi(\psi)$ is a bounded linear functional. By continuity we may extend $\Psi(\psi)$ to the entire space $\displaystyle\lim_{\longrightarrow} X_n$. The mappings $\displaystyle\Phi\colon\left(\lim_{\longrightarrow} X_n\right)^*\ni\phi\mapsto\Phi(\phi)\in\lim_{\longleftarrow} X_n^*$ and $\displaystyle\Psi\colon\lim_{\longleftarrow} X_n^*\ni\psi\mapsto\Psi(\psi)\in\left(\lim_{\longrightarrow} X_n\right)^*$ are inverses of each other and are contractions, hence the spaces are isometrically isomorphic.
For the second claim of the theorem we need to verify that \begin{equation*} \left(\lim_{\longrightarrow} \mathcal{J}_{n}(T)\right)^*=\lim_{\longleftarrow} \mathcal{J}_{n}(T)^*. \end{equation*} Take arbitrary $\phi\in\displaystyle\left(\lim_{\longrightarrow} Y_n\right)^*$ and $x_n\in X_n$. Then \begin{equation*} \begin{split} \left\langle\left(\lim_{\longrightarrow} \mathcal{J}_{n}(T)\right)^*(\phi), [(x_n,n)]\right\rangle =\langle\phi, \lim_{\longrightarrow} \mathcal{J}_{n}(T)\left([(x_n,n)]\right)\rangle = \\ =\langle\phi, [(\mathcal{J}_{n}(T)(x_n),n)]\rangle =\langle\phi\circ\kappa_{n,\infty}, \mathcal{J}_{n}(T)(x_n)\rangle = \\ =\langle \mathcal{J}_{n}(T)^*(\phi\circ\kappa_{n,\infty}), x_n \rangle =\left\langle\lim_{\longleftarrow} \mathcal{J}_{n}(T)^*(\phi\circ\kappa_{n,\infty}),x_n\right\rangle. \end{split} \end{equation*} It follows from the identification $\displaystyle\left(\lim_{\longrightarrow} Y_n\right)^*=\lim_{\longleftarrow} Y_n^*$ that an element $\phi$ is identified with the sequence $(\phi\circ\kappa_{n,\infty})_{n\in\mathbb{N}}$, and we obtain the required equality. The second part of the theorem is proved. \end{proof} The following fact is an easy consequence of the previous theorems. \begin{corollary} Let $X$ be a Banach space. If $X=U\oplus V$ for some closed subspaces $U$, $V$ of $X$ then $\normalfont\textrm{Inv}(X)=\textrm{Inv}(U)\oplus\textrm{Inv}(V)$. \end{corollary} \begin{proof} From Theorem 2.6 we know that $\textrm{Dir}(X)=\textrm{Dir}(U)\oplus\textrm{Dir}(V)$. It follows that $\textrm{Dir}(X)^{*}=\textrm{Dir}(U)^{*}\oplus\textrm{Dir}(V)^{*}$. Applying part (i) of Theorem 2.9 to the last equality finishes the proof. \end{proof} In the next result we shall prove that the action of our functors preserves the spectrum of an operator. \begin{theorem} Let $X$ be a Banach space. For any operator $T\in B(X)$ the following equalities hold \begin{equation*} \normalfont\sigma(T)=\sigma(\textrm{Dir}(T))=\sigma(\textrm{Inv}(T)), \end{equation*} where $\sigma(T)$ denotes the spectrum of $T$. \end{theorem} \begin{proof} Suppose that $0\notin\sigma(T)$ and denote by $T^{-1}\in\mathcal{B}(X)$ the inverse of $T$. From the properties of the functor $\textrm{Dir}$ we obtain \begin{equation*} \textrm{Dir}(TT^{-1})=\textrm{Dir}(T)\textrm{Dir}(T^{-1})=\textrm{Dir}(\textrm{id}_X)=\textrm{id}_{\textrm{Dir}(X)}. \end{equation*} It follows that $0\notin\sigma(\textrm{Dir}(T))$ and consequently $\sigma(\textrm{Dir}(T))\subset\sigma(T)$. In order to prove the reverse inclusion assume that $0\in\sigma(T)$. There are two possibilities: either $0$ belongs to the approximate point spectrum of $T$ or the range $\mathcal{R}(T)$ of $T$ is not dense in $X$. In the first case there exists a sequence $(x_k)$ of elements of $X$ such that $\Vert x_k\Vert =1$ and $Tx_k\to 0$. Hence \begin{equation*} \textrm{Dir}(T)([(x_k,0)])=[(Tx_k,0)]\to [(0,0)] \end{equation*} and $0$ is in the approximate point spectrum of $\textrm{Dir}(T)$. Suppose now that $\overline{\mathcal{R}(T)}\ne X$. By the Hahn-Banach theorem there exists a non-zero functional $\phi\in X^{*}$ vanishing on $\mathcal{R}(T)$. Let $\phi_n:=\kappa_{0,n}(\phi)$, so that $\phi_n\in X_n^{*}$. We define the functional $\Phi$ on a dense subset of $\textrm{Dir}(X)$ by \begin{equation*} \Phi([(x_n,n)])=\phi_n(x_n). \end{equation*} By continuity $\Phi$ has a unique extension to $\Phi\in\textrm{Dir}(X)^{*}$. We will prove that $\Phi$ belongs to the kernel $\mathcal{N}(\textrm{Dir}(T)^{*})$ of $\textrm{Dir}(T)^{*}$.
For any $x_n\in X_n$ we have \begin{equation} \begin{split} \langle\textrm{Dir}(T)^{*}(\Phi) , [(x_n,n)] \rangle =\langle\Phi , \textrm{Dir}(T)([(x_n,n)]) \rangle = \\ =\langle\Phi , [(\mathcal{J}_n(T) x_n,n)]\rangle =\langle \phi_n , \mathcal{J}_n(T) x_n \rangle. \end{split} \end{equation} We claim that each $\phi_n$ vanishes on $\mathcal{R}(\mathcal{J}_n(T))$. It suffices to check that this property holds for $n=1$. For any $x\in X$ we have \begin{equation*} \begin{split} \langle\phi_1, T^{**}(\kappa_{0,1}(x))\rangle =\langle\kappa_{0,1}(\phi), T^{**}(\kappa_{0,1}(x))\rangle = \langle T^{**}(\kappa_{0,1}(x)), \phi \rangle = \\ = \langle\kappa_{0,1}(x), T^{*}\phi \rangle = \langle T^{*}\phi , x \rangle = \langle \phi , Tx \rangle = 0. \end{split} \end{equation*} By the Goldstine theorem the canonical image of $X$ is weak-* dense in $X^{**}$. Since $T^{**}$ is weak-* continuous, it follows that $\phi_1$ vanishes on $\mathcal{R}(T^{**})$. Consequently, from (3) we deduce that $\mathcal{N}(\textrm{Dir}(T)^{*})$ is non-zero, or equivalently $\overline{\mathcal{R}(\textrm{Dir}(T))}\ne\textrm{Dir}(X)$. Hence $\sigma(T)=\sigma(\textrm{Dir}(T))$. The equality $\sigma(\textrm{Dir}(T))=\sigma(\textrm{Inv}(T))$ follows from the fact that $\textrm{Inv}(T)$ is the adjoint of $\textrm{Dir}(T)$. \end{proof} Our next result shows that for a Fredholm operator $T\in B(X)$ the action of our functors yields again a Fredholm operator; moreover, the corresponding equalities of indices are satisfied. Let us recall that for a Fredholm operator $T$ its index, denoted by $\textrm{i}(T)$, is the difference between the dimension of the kernel of $T$ and the dimension of the cokernel of $T$. Then $T^*\in B(X^{*})$ is also Fredholm and $\textrm{i}(T^{*})=-\textrm{i}(T)$. \begin{theorem} Let $X$ be a Banach space possessing the approximation property. If $T\in B(X)$ is a Fredholm operator then $\normalfont\textrm{Dir}(T)$ and $\normalfont\textrm{Inv}(T)$ are also Fredholm operators. What is more, the equalities $\normalfont\textrm{i}(T)=\textrm{i}(\textrm{Dir}(T))=-\textrm{i}(\textrm{Inv}(T))$ are satisfied. \end{theorem} \begin{proof} Let $T\in B(X)$ be a Fredholm operator. By \cite[Lemma 4.39]{A} there exist a closed subspace $V$ and a finite dimensional subspace $W$ such that \begin{equation*} T=0\oplus S\colon\mathcal{N}(T)\oplus V\to W\oplus\mathcal{R}(T), \end{equation*} where $0$ denotes the operator constantly equal to $0$ and $S:=T\big|_V$ is an isomorphism. From Theorem 2.6 we obtain that \begin{equation*} \textrm{Dir}(T)=0\oplus\textrm{Dir}(S)\colon\textrm{Dir}(\mathcal{N}(T))\oplus\textrm{Dir}(V)\to\textrm{Dir}(W)\oplus\textrm{Dir}(\mathcal{R}(T)). \end{equation*} By Proposition 2.2 the operator $\textrm{Dir}(S)$ is also an isomorphism, and we get the equalities $\mathcal{N}(\textrm{Dir}(T))=\textrm{Dir}(\mathcal{N}(T))$ and $\mathcal{R}(\textrm{Dir}(T))=\textrm{Dir}(\mathcal{R}(T))$. Hence, from the fact that $\mathcal{N}(T)$ and $W$ are finite dimensional we have that \begin{equation*} \dim\mathcal{N}(\textrm{Dir}(T))=\dim\textrm{Dir}(\mathcal{N}(T))=\dim\mathcal{N}(T)<\infty \end{equation*} and \begin{equation*} \begin{split} \dim\textrm{Dir}(X)\big/\mathcal{R}(\textrm{Dir}(T))=\dim\textrm{Dir}(X)\big/\textrm{Dir}(\mathcal{R}(T))= \\ =\dim\textrm{Dir}(W)=\dim W=\dim X\big/\mathcal{R}(T)<\infty. \end{split} \end{equation*} From the above equalities and from part (ii) of Theorem 2.9 we obtain that $\textrm{Dir}(T)$ and $\textrm{Inv}(T)$ are Fredholm operators and $\textrm{i}(T)=\textrm{i}(\textrm{Dir}(T))=-\textrm{i}(\textrm{Inv}(T))$.
\end{proof} In the last theorem of this section we are going to prove that our functors preserve compactness of operators. Our method of proof requires that the Banach space $X$ has the \textit{approximation property}, so that any compact operator on $X$ is a norm-limit of finite rank operators. \begin{theorem} Let $X$ be a Banach space possessing the approximation property. If $T\in B(X)$ is a compact operator then $\normalfont\textrm{Dir}(T)$ and $\normalfont\textrm{Inv}(T)$ are also compact operators. \end{theorem} \begin{proof} By the assumption $T$ is a limit of finite rank operators. Since $\Vert T\Vert=\Vert\textrm{Dir}(T) \Vert$, it suffices to prove that the rank of $\textrm{Dir}(T)$ equals the rank of $T$, and in fact it is enough to treat the case when this rank equals one: finite rank operators are sums of rank one operators, and $\textrm{Dir}$ preserves addition of operators. In the rank one case $T$ has the form $Tx=\varphi(x)z$ for some $\varphi\in X^{*}$ and $z\in X$. For $\gamma\in X_n$ we have \begin{equation*} \mathcal{J}_{n}(T)(\gamma)=\kappa_{0,n}(\varphi)(\gamma)\kappa_{0,n}(z). \end{equation*} Hence \begin{equation*} \begin{split} \textrm{Dir}(T)(\kappa_{n,\infty}(\gamma))=\kappa_{n,\infty}(\mathcal{J}_{n}(T)(\gamma))=\kappa_{n,\infty}(\kappa_{0,n}(\varphi)(\gamma)\kappa_{0,n}(z))= \\ =\kappa_{0,n}(\varphi)(\gamma)(\kappa_{n,\infty}\circ\kappa_{0,n})(z)=\kappa_{0,n}(\varphi)(\gamma)\kappa_{0,\infty}(z)=\Phi(\gamma)\kappa_{0,\infty}(z), \end{split} \end{equation*} where $\Phi$ is the functional on $\textrm{Dir}(X)$ determined by the compatible sequence $(\kappa_{0,n}(\varphi))_{n\in\mathbb{N}}$. By the density of $\bigcup\kappa_{n,\infty}(X_n)$ in $\textrm{Dir}(X)$ it follows that $\textrm{Dir}(T)$ is a rank one operator. From the Schauder theorem we deduce that $\textrm{Inv}(T)$ is also a compact operator. \end{proof} We conjecture that the approximation property assumption can be omitted. \begin{hypothesis} For any Banach space $X$, if $T\in B(X)$ is a compact operator then $\normalfont\textrm{Dir}(T)$ is a compact operator. \end{hypothesis} \section{Supports of representing measures} In this section we outline an application of our functors to uniform algebras. Here the functor Dir assigns to a uniform algebra another function algebra, by extending the Arens product. It should be noted that the inverse system now consists of the spectra of the iterated bidual uniform algebras: we restrict Inv to multiplicative linear functionals sending the unit element to 1. This ``restricted Inv'' will act as a functor in the category of compact Hausdorff spaces. Let $K$ be a compact Hausdorff space. By a \textit{uniform algebra} on $K$ we understand a closed unital subalgebra $A$ of $C(K)$ separating the points of $K$. An important example of a uniform algebra is $A(G)$, the algebra of those analytic functions on a strictly pseudoconvex domain $G\subset\mathbb{C}^d$ which have continuous extensions to the Euclidean closure $\overline G$. The \textit{spectrum} of $A$, denoted by $\textrm{Sp}(A)$, is the set of all nonzero multiplicative and linear functionals on $A$. Endowed with the Gelfand (=weak-*) topology, $\textrm{Sp}(A)$ is a compact Hausdorff space, containing a homeomorphic copy of $K$. The natural embedding of $K$ into the spectrum is given by $K\ni x\mapsto\delta_x\in\textrm{Sp}(A)$, where $\delta_x(f)=f(x)$ for $f\in A$. A uniform algebra $A$ on $K$ is called a \textit{natural uniform algebra} if $\textrm{Sp}(A)=K$ in the sense of this embedding. It is known that $A(G)$ is natural on $\overline G$ if the domain $G$ is strictly pseudoconvex (see \cite{HS}). Let $A$ be a Banach algebra.
For $\lambda\in A^{*}$, define $a\cdot\lambda$ and $\lambda\cdot a$ by duality \begin{equation*} \langle a\cdot\lambda, b\rangle:=\langle\lambda, ba\rangle, \quad \langle\lambda\cdot a,b\rangle:=\langle\lambda, ab\rangle, \quad a,b\in A. \end{equation*} Now, for $\lambda\in A^{*}$ and $M\in A^{**}$, define $\lambda\cdot M$ and $M\cdot\lambda$ by \begin{equation*} \langle \lambda\cdot M,a\rangle:=\langle M,a\cdot\lambda\rangle, \quad \langle M\cdot\lambda ,a\rangle:=\langle M, \lambda\cdot a\rangle, \quad a\in A. \end{equation*} Finally, for $M,N\in A^{**}$, define \begin{equation*} \langle M\Box N,\lambda\rangle:=\langle M,N\cdot\lambda\rangle, \quad \langle M\Diamond N,\lambda\rangle:=\langle N,\lambda\cdot M\rangle, \quad \lambda\in A^{*}. \end{equation*} The products $\Box$ and $\Diamond$ are called, respectively, the \textit{first} and \textit{second Arens products} on $A^{**}$. A bidual of $A$ is Banach algebra with respect to Arens products. The natural embedding of $A$ into its bidual identifies $A$ as a norm -- closed subalgebra of both $(A^{**},\Box)$ and $(A^{**},\Diamond)$. All $C^{*}$-algebras are \emph{Arens regular} in the sense, that the two products $\Box$ and $\Diamond$ agree on $A^{**}$. Closed subalgebras of Arens regular algebras are Arens regular, hence all uniform algebras are Arens regular. Also the bidual of uniform algebra is again a uniform algebra with respect to the Arens products (cf.\cite{Da} and \cite{D}). There is an equivalence relation on the spectrum of a uniform algebra given by \begin{equation*} \Vert\phi -\psi\Vert <2, \end{equation*} with $\Vert\cdot\Vert$ denoting the norm in $A^*$ for $\phi$, $\psi\in\textrm{Sp}(A)$. The equivalence classes under the above relation are called \textit{Gleason parts}. The space of complex, regular Borel measures on $K$, with total variation norm will be denoted by $M(K)$. As the consequence of Riesz-Markov-Kakutani Representation Theorem we have $M(K)=C(K)^*$. We say that $\mu\in M(K)$ is \textit{representing} \textit{measure} for $\phi\in\textrm{Sp}(A)$ if $\mu $ is probabilistic and \begin{equation*} \phi(f)=\int_Kfd\mu \quad \textrm{for any} \quad f\in A. \end{equation*} The set of all representing measures for $\phi\in\textrm{Sp}(A)$ is denoted by $M_{\phi}(K)$. By \cite[Proposition 8.2]{Co} for any functional $\phi\in\textrm{Sp}(A)$ there exists at least one representing measure. In fact this measure can be chosen with its support contained in the Shilov boundary of $A$. We refer to \cite{Co}, \cite{G} and \cite{S} for further information on uniform algebras. The idea of using second duals in studying the spectrum of $H^\infty(G)$ is based on the observation that this algebra can be seen either as a weak-* closure of a (much easier to study) algebra $A:=A(G)$ in $A^{**}$ or as a quotient algebra of $A^{**}$ by one of its ideals. Moreover, to any Gleason part $\gamma $ of $\textrm{Sp}(A)$ there corresponds an idempotent $g\in A^{**}$ vanishing on $\textrm{Sp}(A)\setminus \gamma$ and equal 1 on $ \gamma$ (also $g=1$ on its w-* closure $\overline{\gamma}$ in $\textrm{Sp}(A^{**})$). But the behaviour of $g$ outside $\overline{\gamma}$ is hard to control. It would be more convenient to have the idempotent related to $\gamma$ in the same algebra $A$ (or in the canonical image of $A$ in $A^{**}$). Unfortunately, this is impossible. But getting close to such a situation is offered by direct limits, where the passage from $n$ to $ n+1$ is in a sense reminiscent to the "Hilbert hotel method". 
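For orientation, we recall the classical model example; it is standard (see, e.g., \cite{G}) and serves here only as an illustration of the notions introduced above. Let $K=\overline{\mathbb{D}}$ be the closed unit disc in $\mathbb{C}$ and let $A(\mathbb{D})\subset C(\overline{\mathbb{D}})$ be the disc algebra of functions continuous on $\overline{\mathbb{D}}$ and analytic in $\mathbb{D}$. Then $A(\mathbb{D})$ is a natural uniform algebra on $\overline{\mathbb{D}}$, its Shilov boundary is the unit circle $\mathbb{T}$, the open disc $\mathbb{D}$ forms a single Gleason part, and every point of $\mathbb{T}$ is a one-point part. For the evaluation functional $\delta_z$ with $z\in\mathbb{D}$, the Poisson measure
\begin{equation*}
d\mu_z\left(e^{i\theta}\right)=\frac{1-|z|^2}{\left|e^{i\theta}-z\right|^2}\,\frac{d\theta}{2\pi}
\end{equation*}
is a representing measure supported on the Shilov boundary; for $z=0$ it is the normalized arc length measure, and the equality $\delta_0(f)=\int_{\mathbb{T}}f\,d\mu_0$ is simply the mean value property of analytic functions.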
Let $A$ be a natural uniform algebra on $K$. We define $A_0:=A$, $A_{n+1}:=A_n^{**}$ for $n=0,1,2,\ldots $. By $\mathcal{A}$ we denote the direct limit of the direct system $\langle A_n, \kappa_{n,m}\rangle$. Let $f\in A_n$ and $h\in A_m$ for some $n\le m$. The multiplication on $\mathcal{A}$ is given by \begin{equation*} [(f,n)]\cdot [(h,m)]:=[(\kappa_{n,m}(f)\cdot h,m)]. \end{equation*} Since $\langle A_n, \kappa_{n,m}\rangle$ is an isometric direct system of uniform algebras, its direct limit is again a uniform algebra. For $\mu\in M(\textrm{Sp}(A))$, where $A$ is a natural uniform algebra on $K$, we define a measure $k(\mu)\in M(\textrm{Sp}(C(K)^{**}))$ by duality \begin{equation*} \langle k(\mu), f\rangle := \langle f, \mu\rangle, \quad f\in C(\textrm{Sp}(C(K)^{**})). \end{equation*} In this definition we use the identification $M(K)^*=C(\textrm{Sp}(C(K)^{**}))$. The identification is valid because the second dual of $C(K)$ endowed with the Arens product is a commutative $C^*$-algebra. Hence, by the Gelfand--Naimark theorem, it is isometrically isomorphic to $C(\textrm{Sp}(C(K)^{**}))$. Let $\phi\in\textrm{Sp}(A)$. The mapping $k\colon\textrm{Sp}(A)\to\textrm{Sp}(C(K)^{**})$ is given by \begin{equation*} k(\phi)(F)=\int F\,dk(\nu ), \end{equation*} where $F\in C(K)^{**}$ and $\nu\in M_{\phi}(K)$. It can easily be shown that the function $k$ does not depend on the choice of the representing measure $\nu$. On the set $\textrm{Sp}(C(K)^{**})$ we introduce an equivalence relation $\simeq$ as follows: \begin{equation*} x\simeq y \ \textrm{if and only if} \ f(x)=f(y) \ \textrm{for all} \ f\in A^{**}. \end{equation*} Let $\Pi\colon\textrm{Sp}(C(K)^{**})\to\textrm{Sp}(C(K)^{**})\big/_\simeq\subset\textrm{Sp}(A^{**})$ denote the canonical surjection assigning to each element of $\textrm{Sp}(C(K)^{**})$ its equivalence class. Finally, we define $j\colon\textrm{Sp}(A)\to\textrm{Sp}(A^{**})$ by $j:=\Pi\circ k$. Extending the previous definitions to the $n$-th level spaces, we obtain functions $j_n\colon\textrm{Sp}(A_n)\to\textrm{Sp}(A_{n+1})$. For $n\le m$ we define $j_{n,m}\colon \textrm{Sp}(A_n)\to \textrm{Sp}(A_m)$ by \begin{equation*} j_{n,m}:=j_{m-1}\circ j_{m-2}\circ\dots\circ j_{n} \ \textrm{if} \ n<m, \end{equation*} and $j_{n,n}$ is defined to be the identity mapping. For $x\in\textrm{Sp}(A_n)$ we denote by $j_{n,\infty}(x)$ its embedding into the inverse limit $\displaystyle\lim_{\longleftarrow}\textrm{Sp}(A_n)$, defined by \begin{equation*} j_{n,\infty}(x):=(\kappa_{0,n}^*(x),\ldots,\kappa_{n-1,n}^*(x),x,j_{n,n+1}(x),j_{n,n+2}(x),\ldots). \end{equation*} It should be noted that the inverse limit of the spectra is taken in the category of compact Hausdorff spaces. In this case part (i) of Theorem 2.9 becomes the equality \begin{equation*} \lim_{\longleftarrow}\textrm{Sp}(A_n)=\textrm{Sp}(\mathcal{A}). \end{equation*} The following assumption will be needed: there exists an open Gleason part $G$ satisfying \begin{equation} j_{0,n}(G)=(\kappa_{0,n}^*)^{-1}(G) \ \textrm{for} \ n=1,2,\ldots,\infty \tag{$\dagger$} \label{eq:special}. \end{equation} It is shown in \cite{KR} that this assumption is satisfied for $A=A(G)$ if $G\subset\mathbb{C}^d$, $d>1$, is a strictly pseudoconvex domain. Let $G\subset\mathbb{C}^d$ be a strictly pseudoconvex domain. By $H^{\infty}(G)$ we denote the Banach algebra of all bounded analytic functions on $G$.
The Corona theorem states that the set $G$ (identified with the set of evaluation functionals) is dense (in Gelfand topology) in the spectrum of $H^{\infty}(G)$ the Banach algebra of all bounded analytic functions on $G$. The following abstract result was a key to the proof of Corona theorem by M. Kosiek and K. Rudol. \begin{theorem}[\cite{KR}] Let $A$ be a natural uniform algebra on $K$. If $G$ is an open Gleason part in $\emph{Sp}(A)$ satisfying \textup{(}$\dagger $\textup{)} then support of every representing measure $\mu_0$ for $A_n$ at any point $x_0$ in $j_{0,n}(G)$ lies in the Gelfand closure of $j_{0,n}(G)$. \end{theorem} Proof of the last theorem is first carried for the direct limit algebra (the $n=\infty$ case) and then "projected" by the natural mappings to the finite level algebras (with $n<\infty$). The detailed exposition of this construction can be found in \cite{KR}. One could ask if for arbitrary uniform algebra the support of any representing measure of a point in $G$ lies in the Gelfand closure of a Gleason part $G$. The answer to this question is no. The work \cite{C} of B. J. Cole assures an existence of natural uniform algebra $A$ on $K$ with a property that each point of $\textrm{Sp}(A)$ is a one-point Gleason part and the Shilov boundary of $A$ is not the entire set $K$. Since any point of $\textrm{Sp}(A)$ has a representing measure with its support contained in the Shilov boundary of $A$ the general result doesn't hold. It is worth noting that the construction of Cole's algebra also relies on the direct limit technique. \begin{thebibliography}{99} \bibitem{A} J.~A.~Abramovich, C.~D.~Aliprantis, {\em An invitation to Operator Theory}, American Mathematical Society, Providence, Rhode Island, 2002. \bibitem{AW} A.~van~Amstel, J.~H.~van~der~Walt, {\em Limits of vector lattices}, arXiv:2207.05459 \bibitem{C} B.~J.~Cole, {\em One-point parts and the peak point conjecture}, Ph.D. dissertation, Yale University, 1968. \bibitem{Co} J.~B.~Conway, {\em The Theory of Subnormal Operators}, vol. 36 of {\em Mathematical Surveys and Monographs}, American Mathematical Society, Providence, RI, USA, 1991. \bibitem{Da} H.~G.~Dales, {\em Banach Algebras and Automatic Continuity}, London Mathematical Society Monographs, vol. 24 (Clarendon Press, Oxford, 2000). \bibitem{D} H.~G.~Dales, F.~K.~Dashiell, Jr., A.~T.-M.~Lau, and D.~Strauss, {\em Banach Spaces of Continuous Functions as Dual Spaces}, CMS Books in Mathematics/Ouvrages de Math\'ematiques de la SMC, Springer-Verlag, New York, 2016. \bibitem{WF} W.~Filter, {\em Inductive limits of Riesz spaces}. In B. Stankovi\'{c}, E. Pap, S. Pilipovi\'{c}, and V.S. Vladimirov, editors, Generalized functions, convergence structures, and their applications (Dubrovnik, 1987), pages 383-392. Plenum Press, New York, 1988. Proceedings of the International Conference held in Dubrovnik, June 23-27, 1987. \bibitem{G} T.~W.~Gamelin, {\em Uniform Algebras}, Prentice Hall, Inc., Englewood Clifs, N.J. 1969. \bibitem{SG} S.~Gwizdek, \emph{Corona theorem for strictly pseudoconvex domains}, Opuscula Math. {\bf 41}, no. 6 (2021), 843-848. \bibitem{HS} M.~Hakim, N.~Sibony, {\em Spectre de $A(\overline\Omega)$ pour les domains borne\'{e}s faiblement pseudoconvexes r\'{e}guliers}, J. Funct. Anal. {\bf 37} (1980), 127-135. \bibitem{KR} M.~Kosiek, K.~Rudol, {\em Corona Theorem}, arXiv:2106.15683 \bibitem{M} T.~J.~Morrison, {\em Functional Analysis: An Introduction to Banach space theory}, John Wiley \& Sons, 2011. 
\bibitem{Se} Z.~Semadeni, {\em Banach Spaces of Continuous Functions}, Volume I, PWN, Warszawa 1971. \bibitem{S} E.~L.~Stout, {\em The Theory of Uniform Algebras}, Bogden \& Quigley, New York, 1971. \end{thebibliography} \end{document}
2205.14314v1
http://arxiv.org/abs/2205.14314v1
On a singular limit of the Kobayashi--Warren--Carter energy
\documentclass{amsart} \usepackage{amssymb,amsmath,accents} \usepackage{amscd} \usepackage{amsfonts,amsthm,mathrsfs} \usepackage{setspace} \usepackage{graphics} \usepackage[dvips]{graphicx} \usepackage{latexsym} \usepackage{xcolor} \def\qed{\hfill $\Box$} \usepackage{url} \newtheorem{theorem}{Theorem} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{assumption}[theorem]{Assumption} \newtheorem{claim}[theorem]{Claim} \newtheorem{statement}[theorem]{Statement} \theoremstyle{definition} \newtheorem{definition}[theorem]{Definition} \newtheorem{remark}[theorem]{Remark} \newtheorem{remarks}[theorem]{Remarks} \newtheorem{example}{Example}[section] \newcommand{\id}{\,\mathrm{d}} \providecommand{\keywords}{ \small \textbf{\textit{Keywords:}}} \numberwithin{equation}{section} \begin{document} \title{On a singular limit of the Kobayashi--Warren--Carter energy} \author[Y.~Giga]{Yoshikazu Giga} \address[Y.~Giga]{Graduate School of Mathematical Sciences, The University of Tokyo, 3-8-1 Komaba, Meguro-ku, Tokyo 153-8914, Japan} \email{[email protected]} \author[J.~Okamoto]{Jun Okamoto} \address[J.~Okamoto]{Kyoto University Institute for the Advanced Study of Human Biology, Yoshida-Konoe-cho, Sakyo-ku, Kyoto 606-8501, Japan} \email{[email protected]} \author[K.~Sakakibara]{Koya Sakakibara} \address[K.~Sakakibara]{Department of Applied Mathematics, Faculty of Science, Okayama University of Science, 1-1 Ridaicho, Okayama-shi, Okayama 700-0005, Japan; RIKEN iTHEMS, 2-1 Hirosawa, Wako-shi, Saitama 351-0198, Japan} \email{[email protected]} \author[M.~Uesaka]{Masaaki Uesaka} \address[M.~Uesaka]{Arithmer Inc., 1-6-1 Roppongi, Minato-ku, Tokyo 106-6040, Japan; Graduate School of Mathematical Sciences, The University of Tokyo, 3-8-1 Komaba, Meguro-ku, Tokyo 153-8914, Japan} \email{[email protected]} \keywords{Gamma limit, Modica--Mortola functional, Kobayashi--Warren--Carter energy, multi-dimensional domain.} \subjclass[2010]{49J45, 82B26.} \begin{abstract} By introducing a new topology, a representation formula of the Gamma limit of the Kobayashi--Warren--Carter energy is given in a multi-dimensional domain. A key step is to study the Gamma limit of a single-well Modica--Mortola functional. The convergence introduced here is called the sliced graph convergence, which is finer than conventional $L^1$ convergence, and the problem is reduced to a one-dimensional setting by a slicing argument. \end{abstract} \maketitle \tableofcontents \section{Introduction} \label{S1} We consider the Kobayashi--Warren--Carter energy, which is a sum of a weighted total variation and a single-well Modica--Mortola energy. Their explicit forms are \begin{align} E^\varepsilon_\mathrm{KWC}(u,v) &:= \int_\Omega \alpha(v)|Du| + E^\varepsilon_\mathrm{sMM}(v), \label{eq:E_KWC}\\ {E^\varepsilon_\mathrm{sMM}(v)} &:= \frac\varepsilon2 \int_\Omega |\nabla v|^2 {\mathrm{d}\mathcal{L}^N} + \frac{1}{2\varepsilon} \int_\Omega F(v){\mathrm{d}\mathcal{L}^N},\notag \end{align} where $\Omega$ is a bounded domain in $\mathbf{R}^N$ with the Lebesgue measure $\mathcal{L}^N$, $\alpha\ge0$, $\varepsilon>0$ is a small parameter, and $F$ is a single-well potential which takes its minimum at $v=1$. Typical examples of $\alpha$ and $F$ are $\alpha(v)=v^2$ and $F(v)=(v-1)^2$, respectively. These are the original choices in \cite{KWC1,KWC3}. The first term in \eqref{eq:E_KWC} is a weighted total variation with weight $\alpha(v)$. 
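As a heuristic one-dimensional illustration of the competition between the two terms (not used in any proof below), let $\Omega=(-1,1)$, let $u$ be a step function with a single jump of height $h$ at the origin, and let $v$ be continuous. Then $Du=h\,\delta_0$, so the weighted total variation reduces to the value of the weight at the jump point,
\begin{equation*}
\int_\Omega \alpha(v)\,|Du| = \alpha\bigl(v(0)\bigr)\,|h|.
\end{equation*}
Thus the first term in \eqref{eq:E_KWC} becomes cheaper if $v$ dips below $1$ near the jump of $u$, while the single-well term $E^\varepsilon_\mathrm{sMM}$ penalizes such a dip; the singular limit described below quantifies this balance.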
This energy was first introduced by {\cite{KWC1,KWC3}} to model motion of {grain boundaries of polycrystal} which have some structures like the averaged angle of each grain. This energy is quite popular in materials science. We are interested in a singular limit of the Kobayashi--Warren--Carter energy $E^\varepsilon_\mathrm{KWC}$ as $\varepsilon$ tends to zero. If we assume boundedness of $E^\varepsilon_\mathrm{KWC}$ for a sequence $(u,v_\varepsilon)$ for fixed $u$, {then} $v_\varepsilon$ tends to a unique minimum of $F$ as $\varepsilon\to0$ in the $L^2$ sense. However, if $u$ has a jump discontinuity, its convergence is not uniform near such places, even in a one-dimensional setting, suggesting that we have to introduce a finer topology than $L^2$ or $L^1$ topology. In fact, in a one-dimensional setting, the notion of graph convergence of $v^\varepsilon$ to a set-valued function is introduced, and representations of Gamma limits of $E^\varepsilon_\mathrm{KWC}$ and $E^\varepsilon_\mathrm{sMM}$ are given in \cite{GOU}. In this paper, we extend this one-dimensional results to a multi-dimensional setting. For this purpose, we introduce a new concept of convergence called sliced graph convergence. Roughly speaking, it requires graph convergence on each line. Under this convergence in $v_\varepsilon$ and the $L^1$-convergence in $u$, one is able to derive a representation formula for the Gamma limit of $E^\varepsilon_\mathrm{KWC}$ as $\varepsilon\to0$. It is \begin{align*} E^0_\mathrm{KWC}(u,\Xi) &:= \alpha(1) \int_{\Omega\backslash J_u} |Du| + \int_{J_u} \min_{\xi^-\leq\xi\leq\xi^+} \alpha(\xi) \left|u^+ - u^- \right| {\mathrm{d}\mathcal{H}^{N-1}} + {E^0_\mathrm{sMM}(\Xi)}, \\ {E^0_\mathrm{sMM}(\Xi)} &:= 2 \int_\Sigma \left\{ G(\xi^-) + G(\xi^+) \right\} {\mathrm{d}\mathcal{H}^{N-1}}\notag \end{align*} when $v_\varepsilon$ converges to a set-valued function $\Xi$ of form \begin{equation*} \Xi(z) = \begin{cases} \left[\xi^-(z),\xi^+(z)\right],&z\in\Sigma,\\ \{1\},&z\not\in\Sigma, \end{cases} \end{equation*} where $\Sigma$ is a countably $N-1$ rectifiable set, and $\xi^\pm$ are {$\mathcal{H}^{N-1}$-measurable functions with $\xi^- \le 1 \le \xi^+$. Here} $\mathcal{H}^{N-1}$ denotes the $N-1$ dimensional Hausdorff measure. The function $G$ is defined by \[ G(\sigma) := \left| \int^\sigma_1 \sqrt{F(\tau)} {\mathrm{d}\tau} \right|. \] The functions $u^+$ and $u^-$ denote upper and lower approximate limits in the measure-theoretic sense \cite{Fe}. In the case $\alpha(v)=v^2$, we see \[ E^0_\mathrm{KWC}(u,\Xi) = \int_{\Omega\backslash J_u} |Du| + \int_{J_u\cap\Sigma} \left(\xi^-_+ \right)^2 \left|u^+ - u^- \right| {\mathrm{d}\mathcal{H}^{N-1}} + {E^0_\mathrm{sMM}(\Xi)}, \] where $a_+$ denotes the positive part of a function $a$, i.e., $a_+=\max(a,0)$. In \cite{GOU}, the case $\alpha(v)=v^2$ is discussed for a one-dimensional setting. (Unfortunately, $\xi^-_+$ has been misprinted as $\xi^-$ in \cite{GOU}.) When $F(v)=(v-1)^2$, \[ {E^0_\mathrm{sMM}(\Xi)} = \int_\Sigma \left\{ (\xi^- -1)^2 + (\xi^+ -1)^2 \right\} {\mathrm{d}\mathcal{H}^{N-1}}. \] In a one-dimensional setting, the results in \cite{GOU} gave a full characterization of the Gamma limit: the compactness result and the mere convergence result. On the other hand, it is unclear what kind of set-valued functions should be considered {as} the limit of $v_\varepsilon$ in a multi-dimensional setting, assuming $E^\varepsilon_\mathrm{sMM}(v_\varepsilon)$ is bounded. A compactness result is still missing in a multi-dimensional setting. 
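The form of $E^0_\mathrm{sMM}$ can be guessed from a standard one-dimensional heuristic, which we record only for the reader's convenience; the rigorous statement is the one-dimensional result of \cite{GOU} invoked later. By Young's inequality,
\begin{equation*}
\frac{\varepsilon}{2}|v'|^2 + \frac{1}{2\varepsilon}F(v) \,\geq\, \sqrt{F(v)}\,|v'| \,=\, \left|\left(W\circ v\right)'\right|, \qquad W(\sigma):=\int_1^\sigma \sqrt{F(\tau)}\,{\mathrm{d}\tau},
\end{equation*}
so if, along a line, $v_\varepsilon$ descends from values close to $1$ down to $\xi^-\leq1$ and returns, the one-dimensional energy of this excursion is at least $2|W(\xi^-)|=2G(\xi^-)$, while an excursion above $1$ up to $\xi^+$ costs at least $2G(\xi^+)$. Summing these contributions over the singular set explains the density $2\left\{G(\xi^-)+G(\xi^+)\right\}$ in $E^0_\mathrm{sMM}$.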
The basic idea is to reduce a multi-dimensional setting to a one-dimensional setting by a slicing argument based on the following disintegration \[ \int_\Omega f(z) {\mathrm{d}\mathcal{L}^N}(z) = \int_{{\pi_\nu(\Omega_\nu)}} \left(\int_{\pi^{-1}_\nu(x)} f\id\mathcal{H}^1\right)\id\mathcal{L}^{N-1}(x), \] where $\pi_\nu$ denotes the projection of $\mathbf{R}^N$ to the {subspace} orthogonal to a unit vector $\nu$, and $\Omega_\nu=\pi_\nu(\Omega)$. This idea is often used to study the singular limit of the Ambrosio--Tortorelli functional \[ \mathcal{E}^\varepsilon (u,v) = \int_\Omega v^2 |\nabla u|^2\id\mathcal{L}^N + \lambda \int_\Omega (u-h)^2\id\mathcal{L}^N + {E^\varepsilon_\mathrm{sMM}(v)}, \quad \lambda \geq 0 \] as in \cite{AT,AT2,FL}, where $h$ is a given $L^2$ function and $F(v)=(v-1)^2$. This problem can be handled in $L^1$ topology, and its limit is known to be the Mumford--Shah functional \[ \mathcal{E}^0 (u,K) = \int_{\Omega\backslash K} |\nabla u|^2\id\mathcal{L}^N + \mathcal{H}^{N-1}(K) + \lambda \int_\Omega (u-h)^2\id\mathcal{L}^N, \] where $K$ is a countably $N-1$ rectifiable set. In this case, in our language, it suffices to consider the case $\xi^-=0$, $\xi^+=1$ on $\Sigma=K$ so that \[ {\mathcal{E}^0_\mathrm{sMM}(\Xi) }= \mathcal{H}^{N-1}(K). \] In our case, however, as observed in the one-dimensional problem \cite{GOU}, it is reasonable to study non-constant $\xi^\pm$. Moreover, the fidelity term including $\lambda$ is also allowed in our case. Our first main result is the Gamma-convergence of \[ {E^\varepsilon_\mathrm{sMM} (v)} + \int_J \alpha(v) j(y) {\mathrm{d}\mathcal{H}^{N-1}}(y) \] for a given countably rectifiable set $J$, where $j$ is an $ \mathcal{H}^{N-1}${-}integrable function on $J$. This energy is a special case of $E^\varepsilon_\mathrm{KWC} (u,v)$ when $u$ has a jump in $J$ while it is constant outside $J$. To show liminf inequality, we decompose $\Sigma$ into a disjoint union of compact sets {$\{K_i\}_i$} lying in almost flat hypersurfaces. Then we reduce the problem in a one-dimensional setting like \cite{FL}. To show limsup inequality, we approximate $\xi^\pm$ so that they are constants in each $K_i$. This approximation procedure is quite involved because one should approximate not only energies but also approximate in the sliced graph topology. The basic choice of recovery sequences is similar to \cite{AT,FL}. This paper's main result is the Gamma-convergence of the Kobayashi--Warren--Carter energy. The additional difficulty comes from the $\int\alpha(v)|Du|$ part, and this part can be carried out by decomposing the domain of integration into two parts: place close to $\Sigma$ of the limit $\Xi$ of $v_\varepsilon$, and outside such place. The most difficult problem is how to choose a suitable topology for $v_\varepsilon$ to $\Xi$. We take a slice, a straight line passing through $x$ with direction $\nu$ for $\mathcal{L}^{N-1}$-almost every $x\in\pi_\nu(\Omega)$ for some directions $\nu$. We need several concepts of set-valued functions to formulate the topology, including measurability, as discussed in \cite{AF}. The compactness is missing for the convergence of $E^\varepsilon_\mathrm{KWC}$ to $E^0_\mathrm{KWC}$. Therefore, we do not know whether a minimizer of $E^0_\mathrm{KWC}$ exists under suitable boundary conditions or a minimizer of energy like $E^0_\mathrm{KWC}+\lambda\int_\Omega(u-h)^2 d\mathcal{L}^N$ exists. 
If one minimizes $E^0_\mathrm{KWC}$ in the $\Xi$ variable, i.e., \[ TV_\mathrm{KWC}(u) := \inf_{\Xi\in\mathcal{A}_0} E^0_\mathrm{KWC}(u,\Xi), \] this can be calculated as \[ TV_\mathrm{KWC}(u) = \int_\Sigma \sigma \left( |u^+ - u^-| \right) {\mathrm{d}\mathcal{H}^{N-1}} + \int_{\Omega\backslash J_u}|Du| \] with \begin{align*} \sigma(r) & := \min_{\xi^-,\xi^+} \left\{ r\min_{\xi^-\leq\xi\leq\xi^+} \alpha(\xi) + 2 \left( G(\xi^-)+G(\xi^+) \right) \right\} \\ & = \min_{\xi^-} \left\{ r\min_{\xi^-\leq\xi\leq1} \alpha(\xi) + 2G(\xi^-) \right\},\ r \geq 0 \end{align*} if $\alpha(v)\geq\alpha(1)$ for $v\geq1$. This $\sigma$ is always concave. If $F(v)=(v-1)^2$, then \[ \sigma(r) = \min_{\xi^-} \left\{ r(\xi^-_+)^2 + (\xi^- - 1)^2 \right\} = \frac{r}{r+1}. \] In other words, \[ TV_\mathrm{KWC}(u) = \int_\Sigma \frac{|u^+ - u^-|}{1+|u^+ - u^-|}{\mathrm{d}\mathcal{H}^{N-1}} + \int_{\Omega\backslash J_u}|Du|. \] This functional is a kind of total variation but has different aspects. For example, if $u$ is a piecewise constant monotone increasing function in a one-dimensional setting, the total variation $TV(u)=\int_\Omega|Du|$ equals $\sup u-\inf u$. This case is often called a staircase problem since $TV$ does not care about the number and size of jumps for monotone functions. In contrast to $TV$, the $TV_\mathrm{KWC}$ costs less if the number of jumps is smaller, provided that each jump is the same size and $\sup u-\inf u$ is the same. The energy like $TV_\mathrm{KWC}$ for a piecewise constant function is derived as the surface tension of grain boundaries in polycrystals \cite{LL}, which is an active area, as studied by \cite{GaSp}. The Modica--Mortola functional is the sum of Dirichlet energy and potential energy. The Gamma limit problem was first studied in \cite{MM1}. Since then, there has been much literature studying the Gamma-convergence problems. If $F$ is a double-well potential, say $F(v)=(v^2-1)^2$, then the Modica--Mortola functional reads \[ E^\varepsilon_\mathrm{dMM}(v) = \frac{\varepsilon}{2} \int_\Omega |\nabla v|^2 {\mathrm{d}\mathcal{L}^N} + \frac{1}{2\varepsilon} \int_\Omega (v^2-1)^2 {\mathrm{d}\mathcal{L}^N}. \] If $E^\varepsilon_\mathrm{dMM}(v_\varepsilon)$ is bounded, $v_\varepsilon(z)$ converges to either $1$ or $-1$ for $\mathcal{L}^N$-almost all $z\in\Omega$ by taking a subsequence. The interface between two states, $\{\lim v_\varepsilon=1\}$ {and} $\{\lim v_\varepsilon=-1\}$, is called a transition interface. In a one-dimensional setting, its Gamma limit is considered in $L^1$ topology and is characterized by the number of transition points \cite{MM2}. This result is extended to a multi-dimensional setting in \cite{M,St}, and the Gamma limit is a constant multiple of the surface area of the transition interface. However, the topology of convergence of $v_\varepsilon$ is either {in} $L^1$ {topology or in measure} (including almost everywhere convergence). If we consider its Gamma limit in the sliced graph convergence, we expect that the limit equals \[ E^0_\mathrm{dMM}(\Xi) = 2 \int_\Sigma \left\{G_-(\xi^-)+G_+(\xi^+) \right\} {\mathrm{d}\mathcal{H}^{N-1}} + G_-(1)\mathcal{H}^{N-1} (\Sigma) \] for \begin{equation*} \Xi(z) := \left \{ \begin{array}{ll} \left[ \xi^-(z), \xi^+(z) \right], &{\text{for}}\ z \in \Sigma, \\ \text{either}\ 1\ \text{or}\ -1, &{\text{otherwise}}, \end{array} \right. \end{equation*} where $[-1,1]\subset[\xi^-,\xi^+]$. Here, $G_\pm$ is defined as \[ G_\pm(\sigma) = \left| \int^\sigma_{\pm 1} \sqrt{F(\tau)}{\mathrm{d}\tau} \right|. 
\] The first term in $E_{\mathrm{dMM}}^0(\Xi)$ is invisible in $L^1$ convergence, while the second term is the Gamma limit of $E_\mathrm{dMM}$ in the $L^1$ sense. We do not give a proof in this paper. If compactness is available, the Gamma-convergence yields the convergence of local minimizers and of the global minimizer. For $L^1$ convergence, based on this strategy, the convergence of a local minimizer has been established in \cite{KS} when the limit is a strict local minimizer. The convergence of critical points is outside the framework of a general theory and should be discussed separately as in \cite{HT}. In recent years, the Gamma limit of the double-well Modica--Mortola functional with spatial inhomogeneity has been studied from a homogenization point of view (see, e.g.\ \cite{CFHP1}, \cite{CFHP2}), but still under $L^1$ convergence or convergence in measure. The Mumford--Shah functional $\mathcal{E}^0$ is difficult to handle because one of the variables is a set $K$. This is the motivation for introducing $\mathcal{E}^\varepsilon$, called the Ambrosio--Tortorelli functional, to approximate $\mathcal{E}^0$ in \cite{AT}. The Gamma limit of $\mathcal{E}^\varepsilon$ is by now well studied \cite{AT,AT2}, and with weights \cite{FL}. The convergence of critical points is studied in \cite{FLS} in a one-dimensional setting; the higher-dimensional case was studied quite recently by \cite{BMR} by adjusting the idea of \cite{LSt}. The Ambrosio--Tortorelli approximation is now used in various problems, including the decomposition of brittle fractures \cite{FMa} and the Steiner problem \cite{LS,BLM}. However, in all these works, the energy for $u$ is a $v$-weighted Dirichlet energy, not a $v$-weighted total variation energy. A singular limit of the gradient flow of the double-well Modica--Mortola functional is well studied. The sharp interface limit, i.e., $\varepsilon\to0$, yields the mean curvature flow of an interface. For an early stage of development, see \cite{BL,XC,MSch} on convergence to a smooth mean curvature flow and \cite{ESS} on convergence to a level-set mean curvature flow \cite{G}. For more recent studies, see, for example, \cite{AHM,To}. The gradient flow of the Kobayashi--Warren--Carter energy $E^\varepsilon_\mathrm{KWC}$ is proposed in \cite{KWC1} (see also \cite{KWC2,KWC3}) to model grain boundary motion when each grain has some structure. Its explicit form is \begin{align*} \tau_1 v_t &= s \Delta v + (1-v) - 2sv |\nabla u|, \\ \tau_0 v^2 u_t &= s \operatorname{div} \left( v^2\frac{\nabla u}{|\nabla u|}\right), \end{align*} where $\tau_0$, $\tau_1$, and $s$ are positive parameters. This system is regarded as the gradient flow of $E^\varepsilon_\mathrm{KWC}$ with $F(v)=(v-1)^2$, $\varepsilon=1$, {and} $\alpha(v)=v^2$. Because of the presence of the singular term $\nabla u/|\nabla u|$, the meaning of the solution itself is non-trivial since, even if $v\equiv1$, the flow is the total variation flow, and a non-local quantity determines the speed \cite{KG}. At this moment, the well-posedness of its initial-value problem is an open question. If the second equation is replaced by \[ \tau_0 (v^2+{\delta}) u_t = s \operatorname{div} \left( (v^2+\delta') \nabla u/|\nabla u| + \mu\nabla u \right) \] with $\delta>0$, $\delta'\geq0$ and $\mu\geq0$ satisfying $\delta'+\mu>0$, the existence and large-time behavior of solutions are established in \cite{IKY,MoSh,MoShW1,SWat,SWY,WSh} under several homogeneous boundary conditions.
However, its uniqueness is only proved in a one-dimensional setting under $\mu>0$ \cite[Theorem 2.2]{IKY}. These results can be extended to the cases of non-homogeneous boundary conditions. Under non-homogeneous Dirichlet boundary conditions, we are able to find various structural patterns of steady states; see \cite{MoShW2}. The singular limit of the gradient flow of $E^\varepsilon_\mathrm{KWC}$ is not known even if $\alpha(v)=v^2+\delta'$, $\delta'>0$. In \cite{ELM}, a gradient flow of \[ E(u,\Sigma) = \int_\Sigma \sigma \left(\left|u^+-u^-\right|\right){\mathrm{d}\mathcal{H}^{N-1}}, \quad N=2 \] is studied. Here $u$ is a piecewise constant function outside a union $\Sigma$ of smooth curves, including triple junction, and $\sigma$ is a given non-negative function. Our $TV_\mathrm{KWC}$ is a typical example. They take variation of $E$ not only $u$ but also of $\Sigma$ and derive a weighted curvature flow with evolutions of boundary values of $u$ together with motion of triple junction. It is not clear that the singular limit of the gradient flow of $E^\varepsilon_\mathrm{KWC}$ gives this flow since, in the total variation flow, the variation is taken only in the direction of $u$ and does not include domain variation, which is the source of the mean curvature flow. This paper is organized as follows. In Section \ref{SSGC}, we introduce the notion of sliced graph convergence. In Section \ref{SLSC}, we discuss the liminf inequality of the singular limit of $E^\varepsilon_\mathrm{sMM}$ with an additional term under the sliced graph convergence. In Section \ref{SCRS}, we discuss the limsup inequality by constructing recovery sequences. In Section \ref{SLKWC}, we discuss the singular limit of $E^\varepsilon_\mathrm{KWC}$. {The results of this paper are based on the thesis \cite{O} of the second author.} \section{Sliced graph convergence} \label{SSGC} In this section, we introduce the notion of sliced graph convergence. We first recall a few basic notions of a set-valued function, especially on the measurability. Consequently, we review the notion of the slicing argument and introduce the concept of sliced graph convergence. \subsection{A set-valued function and its measurability} We first recall a few basic notions of a set-valued function; see \cite{AF}. Let $M$ be a Borel set in $\mathbf{R}^d$ and $\Gamma$ be a set-valued function on $M$ with values in $2^{\mathbf{R}^m}\backslash\{\emptyset\}$ such that $\Gamma(z)$ is closed in $\mathbf{R}^m$ for all $z\in M$. We say that such $\Gamma$ is a closed set-valued function. We say that $\Gamma$ is \emph{Borel measurable} if $\Gamma^{-1}(U)$ is a Borel set whenever $U$ is an open set in $\mathbf{R}^m$. Here, the inverse $\Gamma^{-1}(U)$ is defined as \[ \Gamma^{-1}(U) := \left\{ z \in M \bigm| \Gamma(z) \cap U \neq \emptyset \right\}. \] Similarly, we say that $\Gamma$ is \emph{Lebesgue measurable} if $\Gamma^{-1}(U)$ is Lebesgue measurable whenever $U$ is an open set. Assume that $M$ is closed. We say that $\Gamma$ is \emph{upper semicontinuous} if $\operatorname{graph}\Gamma$ is closed in $M\times\mathbf{R}^m$, where \[ \operatorname{graph}\Gamma := \left\{ z=(x,y) \in M \times \mathbf{R}^m \bigm| y \in \Gamma({x}),\ x \in M \right\}. \] If $\Gamma$ is upper semicontinuous, $\Gamma$ is Borel measurable \cite{AF}. Assume that $M$ is compact. Then, $\operatorname{graph}\Gamma$ is compact if it is closed. 
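As a simple example, of exactly the type of set-valued function that appears as a limit later in this paper, take $M=[-1,1]$, $m=1$, fix $\xi^-\leq1\leq\xi^+$, and set
\begin{equation*}
\Gamma(z)=
\begin{cases}
\{1\}, & z\neq0,\\
[\xi^-,\xi^+], & z=0.
\end{cases}
\end{equation*}
Its graph is the union of the closed segment $[-1,1]\times\{1\}$ and the vertical segment $\{0\}\times[\xi^-,\xi^+]$, hence closed, so $\Gamma$ is upper semicontinuous and, in particular, Borel measurable, although it is not single-valued at $z=0$.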
We set \[ \mathcal{C} = \left\{ \Gamma \mid \operatorname{graph}\Gamma\ \text{is compact in}\ M\times \mathbf{R}^m\ \text{and}\ \Gamma(x)\neq\emptyset \ \text{for}\ {x} \in M\right\}. \] For $\Gamma_1, \Gamma_2 \in \mathcal{C}$, we set \[ d_g(\Gamma_1, \Gamma_2) := d_H(\operatorname{graph}\Gamma_1, \operatorname{graph}\Gamma_2), \] where $d_H$ denotes the Hausdorff distance of two sets in $M\times\mathbf{R}^m$, defined by \[ d_H(A,B) := \max \left\{ \sup_{z \in A} \operatorname{dist}(z,B), \sup_{w \in B} \operatorname{dist}(w,A) \right\} \] for $A,B \subset M\times\mathbf{R}^m$, and \[ \operatorname{dist}(z,B) := \inf_{w \in B} \operatorname{dist}(z,w),\quad \operatorname{dist}(z,w) = |z-w|, \] where $|\cdot|$ denotes the Euclidean norm in $\mathbf{R}^d\times\mathbf{R}^m$. We recall a fundamental property of a Borel measurable set-valued function \cite[Theorem 8.1.4]{AF}. \begin{theorem} \label{MBA} Let $\Gamma$ be a closed set-valued function on a Borel set $M$ in $\mathbf{R}^d$ with values in $2^{\mathbf{R}^m}\backslash\{\emptyset\}$. The following three statements are equivalent: \begin{enumerate} \item[(i)] $\Gamma$ is Borel (resp.\ Lebesgue) measurable. \item[(i\hspace{-1pt}i)] $\operatorname{graph}\Gamma$ is a Borel set (resp.\ an $\mathbf{M}\otimes\mathbf{B}$ measurable set) in $M\times\mathbf{R}^m$. \item[(i\hspace{-1pt}i\hspace{-1pt}i)] There is a sequence of Borel (resp.\ Lebesgue) measurable functions $\{f_j\}^\infty_{j=1}$ such that \[ \Gamma(z) = \overline{\left\{f_j(z)\bigm| j=1,2,\ldots\right\}} \quad \text{for}\ z\in M. \] \end{enumerate} Here $\mathbf{M}$ denotes the $\sigma$-algebra of Lebesgue measurable sets in $M$ and $\mathbf{B}$ denotes the $\sigma$-algebra of Borel sets in $\mathbf{R}^m$. \end{theorem} \subsection{The definition of the sliced graph convergence} We next recall the notation often used in the slicing argument \cite{FL}. Let $S$ be a set in $\mathbf{R}^N$. Let $S^{N-1}$ denote the unit sphere in $\mathbf{R}^N$ centered at the origin, i.e., \[ S^{N-1} = \left\{ \nu \in \mathbf{R}^N \bigm| |\nu| = 1 \right\}. \] For a given $\nu$, let $\Pi_\nu$ denote the hyperplane whose normal equals $\nu$. In other words, \[ \Pi_\nu := \left\{ x \in \mathbf{R}^N \bigm| \langle x,\nu \rangle = 0 \right\}, \] where $\langle \ ,\ \rangle$ denotes the standard inner product in $\mathbf{R}^N$. For $x\in\Pi_\nu$, let $S_{x,\nu}$ denote the intersection of {$S$} and the whole line with direction $\nu$ passing through $x$; that is, \[ S_{x,\nu} := \left\{ x + t \nu \bigm| t \in S^1_{x,\nu} \right\}, \] where \[ S^1_{x,\nu} := \left\{ t \in \mathbf{R} \bigm| x + t\nu \in S \right\} \subset \mathbf{R}. \] We also set \[ S_\nu := \left\{ x \in \Pi_\nu \bigm| S_{x,\nu} \neq \emptyset \right\}. \] See Figure \ref{FSC}. \begin{figure}[htb] \centering \includegraphics[width=5cm]{GOSUfigure_1.png} \caption{Slicing} \label{FSC} \end{figure} For a given function $f$ on $S$, we associate it with a function $f_{x,\nu}$ on $S^1_{x,\nu}$ defined by \[ f_{x,\nu}(t) := f(x + t \nu). \] Let $\Omega$ be a bounded domain in $\mathbf{R}^N$, and let $\mathcal{T}$ denote the set of all Lebesgue measurable (closed) set-valued functions $\Gamma:\Omega\to2^\mathbf{R}$. For $\nu \in S^{N-1}$, we consider $\Omega^1_{x,\nu}\subset\mathbf{R}$ and the (sliced) set-valued function $\Gamma_{x,\nu}$ on $\Omega^1_{x,\nu}$ defined by $\Gamma_{x,\nu}(t)=\Gamma(x+t\nu)$. Let $\overline{\Gamma_{x,\nu}}$ denote its closure, defined on the closure $\overline{\Omega^1_{x,\nu}}$.
Namely, it is uniquely determined so that the graph of $\overline{\Gamma_{x,\nu}}$ equals the closure of $\operatorname{graph}\Gamma_{x,\nu}$ in $\mathbf{R}\times\mathbf{R}$. As with usual measurable functions, $\Gamma^{(1)}$ and $\Gamma^{(2)}$ belonging to $\mathcal{T}$ are identified if $\Gamma^{(1)}(z)=\Gamma^{(2)}(z)$ for $\mathcal{L}^N$-a.e.\ $z\in\Omega$. By Fubini's theorem, $\Gamma^{(1)}_{x,\nu}(t)=\Gamma^{(2)}_{x,\nu}(t)$ for $\mathcal{L}^1$-a.e.\ $t$ for $\mathcal{L}^{N-1}$-a.e.\ $x\in\Omega_\nu$. With this identification, we consider its equivalence class, and we call each $\Gamma^{(1)}$, $\Gamma^{(2)}$ a representative of this equivalence class. For $\nu\in S^{N-1}$, we define the subset $\mathcal{B}_\nu \subset \mathcal{T}$ as follows: $\Gamma \in \mathcal{B}_\nu$ if, for a.e.\ $x\in\Omega_\nu$, \begin{itemize} \item There is a representative of $\Gamma_{x,\nu}$ such that $\overline{\Gamma_{x,\nu}} = \Gamma_{x,\nu}$ on $\Omega^1_{x,\nu}$; \item $\operatorname{graph}\overline{\Gamma_{x,\nu}}$ is compact in $\overline{\Omega^1_{x,\nu}}\times\mathbf{R}$. \end{itemize} We note that if $\Gamma^{(1)},\Gamma^{(2)}\in\mathcal{B}_\nu$, then $\overline{\Gamma^{(1)}_{x,\nu}},\overline{\Gamma^{(2)}_{x,\nu}}\in\mathcal{C}$ with $M=\overline{\Omega^1_{x,\nu}}$ by a suitable choice of representative of $\Gamma^{(1)}_{x,\nu}, \Gamma^{(2)}_{x,\nu}$, which follows from the definition. In this situation, we have the following fact: \begin{lemma} The function \[ f(x) = d_g \left( \overline{\Gamma^{(1)}_{x,\nu}},\overline{\Gamma^{(2)}_{x,\nu}} \right) = d_H \left( \operatorname{graph}\Gamma^{(1)}_{x,\nu},\operatorname{graph}\Gamma^{(2)}_{x,\nu} \right) \] is Lebesgue measurable in $\Omega_\nu$. \end{lemma}\label{lemma:distance} \begin{proof} Since each Lebesgue measurable function $f$ has a Borel measurable function $\overline{f}$ with $f(z)=\overline{f}(z)$ for $\mathcal{L}^N$-a.e.\ $z\in\Omega$, by Theorem~\ref{MBA}~(i\hspace{-1pt}i\hspace{-1pt}i), there is a Borel measurable representative of $\Gamma$. By Theorem~\ref{MBA}~(i\hspace{-1pt}i), $\operatorname{graph}\Gamma$ is a Borel set for the Borel representative of $\Gamma$. Since the graph of the set-valued function $T:x\longmapsto\operatorname{graph}\overline{\Gamma_{x,\nu}}$ on $\Omega_\nu$ equals $\operatorname{graph}\Gamma$ for $\Gamma\in\mathcal{B}_\nu$ by taking a suitable representative of $\Gamma$, we see that $T$ should be Borel measurable if $\Gamma$ is Borel measurable by Theorem~\ref{MBA}~(i\hspace{-1pt}i). (Note that $T(x)$ is a compact set in $\mathbf{R}\times\mathbf{R}$.) Since $d_H$ is continuous, the map $f(x)$ should be measurable. \end{proof} We now introduce a metric on $\mathcal{B}_\nu$ of form \[ d_\nu \left( \Gamma^{(1)},\Gamma^{(2)} \right) := \int_{\Omega_\nu} \frac{d_g \left( \overline{\Gamma^{(1)}_{x,\nu}},\overline{\Gamma^{(2)}_{x,\nu}} \right)}{1+d_g \left( \overline{\Gamma^{(1)}_{x,\nu}},\overline{\Gamma^{(2)}_{x,\nu}} \right)} \id\mathcal{L}^{N-1}(x) \] for $ \Gamma^1,\Gamma^2\in\mathcal{B}_\nu$, where $\mathcal{L}^{N-1}$ denotes the Lebesgue measure on $\Pi_\nu$. From Lemma~\ref{lemma:distance}, we see that this is a well-defined quantity for all $ \Gamma^{(1)},\Gamma^{(2)}\in\mathcal{B}_\nu$. We identify $\Gamma^{(1)},\Gamma^{(2)}\in\mathcal{B}_\nu$ if $\Gamma^{(1)}_{x,\nu}=\Gamma^{(2)}_{x,\nu}$ for a.e.\ $x$. With this identification, $(\mathcal{B}_\nu,d_\nu)$ is indeed a metric space. 
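To illustrate the definition with a trivial computation, take two constants $a,b\in\mathbf{R}$ and the constant set-valued functions $\Gamma^{(1)}(z)=\{a\}$ and $\Gamma^{(2)}(z)=\{b\}$ for $z\in\Omega$; clearly $\Gamma^{(1)},\Gamma^{(2)}\in\mathcal{B}_\nu$ for every $\nu$. For each $x\in\Omega_\nu$ the graphs of the closed slices are $\overline{\Omega^1_{x,\nu}}\times\{a\}$ and $\overline{\Omega^1_{x,\nu}}\times\{b\}$, so $d_g\left(\overline{\Gamma^{(1)}_{x,\nu}},\overline{\Gamma^{(2)}_{x,\nu}}\right)=|a-b|$, and therefore
\[
d_\nu\left(\Gamma^{(1)},\Gamma^{(2)}\right)=\frac{|a-b|}{1+|a-b|}\,\mathcal{L}^{N-1}(\Omega_\nu).
\]
In particular, $d_\nu$ never exceeds $\mathcal{L}^{N-1}(\Omega_\nu)$ on $\mathcal{B}_\nu\times\mathcal{B}_\nu$.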
By a standard argument, we see that $(\mathcal{B}_\nu,d_\nu)$ is a complete metric space; we do not give a proof since we do not use this fact. Let $D$ be a countable dense set in $S^{N-1}$. We set \[ \mathcal{B}_D := \bigcap_{\nu\in D}{\mathcal{B}_\nu}. \] It is a metric space with metric \[ d_D \left( \Gamma^{(1)},\Gamma^{(2)} \right) := \sum^\infty_{j=1} \frac{1}{2^j} \frac{d_{\nu_j} \left( \Gamma^{(1)},\Gamma^{(2)} \right)}{1+d_{\nu_j} \left( \Gamma^{(1)},\Gamma^{(2)} \right)}, \] where $D=\{\nu_j\}^\infty_{j=1}$. (This is also a complete metric space.) We shall fix $D$. The convergence with respect to $d_D$ is called the \emph{sliced graph convergence}. If $\{\Gamma_k\}\subset\mathcal{B}_D$ converges to $\Gamma\in\mathcal{B}_D$ with respect to $d_D$, we write $\Gamma_k\xrightarrow{sg}\Gamma$ (as $k\to\infty$). Roughly speaking, $\Gamma_k\xrightarrow{sg}\Gamma$ if the graph of the slice of $\Gamma_k$ converges to that of the slice of $\Gamma$ for a.e. $x \in \Omega_\nu$ for any $\nu \in D$. For a function $v$ on $\Omega$, we associate a set-valued function $\Gamma_v$ by $\Gamma_v(x)=\left\{v(x)\right\}$. If $\Gamma_k=\Gamma_{v_k}$ for some $v_k$, we simply write $v_k\xrightarrow{sg}\Gamma$ instead of $\Gamma_{v_k}\xrightarrow{sg}\Gamma$. We note that if $v\in H^1(\Omega)$, the $L^2$-Sobolev space of order $1$, then $\Gamma_v\in\mathcal{B}_D$ for any $D$. We conclude this subsection by showing that the notions of the graph convergence and the sliced graph convergence are unrelated for $N\geq2$. First, we give an example showing that the graph convergence does not imply the sliced graph convergence. Let $C(r)$ denote the circle of radius $r>0$ centered at the origin in $\mathbf{R}^2$. It is clear that $d_H\left(C(r),C(r-\varepsilon)\right)\to0$ as $\varepsilon>0$ tends to zero. However, for $\nu=(1,0)$, $C(r-\varepsilon)_{x,\nu}$ with $x=(0,\pm r)$ is empty and does not converge to the single point $C(r)_{x,\nu}=\left\{(0,\pm r)\right\}$. In this case, $C(r-\varepsilon)_{x,\nu}$ converges to $C(r)_{x,\nu}$ in the Hausdorff sense except in the case $x=(0,\pm r)$. To make the exceptional set have a positive $\mathcal{L}^1$ measure in $\Pi_\nu$, we recall a thick Cantor set defined by \begin{align*} G &:= [0,1] \backslash U, \\ U &:= \bigcup \left\{\left( \frac{a}{2^n} - \frac{1}{2^{2n+1}}, \frac{a}{2^n} + \frac{1}{2^{2n+1}} \right) \biggm| n, a = 1,2,\ldots \right\}. \end{align*} This $G$ is a compact set with a positive $\mathcal{L}^1$ measure. We set \[ K := \bigcup_{r \in G} C(r), \quad K_\varepsilon := \bigcup_{r \in G} C(r-\varepsilon). \] $K_\varepsilon$ converges to $K$ as $\varepsilon\to0$ in the Hausdorff distance sense. However, for any $\nu\in S^1$, the slice $(K_\varepsilon)_{x,\nu}$ does not converge to {$K_{x,\nu}$} for $x\in\Pi_\nu$ with $|x|\in G$. Based on this set, it is easy to construct an example showing that the graph convergence does not imply the sliced graph convergence. Let $\Omega$ be the open unit disk centered at the origin. We set \begin{center} \begin{minipage}[c][24pt][b]{0.35\textwidth} \begin{eqnarray*} \Gamma_\varepsilon(z) := \left\{ \begin{array}{cl} [0,1], & z \in K_\varepsilon \\ \{ 1 \}, & z \in \Omega\backslash K_\varepsilon \end{array} \right., \end{eqnarray*} \end{minipage} \begin{minipage}[c][24pt][b]{0.35\textwidth} \begin{eqnarray*} \Gamma(z) := \left\{ \begin{array}{cl} [0,1], & z \in K \\ \{ 1 \}, & z \in \Omega\backslash K \end{array} \right..
\end{eqnarray*} \end{minipage} \end{center} The graph convergence of $\Gamma_\varepsilon$ to $\Gamma$ is equivalent to the Hausdorff convergence of $K_\varepsilon$ to $K$. The sliced graph convergence is equivalent to saying $(K_\varepsilon)_{x,\nu}\to K_{x,\nu}$ for $\nu\in D$ and a.e.\ $x$, where $D$ is some dense set in $S^1$. However, from the construction of $K_\varepsilon$ and $K$, we observe that for any $\nu\in S^1$, the slice $K_{x,\nu}$ does not converge to $K$ for $x$ with $|x|\in G$, which has a positive $\mathcal{L}^1$ measure on $\Pi_\nu$. Thus, we see that $\Gamma_\varepsilon$ does not converge to $\Gamma$ in the sense of the sliced graph convergence while $\Gamma_\varepsilon$ converges to $\Gamma$ in the sense of graph convergence. The sliced graph convergence does not imply the graph convergence even if the graph convergence is interpreted in the sense of essential distance. For any $\mathcal{H}^N$-measurable set $A$ in $\mathbf{R}^{N+1}$ and a point $p\in\mathbf{R}^{N+1}$, we set the essential distance from $p$ to $A$ as \[ d_e(p,A) := \inf \left\{ r>0 \bigm| \mathcal{H}^N \left( B_r(p)\cap A \right) > 0 \right\}, \] where $B_r(p)$ is a closed ball of radius $r$ centered at $p$. We set \[ N_\delta(A) := \left\{ q\in\mathbf{R}^{N+1} \bigm| d_e (q,A) < \delta \right\}, \] and the essential Hausdorff distance is defined as \[ d_{eH}(A,B) := \inf \left\{ \delta>0 \bigm| A \subset N_\delta(B),\ B \subset N_\delta(A) \right\}. \] Let $\Omega$ be a domain in $\mathbf{R}^N$ ($N\geq 2$) containing $B_1(0)$ and set \[ \Gamma^\varepsilon(z) = \left\{ \left( 1-|z|/\varepsilon \right)_+ \right\}, \quad \Gamma^0(z) = \{0\} \] for $z\in\Omega$ and $\varepsilon>0$. Clearly, for any $\nu\in S^{N-1}$, $x\in\Omega_\nu$ with $x\neq0$, \[ d_H \left( \operatorname{graph} \Gamma^\varepsilon_{x,\nu}, \operatorname{graph} \Gamma^0_{x,\nu} \right) \to 0 \] holds as $\varepsilon\to0$, However, \[ d_{eH} \left( \operatorname{graph} \Gamma^\varepsilon, \operatorname{graph} \Gamma^0 \right) = 1; \] in particular, $\Gamma^\varepsilon$ does not converge to $\Gamma^0$ in the $d_{eH}$ convergence of the graphs. \section{Lower semicontinuity} \label{SLSC} We now introduce a single-well Modica--Mortola function $E^\varepsilon_\mathrm{sMM}$ on $H^1(\Omega)$ when $\Omega$ is a bounded domain in $\mathbf{R}^N$. For $v\in H^1(\Omega)$, we set an integral \[ E^\varepsilon_\mathrm{sMM} (v) := \frac{\varepsilon}{2} \int_\Omega |\nabla v|^2 \id\mathcal{L}^N + \frac{1}{2\varepsilon} \int_\Omega F(v) \id\mathcal{L}^N, \] where $\mathcal{L}^N$ denotes the $N$-dimensional Lebesgue measure. Here, the potential energy $F$ is a single-well potential. We shall assume that \begin{enumerate} \item[(F1)] $F\in C^1(\mathbf{R})$ is non-negative, and $F(v)=0$ if and only if $v=1$, \item[(F2)] $\liminf_{|v|\to\infty} F(v) > 0$. We occasionally impose a stronger growth assumption than (F2): \item[(F2')] (monotonicity condition) $F'(v)(v-1)\geq0$ for all $v\in\mathbf{R}$. \end{enumerate} We are interested in the Gamma limit of $E^\varepsilon_\mathrm{sMM}$ as $\varepsilon\to 0$ under the sliced graph convergence. We define the subset $\mathcal{A}_0 := \mathcal{A}_0(\Omega) \subset \mathcal{B}_D$ as follows: $\Xi \in \mathcal{A}_0(\Omega)$ if there is a countably $N-1$ rectifiable set $\Sigma\subset\Omega$ such that \begin{equation} \Xi(z) = \left \{ \begin{array}{l} \label{SIG} 1,\ z\in\Omega\backslash\Sigma \\ \left[\xi^-, \xi^+\right],\ z\in\Sigma \end{array} \right. 
\end{equation} with $\mathcal{H}^{N-1}$-measurable function $\xi_\pm$ on $\Sigma$ and $\xi^-(z) \leq 1 \leq \xi^+(z)$ for $\mathcal{H}^{N-1}$-a.e.\ $z \in \Sigma$. For the definition of countably $N-1$ rectifiability, see the beginning of Section~\ref{SSBP}. Here $\mathcal{H}^m$ denotes the $m$-dimensional Hausdorff measure. We briefly remark on the compactness of the graph of $\Xi\in\mathcal{A}_0$. By definition, if $\Xi$ is of form \eqref{SIG}, then $\Xi(z)$ is compact. However, there may be a chance that $\operatorname{graph}\overline{\Gamma_{x,\nu}}$ is not compact, even for the one-dimensional case ($N=1$). Indeed, if a set-valued function on $(0,1)$ is of form \begin{equation*} \Xi(z) = \left \{ \begin{array}{ll} \left[1,m\right]&\text{for}\ z=1/m \\ \{1\}&\text{otherwise}, \end{array} \right. \end{equation*} then $\overline{\Xi}$ is not compact in $[0,1]\times\mathbf{R}$. It is also possible to construct an example that $\overline{\Xi}\neq\Xi$ in $(0,1)$, which is why we impose $\Xi\in\mathcal{B}_D$ in the definition of $\mathcal{A}_0$. For $\Xi\in\mathcal{A}_0$, we define a functional \[ E^0_\mathrm{sMM}(\Xi,\Omega) := 2\int_\Sigma \left\{ G(\xi^-) + G(\xi^+) \right\} {\id\mathcal{H}^{N-1}},\quad \text{where}\ G(\sigma) := \left| \int^\sigma_1 \sqrt{F(\tau)} {\id\tau} \right|. \] For later applications, it is convenient to consider a more general functional. Let $J$ be a countably $N-1$ rectifiable set, and $\alpha:\mathbf{R}\to[0,\infty)$ be continuous. Let $j$ be a non-negative $\mathcal{H}^{N-1}$-measurable function on $J$. We denote the triplet $(J,j,\alpha)$ by $\mathcal{J}$. We set \[ E^{0,{\mathcal{J}}}_\mathrm{sMM}(\Xi,\Omega) = E^0_\mathrm{sMM}(\Xi,\Omega) + \int_{J\cap\Sigma} \left( \min_{\xi^-\leq\xi\leq\xi^+} \alpha(\xi) \right) {\id\mathcal{H}^{N-1}}. \] For $S$, we also set \[ E^{\varepsilon,{\mathcal{J}}}_\mathrm{sMM}(v) := E^\varepsilon_\mathrm{sMM}(v) + \int_J \alpha(v)j{\id\mathcal{H}^{N-1}}, \] which is important to study the Kobayashi--Warren--Carter energy. \subsection{Liminf inequality} \label{SSLINF} We shall state the ``liminf inequality'' for the convergence of $E^{\varepsilon,{\mathcal{J}}}_\mathrm{sMM}$. \begin{theorem} \label{INF} Let $\Omega$ be a bounded domain in $\mathbf{R}^N$. Assume that $F$ satisfies (F1) and (F2). For ${\mathcal{J}}=(J,j,\alpha)$, assume that $J$ is countably $N-1$ rectifiable in $\Omega$ with a non-negative $\mathcal{H}^{N-1}$-measurable function $j$ on $J$ and that $\alpha \in C(\mathbf{R})$ is non-negative. Let $D$ be a countable dense set of $S^{N-1}$. Let $\{v_\varepsilon\}_{0<\varepsilon<1}$ be in $H^1(\Omega)$ so that $\Gamma_{v_\varepsilon}\in\mathcal{B}_D$. If $v_\varepsilon\xrightarrow{sg}\Xi$ and $\Xi\in\mathcal{A}_0$, then \[ E^{0,{\mathcal{J}}}_\mathrm{sMM}(\Xi,\Omega) \leq \liminf_{\varepsilon\to 0}E^{\varepsilon,{\mathcal{J}}}_\mathrm{sMM} (v_\varepsilon). \] \end{theorem} \begin{remark} \label{INF1} \begin{enumerate} \item[(i)] The last inequality is called the liminf inequality. Here, we assume that the limit $\Xi$ is in $\mathcal{A}_0$, which is a stronger assumption than the one-dimensional result \cite[Theorem 2.1 (i)]{GOU}, where this condition automatically follows from the finiteness of the right-hand side of the liminf inequality. \item[(i\hspace{-1pt}i)] In a one-dimensional setting, we consider the limit functional in $\overline{\Omega}$. Here we only consider it in $\Omega$. Thus, our definition of $\mathcal{A}_0$ is different from \cite{GOU}. 
Under suitable assumptions on the boundary, say $C^1$, we are able to extend the result onto $\overline{\Omega}$. Of course, we may replace $\Omega$ with a flat torus $\mathbf{T}^N=\mathbf{R}^N/\mathbf{Z}^N$. \item[(i\hspace{-1pt}i\hspace{-1pt}i)] In \cite{GOU}, $\alpha(v)$ is taken $v^2$ so that \[ E^{0,b}_\mathrm{sMM}(\Xi,M) = E^0_\mathrm{sMM}(\Xi,M) + b\left(\left(\min\Xi(a)\right)_+\right)^2, \] where $(f)_+$ denotes the positive part defined by $f_+=\max(f,0)$. However, in \cite{GOU}, this operation was missing in the definition, which is incorrect. \end{enumerate} \end{remark} \subsection{Basic properties of a countably $N-1$ rectifiable set} \label{SSBP} To prove Theorem \ref{INF}, we begin with the basic properties of a countably $N-1$ rectifiable set. A set $J$ in $\mathbf{R}^N$ is said to be countably $N-1$ rectifiable if \[ J \subset J_0 \cup \left( \bigcup^\infty_{j=1} F_j\left(\mathbf{R}^{N-1}\right) \right) \] where $\mathcal{H}^{N-1}(J_0)=0$ and $F_j:\mathbf{R}^{N-1}\to\mathbf{R}^N$ are Lipschitz mappings for $j=1,2,\ldots$. \begin{definition} \label{DEL} Let $\delta>0$. A set $K$ in $\mathbf{R}^N$ is $\delta$-flat if there are $V\subset\mathbf{R}^{N-1}$, a $C^1$ function $\psi\colon\mathbf{R}^{N-1}\to\mathbf{R}$, and a rotation $A\in SO(N)$ such that \[ K = \left\{ \left(x,\psi(x) \right)A \bigm| x \in V \right\} \] and $\|\nabla\psi\|_\infty\leq \delta$. \end{definition} \begin{lemma} \label{CR} Let $\Sigma$ be a countably $N-1$ rectifiable set. For any $\delta>0$, there is a disjoint countable family $\{K_i\}^\infty_{i=1}$ of compact $\delta$-flat sets and $\mathcal{H}^{N-1}$-measure zero $N_0$ such that \[ \Sigma = N_0 \cup \left( \bigcup^\infty_{i=1} K_i \right). \] \end{lemma} \begin{proof} By \cite[Lemma 11.1]{Sim}, there is a countable family of $C^1$ manifolds $\{M_i\}^\infty_{i=1}$ and $N$ with $\mathcal{H}^{N-1}(N)=0$ such that \[ \Sigma \subset N \cup \left( \bigcup^\infty_{i=1} M_i \right). \] Since $M_i$ is a $C^1$ manifold, it can be written as a countable family of $\delta$-flat sets. Thus, we may assume that $M_i$ is $\delta$-flat. We define $\{N_i,\Sigma_i\}^\infty_{k=1}$ inductively by \begin{align*} &N_{{1}} := \Sigma \cap M_1, \quad \Sigma_1 := \Sigma \backslash N_1 \\ &N_{i+1} := \Sigma_i \cap M_{i+1}, \quad \Sigma_{i+1} := \Sigma_i \backslash N_i\ (i=1,{2,\ldots}). \end{align*} Here, $N_i$ is $\mathcal{H}^{N-1}$-measurable and $\mathcal{H}^{N-1}(N_i)<\infty$. Since $\mathcal{H}^{N-1}$ is Borel regular, for any $\delta$, there exists a compact set $C\subset N_i$ such that $\mathcal{H}^{N-1}(N_i\backslash C)<\delta$. Thus, there is a disjoint countable family $\{M_{ij}\}^\infty_{j=1}$ of compact sets, and an $\mathcal{H}^{N-1}$-zero set $N_{i0}$ such that \[ N_i = N_{i0} \cup \left( \bigcup^\infty_{j=1} M_{ij} \right)\ (i=1,2,\ldots). \] Indeed, we define a sequence of compact sets $\{M_{ij}\}$ inductively by \begin{align*} & M_{i1} \subset N_i,\\ & M_{i,j+1} \subset N_i \backslash \bigcup^j_{k=1} M_{ik},\ j=1,2,\ldots \end{align*} such that $\mathcal{H}^{N-1} \left(N_i\backslash \bigcup^{{j}}_{k=1}M_{i{k}}\right)<1/2^{{j}}$. Then, setting $N_{i0}=N_i\backslash\bigcup^\infty_{j=1} M_{ij}$ yields the desired decomposition of $N_i$. Setting \[ N_0 = (N\cap\Sigma) \cup \left( \bigcup^\infty_{i=1} N_{i0} \right) \] and renumbering $\{M_{ij}\}$ as $\{K_i\}$, the desired decomposition is obtained. 
\end{proof} \subsection{Proof of liminf inequality} \label{SINF} \begin{proof}[Proof of Theorem \ref{INF}] By Lemma \ref{CR}, for $\delta\in(0,1)$, we decompose $\Sigma$ as \[ \Sigma = N_0 \cup \left( \bigcup^\infty_{i=1} K_i \right), \] where $\{K_i\}^\infty_{i=1}$ is a disjoint family of compact $\delta$-flat sets and $\mathcal{H}^{N-1}(N_0)=0$. We set \[ \Sigma_m = \bigcup^m_{i=1} K_i \] and take a disjoint family of open sets $\{U^m_i\}^m_{i=1}$ such that $K_i\subset U^m_i$. By definition, $K_i$ is of the form \[ K_i = \left\{ \left(x,\psi(x)\right)A_i \bigm| x \in V_i \right\} \] for some $A_i\in SO(N)$, a compact set $V_i\subset\mathbf{R}^{N-1}$ and $\psi_i\in C^1(\mathbf{R}^{N-1})$ with $\|\nabla\psi_i\|_\infty\leq\delta$. Since $D$ is dense in $S^{N-1}$, we are able to take $\nu^i\subset D$, which is close to the normal of the hyperplane \[ P_i = \left\{ (x,0)A_i \bigm| x \in \mathbf{R}^{N-1} \right\} \] for $i=1,\ldots,m$. We may assume that $\nu_i$ is normal to $P_i$ and $\|\nabla\psi_i\|_\infty\leq 2\delta$ by rotating slightly. See Figure \ref{FRK}. \begin{figure}[htb] \centering \includegraphics[width=6.5cm]{GOSUfigure_2.png} \caption{The set $\Sigma_2$} \label{FRK} \end{figure} We decompose \[ E^\varepsilon_\mathrm{sMM}(v_\varepsilon) \geq \sum^m_{i=1} \int_{U^m_i} \left\{ \frac{\varepsilon}{2}|\nabla v_\varepsilon|^2 + \frac{1}{2\varepsilon} F(v_\varepsilon) \right\} \id\mathcal{L}^N. \] By slicing, we observe that the right-hand side is \begin{align*} \int_{U^m_i} &{\left\{ \frac{\varepsilon}{2}|\nabla v_\varepsilon|^2 + \frac{1}{2\varepsilon} F(v_\varepsilon) \right\}} \id\mathcal{L}^N\\ &= \int_{(U^m_i)_{\nu^i}} \left( \int_{(U^m_i)^1_{x,\nu^i}} \left\{ \frac{\varepsilon}{2}|\nabla v_\varepsilon|^2_{x,\nu^i} + \frac{1}{2\varepsilon} F(v_{\varepsilon,x,\nu^i}) \right\} \id t \right) \id\mathcal{L}^{N-1}(x) \\ &\geq \int_{(U^m_i)_{\nu^i}} \left( \int_{(U^m_i)^1_{x,\nu^i}} \left\{ \frac{\varepsilon}{2}\left|\partial_t(v_{\varepsilon,x,\nu^i}) \right|^2 + \frac{1}{2\varepsilon} F(v_{\varepsilon,x,\nu^i}) \right\} \id t \right) \id\mathcal{L}^{N-1}(x). \end{align*} Since $v_\varepsilon\xrightarrow{sg}\Xi$, we see that {$\overline{v_{\varepsilon,x,\nu}}$ converges to $\overline{\Xi_{x,\nu^i}}$ as $\varepsilon \to 0$ in the sense of the graph convergence in a one dimensional setting for $\mathcal{L}^{N-1}$-a.e. $x$.} Applying the one-dimensional result \cite[Theorem 2.1 (i)]{GOU}, we have \begin{equation} \label{ONE} \begin{split} \liminf_{\varepsilon\to 0} \int_{(U^m_i)^1_{x,\nu^i}} &\left\{ \frac{\varepsilon}{2}\left|\partial_t(v_{\varepsilon,x,\nu^i}) \right|^2 + \frac{1}{2\varepsilon} F(v_{\varepsilon,x,\nu^i}) \right\} \id t\\ &\geq \sum^\infty_{k=1} 2 \left\{ G \left(\xi^+_{x,\nu^i}(t_k)\right) + G \left(\xi^-_{x,\nu^i}(t_k)\right) \right\} \end{split} \end{equation} for $\{t_k\}^\infty_{k=1}$, where $\Xi_{x,\nu^i}(t)$ is not a singleton in $(U^m_i)^1_{x,\nu^i}$. This set $\{t_k\}^\infty_{k=1}$ contains a unique point $t_x$ such as \[ (K_i)^1_{x,\nu^i} \cap (U^m)^1_{x,\nu^i} = \{t_x\}, \] so the right-hand side of \eqref{ONE} is estimated from below by \[ 2 \left\{ G \left(\xi^+_{x,\nu^i}(t_x)\right) + G \left(\xi^-_{x,\nu^i}(t_x)\right) \right\}. 
\] Since integration is lower semicontinuous by Fatou's lemma, we now observe that \[ \liminf_{\varepsilon\to 0} E^\varepsilon_\mathrm{sMM}(v_\varepsilon) \geq \sum^m_{i=1} \int_{(U^m_i)_{\nu^i}} \widetilde{G} \left(x + t_x \nu^i \right) \id\mathcal{L}^{N-1}(x), \] where $\widetilde{G}(x)=2\left\{G \left(\xi^+(x)\right) + G \left(\xi^-(x)\right)\right\}$ ($x\in\Sigma$). By the area formula, we see \begin{align*} \int_{K_i} \widetilde{G}(y) {\id\mathcal{H}^{N-1}}(y) &= \int_{V_i} \widetilde{G}\left(\left(x,\psi_i(x)\right) A_i \right) \sqrt{1+\left|\nabla\psi_i(x)\right|^2} \id \mathcal{L}^{N-1}(x) \\ &\leq \sqrt{1+(2\delta)^2} \int_{(U^m_i)_{\nu^i}} \widetilde{G}(x + t_x \nu^i) \id \mathcal{L}^{N-1}(x). \end{align*} Thus \begin{align*} \liminf_{\varepsilon\to 0} E^\varepsilon_\mathrm{sMM}(v_\varepsilon) &\geq \left( 1+(2\delta)^2 \right)^{-1/2} \sum^m_{i=1} \int_{K_i} \widetilde{G}(x) {\id\mathcal{H}^{N-1}}(x) \\ &= \left( 1+(2\delta)^2 \right)^{-1/2} \int_{\Sigma_m} \widetilde{G}(x) {\id\mathcal{H}^{N-1}}(x). \end{align*} Sending $m\to\infty$ and then $\delta\to 0$, we conclude \[ \liminf_{\varepsilon\to 0} E^\varepsilon_\mathrm{sMM}(v_\varepsilon) \geq \int_\Sigma \widetilde{G}(x) {\id\mathcal{H}^{N-1}}(x). \] It remains to prove \[ \liminf_{\varepsilon\to 0} \int_J \alpha(v_\varepsilon)j {\id\mathcal{H}^{N-1}} \geq \int_{J\cap\Sigma} \left( \min_{\xi^-\leq\xi\leq\xi^+} \alpha(\xi) \right)j {\id\mathcal{H}^{N-1}} \] when $v_\varepsilon\xrightarrow{sg}\Xi$. It suffices to prove that \[ \liminf_{\varepsilon\to 0} \int_{J\cap K_i} \alpha(v_\varepsilon) j{\id\mathcal{H}^{N-1}} \geq \int_{J\cap K_i} \left( \min_{\xi^-\leq\xi\leq\xi^+} \alpha(\xi) \right)j {\id\mathcal{H}^{N-1}}. \] By slicing, we may reduce the problem to a one-dimensional setting. If the dimension equals one, this follows directly from the definition of graph convergence. The proof is now complete. \end{proof} \section{Construction of recovery sequences} \label{SCRS} Our goal in this section is to construct what is called a recovery sequence $\{w_\varepsilon\}$ to establish the limsup inequality. \begin{theorem} \label{SUP} Let $\Omega$ be a bounded domain in $\mathbf{R}^N$. Assume that $F$ satisfies (F1) and (F2'). For $\mathcal{J}=(J,j,\alpha)$, assume that $J$ is countably $N-1$ rectifiable in $\Omega$ with a non-negative $\mathcal{H}^{N-1}$-integrable function $j$ on $J$ and that $\alpha\in C(\mathbf{R})$ is non-negative. For any $\Xi\in\mathcal{A}_0$ with $E^{0,{\mathcal{J}}}_\mathrm{sMM}(\Xi,\Omega)<\infty$, there exists a sequence $\{w_\varepsilon\}\subset H^1(\Omega)$ such that \begin{align*} & E^{0,{\mathcal{J}}}_\mathrm{sMM}(\Xi,\Omega) \geq \limsup_{\varepsilon\to 0} E^{\varepsilon,{\mathcal{J}}}_\mathrm{sMM}(w_\varepsilon),\\ & \lim_{\varepsilon\to 0} d_\nu (\Gamma_{w_\varepsilon},\Xi) = 0\quad \text{for all}\quad \nu \in S^{N-1}. \end{align*} In particular, $w_\varepsilon\xrightarrow{sg}\Xi$ in $\mathcal{B}_D$ for any $D\subset S^{{N}-1}$ with $\overline{D}=S^{N-1}$. By Theorem \ref{INF}, \[ E^{0,{\mathcal{J}}}_\mathrm{sMM}(\Xi,\Omega) = \lim_{\varepsilon\to 0} E^{\varepsilon,{\mathcal{J}}}_\mathrm{sMM}(w_\varepsilon). \] \end{theorem} \subsection{Approximation} \label{SSAP} We begin with various approximations. \begin{lemma} \label{APP} Assume the same hypotheses concerning $\Omega$ and $\mathcal{J}=(J,j,\alpha)$ as in Theorem \ref{SUP}. Assume that $F$ satisfies (F1). Assume $\Xi\in\mathcal{A}_0$ so that its singular set $\Sigma=\left\{ y\in\Omega \bigm| \Xi(y)\neq\{1\} \right\}$ is countably $N-1$ rectifiable.
Let $\delta$ be an arbitrarily fixed positive number. Then, there exists a sequence $\{\Xi_m\}^\infty_{m=1}\subset\mathcal{A}_0$ such that the following properties hold: \begin{enumerate} \item[(i)] $E^{0,{\mathcal{J}}}_\mathrm{sMM}(\Xi,\Omega) \geq \limsup_{m\to\infty}E^{0,{\mathcal{J}}}_\mathrm{sMM}(\Xi_m,\Omega)$, \item[(i\hspace{-1pt}i)] $\lim_{m\to\infty}d_\nu(\Xi_m,\Xi)=0$ for all $\nu\in S^{N-1}$, \item[(i\hspace{-1pt}i\hspace{-1pt}i)] $\Xi_m(y)\subset\Xi(y)$ for all $y\in\Omega$, \item[(i\hspace{-1pt}v)] the singular set $\Sigma_m=\left\{ y\in\Omega \bigm| \Xi_m(y)\neq\{1\} \right\}$ consists of a disjoint finite union of compact $\delta$-flat sets $\{K_j\}^k_{j=1}$, \item[{(v)}] $\xi^+_m$, $\xi^-_m$ are constant functions on each $K_j$ ($j=1,\ldots,k$), where $\Xi_m(y)=\left[\xi^-_m(y), \xi^+_m(y)\right]\ni1$ on $\Sigma_m$. Here $k$ may depend on $m$. \end{enumerate} \end{lemma} We recall an elementary fact. \begin{proposition} \label{SEQ} Let $h\in C(\mathbf{R})$ be a non-negative function that satisfies $h(1)=0$ and is strictly monotonically increasing for $\sigma\geq 1$. Let $\{a_j\}^\infty_{j=1}$ be a sequence such that $a_j\geq 1$ ($j=1,2,\ldots$) and \[ \sum^\infty_{j=1} h(a_j) < \infty. \] Then \[ \lim_{m\to\infty} \sup_{j\geq m} (a_j-1) = 0. \] \end{proposition} \begin{proof} By monotonicity of $h$ for $\sigma\geq 1$, we observe that \[ h \left( \sup_{j\geq m} a_j \right) = \sup_{j\geq m} h(a_j) \leq \sum_{j\geq m} h(a_j) \to 0 \] as $m\to\infty$. This yields the desired result since $h(\sigma)$ is strictly monotone for $\sigma \geq 1$. \end{proof} We next recall a special case of co-area formula \cite[12.7]{Sim} for a countably rectifiable set. \begin{lemma} \label{CAR} Let $\Sigma$ be a countably $N-1$ rectifiable set on $\Omega$, and let $g$ be an $\mathcal{H}^{N-1}$-measurable function on $\Sigma$. For $\nu\in S^{N-1}$, let $\pi_\nu$ denote the restriction on $\Sigma$ of the orthogonal projection from $\mathbf{R}^N$ to $\Pi_\nu$. Then \[ \int_\Sigma gJ^*\pi_\nu {\id\mathcal{H}^{N-1}} = \int_{\Omega_\nu} \left( \int_{\Sigma^1_{x,\nu}} g_{x,\nu} (t)\id\mathcal{H}^0(t) \right) \id \mathcal{L}^{N-1}(x). \] Here $J^*f$ denotes the Jacobian of a mapping $f$ from $\Sigma$ to $\Pi_\nu$. \end{lemma} \begin{proof}[Proof of {Theorem} \ref{APP}] We divide the proof into two steps. \noindent \textit{Step 1.} We shall construct $\Xi_m$ satisfying (i)--(i\hspace{-1pt}v). By Lemma \ref{CR}, we found a disjoint family of compact $\delta$-flat sets $\{K_j\}^\infty_{j=1}$ such that $\Sigma=\bigcup^\infty_{j=1}K_j$ up to $\mathcal{H}^{N-1}$-measure zero set for $\Sigma$ associated with $\Xi$. By the co-area formula (Lemma \ref{CAR}) and $J^*\pi_\nu\leq 1$, we observe \begin{equation} \label{ACA} \begin{split} \int_{K_j} \widetilde{G}(y) {\id\mathcal{H}^{N-1}}(y) &\geq \int_{K_j} \widetilde{G} J^*\pi_\nu {\id\mathcal{H}^{N-1}}\\ &= \int_{\Omega_\nu} \left(\int_{(K_j)^1_{x,\nu}} \widetilde{G}_{x,\nu}(t)\id\mathcal{H}^0(t) \right) \id \mathcal{L}^{N-1}(x), \end{split} \end{equation} where $\widetilde{G}(y)=2\bigl(G\left(\xi^+(y)\right)+G\left(\xi^-(y)\right)\bigr)$. Since $E^{0,{\mathcal{J}}}_\mathrm{sMM}(\Xi,\Omega)<\infty$, we see that \begin{equation} \label{BU} \sum^\infty_{j=1} \int_{K_j} \widetilde{G} {\id\mathcal{H}^{N-1}}(y) < \infty. \end{equation} We then take \begin{equation*} \Xi_m(y)= \left \{ \begin{array}{cl} \left[\xi^-(y), \xi^+(y)\right] &,\ y\in\Sigma_m = \bigcup^m_{j=1} K_j \\ \{1\} &,\ \text{otherwise.} \end{array} \right. 
\end{equation*} By definition, (i), (i\hspace{-1pt}i\hspace{-1pt}i), and (i\hspace{-1pt}v) are trivially fulfilled. It remains to prove (i\hspace{-1pt}i). By \eqref{ACA} and \eqref{BU}, we observe that \[ \sum^\infty_{j=1}\int_{\Omega_\nu} \left(\int_{(K_j)^1_{x,\nu}} \widetilde{G}_{x,\nu}(t)\id\mathcal{H}^0(t) \right) \id \mathcal{L}^{N-1}(x) < \infty \] for $\Xi$. Since all integrands are non-negative, the monotone convergence theorem implies that \[ \sum^\infty_{j=1}\int_{\Omega_\nu} \left(\int_{(K_j)^1_{x,\nu}} \widetilde{G}_{x,\nu}\id\mathcal{H}^0 \right) \id \mathcal{L}^{N-1}(x) = \int_{\Omega_\nu} \left( \sum^\infty_{j=1} \int_{(K_j)^1_{x,\nu}} \widetilde{G}_{x,\nu}\id\mathcal{H}^0 \right) \id \mathcal{L}^{N-1}(x). \] Thus \[ \sum^\infty_{j=1} \int_{(K_j)^1_{x,\nu}}\widetilde{G}_{x,\nu}\id\mathcal{H}^0 < \infty \] for $\mathcal{L}^{N-1}$-a.e.\ $x\in\Omega_\nu$. Proposition \ref{SEQ} yields \[ \lim_{m\to\infty} \sup_{j\geq m} \sup_{t\in(K_j)^1_{x,\nu}} \left(\xi^+_{x,\nu}(t)-1\right) = 0 \] and, similarly, \[ \lim_{m\to\infty} \sup_{j\geq m} \sup_{t\in(K_j)^1_{x,\nu}} \left(1-\xi^-_{x,\nu}(t)\right) = 0. \] Since \[ d_H\left(\left(\Xi_m\right)_{x,\nu}, \Xi_{x,\nu}\right) = \sup_{j\geq m+1} \sup_{t\in(K_j)^1_{x,\nu}} \max\left\{ \left|\xi^+_{x,\nu}(t)-1\right|, \left|\xi^-_{x,\nu}(t)-1\right| \right\}, \] we conclude that \[ d_H\left(\left(\Xi_m\right)_{x,\nu}, \Xi_{x,\nu}\right) \to 0 \] as $m\to\infty$ for $\mathcal{L}^{N-1}$-a.e.\ $x\in\Omega_\nu$. Since the integrand of \[ d_\nu\left(\Xi_m,\Xi\right) = \int_{\Omega_\nu} \frac{d_H\left(\left(\Xi_m\right)_{x,\nu}, \Xi_{x,\nu}\right)}{1+d_H\left(\left(\Xi_m\right)_{x,\nu}, \Xi_{x,\nu}\right)} \id \mathcal{L}^{N-1}(x) \] is bounded by $1$, the Lebesgue dominated convergence theorem implies (i\hspace{-1pt}i). \noindent \textit{Step 2.} We next approximate $\Xi_m$ constructed in Step 1 and construct a sequence $\{\Xi_{m_k}\}^\infty_{k=1}$ satisfying (i)--{(v)} by replacing $\Xi$ with $\Xi_m$. If such a sequence exists, a diagonal argument yields the desired sequence. We may assume that \begin{equation*} \Xi(y)= \left \{ \begin{array}{cl} \left[\xi^-(y), \xi^+(y)\right] & ,\ y\in\Sigma_m = \bigcup^m_{j=1} K_j \\ \{1\} & ,\ \text{otherwise.} \end{array} \right. \end{equation*} We approximate $\xi^+$ from below. For a given integer $n$, we set \[ \xi^+_n(y) := \inf \left\{ \xi^+(z) \Bigm| z \in I^k_n \right\}, \quad I^k_n := \left\{ y \in\Sigma_m \biggm| \frac{k-1}{n} \leq \xi^+(y)-1 < \frac{k}{n} \right\} \] for $y\in I^k_n$ and $k=1,2,\ldots$. Since $I^k_n$ is an $\mathcal{H}^{N-1}$-measurable set, as in the proof of Lemma \ref{CR}, $I^k_n$ is decomposed as a countable disjoint family of compact sets up to an $\mathcal{H}^{N-1}$-measure zero set. We approximate $\xi^-$ from above similarly, and we set \begin{equation*} \Xi_{m,n}(y)= \left \{ \begin{array}{cl} \left[\xi^-_n(y), \xi^+_n(y)\right] & ,\ y\in\Sigma_m \\ \{1\} & ,\ \text{otherwise.} \end{array} \right. \end{equation*} It is easy to see that $\Xi_{m,n}$ satisfies (i\hspace{-1pt}i\hspace{-1pt}i) and (i\hspace{-1pt}v) by replacing $m$ with $n$. Since $E^0_\mathrm{sMM}(\Xi,\Omega)\geq E^0_\mathrm{sMM}(\Xi_{m,n},\Omega)$ and \[ \min_{\xi^-_n(y)\leq\xi\leq\xi^+_{{n}}(y)} j(y)\alpha(\xi) \to \min_{\xi^-(y)\leq\xi\leq\xi^+(y)} j(y)\alpha(\xi) \ \text{as}\ n \to \infty \ \text{for}\ \mathcal{H}^{N-1}\text{-a.e.}\ y, \] with the bound $j(y)\alpha(1)$, the property (i) follows from the Lebesgue dominated convergence theorem.
Since \[ d_H\left(\left(\Xi_{m,n}\right)_{x,\nu}, \Xi_{x,\nu}\right) = \sup_{t\in(\Sigma_m)^1_{x,\nu}} \max\left\{ \left|\xi^+_{x,\nu}-\xi^+_{n,x,\nu}\right|, \left|\xi^-_{x,\nu}-\xi^-_{n,x,\nu}\right| \right\} \leq 1/n, \] we now conclude (i\hspace{-1pt}i) as discussed at the end of Step 1. \end{proof} \subsection{Recovery sequences} \label{SSRC} In this subsection, we shall prove Theorem \ref{SUP}. An essential step is constructing a recovery sequence $\{w_\varepsilon\}$ when $\Xi$ has a simple structure, and the basic idea is similar to that of \cite{AT,FL}. Besides generalization to general $F$ satisfying (F1) and (F2') from $F(z)=(z-1)^2$, our situation is more involved because $\Xi(y)=[0,1]$ for $y\in\Sigma$ in their case, while in our case, $\Xi(y)=\left[\xi^-(y),\xi^+(y)\right]$ for a general $\xi^-\leq1\leq\xi^+$. Moreover, we must show the convergence in $d_\nu$ and handle the $\alpha$-term. \begin{lemma} \label{REC} Assume the same hypotheses concerning $\Omega$, $F$, and $\mathcal{J}=(J,j,\alpha)$ as {in} Theorem \ref{SUP}. For $\Xi\in\mathcal{A}_0$, assume that its singular set $\Sigma=\left\{x\in\Omega \mid \Xi(x)\neq\{1\} \right\}$ consists of a disjoint finite union of compact $\delta$-flat sets $\{K_j\}^k_{j=1}$, and $\xi^-$ and $\xi^+$ are constant functions in each $K_j$ ($j=1,\ldots,k$), where $\Xi(x)=[\xi^-,\xi^+]$ on $\Sigma$. Then there exists a sequence $\{w_\varepsilon\}\subset H^1(\Omega)$ such that \begin{align*} & E^{0,{\mathcal{J}}}_\mathrm{sMM}(\Xi,\Omega) \geq \limsup_{\varepsilon\to 0} E^{\varepsilon,{\mathcal{J}}}_\mathrm{sMM}(w_\varepsilon), \\ & \lim d_\nu(\Gamma_{w_\varepsilon}, \Xi) = 0 \quad\text{for all}\quad \nu \in S^{N-1}. \end{align*} \end{lemma} This lemma follows from the explicit construction of functions $\{w_\varepsilon\}$ similarly to the standard double-well Modica--Mortola functional. \begin{proof} We take a disjoint family of open sets $\{U_j\}^k_{j=1}$ with the property $K_j\subset U_j$. It suffices to construct a desired sequence $\{w_\varepsilon\}$ so that the support of $w_\varepsilon-1$ is contained in $\bigcup^k_{j=1}U_j$, so we shall construct such $w_\varepsilon$ in each $U_j$. We may assume $k=1$ and write $K_1, U_1$ by $K, U$, and $\xi_-,\xi_+$ by $a,b$ ($a\leq1\leq b$) so that \begin{equation*} \Xi(y)= \left \{ \begin{array}{cl} [a, b] & ,\ y\in K, \\ \{1\} & ,\ y \in U \backslash K. \end{array} \right. \end{equation*} For $c<1$ and $s>0$, let $\psi(s,c)$ be a function determined by \[ \int^\psi_c \frac{1}{\sqrt{F(z)}} \id z = s. \] By (F1), this equation is uniquely solvable for all $s\in[0,s_*)$ with \[ s_* := \int^1_c \frac{1}{\sqrt{F(z)}}\id z. \] This $\psi(s,c)$ solves the initial value problem \begin{equation} \label{SR} \left \{ \begin{array}{l} \displaystyle{\frac{\mathrm{d}\psi}{\mathrm{d}s}} = \sqrt{F(\psi)}, \quad s \in (0,s_*) \\ \psi(0,c)=c, \end{array} \right. \end{equation} although this ODE may admit many solutions. For $c>1$, we parallelly define $\psi$ by \[ \int^c_\psi \frac{1}{\sqrt{F(z)}}\id z = s \] for $s\in(0,s_*)$ with \[ s_* := \int^c_1 \frac{1}{\sqrt{F(z)}} \id z. \] In this case, $\psi$ also solves \eqref{SR}. We consider the even extension of $\psi$ (still denoted by $\psi$) for $s<0$ so that $\psi(s,c)=\psi(-s,c)$. For the case $c=1$, we set $\psi(s,c)\equiv 1$. 
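For orientation, in the model case $F(z)=(z-1)^2$ mentioned at the beginning of this subsection, everything is explicit (this is only an illustration and is not used below): for $c<1$,
\[
s = \int^{\psi}_c \frac{\id z}{1-z} = \log\frac{1-c}{1-\psi}, \qquad\text{so that}\qquad \psi(s,c) = 1-(1-c)e^{-s} \quad\text{and}\quad s_* = +\infty,
\]
and similarly $\psi(s,c)=1+(c-1)e^{-s}$ for $c>1$. In both cases $\psi(\cdot,c)$ tends monotonically to $1$ and satisfies $|\partial_s\psi|^2 = F(\psi)$; moreover, with the convention $G(\sigma)=\bigl|\int^\sigma_1\sqrt{F(z)}\,\id z\bigr|$, which is consistent with the identities used below, one obtains $G(\sigma)=\frac{1}{2}(\sigma-1)^2$.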
For $a,b$ with $[a,b]\ni 1$, we consider a rescaled function $\psi_\varepsilon(s,\cdot)=\psi(s/\varepsilon,\cdot)$ and then define \begin{equation*} \Psi_\varepsilon(s,a,b)= \left \{ \begin{array}{ll} 1 & ,\quad s \leq -2\sqrt{\varepsilon} \\ \alpha_1 s + \beta_1 & , \quad -2\sqrt{\varepsilon} \leq s \leq -\sqrt{\varepsilon}\\ \psi_\varepsilon(-s,a) & ,\quad -\sqrt{\varepsilon} \leq s \leq 0\\ \psi_\varepsilon(s,a) & ,\quad 0 \leq s \leq \sqrt{\varepsilon}\\ \alpha_2 s + \beta_2 & ,\quad \sqrt{\varepsilon} \leq s \leq 2\sqrt{\varepsilon}\\ \psi_\varepsilon(s-3\sqrt{\varepsilon},b) & ,\quad 2\sqrt{\varepsilon} \leq s \leq 4\sqrt{\varepsilon}\\ \alpha_3 s + \beta_3 & ,\quad 4\sqrt{\varepsilon} \leq s \leq 5\sqrt{\varepsilon}\\ 1 & ,\quad 5\sqrt{\varepsilon} \leq s \end{array} \right. \end{equation*} with $\alpha_i,\beta_i\in\mathbf{R}$ ($i=1,2,3$) chosen so that $\Psi_\varepsilon$ is Lipschitz continuous. \begin{figure}[thbp] \centering \includegraphics[width=0.5\linewidth]{multi_kwc_recovery-2.pdf} \caption{The graph of $\Psi_\varepsilon(s,a,b)$. Thick lines are the parts of the graph of $\psi_\varepsilon(s,a)$ or $\psi_\varepsilon(s,b)$, and the other parts are linear.} \label{fig:psi-graph} \end{figure} Let $\eta$ be a minimizer of $\alpha$ in $[a,b]$. We first consider the case when $\eta<1$ so that $a\leq\eta<1$. In this case, by definition of $\Psi_\varepsilon$, there is a unique $s_0>0$ such that $\Psi_\varepsilon(s_0,a,b)=\eta$. We then set \[ \varphi_\varepsilon(s,a,b) = \Psi_\varepsilon(s+s_0,a,b). \] For the case $\eta\geq1$, we take the smallest positive $s_0>0$ such that $\Psi_\varepsilon(s_0,a,b)=\eta$. This $s_0=s_0(\varepsilon)$ is of order $\varepsilon^{3/2}$ as $\varepsilon\to 0$. Since $K$ is a $\delta$-flat surface, it lies (after a rotation) on the graph of a $C^1$ function $p$, so we can write \[K = \{(x',p(x')) \mid x' \in V\}.\] We set $A := \{(x',z_N) \mid p(x') \geq z_N \}$ and $B := \{(x',z_N) \mid p(x') < z_N \}$. Let $\mathrm{sd}(z)$ be the signed distance of $z$ from $K$, i.e. \[\mathrm{sd}(z) := d({z},A) - d({z},B).\] If $\mathrm{sd}(z)$ is non-negative, we simply write it as $d(z)$. We then take \[ w_\varepsilon(z) = \varphi_\varepsilon \left(\mathrm{sd}(z),a,b\right). \] This is the desired sequence; in particular, the support of $w_\varepsilon-1$ is contained in $U$ for sufficiently small $\varepsilon>0$. Since $w_\varepsilon$ is Lipschitz continuous, it is clear that $w_\varepsilon\in H^1(\Omega)$. Since \[ \nabla w_\varepsilon = (\partial_s \Psi_\varepsilon) \left( \mathrm{sd}(z)+s_0,a,b \right) \nabla \mathrm{sd}(z), \] we have, for $|\mathrm{sd}(z)| <\sqrt{\varepsilon} - s_0$, \begin{align*} \nabla w_\varepsilon(z) & = (\partial_s \psi_\varepsilon) \left( \mathrm{sd}(z)+s_0,a \right) \nabla \mathrm{sd}(z) \\ & = \frac{1}{\varepsilon} (\partial_s \psi) \left(\left( \mathrm{sd}(z)+s_0\right)/\varepsilon,a \right) \nabla \mathrm{sd}(z). \end{align*} Thus, for $z$ with $ -\sqrt{\varepsilon}+s_0 < \mathrm{sd}(z) <\sqrt{\varepsilon} - s_0 $, we see that \[ \left| \nabla w_\varepsilon(z) \right|^2 = \frac{1}{\varepsilon^2} \bigl| (\partial_s \psi) \left(\left( \mathrm{sd}(z)+s_0\right)/\varepsilon,a \right)\bigr|^2. \] Let $U_\varepsilon$ denote the set \[ U_\varepsilon = \left\{ z \in \Omega \bigm| -\sqrt{\varepsilon} + s_0 < \mathrm{sd}(z) < \sqrt{\varepsilon} - s_0 \right\}. \] Since $s_0$ is of order $\varepsilon^{3/2}$, the closure $\overline{U}_\varepsilon$ converges to $K$ in the sense of Hausdorff distance.
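Before estimating the energy of $w_\varepsilon$ near $K$, we record a rough estimate (a side remark under the same assumptions, anticipating Lemma \ref{ELPF} below) which explains why the linear matching pieces of $\Psi_\varepsilon$ are negligible. Consider, for instance, the layer $\left\{ z\in\Omega \bigm| -2\sqrt{\varepsilon} < \mathrm{sd}(z)+s_0 < -\sqrt{\varepsilon} \right\}$, whose $\mathcal{L}^N$-measure is $O(\sqrt{\varepsilon})$ by the relation between Minkowski content and area invoked below. Since $|\nabla\mathrm{sd}|\leq1$ and the slope of the first linear piece satisfies \[ |\alpha_1| = \frac{1-\psi(1/\sqrt{\varepsilon},a)}{\sqrt{\varepsilon}} \leq \frac{1-a}{\sqrt{\varepsilon}}, \] the Dirichlet part of the energy on this layer is at most $\frac{\varepsilon}{2}|\alpha_1|^2\,O(\sqrt{\varepsilon})=O(\sqrt{\varepsilon})$, while, by the monotonicity (F2') and Lemma \ref{ELPF} with $\delta=\sqrt{\varepsilon}$, the values of $F(w_\varepsilon)$ there do not exceed $(1-a)^2\varepsilon$, so the potential part is at most $\frac{1}{2\varepsilon}(1-a)^2\varepsilon\,O(\sqrt{\varepsilon})=O(\sqrt{\varepsilon})$. The other two linear pieces are estimated in the same way; this justifies the claim made below that the parts where $\Psi_\varepsilon$ is linear do not contribute in the limit.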
We proceed \begin{align*} E^0_\mathrm{sMM}&(w_\varepsilon,U_\varepsilon) = \int_{U_\varepsilon} \left\{ \frac{\varepsilon}{2} |\nabla w_\varepsilon|^2 + \frac{1}{2\varepsilon} F(w_\varepsilon) \right\} \id\mathcal{L}^N \\ & = \frac{1}{2\varepsilon} \int_{U_\varepsilon} \bigl| (\partial_s \psi) \left(\left( \mathrm{sd}(z)+s_0\right)/\varepsilon,a \right)\bigr|^2 + F \bigl(\psi \left(\left( \mathrm{sd}(z)+s_0\right)/\varepsilon,a \right)\bigr) \id\mathcal{L}^N(z) \\ & = \frac{1}{\varepsilon} \int_{U_\varepsilon} F \bigl(\psi \left(\left( \mathrm{sd}(z)+s_0\right)/\varepsilon,a \right)\bigr) \id\mathcal{L}^N(z) \end{align*} by \eqref{SR}. To simplify the notation, we set \[ f_\varepsilon(t) = \frac{1}{\varepsilon} F \bigl(\psi \left((t+s_0)/\varepsilon,a \right)\bigr) \] and observe that \[ E^0_\mathrm{sMM} (w_\varepsilon,U_\varepsilon) = \int_{U_\varepsilon} f_\varepsilon \left(\mathrm{sd}(z)\right) \id\mathcal{L}^N(z) = \int^{\beta(\varepsilon)}_{-\beta(\varepsilon)} f_\varepsilon(t) H(t) \id t, \quad \beta(\varepsilon) := \sqrt{\varepsilon}-s_0(\varepsilon) \] with $H(t):=\mathcal{H}^{N-1}\left(\left\{ z\in U_\varepsilon \bigm| d(z)=t \right\}\right)$ by the co-area formula. We set $A(t):=\mathcal{L}^N\left(\left\{ z\in U_\varepsilon \bigm| |\mathrm{sd}(z)| < t \right\}\right)$ and observe that $A(t)=\int^t_{-t} H(s)ds$ by the co-area formula. Integrating by parts, we observe that \begin{align*} \int^{\beta(\varepsilon)}_{-\beta(\varepsilon)} f_\varepsilon(t) H(t) \id t &= \int^{\beta(\varepsilon)}_0 f_\varepsilon(t) H(t) \id t + \int^0_{-\beta(\varepsilon)} f_\varepsilon(t) H(t) \id t\\ &=\int^{\beta(\varepsilon)}_0 f_\varepsilon(t) \left( H(t) + H(-t) \right) \id t \\ &= \int^{\beta(\varepsilon)}_0 f_\varepsilon(t) A'(t) \id t \\ &= f_\varepsilon \left( \beta(\varepsilon) \right) A\left(\beta(\varepsilon) \right) - \int^{\beta(\varepsilon)}_0 f'_\varepsilon(t) A(t) \id t. \end{align*} By the relation of Minkowski contents and area \cite[Theorem 3.2.39]{Fe}, we know that \[ \lim_{t\downarrow 0} A(t)/2t = \mathcal{H}^{N-1}(K). \] In other words, \[ A(t) = 2 \left( \mathcal{H}^{N-1}(K) + \rho(t) \right)t \] with $\rho$ such that $\rho(t)\to 0$ as $t\to 0$. Thus, \[ - \int^{\beta(\varepsilon)}_0 f'_\varepsilon(t) A(t) \id t \leq - \int^{\beta(\varepsilon)}_0 f'_\varepsilon(t) 2t\id t \left( \mathcal{H}^{N-1}(K) + \max_{0\leq t\leq\beta(\varepsilon)} \rho(t)_+ \right) \] since $f'_\varepsilon(t)\leq 0$. Here we invoke (F2') so that $F'(\sigma)\leq0$ for $\sigma<1$. We thus observe that \[ E^\varepsilon_\mathrm{sMM}(w_\varepsilon,U_\varepsilon) \leq f_\varepsilon \left(\beta(\varepsilon)\right) A\left(\beta(\varepsilon)\right) - \int^{\beta(\varepsilon)}_0 f'_\varepsilon(t) 2t\id t \left( \mathcal{H}^{N-1}(K) + \max_{0\leq t\leq\beta(\varepsilon)} \rho(t)_+ \right). \] Integrating by parts yields \[ - \int^{\beta(\varepsilon)}_0 f'_\varepsilon(t) 2t\id t = 2 \int^{\beta(\varepsilon)}_0 f_\varepsilon(t) \id t -2 f_\varepsilon \left(\beta(\varepsilon)\right) \beta(\varepsilon). \] Since $\psi(s)=\psi(s,a)$ solves \eqref{SR}, we see \begin{align*} f_\varepsilon(t-s_0) &= \frac{1}{\varepsilon} F \left(\psi(t/\varepsilon) \right) \\ &= \frac{1}{\varepsilon} (\partial_s \psi) (t/\varepsilon)\sqrt{F\left(\psi(t/\varepsilon) \right)} \\ &= -{\frac{\mathrm{d}}{\mathrm{d}t}} \bigl(G\left(\psi(t/\varepsilon) \right)\bigr). \end{align*} Thus \[ \int^{\beta(\varepsilon)}_0 f_\varepsilon(t) \id t = G \left(\psi(s_0/\varepsilon) \right) - G \left(\psi(1/\sqrt{\varepsilon}) \right). 
\] Since $s_0/\varepsilon\to 0$ and $\psi(1/\sqrt{\varepsilon},a)\to 1$ as $\varepsilon\to 0$, we obtain \[ \lim_{\varepsilon\to 0} \int^{\beta(\varepsilon)}_0 f_\varepsilon(t) \id t = G(a). \] Combining these manipulations, we obtain that \begin{align*} \limsup_{\varepsilon\to 0}& E^\varepsilon_\mathrm{sMM} (w_\varepsilon,U_\varepsilon) \\ &\leq \limsup_{\varepsilon\to 0} f_\varepsilon \left(\beta(\varepsilon)\right) \left\{ A\left(\beta(\varepsilon)\right) - 2\left(\mathcal{H}^{N-1} (K)-\max_{0\leq t\leq\beta(\varepsilon)} \left|\rho(t)\right|\right) \beta(\varepsilon) \right\} \\ &\qquad+ 2\mathcal{H}^{N-1} (K) G(a). \end{align*} We thus conclude that \[ \limsup_{\varepsilon\to 0} E^\varepsilon_\mathrm{sMM} (w_\varepsilon,U_\varepsilon) \leq 2 \mathcal{H}^{N-1} (K) G(a) \] provided that \[ \lim_{\varepsilon\to0} f_\varepsilon \left(\beta(\varepsilon)\right) \beta(\varepsilon) < \infty, \] since $\left(A(t)-2\mathcal{H}^{N-1}(K)t\right)\bigm/t=\rho(t)\to0$ as $t\to0$. This condition follows from the following lemma by setting $\varepsilon^{1/2}=\delta$. Indeed, we obtain the stronger result \[ \limsup_{\varepsilon\to0} f_\varepsilon\left(\beta(\varepsilon)\right) \beta(\varepsilon) \bigm/ \varepsilon^{1/2} < \infty. \] \begin{lemma} \label{ELPF} Assume that $F$ satisfies (F1), (F2'). Then, for $c\in\mathbf{R}$, \[ F \left(\psi(1/\delta,c)\right) \bigm/ \delta^2 \leq (1-c)^2 \quad\text{for}\quad \delta > 0. \] \end{lemma} \begin{proof}[Proof of Lemma \ref{ELPF}] We may assume $c<1$ since the argument for $c>1$ is symmetric and the case $c=1$ is trivial. We write $\psi(s,c)$ simply as $\psi(s)$. By definition and the monotonicity (F2') of $F$, we see \[ \frac{1}{\delta} = \int^{\psi(1/\delta)}_c \frac{1}{\sqrt{F(z)}} \id z \leq \frac{\psi(1/\delta)-c}{\sqrt{F\left(\psi(1/\delta)\right)}}. \] Taking the square of both sides, we end up with \[ F\left(\psi(1/\delta)\right) \bigm/ \delta^2 \leq \left(\psi(1/\delta)-c\right)^2 \leq (1-c)^2. \] \end{proof} Let $V_{\varepsilon}$ denote the set \begin{align*} V_{\varepsilon} := \left\{z \in \Omega \mid 2\sqrt{\varepsilon} < d(z) + s_0 < 4\sqrt{\varepsilon} \right\}. \end{align*} We observe that \begin{align*} E^0_\mathrm{sMM} (w_\varepsilon,V_\varepsilon) &= \frac{1}{\varepsilon} \int_{V_\varepsilon} F \left(\psi \left(\frac{d(z) + s_0 -3\sqrt{\varepsilon}}{\varepsilon}, b \right) \right) \id\mathcal{L}^N(z) \\ &= \frac{1}{\varepsilon} \int_{2\sqrt{\varepsilon}-s_0}^{4\sqrt{\varepsilon}-s_0} F \left(\psi \left(\frac{t + s_0 -3\sqrt{\varepsilon}}{\varepsilon}, b \right) \right) H(t) \id t \\ &= \int_{2\sqrt{\varepsilon}-s_0}^{4\sqrt{\varepsilon}-s_0} \tilde{f}_{\varepsilon}(t - 3\sqrt{\varepsilon}) H(t) \id t, \end{align*} where $\tilde{f}_\varepsilon(t) := \frac{1}{\varepsilon} F \bigl(\psi \left((t+s_0)/\varepsilon,b \right)\bigr)$. We set $\tilde{A}(t):=\mathcal{L}^N\left(\left\{ z\in V_\varepsilon \bigm| 0\leq d(z) < t \right\}\right)$ and observe that $ \tilde{A}(t)=\int^t_0 H(s)\id s$ by the co-area formula. As before, we see \[ \tilde{A}(t) = \left( \mathcal{H}^{N-1}(K) + \rho(t) \right)t \] with $\rho$ such that $\rho(t)\to 0$ as $t\to 0$.
We set \[b(\varepsilon) := \tilde{f}_{\varepsilon}(\sqrt{\varepsilon} - s_0) \tilde{A}(4\sqrt{\varepsilon} - s_0) - \tilde{f}_{\varepsilon}(-\sqrt{\varepsilon} - s_0) \tilde{A}(2\sqrt{\varepsilon} - s_0),\] and observe that \begin{align*} E^0_\mathrm{sMM}(w_\varepsilon,V_\varepsilon) &\leq b(\varepsilon) - \int_{2\sqrt{\varepsilon} - s_0}^{4\sqrt{\varepsilon} - s_0} \tilde{f}'_{\varepsilon}(t - 3\sqrt{\varepsilon}) t \id t \\ &\qquad\times\left(\mathcal{H}^{N-1}(K) + \max_{2\sqrt{\varepsilon} - s_0 \leq t \leq 4\sqrt{\varepsilon} - s_0} \rho(t)_{+} \right). \end{align*} Integration by parts yields \[-\int_{2\sqrt{\varepsilon} - s_0}^{4\sqrt{\varepsilon} - s_0} \tilde{f}'_{\varepsilon}(t - 3\sqrt{\varepsilon}) t \id t = \int_{2\sqrt{\varepsilon} - s_0}^{4\sqrt{\varepsilon} - s_0} \tilde{f}_{\varepsilon} (t - 3\sqrt{\varepsilon}) \id t - 2 \sqrt{\varepsilon} \tilde{f}_{\varepsilon}(\beta(\varepsilon)), \] and we see \[\int_{2\sqrt{\varepsilon} - s_0}^{4\sqrt{\varepsilon} - s_0} \tilde{f}_{\varepsilon} (t - 3\sqrt{\varepsilon}) \id t = 2 \int_{0}^{\beta(\varepsilon)} \tilde{f}_{\varepsilon}(t) \id t. \] As before, we thus conclude that \[ \limsup_{\varepsilon\to 0} E^\varepsilon_\mathrm{sMM} (w_\varepsilon,V_\varepsilon) \leq 2 \mathcal{H}^{N-1} (K) G(b). \] The part corresponding to $\psi(s,b)$ is similar, and the part where $\Psi_\varepsilon$ is linear will vanish as $\varepsilon\to 0$. So, we conclude \[ \lim_{\varepsilon\to 0} E^\varepsilon_\mathrm{sMM} (w_\varepsilon,\Omega) \leq E^0_\mathrm{sMM} (\Xi,\Omega). \] The term related to $\alpha$ is independent of $\varepsilon$ because of the choice of $s_0$ so that $w_\varepsilon(x)=\eta$ for $x\in K$. Since $\mathcal{H}^{N-1}(K)<\infty$, by the co-area formula (Lemma \ref{CAR}), $K^1_{x,\nu}$ is a finite set for $\mathcal{L}^{N-1}$-a.e.\ $x\in\Omega_\nu$. In the Hausdorff sense, $(S_\varepsilon)^1_{x,\nu}\to K^1_{x,\nu}$ holds, as observed in the following lemma for \[ S_\varepsilon = \left\{ y\in\mathbf{R}^N \bigm| d(y,K) = \varepsilon \right\}. \] Therefore, we observe that for $\mathcal{L}^{N-1}$-a.e.\ $x\in\Omega_\nu$, \[ \textstyle \limsup^* w_{\varepsilon,x,\nu}=b, \quad \liminf_* w_{\varepsilon,x,\nu}=a \ \text{on}\ K^1_{x,\nu} \] and outside $K^1_{x,\nu}$, $\limsup^* w_{\varepsilon,x,\nu}=\liminf_* w_{\varepsilon,x,\nu}=1$. We conclude that $w_{\varepsilon,x,\nu}$ converges to $\Xi_{x,\nu}$ in the graph sense on $\Omega^1_{x,\nu}$, which proves (i\hspace{-1pt}i). \end{proof} \begin{lemma} \label{HAUS} Let $K$ be a compact set in a bounded open subset $\Omega$ of $\mathbf{R}^N$ and set \[ S_\varepsilon = \left\{ y \in \Omega \bigm| d(y,K) = \varepsilon \right\}. \] For $\nu\in S^{N-1}$, let $x\in\Omega_\nu$ be such that $K^1_{x,\nu}$ is a non-empty finite set. Then, $(S_\varepsilon)^1_{x,\nu}\to K^1_{x,\nu}$ in Hausdorff distance in $\mathbf{R}$ as $\varepsilon\to0$. \end{lemma} \begin{proof}[Proof of Lemma \ref{HAUS}] If $(S_\varepsilon)^1_{x,\nu}$ is not empty, it is clear that \[ \sup_{y\in(S_\varepsilon)^1_{x,\nu}} d(y, K^1_{x,\nu}) \leq \varepsilon \to 0 \] as $\varepsilon\to0$. It remains to prove that for any $t_0\in K^1_{x,\nu}$, there is a sequence $t_\varepsilon\in(S_\varepsilon)^1_{x,\nu}$ such that $t_\varepsilon\to t_0$ in $\mathbf{R}$. We set \[ f(\delta) = d \left(x+\nu(t_0 + \delta), K\right) \quad\text{for}\quad \delta>0. \] Since $t_0$ is isolated and $K$ is compact, we see that $f(\delta)>0$ for sufficiently small $\delta$, say $\delta<\delta_0$. 
Moreover, $f(\delta)$ is continuous on $(0,\delta_0)$ since $K$ is compact. Since $f(\delta)\leq\delta$, $f$ satisfies $f(\delta)\to0$ as $\delta\to0$. By the intermediate value theorem, for sufficiently small $\varepsilon$, say $\varepsilon\in(0,\varepsilon_0)$, there always exists $\delta(\varepsilon)$ such that $f\left(\delta(\varepsilon)\right)=\varepsilon$, which implies that \[ t_\varepsilon = t_0 + \delta(\varepsilon) \in (S_\varepsilon)^1_{x,\nu}. \] Since $\delta(\varepsilon)\to0$ as $\varepsilon\to0$, this implies $t_\varepsilon\to t_0$. The proof is now complete. \end{proof} \begin{proof}[Proof of Theorem \ref{SUP}] This follows from Lemma \ref{APP} and Lemma \ref{REC} by a diagonal argument. \end{proof} \section{Singular limit of the Kobayashi--Warren--Carter energy} \label{SLKWC} We first recall the Kobayashi--Warren--Carter energy. For a given $\alpha\in C(\mathbf{R})$ with $\alpha\geq 0$, we consider the Kobayashi--Warren--Carter energy of the form \[ E^\varepsilon_\mathrm{KWC}(u,v) = \int_\Omega \alpha(v) |Du| + E^\varepsilon_\mathrm{sMM}(v) \] for $u\in BV(\Omega)$ and $v\in H^1(\Omega)$. The first term is the weighted total variation of $u$ with weight $w=\alpha(v)$, defined by \[ \int_\Omega w|Du| := \sup \left\{ -\int_\Omega u \operatorname{div}\varphi\, \id\mathcal{L}^N \Bigm| \left|\varphi(z)\right| \leq w(z)\ \text{a.e.}\ x,\ \varphi\in C^1_c(\Omega) \right\} \] for any non-negative Lebesgue measurable function $w$ on $\Omega$. We next define the functional, which turns out to be a singular limit of the Kobayashi--Warren--Carter energy. For $\Xi\in\mathcal{A}_0(\Omega)$, let $\Sigma$ be its singular set in the sense that \[ \Sigma = \left\{ z\in\Omega \bigm| \Xi(z) \neq \{1\} \right\}. \] For $u\in BV(\Omega)$, let $J_u$ denote the set of its jump discontinuities. In other words, \[ J_u = \left\{ z\in\Omega \backslash \Sigma_0 \bigm| j(z) := \left| u(z+0\nu) - u(z-0\nu) \right| > 0 \right\}. \] Here $\nu$ denotes the approximate normal of $J_u$, and $u(z\pm0\nu)$ denotes the trace of $u$ in the direction of $\pm\nu$. We consider a triplet $\mathcal{J}(u)={(J_u,j,\alpha)}$ and consider $E^{0,{\mathcal{J}}}_\mathrm{sMM}(\Xi,\Omega)$, whose explicit form is \[ E^{0,{\mathcal{J}}}_\mathrm{sMM}(\Xi,\Omega) = E^0_\mathrm{sMM}(\Xi,\Omega) + \int_{{J\cap\Sigma}} j \min_{\xi^-\leq\xi\leq\xi^+} \alpha(\xi)\, {\id\mathcal{H}^{N-1}}, \] where $\Xi(z)=\left[\xi^-(z),\xi^+(z)\right]$ for $z\in\Sigma$. We then define the limit Kobayashi--Warren--Carter energy: \[ E^0_\mathrm{KWC}(u,\Xi,\Omega) = \int_{\Omega\backslash J_u} \alpha(1) |Du| + E^{0,\mathcal{J}(u)}_\mathrm{sMM}(\Xi,\Omega), \] in which the explicit representation of the second term is \[ E^{0,\mathcal{J}(u)}_\mathrm{sMM}(\Xi,\Omega) = E^0_\mathrm{sMM}(\Xi,\Omega) + \int_{{J_u\cap\Sigma}} |u^+-u^-|\alpha_0(z)\, {\id\mathcal{H}^{N-1}}(z) \] with $u^\pm=u(z\pm0\nu)$ and \[ \alpha_0(z) := \min\left\{ \alpha(\xi) \bigm| \xi^-(z) \leq \xi \leq \xi^+(z) \right\}. \] Here $u^\pm$ are defined by \begin{align*} u^+(x) &:= \inf \left\{ t \in \mathbf{R} \biggm| \lim_{r\to0} \frac{\mathcal{L}^{{N}}\left(B_r(x)\cap\{u>t\}\right)}{r^N}=0 \right\}, \\ u^-(x) &:= \sup \left\{ t \in \mathbf{R} \biggm| \lim_{r\to0} \frac{\mathcal{L}^{{N}}\left(B_r(x)\cap\{u<t\}\right)}{r^N}=0 \right\}, \end{align*} where $B_r(x)$ is the closed ball of radius $r$ centered at $x$ in $\mathbf{R}^N$. This is a measure-theoretic upper and lower limit of $u$ at $x$. If $u^+(x)=u^-(x)$, we say that $u$ is approximately continuous. For more detail, see \cite{Fe}. 
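Before stating the main results, it may help to evaluate the limit energy on a simple, purely illustrative configuration. Suppose that $\Sigma=\Omega\cap\{x_N=0\}$ is a flat piece, that $\Xi=[a,b]$ on $\Sigma$ for constants $a\leq1\leq b$ and $\Xi=\{1\}$ elsewhere, and that $u=h\,\mathbf{1}_{\{x_N>0\}}$ for a constant $h>0$, so that $u$ jumps by $h$ exactly across $\Sigma$ and $|Du|(\Omega\backslash\Sigma)=0$. Recalling that, as in the computations of Section \ref{SCRS}, $E^0_\mathrm{sMM}(\Xi,\Omega)=2\bigl(G(a)+G(b)\bigr)\mathcal{H}^{N-1}(\Sigma)$ for this $\Xi$, we then have \[ E^0_\mathrm{KWC}(u,\Xi,\Omega) = 2\bigl(G(a)+G(b)\bigr)\mathcal{H}^{N-1}(\Sigma) + h\left(\min_{a\leq\xi\leq b}\alpha(\xi)\right)\mathcal{H}^{N-1}(\Sigma); \] the jump of $u$ across $\Sigma$ is thus penalized through the minimal value of $\alpha$ on the interval recorded by $\Xi$, rather than through $\alpha(1)$.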
We are now in a position to state our main results rigorously. \begin{theorem} \label{GKWC1} Let $\Omega$ be a bounded domain in $\mathbf{R}^N$. Assume that $F$ satisfies (F1) and (F2) and that $\alpha\in C(\mathbf{R})$ is non-negative. \begin{enumerate} \item[(i)] (liminf inequality) Assume that $\{u_\varepsilon\}_{0<\varepsilon<1}\subset BV(\Omega)$ converges to $u\in BV(\Omega)$ in $L^1$, i.e., $\|u_\varepsilon-u\|_{L^1}\to0$, and that $\{v_\varepsilon\}_{0<\varepsilon<1}\subset H^1(\Omega)$. If $v_\varepsilon\xrightarrow{sg}\Xi$ and $\Xi\in\mathcal{A}_0$, then \[ E^0_\mathrm{KWC} (u,\Xi,\Omega) \leq \liminf_{\varepsilon\to0} E^\varepsilon_\mathrm{KWC} (u_\varepsilon,v_\varepsilon). \] \item[(i\hspace{-1pt}i)] (limsup inequality) For any $\Xi\in\mathcal{A}_0$ and $u\in {BV(\Omega)}$, there exists a family of Lipschitz functions $\{w_\varepsilon\}_{0<\varepsilon<1}$ such that \[ E^0_\mathrm{KWC} (u,\Xi,\Omega) = \lim_{\varepsilon\to0} E^\varepsilon_\mathrm{KWC} (u,w_\varepsilon). \] \end{enumerate} \end{theorem} \begin{corollary} \label{GKWC2} Assume the same hypotheses as in Theorem \ref{GKWC1}. Assume that $f\in L^2(\Omega)$ and $\lambda\geq0$. Then the results of Theorem \ref{GKWC1} with $E_{\mathrm{KWC}}^0(u,\Xi,\Omega)$ and $E_{\mathrm{KWC}}^\varepsilon(u,v)$ replaced with \[ E^0_\mathrm{KWC} (u,\Xi,\Omega) + \frac{\lambda}{2} \int_\Omega|u-f|^2 \id\mathcal{L}^N \quad\text{and}\quad E^\varepsilon_\mathrm{KWC} (u,v) + \frac{\lambda}{2} \int_\Omega|u-f|^2 \id\mathcal{L}^N, \] respectively, still hold, provided that $u\in L^2(\Omega)$. \end{corollary} \begin{remark} \label{GKWC3} \begin{enumerate} \item[(i)] In the one-dimensional case, the liminf inequality here is weaker than \cite[Theorem 2.3 (i)]{GOU} because we assume $u\in BV(\Omega)$, not $u\in BV(\Omega\backslash\Sigma_0)$ with \[ \Sigma_0 = \left\{ z \in \Sigma \bigm| \alpha_0(z) = 0 \right\}. \] It seems possible to extend our results to this situation, but we did not pursue this in order to avoid technical complications. \item[(i\hspace{-1pt}i)] It is clear that Corollary \ref{GKWC2} immediately follows from Theorem \ref{GKWC1} once we admit that $u_\varepsilon\to u$ in $L^1(\Omega)$ implies \[ \| u-f \|^2_{L^2} \leq \liminf_{\varepsilon\to0} \| u_\varepsilon-f \|^2_{L^2}. \] The last lower semicontinuity holds by Fatou's lemma since $u_{\varepsilon'}\to u$ $\mathcal{L}^N$-a.e.\ along a suitable subsequence. \end{enumerate} \end{remark} \begin{proof}[Proof of Theorem \ref{GKWC1}] Part (i\hspace{-1pt}i) follows easily from Theorem \ref{SUP}. Indeed, taking $w_\varepsilon$ in Theorem \ref{SUP} for $\mathcal{J}=\mathcal{J}(u)$, we see that \[ E^{0,{\mathcal{J}}}_\mathrm{sMM} (\Xi,\Omega) = \lim_{\varepsilon\to0} E^{\varepsilon,{\mathcal{J}}}_\mathrm{sMM} (w_\varepsilon). \] Since \[ \int_\Omega \alpha(w_\varepsilon)|Du| = \int_{\Omega\backslash J_u} \alpha(w_\varepsilon)|Du| + \int_{J_u} |u^+ - u^-| \alpha(w_\varepsilon){\id\mathcal{H}^{N-1}}, \] it suffices to prove that \[ \lim_{\varepsilon\to0} \int_{\Omega\backslash J_u} \alpha(w_\varepsilon)|Du| = \int_{\Omega\backslash J_u} \alpha(1)|Du|. \] As in the proof of Theorem \ref{SUP}, by a diagonal argument, we may assume that $w_\varepsilon$ is bounded. Since, by construction, $w_\varepsilon(z)\to1$ for $z\in\Omega\backslash\Sigma$ with a uniform bound for $\alpha(w_\varepsilon)$ and since \[ |Du| \left(\Sigma\cap(\Omega\backslash J_u) \right) = 0, \] the Lebesgue dominated convergence theorem yields the desired convergence. It remains to prove (i).
For this purpose, we recall a few properties of the measure $\langle Du,\nu\rangle$ for $u\in BV(\Omega)$, where $Du$ denotes the distributional gradient of $u$ and $\nu\in S^{N-1}$. The following disintegration lemma is found in \cite[Theorem 3.107]{AFP}. \begin{lemma} \label{5DI} For $u\in BV(\Omega)$ and $\nu\in S^{N-1}$, \[ \left|\langle Du,\nu\rangle\right| = (\mathcal{H}^{N-1} \lfloor \Omega_\nu) \otimes \left|Du_{x,\nu}|\right. \] In other words, \[ \int_\Omega \varphi \left|\langle Du,\nu\rangle\right| = \int_{\Omega_\nu} \left\{ \int_{\Omega^1_{x,\nu}} \varphi_{x,\nu} \left|Du_{x,\nu}\right| \right\}{\id\mathcal{H}^{N-1}}(x) \] for any bounded Borel function $\varphi\colon\Omega\to\mathbf{R}$. \end{lemma} We also need a representation of the total variation of a vector-valued measure and its component. Let $\tau > 0$ and monotone increasing sequence $(a_j)_{j \in \mathbf{Z}}$ such that $a_{j+1} < a_j + \tau$ be given. We consider a division of $\mathbf{R}^N$ into a family of rectangles of the form \[ R^\tau_{J,(a_j)} = \prod^N_{i=1}[a_{j_i},a_{j_i+1}), \quad J = (j_1, \ldots, j_N) \in \mathbf{Z}^N \] We say that the division $\{R^\tau_{J,(a_j)}\}_{J \in \mathbf{Z}^N}$ is a $\tau$-rectangular division associated with $(a_j)$. {Hereafter, we may omit $(a_j)$ and write $\{R_J^\tau\}_{J\in\mathbb{Z}^N}$ in short.} \begin{lemma} \label{5VT} Let $\mu$ be an $\mathbf{R}^d$-valued finite Radon measure in a domain $\Omega$ in $\mathbf{R}^N$. Let $\{\tau_k\}$ be a decreasing sequence converging to zero as $k\to\infty$. Let $\{R^{\tau_k}_J\}_J$ be a fixed $\tau_k$-rectangular division of $\mathbf{R}^N$. Let $D$ be a dense subset of $S^{N-1}$. Then \[ |\mu|(A) = \sup \left\{ \left|\langle \mu,\nu_k \rangle\right|(A) \bigm| \nu_k : \Omega\to D\ \text{is constant on}\ {R^{\tau_k}_J \cap \Omega,\ J \in \mathbf{Z}^N,}\ k=1,2,\ldots \right\}, \] where $A$ is a Borel set. \end{lemma} We postpone its proof to the end of this section. We shall prove (i). We recall the decomposition of $\Sigma$ into a countable disjoint union of $\delta$-flat compact sets $K_i$ up to $\mathcal{H}^{N-1}$-measure zero set, and take the corresponding ${\nu^i\in D}$ as in Theorem \ref{INF}. We use the notation in Theorem \ref{INF}. We may assume that $\bigcap^\infty_{m=1}U^m_i=K_i$. By Lemma \ref{5DI}, we proceed \begin{align*} \int_{U^m_i} \alpha(v_\varepsilon)\left|Du_\varepsilon\right| &\geq \int_{U^m_i} \alpha(v_\varepsilon)\left|\langle Du_\varepsilon, \nu^i\rangle\right|\\ &= \int_{(U^m_i)_{\nu^i}} \left\{ \int_{(U^m_i)^1_{x,\nu^i}} \alpha(v_{\varepsilon,x,\nu^i})\left| Du_{\varepsilon,x,\nu^i} \right| \right\}{\id\mathcal{H}^{N-1}}(x). \end{align*} By one dimensional result \cite[Lemma 5.1]{GOU}, we see that \begin{align*} &\liminf_{\varepsilon\to0} \int_{(U^m_i)^1_{x,\nu^i}} \alpha(v_{\varepsilon,x,\nu^i})\left|Du_{\varepsilon,x,\nu^i}\right| \\ &\geq \int_{(U^m_i\backslash\Sigma)^1_{x,\nu^i}} \alpha(1)\left|Du_{x,\nu^i}\right| + \sum_{t\in(\Sigma\cap U^m_i)^1_{x,\nu^i}} \left( \min_{\xi^-_{x,\nu^i}\leq\xi\leq\xi^+_{x,\nu^i}} \alpha(\xi) \right) \left|u^+_{x,\nu^i}-u^-_{x,\nu^i}\right|(t). \end{align*} (In \cite[Lemma 5.1]{GOU}, $\alpha(v)$ is taken as $v^2$, but the proof works for general $\alpha$. In \cite[Lemma 5.1]{GOU}, $|\xi^-_i|^2$ should be $\left((\xi^-_i)_+\right)^2$.) 
The last term is bounded from below by \[ \alpha_0 \left(x + t^i_x \nu^i\right) \left|u^+ - u^-\right| \left(x + t^i_x \nu^i\right) \] since $(K^m_i)^1_{x,\nu^i}$ (${\subset \left(\Sigma\cap U^m_i \right)^1_{x,\nu^i} }$) is a singleton $\{t^i_x\}$. By the area formula, we see \begin{align} \int_{K^m_i} &\alpha_0 \left|u^+ - u^-\right|{\id\mathcal{H}^{N-1}}\\ &\leq \sqrt{1+(2\delta)^2} \int_{(K^m_i)_{\nu^i}} \alpha_0 \left(x + t^i_x \nu^i\right) \left|u^+ - u^-\right| \left(x + t^i_x \nu^i \right){\id\mathcal{H}^{N-1}} (x). \end{align} Combining these observations, by Fatou's lemma, we conclude that \[ \liminf_{\varepsilon\to0} \int_{U^m_i} \alpha\left(v_\varepsilon\right) \left|Du_\varepsilon\right| \geq \frac{1}{\sqrt{1+(2\delta)^{2}}} \int_{K^m_i} \alpha_0 \left|u^+ - u^-\right|{\id\mathcal{H}^{N-1}}. \] Adding from $i=1$ to $m$, we conclude that \[ \liminf_{\varepsilon\to0} \int_{V^m} \alpha\left(v_\varepsilon\right) \left|Du_\varepsilon\right| \geq \frac{1}{\sqrt{1+(2\delta)^{2}}} \int_{\Sigma^m} \alpha_0 \left|u^+ - u^-\right|{\id\mathcal{H}^{N-1}} \] for $V^m=\bigcup^m_{i=1}U^m_i$. For $W^m=\Omega\backslash V^m$, we take $\nu\in D$ and argue in the same way to get \begin{align*} \liminf_{\varepsilon\to0}& \int_{W^m} \alpha(v_\varepsilon)\left|Du_\varepsilon\right|\\ &\geq \int_{(W^m)_\nu} \Biggl\{ \int_{(W^m\backslash\Sigma)^1_{x,\nu}} \alpha(1)\left|Du_{x,\nu}\right| \\ &\qquad+ \sum_{t\in(\Sigma\cap W^m)^1_{x,\nu}} \left( \min_{\xi^-_{x,\nu}\leq\xi\leq\xi^+_{x,\nu}} \alpha(\xi) \right) \left|u^+_{x,\nu}-u^-_{x,\nu}\right|(t) \Biggr\}{\id\mathcal{H}^{N-1}}(x) \\ &\geq \alpha(1) \int_{(W^m)_\nu} \left\{ \int_{(W^m\backslash\Sigma)^1_{x,\nu}} \left|Du_{x,\nu}\right| \right\}{\id\mathcal{H}^{N-1}}(x) \\ &= \alpha(1) \int_{W^m\backslash\Sigma} \left|\langle Du, \nu \rangle\right|. \end{align*} The last equality follows from Lemma \ref{5DI}. Since $W^m\cap\Sigma_m=\emptyset$, combining the estimate of the integral on $V^m$, we now observe that \begin{align*} \liminf_{\varepsilon\to0}& \int_\Omega \alpha(v_\varepsilon)\left|Du_\varepsilon\right|\\ &\geq \liminf_{\varepsilon\to0} \int_{W^m\backslash(\Sigma\backslash\Sigma_m)} \alpha(v_\varepsilon) \left|\langle Du,\nu \rangle\right| + \liminf_{\varepsilon\to0} \int_{V^m} \alpha(v_\varepsilon) \left| Du_\varepsilon \right| \\ &\geq \alpha(1) \int_{W^m\backslash(\Sigma\backslash\Sigma_m)} \left|\langle Du,\nu \rangle\right| + \frac{1}{\sqrt{1+(2\delta)^2}} \int_{\Sigma_m} \alpha_0 \left| u^+ - u^- \right|{\id\mathcal{H}^{N-1}}. \end{align*} Passing $m$ to $\infty$ yields \[ \liminf_{\varepsilon\to0} \int_\Omega \alpha(v_\varepsilon)\left|Du_\varepsilon\right| \geq \alpha(1) \int_{\Omega\backslash\Sigma} \left|\langle Du,\nu \rangle\right| + \frac{1}{\sqrt{1+(2\delta)^2}} \int_\Sigma \alpha_0 \left| u^+ - u^- \right|{\id\mathcal{H}^{N-1}} \] by Fatou's lemma. Since $\delta>0$ can be taken arbitrarily, we now conclude that \[ {\liminf_{\varepsilon\to0}} \int_\Omega \alpha(v_\varepsilon)\left|Du_\varepsilon\right| \geq \alpha(1) \int_{\Omega\backslash\Sigma} \left|\langle Du,\nu \rangle\right| + \int_{\Omega\cap\Sigma} \alpha_0 \left| u^+ - u^- \right|{\id\mathcal{H}^{N-1}}. \] For any $\nu \in D$, we may replace $\Omega$ with an open set in $\Omega$, for example, $\Omega_0 \cap \Omega$ where $\Omega_0$ is an open rectangle. 
Applying the co-area formula (or Fubini's theorem) to the projection $(x_1,\ldots,x_N)\longmapsto x_i$, we have $\mathcal{H}^{N-1}\left(\Sigma\cap\{x_i=q\}\right)=0$ for $\mathcal{L}^1$-a.e.\ $q$, since otherwise, $\mathcal{L}^N(\Sigma)>0$. Thus, for any $\tau>0$, there is a $\tau$-rectangular division $\{R^\tau_J\}_J$ with $\mathcal{H}^{N-1}({\partial R^\tau_J\cap\Sigma})=0$. Since $\mathcal{H}^{N-1}({\partial R^\tau_J\cap\Sigma})=0$, by dividing $\Omega$ into ${\{\Omega\cap R^\tau_J\}_J}$, we conclude that \[ {\liminf_{\varepsilon\to0}} \int_\Omega \alpha(v_\varepsilon)\left|Du_\varepsilon\right| \geq \alpha(1) \int_{\Omega\backslash\Sigma} \left|\left\langle Du,\nu(x) \right\rangle\right| + \int_{\Omega\cap\Sigma} \alpha_0 \left| u^+ - u^- \right|{\id\mathcal{H}^{N-1}} \] where $\nu:\Omega\to D$ is a constant on each rectangle. Applying Lemma \ref{5VT}, we now conclude that \[ {\liminf_{\varepsilon\to0}} \int_\Omega \alpha(v_\varepsilon)\left|Du_\varepsilon\right| \geq \alpha(1) \int_{\Omega\backslash\Sigma} |Du| + \int_\Omega \alpha_0 \left| u^+ - u^- \right|{\id\mathcal{H}^{N-1}}. \] Since we already obtained \[ \liminf_{\varepsilon\to0} {E^\varepsilon_\mathrm{sMM}} (v_\varepsilon) \geq {E^0_\mathrm{sMM}} (\Xi,\Omega) \] by Theorem \ref{INF} and since \[ E^\varepsilon_\mathrm{KWC} (v) = {E^\varepsilon_\mathrm{sMM} (v)} + \int_\Omega \alpha(v) |Du|, \] the desired liminf inequality follows. \end{proof} \begin{proof}[Proof of Lemma \ref{5VT}] We may assume that $A$ is open since $\mu$ is a Radon measure. By duality representation, \[ |\mu|(A) = \sup \left\{ \sum^d_{i=1} \int_A \varphi_i \id\mu_i \biggm| \varphi = (\varphi_1, \ldots, \varphi_d) \in C_c(A),\ \|\varphi\|_{L^\infty} \leq 1 \right\}, \] where $C_c(A)$ denotes the space of ($\mathbf{R}^d$-valued) continuous functions compactly supported in $A$ and $\|\varphi\|_\infty:=\sup_{x\in\Omega} \left|\varphi(x)\right|$ with the Euclidean norm $|a|=\langle a,a\rangle^{1/2}$ for $a\in\mathbf{R}^d$. Since $\mu(A)<\infty$, by this representation, we see that for any $\delta>0$, there exists $\varphi\in C_c(A)$ with $\|\varphi\|_\infty\leq1$ satisfying \[ |\mu|(A) \leq \sum^d_{i=1} \int_A \varphi_i \id\mu_i + \delta. \] Since $\varphi$ is uniformly continuous in $A$ and $D$ is dense, for sufficiently large $k$, there is $\tau_k$-rectangular division $\{R^{\tau_k}_J\}$ and $\nu^\delta_k:\Omega\to D$, which is constant on $R^{\tau_k}_J\cap\Omega$ such that \[ \left| \varphi - \nu^\delta_k c_k \right| < \delta \quad\text{in}\quad R^{\tau_k}_J \cap \Omega \] with some constant $0\leq c_k\leq1$. This inequality implies that \begin{align*} \sum^d_{i=1} \int_A \varphi_i \id\mu_i &\leq {\sum_J} {\int_{R^{\tau_k}_J\cap A}} c_k \langle \mu, \nu^\delta_k \rangle + \delta |\mu|(A) \\ &\leq \left| \langle\mu,\nu^\delta_k \rangle\right| (A) +\delta |\mu|(A). \end{align*} Thus we obtain that \[ |\mu|(A) \leq \left| \langle\mu,\nu^\delta_k \rangle\right| (A) + \delta + \delta |\mu|(A). \] Hence, by $\mu(A)<\infty$ and the arbitrariness $\delta>0$, we have \begin{multline*} |\mu|(A) \leq \sup \left\{ \left| \langle\mu,\nu_k \rangle\right| (A) \bigm| \nu_k : \Omega \to D,\ \nu_k\ \text{is constant on}\right.\\ \left.\ {R^{\tau_k}_J \cap \Omega,\ J \in \mathbf{Z}^N,}\ k=1,2,\ldots \right\}. \end{multline*} The reverse inequality is trivial, so the proof is now complete. \end{proof} \section*{Acknowledgments} The work of the second was supported by the Program for Leading Graduate Schools, MEXT, Japan. 
The work of the first author was partly supported by the Japan Society for the Promotion of Science through the grants KAKENHI No.~19H00639, No.~18H05323, No.~17H01091, and by Arithmer Inc.~and Daikin Industries, Ltd.~through collaborative grants. The work of the third author was partly supported by the Japan Society for the Promotion of Science through the grants KAKENHI No.~18K13455 {and No.~22K03425}. \begin{thebibliography}{99} \bibitem[{AHM}]{AHM} {M.~Alfaro, D.~Hilhorst, H.~Matano, The singular limit of the Allen--Cahn equation and the FitzHugh--Nagumo system. \emph{J.~Differential Equations} 245, no.~2 (2008), 505--565.} {\bibitem[AFP]{AFP} L.~Ambrosio, N.~Fusco and D.~Pallara, Functions of bounded variation and free discontinuity problems. Oxford Mathematical Monographs. \emph{The Clarendon Press, Oxford University Press, New York,} 2000. } \bibitem[AT]{AT} L.~Ambrosio and V.~M.~Tortorelli, Approximation of functionals depending on jumps by elliptic functionals via $\Gamma$-convergence. \emph{Comm.~Pure Appl.~Math.}\ 43 (1990), no.~8, 999--1036. {\bibitem[AT2]{AT2} L.~Ambrosio and V.~M.~Tortorelli, On the approximation of free discontinuity problems. \emph{Boll.~Un.~Mat.~Ital.~B (7)} 6 (1992), no.~1, 105--123. } \bibitem[AF]{AF} J.-P.~Aubin and H.~Frankowska, Set-valued analysis. Modern Birkh\"auser Classics. \emph{Birkh\"auser Boston, Inc., Boston, MA,} 2009. \bibitem[BMR]{BMR} J.-F.~Babadjian, V.~Millot and R.~Rodiac, On the convergence of critical points of the Ambrosio--Tortorelli functional. Lecture at the workshop ``Free boundary problems and related evolution equations'' organized by G.~Bellettini et al., Erwin Schr\"odinger Institute, Feb.\ 21--25, 2022, Vienna. {\bibitem[BLM]{BLM} M.~Bonnivard, A.~Lemenant and V.~Millot, On a phase field approximation of the planar Steiner problem: Existence, regularity, and asymptotic of minimizers. \emph{Interfaces Free Bound.}\ 20 (2018), no.~1, 69--106. } {\bibitem[BL]{BL} L.~Bronsard and R.~V.~Kohn, Motion by mean curvature as the singular limit of Ginzburg--Landau dynamics. \emph{J.~Differential Equations} 90 (1991), no.~2, 211--237. } {\bibitem[XC]{XC} X.~Chen, Generation and propagation of interfaces for reaction-diffusion equations. \emph{J.~Differential Equations} 96 (1992), no.~1, 116--141.} \bibitem[CFHP1]{CFHP1} R.~Cristoferi, I.~Fonseca, A.~Hagerty and C.~Popovici, A homogenization result in the gradient theory of phase transitions. \emph{Interfaces Free Bound.}\ 21 (2019), no.~3, 367--408. \bibitem[CFHP2]{CFHP2} R.~Cristoferi, I.~Fonseca, A.~Hagerty and C.~Popovici, Erratum to: A homogenization result in the gradient theory of phase transitions. \emph{Interfaces Free Bound.}\ 22 (2020), no.~2, 245--250. \bibitem[ELM]{ELM} Y.~Epshteyn, C.~Liu, and M.~Mizuno, Motion of grain boundaries with dynamic lattice misorientations and with triple junctions drag. \emph{SIAM J.~Math.~Anal.}\ 53 (2021), 3072--3097. \bibitem[ESS]{ESS} L.~C.~Evans, H.~M.~Soner and P.~E.~Souganidis, Phase transitions and generalized motion by mean curvature. \emph{Comm.~Pure Appl.~Math.}\ 45 (1992), no.~9, 1097--1123. \bibitem[Fe]{Fe} H.~Federer, Geometric measure theory. Die Grundlehren der mathematischen Wissenschaften, Band 153 \emph{Springer-Verlag New York Inc., New York,} 1969. \bibitem[FL]{FL} I.~Fonseca and P.~Liu, The weighted Ambrosio--Tortorelli approximation scheme. \emph{SIAM J.~Math.~Anal.}\ 49 (2017), 4491--4520. 
{\bibitem[FLS]{FLS} G.~A.~Francfort, N.~Q.~Le and S.~Serfaty, Critical points of Ambrosio--Tortorelli converge to critical points of Mumford--Shah in the one-dimensional Dirichlet case. \emph{ESAIM Control Optim.~Calc.~Var.}\ 15 (2009), no.~3, 576--598.} {\bibitem[FMa]{FMa} G.~A.~Francfort and J.-J.~Marigo, Revisiting brittle fracture as an energy minimization problem. \emph{J.~Mech.~Phys.~Solids} 46 (1998), no.~8, 1319--1342.} \bibitem[GaSp]{GaSp} A.~Garroni and E.~Spadaro, Derivation of surface tension of grain boundaries in polycystals. Lecture at the workshop ``Free boundary problems and related evolution equations'' organized by G.~Bellettini et al., Erwin Schr\"odinger Institute, Feb.\ 21--25, 2022, Vienna. {\bibitem[Gia]{Gia} A.~Giacomini, Ambrosio--Tortorelli approximation of quasi-static evolution of brittle fractures. \emph{Calc.~Var.~Partial Differential Equations} 22 (2005), no.~2, 129--172.} {\bibitem[G]{G} Y.~Giga, Surface Evolution Equations: A Level Set Approach. \emph{Monogr.~Math.~99, Birkh\"auser, Basel,} 2006.} \bibitem[GOU]{GOU} Y.~Giga, J.~Okamoto and M. Uesaka, A finer singular limit of a single-well Modica--Mortola functional and its applications to the Kobayashi--Warren--Carter energy. \emph{Adv.~Calc.~Var.}, to appear. {\bibitem[HT]{HT} J.~E.~Hutchinson and Y.~Tonegawa, Convergence of phase interfaces in the van der Waals--Cahn--Hilliard theory. \emph{Calc.~Var.~Partial Differential Equations} 10 (2000), no.~1, 49--84.} {\bibitem[IKY]{IKY} A.~Ito, N.~Kenmochi and N.~Yamazaki, A phase-field model of grain boundary motion. \emph{Appl.~Math.}\ 53 (2008), no.~5, 433--454.} {\bibitem[KG]{KG} R.~Kobayashi and Y.~Giga, Equations with singular diffusivity. \emph{J.~Stat.~Phys.}\ 95 (1999), no.~5--6, 1187--1220.} {\bibitem[KWC1]{KWC1} R.~Kobayashi, J.~A.~Warren and W.~C.~Carter, A continuum model of grain boundaries. \emph{Phys.~D} 140 (2000), no.~1--2, 141--150.} {\bibitem[KWC2]{KWC2} R.~Kobayashi, J.~A.~Warren and W.~C.~Carter, Grain boundary model and singular diffusivity. in: \emph{Free Boundary Problems: Theory and Applications,} GAKUTO Internat.~Ser.~Math.~Sci.~Appl.~14, Gakk\=otosho, Tokyo (2000), 283--294.} {\bibitem[KWC3]{KWC3} R.~Kobayashi, J.~A.~Warren and W.~C.~Carter, Modeling grain boundaries using a phase field technique. \emph{J.~Crystal Growth} 211 (2000), no.~1--4, 18--20.} {\bibitem[KS]{KS} R.~V.~Kohn and P.~Sternberg, Local minimisers and singular perturbations. \emph{Proc.~Roy.~Soc.~Edinburgh Sect. A} 111 (1989), no.~1--2, 69--84.} \bibitem[LL]{LL} G.~Lauteri and S.~Luckhaus, An energy estimate for dislocation configurations and the emergence of Cosserat-type structures in metal plasticity. arXiv: 1608.06155 (2016). \bibitem[LSt]{LSt} N.~Q.~Le and P.~J.~Sternberg, Asymptotic behavior of Allen--Cahn-type energies and Neumann eigenvalues via inner variations. \emph{Ann.~Mat.~Pura Appl.}\ 198 (2019), no.~4, 1257--1293. {\bibitem[LS]{LS} A.~Lemenant and F.~Santambrogio, A Modica--Mortola approximation for the Steiner problem. \emph{C.~R.~Math.~Acad.~Sci.~Paris} 352 (2014), no.~5, 451--454.} {\bibitem[M]{M} L.~Modica, The gradient theory of phase transitions and the minimal interface criterion. \emph{Arch.~Ration.~Mech.~Anal.}\ 98 (1987), no.~2, 123--142.} \bibitem[MM1]{MM1} L.~Modica and S.~Mortola, Un esempio di $\Gamma$-convergenza. \emph{Boll.~Un.~Mat.~Ital.~B (5)} 14 (1977), no.~1, 285--299. \bibitem[MM2]{MM2} L.~Modica and S.~Mortola, Il limite nella $\Gamma$-convergenza di una famiglia di funzionali ellittici. 
\emph{Boll.~Un.~Mat.~Ital.~A (5)} 14 (1977), no.~3, 526--529. {\bibitem[MoSh]{MoSh} S.~Moll and K.~Shirakawa, Existence of solutions to the Kobayashi--Warren--Carter system. \emph{Calc.~Var.~Partial Differential Equations} 51 (2014), no.~3--4, 621--656.} {\bibitem[MoShW1]{MoShW1} S.~Moll, K.~Shirakawa and H.~Watanabe, Energy dissipative solutions to the Kobayashi--Warren--Carter system. \emph{Nonlinearity} 30 (2017), no.~7, 2752--2784.} {\bibitem[MoShW2]{MoShW2} S.~Moll, K.~Shirakawa and H.~Watanabe, Kobayashi--Warren--Carter type systems with nonhomogeneous Dirichlet boundary data for crystalline orientation, in preparation.} {\bibitem[MSch]{MSch} P.~de Mottoni and M.~Schatzman, Geometrical evolution of developed interfaces. \emph{Trans.~Amer.~Math.~Soc.}\ 347 (1995), no.~5, 1533--1589.} {\bibitem[O]{O} {J.~Okamoto, Convergence of some non-convex energies under various topology, PhD Thesis, University of Tokyo (2022).} } {\bibitem[SWat]{SWat} K.~Shirakawa and H.~Watanabe, Energy-dissipative solution to a one-dimensional phase field model of grain boundary motion. \emph{Discrete Contin.~Dyn.~Syst.~Ser.~S} 7 (2014), no.~1, 139--159.} {\bibitem[SWY]{SWY} K.~Shirakawa, H.~Watanabe and N.~Yamazaki, Solvability of one-dimensional phase field systems associated with grain boundary motion. \emph{Math.~Ann.}\ 356 (2013), no.~1, 301--330.} \bibitem[Sim]{Sim} L.~Simon, Lectures on geometric measure theory. Proceedings of the Centre for Mathematical Analysis, Australian National University, 3. \emph{Australian National University, Centre for Mathematical Analysis, Canberra,} 1983. {\bibitem[St]{St} P.~Sternberg, The effect of a singular perturbation on nonconvex variational problems. \emph{Arch.~Ration.~Mech.~Anal.}\ 101 (1988), no.~3, 209--260.} {\bibitem[To]{To} Y.~Tonegawa, Brakke's Mean Curvature Flow: An Introduction, SpringerBriefs in Mathematics. \emph{Springer Nature, Singapore,} 2019. } {\bibitem[WSh]{WSh} H.~Watanabe and K.~Shirakawa, Qualitative properties of a one-dimensional phase-field system associated with grain boundary, in: \emph{Nonlinear Analysis in Interdisciplinary Sciences -- Modellings, Theory and Simulations,} GAKUTO Internat.~Ser.~Math.~Sci.~Appl.~36, Gakk\=otosho, Tokyo (2013), 301--328.} \end{thebibliography} \end{document}
2205.14255v2
http://arxiv.org/abs/2205.14255v2
Dips at small sizes for topological graph obstruction sets
\documentclass[11pt,a4paper]{amsart} \usepackage{graphicx,multirow,array,amsmath,amssymb,color,enumitem,subfig} \newtheorem{theorem}{Theorem}[section] \newtheorem{lemma}[theorem]{Lemma} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{conjecture}[theorem]{Conjecture} \newtheorem{question}[theorem]{Question} \newtheorem{remark}{Remark} \newcommand{\ty}{\nabla\mathrm{Y}} \newcommand{\yt}{\mathrm{Y}\nabla} \newcommand{\DD}{\mathcal{D}_4} \newcommand{\F}{\mathcal{F}} \newcommand{\PP}{\mathcal{P}} \newcommand{\R}{\mathbb{R}} \begin{document} \title{Dips at small sizes for topological graph obstruction sets} \author[H.\ Kim]{Hyoungjun Kim} \address{Institute of Data Science, Korea University, Seoul 02841, Korea} \email{[email protected]} \author[T.W.\ Mattman]{Thomas W.\ Mattman} \address{Department of Mathematics and Statistics, California State University, Chico, Chico, CA 95929-0525} \email{[email protected]} \begin{abstract} The Graph Minor Theorem of Robertson and Seymour implies a finite set of obstructions for any minor closed graph property. We show that there are only three obstructions to knotless embedding of size 23, which is far fewer than the 92 of size 22 and the hundreds known to exist at larger sizes. We describe several other topological properties whose obstruction set demonstrates a similar dip at small size. For order ten graphs, we classify the 35 obstructions to knotless embedding and the 49 maximal knotless graphs. \end{abstract} \thanks{The first author(Hyoungjun Kim) was supported by the National Research Foundation of Korea (NRF) grant funded by the Korea government Ministry of Science and ICT(NRF-2021R1C1C1012299 and NRF-2022M3J6A1063595 ).} \maketitle \section{Introduction} The Graph Minor Theorem of Robertson and Seymour~\cite{RS} implies that any minor closed graph property $\PP$ is characterized by a finite set of obstructions. For example, planarity is determined by $K_5$ and $K_{3,3}$ \cite{K,W} while linkless embeddability has seven obstructions, known as the Petersen family~\cite{RST}. However, Robertson and Seymour's proof is highly non-constructive and it remains frustratingly difficult to identify the obstruction set, even for a simple property such as apex (see~\cite{JK}). Although we know that the obstruction set for a property $\PP$ is finite, in practice it is often difficult to establish any useful bounds on its size. In the absence of concrete bounds, information about the shape or distribution of an obstruction set would be welcome. Given that the number of obstructions is finite, one might anticipate a unimodal distribution with a central maximum and numbers tailing off to either side. Indeed, many of the known obstruction sets do appear to follow this pattern. In \cite[Table 2]{MW}, the authors present a listing of more than 17 thousand obstructions for torus embeddings. Although, the list is likely incomplete, it does appear to follow a normal distribution both with respect to graph {\em order} (number of vertices) and graph {\em size} (number of edges), see Figure~\ref{fig:TorObs}. 
\begin{figure}[htb] \centering \subfloat[By order]{\includegraphics[width=0.4\textwidth]{TLO.png}} \hfill \subfloat[By size]{\includegraphics[width=0.4\textwidth]{TLS.png}} \caption{Distribution of torus embedding obstructions} \label{fig:TorObs} \end{figure} \begin{table}[ht] \centering \begin{tabular}{l|ccccccccccc} Size & 18 & 19 & 20 & 21 & 22 & 23 & $\cdots$ & 28 & 29 & 30 & 31 \\ \hline Count & 6 & 19 & 8 & 123 & 517 & 2821 & $\cdots$ & 299 & 8 & 4 & 1 \end{tabular} \caption{Count of torus embedding obstructions by size.} \label{tab:TorSiz} \end{table} However, closer inspection (see Table~\ref{tab:TorSiz}) shows that there is a {\em dip}, or local minimum, in the number of obstructions at size twenty. We will say that the dip occurs at {\em small size} meaning it is near the end of the left tail of the size distribution. In this paper, we will see that the knotless embedding property likewise has a dip at size 23. A {\em knotless embedding} of a graph is an embedding in $\R^3$ such that each cycle is a trivial knot. Having noticed this dip we investigated what other topological properties have a dip, or even {\em gap} (a size, or range of sizes, for which there is no obstruction), at small sizes. We report on what we found in the next section. In a word, the most prominent dips and gaps seem to trace back to that perennial counterexample, the Petersen graph. In Section 3, we prove the following. \begin{theorem} \label{thm:3MM} There are exactly three obstructions to knotless embedding of size 23. \end{theorem} Since there are no obstructions of size 20 or less, 14 of size 21, 92 of size 22 and at least 156 of size 28 (see \cite{FMMNN, GMN}), the theorem shows that the knotless embedding obstruction set has a dip at small size, 23. The proof of Theorem~\ref{thm:3MM} naturally breaks into two parts. We show ``by hand'' that the three obstruction graphs have no knotless embedding and are minor minimal for that property. To show these three are the {\bf only} obstructions of size 23 we make use of the connection with 2-apex graphs. \begin{figure}[htb] \centering \includegraphics[scale=1]{DY.eps} \caption{$\ty$ and $\yt$ moves.} \label{fig:TY} \end{figure} This second part of the argument is novel. While our analysis depends on computer calculations, the resulting observations may be of independent interest. To describe these, we review some terminology for graph families, see~\cite{GMN}. The {\em family} of graph $G$ is the set of graphs related to $G$ by a sequence of zero or more $\ty$ and $\yt$ moves, see Figure~\ref{fig:TY}. The graphs in $G$'s family are {\em cousins} of $G$. We do not allow $\yt$ moves that would result in doubled edges and all cousins have the same size. If a $\ty$ move on $G$ results in graph $H$, we say $H$ is a {\em child} of $G$ and $G$ is a {\em parent} of $H$. The set of graphs that can be obtained from $G$ by a sequence of $\ty$ moves are $G$'s {\em descendants}. Similarly, the set of graphs that can be obtained from $G$ by a sequence of $\yt$ moves are $G$'s {\em ancestors}. To show that the three graphs are the only obstructions of size 23 relies on a careful analysis of certain graph families with respect to knotless embedding. This analysis includes progress in resolving the following question. \begin{question} \label{que:d3MMIK} If $G$ has a vertex of degree less than three, can an ancestor or descendant of $G$ be an obstruction for knotless embedding? 
\end{question} It has been fruitful to search in graph families for obstructions to knotless embedding. For example, of the 264 known obstructions described in \cite{FMMNN}, all but three occur as part of four graph families. The same paper states ``It is natural to investigate the graphs obtained by adding one edge to each of ... six graphs'' in the family of the Heawood graph. We carry out this investigation and classify, with respect to knotless embedding, the graphs obtained by adding an edge to a Heawood family graph; see Section 3 for details. As a first step toward a more general strategy for this type of problem, we make the following connections between sets of graphs of the form $G+e$ obtained by adding an edge to graph $G$. \begin{theorem} \label{thm:Gpe} If $G$ is a parent of $H$, then every $G+e$ has a cousin that is an $H+e$. \end{theorem} \begin{corollary} \label{cor:Gpe} If $G$ is an ancestor of $H$, then every $G+e$ has a cousin that is an $H+e$. \end{corollary} In Section 4, we prove the following. \begin{theorem} \label{thm:ord10} There are exactly 35 obstructions to knotless embedding of order ten. \end{theorem} This depends on a classification of the maximal knotless graphs of order ten, that is the graphs that are edge maximal in the set of graphs that admit a knotless embedding, see~\cite{EFM}. In Appendix~\ref{sec:appmnik} we show that there are 49 maximal knotless graphs of order ten. In contrast to graph size, distributions with respect to graph order generally do not have dips or gaps. In particular, Theorem~\ref{thm:ord10} continues an increasing trend of no obstructions of order 6 or less, one obstruction of order 7~\cite{CG}, two of order 8~\cite{CMOPRW,BBFFHL}, and eight of order 9~\cite{MMR}. In the next section we discuss some graph properties for which we know something about the obstruction set, with a focus on those that have a dip at small size. In Sections 3 and 4, we prove Theorems~\ref{thm:3MM} and \ref{thm:ord10}, respectively. Appendix A is a traditional proof that the graphs $G_1$ and $G_2$ are IK. In Appendix B we describe the 49 maxnik graphs of order ten. Finally, Appendix C gives graph6~\cite{sage} notation for the important graphs and further details of arguments throughout the paper including the structure of the large families that occur at the end of subsection~\ref{sec:Heawood}. \section{Dips at small size} As mentioned in the introduction, it remains difficult to determine the obstruction set even for simple graph properties. In this section we survey some topological graph properties for which we know something about the obstruction set. We begin by focusing on four properties that feature a prominent dip or gap at small sizes. Two are the obstructions to knotless and torus embeddings mentioned in the introduction. Although the list of torus obstructions is likely incomplete we can be confident about the dip at size 20. Like all of the incomplete sets we look at, research has focused on smaller sizes such that data on this side of the distribution is (nearly) complete. In the specific case of torus obstructions, we can compare with a 2002 study~\cite{C} that listed 16,682 torus obstructions. Of the close to one thousand graphs added to the set in the intervening decade and a half, only three are of size 23 or smaller. 
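
Size and order distributions like these are straightforward to recompute once a list of obstructions is available in graph6 format. As an illustration (the tooling here, Python with the networkx package, is our own choice and not that of the studies cited above), the following sketch tallies a list of graph6 strings by size and by order; we apply it to the three graphs of Theorem~\ref{thm:3MM}, whose graph6 strings are given in Section 3.
\begin{verbatim}
from collections import Counter
import networkx as nx

def distributions(graph6_strings):
    """Tally graphs, given as graph6 strings, by size (edges) and order (vertices)."""
    by_size, by_order = Counter(), Counter()
    for s in graph6_strings:
        g = nx.from_graph6_bytes(s.strip().encode())
        by_size[g.number_of_edges()] += 1
        by_order[g.number_of_nodes()] += 1
    return by_size, by_order

# The three size 23 obstructions of Section 3, by their graph6 strings.
sizes, orders = distributions(["J@yaig[gv@?", "JObFF`wN?{?", "K?bAF`wN?{SO"])
print(sorted(sizes.items()))   # expect all three to have size 23
print(sorted(orders.items()))  # expect orders 11, 11, and 12
\end{verbatim}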
Similarly, while our proof that there are three obstructions for knotless embedding depends on computer verification, it seems certain that the number of obstructions at size 23 is far smaller than the 92 of size 22 and the number for size 28, which is known to be at least 156~\cite{GMN}.
\begin{table} \centering \begin{tabular}{l|ccccccc} Size & 15 & 16 & 17 & 18 & 19 & 20 & 21 \\ \hline Count & 7 & 0 & 0 & 4 & 5 & 22 & 33 \end{tabular} \caption{Count of apex obstructions through size 21.} \label{tab:MMNASiz} \end{table}
The set of apex obstructions, investigated by \cite{JK,LMMPRTW,P}, suggests one possible source for these dips. A graph is {\em apex} if it is planar, or becomes planar on deletion of a single vertex. As yet, we do not have a complete listing of the apex obstruction set, but Jobson and K{\'e}zdy~\cite{JK} report that there are at least 401 obstructions. Table~\ref{tab:MMNASiz} shows the classification of obstructions through size 21 obtained by Pierce~\cite{P} in his senior thesis. There is a noticeable gap at sizes 16 and 17. The seven graphs of size 15 are the graphs in the Petersen family. This is the family of the Petersen graph, which is also the obstruction set for linkless embedding. Note that, as for all our tables of size distributions, the table begins with what is known to be the smallest size in the distribution, in this case 15.
Our proof that there are only three knotless embedding obstructions of size 23 depends on a related property, $2$-apex. We say that a graph is {\em 2-apex} if it is apex, or becomes apex on deletion of a single vertex. Table~\ref{tab:MMN2ASiz} shows the little that we know about the obstruction set for this family \cite{MP}. Aside from the counts for sizes 21 and 22 and the gap at size 23, we know only that there are obstructions for each size from 24 through 30.
\begin{table} \centering \begin{tabular}{l|ccc} Size & 21 & 22 & 23 \\ \hline Count & 20 & 60 & 0 \end{tabular} \caption{Count of 2-apex obstructions through size 23.} \label{tab:MMN2ASiz} \end{table}
While it is not an explanation, these four properties with notable dips or gaps are related to one another and seem to stem from the Petersen graph, a notorious counterexample in graph theory. The most noticeable gap is for the apex property and the seven graphs to the left of the gap are precisely the Petersen family. The gap at size 23 for the $2$-apex property is doubtless related. In turn, our proof of Theorem~\ref{thm:3MM} in Section 3 relies heavily on the strong connection between $2$-apex and knotless graphs. For this reason, it is not surprising that the gap at size 23 for $2$-apex obstructions results in a similar dip at size 23 for obstructions for knotless embeddings. The connection with the dip at size 20 for torus embeddings is not as direct, but we remark that eight of the obstructions of size 19 have a minor that is either a graph in the Petersen family, or else one of those seven graphs with a single edge deleted.
In contrast, let us briefly mention some well known obstruction sets that do not have dips or gaps and are instead unimodal. There are two obstructions to planarity, one each of size nine ($K_{3,3}$) and ten ($K_5$). The two obstructions, $K_4$ and $K_{3,2}$, to outerplanarity both have size six and the seven obstructions to linkless embedding in the Petersen family~\cite{RST} are all of size 15. Aside from planarity and linkless embedding, the most famous set is likely the 35 obstructions to projective planar embedding~\cite{A,GHW,MT}.
Table~\ref{tab:PPSiz} shows that the size distribution for these obstructions is unimodal.
\begin{table} \centering \begin{tabular}{l|cccccc} Size & 15 & 16 & 17 & 18 & 19 & 20 \\ \hline Count & 4 & 7 & 10 & 10 & 2 & 2 \end{tabular} \caption{Count of projective planar obstructions by size.} \label{tab:PPSiz} \end{table}
\section{Knotless embedding obstructions of size 23}
In this section we prove Theorem~\ref{thm:3MM}: there are exactly three obstructions to knotless embedding of size 23. Along the way (see subsection~\ref{sec:Heawood}) we provide evidence in support of a negative answer to Question~\ref{que:d3MMIK} and prove Theorem~\ref{thm:Gpe} and its corollary. We also classify, with respect to knotless embedding, three graph families of size 22. These families include every graph obtained by adding an edge to a Heawood family graph.
We begin with some terminology. A graph that admits no knotless embedding is {\em intrinsically knotted (IK)}. In contrast, we will call the graphs that admit a knotless embedding {\em not intrinsically knotted (nIK)}. If $G$ is in the obstruction set for knotless embedding we will say $G$ is {\em minor minimal intrinsically knotted (MMIK)}. This reflects that, while $G$ is IK, no proper minor of $G$ has that property. Similarly, we will call 2-apex obstructions {\em minor minimal not 2-apex (MMN2A)}. Our strategy for classifying MMIK graphs of size 23 is based on the following observation.
\begin{lemma} \cite{BBFFHL,OT} \label{lem:2apex} If $G$ is 2-apex, then $G$ is not IK. \end{lemma}
Suppose $G$ is MMIK of size 23. By Lemma~\ref{lem:2apex}, $G$ is not 2-apex and, therefore, $G$ has an MMN2A minor. The MMN2A graphs through size 23 were classified in~\cite{MP}. All but eight of them are also MMIK and none are of size 23. It follows that a MMIK graph of size 23 has one of the eight exceptional MMN2A graphs as a minor. Our strategy is to construct all size 23 expansions of the eight exceptional graphs and determine which of those are in fact MMIK.
Before further describing our search, we remark that it does rely on computer support. Indeed, the initial classification of MMN2A graphs in \cite{MP} is itself based on a computer search. We give a traditional proof that there are three size 23 MMIK graphs, which is stated as Theorem~\ref{thm:TheThree} below. We rely on computers only for the argument that there are no other size 23 MMIK graphs. Note that, even if we cannot provide a complete, traditional proof that there are no more than three size 23 MMIK graphs, our argument does strongly suggest that there are far fewer MMIK graphs of size 23 than the known 92 MMIK graphs of size 22 and at least 156 of size 28~\cite{FMMNN, GMN}. In other words, even without computers, we have compelling evidence that there is a dip at size 23 for the obstructions to knotless embedding.
Below we give graph6 notation~\cite{sage} and edge lists for the three MMIK graphs of size 23. See also Figures~\ref{fig:G1} and \ref{fig:G2} in Appendix~\ref{sec:appG12}.
\noindent $G_1$ \verb"J@yaig[gv@?" $$[(0, 4), (0, 5), (0, 9), (0, 10), (1, 4), (1, 6), (1, 7), (1, 10), (2, 3), (2, 4), (2, 5), (2, 9),$$ $$ (2, 10), (3, 6), (3, 7), (3, 8), (4, 8), (5, 6), (5, 7), (5, 8), (6, 9), (7, 9), (8, 10)]$$
\noindent $G_2$ \verb"JObFF`wN?{?"
$$[(0, 2), (0, 4), (0, 5), (0, 6), (0, 7), (1, 5), (1, 6), (1, 7), (1, 8), (2, 6), (2, 7), (2, 8),$$ $$(2, 9), (3, 7), (3, 8), (3, 9), (3, 10), (4, 8), (4, 9), (4, 10), (5, 9), (5, 10), (6, 10)]$$ \noindent $G_3$ \verb"K?bAF`wN?{SO" $$[(0, 4), (0, 5), (0, 7), (0, 11), (1, 5), (1, 6), (1, 7), (1, 8), (2, 7), (2, 8), (2, 9), (2, 11),$$ $$(3, 7), (3, 8), (3, 9), (3, 10), (4, 8), (4, 9), (4, 10), (5, 9), (5, 10), (6, 10), (6, 11)]$$ The graph $G_1$ was discovered by Hannah Schwartz~\cite{N}. Graphs $G_1$ and $G_2$ have order 11 while $G_3$ has order 12. We prove the following in subsection~\ref{sec:3MMIKpf} below. \begin{theorem} \label{thm:TheThree} The graphs $G_1$, $G_2$, and $G_3$ are MMIK of size 23 \end{theorem} We next describe the computer search that shows there are no other size 23 MMIK graphs and completes the proof of Theorem~\ref{thm:3MM}. We also describe how Question~\ref{que:d3MMIK}, Theorem~\ref{thm:Gpe} and its corollary fit in, along with the various size 22 families. There are eight exceptional graphs of size at most 23 that are MMN2A and not MMIK. Six of them are in the Heawood family of size 21 graphs. The other two, $H_1$ and $H_2$, are 4-regular graphs on 11 vertices with size 22 described in \cite{MP}, listed in Appendix C, and shown in Figure~\ref{fig:H1H2}. It turns out that the three graphs of Theorem~\ref{thm:TheThree} are expansions of $H_1$ and $H_2$. In subsection~\ref{sec:1122graphs} we show that these two graphs are MMN2A but not IK and argue that no other size 23 expansion of $H_1$ or $H_2$ is MMIK. The Heawood family consists of twenty graphs of size 21 related to one another by $\ty$ and $\yt$ moves, see Figure~\ref{fig:TY}. In \cite{GMN,HNTY} two groups, working independently, verified that 14 of the graphs in the family are MMIK, and the remaining six are MMN2A and not MMIK. In subsection~\ref{sec:Heawood} below, we argue that no size 23 expansion of any of these six Heawood family graphs is MMIK. Combining the arguments of the next three subsections give a proof of Theorem~\ref{thm:3MM}. There are two Mathematica programs written by Naimi and available at his website~\cite{NW} that we use throughout. One, isID4, is an implementation of the algorithm of Miller and Naimi~\cite{MN} and we refer the reader to that paper for details. Note that, while this algorithm can show that a particular graph is IK, it does not allow us to deduce that a graph is nIK. Instead, we make use of a second program of Naimi, findEasyKnots, to find knotless embeddings of nIK graphs. This program determines the set of cycles $\Sigma$ of a graph $G$. Then, given an embedding of $G$, for each $\sigma \in \Sigma$ the program applies $R1$ and $R2$ Reidemeister moves (see~\cite{R}) until it arrives at one of three possible outcomes: $\sigma$ is the unknot; $\sigma$ is an alternating (hence non-trivial) knot; or $\sigma$ is a non-alternating knot (which may or may not be trivial). In this paper, we will often show that a graph is nIK by presenting a knotless embedding. In all cases, this means that when we apply findEasyKnots to the embedding, it determines that every cycle in the graph is a trivial knot. Before diving into our proof of Theorem~\ref{thm:3MM}, we state a few lemmas we will use throughout. The first is about the {\em minimal degree} $\delta(G)$, which is the least degree among the vertices of graph $G$. \begin{lemma} If $G$ is MMIK, then $\delta(G) \geq 3$. \label{lem:delta3} \end{lemma} \begin{proof} Suppose $G$ is IK with $\delta(G) < 3$. 
By either deleting, or contracting an edge on, a vertex of small degree, we find a proper minor that is also IK. \end{proof} \begin{figure}[htb] \centering \includegraphics[scale=1]{TYn.eps} \caption{Place a triangle in a neighborhood of the $Y$ subgraph.} \label{fig:TYn} \end{figure} \begin{lemma} \label{lem:tyyt} The $\ty$ move preserves IK: If $G$ is IK and $H$ is obtained from $G$ by a $\ty$ move, then $H$ is also IK. Equivalently, the $\yt$ move preserves nIK: if $H$ is nIK and $G$ is obtained from $H$ by a $\yt$ move, then $G$ is also nIK. \end{lemma} \begin{proof} We begin by noting that the $\yt$ move preserves planarity. Suppose $H$ is planar and has an induced $Y$ or $K_{3,1}$ subgraph with degree three vertex $v$ adjacent to vertices $a,b,c$. In a planar embedding of $H$ we can choose a neighborhood of the $Y$ subgraph small enough that it excludes all other edges of $H$, see Figure~\ref{fig:TYn}. This allows us to place a $3$-cycle $a,b,c$ within the neighborhood which shows that the graph $G$ that results from a $\yt$ move is also planar. Now, suppose $H$ is nIK with degree three vertex $v$. Then, as in Figure~\ref{fig:TYn}, in a knotless embedding of $H$, we can find a neighborhood of the induced $Y$ subgraph small enough that it intersects no other edges of $H$. Again, place a $3$-cycle $a,b,c$ within this neighborhood. We claim that the resulting embedding of $G$ obtained by this $\yt$ move is likewise knotless. Indeed, any cycle in $G$ that uses vertices $a$, $b$, or $c$ has a corresponding cycle in $H$ that differs only by a small move within the neighborhood of the $Y$ subgraph. Since every cycle of $H$ is unknotted, the same is true for every cycle in this embedding of $G$. \end{proof} Finally, we note that the MMIK property can move backwards along $\ty$ moves. \begin{lemma} \cite{BDLST,OT} \label{lem:MMIK} Suppose $G$ is IK and $H$ is obtained from $G$ by a $\ty$ move. If $H$ is MMIK, then $G$ is also MMIK. \end{lemma} \subsection{Proof of Theorem~\ref{thm:TheThree}}\label{sec:3MMIKpf}\ In this subsection we prove Theorem~\ref{thm:TheThree}: the three graphs $G_1$, $G_2$, and $G_3$ are MMIK. We first show these graphs are IK. For $G_1$ and $G_2$ we present a proof ``by hand" as Appendix~\ref{sec:appG12}. We remark that we can also verify that these two graphs are IK using Naimi's Mathematica implementation~\cite{NW} of the algorithm of Miller and Naimi~\cite{MN}. The graph $G_3$ is obtained from $G_2$ by a single $\ty$ move. Specifically, using the edge list for $G_2$ given above: $$[(0, 2), (0, 4), (0, 5), (0, 6), (0, 7), (1, 5), (1, 6), (1, 7), (1, 8), (2, 6), (2, 7), (2, 8),$$ $$(2, 9), (3, 7), (3, 8), (3, 9), (3, 10), (4, 8), (4, 9), (4, 10), (5, 9), (5, 10), (6, 10)],$$ make the $\ty$ move on the triangle $(0,2,6)$. Since $G_2$ is IK, Lemma~\ref{lem:tyyt} implies $G_3$ is also IK. To complete the proof of Theorem~\ref{thm:TheThree}, it remains only to show that all proper minors of $G_1$, $G_2$ and $G_3$ are nIK. First we argue that no proper minor of $G_1$ is IK. Up to isomorphism, there are 12 minors obtained by contracting or deleting a single edge. Each of these is 2-apex, except for the MMN2A graph $H_1$. By Lemma~\ref{lem:2apex}, a 2-apex graph is nIK and Figure~\ref{fig:H1H2} gives a knotless embedding of $H_1$ (as we have verified using Naimi's program findEasyKnots, see~\cite{NW}). This shows that all proper minors of $G_1$ are nIK. 
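
The assertion that every one of these minors, apart from copies of $H_1$, is 2-apex lends itself to a direct machine check. The sketch below (Python with the networkx package; an illustration of ours, not the code actually used for this paper) generates the one-edge minors of $G_1$ and tests the 2-apex property by deleting pairs of vertices and testing planarity.
\begin{verbatim}
from itertools import combinations
import networkx as nx

def is_2_apex(G):
    """True if G becomes planar after deleting at most two vertices."""
    if nx.check_planarity(G)[0]:
        return True
    for pair in combinations(G.nodes(), 2):
        H = G.copy()
        H.remove_nodes_from(pair)
        if nx.check_planarity(H)[0]:
            return True
    return False

def one_edge_minors(G):
    """Minors obtained by deleting or contracting a single edge (multiple edges suppressed)."""
    for e in list(G.edges()):
        D = G.copy()
        D.remove_edge(*e)
        yield D
        yield nx.contracted_edge(G, e, self_loops=False)

G1 = nx.from_graph6_bytes(b"J@yaig[gv@?")  # graph6 string for G_1 from the start of this section
exceptions = [M for M in one_edge_minors(G1) if not is_2_apex(M)]
print(len(exceptions))  # by the argument above, each exception should be a copy of H_1
\end{verbatim}
Deleting a pair of vertices and testing planarity is simply the definition of 2-apex unwound, so the check is brute force but transparent.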
\begin{figure}[htb] \centering \includegraphics[scale=1]{h1h2.eps} \caption{Knotless embeddings of graphs $H_1$ (left) and $H_2$ (right).} \label{fig:H1H2} \end{figure}
Next we argue that no proper minor of $G_2$ is IK. Up to isomorphism, there are 26 minors obtained by deleting or contracting an edge of $G_2$. Each of these is 2-apex, except for the MMN2A graph $H_2$. Since $H_2$ has a knotless embedding as shown in Figure~\ref{fig:H1H2}, similar to the argument for $G_1$, all proper minors of $G_2$ are nIK.
It remains to argue that no proper minor of $G_3$ is IK. Up to isomorphism, there are 26 minors obtained by deleting or contracting an edge of $G_3$. Each of these is 2-apex, except for the MMN2A graph $H_2$ which is nIK. Similar to the previous cases, all proper minors of $G_3$ are nIK. This completes the proof of Theorem~\ref{thm:TheThree}.
\subsection{Expansions of nIK Heawood family graphs}\label{sec:Heawood}\ In this subsection we argue that there are no MMIK graphs among the size 23 expansions of the six nIK graphs in the Heawood family. As part of the argument, we classify with respect to knotless embedding all graphs obtained by adding an edge to a Heawood family graph. We also discuss our progress on Question~\ref{que:d3MMIK} and prove Theorem~\ref{thm:Gpe} and its corollary.
We will use the notation of \cite{HNTY} to describe the twenty graphs in the Heawood family, which we also recall in Appendix C. For the reader's convenience, Figure~\ref{fig:Hea} shows four of the graphs in the family that are central to our discussion.
\begin{figure}[htb] \centering \includegraphics[scale=1]{heawood.eps} \caption{Four graphs in the Heawood family. ($E_9$ is in the family, but $E_9+e$ is not.)} \label{fig:Hea} \end{figure}
Kohara and Suzuki~\cite{KS} showed that 14 graphs in this family are MMIK. The remaining six, $N_9$, $N_{10}$, $N_{11}$, $N'_{10}$, $N'_{11}$, and $N'_{12}$, are nIK~\cite{GMN,HNTY}. The graph $N_9$ is called $E_9$ in \cite{GMN}. In this subsection we argue that no size 23 expansion of these six graphs is MMIK. The Heawood family graphs are the cousins of the Heawood graph, which is denoted $C_{14}$ in \cite{HNTY}. All have size 21.
We can expand a graph to one of larger size either by adding an edge or by splitting a vertex. In {\em splitting a vertex} we replace a graph $G$ with a graph $G'$ so that the order increases by one: $|G'| = |G|+1$. This means we replace a vertex $v$ of $G$ with two vertices $v_1$ and $v_2$ in $G'$ and identify the remaining vertices of $G'$ with those of $V(G) \setminus \{v\}$. As for edges, $E(G')$ includes the edge $v_1v_2$. In addition, we require that the union of the neighborhoods of $v_1$ and $v_2$ in $G'$ otherwise agrees with the neighborhood of $v$: $N(v) = (N(v_1) \cup N(v_2)) \setminus \{v_1,v_2 \}$. In other words, $G$ is the result of contracting $v_1v_2$ in $G'$ where double edges are suppressed: $G = G'/v_1v_2$.
Our goal is to argue that there is no size 23 MMIK graph that is an expansion of one of the six nIK Heawood family graphs, $N_9$, $N_{10}$, $N_{11}$, $N'_{10}$, $N'_{11}$, and $N'_{12}$. As a first step, we will argue that, if there were such a size 23 MMIK expansion, it would also be an expansion of one of 29 nIK graphs of size 22. Given a graph $G$, we will use $G+e$ to denote a graph obtained by adding an edge $e \not\in E(G)$. As we will show, if $G$ is a Heawood family graph, then $G+e$ will fall in one of three families that we will call the $H_8+e$ family, the $E_9+e$ family, and the $H_9+e$ family.
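
Both expansion moves are easy to enumerate by machine. For a vertex split that is to raise the size by exactly one, as in the size 22 and size 23 expansions considered below, the neighborhood of $v$ must be partitioned between $v_1$ and $v_2$, since any overlap between $N(v_1)$ and $N(v_2)$ would contribute additional edges. The following minimal sketch (Python with networkx; the labels and function name are our own illustration, not the code used for the searches in this paper) enumerates such splits.
\begin{verbatim}
from itertools import combinations
import networkx as nx

def one_edge_splits(G, v, new_label):
    """Splits of vertex v that increase the size by exactly one.

    The neighborhood of v is partitioned between v (reused as v_1) and
    new_label (playing the role of v_2), and the edge v_1 v_2 is added,
    so that contracting that edge recovers G."""
    nbrs = list(G.neighbors(v))
    for k in range(len(nbrs) + 1):
        for side in combinations(nbrs, k):
            H = G.copy()
            H.remove_node(v)
            H.add_edge(v, new_label)
            for u in nbrs:
                H.add_edge(v if u in side else new_label, u)
            yield H

# Toy example: splits of a vertex of K_5 that keep minimal degree at least three.
K5 = nx.complete_graph(5)
good = [H for H in one_edge_splits(K5, 0, 5)
        if min(d for _, d in H.degree()) >= 3]
print(len(good))  # 6 labeled splits, all isomorphic to one another
\end{verbatim}
In practice one would also discard splits that create a vertex of degree less than three, as Lemma~\ref{lem:delta3} allows, and then reduce the resulting list up to isomorphism.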
The $E_9+e$ family is discussed in \cite{GMN} where it is shown to consist of 110 graphs, all IK.
The $H_8 + e$ graph is formed by adding an edge to the Heawood family graph $H_8$ between two of its degree 5 vertices. The $H_8+e$ family consists of 125 graphs, 29 of which are nIK and the remaining 96 are IK, as we will now argue. For this, we leverage graphs in the Heawood family. In addition to $H_8+e$, the family includes an $F_9+e$ graph formed by adding an edge between the two degree 3 vertices of $F_9$. Since $H_8$ and $F_9$ are both IK~\cite{KS}, the corresponding graphs with an edge added are as well. By Lemma~\ref{lem:tyyt}, $H_8+e$, $F_9+e$ and all their descendants are IK. These are the 96 IK graphs in the family.
The remaining 29 graphs are all ancestors of six graphs that we describe below. Once we establish that these six are nIK, then Lemma~\ref{lem:tyyt} ensures that all 29 are nIK. We will denote the six graphs $T_i$, $i = 1,\ldots,6$, where we have used the letter $T$ since five of them have a degree two vertex (`T' being the first letter of `two'). After contracting an edge on the degree 2 vertex, we recover one of the nIK Heawood family graphs, $N_{11}$ or $N'_{12}$. It follows that these five graphs are also nIK. The two graphs that become $N_{11}$ after contracting an edge have the following graph6 notation~\cite{sage}: $T_1$: \verb'KSrb`OTO?a`S' $T_2$: \verb'KOtA`_LWCMSS' The three graphs that contract to $N'_{12}$ are: $T_3$: \verb'LSb`@OLOASASCS' $T_4$: \verb'LSrbP?CO?dAIAW' $T_5$: \verb'L?tBP_SODGOS_T' The five graphs we have described so far along with their ancestors account for 26 of the nIK graphs in the $H_8+e$ family. The remaining three are ancestors of $T_6$: \verb'KSb``OMSQSAK' Figure~\ref{fig:T6} shows a knotless embedding of $T_6$. By Lemma~\ref{lem:tyyt}, its two ancestors are also nIK and this completes the count of 29 nIK graphs in the $H_8+e$ family.
\begin{figure}[htb] \centering \includegraphics[scale=1]{t6.eps} \caption{A knotless embedding of the $T_6$ graph.} \label{fig:T6} \end{figure}
The graph $H_9+e$ is formed by adding an edge to $H_9$ between the two degree 3 vertices. There are five graphs in the $H_{9}+e$ family, four of which are $H_9+e$ and its descendants. Since $H_9$ is IK~\cite{KS}, by Lemma~\ref{lem:tyyt}, these four graphs are all IK. The remaining graph in the family is the MMIK graph denoted $G_{S}$ in \cite{FMMNN} and shown in Figure~\ref{fig:HS}. Although the graph is credited to Schwartz in that paper, it was a joint discovery of Schwartz and Barylskiy~\cite{N}. Thus, all five graphs in the $H_9+e$ family are IK.
\begin{figure}[htb] \centering \includegraphics[scale=1]{gs.eps} \caption{The MMIK graph $G_S$.} \label{fig:HS} \end{figure}
Having classified the graphs in the three families with respect to intrinsic knotting, using Corollary~\ref{cor:3fam} below, we have completed the investigation, suggested by \cite{FMMNN}, of graphs formed by adding an edge to a Heawood family graph.
We remark that among the three families $H_8+e$, $E_9+e$, and $H_9+e$, the only instances of a graph with a degree 2 vertex occur in the family $H_8+e$, which also contains no MMIK graphs. This observation suggests the following question.
\setcounter{section}{1} \setcounter{theorem}{1} \begin{question} If $G$ has minimal degree $\delta(G) < 3$, is it true that $G$'s ancestors and descendants include no MMIK graphs? \end{question} \setcounter{section}{3} \setcounter{theorem}{5}
Initially, we suspected that such a $G$ has no MMIK cousins at all.
However, we discovered that the MMIK graph of size 26, described in Section~\ref{sec:ord10} below, has graphs of minimal degree two among its cousins. Although we have not completely resolved the question, we have two partial results.
\begin{theorem} If $\delta(G) < 3$ and $H$ is a descendant of $G$, then $H$ is not MMIK. \end{theorem}
\begin{proof} Since $\delta(G)$ is non-increasing under the $\ty$ move, $\delta(H) \leq \delta(G) < 3$ and $H$ is not MMIK by Lemma~\ref{lem:delta3}. \end{proof}
As defined in \cite{GMN}, a graph has a $\bar{Y}$ if there is a degree 3 vertex that is also part of a $3$-cycle. A $\yt$ move at such a vertex would result in doubled edges.
\begin{lemma}\label{lem:ybar} A graph with a $\bar{Y}$ is not MMIK. \end{lemma}
\begin{proof} Let $G$ have a vertex $v$ with $N(v) = \{a,b,c\}$ and $ab \in E(G)$. We can assume $G$ is IK. Make a $\ty$ move on the triangle $v,a,b$ to obtain the graph $H$; in $H$ the vertex $v$ has degree two, its neighbors being $c$ and the new degree three vertex. By Lemma~\ref{lem:tyyt} $H$ is IK, as is the homeomorphic graph $H'$ obtained by contracting an edge at the degree 2 vertex $v$. But $H' = G - ab$ is obtained by deleting the edge $ab$ from $G$. Since $G$ has a proper subgraph $H'$ that is IK, $G$ is not MMIK. \end{proof}
\begin{theorem} If $G$ has a child $H$ with $\delta(H) < 3$, then $G$ is not MMIK. \end{theorem}
\begin{proof} By Lemma~\ref{lem:delta3}, we can assume $G$ is IK with $\delta(G) \geq 3$. The $\ty$ move that produces $H$ creates a new vertex of degree three and decreases the degree of each vertex of the triangle by one, so $\delta(H) < 3$ means some vertex of the triangle has degree three in $G$. It follows that $G$ has a $\bar{Y}$ and is not MMIK by the previous lemma. \end{proof}
Suppose $G$ is a size 23 MMIK expansion of one of the six nIK Heawood family graphs. We will argue that $G$ must be an expansion of one of the graphs in the three families, $H_8+e$, $E_9+e$, and $H_9+e$. However, as a MMIK graph, $G$ can have no size 22 IK minor. Therefore, $G$ must be an expansion of one of the 29 nIK graphs in the $H_8+e$ family. There are two ways to form a size 22 expansion of one of the six nIK graphs, either add an edge or split a vertex. We now show that if $H$ is in the Heawood family, then $H+e$ is in one of the three families, $H_8+e$, $E_9+e$, and $H_9 + e$. We begin with a proof of a theorem and corollary, mentioned in the introduction, that describe how adding an edge to a graph $G$ interacts with the graph's family.
\setcounter{section}{1} \setcounter{theorem}{2} \begin{theorem} If $G$ is a parent of $H$, then every $G+e$ has a cousin that is an $H+e$. \end{theorem}
\begin{proof} Let $H$ be obtained by a $\ty$ move that replaces the triangle $abc$ in $G$ with three edges on the new vertex $v$. That is, $V(H) = V(G) \cup \{v\}$. Form $G+e$ by adding the edge $e = xy$. Since $V(H) = V(G) \cup \{v\}$, we have $x,y \in V(H)$, and the graph $H+e$ is a cousin of $G+e$ by a $\ty$ move on the triangle $abc$. \end{proof}
\begin{corollary} If $G$ is an ancestor of $H$, then every $G+e$ has a cousin that is an $H+e$. \end{corollary} \setcounter{section}{3} \setcounter{theorem}{8}
Every graph in the Heawood family is an ancestor of one of two graphs, the Heawood graph (called $C_{14}$ in \cite{HNTY}) and the graph $H_{12}$ (see Figure~\ref{fig:Hea}).
\begin{theorem} \label{thm:Heawpe} Let $H$ be the Heawood graph. Up to isomorphism, there are two $H+e$ graphs. One is in the $H_8+e$ family, the other in the $E_9+e$ family. \end{theorem}
\begin{proof} The diameter of the Heawood graph is three. Up to isomorphism, we can either add an edge between vertices of distance two or three. If we add an edge between vertices of distance two, the result is a graph in the $H_8+e$ family.
If the distance is three, we are adding an edge between the different parts and the result is a bipartite graph of size 22. As shown in~\cite{KMO}, this means it is cousin 89 of the $E_9+e$ family. \end{proof} \begin{theorem} \label{thm:H12pe} Let $G$ be formed by adding an edge to $H_{12}$. Then $G$ is in the $H_8+e$, $E_9+e$, or $H_9+e$ family. \end{theorem} \begin{proof} Note that $H_{12}$ consists of six degree 4 vertices and six degree 3 vertices. Moreover, five of the degree 3 vertices are created by $\ty$ moves in the process of obtaining $H_{12}$ from $K_7$. Let $a_i$ ($i = 1 \ldots 5$) denote those five degree 3 vertices. Further assume that $b_1$ is the remaining degree 3 vertex and $b_2$, $b_3$, $b_4$, $b_5$, $b_6$ and $b_7$ are the remaining degree 4 vertices. Then the $b_j$ vertices correspond to vertices of $K_7$ before applying the $\ty$ moves. First suppose that $G$ is obtained from $H_{12}$ by adding an edge which connects two $b_j$ vertices. Since these seven vertices are the vertices of $K_7$ before using $\ty$ moves, there is exactly one vertex among the $a_i$, say $a_1$, that is adjacent to the two endpoints of the added edge. Let $G'$ be the graph obtained from $G$ by applying $\yt$ moves at $a_2$, $a_3$, $a_4$ and $a_5$. Then $G'$ is isomorphic to $H_8+e$. Therefore $G$ is in the $H_8+e$ family. Next suppose that $G$ is obtained from $H_{12}$ by adding an edge which connects two $a_i$ vertices. Let $a_1$ and $a_2$ be the endpoints of the added edge. We assume that $G'$ is obtained from $G$ by using $\yt$ moves at $a_3$, $a_4$ and $a_5$. Then there are two cases: either $G'$ is obtained from $H_9$ or $F_9$ by adding an edge which connects two degree 3 vertices. In the first case, $G'$ is isomorphic to $H_9+e$. Thus $G$ is in the $H_9+e$ family. In the second case, $G'$ is in the $H_8+e$ or $E_9+e$ family by Corollary~\ref{cor:Gpe} and Theorem~\ref{thm:Heawpe}. Thus $G$ is in the $H_8+e$ or $E_9+e$ family. Finally suppose that $G$ is obtained from $H_{12}$ by adding an edge which connects an $a_i$ vertex and a $b_j$ vertex. Let $a_1$ be a vertex of the added edge. We assume that $G'$ is the graph obtained from $G$ by using $\yt$ moves at $a_2$, $a_3$, $a_4$ and $a_5$. Since $G'$ is obtained from $H_8$ by adding an edge, $G'$ is in the $H_8+e$ or $E_9+e$ family by Corollary~\ref{cor:Gpe} and Theorem~\ref{thm:Heawpe}. Therefore $G$ is in the $H_8+e$ or $E_9+e$ family. \end{proof} \begin{corollary} \label{cor:3fam} If $H$ is in the Heawood family, then $H+e$ is in the $H_8+e$, $E_9+e$, or $H_9+e$ family. \end{corollary} \begin{proof} The graph $H$ is either an ancestor of the Heawood graph or $H_{12}$. Apply Corollary~\ref{cor:Gpe} and Theorems~\ref{thm:Heawpe} and \ref{thm:H12pe}. \end{proof} \begin{corollary} \label{cor:29nIK} If $H$ is in the Heawood family and $H+e$ is nIK, then $H+e$ is one of the 29 nIK graphs in the $H_8+e$ family \end{corollary} \begin{lemma} \label{lem:deg2} Let $H$ be a nIK Heawood family graph and $G$ be an expansion obtained by splitting a vertex of $H$. Then either $G$ has a vertex of degree at most two, or else it is in the $H_8+e$, $E_9+e$, or $H_{9}+e$ family. \end{lemma} \begin{proof} Note that $\Delta(H) \leq 5$. If $G$ has no vertex of degree at most two, then the vertex split produces a vertex of degree three. A $\yt$ move on the degree three vertex produces $G'$ which is of the form $H+e$. \end{proof} \begin{corollary} \label{cor:deg2} Suppose $G$ is nIK and a size 22 expansion of a nIK Heawood family graph. 
Then either $G$ has a vertex of degree at most two or $G$ is in the $H_8 + e$ family. \end{corollary}
\begin{theorem} \label{thm:23to29} Let $G$ be size 23 MMIK with a minor that is a nIK Heawood family graph. Then $G$ is an expansion of one of the 29 nIK graphs in the $H_8+e$ family. \end{theorem}
\begin{proof} There must be a size 22 graph $G'$ intermediate to $G$ and the Heawood family graph $H$. That is, $G$ is an expansion of $G'$, which is an expansion of $H$. By Corollary~\ref{cor:deg2}, we can assume $G'$ has a vertex $v$ of degree at most two. By Lemma~\ref{lem:delta3}, a MMIK graph has minimal degree $\delta(G) \geq 3$. Since $G'$ expands to $G$ by adding an edge or splitting a vertex, we conclude $v$ has degree two exactly and $G$ is $G'$ with an edge added at $v$. Since $\delta(H) \geq 3$, this means $G'$ is obtained from $H$ by a vertex split. In $G'$, let $N(v) = \{a,b\}$ and let $cv$ be the edge added to form $G$. Then $H = G'/av$ and we recognize $H+ac$ as a minor of $G$. We are assuming $G$ is MMIK, so $H+ac$ is nIK and, by Corollary~\ref{cor:29nIK}, one of the 29 nIK graphs in the $H_8+e$ family. Thus, $G$ is an expansion of $H+ac$, which is one of these 29 graphs, as required. \end{proof}
It remains to study the expansions of the 29 nIK graphs in the $H_8+e$ family. We will give an overview of the argument, leaving many of the details to Appendix C. The size 23 expansions of the 29 size 22 nIK graphs fall into one of eight families, which we identify by the number of graphs in the family: $\F_9$, $\F_{55}$, $\F_{174}$, $\F_{183}$, $\F_{547}$, $\F_{668}$, $\F_{1229}$, and $\F_{1293}$. We list the graphs in each family in Appendix C.
\begin{theorem} \label{thm:Fam8} If $G$ is a size 23 MMIK expansion of a nIK Heawood family graph, then $G$ is in one of the eight families, $\F_9$, $\F_{55}$, $\F_{174}$, $\F_{183}$, $\F_{547}$, $\F_{668}$, $\F_{1229}$, and $\F_{1293}$. \end{theorem}
\begin{proof} By Theorem~\ref{thm:23to29}, $G$ is an expansion of $G'$, which is one of the 29 nIK graphs in the $H_8+e$ family. As we have seen, these 29 graphs are ancestors of the six graphs $T_1, \ldots, T_6$. By Corollary~\ref{cor:Gpe}, we can find the $G'+e$ graphs by looking at the six graphs. Given the family listings in Appendix C, it is straightforward to verify that each $T_i+e$ is in one of the eight families. This accounts for the graphs $G$ obtained by adding an edge to one of the 29 nIK graphs in the $H_8+e$ family. If instead $G$ is obtained by splitting a vertex of $G'$, we use the strategy of Lemma~\ref{lem:deg2}. By Lemma~\ref{lem:delta3}, $\delta(G) \geq 3$. Since $\Delta(G') \leq 5$, the vertex split must produce a vertex of degree three. Then, a $\yt$ move on the degree three vertex produces $G''$ which is of the form $G'+e$. Thus $G$ is a cousin of $G'+e$ and must be in one of the eight families. \end{proof}
To complete our argument that there is no size 23 MMIK graph with a nIK Heawood family minor, we argue that there are no MMIK graphs in the eight families $\F_9, \F_{55}, \ldots, \F_{1293}$. In large part our argument is based on two criteria that immediately show a graph $G$ is not MMIK. \begin{enumerate} \item $\delta(G) < 3$, see Lemma~\ref{lem:delta3}. \item By deleting an edge, there is a proper minor $G-e$ that is an IK graph in the $H_8+e$, $E_9+e$, or $H_9+e$ families. In this case $G$ is IK, but not MMIK. \end{enumerate} By Lemma~\ref{lem:MMIK}, if $G$ has an ancestor that satisfies criterion 2, then $G$ is also not MMIK.
By Lemma~\ref{lem:tyyt}, if $G$ has a nIK descendant, then $G$ is also nIK, hence not MMIK.
\begin{theorem}There is no MMIK graph in the $\F_9$ family. \end{theorem}
\begin{proof} Four of the nine graphs satisfy the first criterion, $\delta(G) = 2$, and these are not MMIK by Lemma~\ref{lem:delta3}. The remaining graphs are descendants of a graph $G$ that is IK but not MMIK. Indeed, $G$ satisfies criterion 2: by deleting an edge, we recognize $G-e$ as an IK graph in the $H_9+e$ family (see Appendix C for details). By Lemma~\ref{lem:MMIK}, $G$ and its descendants are also not MMIK. \end{proof}
\begin{theorem}There is no MMIK graph in the $\F_{55}$ family. \end{theorem}
\begin{proof} All graphs in this family have $\delta(G) \geq 3$, so none satisfy the first criterion. All but two of the graphs in this family are not MMIK by the second criterion. The remaining two graphs have a common parent that is IK but not MMIK. By Lemma~\ref{lem:MMIK}, these last two graphs are also not MMIK. See Appendix C for details. \end{proof}
We remark that $\F_{55}$ is the only one of the eight families that has no graph with $\delta(G) < 3$.
\begin{theorem}There is no MMIK graph in the $\F_{174}$ family. \end{theorem}
\begin{proof} All but 51 graphs are not MMIK by the first criterion. Of the remaining graphs, all but 17 are not MMIK by the second criterion. Of these, 11 are descendants of a graph $G$ that is IK but not MMIK by the second criterion. By Lemma~\ref{lem:MMIK}, these 11 are also not MMIK. This leaves six graphs. For these we find two nIK descendants. By Lemma~\ref{lem:tyyt} these six are also nIK, hence not MMIK. Both descendants have a degree 2 vertex. On contracting an edge of the degree 2 vertex, we obtain a homeomorphic graph that is one of the 29 nIK graphs in the $H_8+e$ family. \end{proof}
\begin{theorem}There is no MMIK graph in the $\F_{183}$ family. \end{theorem}
\begin{proof} All graphs in this family have a vertex of degree two or less and are not MMIK by the first criterion. \end{proof}
\begin{theorem}There is no MMIK graph in the $\F_{547}$ family. \end{theorem}
\begin{proof} All but 229 of the graphs in the family are not MMIK by criterion one. Of those, all but 52 are not MMIK by criterion two. Of those, 25 are descendants of one of the graphs meeting criterion two and are not MMIK by Lemma~\ref{lem:MMIK}. For the remaining 27 graphs, all but five have a nIK descendant and are nIK by Lemma~\ref{lem:tyyt}. For the remaining five, three are ancestors of one of the five. In Figure~\ref{fig:547UK} we give knotless embeddings of the other two graphs. Using Lemma~\ref{lem:tyyt}, all five graphs are nIK, hence not MMIK. \end{proof}
\begin{figure}[htb] \centering \includegraphics[scale=1]{f547.eps} \caption{Knotless embeddings of two graphs in $\F_{547}$.} \label{fig:547UK} \end{figure}
\begin{theorem}There is no MMIK graph in the $\F_{668}$ family. \end{theorem}
\begin{proof} All but 283 of the graphs in the family are not MMIK by criterion one. Of those, all but 56 are not MMIK by criterion two. Of those, 23 are descendants of one of the graphs meeting criterion two and are not MMIK by Lemma~\ref{lem:MMIK}. For the remaining 33 graphs, all but three have a nIK descendant and are nIK by Lemma~\ref{lem:tyyt}. Of the remaining three, two are ancestors of the third. Figure~\ref{fig:668UK} is a knotless embedding of the common descendant. By Lemma~\ref{lem:tyyt} all three of these graphs are nIK, hence not MMIK.
\end{proof}
\begin{figure}[htb] \centering \includegraphics[scale=1]{f668.eps} \caption{Knotless embedding of a graph in $\F_{668}$.} \label{fig:668UK} \end{figure}
\begin{theorem}There is no MMIK graph in the $\F_{1229}$ family. \end{theorem}
\begin{proof} There are 268 graphs in the family that are not MMIK by criterion one. Of the remaining 961 graphs, all but 140 are not MMIK by criterion two. Of those, all but three are descendants of one of the graphs meeting criterion two and are not MMIK by Lemma~\ref{lem:MMIK}. The remaining three graphs have an IK minor by contracting an edge and are, therefore, not MMIK. \end{proof}
\begin{theorem}There is no MMIK graph in the $\F_{1293}$ family. \end{theorem}
\begin{proof} There are 570 graphs in the family that are not MMIK by criterion one. Of the remaining 723 graphs, all but 99 are not MMIK by criterion two. Of those, all but 12 are descendants of one of the graphs meeting criterion two and are not MMIK by Lemma~\ref{lem:MMIK}. The remaining 12 graphs have an IK minor by contracting an edge and are, therefore, not MMIK. \end{proof}
\subsection{Expansions of the size 22 graphs $H_1$ and $H_2$} \label{sec:1122graphs}
We have argued that a size 23 MMIK graph must have a minor that is either one of six nIK graphs in the Heawood family, or else one of two $(11,22)$ graphs that we call $H_1$: \verb'J?B@xzoyEo?' and $H_2$: \verb'J?bFF`wN?{?' (see Figure~\ref{fig:H1H2}). We treated expansions of the Heawood family graphs in the previous subsection. In this subsection we show that $G_1$, $G_2$, and $G_3$ are the only size 23 MMIK expansions of $H_1$ and $H_2$. Recall that these two graphs were shown MMN2A (minor minimal not $2$-apex) in \cite{MP}. The knotless embeddings of Figure~\ref{fig:H1H2} demonstrate that they are nIK.
By Lemma~\ref{lem:delta3}, if a vertex split of $H_1$ results in a vertex of degree less than three, the resulting graph is not MMIK. Since $H_1$ is $4$-regular, the only other way to make a vertex split produces adjacent degree 3 vertices. Then, a $\yt$ move on one of the degree three vertices yields an $H_1+e$. Thus, a size 23 MMIK expansion of $H_1$ must be in the family of an $H_1+e$. Up to isomorphism, there are six $H_1+e$ graphs formed by adding an edge to $H_1$. These six graphs generate families of size 6, 2, 2, and 1. Three of the six graphs are in the family of size 6 and there is one each in the remaining three families.
All graphs in the family of size six are ancestors of three graphs. In Figure~\ref{fig:sixfam} we provide knotless embeddings of those three graphs. By Lemma~\ref{lem:tyyt}, all graphs in this family are nIK, hence not MMIK.
\begin{figure}[htb] \centering \includegraphics[scale=1]{6fh1.eps} \caption{Knotless embeddings of three graphs in the size six family from $H_1$.} \label{fig:sixfam} \end{figure}
In a family of two graphs, there is a single $\ty$ move. In Figure~\ref{fig:two2s} we give knotless embeddings of the children in these two families. By Lemma~\ref{lem:tyyt}, all graphs in these two families are nIK, hence not MMIK.
\begin{figure}[htb] \centering \includegraphics[scale=1]{2fh1.eps} \caption{Knotless embeddings of two graphs in the size two families from $H_1$.} \label{fig:two2s} \end{figure}
The unique graph in the family of size one is $G_1$. In subsection~\ref{sec:3MMIKpf} we show that this graph is MMIK.
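
We note in passing that counts such as the six $H_1+e$ graphs above are easily reproduced from the graph6 strings; the following sketch (Python with networkx; our own illustration) counts the graphs $G+e$ up to isomorphism.
\begin{verbatim}
import networkx as nx

def added_edge_graphs(G):
    """One representative of each isomorphism class of G + e, over the non-edges e of G."""
    reps = []
    for e in nx.non_edges(G):
        H = G.copy()
        H.add_edge(*e)
        if not any(nx.is_isomorphic(H, R) for R in reps):
            reps.append(H)
    return reps

H1 = nx.from_graph6_bytes(b"J?B@xzoyEo?")  # graph6 string for H_1 given above
H2 = nx.from_graph6_bytes(b"J?bFF`wN?{?")  # graph6 string for H_2 given above
print(len(added_edge_graphs(H1)), len(added_edge_graphs(H2)))  # six H_1+e and three H_2+e graphs
\end{verbatim}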
Using the edge list of $G_1$ given above near the beginning of Section~3: $$[(0, 4), (0, 5), (0, 9), (0, 10), (1, 4), (1, 6), (1, 7), (1, 10), (2, 3), (2, 4), (2, 5), (2, 9),$$ $$ (2, 10), (3, 6), (3, 7), (3, 8), (4, 8), (5, 6), (5, 7), (5, 8), (6, 9), (7, 9), (8, 10)],$$ we recover $H_1$ by deleting edge $(2,5)$. Again, since $H_2$ is $4$-regular, MMIK expansions formed by vertex splits (if any) will be in the families of $H_2+e$ graphs. Up to isomorphism, there are three $H_2+e$ graphs. These produce a family of size four and another of size two. \begin{figure}[htb] \centering \includegraphics[scale=1]{4fh2.eps} \caption{Knotless embeddings of two graphs in the size four family from $H_2$.} \label{fig:fourfam} \end{figure} The family of size four includes two $H_2+e$ graphs. All graphs in the family are ancestors of the two graphs that are each shown to have a knotless embedding in Figure~\ref{fig:fourfam}. By Lemma~\ref{lem:tyyt}, all graphs in this family are nIK, hence not MMIK. The family of size two consists of the graphs $G_2$ and $G_3$. In subsection~\ref{sec:3MMIKpf} we show that these two graphs are MMIK. Using the edge list for $G_2$ given above near the beginning of Section~3: $$[(0, 2), (0, 4), (0, 5), (0, 6), (0, 7), (1, 5), (1, 6), (1, 7), (1, 8), (2, 6), (2, 7), (2, 8),$$ $$(2, 9), (3, 7), (3, 8), (3, 9), (3, 10), (4, 8), (4, 9), (4, 10), (5, 9), (5, 10), (6, 10)],$$ we recover $H_2$ by deleting edge $(0,2)$. As for $G_3$: $$[(0, 4), (0, 5), (0, 7), (0, 11), (1, 5), (1, 6), (1, 7), (1, 8), (2, 7), (2, 8), (2, 9), (2, 11),$$ $$(3, 7), (3, 8), (3, 9), (3, 10), (4, 8), (4, 9), (4, 10), (5, 9), (5, 10), (6, 10), (6, 11)],$$ contracting edge $(6,11)$ leads back to $H_2$. \section{ \label{sec:ord10}Knotless embedding obstructions of order ten.} In this section, we prove Theorem~\ref{thm:ord10}: there are exactly 35 obstructions to knotless embedding of order ten. As in the previous section, we refer to knotless embedding obstructions as MMIK graphs. We first describe the 26 graphs given in~\cite{FMMNN,MNPP} and then list the 9 new graphs unearthed by our computer search. \subsection{26 previously known order ten MMIK graphs.} \label{sec:26known} In~\cite{FMMNN}, the authors describe 264 MMIK graphs. There are three sporadic graphs (none of order ten), the rest falling into four graph families. Of these, 24 have ten vertices and they appear in the families as follows. There are three MMIK graphs of order ten in the Heawood family~\cite{KS, GMN, HNTY}: $H_{10}, F_{10}, $ and $E_{10}$. In~\cite{GMN}, the authors study the other three families. All 56 graphs in the $K_{3,3,1,1}$ family are MMIK. Of these, 11 have order ten: Cousins 4, 5, 6, 7, 22, 25, 26, 27, 28, 48, and 51. There are 33 MMIK graphs in the family of $E_9+e$. Of these seven have order ten: Cousins 3, 28, 31, 41, 44, 47, and 50. Finally, the family of $G_{9,28}$ includes 156 MMIK graphs. Of these, there are three of order ten: Cousins 2, 3, and 4. The other two known MMIK graphs of order ten are described in~\cite{MNPP}, one having size 26 and the other size 30. We remark that the family for the graph of size 26 includes both MMIK graphs and graphs with $\delta(G) = 2$. However, no ancestor or descendant of a $\delta(G) = 2$ graph is MMIK. This is part of our motivation for Question~\ref{que:d3MMIK} \subsection{Nine new MMIK graphs of order ten} In this subsection we list the nine additional MMIK graphs that we found after a computer search described following the list. 
In each case, we use the program of~\cite{MN} to verify that the graph we found is IK. We use the Mathematica implementation of the program available at Ramin Naimi's website~\cite{NW}. To show that the graph is MMIK, we must in addition verify that each minor formed by deleting or contracting an edge is nIK. Many of these minors are $2$-apex and not IK by Lemma~\ref{lem:2apex}. There remain 21 minors and below we discuss how we know that those are also nIK. First we list the nine new MMIK graphs of order ten, including size, graph6 format~\cite{sage}, and an edge list. \begin{enumerate} \item Size: 25; graph6 format: \verb"ICrfbp{No" $$[(0, 3), (0, 4), (0, 5), (0, 6), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8),$$ $$(2, 5), (2, 6), (2, 7), (2, 8), (2, 9), (3, 6), (3, 7), (3, 8),$$ $$(3, 9), (4, 7), (4, 8), (4, 9), (5, 8), (5, 9), (6, 9), (7, 9)]$$ \item Size: 25; graph6 format: \verb"ICrbrrqNg" $$[(0, 3), (0, 4), (0, 5), (0, 8), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8),$$ $$(2, 5), (2, 6), (2, 7), (2, 8), (2, 9), (3, 6), (3, 7), (3, 8),$$ $$(3, 9), (4, 6), (4, 7), (4, 9), (5, 9), (6, 8), (6, 9), (8, 9)]$$ \item Size: 25; graph6 format: \verb"ICrbrriVg" $$[(0, 3), (0, 4), (0, 5), (0, 8), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8),$$ $$(1, 9), (2, 5), (2, 6), (2, 7), (2, 8), (3, 6), (3, 7), (3, 9),$$ $$(4, 6), (4, 7), (4, 8), (4, 9), (5, 9), (6, 8), (6, 9), (8, 9)]$$ \item Size: 25; graph6 format: \verb"ICrbrriNW" $$[(0, 3), (0, 4), (0, 5), (0, 8), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8),$$ $$(2, 5), (2, 6), (2, 7), (2, 8), (2, 9), (3, 6), (3, 7), (3, 9),$$ $$(4, 6), (4, 7), (4, 8), (4, 9), (5, 9), (6, 8), (7, 9), (8, 9)]$$ \item Size: 27; graph6 format: \verb"ICfvRzwfo" $$[(0, 3), (0, 4), (0, 5), (0, 6), (0, 8), (0, 9), (1, 5), (1, 6), (1, 7),$$ $$(1, 8), (2, 5), (2, 6), (2, 7), (2, 8), (3, 4), (3, 5), (3, 7), (3, 8),$$ $$(3, 9), (4, 6), (4, 7), (4, 8), (4, 9), (5, 7), (5, 9), (6, 9), (7, 9)]$$ \item Size: 29; graph6 format: \verb"ICfvRr^vo" $$[(0, 3), (0, 4), (0, 5), (0, 6), (0, 8), (0, 9), (1, 5), (1, 6), (1, 7), (1, 8),$$ $$(1, 9), (2, 5), (2, 6), (2, 7), (3, 4), (3, 5), (3, 7), (3, 8), (3, 9), (4, 6),$$ $$(4, 7), (4, 8), (4, 9), (5, 8), (5, 9), (6, 8), (6, 9), (7, 8), (7, 9)]$$ \item Size: 30; graph6 format: \verb"IQjuvrm^o" $$[(0, 2), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (1, 3), (1, 5), (1, 6), (1, 7),$$ $$(1, 8), (1, 9), (2, 4), (2, 5), (2, 7), (2, 8), (2, 9), (3, 5), (3, 6), (3, 7),$$ $$(3, 9), (4, 6), (4, 7), (4, 8), (4, 9), (5, 8), (5, 9), (6, 8), (6, 9), (7, 9)]$$ \item Size: 31; graph6 format: \verb"IQjur~m^o" $$[(0, 2), (0, 4), (0, 5), (0, 6), (0, 8), (1, 3), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9),$$ $$(2, 4), (2, 5), (2, 7), (2, 8), (2, 9), (3, 5), (3, 6), (3, 7), (3, 9), (4, 6),$$ $$(4, 7), (4, 8), (4, 9), (5, 7), (5, 8), (5, 9), (6, 7), (6, 8), (6, 9), (7, 9)]$$ \item Size: 32; graph6 format: \verb"IEznfvm|o" $$[(0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (0, 9), (1, 3), (1, 4), (1, 5), (1, 6),$$ $$(1, 7), (1, 8), (1, 9), (2, 4), (2, 5), (2, 6), (2, 7), (2, 8), (2, 9), (3, 6), (3, 7),$$ $$(3, 9), (4, 5), (4, 7), (4, 8), (5, 8), (5, 9), (6, 7), (6, 8), (6, 9), (7, 9)]$$ \end{enumerate} To complete our argument, it remains to argue that the 21 non 2-apex minors are nIK. Each of these minors is formed by deleting or contracting an edge in one of the nine graphs just listed. Of these minors, 19 have a 2-apex descendant and are nIK by Lemmas~\ref{lem:2apex} and \ref{lem:tyyt}. 
In Figure~\ref{fig:ord10} we give knotless embeddings of the remaining two minors showing that they are also nIK.
\begin{figure}[htb] \centering \includegraphics[scale=1]{order10.eps} \caption{Knotless embeddings of two order ten graphs.} \label{fig:ord10} \end{figure}
Let us describe our computer search. A MMIK graph $G$ of order ten must be connected, have $\delta(G) \geq 3$ (by Lemma~\ref{lem:delta3}) and have size (number of edges) between 21 and 35. For the lower bound on size, see~\cite{M}. The upper bound follows as Mader~\cite{Ma} showed that a graph with $n \geq 7$ vertices and $5n-14$ or more edges has a $K_7$ minor. Since $K_7$ is IK (see \cite{CG}), this means that a graph of order ten with 36 or more edges cannot be MMIK as it has a proper IK minor, $K_7$. There remain just under 5 million graphs to consider.
We next sieve out any graph that has one of the 26 known MMIK graphs of order ten (see subsection~\ref{sec:26known}) as a subgraph or any of the known MMIK graphs of order less than ten as a minor. We also discard any $2$-apex graph, which must be nIK by Lemma~\ref{lem:2apex}. We test the remaining graphs using Ramin Naimi's implementation~\cite{NW} of the algorithm of Miller and Naimi~\cite{MN}. Those which were found to be IK led us to the nine new MMIK graphs listed above. Those graphs that we could not otherwise classify led us to a list of 35 graphs that we subsequently showed to be maxnik: following~\cite{EFM}, we say that a graph $G$ is {\em maximal knotless} or {\em maxnik} if $G$ is nIK, but every $G+e$ is IK. In Appendix~\ref{sec:appmnik} we provide a classification of the 49 maxnik graphs of order ten. In addition to the 35 graphs just mentioned, there are 14 maximal $2$-apex graphs. Once we determined the 35 MMIK obstructions and the 35 maxnik (and not $2$-apex) graphs of order ten, we were in a position to show that the remaining nearly 5 million candidates $G$ are not MMIK for at least one of the following three reasons: 1) $G$ is $2$-apex and nIK by Lemma~\ref{lem:2apex}, 2) $G$ has a proper minor that is IK and is therefore not MMIK, or 3) $G$ is a subgraph of one of the 35 maxnik graphs and is therefore nIK.
Having determined the 49 maxnik graphs of order ten (see Appendix~\ref{sec:appmnik}), we can update some observations of \cite{EFM}, which catalogs the maxnik graphs through order nine. The fourteen maximal $2$-apex graphs are of the form $K_2 \ast T_{8}$, the join of $K_2$ and a planar triangulation on eight vertices. As such, each has $35$ edges and $\Delta(G) = 9$. The remaining 35 maxnik graphs range between size 23 and 34. In particular, we still know of no maxnik graph on 22 edges. To continue Tables 1 and 2 of \cite{EFM}, the least $|E|/|V|$ among maxnik graphs of order ten is $23/10$ and we have $2 \leq \delta(G) \leq 6$ and $5 \leq \Delta(G) \leq 9$. Although we still have no regular maxnik graph, there are several examples where $\Delta(G) - \delta(G)$ is only one.
\section*{Acknowledgements} We thank Ramin Naimi for use of his programs that were essential for this project. We thank the referees for a careful reading of an earlier version of this paper; their feedback resulted in substantial improvements.
\appendix \section{Proof by hand that $G_1$ and $G_2$ are IK}\label{sec:appG12}
In this section, we give a traditional proof (i.e., one that does not rely on computers) that $G_1$ and $G_2$ are IK. For this we use a lemma due, independently, to two groups~\cite{F,TY}.
Let $\DD$ denote the multigraph of Figure~\ref{fig:D4} and, for $i = 1,2,3,4$, let $C_i$ be the cycle of edges $e_{2i-1}, e_{2i}$. For any given embedding of $\DD$, let $\sigma$ denote the mod 2 sum of the Arf invariants of the 16 Hamiltonian cycles in $\DD$ and $\mbox{lk}(C,D)$ the mod 2 linking number of cycles $C$ and $D$. Since the Arf invariant of the unknot is zero, an embedding of $\DD$ with $\sigma \neq 0$ must have a knotted cycle.
\begin{figure}[htb] \centering \includegraphics[scale=1]{D4.eps} \caption{The $\DD$ graph.} \label{fig:D4} \end{figure}
\begin{lemma}\cite{F,TY} \label{lem:D4} Given an embedding of $\DD$, $\sigma \neq 0$ if and only if $\mbox{lk}(C_1,C_3) \neq 0$ and $\mbox{lk}(C_2,C_4) \neq 0$. \end{lemma}
\subsection{$G_1$ is IK}\ In this subsection, we show that $G_1$ is IK. To use Lemma~\ref{lem:D4} we need pairs of linked cycles. We first describe three sets of pairs that we will call $A_i$'s, $B_i$'s, and $C_i$'s. \smallskip
\noindent{\bf Step I:} Define $A_i$ pairs. To find pairs of linked cycles, we use minors of $G_1$ that are members of the Petersen family of graphs as these are the obstructions to linkless embedding~\cite{RST}. Our first example is based on contracting the edges $(3,6)$, $(5,7)$, and $(6,9)$ in Figure~\ref{fig:G1} to produce a $K_{4,4}$ minor with $\{3,4,5,10\}$ and $\{0,1,2,8\}$ as the two parts. For convenience, we use the smallest vertex label to denote the new vertex obtained when contracting edges. Thus, we denote by 3 the vertex obtained by identifying 3, 6, and 9 of $G_1$. Further deleting the edge $(1,3)$ we identify the Petersen family graph $K_{4,4}^-$ as a minor of $G_1$.
\begin{figure}[htb] \centering \includegraphics[scale=1.25]{g1.eps} \caption{The graph $G_1$.} \label{fig:G1} \end{figure}
There are nine pairs of disjoint cycles in $K_{4,4}^-$ and we denote these pairs as $A_1$ through $A_9$. In Table~\ref{tab:An}, we first give the cycle pair in the $K_{4,4}^-$ and then the corresponding pair in $G_1$.
\begin{table}[htb] \centering \begin{tabular}{c|l|l} $A_1$ & 0,4,1,5 -- 2,3,8,10 & 0,4,1,7,5 -- 2,3,8,10 \\ $A_2$ & 0,4,1,10 -- 2,3,8,5 & 0,4,1,10 -- 2,3,8,5 \\ $A_3$ & 1,4,2,5 -- 0,3,8,10 & 1,4,2,5,7 -- 0,9,6,3,8,10 \\ $A_4$ & 1,4,2,10 -- 0,3,8,5 & 1,4,2,10 -- 0,9,6,3,8,5 \\ $A_5$ & 1,4,8,5 -- 0,3,2,10 & 1,4,8,5,7 -- 0,9,6,3,2,10 \\ $A_6$ & 1,4,8,10 -- 0,3,2,5 & 1,4,8,10 -- 0,9,6,3,2,5 \\ $A_7$ & 1,5,2,10 -- 0,3,8,4 & 1,7,5,2,10 -- 0,9,6,3,8,4 \\ $A_8$ & 0,5,1,10 -- 2,3,8,4 & 0,5,7,1,10 -- 2,3,8,4 \\ $A_9$ & 1,5,8,10 -- 0,3,2,4 & 1,7,5,8,10 -- 0,9,6,3,2,4 \\ \end{tabular} \caption{Nine pairs of cycles in $G_1$ called $A_1, \ldots, A_9$.} \label{tab:An} \end{table} \smallskip
\noindent{\bf Step II:} Find pairs $B_i$ and $C_i$. Similarly, we will describe a $K_{3,3,1}$ minor that gives pairs of cycles $B_1$ through $B_9$. Contract edges $(1,7)$, $(3,7)$, and $(7,9)$. Delete vertex $6$ and edge $(2,9)$. The result is a $K_{3,3,1}$ minor with parts $\{0,2,8\}$, $\{4,5,10\}$, and $\{1\}$. In Table~\ref{tab:Bn} we give the nine pairs of cycles, first in $K_{3,3,1}$ and then in $G_1$.
\begin{table}[htb] \centering \begin{tabular}{c|l|l} $B_1$ & 0,1,4 -- 2,5,8,10 & 0,4,1,7,9 -- 2,5,8,10 \\ $B_2$ & 0,1,5 -- 2,4,8,10 & 0,5,7,9 -- 2,4,8,10 \\ $B_3$ & 0,1,10 -- 2,4,8,5 & 0,9,7,1,10 -- 2,4,8,5 \\ $B_4$ & 1,2,4 -- 0,5,8,10 & 1,4,2,3,7 -- 0,5,8,10 \\ $B_5$ & 1,2,5 -- 0,4,8,10 & 2,3,7,5 -- 0,4,8,10 \\ $B_6$ & 1,2,10 -- 0,4,8,5 & 2,3,7,1,10 -- 0,4,8,5 \\ $B_7$ & 1,4,8 -- 0,5,2,10 & 1,4,8,3,7 -- 0,5,2,10 \\ $B_8$ & 1,5,8 -- 0,4,2,10 & 3,7,5,8 -- 0,4,2,10 \\ $B_9$ & 1,8,10 -- 0,4,2,5 & 1,7,3,8,10 -- 0,4,2,5 \\ \end{tabular} \caption{Nine pairs of cycles in $G_1$ called $B_1, \ldots, B_9$.} \label{tab:Bn} \end{table}
Another $K_{4,4}^-$ minor of $G_1$ will give our last set of nine cycle pairs. Contract edges $(0,9)$, $(2,5)$, and $(3,8)$ to obtain a $K_{4,4}$ with parts $\{0,1,2,8\}$ and $\{4,6,7,10\}$. Then delete edge $(1,4)$ to make a $K_{4,4}^-$ minor. Table~\ref{tab:Cn} lists the nine pairs of cycles, first in the $K_{4,4}^-$ minor and then in $G_1$.
\begin{table}[htb] \centering \begin{tabular}{c|l|l} $C_1$ & 0,6,1,7 -- 2,4,8,10 & 9,6,1,7 -- 2,4,8,10 \\ $C_2$ & 0,6,1,10 -- 2,4,8,7 & 0,9,6,1,10 -- 2,4,8,3,7,5 \\ $C_3$ & 1,6,2,7 -- 0,4,8,10 & 1,6,5,7 -- 0,4,8,10 \\ $C_4$ & 1,6,2,10 -- 0,4,8,7 & 1,6,5,2,10 -- 0,4,8,3,7,9 \\ $C_5$ & 1,6,8,7 -- 0,4,2,10 & 1,6,3,7 -- 0,4,2,10 \\ $C_6$ & 1,6,8,10 -- 0,4,2,7 & 1,6,3,8,10 -- 0,4,2,5,7,9 \\ $C_7$ & 0,7,1,10 -- 2,4,8,6 & 0,9,7,1,10 -- 2,4,8,3,6,5 \\ $C_8$ & 1,7,2,10 -- 0,4,8,6 & 1,7,5,2,10 -- 0,4,8,3,6,9 \\ $C_9$ & 1,7,8,10 -- 0,4,2,6 & 1,7,3,8,10 -- 0,4,2,5,6,9 \\ \end{tabular} \caption{Nine pairs of cycles in $G_1$ called $C_1, \ldots, C_9$.} \label{tab:Cn} \end{table}
As shown by Sachs~\cite{S}, in any embedding of $K_{4,4}^-$ or $K_{3,3,1}$, at least one pair of the nine disjoint cycles in each graph has odd linking number. We will simply say the cycles are {\em linked} if the linking number is odd. Fix an embedding of $G_1$. Our goal is to show that the embedding must have a knotted cycle. We will argue by contradiction. For a contradiction, assume that there is no knotted cycle in the embedding of $G_1$. We leverage Lemma~\ref{lem:D4} to deduce that certain pairs of cycles are not linked. Eventually, we will conclude that none of $B_1, \ldots, B_9$ are linked. This is a contradiction as these correspond to cycles in a $K_{3,3,1}$ and we know that every embedding of this Petersen family graph must have a pair of linked cycles~\cite{S}. The contradiction shows that there must in fact be a knotted cycle in the embedding of $G_1$. As the embedding is arbitrary, this shows that $G_1$ is IK. \smallskip
\noindent{\bf Step III:} Eliminate $A_2$ by combining with each $B_i$. We illustrate our strategy by first focusing on the pair $A_2 = $ 0,4,1,10 -- 2,3,8,5. Combine $A_2$ with each $B_i$. In each case we form a $\DD$ graph as in Figure~\ref{fig:D4}. Since the $B_i$ are pairs of cycles in $K_{3,3,1}$, a Petersen family graph, at least one pair is linked~\cite{S}. If $A_2$ is also linked, then Lemma~\ref{lem:D4} implies that the embedding of $G_1$ has a knotted cycle, in contradiction to our assumption. Therefore, we conclude that $A_2$ is not linked (i.e., does not have odd linking number).
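
Before filling in the details of this step, we remark that the entries of Tables~\ref{tab:An}, \ref{tab:Bn}, and \ref{tab:Cn} are easily checked by machine. The sketch below (Python with networkx; an illustration of the check rather than the verification used in preparing the tables) confirms that a listed pair is a pair of vertex-disjoint cycles of $G_1$, shown here for the pairs $A_2$ and $B_2$.
\begin{verbatim}
import networkx as nx

G1 = nx.Graph([(0, 4), (0, 5), (0, 9), (0, 10), (1, 4), (1, 6), (1, 7), (1, 10),
               (2, 3), (2, 4), (2, 5), (2, 9), (2, 10), (3, 6), (3, 7), (3, 8),
               (4, 8), (5, 6), (5, 7), (5, 8), (6, 9), (7, 9), (8, 10)])

def is_cycle(G, verts):
    """True if the vertices, taken in the order listed, form a cycle of G."""
    return (len(set(verts)) == len(verts) and
            all(G.has_edge(verts[i], verts[(i + 1) % len(verts)])
                for i in range(len(verts))))

def disjoint_cycle_pair(G, c1, c2):
    return is_cycle(G, c1) and is_cycle(G, c2) and not set(c1) & set(c2)

A2 = ([0, 4, 1, 10], [2, 3, 8, 5])   # the pair A_2, as cycles of G_1
B2 = ([0, 5, 7, 9], [2, 4, 8, 10])   # the G_1 representative of the pair B_2
print(disjoint_cycle_pair(G1, *A2), disjoint_cycle_pair(G1, *B2))  # True True
\end{verbatim}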
\begin{table}[htb] \centering \begin{tabular}{c|l} $B_1$ & $\{0,1,4\}$, $\{2,5,8\}$, $\{3,7\}$, $\{10\}$ \\ $B_2$ & $\{0\}$, $\{1,4,10\}$, $\{2,3,8\}$, $\{5\}$ \\ $B_3$ & $\{0,1,10\}$, $\{2,5,8\}$, $\{3,7\}$, $\{4\}$ \\ $B_4$ & $\{0,10\}$, $\{1,4\}$, $\{2,3\}$, $\{5,8\}$ \\ $B_5$ & $\{0,4,10\}$, $\{1\}$, $\{2,3,5\}$, $\{8\}$ \\ $B_6$ & $\{0,4\}$, $\{1,10\}$, $\{2,3\}$, $\{5,8\}$ \\ $B_7$ & $\{0,10\}$, $\{1,4\}$, $\{2,5\}$, $\{3,8\}$ \\ $B_8$ & $\{0,4,10\}$, $\{1,7\}$, $\{2\}$, $\{3,8,5\}$ \\ $B_9$ & $\{0,4\}$, $\{1,10\}$, $\{2,5\}$, $\{3,8\}$ \\ \end{tabular} \caption{Pairing $A_2$ with $B_1, \ldots, B_9$.} \label{tab:A2} \end{table} In Table~\ref{tab:A2}, we list the vertices in $G_1$ that are identified to give each of the four vertices of $\DD$. Let us examine the pairing with $B_2$ as an example to see how this results in a $\DD$. We identify $\{1,4,10\}$ as a single vertex by contracting edges $(1,4)$ and $(1,10)$. Similarly contract $(2,3)$ and $(3,8)$ to make a vertex of the $\DD$ from vertices $\{2,3,8\}$ of $G_1$. In this way, the cycle 0,4,1,10 of $A_2$ in $G_1$ becomes cycle $C_1$ of the $\DD$ (see Figure~\ref{fig:D4}) between $\{1,4,10\}$ and $\{0\}$ and the cycle 2,3,8,5 becomes cycle $C_3$ between $\{2,3,8\}$ and $\{5\}$. Similarly 0,5,7,9 of $B_2$ becomes homeomorphic to the cycle $C_2$ between $\{0\}$ and $\{5\}$. For the final cycle of $B_2$, 2,4,8,10, we observe that, in homology, $\mbox{2,4,8,10 } = \mbox{ 1,4,2,10 } \cup \mbox{ 1,4,8,10}$. If $\mbox{lk}((\mbox{0,5,7,9}),(\mbox{2,4,8,10})) \neq 0$, then one of $\mbox{lk}((\mbox{0,5,7,9}),(\mbox{1,4,2,10}))$ and $\mbox{lk}((\mbox{0,5,7,9}),(\mbox{1,4,8,10}))$ is also nonzero. Whichever it is, 1,4,2,10 or 1,4,8,10, that will be our $C_4$ cycle in the $\DD$ of Figure~\ref{fig:D4}. To summarize, we have argued that $A_2$ forms a $\DD$ with each pair $B_1, \ldots, B_9$. Since at least one of the $B_i$'s is linked, if $A_2$ were also linked, these two pairs would make a $\DD$ that has a knotted cycle. Therefore, by way of contradiction, going forward, we may assume $A_2$ is not linked. \smallskip \noindent{\bf Step IV:} Argue $A_6$ is not linked. We next argue that $A_6$ is not linked. For a contradiction, assume instead that $A_6$ is linked. Pairing with the $B_i$'s again, the vertices for each $\DD$ are \begin{align*} B_1 & \{0,9\}, \{1,4\}, \{2,5\}, \{8,10\} & B_2 & \{0,5,9\}, \{1,7\}, \{2\}, \{4,8,10 \} \\ B_3 & \{0,9\}, \{1,10\}, \{2,5\}, \{4,8\} & B_4 & \{0,5\}, \{1,4\}, \{2,3\}, \{8,10 \} \\ B_5 & \{0\}, \{1,7\}, \{2,3,5\}, \{4,8,10\} & B_6 & \{0,5\}, \{1,10\}, \{2,3\}, \{4,8 \} \\ B_7 & \{0,2,5\}, \{1,4,8\}, \{3\}, \{10\} & B_9 & \{0,2,5\}, \{1,8,10\}, \{3\}, \{4 \}.\\ \end{align*} For $B_8 = $ 3,7,5,8 -- 0,4,2,10, we first split one of the $A_6$ cycles: $\mbox{0,5,2,3,6,9 } = \mbox{ 0,5,2,9 } \cup \mbox{ 2,3,6,9}$. One of the two summands must link with the other $A_6$ cycle $1,4,8,10$. If $\mbox{lk}((\mbox{0,5,2,9}),(\mbox{1,4,8,10})) \neq 0$, then, by a symmetry of $G_1$, $\mbox{lk}((\mbox{8,5,2,3}),(\mbox{1,4,0,10})) \neq 0$. But this last is the pair $A_2$, and we have already argued that $A_2$ is not linked. Therefore, it must be that $\mbox{lk}((\mbox{2,3,6,9}),(\mbox{1,4,8,10})) \neq 0$. Next, we split a cycle of $B_8$: $\mbox{0,4,2,10 } = \mbox{ 1,4,2,10 } \cup \mbox{ 0,4,1,10}$. If $\mbox{lk}((\mbox{3,7,5,8}),(\mbox{1,4,2,10})) \neq 0$, form a $\DD$ with vertices $\{1,4,10\}$, $\{2\}$, $\{3\}$, and $\{8\}$.
On the other hand, If $\mbox{lk}((\mbox{3,7,5,8}),(\mbox{0,4,1,10})) \neq 0$, form a $\DD$ with vertices $\{0,9\}$, $\{1,4,10\}$, $\{3\}$, and $\{8\}$. For every choice of $B_i$ we can make a $\DD$ with $A_6$. We know that at least one $B_i$ is a linked pair. If $A_6$ is also linked, then, by Lemma~\ref{lem:D4}, this embedding of $G_1$ has a knotted cycle. Therefore, by way of contradiction, we may assume the pair of $A_6$ is not linked. \smallskip \noindent{\bf Step V:} Eliminate $B_2$ and $B_8$. We next eliminate $B_2$ by pairing it with each $A_i$. As we are assuming $A_2$ and $A_6$ are not linked, it must be some other $A_i$ pair that is linked. Here are the vertices of the $\DD$'s in each case: \begin{align*} A_1 & \{0,5,7\}, \{2,8,10\}, \{3,6,9\}, \{4\} & A_3 & \{0,9\}, \{2,4\}, \{5,7\}, \{8,10 \} \\ A_4 & \{0,5,9\}, \{1,7\}, \{2,4,10\}, \{8\} & A_5 & \{0,9\}, \{2,10\}, \{4,8\}, \{5,7 \} \\ A_7 & \{0,9\}, \{2,10\}, \{4,8\}, \{5,7\} & A_8 & \{0,5,7\}, \{2,4,8\}, \{3,6,9\}, \{10\} \\ A_9 & \{0,9\}, \{2,4\}, \{5,7\}, \{8,10\}. \\ \end{align*} This shows $B_2$ is not linked. Since $B_8$ is the same as $B_2$ by a symmetry of $G_1$, $B_8$ is likewise not linked. Ultimately, we will show that no $B_i$ is linked. So far, we have this for $B_2$ and $B_8$. \smallskip \noindent{\bf Step VI:} Eliminate $C_1$, $C_5$, and $C_3$. Our next step is to argue $C_1$ is not linked by pairing it with the remaining $A_i$'s: \begin{align*} A_1 & \{1,7\}, \{2,8,10\}, \{3,6\}, \{4\} & A_3 & \{1,7\}, \{2,4\}, \{6,9\}, \{8,10 \} \\ A_4 & \{1\}, \{2,4,10\}, \{6,9\}, \{8\} & A_5 & \{1,7\}, \{2,10\}, \{4,8\}, \{6,9 \} \\ A_7 & \{0,4,8\}, \{1,5,7\}, \{6\}, \{10\} & A_8 & \{1,7\}, \{2,4,8\}, \{3,6\}, \{10\} \\ A_9 & \{1,7\}, \{2,4\}, \{6,9\}, \{8,10\}. \\ \end{align*} By a symmetry of $G_1$, $C_5$ is also not linked. Now we argue that $C_3$ is not linked, again by pairing with $A_i$'s: \begin{align*} A_1 & \{0,4\}, \{1,5,7\}, \{3,6\}, \{8,10\} & A_3 & \{0,8,10\}, \{1,5,7\}, \{4\}, \{6 \} \\ A_5 & \{0,10\}, \{1,5,7\}, \{4,8\}, \{6\} & A_7 & \{0,4,8\}, \{1,5,7\}, \{6\}, \{10\} \\ A_8 & \{0,10\}, \{1,5,7\}, \{4,8\}, \{6\} & A_9 & \{0,4\}, \{1,5,7\}, \{6\}, \{8,10\}. \\ \end{align*} For $A_4$ we split the second cycle: $\mbox{0,9,6,3,8,5 } = \mbox{ 0,9,6,5 } \cup \mbox{ 6,3,8,5}$. Suppose that it is 0,9,6,5 that is linked with 1,4,2,10. In this case we also split that cycle: $\mbox{1,4,2,10 } = \mbox{ 1,4,8,10 } \cup \mbox{ 2,4,8,10}$. To get a $\DD$ when pairing 0,9,6,5 -- 1,4,8,10 with $C_3$ we use vertices $\{0\}$, $\{1\}$, $\{4,8,10\}$, and $\{5,6\}$ and for 0,9,6,5 -- 2,4,8,10 with $C_3$, $\{0\}$, $\{2,3,7\}$, $\{4,8,10\}$, and $\{5,6\}$. On the other hand, if it is 6,3,8,5 that is linked with 1,4,2,10, we write $\mbox{1,4,2,10 } = \mbox{ 0,4,1,10 } \cup \mbox{ 0,4,2,10}$. Then when $C_3$ is paired with 6,3,8,5 --- 0,4,1,10, we have a $\DD$ using vertices $\{0,4,10\}$, $\{1\}$, $\{5,6\}$, and $\{8\}$ while if $C_3$ is paired with 6,3,8,5 -- 0,4,2,10 the vertices are $\{0,4,10\}$, $\{2,7,9\}$, $\{5,6\}$, and $\{8\}$. This completes the argument that $C_3$ is not linked. \smallskip \noindent{\bf Step VII:} Eliminate $A_8$ and $A_1$. We will show that $A_8$ is not linked by pairing with the remaining $C_i$'s. For $C_8$, the vertices would be $\{0\}$, $\{1,5,7,10\}$, $\{2\}$, and $\{3,4,8\}$. The remaining cases involve splitting cycles. For $C_2$ we write $\mbox{2,4,8,3,7,5 } = \mbox{ 3,7,5,8 } \cup \mbox{ 2,4,8,5}$. 
If it is 3,7,5,8 -- 0,9,6,1,10 that is linked, we use vertices $\{0,1,10\}$, $\{2,9\}$, $\{3,8\}$, and $\{5,7\}$ and if, instead, 2,4,8,5 -- 0,9,6,1,10 is linked, we have $\{0,1,10\}$, $\{2,4,8\}$, $\{3,6\}$, and $\{5\}$. For $C_4$, it is a cycle of $A_8$ that we rewrite: $\mbox{0,5,7,1,10 } = \mbox{ 0,9,7,1,10 } \cup \mbox{ 0,9,7,5}$. In either case, we use the same vertices: $\{0,7,9\}$, $\{1,5,6,10\}$, $\{2\}$, and $\{3,4,8\}$. For $C_6$, $\mbox{0,4,2,5,7,9 } = \mbox{ 0,4,2,9 } \cup \mbox{ 2,5,7,9}$. When 0,4,2,9 is linked with 1,6,3,8,10 the vertices are $\{0,9\}$, $\{1,10\}$, $\{2,4\}$, and $\{3,8\}$ while if 2,5,7,9 links 1,6,3,8,10, we use $\{1,10\}$, $\{2\}$, $\{3,8\}$, and $\{5,7\}$. Continuing with $C_7$, $\mbox{2,4,8,3,6,5 } = \mbox{ 3,6,5,8 } \cup \mbox{ 2,4,8,5}$. The vertices for 3,6,5,8 -- 0,9,7,1,10 are $\{0,1,7,10\}$, $\{2,9\}$, $\{3,8\}$, and $\{5\}$ and for 2,4,8,5 -- 0,9,7,1,10, use $\{0,1,7,10\}$, $\{2,4,8\}$, $\{3,6,9\}$, and $\{5\}$. Finally, in the case of $C_9$, write $\mbox{0,4,2,5,6,9 } = \mbox{ 0,4,2,9 } \cup \mbox{ 2,5,6,9}$. When 0,4,2,9 -- 1,7,3,8,10 is linked, the vertices are $\{0\}$, $\{1,7,10\}$, $\{2,4\}$, and $\{3,8\}$ and for 2,5,6,9 -- 1,7,3,8,10 use $\{1,7,10\}$, $\{2\}$, $\{3,8\}$, and $\{5\}$. This completes the argument for $A_8$. By a symmetry of $G_1$, $A_1$ is also not linked. In other words, going forward, we will assume it is one of $A_3$, $A_4$, $A_5$, $A_7$, or $A_9$ that is linked. \smallskip \noindent{\bf Step VIII:} Eliminate $B_1$, $B_3$, and $B_5$. Next, we will argue that $B_1$ is not linked by comparing with the remaining $A_i$'s: \begin{align*} A_3 & \{0,9\}, \{1,4,7\}, \{2,5\}, \{8,10\} & A_4 & \{0,9\}, \{1,4\}, \{2,10\}, \{5,8\} \\ A_5 & \{0,9\}, \{1,4,7\}, \{2,10\}, \{5,8\} & A_7 & \{0,4,9\}, \{1,7\}, \{2,5,10\}, \{8\} \\ A_9 & \{0,4,9\}, \{1,7\}, \{2\}, \{5,8,10\}. \\ \end{align*} By a symmetry of $G_1$ we also assume $B_3$ is not linked. Now, by pairing with the remaining $A_i$'s, we show $B_5$ is not linked: \begin{align*} A_3 & \{0,8,10\}, \{2,5,7\}, \{3\}, \{4\} & A_5 & \{0,10\}, \{2,3\}, \{4,8\}, \{5,7\} \\ A_7 & \{0,4,8\}, \{2,5,7\}, \{3\}, \{10\} & A_9 & \{0,4\}, \{2,3\}, \{5,7\}, \{8,10\}. \\ \end{align*} For $A_4$ we employ several splits. First, $\mbox{0,9,6,3,8,5 } = \mbox{ 0,9,6,5 } \cup \mbox{ 6,3,8,5}$. In the case that 0,9,6,5 -- 1,4,2,10 is linked, write $\mbox{1,4,2,10 } = \mbox{ 2,4,8,10 } \cup \mbox{ 1,4,8,10}$. Pairing 0,9,6,5 -- 2,4,8,10 with $B_5$, the vertices are $\{0\}$, $\{2\}$, $\{4,8,10\}$, and $\{5\}$. If instead it is 0,9,6,5 -- 1,4,8,10 that is linked, we use $\{0\}$, $\{1,7\}$, $\{4,8,10\}$, and $\{5\}$. So we assume that 6,3,8,5 -- 1,4,2,10 is linked and rewrite a $B_5$ cycle: $\mbox{2,3,7,5 } = \mbox{ 2,3,6,5 } \cup \mbox{ 3,6,5,7}$. In case 2,3,6,5 -- 0,4,8,10 is linked, we make a further split: $\mbox{1,4,2,10 } = \mbox{ 1,7,9,2,10 } \cup \mbox{ 1,4,9,2,10}$. Thus, assuming 2,3,6,5 -- 0,4,8,10 and 1,7,9,2,10 -- 6,3,8,5 are both linked, we have a $\DD$ with vertices $\{2\}$, $\{3,5,6\}$, $\{8\}$, and $\{10\}$. If instead it is 2,3,6,5 -- 0,4,8,10 and 1,4,9,2,10 -- 6,3,8,5 that are linked, use $\{2\}$, $\{3,5,6\}$, $\{4\}$, and $\{8\}$. This leaves the case where 3,6,5,7 -- 0,4,8,10 is linked. We must split a final time: $\mbox{1,4,2,10 } = \mbox{ 0,4,1,10 } \cup \mbox{ 0,4,2,10}$. If 3,6,5,7 -- 0,4,8,10 and 0,4,1,10 -- 6,3,8,5 are both linked, the vertices are $\{0,4,10\}$, $\{1,7\}$, $\{3,5,6\}$, and $\{8\}$.
On the other hand, when 3,6,5,7 -- 0,4,8,10 and 0,4,2,10 -- 6,3,8,5 are linked, use $\{0,4,10\}$, $\{2,7,9\}$, $\{3,5,6\}$, and $\{8\}$. This completes the argument for $B_5$, which we have shown is not linked. \smallskip \noindent{\bf Step IX:} Eliminate $B_9$. Only $B_4$ and $B_6$ remain. Next we turn to $B_9$ which we compare with the remaining $A_i$'s: \begin{align*} A_3 & \{0\}, \{1,7\}, \{2,4,5\}, \{3,8,10\} & A_4 & \{0,5\}, \{1,10\}, \{2,4\}, \{3,8\} \\ A_7 & \{0,4\}, \{1,7,10\}, \{2,5\}, \{3,8\} & A_9 & \{0,2,4\}, \{1,7,8,10\}, \{3\}, \{5\}. \\ \end{align*} This leaves $A_5$ for which we rewrite: $\mbox{0,9,6,3,2,10 } = \mbox{ 0,9,2,10 } \cup \mbox{ 2,3,6,9}$. If 0,9,2,10 -- 1,4,8,5,7 is linked, then observe that, by a symmetry of $G_1$, this is the same as $A_8$, which is not linked. Thus, we can assume 2,3,6,9 -- 1,4,8,5,7 is linked and rewrite: $\mbox{1,4,8,5,7 } = \mbox{ 1,4,0,5,7 } \cup \mbox{ 0,4,8,5}$. Pairing 2,3,6,9 - 1,4,0,5,7 with $B_9$ gives a $\DD$ on vertices $\{0,4,5\}$, $\{1,7\}$, $\{2\}$, and $\{3\}$. If it is 2,3,6,9 -- 0,4,8,5 that is linked, then, pairing with $B_9$, use $\{0,4,5\}$, $\{2\}$, $\{3\}$, and $\{8\}$. This completes the argument for $B_9$ and, by a symmetry of $G_1$, also for $B_7$. Recall that our goal is to argue, for a contradiction, that no $B_i$ is linked. At this stage we are left only with $B_4$ and $B_6$ as pairs that could be linked. \smallskip \noindent{\bf Step X:} Eliminate $A_4$, $A_5$, and $A_9$ leaving only $A_3$ and $A_7$. Before providing the argument for the remaining two $B_i$'s, we first eliminate a few more $A_i$'s, starting with $A_4$, which we compare with the two remaining $B_i$'s: \begin{align*} B_4 & \{0,5,8\}, \{1,2,4\}, \{3\}, \{10\} & B_6 & \{0,5,8\}, \{1,2,10\}, \{3\}, \{4\}. \\ \end{align*} Next $A_5$, again by pairing with $B_4$ and $B_6$: \begin{align*} B_4 & \{0,10\}, \{1,4,7\}, \{2,3\}, \{5,8\} & B_6 & \{0\}, \{1,7\}, \{2,3,10\}, \{4,8,5\}. \\ \end{align*} Since $A_9$ agrees with $A_5$ under a symmetry of $G_1$, this leaves only $A_3$ and $A_7$ as pairs that may yet be linked among the $A_i$'s. \smallskip \noindent{\bf Step XI:} Eliminate $C_6$ and $C_9$, leaving $C_2$, $C_4$, $C_7$, and $C_8$. As a penultimate step, we show that $C_6$ is not linked by comparing with these two remaining $A_i$'s: \begin{align*} A_3 & \{0,9\}, \{1\}, \{2,4,5,7\}, \{3,6,8,10\} & A_7 & \{0,4,9\}, \{1,10\}, \{2,5,7\}, \{3,6,8\}. \\ \end{align*} By a symmetry of $G_1$, we can also assume that $C_9$ is not linked. This leaves only four $C_i$'s that may be linked: $C_2$, $C_4$, $C_7$, and $C_8$. \smallskip \noindent{\bf Step XII:} Eliminate the remaining two $B_i$'s ($B_6$ and $B_4$) to complete the argument. Finally, compare $B_6$ with the remaining $C_i$'s: \begin{align*} C_4 & \{0,4,8\}, \{1,2,10\}, \{3,7\}, \{5\} & C_8 & \{0,4,8\}, \{1,2,7,10\}, \{3\}, \{5\}. \\ \end{align*} For $C_2$ we rewrite: $\mbox{2,4,8,3,7,5 } = \mbox{ 2,3,8,4 } \cup \mbox{ 2,3,7,5}$. If 2,3,8,4 -- 0,9,6,1,10 is linked, then pairing with $B_6$, we have vertices $\{0\}$, $\{1,10\}$, $\{2,3\}$, and $\{4,8\}$. On the other hand pairing $B_6$ with 2,3,7,5 -- 0,9,6,1,10 we will have a $\DD$ using the vertices $\{0\}$, $\{1,10\}$, $\{2,3,7\}$, and $\{5\}$. For $C_7$: $\mbox{2,4,8,3,6,5 } = \mbox{ 2,3,8,4 } \cup \mbox{ 2,3,6,5}$. Then 2,3,8,4 -- 0,9,7,1,10 with $B_6$ gives a $\DD$ for vertices $\{0\}$, $\{1,7,10\}$, $\{2,3\}$, and $\{4,8\}$. On the other hand, 2,3,6,5 -- 0,9,7,1,10 pairs with $B_6$ using vertices $\{0\}$, $\{1,7,10\}$, $\{2,3\}$, and $\{5\}$. 
This completes the argument for $B_6$. By a symmetry of $G_1$ we see that $B_4$ is also not linked. In this way, the assumption that there is no knotted cycle in the given embedding of $G_1$ forces us to conclude that no pair $B_1, \ldots, B_9$ is linked. However, these correspond to the cycles of a $K_{3,3,1}$. As Sachs~\cite{S} has shown, any embedding of $K_{3,3,1}$ must have a pair of cycles with odd linking number. The contradiction shows that there can be no such knotless embedding and $G_1$ is IK. This completes the proof that $G_1$ is MMIK. \subsection{$G_2$ is MMIK}\ In this subsection, we show that $G_2$ is IK. The argument is similar to that for $G_1$ above. To use Lemma~\ref{lem:D4}, we need pairs of linked cycles in $G_2$. \smallskip \noindent{\bf Step I:} Define pairs $A_i$, $B_i$, $C_i$, and $D_i$. We begin by identifying four ways that the Petersen family graph $P_9$, of order nine, appears as a minor of graph $G_2$. Using the vertex labelling of Figure~\ref{fig:G2}, on contracting edge $(0,4)$ and deleting vertex 8, the resulting graph has $P_9$ as a subgraph. \begin{figure}[htb] \centering \includegraphics[scale=1.25]{g2.eps} \caption{The graph $G_2$.} \label{fig:G2} \end{figure} There are seven pairs of disjoint cycles in $P_9$. we denote these pairs as $A_1$ through $A_7$. In Table~\ref{tab:G2An}, we give the pairs in $G_2$. \begin{table}[htb] \centering \begin{tabular}{c|l} $A_1$ & 0,4,10,5 -- 1,6,2,9,3,7 \\ $A_2$ & 0,2,6,10,4 -- 1,5,9,3,7 \\ $A_3$ & 0,2,9,5 -- 1,6,10,3,7 \\ $A_4$ & 0,5,1,7 -- 2,6,10,3,9 \\ $A_5$ & 0,4,10,3,7 -- 1,5,9,2,6 \\ $A_6$ & 1,5,10,6 -- 0,2,9,3,7 \\ $A_7$ & 3,9,5,10 -- 0,2,6,1,7 \\ \end{tabular} \caption{Seven pairs of cycles in $G_2$ called $A_1, \ldots, A_7$.} \label{tab:G2An} \end{table} Similarly, if we contract edge $(3,10)$ and delete vertex 7 in $G_2$, the resulting graph has a $P_9$ subgraph. We will call these seven cycles $B_1, \ldots B_7$ as in Table~\ref{tab:G2Bn} \begin{table}[htb] \centering \begin{tabular}{c|l} $B_1$ & 0,2,6 -- 1,5,9,3,10,4,8 \\ $B_2$ & 0,2,8,4 -- 1,5,9,3,10,6 \\ $B_3$ & 0,2,9,5 -- 1,6,10,4,8 \\ $B_4$ & 0,4,10,6 -- 1,5,9,2,8 \\ $B_5$ & 0,5,1,6 -- 2,8,4,10,3,9 \\ $B_6$ & 1,6,2,8 -- 0,4,10,3,9,5 \\ $B_7$ & 2,6,10,3,9 -- 0,4,8,1,5 \\ \end{tabular} \caption{Seven pairs of cycles in $G_2$ called $B_1, \ldots, B_7$.} \label{tab:G2Bn} \end{table} Contracting edge $(4,10)$ and deleting vertex 6, we have the seven cycles of Table~\ref{tab:G2Cn}. \begin{table}[htb] \centering \begin{tabular}{c|l} $C_1$ & 3,8,4,10 -- 0,2,9,5,1,7 \\ $C_2$ & 3,8,1,7 -- 0,2,9,5,10,4 \\ $C_3$ & 2,8,3,9 -- 0,4,10,5,1,7 \\ $C_4$ & 0,4,10,3,7 -- 1,5,9,2,8 \\ $C_5$ & 3,9,5,10 -- 0,2,8,1,7 \\ $C_6$ & 1,5,10,4,8 -- 0,2,9,3,7 \\ $C_7$ & 0,2,8,4 -- 1,5,9,3,7 \\ \end{tabular} \caption{Seven pairs of cycles in $G_2$ called $C_1, \ldots, C_7$.} \label{tab:G2Cn} \end{table} Finally, if we contract edge $(3,9)$ and delete vertex 7 in $G_2$, the resulting graph has a $P_9$ subgraph. We will call these seven cycles $D_1, \ldots D_7$ as in Table~\ref{tab:G2Dn}. \begin{table}[htb] \centering \begin{tabular}{c|l} $D_1$ & 2,8,3,9 -- 0,4,10,6,1,5 \\ $D_2$ & 0,2,9,5 -- 1,6,10,4,8 \\ $D_3$ & 0,2,8,4 -- 1,5,9,3,10,6 \\ $D_4$ & 1,5,9,3,8 -- 0,2,6,10,4 \\ $D_5$ & 1,6,2,8 -- 0,4,10,3,9,5 \\ $D_6$ & 3,8,4,10 -- 0,2,6,1,5 \\ $D_7$ & 2,6,10,3,9 -- 0,4,8,1,5 \\ \end{tabular} \caption{Seven pairs of cycles in $G_2$ called $D_1, \ldots, D_7$.} \label{tab:G2Dn} \end{table} \smallskip \noindent{\bf Step II:} Eliminate $A_5$. 
We will need to introduce two more Petersen family graph minors later, but let us begin by ruling out some of the pairs we already have. As in our argument for $G_1$, we assume that we have a knotless embedding of $G_2$ and step by step argue that various cycle pairs are not linked (i.e. do not have odd linking number) using Lemma~\ref{lem:D4}. Eventually, this will allow us to deduce that all seven pairs $B_1, \ldots, B_7$ are not linked. This is a contradiction since Sachs~\cite{S} showed that in any embedding of $P_9$, there must be a pair of cycles with odd linking number. The contradiction shows that there is no such knotless embedding and $G_2$ is IK. We will see that $A_5$ is not linked by showing it results in a $\DD$ with every pair $B_1, \ldots, B_7$. Indeed the vertices of the $\DD$ are formed by contracting the following vertices \begin{align*} B_1 & \{0\}, \{1,5,9\}, \{2,6\}, \{3,4,10\} & B_2 & \{0,4\}, \{1,5,6,9\}, \{2\}, \{3,10\} \\ B_3 & \{0\}, \{1,6\}, \{2,5,9\}, \{4,10\} & B_4 & \{0,4,10\}, \{1,2,5,9\}, \{3,8\}, \{6\} \\ B_5 & \{0\}, \{1,5,6\}, \{2,9\}, \{3,4,10\} & B_7 & \{0,4\}, \{1,5\}, \{2,6,9\},\{3,10\}. \\ \end{align*} For $B_6 = $ 1,6,2,8 -- 0,4,10,3,9,5, we first split one of the $A_5$ cycles: $\mbox{0,4,10,3,7 } = \mbox{0,4,8,3,7 } \cup \mbox{ 3,8,4,10}$. One of the two summands must link with the other $A_5$ cycle $1,5,9,2,6$. If $\mbox{lk}((\mbox{3,8,4,10}),(\mbox{1,5,9,2,6})) \neq 0$, then, by contracting edges, we form a $\DD$ with $A_5$ whose vertices are $\{1,2,6\}, \{3,4,10\}, \{5,9\}, \{8\}$. On the other hand, if $\mbox{lk}((\mbox{0,4,8,3,7}),(\mbox{1,5,9,2,6})) \neq 0$, then we will split the $B_6$ cycle $\mbox{0,4,10,3,9,5 } = \mbox{0,4,10,5 } \cup \mbox{ 3,9,5,10}$. When $\mbox{lk}((\mbox{1,6,2,8}),(\mbox{0,4,10,5})) \neq 0$, we have a $\DD$ with vertices $\{0,4\}, \{1,2,6\}, \{5\}, \{8\}$ and when $\mbox{lk}((\mbox{1,6,2,8}),$ $(\mbox{3,9,5,10})) \neq 0$, the $\DD$ is on $\{1,2,6\}, \{3\}, \{5,9\}, \{8\}$. We have shown that, for each $B_i$, we must have a $\DD$ with $A_5$. Sachs~\cite{S} showed that in every embedding of $P_9$, there is a pair of linked cycles. Thus, in our embedding of $G_2$, at least one $B_i$ is linked. If $A_5$ were also linked, that would result in a $\DD$ with a knotted cycle by Lemma~\ref{lem:D4}. This contradicts our assumption that we have a knotless embedding of $G_2$. Therefore, going forward, we can assume $A_5$ is not linked. \smallskip \noindent{\bf Step III:} Eliminate $B_7$ and $A_4$. We next argue that $B_7$ is not linked by comparing with $C_1, \ldots, C_7$. For $C_4$, $C_6$, and $C_7$, we immediately form a $\DD$ as follows \begin{align*} C_4 & \{0,4\}, \{1,5,8\}, \{2,9\}, \{3,7\} & C_6 & \{0\}, \{1,4,5,8\}, \{2,3,9\}, \{10\} \\ C_7 & \{0,4,8\}, \{1,5\}, \{2\}, \{3,9\}. \\ \end{align*} For the remaining pairs, we will split a cycle of the $C_i$. For $C_1$, write $\mbox{0,2,9,5,1,7 } = \mbox{0,2,7 } \cup \mbox{ 1,5,9,2,7}$. In the first case, where $\mbox{lk}((\mbox{3,8,4,10}),$ $(\mbox{0,2,7})) \neq 0$, the $\DD$ is on $\{0\}, \{2\}, \{3,10\}, \{4,8\}$. In the second case, $\mbox{lk}((\mbox{3,8,4,10}),$ $(\mbox{1,5,9,7})) \neq 0$, we have $\{1,5\}, \{2,9\}, \{3,10\}, \{4,8\}$. For $C_2$, split $\mbox{0,2,9,5,10,4 } = \mbox{0,2,9,5 } \cup \mbox{ 0,4,10,5}$ with, in the first case, a $\DD$ on $\{0,5\}, \{1,8\}, \{2,9\}, \{3\}$ and, in the second, on $\{0,4,5\}, \{1,8\}, \{3\}, \{10\}$. 
For $C_5$, $\mbox{0,2,8,1,7 } = \mbox{0,2,7 } \cup \mbox{ 1,7,2,8}$, the first case is $\{0\}, \{2\}, \{3,9,10\}, \{5\}$ and the second has $\{1,8\}, \{2\}, \{3,9,10\}, \{5\}$. Finally, for $C_3$ split $\mbox{0,4,10,5,1,7 } = \mbox{1,6,7 } \cup \mbox{ 1,6,7,0,4,10,5}$. In the first case, there is a $\DD$ with vertices $\{1\}, \{2,3,9\}, \{6\}, \{8\}$. In the second case, we split the same cycle a second time: $\mbox{1,6,7,0,4,10,5 } = \mbox{1,5,10,6 } \cup \mbox{ 0,4,10,6,7}$. In the first subcase, we have a $\DD$ with vertices $ \{1,5\}, \{2,3,9\},\{6,10\}, \{8\}$ and in the second subcase, $\{0,4\}, \{2,3,9\}, \{6,10\}, \{8\}$. Going forward, we can assume that $B_7$ is not linked. We next argue that $A_4$ is not linked by comparing with $D_1, \ldots, D_7$. For four of these pairs, we immediately give the vertices of the $\DD$: \begin{align*} D_2 & \{0,5\}, \{1\}, \{2,9\}, \{6,10\} & D_3 & \{0\}, \{1,5\}, \{2\}, \{3,6,9,10\} \\ D_4 & \{0\}, \{1,5\}, \{2,6,10\}, \{3,9\} & D_5 & \{0,5\}, \{1\}, \{2,6\}, \{3,9,10\}. \\ \end{align*} For $D_1$, split the second cycle of $A_4$: $\mbox{2,6,10,3,9 } = \mbox{3,9,4,10 } \cup \mbox{ 2,9,4,10,6}$. In the first case, the vertices of the $\DD$ are $\{0,1,5\}, \{2,7\}, \{3,9\}, \{4,10\}$ and in the second, $\{0,1,5\}, \{2,9\}, \{3,7\}, \{4,6,10\}$. For $D_6$, split the second cycle: $\mbox{0,2,6,1,5 } = \mbox{0,2,9,5 } \cup \mbox{ 1,5,9,2,6}$. In the first case, we have vertices $\{0,5\}, \{1,8\}, \{2,9\}, \{3,10\}$ and in the second, $\{0,4\}, \{1,5\}, \{2,6,9\}, \{3,10\}$. Finally, $D_7$ is the same pair of cycles as $B_7$, which we have assumed is not linked. Going forward, we will assume $A_4$ is not linked. \smallskip \noindent{\bf Step IV:} Introduce pairs $E_i$ to eliminate $A_2$ and $A_3$. This leaves only $A_1$, $A_6$, and $A_7$. We have already argued that we can assume $A_4$ and $A_5$ are not linked. In this step we eliminate $A_2$ and $A_3$, leaving only three $A_i$ that could be linked. For this we use another Petersen family graph minor. Using the labelling of Figure~\ref{fig:G2}, partition the vertices as $\{0,8,9,10\}$ and $\{2,3,4,5\}$. Contract edges $(0,7)$ and $(2,6)$. The resulting graph has a $K_{4,4}^-$ subgraph, where $(5,8)$ is the missing edge. As in Table~\ref{tab:G2En}, we will call the resulting nine pairs of cycles $E_1, \ldots, E_9$. \begin{table}[htb] \centering \begin{tabular}{c|l} $E_1$ & 2,8,4,9 -- 0,5,10,3,7 \\ $E_2$ & 0,4,10,5 -- 2,8,3,9 \\ $E_3$ & 0,2,6,10,5 -- 3,8,4,9 \\ $E_4$ & 0,2,4,8 -- 3,9,5,10 \\ $E_5$ & 4,9,5,10 -- 0,2,8,3,7 \\ $E_6$ & 0,4,8,3,7 -- 2,6,10,5,9 \\ $E_7$ & 0,5,9,3,7 -- 2,6,10,4,8 \\ $E_8$ & 0,4,9,5 -- 2,6,10,3,8 \\ $E_9$ & 0,2,9,5 -- 3,8,4,10 \\ \end{tabular} \caption{Nine pairs of cycles in $G_2$ called $E_1, \ldots, E_9$.} \label{tab:G2En} \end{table} We will use $E_1, \ldots, E_9$ to show that $A_2$ may be assumed unlinked. Except for $E_1$, we list the vertices of the $\DD$: \begin{align*} E_2 & \{0,4,10\}, \{2\}, \{3,9\}, \{5\} & E_3 & \{0,2,6,10\}, \{3,9\}, \{4\}, \{5\} \\ E_4 & \{0,2,4\}, \{1,8\}, \{3,5,9\}, \{10\} & E_5 & \{0,2\}, \{3,7\}, \{4,10\}, \{5,9\} \\ E_6 & \{0,4\}, \{2,6,10\}, \{3,7\}, \{5,9\} & E_7 & \{0\}, \{1,8\}, \{2,4,6,10\}, \{3,5,7,9\} \\ E_8 & \{0,4\}, \{2,6,10\}, \{3\}, \{5,9\} & E_9 & \{0,2\}, \{3\}, \{4,10\}, \{5,9\}. \\ \end{align*} The argument for $E_1$ is a little involved. Let us split the second cycle $\mbox{0,5,10,3,7 } = \mbox{0,5,10,6,1,7 } \cup \mbox{ 1,6,10,3,7}$. \noindent Case 1: Suppose that $\mbox{lk}((\mbox{2,8,4,9}),(\mbox{0,5,10,6,1,7})) \neq 0$.
Next split the first cycle of $A_2$: $\mbox{0,2,6,10,4 } = \mbox{0,2,6 } \cup \mbox{ 0,4,10,6}$. Case 1a): Suppose that $\mbox{lk}((\mbox{0,2,6}),(\mbox{1,5,9,3,7})) \neq 0$. We split the second $E_1$ cycle again: $\mbox{0,5,10,6,1,7 } = \mbox{0,5,10,6 } \cup \mbox{ 0,6,1,7}$. In the first case, where $\mbox{lk}((\mbox{2,8,4,9}),(\mbox{0,5,10,6})) \neq 0$, we have a $\DD$ with vertices $\{0,6\}, \{2\}, \{5\}, \{9\}$. In the second case, where $\mbox{lk}((\mbox{2,8,4,9}),(\mbox{0,6,1,7})) \neq 0$, the $\DD$ vertices are $\{0,6\}, \{1,7\},$ $ \{2\}, \{9\}$. Case 1b): Suppose that $\mbox{lk}((\mbox{0,4,10,6}),(\mbox{1,5,9,3,7})) \neq 0$. Split the second $E_1$ cycle again: $\mbox{0,5,10,6,1,7 } = \mbox{0,5,10,6 } \cup \mbox{ 0,6,1,7}$. In the first case, where $\mbox{lk}((\mbox{2,8,4,9}),(\mbox{0,5,10,6})) \neq 0$, the $\DD$ vertices are $\{0,6,10\}, \{4\}, \{5\}, \{9\}$. In the second case, where $\mbox{lk}((\mbox{2,8,4,9}),(\mbox{0,6,1,7})) \neq 0$, we have vertices $\{0,6\}, \{1,7\},$ $ \{4\}, \{9\}$. \noindent Case 2: Suppose that $\mbox{lk}((\mbox{2,8,4,9}),(\mbox{1,6,10,3,7})) \neq 0$. We split the first cycle of $A_2$: $\mbox{0,2,6,10,4 } = \mbox{0,2,6 } \cup \mbox{ 0,4,10,6}$. In the first case, the $\DD$ has vertices $\{1,3,7\}, \{2\}, \{6\}, \{9\}$ and in the second $\{1,3,7\}, \{4\}, \{6,10\}, \{9\}$. This completes the argument for $A_2$, which we henceforth assume is not linked. Next we again use $E_1, \ldots, E_9$ to see that $A_3$ is also not linked. Except for $E_8$ and $E_9$ we immediately have a $\DD$: \begin{align*} E_1 & \{0,5\}, \{1,8\}, \{2,9\}, \{3,7,10\} & E_2 & \{0,5\}, \{2,9\}, \{3\}, \{10\} \\ E_3 & \{0,2,5\}, \{3\}, \{6\}, \{9\} & E_4 & \{0,2\}, \{1,8\}, \{3,10\}, \{5,9\} \\ E_5 & \{0,2\}, \{3,7\}, \{5,9\}, \{10\} & E_6 & \{0\}, \{2,5,9\}, \{3,7\}, \{6,10\} \\ E_7 & \{0,5,9\}, \{2\}, \{3,7\}, \{6,10\} \\ \end{align*} For $E_8$, split the first cycle $\mbox{0,4,9,5 } = \mbox{0,5,1,7 } \cup \mbox{ 0,4,9,5,1,7}$. In the first case, we have a $\DD$ with vertices $\{0,5\}, \{1,7\}, \{2\}, \{3,6,10\}$. In the second case, we further split the first cycle of $A_3$: $\mbox{0,2,9,5 } = \mbox{2,8,4,9 } \cup \mbox{ 0,2,8,4,9,5}$ leading to two subcases. In the first subcase the $\DD$ is $\{1,7\}, \{2,8\}, \{3,6,10\}, \{4,9\}$ and in the second $\{0,4,5,9\}, \{1,7\}, \{2,8\}, \{3,6,10\}$. For $E_9$ split the first cycle $\mbox{0,2,9,5 } = \mbox{0,2,6,1,5 } \cup \mbox{ 2,6,1,5,9}$ with a $\DD$ on $\{0,2,5\}, \{1,6\}, \{3,10\}, \{4,9\}$ in the first case and on $\{0,4\}, \{1,6\}, \{2,5,9\}, \{3,10\}$ in the second. We will not need $E_1, \ldots, E_9$ in the remainder of the argument. At this stage, we can assume that it is one of $A_1$, $A_6$, and $A_7$ that is linked. \smallskip \noindent{\bf Step V:} Eliminate $B_5$, $B_2$, and $B_3$, leaving only $B_1$, $B_4$, and $B_6$. Our next step is to argue that we can assume $B_5$ is not linked by comparing with the three remaining $A_i$'s. For the first two, we immediately recognize a $\DD$: \begin{align*} A_1 & \{0,5\}, \{1,6\}, \{2,3,9\}, \{4,10\} & A_6 & \{0\}, \{1,5,6\}, \{2,3,9\}, \{10\} \\ \end{align*} For $A_7$, split the second cycle: $\mbox{0,2,6,1,7 } = \mbox{0,2,7 } \cup \mbox{ 2,6,1,7}$ so that the $\DD$ has vertices $\{0\}, \{2\}, \{3,9,10\}, \{5\}$ in the first case and then $\{1,6\}, \{2\},$ $ \{3,9,10\}, \{5\}$. Going forward, we assume that $B_5$ is not linked. Next we will eliminate $B_2$ and $B_3$.
For $B_3$, we have a $\DD$ with each of the remaining $A_i$'s: \begin{align*} A_1 & \{0,5\}, \{1,6\}, \{2,9\}, \{4,10\} & A_6 & \{0,2,9\}, \{1,6,10\}, \{3,8\}, \{5\} \\ A_7 & \{0,2\}, \{1,6\}, \{5,9\}, \{10\} \end{align*} For $B_2$, notice first that, as we are assuming $A_3$ is not linked, by a symmetry of $G_2$, we can assume that 0,2,8,4 - 1,6,10,3,7 is also not linked. Again, since $B_3$ is unlinked, the symmetric pair 0,2,8,4 - 1,5,9,3,7 is also not linked. Since $\mbox{1,5,9,3,10,6 } = \mbox{1,5,9,3,7 } \cup \mbox{ 1,6,10,3,7}$ we conclude that $B_2$ is not linked. Having eliminated four of the $B_i$'s, going forward, we can assume that it is one of $B_1$, $B_4$, and $B_6$ that is linked. Recall that our ultimate goal is to argue that none of the $B_i$ are linked and thereby force a contradiction. \smallskip \noindent{\bf Step VI:} Eliminate $C_1$ and $A_7$. This leaves only $A_1$ and $A_6$ among the $A_i$ pairs. Our next step is to argue that $C_1$ is not linked by comparing with the remaining three $A_i$'s. For $A_1$ split the second cycle of $C_1$ $\mbox{0,2,9,5,1,7 } = \mbox{0,2,9,5 } \cup \mbox{ 0,5,1,7}$, yielding first a $\DD$ on $\{0,5\}, \{2,9\}, \{3\}, \{4,10\}$ and then on $\{0,5\}, \{1,7\}, \{3\}, \{4,10\}$. For $A_6$, we have $\DD$ with vertices $\{0,2,7,9\}, \{1,5\},$ $ \{3\}, \{10\}$. For $A_7$, split the second cycle $\mbox{0,2,6,1,7 } = \mbox{0,2,8,1,7 } \cup \mbox{ 1,6,2,8}$. In the first case, we have a $\DD$ with vertices $\{0,1,2,7\}, \{3,10\}, \{5,9\}, \{8\}$. In the second case, split the second cycle of $C_1$ $\mbox{0,2,9,5,1,7 } = \mbox{0,2,9,5 } \cup \mbox{ 0,5,1,7}$, resulting in a $\DD$ either on $\{2\}, \{3,10\}, \{5,9\}, \{8\}$ or $\{1\}, \{3,10\}, \{5\}, \{8\}$. Now we can eliminate $A_7$. Using a symmetry of $G_2$ we will instead argue that the pair $A_7'$ = 3,8,4,10 - 0,2,6,1,7 is not linked. Note that this resembles $C_1$, which we just proved unlinked. Since $\mbox{0,2,6,1,7 } \cup \mbox{ 0,2,9,5,1,7 } = \mbox{1,5,9,2,6}$ it will be enough to show that 3,8,4,10 - 1,5,9,2,6 is not linked by comparing with the remaining $A_i$'s: \begin{align*} A_1 & \{1,2,6,9\}, \{3\}, \{4,10\}, \{5\} & A_6 & \{1,5,6\}, \{2,9\}, \{3\}, \{10\} \\ A_7 & \{0,4\}, \{1,2,6\}, \{3,10\}, \{5,9\} \\ \end{align*} Thus we can assume that it is $A_1$ or $A_6$ that is the linked pair in our embedding of $G_2$. \smallskip \noindent{\bf Step VII:} Eliminate $B_1$ and $B_6$, leaving only $B_4$. As for the $B_i$'s, only three candidates remain. We next eliminate $B_1$ by comparing with the remaining two $A_i$'s. For $A_1$, split the second cycle of $B_1$: $\mbox{1,5,9,3,10,4,8 } = \mbox{1,5,10,4,8 } \cup \mbox{ 3,9,10,5}$ giving a $\DD$ on either $\{0\}, \{1\}, \{2,6\}, \{4,5,10\}$ or $\{0\}, \{2,6\}, \{3,9\}, \{5,10\}$. For $A_6$, using the same split of the second cycle of $B_1$ the vertices are either $\{0,2\}, \{1,5,10\},$ $ \{4,9\}, \{6\}$ or $\{0,2\}, \{3,9\}, \{5,10\}, \{6\}$. To proceed, we will argue that $C_5$ is unlinked. In fact, we will show that it is $D_6 = C_5'$, the result of applying the symmetry of $G_2$, that is not linked by comparing with the two remaining $A_i$'s: \begin{align*} A_1& \{0,5\}, \{1,2,6\}, \{3\}, \{4,10\} & A_6& \{0,2\}, \{1,5,6\}, \{3\}, \{10\} \end{align*} We can now eliminate $B_6$ by comparing with the $C_i$'s. This will leave only $B_4$, which, therefore, must be linked. We have already argued that $C_1$ and $C_5$ are not linked. Also, by a symmetry of $G_2$, since $B_7$ is not linked, $C_4$ is also not linked. 
For $C_3$ and $C_7$, we immediately see a $\DD$: \begin{align*} C_3 & \{0,4,5,10\}, \{1\}, \{2,6\}, \{3,9\} & C_7 & \{0,4\}, \{1\}, \{2,8\}, \{3,9,5\}. \end{align*} For $C_2$, split the second cycle: $\mbox{0,2,9,5,10,4 } = \mbox{0,2,6,10,4 } \cup \mbox{ 2,6,10,5,9}$. In the first case, we have a $\DD$ on $\{0,4,10\}, \{1,8\}, \{2,6\}, \{3\}$. In the second case, split the second cycle of $B_6$: $\mbox{0,4,10,3,9,5 } = \mbox{0,4,10,3,7 } \cup \mbox{ 0,5,9,3,7}$ giving a $\DD$ with vertices $\{1,8\}, \{2,6\}, \{3,7\}, \{10\}$ in the first subcase and $\{1,8\}, \{2,6\}, \{3,7\}, \{5,9\}$ in the second. For $C_6$, we split the second cycle of $B_6$: $\mbox{0,4,10,3,9,5 } = \mbox{0,4,10,5} \cup \mbox{ 3,9,5,10}$ giving, first, a $\DD$ on $\{0\}, \{1,8\}, \{2\}, \{4,5,10\}$ and, second, a $\DD$ on $\{1,8\}, \{2\},$ $ \{3,9\}, \{5,10\}$. \smallskip \noindent{\bf Step VIII:} Introduce $F_i$ pairs to eliminate $B_4$ and complete the argument. Since $B_1, \ldots, B_7$ represent the pairs of cycles in an embedding of $P_9$, we know by \cite{S} that at least one pair must have odd linking number. We have just argued that all but $B_4$ are not linked, so we can conclude that it is $B_4$ that has odd linking number in our embedding of $G_2$. We will now derive a contradiction by using a final Petersen family graph minor. \begin{table}[htb] \centering \begin{tabular}{c|l} $F_1$ & 0,5,1,7 -- 2,6,10,3,8 \\ $F_2$ & 0,4,8,1,5 -- 2,6,10,3,7 \\ $F_3$ & 0,2,8,4 -- 1,6,10,3,7 \\ $F_4$ & 0,2,7 -- 1,6,10,3,8 \\ $F_5$ & 1,5,10,6 -- 2,7,3,8 \\ $F_6$ & 1,7,3,8 -- 0,2,6,10,5 \\ $F_7$ & 1,6,2,8 -- 0,5,10,3,7 \\ $F_8$ & 1,6,2,7 -- 0,4,8,3,10,5 \\ \end{tabular} \caption{Eight pairs of cycles in $G_2$ called $F_1, \ldots, F_8$.} \label{tab:G2Fn} \end{table} Our last set of cycles comes from a $P_8$ minor. This is the Petersen family graph on eight vertices that is not $K_{4,4}^-$. Using the labelling of $G_2$ in Figure~\ref{fig:G2}, contracting edges $(0,4)$ and $(0,5)$ and deleting vertex 9 results in a graph with a $P_8$ subgraph. This graph has eight pairs of cycles shown in Table~\ref{tab:G2Fn}. Using $B_4$, we will derive a $\DD$ with each $F_i$. For the first five $F_i$ we immediately find a $\DD$: \begin{align*} F_1 & \{0\}, \{1,2,6\}, \{3\}, \{4,10\} & F_2 & \{0,2\}, \{1,5,6\}, \{3\}, \{10\} \\ F_3 & \{0,5\}, \{1,2,6\}, \{3\}, \{4,10\} & F_4 & \{0,2\}, \{1,5,6\}, \{3\}, \{10\} \\ F_5 & \{0,5\}, \{1,2,6\}, \{3\}, \{4,10\}. \\ \end{align*} Since $B_4$ is linked, we deduce that the pair $B_4' = $ 2,7,3,9 -- 0,4,8,1,5, obtained by the symmetry of $G_2$, is also linked. Using $B_4'$, we have a $\DD$ with each of the remaining $F_i$: \begin{align*} F_6 & \{0,5\}, \{1,8\}, \{2\}, \{3,7\} & F_7 & \{0,5\}, \{1,8\}, \{2\}, \{3,7\} \\ F_8 & \{0,4,5,8\}, \{1\}, \{2,7\}, \{3\} \\ \end{align*} We have shown that there is a $\DD$ with each $F_i$ using the pairs $B_4$ or $B_4'$, both of which must be linked. Since $F_1, \ldots, F_8$ represent the cycle pairs of a $P_8$ minor, at least one of them has odd linking number~\cite{S}. By Lemma~\ref{lem:D4}, our embedding of $G_2$ has a knotted cycle. This contradicts our assumption that we were working with a knotless embedding. The contradiction shows that there is no such knotless embedding and $G_2$ is IK. This completes the proof that $G_2$ is MMIK. \section{Maxnik graphs of order ten} \label{sec:appmnik} In this section we describe the maxnik graphs of order ten. Recall (see~\cite{EFM}) that a maximal $2$-apex graph is maxnik.
There are 14 maximal $2$-apex graphs formed as the join $K_2 \ast T_8$ of $K_2$ with one of the 14 triangulations on eight vertices, see Bowen and Fisk~\cite{BF}. We list the 14 maximal $2$-apex graphs of order ten in Appendix C. Aside from those 14 there are 35 additional maxnik graphs. Our computer search, described in Section~\ref{sec:ord10} above, shows that there are no other maxnik graphs beyond these 49. In this section we argue that the 35 non $2$-apex graphs that we have found are indeed maxnik. The graphs are listed below. To show a graph is maxnik requires two things. Using Naimi's implementation~\cite{NW} of Miller and Naimi's~\cite{MN} algorithm, we have verified for each graph $G$ that whenever we add an edge $e \not\in E(G)$, the graph $G+e$ is IK. It remains to show that each of the graphs is nIK. We divide the graphs into three bins. For the first 14 graphs we argue the graph is nIK by demonstrating a $2$-apex child. We handle the next three graphs using two lemmas from \cite{EFM}. For the remaining 18 graphs, we give a knotless embedding. \subsection{Graphs with a $2$-apex child} In this subsection we list the first 14 of the 35 maxnik graphs of order ten that are not $2$-apex. We show these graphs are nIK by demonstrating a $2$-apex child. For each graph $G$ in this list of 14, we provide the size, graph6 format~\cite{sage}, edge list, and the vertices of a triangle in the graph. Making a $\ty$ move on that triangle results in a child $H$ that is $2$-apex. In each case, $H$ becomes planar on deleting vertex $9$ and the new degree $3$ vertex. By Lemma~\ref{lem:tyyt}, since $G$ has a $2$-apex (hence nIK) child, $G$ is also nIK. \begin{enumerate} \item Size: 33; graph6 format: \verb"ICf^f\~~w"; triangle: $0,3,6$ $$[(0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 9), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9),$$ $$(2, 6), (2, 7), (2, 8), (2, 9), (3, 4), (3, 5), (3, 6), (3, 8), (3, 9), (4, 5), (4, 7),$$ $$(4, 8), (4, 9), (5, 7), (5, 8), (5, 9), (6, 7), (6, 8), (6, 9), (7, 8), (7, 9), (8, 9)]$$ \item Size: 33; graph6 format: \verb"ICxu|~{~w"; triangle: $0,3,8$ $$[(0, 3), (0, 4), (0, 6), (0, 7), (0, 8), (0, 9), (1, 4), (1, 5), (1, 6), (1, 8), (1, 9),$$ $$(2, 4), (2, 5), (2, 7), (2, 8), (2, 9), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9), (4, 6),$$ $$(4, 7), (4, 8), (4, 9), (5, 6), (5, 7), (5, 8), (5, 9), (6, 7), (6, 9), (7, 9), (8, 9)]$$ \item Size: 33; graph6 format: \verb"ICvbm~}~w"; triangle $0,5,7$ $$[(0, 3), (0, 4), (0, 5), (0, 7), (0, 8), (0, 9), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8),$$ $$(1, 9), (2, 5), (2, 6), (2, 8), (2, 9), (3, 4), (3, 6), (3, 7), (3, 8), (3, 9), (4, 7),$$ $$(4, 8), (4, 9), (5, 6), (5, 7), (5, 8), (5, 9), (6, 7), (6, 8), (6, 9), (7, 9), (8, 9)]$$ \item Size: 33; graph6 format: \verb"IEirt~}~w"; triangle $0,4,7$ $$[(0, 3), (0, 4), (0, 5), (0, 7), (0, 8), (0, 9), (1, 3), (1, 6), (1, 8), (1, 9), (2, 4),$$ $$(2, 5), (2, 6), (2, 7), (2, 8), (2, 9), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9), (4, 6),$$ $$(4, 7), (4, 8), (4, 9), (5, 7), (5, 8), (5, 9), (6, 7), (6, 8), (6, 9), (7, 9), (8, 9)]$$ \item Size: 33; graph6 format: \verb"IEhuV~}~w"; triangle $0,3,7$ $$[(0, 3), (0, 4), (0, 6), (0, 7), (0, 8), (0, 9), (1, 3), (1, 5), (1, 6), (1, 7), (1, 8),$$ $$(1, 9), (2, 4), (2, 5), (2, 7), (2, 8), (2, 9), (3, 5), (3, 7), (3, 8), (3, 9), (4, 6),$$ $$(4, 7), (4, 8), (4, 9), (5, 7), (5, 8), (5, 9), (6, 7), (6, 8), (6, 9), (7, 9), (8, 9)]$$ \item Size: 33; graph6 format: \verb"IEhvVn}~w"; triangle $0,3,7$ $$[(0, 3), (0, 4), (0, 6), (0, 7), (0, 8), (0, 9), (1, 3), (1, 5), (1, 6), (1, 
7), (1, 8),$$ $$(1, 9), (2, 4), (2, 5), (2, 6), (2, 7), (2, 8), (2, 9), (3, 5), (3, 7), (3, 8), (3, 9),$$ $$(4, 6), (4, 8), (4, 9), (5, 7), (5, 8), (5, 9), (6, 7), (6, 8), (6, 9), (7, 9), (8, 9)]$$ \item Size: 33; graph6 format: \verb"IEh~f]}~w"; triangle $0,4,7$ $$[(0, 3), (0, 4), (0, 6), (0, 7), (0, 8), (0, 9), (1, 3), (1, 5), (1, 6), (1, 7), (1, 9),$$ $$(2, 4), (2, 5), (2, 6), (2, 7), (2, 8), (2, 9), (3, 5), (3, 6), (3, 8), (3, 9), (4, 5),$$ $$(4, 7), (4, 8), (4, 9), (5, 7), (5, 8), (5, 9), (6, 7), (6, 8), (6, 9), (7, 9), (8, 9)]$$ \item Size: 33; graph6 format: \verb"IQjnex~~w"; triangle $0,2,6$ $$[(0, 2), (0, 4), (0, 5), (0, 6), (0, 7), (0, 9), (1, 3), (1, 5), (1, 6), (1, 7), (1, 8),$$ $$(1, 9), (2, 4), (2, 5), (2, 6), (2, 8), (2, 9), (3, 6), (3, 7), (3, 8), (3, 9), (4, 5),$$ $$(4, 7), (4, 8), (4, 9), (5, 7), (5, 8), (5, 9), (6, 8), (6, 9), (7, 8), (7, 9), (8, 9)]$$ \item Size: 33; graph6 format: \verb"IQzTuz}~w"; triangle $3,6,8$ $$[(0, 2), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (0, 9), (1, 3), (1, 4), (1, 5), (1, 7),$$ $$(1, 8), (1, 9), (2, 4), (2, 6), (2, 8), (2, 9), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9),$$ $$(4, 6), (4, 7), (4, 8), (4, 9), (5, 7), (5, 8), (5, 9), (6, 8), (6, 9), (7, 9), (8, 9)]$$ \item Size: 33; graph6 format: \verb"IQzTvz]~w"; triangle $3,6,8$ $$[(0, 2), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (0, 9), (1, 3), (1, 4), (1, 5), (1, 7),$$ $$(1, 8), (1, 9), (2, 4), (2, 6), (2, 7), (2, 9), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9),$$ $$(4, 6), (4, 7), (4, 8), (4, 9), (5, 7), (5, 8), (5, 9), (6, 8), (6, 9), (7, 9), (8, 9)]$$ \item Size: 33; graph6 format: \verb"IQzTu~]~w"; triangle $0,5,7$ $$[(0, 2), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (0, 9), (1, 3), (1, 4), (1, 5), (1, 7),$$ $$(1, 8), (1, 9), (2, 4), (2, 6), (2, 9), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9), (4, 6),$$ $$(4, 7), (4, 8), (4, 9), (5, 7), (5, 8), (5, 9), (6, 7), (6, 8), (6, 9), (7, 9), (8, 9)]$$ \item Size: 33; graph6 format: \verb"IQyuvx}~w"; triangle $0,4,6$ $$[(0, 2), (0, 4), (0, 5), (0, 6), (0, 7), (0, 9), (1, 3), (1, 4), (1, 6), (1, 7), (1, 8),$$ $$(1, 9), (2, 4), (2, 5), (2, 7), (2, 8), (2, 9), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9),$$ $$(4, 6), (4, 7), (4, 8), (4, 9), (5, 7), (5, 8), (5, 9), (6, 8), (6, 9), (7, 9), (8, 9)]$$ \item Size: 33; graph6 format: \verb"IQyvux|~w"; triangle $1,3,6$ $$[(0, 2), (0, 4), (0, 5), (0, 6), (0, 7), (0, 9), (1, 3), (1, 4), (1, 6), (1, 7), (1, 8),$$ $$(1, 9), (2, 4), (2, 5), (2, 6), (2, 8), (2, 9), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9),$$ $$(4, 6), (4, 7), (4, 8), (4, 9), (5, 7), (5, 8), (5, 9), (6, 9), (7, 8), (7, 9), (8, 9)]$$ \item Size: 33; graph6 format: \verb"IUZurzm~w"; triangle $0,3,6$ $$[(0, 2), (0, 3), (0, 5), (0, 6), (0, 8), (0, 9), (1, 3), (1, 4), (1, 5), (1, 6), (1, 7),$$ $$(1, 8), (1, 9), (2, 4), (2, 5), (2, 7), (2, 8), (2, 9), (3, 5), (3, 6), (3, 7), (3, 9),$$ $$(4, 6), (4, 7), (4, 8), (4, 9), (5, 7), (5, 8), (5, 9), (6, 8), (6, 9), (7, 9), (8, 9)]$$ \end{enumerate} \subsection{Lemmas from \cite{EFM}} In this subsection, we argue that graphs 15, 16, and 17 (listed below) are nIK using ideas from \cite{EFM}. Graphs 16 and 17 have a degree $2$ vertex and we can recognize them as clique sums. In both cases, the graph is the sum of $K_3$ and the Heawood family graph $E_9$ over $K_2$. Since $K_3$ and $E_9$ are both nIK, by \cite[Lemma 3.1]{EFM} graphs 16 and 17 are nIK. Graph 15 has a degree $3$ vertex and is the clique sum over $K_3$ of $K_4$ and $E_9$. 
In the embedding of $E_9$ in \cite[Figure 2]{EFM} the $K_3$ is given by the vertices $a,b,c$, which bound a disk whose interior is disjoint from the graph. By \cite[Lemma 3.4]{EFM}, Graph 15 is also nIK. \begin{enumerate} \setcounter{enumi}{14} \item Size: 24; graph6 format: \verb"ICRffQmn_" $$[(0, 3), (0, 5), (0, 6), (0, 7), (0, 8), (0, 9), (1, 4), (1, 5), (1, 6), (1, 7), (2, 5), (2, 6),$$ $$(2, 7), (2, 8), (2, 9), (3, 6), (3, 9), (4, 7), (4, 8), (4, 9), (5, 8), (5, 9), (6, 8), (6, 9)]$$ \item Size: 23; graph6 format: \verb"I?qtdo}^_" $$[(0, 4), (0, 5), (0, 6), (0, 7), (1, 4), (1, 9), (2, 5), (2, 6), (2, 7), (2, 8), (2, 9), (3, 5),$$ $$(3, 6), (3, 7), (3, 8), (3, 9), (4, 7), (4, 8), (4, 9), (5, 8), (5, 9), (6, 8), (6, 9)]$$ \item Size: 23; graph6 format: \verb"I?qtfo}N_" $$[(0, 4), (0, 5), (0, 6), (0, 7), (1, 4), (1, 7), (2, 5), (2, 6), (2, 7), (2, 8), (2, 9), (3, 5),$$ $$(3, 6), (3, 7), (3, 8), (3, 9), (4, 7), (4, 8), (4, 9), (5, 8), (5, 9), (6, 8), (6, 9)]$$ \end{enumerate} \subsection{Knotless embeddings} \begin{figure}[htb] \centering \includegraphics[scale=.8]{knotlessebd.eps} \caption{Knotless embeddings of 18 graphs.} \label{fig:18nik} \end{figure} We show that the remaining 18 graphs (listed below) are nIK by presenting knotless embeddings in Figure~\ref{fig:18nik}. Recall that this means when we apply Naimi's findEasyKnots program~\cite{NW} to the embedding, it confirms that every cycle in the graph is an unknot. \begin{enumerate} \setcounter{enumi}{17} \item Size: 34; graph6 format: \verb"IQjuz~nnw" $$[(0, 2), (0, 4), (0, 5), (0, 6), (0, 8), (0, 9), (1, 3), (1, 5), (1, 6), (1, 7), (1, 8), (2, 4),$$ $$(2, 5), (2, 7), (2, 8), (2, 9), (3, 5), (3, 6), (3, 7), (3, 9), (4, 6), (4, 7), (4, 8),$$ $$(4, 9), (5, 6), (5, 7), (5, 8), (5, 9), (6, 7), (6, 8), (6, 9), (7, 8), (7, 9), (8, 9)]$$ \item Size: 33; graph6 format: \verb"IQjUnzz~w" $$[(0, 2), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (0, 9), (1, 3), (1, 5), (1, 6), (1, 7),$$ $$(1, 8), (1, 9), (2, 4), (2, 7), (2, 8), (2, 9), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9),$$ $$(4, 7), (4, 8), (4, 9), (5, 6), (5, 7), (5, 9), (6, 8), (6, 9), (7, 8), (7, 9), (8, 9)]$$ \item Size: 33; graph6 format: \verb"IQjne~^^w" $$[(0, 2), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (1, 3), (1, 5), (1, 6), (1, 7), (1, 8),$$ $$(1, 9), (2, 4), (2, 5), (2, 6), (2, 9), (3, 6), (3, 7), (3, 8), (3, 9), (4, 5), (4, 7),$$ $$(4, 8), (4, 9), (5, 7), (5, 8), (5, 9), (6, 7), (6, 8), (6, 9), (7, 8), (7, 9), (8, 9)]$$ \item Size: 33; graph6 format: \verb"IQjne|~~W" $$[(0, 2), (0, 4), (0, 5), (0, 6), (0, 7), (0, 9), (1, 3), (1, 5), (1, 6), (1, 7), (1, 8),$$ $$(1, 9), (2, 4), (2, 5), (2, 6), (2, 8), (2, 9), (3, 6), (3, 7), (3, 8), (3, 9), (4, 5),$$ $$(4, 7), (4, 8), (4, 9), (5, 7), (5, 8), (5, 9), (6, 7), (6, 8), (7, 8), (7, 9), (8, 9)]$$ \item Size: 33; graph6 format: \verb"IQjne|~zw" $$[(0, 2), (0, 4), (0, 5), (0, 6), (0, 7), (0, 9), (1, 3), (1, 5), (1, 6), (1, 7), (1, 8),$$ $$(1, 9), (2, 4), (2, 5), (2, 6), (2, 8), (2, 9), (3, 6), (3, 7), (3, 8), (4, 5), (4, 7),$$ $$(4, 8), (4, 9), (5, 7), (5, 8), (5, 9), (6, 7), (6, 8), (6, 9), (7, 8), (7, 9), (8, 9)]$$ \item Size: 33; graph6 format: \verb"IQzTvz^~o" $$[(0, 2), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (0, 9), (1, 3), (1, 4), (1, 5), (1, 7),$$ $$(1, 8), (1, 9), (2, 4), (2, 6), (2, 7), (2, 9), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9),$$ $$(4, 6), (4, 7), (4, 8), (4, 9), (5, 7), (5, 8), (5, 9), (6, 8), (6, 9), (7, 8), (7, 9)]$$ \item Size: 33; graph6 format: \verb"IQzTvv^~o" $$[(0, 2), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (0, 9), 
(1, 3), (1, 4), (1, 5), (1, 7),$$ $$(1, 8), (1, 9), (2, 4), (2, 6), (2, 7), (2, 9), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9),$$ $$(4, 6), (4, 7), (4, 8), (4, 9), (5, 8), (5, 9), (6, 7), (6, 8), (6, 9), (7, 8), (7, 9)]$$ \item Size: 33; graph6 format: \verb"IQzTu~^~o" $$[(0, 2), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (0, 9), (1, 3), (1, 4), (1, 5), (1, 7),$$ $$(1, 8), (1, 9), (2, 4), (2, 6), (2, 9), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9), (4, 6),$$ $$(4, 7), (4, 8), (4, 9), (5, 7), (5, 8), (5, 9), (6, 7), (6, 8), (6, 9), (7, 8), (7, 9)]$$ \item Size: 33; graph6 format: \verb"IQyuz~{~o" $$[(0, 2), (0, 4), (0, 5), (0, 6), (0, 8), (0, 9), (1, 3), (1, 4), (1, 6), (1, 7), (1, 8),$$ $$(1, 9), (2, 4), (2, 5), (2, 7), (2, 8), (2, 9), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9),$$ $$(4, 6), (4, 7), (4, 8), (4, 9), (5, 6), (5, 7), (5, 8), (5, 9), (6, 7), (6, 9), (7, 9)]$$ \item Size: 33; graph6 format: \verb"IQyuz~{zw" $$[(0, 2), (0, 4), (0, 5), (0, 6), (0, 8), (0, 9), (1, 3), (1, 4), (1, 6), (1, 7), (1, 8),$$ $$(1, 9), (2, 4), (2, 5), (2, 7), (2, 8), (2, 9), (3, 5), (3, 6), (3, 7), (3, 8), (4, 6),$$ $$(4, 7), (4, 8), (4, 9), (5, 6), (5, 7), (5, 8), (5, 9), (6, 7), (6, 9), (7, 9), (8, 9)]$$ \item Size: 33; graph6 format: \verb"IQyuz~{vw" $$[(0, 2), (0, 4), (0, 5), (0, 6), (0, 8), (0, 9), (1, 3), (1, 4), (1, 6), (1, 7), (1, 8),$$ $$(1, 9), (2, 4), (2, 5), (2, 7), (2, 8), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9), (4, 6),$$ $$(4, 7), (4, 8), (4, 9), (5, 6), (5, 7), (5, 8), (5, 9), (6, 7), (6, 9), (7, 9), (8, 9)]$$ \item Size: 32; graph6 format: \verb"IQzTrj~~o" $$[(0, 2), (0, 4), (0, 5), (0, 6), (0, 8), (0, 9), (1, 3), (1, 4), (1, 5), (1, 7), (1, 8),$$ $$(1, 9), (2, 4), (2, 6), (2, 7), (2, 8), (2, 9), (3, 5), (3, 6), (3, 7), (3, 8), (3, 9),$$ $$(4, 6), (4, 8), (4, 9), (5, 7), (5, 8), (5, 9), (6, 8), (6, 9), (7, 8), (7, 9)]$$ \item Size: 32; graph6 format: \verb"IUZuvzmno" $$[(0, 2), (0, 3), (0, 5), (0, 6), (0, 7), (0, 8), (0, 9), (1, 3), (1, 4), (1, 5), (1, 6),$$ $$(1, 7), (1, 8), (2, 4), (2, 5), (2, 7), (2, 8), (2, 9), (3, 5), (3, 6), (3, 7), (3, 9),$$ $$ (4, 6), (4, 7), (4, 8), (4, 9), (5, 7), (5, 8), (5, 9), (6, 8), (6, 9), (7, 9)]$$ \item Size: 31; graph6 format: \verb"IEivux~zo" $$[(0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 9), (1, 3), (1, 6), (1, 7), (1, 8), (1, 9),$$ $$(2, 4), (2, 5), (2, 6), (2, 8), (2, 9), (3, 5), (3, 6), (3, 7), (3, 8), (4, 6), (4, 7),$$ $$(4, 8), (4, 9), (5, 7), (5, 8), (5, 9), (6, 8), (6, 9), (7, 8), (7, 9)]$$ \item Size: 31; graph6 format: \verb"IEhvuzn^o" $$[(0, 3), (0, 4), (0, 6), (0, 7), (0, 8), (1, 3), (1, 5), (1, 6), (1, 7), (1, 8), (1, 9), $$ $$(2, 4), (2, 5), (2, 6), (2, 8), (2, 9), (3, 5), (3, 6), (3, 7), (3, 9), (4, 6), (4, 7), $$ $$(4, 8), (4, 9), (5, 7), (5, 8), (5, 9), (6, 8), (6, 9), (7, 8), (7, 9)]$$ \item Size: 31; graph6 format: \verb"IEnb~jm}W" $$[(0, 3), (0, 4), (0, 5), (0, 7), (0, 8), (0, 9), (1, 3), (1, 5), (1, 6), (1, 7), (1, 8), $$ $$(1, 9), (2, 4), (2, 5), (2, 6), (2, 7), (2, 8), (2, 9), (3, 4), (3, 6), (3, 7), (3, 9), $$ $$(4, 6), (4, 8), (4, 9), (5, 6), (5, 7), (5, 8), (6, 8), (7, 9), (8, 9)]$$ \item Size: 23; graph6 format: \verb"ICpVbrkN_" $$[(0, 3), (0, 4), (0, 6), (0, 8), (1, 4), (1, 5), (1, 6), (1, 7), (1, 8), (2, 6), (2, 7), (2, 8),$$ $$(2, 9), (3, 5), (3, 6), (3, 7), (3, 9), (4, 7), (4, 8), (4, 9), (5, 8), (5, 9), (6, 9)]$$ \item Size: 23; graph6 format: \verb"ICpvbqkN_" $$[(0, 3), (0, 4), (0, 6), (0, 8), (1, 4), (1, 5), (1, 6), (1, 7), (2, 5), (2, 6), (2, 7), (2, 8),$$ $$(2, 9), (3, 5), (3, 6), (3, 7), (3, 9), (4, 7), (4, 8), (4, 9), (5, 8), (5, 
9), (6, 9)]$$ \end{enumerate} \begin{thebibliography}{HNTY} \bibitem[A]{A} D.~Archdeacon. A Kuratowski theorem for the projective plane. J.~Graph Theory 5 (1981), 243--246. \bibitem[BM]{BM}J.~Barsotti and T.W.~Mattman. Graphs on 21 edges that are not 2-apex. Involve 9 (2016), 591--621. \bibitem[BBFFHL]{BBFFHL} P.~Blain, G.~Bowlin, T.~Fleming, J.~Foisy, J.~Hendricks, and J.~Lacombe. Some results on intrinsically knotted graphs. J.~Knot Theory Ramifications 16 (2007), 749--760. \bibitem[BF]{BF} R.~Bowen and S.~Fisk. Generations of triangulations of the sphere. Math.~Comp. 21 (1967), 250--252. \bibitem[BDLST]{BDLST} A.~Brouwer, R.~Davis, A.~Larkin, D.~Studenmund, and C.~Tucker. Intrinsically $S^1$ 3-Linked Graphs and Other Aspects of $S^1$ Embeddings. Rose-Hulman Undergraduate Mathematics Journal, 8(2) (2007), {\tt https://scholar.rose-hulman.edu/rhumj/vol8/iss2/2/} . \bibitem[CMOPRW]{CMOPRW} J.~Campbell, T.W.~Mattman, R.~Ottman, J.~Pyzer, M.~Rodrigues, and S.~Williams. Intrinsic knotting and linking of almost complete graphs. Kobe J.~Math. 25 (2008), 39--58. \bibitem[C]{C}J.~Chambers, Hunting for torus obstructions, M.Sc. Thesis, Department of Computer Science, University of Victoria, 2002. \bibitem[CG]{CG}J.~Conway and C.McA.~Gordon. Knots and links in spatial graphs. J.~Graph Theory 7 (1983), 445--453. \bibitem[EFM]{EFM} L.~Eakins, T.~Fleming, and T.W.~Mattman. Maximal knotless graphs. Algebr. Geom. Topol. 23 (2023), 1831--1848. \bibitem[F]{F} J.~Foisy. Intrinsically knotted graphs. J.~Graph Theory 39 (2002), no. 3, 178--187. \bibitem[FMMNN]{FMMNN} E.~Flapan, B.~Mellor, T.W.~Mattman, R.~Naimi, and R.~Nikkuni. Recent developments in spatial graph theory. Knots, links, spatial graphs, and algebraic invariants, 81--102, Contemp.~Math., 689, Amer.~Math.~Soc., Providence, RI, 2017. \bibitem[GHW]{GHW} H.~Glover, J.~Huneke, and C.S.~Wang. 103 graphs that are irreducible for the projective plane. J.~Combin.~Theory Ser. B 27 (1979), 332--370. \bibitem[GMN]{GMN} N.~Goldberg, T.W.~Mattman, and R.~Naimi. Many, many more intrinsically knotted graphs. Algebr.~Geom.~Topol. 14 (2014), no. 3, 1801--1823. \bibitem[HNTY]{HNTY} R.~Hanaki, R.~Nikkuni, K.~Taniyama, and A.~Yamazaki. On intrinsically knotted or completely 3-linked graphs. Pacific J.~Math. 252 (2011), 407--425. \bibitem[JK]{JK} A.~Jobson and A.~K{\'e}zdy All minor-minimal apex obstructions with connectivity two. Electron.~J.~Combin. 28 (2021), Paper No. 1.23, 58 pp. \bibitem[KMO]{KMO} H.~Kim, T.W.~Mattman, and S.~Oh. Bipartite intrinsically knotted graphs with 22 edges. J.~Graph Theory 85 (2017), 568--584. \bibitem[KS]{KS} T.~Kohara and S.~Suzuki. Some remarks on knots and links in spatial graphs. Knots 90 (Osaka, 1990), 435--445, de Gruyter, Berlin, 1992. \bibitem[K]{K} K.~Kuratowski. Sur le probl\`eme des courbes gauches en topologie. Fund.~Math. 15 (1930), 271--283. \bibitem[LMMPRTW]{LMMPRTW} M.~Lipton, E.~Mackall, T.W.~Mattman, M.~Pierce, S.~Robinson, J.~Thomas, and I.~Weinschelbaum. Six variations on a theme: almost planar graphs. Involve 11 (2018), 413--448. \bibitem[Ma]{Ma} Mader W.~Mader, Homomorphies\"atze f\"ur Graphen. Math. Ann. 178 (1968), 154--168. \bibitem[M]{M} T.W.~Mattman. Graphs of 20 edges are 2-apex, hence unknotted. Algebr.~Geom.~Topol. 11 (2011), 691--718. \bibitem[MNPP]{MNPP} T.W.~Mattman, R.~Naimi, A.~Pavelescu, and E.~Pavelescu. Intrinsically knotted graphs with linklessly embeddable simple minors. Algebr. Geom. Topol. 24 (2024), 1203--1223. \bibitem[MMR]{MMR} T.W.~Mattman, C.~Morris, and J.~Ryker. Order nine MMIK graphs. 
Knots, links, spatial graphs, and algebraic invariants, 103--124, Contemp.~Math., 689, Amer.~Math.~Soc., Providence, RI, 2017. \bibitem[MP]{MP} T.W.~Mattman and M.~Pierce. The $K_{n+5}$ and $K_{3^2, 1^n}$ families and obstructions to $n$-apex. Knots, links, spatial graphs, and algebraic invariants, 137--158, Contemp.~Math., 689, Amer.~Math.~Soc., Providence, RI, 2017. \bibitem[MN]{MN} J.~Miller and R.~Naimi. An algorithm for detecting intrinsically knotted graphs. Exp.~Math. 23 (2014), 6--12. \bibitem[MT]{MT} B.~Mohar and C.~Thomassen. Graphs on surfaces. Johns Hopkins Studies in the Mathematical Sciences. Johns Hopkins University Press, Baltimore, MD, 2001. \bibitem[MW]{MW} W.~Myrvold and J.~Woodcock. A large set of torus obstructions and how they were discovered. Electron.~J.~Combin. 25 (2018), Paper No. 1.16, 17 pp. \bibitem[N1]{N} R.~Naimi. Private communication. \bibitem[N2]{NW} R.~Naimi. {\tt https://sites.google.com/a/oxy.edu/rnaimi/misc-links/} \bibitem[OT]{OT} M.~Ozawa and Y.~Tsutsumi. Primitive spatial graphs and graph minors. Rev.~Mat.~Complut. 20 (2007), 391--406. \bibitem[P]{P} M.~Pierce Searching for and classifying the finite set of minor-minimal non-apex graphs, CSU, Chico Honor's Thesis., (2014). Available at {\tt http://tmattman.yourweb.csuchico.edu/} . \bibitem[R]{R} K.~Reidemeister. Knotentheorie. Springer-Verlag, Berlin-New York, 1974. \bibitem[RS]{RS} N.~Robertson and P.D.~Seymour. Graph minors. XX. Wagner's conjecture. J.~Combin.~Theory Ser.~B 92 (2004), 325--357. \bibitem[RST]{RST} N.~Robertson, P.~Seymour, and R.~Thomas. Sachs' linkless embedding conjecture. J.~Combin.~Theory Ser.~B 64 (1995), 185--227. \bibitem[Sc]{S} H.~Sachs. On spatial representations of finite graphs. Finite and infinite sets, Vol. I, II (Eger, 1981), 649--662, Colloq.~Math.~Soc. J\'{a}nos Bolyai, 37, North-Holland, Amsterdam, 1984. \bibitem[Sg]{sage} \emph{{S}ageMath, the {S}age {M}athematics {S}oftware {S}ystem ({V}ersion 9.6)}, The Sage Developers, 2022, {\tt https://www.sagemath.org} . \bibitem[TY]{TY} K.~Taniyama, A.~Yasuhara. Realization of knots and links in a spatial graph. Topology Appl. 112 (2001), 87--109. \bibitem[W]{W} Wagner, K.: \"Uber eine Eigenschaft der ebenen Komplexe. Math.~Ann. 114 (1937), 570--590. \end{thebibliography} \end{document}
2205.14192v2
http://arxiv.org/abs/2205.14192v2
Constrained Langevin Algorithms with L-mixing External Random Variables
\documentclass{article} \PassOptionsToPackage{numbers, sort&compress}{natbib} \bibliographystyle{plainnat} \newif\ifsubmission \newif\ifpreprint \newif\iffinal \preprinttrue naltrue \ifsubmission \usepackage{neurips_2022} \ifpreprint \usepackage[preprint]{neurips_2022} \iffinal \usepackage[final]{neurips_2022} \usepackage[utf8]{inputenc} \usepackage[T1]{fontenc} \usepackage{hyperref} \usepackage{url} \usepackage{booktabs} \usepackage{nicefrac} \usepackage{microtype} \usepackage{xcolor} \usepackage{graphicx} \usepackage{mathtools} \usepackage{amsfonts,dsfont} \usepackage{mathrsfs} \usepackage{amssymb} \usepackage{amsthm} \usepackage{amsmath} \theoremstyle{plain} \newtheorem{theorem}{Theorem} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{lemmac}[theorem]{Lemma Correction} \newtheorem{theoremc}[theorem]{Theorem Correction} \PassOptionsToPackage{hyphens}{url}\usepackage{hyperref} \hypersetup{ colorlinks, linkcolor={red!50!black}, citecolor={blue!50!black}, urlcolor={blue!80!black} } \usepackage{tikz} \usetikzlibrary{shapes,matrix,arrows,calc,positioning,fit,bayesnet,angles} \usepackage{customCommands} \usepackage{appendix} \usepackage{comment} \usepackage{multirow} \newcounter{constnum} \newcommand{\const}[1]{\refstepcounter{constnum}\label{#1}} \title{Constrained Langevin Algorithms with L-mixing External Random Variables} \author{ Yuping Zheng \\ Department of Electrical and Computer Engineering\\ University of Minnesota, Twin Cities\\ Minneapolis, MN 55455 \\ \texttt{[email protected]} \\ \And Andrew Lamperski \\ Department of Electrical and Computer Engineering\\ University of Minnesota, Twin Cities\\ Minneapolis, MN 55455 \\ \texttt{[email protected]} \\ } \begin{document} \maketitle \input{introduction} \input{setup} \input{results} \section{Quantitative bounds on Skorokhod solutions over polyhedra} \label{sec:SkorokhodTightBounnd} In this section, we present a result that enables our new bound between the continuous-time process $\bx_t^C$ and the discretized process $\bx_t^M$ when constrained to the set $\cK$ defined by: \begin{equation} \label{eq:halfspaceRep} \cK = \{x | a_i^\top x \le b_i \textrm{ for } i=1,\ldots,m\}, \end{equation} where $a_i$ are unit vectors. As discussed in Section \ref{ss:proofOverviewofLemmaAtoC}, the bound in Lemma ~\ref{lem:C2D} improves upon the corresponding results in earlier works \cite{bubeck2018sampling,lamperski2021projected}. The improvement arises from the use of Theorem~\ref{thm:skorokhodConst} below, which utilizes the explicit polyhedral structure of $\cK$ to achieve a tighter bound than could be obtained for general convex constraint sets. It is a variation on an earlier result from \cite{dupuis1991lipschitz}. The main distinction is that the proof in \cite{dupuis1991lipschitz} is non-constructive, and so there is no way to calculate the constants, whereas the proof in Appendix~\ref{app:skorokhod} is fully constructive and the constants can be computed explicitly. \begin{theorem} \label{thm:skorokhodConst} There are constants $c_{\ref{diamBound}}$ and $\alpha \in (0, 1/2]$ such that if $x = \cS(y)$ and $x'=\cS(y')$ are Skorokhod solutions on the polyhedral set $\cK$ defined by (\ref{eq:halfspaceRep}), then for all $t\ge 0$, the following bound holds: $$ \sup_{0\le s \le t} \|x_s-x_s'\| \le (c_{\ref{diamBound}}+1) \sup_{0\le s\le t} \|y_s-y'_s\|. 
$$ Here $$ c_{\ref{diamBound}} = 6 \left(\frac{1}{\alpha}\right)^{\rank(A)/2} $$ and $A = \begin{bmatrix} a_1 & \cdots a_m\end{bmatrix}^\top$ whose rows are the $a_i^\top$ vectors. \end{theorem} \input{limitations} \input{conclusion} \input{acknowledgment} \bibliography{cool-refs} \ifsubmission \input{checklist} \newpage \appendix \input{skorokhod} \input{gibbs} \input{bounded} \input{contraction} \input{averaging_proofs} \input{discretization} \input{switching} \input{Constants} \input{nearOptimality} \end{document} \begin{abstract} Langevin algorithms are gradient descent methods augmented with additive noise, and are widely used in Markov Chain Monte Carlo (MCMC) sampling, optimization, and machine learning. In recent years, the non-asymptotic analysis of Langevin algorithms for non-convex learning has been extensively explored. For constrained problems with non-convex losses over a compact convex domain with IID data variables, the projected Langevin algorithm achieves a deviation of $O(T^{-1/4} (\log T)^{1/2})$ from its target distribution \cite{lamperski2021projected} in $1$-Wasserstein distance. In this paper, we obtain a deviation of $O(T^{-1/2} \log T)$ in $1$-Wasserstein distance for non-convex losses with $L$-mixing data variables and polyhedral constraints (which are not necessarily bounded). This improves on the previous bound for constrained problems and matches the best-known bound for unconstrained problems. \end{abstract} \section{Introduction} Langevin algorithms can be viewed as the simulation of Langevin dynamics from statistical physics \citep{coffey2012langevin}. They have been widely studied for Markov Chain Monte Carlo (MCMC) sampling \citep{roberts1996exponential}, non-convex optimization \citep{gelfand1991recursive,borkar1999strong} and machine learning \citep{welling2011bayesian}. In the statistical community, Langevin methods are used to resolve the difficulty of exact sampling from a high dimensional distribution. For non-convex optimization, the additive noise assists the algorithms to escape from local minima and saddles. Since many modern technical challenges can be cast as sampling and optimization problems, Langevin algorithms are a potential choice for the areas of adaptive control, deep neural networks, reinforcement learning, time series analysis, image processing and so on \citep{lekang2021wasserstein_accepted,barkhagen2021stochastic, chau2019stochastic}. \paragraph{Related Work.} In recent years, the non-asymptotic analysis of Langevin algorithms has been extensively studied. The discussion below reviews theoretical studies of Langevin algorithms for MCMC sampling, optimization, and learning. The non-asymptotic analysis of Langevin algorithms for approximate sampling (Langevin Monte Carlo, or LMC) began with \citep{dalalyan2012sparse,dalalyan2017theoretical}, with more recent relevant work given in \citep{durmus2017nonasymptotic,barkhagen2021stochastic,chau2019stochastic,ma2019sampling,majka2018non, wang2020fast, zou2021faster, li2021sqrt, erdogdu2022convergence, balasubramanian2022towards, nguyen2021unadjusted,lehec2021langevin, chewi2021analysis}. Most works on LMC consider log-concave target distributions, though there exists some work relaxing log-concavity \citep{majka2018non,chau2019stochastic,wang2020fast,nguyen2021unadjusted,chewi2021analysis} and smoothness of the target distribution \citep{nguyen2021unadjusted,lehec2021langevin, chewi2021analysis}. Most LMC work focuses on the unconstrained case. 
Constrained problems are less studied, but a variety of works have begun to address constraints in recent years. The work \citep{bubeck2015finite,bubeck2018sampling} analyzes the case of log-concave distributions with samples constrained to a convex, compact set. Other methods derived from optimization have been introduced to handle constraints, such as mirror descent \citep{ahn2020efficient,hsieh2018mirrored,zhang2020wasserstein,krichene2017acceleration} and proximal methods \citep{brosse2017sampling}. Pioneering work on non-asymptotic analysis of Langevin algorithms for unconstrained non-convex optimization with IID external data variables was given in \citep{raginsky2017non}, which was motivated by machine learning applications \citep{welling2011bayesian}. Since then, numerous improvements and variations on unconstrained Langevin algorithms for non-convex optimization have been reported \citep{chau2019stochastic, xu2018global,erdogdu2018global,cheng2018sharp,chen2020stationary}. The work \cite{vempala2019rapid} examines the Unadjusted Langevin Algorithms without convexity assumption of the objective function and achieves a convergence guarantee in Kullback-Leibler (KL) divergence assuming that the target distribution satisfies a log-Sobolev inequlity. However, KL divergence is infinite with the deterministic initialization. To mitigate this pitfall, our work measures the convergence bound in 1-Wasserstein distance, which allows the initial condition to be deterministic. The first analysis of Langevin algorithms for non-convex optimization with IID external variables constrained to compact convex sets is given in \citep{lamperski2021projected}, and builds upon \citep{bubeck2015finite, bubeck2018sampling}. However, the convergence rate derived in \citep{lamperski2021projected} is rather slow since it uses a loose result on Skorokhod problems in \citep{tanaka1979stochastic}. Recent work of \citep{sato2022convergence} obtains $\epsilon$-suboptimality guarantees in $\tilde O(\epsilon^{-1/3})$. However, some extra work would be required to give a direct comparison with the current work, as the results in \citep{sato2022convergence} depend additionally on the spectral gap, which is not computed here. Most convergence analyses for constrained non-convex optimization require no constraints or bounded constraint sets and IID external random variables or no external variables. In practice, the boundedness of constraint sets and the dependence of external variables do not always hold. The work \citep{chau2019stochastic} gives non-asymptotic bounds with L-mixing external variables and non-convex losses, which achieves tight performance guarantees in the unconstrained case. In contrast, our work gets a tight convergence bound (up to logarithmic factors) with L-mixing data streams and applies to arbitrary polyhedral constraints, which may be unbounded. \paragraph{Contributions.} This paper focuses on the non-asymptotic analysis of constrained Langevin algorithms for a non-convex problem with L-mixing external random variables and polyhedral constraints. We show the algorithm can achieve a deviation of $O(T^{-1/2} \log T)$ from its target distribution in 1-Wasserstein distance in the polyhedral constraint and with dependent variables. The result from \cite{chau2019stochastic} on unconstrained Langevin algorithms with L-mixing external random variables gives a deviation of $O(T^{-1/2} (\log T)^{1/2})$, and so we see that our results match, up to a factor of $(\log T)^{1/2}$. 
For constrained problems, our general polyhedral assumption is not directly comparable to related work of \cite{lamperski2021projected}, which examines compact convex constraints, and \cite{sato2022convergence}, which examines bounded non-convex constraints. In the cases where the domains and random variable assumptions match (i.e. bounded polyhedra with IID external random variables or no external random variables), our paper gives the tightest bounds. In particular, this improves on the bound from \cite{lamperski2021projected}, which gives a deviation of $O(T^{-1/4} (\log T)^{1/2})$ with respect to $1$-Wasserstein distance. A key enabling result in this paper is a new quantitative bound on the deviation between Skorokhod problem solutions over polyhedra, which gives a more explicit variation of an earlier non-constructive result from \citep{dupuis1991lipschitz}. Additionally, we derive a relatively simple approach to averaging out the effect of L-mixing random variables on algorithms. \section{Problem Setup} \subsection{Notation and terminology} $\bbR$ denotes the set of real numbers while $\bbN$ denotes the set of non-negative integers. The Euclidean norm over $\bbR^n$ is denoted by $\|\cdot \|$. Random variables will be denoted in bold. If $\bx$ is a random variable, then $\bbE[\bx]$ denotes its expected value and $\cL(\bx)$ denotes its law. IID stands for independent, identically distributed. The indicator function is denoted by $\indic$. If $P$ and $Q$ are two probability measures over $\bbR^n$, then the $1$-Wasserstein distance between them with respect to the Euclidean norm is denoted by $W_1(P,Q)$. The $1$-Wasserstein distance is defined as: \begin{equation} \nonumber W_{1}(P,Q) = \inf_{\Gamma \in \mathfrak{C}(P,Q)} \int_{\cK \times \cK} \|x-y \| d \Gamma(x,y) \end{equation} where $\mathfrak{C}$ is the couplings between $P$ and $Q$. Let $\cK$ be a convex set. (In this paper, we will assume that $\cK$ is polyhedral with $0$ in its interior.) The boundary of $\cK$ is denoted by $\partial \cK$. The normal cone of $\cK$ at a point $x$ is denoted by $N_{\cK}(x)$. The convex projection onto $\cK$ is denoted by $\Pi_{\cK}$. Let $\cZ$ denote the domain of the external random variables $\bz_k$. If $\cF$ and $\cG$ are $\sigma$-algebras, let $\cF\lor\cG$ denote the $\sigma$-algebra generated by the union of $\cF$ and $\cG$. \subsection{Constrained Langevin algorithm} For integers $k$ let $\hat \bw_k \sim\cN(0,I)$ be IID Gaussian random variables and let $\bz_k$ be an L-mixing process whose properties will be described later. Assume that $\bz_i$ is independent of $\hat \bw_j$ for all $i,j\in\bbN$. Assume that the initial value of $\bx_0\in\cK$ is independent of $\bz_i$ and $\hat \bw_j$. Then the constrained Langevin algorithm has the form: \begin{equation} \label{eq:projectedLangevin} \bx_{k+1} = \Pi_{\cK}\left(\bx_k -\eta \nabla_x f(\bx_k,\bz_k) + \sqrt{\frac{2\eta}{\beta}} \hat\bw_{k}\right), \end{equation} with $k$ an integer. Here $\eta>0$ is the step size parameter and $\beta >0$ is the inverse temperature parameter. In the learning context, $f(\bx,\bz)$ is the objective function where $\bx$ are the parameters we aim to learn and $\bz$ is a training data point. \subsection{L-mixing processes} \label{ss:L-mixingAssumption} In this paper, we assume that $\bz_k$ is a sequence of external data variables. 
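As a concrete illustration of the update \eqref{eq:projectedLangevin}, the following Python sketch runs the constrained algorithm with an AR(1) data stream of the kind used as an example below. The loss, the constraint set (a box, so that the projection $\Pi_{\cK}$ has a closed form; a general polyhedron would require solving a small quadratic program at each step), and all parameter values are illustrative choices and are not the ones analyzed in this paper.
\begin{verbatim}
import numpy as np

# Illustrative sketch of the projected Langevin update (eq:projectedLangevin).
# K is the box [-1,1]^n so that Pi_K is a coordinate-wise clip; the data z_k
# follow an AR(1) recursion; f, eta, beta are toy choices, not from the paper.
rng = np.random.default_rng(0)
n, beta, eta, alpha = 2, 4.0, 1e-3, 0.9

def proj_K(x):                        # Pi_K for K = [-1, 1]^n
    return np.clip(x, -1.0, 1.0)

def grad_f(x, z):                     # gradient of a toy non-convex loss f(x, z)
    return (np.dot(x, x) - 1.0) * x + z

x = np.zeros(n)                       # x_0 in K
z = np.zeros(n)                       # AR(1) data state
for k in range(10_000):
    z = alpha * z + rng.standard_normal(n)           # external data variable z_k
    w_hat = rng.standard_normal(n)                    # IID Gaussian increment
    x = proj_K(x - eta * grad_f(x, z)
               + np.sqrt(2.0 * eta / beta) * w_hat)   # projected Langevin step
print("final iterate:", x)
\end{verbatim}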
The class of $L$-mixing processes was introduced in \citep{gerencser1989class} for applications in system identification and time-series analysis, and gives a means to quantitatively measure how the dependencies between the $\bz_k$ decay over time. Formally, $L$-mixing requires two components: 1) M-boundedness, which specifies a global bound on the moments and 2) a measure of the decay of influence over time. A discrete-time stochastic processes $\bz_k$ is M-bounded if for all $m \ge 1$ \begin{equation} \label{eq:Mbounded} \cM_m(\bz) = \sup_{k\ge 0 } \bbE^{1/m}\left[\|\bz_k\|^{m} \right]<\infty. \end{equation} Let $\cF_k$ be an increasing family of $\sigma$-algebras such that $\bz_k$ is $\cF_k$-measurable and $\cF_k^+$ be a decreasing family of $\sigma$-algebras such that $\cF_k$ and $\cF_k^+$ are independent for all $k\ge 0$. Then, the process $\bz_k$ is L-mixing with respect to $\left( \left( \cF_k\right), \left( \cF_k^+ \right)\right)$ if it is M-bounded and \begin{subequations} \label{eq:LMixing} \begin{equation} \label{eq:totalInfluence} \Psi_m(\bz) = \sum_{\tau=0}^\infty \psi_m(\tau,\bz) <\infty \end{equation} with \begin{equation} \label{eq:influenceTau} \psi_m(\tau,\bz) = \sup _{k\ge \tau}\bbE^{1/m}\left[\left\|\bz_k - \bbE\left[\bz_k\vert\cF_{k-\tau}^+ \right]\right\|^m\right]. \end{equation} \end{subequations} For a concrete example, consider the order-1 autoregressive model: \begin{align} \label{eq:ARmodel1} \bz_{k+1} = \alpha \bz_{k} + \boldsymbol{\xi}_{k+1} \end{align} where $\alpha$ is a constant with $\left| \alpha \right|<1$ and for all $k\in \bbZ$, $\boldsymbol{\xi}_k$ are IID standard Gaussian random variables and $\bz_k \in \cZ$, where $\cZ=\bbR$ in this case. It can be observed from \eqref{eq:ARmodel1} that \begin{align} \label{eq:ARmodel2} \bz_k = \sum_{j=0}^\infty \alpha^j \boldsymbol{\xi}_{k-j}. \end{align} Then, if we specify $\cF_k = \sigma \{ \bxi_i: i\le k\}$ and $\cF_k^+ = \sigma \{ \bxi_i: i> k\}$, it can be verified that $\bz_k$ satisfies \eqref{eq:Mbounded} and \eqref{eq:LMixing} and so is an L-mixing process. \subsection{Assumptions} \label{ss:assumptions} We assume that $\nabla_x f(x,z)$ is $\ell$-Lipschitz in both $x$ and $z$. In particular, this implies that $\|\nabla_x f(x_1,z)-\nabla_x f(x_2,z)\|\le \ell \|x_1-x_2\|$ and $\|\nabla_x f(x,z_1)-\nabla f(x,z_2)\|\le \ell \|z_1-z_2\|$. We assume that $\bz_t$ is a stationary $L$-mixing process, and let $\bar{f}(x)=\bbE[f(x,\bz_t)]$ denote the function which averages $f(x,\bz_t)$ with respect to $\bz_t$. Further, we assume that $\bar{f}(x)$ is $\mu$-strongly convex outside a ball of radius $R>0$, i.e. $(x_1 - x_2)^\top\left( \nabla \bar{f}(x_1) - \nabla \bar{f}(x_2)\right) \ge \mu \left\| x_1 -x_2\right\|^2$ for all $ x_1,x_2 \in \cK$ such that $\| x_1-x_2\| \ge R$. We assume that the initial second moment is bounded above as $\bbE[\|\bx_0\|^2]\le \varsigma < \infty$. Throughout the paper, $\cK$ will denote a polyhedral subset of $\bbR^n$ with $0$ in its interior. \section{Main results} \label{sec:results} \subsection{Convergence of the law of the iterates} For $\bar f$ defined above, the associated Gibbs measure is defined by: \begin{equation} \label{eq:gibbs} \pi_{\beta \bar f}(A) = \frac{\int_{A\cap \cK} e^{-\beta \bar f(x)} dx}{\int_{\cK} e^{-\beta \bar f(x)} dx}. 
\end{equation} The main result of this paper is stated next: \const{contraction_const1} \newcommand{\contractionConstOneVal}{2\varphi(R)^{-1} \sqrt{ \frac{2}{\mu} c_{\ref{LyapunovConst}}}} \const{contraction_const2} \newcommand{\contractionConstTwoVal}{ 4\varphi(R)^{-1}} \const{error_polyhedron1} \newcommand{\errorPolyhedronOneVal}{\left(c_{\ref{AtoC1}}+c_{\ref{BoundCtoD2}}\sqrt{c_{\ref{AlgBound}}}+ \sqrt{2} c_{\ref{BoundCtoD3}} \right) e^{\ell}\left( 1+ \frac{2\varphi(R)^{-1}}{1-e^{-{a}/2}}\right) } \const{error_polyhedron2} \newcommand{\errorPolyhedronTwoVal}{ c_{\ref{BoundCtoD2}} e^{\ell}\left( 1+ \frac{2\varphi(R)^{-1}}{1-e^{-{a}/2}}\right)} \begin{theorem} \label{thm:nonconvexLangevin} Assume that $\eta \le \min\left\{\frac{1}{4},\frac{\mu}{4\ell^2}\right\}$, $\cK$ is a polyhedron with $0$ in its interior, $\bx_0\in\cK$, and $\bbE[\|\bx_0\|^2]\le \varsigma$. There are constants $a$, $c_{\ref{contraction_const1}}$, $c_{\ref{contraction_const2}}$, $c_{\ref{error_polyhedron1}}$, and $c_{\ref{error_polyhedron2}}$ such that the following bound holds for all integers $k\ge 4$: \begin{equation*} \label{eq:mainBound} W_1(\cL(\bx_k), \pi_{\beta \bar{f}}) \le (c_{\ref{contraction_const1}} + c_{\ref{contraction_const2}} \sqrt{\varsigma}) e^{-\eta a k} + (c_{\ref{error_polyhedron1}} + c_{\ref{error_polyhedron2}}\sqrt{\varsigma})\sqrt{\eta\log(\eta^{-1})}. \end{equation*} In particular, if $\eta = \frac{\log T}{2aT}$, $T\ge 4$ and $T \ge e^{2a}$, then \begin{equation*} \label{eq:mainBound2} W_1(\cL(\bx_T), \pi_{\beta \bar{f}}) \le \left( c_{\ref{contraction_const1}} + c_{\ref{contraction_const2}} \sqrt{\varsigma} + \frac{c_{\ref{error_polyhedron1}} + c_{\ref{error_polyhedron2}}\sqrt{\varsigma}}{(2a)^{1/2}} \right) T^{-1/2} \log T. \end{equation*} Furthermore, the constants, $c_{\ref{contraction_const1}}, c_{\ref{contraction_const2}}, c_{\ref{error_polyhedron1}}$, and $c_{\ref{error_polyhedron2}}$ are $O(n)$ with respect to the dimension of $\bx_k$, and $O(e^{\ell \beta R^2/2})$ with respect to the inverse temperature, $\beta$. And for all $\beta>0$, $a \ge \frac{2}{\frac{\beta R^2}{2}+\frac{16}{\mu}} e^{-\frac{\beta \ell R^2}{4}}$. \end{theorem} The constants depend on the dimension of $\bx_k$, $n$, the noise parameter, $\beta$, the Lipschitz constant, $\ell$, the strong convexity constant $\mu$, the variance bound of the initial states, $\varsigma$, and some geometric properties of the polyhedron, $\cK$. The constants shown in Theorem~\ref{thm:nonconvexLangevin} are described explicitly in Appendix~\ref{app:constants}. \subsection{Auxiliary processes for convergence analysis} \label{ss:processes} Similar to the previous analyses of Langevin methods, e.g. \citep{raginsky2017non, bubeck2018sampling,chau2019stochastic,lamperski2021projected}, the proof of Theorem~\ref{thm:nonconvexLangevin} uses a collection of auxiliary processes fitting between the algorithms iterates from (\ref{eq:projectedLangevin}) and a stationary distribution given by (\ref{eq:gibbs}). The algorithm and a variation in which the $\bz_t$ variables are averaged out are respectively given by: \begin{subequations} \begin{align} \label{eq:algorithmA} \bx_{t+1}^A &= \Pi_{\cK}\left(\bx_t^A-\eta \nabla_x f(\bx_t^A,\bz_t)+\sqrt{\frac{2\eta}{\beta}} \hat \bw_t \right) \\ \label{eq:averagedM} \bx_{t+1}^M&= \Pi_{\cK}\left(\bx_t^M-\eta \nabla_x \bar f(\bx_t^M)+\sqrt{\frac{2\eta}{\beta}} \hat \bw_t \right). 
\end{align} \end{subequations} Here $\bx_t^A$ represents the \underline{A}lgorithm, while $\bx_t^M$ represents a corresponding \underline{M}ean process. We embed the mean process in continuous time by setting $\bx_t^M = \bx_{\floor{t}}^M$, where $\floor{t}$ indicates floor function. The Gaussian noise $\hat \bw_k$ can be realized as $\hat \bw_k = \bw_{k+1} - \bw_{k}$ where $\bw_t$ is a Brownian motion. Let $\bx_t^C$ denote a \underline{C}ontinuous-time approximation of $\bx_t^M$ defined by the following reflected stochastic differential equation (RSDE): \begin{equation} \label{eq:AvecontinuousProjectedLangevin} d\bx^C_t = -\eta \nabla_x \bar{f} (\bx^C_t) dt + \sqrt{\frac{2\eta}{\beta}} d\bw_t - \bv_t^C d\bmu^C(t). \end{equation} Here $-\int_0^t \bv_s^Cd\bmu^C(s)$ is a bounded variation reflection process that ensures that $\bx_t^C\in\cK$ for all $t\ge 0$, as long as $\bx_0^C\in\cK$. In particular, the measure $\bmu^C$ is such that $\bmu^C([0,t])$ is finite, $\bmu^C$ supported on $\{s|\bx_s^C\in\partial \cK\}$, and $\bv_s^C\in N_{\cK}(\bx_s^C)$ where $N_{\cK}(x)$ is the normal cone of $\cK$ at $x$. Lemma~\ref{lem:skorokhodExistence} in Appendix~\ref{app:skorokhod} shows that the reflection process is uniquely defined and $\bx^C$ is the unique solution to the Skorokhod problem for the process defined by: \begin{equation} \label{eq:AveContinuousY} \by^C_t = \bx_0^C + \sqrt{\frac{2\eta}{\beta}} \bw_t - \eta \int_0^t \nabla_x \bar{f}(\bx^C_s) ds. \end{equation} See Appendix~\ref{app:skorokhod} for more details on the Skorokhod problem. For compact notation, we denote the Skorokhod solution for a given trajectory, $\by$, by $\cS(\by)$. So, the fact that $\bx^C$ is the solution to the Skorokhod problem for $\by^C$ will be denoted succinctly by $\bx^C=\cS(\by^C)$. The basic idea behind the proof is to utilize the triangle inequality: \begin{align} W_1(\cL(\bx_k^A),\pi_{\beta f}) \label{eq:wassersteinTriangl1} \le W_1(\cL(\bx_k^A),\cL(\bx_k^C))+W_1(\cL(\bx_k^C),\pi_{\beta \bar f}). \end{align} and then bound each of the terms separately. The second term is bounded by the following lemma: \begin{lemma} \label{lem:convergeToStationary} Assume that $\bx_0\in\cK$ and $\bbE[\|\bx_0^C\|^2] \le \varsigma$. There are positive constants $a$, $c_{\ref{contraction_const1}}$ and $c_{\ref{contraction_const2}}$ such that for all $t \ge 0$ \begin{equation*} W_1(\cL(\bx_t^{C}),\pi_{\beta \bar f})\le \left( c_{\ref{contraction_const1}} + c_{\ref{contraction_const2}} \sqrt{\varsigma} \right) e^{-\eta a t}. \end{equation*} \end{lemma} This result is based on an extension of the contraction results from Corollary~2 of \citep{eberle2016reflection} for SDEs to the case of the reflected SDEs. Appendix~\ref{sec:contraction} steps through the methodology from \citep{eberle2016reflection} in order to derive $a$, $c_{\ref{contraction_const1}}$ and $c_{\ref{contraction_const2}}$ for our particular problem. Most of the novel work in the paper focuses on deriving the following bound on $W_1(\cL(\bx_k^A),\cL(\bx_k^C))$: \begin{lemma} \label{lem:AtoC} Assume that $\bx_0^A=\bx_0^C\in\cK$, $\bbE[\|\bx_0^C\|^2]\le \varsigma$, and $\eta \le \min\left\{\frac{1}{4},\frac{\mu}{8\ell^2}\right\}$. Then there are positive constants $c_{\ref{error_polyhedron1}}$ and $c_{\ref{error_polyhedron2}}$ such that for all integers $k\ge 0$: $$ W_1(\cL(\bx_k^A),\cL(\bx_k^C))\le \left( c_{\ref{error_polyhedron1}} + c_{\ref{error_polyhedron2}} \sqrt{\varsigma} \right) \sqrt{\eta\log(\eta^{-1})}. 
$$ \end{lemma} \paragraph*{Proof of Theorem~\ref{thm:nonconvexLangevin}} Plugging the results of Lemmas~\ref{lem:convergeToStationary} and \ref{lem:AtoC} into the triangle inequality bound from (\ref{eq:wassersteinTriangl1}) proves the first result of the theorem. Specifically, let $\eta = \frac{\log T}{2aT}$, then \begin{align*} W_1(\cL(\bx_T), \pi_{\beta \bar{f}}) &\le (c_{\ref{contraction_const1}} + c_{\ref{contraction_const2}} \sqrt{\varsigma}) T^{-1/2} + (c_{\ref{error_polyhedron1}} + c_{\ref{error_polyhedron2}}\sqrt{\varsigma})\sqrt{\frac{\log T}{2aT}\log( \frac{2aT}{\log T})}\\ & \le (c_{\ref{contraction_const1}} + c_{\ref{contraction_const2}} \sqrt{\varsigma}) T^{-1/2} \log T + \frac{c_{\ref{error_polyhedron1}} + c_{\ref{error_polyhedron2}}\sqrt{\varsigma}}{(2a)^{1/2}} T^{-1/2}\log T . \end{align*} This gives the specific bound in the theorem. The last inequality utilizes the fact that $\log T > 1$ for all $T \ge 4$ and $\frac{2aT}{\log T} \le T$ when $T \ge e^{2a}$. Furthermore, we examine the bounds of the constants $c_{\ref{contraction_const1}}$, $c_{\ref{contraction_const2}}$, $c_{\ref{error_polyhedron1}}$, $c_{\ref{error_polyhedron2}}$ and $a$ in Appendix~\ref{app:constants}, where the dependencies of the convergence guarantee on state dimension $n$ and the inverse temperature parameter, $\beta$ can be observed directly. \hfill$\blacksquare$ The rest of the paper focuses on proving Lemma~\ref{lem:AtoC}. \subsection{Proof overview for Lemma~\ref{lem:AtoC}} \label{ss:proofOverviewofLemmaAtoC} This subsection describes the main ideas in the proof of Lemma~\ref{lem:AtoC}. The results highlighted here, and proved in the appendix, cover the main novel aspects of the current work. The first novelty, captured in Lemmas~\ref{lem:MeanBetween1} and \ref{lem:MeanBetween2}, is a new way to bound stochastic gradient Langevin schemes with L-mixing data from a Langevin method with the data variables averaged out. The key idea is a method for examining a collection of partially averaged processes. The second novelty is a tight quantitative bound on the deviation of discretized Langevin algorithms from their continuous-time counterparts when constrained to a polyhedron. This result is based on a new quantitative bound on Skorokhod solutions over polyhedra. First we derive time-dependent bounds (i.e. bounds that depend on $k$) for $W_1(\cL(\bx_k^A),\cL(\bx_k^C))$ . This is achieved by introducing a collection of intermediate processes and bounding their differences. Time-uniform bounds are then achieved by exploiting contractivity properties of $\bx_t^C$. To bound $W_1(\cL(\bx_k^A),\cL(\bx_k^C))$, we first use the triangle inequality: \begin{align} \label{eq:AtoCTriangle} W_1(\cL(\bx_k^A),\cL(\bx_k^C))\le W_1(\cL(\bx_k^A),\cL(\bx_k^M)) + W_1(\cL(\bx_k^M),\cL(\bx_k^C)). \end{align} We bound $W_1(\cL(\bx_k^A),\cL(\bx_k^M))$ via a collection of auxiliary processes in which the effect of $\bz_k$ is partially averaged out. We bound $W_1(\cL(\bx_k^M),\cL(\bx_k^C))$ via a specialized discrete-time approximation of $\bx_t^C$. Now we construct the collection of partially averaged processes. Recall that $\bz_k\in\cZ$ is a stationary $L$-mixing process with respect to the $\sigma$-algebras $\cF_k$ and $\cF_k^+$. For $k<0$, we set $\cF_k=\{\emptyset,\cZ\}$, i.e. the trivial $\sigma$-algebra. Let $\cG_t$ be the filtration generated by the Brownian motion, $\bw_t$. Recall that for $k\in\bbN$, we set $\hat\bw_k = \bw_{k+1}-\bw_k$. 
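For intuition only, note that for the AR(1) example \eqref{eq:ARmodel1} the conditional expectations appearing in the construction below have a simple closed form (this computation is purely illustrative and is not used in the proofs). Iterating the recursion gives
\begin{equation*}
\bz_k = \alpha^s \bz_{k-s} + \sum_{j=0}^{s-1}\alpha^j\boldsymbol{\xi}_{k-j},
\end{equation*}
and the innovations $\boldsymbol{\xi}_{k-s+1},\ldots,\boldsymbol{\xi}_k$ are mean zero and independent of $\cF_{k-s}\lor\cG_k$, so that $\bbE[\bz_k\vert \cF_{k-s}\lor\cG_k]=\alpha^s\bz_{k-s}$. Consequently, for a gradient that is affine in $z$, the partially averaged drift interpolates between the raw stochastic gradient ($s=0$) and the fully averaged gradient $\nabla_x\bar f$ ($s>k$, where $\cF_{k-s}$ is trivial).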
Define the following discrete-time processes: \begin{subequations} \begin{align} \label{eq:averagedIntermediate} \bx_{k+1}^{M,s}&= \Pi_{\cK}\left(\bx_k^{M,s}-\eta \bbE[ \nabla_x f(\bx_k^{M,s},\bz_k) | \cF_{k-s}\lor \cG_k ]+\sqrt{\frac{2\eta}{\beta}} \hat\bw_k \right) \\ \label{eq:averagedBetween} \bx_{k+1}^{B,s} &= \Pi_{\cK}\left(\bx_k^{B,s}-\eta \bbE[ \nabla_x f(\bx_k^{M,s},\bz_k) | \cF_{k-s-1} \lor \cG_k ] +\sqrt{\frac{2\eta}{\beta}} \hat\bw_k \right). \end{align} \end{subequations} Assume that all initial conditions are equal. In other words, $\bx_0^A=\bx_0^M=\bx_0^{M,s}=\bx_0^{B,s}$, for all $s\ge 0$. The iterations from (\ref{eq:averagedIntermediate}) define a family of algorithms in which the data variables are partially averaged, while $\bx_k^{B,s}$ from (\ref{eq:averagedBetween}) corresponds to an auxiliary process that fits between $\bx_k^{M,s}$ and $\bx_k^{M,s+1}$. (Here ``A'' stands for algorithm, ``M'' stands for mean, and ``B'' stands for between.) Note for $s=0$, we have that $\bx_k^{M,0}=\bx_k^A$ and for $s>k$, we have that $\bx_k^{M,s}=\bx_k^M$. So, in order to bound $W_1(\cL(\bx_k^A),\cL(\bx_k^M))$, it suffices to bound $W_1(\cL(\bx_k^{M,s}),\cL(\bx_k^{B,s}))$ and $W_1(\cL(\bx_k^{B,s}),\cL(\bx_k^{M,s+1}))$ for all $s\ge 0$. These bounds are achieved in the following lemmas, which are proved in Appendix~\ref{app:aveLemmas}. \begin{lemma} \label{lem:MeanBetween1} For all $s\ge 0$ and all $k\ge 0$, the following bound holds: \begin{equation*} \nonumber W_1\left(\cL(\bx_k^{M,s}),\cL(\bx_k^{B,s}) \right) \le \bbE[\|\bx_k^{M,s}-\bx_k^{B,s}\|] \le 2\ell \psi_2(s,\bz)\eta \sqrt{k} . \end{equation*} \end{lemma} \begin{lemma} \label{lem:MeanBetween2} For all $s\ge 0$ and all $k\ge 0$, the following bound holds \begin{multline} \nonumber W_1\left(\cL(\bx_k^{B,s}),\cL(\bx_k^{M,s+1})\right) \le \bbE[\|\bx_k^{B,s}-\bx_k^{M,s+1}\|] \le 2\ell \psi_2(s,\bz)\eta \sqrt{k} \left( e^{\eta k \ell}-1\right). \end{multline} \end{lemma} Now we define the discretized approximation of $\bx_t^C$. For any initial $\bx_0^D \in \cK$, we define the following iteration on the integers: \begin{align*} \bx_{k+1}^D = \Pi_{\cK} (\bx_k^D + \by_{k+1}^C - \by_k^C) = \Pi_{\cK} \left(\bx_k^D + \int_{k}^{k+1} \nabla \bar{f} (\bx_{s}^C) ds + \sqrt{\frac{2 \eta}{\beta}} \hat\bw_{k}\right). \end{align*} Recall that the process $\by^C$ is defined by \eqref{eq:AveContinuousY}. Provided that $\bx_0^D = \bx_0^C$, we have that $\bx^D = \cS(\by^D) = \cS(\cD(\by^C))$, where $\cD$ is the discretization operator that sets $\cD(x)_t = x_{\floor{t}}$ for any continuous-time trajectory $x_t$. Recall that $\cS$ is the Skorokhod solution operator. The approximation, $\bx^D$, was utilized in \citep{bubeck2018sampling,lamperski2021projected} to bound discretization errors. The next lemmas show how to bound $W_1(\cL(\bx_k^C),\cL(\bx_k^D))$ and $W_1(\cL(\bx_k^M),\cL(\bx_k^D))$, respectively. In particular, Lemma~\ref{lem:C2D} is analogous to Propositions 2.4 and 3.6 of \citep{bubeck2018sampling} and Lemma 9 of \citep{lamperski2021projected}. These earlier works end up with bounds of $O(\eta^{3/4}k^{1/2}+\sqrt{\eta \log k})$. It is shown in \cite{lamperski2021projected} that such bounds can be translated into time-uniform bounds of the form $\tilde O(\eta^{1/4})$. The bound from Lemma~\ref{lem:C2D} is of the form $O(\eta k^{1/2}+\sqrt{\eta \log k})$, and we will see in the next subsection that this leads to a time-uniform bound of the form $\tilde O(\eta^{1/2})$. 
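The relationship between $\bx^C$, the driving path $\by^C$ from \eqref{eq:AveContinuousY}, and the discretization $\bx^D=\cS(\cD(\by^C))$ can also be explored numerically. The following Python sketch approximates $\bx^C$ by a fine projected Euler scheme (a stand-in for the exact Skorokhod solution), records $\by^C$ at integer times, builds $\bx^D$ from the resulting increments, and reports the gap $\sup_k\|\bx_k^C-\bx_k^D\|$; the averaged loss, the one-dimensional box playing the role of the polyhedron, and all parameter values are illustrative choices of ours.
\begin{verbatim}
import numpy as np

# Rough numerical sketch: x^C (fine projected Euler, standing in for the
# Skorokhod solution) versus x^D_{k+1} = Pi_K(x^D_k + y^C_{k+1} - y^C_k),
# both driven by the same path.  All choices below are illustrative.
rng = np.random.default_rng(1)
eta, beta, K_steps, sub = 1e-2, 4.0, 200, 100   # sub = fine steps per unit time
dt = 1.0 / sub

def proj_K(x):                                  # K = [-1, 1]
    return np.clip(x, -1.0, 1.0)

def grad_fbar(x):                               # toy averaged gradient
    return (x**2 - 1.0) * x

x_C, drift_int, noise = 0.0, 0.0, 0.0
xs_C, y_C = [x_C], [0.0]
for k in range(K_steps):
    for _ in range(sub):                        # fine projected Euler steps
        dw = np.sqrt(dt) * rng.standard_normal()
        drift_int += grad_fbar(x_C) * dt        # int_0^t grad fbar(x^C_s) ds
        noise += dw                             # Brownian path w_t
        x_C = proj_K(x_C - eta * grad_fbar(x_C) * dt
                     + np.sqrt(2.0 * eta / beta) * dw)
    xs_C.append(x_C)
    y_C.append(np.sqrt(2.0 * eta / beta) * noise - eta * drift_int)

x_D, xs_D = 0.0, [0.0]                          # x^D = S(D(y^C))
for k in range(K_steps):
    x_D = proj_K(x_D + y_C[k + 1] - y_C[k])
    xs_D.append(x_D)

print("sup_k |x^C_k - x^D_k| =", max(abs(a - b) for a, b in zip(xs_C, xs_D)))
\end{verbatim}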
\const{BoundCtoD1} \newcommand{\BoundCtoDOneVal}{(c_{\ref{diamBound}}+1)\sqrt{\frac{2}{\mu}\ell^2 c_{\ref{LyapunovConst}} + 2 \| \nabla_x \bar{f}(0)\|^2}} \const{BoundCtoD2} \newcommand{\BoundCtoDTwoVal}{(c_{\ref{diamBound}}+1) \sqrt{2 \ell^2}} \const{BoundCtoD3} \newcommand{\BoundCtoDThreeVal}{ (c_{\ref{diamBound}}+1) n\sqrt{\frac{8}{\beta}} } \begin{lemma}\label{lem:C2D} Assume that $\cK$ is a polyhedron with $0$ in its interior. Assume that $\bx_0^C = \bx_0^D \in \cK$ and that $\bbE[\|\bx_0^C\|] \le \varsigma$. There are constants $c_{\ref{BoundCtoD1}}$, $c_{\ref{BoundCtoD2}}$, and $c_{\ref{BoundCtoD3}}$ such that for all integers $k\ge 0$, the following bound holds: \begin{align*} W_1\left(\cL(\bx_k^C),\cL(\bx_k^D) \right)\le \bbE\left[\|\bx_k^C - \bx_k^D \| \right] \le \left( c_{\ref{BoundCtoD1}} + c_{\ref{BoundCtoD2}} \sqrt{\varsigma}\right) \eta \sqrt{k} + c_{\ref{BoundCtoD3}} \sqrt{\eta \log(4k)} . \end{align*} \end{lemma} \begin{lemma} \label{lem:M2D} Assume that $\cK$ is a polyhedron with $0$ in its interior. Assume that $\bx_0^C = \bx_0^D=\bx_0^M \in \cK$ and that $\bbE[\|\bx_0^C\|] \le \varsigma$. Then for all integers $k\ge0$, the following bound holds: \begin{align*} W_1\left(\cL(\bx_k^M),\cL(\bx_k^D) \right) \le \bbE\left[\|\bx_k^M - \bx_k^D \| \right] \le \left(\left( c_{\ref{BoundCtoD1}} + c_{\ref{BoundCtoD2}} \sqrt{\varsigma}\right) \eta \sqrt{k} + c_{\ref{BoundCtoD3}} \sqrt{\eta \log(4k)} \right) \left( e^{\eta\ell k} -1 \right). \end{align*} \end{lemma} We highlight that Lemma~\ref{lem:C2D} utilizes the rather tight bounds on solutions to Skorokhod problems over a polyhedral domain shown in Theorem~\ref{thm:skorokhodConst}. The derivation of such tight bounds is one of the novelties of our work. More details are discussed in Section~\ref{sec:SkorokhodTightBounnd} and Appendix~\ref{app:skorokhod}. With all of the auxiliary processes defined and the differences between them bounded, we have the following lemma, which gives a time-dependent bound on $W_1(\cL(\bx_k^A),\cL(\bx_k^C))$: \const{AtoC1} \newcommand{\AtoCOneVal}{c_{\ref{BoundCtoD1}}+2\ell \Psi_2(\bz) } \begin{lemma} \label{lem:AtoCdependent} Assume that $\cK$ is a polyhedron with $0$ in its interior. Assume that $\bx_0^A=\bx_0^C \in \cK$ and that $\bbE[\|\bx_0^A\|] \le \varsigma$. There are constants $c_{\ref{BoundCtoD2}}$, $c_{\ref{BoundCtoD3}}$, and $c_{\ref{AtoC1}}$ such that for all $k\ge 0$, the following bound holds: \begin{align*} W_1(\cL(\bx_k^A),\cL(\bx_k^C)) \le \left( \left( c_{\ref{AtoC1}} + c_{\ref{BoundCtoD2}} \sqrt{\varsigma}\right) \eta \sqrt{k} + c_{\ref{BoundCtoD3}} \sqrt{\eta \log(4k)} \right)e^{\eta \ell k}. \end{align*} \end{lemma} \paragraph{Proof of Lemma~\ref{lem:AtoCdependent}} \input{atocProof} The proof of Lemma~\ref{lem:AtoC} is completed by showing how the time-dependent bound from Lemma~\ref{lem:AtoCdependent} can be turned into a bound that is independent of $k$. The technique used for this step is based on ideas from \citep{chau2019stochastic}, and is shown in Appendix~\ref{app:switching}.
Recalling that $\bx_k^{M,0}=\bx_k^A$ and $\bx_k^{M,k+1}=\bx_k^M$ and using the triangle inequality gives: \begin{align} \nonumber \MoveEqLeft[0] W_1(\cL(\bx_k^A),\cL(\bx_k^M)) \\ \nonumber &\le \sum_{s=0}^{k} W_1(\cL(\bx_k^{M,s}),\cL(\bx_k^{M,s+1}))\\ \nonumber &\le \ \sum_{s=0}^{k} \left(W_1(\cL(\bx_k^{M,s}),\cL(\bx_k^{B,s})) + W_1(\cL(\bx_k^{B,s}),\cL(\bx_k^{M,s+1}))\right)\\ \nonumber &\overset{\textrm{Lemmas~\ref{lem:MeanBetween1}~\& ~\ref{lem:MeanBetween2}}}{\le} \sum_{s=0}^{k}2\ell\psi_2(s,\bz)\eta \sqrt{k}e^{\eta\ell k} \\ \label{eq:AtoMdependent} &\le 2\ell \Psi_2(\bz) \eta \sqrt{k}e^{\eta \ell k}. \end{align} Here $\psi_2(s,\bz)$ and $\Psi_2(\bz)$ are the terms that bound the decay of probabilistic dependence between the $\bz_k$ variables, as defined in \eqref{eq:LMixing}. Similarly, we bound \begin{align} \nonumber \MoveEqLeft[0] W_1(\cL(\bx_k^M),\cL(\bx_k^C))\\ \nonumber &\le W_1(\cL(\bx_k^M),\cL(\bx_k^D))+W_1(\cL(\bx_k^D),\cL(\bx_k^C)) \\ \label{eq:MtoCdependent} &\overset{\textrm{Lemmas}~\ref{lem:C2D}~\&~\ref{lem:M2D}}{\le} \left( (c_{\ref{BoundCtoD1}} + c_{\ref{BoundCtoD2}} \sqrt{\varsigma}) \eta \sqrt{k} + c_{\ref{BoundCtoD3}} \sqrt{\eta \log(4k)} \right) e^{\eta\ell k}. \end{align} Plugging the bounds from (\ref{eq:AtoMdependent}) and (\ref{eq:MtoCdependent}) into (\ref{eq:AtoCTriangle}) proves the lemma, with $c_{\ref{AtoC1}} = \AtoCOneVal $. \hfill$\blacksquare$ \section{Stochastic contraction analysis} \label{sec:contraction} In this Appendix, we prove Lemma~\ref{lem:convergeToStationary}. \subsection{Contraction for the reflected SDEs} We extend the analysis of standard SDEs from \cite{eberle2016reflection} to the case of reflected SDEs. The main idea of \cite{eberle2016reflection} is to construct a specialized metric over $\bbR^n$ and corresponding Wasserstein distance under which contraction rates can be computed. In the context of this paper, we only use Euclidean norm to construct the metric, whereas in \cite{eberle2016reflection}, both Euclidean and a second norm were used to construct the specilized metric. Using just one norm leads to some simplifications. Our choice of reflection term in the coupling process is also slightly different, leading to further simplifications. In the following, we firstly examine the contractivity properties of the generalized reflected SDEs and then associate the generalized process with the original process from (\ref{eq:projectedLangevin}). Let $\cK$ be a closed convex subset of $\bbR^n$ and consider a reflected stochastic differential equations of the form: \begin{equation}\label{eq:diffX} d\bx_t = H(\bx_t)dt + G d\bw_t - \bv_td\bmu(t) , \end{equation} where $G$ is an invertible $n\times n$ matrix with minimum singular value $\sigma_{\min}(G)$, $\bw_t$ is a standard Brownian motion, and $-\int_0^t \bv_sd\bmu(s)$ is a reflection term that ensures that $\bx_t\in\cK$ for all $t\ge 0$. (We are slightly abusing notation, since here $\bx_t$ denotes the solution to a general RSDE, and is not the iterates of the original algorithm from (\ref{eq:projectedLangevin}).) Following \cite{eberle2016reflection}, we construct a function $\delta: [0,+\infty) \rightarrow \bbR $ such that $\delta(0) = 0$, $\delta'(0) = 1$, $\delta'(r) >0$, and $\delta''(r) \le 0$ for all $r \ge 0 $. With these properties, it can be shown that $\delta(\|x-y\|)$ forms a metric over $\cK$. The particular metric is constructed so that the dynamics are contractive with respect to the corresponding Wasserstein distance. 
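As a sanity check on this construction, which is specified in \eqref{eq:deltaDef} below, the following Python sketch builds $\delta$ numerically for an illustrative piecewise-constant choice of the one-sided Lipschitz function $\kappa$ introduced next, and verifies the properties used later in the proofs ($g\in[\tfrac{1}{2},1]$ and $\delta'(R_1)\, r\le\delta(r)\le r$). The parameter values are ours and play no role in the analysis.
\begin{verbatim}
import numpy as np

# Numerical sketch of the metric construction (eq:deltaDef) for a toy
# piecewise-constant kappa: kap_plus for r < R0 and kap_bar < 0 for r >= R0.
# sigma stands for sigma_min(G).  All values are illustrative.
sigma, kap_plus, kap_bar, R0 = 1.0, 0.5, -1.0, 1.0
h_R0 = kap_plus * R0**2 / (4.0 * sigma**2)          # h(R0)
R1 = R0 / 2.0 + 0.5 * np.sqrt(R0**2 - 16.0 * sigma**2 * np.exp(h_R0) / kap_bar)

r = np.linspace(0.0, 2.0 * R1, 4001)
dr = r[1] - r[0]
kappa = np.where(r < R0, kap_plus, kap_bar)

h = np.cumsum(r * np.maximum(kappa, 0.0)) * dr / (2.0 * sigma**2)  # eq:lipschitzIntegral
phi = np.exp(-h)                                                   # eq:phi
Phi = np.cumsum(phi) * dr                                          # eq:Phi
integrand = np.where(r <= R1, Phi / phi, 0.0)
xi = 1.0 / ((np.cumsum(integrand) * dr)[-1])                       # eq:xi
g = 1.0 - 0.5 * xi * np.cumsum(integrand) * dr                     # eq:g
delta = np.cumsum(phi * g) * dr                                    # eq:delta

a_tilde = xi * sigma**2                                            # contraction rate
print("R1 =", R1, " xi =", xi, " a_tilde =", a_tilde)
print("g in [1/2, 1]:", g.min() >= 0.5 - 1e-9, g.max() <= 1.0)
print("delta'(R1) r <= delta(r) <= r:",
      np.all(delta >= 0.5 * phi[-1] * r - dr),      # up to grid error
      np.all(delta <= r + dr))
\end{verbatim}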
Assume there exists a continuous function $\kappa(r): [0, +\infty) \rightarrow \bbR$ such that for any $x, y \in \bbR^n$ with $x \neq y$, \begin{equation} \label{eq:oneSided} (x-y)^\top\left( H(x)-H(y)\right) \le \kappa(\|x-y\|)\|x-y\|^2. \end{equation} Also, assume that \begin{equation} \limsup_{r\to\infty} \kappa(r) < 0. \end{equation} This implies that there is a positive constant, $R_0$, and a negative constant $\bar \kappa$, such that $ \kappa(r) \le \bar \kappa <0$ for all $r>R_0$. We choose $$R_1 = \frac{R_0}{2} + \frac{1}{2}\sqrt{R_0^2 - \frac{16 \sigma_{\min}(G)^2e^{h(R_0)}}{\bar \kappa}} > R_0,$$ and define $\delta$ via the following chain of definitions: \begin{subequations} \label{eq:deltaDef} \begin{align} \label{eq:delta} \delta(r) &= \int_0^{r} \varphi(s) g(s) ds\\ \label{eq:g} g(r) &= 1- \frac{\xi}{2} \int_0^{r \wedge R_1} \Phi(s)\varphi(s)^{-1} ds \\ \label{eq:xi} \xi^{-1} &= \int_0^{R_1} \Phi(s)\varphi(s)^{-1} ds \\ \label{eq:Phi} \Phi(r) &= \int_0^r \varphi(s) ds \\ \label{eq:phi} \varphi(r) &= e^{-h(r)}\\ \label{eq:lipschitzIntegral} h(r) &= \frac{1}{2\sigma_{\min}(G)^2}\int_0^r s(\kappa(s) \vee 0) ds. \end{align} \end{subequations} In the above definition, we use the shorthand notation $a \wedge b = \min \{a,b \}$ and $a \lor b = \max \{a,b \}$. The details on the choices of $R_0$ and $R_1$ will be presented during the proof of Theorem~\ref{thm:contraction_general} for the general reflection coupling related to \eqref{eq:diffX} and Corollary~\ref{cor:contraction} for the specific reflection coupling related to \eqref{eq:AvecontinuousProjectedLangevin}. As discussed above, $\delta(\|x-y\|)$ is a metric. See \cite{eberle2016reflection} for details. The corresponding Wasserstein distance is defined by \begin{equation} \nonumber W_{\delta}(P,Q) = \inf_{\Gamma \in \mathfrak{C}(P,Q)} \int_{\cK \times \cK} \delta(\|x-y \|) d \Gamma(x,y), \end{equation} where $\mathfrak{C}(P,Q)$ is the set of couplings between $P$ and $Q$. To get an explicit form of the constant factor in Lemma~\ref{lem:convergeToStationary}, we use the following theorem, which is analogous to Corollary 2 of \cite{eberle2016reflection}. \begin{theorem} \label{thm:contraction_general} If $\bx_t^1$ and $\bx_t^2$ are two solutions to (\ref{eq:diffX}), then for all $0\le s\le t$, their laws satisfy $$ W_{\delta}(\cL(\bx_t^1),\cL(\bx_t^2))\le e^{-\tilde a (t-s)} W_{\delta}(\cL(\bx_s^1),\cL(\bx_s^2)) $$ where $\tilde a = \xi \sigma_{\min}(G)^2$. \end{theorem} \paragraph{Proof} The proof closely follows the proof of Theorem 1 from \cite{eberle2016reflection}, with constraints handled similarly to \cite{lamperski2021projected,lekang2021wasserstein_accepted}. The key is to create an explicit coupling between $\bx_t^1$ and $\bx_t^2$, which is known as a \emph{reflection coupling} \cite{lindvall1986coupling}. To define the reflection coupling, let $\btau$ be the coupling time: $\btau = \inf \left\{ t \vert \bx_t^1 = \bx_t^2 \right\}$. Let $\br_t = \|\bx_t^1 -\bx_t^2\|$ and $\bu_t = (\bx_t^1 -\bx_t^2)/{\br_t}$. Then the reflection coupling between $\bx_t^1$ and $\bx_t^2$ is defined by: \begin{subequations} \label{eq:reflectionCoupling} \begin{align} d\bx_t^1 &= H(\bx_t^1)dt + G d\bw_t - \bv^1_td\bmu^1(t)\\ d\bx_t^2 &= H(\bx_t^2)dt + (I - 2\bu_t \bu_t^\top \indic (t<\btau)) G d\bw_t - \bv^2_td\bmu^2(t) \end{align} \end{subequations} where $-\int_0^t \bv^1_sd\bmu^1(s)$ and $-\int_0^t \bv^2_s d\bmu^2(s)$ are reflection terms that ensure that $\bx_t^1\in\cK$ and $\bx_t^2\in\cK$ for all $t\ge 0$.
The processes from (\ref{eq:reflectionCoupling}) define a valid coupling since $\int_0^T (I - 2\bu_t \bu_t^\top \indic(t<\btau)) G d\bw_t$ is a Brownian motion by L\'evy's characterization. The main idea is to show that with the specially constructed metric (\ref{eq:deltaDef}), there is a constant $\tilde{a}$ such that $e^{\tilde{a}t}\delta(\br_t)$ is a supermartingale. Then, the definition of $W_{\delta}$ and the supermartingale property show that \begin{align*} W_{\delta} (\cL(\bx_t^1),\cL(\bx_t^2)) \le \bbE\left[ \delta(\br_t)\right] \le e^{-\tilde{a}(t-s)} \bbE[\delta( \br_s )]. \end{align*} Since this bound holds for all couplings of the laws $ \cL(\bx_s^1)$ and $\cL(\bx_s^2)$, it must hold for the optimal coupling, and so \begin{align*} W_{\delta} (\cL(\bx_t^1),\cL(\bx_t^2)) \le e^{-\tilde{a}(t-s)} W_{\delta}(\cL(\bx_s^1),\cL(\bx_s^2)), \end{align*} which is the desired conclusion. Therefore, to complete the proof, we must show that $e^{\tilde{a}t}\delta(\br_t)$ is a supermartingale, i.e.\ that this process is non-increasing on average. Recall that $\btau$ is the coupling time, so that $e^{\tilde{a}t}\delta(\br_t) =0$ for $t \ge \btau$. So we only need to bound the behavior of the process for $t <\btau$. Specifically, it is required to show that the non-martingale terms of $d \left( e^{\tilde{a} t } \delta(\br_t)\right)$ are non-positive. By It\^o's formula, we have that \begin{align*} d \left( e^{\tilde{a} t } \delta( \br_t)\right) = e^{\tilde{a} t } \left( \tilde{a} \delta(r)dt + \delta'(r) d \br_t + \frac{1}{2} \delta''( r) (d \br_t)^2 \right). \end{align*} To bound the desired differential, we have to examine the terms $d \br_t$ and $(d \br_t)^2$. \begin{align*} d \br_t &= \bu_t^\top \left( d\bx_t^1 - d \bx_t^2\right) \\ &= \bu_t^\top \left( \left( H(\bx_t^1)- H(\bx_t^2)\right) dt + 2 \bu_t \bu_t^\top G d\bw_t - \bv^1_td\bmu^1(t) + \bv^2_td\bmu^2(t) \right) \end{align*} The above equation is simplified because $(d\bx_t^1 - d \bx_t^2)^\top (\nabla^2 \br_t) (d\bx_t^1 - d \bx_t^2)=0$. Also, by assumption we have \begin{equation} (\bx_t^1-\bx_t^2)^\top\left( H(\bx_t^1)-H(\bx_t^2)\right) \le \kappa(\|\bx_t^1-\bx_t^2\|)\|\bx_t^1-\bx_t^2\|^2. \end{equation} By the definition of $\bu_t$, the facts that $\bv_t^1\in N_{\cK}(\bx_t^1)$ and $\bv_t^2\in N_{\cK}(\bx_t^2)$ imply that $- \bu_t^\top \bv^1_td\bmu^1(t) \le 0$ and $\bu_t^\top \bv^2_td\bmu^2(t) \le 0$. Combining these facts with the assumption (\ref{eq:oneSided}) gives \begin{align*} d \br_t \le \kappa(r) r dt + 2 \bu_t^\top G d\bw_t. \end{align*} Now, since the terms that were dropped in the inequality have bounded variation, we have that \begin{align*} (d \br_t)^2 = 4\bu_t^\top G G^\top \bu_t dt \ge 4 \sigma_{\min}(G)^2 dt. \end{align*} By construction $\delta'(r)\ge 0$ and $\delta''(r)\le 0$, and so It\^o's formula gives \begin{align*} d \left( e^{\tilde{a} t } \delta(\br_t)\right) &\le dt e^{\tilde{a}t } \left( \tilde{a} \delta(r) + \delta'(r)\kappa(r) r + \delta''(r) 2 \sigma_{\min}(G)^2 \right) + d\bm_t\\ &= 2 \sigma_{\min}(G)^2 e^{\tilde{a} t } dt \left( \frac{\tilde{a}}{2 \sigma_{\min}(G)^2} \delta(r) + \frac{\kappa(r) r}{2 \sigma_{\min}(G)^2} \delta'(r) +\delta''(r) \right) + d\bm_t, \end{align*} where $\bm_t$ denotes a local martingale. So it suffices to pick $\tilde{a}$ and $R_1$ to ensure that for all $r \ge 0 $, the following holds: \begin{align} \label{eq:driftTerm} \frac{\tilde{a}}{2 \sigma_{\min}(G)^2} \delta(r) + \frac{\kappa(r) r}{2 \sigma_{\min}(G)^2} \delta'(r) +\delta''(r) \le 0.
\end{align} Recall that \begin{subequations} \begin{align*} \delta''(r) &= \varphi'(r)g(r) + g'(r)\varphi(r) \\ &= -\frac{1}{2\sigma_{\min} (G)^2} r(\kappa(r) \vee 0 )\delta'(r) -\frac{\xi}{2} \Phi(r) \indic(r<R_1). \end{align*} \end{subequations} So if we set $\tilde{a} = \xi \sigma_{\min}(G)^2$, then $\delta(r)\le \Phi(r)$ implies that (\ref{eq:driftTerm}) holds for all $r<R_1$. The remaining work is to find a sufficient condition under which (\ref{eq:driftTerm}) holds when $r \ge R_1$. Recall that we assume that there exists $0<R_0$, such that $k(r) <0$ for all $r\ge R_0$. So, if we choose $R_1 > R_0$, we have for all $r\ge R_1$ that $\varphi(r) = \varphi(R_0)$. By definition, $g(r) = \frac{1}{2}$ for all $r\ge R_1$, and so we must also have $\delta'(r) = \frac{1}{2} \varphi(R_0)$. Therefore, for $r\ge R_1$, (\ref{eq:driftTerm}) becomes \begin{align*} \label{driftInft} \frac{\tilde{a}}{2 \sigma_{\min}(G)^2} \delta(r) + \frac{\kappa(r) r}{2 \sigma_{\min}(G)^2} \frac{1}{2} \varphi(R_0) \le 0. \end{align*} So, a sufficient condition for (\ref{eq:driftTerm}) to hold when $r \ge R_1$ is given by: \begin{subequations} \label{eq:sufficientarguement} \begin{align} &\frac{\tilde a \delta(r)}{2 \sigma_{\min}(G)^2} + \frac{\kappa(r)r}{2 \sigma_{\min}(G)^2} \frac{1}{2} \varphi(R_0) \le 0 \\ \iff & \tilde a \delta(r) + \kappa(r)r \frac{1}{2} \varphi(R_0) \le 0 \\ \iff & \kappa(r)r \frac{1}{2} \varphi(R_0) \le -\tilde a \delta(r) \\ \iff & \kappa(r)r \frac{1}{2} \varphi(R_0) \le -\xi \sigma_{min}(G)^2 \delta(r) \\ \iff & \kappa(r)r \frac{1}{2} \varphi(R_0) \le -\frac{\sigma_{min}(G)^2 }{\int_0^{R_1} \Phi(s)\varphi(s)^{-1} ds}\delta(r) \label{eq:sufficient1}\\ \impliedby & \kappa(r)r \frac{1}{2} \varphi(R_0) \le -\frac{\sigma_{min}(G)^2 }{(R_1-R_0)\Phi(R_1) \varphi(R_0)^{-1} /2} \delta(r) \label{eq:sufficient2}\\ \impliedby & \kappa(r)r \frac{1}{2} \varphi(R_0) \le -\frac{\sigma_{min}(G)^2 }{(R_1-R_0)\Phi(R_1) \varphi(R_0)^{-1} /2} r \label{eq:sufficient3}\\ \iff & \kappa(r) \le -\frac{4 \sigma_{min}(G)^2 }{(R_1-R_0)\Phi(R_1) } \\ \iff & (R_1-R_0)\Phi(R_1) \ge -\frac{4 \sigma_{min}(G)^2 }{\kappa(r)} \label{eq:sufficient4}\\ \impliedby & (R_1-R_0)R_1 e^{-h(R_0)} \ge -\frac{4 \sigma_{min}(G)^2 }{\kappa(r)} \label{eq:sufficient5} \\ \impliedby & (R_1-R_0)R_1 e^{-h(R_0)} \ge -\frac{4 \sigma_{min}(G)^2 }{\bar \kappa} \label{eq:sufficient6} \end{align} \end{subequations} Note (\ref{eq:sufficient1}) is implied by (\ref{eq:sufficient2}) because for $r>R_0$, $\varphi(r) = \varphi(R_0)$, therefore, $\Phi(r) = \Phi(R_0) + \varphi(R_0)(r-R_0)$ which gives \begin{align} \nonumber \int_0^{R_1} \Phi(s)\varphi(s)^{-1} ds & \ge \int_{R_0}^{R_1}\Phi(s)\varphi(s)^{-1} ds \\ \nonumber &= \int_{R_0}^{R_1} \left( \Phi(R_0) +\varphi(R_0)(s-R_0) \right) \varphi(R_0)^{-1} ds \\ \nonumber &= \Phi(R_0)\varphi(R_0)^{-1}(R_1-R_0) + \frac{(R_1-R_0)^2}{2} \\ \nonumber & \ge \frac{\Phi(R_0)\varphi(R_0)^{-1}(R_1-R_0)}{2} + \frac{(R_1-R_0)^2}{2}\\ \nonumber &= (R_1-R_0) \left( \Phi(R_0) + (R_1-R_0)\varphi(R_0)\right) \varphi(R_0)^{-1}/2\\ &= (R_1-R_0)\Phi(R_1) \varphi(R_0)^{-1} /2. \label{eq:xiInverseBound} \end{align} Also, (\ref{eq:sufficient2}) is implied by (\ref{eq:sufficient3}) because $\delta(r) < r$. From (\ref{eq:sufficient4}) to (\ref{eq:sufficient5}), we use: \begin{align} \label{eq:PhiBound} \nonumber \Phi(R_1) &= \int_0^{R_1} \varphi(s) ds\\ \nonumber &= \int_0^{R_1} e^{-h(s)}ds \\ \nonumber &\ge \int_0^{R_1} e^{-h(R_0)}ds \\ &= R_1 e^{-h(R_0)}. 
\end{align} The implication (\ref{eq:sufficient6}) $\implies$ (\ref{eq:sufficient5}) arises because of the assumption that $ \kappa(r) \le \bar \kappa <0$ for all $r>R_0$. Therefore, (\ref{eq:driftTerm}) will hold for all $r\ge R_1$, as long as $R_1$ satisfies (\ref{eq:sufficient6}). The smallest such $R_1$ is given by \begin{equation} \label{R_1} R_1 = \frac{R_0}{2} + \frac{1}{2}\sqrt{R_0^2 - \frac{16 \sigma_{\min}(G)^2e^{h(R_0)}}{\bar \kappa}} > R_0. \end{equation} \hfill$\blacksquare$ We choose our reflection term as $(I - 2\bu_t \bu_t^\top \indic (t<\btau)) G d\bw_t$, while \cite{eberle2016reflection} uses $G(I - 2\be_t \be_t^\top \indic (t<\btau)) d\bw_t$, with $\be_t = \frac{G^{-1}(\bx_t^1-\bx_t^2)}{\|G^{-1}(\bx_t^1-\bx_t^2)\|}$. Our form of the reflection term leads to a mild simplification of some formulas. Now we specialize the result from the previous theorem to the specific case of this paper: \begin{corollary} \label{cor:contraction} If $\bx_t^1$ and $\bx_t^2$ are two solutions to \eqref{eq:AvecontinuousProjectedLangevin}, then for all $0\le s\le t$, their laws satisfy $$ W_{\delta}(\cL(\bx_t^1),\cL(\bx_t^2))\le e^{-\tilde a (t-s)} W_{\delta}(\cL(\bx_s^1),\cL(\bx_s^2)) $$ where $\tilde a = \xi \frac{2\eta}{\beta}$, $R_0=R$, and $R_1 = \frac{R}{2} +\frac{1}{2} \sqrt{R^2 + \frac{32}{\mu \beta}e^{\frac{\beta \ell R^2}{8}}}$ in the construction of $\delta$. \end{corollary} \paragraph{Proof} We can see that \eqref{eq:AvecontinuousProjectedLangevin} is a special case of \eqref{eq:diffX} with \begin{align*} H(x) &= -\eta \nabla_x \bar f(x) \\ G &= \sqrt{\frac{2\eta}{\beta}}I . \end{align*} Since we assume that $\nabla_x \bar f$ is $\ell$-Lipschitz and that $\bar f$ is $\mu$-strongly convex outside a ball of radius $R$, condition \eqref{eq:oneSided} holds with $\kappa(s)=\eta \ell$ for $0\le s<R$ and $\kappa(s) = - \eta \mu$ for $s\ge R$. Therefore, we can pick $R_0 = R$ to construct the metric \eqref{eq:deltaDef}. Now, $\sigma_{\min}(G)^2 =\frac{2\eta}{\beta}$ implies that $\tilde a = \xi \frac{2\eta}{\beta}$. Furthermore, the choice of $\kappa(r)$ implies that $h(R_0)=h(R) = \frac{\beta \ell R^2}{8}$. The choice of $\kappa(r)$ also implies that $\bar\kappa = -\eta \mu$. Thus, the form of $R_1$ is given by plugging these terms into \eqref{R_1}. \hfill$\blacksquare$ \begin{corollary} \label{cor:contractionW1} If $\bx_t^1$ and $\bx_t^2$ are two solutions to \eqref{eq:AvecontinuousProjectedLangevin}, then for all $0\le s\le t$, their laws satisfy $$ W_{1}(\cL(\bx_t^1),\cL(\bx_t^2))\le 2 \varphi(R)^{-1}e^{-\tilde a (t-s)} W_{1}(\cL(\bx_s^1),\cL(\bx_s^2)). $$ \end{corollary} \paragraph{Proof} From the special construction of $\delta$, we see that $\delta'(r)$ is monotonically non-increasing, and also that $\delta'(r)=\delta'(R_1)$ for all $r\ge R_1$. Furthermore, $\delta(r)=\int_0^r \delta'(s) ds \ge \delta'(r) \int_0^r ds = r\delta'(r)$. Thus, for all $r\ge 0$, the following bounds hold: $$\delta'(R_1)r \le \delta'(r)r \le \delta(r) \le r.$$ These bounds are now used to relate the $W_{\delta}$ and $W_1$ distances: \begin{align} \label{eq:W1Upper} \delta'(R_1) W_1(\cL(\bx_t^1),\cL(\bx_t^2))\le W_{\delta}(\cL(\bx_t^1),\cL(\bx_t^2)) \le W_1(\cL(\bx_t^1),\cL(\bx_t^2)). \end{align} In particular, \begin{align} \label{eq: deltaR1} \delta'(R_1) = \varphi(R_1)g(R_1) = \frac{1}{2} \varphi(R).
\end{align} Plugging (\ref{eq: deltaR1}) into the first inequality of (\ref{eq:W1Upper}) gives \begin{align} W_1(\cL(\bx_t^1),\cL(\bx_t^2))\le 2 \varphi(R)^{-1} W_{\delta}(\cL(\bx_t^1),\cL(\bx_t^2)) \end{align} And combining with Corollary \ref{cor:contraction} gives \begin{align} W_1(\cL(\bx_t^1),\cL(\bx_t^2))\le 2 \varphi(R)^{-1} e^{-\tilde a (t-s)} W_{\delta}(\cL(\bx_s^1),\cL(\bx_s^2)) \end{align} Finally, utilizing the second inequality of \eqref{eq:W1Upper} gives the desired result. \hfill$\blacksquare$ \subsection{Proof of Lemma~\ref{lem:convergeToStationary}} \label{ss:proofLemmaconvergeToStationary} In Lemma~\ref{lem:gibbs} of Appendix~\ref{sec:gibbs}, we showed that the Gibbs distribution, $\pi_{\beta \bar f}$, defined in (\ref{eq:gibbs}) is invariant for the dynamics of $\bx_t^C$. Thus, setting $\cL(\bx_t^1)=\cL(\bx_t^C)$ and $\cL(\bx_t^2)=\pi_{\beta\bar f}$ in Corollary ~\ref{cor:contractionW1} gives \begin{equation} \label{eq:convergeW1} W_{1}(\cL(\bx_t^C),\pi_{\beta \bar f})\le 2 \varphi(R)^{-1} e^{-\tilde a t} W_{1}(\cL(\bx_0^C),\pi_{\beta \bar f}). \end{equation} Let $\by$ be distributed according to $\pi_{\beta \bar f}$. For any joint distribution over $(\bx_0^C,\by)$ whose marginals are $\cL(\bx_0^C)$ and $\pi_{\beta \bar f}$, we have that \begin{align} \nonumber W_{1}(\cL(\bx_0^C),\pi_{\beta \bar f}) \nonumber &\le \nonumber \bbE[ \|\bx_0^C -\by \| ] \\ \nonumber &\le \sqrt{\bbE[ \|\bx_0^C -\by \|^2 ]}\\ \nonumber &\le \sqrt{\bbE[ 2\|\bx_0^C\|^2 +2\|\by \|^2 ]}\\ \nonumber &= \sqrt{2\bbE[ \|\bx_0^C\|^2 ] +2\bbE[ \|\by \|^2 ]}\\ \nonumber &\le \sqrt{ 2\varsigma + 2\left(\varsigma+ \frac{1}{\mu} c_{\ref{LyapunovConst}}\right)}\\ &\le \sqrt{\frac{2}{\mu} c_{\ref{LyapunovConst}}} + 2 \sqrt{\varsigma}. \label{eq:WrhoBound} \end{align} The second to last inequality uses Lemma~\ref{lem:continuousBound}. Combining \eqref{eq:convergeW1}, \eqref{eq:WrhoBound} shows that \begin{equation*} W_1(\cL(\bx_t^C),\pi_{\beta \bar f}) \le 2 \varphi(R)^{-1} e^{-\tilde a t} \left( \sqrt{\frac{2}{\mu} c_{\ref{LyapunovConst}}} + 2\sqrt{\varsigma}\right). \end{equation*} Thus, the lemma is proved and the constants are given by: \begin{subequations} \label{eq:contractionConstants} \begin{align} a &= \frac{2\xi}{\beta}\\ c_{\ref{contraction_const1}} &= \contractionConstOneVal \\ c_{\ref{contraction_const2}} &= \contractionConstTwoVal \end{align} where $\xi$ is given in \eqref{eq:xi}. \end{subequations} \hfill$\blacksquare$ \section{Limitations} \label{sec:limitations} Our current work is restricted to polyhedral sets. In particular, Theorem~\ref{thm:skorokhodConst} requires the polyhedral assumption, and it is unclear if Skorokhod problems satisfy similar bounds on any more general classes of constraint sets. As a result, it is unclear if our main results on projected Langevin algorithms can be extended beyond polyhedra. We also only considered constant step sizes, but in many cases decreasing or adaptive step sizes are used in practice. Finally, the dependence of the external data variables is limited to the class of L-mixing processes, which does not include all the real-world dependent data streams. Furthermore, it can be difficult to check that a data stream is L-mixing without requiring strong assumptions or knowledge about how it is generated. \section{Conclusions and future work} In this paper, we derived non-asymptotic bounds in 1-Wasserstein distance for a constrained Langevin algorithm applied to non-convex functions with dependent data streams satisfying L-mixing assumptions. 
Our convergence bounds match the best known bounds of the unconstrained case up to logarithmic factors, and improve on all existing bounds from the constrained case. The tighter bounds are enabled by a constructive and explicit bound on Skorokhod solutions, which builds upon an earlier non-constructive bound from \citep{dupuis1991lipschitz}. The analysis of L-mixing variables followed by a comparatively simple averaging method. Future work will examine extensions beyond polyhedral domains, higher-order Langevin algorithms, alternative approaches to handling constraints, such as mirror descent, and more sophisticated step size rules. More specifically, future work will examine whether the projection step, and thus Skorkhod problems, can be circumvented by utilizing different algorithms, such as those based on proximal LMC \citep{brosse2017sampling}. Additionally, applications to real-world problems such as time-series analysis and adaptive control will be studied. \section{Acknowledgments} This work was supported in part by NSF CMMI-2122856. The authors thank the reviewers for helpful suggestions for improving the paper. \section*{Checklist} \begin{enumerate} \item For all authors... \begin{enumerate} \item Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope? \answerYes{} \item Did you describe the limitations of your work? \answerYes{See Section~\ref{sec:limitations}.} \item Did you discuss any potential negative societal impacts of your work? \answerNA{} \item Have you read the ethics review guidelines and ensured that your paper conforms to them? \answerYes{} \end{enumerate} \item If you are including theoretical results... \begin{enumerate} \item Did you state the full set of assumptions of all theoretical results? \answerYes{See Section~\ref{ss:L-mixingAssumption} and Section~\ref{ss:assumptions}.} \item Did you include complete proofs of all theoretical results? \answerYes{See Section~\ref{sec:results} and Appendix~\ref{app:skorokhod}, \ref{sec:gibbs}, \ref{app:bounded}, \ref{sec:contraction}, \ref{app:aveLemmas}, \ref{app:disBounds}, \ref{app:switching}.} \end{enumerate} \item If you ran experiments... \begin{enumerate} \item Did you include the code, data, and instructions needed to reproduce the main experimental results (either in the supplemental material or as a URL)? \answerNA{} \item Did you specify all the training details (e.g., data splits, hyperparameters, how they were chosen)? \answerNA{} \item Did you report error bars (e.g., with respect to the random seed after running experiments multiple times)? \answerNA{} \item Did you include the total amount of compute and the type of resources used (e.g., type of GPUs, internal cluster, or cloud provider)? \answerNA{} \end{enumerate} \item If you are using existing assets (e.g., code, data, models) or curating/releasing new assets... \begin{enumerate} \item If your work uses existing assets, did you cite the creators? \answerNA{} \item Did you mention the license of the assets? \answerNA{} \item Did you include any new assets either in the supplemental material or as a URL? \answerNA{} \item Did you discuss whether and how consent was obtained from people whose data you're using/curating? \answerNA{} \item Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content? \answerNA{} \end{enumerate} \item If you used crowdsourcing or conducted research with human subjects... 
\begin{enumerate} \item Did you include the full text of instructions given to participants and screenshots, if applicable? \answerNA{} \item Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable? \answerNA{} \item Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation? \answerNA{} \end{enumerate} \end{enumerate} \section*{Corrections for the Main Paper} During edits of the supplementary material after the submission of the main paper, a few minor errors were flagged. The result is that some of the constant terms in the results changed. In each case, the originally stated results actually remain true, but the work in the supplementary material derives slightly different constant factors. The differences are highlighted below. \paragraph*{Correction of Theorem~\ref{thm:nonconvexLangevin} and Lemma~\ref{lem:AtoC}} Theorem~\ref{thm:nonconvexLangevin} remains true. In the original paper, we required that $\eta\le \min\left\{\frac{1}{4},\frac{\mu}{8\ell^2} \right\}$. In the supplemental material, we show that the theorem holds as long as $\eta$ satisfies the slightly weaker bound of $\eta\le \min\left\{\frac{1}{4},\frac{\mu}{4\ell^2} \right\}$. Similarly, Lemma~\ref{lem:AtoC}, which is used to prove Theorem~\ref{thm:nonconvexLangevin}, required $\eta\le \min\left\{\frac{1}{4},\frac{\mu}{8\ell^2} \right\}$ in the original submission. In the supplemental material, we show that the lemma holds as long as $\eta\le \min\left\{\frac{1}{4},\frac{\mu}{4\ell^2} \right\}$. \paragraph*{Correction of Lemma~\ref{lem:MeanBetween1}} {\it For all $s\ge 0$ and all $k\ge 0$, the following bound holds: \begin{equation*} \nonumber W_1\left(\cL(\bx_k^{M,s}),\cL(\bx_k^{B,s}) \right) \le \bbE[\|\bx_k^{M,s}-\bx_k^{B,s}\|] \le 2\ell \psi_2(s,\bz)\eta \sqrt{k} . \end{equation*} } In the submission, Lemma~\ref{lem:MeanBetween1} has the looser upper bound $$ W_1\left(\cL(\bx_k^{M,s}),\cL(\bx_k^{B,s}) \right) \le \bbE[\|\bx_k^{M,s}-\bx_k^{B,s}\|] \le 2\ell \left(\psi_2(s,\bz)+\psi_2(s+1,\bs)\right)\eta \sqrt{k} . $$ The bound from Lemma~\ref{lem:MeanBetween1} was used to derive Lemma~\ref{lem:MeanBetween2}, and as a result this must be corrected to: \paragraph*{Correction of Lemma~\ref{lem:MeanBetween2}} {\it For all $s\ge 0$ and all $k\ge 0$, the following bound holds \begin{multline} \nonumber W_1\left(\cL(\bx_k^{B,s}),\cL(\bx_k^{M,s+1})\right) \le \bbE[\|\bx_k^{B,s}-\bx_k^{M,s+1}\|] \le 2\ell \psi_2(s,\bz)\eta \sqrt{k} \left( e^{\eta k \ell}-1\right). \end{multline} } The bounds from Lemma~\ref{lem:MeanBetween1} and \ref{lem:MeanBetween2} were used to prove Lemma~\ref{lem:AtoCdependent}. The proof with the corrected bounds is given below. \paragraph*{Corrected proof of Lemma~\ref{lem:AtoCdependent}} Recalling that $\bx_k^{M,0}=\bx_k^A$ and $\bx_k^{M,k+1}=\bx_k^M$ and using the triangle inequality gives: \begin{align} \nonumber \MoveEqLeft[0] W_1(\cL(\bx_k^A),\cL(\bx_k^M)) \\ \nonumber &\le \sum_{s=0}^{k} W_1(\cL(\bx_k^{M,s}),\cL(\bx_k^{M,s+1}))\\ \nonumber &\le \ \sum_{s=0}^{k} \left(W_1(\cL(\bx_k^{M,s}),\cL(\bx_k^{B,s})) + W_1(\cL(\bx_k^{B,s}),\cL(\bx_k^{M,s+1}))\right)\\ \nonumber &\overset{\textrm{Lemmas~\ref{lem:MeanBetween1}~\& ~\ref{lem:MeanBetween2}}}{\le} \sum_{s=0}^{k}2\ell\psi_2(s,\bz)\eta \sqrt{k}e^{\eta\ell k} \\ \label{eq:AtoMdependent} &\le 2\ell \Psi_2(\bz) \eta \sqrt{k}e^{\eta \ell k}. 
\end{align} Here $\psi_2(s,\bz)$ and $\Psi_2(\bz)$ are the terms that bound the decay of probabilistic dependence between the $\bz_k$ variables, as defined in \eqref{eq:LMixing}. Similarly, we bound \begin{align} \nonumber \MoveEqLeft[0] W_1(\cL(\bx_k^M),\cL(\bx_k^C))\\ \nonumber &\le W_1(\cL(\bx_k^M),\cL(\bx_k^D))+W_1(\cL(\bx_k^D),\cL(\bx_k^C)) \\ \label{eq:MtoCdependent} &\overset{\textrm{Lemmas}~\ref{lem:C2D}~\&~\ref{lem:M2D}}{\le} \left( (c_{\ref{BoundCtoD1}} + c_{\ref{BoundCtoD2}} \sqrt{\varsigma}) \eta \sqrt{k} + c_{\ref{BoundCtoD3}} \sqrt{\eta \log(4k)} \right) e^{\eta\ell k}. \end{align} Plugging the bounds from (\ref{eq:AtoMdependent}) and (\ref{eq:MtoCdependent}) into (\ref{eq:AtoCTriangle}) proves the lemma, with $c_{\ref{AtoC1}} = \AtoCOneVal $. \hfill$\blacksquare$ \section{Background and results on Skorokhod problems} \label{app:skorokhod} In this section, we will show that when the domain is a polyhedron, rather tight bounds on solutions to Skorokhod problems can be obtained. \subsection{Background on Skorokhod problems} Let $\cK$ be a convex subset of $\bbR^n$ with non-empty interior. Let $y:[0,\infty)\to \bbR^n$ be a trajectory which is right-continuous with left limits and has $y_0\in \cK$. For each $x\in\bbR^n$, let $N_{\cK}(x)$ be the normal cone at $x$. Then the functions $x_t$ and $\phi_t$ solve the \emph{Skorokhod problem} for $y_t$ if the following conditions hold: \begin{itemize} \item $x_t = y_t+\phi_t \in \cK$ for all $t\in [0,T)$. \item The function $\phi$ has the form $\phi(t) = -\int_0^t v_s d\mu(s)$, where $\|v_s\|\in \{0,1\}$ and $v_s\in N_{\cK}(x_s)$ for all $s\in [0,T)$, while the measure, $\mu$, satisfies $\mu([0,T))<\infty$ for any $T>0$. \end{itemize} It can be shown that if a solution exists, it is unique. See \cite{tanaka1979stochastic}. However, existence of solutions typically relies on extra requirements beyond just convexity. For example, \cite{tanaka1979stochastic} showed the existence of solutions in the case that $y$ is continuous and $\cK$ is compact. Below, we will utilize results from \cite{anulova1991diffusional} to prove existence in the case that $\cK$ is a polyhedron. Whenever solutions are guaranteed to exist, uniqueness implies that we may view the Skorokhod solution as a mapping: $x=\cS(y)$. \subsection{Existence of solutions over polyhedra} The following is a consequence of Theorem 4 from \cite{anulova1991diffusional}. \begin{lemma} \label{lem:skorokhodExistence} Let $\cK$ be a polyhedron with non-empty interior. If $y_t$ is a trajectory in $\bbR^n$ which is right-continuous with left-limits, then $x=\cS(y)$ exists, is unique, and is right-continuous with left-limits. \end{lemma} \paragraph{Proof} To verify the conditions of Theorem 4 from \cite{anulova1991diffusional}, we just need to show that $\cK$ satisfies condition $\beta$ of that paper, which states that there exist constants $\epsilon >0 $ and $\bar \delta >0$ such that for all $x\in\partial \cK$, there exist $x_0\in \cK$ such that $\|x-x_0\|\le \bar \delta$ and $\{y|\|y-x_0\| <\epsilon\} \subset \cK$. We will show how to construct $\epsilon$, $\bar \delta$, and we will see that a suitable vector, $x_0$, exists for any $x\in\cK$. Note that since $\cK$ is a polyhedron, there are vectors $u_1,\ldots,u_p$ such that $x\in\cK$ if and only if it can be expressed as $$ x = \sum_{i=1}^k \lambda_i u_i + \sum_{i=k+1}^p \lambda_i u_i $$ with $\lambda_i\ge 0$ for $i=1,\ldots,p$ and $\sum_{i=1}^k\lambda_i = 1$. See~\cite{rockafellar2015convex}. 
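For instance (purely as an illustration, not needed in the sequel), the polyhedron $\cK=\{x\in\bbR^2 \mid x_1\ge 0,\ x_2\ge 0,\ x_1+x_2\ge 1\}$ admits such a representation with $k=2$ and $p=4$: take $u_1=(1,0)$ and $u_2=(0,1)$ as the points entering the convex combination, and $u_3=(1,0)$ and $u_4=(0,1)$ as the recession directions, so that
$$
\cK = \left\{ \lambda_1 u_1 + \lambda_2 u_2 + \lambda_3 u_3 + \lambda_4 u_4 \,\middle|\, \lambda_i \ge 0,\ \lambda_1+\lambda_2 = 1 \right\}.
$$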
(If $p=k$, then $\cK$ is a compact polytope, while if $k=0$, then $\cK$ is a convex cone.) Let $x^\star$ be an arbitrary point in the interior of $\cK$ and let $\epsilon >0$ be such that $\{y|\|y-x^\star\| <\epsilon\} \subset\cK$. Pick $\bar \delta$ such that $\|u_i-x^\star\| \le \bar\delta$ for $i=1,\ldots,k$. For any $x=\sum_{i=1}^p \lambda_i u_i \in \cK$, let $x_0 = x^\star + \sum_{i=k+1}^p \lambda_i u_i$. It follows that \begin{align*} \|x-x_0\| &= \left\| \sum_{i=1}^k \lambda_i u_i - x^\star \right\| \\ &= \left\| \sum_{i=1}^k \lambda_i (u_i - x^\star) \right\| \\ &\le \sum_{i=1}^k \lambda_i \|u_i-x^\star\| \\ &\le \bar \delta. \end{align*} Also, if $y\in \{y|\|y-x_0\| < \epsilon\}$, then there is a vector, $v$, with $\|v\|< \epsilon$, such that \begin{align*} y = x_0 + v = (x^\star + v) + \sum_{i=k+1}^p \lambda_i u_i. \end{align*} Now note that $x^\star + v\in \cK$, so there must be numbers $\lambda'_i\ge 0$ such that $\sum_{i=1}^k\lambda_i'=1$ and $x^\star + v = \sum_{i=1}^p \lambda_i' u_i$. It follows that $$ y = \sum_{i=1}^p \lambda_i' u_i + \sum_{i=k+1}^p \lambda_i u_i = \sum_{i=1}^k \lambda_i' u_i + \sum_{i=k+1}^p (\lambda_i+\lambda_i')u_i \in \cK. $$ \hfill$\blacksquare$ \subsection{Proof of Theorem~\ref{thm:skorokhodConst}} \label{app:diamProof} In this subsection, we provide a short proof of Theorem \ref{thm:skorokhodConst}. A supporting Lemma is firstly presented to complete the proof. The technical work in this subsection relies on some notation about the vectors defining $\cK$ from (\ref{eq:halfspaceRep}). Let $A = \begin{bmatrix} a_1 & \cdots a_m\end{bmatrix}^\top$ be the matrix whose rows are the $a_i^\top$ vectors. For $\cI\subset \{1,\ldots,m\}$ let $A_{\cI}$ be the matrix whose rows are $a_i^\top$ for $i\in\cI$. Let $\begin{bmatrix} W_{\cI} & V_{\cI}\end{bmatrix}$ be an orthogonal matrix such that $\cN(A_{\cI})=\cR(W_{\cI})$. Here $\cN(A_{\cI})$ denotes the null space of $A_{\cI}$ and $\cR(W_{\cI})$ denotes the range space of $W_{\cI}$. Let $P_{\cI}=W_{\cI}W_{\cI}^\top$, which is the orthogonal projection onto $\cN(A_{\cI})$. We will use the convention that $A_{\emptyset}$ is a $1\times n$ matrix of zeros, so that $\cN(A_{\emptyset})=\bbR^n$, and thus $P_{\emptyset}=I$. The following lemma is a quantitative and explicit version of Theorem 2.1 of \cite{dupuis1991lipschitz}: \const{diamBound} \newcommand{\diamBoundVal}{6 \left(\frac{1}{\alpha}\right)^{\rank(A)/2}} \begin{lemma} \label{lem:boundingExistence} If $\cK$ is a polyhedron defined by (\ref{eq:halfspaceRep}), then there is a compact, convex set $\cB$ with $0\in\mathrm{int}(\cB)$ such that if $z\in\partial \cB$, $v\in N_{\cB}(z)$, and $a_j$ is a unit vector from (\ref{eq:halfspaceRep}) with $a_j^\top v\ne 0$, then \begin{enumerate} \item \label{item:largeProduct} $|a_j^\top z| \ge 1$ \item \label{item:sameSign} $\mathrm{sign}(a_j^\top z) = \mathrm{sign}(a_j^\top v)$. \end{enumerate} Furthermore, the diameter of $\cB$ is at most $c_{\ref{diamBound}}$, defined by $$ c_{\ref{diamBound}} = 6 \left(\frac{1}{\alpha}\right)^{\rank(A)/2} $$ where \begin{align*} \alpha=\frac{1}{2}\min\left\{\|P_{\cI} a_j\|^2 \middle| P_{\cI}a_j\ne 0, \: \cI\subset \{1,\ldots,m\},\: j\in\{1,\ldots,m\} \right\}, \end{align*} and $\alpha \in (0,1/2] $. \end{lemma} A non-constructive proof of the existence of $\cB$ was given in \cite{dupuis1991lipschitz}. While that paper shows that $\cB$ is compact, it does not quantitatively bound its diameter. 
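To make the constant concrete, the following minimal sketch (purely illustrative and not used in any proof; the unit square, its unit normals, and the NumPy/SciPy helpers are assumptions of the example) enumerates the projections $P_{\cI}$ for a small polyhedron and evaluates $\alpha$ and the resulting value of $c_{\ref{diamBound}}$.
\begin{verbatim}
import itertools
import numpy as np
from scipy.linalg import null_space

# Unit square [0,1]^2 written with unit outward normals a_i (illustration only).
A = np.array([[1.0, 0.0], [-1.0, 0.0], [0.0, 1.0], [0.0, -1.0]])
m, n = A.shape

def proj_onto_nullspace(rows):
    # Orthogonal projection P_I onto N(A_I); the convention P_{emptyset} = I is used.
    if len(rows) == 0:
        return np.eye(n)
    W = null_space(A[list(rows), :])   # orthonormal basis of N(A_I)
    return W @ W.T

vals = []
for r in range(m + 1):
    for I in itertools.combinations(range(m), r):
        P = proj_onto_nullspace(I)
        for j in range(m):
            sq = float(A[j] @ P @ A[j])   # equals ||P_I a_j||^2, since P_I is an orthogonal projection
            if sq > 1e-12:                # only nonzero products enter the minimum defining alpha
                vals.append(sq)

alpha = 0.5 * min(vals)
c_diam = 6.0 * (1.0 / alpha) ** (np.linalg.matrix_rank(A) / 2.0)
print(alpha, c_diam)                      # prints 0.5 and 12.0 for this example
\end{verbatim}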
The diameter of $\cB$ is precisely the quantity that is used to bound the difference between Skorokhod solutions. \paragraph*{Proof of Theorem \ref{thm:skorokhodConst}.} Theorem 2.2 of \cite{dupuis1991lipschitz} shows that if a compact convex set with $0\in\mathrm{int}(B)$ satisfying conditions \ref{item:largeProduct} and \ref{item:sameSign} exists, then $$ \sup_{0\le s \le t} \|x_s-x_s'\| \le (\mathrm{diameter}(\cB)+1) \sup_{0\le s\le t} \|y_s-y'_s\|. $$ The result now follows since $c_{\ref{diamBound}}$ is an upper bound on the diameter of the set $\cB$ constructed in Lemma~\ref{lem:boundingExistence}. \hfill$\blacksquare$ \paragraph*{Proof of Lemma \ref{lem:boundingExistence}.} We will focus on constructing a compact, convex $\cB$ with $0\in\mathrm{int}(\cB)$ which satisfies condition \ref{item:largeProduct}. Lemma 2.1 of \cite{dupuis1991lipschitz} shows that condition \ref{item:sameSign} must also hold. (Note that the sign is opposite of what appears in \cite{dupuis1991lipschitz}, because that paper examines inward normal vectors, while we are examining outward normal vectors.) We will find numbers $\epsilon \in (0,1)$ and $r_{\cI}\in (0,1)$ for $\cI\subset \{1,\ldots,m\}$ such that $$ \cB = \{x | \|P_{\cI} x\| \le \epsilon^{-1} r_{\cI} \textrm{ for } \cI \in \{1,\ldots,m\} \} $$ has the desired properties. By construction, $\cB$ is compact and convex, $0\in\mathrm{int}(\cB)$, and the diameter is at most $2\epsilon^{-1} r_{\emptyset} < 2\epsilon^{-1}$, since every $x\in\cB$ satisfies $\|P_{\emptyset}x\|=\|x\|\le \epsilon^{-1}r_{\emptyset}$. Furthermore, $\cB = \epsilon^{-1} \hat\cB$, where $$ \hat \cB = \{x | \|P_{\cI} x\| \le r_{\cI} \textrm{ for } \cI \subset \{1,\ldots,m\}\}. $$ A similar construction for $\cB$ was utilized in \cite{dupuis1991lipschitz}. The main distinction is that this proof will give an explicit procedure for determining the values of $\epsilon$ and $r_{\cI}$. Note that $z\in \hat \cB$ if and only if $\epsilon^{-1}z \in \cB$, $z\in\partial \hat\cB$ if and only if $\epsilon^{-1}z\in \partial \cB$, and $N_{\hat\cB}(z)=N_{\cB}(\epsilon^{-1} z)$. Thus, Condition \ref{item:largeProduct} holds for $\cB$ if and only if \begin{equation} \label{eq:revisedImplication} z\in\partial\hat\cB,\: v\in N_{\hat\cB}(z),\: \textrm{ and } a_j^\top v\ne 0 \implies |a_j^\top z|\ge \epsilon>0. \end{equation} Note that if $x\in\partial\hat \cB$, then \begin{align} \nonumber N_{\hat\cB}(x) &= \mathrm{cone}\{P_{\cI}x | \|P_{\cI}x\|=r_{\cI}\} \\ \label{eq:normalConeRep} &= \left\{\sum_{\{\cI | \|P_{\cI}x\|=r_{\cI} \}} \lambda_{\cI}P_{\cI}x \middle| \lambda_{\cI}\ge 0 \right\}. \end{align} See Corollary 23.8.1 of \cite{rockafellar2015convex}. The representation in (\ref{eq:normalConeRep}) implies that if $x\in\partial\hat\cB$, $v\in N_{\cB}(x)$, and $a_j^\top v\ne 0$, then there must be a set $\cI$ such that, $\|P_{\cI}x\|=r_{\cI}$, $\lambda_{\cI}>0$, and $a_j^\top P_{\cI}\ne 0$. We will choose $\epsilon$ such that for all $\cI$ and $j$ with $P_{\cI}a_j\ne 0$, $\epsilon$ is a lower bound on the optimal value of the following (non-convex) optimization problem: \begin{subequations} \label{eq:epsDefProblem} \begin{align} &\min_{x} && |a_j^\top x| \\ &\textrm{subject to} && \|P_{\cI}x\| \ge r_{\cI} \\ &&& \|P_{\cI\cup\{j\}} x\| \le r_{\cI\cup \{j\}} \\ &&& \|x\|\le 1. \end{align} \end{subequations} By construction, if $x\in\partial\hat \cB$, $v\in N_{\cB}(x)$, and $a_j^\top v\ne 0$, there must be some $\cI$ such that $x$ is feasible for (\ref{eq:epsDefProblem}). 
As a result, we must have that $|a_j^\top x|\ge \epsilon$. Thus, the implication from (\ref{eq:revisedImplication}) will hold, provided that the values of $r_{\cI}$ can be chosen so that all of the problems of the form (\ref{eq:epsDefProblem}) have strictly positive optimal values. The rest of the proof proceeds as follows. First we derive conditions on $r_{\cI}$ that ensure that the problems from (\ref{eq:epsDefProblem}) always have positive optimal values. Next, we compute specific values of $r_{\cI}$ that satisfy these conditions. Finally, we use those values of $r_{\cI}$ to compute $\epsilon$, the desired lower bound on the optimal value of (\ref{eq:epsDefProblem}). We now assume that $r_{\cI},r_{\cI\cup\{j\}}\in (0,1)$ and derive sufficient conditions to make the optimal value in (\ref{eq:epsDefProblem}) strictly positive. To derive the optimal value of (\ref{eq:epsDefProblem}), we need a few basic facts: \begin{itemize} \item If $\cI\subset \cJ$, then $P_{\cJ}P_{\cI}=P_{\cJ}$ and $P_{\cI}P_{\cJ}=P_{\cJ}$. \item The matrix $\begin{bmatrix}W_{\cI\cup\{j\}} & \frac{P_{\cI}a_j}{\|P_{\cI}a_j\|} & V_{\cI} \end{bmatrix}$ is orthogonal. \end{itemize} First we show that $\cI\subset \cJ$ implies that $P_{\cJ}P_{\cI}=P_{\cJ}$. Symmetry of the projection matrices would then imply that $P_{\cI}P_{\cJ}=P_{\cJ}$. Note that $P_{\cI}=I-V_{\cI}V_{\cI}^\top$, where $$ \cR(V_{\cI}) = \cR(P_{\cI})^{\perp} = \cN(A_{\cI})^\perp \subset \cN(A_{\cJ})^\perp = \cR(P_{\cJ})^\perp. $$ It follows that $P_{\cJ}V_{\cI}=0$ and thus $P_{\cJ}P_{\cI}=P_{\cJ}$. Now we will show that $P_{\cI} a_j \in \cR(P_{\cI})\setminus \cR(P_{\cI\cup\{j\}})$. By construction, $P_{\cI}a_j \in \cR(P_{\cI})$. Also, we have that $P_{\cI\cup \{j\}}P_{\cI}a_j = P_{\cI\cup \{j\}} a_j = 0$, where the second equality follows because $a_j \in \cN(A_{\cI\cup\{j\}})^\perp = \cR(P_{\cI\cup\{j\}})^\perp$. Thus, we have that $P_{\cI}a_j\ne P_{\cI\cup\{j\}} P_{\cI} a_j$. Now, if $P_{\cI}a_j\in \cR(P_{\cI\cup\{j\}})$, then $P_{\cI}a_j = P_{\cI\cup \{j\}} z$ for some vector $z$. But then $P_{\cI\cup \{j\}}^2 = P_{\cI\cup\{j\}}$ would imply that $P_{\cI\cup \{j\}}P_{\cI}a_j = P_{\cI\cup\{j\}}z = P_{\cI}a_j$, which gives a contradiction. Thus, $P_{\cI}a_j\notin \cR(P_{\cI\cup\{j\}})$. Now the rank nullity theorem implies that \begin{align*} \rank(A_{\cI})&=n-\dim(\cN(A_{\cI}))\\ \rank(A_{\cI\cup\{j\}})&=n-\dim(\cN(A_{\cI\cup\{j\}})). \end{align*} Now since $A_{\cI\cup\{j\}}$ has only one more row than $A_{\cI}$, we must have that $\rank(A_{\cI})\le \rank(A_{\cI\cup\{j\}})\le \rank(A_{\cI})+1$. Also, $\cN(A_{\cI\cup\{j\}})\subset \cN(A_{\cI}) $ by construction, and we just saw that $P_{\cI}a_j \in \cN(A_{\cI})\setminus \cN(A_{\cI\cup \{j\}})$, so the inclusion is strict. It follows that \begin{align} \label{eq:nullspaceDimensions} \nonumber \dim(\cN(A_{\cI}))&=\dim(\cR(P_{\cI}))\\ &=\dim(\cN(A_{\cI\cup\{j\}}))+1 \nonumber \\ &=\dim(\cR(P_{\cI\cup\{j\}}))+1. \end{align} Now, since $\cR(W_{\cI\cup\{j\}})=\cN(A_{\cI\cup\{j\}})$, we must have that $$ \cR\left(\begin{bmatrix}W_{\cI\cup\{j\}} & \frac{P_{\cI}a_j}{\|P_{\cI}a_j\|} \end{bmatrix}\right) = \cN(A_{\cI}). 
$$ Furthermore, since $\cR(W_{\cI\cup\{j\}})=\cR(P_{\cI\cup\{j\}})$ and $P_{\cI\cup \{j\}} P_{\cI}a_j=0$, we must have that $$ \begin{bmatrix}W_{\cI\cup\{j\}}^\top \\ \frac{(P_{\cI}a_j)^\top}{\|P_{\cI}a_j\|} \\ V_{\cI}^\top \end{bmatrix} \begin{bmatrix}W_{\cI\cup\{j\}} & \frac{P_{\cI}a_j}{\|P_{\cI}a_j\|} & V_{\cI} \end{bmatrix} =I $$ Now we use this orthogonal matrix to perform a change of coordinates. In particular, let $y_1$, $y_2$, and $y_3$ be such that $$ x = W_{\cI\cup \{j\}} y_1 + \frac{P_{\cI}a_j}{\|P_{\cI}a_j\|} y_2 + V_{\cI}y_3. $$ In these new coordinates, (\ref{eq:epsDefProblem}) is equivalent to \begin{subequations} \label{eq:epsDefChanged} \begin{align} & \min_{y} && |\|P_{\cI}a_j\|y_2 + a_j^\top V_{\cI} y_3| \\ \label{eq:partialProj1} &\textrm{subject to}&& \|y_1\|^2 + y_2^2 \ge r_{\cI}^2 \\ &&& \|y_1\| \le r_{\cI\cup\{j\}} \\ \label{eq:totalNormBound} &&& \|y_1\|^2 + y_2^2 + \|y_3\|^2 \le 1. \end{align} \end{subequations} The equivalence arises because \begin{align*} a_j^\top x &= \|P_{\cI}a_j\| y_2 + a_j^\top V_{\cI}y_3 \\ P_{\cI}x &= W_{\cI\cup \{j\}} y_1 + \frac{P_{\cI}a_j}{\|P_{\cI}a_j\|}y_2 \\ P_{\cI\cup\{j\}} x &= W_{\cI\cup \{j\}} y_1 \end{align*} along with orthogonality of the corresponding transformation from $y$ to $x$. If we choose $r_{\cI} > r_{\cI\cup \{j\}}$, then we must have $$ y_2^2 \ge r_{\cI}^2 - \|y_1\|^2 \ge r_{\cI}^2-r_{\cI\cup \{j\}}^2 > 0. $$ Now, if $y$ is feasible, $-y$ is also feasible, and they have the same objective value in (\ref{eq:epsDefChanged}). So, without loss of generality, we may assume that $y_2 > 0$. The Cauchy-Schwartz inequality, combined with (\ref{eq:totalNormBound}), implies that \begin{equation} \|P_{\cI}a_j\|y_2 + a_j^\top V_{\cI} y_3 \label{eq:csLower} \ge \|P_{\cI}a_j\|y_2 - \|V_{\cI}^\top a_j\| \sqrt{1-\|y_1\|^2-y_2^2}. \end{equation} Note that this bound is achieved by setting $y_3 = -\frac{V_{\cI}^\top a_j}{\|V_{\cI}^\top a_j\|}\sqrt{1-\|y_1\|^2-y_2^2}$. The right side of (\ref{eq:csLower}) is monotonically increasing in $y_2$. So, (\ref{eq:partialProj1}) implies that it is minimized over $y_2$ by setting $y_2=\sqrt{r_{\cI}^2-\|y_1\|^2}$. This leads to a lower bound of the form: \begin{equation*} \|P_{\cI}a_j\|y_2 - \|V_{\cI}^\top a_j\| \sqrt{1-\|y_1\|^2-y_2^2} \ge \|P_{\cI}a_j\|\sqrt{r_{\cI}^2-\|y_1\|^2} - \|V_{\cI}^\top a_j\| \sqrt{1-r_{\cI}^2}. \end{equation*} The right side is now monotonically decreasing with respect to $\|y_1\|$, and so it is minimized by setting $\|y_1\|=r_{\cI\cup\{j\}}$. This leads to the characterization: \begin{align} &\textrm{Optimal Value of (\ref{eq:epsDefChanged})} \nonumber \\ &= \|P_{\cI}a_j\|\sqrt{r_{\cI}^2-r_{\cI\cup\{j\}}^2} - \|V_{\cI}^\top a_j\| \sqrt{1-r_{\cI}^2} \nonumber \\ \label{eq:optVal} &= \|P_{\cI}a_j\|\sqrt{r_{\cI}^2-r_{\cI\cup\{j\}}^2} - \sqrt{1-\|P_{\cI}^\top a_j\|^2} \sqrt{1-r_{\cI}^2}. \end{align} The second equality follows because $$ \|V_{\cI}^\top a_j\|^2 = a_j^\top V_{\cI}V_{\cI}^\top a_j = a_j^\top (I-P_{\cI})a_j = 1-\|P_{\cI}a_j\|^2. 
$$ Now, we have that the right side of (\ref{eq:optVal}) is positive if and only if: \begin{subequations} \label{eq:inductiveRbound} \begin{align} \MoveEqLeft \|P_{\cI}a_j\|^2\left(r_{\cI}^2-r_{\cI\cup\{j\}}^2 \right) > \left(1-\|P_{\cI} a_j\|^2\right)\left(1-r_{\cI}^2\right) \\ \label{eq:inductiveRboundIterative} &\iff r_{\cI}^2 > 1-\|P_{\cI}a_j\|^2 + \|P_{\cI}a_j\|^2 r_{\cI\cup\{j\}}^2\\ \label{eq:inductiveRboundMonotone} &\iff r_{\cI}^2 > 1-\|P_{\cI}a_j\|^2 (1-r_{\cI\cup\{j\}}^2) \\ \label{eq:inductiveRboundFinal} & \iff r_{\cI}^2 > r_{\cI\cup\{j\}}^2 +(1-\|P_{\cI}a_j\|^2)(1-r_{\cI\cup\{j\}}^2). \end{align} \end{subequations} Note that (\ref{eq:inductiveRboundFinal}) implies that $r_{\cI} > r_{\cI\cup\{j\}}$ holds. Also note that any collection of $r_{\cI}$ values in $(0,1)$ that satisfy (\ref{eq:inductiveRbound}) will ensure that the corresponding set, $\hat\cB$, satisfies the implication from (\ref{eq:revisedImplication}). In that case, we have that $\cB$ has the desired properties. Now we seek a simpler, more explicit formula for the $r_{\cI}$ values which satisfy (\ref{eq:inductiveRbound}). Note that (\ref{eq:inductiveRboundMonotone}) implies that the right side is monotonically decreasing with respect to $\|P_{\cI}a_j\|^2$. So, if $\alpha>0$ is a number such that $\alpha \le \frac{1}{2}\|P_{\cI}a_j\|^2$ for all $\cI$ and $j$ with $P_{\cI}a_j\ne 0$ we obtain a sufficient condition for (\ref{eq:inductiveRbound}): \begin{subequations} \label{eq:relaxedRbound} \begin{align} r_{\cI}^2& = 1-\alpha(1-r_{\cI\cup\{j\}}^2) \\ r_{\cI}^2& = (1-\alpha) + \alpha r_{\cI\cup\{j\}}^2. \end{align} \end{subequations} Now we use (\ref{eq:relaxedRbound}) to derive the desired formula for $r_{\cI}$. In particular, consider the recursion $$ x_{k+1}=(1-\alpha) + \alpha x_k. $$ This has an explicit solution given by $$ x_{k} = \alpha^k x_0 + 1-\alpha^k = 1-\alpha^k(1-x_0). $$ In particular, if $x_0\in (0,1)$, we have that $x_k\in (0,1)$ for all $k\ge 0$. We define $r_{\cI}$ by fixing a value $x_0\in (0,1)$, which will be defined explicitly later, and setting $r_{\cI}^2=x_k=1-\alpha^k(1-x_0)$ if $\rank(A)-\rank(A_{\cI})=k$. To see that this definition satisfies (\ref{eq:relaxedRbound}), first note that $r_{\cI}^2= x_0$ for all $\cI$ with $\rank(A)=\rank(A_{\cI})$. Now, recall that if $P_{\cI}a_j\ne 0$, then (\ref{eq:nullspaceDimensions}) implies that $\rank(A_{\cI\cup\{j\}})=\rank(A_{\cI})+1$. The converse is also true: If $\rank(A_{\cI\cup\{j\}})=\rank(A_{\cI})+1$, then we must have that $a_j\notin \cR(A_{\cI}^\top)=\cR(V_{\cI})=\cR(P_{\cI})^\perp$. It follows that $P_{\cI}a_j\ne 0$. Thus, if $\rank(A)-\rank(A_{\cI\cup\{j\}})=k\ge 0$, we have that $P_{\cI}a_j\ne 0$ precisely when $\rank(A)-\rank(A_{\cI})=k+1$. So we see that setting $r_{\cI}^2=x_{k+1}=1-\alpha(1-x_k)$ gives the same value as specified in (\ref{eq:relaxedRbound}). The final step in the proof requires finding a lower bound, $\epsilon$, for the optimal value from (\ref{eq:optVal}). Let $r_{\cI}^2=x_k$ and $r_{\cI\cup\{j\}}^2=x_{k-1}$. Then we have that \begin{align*} r_{\cI}^2-r_{\cI\cup\{j\}}^2 &= (1-\alpha)\alpha^{k-1}(1-x_0)\\ 1-r_{\cI}^2 &= \alpha \alpha^{k-1} (1-x_0). \end{align*} Also note that the right side of (\ref{eq:optVal}) is monotonically increasing with respect to $\|P_{\cI}a_j\|^2$ and that $\|P_{\cI}a_j\|^2 \ge 2\alpha$ by our choice of $\alpha$. 
So, plugging in this lower bound gives \begin{align*} \MoveEqLeft \|P_{\cI}a_j\|\sqrt{r_{\cI}^2-r_{\cI\cup\{j\}}^2} - \sqrt{1-\|P_{\cI}^\top a_j\|^2} \sqrt{1-r_{\cI}^2} \\ &\ge \left(\sqrt{2\alpha}\sqrt{1-\alpha}-\sqrt{1-2\alpha}\sqrt{\alpha}\right)\sqrt{\alpha^{k-1}(1-x_0)} \\ &=\left(\sqrt{2-2\alpha}-\sqrt{1-2\alpha}\right)\sqrt{\alpha^k(1-x_0)} \\ &\ge \left(\sqrt{2}-1\right) \sqrt{\alpha^{\rank(A)} (1-x_0)} \end{align*} The final inequality follows because $k\le \rank(A)$ and the minimum value of $\sqrt{2-2\alpha}-\sqrt{1-2\alpha}$ over $\alpha \in [0,\|P_{\cI}a_j\|^2/2] \subset [0,1/2]$ occurs at $\alpha=0$. To simplify the final formula for $\epsilon$, note that $\sqrt{2}-1>1/3$, and thus we can choose $x_0\in(0,1)$ so that $$ (\sqrt{2}-1)\sqrt{1-x_0}=\frac{1}{3} \iff x_0 = 1-\frac{1}{9\left(\sqrt{2}-1\right)^2}\approx 0.352. $$ Plugging in this value for $x_0$ gives the bound: \begin{equation*} \textrm{Optimal Value of (\ref{eq:epsDefChanged})} \ge \frac{1}{3}\alpha^{\frac{\rank(A)}{2}}=:\epsilon \end{equation*} Now recalling that the diameter of $\cB$ is at most $2/\epsilon$ completes the proof. \hfill$\blacksquare$ \section{Invariance of the Gibbs measure} \label{sec:gibbs} \begin{lemma} \label{lem:gibbs} The Gibbs measure, (\ref{eq:gibbs}), is stationary under the dynamics of the reflected SDE from (\ref{eq:AvecontinuousProjectedLangevin}). \end{lemma} \paragraph{Proof} Before showing invariance of the Gibbs measure, we first remark that it is well-defined. In particular, we have that $\int_{\cK}e^{-\beta \bar f(x)} dx<\infty$. To see this, let $\|x\| \ge R/\theta$, where $\theta\in (0,1)$ is a number to be chosen later. Note that for $t\in [\theta,1]$, we have that $\|\theta x\|\ge R$. So, we can use strong convexity outside a ball of radius $R$ to show \begin{align} \nonumber \bar f(x)&\ge \bar f(0)+\int_0^1 \nabla \bar f(tx)^\top x dt \\ \nonumber &=\bar f(0) + \nabla \bar f(0)^\top x + \int_0^\theta \left(\nabla \bar f(tx)-\nabla\bar f(0) \right)^\top x dt + \int_{\theta}^1 \left(\nabla \bar f(tx)-\nabla\bar f(0) \right)^\top x dt \\ \nonumber &\ge \bar f(0)-\|\nabla \bar f(0)\| \|x\| -\ell \|x\|^2 \int_0^\theta t dt +\mu \|x\|^2 \int_{\theta}^1 t dt \\ \nonumber & \ge \bar f(0)-\|\nabla \bar f(0)\| \|x\| + \frac{1}{2} \|x\|^2\left( - \ell \theta^2 + \mu(1-\theta^2) \right) \end{align} The coefficient $-\ell^2 \theta^2 + \mu (1-\theta^2)$ is positive, as long as $\theta < \sqrt{\frac{\mu}{\mu+\ell}}$. In particular, choosing $\theta^2 = \frac{1}{2}\frac{\mu}{\mu+\ell}$ gives \begin{equation} \label{eq:quadraticLower} \bar f(x) \ge \bar f(0)-\|\nabla \bar f(0)\| \|x\| +\frac{1}{4}\mu \|x\|^2. \end{equation} It follows that $Z=\int_{\cK}e^{-\beta \bar f(x)}dx < \infty$. In \cite{lamperski2021projected}, it was shown in that the Gibbs measure is invariant under (\ref{eq:AvecontinuousProjectedLangevin}) when $\cK$ is compact. We will extend the result to non-compact $\cK$ via a limiting argument. Let $\cK_i = \cK \cap \{x\in\bbR^n | \|x\|_{\infty}\le i\}$. Let $Z_i=\int_{\cK_i}e^{-\beta \bar f(x)}dx$. Note that $\lim_{i\to\infty}Z_i=Z$, by monotone convergence. We choose $\|x\|_\infty = \max\{|x_1|,\ldots,|x_n|\} \le i$ so that $\cK_i$ becomes a compact polyhedron for $i\ge 1$. Let $\bx_t^C$ be a solution to the original form of (\ref{eq:AvecontinuousProjectedLangevin}) and let $\bx_t^{C,i}$ be a solution to the RSDE from (\ref{eq:AvecontinuousProjectedLangevin}), with $\cK_i$ used in place of $\cK$. 
Since $\cK_i$ is polyhedral, Lemma~\ref{lem:skorokhodExistence} in Appendix~\ref{app:skorokhod} shows that $\bx_t^{C,i}$ is uniquely defined. Define the diffusion operators $P$ and $P^i$ by \begin{align*} (P_tg)(x) &= \bbE[g(\bx_t^C)|\bx_0=x] \\ (P_t^i g)(x) &= \bbE[g(\bx_t^{C,i})|\bx_0=x] \end{align*} Let $L_2(\cK,\pi_{\beta \bar f})$ be the set of functions $g:\cK\to \bbR$ which are square integrable with respect to the measure $\pi_{\beta \bar f}$. We will show that $\pi_{\beta \bar f}$ is invariant for (\ref{eq:AvecontinuousProjectedLangevin}) by showing that for all $g\in L_2(\cK,\pi_{\beta \bar f})$ the following equality holds for all $t\ge 0$: \begin{equation} \label{eq:stationaryTransition} \frac{1}{Z}\int_{\cK} g(x)e^{-\beta \bar f(x)}dx = \frac{1}{Z}\int_{\cK} (P_tg)(x)e^{-\beta \bar f(x)}dx. \end{equation} The subset of bounded, compactly supported functions in $L_2(\cK,\pi_{\beta \bar f})$ is a dense subset. Fix an arbitary bounded, compactly supported $g\in L_2(\cK,\pi_{\beta \bar f})$. It suffices to show that \eqref{eq:stationaryTransition} holds for $g$. Lemma 19 of \cite{lamperski2021projected} shows that for all $i\ge 1$, the following holds: \begin{equation} \label{eq:stationaryTransitionCompact} \frac{1}{Z_i}\int_{\cK_i} g(x)e^{-\beta \bar f(x)}dx = \frac{1}{Z_i}\int_{\cK_i} (P_t^ig)(x)e^{-\beta \bar f(x)}dx. \end{equation} We saw earlier that $Z_i\to Z$. Furthermore, since $g$ is compactly supported, there is a number, $m$, such that $i\ge m$ implies that $$ \int_{\cK_i} g(x)e^{-\beta \bar f(x)}dx = \int_{\cK}g(x)e^{-\beta \bar f(x)}dx. $$ It follows that the left of \eqref{eq:stationaryTransitionCompact} converges to the left of \eqref{eq:stationaryTransition}. The proof will be completed if we can show that for $t>0$, \begin{align} \label{eq:doubleIntegralSeq} \lim_{i\to\infty} \int_{\cK_i} (P_t^ig)(x)e^{-\beta \bar f(x)}dx =&\lim_{i\to\infty}\int_{\cK_i}\bbE[g(\bx_t^{C,i})|\bx_0=x] e^{-\beta \bar f(x)}dx \\ \label{eq:doubleIntegralFinal} =& \int_{\cK}\bbE[g(\bx_t^{C})|\bx_0=x] e^{-\beta \bar f(x)}dx \\ \nonumber = &\int_{\cK}(P_tg)(x)e^{-\beta \bar f(x)}dx. \end{align} We assumed that $g$ was bounded, and so there is a number, $b$, such that $|g(x)|\le b$ for all $x\in\cK$. It follows from the definition of $P_t$ and $P_t^i$ that $|P_tg(x)|\le b$ and $|P_t^i g(x)|\le b$ for all $t$. Fix any $t>0$. The Brownian motion, $\bw$, is continuous, and so for each $t$, $\bw_s$ is bounded for $s\in [0,t]$. Now the form of (\ref{eq:AveContinuousY}) shows that $\by^C$, and thus $\bx^C$ must also be continuous, and thus also bounded for $s\in [0,t]$. Thus, for each realization, we see that there is a number $m$ such that $\bx_s^C\in \cK_i$ for all $s\in [0,t]$ and all $i\ge m$. Thus, we see that $\bx_s^{C}=\bx_s^{C,i}$ for $s\in [0,t]$. This argument shows that the integrand on the right of \eqref{eq:doubleIntegralSeq} converges pointwise to the integrand of \eqref{eq:doubleIntegralFinal}. So, the desired equality follows by the dominated convergence theorem. \hfill$\blacksquare$ \section{Bounded variance of the processes} \label{app:bounded} In this section, we derive variance bounds on all of the main processes, $\bbE[\|\bx_k^A\|^2]$ and $\bbE[\|\bx_t^C\|^2]$. The bound on $\bbE[\|\bx_t^C\|^2]$ is used to prove bounds on the discretization error from $\bx_t^M$ to $\bx_t^C$. The bound on $\bbE[\|\bx_k^A\|^2]$ is used to derive the time-uniform bounds on $W_1(\cL(\bx_k^A),\cL(\bx_k^C))$ from Lemma~\ref{lem:AtoC}. 
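For intuition, the following minimal simulation sketch (not used in any proof) runs the projected iterate $\bx_{k+1}^A=\Pi_{\cK}\big( \bx_k^A - \eta \nabla_x f(\bx_k^A,\bz_k) + \sqrt{2\eta/\beta}\,\hat \bw_k \big)$ on an assumed example (a box constraint, a quadratic $f$, and i.i.d.\ noise standing in for the L-mixing sequence) and records the running maximum of the squared norm along one trajectory, a crude stand-in for $\bbE[\|\bx_k^A\|^2]$, which remains bounded uniformly in $k$, in line with the bounds derived below.
\begin{verbatim}
import numpy as np

# Minimal simulation sketch (illustration only).  The box K = [-1,1]^n, the
# quadratic f(x,z) = 0.5*||x - z||^2, and all parameter values are assumptions;
# the i.i.d. noise z_k is a stand-in for the L-mixing sequence.
rng = np.random.default_rng(0)
n, eta, beta, steps = 5, 0.01, 10.0, 20000

def grad_f(x, z):
    return x - z                      # gradient in x of 0.5*||x - z||^2

def proj_K(x):
    return np.clip(x, -1.0, 1.0)      # Euclidean projection onto the box K

x = np.zeros(n)
worst_squared_norm = 0.0
for k in range(steps):
    z = 0.1 * rng.standard_normal(n)
    w = rng.standard_normal(n)        # Gaussian increment
    x = proj_K(x - eta * grad_f(x, z) + np.sqrt(2.0 * eta / beta) * w)
    worst_squared_norm = max(worst_squared_norm, float(x @ x))

print(worst_squared_norm)             # stays bounded uniformly in k
\end{verbatim}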
\subsection{Continuous-time bounds} \label{app:contBounds} In this section, we show that the assumption that $\bar f$ is strongly convex outside a ball implies that $\cV(x)=\frac{1}{2}\|x\|^2$ can be used as a Lyapunov function for $\bx_t^C$. In turn, we use this Lyapunov function to derive bounds on $\bbE[\|\bx_t^C\|^2]$. \const{LyapunovConst} \newcommand{\LyapunovConstVal}{(\ell+\mu)R^2 + R\|\nabla_x \bar f(0)\| + \frac{n}{\beta}} \begin{lemma} \label{lem:GeometricDrift} If $\bar f(x)$ is $\mu$-strongly convex outside a ball with radius $R$, then $\cV(x) = \frac{1}{2} x^\top x$ satisfies the following the geometric drift condition: \begin{align*} \cA \cV(x) \le -2\eta \mu \cV(x) + c_{\ref{LyapunovConst}} \eta. \end{align*} Here $c_{\ref{LyapunovConst}}$ is defined by \begin{align*} c_{\ref{LyapunovConst}} &= \LyapunovConstVal. \end{align*} \end{lemma} \paragraph{Proof} By Ito's formula, we have \begin{align*} d\cV(\bx^C_t) &= \nabla_x \cV ^\top d\bx^C_t + \frac{1}{2} d(\bx^C_t)^\top (\nabla_x^2\cV)d\bx^C_t\\ &= (\bx^C_t)^\top (-\eta \nabla_x \bar{f}(\bx^C_t)dt + \sqrt{\frac{2\eta}{\beta}}d\bw_t - \bv_t d\bmu_t) + \frac{1}{2}d(\bx^C_t)^\top d\bx^C_t\\ &= - \eta (\bx^C_t)^\top \nabla_x \bar{f}(\bx^C_t)dt + \sqrt{\frac{2\eta}{\beta}} (\bx^C_t)^\top d\bw_t - (\bx^C_t)^\top \bv_t d\bmu_t + \frac{\eta}{\beta} \Tr(d\bw_t d\bw_t^\top) \\ &= (- \eta (\bx^C_t)^\top \nabla_x \bar{f}(\bx^C_t)+ \frac{n \eta}{\beta})dt + \sqrt{\frac{2\eta}{\beta}} (\bx^C_t)^\top d\bw_t - (\bx^C_t)^\top \bv_t d\bmu_t. \end{align*} The third equality holds because $\int_0^t \bv_s d \bmu_s$ has bounded variation. The last equality is based on the fact that $d\bw_t d\bw_t^\top = dt \:I$. Since $\bv_t \in N_\cK(\bx_t^C)$, $\bmu_t$ is a nonnegative measure, and $0\in\cK$, we have that $- (\bx_t^C)^\top \bv_t d\bmu_t \le 0 $. Thus, the generator of the Lyapunov function satisfies \begin{align} \label{eq:generalLyapunovGen} \cA \cV(x) &\le - \eta x^\top \nabla_x \bar{f}(x)+ \frac{n \eta}{ \beta}. \end{align} If $\|x\|\ge R$, strong convexity outside a ball of radius $R$, along with the Cauchy-Schwartz inequality imply that \begin{align} \nonumber x^\top \nabla_x \bar f(x) & = (x-0)^\top (\nabla_x \bar f(x)-\nabla_x \bar f(0)) + x^\top \nabla_x \bar f(0) \\ \label{eq:bigXInnerProduct} &\ge \mu \|x\|^2 - R\|\nabla_x \bar f(0)\| \end{align} It follows that when $\|x\|\ge R$, we have that \begin{align*} \cA\cV(x) &\le -\eta\mu \|x\|^2 + \eta \left( R\|\nabla_x \bar f(0)\| + \frac{n}{\beta} \right) \\ &= -\eta 2\mu \cV(x) + \eta \left( R\|\nabla_x \bar f(0)\| + \frac{n}{\beta} \right). \end{align*} If $\|x\|\le R$, then the Cauchy-Schwartz inequality and the Lipschitz continuity imply that \begin{align} \nonumber - x^\top \nabla_x \bar{f}(x) &= - x^\top \left(\nabla_x \bar{f}(x)-\nabla_x \bar{f}(0) +\nabla_x \bar{f}(0) \right)\\ \nonumber &\le \|x \| \|\nabla_x \bar{f}(x)-\nabla_x \bar{f}(0)\| + R \|\nabla_x \bar{f}(0)\|\\ \nonumber &\le \ell \|x \|^2 + R \|\nabla_x \bar{f}(0)\| \\ \nonumber &= -\mu \|x\|^2 + (\ell+\mu) \|x\|^2 + R \|\nabla_x \bar{f}(0)\| \\ \label{eq:smallXInnerProduct} &\le -\mu \|x\|^2 + (\ell+\mu)R^2 + R\|\nabla_x \bar f(0)\|. \end{align} Note that \eqref{eq:bigXInnerProduct} implies that \eqref{eq:smallXInnerProduct} also holds whenever $\|x\|\ge R$. 
So, combining (\ref{eq:smallXInnerProduct}) with (\ref{eq:generalLyapunovGen}) shows that for all $x\in\cK$, \begin{align*} \cA\cV(x)&\le \eta \left( -\mu \|x\|^2 + (\ell+\mu)R^2 + R\|\nabla_x \bar f(0)\| \right) + \frac{n\eta}{\beta} \\ &=-\eta 2\mu \cV(x) + \eta \left( (\ell+\mu)R^2 + R\|\nabla_x \bar f(0)\| + \frac{n}{\beta} \right) \end{align*} \hfill$\blacksquare$ \begin{lemma} \label{lem:continuousBound} If $\bbE[\|\bx^C_0\|^2]\le \varsigma$, then for all $t\ge 0$, we have that $$ \bbE[\|\bx^C_t\|^2]\le \varsigma +\frac{1}{\mu} c_{\ref{LyapunovConst}}, $$ where $c_{\ref{LyapunovConst}}$ is defined in Lemma~\ref{lem:GeometricDrift}. \end{lemma} \paragraph{Proof} Recall that Lyapunov generator $\cA$ is defined as below \begin{align*} \cA\cV(x) = \lim_{t\downarrow0} \bbE\left[ \frac{1}{t}(\cV(\bx^C_t) - \cV(\bx^C_0))\vert \bx^C_0 = x\right]. \end{align*} Using Dynkin's formula and Lemma $\ref{lem:GeometricDrift}$ gives \begin{align*} \bbE\left[ \cV(\bx^C_t) - \cV(\bx^C_0) \right] &= \int_0^t \bbE\left[ \cA \cV(\bx^C_s)\right] ds\\ &\le - 2\eta \mu \int_0^t \bbE\left[ \cV(\bx^C_s)\right]ds + c_{\ref{LyapunovConst}}\eta t. \end{align*} Let $u_t = \bbE\left[ \cV(\bx^C_t)\right]$, $u_0 = \bbE\left[ \cV(\bx^C_0)\right]$. By Gr\"{o}nwall's inequality, we get \begin{align*} u_t &\le e^{-2\eta \mu t} u_0 + \eta c_{\ref{LyapunovConst}} \int_0^t e^{-2\eta \mu s} ds \\ &= e^{-2\eta \mu t} u_0 + \frac{c_{\ref{LyapunovConst}}}{2\mu}\left(1 -e^{-2\eta \mu t}\right) \\ &\le u_0 + \frac{c_{\ref{LyapunovConst}}}{2\mu}. \end{align*} Recalling that $u_t = \frac{1}{2}\bbE[\|\bx_t\|^2]$ and $\bbE[\|\bx_0\|^2]\le \varsigma$ completes the proof. \hfill$\blacksquare$ \subsection{Discrete-time bounds} \label{app:dis-timeBounds} Here we derive a uniform bound on $\bbE[\|\bx_k^A\|^2]$. \const{AlgBound} \newcommand{\AlgBoundVal}{\frac{4}{\mu} \left(\frac{ n}{\beta}+(\ell+\mu)R^2 + (2+R)\|\nabla_x \bar f(0)\| + \left(8 \ell^2 + \frac{1}{\mu}\right)\ell^2\cM_2(\bz)\right)} \begin{lemma} \label{lem:algBound} Assume that $\bbE[\|\bx_0^A\|^2] \le \varsigma$ and that $\eta\le \min\left\{1, \frac{\mu}{4\ell^2} \right\}$. There is a constant, $c_{\ref{AlgBound}}$ such that for all $k\ge 0$, we have that $$ \bbE[\|\bx_k^A\|^2]\le \varsigma + c_{\ref{AlgBound}}. $$ The constant is given by $$ c_{\ref{AlgBound}}=\AlgBoundVal $$ \end{lemma} \paragraph{Proof} Using non-expansiveness of the projection and then expanding the square of the norm gives: \begin{align*} \bbE \left[ \|\bx_{t+1}^A\|^2 \right] &= \bbE\left[ \left\| \Pi_{\cK}\left( \bx_t^A - \eta \nabla_x f(\bx_t^A,\bz_t) + \sqrt{\frac{2\eta}{\beta}}\hat \bw_t \right) - \Pi_{\cK}(0) \right\|^2 \right] \\ &\le \bbE\left[ \left\| \bx_t^A - \eta \nabla_x f(\bx_t^A,\bz_t) + \sqrt{\frac{2\eta}{\beta}}\hat \bw_t \right\|^2 \right] \\ &= \bbE\left[\|\bx_t^A\|^2 + \eta^2 \|\nabla_x f(\bx_t^A,\bz_t)\|^2 -2\eta (\bx_t^A)^\top \nabla_x f(\bx_t^A,\bz_t)\right] + \frac{2n\eta}{\beta}. \end{align*} Now we bound the term $\bbE\left[\|\nabla_x f(\bx_t^A,\bz_t)\|^2\right]$. For any $x\in\cK$, we have that \begin{align} \label{eq:gradientSquareBound1} \nonumber \|\nabla_x f(x,z)\|^2 &= \|\nabla_x f(x,z)-\nabla_x f(0,z)+ \nabla_x f(0,z)\|^2\\ &\le 2\|\nabla_x f(0,z)\|^2 + 2\ell^2\|x\|^2. 
\end{align} This leads to: \begin{multline} \nonumber \bbE\left[\|\bx_{t+1}^A\|^2\right] \le \left(1+2\ell^2\eta^2\right) \bbE\left[ \|\bx_{t}^A\|^2 \right] \\ +\left(\frac{2\eta n}{\beta} + 2\eta^2\bbE[ \|\nabla_x f(0,\bz_t)\|^2] \right) -2\eta \bbE\left[ (\bx_t^{A})^\top \nabla_x f(\bx_t^A,\bz_t) \right]. \end{multline} To bound the term $\bbE[\|\nabla_x f(0,\bz_t)\|^2]$, note that $\nabla_x \bar f(0)=\bbE[\nabla_x f(0,\hat\bz_t)]$, where $\hat \bz_t$ is identically distributed to $\bz_t$ and independent of $\bz_t$. \begin{align} \label{eq:gradientSquareBound2} \nonumber \bbE[\|\nabla_x f(0,\bz_t)\|^2] &= \bbE\left[ \| \nabla_x \bar f(0)+\nabla_x f(0,\bz_t)-\bbE[\nabla_x f(0,\hat\bz_t)]\|^2 \right] \\ \nonumber & \le 2\|\nabla_x \bar f(0)\|^2+ 2\bbE[\|\nabla_x f(0,\bz_t)-\bbE[\nabla_x f(0,\hat\bz_t)]\|^2] \\ \nonumber & \overset{\textrm{Jensen}}{\le} 2\|\nabla_x \bar f(0)\|^2+ 2\bbE[\|\nabla_x f(0,\bz_t)-\nabla_x f(0,\hat\bz_t)\|^2] \\ \nonumber & \le 2\|\nabla_x \bar f(0)\| + 2\ell^2 \bbE[\|\bz_t-\hat\bz_t\|^2] \\ \nonumber &\le 2\|\nabla_x \bar f(0)\| + 4\ell^2 \bbE[\|\bz_t\|^2+\|\hat\bz_t\|^2] \\ &\le 2\|\nabla_x \bar f(0)\| + 8 \ell^2 \cM_2(\bz), \end{align} where $\cM_2(\bz)$ is a bound on $\bbE[\|\bz_t\|^2]$ from (\ref{eq:Mbounded}). So, we have a bound of the form \begin{multline} \label{eq:meanSquareIntermediateZ} \bbE\left[\|\bx_{t+1}^A\|^2\right] \le \left(1+2\ell^2\eta^2\right) \bbE\left[ \|\bx_{t}^A\|^2 \right] \\ +\left(\frac{2\eta n}{\beta} + \eta^2 \left(4\|\nabla_x \bar f(0)\| + 16 \ell^2 \cM_2(\bz)\right) \right) -2\eta \bbE\left[ (\bx_t^{A})^\top \nabla_x f(\bx_t^A,\bz_t) \right]. \end{multline} To bound the inner product term, note that \begin{align*} \bbE\left[ (\bx_t^A)^\top \nabla_x f(\bx_t^A,\bz_t) \right] &= \bbE\left[ (\bx_t^A)^\top \left(\nabla_x f(\bx_t^A,\bz_t) - \nabla_x f(\bx_t^A,\hat\bz_t)\right) \right] + \bbE\left[ (\bx_t^A)^\top \nabla_x f(\bx_t^A,\hat\bz_t) \right] \\ &= \bbE\left[ (\bx_t^A)^\top \left(\nabla_x f(\bx_t^A,\bz_t) - \nabla_x f(\bx_t^A,\hat\bz_t)\right) \right] + \bbE\left[ (\bx_t^A)^\top \nabla_x \bar f(\bx_t^A) \right]. \end{align*} The second equality follows because $\hat\bz_t$ is independent of $\bx_t^A$ and identically distributed to $\bz_t$. So, we can use the Cauchy-Schwartz inequality on the first term on the right and (\ref{eq:smallXInnerProduct}) on the second term to give: \begin{align*} \bbE\left[ (\bx_t^A)^\top \nabla_x f(\bx_t^A,\bz_t) \right] \ge -\ell \bbE[\|\bx_t^A\| \|\bz_t-\hat\bz_t\|] +\mu\bbE[\|\bx_t^A\|^2] -\left((\ell+\mu)R^2 + R\|\nabla_x \bar f(0)\|\right). \end{align*} Using a completing-the-squares argument shows that for any numbers $a$ and $b$ \begin{align*} \frac{\mu}{2}a^2 -\ell ab &= \frac{\mu}{2}\left(a - \frac{\ell}{\mu}b\right)^2 - \frac{\ell^2}{2\mu}b^2 \\ &\ge -\frac{\ell^2}{2\mu}b^2. \end{align*} Setting $a = \|\bx_t^A\|$ and $b = \|\bz_t-\hat\bz_t\|$ leads to a bound of the form \begin{align} \label{eq:innerProductBound} \nonumber \bbE\left[ (\bx_t^A)^\top \nabla_x f(\bx_t^A,\bz_t) \right]&\ge \frac{\mu}{2}\bbE[\|\bx_t^A\|^2]-\frac{\ell^2}{2\mu}\bbE[\|\bz_t-\hat\bz_t\|^2] -\left((\ell+\mu)R^2 + R\|\nabla_x \bar f(0)\|\right) \\ &\ge \frac{\mu}{2}\bbE[\|\bx_t^A\|^2]-\frac{\ell^2}{\mu} \cM_2(\bz) -\left((\ell+\mu)R^2 + R\|\nabla_x \bar f(0)\|\right). 
\end{align} Plugging the new bounds into (\ref{eq:meanSquareIntermediateZ}) gives \begin{multline} \nonumber \bbE\left[\|\bx_{t+1}^A\|^2\right]\le \left(1-\mu\eta+2\ell^2\eta^2\right) \bbE\left[ \|\bx_{t}^A\|^2 \right] \\ + \left(\frac{2\eta n}{\beta} + \eta^2\left( 4\|\nabla_x \bar f(0)\| + 16 \ell^2 \cM_2(\bz)\right) \right) +2\eta \left( \frac{\ell^2}{\mu}\cM_2(\bz) +\left((\ell+\mu)R^2 + R\|\nabla_x \bar f(0)\|\right)\right) \end{multline} Note that if $\eta \le \frac{\mu}{4\ell^2}$, then $$ 1-\mu\eta+2\ell^2\eta^2\le 1-\frac{\mu\eta}{2}. $$ Furthermore, if $\eta \le 1$, we get the simplified bound: \begin{multline} \label{eq:algIterateBound} \bbE\left[\|\bx_{t+1}^A\|^2\right]\le \left(1-\frac{\mu\eta}{2}\right) \bbE\left[ \|\bx_{t}^A\|^2 \right] \\ + 2\eta\left(\frac{ n}{\beta} + 2\|\nabla_x \bar f(0)\| + 8 \ell^2 \cM_2(\bz) + \frac{\ell^2}{\mu}\cM_2(\bz) + (\ell+\mu)R^2 + R\|\nabla_x \bar f(0)\| \right). \end{multline} Now for any $a\in [0,1)$ and any $b\ge 0$, if $u_t\ge 0$ satisfies $$ u_{t+1} \le au_t +b $$ then \begin{align*} u_{t} &\le a^t u_0 + b\sum_{k=0}^{t-1} a^k \\ &= a^t u_0 + b\frac{1-a^t}{1-a} \\ &\le u_0 + \frac{b}{1-a} \end{align*} Applying this bound to $\bbE[\|\bx_t^A\|^2]$ and using that $\bbE[\|\bx_0^A\|^2]\le \varsigma$ gives \begin{align*} \nonumber \bbE[\|\bx_t^A\|^2] \le \varsigma + \frac{4}{\mu} \left(\frac{ n}{\beta}+(\ell+\mu)R^2 + (2+R)\|\nabla_x \bar f(0)\| + \left(8 \ell^2 + \frac{1}{\mu}\right)\ell^2\cM_2(\bz)\right). \end{align*} \hfill$\blacksquare$ \section{Proofs of averaging lemmas} \label{app:aveLemmas} \paragraph{Proof of Lemma~\ref{lem:MeanBetween1}} Non-expansiveness of the projection and the definitions of $\bx_t^{M,s}$ and $\bx_t^{B,s}$ show that: \begin{align} \nonumber \MoveEqLeft \|\bx_{t+1}^{M,s}-\bx_{t+1}^{B,s}\|^2 \\ \nonumber & \le \left\| \bx_t^{M,s}-\bx_t^{B,s} +\eta \left( \bbE[ \nabla_x f(\bx_t^{M,s},\bz_t) | \cF_{t-s-1}\lor \cG_t ] - \bbE[ \nabla_x f(\bx_t^{M,s},\bz_t) | \cF_{t-s} \lor \cG_t] \right) \right\|^2 \\ \nonumber &= \|\bx_t^{M,s}-\bx_t^{B,s}\|^2 + 2\eta \left( \bx_t^{M,s}-\bx_t^{B,s}\right)^\top \\ \nonumber & \quad{} \left( \bbE[ \nabla_x f(\bx_t^{M,s},\bz_t) | \cF_{t-s-1} \lor \cG_t] - \bbE[ \nabla_x f(\bx_t^{M,s},\bz_t) | \cF_{t-s} \lor \cG_t ] \right) \\ &\quad{} \quad{} + \eta^2 \left\|\bbE[ \nabla_x f(\bx_t^{M,s},\bz_t) | \cF_{t-s-1}\lor \cG_t ] - \bbE[ \nabla_x f(\bx_t^{M,s},\bz_t) | \cF_{t-s}\lor \cG_t ] \right\|^2. \label{eq:meanBetweenExpand} \end{align} We will show that the second term on the right of (\ref{eq:meanBetweenExpand}) has mean zero, and then we will bound the mean of the third term on the right of (\ref{eq:meanBetweenExpand}). By construction, we have that $\bx_t^{M,s}$ is $\cF_{t-s-1}\lor \cG_{t}$-measurable, while $\bx_t^{B,s}$ is $\cF_{t-s-2}\lor \cG_{t}$-measurable. Thus, the only part of the second term on the right of (\ref{eq:meanBetweenExpand}) which is not $\cF_{t-s-1}\lor \cG_t$-measurable is $\bbE[ \nabla_x f(\bx_t^{M,s},\bz_t) | \cF_{t-s} \lor \cG_t ]$. Therefore, the tower-property gives: \begin{align*} \MoveEqLeft \bbE\left[ \left( \bx_t^{M,s}-\bx_t^{B,s}\right)^\top \left( \bbE[ \nabla_x f(\bx_t^{M,s},\bz_t) | \cF_{t-s-1}\lor \cG_t ] - \bbE[ \nabla_x f(\bx_t^{M,s},\bz_t) | \cF_{t-s}\lor \cG_t ] \right) \right] \\ &= \bbE\left[ \left( \bx_t^{M,s}-\bx_t^{B,s}\right)^\top \left( \bbE[ \nabla_x f(\bx_t^{M,s},\bz_t) | \cF_{t-s-1}\lor \cG_t ] \right. \right. \\ & \left. \left. 
\quad \quad \quad \quad\quad \quad- \bbE\left[\bbE[ \nabla_x f(\bx_t^{M,s},\bz_t) | \cF_{t-s}\lor \cG_t ] \middle | \cF_{t-s-1}\lor \cG_t \right] \right) \right] \\
&=0.
\end{align*}
Now we focus on bounding the mean of the third term on the right of (\ref{eq:meanBetweenExpand}). Recall that $\bx_t^{M,s}$ is $\cF_{t-s-1}\lor \cG_t$-measurable. Furthermore, since $\cF_{t-s}^+$ is independent of $\cF_{t-s}\lor \cG_t$, it must also be independent of $\cF_{t-s-1}\lor \cG_t$ because $\cF_{t-s-1}\subset \cF_{t-s}$. It follows that
\begin{align*}
\bbE[\nabla_x f(\bx_t^{M,s},\bbE[\bz_t|\cF_{t-s}^+]) | \cF_{t-s}\lor \cG_t] = \bbE[\nabla_x f(\bx_t^{M,s},\bbE[\bz_t|\cF_{t-s}^+]) | \cF_{t-s-1}\lor \cG_t].
\end{align*}
Thus, adding and subtracting $\bbE\left[\nabla_x f(\bx_t^{M,s},\bbE[\bz_t|\cF_{t-s}^+])\middle|\cF_{t-s}\lor \cG_t \right]$ gives
\begin{align}
\nonumber
\MoveEqLeft[0] \left\|\bbE[ \nabla_x f(\bx_t^{M,s},\bz_t) | \cF_{t-s-1}\lor \cG_t ] - \bbE[ \nabla_x f(\bx_t^{M,s},\bz_t) | \cF_{t-s}\lor \cG_t ] \right\|^2 \\
\nonumber
&\le 2 \left\|\bbE\left[ \nabla_x f(\bx_t^{M,s},\bz_t)- \nabla_x f(\bx_t^{M,s},\bbE[\bz_t | \cF_{t-s}^+] ) \middle| \cF_{t-s-1}\lor \cG_t \right] \right\|^2 \\
\label{eq:conditionalSplit}
&\quad \quad + 2 \left\|\bbE\left[ \nabla_x f(\bx_t^{M,s},\bz_t) - \nabla_x f(\bx_t^{M,s},\bbE[\bz_t|\cF_{t-s}^+]) \middle| \cF_{t-s}\lor \cG_t \right] \right\|^2.
\end{align}
To bound the second term on the right of (\ref{eq:conditionalSplit}), we have
\begin{align*}
\MoveEqLeft \bbE\left[ \left\|\bbE\left[ \nabla_x f(\bx_t^{M,s},\bz_t) - \nabla_x f(\bx_t^{M,s},\bbE[\bz_t|\cF_{t-s}^+]) \middle| \cF_{t-s}\lor \cG_t \right] \right\|^2 \right] \\
&\overset{\textrm{Jensen}}{\le} \bbE\left[ \left\|\nabla_x f(\bx_t^{M,s},\bz_t) - \nabla_x f(\bx_t^{M,s},\bbE[\bz_t|\cF_{t-s}^+]) \right\|^2 \right] \\
&\overset{\textrm{Lipschitz}}{\le} \ell^2 \bbE\left[\| \bz_t - \bbE[\bz_t|\cF_{t-s}^+]\|^2 \right] \\
&\le \ell^2 \psi_2(s,\bz)^2.
\end{align*}
Here $\psi_2(s,\bz)$ was defined in \eqref{eq:influenceTau}. The first term on the right of (\ref{eq:conditionalSplit}) is bounded by analogous calculations with $\cF_{t-s-1}$ used in place of $\cF_{t-s}$, and gives rise to the same bound of $\ell^2 \psi_2(s,\bz)^2$. Plugging these bounds into (\ref{eq:meanBetweenExpand}) shows that
\begin{equation}
\label{eq:meanBetweenAve}
\bbE\left[ \|\bx_{t+1}^{M,s}-\bx_{t+1}^{B,s}\|^2 \right] \le \bbE\left[ \|\bx_{t}^{M,s}-\bx_{t}^{B,s}\|^2 \right] +4\eta^2 \ell^2 \psi_2(s,\bz)^2.
\end{equation}
Iterating \eqref{eq:meanBetweenAve} $t$ times and using the fact that $\bx_0^{B,s}=\bx_0^{M,s}$ shows that
\begin{equation*}
\bbE\left[\|\bx_t^{M,s}-\bx_t^{B,s}\|^2 \right] \le 4\eta^2 t \ell^2 \psi_2(s,\bz)^2.
\end{equation*}
Using the fact that
$$\bbE[\|\bx_t^{M,s}-\bx_t^{B,s}\|]\le \sqrt{\bbE\left[\|\bx_t^{M,s}-\bx_t^{B,s}\|^2 \right]}$$
gives the result.
\hfill$\blacksquare$

\paragraph{Proof of Lemma~\ref{lem:MeanBetween2}}
Non-expansiveness of the projection and the definitions of $\bx_t^{B,s}$ and $\bx_t^{M,s+1}$ show that
\begin{multline}
\nonumber
\|\bx_{t+1}^{B,s}-\bx_{t+1}^{M,s+1}\| \\
\nonumber
\le \left\|\bx_{t}^{B,s}-\bx_t^{M,s+1}+\eta \left( \bbE[ \nabla_x f(\bx_t^{M,s+1},\bz_t) | \cF_{t-s-1} \lor \cG_t ] - \bbE[ \nabla_x f(\bx_t^{M,s},\bz_t) | \cF_{t-s-1} \lor \cG_t ] \right) \right\|.
\end{multline}
Let $\|\bx\|_2=\sqrt{\bbE[\|\bx\|^2]}$ denote the $2$-norm over random vectors.
The triangle inequality then implies that \begin{align} \nonumber \hspace{-10pt} \nonumber &\left\|\bx_{t}^{B,s}-\bx_t^{M,s+1}+\eta \left( \bbE[ \nabla_x f(\bx_t^{M,s+1},\bz_t) | \cF_{t-s-1} \lor \cG_t ] - \bbE[ \nabla_x f(\bx_t^{M,s},\bz_t) | \cF_{t-s-1} \lor \cG_t ] \right) \right\|_2 \\ \nonumber &\le \left\|\bx_{t}^{B,s}-\bx_t^{M,s+1}\right\|_2 +\eta \left\|\bbE[ \nabla_x f(\bx_t^{M,s+1},\bz_t) -\nabla_x f(\bx_t^{M,s},\bz_t) | \cF_{t-s-1} \lor \cG_t ] \right\|_2. \label{eq:twoNormSplit} \end{align} For any random vector, $\bx$, and any $\sigma$-algebra, $\cF$, Jensen's inequality followed by the tower property implies that $\bbE[\|\bbE[\bx|\cF]\|^2]\le \bbE[\|\bx\|^2]$. Applying this fact to the second term on the right of (\ref{eq:twoNormSplit}) and then using the Lipschitz property shows that \begin{align*} \left\|\bbE[ \nabla_x f(\bx_t^{M,s+1},\bz_t) -\nabla_x f(\bx_t^{M,s},\bz_t) | \cF_{t-s-1} \lor \cG_t ] \right\|_2 \le \ell \|\bx_t^{M,s+1}-\bx_t^{M,s}\|_2. \end{align*} Plugging this bound into (\ref{eq:twoNormSplit}) then adding and subtracting $\bx_t^{B,s}$ gives: \begin{align} \nonumber \MoveEqLeft[0] \|\bx_{t+1}^{B,s}-\bx_{t+1}^{M,s+1}\|_2 \\ \nonumber &\le \|\bx_{t}^{B,s}-\bx_t^{M,s+1}\|_2+\eta \ell \|\bx_t^{M,s+1}-\bx_t^{M,s}\|_2 \\ \nonumber &\le \|\bx_{t}^{B,s}-\bx_t^{M,s+1}\|_2+\eta \ell \|\bx_t^{M,s+1}-\bx_t^{B,s}\|_2 + \eta \ell \|\bx_t^{B,s}-\bx_t^{M,s}\|_2 \\ &= (1+\eta\ell) \|\bx_{t}^{B,s}-\bx_t^{M,s+1}\|_2 + \eta \ell \|\bx_t^{B,s}-\bx_t^{M,s}\|_2. \end{align} Using the fact that $\bx_0^{B,s}=\bx_0^{M,s+1}$ and iterating this inequality shows that: \begin{align*} & \|\bx_t^{B,s}-\bx_t^{M,s+1}\|_2 \\ &\le \eta \ell \sum_{k=0}^{t-1}(1+\eta \ell)^{k} \|\bx_{t-k}^{B,s}-\bx_{t-k}^{M,s}\|_2\\ &\overset{\textrm{Lemma~\ref{lem:MeanBetween1}}}{\le} \left(2\ell \psi_2(s,\bz) \eta \sqrt{t}\right) \eta \ell \sum_{k=0}^{t-1}(1+\eta\ell)^k\\ &= \left(2\ell \psi_2(s,\bz) \eta \sqrt{t}\right) \left((1+\eta \ell)^t-1\right) \\ &\le \left(2\ell \psi_2(s,\bz) \eta \sqrt{t}\right) \left(e^{\eta t \ell}-1\right). \end{align*} The final inequality follows by taking logarithms and using the fact that $\log(1+\eta \ell)\le \eta \ell$. \hfill$\blacksquare$ \section{Discretization bounds} \label{app:disBounds} \paragraph{Proof of Lemma~\ref{lem:C2D}} Recall that $\by_t^D= \by_{\floor{t}}^C$ and so for all $k \in \bbN$, $\by_k^D= \by_{k}^C$. By the construction of Skorokhod solutions to the process $\bx_t^C$ and $\bx_t^D$, and using Theorem $\ref{thm:skorokhodConst}$, we have for all $ k \in \bbN$ \begin{align*} \left\|\bx_k^C - \bx_k^D \right\| \le (c_{\ref{diamBound}}+1) \sup_{0 \le s \le k } \left\| \by_s^C - \by_{\lfloor s \rfloor}^C \right\|. \end{align*} Since \begin{align*} \by_t^C = \bx_0^C - \eta \int_0^t \nabla_x \bar{f}(\bx_s^C) ds + \sqrt{\frac{2 \eta}{\beta} } \bw_t, \end{align*} the triangle inequality implies that \begin{align*} \left\|\bx_k^C - \bx_k^D \right\| &\le (c_{\ref{diamBound}}+1) \eta \sup_{s \in [0,k]} \left\| \int_{\lfloor s \rfloor}^s \nabla_x \bar{f}(\bx_{\tau}^C) d\tau \right\| + (c_{\ref{diamBound}}+1) \sqrt{\frac{2 \eta}{\beta} } \sup_{s \in [0,k]} \left\| \bw_s - \bw_{\lfloor s \rfloor}\right\|. \end{align*} $\bbE\left[ \sup_{s \in [0,k]} \left\| \bw_s - \bw_{\lfloor s \rfloor}\right\|\right]$ is upper bounded by $2n\sqrt{\log(4k)}$. See Lemma 9 in \cite{lamperski2021projected}. So, the remaining work is to bound the first term on the right. 
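Before doing so, we remark that the cited bound on the Brownian term is easy to probe numerically; the following throwaway Monte Carlo sketch (purely illustrative; the discretization step, dimension, horizon, and number of trials are assumptions of the example) compares an empirical estimate of $\bbE\left[ \sup_{s \in [0,k]} \left\| \bw_s - \bw_{\lfloor s \rfloor}\right\|\right]$ with $2n\sqrt{\log(4k)}$.
\begin{verbatim}
import numpy as np

# Throwaway Monte Carlo check (illustration only) of the cited bound
#   E[ sup_{s in [0,k]} || w_s - w_{floor(s)} || ] <= 2 n sqrt(log(4k)).
rng = np.random.default_rng(1)
n, k, dt, trials = 3, 8, 1e-3, 200
steps = int(round(1.0 / dt))

estimates = []
for _ in range(trials):
    sup_val = 0.0
    for _interval in range(k):
        # On [i, i+1), w_s - w_i is a Brownian motion started at 0, and these
        # pieces are independent across intervals by independent increments.
        path = np.cumsum(np.sqrt(dt) * rng.standard_normal((steps, n)), axis=0)
        sup_val = max(sup_val, float(np.linalg.norm(path, axis=1).max()))
    estimates.append(sup_val)

print(np.mean(estimates), 2 * n * np.sqrt(np.log(4 * k)))
\end{verbatim}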
Taking the expectation of the first term, we have
\begin{align*}
\label{eq:FirstSupremeBound}
&\bbE\left[ \sup_{s \in [0,k]}\left\| \int_{\lfloor s \rfloor}^s \nabla_x \bar{f}(\bx_{\tau}^C) d\tau \right\|\right]\\
&= \bbE\left[ \max_{i=0,\cdots, k-1}\sup_{s \in [i,i+1]}\left\| \int_i^s \nabla_x \bar{f}(\bx_{\tau}^C) d\tau \right\|\right]\\
&\le \bbE\left[ \left( \sum_{i=0}^{k-1}\left(\sup_{s \in [i,i+1]}\left\| \int_i^s \nabla_x \bar{f}(\bx_{\tau}^C) d\tau \right\|\right)^2 \right)^{1/2}\right]\\
&\overset{\textrm{Jensen}}{\le} \left( \bbE\left[ \sum_{i=0}^{k-1}\left(\sup_{s \in [i,i+1]}\left\| \int_i^s \nabla_x \bar{f}(\bx_{\tau}^C) d\tau \right\|\right)^2 \right]\right)^{1/2}\\
&= \left( \sum_{i=0}^{k-1}\bbE\left[\left(\sup_{s \in [i,i+1]}\left\| \int_i^s \nabla_x \bar{f}(\bx_{\tau}^C) d\tau \right\|\right)^2 \right]\right)^{1/2}.
\end{align*}
So we want to upper bound the supremum inside the expectation. We can show for all $ s \in [0,k]$,
\begin{align*}
\left\| \int_{\lfloor s \rfloor}^s \nabla_x \bar{f}(\bx_{\tau}^C) d\tau \right\| &\overset{\textrm{triangle inequality}}{\le} \int_{\lfloor s \rfloor}^s \left\| \nabla_x \bar{f}(\bx_{\tau}^C) \right\| d\tau \\
&\le \int_{\lfloor s \rfloor}^{\lfloor s \rfloor+1} \left\| \nabla_x \bar{f}(\bx_{\tau}^C) \right\| d\tau \\
&\overset{\textrm{Jensen}}{\le} \left( \int_{\lfloor s \rfloor}^{\lfloor s \rfloor+1} \| \nabla_x \bar{f}(\bx_{\tau}^C) \|^2 d\tau \right)^{1/2}.
\end{align*}
Therefore,
\begin{align*}
\bbE\left[ \sup_{s \in [0,k]}\left\| \int_{\lfloor s \rfloor}^s \nabla_x \bar{f}(\bx_{\tau}^C) d\tau \right\|\right] &\le \left( \sum_{i=0}^{k-1}\bbE\left[\int_{i}^{i+1} \| \nabla_x \bar{f}(\bx_{\tau}^C) \|^2 d\tau \right]\right)^{1/2}\\
&\overset{\textrm{Fubini}}{=} \left( \sum_{i=0}^{k-1}\int_{i}^{i+1} \bbE\left[\| \nabla_x \bar{f}(\bx_{\tau}^C) \|^2 \right]d\tau \right)^{1/2}.
\end{align*}
Here, we can see that it suffices to bound $\bbE\left[ \| \nabla_x \bar{f}(\bx_{t}^C) \|^2 \right]$. We have assumed that $0 \in \cK$, and so we have
\begin{align*}
\left\| \nabla_x \bar{f}(\bx_{t}^C) \right\|^2 &= \left\| \nabla_x \bar{f}(\bx_{t}^C) - \nabla_x \bar{f}(0) + \nabla_x \bar{f}(0)\right\|^2 \\
&\le 2\left\| \nabla_x \bar{f}(\bx_{t}^C) - \nabla_x \bar{f}(0)\right\|^2 + 2 \left\| \nabla_x \bar{f}(0)\right\|^2 \\
&\le 2\ell^2\left\| \bx_{t}^C \right\|^2 + 2 \left\| \nabla_x \bar{f}(0) \right\|^2.
\end{align*}
Plugging in the bound from Lemma~\ref{lem:continuousBound} shows that
\begin{align*}
\bbE\left[ \| \nabla_x \bar{f}(\bx_{t}^C) \|^2 \right] &\le \bbE\left[ 2\ell^2 \| \bx_{t}^C\|^2 + 2 \| \nabla_x \bar{f}(0)\|^2\right]\\
&= 2 \ell^2 \bbE\left[ \| \bx_{t}^C\|^2 \right]+ 2 \| \nabla_x \bar{f}(0)\|^2\\
&\le 2\ell^2 \left( \varsigma +\frac{1}{\mu} c_{\ref{LyapunovConst}} \right) + 2 \| \nabla_x \bar{f}(0)\|^2.
\end{align*}
Therefore, we have
\begin{align*}
\bbE\left[ \sup_{s \in [0,k]}\left\| \int_{\lfloor s \rfloor}^s \nabla_x \bar{f}(\bx_{\tau}^C) d\tau \right\|\right] &\le \left( \sum_{i=0}^{k-1}\int_{i}^{i+1} \bbE\left[\| \nabla_x \bar{f}(\bx_{\tau}^C) \|^2 \right]d\tau \right)^{1/2}\\
&\le \sqrt{2\ell^2 \left(\varsigma+\frac{1}{\mu} c_{\ref{LyapunovConst}} \right) + 2 \| \nabla_x \bar{f}(0)\|^2 } \sqrt{k} \\
&\le \left(\sqrt{\frac{2}{\mu}\ell^2 c_{\ref{LyapunovConst}} + 2 \| \nabla_x \bar{f}(0)\|^2} + \sqrt{2 \ell^2} \sqrt{\varsigma}\right) \sqrt{k}.
\end{align*}
Setting
\begin{align*}
c_{\ref{BoundCtoD1}} &= \BoundCtoDOneVal\\
c_{\ref{BoundCtoD2}} &= \BoundCtoDTwoVal \\
c_{\ref{BoundCtoD3}} &= \BoundCtoDThreeVal
\end{align*}
where $c_{\ref{LyapunovConst}}$ is defined in Lemma~\ref{lem:GeometricDrift} and $c_{\ref{diamBound}}$ is defined in Theorem~\ref{thm:skorokhodConst}, and combining this with the bound on the second supremum term, gives the desired result.
\hfill$\blacksquare$

\paragraph{Proof of Lemma~\ref{lem:M2D}}
The argument bounding the difference between $\bx_t^M$ and $\bx_t^D$ closely follows the proof of Lemma 10 in \citep{lamperski2021projected}. Recall that $\bx_t^M$ is a discretized process and $\bx_t^M = \bx_{\floor{t}}^M$. We also have $\bx_t^M = \cS(\cD(\by_t^M))$, where $\by_t^M$ is defined by
\begin{align*}
\by_t^M = \bx_0^M - \eta \int_0^t \nabla \bar{f} (\bx_{\floor{s}}^M) ds + \sqrt{\frac{2 \eta}{\beta}} \bw_t.
\end{align*}
The intermediate process $\bx_t^D$ satisfies $\bx_t^D = \cS(\cD(\by_t^C))$, where
\begin{align*}
\by_t^C = \bx_0^C - \eta \int_0^t \nabla \bar{f} (\bx_s^C) ds + \sqrt{\frac{2 \eta}{\beta}} \bw_t.
\end{align*}
So in particular,
\begin{align*}
\bx_{k+1}^M &= \Pi_{\cK} \left(\bx_k^M + \by_{k+1}^M - \by_k^M\right) \\
&= \Pi_{\cK} \left(\bx_k^M -\eta \nabla \bar{f} (\bx_{k}^M) + \sqrt{\frac{2 \eta}{\beta}} (\bw_{k+1}- \bw_{k}) \right)\\
\bx_{k+1}^D &= \Pi_{\cK} \left(\bx_k^D + \by_{k+1}^C - \by_k^C \right) \\
&= \Pi_{\cK} \left(\bx_k^D -\eta \int_{k}^{k+1} \nabla \bar{f} (\bx_{s}^C) ds + \sqrt{\frac{2 \eta}{\beta}} (\bw_{k+1}- \bw_{k})\right).
\end{align*}
Define a difference process
$$\brho_t = \left( \bx_t^M + \by_t^M - \by_{\floor{t}}^M \right) - \left( \bx_t^D + \by_t^C - \by_{\floor{t}}^C \right).$$
Note that at integers $k \in \bbN$, $\brho_k =\bx_k^M - \bx_k^D$ and for $t \in [k, k+1)$, we have
$$ \brho_t =\left(\bx_k^M-\by_k^M - \bx_k^D + \by_k^D\right) + \by_t^M - \by_t^C. $$
It follows that
\begin{align*}
d \brho_t = d(\by_t^M - \by_t^C) = \eta \left(\nabla \bar{f}(\bx_t^C) - \nabla \bar{f}(\bx_t^M)\right) dt.
\end{align*}
By construction, $\brho_t$ is a continuous bounded variation process on the interval $[k, k+1)$. Thus, when $\brho_t \neq 0 $, we can calculate $d\|\brho_t\|$ using the chain rule.
\begin{align*}
d \left\|\brho_t \right\| &\overset{\textrm{chain rule}}{=} \left( \frac{\brho_t}{\left\| \brho_t \right\|} \right)^\top d \brho_t \\
&= \left( \frac{\brho_t}{\left\| \brho_t \right\|} \right)^\top \eta \left( \nabla \bar{f}(\bx_t^C) - \nabla \bar{f}(\bx_t^M)\right) dt\\
& \overset{\textrm{Cauchy-Schwarz}}{\le} \eta \left\| \nabla \bar{f}(\bx_t^C) - \nabla \bar{f}(\bx_t^M) \right\| dt\\
& \overset{\textrm{Lipschitz}}{\le} \eta \ell \left\| \bx_t^C - \bx_t^M \right\| dt \\
& = \eta \ell \left\| \bx_t^C - \bx_t^D + \bx_t^D - \bx_t^M \right\| dt \\
& \overset{\textrm{triangle}}{\le} \eta \ell \left( \left\| \bx_t^C - \bx_t^D \right\| + \left\| \bx_t^D - \bx_t^M \right\| \right) dt.
\end{align*}
To include the case that $\brho_t = 0$, we use Lemma 19 from \citep{lamperski2021projected}.
The analysis proceeds as follows: for $t \in [k,k+1)$,
\begin{align*}
\MoveEqLeft \left\| \brho_t \right\| = \left\| \brho_k \right\| + \int_{k}^{t} d \left\|\brho_s \right\|\\
& =\left\| \brho_k \right\| + \lim_{\epsilon \downarrow 0} \int_k^{t} \indic \left( \left\| \brho_s \right\| \ge \epsilon \right)d \left\|\brho_s \right\| \\
& \le \left\| \brho_k \right\| + \lim_{\epsilon \downarrow 0} \int_k^{t} \indic \left( \left\| \brho_s \right\| \ge \epsilon \right) \eta \ell \left( \left\| \bx_s^C - \bx_s^D \right\| + \left\| \bx_s^D - \bx_s^M \right\| \right) ds \\
& \le (1+ \eta \ell )\left\| \brho_k \right\| + \eta \ell\int_k^t \left\| \bx_s^C - \bx_s^D \right\| ds.
\end{align*}
The second equality follows from Lemma 19 of \cite{lamperski2021projected}. The last inequality holds because $\brho_k = \bx_s^M - \bx_s^D$ for all $s \in [k, k+1)$ and $t-k\le 1$. Non-expansiveness of the convex projection implies that
\begin{align}
\left\| \brho_k \right\| = \left\| \bx_k^M - \bx_k^D \right\| \le \lim_{t\uparrow k } \left\| \brho_t \right\|.
\end{align}
Letting $t\uparrow k+1$ gives
\begin{align*}
\left\| \brho_{k+1} \right\| \le (1+ \eta \ell )\left\| \brho_k \right\| + \eta \ell\int_k^{k+1} \left\| \bx_s^C - \bx_s^D \right\| ds.
\end{align*}
Iterating this inequality, and using the assumption that $\bx_0^M = \bx_0^D$, gives
\begin{align*}
\left\| \brho_k \right\| \le \sum_{i=0}^{k-1} \eta \ell (1+\eta \ell )^{k-i-1} \int_i^{i+1} \left\| \bx_s^C - \bx_s^D \right\| ds.
\end{align*}
Taking expectations and using Lemma \ref{lem:C2D} gives
\begin{align*}
\bbE\left[ \left\| \brho_k \right\|\right] &\le \sum_{i=0}^{k-1} \eta \ell (1+\eta \ell )^{k-i-1} \int_i^{i+1} \left( (c_{\ref{BoundCtoD1}} + c_{\ref{BoundCtoD2}} \sqrt{\varsigma} ) \eta \sqrt{s} + c_{\ref{BoundCtoD3}} \sqrt{\eta \log(4s)} \right) ds \\
&\le \eta\ell \left( (c_{\ref{BoundCtoD1}} + c_{\ref{BoundCtoD2}} \sqrt{\varsigma} ) \eta \sqrt{k} + c_{\ref{BoundCtoD3}} \sqrt{\eta \log(4k)} \right) \sum_{i=0}^{k-1} (1+\eta \ell )^{k-i-1} \\
&\le \left( (c_{\ref{BoundCtoD1}} + c_{\ref{BoundCtoD2}} \sqrt{\varsigma} ) \eta \sqrt{k} + c_{\ref{BoundCtoD3}} \sqrt{\eta \log(4k)} \right) \left( (1+\eta \ell)^k -1 \right)\\
&\le \left( (c_{\ref{BoundCtoD1}} + c_{\ref{BoundCtoD2}} \sqrt{\varsigma} ) \eta \sqrt{k} + c_{\ref{BoundCtoD3}} \sqrt{\eta \log(4k)} \right) \left( e^{\eta \ell k} -1 \right).
\end{align*}
The last inequality is based on the fact that $(1+\eta \ell)^k \le e^{ \eta \ell k}$ for all $\eta \ell >0$. Recall that for all $k \in \bbN$, $\brho_k =\bx_k^M - \bx_k^D$, which gives the desired result.
\hfill$\blacksquare$

\section{Conclusion of the proof of Lemma~\ref{lem:AtoC}}
\label{app:switching}
This section uses a ``switching'' trick to derive a bound on $W_1(\cL(\bx_k^A),\cL(\bx_k^C))$ that is uniform in time. The essential idea is to utilize a family of processes that switch from the dynamics of $\bx_k^A$ to the dynamics of $\bx_k^C$, and to use contractivity of the law of $\bx_k^C$ to derive the uniform bounds. A similar methodology was utilized in \cite{chau2019stochastic}.

For $s\ge 0$, let $\bx_{s,t}^{A,C}$ be the process such that $\bx_{s,t}^{A,C}=\bx_t^A=\bx_{\floor*{t}}^A$ for $t\le s$ and for $t\ge s$, $\bx_{s,t}^{A,C}$ follows:
$$ d\bx^{A,C}_{s,t} = -\eta \nabla_x \bar{f} (\bx^{A,C}_{s,t}) dt + \sqrt{\frac{2\eta}{\beta}} d\bw_t - \bv_{s,t}^{A,C} d\bmu_s^{A,C}(t).
$$ In other words, $\bx_{s,t}^{A,C}$ follows the algorithm for $t\le s$, and then switches to the dynamics of the continuous-time approximation from (\ref{eq:AvecontinuousProjectedLangevin}) at $t= s$. Now let $0\le s\le \hat s \le t$ where $s, \hat{s} \in \bbN$, then Corollary~\ref{cor:contractionW1} from Appendix~\ref{sec:contraction} shows that \begin{equation} \label{eq:switchContraction} W_{1}(\cL(\bx_{s,t}^{A,C}),\cL(\bx_{\hat s,t}^{A,C}))\le 2 \varphi(R)^{-1} e^{-\tilde a (t-\hat s)} W_{1}(\cL(\bx_{s,\hat s}^{A,C}),\cL(\bx_{\hat s,\hat s}^{A,C})). \end{equation} By starting the analysis of the processes $\bx^A$ and $\bx^C$ at time $s$, rather than time $0$, Lemma~\ref{lem:AtoCdependent} implies the following bound: \begin{align} W_{1}(\cL(\bx_{s,\hat s}^{A,C}),\cL(\bx_{\hat s,\hat s}^{A,C})) &= W_1(\cL(\bx_{s,\hat s}^{A,C}),\cL(\bx_{\hat{s}}^{A})) \nonumber\\ &\le \left( \left(c_{\ref{AtoC1}}+c_{\ref{BoundCtoD2}}\sqrt{\bbE[\|\bx_s^{A}\|^2]}\right) \eta \sqrt{\hat{s}-s}+ c_{\ref{BoundCtoD3}} \sqrt{\eta \log(4(\hat{s}-s))} \right)e^{\eta \ell (\hat{s}-s)} \nonumber\\ \label{eq:basicSwitch} &\le \left( \left(c_{\ref{AtoC1}}+c_{\ref{BoundCtoD2}}\sqrt{\varsigma + c_{\ref{AlgBound}}}\right) \eta \sqrt{\hat{s}-s}+ c_{\ref{BoundCtoD3}} \sqrt{\eta \log(4(\hat{s}-s))} \right)e^{\eta \ell (\hat{s}-s)}. \end{align} The second inequality is based on Lemma~\ref{lem:algBound}. Let $H=\floor*{1/\eta}$ and $t \in [\hat{k}H, (\hat{k}+1)H)$ where $\hat{k} \in \bbN$, we have $\bx_{0,t}^{A,C} = \bx_{t}^{C}$ and $\bx_{(\hat{k}+1)H,t}^{A,C} = \bx_{t}^{A} = \bx_{\floor*{t}}^{A}$. Then, the triangle inequality implies that \begin{align*} W_1(\cL(\bx_{t}^A),\cL(\bx_{t}^C)) \le \sum_{i=0}^{\hat{k}} W_{1}(\cL(\bx_{iH,t}^{A,C}),\cL(\bx_{(i+1)H,t}^{A,C})). \end{align*} For $i<\hat{k}$, setting $s=iH$, $\hat s = (i+1)H$ in \eqref{eq:switchContraction} gives that \begin{align*} W_{1}(\cL(\bx_{iH,t}^{A,C}),\cL(\bx_{(i+1)H,t}^{A,C})) &\le 2\varphi(R)^{-1} e^{-\tilde a \left(t-(i+1)H\right)} W_{1}(\cL(\bx_{iH,(i+1)H}^{A,C}),\cL(\bx_{(i+1)H,(i+1)H}^{A,C}))\\ &\le 2\varphi(R)^{-1} e^{-\eta a \left(t-(i+1)H\right)}g(H) \\ &\le 2\varphi(R)^{-1} e^{- a \left(\hat k -i-1)/2\right)} g(\eta^{-1}) \end{align*} where \begin{align} g(r) = \left( \left(c_{\ref{AtoC1}}+c_{\ref{BoundCtoD2}}\sqrt{\varsigma + c_{\ref{AlgBound}}}\right) \eta \sqrt{r}+ c_{\ref{BoundCtoD3}} \sqrt{\eta \log(4r)} \right)e^{\eta \ell r}. \end{align} The last inequality uses the facts that $1/2 \le \eta H \le 1 $ along with monotonicity of $g$. The lower bound of $\eta H$ arises because $H \ge \eta^{-1} -1 $ and so $\eta H \ge 1- \eta \ge 1/2$, since $\eta \le 1/2$. 
Thus, the first $\hat k$ terms are bounded by: \begin{align*} \nonumber \sum_{i=0}^{\hat k-1} W_{1}(\cL(\bx_{iH,t}^{A,C}),\cL(\bx_{(i+1)H,t}^{A,C})) &\le \sum_{i=0}^{\hat k-1} 2\varphi(R)^{-1} e^{- a \left(\hat k-i-1)/2\right)} g(\eta^{-1}) \\ &\le 2\varphi(R)^{-1} \frac{g(\eta^{-1})}{1-e^{-{a}/2}} \end{align*} For $ i =\hat{k}$, \begin{align*} W_{1}(\cL(\bx_{iH,t}^{A,C}),\cL(\bx_{(i+1)H,t}^{A,C})) &= W_1(\cL(\bx_{\hat k H,t}^{A,C}),\cL(\bx_{t}^{A}))\\ &\le g(t-\hat k H) \le g(\eta^{-1}) \end{align*} By triangle inequality, adding all the $\hat k+1$ terms gives \begin{align} \nonumber \MoveEqLeft[0] W_1(\cL(\bx_t^A),\cL(\bx_t^C))\\ \nonumber &\le g(\eta^{-1}) \left( 1+ \frac{2\varphi(R)^{-1}}{1-e^{-{a}/2}}\right)\\ &\le \left( \left(c_{\ref{AtoC1}}+c_{\ref{BoundCtoD2}}\sqrt{\varsigma + c_{\ref{AlgBound}}}\right) \eta \sqrt{\eta^{-1}}+ c_{\ref{BoundCtoD3}} \sqrt{\eta \log(4\eta^{-1})} \right)e^{\ell} \left( 1+ \frac{2\varphi(R)^{-1}}{1-e^{-{a}/2}}\right) . \label{eq:AtoCGen} \end{align} For $\eta^{-1} \ge 4$, we have $\log(4\eta^{-1}) \le 2 \log(\eta^{-1})$, and also $\log \eta^{-1}>1$. Thus, if $\eta \le 1/4$, then \eqref{eq:AtoCGen} can be further upper bounded by \begin{align*} \nonumber \MoveEqLeft[0] W_1(\cL(\bx_t^A),\cL(\bx_t^C))\\ &\le \left( \left(c_{\ref{AtoC1}}+c_{\ref{BoundCtoD2}}\sqrt{\varsigma + c_{\ref{AlgBound}}}\right) \eta \sqrt{\eta^{-1} \log(\eta^{-1})}+ c_{\ref{BoundCtoD3}} \sqrt{2\eta \log( \eta^{-1})} \right)e^{\ell} \left( 1+ \frac{2\varphi(R)^{-1}}{1-e^{-{a}/2}}\right)\\ &=\left(c_{\ref{AtoC1}}+c_{\ref{BoundCtoD2}}\sqrt{\varsigma + c_{\ref{AlgBound}}}+ \sqrt{2}c_{\ref{BoundCtoD3}} \right) e^{\ell}\left( 1+ \frac{2\varphi(R)^{-1}}{1-e^{-{a}/2}}\right)\sqrt{\eta \log( \eta^{-1})} \\ &\le \left(c_{\ref{AtoC1}}+c_{\ref{BoundCtoD2}}\sqrt{ c_{\ref{AlgBound}}}+ \sqrt{2}c_{\ref{BoundCtoD3}} + c_{\ref{BoundCtoD2}} \sqrt{\varsigma}\right) e^{\ell}\left( 1+ \frac{2\varphi(R)^{-1}}{1-e^{-{a}/2}}\right)\sqrt{\eta \log( \eta^{-1})}. \end{align*} So setting \begin{align*} c_{\ref{error_polyhedron1}} &= \left(c_{\ref{AtoC1}}+c_{\ref{BoundCtoD2}}\sqrt{c_{\ref{AlgBound}}}+ \sqrt{2} c_{\ref{BoundCtoD3}} \right) e^{\ell}\left( 1+ \frac{2\varphi(R)^{-1}}{1-e^{-{a}/2}}\right) \\ c_{\ref{error_polyhedron2}} &= c_{\ref{BoundCtoD2}} e^{\ell}\left( 1+ \frac{2\varphi(R)^{-1}}{1-e^{-{a}/2}}\right) \end{align*} completes the proof. \hfill$\blacksquare$ \section{Bound the constants} In this section, we bound the main constants $c_{\ref{contraction_const1}}, c_{\ref{contraction_const2}}, c_{\ref{error_polyhedron1}}, c_{\ref{error_polyhedron2}} $ shown in Theorem 1 so that we can have the simplified version of Theorem 1, which is loose but is easier for the application of optimization. \begin{proposition} \end{proposition} \begin{proof} Trace back to the contraction proof in Here we show the constants explicitly so that we can see the dimension and parameter dependencies directly. Combining the bound shown in line 674 and line 676 gives \begin{align*} \xi &\ge (R_1 - R_0) \Phi(R_1) \varphi(R_0)^{-1}/2\\ &\ge (R_1 - R_0) R_1e^{-h(R_0)} \varphi(R_0)^{-1}/2\\ & = (R_1 - R_0) R_1/2 \end{align*} Here we choose $R_0 = R$. 
and $R_1 = \frac{R}{2} +\frac{1}{2} \sqrt{R^2 + \frac{32\eta}{\mu \beta}e^{\frac{\beta \ell R^2}{8}}}$ Then, \begin{align*} a =\frac{2\xi}{\beta} \ge \frac{(R_1 - R) R_1}{\beta} = \frac{32\eta}{\mu \beta^2}e^{\frac{\beta \ell R^2}{8}} \end{align*} and \begin{align*} (2a)^{-1/2} &\le (\frac{64\eta}{\mu \beta^2}e^{\frac{\beta \ell R^2}{8}})^{-1/2}\\ &\le \frac{1}{8}\sqrt{\frac{\mu}{\eta}} \beta e^{-\frac{\beta \ell R^2}{16}}\\ &\le \sqrt{\frac{\mu}{\eta}} \frac{2}{\ell R^2} e^{-1} \end{align*} Then, we have the following inequality: \begin{align*} \frac{1}{1-e^{-a/2}} &\le \max\left\{ \frac{4}{a}, \frac{1}{1-e^{-1}}\right\}\\ &\le \max\left\{ \frac{\mu \beta^2 }{8 \eta} e^{-\frac{\beta \ell R^2}{8}}, \frac{1}{1-e^{-1}}\right\} \end{align*} which uses the fact that for all $y>0$, $\frac{1}{1-e^{-y}} \le \max\left\{ \frac{2}{y}, \frac{1}{1-e^{-1}}\right\}$ and see the proof detail in \citep{lamperski2021projected}. So \begin{align*} 1+ \frac{2 \varphi(R)^{-1}}{1-e^{-a/2}} \le 1+ 2 e^{\frac{\beta \ell R^2}{8}}\max\left\{ \frac{\mu \beta^2 }{8 \eta} e^{-\frac{\beta \ell R^2}{8}}, \frac{1}{1-e^{-1}}\right\} \end{align*} Without loss of generality, we assume that $\frac{a}{2} \ge 1$, which leads to $\frac{1}{1-e^{-a/2}} \le \frac{1}{1-e^{-1}} $ Then, we have $$c_{\ref{contraction_const1}} = 2 e^{\beta \ell R^2/8}\sqrt{\frac{2}{\mu}\left( (\ell +\mu)R^2 + R \|\nabla_x \bar{f}(0)\| + \frac{n}{\beta}\right)}$$ $$ c_{\ref{contraction_const2}} = 4 e^{\beta \ell R^2/8} $$ \begin{align*} c_{\ref{error_polyhedron1}} &= \left(c_{\ref{AtoC1}}+c_{\ref{BoundCtoD2}}\sqrt{c_{\ref{AlgBound}}}+ \sqrt{2} c_{\ref{BoundCtoD3}} \right) e^{\ell}\left( 1+ \frac{2\varphi(R)^{-1}}{1-e^{-\frac{16\eta}{\mu \beta^2}e^{\frac{\beta \ell R^2}{8}}}}\right) \\ & \le \left(c_{\ref{AtoC1}}+c_{\ref{BoundCtoD2}}\sqrt{c_{\ref{AlgBound}}}+ \sqrt{2} c_{\ref{BoundCtoD3}} \right) e^{\ell}\left(1+ \frac{2e^{\frac{\beta \ell R^2}{8}}}{1-e^{-1}}\right) \end{align*} \begin{align*} c_{\ref{error_polyhedron2}} &= \left( 6(\frac{1}{\alpha})^{\textrm{rank}(A)/2}+1\right) \sqrt{2\ell^2}e^{\ell}\left( 1+ \frac{2 e^{\beta \ell R^2/8}}{1-e^{-\frac{16\eta}{\mu \beta^2}e^{\frac{\beta \ell R^2}{8}}}}\right)\\ & \le \left( 6(\frac{1}{\alpha})^{\textrm{rank}(A)/2}+1\right) \sqrt{2\ell^2}e^{\ell} \left(1+ \frac{2e^{\frac{\beta \ell R^2}{8}}}{1-e^{-1}}\right) \end{align*} This is the bound we want to simplify: \begin{align*} W_1(\cL(\bx_T), \pi_{\beta \bar{f}}) &\le \left( c_{\ref{contraction_const1}} + c_{\ref{contraction_const2}} \sqrt{\varsigma} + \frac{c_{\ref{error_polyhedron1}} + c_{\ref{error_polyhedron2}}\sqrt{\varsigma}}{(2a)^{1/2}} \right) T^{-1/2} \log T\\ &\le \left( c_{\ref{contraction_const1}} + c_{\ref{contraction_const2}} \sqrt{\varsigma} + {c_{\ref{error_polyhedron1}} + c_{\ref{error_polyhedron2}}\sqrt{\varsigma}}\sqrt{\frac{\mu}{\eta}} \frac{2}{\ell R^2} e^{-1} \right) T^{-1/2} \log T . \end{align*} Observing the main constants in the bound gives \begin{align*} W_1(\cL(\bx_T), \pi_{\beta \bar{f}}) \le p(\beta^{-1/2})e^{\beta \ell R^2/8} T^{-1/2} \log T . \end{align*} where $p(\beta^{1/2})$ is a polynomial function. We assume $\beta >1$, then \begin{align*} W_1(\cL(\bx_T), \pi_{\beta \bar{f}}) \le p(1)e^{\beta \ell R^2/8} T^{-1/2} \log T . \end{align*} We want to get the sufficient condition for $$uW_1(\cL(\bx_T), \pi_{\beta \bar{f}}) \le \frac{\epsilon}{2}.$$ For all $\delta \in (0, 1) $, $T^{-1/2} \log T \le \sqrt{\frac{T^{-1 +\delta}}{e \delta}} $. 
So it suffices to have \begin{equation} T^{-1+\delta} \le e\delta (\epsilon /2)^2 (u p(1) e^{\frac{\beta \ell R^2}{8}})^{-2} := \hat \epsilon \end{equation} so \begin{align*} T &\ge \hat \epsilon^{-\frac{1}{1-\delta}}\\ &= \left(\frac{4 e^{-1} u^2 p(1)^2}{\delta \epsilon} \right)^{\frac{1}{1-\delta}} e^{\frac{\beta \ell R^2}{16(1-\delta)}} \end{align*} Let $\rho = \frac{2}{1-\delta} > 2$ since $\delta <1$. Then we have \begin{align*} T &\ge \hat \epsilon^{-\frac{1}{1-\delta}}\\ &= \left(\frac{\rho 4 e^{-1} u^2 p(1)^2}{\rho-2 }\right)^{\rho/2} \frac{1}{\epsilon^\rho} e^{\frac{\rho\beta \ell R^2}{32}} \end{align*} \end{proof} \section{Bounding the constants} \label{app:constants} In this section, we summarize all the constants in Table \ref{tb:constants}. The second column of the table points to the place where these values are defined or computed. Then we show the simplified bounds of the main constants $c_{\ref{contraction_const1}}, c_{\ref{contraction_const2}}, c_{\ref{error_polyhedron1}}, c_{\ref{error_polyhedron2}},a $ in Theorem~{\ref{thm:nonconvexLangevin}} explicitly and also discuss their dependencies on state dimension $n$ and parameter $\beta$. \begin{table}[ht] \caption{List of constants} \label{tb:constants} \centering \begin{tabular}{l l} \toprule Constant & Definition \\ \midrule\\ $a = \frac{2\xi}{\beta}$ \\ $c_{\ref{contraction_const1}} = \contractionConstOneVal $ \\ $c_{\ref{contraction_const2}} = \contractionConstTwoVal $ & \multirow{1}*{Appendix \ref{ss:proofLemmaconvergeToStationary} (Proof of Lemma~\ref{lem:convergeToStationary}}) \\ \midrule\\ $c_{\ref{error_polyhedron1}} = \errorPolyhedronOneVal$ \\ $c_{\ref{error_polyhedron2}} = \errorPolyhedronTwoVal $ & \multirow{1}*{Appendix \ref{app:switching} (Proof of Lemma~\ref{lem:AtoC})} \\ \midrule\\ $ c_{\ref{BoundCtoD1}} = \BoundCtoDOneVal $ \\ $c_{\ref{BoundCtoD2}} = \BoundCtoDTwoVal $\\ $c_{\ref{BoundCtoD3}} = \BoundCtoDThreeVal $ & \multirow{2}*{Appendix \ref{app:disBounds} (Proof of Lemma~\ref{lem:C2D}) } \\ \midrule\\ $ c_{\ref{AtoC1}} = \AtoCOneVal $ & Section \ref{ss:proofOverviewofLemmaAtoC} (Proof of Lemma \ref{lem:AtoCdependent}) \\ \midrule\\ $ c_{\ref{diamBound}} = \diamBoundVal $ & Appendix \ref{app:diamProof} (Proof of Lemma~\ref{lem:boundingExistence})\\ \midrule\\ $ c_{\ref{LyapunovConst}} = \LyapunovConstVal $ & Appendix \ref{app:contBounds} (Proof of Lemma \ref{lem:GeometricDrift})\\ \midrule\\ $ c_{\ref{AlgBound}} = \AlgBoundVal $ & Appendix \ref{app:dis-timeBounds} (Proof of Lemma \ref{lem:algBound}) \\ \bottomrule \end{tabular} \end{table} \begin{proposition} \label{prop:mainConstantsBound} The constants $c_{\ref{contraction_const2}}$ and $c_{\ref{error_polyhedron2}}$ grow linearly with $n$. The constants $c_{\ref{contraction_const1}}$ and $c_{\ref{error_polyhedron1}}$ have $O(\sqrt{n})$ and $O(n)$ dependencies respectively. So overall, the dimension dependency of convergence guarantee is $O(n)$. Constants $c_{\ref{contraction_const1}}, c_{\ref{contraction_const2}},c_{\ref{error_polyhedron1}}, c_{\ref{error_polyhedron2}}$ all grow exponentially with respect to $\frac{\beta \ell R^2}{2}$. And for all $\beta>0$, $a \ge \frac{2}{\frac{\beta R^2}{2}+\frac{16}{\mu}} e^{-\frac{\beta \ell R^2}{4}}$. \end{proposition} \paragraph{Proof of Proposition~\ref{prop:mainConstantsBound}} Recall that $a = 2\xi / \beta$, and from (\ref{eq:xi}) we have that from $$ \xi^{-1} = \int_0^{R_1} \Phi(s)\varphi(s)^{-1} ds. $$ So, to get a lower bound on $\xi$, we need an upper bound on the right side. 
Recalling the definitions of the various functions for our scenario gives: \begin{align*} h(s) &= \frac{\ell\beta \min\{s^2,R^2\}}{8} \\ \varphi(s) &= e^{-h(s)} \\ \Phi(s)&= \int_0^s \varphi(r)dr. \end{align*} It follows that $\Phi(s)\le s$ and $\varphi(s)^{-1}=e^{h(s)}\le e^{\frac{\ell \beta R^2}{8}}$. Thus, we have that $$ \xi^{-1}\le \frac{1}{2}R_1^2 e^{\frac{\ell\beta R^2}{8}}. $$ Now, note that in Corollary~\ref{cor:contraction} that we have set $$ R_1 = \frac{R}{2} +\frac{1}{2} \sqrt{R^2 + \frac{32}{\mu \beta}e^{\frac{\beta \ell R^2}{8}}}. $$ So, a bit of crude upper bounding gives: \begin{align*} \xi^{-1}&\le \frac{1}{2}R_1^2 e^{\frac{\ell\beta R^2}{8}} \\ &\le \frac{1}{2}\left( R^2 + \frac{32}{\mu \beta}e^{\frac{\beta \ell R^2}{8}}\right) e^{\frac{\beta \ell R^2}{8}} \\ &\le \left(\frac{R^2}{2}+\frac{16}{\mu\beta} \right) e^{\frac{\beta \ell R^2}{4}} \end{align*} The final bound on $a$ becomes: \begin{align*} a &= 2\xi/\beta \ge \frac{2}{\frac{\beta R^2}{2}+\frac{16}{\mu}} e^{-\frac{\beta \ell R^2}{4}} \end{align*} The rest of focuses on bounding the other constants as $\beta$ grows large. For all sufficiently large $\beta$, we have that $$ \frac{\frac{\beta R^2}{2}+\frac{16}{\mu}}{2}\le e^{\frac{\beta \ell R^2}{4}} $$ so that \begin{equation} \label{eq:crudeA} a\ge e^{-\frac{\beta \ell R^2}{2}} . \end{equation} We have the following inequality for all sufficiently large $\beta$: \begin{align*} \frac{1}{1-e^{-a/2}} &\le \max\left\{ \frac{4}{a}, \frac{1}{1-e^{-1}}\right\}\\ &\le \max\left\{ 4 e^{\frac{\beta \ell R^2}{2} }, \frac{1}{1-e^{-1}}\right\} \\ &= 4 e^{\frac{\beta \ell R^2}{2}}. \end{align*} The first inequality uses the fact that for all $y>0$, $\frac{1}{1-e^{-y}} \le \max\left\{ \frac{2}{y}, \frac{1}{1-e^{-1}}\right\}$, which is shown in \citep{lamperski2021projected}. So \begin{align} \label{eq:mainBoundExponential} 1+ \frac{2 \varphi(R)^{-1}}{1-e^{-a/2}} \le 1+ 4 e^{\frac{\beta \ell R^2}{2}}. \end{align} Now we bound the growth of the other constants for large $\beta$. So, without loss of generality, assume $\beta \ge 1$. Then, plugging the definition of $\xi$ and $\varphi$ and (\ref{eq:mainBoundExponential}) gives \begin{align*} c_{\ref{contraction_const1}} &= 2 e^{\frac{\beta \ell R^2}{8}}\sqrt{\frac{2}{\mu}\left( (\ell +\mu)R^2 + R \|\nabla_x \bar{f}(0)\| + \frac{n}{\beta}\right)} \\ &\le 2 e^{\frac{\beta \ell R^2}{8}}\sqrt{\frac{2}{\mu}\left( (\ell +\mu)R^2 + R \|\nabla_x \bar{f}(0)\| + n\right)}\\ c_{\ref{contraction_const2}} &= 4 e^{\frac{\beta \ell R^2}{8}}\\ c_{\ref{error_polyhedron1}} &= \left(c_{\ref{AtoC1}}+c_{\ref{BoundCtoD2}}\sqrt{c_{\ref{AlgBound}}}+ \sqrt{2} c_{\ref{BoundCtoD3}} \right) e^{\ell}\left( 1+ \frac{2 \varphi(R)^{-1}}{1-e^{-a/2}}\right) \\ & \le \left(c_{\ref{AtoC1}}+c_{\ref{BoundCtoD2}}\sqrt{c_{\ref{AlgBound}}}+ \sqrt{2} c_{\ref{BoundCtoD3}} \right) e^{\ell}\left(1+ 4e^{\frac{\beta\ell R^2}{2}}\right) \\ &\le r(\sqrt{n}) e^{\ell}\left(1+4e^{\frac{\beta \ell R^2}{2}}\right)\\ c_{\ref{error_polyhedron2}} &= \left( 6(\frac{1}{\alpha})^{\textrm{rank}(A)/2}+1\right) \sqrt{2\ell^2}e^{\ell}\left( 1+ \frac{2 \varphi(R)^{-1}}{1-e^{-a/2}}\right)\\ & \le \left( 6(\frac{1}{\alpha})^{\textrm{rank}(A)/2}+1\right) \sqrt{2\ell^2}e^{\ell} \left(1+ 4e^{\frac{\beta \ell R^2}{2}}1\right) . \end{align*} For constant $c_{\ref{error_polyhedron1}}$, $r(\sqrt{n})$ is a monotonically increasing function of order $\sqrt{n}$, (independent of $\eta$ and $\beta$). 
The upper bound of $c_{\ref{error_polyhedron1}}$ is derived by direct observation of the corresponding constants. We can see neither $c_{\ref{contraction_const2}}$ nor $c_{\ref{error_polyhedron2}}$ depends on the state dimension, so the two constants grow linearly with $n$. The constant $c_{\ref{contraction_const1}}$ are $O(\sqrt{n})$ and $c_{\ref{error_polyhedron1}}$ are $O(n)$. As for the dependencies on $\beta$, we can see that all four constants are $O(e^{\frac{\beta \ell R^2}{2}})$. \hfill $\blacksquare$ \section{Near-optimality of Gibbs distributions} In this appendix, we prove Proposition~\ref{prop: app_optimization} which shows that $\bx_k$ can be near-optimal. The proof closely follows \cite{lamperski2021projected} and \cite{raginsky2017non}. The main difference is that in our case we have to deal with the unbounded polyhedral constraint, while in \cite{raginsky2017non} there is no constraint and in \cite{lamperski2021projected} the constraint is compact. Firstly, we need a preliminary result shown as below. \const{const_subopt} \begin{lemma} \label{lem:gibbsSuboptimality} Assume $\bx$ is drawn according to $\pi_{\beta \bar f}$. There exists a positive constant $c_{\ref{const_subopt}}$ such that the following bounds hold: \begin{align*} \bbE[\bar f(\bx)] \le \min_{x \in \cK} \bar f(x) +\frac{n}{ \beta} \left( 2\max\{0,\log\varsigma\} +c_{\ref{const_subopt}}\right) \end{align*} where $c_{\ref{const_subopt}} =\log n + 2 \log( 1 +\frac{1}{\mu} c_{\ref{LyapunovConst}} ) + \frac{1}{6}\log 3 + \log 2 \sqrt{\pi} - \log r_{\min}$ and $r_{\min}$ is a positive constant. \end{lemma} \paragraph{Proof of Lemma~{\ref{lem:gibbsSuboptimality}}} Recall that the probability measure $\pi_{\beta \bar f }(A)$ is defined by $\pi_{\beta \bar f (A)}(A) = \frac{\int_{A \cap \cK} e^{-\beta \bar f(x)}dx}{\int_{\cK} e^{- \beta \bar f(y)}dy}$. Let $\Lambda = \int_{\cK} e^{-\beta \bar f(y)} dy $ and $p(x) = \frac{e^{-\beta \bar f(x)}}{\Lambda}$. So $\log p(x) = -\beta \bar f(x) - \log \Lambda$, which implies that $\bar f(x) = -\frac{1}{\beta} \log p(x) -\frac{1}{\beta} \log \Lambda$. Then we have \begin{align} \nonumber \bbE_{\pi_{\beta \bar f}} [\bar f(\bx)] &= \int_{\cK} \bar f(x) p(x) dx \\ \label{eq:sub_optimal} &= -\frac{1}{\beta} \int_{\cK} p(x) \log p(x) dx -\frac{1}{\beta} \log \Lambda. \end{align} We can bound the first term by maximizing the differential entropy. Let $h(x) = - \int_{\cK} p(x) \log p(x) dx$. Using the fact that the differential entropy of a distribution with finite moments is upper-bounded by that of a Gaussian density with the same second moment (see Theorem 8.6.5 in \cite{cover2012elements}), we have \begin{align} \label{eq:diff_entropy_bound} h(x) \le \frac{n}{2}\log(2 \pi e \sigma^2) \le \frac{n}{2}\log(2 \pi e ( \varsigma +\frac{1}{\mu} c_{\ref{LyapunovConst}})), \end{align} where $\sigma^2 = \bbE_{\pi_{\beta\bar f}}[\|\bx\|^2]$ and the second inequality uses Lemma \ref{lem:continuousBound}. We aim to derive the upper bound of the second term of (\ref{eq:sub_optimal}). First we show that there is a vector $x^\star \in \cK$ which minimizes $\bar f$ over $\cK$. In other words, an optimal solution exists. The bound (\ref{eq:quadraticLower}) from the proof of Lemma~\ref{lem:gibbs} implies that $\bar f(x)\ge \bar f(0)+1$ for all sufficiently large $x$. This implies that there is a compact ball, $B$ such that if $x_n\in \cK$ is a sequence such that $\lim_{n\to \infty}\bar f(x_n)=\inf_{x\in\cK} \bar f(x)$, then $x_n$ must be in $B\cap \cK$ for all sufficiently large $n$. 
Then since $\bar f$ is continuous and $B\cap \cK$ is compact, there must be a limit point $x^\star \in B\cap \cK$ which minimizes $\bar f$. Let $x^* \in \cK$ be a minimizer. The normalizing constant can be expressed as: \begin{align*} \label{eq:logNormalizationConstant} \log \Lambda &= \log \int_{\cK} e^{-\beta \bar f(x)} dx\\ &= \log e^{-\beta \bar f(x^*)} \int_{\cK} e^{\beta \left( \bar f(x^*) - \bar f(x)\right) } dx\\ &= -\beta \bar f(x^*) + \log \int_{\cK} e^{\beta \left( \bar f(x^*) - \bar f(x)\right) } dx \end{align*} So, to derive our desired upper bound on $-\log\Lambda$, it suffices to derive a lower bound on \begin{equation} \label{eq:suboptIntegral} \int_{\cK} e^{\beta \left( \bar f(x^*) - \bar f(x)\right) } dx. \end{equation} We have \begin{align*} \bar f(x) - \bar f(x^*) = \int_0^1 \nabla \bar f(x^* + t(x-x^*))^\top (x-x^*)dt. \end{align*} Let $y = x^* + t(x-x^*)$, $t \in [0,1]$, then \begin{align*} \|\nabla \bar f(y)\| &= \| \nabla \bar f(y) - \nabla \bar f(x^*) + \nabla \bar f(x^*) - \nabla \bar f(0) + \nabla \bar f(0)\| \\ &\le \ell \| y- x^*\| + \ell \| x^*\| + \| \nabla \bar f(0)\| \\ & \le \ell \|x-x^*\| t + \ell \| x^*\| + \| \nabla \bar f(0)\|. \end{align*} We can show $\|x^*\|$ is upper bounded by $\max \{R, \frac{\| \nabla \bar f(0)\|}{\mu}\}$. We have to find the bound for the case $\|x^*\| > R$. The convexity outside a ball assumption gives \begin{equation} \label{eq:conv_opt} \left( \nabla \bar f(x^*) - \nabla \bar f(0) \right)^\top x^* \ge \mu \|x^*\|^2. \end{equation} The optimality of $x^*$ gives $-\nabla \bar f(x^*) \in N_{\cK}(x^*)$, which is to say for all $y \in \cK$, $-\nabla \bar f(x^*)^\top (y- x^*) \le 0$. Since $0 \in \cK$, $\nabla \bar f(x^*)^\top x^* \le 0 $ holds. Applying the Cauchy-Schwartz inequality to the left side of \eqref{eq:conv_opt} gives \begin{align*} \| \nabla \bar f(0) \| \|x^*\| \ge \mu \|x^*\|^2. \end{align*} \const{xoptBound} This implies that $\|x^*\| \le \frac{\|\nabla \bar f(0)\|}{\mu} $. So we can conclude that $\|x^*\| \le \max \{R, \frac{\|\nabla \bar f(0)\|}{\mu} \} = c_{\ref{xoptBound}}$. Therefore, \begin{align*} \bar f(x) - \bar f(x^*) &\le \int_0^1 \|\nabla \bar f(x^*+t(x-x^*))\| \|x -x^*\| dt \\ &\le \frac{\ell}{2} \|x- x^*\|^2 + \left( \ell \| x^*\| + \| \nabla \bar f(0)\|\right) \|x-x^*\| \\ &\le \frac{\ell}{2} \|x- x^*\|^2 + \left( \ell c_{\ref{xoptBound}} + \| \nabla \bar f(0)\|\right) \|x-x^*\|. \end{align*} To lower-bound the integral from (\ref{eq:suboptIntegral}), we restrict our attention to the points $x$ such that the integrand is at least $1/2$. For these values, we have the following implications: \begin{align*} &e^{\beta \left( \bar f(x^*) - \bar f(x)\right)} \ge 1/2 \\ \iff &\beta \left( \bar f(x^*) - \bar f(x)\right) \ge -\log 2 \\ \impliedby & -\frac{\ell}{2} \|x- x^*\|^2 - \left( \ell c_{\ref{xoptBound}} + \| \nabla \bar f(0)\|\right) \|x-x^*\| \ge -\frac{1}{\beta} \log 2. \end{align*} So solving the corresponding quadratic equation and taking the positive root gives an upper bound of $\|x- x^*\|$: \begin{align*} \|x - x^*\| \le - \frac{1}{\ell}\left( \ell c_{\ref{xoptBound}} + \| \nabla \bar f(0)\|\right) + \frac{1}{\ell}\sqrt{\left( \ell c_{\ref{xoptBound}} + \| \nabla \bar f(0)\|\right)^2 + 2 \ell \frac{1}{\beta} \log 2}. 
\end{align*} So let $\epsilon = -\frac{1}{\ell}\left( \ell c_{\ref{xoptBound}} + \| \nabla \bar f(0)\|\right) + \frac{1}{\ell}\sqrt{\left( \ell c_{\ref{xoptBound}} + \| \nabla \bar f(0)\|\right)^2 + 2 \ell \frac{1}{\beta} \log 2}$ and let $\cB_{x^*}(\epsilon)$ be the ball of radius $\epsilon$ centered at $x^*$. Then we want to find a ball $\cS$ such that \begin{align*} \int_{\cK} e^{\beta \left( \bar f(x^*) - \bar f(x)\right)} dx \ge \frac{1}{2} \textrm{vol} (\cK \cap \cB_{x^*}(\epsilon)) \ge \frac{1}{2} \textrm{vol}(\cS). \end{align*} To find the desired ball $\cS$, we consider the problem of finding the largest ball inscribed within $\cK\cap \cB_{x^\star}(\epsilon)$. This is a Chebyshev centering problem, and can be formulated as the following convex optimization problem. \begin{subequations} \label{eq:chebyOptimization} \begin{align} &\max_{r,y} && r \\ \label{eq:ballInPolyhedron} &\textrm{subject to} && Ay \le b - r \boldsymbol{1} \\ \label{eq:ballInBall} &&&\|x^* -y\| + r \le \epsilon \end{align} \end{subequations} where $r$ and $y$ denotes the radius and the center of the Chebyshev ball respectively. The particular form arises because the rows of $A$ are unit vectors, and so the ball of radius $r$ around $y$ is inscribed in $\cK$ if and only if (\ref{eq:ballInPolyhedron}) holds, while this ball is contained in $\cB_{x^\star}(\epsilon)$ if and only if (\ref{eq:ballInBall}) holds. We rewrite this optimization problem as: \begin{subequations} \label{eq:chebyOptimization2} \begin{align} &\min_{r,y }&& -r + I_S\left(x^*, [\begin{smallmatrix}r\\y \end{smallmatrix}]\right)\\ &\textrm{subject to} && Ay \le b - r \boldsymbol{1} \end{align} \end{subequations} where $S = \{(x^*, [\begin{smallmatrix}r\\y \end{smallmatrix}]) \vert \|x^* -y\| + r <\epsilon \}$. Here, $I_S$ is defined by \begin{equation} I_S(x,[\begin{smallmatrix}r\\y \end{smallmatrix}]) = \left\{ \begin{array}{ c l } +\infty & \quad \textrm{if } (x,[\begin{smallmatrix}r\\y \end{smallmatrix}]) \notin S \\ 0 & \quad \textrm{otherwise}. \end{array} \right. \end{equation} Let $g(x^\star)$ denote the optimal value of (\ref{eq:chebyOptimization2}). We will show that there is a positive constant $r_{\min}>0$ such that $-g(x) \ge r_{\min}$ for all $x \in \cK$. As a result, for any $x^\star$ the corresponding Chebyshev centering solutions has radius at least $r_{\min}$. Let $F(x^*,[\begin{smallmatrix}r\\y \end{smallmatrix}])= -r + I_s(x^*, [\begin{smallmatrix}r\\y \end{smallmatrix}])$. We can see that $F$ is convex in $(x^*, [\begin{smallmatrix}r\\y \end{smallmatrix}])$ and $\textrm{dom}\:F = S$. Let $C = \{ [\begin{smallmatrix}r\\y \end{smallmatrix}] \vert [\boldsymbol{1} \; A] [\begin{smallmatrix}r\\y \end{smallmatrix}] \le b \}$. Then the optimal value of (\ref{eq:chebyOptimization2}) can be expressed as $g(x)= \inf_{[\begin{smallmatrix}r\\y \end{smallmatrix}] \in C} F(x, [\begin{smallmatrix}r\\y \end{smallmatrix}])$ and $\textrm{dom}\:g = \{ x \vert \exists [\begin{smallmatrix}r\\y \end{smallmatrix}] \in C \; \textrm{s.t.} \; (x, [\begin{smallmatrix}r\\y \end{smallmatrix}]) \in S\}$. The results of Section 3.2.5 of \cite{boyd2004convex} imply that if $F$ is convex, $S$ is convex, and $g(x) >-\infty$ for all $x$, then $g$ is also convex. If $(x, [\begin{smallmatrix}r\\y \end{smallmatrix}])\in \textrm{dom} \:F$, then \begin{align*} \|x-y\| + r \le \epsilon & \implies r \le \epsilon - \|x-y\|\\ & \implies -r \ge - \epsilon + \|x-y\| > - \infty. 
\end{align*} In particular, if there exist $ y,r $ such that $(x, [\begin{smallmatrix}r\\y \end{smallmatrix}]) \in \textrm{dom} \:F$, then $\inf_{[\begin{smallmatrix}r\\y \end{smallmatrix}] \in C} F(x, [\begin{smallmatrix}r\\y \end{smallmatrix}]) \ge -\epsilon $. There are two cases: \begin{itemize} \item If there exists $[\begin{smallmatrix}r\\y \end{smallmatrix}] \in C$ such that $(x, [\begin{smallmatrix}r\\y \end{smallmatrix}]) \in \textrm{dom}\: F$, then $\inf_{[\begin{smallmatrix}r\\y \end{smallmatrix}] \in C} F(x, [\begin{smallmatrix}r\\y \end{smallmatrix}]) $ is finite and bounded below. \item If there does not exist $[\begin{smallmatrix}r\\y \end{smallmatrix}] \in C$ such that $(x, [\begin{smallmatrix}r\\y \end{smallmatrix}]) \in \textrm{dom} \:F$, then for all $[\begin{smallmatrix}r\\y \end{smallmatrix}] \in C$, $F(x, [\begin{smallmatrix}r\\y \end{smallmatrix}]) = +\infty$. So $g(x) = \inf_{[\begin{smallmatrix}r\\y \end{smallmatrix}] \in C} F(x,[\begin{smallmatrix}r\\y \end{smallmatrix}]) = +\infty > - \infty$. \end{itemize} Hereby, we can conclude that for all $x$ , $g(x) >-\infty$, so $g(x)$ is convex. So, to found a lower bound on the inscribed radius, we want to maximize $g(x)$ over $\cK$. Specifically, we analyze the following optimization problem \begin{subequations} \label{eq:chebyOptimization3} \begin{align} &\max_{x \in \cK }&& g(x) \end{align} \end{subequations} which corresponds to maximizing a convex function over a convex set. Note that $\cK \subset \dom{(g )}$. In particular, if $x\in\cK$, then $(x,[\begin{smallmatrix} 0\\x \end{smallmatrix}])\in S$, which implies that $g(x) \le 0$. Thus, $g(x)\le 0$ for all $x\in \cK$. Therefore, using Theorem 32.2 \cite{rockafellar2015convex}, given $\cK$ is closed convex by our assumption and $g(x)$ is bounded above gives \begin{align*} \sup \left\{ g(x) \vert x \in \cK \right\} = \sup \left\{ g(x) \vert x \in E \right\} \end{align*} where $E$ is a subset of $\cK$ consisting of the extreme points of $\cK \cap L^{\perp}$, where $L$ is the linearity space of $C$ and $L = \{x \vert Ax = 0 \} = \cN(A)$. Now, we will show that $E$ is a finite set. Let \begin{align*} A = \begin{bmatrix} U_1 & U_2 \end{bmatrix} \begin{bmatrix} \Sigma & 0 \\ 0& 0 \end{bmatrix} \begin{bmatrix} V_1^\top \\ V_2^\top \end{bmatrix}. \end{align*} Then $\cN(A) = L = \cR(V_2)$ and $L^\perp = \cR(V_1)$, and \begin{align*} K\cap L^\perp = \{ V_1 Z_1 \vert A V_1 Z_1 \le b \}. \end{align*} This is a polyhedral with no lines so has a finite set of extreme points, i.e. $E$ is finite. In particular, they are contained in a compact subset of $\cK$. Then it is shown in the proof of Proposition 16 of \cite{lamperski2021projected} that the Chebyshev centering problem has a positive global lower bound, when restricted to a compact convex set with $0$ in its interior. Denote this value by $r_{\min}$. Thus, we have that $\textrm{vol}(\cS)\ge \frac{\pi^{n/2}}{\Gamma(n/2+1)}r_{\min}^n $, using the fact that a ball of radius $\rho$ has volumn given by $\frac{\pi^{n/2}}{\Gamma(n/2+1)}\rho^n$ Then, utilizing an upper bound of Gamma function recorded in \cite{ramanujan1988lost} shown as below: \begin{equation} \label{eq:stirling} \Gamma(x+1) < \sqrt{\pi} \left( \frac{x}{e}\right)^x \left( 8x^3 + 4x^2 +x + \frac{1}{30}\right)^{1/6}, \; x \ge 0. \end{equation} Setting $x = \frac{n}{2}$ in (\ref{eq:stirling}) gives: \begin{equation} \Gamma(\frac{n}{2}+1) < \sqrt{\pi} \left( \frac{n}{2e}\right)^{\frac{n}{2}} \left( n^3 + n^2 + \frac{n}{2} + \frac{1}{30}\right)^{1/6}. 
\end{equation} Therefore, we can find the lower bound of $\log \frac{1}{2} \textrm{vol}(S)$: \begin{align} \nonumber \log \frac{1}{2} \textrm{vol}(S) &= \log \frac{\pi^{n/2}}{\Gamma(n/2+1)}r_{\min}^n - \log 2\\ \nonumber &> \frac{n}{2} \log \pi + n \log r_{\min} - \log \left\{ \sqrt{\pi} \left( \frac{n}{2e}\right)^{\frac{n}{2}} \left( n^3 + n^2 + \frac{n}{2} + \frac{1}{30}\right)^{1/6} \right\} - \log 2\\\nonumber \label{eq:logvolumn_bound} &= -\frac{1}{2} \log \pi + n \log r_{\min} + \frac{n}{2} \log (2 \pi e) - \frac{n}{2} \log n- \frac{1}{6} \log \left( n^3 + n^2 + \frac{n}{2} + \frac{1}{30}\right) - \log 2\\ & > n \log r_{\min} + \frac{n}{2} \log (2 \pi e) - \frac{n}{2} \log n -\frac{1}{6} \log \left( 3 n^3 \right) - \log (2 \sqrt{\pi}) \end{align} The last inequality holds because $n \ge 1 $. Plugging (\ref{eq:logvolumn_bound}) and (\ref{eq:diff_entropy_bound}) in (\ref{eq:sub_optimal}) gives \begin{align*} \bbE_{\pi_{\beta \bar f}} [\bar f(x)] & < \min f(x) + \frac{n}{2 \beta} \log(2 \pi e ( \varsigma +\frac{1}{\mu} c_{\ref{LyapunovConst}})) \\ & -\frac{1}{\beta} \left( n \log r_{\min} + \frac{n}{2} \log (2 \pi e) - \frac{n}{2} \log n -\frac{1}{2} \log n - \frac{1}{6}\log 3 - \log 2 \sqrt{\pi} \right)\\ &= \min f(x) +\frac{n}{2 \beta} \log( \varsigma +\frac{1}{\mu} c_{\ref{LyapunovConst}}) - \frac{1}{\beta} \left( n \log r_{\min} - \frac{n}{2} \log n - \frac{1}{2} \log n \right)\\ & + \frac{1}{\beta} ( \frac{1}{6}\log 3 + \log 2 \sqrt{\pi})\\ &\le \min f(x) +\frac{n}{ \beta} \left( 2 \log( \varsigma +\frac{1}{\mu} c_{\ref{LyapunovConst}} ) + \frac{1}{6}\log 3 + \log 2 \sqrt{\pi} - \log r_{\min} + \log n \right). \end{align*} where last inequality holds because $n \ge 1$. The final form of the bound holds because for any $c>0$ \begin{align*} \log\left(\varsigma + c \right) &\le \log\left( \max\{\varsigma,1\} + c\right) \\ &= \log \max\{\varsigma,1\} + \log\left( 1 + \frac{c}{\max\{\varsigma,1\}}\right) \\ &\le \max\{\log\varsigma,0\}+ \log\left( 1 + c\right). \end{align*} \hfill $\blacksquare$ Now we cover the case of compact sets for comparison with \cite{lamperski2021projected}. \const{compactLipschitz} \const{subOptIndep} \const{TExponent} \begin{proposition} \label{prop:suboptCompact} Assume that $\cK$ has diameter $D$ and $0\in \cK$ and let $c_{\ref{compactLipschitz}}=\ell D + \|\nabla \bar f(0)\|$. Then for all $k\ge 0$, the iterates of the algorithm satisfy \begin{equation} \label{eq:compactKantorovich} \bbE[\bar f(\bx^A_k)]\le \min_{x\in\cK} f(x) + c_{\ref{compactLipschitz}} W(\cL(\bx^A_k),\pi_{\beta \bar f}) + \frac{n}{ \beta} \left(\max\{\log\varsigma,0\}+ c_{\ref{const_subopt}} \right) \end{equation} In particular, there are constants $c_{\ref{subOptIndep}}$ and $c_{\ref{TExponent}}$ such that, for all sufficiently small $\epsilon$, if \begin{subequations} \label{eq:betaOpt} \begin{align} \beta &= \frac{2n(\max\{\log\varsigma,0\}+c_{\ref{subOptIndep}})}{\epsilon}\\ T&= e^{c_{\ref{TExponent}/\epsilon}} \end{align} \end{subequations} then \begin{equation} \label{eq:compactSuboptimality} \bbE[\bar f(\bx^A_T)]\le \min_{x\in\cK} \bar f(x) +\epsilon. \end{equation} \end{proposition} \paragraph{Proof of Proposition~{\ref{prop:suboptCompact}}} First, we show that $\bar f(x)$ is Lipschitz with Lipschitz constant $c_{\ref{compactLipschitz}}$. Indeed, $$ \|\nabla \bar f(x)\| \le \|\nabla \bar f(x)-\nabla \bar f(0)\| + \| \nabla \bar f(0)\| \le \ell D + \|\nabla \bar f(0)\|. 
$$ So, if $x$ and $y$ are in $\cK$, we have \begin{align*} |\bar f(x)-\bar f(y)| &= \left| \int_0^1 \nabla \bar f(y+t(x-y))^\top (x-y)dt \right|\\ &\le c_1 \|x-y\|. \end{align*} Then (\ref{eq:compactKantorovich}) follows by Kantorovich duality combined with Lemma~\ref{lem:gibbsSuboptimality}. Now, using our bound from Theorem~\ref{thm:nonconvexLangevin} gives that for $T\ge 4$: $$ \bbE[\bar f(\bx^A_T)]\le \min_{x\in\cK} \bar f(x) + \frac{n}{ \beta} \left(\max\{\log\varsigma,0\}+ c_{\ref{const_subopt}} \right) +c_{\ref{compactLipschitz}} \left( c_{\ref{contraction_const1}} + c_{\ref{contraction_const2}} \sqrt{\varsigma} + \frac{c_{\ref{error_polyhedron1}} + c_{\ref{error_polyhedron2}}\sqrt{\varsigma}}{(2a)^{1/2}} \right) T^{-1/2} \log T $$ Now, note that $c_{\ref{const_subopt}}$ is monotonically decreasing in $\beta$. In particular, for $\beta \ge 1$ $$ c_{\ref{LyapunovConst}}=\LyapunovConstVal \le (\ell+\mu)R^2 + R\|\nabla_x \bar f(0)\| + n, $$ so that \begin{align*} c_{\ref{const_subopt}} &= \log n + 2 \log( 1 +\frac{1}{\mu} c_{\ref{LyapunovConst}} ) + \frac{1}{6}\log 3 + \log 2 \sqrt{\pi} - \log r_{\min} \\ &\le \log n + 2 \log\left( 1 +\frac{(\ell+\mu)R^2 + R\|\nabla_x \bar f(0)\| + n}{\mu} \right) + \frac{1}{6}\log 3 + \log 2 \sqrt{\pi} - \log r_{\min} \\ &=: c_{\ref{subOptIndep}} \end{align*} It follows that for $\beta\ge 1$ we have the bound $$ \bbE[\bar f(\bx^A_T)]\le \min_{x\in\cK} \bar f(x) + \frac{n}{ \beta} \left(\max\{\log\varsigma,0\}+ c_{\ref{subOptIndep}} \right) +c_{\ref{compactLipschitz}} \left( c_{\ref{contraction_const1}} + c_{\ref{contraction_const2}} \sqrt{\varsigma} + \frac{c_{\ref{error_polyhedron1}} + c_{\ref{error_polyhedron2}}\sqrt{\varsigma}}{(2a)^{1/2}} \right) T^{-1/2} \log T $$ Now, picking $\beta$ as in (\ref{eq:betaOpt}) gives $$ \frac{n}{ \beta} \left(\max\{\log\varsigma,0\}+ c_{\ref{subOptIndep}} \right) = \epsilon / 2. $$ Proposition~\ref{prop:mainConstantsBound} implies that there is some constant, $c$ (independent of $\eta$ and $\beta$) such that $c_{\ref{contraction_const1}}, c_{\ref{contraction_const2}},c_{\ref{error_polyhedron1}}, c_{\ref{error_polyhedron2}}\le c e^{\frac{\beta \ell R^2}{2}}$. Furthermore, for all $\beta$ sufficiently large, we have from \eqref{eq:crudeA} that \begin{equation} \frac{1}{\sqrt{a}} \le e^{\frac{\beta \ell R^2}{4}}. \end{equation} \const{crudeExp} Thus, for all $\beta$ sufficiently large we have that $$ c_{\ref{compactLipschitz}} \left( c_{\ref{contraction_const1}} + c_{\ref{contraction_const2}} \sqrt{\varsigma} + \frac{c_{\ref{error_polyhedron1}} + c_{\ref{error_polyhedron2}}\sqrt{\varsigma}}{(2a)^{1/2}} \right) \le e^{\beta \ell R^2}. $$ Thus, for our choice of $\beta$ (which is large for sufficiently small $\epsilon$), we have that $$ \bbE[\bar f(\bx^A_T)]\le \min_{x\in\cK} \bar f(x) + \frac{\epsilon}{2} + e^{\beta \ell R^2} T^{-1/2} \log T $$ For simple notation, let $\alpha$ be such that $$ \beta\ell R^2 = \frac{\alpha}{\epsilon}. $$ In this case, $\alpha = 2n\left(\max\{\log \varsigma,0\}+c_{\ref{subOptIndep}}\right)\ell R^2$. We will choose $T=e^{\gamma/\epsilon}$ and choose $\gamma$ to ensure that $$ e^{\beta \ell R^2} T^{-1/2}\log T = \exp\left(\frac{1}{\epsilon}\left(\alpha - \frac{\gamma}{2}\right)\right) \frac{\gamma}{\epsilon}\le \frac{\epsilon}{2}. 
$$ The desired inequality holds if and only if: $$ \exp\left(\frac{1}{\epsilon}\left(\alpha - \frac{\gamma}{2}\right)\right) \frac{2\gamma}{\epsilon^2}\le 1 $$ Note that if $\gamma/2 > \alpha$, then the left side is maximized over $(0,\infty)$ at $\epsilon = \frac{\frac{\gamma}{2}-\alpha}{2}$. Thus, a sufficient condition for this inequality to hold is: $$ \frac{8\gamma e^{-2}}{\left(\frac{\gamma}{2}-\alpha\right)^2}\le 1. $$ A clean sufficient condition is $T=e^{c_{\ref{TExponent}/\epsilon}}$, where $$c_{\ref{TExponent}}:=\gamma = 4\alpha + 32 e^{-2}= 8n\left(\max\{\log \varsigma,0\}+c_{\ref{subOptIndep}}\right)\ell R^2 + 32 e^{-2}. $$ \hfill $\blacksquare$ Now we extend the analysis to the non-compact case. \const{optimization_const1} \const{optimization_const2} \const{TExpGeneral} \const{2qmomentremainder1} \const{2qmomentremainder2} \begin{proposition} \label{prop: app_optimization} Let $\bx_k$ be the iterates of the algorithms and assume $\eta \le \frac{\mu}{3\ell^2}$ and $\bbE[\|\bx_0\|^{2q}] < \infty$ for all $q>1$. For all $q>1$, there exist positive constants $c_{\ref{optimization_const1}}, c_{\ref{optimization_const2}}$ such that for all integers $k \ge 0$, the following bound holds: \begin{align} \label{eq:noncompactGenSuboptimality} \bbE[\bar f(\bx_k)] \le \min_{x \in \cK} \bar f(x) + c_{\ref{optimization_const1}} W_1(\cL(\bx_k), \pi_{\beta \bar f}) + c_{\ref{optimization_const2}} W_1 (\cL(\bx_k), \pi_{\beta \bar f})^{\frac{2-2q}{1-2q}} + \frac{n}{ \beta} \left(\max\{\log\varsigma,0\}+ c_{\ref{const_subopt}} \right) \end{align} where \begin{align*} c_{\ref{optimization_const1}} &= \|\nabla \bar f(0) \| + \ell \frac{2\left(\|\nabla \bar f(0)\|+\sqrt{\|\nabla \bar f(0)\|} + \sqrt{\mu \bar f(0)}\right)}{\mu} \\ c_{\ref{optimization_const2}} &= \left( \frac{2 \ell }{\sqrt{\mu}} +\left( \bbE[\|\bx_0\|^{2q}] + c_{\ref{2qmomentremainder2}}\right) \frac{\ell^q 2^{q-1}}{(q-1)} \right) \left( \frac{\ell }{\sqrt{\mu} (q-1)\left( \bbE[\|\bx_0\|^{2q}] + c_{\ref{2qmomentremainder2}} \right) \frac{\ell^q 2^{q-1}}{(q-1)}}\right)^{\frac{2-2q}{-2q+1}} \end{align*} and $c_{\ref{2qmomentremainder2}}$ depends on $q$, the statistics of $\bz$, the parameters $\mu$, $\ell$ and $\nabla \bar{f}(0)$ and decreases monotonically with respect to $\beta$. Furthermore, there is a constant $c_{\ref{TExpGeneral}}$ such if $\epsilon$ is sufficiently small, $\beta$ is chosen as in (\ref{eq:betaOpt}), and $T=e^{c_{\ref{TExpGeneral}}/\epsilon}$, then $$ \bbE[\bar f(\bx_T)]\le \min_{x\in\cK}\bar f(x) +\epsilon. $$ \end{proposition} \paragraph{Proof of Proposition~{\ref{prop: app_optimization}}} Let $\bx$ be drawn according to $\pi_{\beta \bar f}$. Then Lemma~\ref{lem:gibbsSuboptimality} implies: \begin{align} \label{eq:optimizationBound} \nonumber \bbE[\bar f(\bx_k)] &= \bbE[\bar f(\bx)] + \bbE[\bar f(\bx_k)-\bar f(\bx)]\\ &\le \min_{x \in \cK} \bar f(x) + \bbE[\bar f(\bx_k)-\bar f(\bx)] + \frac{n}{ \beta} \left(\max\{\log\varsigma,0\}+ c_{\ref{const_subopt}} \right) \end{align} So, it now suffices to bound $ \bbE[\bar f(\bx_k)-\bar f(\bx)]$. Ideally, we would bound this term via Kantorovich duality. The problem is that $\bar f$ may not be globally Lipschitz. So, we must approximate it with a Lipschitz function, and then bound the gap induced by this approximation. Namely, fix a constant $m>\bar f(0)$ with $m$ to be chosen later. Set $g(x)=\min\{\bar f(x),m\}$. 
The inequality from (\ref{eq:quadraticLower}) implies that if $\|x\|\ge \hat R:=\frac{2\left(\|\nabla \bar f(0)\|+\sqrt{\|\nabla \bar f(0)\|+\mu (m-\bar f(0))}\right)}{\mu}$, then $\bar f(x)\ge m$. We claim that $g$ is globally Lipschitz. For $\|x\|\le \hat R$, we have that $$ \|\nabla \bar f(x)\|\le \|\nabla \bar f(0)\| + \ell \hat R=:u. $$ We will show that $g$ is $u$-Lipschitz. In the case that $f(y)\ge m$ and $f(x)\ge m$, we have $|g(x)-g(y)|=0$, so the property holds. Now say that $\bar f(x) < m$ and $\bar f(y) < m$. Then we must have $\|x\|\le \hat R$ and $\|y\|\le \hat R$. Then for all $t\in [0,1]$, we have $\|(1-t)x+ty \|\le \hat R$. It follows that \begin{align*} g(x)-g(y)&= \bar f(x)-\bar f(y) \\ &= \int_0^1 \nabla \bar f(x+t(y-x))^\top (y-x) dt \\ &\le u \|x-y\|. \end{align*} Finally, consider the case that $\bar f(x) \ge m$ and $\bar f(y) < m$. Then there is some $\theta \in [0,1]$ such that $\bar f(y + \theta (x-y))=m$. Furthermore \begin{align*} |g(x)-g(y)| &= m-\bar f(y) \\ &= \bar f(y+\theta (x-y))-\bar f(y) \\ &=\int_0^\theta \nabla \bar f( y + t(x-y))^\top (x-y) dt\\ &\le u \|x-y\|. \end{align*} It follows that $g$ is $u$-Lipschitz. Now noting that $g(x)\le \bar f(x)$ for all $x$ gives \begin{align} \nonumber \bbE[\bar f(\bx_k)-\bar f(\bx)]&\le \bbE[\bar f(\bx_k)-g(\bx)] \\ \nonumber &=\bbE[g(\bx_k)-g(\bx)] + \bbE[\indic(\bar f(\bx_k) > m) (\bar f(\bx_k)-m)] \\ \label{eq:kantorovichSplit} &\le u W_1(\cL(\bx_k),\pi_{\beta \bar f}) + \bbE[\indic(\bar f(\bx_k) > m) (\bar f(\bx_k)-m)]. \end{align} The final inequality uses Kantorovich duality. Now, it remains to bound $ \bbE[\indic(\bar f(\bx_k) > m) (\bar f(\bx_k)-m)]. $ Note that if $\by$ is a non-negative random variable, a standard identity gives that $\bbE[\by]=\int_0^\infty \bbP(\by > \epsilon)d\epsilon$. Thus, we have $$ \bbE[\indic(\bar f(\bx_k) > m) (\bar f(\bx_k)-m)] = \int_0^\infty \bbP(\bar f(\bx_k) -m > \epsilon)d\epsilon. $$ For all $x\in \cK$, we have \begin{align*} \bar f(x) &= \bar f(0) - \nabla \bar f(0)^\top x + \int_0^1 (\nabla \bar f(tx)-\nabla \bar f(0))^\top x dt \\ &\le \bar f(0) + \|\nabla \bar f(0)\| \|x \| + \frac{1}{2} \ell \|x\|^2 \\ &\le \bar f(0) + \frac{\|\nabla \bar f(0)\|^2}{2\ell} + \ell \|x\|^2. \end{align*} So, \begin{align*} \bar f(x)-m > \epsilon &\implies \bar f(0) + \frac{\|\nabla \bar f(0)\|^2}{2\ell} + \ell \|x\|^2 > m+\epsilon \\ &\iff \|x\|^2 > \frac{m+\epsilon- \left(\bar f(0) + \frac{\|\nabla \bar f(0)\|^2}{2\ell}\right)}{\ell}. \end{align*} Now assume that $m/2 > \bar f(0) + \frac{\|\nabla \bar f(0)\|^2}{2\ell}$. Then the right side implies $\|x\|^2 \ge \frac{\frac{m}{2}+\epsilon}{\ell}$. It follows that for any $q>1$, we have, via Markov's inequality and direct computation: \begin{align*} \bbE[\indic(\bar f(\bx_k) > m) (\bar f(\bx_k)-m)] & \le \int_0^{\infty} \bbP\left(\|\bx_k\|^2 > \frac{\frac{m}{2}+\epsilon}{\ell} \right) d\epsilon \\ &= \int_0^{\infty} \bbP\left(\|\bx_k\|^{2q} > \left(\frac{\frac{m}{2}+\epsilon}{\ell}\right)^q \right) d\epsilon \\ &\le \bbE[\|\bx_k\|^{2q}] \int_0^{\infty} \left(\frac{\frac{m}{2}+\epsilon}{\ell}\right)^{-q} d\epsilon \\ &= \bbE[\|\bx_k\|^{2q}] \frac{\ell^q 2^{q-1}}{(q-1)m^{q-1}}. 
\end{align*} Plugging this expression into (\ref{eq:kantorovichSplit}) and using the definition of $u$ gives \begin{align} \label{eq:kantorovichSplit2} \MoveEqLeft \nonumber \bbE[\bar f(\bx_k)-\bar f(\bx)] \\ & \nonumber \le \left(\|\nabla \bar f(0) \| + \ell \frac{2\left(\|\nabla \bar f(0)\|+\sqrt{\|\nabla \bar f(0)\|+\mu (m-\bar f(0))}\right)}{\mu} \right) W_1(\cL(\bx_k),\pi_{\beta \bar f}) \\ &\qquad + \bbE[\|\bx_k\|^{2q}] \frac{\ell^q 2^{q-1}}{(q-1)m^{q-1}}. \end{align} We want to derive the bound of $\bbE[\|\bx_k\|^{2q}]$. We have \begin{align*} \|\bx_{k+1}\|^{2q} \le \|\bx_k - \eta \nabla f(\bx_k, \bz_k) + \sqrt{\frac{2\eta}{\beta}} \hat{\bw}_k\|^2. \end{align*} For notational simplicity, let $\by = \frac{\bx_k - \eta \nabla f(\bx_k, \bz_k)}{\sqrt{2\eta /\beta}}$ and $\bw = \hat{\bw}_k$, then the above inequality can be expressed as \begin{align} \label{eq:binomial_expand} \nonumber \|\bx_{k+1}\|^{2q} &\le \left( \frac{2 \eta}{\beta}\right)^q\|\by + \bw\|^{2q}\\ \nonumber &= \left( \frac{2 \eta}{\beta}\right)^q \left( \|\by\|^2 + \|\bw\|^2 +2 \by^\top \bw \right)^q\\ \nonumber &= \left( \frac{2 \eta}{\beta}\right)^q \sum_{k=0}^q \binom qk \left( 2 \by^\top \bw \right)^{q-k}\left( \|\by\|^2 + \|\bw\|^2 \right)^k \\ & = \left( \frac{2 \eta}{\beta}\right)^q \sum_{k=0}^q \binom qk \left( 2 \by^\top \bw \right)^{q-k} \sum_{i=0}^k \binom ki\left( \|\by\|^{2i} \|\bw\|^{2(k-i)} \right). \end{align} The last two equalities use the binomial theorem. Here, we construct an orthogonal matrix $U = \begin{bmatrix}\frac{1}{\|\by\| } \by^\top \bw\\ s \end{bmatrix}$ such that we can linearly transform the Gaussian noise $\bw$ into $\bv = U\bw = \begin{bmatrix}\bv_1\\ \bv_2 \end{bmatrix}$, where $\bv_1 = \frac{1}{\|\by\|} \by^\top \bw$ and $\bv_2 = s\bw$. And the orthogonality of the matrix $U$ gives $\bv_1 \perp \bv_2$ and thus $\bv_1^2 + \bv_2^\top \bv_2$ follows a chi-squared distribution with $n$ degrees of freedom. Furthermore, we have $\|\bw\|^2 = \bv_1^2 + \bv_2^\top \bv_2$ and $\by^\top \bw = \|\by\| \bv_1$. Therefore, with the change of variables, (\ref{eq:binomial_expand}) can be expressed as \begin{align*} \|\bx_{k+1}\|^{2q} \le \left( \frac{2 \eta}{\beta}\right)^q \sum_{k=0}^q \binom qk \left( 2 \|\by\| \bv_1 \right)^{q-k} \sum_{i=0}^k \binom ki\left( \|\by\|^{2i} (\bv_1^2 + \bv_2^\top \bv_2)^{(k-i)} \right). \end{align*} Taking the expectation of the above inequality gives \begin{align} \label{eq:expectaion_x_2q} \bbE[\|\bx_{k+1}\|^{2q}] &\le \left( \frac{2 \eta}{\beta}\right)^q \bbE \left[ \sum_{k=0}^q \binom qk \left( 2 \|\by\|\bv_1 \right)^{q-k} \sum_{i=0}^k \binom ki\left( \|\by\|^{2i} (\bv_1^2 + \bv_2^\top \bv_2)^{(k-i)} \right) \right]\\ \label{eq:simplified_momentBound} &\le \bbE\left[ \|\bx_k - \eta \nabla f(\bx_k, \bz_k )\|^{2q} \right] + \eta \bbE\left[ p(\|\bx_k - \eta \nabla f(\bx_k, \bz_k ) \|^{2})\right] \end{align} where $p(\|\bx_k - \eta \nabla f(\bx_k, \bz_k )\|^2)$ is a polynomial in $\|\bx_k - \eta \nabla f(\bx_k, \bz_k )\|^2$ with order strictly lower than $q$ and the coefficients of $\bbE[p(\|\bx_k - \eta \nabla f(\bx_k, \bz_k )\|^2)]$ depend on the moments of the chi-squared distributions and $q$. (Additionally, note that the coefficients of $p$ can be taken to be monotonically decreasing with respect to $\beta$.) And the reason the polynomial only have even order terms in $\|\bx_k - \eta \nabla f(\bx_k, \bz_k )\|$ is that in (\ref{eq:expectaion_x_2q}), when $q-k$ is odd, the expectation is zero since $\bv_1 \sim \cN(0,1)$ whose odd order moments are all zero. 
Then we firstly aim to bound $\bbE[\|\bx_k - \eta \nabla \bar{f}(\bx_k)\|^{2q}]$. We have \begin{align} \label{eq:x_2q_first_term} \|\bx_k - \eta \nabla f(\bx_k, \bz_k)\|^{2q} &= \left( \|\bx_k\|^2 - 2 \eta \bx_k^\top \nabla f(\bx_k, \bz_k) + \eta^2 \|\nabla f(\bx_k, \bz_k)\|^2\right)^q. \end{align} We examine the second term: \begin{align*} \bx_k^\top \nabla f(\bx_k, \bz_k) &= \bx_k^\top (\nabla \bar{f}(\bx_k) - \nabla \bar{f}(0)) + \bx_k^\top( \nabla \bar{f}(0) - \nabla \bar{f}(\bx_k) + \nabla f(\bx_k, \bz_k) ) \\ &\ge \mu \|\bx_k\|^2 - (\ell+ \mu) R^2 + \bx_k^\top(\nabla \bar{f}(0) + \bbE_{\hat \bz}[\nabla f(\bx_k, \bz_k) - \nabla f(\bx_k, \hat \bz_k) ]) \end{align*} where the first term is bounded by the assumption of the strong convexity outside a ball and the detailed statement is shown below: If $\|x\| \ge R$, then $x^\top (\nabla \bar f(x) - \nabla \bar f(0)) \ge \mu \|x\|^2$. If $\|x\| \le R$, then $x^\top (\nabla \bar f(x) - \nabla \bar f(0)) \ge - \ell \|x\|^2 \ge - \ell R^2$. Therefore, we have for all $x \in \cK$, $x^\top (\nabla \bar f(x) - \nabla \bar f(0)) \ge \mu \|x\|^2 - (\ell+\mu) R^2$. Note here and below $\hat \bz$ and $\bz$ are IID. Taking expectation of (\ref{eq:x_2q_first_term}) gives \begin{align} \nonumber & \bbE\left[ \|\bx_k - \eta \nabla f(\bx_k, \bz_k)\|^{2q} \right] \nonumber \\ \nonumber &= \bbE\left[\left( \|\bx_k\|^2 - 2 \eta \bx_k^\top \nabla f(\bx_k, \bz_k) + \eta^2 \|\nabla f(\bx_k, \bz_k)\|^2\right)^q\right] \\ \nonumber &\le \bbE\left[\left((1- 2 \mu \eta) \|\bx_k\|^2 + 2 \eta (\ell+ \mu) R^2 \right.\right. \\ \nonumber & \left. \left. \qquad - 2 \eta\bx_k^\top(\nabla \bar{f}(0) + \bbE_{\hat \bz}[\nabla f(\bx_k, \bz_k) - \nabla f(\bx_k, \hat \bz_k) ]) + \eta^2 \|\nabla f(\bx_k, \bz_k)\|^2\right)^q\right] \\ \nonumber &\le \bbE\left[\left((1- 2 \mu \eta) \|\bx_k\|^2 + 2 \eta (\ell+ \mu) R^2 \right.\right. \\ \nonumber & \left. \left. \qquad - 2 \eta\bx_k^\top(\nabla \bar{f}(0) + \nabla f(\bx_k, \bz_k) - \nabla f(\bx_k, \hat \bz_k) ) + \eta^2 \|\nabla f(\bx_k, \bz_k)\|^2\right)^q\right] \\ \nonumber &\le \bbE\left[\left((1- 2 \mu \eta) \|\bx_k\|^2 + 2 \eta (\ell+ \mu) R^2 \right.\right. \\ & \left. \left. \qquad + 2 \eta \|\bx_k\|(\|\nabla \bar{f}(0)\| + \ell\|\bz_k - \hat \bz_k\| ) + \eta^2 \|\nabla f(\bx_k, \bz_k)\|^2\right)^q\right]. \label{eq:Bound2qmoment} \end{align} The second inequality uses Jensen's inequality, and the last inequality uses Cauchy-Schwartz inequality together with $\ell$-Lipschitzness of $\nabla f(x,z)$ in $z$. Now we examine the last term of (\ref{eq:Bound2qmoment}). Firstly, we have \begin{align*} \|\nabla f(x,z)\| &= \|\nabla f(x,z) - \bbE_{\hat{\bz}}[\nabla f(x, \hat \bz)] + \bbE_{\hat \bz}[\nabla f(x, \hat \bz)]\| \\ &\le \|\bbE_{\hat{\bz}}[ \nabla f(x,z) - \nabla f(x, \hat \bz)]\| + \| \bar{f}(x)\| \\ &\le \ell \bbE_{\hat{\bz}}[ \|z - \hat \bz\|] + \|\nabla \bar{f}(0)\| + \ell \|x\|. \end{align*} So \begin{align} \label{eq:fsquareBound} \|\nabla f(x,z)\|^2 \le 3 \left( \ell^2 \left(\bbE_{\hat{\bz}}[ \|z - \hat \bz\|] \right)^2 + \|\nabla \bar{f}(0)\|^2 + \ell^2 \|x\|^2\right). \end{align} Then, we can group the square terms in (\ref{eq:Bound2qmoment}) together and simplify it: \begin{align*} &(1-2\mu \eta) \|x\|^2 + \eta^2 3 \ell^2 \|x\|^2 \le (1-\eta \mu) \|x\|^2\\ \iff & 1-2\mu \eta + \eta^2 3 \ell^2 \le 1- \eta \mu \\ \iff& \eta \le \frac{\mu}{3\ell^2}. 
\end{align*} So, if $\eta \le \frac{\mu}{3\ell^2}$, plugging (\ref{eq:fsquareBound}) into (\ref{eq:Bound2qmoment}) gives \begin{align} \nonumber \MoveEqLeft \bbE\left[ \|\bx_k - \eta \nabla f(\bx_k, \bz_k)\|^{2q} \right] \le \bbE\left[\left((1- \mu \eta) \|\bx_k\|^2 + 2 \eta (\ell+ \mu) R^2 \right.\right. \\ & \left. \left. + 2 \eta \|\bx_k\|(\|\nabla \bar{f}(0)\| + \ell\| \bz_k - \hat \bz_k \| ) + \eta^2 3 \left( \ell^2 \left(\bbE_{\hat{\bz}}[ \|\bz_k - \hat \bz_k\|] \right)^2 + \|\nabla \bar{f}(0)\|^2 \right) \right)^q\right]. \label{eq:Bound2qmoment2} \end{align} We want to further group the first and third terms above together. For all $\epsilon \ge 0$, $2ab = 2 (\epsilon a) (\frac{1}{\epsilon} b) \le (\epsilon a)^2 + (\frac{1}{\epsilon}b)^2$ . Let $a= \|\bx_k\|$, $b = \|\nabla \bar{f}(0)\| + \ell \|\bz_k - \hat \bz_k\| $, then we can see the third term of the right side of (\ref{eq:Bound2qmoment2}) can be upper bounded by a summation of two parts. The first part can be grouped with the first term of the right side of (\ref{eq:Bound2qmoment2}): \begin{align*} &(1- \mu \eta) \|\bx_k\|^2 + \eta \epsilon^2 \|\bx_k\|^2 \le (1- \frac{\mu \eta}{2}) \|\bx_k\|^2 \\ \iff & 1- \mu \eta +\eta \epsilon^2 \le 1- \frac{\mu \eta}{2} \\ \iff & \epsilon \le \sqrt{\frac{\mu}{2}}. \end{align*} So let $\epsilon = \sqrt{\frac{\mu}{2}}$, we have \begin{align} \nonumber \bbE\left[ \|\bx_k - \eta \nabla f(\bx_k, \bz_k)\|^{2q} \right] &\le \bbE\left[\left((1- \frac{\mu \eta}{2}) \|\bx_k\|^2 + 2 \eta (\ell+ \mu) R^2 \right.\right. \\ \nonumber & \hspace{-40pt} \left. \left. + \frac{2\eta}{\mu}(\|\nabla \bar{f}(0)\| + \ell \| \bz_k - \hat \bz_k \| )^2 + \eta^2 3 \left( \ell^2 \left(\bbE_{\hat{\bz}}[ \|\bz_k - \hat \bz_k\|] \right)^2 + \|\nabla \bar{f}(0)\|^2 \right) \right)^q\right] \\ \nonumber &\le \bbE\left[\left((1- \frac{\mu \eta}{2}) \|\bx_k\|^2 + 2 \eta (\ell+ \mu) R^2 \right.\right. \\ \label{eq:second_inequality} & \hspace{-30pt} \left. \left. + \frac{2\eta}{\mu}(\|\nabla \bar{f}(0)\| + \ell\| \bz_k - \hat \bz_k \| )^2 + \eta^2 3 \left( \ell^2 \left( \|\bz_k - \hat \bz_k\| \right)^2 +\|\nabla \bar{f}(0)\|^2 \right) \right)^q\right] \\ \nonumber & = (1-\frac{\mu \eta}{2})^q \bbE[\|\bx_k\|^{2q}] + \eta \bbE[p_2(\|\bx_k\|^2, \|\bz_k -\hat{\bz}_k\|)]\\ &\le (1-\frac{\mu \eta}{2}) \bbE[\|\bx_k\|^{2q}] + \eta \bbE[p_2(\|\bx_k\|^2, \|\bz_k -\hat{\bz}_k\|)]. \label{eq:y2q_upperbound} \end{align} The inequality (\ref{eq:second_inequality}) uses Jensen's inequality twice. The polynomial $p_2(\|\bx_k\|^2, \|\bz_k -\hat{\bz}_k\|)$ is with order strictly lower than $q$ in $\|\bx_k\|^2$ and with the highest order of $2q$ in $\|\bz_k - \hat{\bz}_k\|$. Similarly, we can obtain for all $i <q$, \begin{align*} \MoveEqLeft \bbE\left[ \|\bx_k - \eta \nabla f(\bx_k, \bz_k)\|^{2i} \right] \le \bbE\left[ \left((1- \frac{\mu \eta}{2}) \|\bx_k\|^2 + 2 \eta (\ell+ \mu) R^2 \right. \right. \\ & \left. \left. + \frac{2\eta}{\mu}(\|\nabla \bar{f}(0)\| + \ell\| \bz_k - \hat \bz_k \| )^2 + \eta^2 3 \left( \ell^2 \left( \|\bz_k - \hat \bz_k\| \right)^2 +\|\nabla \bar{f}(0)\|^2 \right)\right)^i \right]. \end{align*} This implies that $ \bbE[p(\|\bx_k - \eta \nabla f(\bx_k, \bz_k ) \|^{2})]$ can be upper bounded by $\bbE[p_1(\|\bx_k\|^2, \|\bz_k - \hat{\bz}_k\|)]$ where $p_1(\|\bx_k\|^2, \|\bz_k - \hat{\bz}_k\|)$ is a polynomial with the order strictly lower than q in $\|\bx_k\|^2$ and the highest order of $2q-2$ in $\|\bz_k - \hat{\bz}_k \|$. 
So (\ref{eq:simplified_momentBound}) can be further upper bounded as below: \begin{align} \nonumber \bbE[\|\bx_{k+1}\|^{2q}] &\le (1-\frac{\mu \eta}{2}) \bbE[\|\bx_k\|^{2q}] + \eta \bbE[p_2(\|\bx_k\|^2, \|\bz_k -\hat{\bz}_k\|)] + \eta \bbE[p_1(\|\bx_k\|^2, \|\bz_k -\hat{\bz}_k\|)] \\ \nonumber &= (1-\frac{\mu \eta}{2}) \bbE[\|\bx_k\|^{2q}] + \eta \bbE[p_3(\|\bx_k\|^2, \|\bz_k -\hat{\bz}_k\|)] \\ \nonumber & \le (1- \frac{\mu \eta}{4}) \bbE[\|\bx_k\|^{2q}] + \eta \bbE[-\frac{\mu}{4} \|\bx_k\|^{2q} + p_3(\|\bx_k\|^2, \|\bz_k -\hat{\bz}_k\|)] \\ \label{eq:x_2q_momentBound} & \le (1- \frac{\mu \eta}{4}) \bbE[\|\bx_k\|^{2q}] + \eta \bbE[\frac{\mu}{4} \left( - \|\bx_k\|^{2q} +\tilde{p}(\|\bx_k\|^2, \|\bz_k -\hat{\bz}_k\|) ] \right) \end{align} To get the upper bound of the second term of (\ref{eq:x_2q_momentBound}), we examine the following polynomial with $x \ge 0$ \begin{align*} - x^{q} + \sum_{i=0}^{q-1} a_{q,i}x^{i}, \end{align*} where the $a_{q,i}$'s depend on the value of $q$, the statistics of the external random variables $\bz$ and some other parameters including $\ell$, $\mu$ and $\|\nabla \bar{f}(0)\|$ and $a_{q,i}$'s decrease monotonically with respect to $\beta$. To find the upper bound of such a polynomial, we consider two cases \begin{itemize} \item Assume $0 \le x \le 1$, then $- x^{q} + \sum_{i=0}^{q-1} a_{q,i}x^{i} \le \sum_{i=0}^{q-1} |a_{q,i}| $; \item Assume $x >1$, then $- x^{q} + \sum_{i=0}^{q-1} a_{q,i} x^{i} \le \left( \sum_{i=0}^{q-1} |a_{q,i}| \right) \left( \sum_{i=0}^{q-1} |a_{q,i}| + 1 \right)^{q-1} $. \end{itemize} Combining the two cases gives that for all $x \ge 0$, \begin{equation} \nonumber - x^{q} + \sum_{i=0}^{q-1} a_{q,i} x^{i} \le \left( \sum_{i=0}^{q-1} |a_{q,i}| \right) \left( \sum_{i=0}^{q-1} |a_{q,i}| + 1 \right)^{q-1}. \end{equation} The first case is a direct result of dropping the negative term and using Cauchy-Schwartz inequality. The second case is obtained by firstly showing the sufficient condition of the polynomial being non-positive. The detail is shown below: \begin{align} - x^{q} + \sum_{i=0}^{q-1} a_{q,i}x^{i} \le 0 &\iff -1 + \sum_{i=0}^{q-1} \frac{a_{q,i}}{x^{q-i}} \le 0 \nonumber \\ & \impliedby -1 + \sum_{i=0}^{q-1} \frac{|a_{q,i}|}{x} \le 0 \label{eq:poly_sufficient_1}\\ \nonumber & \iff -1 + \frac{1}{x} \sum_{i=0}^{q-1} |a_{q,i}| \le 0 \\ & \iff x \ge \max \{ \sum_{i=0}^{q-1} |a_{q,i}|, 1 \} \label{eq:poly_sufficient_2} \\ \nonumber &\impliedby x \ge \sum_{i=0}^{q-1} |a_{q,i}| + 1 \end{align} Both (\ref{eq:poly_sufficient_1}) and (\ref{eq:poly_sufficient_2}) use the assumption that $x>1$. Besides, for $1<x \le \sum_{i=0}^{q-1} |a_{q,i}| + 1 $, \begin{align*} - x^{q} + \sum_{i=0}^{q-1} a_{q,i} x^{i} &\le \sum_{i=0}^{q-1} |a_{q,i}| x^i \\ &\le \sum_{i=0}^{q-1} |a_{q,i}| x_{max}^{q-1}\\ &= \sum_{i=0}^{q-1} |a_{q,i}| \left( \sum_{i=0}^{q-1} |a_{q,i}|+1 \right)^{q-1}. \end{align*} Therefore, we can conclude that \begin{align*} \bbE[-\frac{\mu}{4} \|\bx_k\|^{2q} + \tilde{p} (\|\bx_k\|^2, \|\bz_k -\hat{\bz}_k\|)] \le \bbE \left[\frac{\mu}{4} \sum_{i=0}^{q-1} |a_{q,i}| \left( \sum_{i=0}^{q-1} |a_{q,i}|+1 \right)^{q-1} \right]. \end{align*} The L-mixing property ensures that the right side of the inequality is bounded. Then, we achieve the upper bound of equation (\ref{eq:x_2q_momentBound}). 
\begin{align*}
\bbE[\|\bx_{k+1}\|^{2q}] &\le (1- \frac{\mu \eta}{4}) \bbE[\|\bx_k\|^{2q}] + \eta \bbE \left[\frac{\mu}{4} \sum_{i=0}^{q-1} |a_{q,i}| \left( \sum_{i=0}^{q-1} |a_{q,i}|+1 \right)^{q-1} \right].
\end{align*}
Iterating the inequality above and letting $ \tilde{a}_q = \bbE \left[\frac{\mu}{4} \sum_{i=0}^{q-1} |a_{q,i}| \left( \sum_{i=0}^{q-1} |a_{q,i}|+1 \right)^{q-1} \right]$ gives
\begin{align*}
\bbE[\|\bx_k\|^{2q}] &\le \left( 1- \frac{\mu \eta}{4} \right)^k \bbE[\|\bx_0\|^{2q}] + \eta \tilde{a}_q \sum_{i=0}^{k-1}(1-\frac{\mu \eta}{4})^i \\
&\le \bbE[\|\bx_0\|^{2q}] + \eta \tilde{a}_q \frac{1- \left( 1- \frac{\mu \eta}{4}\right)^k}{1-\left( 1- \frac{\mu \eta}{4}\right) } \\
&\le \bbE[\|\bx_0\|^{2q}] + \frac{4}{\mu} \tilde{a}_q \left( 1-\left(1- \frac{\mu \eta}{4} \right)^k \right) \\
& \le \bbE[\|\bx_0\|^{2q}] + \frac{4}{\mu} \tilde{a}_q .
\end{align*}
Now as long as $\bbE[\|\bx_0\|^{2q}] < \infty$ and $\eta <1$, we have
\begin{align*}
\bbE[\|\bx_k\|^{2q}] \le \bbE[\|\bx_0\|^{2q}] + c_{\ref{2qmomentremainder2}},
\end{align*}
where $c_{\ref{2qmomentremainder2}} = \frac{4}{\mu} \tilde{a}_q$. More specifically, $c_{\ref{2qmomentremainder2}}$ depends on $q$, on the statistics of $\bz$, and on the parameters $\mu$, $\ell$ and $\|\nabla \bar{f}(0)\|$. Plugging the above result into (\ref{eq:kantorovichSplit2}) gives
\begin{align*}
\label{eq:kantorovichSplit3}
\MoveEqLeft \bbE[\bar f(\bx_k)-\bar f(\bx)] \\
& \le \left(\|\nabla \bar f(0) \| + \ell \frac{2\left(\|\nabla \bar f(0)\|+\sqrt{\|\nabla \bar f(0)\|+\mu (m-\bar f(0))}\right)}{\mu} \right) W_1(\cL(\bx_k),\pi_{\beta \bar f}) \\
&\qquad + \left( \bbE[\|\bx_0\|^{2q}] + c_{\ref{2qmomentremainder2}} \right) \frac{\ell^q 2^{q-1}}{(q-1)m^{q-1}}.
\end{align*}
It remains to optimize the right side of the above inequality with respect to $m$, which determines the choice of $m$ mentioned earlier in the proof. Let
\begin{align*}
g(m)&=\left(\|\nabla \bar f(0) \| + \ell \frac{2\left(\|\nabla \bar f(0)\|+\sqrt{\|\nabla \bar f(0)\|} + \sqrt{\mu m} + \sqrt{\mu \bar f(0)}\right)}{\mu} \right) W_1(\cL(\bx_k),\pi_{\beta \bar f}) \\
&\qquad + \left( \bbE[\|\bx_0\|^{2q}] + c_{\ref{2qmomentremainder2}} \right) \frac{\ell^q 2^{q-1}}{(q-1)m^{q-1}}.
\end{align*}
We can see that $g(m)$ is an upper bound on the right side of (\ref{eq:kantorovichSplit2}). Setting $g^\prime(m) = 0$ leads to $m^* = \left( \frac{\ell W_1}{\sqrt{\mu} (q-1)C}\right)^{\frac{2}{-2q+1}}$, where, for notational simplicity, $C = \left(\bbE[\|\bx_0\|^{2q}] + c_{\ref{2qmomentremainder2}} \right) \frac{\ell^q 2^{q-1}}{(q-1)}$.
So
\begin{align*}
\min_{m > 0}g(m) &= g(m^*)\\
&\le \left(\|\nabla \bar f(0) \| + \ell \frac{2\left(\|\nabla \bar f(0)\|+\sqrt{\|\nabla \bar f(0)\|} + \sqrt{\mu \bar f(0)}\right)}{\mu} \right) W_1(\cL(\bx_k),\pi_{\beta \bar f}) \\
& + \frac{2 \ell }{\sqrt{\mu}} \left( \frac{\ell W_1(\cL(\bx_k),\pi_{\beta \bar f})}{\sqrt{\mu} (q-1)C}\right)^{\frac{2-2q}{-2q+1}} + C \left( \frac{\ell W_1(\cL(\bx_k),\pi_{\beta \bar f})}{\sqrt{\mu} (q-1)C}\right)^{\frac{2-2q}{-2q+1}}\\
& = \left(\|\nabla \bar f(0) \| + \ell \frac{2\left(\|\nabla \bar f(0)\|+\sqrt{\|\nabla \bar f(0)\|} + \sqrt{\mu \bar f(0)}\right)}{\mu} \right) W_1(\cL(\bx_k),\pi_{\beta \bar f}) \\
& + \left( \frac{2 \ell }{\sqrt{\mu}} +C \right) \left( \frac{\ell }{\sqrt{\mu} (q-1)C}\right)^{\frac{2-2q}{-2q+1}} W_1(\cL(\bx_k),\pi_{\beta \bar f})^{\frac{2-2q}{-2q+1}}.
\end{align*}
Setting
\begin{align*}
c_{\ref{optimization_const1}} &= \|\nabla \bar f(0) \| + \ell \frac{2\left(\|\nabla \bar f(0)\|+\sqrt{\|\nabla \bar f(0)\|} + \sqrt{\mu \bar f(0)}\right)}{\mu} \\
c_{\ref{optimization_const2}} &= \left( \frac{2 \ell }{\sqrt{\mu}} +\left( \bbE[\|\bx_0\|^{2q}] + c_{\ref{2qmomentremainder2}}\right) \frac{\ell^q 2^{q-1}}{(q-1)} \right) \left( \frac{\ell }{\sqrt{\mu} (q-1)\left( \bbE[\|\bx_0\|^{2q}] + c_{\ref{2qmomentremainder2}} \right) \frac{\ell^q 2^{q-1}}{(q-1)}}\right)^{\frac{2-2q}{-2q+1}}
\end{align*}
and plugging this bound into (\ref{eq:optimizationBound}) gives the suboptimality bound from \eqref{eq:noncompactGenSuboptimality}. In particular, if $q=4$, $\beta \ge 1$ and $W_1(\cL(\bx_k),\pi_{\beta\bar f})\le 1$, we get a bound of the form
$$
\bbE[\bar f(\bx_k)] \le \min_{x \in \cK} \bar f(x) + c W_1 (\cL(\bx_k), \pi_{\beta \bar f})^{\frac{2}{3}} + \frac{n}{ \beta} \left(\max\{\log\varsigma,0\}+ c_{\ref{const_subopt}} \right)
$$
for some constant $c$ independent of $\beta$. Indeed, $c_{\ref{2qmomentremainder2}}$ decreases monotonically with respect to $\beta$, and thus so does $c_{\ref{optimization_const2}}$. So, assuming $\beta\ge 1$, we can take $c\ge c_{\ref{optimization_const1}}+c_{\ref{optimization_const2}}$ to be a fixed value independent of $\beta$. Setting $\beta$ as in (\ref{eq:betaOpt}) gives
$$
\bbE[\bar f(\bx_k)] \le \min_{x \in \cK} \bar f(x) + c W_1 (\cL(\bx_k), \pi_{\beta \bar f})^{\frac{2}{3}} + \frac{\epsilon}{2}.
$$
Then, arguing as in the proof of Proposition~\ref{prop:suboptCompact}, for sufficiently large $\beta$ and $T\ge 4$, we have that
\begin{align*}
c W_1 (\cL(\bx_k), \pi_{\beta \bar f})^{\frac{2}{3}} & \le c \left( c_{\ref{contraction_const1}} + c_{\ref{contraction_const2}} \sqrt{\varsigma} + \frac{c_{\ref{error_polyhedron1}} + c_{\ref{error_polyhedron2}}\sqrt{\varsigma}}{(2a)^{1/2}} \right)^{2/3} T^{-1/3} \log T \\
&\le e^{\frac{2\beta\ell R^2}{3}} T^{-1/3}\log T.
\end{align*}
Then, setting $\alpha = 2n\left(\max\{\log \varsigma,0\}+c_{\ref{subOptIndep}}\right)\ell R^2$, $\beta$ from (\ref{eq:betaOpt}), and $T=e^{\gamma/\epsilon}$ gives
$$
c W_1 (\cL(\bx_k), \pi_{\beta \bar f})^{\frac{2}{3}}\le \exp\left( \frac{1}\epsilon\left(\frac{2\alpha}{3}-\frac{\gamma}{3} \right) \right) \frac{\gamma}{\epsilon}.
$$
So, we seek a sufficient condition for
$$
\exp\left( \frac{1}\epsilon\left(\frac{2\alpha}{3}-\frac{\gamma}{3} \right) \right) \frac{\gamma}{\epsilon}\le \frac{\epsilon}{2} \iff \exp\left( \frac{1}\epsilon\left(\frac{2\alpha}{3}-\frac{\gamma}{3} \right) \right) \frac{2\gamma}{\epsilon^2}\le 1.
$$
Then, similarly to the compact case, when $\gamma > 2\alpha$ the left side is maximized over $\epsilon\in(0,\infty)$ at $\epsilon = \frac{\gamma - 2\alpha}{6}$. Plugging in the maximizer gives the sufficient condition
$$
\frac{72 e^{-2} \gamma}{(\gamma-2\alpha)^2}\le 1.
$$
This is satisfied in particular at
$$
c_{\ref{TExpGeneral}}=\gamma = 8\alpha + 72 e^{-2}.
$$
\hfill $\blacksquare$
\section{Near-optimality of Gibbs distributions}
In this appendix, we prove Proposition~\ref{prop: app_optimization}, which shows that $\bx_k$ can be near-optimal. The proof closely follows \cite{lamperski2021projected} and \cite{raginsky2017non}. The main difference is that in our case we have to deal with the unbounded polyhedral constraint, while in \cite{raginsky2017non} there is no constraint and in \cite{lamperski2021projected} the constraint is compact. First, we need the following preliminary result.
\const{const_subopt}
\begin{lemma}
\label{lem:gibbsSuboptimality}
Assume $\bx$ is drawn according to $\pi_{\beta \bar f}$. There exists a positive constant $c_{\ref{const_subopt}}$ such that the following bound holds:
\begin{align*}
\bbE[\bar f(\bx)] \le \min_{x \in \cK} \bar f(x) +\frac{n}{ \beta} \left( c_{\ref{const_subopt}} + \log n \right)
\end{align*}
where $c_{\ref{const_subopt}} = 2 \log( \varsigma +\frac{1}{\mu} c_{\ref{LyapunovConst}} ) + \frac{1}{6}\log 3 + \log (2 \sqrt{\pi}) - \log r_{\min}$.
\end{lemma}
\paragraph{Proof of Lemma~{\ref{lem:gibbsSuboptimality}}}
Recall that the probability measure $\pi_{\beta \bar f}$ is defined by $\pi_{\beta \bar f}(A) = \frac{\int_{A \cap \cK} e^{-\beta \bar f(x)}dx}{\int_{\cK} e^{- \beta \bar f(y)}dy}$ for measurable sets $A$. Let $\Lambda = \int_{\cK} e^{-\beta \bar f(y)} dy $ and $p(x) = \frac{e^{-\beta \bar f(x)}}{\Lambda}$. So $\log p(x) = -\beta \bar f(x) - \log \Lambda$, which implies that $\bar f(x) = -\frac{1}{\beta} \log p(x) -\frac{1}{\beta} \log \Lambda$. Then we have
\begin{align}
\nonumber
\bbE_{\pi_{\beta \bar f}} [\bar f(\bx)] &= \int_{\cK} \bar f(x) p(x) dx \\
\label{eq:sub_optimal}
&= -\frac{1}{\beta} \int_{\cK} p(x) \log p(x) dx -\frac{1}{\beta} \log \Lambda.
\end{align}
We can bound the first term by maximizing the differential entropy. Let $h = - \int_{\cK} p(x) \log p(x) dx$ denote the differential entropy of $p$. Using the fact that the differential entropy of a density with finite second moment is upper-bounded by that of a Gaussian density with the same second moment (see Theorem 8.6.5 in \cite{cover2012elements}), we have
\begin{align}
\label{eq:diff_entropy_bound}
h \le \frac{n}{2}\log(2 \pi e \sigma^2) \le \frac{n}{2}\log(2 \pi e ( \varsigma +\frac{1}{\mu} c_{\ref{LyapunovConst}})),
\end{align}
where $\sigma^2 = \bbE_{\pi_{\beta\bar f}}[\|\bx\|^2]$ and the second inequality uses Lemma \ref{lem:continuousBound}. We now derive an upper bound on the second term of (\ref{eq:sub_optimal}). First we show that there is a vector $x^* \in \cK$ which minimizes $\bar f$ over $\cK$; in other words, an optimal solution exists. The bound (\ref{eq:quadraticLower}) from the proof of Lemma~\ref{lem:gibbs} implies that $\bar f(x)\ge \bar f(0)+1$ for all $x$ with $\|x\|$ sufficiently large. This implies that there is a compact ball $B$ such that if $(x_j)\subset \cK$ is a sequence with $\lim_{j\to \infty}\bar f(x_j)=\inf_{x\in\cK} \bar f(x)$, then $x_j$ must be in $B\cap \cK$ for all sufficiently large $j$. Then since $\bar f$ is continuous and $B\cap \cK$ is compact, there must be a limit point $x^* \in B\cap \cK$ which minimizes $\bar f$ over $\cK$. Let $x^* \in \cK$ be such a minimizer.
The normalizing constant can be expressed as:
\begin{align*}
\label{eq:logNormalizationConstant}
\log \Lambda &= \log \int_{\cK} e^{-\beta \bar f(x)} dx\\
&= \log \left( e^{-\beta \bar f(x^*)} \int_{\cK} e^{\beta \left( \bar f(x^*) - \bar f(x)\right) } dx \right) \\
&= -\beta \bar f(x^*) + \log \int_{\cK} e^{\beta \left( \bar f(x^*) - \bar f(x)\right) } dx.
\end{align*}
So, to derive our desired upper bound on $-\log\Lambda$, it suffices to derive a lower bound on $\int_{\cK} e^{\beta \left( \bar f(x^*) - \bar f(x)\right) } dx$. We have
\begin{align*}
\bar f(x) - \bar f(x^*) = \int_0^1 \nabla \bar f(x^* + t(x-x^*))^\top (x-x^*)dt.
\end{align*}
For $y = x^* + t(x-x^*)$ with $t \in [0,1]$, we have
\begin{align*}
\|\nabla \bar f(y)\| &= \| \nabla \bar f(y) - \nabla \bar f(x^*) + \nabla \bar f(x^*) - \nabla \bar f(0) + \nabla \bar f(0)\| \\
&\le \ell \| y- x^*\| + \ell \| x^*\| + \| \nabla \bar f(0)\| \\
& \le \ell \|x-x^*\| t + \ell \| x^*\| + \| \nabla \bar f(0)\|.
\end{align*}
We can show that $\|x^*\|$ is upper bounded by $\max \{R, \frac{\|\nabla \bar f(0)\|}{\mu}\}$. It suffices to consider the case $\|x^*\| > R$. The convexity outside a ball assumption gives
\begin{equation}
\label{eq:conv_opt}
\left( \nabla \bar f(x^*) - \nabla \bar f(0) \right)^\top x^* \ge \mu \|x^*\|^2.
\end{equation}
The optimality of $x^*$ gives $-\nabla \bar f(x^*) \in N_{\cK}(x^*)$, which is to say that for all $y \in \cK$, $-\nabla \bar f(x^*)^\top (y- x^*) \le 0$. Since $0 \in \cK$, $\nabla \bar f(x^*)^\top x^* \le 0 $ holds. Combining this with the Cauchy-Schwarz inequality applied to the left side of \eqref{eq:conv_opt} gives
\begin{align*}
\| \nabla \bar f(0) \| \|x^*\| \ge \mu \|x^*\|^2.
\end{align*}
\const{xoptBound}
This implies that $\|x^*\| \le \frac{\|\nabla \bar f(0)\|}{\mu} $. So we can conclude that $\|x^*\| \le \max \{R, \frac{\|\nabla \bar f(0)\|}{\mu} \} = c_{\ref{xoptBound}}$. Therefore,
\begin{align*}
\bar f(x) - \bar f(x^*) &\le \int_0^1 \|\nabla \bar f(y)\| \|x -x^*\| dt \\
&\le \frac{\ell}{2} \|x- x^*\|^2 + \left( \ell \| x^*\| + \| \nabla \bar f(0)\|\right) \|x-x^*\| \\
&\le \frac{\ell}{2} \|x- x^*\|^2 + \left( \ell c_{\ref{xoptBound}} + \| \nabla \bar f(0)\|\right) \|x-x^*\|.
\end{align*}
We now determine a set of $x$ on which $e^{\beta \left( \bar f(x^*) - \bar f(x)\right)} \ge 1/2$. We have
\begin{align*}
&e^{\beta \left( \bar f(x^*) - \bar f(x)\right)} \ge 1/2 \\
\iff &\beta \left( \bar f(x^*) - \bar f(x)\right) \ge -\log 2 \\
\impliedby & -\frac{\ell}{2} \|x- x^*\|^2 - \left( \ell c_{\ref{xoptBound}} + \| \nabla \bar f(0)\|\right) \|x-x^*\| \ge -\frac{1}{\beta} \log 2.
\end{align*}
Solving the corresponding quadratic equation and taking the positive root shows that the last condition holds whenever
\begin{align*}
\|x - x^*\| \le - \frac{1}{\ell}\left( \ell c_{\ref{xoptBound}} + \| \nabla \bar f(0)\|\right) + \frac{1}{\ell}\sqrt{\left( \ell c_{\ref{xoptBound}} + \| \nabla \bar f(0)\|\right)^2 + 2 \ell \frac{1}{\beta} \log 2}.
\end{align*}
So let $\epsilon = -\frac{1}{\ell}\left( \ell c_{\ref{xoptBound}} + \| \nabla \bar f(0)\|\right) + \frac{1}{\ell}\sqrt{\left( \ell c_{\ref{xoptBound}} + \| \nabla \bar f(0)\|\right)^2 + 2 \ell \frac{1}{\beta} \log 2}$ and let $\cB_{x^*}(\epsilon)$ be the ball of radius $\epsilon$ centered at $x^*$. Then we want to find a ball $\cS$ such that
\begin{align*}
\int_{\cK} e^{\beta \left( \bar f(x^*) - \bar f(x)\right)} dx \ge \frac{1}{2} \textrm{vol} (\cK \cap \cB_{x^*}(\epsilon)) \ge \frac{1}{2} \textrm{vol}(\cS).
\end{align*}
To find the desired ball $\cS$, we consider the problem of finding the largest ball inscribed within $\cK\cap \cB_{x^*}(\epsilon)$.
This is a Chebyshev centering problem, and can be formulated as the following convex optimization problem.
\begin{subequations}
\label{eq:chebyOptimization}
\begin{align}
&\max_{r,y} && r \\
\label{eq:ballInPolyhedron}
&\textrm{subject to} && Ay \le b - r \boldsymbol{1} \\
\label{eq:ballInBall}
&&&\|x^* -y\| + r \le \epsilon
\end{align}
\end{subequations}
where $r$ and $y$ denote the radius and the center of the Chebyshev ball, respectively. The particular form arises because the rows of $A$ are unit vectors, and so the ball of radius $r$ around $y$ is inscribed in $\cK$ if and only if (\ref{eq:ballInPolyhedron}) holds, while this ball is contained in $\cB_{x^*}(\epsilon)$ if and only if (\ref{eq:ballInBall}) holds. We rewrite this optimization problem as:
\begin{subequations}
\label{eq:chebyOptimization2}
\begin{align}
&\min_{r,y }&& -r + I_S(x^*, [r,y^\top]^\top) \\
&\textrm{subject to} && Ay \le b - r \boldsymbol{1}
\end{align}
\end{subequations}
where $S = \{(x, [r,y^\top]^\top) \vert \|x -y\| + r <\epsilon \}$. Here, $I_S$ is defined by
\begin{equation}
I_S(x,[r,y^\top]^\top ) = \left\{
\begin{array}{ c l }
+\infty & \quad \textrm{if } (x,[r,y^\top]^\top) \notin S \\
0 & \quad \textrm{otherwise}.
\end{array}
\right.
\end{equation}
For $x\in\R^n$, let $g(x)$ denote the optimal value of (\ref{eq:chebyOptimization2}) with $x$ in place of $x^*$. We will show that there is a positive constant $r_{\min}>0$ such that $-g(x) \ge r_{\min}$ for all $x \in \cK$. As a result, for any $x^*$ the corresponding Chebyshev centering solution has radius at least $r_{\min}$. Let $F(x,[r,y^\top]^\top)= -r + I_S(x, [r,y^\top]^\top)$. We can see that $F$ is convex in $(x, [r,y^\top]^\top)$ and $\textrm{dom}\:F = S$. Let $C = \{ [r,y^\top ]^\top \vert [\boldsymbol{1} \; A] [r,y^\top]^\top \le b \}$. Then the optimal value of (\ref{eq:chebyOptimization2}) can be expressed as $g(x)= \inf_{[r,y^\top ]^\top \in C} F(x, [r,y^\top]^\top)$ and $\textrm{dom}\:g = \{ x \vert \exists [r,y^\top]^\top \in C \; \textrm{s.t.} \; (x, [r,y^\top ]^\top) \in S\}$. The results of Section 3.2.5 of \cite{boyd2004convex} imply that if $F$ is convex, $S$ is convex, and $g(x) >-\infty$ for all $x$, then $g$ is also convex. If $(x, [r,y^\top ]^\top)\in \textrm{dom} \:F$, then
\begin{align*}
\|x-y\| + r \le \epsilon & \implies r \le \epsilon - \|x-y\|\\
& \implies -r \ge - \epsilon + \|x-y\| > - \infty.
\end{align*}
In particular, if there exist $ y,r $ such that $(x, [r,y^\top ]^\top) \in \textrm{dom} \:F$, then $\inf_{[r,y^\top ]^\top \in C} F(x, [r,y^\top]^\top) \ge -\epsilon $. There are two cases:
\begin{itemize}
\item If there exists $[r,y^\top]^\top \in C$ such that $(x, [r,y^\top]^\top) \in \textrm{dom}\: F$, then $\inf_{[r,y^\top]^\top \in C} F(x, [r,y^\top]^\top) $ is finite.
\item If there does not exist $[r,y^\top]^\top \in C$ such that $(x, [r,y^\top]^\top) \in \textrm{dom} \:F$, then for all $[r,y^\top]^\top \in C$, $F(x, [r,y^\top]^\top) = +\infty$. So $g(x) = \inf_{[r,y^\top]^\top\in C} F(x,[r,y^\top]^\top) = +\infty > - \infty$.
\end{itemize}
Hence, for all $x$, $g(x) >-\infty$, so $g$ is convex. So, to find a lower bound on the inscribed radius, we want to maximize $g(x)$ over $\cK$. Specifically, we analyze the following optimization problem
\begin{subequations}
\label{eq:chebyOptimization3}
\begin{align}
&\max_{x \in \cK }&& g(x)
\end{align}
\end{subequations}
which corresponds to maximizing a convex function over a convex set. Note that $\cK \subset \textrm{dom}\: g$.
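As an aside, the Chebyshev centering problem (\ref{eq:chebyOptimization}) is a small second-order cone program and is straightforward to solve numerically. The following Python sketch is included only for concreteness; it assumes the \texttt{cvxpy} package is available, and the data $A$, $b$, $x^*$ and $\epsilon$ below are illustrative rather than taken from the analysis above.
\begin{verbatim}
# Minimal sketch of the Chebyshev centering problem (illustrative data only):
# largest ball of radius r around y contained in K = {x : A x <= b} and in
# the ball of radius eps around x_star.  Rows of A are unit vectors, so the
# inclusion in K is exactly the constraint A y + r <= b.
import numpy as np
import cvxpy as cp

A = np.array([[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0], [0.0, -1.0]])  # unit rows
b = np.array([1.0, 1.0, 1.0, 1.0])            # K is the square [-1, 1]^2
x_star, eps = np.array([0.3, 0.2]), 0.5       # enclosing ball (illustrative)

r = cp.Variable()                             # radius of the inscribed ball
y = cp.Variable(2)                            # center of the inscribed ball
constraints = [A @ y + r <= b,                   # ball lies inside K
               cp.norm(x_star - y) + r <= eps]   # ball lies inside B(x_star, eps)
prob = cp.Problem(cp.Maximize(r), constraints)
prob.solve()
print("inscribed radius:", r.value, "center:", y.value)
\end{verbatim}
In the proof, however, what matters is not solving (\ref{eq:chebyOptimization}) for a particular $x^*$, but bounding its optimal value uniformly over $x^*\in\cK$, which is what the function $g$ introduced above accomplishes.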
Returning to the argument: if $x\in\cK$, then $[0,x^\top]^\top\in C$ and $(x,[0,x^\top]^\top)\in S$, which implies that $g(x) \le 0$. Thus, $g(x)\le 0$ for all $x\in \cK$. Therefore, since $\cK$ is closed and convex by our assumption and $g$ is bounded above on $\cK$, Theorem 32.2 of \cite{rockafellar2015convex} gives
\begin{align*}
\sup \left\{ g(x) \vert x \in \cK \right\} = \sup \left\{ g(x) \vert x \in E \right\}
\end{align*}
where $E$ is the subset of $\cK$ consisting of the extreme points of $\cK \cap L^{\perp}$, and $L = \{x \vert Ax = 0 \} = \cN(A)$ is the lineality space of $\cK$. Now, we will show that $E$ is a finite set. Let
\begin{align*}
A = \begin{bmatrix}
U_1 & U_2
\end{bmatrix}
\begin{bmatrix}
\Sigma & 0 \\
0& 0
\end{bmatrix}
\begin{bmatrix}
V_1^\top \\
V_2^\top
\end{bmatrix}
\end{align*}
be a singular value decomposition of $A$; then $\cN(A) = L = \cR(V_2)$ and $L^\perp = \cR(V_1)$, and
\begin{align*}
\cK\cap L^\perp = \{ V_1 Z_1 \vert A V_1 Z_1 \le b \}.
\end{align*}
This is a polyhedron containing no lines, so it has a finite set of extreme points, i.e., $E$ is finite. Then, following the argument in \cite{lamperski2021projected}, we have $\textrm{vol}(\cS)\ge \frac{\pi^{n/2}}{\Gamma(n/2+1)}r_{\min}^n $, using the fact that a ball of radius $\rho$ has volume $\frac{\pi^{n/2}}{\Gamma(n/2+1)}\rho^n$. We also use the following upper bound on the Gamma function, recorded in \cite{ramanujan1988lost}:
\begin{equation}
\label{eq:stirling}
\Gamma(x+1) < \sqrt{\pi} \left( \frac{x}{e}\right)^x \left( 8x^3 + 4x^2 +x + \frac{1}{30}\right)^{1/6}, \; x \ge 0.
\end{equation}
Setting $x = \frac{n}{2}$ in (\ref{eq:stirling}) gives:
\begin{equation}
\Gamma(\frac{n}{2}+1) < \sqrt{\pi} \left( \frac{n}{2e}\right)^{\frac{n}{2}} \left( n^3 + n^2 + \frac{n}{2} + \frac{1}{30}\right)^{1/6}.
\end{equation}
Therefore, we can lower bound $\log \left( \frac{1}{2} \textrm{vol}(\cS)\right)$:
\begin{align}
\nonumber
\log \frac{1}{2} \textrm{vol}(\cS) &\ge \log \frac{\pi^{n/2}}{\Gamma(n/2+1)}r_{\min}^n - \log 2\\
\nonumber
&> \frac{n}{2} \log \pi + n \log r_{\min} - \log \left\{ \sqrt{\pi} \left( \frac{n}{2e}\right)^{\frac{n}{2}} \left( n^3 + n^2 + \frac{n}{2} + \frac{1}{30}\right)^{1/6} \right\} - \log 2\\
\nonumber
&= -\frac{1}{2} \log \pi + n \log r_{\min} + \frac{n}{2} \log (2 \pi e) - \frac{n}{2} \log n- \frac{1}{6} \log \left( n^3 + n^2 + \frac{n}{2} + \frac{1}{30}\right) - \log 2\\
\label{eq:logvolumn_bound}
& > n \log r_{\min} + \frac{n}{2} \log (2 \pi e) - \frac{n}{2} \log n -\frac{1}{6} \log \left( 3 n^3 \right) - \log (2 \sqrt{\pi})
\end{align}
The last inequality holds because $n \ge 1 $. Plugging (\ref{eq:logvolumn_bound}) and (\ref{eq:diff_entropy_bound}) into (\ref{eq:sub_optimal}) gives
\begin{align*}
\bbE_{\pi_{\beta \bar f}} [\bar f(\bx)] & < \min_{x \in \cK} \bar f(x) + \frac{n}{2 \beta} \log(2 \pi e ( \varsigma +\frac{1}{\mu} c_{\ref{LyapunovConst}})) \\
& -\frac{1}{\beta} \left( n \log r_{\min} + \frac{n}{2} \log (2 \pi e) - \frac{n}{2} \log n -\frac{1}{2} \log n - \frac{1}{6}\log 3 - \log (2 \sqrt{\pi}) \right)\\
&= \min_{x \in \cK} \bar f(x) +\frac{n}{2 \beta} \log( \varsigma +\frac{1}{\mu} c_{\ref{LyapunovConst}}) - \frac{1}{\beta} \left( n \log r_{\min} - \frac{n}{2} \log n - \frac{1}{2} \log n \right)\\
& + \frac{1}{\beta} ( \frac{1}{6}\log 3 + \log (2 \sqrt{\pi}))\\
&\le \min_{x \in \cK} \bar f(x) +\frac{n}{ \beta} \left( c_{\ref{const_subopt}} + \log n \right)
\end{align*}
where $c_{\ref{const_subopt}} = 2 \log( \varsigma +\frac{1}{\mu} c_{\ref{LyapunovConst}} ) + \frac{1}{6}\log 3 + \log (2 \sqrt{\pi}) - \log r_{\min}$ and the last inequality holds because $n \ge 1$.
\hfill $\blacksquare$
\const{optimization_const1}
\const{optimization_const2}
\const{2qmomentremainder1}
\const{2qmomentremainder2}
\begin{proposition}
\label{prop: app_optimization}
Let $\bx_k$ be the iterates of the algorithms and assume $\eta \le \frac{\mu}{3\ell^2}$ and $\bbE[\|\bx_0\|^{2q}] < \infty$ for all $q>1$. There exist positive constants $c_{\ref{const_subopt}}, c_{\ref{optimization_const1}}, c_{\ref{optimization_const2}}$ such that for all $q>1$ and integers $k \ge 0$, the following bound holds:
\begin{align*}
\bbE[\bar f(\bx_k)] \le \min_{x \in \cK} \bar f(x) + c_{\ref{optimization_const1}} W_1(\cL(\bx_k), \pi_{\beta \bar f}) + c_{\ref{optimization_const2}} W_1 (\cL(\bx_k), \pi_{\beta \bar f})^{\frac{2-2q}{1-2q}} + \frac{n}{ \beta} \left( c_{\ref{const_subopt}} + \log n \right),
\end{align*}
where
\begin{align*}
c_{\ref{const_subopt}} &= 2 \log( \varsigma +\frac{1}{\mu} c_{\ref{LyapunovConst}} ) + \frac{1}{6}\log 3 + \log (2 \sqrt{\pi}) - \log r_{\min} \\
c_{\ref{optimization_const1}} &= \|\nabla \bar f(0) \| + \ell \frac{2\left(\|\nabla \bar f(0)\|+\sqrt{\|\nabla \bar f(0)\|} + \sqrt{\mu \bar f(0)}\right)}{\mu} \\
c_{\ref{optimization_const2}} &= \left( \frac{2 \ell }{\sqrt{\mu}} +C \right) \left( \frac{\ell }{\sqrt{\mu} (q-1)C}\right)^{\frac{2-2q}{-2q+1}}
\end{align*}
with $C = \left( \bbE[\|\bx_0\|^{2q}] + c_{\ref{2qmomentremainder2}} \right) \frac{\ell^q 2^{q-1}}{q-1}$, where $c_{\ref{2qmomentremainder2}}$ is the constant from the $2q$-th moment bound established in the proof.
\end{proposition}
\paragraph{Proof of Proposition~{\ref{prop: app_optimization}}}
Let $\bx$ be drawn according to $\pi_{\beta \bar f}$. Then Lemma~\ref{lem:gibbsSuboptimality} implies:
\begin{align}
\nonumber
\bbE[\bar f(\bx_k)] &= \bbE[\bar f(\bx)] + \bbE[\bar f(\bx_k)-\bar f(\bx)]\\
\label{eq:optimizationBound}
&\le \min_{x \in \cK} \bar f(x) + \bbE[\bar f(\bx_k)-\bar f(\bx)] + \frac{n}{ \beta} \left( c_{\ref{const_subopt}} + \log n \right).
\end{align}
So, it now suffices to bound $ \bbE[\bar f(\bx_k)-\bar f(\bx)]$. Ideally, we would bound this term via Kantorovich duality. The problem is that $\bar f$ may not be globally Lipschitz. So, we must approximate it with a Lipschitz function, and then bound the gap induced by this approximation. Namely, fix a constant $m>\bar f(0)$, to be chosen later (a further condition on $m$ is imposed below). Set $g(x)=\min\{\bar f(x),m\}$. The inequality from (\ref{eq:quadraticLower}) implies that if $\|x\|\ge \hat R:=\frac{2\left(\|\nabla \bar f(0)\|+\sqrt{\|\nabla \bar f(0)\|+\mu (m-\bar f(0))}\right)}{\mu}$, then $\bar f(x)\ge m$. We claim that $g$ is globally Lipschitz. For $\|x\|\le \hat R$, we have that
$$
\|\nabla \bar f(x)\|\le \|\nabla \bar f(0)\| + \ell \hat R=:u.
$$
We will show that $g$ is $u$-Lipschitz. In the case that $\bar f(y)\ge m$ and $\bar f(x)\ge m$, we have $|g(x)-g(y)|=0$, so the property holds. Now say that $\bar f(x) < m$ and $\bar f(y) < m$. Then we must have $\|x\|\le \hat R$ and $\|y\|\le \hat R$. Then for all $t\in [0,1]$, we have $\|(1-t)x+ty \|\le \hat R$. It follows that
\begin{align*}
g(x)-g(y)&= \bar f(x)-\bar f(y) \\
&= \int_0^1 \nabla \bar f(y+t(x-y))^\top (x-y) dt \\
&\le u \|x-y\|.
\end{align*}
Finally, consider the case that $\bar f(x) \ge m$ and $\bar f(y) < m$. Then there is some $\theta \in [0,1]$ such that $\bar f(y + \theta (x-y))=m$. Furthermore,
\begin{align*}
|g(x)-g(y)| &= m-\bar f(y) \\
&= \bar f(y+\theta (x-y))-\bar f(y) \\
&=\int_0^\theta \nabla \bar f( y + t(x-y))^\top (x-y) dt\\
&\le u \|x-y\|.
\end{align*}
It follows that $g$ is $u$-Lipschitz.
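As a quick sanity check of this Lipschitz claim, the following Python snippet verifies it numerically for an illustrative one-dimensional example; the choice $\bar f(x)=x^2$ (for which $\ell=\mu=2$ and $\nabla\bar f(0)=0$) is ours and is not taken from the analysis above.
\begin{verbatim}
# Sanity check (illustrative): for f_bar(x) = x^2, the truncation
# g = min(f_bar, m) should be u-Lipschitz with u = ||grad f_bar(0)|| + ell*R_hat.
import numpy as np

m = 4.0
ell, mu = 2.0, 2.0                 # grad f_bar(x) = 2x, so ell = mu = 2
grad0, f0 = 0.0, 0.0               # ||grad f_bar(0)|| and f_bar(0)
R_hat = 2.0 * (grad0 + np.sqrt(grad0 + mu * (m - f0))) / mu
u = grad0 + ell * R_hat            # claimed Lipschitz constant of g

x = np.linspace(-5.0, 5.0, 20001)
g = np.minimum(x**2, m)
emp = np.max(np.abs(np.diff(g)) / np.diff(x))   # empirical Lipschitz constant
print(f"empirical {emp:.3f} <= claimed u = {u:.3f}")
assert emp <= u
\end{verbatim}
Here the empirical constant is approximately $2\sqrt{m}=4$, comfortably below the (conservative) bound $u=2\sqrt{2m}\approx 5.66$.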
Now noting that $g(x)\le \bar f(x)$ for all $x$ gives
\begin{align}
\nonumber
\bbE[\bar f(\bx_k)-\bar f(\bx)]&\le \bbE[\bar f(\bx_k)-g(\bx)] \\
\nonumber
&=\bbE[g(\bx_k)-g(\bx)] + \bbE[\indic(\bar f(\bx_k) > m) (\bar f(\bx_k)-m)] \\
\label{eq:kantorovichSplit}
&\le u W_1(\cL(\bx_k),\pi_{\beta \bar f}) + \bbE[\indic(\bar f(\bx_k) > m) (\bar f(\bx_k)-m)].
\end{align}
The final inequality uses Kantorovich duality. Now, it remains to bound $ \bbE[\indic(\bar f(\bx_k) > m) (\bar f(\bx_k)-m)]. $ Note that if $\by$ is a non-negative random variable, a standard identity gives that $\bbE[\by]=\int_0^\infty \bbP(\by > \epsilon)d\epsilon$. Thus, we have
$$
\bbE[\indic(\bar f(\bx_k) > m) (\bar f(\bx_k)-m)] = \int_0^\infty \bbP(\bar f(\bx_k) -m > \epsilon)d\epsilon.
$$
For all $x\in \cK$, we have
\begin{align*}
\bar f(x) &= \bar f(0) + \nabla \bar f(0)^\top x + \int_0^1 (\nabla \bar f(tx)-\nabla \bar f(0))^\top x dt \\
&\le \bar f(0) + \|\nabla \bar f(0)\| \|x \| + \frac{1}{2} \ell \|x\|^2 \\
&\le \bar f(0) + \frac{\|\nabla \bar f(0)\|^2}{2\ell} + \ell \|x\|^2.
\end{align*}
So,
\begin{align*}
\bar f(x)-m > \epsilon &\implies \bar f(0) + \frac{\|\nabla \bar f(0)\|^2}{2\ell} + \ell \|x\|^2 > m+\epsilon \\
&\iff \|x\|^2 > \frac{m+\epsilon- \left(\bar f(0) + \frac{\|\nabla \bar f(0)\|^2}{2\ell}\right)}{\ell}.
\end{align*}
Now assume that $m/2 > \bar f(0) + \frac{\|\nabla \bar f(0)\|^2}{2\ell}$. Then the right side implies $\|x\|^2 > \frac{\frac{m}{2}+\epsilon}{\ell}$. It follows that for any $q>1$, we have, via Markov's inequality and direct computation:
\begin{align*}
\bbE[\indic(\bar f(\bx_k) > m) (\bar f(\bx_k)-m)] & \le \int_0^{\infty} \bbP\left(\|\bx_k\|^2 > \frac{\frac{m}{2}+\epsilon}{\ell} \right) d\epsilon \\
&= \int_0^{\infty} \bbP\left(\|\bx_k\|^{2q} > \left(\frac{\frac{m}{2}+\epsilon}{\ell}\right)^q \right) d\epsilon \\
&\le \bbE[\|\bx_k\|^{2q}] \int_0^{\infty} \left(\frac{\frac{m}{2}+\epsilon}{\ell}\right)^{-q} d\epsilon \\
&= \bbE[\|\bx_k\|^{2q}] \frac{\ell^q 2^{q-1}}{(q-1)m^{q-1}}.
\end{align*}
Plugging this expression into (\ref{eq:kantorovichSplit}) and using the definition of $u$ gives
\begin{align}
\nonumber
\MoveEqLeft \bbE[\bar f(\bx_k)-\bar f(\bx)] \\
& \nonumber \le \left(\|\nabla \bar f(0) \| + \ell \frac{2\left(\|\nabla \bar f(0)\|+\sqrt{\|\nabla \bar f(0)\|+\mu (m-\bar f(0))}\right)}{\mu} \right) W_1(\cL(\bx_k),\pi_{\beta \bar f}) \\
\label{eq:kantorovichSplit2}
&\qquad + \bbE[\|\bx_k\|^{2q}] \frac{\ell^q 2^{q-1}}{(q-1)m^{q-1}}.
\end{align}
We now derive a bound on $\bbE[\|\bx_k\|^{2q}]$. We have
\begin{align*}
\|\bx_{k+1}\|^{2q} \le \|\bx_k - \eta \nabla f(\bx_k, \bz_k) + \sqrt{\frac{2\eta}{\beta}} \hat{\bw}_k\|^{2q}.
\end{align*}
For notational simplicity, let $y = \frac{\bx_k - \eta \nabla f(\bx_k, \bz_k)}{\sqrt{2\eta /\beta}}$ and $w = \hat{\bw}_k$, so the above inequality can be expressed as
\begin{align*}
\|\bx_{k+1}\|^{2q} &\le \left( \frac{2 \eta}{\beta}\right)^q\|y + w\|^{2q}\\
&= \left( \frac{2 \eta}{\beta}\right)^q \left( \|y\|^2 + \|w\|^2 +2 y^\top w \right)^q\\
&= \left( \frac{2 \eta}{\beta}\right)^q \sum_{j=0}^q \binom qj \left( 2 y^\top w \right)^{q-j}\left( \|y\|^2 + \|w\|^2 \right)^j \\
& = \left( \frac{2 \eta}{\beta}\right)^q \sum_{j=0}^q \binom qj \left( 2 y^\top w \right)^{q-j} \sum_{i=0}^j \binom ji\left( \|y\|^{2i} \|w\|^{2(j-i)} \right).
\end{align*}
The last two equalities use the binomial theorem.
Taking expectations, we can observe that the right side can be expressed as $\left( \frac{2 \eta}{\beta}\right)^q \bbE\left[ \|y\|^{2q} \right] + \bbE\left[ p(\|y\|)\right]$, where $p(\|y\|)$ is a polynomial in $\|y\|$ of degree strictly lower than $2q$. Note that $p(\|y\|)$ is dominated by the leading term when $\|y\|$ is large; more specifically, the lower-order terms are dominated when $\|\bx_k\|$ is large, and when $\|\bx_k\|$ is small they can be bounded above by a constant. So now we want to examine $\bbE[\|y\|^{2q}]$, which amounts to bounding $\bbE[\|\bx_k - \eta \nabla f(\bx_k, \bz_k)\|^{2q}]$. We have
\begin{align*}
\|\bx_k - \eta \nabla f(\bx_k, \bz_k)\|^{2q} &= \left( \|\bx_k\|^2 - 2 \eta \bx_k^\top \nabla f(\bx_k, \bz_k) + \eta^2 \|\nabla f(\bx_k, \bz_k)\|^2\right)^q.
\end{align*}
We examine the second term:
\begin{align*}
\bx_k^\top \nabla f(\bx_k, \bz_k) &= \bx_k^\top (\nabla \bar{f}(\bx_k) - \nabla \bar{f}(0)) + \bx_k^\top( \nabla \bar{f}(0) - \nabla \bar{f}(\bx_k) + \nabla f(\bx_k, \bz_k) ) \\
&\ge \mu \|\bx_k\|^2 - (\ell+ \mu) R^2 + \bx_k^\top(\nabla \bar{f}(0) + \bbE_{\hat \bz}[\nabla f(\bx_k, \bz_k) - \nabla f(\bx_k, \hat \bz_k) ])
\end{align*}
where the first term is bounded using the assumption of strong convexity outside a ball; in detail, if $\|x\| \ge R$, then $x^\top (\nabla \bar f(x) - \nabla \bar f(0)) \ge \mu \|x\|^2$, and if $\|x\| \le R$, then $x^\top (\nabla \bar f(x) - \nabla \bar f(0)) \ge - \ell \|x\|^2 \ge - \ell R^2$. Therefore, for all $x \in \cK$, $x^\top (\nabla \bar f(x) - \nabla \bar f(0)) \ge \mu \|x\|^2 - (\ell+\mu) R^2$. Here and below, $\hat \bz_k$ is independent of $\bz_k$ and identically distributed. Then, we have
\begin{align}
\nonumber
& \bbE\left[ \|\bx_k - \eta \nabla f(\bx_k, \bz_k)\|^{2q} \right] \nonumber \\
\nonumber
&= \bbE\left[\left( \|\bx_k\|^2 - 2 \eta \bx_k^\top \nabla f(\bx_k, \bz_k) + \eta^2 \|\nabla f(\bx_k, \bz_k)\|^2\right)^q\right] \\
\nonumber
&\le \bbE\left[\left((1- 2 \mu \eta) \|\bx_k\|^2 + 2 \eta (\ell+ \mu) R^2 \right.\right. \\
\nonumber
& \left. \left. \qquad - 2 \eta\bx_k^\top(\nabla \bar{f}(0) + \bbE_{\hat \bz}[\nabla f(\bx_k, \bz_k) - \nabla f(\bx_k, \hat \bz_k) ]) + \eta^2 \|\nabla f(\bx_k, \bz_k)\|^2\right)^q\right] \\
\nonumber
&\le \bbE\left[\left((1- 2 \mu \eta) \|\bx_k\|^2 + 2 \eta (\ell+ \mu) R^2 \right.\right. \\
\nonumber
& \left. \left. \qquad - 2 \eta\bx_k^\top(\nabla \bar{f}(0) + \nabla f(\bx_k, \bz_k) - \nabla f(\bx_k, \hat \bz_k) ) + \eta^2 \|\nabla f(\bx_k, \bz_k)\|^2\right)^q\right] \\
\nonumber
&\le \bbE\left[\left((1- 2 \mu \eta) \|\bx_k\|^2 + 2 \eta (\ell+ \mu) R^2 \right.\right. \\
& \left. \left. \qquad + 2 \eta \|\bx_k\|(\|\nabla \bar{f}(0)\| + \ell\|\bz_k - \hat \bz_k\| ) + \eta^2 \|\nabla f(\bx_k, \bz_k)\|^2\right)^q\right]. \label{eq:Bound2qmoment}
\end{align}
The second inequality uses Jensen's inequality, and the last inequality uses the Cauchy-Schwarz inequality together with the $\ell$-Lipschitzness of $\nabla f(x,z)$ in $z$. Now we examine the last term above.
\begin{align*}
\|\nabla f(x,z)\| &= \|\nabla f(x,z) - \bbE_{\hat{z}}[\nabla f(x, \hat z)] + \bbE_{\hat z}[\nabla f(x, \hat z)]\| \\
&\le \|\bbE_{\hat{z}}[ \nabla f(x,z) - \nabla f(x, \hat z)]\| + \| \nabla \bar{f}(x)\| \\
&\le \ell \bbE_{\hat{z}}[ \|z - \hat z\|] + \|\nabla \bar{f}(0)\| + \ell \|x\|.
\end{align*}
So
\begin{align}
\label{eq:fsquareBound}
\|\nabla f(x,z)\|^2 \le 3 \left( \ell^2 \left(\bbE_{\hat{z}}[ \|z - \hat z\|] \right)^2 + \|\nabla \bar{f}(0)\|^2 + \ell^2 \|x\|^2\right).
\end{align}
Then, we can group the square terms in (\ref{eq:Bound2qmoment}) together and simplify:
\begin{align*}
&(1-2\mu \eta) \|x\|^2 + \eta^2 3 \ell^2 \|x\|^2 \le (1-\eta \mu) \|x\|^2\\
\iff & 1-2\mu \eta + \eta^2 3 \ell^2 \le 1- \eta \mu \\
\iff& \eta \le \frac{\mu}{3\ell^2}.
\end{align*}
So, if $\eta \le \frac{\mu}{3\ell^2}$, plugging (\ref{eq:fsquareBound}) into (\ref{eq:Bound2qmoment}) gives
\begin{align*}
\bbE\left[ \|\bx_k - \eta \nabla f(\bx_k, \bz_k)\|^{2q} \right] &\le \bbE\left[\left((1- \mu \eta) \|\bx_k\|^2 + 2 \eta (\ell+ \mu) R^2 \right.\right. \\
& \left. \left. + 2 \eta \|\bx_k\|(\|\nabla \bar{f}(0)\| + \ell\|\bz_k - \hat \bz_k\| ) + \eta^2 3 \left( \ell^2 \left(\bbE_{\hat{\bz}}[ \|\bz_k - \hat \bz_k\|] \right)^2 + \|\nabla \bar{f}(0)\|^2 \right) \right)^q\right].
\end{align*}
We want to further group the first and third terms above together. Let $a= \|\bx_k\|$ and $b = \|\nabla \bar{f}(0)\| + \ell\|\bz_k - \hat \bz_k\| $. For all $\epsilon > 0$, $2ab = 2 (\epsilon a) (\frac{1}{\epsilon} b) \le (\epsilon a)^2 + (\frac{1}{\epsilon}b)^2$. Then, we have
\begin{align*}
&(1- \mu \eta) \|\bx_k\|^2 + \eta \epsilon^2 \|\bx_k\|^2 \le (1- \frac{\mu \eta}{2}) \|\bx_k\|^2 \\
\iff & 1- \mu \eta +\eta \epsilon^2 \le 1- \frac{\mu \eta}{2} \\
\iff & \epsilon \le \sqrt{\frac{\mu}{2}}.
\end{align*}
So, letting $\epsilon = \sqrt{\frac{\mu}{2}}$, we have
\begin{align*}
\bbE\left[ \|\bx_k - \eta \nabla f(\bx_k, \bz_k)\|^{2q} \right] &\le \bbE\left[\left((1- \frac{\mu \eta}{2}) \|\bx_k\|^2 + 2 \eta (\ell+ \mu) R^2 \right.\right. \\
& \left. \left. + \frac{2\eta}{\mu}(\|\nabla \bar{f}(0)\| + \ell\|\bz_k - \hat \bz_k\| )^2 + \eta^2 3 \left( \ell^2 \left(\bbE_{\hat{\bz}}[ \|\bz_k - \hat \bz_k\|] \right)^2 + \|\nabla \bar{f}(0)\|^2 \right) \right)^q\right] \\
&\le \bbE\left[\left((1- \frac{\mu \eta}{2}) \|\bx_k\|^2 + 2 \eta (\ell+ \mu) R^2 \right.\right. \\
& \left. \left. + \frac{2\eta}{\mu}(\|\nabla \bar{f}(0)\| + \ell\|\bz_k - \hat \bz_k\| )^2 + \eta^2 3 \left( \ell^2 \left( \|\bz_k - \hat \bz_k\| \right)^2 + \|\nabla \bar{f}(0)\|^2 \right) \right)^q\right] \\
& = (1-\frac{\mu \eta}{2})^q \bbE[\|\bx_k\|^{2q}] + \eta \bbE[p(\|\bx_k\|^2, \|\bz_k -\hat{\bz}_k\|^2)]\\
&\le (1-\frac{\mu \eta}{2}) \bbE[\|\bx_k\|^{2q}] + \eta \bbE[p(\|\bx_k\|^2, \|\bz_k -\hat{\bz}_k\|^2)]\\
& \le (1- \frac{\mu \eta}{4}) \bbE[\|\bx_k\|^{2q}] + \eta \bbE[-\frac{\mu}{4} \|\bx_k\|^{2q} + p(\|\bx_k\|^2, \|\bz_k -\hat{\bz}_k\|^2)]\\
&\le (1- \frac{\mu \eta}{4}) \bbE[\|\bx_k\|^{2q}] + \eta c_{\ref{2qmomentremainder1}}.
\end{align*}
The second inequality uses Jensen's inequality twice. The polynomial $p(\|\bx_k\|^2, \|\bz_k -\hat{\bz}_k\|^2)$ has degree strictly lower than $2q$ in $\|\bx_k\|$ and degree at most $2q$ in $\|\bz_k - \hat{\bz}_k\|$. The last inequality is based on the claim that $\bbE[-\frac{\mu}{4} \|\bx_k\|^{2q} + p(\|\bx_k\|^2, \|\bz_k -\hat{\bz}_k\|^2)]$ is bounded by a constant $c_{\ref{2qmomentremainder1}}$, which depends on $q$ and on the statistics of the external random variables $\bz$. Therefore, we have
\begin{align*}
\bbE[\|\bx_{k+1}\|^{2q}] \le (1-\frac{\eta \mu}{4}) \bbE[\|\bx_k\|^{2q}] + \eta c_{\ref{2qmomentremainder2}}.
\end{align*}
Intuitively, the constant $c_{\ref{2qmomentremainder2}}$ represents the influence of the lower-order remainder terms arising from the binomial expansion. Now, iterating this inequality, as long as $\bbE[\|\bx_0\|^{2q}] < \infty$ we have
\begin{align*}
\bbE[\|\bx_k\|^{2q}] \le \bbE[\|\bx_0\|^{2q}] + c_{\ref{2qmomentremainder2}}.
\end{align*}
Plugging the above result into (\ref{eq:kantorovichSplit2}) gives
\begin{align*}
\label{eq:kantorovichSplit3}
\MoveEqLeft \bbE[\bar f(\bx_k)-\bar f(\bx)] \\
& \le \left(\|\nabla \bar f(0) \| + \ell \frac{2\left(\|\nabla \bar f(0)\|+\sqrt{\|\nabla \bar f(0)\|+\mu (m-\bar f(0))}\right)}{\mu} \right) W_1(\cL(\bx_k),\pi_{\beta \bar f}) \\
&\qquad + \left( \bbE[\|\bx_0\|^{2q}] + c_{\ref{2qmomentremainder2}} \right) \frac{\ell^q 2^{q-1}}{(q-1)m^{q-1}}.
\end{align*}
It remains to optimize the right side of the above inequality with respect to $m$, which determines the choice of $m$ mentioned earlier in the proof. Let
\begin{align*}
g(m)&=\left(\|\nabla \bar f(0) \| + \ell \frac{2\left(\|\nabla \bar f(0)\|+\sqrt{\|\nabla \bar f(0)\|} + \sqrt{\mu m} + \sqrt{\mu \bar f(0)}\right)}{\mu} \right) W_1(\cL(\bx_k),\pi_{\beta \bar f}) \\
&\qquad + \left( \bbE[\|\bx_0\|^{2q}] + c_{\ref{2qmomentremainder2}} \right) \frac{\ell^q 2^{q-1}}{(q-1)m^{q-1}}.
\end{align*}
We can see that $g(m)$ is an upper bound on the right side of (\ref{eq:kantorovichSplit2}). Setting $g^\prime(m) = 0$ leads to $m^* = \left( \frac{\ell W_1}{\sqrt{\mu} (q-1)C}\right)^{\frac{2}{-2q+1}}$, where, for notational simplicity, $C = \left( \bbE[\|\bx_0\|^{2q}] + c_{\ref{2qmomentremainder2}} \right) \frac{\ell^q 2^{q-1}}{(q-1)}$. So
\begin{align*}
\min_{m > 0}g(m) &= g(m^*)\\
&\le \left(\|\nabla \bar f(0) \| + \ell \frac{2\left(\|\nabla \bar f(0)\|+\sqrt{\|\nabla \bar f(0)\|} + \sqrt{\mu \bar f(0)}\right)}{\mu} \right) W_1(\cL(\bx_k),\pi_{\beta \bar f}) \\
& + \frac{2 \ell }{\sqrt{\mu}} \left( \frac{\ell W_1(\cL(\bx_k),\pi_{\beta \bar f})}{\sqrt{\mu} (q-1)C}\right)^{\frac{2-2q}{-2q+1}} + C \left( \frac{\ell W_1(\cL(\bx_k),\pi_{\beta \bar f})}{\sqrt{\mu} (q-1)C}\right)^{\frac{2-2q}{-2q+1}}\\
& = \left(\|\nabla \bar f(0) \| + \ell \frac{2\left(\|\nabla \bar f(0)\|+\sqrt{\|\nabla \bar f(0)\|} + \sqrt{\mu \bar f(0)}\right)}{\mu} \right) W_1(\cL(\bx_k),\pi_{\beta \bar f}) \\
& + \left( \frac{2 \ell }{\sqrt{\mu}} +C \right) \left( \frac{\ell }{\sqrt{\mu} (q-1)C}\right)^{\frac{2-2q}{-2q+1}} W_1(\cL(\bx_k),\pi_{\beta \bar f})^{\frac{2-2q}{-2q+1}}.
\end{align*}
Setting
\begin{align*}
c_{\ref{optimization_const1}} &= \|\nabla \bar f(0) \| + \ell \frac{2\left(\|\nabla \bar f(0)\|+\sqrt{\|\nabla \bar f(0)\|} + \sqrt{\mu \bar f(0)}\right)}{\mu} \\
c_{\ref{optimization_const2}} &= \left( \frac{2 \ell }{\sqrt{\mu}} +\left( \bbE[\|\bx_0\|^{2q}] + c_{\ref{2qmomentremainder2}} \right) \frac{\ell^q 2^{q-1}}{(q-1)} \right) \left( \frac{\ell }{\sqrt{\mu} (q-1)\left( \bbE[\|\bx_0\|^{2q}] + c_{\ref{2qmomentremainder2}} \right) \frac{\ell^q 2^{q-1}}{(q-1)}}\right)^{\frac{2-2q}{-2q+1}}
\end{align*}
and plugging this bound into (\ref{eq:optimizationBound}) gives the desired result. Finally, we record the auxiliary derivations that make the choices of $T$ and $\beta$ explicit.
First,
\begin{align*}
(2a)^{-1/2} &\le (\frac{64\eta}{\mu \beta^2}e^{\frac{\beta \ell R^2}{8}})^{-1/2}\\
&\le \frac{1}{8}\sqrt{\frac{\mu}{\eta}} \beta e^{-\frac{\beta \ell R^2}{16}}.
\end{align*}
Therefore, we can simplify the bound in Theorem \ref{thm:nonconvexLangevin} as follows:
\begin{align*}
W_1(\cL(\bx_T), \pi_{\beta \bar{f}}) &\le \left( c_{\ref{contraction_const1}} + c_{\ref{contraction_const2}} \sqrt{\varsigma} + \frac{c_{\ref{error_polyhedron1}} + c_{\ref{error_polyhedron2}}\sqrt{\varsigma}}{(2a)^{1/2}} \right) T^{-1/2} \log T\\
&\le \left( c_{\ref{contraction_const1}} + c_{\ref{contraction_const2}} \sqrt{\varsigma} + \left( c_{\ref{error_polyhedron1}} + c_{\ref{error_polyhedron2}} \sqrt{\varsigma} \right)\frac{1}{8}\sqrt{\frac{\mu}{\eta}} \beta e^{-\frac{\beta \ell R^2}{16}}\right) T^{-1/2} \log T .
\end{align*}
In terms of its dependence on $\beta$, the bound above can be written as
\begin{align*}
W_1(\cL(\bx_T), \pi_{\beta \bar{f}}) \le p(\beta^{-1/2})\beta T^{-1/2} \log T ,
\end{align*}
where $p$ is a polynomial. Assuming $\beta >1$, we then have
\begin{align*}
W_1(\cL(\bx_T), \pi_{\beta \bar{f}}) \le p(1)\beta T^{-1/2} \log T .
\end{align*}
We first want a sufficient condition for
$$c_{\ref{optimization_const1}}W_1(\cL(\bx_T), \pi_{\beta \bar{f}}) \le \frac{\epsilon}{4}.$$
For all $\delta \in (0, 1) $, $T^{-1/2} \log T \le \sqrt{\frac{T^{-1 +\delta}}{(\frac{e \delta}{2})^2}} $. So it suffices to have
\begin{equation}
T^{-1+\delta} \le \left( \frac{e\delta}{2} \right)^2 (\frac{\epsilon}{4})^2 (c_{\ref{optimization_const1}} p(1) \beta)^{-2} := \hat \epsilon,
\end{equation}
that is,
\begin{align*}
T &\ge \hat \epsilon^{-\frac{1}{1-\delta}}\\
&= \left(\frac{64}{\delta^2 \epsilon^2 e^2} \right)^{\frac{1}{1-\delta}} (c_{\ref{optimization_const1}} p(1) \beta)^{\frac{2}{1-\delta}}.
\end{align*}
Let $\rho = \frac{2}{1-\delta} > 2$, which holds since $\delta \in (0,1)$. Then we have
\begin{align*}
T &\ge \hat \epsilon^{-\frac{1}{1-\delta}}\\
&= \left(\frac{8 \rho \, c_{\ref{optimization_const1}} p(1)}{e(\rho-2 ) }\right)^{\rho} \frac{1}{\epsilon^\rho} \beta^{\rho}.
\end{align*}
Similarly, we want a sufficient condition for
\begin{align*}
c_{\ref{optimization_const2}} W_1 (\cL(\bx_k), \pi_{\beta \bar f})^{\frac{2-2q}{1-2q}} \le \frac{\epsilon}{4}.
\end{align*}
Arguing as above, it suffices to have
\begin{align*}
T^{-1+\delta} \le \left( \frac{e\delta}{2} \right)^2 (\frac{\epsilon}{4})^{\frac{1-2q}{1-q}} c_{\ref{optimization_const2}}^{-\frac{1-2q}{1-q}} (p(1) \beta)^{-2}:= \hat \epsilon,
\end{align*}
that is,
\begin{align*}
T \ge \left( \left( \frac{e\delta}{2} \right)^2 (\frac{\epsilon}{4})^{\frac{1-2q}{1-q}} c_{\ref{optimization_const2}}^{-\frac{1-2q}{1-q}} (p(1) \beta)^{-2} \right)^{-\frac{1}{1-\delta}} = \hat \epsilon^{-\frac{1}{1-\delta}}.
\end{align*}
With $\rho = \frac{2}{1-\delta} > 2$ as before, this reads
\begin{align*}
T\ge \left( \frac{2 \rho p(1) \beta}{e (\rho -2)} \right)^{\rho} \epsilon^{-\frac{1-2q}{1-q} \frac{\rho}{ 2}} \left( 4 c_{\ref{optimization_const2}}\right)^{\frac{1-2q}{1-q} \frac{\rho}{2}},
\end{align*}
where $c_{\ref{optimization_const2}}$ depends on $q$ and on the $2q$-th moment of the initial iterate $\bx_0$. Finally, requiring $\frac{n}{ \beta} \left( c_{\ref{const_subopt}} + \log n \right) \le \frac{\epsilon}{2}$ amounts to
\begin{align*}
\beta \ge \frac{2 n ( c_{\ref{const_subopt}} + \log n)}{\epsilon}.
\end{align*}
\hfill $\blacksquare$
2205.14132v3
http://arxiv.org/abs/2205.14132v3
The gap between a variational problem and its occupation measure relaxation
\documentclass{article} \usepackage[utf8]{inputenc} \usepackage{xcolor} \usepackage{graphicx} \graphicspath{ {./img/} } \usepackage{amsmath,amsthm,amssymb,amsfonts,hyperref,mathtools,enumitem,bm} \usepackage[normalem]{ulem} \usepackage{cite} \title{The gap between a variational problem and its occupation measure relaxation} \author{Milan Korda$^{1,2}$ and Rodolfo R\'ios-Zertuche$^1$} \date{} \newcommand{\R}{\mathbb{R}} \newtheorem{theorem}{Theorem}[section] \newtheorem{lemma}[theorem]{Lemma} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{corollary}[theorem]{Corollary} \theoremstyle{definition} \newtheorem{definition}[theorem]{Definition} \newtheorem{example}[theorem]{Example} \theoremstyle{remark} \newtheorem{remark}[theorem]{Remark} \newcommand{\new}[1]{{\color{blue}#1}} \newcommand{\denomE}{41} \newcommand{\denomxi}{10} \newcommand{\denomA}{4} \newcommand{\proj}{\pi} \newcommand{\projXY}{\proj_{\Omega\times Y}} \newcommand{\projX}{\proj_{\Omega}} \newcommand{\projXYZ}{\proj_{\Omega\times Y\times Z}} \newcommand{\projdXY}{\proj_{\partial\Omega\times Y}} \usepackage{cancel} \textwidth145mm \hoffset-10mm \usepackage{empheq} \newcommand*\widefbox[1]{\fbox{\hspace{2em}#1\hspace{2em}}} \usepackage[most]{tcolorbox} \newtcolorbox{mymathbox}[1][]{colback=white, sharp corners, #1} \newcommand{\uu}{u} \begin{document} \maketitle \footnotetext[1]{CNRS; LAAS; 7 avenue du colonel Roche, F-31400 Toulouse; France. {\tt [email protected], [email protected]}} \footnotetext[2]{Faculty of Electrical Engineering, Czech Technical University in Prague, Technick\'a 2, CZ-16626 Prague, Czech Republic.} \renewcommand{\thefootnote}{\fnsymbol{footnote}} \footnotetext{\emph{MSC classes.} 35Q93 (Primary), 49Q15, 26B40, 65M99 (Secondary)} \renewcommand{\thefootnote}{\arabic{footnote}} \begin{abstract} Recent works have proposed linear programming relaxations of variational optimization problems subject to nonlinear PDE constraints based on the occupation measure formalism. The main appeal of these methods is the fact that they rely on convex optimization, typically semidefinite programming. In this work we close an open question related to this approach. We prove that the classical and relaxed minima coincide when the dimension of the codomain of the unknown function equals one, both for calculus of variations and for optimal control problems, thereby complementing analogous results that existed for the case when the dimension of the domain equals one. In order to do so, we prove a generalization of the Hardt-Pitts decomposition of normal currents applicable in our setting. We also show by means of a counterexample that, if both the dimensions of the domain and of the codomain are greater than one, there may be a positive gap. The example we construct to show the latter serves also to show that sometimes relaxed occupation measures may represent a more conceptually-satisfactory ``solution'' than their classical counterparts, so that ---even though they may not be equivalent--- algorithms rendering accessible the minimum in the larger space of relaxed occupation measures remain extremely valuable. Finally, we show that in the presence of integral constraints, a positive gap may occur at any dimension of the domain and of the codomain. \end{abstract} \tableofcontents \section{Introduction} \label{sec:intro} This work is concerned with a gap between the optimal value of a variational problem and the optimal value of its convex relaxation based on the so-called occupation measures. 
The variational problem considered is subject to constraints in the form of first-order nonlinear partial differential equations and inequalities. In this section we present a simplified version of the problem and introduce the convex relaxation, omitting constraints and boundary terms. The full version of the problem is treated in Section~\ref{sec:codim1}, with the main results being Theorem \ref{thm:decomposition} (superposition), Theorems \ref{thm:consolidated} and \ref{thm:nogap} (no gap in codimension one); these results are also stated in the context of optimal control in Section \ref{sec:oc}, where the main result is Theorem \ref{cor:oc}. The example with a positive gap in codimension greater than one is constructed in Section~\ref{sec:positive gap} with the main result being Theorem~\ref{thm:gap}. Additional examples, showing that there may be gaps when integral constraints are involved, are presented in Section \ref{sec:integralconstraints}. \paragraph{A global optimization problem.} Let $n,m>0$. Let $\Omega\subset\R^n$ be a bounded, connected, open set with piecewise $C^1$ boundary $\partial \Omega$, and $Y=\R^m$ and $Z=\R^{n\times m}$. Let the Lagrangian density be a locally bounded, measurable function $L\colon \Omega\times Y\times Z\to\R$ that is convex in $z$. Let $W^{1,\infty}(\Omega;Y)$ denote the Sobolev space of Lipschitz functions. Observe that for a function $y\in W^{1,\infty}(\Omega;Y)$, the dimension $n$ of the domain of $y$ and the dimension $m$ of its range are also, respectively, the dimension and codimension of the graph of $y$ in $\Omega\times Y$. Therefore, throughout this work we refer to $n$ as the dimension and $m$ as the codimension. Using these data, consider the problem of determining, globally, the infimum of a possibly nonconvex functional: \begin{align}\label{eq:pde} &\inf\limits_{y \in W^{1,\infty}(\Omega;Y)} \displaystyle\int_{\Omega} L(x,y(x),D y(x))\, dx. \end{align} In \cite{korda2018moments}, it is proposed to attack this problem by first relaxing it to take the infimum over the space of relaxed occupation measures rather than over $W^{1,\infty}(\Omega;Y)$, as this relaxation is amenable --- at least when we have semialgebraic data $\Omega$ and $L$ --- to numerical solution through a hierarchy of finite-dimensional convex semidefinite programs, without resorting to spatio-temporal discretization. The details of this semidefinite programming hierarchy are not the topic of this work; the reader is referred to~\cite{lasserre2009moments} for basic theory and to~\cite{henrion2020moment} for a number of applications. In this work we focus on the occupation measure relaxation of \eqref{eq:pde}, which we now explain in detail and give the necessary definitions to outline our results. \paragraph{Occupation measure relaxation.} In order to introduce the concept of occupation measures, first observe that each function $y\in C^1(\overline\Omega)$ induces a measure $\mu_y$ on $\Omega\times Y\times Z$ by pushing forward Lebesgue measure on $\Omega$ by the map $x\mapsto (x,y(x),Dy(x))$; in other words, for any measurable function $f\colon \Omega\times Y\times Z\to\R$ we have \[\int_{\Omega\times Y\times Z}f\,d\mu_y=\int_\Omega f(x,y(x),Dy(x))\,dx.\] The measure $\mu_y$ is the \emph{occupation measure} associated to the function $y$, and encodes $y$ and its derivative $Dy$. 
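To make the definition concrete, here is a minimal numerical sketch in Python; the particular $\Omega$, $y$ and test integrand below are ours, chosen only for illustration. The occupation measure is approximated by pushing forward uniform samples of $\Omega=(0,1)$ under $x\mapsto(x,y(x),Dy(x))$, and integrating a function against these atoms reproduces the integral of $f(x,y(x),Dy(x))$ over $\Omega$.
\begin{verbatim}
# Illustrative 1-D sketch of an occupation measure mu_y as a pushforward:
# atoms (x_i, y(x_i), Dy(x_i)) with uniform x_i represent mu_y, and integrating
# a test function f against them matches the direct integral over Omega = (0,1).
import numpy as np

rng = np.random.default_rng(0)
y  = lambda x: np.sin(2 * np.pi * x)              # an illustrative choice of y
Dy = lambda x: 2 * np.pi * np.cos(2 * np.pi * x)
f  = lambda x, yv, z: x**2 + yv**2 + 0.1 * z**2   # an arbitrary test integrand

xs = rng.uniform(0.0, 1.0, 200_000)               # Monte Carlo samples of Omega
atoms = np.stack([xs, y(xs), Dy(xs)], axis=1)     # atoms of mu_y in Omega x Y x Z
int_mu = f(atoms[:, 0], atoms[:, 1], atoms[:, 2]).mean()   # |Omega| = 1

xq = (np.arange(10_000) + 0.5) / 10_000           # midpoint rule on Omega
int_direct = f(xq, y(xq), Dy(xq)).mean()

print(int_mu, int_direct)   # agree up to Monte Carlo error
\end{verbatim}
The point of the relaxation discussed below is that one optimizes over measures of this kind without having to know a generating function $y$ in advance.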
For all compactly-supported test functions $\phi\in C_c^\infty(\Omega\times Y)$, applying the fundamental theorem of calculus to the function $x_\ell\mapsto \phi(x_1,\dots,x_\ell,\dots,x_n,y(x_1,\dots,x_\ell,\dots,x_n))$, we have \[\int_\Omega\left[\frac{\partial\phi}{\partial x_\ell}(x,y(x))+\sum_{i=1}^m\frac{\partial\phi}{\partial y_i}(x,y(x))\frac{\partial y_i}{\partial x_\ell}(x)\right]dx=0,\quad \ell=1,\dots,n, \] as $\phi$ vanishes on the boundary $\partial\Omega$. Thus $\mu_y$ satisfies \[\int_\Omega\left[\frac{\partial\phi}{\partial x_\ell}(x,y)+\sum_{i=1}^m\frac{\partial\phi}{\partial y_i}(x,y)z_{i\ell}\right]d\mu_y(x,y,z)=0,\quad \ell=1,\dots,n,\] for $\phi\in C_c^\infty(\Omega\times Y)$. This is the property we will use to obtain a slightly larger set of measures in which we can still meaningfully consider the problem \eqref{eq:pde}. Define the space $\mathcal M_0$ of relaxed occupation measures to be the set of Radon measures $\mu$ on $\Omega\times Y\times Z$ satisfying, for all $\phi\in C^\infty_c(\Omega\times Y)$, \begin{equation}\label{eq:closed} \int_\Omega\left[\frac{\partial\phi}{\partial x_\ell}(x,y)+\sum_{i=1}^m\frac{\partial\phi}{\partial y_i}(x,y)z_{i\ell}\right]d\mu(x,y,z)=0,\quad \ell=1,\dots,n, \end{equation} as well as \begin{equation}\label{eq:finitemoments} \int_{\Omega\times Y\times Z} \|z\|\,d\mu(x,y,z)<+\infty. \end{equation} Then $\mathcal M_0$ contains all the occupation measures $\mu_y$ induced by $C^1$ functions $y$, as we noted above, so we have that the relaxed infimum \begin{equation}\label{eq:relaxedpde} \inf_{\mu\in \mathcal M_0}\int_{\Omega\times Y\times Z}L(x,y,z)\,d\mu(x,y,z) \end{equation} is a lower bound of the original problem \eqref{eq:pde}. The advantage of \eqref{eq:relaxedpde} is that it is a linear programming problem, albeit infinite-dimensional, and it is possible to approximate it arbitrarily well using a hierarchy of semidefinite programming problems, at least when $\Omega$ and $L$ are semialgebraic \cite{korda2018moments}. However, the question of the equivalence of problems \eqref{eq:pde} and \eqref{eq:relaxedpde} remains open in full generality and is the topic of this paper. To give a simple example when a gap between~(\ref{eq:pde}) and (\ref{eq:relaxedpde}) may occur in the presence of additional constraints on $y(\cdot)$, consider $\Omega = [0,1]$, the double-well potential $L(x,y,z) = \min(|z-1|,|z+1|)$ and the constraint $y(x) = 0$ in $\Omega$. This constraint is modeled as a support constraint on $\mu$ in~(\ref{eq:relaxedpde}) in the form $\mathrm{supp}\,\mu \subset \{(x,y,z)\,:\, y = 0\}$. In this case, the only function $y \in W^{1,p}$, $p\in[1,\infty]$, feasible in~(\ref{eq:pde}) is $y = 0$, attaining the value $+1$ whereas the measure $\mu = dx\otimes \delta_0 \otimes (\frac{1}{2}\delta_{-1} + \frac{1}{2}\delta_{+1})$ attains the infimum of~(\ref{eq:relaxedpde}) equal to 0. This example has the property that $L$ is \emph{not convex} in $z$. We will see that this is the crucial property for the absence of relaxation gap if the dimension or codimension of the problem is equal to one, although it \emph{may not suffice} if both the dimension and codimension are greater than one. In particular we will see that the infimum of~(\ref{eq:relaxedpde}) \emph{need not be equal} to the infimum in~(\ref{eq:pde}) even when $L$ is replaced by its convexification or quasiconvexification in $z$. 
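The gap example above is simple enough to check numerically. The following Python sketch (illustrative only) evaluates the classical cost of the only feasible function $y=0$, the relaxed cost of the measure $\mu = dx\otimes \delta_0 \otimes (\frac{1}{2}\delta_{-1} + \frac{1}{2}\delta_{+1})$, and the linear constraint \eqref{eq:closed} for one test function vanishing at $x=0,1$, which is what matters here since $\mu$ has compact support in the remaining variables.
\begin{verbatim}
# Numerical check of the double-well gap example on Omega = (0,1) with y = 0:
# the classical cost is 1, the relaxed measure has cost 0 and satisfies the
# linear constraint defining relaxed occupation measures.
import numpy as np

L = lambda z: np.minimum(np.abs(z - 1.0), np.abs(z + 1.0))
xq = (np.arange(10_000) + 0.5) / 10_000      # midpoint grid on Omega = (0,1)

classical_value = L(np.zeros_like(xq)).mean()       # y = 0, Dy = 0  ->  1.0
relaxed_value = 0.5 * L(-1.0) + 0.5 * L(1.0)        # z = -1, +1 w.p. 1/2 -> 0.0

# Constraint: int [ d(phi)/dx + d(phi)/dy * z ] d(mu) = 0 for the test function
# phi(x, y) = sin(pi x) * (1 + y), which vanishes at x = 0 and x = 1.
phi_x = lambda x, y: np.pi * np.cos(np.pi * x) * (1.0 + y)
phi_y = lambda x, y: np.sin(np.pi * x)
zbar = 0.5 * (-1.0) + 0.5 * (+1.0)                  # mean of z under mu
constraint = (phi_x(xq, 0.0) + phi_y(xq, 0.0) * zbar).mean()

print(classical_value, relaxed_value, constraint)   # ~1.0, 0.0, ~0.0
\end{verbatim}
This makes the mechanism of the gap transparent: the relaxed measure splits the derivative variable between the two wells of $L$ while keeping its average consistent with the constant function $y=0$.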
\paragraph{Contributions and previous work.} It will perhaps come as no surprise that the question of the equivalence of problems \eqref{eq:pde} and \eqref{eq:relaxedpde} depends on the dimensions $n=\dim \Omega$ and $m=\dim Y$, since many related questions have been found to depend on these quantities, such as the regularity of minimal surfaces (see for example \cite{de2014regularity}) and the possibility of generalization of the Frobenius theorem \cite{alberti2017some,schioppa2016unrectifiable}, among many other examples. Notice that $n$ is the dimension of the graph of a classical minimizer $y$, while $m$ is the codimension of this graph, which motivates our terminology below. We distinguish three cases according to the dimension and the codimension of the graph of the decision variable $y(\cdot)$ in $\Omega\times Y$: \begin{itemize} \item \emph{Dimension 1, that is, $n=\dim \Omega=1$ and any $m=\dim Y>0$.} In this case, \eqref{eq:pde} and \eqref{eq:relaxedpde} are equivalent. The ideas behind this result originated in the seminal work of Young~\cite{young_lectures} (see also \cite{patrick}) but were to the best of our knowledge first proven by Rubio~\cite{rubio1975generalized,rubio1976extremal} and Lewis and Vinter~\cite{vinter1978equivalence,lewis1980relaxation}. Computationally, this approach was used in conjunction with semidefinite-programming relaxations in~\cite{lasserre2008nonlinear} for optimal control as well as in~\cite{kordaROA} for region of attraction computation, proving a slight generalization of~\cite{vinter1978equivalence} using a superposition theorem from~\cite{ambrosio2008transport}. We remark that in those papers the equivalence has been proved in situations more general than the one stated in \eqref{eq:pde} and \eqref{eq:relaxedpde} that are akin to the one considered in Section \ref{sec:codim1}. \item \emph{Codimension 1, that is, $m=1$ and any $n>0$.} In this case, \eqref{eq:pde} and \eqref{eq:relaxedpde} are equivalent as well. To prove this in Section \ref{sec:codim1}, we generalize the Hardt-Pitts decomposition \cite{hardt1996solving,zworski1988decomposition,tasso}, thereby obtaining a decomposition of the measure $\mu$ into a convex combination of functions in Sobolev space $W^{1,\infty}(\Omega)$, which can be approximated arbitrarily well by $C^1$ functions, providing the pursued result. While the Hardt-Pitts decomposition is an old, well-known result, the existing versions thereof do not directly apply in our setting and are hard to approach for non-expert audience. Here, we provide a self-contained proof of the extension applicable in our setting that relies on theory by de Giorgi, already made accessible in the books \cite{maggi2012sets,evansgariepy}. This result holds true in a very general setting, with the most important assumption being the convexity of $L$ in the variable $z$; see Theorem \ref{thm:nogap}. We have also reformulated the no-gap result in the context of optimal control problems; see Section \ref{sec:oc}. The idea of reformulating \eqref{eq:pde} as a linear programming problem and using a hierarchy of semidefinite programming problems to approximate it was first proposed in~\cite{korda2018moments}. 
First partial positive results on the absence of relaxation gap between \eqref{eq:pde} and \eqref{eq:relaxedpde} can be found in~\cite{marx2018moment,chernyavsky2021convex}, with~\cite{marx2018moment} using additional entropy inequalities to ensure concentration of the measure on a graph of a function for scalar hyperbolic conservation laws, while~\cite{chernyavsky2021convex} treats special cases of $L$.
\item \emph{Higher dimension and codimension, that is, any $m>1$ and any $n>1$.} In this case, we are able to construct an example in which the infimum from \eqref{eq:relaxedpde} is strictly less than the one from \eqref{eq:pde}, thus showing that these two problems are not equivalent. The example constructed in Section \ref{sec:positive gap} consists of a situation in which the measure-valued minimizer corresponds to an irreducible double-covering of $\Omega$, similar to the Riemann surface of the complex square root. The difficulty of the argument is in providing a lower bound for the integral of $L$ on every classical subsolution; this is done by applying the Poincar\'e-Wirtinger inequality. In the example we construct, $L$ is of regularity $C^{1,1}_{\mathrm{loc}}$, that is, it is differentiable with locally Lipschitz gradient, and we indicate how to construct similar examples of arbitrary regularity $C^k$, $k\geq 1$.
\end{itemize}
We have additionally found that integral constraints of the form
\[\int_{\Omega}H(x,y(x),Dy(x))dx\leq 0\quad \textrm{or}\quad \int_{\Omega}H(x,y(x),Dy(x))dx= 0\]
may give rise to positive gaps in any dimension; we give some examples in Section \ref{sec:integralconstraints}.
\paragraph{Further discussion.}
While it is tempting to regard measure-valued solutions as lower-quality objects than their classical counterparts, due to the possible existence of gaps between the original problem \eqref{eq:pde} and its measure-valued relaxation \eqref{eq:relaxedpde}, there are cases in which measure-valued solutions may make more sense than the ``true solutions'' of a minimization problem, depending on taste and desired applications. This in particular means that in many cases, even as there may be a gap between the classical problem \eqref{eq:pde} and its relaxation \eqref{eq:relaxedpde}, the algorithms proposed in \cite{korda2018moments} will still prove useful and valuable. A good example is given by the multi-valued minimizer of the Lagrangian $L$ constructed in Section \ref{sec:positive gap} below. In this case the measure-valued minimizer correctly encodes both values, and its support elegantly occupies exactly the zeros of $L$. No weakly-differentiable function is able to capture the multi-valued aspect of the problem, and in fact no global classical solution exists. While it is possible to construct discontinuous minimizing functions, these are likely to be deemed defective or incomplete when compared to the information conveyed by the measure-valued minimizer. Thus in this case the latter is likely superior for most applications, and in this sense problem \eqref{eq:relaxedpde} may be preferred over \eqref{eq:pde}.
\paragraph{Notations.}
For a set $A\subset\R^n$, we denote its closure by $\overline A$. For a measurable set $A\subseteq\R^k$, denote by $|A|$ its Lebesgue measure, and by $\chi_A$ the indicator function of $A$, which is equal to 1 on $A$ and to 0 elsewhere. Given a measure $\mu$ on a set $A$ and a map $\phi\colon A\to B$, the pushforward measure $\phi_\#\mu$ is defined by $\phi_\#\mu(X)=\mu(\phi^{-1}(X))$ for all measurable sets $X\subset B$.
For a finite-dimensional linear space $V$, denote by $V^*$ the space of linear functionals $V\to\R$. Denote by $C^\infty(X)$ the set of real-valued, infinitely differentiable functions on $X$, and by $C^\infty_c(X)$ the subset consisting of compactly-supported functions. If $X$ is an open set, the functions in $C^\infty_c(X)$ must vanish in a neighborhood of the boundary $\partial X$. For a closed set $B\subset\R^n$, the notation $C^k(B)$ denotes the space of functions $f\colon B\to\R$ that can be extended to a $k$-times continuously differentiable function on some open set $U$ containing $B$. Recall that a function $\varphi\colon\Omega\to\R$ is \emph{weakly differentiable} if there is an integrable function $D\varphi\colon\Omega\to\R^n$, referred to as the weak derivative of $\varphi$, such that \begin{equation}\label{eq:defweakdiff} \int_\Omega \varphi \,D\phi\,dx=-\int_\Omega \phi \, D\varphi\,dx \end{equation} for all $\phi\in C_c^\infty(\Omega)$. The Sobolev space $W^{k,p}(U)$, for $U\subset\R^n$ open, consists of the $k$-times weakly-differentiable functions $U\to\R$ whose weak derivatives up to order $k$ belong to $L^p(U)$. Given a function $f\colon A\times B\to\R$, defined on the product of two convex subsets $A$ and $B$ of Euclidean spaces, we say that $f$ is \emph{convex in $A$} if for all $a,a'\in A$ and all $b\in B$ we have \[f(\lambda a+(1-\lambda)a',b)\leq \lambda f(a,b)+(1-\lambda)f(a',b),\quad \lambda\in [0,1].\] For projections on product spaces $A\times B$, we will use the notation \begin{align*} &\proj_{A}\colon A\times B\to A,\\ &\proj_A(a,b)=a,\qquad a\in A,\; b\in B. \end{align*} \section{No gap in codimension one} \label{sec:codim1} In this section we study the relaxation gap in codimension one in a rather general setting including constraints in the form of nonlinear first-order partial differential equations and inequalities as well as boundary conditions. We do so first for the problem of the calculus of variations and then generalize it to optimal control, with the backbone of both results being the superposition principle proved in Theorem~\ref{thm:decomposition}. \subsection{Formulation for variational calculus problems} \label{sec:lagrangian} Let $\Omega$ be a bounded, connected, open subset of $\R^n$ with piecewise $C^1$ boundary $\partial \Omega$ and denote the variables on $\Omega$ by $x=(x_1,\dots,x_n)$. Let $\sigma$ denote the Hausdorff boundary measure on the piecewise $C^1$ set $\partial \Omega$. We also set $Y=\R$ with variable $y$ and $Z=\R^n$ with variables $z=(z_1,\dots,z_n)$. For simplicity, we will sometimes denote $x_{n+1}=y$. Recall that a function is \emph{locally bounded} if it is bounded on every compact subset of its domain. We consider two optimization problems, formulated with the following objects and assumptions: \begin{enumerate}[label=CV\arabic*.,ref=CV\arabic*] \item \label{U:first}\label{U:lsc} $L\colon\Omega\times Y\times Z\to\R$ and $L_\partial\colon\partial\Omega\times Y\to\R$ are measurable and locally bounded, \item\label{U:bulkconditions} $F,G\colon \Omega\times Y\times Z\to\R$ are measurable functions, \item\label{U:boundaryconditions} $F_\partial,G_\partial\colon\partial\Omega\times Y\to\R$ are measurable functions on the boundary, \item \label{U:convexity} $L$ is convex in $z$, \item \label{U:synthetic} $F^{-1}(0)\cap G^{-1}((-\infty,0])\cap(\{(x,y)\}\times Z)$ is convex for every $(x,y)\in\Omega\times Y$.
\item\label{U:last}\label{U:closedconditions} $F^{-1}(0)\cap G^{-1}((-\infty,0])$ and $F_\partial^{-1}(0)\cap G_\partial^{-1}((-\infty,0])$ are closed. \end{enumerate} As an example, here are some simple assumptions that imply \ref{U:first}--\ref{U:last}: \begin{itemize} \item $L,F,G,L_\partial, F_\partial,G_\partial$ are continuous, \item $L$ and $G$ are convex in $z$, and \item $F$ satisfies either of the following two assumptions: \begin{enumerate}[label=A\arabic*.,ref=A\arabic*] \item \label{a:convex} $F$ is nonnegative and convex in $z$, or \item \label{a:affine} $F$ is affine in $z$. \end{enumerate} \end{itemize} The first problem that interests us is the classical one: \begin{mymathbox}[ams align]\label{opt:classical} M_\mathrm{c}= &\!\inf\limits_{y \in W^{1,\infty}(\Omega;Y)} &\quad &\displaystyle\int_{\Omega} L(x,y(x),D y(x))\, dx + \int_{\partial\Omega} L_\partial(x,y(x))\, d\sigma(x) \\ &\textrm{subject to}& & F(x,y(x),Dy(x)) = 0,\quad G(x,y(x),Dy(x)) \le 0,\quad x\in \Omega,\nonumber \\ &&& F_\partial(x,y(x)) = 0,\quad \;\hspace{0.9cm} G_\partial(x,y(x)) \le 0, \quad \hspace{0.9cm} x\in \partial\Omega, \nonumber \end{mymathbox} The second one is the occupation-measure relaxation. \begin{definition}[Relaxed occupation measures]\label{def:M} Let $\mathcal M$ be the set of pairs $(\mu,\mu_\partial)$ consisting of compactly-supported, positive, Radon measures on $\overline\Omega\times Y\times Z$ and on $\partial\Omega\times Y$, respectively, satisfying \begin{equation}\label{eq:measureomega} \mu(\Omega\times Y\times Z)=|\Omega|, \end{equation} and \begin{equation}\label{eq:boundarymeasure} \int_{\Omega\times Y\times Z}\frac{\partial\phi}{\partial x}(x,y)+\frac{\partial \phi}{\partial y}(x,y)z\,d\mu(x,y,z) =\int_{\partial\Omega\times Y} \phi(x,y)\mathbf n(x)\,d\mu_\partial(x,y),\quad \phi\in C^\infty(\Omega\times Y). \end{equation} Here $\mathbf n$ denotes the exterior unit normal vector to the boundary $\partial\Omega$. Note that here $\frac{\partial \phi}{\partial x}$, $\mathbf{n}$ and $z$ are in $\R^n$ and hence for each $\phi$ the above equation is in fact a system of $n$ equations. In each pair $(\mu,\mu_\partial)\in \mathcal M$, the measure $\mu$ is referred to as a \emph{relaxed occupation measure} and the measure $\mu_\partial$ as a \emph{relaxed boundary measure}. \end{definition} Observe that every $(\mu,\mu_\partial)\in \mathcal M$ satisfies \begin{equation}\label{eq:finitemoment} \int_{\Omega\times Y\times Z}\|z\|\,d\mu(x,y,z)<+\infty, \end{equation} since $\mu$ is finite and compactly-supported. The relaxation of problem \eqref{opt:classical} considered in this work is \begin{mymathbox}[ams align] \label{opt:relaxed} M_\mathrm{r}=&\!\inf\limits_{(\mu,\mu_\partial)\in \mathcal M} &\quad&\displaystyle\int_{\overline\Omega\times Y\times Z} L(x,y,z)\, d\mu(x,y,z) + \int_{\partial\Omega\times Y} L_\partial(x,y)\, d\mu_\partial(x,y) \\ &\textrm{subject to} && \operatorname{supp}\mu\subseteq \{(x,y,z)\in\Omega\times Y\times Z:F(x,y,z) = 0,\;\; G(x,y,z) \le 0\},\nonumber\\ &&& \operatorname{supp}\mu_\partial\subseteq \{(x,y)\in\partial\Omega\times Y:F_\partial(x,y) = 0,\;\; G_\partial(x,y) \le 0\}.\nonumber\end{mymathbox} Naturally we have $M_{\mathrm{c}}\geq M_{\mathrm{r}}$ (see the proof of Theorem \ref{thm:nogap}) and the primary goal of this section is to prove that $M_{\mathrm{c}} = M_{\mathrm{r}}$ if \ref{U:first}--\ref{U:last} hold. The main theoretical result of this work that will enable us to establish this is the following generalization of the celebrated Hardt--Pitts decomposition \cite{hardt1996solving}.
\begin{theorem}\label{thm:decomposition} Let $m = \mathrm{dim}\,Y = 1$ and let $\mu$ be a compactly supported, positive, finite, Radon measure on $\overline\Omega\times Y\times Z$ such that, for all $\phi\in C_c^\infty(\Omega\times Y)$, \begin{equation}\label{eq:boundarycondition}\int_{\Omega\times Y\times Z}\frac{\partial\phi}{\partial x}(x,y)+\frac{\partial \phi}{\partial y}(x,y)z\,d\mu(x,y,z)=0. \end{equation} Then there are a compactly-supported, finite, positive, Radon measure $\nu$ on $\R$ and a family of functions $(\varphi_r\colon\Omega\to Y)_{r\in\R}\subseteq W^{1,\infty}(\Omega)$ such that, for all functions $\phi\in L_1(\mu)$ that are affine in $z$, we have \begin{equation}\label{eq:superposition} \int_{\Omega\times Y\times Z} \phi\,d\mu=\int_\R\int_\Omega\phi(x,\varphi_r(x),D\varphi_r(x))\,dx\,d\nu(r). \end{equation} Additionally, if $r\geq r'$ then $\varphi_r(x)\leq \varphi_{r'}(x)$ for all $x\in \Omega$. \end{theorem} Note that \eqref{eq:boundarycondition} is a special case of \eqref{eq:boundarymeasure} when the set of test functions is restricted to $C_c^\infty(\Omega\times Y)$. The proof of Theorem \ref{thm:decomposition} presented in Section \ref{sec:proof} follows the arguments given in \cite{zworski1988decomposition}, although the setting of~\cite{zworski1988decomposition} is different from the one considered here. Theorem~\ref{thm:decomposition} enables us to prove the following result, which leads immediately to establishing $M_\mathrm{r} = M_\mathrm{c}$: \begin{theorem}\label{thm:consolidated} Assume that $m = \mathrm{dim}\,Y = 1$ and that the functions $L,F,G,L_\partial, F_\partial,G_\partial$ satisfy \ref{U:first}--\ref{U:last}. Let $(\mu,\mu_\partial)\in\mathcal M$. Suppose that the supports of $\mu$ and $\mu_\partial$ satisfy \begin{gather} \label{eq:Fcond} \operatorname{supp}\mu\subseteq \{(x,y,z)\mid F(x,y,z) = 0, \;G(x,y,z) \le 0\}\\ \label{eq:Gcond} \operatorname{supp}\mu_\partial \subseteq \{(x,y)\mid F_\partial(x,y) = 0,\; G_\partial(x,y) \le 0\}. \end{gather} Then we have the following two conclusions: \begin{enumerate}[label=\roman*.,ref=(\roman*)] \item \label{it:barphi} There is a function $\bar\varphi\in W^{1,\infty}(\bar\Omega)$ such that \begin{gather}\label{eq:barphiL1} \int_\Omega L(x,\bar\varphi(x),D\bar\varphi(x))\,dx+\int_{\partial\Omega} L_\partial(x,\bar\varphi(x))\,d\sigma(x)\leq \int_{\Omega\times Y\times Z} L\,d\mu+\int_{\partial\Omega\times Y} L_\partial\,d\mu_\partial,\\ \label{eq:barphiF} F(x,\bar\varphi(x),D\bar\varphi(x))=0,\quad G(x,\bar\varphi(x),D\bar\varphi(x))\le 0\quad\textrm{ $x\in\Omega$},\\ \label{eq:barphiG} F_\partial(x,\bar\varphi(x))=0,\quad G_\partial(x,\bar\varphi(x))\leq 0\quad \textrm{$x\in\partial\Omega$} \end{gather} where $\sigma$ is the $(n-1)$-dimensional Hausdorff measure on $\partial\Omega$. \item \label{it:gi} Assume additionally that $L$, $F$, and $G$ are continuous.
There exists a sequence of functions $(g_i\colon\overline \Omega \to Y)\subseteq C^\infty(\Omega)\cap W^{1,\infty}(\overline\Omega)$ such that \begin{equation}\label{eq:Lineq} \lim_{i\to+\infty}\left(\int_\Omega L(x,g_i(x),Dg_i(x))\,dx+ \int_{\partial\Omega} L_\partial(x,g_i(x))\,d\sigma(x)\right) \leq \int_{\Omega\times Y\times Z} L\,d\mu+\int_{\partial\Omega\times Y} L_\partial\,d\mu_\partial, \end{equation} and \begin{gather} \label{eq:limF} \lim_{i\to+\infty} F(x,g_i(x),D g_i(x))=0,\quad \lim_{i\to+\infty} G(x,g_i(x),D g_i(x))\le 0 \quad x\in\Omega,\\ \label{eq:limG} F_\partial(x,g_i(x))=0,\quad G_\partial(x,g_i(x))\le 0\quad x\in\partial\Omega, \;i=1,2,\dots \end{gather} \end{enumerate} \end{theorem} The proof of Theorem \ref{thm:consolidated} is presented in Section \ref{sec:proof2}. This theorem immediately leads to a result on the absence of a relaxation gap between \eqref{opt:classical} and \eqref{opt:relaxed}. \begin{theorem} \label{thm:nogap} Assume that $m = \mathrm{dim}\,Y = 1$ and that the functions $L,F,G,L_\partial, F_\partial,G_\partial$ satisfy \ref{U:first}--\ref{U:last}. If $M_\mathrm{c}<+\infty$, then \[M_\mathrm{c}=M_\mathrm{r}.\] \end{theorem} \begin{proof} Since every function $y\in W^{1,\infty}(\Omega;Y)$ that is feasible in \eqref{opt:classical} induces measures $(\mu,\mu_\partial)$ by \begin{gather*} \int_{\Omega\times Y\times Z}\phi(x,y,z)d\mu(x,y,z)=\int_{\Omega}\phi(x,y(x),Dy(x))\,dx,\quad \phi\in C^0(\Omega\times Y\times Z),\\ \int_{\partial \Omega\times Y}\phi_\partial(x,y) d\mu_\partial(x,y)=\int_{\partial\Omega}\phi_\partial(x,y(x))\,d\sigma(x),\quad \phi_\partial\in C^0(\partial\Omega\times Y), \end{gather*} and the pair $(\mu,\mu_\partial)$ satisfies all the constraints of \eqref{opt:relaxed} and achieves the same objective value, we have $M_\mathrm{r}\leq M_\mathrm{c}$. In order to prove the opposite inequality, assume that $(\mu,\mu_\partial)$ is feasible in \eqref{opt:relaxed}. Such $(\mu,\mu_\partial)$ satisfies the assumptions of Theorem~\ref{thm:consolidated} and hence there exists a function $\bar\varphi \in W^{1,\infty}(\overline\Omega)$ satisfying~\eqref{eq:barphiL1}--\eqref{eq:barphiG}. This implies that $\bar\varphi$ is feasible in \eqref{opt:classical} and achieves an objective value no worse than the objective value achieved by $(\mu,\mu_\partial)$ in \eqref{opt:relaxed}. \end{proof} \begin{definition}[Centroid and centroid-concentrated measure] \label{def:centroid} Let $\mu$ be a positive Radon measure on $\Omega\times Y\times Z$. Denote the marginal measure $(\projXY)_\#\mu$ by $\mu_{\Omega\times Y}$. Disintegrate $\mu$ through the projection map $\projXY{}$ to obtain a family of measures $(\mu_{xy})_{(x,y)\in\Omega\times Y}$, with $\mu_{xy}$ being a measure on $Z$, such that \[\mu=\int_{\Omega\times Y}\mu_{xy}\,d \mu_{\Omega\times Y}(x,y).\] In other words, we have, for measurable $f\colon\Omega\times Y\times Z\to\R$, \[\int f(x,y,z) d\mu=\int_{\Omega\times Y} \int_Z f(x,y,z)d\mu_{xy}(z)\,d \mu_{\Omega\times Y}(x,y).\] By \eqref{eq:finitemoment}, the quantity \begin{equation}\label{eq:defZ} \mathcal Z(x,y)=\int z\,d\mu_{xy}(z) \end{equation} is well defined and finite for $(\projXY)_\#\mu$-almost every $(x,y)$; it is referred to as the \emph{centroid} of $\mu$ at $(x,y)$ and can also be thought of as the conditional expectation of the $z$ variable given $(x,y)$.
Let $\bar \mu$ be the measure whose projection coincides with that of $\mu$, that is, $(\projXY)_\#\bar\mu=(\projXY)_\#\mu=\mu_{\Omega\times Y}$, and which on each fiber is concentrated at $\mathcal Z(x,y)$, that is, \[\bar\mu=\int_{\Omega\times Y}\delta_{\mathcal Z(x,y)}d\mu_{\Omega\times Y}(x,y);\] this means that, for measurable $f\colon\Omega\times Y\times Z\to\R$, we have \[\int_{\Omega\times Y\times Z}f(x,y,z)\,d\bar\mu(x,y,z)=\int_{\Omega\times Y}f(x,y,\mathcal Z(x,y))\,d\mu_{\Omega\times Y}(x,y).\] The measure $\bar\mu$ is \emph{the version of $\mu$ concentrated at its centroid in the $z$ variable}. \end{definition} \begin{remark} In the absence of the convexity assumptions \ref{U:convexity} and \ref{U:synthetic}, $M_{\mathrm{r}}$ remains the same if we replace $L$ with its convexification $\tilde L$ in $z$, given, for $(x,y,z)\in \Omega\times Y\times Z$, by \begin{align*}\tilde L(x,y,z)=\inf \{&\lambda L(x,y,z')+(1-\lambda)L(x,y,z''):\\ &z=\lambda z'+(1-\lambda)z'',\;\lambda\in[0,1],\;z',z''\in Z,\\ &F(x,y,z')=0=F(x,y,z''),\;G(x,y,z')\leq 0,\;G(x,y,z'')\leq 0\}. \end{align*} Indeed, denoting by $\tilde M_\mathrm{r}$ the value of the relaxed problem \eqref{opt:relaxed} with $L$ replaced by $\tilde L$, observe that we always have $M_\mathrm{r}\geq \tilde M_\mathrm{r}$ because $L\geq \tilde L$; let us show the opposite inequality. The measure $\bar\mu$ constructed in Definition \ref{def:centroid}, which concentrates the mass of $\mu$ on its centroid $\mathcal Z(x,y)$ in each fiber $\{(x,y)\}\times Z$, satisfies \[\int L\,d\mu\geq \int \tilde L\,d\mu\geq\int \tilde L\,d\bar\mu.\] A new measure $\tilde\mu$ can be constructed that redistributes, on each fiber $\{(x,y)\}\times Z$, the mass of $\bar\mu$ onto the points where $\tilde L=L$ while maintaining the same centroid; indeed, on each fiber $\{(x,y)\}\times Z$ we can pick (for example, using Choquet's theorem) a probability measure $\nu_{(x,y)}$ supported on the extreme points of the facet of $\tilde L$ containing the centroid $\mathcal Z(x,y)$, in such a way that the centroid of $\nu_{(x,y)}$ will again be $\mathcal Z(x,y)$; it can be argued using standard set-valued analysis techniques that this choice can be done in such a way as to produce a measurable selection of the set-valued map associating to each $(x,y)\in\Omega\times Y$ the set of probabilities on the extreme points of the facet containing the centroid; to finish the construction, let $\tilde\mu=\int_{\Omega\times Y}\nu_{(x,y)}d(\projXY)_\#\mu(x,y)$. Then we have \[\int \tilde L\,d\bar\mu=\int \tilde L\,d\tilde\mu=\int L\,d\tilde\mu.\] Now, $(\tilde\mu,\mu_\partial)\in\mathcal M$ because conditions \eqref{eq:measureomega} and \eqref{eq:boundarymeasure} are not affected by the construction of $\tilde\mu$, as they only involve integrals of functions affine in $z$. Thus we have $M_\mathrm{r}\leq \tilde M_\mathrm{r}$, which is what we wanted to show. \end{remark} \subsection{Formulation for optimal control} \label{sec:oc} In this section we extend the no-gap result of Theorem \ref{thm:nogap} to the context of optimal control. Let $\Omega\subset\R^n$ be a bounded, connected, open set with piecewise $C^1$ boundary $\partial\Omega$ and with boundary measure $\sigma$. Let also $Y=\R$ and $Z=\R^{n}$. Let $U$ and $U_\partial$ be compact topological spaces. Let $\projXYZ{}\colon \Omega\times Y\times Z\times U\to \Omega\times Y\times Z$ and $\projdXY{}\colon\partial\Omega\times Y\times U_\partial\to \partial\Omega\times Y$ be the projections $\projXYZ{}(x,y,z,u)=(x,y,z)$ and $\projdXY{}(x,y,u)=(x,y)$.
In analogy with \ref{U:first}--\ref{U:last}, we will assume: \begin{enumerate}[label=OC\arabic*.,ref=OC\arabic*] \item \label{OC:first} $L\colon \Omega\times Y\times Z\times U\to\R$ and $L_\partial \colon\partial\Omega\times Y\times U_\partial\to\R$ are measurable and locally bounded functions, \item $F,G\colon \Omega\times Y\times Z\times U\to\R$ are measurable functions, \item $F_\partial,G_\partial\colon\partial \Omega\times Y\times U_\partial\to \R$ are measurable functions on the boundary, \item\label{OC:3} the function $\bar L\colon \Omega\times Y\times Z\to\R$ defined by \[\bar L(x,y,z)=\inf\{L(x,y,z,u):F(x,y,z,u)=0,\;u\in U\}\] is measurable, locally bounded, and convex in $z$, \item\label{OC:2} $\projXYZ{}(F^{-1}(0)\cap G^{-1}((-\infty,0]))\cap((x,y)\times Z)$ is convex for every $(x,y)\in\Omega\times Y$. \item \label{OC:last} $F^{-1}(0)\cap G^{-1}((-\infty,0])$ and $F_\partial^{-1}(0)\cap G^{-1}_\partial((-\infty,0])$ are closed. \end{enumerate} Assumption \ref{OC:2} amounts to the set of permissible points being convex on each fiber $Z$, once we project with $\projXYZ{}$. For a concrete application satisfying these assumptions, refer to Example \ref{ex:affine}. We want to consider the following two optimization problems: first, the classical multivariable optimal control problem \begin{alignat}{2} \label{opt:classical_cont} M_\mathrm{c}^\mathrm{oc}= &\!\inf\limits_{\substack{y \in W^{1,\infty}(\Omega;Y)\\u \in L^{\infty}(\Omega;U)\\u_\partial \in L^\infty(\partial\Omega;U_\partial)}} &\quad &\displaystyle\int_{\Omega} L(x,y(x),D y(x),u(x))\, dx + \int_{\partial\Omega} L_\partial(x,y(x),u_\partial(x))\, d\sigma(x) \\ &\textrm{subject to}& & F(x,y(x),Dy(x),u(x)) = 0,\quad G(x,y(x),Dy(x),u(x)) \le 0,\quad x\in \Omega,\nonumber \\ &&& F_\partial(x,y(x),u_\partial(x)) = 0,\quad \;\hspace{0.725cm} G_\partial(x,y(x),u_\partial(x)) \le 0, \quad \hspace{0.725cm} x\in \partial\Omega, \nonumber \end{alignat} and its relaxation \begin{alignat}{2} \label{opt:relaxed_cont} M_\mathrm{r}^\mathrm{oc}= &\!\inf\limits_{(\mu,\mu_\partial)\in \mathcal M^\mathrm{oc}} &\quad&\displaystyle\int_{\Omega\times Y\times Z\times U} L(x,y,z,u)\, d\mu(x,y,z,u) + \int_{\partial\Omega\times Y\times U_\partial} L_\partial(x,y,u)\, d\mu_\partial(x,y,u) \\ &\textrm{subject to}&& \operatorname{supp}\mu\subset \{(x,y,z,u)\in\Omega\times Y\times Z\times U:F(x,y,z,u) = 0,\;\; G(x,y,z,u) \le 0\},\nonumber\\ &&& \operatorname{supp}\mu_\partial\subset \{(x,y,u)\in\partial\Omega\times Y\times U_\partial:F_\partial(x,y,u) = 0,\;\; G_\partial(x,y,u) \le 0\},\nonumber\end{alignat} where $\mathcal M^\mathrm{oc}$ denotes the set of pairs $(\mu,\mu_\partial)$ consisting of compactly-supported positive Borel measures on $\overline\Omega\times Y\times Z\times U$ respectively $\partial\Omega\times Y\times U_\partial$ satisfying \begin{gather} \label{eq:mass_cont} \mu(\overline\Omega\times Y\times Z\times U)=|\Omega|, \end{gather} and \begin{equation} \label{eq:boundarymeasure_cont} \int_{\overline\Omega\times Y\times Z\times U}\frac{\partial\phi}{\partial x}(x,y)+\frac{\partial\phi}{\partial y}(x,y)z\,d\mu(x,y,z,u) =\int_{\partial\Omega\times Y\times U_\partial}\hspace{-8mm}\phi(x,y)\mathbf n(x)\,d\mu_\partial(x,y,u),\;\; \phi\in C^\infty(\Omega\times Y), \end{equation} which are the analogies of \eqref{eq:measureomega} and \eqref{eq:boundarymeasure}. 
Note that none of these conditions \eqref{eq:mass_cont}--\eqref{eq:boundarymeasure_cont} substantially involves the control set $U$, and they correspond to the hypotheses of Theorem \ref{thm:consolidated} and Theorem \ref{thm:nogap}. \begin{theorem}\label{cor:oc} If $M^\mathrm{oc}_\mathrm{c}$ is finite and \ref{OC:first}--\ref{OC:last} hold, then $M^\mathrm{oc}_\mathrm{c}=M^\mathrm{oc}_\mathrm{r}$. \end{theorem} \begin{proof} We always have $M^\mathrm{oc}_\mathrm{r}\leq M^\mathrm{oc}_\mathrm{c}$ because every $(y_0,u_0,v_0)\in W^{1,\infty}(\Omega;Y)\times L^{\infty}(\Omega;U)\times L^\infty(\partial\Omega;U_\partial)$ that is feasible in \eqref{opt:classical_cont} induces a pair of measures $(\mu,\mu_\partial)\in \mathcal M^\mathrm{oc}$ by \[\int_{\overline\Omega\times Y\times Z\times U}\phi(x,y,z,u)\,d\mu(x,y,z,u)=\int_{\Omega}\phi(x,y_0(x),Dy_0(x),u_0(x))\,dx,\quad \phi\in C^0(\Omega\times Y\times Z\times U),\] and \[ \int_{\partial\Omega\times Y\times U_\partial}\phi(x,y,u)\,d\mu_\partial(x,y,u)=\int_{\partial\Omega}\phi(x,y_0(x),v_0(x))\,d\sigma(x),\quad \phi\in C^0(\partial\Omega\times Y\times U_\partial),\] and they satisfy \eqref{eq:mass_cont}--\eqref{eq:boundarymeasure_cont}. Define \[\bar L_\partial(x,y)=\inf_{u\in U_\partial}L_\partial(x,y,u),\quad (x,y)\in \partial\Omega\times Y.\] Then because of the local boundedness of $L_\partial$ and the compactness of $U_\partial$, $\bar L_\partial\colon \partial\Omega\times Y\to \R$ is locally bounded and measurable. We will use the functions $\bar L$ and $\bar L_\partial $ to reduce the optimal control problem to the variational calculus problem from Section \ref{sec:lagrangian}. The sets \[\projXYZ{}(F^{-1}(0)\cap G^{-1}((-\infty,0]))\quad\textrm{and} \quad\projdXY{}(F_\partial^{-1}(0)\cap G_\partial^{-1}((-\infty,0]))\] are closed. We explain why this is true for the former, the latter being similar. For every compact set $K\subset \Omega\times Y\times Z$, the set $(K\times U)\cap (F^{-1}(0)\cap G^{-1}((-\infty,0]))$ is compact, so its image under the continuous map $\projXYZ{}$ is compact, and it equals $K\cap\projXYZ{}(F^{-1}(0)\cap G^{-1}((-\infty,0]))$. Thus $\projXYZ{}(F^{-1}(0)\cap G^{-1}((-\infty,0]))$ is a set whose intersection with every compact set is compact, so it must be closed. In order to reduce the optimal control problem to the variational calculus one considered in Section \ref{sec:lagrangian}, we will need functions that encode the admissibility conditions. Let \begin{align*} \bar F(x,y,z)&=1-\chi_{\projXYZ{}(F^{-1}(0)\cap G^{-1}((-\infty,0]))}(x,y,z),\quad (x,y,z)\in \Omega\times Y\times Z,\\ \bar F_\partial(x,y)&=1-\chi_{\projdXY{}(F_\partial^{-1}(0)\cap G_\partial^{-1}((-\infty,0]))}(x,y),\quad (x,y)\in\partial\Omega\times Y, \end{align*} as well as $\bar G=0=\bar G_\partial$; thus $\bar F$ and $\bar F_\partial$ vanish exactly on the projected admissible sets. Consider problems \eqref{opt:classical} and \eqref{opt:relaxed} with $L,F,G,L_\partial,F_\partial,G_\partial$ replaced by $\bar L,\bar F,\bar G,\bar L_\partial,\bar F_\partial,\bar G_\partial$; since assumptions \ref{OC:first}--\ref{OC:last} imply the corresponding assumptions \ref{U:first}--\ref{U:last}, and since $M_\mathrm{c}^\mathrm{oc}<+\infty$ on the optimal control side implies $M_\mathrm{c}<+\infty$ on the variational side, we have, by Theorem \ref{thm:nogap}, $M_{\mathrm{c}}=M_{\mathrm{r}}$ on the variational side.
Set \[I_1\coloneqq\int_{\partial\Omega}\bar L_\partial(x,\bar\varphi(x))\,d\sigma(x) \quad\textrm{and}\quad I_2\coloneqq\int_{\partial\Omega\times Y\times U_\partial}\bar L_\partial(x,y)\,d\mu_\partial(x,y,u),\] and note that $I_2\leq \int_{\partial\Omega\times Y\times U_\partial}L_\partial(x,y,u)\,d\mu_\partial(x,y,u)$ since $\bar L_\partial\leq L_\partial$. We have (omitting for brevity the feasibility constraints, which are those of \eqref{opt:classical} and \eqref{opt:relaxed} for the lines involving $\bar L$, and those of \eqref{opt:classical_cont} and \eqref{opt:relaxed_cont} for the lines involving $L$), \begin{align*} M^\mathrm{oc}_\mathrm{c} &\leq \inf_{\bar\varphi\in W^{1,\infty}(\Omega;Y)}\inf_{u\in L^\infty(\Omega; U)}\int_\Omega L(x,\bar\varphi(x),D\bar\varphi(x),u(x))\,dx+I_1\\ &=\inf_{\bar\varphi\in W^{1,\infty}(\Omega;Y)} \int_\Omega \bar L(x,\bar\varphi(x),D\bar\varphi(x))\,dx+I_1\\ &=M_\mathrm{c}\\ &=M_\mathrm{r}\\ &=\inf_{(\mu,\mu_\partial)\in \mathcal M}\int_{\Omega\times Y\times Z}\bar L\,d\mu+I_2\\ &=\inf_{(\mu,\mu_\partial)\in \mathcal M^\mathrm{oc}}\int_{\Omega\times Y\times Z}\bar L\,d(\projXYZ{})_\#\mu+I_2\\ &=\inf_{(\mu,\mu_\partial)\in \mathcal M^\mathrm{oc}}\int_{\Omega\times Y\times Z}\bar L\circ\projXYZ{}\,d\mu+I_2\\ &\leq \inf_{(\mu,\mu_\partial)\in \mathcal M^\mathrm{oc}}\int_{\Omega\times Y\times Z\times U} L\,d\mu+I_2\\ &= M^\mathrm{oc}_\mathrm{r}\\ &\leq \inf_{\substack{y\in W^{1,\infty}(\Omega;Y)\\u\in L^\infty(\Omega;U)}}\int_\Omega L(x,y(x),Dy(x),u(x))\,dx+I_2\\ &=M^\mathrm{oc}_\mathrm{c}.\qedhere \end{align*} \end{proof} \begin{example}[Affine control of the derivatives] \label{ex:affine} Consider an optimal control problem in which a relation of the form \[Dy(x)=v(x,y,u)\] must be enforced. Assume that $v\colon \Omega\times Y\times U\to Z$ is such that $u\mapsto v(x,y,u)$ is affine and invertible for each pair $(x,y)$. Then we may encode the relation above by letting \[F(x,y,z,u)=z-v(x,y,u).\] The effective Lagrangian $\bar L$ is then simply \[\bar L(x,y,z)=L(x,y,z,(v(x,y,\cdot))^{-1}(z)).\] If $L$ is continuous and convex in $z$ and $v$ is continuous, then $\bar L$ is continuous and convex in $z$ as well. With $F$ defined as above, and assuming for simplicity that $F_\partial=G_\partial=G=0$, assumptions \ref{OC:first}--\ref{OC:last} hold. \end{example} \subsection{Proof of Theorem \ref{thm:decomposition}}\label{sec:proofdecomposition} Now we come to the proof of Theorem~\ref{thm:decomposition}. We start by illustrating the main steps of the proof on a simple example. \subsubsection{Overview of the proof of Theorem \ref{thm:decomposition}} \label{sec:overview} To fix ideas, let us show how the proof of Theorem \ref{thm:decomposition} works in the very simple case when $\Omega=[0,1]\subset\R$, $Y = \R$, $Z = \R$, and $\mu$ is induced by a $C^1$ curve $\gamma\colon\Omega\to Y$, so that it is given by \[\int_{\Omega\times Y\times Z}f(x,y,z)\,d\mu(x,y,z)=\int_0^1f(x,\gamma(x),\gamma'(x))\,dx,\quad f\in C^0(\Omega\times Y\times Z).\] In this case, Lemma \ref{lem:lebesgueproj} will confirm that the projection of $\mu$ onto $\Omega$ is a multiple of Lebesgue measure (it is just $dx|_{[0,1]}$).
We will then use a trick involving the circulation $\mu(X)$ of vector fields $X$ and a linear functional $S\colon C^0(\Omega\times Y)\to \R$, which are related by the fundamental identity (Lemma \ref{lem:dividentity}) \[\mu(X)=S(\operatorname{div}X);\] the Radon-Nikodym theorem (see Lemma \ref{lem:radon-nikodym}) will then give us a function $\rho\colon\Omega\times Y\to\R$ that heuristically has the property that \[\textrm{``}(\projXY)_\#\mu=-\frac{\partial\rho}{\partial y}.\textrm{''}\] Thus in our example (see Figure \ref{fig:10}), \[\rho(x,y)=\begin{cases} -1,&y\geq \gamma(x),\\ 0,&y< \gamma(x). \end{cases} \] \begin{figure} \includegraphics[width=13.5cm]{img9} \centering \caption{The left-hand side diagram illustrates the values of $\rho$ when $\mu$ is induced by a single curve $\gamma$. On the right-hand side, we illustrate the case in which $\mu$ is the convex combination $\mu=\frac23\mu_1+\frac13\mu_2$ and $\mu_1$ and $\mu_2$ are measures induced by two curves, $\gamma$ and $\eta$, respectively. } \label{fig:10} \end{figure} After checking that $\rho$ is bounded (Lemma \ref{lem:boundedrange}), we will use the function $\rho$ to define the functions $\varphi_r$ (commonly known as \emph{sheets}) that will give the decomposition of $\mu$. This is done in Lemma \ref{lem:main}. Lemma \ref{lem:function} shows that $\varphi_r$ roughly corresponds to the boundary of a level set of $\rho$, and that it is ``almost continuous,'' and Lemma \ref{lem:weakderivative} shows that it is weakly differentiable; these two lemmas are used to prove Lemma \ref{lem:main}. The proof of Theorem \ref{thm:decomposition}, presented at the end of Section \ref{sec:proof}, relies on the fundamental identity above, together with the technical details from Lemma \ref{lem:main}. In our example, the decomposition of Theorem \ref{thm:decomposition} gives the measure $\nu$ equal to Lebesgue measure on $I=[-1,0]$, and \[\varphi_{r}(x)=\inf_{\substack{y\in Y\\\rho(x,y)\leq r}}y=\gamma(x),\quad r\in [-1,0),\] so that, indeed, \begin{multline*} \int_{\Omega\times Y\times Z} f\,d\mu =\int_I\int_\Omega f(x,\varphi_r(x),D\varphi_r(x))\,dx\,d\nu(r)\\ =\int_{-1}^0\int_0^1 f(x,\gamma(x),\gamma'(x))\,dx\,d\nu(r)=\int_0^1 f(x,\gamma(x),\gamma'(x))\,dx. \end{multline*} Another example, illustrated as well in Figure \ref{fig:10}, is the case in which $\mu=\tfrac23\mu_1+\tfrac13\mu_2$, where $\mu_1$ and $\mu_2$ are the measures induced by curves $\gamma$ and $\eta$ with, say, $\gamma\geq \eta$ on $[0,a]$ and $\gamma<\eta$ on $(a,1]$, for some $0<a<1$. In this case, \[\rho(x,y)=\begin{cases} 0,&y<\gamma(x)\;\textrm{and}\;y<\eta(x),\\ -\tfrac13,&\eta(x)\leq y<\gamma(x),\\ -\tfrac23, &\gamma(x)\leq y<\eta(x),\\ -1,&y\geq \gamma(x)\;\textrm{and}\;y\geq\eta(x). \end{cases} \] Similarly, \[\varphi_r(x)=\begin{cases} \gamma(x),&\textrm{($-1<r<-\tfrac13$ and $0<x<a$) or ($-\tfrac23<r<0$ and $a<x<1$)},\\ \eta(x),&\textrm{($-\tfrac13<r<0$ and $0<x<a$) or ($-1<r<-\tfrac23$ and $a<x<1$)}. \end{cases} \] \subsubsection{Proof of Theorem \ref{thm:decomposition}} \label{sec:proof} We collect some lemmas needed in the proof of the theorem, which is presented at the end of the section. Throughout this section, we assume that $\mu$ is a measure satisfying the hypotheses of Theorem \ref{thm:decomposition}.
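As a consistency check (an illustrative aside using the notation of Section \ref{sec:overview}, not needed in the sequel), one can verify the conclusion of Theorem \ref{thm:decomposition} directly in the two-curve example above: for each fixed $x$, the set of $r\in I=[-1,0]$ with $\varphi_r(x)=\gamma(x)$ has Lebesgue measure $\tfrac23$ and the set with $\varphi_r(x)=\eta(x)$ has measure $\tfrac13$, so for any $f\in C^0(\Omega\times Y\times Z)$, \[\int_I\int_0^1 f(x,\varphi_r(x),D\varphi_r(x))\,dx\,d\nu(r)=\tfrac23\int_0^1 f(x,\gamma(x),\gamma'(x))\,dx+\tfrac13\int_0^1 f(x,\eta(x),\eta'(x))\,dx=\int f\,d\mu.\]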
\begin{lemma}\label{lem:lebesgueproj} If $\projX{}\colon \Omega\times Y\times Z\to\Omega$ is the projection, then there is $c>0$ such that \[(\projX{})_{\#}\mu=c\,dx.\] In other words, the pushforward $(\projX{})_{\#}\mu$ is a positive multiple of the Lebesgue measure on $\Omega$. \end{lemma} \begin{figure} \includegraphics[width=4.5cm]{img1} \centering \caption{Translating a rectangle in the proof of Lemma \ref{lem:lebesgueproj}.} \label{fig:lebesgueproj} \end{figure} \begin{proof} Let $R\subset \Omega$ be a small parallelepiped, and let $\tau$ be a translation such that $\tau(R)\subset \Omega$. We will show that $(\projX{})_{\#}\mu(R)=(\projX{})_{\#}\mu(\tau(R))$, and since this will be true for all $R$ and all $\tau$, $(\projX{})_{\#}\mu$ must be a positive multiple of the Lebesgue measure on $\Omega$ \cite[Thm. 2.20]{rudin}. Write $\tau$ as a finite composition of translations $\tau_i$ in the directions of the axes $x_1,\dots,x_n$, \[\tau=\tau_k\circ\tau_{k-1}\circ\dots\circ\tau_1.\] Denote $\tilde \tau_i=\tau_i\circ\tau_{i-1}\circ\dots\circ\tau_1$ and set $\tilde\tau_0$ equal to the identity. We assume $\tau_1,\dots,\tau_k$ have been chosen in such a way that the convex hull of $\tilde\tau_{i-1}(R)\cup\tilde\tau_{i}(R)$ is contained in $\Omega$ for each $i$. Refer to Figure \ref{fig:lebesgueproj}. For each $i=1,\dots,k$, let $j_i$ be such that $\tau_i$ is a translation in direction $x_{j_i}$. Recall that $\chi_{\tilde\tau_{i}(R)}$ is the indicator function of the translated parallelepiped $\tilde\tau_{i}(R)$, and let \begin{multline*} \phi_i(x_1,\dots,x_n)=\int_{-\infty}^{x_{j_i}}\chi_{\tilde\tau_{i}(R)}(x_1,\dots,x_{j_i-1},s,x_{j_i+1},\dots,x_n)\\ -\chi_{\tilde\tau_{i-1}(R)}(x_1,\dots,x_{j_i-1},s,x_{j_i+1},\dots,x_n)ds. \end{multline*} Observe that \[\operatorname{supp}\phi_i\subseteq\overline{\operatorname{conv}}(\tilde\tau_{i-1}(R)\cup\tilde\tau_i(R)),\] which is a compact set properly contained in $\Omega$. Approximating with smooth, compactly-supported functions and using the Lebesgue dominated convergence theorem, we conclude that \eqref{eq:boundarycondition} is true for $\phi_i$, which means, for the $j_i^{\mathrm{th}}$ entry, \begin{multline*} (\projX{})_{\#}\mu(\tilde\tau_{i}(R))- (\projX{})_{\#}\mu(\tilde\tau_{i-1}(R))=\int_{\Omega\times Y\times Z}\chi_{\tilde\tau_i(R)}-\chi_{\tilde\tau_{i-1}(R)}\,d\mu \\ =\int_{\Omega\times Y\times Z}\frac{\partial \phi_i}{\partial x_{j_i}}\,d\mu =\int_{\Omega\times Y\times Z}\frac{\partial \phi_i}{\partial x_{j_i}}+\frac{\partial \phi_i}{\partial y}z_{j_i}\,d\mu =0. \end{multline*} By induction we get \[(\projX{})_{\#}\mu(R)=(\projX{})_{\#}\mu(\tilde\tau_{0}(R))=(\projX{})_{\#}\mu(\tilde\tau_{k}(R))=(\projX{})_{\#}\mu(\tau(R)).\qedhere\] \end{proof} For a vector field $X\colon \Omega\times Y\to\R^{n+1}$, we can define \begin{equation}\label{eq:defmuX} \mu(X)\coloneqq\int \langle X(x,y), ( z_1,\dots,z_n,-1)\rangle\, d\mu(x,y,z). \end{equation} When $\mu$ is induced by a smooth function $\varphi\colon \Omega\to Y$, $\mu(X)$ is the circulation of $X$ through the graph of $\varphi$, since, on the support of $\mu$, $(z_1,\dots,z_n,-1)=(\frac{\partial\varphi}{\partial x_1},\dots,\frac{\partial\varphi}{\partial x_n},-1)$ is normal to the graph of $\varphi$.
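For instance (an illustrative aside), take $n=1$, $\Omega=(0,1)$, and $\mu$ induced by the function $\varphi(x)=x$. For the constant field $X(x,y)=(0,1)$ one gets \[\mu(X)=\int_0^1\big\langle (0,1),(\varphi'(x),-1)\big\rangle\,dx=-1=-|\Omega|,\] while for the divergence-free field $X(x,y)=(y,x)$ one gets \[\mu(X)=\int_0^1\big\langle (\varphi(x),x),(\varphi'(x),-1)\big\rangle\,dx=\int_0^1(x-x)\,dx=0,\] in line with Lemma \ref{lem:divergencecond} below (whose hypotheses additionally require $X$ to be compactly supported and to vanish near $\partial\Omega\times Y$).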
\begin{lemma}\label{lem:divergencecond} Let $X\colon\Omega\times Y\to\R^{n+1}$ be a smooth, compactly-supported vector field that vanishes on a neighborhood of $\partial \Omega\times Y$ and satisfies $\operatorname{div} X=0.$ Then \[\mu(X)=0.\] \end{lemma} \begin{proof} Let \begin{equation}\label{eq:Xtilde} \tilde X_i(x,y)=\int_{-\infty}^yX_i(x,s)ds. \end{equation} Then $\tilde X_i\in C^\infty(\Omega\times Y)$ and vanishes on $\partial\Omega\times Y$, so by the $i$-th entry of \eqref{eq:boundarycondition}, with $\phi=\tilde X_i$, \[ \int_{\Omega\times Y\times Z}\left(\frac{\partial\tilde X_i}{\partial x_i}(x,y) +X_i(x,y)z_i\right) d\mu(x,y,z)=0. \]Rearranging, and plugging this into the definition of $\mu(X)$, it follows that \begin{align*} \mu(X)&=\sum_{i=1}^n\int_{\Omega\times Y\times Z}X_i(x,y)z_i\,d\mu(x,y,z)-\int_{\Omega\times Y\times Z}X_{n+1}(x,y)d\mu(x,y,z)\\ &=- \int_{ \Omega\times Y\times Z}\sum_{i=1}^{n}\frac{\partial\tilde X_i}{\partial x_i}(x,y) +X_{n+1}(x,y)\,d\mu(x,y,z). \end{align*} Now, using (\ref{eq:Xtilde}) and \[X_{n+1}(x,y)=\int_{-\infty}^y\frac{\partial X_{n+1}}{\partial x_{n+1}}(x,s)\,ds,\] we get \begin{equation*} \sum_{i=1}^n\frac{\partial\tilde X_i}{\partial x_i}+X_{n+1} = \int_{-\infty}^{y} \sum_{i=1}^{n+1}\frac{\partial X_i}{\partial x_i}(x,s)\,ds=\int_{-\infty}^{y}\operatorname{div} X\,ds, \end{equation*} which vanishes by the assumption that $\operatorname{div} X = 0$. \qedhere \end{proof} We define, for measurable, compactly supported, and bounded functions $u\colon\Omega\times Y\to\R$, \begin{align} \notag S(u)&=\mu\left(0,\dots,0,\int_{y}^\infty u(x,s)ds\right)\\ \label{eq:defS} &=-\int \left(\int_{y}^\infty u(x,s)ds\right)\,d\mu(x,y,z). \end{align} \begin{lemma}\label{lem:radon-nikodym} The functional $S$ corresponds to integration with respect to an absolutely continuous nonpositive measure; in other words, there is a measurable function $\rho\colon\Omega\times Y\to(-\infty,0]$ such that \begin{align*} S(u)&=\int_{\Omega\times Y} u(x,y)\rho(x,y)\,dx\,dy. \end{align*} \end{lemma} \begin{proof} This follows from the Radon-Nikodym theorem. To apply the theorem we need to check that, if $A\subset \Omega\times Y$ is a set of measure zero and $\chi_A$ is its indicator function, then $S(\chi_A)=0$. Indeed, if the slice $A\cap (\{x\}\times Y)$ has zero one-dimensional measure for Lebesgue-almost all $x\in\Omega$, then $\int_{y}^\infty\chi_A(x,s)ds=0$ for all $y$ and almost all $x\in \Omega$, and by Lemma \ref{lem:lebesgueproj} and the Fubini theorem, the integral in the definition \eqref{eq:defS} of $S(\chi_A)$ vanishes. To see that the function $\rho$ can be taken to be nonpositive, observe that whenever $u$ is nonnegative, its primitive also satisfies $\int_{y}^\infty u(x,s)ds\geq 0$, so $S(u)\leq 0$. \end{proof} \begin{lemma}\label{lem:decreasingconstant} When restricted to a line $\{(x,y):y\in Y\}$, $x\in \Omega$, the function $y\mapsto \rho(x,y)$ is (non-strictly) decreasing for almost every $x\in \Omega$. If $N>0$ is such that $\operatorname{supp}\mu\subset \Omega\times (-N,N)\times Z$, then $y\mapsto\rho(x,y)$ vanishes throughout $(-\infty,-N]$ and is constant on $[N,+\infty)$, for almost every $x\in\Omega$. \end{lemma} Observe that, strictly speaking, $\rho$ is only defined Lebesgue-almost everywhere on $\Omega\times Y$, so the statement of the lemma should be interpreted as ascertaining the existence of a representative, in the equivalence class of measurable functions coinciding with $\rho$ Lebesgue-almost everywhere, having the desired properties.
\begin{proof} Let $R$ be an $(n+1)$-dimensional box in $\Omega\times Y$ whose edges are parallel to the axes, and let $\tau_t(x,y)=(x,y+t)$ be the translation in the $y$ direction. Then, by Lemma \ref{lem:radon-nikodym} and definitions \eqref{eq:defmuX} and \eqref{eq:defS}, \begin{align*} \int_{R} \rho(x,y+t)\,dx\,dy&=\int_{\tau_t(R)} \rho(x,y)\,dx\,dy\\ &=\int_{\Omega\times Y} \chi_{\tau_t(R)}(x,y)\rho(x,y)\,dx\,dy\\ &=S(\chi_{\tau_t(R)})\\ &=\mu\left(0,\dots,0,\int_{y}^\infty\chi_{\tau_t(R)}(x,s)ds\right)\\ &=\mu\left(0,\dots,0,\int_{y-t}^{\infty}\chi_{R}(x,s)ds\right)\\ &=-\int_{\Omega\times Y\times Z}\left(\int_{y-t}^{\infty}\chi_R(x,s)ds\right)d\mu(x,y,z). \end{align*} Since $\mu$ is a positive measure, the last term is nonincreasing in $t$. Since this is true for all $t$ and all $R$, this proves that $\rho$ is nonincreasing in the $y$ direction. This proves the first part of the lemma. To prove the second statement of the lemma, consider the case in which $R=R_\Omega\times[a,b]$ for some box $R_\Omega\subset \Omega$ and some $a<b$. Then \[\int_y^\infty\chi_R(x,s)\,ds=\chi_{R_\Omega}(x)\begin{cases} b-a, &y\leq a,\\ 0,&y\geq b. \end{cases} \] Thus if $a<b\leq -N$, we have \[\int_R\rho(x,y)\,dx\,dy=-\int_{\Omega\times Y\times Z}\left(\int_y^\infty\chi_R(x,s)\,ds\right)d\mu(x,y,z)=\int_{\Omega\times Y\times Z}0\,d\mu(x,y,z)=0.\] On the other hand, if $N\leq a<b$, then \[\int_R\rho(x,y)\,dx\,dy=-\int_{\Omega\times Y\times Z}\left(\int_y^\infty\chi_R(x,s)\,ds\right)d\mu(x,y,z)=-(b-a)\int_{\Omega\times Y\times Z}\chi_{R_\Omega}(x)\,d\mu(x,y,z).\] This value is unchanged under translations of the interval $[a,b]$ within $[N,+\infty)$. This proves the second statement of the lemma. \end{proof} \begin{lemma}\label{lem:boundedrange} The function $\rho$ in Lemma \ref{lem:radon-nikodym} is essentially bounded. \end{lemma} \begin{proof} Aiming for a contradiction, assume that the function $\rho\leq 0$ is not essentially bounded. Then the sets $B_j=\{(x,y)\in\Omega\times Y:\rho(x,y)\leq -j\}$, $j\in\mathbb N$, have positive measure. By Lemma \ref{lem:decreasingconstant}, if we take $N>0$ to be such that $\operatorname{supp}\mu\subset\Omega\times(-N,N)\times Z$, then $y\mapsto \rho(x,y)$ is everywhere non-strictly decreasing and is constant on $[N,+\infty)$ for almost every $x\in\Omega$. Thus the sets $B_j\cap(\Omega\times [N,N+1])$ must have positive measure. Pick a subset $A_j\subset \{(x,y)\in\Omega\times [N,N+1]:\rho(x,y)\leq -j\}\subset B_j$ of finite, positive measure $0<|A_j|<\infty$ and of the form $A_j=A_j^\Omega\times [N,N+1]$, with $A_j^\Omega\subseteq\Omega$. Observe that this means that $A_j$ does not intersect the compact set $\operatorname{supp}\mu$. Pick an open set $U_j\subset\Omega\times Y$, of the same product form, $U_j=U^\Omega_j\times[N,N+1]$, such that $A_j\subseteq U_j$ with $|U_j\setminus A_j|\leq |A_j|/j$, which is possible due to the outer regularity of Lebesgue measure. Note that the function $f_j=\chi_{U_j}/|A_j|$ satisfies $\int_{\Omega\times Y} \rho f_j\leq \int_{A_j} (-j) f_j+\int_{U_j\setminus A_j}0\,f_j= -j$. Take $\phi_j\in C^\infty_c(\Omega\times Y)$ to be any nonnegative $C^\infty$ approximation of $f_j$ that satisfies \begin{gather} \label{eq:contradiction} \int_{\Omega\times Y}\rho\phi_j dx\,dy\leq -j/2, \\ \notag |\projX{}(\operatorname{supp}\phi_j)|\leq 2|A^\Omega_j|, \\ \notag \sup_{x\in\Omega}\int_{-\infty}^\infty\phi_j(x,s)ds\leq 2\frac{1}{|A_j|}=\frac2{|A^\Omega_j|}.
\end{gather} Then we have by Lemma \ref{lem:radon-nikodym}, \eqref{eq:defS}, the fact that $\mu$ and $\phi_j$ are non-negative, the bounds above, and Lemma \ref{lem:lebesgueproj}, \begin{align*} -\frac j2&\geq\int_{\Omega\times Y}\phi_j(x,y)\rho(x,y)dx\,dy\\ &=S(\phi_j)\\ &=\mu\left(0,\dots,0,\int_{-\infty}^y\phi_j(x,s)ds\right)\\ &=-\int_{\Omega\times Y\times Z}\int_{-\infty}^y\phi_j(x,s)ds\, d\mu(x,y,z)\\ &\geq - \int_{\Omega\times Y\times Z}\int_{-\infty}^\infty\phi_j(x,s)ds\, d\mu(x,y,z)\\ &\geq -\int_{\Omega\times Y\times Z} \frac2{|A^\Omega_j|} \chi_{\projX{}(\operatorname{supp}\phi_j)}(x)d\mu(x,y,z)\\ &=-\frac2{|A^\Omega_j|}(\projX{})_\#\mu(\projX{}(\operatorname{supp}\phi_j))\\ &=-\frac2{|A^\Omega_j|}c|\projX{}(\operatorname{supp}\phi_j)|\\ &\geq -\frac2{|A^\Omega_j|}c(2|A^\Omega_j|)=-4c, \end{align*} where $\projX{}$ and $c$ are as in the statement of Lemma \ref{lem:lebesgueproj}. This uniform bound gives the contradiction we were aiming for. We conclude that the essential range of $\rho$ is a bounded interval in $(-\infty,0]$. \end{proof} We will henceforth take $\rho$ to be bounded (we may choose such a representative in its class of essentially bounded functions) and denote the range of $\rho$ by \[I=\rho(\Omega\times Y)\subset(-\infty,0].\] We will also denote by $\nu$ the restriction of Lebesgue measure to $I$. \begin{lemma}\label{lem:dividentity} For all smooth vector fields $X$ compactly supported in $\Omega\times Y$ and vanishing in a neighborhood of $\partial \Omega\times Y$, \[\mu(X)=S(\operatorname{div} X).\] \end{lemma} \begin{proof} Indeed, \begin{align*} \operatorname{div}&(X-\Big(\underbrace{0,\dots,0}_n,\int_y^{\infty} \operatorname{div}X(x,s)\,ds\Big))\\ &=\sum_{i=1}^n\frac{\partial }{\partial x_i}\left( X_i-0 \right)+\frac{\partial}{\partial y}\left(X_{n+1}-\int_y^\infty\operatorname{div} X\,ds\right)\\ &=\operatorname{div} X-\operatorname{div} X\\ &=0. \end{align*} By Lemma \ref{lem:divergencecond}, \begin{align*}0&=\mu(X-\left(0,\dots,0,\int_y^\infty\operatorname{div}X\,ds\right))\\ &=\mu(X)-\mu\left(0,\dots,0,\int_y^\infty\operatorname{div}X\,ds\right)\\ &=\mu(X)-S(\operatorname{div}X).\qedhere \end{align*} \end{proof} \begin{lemma}\label{lem:main} The functions $\varphi_r:\Omega\to \R$ defined by, \[\varphi_r(x)=\inf_{\substack{y\in Y\\\rho(x,y)\leq r}}y,\qquad r\in I=\rho(\Omega\times Y)\subseteq(-\infty,0], \; x\in \Omega,\] are weakly differentiable. These functions satisfy, for all $X\in C^\infty_c(\Omega\times Y;\R^{n+1})$, \begin{equation}\label{eq:divXvarphi} \int_I\int_{\{(x,y)\in\Omega\times Y:y\geq \varphi_r(x)\}}\hspace{-20mm}\operatorname{div}X\,dx\,dy\,d\nu(r)=\int_I\int_\Omega\langle X(x,\varphi_r(x)),(D\varphi_r(x),-1)\rangle\,dx\,d\nu(r), \end{equation} where $\nu$ is Lebesgue measure restricted to $I$. \end{lemma} Observe that, since by Lemma \ref{lem:boundedrange} $\rho$ is essentially bounded, we may take a representative in the class of $\rho$ that is bounded, and then $\varphi_r(x)$ is finite for each $x\in \Omega$. \begin{proof} Consider the set $C^1_c(\Omega\times Y;\R^{n+1})$ of compactly-supported vector fields that are continuously differentiable. Observe that since these vector fields are compactly supported, they vanish on the boundary $\partial\Omega\times Y$. Consider also the set $B$ of vector fields $X\in C^1_c(\Omega\times Y;\R^{n+1})$ satisfying $\sup_{\Omega\times Y}\|X(x,y)\|\leq 1$. 
Since, for $X\in B$, we have the uniform bound \begin{multline*} \left| \int \rho\operatorname{div} X\,dx\,dy\right| = \left|S(\operatorname{div}X)\right|=\left|\mu(X)\right|=\\ \left|\int \langle X(x,y),(z,-1)\rangle\,d\mu(x,y,z)\right| \leq \int 1+\|z\|\,d\mu(x,y,z)<+\infty \end{multline*} by Lemma \ref{lem:radon-nikodym}, \eqref{eq:defS}, Lemma \ref{lem:dividentity}, \eqref{eq:defmuX}, the Cauchy-Schwarz inequality, and the finiteness of $\mu$ together with \eqref{eq:finitemoment}, we conclude that $\rho$ is a \emph{function of bounded variation} (see \cite[Def. 5.1]{evansgariepy}) in $\Omega\times Y$. It follows from the coarea formula \cite[Thm. 5.9]{evansgariepy} that there is a set of full measure $A\subset I$, $|I\setminus A|=0$, such that if $r\in A$ then $\rho^{-1}(-\infty,r]\subset \Omega\times Y$ is a \emph{set of locally finite perimeter} (\cite[Def. 5.1]{evansgariepy}, \cite[Ch. 12]{maggi2012sets}), meaning that \[\sup_{X\in B}\int_{\rho^{-1}(-\infty,r]}\operatorname{div}X(x,y)\,dx\,dy<+\infty, \quad r\in A.\] By \cite[Prop. 12.1]{maggi2012sets}, there exists an $\R^{n+1}$-valued measure $\bm\nu_r$ with bounded total variation $|{\bm\nu}_r|$ (defined in \cite[Rmk. 4.12]{maggi2012sets}), $|{\bm\nu}_r|(\overline{\Omega\times Y})<+\infty$, such that, for $X\in C^1_c(\Omega\times Y;\R^{n+1})$, \[\int_{\rho^{-1}(-\infty,r]}\operatorname{div}X\,dx\,dy=\int_{\Omega\times Y}X\cdot d{\bm\nu}_r = \sum_{i=1}^{n+1} \int_{\Omega\times Y} X_i d{\bm\nu}_{r,i}.\] De Giorgi's Structure Theorem (\cite[Th. 5.15 and 5.16]{evansgariepy} or \cite[Th. 15.9]{maggi2012sets}) then implies that ${\bm\nu}_r$ is supported on the boundary $\partial \rho^{-1}(-\infty,r]$, that this boundary is of Hausdorff dimension $n$, and that the unit normal $\eta_r$ to the boundary of $\rho^{-1}(-\infty,r]$ is well defined for $H^n$-almost every point $(x,y)$ on the boundary by \begin{equation}\label{eq:defeta} \eta_r(x,y)=\lim_{b\searrow 0}\frac{{\bm\nu}_r(D((x,y),b))}{|{\bm\nu}_r|(D((x,y),b))} \end{equation} where $D((x,y),b)$ denotes the ball centered at $(x,y)$ of radius $b>0$ and $|{\bm\nu}_r|$ denotes the total variation of ${\bm\nu}_r$. Refer to Figure \ref{fig:2}. \begin{figure} \includegraphics[width=6.5cm]{img2} \centering \caption{The sets $\rho^{-1}(-\infty,r]$ and their exterior unit normal $\eta_r$, as in the situation of Lemma \ref{lem:main}.} \label{fig:2} \end{figure} Also, the Gauss-Green formula holds: for $X\in C^1(\overline{\Omega\times Y},\R^{n+1})$, \begin{equation}\label{eq:normal} \int_{\rho^{-1}(-\infty,r]}\operatorname{div}X\,dx\,dy=\int_{\partial \rho^{-1}(-\infty,r]} \langle X,\eta_r\rangle d H^n. \end{equation} Indeed, this is equivalent to \cite[eq. (15.11)]{maggi2012sets}, summing over all the entries in that vector-valued equation; cf. \cite[Rmk. 12.2]{maggi2012sets}. From Lemma \ref{lem:function} below and Remark \ref{rmk:function}, it follows that $H^n$-almost all the boundary $(\partial \rho^{-1}(-\infty,r])\cap (\Omega\times Y)$ corresponds to the graph of $\varphi_r$, i.e., \[H^n((\partial \rho^{-1}(-\infty,r])\cap (\Omega\times Y)\setminus \{(x,\varphi_r(x)):x\in\Omega\})=0.\] Let $\zeta_r\colon\Omega\to \R^n$ be the vector field whose $i$-th entry is given by \begin{equation}\label{eq:defzeta} [\zeta_r(x)]_i=-\frac{[\eta_r(x,\varphi_r(x))]_i}{[\eta_r(x,\varphi_r(x))]_{n+1}},\quad 1\leq i\leq n, \end{equation} if the denominator is nonzero, and $[\zeta_r(x)]_i=\operatorname{sign}([\eta_r(x,\varphi_r(x))]_i)\,\infty$ otherwise.
It follows from Lemma \ref{lem:weakderivative} below that the denominator in \eqref{eq:defzeta} is almost-everywhere nonzero, and that $\zeta_r$ is the weak derivative of $\varphi_r$. Equality \eqref{eq:divXvarphi} follows from \begin{align*} \int_I\int_{\{y\geq \varphi_r(x)\}}\operatorname{div}X\,dx\,dy\,d\nu(r) &=\int_I\int_{\partial\rho^{-1}(-\infty,r]}\langle X,\eta_r\rangle dH^n\,d\nu(r)\\ &=\int_I\int_{\Omega}\langle X(x,\varphi_r(x)),(\zeta_r(x),-1)\rangle dx\,d\nu(r), \end{align*} which is Lemma \ref{lem:weakderivative}\ref{it:weakderivative2}, together with $\zeta_r$ being the weak derivative $D\varphi_r$. \end{proof} \begin{lemma}\label{lem:samederivatives2} If $r\leq r'\leq 0$ and $x_0\in\Omega$ is such that $\varphi_r(x_0)=\varphi_{r'}(x_0)$ and $\eta_r$ and $\eta_{r'}$ are defined at $(x_0,\varphi_r(x_0))$, then $\eta_r(x_0,\varphi_r(x_0))=\eta_{r'}(x_0,\varphi_{r'}(x_0))$. \end{lemma} \begin{proof} This follows immediately from \cite[Th. 5.13]{evansgariepy}. \end{proof} \begin{lemma}\label{lem:function} For $r\in I$, let $P_r$ be the set of points $x\in \Omega$ such that there is exactly one value $y\in Y$ such that $(x,y)\in\partial \rho^{-1}(-\infty,r]$. For almost every $r\in I$, the $(n-1)$-dimensional Hausdorff volume of the complement of $P_r$ is \[H^{n-1}(\Omega\setminus P_r)=0.\] \end{lemma} \begin{remark}\label{rmk:function} The statement of Lemma \ref{lem:function} means that the graph of the function $\varphi_r$ defined in the statement of Lemma \ref{lem:main} coincides with $(\partial \rho^{-1}(-\infty,r])\cap(\Omega\times Y)$ almost everywhere. \end{remark} \begin{figure} \includegraphics[width=6.5cm]{img3} \centering \caption{When there is a vertical segment $\{x\}\times[a,b]$ in the boundary $\partial \rho^{-1}(-\infty,r]$, the normal vector is horizontal, that is, of the form $(z,0)$, $z\in\R^n$. The proof of Lemma \ref{lem:function} shows that the $n$-dimensional volume of the union of these segments is zero.} \label{fig:3} \end{figure} \begin{proof} From the boundedness of $\rho$ (Lemma \ref{lem:boundedrange}) it follows that at least one such value $y$ exists. Since $\rho$ is non-increasing (Lemma \ref{lem:decreasingconstant}), given $x\in\Omega$ the set of values $y\in Y$ with $(x,y)\in \partial\rho^{-1}(-\infty,r]$ must be an interval. We will show that, for almost all $r\in I$, the normal vector $\eta_r$ is almost nowhere with respect to Hausdorff measure $H^n|_{\partial\rho^{-1}(-\infty,r]}$ of the form $(z,0)$ for some $z\in\R^n$; the $0$ in the $Y$ direction appears every time the boundary $\partial \rho^{-1}(-\infty,r]$ contains a segment of the form $\{x\}\times [a,b]\subset \Omega\times Y$ with $a < b$, as vectors tangent to such a segment are of the form $(0,\dots,0,c)$, $0\neq c\in\R$, and $\eta_r$ is perpendicular to them; refer to Figure \ref{fig:3}. In other words, we will show that the $n$-dimensional Hausdorff volume of the union of intervals of the form $\{x\}\times[a,b]$ in the boundary $\partial\rho^{-1}(-\infty,r]$ is zero; since its projection onto $\Omega$ is $\Omega\setminus P_r$, this implies the statement of the lemma. Denoting the last entry of the vector field $\eta_r$ by $[\eta_r]_{n+1}$, we let $A_r$ be the set of points $(x,y)\in \partial \rho^{-1}(-\infty,r]\subseteq\Omega\times Y$ where $[\eta_r]_{n+1}(x,y)=0$.
By Lemmas \ref{lem:radon-nikodym} and \ref{lem:dividentity} as well as definitions \eqref{eq:defmuX} and \eqref{eq:defS}, \begin{align} \notag \int_I\int_{\partial \rho^{-1}(-\infty,r]} \langle X,\eta_r\rangle d H^n\,d\nu(r) \notag &=\int_I\left(\int_{\rho^{-1}(-\infty,r]} \operatorname{div}X\,dx\,dy\right)\,d\nu(r) \\ \notag &=\int\rho\operatorname{div}X\,dx\,dy\\ \notag &=S(\operatorname{div} X)\\ \notag &=\mu(X)\\ &=\int_{\Omega\times Y\times Z} \langle X(x,y),(z,-1)\rangle\,d\mu(x,y,z) \label{eq:normalid} \end{align} for all $X\in C^1_c(\Omega\times Y;\R^{n+1})$. By the density of $C^1_c(\Omega\times Y;\R^{n+1})$ in $L^1((\projXY)_\#\mu;\R^{n+1})$ and dominated convergence, this holds as well for vector fields $X$ in the latter space. Let $X=(0,\dots,0,\chi_A)$, with $A=\bigcup_rA_r$. Then, by \eqref{eq:normalid}, \begin{align*} -\mu(A\times Z)&=-\int_{\Omega\times Y\times Z}\chi_A(x,y)d\mu(x,y,z)\\ &=\int_{\Omega\times Y\times Z}\langle X(x,y),(z,-1)\rangle\,d\mu(x,y,z)\\ &= \int_I\int_{\partial\rho^{-1}(-\infty,r]}\langle X,\eta_r\rangle\,dH^n\,d\nu(r)\\ &=\int_I\int_{\partial\rho^{-1}(-\infty,r]}\chi_A[\eta_r]_{n+1}\,dH^n\,d\nu(r)=0, \end{align*} since $[\eta_r]_{n+1}(x,y)=0$ whenever $(x,y)\in A_r$, and by Lemma \ref{lem:samederivatives2} this happens whenever $(x,y)\in A$ because $\eta_r(x,y)$ is independent of $r$ (among those values of $r$ such that $(x,y)\in\partial\rho^{-1}(-\infty,r]$). Then we have, again using \eqref{eq:normalid} and $\|\eta_r\|=1$, \begin{multline*} \int_I\int_{\partial\rho^{-1}(-\infty,r]}\chi_A\,dH^n\,d\nu(r) =\int_I\int_{\partial\rho^{-1}(-\infty,r]}\langle \chi_A\eta_r,\eta_r\rangle\,dH^n\,d\nu(r)\\ =\int_{\Omega\times Y\times Z}\chi_A\langle \eta_r,(z,-1)\rangle\,d\mu(x,y,z)=0, \end{multline*} where the final equality uses $\mu(A\times Z)=0$, as established above. This is what we wanted to show. \end{proof} \begin{lemma}\label{lem:weakderivative} \begin{enumerate}[label=\roman*.,ref=(\roman*)] \item \label{it:weakderivative1} The vector field $\zeta_r$ defined in \eqref{eq:defzeta} is the weak derivative of $\varphi_r$, that is, \[\int_\Omega\varphi_r(x)\nabla\phi(x)\,dx=\int_\Omega\phi(x)\zeta_r(x)\,dx\] for all $\phi\in C^\infty_c(\Omega)$. \item \label{it:weakderivative2} We also have, for $X\in C^1_c(\Omega\times Y;\R^{n+1})$, and for almost every $r\leq 0$, \[\int_{\partial\rho^{-1}(-\infty,r]}\langle X,\eta_r\rangle dH^n =\int_{\Omega}\langle X(x,\varphi_r(x)),(\zeta_r(x),-1)\rangle dx.\] \end{enumerate} \end{lemma} \begin{proof} For almost every $r\in(-\infty,0]$, $\eta_r$ is well defined on almost all the boundary $\partial\rho^{-1}(-\infty,r]$. Denote by $m_1^r$ the Hausdorff measure $H^n$ on the boundary $(\partial \rho^{-1}(-\infty,r])\cap(\Omega\times Y)$ (which, by Remark \ref{rmk:function}, coincides with the graph of $\varphi_r$ up to an $H^n$-negligible set; see Figure \ref{fig:2}), and by $m_2^r$ the pushforward of Lebesgue measure on $\Omega$ by the map $x\mapsto(x,\varphi_r(x))$; $m_2^r$ is also supported in $(\partial \rho^{-1}(-\infty,r])\cap(\Omega\times Y)$. The measure $m_2^r$ is absolutely continuous with respect to $m_1^r$. Indeed, if $A$ is a measurable set of zero $m_1^r$ measure, this means that for every $\varepsilon>0$, $A$ can be covered with countably many balls $U_1,U_2,\dots$ such that $\sum_{i}(\operatorname{diam} U_i)^n\leq \varepsilon$.
The image $\projXY{}(A)$ through the projection $\projXY{}\colon\Omega\times Y\to\Omega$ can then be covered by the projections $\projXY{}(U_i)$ of the balls, which still satisfy (for some $C>0$ dependent only on $n$) \[m^r_2(A)\leq m^r_2\big({\textstyle\bigcup_{i} U_i}\big)\leq \big|{\textstyle\bigcup_{i} \projXY{}(U_i)}\big|\leq C\sum_{i}(\operatorname{diam}\projXY{}(U_i))^n\leq C\varepsilon,\] and hence, since $\varepsilon>0$ is arbitrary, $A$ is a set of measure zero with respect to $m_2^r$. This proves the absolute continuity of $m_2^r$ with respect to $m_1^r$. By the Radon-Nikodym theorem there is a measurable function $J_r(x,y)$ such that for all measurable functions $f\colon\Omega\times Y\to\R$, \begin{multline}\label{eq:jacobian} \int_{\partial\rho^{-1}(-\infty,r]}f(x,y)\eta_r(x,y)\,dH^n=\int_{\partial\rho^{-1}(-\infty,r]}f(x,y)\eta_r(x,y)\,dm^r_1\\ =\int_{\partial\rho^{-1}(-\infty,r]}f(x,y)\eta_r(x,y)J_r(x,y)\,dm_2^r\\= \int_\Omega f(x,\varphi_r(x))\eta_r(x,\varphi_r(x))J_r(x,\varphi_r(x))dx. \end{multline} From \eqref{eq:normal} it follows that \[\int_{\{y\geq \varphi_r(x)\}}\operatorname{div}X\,dx\,dy=\int_{\partial \rho^{-1}(-\infty,r]} \langle X,\eta_r\rangle d H^n,\quad X\in C^\infty_c(\overline{\Omega\times Y};\R^{n+1}),\] or equivalently, we have the vector-valued identity (proved from the above by taking $X=\phi \,e_i$ for each vector $e_i$ in the standard basis) \begin{equation}\label{eq:previouslydisplayedidentity} \int_{\{y\geq \varphi_r(x)\}}\nabla \phi(x,y)\,dx\,dy=\int_{\partial \rho^{-1}(-\infty,r]} \phi\, \eta_r\, d H^n,\quad \phi\in C^\infty_c(\overline{\Omega\times Y};\R). \end{equation} Take $N>0$ large enough that $\operatorname{supp}\mu\subset\Omega\times[-N,N]\times Z$, and take a function $\psi\in C^\infty_c(\overline{\Omega\times Y})$, $0\leq\psi\leq 1$, such that $\psi(x,y)=1$ for all $|y|\leq N$. Then, if we let $\mathbf n$ denote the unit normal to the boundary of $\Omega\times [0,+\infty)\subset\Omega\times Y$, using \eqref{eq:jacobian}, and computing the derivatives below as in $\Omega\times Y$, so that for example $\nabla \phi(x)=\nabla_{x,y} \phi(x)=(\frac{\partial\phi}{\partial x_1},\dots,\frac{\partial\phi}{\partial x_n},0)$ to account for $\frac{\partial\phi}{\partial y}=0$, we have \begin{align*} \int_\Omega&\varphi_r(x)\nabla\phi(x)\,dx= \int_\Omega\int_0^{\varphi_r(x)}\nabla \phi(x)\,dy\,dx\\ &=\int_\Omega\int_0^{\varphi_r(x)}\nabla (\psi\phi)(x,y)\,dy\,dx\\ \notag &=\int_{\{(x,y)\in\Omega\times Y:0\leq y\leq \varphi_r(x)\}}\nabla(\psi\phi)\,dx\,dy -\int_{\{(x,y)\in\Omega\times Y:0\geq y\geq \varphi_r(x)\}}\nabla(\psi\phi)\,dx\,dy \\ \notag &=\int_{\{(x,y)\in\Omega\times Y:y\geq 0\}}\nabla(\psi\phi)\,dx\,dy-\int_{\{(x,y)\in\Omega\times Y:y\geq \varphi_r(x)\}}\nabla(\psi\phi)\,dx\,dy \\ \notag &=\int_{\partial\{(x,y)\in\Omega\times Y:y\geq 0\}} \psi\phi\mathbf{n}\,dH^n-\int_{\partial\rho^{-1}(-\infty,r]} \psi\phi\eta_r\,dH^n\\ \notag &=\int_{\Omega\times \{0\}} \phi\mathbf{n}\,dH^n-\int_{\{(x,\varphi_r(x)):x\in\Omega\}} \phi\eta_r\,dH^n\\ &=\int_\Omega \phi(x)e_{n+1}dx+\int_\Omega\phi(x)\eta_r(x)J_r(x,\varphi_r(x)) dx \end{align*} for all $\phi\in C^\infty_c(\Omega;\R)$; here, we have first used the fact that $\varphi_r(x)=\int_0^{\varphi_r(x)}dy$. Then we introduced $\psi$, and we separated the positive and negative parts of $\varphi_r$. We have expressed them in a slightly different form (see Figure \ref{fig:4}), and then passed to the boundary using \eqref{eq:previouslydisplayedidentity} and its equivalent for the domain $\{y\geq 0\}\subset \overline{\Omega\times Y}$.
In the next-to-last line we used that $\psi=1$ on the domain of integration, and we have kept only the parts of the boundaries that do not cancel out, using the fact that $\phi$ is compactly supported in $\Omega$: namely, the set $\Omega\times \{0\}\subset\partial\{y\geq 0\}$, whose normal is $e_{n+1}$, and the graph $\{(x,\varphi_r(x)):x\in\Omega\}$, whose normal is $\eta_r$; the factor $J_r$ comes in once we apply \eqref{eq:jacobian}. In other words, we have \begin{equation}\label{eq:vectident} \int_\Omega\varphi_r(x)\begin{pmatrix}\nabla_x \phi(x)\\0\end{pmatrix}dx =\int_\Omega\phi(x)e_{n+1}dx+\int_\Omega\phi(x)\eta_r(x)J_r(x,\varphi_r(x))dx, \end{equation} where the 0 entry in the left-hand side appears because $\phi$ is independent of $y$. The last entry of \eqref{eq:vectident} gives \[0=\int_\Omega\phi(x)1\,dx+\int_\Omega \phi(x)[\eta_r(x,\varphi_r(x))]_{n+1}J_r(x,\varphi_r(x))\,dx.\] Since this is true for all $\phi\in C^\infty_c(\Omega)$, we conclude that, for almost every $x\in\Omega$, \[J_r(x,\varphi_r(x))=-\frac1{[\eta_r(x,\varphi_r(x))]_{n+1}},\] and \eqref{eq:jacobian} becomes (cf. \eqref{eq:defzeta}) \begin{equation}\label{eq:jacobian2} \int_{\partial\rho^{-1}(-\infty,r]}f(x,y)\eta_r(x,y)\,dH^n(x,y)=\int_{\Omega}f(x,\varphi_r(x))\ \begin{pmatrix} \zeta_r(x)\\-1 \end{pmatrix}dx. \end{equation} Applying \eqref{eq:jacobian2} to $f=X_i$, taking the $i$-th entry, and adding over all $i=1,\dots, n+1$ proves the identity in item \ref{it:weakderivative2}. \begin{figure} \includegraphics[width=\textwidth]{img4} \centering \caption{Illustrating a step in the proof of Lemma \ref{lem:weakderivative}, we see that the difference of integrals on the shaded areas in the first two diagrams on the left is equal to the difference of integrals on the two areas on the right. } \label{fig:4} \end{figure} We also have, taking only the first $n$ entries in \eqref{eq:vectident}, \[\int_\Omega\varphi_r(x)\nabla\phi(x)\,dx=\int_\Omega\phi(x)\zeta_r(x)\,dx.\] This is precisely the definition \eqref{eq:defweakdiff} of weak differentiability. \end{proof} \begin{proof}[Proof of Theorem \ref{thm:decomposition}] Let $\phi\colon\Omega\times Y\times Z\to\R$ be, for now, a \emph{smooth} function that is linear in $z$ and is compactly-supported in $\Omega\times Y$, vanishing in a neighborhood of $\partial \Omega\times Y$. By Lemma \ref{lem:boundedrange}, the function $\rho$ from Lemma \ref{lem:radon-nikodym} is bounded; its range is the bounded interval $I\subseteq\R$. The functions $\varphi_r$ in Lemma \ref{lem:main} are defined only for $r\in I$. Let $\nu$ denote the restriction of Lebesgue measure to $I$. Since $\phi$ is linear in $z$, for each $(x,y)\in\Omega\times Y$ the functional $z\mapsto \phi(x,y,z)$ corresponds to a vector $\tilde X(x,y)\in\R^n$ satisfying \[\phi(x,y,z)=\langle\tilde X(x,y),z\rangle,\quad (x,y,z)\in\Omega\times Y\times Z,\] and we let $X(x,y)=(\tilde X(x,y),0)\in\R^{n+1}$.
Then by Lemmas \ref{lem:dividentity} and \ref{lem:main} we have \begin{align*} \int_{\Omega\times Y\times Z}\phi\,d\mu&=\int_{\Omega\times Y\times Z}\langle X,(z,-1)\rangle\,d\mu\\ &=\mu(X)\\ &=S(\operatorname{div} X)\\ &=\int_{\Omega\times Y} \rho\operatorname{div} X\,dx\,dy\\ &=\int_I\int_{\rho^{-1}(-\infty,r]} \operatorname{div} X\,dx\,dy\,d\nu(r)\\ &=\int_I\int_{\{(x,y)\in\Omega\times Y:y\geq \varphi_r(x)\}}\operatorname{div} X\,dx\,dy\,d\nu(r)\\ &=\int_I \int_{\Omega} \langle X(x,\varphi_r(x)),(D\varphi_r(x),-1)\rangle \,dx\,d\nu(r)\\ &=\int_I\int_\Omega \phi(x,\varphi_r(x),D\varphi_r(x)) \,dx\,d\nu(r) \end{align*} Thus the first statement of the theorem is true in the case of smooth $\phi$ linear in $z$. Defining $\mathcal Z$, $\bar\mu$, and $\mu_{xy}$ as in \eqref{eq:defZ} we have for all continuous functions $\phi\colon\Omega\times Y\times Z\to \R$ that are linear in $z$, \begin{equation}\label{eq:linearinz} \int_{\Omega\times Y\times Z}\phi\,d\bar\mu=\int_{\Omega\times Y\times Z}\phi\,d\mu=\int_{\R}\int_\Omega\phi(x,\varphi_r(x),D\varphi_r(x))\,dx\,d\nu(r). \end{equation} This means that for $(\projXY)_\#\mu$-almost every $(x,y)$ we have, by Lemma \ref{lem:samederivatives2}, that, if $r$ is such that $\varphi_r(x)=y$ then \begin{equation}\label{eq:singlepoint} D\varphi_r(x)=\mathcal Z(x,y). \end{equation} Observe that, by \eqref{eq:finitemoments}, we have \[\int_{I}\int_\Omega\|D\varphi_r\|\,dx\,d\nu(r)=\int_{\Omega\times Y\times Z}\|z\|d\mu<+\infty,\] whence it follows that for $\nu$-almost every $r$, we have $\varphi_r\in W^{1,1}(\Omega)$. The argument used to establish \eqref{eq:singlepoint} shows that in fact $D\varphi_r(x)$ is, for almost every $x$ and $\nu$-almost every $r$, in the convex hull of $\operatorname{supp}\mu\cap\{(x,\varphi_r(x),z):z\in Z\}$. Since $\operatorname{supp} \mu$ is compact, this implies that $\varphi_r$ is in $W^{1,\infty}(\Omega)$ for $\nu$-almost every $r$. For $\psi\in C^\infty(\Omega\times Y)$ we have, using \eqref{eq:singlepoint}, \begin{multline}\label{eq:scalarint} \int_{\Omega\times Y\times Z} \psi(x,y)\,d\mu(x,y,z) =\int_{\Omega\times Y\times Z} \psi(x,y)\left\langle z,\frac{\mathcal Z(x,y)}{\|\mathcal Z(x,y)\|^2}\right\rangle\,d\mu(x,y)\\ =\int_I\int_\Omega \psi(x,\varphi_r(x))\left\langle D\varphi_r(x),\frac{\mathcal Z(x,\varphi_r(x))}{\|\mathcal Z(x,\varphi_r(x))\|^2}\right\rangle\,dx\,d\nu(r)\\ =\int_I \int_\Omega \psi(x,\varphi_r(x))\,dx\,d\nu(r). \end{multline} This shows that (\ref{eq:superposition}) holds for smooth $\phi$ constant in $z$, whence adding up we get the statement for smooth $\phi$ affine in $z$. This implies that the statement holds also for continuous $\phi$ affine in $z$ by the density of $C^\infty$ functions affine in $z$ in the space of continuous functions affine in $z$ in the uniform norm on compact sets. The case of $\phi \in L_1(\mu)$ is proven by the following argument. Let $\mu' = \int_I (\mathrm{id},\varphi_r,D\varphi_r)_{\#}\lambda_{\Omega}\, dr$, where $\lambda_\Omega$ is the Lebesgue measure on $\Omega$; that is, $\mu'$ is the superposition of the occupation measures generated by the functions $\varphi_r$. Then equation (\ref{eq:superposition}) reads \[ \int_{\Omega\times Y\times Z} \phi\,d\mu = \int_{\Omega\times Y\times Z} \phi\,d\mu' \] for all $\phi\in L_1(\mu)$ affine in $z$. 
Since the result holds for all continuous $\phi$ affine in $z$ and both $\mu$ and $\mu'$ are Radon measures, it follows immediately by a classical density result that the statement holds for all $\phi \in L_1(\mu)$ independent of $z$. This implies that the $(x,y)$ marginals $\mu_{\Omega\times Y}$ and $\mu'_{\Omega\times Y}$ coincide. It remains to prove the statement with $\phi\in L_1(\mu)$ linear in $z$; it suffices to consider $\phi$ of the form $\phi(x,y,z)=f(x,y)z_k$ for $f\in L^1(\mu_{\Omega\times Y})$ and $1\leq k\leq n$, because a general $\phi$ will be a linear combination of these. We already have, for continuous $f$, the identity \[\int_{\Omega\times Y\times Z}f(x,y)z_k\,d\mu(x,y,z)=\int_{\Omega\times Y\times Z}f(x,y)z_k\,d\mu'(x,y,z).\] By the same classical density result cited above applied to the signed Radon measures $z_k d\mu$ and $z_k d\mu'$, this identity holds for $f\in L^1(\mu_{\Omega\times Y})$ too. This shows that the result is true for all $\phi\in L_1(\mu)$. The last statement of the theorem follows directly from Lemma \ref{lem:main}. \end{proof} \subsection{Connection with the original Hardt-Pitts decomposition} The context in which superpositions of the type described in Theorem \ref{thm:decomposition} were first developed is that of Geometric Measure Theory, in which the main objects of interest are \emph{currents}, which are the continuous functionals on the space of smooth differential forms on an open set or a manifold. Just like distributions (continuous functionals on $C^\infty_c(U)$) can be of order higher than 1, involving integrals of derivatives of the test function, currents can also involve derivatives of the differential forms they are fed. This is why it is interesting to distinguish \emph{normal currents}, which roughly correspond to currents that can be expressed as integrals over a finite measure, evaluating the differential form on a set of vector fields, and satisfying some additional integrability conditions (see for example \cite{morgan}). Thus for example, the version of the Hardt-Pitts decomposition described in \cite{zworski1988decomposition} shows that a normal current of dimension $n$ in $\R^{n+1}$ and associated to a finite measure whose density is a positive $C^\infty$ function, and smooth vector fields satisfying an integrability condition, can be expressed as a superposition of so-called \emph{rectifiable currents} of dimension $n$. These are currents that can be written as a sum of countably many integrals over Lipschitz hypersurfaces. Our result does not require the smoothness assumptions of \cite{zworski1988decomposition}. Let us explain how to associate a normal current $T_\mu$ to the measure $\mu$ that Theorem \ref{thm:decomposition} decomposes: for a differential form $\omega$ of order $n$ on $\Omega\times Y$, we let \[T_\mu(\omega)=\int_{\Omega\times Y\times Z} \omega_{(x,y)}(\tiny\begin{pmatrix} 1\\0\\\vdots\\0\\z_1 \end{pmatrix}, \begin{pmatrix} 0\\1\\\vdots\\0\\z_2 \end{pmatrix}, \dots, \begin{pmatrix} 0\\0\\\vdots\\1\\z_n \end{pmatrix})\,d\mu(x,y,z). \] Similarly, to each of the sheets $\varphi_r$ we can associate a rectifiable current $R_r$ on $\Omega\times Y$ given by \[R_r(\omega)=\int_{\Omega} \omega_{(x,\varphi_r(x))}(\tiny\begin{pmatrix} 1\\0\\\vdots\\0\\\frac{\partial\varphi_r}{\partial x_1}(x) \end{pmatrix}, \begin{pmatrix} 0\\1\\\vdots\\0\\\frac{\partial\varphi_r}{\partial x_2}(x) \end{pmatrix}, \dots, \begin{pmatrix} 0\\0\\\vdots\\1\\\frac{\partial\varphi_r}{\partial x_n}(x) \end{pmatrix})\,dx. 
\] Then the main conclusion of Theorem \ref{thm:decomposition} can be written as \[T_\mu=\int_\R R_rd\nu(r),\] an expression that roughly corresponds to \cite[eqs. (2), (8)]{zworski1988decomposition}. \subsection{{Proof of Theorem \ref{thm:consolidated}}} \label{sec:proof2} In order to prove Theorem \ref{thm:consolidated}, we need a lemma. \begin{lemma}\label{lem:jensenlemma} \begin{enumerate}[label=\roman*.,ref=(\roman*)] \item \label{it:insideomega}Let $\mu$ be as in the statement of Theorem \ref{thm:consolidated}. Let $\nu$ and $(\varphi_r)$ be as in the conclusion of Theorem \ref{thm:decomposition}. Assume that $L\colon\Omega\times Y\times Z\to\R$ is measurable, and convex in $z$. Then \[\int_{\R}\int_\Omega L(x,\varphi_r(x),D\varphi_r(x))\,d\nu\leq \int_{\Omega\times Y\times Z}L\,d\mu.\] \item \label{it:boundary} Assume, additionally to the previous item, that $\mu_\partial$ is as in the statement of Theorem \ref{thm:consolidated}. Then the restriction of $\varphi_r$ to $\partial\Omega$ is a well-defined Lipschitz function, and we have, for all measurable functions $\phi\colon\partial\Omega\times Y\to\R$, \[\int_{\partial\Omega\times Y\times Z}\phi(x,y) d\mu_\partial(x,y)=\int_{\R}\int_{\partial\Omega}\phi(x,\varphi_r(x))d\sigma(x)\,d\nu(r),\] where $\sigma$ denotes Hausdorff measure on the boundary $\partial \Omega$. In other words, the decomposition of $\mu$ implies a decomposition of $\mu_\partial$. \end{enumerate} \end{lemma} \begin{proof} Let us prove item \ref{it:insideomega}. Let $\mathcal Z$, $\mu_{\Omega\times Y}$, $\mu_{xy}$, and $\bar \mu$ be as in Definition \ref{def:centroid}. It follows from Jensen's inequality that \begin{equation}\label{eq:jensenarg} L(x,y,\mathcal Z(x,y))\leq \int_{Z} L(x,y,z)\,d\mu_{xy}(z) \end{equation} and hence also \begin{multline}\label{eq:Jensen_mu} \int_{\Omega\times Y\times Z} L\,d\bar\mu=\int_{\Omega\times Y} L(x,y,\mathcal Z(x,y))\,d\mu_{\Omega\times Y}(x,y)\\\leq \int_{\Omega\times Y}\int_Z L(x,y,z)d\mu_{xy}(z)d\mu_{\Omega\times Y}(x,y)=\int_{\Omega\times Y\times Z} L\,d\mu. \end{multline} Since the integrals of the functions $\phi$ in the statement of Theorem \ref{thm:decomposition} with respect to $\mu$ and $\bar\mu$ coincide, the decomposition given by the theorem is the same for either of these measures; let $\nu$ be the corresponding measure, and $(\varphi_r)$ be the corresponding family of functions. Thus by \eqref{eq:Jensen_mu}, the definition of $\bar\mu$, the fact that the $L(x,y,\mathcal Z(x,y))$ does not depend on $z$, \eqref{eq:scalarint}, and \eqref{eq:singlepoint}, \begin{align*} \int L\,d\mu&\geq \int L\,d\bar\mu\\ &=\int L(x,y,\mathcal Z(x,y))\,d\mu_{\Omega\times Y}(x,y)\\ &=\int L(x,y,\mathcal Z(x,y))\,d\mu(x,y,z)\\ &=\int L(x,\varphi_r(x),\mathcal Z(x,\varphi_r(x)))\,dx\,d\nu(r)\\ &= \int_\R\int_\Omega L(x,\varphi_r(x),D\varphi_r(x))\,dx\,d\nu(r) \end{align*} This proves item \ref{it:insideomega}. To prove item \ref{it:boundary}, note that the set $\Omega$ has a boundary measure $\sigma$ supported on $\partial \Omega$ such that, if $X\in C^1(\overline\Omega;\R^n)$, then the Gauss theorem holds, that is \begin{equation}\label{eq:gauss} \int_{\partial \Omega}\langle X(x),\mathbf n(x)\rangle\,d\sigma(x)=\int_\Omega\operatorname{div} X(x)\,dx,\quad \operatorname{div} X=\sum_i\frac{\partial X_i}{\partial x_i}, \end{equation} where $\mathbf n\colon\partial\Omega\to\R^n$ is the exterior unit vector normal to $\Omega$. 
Equivalently, for all $u\in C^1(\overline\Omega;\R)$ and all $\phi\in C^\infty(\overline\Omega\times Y;\R)$, taking $X(x)=e_j\phi(x,u(x))$ for each $j=1,\dots n$ in \eqref{eq:gauss}, we get, \begin{equation}\label{eq:gauss2} \int_{\partial \Omega} \phi(x,u(x))\mathbf n(x)\,d\sigma(x)=\int_\Omega\frac{\partial\phi}{\partial x}(x,u(x)) +\frac{\partial\phi}{\partial y}(x,u(x))Du(x)\,dx. \end{equation} By the density of smooth functions among the weakly-differentiable ones, and continuity of the integral, \eqref{eq:gauss2} holds also for bounded weakly differentiable functions $u$. Remark that, since $\mu$ is compactly supported, for $\nu$-almost every $r$ the function $\varphi_r$ is bounded, as is its weak derivative $D\varphi_r$. Thus $\varphi_r\in W^{1,\infty}(\overline\Omega)$, and $\varphi_r$ is Lipschitz, as is its restriction to the boundary $\partial\Omega$. We have, from \eqref{eq:boundarymeasure}, \eqref{eq:linearinz}, and \eqref{eq:gauss2} with $u=\varphi_r$, for $f\in C^\infty(\overline \Omega;\R)$, \begin{align*} \int_{\partial\Omega\times Y\times Z}&f(x,y)\mathbf n(x)\,d\mu_\partial(x,y)\\ &=\int_{\Omega\times Y\times Z}\frac{\partial f}{\partial x}(x,y)+\frac{\partial f}{\partial y}(x,y)z\,d\mu(x,y,z)\\ &=\int_\R\int_{\Omega}\frac{\partial f}{\partial x}(x,\varphi_r(x))+\frac{\partial f}{\partial y}(x,\varphi_r(x))D\varphi_r(x)\,dx\,d\nu(r)\\ &=\int_\R\int_{\partial \Omega}f(x,\varphi_r(x))\mathbf n(x)\,d\sigma(x)\,d\nu(r). \end{align*} Let $\phi\in C^\infty(\overline\Omega;\R)$. Letting $f=\phi\mathbf n_i$, where $\mathbf n=(\mathbf n_1,\dots,\mathbf n_n)$, we get \[\int_{\partial\Omega\times Y\times Z}\phi(x,y)\mathbf n_i(x)\mathbf n(x)\,d\mu_\partial(x,y)= \int_\R\int_{\partial \Omega}\phi(x,\varphi_r(x))\mathbf n_i(x)\mathbf n(x)\,d\sigma(x)\,d\nu(r).\] Summing over the $i$-th entries, we get integrals of $\phi(x,y)\langle\mathbf n(x),\mathbf n(x)\rangle=\phi(x,y)$, thereby proving item \ref{it:boundary}. \end{proof} \begin{proof}[Proof of Theorem \ref{thm:consolidated}] Define $\mathcal C=F^{-1}(0)\cap G^{-1}((-\infty,0])$. Clearly $(x,y,z) \in \mathcal C$ if and only if $F(x,y,z) = 0$ and $G(x,y,z) \le 0$. Assumption \ref{U:synthetic} means that $\mathcal C\cap ((x,y)\times Z)$ is convex for each $(x,y)\in \Omega\times Y$. Assumption \ref{U:closedconditions} means that $\mathcal C$ and $\mathcal C_\partial$ are closed sets. Define also $\mathcal C_\partial=F_\partial^{-1}(0)\cap G_\partial^{-1}((-\infty,0])$ and observe that $(x,y)\in\mathcal C_\partial$ if, and only if, $F_\partial(x,y) = 0$ and $G_\partial(x,y) \le 0$. The total mass $\nu(\R)$ of the measure $\nu$ is 1 because \[\nu(\R)|\Omega|=\int_\R \int_\Omega dx\,d\nu(r)=\int_{\Omega\times Y\times Z}d\mu=\mu(\Omega\times Y\times Z),\] and we assumed $\mu(\Omega\times Y\times Z)=|\Omega|$. Hence we also have, using Lemma \ref{lem:jensenlemma}, \begin{multline}\label{eq:averagearg} \inf_{r\in\operatorname{supp}\nu}\int_\Omega L(x,\varphi_r(x),D\varphi_r(x))\,dx +\int_{\partial\Omega} L_\partial(x,\varphi_r(x))\,d\sigma(x)\\ \leq\frac1{\nu({\R})} \int_\R \int_\Omega L(x,\varphi_r(x),D\varphi_r(x))\,dx\,d\nu(r)+\frac1{\nu({\R})} \int_\R \int_{\partial\Omega} L_\partial(x,\varphi_r(x))\,d\sigma(x)\,d\nu(r)\\ \leq\frac1{\nu({\R})} \int L\,d\mu+\frac1{\nu({\R})}\int L_\partial\,d\mu_\partial=\int L\,d\mu+\int L_\partial\,d\mu_\partial. \end{multline} This means that the set $I_1$ of values of $r$ such that $\varphi_r$ satisfies \eqref{eq:barphiL1} has positive measure $\nu(I_1)>0$. 
For $\nu$-almost every $r$ and almost every $x\in\Omega$, the point $(x,\varphi_r(x))$ is in the support of $(\projXY)_\#\mu$, for if we take $\phi\in C^0(\Omega\times Y)$ then, by \eqref{eq:scalarint}, \[\int_{\Omega\times Y}\phi\,d(\projXY)_\#\mu=\int_{\Omega\times Y}\phi\,d\mu=\int_\R\int_\Omega\phi(x,\varphi_r(x))\,dx\,d\nu(r).\] From the argument leading to \eqref{eq:singlepoint}, it follows that for $\nu$-almost every $r$ and almost every $x\in\Omega$ we have $(x,\varphi_r(x),D\varphi(x))=(x,\varphi_r(x),\mathcal Z(x,\varphi_r(x)))$. This point is in $\mathcal C$ because $\mathcal Z(x,\varphi_r(x))$ is in the convex hull of $\operatorname{supp}\mu\cap ((x,\varphi_r(x))\times Z)$, and the latter is contained in the convex set $\mathcal C\cap ((x,\varphi_r(x))\times Z)$. Let $I_2$ be the set of values of $r$ such that $(x,\varphi_r(x),D\varphi_r(x))\in\mathcal C$ for almost every $x\in\Omega$; we have shown that $\nu(I_2)=1$. Also, \eqref{eq:Gcond} and the decomposition of $\mu_\partial $ from Lemma \ref{lem:jensenlemma}\ref{it:boundary} imply that the set $I_3$ of values of $r$ such that, for $\sigma$-almost every $x\in \partial\Omega$ we have $ (x,\varphi_r(x))\in\mathcal C_\partial$, satisfies $\nu(I_3)=1$. We thus have that $\nu(I_1\cap I_2\cap I_3)>0$. Pick $r_0\in I_1\cap I_2\cap I_3$, and set $\bar\varphi=\varphi_{r_0}$. Then $\bar\varphi$ satisfies \eqref{eq:barphiL1}--\eqref{eq:barphiG}. To prove item \ref{it:gi}, note that $C^\infty(\Omega)\cap W^{1,\infty}(\overline\Omega)$ is dense in $W^{1,\infty}(\overline\Omega)$, so we may take the functions $g_i$ to be equal to $\bar\varphi$ on the boundary $\partial\Omega$ and smooth in $\Omega$; for example, we can take a mollifier $\psi\colon \R^n\to \R_{\geq0}$, $\psi\in C^\infty(\R^n)$ supported in the unit ball and verifying $\int_{\R^n}\psi=1$, and take $h\in C^\infty(\Omega)\cap W^{1,\infty}(\overline\Omega)$ such that $0<h(x)<\operatorname{dist}(x,\partial\Omega)/2$, and define $h(x)=0$ for $x\in\partial\Omega$. Then \[g_i(x)=\begin{cases} \frac{i^n}{h(x)^n}\int_{\R^n}\psi\left(i\frac{x-y}{h(x)}\right)\,\bar\varphi(y)\,dy,&x\in\Omega,\\ \bar\varphi(x),&x\in\partial \Omega. \end{cases} \] This makes $g_i$ into a convolution of $\bar\varphi$ with a smooth kernel that approximates the Dirac delta as $i\to+\infty$ that is supported inside of $\Omega$ ($h$ guarantees this). From this definition and properties \eqref{eq:barphiL1}--\eqref{eq:barphiG} of $\bar\varphi$, together with the continuity of $F$ and $G$, it follows that \eqref{eq:Lineq}-- \eqref{eq:limG} also hold. We may differentiate $\psi$ infinitely many times inside the integral sign, by the dominated convergence theorem, so $g_i\in C^\infty(\Omega)$. Let us prove that $g_i$ is Lipschitz on $\overline \Omega$. Since $\bar\varphi\in W^{1,\infty}(\overline\Omega)$, it is Lipschitz, and we will denote its Lipschitz constant by $\ell$. For $x_1,x_2\in\overline\Omega$, we have three cases. 
First, if $x_1,x_2$ are both in $\partial\Omega$, then \[|g_i(x_1)-g_i(x_2)|=|\bar\varphi(x_1)-\bar\varphi(x_2)|\leq \ell|x_1-x_2|.\] Next, if $x_1,x_2\in \Omega$ and $H$ is the Lipschitz constant of $h$, then \begin{align*} &|g_i(x_1)-g_i(x_2)|\\ &=\left|\frac{i^n}{h(x_1)^n}\int_{\R^n}\psi\left(i\frac{x_1-y}{h(x_1)}\right)\,\bar\varphi(y)\,dy-\frac{i^n}{h(x_2)^n}\int_{\R^n}\psi\left(i\frac{x_1-y}{h(x_2)}\right)\,\bar\varphi(y)\,dy\right|\\ &=\left|\frac{i^n}{h(x_1)^n}\int_{\R^n}\psi\left(i\frac{y}{h(x_1)}\right)\,\bar\varphi(x_1-y)\,dy-\frac{i^n}{h(x_2)^n}\int_{\R^n}\psi\left(i\frac{y}{h(x_2)}\right)\,\bar\varphi(x_2-y)\,dy\right|\\ &\leq \left|\frac{i^n}{h(x_1)^n}\int_{\R^n}\psi\left(i\frac{y}{h(x_1)}\right)\,\bar\varphi(x_1-y)\,dy-\frac{i^n}{h(x_1)^n}\int_{\R^n}\psi\left(i\frac{y}{h(x_1)}\right)\,\bar\varphi(x_2-y)\,dy\right|\\ &\quad+\left|\frac{i^n}{h(x_1)^n}\int_{\R^n}\psi\left(i\frac{y}{h(x_1)}\right)\,\bar\varphi(x_2-y)\,dy-\frac{i^n}{h(x_2)^n}\int_{\R^n}\psi\left(i\frac{y}{h(x_2)}\right)\,\bar\varphi(x_2-y)\,dy\right|\\ &\leq \ell\|x_1-x_2\|\frac{i^n}{h(x_1)^n}\int_{\R^n}\psi\left(i\frac{y}{h(x_1)}\right)dy \\ &\quad +\left|\frac{i^n}{h(x_1)^n}\int_{\R^n}\psi\left(i\frac{y}{h(x_1)}\right)\,\bar\varphi(x_2-y)\,dy-\frac{i^n}{h(x_1)^n}\int_{\R^n}\psi\left(i\frac{u}{h(x_1)}\right)\,\bar\varphi\left(x_2-u\frac{h(x_2)}{h(x_1)}\right)\,du\right|\\ &\leq \ell\|x_1-x_2\|+\ell\sup_{\|y\|\leq h(x_1)/i}\|(x_2-y)-(x_2-y\tfrac{h(x_2)}{h(x_1)})\|\frac{i^n}{h(x_1)^n}\int_{\R^n}\psi\left(i\frac{y}{h(x_1)}\right)dy \\ &\leq \ell\|x_1-x_2\|+\ell|h(x_1)-h(x_2)|/i\\ &\leq (\ell+\ell H/i)\|x_1-x_2\| \end{align*} where we used the change of variables $u=yh(x_1)/h(x_2)$. Similarly, if, say, $x_1\in\partial\Omega$ and $x_2\in\Omega$, we have (and this is our last case), \begin{align*} &|g_i(x_1)-g_i(x_2)|\\ &=\left|\bar\varphi(x_1)-\frac{i^n}{h(x_2)^n}\int_{\R^n}\psi\left(i\frac{x_1-y}{h(x_2)}\right)\,\bar\varphi(y)\,dy\right|\\ &=\left|\frac{i^n}{h(x_2)^n}\int_{\R^n}\psi\left(i\frac{y}{h(x_2)}\right)\,\bar\varphi(x_1)\,dy-\frac{i^n}{h(x_2)^n}\int_{\R^n}\psi\left(i\frac{y}{h(x_2)}\right)\,\bar\varphi(x_2-y)\,dy\right|\\ &\leq \ell(\|x_1-x_2\|+h(x_2)/i)\frac{i^n}{h(x_2)^n}\int_{\R^n}\psi\left(i\frac y{h(x_2)}\right)dy\\ &\leq \ell(\|x_1-x_2\|+|h(x_2)-h(x_1)|/i)\\ &\leq(\ell+\ell H/i)\|x_1-x_2\| \end{align*} since $h(x_1)=0$ in this case. Thus indeed $g_i\in W^{1,\infty}(\overline\Omega)$. This concludes the proof of item \ref{it:gi}. \end{proof} \section{Positive gap in codimensions greater than one} \label{sec:positive gap} In this section we construct an explicit example of a Lagrangian $L$ that exhibits a positive gap between the classical and relaxed solution in codimension two (i.e., $ m =\mathrm{dim}(Y) = 2$). The Lagrangian constructed is \emph{strictly convex} in $z$ and of class $C^{1,1}_{\mathrm{loc}}$. The construction extends to codimensions greater than two and can be modified to provide a higher degree of differentiability of $L$. Let $\Omega=B(0,1)$ be the unit ball in $\R^2$, $Y=\R^2$, $Z=\R^{2\times 2}$. Denote by $W^{1,2}(\Omega)$ the Sobolev space of real valued, weakly differentiable functions on $\Omega$ whose derivative is in $L^2(\Omega)$. Let $\mathcal M$ denote the set of pairs $(\mu,\mu_\partial)$ of relaxed occupation measures and their boundary measures, as in Definition \ref{def:M}. We say a function is of class $C^{1,1}_{\mathrm{loc}}$ if it is continuously differentiable and its derivative is Lipschitz continuous on each compact set. 
\begin{theorem}\label{thm:gap} There is a function $L\colon\Omega\times Y\times Z\to\R$ of class $C^{1,1}_{\mathrm{loc}}$, strictly convex in $z$, and such that \begin{equation}\label{eq:gap} \inf_{h\in W^{1,2}(\Omega)}\int_{\Omega}L(x,h(x),Dh(x))\,dx>\min_{(\mu,\mu_\partial)\in\mathcal M} \int_{\Omega\times Y\times Z} L\,d\mu. \end{equation} \end{theorem} \begin{remark} in In our construction below, it will be clear that while $L$ is convex in $z$, it is not convex in $\Omega$ or in $Y$. Also, by replacing the exponent $3$ by a larger integers $p>3$ in \eqref{eq:example} below, examples of arbitrarily high regularity $C^{p-2}$ can be obtained. \end{remark} \paragraph{Construction of $L$.} Define a set-valued map $f\colon\Omega\rightrightarrows Y \subset \mathbb{R}^2$ by \begin{equation}\label{eq:example} f(x)=\{r^3(\cos \tfrac\theta2,\sin \tfrac\theta2):x=r(\cos\theta,\sin\theta), r\geq 0,\theta\in\R\}, \quad x\in\R^2, \end{equation} so that $f$ is essentially a modified version of the complex square root, where we have replaced $\sqrt r$ by $r^3$. If $x\neq0$, $f(x)$ consists of exactly two points in $\mathbb{R}^2$. Let, for $k=0,1$, \begin{align*} \uu_k(r(\cos \theta,\sin\theta))&=(-1)^kr^3\left(\cos\frac\theta2,\sin\frac\theta2\right)\\ &=r^3\left(\cos\frac{\theta+2\pi k}2,\sin\frac{\theta+2\pi k}2\right),\quad r\in[0,1),\theta\in[0,2\pi). \end{align*} Thus $f(x)=\{\uu_0(x),\uu_1(x)\}$ and $\uu_0(x)=-\uu_1(x)$. See Figure \ref{fig:6}. \begin{figure} \includegraphics[width=0.6\textwidth]{img6} \centering \caption{This very rough scheme captures only the topological aspect of the situation to illustrate the fact that the image under the set-valued map $f$ of each circle $\{r=r_0\}$, $0<r_0<1$, is a twice-winding, non-self-intersecting cycle; on top of each point $x$ on the circle there are two points, $\uu_0(x)$ and $\uu_1(x)$. We have also marked the point corresponding to angle $\theta=0$ that is mapped to the interface between the parameterizations $\uu_0$ and $\uu_1$ of the image of $f$. % More accurate depictions of the situation can be found in Figures \ref{fig:5}--\ref{fig:9}.} \label{fig:6} \end{figure} Let \begin{equation}\label{eq:Delta} \Delta=\{(x,y)\in \Omega\times Y:|\langle y,\uu_0(x)\rangle|>\|x\|^6/10\}. \end{equation} Note that $\|\uu_i(x)\|^2=\|x\|^6$, so the graph of $f$ is contained in $\Delta$; see Figures \ref{fig:5} and \ref{fig:7}. Also, for each $0\neq x\in \Omega$, the set of points $y\in Y$ with $(x,y)\in \Delta$ has two connected components corresponding to the sign of the inner product $\langle y,\uu_0(x)\rangle$. In order to define an auxiliary function $\psi\colon(\Omega\times Y)\setminus\{0\}\to[0,1]\in C^\infty$ that will be of great utility, pick a function $\rho\in C^\infty(\R;[0,1])$ such that $\rho(r)=1$ for all $r\geq 1$ and $\rho(-r)=1-\rho(r)$, and let \[\psi(x,y)=\rho\left(\frac{10\langle y,\uu_0(x)\rangle}{\|x\|^6}\right),\quad (x,y)\in\Omega\times Y.\] Then \begin{itemize} \item $\psi(x,y)=1$ for $(x,y)\in\Delta$ with $\langle y,\uu_0(x)\rangle<0$, and \item $\psi(x,y)=0$ for $(x,y)\in\Delta$ with $\langle y,\uu_0(x)\rangle>0$.\end{itemize} For later use we record the following properties of $\psi$ (see Figure \ref{fig:5}): \begin{lemma}\label{lem:psi} \begin{enumerate}[label=\roman*.,ref=(\roman*)] \item \label{it:psibound}$ |\psi(x,y)|\leq 1$. 
\item \label{it:Usmooth} the function \[U(x,y)=\begin{cases}\psi(x,y)\uu_0(x)+(1-\psi(x,y))\uu_1(x),&(x,y)\neq (0,0),\\ 0,&(x,y)=(0,0) \end{cases}\] is smooth on $(\Omega\setminus\{0\})\times Y$, and can be alternatively written as \[U(x,y)=\begin{cases} (2\psi(x,y)-1)\uu_0(x), & (x,y)\neq (0,0),\\ 0,&(x,y)=(0,0), \end{cases}\] because $\uu_0=-\uu_1$, and verifies \[\|U(x,y)\|=O(\|x\|^3)\] as $x\to 0$. \item \label{it:Utrivial} On $\Delta$, the function $U(x,y)$ coincides either with $\uu_0(x)$ or with $\uu_1(x)$, whichever is closest to $y$. \item \label{it:Vsmooth} For $i=0,1$, let $D\uu_i$ be the $2\times 2$ matrix \[D\uu_i=\left(\frac{\partial \uu_i}{\partial x_1} , \frac{\partial \uu_i}{\partial x_2}\right),\] except at the points of the form $(a,0)$, $a\geq0$, where this is not defined; we define $D\uu_i$ there by extending it continuously from above, namely, \begin{equation*}\label{eq:defDu}D\uu_i(a,0)\coloneqq(-1)^ia^2\begin{pmatrix} 3& 0\\ 0&1/2 \end{pmatrix},\quad a\geq0. \end{equation*} The function \[ V(x,y)=\begin{cases} \psi(x,y)D\uu_0(x)+(1-\psi(x,y))D\uu_1(x),&(x,y)\neq (0,0),\\ 0,&(x,y)=(0,0), \end{cases} \] is smooth on $(\Omega\setminus\{0\})\times Y$, and \[\|V(x,y)\|=O(\|x\|^2)\] as $x\to 0$. \item \label{it:Vtrivial} On $\Delta$, the function $V(x,y)$ coincides either with $D\uu_0(x)$ or with $D\uu_1(x)$, according to whether $\uu_0(x)$ or $\uu_1(x)$ is closest to $y$, respectively. \end{enumerate} \end{lemma} \begin{proof} Using Lemma \ref{lem:lemregularity} below with $u=\uu_0$ and then again with $u=D\uu_0$, we see that $U(x,y)=(2\psi(x,y)-1)\uu_0(x)$ and $V(x,y)=(2\psi(x,y)-1)D\uu_0(x)$ are smooth on $(\Omega\setminus\{0\})\times Y$. The rest of the lemma is clear from the definitions. \end{proof} \begin{lemma}\label{lem:lemregularity} Let $k>0$ and let $u\colon\Omega\setminus \{(a,0):a\geq 0\}\to\R^k$ be a smooth function such that, for all derivatives $\partial^Iu$ of $u$, of any order including zero, we have that the following limits exist and satisfy \[\lim_{\substack{\bar a\to a\\b\searrow0}}\partial^Iu(\bar a,b)=-\lim_{\substack{\bar a\to a\\b\nearrow0}}\partial^Iu(\bar a,b),\quad a> 0.\] Assume additionally that \begin{equation}\label{eq:slitcontinuity} u(a,0)=\lim_{b\searrow0}u(a,b),\quad a>0. \end{equation} Then $(2\psi(x,y)-1)u(x)$ is $C^{\infty}$ on $(\Omega\setminus\{0\})\times Y$. \end{lemma} \begin{proof} Fix $y\in Y$ and $a>0$. Take sequences $(a_i)\subset\R$, $(b_i)\subset\R_{>0}$, $(y_i)\subset\R^2$ such that $a_i\to a$, $b_i\searrow0$, $y_i\to y$. 
We have, using $\rho(r)=1-\rho(-r)$, for every multi-index $I$, and every $a>0$ and $y\in Y$, \begin{align*} \lim_{\substack{\bar a\to a\\ b\searrow 0\\\bar y\to y}}\partial^I[(2\psi((\bar a,b),\bar y)-1)u(\bar a,b)] &=\lim_{i\to+\infty}\partial^I[(2\psi((a_i,b_i),y_i)-1)u(a_i,b_i)]\\ &=\lim_{i\to+\infty}\partial^I[(2\rho\left(\frac{10\langle y_i,\uu_0(a_i,b_i)\rangle}{\|(a_i,b_i)\|^6}\right)-1)u(a_i,b_i)]\\ &=\lim_{i\to+\infty}\partial^I[(2(1-\rho\left(-\frac{10\langle y_i,\uu_0(a_i,b_i)\rangle}{\|(a_i,b_i)\|^6}\right))-1)u(a_i,b_i)] \\ &=\lim_{i\to+\infty} \partial^I[(2(1-\rho\left(\frac{10\langle y_i,-\uu_0(a_i,b_i)\rangle}{\|(a_i,b_i)\|^6}\right))-1)u(a_i,b_i)] \\ &=\lim_{i\to+\infty}\partial^I[(2(1-\rho\left(\frac{10\langle y_i,\uu_0(a_i,-b_i)\rangle}{\|(a_i,b_i)\|^6}\right))-1)u(a_i,b_i)] \\ &=\lim_{i\to+\infty}\partial^I[-(2\rho\left(\frac{10\langle y_i,\uu_0(a_i,-b_i)\rangle}{\|(a_i,b_i)\|^6}\right)-1)u(a_i,b_i)] \\ &=\lim_{i\to+\infty}\partial^I[(2\rho\left(\frac{10\langle y_i,\uu_0(a_i,-b_i)\rangle}{\|(a_i,-b_i)\|^6}\right)-1)u(a_i,-b_i)] \\ &=\lim_{i\to+\infty}\partial^I[(2\psi((a_i,-b_i),y_i)-1)u(a_i,-b_i)]\\ &=\lim_{\substack{\bar a\to a\\ b\nearrow 0\\\bar y\to y}}\partial^I[(2\psi((\bar a,b),\bar y)-1)u(\bar a,b)] \end{align*} This means that all derivatives of $(2\psi(x,y)-1)u(x)$ exist on $\{(a,0):a> 0\}$. A similar calculation, together with \eqref{eq:slitcontinuity}, shows that $(2\psi(x,y)-1)u(x)$ is continuous. This shows that $(2\psi(x,y)-1)u(x)$ is $C^\infty$ on $(\Omega\setminus\{0\})\times Y$, as the continuity of the partial derivatives near a given point implies their existence at the point. \end{proof} \begin{figure} \includegraphics[width=0.7\textwidth]{img5} \centering \caption{For $x\in\Omega$, this is the plane $\{x\}\times Y$. We have shaded the region $\Delta$, and indicated the vectors $\uu_0(x)$ and $\uu_1(x)=-\uu_0(x)$, together with their length, $\|x\|^3$, and the distance from $\Delta$ to the origin, $\|x\|^3/10$. We have also indicated what the values of $\psi$, $U$, and $V$ are on each of the connected components of $\Delta\cap (\{x\}\times Y)$. We have also included a reminder that $g$ (defined just after Lemma \ref{lem:psi}) is positive only outside of $\Delta$. } \label{fig:5} \end{figure} \begin{figure} \includegraphics[width=0.35\textwidth]{img7} \centering \caption{Radial scheme of the graph of $f$, made up of those of $\uu_0$ and $\uu_1$, and of $\Delta$ (shaded, with dashed boundary).} \label{fig:7} \end{figure} Take also a positive function $g\colon\Omega\times Y\to\R$ that will be auxiliary at helping us force minimizers of the proposed Lagrangian $L$ (to be defined below) to be supported in $\Delta$. We take $g$ such that \begin{itemize} \item $g\in C^\infty(\Omega\times Y)$, \item $g(x,y)\geq0$, \item $g(x,y)=0$ for all $(x,y)\in \Delta$, and \item $g$ verifies \begin{equation}\label{eq:grel} \|y-U(x,y)\|^2+g(x,y)\geq \min_{i\in\{0,1\}}\|y-\uu_i(x)\|^2 \end{equation} if $(x,y)\notin \Delta$. Observe that, by Lemma \ref{lem:psi}\ref{it:Usmooth}--\ref{it:Utrivial}, the function \[S(x,y)\coloneqq\min_i\|y-\uu_i(x)\|^2-\|y-U(x,y)\|^2\] vanishes on $\Delta$ and is smooth everywhere except at the locus of points of the form $(x,0)$ (i.e., $y=0$), $x\in\Omega$, since it is there that $\|y-\uu_0(x)\|=\|\uu_0(x)\|=\|\uu_1(x)\|=\|y-\uu_1(x)\|$. Also, by Lemma \ref{lem:psi}\ref{it:Usmooth}, $S(x,y)=O(\|x\|^3)$ as $x\to 0$. 
Thus in order to get a function $g$ that complies with inequality \eqref{eq:grel}, it suffices to take $g$ equal to $S$ in a small neighborhood of $\Delta$ while ensuring that it remains $\geq S$ everywhere. \end{itemize} The function $g$ will force the minimizers to be supported within $\Delta$. Remark that $g(0,0)=0$ because $g$ is $C^\infty$, $g$ vanishes on $\Delta$, and $(0,0)\in\overline \Delta$. Now we can define $L\colon \Omega\times Y\times Z\to\R$ to be given by \begin{equation}\label{eq:longdefL} L(x,y,z)=\|y-U(x,y)\|^2+\|z-V(x,y)\|^2+g(x,y); \end{equation} in other words, \begin{multline*} L(x,y,z)=\|y-\psi(x,y)\uu_0(x)-(1-\psi(x,y))\uu_1(x)\|^2\\ +\|z-\psi(x,y)D\uu_0(x)-(1-\psi(x,y))D\uu_1(x)\|^2+g(x,y), \end{multline*} for $(0,0)\neq (x,y)\in \Omega\times Y$, and $z\in Z$, and \[L(0,0,z)=\|z\|^2,\quad z\in Z.\] Observe that on $(x,y)\in\Delta$ the expression \eqref{eq:longdefL} simplifies to \begin{equation}\label{eq:defL} L(x,y,z)=\|y-\uu_i(x)\|^2+\|z-D\uu_i(x)\|^2\quad \textrm{if}\quad i=\operatornamewithlimits{arg\,min}_{j\in\{0,1\}}\|y-\uu_j(x)\|^2 \end{equation} because $g$ vanishes on $\Delta$ and because of Lemma \ref{lem:psi}. \begin{lemma}\label{lem:regularityL} $L$ is of class $C^{1,1}_{\mathrm{loc}}$. \end{lemma} \begin{proof}[Proof of Lemma \ref{lem:regularityL}] From Lemma \ref{lem:psi}, we know that $U$ and $V$ are $C^\infty$ on $(\Omega\setminus\{0\})\times Y$. This, together with the expression \eqref{eq:longdefL} defining $L$ away from the origin, and the smoothness of $g$, we conclude that $L$ is $C^\infty$ on $(\Omega\setminus\{0\})\times Y$. For fixed $y'\in Y$ and $z'\in Z$, as $(x,y,z)\to(0,y',z')$, using the estimates from Lemma \ref{lem:psi} as well as the fact that \[g(x,y)=\begin{cases}O(\|x\|^2+\|y\|^2),& y'\neq 0,\\ 0&y'=0,\end{cases}\] as $(x,y)\to(0,y')$ (which follows from $g$ being smooth, nonnegative, and vanishing at the origin, $g(0,0)=0$, because then necessarily $\nabla g(0,0)=0$; and from $g(x,y)=0$ on a neighborhood of every point $(0,y)$, $y\neq 0$, as this point belongs to $ \Delta$), we have \begin{align*} |L(x,y,z)&-L(0,y',z')-2\langle y',y-y' \rangle-2\langle z',z-z' \rangle| \\ &=|\|y-U(x,y)\|^2+\|z-V(x,y)\|^2+g(x,y)\\ &\qquad -\|y'\|^2-\|z'\|^2-2\langle y',y-y' \rangle-2\langle z',z-z'\rangle|\\ &= |\|y\|^2-\|y'\|^2-2\langle y',y-y' \rangle\\ &\qquad+\|U(x,y)\|^2-2\langle y,U(x,y)\rangle\\ &\qquad \|V(x,y)\|^2-2\langle z, V(x,y)\rangle+g(x,y)\\ &\qquad \|z\|^2-\|z'\|^2-2\langle z',z-z'\rangle|\\ &\leq \|y^2-y'\|^2+\|y^2-y'\|^2+O(\|x\|^6+\|y\|\|x\|^3\\ &\qquad +\|x\|^4+\|z\|\|x\|^2+\chi_{y'\neq0}(\|x\|^2+ \|y\|^2))\\ &\qquad +\|z-z'\|^2\\ &\leq O(\|x\|^2+\|y-y'\|^2+\|z-z'\|^2)\\ &= O(\|(x,y,z)-(0,y',z')\|^2). \end{align*} Here, $\chi_{y'\neq 0}\in\{0,1\}$ vanishes when $y'=0$ and is 1 otherwise. Then \cite[Proposition 4.11.3]{fathi2008weak} implies that the derivative is locally Lipschitz continuous. \end{proof} \begin{proof}[Proof of the theorem] We present the proof in several steps. \noindent\textbf{Step 1.} $\displaystyle\min_{(\mu,\mu_\partial)\in \mathcal M}\int_{\Omega\times Y\times Z}L\,d\mu=0.$ The map $f$ can be encoded using the measure $\mu$ on $\Omega\times Y\times Z$ defined by the pushforwards \[\mu=\tfrac12\xi_{0\#}dx+\tfrac12\xi_{1\#}dx\] where $dx$ is Lebesgue measure on $\Omega$, and $\xi_i\colon\Omega\to\Omega\times Y\times Z$ is the map \[\xi_i(x)=(x,\uu_i(x),D\uu_i(x)),\quad x\in \Omega.\] By the definition \eqref{eq:Delta} of $\Delta$, it holds that $(x,\uu_i(x)) \in \Delta$ for $i \in \{1,2\}$. 
Therefore the $(x,y)$-marginal of $\mu$ is supported in $\Delta$, where $L$ is given by \eqref{eq:defL} (see also Figure~\ref{fig:5}). It follows that \begin{align*} \int_{\Omega\times Y\times Z} L\,d\mu = \int_{\Delta \times Z} L\,d\mu &=\frac12\sum_{i=0}^1\int_{\Omega}L(x,\uu_i(x),D\uu_i(x))\,dx\\ &=\frac12\sum_{i=0}^1 \int_{\Omega}\|\uu_i(x)-\uu_i(x)\|^2+\|D\uu_i(x)-D\uu_i(x)\|^2\,dx=0, \end{align*} Since the integrand is nonnegative, this is the minimum of the integral of $L$ over any measure $\mu$ with $(\mu,\mu_\partial)\in\mathcal M$. \noindent\textbf{Step 2.} \emph{Reparameterization of $f$ using $\bar \uu_\alpha$ and choice of $\alpha_0$.} For $\alpha \in \R$, let $\bar \uu_\alpha$ the $\R^2$-valued function on $\Omega$ given by \begin{equation}\label{eq:ui} \bar \uu_\alpha(r(\cos (\theta+\alpha),\sin(\theta+\alpha)))= r^3\left(\cos \frac{\theta+\alpha}2,\sin \frac{\theta+\alpha}2\right),\quad r\in [0,1),\,\theta\in [0,2\pi), \end{equation} so that $\bar \uu_\alpha=-\bar \uu_{\alpha+2\pi}$. Thus if $\alpha\in [0,2\pi)$ then \begin{equation}\label{eq:ubarexplain} \bar \uu_\alpha(x)=\begin{cases} \uu_0(x),&\textrm{for $\theta(x)\in [\alpha,2\pi)$},\\ \uu_1(x),&\textrm{for $\theta(x)\in [0,\alpha)$}, \end{cases}\quad \bar \uu_{\alpha+2\pi}(x)=\begin{cases} \uu_1(x),&\textrm{for $\theta(x)\in [\alpha,2\pi)$},\\ \uu_0(x),&\textrm{for $\theta(x)\in [0,\alpha)$}, \end{cases} \end{equation} where $x\in\Omega$ and $\theta(x)\in[0,2\pi)$ is the polar angle of $x=r(\cos\theta(x),\sin\theta(x))$. Therefore $\uu_0=\bar \uu_0$ and $\uu_1=\bar \uu_{2\pi}$. Just like $\uu_0$ and $\uu_1$ parameterize the image of $f$ and the jump between the two happens at angle 0 (see Figure \ref{fig:6}), for each $\alpha\in\R$ the functions $\bar \uu_{\alpha}$ and $\bar \uu_{\alpha+2\pi}=-\bar \uu_{\alpha}$ give another parametrization of the image of $f$, with the jump from one chart $\uu_1$ to the other $\uu_0$ at angle $\alpha$. \begin{figure} \includegraphics[width=0.7\textwidth]{drawing} \centering \caption{This 3-dimensional projection of the graph of $f|_\Gamma$ under the map $(x_1,x_2,y_1,y_2)\mapsto(x_1,x_2,y_1)$ has been colored to distinguish the images of $\uu_\alpha$ and $\uu_{\alpha+2\pi}$. We have also represented the domains, in polar coordinates, of these functions, and indicated where some points are mapped. Note that the apparent self-intersection is an artifact of the projection that does not occur in reality.} \label{fig:9} \end{figure} Let $\Gamma\subset \Omega$ be the corona consisting of points $x$ with radius $\frac12\leq |x|\leq 1$, whose area is $|\Gamma|=3\pi/4$. Take \[E=\frac{1}{\denomE}.\] Let $h\colon\Omega\to Y$ be any function of class $W^{1,2}(\Omega)$, a candidate solution to the optimization problem on the left-hand side of \eqref{eq:gap}. \begin{figure} \includegraphics[width=0.65\textwidth]{img8} \centering \caption{In polar coordinates $(\theta,r)$, the corona $\Gamma$ can be parameterized by the rectangle $[\alpha,\alpha+2\pi]\times[\tfrac12,1]$, and its image under $\bar \uu_\alpha$ and $\bar \uu_{\alpha+2\pi}$ is a double-covering. In the picture, we illustrate the definition of the disjoint sets $B_\alpha$ and $B_{\alpha+2\pi}$ for a given function $h$; these sets are the subsets of $\Gamma$ in which $h$ is $E$-close to $\bar \uu_\alpha(\Gamma)$ and $\bar \uu_{\alpha+2\pi}(\Gamma)$, respectively. Changing $\alpha$ translates the picture in the $\theta$ direction. 
While $h$ is $2\pi$-periodic in $\theta$, the overall picture is $4\pi$-periodic because the right-hand border $\bar \uu_{\alpha}(\{\alpha+2\pi\}\times[\tfrac12,1])$ coincides with the left-hand border $\bar \uu_{\alpha+2\pi}(\{\alpha\}\times[\tfrac12,1])$, and similarly for $\bar \uu_{\alpha+2\pi}(\{\alpha+2\pi\}\times[\tfrac12,1])$ and $\bar \uu_{\alpha}(\{\alpha\}\times[\tfrac12,1])$. As we move $\alpha$, the union $B_\alpha\cup B_{\alpha+2\pi}$ does not change, but the contents of the sets $B_\alpha$ and $B_{\alpha+2\pi}$ get gradually interchanged. Although we have drawn $h$, $B_\alpha$ and $B_{\alpha+2\pi}$ as independent of $r$, this need not be the case.} \label{fig:8} \end{figure} For $\alpha\in\R$, let $B_\alpha\subset \Omega$ be defined by \[ B_\alpha = \{x\in\Gamma \,: \, \|h(x)-\bar \uu_\alpha(x)\|\leq E\}, \] (see Figure \ref{fig:8}). Given $\alpha$, the union $B_\alpha\cup B_{\alpha+2\pi}$ is, because of \eqref{eq:ubarexplain}, the set of points $x\in \Gamma$ such that $h(x)$ is $E$-close to $f(x)$. As the angle $\alpha$ that determines which of those points are in $B_\alpha$ and which are in $B_{\alpha+2\pi}$ varies, the areas of these sets vary continuously; in other words, $\alpha\mapsto |B_\alpha|$ is continuous. Let \[\varphi(\alpha)=|B_\alpha|-|B_{\alpha+2\pi}|.\] Then $\varphi$ is continuous, verifies $\varphi(\alpha)=-\varphi(\alpha+2\pi)$, and is $4\pi$-periodic in $\alpha$. By the intermediate value theorem, there is some $\alpha_0\in[0,2\pi)$ such that $\varphi(\alpha_0)=0$. In particular, with our choice of $\alpha_0$ we have \[|B_{\alpha_0}|=|B_{{\alpha_0}+2\pi}|=\frac{|B_{\alpha_0}|+|B_{{\alpha_0}+2\pi}|}{2}.\] \noindent\textbf{Step 3.} \emph{Bound for $h$ when $|B_{\alpha_0}|=|B_{{\alpha_0}+2\pi}|< |\Gamma|/\denomA$.} Let $j(x)\in \{0,1\}$ be given by\[j(x)=\operatornamewithlimits{arg\,min}_{j\in\{0,1\}}\|h(x)-\bar \uu_{{\alpha_0}+2\pi j}(x)\|,\quad x\in\Gamma. \] We have, from \eqref{eq:grel} and \eqref{eq:ubarexplain}, \[\|h(x)-U(x,h(x))\|^2+g(x,y)\geq \|h(x)-\bar \uu_{{\alpha_0} + 2\pi j(x)}(x)\|^2.\] We use this to get the uniform bound \begin{align*} \int_\Omega L(x,h(x),Dh(x))dx &=\int_\Omega \|h(x)-U(x,h(x))\|^2\\ &\qquad +\|Dh(x)-V(x,h(x))\|^2+g(x,y)\,dx\\ &\geq\int_\Gamma \|h(x)-U(x,h(x))\|^2+g(x,y)\,dx\\ &\geq \int_\Gamma \|h(x)-\bar \uu_{{\alpha_0} + 2\pi j(x)}(x)\|^2dx\\ &\geq \int_{\Gamma\setminus(B_{\alpha_0}\cup B_{\alpha_0+2\pi})}E^2\,dx\\ &\geq E^2(|\Gamma|-|B_{\alpha_0}|-|B_{{\alpha_0}+2\pi}|)\\ &= E^2\left(|\Gamma|-2\frac{|\Gamma|}4\right)=E^2\frac{|\Gamma|}{2}>0. \end{align*} We have additionally used the fact that, by our choice of the sets $B_{\alpha_0}$ and $B_{\alpha_0+2\pi}$, for $x$ in these sets it holds that $\|h(x)-\bar \uu_{\alpha_0+2\pi j(x)}(x)\|\geq E$. \noindent\textbf{Step 4.} \emph{Definition and properties of $\bar h_0$.} Let, for $x\in\Gamma$, \[\bar h_0(x)=\min\left(\|h(x)-\bar \uu_{{\alpha_0}}(x)\|,\;\frac1{\denomxi}\right). \] The role of the function $\bar h_0$ is to give a sort of truncated version of the distance from $h$ to $\bar \uu_{{\alpha_0}}$ that will be useful in our estimation below. Note that, by the definition of $B_{\alpha_0}$, $\bar h_0(x)\leq E$ on $B_{\alpha_0}$. Observe that if we parameterize $\Gamma$ in polar coordinates with the rectangle $[\frac12, 1]\times [\alpha_0,\alpha_0+2\pi)$, then $\bar \uu_\alpha$ is smooth on that chart, and $\bar h_0$ is in $W^{1,2}$, as is $h$. 
In other words, although $\bar h_0$ is possibly discontinuous on the ray segment \[R_{\alpha_0}=\{x\in \Gamma:x=r(\cos{\alpha_0},\sin{\alpha_0}), \;r\in[\frac12,1]\}\] corresponding to angle $\alpha_0$, it is a Sobolev $W^{1,2}$ function on the rest of $\Gamma$. \noindent\textbf{Claim.} We have, for almost every $x\in \Gamma\setminus R_{\alpha_0}$, \begin{equation}\label{eq:estimatederivative} \|Dh(x)-V(x,h(x))\|\geq \|D\bar h_0(x)\|. \end{equation} \begin{proof}[Proof of the claim.] We have the following cases for $x\in\Gamma\setminus R_{\alpha_0}$: \begin{itemize} \item In the region where $\|h(x)-\bar \uu_{{\alpha_0}}(x)\|\geq 1/\denomxi$, the function $\bar h_0$ is constant so the right-hand side equals zero, and the inequality is verified trivially. \item In the region where $0< \|h(x)-\bar \uu_{{\alpha_0}}(x)\|< 1/\denomxi$, we have $(x,h(x))\in \Delta$ because for $x\in\Gamma$ we have $\frac12\leq \|x\|\leq1$ and then, expanding the squared inequality \[0< \|h(x)-\bar \uu_{{\alpha_0}}(x)\|^2=\|h(x)\|^2-2\langle \bar \uu_{{\alpha_0}}(x),h(x)\rangle+\|\bar \uu_{{\alpha_0}}(x)\|^2< \tfrac{1}{\denomxi^2},\] we get \begin{multline*} \langle\bar \uu_{\alpha_0}(x),h(x)\rangle>\tfrac12(\|h(x)\|^2+\|\bar \uu_{\alpha_0}(x)\|^2-\tfrac1{10^2})\\ \geq\tfrac12((\|\bar \uu_{\alpha_0}(x)\|-\tfrac1{10})^2+\|\bar \uu_{\alpha_0}(x)\|^2-\tfrac1{10^2})=\|\bar \uu_{\alpha_0}(x)\|^2-\tfrac1{10}\|\bar \uu_{\alpha_0}(x)\|\\ =\|x\|^6-\tfrac1{10}\|x\|^3>\frac{\|x\|^6}{10}. \end{multline*} Since the left-hand side equals $|\langle \uu_i(x),h(x)\rangle|$ for some $i=0,1$ by \eqref{eq:ubarexplain}, this shows that $(x,h(x))\in \Delta$, as per its definition \eqref{eq:Delta}. This, in turn, means (by Lemma \ref{lem:psi}\ref{it:Vsmooth}) that the left-hand side of \eqref{eq:estimatederivative} reduces to \[\|Dh(x)-D\bar \uu_{\alpha_0}(x)\|\] by our choice of $\psi$ and \eqref{eq:ubarexplain}. Inequality \eqref{eq:estimatederivative} then follows by taking $\phi(x)=h(x)-\bar \uu_{\alpha_0}(x)$ and observing that all weakly differentiable functions $\phi$ verify, almost everywhere within the set where $\|\phi\|\neq 0$, \[ \Big\|D\|\phi\|\Big\|=\left\|\frac{\phi}{\|\phi\|}D\phi\right\|=\left\|(D\phi)^t\frac{\phi^t}{\|\phi\|}\right\|\leq\|(D\phi)^t\| \frac{\|\phi\|}{\|\phi\|}=\|D\phi\|,\] where $(D\phi)^t$ is the transposed Jacobian matrix and $\|(D\phi)^t\|=\|D\phi\|$ is its operator norm. \item In the region where $ \bar h_0(x)=\|h(x)-\bar \uu_{{\alpha_0}}(x)\|=0$, either the weak derivative of $\bar h_0$ vanishes wherever it is defined (because $\bar h_0$ is nonnegative), hence verifying \eqref{eq:estimatederivative}; the set where it is not defined has measure zero because $\bar h_0$ is weakly differentiable since $h$ is. \end{itemize} \end{proof} \noindent\textbf{Step 5.} \emph{Bound for $h$ when} \begin{equation}\label{eq:largesets} |B_{\alpha_0}|=|B_{{\alpha_0}+2\pi}|\geq \frac{|\Gamma|}{\denomA}. 
\end{equation} For $x\in B_{\alpha_0+2\pi}$, $\|h(x)-\bar \uu_{\alpha_0+2\pi}\|\leq E$, so \[ \|\bar \uu_{\alpha_0}(x)-\bar \uu_{\alpha_0+2\pi}(x)\| \leq\|\bar \uu_{\alpha_0}(x)-h(x)\|+\|h(x)-\bar \uu_{\alpha_0+2\pi}(x)\|\leq \|\bar \uu_{\alpha_0}(x)-h(x)\|+E, \] and then \[\bar h_0(x)=\|h(x)-\bar \uu_{\alpha_0}(x)\|>\|\uu_{\alpha_0}(x)-\bar \uu_{\alpha_0+2\pi}(x)\|-E\geq \frac18-E>\frac1{10}.\] Hence, using \eqref{eq:largesets}, \[M\coloneqq |\Gamma|^{-1}\int_\Gamma \bar h_0(x)dx\geq|\Gamma|^{-1}\int_{B_{\alpha_0+2\pi}} \frac 1{\denomxi} dx\geq\frac{|B_{\alpha_0+2\pi}|}{\denomxi|\Gamma|}\geq\frac{1}{ \the\numexpr\denomxi*\denomA\relax}.\] The domain consisting of the slit corona $\Gamma\setminus R_{\alpha_0}$ satisfies the so-called \emph{cone property} \cite[Definition 2.5.14]{gasipapa}. By \eqref{eq:estimatederivative} together with the Poincar\'e-Wirtinger inequality \cite[Theorem 2.5.21]{gasipapa} for the domain $\Gamma\setminus R_{\alpha_0}$ with constant $C>0$, \begin{align*} \int_\Omega L(x,h(x),Dh(x))dx&=\int_\Omega \|h(x)-U(x,h(x)\|^2 \\ &\qquad +\|Dh(x)-V(x,h(x))\|^2+g(x,y)\,dx\\ &\geq \int_\Gamma \|Dh(x)-V(x,h(x))\|^2dx\\ &\geq \int_{\Gamma\setminus R_{\alpha_0}} \|D\bar h_0(x)\|^2dx\\ &\geq C \int_{\Gamma\setminus R_{\alpha_0}}\left|\bar h_0(x)-M\right|^2dx. \end{align*} Now, since for $x\in B_{\alpha_0}$ we have $0\leq \bar h_0(x)\leq E=1/\denomE<1/{ \the\numexpr\denomxi*\denomA\relax}\leq M$ there, we have \[|\bar h_0(x)-M|\geq F\coloneqq\frac{1}{ \the\numexpr\denomxi*\denomA\relax}-\frac{1}{ \denomE}>0,\] so the above is \begin{equation*} C \int_{\Gamma\setminus R_{\alpha_0}}\left|\bar h_0(x)-M\right|^2dx\geq C \int_{B_{\alpha_0}} F^2dx= CF^2 |B_{\alpha_0}|\geq CF^2\frac{|\Gamma|}{\denomA}>0. \end{equation*} This is a uniform lower bound for all $h\in W^{1,2}(\Omega)$ satisfying the above constraints. Together, the bounds from Steps 3 and 5 prove the theorem. \end{proof} \section{Positive gap with integral constraints}\label{sec:integralconstraints} The reader may be curious why we have not included, in the statements of Theorems \ref{thm:consolidated} and \ref{thm:nogap} and in the definitions \eqref{opt:classical} and \eqref{opt:relaxed} of $M_\mathrm{c}$ and $M_\mathrm{r}$, any integral constraints of the form \[\int_\Omega H(x,y(x),Dy(x))\,dx \le 0\quad\textrm{or}\quad \int_\Omega H(x,y(x),Dy(x))\,dx = 0.\] The reason is that in the presence of these constraints, there may be a gap between the classical case and its relaxation. The following two sections give examples of such situations. The idea for each of these examples works in any dimensions $n,m>0$, and we show them in the special case $n=m=1$ for simplicity. We use the same notations as in the definitions \ref{opt:classical} and \ref{opt:relaxed} of $M_\mathrm{c}$ and $M_\mathrm{r}$. \subsection{Inequality integral constraints} Let $\Omega=(0,1)\subset\R$, $Y=\R$, $L(x,y,z)=y$, $F(x,y,z)=y(1-y)$, $F_\partial=G=G_\partial=0$. Note that the only Lipschitz curves $y\colon\Omega\to Y$ such that $F(x,y(x),Dy(x))=0$ are $y_0(x)=0$ and $y_1(x)=1$, $x\in\Omega$, and these satisfy \[\int_{\Omega}L(x,y_0(x),Dy_0(x))dx=\int_0^10\,dx=0\quad\textrm{and}\quad\int_{\Omega}L(x,y_1(x),Dy_1(x))dx=\int_0^11\,dx=1.\] Let $H(x,y,z)=1-10y$. 
Consider the problem of computing $M_\mathrm{c}$ and $M_\mathrm{r}$ as in \ref{opt:classical} and \ref{opt:relaxed}, above, with the additional integral constraints \[\int_\Omega H(x,y(x),Dy(x))\,dx\leq 0\quad\textrm{and}\quad \int_{\Omega\times Y\times Z}H(x,y,z)\,d\mu(x,y,z)\leq 0,\] to be satisfied by the respective competitors $y$ and $(\mu,\mu_\partial)$. We will show that in this case $M_\mathrm{c}>M_\mathrm{r}$. For the classical case, we have \[\int_{\Omega}H(x,y_0,Dy_0)\,dx=\int_0^11-0\,dx=1\stackrel{!}>0\quad\textrm{and}\quad\int_{\Omega}H(x,y_1,Dy_1)\,dx=\int_0^11-10\,dx=-9\leq 0,\] so in the calculation of $M_\mathrm{c}$ the only competitor is $y_1$, because $y_0$ does not satisfy the integral constraint. We conclude that $M_\mathrm{c}=1$. For the relaxed case, consider the measure $\mu=\frac9{10}\mu_0+\frac1{10}\mu_1$, where $\mu_i$ is the measure induced by $y_i$, $i=0,1$. Then \[\int_{\Omega\times Y\times Z}H\,d\mu=\tfrac9{10}\int_{\Omega\times Y\times Z}H\,d\mu_0+\tfrac1{10}\int_{\Omega\times Y\times Z}H\,d\mu_1=\tfrac{9}{10}-9\tfrac1{10}=0,\] so $\mu$ satisfies the constraint. We also have \[\int_{\Omega\times Y\times Z}L\,d\mu=\tfrac9{10}\int_{\Omega\times Y\times Z}L\,d\mu_0+\tfrac1{10}\int_{\Omega\times Y\times Z}L\,d\mu_1=\tfrac1{10}.\] Thus $M_\mathrm{r}\leq \frac1{10}<1=M_\mathrm{c}$. \subsection{Equality integral constraints} Let $\Omega=(0,1)\subset\R$, $Y=\R$, $L(x,y,z)=y$, $F(x,y,z)=y(y-1)(y-2)$, $F_\partial=G=G_\partial=0$. Note that the only Lipschitz curves $y\colon\Omega\to Y$ such that $F(x,y(x),Dy(x))=0$ are $y_0(x)=0$, $y_1(x)=1$, and $y_2(x)=2$, $x\in\Omega$, and these satisfy \[\int_{\Omega}L(x,y_i(x),Dy_i(x))dx=\int_0^1i\,dx=i,\quad i=0,1,2.\] Let $H(x,y,z)=\frac74y-\frac34y^2$. Consider the problem of computing $M_\mathrm{c}$ and $M_\mathrm{r}$ as in \ref{opt:classical} and \ref{opt:relaxed}, above, with the additional integral constraints \[\int_\Omega H(x,y(x),Dy(x))\,dx= \tfrac12\quad\textrm{and}\quad \int_{\Omega\times Y\times Z}H(x,y,z)\,d\mu(x,y,z)=\tfrac12,\] to be satisfied by the respective competitors $y$ and $(\mu,\mu_\partial)$. We will show that in this case $M_\mathrm{c}>M_\mathrm{r}$ too. For the classical case, we have \[\int_{\Omega}H(x,y_0,Dy_0)\,dx=\int_0^10\,dx=0\stackrel{!}\neq\tfrac12 \quad\textrm{and}\quad\int_{\Omega}H(x,y_1,Dy_1)\,dx=\int_0^1\tfrac74-\tfrac34\,dx=1\stackrel!\neq \tfrac12,\] and \[\int_{\Omega}H(x,y_2,Dy_2)\,dx=\int_0^12\tfrac74-4\tfrac34dx=\tfrac12,\] so in the calculation of $M_\mathrm{c}$ the set of competitors contains only $y_2$. We conclude that \[M_\mathrm{c}=\int_0^1L(x,y_2(x),Dy_2(x))\,dx=\int_0^12\,dx=2.\] For the relaxed case, consider the measure $\mu=\frac12\mu_0+\frac12\mu_1$, where $\mu_i$ is the measure induced by $y_i$, $i=0,1$. Then \[\int_{\Omega\times Y\times Z}H\,d\mu=\tfrac12\int_{\Omega\times Y\times Z}H\,d\mu_0+\tfrac12\int_{\Omega\times Y\times Z}H\,d\mu_1=0\tfrac{1}{2}+1\tfrac12=\tfrac12,\] so $\mu$ satisfies the constraint. We also have \[\int_{\Omega\times Y\times Z}L\,d\mu=\tfrac12\int_{\Omega\times Y\times Z}L\,d\mu_0+\tfrac12\int_{\Omega\times Y\times Z}L\,d\mu_1=\tfrac12.\] Thus $M_\mathrm{r}\leq \frac12<2=M_\mathrm{c}$. \section{Acknowledgements} This work has been supported by the Czech Science Foundation (GACR) under contract No. 
20-11626Y, the European Union’s Horizon 2020 research and innovation programme under the Marie Skłodowska-Curie Actions, grant agreement 813211 (POEMA), by the AI Interdisciplinary Institute ANITI funding, through the French “Investing for the Future PIA3” program under the Grant agreement n$^\circ$ ANR-19-PI3A-0004 as well as by the National Research Foundation, Prime Minister’s Office, Singapore under its Campus for Research Excellence and Technological Enterprise (CREATE) programme. The authors would like to thank to Martin Kru\v z\'ik, Jared Miller, Corbinian Schlosser and Ian Tobasco for constructive feedback on this work. \paragraph{Data availability statement.} This manuscript has no associated data. \paragraph{Competing interests.} The authors have no competing interests. \begin{thebibliography}{10} \bibitem{alberti2017some} Giovanni Alberti and Annalisa Massaccesi. \newblock On some geometric properties of currents and frobenius theorem. \newblock {\em Rendiconti Lincei-Matematica e Applicazioni}, 28(4):861--869, 2017. \bibitem{ambrosio2008transport} Luigi Ambrosio. \newblock Transport equation and cauchy problem for non-smooth vector fields. \newblock In {\em Calculus of variations and nonlinear partial differential equations}, pages 1--41. Springer, 2008. \bibitem{patrick} Patrick Bernard. \newblock Young measures, superposition and transport. \newblock {\em Indiana Univ. Math. J.}, 57(1):247--275, 2008. \bibitem{chernyavsky2021convex} Alexander Chernyavsky, Jason~J Bramburger, Giovanni Fantuzzi, and David Goluskin. \newblock Convex relaxations of integral variational problems: pointwise dual relaxation and sum-of-squares optimization. \newblock {\em arXiv preprint arXiv:2110.03079}, 2021. \bibitem{de2014regularity} Camillo De~Lellis and Emanuele Spadaro. \newblock Regularity of area minimizing currents i: gradient l p estimates. \newblock {\em Geometric and Functional Analysis}, 24(6):1831--1884, 2014. \bibitem{evansgariepy} Lawrence~C. Evans and Ronald~F. Gariepy. \newblock {\em Measure theory and fine properties of functions}. \newblock CRC Press, 2015. \bibitem{fathi2008weak} Albert Fathi. \newblock Weak kam theorem in lagrangian dynamics, preliminary version number 10. \newblock Available online, 2008. \bibitem{gasipapa} L~Gasinski and N~Papageorgiou. \newblock Nonlinear analysis, volume 9 of ser. \newblock {\em Math. Anal. Appl. Chapman \& Hall/CRC, Boca Raton, FL}, 2006. \bibitem{hardt1996solving} R~Hardt and J~Pitts. \newblock Solving the plateau’s problem for hypersurfaces without the compactness theorem for integral currents. \newblock {\em Geometric measure theory and the calculus of variations}, 44:255--295, 1996. \bibitem{kordaROA} Didier Henrion and Milan Korda. \newblock Convex computation of the region of attraction of polynomial control systems. \newblock {\em IEEE Transactions on Automatic Control}, 59(2):297--312, 2014. \bibitem{henrion2020moment} Didier Henrion, Milan Korda, and Jean~Bernard Lasserre. \newblock {\em Moment-sos Hierarchy, The: Lectures In Probability, Statistics, Computational Geometry, Control And Nonlinear Pdes}, volume~4. \newblock World Scientific, 2020. \bibitem{korda2018moments} Milan Korda, Didier Henrion, and Jean-Bernard Lasserre. \newblock Moments and convex optimization for analysis and control of nonlinear pdes. \newblock In E.~Zuazua E.~Trelat, editor, {\em Handbook of Numerical Analysis}, volume~23, chapter~10, pages 339--366. Elsevier, 2022. 
\bibitem{lasserre2008nonlinear} Jean~B Lasserre, Didier Henrion, Christophe Prieur, and Emmanuel Tr{\'e}lat. \newblock Nonlinear optimal control via occupation measures and {LMI}-relaxations. \newblock {\em SIAM Journal on Control and Optimization}, 47(4):1643--1666, 2008. \bibitem{lasserre2009moments} Jean~Bernard Lasserre. \newblock {\em Moments, positive polynomials and their applications}, volume~1. \newblock World Scientific, 2009. \bibitem{lewis1980relaxation} RM~Lewis and RB~Vinter. \newblock Relaxation of optimal control problems to equivalent convex programs. \newblock {\em Journal of Mathematical Analysis and Applications}, 74(2):475--493, 1980. \bibitem{maggi2012sets} Francesco Maggi. \newblock {\em Sets of finite perimeter and geometric variational problems: an introduction to Geometric Measure Theory}. \newblock Number 135 in Cambridge studies in advanced mathematics. Cambridge University Press, 2012. \bibitem{marx2018moment} Swann Marx, Tillmann Weisser, Didier Henrion, and Jean Lasserre. \newblock A moment approach for entropy solutions to nonlinear hyperbolic pdes. \newblock {\em Mathematical Control and Related Fields}, 10(1):113--140, 2020. \bibitem{morgan} Frank Morgan. \newblock {\em Geometric measure theory: a beginner's guide}. \newblock Academic press, 2016. \bibitem{rubio1975generalized} JE~Rubio. \newblock Generalized curves and extremal points. \newblock {\em SIAM Journal on Control}, 13(1):28--47, 1975. \bibitem{rubio1976extremal} JE~Rubio. \newblock Extremal points and optimal control theory. \newblock {\em Annali di Matematica Pura ed Applicata}, 109(1):165--176, 1976. \bibitem{rudin} Walter Rudin. \newblock {\em Real and Complex Analysis}. \newblock McGraw-Hill, 1968. \bibitem{schioppa2016unrectifiable} Andrea Schioppa. \newblock Unrectifiable normal currents in euclidean spaces. \newblock arXiv:1608.01635. \bibitem{tasso} Emanuele Tasso. \newblock Decomposizione di correnti normali. \newblock Master's thesis, University of Pisa, 5 2015. \bibitem{vinter1978equivalence} Richard~B Vinter and Richard~M Lewis. \newblock The equivalence of strong and weak formulations for certain problems in optimal control. \newblock {\em SIAM Journal on Control and Optimization}, 16(4):546--570, 1978. \bibitem{young_lectures} Laurence~Chisholm Young. \newblock {\em Lectures on the calculus of variations and optimal control theory}. \newblock Saunders, Philadelphia, 1969. \bibitem{zworski1988decomposition} Maciej Zworski. \newblock Decomposition of normal currents. \newblock {\em Proceedings of the American Mathematical Society}, 102(4):831--839, 1988. \end{thebibliography} \end{document}
2205.14043v1
http://arxiv.org/abs/2205.14043v1
An Elementary Proof of the Minimal Euclidean Function on the Gaussian Integers
\documentclass{article} \pagestyle{plain} \usepackage[fontsize=12pt]{scrextend} \usepackage{babel} \usepackage{amsmath} \usepackage{mathtools} \usepackage{euscript, amsmath,amssymb,amsfonts,mathrsfs,amsthm,mathtools,graphicx, tikz, xcolor,verbatim, bm, enumerate, enumitem,multicol,appendix,etoolbox} \usepackage{wrapfig} \usepackage[all]{xy} \usepackage{upquote} \usepackage{listings} \usetikzlibrary{arrows,patterns} \usepackage{authblk} \usepackage[latin1]{inputenc} \usepackage{verbatim} \usepackage{bm} \usepackage[justification=centering]{subcaption} \lstdefinelanguage{Sage}[]{Python} {morekeywords={True,False,sage,singular}, sensitive=true} \lstset{frame=none, showtabs=False, showstringspaces=False, commentstyle={\ttfamily\color{dredcolor}}, keywordstyle={\ttfamily\color{dbluecolor}\bfseries}, stringstyle = {\ttfamily\color{dgraycolor}\bfseries}, language = Sage, basicstyle={\small \ttfamily}, aboveskip=.3em, belowskip=.1em } \definecolor{dblackcolor}{rgb}{0.0,0.0,0.0} \definecolor{dbluecolor}{rgb}{.01,.02,0.7} \definecolor{dredcolor}{rgb}{0.8,0,0} \definecolor{dgraycolor}{rgb}{0.30, 0.3,0.30} \usepackage[outer=1in,marginparwidth=.75in]{geometry} \usepackage{marginnote} \usetikzlibrary{calc} \usetikzlibrary{positioning} \usetikzlibrary{shapes.geometric} \usetikzlibrary{shapes.geometric} \usepackage{color} \usepackage[latin1]{inputenc} \tikzstyle{square} = [shape=regular polygon, regular polygon sides=4, minimum size=1cm, draw, inner sep=0, anchor=south, fill=gray!30] \tikzstyle{squared} = [shape=regular polygon, regular polygon sides=4, minimum size=1cm, draw, inner sep=0, anchor=south, fill=gray!60] \newtheorem{theorem}{Theorem}[section] \newtheorem{definition}[theorem]{Definition} \newtheorem{conjecture}[theorem]{Conjecture} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{coro}[theorem]{Corollary} \newtheorem{example}[theorem]{Example} \newtheorem{prop}[theorem]{Proposition} \newcommand{\R}{{\mathbb{R}}} \newcommand{\C}{{\mathbb{C}}} \newcommand{\Z}{{\mathbb{Z}}} \newcommand{\Q}{{\mathbb{Q}}} \newcommand{\N}{{\mathbb{N}}} \newcommand{\ZZ}{{\mathbb{Z}}} \newcommand{\Spec}{{\mathrm{Spec}}} \newcommand{\Gal}{{\mathrm{Gal}}} \newcommand{\Cl}{{\mathrm{Cl}}} \newcommand{\ord}{{\mathrm{ord}}} \newcommand{\p}{{\mathfrak{p}}} \newcommand{\B}{{\mathfrak{P}}} \newcommand{\I}{{\mathbb{I}}} \newcommand{\uc}{{\emph{c}}} \newcommand{\ub}{{\emph{b}}} \newcommand{\Nm}{{\mathrm{Nm}}} \newcommand{\Frac}{{\mathrm{Frac}}} \newcommand{\A}{{\mathfrak{A}}} \newcommand{\M}{{\mathfrak{M}}} \renewcommand{\d}{{\sqrt{d}}} \renewcommand{\O}{{\EuScript{O}}} \providecommand{\keywords}[1] { \small \textbf{\textit{Keywords---}} #1 } \begin{document} \title{An Elementary Proof of the Minimal Euclidean Function on the Gaussian Integers} \author{Hester Graves} \affil{Center for Computing Sciences/IDA} \date{\today} \maketitle \abstract{Every Euclidean domain $R$ has a minimal Euclidean function, $\phi_R$. A companion paper \cite{Graves} introduced a formula to compute $\phi_{\Z[i]}$. It is the first formula for a minimal Euclidean function for the ring of integers of a non-trivial number field. It did so by studying the geometry of the set $B_n = \left \{ \sum_{j=0}^n v_j (1+i)^j : v_j \in \{0, \pm 1, \pm i \} \right \}$ and then applied Lenstra's result that $\phi_{\Z[i]}^{-1}([0,n]) = B_n$ to provide a short proof of $\phi_{\Z[i]}$. Lenstra's proof requires s substantial algebra background. This paper uses the new geometry of the sets $B_n$ to prove the formula for $\phi_{\Z[i]}$ without using Lenstra's result. 
The new geometric method lets us prove Lenstra's theorem using only elementary methods. We then apply the new formula to answer Pierre Samuel's open question: what is the size of $\phi_{\Z[i]}^{-1}(n)$? Appendices provide a table of answers and the associated SAGE code. \\ \keywords{number theory, Euclidean algorithm, Euclidean function, Euclidean domain, Gaussian integers, quadratic number fields}} \section{Introduction}\label{introduction} This paper presents the first formula that computes the minimal Euclidean function for the ring of integers of a non-trivial number field. Theorem \ref{formula_statement} gives a formula for $\phi_{\Z[i]}$, the minimal Euclidean function for $\Z[i]$. The ring $\Z[i]$, also called the Gaussian integers or the Gaussians, is the ring of integers of $\Q(i)$. Calculating the minimal Euclidean function for any number field's ring of integers (other than $\Z$, the ring of integers of $\Q$) has been an open problem since Motzkin introduced minimal Euclidean functions in 1949. Pierre Samuel explicitly mentioned being unable to enumerate the pre-images of $\phi_{\Z[i]}$ in general in 1971 \cite{Samuel}. Section~\ref{history} provides the question's history. To the author's surprise, $\phi_{\Z[i]}$ is easy to compute, and the computation can be done by hand for small examples. Sections~\ref{expansions} and \ref{Main Result} study the geometry of the sets $\phi_{\Z[i]}^{-1}([0,n])$. Samuel calculated $|\phi_{\Z[i]}^{-1}(n)|$ for $n \in [0,8]$. Section~\ref{Application} shows how to quickly compute $\phi_{\Z[i]}^{-1} (9)$, and gives a closed-form expression for $|\phi_{\Z[i]}^{-1}(n)|$ for $n\geq 2$. Appendix~\ref{Table} is a table of these values. The section also compares our new formula with the previous recursive methods to compute $\phi_{\Z[i]}^{-1}([0,n])$; Appendix~\ref{Code} provides code for those older techniques. A companion paper \cite{Graves} gives a short proof of Theorem \ref{formula_statement}, using a result of Lenstra. Lenstra's proof requires comfort with a range of ideas in algebra. We use our new geometric description of the sets $B_n$ to provide an alternative, elementary proof of Lenstra's theorem. This paper, therefore, provides a self-contained, elementary proof, at the expense of the brevity of \cite{Graves}. The only background knowledge required is familiarity with complex conjugation and quotients in rings. The proof focuses on the geometry of the sets $\phi_{\Z[i]}^{-1}([0,n])$, so readers will want to study the figures carefully, and pay particular attention to Figure \ref{Fig:triangle}. \subsection{History}\label{history} Answering a question of Zariski, Motzkin showed in 1949 that every Euclidean domain $R$ has a unique minimal Euclidean function $\phi_R$. His paper only gave one example for a ring of integers of a number field: he showed that $\phi_{\Z}(x) = \lfloor \log_2(|x|) \rfloor$, one less than the number of digits in the binary expansion of $|x|$ \cite{Motzkin}. Following his lead, mathematicians searched fruitlessly for minimal Euclidean functions for number fields' rings of integers. Pierre Samuel calculated $\phi_{\Z[i]}^{-1}(n)$ and $\phi_{\Z[\sqrt{2}]}^{-1}(n)$ for $n\leq 8$\footnote{Conscientious readers who check the original source will note that Samuel claimed that he went up to $n=9$. He used a slightly different definition, so that $\phi_{\Z[i]}(0) \neq \phi_{\Z[i]}(1)$. This footnoted sentence is his result, translated to our notation using Definition~\ref{construction}.}, and said in his survey `About Euclidean Rings' that the sets were `very irregular' (\cite{Samuel}, p.~290).
He explicitly expressed interest in computing the sets, and he tabulated their sizes. In his monograph ``Lectures in Number Fields'' \cite{Lenstra}, Lenstra showed on page 49 that \begin{equation}\label{1+i expansion} \phi_{\Z[i]}^{-1}([0,n]) = \left \{ \sum_{j=0}^n v_j (1+i)^j : v_j \in \{0, \pm 1, \pm i \} \right \}. \end{equation} Note that Lenstra, unlike Motzkin in his study of $\Z$, provided an algebraic description of the preimages of $\phi_{\Z[i]}$, rather than a function. That may seem like a distinction without a difference, but in the Gaussians, it is not easy to determine the least $n$ for which $a+bi$ can be written as a $(1+i)$-ary expansion with at most $n+1$ digits. Section \ref{expansions} expands on some of these challenges. Using Lenstra's result to compute $\phi_{\Z[i]}^{-1}(9)$ (where Samuel stopped his computation) would require computing $v (1+i)^9 + w$ for all possible $v \in \{ \pm 1, \pm i\}$ and $w \in \phi_{\Z[i]}^{-1}([0,8])$. One would then remove any elements that appear in $\phi_{\Z[i]}^{-1}([0,8])$. An explicit formula allows us to directly compute the elements, without the repetition required by the recursive method outlined above. We see in Section~\ref{Application} that Theorem~\ref{pre-image_cardinality} calculates the cardinality of $\phi_{\Z[i]}^{-1}(n)$ for $n \geq 1$ without enumerating all of the sets' elements. In \cite{Graves}, the author explicitly computed $\phi_{\Z[i]}$, using the sequence $w_n$. We define $B_n = \left \{ \sum_{j=0}^n v_j (1+i)^j : v_j \in \{0, \pm 1, \pm i \} \right \}$, the Gaussians' $(1+i)$-ary analogue of the set of integers with binary expansions of length at most $n+1$. That paper gives a formula to find the least $n$ such that a Gaussian integer is an element of $B_n$. It then uses Lenstra's theorem (Equation \ref{1+i expansion}) to show that $\phi_{\Z[i]}$ is given by that formula. \begin{definition} For $k \geq 0$, $w_{2k} = 3 \cdot 2^k$ and $w_{2k +1} = 4 \cdot 2^k$. \end{definition} We denote that $b$ divides $a$ by $b \mid a$. When $b^c \mid a$ but $b^{c+1} \nmid a$, we write $b^c \parallel a$. \begin{theorem}\label{formula_statement} (Theorem 1.2 in \cite{Graves}) Suppose that $a+bi \in \Z[i] \setminus 0$, that $2^j \parallel a+bi$, and that $n$ is the least integer such that $\max \left ( \left | \frac{a}{2^j} \right |, \left | \frac{b}{2^j} \right | \right ) + 2 \leq w_n$. If $\left | \frac{a}{2^j} \right | + \left | \frac{b}{2^j} \right | + 3 \leq w_{n+1} $, then $\phi_{\Z[i]}(a+bi) = n + 2j$. Otherwise, $\phi_{\Z[i]}(a+bi) = n + 2j +1$. \end{theorem} The formula's proof in \cite{Graves} provided a geometric description of the sets $B_n$. Section~\ref{expansions} defines the geometry used in \cite{Graves}, and uses it to study our sets $B_n$. Sections~\ref{expansions} and \ref{Main Result} then show that $\phi_{\Z[i]}^{-1}([0,n]) = B_n$ and thus \[\phi_{\Z[i]}^{-1}([0,n]) \setminus 0 = \displaystyle \coprod_{j=0}^{\lfloor n/2 \rfloor } \left \{ a + bi: 2^j \parallel a + bi,\ \max(|a|, |b|) \leq w_n - 2^{j+1},\ |a| + |b| \leq w_{n+1} - 3 \cdot 2^j \right \}, \] thereby bypassing Lenstra's proof. We do this because Lenstra's proof requires an extensive knowledge of algebra, while this paper's arguments are elementary. As a consequence of Theorem \ref{octo_union} in \cite{Graves} and Section~\ref{expansions}, we answer Samuel's question by characterizing the sets $\phi_{\Z[i]}^{-1}(n)$ and then providing a closed-form formula computing $|\phi_{\Z[i]}^{-1}(n)|$.
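Before turning to that characterization, we note that Theorem~\ref{formula_statement} is purely arithmetic, so it is as easy to evaluate by machine as by hand. The sketch below is only an illustration in Sage/Python (the helper names are ours; it is not the code of Appendix~\ref{Code}): \texttt{w} implements the sequence $w_n$, and \texttt{phi} follows the statement of the theorem directly.
\begin{lstlisting}
def w(n):
    # w_{2k} = 3*2^k and w_{2k+1} = 4*2^k
    k, parity = n // 2, n % 2
    return (3 if parity == 0 else 4) * 2**k

def phi(a, b):
    # phi_{Z[i]}(a+bi) for a+bi != 0, following the formula above
    j = 0
    while a % 2 == 0 and b % 2 == 0:       # factor out powers of 2, so 2^j || a+bi
        a, b, j = a // 2, b // 2, j + 1
    n = 0
    while max(abs(a), abs(b)) + 2 > w(n):  # least n with max(|a|,|b|) + 2 <= w_n
        n += 1
    if abs(a) + abs(b) + 3 <= w(n + 1):
        return n + 2*j
    return n + 2*j + 1
\end{lstlisting}
For instance, \texttt{phi(2, 1)} returns $1$ and \texttt{phi(2, 0)} returns $2$, in agreement with the Motzkin sets of Example~\ref{example_in_G} below.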
\begin{theorem}\label{pre-images} For $k \geq 1$, writing $n = 2k+1$ in the first formula and $n = 2k$ in the second,\\ $\begin{array}{ccc} \phi_{\Z[i]}^{-1}(2k +1) & = &\displaystyle \coprod _{j=0}^{k} \left \{ a+bi: \begin{array}{c} 2^j \parallel (a+bi); |a|, |b|\leq w_n - 2^{j+1}; \\ |a| + |b| \leq w_{n+1} - 3 \cdot 2^j ,\\ \text{ and either } \max(|a|, |b|) > w_{n-1} - 2^{j+1} \\ \text{ or } |a| + |b| > w_{n} - 3 \cdot 2^j \end{array} \right \} \\ \text{and} && \\ \phi_{\Z[i]}^{-1}(2k) & = &\begin{array}{c} \{\pm 2^k, \pm 2^k i \} \cup \\ \displaystyle \coprod _{j=0}^{k-1} \left \{ a+bi: \begin{array}{c}2^j \parallel (a+bi); |a|, |b|\leq w_n - 2^{j+1};\\ |a| + |b| \leq w_{n+1} - 3 \cdot 2^j ,\\ \text{ and either } \max(|a|, |b|) > w_{n-1} - 2^{j+1} \\ \text{ or } |a| + |b| > w_{n} - 3 \cdot 2^j \end{array} \right \}. \end{array} \end{array}$ \end{theorem} We use this description to find the following expressions. \begin{theorem}\label{size_of_sets} For $k\geq 1$, \begin{align*} |\phi_{\Z[i]}^{-1} (2k)| &= 14 \cdot 4^k - 14 \cdot 2^k + 4\\ \intertext{ and} |\phi_{\Z[i]}^{-1}(2k +1)| &= 28 \cdot 4^k - 20 \cdot 2^k + 4. \end{align*} \end{theorem} Appendix \ref{Table} is a table of the values of $|\phi_{\Z[i]}^{-1} (n)|$. \section{Preliminaries} \subsection{Motzkin's Lemma and minimal Euclidean functions} A domain $R$ is \textbf{Euclidean} if there exists a \textbf{Euclidean function} $f$, $f: R \setminus 0 \rightarrow \N,$ such that if $a \in R$ and $b \in R \setminus 0$, then there exist some $q,r \in R$ such that $a =qb +r$, where either $r=0$ or $f(r) < f(b)$.\footnote{Motzkin and Lenstra both define $f: R \setminus 0 \rightarrow W$, where $W$ is a well-ordered set with $\N$ as an initial segment.} We can restate this standard definition of Euclidean functions in terms of cosets, by saying that $f:R \setminus 0 \rightarrow \N$ is a Euclidean function if, for all $b \in R \setminus 0$, every non-zero coset $[a] \in R/b$ has a representative $r$ (i.e., $a \equiv r \pmod {b}$) such that $f(r) < f(b)$. This reformulation paves the way for Motzkin's Lemma. \begin{definition}\label{construction} \textbf{Motzkin Sets} \cite{Motzkin} Given a domain $R$, define \begin{align*} A_{R,0} &: = 0 \cup R^{\times} \\ A_{R,j} &: = A_{R, j-1} \cup \{ \beta :A_{R,j-1} \twoheadrightarrow R/\beta \}, \text{ and}\\ A_R & := \bigcup_{j=0}^{\infty} A_{R,j}, \end{align*} where $R^{\times}$ is the multiplicative group of $R$ and $G \twoheadrightarrow R/ \beta$ if every $[a] \in R/\beta$ has a representative $r \in G$. \end{definition} Studying $A_{\Z}$ clarifies this cumbersome definition. The elements $[0]$, $[1]$, and $[2]$ of $\Z / 3\Z$ can be represented as $[0]$, $[1]$, and $[-1]$, as $2 \equiv -1 \pmod{3}$; since $\{0, \pm 1\} = A_{\Z,0}$, this shows that $3 \in A_{\Z,1}$. \begin{example}\label{example_in_Z} When $R = \Z$, our Motzkin sets are \begin{align*} A_{\Z,0} & = \{0, \pm 1\} \\ A_{\Z,1} & = \{0, \pm 1, \pm 2, \pm 3\} \\ A_{\Z,2} & = \{0, \pm 1, \pm 2, \pm 3, \pm 4, \pm 5, \pm 6, \pm 7\} \\ A_{\Z,n} & = \{0, \pm 1, \ldots , \pm (2^{n+1} -1)\} \\ A_{\Z} & = \Z. \end{align*} \end{example} Motzkin's sets allow us to present his foundational lemma. \begin{lemma}(Motzkin's Lemma \cite{Motzkin}) \label{Motzkins_Lemma} A domain $R$ is Euclidean if and only if $R = A_R$. Furthermore, if $R$ is Euclidean, if $F$ is the set of all Euclidean functions on $R$, and if \begin{align*} \phi_R &: R \setminus 0 \rightarrow \N,\\ \phi_R(a) &:= j \text{ if }a \in A_{R,j} \setminus A_{R, j-1}, \end{align*} then $\phi_R(a) = \displaystyle \min_{f\in F} f(a)$ and $\phi_R$ is itself a Euclidean function.
\end{lemma} We call $\phi_R$ the \textbf{minimal Euclidean function} on $R$. Example \ref{example_in_Z} shows that $\phi_{\Z} (x) = \lfloor \log_2 |x| \rfloor$, one less than the number of digits in the binary expansion of $|x|$, as mentioned in the introduction. Before Motzkin's Lemma, proving a domain was Euclidean was an exercise in trial and error, as people searched for potential Euclidean functions. Motzkin showed that if a Euclidean function exists, then the Motzkin sets explicitly define it. Motzkin's Lemma tells us that $A_{R, n} = \phi_{R}^{-1} ([0,n])$. The simplest applications of Motzkin's Lemma show that certain rings are not Euclidean. If $R$ is a principal ideal domain with finitely many multiplicative units, it is easy to compute $A_{R,n}$ for small $n$. If the sets stabilize, then $A_R \subsetneq R$ and $R$ is not a Euclidean domain. Computing Motzkin sets quickly shows that while $\Z \left [ \frac{1 + \sqrt{-19}}{2} \right ]$, the ring of integers of $\Q(\sqrt{-19})$, is principal, it is not Euclidean. \subsection{Motzkin Sets for the Gaussian Integers}\label{A_sets} The elements of $\Z[i] = \{ a + bi: a, b \in \Z \}$ are called Gaussian integers after Gauss, who showed that $\Nm(a+bi) = a^2 + b^2$ is a Euclidean function for $\Z[i]$, making $\Z[i]$ a norm-Euclidean ring. The (algebraic) norm is a multiplicative function, so $\Nm(a+bi) \Nm(c+di) = \Nm((a+bi)(c+di))$, and $\Nm(a+bi) = |\Z[i]/(a+bi)\Z[i]|$, the number of cosets of $(a+bi)$. The domain $\Z[i]$ is the ring of integers of $\Q(i)$, and its group of multiplicative units is $\Z[i]^{\times} = \{ \pm 1, \pm i \}$. Following Definition \ref{construction}, we present the first three Motzkin sets for $\Z[i]$. \begin{example}\label{example_in_G} \begin{align*} A_{\mathbb{Z}[i], 0} &= \{0, \pm 1, \pm i \},\\ A_{\mathbb{Z}[i], 1} & = \{0, \pm 1, \pm i , \pm 1 \pm i, \pm 2 \pm i, \pm 1 \pm 2i\},\\ A_{\mathbb{Z}[i], 2} & = \{0, \pm 1, \pm i , \pm 1 \pm i, \pm 2 \pm i, \pm 1 \pm 2i\} \\ & \cup \{ \pm 2, \pm 2i, \pm 3, \pm 3i, \pm 3 \pm i, \pm 1 \pm 3i, \pm 4 \pm i, \pm 1 \pm 4i, \pm 2 \pm 3i, \pm 3 \pm 2i\}. \end{align*} \end{example} For $n \geq 1$, \[A_{\mathbb{Z}[i],n} = A_{\mathbb{Z}[i],n-1} \cup \{a+bi \in \mathbb{Z}[i] :A_{\mathbb{Z}[i], n-1} \twoheadrightarrow \mathbb{Z}[i]/(a+bi) \},\] so the sets $A_{\mathbb{Z}[i], n}$ are closed under multiplication by units, as $a+bi$ and its associates $u(a+bi)$, $u \in \Z[i]^{\times}$, generate the same ideal. This gives the sets $A_{\mathbb{Z}[i], n}$ a four-fold symmetry, but the Gaussian integers' Motzkin sets actually have an eight-fold symmetry. \begin{lemma}\label{cc} The sets $A_{\mathbb{Z}[i],n}$ are closed under complex conjugation. \end{lemma} \begin{proof} We use induction; note that $A_{\mathbb{Z}[i],0}$ is closed under complex conjugation. Suppose that $A_{\mathbb{Z}[i],n}$ is closed under complex conjugation, that $a+bi \in A_{\mathbb{Z}[i], n+1}$, and that $[x] \in \mathbb{Z}[i] / (\overline{a+bi})$. Then there exist some $q$ in $\mathbb{Z}[i]$ and some $r \in A_{\mathbb{Z}[i], n}$ such that $\overline{x} = q (a+bi) + r$. Our induction hypothesis forces $\overline{r}$ to be an element of $A_{\mathbb{Z}[i], n}$, and as $x = \overline{q} (\overline{a+bi} ) + \overline{r}$, we see that $A_{\Z[i],n} \twoheadrightarrow \Z[i]/(\overline{a+bi})$ and $\overline{a+bi} \in A_{\mathbb{Z}[i], n+1}$. \end{proof} \begin{coro}\label{you_get_the_whole_set} An element $a+bi \in A_{\mathbb{Z}[i],n}$ if and only if $\{ \pm a \pm bi \}, \{ \pm b \pm ai\} \subset A_{\mathbb{Z}[i],n}$.
\end{coro} Lemma \ref{cc} is a special case of the general result that if $K$ is a Galois number field, its Motzkin sets are closed under $\sigma$ for all $\sigma \in \Gal(K/ \Q)$. \subsection{Representatives of Cosets of $a+bi$}\label{cosets} Our definition of $A_{\Z[i],n}$ relies on sets that surject onto quotients $\Z[i]/(a + bi)$, so it behooves us to study how subsets of $\Z[i]$ map onto these quotients. First, we examine squares in the plane. \begin{lemma}\label{a_square} When $a > b \geq 0$, distinct elements in an $a \times a$ square in $\Z[i]$ are not congruent modulo $a +bi$. In other words, if $a > b \geq 0$, if $c,d \in \mathbb{Z}$, if \begin{equation*} S = \{ x+yi: c \leq x < c +a, d \leq y < d + a\}, \end{equation*} and if $\alpha + \beta i, \gamma + \delta i$ are distinct elements of $S$, then $\alpha + \beta i \not \equiv \gamma +\delta i \pmod{a + bi}$. \end{lemma} \begin{proof} Suppose, leading to a contradiction, that $\alpha + \beta i \equiv \gamma +\delta i \pmod{a+bi}$. Then there exists some $y \in \mathbb{Z}[i]$ such that $(\alpha - \gamma) + (\beta -\delta) i = y (a+bi)$. Note that \begin{equation*} \Nm(y) \Nm(a+bi) = (\alpha -\gamma)^2 + (\beta -\delta)^2 \leq 2(a-1)^2 < 2(a^2 + b^2)=2 \Nm(a+bi). \end{equation*} As $\alpha + \beta i \neq \gamma + \delta i$, the norm of $y$ equals one, so $(\alpha - \gamma) + (\beta -\delta)i \in \{ \pm (a+bi), \pm (b-ai)\}$, which cannot be, as $|\alpha -\gamma|, |\beta -\delta| \leq a-1$. \end{proof} \begin{lemma} \label{two_squares} If $a > b \geq 0$, if $S = \{ x+yi: 0 \leq x,y < a\}$, if $T=\{ x+iy: 0 \leq x <b, -b \leq y <0\}$, and if $\alpha + \beta i, \gamma + \delta i$ are distinct elements of any translate of $S \cup T$, then $\alpha + \beta i \not \equiv \gamma + \delta i \pmod{a +bi}$ and $|S \cup T| = \Nm(a +bi)$. The set $S \cup T$ contains exactly one representative of every coset of $a+bi$. \end{lemma} \begin{figure}[ht] \centering \begin{tikzpicture} [scale=.5, transform shape] \foreach \x in {0,...,6} \foreach \y in {0,...,6}{ \node[square] at (.8*\x,.8*\y) {}; } \foreach \x in {0,...,3} \foreach \y in {1,...,4}{ \node[square] at (.8*\x,-.8*\y) {}; } \foreach \x in {0,...,6} \node[circle,minimum size=1cm] at (.8*\x,.4) {$\bm \x $}; \foreach \y in {-4,...,-2} \node[circle,minimum size=1cm] at (0,.4 + .8*\y) {$\bm \y i $}; \node[circle,minimum size=1cm] at (0,-.4) {$\bm -i $}; \node[circle,minimum size=1cm] at (0,1.2) {$\bm i $}; \foreach \y in {2,...,6} \node[circle,minimum size=1cm] at (0,.4 + .8*\y) {$\bm \y i $}; \end{tikzpicture} \caption{$S \cup T$ for $a +bi = 7 +4i$} \label{Fig:S_cup_T} \end{figure} \begin{proof} See Figure \ref{Fig:S_cup_T}. Lemma \ref{a_square} shows that two distinct elements of $S$ (respectively, $T$) are not equivalent modulo $a+bi$. It remains to show that if $\alpha + \beta i \in T$ and $\gamma + \delta i \in S$, then $\alpha + \beta i \not \equiv \gamma + \delta i \pmod{a+bi}$. Suppose, leading to a contradiction, there exists some $y \in \mathbb{Z}[i]$ such that $(\alpha + \beta i) - (\gamma + \delta i) = y(a+bi)$. Then \begin{align*} \Nm(y)\Nm(a+bi) &= (\alpha - \gamma)^2 + (\beta - \delta)^2 \\ & \leq (a-1)^2 + (a+b-1)^2\\ & < 4 (a^2 + b^2) = 4 \Nm(a+bi), \end{align*} so $1 \leq \Nm(y) < 4$. This means that $\Nm(y) = 1$ or $2$, as there are no Gaussian integers with norm $3$. 
The Gaussian integers with norm $1$ or $2$ are $\{ \pm 1, \pm i, \pm 1 \pm i \}$ and thus the set $C$ of potential values of $y(a+bi)$, where the real part of $y(a+bi)$ is $\geq 0$, is \begin{equation*} \{ a+bi, b-ai, a-b + (a+b)i, a + b + (b-a)i \}. \end{equation*} If $x \in C$, if $\alpha + \beta i \in S$, and if $\gamma + \delta i \in T$, then neither $x + \alpha + \beta i$ nor $x + \gamma + \delta i$ is in $S \cup T$ (see Figure \ref{Fig:triangle}), so no two distinct elements of $S \cup T$ are congruent modulo $a +bi$. As $S$ and $T$ are disjoint, as $|S| = a^2$, and as $|T| = b^2$, the size of their union is $|S \cup T |= a^2 + b^2 = \Nm(a +bi)$. We conclude that any translate of $S \cup T$ contains precisely one representative for each coset of $a +bi$. \end{proof} \begin{coro}\label{down_to_one_square} If $M \subset \Z[i]$, if $M$ is closed under multiplication by units, and if $S \subset U = \displaystyle \bigcup_{q \in \Z[i]} ( M + q(a +bi))$, then $M \twoheadrightarrow \Z[i]/(a+bi)$. \end{coro} \begin{proof} If $M$ is closed under multiplication by units and $S \subset U$, then $T \subset -iS \subset -i U \subset U$, and $S \cup T \subset U$. Given $[x] \in \Z[i]/(a +bi)$, there exists an $r \in (S \cup T)$ such that $[x] = [r]$ by Lemma \ref{two_squares}. Our hypothesis says there exist an $m \in M$ and $q \in \Z[i]$ such that $r = m + q(a +bi)$. We conclude that $[m] = [x]$ and thus $M \twoheadrightarrow \Z[i]/(a +bi)$. \end{proof} So far, we have looked at squares to analyze collections of representatives of cosets of $a +bi$. We now turn to triangles. \begin{definition} \label{basic_triangle} If $a+bi \in \Z[i] \setminus 0$, let \begin{equation*} \mathscr{S}_{a+bi} := \{ x+yi: 0 \leq x,y, x +y < \max (|a|, |b| )\}. \end{equation*} \end{definition} \begin{lemma}\label{triangle} Suppose that $a > b \geq 0$, that $(1 +i) \nmid a +bi$, and that $M \subset \Z[i]$ is closed under multiplication by units. If $\mathscr{S}_{a+bi} \subset U = \displaystyle \bigcup_{q \in \Z[i]} (M + q(a+bi))$, then $M \twoheadrightarrow \mathbb{Z}[i]/(a+bi)$. 
\end{lemma} \begin{figure}[ht]\centering \subcaptionbox{ $(- \mathscr{S}_{a+bi} \cup i \mathscr{S}_{a+bi} ) + a +bi$ is in dark gray}{ \begin{tikzpicture} [scale=.5, transform shape] \foreach \y in {0,...,6} \node[square] at (0,.8*\y) {}; \foreach \y in {0,...,3} \node[square] at (.8,.8*\y) {}; \foreach \y in {5,...,5} \node[square] at (.8,.8*\y) {}; \foreach \y in {4,...,4} \node[squared] at (.8,.8*\y) {}; \foreach \y in {0,...,2} \node[square] at (1.6,.8*\y) {}; \foreach \y in {3,...,5} \node[squared] at (1.6,.8*\y) {}; \foreach \y in {0,...,1} \node[square] at (2.4,.8*\y) {}; \foreach \y in {2,...,6} \node[squared] at (2.4,.8*\y) {}; \node[square] at (3.2,0) {}; \foreach \y in {1,...,7} \node[squared] at (3.2,.8*\y) {}; \foreach \y in {0,...,8} \node[squared] at (4,.8*\y) {}; \foreach \y in {-1,...,9} \node[squared] at (4.8,.8*\y) {}; \foreach \y in {-2,...,10} \node[squared] at (5.6,.8*\y) {}; \foreach \x in {0,...,7} \node[circle,minimum size=1cm] at (.8*\x,.4) {$\bm \x $}; \node[circle,minimum size=1cm] at (0,1.2) {$\bm i $}; \foreach \y in {2,...,6} \node[circle,minimum size=1cm] at (0,.4 + .8*\y) {$\bm \y i $}; \draw[thick] (-.35,0)--(5.15,0); \draw[thick] (-.35,5.55)--(5.15,5.55); \draw[thick] (-.35,0)--(-.35,5.55); \draw[thick] (5.15,0)--(5.15,5.55); \end{tikzpicture}} \subcaptionbox{ $- \mathscr{S}_{a+bi} + (1 +i)(a +bi)$ is in dark gray}{ \begin{tikzpicture} [scale=.5, transform shape] \foreach \y in {11,...,11} \node[squared] at (-2.4,.8*\y) {}; \foreach \y in {10,...,11} \node[squared] at (-1.6,.8*\y) {}; \foreach \y in {9,...,11} \node[squared] at (-.8,.8*\y) {}; \foreach \y in {0,...,6} \node[square] at (0,.8*\y) {}; \foreach \y in {8,...,11} \node[squared] at (0,.8*\y) {}; \foreach \y in {0,...,5} \node[square] at (.8,.8*\y) {}; \foreach \y in {7,...,11} \node[squared] at (.8,.8*\y) {}; \foreach \y in {0,...,4} \node[square] at (1.6,.8*\y) {}; \foreach \y in {6,...,11} \node[squared] at (1.6,.8*\y) {}; \foreach \y in {0,...,3} \node[square] at (2.4,.8*\y) {}; \foreach \y in {5,...,11} \node[squared] at (2.4,.8*\y) {}; \foreach \y in {0,...,2} \node[square] at (3.2,.8*\y) {}; \foreach \y in {0,...,1} \node[square] at (4,.8*\y) {}; \node[square] at (4.8,0) {}; \foreach \x in {0,...,6} \node[circle,minimum size=1cm] at (.8*\x,.4) {$\bm \x $}; \node[circle,minimum size=1cm] at (0,1.2) {$\bm i $}; \foreach \y in {2,...,11} \node[circle,minimum size=1cm] at (0,.4 + .8*\y) {$\bm \y i $}; \draw[thick] (-.35,0)--(5.15,0); \draw[thick] (-.35,5.55)--(5.15,5.55); \draw[thick] (-.35,0)--(-.35,5.55); \draw[thick] (5.15,0)--(5.15,5.55); \draw[thick] (5.6, -1.6) --(5.6, -1.6); \end{tikzpicture}} \subcaptionbox{ $-i \mathscr{S}_{a+bi} + i(a +bi)$ is in dark gray}{ \begin{tikzpicture} [scale=.5, transform shape] \foreach \y in {1,...,7} \node[squared] at (-3.2,.8*\y) {}; \foreach \y in {2,...,7} \node[squared] at (-2.4,.8*\y) {}; \foreach \y in {3,...,7} \node[squared] at (-1.6,.8*\y) {}; \foreach \y in {4,...,7} \node[squared] at (-.8,.8*\y) {}; \foreach \y in {0,...,4} \node[square] at (0,.8*\y) {}; \foreach \y in {5,...,7} \node[squared] at (0,.8*\y) {}; \foreach \y in {0,...,5} \node[square] at (.8,.8*\y) {}; \foreach \y in {6,...,7} \node[squared] at (.8,.8*\y) {}; \foreach \y in {0,...,4} \node[square] at (1.6,.8*\y) {}; \foreach \y in {7,...,7} \node[squared] at (1.6,.8*\y) {}; \foreach \y in {0,...,3} \node[square] at (2.4,.8*\y) {}; \foreach \y in {0,...,2} \node[square] at (3.2,.8*\y) {}; \foreach \y in {0,...,1} \node[square] at (4,.8*\y) {}; \foreach \y in {0,...,0} 
\node[square] at (4.8,.8*\y) {}; \foreach \x in {0,...,6} \node[circle,minimum size=1cm] at (.8*\x,.4) {$\bm \x $}; \node[circle,minimum size=1cm] at (0,1.2) {$\bm i $}; \foreach \y in {2,...,7} \node[circle,minimum size=1cm] at (0,.4 + .8*\y) {$\bm \y i $}; \draw[thick] (-.35,0)--(5.15,0); \draw[thick] (-.35,5.55)--(5.15,5.55); \draw[thick] (-.35,0)--(-.35,5.55); \draw[thick] (5.15,0)--(5.15,5.55); \draw[thick] (5.6, -1.6) --(5.6, -1.6); \end{tikzpicture}} \caption{When $a +bi = 7 +4i$\\$\mathscr{S}_{a+bi}$ is in light gray in all three figures} \label{Fig:triangle} \end{figure} \begin{proof} We will show that if $\mathscr{S}_{a+bi} \subset U$, then $S = \{ x +yi: 0 \leq x, y <a \}$ is also contained in $U$. Observe that if $u \in \{ \pm 1, \pm i\}$, if $q \in \Z[i]$, and if $\mathscr{S}_{a+bi} \subset U$, then $u (\mathscr{S}_{a+bi} + q(a+bi)) \subset U$. Figure \ref{Fig:triangle}, with its outlined $S$, may help the reader visualize the following arguments. Computation shows that \begin{equation}\label{long} ((- \mathscr{S}_{a+bi} \cup i \mathscr{S}_{a+bi}) + a + bi) \supset \{x + yi: 0 < x \leq a, -x + b < y < x + b \}. \end{equation} The set $\mathscr{S}_{a+bi}$ can be written as $\{x + yi: 0 \leq x <a, 0 \leq y<a-x\}$. As $a >b$, $-x + b < a-x$ for all $x$ and thus Equation \ref{long} implies that \begin{align}\label{triangle_subsets} \nonumber U &\supset \mathscr{S}_{a+bi} \cup ((- \mathscr{S}_{a+bi} \cup i \mathscr{S}_{a+bi}) + a + bi) \\ &\supset \{ x + yi: 0 \leq x < a, 0 \leq y < \max (a -x, x + b )\}. \end{align} Because $x + b -1 \geq a-1$ when $x \geq a-b$, $\{x + yi: a-b \leq x < a, 0 \leq y < a \} \subset U$ (in Figure \ref{Fig:triangle}, this is $[3,6] \times [0, 6i] \subset U$). Our proof that $S \subset U$ then reduces to demonstrating that \[\{x + yi: 0 \leq x < a-b, \max (a-x, x+b ) \leq y < a \} \subset U.\] Note that \[-\mathscr{S}_{a+bi} + (1+i)(a+bi) \supset \{x+yi: 0 \leq x \leq a-b, a - x < y \leq a+b\},\] so $U$ contains $\{x + yi: 0 \leq x < a-b, 0 \leq y < a, y \neq a-x\}$. When $x > \frac{a-b}{2}$, $a-x < x+b $, so $U$ contains $\{x +yi: \frac{a-b}{2} < x < a-b, y = a-x\}$ by Equation \ref{triangle_subsets}. We have now reduced the problem to showing that \begin{equation} \label{diagonal_subset} \left \{x+yi: 0 \leq x < \frac{a-b}{2}, y = a-x \right \} \subset U; \end{equation} the condition is $x < \frac{a-b}{2}$ because $1+i \nmid a+bi$, which is equivalent to $a-b$ being odd. The variable $x$ represents an integer, so if $x \leq \frac{a-b}{2}$, then $x < \frac{a-b}{2}$. To finish, note that \[-i\mathscr{S}_{a+bi} + i(a+bi) \supseteq \{x +yi: 0 \leq x < a-b, b + x < y \leq a\}.\] When $0 \leq x < \frac{a-b}{2}$, $a - x > b+x$, so $-i\mathscr{S}_{a+bi} + i(a+bi)$ (and thus the union $U$) contains $\{x+yi: 0 \leq x <\frac{a-b}{2}, y = a-x\}$. We have now shown that Equation \ref{diagonal_subset} does hold, so $U$ contains all of $S$, and therefore $M \twoheadrightarrow \Z[i]/(a + bi)$ by Corollary \ref{down_to_one_square}. \end{proof} \subsection{$(1 + i)$-ary expansions in $\mathbb{Z}[i]$}\label{expansions} \begin{definition}\label{sets B_n} The sets $B_n$ are the Gaussian integers that can be written with $n+1$ `digits,' i.e. $$B_n = \left \{ \sum_{j=0}^n v_j (1+i)^j : v_j \in \{0, \pm 1, \pm i\} \right \}.$$ \end{definition} This new notation allows us to restate Lenstra's result, Equation \ref{1+i expansion}, as $\phi_{\Z[i]}^{-1} ([0,n]) = A_{\Z[i],n} = B_n$. Unfortunately for us, it is not obvious which sets $B_n$ a given element $a+bi$ belongs to.
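Membership in a fixed $B_n$ can at least be tested mechanically: every element of $B_n$ has the form $v_0 + (1+i)\beta$ with $v_0 \in \{0, \pm 1, \pm i\}$ and $\beta \in B_{n-1}$, so one can strip a digit, divide by $1+i$, and recurse. The sketch below is only an illustration in Sage/Python (the helper names are ours, and it is not the code of Appendix~\ref{Code}); it is exponential in $n$ and gives no hint of the least $n$ that works.
\begin{lstlisting}
DIGITS = [(0, 0), (1, 0), (-1, 0), (0, 1), (0, -1)]   # 0, +1, -1, +i, -i

def in_B(a, b, n):
    # Is a+bi in B_n?  Uses B_n = { v + (1+i)*beta : v a digit, beta in B_{n-1} }.
    if n == 0:
        return (a, b) in DIGITS
    for (va, vb) in DIGITS:
        x, y = a - va, b - vb
        if (x + y) % 2 == 0:              # (1+i) divides x+yi
            # (x+yi)/(1+i) = ((x+y) + (y-x)i)/2
            if in_B((x + y) // 2, (y - x) // 2, n - 1):
                return True
    return False

# min(n for n in range(10) if in_B(4, 1, n)) evaluates to 2
\end{lstlisting}
The final comment already hints at the answer for $4+i$, which we now work out by hand.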
For example, as $4=-(1+i)^4$, it is clear that $4+i = -(1+i)^4 +i$, and thus $4+i \in B_4$. It is not so obvious that $4+i = -i(1+i)^2 +(1+i) +1$, revealing that $4+i$ is also in $B_2$ (and thus also $B_3$). In \cite{Graves}, the author introduced the following geometric sets and theorem, giving a fast way to compute $\phi_{\Z[i]}(a+bi)$. The sets are all octagonal when plotted in $\Z \times \Z i$, as shown in Figure \ref{fig:oct_examples}. \begin{definition}\label{octogons} We define \begin{align*} Oct_n &: = \{ x+yi \in \Z[i]: |x|,|y| \leq w_n -2 ,|x| + |y| \leq w_{n+1} - 3 \},\\ S_n &: = \{ x+yi \in \Z[i] \setminus 0: |x|,|y| \leq w_n -2, |x| + |y| \leq w_{n+1} - 3 ,2 \nmid \gcd (x,y)\},\\ \intertext{and} D_n &: = \{ x+yi \in \Z[i] \setminus 0: |x|,|y| \leq w_n -2, |x| + |y| \leq w_{n+1} - 3 ,2 \nmid (x+y)\}. \end{align*} \end{definition} It follows that $S_n = \{x +yi \in Oct_n: (1 +i)^2 \nmid (x +yi)\}$ and $D_n = \{x +yi \in Oct_n: (1+i) \nmid (x+yi) \}$, so $D_n \subset S_n \subset Oct_n$, as shown in Figure \ref{fig:oct_examples}. Lemma 2.6 from \cite{Graves} shows that for $n \geq 1$, $S_n = D_n \cup (1+i) D_{n-1}$. \begin{figure}[ht]\centering \subcaptionbox{$D_2$}{ \begin{tikzpicture} [scale=.4, transform shape] \foreach \y in {-3,-1, 1,3} \node[square] at (0,.8*\y) {}; \foreach \y in {-2,...,2} \node[square] at (.8,1.6*\y) {}; \foreach \y in {-2,...,2} \node[square] at (-.8,1.6*\y) {}; \foreach \y in {-3,-1, 1,3} \node[square] at (1.6,.8*\y) {}; \foreach \y in {-3,-1, 1,3} \node[square] at (-1.6,.8*\y) {}; \foreach \y in {-1,...,1} \node[square] at (2.4,1.6*\y) {}; \foreach \y in {-1,...,1} \node[square] at (-2.4,1.6*\y) {}; \node[square] at (3.2,.8) {}; \node[square] at (-3.2,.8) {}; \node[square] at (3.2,-.8) {}; \node[square] at (-3.2,-.8) {}; \node [circle,minimum size=1cm] at (0,.4) {$\bm 0 $}; \node [circle,minimum size=1cm] at (.8,.4) {$\bm 1 $}; \node [circle,minimum size=1cm] at (-.8,.4) {$\bm -1 $}; \node [circle,minimum size=1cm] at (0,1.2) {$\bm i $}; \node [circle,minimum size=1cm] at (0,-.4) {$\bm -i $}; \end{tikzpicture}} \subcaptionbox{$S_2$}{ \begin{tikzpicture} [scale=.4, transform shape] \node[square] at (.8,0) {}; \node[square] at (-.8,0) {}; \node[square] at (0,.8) {}; \node[square] at (0,-.8) {}; \node[square] at (.8, .8) {}; \node[square] at (-.8, .8) {}; \node[square] at (-.8, -.8) {}; \node[square] at (.8, -.8) {}; \node[square] at (0, 2.4) {}; \node[square] at (.8, 1.6) {}; \node[square] at (.8, 2.4) {}; \node[square] at (.8, 3.2) {}; \node[square] at (1.6, .8) {}; \node[square] at (1.6, 2.4) {}; \node[square] at (2.4, .8) {}; \node[square] at (2.4, 1.6) {}; \node[square] at (3.2, .8) {}; \node[square] at (2.4, 0) {}; \node[square] at (0, -2.4) {}; \node[square] at (.8, -1.6) {}; \node[square] at (.8, -2.4) {}; \node[square] at (.8, -3.2) {}; \node[square] at (1.6, -.8) {}; \node[square] at (1.6, -2.4) {}; \node[square] at (2.4, -.8) {}; \node[square] at (2.4, -1.6) {}; \node[square] at (3.2, -.8) {}; \node[square] at (0, 2.4) {}; \node[square] at (-.8, 1.6) {}; \node[square] at (-.8, 2.4) {}; \node[square] at (-.8, 3.2) {}; \node[square] at (-1.6, .8) {}; \node[square] at (-1.6, 2.4) {}; \node[square] at (-2.4, .8) {}; \node[square] at (-2.4, 1.6) {}; \node[square] at (-3.2, .8) {}; \node[square] at (-2.4, 0) {}; \node[square] at (-.8, -1.6) {}; \node[square] at (-.8, -2.4) {}; \node[square] at (-.8, -3.2) {}; \node[square] at (-1.6, -.8) {}; \node[square] at (-1.6, -2.4) {}; \node[square] at (-2.4, -.8) {}; \node[square] at (-2.4, -1.6) {};
\node[square] at (-3.2, -.8) {}; \node[square] at (0, -.8) {}; \node [circle,minimum size=1cm] at (0,.4) {$\bm 0 $}; \node [circle,minimum size=1cm] at (.8,.4) {$\bm 1 $}; \node [circle,minimum size=1cm] at (-.8,.4) {$\bm -1 $}; \node [circle,minimum size=1cm] at (0,1.2) {$\bm i $}; \node [circle,minimum size=1cm] at (0,-.4) {$\bm -i $}; \end{tikzpicture}} \subcaptionbox{$B_2 $}{ \begin{tikzpicture} [scale=.4, transform shape] \node[square] at (0,0) {}; \foreach \y in {-3,-1, 1,3} \node[square] at (0,.8*\y) {}; \foreach \y in {-2,2} \node[square] at (0,.8*\y) {}; \foreach \y in {-4,...,4} \node[square] at (.8,.8*\y) {}; \foreach \y in {-4,...,4} \node[square] at (-.8,.8*\y) {}; \foreach \y in {-3,-1,1,3} \node[square] at (1.6,.8*\y) {}; \foreach \y in {-3,-1,1,3} \node[square] at (-1.6,.8*\y) {}; \node[square] at (-1.6,0) {}; \node[square] at (1.6,0) {}; \foreach \y in {-2,...,2} \node[square] at (2.4,.8*\y) {}; \foreach \y in {-2,...,2} \node[square] at (-2.4,.8*\y) {}; \foreach \y in {-1,1} \node[square] at (3.2,.8*\y) {}; \foreach \y in {-1,1} \node[square] at (-3.2,.8*\y) {}; \node [circle,minimum size=1cm] at (0,.4) {$\bm 0 $}; \node [circle,minimum size=1cm] at (.8,.4) {$\bm 1 $}; \node [circle,minimum size=1cm] at (-.8,.4) {$\bm -1 $}; \node [circle,minimum size=1cm] at (0,1.2) {$\bm i $}; \node [circle,minimum size=1cm] at (0,-.4) {$\bm -i $}; \end{tikzpicture}} \subcaptionbox{$Oct_2$}{ \begin{tikzpicture} [scale=.4, transform shape] \foreach \y in {-4,...,4} \node[square] at (0,.8*\y) {}; \foreach \y in {-4,...,4} \node[square] at (.8,.8*\y) {}; \foreach \y in {-4,...,4} \node[square] at (-.8,.8*\y) {}; \foreach \y in {-3,...,3} \node[square] at (1.6,.8*\y) {}; \foreach \y in {-3,...,3} \node[square] at (-1.6,.8*\y) {}; \foreach \y in {-2,...,2} \node[square] at (2.4,.8*\y) {}; \foreach \y in {-2,...,2} \node[square] at (-2.4,.8*\y) {}; \foreach \y in {-1,...,1} \node[square] at (3.2,.8*\y) {}; \foreach \y in {-1,...,1} \node[square] at (-3.2,.8*\y) {}; \node [circle,minimum size=1cm] at (0,.4) {$\bm 0 $}; \node [circle,minimum size=1cm] at (.8,.4) {$\bm 1 $}; \node [circle,minimum size=1cm] at (-.8,.4) {$\bm -1 $}; \node [circle,minimum size=1cm] at (0,1.2) {$\bm i $}; \node [circle,minimum size=1cm] at (0,-.4) {$\bm -i $}; \end{tikzpicture}} \caption{Examples of $D_n$, $S_n$, $B_n $, and $Oct_n$ when $n =2$} \label{fig:oct_examples} \end{figure} Our definitions let us describe the shape of $B_n$. \begin{theorem}\label{octo_union} (\cite{Graves}, Theorems 2.4 and 2.7) The set $B_n \setminus 0$ equals the disjoint union \[ \displaystyle \coprod_{j=0}^{\lfloor n/2 \rfloor } 2^j S_{n- 2j} = \coprod_{j=0}^n (1+i)^j D_{n-j}.\] \end{theorem} \begin{coro}\label{one_up} Suppose that $x +yi \in Oct_n$, and that $2^l \parallel \gcd(x,y)$. If $l \neq \lfloor \frac{n}{2} \rfloor + 1$, then $l \leq \lfloor \frac{n}{2} \rfloor$ and $x +yi \in B_{n+1}$. \end{coro} To prove Corollary \ref{one_up}, we need the following two lemmas, which are simple to verify. \begin{lemma}\label{max_power} If $x +yi \in Oct_n$ and $2^l \mid \gcd(x,y)$, then $l \leq \lfloor \frac{n}{2} \rfloor + 1$. If $l = \lfloor \frac{n}{2} \rfloor + 1$ and $n = 2k$, then $x +yi \in 2^{k+1}\{ \pm 1, \pm i \}$. If $l = \lfloor \frac{n}{2} \rfloor + 1$ and $n = 2k +1$, then $x + yi \in 2^{k+1}\{ \pm 1, \pm i, \pm 1 \pm i \}$.
\end{lemma} \begin{lemma}\label{identities} The following identities hold: \begin{multicols}{2} \begin{itemize} \item $w_{n+2} = 2 w_n$ \item $w_n - w_{n-1} \leq w_{n+1} - w_n \leq w_{n-2}$ \item $2(w_{n+1} - w_n) \leq w_n$ \item $3(w_{n+1} - w_n) \leq w_{n+1}$ \item If $2^{l+1} < w_n$, then $l\leq \lfloor \frac{n}{2} \rfloor$. \item If $2^{l+1} \leq w_n$, then $2^l \leq w_{n+1} - w_n$ . \item If $w_{n+1} - w_n \leq 2^l$, then $\lfloor \frac{n+1}{2} \rfloor \leq l$. \item If $l \leq \lfloor \frac{n}{2} \rfloor$, then $2^l | (w_n - 2^l)$. \item If $l \leq \lfloor \frac{n}{2} \rfloor$, then $2^l \leq w_{n+1} - w_n$ . \item If $l \leq \lfloor \frac{n}{2} \rfloor$, then $w_{n+1} - w_n + 2^l \leq w_n$. \end{itemize} \end{multicols} \end{lemma} \begin{proof} (of Corollary \ref{one_up}) If $l =0$, then $x + yi \in S_n \subset B_n \subset B_{n+1}$. Lemma \ref{max_power} and the hypothesis that $l \neq \lfloor \frac{n}{2} \rfloor + 1$ imply $l \leq \lfloor \frac{n}{2} \rfloor$, so if $l \geq 1$, then Lemma \ref{identities} shows \begin{align*} |x|, |y| & \leq w_n - 2^l = 2^l (w_{n-2l} - 1) \leq 2^l (w_{n -2l +1} -2)\\ \intertext{and} |x| + |y| & \leq w_{n+1} - 2^l = 2^l (w_{n - 2l +1} -1) \leq 2^l( w_{n - 2l +2} -3). \end{align*} These equations show that $x +yi \in 2^l S_{n - 2l +1}$ and thus, by Theorem \ref{octo_union}, also in $B_{n+1}$. \end{proof} The sets $B_n$ look like lacy, octagonal snowflakes, and they have several nice properties. Like the other sequences ($D_n$, $S_n$, and $Oct_n$), they are nested, as well as closed under both complex conjugation and multiplication by units. Theorem \ref{octo_union} tells us that $D_n \subset S_n \subset B_n \subset Oct_n$; Definition \ref{sets B_n} implies that if $a+bi \in B_n$, then $(1+i)^j (a+bi) \in B_{n+j}$. Similarly, if $2^j | \gcd(a,b)$ for some $a+bi \in B_n$, then $\frac{a}{2^j} + \frac{b}{2^j} i \in B_{n-2j}$. Definition \ref{sets B_n} also tells us that if $(1 +i)^{n+1} | x$ and $x \in B_{n}$, then $x =0$. These properties lead to the following useful result on the sets $B_n$. \begin{lemma} \label{divides_xy} If $xy \in B_n \setminus 0$, then $x \in B_n \setminus 0$. \end{lemma} \begin{proof} Proof by induction. The claim clearly holds for $xy \in B_0\setminus 0$, as $xy$, $x$, and $y$ are all multiplicative units, and $B_0 \setminus 0$ is the set of all the multiplicative units $\Z[i]^{\times}$. Now suppose that our claim holds for all $j$, $ 0 \leq j \leq n-1$. Suppose that $x = a +bi$ and $y = c + di$, so $xy = (ac -bd) + (ad+bc) i \in B_n \setminus 0$. We will show that $x=a + bi \in B_n$. We may suppose that $(1+i)$ divides neither $x$ nor $y$: if it divided either one, then $\frac{xy}{1+i}$ would be an element of $B_{n-1}$, and we could apply our induction hypothesis. Corollary \ref{you_get_the_whole_set} lets us assume without loss of generality that $a > b \geq 0$, that $c > d$, and that $a,c >0$. There are three cases to consider. If $d=0$, then $0 \leq b < a \leq ac = \max (ac-bd, ad + bc) \leq w_n -2$ and \begin{align*} 0 &<a +b \leq ac +bc = (ac - bd) + (ad +bc) \leq w_{n+1} - 3.\\ \intertext{If $d < 0$, then } 0 &\leq b < a \leq a +b \leq ac -bd \leq w_n - 2 \leq w_{n+1} -3.\\ \intertext{If $d >0$, then } 0 &\leq b <a \leq a+b \leq ad+bc \leq w_n -2 \leq w_{n+1} -3. \end{align*} As $2 \nmid \gcd(a,b)$, $x = a +bi \in S_n$. Theorem \ref{octo_union} tells us that $S_n \subset B_n$, so $x \in B_n$ in all three scenarios. \end{proof} \subsection{Motzkin sets and $(1+i)$-ary expansions} Our proof that $A_{\mathbb{Z}[i], n} = B_n$ uses induction to show containment in both directions.
We start with three lemmas that show containment between our sets under special circumstances. \begin{lemma} \label{containment}If $A_{\mathbb{Z}[i], n }= B_n$, then $A_{\mathbb{Z}[i], n+1} \subset B_{n+1}$. \end{lemma} \begin{proof} Given $a+bi \in A_{\mathbb{Z}[i], n+1}$, there exists some $q \in \mathbb{Z}[i]$ and $r \in A_{\mathbb{Z}[i], n}=B_n$ such that $(1+i)^{n+1} = q(a+bi) +r$. Rearranging terms reveals that \begin{equation*} q(a+bi) = (1+i)^{n+1} - r \in B_{n+1} \setminus 0, \end{equation*} so $a+bi \in B_{n+1}$ by Lemma \ref{divides_xy}. \end{proof} \begin{lemma}\label{multiply_by_1+i} If $A_{\mathbb{Z}[i], j} = B_j$ for $j \in \{n, n-1\}$, then $(1+i)B_n \subset A_{\mathbb{Z}[i], n+1}$. \end{lemma} \begin{proof} Given $x \in \mathbb{Z}[i]$, we can write $x = q(1+i) +r$ for some $q \in \Z[i]$ and $r \in A_{\Z[i],0}$. Suppose that $b \in B_n = A_{\mathbb{Z}[i], n}$, so we can expand $q$ as $q = q' b + r'$, where $r' \in A_{\Z[i], n-1}$. Then \begin{align*} (1+i)q + r &= (1+i)q' b + (1+i)r' +r\\ \intertext{and thus} x &= q' (1+i)b + ((1+i)r'+r). \end{align*} The element $(1+i)r' + r \in B_n = A_{\mathbb{Z}[i], n}$, so $A_{\mathbb{Z}[i], n} \twoheadrightarrow \Z[i]/b(1+i)$ and $b(1+i) \in A_{\mathbb{Z}[i], n+1}$. \end{proof} \begin{lemma} \label{subset_containment} If $A_{\Z[i], j} = B_j$ for $j \in \{n, n-1\}$, and if $\left ( B_{n+1} \setminus (1+i)\Z[i] \right )\subset A_{\Z[i], n+1} $, then $A_{\Z[i], n+1} = B_{n+1}$. \end{lemma} \begin{proof} The set $B_{n+1}$ is the union of its elements that are divisible by $(1 +i)$, and the elements that are not. The set of elements of $B_{n+1}$ that are divisible by $(1+i)$ is the set $(1 +i) B_n$, i.e., \[\{x + yi \in B_{n+1}: (1 +i) | (x +iy)\} = (1 +i) B_n.\] Lemma \ref{multiply_by_1+i} shows that, under our assumptions, $(1 +i)B_n \subset A_{\Z[i], n+1}$, so if $\{x + yi \in B_{n+1} : (1 +i) \nmid (x +iy)\} \subset A_{\Z[i], n+1}$, then all of $B_{n+1} \subset A_{\Z[i], n+1}$. Then, under our assumptions, $A_{\Z[i], n+1} \subset B_{n+1}$ by Lemma \ref{containment}, so $B_{n+1} = A_{\Z[i], n+1}$. \end{proof} \section{Main Result}\label{Main Result} We reduce proving $A_{\Z[i], n} = B_n$ to showing that $\mathscr{S}_{a+bi} \subset U = \bigcup _{q \in \Z[i]} (B_n + q(a+bi))$ for all $a +bi \in B_{n+1}\setminus (1+i)\Z[i]$. We use the geometry of our sets $D_n$, $S_n$, $B_n$, and $Oct_n$ to prove containment. Section \ref{iden} introduces some necessary lemmas, and Section \ref{meat} uses them to prove two technical propositions that allow us to apply Proposition \ref{subset_containment}. Each of the two propositions has a long proof, broken up into cases. Having done all the heavy lifting, we conclude with a short argument in subsection \ref{finally} that $A_{\Z[i], n} = B_n$. \subsection{Necessary Lemmas}\label{iden} \begin{lemma}\label{oct_translate} Suppose that $a + bi \in \Z[i]\setminus (1+i)\Z[i]$ and that $u \in \mathbb{Z}[i]^{\times} = \{\pm 1, \pm i\}$. If $x+yi \in (Oct_n + u(a+bi))$ and $2|(x +y)$, then $x+yi \in (B_n + u(a+bi))$. \end{lemma} \begin{proof} If $x+yi \in (Oct_n + u(a+bi))$, then $c +di = (x +yi) - u(a+bi)$ is an element of $Oct_n$. Because $(1+i) | (x +yi)$ and $(1 +i) \nmid (a+bi)$, we see that $(1+i) \nmid (c+di)$ and thus $c+di \in D_n \subset B_n$ by Theorem \ref{octo_union}. \end{proof} \begin{lemma}\label{broom} Suppose that $(1+i) \nmid (a+bi)$ and that $2^k \parallel \gcd(x,y), k \geq 1$. 
If any one of $(a-x) + (b-y)i$, $(a-y) + (b+x)i$, or $-(b+x) + (a-y)i \in Oct_n$, then $x+yi \in U = \bigcup_{q\in \Z[i]} (B_n + q(a+bi))$. \end{lemma} \begin{proof} As $(1+i) | (x+yi)$ and $(1+i) \nmid (a+bi)$, $(1+i)$ divides neither $(a-x) + (b-y)i = (a+bi) - (x+yi)$ nor $-(b+x) + (a-y)i = i(a+bi) - (x+yi)$. It certainly does not divide $(a-y) + (b+x)i = (a + bi) +i(x +yi).$ Whichever of these three elements lies in $Oct_n$ therefore lies in $Oct_n \setminus (1+i) \Z[i] = D_n \subset B_n$. Some computational housekeeping then shows that $x+yi \in U$. \end{proof} \begin{lemma} \label{small} If $a+bi \in B = ( B_{n+1} \cap Oct_n) \setminus( B_n \cup (1+i) B_n)$, then $B_n \twoheadrightarrow \mathbb{Z}[i]/(a+bi)$. \end{lemma} \begin{proof} Proof by induction. Simple computations show this holds true for $n \in \{0, 1\}$, so for the rest of the proof, assume that $n \geq 2$. For ease of notation, we again define $U = \bigcup_{q \in \mathbb{Z}[i]} (B_n + q(a+bi))$. The set $B$ is closed under complex conjugation and multiplication by units, so as $(1 + i) \nmid a + bi$, we can assume without loss of generality that $w_n - 2 \geq a > b \geq 0$. By applying Lemma \ref{triangle}, it suffices to show that $\mathscr{S}_{a+bi} \subset U$ to prove our claim. As $0 <a \leq w_n -2$, the set $\mathscr{S}_{a +bi} \subset Oct_n$, so if $x +iy \in \mathscr{S}_{a+bi}$ and $(1+i) \nmid (x +yi)$, then $x +iy \in D_n \subset B_n \subset U$. For the rest of this proof, assume that $x +yi \in \mathscr{S}_{a+bi}$ and that $(1+i) | (x +yi)$; we must show that $x +yi \in U$. We do this by showing that either $x +yi \in B_n$ or $x +yi \in Oct_n + u(a+bi)$ for some $ u \in \Z[i]^{\times}$, as then $x +yi \in U$ by Lemma \ref{oct_translate}. Let us first consider $x +yi$, where $x, y \neq 0$. Suppose that $2^k \parallel \gcd(x,y)$, so that $2^k \leq x,y < x+y \leq w_n -2^k $ (as $x +y < a \leq w_n -2$) and thus $2^k \leq x,y \leq w_n - 2^{k+1}$. As $2^{k+1} < w_n$, we see by Lemma \ref{identities} that $k\leq \lfloor \frac{n}{2} \rfloor$ and that \[x + y \leq w_n - 2^k + (w_{n+1} - w_n - 2^k) = w_{n+1} - 2^{k+1}.\] If $x + y \leq w_{n+1} - 3 \cdot 2^k$, then $x +yi \in 2^k S_{n-2k} \subset B_n \subset U$. If not, then $x + y = w_{n+1} - 2^{k+1} < a \leq w_n -2$ and thus $w_{n+1} - 2^{k+1} \leq w_n - 2^k$. We rearrange to see that $w_{n+1} - w_n \leq 2^k$ and thus $\lfloor \frac{n+1}{2} \rfloor \leq k$ by Lemma \ref{identities}. In this situation, $\lfloor \frac{n+1}{2} \rfloor \leq k \leq \lfloor \frac{n}{2} \rfloor$, so $n = 2k$, $k \geq 1$, $a > x + y = 2^{k+1}$, and $x= y = 2^k$. We know that $2 \nmid \gcd (a-2^k, b-2^k)$, that $|a-2^k| , |b - 2^k| \leq w_n - 2^k - 2 < w_n -2$, and that \begin{align*} |a-2^k| + |b-2^k| & \leq \max \{ a+b - 2^{k+1}, a-b\} \\ & \leq \max \{ w_{n+2} - 2^{k+1} - 3, w_n -3\}\\ & \leq w_{n+1} - 3, \end{align*} so $(a-x) + (b-y)i \in D_n \subset B_n$ and $x +yi \in U$. Now we consider $x+yi$, where one of the coordinates is zero. Label the non-zero coordinate $z$. If $2^k \parallel z$ and if $2^k \leq z \leq w_n - 2^{k+1}$, then $k \geq 1$ and $z \leq w_{n+1} - 3 \cdot 2^k$, demonstrating that $z, zi \in 2^k S_{n-2k} \subset B_n \subset U$. If $2^k \leq z = w_n - 2^k $, then $0 \leq b, |a-z| < w_n -2$. As $2 \nmid \gcd (a-z, b)$ and \begin{align*} 0 < b + |a-z| &\leq \max (a +b-z, b + z-a)\\ &\leq \max (w_n + 2^k -3, w_n - 2^k -1)\\ & \leq w_{n+1} - 3, \end{align*} we see that $(a-z) + bi \in D_n \subset B_n$, allowing us to conclude that both $z , zi \in U$ by Lemma \ref{broom}.
\end{proof} \subsection{Propositions at the heart of our proof}\label{meat} Our main proof requires us to show that if $A_{\Z[i], j} = B_j$ for all $0 \leq j \leq n$, and if $a+bi \in B_{n+1} \setminus ( B_n \cup (1+i)\Z[i])$, then $\mathscr{S}_{a+bi} \subset U = \bigcup_{q \in \mathbb{Z}[i]} (B_n + q(a+bi))$. Lemma \ref{small} established our claim for the $a+bi \in B_{n+1} \setminus ( B_n \cup (1+i)\Z[i])$ that are also in $Oct_n$. We now prove it for the $a+bi \in B_{n+1} \setminus ( B_n \cup (1+i)\Z[i])$ that are not in $Oct_n$. First, Proposition \ref{inside_the_octogon} shows that, under our assumptions, $\mathscr{S}_{a+bi} \cap Oct_n \subset U$. Proposition \ref{outside_the_octogon} then demonstrates that, under the same assumptions, $(\mathscr{S}_{a+bi}\setminus Oct_n) \subset U$ as well. \begin{prop}\label{inside_the_octogon} Suppose that $A_{\Z[i], n} = B_n$. If $a +bi \in B_{n+1} \setminus (Oct_n \cup (1+i) \Z[i])$, if $a >b \geq 0$, and if $x+yi \in \mathscr{S}_{a+bi} \cap Oct_n$, then $x +yi \in U = \bigcup_{q \in \mathbb{Z}[i]} (B_n + q(a+bi))$. \end{prop} \begin{proof} Suppose that $x +yi \in Oct_n$ and that $2^l \parallel \gcd(x,y)$. If $x +yi \in S_n \subset B_n$, then $x+yi$ is certainly an element of $U$, so we will assume for the rest of this proof that $x+yi \notin S_n$, so $l \geq 1$. Lemma \ref{max_power} states that $l \leq \lfloor \frac{n}{2} \rfloor +1$. If $x+yi \in Oct_n \cap \mathscr{S}_{a+bi}$ and $l = \lfloor \frac{n}{2} \rfloor +1$, then $x +yi \in \{ 2^{k+1}, 2^{k+1} i\}$ when $n = 2k$, and $x + yi \in \{2^{k+1}, 2^{k+1} i, 2^{k+1}(1+i) \}$ when $n = 2k+1$. Checking all five cases shows that at least one of $(a+bi) - (x+yi)$ and $i(a+bi) - (x+yi)$ must be an element of $B_n$. We therefore assume for the rest of the proof that $l \leq \lfloor \frac{n}{2} \rfloor$, so $1\leq l \leq \lfloor \frac{n}{2} \rfloor$ and $x +yi \in B_{n+1}$ by Corollary \ref{one_up}. Because $a > b \geq 0$ and $a+bi \notin Oct_n$, we observe that $a > w_n -2$. As $x +yi \in Oct_n$, we note that $x, y \leq w_n - 2^l$ and $x+y \leq w_{n+1} -\max(3,2^l)$. Theorem \ref{octo_union} shows $x+yi \in B_n$ if and only if $x, y \leq w_n - 2^{l+1}$ and $x+y \leq w_{n+1} - 3 \cdot 2^l$. Our element $x+yi \in Oct_n \cap (B_{n+1} \setminus B_n)$ then falls into one of three cases: either $x = w_n - 2^l$; $y = w_n - 2^l$; or $x,y \leq w_n - 2^{l+1}$ and $x+y \geq w_{n+1} - 2^{l+1}$. We address each of the three cases below. \underline{$\mathbf{x = w_n - 2^l}$:} By our assumptions and Lemma \ref{identities}, \begin{align*} 0 \leq a -x &\leq (w_{n+1}-2) - (w_n - 2^l) \leq 2(w_{n+1} - w_n) -2 \leq w_n -2.\\ \intertext{ As $x+y \leq w_{n+1} - 2^l$, we also see that $y \leq w_{n+1} - w_n$. This then implies that } |b-y| & \leq \max (b,y) \leq \max (w_n -2, w_{n+1} - w_n ) \leq w_n -2,\\ \intertext{and thus} |a-x| + |b-y| & = \max ( a+ b - (x+y) , (a-b) + y -x) \\ & \leq \max ( w_{n+2} -3 - w_n + 2^l, w_{n+1} -3 + w_{n+1} - w_n - (w_n - 2^l) ) \\ & \leq \max (w_{n+1} - 3, 2(w_{n+1} - w_n)+2^l - 3 ) \\ &= w_{n+1} -3. \end{align*} We conclude that $(a-x) + (b-y)i \in Oct_n$ and thus $x+yi \in U$ by Lemma \ref{broom}.\\ \underline{$\mathbf{y = w_n - 2^l}$:} When $y = w_n -2^l$, then $0 \leq a-y \leq 2(w_{n+1} - w_n )- 2 \leq w_n -2.$ The condition $a-x > w_n -2$ is equivalent to $b+x \leq a+b - w_n +1$; the right hand side is bounded above by $w_{n+2} - 3 - w_n + 1 = w_n -2$. The assumption is also equivalent to $x < a-w_n +2$.
As $a-w_n +2 \leq w_{n+1} - w_n$, note that $x \leq w_{n+1} - w_n - 2^l$. We then see that if $a -x > w_n -2$, then \begin{align*} |a-y| + |b+x| &\leq a+b -y + x \\ &\leq w_{n+2} - 3 - w_n +2^l + w_{n+1} - w_n - 2^l \\ &= w_{n+1} - 3, \end{align*} demonstrating that $(a-y) + (b+x)i \in Oct_n$. Similarly, if $b+ x \leq y = w_n - 2^l \leq w_n -2$, then the odd sum $|a-y| + |b+x| \leq a -y +y =a \leq w_{n+1} -2$, so $|a-y| + |b+x| \leq w_{n+1} -3$ and $(a-y) + (b+x) i \in Oct_n$. Lemma \ref{broom} shows that $x +yi \in U$ when either $a-x > w_n -2$ or $b+x \leq y$. Let us now suppose that $a-x \leq w_n -2$ and $b+x >y$. Note that $|b-y| \leq w_n -2$. If $b \geq y$, then \begin{align*} |a-x| + |b-y| &= (a+b) - (x+y) \leq w_{n+2} - 3 - w_n + 2^l \leq w_{n+1} - 3; \\ \intertext{otherwise, $b < y < b+x$ and } |a-x| + |b-y| &= a + (y - (b+x)) \leq a-1 \leq w_{n+1} - 3. \end{align*} Either way, $(a-x) + (b-y) i \in Oct_n$ and thus $x+yi \in U$ by Lemma~\ref{broom}.\\ \underline{$\mathbf{x,y \leq w_n - 2^{l +1} \text{ and } x + y \geq w_{n+1} - 2^{l+1}}$:} These conditions imply that $|b-y| \leq w_n -2$, that $\min(x,y) \geq w_{n+1} - w_n$, and that \[w_{n+1} - w_n < a-x, a-y \leq w_{n+1} -2 - (w_{n+1} -w_n) = w_n -2.\] If $b \geq y$, then \[|a -x| + |b-y| = (a+b) - (x+y) \leq w_{n+2} - 3 - w_{n+1} + 2^{l+1} \leq w_{n+1} - 3\] and $(a-x) + (b-y)i \in Oct_n$ by Lemma \ref{identities}, as desired. If $b + x \leq y \leq w_n -2$, then $|a-y| + |b+x| \leq a- y + y \leq w_{n+1} -2$ and thus the odd sum $|a-y| + |b+x|$ is bounded above by $w_{n+1} -3$, showing that $(a-y) + (b+x) i \in Oct_n$. We are then left with when $b+x > y > b$, implying that \[|a-x| + |b-y| = a +y - (b+x) \leq a-1 \leq w_{n+1} - 3,\] demonstrating that $(a - x ) + (b-y) i \in Oct_n$. In all three scenarios, $x +yi \in U$ by Lemma \ref{broom}. \end{proof} \begin{prop} \label{outside_the_octogon} Suppose that $A_{\Z[i], n} = B_n$. If $a+bi \in B_{n+1} \setminus (Oct_n \cup (1+i)\Z[i])$, if $a > b \geq 0$, and if $x +yi \in \mathscr{S}_{a+bi} \setminus Oct_n$, then $x +yi \in U = \bigcup_{q \in \mathbb{Z}[i]} (B_n + q(a+bi))$. \end{prop} \begin{proof} Our assumptions imply that $b \leq w_n -2 <a$. As $x +yi \in \mathscr{S}_{a+bi} \setminus Oct_n$, $x +y \leq a-1 \leq w_{n+1} -3$, so either $x > w_n -2$ or $y > w_n -2$. We address the two cases below. \underline{$\mathbf{x > w_n -2}:$} As $x+yi \in \mathscr{S}_{a+bi}$, our bound implies that \[\max (y, 2^l) \leq a-x \leq w_{n+1} - w_n -1< w_n -2 < x.\] Suppose that $2^l \parallel (a-x, b-y),$ so that \begin{equation}\label{heart} 0 \leq y< a-x \leq w_{n+1} - w_n - 2^l < 2(w_{n+1} - w_n - 2^l) \leq w_n - 2^{l+1}, \end{equation} and $l \leq \lfloor \frac{n}{2} \rfloor$ by Lemma \ref{identities}. If $|b-y| \leq w_n - 2^{l+1}$, then \[|a-x| + |b-y| \leq (w_{n+1} - w_n - 2^l) + (w_n - 2^{l+1}) = w_{n+1} - 3\cdot 2^l,\] and $(a-x) + (b-y)i \in 2^l S_{n-2l} \subset B_n,$ so Lemma \ref{broom} places $x +yi \in U$. If $|b-y| > w_n - 2^{l+1}$, then $b-y = w_n -2^l$, as $0 \leq y < w_n - 2^{l+1}$ and $0\leq b \leq w_n -2$, thereby forcing $l \geq 1$. Lemma \ref{identities} then shows that, as $l\leq \lfloor \frac{n}{2} \rfloor$, \begin{align*} \max (x, 2^l) \leq a - b + y & \leq (w_{n+1} -2) - (w_n - 2^{l}) \leq 2(w_{n+1} -w_n) -2 \leq w_n -2,\\ \intertext{that} 0 < a +b - x &\leq (w_{n+2} -3) - (w_n -1) = w_n -2,\\ \intertext{and that} |a-b+y| + |a+b -x| & = (a+b) + (a-x) -(b-y) \\ &\leq (w_{n+2} -3) + (w_{n+1} - w_n -2^l) - (w_n -2^l) \\ &= w_{n+1} -3.
\end{align*} We noted previously that $l \geq 1$, so $2 | (a-x) + (b-y)i$. As $(1+i) \nmid (a+bi)$, it follows that $(1+i) \nmid (x+yi)$ and thus $(1+i)$ does not divide $(1+i)(a+bi) - i(x+yi) = (a-b+y) + (a+b-x)i$. We conclude that $(a-b+y) + (a+b-x) i \in D_n \subset B_n$ and thus $x +yi \in (B_n + (1-i)(a+bi)) \subset U$. \underline{$\mathbf{y > w_n -2}:$} Suppose that $2^l \parallel (a-y, b+x)$. We apply Lemma \ref{identities} to see that \begin{equation}\label{med} 0 < a-y \leq w_{n+1} - w_n - 2^l < 2(w_{n+1} - w_n - 2^l) \leq w_n - 2^{l+1}, \end{equation} and $l \leq \lfloor \frac{n}{2} \rfloor.$ If $b+x \leq w_n - 2^{l+1}$, then \[|a-y| + |b+x| \leq (w_{n+1} - w_n - 2^l) + (w_n - 2^{l+1}) = w_{n+1} - 3\cdot 2^l\] and $(a- y) + (b+x)i \in 2^l S_{n - 2l} \subset B_n$, setting $x +yi \in U$ by Lemma \ref{broom}. If $b + x > w_n - 2^{l+1}$, then \begin{equation}\label{needed?} w_n - 2^l \leq b+x < b + (a-y) \leq w_n -2 <a, \end{equation} and $l \geq 2$. Equation \ref{needed?} just showed that $0 < a+b - y \leq w_n -2$, so as \begin{align*} |a - b-x| = a - (b+x) & \leq w_{n+1} - 2 - (w_n - 2^l) \leq w_n -2\\ \intertext{and} |a -b-x| + |a + b - y| & \leq (a -y) + (a +b) - (b+x) \\ &\leq (w_{n+1} - w_n - 2^l) + (w_{n+2} - 3) +(2^{l} - w_n) \\ &=w_{n+1} -3, \end{align*} we see that $(a-b -x ) + (a+b -y )i \in Oct_n$. As $l \geq 2$, $(1 +i)$ divides $(a-y) +(b+x)i = (a+bi) + i(x+yi)$. We deduce that $(1 +i) \nmid (x+yi)$, and thus $(1+i)$ does not divide $(a -b-x) + (a+b -y)i = (1+i)(a+bi) - (x+yi)$. We conclude that $(a-b-x) + (a+b-y)i \in D_n \subset B_n$ and that $x+yi \in (B_n + (1+i)(a+bi)) \subset U$. \end{proof} \subsection{Main Results}\label{finally} \begin{theorem} (Lenstra, \cite{Lenstra})\label{main_result} For $n \geq 0$, $A_{\mathbb{Z}[i],n} = \phi_{\Z[i]}^{-1}([0,n])= B_n$. \end{theorem} \begin{proof} Proof by induction. Example \ref{example_in_G} computes our base cases and shows that $A_{\mathbb{Z}[i],n} = B_n$ when $n =0,1,$ and $2$. Suppose that $n \geq 2$ and $A_{\mathbb{Z}[i],j} = B_j$ for all $j < n$. If $(B_n \setminus (1+i)\Z[i]) \subset A_{\Z[i],n}$, then $A_{\mathbb{Z}[i],n} = B_{n}$ by Lemma \ref{subset_containment}. It is clear that if $a + bi \in B_{n-1} = A_{\Z[i], n-1}$, then $a +bi \in A_{\Z[i], n}$. To prove our theorem, it therefore suffices to prove that if $a + bi \in B_n \setminus (B_{n-1} \cup (1+i) \Z[i])$, then $a + bi \in A_{\Z[i], n}$. Lemma \ref{small} shows that if $a+bi \in B_n \setminus (B_{n-1} \cup (1+i) \Z[i])$ and $a + bi \in Oct_{n-1}$, then $B_{n-1} \twoheadrightarrow \Z[i]/(a+bi)$. As $B_{n-1} = A_{\Z[i], n-1}$, $a+bi \in A_{\Z[i], n}$. If $a + bi \notin Oct_{n-1}$, it is certainly not in $B_{n-1}$, so the set of $a+bi \in B_n \setminus (B_{n-1} \cup (1+i) \Z[i])$ that are not in $Oct_{n-1}$ is the set $B_n \setminus (Oct_{n-1} \cup (1+i) \Z[i])$. Suppose that $a + bi \in B_n \setminus (Oct_{n-1} \cup (1+i) \Z[i])$, that $\alpha = \max (|a|, |b|)$, and that $\beta = \min (|a|, |b|)$. As $\alpha > \beta \geq 0$, Proposition \ref{inside_the_octogon} says that $\mathscr{S}_{\alpha + \beta i} \cap Oct_{n-1} \subset U = \bigcup_{q \in \Z[i]} (B_{n-1} + q (a+bi))$ and Proposition \ref{outside_the_octogon} says that $\mathscr{S}_{\alpha + \beta i} \setminus Oct_{n-1} \subset U$. The union $\mathscr{S}_{\alpha + \beta i} \subset U$ and $B_{n-1}$ is closed under multiplication by units, so $B_{n-1} = A_{\Z[i], n-1} \twoheadrightarrow \Z[i]/(\alpha + \beta i)$ by Lemma \ref{triangle}.
As $\alpha + \beta i \in A_{\Z[i], n}$, $a+bi \in A_{\Z[i], n}$ by Corollary \ref{you_get_the_whole_set}. We have now shown that $B_n \setminus (B_{n-1} \cup (1+i) \Z[i]) \subset A_{\Z[i],n}$, as required. \end{proof} We can now prove Theorem \ref{pre-images} and describe the sets $\phi_{\Z[i]}^{-1}(n)$. \begin{proof} (of Theorem \ref{pre-images}) As Theorem \ref{main_result} shows that $\phi_{\Z[i]}^{-1}([0,n]) = B_n$, it follows that, for $n \geq 1$, \begin{align*} \phi_{\Z[i]}^{-1}(n) &= B_n \setminus B_{n-1}\\ & = \coprod_{j=0}^{\lfloor n/2 \rfloor} 2^j S_{n-2j} \setminus \left (\coprod_{j=0}^{\lfloor (n-1)/2 \rfloor} 2^j S_{n-2j-1} \right ). \end{align*} Then, for $k \geq 1$ and $n = 2k+1$, \begin{align*} \phi_{\Z[i]}^{-1}(2k+1) &= B_{2k+1} \setminus B_{2k}\\ & = \coprod_{j=0}^{k} 2^j ( S_{2(k-j)+1} \setminus S_{2(k-j)}) \\ & = \displaystyle \coprod _{j=0}^{k} \left \{ a+bi: \begin{array}{c} 2^j \parallel (a+bi); |a|, |b|\leq w_n - 2^{j+1}; \\ |a| + |b| \leq w_{n+1} - 3 \cdot 2^j ,\\ \text{ and either } \max(|a|, |b|) > w_{n-1} - 2^{j+1} \\ \text{ or } |a| + |b| > w_{n} - 3 \cdot 2^j \end{array} \right \}\\ \intertext{ and, for $k \geq 1$ and $n = 2k$,} \phi_{\Z[i]}^{-1}(2k) &= B_{2k} \setminus B_{2k-1}\\ & = (2^k S_0) \cup \coprod_{j=0}^{k-1} 2^j ( S_{2(k-j)} \setminus S_{2(k-j)-1}) \\ & = \begin{array}{c} \{\pm 2^k, \pm 2^k i \} \cup \\ \displaystyle \coprod _{j=0}^{k-1} \left \{ a+bi: \begin{array}{c}2^j \parallel (a+bi); |a|, |b|\leq w_n - 2^{j+1};\\ |a| + |b| \leq w_{n+1} - 3 \cdot 2^j ,\\ \text{ and either } \max(|a|, |b|) > w_{n-1} - 2^{j+1} \\ \text{ or } |a| + |b| > w_{n} - 3 \cdot 2^j \end{array} \right \}. \end{array} \end{align*} \end{proof} \section{Application: Answering Samuel's question}\label{Application} As mentioned in Sections~\ref{introduction} and \ref{history}, Pierre Samuel computed $|\phi_{\Z[i]}^{-1} (n)|$ for $n \in [0,8]$ (\cite{Samuel}, p. 290). He did not compute $|\phi_{\Z[i]}^{-1}(9)|$, presumably because the sets involved became so large that the computations became unwieldy. After all, $|\phi_{\Z[i]}^{-1}(8)| = 3364$ and $|A_{\Z[i],8}| = 6457$ (see Appendix~\ref{Table}). In this section, we will describe the naive method to find $|\phi_{\Z[i]}^{-1}(9)|$ using techniques known when Samuel wrote his survey. Then we will describe the (still exponential) techniques implied by Lenstra's theorem to compute $|\phi_{\Z[i]}^{-1}(9) |$. Lastly, we present a closed-form formula that computes $|\phi_{\Z[i]}^{-1}(9) |$ directly. Appendix~\ref{Table} is a table presenting $|\phi_{\Z[i]}^{-1}(n) |$ and $|A_{\Z[i], n}|$ for $n \in [0, 20]$, and Appendix~\ref{Code} contains the Sage code used to do this section's calculations. To clarify, the last subsection introduces a closed-form formula; the methods of the previous subsections require exponentially many operations.
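The gap between the exponential methods and the closed form is easy to see in a few lines of code. The following sketch is only an illustration with our own helper names (the code actually used for this section's calculations is in Appendix~\ref{Code}): it enumerates $B_n$ directly from Definition~\ref{sets B_n}, one $(1+i)$-ary digit at a time, and compares the counts $|B_n| - |B_{n-1}|$ with the closed form of Theorem~\ref{size_of_sets}.
\begin{lstlisting}
DIGITS = [(0, 0), (1, 0), (-1, 0), (0, 1), (0, -1)]    # 0, +1, -1, +i, -i

def gauss_mult(z, v):
    (a, b), (c, d) = z, v
    return (a*c - b*d, a*d + b*c)

def B(n):
    # The set B_n, built one (1+i)-ary digit at a time.
    layer, power = set(DIGITS), (1, 1)                 # B_0 and (1+i)^1
    for _ in range(n):
        shifts = [gauss_mult(v, power) for v in DIGITS]
        layer = {(z[0] + s[0], z[1] + s[1]) for z in layer for s in shifts}
        power = gauss_mult(power, (1, 1))
    return layer

def closed_form(n):
    # closed form for |phi^{-1}(n)|, valid for n >= 2
    k = n // 2
    return 14*4**k - 14*2**k + 4 if n % 2 == 0 else 28*4**k - 20*2**k + 4

for n in range(2, 10):
    print(n, len(B(n)) - len(B(n - 1)), closed_form(n))
\end{lstlisting}
The enumeration grows exponentially with $n$, while the closed form is immediate; the rest of this section carries out that comparison in detail.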
\subsection{Before Lenstra} We present a reasonable method to calculate $|\phi_{\Z[i]}^{-1}(9) |$ with the knowledge Samuel had when he wrote his survey \cite{Samuel}. He had computed $|\phi_{\Z[i]}^{-1}(n) |$ for $n \in [0, \ldots, 8]$, so he knew that $|A_{\Z[i],8}| = 6457$. He also knew that if $a + bi \in \phi_{\Z[i]}^{-1}(9) $, then $\Nm(a+bi) \leq 6457$, as every equivalence class in $\Z[i]/(a+bi)\Z[i]$ must have a representative in $A_{\Z[i],8}$. In order to find $|\phi_{\Z[i]}^{-1}(9) |$, he would have had to examine each element of norm $\leq 6457$ and check whether every residue class modulo that element has a representative in $A_{\Z[i], 8}$. We reduce our study to pairs $a + bi$ such that $a \geq b \geq 0$, as that cuts our search range by approximately a factor of $8$. A simple program in Sage (not available in 1971) shows that $|\{a+bi \in \Z[i]: 0 \leq b \leq a, \Nm(a+bi) \leq 6457 \} | = 2605$ (see Appendix B's first listing). We then go through this list and remove all elements that are already in $A_{\Z[i],8}$. Appendix B's second program shows there are $842$ elements $a+bi \in A_{\Z[i],8}$ such that $0 \leq b \leq a$, so we would have to examine $1763$ elements (see Appendix B's third program). For each of these $1763$ remaining $a+bi$, we would have to check whether every element in the associated set $S \cup T$ (see Lemma \ref{two_squares}) is congruent to some element of $A_{\Z[i],8}$ modulo $a+bi$. This means checking $7476972$ cosets against $6457$ elements.

\subsection{Using Lenstra's Theorem} Lenstra's Theorem makes it significantly easier to study $|\phi_{\Z[i]}^{-1}(9) |$. Every element of $A_{\Z[i],9} \setminus A_{\Z[i],8}$ can be written as $u(1+i)^9 +b$ for some $u \in \{ \pm 1, \pm i \}$ and some $b \in A_{\Z[i],8}$. A simple way to find $|\phi_{\Z[i]}^{-1}(9) |$ would be to compute all $4 \cdot 6457 = 25828$ sums $\{ u(1+i)^9 + b : u \in \Z[i]^{\times}, b \in A_{\Z[i],8} \}$, remove all duplicate elements from the list, and then remove any elements that are also in $A_{\Z[i], 8}$. There are ways to make the general computation more efficient, but they all involve calculating $\sim c\,|A_{\Z[i],n}|$ sums, where $c$ is a small constant. The table in Appendix \ref{Table} shows that this still involves exponentially many sums as a function of $n$.

\subsection{Explicit Formula} Computing $|\phi_{\Z[i]}^{-1}(9) |$ is the same as calculating $|A_{\Z[i],9} \setminus A_{\Z[i],8}| = |A_{\Z[i],9}| - |A_{\Z[i],8}|$. Theorem \ref{octo_union} shows that each $A_{\Z[i],n} \setminus 0$ can be written as a disjoint union of multiples of the sets $S_j$, so to find $|B_n|$, we need to know the sizes $|S_j|$.

\begin{lemma} For $n \geq 1$, $|S_n| = 3(w_n -2)^2 + 2(w_n -2) -6(w_n - w_{n-1})(w_n - w_{n-1} -1)$.
\end{lemma}

\begin{proof} By symmetry,
\begin{equation*}
\begin{split}
|S_n| = {}& 4 | \{ x \in \Z: 1 \leq x \leq w_n -2, 2 \nmid x\}| \\
& + 4 | \{ x+yi \in \Z[i]: 1 \leq x,y \leq w_n -2, x + y \leq w_{n+1} - 3, 2 \nmid \gcd(x,y) \}|\\
={}& 4 \left (\frac{w_n -2}{2} \right ) + 4 | \{x + yi \in \Z[i]: 1 \leq x, y \leq w_n -2; 2 \nmid \gcd (x,y) \}|\\
& - 4 |\{x+yi\in \Z[i]: w_{n+1} - 2 \leq x+y; w_{n+1} - w_n \leq x, y\leq w_n -2; 2 \nmid \gcd(x,y) \} |\\
={}& 4 \left (\frac{w_n -2}{2} \right ) + 4 | \{x + yi \in \Z[i]: 1 \leq x, y \leq w_n -2 \}|\\
& -4 | \{x + yi \in \Z[i]: 1 \leq x, y \leq w_n -2; 2\mid x; 2 \mid y \}|\\
& - 4 \sum_{\mathclap{\substack{x = w_{n+1} - w_n \\x \text{ odd} }}}^{w_n -2} | \{y: w_{n+1} -2 -x \leq y \leq w_n -2 \} | \\
& - 4 \sum_{\mathclap{\substack{x = w_{n+1} - w_n \\x \text{ even} }}}^{w_n -2} | \{y: 2 \nmid y, w_{n+1} -2 -x \leq y \leq w_n -2 \}| \\
={}& 4 \left (\frac{w_n -2}{2} \right ) + 4 (w_n -2)^2 -4 \left ( \frac{w_n -2}{2} \right )^2 - 4 \sum_{\mathclap{\substack{x = w_{n+1} - w_n \\x \text{ odd} }}}^{w_n -2} \left( x - (w_{n+1} - w_n) +1 \right) \\
& - \frac{4}{2} \sum_{\mathclap{\substack{x = w_{n+1} - w_n \\x \text{ even} }}}^{w_n -2} \left( x - (w_{n+1} -w_n) \right) \\
={}& 3(w_n -2)^2 + 2(w_n -2) - 4 \hspace{-.7 cm}\sum_{\mathclap{\substack{x = 0 \\x \text{ odd} }}}^{w_{n+2} -w_{n+1} -3} \hspace{-.7 cm} (x+1) - 2 \hspace{-.7 cm} \sum_{\mathclap{\substack{x = 0 \\x \text{ even} }}}^{w_{n+2} -w_{n+1} -2} \hspace{-.7 cm} x \\
={}& 3 (w_n -2)^2 + 2(w_n -2) -6 \sum_{\substack{ x = 0\\x \text{ even}}}^{\mathclap{w_{n+2} - w_{n+1} -2}} x\\
={}& 3 (w_n -2)^2 + 2(w_n -2) -12 \sum_{x = 0}^{\mathclap{w_n - w_{n-1} -1}} x\\
={}& 3 (w_n -2)^2 + 2(w_n -2) -6 (w_n - w_{n-1})(w_n - w_{n-1} -1).
\end{split}
\end{equation*}
\end{proof}

\begin{coro} If $n = 2k +1$, $k \geq 0$, then $|S_n| = 42 \cdot 4^k - 34 \cdot 2^k + 8$. If $n = 2k$, $k \geq 1$, then $|S_n| = 21 \cdot 4^k - 24 \cdot 2^k + 8$. \end{coro}

We can now use our formula for $|S_n|$ to find $|A_{\Z[i],n}|$.

\begin{theorem}\label{pre-image_cardinality} For all $k \geq 0$, $|A_{\Z[i], 2k+1}| = 14 \cdot 4^{k+1} - 34 \cdot 2^{k+1} + 8k + 29$. For $k \geq 1$, $|A_{\Z[i], 2k}| = 28 \cdot 4^{k} - 48 \cdot 2^{k} + 8k + 25$. \end{theorem}

\begin{proof} Theorem \ref{octo_union} shows that $A_{\Z[i],n} \setminus 0 = \coprod_{j=0}^{\lfloor n/2 \rfloor} 2^j S_{n -2j}$, so \\ $|A_{\Z[i],n}|= 1 + \sum_{j=0}^{\lfloor n/2 \rfloor} |S_{n-2j}|$. Therefore
\begin{equation*}
\begin{split}
|A_{\Z[i],2k}| &= 1 + |S_0| + \sum_{j=1}^k |S_{2j}|\\
&=5 + \sum_{j=1}^k \left( 21 \cdot 4^j - 24 \cdot 2^j + 8 \right) \\
&= 5 + \sum_{j=0}^{k-1} \left( 84 \cdot 4^j - 48 \cdot 2^j+ 8 \right)\\
&=84 \left (\frac{ 4^{k}-1}{3} \right ) - 48 \cdot 2^{k} + 8k + 53 \\
&= 28 \cdot 4^{k} - 48 \cdot 2^{k} + 8k + 25
\end{split}
\end{equation*}
and
\begin{equation*}
\begin{split}
|A_{\Z[i],2k +1}| &= 1 + \sum_{j=0}^k |S_{2j +1}|\\
& = 1 + \sum_{j=0}^k \left( 42 \cdot 4^j - 34 \cdot 2^j +8 \right)\\
& = 1 + 42 \left (\frac{ 4^{k+1}-1}{3} \right ) - 34 \left( 2^{k+1} - 1 \right) + 8(k+1) \\
& = 14 \cdot 4^{k+1} - 34 \cdot 2^{k+1} + 8k + 29.
\end{split}
\end{equation*}
\end{proof}

This naturally leads to Theorem \ref{size_of_sets}.

\begin{proof} (Of Theorem \ref{size_of_sets}).
Applying Theorem \ref{pre-image_cardinality} reveals that
\begin{align*}
|\phi_{\Z[i]}^{-1}(2k)| & = | A_{\Z[i], 2k} \setminus A_{\Z[i], 2k-1}|\\
& = | A_{\Z[i], 2k}| - |A_{\Z[i],2k -1}|\\
& = (28 \cdot 4^{k} - 48 \cdot 2^{k} + 8k + 25) - (14 \cdot 4^{k} - 34 \cdot 2^{k} + 8(k-1) + 29 )\\
& = 14 \cdot 4^k -14 \cdot 2^k +4\\
\intertext{ and }
|\phi_{\Z[i]}^{-1}(2k+1)| & = | A_{\Z[i], 2k+1} \setminus A_{\Z[i], 2k}|\\
& = | A_{\Z[i], 2k+1}| - |A_{\Z[i],2k }|\\
& = (14 \cdot 4^{k+1} - 34 \cdot 2^{k+1} + 8k + 29) - (28 \cdot 4^{k} - 48 \cdot 2^{k} + 8k + 25)\\
& = 28 \cdot 4^k - 20 \cdot 2^k +4.
\end{align*}
\end{proof}

\newpage
\appendix
\section{Table}\label{Table}
\begin{table}[h!]
\centering
\begin{tabular}{|c c c c|}
\hline
$n$ & $|S_n|$ & $|B_n|$ & $|\phi_{\Z[i]}^{-1}(n)|$\\
\hline
0& 4& 5& 5\\
1& 16& 17& 12\\
2& 44& 49& 32\\
3& 108& 125& 76\\
4& 248& 297& 172\\
5& 544& 669& 372\\
6& 1160& 1457& 788\\
7& 2424& 3093& 1636\\
8& 5000& 6457& 3364\\
9& 10216& 13309& 6852\\
10& 20744& 27201& 13892\\
11& 41928& 55237& 28036\\
12& 84488& 111689& 56452\\
13& 169864& 225101& 113412\\
14& 341000& 452689& 227588\\
15& 683784& 908885& 456196\\
16& 1370120& 1822809& 913924\\
17& 2743816& 3652701& 1829892\\
18& 5492744& 7315553& 3662852\\
19& 10992648& 14645349& 7329796\\
20& 21995528& 29311081& 14665732\\
21& 44005384& 58650733& 29339652\\
22& 88031240& 117342321& 58691588\\
23& 176091144& 234741877& 117399556\\
24& 352223240& 469565561& 234823684\\
25& 704503816& 939245693& 469680132\\
\hline
\end{tabular}
\caption{$|S_n|$, $|B_n|$, and $|\phi_{\Z[i]}^{-1}(n)|$ for $n \in [0,25]$}
\label{table:1}
\end{table}

\section{Code}\label{Code}
Finding the set of pairs $x +yi$ with $0 \leq y \leq x$ and $x^2 + y^2 \leq 6457$:\\
\begin{lstlisting}[numbers=left,numberstyle=\tiny,numbersep=0pt]
sage: Norm_set =[]
sage: for x in [0,..,80]:
          f = min(x,floor(sqrt(6457-x^2)))
          for y in [0,..,f]:
              Norm_set.append((x,y))
sage: len(Norm_set)
\end{lstlisting}
Here is the code to compute the elements in $B_8$ such that $0 \leq b \leq a$. Recall that $B_8 = S_8 \cup 2 \cdot S_6 \cup 4\cdot S_4 \cup 8\cdot S_2 \cup 16\cdot S_0.$ We compute the part of $16 \cdot S_0$ such that $0 \leq b \leq a $, and then repeat with $8 \cdot S_2$, etc.
\\
\begin{lstlisting}[numbers=left,numberstyle=\tiny,numbersep=0pt]
sage: B=[(0,0)]
sage: for x in [1,..,1]:
          for y in [0,..,x]:
              if gcd(x,y) % 2 == 1:
                  if (x +y) <=1:
                      B.append((16*x,16*y))
sage: for x in [1,..,4]:
          for y in [0,..,x]:
              if gcd(x,y) % 2 == 1:
                  if (x +y) <=5:
                      B.append((8*x,8*y))
sage: for x in [1,..,10]:
          for y in [0,..,x]:
              if gcd(x,y) % 2 == 1:
                  if (x +y) <=13:
                      B.append((4*x,4*y))
sage: for x in [1,..,22]:
          for y in [0,..,x]:
              if gcd(x,y) % 2 == 1:
                  if (x +y) <=29:
                      B.append((2*x, 2*y))
sage: for x in [1,..,46]:
          for y in [0,..,x]:
              if gcd(x,y) % 2 == 1:
                  if (x +y) <=61:
                      B.append((x,y))
sage: print(len(B))
\end{lstlisting}
Computing the number of pairs $x +yi$ with $0 \leq y \leq x$, $x^2 + y^2 \leq 6457$, and $x +yi$ not in $B_8$:\\
\begin{lstlisting}[numbers=left,numberstyle=\tiny,numbersep=0pt]
sage: Norm_List=[]
sage: for z in Norm_set:
          if z not in B:
              Norm_List.append(z)
sage: print(len(Norm_List))
\end{lstlisting}
Computing the sizes of the sets $S_n$ for $0 \leq n \leq 25$:\\
\begin{lstlisting}[numbers=left,numberstyle=\tiny,numbersep=0pt]
sage: S_size=[4, 16]
sage: for n in [1,..,12]:
          S_size.append(21*4^n - 24*2^n + 8)
          S_size.append(42*4^n - 34*(2^n) +8)
sage: print(S_size)
\end{lstlisting}
Computing the sizes of the sets $B_n$ for $0 \leq n \leq 21$:\\
\begin{lstlisting}[numbers=left,numberstyle=\tiny,numbersep=0pt]
sage: B_size=[0,..,21]
sage: for n in [0,..,10]:
          B_size[2*n]=1
          for m in [0,..,n]:
              B_size[2*n] = B_size[2*n] + S_size[2*m]
          B_size[2*n +1]=1
          for m in [0,..,n]:
              B_size[2*n+1] = B_size[2*n +1] + S_size[2*m +1]
sage: print(B_size)
\end{lstlisting}
Checking that the formula does give us the size of $B_n$ for $0 \leq n \leq 25$:\\
\begin{lstlisting}[numbers=left,numberstyle=\tiny,numbersep=0pt]
sage: formula_list=[0,..,25]
sage: formula_list[0]=5
sage: formula_list[1] =17
sage: for n in [1,..,12]:
          formula_list[2*n] = 28*4^n - 48*2^n + 8*n +25
          formula_list[2*n +1] = 14*4^(n+1) - 34*2^(n+1) + 8*n +29
sage: print(formula_list)
\end{lstlisting}
Computing the sizes of the sets $\phi_{\Z[i]}^{-1}(n)$ for $1 \leq n \leq 21$:\\
\begin{lstlisting}[numbers=left,numberstyle=\tiny,numbersep=0pt]
sage: phi_size=[0,..,21]
sage: phi_size[0]=B_size[0]
sage: for n in [1,..,21]:
          phi_size[n] = B_size[n] - B_size[n-1]
sage: print(phi_size)
\end{lstlisting}
Checking that the formula does give $\phi_{\Z[i]}^{-1}(n)$ for $2 \leq n \leq 25$:\\
\begin{lstlisting}[numbers=left,numberstyle=\tiny,numbersep=0pt]
sage: for n in [1,..,12]:
          print(2*n, 14*4^n - 14*2^n +4)
          print(2*n+1, 28*4^n - 20*2^n + 4)
\end{lstlisting}

\section*{Acknowledgements}
I would like to thank my very patient spouse, Loren LaLonde, who has listened to me talk about this problem for the last fifteen years, and who has ensured that my LaTeX always compiled. I greatly appreciate the help from Michael Bridgland, who not only read Martin Fuchs's thesis \cite{Fuchs} for me (I don't speak German), but who also asked a question that led to Lemma \ref{max_power}. My frequent collaborator Jon Grantham gave me very useful comments on an earlier draft, leading to both the history and application sections. I would also like to thank H.W. Lenstra, Jr. for his helpful feedback and Franz Lemmermeyer for his help with this paper's background research and literature review.
I highly recommend Lemmermeyer's survey, ``The Euclidean Algorithm in Algebraic Number Fields,'' to anyone interested in the subject \cite{Lemmermeyer}.

\begin{thebibliography}{99}
\bibitem{Fuchs} M. Fuchs, \newblock{``Der Minimale Euklidische Algorithmus im Ring der ganzen Gausschen Zahlen,''} \newblock{Munich}, 2003. \newblock{http://www.mafu.ws/papers/studienarbeit.pdf}
\bibitem{Graves} H. Graves, \newblock{``The Minimal Euclidean Function on the Gaussian Integers,''} submitted. \newblock{{\tt arXiv:2110.13112 [math.FA]}}
\bibitem{Lemmermeyer} F. Lemmermeyer, \newblock{``The Euclidean Algorithm in Algebraic Number Fields,''} 2004. \newblock{http://www.rzuser.uni-heidelberg.de/~hb3/publ/survey.pdf}
\bibitem{Lenstra} H.W. Lenstra, Jr., \newblock{``Lectures on Euclidean Rings,''} \newblock{Bielefeld}, 1974.
\bibitem{Motzkin} T. Motzkin, \newblock{``The Euclidean Algorithm,''} \newblock{\sl Bull. Am. Math. Soc.}, 55 (1949), 1142--1146.
\bibitem{Samuel} P. Samuel, \newblock{``About Euclidean Rings,''} \newblock{\sl J. Algebra}, 19 (1971), 282--301.
\end{thebibliography}
\end{document}
2205.14039v1
http://arxiv.org/abs/2205.14039v1
Group-invariant max filtering
\documentclass[12pt]{article} \usepackage[margin=1in]{geometry} \usepackage{amsthm} \usepackage{amssymb} \usepackage{amsmath} \usepackage{graphicx} \usepackage{mathdots} \usepackage{mathtools} \usepackage{url} \usepackage{color} \usepackage{framed} \usepackage{tikz} \usepackage{stmaryrd} \usepackage[ruled,vlined]{algorithm2e} \usepackage{array} \newcommand{\vsubseteq}{\rotatebox[origin=c]{90}{$\subseteq$}} \newcommand{\llangle}{\langle\hspace{-2.5pt}\langle} \newcommand{\rrangle}{\rangle\hspace{-2.5pt}\rangle} \tikzset{every node/.style={shape=circle,fill=black,inner sep=0pt, minimum size=0.5em,anchor=mid}} \newcommand\blfootnote[1]{ \begingroup \renewcommand\thefootnote{}\footnote{#1} \addtocounter{footnote}{-1} \endgroup } \newtheorem{theorem}{Theorem}\newtheorem{corollary}[theorem]{Corollary} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{proposition}[theorem]{Proposition} \theoremstyle{definition} \newtheorem{problem}[theorem]{Problem} \newtheorem{example}[theorem]{Example} \newtheorem{definition}[theorem]{Definition} \title{Group-invariant max filtering} \author{ Jameson~Cahill\footnote{Department of Mathematics and Statistics, University of North Carolina Wilmington, Wilmington, NC} \quad Joseph~W.~Iverson\footnote{Department of Mathematics, Iowa State University, Ames, IA} \quad Dustin~G.~Mixon\footnote{Department of Mathematics, The Ohio State University, Columbus, OH} \footnote{Translational Data Analytics Institute, The Ohio State University, Columbus, OH} \quad Daniel~Packer\footnotemark[3] } \date{} \begin{document} \maketitle \begin{abstract} Given a real inner product space $V$ and a group $G$ of linear isometries, we construct a family of $G$-invariant real-valued functions on $V$ that we call \textit{max filters}. In the case where $V=\mathbb{R}^d$ and $G$ is finite, a suitable max filter bank separates orbits, and is even bilipschitz in the quotient metric. In the case where $V=L^2(\mathbb{R}^d)$ and $G$ is the group of translation operators, a max filter exhibits stability to diffeomorphic distortion like that of the scattering transform introduced by Mallat. We establish that max filters are well suited for various classification tasks, both in theory and in practice. \end{abstract} \section{Introduction} Modern machine learning has been extraordinarily successful in domains where large volumes of labeled data are available~\cite{KrizhevskySH:12,Silver:16}. Indeed, highly expressive models can generalize once they fit an appropriately large training set. Unfortunately, many important domains are plagued by a scarcity of data or by expensive labels (or both). One way to bridge this gap is by augmenting the given dataset with the help of a large family of innocuous distortions. In many cases, the distortions correspond to the action of a group, meaning the ground truth exhibits known symmetries. Augmenting the training set by applying the group action encourages the model to learn these symmetries. While this approach has been successful~\cite{SimardSP:03,CiresanMGS:10,KrizhevskySH:12,ChenDL:20}, it is extremely inefficient to train a large, symmetry-agnostic model to find a highly symmetric function. One wonders: \begin{center} \textit{Why not use a model that already accounts for known symmetries?} \end{center} This motivates \textit{invariant machine learning}~(e.g., \cite{Wood:96,ZhangL:04,GuEtal:18,VillarHSYB:21,BalanHS:22}), where the model is invariant to underlying symmetries in the data. 
To illustrate, suppose an object is represented by a point $x$ in a set $V$, but there is a group $G$ acting on $V$ such that the same object is also represented by $gx\in V$ for every $g\in G$. This ambiguity emerges, for example, when using a matrix to represent a point cloud or a graph, since the representation depends on the labeling of the points or vertices. If we apply a $G$-invariant feature map $\Phi\colon V\to F$, then the learning task can be performed in the feature domain $F$ without having to worry about symmetries in the problem. Furthermore, if $\Phi$ separates the $G$-orbits in $V$, then no information is lost by passing to the feature domain. In practice, $V$ and $F$ tend to be vector spaces out of convenience, and $G$ is frequently a linear group. While our interest in invariants stems from modern machine learning, maps like $\Phi$ have been studied since Cayley established \textit{invariant theory} in the nineteenth century~\cite{Cayley:45}. Here, we take $V=\mathbb{C}^d$ and $G\leq\operatorname{GL}(V)$, and the maps of interest consist of the $G$-invariant polynomials $\mathbb{C}[V]^G$. In 1890, Hilbert~\cite{Hilbert:90} proved that $\mathbb{C}[V]^G$ is finitely generated as a $\mathbb{C}$-algebra in the special case where $G$ is the image of a representation of $\operatorname{SL}(\mathbb{C}^k)$, meaning one may take the feature domain $F$ to be finite dimensional (one dimension for each generator). Since $G$ is not a compact subset of $\mathbb{C}^{d\times d}$ in such cases, there may exist distinct $G$-orbits whose closures intersect, meaning no continuous $G$-invariant function can separate them; this subtlety plays an important role in Mumford's more general \textit{geometric invariant theory}~\cite{MumfordFK:94}. In general, the generating set of $\mathbb{C}[V]^G$ is often extraordinarily large~\cite{Defresne:08}, making it impractical for machine learning applications. To alleviate this issue, there has been some work to construct \textit{separating sets} of polynomials~\cite{DerksenK:15,Domokos:17,Domokos:arxiv}, i.e., sets that separate as well as $\mathbb{C}[V]^G$ does without necessarily generating all of $\mathbb{C}[V]^G$. For every reductive group $G$, there exists a separating set of $2d+1$ invariant polynomials~\cite{Defresne:08,CahillCC:20}, but the complexity of evaluating these polynomials is still quite large. Furthermore, these polynomials tend to have high degree, and so they are numerically unstable in practice. In practice, one also desires a quantitative notion of separating so that distant orbits are not sent to nearby points in the feature space, and this behavior is not always afforded by a separating set of polynomials~\cite{CahillCC:20}. Despite these shortcomings, polynomial invariants are popular in the data science literature due in part to their rich algebraic theory, e.g.,~\cite{BandeiraBKPWW:17,PerryWBRS:19,CahillCC:20,BendoryELS:22,BalanHS:22}. In this paper, we focus on the case where $V$ is a real inner product space and $G$ is a group of linear isometries of $V$. We introduce a family of non-polynomial invariants that we call \textit{max filters}. In Section~\ref{sec.preliminaries}, we define max filters, we identify some basic properties, and we highlight a few familiar examples. 
In Section~\ref{sec.separating}, we use ideas from~\cite{DymG:22} to establish that $2d$ generic max filters separate all $G$-orbits when $G$ is finite (see Corollary~\ref{cor.2d templates suffice for finite groups}), and then we describe various settings in which max filtering is computationally efficient. In Section~\ref{sec.lipschitz}, we show that when $G$ is finite, a sufficiently large random max filter bank is bilipschitz with high probability; see Theorem~\ref{thm.main result}. This is the first known construction of invariant maps for a general class of groups that enjoy a lower Lipschitz bound, meaning they separate orbits in a quantitative sense. In the same section, we later show that when $V=L^2(\mathbb{R}^d)$ and $G$ is the group of translations, certain max filters exhibit stability to diffeomorphic distortion akin to what Mallat established for his scattering transform in~\cite{Mallat:12}; see Theorem~\ref{thm.mallat bound}. In Section~\ref{sec.classification}, we explain how to select max filters for classification in a couple of different settings, we determine the subgradient of max filters to enable training, and we characterize how random max filters behave for the symmetric group. In Section~\ref{sec.numerics}, we use max filtering to process real-world datasets. Specifically, we visualize the shape space of voting districts, we use electrocardiogram data to classify whether patients had a heart attack, and we classify a multitude of textures. Surprisingly, we find that even in cases where the data do not appear to exhibit symmetries in a group $G$, max filtering with respect to $G$ can still reveal salient features. We conclude in Section~\ref{sec.discussion} with a discussion of opportunities for follow-on work. \section{Preliminaries} \label{sec.preliminaries} Given a real inner product space $V$ and a group $G$ of linear isometries of $V$, consider the quotient space $V/G$ consisting of the $G$-orbits $[x]:=G\cdot x$ for $x\in V$. This quotient space is equipped with the metric \[ d([x],[y]) :=\inf_{p\in[x],q\in[y]} \|p-q\|. \] (Indeed, $d$ satisfies the triangle inequality since $G$ is a group of isometries of $V$.) This paper is concerned with the following function. \begin{definition} The \textbf{max filtering map} $\llangle \cdot,\cdot\rrangle\colon V/G\times V/G\to \mathbb{R}$ is defined by \[ \llangle [x],[y]\rrangle :=\sup_{p\in[x],q\in[y]}\langle p,q\rangle. \] Sometimes, ``max filtering map'' refers to the related function $\llangle [\cdot],[\cdot]\rrangle\colon V\times V\to \mathbb{R}$ instead. The intended domain should be clear from context. \end{definition} We note that since $G$ consists of linear isometries, it is closed under adjoints, and so the max filtering map can be alternatively expressed as \[ \llangle [x],[y]\rrangle =\sup_{g\in G}\langle gx,y\rangle =\sup_{g\in G}\langle x,gy\rangle. \] Furthermore, if $G$ is topologically closed (e.g., finite), then the supremum can be replaced with a maximum: \[ \llangle [x],[y]\rrangle =\max_{g\in G}\langle gx,y\rangle =\max_{g\in G}\langle x,gy\rangle. \] The max filtering map satisfies several important properties, summarized below. \begin{lemma} \label{lem.max filter product properties} Suppose $x,y,z\in V$. Then each of the following holds: \begin{itemize} \item[(a)] $\llangle [x],[x]\rrangle=\|x\|^2$. \item[(b)] $\llangle [x],[y]\rrangle=\llangle [y],[x]\rrangle$. \item[(c)] $\llangle [x],[ry]\rrangle=r\llangle [x],[y]\rrangle$ for every $r\geq0$. 
\item[(d)] $\llangle [x],[\cdot]\rrangle\colon V\to\mathbb{R}$ is convex. \item[(e)] $\llangle [x],[y+z]\rrangle\leq\llangle [x],[y]\rrangle+\llangle [x],[z]\rrangle$. \item[(f)] $d([x],[y])^2=\|x\|^2-2\llangle [x],[y]\rrangle+\|y\|^2$. \item[(g)] $\llangle [x],\cdot\rrangle\colon V/G\to\mathbb{R}$ is $\|x\|$-Lipschitz. \end{itemize} \end{lemma} \begin{proof} First, (a) and (b) are immediate, as is the $r=0$ case of (c). For $r>0$, observe that $q\in[ry]$ precisely when $q':=r^{-1}q\in[y]$, since each member of $G$ is a linear isometry. Thus, \[ \llangle [x],[ry]\rrangle =\sup_{p\in[x],q\in[ry]}\langle p,q\rangle =\sup_{p\in[x],q'\in[y]}\langle p,rq'\rangle =r\cdot\sup_{p\in[x],q'\in[y]}\langle p,q'\rangle =r\llangle [x],[y]\rrangle. \] Next, (d) follows from the identity $\llangle [x],[y]\rrangle=\sup_{p\in[x]}\langle p,y\rangle$, which expresses $\llangle [x],[\cdot]\rrangle$ as a pointwise supremum of convex functions. For (e), we apply (d) and (c): \[ \llangle [x],[y+z]\rrangle =\llangle [x],[\tfrac{1}{2}\cdot2y+\tfrac{1}{2}\cdot2z]\rrangle \leq\tfrac{1}{2}\llangle [x],[2y]\rrangle+\tfrac{1}{2}\llangle [x],[2z]\rrangle =\llangle [x],[y]\rrangle+\llangle [x],[z]\rrangle. \] Next, (f) is immediate. For (g), select any $y,z\in V$. We may assume $\llangle [x],[y]\rrangle\geq \llangle [x],[z]\rrangle$ without loss of generality. Select any $p\in[y]$, $q\in[z]$, and $\epsilon>0$, and take $g\in G$ such that $\langle x,gp\rangle>\llangle [x],[y]\rrangle-\epsilon$. Then \begin{align*} |\llangle [x],[y]\rrangle-\llangle [x],[z]\rrangle| &=\llangle [x],[y]\rrangle-\llangle [x],[z]\rrangle\\ &< \langle x,gp\rangle+\epsilon-\langle x,gq\rangle =\langle x,g(p-q)\rangle+\epsilon \leq\|x\|\|p-q\|+\epsilon. \end{align*} Since $p$, $q$, and $\epsilon$ were arbitrary, the result follows. \end{proof} \begin{definition} Given a \textbf{template} $z\in V$, we refer to $\llangle [z],\cdot\rrangle\colon V/G\to\mathbb{R}$ as the corresponding \textbf{max filter}. Given a (possibly infinite) sequence $\{z_i\}_{i\in I}$ of templates in $V$, the corresponding \textbf{max filter bank} is $\Phi\colon V/G\to \mathbb{R}^I$ defined by $\Phi([x]):=\{\llangle [z_i],[x]\rrangle\}_{i\in I}$. \end{definition} In what follows, we identify a few familiar examples of max filters. \begin{example}[Norms] There are several norms that can be thought of as a max filter with some template. For example, consider $V=\mathbb{R}^n$. Then taking $G=\operatorname{O}(n)$ and any unit-norm template $z$ gives \[ \llangle [z],[x]\rrangle =\max_{g\in\operatorname{O}(n)}\langle gz,x\rangle =\|x\|. \] Similarly, the infinity norm is obtained by taking $G$ to be the group of signed permutation matrices and $z$ to be a standard basis element, while the $1$-norm comes from taking $G$ to be the group of diagonal orthogonal matrices and $z$ to be the all-ones vector. We can also recover various matrix norms when $V=\mathbb{R}^{m\times n}$. For example, taking $G\cong\operatorname{O}(m)\times\operatorname{O}(n)$ to be the group of linear operators of the form $X\mapsto Q_1XQ_2^{-1}$ for $Q_1\in\operatorname{O}(m)$ and $Q_2\in\operatorname{O}(n)$, then max filtering with any rank-$1$ matrix of unit Frobenius norm gives the spectral norm. \end{example} \begin{example}[Power spectrum] Consider the case where $V=L^2(\mathbb{R}/\mathbb{Z})$ and $G$ is the group of circular translation operators $T_a$ defined by $T_ag(t):=g(t-a)$ for $a\in\mathbb{R}$. (Here and throughout, functions in $L^2$ will be real valued by default.) 
Given a template $z_k$ of the form $z_k(t):=\cos(2\pi kt)$ for some $k\in\mathbb{N}$, it holds that \begin{align*} \llangle [z_k],[f]\rrangle =\max_{a\in [0,1)}\langle T_az_k,f\rangle &=\max_{a\in [0,1)}\int_0^1\cos(2\pi k(t-a))f(t)dt\\ &=\max_{a\in [0,1)}\operatorname{Re}\bigg(e^{2\pi ika}\int_0^1 f(t)e^{-2\pi ikt}dt\bigg) =|\hat{f}(k)|. \end{align*} A similar choice of templates recovers the power spectrum over finite abelian groups. \end{example} \begin{example}[Unitary groups] \label{ex.complex} While we generally assume $V$ is a real inner product space, our theory also applies in the complex setting. For example, consider the case where $V=\mathbb{C}^n$ and $G\leq\operatorname{U}(n)$. Then $V$ is a $2n$-dimensional real inner product space with \[ \langle x,y\rangle :=\operatorname{Re}(x^*y), \] where $x^*$ denotes the conjugate transpose of $x$. As such, $\operatorname{U}(n)\leq\operatorname{O}(V)$ since $g\in\operatorname{U}(n)$ implies \[ \langle gx,gy\rangle =\operatorname{Re}((gx)^*(gy)) =\operatorname{Re}(x^*g^*gy) =\operatorname{Re}(x^*y) =\langle x,y\rangle. \] Thus, $G\leq\operatorname{O}(V)$. \end{example} \begin{example}[Phase retrieval] \label{ex.phase retrieval} Suppose $V=\mathbb{C}^r$ and $G=\{c\cdot\operatorname{id}:|c|=1\}\leq\operatorname{U}(r)$. Then \[ \llangle [z],[x]\rrangle =\max_{|c|=1}\operatorname{Re}((cz)^*x) =|z^*x|. \] The max filter bank corresponding to $\{z_i\}_{i=1}^n$ in $V$ is given by $\Phi([x])=\{|z_i^*x|\}_{i=1}^n$. The inverse problem of recovering $[x]$ from $\Phi([x])$ is known as \textit{complex phase retrieval}~\cite{BalanCE:06,BandeiraCMN:14,ConcaEHV:15,Vinzant:15}, and over the last decade, several algorithms were developed to solve this inverse problem~\cite{CandesSV:13,DemanetH:14,CandesESV:15,WaldspurgerdAM:15,CandesLS:15,ChenC:17}. In the related setting where $V=\mathbb{R}^d$ and $G=\{\pm\operatorname{id}\}\leq\operatorname{O}(d)$, the analogous inverse problem is known as \textit{real phase retrieval}~\cite{BalanCE:06}. \end{example} \begin{example}[Matched filtering] In classical radar, the primary task is to locate a target. Here, a transmitter emits a pulse $p\in L^2(\mathbb{R})$, which then bounces off the target and is received at the transmitter's location with a known direction of arrival. The return signal $q$ is a noisy version of $T_ap$ for some $a>0$, where $T_a$ denotes the translation-by-$a$ operator defined by $T_af(t):=f(t-a)$. Since the transmitter-to-target distance is $a/2$ times the speed of light, the objective is to estimate $a$, which can be accomplished with \textit{matched filtering}: simply find $a$ for which $\langle T_ap,q\rangle$ is largest. This is essentially a max filter with $V=L^2(\mathbb{R})$ and $G$ being the group of translation operators, though for this estimation problem, the object of interest is the maximizer $a$, not the maximum value $\llangle [p],[q]\rrangle$. Meanwhile, the maximum value is used for the detection problem of distinguishing noise from noisy versions of translates of $p$. (This accounts for half of the etymology of \textit{max filtering}.) \end{example} \begin{example}[Max pooling] In a convolutional neural network, it is common for a convolutional layer to be followed by a max pooling layer. Here, the convolutional layer convolves the input image with several localized templates, and then the max pooling layer downsamples each of the resulting convolutions by partitioning the scene into patches and recording the maximum value in each patch.
In the extreme case where the max pooling layer takes the entire scene to be a single patch to maximize over, these layers implement a max filter bank in which $V$ is the image space and $G$ is the group of translation operators. (This accounts for the other half of the etymology of \textit{max filtering}.) \end{example} \section{The complexity of separating orbits} \label{sec.separating} For practical reasons, we are interested in orbit-separating invariants $\Phi\colon V\to\mathbb{R}^n$ of \textit{low complexity}, which we take to mean two different things simultaneously: \begin{itemize} \item[(i)] $n$ is small (i.e., the map has low \textit{sample complexity}), and \item[(ii)] one may evaluate $\Phi$ efficiently (i.e., the map has low \textit{computational complexity}). \end{itemize} While these notions of complexity are related, they impact the learning task in different ways. In what follows, we study both notions of complexity in the context of max filtering. \subsection{Generic templates separate orbits} In this subsection, we focus on the case in which $V=\mathbb{R}^d$ and $G\leq\operatorname{O}(d)$ is \textit{semialgebraic}, which we will define shortly. Every polynomial function $p\colon\mathbb{R}^n\to\mathbb{R}$ determines a basic semialgebraic set \[ \{x\in\mathbb{R}^n:p(x)\geq0\}. \] By closing under finite unions, finite intersections, and complementation, the basic semialgebraic sets of $\mathbb{R}^n$ generate an algebra of sets known as the \textbf{semialgebraic sets} in $\mathbb{R}^n$. A \textbf{semialgebraic subgroup} $G\leq\operatorname{GL}(d)$ is a subgroup of $\operatorname{GL}(d)$ that is also a semialgebraic set in $\mathbb{R}^{d\times d}$, e.g., $\operatorname{O}(d)$. A \textbf{semialgebraic function} is a function $f\colon\mathbb{R}^s\to\mathbb{R}^t$ for which the graph $\{(x,f(x)):x\in\mathbb{R}^s\}$ is a semialgebraic set in $\mathbb{R}^{s+t}$. \begin{lemma} \label{lem.max filtering is semialgebraic} For every semialgebraic subgroup $G\leq\operatorname{O}(d)$, the corresponding max filtering map $\llangle [\cdot],[\cdot]\rrangle\colon\mathbb{R}^d\times\mathbb{R}^d\to\mathbb{R}$ is semialgebraic. \end{lemma} \begin{proof} The graph of the max filtering map can be expressed in first-order logic: \[ \{(x,y,t)\in\mathbb{R}^d\times\mathbb{R}^d\times\mathbb{R}: (\forall g\in G,t\geq\langle x,gy\rangle) \wedge (\forall \epsilon\in\mathbb{R}, \exists g\in G,\epsilon>0\Rightarrow t-\epsilon<\langle x,gy\rangle)\}. \] (To be precise, one should replace our quantifiers over $G$ with the polynomial conditions that define the semialgebraic set $G$ to obtain a condition in first-order logic.) It follows from Proposition~2.2.4 in~\cite{BochnakCR:13} that the graph is semialgebraic. \end{proof} Every semialgebraic set $A$ can be decomposed as a disjoint union $A=\bigcup_i A_i$ of finitely many semialgebraic sets $A_i$, each of which is homeomorphic to an open hypercube $(0,1)^{d_i}$ (where $(0,1)^0$ is a point). The \textbf{dimension} of $A$ can be defined in terms of this decomposition as $\operatorname{dim}(A):=\max_i d_i$. (It does not depend on the decomposition.) \begin{definition} Given a semialgebraic subgroup $G\leq\operatorname{O}(d)$, we say the corresponding max filtering map $\llangle [\cdot],[\cdot]\rrangle\colon\mathbb{R}^d\times\mathbb{R}^d\to\mathbb{R}$ is $k$-\textbf{strongly separating} if for every $x,y\in\mathbb{R}^d$ with $[x]\neq[y]$, it holds that \[ \operatorname{dim}\big\{z\in\mathbb{R}^d:\llangle [z],[x]\rrangle=\llangle [z],[y]\rrangle\big\} \leq d-k. 
\] \end{definition} As an example, consider the case where $G=\operatorname{O}(d)$. Then $\llangle [z],[x]\rrangle=\llangle [z],[y]\rrangle$ holds precisely when $\|z\|\|x\|=\|z\|\|y\|$, i.e., $z=0$ or $[x]=[y]$. Thus, the max filtering map is $d$-strongly separating in this case. \begin{theorem} \label{thm.dym-gortler} Consider any semialgebraic subgroup $G\leq\operatorname{O}(d)$ with $k$-strongly separating max filtering map $\llangle [\cdot],[\cdot]\rrangle\colon\mathbb{R}^d\times\mathbb{R}^d\to\mathbb{R}$ for some $k\in\mathbb{N}$. For generic $z_1,\ldots,z_n\in\mathbb{R}^d$, the max filter bank $x\mapsto\{\llangle[z_i],[x]\rrangle\}_{i=1}^n$ separates $G$-orbits in $\mathbb{R}^d$ provided $n\geq 2d/k$. \end{theorem} In the $d$-strongly separating case where $G=\operatorname{O}(d)$, Theorem~\ref{thm.dym-gortler} implies that $n=2$ generic templates suffice to separate orbits. (Of course, any single nonzero template suffices in this case.) Theorem~\ref{thm.dym-gortler} is an improvement to Theorem~1.9 in~\cite{DymG:22}, which gives the condition $n\geq 2d+1$. We obtain the improvement $n\geq 2d/k$ by leveraging a more detailed notion of \textit{strongly separating}, as well as the positive homogeneity of max filtering. The proof makes use of a \textit{lift-and-project} technique that first appeared in~\cite{BalanCE:06} and was subsequently applied in~\cite{ConcaEHV:15,WangX:19,CahillCC:20,RongWX:21}. \begin{proof}[Proof of Theorem~\ref{thm.dym-gortler}] Fix $n\geq 2d/k$, and let $\mathcal{Z}\subseteq(\mathbb{R}^d)^n$ denote the set of $\{z_i\}_{i=1}^n$ for which the max filter bank $x\mapsto\{\llangle[z_i],[x]\rrangle\}_{i=1}^n$ fails to separate $G$-orbits in $\mathbb{R}^d$. We will show that $\mathcal{Z}$ is semialgebraic with dimension $\leq dn-1$, from which the result follows. To do so, observe that $\{z_i\}_{i=1}^n\in\mathcal{Z}$ precisely when there exists a \textit{witness}, namely, $(x,y)\in\mathbb{R}^d\times\mathbb{R}^d$ with $[x]\neq[y]$ such that $\llangle[z_i],[x]\rrangle=\llangle[z_i],[y]\rrangle$ for every $i\in\{1,\ldots,n\}$. In fact, we may assume that the witness $(x,y)$ satisfies $\|x\|^2+\|y\|^2=1$ without loss of generality since the set of witnesses for $\{z_i\}_{i=1}^n$ avoids $(0,0)$ and is closed under positive scalar multiplication by Lemma~\ref{lem.max filter product properties}(c). This suggests the following lift of $\mathcal{Z}$: \begin{align*} \mathcal{L} :=\Big\{~(\{z_i\}_{i=1}^n,(x,y))\in(\mathbb{R}^d)^n\times(\mathbb{R}^d)^2~: &~ [x]\neq[y], ~ \|x\|^2+\|y\|^2=1,\\ &~ \llangle[z_i],[x]\rrangle=\llangle[z_i],[y]\rrangle ~~\forall i\in\{1,\ldots,n\}~\Big\}. \end{align*} Since $G$ is semialgebraic, we have that $[x]\neq[y]$ is a semialgebraic condition. Furthermore, $\llangle[z_i],[x]\rrangle=\llangle[z_i],[y]\rrangle$ is a semialgebraic condition for each $i$ by Lemma~\ref{lem.max filtering is semialgebraic}. It follows that $\mathcal{L}$ is semialgebraic. Next, we define the projection maps $\pi_1\colon(\{z_i\}_{i=1}^n,(x,y))\mapsto \{z_i\}_{i=1}^n$ and $\pi_2\colon(\{z_i\}_{i=1}^n,(x,y))\mapsto (x,y)$. Then $\mathcal{Z}=\pi_1(\mathcal{L})$ is semialgebraic by Tarski--Seidenberg (Proposition~2.2.1 in~\cite{BochnakCR:13}). To bound the dimension of $\mathcal{Z}$, we first observe that \[ \pi_2^{-1}(x,y) =\big\{z\in\mathbb{R}^d:\llangle [z],[x]\rrangle=\llangle [z],[y]\rrangle\big\}^n \times\{(x,y)\}, \] and so $\operatorname{dim}(\pi_2^{-1}(x,y)) \leq n(d-k)$, since the max filtering map is $k$-strongly separating by assumption.
We use the fact that $\pi_2(\mathcal{L})$ is contained in the unit sphere in $(\mathbb{R}^d)^2$ together with Lemma~1.10 in~\cite{DymG:22} to obtain \[ \operatorname{dim}(\mathcal{Z}) \leq\operatorname{dim}(\mathcal{L}) \leq\operatorname{dim}(\pi_2(\mathcal{L}))+\max_{x,y\in\mathbb{R}^d}\operatorname{dim}(\pi_2^{-1}(x,y)) \leq (2d-1)+n(d-k) \leq dn-1, \] where the last step is equivalent to the assumption $n\geq 2d/k$. \end{proof} \begin{corollary} \label{cor.2d templates suffice for finite groups} Consider any finite subgroup $G\leq\operatorname{O}(d)$. For generic $z_1,\ldots,z_n\in\mathbb{R}^d$, the max filter bank $x\mapsto\{\llangle[z_i],[x]\rrangle\}_{i=1}^n$ separates $G$-orbits in $\mathbb{R}^d$ provided $n\geq 2d$. \end{corollary} Corollary~\ref{cor.2d templates suffice for finite groups} follows immediately from Theorem~\ref{thm.dym-gortler} and the following lemma: \begin{lemma} For every finite subgroup $G\leq\operatorname{O}(d)$, the corresponding max filtering map $\llangle [\cdot],[\cdot]\rrangle\colon\mathbb{R}^d\times\mathbb{R}^d\to\mathbb{R}$ is $1$-strongly separating. \end{lemma} \begin{proof} Consider any $x,y\in\mathbb{R}^d$ with $[x]\neq[y]$. Then $\llangle [z],[x]\rrangle=\llangle [z],[y]\rrangle$ only if there exist $g,h\in G$ such that $\langle z,gx\rangle=\langle z,hy\rangle$, i.e., $z\in\operatorname{span}\{gx-hy\}^\perp$. Thus, \[ \big\{z\in\mathbb{R}^d:\llangle [z],[x]\rrangle=\llangle [z],[y]\rrangle\big\} \subseteq\bigcup_{g,h\in G}\operatorname{span}\{gx-hy\}^\perp. \] Since the max filtering map is semialgebraic by Lemma~\ref{lem.max filtering is semialgebraic}, it follows that the left-hand set is also semialgebraic. Since $G$ is finite and $gx\neq hy$ for all $g,h\in G$ (because $[x]\neq[y]$), the right-hand set is a finite union of hyperplanes, hence semialgebraic with dimension $d-1$, and the result follows. \end{proof} We would like to know if a version of Corollary~\ref{cor.2d templates suffice for finite groups} holds for all semialgebraic groups, but we do not have a proof that the max filtering map is strongly separating for infinite groups in general. This motivates the following problem: \begin{problem}\ \label{prob.semialgebraic} \begin{itemize} \item[(a)] For which semialgebraic groups is the max filtering map $k$-strongly separating? \item[(b)] How many templates are needed to separate orbits for a given group? \end{itemize} \end{problem} We identify a couple of interesting instances of Problem~\ref{prob.semialgebraic}. First, we consider the case of complex phase retrieval (as in Example~\ref{ex.phase retrieval}), where $V=\mathbb{C}^r$ and $G$ is the center of $\operatorname{U}(r)$. It is known that $n=4r-4=2\operatorname{dim}(V)-4$ generic templates separate orbits for every $r$, and this is the optimal threshold for infinitely many $r$~\cite{ConcaEHV:15}, but there also exist $11$ templates in $\mathbb{C}^4$ that separate orbits, for example~\cite{Vinzant:15}. As another example, consider the case where $V=\mathbb{R}^d$ and $G\cong S_d$ is the group of $d\times d$ permutation matrices. Then Corollary~\ref{cor.2d templates suffice for finite groups} gives that $2d$ generic templates separate orbits. However, it is straightforward to see that the templates $z_j:=\sum_{i=1}^j e_i$ for $j\in\{1,\ldots,d\}$ also separate orbits, where $e_i$ denotes the $i$th standard basis element. Indeed, take $\operatorname{sort}(x)$ to have weakly decreasing entries. Then the first entry equals $\llangle [z_1],[x]\rrangle$, while for each $j>1$, the $j$th entry equals $\llangle [z_j],[x]\rrangle-\llangle [z_{j-1}],[x]\rrangle$.
As such, this max filter bank determines $\operatorname{sort}(x)$, which is a separating invariant of $V/G$. Considering Theorem~\ref{thm.dym-gortler}, one might suspect that the max filtering map is $2$-strongly separating in this case, but this is not so. Indeed, the cone $C$ of sorted vectors in $\mathbb{R}^d$ has dimension~$d$, and so there exists a subspace $H$ of co-dimension~$1$ that intersects the interior of $C$. Select any $x$ in the interior of $C$ and any unit vector $v\in H^\perp$, and then take $y=x+\epsilon v$ for $\epsilon>0$ sufficiently small so that $y\in C$. Then \[ \big\{z\in\mathbb{R}^d:\llangle [z],[x]\rrangle=\llangle [z],[y]\rrangle\big\} \supseteq H\cap C, \] which is a semialgebraic set of dimension $d-1$, and so the claim follows. \subsection{Low-complexity max filtering} In this subsection, we focus on the case in which $V\cong\mathbb{R}^d$. Naively, one may compute the max filtering map $(x,y)\mapsto\llangle [x],[y]\rrangle$ over a finite group $G\leq\operatorname{O}(d)$ of order $m$ by computing $\langle x,gy\rangle$ for every $g\in G$ and then returning the maximum. This approach costs $O(md)$ operations. Of course, this is not possible when $G$ is infinite, and it is prohibitive when $G$ is finite but large. Interestingly, many of the groups that we encounter in practice admit a faster implementation. In particular, for many quotient spaces $V/G$, the quotient metric $d\colon V/G\times V/G\to\mathbb{R}$ is easy to compute, and by Lemma~\ref{lem.max filter product properties}(f), the max filtering map is equally easy to compute in these cases: \[ \llangle [x],[y]\rrangle =\frac{1}{2}\Big(d([x],[y])^2-\|x\|^2-\|y\|^2\Big). \] In this subsection, we highlight a few examples of such quotient spaces before considering the harder setting of graphs. \subsubsection{Point clouds} Consider $V=\mathbb{R}^{k\times n}$ with Frobenius inner product. We can represent a point cloud of $n$ points in $\mathbb{R}^k$ as a member of $V$ by arbitrarily labeling the points with column indices. In this setting, we identify members of $V$ that reside in a common $G$-orbit with $G\cong S_n$ permuting the columns. The resulting quotient metric is known as the \textit{$2$-Wasserstein distance}: \[ d([X],[Y]) =\min_{P\in\Pi(n)}\|X-YP\|_F, \] where $\Pi(n)$ denotes the set of $n\times n$ permutation matrices and $\|\cdot\|_F$ denotes the Frobenius norm. The corresponding max filtering map is then given by \[ \llangle [X],[Y]\rrangle =\max_{P\in\Pi(n)}\langle X,YP\rangle =\max_{P\in\Pi(n)}\operatorname{tr}(X^\top YP) =\max_{S\in\operatorname{conv}\Pi(n)}\operatorname{tr}(X^\top YS), \] where $\operatorname{conv}\Pi(n)$ denotes the convex hull of $\Pi(n)$, namely, the doubly stochastic matrices. By this formulation, the max filtering map can be computed in polynomial time by linear programming. In the special case where $k=1$, the max filtering map has an even faster implementation: \[ \llangle [X],[Y]\rrangle =\langle \operatorname{sort}(X), \operatorname{sort}(Y) \rangle, \] which can be computed in linearithmic time. \subsubsection{Circular translations} Consider the case where $V\cong\mathbb{R}^n$ is the space of vectors with entries indexed by the cyclic group $C_n:=\mathbb{Z}/n\mathbb{Z}$, and $G\cong C_n$ is the group of circular translations $T_a$ defined by $T_af(x):=f(x-a)$. 
Then the max filtering map is given by \[ \llangle [f],[g]\rrangle =\max_{a\in C_n}\langle f,T_ag\rangle =\max_{a\in C_n}\sum_{x\in C_n}f(x)g(x-a) =\max_{a\in C_n}(f\star Rg)(a), \] where $\star$ denotes the circular convolution and $R$ denotes the reversal operator. Thus, the max filtering map can be computed in linearithmic time with the help of the fast Fourier transform. \subsubsection{Shape analysis} In geometric morphometrics~\cite{MitteroeckerG:09}, it is common for data to take the form of a sequence of $n$ landmarks in $\mathbb{R}^k$ (where $k$ is typically $2$ or $3$) with a global rotation ambiguity. This corresponds to taking $V=\mathbb{R}^{k\times n}$ and $G\cong\operatorname{O}(k)$ acting on the left, and so the max filtering map is given by \[ \llangle [X],[Y]\rrangle =\max_{R\in\operatorname{O}(k)}\langle X,RY\rangle =\max_{R\in\operatorname{O}(k)}\operatorname{tr}(RYX^\top) =\|YX^\top\|_*, \] where $\|\cdot\|_*$ denotes the nuclear norm. As such, the max filtering map can be computed in polynomial time with the aid of the singular value decomposition. \subsubsection{Separation hierarchy for weighted graphs} Here, we focus on the case in which $V$ is the vector space of real symmetric $n\times n$ matrices with zero diagonal and $G\cong S_n$ is the group of linear isometries of the form $A\mapsto PAP^{-1}$, where $P$ is a permutation matrix. We think of $V/G$ as the space of weighted graphs on $n$ vertices (up to isomorphism). One popular approach for separating graphs uses \textit{message-passing graph neural networks}, but the separation power of such networks is limited by the so-called \textit{Weisfeiler--Lehman test}~\cite{WeisfeilerA:68,XuHLJ:19,MorrisRFHLRG:19}. For example, message-passing graph neural networks fail to distinguish $C_3\cup C_3$ from $C_6$. See~\cite{Sato:20,HuangV:21} for surveys of this rapidly growing literature. As an alternative, we consider a max filtering approach. Given two adjacency matrices $A_1$ and $A_2$, Lemma~\ref{lem.max filter product properties}(f) implies that the corresponding graphs are isomorphic if and only if \[ \|A_1\|_F^2=\llangle [A_1],[A_2]\rrangle=\|A_2\|_F^2. \] As such, max filtering is graph isomorphism--hard in this setting. Interestingly, there exist $A\in V$ for which the map $X\mapsto\llangle [A],[X]\rrangle$ can be computed in linearithmic time, and furthermore, these easy-to-compute max filters help with separating orbits. To see this, we follow~\cite{AlonYZ:95}, which uses \textit{color coding} to facilitate computation by dynamic programming. \begin{definition} A tuple $\{f_i\}_{i=1}^N$ with $f_i\colon[n]\to[k]$ for each $i\in [N]$ is an \textbf{$(n,k)$-color coding} if $n\geq k$ and for every $S\subseteq[n]$ of cardinality $k$, there exists $i\in [N]$ such that $f_i(S)=[k]$. \end{definition} \begin{lemma} \label{lem.color coding size} Given $n,k\in\mathbb{N}$ with $n\geq k$, there exists an $(n,k)$-color coding of size $\lceil ke^k\log n\rceil$. \end{lemma} \begin{proof} We show that $N$ random colorings form a color coding with positive probability. For each $i\in [N]$ and $S\in\binom{[n]}{k}$, we have $\mathbb{P}\{f_i(S)=[k]\}=k!/k^k$, and so the union bound gives \[ \mathbb{P}\Big\{\{f_i\}_{i=1}^N\text{ is not an $(n,k)$-color coding}\Big\} \leq\sum_{S\in\binom{[n]}{k}} \prod_{i=1}^N\mathbb{P}\{f_i(S)\neq [k]\} =\binom{n}{k}\bigg(1-\frac{k!}{k^k}\bigg)^{N}. \] It suffices to select $N$ so that the right-hand side is strictly smaller than $1$.
The result follows by applying the bounds $\binom{n}{k}\leq n^k$, $k!\geq (k/e)^k$, and $(1-1/t)^t<1/e$ for $t>1$: \[ \tbinom{n}{k}(1-\tfrac{k!}{k^k})^{N} \leq n^k(1-e^{-k})^{e^k e^{-k}N} < e^{k\log n-e^{-k}N}, \] which is at most $1$ when $N\geq ke^k\log n$. \end{proof} \begin{algorithm}[t] \SetAlgoLined \KwData{Weighted tree with vertex set $[k]$ (in post-order traversal order) and with adjacency matrix $A\in\mathbb{R}^{k\times k}$, and $(n,k)$-color coding $\{f_i\}_{i=1}^N$} \KwIn{Weighted graph with adjacency matrix $B\in\mathbb{R}^{n\times n}$} \KwResult{Max filter $\llangle [\tilde{A}],[B]\rrangle$, where $\tilde{A}:=[\begin{smallmatrix} A&0\\0&0\end{smallmatrix}]\in\mathbb{R}^{n\times n}$} \For{$i\in[N]$ and $\pi\in S_k$}{ Initialize $\ell(v)\leftarrow0$ for all $v\in [n]$\\ \For{$u\in[k-1]$}{ Take the unique vertex $u' \in \{u+1,\ldots,k\}$ adjacent to the leaf $u$ in $H-[u-1]$\\ \For{$v'\in (\pi\circ f_i)^{-1}(u')$}{ $\ell(v')\leftarrow\ell(v')+\max_{v\in (\pi\circ f_i)^{-1}(u)}(\ell(v)+A_{u,u'}B_{v,v'})$\\ } } Put $s(i,\pi):=\max_{v\in (\pi\circ f_i)^{-1}(k)}\ell(v)$\\ } Output $\max_{i\in[N],\pi\in S_k}s(i,\pi)$\\ \caption{Max filtering with a weighted tree template by color coding \label{alg.color_coded_max_filter}} \end{algorithm} Algorithm~\ref{alg.color_coded_max_filter} computes the max filter with a small weighted tree using a color coding and dynamic programming. Lemma~\ref{lem.color coding size} implies that Algorithm~\ref{alg.color_coded_max_filter} has runtime $e^{O(k\log k)}n^2\log n$, which is linearithmic in the size of the data when $k$ is fixed. Notice that max filtering with the path on $k=4$ vertices already separates the graphs $C_3\cup C_3$ and $C_6$. Furthermore, using techniques from~\cite{AlonYZ:95}, one can modify Algorithm~\ref{alg.color_coded_max_filter} to max filter with \textit{any} template graph $H$ on $k$ vertices, though the runtime becomes $e^{O(k\log k)}n^{t+1}\log n$, where $t$ is the \textit{treewidth} of $H$. Letting $\mathcal{H}(k,t)$ denote the set of weighted graphs on at most $k$ vertices with treewidth at most $t$, we have the following hierarchy: \begin{equation} \label{eq.hierarchy} \begin{array}{ccccccccc} \mathcal{H}(n,1)&\subseteq&\cdots&\subseteq&\mathcal{H}(n,n-1)\\ \vsubseteq\\ \vdots&&\iddots\\ \vsubseteq\\ \mathcal{H}(2,1) \end{array} \end{equation} Corollary~\ref{cor.2d templates suffice for finite groups} gives that $n(n-1)$ generic templates from $\mathcal{H}(n,n-1)$ separate all isomorphism classes of weighted graphs on $n$ vertices. It would be interesting to study the separation power of templates of logarithmic order and bounded treewidth. \section{Stability of max filtering} \label{sec.lipschitz} \subsection{Bilipschitz max filter banks} Upper and lower Lipschitz bounds are used to quantify the stability of a mapping between metric spaces, but it is generally difficult to estimate such bounds; see~\cite{BandeiraCMN:14,BalanW:15,CahillCD:16,IwenMP:19,BalanD:21} for examples from phase retrieval and~\cite{ZouBS:19,CahillCC:20,CahillCC:arxiv,BalanHS:22} for other examples. In this subsection, we prove the following: \begin{theorem} \label{thm.main result} Fix a finite group $G\leq\operatorname{O}(d)$ of order $m$ and select \[ n\geq 12m^2d\log(\tfrac{2}{\delta}+1), \qquad \delta :=(\tfrac{\pi}{128m^4}\cdot\tfrac{1}{2d+3\log(4m^2)})^{1/2}. \] Draw independent random vectors $z_1,\ldots,z_n\sim\mathsf{Unif}(S^{d-1})$. 
With probability $\geq1-e^{-n/(12m^2)}$, it holds that the max filter bank $\Phi\colon\mathbb{R}^d/G\to\mathbb{R}^n$ with templates $\{z_i\}_{i=1}^n$ has lower Lipschitz bound $\delta$ and upper Lipschitz bound $n^{1/2}$. \end{theorem} This result distinguishes max filtering from separating polynomial invariants, which do not necessarily enjoy upper or lower Lipschitz bounds~\cite{CahillCC:20}. In Theorem~\ref{thm.main result}, we may take the embedding dimension to be $n=\Theta^*(m^2d)$ with bilipschitz bounds $\Theta^*(\frac{1}{m^2d^{1/2}})$ and $\Theta^*(md^{1/2})$, where $\Theta^*(\cdot)$ suppresses logarithmic factors. For comparison, we consider a couple of cases that have already been studied in the literature. First, the case where $G=\{\pm\operatorname{id}\}$ reduces to the setting of \textit{real phase retrieval} (as in Example~\ref{ex.phase retrieval}), where it is known that there exist $n=\Theta(d)$ templates that deliver lower- and upper-Lipschitz bounds $\frac{1}{4}$ and $4$, say; see equation~(17) in~\cite{BandeiraCMN:14}. Notably, these bounds do not get worse as $d$ gets large. It would be interesting if a version of Theorem~\ref{thm.main result} held for infinite groups, but we do not expect it to hold for infinite-dimensional inner product spaces. Case in point, for $V=\ell^2$ with $G=\{\pm\operatorname{id}\}$, it was shown in~\cite{CahillCD:16} that for every choice of templates, the map is \textit{not} bilipschitz. Another interesting phenomenon from finite-dimensional phase retrieval is that separating implies bilipschitz; see Lemma~16 and Theorem~18 in~\cite{BandeiraCMN:14} and Proposition~1.4 in~\cite{CahillCD:16}. This suggests the following: \begin{problem} \label{prob.sep implies bilip} Is every separating max filter bank $\Phi\colon \mathbb{R}^d/G\to\mathbb{R}^n$ bilipschitz? \end{problem} If the answer to Problem~\ref{prob.sep implies bilip} is ``yes,'' then Corollary~\ref{cor.2d templates suffice for finite groups} implies that $2d$ generic templates produce a bilipschitz max filter bank $\Phi\colon\mathbb{R}^d/G\to\mathbb{R}^{2d}$ whenever $G\leq\operatorname{O}(d)$ is finite. Theorem~\ref{thm.main result} follows immediately from Lemmas~\ref{lem.bilipschitz no randomness} and~\ref{lem.random vectors have projective uniformity} below. Our proof uses the following notion that was introduced in~\cite{AlexeevBFM:14}. We say $\{z_i\}_{i=1}^n\in(\mathbb{R}^d)^n$ exhibits $(k,\delta)$-\textbf{projective uniformity} if \[ s_k\{|\langle z_i,x\rangle|\}_{i=1}^n \geq\delta\|x\| \] for every $x\in\mathbb{R}^d$, where $s_k\colon\mathbb{R}^n\to\mathbb{R}$ returns the $k$th smallest entry of the input. In what follows, we denote $\|\{z_i\}_{i=1}^n\|_F:=(\sum_{i=1}^n\|z_i\|^2)^{1/2}$. \begin{lemma} \label{lem.bilipschitz no randomness} Fix a finite subgroup $G\leq\operatorname{O}(d)$ and suppose $\{z_i\}_{i=1}^n\in(\mathbb{R}^d)^n$ exhibits $(\lceil\frac{n}{|G|^2}\rceil,\delta)$-projective uniformity. Then the max filter bank $\Phi\colon\mathbb{R}^d/G\to\mathbb{R}^n$ with templates $\{z_i\}_{i=1}^n$ has lower Lipschitz bound $\delta$ and upper Lipschitz bound $\|\{z_i\}_{i=1}^n\|_F$. \end{lemma} \begin{proof} The upper Lipschitz bound follows from Lemma~\ref{lem.max filter product properties}(g): \[ \|\Phi([x])-\Phi([y])\|^2 =\sum_{i=1}^n|\llangle [z_i],[x]\rrangle-\llangle [z_i],[y]\rrangle|^2 \leq\|\{z_i\}_{i=1}^n\|_F^2\cdot d([x],[y])^2. 
\] For the lower Lipschitz bound, fix $x,y\in\mathbb{R}^d$ with $[x]\neq[y]$, and then for each $i\in\{1,\ldots,n\}$, select $g_i,h_i\in G$ such that $\llangle [z_i],[x]\rrangle=\langle z_i,g_ix\rangle$ and $\llangle [z_i],[y]\rrangle=\langle z_i,h_iy\rangle$. Then \begin{equation} \label{eq.to lower bound} \|\Phi([x])-\Phi([y])\|^2 =\sum_{i=1}^n\langle z_i,g_ix-h_iy\rangle^2 \geq\bigg(\sum_{i=1}^n\langle z_i,\tfrac{g_ix-h_iy}{\|g_ix-h_iy\|}\rangle^2\bigg)\cdot d([x],[y])^2, \end{equation} where the inequality follows from the bound $\|g_ix-h_iy\|\geq d([x],[y])$. Next, consider the map $p\colon i\mapsto (g_i,h_i)$, and select $(g,h)\in G^2$ with the largest preimage. By pigeonhole, we have $|p^{-1}(g,h)|\geq\lceil\frac{n}{|G|^2}\rceil=:k$, and so \[ \sum_{i=1}^n\langle z_i,\tfrac{g_i x-h_i y}{\|g_i x-h_i y\|}\rangle^2 \geq \max_{i\in p^{-1}(g,h)}\langle z_i,\tfrac{g x-h y}{\|g x-h y\|}\rangle^2 \geq s_k\{\langle z_i,\tfrac{g x-h y}{\|g x-h y\|}\rangle^2\}_{i=1}^n \geq \delta^2. \] Combining with \eqref{eq.to lower bound} gives the result. \end{proof} The following lemma gives that random templates exhibit projective uniformity. \begin{lemma}[cf.\ Lemma~6.9 in~\cite{AlexeevBFM:14}] \label{lem.random vectors have projective uniformity} Select $p\in(0,1)$ and take \begin{equation} \label{eq.choice for delta} \delta :=(\tfrac{\pi}{128}\cdot\tfrac{p^2}{2d+3\log(4/p)})^{1/2}, \qquad n\geq\tfrac{12d}{p}\log(\tfrac{2}{\delta}+1). \end{equation} Draw independent random vectors $z_1,\ldots,z_n\sim\mathsf{Unif}(S^{d-1})$. Then $\{z_i\}_{i=1}^n$ exhibits $(\lceil pn\rceil,\delta)$-projective uniformity with probability $\geq1-e^{-pn/12}$. \end{lemma} \begin{proof} Put $k:=\lceil pn\rceil$, let $\mathcal{E}$ denote the failure event that $\{z_i\}_{i=1}^n$ does not have $(k,\delta)$-projective uniformity, and let $N_\delta$ denote a $\delta$-net of $S^{d-1}$ of minimum size. Note that if $v$ is within $\delta$ of $x$, then for every $z_i$, it holds that \[ |\langle z_i,v\rangle| \leq |\langle z_i,x\rangle|+|\langle z_i,v-x\rangle| \leq |\langle z_i,x\rangle|+\|v-x\| \leq |\langle z_i,x\rangle|+\delta. \] Thus, we may pass to the $\delta$-net to get \begin{align*} \mathbb{P}(\mathcal{E}) &=\mathbb{P}\Big\{\text{ $\exists x\in S^{d-1}$ s.t.\ $s_k\{|\langle z_i,x\rangle|\}_{i=1}^n<\delta$ }\Big\}\\ &\leq\mathbb{P}\Big\{\text{ $\exists v\in N_\delta$ s.t.\ $s_k\{|\langle z_i,v\rangle|\}_{i=1}^n<2\delta$ }\Big\}\\ &\leq |N_\delta|\cdot\mathbb{P}\Big\{s_k\{|\langle z_i,e_1\rangle|\}_{i=1}^n<2\delta\Big\} =|N_\delta|\cdot\mathbb{P}\Big\{\sum_{i=1}^n\mathbf{1}_{\{|\langle z_i,e_1\rangle|<2\delta\}}\geq k\Big\}, \end{align*} where the second inequality applies the union bound and the rotation invariance of the distribution $\mathsf{Unif}(S^{d-1})$. A standard volume comparison argument gives $|N_\delta|\leq(\frac{2}{\delta}+1)^d$. The final probability concerns a sum of independent Bernoulli variables with some success probability $q=q(d,\delta)$, which can be estimated using the multiplicative Chernoff bound: \[ \mathbb{P}\Big\{\sum_{i=1}^n\mathbf{1}_{\{|\langle z_i,e_1\rangle|<2\delta\}}\geq k\Big\} \leq \mathbb{P}\Big\{\sum_{i=1}^n\mathbf{1}_{\{|\langle z_i,e_1\rangle|<2\delta\}}\geq pn\Big\} \leq \exp(-\tfrac{(p-q)^2}{p+q}\cdot n), \] provided $p>q$. Next, we verify that $q(d,\delta)\leq\frac{p}{2}$. 
Denoting $g\sim\mathsf{N}(0,I_d)$, we have \begin{align*} q :=\mathbb{P}\{|\langle \tfrac{g}{\|g\|},e_1\rangle|<2\delta\} &\leq \inf_{t>0}\Big(\mathbb{P}\{|\langle g,e_1\rangle|<2\delta t\}+\mathbb{P}\{\|g\|^2>t^2\}\Big)\\ &\leq \inf_{t\geq\sqrt{2d}}\Big(\sqrt{\tfrac{2}{\pi}}\cdot2\delta t+e^{-(t^2-2d)/3}\Big), \end{align*} where the final inequality uses the facts that $|\langle g,e_1\rangle|$ has half-normal distribution and $\|g\|^2$ has chi-squared distribution with $d$ degrees of freedom. We select $t:=(2d+3\log(\frac{4}{p}))^{1/2}$ so that the second term equals $\frac{p}{4}$, and then our choice \eqref{eq.choice for delta} for $\delta$ ensures that the first term equals $\frac{p}{4}$. Overall, we have \begin{align*} \mathbb{P}(\mathcal{E}) &\leq |N_\delta|\cdot\mathbb{P}\Big\{\sum_{i=1}^n\mathbf{1}_{\{|\langle z_i,e_1\rangle|<2\delta\}}\geq k\Big\}\\ &\leq (\tfrac{2}{\delta}+1)^d\cdot \exp(-\tfrac{(p-q)^2}{p+q}\cdot n) \leq \exp(d\log(\tfrac{2}{\delta}+1)-\tfrac{pn}{6}) \leq e^{-pn/12}, \end{align*} where the last step applied our assumption that $n\geq\frac{12d}{p}\log(\frac{2}{\delta}+1)$. \end{proof} \subsection{Mallat-type stability to diffeomorphic distortion} In this subsection, we focus on the case in which $V=L^2(\mathbb{R}^d)$ and $G$ is the group of translation operators $T_a$ defined by $T_af(x):=f(x-a)$ for $a\in \mathbb{R}^d$. Given a template $h\in L^2(\mathbb{R}^d)$, the corresponding max filter is \[ \llangle [h],[f]\rrangle =\sup_{a\in\mathbb{R}^d}\langle h,T_af \rangle =\sup_{a\in\mathbb{R}^d}\int_{\mathbb{R}^d}h(x)f(x-a)dx =\sup_{a\in\mathbb{R}^d}(Rh\star f)(a), \] where $R$ denotes the reversal operator defined by $Rh(x):=h(-x)$ and $\star$ denotes convolution. (Of course, the supremum of a member of $L^2(\mathbb{R}^d)$ is not well defined, but $Rh\star f$ is continuous since $Rh, f\in L^2(\mathbb{R}^d)$.) Our motivation for this setting stems from image analysis, in which case $d=2$. For a familiar example, consider the task of classifying handwritten digits. Intuitively, each class is translation invariant, and so it makes sense to treat images as members of $V/G$. In addition, images that are slight elastic distortions of each other should be sent to nearby points in the feature domain. The fact that image classification is invariant to such distortions has been used to augment the MNIST training set and boost classification performance~\cite{SimardSP:03}. Instead of using data augmentation to learn distortion-invariant features, it is desirable to restrict to feature maps that already exhibit distortion invariance. (Indeed, such feature maps would require fewer parameters to train.) This compelled Mallat to introduce his \textit{scattering transform}~\cite{Mallat:12}, which has since played an important role in the theory of invariant machine learning~\cite{BrunaM:11,BrunaM:13,Waldspurger:17,GaoWH:19,PerlmutterGWH:19}. Mallat used the following formalism to analyze the stability of the scattering transform to distortion. Given a diffeomorphism $g\in C^1(\mathbb{R}^d)$, we consider the corresponding distortion operator $L_g$ defined by $L_gf(x):=f(g^{-1}(x))$. It will be convenient to interact with the vector field $\tau:=\operatorname{id}-g^{-1}\in C^1(\mathbb{R}^d)$, since $L_gf(x)=f(x-\tau(x))$. For example, if $\tau(x)=a$ for every $x\in\mathbb{R}^d$, then $L_g$ is translation by $a$. In what follows, $J\tau(x)\in\mathbb{R}^{d\times d}$ denotes the Jacobian matrix of $\tau$ at $x$. 
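As a quick numerical sketch of this setting (an illustration only, under simplifying assumptions: a periodic one-dimensional grid stands in for $\mathbb{R}^d$, and the computation uses \texttt{numpy}), the translation max filter is the maximum of a circular cross-correlation; it is exactly invariant to shifts of the signal, and in this example its change under a mild distortion $L_gf(x)=f(x-\tau(x))$ is comparable to $\sup_x|\tau'(x)|$, in the spirit of the theorem below.
\begin{verbatim}
import numpy as np

# Discrete sketch of the translation max filter <<[h],[f]>> = sup_a (Rh * f)(a)
# on a periodic 1-D grid (illustration only; grid spacing is ignored).

def max_filter(h, f):
    # maximize the correlation <h, T_a f> over all circular shifts a
    return max(np.dot(h, np.roll(f, a)) for a in range(len(f)))

t = np.linspace(0.0, 1.0, 512, endpoint=False)
h = np.exp(-((t - 0.5) / 0.05) ** 2)                      # localized template
f = np.exp(-((t - 0.3) / 0.1) ** 2) * np.sin(6 * np.pi * t)

# Exact invariance to (circular) translation of the signal:
assert np.isclose(max_filter(h, f), max_filter(h, np.roll(f, 37)))

# Mild distortion L_g f(x) = f(x - tau(x)) with a small smooth tau;
# the max filter changes by an amount comparable to sup |tau'| here.
tau = 0.01 * np.sin(2 * np.pi * t)
f_distorted = np.interp((t - tau) % 1.0, t, f, period=1.0)
print(abs(max_filter(h, f) - max_filter(h, f_distorted)))
\end{verbatim}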
\begin{theorem} \label{thm.mallat bound} Take any continuously differentiable $h\in L^2(\mathbb{R}^d)$ for which \begin{equation} \label{eq.bounded decay} (1+\|x\|_2)^{(d+1)/2} \cdot h(x) \qquad \text{and} \qquad (1+\|x\|_2)^{(d+3)/2}\cdot\nabla h(x) \end{equation} are bounded. There exists $C(h)>0$ such that for every $f\in L^2(\mathbb{R}^d)$ and every diffeomorphism $g\in C^1(\mathbb{R}^d)$ for which $\tau:=\operatorname{id}-g^{-1}$ satisfies $\sup_{x\in\mathbb{R}^d}\|J\tau(x)\|_{2\to2}\leq\frac{1}{2}$, it holds that \[ |\llangle[h],[f]\rrangle-\llangle[h],[L_gf]\rrangle| \leq C(h)\cdot\|f\|_{L^2(\mathbb{R}^d)}\cdot\sup_{x\in\mathbb{R}^d}\|J\tau(x)\|_{2\to2}. \] \end{theorem} This matches Mallat's bound~\cite{Mallat:12} on the stability of the scattering transform to diffeomorphic distortion, except our bound has no Hessian term. The proof of Theorem~\ref{thm.mallat bound} follows almost immediately from the following modification of Lemma~E.1 in~\cite{Mallat:12}, which bounds the commutator between the filter and the distortion by the magnitude of the distortion: \begin{lemma} \label{lem.commutator bound} Take $h\in L^2(\mathbb{R}^d)$ as in Theorem~\ref{thm.mallat bound}, and consider the linear operator $Z_h$ defined by $Z_hf:=h\star f$. There exists $C(h)>0$ such that for every diffeomorphism $g\in C^1(\mathbb{R}^d)$ for which $\tau:=\operatorname{id}-g^{-1}$ satisfies $\sup_{x\in\mathbb{R}^d}\|J\tau(x)\|_{2\to2}\leq\frac{1}{2}$, it holds that \[ \|L_gZ_h-Z_hL_g\|_{L^2(\mathbb{R}^d)\to L^\infty(\mathbb{R}^d)} \leq C(h)\cdot\sup_{x\in\mathbb{R}^d}\|J\tau(x)\|_{2\to2}. \] \end{lemma} Assuming Lemma~\ref{lem.commutator bound} for the moment, we can prove Theorem~\ref{thm.mallat bound}. \begin{proof}[Proof of Theorem~\ref{thm.mallat bound}] The change of variables $a=g^{-1}(a')$ gives \begin{align*} |\llangle[h],[f]\rrangle-\llangle[h],[L_gf]\rrangle| &=\Big|\sup_{a\in\mathbb{R}^d}(Z_{Rh}f)(a)-\sup_{b\in\mathbb{R}^d}(Z_{Rh}L_gf)(b)\Big|\\ &=\Big|\sup_{a'\in\mathbb{R}^d}(L_gZ_{Rh}f)(a')-\sup_{b\in\mathbb{R}^d}(Z_{Rh}L_gf)(b)\Big|\\ &\leq\|L_gZ_{Rh}f-Z_{Rh}L_gf\|_{L^\infty(\mathbb{R}^d)}\\ &\leq\|L_gZ_{Rh}-Z_{Rh}L_g\|_{L^2(\mathbb{R}^d)\to L^\infty(\mathbb{R}^d)}\cdot\|f\|_{L^2(\mathbb{R}^d)}, \end{align*} and so the result follows from Lemma~\ref{lem.commutator bound}. \end{proof} The rest of this section proves Lemma~\ref{lem.commutator bound}. Our proof follows some of the main ideas in the proof of Lemma~E.1 in~\cite{Mallat:12}. \begin{proof}[Proof of Lemma~\ref{lem.commutator bound}] Denote $K:=Z_h-L_gZ_hL_g^{-1}$. Then $L_gZ_h-Z_hL_g=-KL_g$, and so \[ \|L_gZ_h-Z_hL_g\|_{L^2(\mathbb{R}^d)\to L^\infty(\mathbb{R}^d)} =\|KL_g\|_{L^2(\mathbb{R}^d)\to L^\infty(\mathbb{R}^d)} \leq\|K\|_{L^2(\mathbb{R}^d)\to L^\infty(\mathbb{R}^d)}\|L_g\|_{L^2(\mathbb{R}^d)\to L^2(\mathbb{R}^d)}. \] We first bound the second factor. For $f\in L^2(\mathbb{R}^d)$, a change of variables gives \[ \|L_gf\|_{L^2(\mathbb{R}^d)}^2 =\int_{\mathbb{R}^d}f(g^{-1}(x))^2dx =\int_{\mathbb{R}^d}f(u)^2|\operatorname{det}(Jg(u))|du \leq\sup_{u\in\mathbb{R}^d}|\operatorname{det}(Jg(u))|\cdot\|f\|_{L^2(\mathbb{R}^d)}^2. 
\] For $x\in\mathbb{R}^d$, the fact that $\|J\tau(x)\|_{2\to2}\leq\frac{1}{2}$ implies \[ \operatorname{det}(Jg^{-1}(x)) =\operatorname{det}(I_d-J\tau(x)) \geq(1-\|J\tau(x)\|_{2\to2})^d \geq 2^{-d}, \] and so combining with the above estimate gives \begin{equation} \label{eq.bound on L_g} \|L_g\|_{L^2(\mathbb{R}^d)\to L^2(\mathbb{R}^d)} \leq\Big(\sup_{u\in\mathbb{R}^d}|\operatorname{det}(Jg(u))|\Big)^{1/2} =\Big(\sup_{x\in\mathbb{R}^d}|\operatorname{det}(Jg^{-1}(x))|^{-1}\Big)^{1/2} \leq 2^{d/2} \leq 2^d. \end{equation} It remains to bound $\|K\|_{L^2(\mathbb{R}^d)\to L^\infty(\mathbb{R}^d)}$. To this end, one may verify that $K$ can be expressed as $Kf(x)=\int_{\mathbb{R}^d}k(x,u)f(u)du$, where the kernel $k$ is defined by \[ k(x,u) :=h(x-u)-\operatorname{det}(I_d-J\tau(u))\cdot h(x-u-\tau(x)+\tau(u)). \] We will bound the $L^2$ norms of every $k(x,\cdot)$ and $k(\cdot,u)$, and then appeal to Young's inequality for integral operators to bound $\|K\|_{L^2(\mathbb{R}^d)\to L^\infty(\mathbb{R}^d)}$. We decompose $k=k_1+k_2+k_3$, where \begin{align*} k_1(x,u) &:=h(x-u)-h\big((I_d-J\tau(u))(x-u)\big),\\ k_2(x,u) &:=\big(1-\operatorname{det}(I_d-J\tau(u))\big)\cdot h\big((I_d-J\tau(u))(x-u)\big),\\ k_3(x,u) &:=\operatorname{det}(I_d-J\tau(u))\cdot\Big(h\big((I_d-J\tau(u))(x-u)\big)-h(x-u-\tau(x)+\tau(u))\Big). \end{align*} First, we analyze $k_1$. Letting $p_1\colon[0,1]\to\mathbb{R}^d$ denote the parameterized line segment of constant velocity from $(I_d-J\tau(u))(x-u)$ to $x-u$, we have \[ k_1(x,u) =\int_0^1\nabla h(p_1(t))\cdot J\tau(u)(x-u)~dt, \] and so \begin{align} \nonumber |k_1(x,u)| &\leq \sup_{t\in[0,1]}\|\nabla h(p_1(t))\|_2\cdot\|J\tau(u)(x-u)\|_2\\ \label{eq.k1 bound} &\leq \sup_{t\in[0,1]}\|\nabla h(p_1(t))\|_2\cdot\sup_{z\in\mathbb{R}^d}\|J\tau(z)\|_{2\to2}\cdot\|x-u\|_2. \end{align} To bound the first factor, let $C_\infty(h)>0$ denote a simultaneous bound on the absolute value and $2$-norm of \eqref{eq.bounded decay}. To use this, we bound $\inf_{t\in[0,1]}\|p_1(t)\|_2$ from below: \begin{align*} \|p_1(t)\|_2 =\|x-u-(1-t)J\tau(u)(x-u)\|_2 &\geq\|x-u\|_2-(1-t)\|J\tau(u)(x-u)\|_2\\ &\geq\big(1-(1-t)\sup_{z\in\mathbb{R}^d}\|J\tau(z)\|_{2\to2}\big)\cdot\|x-u\|_2 \geq\tfrac{1}{2}\|x-u\|_2. \end{align*} Then \[ \sup_{t\in[0,1]}\|\nabla h(p_1(t))\|_2 \leq\sup_{t\in[0,1]}\frac{C_\infty(h)}{(1+\|p_1(t)\|_2)^{(d+3)/2}} \leq\frac{C_\infty(h)}{(1+\frac{1}{2}\|x-u\|_2)^{(d+3)/2}}, \] which allows us to further bound \eqref{eq.k1 bound}: \begin{equation} \label{eq.final bound on k1} |k_1(x,u)| \leq \frac{C_\infty(h)}{(1+\frac{1}{2}\|x-u\|_2)^{(d+3)/2}}\cdot\sup_{z\in\mathbb{R}^d}\|J\tau(z)\|_{2\to2}\cdot\|x-u\|_2. \end{equation} Next, we analyze $k_2$. Since $\|J\tau(u)\|_{2\to2}\leq\frac{1}{2}$ by assumption, Bernoulli's inequality gives \[ 1-\operatorname{det}(I_d-J\tau(u)) \leq 1-(1-\|J\tau(u)\|_{2\to2})^d \leq d\|J\tau(u)\|_{2\to2} \leq d\sup_{z\in\mathbb{R}^d}\|J\tau(z)\|_{2\to2}. \] Also, the convexity bound $(1+t)^d\leq 1+(2^d-1)t$ for $t\in[0,1]$ implies \begin{align*} 1-\operatorname{det}(I_d-J\tau(u)) &\geq1-\|I_d-J\tau(u)\|_{2\to2}^d\\ &\geq1-(1+\|J\tau(u)\|_{2\to2})^d \geq-2^d\sup_{z\in\mathbb{R}^d}\|J\tau(z)\|_{2\to2}. \end{align*} Furthermore, we have \[ \|(I_d-J\tau(u))(x-u)\|_2 \geq\|x-u\|_2-\|J\tau(u)\|_{2\to2}\|x-u\|_2 \geq\tfrac{1}{2}\|x-u\|_2, \] and so \begin{align} \nonumber |k_2(x,u)| &\leq \big|1-\operatorname{det}(I_d-J\tau(u))\big|\cdot \big|h\big((I_d-J\tau(u))(x-u)\big)\big|\\ \label{eq.final bound on k2} &\leq 2^d\sup_{z\in\mathbb{R}^d}\|J\tau(z)\|_{2\to2}\cdot \frac{C_\infty(h)}{(1+\frac{1}{2}\|x-u\|_2)^{(d+1)/2}}.
\end{align} Finally, we analyze $k_3$. Put \[ r :=x-u-\tau(x)+\tau(u), \qquad s :=\tau(x)-\tau(u)-J\tau(u)(x-u). \] Then letting $p_2\colon[0,1]\to\mathbb{R}^d$ denote the parameterized line segment of constant velocity from $r$ to $r+s$, we have \[ k_3(x,u) =\operatorname{det}(I_d-J\tau(u))\cdot\big(h(r+s)-h(r)\big) =\operatorname{det}(I_d-J\tau(u))\int_0^1\nabla h(p_2(t))\cdot s~dt, \] and so \begin{equation} \label{eq.bound on k3} |k_3(x,u)| \leq|\operatorname{det}(I_d-J\tau(u))|\cdot\sup_{t\in[0,1]}\|\nabla h(p_2(t))\|_2\cdot \|s\|_2. \end{equation} For the first factor of \eqref{eq.bound on k3}, we have $|\operatorname{det}(I_d-J\tau(u))|\leq\|I_d-J\tau(u)\|_{2\to2}^d\leq (3/2)^d$. To bound the second factor of \eqref{eq.bound on k3}, we use our bound $C_\infty(h)>0$ on \eqref{eq.bounded decay}. To do so, we bound $\inf_{t\in[0,1]}\|p_2(t)\|_2$ from below. First, we note that \[ \tau(x)-\tau(u) =\int_0^1J\tau(p_3(t))(x-u)dt, \] where $p_3\colon[0,1]\to\mathbb{R}^d$ is the parameterized line segment of constant velocity from $u$ to $x$. Thus, \begin{equation} \label{eq.bound diff of taus} \|\tau(x)-\tau(u)\|_2 \leq\int_0^1 \|J\tau(p_3(t))\|_{2\to2}\|x-u\|_2dt \leq\sup_{z\in\mathbb{R}^d}\|J\tau(z)\|_{2\to2}\cdot\|x-u\|_2, \end{equation} and so for each $t\in[0,1]$, we have \begin{align*} \|p_2(t)\|_2 =\|r+ts\|_2 &=\|(x-u)-(1-t)(\tau(x)-\tau(u))-tJ\tau(u)(x-u)\|_2\\ &\geq\|x-u\|_2-(1-t)\|\tau(x)-\tau(u)\|-t\|J\tau(u)\|_{2\to2}\|x-u\|_2\\ &\geq(1-\sup_{z\in\mathbb{R}^d}\|J\tau(z)\|_{2\to2})\cdot\|x-u\|_2 \geq\tfrac{1}{2}\|x-u\|_2. \end{align*} Overall, we have \[ \sup_{t\in[0,1]}\|\nabla h(p_2(t))\|_2 \leq\sup_{t\in[0,1]}\frac{C_\infty(h)}{(1+\|p_2(t)\|_2)^{(d+3)/2}} \leq\frac{C_\infty(h)}{(1+\frac{1}{2}\|x-u\|_2)^{(d+3)/2}}. \] Finally, we apply \eqref{eq.bound diff of taus} to bound the third factor of \eqref{eq.bound on k3}: \[ \|s\|_2 \leq\|\tau(x)-\tau(u)\|_2+\|J\tau(u)(x-u)\|_2 \leq2\cdot\sup_{z\in\mathbb{R}^d}\|J\tau(z)\|_{2\to2}\cdot\|x-u\|_2. \] We combine these estimates to obtain the following bound on \eqref{eq.bound on k3}: \begin{equation} \label{eq.final bound on k3} |k_3(x,u)| \leq (3/2)^d \cdot \frac{C_\infty(h)}{(1+\frac{1}{2}\|x-u\|_2)^{(d+3)/2}} \cdot 2\cdot\sup_{z\in\mathbb{R}^d}\|J\tau(z)\|_{2\to2}\cdot\|x-u\|_2. \end{equation} Finally, \eqref{eq.final bound on k1}, \eqref{eq.final bound on k2}, and \eqref{eq.final bound on k3} together imply \begin{align*} |k(x,u)| &\leq|k_1(x,u)|+|k_2(x,u)|+|k_3(x,u)|\\ &\leq C_\infty(h)\cdot\sup_{z\in\mathbb{R}^d}\|J\tau(z)\|_{2\to2}\cdot\bigg(\frac{2^d}{(1+\frac{1}{2}\|x-u\|_2)^{(d+1)/2}} +\frac{(2(\frac{3}{2})^d+1)\|x-u\|_2}{(1+\frac{1}{2}\|x-u\|_2)^{(d+3)/2}}\bigg). \end{align*} Importantly, this is a bounded function of $x-u$ that decays like $\|x-u\|_2^{-(d+1)/2}$. By integrating the square, this simultaneously bounds the $L^2$ norm of every $k(x,\cdot)$ and $k(\cdot,u)$ by a quantity of the form $C_0(h)\cdot\sup_{z\in\mathbb{R}^d}\|J\tau(z)\|_{2\to2}$. By Young's inequality for integral operators (see Theorem~0.3.1 in~\cite{Sogge:17}, for example), it follows that \[ \|K\|_{L^2(\mathbb{R}^d)\to L^\infty(\mathbb{R}^d)} \leq C_0(h)\cdot\sup_{z\in\mathbb{R}^d}\|J\tau(z)\|_{2\to2}. \] Combining with \eqref{eq.bound on L_g} then gives the result with $C(h):=2^d\cdot C_0(h)$. \end{proof} \section{Template selection for classification} \label{sec.classification} \subsection{Classifying characteristic functions} In this subsection, we focus on the case in which $V=L^2(\mathbb{R}^d)$ and $G$ is the group of translation operators. 
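As a discrete caricature of this setting (an illustration only, on a finite pixel grid with \texttt{numpy}; the sets below are hypothetical), the max filter of two indicator functions is simply the largest overlap between one set and any translate of the other:
\begin{verbatim}
import numpy as np

# Discrete sketch: for indicator functions on a periodic grid, the translation
# max filter <<[1_S],[1_T]>> equals the largest overlap |S & (T + a)| over shifts a.

def max_overlap(ind_S, ind_T):
    return max(int(np.dot(ind_S, np.roll(ind_T, a))) for a in range(len(ind_T)))

n = 100
S = np.zeros(n); S[10:30] = 1          # an interval of length 20
T = np.zeros(n); T[55:70] = 1          # an interval of length 15

print(max_overlap(S, S))               # 20 = |S|: S matches a translate of itself
print(max_overlap(S, T))               # 15: the shorter interval fits inside the longer one
print(max_overlap(S, T) < max_overlap(S, S))   # True: the two orbits are distinguished
\end{verbatim}
The theorem below refines this picture with templates of the form $\mathbf{1}_{S_i}-\mathbf{1}_{B\setminus S_i}$, which additionally penalize mass that falls outside $S_i$.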
Suppose we have $k$ distinct $G$-orbits of indicator functions of compact subsets of $\mathbb{R}^d$. According to the following result, there is a simple classifier based on a size-$k$ max filter bank that correctly classifies these orbits. (This cartoon setting enjoys precursors in~\cite{CaulfieldM:69,CaulfieldH:80}.) \begin{theorem} \label{thm.classifying indicator functions} Given compact sets $S_1,\ldots,S_k\subseteq\mathbb{R}^d$ of positive measure satisfying \[ [\mathbf{1}_{S_i}]\neq [\mathbf{1}_{S_j}] \qquad \text{whenever} \qquad i\neq j, \] there exist templates $z_1,\ldots, z_k\in L^2(\mathbb{R}^d)$ satisfying \[ \llangle [z_i],[\mathbf{1}_{S_j}]\rrangle<\llangle [z_i],[\mathbf{1}_{S_i}]\rrangle \qquad \text{whenever} \qquad i\neq j. \] \end{theorem} \begin{proof} By compactness, there exists $r>0$ such that every $S_i$ is contained in the closed ball centered at the origin with radius $r$. For reasons that will become apparent later, we take $B$ to be the closed ball centered at the origin with radius $3r$, and we define $z_i:=\mathbf{1}_{S_i}-\mathbf{1}_{B\setminus S_i}$. Then for every $i$ and $j$, it holds that \begin{equation} \label{eq.bound on indicator template} \llangle [z_i],[\mathbf{1}_{S_j}]\rrangle =\sup_{a\in\mathbb{R}^d}\langle \mathbf{1}_{S_i}-\mathbf{1}_{B\setminus S_i},T_a\mathbf{1}_{S_j}\rangle \leq \sup_{a\in\mathbb{R}^d}\langle \mathbf{1}_{S_i},T_a\mathbf{1}_{S_j}\rangle =\llangle [\mathbf{1}_{S_i}],[\mathbf{1}_{S_j}]\rrangle. \end{equation} In the special case where $j=i$, this implies \[ |S_i| =\langle z_i,\mathbf{1}_{S_i}\rangle \leq\llangle [z_i],[\mathbf{1}_{S_i}]\rrangle \leq\llangle [\mathbf{1}_{S_i}],[\mathbf{1}_{S_i}]\rrangle =|S_i|, \] and so $\llangle [z_i],[\mathbf{1}_{S_i}]\rrangle=|S_i|$. We consider all $j\neq i$ in two cases. \medskip \noindent \textbf{Case I:} $j\neq i$ and $|S_j|\leq|S_i|$. Considering \eqref{eq.bound on indicator template}, it suffices to bound $\llangle [\mathbf{1}_{S_i}],[\mathbf{1}_{S_j}]\rrangle$. Letting $R$ denote the reversal operator defined by $Rf(x):=f(-x)$, then \[ \llangle [\mathbf{1}_{S_i}],[\mathbf{1}_{S_j}]\rrangle =\sup_{a\in\mathbb{R}^d}(R\mathbf{1}_{S_i}\star\mathbf{1}_{S_j})(a). \] Since $R\mathbf{1}_{S_i},\mathbf{1}_{S_j}\in L^2(\mathbb{R}^d)$, it holds that the convolution $R\mathbf{1}_{S_i}\star\mathbf{1}_{S_j}$ is continuous, and since $S_i$ and $S_j$ are compact, the convolution has compact support. Thus, the extreme value theorem gives that the convolution achieves its supremum, meaning there exists $a\in\mathbb{R}^d$ such that \begin{equation} \label{eq.indicators 2} \llangle [\mathbf{1}_{S_i}],[\mathbf{1}_{S_j}]\rrangle =\langle \mathbf{1}_{S_i},T_a\mathbf{1}_{S_j}\rangle =|S_i\cap (S_j+a)|. \end{equation} Next, the assumptions $|S_j|\leq|S_i|$ and $[\mathbf{1}_{S_i}]\neq[\mathbf{1}_{S_j}]$ together imply \begin{equation} \label{eq.indicators 3} |S_i\cap (S_j+a)| <|S_i|. \end{equation} Indeed, equality in the bound $|S_i\cap (S_j+a)|\leq |S_i|$ is only possible if $S_i\subseteq S_j+a$ (modulo null sets), but since $|S_j|\leq|S_i|$ by assumption, this requires $S_i=S_j+a$ (modulo null sets), which violates the assumption $[\mathbf{1}_{S_i}]\neq[\mathbf{1}_{S_j}]$. Overall, we combine \eqref{eq.bound on indicator template}, \eqref{eq.indicators 2}, and \eqref{eq.indicators 3} to get \[ \llangle [z_i],[\mathbf{1}_{S_j}]\rrangle \leq\llangle [\mathbf{1}_{S_i}],[\mathbf{1}_{S_j}]\rrangle =|S_i\cap (S_j+a)| <|S_i| =\llangle [z_i],[\mathbf{1}_{S_i}]\rrangle. 
\] \medskip \noindent \textbf{Case II:} $j\neq i$ and $|S_j|\geq|S_i|$. If $\llangle [z_i],[\mathbf{1}_{S_j}]\rrangle\leq0$, then \[ \llangle [z_i],[\mathbf{1}_{S_j}]\rrangle \leq0 <|S_i| =\llangle [z_i],[\mathbf{1}_{S_i}]\rrangle, \] and so we are done. Now suppose $\llangle [z_i],[\mathbf{1}_{S_j}]\rrangle>0$. Considering \[ \llangle [z_i],[\mathbf{1}_{S_j}]\rrangle =\sup_{a\in\mathbb{R}^d}(Rz_i\star\mathbf{1}_{S_j})(a), \] then by continuity and compactness, the extreme value theorem produces $a\in\mathbb{R}^d$ such that \begin{equation} \label{eq.indicators 4} \llangle [z_i],[\mathbf{1}_{S_j}]\rrangle =\langle z_i,T_a\mathbf{1}_{S_j}\rangle =|S_i\cap(S_j+a)|-|(B\setminus S_i)\cap(S_j+a)|. \end{equation} Since $\llangle [z_i],[\mathbf{1}_{S_j}]\rrangle>0$, it follows that $|S_i\cap(S_j+a)|>0$, i.e., $S_i\cap(S_j+a)$ is nonempty, which in turn implies $S_j+a\subseteq B$. (This is why we defined $B$ to have radius $3r$.) As before, $|S_j|\geq|S_i|$ and $[\mathbf{1}_{S_i}]\neq [\mathbf{1}_{S_j}]$ together give $|S_i\cap(S_j+a)|<|S_j|$. Thus, \begin{equation} \label{eq.indicators 5} |(B\setminus S_i)\cap(S_j+a)| =|B\cap(S_j+a)|-|S_i\cap(S_j+a)| =|S_j+a|-|S_i\cap(S_j+a)| >0. \end{equation} We combine \eqref{eq.indicators 4} and \eqref{eq.indicators 5} to get \[ \llangle [z_i],[\mathbf{1}_{S_j}]\rrangle =|S_i\cap(S_j+a)|-|(B\setminus S_i)\cap(S_j+a)| <|S_i\cap(S_j+a)| \leq|S_i| =\llangle [z_i],[\mathbf{1}_{S_i}]\rrangle, \] as claimed. \end{proof} For each $i$, assume $S_i$ is translated so that it is contained in the smallest possible ball centered at the origin, and let $r_i$ denote the radius of this ball. The proof of Theorem~\ref{thm.classifying indicator functions} gives that each template $z_i$ is supported in a closed ball of radius $R:=3\max_i r_i$. The fact that these templates are localized bears some consequence for certain \textit{image articulation manifolds}~\cite{DonohoG:05}. In particular, for each $\pi\colon\{1,\ldots,k\}\to\mathbb{N}\cup\{0\}$, let $M_\pi \subseteq L^2(\mathbb{R}^d)$ denote the manifold of images of the form \[ \sum_{i=1}^k \sum_{j=1}^{\pi(i)} T_{a(i,j)}\mathbf{1}_{S_i}, \qquad \text{where} \qquad \|a(i,j)-a(i',j')\|>4R \quad \forall (i,j)\neq(i',j'). \] Thanks to the $4R$ spacing, each translate of each template interacts with at most one component of the image, and so for every $f\in M_\pi$, it holds that \[ \llangle [z_i],[f]\rrangle =\max_{i':\pi(i')>0}\llangle [z_i],[\mathbf{1}_{S_{i'}}]\rrangle. \] In particular, the same max filter bank can be used to determine the support of $\pi$. As an example, if some multiset of characters are typed on a page in a common font and with sufficient separation, then the max filter bank from Theorem~\ref{thm.classifying indicator functions} that distinguishes the characters can be used to determine which ones appear on the page. \subsection{Classifying mixtures of stationary processes} In this subsection, we focus on the case in which $V=\mathbb{R}^n$ and $G\cong C_n$ is the group of circular translation operators. A natural $G$-invariant probability distribution is a multivariate Gaussian with mean zero and circulant covariance, and so we consider the task of classifying a mixture of such distributions. One-dimensional textures can be modeled in this way, especially if the covariance matrix has a small bandwidth so that distant pixels are statistically independent. A standard approach for this problem is to estimate the first- and second-order moments given a random draw. 
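For orientation, the following minimal sketch (an illustration only, with hypothetical parameters, using \texttt{numpy}) draws a sample from one such component; because the covariance is circulant, every circular shift of the draw has the same distribution.
\begin{verbatim}
import numpy as np

# Sketch of the data model (illustration only): a zero-mean Gaussian whose
# covariance is circulant with a small band of nonzero correlations, so the
# distribution is invariant under circular shifts.

rng = np.random.default_rng(0)
n, w = 256, 5
row = np.zeros(n)
row[:w] = 0.5 ** np.arange(w)          # correlations supported on a small band
row[-(w - 1):] = row[1:w][::-1]        # symmetrize the first row

A = np.array([np.roll(row, i) for i in range(n)])   # circulant covariance matrix
assert np.all(np.linalg.eigvalsh(A) > 0)            # positive definite for this choice

x = rng.multivariate_normal(np.zeros(n), A)         # one draw from N(0, A)
# np.roll(x, a) has the same distribution as x for every shift a.
\end{verbatim}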
As we will soon see, one can alternatively classify with high accuracy by thresholding a single max filter. In what follows, we make use of \textit{Thompson's part metric} on the set of positive definite matrices: \[ d_\infty(A,B) :=\|\log(A^{-1/2}BA^{-1/2})\|_{2\to2}. \] We also let $A_k$ denote the leading $k\times k$ principal submatrix of $A$. \begin{theorem} \label{thm.binary gmm classification} Fix $C>\log2$, take $n,w\in\mathbb{N}$ such that $k:=\lfloor \sqrt{n/2}\rfloor\geq w$, and consider any positive definite $A,B\in\mathbb{R}^{n\times n}$ that are circulant with bandwidth $w$ and satisfy \begin{equation} \label{eq.thompson distance bound} d_\infty(A_k,B_k)\geq C. \end{equation} There exist $z\in\mathbb{R}^n$ supported on an interval of length $k$ and a threshold $\theta\in\mathbb{R}$ such that the following holds for every mixture $\mathsf{M}$ of $\mathsf{N}(0,A)$ and $\mathsf{N}(0,B)$: given $x\sim\mathsf{M}$, the comparison \[ \llangle[z],[x]\rrangle \gtrless \theta \] correctly classifies the latent mixture component of $x$ with probability $1-o_{n\to\infty;C}(1)$. \end{theorem} \begin{proof} First, we observe that $d_\infty(A_k,B_k)\geq C$ is equivalent to \[ \max\Big\{\lambda_{\mathrm{max}}(A_k^{-1/2}B_k A_k^{-1/2}),\lambda_{\mathrm{max}}(B_k^{-1/2}A_k B_k^{-1/2})\Big\} \geq e^C. \] Without loss of generality, we may assume $\lambda_{\mathrm{max}}(A_k^{-1/2}B_k A_k^{-1/2})\geq e^C$. Let $v\in\mathbb{R}^k$ denote a corresponding unit eigenvector of $A_k^{-1/2}B_k A_k^{-1/2}$, and define $z\in\mathbb{R}^n$ to be supported in its first $k$ entries, with leading subvector $z_k:=A_k^{-1/2}v$. Then \begin{equation} \label{eq.variance quotient} \frac{z^\top Bz}{z^\top Az} =\frac{z_k^\top B_k z_k}{z_k^\top A_k z_k} =\frac{v^\top A_k^{-1/2}B_k A_k^{-1/2}v}{v^\top v} =\lambda_{\mathrm{max}}(A_k^{-1/2}B_k A_k^{-1/2}) \geq e^C. \end{equation} Consider $x_1\sim\mathsf{N}(0,A)$. Then $\llangle [z],[x_1]\rrangle=\max_{a\in C_n}\langle T_az,x_1\rangle$, where each $\langle T_az,x_1\rangle$ has Gaussian distribution with mean zero and variance $(T_az)^\top A(T_az)$, which in turn equals $z^\top Az$ since $A$ is circulant. Denoting $Z\sim\mathsf{N}(0,1)$, a union bound then gives \[ \mathbb{P}\big\{\llangle [z],[x_1]\rrangle\geq t\big\} \leq n\cdot\mathbb{P}\big\{(z^\top Az)^{1/2}\cdot Z\geq t\big\} \leq ne^{-t^2/(2z^\top Az)} \] for $t\geq0$. This failure probability is $o_{n\to\infty;c_1}(1)$ by taking $t:=\sqrt{c_1\cdot z^\top Az\cdot\log n}$ for any $c_1>2$. Next, consider $x_2\sim\mathsf{N}(0,B)$, and take any subset $S\subseteq C_n$ consisting of $k$ members of $C_n$ of pairwise distance at least $2k$. Then \[ \llangle [z],[x_2]\rrangle =\max_{a\in C_n}\langle T_az,x_2\rangle \geq\max_{a\in S}\langle T_az,x_2\rangle. \] In this case, each $\langle T_az,x_2\rangle$ has Gaussian distribution with mean zero and variance $z^\top Bz$. Furthermore, since $k\geq w$, these random variables have pairwise covariance zero, and since they are jointly Gaussian, they are independent. For independent $Z_1,\ldots,Z_k\sim\mathsf{N}(0,1)$ and $t\geq0$, we have \[ \mathbb{P}\Big\{\max_{i\in\{1,\ldots,k\}}Z_i\leq t\Big\} =\mathbb{P}\{Z\leq t\}^k \leq(1-\tfrac{t}{1+t^2}\cdot\tfrac{1}{\sqrt{2\pi}}e^{-t^2/2})^k, \] where the last step follows from a standard lower bound on the tail of a standard Gaussian distribution.
Take $t:=\sqrt{c_2\log k}$ to get \[ \mathbb{P}\Big\{\max_{i\in\{1,\ldots,k\}}Z_i\leq \sqrt{c_2\log k}\Big\} \leq\Big(1-(\tfrac{\sqrt{c_2\log k}}{1+c_2\log k}\cdot\tfrac{1}{\sqrt{2\pi}}\cdot k^{1-c_2/2})\cdot\tfrac{1}{k}\Big)^k \leq\exp\Big(-\tfrac{\sqrt{c_2\log k}}{1+c_2\log k}\cdot\tfrac{1}{\sqrt{2\pi}}\cdot k^{1-c_2/2}\Big), \] which is $o_{k\to\infty;c_2}(1)$ when $c_2<2$. Overall, we simultaneously have \[ \llangle [z],[x_1]\rrangle <\theta_1 :=\sqrt{c_1\cdot z^\top Az\cdot\log n}, \qquad \llangle [z],[x_2]\rrangle >\theta_2 :=\sqrt{\tfrac{1}{2}c_2\cdot z^\top Bz\cdot\log n} \] with probability $1-o_{n\to\infty;c_1,c_2}(1)$, provided $c_1>2>c_2$. We select $c_1:=2(\tfrac{e^C}{2})^{1/2}>2$ and $c_2:=2(\tfrac{e^C}{2})^{-1/2}<2$, and then \eqref{eq.variance quotient} implies \[ \frac{\theta_2^2}{\theta_1^2} =\frac{\frac{1}{2}c_2\cdot z^\top Bz}{c_1\cdot z^\top Az} =\frac{1}{e^C}\cdot\frac{z^\top Bz}{z^\top Az} \geq1. \] Thus, $\theta_1\leq\theta_2$, and so the result follows by taking $\theta:=(\theta_1+\theta_2)/2$. \end{proof} Given a mixture of $k$ Gaussians with covariance matrices that pairwise satisfy~\eqref{eq.thompson distance bound}, then we can perform multiclass classification by a one-vs-one reduction. Indeed, the binary classifier in Theorem~\ref{thm.binary gmm classification} can be applied to all $\binom{k}{2}$ pairs of Gaussians, in which case we correctly classify the latent mixture component with high probability (provided $k$ is fixed and $n\to\infty$). Interestingly, max filters can also distinguish between stationary processes with \textit{identical} first- and second-order moments. For example, $x\sim\mathsf{Unif}(\{\pm1\}^n)$ and $y\sim\mathsf{N}(0,I_n)$ are both stationary with mean zero and identity covariance. If $z\in\mathbb{R}^n$ is a standard basis element, then with high probability, it holds that \[ \llangle[z],[x]\rrangle \leq1 \leq \sqrt{\log n} \leq\llangle[z],[y]\rrangle. \] This indicates that max filters incorporate higher-order moments. \subsection{Subgradients} In this subsection, we focus on the case in which $V=\mathbb{R}^d$ and $G$ is a closed subgroup of $\operatorname{O}(d)$. The previous subsections carefully designed templates to classify certain data models. For real-world classification problems, one is expected to train a classifier on a given training set of labeled data. To do so, one selects a parameterized family of classifiers and then locally minimizes some notion of training loss over this family. This is feasible provided the classifier is a differentiable function of the parameters. We envision a classifier in which the first layer is a max filter bank, and so this subsection establishes how to differentiate a max filter with respect to the template. This is made possible by convexity; see Lemma~\ref{lem.max filter product properties}(d). Every convex function $f\colon \mathbb{R}^d\to\mathbb{R}$ has a \textbf{subdifferential} $\partial f\colon \mathbb{R}^d\to 2^{\mathbb{R}^d}$ defined by \[ \partial f(x) :=\{u\in \mathbb{R}^d : f(x+h) \geq f(x) + \langle h, u\rangle ~~ \forall h\in \mathbb{R}^d \}. \] For a fixed $x\in\mathbb{R}^d$ and $u\in\partial f(x)$, it is helpful to interpret the graph of $z\mapsto f(x) + \langle z-x, u\rangle$ as a supporting hyperplane of the epigraph of $f$ at $x$. For example, the absolute value function over $\mathbb{R}$ has the following subdifferential: \[ \partial|\cdot|(x) =\left\{\begin{array}{cl} \{-1\} & \text{if } x<0, \\ {[}-1,1] & \text{if } x=0, \\ \{1\} & \text{if } x>0. \end{array}\right. 
\] Indeed, this gives the slopes of the hyperplanes that support the epigraph of $|\cdot|$ at $x\in\mathbb{R}$. Following~\cite{Watson:92}, we will determine the subdifferential of the max filtering map by first finding its \textit{directional derivatives}. In general, the directional derivative of $f$ at $x$ in the direction of $v\neq0$ is given by \[ f'(x;v) :=\lim_{t\to0^+}\frac{f(x+tv)-f(x)}{t} =\sup_{u\in\partial f(x)}\langle u,v\rangle. \] The second equality is a standard result; see for example Theorem~23.4 in~\cite{Rockafellar:70}. It will be convenient to denote the set \[ G(x,y) :=\arg\max_{g\in G}\langle x,gy\rangle. \] Observe that $G(x,y)$ is a closed subset of $G$ since the map $g\mapsto\langle x,gy\rangle$ is continuous. \begin{lemma} \label{lem.directional derivative} Suppose $G$ is a closed subgroup of $\operatorname{O}(d)$ and select $x,y,v\in\mathbb{R}^d$ with $v\neq0$. Then \[ \llangle[\cdot],[y]\rrangle'(x;v) =\max_{g\in G(x,y)}\langle v,gy\rangle. \] \end{lemma} \begin{proof} For each $t>0$ and $g\in G(x,y)$, we have \begin{equation} \label{eq.directional derivative 1} \llangle[x+tv],[y]\rrangle \geq\langle x+tv,gy\rangle =\langle x,gy\rangle+t\langle v,gy\rangle =\llangle [x],[y]\rrangle+t\langle v,gy\rangle. \end{equation} For each $t>0$, select $g_t\in G(x+tv,y)$. Then \begin{equation} \label{eq.directional derivative 2} \llangle[x+tv],[y]\rrangle =\langle x+tv,g_ty\rangle =\langle x,g_ty\rangle+t\langle v,g_ty\rangle \leq\llangle [x],[y]\rrangle+t\langle v,g_ty\rangle. \end{equation} We rearrange and combine \eqref{eq.directional derivative 1} and \eqref{eq.directional derivative 2} to get \begin{equation} \label{eq.directional derivative 3} \max_{g\in G(x,y)}\langle v,gy\rangle \leq \frac{\llangle[x+tv],[y]\rrangle-\llangle[x],[y]\rrangle}{t} \leq \langle v,g_ty\rangle \end{equation} for all $t>0$. Select a sequence $t_n\to0^+$ such that $g_{t_n}$ converges to some $g^\star\in G$ (by passing to a subsequence if necessary). Then continuity implies \[ \langle x+t_nv,g_{t_n}y\rangle =\llangle [x+t_nv],[y]\rrangle \to \llangle [x],[y]\rrangle. \] Furthermore, \begin{align*} |\langle x+t_nv,g_{t_n}y\rangle-\langle x,g^\star y\rangle| &\leq|\langle x,g_{t_n}y\rangle-\langle x,g^\star y\rangle|+|\langle t_nv,g_{t_n}y\rangle|\\ &\leq \|x\|\|g_{t_n}-g^\star\|_{2\to2} \|y\|+t_n\|v\|\|y\| \to0. \end{align*} It follows that $\langle x,g^\star y\rangle=\llangle [x],[y]\rrangle$, i.e., $g^\star\in G(x,y)$. Taking limits of \eqref{eq.directional derivative 3} then gives \[ \max_{g\in G(x,y)}\langle v,gy\rangle \leq \lim_{t\to0^+}\frac{\llangle[x+tv],[y]\rrangle-\llangle[x],[y]\rrangle}{t} \leq \lim_{t\to0^+}\langle v,g_ty\rangle =\langle v,g^\star y\rangle \leq \max_{g\in G(x,y)}\langle v,gy\rangle. \qedhere \] \end{proof} \begin{theorem} Suppose $G$ is a closed subgroup of $\operatorname{O}(d)$ and select $x,y\in\mathbb{R}^d$. Then \[ \partial\llangle [\cdot],[y]\rrangle(x) =\operatorname{conv}\{gy:g\in G(x,y)\}. \] \end{theorem} \begin{proof} First, we claim that for every $g\in G(x,y)$, the vector $gy$ is in the subdifferential $\partial\llangle [\cdot],[y]\rrangle(x)$. Indeed, for every $h\in\mathbb{R}^d$, we have \[ \llangle [x+h],[y]\rrangle \geq \langle x+h,gy\rangle = \langle x,gy\rangle + \langle h,gy\rangle = \llangle [x],[y]\rrangle + \langle h,gy\rangle, \] where the last step uses the fact that $g\in G(x,y)$. Since $\partial\llangle [\cdot],[y]\rrangle(x)$ is convex, it follows that the $\supseteq$ portion of the desired result holds. 
Next, suppose there exists $w\in\partial\llangle [\cdot],[y]\rrangle(x)$ such that $w\not\in\operatorname{conv}\{gy:g\in G(x,y)\}$. Then there exists a hyperplane that separates $w$ from $\{gy:g\in G(x,y)\}$, i.e., there is a nonzero vector $v\in\mathbb{R}^d$ such that \[ \langle v,gy\rangle <\langle v,w\rangle \] for every $g\in G(x,y)$. By Lemma~\ref{lem.directional derivative}, it follows that \[ \llangle[\cdot],[y]\rrangle'(x;v) =\max_{g\in G(x,y)}\langle v,gy\rangle <\langle w,v\rangle \leq \sup_{u\in\partial \llangle[\cdot],[y]\rrangle(x)}\langle u,v\rangle =\llangle[\cdot],[y]\rrangle'(x;v), \] a contradiction. This establishes the $\subseteq$ portion of the desired result. \end{proof} \subsection{Random templates and limit laws} While the previous subsection was concerned with the differentiability needed to optimize a feature map, it is well known that random feature maps suffice for various tasks (e.g., Johnson--Lindenstrauss maps~\cite{JohnsonL:84} and random kitchen sinks~\cite{RahimiR:08}). In fact, our bilipschitz result (Theorem~\ref{thm.main result}) uses random templates. As such, one may be inclined to use random templates to produce features for classification. In this subsection, we focus on the case in which $V=\mathbb{R}^d$ and $G\cong S_d$ is the group of $d\times d$ permutation matrices. Consider a max filter bank consisting of independent standard gaussian templates $z_1,\ldots,z_n\in\mathbb{R}^d$. Then \[ \llangle [z_i],[x]\rrangle =\langle \operatorname{sort}(z_i),\operatorname{sort}(x)\rangle. \] When $d$ is large, we expect the vectors $\operatorname{sort}(z_i)$ to exhibit little variation, and so we are inclined to perform dimensionality reduction. Figure~\ref{fig.sorted gaussians} illustrates that the principal components of $\{\operatorname{sort}(z_i)\}_{i=1}^n$ exhibit a high degree of regularity. \begin{figure} \begin{center} \includegraphics[width=0.45\textwidth,trim={110 210 110 210},clip]{sorted_gaussians_1.pdf} \quad \includegraphics[width=0.45\textwidth,trim={110 210 110 210},clip]{sorted_gaussians_2.pdf} \end{center} \caption{\label{fig.sorted gaussians} \textbf{(left)} Draw $n=10,000$ independent standard gaussian random vectors in $\mathbb{R}^d$ with $d=1,000$, sort the coordinates of each vector, and then plot the top $6$ eigenvectors of the resulting sample covariance matrix. \textbf{(right)} Discretize and plot the top $6$ eigenfunctions described in Theorem~\ref{thm.eigenfunctions}. } \end{figure} To explain this regularity, select $Q\colon(0,1)\to\mathbb{R}$ so that $Q^{-1}$ is the cumulative distribution function of the standard normal distribution. Take $z\in\mathsf{N}(0,I_d)$ and put $s:=\operatorname{sort}(z)$. Denoting $p_i:=\frac{i}{d+1}$ and $q_i:=1-p_i$, then Section~4.6 in~\cite{HerbertN:04} gives \[ \mathbb{E}[s_i]=Q(p_i)+O(\tfrac{1}{d}), \qquad \operatorname{Cov}(s_i,s_j)=\tfrac{1}{d+2}p_iq_jQ'(p_i)Q'(p_j)+O(\tfrac{1}{d^{2}}), \qquad i\leq j. \] The principal components of $\{\operatorname{sort}(z_i)\}_{i=1}^n$ approximate the top eigenvectors of the covariance matrix, which in turn approximate discretizations of eigenfunctions of the integral operator with kernel $K\colon(0,1)^2\to\mathbb{R}$ defined by \[ K(x,y):=\min\{x,y\}\cdot(1-\max\{x,y\})\cdot Q'(x)\cdot Q'(y). \] The following result expresses these eigenfunctions in terms of the \textit{probabilist's Hermite polynomials}, which are defined by the following recurrence: \begin{equation} \label{eq.hermite recurrence} p_0(x) :=1, \qquad p_{n+1}(x) :=xp_n(x)-p_n'(x). 
\end{equation} \begin{theorem} \label{thm.eigenfunctions} The integral operator $L\colon L^2([0,1])\to L^2([0,1])$ defined by \[ Lf(x):=\int_0^1 K(x,y)f(y)dy \] has eigenvalue $\frac{1}{n+1}$ with corresponding eigenfunction $p_n\circ Q$ for each $n\in\mathbb{N}\cup\{0\}$. \end{theorem} As such, instead of max filtering with independent gaussian templates, one can efficiently capture the same information by taking the inner product between $\operatorname{sort}(x)$ and discretized versions of the eigenfunctions $p_n\circ Q$. To reduce the dimensionality, one can simply use fewer eigenfunctions. To prove Theorem~\ref{thm.eigenfunctions}, we will use the following lemma; here, $\varphi\colon\mathbb{R}\to\mathbb{R}$ denotes the probability density function of the standard normal distribution. \begin{lemma}\ \label{lem.derivatives go down and up} \begin{itemize} \item[(a)] $p_n(x)=\frac{1}{n+1}p_{n+1}'(x)$. \item[(b)] $\int_0^x p_{n+1}(Q(y))dy=-\varphi(Q(x))p_n(Q(x))$. \end{itemize} \end{lemma} The proof of Lemma~\ref{lem.derivatives go down and up} follows quickly from the recurrence~\eqref{eq.hermite recurrence}. \begin{proof}[Proof of Theorem~\ref{thm.eigenfunctions}] We compute $L(p_n\circ Q)$ by splitting the integral: \begin{align*} L(p_n\circ Q)(x) &= (1-x)Q'(x)\underbrace{\int_0^x yQ'(y)p_n(Q(y))dy}_{I_1} +xQ'(x)\underbrace{\int_x^1 (1-y)Q'(y)p_n(Q(y))dy}_{I_2}. \end{align*} For both $I_1$ and $I_2$, we integrate by parts with \[ dv =Q'(y)p_n(Q(y))dy =Q'(y)\cdot \tfrac{1}{n+1}p_{n+1}'(Q(y))\cdot dy =\tfrac{1}{n+1}(p_{n+1}\circ Q)'(y) dy, \] where the middle step follows from Lemma~\ref{lem.derivatives go down and up}(a). Then Lemma~\ref{lem.derivatives go down and up}(b) gives \begin{align*} I_1 &=yv\Big|_0^x-\int_0^x vdy =\tfrac{1}{n+1}\Big(xp_{n+1}(Q(x))+\varphi(Q(x))p_n(Q(x))\Big),\\ I_2 &=(1-y)v\Big|_x^1+\int_x^1 vdy =\tfrac{1}{n+1}\Big(-(1-x)p_{n+1}(Q(x))+\varphi(Q(x))p_n(Q(x))\Big). \end{align*} These combine (and mostly cancel) to give \[ L(p_n\circ Q)(x) =\tfrac{1}{n+1}Q'(x)p_n(Q(x))\varphi(Q(x)) =\tfrac{1}{n+1}(p_n\circ Q)(x). \qedhere \] \end{proof} \section{Numerical examples} \label{sec.numerics} In this section, we use max filters as feature maps for various real-world learning tasks. \begin{example}[Voting districts] \label{ex.districts} The \textit{one person, one vote} principle insists that in each state, different voting districts must have nearly the same number of constituents. This principle is enforced with the help of a decennial redistricting process based on U.S.\ Census data. Interestingly, each state assembly applies its own process for redistricting, and partisan approaches can produce unwanted gerrymandering. Historically, gerrymandering is detected by how contorted a district's shape looks; for example, the Washington Post article~\cite{Ingraham:14} uses a particular geometric score to identify the top $10$ most gerrymandered voting districts of the $113$th Congress. As an alternative, we visualize the distribution of district shapes with the help of max filtering. The shape files for all voting districts of the $116$th Congress are available in~\cite{Switzer:kaggle}. We center each district at the origin and scale it to have unit perimeter, and then we sample the boundary at $n=50$ equally spaced points. This results in a $2\times 50$ matrix representation of each district. 
However, the same district shape may be represented by many matrices corresponding to an action of $\operatorname{O}(2)$ on the left and an action of $C_n$ that cyclically permutes columns. Hence, it is appropriate to apply max filtering with $G\cong\operatorname{O}(2)\times C_n$. It is convenient to identify $\mathbb{R}^{2\times 50}$ with $\mathbb{C}^{50}$ so that the corresponding max filter is given by \begin{align*} \llangle [z],[x]\rrangle &=\max\Big\{~\max_{a\in C_n} |z^*T_ax|,~\max_{a\in C_n}| \overline{z}^*T_ax|~\Big\}\\ &=\max\Big\{~\max_{a\in C_n}|(R\overline{z}\star x)(a)|,~\max_{a\in C_n}|(Rz\star x)(a)|~\Big\}, \end{align*} which can be computed efficiently with the help of the fast Fourier transform. We max filter with $100$ random templates to embed these districts in the feature space $\mathbb{R}^{100}$, and then we visualize the result using PCA; see Figure~\ref{fig.districts}. \begin{figure} \begin{center} \includegraphics[width=\textwidth]{districts_pca.pdf} \end{center} \caption{\label{fig.districts} Visualization of voting districts of the $116$th Congress obtained by max filtering and principal component analysis. See Example~\ref{ex.districts} for details. } \end{figure} Interestingly, the principal components of this feature domain appear to be interpretable. The first principal component (given by the horizontal axis) seems to capture the extent to which the district is convex, while the second principal component seems to capture the eccentricity of the district. Six of the ten most gerrymandered districts from~\cite{Ingraham:14} are drawn in red, which appear at relatively extreme points in this feature space. The four remaining districts (NC-1, NC-4, NC-12, and PA-7) were redrawn by court order between the $113$th and $116$th Congresses; the new versions of these districts are drawn in blue. Unsurprisingly, the redrawn districts are not contorted, and they appear at not-so-extreme points in the feature space. \end{example} \begin{example}[ECG time series] \label{ex.ecg} An electrocardiogram (ECG) uses electrodes placed on the skin to quantify the heart's electrical activity over time. With ECG data, a physician can determine whether a patient has had a heart attack with about 70\% accuracy~\cite{MakimotoEtal:20}. Recently, Makimoto et al.~\cite{MakimotoEtal:20} trained a $6$-layer convolutional neural network to perform this task with about 80\% accuracy. The dataset they used is available at~\cite{BousseljotKS:95}, which consists of $(12+3)$-lead ECG data sampled at $1$~kHz from $148$ patients who recently had a heart attack (i.e., \textit{myocardial infarction} or \textit{MI}) and from $141$ patients who did not. As an alternative to convolutional neural networks, we use max filters to classify patients based on \textit{one second} of ECG data (i.e., roughly one heartbeat). In particular, we read the first $t=1000$ samples of all $15$ leads to obtain a $15\times t$ matrix $X$. We then lift this matrix to a $15\times w\times (t-w+1)$ tensor with $w=30$, where each $15\times w$ slice corresponds to a contiguous $15\times w$ submatrix of $X$. Then we normalize each $1\times w\times 1$ subvector to have mean zero to get the tensor $Y$; this discards any trend in the data. We account for time invariance by taking $G$ to be the order-$(t-w+1)$ group of circular permutations of the $15\times w$ slices of $Y$. Using this group, we max filter with $n=5$ different templates and then classify with a support-vector machine. 
We constrain each template to be supported on a single slice, and we train these templates together with the support-vector machine classifier by gradient descent to minimize hinge loss. \begin{figure} \begin{center} \includegraphics[width=\textwidth]{ecg_test_set.pdf} \end{center} \caption{\label{fig.ecg} We train max filter templates and a support-vector machine classifier on electrocardiogram data to distinguish patients who have had a heart attack from those who have not. Above, we plot the most extreme examples in the test set (those with heart attacks on the left, and those without on the right). The blue windows illustrate the time segments of width $w=30$ that align best with the templates. See Example~\ref{ex.ecg} for details. } \end{figure} Following~\cite{MakimotoEtal:20}, we form the test set by randomly drawing $25$ examples from the MI class and $25$ examples from the non-MI class, and then we form the training set by randomly drawing $108$ non-test set examples from each class. We perform $10$ trials of this experiment, and each achieves between $74$\% and $84$\% accuracy on the test set. In particular, this is competitive with the $6$-layer convolutional neural network in~\cite{MakimotoEtal:20} despite accessing only a fraction of the data. In Figure~\ref{fig.ecg}, we illustrate what the classifier considers to be the most extreme examples in the test set. The time segments that align best with the trained templates typically cover the heartbeat portion of the ECG signal. \end{example} \begin{example}[Textures] \label{ex.textures} In this example, we use max filtering to classify various textures, specifically those given in the Kylberg Texture Dataset v.\ 1.0, available in~\cite{Kylberg:online}. This dataset consists of 28 classes of textures, such as blanket, grass, and oatmeal. Each class consists of 160 distinct grayscale images. (We crop these $576\times 576$ images down to $256\times 256$ for convenience.) A few classifiers in the literature have been trained on this dataset~\cite{KylbergS:13,AndrearczykW:16}, but in order to achieve a high level of performance, they leverage domain-specific feature maps, augment the training set with other image sets, or simply initialize with a pre-trained network. As an alternative, we apply max filtering with random features, and we succeed with much smaller training sets. For an honest comparison, we train different classifiers on training sets of different sizes, and the results are displayed in Table~\ref{table.accuracies}. (Here, the test set consists of $32$ points from each class.) We describe each classifier in what follows.
\begin{table} \caption{\label{table.accuracies} Accuracy on test set versus size of training set (baseline = 0.035, see Example~\ref{ex.textures}) } \begin{center} \begin{tabular}{|>{\centering\arraybackslash}p{0.17\textwidth}>{\centering\arraybackslash}p{0.17\textwidth}>{\centering\arraybackslash}p{0.17\textwidth}>{\centering\arraybackslash}p{0.17\textwidth}>{\centering\arraybackslash}p{0.17\textwidth}|}\hline pts per class & CNN & LDA & PCA-LDA & our method \\ \hline\hline 2 & 0.125 & 0.043 & 0.063 & 0.698 \\ \hline 4 & 0.114 & 0.103 & 0.060 & 0.947 \\ \hline 8 & 0.150 & 0.158 & 0.128 & 0.970 \\ \hline 16 & 0.214 & 0.125 & 0.128 & 0.970 \\ \hline 32 & 0.306 & 0.116 & 0.120 & 0.968 \\ \hline 64 & 0.534 & 0.117 & 0.122 & 0.964 \\ \hline 128 & 0.637 & (OOM) & 0.128 & 0.962 \\ \hline \end{tabular} \end{center} \end{table} First, we consider a convolutional neural network (CNN) with a standard architecture. We pass the $256\times 256$ image through a convolutional layer with a $3\times 3$ kernel before max pooling over $2\times 2$ patches. Then we pass the result through another convolutional layer with a $3\times 3$ kernel, and again max pool over $2\times 2$ patches. We end with a dense layer that maps to $28$ dimensions, one for each class. We train by minimizing cross entropy loss against the one-hot vector representation of each label. This model has $1.7$ million parameters, which enables us to achieve perfect accuracy on the training set. As an alternative, we apply linear discriminant analysis (LDA). Training this classifier involves a massive matrix operation, and this requires too much memory in our setting when the training set has $128$ points per class. This motivates the use of principal component analysis to reduce the dimensionality of each $256\times 256$ image to $k=25$ principal components. The resulting classifier (PCA-LDA) works for larger training sets, and even exhibits improved accuracy when the training set is not too small. For our method, we apply the same PCA-LDA method to a max filtering feature domain. Surprisingly, we find that modding out by the entire group of pixel permutations (i.e., just sorting the pixel values) performs reasonably well as a feature map. Our method implements a multiscale version of this observation. For each $\ell\in\{0,1,\ldots,8\}$, one may partition the $2^8\times 2^8$ image into $4^{8-\ell}$ square patches of width $2^\ell$. We perform max filtering with the group $(S_{4^\ell})^{4^{8-\ell}}$ of all patch-preserving pixel permutations for each $\ell\in\{2,\ldots,8\}$. For simplicity, we are inclined to draw templates at random, but for computational efficiency, we simulate random templates with the help of Theorem~\ref{thm.eigenfunctions}. In particular, for each $\ell\in\{2,\ldots,8\}$ and $n\in\{0,\ldots,5\}$, we sort the pixels in each $2^\ell\times 2^\ell$ patch and take their inner products with a discretized version of $p_n\circ Q$ and sum over the patches. This maps each $2^8\times 2^8$ image to a $42$-dimensional feature vector. As before, we apply PCA with $k=25$ and then train an LDA classifier on the result. In the end, this max filtering approach significantly outperforms the other classifiers we considered, as seen in Table~\ref{table.accuracies}. \end{example} \section{Discussion} \label{sec.discussion} Max filtering offers a rich source of invariants that can be used for a variety of machine learning tasks. In this section, we discuss several opportunities for follow-on work. 
\textbf{Separating.} In terms of sample complexity, Corollary~\ref{cor.2d templates suffice for finite groups} gives that a generic max filter bank of size $2d$ separates all $G$-orbits in $\mathbb{R}^d$ provided $G\leq\operatorname{O}(d)$ is finite. If $G\leq\operatorname{O}(d)$ is not topologically closed, then no continuous invariant (such as a max filter bank) can separate all $G$-orbits. If $G\leq\operatorname{O}(d)$ is topologically closed, then by Theorem~3.4.5 in~\cite{OnishchikV:90}, $G$ is algebraic, and so Theorem~\ref{thm.dym-gortler} applies. We suspect that max filtering separates orbits in such cases, but progress on this front will likely factor through Problem~\ref{prob.semialgebraic}(a). For computational complexity, how well does the max filtering separation hierarchy~\eqref{eq.hierarchy} separate isomorphism classes of weighted graphs? Judging by~\cite{AlonYZ:95}, we suspect that max filtering with a template of order $k$ and treewidth $t$ can be computed with runtime $e^{O(k)}n^{t+1}\log n$, which is polynomial in $n$ when $k$ is logarithmic and $t$ is bounded. Which classes of graphs are separated by such max filters? Also, can max filtering be used to solve real-world problems involving graph data, or is the exponent too large to be practical? \textbf{Bilipschitz.} The proof of Lemma~\ref{lem.bilipschitz no randomness} contains our approach to finding Lipschitz bounds on max filter banks. Our upper Lipschitz bound follows immediately from the fact that each individual max filter is Lipschitz, and it does not require the group to be finite. This bound is optimal for $G=\operatorname{O}(d)$, but it is known to be loose for small groups like $G=\{\pm\operatorname{id}\}$. We suspect that our lower Lipschitz bound leaves a lot more room for improvement. In particular, we use the pigeonhole principle to pass from a sum to a maximum so that we can leverage projective uniformity. A different approach might lead to a tighter bound that does not require $G$ to be finite. More abstractly, we suspect that separating implies bilipschitz, though the bounds might be arbitrarily bad; see Problem~\ref{prob.sep implies bilip}. \textbf{Randomness.} Our separating and bilipschitz results (Corollary~\ref{cor.2d templates suffice for finite groups} and Theorem~\ref{thm.main result}) are not given in terms of explicit templates. Meanwhile, our Mallat-type stability result (Theorem~\ref{thm.mallat bound}) requires a localized template, and we can interpret the templates used in our weighted graph separation hierarchy~\eqref{eq.hierarchy} as being localized, too. We expect that there are other types of structured templates that would reduce the computational complexity of max filtering (much like the structured measurements studied in compressed sensing~\cite{RudelsonV:08,PfanderRT:13,KrahmerMR:14} and phase retrieval~\cite{BandeiraCM:14,EldarSMBC:14,BodmanH:15,GrossKK:17}). It would be interesting to prove separating or bilipschitz results in such settings. More generally, can one construct explicit templates for which max filtering is separating or bilipschitz for a given group? Going the other direction, the reader may have noticed that the plots in Figure~\ref{fig.sorted gaussians} deviate from each other at the edges of the interval. We expect that such deviations decay as the dimension grows, but this requires further analysis. How can one analyze the behavior of random max filters for other groups? 
\textbf{Max filtering networks.} Example~\ref{ex.textures} opens the door to a variety of innovations with max filtering. Here, instead of fixing a single group to mod out by, we applied max filtering with a family of different groups. We selected permutations over patches due to the computational simplicity of using Theorem~\ref{thm.eigenfunctions}, and we arranged the patches in a hierarchical structure so as to capture behavior at different scales. Is there any theory to explain the performance of this architecture? Are there other useful architectures in this vicinity, perhaps by combining with neural networks? \section*{Acknowledgments} This work was initiated at the SOFT 2021:\ Summer of Frame Theory virtual workshop. The authors thank Boris Alexeev for helpful discussions and Soledad Villar for bringing the article~\cite{DymG:22} to their attention. \begin{thebibliography}{WW} \bibitem{AlexeevBFM:14} B.\ Alexeev, A.\ S.\ Bandeira, M.\ Fickus, D.\ G.\ Mixon, Phase retrieval with polarization, SIAM J.\ Imaging Sci.\ 7 (2014) 35--66. \bibitem{AlonYZ:95} N.\ Alon, R.\ Yuster, U.\ Zwick, Color-coding, J.\ ACM 42 (1995) 844--856. \bibitem{AndrearczykW:16} V.\ Andrearczyk, P.\ F.\ Whelan, Using filter banks in convolutional neural networks for texture classification, Pattern Recognit.\ Lett.\ 84 (2016) 63--69. \bibitem{BalanCE:06} R.\ Balan, P.\ Casazza, D.\ Edidin, On signal reconstruction without phase, Appl.\ Comput.\ Harmon.\ Anal.\ 20 (2006) 345--356. \bibitem{BalanD:21} R.\ Balan, C.\ B.\ Dock, Lipschitz Analysis of Generalized Phase Retrievable Matrix Frames, arXiv:2109.14522 (2021). \bibitem{BalanHS:22} R.\ Balan, N.\ Haghani, M.\ Singh, Permutation Invariant Representations with Applications to Graph Deep Learning, arXiv:2203.07546 (2022). \bibitem{BalanW:15} R.\ Balan, Y.\ Wang, Invertibility and robustness of phaseless reconstruction, Appl.\ Comput.\ Harmon.\ Anal.\ 38 (2015) 469--488. \bibitem{BandeiraBKPWW:17} A.\ S.\ Bandeira, B.\ Blum-Smith, J.\ Kileel, A.\ Perry, J.\ Weed, A.\ S.\ Wein, Estimation under group actions:\ Recovering orbits from invariants, arXiv:1712.10163 (2017). \bibitem{BandeiraCMN:14} A.\ S.\ Bandeira, J.\ Cahill, D.\ G.\ Mixon, A.\ A.\ Nelson, Saving phase:\ Injectivity and stability for phase retrieval, Appl.\ Comput.\ Harmon.\ Anal.\ 37 (2014) 106--125. \bibitem{BandeiraCM:14} A.\ S.\ Bandeira, Y.\ Chen, D.\ G.\ Mixon, Phase retrieval from power spectra of masked signals, Inform.\ Inference 3 (2014) 83--102. \bibitem{BendoryELS:22} T.\ Bendory, D.\ Edidin, W.\ Leeb, N.\ Sharon, Dihedral multi-reference alignment, IEEE Trans.\ Inform.\ Theory 68 (2022) 3489--3499. \bibitem{BochnakCR:13} J.\ Bochnak, M.\ Coste, M.-F.\ Roy, Real algebraic geometry, Springer, 2013. \bibitem{BodmanH:15} B.\ G.\ Bodmann, N.\ Hammen, Stable phase retrieval with low-redundancy frames, Adv.\ Comput.\ Math.\ 41 (2015) 317--331. \bibitem{BousseljotKS:95} R.\ Bousseljot, D.\ Kreiseler, A.\ Schnabel, Nutzung der EKG-Signaldatenbank CARDIODAT der PTB \"{u}ber das Internet, Biomed.\ Tech.\ 40 (1995) 317--318, \url{https://www.physionet.org/content/ptbdb/1.0.0/}. \bibitem{BrunaM:11} J.\ Bruna, S.\ Mallat, Classification with scattering operators, CVPR 2011, 1561--1566. \bibitem{BrunaM:13} J.\ Bruna, S.\ Mallat, Invariant scattering convolution networks, IEEE Trans.\ Pattern Anal.\ Mach.\ Intell.\ 35 (2013) 1872--1886. \bibitem{CahillCD:16} J.\ Cahill, P.\ Casazza, I.\ Daubechies, Phase retrieval in infinite-dimensional Hilbert spaces, Trans. Amer. Math. Soc., Ser.\ B 3 (2016) 63--76. 
\bibitem{CahillCC:arxiv} J.\ Cahill, A.\ Contreras, A.\ Contreras-Hip, Classifying Signals Under a Finite Abelian Group Action:\ The Finite Dimensional Setting, arXiv:1911.05862 (2019). \bibitem{CahillCC:20} J.\ Cahill, A.\ Contreras, A.\ Contreras-Hip, Complete set of translation invariant measurements with Lipschitz bounds, Appl.\ Comput.\ Harmon.\ Anal.\ 49 (2020) 521--539. \bibitem{CandesESV:15} E.\ J.\ Cand\`{e}s, Y.\ C.\ Eldar, T.\ Strohmer, V.\ Voroninski, Phase retrieval via matrix completion, SIAM Rev.\ 57 (2015) 225--251. \bibitem{CandesLS:15} E.\ J.\ Cand\`{e}s, X.\ Li, M.\ Soltanolkotabi, Phase retrieval via Wirtinger flow:\ Theory and algorithms, IEEE Trans.\ Inform.\ Theory 61 (2015) 1985--2007. \bibitem{CandesSV:13} E.\ J.\ Cand\`{e}s, T.\ Strohmer, V.\ Voroninski, Phaselift:\ Exact and stable signal recovery from magnitude measurements via convex programming, Comm.\ Pure Appl.\ Math.\ 66 (2013) 1241--1274. \bibitem{CaulfieldH:80} H.\ J.\ Caulfield, R.\ Haimes, Generalized matched filtering, Appl.\ Opt.\ 19 (1980) 181--183. \bibitem{CaulfieldM:69} H.\ J.\ Caulfield, W.\ T.\ Maloney, Improved discrimination in optical character recognition, Appl.\ Opt.\ 8 (1969) 2354--2356. \bibitem{Cayley:45} A.\ Cayley, On the theory of linear transformations, Cambridge Math.\ J.\ 4 (1845) 193--209. \bibitem{ChenDL:20} S.\ Chen, E.\ Dobriban, J.\ Lee, A group-theoretic framework for data augmentation, NeurIPS 2020, 21321--21333. \bibitem{ChenC:17} Y.\ Chen, E.\ J.\ Cand\`{e}s, Solving random quadratic systems of equations is nearly as easy as solving linear systems, Comm.\ Pure Appl.\ Math.\ 70 (2017) 822--883. \bibitem{CiresanMGS:10} D.\ C.\ Cire\c{s}an, U.\ Meier, L.\ M.\ Gambardella, J.\ Schmidhuber, Deep, big, simple neural nets for handwritten digit recognition, Neural Comput.\ 22 (2010) 3207--3220. \bibitem{ConcaEHV:15} A.\ Conca, D.\ Edidin, M.\ Hering, C.\ Vinzant, An algebraic characterization of injectivity in phase retrieval, Appl.\ Comput.\ Harmon.\ Anal.\ 38 (2015) 346--356. \bibitem{HerbertN:04} H.\ A.\ David, H.\ N.\ Nagaraja, Order statistics, John Wiley \& Sons, 2004. \bibitem{DemanetH:14} L.\ Demanet, P.\ Hand, Stable optimizationless recovery from phaseless linear measurements, J.\ Fourier Anal.\ Appl.\ 20 (2014) 199--221. \bibitem{DerksenK:15} H.\ Derksen, G.\ Kemper, Computational invariant theory, Springer, 2015. \bibitem{Domokos:17} M.\ Domokos, Degree bound for separating invariants of abelian groups, Proc.\ Amer.\ Math.\ Soc.\ 145 (2017) 3695--3708. \bibitem{Domokos:arxiv} M.\ Domokos, Separating monomials for diagonalizable actions, arXiv:2202.07002 (2022). \bibitem{DonohoG:05} D.\ L.\ Donoho, C.\ Grimes, Image manifolds which are isometric to Euclidean space, J.\ Math.\ Imaging Vis.\ 23 (2005) 5--24. \bibitem{Defresne:08} E.\ S.\ Dufresne, Separating Invariants, Ph.D.\ thesis, 2008. \bibitem{DymG:22} N.\ Dym, S.\ J.\ Gortler, Low Dimensional Invariant Embeddings for Universal Geometric Learning, arXiv:2205.02956 (2022). \bibitem{EldarSMBC:14} Y.\ C.\ Eldar, P.\ Sidorenko, D.\ G.\ Mixon, S.\ Barel, O.\ Cohen, Sparse phase retrieval from short-time Fourier measurements, IEEE Signal Process.\ Lett.\ 22 (2014) 638--642. \bibitem{GaoWH:19} F.\ Gao, G.\ Wolf, M.\ Hirn, Geometric scattering for graph data analysis, ICML 2019, 2122--2131. \bibitem{GrossKK:17} D.\ Gross, F.\ Krahmer, R.\ Kueng, Improved recovery guarantees for phase retrieval from coded diffraction patterns, Appl.\ Comput.\ Harmon.\ Anal.\ 42 (2017) 37--64. 
\bibitem{GuEtal:18} J.\ Gu, et al.,\ Recent advances in convolutional neural networks, Pattern Recognit.\ 77 (2018) 354--377. \bibitem{Hilbert:90} D.\ Hilbert, \"{U}ber die Theorie der algebraischen Formen, Math.\ Ann.\ 36 (1890) 473--534. \bibitem{HuangV:21} T.\ N.\ Huang, S.\ Villar, A Short Tutorial on The Weisfeiler-Lehman Test And Its Variants, ICASSP 2021, 8533--8537. \bibitem{Ingraham:14} C.\ Ingraham, America's most gerrymandered congressional districts, The Washington Post, May 15, 2014, \url{https://www.washingtonpost.com/news/wonk/wp/2014/05/15/americas-most-gerrymandered-congressional-districts/}. \bibitem{IwenMP:19} M.\ A.\ Iwen, S.\ Merhi, M.\ Perlmutter, Lower Lipschitz bounds for phase retrieval from locally supported measurements, Appl.\ Comput.\ Harmon.\ Anal.\ 47 (2019) 526--538. \bibitem{JohnsonL:84} W.\ B.\ Johnson, J.\ Lindenstrauss, Extensions of Lipschitz mappings into a Hilbert space, Contemp.\ Math.\ 26 (1984) 189--206. \bibitem{KrahmerMR:14} F.\ Krahmer, S.\ Mendelson, H.\ Rauhut, Suprema of chaos processes and the restricted isometry property, Comm.\ Pure Appl.\ Math.\ 67 (2014) 1877--1904. \bibitem{KrizhevskySH:12} A.\ Krizhevsky, I.\ Sutskever, G.\ Hinton, ImageNet Classification with Deep Convolutional Neural Networks, NeurIPS 2012, 1097--1105. \bibitem{Kylberg:online} G.\ Kylberg, The Kylberg Texture Dataset v.\ 1.0, \url{http://www.cb.uu.se/~gustaf/texture/}. \bibitem{KylbergS:13} G.\ Kylberg, I.-M.\ Sintorn, Evaluation of noise robustness for local binary pattern descriptors in texture classification, EURASIP J. Image Video Process.\ 2013 (2013) 1--20. \bibitem{MakimotoEtal:20} H.\ Makimoto, et al., Performance of a convolutional neural network derived from an ECG database in recognizing myocardial infarction, Sci.\ Rep.\ 10 (2020) 1--9. \bibitem{Mallat:12} S.\ Mallat, Group invariant scattering, Comm.\ Pure Appl.\ Math.\ 65 (2012) 1331--1398. \bibitem{MitteroeckerG:09} P.\ Mitteroecker, P.\ Gunz, Advances in geometric morphometrics, Evol.\ Biol.\ 36 (2009) 235--247. \bibitem{MorrisRFHLRG:19} C.\ Morris, M.\ Ritzert, M.\ Fey, W.\ L.\ Hamilton, J.\ E.\ Lenssen, G.\ Rattan, M.\ Grohe, Weisfeiler and Leman go neural:\ Higher-order graph neural networks, AAAI 2019, 4602--4609. \bibitem{MumfordFK:94} D.\ Mumford, J.\ Fogarty, F.\ Kirwan, Geometric invariant theory, Springer, 1994. \bibitem{OnishchikV:90} A.\ L.\ Onishchik, E.\ B.\ Vinberg, Lie groups and algebraic groups, Translated by D.\ A.\ Leites, Springer, 1990. \bibitem{PerlmutterGWH:19} M.\ Perlmutter, F.\ Gao, G.\ Wolf, M.\ Hirn, Understanding graph neural networks with asymmetric geometric scattering transforms, arXiv:1911.06253 (2019). \bibitem{PerryWBRS:19} A.\ Perry, J.\ Weed, A.\ S.\ Bandeira, P.\ Rigollet, A.\ Singer, The sample complexity of multireference alignment, SIAM J.\ Math.\ Data Science 1 (2019) 497--517. \bibitem{PfanderRT:13} G.\ E.\ Pfander, H.\ Rauhut, J.\ A.\ Tropp, The restricted isometry property for time--frequency structured random matrices, Probab.\ Theory Related Fields 156 (2013) 707--737. \bibitem{RahimiR:08} A.\ Rahimi, B.\ Recht, Weighted sums of random kitchen sinks:\ Replacing minimization with randomization in learning, NeurIPS 2008, 858. \bibitem{Rockafellar:70} R.\ T.\ Rockafellar, Convex Analysis, Princeton U.\ Press, 1970. \bibitem{RongWX:21} Y.\ Rong, Y.\ Wang, Z.\ Xu, Almost everywhere injectivity conditions for the matrix recovery problem, Appl.\ Comput.\ Harmon.\ Anal.\ 50 (2021) 386--400. 
\bibitem{RudelsonV:08} M.\ Rudelson, R.\ Vershynin, On sparse reconstruction from Fourier and Gaussian measurements, Comm.\ Pure Appl.\ Mathematics 61 (2008) 1025--1045. \bibitem{Sato:20} R.\ Sato, A survey on the expressive power of graph neural networks, arXiv:2003.04078 (2020). \bibitem{Silver:16} D.\ Silver, et al., Mastering the game of Go with deep neural networks and tree search, Nature 529 (2016) 484. \bibitem{SimardSP:03} P.\ Y.\ Simard, D.\ Steinkraus, J.\ C.\ Platt, Best practices for convolutional neural networks applied to visual document analysis, ICDAR 2003, 1--6. \bibitem{Sogge:17} C.\ D.\ Sogge, Fourier integrals in classical analysis, Cambridge U.\ Press, 2017. \bibitem{Switzer:kaggle} N.\ Switzer, USA 2019 Congressional District Shape Files, \url{https://www.kaggle.com/datasets/nswitzer/usa-2019-congressional-district-shape-files}. \bibitem{VillarHSYB:21} S.\ Villar, D.\ W.\ Hogg, K.\ Storey-Fisher, W.\ Yao, B.\ Blum-Smith, Scalars are universal:\ Equivariant machine learning, structured like classical physics, NeurIPS 2021, 28848--28863. \bibitem{Vinzant:15} C.\ Vinzant, A small frame and a certificate of its injectivity, SampTA 2015, 197--200. \bibitem{Waldspurger:17} I.\ Waldspurger, Exponential decay of scattering coefficients, SampTA 2017, 143--146. \bibitem{WaldspurgerdAM:15} I.\ Waldspurger, A.\ d'Aspremont, S.\ Mallat, Phase recovery, maxcut and complex semidefinite programming, Math.\ Program.\ 149 (2015) 47--81. \bibitem{WangX:19} Y.\ Wang, Z.\ Xu, Generalized phase retrieval:\ Measurement number, matrix recovery and beyond, Appl.\ Comput.\ Harmon.\ Anal.\ 47 (2019) 423--446. \bibitem{Watson:92} G.\ A.\ Watson, Characterization of the subdifferential of some matrix norms, Linear Algebra Appl.\ 170 (1992) 33--45. \bibitem{WeisfeilerA:68} B.\ Weisfeiler, A.\ Leman, The reduction of a graph to canonical form and the algebra which appears therein, Nauchno-Tekhn.\ Inform.\ 2 (1968) 12--16. \bibitem{Wood:96} J.\ Wood, Invariant pattern recognition:\ A review, Pattern Recognit.\ 29 (1996) 1--17. \bibitem{XuHLJ:19} K.\ Xu, W.\ Hu, J.\ Leskovec, S.\ Jegelka, How Powerful are Graph Neural Networks?, ICLR 2019. \bibitem{ZhangL:04} D.\ Zhang, G.\ Lu, Review of shape representation and description techniques, Pattern Recognit.\ 37 (2004) 1--19. \bibitem{ZouBS:19} D.\ Zou, R.\ Balan, M.\ Singh, On Lipschitz bounds of general convolutional neural networks, IEEE Trans.\ Inform.\ Theory 66 (2019) 1738--1759. \end{thebibliography} \end{document}
2205.14035v1
http://arxiv.org/abs/2205.14035v1
Learning to Control Linear Systems can be Hard
\documentclass[hidelinks,11pt]{article} \usepackage{fullpage} \usepackage[utf8]{inputenc} \usepackage{amsmath} \usepackage{hyperref} \usepackage{amsfonts} \usepackage{amsmath, amssymb, graphicx, url} \usepackage{amsthm} \usepackage{nicefrac} \usepackage{microtype} \usepackage{color} \usepackage{algorithm} \usepackage{algpseudocode} \usepackage{mathtools} \usepackage{epstopdf} \usepackage{bm} \usepackage{tabulary} \usepackage[mathscr]{euscript} \usepackage{mathrsfs} \usepackage{natbib} \allowdisplaybreaks \usepackage{tikz} \usetikzlibrary{shapes,arrows} \usetikzlibrary{arrows.meta} \usetikzlibrary{positioning} \usetikzlibrary{calc} \usetikzlibrary{backgrounds} \tikzset{block/.style = {draw, text=black,rectangle, rounded corners, minimum height=2.5em, minimum width=5em} } \newtheorem{theorem}{\bf Theorem} \newtheorem{lemma}{\bf Lemma} \newtheorem{proposition}{\bf Proposition} \newtheorem{corollary}{\bf Corollary} \newtheorem{remark}{\bf Remark} \newtheorem{question}{\bf Question} \newtheorem{problem}{\bf Problem} \newtheorem{assumption}{\bf Assumption} \newtheorem{definition}{\bf Definition} \newcommand{\paren}[1]{\ensuremath{\left( #1\right)}} \newcommand{\sparen}[1]{\ensuremath{(#1)}} \newcommand{\clint}[1]{\ensuremath{\left[ #1\right]}} \newcommand{\set}[1]{\ensuremath{\left\{ #1\right\}}} \newcommand{\matr}[1]{\ensuremath{\clint{\begin{array} #1 \end{array}}}} \newcommand{\norm}[1]{\ensuremath{\left\| #1\right\|}} \newcommand{\snorm}[1]{\ensuremath{\| #1\|}} \newcommand{\abs}[1]{\ensuremath{\left| #1\right|}} \newcommand{\mbf}[1]{\ensuremath{\bm{#1}}} \newcommand{\K}{\ensuremath{\mathcal{K}}} \newcommand{\poly}{\ensuremath{\mathrm{poly}}} \newcommand{\KL}{\ensuremath{\mathrm{KL}}} \newcommand{\rank}{\ensuremath{\mathrm{rank}}} \newcommand{\N}{\ensuremath{\mathcal{N}}} \newcommand{\R}{\ensuremath{\mathbb{R}}} \newcommand{\RR}{\ensuremath{\mbf{\mathfrak{R}}}} \renewcommand{\P}{\ensuremath{\mathbb{P}}} \renewcommand{\S}{\ensuremath{\mathbb{S}}} \newcommand{\Reg}{\ensuremath{\mathcal{R}}} \newcommand{\F}{\ensuremath{\mathcal{F}}} \newcommand{\G}{\ensuremath{\mathcal{G}}} \newcommand{\E}{\ensuremath{\mathbb{E}}} \newcommand{\rd}[1]{{\color{red}{#1}}} \newcommand{\gr}[1]{{\color{green}{#1}}} \renewcommand{\O}{\ensuremath{\mathcal{O}}} \newcommand{\C}{\ensuremath{\mathcal{C}}} \newcommand{\CC}{\ensuremath{\mathscr{C}}} \renewcommand{\SS}{\ensuremath{\mathcal{S}}} \newcommand{\T}{\ensuremath{\mathcal{T}}} \newcommand{\B}{\ensuremath{\mathcal{B}}} \newcommand{\Hinf}{\ensuremath{\mathcal{H}_{\infty}}} \newcommand{\Ht}{\ensuremath{\mathcal{H}_{2}}} \newcommand{\LL}{\ensuremath{\mathcal{L}}} \newcommand{\VEC}{\ensuremath{\mathrm{vec}}} \newcommand{\AUX}{\ensuremath{\mathrm{AUX}}} \newcommand{\LAG}{\ensuremath{{\mathpzc{L}_{\hspace{.1pt}}}}} \DeclareMathOperator{\Tr}{\mathrm{tr}} \DeclareMathOperator{\conv}{\circledast} \DeclarePairedDelimiter{\diagfences}{(}{)} \newcommand{\diag}{\operatorname{diag}\diagfences} \hypersetup{colorlinks, linkcolor=blue, citecolor=blue, urlcolor=magenta, linktocpage, plainpages=false} \title{Learning to Control Linear Systems can be Hard} \author{Anastasios~Tsiamis$^1$, Ingvar~Ziemann$^2$, Manfred Morari$^3$, Nikolai Matni$^3$, and George~J.~Pappas$^3$ \date{ $^1$ Automatic Control Laboratory, ETH Zurich, email: [email protected]\\ $^2$ School of Electrical Engineering and Computer Science, KTH Royal Institute of Technology, email: [email protected]\\ $^3$ Department of Electrical and Systems Engineering, University of Pennsylvania, email: 
\{morari,nmatni,pappasg\}@seas.upenn.edu} } \begin{document} \maketitle \begin{abstract}In this paper, we study the statistical difficulty of learning to control linear systems. We focus on two standard benchmarks, the sample complexity of stabilization, and the regret of the online learning of the Linear Quadratic Regulator (LQR). Prior results state that the statistical difficulty for both benchmarks scales polynomially with the system state dimension up to system-theoretic quantities. However, this does not reveal the whole picture. By utilizing minimax lower bounds for both benchmarks, we prove that there exist non-trivial classes of systems for which learning complexity scales dramatically, i.e. exponentially, with the system dimension. This situation arises in the case of underactuated systems, i.e. systems with fewer inputs than states. Such systems are structurally difficult to control and their system theoretic quantities can scale exponentially with the system dimension dominating learning complexity. Under some additional structural assumptions (bounding systems away from uncontrollability), we provide qualitatively matching upper bounds. We prove that learning complexity can be at most exponential with the controllability index of the system, that is the degree of underactuation. \end{abstract} \input{introduction} \input{formulation} \input{controllability} \input{stabilization} \input{onlineLQR} \input{conclusion} \section*{Acknowledgment} This work was supported by the AFOSR Assured Autonomy grant. \bibliographystyle{plainnat} \bibliography{literature} \counterwithin{theorem}{section} \counterwithin{equation}{section} \input{appendix} \end{document} \section{Introduction}\label{sec:introduction} In stochastic linear control, the goal is to design a controller for a system of the form \begin{equation}\label{CTRL_eq:system} S:\qquad x_{k+1}=Ax_k+Bu_k+Hw_k, \end{equation} where $x_k\in\R^{n}$ is the system internal state, $u_k\in \R^{p}$ is some exogenous input, and $w_k\in \R^r$ is some random disturbance sequence. Matrices $A,\,B,\,H$ determine the evolution of the state, based on the previous state, control input, and disturbance respectively. Control theory has a long history of studying how to design controllers for system~\eqref{CTRL_eq:system} when its model is \emph{known}~\citep{bertsekas2017dynamic}. However, in reality system~\eqref{CTRL_eq:system} might be \emph{unknown} and we might not have access to its model. In this case, we have to learn how to control~\eqref{CTRL_eq:system} based on data. Controlling unknown dynamical systems has also been studied from the perspective of Reinforcement Learning (RL). Although the setting of tabular RL is relatively well-understood~\citep{jaksch2010near}, it has been challenging to analyze the continuous setting, where the state and/or action spaces are infinite~\citep{ortner2012online,kakade2020information}. Recently, there has been renewed interest in learning to control linear systems. Indeed, linear systems are simple enough to allow for an in-depth theoretical analysis, yet exhibit sufficiently rich behavior so that we can draw conclusions about continuous control of more general system classes~\citep{recht2018tour}. In this paper we focus on the following two problems. 
\textbf{Regret of online LQR.} A fundamental benchmark for continuous control is the Linear Quadratic Regulator (LQR) problem, where the goal is to compute a policy\footnote{A policy decides the current control input $u_t$ based on past state-input values -- see Section~\ref{CTRL_sec:formulation} for details.} $\pi$ that minimizes \begin{equation}\label{CTRL_eq:LQR_objective} J^*(S)\triangleq \min_{\pi}\lim_{T\rightarrow \infty} \frac{1}{T} \E_{S,\pi} \clint{\sum^{T-1}_{t=0}(x'_tQx_t+u'_tRu_t)+x'_TQ_Tx_T}, \end{equation} where $Q\in\R^{n\times n}$, $R\in\R^{p\times p}$ are the state and input penalties respectively; these penalties control the tradeoff between state regulation and control effort. When model~\eqref{CTRL_eq:system} is known, LQR enjoys a closed-form solution; the optimal policy is a linear feedback law $\pi_{\star,t}(x_t)=K_{\star}x_{t}$, where the control gain $K_{\star}$ is given by solving the celebrated Algebraic Riccati Equation (ARE)~\eqref{CTRL_eq:LQR_DARE}. If model~\eqref{CTRL_eq:system} is unknown, we have to learn the optimal policy from data. In the online learning setting, the goal of the learner is to find a policy that adapts online and competes with the optimal LQR policy that has access to the true model. The suboptimality of the online learning policy at time $T$ is captured by the \emph{regret} \begin{equation}\label{CTRL_eq:regret} R_T(S)\triangleq \sum_{t=0}^{T-1}(x'_tQx_t+u'_tRu_t)+x'_TQ_Tx_T-TJ^*(S). \end{equation} The learning task is to find a policy with as small regret as possible. \textbf{Sample Complexity of Stabilization.} Another important benchmark is the problem of stabilization from data. The goal is to learn a linear gain $K\in\R^{p\times n}$ such that the closed-loop system $A+BK$ is stable, i.e., such that its spectral radius $\rho(A+BK)$ is less than one. Many algorithms for online LQR require the existence of such a stabilizing gain to initialize the online learning policy~\citep{simchowitz2020naive,jedra2021minimal}. Furthermore, stabilization is a problem of independent interest~\citep{faradonbeh2018stabilization}. In this setting, the learner designs an exploration policy $\pi$ and an algorithm that uses batch state-input data $x_0,\dots,x_N,u_0,\dots,u_{N-1}$ to output a control gain $\hat{K}_N$ at the end of the exploration phase. Here we focus on \emph{sample complexity}, i.e., the minimum number of samples $N$ required to find a stabilizing gain. Since the seminal papers by~\cite{abbasi2011regret} and~\cite{dean2017sample}, both LQR and stabilization have been studied extensively in the literature -- see Section~\ref{CTLR_sec:related_work}. Current state-of-the-art results state that the regret of online LQR and the sample complexity of stabilization scale at most polynomially with the system dimension $n$ \begin{equation}\label{CTRL_eq:existing_upper_bounds} R_T(S)\lesssim C^{\mathrm{sys}}_{1}\poly(n)\sqrt{T},\quad N\lesssim C^{\mathrm{sys}}_{\mathrm{2}}\poly(n), \end{equation} where $C^{\mathrm{sys}}_{1},\,C^{\mathrm{sys}}_{2}$ are system-specific constants that depend on several control theoretic quantities of system~\eqref{CTRL_eq:system}. However, the above statements might not reveal the whole picture. In fact, the system theoretic parameters $C^{\mathrm{sys}}_{1},\,C^{\mathrm{sys}}_{2}$ can actually hide dimensional dependence on $n$. This dependence has been overlooked in prior work. As we show in this paper, there exist non-trivial classes of linear systems for which system theoretic parameters scale dramatically, i.e.
exponentially, with the dimension $n$. As a result, the system theoretic quantities $C^{\mathrm{sys}}_{1},\,C^{\mathrm{sys}}_{2}$ might be very large and in fact \emph{dominate} the $\poly(n)$ term in the upper bounds~\eqref{CTRL_eq:existing_upper_bounds}. This phenomenon especially arises in systems which are structurally difficult to control, such as for example underactuated systems. Then, the upper bounds~\eqref{CTRL_eq:existing_upper_bounds} suggest that learning might be difficult for such instances. This brings up the following questions. \emph{Can learning LQR or stabilizing controllers indeed be hard for such systems? How does system structure affect difficulty of learning?} To answer the first question, we need to establish lower bounds. As we discuss in Section~\ref{CTLR_sec:related_work}, existing lower bounds for online LQR~\citep{simchowitz2020naive} might not always reveal the dependence on control theoretic parameters. \cite{chen2021black}~provided exponential lower bounds for the start-up regret of stabilization. Still, to the best of our knowledge, there are no existing lower bounds for the \emph{sample complexity} of stabilization. Recently, it was shown that the sample complexity of system identification can grow exponentially with the dimension $n$~\citep{tsiamis2021linear}. However, it is not clear if difficulty of identification translates into difficulty of control. Besides, we do not always need to identify the whole system in order to control it~\citep{gevers2005identification}. To answer the second question, we need to provide upper bounds for several control theoretic parameters. Our contributions are the following: \textbf{Exp($n$) Stabilization Lower Bounds.} We prove an information-theoretic lower bound for the problem of learning stabilizing controllers, showing that it can indeed be statistically hard for underactuated systems. In particular, we show that the sample complexity of stabilizing an unknown underactuated linear system can scale exponentially with the state dimension $n$. To the best of our knowledge this is the first paper to address this issue and consider lower bounds in this setting. \textbf{Exp($n$) LQR Regret Lower Bounds.} We show that the regret of online LQR can scale exponentially with the dimension as $\exp(n)\sqrt{T}$. In fact, even common integrator-like systems can exhibit this behavior. To prove our result, we leverage recent regret lower bounds~\citep{ziemann2022regret}, which provide a refined analysis linking regret to system theoretic parameters. \cite{chen2021black}~first showed that the start-up cost of the regret (terms of low order) can scale exponentially with $n$. Here, we show that this exponential dependence can also affect multiplicatively the dominant $\sqrt{T}$~term. \textbf{Exponential Upper Bounds.} Under some additional structural assumptions (bounding systems away from uncontrollability), we provide matching global upper bounds. We show that the sample complexity of stabilization and the regret of online LQR can be at most exponential with the dimension $n$. In fact, we prove a stronger result, that they can be at most exponential with the \emph{controllability index} of the system, which captures the structural difficulty of control -- see Section~\ref{CTRL_sec:controllability}. This implies that if the controllability index is small with respect to the dimension $n$, then learning is guaranteed to be easy. 
\subsection{Related Work}\label{CTLR_sec:related_work} \textbf{System Identification.} A related problem is that of system identification, where the learning objective is to recover the model parameters $A,B,H$ from data~\citep{matni2019tutorial}. The sample complexity of system identification was studied extensively in the setting of fully observed linear systems~\citep{dean2017sample,simchowitz2018learning,faradonbeh2018finite,sarkar2018fast,fattahi2019learning,jedra2019sample,wagenmaker2020active,efroni2021sparsity} as well as partially-observed systems~\citep{oymak2018non,sarkar2019finite,simchowitz2019semi,tsiamis2019finite,lee2019non,zheng2020non,lee2020improved,lale2020logarithmic}. Recently, it was shown that the sample complexity of system identification can grow exponentially with the dimension $n$~\citep{tsiamis2021linear}. \noindent\textbf{Learning Feedback Laws.} The problem of learning stabilizing feedback laws from data was studied before in the case of stochastic~\citep{dean2017sample,tu2017non,faradonbeh2018stabilization, mania2019certainty} as well as adversarial~\citep{chen2021black} disturbances. The standard paradigm has been to perform system identification, followed by a robust control or certainty equivalent gain design. Prior work is limited to sample complexity upper bounds. To the best of our knowledge, there have been no sample complexity lower bounds. \noindent\textbf{Online LQR.} While adaptive control in the LQR framework has a rich history \citep{matni2019self}, the recent line of work on regret minimization in online LQR begins with \cite{abbasi2011regret}. They provide a computationally intractable algorithm based on optimism attaining $O(\sqrt{T})$ regret. Algorithms based on optimism have since been improved and made more tractable \citep{ouyang2017control, abeille2018improved, abbasi2019model, cohen2019learning, abeille2020efficient}. In a closely related line of work, \cite{dean2018regret} provide an $O(T^{2/3})$ regret bound for robust adaptive LQR control, drawing inspiration from classical methods in system identification and robust adaptive control. It has since been shown that certainty equivalent control, without robustness, can attain the (locally) minimax optimal $O(\sqrt{T})$ regret \citep{mania2019certainty, faradonbeh2020adaptive,lale2020explore, jedra2021minimal}. In particular, by providing nearly matching upper and lower bounds, \cite{simchowitz2020naive} refine this analysis and establish that the optimal rate, without taking system theoretic quantities into account, is $R_T = \Theta(\sqrt{p^2 n T})$. In this work, we rely on the lower bounds by \cite{ziemann2022regret}, which provide a refined instance-specific analysis and also lower bounds for the partially observed setting. Here, we further refine their lower bounds to reveal a sharper dependence of the regret on control theoretic parameters. Hence, we show that certain non-local minimax complexities can be far worse than $R_T = \Omega( \sqrt{p^2 n T})$ and scale exponentially in the problem dimension. Indeed, an exponential start-up cost has already been observed by \cite{chen2021black}, in the case of adversarial disturbances. Here we show that this exponential dependency can persist multiplicatively even for large $T$, in the case of stochastic disturbances. Thus, our results complement the results of~\cite{chen2021black}. \subsection{Notation} The transpose of $X$ is denoted by $X'$. For vectors $v\in\R^d$, $\snorm{v}_2$ denotes the $\ell_2$-norm.
For matrices $X\in\R^{d_1\times d_2}$, the spectral norm is denoted by $\snorm{X}_2$. For comparison with respect to the positive semi-definite cone we will use $\succeq$, or $\succ$ for strict inequality. By $\P$ we will denote probability measures and by $\E$ expectation. By $\poly(\cdot)$ we denote a polynomial function of its arguments. By $\exp(\cdot)$ we denote an exponential function of its arguments. \section{Problem Statement}\label{CTRL_sec:formulation} System~\eqref{CTRL_eq:system} is characterized by the matrices $A\in\R^{n\times n},\,B\in\R^{n\times p},\,H\in\R^{n\times r}$. We assume that $w_k\sim\mathcal{N}(0,I_r)$ is i.i.d. Gaussian with unit covariance. Without loss of generality the initial state is assumed to be zero $x_0=0$. In a departure from prior work, we do not necessarily assume that the noise is isotropic. Instead, we consider a more general model, where the noise $Hw_k$ is allowed to be degenerate -- see also Remark~\ref{CTRL_rem:singular_noise}. \begin{assumption}\label{CTRL_ass:general_setting} Matrices $A,B,H$ and the noise dimension $r\le n$ are all unknown. The unknown matrices are bounded, i.e. $\snorm{A}_2,\snorm{B}_2,\snorm{H}_2\le M$, for some positive constant $M\ge 1$. Matrices $B,H$ have full column rank $\rank(B)=p\le n$, $\rank(H)=r\le n$. We also assume that the system is non-explosive $\rho(A)\le 1$. \end{assumption} The boundedness assumption on the state parameters allows us to argue about global sample complexity upper bounds. To simplify the presentation, we make the assumption that the system is non-explosive $\rho(A)\le 1$. This setting includes marginally stable systems and is rich enough to provide insights about the difficulty of learning more general systems. A policy is a sequence of functions $\pi=\set{\pi_t}_{t=0}^{N-1}$. Every function $\pi_t$ maps previous state-input values $x_0,\dots,x_t,u_0,\dots,u_{t-1}$ and potentially an auxiliary randomization signal $\AUX$ to the new input $u_t$. Hence all inputs $u_t$ are $\F_t$-measurable, where $\F_t\triangleq \sigma(x_0,\dots,x_t,u_0,\dots,u_{t-1},\AUX)$. For brevity we will use the symbol $S$ to denote a system $S=(A,B,H)$. Let $\P_{S,\pi}$ ($\E_{S,\pi}(\cdot)$) denote the probability distribution (expectation) of the input-state data when the true system is equal to $S$ and we apply a policy $\pi$. \subsection{Difficulty of Stabilization} In the stabilization problem, the goal is to find a state-feedback control law $u=Kx$, where $K$ renders the closed-loop system $A+BK$ stable with spectral radius less than one, i.e., $\rho(A+BK)<1$. We assume that we collect data $x_0,\dots,x_N,u_0,\dots,u_{N-1}$, which are generated by system~\eqref{CTRL_eq:system} using any exploration policy $\pi$, e.g. white-noise excitation, active learning, etc. Since we care only about sample complexity, the policy is allowed to be maximally exploratory. To make the problem meaningful, we restrict the average control energy. \begin{assumption}\label{CTRL_ass:input_budget} The control energy is bounded $\E_{S,\pi} \snorm{u_t}^2_2\le \sigma^2_u$, for some $\sigma_u>0$. \end{assumption} Next, we define a notion of learning difficulty for classes of linear systems. By $\CC_n$ we will denote a class of systems with dimension $n$. We will define as easy those classes of linear systems that exhibit $\poly(n)$ sample complexity. \begin{definition}[Poly$(n)$-stabilizable classes] Let $\CC_n$ be a class of systems. Let $\hat{K}_N$ be a function that maps input-state data $(u_{0},x_1),\dots,(u_{N-1},x_{N})$ to a control gain.
We call the class $\CC_n$ $\poly(n)-$stabilizable if there exists an algorithm $\hat{K}_N$ and an exploration policy $\pi$ satisfying Assumption~\ref{CTRL_ass:input_budget}, such that for any confidence $0\le \delta<1$: \begin{align} &\sup_{S\in\CC_n}\P_{S,\pi}\paren{\rho(A+B\hat{K}_N)\ge 1}\le \delta\label{CTRL_eq:STAB_objective},\quad \text{if}\quad N\sigma^2_u\ge \mathrm{poly}(n,\log 1/\delta, M). \end{align} \end{definition} Our definition requires both the number of samples and the input energy to be polynomial in the above arguments. The above class-specific definition can be turned into a local, instance-specific, definition of sample complexity by considering a neighborhood around an unknown system. The question then arises whether linear systems are generally poly$(n)$-stabilizable. \begin{problem}\label{CTRL_problem:STAB} Are there linear system classes which are not $\poly(n)$-stabilizable? When can we guarantee $\poly(n)$-stabilizability? \end{problem} \subsection{Difficulty of Online LQR} Consider the LQR objective~\eqref{CTRL_eq:LQR_objective}. Let the state penalty matrix $Q\in\R^{n\times n}$ and the input penalty matrix $R\in\R^{p\times p}$ both be positive definite. When the model is known, the optimal policy is a linear feedback law $\pi_{\star}=\set{K_{\star}x_{k}}^{T-1}_{k=0}$, where $K_{\star}$ is given by \begin{equation}\label{CTRL_eq:LQR_gain} K_{\star}=-(B'PB+R)^{-1}B'PA, \end{equation} and $P$ is the unique positive definite solution to the Algebraic Riccati Equation (ARE) \begin{equation}\label{CTRL_eq:LQR_DARE} P=A'PA+Q-A'PB(B'PB+R)^{-1}B'PA. \end{equation} Throughout the paper, we will assume that $Q_T=P$. If the model of~\eqref{CTRL_eq:system} is unknown, the goal of the learner is to find an online learning policy $\pi$ that leads to minimum regret $R_T(S)$. In the setting of online LQR, the data are revealed sequentially, i.e. $x_{t+1}$ is revealed after we select $u_t$. Contrary to the stabilization problem, here we study regret, i.e. there is a tradeoff between exploration and exploitation. We will define a class-specific notion of learning difficulty based on the ratio between the regret and $\sqrt{T}$. \begin{definition}[Poly$(n)$-Regret] Let $\CC_n$ be a class of systems of dimension $n$. We say that the class $\CC_n$ exhibits poly($n$) minimax expected regret if \begin{equation} \begin{aligned} &\min_{\pi}\sup_{S\in\CC_n}\E_{S,\pi} R_T(S)\le \poly(n,M,\log T)\sqrt{T}+\tilde{O}(1)\label{CTRL_eq:REG_objective_EXPE}, \end{aligned} \end{equation} where $\tilde{O}(1)$ hides $\poly\log T$ terms. \end{definition} Our definition here is based on expected regret, but we could have a similar definition based on high probability regret guarantees -- see~\cite{dann2017unifying} for distinctions between the two definitions. Similar to the stabilization problem, we pose the following questions. \begin{problem}\label{CTRL_problem:REG} Are there classes of systems for which poly$(n)$-regret is impossible? When is poly$(n)$-regret guaranteed? \end{problem} \section{Classes with Rich Controllability Structure}\label{CTRL_sec:controllability} Before we present our learning guarantees, we need to find classes of systems where learning is meaningful. To make sure that the stabilization and the LQR problems are well-defined, we assume that system~\eqref{CTRL_eq:system} is controllable\footnote{We can slightly relax the condition to $(A,B)$ stabilizable~\citep{lale2020explore,simchowitz2020naive,efroni2021sparsity}.
To avoid technicalities we leave that for future work.}. \begin{assumption}\label{CTRL_ass:controllability} System~\eqref{CTRL_eq:system} is $(A,B)$ \emph{controllable}, i.e. matrix \begin{align}\label{CTRL_eq:controllability_matrix} \C_k(A,B)\triangleq \matr{{cccc}B&AB&\cdots&A^{k-1}B} \end{align} has full column rank $\rank(\C_k(A,B))=n$, for some $k\le n$. \end{assumption} Unsurprisingly, the class of all controllable systems does not exhibit finite sample complexity/regret, let alone polynomial sample complexity/regret. The main issue is that there exist systems which satisfy the rank condition but are arbitrarily close to uncontrollability. For example, consider the following controllable system, which we want to stabilize \begin{equation*} x_{k+1}=\matr{{cc}1&\alpha\\0&0}x_k+\matr{{c}0\\1}u_k+w_k. \end{equation*} The only way to stabilize the system is indirectly by using the second state $x_{k,2}$, via the coupling coefficient $\alpha$. However, we need to know the sign of $\alpha$. If $\alpha$ is allowed to be arbitrarily small, i.e. the system is arbitrarily close to uncontrollability, then an arbitrarily large number of samples is required to learn the sign of $\alpha$, leading to infinite complexity. To obtain classes with finite sample complexity/regret we need to bound the system instances away from uncontrollability. One way is to consider the least singular value of the controllability Gramian $\Gamma_k(A,B)$ at time $k$: \begin{equation}\label{CTRL_eq:gramian} \Gamma_{k}(A,B)\triangleq \sum_{t=0}^{k-1}A^tBB'(A')^{t}. \end{equation} An implicit assumption in prior literature is that $\sigma^{-1}_{\min}(\Gamma_{k}(A,B))\le \poly(n)$. We will not assume this here, since it might exclude many systems of interest, such as integrator-like systems, also known as underactuated systems, or networks~\citep{pasqualetti2014controllability}. Instead, we will relax this requirement to allow richer system structures. To avoid pathologies, we will lower bound the coupling between states in the case of indirectly controlled systems. To formalize this idea, let us review some notions from system theory. The \emph{controllability index} is defined as follows \begin{align}\label{eq:controllability_idex} \kappa(A,B)\triangleq \min\set{ k\ge 1: \rank(\C_k(A,B))=n }, \end{align} i.e., it is the minimum time such that the controllability rank condition is satisfied. It captures the degree of underactuation and reflects the structural difficulty of control. Based on the fact that the rank of the controllability matrix at time $\kappa$ is $n$, we can show that the pair $(A,B)$ admits the following canonical representation, under a unitary similarity transformation~\citep{Dooren03}. It is called the Staircase or Hessenberg form of system~\eqref{CTRL_eq:system}. \begin{proposition}[Staircase form]\label{CTRL_prop:Hessenberg} Consider a controllable pair $(A,B)$ with controllability index $\kappa$ and controllability matrix $\C_k$, $k\ge 0$. 
There exists a unitary similarity transformation $U\in\R^{n\times n}$ such that $U'U=UU'=I$ and: \begin{equation} \label{CTRL_eq:Hessenberg_form} U'B=\matr{{c}B_1\\0\\0\\0\\\vdots\\0},\qquad U'AU=\matr{{ccccc}A_{1,1}&A_{1,2}&\cdots&A_{1,\kappa-1}&A_{1,\kappa}\\A_{2,1}&A_{2,2}&\cdots&A_{3,\kappa-1}&A_{2,\kappa}\\0&A_{3,2}&\cdots&A_{3,\kappa-1}&A_{3,\kappa}\\0&0&\cdots&A_{4,\kappa-1}&A_{4,\kappa}\\\vdots& & &\vdots&\\0&0&\cdots&A_{\kappa,\kappa-1}&A_{\kappa,\kappa}}, \end{equation} where $A_{i,j}\in \R^{p_i\times p_j}$ are block matrices, with $p_i=\rank(\C_{i})-\rank(\C_{i-1})$, $p_1=p$, $B_1\in\R^{p\times p}$. Matrices $A_{i+1,i}$ have full row rank $\rank(A_{i+1,i})=p_{i+1}$ and the sequence $p_i$ is decreasing. \end{proposition} Matrix $U$ is the orthonormal matrix of the QR decomposition of the first $n$ independent columns of $\C_{\kappa}(A,B)$. It is unique up to sign flips of its columns. The above representation captures the coupling between the several sub-states via the matrices $A_{i+1,i}$. It has been used before as a test of controllability~\cite{Dooren03}. This motivates the following definition, wherein we bound the coupling matrices $A_{i+1,i}$ away from zero. \begin{definition}[Robustly coupled systems] Consider a controllable system $(A,B)$ with controllability index $\kappa$. It is called $\mu-$robustly coupled if and only if for some positive $\mu>0$: \begin{equation} \sigma_{p}(B_{1})\ge\mu,\quad \sigma_{p_{i+1}}(A_{i+1,i})\ge \mu,\,\text{ for all }1\le i\le \kappa-1, \end{equation} where $B_1$, $A_{i+1,i}$ are defined as in the Staircase form~\eqref{CTRL_eq:Hessenberg_form}. \end{definition} In the previous example, by introducing the $\mu-$robust coupling requirement, we enforce a lower bound on the coupling coefficient $\alpha\ge \mu$, thus, avoiding pathological systems. In the following sections, we connect the controllability index to the hardness/ease of control. We prove rigorously why performance might degrade as the index becomes $\kappa=O(n)$, as, e.g., in the case of integrator-like systems or networks. This cannot be explained based on prior work or based on global lower-bounds on the least singular value of the controllability Gramian. The controllability index and the controllability Gramian are two different measures that are suitable for different types of guarantees. The controllability index captures the structural difficulty of control, so it might be more suitable for class-specific guarantees versus instance-specific local guarantees. \section{Difficulty of Stabilization}\label{sec:stabilization} In this section, we show that there exist non-trivial classes of linear systems for which the problem of stabilization from data is hard. In fact, the class of robustly coupled systems requires at least an exponential, in the state dimension $n$, number of samples. \begin{theorem}[Stabilization can be Hard]\label{CTRL_thm:STAB_lower_exponential} Consider the class $\CC^{\mu}_{n,\kappa}$ of all $\mu$-robustly coupled systems $S=(A,B,H)$ of dimension $n$ and controllability index $\kappa$. Let Assumption~\ref{CTRL_ass:input_budget} hold and let $\mu<1$. Then, for any stabilization algorithm, the sample complexity is exponential in the index $\kappa$. For any confidence $0\le \delta<1/2$ the requirement \begin{align} &\sup_{S\in\CC^{\mu}_{n,\kappa}}\P_{S,\pi}\paren{\rho(A+B\hat{K}_N)\ge 1}\le \delta \nonumber \end{align} is satisfied only if \[ N\sigma^2_u \ge \frac{1}{2}\paren{\frac{1}{\mu}}^{2\kappa-2}\paren{\frac{1-\mu}{\mu}}^{2}\log\frac{1}{3 \delta}. 
\] \end{theorem} Theorem~\ref{CTRL_thm:STAB_lower_exponential} implies that system classes with large controllability index, e.g. $\kappa=n$, suffer in general from sample complexity which is exponential with the dimension $n$. In other words, learning difficulty arises in the case of under-actuated systems. Only a limited number of system states are directly driven by inputs and the remaining states are only indirectly excited, leading to a hard learning and stabilization problem. Consider now systems \begin{equation}\label{CTRL_eq:difficult_example_STAB} \begin{aligned} S_i:\qquad x_{k+1}=\matr{{ccccc}1 &\alpha_i\mu&0&\cdots&0\\0& 0&\mu&\cdots&0\\& &\ddots &\ddots&\\0&0&0&\cdots&\mu\\0&0&0&\cdots&0}x_k+\matr{{c}0\\0\\\vdots\\0\\\mu}u_k+ \matr{{c}1\\0\\\vdots\\0\\0}w_k,\,i\in\set{1,2}, \end{aligned} \end{equation} where $0<\mu<1$, $\alpha_1=1$, $\alpha_2=-1$. Systems $S_1$, $S_2$ are almost identical with the exception of element $A_{12}$ where they have different signs. Both systems have one marginally stable mode corresponding to state $x_{k,1}$. The only way to stabilize $x_{k,1}$ with state feedback is indirectly, via $x_{k,2}$. Given system $S_1$, since $\alpha_1\mu>0$, it is necessary that the first component of the gain is negative $\hat{K}_{N,1}<0$. This follows from the Jury stability criterion, a standard stability test in control theory~\citep[Ch. 4.5]{fadali2013digital}. Let $\phi_1(z)=\det(zI-A_1-B\hat{K}_N)$ be the characteristic polynomial of system $S_1$. Then one of the necessary conditions in Jury's criterion requires: \[ \phi_1(1)>0, \] which can only be satisfied if $\hat{K}_{N,1}<0$ (see Appendix~\ref{CTRL_app_sec:STAB_lower_bounds} for details). On the other hand, we can only stabilize $S_2$ if $\hat{K}_{N,1}>0$. Hence, the only way to stabilize the system is to identify the sign of $\alpha_i$. In other words, we transform the stabilization problem into a system identification problem. However, identification of the correct sign is very hard since the excitation of $x_{k,2}=\mu^{n-1}u_{k-n+1}$ scales with $\mu^{n-1}$. The proof relies on Birgé's inequality~\citep{boucheron2013concentration}. In Section~\ref{CTRL_app_sec:STAB_lower_bounds} we construct a slightly more general example with non-zero diagonal elements. Our construction relies on the fact that $\mu<1$. It is an open question whether we can construct hard learning instances for $\mu\ge 1$. One insight that we obtain from the above example is that lack of excitation might lead to large sample complexity of stabilization. In particular, this can happen when we have an unstable/marginally stable mode, which can only be controlled via the system identification bottleneck, like $A_{1,2}$ in the above example. \begin{remark}[Singular noise]\label{CTRL_rem:singular_noise} Our stabilization lower bound exploits the fact that the constructed system~\eqref{CTRL_eq:difficult_example_STAB} has low-rank noise, such that system identification is hard. It is an open problem whether we can construct examples of systems that are not $\poly(n)-$stabilizable even though they are excited by full-rank noise. Nonetheless, in our regret lower bounds, we allow the noise to be full-rank. \end{remark} \subsection{Sample complexity upper bounds}\label{CTRL_sec:STAB_upper_bounds} As we show below, sample complexity cannot be worse than exponential under the assumption of robust coupling. 
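The route to this upper bound, described next, is the familiar explore-then-design pipeline: excite the system, identify it by least squares, and synthesize a gain on the identified model. The following minimal sketch is purely illustrative and not part of our formal development: it assumes NumPy and SciPy are available, uses a toy coupled chain instead of a worst-case instance, and uses a plain certainty-equivalent design, whereas the scheme analyzed in Section~\ref{CTRL_app_sec:STAB_upper_bounds} relies on a robust design with guarantees.

\begin{verbatim}
import numpy as np
from scipy.linalg import solve_discrete_are

rng = np.random.default_rng(0)
n, p, N, mu = 3, 1, 2000, 0.5
# Toy coupled pair (illustrative only): a chain driven from the last state.
A = np.diag(np.full(n - 1, mu), k=1)
A[0, 0] = 0.9
B = np.zeros((n, p)); B[-1, 0] = mu

# White-noise exploration of the unknown system.
X = np.zeros((N + 1, n)); U = rng.standard_normal((N, p))
for t in range(N):
    X[t + 1] = A @ X[t] + B @ U[t] + rng.standard_normal(n)

# Least-squares estimate of [A B] from the regression x_{t+1} ~ [x_t; u_t].
Z = np.hstack([X[:-1], U])
Theta, *_ = np.linalg.lstsq(Z, X[1:], rcond=None)
A_hat, B_hat = Theta.T[:, :n], Theta.T[:, n:]

# Certainty-equivalent LQR gain designed on the estimated model.
P_hat = solve_discrete_are(A_hat, B_hat, np.eye(n), np.eye(p))
K_hat = -np.linalg.solve(B_hat.T @ P_hat @ B_hat + np.eye(p),
                         B_hat.T @ P_hat @ A_hat)
# Check stabilization of the *true* closed loop.
print("rho(A + B K_hat) =", max(abs(np.linalg.eigvals(A + B @ K_hat))))
\end{verbatim}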
If the exploration policy is a white noise input sequence, then using a least squares identification algorithm~\citep{simchowitz2018learning} and a robust control design scheme~\citep{dean2017sample}, the sample complexity can be upper bounded by a function which is at most exponential with the dimension $n$. In fact, we provide a more refined result, directly linking sample complexity to the controllability index $\kappa$. Our proof relies on bounding control theoretic quantities like the least singular value of the controllability Gramian. The details of the proof and the algorithm can be found in Section~\ref{CTRL_app_sec:STAB_upper_bounds}. \begin{theorem}[Exponential Upper Bounds]\label{CTRL_thm:upper_bounds_STAB} Consider the class $\CC^{\mu}_{n,\kappa}$ of all $\mu$-robustly coupled systems $S=(A,B,H)$ of dimension $n$ and controllability index $\kappa$. Let Assumption~\ref{CTRL_ass:input_budget} hold. Then, the sample complexity is at most exponential with $\kappa$. There exists an exploration policy $\pi$ and an algorithm $\hat{K}_N$ such that for any $\delta<1$: \begin{align*} &\sup_{S\in\CC^{\mu}_{n,\kappa}}\P_{S,\pi}\paren{\rho(A+B\hat{K}_N)\ge 1}\le \delta,\quad \text{if} \quad N\sigma^2_u \ge \poly\paren{\Big(\frac{M}{\mu}\Big)^\kappa,M^{\kappa},n,\log 1/\delta}. \end{align*} \end{theorem} Assume that the constants $\mu$ and $M$ are dimensionless. Then, our upper and lower bounds match qualitatively with respect to the dependence on $\kappa$. Theorem~\ref{CTRL_thm:upper_bounds_STAB} implies that if the degree of underactuation is mild, i.e. $\kappa=O(\log n)$, then robustly coupled systems are guaranteed to be poly$(n)$-stabilizable. Our upper bound picks up a dependence on the quantity $M/\mu$. Recall that $M$ upper-bounds the norm of $A$. Hence, it captures a notion of sensitivity of the dynamics $A$ to inputs/noise. In the lower bounds only the coupling term $\mu$ appears. It is an open question to prove or disprove whether the sensitivity of $A$ affects stabilization or whether it is an artifact of our analysis. Another important open problem is to determine the optimal constant that multiplies $\kappa$ in the exponent. Our lower bound suggests that the exponent can be at least of the order of $2$ times $\kappa$. In our upper bounds, by following the proof, we get an exponent which is larger than $2$. \section{Difficulty of Online LQR}\label{sec:onlineLQR} In the following theorem, we prove that classes of robustly coupled systems can exhibit minimax expected regret which grows at least exponentially with the dimension $n$. Let $\CC^{\mu}_{n,\kappa}$ denote the class of $\mu$-robustly coupled systems $S=(A,B,H)$ of state dimension $n$ and controllability index $\kappa$. Define the $\epsilon$-dilation $\CC^{\mu}_{n,\kappa}(\epsilon)$ of $\CC^{\mu}_{n,\kappa}$ as \[ \CC^{\mu}_{n,\kappa}(\epsilon)\triangleq \set{(A,B,H):\: \snorm{\matr{{cc}A-\tilde{A}&B-\tilde{B}}}_2\le \epsilon,\text{ for some }(\tilde{A},\tilde{B},H)\in \CC^{\mu}_{n,\kappa}}, \] which consists of every system in $\CC^{\mu}_{n,\kappa}$ along with the $\epsilon$-ball around it. \begin{theorem}[Exponential Regret Lower Bounds]\label{CTRL_thm:REG_lower_exponential} Consider the class $\CC^{\mu}_{n,\kappa}$ of all $\mu$-robustly coupled systems $S=(A,B,H)$ of state dimension $n$ and controllability index $\kappa$, with $\kappa\le n-1$. For every $\epsilon>0$ define the $\epsilon$-dilation $\CC^{\mu}_{n,\kappa}(\epsilon)$. Let $Q_T=P$, the solution to the ARE \eqref{CTRL_eq:LQR_DARE}, and assume $\mu<1$.
Let $0<\alpha<1/4$. For any policy $\pi$ \begin{equation*} \begin{aligned} &\liminf_{T\rightarrow \infty}\sup_{S\in\CC^{\mu}_{n,\kappa}(T^{-\alpha})}\E_{S,\pi} \frac{R_T(S)}{\sqrt{T}}\ge \frac{1}{4\sqrt{n}} 2^{\frac{\kappa-1}{2}}. \end{aligned} \end{equation*} \end{theorem} When the controllability index is large, e.g. $\kappa=n$, then the lower bounds become exponential with $n$. Hence, achieving poly($n$)-regret is impossible in the case of general linear systems. In general, learning difficulty depends on fundamental control theoretic parameters, i.e. on the solution $P$ to the ARE~\eqref{CTRL_eq:LQR_DARE} or the steady-state covariance of the closed-loop system, both of which can scale exponentially with the controllability index. Existing regret upper-bounds depend on such quantities in a transparent way~\citep{simchowitz2020naive}. Here, we reveal the dependence on such parameters in the regret lower-bounds as well (Lemma~\ref{CTRL_lem:modular_bound_two_subsystems}). Let us now explain when learning can be difficult. Consider the following $1$-robustly coupled system, which consists of two independent subsystems \begin{equation}\label{CTRL_eq:REG_difficult_example_integrator} A=\matr{{c|ccccc}0&0&0&&0&0\\\hline 0&1&1&&0&0\\& &&\ddots&\\0&0&0& &1&1\\0&0&0& &0&1},\,B=\matr{{c|c}1&0\\0&0\\\vdots\\0&1},\,H=I_n,\,Q=I_n,\,R=I_2, \end{equation} where the first subsystem is a memoryless system, while the second one is the discrete integrator of order $n-1$. Since the sub-systems are decoupled, the optimal LQR controller will also be decoupled and structured \[ K_{\star}=\matr{{cc}0 & 0\\0&K_{\star,0}}, \] where $K_{\star,0}$ is the optimal gain of the second subsystem. The first subsystem (upper-left) is memoryless and does not require any regulation, that is, $[K_{\star}]_{11}=0$. Consider now a perturbed system $\tilde{A}=A-\Delta K_{\star}$, $\tilde{B}=B+\Delta$, for some $\Delta\in\R^{n\times p}$. Such perturbations are responsible for the $\sqrt{T}$ term in the regret of LQR~\citep{simchowitz2020naive,ziemann2022regret}; systems $(A,B)$ and $(\tilde{A},\tilde{B})$ are indistinguishable under the control law $u_t=K_{\star}x_{t}$ since $ A+BK_{\star}=\tilde A+\tilde BK_{\star}. $ Now, informally, to get an $\exp(n)\sqrt{T}$ regret lower bound it is sufficient to satisfy two conditions: i) the system is sensitive to inputs or noise, in the sense that any exploratory signal can incur extra cost, which grows exponentially with $n$; ii) the difference $\tilde{A}-A$, $\tilde{B}-B$ is small enough, i.e. polynomial in $n$, so that identification of $\Delta$ requires significant deviation from the optimal policy. The $(n-1)$-th order integrator is very sensitive to inputs or noises. As inputs $u_{k,2}$ and noises $w_k$ get integrated $(n-1)$ times, this will result in accumulated values that grow exponentially as we move up the integrator chain. Hence, the first informal condition is satisfied. To satisfy the second condition we let the perturbation $\Delta$ have the following structure \begin{equation}\label{CTRL_eq:perturbation_structure} \Delta=\matr{{cc}0&0\\\Delta_1&0}, \end{equation} where we only perturb the matrix of the first input $u_{k,1}$. By using two subsystems and the above construction, we make it harder to detect $\Delta$. In particular, because of the structure of the system ($[K_{\star}]_{11}=0$) and the perturbation $\Delta$, we have $\tilde{A}=A-\Delta K_{\star}=A$.
Hence $\snorm{\matr{{cc}A&B}-\matr{{cc}\tilde{A}&\tilde{B}}}_2= \snorm{\Delta}_2\le \poly(n)\snorm{\Delta}_2,$ i.e., the perturbed system does not lie too far away from the nominal one. This last condition might be crucial. If $\snorm{\Delta K_{\star}}_2\ge \exp(n)\snorm{\Delta}_2$, then it might be possible to distinguish between $(A,B)$ and $(\tilde{A},\tilde{B})$ without deviating too much from the optimal policy. This may happen if we use only one subsystem, since $\snorm{K_{\star,0}}_2$ might be large. By using two subsystems, we cancel the effect of $K_{\star,0}$ in $\Delta K_{\star}$. In the stabilization problem, we show that the lack of excitation during the system identification stage might hurt sample complexity. Here, we show that if a system is too sensitive to inputs and noises, i.e. some state subspaces are too easy to excite, this can lead to large regret. Both lack of excitation and too much excitation of certain subspaces can hurt learning performance. This was observed before in control~\citep{skogestad1988robust}. \subsection{Sketch of Lower Bound Proof} Let $S_0=(A_0,B_0,I_{n-1})\in \CC^{\mu}_{n-1,\kappa}$ be a $\mu-$robustly coupled system of state dimension $n-1$, input dimension $p-1$ and controllability index $\kappa\le n-1$. Let $P_0$ be the solution of the Riccati equation for $Q_0=I_{n-1}$, $R_0=I_{p-1}$, with $K_{\star,0}$ the corresponding optimal gain. Define the steady-state covariance of the closed-loop system \begin{equation}\label{CTRL_eq:Covariance_steady_state} \Sigma_{0,x}=(A_0+B_0K_{\star,0})\Sigma_{0,x}(A_0+B_0K_{\star,0})'+I_{n-1}. \end{equation} Now, consider the composite system: \begin{equation}\label{CTRL_eq:composite_system} A=\matr{{cc}0&0\\0&A_0},\, B=\matr{{cc}1&0\\0&B_0},\, H=I_n, \end{equation} with $Q=I_n,\,R=I_p$. Let $\Delta$ be structured as in~\eqref{CTRL_eq:perturbation_structure}, for some arbitrary $\Delta_1$ of unit norm $\snorm{\Delta_1}_2=1$. The Riccati matrix of the composite system is denoted by $P$ and the corresponding gain by $K_{\star}$. Consider the parameterization: \begin{equation}\label{CTRL_eq:parameterized_system_family} A(\theta)=A-\theta \Delta K_{\star}, \qquad B(\theta)=B+\theta \Delta, \end{equation} for any $\theta \in\R$. Let $\mathcal{B}(\theta,\epsilon)$ denote the open Euclidean ball of radius $\epsilon$ around $\theta$. For every $\epsilon>0$, define the local class of systems around $S$ as $\CC_S({\epsilon})\triangleq\set{(A(\theta),B(\theta),I_n),\,\theta\in\mathcal{B}(0,\epsilon)}$. Based on the above construction and Theorem~1 of~\cite{ziemann2022regret}, a general information-theoretic regret lower bound, we prove the following lemma. \begin{lemma}[Two-Subsystems Lower Bound]\label{CTRL_lem:modular_bound_two_subsystems} Consider the parameterized family of linear systems defined in~\eqref{CTRL_eq:parameterized_system_family}, for $n,p \ge 2$, where $\Delta$ is structured as in~\eqref{CTRL_eq:perturbation_structure}. Let $Q=I_n$, $R=I_p$. Let $Q_T=P(\theta)$, where $P(\theta)$ is the solution to the Riccati equation for $(A(\theta),B(\theta))$. Then, for any policy $\pi$ and any $0<a<1/4$ the expected regret is lower bounded by \begin{align*} &\liminf_{T\rightarrow \infty}\sup_{\hat{S}\in \CC_S({T^{-a}})}\E_{\hat{S},\pi}\frac{R_{T}(\hat{S})}{\sqrt{T}}\ge \frac{1}{4\sqrt{n}}\sqrt{\Delta'_1 P_0 \clint{\Sigma_{0,x}-I_{n-1}}P_0\Delta_1}. \end{align*} \end{lemma} Optimizing over $\Delta_1$, we obtain a lower bound on the order of $\sqrt{\snorm{P_0 \clint{\Sigma_{0,x}-I_{n-1}}P_0}_2}$.
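For intuition, this quantity can also be evaluated numerically for the integrator chain of~\eqref{CTRL_eq:REG_difficult_example_integrator} before stating the formal result. The sketch below is purely illustrative and not part of the formal argument; it assumes NumPy and SciPy are available, solves the Riccati and Lyapunov equations for increasing integrator orders, and prints $\snorm{P_0\clint{\Sigma_{0,x}-I}P_0}_2$, which grows geometrically.

\begin{verbatim}
import numpy as np
from scipy.linalg import solve_discrete_are, solve_discrete_lyapunov

for m in range(2, 9):                              # m = n - 1, integrator order
    A0 = np.eye(m) + np.diag(np.ones(m - 1), k=1)  # discrete integrator chain
    B0 = np.zeros((m, 1)); B0[-1, 0] = 1.0
    P0 = solve_discrete_are(A0, B0, np.eye(m), np.eye(1))
    K0 = -np.linalg.solve(B0.T @ P0 @ B0 + np.eye(1), B0.T @ P0 @ A0)
    Acl = A0 + B0 @ K0                             # optimal closed loop
    Sigma = solve_discrete_lyapunov(Acl, np.eye(m))  # Sigma = Acl Sigma Acl' + I
    val = np.linalg.norm(P0 @ (Sigma - np.eye(m)) @ P0, 2)
    print(m, val, 2.0 ** m)                        # compare with 2^m
\end{verbatim}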
What remains to show is that for the $(n-1)$-th order integrator (second subsystem in~\eqref{CTRL_eq:REG_difficult_example_integrator}) the product $\snorm{P_0 \clint{\Sigma_{0,x}-I_{n-1}}P_0}_2$ is exponentially large with $n$. \begin{lemma}[System Theoretic Parameters can be Large]\label{CTRL_lem:integrator_system_theoretic_parameters} Consider the $(n-1)$-th order integrator (second subsystem in~\eqref{CTRL_eq:REG_difficult_example_integrator}). Let $P_0$ be the Riccati matrix for $Q_0=I_{n-1},R_0=1$, with $K_{\star,0}$, $\Sigma_{0,x}$ the corresponding LQR control gain and steady-state covariance. Then \[\snorm{P_0 \clint{\Sigma_{0,x}-I_{n-1}}P_0}_2\ge \sum_{j=1}^{n-1} \sum_{i=0}^{j}\binom{j}{i}^2\ge 2^{n-1}.\] \end{lemma} Our lemma shows that control theoretic parameters can scale exponentially with the dimension $n$. The $(n-1)$-th order integrator is a system which is mildly unstable. In Section~\ref{CTRL_sec:REG_stable}, we show that \textbf{stable} systems can also suffer from the same issue. \subsection{Regret Upper Bounds} Similar to the stabilization problem, we show that under the assumption of robust coupling, the regret cannot be worse than $\exp(\kappa)\sqrt{T}$ with high probability. As we prove in~Lemma~\ref{CTRL_lem:Riccati_Upper}, the solution $P$ to the Riccati equation has norm $\snorm{P}_{2}$ that scales at most exponentially with the index $\kappa$ in the case of robustly-coupled systems. This result, combined with the regret upper bounds of~\cite{simchowitz2020naive}, gives us the following result. \begin{theorem}[Exponential Upper Bounds] Consider a $\mu$-robustly coupled system $S=(A,B,H)$ of dimension $n$ and controllability index $\kappa$. Assume that we are given an initial stabilizing gain $K_0$. Let $Q=I_n$, $R\succeq I_p$, and $Q_T=0$. Assume that the noise is non-singular $HH'=I_n$\footnote{It is possible to relax some of the assumptions on the noise -- see~\cite{simchowitz2020naive}.}. Let $\delta\in (0,1/T)$. Using Algorithm~1 of~\cite{simchowitz2020naive}, with probability at least $1-\delta$: \[ R_T(A,B)\le \poly(n,\big(\frac{M}{\mu}\big)^{\kappa},M^{\kappa},\log 1/\delta)\sqrt{T}+\poly(n,\big(\frac{M}{\mu}\big)^{\kappa},M^{\kappa},\log 1/\delta,P(K_0)), \] where $P(K_0)=(A+BK_0)'P(K_0)(A+BK_0)+Q+K'_0RK_0$. \end{theorem} The result follows immediately by our Lemma~\ref{CTRL_lem:Riccati_Upper} and the upper bounds of Theorem~2 in~\cite{simchowitz2020naive}. Assuming that the plant sensitivity $M$ and the coupling coefficient $\mu$ are dimensionless, if we have a mild degree of underactuation, i.e. $\kappa=O(\log n)$, we get poly($n$)-regret with high probability. Note that the above guarantees are for high probability regret, which is not always equivalent to expected regret~\citep{dann2017unifying}. Our upper-bounds are almost global for all robustly coupled systems, in the sense that the dominant $\sqrt{T}$-term is globally bounded. To provide truly global regret guarantees it is sufficient to add an initial exploration phase to Algorithm~1 of \cite{simchowitz2020naive}, which first learns a stabilizing gain $K_0$. For this stage we could use the results of Section~\ref{CTRL_sec:STAB_upper_bounds} and Section~\ref{CTRL_app_sec:STAB_upper_bounds}. We leave this for future work. \section{Conclusion}\label{CRTL_sec:conclusion} We prove that learning to control linear systems can be hard for non-trivial system classes. The problem of stabilization might require sample complexity which scales exponentially with the system dimension $n$.
Similarly, online LQR might exhibit regret which scales exponentially with $n$. This difficulty arises in the case of underactuated systems. Such systems are structurally difficult to control; they can be very sensitive to inputs/noise or very hard to excite. If the system is robustly coupled and has a mild degree of underactuation (small controllability index), then we can guarantee that learning will be easy. We stress that system theoretic quantities might not be dimensionless. On the contrary, they might grow very large with the dimension and dominate any poly$(n)$ terms. Hence, going forward, an important direction of future work is to find policies with optimal dependence on such system theoretic quantities. Although the optimal dependence is known for the problem of system identification~\citep{simchowitz2018learning,jedra2019sample}, it is still not clear what the optimal dependence is in the case of control. For example, an interesting open problem is to find the optimal dependence of the regret $R_T$ on the Riccati equation solution $P$. For the problem of stabilization, it is open to find how sample complexity optimally scales with the least singular value of the controllability Gramian. \newpage \tableofcontents \newpage \appendix \addcontentsline{toc}{part}{Appendix} \section{System Theoretic Preliminaries} In this section, we review briefly some system theoretic concepts. A system $(A,B)\in\R^{n\times (n+p)}$ is \textbf{controllable} if and only if the controllability matrix \[ \C_k(A,B)=\matr{{cccc}B&AB&\cdots&A^{k-1}B} \] has full row rank, i.e. $\rank(\C_k(A,B))=n$, for some $k\le n$. The minimum index $\kappa$ for which the rank condition is satisfied is called the controllability index, and it is always less than or equal to the state dimension $n$. A system $(A,B)$ is called \textbf{stabilizable} if and only if there exists a matrix $K\in\R^{p\times n}$ such that $A+BK$ is stable, i.e. has spectral radius $\rho(A+BK)<1$. Any controllable system is also stabilizable. A system $(A',B')$ is called \textbf{observable} if and only if $(A,B)$ is controllable. Similarly $(A',B')$ is \textbf{detectable} if and only if $(A,B)$ is stabilizable. Let $A$ be stable ($\rho(A)<1$) and consider the transfer matrix $(zI-A)^{-1}$, $z\in\mathbb{C}$, in the frequency domain. The $\mathcal{H}_{\infty}$-norm is given by \[ \snorm{(zI-A)^{-1}}_{\mathcal{H}_{\infty}}=\sup_{\abs{z}=1}\snorm{(zI-A)^{-1}}_2. \] Using the identity $(I-D)^{-1}=I+D+D^2+\dots$ for $\rho(D)<1$, we can upper bound the $\mathcal{H}_{\infty}$-norm by \[\snorm{(zI-A)^{-1}}_{\mathcal{H}_{\infty}}\le \sum_{t=0}^{\infty} \snorm{A^t}_2.\] \subsection{Properties of the Riccati Equation}\label{CTRL_app_sec:Riccati} Consider the infinite horizon LQR problem defined in~\eqref{CTRL_eq:LQR_objective}. Let $(A,B)$ be controllable and assume that $Q\succ 0$ and $R\succ 0$ are positive definite. As we stated in Section~\ref{CTRL_sec:formulation}, the optimal policy $K_{\star}x_k$ has the following closed-form solution \begin{equation*} K_{\star}=-(B'PB+R)^{-1}B'PA, \end{equation*} where $P$ is the unique positive definite solution to the \textbf{Discrete Algebraic Riccati Equation} \begin{equation*} P=A'PA+Q-A'PB(B'PB+R)^{-1}B'PA. \end{equation*} Moreover, $A+BK_{\star}$ is stable, i.e. $\rho(A+BK_{\star})<1$. The above solution is well-defined under the conditions of $(A,B)$ controllable, $Q\succ 0$, $R\succ 0$.
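For illustration, under these conditions $P$ and $K_{\star}$ are easy to compute numerically by simply iterating the map $P\mapsto A'PA+Q-A'PB(B'PB+R)^{-1}B'PA$ until it converges, which is exactly the Riccati difference equation discussed below. The following minimal sketch (illustrative only, assuming NumPy) does this for a second-order integrator and verifies that the resulting closed loop is stable.

\begin{verbatim}
import numpy as np

def riccati_map(P, A, B, Q, R):
    # One backward step of the Riccati difference equation.
    S = B.T @ P @ B + R
    return A.T @ P @ A + Q - A.T @ P @ B @ np.linalg.solve(S, B.T @ P @ A)

A = np.array([[1.0, 1.0], [0.0, 1.0]])   # second-order discrete integrator
B = np.array([[0.0], [1.0]])
Q, R = np.eye(2), np.eye(1)

P = Q.copy()
for _ in range(500):                      # iterate to (numerical) convergence
    P = riccati_map(P, A, B, Q, R)

K = -np.linalg.solve(B.T @ P @ B + R, B.T @ P @ A)       # optimal gain K_*
rho = max(abs(np.linalg.eigvals(A + B @ K)))
print("P =\n", P, "\nK_* =", K, "\nrho(A + B K_*) =", rho)
\end{verbatim}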
Note that we can relax the conditions to $Q\succeq 0$ being positive semi-definite, $(A,Q^{1/2})$ detectable, and $(A,B)$ stabilizable, which is a well-known result in control theory~\citep[Th. 3.1]{chan1984convergence}. Consider now the \textbf{finite-horizon} LQR problem, under the same assumptions of $(A,B)$ controllable, $Q\succ 0$, and $R\succ 0$
\begin{equation}
J_T^*(S)\triangleq \min_{\pi} \E_{S,\pi} \clint{\sum^{T-1}_{t=0}(x'_tQx_t+u'_tRu_t)+x'_TQ_Tx_T}.
\end{equation}
The optimal policy is a feedback law $K_{t}x_t$, $t\le T-1$, with time-varying gains. The gains satisfy the following closed-form expression
\begin{equation*}
K_{t}=-(B'P_{t+1}B+R)^{-1}B'P_{t+1}A,
\end{equation*}
where $P_t$ satisfies the \textbf{Riccati Difference Equation}
\begin{equation*}
P_{t}=A'P_{t+1}A+Q-A'P_{t+1}B(B'P_{t+1}B+R)^{-1}B'P_{t+1}A,\, P_T=Q_T.
\end{equation*}
It turns out that as we take the horizon to infinity, $T\rightarrow \infty$, we get $\lim_{T\rightarrow \infty} P_k=P$ exponentially fast, for any fixed $k$, where $P$ is the positive definite solution to the Algebraic Riccati Equation. The convergence is true under the conditions of $(A,B)$ controllable, $Q\succ 0$, $R\succ 0$. Again we could relax the conditions to $Q\succeq 0$ being positive semi-definite, $(A,Q^{1/2})$ detectable, and $(A,B)$ stabilizable~\citep[Th. 4.1]{chan1984convergence}. Note that if we select the terminal cost $Q_T=P$, then trivially $P_t=P$ for all $t\le T$, and we recover the same controller as in the infinite horizon case. Finally, a nice property of the Riccati recursion is that the right-hand side is order-preserving with respect to the matrices $P,Q$. In particular, define the operator:
\[
g(X,Y)=A'XA+Y-A'XB(B'XB+R)^{-1}B'XA.
\]
Then, if $X_1\succeq X_2$, we have that $g(X_1,Y)\succeq g(X_2,Y)$~\citep[Ch. 4.4]{anderson2005optimal}. Similarly, if $Y_1\succeq Y_2$ then $g(X,Y_1)\succeq g(X,Y_2)$.
\section{System Theoretic Bounds for Robustly Coupled Systems}
The first result lower bounds the least singular value of the controllability Gramian in terms of the sensitivity $M$, the coupling coefficient $\mu$, and the controllability index $\kappa$ of the system.
\begin{theorem}[Gramian lower bound~\citep{tsiamis2021linear}]\label{CTRL_thm:gramian_lower_bound}
Consider a system $(A,B,H)$ that satisfies Assumption~\ref{CTRL_ass:general_setting}, with $\kappa$ its controllability index. Assume that $(A,B)$ is $\mu$-robustly coupled. Then, the least singular value of the Gramian $\Gamma_{\kappa}=\Gamma_{\kappa}(A,B)$ is lower bounded by:
\[
\sigma_{\min}^{-1}(\Gamma_{\kappa})\le \mu^{-2}\bigg(\frac{3M}{\mu}\bigg)^{2\kappa}.
\]
\end{theorem}
\begin{proof}
The result follows from Theorem 5 in~\cite{tsiamis2021linear}. The theorem statement requires a different condition, called robust controllability. However, the proof still goes through if we have $\mu$-robust coupling instead. Recall that $\C_{\kappa}=\C_{\kappa}(A,B)$ is the controllability matrix~\eqref{CTRL_eq:controllability_matrix} of $(A,B)$ at $\kappa$. Following the proof in~\citep{tsiamis2021linear}, we arrive at
\[
\sigma^{-1/2}_{\min}(\Gamma_{\kappa})\le\snorm{\C^{\dagger}_\kappa}_2 \le \snorm{\Xi^{\kappa-1}}_2\snorm{\alpha}_{2},
\]
where
\[
\Xi=\matr{{ccc}1&1&\mu^{-1}\\\frac{M}{\mu}&\frac{2+M}{\mu}&\frac{M}{\mu}\\0&0&\mu^{-1}},\,\alpha=\matr{{c}\frac{1}{\mu}\\\frac{M}{\mu^2}\\ \frac{1}{\mu}}.
\]
The result follows from the crude bounds $\snorm{\Xi}_2\le 3 M/\mu $, $\snorm{\alpha}_2\le \sqrt{3}M/\mu^{2}$, where we assumed that $M>1$.
\end{proof}
The following result upper bounds the solution $P$ to the LQR Riccati equation in terms of the sensitivity $M$, the coupling coefficient $\mu$, and the controllability index $\kappa$ of the system.
\begin{lemma}[Riccati Upper Bounds]\label{CTRL_lem:Riccati_Upper}
Let the system $(A,B)\in\R^{n\times (n+p)}$ be controllable and $\mu$-robustly coupled with controllability index $\kappa$. Let $R\in\R^{p\times p}$ be positive definite and $Q\in\R^{n\times n}$ be positive semi-definite. Assume $T>\kappa$ and consider the Riccati difference equation:
\[
P_{k-1}=A'P_{k}A+Q-A'P_kB(B'P_kB+R)^{-1}B'P_kA,\: P_T=Q.
\]
Then, the Riccati matrix evaluated at time $0$ is upper-bounded by
\[
\snorm{P_0}_2\le \poly\Big(\big(\frac{M}{\mu}\big)^{ \kappa},M^\kappa,\kappa,\snorm{Q}_2,\snorm{R}_2\Big).
\]
As a result, if $Q\succ 0$, then the unique positive definite solution $P$ of the algebraic Riccati equation:
\[
P=A'PA+Q-A'PB(B'PB+R)^{-1}B'PA
\]
satisfies the same bound
\[
\snorm{P}_2\le \poly\Big(\big(\frac{M}{\mu}\big)^{ \kappa},M^\kappa,\kappa,\snorm{Q}_2,\snorm{R}_2\Big).
\]
\end{lemma}
\begin{proof}
The optimal policy of the LQR problem does not depend on the noise. Even for deterministic systems, the optimal policy still has the same form $u_t=K_{\star}x_t$. This property is known as certainty equivalence~\citep[Ch. 4]{bertsekas2017dynamic}. In fact, for deterministic systems, the cost of regulation is given explicitly by $x_0'Px_0$. We leverage this idea to upper bound the stabilizing solution of the Riccati equation $P$.

\noindent\textbf{Step a) Noiseless system upper bound.} Consider the noiseless version of system~\eqref{CTRL_eq:system}
\begin{equation}\label{CTRL_eq:noiseless_system}
x_{k+1}=Ax_k+Bu_k,\quad \snorm{x_0}_2=1.
\end{equation}
Let $u_{0:t}$ be the shorthand notation for
\[
u_{0:t}=\matr{{c}u_{t}\\\vdots\\u_0}.
\]
Consider the deterministic LQR objective
\begin{align*}
\min_{u_{0:T-1}}&\quad J(u_{0:T-1})\triangleq x_T'Qx_T+\sum_{k=0}^{T-1}(x_k'Qx_k+u_k'Ru_k)\\
\mathrm{s.t.}& \quad \text{dynamics~\eqref{CTRL_eq:noiseless_system}}.
\end{align*}
The optimal cost of the problem is given by~\citep[Ch. 4]{bertsekas2017dynamic}
\[
\min_{u_{0:T-1}} J(u_{0:T-1})=x_0'P_0x_0,
\]
where $P_0$ is the value of $P_t$ at time $t=0$. Let $u_{0:T-1}$ be any input sequence. Immediately, by optimality, we obtain an upper bound for the Riccati matrix $P_0$:
\begin{equation}\label{CTRL_eq:deterministic_P_bound}
x_0'P_0x_0 \le J(u_{0:T-1}).
\end{equation}
Hence, it is sufficient to find a suboptimal policy that incurs a cost which is at most exponential in the controllability index $\kappa$.

\noindent\textbf{Step b) Suboptimal Policy.} It is sufficient to drive the state $x_{\kappa}$ to zero at time $\kappa$ with minimum energy $u_{0:\kappa-1}$ and then keep $x_{t+1}=0$, $u_{t}=0$, for $t\ge \kappa$. Recall that $\C_k$ is the controllability matrix at time $k$. By unrolling the state $x_{\kappa}$:
\[
x_{\kappa}=A^{\kappa}x_0+\C_{\kappa}u_{0:\kappa-1}.
\]
To achieve $x_{\kappa}=0$, it is sufficient to apply the minimum norm control
\[
u_{0:\kappa-1}=-\C^{\dagger}_{\kappa}A^{\kappa}x_0,
\]
which leads to input penalties
\[
\sum_{k=0}^{T-1}u'_{k}R u_k\le \snorm{R}_2\sigma^{-1}_{\min}(\Gamma_{\kappa})M^{2\kappa},
\]
where we used the fact that $\snorm{x_0}_2=1$.
For the state penalties, we can write in batch form
\[
x_{1:\kappa}\triangleq\matr{{c}x_{\kappa}\\\vdots\\x_1}=\matr{{cccc}B&AB&\cdots&A^{\kappa-1}B\\0&B&\cdots&A^{\kappa-2}B\\\vdots\\0&0&\cdots&B}u_{0:\kappa-1}+\matr{{c}A^{\kappa}\\A^{\kappa-1}\\\vdots\\A}x_0.
\]
Exploiting the Toeplitz structure of the first matrix above and by Cauchy-Schwarz
\begin{align*}
\sum_{t=0}^{T}x'_{t}Qx_t&\le \snorm{Q}_2 (\snorm{x_{1:\kappa}}^2_2+1)\\
&\le 2\snorm{Q}_2\big((\sum^{\kappa-1}_{t=0} \snorm{A^tB}_2)^2\snorm{u_{0:\kappa-1}}^2_2+\sum_{t=0}^{\kappa}\snorm{A^t}_2\big)\\
&\le 2\kappa^2\snorm{Q}_2(M^{4\kappa} \snorm{R}_2\sigma^{-1}_{\min}(\Gamma_{\kappa})+M^{2\kappa}).
\end{align*}
Putting everything together and since $x_0$ is arbitrary, we finally obtain
\begin{equation}\label{CTRL_eq:P_upper_bound}
\snorm{P_0}_2\le \frac{\snorm{R}_2}{\sigma_{\min}(\Gamma_{\kappa})}(M^{2\kappa}+2\kappa^2\snorm{Q}_2 M^{4\kappa})+2\kappa^2\snorm{Q}_2 M^{2\kappa}.
\end{equation}
The result for $P_0$ now follows from Theorem~\ref{CTRL_thm:gramian_lower_bound}.

\noindent\textbf{Step c) Steady State Riccati.} If the pair $(A,Q^{1/2})$ is observable, then from standard LQR theory (see Section~\ref{CTRL_app_sec:Riccati}), $\lim_{T\rightarrow \infty} P_0=P$ and the bound for $P$ follows directly.
\end{proof}
Similar results have been reported before~\citep{cohen2018online,chen2021black}. However, instead of $\kappa$ and $(M/\mu)^{\kappa}$, the inverse least singular value $\sigma^{-1}_{\min}(\Gamma_{k})$ shows up in the bounds, for some $k\ge \kappa$. Finally, based on Lemmas B.10 and B.11 of~\cite{simchowitz2020naive}, we provide some upper bounds on the $\mathcal{H}_{\infty}$-norm of the closed loop response $(zI-A-BK)^{-1}$, where $K$ is the control gain of the optimal LQR controller for some $Q$ and $R$.
\begin{lemma}[LQR Robustness Margins]\label{CTRL_lem:margins}
Let the system $(A,B)\in\R^{n\times (n+p)}$ be controllable and $\mu$-robustly coupled with controllability index $\kappa$. Let $R=I_p,\,Q=I_n$. Let $P$ be the stabilizing solution of the algebraic Riccati equation:
\[
P=A'PA+Q-A'PB(B'PB+R)^{-1}B'PA
\]
with $K_{\star}$ the respective control gain $ K_{\star}=-(B'PB+R)^{-1}B'PA. $ The spectral radius and the $\mathcal{H}_{\infty}$-norm of the closed loop response are upper bounded by
\begin{align}
(1-\rho(A+BK_{\star}))^{-1}&\le \poly\Big(\big(\frac{M}{\mu}\big)^{ \kappa},M^\kappa,\kappa\Big) \label{CTRL_eq:spectral_radius_margin}\\
\snorm{(zI-A-BK_{\star})^{-1}}_{\mathcal{H}_{\infty}}&\le \poly\Big(\big(\frac{M}{\mu}\big)^{ \kappa},M^\kappa,\kappa\Big)\label{CTRL_eq:hinfinity_margin}
\end{align}
\end{lemma}
\begin{proof}
First, note that since $Q=I$, immediately $(A,Q^{1/2})$ is observable and the stabilizing solution $P$ is well-defined. Note that the Riccati solution $P$ also satisfies the Lyapunov equation
\[
P=(A+BK_{\star})'P(A+BK_{\star})+I+K_{\star}'K_{\star}\succeq (A+BK_{\star})'P(A+BK_{\star})+I\succeq I.
\]
As a result,
\begin{equation}\label{CTRL_eq:Riccati_Inequality}
(A+BK_{\star})'(A+BK_{\star})\stackrel{i)}{\preceq} (A+BK_{\star})'P(A+BK_{\star})\preceq P-I \stackrel{ii)}{\preceq} (1-\snorm{P}^{-1}_2)P,
\end{equation}
where i) follows from $P\succeq I$ and the second step follows from the Lyapunov equation above. To prove ii) observe that $P-I=P^{1/2}(I-P^{-1})P^{1/2}$ and $P^{-1}\succeq \snorm{P}^{-1}_2 I$. Hence
\[
P-I\preceq P^{1/2}(I-\snorm{P}^{-1}_2 I)P^{1/2}=(1-\snorm{P}^{-1}_2)P.
\]
Applying inequality~\eqref{CTRL_eq:Riccati_Inequality} recursively
\[
\big((A+BK_{\star})'\big)^{t}(A+BK_{\star})^t\preceq \big(1-\snorm{P}^{-1}_2\big )^{t} P,\quad\text{and hence}\quad \snorm{(A+BK_{\star})^t}^2_2 \le \big(1-\snorm{P}^{-1}_2\big )^{t}\snorm{P}_2.
\]
From here, we immediately deduce that
\[
\rho(A+BK_{\star})\le \sqrt{1-\snorm{P}^{-1}_2},
\]
which by Lemma~\ref{CTRL_lem:Riccati_Upper} proves~\eqref{CTRL_eq:spectral_radius_margin}. For the $\mathcal{H}_{\infty}$-norm bound
\begin{align*}
\snorm{(zI-A-BK_{\star})^{-1}}_{\mathcal{H}_{\infty}}&\le \sum_{t\ge 0} \snorm{(A+BK_{\star})^t}_2\le \snorm{P}_2^{1/2}\frac{1}{1-\sqrt{1-\snorm{P}^{-1}_2}}\\
&\le \snorm{P}_2^{1/2}\frac{1+\sqrt{1-\snorm{P}^{-1}_2}}{\snorm{P}^{-1}_2}\le 2\snorm{P}_2^{3/2}.
\end{align*}
The proof of~\eqref{CTRL_eq:hinfinity_margin} now follows from Lemma~\ref{CTRL_lem:Riccati_Upper}.
\end{proof}
\section{Lower Bounds for the Problem of Stabilization}\label{CTRL_app_sec:STAB_lower_bounds}
In this section, we prove Theorem~\ref{CTRL_thm:STAB_lower_exponential} using information-theoretic methods. The main idea is to find systems that are nearly indistinguishable from data but require completely different stabilization schemes. We rely on Birgé's inequality~\citep{boucheron2013concentration}, which we review below for convenience.
\begin{definition}[KL divergence]
Let $\P$, $\mathbb{Q}$ be two probability measures on some space $(\Omega,\mathcal{A})$. Let $\mathbb{Q}$ be absolutely continuous with respect to $\mathbb{P}$, that is $\mathbb{Q}(A)=\E_{\mathbb{P}}(Y 1_{A})$ for some integrable non-negative random variable $Y$ with $\E_{\mathbb{P}}(Y)=1.$ The KL divergence $D(\mathbb{Q}||\P)$ is given by
\[
D(\mathbb{Q}||\P)\triangleq \E_{\mathbb{Q}}(\log Y).
\]
\end{definition}
\begin{theorem}[Birgé's Inequality~\citep{boucheron2013concentration}]\label{CTRL_thm:Birge}
Let $\P_0,\,\P_1$ be probability measures on $(\Omega,\mathcal{E})$ and let $E_0,\,E_1\in\mathcal{E}$ be disjoint events. If $1-\delta\triangleq \min_{i=0,1}\P_i(E_i)\ge 1/2$ then
\[
(1-\delta)\log\frac{1-\delta}{\delta}+\delta \log \frac{\delta}{1-\delta}\le D(\P_1||\P_0).
\]
\end{theorem}
The KL divergence between two Gaussian distributions with the same variance is given below.
\begin{lemma}[Gaussian KL divergence]\label{CTRL_app_lem:Gaussian_KL}
Let $\P=\mathcal{N}(\mu_1,\sigma^2)$ and $\mathbb{Q}=\mathcal{N}(\mu_2,\sigma^2)$. Then
\[
D(\mathbb{Q}||\P)=\frac{1}{2\sigma^2}(\mu_1-\mu_2)^2.
\]
\end{lemma}
\subsection{Proof of Theorem~\ref{CTRL_thm:STAB_lower_exponential}}
It is sufficient to prove the theorem for $\kappa=n$. The proof for $\kappa<n$ is similar. Let $\alpha>0$ be such that $\alpha+\mu<1$. Consider the systems:
\[
S_1:\quad x_{k+1}=\matr{{ccccc}1 &\mu&0&\cdots&0\\0& \alpha&\mu&\cdots&0\\& &\ddots &\ddots&\\0&0&0&\cdots&\mu\\0&0&0&\cdots&\alpha}x_k+\matr{{c}0\\0\\\vdots\\0\\\mu}u_k+ \matr{{c}1\\0\\\vdots\\0\\0}w_k,
\]
\[
S_2:\quad x_{k+1}=\matr{{ccccc}1 &-\mu&0&\cdots&0\\0& \alpha&\mu&\cdots&0\\& &\ddots &\ddots&\\0&0&0&\cdots&\mu\\0&0&0&\cdots&\alpha}x_k+\matr{{c}0\\0\\\vdots\\0\\\mu}u_k+ \matr{{c}1\\0\\\vdots\\0\\0}w_k.
\]
By construction, the systems are $\mu$-robustly coupled. Denote the state matrices by $A_1,A_2$ for $S_1,S_2$ respectively. Let $\phi_1(z)=\det(zI-A_1-B\hat{K}_N)$, $\phi_2(z)=\det(zI-A_2-B\hat{K}_N)$ be the respective characteristic polynomials. By Jury's criterion~\citep[Ch. 4.5]{fadali2013digital}, a necessary (but not sufficient) condition for stability is:
\[
\phi_1(1)>0,\,\phi_2(1)>0.
\]
A direct computation gives:
\[
\phi_1(1)=\abs{\begin{array}{ccccc}0 &-\mu&0&\cdots&0\\0& 1-\alpha&-\mu&\cdots&0\\& &\ddots &\ddots&\\0&0&0&\cdots&-\mu\\-\hat{K}_{N,1}&-\hat{K}_{N,2}&-\hat{K}_{N,3}&\cdots&1-\alpha-\hat{K}_{N,n} \end{array}}=-\hat{K}_{N,1}\mu^{n-1},\, \phi_2(1)=\hat{K}_{N,1}\mu^{n-1}.
\]
As a result, the events
\[
E_1=\set{\rho(A_1+B\hat{K}_{N})<1}\subseteq\set{\hat{K}_{N,1}<0},\quad E_2=\set{\rho(A_2+B\hat{K}_{N})<1}\subseteq\set{\hat{K}_{N,1}>0}
\]
are disjoint. By Theorem~\ref{CTRL_thm:Birge}, a necessary condition for stabilizing both systems with probability larger than $1-\delta$ is:
\begin{equation}\label{CTRL_eq:STAB_necessary}
D(\mathbb{P}_{1}||\mathbb{P}_{2})\ge (1-2\delta)\log\frac{1-\delta}{\delta}\ge \log\frac{1}{2.4 \delta}\ge \log\frac{1}{3 \delta}.
\end{equation}
Here $\mathbb{P}_i$ is a shorthand notation for $\P_{S_i,\pi}$, for $i=1,2$. Meanwhile, by the chain rule of KL divergence (see Exercise 4.4 in~\cite{boucheron2013concentration}):
\begin{align*}
D(\mathbb{P}_1||\mathbb{P}_2)&=\mathbb{E}_{\mathbb{P}_1}\Big( D(\mathbb{P}_1(\AUX)||\mathbb{P}_2(\AUX))\\&+\sum^{N}_{k=0}D(\mathbb{P}_1(x_k|x_{0:k-1},u_{0:k-1},\AUX)||\mathbb{P}_2(x_k|x_{0:k-1},u_{0:k-1},\AUX))\\
&+\sum^{N-1}_{k=0}D(\mathbb{P}_1(u_k|x_{0:k},u_{0:k-1},\AUX)||\mathbb{P}_2(u_k|x_{0:k},u_{0:k-1},\AUX))\Big),
\end{align*}
where $x_{0:k}$ is a shorthand notation for $x_{0},\dots,x_k$ (same for $u_{0:k}$). By $\mathbb{P}(X|Y)$ we denote the conditional distribution of $X$ given $Y$. Note that the inputs have the same conditional distributions under both measures, hence their KL divergence is zero. As a result
\begin{align*}
D(\mathbb{P}_1||\mathbb{P}_2)&=\mathbb{E}_{\mathbb{P}_1}\sum^{N}_{k=0}D(\mathbb{P}_1(x_k|x_{0:k-1},u_{0:k-1},\AUX)||\mathbb{P}_2(x_k|x_{0:k-1},u_{0:k-1},\AUX))\\
&\stackrel{1)}{=}\mathbb{E}_{\mathbb{P}_1} \sum^{N}_{k=0}D(\mathbb{P}_1(x_{k}|x_{k-1},u_{k-1})||\mathbb{P}_2(x_k|x_{k-1},u_{k-1}))\\
&\stackrel{2)}{=}\mathbb{E}_{\mathbb{P}_1} \sum^{N}_{k=0}D(\mathbb{P}_1(x_{k,1}|x_{k-1,1},x_{k-1,2})||\mathbb{P}_2(x_{k,1}|x_{k-1,1},x_{k-1,2})),
\end{align*}
where $1)$ follows from the Markov property of the linear system and 2) follows from an application of the chain rule, the structure of the dynamics, and the fact that all $x_{k,j}$ have the same conditional distribution under both measures for $j\ge 2$. Recall that the normal distribution is denoted by $\mathcal{N}(\mu,\Sigma)$. Now we can explicitly compute the KL divergence:
\begin{align}
D(\mathbb{P}_1||\mathbb{P}_2)&=\mathbb{E}_{\mathbb{P}_1}\sum^{N}_{k=1}D(\mathcal{N}(\alpha x_{k-1,1}+\mu x_{k-1,2},1)||\mathcal{N}(\alpha x_{k-1,1}-\mu x_{k-1,2},1))\nonumber\\
&\stackrel{i)}{=}\mathbb{E}_{\mathbb{P}_1}\sum_{k=1}^{N}2\mu^2 x^2_{k-1,2}=2\mu^2 \sum_{k=1}^{N}\mathbb{E}_{\mathbb{P}_1} x^2_{k-1,2}\label{CTRL_eq:KL_explicit_STAB},
\end{align}
where $i)$ follows by Lemma~\ref{CTRL_app_lem:Gaussian_KL}. By~\eqref{CTRL_eq:STAB_necessary},~\eqref{CTRL_eq:KL_explicit_STAB}, and Lemma~\ref{CTRL_lem:S1_Gramian_like_bound_STAB}, it is necessary to have
\[
N\sigma^2_u \ge \frac{1}{2}\paren{\frac{1}{\alpha+\mu}}^{2n-2}\paren{\frac{1-\alpha-\mu}{\mu}}^{2}\log\frac{1}{3 \delta}.
\]
Since we are free to choose $\alpha$, it is sufficient to choose $\alpha=0$. \hfill $\blacksquare$
\begin{lemma}\label{CTRL_lem:S1_Gramian_like_bound_STAB}
Consider system $S_1$ as defined above. Recall that $\P_1$ is a shorthand notation for $\P_{S_1,\pi}$. Then, under Assumption~\ref{CTRL_ass:input_budget}, we have
\[
\mathbb{E}_{\P_1} x^2_{k,2}\le \sigma^2_u (\alpha+\mu)^{2n-2} \paren{\frac{1}{1-(\alpha+\mu)}}^2.
\]
\end{lemma}
\begin{proof}
Let $e_2$ denote the canonical vector $e_2=\matr{{ccccc}0&1&0&\cdots&0}'$. Then
\[
x_{k,2}=\sum_{t=1}^{k}e'_2A^{t-1}Bu_{k-t}=\sum_{t=n-1}^{k}e'_2A^{t-1}Bu_{k-t},
\]
where the second equality follows from the fact that $e'_2A^{t-1}B=0$ for $t\le n-2$.
Moreover, we can upper bound:
\[
\abs{e'_2A^{t-1}B}\le \mu(\alpha+\mu)^{t-1}\le (\alpha+\mu)^{t},
\]
which follows from the fact that the sub-matrix $[A_1]_{2:n,2:n}$ of $A_1$, obtained by deleting the first row and column, is bi-diagonal and Toeplitz, hence $\snorm{[A_1]_{2:n,2:n}}_2\le \alpha+\mu$, and from $\mu\le\alpha+\mu$. Define $c_t\triangleq (\alpha+\mu)^{t}$. Then, we can upper bound $\abs{x_{k,2}}$ by
\[
\abs{x_{k,2}}\le \sum_{t=n-1}^{k}c_t \abs{u_{k-t}}.
\]
By Cauchy-Schwarz and Assumption~\ref{CTRL_ass:input_budget}
\[
\E_{S_1,\pi} u^2_{k}\le \sigma^2_u,\quad \E_{S_1,\pi} \abs{u_{k}u_t} \le \sigma^2_u.
\]
Finally, combining the above results
\[
\mathbb{E}_{S_1,\pi} x^2_{k,2}\le \sigma^2_u (\sum_{t=n-1}^k c_t)^2 \le \sigma^2_u (\alpha+\mu)^{2n-2} \paren{\frac{1}{1-(\alpha+\mu)}}^2,
\]
which completes the proof.
\end{proof}
\section{Upper Bounds for the Problem of Stabilization}\label{CTRL_app_sec:STAB_upper_bounds}
We employ a naive passive learning algorithm, using a white-noise exploration policy to excite the state. Our gain design proceeds in two parts. First, we perform system identification based on least squares~\citep{simchowitz2018learning}. Second, we use robust control to design the gain based on the identified model and bounds on the identification error of $A$ and $B$, similar to~\cite{dean2017sample}.
\subsection{Algorithm}
\begin{figure}
\centerline{
\begin{tikzpicture}[auto,>=latex']
\node [block] (plant) [draw, align=center] {White Noise\\Experiments};
\node [block, right=2.5cm of plant] (SID) [draw, align=center] {System\\Identification};
\node [block, right=1.8cm of SID] (controller) [draw, align=center] {Controller\\Design};
\coordinate [right=1.5cm of controller] (help_1) {};
\draw [->] (plant) -- node[name=y,above] {$x_0,\dots,x_N$} node[name=y,below] {$u_0,\dots,u_{N-1}$}(SID);
\draw [->] (SID) -- node[name=y,above] { $\hat{A}_N$, $\hat{B}_N$} node[name=y,below] { $\epsilon_A$, $\epsilon_B$} (controller);
\draw [->] (controller) -- node[name=y,above] {$\hat{K}_N$} (help_1);
\end{tikzpicture}
}
\caption{The block diagram of the stabilization scheme. First, we generate white noise inputs $u_t\sim\mathcal{N}(0,\bar{\sigma}^2_uI)$ to excite the system. Then we perform system identification based on least squares to obtain estimates $\hat{A}_N,\hat{B}_N$ of the true system matrices. Finally, we design a controller gain $\hat{K}_N$, based on the system estimates and upper bounds $\epsilon_A,\epsilon_B$ on the estimation error.}
\label{CTRL_fig:stabilization_architecture}
\end{figure}
The block diagram for the algorithm is shown in~Fig.~\ref{CTRL_fig:stabilization_architecture}. To generate the input data $u_0,\dots,u_{N-1}$, we employ white noise inputs $u_k\sim\mathcal{N}(0,\bar{\sigma}^2_u I)$, $\bar{\sigma}^2_u=\sigma^2_u/p$, where we normalize with $p$ in order to satisfy Assumption~\ref{CTRL_ass:input_budget}. For the system identification part, we use a least squares algorithm
\begin{equation}\label{CTRL_eq:least_squares}
\matr{{cc}\hat{A}_N&\hat{B}_N}=\arg\min_{\set{F\in\R^{n\times n},G\in \R^{n\times p}}} \sum_{t=0}^{N-1}\snorm{x_{t+1}-Fx_t-Gu_t}^2_2,
\end{equation}
to obtain estimates of the matrices $A,\,B$. Now, let $\epsilon_A,\,\epsilon_B$ be large enough constants such that $\snorm{A-\hat{A}_N}_2\le \epsilon_A$, $\snorm{B-\hat{B}_N}_2\le \epsilon_B$.
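To make the exploration and identification steps concrete, the following short Python sketch (ours, for illustration only; the toy system, the noise model, and the use of \texttt{numpy} are assumptions, and it is not the exact implementation analyzed here) simulates white-noise experiments and solves the least-squares problem~\eqref{CTRL_eq:least_squares}:
\begin{verbatim}
# Illustration only: white-noise experiments + least-squares identification.
import numpy as np

rng = np.random.default_rng(0)
n, p, N = 3, 1, 2000
# Arbitrary toy system (not one of the systems analyzed in this paper).
A = np.array([[0.9, 0.3, 0.0],
              [0.0, 0.8, 0.3],
              [0.0, 0.0, 0.7]])
B = np.array([[0.0], [0.0], [1.0]])
H = np.eye(n)

sigma_u_bar = 1.0 / np.sqrt(p)     # bar{sigma}_u^2 = sigma_u^2 / p, with sigma_u^2 = 1
x = np.zeros(n)
X, U, X_next = [], [], []
for _ in range(N):
    u = sigma_u_bar * rng.standard_normal(p)
    x_new = A @ x + B @ u + H @ rng.standard_normal(n)
    X.append(x); U.append(u); X_next.append(x_new)
    x = x_new

# Least squares: regress x_{t+1} on the stacked regressor (x_t, u_t).
Z = np.hstack([np.array(X), np.array(U)])            # N x (n+p)
Theta, *_ = np.linalg.lstsq(Z, np.array(X_next), rcond=None)
A_hat, B_hat = Theta.T[:, :n], Theta.T[:, n:]
print(np.linalg.norm(A - A_hat, 2), np.linalg.norm(B - B_hat, 2))
\end{verbatim}
In the sketch, the estimation errors are computed against the true matrices only for illustration; in practice one would rely on high-probability bounds of the kind given in Theorem~\ref{CTRL_thm:identification} to choose $\epsilon_A,\epsilon_B$.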
To design the controller gain $\hat{K}_N$, it is sufficient to solve the following problem
\begin{equation}
\begin{aligned}\label{CTRL_eq:stabilization_scheme}
\mathrm{find}\:&\quad {K\in\R^{p\times n}}\\
\mathrm{s.t.} &\quad \norm{\matr{{c}\sqrt{2}\epsilon_A (zI-\hat{A}_N-\hat{B}_NK)^{-1}\\\sqrt{2}\epsilon_B K(zI-\hat{A}_N-\hat{B}_NK)^{-1}}}_{\mathcal{H}_{\infty}}<1.
\end{aligned}
\end{equation}
The idea behind the scheme is the following. Let $\hat{K}_N$ be a gain that stabilizes the estimated plant $(\hat{A}_N,\hat{B}_N)$. To make sure that it also stabilizes the nominal plant $(A,B)$, we impose some additional robustness conditions. In fact, as we show in Theorem~\ref{CTRL_thm:feasibility_STAB}, any feasible gain of problem~\eqref{CTRL_eq:stabilization_scheme} will stabilize any plant $(\hat A,\hat B)$ that satisfies $\snorm{\hat A-\hat{A}_N}_2\le \epsilon_A$, $\snorm{\hat B-\hat{B}_N}_2\le \epsilon_B$, including the nominal one. In this work, we do not study how to efficiently solve~\eqref{CTRL_eq:stabilization_scheme}. For efficient implementations one can refer to~\cite{dean2017sample}. Note that the certainty equivalent LQR design~\citep{mania2019certainty} or the SDP relaxation method~\citep{cohen2018online,chen2021black} could also work as stabilization schemes.
\subsection{System Identification Analysis}
Here we review a fundamental system identification result from~\cite{simchowitz2018learning}. The original proof can be easily adapted to the case of singular noise matrices $H$~\citep{tsiamis2021linear}.
\begin{theorem}[Identification Sample Complexity]\label{CTRL_thm:identification}
Consider a system $S=(A,B,H)$ such that Assumption~\ref{CTRL_ass:general_setting} is satisfied. Let $(A,B)$ be controllable with $\Gamma_{k}=\Gamma_k(A,B)$ the respective controllability Gramian and $\kappa=\kappa(A,B)$ the respective controllability index. Then, under the least squares system identification algorithm~\eqref{CTRL_eq:least_squares} and white noise inputs $u_k\sim\mathcal{N}(0,\bar{\sigma}^2_u I_p)$, we obtain
\begin{align}
\P_{S,\pi}(\snorm{\matr{{cc}A-\hat{A}_N&B-\hat{B}_N}}_2\ge \epsilon)\le \delta \nonumber
\end{align}
if we have a large enough sample size
\[
N\bar{\sigma}^2_u\ge \frac{\poly(n,\log1/\delta,M)}{\epsilon^2\sigma_{\min}(\Gamma_{\kappa})}\log N.
\]
\end{theorem}
\begin{proof}
The proof is almost identical to that of Theorem~4 in~\cite{tsiamis2021linear}. The difference is that here we consider only the Gramian and index of $(A,B)$ in the final bound, while in~\cite{tsiamis2021linear} the Gramian and index of $(A,\matr{{cc}H&B})$ appear. We repeat the proof here to avoid notation ambiguity. Our goal is to apply Theorem~2.4 in~\cite{simchowitz2018learning}. Define the noise-controllability Gramian $\Gamma^h_{t}=\Gamma_t(A,H)$ as well as the combined controllability Gramian
\[
\Gamma^c_t=\Gamma_t(A,\matr{{cc}\bar{\sigma}_u B&H})=\bar{\sigma}^2_u\Gamma_{t}+\Gamma^h_{t}.
\]
Define $y_{k}=\matr{{cc}x'_k&u'_k}'$. It follows that for all $j\ge 0$ and all unit vectors $v\in\R^{(n+p)\times 1},$ the following small-ball condition is satisfied:
\begin{equation}\label{CTRL_eq:small_ball}
\frac{1}{2\kappa}\sum_{t=0}^{2\kappa}\P(\abs{v'y_{t+j}}\ge \sqrt{v'\Gamma_{\mathrm{sb}}v} |\bar{\F}_j)\ge \frac{3}{20},
\end{equation}
where
\begin{equation}\label{CTRL_eq:small_ball_covariance}
\Gamma_{\mathrm{sb}}=\matr{{cc}\Gamma^c_{\kappa}&0\\0&\bar{\sigma}^2_uI_p}.
\end{equation}
Equation~\eqref{CTRL_eq:small_ball} follows from the same steps as in Proposition~3.1 in~\cite{simchowitz2018learning} with the choice $k=2\kappa$. Next, we determine an upper bound $\bar{\Gamma}$ for the Gram matrix $\sum_{t=0}^{N-1}y_ty'_t$. Using a Markov inequality argument as in~\cite[proof of Th.~2.1]{simchowitz2018learning}, we obtain that
\[
\P(\sum_{t=0}^{N-1}y_ty'_t\preceq \bar{\Gamma})\ge 1-\delta,
\]
where
\[\bar{\Gamma}=\frac{n+p}{\delta}N\matr{{cc}\Gamma^c_N&0\\0&\bar{\sigma}^2_u I_p}.\]
Now, we can apply Theorem 2.4 of~\cite{simchowitz2018learning}. With probability at least $1-3\delta$ we have $\snorm{\matr{{cc}A-\hat{A}_N&B-\hat{B}_N}}_2\le \epsilon$ if:
\begin{align*}
N&\ge \frac{\poly(n,\log1/\delta,M)}{\epsilon^2\sigma_{\min}(\Gamma^c_{\kappa})}\log\det (\bar{\Gamma}\Gamma^{-1}_{\mathrm{sb}}),
\end{align*}
where we have simplified the expression by absorbing constants into the polynomial factor. Using Lemma~1 in~\cite{tsiamis2021linear}, we obtain
\[
\log\det (\bar{\Gamma}\Gamma^{-1}_{\mathrm{sb}})=\poly(n,M,\log 1/\delta)\log N.
\]
Moreover, we use the lower bound $\Gamma^c_k\succeq \bar{\sigma}^2_u\Gamma_k$, which holds for every $k\ge 0$.
\end{proof}
We note that we can easily obtain sharper bounds by considering the combined controllability Gramian $\Gamma_k(A,\matr{{cc}\bar{\sigma}_u B&H})$ for the identification stage. For economy of presentation, we omit such an analysis here.
\subsection{Sensitivity of Stabilization}
Here we prove that when~\eqref{CTRL_eq:stabilization_scheme} is feasible, then $\hat{K}_N$ stabilizes all plants $(A,B)$ such that $\snorm{A-\hat{A}_N}_2\le \epsilon_A$, $\snorm{B-\hat{B}_N}_2\le \epsilon_B$. We also show that feasibility is guaranteed as long as we can achieve small enough error bounds $\epsilon_A$, $\epsilon_B$.
\begin{theorem}\label{CTRL_thm:feasibility_STAB}
Let $\hat{K}_N$ be a feasible solution to problem~\eqref{CTRL_eq:stabilization_scheme} for some $\epsilon_A,\epsilon_B>0$. Then for any system $(A,B)$ such that $\snorm{A-\hat{A}_N}_2\le \epsilon_A$, $\snorm{B-\hat{B}_N}_2\le \epsilon_B$ we have that
\[
\rho(A+B\hat{K}_N)<1.
\]
Moreover, if the true system $(A,B)$ is $\mu$-robustly coupled with controllability index $\kappa$, there exists an $\epsilon_0>0$ with
\[
\epsilon_0^{-1}=\poly\Big(\big(\frac{M}{\mu}\big)^{ \kappa},M^\kappa,\kappa\Big)
\]
such that Problem~\eqref{CTRL_eq:stabilization_scheme} is feasible if $\epsilon_A,\epsilon_B\le \epsilon_0$.
\end{theorem}
\begin{proof}
Let $\hat{K}_N$ be a feasible solution to problem~\eqref{CTRL_eq:stabilization_scheme}. Define $\mbf{\Phi}_x=(zI-\hat{A}_N-\hat{B}_N\hat{K}_N)^{-1}$, which is well-defined and stable since $\epsilon_A>0$ and $\snorm{\mbf{\Phi}_x}_{\mathcal{H}_{\infty}}<1/(\sqrt{2}\epsilon_A)$. Define the system difference
\[
\mbf{\Delta}\triangleq (\hat{A}_N-A)\mbf{\Phi}_x+(\hat{B}_N-B)\hat{K}_N\mbf{\Phi}_x.
\]
It follows from simple algebra that:
\begin{align*}
zI-A-B\hat{K}_N&=zI-\hat{A}_N-\hat{B}_N\hat{K}_N+(\hat{A}_N-A)+(\hat{B}_N-B)\hat{K}_N\\
&=(I+\mbf{\Delta})(zI-\hat{A}_N-\hat{B}_N\hat{K}_N).
\end{align*}
If $(I+\mbf{\Delta})^{-1}$ is stable, then the closed loop response is stable and well-defined
\[
(zI-A-B\hat{K}_N)^{-1}=(zI-\hat{A}_N-\hat{B}_N\hat{K}_N)^{-1}(I+\mbf{\Delta})^{-1}.
\]
But $(I+\mbf{\Delta})^{-1}$ being stable is equivalent to
\[
\snorm{(I+\mbf{\Delta})^{-1}}_{\mathcal{H}_{\infty}}<\infty.
\]
A sufficient condition for this to occur is to require~\citep{dean2017sample}
\[
\snorm{\mbf{\Delta}}_{\mathcal{H}_{\infty}}<1.
\]
By Proposition~3.5 (select $\alpha=1/2$) of~\citep{dean2017sample}
\[
\snorm{\mbf{\Delta}}_{\mathcal{H}_{\infty}}<\norm{\matr{{c}\sqrt{2}\epsilon_A (zI-\hat{A}_N-\hat{B}_N\hat{K}_N)^{-1}\\\sqrt{2}\epsilon_B \hat{K}_N(zI-\hat{A}_N-\hat{B}_N\hat{K}_N)^{-1}}}_{\mathcal{H}_{\infty}}<1.
\]
This completes the proof of $\rho(A+B\hat{K}_N)<1$. To prove feasibility, consider the optimal LQR gain $K_{\star}$, for $Q=I_n$, $R=I_p$. Following Lemma~4.2 in~\cite{dean2017sample}, if the following sufficient condition holds
\[
(\epsilon_{A}+\epsilon_B\snorm{K_{\star}}_2)\snorm{(zI-A-BK_{\star})^{-1}}_{\mathcal{H}_{\infty}}\le 1/5,
\]
then $K_{\star}$ is a feasible solution:
\[
\norm{\matr{{c}\sqrt{2}\epsilon_A (zI-\hat{A}_N-\hat{B}_NK_{\star})^{-1}\\\sqrt{2}\epsilon_B K_{\star}(zI-\hat{A}_N-\hat{B}_NK_{\star})^{-1}}}_{\mathcal{H}_{\infty}}<1.
\]
Hence, we can choose
\begin{equation}\label{CTRL_eq:epsilon_feasibility_STAB}
\epsilon_0=\big(5(1+\snorm{K_{\star}}_2)\snorm{(zI-A-BK_{\star})^{-1}}_{\mathcal{H}_{\infty}}\big)^{-1}.
\end{equation}
The fact that $\epsilon_0^{-1}=\poly\Big(\big(\frac{M}{\mu}\big)^{ \kappa},M^\kappa,\kappa\Big)$ follows from Lemmas~\ref{CTRL_lem:Riccati_Upper} and~\ref{CTRL_lem:margins}.
\end{proof}
\subsection{Proof of Theorem~\ref{CTRL_thm:upper_bounds_STAB}}
Let $u_t\sim\mathcal{N}(0,\bar{\sigma}^2_u I)$, with $\bar{\sigma}^2_u=\sigma^2_u/p$. Consider the stabilization algorithm as described in~\eqref{CTRL_eq:least_squares} and~\eqref{CTRL_eq:stabilization_scheme}. Consider the $\epsilon_0$ defined in~\eqref{CTRL_eq:epsilon_feasibility_STAB}. By Theorems~\ref{CTRL_thm:identification} and~\ref{CTRL_thm:feasibility_STAB}, if
\[
N\sigma^2_{u}\ge \underbrace{\frac{\poly(n,\log 1/\delta,M)}{\epsilon^2_0\sigma_{\min}(\Gamma_{\kappa})}}_{\triangleq\,\mathcal{N}}\log N
\]
we have with probability at least $1-\delta$ that $\snorm{A-\hat{A}_N}_2,\snorm{B-\hat{B}_N}_2\le \epsilon_0$ and problem~\eqref{CTRL_eq:stabilization_scheme} is feasible with $\epsilon_B=\epsilon_A=\epsilon_0$. By Theorems~\ref{CTRL_thm:gramian_lower_bound} and~\ref{CTRL_thm:feasibility_STAB},
\[
\mathcal{N}=\poly\paren{\Big(\frac{M}{\mu}\Big)^\kappa,M^{\kappa},n,\log 1/\delta}.
\]
To complete the proof we use the fact that
\[
N\ge c\log N\text{ if }N\ge 2c\log 2c.
\]
\section{Regret Lower Bounds}
First let us state an application of the main result of~\cite{ziemann2022regret}. Consider a system $(A,B,H)\in\R^{n\times (n+p+n)}$, where $(A,B)$ is controllable and $H=I_n$. Let $P$ be the respective Riccati matrix for $Q=I_n$, $R=I_p$, with $K_{\star}$ the corresponding optimal LQR gain. Fix a matrix $\Delta\in\R^{n\times p}$ and define the family of systems:
\begin{equation}\label{CTRL_eq:general_system_family}
A(\theta)=A-\theta \Delta K_{\star},\, B(\theta)=B+\theta \Delta,\, H(\theta)=I_n,
\end{equation}
where $\theta\in \mathcal{B}(0,\epsilon)$, for some small $\epsilon$. Assume that $\epsilon$ is small enough such that the Riccati equation has a stabilizing solution for every system in the above family. The respective Riccati matrix is denoted by $P(\theta)$ and the LQR gain by $K(\theta)$. The derivative of $K_{\star}(\theta)$ with respect to $\theta$ at point $\theta=0$ is given by the following formula.
\begin{lemma}[Lemma 2.1~\citep{simchowitz2020naive}]\label{CTRL_lem:K_derivative}
If the system $(A,B)$ is stabilizable, then
\[
\frac{d}{d\theta}K_{\star}(\theta)|_{\theta=0}=-(B'PB+R)^{-1}\Delta'P(A+BK_*).
\]
\end{lemma}
Finally, let $\Sigma_x$ be the solution to the Lyapunov equation:
\begin{equation}\label{CTRL_eq:appendix_steady_state_covariance}
\Sigma_x=(A+BK_{\star})\Sigma_x(A+BK_{\star})'+I_n.
\end{equation}
\begin{theorem}[Application of Theorem~1 in~\cite{ziemann2022regret}]\label{CTRL_thm:variation_lower_bounds}
Consider a system $S=(A,B,H)\in\R^{n\times (n+p+n)}$, where $(A,B)$ is controllable and $H=I_n$. Let $P$ be the respective solution of the algebraic Riccati equation for $Q=I_n$, $R=I_p$, with $K_{\star}$ the respective optimal LQR gain. Recall the definition of $\Sigma_x$ in~\eqref{CTRL_eq:appendix_steady_state_covariance}. Define the family of systems $\CC_S({\epsilon})\triangleq\set{(A(\theta),B(\theta),I_n),\,\theta\in\mathcal{B}(0,\epsilon)}$ as defined in~\eqref{CTRL_eq:general_system_family}, for any $\epsilon>0$ sufficiently small such that $P(\theta)$ and $K_{\star}(\theta)$ are well-defined. Let $Q_T=P(\theta)$. Then for any $\alpha\in (0,1/4)$:
\begin{align}\label{CTRL_eq:main_technical_lower_bound}
&\liminf_{T\rightarrow \infty}\sup_{\hat{S}\in\CC_S(T^{-\alpha})}\E_{\hat{S},\pi}\frac{R_{T}(\hat{S})}{\sqrt{T}}\ge \frac{1}{2\sqrt{2}}\sqrt{\frac{F}{L}},
\end{align}
where
\begin{align*}
F&=\Tr\bigg((B'PB+R)^{-1}\Delta' P \clint{\Sigma_x-I_n}P\Delta\bigg)\\
L&= n (\snorm{\Delta K_{\star}}^2_2+\snorm{\Delta}^2_2)\snorm{(B'PB+R)^{-1}}_{2}.
\end{align*}
\end{theorem}
\begin{proof}
Note that since $\Sigma_x\succeq I_n$ is invertible,
\begin{align*}
\Delta'P(A+BK_{\star})=0&\Leftrightarrow \Delta'P(A+BK_{\star})\Sigma_x(A+BK_{\star})'P\Delta=0\\
&\Leftrightarrow \Delta'P(\Sigma_x-I_n)P\Delta=0.
\end{align*}
Hence, if $\Delta'P(A+BK_{\star})=0$, then $F=0$ and the regret lower bound becomes $0$, in which case the claim of the theorem is trivially true. We will therefore assume that $\Delta'P(A+BK_{\star})\neq 0$. All systems in the family have the same closed-loop response under the control policy $u=K_{\star}x$. In particular, for all $\theta\in \mathcal{B}(0,\epsilon)$:
\[
\frac{d}{d\theta}\matr{{cc}A(\theta)&B(\theta)}\matr{{c}I_n\\K_{\star}}=\matr{{cc}-\Delta K_{\star}&\Delta}\matr{{c}I_n\\K_{\star}}=0.
\]
Moreover, by Lemma~\ref{CTRL_lem:K_derivative}
\[
\frac{d}{d\theta}K_{\star}(\theta)|_{\theta=0}=-(B'PB+R)^{-1}\Delta'P(A+BK_{\star})\neq 0.
\]
By Proposition 3.4 in~\cite{ziemann2022regret}, the above two conditions imply that the family $\CC_{S}(\epsilon)$ is $\epsilon$-uninformative (see Section 3 in~\cite{ziemann2022regret} for the definition). Next, by Lemma 3.6 in~\cite{ziemann2022regret}, the family is also $L$-information regret bounded (see Section 3 in~\cite{ziemann2022regret} for the definition), where
\[
L=\Tr(I_n)\snorm{\matr{{cc}-\Delta K_{\star}&\Delta}}^2_2\snorm{(B'PB+R)^{-1}}_{2}\stackrel{i)}{\le} n (\snorm{\Delta K_{\star}}^2_2+\snorm{\Delta}^2_2)\snorm{(B'PB+R)^{-1}}_{2}.
\]
Inequality $i)$ follows from $\Tr(I_n)=n$ and the norm property
\[\snorm{\matr{{cc}M_1&M_2}}^2_2=\snorm{\matr{{cc}M_1&M_2}\matr{{cc}M_1&M_2}'}_2=\snorm{M_1M_1'+M_2M_2'}_2 \le \snorm{M_1}^2_2+\snorm{M_2}^2_2.\]
Applying Theorem~1 in~\cite{ziemann2022regret}, we get~\eqref{CTRL_eq:main_technical_lower_bound}, for $L$ defined as above and
\[
F=\Tr\bigg(\clint{\Sigma_x \otimes (B'PB+R)}(\frac{d}{d\theta}\VEC K_{\star}(\theta)|_{\theta=0})(\frac{d}{d\theta}\VEC K_{\star}(\theta)|_{\theta=0})'\bigg),
\]
where $\otimes$ is the Kronecker product and $\VEC$ is the vectorization operator (mapping a matrix into a column vector by stacking its columns).
Using the identities:
\[
\VEC(XYZ)=(Z'\otimes X)\VEC(Y),\qquad \Tr(\VEC(X)\VEC(Y)')=\Tr (XY'),
\]
we can rewrite $F$ as
\[
F=\Tr\bigg((B'PB+R)\frac{d}{d\theta}K(\theta)|_{\theta=0}\Sigma_x \frac{d}{d\theta}K'(\theta)|_{\theta=0}\bigg).
\]
By Lemma~\ref{CTRL_lem:K_derivative} and the property $\Tr(XY)=\Tr(YX)$, we finally get
\[
F=\Tr\bigg((B'PB+R)^{-1} \Delta'P(A+BK_*)\Sigma_x(A+BK_*)' P\Delta\bigg).
\]
The result follows from $(A+BK_*)\Sigma_x(A+BK_*)'=\Sigma_x-I_{n}$.
\end{proof}
\subsection{Proof of Lemma~\ref{CTRL_lem:modular_bound_two_subsystems}}
The result follows by Theorem~\ref{CTRL_thm:variation_lower_bounds}. We only need to compute and simplify $F$ and $L$. Due to the structure of system~\eqref{CTRL_eq:composite_system}, we have
\[
P=\matr{{cc}1&0\\0&P_0},\,K_{\star}=\matr{{cc}0&0\\0&K_{0,\star}}.
\]
Moreover, due to the structure of the perturbation $\Delta$ in~\eqref{CTRL_eq:perturbation_structure}
\[
B'PB+R=\matr{{cc}2&0\\0&B'_0P_0B_0+R_0},\, P\Delta(B'PB+R)^{-1}\Delta'P=\frac{1}{2}\matr{{cc}0&0\\0&P_0\Delta_1\Delta'_1P_0}.
\]
Hence
\[
F=\frac{1}{2}\Tr\bigg(\matr{{cc}0&0\\0&P_0\Delta_1\Delta'_1P_0}(\Sigma_{x}-I_n)\bigg)=\frac{1}{2}\Delta'_1P_0(\Sigma_{0,x}-I_{n-1})P_0\Delta_1.
\]
Finally we have $L\le n$, since $\Delta K_{\star}=0$, $\Delta_1$ has unit norm, and $R=I_p$.\hfill $\blacksquare$
\subsection{Proof of Lemma~\ref{CTRL_lem:integrator_system_theoretic_parameters}}
First note that $P_0\succeq Q_0=I_{n-1}$. As a result, we have
\[
\snorm{P_0(\Sigma_{0,x}-I_{n-1})P_0}_2\ge \snorm{\Sigma_{0,x}-I_{n-1}}_2.
\]
It is sufficient to lower bound $\snorm{\Sigma_{0,x}-I_{n-1}}_2$. Consider the recursion:
\[
\Sigma_k=(A_0+B_0K_{0,\star})\Sigma_{k-1}(A_0+B_0K_{0,\star})'+I_{n-1},\,\Sigma_{0}=0.
\]
Then $\Sigma_{0,x}=\lim_{k\rightarrow \infty}\Sigma_{k}\succeq \Sigma_{n-1}\succeq I_{n-1}$. The second inequality follows from monotonicity of the Lyapunov operator:
\[
g(X)=(A_0+B_0K_{0,\star})X(A_0+B_0K_{0,\star})'+I_{n-1},
\]
i.e. $g(X)\succeq g(Y) $ if $X\succeq Y$. What remains is to lower bound $\snorm{\Sigma_{n-1}-I_{n-1}}_2$. Let $e_1=\matr{{cccc}1&0&\cdots&0}'$ be the first canonical vector. Due to the structure of $A_0,B_0$
\[
e_1'(A_0+B_0K_{0,\star})^i=e_1'(A_0)^i,\text{ for }i\le n-1.
\]
Hence
\begin{align*}
\snorm{\Sigma_{n-1}-I_{n-1}}_2&\ge e_1'(\Sigma_{n-1}-I_{n-1})e_1\\
&=\sum_{k=1}^{n-1} e_1' A^k_0(A'_0)^k e_1.
\end{align*}
After some algebra we can compute analytically
\begin{align*}
\snorm{\Sigma_{n-1}-I_{n-1}}_2&\ge \sum_{k=1}^{n-1} \sum_{t=0}^{k}\binom{k}{t}^2=\sum_{k=1}^{n-1} \binom{2k}{k}\ge \binom{2(n-1)}{n-1}\ge \paren{\frac{2(n-1)}{n-1}}^{n-1}=2^{n-1},
\end{align*}
which completes the proof. \hfill $\blacksquare$
\subsection{Proof of Theorem~\ref{CTRL_thm:REG_lower_exponential}}
It is sufficient to prove the result for the class $\CC^{\mu}_{n,n-1}$. If $n>\kappa+1$, then we can consider the system:
\[
\tilde{A}=\matr{{c|c}0&0\\\hline 0&A},\, \tilde{B}=\matr{{c|c}I_{n-\kappa-1}&0\\\hline 0&B},\, \tilde{H}=\matr{{c|c}I_{n-\kappa-1}&0\\\hline 0&H}
\]
where $(A,B,H)\in\CC^{\mu}_{\kappa,\kappa-1}$ and repeat the same arguments. The proof follows from Lemma~\ref{CTRL_lem:modular_bound_two_subsystems} and Lemma~\ref{CTRL_lem:integrator_system_theoretic_parameters}. What remains to show is that for every $\epsilon$
\[
\CC_{S}(\epsilon)\subseteq \CC^{\mu}_{n,n-1}(\epsilon).
\]
This follows from the fact that $\Delta K_{\star}=0$, hence $A=A(\theta)$ and $\snorm{B-B(\theta)}_2=\abs{\theta}\snorm{\Delta}_2=\abs{\theta} \le \epsilon$.
Thus,
\[
\snorm{\matr{{cc}A-A(\theta)&B-B(\theta)}}_2\le \epsilon.
\]
Since $\CC_{S}(\epsilon)\subseteq \CC^{\mu}_{n,n-1}(\epsilon)$, we get
\[
\liminf_{T\rightarrow \infty}\sup_{\hat{S}\in \CC^{\mu}_{n,n-1}({T^{-\alpha}})}\E_{\hat{S},\pi}\frac{R_{T}(\hat{S})}{\sqrt{T}}\ge \liminf_{T\rightarrow \infty}\sup_{\hat{S}\in \CC_S({T^{-\alpha}})}\E_{\hat{S},\pi}\frac{R_{T}(\hat{S})}{\sqrt{T}} \tag*{$\blacksquare$}\]
\subsection{Stable System Example}\label{CTRL_sec:REG_stable}
Here we show that the local minimax expected regret can be exponential in the dimension even for stable systems. Using again the two subsystems trick, consider the following stable system
\begin{equation}\label{CTRL_eq:REG_difficult_example_stable}
S:\qquad x_{k+1}=\matr{{c|ccccc}0&0&0&&0&0\\\hline 0&\rho&2&&0&0\\& &&\ddots&\\0&0&0& &\rho&2\\0&0&0& &0&\rho}x_k+\matr{{c|c}1&0\\0&0\\\vdots\\0&1}u_k+w_{k},\,0<\rho<1,
\end{equation}
with $Q=I_n$, $R=I_2$. Following the notation of~\eqref{CTRL_eq:composite_system} let:
\begin{equation}\label{CTRL_eq:REG_difficult_example_stable_subsystem}
A_0=\matr{{cccccc}\rho&2&0&&0&0\\0&\rho&2&&0&0\\& &&\ddots&\\0&0&0& &\rho&2\\0&0&0& &0&\rho},\, B_0=\matr{{c}0\\0\\\vdots\\0\\1},\,Q_0=I_{n-1},\,R_0=1,
\end{equation}
where $A_0\in\R^{(n-1)\times (n-1)}$ and $B_0\in \R^{n-1}$. Note that $A_0$ has spectral radius $\rho<1$. Let $\Delta=\matr{{cc}0&0\\\Delta_1&0}$. Then, by Lemma~\ref{CTRL_lem:modular_bound_two_subsystems}, the local minimax expected regret for system $S$, given the perturbation $\Delta_1$, is lower bounded by
\begin{align*}
&\liminf_{T\rightarrow \infty}\sup_{\hat{S}\in \CC_S({T^{-\alpha}})}\E_{\hat{S},\pi}\frac{R_{T}(\hat{S})}{\sqrt{T}}\ge \frac{1}{4\sqrt{n}}\sqrt{\Delta'_1 P_0 \clint{\Sigma_{0,x}-I_{n-1}}P_0\Delta_1}.
\end{align*}
As we show in the following lemma, the quantity $\sqrt{\Delta'_1 P_0 \clint{\Sigma_{0,x}-I_{n-1}}P_0\Delta_1}$ is exponential in $n$ if we choose $\Delta_1$ appropriately. Although the system is stable, it is very sensitive to inputs and noise. Any signal $u_{k,2}$ that we apply gets amplified by $2$ as we move up the chain from state $x_{k,n}$ to state $x_{k,2}$. As a result, any suboptimal policy will result in excessive excitation of the state.
\begin{lemma}[Stable systems can be hard to learn]\label{CTRL_lem:stable_system_theoretic_parameters}
Consider system~\eqref{CTRL_eq:REG_difficult_example_stable_subsystem}. Let $P_0$ be the Riccati matrix for $Q_0=I_{n-1},R_0=1$, with $K_{\star,0}$, $\Sigma_{0,x}$ the corresponding LQR control gain and steady-state covariance, respectively. Then
\[\snorm{P_0 \clint{\Sigma_{0,x}-I_{n-1}}P_0}_2\ge 2^{4n-8}+o(1),
\]
where $o(1)$ goes to zero as $n\rightarrow \infty$.
\end{lemma}
\begin{proof}
Let $\Delta_1=\matr{{ccccc}0&0&\cdots&1&0}'$. It is sufficient to prove that
\[
\Delta_1'P_0(\Sigma_{0,x}-I_{n-1})P_0\Delta_1
\]
is exponential. Using the identity $\Sigma_{0,x}-I_{n-1}=(A_0+B_0K_{\star,0})\Sigma_{0,x}(A_0+B_0K_{\star,0})'$, $\Sigma_{0,x}\succeq I$, we have:
\[
\Delta_1'P_0(\Sigma_{0,x}-I_{n-1})P_0\Delta_1 \ge \snorm{\Delta'_1 P_0 (A_0+B_0K_{\star,0})}^2_2.
\]
By Lemma~\ref{CTRL_lem_app:stable_aux1} and Lemma~\ref{CTRL_lem:exponential_riccati_stable} it follows that
\[
\snorm{\Delta'_1 P_0 (A_0+B_0K_{\star,0})}^2_2 \ge 2^{4n-8}+o(1).
\]
\end{proof}
\begin{lemma}[Riccati matrix can grow exponentially]\label{CTRL_lem:exponential_riccati_stable}
For system~\eqref{CTRL_eq:REG_difficult_example_stable_subsystem} we have:
\[
B_0'P_{0}B_0+R_0\ge 2^{2n-4}+1.
\]
\end{lemma}
\begin{proof}
Consider the Riccati operator:
\[
g(X,Y)=A_0'XA_0+Y-A_0'XB_0(B_0'XB_0+R_0)^{-1}B_0'XA_0.
\]
Based on the above notation, we have $P_{0}=g(P_{0},Q_0)$. The Riccati operator is monotone~\citep{anderson2005optimal}, i.e.
\[
X_1\succeq X_2\Rightarrow g(X_1,Y)\succeq g(X_2,Y).
\]
It is also trivially monotone with respect to $Y$. Let $X_0=0$; then the recursion $X_{t+1}=g(X_t,Q_0)$ converges to $P_{0}$. By monotonicity
\[P_{0}\succeq X_t \text{ for all }t\ge 0.\]
Let $e_i$ denote the $i$-th canonical vector in $\R^{n-1}$. By monotonicity, we also have:
\[
X_1=g(X_0,Q_0)\succeq g(X_0,e_1e_1')=\underbrace{e_1e_1'}_{\tilde{X}_1}
\]
Repeating the argument:
\begin{align*}
X_2&=g(X_1,Q_0)\succeq g(\tilde{X}_1,Q_0)\succeq g(\tilde{X}_1,e_1e_1')=\underbrace{A_0'\tilde{X}_1A_0+e_1e_1'}_{\tilde{X}_2}=A_0'e_1e_1'A_0+e_1e_1'\\
&=2^2e_2e_2'+\rho^2 e_1e_1'+2\rho e_1e_2'+2\rho e_2e_1'
\end{align*}
Similarly,
\begin{align*}
X_{n-1}=g(X_{n-2},Q_0)\succeq g(\tilde{X}_{n-2},e_1e_1')= (A_0')^{n-2}e_1e_1'A_0^{n-2}+(A_0')^{n-3}e_1e_1'A_0^{n-3}+\dots+e_1e_1',
\end{align*}
where we use the fact that every $\tilde{X}_k$ is orthogonal to $B_0$ for $k\le n-2$. As a result:
\begin{align}
[P_{0}]_{n-1,n-1}&\ge [X_{n-1}]_{n-1,n-1}\ge e'_{n-1}(A_0')^{n-2}e_1e_1'A_0^{n-2}e_{n-1}\nonumber\\
&=(e_1'A_0^{n-2}e_{n-1})^2=([A_0^{n-2}]_{1,n-1})^2 \label{CTRL_app_eq:P_lower_bound_stable}
\end{align}
What remains is to compute $[A_0^{n-2}]_{1,n-1}$. Define the nilpotent shift matrix $J\in\R^{(n-1)\times (n-1) }$:
\[
J=\matr{{cccccc}0&1&0&&0&0\\0&0&1&&0&0\\& &&\ddots&\\0&0&0& &0&1\\0&0&0& &0&0}.
\]
Since $A_0=\rho I+2 J$ and $I$ commutes with $J$, by the binomial expansion formula
\[
A_0^{n-2}=2^{n-2}J^{n-2}+\sum_{t=0}^{n-3}2^{t}\rho^{n-2-t}\binom{n-2}{t}J^{t}.
\]
Since $e_1'J^{n-2}e_{n-1}=1$ and $e_1'J^{t}e_{n-1}=0$ for $t\le n-3$, we obtain:
\begin{equation}\label{CTRL_app_eq:P_lower_bound_stable_A}
([A_0^{n-2}]_{1,n-1})^2=2^{2n-4}.
\end{equation}
By~\eqref{CTRL_app_eq:P_lower_bound_stable} and~\eqref{CTRL_app_eq:P_lower_bound_stable_A} we finally get
\[
B_0'P_{0}B_0+R_0=[P_{0}]_{n-1,n-1}+1\ge 2^{2n-4}+1.
\]
\end{proof}
\begin{lemma}\label{CTRL_lem_app:stable_aux1}
We have:
\[
\snorm{\Delta'_1 P_0 (A_0+B_0K_{\star,0})}_2 \ge (0.5+o(1))(B_0'P_0B_0+R_0),
\]
where the $o(1)$ is in the large $n$ regime.
\end{lemma}
\begin{proof}
Let $e_i$ denote the $i$-th canonical vector in $\R^{n-1}$. It is sufficient to show that
\[
\abs{(B_0'P_{0}B_0+R_0)^{-1}\Delta'_1P_{0}(A_0+B_0K_{\star,0})e_{n-1}} \ge 0.5+o(1).
\]
For simplicity we will denote:
\[
\alpha \triangleq [P_{0}]_{n-1,n-1},\quad\beta \triangleq [P_{0}]_{n-2,n-2},\quad \gamma\triangleq [P_{0}]_{n-1,n-2}.
\]
Due to the structure of $A_0$, we have
\[A_0e_{n-1}=\rho e_{n-1}+2e_{n-2}.\]
Using this, we obtain
\begin{align}\label{CTRL_eq:aux_K_en}
K_{\star,0}e_{n-1}&=-(B_0'P_0B_0+1)^{-1}B_0'P_0A_0e_{n-1}=-(\alpha+1)^{-1}e'_{n-1}P_0(\rho e_{n-1}+2e_{n-2})\nonumber\\
&=-(\alpha+1)^{-1}(\rho \alpha+2\gamma).
\end{align}
Combining the above results
\begin{align*}
&(B_0'P_{0}B_0+R_0)^{-1}\Delta'_1P_{0}(A_0+B_0K_{\star,0})e_{n-1}=(B_0'P_0B_0+1)^{-1}e_{n-2}'P_0(A_0+B_0K_{\star,0})e_{n-1}\\
&=(\alpha+1)^{-1}\bigg\{e_{n-2}'P_0(\rho e_{n-1}+2e_{n-2})-e_{n-2}'P_0e_{n-1}(\alpha+1)^{-1}(\rho \alpha+2\gamma)\bigg\}\\
&=(\alpha+1)^{-1}\set{\rho \gamma+2\beta-\gamma(\alpha+1)^{-1}(\rho \alpha+2\gamma)}\\
&=2(\alpha+1)^{-1}\set{\beta-(\alpha+1)^{-1}\gamma^2}+(\alpha+1)^{-2}\rho \gamma \\
&\stackrel{i)}{=}\frac{2}{\alpha+1}\set{\beta-\frac{\gamma^2}{\alpha+1}}+o(1),
\end{align*}
where i) follows from Lemma~\ref{CTRL_lem_app:stable_aux2}. What remains to show is that
\begin{equation}\label{CTRL_eq:riccati_stable_aux}
\frac{2}{\alpha+1}\set{\beta-\frac{\gamma^2}{\alpha+1}}=0.5+o(1).
\end{equation}
Using the algebraic Riccati equation:
\begin{align*}
\alpha&=e'_{n-1}A_0'P_0A_0e_{n-1}+1-e'_{n-1}A_0'P_0B_0(\alpha+1)^{-1}B_0'P_0A_0e_{n-1}\\
&=(\rho e_{n-1}+2e_{n-2})'P_0(\rho e_{n-1}+2e_{n-2})+1\\
&-(\rho e_{n-1}+2e_{n-2})'P_0e_{n-1}(\alpha+1)^{-1}e_{n-1}'P_0(\rho e_{n-1}+2e_{n-2})\\
&= \rho^2\alpha+4\beta+4\rho \gamma+1-\frac{(\rho \alpha+2\gamma)^2}{\alpha+1}\\
&=4\beta+\frac{\rho^2\alpha+4\rho \gamma+\alpha+1-4\gamma^2}{\alpha+1}.
\end{align*}
Dividing both sides by $\alpha+1$:
\begin{align*}
\frac{\alpha}{1+\alpha}=\frac{4}{\alpha+1}\set{\beta-\frac{\gamma^2}{\alpha+1}}+\frac{4\rho \gamma}{(\alpha+1)^2}+\frac{1+\alpha+\rho^2 \alpha}{(1+\alpha)^2}
\end{align*}
Rearranging the terms gives:
\begin{align*}
\frac{2}{\alpha+1}\set{\beta-\frac{\gamma^2}{\alpha+1}}-0.5=-\frac{0.5}{1+\alpha}-\frac{2\rho \gamma}{(\alpha+1)^2}-\frac{1+\alpha+\rho^2 \alpha}{2(1+\alpha)^2}
\end{align*}
By~Lemma~\ref{CTRL_lem_app:stable_aux2} the second term on the right-hand side is $o(1)$. By Lemma~\ref{CTRL_lem:exponential_riccati_stable}, $\alpha=\Omega(2^{2n})$, hence all remaining terms also go to zero, which completes the proof of~\eqref{CTRL_eq:riccati_stable_aux}.
\end{proof}
\begin{lemma}\label{CTRL_lem_app:stable_aux2}
Recall the notation in the proof of Lemma~\ref{CTRL_lem_app:stable_aux1}
\[
\alpha\triangleq [P_{0}]_{n-1,n-1},\quad \gamma\triangleq [P_{0}]_{n-1,n-2}.
\]
Then, we have:
\[
\abs{\frac{\gamma}{(\alpha+1)^2}}=o(1)
\]
\end{lemma}
\begin{proof}
We use the relation:
\[
P_{0}=(A_0+B_0K_{\star,0})'P_{0}(A_0+B_0K_{\star,0})+Q_0+K_{\star,0}'R_0K_{\star,0}\succeq K_{\star,0}'R_0K_{\star,0}.
\]
Multiplying from the left and right by $e_{n-1}$ and by invoking~\eqref{CTRL_eq:aux_K_en} we obtain:
\[
\alpha\ge \paren{\frac{\rho \alpha+2\gamma}{\alpha+1}}^2=(\xi+\lambda)^2,
\]
where for simplicity we define $ \xi=\frac{\rho \alpha}{\alpha+1},\,\lambda=\frac{2\gamma}{\alpha+1}. $ We can further lower bound the above expression by:
\[
\alpha \ge (\xi+\lambda)^2 \ge \xi^2+\lambda^2-2\xi\abs{\lambda}.
\]
This is a quadratic inequality in $\abs{\lambda}$ and holds if and only if:
\[
\xi-\sqrt{\alpha}\le \abs{\lambda} \le \xi+\sqrt{\alpha}.
\]
As a result:
\[
2\frac{\abs{\gamma}}{\alpha+1}\le \rho+\sqrt{\alpha+1}
\]
which leads to
\[
\frac{\abs{\gamma}}{\alpha+1}\le 0.5\frac{\rho+\sqrt{\alpha+1}}{\alpha+1}=O(1/\sqrt{\alpha})=o(1)
\]
since $\alpha=\Omega(2^{2n})$. This implies the claim, since $\abs{\gamma}/(\alpha+1)^2\le \abs{\gamma}/(\alpha+1)$.
\end{proof}
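The exponential growth established in Lemma~\ref{CTRL_lem:exponential_riccati_stable} is also easy to observe numerically. The following Python sketch (ours, for illustration only; it merely solves the algebraic Riccati equation for the subsystem~\eqref{CTRL_eq:REG_difficult_example_stable_subsystem} with an arbitrary choice $\rho=0.5$ and assumes \texttt{scipy} is available) prints $B_0'P_0B_0+R_0$ next to the lower bound $2^{2n-4}+1$ for a few values of $n$:
\begin{verbatim}
# Illustration only: the Riccati solution of the stable chain grows exponentially.
import numpy as np
from scipy.linalg import solve_discrete_are

rho = 0.5                                    # arbitrary value in (0, 1)
for n in range(4, 10):
    m = n - 1
    A0 = rho * np.eye(m) + 2.0 * np.diag(np.ones(m - 1), k=1)   # A_0 = rho*I + 2*J
    B0 = np.zeros((m, 1)); B0[-1, 0] = 1.0                       # B_0 = e_{n-1}
    P0 = solve_discrete_are(A0, B0, np.eye(m), np.eye(1))
    lhs = (B0.T @ P0 @ B0 + 1.0).item()                          # B_0' P_0 B_0 + R_0
    print(n, lhs, 2.0 ** (2 * n - 4) + 1)                        # lhs vs 2^(2n-4) + 1
\end{verbatim}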
2205.13995v3
http://arxiv.org/abs/2205.13995v3
Modular Heights of Quaternionic Shimura Curves
\documentclass[12pt]{article} \usepackage{amsthm, amsmath, amssymb, enumerate} \usepackage{fullpage} \usepackage{amscd} \usepackage[colorlinks=true]{hyperref} \usepackage{MnSymbol} \begin{document} \title{Modular Heights of Quaternionic Shimura Curves} \author{Xinyi Yuan} \maketitle \theoremstyle{plain} \newtheorem{thm}{Theorem}[section] \newtheorem{theorem}[thm]{Theorem} \newtheorem{cor}[thm]{Corollary} \newtheorem{corollary}[thm]{Corollary} \newtheorem{lem}[thm]{Lemma} \newtheorem{lemma}[thm]{Lemma} \newtheorem{pro}[thm]{Proposition} \newtheorem{proposition}[thm]{Proposition} \newtheorem{prop}[thm]{Proposition} \newtheorem{definition}[thm]{Definition} \newtheorem{assumption}[thm]{Assumption} \newtheorem*{thmm}{Theorem} \newtheorem*{conj}{Conjecture} \newtheorem*{notation}{Notation} \newtheorem*{corr}{Corollary} \theoremstyle{remark} \newtheorem{remark}[thm]{Remark} \newtheorem{example}[thm]{Example} \newtheorem{remarks}[thm]{Remarks} \newtheorem{problem}[thm]{Problem} \newtheorem{exercise}[thm]{Exercise} \newtheorem{situation}[thm]{Situation} \numberwithin{equation}{subsection} \newcommand{\ZZ}{\mathbb{Z}} \newcommand{\CC}{\mathbb{C}} \newcommand{\QQ}{\mathbb{Q}} \newcommand{\RR}{\mathbb{R}} \renewcommand{\AA}{\mathbb{A}} \newcommand{\HH}{\mathcal{H}} \newcommand{\inverse}{^{-1}} \newcommand{\gl}{\mathrm{GL}_2} \newcommand{\sll}{\mathrm{SL}_2} \newcommand{\adele}{\mathbb{A}} niteadele}{\mathbb{A}_f} \newcommand{\af}{\mathbb{A}_f} \newcommand{\across}{\mathbb{A}^{\times}} \newcommand{\afcross}{\mathbb{A}_f^{\times}} \newcommand{\gla}{\mathrm{GL}_2(\mathbb{A})} \newcommand{\glaf}{\mathrm{GL}_2(\mathbb{A}_f)} \newcommand{\glf}{\mathrm{GL}_2(F)} \newcommand{\glfv}{\mathrm{GL}_2(F_v)} \newcommand{\glofv}{\mathrm{GL}_2(O_{F_v})} \newcommand{\ofv}{O_{F_v}} \newcommand{\oev}{O_{E_v}} \newcommand{\evcross}{E_v^{\times}} \newcommand{\fvcross}{F_v^{\times}} \newcommand{\adelef}{\mathbb{A}_F} \newcommand{\adelee}{\mathbb{A}_E} \newcommand{\aecross}{\mathbb{A}_E^{\times}} \newcommand{\OCS}{\overline{\mathcal{S}}} \renewcommand{\Pr}{\mathcal{P}r} \newcommand{\jv}{\mathfrak{j}_v} \newcommand{\fg}{\mathfrak{g}} \newcommand{\fj}{\mathfrak{j}} \newcommand{\vv}{\mathbb{V}} \newcommand{\bb}{\mathbb{B}} \newcommand{\bbf}{\mathbb{B}_f} \newcommand{\bfcross}{\mathbb{B}_f^\times} \newcommand{\ba}{\mathbb{B}_{\mathbb{A}}} \newcommand{\baf}{\mathbb{B}_{\mathbb{A}_f}} \newcommand{\bv}{{\mathbb{B}_v}} \newcommand{\bacross}{\mathbb{B}_{\mathbb{A}}^{\times}} \newcommand{\bafcross}{\mathbb{B}_{\mathbb{A}_f}^{\times}} \newcommand{\bvcross}{\mathbb{B}_v^{\times}} \newcommand{\ad}{\mathrm{ad}} \newcommand{\NT}{\mathrm{NT}} \newcommand{\nonsplit}{\mathrm{nonsplit}} \newcommand{\Pet}{\mathrm{Pet}} \newcommand{\Fal}{\mathrm{Fal}} \newcommand{\Norm}{\mathrm{Norm}} \newcommand{\lb}{\mathcal{L}} \newcommand{\DD}{\mathcal{D}} \newcommand{\quasilim}{\widetilde\lim_{s\rightarrow 0}} \newcommand{\pr}{\mathcal{P}r} \newcommand{\CMU}{\mathrm{CM}_U} \newcommand{\eend}{\mathrm{End}} \newcommand{\eendd}{\mathrm{End}^{\circ}} \newcommand{\sumu}{\sum_{u \in \mu_U^2 \bs F\cross}} \newcommand{\supp}{\mathrm{supp}} \newcommand{\cross}{^{\times}} \newcommand{\der}{\frac{d}{ds}|_{s=0}} \newcommand{\pair}[1]{\langle {#1} \rangle} \newcommand{\wpair}[1]{\left\{{#1}\right\}} \newcommand\wh{\widehat} \newcommand\Spf{\mathrm{Spf}} \newcommand{\lra}{{\longrightarrow}} \newcommand{\Ei}{\mathrm{Ei}} \newcommand{\sumyu}{\sum_{(y,u)}} \newcommand{\matrixx}[4] {\left( \begin{array}{cc} #1 & #2 \\ #3 & #4 \\ \end{array}\right)} 
\newcommand{\barint}{\mbox{$ave \int$}} \def\barint_#1{\mathchoice {\mathop{\vrule width 6pt height 3 pt depth -2.5pt \kern -8.8pt \intop}\nolimits_{#1}} {\mathop{\vrule width 5pt height 3 pt depth -2.6pt \kern -6.5pt \intop}\nolimits_{#1}} {\mathop{\vrule width 5pt height 3 pt depth -2.6pt \kern -6pt \intop}\nolimits_{#1}} {\mathop{\vrule width 5pt height 3 pt depth -2.6pt \kern -6pt \intop}\nolimits_{#1}}} \newcommand{\BA}{{\mathbb {A}}} \newcommand{\BB}{{\mathbb {B}}} \newcommand{\BC}{{\mathbb {C}}} \newcommand{\BD}{{\mathbb {D}}} \newcommand{\BE}{{\mathbb {E}}} \newcommand{\BF}{{\mathbb {F}}} \newcommand{\BG}{{\mathbb {G}}} \newcommand{\BH}{{\mathbb {H}}} \newcommand{\BI}{{\mathbb {I}}} \newcommand{\BJ}{{\mathbb {J}}} \newcommand{\BK}{{\mathbb {K}}} \newcommand{\BL}{{\mathbb {L}}} \newcommand{\BM}{{\mathbb {M}}} \newcommand{\BN}{{\mathbb {N}}} \newcommand{\BO}{{\mathbb {O}}} \newcommand{\BP}{{\mathbb {P}}} \newcommand{\BQ}{{\mathbb {Q}}} \newcommand{\BR}{{\mathbb {R}}} \newcommand{\BS}{{\mathbb {S}}} \newcommand{\BT}{{\mathbb {T}}} \newcommand{\BU}{{\mathbb {U}}} \newcommand{\BV}{{\mathbb {V}}} \newcommand{\BW}{{\mathbb {W}}} \newcommand{\BX}{{\mathbb {X}}} \newcommand{\BY}{{\mathbb {Y}}} \newcommand{\BZ}{{\mathbb {Z}}} \newcommand{\CA}{{\mathcal {A}}} \newcommand{\CB}{{\mathcal {B}}} \renewcommand{\CD}{{\mathcal{D}}} \newcommand{\CE}{{\mathcal {E}}} \newcommand{\CF}{{\mathcal {F}}} \newcommand{\CG}{{\mathcal {G}}} \newcommand{\CH}{{\mathcal {H}}} \newcommand{\CI}{{\mathcal {I}}} \newcommand{\CJ}{{\mathcal {J}}} \newcommand{\CK}{{\mathcal {K}}} \newcommand{\CL}{{\mathcal {L}}} \newcommand{\CM}{{\mathcal {M}}} \newcommand{\CN}{{\mathcal {N}}} \newcommand{\CO}{{\mathcal {O}}} \newcommand{\CP}{{\mathcal {P}}} \newcommand{\CQ}{{\mathcal {Q}}} \newcommand{\CR }{{\mathcal {R}}} \newcommand{\CS}{{\mathcal {S}}} \newcommand{\CT}{{\mathcal {T}}} \newcommand{\CU}{{\mathcal {U}}} \newcommand{\CV}{{\mathcal {V}}} \newcommand{\CW}{{\mathcal {W}}} \newcommand{\CX}{{\mathcal {X}}} \newcommand{\CY}{{\mathcal {Y}}} \newcommand{\CZ}{{\mathcal {Z}}} \newcommand{\RA}{{\mathrm {A}}} \newcommand{\RB}{{\mathrm {B}}} \newcommand{\RC}{{\mathrm {C}}} \newcommand{\RD}{{\mathrm {D}}} \newcommand{\RE}{{\mathrm {E}}} \newcommand{\RF}{{\mathrm {F}}} \newcommand{\RG}{{\mathrm {G}}} \newcommand{\RH}{{\mathrm {H}}} \newcommand{\RI}{{\mathrm {I}}} \newcommand{\RJ}{{\mathrm {J}}} \newcommand{\RK}{{\mathrm {K}}} \newcommand{\RL}{{\mathrm {L}}} \newcommand{\RM}{{\mathrm {M}}} \newcommand{\RN}{{\mathrm {N}}} \newcommand{\RO}{{\mathrm {O}}} \newcommand{\RP}{{\mathrm {P}}} \newcommand{\RQ}{{\mathrm {Q}}} \newcommand{\RS}{{\mathrm {S}}} \newcommand{\RT}{{\mathrm {T}}} \newcommand{\RU}{{\mathrm {U}}} \newcommand{\RV}{{\mathrm {V}}} \newcommand{\RW}{{\mathrm {W}}} \newcommand{\RX}{{\mathrm {X}}} \newcommand{\RY}{{\mathrm {Y}}} \newcommand{\RZ}{{\mathrm {Z}}} \newcommand{\ga}{{\frak a}} \newcommand{\gb}{{\frak b}} \newcommand{\gc}{{\frak c}} \newcommand{\gd}{{\frak d}} \newcommand{\gf}{{\frak f}} \newcommand{\gh}{{\frak h}} \newcommand{\gi}{{\frak i}} \newcommand{\gj}{{\frak j}} \newcommand{\gk}{{\frak k}} \newcommand{\gm}{{\frak m}} \newcommand{\gn}{{\frak n}} \newcommand{\go}{{\frak o}} \newcommand{\gp}{{\frak p}} \newcommand{\gq}{{\frak q}} \newcommand{\gr}{{\frak r}} \newcommand{\gs}{{\frak s}} \newcommand{\gt}{{\frak t}} \newcommand{\gu}{{\frak u}} \newcommand{\gv}{{\frak v}} \newcommand{\gw}{{\frak w}} \newcommand{\gx}{{\frak x}} \newcommand{\gy}{{\frak y}} \newcommand{\gz}{{\frak z}} \newcommand{\ab}{{\mathrm{ab}}} 
\newcommand{\Ad}{{\mathrm{Ad}}} \newcommand{\an}{{\mathrm{an}}} \newcommand{\Aut}{{\mathrm{Aut}}} \newcommand{\Br}{{\mathrm{Br}}} \newcommand{\bs}{\backslash} \newcommand{\bbs}{\|\cdot\|} \newcommand{\Ch}{{\mathrm{Ch}}} \newcommand{\cod}{{\mathrm{cod}}} \newcommand{\cont}{{\mathrm{cont}}} \newcommand{\cl}{{\mathrm{cl}}} \newcommand{\criso}{{\mathrm{criso}}} \newcommand{\dR}{{\mathrm{dR}}} \newcommand{\disc}{{\mathrm{disc}}} \newcommand{\Div}{{\mathrm{Div}}} \renewcommand{\div}{{\mathrm{div}}} \newcommand{\Eis}{{\mathrm{Eis}}} \newcommand{\End}{{\mathrm{End}}} \newcommand{\Frob}{{\mathrm{Frob}}} \newcommand{\Gal}{{\mathrm{Gal}}} \newcommand{\GL}{{\mathrm{GL}}} \newcommand{\GO}{{\mathrm{GO}}} \newcommand{\GSO}{{\mathrm{GSO}}} \newcommand{\GSp}{{\mathrm{GSp}}} \newcommand{\GSpin}{{\mathrm{GSpin}}} \newcommand{\GU}{{\mathrm{GU}}} \newcommand{\BGU}{{\mathbb{GU}}} \newcommand{\Hom}{{\mathrm{Hom}}} \newcommand{\Hol}{{\mathrm{Hol}}} \newcommand{\HC}{{\mathrm{HC}}} \renewcommand{\Im}{{\mathrm{Im}}} \newcommand{\Ind}{{\mathrm{Ind}}} \newcommand{\inv}{{\mathrm{inv}}} \newcommand{\Isom}{{\mathrm{Isom}}} \newcommand{\Jac}{{\mathrm{Jac}}} \newcommand{\JL}{{\mathrm{JL}}} \newcommand{\Ker}{{\mathrm{Ker}}} \newcommand{\KS}{{\mathrm{KS}}} \newcommand{\Lie}{{\mathrm{Lie}}} \newcommand{\new}{{\mathrm{new}}} \newcommand{\NS}{{\mathrm{NS}}} \newcommand{\ord}{{\mathrm{ord}}} \newcommand{\ol}{\overline} \newcommand{\rank}{{\mathrm{rank}}} \newcommand{\PGL}{{\mathrm{PGL}}} \newcommand{\PSL}{{\mathrm{PSL}}} \newcommand{\Pic}{\mathrm{Pic}} \newcommand{\Prep}{\mathrm{Prep}} \newcommand{\Proj}{\mathrm{Proj}} \renewcommand{\Re}{{\mathrm{Re}}} \newcommand{\red}{{\mathrm{red}}} \newcommand{\sm}{{\mathrm{sm}}} \newcommand{\sing}{{\mathrm{sing}}} \newcommand{\reg}{{\mathrm{reg}}} \newcommand{\Rep}{{\mathrm{Rep}}} \newcommand{\Res}{{\mathrm{Res}}} \newcommand{\Sel}{{\mathrm{Sel}}} \font\cyr=wncyr10 \newcommand{\Sha}{\hbox{\cyr X}} \newcommand{\SL}{{\mathrm{SL}}} \newcommand{\SO}{{\mathrm{SO}}} \newcommand{\Sp}{\mathrm{Sp}} \newcommand{\Spec}{{\mathrm{Spec}}} \newcommand{\Sym}{{\mathrm{Sym}}} \newcommand{\sgn}{{\mathrm{sgn}}} \newcommand{\Supp}{{\mathrm{Supp}}} \newcommand{\tor}{{\mathrm{tor}}} \newcommand{\tr}{{\mathrm{tr}}} \newcommand{\ur}{{\mathrm{ur}}} \newcommand{\vol}{{\mathrm{vol}}} \newcommand{\wt}{\widetilde} \newcommand{\pp}{\frac{\partial\bar\partial}{\pi i}} \newcommand{\intn}[1]{\left( {#1} \right)} \newcommand{\norm}[1]{\|{#1}\|} \newcommand{\sfrac}[2]{\left( \frac {#1}{#2}\right)} \newcommand{\ds}{\displaystyle} \newcommand{\ov}{\overline} \newcommand{\incl}{\hookrightarrow} \newcommand{\imp}{\Longrightarrow} \newcommand{\lto}{\longmapsto} \newcommand{\iso}{\overset \sim \lra} \tableofcontents \section{Introduction} The goal of this paper is to prove a formula expressing the modular height of a quaternionic Shimura curve over a totally real number field in terms of the logarithmic derivative of the Dedekind zeta function of the totally real number field. Our proof is based on the work Yuan--Zhang--Zhang \cite{YZZ} on the Gross--Zagier formula, and the work Yuan--Zhang \cite{YZ} on the averaged Colmez conjecture. All these works are in turn inspired by the Pioneering work Gross--Zagier \cite{GZ} and some philosophies of Kudla's program. In the following, let us state the exact formula, compare it with other similar formulas, and explain our idea of proof. \subsection{Modular height of the Shimura curve} \label{sec intro shimura curve} Let $F$ be a totally real number field. 
Let $\Sigma$ be a finite set of places of $F$ containing all the archimedean places and having an odd cardinality $|\Sigma|$. Denote by $\Sigma_f$ the subset of non-archimedean places in $\Sigma$. Let $\BB$ be the totally definite incoherent quaternion algebra over the adele ring $\BA=\BA_F$ with ramification set $\Sigma$. Let $U=\prod_{v\nmid\infty}U_v$ be an open compact subgroup of $\BB_f^\times$ such that $U_v$ is maximal at every $v\in \Sigma_f$. Our main theorem concerns the case that $U$ is maximal, but we allow $U$ to be more general before the statement of the main theorem. Let $X_U$ be the associated \emph{Shimura curve} over $F$, which is a projective and smooth curve over $F$ descended from the analytic quotient $$ X_{U,\sigma}(\BC)=(B(\sigma)^\times \bs \CH^\pm\times \BB_f^\times/U) \cup \{\rm cusps\}, $$ where $\sigma:F\to\BC$ is any archimedean place of $F$ and $B(\sigma)$ is the quaternion algebra over $F$ with ramification set $\Sigma\setminus \{\sigma\}$. Note that $X_U$ is defined as the corresponding coarse moduli scheme, which is a projective and smooth curve over $F$. See \cite[\S1.2.1]{YZZ} for more details. Let $L_U$ be the \emph{Hodge bundle} of $X_U$ corresponding to modular forms of weight 2. It is a $\QQ$-line bundle over $X_U$, i.e. an element of $\Pic(X_U)\otimes_\ZZ\QQ$, defined by $$ L_{U}= \omega_{X_{U}/F} \otimes\CO_{X_U}\Big(\sum_{Q\in X_U(\overline F)} (1-e_Q^{-1}) Q\Big). $$ Here $\omega_{X_{U}/F}$ is the canonical bundle of $X_U$ over $F$, and for each $Q\in X_U(\overline F)$, the ramification index $e_Q$ is described as follows. Fix an embedding $\bar\sigma:\overline F\to \CC$ extending $\sigma:F\to \CC$, so that $Q$ is also viewed as a point of $X_{U,\sigma}(\BC)$. If $Q$ is a cusp in $X_{U,\sigma}(\BC)$ under the above complex uniformization, then $e_Q=\infty$ and $1-e_Q^{-1}=1$. If $Q$ is not a cusp in that sense, then the connected component of $Q$ in $X_{U,\sigma}(\BC)$ can be written as a quotient $\Gamma\bs\CH^*$ for a discrete group $\Gamma$, and $e_Q$ is the ramification index of any preimage of $Q$ under the map $\CH\to \Gamma\bs\CH$. One can check that $e_Q$ does not depend on the choices of $(\sigma, \bar\sigma)$ or the preimage, and that $e_Q$ is Galois invariant, so $L_U$ is indeed defined over $F$. See \cite[\S3.1.3]{YZZ} for more details. Let $\CX_U$ be the \emph{canonical integral model} of $X_U$ over $O_F$, as reviewed in \cite[\S4.2]{YZ}. Note that we always assume that $U$ is maximal at every $v\in \Sigma_f$. If $|\Sigma|=1$ (or equivalently $F=\QQ$ and $\Sigma=\{\infty\}$), then $X_U$ is a modular curve, $X_U\simeq \BP^1_\QQ$ via the $j$-function, and $\CX_U\simeq\BP^1_\ZZ$ under this identification. We refer to Deligne--Rapoport \cite{DR} for a thorough theory of this situation. If $|\Sigma|>1$, $\CX_U$ is a projective, flat, normal, semistable and $\QQ$-factorial arithmetic surface over $O_F$, defined as quotients of the canonical integral models of $\CX_{U'}$ for sufficiently small open compact subgroups $U'$ of $U$. We refer to Carayol \cite{Ca} and Boutot--Zink \cite{BZ} for integral models for sufficiently small level groups, and refer to \S\ref{sec shimura curve} for the quotient process. Only the part of the integral model $\CX_U$ above places $v$ such that $U_v$ is maximal is essential in this paper. Let $\CL_U$ be the \emph{canonical integral model} of $L_U$ over $\CX_U$ as reviewed in \cite[\S4.2]{YZ}. Then $\CL_U$ is a $\QQ$-line bundle on $\CX_U$ constructed as follows. 
If $U$ is sufficiently small in the sense that $U$ is contained in $U(N)=(1+N O_{\bb_f})^\times$ for some integer $N\geq 3$ and some maximal order $O_{\bb_f}$ of $\bb_f$, then over the open subscheme $\CX_{U, S(U)}=\CX_{U}\times_{\Spec\, O_F} S(U)$ with $$S(U)=\Spec\, O_F\setminus \{v: U_v \text{ is not maximal}\},$$ we have $$ \CL_U|_{\CX_{U, S(U)}}= \omega_{\CX_{U}/O_F} \otimes \CO_{\CX_{U, S(U)}}\Big(\sum_{Q\in X_U} (1-e_Q^{-1}) \CQ\Big). $$ Here $\omega_{\CX_{U}/O_F}$ is the relative dualizing sheaf, the summation is over closed points $Q$ of $X_U$, $\CQ$ is the Zariski closure of $Q$ in $\CX_{U,S(U)}$, and $e_Q$ is the ramification index of any point of $X_U(\ol F)$ corresponding to $Q$. If $U$ is a maximal open compact subgroup of $\bfcross$, for any sufficiently small normal open compact subgroup $U'$ of $\bfcross$ contained in $U$, we have $$ \CX_{U, S(U')}= \CX_{U',S(U')}/(U/U'), \quad \CL_{U}|_{\CX_{U, S(U')}}= N_{\pi} (\CL_{U'}|_{\CX_{U', S(U')}})^{\otimes (1/\deg(\pi))}, $$ where $N_{\pi}:\Pic(\CX_{U', S(U')})\to \Pic(\CX_{U, S(U')})$ is the norm map with respect to the natural map $\pi: \CX_{U', S(U')}\to \CX_{U, S(U')}$. Varying $U'$, we glue $\{\CL_{U}|_{\CX_{U, S(U')}}\}_{U'}$ together to form the $\QQ$-line bundle $\CL_U$ over $\CX_U$ for maximal $U$. For general $U$, we take an embedding $U\subset U_0$ into a maximal $U_0$, and then define $\CL_U$ to be the pull-back of $\CL_{U_0}$ via the natural map $\CX_U \to \CX_{U_0}$. At any archimedean place $\sigma:F\to \BC$, the \emph{Petersson metric} of $\CL_U$ is given by $$\|f(\tau)d\tau\|_{\mathrm{Pet}}=2\, \Im(\tau) |f(\tau)|,$$ where $\tau$ is the standard coordinate function on $\CH\subset \CC$, and $f(\tau)$ is any meromorphic modular form of weight 2 over $X_{U,\sigma}(\CC)$. Thus we have \emph{the arithmetic Hodge bundle} $$\ol\CL_U=(\CL_U, \{\|\cdot\|_\sigma\}_\sigma).$$ The \emph{modular height} of $X_U$ with respect to the arithmetic Hodge bundle $\ol\CL_U$ is defined to be $$ h_{\ol\CL_U}(X_U) = \frac{\widehat\deg(\hat c_1(\ol\CL_U)^2)}{2\deg(L_{U})}. $$ Here $\deg(L_{U})$ is the degree over the generic fiber $X_U$, and the numerator is the arithmetic self-intersection number on the arithmetic surface $\CX_U$ in the setting of Arakelov geometry. Note that if $|\Sigma|>1$, then $\ol\CL_U$ is a hermitian $\QQ$-line bundle over $\CX_U$, and the self-intersection number essentially follows from the theory of Gillet--Soul\'e \cite{GS}; if $|\Sigma|=1$, then the metric has a logarithmic singularity along the cusp, and the intersection number is defined in the framework of Bost \cite{Bo} or K\"uhn \cite{Kuh}. By the projection formula, $h_{\ol\CL_U}(X_U)$ is independent of $U$. However, as $\CL_U$ is less canonical at places $v$ such that $U_v$ is not maximal, we will assume that $U$ is maximal at every $v$ in the main theorem and afterwards. For any non-archimedean place $v$ of $F$, denote by $N_v$ the norm of $v$. Recall the Dedekind zeta function $$ \zeta_{F}(s)=\prod_{v\nmid\infty} (1-N_v^{-s})^{-1}. $$ The functional equation relates the values and derivatives of $\zeta_{F}(s)$ at $s=-1$ and $s=2$. The goal of this paper is to prove the following formula. \begin{thm}[modular height]\label{main} Let $U\subset \BB_f^\times$ be a \emph{maximal} open compact subgroup. Then \begin{eqnarray*} h_{\ol \CL_U}(X_U) =-\frac{\zeta_{F}'(-1)}{\zeta_{F}(-1)} -\frac12[F:\QQ] +\sum_{v\in\Sigma_f} \frac{3N_v-1}{4(N_v-1)}\log N_v.
\end{eqnarray*} \end{thm} If $F=\QQ$ and $\Sigma=\{\infty\}$, the formula was proved by Bost (un-published) and K\"uhn (cf. \cite[Theorem 6.1]{Kuh}); if $F=\QQ$ and $|\Sigma|>1$, the formula was proved by Kudla--Rapoport--Yang (cf. \cite[Theorem 1.0.5]{KRY2}). Denote by $h_F$ the class number of $O_F$. A classical formula of Vign\'eras \cite{Vi} gives \begin{eqnarray*} \deg(L_{U}) = 4\cdot h_F \cdot (-2)^{-[F:\QQ]} \cdot \zeta_{F}(-1) \cdot \prod_{v\in \Sigma_f}(N_v-1). \end{eqnarray*} This is also an easy consequence of the formula in the remark right after \cite[Proposition 4.2]{YZZ}. Theorem \ref{main} is an arithmetic version of this formula. It computes the arithmetic degree instead of the geometric degree, and the result is given by the logarithmic derivative at $-1$ instead of the value at $-1$. The relation between these two formulas is similar to the relation between the Gross--Zagier formula and the Waldspurger formula (as fully explored in \cite{YZZ}), and is also similar to the relation between the averaged Colmez conjecture and the class number formula (as treated in \cite{YZZ}). In Kudla's program, it is crucial to extend the (modular) generating series of CM cycles over a Shimura variety to a (modular) generating series of arithmetic cycles over a reasonable integral model. An idea of S. Zhang \cite[\S3.5]{Zh2} to treat this problem is to apply his notion of admissible arithmetic extensions. This approach relies on concrete results on arithmetic intersection numbers, so our main formula fits this setting naturally. Inspired by S. Zhang's idea, Qiu \cite{Qi} solved the problem for generating series of divisors over unitary Shimura varieties under some assumptions, and his argument is based on many computational results of this paper. \subsection{The case $F=\QQ$ and other similar formulas} If $F=\BQ$ and $\Sigma=\{\infty\}$, or equivalently if $X_U$ is the usual modular curve, then the formula of Bost and K\"uhn \cite[Theorem 6.1]{Kuh} agrees with our formula by \cite[Theorem 5.3, Remark 5.4]{Yu2}. If $F=\BQ$ and $|\Sigma|>1$, the formula in \cite[Theorem 1.0.5]{KRY2} of Kudla--Rapoport--Yang is equivalent to \begin{eqnarray*} h_{\widehat\omega_0}(X_U) = -\frac{\zeta_{\BQ}'(-1)}{\zeta_{\BQ}(-1)} -\frac12 + \sum_{p\in\Sigma_f} \frac{p+1}{4(p-1)}\log p. \end{eqnarray*} This formula is compatible with our formula. In fact, the right-hand side of the formula differs from that of ours by $\ds\frac12 \log d_\BB$, and $\ds h_{\overline\CL_U}(X_U)= h_{\widehat\omega_0}(X_U)+\frac12 \log d_\BB$ by the explicit results on the Kodaira--Spencer map in \cite[Theorem 2.2, Remark 2.3]{Yu2}. There are many formulas of similar flavor in the literature. Besides the above mentioned works of Bost, K\"uhn and Kudla--Rapoport--Yang, Bruinier--Burgos--K\"uhn \cite{BBK} proved a modular height formula for Hilbert modular surfaces, H\"ormann \cite{Ho} proved a modular height formula up to $\log \QQ_{>0}$ for Shimura varieties of orthogonal types over $\QQ$, and Bruinier--Howard \cite{BH} recently proved a modular height formula for Shimura varieties of unitary types over $\QQ$. The formulas of \cite{Ho, BH} are based on the formulas of Bost, K\"uhn and Kudla--Rapoport--Yang. In a slightly different direction, Freixas--Sankaran \cite{FS} proved some other formulas for intersections of more general Chern classes over Hilbert modular surfaces. Finally, we refer to Maillot--R\"ossler \cite{MR1,MR2} for far-reaching conjectures generalizing these formulas. 
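For the reader's convenience, we make the two comparisons above completely explicit; the following elementary computation is only a consistency check and is not used in the sequel. If $F=\QQ$ and $\Sigma=\{\infty\}$, then $\Sigma_f=\emptyset$ and Theorem \ref{main} simply reads
$$
h_{\ol \CL_U}(X_U) =-\frac{\zeta_{\QQ}'(-1)}{\zeta_{\QQ}(-1)} -\frac12,
$$
which is the formula proved by Bost and K\"uhn (cf. \cite[Theorem 6.1]{Kuh}). If $F=\QQ$ and $|\Sigma|>1$, then at each $p\in\Sigma_f$ the local term of Theorem \ref{main} differs from that of the Kudla--Rapoport--Yang formula by
$$
\frac{3p-1}{4(p-1)}\log p-\frac{p+1}{4(p-1)}\log p
=\frac{2(p-1)}{4(p-1)}\log p
=\frac12\log p,
$$
so the two right-hand sides differ by $\frac12\sum_{p\in\Sigma_f}\log p=\frac12\log d_\BB$, in accordance with the discussion above.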
Our formula is novel in that it involves Dedekind zeta functions of general totally real fields, while the previously known formulas involve Dedekind zeta functions of $\QQ$ and quadratic fields. Moreover, based on our formula and a strategy in the flavor of \cite{Ho, BH}, it is promising to prove similar formulas for higher-dimensional Shimura varieties associated to orthogonal groups of signature $(n,2), (n+2,0), \cdots, (n+2,0)$ or unitary groups of signature $(n,1), (n+1,0), \cdots, (n+1,0)$ over totally real fields. \subsection{Modular height of a CM point} Our proof of Theorem \ref{main} is inspired by the works \cite{YZZ,YZ}. In the proof, we need to pick an auxiliary CM point, and the height of this point is also relevant to our treatment. Let us first review a formula in \cite{YZ} which is related to our main theorem. Let $E$ be a totally imaginary quadratic extension of $F$. Assume that there is an embedding $\BA_E\hookrightarrow \bb$ of $\BA$-algebras such that the image of $\wh O_E^\times$ lies in the maximal compact subgroup $U$. Let $P_U\in X_{U,\sigma}(\CC)$ be the CM point represented by $[\tau_0,1]$ under the complex uniformization at $\sigma:F\to \CC$, where $\tau_0$ is the unique fixed point of $E^\times$ in $\CH$. Fix embeddings $E^\ab\to \overline F\to \CC$ compatible with $\sigma:F\to \CC$. By the definition of the canonical model, we actually have $P_U\in X_U(E^\ab)$ via the embedding. The modular height of $P_U$ is defined by $$h_{\ol\CL_U}(P_U):=\frac{1}{\deg(P_U)}\wh\deg(\ol\CL_U|_{\bar P_U}),$$ where $\bar P_U$ denotes the Zariski closure of the image of $P_U$ in $\CX_U$, and $\deg(P_U)$ is the degree of the field of definition of $P_U$ over $F$. By \cite[Theorem 1.7]{YZ}, we have the following formula. \begin{thm}\label{height CM} Assume that there is no non-archimedean place of $F$ ramified in both $E$ and $\BB$. Then $$ h_{\ol\CL_U}(P_U) =-\frac{L_f'(0,\eta)}{L_f(0,\eta)} +\frac 12 \log \frac{d_\BB}{d_{E/F}}. $$ Here $\eta:F^\times\bs\AA_F^\times\to \{\pm1\}$ is the quadratic character associated to the quadratic extension $E/F$, $d_\BB=\prod_{v\in \Sigma_f}N_v$ is the absolute discriminant of $\BB$, and $d_{E/F}$ is the norm of the relative discriminant of ${E/F}$. \end{thm} Theorem \ref{height CM} is one of the two steps in the proof of the averaged Colmez conjecture of \cite{YZ}. The averaged Colmez conjecture was proved independently by Andreatta--Goren--Howard--Madapusi-Pera \cite{AGHM}, and plays a crucial role in the final solution of the Andr\'e--Oort conjecture of Tsimerman \cite{Ts}. In the case $F=\QQ$ and $\Sigma=\{\infty\}$, Theorem \ref{height CM} is equivalent to the classical Chowla--Selberg formula proved in \cite{CS}. We refer to \cite[\S3.3]{Yu1} for many equivalent forms of the Chowla--Selberg formula. \subsection{Kronecker's limit formula} Both the Bost--K\"uhn formula and the Chowla--Selberg formula are easy consequences of the more classical Kronecker limit formula. In fact, by \cite[Prop. 5.2]{Kuh}, the Kronecker limit formula asserts that $$ -\log |\Delta(\tau)^2 \mathrm{Im}(\tau)^{12}| =4\pi \lim_{s\to 1} (E(\tau, s)-\varphi(s)), $$ where $$ \Delta(\tau)= q\prod_{n=1}^\infty (1-q^n)^{24}, \quad q=e^{2\pi i\tau} $$ is the modular discriminant function, $$E(\tau, s)=\frac12\sum_{c,d\in \ZZ,\ \gcd(c,d)=1} \frac{\mathrm{Im}(\tau)^s}{|c\tau+d|^{2s}} $$ is the classical non-holomorphic Eisenstein series, and $$ \frac{\pi}{3}\varphi(s)=\frac{1}{s-1}+ 2-2\log(4\pi)-24\zeta_{\QQ}'(-1)+O(s-1).
$$ In particular, $\Delta(\tau)$ induces a global section of $L_U^{\otimes 6}$ over the modular curve $X_U=X_0(1)$. Then we can use this section to compute $h_{\ol\CL_U}(X_U)$ and $h_{\ol\CL_U}(P_U)$. Integrating $-\log |\Delta(\tau)^2 \mathrm{Im}(\tau)^{12}|$ over $X_U(\BC)$ with respect to the Poincare measure $y^{-2}dxdy$, the Kronecker limit formula implies the Bost--K\"uhn formula. This is essentially the proof of K\"uhn \cite{Kuh}. Averaging $-\log |\Delta(\tau)^2 \mathrm{Im}(\tau)^{12}|$ over the Galois orbit of the CM point $P_U$, the Kronecker limit formula implies the Chowla--Selberg formula. This is essentially the proof in Weil \cite{We}. In summary, in the case $|\Sigma|=1$, both Theorem \ref{main} and Theorem \ref{height CM} are consequences of the Kronecker limit formula. On the other hand, there is no analogous formulation of the Kronecker limit formula over totally real fields, since there is no explicit modular form over a quaternionic Shimura curve to replace the classical modular discriminant function $\Delta$. Hence, the above proof of the theorem does not work in the general case. Our proofs of Theorem \ref{main} and Theorem \ref{height CM} are extensions of the treatment of \cite{YZZ}. The original goal of \cite{YZZ} is to prove the Gross--Zagier formula over Shimura curves, but the method was enhanced in \cite{YZ} to prove Theorem \ref{height CM}, and now we can further enhance the method to prove Theorem \ref{main}. Note that our proof of Theorem \ref{main} in the case $F=\QQ$ is different from those of \cite{Kuh, KRY2}. It is interesting that in both the classical proofs and our current proofs, Theorem \ref{main} and Theorem \ref{height CM} are always put in the same framework. \subsection{Idea of proof} Now we sketch our proof of Theorem \ref{main}. It is an extension of the proof of the Gross--Zagier formula in \cite{YZZ} and the proof of the averaged Colmez conjecture in \cite{YZ}. To have a setup compatible with those in \cite{YZZ,YZ}, we first choose a CM extension $E$ over $F$ as in Theorem \ref{height CM}, though $E$ is irrelevant to the final statement of Theorem \ref{main}. \subsubsection*{The degeneracy assumptions} Recall that the Gross--Zagier formula is an identity between the derivative of the Rankin--Selberg $L$-function of a Hilbert modular form and the height of a CM point on a modular abelian variety. This formula is proved by a comparison of a derivative series $\Pr I'(0, g, \phi)$ with a geometric series $2Z(g, (1,1), \phi)$ parametrized by certain modified Schwartz function $\phi\in \ol\CS (\BB\times \BA^\times )$. More precisely, we have proved that the difference $$\mathcal D (g, \phi)=\Pr I'(0, g, \phi)-2Z(g, (1,1), \phi), \qquad g\in \GL_2(\BA_F)$$ is perpendicular to the relevant cusp form. The matching for the ``main terms'' of $\mathcal D (g, \phi)$ eventually implies the Gross--Zagier formula in \cite{YZZ}. In this process, many assumptions on the choice of $\phi$ in \cite[\S5.2.1]{YZZ} are made to ``annihilate'' the ``degenerate terms'', which simplifies the calculations dramatically and forces the computational results to satisfy the conditions of an approximation argument. The ``strictest'' degeneracy assumptions involved are \cite[Assumption 5.3, Assumption 5.4]{YZZ}. The assumptions are not harmful for the Gross--Zagier formula, as proved in \cite[Theorem 5.7]{YZZ}. Nonetheless, if we allow the Schwartz function to be more general, the matching process will actually give us more formulas. 
In fact, after removing \cite[Assumption 5.3]{YZZ}, we obtain a matching of some ``degenerate terms'', which eventually implies Theorem \ref{height CM}. This is the work of \cite[Part II]{YZ}. In the current paper, we remove both \cite[Assumption 5.3, Assumption 5.4]{YZZ} when considering the matching of the series $\Pr I'(0, g, \phi)$ and $2Z(g, (1,1), \phi)$. Then we finally obtain an extra identity, which eventually implies Theorem \ref{main}. Our precise choice of the Schwartz functions is given in \S\ref{choices}. From \cite{YZZ} to \cite{YZ}, and from \cite{YZ} to the current paper, each step removes a degeneracy assumption, which causes two significant problems. The first problem is that more terms appear in the comparison, which incur far more involved local computations. This is eventually overcome by patience and carefulness. The second problem is how to obtain exact identity from the ``partial matching'' of the two series; i.e., the matching of ``all but finitely many'' terms of the two series. In \cite{YZZ}, this problem is solved by the method of approximation (cf. \cite[\S1.5.10]{YZZ}). In \cite{YZ}, this problem is solved by the theory of pseudo-theta series (cf. \cite[\S6]{YZ}), which is an extension of the method of approximation. In the current paper, the theory of pseudo-theta series is not sufficient for the comparison. Our solution is to introduce a new notion of \emph{pseudo-Eisenstein series}, and generalize \cite[Lemma 6.1]{YZ}, the key matching principle of pseudo-theta series, to include both pseudo-theta series and pseudo-Eisenstein series. In the following, we review the derivative series $\Pr I'(0, g, \phi)$ and the height series $Z(g, (1,1), \phi)$ and introduce some new ingredients of our proof. \subsubsection*{Derivative series} By the reduced norm $q$, the incoherent quaternion algebra $\BB$ is viewed as a quadratic space over $\BA=\BA_F$. Then we have a modified space $\ol\CS (\BB\times \BA^\times)$ of Schwartz functions, which has a Weil representation $r$ by $\GL_2(\BA)\times\BB^\times\times \BB^\times$. Strictly speaking, the representation $r$ is induced by the canonical homomorphism $\BB^\times\times \BB^\times\to \GO(\BB,q)$ defined by sending $(b_1,b_2)\in \BB^\times\times \BB^\times$ to the automorphism of $\BB$ given by $x\mapsto b_1xb_2^{-1}$. We refer to \S\ref{sec theta eisenstein} (or the original \cite[\S 2.1, \S2.2, \S 4.1]{YZZ}) for more details. For each $\phi\in \ol\CS (\BB\times \BA^\times )$ invariant under an open compact subgroup $U\times U$ of $\BB_f^\times\times \BB_f^\times$, we have a mixed theta--Eisenstein series $$I(s, g, \phi)=\sum _{u\in \mu _U^2\bs F^\times} \sum _{\gamma \in P^1(F)\bs \SL_2(F)} \delta (\gamma g)^s \sum _{x_1\in E} r(\gamma g)\phi (x_1, u),$$ where $\mu _U=F^\times \cap U$, and $P^1$ is the upper triangular subgroup of $\SL_2$. The derivative $I'(0, g, \phi)$ of $I(s, g, \phi)$ at $s=0$ is an automorphic form in $g\in \gla$. Let $\Pr I'(0, g, \phi)$ be the \emph{holomorphic projection} of the derivative $I'(0, g, \phi)$. This holomorphic projection is just the orthogonal projection from the space of automorphic forms to the space of cuspidal and holomorphic automorphic forms of parallel weight two with respect to the Petersson inner product. In \S\ref{sec derivative series}, we decompose $\Pr I'(0, g, \phi)$ into a sum of ``local terms'', and compute all the relevant local components. Most of the terms are computed in \cite{YZZ, YZ}. 
However, as in \S\ref{sec 3.1}, a new extra term $\Pr' \CJ'(0, g, \phi)$ appears in the expression of $\Pr I'(0, g, \phi)$. This term comes from the overly fast growth of $I'(0, g, \phi)$ in the computation of the holomorphic projection. It was zero under \cite[Assumption 5.4]{YZZ}, but its non-vanishing is crucial to the treatment here. The extra term $\Pr' \CJ'(0, g, \phi)$ is computed in Proposition \ref{analytic series extra}, and its local component computed in Lemma \ref{local explicit}(1) gives $\zeta_v'(2)/\zeta_v(2)$ at almost all places $v$. The sum over all places gives the global logarithmic derivative $\zeta_F'(2)/\zeta_F(2)$, which is the main term on the right-hand side of Theorem \ref{main}. \subsubsection*{Height series} For any $\phi\in \ol\CS (\BB\times \BA^\times)$ invariant under $U\times U$, we have a generating series of Hecke operators on the Shimura curve $X_U$: $$Z(g, \phi)_U=Z_0(g, \phi)+w_U\sum _{a\in F^\times}\sum _{x\in U\bs \BB_f^\times /U} r(g)\phi (x, aq(x)^{-1})Z(x)_U,$$ where $w_U=|\{\pm1\}\cap U|$ and every $Z(x)_U$ is a divisor of $X_U\times X_U$ associated to the Hecke operator corresponding to the double coset $UxU$. The constant term $Z_0(g, \phi)$ does not play any essential role in this paper, and we denote by $Z_*(g, \phi)$ the sum of the other terms. By \cite[Theorem 3.17]{YZZ}, this series is absolutely convergent and defines an automorphic form in $g\in \GL_2(\BA)$ with coefficients in $\Pic (X_U\times X_U)_\BC$. Recall that $P_U\in X_U(E^\ab)$ is the CM point represented by $[\tau_0,1]$ under the complex uniformization, where $\tau_0$ is the unique fixed point of $E^\times$ in the upper half plane $\CH$. More generally, we have a CM point $t=[\tau_0,t]$ for any $t\in E^\times(\af)$. Let $t^\circ=t-\xi_t$ be the divisor in $\Pic(X_{U,\overline F})\otimes_\ZZ\QQ$ of degree zero on every connected component. Here the normalized Hodge class $\displaystyle\xi_{t}=\frac{1}{\deg(L_{U,t})}L_{U,t}$, where $L_{U,t}$ is the restriction of the Hodge bundle $L_U$ to the connected component of $X_{U,\overline F}$ containing $t$. Then we can form a height series $$Z(g, (t_1,t_2),\phi)=\pair{Z(g, \phi)_U t_1^\circ, \ t_2^\circ}_\NT,$$ where the right-hand side is the Neron--Tate height pairing. In \S\ref{sec height series}, we decompose the height series $Z(g, (t_1,t_2),\phi)$ into a sum of ``local terms'', and compute all the relevant local components. For simplicity, we might write $Z(g, (t_1,t_2))$ for $Z(g, (t_1,t_2),\phi)$ by suppressing the dependence on $\phi$ in this paper. The starting point is the decomposition $$Z(g, (t_1, t_2)) =\pair{Z_*(g,\phi) t_1, t_2} -\pair{Z_*(g,\phi) \xi_{t_1}, t_2} +\pair{Z_*(g,\phi)\xi_{t_1}, \xi_{t_2}} -\pair{Z_*(g,\phi) t_1, \xi_{t_2}}.$$ The first term is computed in \cite{YZZ, YZ}. The remaining three terms are further computed in \S\ref{sec 4.3}. These three terms are zero under \cite[Assumption 5.4]{YZZ}, but their non-vanishing is crucial to the treatment here. In particular, by Proposition \ref{geometric series extra1}, the term $\pair{Z_*(g,\phi)\xi_{t_1}, \xi_{t_2}}$ is equal to an Eisenstein series times $\pair{\xi_{t_2}, \xi_{t_2}}$, and thus it is an easy multiple of $h_{\ol \CL_U}(X_U)$. This gives the main term on the left-hand side of Theorem \ref{main}. \subsubsection*{Pseudo-Eisenstein series} The notion of pseudo-Eisenstein series is parallel to that of pseudo-theta series of \cite{YZ}. 
To illustrate the idea, we sketch both notions for $\SL_2$, while those for $\GL_2$, which are the ones we really need, can be introduced similarly. Let $(V,q)$ be a quadratic space over a totally real number field $F$, assumed to be even-dimensional for simplicity. Let $\phi\in \CS (V(\BA))$ be a Schwartz function. Then we have an action of $g\in \SL_2(\BA)$ on $\phi$ via the Weil representation. Start with the theta series $$\theta(g,\phi)=\sum _{x\in V} r(g)\phi(x), \qquad g\in \SL_2(\BA).$$ Let $S$ be a finite set of non-archimedean places of $F$. In $r(g)\phi(x)=r(g_S)\phi_S(x) r(g^S)\phi^S(x)$, if we replace $r(g_S)\phi_S(x)$ by a locally constant function $\phi_S'(g, x)$ of $(g, x)\in \SL_2(F_S)\times V(F_S)$, then we obtain a \textit{pseudo-theta series} $$A^{(S)}_{\phi'}(g)= \sum _{x\in V}\phi_S'(g, x)r(g)\phi^S (x), \qquad g\in \SL_2(\BA).$$ Note that $A^{(S)}_{\phi'}$ is not automorphic in general. More general types of pseudo-theta series are introduced in \cite[\S6]{YZ} and reviewed in \S\ref{sec key lemma}. We say that the pseudo-theta series $A^{(S)}_{\phi'}(g)$ is \textit{non-singular} if $\phi_S'(1, x)$ (for $g=1$) is actually a Schwartz function of $x\in V(F_S)$. In this case, we form a true theta series $$\theta_{A^{(S)}}(g)= \sum _{x\in V}r(g)\phi_S'(1, x)r(g)\phi^S (x), \qquad g\in \SL_2(\BA).$$ It is automorphic and approximates the original series in the sense that $A^{(S)}_{\phi'}(g)=\theta_{A^{(S)}}(g)$ as long as $g_S=1$. Now we start with the Siegel--Eisenstein series \begin{eqnarray*} E(s,g, \phi) =\sum_{\gamma \in P^1(F)\bs \SL_2(F)} \delta (\gamma g)^s r(\gamma g)\phi(0), \quad g\in\SL_2(\BA). \end{eqnarray*} The non-constant part of $E(s, g,\phi)$ has a Fourier expansion $$ E_*(s, g, \phi)=\sum_{a\in F^\times} W_a(s,g,\phi), $$ where the Whittaker function is defined by \begin{eqnarray*} W_a(s,g,\phi) = \int_{\adele} \delta(wn(b)g)^s \ r(wn(b)g)\phi(0) \psi(-ab) db, \quad a\in F. \end{eqnarray*} We define the local Whittaker functions similarly. For our purpose, we only care about the behavior at $s=0$. Let $S$ be a finite set of non-archimedean places of $F$. In $W_a(0,g,\phi)=W_{a,S}(0,g,\phi_S)W_a^S(0,g,\phi^S)$, if we replace $W_{a,S}(0,g,\phi_S)$ by a locally constant function $B_{a,S}(g)$ of $(a,g)\in F_S\cross \times \SL_2(F_S)$, then we obtain a \textit{pseudo-Eisenstein series} $$B^{(S)}_{\phi}(g)=\sum_{a\in F^\times} B_{a,S}(g) W_a^S(0,g,\phi^S), \ \quad g\in \SL_2(\adele).$$ Pseudo-Eisenstein series arise naturally in derivatives of Eisenstein series. In fact, the derivative of $E_*(s, g, \phi)$ at $s=0$ is $$ E_*'(0, g, \phi)= \sum_{a\in F^\times} \sum_v W_{a,v}'(0,g,\phi)W_a^v(0,g,\phi^v). $$ For every non-archimedean $v$, the ``$v$-part'' $$\sum_{a\in F^\times} W_{a,v}'(0,g,\phi)W_a^v(0,g,\phi^v)$$ is a pseudo-Eisenstein series. We say that the pseudo-Eisenstein series $B^{(S)}_{\phi}(g)$ is \textit{non-singular} if for every $v\in S$, there exist $\phi_v^+\in \CS(V_v^+)$ and $\phi_v^-\in \CS(V_v^-)$ such that $$B_{a,v}(1)=W_{a,v}(0,1,\phi_v^+)+ W_{a,v}(0,1,\phi_v^-), \quad \forall a\in F_v\cross.$$ Here $\{V_v^+,V_v^-\}$ is the set of (one or two) quadratic spaces over $F_v$ with the same dimension and the same discriminant as $V_v$. In this case, we form a linear combination of true Eisenstein series $$ E_{B}(s,g)=\sum_{\epsilon: S\to \{\pm\}} E(s,g, \phi_S^{\epsilon}\otimes \phi^S), \qquad g\in \SL_2(\BA).
$$ It approximates the original series in the sense that $B_{\phi}^{(S)}(g)$ is equal to the non-constant part of $E_{B}(0,g)$ as long as $g_S=1$. The above pair $(\phi_v^+, \phi_v^-)$ (if it exists) is generally not uniquely determined by $B_{a,v}(1)$, but the Eisenstein series $E_{B}(s,g)$, as a function of $g\in \SL_2(F_v)$ and $s\in \CC$, is uniquely determined by $B_{\phi}^{(S)}(g)$. We refer to Lemma \ref{whittaker image1} for more details (in a slightly different setting). The key result in this pseudo theory is Lemma \ref{pseudo}, as an extension of \cite[Lemma 6.1]{YZ}. It asserts that if an automorphic form is equal to a finite linear combination of non-singular pseudo-theta series and non-singular pseudo-Eisenstein series, then it is actually equal to the finite linear combination of the corresponding theta series and Eisenstein series. \subsubsection*{The comparison} Go back to the difference $$\mathcal D (g, \phi)=\Pr I'(0, g, \phi)-2Z(g, (1,1), \phi), \qquad g\in \GL_2(\BA_F).$$ By the computational result of \S\ref{sec derivative series} and \S\ref{sec height series}, we eventually see that $\mathcal D (g, \phi)$ is a finite linear combination of non-singular pseudo-theta series and non-singular pseudo-Eisenstein series. By Lemma \ref{pseudo}, $\mathcal D (g, \phi)$ is actually equal to the finite linear combination of the corresponding theta series and Eisenstein series. Note that $\mathcal D (g, \phi)$ is cuspidal, so the linear combination of the corresponding constant terms is zero. This gives a nontrivial relation involving the major terms of Theorem \ref{main}. It suffices to take $g$ to be a specific matrix to make the relation precise. Take $g=(g_v)_v\in\gla$ with $g_v=1$ for $v\notin \Sigma_f$ and $g_v=w$ for $v\in \Sigma_f$. After explicit computation, the nontrivial relation becomes $$ d_0 \sumu r(g)\phi(0,u)=0. $$ Here $d_0$ is the difference of two sides of Theorem \ref{main}. This proves the theorem. Note that if we take $g=1$, then the nontrivial relation becomes $0=0$, since $\phi_v(0,u)=0$ for any $v\in \Sigma_f$ by our choice $\phi_v=1_{O_\bv^\times\times\ofv\cross}$ in \S\ref{choices}. As $r(w)\phi_v(0,u)\neq 0$, we choose $g_v$ to be $w$ for $v\in \Sigma_f$ instead. This serves the purpose, but incurs more computations about evaluating $g_v=w$ and about averaging of many local terms. \subsection{Notations and conventions} \label{sec notation} Most of the notations of this paper are compatible with those in \cite{YZZ, YZ}. The basic notations are as in \cite[\S1.6]{YZZ}. In particular, $F$ is a fixed totally real number field, and $E$ is a fixed totally imaginary quadratic extension of $F$. Denote by $\BA=\BA_F=\prod_v' F_v$ the adele ring of $F$. As in \cite[\S1.6]{YZZ}, we normalize the character $\psi=\oplus_v\psi_v:F\bs \BA\to \BC^\times$. We introduce the Weil representation based on $\psi$, and choose a precise Haar measure on each relevant algebraic group locally everywhere. For a non-archimedean place $v$ of $F$, make the following notations. \begin{enumerate}[(1)] \item $p_v$ denotes the maximal ideal of $O_{F_v}$; \item $N_v$ denotes the order of the residue field $O_{F_v}/p_v$; \item $d_v\in F_v$ denotes the local different of $F$ over $\BQ$; \item $D_v\in F_v$ denotes the discriminant of the quadratic extension $E_v$ in $F_v$. \end{enumerate} Note that $d_v$ and $D_v$ are only well-defined up to multiplication by $O_{F_v}^\times$, but we will only use their valuations at $v$ and the ideals of $O_{F_v}$ generated by them. 
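To fix ideas, we illustrate the notations (1)--(4) in a simple special case; this particular choice of $(F,E)$ is for illustration only and is not assumed anywhere in the paper. Take $F=\QQ$ and $E=\QQ(i)$. At a non-archimedean place $v=p$, we have $N_v=p$ and $d_v\in O_{F_v}^\times$, since $\QQ$ is unramified over itself. If $p$ is odd, then $E_v/F_v$ is unramified or split, so $D_v\in O_{F_v}^\times$ as well; if $p=2$, then $E_v=\QQ_2(i)$ is ramified over $\QQ_2$ and $D_v$ generates the ideal $4\ZZ_2$.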
For convenience, we recall the matrix notation: \begin{align*} m(a)&=\matrixx{a}{}{}{a^{-1}},\quad d(a)=\matrixx{1}{}{}{a}, \quad d^*(a)=\matrixx{a}{}{}{1}\\ n(b)&=\matrixx{1}{b}{}{1}, \quad w=\matrixx{}{1}{-1}{}. \end{align*} We denote by $P\subset \gl$ and $P^1\subset \sll$ the subgroups of upper triangular matrices, and by $N$ the standard unipotent subgroup of them. For any place $v$ of $F$, the character $$\delta_v: P(F_v)\longrightarrow \RR\cross, \quad \matrixx{a}{b}{}{d} \longmapsto \left| \frac ad \right|_v^{\frac 12}$$ extends to a function $\delta_v: \gl(F_v)\rightarrow \RR\cross$ by the Iwasawa decomposition. For the global field $F$, the product $\delta=\prod_v \delta_v$ gives a function on $\gl(\adele)$. If $v$ is a real place, we define a function $$\rho_v: \gl(F_v)\longrightarrow \BC, \quad g_v\longmapsto e^{i\theta},$$ where $$g_v= \matrixx{a}{b}{}{d} \matrixx{\cos \theta}{\sin \theta}{-\sin \theta}{\cos \theta} $$ is in the form of the Iwasawa decomposition, where we require $a>0$ so that the decomposition is unique. For the global field $F$, the product $\rho_\infty=\prod_{v\mid\infty} \rho_v$ gives a function on $\gl(\adele)$, which ignores the non-archimedean components. The following are all the conventions of this paper that are different from those of \cite{YZZ, YZ}, while only (3) is a major difference which brings extra computations. \begin{enumerate}[(1)] \item The Petersson metric on $\CL_U$ is defined by $\|d\tau\|_{\rm Pet}=2\, \Im(\tau)$ in \cite{YZ} and the current paper, while it is defined by $\|d\tau\|_{\rm Pet}=4\pi\, \Im(\tau)$ in \cite{YZZ}. This discrepancy does not affect our applying results of \cite{YZZ}, since only the curvature form of the Petersson metric is crucial in \cite{YZZ}. \item In the current paper, $\zeta_F(s)$ denotes the usual Dedekind zeta function (without Gamma factors), $\tilde \zeta_F(s)$ denotes the completed Zeta function (with Gamma factors), and $L(s,\eta)$ denotes the completed L-function (with Gamma factors) of the quadratic character $\eta$. In \cite{YZZ,YZ}, both $\zeta_F(s)$ and $L(s,\eta)$ denote the completed L-functions (with Gamma factors). \item Our choice of $(U,\phi)$ in \S\ref{choices} is different from those in \cite{YZZ,YZ} due to the dropping of the degeneracy assumptions. Moreover, \cite{YZ} and the current paper eventually assume that $U$ is maximal compact, while \cite{YZZ} does not. We will mention this difference and its effect from time to time. \item This paper and \cite{YZZ} do not assume $|\Sigma|>1$, while \cite{YZ} assumes $|\Sigma|>1$. Most results of \cite{YZ} actually hold in the case $|\Sigma|=1$. \end{enumerate} \subsubsection*{Acknowledgment} The author is indebted to Shou-Wu Zhang and Wei Zhang, as the current paper is inspired by the long-term joint works of the author with them on the Gross--Zagier formula and the averaged Colmez conjecture. The author would like to thank Congling Qiu for pointing out a few mistakes in an early version of this paper, and thank Tuoping Du, Benedict H. Gross, Ulf K\"uhn, Yifeng Liu, Vincent Maillot, and Michael Rapoport for helpful communications. Finally, the author is grateful to the anonymous referee for so many valuable comments or suggestions to revise this paper. The author is supported by a grant from the National Science Foundation of China (grant NO. 12250004) and the Xplorer Prize from the New Cornerstone Science Foundation. 
\section{Pseudo-Eisenstein series} \label{sec pseudo} In \cite[\S 6]{YZ}, the notion of pseudo-theta series is introduced, and its crucial property in \cite[Lemma 6.1]{YZ} is the key to get a clean identity from the matching of the major terms. The goal of this section is to introduce a notion of pseudo-Eisenstein series and extend \cite[Lemma 6.1]{YZ} to a result including both pseudo-theta series and pseudo-Eisenstein series. Throughout this section, let $F$ be a totally real number field, and $\BA$ the adele ring of $F$. We will use the terminologies of \S\ref{sec notation} and \cite[\S 6.1]{YZ} freely. \subsection{Theta series and Eisenstein series} \label{sec theta eisenstein} The goal here is to recall Weil representations, theta series and Eisenstein series following \cite{YZZ} and \cite[\S 6.1]{YZ}. \subsubsection*{Weil representations} Let us first review Weil representations associated to quadratic space and quaternion algebras. We will only introduce the local case, and apply similar conventions for the global base. Our conventions are compatible with those in \cite[\S 2.1, \S2.2, \S 4.1]{YZZ}. Let $k$ be a local field. Let $(V,q)$ be a quadratic space over $k$ of even dimension. If $k=\RR$, we further assume that $(V,q)$ is positive definite. We do not consider the case $k=\CC$ in this paper. If $k$ is non-archimedean, denote by $\OCS(V\times k^{\times})=\CS(V\times k^{\times})$ the usual space of locally constant, compactly supported, and complex-valued functions on $V\times k^{\times}$. If $k=\RR$, then $\OCS(V\times k^{\times})$ is the space of functions on $V\times k^{\times} $ of the form $$\phi(x,u)=\left(P_1(uq(x))+\sgn (u)P_2(uq(x))\right)e^{-2\pi |u|q(x)},$$ where $P_1, P_2$ are any polynomials of complex coefficients, and $\sgn (u)=u/|u|$ denotes the sign of $u$. The standard Schwartz function in this case is the standard Gaussian function $$ \phi(x,u)= \begin{cases} e^{-2\pi uq(x)}, & u>0, \\ 0, & u<0. \end{cases} $$ In \cite[\S 2.1.3]{YZZ}, the Weil representation on the usual space $\CS(V)$ is extended to a representation of $\GL_2(k)\times \GO(V)$ on $\OCS(V\times k^{\times})$. Note that the actions of $\GL_2(k)$ and $\GO(V)$ commute with each other. This extension is originally from Waldspurger \cite{Wa}. As a convention, the action of $(g,h)\in \GL_2(k)\times \GO(V)$ on $\phi\in \OCS(V\times k^{\times})$ is denoted by $r(g,h)\phi$. We also write $r(g)\phi=r(g,1)\phi$ and $r(h)\phi=r(1,h)\phi$. The Weil representation behaves well under the direct sum of orthogonal spaces. Assume that there is an orthogonal decomposition $V=V_1\oplus V_2$. Assume that $\phi=\phi_1\otimes\phi_2$ for $\phi\in \overline \CS (V\times k^\times)$ and $\phi_i\in \overline \CS (V_i\times k^\times)$ (with $i=1,2$) in the sense that $$ \phi(x_1+x_2,u)=\phi_1(x_1,u)\phi_2(x_2,u), \quad x_i\in V_i,\ u\in k^\times. $$ Then the Weil representation splits as $$ r(g)\phi(x_1+x_2,u)=r(g)\phi_1(x_1,u)\,r(g)\phi_2(x_2,u), \quad g\in\gla. $$ Here $r(g)\phi, r(g)\phi_1, r(g)\phi_2$ are Weil representations corresponding to the quadratic spaces $V, V_1, V_2$ respectively. Now we consider quadratic spaces coming from quaternion algebras. Assume that $V=B$ for a quaternion algebra $B$ over $k$, and assume that $q:B\to k$ is the reduced norm. If $k=\RR$, we further assume that $B$ is the Hamiltonian algebra so that $V$ is positive definite. 
Let $B^\times \times B^\times $ act on $V$ by $$x\longmapsto h_1xh_2^{-1}, \qquad x\in V,\quad h_1,h_2\in B^\times.$$ This induces a natural map $$ \tau:B^\times \times B^\times \lra \GO(V). $$ For $g\in \gl(k)$ and $h_1, h_2\in B^\times$, denote $$r(g,(h_1, h_2))\phi=r(g,\tau(h_1, h_2))\phi, \quad r(h_1, h_2)\phi=r(1,\tau(h_1, h_2))\phi.$$ We refer to \cite[\S2.2]{YZZ} for the kernel and the cokernel of $\tau$, but we do not need them in this paper. Finally, let $k'$ be either $k\oplus k$ or a separable quadratic field extension of $k$. Assume that there is an embedding $k'\to B$ of $k$-algebras. There is an element $j\in B^\times$ such that $jtj^{-1}=\bar t$ for any $t\in k'$. Here $\bar t$ denotes the action on $t$ by the unique non-trivial automorphism of $k'$ over $k$. This gives an orthogonal decomposition $$V=V_1\oplus V_2, \quad V_1=k', \ V_2=k'j,$$ where $V_1, V_2$ are endowed with the induced quadratic forms. Note that $k'j$, as a subset of $B$, does not depend on the choice of $j$. Assume that $\phi=\phi_1\otimes\phi_2$ for $\phi\in \overline \CS (V\times k^\times)$ and $\phi_i\in \overline \CS (V_i\times k^\times)$ in the above sense. Then the Weil representation splits as $$ r(g, (t_1, t_2))\phi(x_1+x_2,u)=r(g, (t_1, t_2))\phi_1(x_1,u)\cdot r(g, (t_1, t_2))\phi_2(x_2,u), \quad g\in\gla, \ t_1, t_2\in k'^\times. $$ Here $r(g, (t_1, t_2))\phi$ is defined for the quadratic space $V$ by viewing $(t_1,t_2)\in B^\times\times B^\times$, and $$r(g, (t_1, t_2))\phi_i(x_i,u)=r(g, \tau_i(t_1, t_2))\phi_i(x_i,u)$$ via the map $\tau_i: k'^\times\times k'^\times \to \GO(V_i)$ induced by the action $(t_1, t_2)\circ x_i=t_1 x_i t_2^{-1}$ of $k'^\times\times k'^\times$ on $V_i$. For convenience, we also take the convention $$ r(t_1, t_2)\phi(x,u)=r(1, (t_1, t_2))\phi(x,u), \quad r(t_1, t_2)\phi_i(x_i,u)=r(1, (t_1, t_2))\phi_i(x_i,u). $$ \subsubsection*{Theta series} Let $(V,q)$ be a positive definite quadratic space over a totally real number field $F$. Assume that $\dim V$ is even in the following, which is always satisfied in our application. Denote $V_v=V\otimes_FF_v$ for any place $v$ of $F$. Denote $V(\BA)=V\otimes_F \BA\simeq \prod_v' V_v$ for the restricted product. Let $$ \OCS(V(\adele)\times \adele^{\times}) =\otimes_v' \OCS(V_v\times F_v^{\times}) $$ be the space of Schwartz functions. Here to define the restricted tensor product, we need a distinguished vector in $\OCS(V_v\times F_v^{\times})$ for all but finitely many $v$. For that, fix a full $O_F$-lattice $\Lambda$ of $V$. For every non-archimedean place $v$, $\Lambda_v=\Lambda\otimes_{O_F}O_{F_v}$ is a full $O_{F_v}$-lattice of $V_v$. Take the distinguished vector in $\OCS(V_v\times F_v^{\times})$ to be the characteristic function of $\Lambda_v\times O_{F_v}\cross$. By the product, we have the Weil representation of $\GL_2(\BA)\times \GO(V(\BA))$ on $\OCS(V(\adele)\times \adele^{\times})$. Note that the actions of $\GL_2(\BA)$ and $\GO(V(\BA))$ commute with each other. Take any $\phi\in \overline \CS (V(\adele)\times \BA^\times)$. There is the partial theta series $$ \theta(g,u,\phi)= \sum_{x\in V} r(g)\phi(x,u), \quad g\in\gla, \ u\in \across. $$ If $u\in F\cross$, it is invariant under the left multiplication of $\SL_2(F)$ on $g$. To get an automorphic form on $\gla$, we define \begin{equation*} \theta(g, \phi)_K =\sum_{u\in \mu_K^2\backslash F\cross} \theta(g,u, \phi) = \sum_{u\in \mu_K^2\backslash F\cross} \sum_{x\in V} r(g)\phi(x,u), \quad g\in\gla.
\end{equation*} Here $\mu_K=F\cross \cap K$, and $K$ is any open compact subgroup of $\GO(V(\adele_f))$ such that $\phi_f$ is invariant under the action of $K$ by the Weil representation. The summation is well-defined and absolutely convergent. The result $\theta(g, \phi)_K$ is an automorphic form in $g\in \gl(\adele)$, and $\theta(g, r(h)\phi)_K$ is an automorphic form in $(g,h)\in \gl(\adele)\times \GO(V(\adele))$. See \cite[\S 4.1.3]{YZZ} for more details. Furthermore, if the infinite component $\phi_\infty$ is standard, i.e., for any archimedean place $v$, $$ \phi_v(x,u)= \begin{cases} e^{-2\pi uq(x)}, & u>0, \\ 0, & u<0, \end{cases} $$ then $\theta(g, \phi)_K$ is holomorphic of parallel weight $\frac 12 \dim V$. The Weil representation and theta series behaves well under direct sum of orthogonal spaces. Assume that there is an orthogonal decomposition $V=V_1\oplus V_2$ and a decomposition $\phi=\phi_1\otimes\phi_2$ for $\phi\in \overline \CS (V(\adele)\times \BA^\times)$ and $\phi_i\in \overline \CS (V_i(\adele)\times \BA^\times)$ in the above sense; i.e. $$ \phi(x_1+x_2,u)=\phi_1(x_1,u)\phi_2(x_2,u). $$ Then the splitting of the Weil representation gives a natural splitting $$ \theta(g,u,\phi)=\theta(g,u,\phi_1)\theta(g,u,\phi_2), \quad g\in\gla, \ u\in \across. $$ \subsubsection*{Eisenstein series} In the above setting of $\phi\in \overline \CS (V(\adele)\times \BA^\times)$ for a quadratic space $(V,q)$ over $F$, we can define an Eisenstein series $E(s,g, \phi)$. Then $E(s,g, \phi)$ and $\theta(g, \phi)$ are related by the Siegel--Weil formula. On the other hand, we also have Eisenstein series associated to incoherent quadratic collections in the sense of Kudla \cite{Kud}. For convenience, we introduce the notion of adelic quadratic spaces to include both cases by dropping the last condition of \cite[Definition 2.1]{Kud}. A collection $\{(\BV_v, q_v)\}_v$ of quadratic spaces $(\BV_v, q_v)$ over $F_v$ indexed by the set of places $v$ of $F$ is called \emph{adelic} if it satisfies the following conditions: \begin{enumerate}[(1)] \item There is a quadratic space $(V,q_0)$ over $F$ and a finite set $S$ of places of $F$ such that there is an isomorphism $(V_v,q_0) \to (\BV_v, q)$ for all $v\notin S$; \item For any place $v$ of $F$, the quadratic spaces $(V_v,q_0)$ and $(\BV_v, q)$ have the same dimension and the same discriminant. \end{enumerate} In that case, we obtain a quadratic space $$ (\vv, q):={\prod_v}' (\vv_v, q_v) $$ over $\BA$. Here the restricted product makes sense by condition (1). We call $(\vv, q)$ an \emph{adelic quadratic space over $\BA$}. The dimension $\dim\vv\in \ZZ$ and the quadratic character $\chi_{(\vv,q)}:F^\times\bs \across\to \BC^\times$ are defined to be those of $(V,q)$. Its Hasse invariant is defined to be $$ \epsilon(\vv,q):=\prod_v \epsilon(\vv_v,q_v). $$ We say that the adelic quadratic space $(\vv, q)$ is \emph{coherent} (resp. \emph{incoherent}) if $\epsilon(\vv,q)=1$ (resp. $\epsilon(\vv,q)=-1$). Note that $(\vv, q)$ is coherent if it is isomorphic to $(V(\BA), q_0)$ for some quadratic space $(V,q_0)$ over $F$. Let $(\vv, q)$ be an adelic quadratic space over $\BA$ which is positive definite at all archimedean places. For simplicity, we still assume that $\dim\vv$ is even. The space $$\OCS(\vv\times \adele^{\times})=\otimes_v'\OCS(\vv_v\times F_v^{\times})$$ is defined. 
To explain the restricted tensor, for any $v\notin S$, we have an isomorphism $V_v\to \vv_v$ as above, so the distinguished vectors of $\{\OCS(V_v\times F_v^{\times})\}_{v\notin S}$ transfer to those of $\{\OCS(\vv_v\times F_v^{\times})\}_{v\notin S}$. The Weil representation of $\GL_2(\BA)\times \GO(\vv)$ on $\OCS(\vv\times \adele^{\times})$ is defined by local products as in the coherent case. Let $\phi\in \OCS(\BV\times \across)$ be a Schwartz function. Recall the associated partial Siegel Eisenstein series \begin{eqnarray*} E(s,g, u, \phi) &=& \sum_{\gamma \in P(F)\bs \GL_2(F)} \delta (\gamma g)^s r(\gamma g)\phi(0, u) \\ &=& \sum_{\gamma \in P^1(F)\bs \SL_2(F)} \delta (\gamma g)^s r(\gamma g)\phi(0, u), \quad g\in\gla, \ u\in \BA^\times. \end{eqnarray*} Here $P^1$ (resp. $P$) denotes the algebraic subgroup of upper triangle matrices in $\SL_2$ (resp. $\GL_2$), and $\delta$ is the standard modulus function as in \cite[\S1.6.6]{YZZ}. If $u\in F\cross$, it is invariant under the left multiplication of $\SL_2(F)$ on $g$, and it has a meromorphic continuation to $s\in\BC$ and a functional equation with center $s=1-\frac{\dim V}{2}$. To get an automorphic form on $\gla$, we define \begin{equation*} E(s,g,\phi) _K =\sum_{u\in \mu_K^2\backslash F\cross} E(s,g,u, \phi), \quad g\in\gla. \end{equation*} Here as before, $\mu_K=F\cross \cap K$, and $K$ is any open compact subgroup of $\GO(\adele_f)$ such that $\phi_f$ is invariant under the action of $K$ by the Weil representation. It is easy to see that $E(s,g,\phi)_K$ is invariant under the left multiplication of $\GL_2(F)$ on $g$. See \cite[\S 4.1.4]{YZZ}. The Eisenstein series $E(s,g,u,\phi)$ has the standard Fourier expansion $$E(s,g,u,\phi)=\delta(g)^s r(g)\phi(0,u)+\sum_{a \in F} W_a(s, g,u,\phi).$$ Here the Whittaker function is given by \begin{eqnarray*} W_a(s,g,u,\phi) = \int_{\adele} \delta(wn(b)g)^s \ r(wn(b)g)\phi(0,u) \psi(-ab) db, \quad a\in F, \ u\in F\cross. \end{eqnarray*} Here $w=\matrixx{}{1}{-1}{}$ and $n(b)=\matrixx{1}{b}{}{1}$. For each place $v$ of $F$ and any $\phi_v\in \OCS(\vv_v\times \fvcross)$, we also introduce the local Whittaker function \begin{eqnarray*} W_{a,v}(s,g,u,\phi_{v}) = \int_{F_v} \delta(wn(b)g)^s \ r(wn(b)g)\phi_{v}(0,u) \psi_v(-ab) db, \quad a\in F_v, \ u\in F_v\cross. \end{eqnarray*} We have a splitting $$ E(s,g,u,\phi)=E_0(s,g,u,\phi)+E_*(s,g,u,\phi), $$ where the constant term $$ E_0(s,g,u,\phi)=\delta(g)^s r(g)\phi(0,u)+ W_0(s, g,u), $$ and the non-constant part $$ E_*(s,g,u,\phi)=\sum_{a \in F^\times} W_a(s, g,u,\phi). $$ Similarly, we have $$ E(s,g,\phi)_K=E_0(s,g, \phi)_K+E_*(s,g,\phi)_K, $$ where the constant term $$ E_0(s,g,\phi) _K =\sum_{u\in \mu_K^2\backslash F\cross} E_0(s,g,u, \phi) $$ and the non-constant part $$ E_*(s,g,\phi) _K =\sum_{u\in \mu_K^2\backslash F\cross} E_*(s,g,u, \phi). $$ If $(\BV, q)$ is coherent, then we can express $E(0, g, \phi)$ and $E(0, g, u,\phi)$ in terms of the theta series by the Siegel--Weil formula (in most convergent cases). If $(\BV, q)$ is incoherent, then there is no theta series available. However, we can still express $W_{a,v}(0, g, u, \phi_v)$ in terms of certain average of the Schwartz function $\phi_v$. See the local Siegel--Weil formula in \cite[Theorem 2.2]{YZZ}. See also the examples of incoherent Eisenstein series (for $\SL_2$ or $\wt\SL_2$) in \cite[\S2.5]{YZZ}. 
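Before turning to pseudo-Eisenstein series, we record a basic coherent example for orientation; it is only meant to fix ideas and will not be used directly. Let $E/F$ be the totally imaginary quadratic extension of the introduction and let $V=E$ with $q=\mathrm{Nm}_{E/F}$ the norm form, so that $(V(\BA),q)$ is a coherent adelic quadratic space of dimension $2$. It is positive definite at every archimedean place because $E$ is totally imaginary, and its quadratic character $\chi_{(V(\BA),q)}$ is the character $\eta$ of Theorem \ref{height CM}. This is the global counterpart of the space $V_1=k'$ in the orthogonal decomposition $V=V_1\oplus V_2$ above.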
\subsection{Pseudo-Eisenstein series} \label{sec pseudo} Here we introduce the notion of pseudo-Eisenstein series, which is parallel to the notion of pseudo-theta series in \cite[\S 6.2]{YZ}. Note that the term ``pseudo-Eisenstein series'' is also used in the literature as an unrelated terminology. \subsubsection*{Definition} Let $(V, q)$ be an even-dimensional quadratic space over a totally real number field $F$, positive definite at all archimedean places. Let $S$ be a fixed finite set of non-archimedean place of $F$, and $$\phi^S=\otimes_{w\notin S} \phi_w \in \overline\CS(V(\adele^S)\times \adele^{S, \times})$$ be a Schwartz function with standard archimedean components. A \textit{pseudo-Eisenstein series} is a series of the form $$B^{(S)}_{\phi}(g)=\sum_{u\in \mu^2\backslash F\cross} \sum_{a\in F^\times} B_{a,S}(g,u) W_a^S(0,g,u,\phi^S), \ \quad g\in \gl(\adele).$$ We explain the notations as follows: \begin{itemize} \item $W_a^S(0,g,u,\phi^S)=\prod_{w\notin S} W_{a,w}(0,g,u,\phi_w)$ is the product of the local Whittaker functions defined before. \item $B_{a,S}(g,u)=\prod_{v\in S} B_{a,v}(g,u)$ is the product of the local terms. \item For any $v\in S$, the function $$B_{\bullet,v}(\bullet, \bullet): F_v\cross \times \gl(F_v) \times F_v\cross \rightarrow \BC$$ is locally constant. It is smooth in $g$ in the sense that there is an open compact subgroup $K_v$ of $\gl(F_v)$ such that $$B_{a,v}(g\kappa,u)=B_{a,v}(g,u), \quad \forall (a, g,u)\in F_v\cross\times \gl(F_v)\times F_v\cross,\ \kappa\in K_v.$$ It is compactly supported in $u$ in the sense that there is a compact subset $D_g$ of $F_v^\times$ depending on $g$ (but independent of $a$) such that $B_{a,v}(g,u)=0$ for any $(a,g,u)$ with $u\notin D_g$. \item $\mu$ is a subgroup of $O_F\cross$ of finite index which acts trivially on the variable $u$ of $B_{a,v}(g,u)$ and $W_{a,w}(0,g,u,\phi_w)$ for every non-archimedean $w\notin S$ and $v\in S$. \item For any $g\in \gla$, the double sum is absolutely convergent. \end{itemize} Note that $B^{(S)}_{\phi}(g)$ does not have a ``constant term'' in the sense that the summation is over $a\in F^\times$. \begin{example} Let $(\vv, q)$ be an adelic quadratic space over $\BA$ which is positive definite at infinity, and $\phi\in \OCS(\BV\times \across)$ be a Schwartz function which is standard at infinity. Consider the non-constant part $$ E_*(s, g, \phi)=\sum_{u\in \mu_K^2\backslash F\cross} \sum_{a\in F^\times} W_a(s,g,u,\phi). $$ Its derivative at $s=0$ is $$ E_*'(0, g, \phi)=\sum_{u\in \mu_K^2\backslash F\cross} \sum_{a\in F^\times} \sum_v W_{a,v}'(0,g,u,\phi)W_a^v(0,g,u,\phi^v). $$ For every non-archimedean $v$, the ``$v$-part'' $$E_*'(0, g, \phi)(v)=\sum_{u\in \mu_K^2\backslash F\cross} \sum_{a\in F^\times} W_{a,v}'(0,g,u,\phi)W_a^v(0,g,u,\phi^v)$$ is a pseudo-Eisenstein series if it is absolutely convergent. For archimedean $v$, the ``$v$-part'' is not a pseudo-Eisenstein series by our definition, but a holomorphic projection will convert it to a multiple of $E_*(0, g, \phi)$. \end{example} \subsubsection*{Non-singular pseudo-Eisenstein series} Let $B^{(S)}_{\phi}(g)$ be the pseudo-Eisenstein series associated to $(V,q)$ as above. For every $v\in S$, there are one or two quadratic spaces over $F_v$ up to isomorphism with the same dimension and the same discriminant as $(V_v, q)$. Order them by $(V_v^+, q^+)$ and $(V_v^-, q^-)$ so that their Hasse invariants $\epsilon(V_v^+, q^+)=1$ and $\epsilon(V_v^-, q^-)=-1$. 
If there is only one such space, which happens exactly when $V_v$ is isomorphic to the 2-dimensional hyperbolic space over $F_v$, ignore the notation $V_v^-$. The pseudo-Eisenstein series $B^{(S)}_{\phi}(g)$ is called \textit{non-singular} if for every $v\in S$, there exist $\phi_v^+\in \OCS(V_v^+\times F_v^\times)$ and $\phi_v^-\in \OCS(V_v^-\times F_v^\times)$ such that $$B_{a,v}(1,u)=W_{a,v}(0,1,u,\phi_v^+)+ W_{a,v}(0,1,u,\phi_v^-), \quad \forall (a,u)\in F_v\cross\times F_v\cross.$$ We take the convention that $W_{a,v}(0,1,u,\phi_v^-)=0$ if $V_v^-$ does not exist. Note that the equality is only for $g_v=1$. Once this is true, replacing $B_{a,v}(g,u)$ by $W_{a,v}(0,g,u,\phi_v^+)+ W_{a,v}(0,g,u,\phi_v^-)$ in $B^{(S)}_{\phi}(g)$, we see that $$ B^{(S)}_{\phi}(g)=\sum_{\epsilon: S\to \{\pm\}} E_*(0,g, \phi_S^{\epsilon}\otimes \phi^S), \quad \ \forall g\in 1_S \gl(\BA^S). $$ Here $\phi_S^{\epsilon}=\otimes_{v\in S}\phi_v^{\epsilon(v)}$ is the Schwartz function associated to the adelic quadratic space $V_S^\epsilon\times V(\BA^S)$ with $V_S^{\epsilon}=\otimes_{v\in S}V_v^{\epsilon(v)}$, and $E_*(0,g, \phi_S^{\epsilon}\otimes \phi^S)$ denotes the non-constant part of the Eisenstein series $E(0,g, \phi_S^{\epsilon}\otimes \phi^S)$. If $V_v^-$ does not exist, take the convention that $\epsilon(v)=+$ for every $\epsilon$. This is the counterpart of the approximation formula for pseudo-theta series in \cite[\S6.2]{YZ}. Hence, it is convenient to denote $$ E_{B}(g)=E_{B^{(S)}_\phi}(g)=\sum_{\epsilon: S\to \{\pm\}} E(0,g, \phi_S^{\epsilon}\otimes \phi^S). $$ It is called \emph{the Eisenstein series associated to $B^{(S)}_{\phi}(g)$}. One can also formulate the terminology of pseudo-Eisenstein series for $\SL_2$ based on Schwartz functions in $\CS(V(\BA))$ instead of $\overline\CS(V(\BA)\times \across)$. It can be done based on principal series of $\SL_2$, which is really what an Eisenstein series needs. However, we stick with the current formulation because it fits our application. \subsubsection*{Uniqueness of the Eisenstein series} Resume the above notation for $B_\phi^{(S)}$, $(V_v^+, V_v^-)$, and $(\phi_v^+, \phi_v^-)$. Assume that we are in the non-singular situation, so $(\phi_v^+, \phi_v^-)$ exists for every $v\in S$. The pair $(\phi_v^+, \phi_v^-)$ is generally not uniquely determined by $B_{a,v}(1,u)$. However, the following lemma asserts that the function $r(g)\phi_v^+(0,u)+ r(g)\phi_v^-(0,u)$ of $(g,u)\in \gl(F_v)\times F_v^\times$ is actually uniquely determined by $B_{a,v}(1,u)$. Thus the principal series $\sum_{\epsilon: S\to \{\pm\}} \delta(g)^s r(g)(\phi_S^{\epsilon}\otimes \phi^S)(0,u)$ used to define the Eisenstein series $E_{B^{(S)}}(s,g)$ is also uniquely determined by the functions $B_{a,v}(1,u)$ for $v\in S$. As a consequence, the Eisenstein series $E_{B^{(S)}}(s,g)$, as a function of $g\in \GL_2(F_v)$ and $s\in \CC$, is uniquely determined by $B_{\phi}^{(S)}(g)$. \begin{lem} \label{whittaker image1} For any pair $(\phi_v^+, \phi_v^-)$ as above, the function $$f(g,u)=r(g)\phi_v^+(0,u)+r(g)\phi_v^-(0,u), \quad (g,u)\in \gl(F_v)\times F_v^\times$$ is uniquely determined by the function $$B_{a,v}(1,u)=W_{a,v}(0,1,u,\phi_v^+)+ W_{a,v}(0,1,u,\phi_v^-), \quad (a,u)\in F_v\cross\times F_v\cross.$$ In particular, the relation gives $$r(w)\phi_v^+(0,u)+r(w)\phi_v^-(0,u) = \int_{F_v} B_{a,v}(1,u) da, \quad \ u\in F_v^\times. $$ \end{lem} \begin{proof} Recall the matrix notations $w,m(a),n(b),d(c)$ introduced in \S\ref{sec notation}.
For any $c\in F_v^\times$, we have $f(d(c)g,u)=|c|^{-(\dim V_v^+)/4}f(g,c^{-1}u)$ by properties of the Weil representation. Thus $f(g,u)=r(g)\phi_v^+(0,u)+r(g)\phi_v^-(0,u)$ is determined by its restriction to $\SL_2(F_v)\times F_v^\times$. For fixed $u$, it is a principal series of $g\in \SL_2(F_v)$. Then this is a classical result closely related to Kirillov models and has nothing to do with Weil representations. In fact, we need to recover $f(g,u)$ from \begin{eqnarray*} B_{a,v}(1,u) = \int_{F_v} f(wn(b), u) \psi(-ab) db, \quad a\in F, \ u\in F\cross. \end{eqnarray*} Observe that $B_{a,v}(1,u)$ as a function of $a\in F_v$ is the Fourier transform of $f(wn(b), u)$ as a function of $b\in F_v$. Thus we can recover $f(wn(b), u)$ by the Fourier inversion formula. Then $f(m(a)n(b')wn(b), u)$ can be recovered for any $a\in F_v^\times$ and $b'\in F_v$. But the set $m(a)n(b')wn(b)$ is dense in $\SL_2(F_v)$, as can be seen from the Bruhat decomposition. This determines all values of $f(g,u)$. In particular, the Fourier inversion formula gives $$f(w,u) = \int_{F_v} B_{a,v}(1,u) da. $$ This finishes the proof. \end{proof} \subsection{The key lemma} \label{sec key lemma} The goal of this section is to prove a key result (cf. Lemma \ref{pseudo}) for pseudo-theta series and pseudo-Eisenstein series. As a preparation, we first introduce approximation of pseudo-Eisenstein series, and also recall basic notions and approximation of pseudo-theta series. \subsubsection*{Approximation by the Eisenstein series} Let $B^{(S)}_{\phi}(g)$ be a non-singular pseudo-Eisenstein series as in the last subsection. Resume the above notations for its corresponding Eisenstein series $$ E_{B}(g)=\sum_{\epsilon: S\to \{\pm\}} E(0,g, \phi_S^{\epsilon}\otimes \phi^S). $$ We already have an approximation $$ B^{(S)}_{\phi}(g)=E_{B,*}(g), \quad \ \forall g\in 1_S \gl(\BA^S). $$ Here $$ E_{B,*}(g)=\sum_{\epsilon: S\to \{\pm\}} E_*(0,g, \phi_S^{\epsilon}\otimes \phi^S) $$ is the non-constant part of $E_{B}(g)$. In the following, we will approximate the constant term $$ E_{B,0}(g)=\sum_{\epsilon: S\to \{\pm\}} E_0(0,g, \phi_S^{\epsilon}\otimes \phi^S) $$ by a function built from automorphic forms. Note that by definition, for every archimedean place $v$, the Schwartz function $\phi_v$ is standard in $\OCS(V_v\times F_v^\times)$ for the positive definite quadratic space $V_v$. By linearity, it suffices to treat a general Eisenstein series $E(0,g,\phi)_K$ associated to an adelic quadratic space $\vv$ over $\BA$ of even dimension $d$. Here we assume that at every archimedean place $v$, the quadratic space $\vv_v$ is positive definite, and the Schwartz function $\phi_v$ is standard in $\OCS(\vv_v\times F_v^\times)$ . Recall the constant term $$ E_0(0,g,\phi)_K= \sum_{u\in \mu_K^2\backslash F\cross} r(g)\phi(0,u)+ \sum_{u\in \mu_K^2\backslash F\cross} W_{0}(0,g,u,\phi). $$ We first treat the first term on the right-hand side. For any archimedean place $v$, as $\phi_v$ is standard, $r(g_v)\phi_v(0,u)$ gives the standard Whittaker function as in \cite[\S4.1.1]{YZZ}. In particular, $$ r(g_v)\phi_v(0,u)= \rho_v(g_v)^{\frac{d}{2}}\delta_v(g_v)^{\frac{d}{2}} \phi_v(0,\det(g_v)^{-1}u),\quad g_v\in \gl(F_v). $$ Here $\delta_v$ and $\rho_v$ are introduced in \S\ref{sec notation}. We claim that there is a finite set $S'$ of non-archimedean places of $F$ such that for every non-archimedean $v\notin S'$, $$ r(g_v)\phi_v(0,u)= \delta_v(g_v)^{\frac{d}{2}} \phi_v(0,\det(g_v)^{-1}u),\quad g_v\in \gl(F_v). 
$$
In fact, there is a quadratic space $(V,q_0)$ over $F$ such that there is an isomorphism $(V_v,q_0) \to (\BV_v, q)$ for all but finitely many places $v$. Fix a full $O_F$-lattice $\Lambda$ in $V$. Then we can take $S'$ such that for any non-archimedean $v\notin S'$, the following holds:
\begin{enumerate}[(1)]
\item $v$ is unramified over $\QQ$;
\item the isomorphism $(V_v,q_0) \to (\BV_v, q)$ is available for $v$;
\item the lattice $\Lambda_v=\Lambda\otimes_{O_F}O_{F_v}$ of $V_v$ is self-dual with respect to $\psi_v$;
\item the Schwartz function $\phi_v$ is the standard characteristic function of $\Lambda_v\times O_{F_v}^\times$ via the isomorphism $(V_v,q_0) \to (\BV_v, q)$.
\end{enumerate}
Then the claim follows from an explicit computation of the Weil representation.
Combining all the places together, we have
$$ \sum_{u\in \mu_K^2\backslash F\cross} r(g)\phi(0,u)= \rho_\infty(g)^{\frac{d}{2}}\delta(g)^{\frac{d}{2}} \sum_{u\in \mu_K^2\backslash F\cross} \phi(0,\det(g)^{-1}u), \quad g\in 1_{S'}\gl(\adele^{S'}). $$
Note that the summation on the right-hand side is automorphic in $g\in\gla$; i.e. it is invariant under the left action of $\glf$ on $g\in\gla$. This finishes approximating the first term on the right-hand side of $E_0(0,g,\phi)_K$.
For the second term on the right-hand side, by the above result for all places $v\notin S'$, $W_{0,v}(0,g,u,\phi_v)$ is a multiple of $\delta(g_v)^{2-d} r(g_v)\phi_v(0,u)$, by a basic result on intertwining operators of principal series. Consequently, we have a similar approximation
$$ \sum_{u\in \mu_K^2\backslash F\cross} W_{0}(0,g,u,\phi)= \rho_\infty(g)^{\frac{d}{2}}\delta(g)^{2-\frac{d}{2}} \sum_{u\in \mu_K^2\backslash F\cross} W_{0}(0,1,\det(g)^{-1}u,\phi), \quad g\in 1_{S'}\gl(\adele^{S'}). $$
The summation on the right-hand side is again automorphic in $g\in\gla$.
Return to the non-singular pseudo-Eisenstein series $B^{(S)}_{\phi}(g)$. Our conclusion gives an approximation
$$ B^{(S)}_{\phi}(g)=E_{B}(g)+ \rho_\infty(g)^{\frac{d}{2}}\delta(g)^{\frac{d}{2}} f_1(g) + \rho_\infty(g)^{\frac{d}{2}}\delta(g)^{2-\frac{d}{2}} f_2(g) , \quad g\in 1_{S'}\gl(\adele^{S'}). $$
Here $E_{B}(g)$ is the full Eisenstein series, and $f_1(g), f_2(g)$ are automorphic in $g\in\gla$. Both sides are invariant under the action of some (non-empty) open compact subgroup $K'_{S'}$ of $\gl(\adele_{S'})$, so the approximation actually holds for all $g\in K'_{S'}\gl(\adele^{S'})$.

\subsubsection*{Review of pseudo-theta series}
For convenience, we briefly review the notion of pseudo-theta series and its approximation in \cite[\S 6.2]{YZ}.
Let $V$ be a positive definite quadratic space over $F$, and $V_0\subset V_1 \subset V$ be two subspaces over $F$ with induced quadratic forms. All spaces are assumed to be even-dimensional. We allow $V_0$ to be the empty set $\emptyset$, which is not a subspace in the usual sense. Let $S$ be a finite set of non-archimedean places of $F$, and $\phi^S \in \overline\CS(V(\adele^S)\times (\adele^{S})^{\times})$ be a Schwartz function with standard infinite components.
A \textit{pseudo-theta series} is a series of the form $$A^{(S)}_{\phi'}(g)=\sum_{u\in \mu^2\backslash F\cross} \sum_{x\in V_1-V_0} \phi_S'(g,x,u) r_{_V}(g)\phi^S(x,u), \ \quad g\in \gl(\adele).$$ We explain the notations as follows: \begin{itemize} \item The Weil representation $r_{_V}$ is not attached to the space $V_1$ but to the space $V$; \item $\phi_S'(g,x,u)=\prod_{v\in S} \phi'_v(g_v,x_v,u_v)$ as local product; \item For each $v\in S$, the function $$\phi'_v: \gl(F_v)\times (V_1-V_0)(F_v) \times F_v\cross \rightarrow \BC$$ is locally constant. And it is smooth in the sense that there is an open compact subgroup $K_v$ of $\gl(F_v)$ such that $$\phi'_v(g\kappa,x,u)=\phi'_v(g,x,u), \quad \forall (g,x,u)\in \gl(F_v)\times (V_1-V_0)(F_v)\times F_v\cross,\ \kappa\in K_v.$$ \item $\mu$ is a subgroup of $O_F\cross$ with finite index such that $\phi^S(x,u)$ and $\phi'_S(g,x,u)$ are invariant under the action $\alpha: (x,u)\mapsto (\alpha x, \alpha^{-2} u)$ for any $\alpha\in \mu$. This condition makes the summation well-defined. \item For any $v\in S$ and $g\in \gl(F_v)$, the support of $\phi'_v(g,\cdot,\cdot)$ in $(V_1-V_0)(F_v)\times F_v\cross$ is bounded. This condition makes the sum convergent. \end{itemize} The pseudo-theta series $A^{(S)}$ sitting on the triple $V_0\subset V_1 \subset V$ is called \textit{non-degenerate} (resp. \textit{degenerate}) if $V_1=V$ (resp. $V_1\neq V$ ). It is called \textit{non-singular} if for each $v\in S$, the local component $\phi'_v(1,x,u)$ can be extended to a Schwartz function on $V_1(F_v)\times F_v\cross$. Assume that $A^{(S)}_{\phi'}$ is non-singular. Then there are two usual theta series associated to $A^{(S)}$. View $\phi'_v(1,\cdot,\cdot)$ as a Schwartz function on $V_1(F_v)\times F_v\cross$ for each $v\in S$, and $\phi_w$ as a Schwartz function on $V_1(F_w)\times F_w\cross$ for each $w\notin S$. Then the theta series $$\theta_{A,1}(g)=\sum_{u\in \mu^2\backslash F\cross} \sum_{x\in V_1} r_{_{V_1}}(g)\phi_S'(1,x,u) r_{_{V_1}}(g)\phi^S(x,u)$$ is called \textit{the outer theta series associated to} $A^{(S)}_{\phi'}$. Note that the Weil representation $r_{V_1}$ is based on the quadratic space $V_1$. Replacing the space $V_1$ by $V_0$, we get the theta series $$\theta_{A,0}(g)=\sum_{u\in \mu^2\backslash F\cross} \sum_{x\in V_0} r_{_{V_0}}(g)\phi_S'(1,x,u) r_{_{V_0}}(g)\phi^S(x,u).$$ We call it \textit{the inner theta series associated to} $A^{(S)}_{\phi'}$. We set $\theta_{A,0}=0$ if $V_0$ is empty. We introduce these theta series because the difference between $\theta_{A,1}$ and $\theta_{A,0}$ somehow approximates $A^{(S)}$. More precisely, there exist a finite set $S'$ of non-archimedean places of $F$ and a (non-empty) open compact subgroup $K'_S$ of $\gl(\adele_{S'})$ satisfying $$ A^{(S)}_{\phi'}(g)=\rho_\infty(g)^{\frac{d-d_1}{2}}\delta(g)^{\frac{d-d_1}{2}} \theta_{A,1}(g) - \rho_\infty(g)^{\frac{d-d_0}{2}}\delta(g)^{\frac{d-d_0}{2}} \theta_{A,0}(g), \quad g\in K'_{S'}\gl(\adele^{S'}). $$ Here $d=\dim V$ and $d_i=\dim V_i$. The rough idea of the approximation is as follows. 
Approximate
$$A^{(S)}_{\phi'}(g)=\sum_{u\in \mu^2\backslash F\cross} \sum_{x\in V_1-V_0} \phi_S'(g,x,u) r_{_V}(g)\phi^S(x,u)$$
by
$$ \sum_{u\in \mu^2\backslash F\cross} \sum_{x\in V_1-V_0} r_V(g_S)\phi_S'(1,x,u) r_{_V}(g^S)\phi^S(x,u), $$
split the latter as
$$ \sum_{u\in \mu^2\backslash F\cross}\sum_{x\in V_1-V_0}=\sum_{u\in \mu^2\backslash F\cross}\sum_{x\in V_1}-\sum_{u\in \mu^2\backslash F\cross}\sum_{x\in V_0}, $$
and then compare the two double sums on the right-hand side with $\theta_{A,1}$ and $\theta_{A,0}$ respectively. We refer to \cite[\S6.2]{YZ} for a proof of this fact.

\subsubsection*{Key lemma}
The following is a generalization of \cite[Lemma 6.1(1)]{YZ} to a sum of pseudo-theta series and pseudo-Eisenstein series. There is also a generalization of \cite[Lemma 6.1(2)]{YZ}, but we omit it due to the complexity of the statement.
\begin{lem}\label{pseudo}
Let $\{A_\ell^{(S_\ell)}\}_\ell$ be a finite set of non-singular pseudo-theta series sitting on even-dimensional quadratic spaces $V_{\ell,0}\subset V_{\ell,1}\subset V_{\ell}$. Let $\{B_j^{(S_j')}\}_j$ be a finite set of non-singular pseudo-Eisenstein series sitting on even-dimensional quadratic spaces $V_{j}'$. Assume that the sum
$$f(g)=\sum_\ell A_\ell^{(S_\ell)}(g)+ \sum_j B_j^{(S_j')}(g)$$
is automorphic for $g\in\gl(\adele)$. Then
$$f(g)=\sum_{\ell\in L_{0,1}} \theta_{A_\ell,1}(g)+ \sum_j E_{B_j}(g).$$
Here $L_{0,1}$ is the set of $\ell$ such that $V_{\ell,1}=V_{\ell}$ or equivalently $A_\ell^{(S_\ell)}$ is non-degenerate.
\end{lem}
\begin{proof}
The proof is similar to that of \cite[Lemma 6.1(1)]{YZ}, with extra care taken of the pseudo-Eisenstein series. In fact, in the equation
$$f-\sum_\ell A_\ell^{(S_\ell)}-\sum_j B_j^{(S_j')}=0,$$
replace all $A_\ell^{(S_\ell)}$ and $B_j^{(S_j')}$ by their approximations described above. After recollecting these theta series according to the powers of $\rho_\infty(g)$ and $\delta(g)$, we end up with an equation of the form
\begin{eqnarray*}
\sum_{(k,k')\in I} \rho_\infty(g)^{k}\delta(g)^{k'} f_{k,k'}(g)=0, \quad \ \forall g\in K_{S}\gl(\adele^{S}).
\end{eqnarray*}
Here $I$ is a finite subset of $(k,k')\in \ZZ^2$ with $k\geq 0$ and $2|(k-k')$, $S$ is some finite set of non-archimedean places of $F$, $K_S$ is an open compact subgroup of $\gl(\adele_{S})$, and $f_{k,k'}$ is some automorphic form on $\gl(\adele)$ coming from combinations of $f$, the corresponding theta series, the corresponding Eisenstein series, and approximation of constant terms of the Eisenstein series. In particular,
$$f_{0,0}=f-\sum_{\ell\in L_{0,1}} \theta_{A_\ell,1}- \sum_j E_{B_j}$$
is the term we care about. We are going to prove that $f_{k,k'}=0$ for every $(k,k')\in I$. The proof is similar to that of \cite[Lemma 6.1]{YZ}, but it has a slight complication due to the possibility that $k\neq k'$.
In fact, it suffices to show $f_{k, k'}(g_0)=0$ for all $g_0\in \gl(\adele_f^{S})$, since $\gl(F)\gl(\adele_f^{S})$ is dense in $\gl(\adele)$. Fix such a $g_0\in \gl(\adele_f^{S})$. For any $g \in \gl(F)\cap K_S\gl(\adele^S)$, we have
$$\sum_{(k, k')\in I}\rho_\infty(gg_0)^{k}\delta(gg_0)^{k'} f_{k, k'}(gg_0)=0,$$
and thus
$$\sum_{(k, k')\in I}\rho_\infty(g)^{k}\delta(gg_0)^{k'} f_{k, k'}(g_0)=0$$
by the automorphy. These are viewed as linear equations in $(f_{k,k'}(g_0))_{(k, k')\in I}$. To show that the solutions are zero, we only need to find sufficiently many $g$ to get enough independent equations.
We first find some special $g$ to simplify the equation.
The intersection $K_S\gl(\adele^S) \cap g_0 \gl(\widehat O_F)g_0^{-1}$ is still an open compact subgroup of $\gl(\adele)$. For any $g\in \gl(F)\cap (K_S\gl(\adele^S) \cap g_0 \gl(\widehat O_F)g_0^{-1})$, we have
$$gg_0=g_0 \cdot g_0^{-1} g g_0\in g_0\gl(\widehat O_F).$$
Then $\delta_f(gg_0)=\delta_f(g_0)$, and our linear equation simplifies as
$$\sum_{(k, k')\in I}\rho_\infty(g)^{k}\delta_\infty(g)^{k'} \delta_f(g_0)^{k'}f_{k, k'}(g_0)=0.$$
It remains to find sufficiently many $g$ such that the resulting equations imply $\delta_f(g_0)^{k'}f_{k, k'}(g_0)=0$ for every $(k,k')\in I$.
To be more explicit, set $g=\matrixx{1}{a}{b}{1+ab}$ for $a,b\in \ZZ$. Note that the proof of \cite[Lemma 6.1]{YZ} uses a simpler matrix, corresponding to the case $a=0$ here. There is a positive integer $N$ such that if $a,b$ are divisible by $N$, then $g$ lies in the intersection $\gl(F)\cap (K_S\gl(\adele^S) \cap g_0 \gl(\widehat O_F)g_0^{-1})$ as required.
The Iwasawa decomposition in $\gl(\RR)$ gives
$$\matrixx{1}{a}{b}{1+ab}= \matrixx{\frac{1}{\sqrt{c}}}{\frac{a+b+a^2b}{\sqrt{c}}}{}{\sqrt{c}} \matrixx{\frac{1+ab}{\sqrt{c}}}{-\frac{b}{\sqrt{c}}}{\frac{b}{\sqrt{c}}}{ \frac{1+ab}{\sqrt{c}}} ,$$
where $c=(1+ab)^2+b^2$. Then we have
$$\rho_v(g)= \frac{1+ab-ib}{\sqrt{c}},\quad \delta_v(g)=\frac{1}{\sqrt{c}}$$
at every archimedean place $v$ of $F$. Thus the equation becomes
$$\sum_{(k, k')\in I} \left(\frac{1+ab-ib}{\sqrt{c}}\right)^{nk} \left(\frac{1}{\sqrt{c}}\right)^{nk'} \delta_f(g_0)^{k'}f_{k, k'}(g_0)=0\quad \forall a,b\in N\ZZ.$$
Here $n=[F:\QQ]$. Taking advantage of the property $2|(k-k')$, set $k''=-(k+k')/2$. The relation becomes
$$\sum_{(k, k')} (1+ab-ib)^{nk} c^{nk''} \delta_f(g_0)^{k'}f_{k, k'}(g_0)=0.$$
Note $c=(1+ab-ib)(1+ab+ib)$. So the problem is reduced to the following one.

\emph{ Let $J$ be a finite subset of $\ZZ^2$. Assume that $h:J\to \CC$ is a map satisfying the equation
$$\sum_{(k_1, k_2)\in J} (1+ab+ib)^{k_1}(1+ab-ib)^{k_2} h(k_1,k_2)=0, \quad \forall a,b\in N\ZZ.$$
Then $h(k_1,k_2)=0$ for every $(k_1, k_2)\in J$.}

To prove this, multiply both sides of the equation by $(1+ab+ib)^m(1+ab-ib)^m$ for a sufficiently large integer $m$; we may thus assume that $k_1\geq 0, k_2\geq0$ for every $(k_1, k_2)\in J$. Now the left-hand side of the equation is a polynomial in $a,b$, which vanishes for all $a,b\in N\ZZ$. This implies that the left-hand side is 0 as a polynomial in $a,b$, so its value is 0 for any $a,b\in\CC$. Set $(x,y)=(1+ab, ib)$. We see that
$$\sum_{(k_1, k_2)\in J} (x+y)^{k_1}(x-y)^{k_2} h(k_1,k_2)=0$$
for any $(x,y)\in \CC^2$ with $y\neq 0$. Then it is an identity of polynomials in $x,y$. A further substitution $(z,w)=(x+y,x-y)$ shows that
$$\sum_{(k_1, k_2)\in J} z^{k_1}w^{k_2} h(k_1,k_2)=0$$
as a polynomial in $(z,w)$. It follows that $h(k_1,k_2)=0$ for every $(k_1, k_2)\in J$. This finishes the proof.
\end{proof}

\subsection{Example by local quaternion algebras}
In the case of quaternion algebras, we identify an important class of functions $B_{a,v}(1,u)$ which make the pseudo-Eisenstein series non-singular.
Let $v$ be a non-archimedean place of $F$. Let $(M_2(F_v),q)$ (resp. $(D_v,q)$) be the matrix algebra (resp. the unique quaternion division algebra) over $F_v$ with the reduced norm. Consider the map
$$ \CW_v: \OCS(M_2(F_v) \times F_v^\times) \oplus \OCS(D_v\times F_v^\times) \lra C^\infty(F_v^\times \times F_v^\times) $$
given by
$$ (\phi^+, \phi^-) \longmapsto W_{a,v}(0,1,u,\phi^+)+ W_{a,v}(0,1,u,\phi^-).
$$
Here $C^\infty(F_v^\times \times F_v^\times)$ denotes the space of locally constant functions with complex values, and the last expression is viewed as a function of $(a,u)\in F_v^\times \times F_v^\times$.
\begin{lem} \label{whittaker image2}
Let $\Psi\in C^\infty(F_v^\times \times F_v^\times)$ be a linear combination of the function $1_{O_{F_v} \times O_{F_v}^\times}$ and a locally constant and compactly supported function on $F_v^\times \times F_v^\times$. Then $\Psi$ has a preimage $(\phi^+, \phi^-)$ satisfying
$$\phi^+(0,u)+\phi^-(0,u)=0, \quad \forall u\in F_v\cross.$$
\end{lem}
\begin{proof}
The problem is immediately reduced to two cases:
\begin{itemize}
\item[(a)] $\Psi$ is a locally constant and compactly supported function on $F_v^\times \times F_v^\times$;
\item[(b)] $\Psi=1_{O_{F_v} \times O_{F_v}^\times}$.
\end{itemize}
As preparation, recall that the local Siegel--Weil formula in \cite[Proposition 2.9(2)]{YZZ} gives
$$ W_{a,v}(0,1,u, \phi)= \epsilon(B_v)\ |a|_v \int_{B_v^1} \phi(hx_a,u)dh, \quad a,u\in F_v^\times. $$
Here $B_v^1=\{x\in B_v:q(x)=1\}$, the pair $(B_v, \phi)$ can be either $(M_2(F_v), \phi^+)$ or $(D_v, \phi^-)$, and $x_a\in B_v$ is any element satisfying $uq(x_a)=a$.
Now we treat case (a). Note that $D_v^1$ is compact. We will actually find a preimage of the form $(0,\phi^-)$, where $\phi^-$ is invariant under the action of $D_v^1$. In fact, the local Siegel--Weil formula gives
$$ \phi^-(x,u)=-\frac{1}{\vol(D_v^1)|uq(x)|_v}W_{uq(x),v}(0,1,u, \phi^-) =-\frac{1}{\vol(D_v^1) |uq(x)|_v}\Psi(uq(x),u). $$
It is a Schwartz function since $\Psi(a,u)$ is assumed to be compactly supported in $a$. It is also clear that $\phi^-(0,u)=0$ for any $u\in F_v\cross$.
For case (b), the local Siegel--Weil formula gives
$$ W_{a,v}(0,1,u, 1_{O_{D_v}\times \ofv\cross}) =-|d_v|^{\frac{3}{2}} N_v^{-1}(1+N_v^{-1})\cdot |a|_v\cdot 1_{O_{F_v}\times O_{F_v}^\times}(a,u). $$
Here $O_{D_v}$ denotes the maximal order of $D_v$, $d_v\in F_v$ is the local different of $F$ over $\BQ$, and $\vol(D_v^1)=|d_v|^{\frac{3}{2}}N_v^{-1}(1+N_v^{-1})$ as normalized in \cite[\S1.6.2]{YZZ}. On the other hand,
$$ W_{a,v}(0,1,u, 1_{M_2(O_{F_v})\times \ofv\cross}) =|a|_v\cdot \vol(\SL_2(O_{F_v})) \cdot |\SL_2(O_{F_v})\bs M_2(O_{F_v})(a)|\cdot 1_{O_{F_v}\times O_{F_v}^\times}(a,u). $$
Here $M_2(O_{F_v})(a)$ denotes matrices in $M_2(O_{F_v})$ of determinant $a$. Set $r=v(a)\geq 0$, and denote
$$ M_2(\ofv)_r=\{x \in M_2(\ofv): v(\det(x))=r\}. $$
Then we have
$$ \SL_2(O_{F_v})\bs M_2(O_{F_v})(a)=\GL_2(O_{F_v})\bs M_2(O_{F_v})_r. $$
The last coset space corresponds exactly to the classical Hecke correspondence $T(p_v^r)$, and its cardinality is $1+N_v+\cdots+ N_v^r$. Combining this with $\vol(\SL_2(O_{F_v}))=|d_v|^{\frac{3}{2}}(1-N_v^{-2})$ as normalized in \cite[\S1.6.2]{YZZ}, we end up with
$$ W_{a,v}(0,1,u, 1_{M_2(O_{F_v})\times \ofv\cross}) =|d_v|^{\frac{3}{2}} N_v^{-1}(1+N_v^{-1})\cdot (N_v-|a|_v)\cdot 1_{O_{F_v}\times O_{F_v}^\times}(a,u). $$
The linear combination of these two expressions gives a preimage
$$ \phi^+= |d_v|^{-\frac{3}{2}} (1+N_v^{-1})^{-1}\cdot 1_{M_2(O_{F_v})\times \ofv\cross}, \quad\ \phi^-=-|d_v|^{-\frac{3}{2}} (1+N_v^{-1})^{-1}\cdot 1_{O_{D_v}\times \ofv\cross}. $$
It is clear that $\phi^+(0,u)+\phi^-(0,u)=0$ for any $u\in F_v\cross$ in this case.
\end{proof}
\begin{remark}
The identity $\phi^+(0,u)+\phi^-(0,u)=0$ in case (b) of the above proof is not as accidental as the computational proof might suggest.
In fact, we claim that for any image
$$\Psi(a,u)=W_{a,v}(0,1,u,\phi^+)+ W_{a,v}(0,1,u,\phi^-),$$
if $\Psi$ can be extended to a locally constant and compactly supported function on $F_v \times F_v^\times$ (a stronger requirement than compact support in $F_v^\times \times F_v^\times$), then $\phi^+(0,u)+\phi^-(0,u)=0$.
To prove this, for $b\in F_v^\times$ set
$$ g=n(b)m(-b)wn(b)=\matrixx{1}{}{b^{-1}}{1}. $$
The right-hand side tends to $1$ as the valuation $v(b)\to -\infty$. We have
$$ r(g)\phi^+(0,u)+r(g)\phi^-(0,u) =|b|_v^2\cdot \big( r(wn(b))\phi^+(0,u)+r(wn(b))\phi^-(0,u) \big). $$
Note that $r(wn(b))\phi^+(0,u)+r(wn(b))\phi^-(0,u)$ is the Fourier transform of $\Psi(a,u)$, so it is also a locally constant and compactly supported function in $b\in F_v$. In particular, it is zero if $v(b)$ is sufficiently negative. This proves $\phi^+(0,u)+\phi^-(0,u)=0$.
\end{remark}

\section{Derivative series}
\label{sec derivative series}
The goal of this section is to study the holomorphic projection of the derivative of some mixed Eisenstein--theta series. This section is based on \cite[\S7]{YZ} and \cite[\S 6]{YZZ}, but the situation is more complicated since we do not have \cite[Assumption 7.1]{YZ} or equivalently \cite[Assumption 5.4]{YZZ}.

\subsection{Derivative series}
\label{sec 3.1}
Let $F$ be a totally real field, and $E$ be a totally imaginary quadratic extension of $F$. Denote by $\BA=\BA_F$ the ring of adeles of $F$. Let $\BB$ be a totally definite incoherent quaternion algebra over $\BA$ with an embedding $E_\BA\to \BB$ of $\BA$-algebras. Fix a Schwartz function $\phi\in \ol\CS (\BB\times \BA^\times)$ invariant under $U\times U$ for some open compact subgroup $U$ of $\bfcross$.
Start with the mixed theta-Eisenstein series
\begin{equation*}
I(s, g, \phi)_U = \sumu \sum_{\gamma \in P^1(F)\bs \SL_2(F)} \delta (\gamma g)^s \sum_{x_1\in E} r(\gamma g)\phi (x_1, u), \quad g\in\gla.
\end{equation*}
It was first introduced in \cite[\S 5.1.1]{YZZ}.
There exists an element $\fj\in \BB^\times$ such that $\BB=E_\BA+E_\BA\fj$ is an orthogonal decomposition. This follows from the local version described in \S\ref{sec theta eisenstein}. Assume further that $\phi=\phi_1\otimes \phi_2$ for $\phi_1\in \ol\CS (E_\BA\times \BA^\times)$ and $\phi_2\in\ol\CS (E_\BA\fj\times \BA^\times)$ in the style of \S\ref{sec theta eisenstein}; i.e.
$$ \phi(x_1+x_2,u)=\phi_1(x_1,u)\phi_2(x_2,u), \quad x_1\in E_\BA,\ x_2\in E_\BA\fj,\ u\in \across. $$
Then the splitting of the Weil representation gives a splitting
\begin{equation*}
I(s, g, \phi)_U=\sum_{u\in\mu_U^2\bs F\cross} \theta(g, u, \phi_1)\ E(s,g, u, \phi_2),
\end{equation*}
where for any $g\in\gla$, the theta series and the Eisenstein series are given by
\begin{eqnarray*}
\theta(g, u, \phi_1)&=& \sum_{x_1\in E} r(g)\phi_1(x_1, u), \\
E(s,g, u, \phi_2)&=& \sum_{\gamma \in P^1(F)\bs \SL_2(F)} \delta (\gamma g)^s r(\gamma g)\phi_2(0, u).
\end{eqnarray*}
The derivative series $\pr I'(0,g,\phi)$ is the holomorphic projection of the derivative $I'(0,g,\phi)$ of $I(s,g,\phi)$. We will start with some general results about the holomorphic projection.

\subsubsection*{Holomorphic projection}
Recall that the holomorphic projection is the orthogonal projection
$$\pr: \CA(\gl(\adele), \omega)\longrightarrow \CA_0^{(2)}(\gl(\adele), \omega)$$
with respect to the Petersson inner product.
Here $\omega:F^\times\bs\across\to \BC^\times$ is a Hecke character with trivial archimedean components, $\CA(\gl(\adele), \omega)$ is the space of automorphic forms of central character $\omega$, and $\CA_0^{(2)}(\gl(\adele), \omega)$ is the subspace of holomorphic cusp forms of parallel weight two. It induces a projection
$$\pr: \bigoplus_\omega\CA(\gl(\adele), \omega)\longrightarrow \bigoplus_\omega\CA_0^{(2)}(\gl(\adele), \omega).$$
As in \cite[\S7.1]{YZ}, by decomposing $I'(0,g,\phi)$ into a finite direct sum of automorphic forms with (distinct) central characters, we see that $ I'(0,g,\phi)$ lies in $\oplus_\omega\CA(\gl(\adele), \omega)$. Thus the holomorphic projection $\pr I'(0,g,\phi)$ is a well-defined holomorphic cusp form of parallel weight two in $g\in\gla$.
We are still going to apply the formula in \cite[Proposition 6.12]{YZZ} to compute $\pr I'(0,g,\phi)$. To recall \cite[Proposition 6.12]{YZZ}, we start with the operator $\pr'$ defined right after the proposition. For convenience, we first introduce the corresponding operator $\pr'_\psi $ for Whittaker functions.
For any (Whittaker) function $\alpha:\gl(\RR)\to \BC$ with $\alpha(n(b)g)=\psi(b)\alpha(g)$ for any $b\in \RR$ and $g\in\gl(\RR)$, define
$$(\pr'_\psi \alpha)(g):=4\pi W^{(2)}(g)\cdot \quasilim \int_{Z(\RR)N(\RR)\bs \GL_2(\RR)} \delta(h)^s \alpha(h)\overline{W^{(2)}(h)}dh, $$
if the right-hand side is convergent. Here $W^{(2)}(g)$ is the standard Whittaker function of weight two as in \cite[\S4.1.1]{YZZ}, and $\quasilim$ is the constant term in the Laurent expansion at $s=0$. The definition extends to global Whittaker functions $\alpha:\gl(\adele)\to \BC$ by
$$(\pr'_\psi \alpha)(g)=(4\pi)^{[F:\QQ]}W_\infty^{(2)}(g_\infty)\cdot \quasilim \int_{Z(F_\infty)N(F_\infty)\bs \GL_2(F_\infty)} \delta(h)^s \alpha(g_fh)\overline{W^{(2)}(h)}dh $$
if it is convergent.
For any function $f:\gl(\adele)\to \BC$, we first take the Whittaker function
$$ f_\psi(g)=\int_{N(F)\bs N(\adele)} f(n(b)g)\psi(-b)db, $$
and set
$$ (\pr'f)(g)=\sum_{a\in F^\times} (\pr'_\psi f_\psi)(d^*(a)g), $$
if both are convergent in a suitable sense. Here $d^*(a)=\matrixx{a}{}{}{1}$ is as in \S\ref{sec notation}. Finally, \cite[Proposition 6.12]{YZZ} asserts that if $f$ is an automorphic form satisfying a certain growth condition, then
$$ \pr f=\pr'f. $$
In other words, the above formula really computes the holomorphic projection of $f$.
Go back to $\pr I'(0,g,\phi)$. In our previous works, \cite[Assumption 7.1]{YZ} or \cite[Assumption 5.4]{YZZ} makes $I'(0,g,\phi)$ satisfy the growth condition of \cite[Proposition 6.12]{YZZ}, but we do not make the assumption here, and we will see that the growth condition is not satisfied after dropping the assumption. Then the final result has an extra term contributed by the growth of $I'(0,g,\phi)$, as remarked in \cite[\S6.4.3]{YZZ}.
To track the growth of $I'(0,g,\phi)$, we are going to apply \cite[Lemma 6.13]{YZZ}. For this, recall the absolute constant term
$$I_{00}(s, g, \phi)=\sumu I_{00}(s, g, u, \phi),$$
where
$$I_{00}(s, g, u, \phi)= \theta_0(g,u,\phi_1) E_0(s,g,u,\phi_2)$$
is the product of the constant terms. It follows that
$$I_{00}(s, g, u, \phi)= \delta(g)^s r(g)\phi(0,u)+r(g)\phi_1(0,u) W_0(s, g,u,\phi_2)$$
is the sum of two principal series (by restricting to $\SL_2(\BA)$).
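To spell out the last identity: with the familiar expressions $\theta_0(g,u,\phi_1)=r(g)\phi_1(0,u)$ and $E_0(s,g,u,\phi_2)=\delta(g)^s r(g)\phi_2(0,u)+W_0(s,g,u,\phi_2)$ for the two constant terms, and with $r(g)\phi(0,u)=r(g)\phi_1(0,u)\,r(g)\phi_2(0,u)$ for $\phi=\phi_1\otimes\phi_2$, one simply expands
$$
\theta_0(g,u,\phi_1)\,E_0(s,g,u,\phi_2)
= \delta(g)^s r(g)\phi(0,u)+r(g)\phi_1(0,u)\,W_0(s,g,u,\phi_2).
$$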
For any $g'=\matrixx{a}{b}{}{d}$ with $a,d\in\AA^\times, b\in \AA$, we have $$ \delta(g'g)^s r(g'g)\phi(0,u)=\left|\frac{a}{d}\right|_\AA^{\frac s2+2}\delta(g)^s r(g)\phi(0,(ad)^{-1}u) $$ and $$ r(g'g)\phi_1(0,u) W_0(s,g'g,u,\phi_2) =\left|\frac{a}{d}\right|_\AA^{-\frac s2+2} r(g)\phi_1(0,(ad)^{-1}u) W_0(s,g,(ad)^{-1}u,\phi_2). $$ Here $|c|_\AA=\prod_v |c_v|_v$ for $c\in \across$. Let $\CJ(s, g, u, \phi)$ be the Eisenstein series formed by $I_{00}(s, g, u,\phi)$; i.e. $$\CJ(s, g, u, \phi)=\sum _{\gamma \in P^1(F)\bs \SL_2(F)}I_{00}(s, \gamma g, u,\phi).$$ Denote $$\CJ(s, g, \phi)_U=\sumu \CJ(s, g, u, \phi),$$ which will also be abbreviated as $\CJ(s, g, \phi)$. By \cite[Lemma 6.13]{YZZ}, the difference $$I'(0,g,\phi)_U-I_{00}'(0,g,\phi)_U$$ satisfies the growth condition of \cite[Proposition 6.12]{YZZ}. Note that the original statement is about the twisting of the difference by some character $\chi$, but a similar proof by decomposing the difference in terms of central characters works for the current situation. A similar argument proves that $$\CJ'(0,g,\phi)_U-I_{00}'(0,g,\phi)_U$$ also satisfies the growth condition. As a consequence, $$I'(0,g,\phi)_U-\CJ'(0,g,\phi)_U$$ satisfies the growth condition. Therefore, we have \begin{multline*} \pr(I'(0, g,\phi))=\pr(I'(0, g,\phi)-\CJ'(0, g, \phi)) \\ =\pr'(I'(0, g,\phi)-\CJ'(0, g, \phi)) =\pr'(I'(0, g,\phi))-\pr'(\CJ'(0, g, \phi)), \end{multline*} where the operator $\pr'$ is defined by the algorithm as recalled above. The term $\pr'(I'(0, g,\phi))$ is computed exactly as in \cite[Theorem 7.2]{YZ}. For its importance, we summarize the result in the following, and we will recall the notations after the statement. \begin{thm} \label{analytic series} Assume that $\phi$ is standard at infinity. Then $$ \pr I'(0, g,\phi)_U =\pr' I'(0, g,\phi)_U-\pr'\CJ'(0, g, \phi)_U, $$ where $\pr' I'(0, g,\phi)_U$ has the same expression as that of $\pr I'(0, g,\phi)_U$ in \cite[Theorem 7.2]{YZ}. Namely, \begin{align*} \pr' I'(0,g,\phi)_U =& -\sum_{v|\infty}\overline {I'}(0,g,\phi)(v)-\sum_{v\nmid\infty \ \nonsplit}I'(0,g,\phi)(v)\\ & -c_1 \sumu \sum_{y\in E^\times} r(g)\phi(y,u) -\sum_{v\nmid\infty} \sumu\sum_{y\in E^\times} c_{\phi_v}(g,y,u)\, r(g)\phi^v(y,u)\\ &+ \sumu \sum_{y\in E^\times} (2\log \delta_f(g_f)+ \log|uq(y)|_f)\ r(g)\phi(y,u), \end{align*} and the right-hand side is explained in the following. \begin{itemize} \item[(1)] For any archimedean $v$, \begin{align*} \overline {I'}(0,g,\phi)(v) &= 2 \barint_{C_U} \overline \CK^{(v)}_{\phi}(g,(t,t))dt, \\ \overline \CK^{(v)}_{\phi}(g,(t_1,t_2)) &= w_U \sum_{a\in F\cross} \quasilim \sum_{y\in \mu_U\backslash (B(v)_+\cross -E\cross)} r(g,(t_1,t_2)) \phi(y)_a\ k_{v, s}(y), \\ k_{v, s}(y) &= \frac{\Gamma(s+1)}{2(4\pi)^{s}} \int_1^{\infty} \frac{1}{t(1-\lambda(y)t)^{s+1}} dt, \end{align*} where $\lambda(y)=q(y_2)/q(y)$ is viewed as an element of $F_v$. \item[(2)] For any non-archimedean $v$ which is nonsplit in $E$, \begin{align*} I'(0,g,\phi)(v) &= 2 \barint_{C_U} \CK^{(v)}_{\phi}(g,(t,t))dt, \\ \CK^{(v)}_{\phi}(g,(t_1,t_2)) &= \sum_{u\in \mu_U^2\backslash F\cross} \sum_{y\in B(v)-E} k_{r(t_1,t_2)\phi_v}(g,y,u) r(g,(t_1,t_2)) \phi^v(y,u), \\ k_{\phi_v}(g,y,u)&= \frac{L(1,\eta_v)}{\mathrm{vol}(E_v^1)} r(g) \phi_{1,v}(y_1,u) {W_{uq(y_2),v}^\circ}'(0,g,u,\phi_{2,v}), \quad y_2\neq 0. \end{align*} Here the last identity holds under the relation $\phi_v= \phi_{1,v}\otimes \phi_{2,v}$, and the definition extends by linearity to general $\phi_v$. 
\item[(3)] The constant $$ c_1 =2\frac{L'_f(0,\eta)}{L_f(0,\eta)} +\log|d_E/d_F|.$$ \item[(4)] Under the relation $\phi_v= \phi_{1,v}\otimes \phi_{2,v}$, for $y\in E_v$ and $u\in \fvcross$, $$c_{\phi_v}(g,y,u)=r(g)\phi_{1,v}(y,u) W_{0,v}^{\circ}\ '(0,g,u, \phi_{2,v}) + \log \delta(g_v)r(g)\phi_v(y,u).$$ The definition extends by linearity to general $\phi_v$. \end{itemize} \end{thm} All the notations of the theorem are compatible with those of \cite{YZZ,YZ}. For convenience of readers, we recall them in the following. \begin{enumerate}[(a)] \item In (1) and (2), the set $C_U=E\cross\bs E^\times_{\af} / (U\cap E^\times_{\af})$ is a finite abelian group, and the averaged integration $\ds\barint_{C_U}$ is just the usual average over this finite group. \item For any place $v$ of $F$ nonsplit in $E$, $B(v)$ denotes the nearby quaternion algebra over $F$ obtained by switching the Hasse invariant of $\BB$ at $v$. Fix an embedding $E\to B(v)$ of $F$-algebras. For any place $w\neq v$, fix an isomorphism $i_w:B(v)_w\simeq \BB_w$ compatible with the embeddings $E\to B(v)$ and $E_\AA\to \BB$. This gives identifications $B(v)_{\AA^v}\simeq \BB^v$ and $\OCS(B(v)_{\AA^v}\times (\AA^v)^\times)\simeq \OCS(\bb^v\times (\AA^v)^\times). $ Thus we can view $\phi^v$ as an element of $\OCS(\bb^v\times (\AA^v)^\times)$. These identifications also appear in \S\ref{choices}. \item The Weil representation $r(g,(t_1,t_2))$ takes the convention of the local case in \S\ref{sec theta eisenstein}. Moreover, in (1), we further take the convention $$r(g,(t_1,t_2)) \phi(y)_a=r(g^v,(t_1^v,t_2^v)) \phi^v(y, aq(y)^{-1})\, W^{(2)}_{a_v}(g_v).$$ Here $W^{(2)}_{a_v}(g_v)$ is the standard holomorphic Whittaker function of weight two as in \cite[\S4.1.1]{YZZ}. \item In (1), $B(v)_+^\times$ denotes the subgroup of elements of $B(v)$ with totally positive norms. \item In (1) and (2), with the fixed embedding $E\to B(v)$, and we have an orthogonal decomposition $B(v)=E+E j(v)$. Under the decomposition, we can write $y=y_1+y_2$ for $y\in B(v)$. This explains $y_1, y_2$ (depending on $y$), and also explains $\lambda(y)=q(y_2)/q(y)$. \item In (2) and (4), the term ${W_{a,v}^\circ}(s,g,u,\phi_{2,v})$ is normalized as in \cite[\S7.1]{YZ}. Namely, for $a\in F_v\cross$, define \begin{eqnarray*} W_{a,v}^{\circ}(s, g,u,\phi_{2,v}) = \gamma_{u,v}^{-1} W_{a,v}(s, g,u,\phi_{2,v}). \end{eqnarray*} Here $\gamma_{u,v}$ is the Weil index of $(E_v\jv, uq)$, where $\BB_v=E_v+E_v\jv$ is an orthogonal decomposition. For $a=0$, define \begin{eqnarray*} W_{0,v}^{\circ}(s, g,u,\phi_{2,v}) = \gamma_{u,v}^{-1}\frac{L(s+1,\eta_v)}{L(s,\eta_v)} |D_v|^{-\frac{1}{2}}|d_v|^{-\frac{1}{2}} W_{0,v}(s, g,u,\phi_{2,v}). \end{eqnarray*} Here as in \S\ref{sec notation}, $d_v\in F_v$ is the local different of $F$ over $\BQ$, and $D_v\in F_v$ is the discriminant of the quadratic extension $E_v$ of $F_v$. \end{enumerate} \subsubsection*{Contribution of the Eisenstein series} Now we compute the term $\pr'\CJ'(0, g, \phi)$. For any $\phi \in\overline\CS(\BB\times \BA^\times)$ of the form $\phi=\phi_1\otimes \phi_2$, $$I_{00}'(0, g, u, \phi)= \log\delta(g)\ r(g)\phi(0,u)+r(g)\phi_1(0,u) W_0'(0, g,u,\phi_2).$$ Recall that the computation in \cite[Proposition 6.7]{YZZ} or that in \cite[\S7.1, p. 
586]{YZ} gives
\begin{align*}
W_0'(0,g,u,\phi_2) = -c_0 r(g)\phi_2(0,u) - \sum_v r(g^v)\phi_{2}^v(0,u) W_{0,v}^{\circ}\ '(0,g_v,u,\phi_{2,v})
\end{align*}
with the constant
$$c_0=\der\left(\log \frac{L(s,\eta)}{L(s+1,\eta)}\right)=2\frac{L'(0,\eta)}{L(0,\eta)} +\log|d_E/d_F|.$$
Here $L(s,\eta)$ is the completed L-function with gamma factors, and we have used the functional equation
$$ L(1-s,\eta)=|d_E/d_F|^{s-\frac12} L(s,\eta). $$
It follows that
\begin{eqnarray*}
&& I_{00}'(0, g, u, \phi) \\
&=& \log\delta(g)\cdot r(g)\phi(0,u)- c_0 r(g)\phi(0,u) -\sum_v r(g^v)\phi^{v}(0,u)\cdot r(g_v)\phi_{1,v}(0,u) W_{0,v}^{\circ}\ '(0,g_v,u,\phi_{2,v})\\
&=& 2\log\delta(g)\cdot r(g)\phi(0,u)- c_0 r(g)\phi(0,u) -\sum_v r(g^v)\phi^{v}(0,u)c_{\phi_v}(g,0,u),
\end{eqnarray*}
where as in Theorem \ref{analytic series}(4), for $y\in E_v$ (including the case $y=0$) and $g\in \gl(F_v)$,
\begin{eqnarray*}
c_{\phi_v}(g,y,u)&=& r(g)\phi_{1,v}(y,u) W_{0,v}^{\circ}\, '(0,g,u,\phi_{2,v}) + \log \delta(g_v)r(g)\phi_v(y,u) \\
&=& r(g)\phi_{1,v}(y,u) \big( W_{0,v}^{\circ}\, '(0,g,u,\phi_{2,v}) + \log \delta(g_v)r(g)\phi_{2,v}(0,u) \big).
\end{eqnarray*}
Here the sum over $v$ is actually a finite sum, since $c_{\phi_v}(g,y,u)=0$ for all archimedean $v$ and for all but finitely many non-archimedean $v$ (uniformly in $(g,y,u)$) by \cite[Proposition 6.1(1)]{YZZ} and \cite[Lemma 7.6(1)]{YZ}.
One checks that $c_{\phi_v}(g,0,u)$ is a principal series in the sense that
$$ c_{\phi_v}(m(a)n(b)g,0,u)=|a|_v^{2}\ c_{\phi_v}(g,0,u), \quad a\in F_v^\times, \ b\in F_v. $$
This is a consequence of the basic fact
$$ W_{0,v}^{\circ}(s,m(a)n(b)g,u,\phi_{2,v})=|a|_v^{1-s}\eta_v(a) W_{0,v}^{\circ}(s,g,u,\phi_{2,v}) $$
and the result
$$ W_{0,v}^{\circ}(0,g,u,\phi_{2,v})=r(g)\phi_{2,v}(0,u) $$
of \cite[Proposition 6.1]{YZZ}.
We then introduce the Eisenstein series
\begin{align*}
E(s, g, u, \phi)=&\sum _{\gamma \in P^1(F)\bs \SL_2(F)} \delta(\gamma g)^s r(\gamma g)\phi(0,u),\\
C(s, g, u, \phi)(v)=& \sum _{\gamma \in P^1(F)\bs \SL_2(F)} \delta(\gamma g)^s c_{\phi_v}(\gamma g,0,u)\ r(\gamma g^v)\phi^v(0,u),
\end{align*}
and
\begin{align*}
E(s, g, \phi)_U=&\sumu E(s, g, u,\phi),\\
C(s, g, \phi)_U(v)=&\sumu C(s, g, u,\phi)(v).
\end{align*}
Denote also
\begin{align*}
C(s, g, u, \phi)=& \sum_{v\nmid\infty} C(s, g, u, \phi)(v),\\
C(s, g, \phi)_U=& \sum_{v\nmid\infty} C(s, g,\phi)_U(v).
\end{align*}
Note that both summations have only finitely many nonzero terms since $c_{\phi_v}(g,0,u)=0$ for all but finitely many $v$ as mentioned above. We will usually suppress the subscript $U$ in $E(s, g, \phi)_U$ and $C(s, g, \phi)_U$.
Return to the derivative
$$\CJ'(0,g,\phi)=\sumu \CJ'(0,g,u,\phi)$$
with
$$ \CJ'(0,g,u,\phi)=\sum_{\gamma \in P^1(F)\bs \SL_2(F)} I_{00}'(0, \gamma g, u, \phi). $$
Then the Eisenstein series constructed from
\begin{eqnarray*}
I_{00}'(0, g, u, \phi) = 2\log\delta(g)\cdot r(g)\phi(0,u)- c_0 r(g)\phi(0,u) -\sum_v r(g^v)\phi^{v}(0,u)c_{\phi_v}(g,0,u)
\end{eqnarray*}
is given by
\begin{align*}
\CJ'(0,g,\phi) = 2 E'(0,g,\phi)-c_0 E(0,g,\phi)- C(0, g,\phi).
\end{align*}
Applying the formula for $\pr'$, we have the following expression.
\begin{pro} \label{analytic series extra}
Assume that $\phi$ is standard at infinity. Then
$$ \pr'\CJ'(0, g, \phi) =- (c_0+(1+\log 4)[F:\QQ] ) E_*(0,g,\phi)- C_*(0, g,\phi) +2\sum_{v\nmid\infty} E'(0,g,\phi)(v).$$
Here $E_*$ and $C_*$ are the non-constant parts of the Eisenstein series $E$ and $C$.
For any $v\nmid \infty$,
$$ E'(0,g,\phi)(v) =\sumu E'(0,g,u,\phi)(v),$$
where
$$ E'(0,g,u,\phi)(v) =\sum_{a\in F^\times} W_a^v(0,g,a^{-1}u,\phi^v) \left(W_{a,v}'(0,g,a^{-1}u,\phi_v)-\frac12\log|a|_v\cdot W_{a,v}(0,g,a^{-1}u,\phi_v)\right).$$
\end{pro}
\begin{proof}
Recall the non-constant part
$$ E_*(s, g, \phi)=\sumu E_*(s, g, u,\phi). $$
Recall the Fourier expansions
$$ E_*(s, g, u,\phi)=\sum_{a\in F^\times} W_a(s, g, u,\phi) $$
and
$$ E_*(s, g,\phi)=\sum_{a\in F^\times} W_a(s, g, \phi). $$
We can recover $E_*(s, g, \phi)$ from the Whittaker functions at $a=1$ by
$$ E_*(s, g,\phi)= \sum_{a\in F^\times} W_1(s, d^*(a)g,\phi) =\sumu \sum_{a\in F^\times} W_1(s, d^*(a)g, u,\phi). $$
By linearity,
$$ \pr'\CJ'(0, g, \phi) =2 \pr'E'(0,g,\phi)-c_0 \pr' E(0,g,\phi)- \pr' C(0, g,\phi). $$
Note that the Whittaker function $W_1(0,g,u,\phi)$ of $E(0,g,u,\phi)$ is a product of local Whittaker functions, and the local component $W_{1,v}(0,g,u,\phi)$ at every archimedean $v$ is a multiple of the standard Whittaker function $W^{(2)}$ of weight two. Then the Whittaker function $W_1(0,g,\phi)$ of $E(0,g,\phi)$ is still a product of the standard Whittaker functions at archimedean places with a finite part. Then as in the proof of \cite[Proposition 6.12]{YZZ}, an explicit calculation shows that the holomorphic projection $\pr'$ does not change $W_1(0,g, \phi)$. As a consequence,
$$ \pr' E(0,g,\phi)= E_*(0,g,\phi). $$
Similarly,
$$ \pr' C(0,g,\phi)= C_*(0,g,\phi). $$
For $\pr' E'(0,g,\phi)$, start with the Whittaker function
$$ W_1'(0,g,\phi)=\sumu W_1'(0,g,u,\phi) =\sumu \sum_{v} W_{1,v}'(0,g,u,\phi_v) W_1^v(0,g,u,\phi^v). $$
Then it amounts to applying $\pr'_\psi $ to $W_{1,v}'(0,g,u,\phi_v) W_1^v(0,g,u,\phi^v)$ for each place $v$ of $F$.
If $v|\infty$, then
$$\pr'_\psi W_{1,v}'(0,g,u,\phi_v)=c_3 W_{1,v}(0,g,u,\phi_v)$$
for some constant $c_3$. It follows that
$$ \pr'_\psi \left(W_{1,v}'(0,g,u,\phi_v) W_1^v(0,g,u,\phi^v) \right) =c_3 W_{1}(0,g,u,\phi). $$
Recovering its contribution to the whole series, we get
$$ c_3 E_*(0,g,\phi)=c_3 \sumu E_*(0,g,u,\phi). $$
Furthermore, the constant $c_3=-\frac 12(1+\log 4)$ is computed in Lemma \ref{constant by holomorphic projection} below.
If $v\nmid \infty$, then $\pr'_\psi $ does not change $W_{1,v}'(0,g,u,\phi_v) W_1^v(0,g,u,\phi^v)$ since it is already holomorphic. However, when getting back to the whole series, its contribution is
$$ \sum_{a\in F^\times} W_{1,v}'(0,d^*(a)g,u,\phi_v) W_1^v(0,d^*(a)g,u,\phi^v). $$
Apply the basic result
$$ W_{1,v'}(s,d^*(a)g,u,\phi_{v'}) =|a|_{v'}^{-\frac s2}W_{a,v'}(s,g,a^{-1}u,\phi_{v'}), $$
which can be verified using $wn(b)d^*(a)=d(a)wn(a^{-1}b)$. We have
$$ W_1^v(0,d^*(a)g,u,\phi^v) =W_a^v(0,g,a^{-1}u,\phi^v), $$
and
$$ W_{1,v}'(0,d^*(a)g,u,\phi_v) =W_{a,v}'(0,g,a^{-1}u,\phi_v)-\frac12\log|a|_v\cdot W_{a,v}(0,g,a^{-1}u,\phi_v). $$
Then the result follows.
\end{proof}

\subsection{Choice of the Schwartz function}
\label{choices}
To make further explicit local computations, we need to put conditions on $(F,E,\BB, U)$ and specify the Schwartz function $\phi$. We will see that our choices are slightly different from those of \cite[\S7.2]{YZ}. Throughout the rest of the paper, we assume the basic conditions below, and we assume part of the restrictive conditions from time to time.

\subsubsection*{Basic conditions}
Start with the setup of Theorem \ref{main} and Theorem \ref{height CM}. Let $F$ be a totally real field, and $E$ be a totally imaginary quadratic extension of $F$.
Let $\BB$ be a totally definite incoherent quaternion algebra over $\BA=\BA_F$ with an embedding $E_\BA\to \BB$ of $\BA$-algebras. Then the ramification set $\Sigma$ of $\BB$ has odd cardinality and contains all the archimedean places of $F$.
Fix a maximal order $O_{\BB_f}=\prod_{v\nmid\infty} O_{\BB_v}$ of $\BB_f=\BB\otimes_\AA\af$ invariant under the main involution of $\BB_f$. This gives a maximal open compact subgroup $O_{\BB_f}^\times$ of $\bfcross$. Let $U=\prod_{v\nmid\infty} U_v$ be an open compact subgroup of $O_{\BB_f}^\times$.
For any $v\nmid\infty$, fix a nonzero element $\jv\in O_{\bb_v}$ orthogonal to $E_v$ such that $v(q(\mathfrak{j}_v))$ is non-negative and minimal; i.e., $v(q(\mathfrak{j}_v))\in\{0, 1\}$. Then $v(q(\mathfrak{j}_v))=1$ if and only if $\bv$ is nonsplit (and thus $E_v/F_v$ is inert by assumption). The existence of $\jv$ is basic and verified in \cite[\S7.2]{YZ}. For $v\mid \infty$, fix a nonzero element $\jv\in \bb_v$ orthogonal to $E_v$. Then we have an element $\fj=(\jv)_v$ of $\BB$, which gives an orthogonal decomposition $\BB=E_\AA+E_\AA\fj$.
For any place $v$ nonsplit in $E$, let $B(v)$ be the nearby quaternion algebra over $F$ obtained by changing the Hasse invariant of $\BB$ at $v$. Fix an embedding $E\to B(v)$ and isomorphisms $B(v)_{w}\simeq \bb_{w}$ for all $w\neq v$, which are assumed to be compatible with the embedding $E_{\BA}\to \BB$. The compatible isomorphism $B(v)_{w}\simeq \bb_{w}$ always exists. In fact, if $E_w$ is a field, it follows from the classical Noether--Skolem theorem. If $E_w$ is not a field, we have a splitting $E_w\simeq F_w\oplus F_w$, whose idempotents induce splittings $B(v)_w\simeq M_2(F_w)$ and $\bb_w\simeq M_2(F_w)$. At $v$, we also take an element $j_v\in B(v)_v$ orthogonal to $E_v$, such that $v(q(j_v))$ is non-negative and minimal as above. We remark that this set $\{\mathfrak j_{v'}:v'\neq v\} \cup \{j_v\}$ is not required to be the localizations of a single element of $B(v)$.
Let $\phi=\otimes_v \phi_v$ be a Schwartz function in $\OCS(\bb\times \AA^\times)$. The isomorphism $B(v)_{\AA^v}\simeq \BB^v$ induces an identification $\OCS(B(v)_{\AA^v}\times (\AA^v)^\times)\simeq \OCS(\bb^v\times (\AA^v)^\times). $ Thus we can view $\phi^v$ as an element of $\OCS(\bb^v\times (\AA^v)^\times)$, and we will always do so.
We make the following basic assumptions:
\begin{itemize}
\item[(a)] If $v$ is archimedean, $\phi_v$ is the standard Gaussian function as in \S\ref{sec theta eisenstein}.
\item[(b)] Assume that $U_v$ is maximal at every $v\in \Sigma_f$.
\item[(c)] Assume that $E$ is inert at every $v\in \Sigma_f$.
\end{itemize}
We will make these basic assumptions throughout most of the paper.

\subsubsection*{Restrictive conditions}
Here we introduce some very restrictive assumptions, which are required in the local computations and in the proof of Theorem \ref{main}. All our results are true under these conditions, but we will point out from time to time where the conditions are really essential to the computations.
Assume that $U=O_{\BB_f}\cross$ is maximal. Assume that the embedding $E_\AA\to \BB$ sends $\wh O_E$ into the maximal order $O_{\BB_f}$. As a consequence, $U=\prod_{v\nmid\infty} U_v$ contains (the image of) $\wh O_E^\times=\prod_{v\nmid\infty} O_{E_v}^\times$.
As for the Schwartz function $\phi=\otimes_v \phi_v$, we make the following choices:
\begin{itemize}
\item[(1)] If $v$ is archimedean, set $\phi_v$ to be the standard Gaussian function as in \S\ref{sec theta eisenstein}. This is already mentioned above.
\item[(2)] If $v$ is non-archimedean and split in $\BB$, set $\phi_v$ to be the standard characteristic function $1_{O_\bv\times\ofv\cross}$.
\item[(3)] If $v$ is non-archimedean and nonsplit in $\BB$, set $\phi_v$ to be the characteristic function $1_{O_\bv^\times\times\ofv\cross}$ (instead of the standard $1_{O_\bv\times\ofv\cross}$).
\end{itemize}
By definition, $\phi$ is invariant under both the left action and the right action of $U$.
Note that \cite[\S7.2]{YZ} assumes that there is a set $S_2$ consisting of two places of $F$ split in $E$ such that $\phi_v$ takes a specific degenerate form for $v\in S_2$. We do not make this assumption here, since it kills exactly the terms we need for our main theorem.

\subsection{Explicit local derivatives}
Assume the basic conditions in \S\ref{choices} for the moment. We will assume all the conditions (at the relevant places) when we do explicit local calculations.
Recall that we have defined the Eisenstein series
\begin{eqnarray*}
E(s, g,u,\phi) = \sum_{\gamma\in P(F)\bs \gl(F)} \delta(\gamma g)^s r (\gamma g)\phi (0,u).
\end{eqnarray*}
Note that this Eisenstein series uses the whole Schwartz function $\phi\in\OCS(\bb\times\across)$ and thus has weight two, in contrast to the Eisenstein series $E(s, g,u,\phi_2)$ in the definition of the derivative series at the beginning of \S\ref{sec 3.1}, which only uses $\phi_2\in\OCS(E_\AA\fj\times\across)$ and thus has weight one. In this section, we will abbreviate
$$ E(s, g,u) =E(s, g,u,\phi).$$
We have the usual Fourier expansion:
$$E(s, g, u)=E_0(s, g,u) + \sum_{a \in F\cross} W_a(s, g,u)$$
with
\begin{eqnarray*}
E_0(s, g,u) &=& \delta(g)^s r (g)\phi (0,u) + W_0(s, g,u),\\
W_a(s, g,u) &=& \int_{\adele} \delta(wn(b)g)^s r(wn(b)g)\phi(0,u) \psi(-ab) db, \ \ a\in F.
\end{eqnarray*}
We also introduce the local Whittaker function
\begin{eqnarray*}
W_{a,v}(s, g,u) = \int_{F_v} \delta(wn(b)g)^s r(wn(b)g)\phi(0,u) \psi(-ab) db, \ \ a\in F_v,\ u\in F_v\cross, \ g\in\gl(F_v).
\end{eqnarray*}
The goal of this subsection is to compute the derivative of this local Whittaker function at $s=0$. In the following, assume all the conditions on $(F,E,\BB, U,\phi)$ listed in \S \ref{choices}.

\subsubsection*{Local holomorphic projection: archimedean place}
Recall that $\phi$ at any archimedean place is the standard Gaussian as in \S\ref{sec theta eisenstein}.
\begin{lem} \label{constant by holomorphic projection}
Let $v$ be an archimedean place.
\begin{itemize}
\item[(1)] For any $a\in F_v$ with $a>0$,
\begin{align*}
W_{1,v}(0,d^*(a),u) =& -4\pi^2 ae^{-2\pi a}1_{F_{v,+}\cross}(u),\\
W_{1,v}'(0,d^*(a),u) =& -\left(\frac{\pi}{2} e^{-2\pi a} +2\pi^2 (\log\pi +\gamma-1) ae^{-2\pi a}\right) 1_{F_{v,+}\cross}(u).
\end{align*}
Here $\gamma$ is Euler's constant.
\item[(2)] The holomorphic projection
\begin{align*}
\pr'_\psi \, W_{1,v}'(0,g,u)=-\frac 12 (1+\log 4) W_{1,v}(0,g,u).
\end{align*}
\end{itemize}
\end{lem}
\begin{proof}
We first check (1). By $wn(b)d^*(a)=d(a)wn(a^{-1}b)$, it is easy to get
$$W_{1,v}(s,d^*(a),u)=a^{-\frac s2} W_{a,v}(s,1,a^{-1}u).$$
We are thus reduced to computing
\begin{eqnarray*}
W_{a,v}(s,1,u) = \int_{F_v} \delta(wn(b))^s r(wn(b))\phi(0,u) \psi(-ab) db,\quad a>0.
\end{eqnarray*}
Assume $u>0$; otherwise, the above vanishes. The process is parallel to \cite[Proposition 2.11]{YZZ} and also uses the technique of \cite{KRY1}.
In fact, from the proof of \cite[Proposition 2.11]{YZZ} for the case $d=4$, \begin{align*} W_{a,v}(s,1,u) =- \frac{2\pi^{s+2}}{\Gamma(\frac s2+2) \Gamma(\frac s2)} e^{-2\pi a} \int_{0}^{\infty} e^{-2\pi t} (t+2a)^{\frac s2+1} t^{\frac s2 -1} dt. \end{align*} To see its behavior at $s=0$, write \begin{align*} W_{a,v}(s,1,u) = -e^{-2\pi a} \frac{\pi^{s+2}s}{\Gamma(\frac s2+2) \Gamma(\frac s2+1)} \int_{0}^{\infty} e^{-2\pi t} (t+2a)^{\frac s2+1} t^{\frac s2 -1} dt. \end{align*} Then the product before the integral has a simple zero at $s=0$. Since \begin{align*} & \int_{0}^{\infty} e^{-2\pi t} (t+2a)^{\frac s2+1} t^{\frac s2 -1} dt -(2a)^{\frac s2+1} (2\pi)^{-\frac s2}\Gamma(\frac s2) \\ =& \int_{0}^{\infty} e^{-2\pi t} \frac{(t+2a)^{\frac s2+1}-(2a)^{\frac s2+1}}{t} t^{\frac s2} dt =\frac{1}{2\pi}+O(s), \end{align*} we get \begin{align*} -W_{a,v}(s,1,u) =& e^{-2\pi a} \frac{\pi^{s+2}s}{\Gamma(\frac s2+2) \Gamma(\frac s2+1)} \left( \frac{1}{2\pi} +(2a)^{\frac s2+1} (2\pi)^{-\frac s2}\Gamma(\frac s2) \right)+O(s^2)\\ =& e^{-2\pi a} \frac{\pi^{s+2}}{\Gamma(\frac s2+2) \Gamma(\frac s2+1)} \left( \frac{1}{2\pi} s +2(2a)^{\frac s2+1} (2\pi)^{-\frac s2}\Gamma(\frac s2+1) \right)+O(s^2). \end{align*} It follows that \begin{align*} -W_{a,v}(0,1,u) =&\ 4 \pi^2 ae^{-2\pi a}, \\ -W_{a,v}'(0,1,u) =&\ \frac{\pi}{2} e^{-2\pi a} +2\pi^2 (\log\pi +\gamma-1) ae^{-2\pi a} +2\pi^2 ae^{-2\pi a}\log a,\\ -W_{1,v}'(0,d^*(a),u) =& \frac{\pi}{2} e^{-2\pi a} +2\pi^2 (\log\pi +\gamma-1) ae^{-2\pi a}. \end{align*} Now we compute the holomorphic projection $$\pr'_\psi W_{1,v}'(0,g,u)=4\pi W^{(2)}(g)\cdot \quasilim \int_{Z(\RR)N(\RR)\bs \GL_2(\RR)} \delta(h)^s W_{1,v}'(0,h,u)\overline{W^{(2)}(h)}dh. $$ By the Iwasawa decomposition, \begin{align*} \pr'_\psi W_{1,v}'(0,g,u)= &4\pi W^{(2)}(g) \quasilim \int_0^{\infty} y^s e^{-2\pi y} W_{1,v}'(0,d^*(y),u) \frac{dy}{y} \\ =&- 4\pi W^{(2)}(g) \quasilim \int_0^{\infty} y^s e^{-2\pi y} \left(\frac{\pi}{2} e^{-2\pi y} +2\pi^2 (\log\pi +\gamma-1) ye^{-2\pi y}\right) \frac{dy}{y}. \end{align*} The integral above is computed by \begin{align*} & \int_0^{\infty} y^s e^{-2\pi y} \left(\frac{\pi}{2} e^{-2\pi y} +2\pi^2 (\log\pi +\gamma-1) ye^{-2\pi y}\right) \frac{dy}{y} \\ =& \frac{\pi}{2} \int_0^{\infty} y^s e^{-4\pi y} \frac{dy}{y} +2\pi^2 (\log\pi +\gamma-1) \int_0^{\infty} y^{s+1} e^{-4\pi y} \frac{dy}{y} \\ =& \frac{\pi}{2} (4\pi)^{-s} \Gamma(s) +2\pi^2 (\log\pi +\gamma-1) (4\pi)^{-s-1} \Gamma(s+1). \end{align*} Its constant term is equal to \begin{align*} \frac{\pi}{2} (-\log(4\pi)-\gamma) +2\pi^2 (\log\pi +\gamma-1) (4\pi)^{-1} = - \frac{1}{2}\pi (1+\log 4). \end{align*} Hence, $$\pr'_\psi W_{1,v}'(0,g,u)= 2\pi^2 (1+\log 4) W^{(2)}(g).$$ This holds for $u>0$. By the result of (1), $$ W_{1,v}(0,g,u)=- 4\pi^2 W^{(2)}(g) 1_{F_{v,+}\cross}(u). $$ Then (2) follows. \end{proof} \subsubsection*{Derivative of Whittaker functions: non-archimedean place} As in \S\ref{sec notation}, $p_v$ denotes the maximal ideal of $\ofv$, and $d_v\in F_v$ denotes the local different of $F$ over $\BQ$. Assume all the conditions in \S\ref{choices}. Recall that for a non-archimedean place $v$, we have $\phi_v=1_{O_\bv\times\ofv\cross}$ if $v$ is split in $\BB$, and $\phi_v=1_{O_\bv^\times\times\ofv\cross}$ if $v$ is nonsplit in $\BB$. \begin{lem} \label{local explicit} Let $v$ be a non-archimedean place of $F$, and let $a\in F_v^\times$. \begin{itemize} \item[(1)] Let $v$ be a non-archimedean place split in $\bb$. Then $W_{a,v}(s,1,u)$ is nonzero only if $u\in \ofv\cross$ and $v(a)\geq -v(d_v)$. 
In the case $u\in \ofv\cross$ and $a\in \ofv$,
\begin{eqnarray*}
W_{a,v}(s,1,u) &=& |d_v|^{s+\frac{3}{2}}\frac{(1-N_v^{-(s+2)})(1-N_v^{-(v(a)+1)(s+1)})}{1-N_v^{-(s+1)}}\\
&& +\ |d_v|^{\frac{5}{2}} \frac{(1-N_v^{-s})(1-|d_v|^{s-1})}{1-N_v^{-(s-1)}}.
\end{eqnarray*}
Therefore, for $u\in \ofv\cross$ and $a\in \ofv$,
\begin{eqnarray*}
&& W_{a,v}'(0,1,u)-\frac12\log|a|_v W_{a,v}(0,1,u) \\
&=& \left(-\zeta_v'(2)/\zeta_v(2) + \log|d_v|\right) W_{a,v}(0,1,u)\\
&& +\, |d_v|^{\frac32} \frac{1+N_v^{-1}}{2(1-N_v^{-1})} \big((r+2)N_v^{-(r+1)}-rN_v^{-(r+2)}-(r+2)N_v^{-1}+r\big) \log N_v \\
&& +\, |d_v|^{\frac{3}{2}} \frac{1-|d_v|}{N_v-1}\log N_v.
\end{eqnarray*}
Here $r=v(a)$.
\item[(2)] Let $v$ be a non-archimedean place nonsplit in $\bb$. Then $W_{a,v}(s,1,u)$ is nonzero only if $v(a)\geq -v(d_v)$ and $u\in \ofv\cross$, and it is constant (depending on $s$) for $(a,u)\in p_v\times \ofv\cross$. Moreover, for any $u\in F_v\cross$,
\begin{eqnarray*}
\int_{F_v} \left(W_{a,v}'(0,1,u)-\frac12\log|a|_v W_{a,v}(0,1,u) \right)da=0.
\end{eqnarray*}
\end{itemize}
\end{lem}
\begin{proof}
The calculation is rather involved due to the non-triviality of $d_v$. To simplify the calculation, we switch between two different methods. We divide the process into four steps, according to the ramification of $\BB$ at $v$ and the ramification of $v$ over $\QQ$.

\medskip
\noindent\emph{Step 1. unramified case:} $v$ is split in $\BB$ and $|d_v|=1$.
Apply the formula
\begin{eqnarray*}
W_{a,v}(s,1,u) = \int_{F_v} \delta(wn(b))^s \ r(wn(b))\phi_{v}(0,u) \psi_v(-ab) db.
\end{eqnarray*}
Note that $\phi_v$ is invariant under the action of $\GL_2(O_{F_v})$ (as symplectic similitudes), as $\GL_2(O_{F_v})$ is generated by $w$, $m(a)$, $n(b)$, $d(c)$ with $a,c\in O_{F_v}^\times$ and $b\in O_{F_v}$. Thus the Iwasawa decomposition gives
$$ r(g)\phi_v(0,u)=\delta(g)^2\ 1_{O_{F_v}^\times}(u), \quad g\in \SL_2(F_v). $$
Notice
$$ \delta(wn(b)) = \left\{ \begin{array}{rl} 1 \ \quad &\mbox{ if $b\in O_{F_v}$,} \\ |b|^{-1} &\mbox{ otherwise}. \end{array} \right. $$
Assuming $u\in O_{F_v}^\times$ (since otherwise $W_{a,v}(s,1,u)= 0$), we have
\begin{eqnarray*}
W_{a,v}(s,1,u) &=& \int_{F_v} \delta(wn(b))^{s+2} \psi_v(-ab) db \\
&=& \int_{O_{F_v}} \psi_v(-ab) db+ \int_{F_v-O_{F_v}} |b|^{-(s+2)} \psi_v(-ab) db.
\end{eqnarray*}
Write the domain $F_v-O_{F_v}$ of the second integral as a disjoint union of $p_v^{-n}- p_v^{-(n-1)}$ for $n\geq1$. We have
\begin{eqnarray*}
W_{a,v}(s,1,u) &=& \int_{O_{F_v}} \psi_v(-ab) db+ \sum_{n=1}^{\infty}\int_{p_v^{-n}- p_v^{-(n-1)}} N_v^{-n(s+2)} \psi_v(-ab) db\\
&=& \int_{O_{F_v}} \psi_v(-ab) db+ \sum_{n=1}^{\infty}\int_{p_v^{-n}} N_v^{-n(s+2)} \psi_v(-ab) db -\sum_{n=1}^{\infty}\int_{p_v^{-(n-1)}} N_v^{-n(s+2)} \psi_v(-ab) db\\
&=& (1-N_v^{-(s+2)}) \sum_{n=0}^{\infty} N_v^{-n(s+2)} \int_{p_v^{-n}} \psi_v(-ab) db.
\end{eqnarray*}
It is nonzero only if $a\in O_{F_v}$. In that case,
\begin{eqnarray*}
W_{a,v}(s,1,u) =(1-N_v^{-(s+2)}) \sum_{n=0}^{v(a)} N_v^{-n(s+2)} N_v^{n} = \frac{(1-N_v^{-(s+2)})(1-N_v^{-(v(a)+1)(s+1)})}{1-N_v^{-(s+1)}}.
\end{eqnarray*} It follows that \begin{eqnarray*} W_{a,v}(0,1,u) = \frac{(1-N_v^{-2})(1-N_v^{-(v(a)+1)})}{1-N_v^{-1}} = (1+N_v^{-1})(1-N_v^{-(v(a)+1)}) \end{eqnarray*} and \begin{eqnarray*} && W_{a,v}'(0,1,u)-\frac12\log|a|_v W_{a,v}(0,1,u) \\ &=& W_{a,v}(0,1,u)\big(\frac{W_{a,v}'(0,1,u)}{W_{a,v}(0,1,u)}-\frac12\log|a|_v \big) \\ &=& W_{a,v}(0,1,u) \big( \frac{N_v^{-2}}{1-N_v^{-2}}+ \frac{(v(a)+1)N_v^{-(v(a)+1)}}{1-N_v^{-(v(a)+1)}} -\frac{N_v^{-1}}{1-N_v^{-1}}+\frac12 v(a) \big)\log N_v\\ &=& \frac{N_v^{-2}\log N_v}{1-N_v^{-2}} W_{a,v}(0,1,u)+ \frac{1-N_v^{-2}}{2(1-N_v^{-1})^2} \big((r+2)N_v^{-(r+1)}-rN_v^{-(r+2)}-(r+2)N_v^{-1}+r\big) \log N_v. \end{eqnarray*} This proves part (1) under $|d_v|=1$. \medskip \noindent\emph{Step 2. A general formula:} By the proof of \cite[Proposition 6.10(1)]{YZZ}, we have \begin{eqnarray*} W_{a,v}(s,1,u,\phi_v) = \gamma(\bv, uq)|d_v|^{\frac{1}{2}} (1-N_v^{-s}) \sum_{n=0}^{\infty} N_v^{-n(s-1)} \int_{D_n(a)} \phi_{v}(x,u) d_ux, \end{eqnarray*} where $d_ux$ is the self-dual measure of $(\bv, uq)$, and $$D_n(a)=\{x \in \bb_{v}: uq(x) \in a+p_v^nd_v^{-1}\}$$ is a subset of $\bb_{v}$. The local Weil index $\gamma(\bv,uq)=\pm 1$ coincides with the Hasse invariant of $\bv$. Note that the quadratic space $(\BB_v, uq)$ here is different from the quadratic space $(E_v\mathfrak j_v, uq)$ in \cite[Proposition 6.10(1)]{YZZ}, but the proof is similar. It is easy to see that $W_{a,v}(s,1,u)\neq 0$ only if $u\in \ofv\cross$ and $v(a)\geq -v(d_v)$. In the following, we always assume $u\in \ofv\cross$ and $v(a)\geq 0$. \medskip \noindent\emph{Step 3. matrix case:} $v$ is split in $\BB$ and $d_v$ is arbitrary. By the normalization of $\psi_v:F_v\to \BC^\times$ in \cite[\S1.6.1]{YZZ}, the characteristic function $1_{O_{F_v}}$ is not self-dual under $\psi_v$ if $|d_v|\neq 1$. Consequently, $\phi_v$ is not invariant under the action of $\GL_2(O_{F_v})$. Then the method of Step 1 does not work in this case, and we are going to use the formula in Step 2. We have \begin{eqnarray*} W_{a,v}(s,1,u) = |d_v|^{\frac{1}{2}} (1-N_v^{-s}) \sum_{n=0}^{\infty} N_v^{-n(s-1)} \vol(D_n(a)\cap O_\bv) \end{eqnarray*} with $$D_n(a)\cap O_\bv=\{x \in O_\bv: uq(x) \in a+p_v^nd_v^{-1}\}.$$ By $\vol(O_\bv)=|d_v|^2$, we write \begin{eqnarray*} W_{a,v}(s,1,u) = |d_v|^{\frac{5}{2}} (1-N_v^{-s}) \sum_{n=0}^{\infty} N_v^{-n(s-1)} \frac{\vol(D_n(a)\cap O_\bv)}{\vol(O_\bv)}. \end{eqnarray*} We use this expression because it holds for any Haar measure on $\bv$. Split the summation according to $n<v(d_v)$ and $n\geq v(d_v)$. It gives $$W_{a,v}(s,1,u)=W_{a,v}(s,1,u)_{n<v(d_v)}+ W_{a,v}(s,1,u)_{n\geq v(d_v)}$$ accordingly. In the following we compute the terms on the right-hand side separately. By the assumption $a\in O_{F_v}$, for any $n<v(d_v)$, we have $$D_n(a)=\{x \in \bb_{v}: q(x) \in p_v^nd_v^{-1}\} \supset O_\bv.$$ It follows that \begin{eqnarray*} W_{a,v}(s,1,u)_{n<v(d_v)} = |d_v|^{\frac{5}{2}} (1-N_v^{-s}) \sum_{n=0}^{v(d_v)-1} N_v^{-n(s-1)} = |d_v|^{\frac{5}{2}} \frac{(1-N_v^{-s})(1-|d_v|^{s-1})}{1-N_v^{-(s-1)}}. \end{eqnarray*} A direct calculation of $W_{a,v}(s,1,u)_{n\geq v(d_v)}$ is quite involved, so we compare it with the unramified case instead. For clarification, denote $$D_n(a)^\circ=\{x \in \bv: uq(x) \in a+p_v^n\},$$ which is equal to the set $D_n(a)$ in the unramified case in Step 1. 
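Note that $p_v^nd_v^{-1}=p_v^{n-v(d_v)}$; hence, for $n\geq v(d_v)$,
$$
D_n(a)=\{x \in \bv: uq(x) \in a+p_v^{n-v(d_v)}\}=D_{n-v(d_v)}(a)^\circ.
$$
This is exactly what justifies the substitution in the next display.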
For $n\geq v(d_v)$, the substitution $n\mapsto n+v(d_v)$ gives
\begin{eqnarray*}
W_{a,v}(s,1,u)_{n\geq v(d_v)} = |d_v|^{\frac{5}{2}} |d_v|^{s-1} (1-N_v^{-s}) \sum_{n=0}^{\infty} N_v^{-n(s-1)} \frac{\vol(D_n(a)^\circ \cap O_\bv)}{\vol(O_\bv)}.
\end{eqnarray*}
This is equal to $W_{a,v}(s,1,u)$ in the case $|d_v|=1$ considered in Step 1. In other words, the result of Step 1 gives the combinatorial equality
\begin{eqnarray*}
(1-N_v^{-s}) \sum_{n=0}^{\infty} N_v^{-n(s-1)} \frac{\vol(D_n(a)^\circ \cap O_\bv)}{\vol(O_\bv)} =\frac{(1-N_v^{-(s+2)})(1-N_v^{-(v(a)+1)(s+1)})}{1-N_v^{-(s+1)}}
\end{eqnarray*}
in the current setting. Hence, we have
\begin{eqnarray*}
W_{a,v}(s,1,u)_{n\geq v(d_v)} &=& |d_v|^{s+\frac{3}{2}}\frac{(1-N_v^{-(s+2)})(1-N_v^{-(v(a)+1)(s+1)})}{1-N_v^{-(s+1)}}.
\end{eqnarray*}
Now we have a formula for $W_{a,v}(s,1,u)$, and some elementary computations finish the proof of part (1) of the lemma.

\medskip
\noindent\emph{Step 4. division case:} $v$ is nonsplit in $\BB$.
Then the formula in Step 2 becomes
\begin{eqnarray*}
W_{a,v}(s,1,u) = -|d_v|^{\frac{1}{2}} (1-N_v^{-s}) \sum_{n=0}^{\infty} N_v^{-n(s-1)} \vol(D_n(a)\cap O_\bv^\times),
\end{eqnarray*}
where
$$D_n(a)\cap O_\bv^\times=\{x \in O_\bv^\times: uq(x) \in a+p_v^nd_v^{-1}\}.$$
Here we have assumed $u\in \ofv\cross$; otherwise, $W_{a,v}(s,1,u)=0$. Assume $v(a)\geq -v(d_v)$ for the same reason.
If $v(a)>0$, the condition $uq(x) \in a+p_v^nd_v^{-1}$ is equivalent to $n\leq v(d_v)$. In this case $D_n(a)\cap O_\bv^\times=O_\bv^\times$. Therefore,
\begin{eqnarray*}
W_{a,v}(s,1,u) &=& -|d_v|^{\frac{1}{2}} (1-N_v^{-s}) \sum_{n=0}^{v(d_v)} N_v^{-n(s-1)} \vol(O_\bv^\times)\\
&=& -|d_v|^{\frac{1}{2}} (1-N_v^{-s}) \frac{1-N_v^{(v(d_v)+1)(1-s)}}{1-N_v^{1-s}} \vol(O_\bv^\times).
\end{eqnarray*}
This proves the first assertion in (2).
It remains to verify the formula
\begin{eqnarray*}
\int_{F_v} \left(W_{a,v}'(0,1,u)-\frac12\log|a|_v W_{a,v}(0,1,u) \right)da=0.
\end{eqnarray*}
We first check that
$$ \log|a|_v W_{a,v}(0,1,u)=0, \quad\ \forall a\in F_v^\times. $$
In fact, it suffices to check $W_{a,v}(0,1,u)=0$ if $v(a)\neq 0$. This is an easy consequence of the local Siegel--Weil formula in \cite[Theorem 2.2]{YZZ} or \cite[Proposition 2.9]{YZZ}. Alternatively, we can verify it by the type of computation carried out here. Since we already know the vanishing for $v(a)>0$ from the above computation, it remains to check the case $-v(d_v) \leq v(a)< 0$. In this case, the condition $uq(x)-a \in p_v^nd_v^{-1}$ with $x\in O_\bv^\times$ is equivalent to $a \in p_v^nd_v^{-1}$. It holds only if $n\leq v(ad_v)$. Under this condition $D_n(a)\cap O_\bv^\times=O_\bv^\times$. It follows that
\begin{eqnarray*}
W_{a,v}(s,1,u) &=& -|d_v|^{\frac{1}{2}} (1-N_v^{-s}) \sum_{n=0}^{v(ad_v)} N_v^{-n(s-1)} \vol(O_\bv^\times)\\
&=& -|d_v|^{\frac{1}{2}} (1-N_v^{-s}) \frac{1-N_v^{(v(ad_v)+1)(1-s)}}{1-N_v^{1-s}}\vol(O_\bv^\times).
\end{eqnarray*}
Thus $W_{a,v}(0,1,u)=0$.
We are thus reduced to proving
\begin{eqnarray*}
\int_{F_v} W_{a,v}'(0,1,u) da=0.
\end{eqnarray*}
We are going to compute
\begin{eqnarray*}
\int_{F_v} W_{a,v}(s,1,u) da = -|d_v|^{\frac{1}{2}} (1-N_v^{-s}) \sum_{n=0}^{\infty} N_v^{-n(s-1)} \int_{F_v} \vol(D_n(a)\cap O_\bv^\times) da.
\end{eqnarray*}
Use a Fubini-type argument to change the order of integration in the last integral. We have
$$ \int_{F_v} \vol(D_n(a)\cap O_\bv^\times) da =\iint_{uq(x)-a \in p_v^nd_v^{-1}} dx da =\int_{O_\bv^\times} \int_{uq(x)+p_v^nd_v^{-1}} da dx =\vol(O_\bv^\times) |d_v|^{-\frac{1}{2}}N_v^{-n}.
$$ Hence, \begin{eqnarray*} \int_{F_v} W_{a,v}(s,1,u) da = -|d_v|^{\frac{1}{2}} (1-N_v^{-s}) \sum_{n=0}^{\infty} N_v^{-n(s-1)} \vol(O_\bv^\times) |d_v|^{-\frac{1}{2}}N_v^{-n} = - \vol(O_\bv^\times) . \end{eqnarray*} Taking derivative at $s=0$, we get the desired result. The proof is complete. \end{proof} \subsubsection*{Derivative of intertwining operators} Recall from Theorem \ref{analytic series}(4), under the relation $\phi_{v}=\phi_{1,v}\otimes \phi_{2,v}$, we have defined $$c_{\phi_v}(g,y,u)=r(g)\phi_{1,v}(y,u) W_{0,v}^{\circ}\ '(0,g,u, \phi_{2,v}) + \log \delta(g_v)r(g)\phi_v(y,u), $$ where the normalization \begin{eqnarray*} W_{0,v}^{\circ}(s,g,u,\phi_{2,v}) = \gamma_{u,v}^{-1}|D_v|^{-\frac{1}{2}}|d_v|^{-\frac{1}{2}}\frac{L(s+1,\eta_v)}{L(s,\eta_v)} W_{0,v}(s,g,u,\phi_{2,v}). \end{eqnarray*} Here $\gamma_{u,v}$ is the Weil index of $(E_v\jv, uq)$. Assume all the conditions in \S\ref{choices}. The following result is a variant of \cite[Lemma 7.6]{YZ}. \begin{lem} \label{derivative of intertwining} Denote $w=\matrixx{}{1}{-1}{}$. For any non-archimedean place $v$ nonsplit in $\BB$, \begin{eqnarray*} r(w)\phi_v(0,u) =- |d_v|^2 N_v^{-1} (1-N_v^{-2}) 1_{\ofv\cross}(u), \end{eqnarray*} and \begin{eqnarray*} c_{\phi_v}(w,0,u) =\begin{cases} \displaystyle \frac{(1-N_v)\log N_v}{1+N_v} r(w)\phi_{v}(0,u) & \text{ if } 2\mid v(d_v),\\ 0 & \text{ if } 2\nmid v(d_v). \end{cases} \end{eqnarray*} \end{lem} \begin{proof} Note that $v$ is inert in $E$ by assumption, and $$ \phi_v=1_{O_{\bv}^\times \times \ofv\cross}, \quad\ \phi_{1,v}=1_{\oev^\times \times \ofv\cross}, \quad\ \phi_{2,v}=1_{\oev \jv \times \ofv\cross}. $$ We need to compute $$c_{\phi_v}(w,0,u)=r(w)\phi_{1,v}(0,u) W_{0,v}^{\circ}\ '(0,w,u, \phi_{2,v}).$$ It is easy to have $$ r(w)\phi_{1,v}(0,u)= \gamma(E_{v},uq)1_{\ofv\cross}(u)\int_{\oev^\times}dx =\gamma(E_{v},uq) |d_v| (1-N_v^{-2}) 1_{\ofv\cross}(u), $$ $$ r(w)\phi_{2,v}(x_2,u)= \gamma(E_{v}\jv,uq) |d_vq(\jv)| \cdot 1_{d_v^{-1}q(\jv)^{-1}\oev\jv}(x_2)1_{\ofv\cross}(u). $$ This proves the first result, as $v(q(\jv))=1$ by assumption, and $$ \gamma(E_{v},uq)\gamma(E_{v}\jv,uq) =\gamma(\BB_{v},uq)=-1. $$ Now we prove the second identity. From the definition \begin{eqnarray*} W_{0,v}(s, g,u, \phi_{2,v})= \int_{F_v} \delta(wn(b)g)^s r(wn(b)g)\phi_{2,v}(0,u) db, \end{eqnarray*} we have \begin{eqnarray*} W_{0,v}(s,w,u, \phi_{2,v})= W_{0,v}(s, 1,u, r(w)\phi_{2,v}). \end{eqnarray*} Its computation is similar to that of \cite[Lemma 7.6]{YZ}. In fact, we still have \begin{eqnarray*} W_{0,v}(s,1,u, r(w)\phi_{2,v}) = \gamma(E_{v}\jv,uq) |d_v|^{\frac{1}{2}} (1-N_v^{-s}) \sum_{n=0}^{\infty} N_v^{-ns+n} \int_{D_n} r(w)\phi_{2,v}(x_2,u) d_ux_2, \end{eqnarray*} where $$ D_n =\{x_2 \in E_v\jv: uq(x_2) \in p_v^nd_v^{-1}\} $$ and the measure $d_ux_2$ gives $\vol(\oev\jv)= |d_vuq(\jv)|$. It follows that \begin{eqnarray*} W_{0,v}^{\circ}(s,w,u,\phi_{2,v}) = \frac {1-N_v^{-2s} }{1+N_v^{-(s+1)}} \sum_{n=0}^{\infty} N_v^{-ns+n} \int_{D_n} r(w)\phi_{2,v}(x_2,u) d_ux_2. \end{eqnarray*} Apply the formula of $r(w)\phi_{2,v}(x_2,u)$ in the above. Assume $u\in\ofv\cross$ in the following. Note that for any $n\geq 0$, $$ D_{n} =p_v^{[\frac {n-v(d_v)}{2}]}\oev\jv\ \subset \ d_v^{-1}q(\jv)^{-1}\oev\jv. $$ We have \begin{eqnarray*} W_{0,v}^{\circ}(s,w,u,\phi_{2,v}) = \gamma(E_{v}\jv,uq) |d_vq(\jv)| \frac {1-N_v^{-2s} }{1+N_v^{-(s+1)}} \sum_{n=0}^{\infty} N_v^{-ns+n} \vol(D_n). 
\end{eqnarray*} The summation is equal to \begin{eqnarray*} \sum_{n=0}^{\infty} N_v^{-ns+n} N_v^{-2[\frac {n-v(d_v)}{2}]} |d_vq(\jv)| =\begin{cases} N_v^{-1} (1+N_v^{1-s})(1-N_v^{-2s})^{-1} & \text{ if } 2\mid v(d_v),\\ (1+N_v^{-1-s})(1-N_v^{-2s})^{-1}& \text{ if } 2\nmid v(d_v). \end{cases} \end{eqnarray*} Hence, \begin{eqnarray*} W_{0,v}^{\circ}(s,w,u, \phi_{2,v}) =\gamma(E_{v}\jv,uq) |d_vq(\jv)| \cdot \begin{cases} N_v^{-1} (1+N_v^{1-s})(1+N_v^{-1-s})^{-1} & \text{ if } 2\mid v(d_v),\\ 1 & \text{ if } 2\nmid v(d_v). \end{cases} \end{eqnarray*} Then \begin{eqnarray*} W_{0,v}^{\circ}\ '(0,w,u, \phi_{2,v})= \gamma(E_{v}\jv,uq) |d_vq(\jv)|\cdot \begin{cases} \displaystyle \frac{(1-N_v)\log N_v}{1+N_v} & \text{ if } 2\mid v(d_v),\\ 0 & \text{ if } 2\nmid v(d_v). \end{cases} \end{eqnarray*} This finishes the proof. \end{proof} \subsubsection*{An average formula} Assume all the conditions in \S\ref{choices}. Let $v$ be a non-archimedean place nonsplit in $\BB$. Consider the difference $$ \bar k_{\phi_v}(y,u)= k_{\phi_v}(1,y,u)-m_{\phi_v}(y,u)\log N_v, \quad (y,u)\in (B(v)_v-E_v)\times F_v^\times. $$ Here $k_{\phi_v}(1,y,u)$ is defined in Theorem \ref{analytic series}(2). On the other hand, the function $m_{\phi_v}(y,u)$ is defined in \cite[\S8.2, Notation 8.3]{YZZ}, which works for the settings of both \cite[\S8.2]{YZZ} (for $v$ nonsplit in $E$ but split $\BB$) and \cite[\S 8.3]{YZZ} (for $v$ nonsplit in both $E$ and $\BB$). The definition is based on local intersection numbers on local integral models of the Shimura curve $X_U$ above $O_{F_v}$, and we refer to \cite[\S 8.2-8.3]{YZZ} and \cite[\S 8.2]{YZ} for how to form a pseudo-theta series using $m_{\phi_v}(y,u)$ and $\phi^v$ to express arithmetic intersection numbers of CM points above $v$. For our purpose, $\bar k_{\phi_v}(y,u)$ extends to a Schwartz function in $\ol\CS(B(v)_v\times F_v\cross)$, as a combination of \cite[Lemma 7.4, Lemma 8.7]{YZ}. This fits into the setting of nonsingular pseudo-theta series. In the following, we compute the action of $w=\matrixx{}{1}{-1}{}$ on this Schwartz function. \begin{lem} \label{average k} For any non-archimedean place $v$ nonsplit in $\BB$, $$ r(w) \bar k_{\phi_v}(0,u) =-r(w) \phi_{v}(0,u) \cdot \begin{cases} \displaystyle \left(\frac{N_v}{N_v+1}+\frac{v(d_v)}{2}\right) \log N_v , & 2\mid v(d_v); \\ \displaystyle \frac{v(d_v)+1}{2} \log N_v , & 2\nmid v(d_v). \end{cases} $$ \end{lem} \begin{proof} Write $y=y_1+y_2$ according to $B(v)_v=E_v+E_vj_v$ as usual. Note that $E_v$ is unramified over $F_v$, and $v(q(j_v))=0$. By \cite[Lemma 8.7]{YZ}, $$ m_{\phi _v}(y, u)= \phi_{v}(y_1,u) 1_{\oev j_v}(y_2) \cdot \frac{1}{2} v(q(y_2)). $$ The function \begin{equation*} k_{\phi_v}(1,y,u)= \frac{L(1,\eta_v)}{\mathrm{vol}(E_v^1)} \phi_{1,v}(y_1,u) {W_{uq(y_2),v}^\circ}'(0,1,u, \phi_{2,v}). \end{equation*} is computed in \cite[Lemma 7.4]{YZ}. Here $\mathrm{vol}(E_v^1)=|d_v|^{\frac 12}$ in the current case. From the proof of \cite[Lemma 7.4]{YZ} (written in \cite[p. 596]{YZ}), which has also computed ${W_{a,v}^\circ}'(0,1,u)$ for $-v(d_v) \leq v(a) <0$, we have \begin{equation*} k_{\phi_v}(1,y,u)= (\log N_v)\phi_{1,v}(y_1,u) \cdot \begin{cases} \ 0, & v(q(y_2))<-v(d_v); \\ \displaystyle \frac{N_v|q(y_2)|^{-1}-|d_v|}{N_v^2-1}, & -v(d_v) \leq v(q(y_2))<0;\\ \displaystyle \left(\frac{N_v-|d_v|}{N_v^2-1} + \frac{1}{2} v(q(y_2)) \right), \quad & v(q(y_2))\geq 0. 
\end{cases} \end{equation*} It follows that \begin{equation*} \bar k_{\phi_v}(y,u)= (\log N_v)\phi_{1,v}(y_1,u) \phi_{2,v}'(y_2,u), \end{equation*} where $\phi_{2,v}'\in \ol\CS(E_vj_v \times F_v^\times)$ is given by \begin{equation*} \phi_{2,v}'(y_2,u) = 1_{\ofv\cross}(u) \cdot \begin{cases} \ 0, & v(q(y_2))<-v(d_v); \\ \displaystyle \frac{N_v|q(y_2)|^{-1}-|d_v|}{N_v^2-1}, & -v(d_v) \leq v(q(y_2))<0;\\ \displaystyle \frac{N_v-|d_v|}{N_v^2-1} , \quad & v(q(y_2))\geq 0. \end{cases} \end{equation*} Now we compute $$ r(w) \phi_{2,v}'(0,u)= \gamma(E_vj_v, uq) \int_{E_vj_v} \phi_{2,v}'(y_2,u)d_uy_2. $$ Assume $u\in \ofv\cross$. Note that $E_v$ is unramified over $F_v$, $v(q(j_v))=0$, and $\vol(\oev j_v)=|d_v|$. Writing $v(q(y_2))=2i$ for $i\in \ZZ$, we have $$ r(w) \phi_{2,v}'(0,u)= \gamma(E_vj_v, uq) \left(\sum_{-v(d_v)\leq 2i<0} \frac{N_v N_v^{2i}-|d_v|}{N_v^2-1} \vol(\varpi_v^i\oev^\times j_v) + \frac{N_v-|d_v|}{N_v^2-1} \vol(\oev j_v) \right), $$ where $\varpi_v\in\ofv$ is a uniformizer. It follows that $$ r(w) \phi_{2,v}'(0,u)= \gamma(E_vj_v, uq) |d_v| \left(\sum_{-v(d_v)\leq 2i<0} \frac{N_v N_v^{2i}-|d_v|}{N_v^2-1} (1-N_v^{-2}) N_v^{-2i} + \frac{N_v-|d_v|}{N_v^2-1} \right). $$ An elementary computation gives $$ r(w) \phi_{2,v}'(0,u)= \gamma(E_vj_v, uq)\cdot |d_v| \cdot 1_{\ofv\cross}(u) \cdot \begin{cases} \displaystyle \frac{1}{N_v+1}+\frac{v(d_v)}{2N_v}, & 2\mid v(d_v); \\ \displaystyle \frac{v(d_v)+1}{2N_v} , & 2\nmid v(d_v). \end{cases} $$ Note that $$ r(w) \phi_{2,v}(0,u)= \gamma(E_v\jv, uq)\cdot N_v^{-1}|d_v| \cdot 1_{\ofv\cross}(u). $$ It remains to check $\gamma(E_vj_v, uq)=-\gamma(E_v\jv, uq)$ for the Weil indexes. This follows from $$ \gamma(E_v, uq)\gamma(E_v\jv, uq) =\gamma(\bv, uq)=\gamma(\bv, q) =-1, $$ $$ \gamma(E_v, uq)\gamma(E_vj_v, uq) =\gamma(B(v)_v, uq)=\gamma(B(v)_v, q) =1. $$ \end{proof} \section{Height series} \label{sec height series} The goal of this section is to decompose the height series $Z(g, (t_1, t_2))_U$ into a sum of pseudo-theta series and pseudo-Eisenstein series, and compute some related terms. This is mainly treated in \cite{YZZ,YZ}, but we do need to compute some extra terms for the purpose here. \subsection{Shimura curve and height series} \label{sec shimura curve} The goal of this subsection is to recall the basics of Shimura curves and the height series in \cite{YZZ,YZ}. \subsubsection*{Shimura curve} Let $F$ be a totally real number field, and $\BB=\prod_v'\BB_v$ be a totally definite incoherent quaternion algebra over $F$ with ramification set $\Sigma$. Then $\Sigma$ is a finite set of places of $F$ of odd cardinality and containing all the archimedean places. For any open compact subgroup $U$ of $\bfcross$, we have a Shimura curve $X_U$, which is a projective and smooth curve over $F$. For any embedding $\sigma: F\hookrightarrow \BC$, it has the usual complex uniformization $$X_{U, \sigma}(\BC) = B(\sigma)^\times \bs \CH^\pm\times \bb_f^\times / U \cup \{\rm cusps\}.$$ Here $B(\sigma)$ denotes the nearby quaternion algebra over $F$ with ramification set $\Sigma \setminus \{\sigma\}$, the action of $B(\sigma)^\times$ on $\CH^\pm$ is via a fixed isomorphism $B(\sigma)_\sigma\simeq M_2(\RR)$, and the action of $B(\sigma)^\times$ on $\bfcross$ is via a fixed isomorphism $B(\sigma)_{\af}\to \BB_f$ over $\af$ as in \S\ref{choices}. \subsubsection*{Integral models} The exposition here agrees with that of \cite[\S4.2]{YZ}. Fix a maximal order $O_{\BB_f}=\prod_{v\nmid\infty} O_{\BB_v}$ of $\BB_f$. 
This gives a maximal open compact subgroup $O_{\BB_f}^\times$ of $\bfcross$. Let $U=\prod_v U_v$ be an open compact subgroup of $O_{\BB_f}^\times$ such that $U_v$ is maximal at every $v\in \Sigma_f$. Here we briefly recall the integral model $\CX_U$ of $X_U$ over $O_F$. For any positive integer $N$, denote by $U(N)=(1+N O_{\BB_f})^\times$ the open compact subgroup of $O_{\BB_f}^\times$. Its local component is $U(N)_v=(1+N O_{\BB_v})^\times$. By the works of Deligne--Rapoport \cite{DR}, Carayol \cite{Ca}, and Boutot--Zink \cite{BZ}, if $U$ is sufficiently small in the sense that it is contained in $U(N)$ for some $N\geq 3$, there is a canonical integral model $\CX_{U}$ of $X_U$ over $O_{F}$, which is a projective and flat regular scheme over $O_{F}$. It is further smooth at every place $v\notin\Sigma_f$ such that $U_v$ is maximal, and semistable at every place $v\in\Sigma_f$ (where $U_v$ is maximal by assumption). For general $U$ (still with $U_v$ maximal for all $v\in \Sigma_f$), the canonical integral model $\CX_{U}$ of $X_U$ over $O_{F}$ is constructed as follows. Take a sufficiently small open compact subgroup $U'$ of $\bb_f^\times$ such that $U_v'$ is maximal for all $v\in \Sigma_f$ and such that $U'$ is a normal subgroup of $U$. From the above paragraph, we have a canonical integral model $\CX_{U'}$ of $X_{U'}$ over $O_{F}$. The finite group $U/U'$ acts on $X_{U'}$ via the right Hecke translation, and the quotient is exactly $X_U$. The action extends to an action of $U/U'$ on $\CX_{U'}$, so the quotient scheme $$\CX_{U}:=\CX_{U'}/(U/U')$$ is an integral model of $X_U$ over $O_F$. As in \cite[Corollary 4.6]{YZ}, $\CX_U$ is a normal and $\QQ$-factorial integral scheme, projective and flat over $O_{F}$. Here ``$\QQ$-factorial'' means that any Weil divisor has a positive integer multiple which is a Cartier divisor. By Raynaud's abstract result in \cite[Appendice]{Ra}, $\CX_U$ is actually semistable over $O_F$, and smooth at every non-archimedean place $v$ split in $\BB$. The smoothness can be found at the top of page 195 of loc. cit. As a consequence, for any finite extension $H$ of $F$, the base change $\CX_U\times_{O_F}O_H$ is also flat and semistable, so it is still normal and $\QQ$-factorial. The integral model $\CX_U$ does not depend on the choice of $N$. We remark that $\CX_U$ is also $\QQ$-Gorenstein in the sense that there is a natural notion of dualizing sheaf $\omega_{\CX_{U}/O_F}$ as a $\QQ$-line bundle on $\CX_U$. In fact, denote by $\CX_U^{\mathrm{reg}}$ the regular locus of $\CX_U$. The singular locus $\CX_U\setminus \CX_U^{\mathrm{reg}}$ is finite since $\CX_U$ is normal. Then the relative dualizing sheaf $\omega_{\CX_U^{\mathrm{reg}}/O_F}$ is a line bundle over $\CX_U^{\mathrm{reg}}$. Take a divisor $\CD$ on $\CX_U^{\mathrm{reg}}$ linearly equivalent to $\omega_{\CX_U^{\mathrm{reg}}/O_F}$, and take $\CD'$ to be the Zariski closure of $\CD$ in $\CX_U$. By $\QQ$-factoriality, some multiple of $\CD'$ is a Cartier divisor, and thus the same multiple of $\omega_{\CX_U^{\mathrm{reg}}/O_F}$ extends to a line bundle on $\CX_U$. \subsubsection*{Arithmetic Hodge bundle} The exposition here follows that of \cite[\S4.2]{YZ}, which introduces the {canonical arithmetic Hodge bundle} $\CL_U$ over $\CX_U$. Recall that the {Hodge bundle} of $X_U$, as a $\QQ$-line bundle over $X_U$, is defined by $$ L_{U}= \omega_{X_{U}/F} \otimes\CO_{X_U}\Big(\sum_{Q\in X_U(\overline F)} (1-e_Q^{-1}) Q\Big). $$ We refer to \S\ref{sec intro shimura curve} for an explanation of the ramification indexes.
As in \S\ref{sec intro shimura curve}, denote $$S(U)=\Spec\, O_F\setminus \{v: U_v \text{ is not maximal}\}.$$ Then $\CL_U$ is a $\QQ$-line bundle on $\CX_U$ constructed as follows. If $U$ is sufficiently small in the sense that $U$ is contained in $U(N)=(1+N O_{\bb_f})^\times$ for some integer $N\geq 3$, then over the open subscheme $\CX_{U, S(U)}=\CX_{U}\times_{\Spec\, O_F} S(U)$, we have $$ \CL_U|_{\CX_{U, S(U)}}= \omega_{\CX_{U}/O_F} \otimes \CO_{\CX_{U, S(U)}}\Big(\sum_{Q\in X_U} (1-e_Q^{-1}) \CQ\Big). $$ Here $\omega_{\CX_{U}/O_F}$ is the relative dualizing sheaf, the summation is over closed points $Q$ of $X_U$, $\CQ$ is the Zariski closure of $Q$ in $\CX_{U,S(U)}$, and $e_Q$ is the ramification index of any point of $X_U(\ol F)$ corresponding to $Q$. If $U$ is a maximal open compact subgroup of $\bfcross$, for any sufficiently small normal open compact subgroup $U'$ of $\bfcross$ contained in $U$, we have $$ \CX_{U, S(U')}= \CX_{U',S(U')}/(U/U'), \quad \CL_{U}|_{\CX_{U, S(U')}}= \deg(\pi)^{-1}N_{\pi} (\CL_{U'}|_{\CX_{U', S(U')}}), $$ where $N_{\pi}:\Pic(\CX_{U', S(U')})\to \Pic(\CX_{U, S(U')})$ is the norm map with respect to the natural map $\pi: \CX_{U', S(U')}\to \CX_{U, S(U')}$. Varying $U'$, we glue $\{\CL_{U}|_{\CX_{U, S(U')}}\}_{U'}$ together to form the $\QQ$-line bundle $\CL_U$ over $\CX_U$ for maximal $U$. For general $U$, we take an embedding $U\subset U_0$ into a maximal $U_0$, and then define $\CL_U$ to be the pull-back of $\CL_{U_0}$ via the natural map $\CX_U \to \CX_{U_0}$. At any archimedean place $\sigma:F\to \BC$, the {Petersson metric} of $\CL_U$ is given by $$\|f(\tau)d\tau\|_{\mathrm{Pet}}=2\, \Im(\tau) |f(\tau)|,$$ where $\tau$ is the standard coordinate function on $\CH$, and $f(\tau)$ is any meromorphic modular form of weight 2 over $X_{U,\sigma}(\CC)$. Thus we have {the arithmetic Hodge bundle} $$\ol\CL_U=(\CL_U, \{\|\cdot\|_\sigma\}_\sigma).$$ It is a hermitian $\QQ$-line bundle over $\CX_U$. As in \cite[Theorem 4.7]{YZ}, the system $\{\ol\CL_U\}_U$ is compatible with pull-back morphisms. In other words, if $U'\subset U$ are two open compact subgroups, then the natural morphism $\pi:\CX_{U'}\to \CX_U$ gives $\pi^*\ol\CL_U= \ol\CL_{U'}$ in $\wh\Pic(\CX_{U'})_\QQ$. Moreover, the system is also compatible with the norm map in the sense that $N_{\pi}(\ol\CL_{U'})=\deg(\pi)\ol\CL_{U}$ in $\wh\Pic(\CX_{U})_\QQ$. This is used as the construction in \cite[Theorem 4.7]{YZ}, but can also be deduced from the identity $N_{\pi}\circ \pi^*=\deg(\pi)$ as operators in $\wh\Pic(\CX_{U})_\QQ$. Note that $X_U$ is connected but not geometrically connected over $F$. For any connected component $X_\alpha$ of $X_{U, \overline F}$, denote by $L_\alpha=L_{U,\alpha}$ the pull-back of $L_U$ to $X_\alpha$. Denote by $\kappa_U^\circ$ the degree of $L_\alpha$ over $X_\alpha$, which is independent of $\alpha$ by the Galois action. Denote $$\xi_U=(\kappa_U^\circ)^{-1}L_U, \quad \hat\xi_U=(\kappa_U^\circ)^{-1}\ol\CL_U, \quad \xi_\alpha=(\kappa_U^\circ)^{-1}L_\alpha,$$ which are respectively a $\QQ$-line bundle over $X_U$, a hermitian $\QQ$-line bundle over $\CX_U$, and a $\QQ$-line bundle over $X_\alpha$. \subsection*{Quotient at split places} If $F=\QQ$ and $U$ is maximal, by \cite[Lem. 2.1]{Yu2}, we have $$ \CL_U= \omega_{\CX_{U}/O_F} \otimes \CO_{\CX_{U}}\Big(\sum_{Q\in X_U} (1-e_Q^{-1}) \CQ\Big), $$ where $\CQ$ is the Zariski closure of $Q$ in $\CX_{U}$. This gives an equivalent definition of $\CL_U$ in this case.
If $F$ is totally real, the result does not hold over $O_F$, which is why we have to take the above intricate definition of $\CL_U$. However, the following result asserts that the result still holds at places $v$ split in $\BB$. It will not be used elsewhere in this paper, but we include it here for completeness. \begin{prop} Let $U'\subset U$ be a normal open subgroup which is sufficiently small as an open compact subgroup of $\bb_f^\times$. Let $v$ be a non-archimedean place of $F$ split in $\BB$ such that $U$ and $U'$ are maximal at $v$. Denote by $k$ the residue field of $v$ in $O_F$. Then the following hold. \begin{itemize} \item[(1)] The fibers $\CX_{U',k}$ and $\CX_{U, k}$ are smooth over $k$, and the morphism $\CX_{U',k}\to \CX_{U, k}$ induces an isomorphism $\CX_{U',k}/(U/U')\to \CX_{U, k}$. \item[(2)] The quotient morphism $\CX_{U'}\to \CX_{U}$ is \'etale at every generic point of the fiber $\CX_{U, k}$ of $\CX_{U}$ above $v$. \item[(3)] Over $S=\Spec\, O_F\setminus \Sigma_f$, there is a canonical isomorphism $$ \CL_{U, S}= \omega_{\CX_{U,S}/S} \otimes \CO_{\CX_{U,S}}\Big(\sum_{Q\in X_U} (1-e_Q^{-1}) \CQ\Big), $$ where $\CQ$ is the Zariski closure of $Q$ in $\CX_{U,S}$. \end{itemize} \end{prop} \begin{proof} By the uniformization, an element $g\in U$ acts trivially on $X_{U'}$ if and only if $g$ lies in $(F^\times U')\cap U=(F^\times \cap U)U'=O_F^\times U'$. It follows that the action induces a faithful action of the finite group $G=U/(O_F^\times U')$ on $X_{U'}$. We need the basic theory of quotients of schemes by finite groups. We refer to \cite[p. 66, Thm]{Mu} for the case of varieties; the current case is similar. In particular, the map $\CX_{U'}\to \CX_{U}$ is a topological quotient in the sense that the underlying topological space of $\CX_U$ is exactly the quotient of the underlying topological space of $\CX_{U'}$ by $G$. Moreover, if $\CX_{U'}$ is a union of affine open subschemes $\mathrm{Spec}(A)$ stabilized by $G$, then the quotient $\CX_{U}$ is the union of affine open subschemes $\mathrm{Spec}(A^G)$. As $\CX_{U'}$ is regular, the affine ring $A$ is normal, so $A^G$ is also normal. This implies that $\CX_U$ is normal. By the action of $G$ on $\CX_{U',k}$, we also have a quotient $\CX_{U',k}/G$. There is a natural morphism $\CX_{U',k}/G \to \CX_{U,k}$, induced by the homomorphism $A^G\otimes_{O_{F,(v)}} k \to (A\otimes_{O_{F,(v)}} k)^G$ from the above description of the quotient process. The morphism $\CX_{U',k}/G \to \CX_{U,k}$ is a homeomorphism, since the quotients are topological quotients. As $\CX_{U',k}$ is smooth over $k$, the quotient $\CX_{U',k}/G$ is also smooth over $k$. By \cite[Appendice, p. 195]{Ra}, $\CX_{U,k}$ is also smooth. This forces the homeomorphism $\CX_{U',k}/G \to \CX_{U,k}$ to be an isomorphism. This proves (1). Now we see that (2) implies (3). By varying $U'$, it suffices to prove the isomorphism of (3) over the local ring $O_{F,(v)}$ of $O_{F}$ at $v$. Denote $\CX_{U,(v)}=\CX_U\times_{O_F}O_{F,(v)}$ and $\CX_{U',(v)}=\CX_{U'}\times_{O_F}O_{F,(v)}$ for convenience. As above, we already know that $\CX_{U,(v)}$ is smooth over $O_{F,(v)}$. Once (2) holds, the ramification divisor of $\pi:\CX_{U',(v)}\to \CX_{U,(v)}$ is horizontal, thus it is just the Zariski closure of the ramification divisor of $\CX_{U'}\to \CX_{U}$.
This proves that in the case $|\Sigma_f|>1$, $$ N_\pi(\omega_{\CX_{U',(v)}/O_{F,(v)}}) \simeq \omega_{\CX_{U,(v)}/O_{F,(v)}} \otimes \CO_{\CX_{U,(v)}}\Big(\sum_{Q\in X_U} (1-e_Q^{-1}) \CQ_{O_{F,(v)}}\Big); $$ in the case $|\Sigma_f|=1$, the formula holds after a suitable modification at the cusps. This proves (3). Now we prove (2), which is the essential part of the proposition. Let $\xi$ be a generic point of (an irreducible component of) $\CX_{U',k}$, and denote by $k(\xi)$ the residue field of $\xi$. Denote the stabilizers $$G_\xi^+=\{g\in G:g(\xi)=\xi\}, \quad G_\xi=\{g\in G_\xi^+: g \text{\ acts trivially on } k(\xi)\}.$$ We claim that $G$ acts freely at $\xi$ in the sense that $G_\xi$ is trivial. We first see that the claim implies that $\pi:\CX_{U'}\to \CX_{U}$ is \'etale at $\xi$. In fact, denote by $\eta$ the image of $\xi$ in $\CX_U$. Denote by $G\xi\subset \CX_{U'}$ the orbit of $\xi$, and denote $k(G\xi)=\oplus_{\xi'\in G\xi} k(\xi')$. Note that there is a homomorphism $k(\eta)\to k(G \xi)^G\simeq k(\xi)^{G_\xi^+}$. By the claim, $G_\xi^+$ acts faithfully on $k(\xi)$, so $[k(G \xi):k(\xi)^{G_\xi^+}]=|G|$. It follows that $[k(G \xi):k(\eta)]\geq |G|$. On the other hand, as $\CX_U$ is normal, the local ring of $\CX_{U}$ at $\eta$ is a discrete valuation ring, and thus it is easy to see that $\pi:\CX_{U'}\to \CX_{U}$ is flat over $\eta$. As a consequence, we have $\deg(\pi^{-1}(\eta)/\eta)=|G|$. Note that $\mathrm{Spec}\, k(G \xi)$ is the reduced structure of $\pi^{-1}(\eta)$. The bound of the degree forces $\pi^{-1}(\eta)=\mathrm{Spec}\, k(G \xi)$, $k(\eta)\simeq k(\xi)^{G_\xi^+}\simeq k(G \xi)^G$, and that $k(\xi)$ is Galois over $k(\eta)$. As a consequence, $\pi^{-1}(\eta)$ is unramified over $\eta$, and thus $\CX_{U'}\to \CX_{U}$ is \'etale at $\xi$. It remains to prove that $G$ acts freely at every generic point of $\CX_{U',k}$. It suffices to prove that $G$ acts freely at every generic point of $\CX_{U',\bar k}$. Our proof is based on the theory of Carayol \cite{Ca}. In particular, \cite[\S 6]{Ca} introduces a universal $p$-divisible group $E_\infty$ of height two over $\CX_{U'}$. As in \cite[\S 6.7]{Ca}, every geometric point of $\CX_{U',\bar k}$ is either supersingular or ordinary. By \cite[\S 11.2]{Ca}, the set of supersingular points of $\CX_{U',\bar k}$ can be expressed as $$ \CX_{U',\bar k}^{\rm ss}\simeq B^\times \backslash \ZZ\times B_{\BA_f^v}^\times /U'^v. $$ Here $B=B(v)$ is the nearby quaternion algebra over $F$ obtained by switching the invariant of $\BB$ at $v$, and $B^\times$ acts on $\ZZ$ via the canonical isomorphism $\tilde v:B_v^\times/O_{B_v}^\times\to \ZZ$, where $O_{B_v}$ is the unique maximal order of the division algebra $B_v$. Fix a supersingular point $x$ of $\CX_{U',\bar k}$, and assume that $x$ lies on an irreducible component $C$ of $\CX_{U',\bar k}$. By \cite[\S 6.6]{Ca}, the deformation of $E_\infty|_x$ over the category of local Artinian rings with residue field $\bar k$ is represented by the completion $\widehat \CO_{\CX', x}$ of the local ring of $\CX'=\CX_{U',O_{F_v}^{\rm ur}}$ at $x$. As $\widehat \CO_{C, x}=\widehat \CO_{\CX', x}/\wp$ for the maximal ideal $\wp$ of $O_{F_v}^{\rm ur}$, the deformation of $E_\infty|_x$ over the category of local Artinian $\bar k$-algebras with residue field $\bar k$ is represented by $\widehat \CO_{C, x}$. Moreover, the base change $E_\infty|_{\widehat \CO_{C, x}}$ is the universal $p$-divisible group.
By definition, the deformation induces an action of $\Aut(E_\infty|_x)\simeq O_{B_v}^\times$ on $\widehat \CO_{C, x}$, whose kernel is exactly $\Aut(E_\infty|_{\widehat \CO_{C, x}})$. A key property is that $\Aut(E_\infty|_{\widehat \CO_{C, x}})\simeq O_{F_v}^\times$. In fact, denote by $M$ the algebraic closure of the fraction field of $\widehat \CO_{C, x}$. Note that $\End(E_\infty|_{\widehat \CO_{C, x}})$ has natural injections into both $\End(E_\infty|_M)$ and $\End(E_\infty|_x)\simeq O_{B_v}$ as $O_{F_v}$-algebras. The $p$-divisible group $E_\infty|_M$ is ordinary, since the supersingular locus $\CX_{U',\bar k}^{\rm ss}$ is a finite set, and thus $\End(E_\infty|_M)\simeq O_{F_v}\oplus O_{F_v}$ by splitting $E_\infty|_M$ into a direct sum of a local part and an \'etale part. These force $\End(E_\infty|_{\widehat \CO_{C, x}})=O_{F_v}$, and thus $\Aut(E_\infty|_{\widehat \CO_{C, x}})\simeq O_{F_v}^\times$. Hence, we conclude that the kernel of $O_{B_v}^\times \to \Aut(\widehat \CO_{C, x})$ is $O_{F_v}^\times$. Now we are ready to prove that $G$ acts freely at every generic point of $\CX_{U',k}$. Let $C$ be an irreducible component of $\CX_{U',\bar k}$, and let $\xi_C$ be its generic point. Let $x$ be a supersingular point of $C$. To prove that $G$ acts freely on $\xi_C$, it suffices to prove that $G_x$ acts freely on the local ring $\CO_{C, x}$ of $x\in C$, so it suffices to prove that $G_x$ acts freely on the completion $\widehat \CO_{C, x}$ of $\CO_{C, x}$. Assume that $x$ is represented by $(m_x,b_x)\in \ZZ\times B_{\BA_f^v}^\times$ in the above coset expression of $\CX_{U',\bar k}^{\rm ss}$. An element $g\in U$ fixes $x$ if and only if $$B^\times (m_x,b_x U'^v) =B^\times (m_x,b_x (g^v)^{-1} U'^v),$$ which holds if and only if there is $\gamma\in B^\times$ such that $$ \tilde v(\gamma)=0,\quad g^v\in b_x^{-1} \gamma b_x U'^v. $$ In this case, $g$ acts on $\widehat \CO_{C, x}$ via $\gamma\in O_{B_v}^\times$, and the action of $O_{B_v}^\times$ on $\widehat \CO_{C, x}$ is the one described above in terms of the deformation theory. In particular, the kernel of $O_{B_v}^\times \to \Aut(\widehat \CO_{C, x})$ is $O_{F_v}^\times$. Assume that $g$ acts trivially on $\widehat \CO_{C, x}$. This implies $\gamma\in O_{F_v}^\times$ by the kernel identity. Recall that $\gamma\in B^\times$ is a global element, so $\gamma\in F^\times$. Then the above relation of $\gamma$ and $g$ becomes $$ \tilde v(\gamma)=0,\quad g^v\in \gamma U'^v. $$ It follows that $g\in F^\times U'$, and thus $g$ lies in $(F^\times U')\cap U=O_F^\times U'$. Then the image of $g$ in $G=U/(O_F^\times U')$ is trivial. This proves that $G_x$ acts freely on $\widehat \CO_{C, x}$, and thus $G$ acts freely on $\xi_C$. The proof is complete. \end{proof} \subsubsection*{CM points} Let $E/F$ be a totally imaginary quadratic extension, with a fixed embedding $E_\BA\hookrightarrow \BB$ over $\BA=\BA_F$. As in \S\ref{choices}, for any archimedean place $\sigma:F\hookrightarrow \CC$, we also fix an embedding $E\hookrightarrow B(\sigma)$ compatible with $E_\BA\hookrightarrow \BB$ and $B(\sigma)_{\af}\to \BB_f$. Then $E^\times$ acts on $\CH^\pm$ via $E\hookrightarrow B(\sigma)$. Fix an embedding $\sigma_0:F\hookrightarrow \CC$. Let $z_0\in\CH$ be the unique fixed point of $E\cross$ on $\CH$, where the action is via $E\to B(\sigma_0)$. Via the complex uniformization, we have a CM point $$ [\beta]_U=[z_0,\beta]_U $$ on $X_{U, \sigma_0}(\BC)$ for any $\beta\in \bfcross$. 
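Let us record a standard observation about this parametrization. The stabilizer of $z_0$ in $B(\sigma_0)^\times$ is exactly $E^\times$: an element fixing $z_0$ maps into the commutative stabilizer of $z_0$ in $\GL_2(\RR)$, hence commutes with the image of $E$, and thus lies in the centralizer of $E$ in $B(\sigma_0)$, which is $E$ itself. Consequently,
$$
[\beta]_U=[\beta']_U \quad\Longleftrightarrow\quad \beta'\in E^\times\beta U,
$$
so the set of CM points $\{[\beta]_U:\beta\in\bb_f^\times\}$ is identified with the coset space $E\cross\bs \bb_f^\times/U$.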
By definition of the canonical model, $[\beta]_U$ descends to an algebraic point of $X_U$ defined over $E^\ab$. Here $E^\ab$ is the maximal abelian extension of $E$, endowed with embeddings $F\to E\to E^\ab\to\ol F\to \CC$ refining $\sigma_0$. We may also abbreviate $[\beta]_U$ as $[\beta]$ or just $\beta$. In particular, we have the CM point $$P=P_U=[z_0,1]_U.$$ We will view the finite abelian group $$C_U=E\cross\bs E^\times_{\af} / (U\cap E^\times_{\af})$$ as a set of CM points via the natural injection $$ C_U \lra X_U(E^\ab), \quad t\longmapsto [t]_U. $$ Note that the point $[t]_U$ depends only on the class of $t$ in $C_U$. For any CM point $[\beta]_U$ with $\beta\in \bfcross$, assume that it lies in the connected component $X_\alpha$ of $X_{U,\overline F}$. Then the divisor $$[\beta]_U^\circ:=[\beta]_U-\xi_\alpha$$ is a divisor of degree 0 on $X_\alpha$, also viewed as a divisor on $X_{U,\overline F}$ by push-forward. We usually abbreviate $\beta^\circ=[\beta]_U^\circ$. By abuse of notations, we may also write $\xi_\alpha$ as $\xi_\beta$. Note the set of connected components of $X_{U, \overline F}$ is isomorphic to the group $F_+^\times\bs\afcross/q(U)$, and under this isomorphism we have $\alpha=q(\beta)$ (assuming $[\beta]_U$ lies in $X_\alpha$). \subsubsection*{Height series} Finally, we recall the generating series in \cite[\S3.4.5]{YZZ}. Let $\phi\in \ol\CS (\BB\times \BA^\times)$ be a Schwartz function invariant under $K=U\times U$; i.e., for any $(h_1,h_2)\in U\times U$, the Weil representation $r(h_1,h_2)\phi=\phi$ in the sense of \S\ref{sec theta eisenstein}. More precisely, this means that $$ \phi(h_1^{-1}xh_2 , q(h_1)q(h_2)^{-1}u)=\phi(x,u), \quad (x,u)\in \BB\times \across. $$ Then the generating series is defined by \begin{equation*} Z(g,\phi)_U=Z_0(g,\phi)_U+Z_*(g,\phi)_U, \quad g\in \gla, \end{equation*} where \begin{eqnarray*} Z_0(g,\phi)_U&=& - \sum_{\alpha\in F_+\cross\bs \afcross/q(U)} \sum_{u\in \mu_U^2 \bs F\cross} E_0(\alpha^{-1}u,r(g)\phi)\ L_{K,\alpha},\\ Z_*(g,\phi)_U&=& w_U \sum_{a\in F^\times} \sum_{x\in U\bs \bb_f^\times/U} r(g)\phi(x,aq(x)^{-1})\ Z(x)_U. \end{eqnarray*} Here $\mu_U=F^\times \cap U$, and $w_U=|\{1,-1\}\cap U|$ is equal to 1 or 2. For any $x\in \bfcross$, $Z(x)_U$ denotes the Hecke correspondence on $X_U$ determined by the double coset $UxU$, which is also viewed as a divisor on $X_U\times X_U$. By \cite[Theorem 3.17]{YZZ}, $Z(g,\phi)_U$ is an automorphic form of $g\in\gla$ with coefficients in $\Pic(X_U\times X_U)_\CC$. Let $E/F$ be a totally imaginary quadratic extension, with a fixed embedding $E_\BA\hookrightarrow \BB$ over $\BA$. Recall from \cite[\S3.5.1, \S5.1.2]{YZZ} and \cite[\S8.1]{YZ} that we have a height series \begin{eqnarray*} Z(g, (t_1, t_2), \phi)_U = \pair{Z(g, \phi)_U\ t_1^{\circ},\ t_2^{\circ} }_{\NT}, \quad t_1,t_2\in E^\times(\af). \end{eqnarray*} Here $Z(g, \phi)_U$ acts on $t_1^{\circ}$ as a correspondence, and the Neron--Tate height over $F$ is defined as in \cite[\S7.1.2]{YZZ}. By the modularity, $Z(g, (t_1, t_2), \phi)_U$ is an automorphic form in $g\in \gla$. By \cite[Lemma 3.19]{YZZ}, it is actually a cusp form. In particular, the constant term $Z_0(g,\phi)$ of the generating function plays no role here. Throughout this section, we will assume the basic conditions in \S \ref{choices}. In particular, $\phi$ is standard at every archimedean place $v$. It follows that $Z(g, \phi)_U$ and $Z(g, (t_1, t_2), \phi)_U$ are automorphic forms in $g\in \gla$, holomorphic of parallel weight two at archimedean places. 
We will assume the restrictive conditions when it comes to explicit calculations. \subsection{Weakly admissible extensions} \label{sec admissible} In order to decompose the height series in terms of the arithmetic Hodge index theorem of Faltings--Hriljac, the notion of admissible extensions is used in \cite{YZ, YZZ}. However, there is a minor mistake involving misconceptions about admissible extensions in \cite{YZ, YZZ}. In fact, the Green's function is not admissible, but only weakly admissible in the current sense. As we will see, this mistake does not affect the main results of \cite{YZ, YZZ}, but it does affect the results here. In the following, we review the admissibility notion as described in \cite[\S 7.1-7.2]{YZZ}, introduce the weak admissibility notion along the way, and then point out the mistake and the correction. \subsubsection*{Terminology for arithmetic intersection theory} Here we review and modify some terminology of \cite[\S7.1.3-7.1.5]{YZZ} and make some additional definitions. Our setting is slightly more general than that of loc. cit. in allowing the arithmetic surface to be $\QQ$-factorial (instead of being integral and semistable). Note that the integral model $\CX_U$ of the Shimura curve is $\QQ$-factorial as in the last subsection, and it remains so under reasonable base changes. Let $X$ be a projective and smooth curve over a number field $F$. By taking the definitions over every connected component, we can assume that $X$ is connected, but we do not assume that it is geometrically connected. Denote by $F'$ the algebraic closure of $F$ in the function field of $X$, so that $X$ is geometrically connected over $F'$. Let $\CX$ be a projective, flat, normal and $\QQ$-factorial integral model of $X$ over $O_F$. By $\QQ$-factoriality, local intersection multiplicities of properly intersecting Weil divisors can be defined as rational numbers, so we can still consider arithmetic intersection theory over $\CX$. In the following, a divisor on $\CX$ means a Weil divisor on $\CX$, and the finite part of an arithmetic divisor on $\CX$ is allowed to be a Weil divisor. By taking linear combinations with rational coefficients, we also have the notion of $\QQ$-divisors and arithmetic $\QQ$-divisors. Let $\wh \CD_1=(\CD_1, g_1)$ and $\wh \CD_2=(\CD_2, g_2)$ be arithmetic $\QQ$-divisors on $\CX$. Here as a convention, $\CD_i$ is a $\QQ$-divisor on $\CX$, and $g_i$ is a Green's function of $\CD_i(\CC)$ on $X(\CC)=\coprod_{\sigma:F\to \CC} {X_{\sigma}(\CC)}$. Note that $X_{\sigma}(\CC)$ is not connected unless $F'=F$, but this does not affect our exposition. Recall that the arithmetic intersection number is defined as $$ \wh \CD_1\cdot \wh \CD_2 =\wh \CD_1\cdot \CD_2 + \sum_{\sigma:F\to \CC} \int_{X_{\sigma}(\CC)} g_{2,\sigma}\, c_1(\CD_{1,\sigma}, g_{1,\sigma}), $$ where the extra subscripts $\sigma$ indicate base change or restriction to the compact Riemann surface $X_{\sigma}(\CC)$, and $c_1(\CD_{1,\sigma}, g_{1,\sigma})$ denotes the Chern form of the hermitian line bundle associated to $(\CD_{1,\sigma}, g_{1,\sigma})$ on $X_{\sigma}(\CC)$. It remains to explain the intersection number $\wh \CD_1\cdot \CD_2$, which is of independent importance.
If $\CD_1$ intersects $\CD_2$ properly on $\CX$, we have $$ \wh \CD_1\cdot \CD_2=(\CD_1\cdot \CD_2)+ \sum_{\sigma:F\to \CC} g_{1,\sigma}(\CD_{2,\sigma}(\CC)), $$ where the finite part $$ \CD_1\cdot \CD_2 =\sum_{v\nmid\infty} (\CD_1\cdot \CD_2)_{v} $$ is the usual intersection number on $\CX$ decomposed in terms of non-archimedean places $v$ of $F$, and the infinite part $g_{1,\sigma}(\CD_{2,\sigma}(\CC))$ is understood as $\sum_i a_ig_{1,\sigma}(z_i)$ if $\CD_{2,\sigma}(\CC)=\sum_i a_i z_i$ as a divisor on $X_{\sigma}(\CC)$. In general, there is an arithmetic $\QQ$-divisor $\wh \CD_1'=(\CD_1', g_1')$ on $\CX$ linearly equivalent to $\wh \CD_1$ such that $\CD_1'$ intersects $\CD_2$ properly, and then we set $\wh \CD_1\cdot \CD_2=\wh \CD_1'\cdot \CD_2.$ The result is independent of the choice of $\wh \CD_1'$. Note that $\CX$ is actually a scheme over $\Spec\, O_{F'}$ since it is normal. We can also group the above intersection numbers in terms of places over $F'$. For example, if $\CD_1$ intersects $\CD_2$ properly on $\CX$ as above, then $$ \wh \CD_1\cdot \CD_2=(\CD_1\cdot \CD_2)+ \sum_{\sigma':F'\to \CC} g_{1,\sigma'}(\CD_{2,\sigma'}(\CC)), $$ where the finite part $$ \CD_1\cdot \CD_2 =\sum_{v'\nmid\infty} (\CD_1\cdot \CD_2)_{v'} $$ is a sum over non-archimedean places $v'$ of $F'$. \subsubsection*{Flat arithmetic divisors} An arithmetic $\QQ$-divisor $\wh \CD=(\CD,g_\CD)$ on $\CX$ is said to be \emph{flat} if its intersection number with any vertical arithmetic divisor is 0, or equivalently the following two conditions hold: \begin{itemize} \item[(a)] The Chern form $c_1(\CD_{\sigma}(\CC), g_{\CD,\sigma})$ on $X_{\sigma}(\BC)$ is 0 for any embedding $\sigma:F\to \CC$; \item[(b)] The intersection number $\CD \cdot \CV=0$ for any irreducible component $\CV$ of any closed fiber of $\CX$ above $\Spec\,O_{F}$. \end{itemize} These imply that $\CD$ has degree zero on $X$ over $F$. The notion ``flat'' depends only on the arithmetic divisor class of $\wh \CD$, so it is naturally a property for arithmetic divisor classes or hermitian line bundles. The Hodge index theorem of Faltings and Hriljac (cf. \cite[Theorem 7.4]{YZZ}) also holds here. Namely, let $\wh \CD_1=(\CD_1, g_1)$ and $\wh \CD_2=(\CD_2, g_2)$ be two flat arithmetic $\QQ$-divisors on $\CX$, then $$ \wh\CD_1\cdot \wh\CD_2=-\pair{\CD_{1,F},\CD_{2,F}}_\NT. $$ In fact, let $\CX'\to \CX$ be a resolution of singularity. The pull-back of $\wh\CD_1$ and $\wh\CD_2$ are still flat over $\CX'$, so the result follows from that on $\CX'$. \subsubsection*{Weakly admissible extensions} Resume the above notations for $(X, \CX, F, F')$. Now we introduce our key admissibility notion. Fix an arithmetic divisor class $\hat\xi \in \wh{\Pic}(\CX)_\QQ$ whose generic fiber has degree 1 on $X$ over $F'$. Let $\wh \CD=(\CD,g_\CD)$ be an arithmetic $\QQ$-divisor on $\CX$. We can always write $\CD=\CH+\CV$ where $\CH$ is the horizontal part of $\CD$, and $\CV$ is the vertical part of $\CD$. The arithmetic divisor $\wh \CD$ is called \emph{$\hat\xi$-admissible} if the following conditions hold: \begin{itemize} \item[(1)] The difference $\wh \CD-\deg_{F'} (\CD_{F'})\cdot \hat\xi$ is flat over $\CX$; \item[(2)] The intersection number $(\CV\cdot \hat\xi)_{v'}=0$ for any non-archimedean place $v'$ of $F'$; \item[(3)] The integral $\ds\int_{X_{\sigma'}(\BC)} g_\CD c_1(\hat\xi)=0$ for any embedding $\sigma':F'\to \CC$. \end{itemize} The arithmetic divisor $\wh \CD$ is called \emph{weakly $\hat\xi$-admissible} if it satisfies conditions (1) and (2). 
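Let us note how the two notions differ concretely. If $\wh\CD=(\CD,g_\CD)$ is weakly $\hat\xi$-admissible, then conditions (1) and (2) are insensitive to adding locally constant functions to $g_\CD$, while condition (3) can always be achieved by such a shift: since the generic fiber of $\hat\xi$ has degree 1 over $F'$, we have $\ds\int_{X_{\sigma'}(\BC)}c_1(\hat\xi)=1$ for every embedding $\sigma':F'\to\CC$, so replacing $g_\CD$ by
$$
g_\CD-\int_{X_{\sigma'}(\BC)} g_\CD\, c_1(\hat\xi)
$$
on each component $X_{\sigma'}(\BC)$ produces a $\hat\xi$-admissible arithmetic $\QQ$-divisor with the same finite part.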
A \emph{$\hat\xi$-admissible extension} (resp. \emph{weakly $\hat\xi$-admissible extension}) of a $\QQ$-divisor $D_0$ over $X$ is a {$\hat\xi$-admissible} (resp. \emph{weakly $\hat\xi$-admissible}) arithmetic $\QQ$-divisor $\wh \CD=(\CD,g_\CD)$ over $\CX$ such that the generic fiber $\CD_{F}=D_0$ over $X$. The notion \emph{$\hat\xi$-admissible} is introduced in \cite[\S7.1.5]{YZZ}, while the notion \emph{weakly $\hat\xi$-admissible} is a new one added here. Note that the {$\hat\xi$-admissible} extension exists and is unique. On the other hand, without condition (3), condition (1) only determines the Green's function up to constant functions over $X_{\sigma'}(\CC)$. Nonetheless, in our calculation over the Shimura curve, we do have a fixed choice of Green's functions as follows. To illustrate the idea, we will specify a symmetric and smooth function $g: X(\CC)\times X(\CC)\setminus \Delta\to \RR$ such that for any $P\in X(\CC)$, the 1-variable function $g(P,\cdot)$ is a Green's function for the divisor $P$ over $X(\CC)$ with curvature form equal to $c_1(\hat\xi)$. Then for any divisor $D$ over $X$, we take the Green's function $g_D=g(D,\cdot)$. Let $D_1, D_2$ be two $\QQ$-divisors over $X_{\ol F}$. Then $D_1, D_2$ are realized as $\QQ$-divisors over $X_{L}$ for a finite extension $L$ of $F$. Assume that $\CX_{O_L}$ is still $\QQ$-factorial. By abuse of notations, still denote the pull-back of $\hat\xi$ to $\CX_{O_L}$ by $\hat\xi$. For $i=1,2$, let $\wh D_i=(\ol D_i+\CV_i, g_i)$ be a weakly $\hat\xi$-admissible extension of $D_i$ over $\CX_{O_L}$. Here $\ol D_i$ is the Zariski closure of $D_i$ in $\CX_{O_L}$, $\CV_i$ is the (uniquely determined) vertical $\QQ$-divisor over $\CX_{O_L}$, and $g_i=g_{D_i}$ is a Green's function over $X_L(\CC)$ determined by the 2-variable function $g$ above. As in \cite[\S7.1.6]{YZZ}, it will be convenient to denote $$ \pair{D_1, D_2}:=-\frac{1}{[L:F]}\wh D_1\cdot \wh D_2. $$ The definition is independent of the choice of $L$. We will have a decomposition $\pair{\cdot, \cdot}=-i-j$ in the following. We first have equalities $$ \wh D_1\cdot \wh D_2 =(\ol D_1, g_1)\cdot (\ol D_2+\CV_2, g_2) =(\ol D_1, g_1)\cdot \ol D_2+\ol D_1\cdot \CV_2+ \int_{X_L(\CC)} g_2\, c_1(D_1,g_1). $$ Here the first equality holds by $\CV_1\cdot \wh D_2=0$, a consequence of condition (1) for $\wh D_2$ and condition (2) for $\CV_1$, and the intersection number $(\ol D_1, g_1)\cdot \ol D_2$ is explained above. So we can write $$ \pair{D_1, D_2}=-i(D_1,D_2)-j(D_1,D_2) $$ with $$ i(D_1,D_2):=\frac{1}{[L:F]} (\ol D_1, g_1)\cdot \ol D_2 $$ and $$ j(D_1,D_2):=\frac{1}{[L:F]} \ol D_1\cdot \CV_2+\frac{1}{[L:F]} \int_{X_L(\CC)} g_2\, c_1(D_1,g_1). $$ We further have a decomposition according to places $v$ of $F$ by $$j(D_1,D_2)=\sum_{v} j_v(D_1,D_2)\log N_v$$ with $$ j_v(D_1,D_2):= \begin{cases} \ds\frac{1}{[L:F]} (\ol D_1\cdot \CV_2)_v, & v\nmid\infty,\\ \ds \frac{1}{[L:F]} \int_{X_v(\CC)} g_2\, c_1(D_1,g_1), & v\mid\infty. \end{cases} $$ Here we take the convention $\log N_v=1$ for archimedean $v$. The local intersection numbers make sense by viewing $\CX_{O_L}$ as a scheme over $O_F$. The pairing $j_v(D_1,D_2)$ is symmetric in $D_1,D_2$ for any place $v$. For example, if $v$ is non-archimedean, this is because $$ (\ol D_1\cdot \CV_2)_v-(\ol D_2\cdot \CV_1)_v =((\ol D_1+\CV_1)\cdot \CV_2)_v-((\ol D_2+\CV_2)\cdot \CV_1)_v =(\deg_{F'}(D_1)\hat\xi\cdot \CV_2)_v-(\deg_{F'}(D_2)\hat\xi\cdot \CV_1)_v =0.
$$ Note that $j_v(D_1,D_2)$ for archimedean $v$ does not necessarily vanish if $\wh D_2$ is not $\hat\xi$-admissible but only weakly $\hat\xi$-admissible. This is different from \cite[\S7.1.7]{YZZ}, which considers the $\hat\xi$-admissible case, and thus has $j_v(D_1,D_2)=0$ for archimedean $v$. If $D_1, D_2$ have disjoint supports over $X_{\ol F}$, we can also decompose $$i(D_1,D_2)=\sum_{v} i_v(D_1,D_2)\log N_v$$ with $$ i_v(D_1,D_2):= \begin{cases} \ds\frac{1}{[L:F]} (\ol D_1\cdot \ol D_2)_v, & v\nmid\infty,\\ \ds\frac{1}{[L:F]} g_1(D_{2,v}(\CC)), & v\mid\infty. \end{cases} $$ Each of the pairings $i,j,i_v,j_v$ is symmetric as long as it is defined. In fact, for non-archimedean $v$, this is automatic for $i_v$, and this holds for $j_v$ by $(\ol D_1\cdot \CV_2)_v=-(\CV_1\cdot \CV_2)_v$. For archimedean $v$, $g_1(D_{2,v}(\CC))=g_2(D_{1,v}(\CC))$ as they come from the same symmetric 2-variable function $g$, so $i_v$ is symmetric, which implies the symmetry of $j_v$ by Stokes' formula. As in \cite[\S7.1.7]{YZZ}, we can also introduce the pairings $i_{\bar v}$ and $j_{\bar v}$, and write $i_v$ and $j_v$ respectively as averages of $i_{\bar v}$ and $j_{\bar v}$ over the Galois group $\Gal(\ol F/F)$. The mistake in \cite{YZZ,YZ} is that the arithmetic extensions used to compute the height pairing are not $\hat\xi$-admissible, but only weakly $\hat\xi$-admissible. This produces extra terms $j_v(D_1,D_2)$ for archimedean $v$. In the following, we first review the Green's function, compute this extra term, and then decompose the height series by taking the integration term into account. We will see that the extra term does not affect the main results of \cite{YZZ,YZ}, but does affect the main result of this paper. \subsubsection*{Local nature} The pairings $i_v, j_v$ for a non-archimedean $v$ have a local nature in that they can be defined in terms of the integral model $\CX_{O_{F_v}}$ instead of $\CX$. Here we describe the situation briefly. Let $R$ be a discrete valuation ring with fraction field $K$. Let $Y$ be a projective and smooth curve over $K$. For simplicity, assume that $Y$ is geometrically connected; we omit the extension of the notions to the more general setting. Let $\CY$ be a projective, flat, normal and $\QQ$-factorial integral model of $Y$ over $R$. By a divisor (or $\QQ$-divisor), we mean a Weil divisor (or Weil $\QQ$-divisor). Fix a $\QQ$-divisor class $\xi \in {\Pic}(\CY)_\QQ$ whose generic fiber has degree 1 on $Y$. A $\QQ$-divisor $\CD=\CH+\CV$ on $\CY$, with a horizontal part $\CH$ and a vertical part $\CV$, is called \emph{$\xi$-admissible} if the following conditions hold: \begin{itemize} \item[(1)] The difference $\CD-\deg(\CD_{K})\cdot \xi$ is \emph{flat} in that its intersection with any irreducible component of the special fiber of $\CY$ is 0; \item[(2)] The intersection number $\CV\cdot \xi=0$. \end{itemize} Then we have the notion of a \emph{$\xi$-admissible extension} of a $\QQ$-divisor over $Y$. Let $D_1, D_2$ be two $\QQ$-divisors over $Y$. For $i=1,2$, let $\wh D_i=\ol D_i+\CV_i$ be a $\xi$-admissible extension of $D_i$ over $\CY$. Here $\ol D_i$ is the Zariski closure of $D_i$ in $\CY$, and $\CV_i$ is the (uniquely determined) vertical $\QQ$-divisor over $\CY$. Assume that the supports of $D_1$ and $D_2$ on $Y$ are disjoint. Then we have $$ \wh D_1\cdot \wh D_2 =\ol D_1\cdot \ol D_2+\ol D_1\cdot \CV_2.
$$ This gives a decomposition $$ \pair{D_1, D_2}=-i_R(D_1,D_2)-j_R(D_1,D_2) $$ with $$ \pair{D_1, D_2}_R=-\wh D_1\cdot \wh D_2,\quad i_R(D_1,D_2)=\ol D_1\cdot \ol D_2, \quad j_R(D_1,D_2)= \ol D_1\cdot \CV_2. $$ Note that $i_R(D_1,D_2)$ is only defined if the supports of $D_1$ and $D_2$ on $Y$ are disjoint, but $j_R(D_1,D_2)$ can actually be defined by the same formula without this condition. It is worth noting that if the special fiber of $\CY$ is irreducible, then $\CV_1=\CV_2=0$ and $j_R(D_1,D_2)=0$ identically. Finally, we mention a projection formula for $i_R$ and $j_R$. With the pair $(Y, \CY, \xi)$ over $R$, let $(Y',\CY',\xi')$ be another such pair. Assume that there is finite morphism $\pi:Y'\to Y$ extending to an $R$-morphism $\tilde\pi:\CY'\to \CY$ such that $\tilde\pi^*\xi=\xi'$. Let $D$ and $D'$ be $\QQ$-divisors over $Y$ and $Y'$ respectively. Then we have $$ j_R(D',\pi^* D)=j_R(\pi_*D', D). $$ If the supports of $\pi_*D'$ and $D$ on $Y$ are disjoint, we also have $$ i_R(D',\pi^* D)=i_R(\pi_*D', D). $$ \subsection{Integral of the Green's function} Return to the situation that $F$ is totally real, and $X_U$ is a Shimura curve over $F$. Fix an archimedean place $v$ of $F$. The Green's function $g$ over $X_{U,v}(\BC)$ is defined in \cite[\S8.1.1]{YZZ} following the original idea of Gross--Zagier \cite{GZ}. As noted above, the Green's function is not $\hat\xi$-admissible but only weakly $\hat\xi$-admissible. The goal here is to compute the integral of the Green's function, which measures the failure of admissibility. The results of this subsection work under the basic conditions in \S\ref{choices}. Let us first recall the Green's function briefly. For any two points $z_1, z_2\in \CH$, the hyperbolic cosine of the hyperbolic distance between them is given by $$d(z_1,z_2)=1+\frac{|z_1-z_2|^2}{2\Im(z_1)\Im(z_2)}.$$ It is invariant under the action of $\gl(\RR)$. For any $s\in \BC$ with $\Re(s)>0$, denote $$m_s(z_1,z_2)=Q_s(d(z_1,z_2)),$$ where $$Q_s(t)=\int_0^{\infty} \left(t+\sqrt{t^2-1}\cosh u\right)^{-1-s}du $$ is the Legendre function of the second kind. Denote by $B=B(v)$ the nearby quaternion algebra. For any two distinct points of $$X_{U,v}(\BC)=B_+\cross\bs \CH\times B^\times(\BA_f) /U$$ represented by $(z_1, \beta_1), (z_2, \beta_2)\in \CH\times B^\times_{\af} $, we denote $$ g_s((z_1, \beta_1), (z_2, \beta_2)): =\sum_{\gamma\in \mu_U \backslash B_+\cross} m_s(z_1,\gamma z_2)\ 1_{U}(\beta_1^{-1}\gamma\beta_2). $$ It converges for $\Re(s)>0$ and has meromorphic continuation to $s=0$ with a simple pole. The Green's function $g: X_{U,v}(\BC)^2\setminus \Delta\to \RR$ is defined by $$g((z_1, \beta_1), (z_2, \beta_2)):=\quasilim \ g_s((z_1, \beta_1), (z_2, \beta_2)).$$ Here $\quasilim$ denotes the constant term at $s=0$ of the Laurent expansion of $g_s((z_1, \beta_1), (z_2, \beta_2))$. In particular, for a fixed point $P=(z_1, \beta_1)\in X_{U,v}(\BC)$, we can view $g(P,\cdot)$ as a function over $X_{U,v}(\BC)$ with logarithmic singularity at $P$. The first part of the following result is a classical one in the computation of Selberg's trace formula, which is a special case of \cite[Proposition 6.3.1(3)]{OT}. The second part of the following result is essentially a special case of \cite[Proposition 3.1.2]{OT}, and our proof is a variant of that of the loc. cit.. \begin{lem}\label{int of Green} Let $v$ be an archimedean place of $F$ and $P\in X_{U,v}(\BC)$ be a point. 
\begin{enumerate}[(1)] \item The residue $\Res_{s=0}g_s\, (P,Q)$ is nonzero only if $Q$ lies in the same connected component as $P$. In that case, $$ \Res_{s=0}g_s\, (P,Q) =\frac{1}{\kappa_U^\circ}, $$ where $\kappa_U^\circ$ denotes the degree of $L_U$ on a connected component of $X_{U,v}(\BC)$. \item The integral $$ \int_{X_{U,v}(\BC)} g(P,\cdot) c_1(\ol\CL_U)=-1. $$ \end{enumerate} \end{lem} \begin{proof} We will prove that for $\Re(s)>0$, $$ \int_{X_{U,v}(\BC)} g_s(P,\cdot) c_1(\ol\CL_U)= \frac{1}{s(s+1)}. $$ This implies (2) by taking the constant term. It also implies (1). In fact, the differential equation of the Legendre function transfers to a functional equation $$ \Delta g_s(P,\cdot)=s(s+1) g_s(P,\cdot). $$ This implies $\Delta (\Res_{s=0} g_s(P,\cdot))=0$, since $g_s(P,\cdot)$ has at most a simple pole at $s=0$. It follows that $\Res_{s=0} g_s(P,\cdot)$ is constant on the connected component of $P$. Then the integration of $g_s(P,\cdot)$ determines the constant. Now prove the formula for the integration of $g_s(P,\cdot)$. Denote $P=(z_1, \beta_1)$ as above. As in \cite[\S8.1.1]{YZZ}, the function $g_s(P,\cdot)$ is nonzero only over the connected component of $X_{U,v}(\BC)$ containing $P$. This connected component is isomorphic to $\Gamma\bs\CH$ with $\Gamma= B_+\cross\cap \beta_1 U \beta_1^{-1}$, and the embedding $\Gamma\bs\CH\to X_{U,v}(\BC)$ is given by $z\mapsto (z,\beta_1)$. Then the induced function $g_s(P,\cdot)$ on $\Gamma\bs\CH$ is given by $$ g_s(P,z)=g_s((z_1, \beta_1),(z,\beta_1)) =\sum_{\gamma\in \mu_U \backslash B_+\cross} m_s(z_1,\gamma z)\ 1_{U}(\beta_1^{-1}\gamma\beta_1) =\sum_{\gamma\in \mu_U \backslash \Gamma} m_s(z_1,\gamma z). $$ It follows that $$ \int_{X_{U,v}(\BC)} g_s(P,\cdot) c_1(\ol\CL_U)= \int_{\Gamma\bs\CH} \sum_{\gamma\in \mu_U \backslash \Gamma} m_s(z_1,\gamma z) c_1(\ol\CL_U). $$ Note that the stabilizer of $\CH$ in $\Gamma$ is exactly $\Gamma\cap F\cross=\mu_U$. Moreover, as in the proof of \cite[Lemma 3.1]{YZZ}, $c_1(\ol\CL_U)$ is represented by the standard volume form $\ds\frac{dx\wedge dy}{2\pi y^2}$ over $\CH$. Therefore, the integral is further equal to $$ \int_{\CH} m_s(z_1, z) \frac{dx dy}{2\pi y^2}, $$ where $z=x+iy$ is as in the convention. Note that $m_s(\gamma z_1, \gamma z)=m_s(z_1, z)$ for any $\gamma\in \SL_2(\RR)$, the above integral is independent of $z_1$. It follows that we can assume $z_1=i$. This gives $$m_s(i,z)=Q_s(d(i,z))$$ with $$d(i,z)=1+\frac{|i-z|^2}{2\Im(i)\Im(z)} =\frac{x^2+y^2+1}{2y} .$$ Then the integral becomes $$ \int_{\CH} Q_s\big(\frac{x^2+y^2+1}{2y}\big) \frac{dx dy}{2\pi y^2}. $$ We need to prove that this integral is equal to $\ds\frac{1}{s(s+1)}$. The remaining part is purely analysis. Denote by $\mathbb D=\{z'\in \CC:|z'|<1\}$ the standard open unit disc. Under the standard isomorphism $\CH\to \BD$ given by $\ds z'=\frac{z-i}{z+i}$ and $\ds z=i\frac{1+z'}{1-z'}$, the integral becomes $$ \int_{\BD} Q_s\big(\frac{1+|z'|^2}{1-|z'|^2}\big) \frac{4dx' dy'}{2\pi (1-|z'|^2)^2}. $$ Here $z'=x'+iy'$ as usual. In terms of the polar coordinate $z'=re^{i\theta}$, the integral becomes $$ \int_0^1 Q_s\big(\frac{1+r^2}{1-r^2}\big) \frac{4rdr}{(1-r^2)^2} =\int_1^\infty Q_s(t) dt. $$ Recall from \cite[\S II.2]{GZ} that the Legendre function $Q_s(t)$ satisfies the differential equation $$ \left((1-t^2)\frac{d^2}{dt^2} -2t\frac{d}{dt} + s(s+1) \right)Q_s=0. $$ This gives $$ s(s+1) Q_s= \frac{d}{dt} \left((t^2-1)\frac{d}{dt} Q_s \right). 
$$ As a consequence, the original integral is equal to $$ \frac{1}{s(s+1)} \left.\left( (t^2-1)\frac{d}{dt} Q_s \right)\right|_1^\infty. $$ By \cite[II, (2.6)]{GZ}, we can express $Q_s$ by $$ Q_s(t)=\frac{2^s\Gamma(s+1)^2}{\Gamma(2s+2)}\big(\frac{1}{t+1}\big)^{s+1} F\big(s+1,s+1;2s+2;\frac{2}{t+1}\big). $$ Here the hypergeometric function is given by $$ F(a,b;c;t)=\sum_{n=0}^\infty \frac{(a)_n(b)_n}{(c)_n} \frac{t^n}{n!}, $$ where $(a)_n=a(a+1)\cdots (a+n-1).$ If $c$ is not a negative integer, the series $F(a,b;c;t)$ is absolutely convergent for $|t|<1$, and satisfies the differentiation formula $$ \frac{d}{dt} F(a,b;c;t)=\frac{ab}{c}F(a+1,b+1;c+1;t). $$ This gives \begin{align*} (t^2-1)\frac{d}{dt} Q_s(t) =& -(s+1)\frac{2^s\Gamma(s+1)^2}{\Gamma(2s+2)} \frac{t-1}{(t+1)^{s+1}} F\big(s+1,s+1;2s+2;\frac{2}{t+1}\big)\\ & -\frac{2^{s+1}\Gamma(s+2)^2}{\Gamma(2s+3)} \frac{t-1}{(t+1)^{s+2}} F\big(s+2,s+2;2s+3;\frac{2}{t+1}\big). \end{align*} It follows that for $\Re(s)>0$, the function $\ds (t^2-1)\frac{d}{dt} Q_s(t)$ converges to 0 as $t\to \infty$. By \cite[Theorem 2.1.3]{AAR}, as $t\to 1^+$, $$ \big(1-\frac{2}{t+1}\big) F\big(s+1,s+1;2s+2;\frac{2}{t+1}\big)\lra 0$$ and $$ \big(1-\frac{2}{t+1}\big) F\big(s+2,s+2;2s+3;\frac{2}{t+1}\big)\lra \frac{\Gamma(2s+3)}{\Gamma(s+2)^2}. $$ Therefore, $$ \lim_{t\to 1^+} (t^2-1)\frac{d}{dt} Q_s(t)=-\frac{2^{s+1}\Gamma(s+2)^2}{\Gamma(2s+3)} \frac{1}{2^{s+1}} \frac{\Gamma(2s+3)}{\Gamma(s+2)^2} =-1. $$ This finishes the proof. \end{proof} \subsection{Decomposition of the height series} Assume all the conditions in \S\ref{choices}. Note that we particularly need the condition that $U$ contains $\wh O_E^\times$. The goal of this subsection is to decompose the height series \begin{eqnarray*} Z(g, (t_1, t_2), \phi)_U = \pair{Z(g, \phi)_U\ t_1^{\circ},\ t_2^{\circ} }_{\NT}, \quad t_1,t_2\in E^\times(\af). \end{eqnarray*} The series only depends on the classes of $t_1, t_2\in C_U$ as introduced in \S\ref{sec shimura curve}. The height series was treated in \cite[\S 7.1-7.2]{YZZ} in terms of the arithmetic Hodge index theorem and admissible extensions. But as mentioned above, there is a minor mistake caused by the fact that the Green's function is only weakly admissible, so we will present the correct result here. We will still follow the idea of \cite{YZZ, YZ}, but we will also take into account the extra term caused by weak admissibility. By \S\ref{sec shimura curve}, we have a canonical arithmetic model $(\CX_U,\ol\CL_U)$ of $(X_U,L_U)$ over $O_F$. Note that $\CX_U$ is a projective and flat normal integral scheme over $O_F$. Moreover, the base change $\CX_{U, O_{H}}$ is $\QQ$-factorial for any finite extension $H$ of $F$, so intersection theory is still well-defined for Weil divisors over $\CX_{U, O_{H}}$. Recall that $\kappa_U^\circ$ is the degree of $L_U$ over any connected component of $X_{U,\ol F}$. Denote $$\xi=(\kappa_U^\circ)^{-1} L_U\in \Pic(X_U)_\QQ,\quad \hat\xi=(\kappa_U^\circ)^{-1} \ol\CL_U\in \wh\Pic(\CX_U)_\QQ.$$ For any finite extension $M$ of $F$ unramified over $\Sigma_f$, we can pull $\hat\xi$ back to the base change $\CX_{U, O_{M}}$. Still denote the pull-back by $\hat\xi$ by abuse of notations. Then we have the notion of weakly $\hat\xi$-admissible extensions of divisors over $\CX_{U, O_{M}}$. In particular, for any CM point $[\beta]_U$ represented by $\beta\in \bfcross$, it is defined over the abelian extension $H(\beta)$ of $E$ determined by the open compact subgroup $\beta U \beta^{-1}\cap E^\times(\af)$ of $E^\times(\af)$ via the class field theory.
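For instance, for $\beta=1$, the assumption recalled at the beginning of this subsection gives $\wh O_E^\times\subseteq U\cap E^\times(\af)$, so by class field theory
$$
H(1)\subseteq H_E,
$$
where $H_E$ denotes the Hilbert class field of $E$.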
By assumption, $U_v$ is maximal at any $v\in \Sigma_f$, so $\beta_v U_v \beta_v^{-1}\cap E_v^\times= U_v \cap E_v^\times=O_{E_v}\cross$. It follows that the extension $H(\beta)$ of $E$ is unramified over $\Sigma_f$. By this, we obtain a weakly $\hat\xi$-admissible extension $$ \hat\beta=(\ol P_\beta+V_\beta, g(P_\beta,\cdot)) $$ of $P_\beta$ over $\CX_{U, O_{H(\beta)}}$. Here $P_\beta$ is the point of $X_U(H(\beta))$ corresponding to $[\beta]$, $\ol P_\beta$ is the Zariski closure in $\CX_{U, O_{H(\beta)}}$, $V_\beta$ is a vertical divisor over $\CX_{U, O_{H(\beta)}}$, and $g(P_\beta,\cdot)$ is the Green's function reviewed above. Note that the weakly $\hat\xi$-admissible extension $\hat\beta$ is unique, as the Green's function is already chosen. Moreover, the base change of $\hat\beta$ by any extension $M$ of $H(\beta)$ unramified over $\Sigma_f$ is still weakly a $\hat\xi$-admissible extension, which we still denote by $\hat\beta$ by abuse of notations. Finally, consider $$ Z(g, (t_1, t_2), \phi)_U = \pair{Z_*(g, \phi)_U(t_1-\xi_{t_1}),\ t_2-\xi_{t_2} }_{\NT}, \quad t_1,t_2\in E^\times(\af). $$ Then the arithmetic Hodge index theorem of Faltings and Hriljac (cf. \cite[Theorem 7.4]{YZZ}) gives $$ Z(g, (t_1, t_2), \phi)_U =-( (Z_*(g, \phi)_Ut_1)^\wedge- (Z_*(g, \phi)_U\xi_{t_1})^\wedge )\cdot (\hat t_2-\hat \xi_{t_2} ). $$ We understand that the arithmetic intersection on the right-hand side involves base changes by finite extensions of $F$ unramified over $\Sigma_f$ to realize $t_1$ as a rational point, and the intersection numbers should be normalized by the degrees of the base changes. The extension $\hat\xi_{t_2}$ of $\xi_{t_2}$ is given by the corresponding connected component of $\hat\xi$ (over suitable base changes of $X_U$). The extension $(Z_*(g, \phi)_U\xi_{t_1})^\wedge$ of $ {Z_*(g, \phi)_U \xi_{t_1}}$ is defined similarly, as $ {Z_*(g, \phi)_U \xi_{t_1}}$ is a linear combination of connected components of $\xi$. The weakly $\hat \xi$-admissible extension $\hat t_2$ of $t_2$ is introduced above. The weakly $\hat \xi$-admissible extension $(Z_*(g, \phi)_Ut_1)^\wedge$ of $Z_*(g, \phi)_Ut_1$ is defined similarly, as $Z_*(g, \phi)_Ut_1$ is a linear combination of CM points of the form $[\beta]\in \CMU$. Take the notational convention $$ \pair{D, D'}:=-\wh D\cdot \wh D', $$ where $D, D'$ are the divisor classes involved above, and $\wh D, \wh D'$ are the arithmetic extensions introduced above. The right-hand side involves a normalizing factor again if a base change is taken. Then the decomposition is written as $$Z(g, (t_1, t_2))_U =\pair{Z_*(g,\phi)_U t_1, t_2}-\pair{Z_*(g,\phi)_U t_1, \xi_{t_2}} -\pair{Z_*(g,\phi)_U \xi_{t_1}, t_2} +\pair{Z_*(g,\phi)_U\xi_{t_1}, \xi_{t_2}}.$$ Now we summarize the result term by term in the following. \begin{thm} \label{height series} For any $t_1,t_2\in C_U$, $$Z(g, (t_1, t_2))_U =\pair{Z_*(g,\phi)_U t_1, t_2}-\pair{Z_*(g,\phi)_U t_1, \xi_{t_2}} -\pair{Z_*(g,\phi)_U \xi_{t_1}, t_2} +\pair{Z_*(g,\phi)_U\xi_{t_1}, \xi_{t_2}},$$ where the first term on the right-hand side has the expression \begin{align*} \pair{Z_*(g,\phi)_U t_1, t_2} =& -\sum_{v\ \nonsplit} (\log N_v) \barint_{C_U} \CM^{(v)}_{\phi}(g,(tt_1,tt_2)) dt\\ & -\sum_{v\nmid\infty} \CN^{(v)}_{\phi}(g,(t_1,t_2)) \log N_v-\sum_{v\nmid\infty} j_v(Z_*(g,\phi)t_1,t_2)\log N_v\\ & -\frac{i_0(t_2,t_2)}{[E\cross\cap U:\mu_U]} \Omega_\phi(g,(t_1,t_2))\\ &-\frac12 [F:\QQ] E_*(0,g,r(t_1,t_2)\phi). 
\end{align*} Here $E_*(0,g,\phi)$ is the non-constant part of the Eisenstein series introduced right before Proposition \ref{analytic series extra}, and the first three lines on the right hand side are the same as the formula of $Z(g, (t_1,t_2),\phi))_U$ in \cite[Theorem 8.6]{YZ}. Namely, they are explained in the following. \begin{itemize} \item[(1)] The modified arithmetic self-intersection number $$i_0(t_2,t_2)=i(t_2,t_2)- \sum_{v} i_v(t_2,t_2) \log N_v,$$ where the local term $$ i_v(t_2,t_2)=\barint_{C_U} i_{\bar v}(tt_2,tt_2) dt. $$ Here the term $i_{\bar v}$ is defined in \cite[\S8.2]{YZ} by case-by-case formulas according to the type of the place $v$, which extends the notion of the $i_{\bar v}$-part in \S\ref{sec admissible} to the current setting of non-proper intersection. \item[(2)] The pseudo-theta series \begin{eqnarray*} \Omega_\phi(g,(t_1,t_2)) = \sumu \sum_{y \in E\cross } r(g,(t_1,t_2))\phi(y,u). \end{eqnarray*} \item[(3)] For any place $v$ nonsplit in $E$, \begin{eqnarray*} \CM^{(v)}_{\phi}(g,(t_1,t_2)) &=&w_U \sum_{a\in F\cross} \quasilim \sum_{y \in \mu_U \backslash (B_+\cross-E\cross)} r(g,(t_1,t_2))\phi(y)_a m_s(y), \quad v|\infty, \\ \CM^{(v)}_\phi(g,(t_1,t_2)) &=&\sum_{u\in \mu_U^2\bs F\cross}\sum _{y\in B-E} r(g,(t_1,t_2))\phi^v (y, u)\ m_{r(g,(t_1,t_2))\phi _v}(y, u),\quad v\nmid \infty. \end{eqnarray*} Here $m_s(y)$ is introduced in \cite[\S8.2]{YZ}, and $m_{\phi _v}(y, u)$ is introduced in \cite[Proposition 8.3]{YZ} (and originally in \cite[\S8.2, Notation 8.3]{YZZ}). \item[(4)] For any non-archimedean $v$, \begin{eqnarray*} \CN^{(v)}_\phi(g,(t_1,t_2)) &=&\sum_{u\in \mu_U^2\bs F\cross}\sum _{y \in E\cross} r(g,(t_1,t_2))\phi^v (y, u)\ r(t_1,t_2)n_{r(g)\phi _v}(y, u). \end{eqnarray*} Here $n_{\phi _v}(y, u)$ is introduced in \cite[Proposition 8.3]{YZ} for $v$ nonsplit in $E$ and in \cite[Proposition 8.5]{YZ} for $v$ split in $E$. \end{itemize} \end{thm} \begin{proof} This is computed in \cite[Theorem 8.6]{YZ}, except that we will have an extra term coming from the weak admissibility. In fact, we first write $$\pair{Z_*(g) t_1, t_2}=-i({Z_*(g) t_1, t_2})-j({Z_*(g) t_1, t_2}).$$ Then we write $$ j({Z_*(g) t_1, t_2}) =\sum_v j_v({Z_*(g) t_1, t_2}), $$ where the sum is over all places $v$ of $F$ instead of just non-archimedean places. The extra terms are $j_v({Z_*(g) t_1, t_2})$ for archimedean $v$, while the other terms are computed in the proof of \cite[Theorem 8.6]{YZ}. If $v$ is archimedean, by definition $$ j_v({Z_*(g) t_1, t_2}) =\int_{X_{U,v}(\CC)} g(t_2,\cdot) c_1((Z_*(g) t_1)^\wedge). $$ Note that only the part of $c_1((Z_*(g) t_1)^\wedge)$ supported on the connected components of $t_2$ contributes to the integral. Recall the terminology for the connected components of $Z_*(g)$ in \cite[\S4.3.1]{YZZ}. Then we only need to consider the component $Z_*(g)_{q(t_1^{-1}t_2)}$ of $Z_*(g)$. By the weak admissibility, $$ c_1((Z_*(g)_{q(t_1^{-1}t_2)} t_1)^\wedge) =\deg(Z_*(g)_{q(t_1^{-1}t_2)} )\, c_1(\hat \xi_{t_2}). $$ By \cite[Proposition 4.2]{YZZ}, $$ \deg(Z_*(g)_{q(t_1^{-1}t_2)} ) =-\frac12 \kappa_U^\circ E_*(0,g,r(t_1,t_2)\phi). $$ It follows that $$ j_v({Z_*(g) t_1, t_2}) =-\frac12 \kappa_U^\circ E_*(0,g,r(t_1,t_2)\phi)\int_{X_{U,v}(\CC)} g(t_2,\cdot) c_1(\hat \xi_{t_2}). $$ By Lemma \ref{int of Green}, $$ \int_{X_{U,v}(\BC)} g(t_2,\cdot) c_1(\ol\CL_U)=-1. $$ Hence, $$ j_v({Z_*(g) t_1, t_2}) =\frac12 E_*(0,g,r(t_1,t_2)\phi). $$ This finishes the proof. 
\end{proof} \begin{remark} \label{erratum} The extra term $\ds -\frac12 [F:\QQ] E_*(0,g,r(t_1,t_2)\phi)$ in Theorem \ref{height series} appears due to the weak admissibility. This term was missed in \cite{YZZ,YZ}. However, it does not affect the main results of \cite{YZZ,YZ}, since both articles assume \cite[Assumption 5.4]{YZZ}, under which the extra term vanishes. \end{remark} \subsection{Comparison at archimedean places} Assume the basic conditions in \S\ref{choices}. Let $v$ be an archimedean place of $F$. Recall that in Theorem \ref{height series}, $Z(g,(t_1,t_2))_U$ has a $v$-component \begin{eqnarray*} \CM^{(v)}_{\phi}(g,(t_1,t_2)) =w_U \sum_{a\in F\cross} \quasilim \sum_{y \in \mu_U \backslash (B_+\cross-E\cross)} r(g,(t_1,t_2))\phi(y)_a m_s(y). \end{eqnarray*} On the other hand, recall that in Theorem \ref{analytic series}, $\pr' I'(0,g,\phi)_U$ has a $v$-component $$ \overline \CK^{(v)}_{\phi}(g,(t_1,t_2)) = w_U \sum_{a\in F\cross} \quasilim \sum_{y\in \mu_U\backslash (B(v)_+\cross -E\cross)} r(g,(t_1,t_2)) \phi(y)_a\ k_{v, s}(y), $$ where $$ k_{v, s}(y) = \frac{\Gamma(s+1)}{2(4\pi)^{s}} \int_1^{\infty} \frac{1}{t(1-\lambda(y)t)^{s+1}} dt, \qquad \lambda(y)=q(y_2)/q(y). $$ The goal of this subsection is to compute their difference. The final result is as follows. \begin{prop} \label{archimedean comparison} For any $t_1,t_2\in C_U$, $$ \overline \CK^{(v)}_{\phi}(g,(t_1,t_2))- \CM^{(v)}_{\phi}(g,(t_1,t_2)) =\frac12 (\gamma+\log(4\pi)-1) E_*(0,g,r(t_1,t_2)\phi). $$ Here $\gamma$ is Euler's constant. \end{prop} \begin{proof} This is computed as in \cite[Proposition 8.1]{YZZ}, but we need some extra work to take care of the contribution from the residue of the Green's function, which was missed in loc. cit. As in the calculation of Gross--Zagier \cite{GZ}, $$\int_1^{\infty} \frac{1}{t(1-\lambda t)^{s+1}}dt= \frac{2\Gamma(2s+2)}{\Gamma(s+1)\Gamma(s+2)}Q_s(1-2\lambda)+O_s(|\lambda|^{-s-2}).$$ Moreover, the error term $O_s(|\lambda|^{-s-2})$ vanishes at $s=0$. This is a combination of the equations in the first line and in the 12th line of \cite[p. 304]{GZ}, by noting that the left-hand sides of those two equations are equal. It follows that $$ k_{v, s}(y) = \frac{\Gamma(2s+2)}{(4\pi)^{s}\Gamma(s+2)}Q_s(1-2\lambda(y))+O_s(|\lambda(y)|^{-s-2}). $$ Denote \begin{eqnarray*} \CM^{(v)}_{\phi}(s,g,(t_1,t_2)) =w_U \sum_{a\in F\cross} \sum_{y \in \mu_U \backslash (B_+\cross-E\cross)} r(g,(t_1,t_2))\phi(y)_a m_s(y). \end{eqnarray*} Then we have $$ \CM^{(v)}_{\phi}(g,(t_1,t_2)) =\quasilim \CM^{(v)}_{\phi}(s,g,(t_1,t_2)) $$ and $$ \overline \CK^{(v)}_{\phi}(g,(t_1,t_2)) = \quasilim \frac{\Gamma(2s+2)}{(4\pi)^{s}\Gamma(s+2)} \CM^{(v)}_{\phi}(s,g,(t_1,t_2)). $$ Then $$ \overline \CK^{(v)}_{\phi}(g,(t_1,t_2)) -\CM^{(v)}_{\phi}(g,(t_1,t_2)) = \quasilim \left(\frac{\Gamma(2s+2)}{(4\pi)^{s}\Gamma(s+2)}-1\right) \CM^{(v)}_{\phi}(s,g,(t_1,t_2)). $$ Note that $\ds\frac{\Gamma(2s+2)}{(4\pi)^{s}\Gamma(s+2)}-1$ vanishes at $s=0$, and its derivative at $s=0$ is given by $$ \Gamma'(2)-\log(4\pi)=1-\gamma-\log(4\pi). $$ We will see that the series $\CM^{(v)}_{\phi}(s,g,(t_1,t_2))$ has a simple pole at $s=0$, coming from the pole of $g_s$ as in Lemma \ref{int of Green}. Hence, $$ \overline \CK^{(v)}_{\phi}(g,(t_1,t_2)) -\CM^{(v)}_{\phi}(g,(t_1,t_2)) = (1-\gamma-\log(4\pi))\, \Res_{s=0} \CM^{(v)}_{\phi}(s,g,(t_1,t_2)). $$ We can see the simple pole of $\CM^{(v)}_{\phi}(s,g,(t_1,t_2))$ and compute the residue as follows.
By a simple transformation as in \cite[Proposition 8.1]{YZZ}, we have \begin{eqnarray*} \CM^{(v)}_{\phi}(s,g,(t_1,t_2)) =\sum_{a\in F^\times} \sum_{x\in \bb_f^\times/U} r(g)\phi(x)_a\ g_s(t_1x,t_2) =g_s(Z_*(g,\phi)t_1, t_2). \end{eqnarray*} By Lemma \ref{int of Green}, we have \begin{eqnarray*} \Res_{s=0} \CM^{(v)}_{\phi}(s,g,(t_1,t_2)) =\frac{1}{\kappa_U^\circ} \deg(Z_*(g,\phi)_{q(t_1^{-1}t_2)}). \end{eqnarray*} Here $Z_*(g,\phi)_{q(t_1^{-1}t_2)} t_1$ is the part of $Z_*(g,\phi) t_1$ that lies in the same connected component as $t_2$. See \cite[\S4.3.1]{YZZ} for the connected components of $Z_*(g,\phi)$. In particular, \cite[Proposition 4.2]{YZZ} gives $$ \deg(Z_*(g,\phi)_{q(t_1^{-1}t_2)} ) =-\frac12 \kappa_U^\circ E_*(0,g,r(t_1,t_2)\phi). $$ This finishes the proof. \end{proof} \begin{remark} \label{erratum2} Note that \cite[Proposition 8.1]{YZZ} asserts $\CM^{(v)}_{\phi}(g,(t_1,t_2))=\overline \CK^{(v)}_{\phi}(g,(t_1,t_2))$, which is a priori wrong by Proposition \ref{archimedean comparison}. However, it holds under \cite[Assumption 5.4]{YZZ}, so the correction does not affect the main results of \cite{YZZ,YZ}. The situation is similar to Remark \ref{erratum}. \end{remark} \subsection{The $j$-part from bad reduction} Assume only the basic conditions in \S\ref{choices}. If $v$ is a non-archimedean place of $F$ split in $\BB$, then the $j$-part $j_{v}(Z_*(g,\phi)t_1,t_2)=0$ automatically. This is a trivial consequence of the fact that every connected component of the fiber $\CX_U$ above $v$ is integral, since it is the quotient of an integral model smooth above $v$. Let $v$ be a non-archimedean place nonsplit in $\BB$ (and thus inert in $E$). The $j$-part $j_{v}(Z_*(g,\phi)t_1,t_2)$ is treated briefly in \cite{YZZ} and \cite[Lemma 8.9]{YZ}. For the purpose here, we need some precise information. Note that $U_v$ is maximal by the basic conditions. We further assume that $\phi_v=1_{O_\bv^\times\times\ofv\cross}$, which is part of the restrictive conditions in \S\ref{choices}, but we do not assume that $U$ is maximal. This gives us flexibility to vary $U$, which is essential in our proof of the following result. \begin{prop} \label{averaged j} Let $v$ be a non-archimedean place nonsplit in $\BB$ and inert in $E$. Then the $j$-part $j_{\bar v}(Z_*(g,\phi)_Ut_1,t_2)$ is a non-singular pseudo-theta series of the form $$ \sum_{u\in \mu_U^2\backslash F\cross} \sum_{y\in B(v)-\{0\}} r(g,(t_1,t_2)) \phi^v(y,u)\ r(t_1,t_2)l_{r(g)\phi_v}(y,u). $$ Furthermore, $$ \int_{B(v)_v} l_{\phi_v}(y,u)dy=\frac14 |d_v|^2N_v^{-1} (1-N_v^{-1})^2 \cdot 1_{O_{F_v}^\times}(u), $$ and thus $$ r(w)l_{\phi_v}(0,u)=- \frac{N_v-1}{4(N_v+1)} r(w)\phi_v(0,u). $$ \end{prop} Note that the first part of the proposition is exactly \cite[Lemma 8.9]{YZ}. The goal of this subsection is to prove the proposition. The idea is still to compute the intersection numbers in terms of the $p$-adic uniformization, but to make a clear picture of the quotient process, we will pass to sufficiently small level structure. For simplicity, we denote by $B=B(v)$ the nearby quaternion algebra in the following. Recall that \S\ref{choices} chooses an isomorphism $B(v)_{\AA^v}\simeq \BB^v$ and an embedding $E\to B(v)$, which are compatible with the embedding $E_\AA\to \BB$. Here we further fix an isomorphism $B(v)_v\simeq M_2(F_v)$. \subsubsection*{Pass to smaller level} Let $U'$ be an open compact subgroup of $U$ with $U'_w=U_w=O_{\BB_w}^\times$ for all non-archimedean places $w$ nonsplit in $\BB$. 
We first check that Proposition \ref{averaged j} for $U'$ implies that for $U$. This essentially follows from the local nature and the projection formula of the $j$-part as explained in \S\ref{sec admissible}. Denote by $\pi:X_{U'}\to X_{U}$ the natural map, and denote by $\tilde\pi:\CX_{U'}\to \CX_{U}$ the induced map between the integral models over $O_F$. Note the compatibility $\tilde\pi^*\ol\CL_U=\ol\CL_{U'}.$ By the projection formula explained in \S\ref{sec admissible}, we have $$j_{\bar v}\big( \pi^*\big(Z_*(g,\phi)_U [t_1]_U\big),[t_2]_{U'}\big) =j_{\bar v}(Z_*(g,\phi)_U[t_1]_U,[t_2]_U).$$ Here we have used the easy fact $\pi_*([t_2]_{U'})=[t_2]_U$. We claim that $$ \pi^*\big(Z_*(g,\phi)_U [t_1]_U\big)=[\mu_{U'}^2:\mu_U^2]\cdot Z_*(g,\phi)_{U'} [t_1]_{U'} $$ as a divisor on $X_{U'}$. In fact, by \cite[Lemma 3.2]{YZZ}, $$ \pi^*\big(Z_*(g,\phi)_U [t_1]_U\big)=\big((\pi\times\pi)^*Z_*(g,\phi)_{U}\big) [t_1]_{U'}. $$ By \cite[Lemma 3.18]{YZZ}, $$ (\pi\times\pi)^*Z_*(g,\phi)_{U}=[\mu_{U}^2:\mu_{U'}^2]^{-1} Z_*(g,\phi)_{U'}. $$ This gives the claim. As a consequence, the projection formula gives $$ j_{\bar v}\big( Z_*(g,\phi)_{U'} [t_1]_{U'},[t_2]_{U'}\big) =[\mu_{U}^2:\mu_{U'}^2] \, j_{\bar v}\big( Z_*(g,\phi)_{U} [t_1]_U,[t_2]_U\big).$$ Assume that the proposition holds for $U'$. Then we have $$j_{\bar v}\big( Z_*(g,\phi)_{U'} [t_1]_{U'},[t_2]_{U'}\big) =\sum_{u\in \mu_{U'}^2\backslash F\cross} \sum_{y\in B-\{0\}} r(g,(t_1,t_2)) \phi^v(y,u)\ r(t_1,t_2)l_{r(g)\phi_v}(y,u)_{U'}. $$ Here the last subscript of $l_{r(g)\phi_v}(y,u)_{U'}$ indicates its dependence on $U'$. We assume the slight extra condition that $l_{r(g)\phi_v}(y,u)_{U'}$ as a function of $u$ is invariant under the action of $\ofv\cross$, which will be seen later by its precise expression when $U'$ is sufficiently small. Then the proposition holds for $U$ by setting $$l_{r(g)\phi_v}(y,u)_{U}=l_{r(g)\phi_v}(y,u)_{U'}.$$ \subsubsection*{Sufficiently small level} As in \S\ref{sec shimura curve}, for any positive integer $N$, denote by $U(N)=(1+N O_{\BB_f})^\times$ the open compact subgroup of $O_{\BB_f}^\times$. We say that $U$ (with $U_w$ maximal for all $w\in\Sigma_f$) is \emph{sufficiently small} if $U\subset U(N)$ for some integer $N\geq 3$ coprime to the places in $\Sigma_f$. Denote by $k_v$ the residue field of $v$, and $\bar k_v$ its algebraic closure. Denote by $\CX_{U, \bar k_v}=\CX_U\times_{O_F} \bar k_v$ the geometric special fiber. The goal here is to prove that if $U$ is {sufficiently small}, then $\CX_{U, \bar k_v}$ is a (reduced) semistable curve over the residue field $\bar k_v$, in which every irreducible component $C$ is isomorphic to $\BP^1_{\bar k_v}$, and intersects exactly $N_v+1$ other irreducible components respectively at $N_v+1$ distinct points of $C(\bar k_v)$. Moreover, the set of irreducible components can be written as a disjoint union $S_0\cup S_1$ of two subsets, such that any two distinct components in the same subset do not intersect. The proof is an application of the $p$-adic uniformization of \v Cerednik--Drinfe'ld (cf. \cite{BC}) over $\QQ$ and that of Boutot--Zink \cite{BZ} over a totally real field. 
Recall that the uniformization gives an isomorphism $$\wh\CX_{{U}}\times_{\Spf\, O_{F_v}} \Spf\, O_{F_v^\ur} =B^{\times}\bs (\wh\Omega \times_{\Spf\, O_{F_v}} \Spf\, O_{F_v^\ur})\times \BB_f\cross/{U}.$$ Here $\wh\CX_{U}$ denotes the formal completion of $\CX_{U}$ along the special fiber above $v$, $F_v^\ur$ denotes the completion of the maximal unramified extension of $F_v$, and $\wh \Omega$ is Deligne's integral model of the Drinfel'd (rigid-analytic) upper half plane $\Omega$ over $O_{F_v}$. The group $B_v\cross\cong \GL _2(F_v)$ acts on $\wh\Omega$ by fractional linear transformations, and on $\bb_v^\times/U_v\cong \ZZ$ via translation by $v\circ q=v\circ\det$. Denote a subgroup $$ B^\dagger=\{\gamma\in B: v(q(\gamma)) =0\}. $$ Then the uniformization is equivalent to $$\wh\CX_{U,O_{F_v^\ur}} =B^\dagger\bs \wh\Omega_{O_{F_v^\ur}}\times (\BB_f^v)\cross/{U^v} =(\mu_{U}\bs B^\dagger)\bs \wh\Omega_{O_{F_v^\ur}}\times (\BB_f^v)\cross/{U^v}.$$ Here we denote $\wh\CX_{U, O_{F_v^\ur}}=\wh\CX_{{U}}\times_{\Spf\, O_{F_v}} \Spf\, O_{F_v^\ur}$ and $\wh\Omega_{O_{F_v^\ur}}=\wh\Omega \times_{\Spf\, O_{F_v}} \Spf\, O_{F_v^\ur}$. Note that $\wh\Omega_{O_{F_v^\ur}}\times (\BB_f^v)\cross/{U^v}$ is a disjoint union of countably many copies of $\wh\Omega_{O_{F_v^\ur}}$. Recall that the dual graph of the special fiber (or equivalently the underlying scheme) of $\wh\Omega$ is just the Bruhat--Tits tree of $\gl$ over $F_v$, whose vertices are homothety classes of lattices in $F_v^2$. Choose an irreducible component $C_0\simeq \BP^1$ of the special fiber of $\wh \Omega$. The group $\gl(F_v)$ acts transitively on the set of all irreducible components, and the stabilizer of $C_0$ in $\gl(F_v)$ is $F_v^\times \gl(\ofv)$. It follows that the irreducible components of the special fiber of $\wh \Omega$ are indexed by $\gl(F_v)/F_v^\times \gl(\ofv).$ Denote by $\wt S_0$ (resp. $\wt S_1$) the set of irreducible components of the special fiber of $\wh\Omega_{O_{F_v^\ur}}\times (\BB_f^v)\cross/{U^v}$ represented by $\alpha_v F_v^\times \gl(\ofv) \times \beta^v U^v$ such that $2|v(q(\alpha_v))$ (resp. $2\nmid v(q(\alpha_v))$). We say that two irreducible components $C_1, C_2$ have the \emph{same parity}, if they belong to the same $\wt S_i$ for some $i=0,1$; otherwise, we say that they have \emph{different parities}. By definition, any $\gamma\in B^\dagger$ satisfies $v(q(\gamma))=0$, so the action of $B^\dagger$ stabilizes $\wt S_0$ and $\wt S_1$ respectively. In other words, the action does not change the parity of an irreducible component. Denote by $S_0$ and $S_1$ respectively the set of irreducible components of the special fiber of $\CX_{U, O_{F_v^\ur}}$ coming from $\wt S_0$ and $\wt S_1$ respectively via the quotient process. Then $S_0$ and $S_1$ are still disjoint. So we can also talk about parities of irreducible components of the special fiber of $\CX_{U, O_{F_v^\ur}}$. By the property of the Bruhat--Tits tree, if $C$ and $C'$ intersect, then they correspond to adjacent lattices of $F_v^2$, so $C$ and $C'$ should have different parities. In other words, components of the same parity do not intersect. In fact, for two adjacent lattices $\Lambda, \Lambda'$ of $F_v^2$ (corresponding to $C$ and $C'$), the relation $p_v \Lambda\subset \Lambda' \subset \Lambda$ implies that $v(\det(\Lambda))$ and $v(\det(\Lambda'))$ have different parities. Here $\det(\Lambda)$ denotes the determinant of the transition matrix from an $O_{F_v}$-basis of $\Lambda$ to the standard basis of $F_v^2$.
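To illustrate the parity convention with a concrete example, take the standard lattice and one of its neighbors:
$$
\Lambda=O_{F_v}\oplus O_{F_v}, \qquad \Lambda'=O_{F_v}\oplus p_v.
$$
Then $p_v\Lambda\subset \Lambda'\subset \Lambda$, while $v(\det(\Lambda))=0$ and $v(\det(\Lambda'))=1$, so the two corresponding adjacent components indeed have different parities.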
Return to the uniformization $$\wh\CX_{U,O_{F_v^\ur}} =(\mu_{U}\bs B^\dagger)\bs \wh\Omega_{O_{F_v^\ur}}\times (\BB_f^v)\cross/{U^v}.$$ Then the irreducible components of the special fiber of $\CX_{U,O_{F_v^\ur}}$ are indexed by $$ (\mu_{U}\bs B^\dagger)\bs B_{\af}\cross/(F_v^\times \gl(\ofv) U^v). $$ We claim that for sufficiently small $U^v$, the action of $\mu_{U}\bs B^\dagger$ on $B_{\af}\cross/(F_v^\times \gl(\ofv) U^v)$ is free. To prove the claim, take $b\in B_{\af}\cross$, and assume that $\gamma\in B^\dagger$ stabilizes $b(F_v^\times \gl(\ofv) U^v)$, and we need to prove $\gamma\in \mu_{U}$. The stabilizing condition gives $$ \gamma b(F_v^\times \gl(\ofv) U^v) =b(F_v^\times \gl(\ofv) U^v), \quad \gamma \in b(F_v^\times \gl(\ofv) U^v)b^{-1}. $$ By the definition of $B^\dagger$, we have $v(q(\gamma))=0$, so $$\gamma \in b(\gl(\ofv) U^v)b^{-1}.$$ It suffices to prove $\gamma \in F^\times$. The proof is very similar to that in \cite[Proposition 4.1]{YZ}, but we reproduce it here for the convenience of the reader. Assume the contrary that $\gamma\notin F^\times$. Then $E'=F(\gamma)=F+F\gamma$ is a quadratic CM extension of $F$ contained in $B$. Moreover, $\gamma$ lies in $b(\gl(\ofv) U^v)b^{-1}\cap E_{\af}'^\times$, which is an open and compact subgroup of $E_{\af}'^\times$. Note that $\wh O_{E'}^\times$ is the unique maximal open compact subgroup of $E_{\af}'^\times$. It follows that $\gamma\in \wh O_{E'}^\times$, and thus $\gamma\in O_{E'}^\times$ is a unit. On the other hand, by the assumption $U\subset U(N)$, we have $1-\gamma\in N b(M_2(\ofv) O_{\BB^v})b^{-1}$. The intersection $b(M_2(\ofv) O_{\BB^v})b^{-1}\cap E_{\af}'$ is a compact subring of $E_{\af}'$, so it is contained in the unique maximal compact subring $\wh O_{E'}$ of $E_{\af}'$. It follows that $1-\gamma$ lies in $N\wh O_{E'}\cap E'=N O_{E'}$. With the conditions $\gamma\in O_{E'}^\times$ and $\gamma\in 1+ N O_{E'}$, it is easy to derive a contradiction. In fact, the element $\zeta=\gamma/ \bar\gamma$ lies in $O_{E'}^\times$ and has absolute value 1 at all archimedean places, so it must be a root of unity. By $\gamma\in 1+ N O_{E'}$, we have $\zeta\in 1+N O_{E'}$ and $(1-\zeta)/N\in O_{E'}$. Then $(1-\zeta)/N\in \ZZ[\zeta]$ since $\ZZ[\zeta]$ is integrally closed. It follows that $\ZZ[\zeta]/N \ZZ[\zeta] \simeq \ZZ/N\ZZ$. Note that $\ZZ[\zeta]$ is a free $\ZZ$-module, so comparing cardinalities shows that it has rank 1 over $\ZZ$. It follows that $\zeta=\pm 1$. As $\zeta\in 1+N O_{E'}$, we have $\zeta=1$, and thus $\gamma=\bar\gamma\in F^\times$, contradicting our assumption. In summary, we have just proved that for sufficiently small $U$, the action of $\mu_{U}\bs B^\dagger$ on the set of irreducible components of the special fiber of $\wh\Omega_{O_{F_v^\ur}}\times (\BB_f^v)\cross/{U^v}$ is free. Now we prove that for any closed point $x$ on the special fiber, the action of $\mu_{U}\bs B^\dagger$ is free at $x$. If $x$ lies on a single irreducible component $C$, then the stabilizer of $x$ is contained in the stabilizer of $C$ and thus must be trivial. If $x$ is a node lying on two distinct irreducible components $C, C'$, assume that $\gamma\in \mu_{U}\bs B^\dagger$ is an element fixing $x$, and we need to prove $\gamma=1$. Note that the action of $\gamma$ either switches $C$ and $C'$, or stabilizes each of $C$ and $C'$. As $C$ and $C'$ intersect, they must have different parities. Then $\gamma$ cannot switch them because $\gamma$ does not change parities. As a consequence, $\gamma$ stabilizes each of $C$ and $C'$. Then $\gamma=1$ as proved above.
Therefore, the action of $\mu_{U}\bs B^\dagger$ on the special fiber of $\wh\Omega_{O_{F_v^\ur}}\times (\BB_f^v)\cross/{U^v}$ is free. Then the quotient map is actually \'etale, and any irreducible component $C$ is mapped birationally to its image $\overline C$ in the special fiber of $\CX_{U,O_{F_v^\ur}}$. To prove that $C\to \overline C$ is actually an isomorphism, we need to further check that the nodes of $C$ map injectively to $\overline C$. In other words, we need to prove that there is no $\gamma\in \mu_{U}\bs B^\dagger$ mapping a node $x_1\in C$ to another node $x_2\in C$. Assume that $\gamma$ exists. Denote by $C_1$ (resp. $C_2$) the irreducible component other than $C$ passing through $x_1$ (resp. $x_2$). By the intersection configuration, $C_1$ and $C_2$ have the same parity, which is different from that of $C$. Note that $\gamma$ maps the set $\{C, C_1\}$ to the set $\{C, C_2\}$. It follows that $\gamma$ maps $C$ to $C$, as it does not change parity. Then $\gamma$ is trivial again, and we have a contradiction. Via the quotient process, we conclude that the special fiber $\CX_{U, \bar k_v}$ of $\CX_{U,O_{F_v^\ur}}$ is a semistable curve over $\bar k_v$, in which every irreducible component $C$ is isomorphic to $\BP^1_{\bar k_v}$, and intersects exactly $N_v+1$ other irreducible components respectively at $N_v+1$ distinct points of $C(\bar k_v)$. Moreover, the set of irreducible components can be written as a disjoint union $S_0\cup S_1$ of two subsets, such that any two distinct components in the same subset do not intersect. In the following, we assume that $U$ is sufficiently small, so that the quotient process is free, and the special fiber has the above nice properties. \subsubsection*{Pass to the uniformization} Note that the first part of the proposition is exactly \cite[Lemma 8.9]{YZ}. In the following, we first recall the formula for $l_{\phi_v}(g,y,u)$ in \cite[Lemma 8.9]{YZ}, and then compute its average by a more careful analysis of the $p$-adic uniformization. By the basic conditions in \S\ref{choices}, $O_{E_v}^\times\subset O_{\BB_v}\cross$, so the CM point $[t]_U$ for any $t\in E_{\af}^\times$ is defined over $F_v^\ur$. Therefore, to compute $j_{\overline v}$, by choosing an embedding $F_v^\ur\to (\ol F)_{\bar v}$, it suffices to compute it over the integral model $\CX_{U,O_{F_v^\ur}}$. Note that the $j$-part can be defined locally as in \S\ref{sec admissible}. Recall the uniformization $$\wh\CX_{U, O_{F_v^\ur}} =B^{\times}\bs \wh\Omega_{O_{F_v^\ur}} \times \BB_f\cross/U.$$ By definition, $$j_{\overline v}(Z_*(g)t_1, t_2)= \overline{Z_*(g)t_1}\cdot V_{t_2}.$$ Here $\overline{Z_*(g)t_1}$ is the Zariski closure in $\CX_{U,O_{F_v^\ur}}$, and $V_{t_2}$ is the unique vertical divisor on $\CX_{U,O_{F_v^\ur}}$, supported on the geometrically connected component of $t_2$ in $\CX_{U,O_{F_v^\ur}}$, satisfying the following properties: \begin{itemize} \item[(1)] $(V_{t_2}+\bar t_2)\cdot C=\hat\xi \cdot C$ for any vertical divisor $C$ of $\CX_{U,O_{F_v^\ur}}$; \item[(2)] $V_{t_2} \cdot \hat\xi =0$. \end{itemize} Write $V_{1}=\sum_i a_i W_i$ (for $t_2=1$), where $\{W_i\}_i$ is the set of irreducible components of the special fiber of $\CX_{U,O_{F_v^\ur}}$ lying in the same connected component as $1$. Let $\wt W_i$ be an irreducible component of the special fiber of $\wh\Omega _{O_{F_v^\ur}}$ lifting $W_i$. Write $\wt V=\sum_i a_i \wt W_i$, viewed as a vertical divisor of $\wh\Omega_{O_{F_v^\ur}}$.
Via the $p$-adic uniformization, the proof of \cite[Lemma 8.9]{YZ} actually gives a non-singular pseudo-theta series $$j_{\overline v}(Z_*(g)t_1, t_2)= \sumu \sum_{\gamma \in B^\times} r(g,(t_1,t_2))\phi^v(\gamma, u)\ r(t_1,t_2) l_{\phi_v}(g,\gamma,u), $$ where for $g\in \gl(F_v), \ \gamma\in B_v^\times,\ u\in F_v^\times$, $$l_{\phi_v}(g,\gamma,u)= \sum_{x \in\bb_v^\times /U_v} r(g)\phi_v(x, uq(\gamma)/q(x)) 1_{O_{F_v}^\times}(q(x)/q(\gamma))\ (\gamma^{-1} z_0\cdot \wt V). $$ Here $z_0\in \Omega (E_v)$ is a point in $\Omega (\BC_v)$ fixed by $E_v^\times$, viewed as a point of $\Omega (F_v^\ur)$ via an embedding $E_v\to F_v^\ur$ fixed by us once and for all. Moreover, we also have $$l_{\phi_v}(1,\gamma,u)= (\gamma^{-1}\bar z_0\cdot \wt V)\cdot 1_{O_{F_v}^\times}(q(\gamma)) \cdot 1_{O_{F_v}^\times}(u).$$ Here $\bar z_0$ denotes the section of $\wh\Omega_{O_{F_v^\ur}}$ corresponding to $z_0$. The non-singularity of the pseudo-theta series is implied by the fact that $l_{\phi_v}(1,\gamma,u)$ is actually a compactly-supported function of $(\gamma, u)\in B_v^\times \times F_v^\times$. To make the right-hand side nonzero, assume that $u\in O_{F_v}^\times$ in the following. Note that the proof of \cite[Lemma 8.9]{YZ} claims that there is a unique point of $\Omega (F_v^\ur)$ fixed by $E_v^\times$. This is wrong. In fact, there are exactly two points of $\Omega (\BC_v)=\CC_v-F_v$ fixed by $E_v^\times$, both of which are defined over $E_v$. To see this, by the Noether--Skolem theorem, the embedding $E_v\to M_2(F_v)$ is a conjugate of the embedding $$ a+b\sqrt{D_v} \longmapsto \matrixx{a}{bD_v}{b}{a}, $$ where $D_v\in F_v^\times$ is the relative discriminant of $E_v$ over $F_v$. This explicit case gives fixed points $\pm\sqrt{D_v}$. This mistake in loc. cit. does not affect the other results, since any fixed point gives the same results for our purpose. \subsubsection*{Compute the average} Let $\widetilde W^0$ be the unique irreducible component of the special fiber of $\wh \Omega_{O_{F_v^\ur}}$ intersecting the section $\bar z_0$. Recall that the irreducible components of the special fiber of $\wh \Omega_{O_{F_v^\ur}}$ are indexed by $\gl(F_v)/F_v^\times \gl(\ofv).$ We can further adjust the indexing such that $\widetilde W^0$ corresponds to the trivial coset. Denote by $\alpha_i F_v^\times \gl(\ofv)$ the coset representing the component $\wt W_i$. Then we have $$ (\gamma^{-1} z_0\cdot \wt W_i) =1_{\alpha_iF_v^\times \gl(\ofv)}(\gamma^{-1}) =1_{F_v^\times \gl(\ofv) \alpha_i^{-1}}(\gamma). $$ By $\wt V=\sum_i a_i \wt W_i$, we have $$\int_{B_v} l_{\phi_v}(1,\gamma,u) d\gamma = \sum_i a_i \int_{B_v^\dagger} (\gamma^{-1} z_0\cdot \wt W_i) d\gamma =\sum_i a_i\ \vol(B_v^\dagger\cap F_v^\times \gl(\ofv)\alpha_i^{-1}).$$ Here $$ B_v^\dagger=\{\gamma\in B_{v}: q(\gamma) \in O_{F_v}^\times\}. $$ For any $\alpha\in B_v^\times$, it is easy to see that $$B_v^\dagger\cap F_v^\times \gl(\ofv) \alpha^{-1} =\begin{cases} \emptyset, & 2\nmid v(q(\alpha)); \\ \gl(\ofv) \varpi_v^{v(q(\alpha))/2} \alpha^{-1}, & 2\mid v(q(\alpha)). \end{cases}$$ Here $\varpi_v$ is a uniformizer of $F_v$. Note that the self-dual measure on $B_v=M_2(F_v)$ gives $$\vol(\GL_2(O_{F_v}))=|\GL_2(O_{F_v}/p_v)|\cdot\vol(1+p_vM_2(O_{F_v}))=|d_v|^2(1-N_v^{-1})(1-N_v^{-2}).$$ As a consequence, $$\int_{B_v} l_{\phi_v}(1,\gamma,u) d\gamma =|d_v|^2(1-N_v^{-1})(1-N_v^{-2}) \sum_{i:\, 2\mid v(q(\alpha_i))} a_i.$$ By abuse of notation, denote by $S_0$ (resp. $S_1$) the set of indices $i$ such that $2\mid v(q(\alpha_i))$ (resp. $2\nmid v(q(\alpha_i))$).
Then $S=S_0\cup S_1$ is the set of all indices $i$. Denote $$ A_0=\sum_{i\in S_0} a_i, \qquad A_1=\sum_{i\in S_1} a_i. $$ We need to compute $A_0$. We are going to prove the following equations: $$ A_0+A_1=0, \qquad A_0-A_1=\frac{1}{2(N_v+1)}. $$ The relations give $$A_0=\frac{1}{4(N_v+1)}, \qquad \int_{B_v} l_{\phi_v}(1,\gamma,u) d\gamma =\frac14 |d_v|^2N_v^{-1}(1-N_v^{-1})^2. $$ Then the last equality of the proposition follows from Lemma \ref{derivative of intertwining}. It remains to prove the two equations for $A_0$ and $A_1$. We need the following intersection results: \begin{enumerate}[(1)] \item The cardinalities $|S_0|$ and $|S_1|$ are equal. In fact, for any $x_v\in \BB_v\cross$ with $2\nmid v(q(x_v))$, the Hecke correspondence $Z(x_v)_U$ corresponding to $U_vx_vU_v=x_vU_v$ is an automorphism of $\CX_{U,O_{F_v^\ur}}$ and switches $S_0$ with $S_1$. Denote $n=|S_0|=|S_1|$ in the following, so $2n=|S|$. \item $W_i\cdot W_i=-(N_v+1)$ for any $i\in S$. In fact, by the above result (as we have assumed that $U$ is sufficiently small), any irreducible component of the special fiber of $\CX_{U, O_{F_v^\ur}}$ is isomorphic to $\BP^1$, and intersects exactly $N_v+1$ other components. As a consequence, $$ W_i\cdot W_i=-W_i\cdot \sum_{j\in S, \ j\neq i} W_j=-(N_v+1). $$ \item Fix $r=0,1$. Then $W_i\cdot W_{j}=0$ for $i,j\in S_r$ with $i\neq j$. This is just the above fact that components of the same parity do not intersect. \item $W_i\cdot \hat\xi=1/(2n)$ for any $i\in S$. Note that $\hat\xi \cdot\sum_{i\in S} W_i=1$, since $\xi$ has degree one on every connected component of $X_{U,F_v^\ur}$. Then it suffices to prove that $W_i\cdot \hat\xi$ is independent of $i$, or equivalently $W_i\cdot \CL_U$ is independent of $i$. As $U$ is sufficiently small, the complex uniformization for $X_U$ is also a free quotient. This fact is contained in the proof of \cite[Proposition 4.1]{YZ}. Moreover, $X_U$ has no cusp since we are in the case that $v$ is nonsplit in $\BB$. As a consequence, the Hodge bundle $\CL_U$ is just the relative dualizing sheaf $\omega_{\CX_U/O_F}$ of the regular scheme $\CX_U$ over $O_F$. Apply the adjunction formula $$ 2g(W_i)-2= W_i\cdot \CL+W_i\cdot W_i. $$ As $g(W_i)=0$ and $W_i\cdot W_i=-(N_v+1)$, this gives $W_i\cdot \CL=N_v-1$. \end{enumerate} Now we are ready to establish the equations for $A_0$ and $A_1$. By the definition of $V_1=\sum_i a_i W_i$, we have $V_1 \cdot \hat\xi =0$. This is just $A_0+A_1=0$ by (4). On the other hand, the definition of $V_1$ also gives $(\bar 1+V_1)\cdot C=\hat\xi\cdot C$ for any vertical divisor $C$ of $\CX_{U,O_{F_v^\ur}}$. Take $C=\sum_{j\in S_1} W_j$. It does not intersect the Zariski closure of $1$. Furthermore, by (2) and (3), $W_i\cdot C=N_v+1$ for $i\in S_0$ and $W_i\cdot C=-(N_v+1)$ for $i\in S_1$. By (1) and (4), we have $\hat\xi\cdot C=1/2$. Then the identity $(\bar 1+V_1)\cdot C=\hat\xi\cdot C$ becomes $$ \sum_{i\in S_0} a_i (N_v+1) -\sum_{i\in S_1} a_i (N_v+1)=\frac12. $$ This gives our second equation. The proof of Proposition \ref{averaged j} is complete. \subsection{Hecke action on arithmetic Hodge classes} \label{sec 4.3} In the last subsection, we obtained the decomposition $$Z(g, (t_1, t_2)) =\pair{Z_*(g,\phi) t_1, t_2} -\pair{Z_*(g,\phi) \xi_{t_1}, t_2} +\pair{Z_*(g,\phi)\xi_{t_1}, \xi_{t_2}} -\pair{Z_*(g,\phi) t_1, \xi_{t_2}}.$$ We have also considered a decomposition of the first term on the right-hand side. In this subsection we consider the remaining three terms. The treatment here is an enhanced version of \cite[\S7.3]{YZZ}.
We still assume all the conditions in \S\ref{choices} in the following, unless otherwise instructed. \subsubsection*{Two easy terms} Recall that $\kappa_U^\circ$ is the degree of $L_U$ on a connected component of $X_{U,\ol F}$. The following result works under the basic conditions of \S\ref{choices}. \begin{pro} \label{geometric series extra1} $$\pair{Z_*(g,\phi) \xi_{t_1}, t_2}=-\frac 12 \kappa_U^\circ\ E_*(0,g,r(t_1,t_2) \phi)_U\cdot \pair{\xi_{t_2}, t_2},$$ $$\pair{Z_*(g,\phi)\xi_{t_1}, \xi_{t_2}}=-\frac 12 \kappa_U^\circ\ E_*(0,g,r(t_1,t_2) \phi)_U\cdot \pair{\xi_{t_2}, \xi_{t_2}}.$$ \end{pro} \begin{proof} We first compute $\pair{Z_*(g,\phi) \xi_{t_1}, t_2}$. By definition, $Z_*(g,\phi) \xi_{t_1}$ is a linear combination of $Z(x) \xi_{t_1}$. By construction, the correspondence $Z(x)$ keeps the canonical bundle up to a multiple under pull-back and push-forward. More precisely, one has $$Z(x) \xi_{t_1} =(\deg Z(x)) \xi_{t_1 x},\quad\forall x\in\bb_f\cross.$$ Note that $\pair{Z(x) \xi_{t_1}, t_2}$ is nonzero only if $\xi_{t_1 x}$ and $t_2$ lie in the same geometrically connected component of $X_U$. It follows that $$\pair{Z_*(g,\phi) \xi_{t_1}, t_2} =\pair{Z_*(g,\phi)_{U,q(t_1^{-1}t_2)} \xi_{t_1}, t_2} =\deg Z_*(g,\phi)_{U,q(t_1^{-1}t_2)} \cdot \pair{\xi_{t_2}, t_2}.$$ Here $Z_*(g,\phi)_{U,q(t_1^{-1}t_2)}$ consists of the $q(t_1^{-1}t_2)$-component of $Z_*(g,\phi)_{U}$ as introduced in \cite[\S 4.2.4]{YZZ}. By \cite[Proposition 4.2]{YZZ}, \begin{eqnarray*} \deg Z(g, \phi)_{U,q(t_1^{-1}t_2)}=-\frac 12 \kappa_U^\circ\ E(0,g,r(t_1,t_2) \phi)_U. \end{eqnarray*} This gives the formula for $\pair{Z_*(g,\phi) \xi_{t_1}, t_2}$. The same method also proves the formula for $\pair{Z_*(g,\phi) \xi_{t_1}, \xi_{t_2}}$. \end{proof} \subsubsection*{Almost eigenvector} It remains to consider $\pair{Z_*(g) t_1, \xi_{t_2}}$. We follow the treatment of \cite[\S7.3.2]{YZZ} with some modification to fit the current setting. For any $x\in \bb_f\cross$, let $\CZ(x)$ be the Zariski closure of $Z(x)$ in $\CX_U\times_{O_F} \CX_U$. Note that $U$ is maximal by assumption. The following are true: \begin{itemize} \item[(1)] $\CZ(x_1)$ commutes with $\CZ(x_2)$ for any $x_1, x_2\in \bb_f\cross$; \item[(2)] $\CZ(x)=\prod_{v\nmid\infty} \CZ(x_v)$ for any $x\in \bb_f\cross$; \item[(3)] for any $x\in \bb_f\cross$, both structure projections from $\CZ(x)$ to $\CX_U$ are finite. \end{itemize} In the proof of Proposition \ref{geometric series extra1}, we already see that $$ Z(x) \xi= (\deg Z(x))\ \xi. $$ In other words, $\xi$ is an eigenvector of $Z(x)$ over $X_U$. For the arithmetic version, we will see that $\hat\xi$ generally fails to be an eigenvector of $\CZ(x)$, but the failure is explicitly computable. Define an arithmetic class $D(x)$ on $\CX_U$ by $$D(x):=\CZ(x) \hat\xi- (\deg Z(x))\ \hat\xi.$$ Then $D(x)$ is a vertical arithmetic $\BQ$-divisor since it is zero on the generic fiber. If $x\in \bb_v^\times$ for some non-archimedean place $v$ nonsplit in $\BB$, then we have $\deg Z(x)=1$ and $D(x)=0$. In fact, since $U_v=O_{\BB_v}^\times$, the double coset $U_vxU_v=xU_v$ is a single coset depending only on $v(q(x))$. As a consequence, $Z(x)$ is just an automorphism of $X_U$, and thus $\CZ(x)$ is an automorphism of $\CX_U$. For any subgroup $U'^v \subset U^v$, we have a similar automorphism on $\CX_{U_vU'^v}$ determined by $x$, and this automorphism does not change the relative dualizing sheaf of $\CX_{U_vU'^v}$. 
By the compatibility of the arithmetic Hodge bundle with the norm map reviewed in \S\ref{sec shimura curve}, we see that $\CZ(x)$ fixes the arithmetic class $\hat\xi$. If $x\in \bb^\Sigma$, then $D(x)$ is a \emph{constant $\BQ$-divisor}, i.e., the pull-back of an arithmetic $\BQ$-divisor from $\Spec (O_{F'})$, where $F'$ is the algebraic closure of $F$ in the function field of $X_U$. Note that $F'$ is the abelian extension of $F$ with Galois group $\pi_0(X_{U,\overline F})=F_+^\times\bs \afcross/q(U)$ via class field theory. See \cite[\S7.3.2]{YZZ} for the reason for the constancy of $D(x)$. Hence, for all $x\in \bb_f^\times$, $D(x)$ is a constant $\BQ$-divisor, i.e., the pull-back of an arithmetic $\BQ$-divisor from $\Spec (O_{F'})$. By abuse of notation, we also denote by $D(x)$ the arithmetic degree of the arithmetic $\BQ$-divisor on $\Spec (O_{F'})$. Hence we get a number $D(x)\in \BR$. It is more convenient to introduce $$ D_0(x):= \frac{1}{\deg Z(x)} D(x). $$ By definition, $D_0(x)$ is \emph{additive} in that $$ D_0(x)=\sum_{v\nmid\infty} D_0(x_v). $$ The sum has only finitely many nonzero terms. Now we have the following basic result. \begin{lem} \label{derivation basic} For any $t\in C_U$, \begin{align*} \pair{Z(x) t, \xi} = \deg Z(x)\, \pair{t, \xi} - \deg Z(x)\, \sum_{v\nmid \infty}D_0(x_v) . \end{align*} \end{lem} \begin{proof} This is a direct consequence of \cite[Lemma 7.7]{YZZ}, which asserts \begin{align*} \pair{Z(x) D, \xi} = \deg Z(x)\, \pair{D, \xi} - \deg(D) D(x), \quad D\in \Div(X_{U,\ol F}), \ x\in \bvcross. \end{align*} There is a gap in the proof in loc. cit. due to the extra term caused by the weak admissibility, but the conclusion still holds. In fact, loc. cit. proves that \begin{align*} \pair{\overline{Z(x) D}, \hat\xi} = \deg Z(x)\, \pair{\ol D, \hat\xi} - \deg(D) D(x). \end{align*} On the other hand, by Lemma \ref{int of Green}, $$ \pair{D, \xi}=\pair{\overline{D}, \hat\xi}+(\kappa_U^\circ)^{-1} \deg(D), $$ and $$ \pair{Z(x) D, \xi}=\pair{\overline{Z(x) D}, \hat\xi}+(\kappa_U^\circ)^{-1} \deg Z(x) \deg(D). $$ This implies the original statement. \end{proof} \subsubsection*{The last term} Now we are ready to compute $\pair{Z_*(g,\phi) t_1, \xi_{t_2}}$. \begin{pro} \label{geometric series extra2} \begin{eqnarray*} \pair{Z_*(g,\phi) t_1, \xi_{t_2}} = -\frac 12 \kappa_U^\circ\ E_*(0,g,r(t_1,t_2) \phi)_U\ \pair{[1], \xi} +\frac 12 \sum_{v\notin \Sigma} \CF^{(v)}_\phi(g, (t_1,t_2)), \end{eqnarray*} where $$ \CF^{(v)}_\phi(g, (t_1,t_2)) = \sumu\sum_{a\in F^\times} W_{a}^v(0,g,u,r(t_1,t_2)\phi) \, f_{\phi_v, a}(g,(t_1,t_2),u)$$ with $$ f_{\phi_v, a}(g,(t_1,t_2),u)=(1-N_v^{-2})|d_v|^{\frac32} |au^{-1}|_v\, \kappa_U^\circ \sum_{y\in \bb_v(au^{-1})/U_v^1} r(g, (t_1,t_2))\phi_v(y,u) D_0(t_{1,v}^{-1}y t_{2,v}). $$ Here $\bb_v(a)=\{x\in \bb_v: q(x)=a \}.$ \end{pro} \begin{proof} Denote $t=t_1t_2^{-1}$. We have $$\pair{Z_*(g,\phi) t_1, \xi_{t_2}} =\pair{Z_*(g,\phi)_{q(1/t)} t_1, \xi_{t_2}} =\pair{Z_*(g,\phi)_{q(1/t)} t_1, \xi} =\pair{Z_*(g,\phi)_{q(1/t)} [1], \xi}.$$ Here the first equality holds as in the proof of Proposition \ref{geometric series extra1}, the second equality holds for a similar reason concerning geometrically connected components, and the third equality holds by the Galois action associated to $t_1$. Recall that from \cite[\S 4.2.4]{YZZ} we have \begin{eqnarray*} Z_*(g)_{q(1/t)} &=& w_U \sum_{u\in \mu_U'\bs F^\times} \sum_{a\in F_+\cross} \sum_{y\in K^t\bs \bb_f(a)} r(g, (t,1))\phi(y,u)Z(t^{-1}y).
\end{eqnarray*} Here $$K^t= \GSpin(\BB_f,q)\cap tKt^{-1} = \{(h_1, h_2)\in (tUt^{-1}) \times U: q(h_1)=q(h_2) \}$$ acts on $$\bb_f(a)=\{x\in \bb_f: q(x)=a \}$$ by $(h_1,h_2):x\mapsto h_1xh_2^{-1}$. Hence, Lemma \ref{derivation basic} gives \begin{eqnarray*} \pair{Z_*(g,\phi)_{q(1/t)} [1], \xi} = \deg(Z_*(g,\phi)_{q(1/t)}) \pair{[1], \xi} +\sum_{v\nmid\infty} \CF^{(v)}_\phi(g, (t_1,t_2)), \end{eqnarray*} where $$ \CF^{(v)}_\phi(g, (t_1,t_2)) =-w_U \sum_{u\in \mu_U'\bs F^\times} \sum_{a\in F_+\cross} \sum_{y\in K^t\bs \bb_f(a)} r(g, (t,1))\phi(y,u)\cdot \deg Z(t^{-1}y)\cdot D_0(t_v^{-1}y_v). $$ As in the proof of Proposition \ref{geometric series extra1}, we already have \begin{eqnarray*} \deg Z(g, \phi)_{U,q(t^{-1})}=-\frac 12 \kappa_U^\circ\ E(0,g,r(t_1,t_2) \phi)_U. \end{eqnarray*} It remains to convert the above expression for $\CF^{(v)}_\phi(g, (t_1,t_2))$ to the form in the proposition. Consider the last summation \begin{eqnarray*} \sum_{y\in K^t\bs \bb_f(a)} r(g, (t,1))\phi(y,u) \deg Z(t^{-1}y)\cdot D_0(t_v^{-1}y_v). \end{eqnarray*} By $\deg Z(t^{-1}y)=|Ut^{-1}yU/U|$, the summation is equal to \begin{eqnarray*} \sum_{y\in K^t\bs \bb_f(a)} \ \sum_{x\in Ut^{-1}yU/U} r(g)\phi(x,q(t)u) D_0(x_v). \end{eqnarray*} Note that $$Ut^{-1}yU/U=Kt^{-1}y/U=t^{-1}(tKt^{-1}y/U)=t^{-1}(K^ty/U^1).$$ The summation becomes \begin{eqnarray*} && \sum_{y\in \bb_f(a)/U^1} r(g)\phi(t^{-1}y,q(t)u) D_0(t_v^{-1}y_v) \\ &=& \left( \sum_{y\in \bb_f^v(a)/(U^v)^1} r(g, (t,1))\phi^v(y,u) \right) \cdot \left( \sum_{y_v\in \bb_v(a)/U_v^1} r(g, (t,1))\phi_v(y,u) D_0(t_v^{-1}y_v)\right). \end{eqnarray*} We assume that $v$ is split in $\BB$; otherwise $D_0(t_v^{-1}y_v)=0$ identically. It suffices to convert the first summation on the right-hand side in this case. The proof is similar to the proof of \cite[Proposition 4.2]{YZZ}, except that we do not convert the second summation on the right-hand side. In fact, by \cite[Proposition 2.9]{YZZ}, \begin{eqnarray*} \sum_{y\in \bb_f^v(a)/(U^v)^1} r(g, (t,1))\phi^v(y,u) = -\frac{|a|_v}{\vol((U^v)^1)\vol(\bb_\infty^1)}W_{au}^v(0,g,u,r(h)\phi). \end{eqnarray*} The negative sign comes from the Weil index of $\BB^v$, which is $-1$ since $\BB_v$ is a matrix algebra. Finally, apply equation (4.3.2) in the proof of \cite[Proposition 4.2]{YZZ}. Note that $\vol(U_v^1)=(1-N_v^{-2})|d_v|^{\frac32}$ by the normalization in \cite[\S1.6.2]{YZZ}. It remains to check $$ f_{\phi_v, a}(g,(t_1,t_2),u)=f_{\phi_v, a}(g,(t,1),u). $$ This can be obtained by writing the sum over $\bb_v(au^{-1})/U_v^1$ as an integral over $\bb_v(au^{-1})$. \end{proof} For simplicity, write $$ \CF^{(v)}_\phi(g) = \CF^{(v)}_\phi(g, (1,1)), \quad f_{\phi_v, a}(g,u)=f_{\phi_v, a}(g,(1,1),u), \quad f_{\phi_v, a}(1,u)=f_{\phi_v, a}(1,(1,1),u). $$ \begin{pro} \label{geometric series extra3} For any non-archimedean place $v$ split in $\BB$, $f_{\phi_v, a}(1, u)\neq 0$ only if $a\in \ofv$ and $u\in\ofv\cross$. In that case, $$ f_{\phi_v, a}(1,u)= |d_v|^{\frac32} \frac{1+N_v^{-1}}{1-N_v^{-1}} \big((r+2)N_v^{-(r+1)}-rN_v^{-(r+2)}-(r+2)N_v^{-1}+r\big) \log N_v. $$ Here $r=v(a)$. \end{pro} \begin{proof} This is essentially computed in \cite{Zh1}. By definition, $$ f_{\phi_v, a}(1,u)=(1-N_v^{-2})|d_v|^{\frac32} |au^{-1}|_v \, \kappa_U^\circ \sum_{y\in \bb_v(au^{-1})/U_v^1} \phi_v(y,u) D_0(y). $$ It is nonzero only if $u\in \ofv\cross$ and $a\in \ofv$, which we assume in the following. Identify $\bb_v=M_2(F_v)$ and $O_{\BB_v}=M_2(\ofv)$. Note $r=v(a)\geq 0$. Denote $$ M_2(\ofv)_r=\{y \in M_2(\ofv): v(\det(y))=r\}.
$$ Then the summation equals $$ \sum_{y\in (\bb_v(au^{-1}) \cap O_{\BB_v})/U_v^1} D_0(y) =\sum_{y\in M_2(\ofv)_r/\GL_2(\ofv)} D_0(y) =\sum_{y\in \GL_2(\ofv)\bs M_2(\ofv)_r/\GL_2(\ofv)} D(y). $$ Note that the double cosets in the last summation correspond exactly to the classical Hecke correspondence $T(p_v^r)$. Hence, the above further equals $$ T(p_v^r) \hat\xi- \deg(T(p_v^r)) \hat\xi. $$ Here $\deg(T(p_v^r))=\sigma_1(p_v^r)=1+N_v+\cdots+ N_v^r$. By \cite[Proposition 4.3.2]{Zh1}, $$ T(p_v^r) \overline\CL- \sigma_1(p_v^r)\ \overline\CL = -2\sum_{i=0}^r i N_v^{r-i}\log N_v+ \log (N_v^{r\sigma_1(p_v^r)}). $$ In fact, the proposition considers a morphism $$ T(p_v^r) \CL\lra \CL^{\otimes \deg(T(p_v^r))} $$ and computes the norms of this morphism at non-archimedean places in part 1 and at archimedean places in part 2. Note that the result in part 2 of the proposition should be $N(m)^{\sigma_1(m)}$ instead of $N(m)^{2\sigma_1(m)}$. The sum of the logarithms of these norms gives the formula. An elementary computation, using $$ \sum_{i=0}^r i N_v^{-i}=\frac{N_v^{-1}-(r+1)N_v^{-(r+1)}+rN_v^{-(r+2)}}{(1-N_v^{-1})^2}, $$ gives $$ T(p_v^r) \overline\CL- \sigma_1(p_v^r)\ \overline\CL = \frac{(r+2)N_v^{-(r+1)}-rN_v^{-(r+2)}-(r+2)N_v^{-1}+r}{(1-N_v^{-1})^2} N_v^r\log N_v. $$ The result follows by $\overline\CL=\kappa_U^\circ \cdot \hat\xi$. \end{proof} The expression of $f_{\phi_v, a}(1,u)$ in the above proposition happens to be very close to that of $W_{a,v}'(0,1,u)-\frac12\log|a|_v W_{a,v}(0,1,u)$ in Lemma \ref{local explicit}(1). They will give a crucial cancellation in our matching of the derivative series and the height series. \section{Comparison of the two series} \label{sec comparison} In this section, we will combine results in the last two sections to prove Theorem \ref{main}. The upshot is to apply Lemma \ref{pseudo} to the difference $$\mathcal D(g,\phi)=\pr I'(0, g, \phi)_U- 2 Z(g,(1,1))_U. $$ Here we take $t_1=t_2=1$ for the CM points. We refer to \S\ref{sec pseudo} for the notion of pseudo-Eisenstein series, and to \S\ref{sec key lemma} for a quick review of the notion of pseudo-theta series introduced in \cite[\S6.2]{YZ}. We will see that $\mathcal D(g,\phi)$ is a sum of finitely many non-singular pseudo-Eisenstein series and non-singular pseudo-theta series. Then Lemma \ref{pseudo} will imply that $\mathcal D(g,\phi)$ is the sum of the corresponding Eisenstein series and theta series. Since $\mathcal D(g,\phi)$ is cuspidal, its constant term must be zero. This implies that the sum of the constant terms of the corresponding Eisenstein series and theta series is zero, which gives an equality involving the modular height of $X_U$. After computing all other terms, we get a formula for the modular height. To start with, let $(F,E,\BB, U,\phi)$ be as in \S \ref{choices}. We assume all the conditions of \S \ref{choices} throughout this section. By Theorem \ref{analytic series}, $$ \pr I'(0, g,\phi)_U =\pr' I'(0, g,\phi)_U-\pr'\CJ'(0, g, \phi)_U. $$ By Theorem \ref{height series}, $$Z(g, (1, 1))_U =\pair{Z_*(g,\phi)_U 1, 1}-\pair{Z_*(g,\phi)_U 1, \xi_{1}} -\pair{Z_*(g,\phi)_U \xi_{1}, 1} +\pair{Z_*(g,\phi)_U\xi_{1}, \xi_{1}}.$$ Then the difference \begin{eqnarray*} \mathcal D(g,\phi) &=& \pr' I'(0, g,\phi)_U-2\pair{Z_*(g,\phi)_U 1, 1}\\ &&-\pr'\CJ'(0, g, \phi)_U+ 2\pair{Z_*(g,\phi)_U 1, \xi_{1}}\\ && +2\pair{Z_*(g,\phi)_U \xi_{1}, 1} -2\pair{Z_*(g,\phi)_U\xi_{1}, \xi_{1}}.
\end{eqnarray*} In the following, for each of the three lines on the right-hand side of the above expression of $\mathcal D(g,\phi)$, we will describe the computational result, check that it is non-singular in the pseudo sense, and give its contribution in the equality after applying Lemma \ref{pseudo}. \subsubsection*{Third line} Start with the third line, which has the simplest expression. By Proposition \ref{geometric series extra1}, $$\pair{Z_*(g,\phi)_U \xi_{1}, 1}=-\frac 12 \kappa_U^\circ\ E_*(0,g,\phi)_U\cdot \pair{\xi_{1}, 1},$$ $$\pair{Z_*(g,\phi)_U \xi_{1}, \xi_{1}}=-\frac 12 \kappa_U^\circ\ E_*(0,g,\phi)_U\cdot \pair{\xi_{1}, \xi_{1}}.$$ Here $\kappa_U^\circ$ denotes the degree of $L_U$ on a geometrically connected component of $X_U$. The contribution of $2\pair{Z_*(g,\phi)_U \xi_{1}, 1} -2\pair{Z_*(g,\phi)_U\xi_{1}, \xi_{1}}$ after Lemma \ref{pseudo} is \begin{equation}\label{line3} \kappa_U^\circ\cdot (\pair{\xi_{1}, \xi_1}-\pair{\xi_{1}, 1})\cdot E(0,g,\phi)_U. \end{equation} \subsubsection*{Second line} Now we consider the second line. Denote $c_3'=(1+\log 4)[F:\QQ]$. By Proposition \ref{analytic series extra}, $$ \pr'\CJ'(0, g, \phi) =- (c_0+c_3') E_*(0,g,\phi)- \sum_{v\nmid\infty} C_*(0, g,\phi)(v) +2\sum_{v\nmid\infty} E'(0,g,\phi)(v).$$ Here we have Eisenstein series \begin{eqnarray*} E(s, g, \phi) &=&\sumu \sum _{\gamma \in P(F)\bs \GL_2(F)} \delta(\gamma g)^s r(\gamma g)\phi(0,u),\\ C(s, g, \phi)(v) &=&\sumu \sum _{\gamma \in P(F)\bs \GL_2(F)} \delta(\gamma g)^s c_{\phi_v}(\gamma g,0,u)\ r(\gamma g^v)\phi^v(0,u), \end{eqnarray*} with $$c_{\phi_v}(g,y,u)=r(g)\phi_{1,v}(y,u) W_{0,v}^{\circ}\, '(0,g,u,\phi_{2,v}) + \log \delta(g_v)r(g)\phi_v(y,u);$$ and we have a pseudo-Eisenstein series $$ E'(0,g,\phi)(v) =\sumu\sum_{a\in F^\times} W_a^v(0,g,u,\phi^v) \left(W_{a,v}'(0,g,u,\phi_v)-\frac12\log|a|_v\cdot W_{a,v}(0,g,u,\phi_v)\right).$$ By Proposition \ref{geometric series extra2}, \begin{eqnarray*} \pair{Z_*(g,\phi) 1, \xi_{1}} = -\frac 12 \kappa_U^\circ\ E_*(0,g,\phi)_U\ \pair{1, \xi} + \frac 12 \sum_{v\notin \Sigma} \CF^{(v)}_\phi(g), \end{eqnarray*} where the pseudo-Eisenstein series $$ \CF^{(v)}_\phi(g) = \sumu\sum_{a\in F^\times} W_{a}^v(0,g,u,\phi) \, f_{\phi_v, a}(g,u)$$ with $$ f_{\phi_v, a}(g,u)=(1-N_v^{-2})|d_v|^{\frac32} |au^{-1}|_v\, \kappa_U^\circ \sum_{y\in \bb_v(au^{-1})/U_v^1} r(g)\phi_v(y,u) D_0(y). $$ The difference gives \begin{eqnarray*} &&-\pr'\CJ'(0, g, \phi)_U+ 2\pair{Z_*(g,\phi) 1, \xi_{1}}\\ &=& (c_0+c_3'-\kappa_U^\circ \pair{1, \xi}) E_*(0,g,\phi) + \sum_{v\nmid\infty} C_*(0, g,\phi)(v) \\ && -2 \sum_{v\in \Sigma_f} E'(0,g,\phi)(v) -2\sum_{v\notin \Sigma} (E'(0,g,\phi)(v)-\frac12\CF^{(v)}_\phi(g)). \end{eqnarray*} This is a finite sum of Eisenstein series and pseudo-Eisenstein series, by the following considerations using the explicit local results. \begin{itemize} \item[(1)] For any $v\in \Sigma_f$, the explicit result of Lemma \ref{local explicit}(2) implies that $$W_{a,v}'(0,1,u,\phi_v)-\frac12\log|a|_v\cdot W_{a,v}(0,1,u,\phi_v),$$ as a function of $(a,u)\in F_v^\times\times F_v^\times$, satisfies the condition of Lemma \ref{whittaker image2}. Therefore, $E'(0,g,\phi)(v)$ is a non-singular pseudo-Eisenstein series in this case. Denote by $$E(0,g, \phi_v^+\otimes\phi^v)+E(0,g, \phi_v^-\otimes\phi^v)$$ the associated Eisenstein series. Note that Lemma \ref{whittaker image1} and Lemma \ref{local explicit}(2) further give $$ r(w)\phi_v^+(0,u)+r(w)\phi_v^-(0,u)=0, \quad \forall u\in F_v\cross.
$$ \item[(2)] For any $v\notin \Sigma$, the pseudo-Eisenstein series $$E'(0,g,\phi)(v)- \frac12\CF^{(v)}_\phi(g) =\sumu\sum_{a\in F^\times} W_a^v(0,g,u,\phi^v) \tilde f_{\phi_v, a}(g,u),$$ where $$ \tilde f_{\phi_v, a}(g,u) =\left(W_{a,v}'(0,g,u,\phi_v)-\frac12\log|a|_v\cdot W_{a,v}(0,g,u,\phi_v)\right) -\frac12 f_{\phi_v, a}(g,u) $$ for any $a,u\in F_v^\times, \ g\in \gl(F_v)$. By the explicit results of Lemma \ref{local explicit}(1) and Proposition \ref{geometric series extra3}, $\tilde f_{\phi_v, a}(1, u)\neq 0$ only if $u\in\ofv\cross$ and $v(a)\geq -v(d_v)$. Moreover, for $u\in \ofv\cross$ and $a\in \ofv$, \begin{eqnarray*} \tilde f_{\phi_v, a}(1,u) = \left(-\zeta_v'(2)/\zeta_v(2) + \log|d_v|\right) W_{a,v}(0,1,u) + |d_v|^{\frac{3}{2}} \frac{1-|d_v|}{N_v-1}\log N_v. \end{eqnarray*} By Lemma \ref{whittaker image2}, we see that $E'(0,g,\phi)(v)- \frac12\CF^{(v)}_\phi(g)$ is a non-singular pseudo-Eisenstein series in this case. The associated Eisenstein series is of the form $$ \left(-\zeta_v'(2)/\zeta_v(2) + \log|d_v|\right) E(0,g, \phi)+E(0,g, \phi_v^+\otimes\phi^v)+E(0,g, \phi_v^-\otimes\phi^v).$$ The last two terms are 0 for almost all $v\notin\Sigma$. Moreover, Lemma \ref{whittaker image2} also gives for all $v\notin\Sigma$, $$ \phi_v^+(0,u)+\phi_v^-(0,u)=0, \quad \forall u\in F_v\cross. $$ \end{itemize} Therefore, the contribution of $-\pr'\CJ'(0, g, \phi)_U+ 2\pair{Z_*(g,\phi) 1, \xi_{1}}$ after Lemma \ref{pseudo} is \begin{eqnarray}\label{line2} && (c_0+c_3'-\kappa_U^\circ \pair{1, \xi}+2\sum_{v\notin \Sigma} \left(\zeta_v'(2)/\zeta_v(2) - \log|d_v|\right)) E(0,g,\phi) \nonumber\\ &&+ \sum_{v\nmid\infty} C(0, g,\phi)(v) - 2 \sum_{v\nmid \infty} \big(E(0,g, \phi_v^+\otimes\phi^v)+E(0,g, \phi_v^-\otimes\phi^v)\big). \end{eqnarray} \subsubsection*{First line} It remains to consider $$\pr' I'(0, g,\phi)_U-2\pair{Z_*(g,\phi)_U 1, 1}.$$ By Theorem \ref{analytic series}, the current $\pr' I'(0, g,\phi)_U$ has the same expression as the old $\pr I'(0, g,\phi)_U$ in \cite[Theorem 7.2]{YZ}. By Theorem \ref{height series}, the current $\pair{Z_*(g,\phi)_U 1, 1}$ has the expression of the old $Z(g, (1,1),\phi))_U$ in \cite[Theorem 8.6]{YZ} with the extra term $\ds-\frac12 [F:\QQ] E_*(0,g,\phi)$. Consequently, the current difference $\pr' I'(0, g,\phi)_U-2\pair{Z_*(g,\phi)_U 1, 1}$ has the expression of the old $\mathcal D(g,\phi)$ in \cite[\S9.1]{YZ} with an extra term $[F:\QQ] E_*(0,g,\phi)$. Note that the choice of $\phi$ in \cite{YZ} is slightly different from what we have here. In \cite[\S7.2]{YZ}, it has an extra set $S_2$ of two non-archimedean places $v$ of $F$ split in $E$ with certain degenerate $\phi_v$. This assumption is made to kill the terms close to $E(s,g,\phi)$. However, the computations for $\mathcal D(g,\phi)$ holds for our $\pr' I'(0, g,\phi)_U-2\pair{Z_*(g,\phi)_U 1, 1}$ by pretending $S_2=\emptyset$. Hence, the translation of the computational results of \cite[\S9.1]{YZ} to the current setting gives \begin{eqnarray*} && \pr' I'(0, g,\phi)_U-2\pair{Z_*(g,\phi)_U 1, 1}\\ &=& -2 \sum_{v\nmid\infty \ \nonsplit} \barint_{C_U} (\CK^{(v)}_{\phi}(g,(t,t))-\CM^{(v)}_{\phi}(g,(t,t))\log N_v) dt\\ &&+\sum_{v\in \Sigma_f} (2\log N_v) \barint_{C_U} j_{\bar v}(Z_*(g,\phi)_Ut,t) dt\\ &&+ \sum_{v\nmid\infty} \sumu \sum_{y\in E^\times} d_{\phi_v}(g,y,u)\ r(g)\phi^v(y,u)\\ && +(\frac{2i_0(1,1)}{[O_E\cross:O_F\cross]} -c_1) \sumu \sum_{y\in E^\times} r(g)\phi(y,u)\\ && +[F:\QQ] E_*(0,g,\phi)\\ && -[F:\QQ] (\gamma+\log(4\pi)-1) E_*(0,g,\phi). 
\end{eqnarray*} Here the last line comes from and is equal to $$ -2 \sum_{v|\infty} \barint_{C_U} (\overline\CK^{(v)}_{\phi}(g,(t,t))-\CM^{(v)}_{\phi}(g,(t,t))) dt, $$ which follows from Proposition \ref{archimedean comparison} and was missed in \cite[\S9.1]{YZ}. The last expression is a sum of finitely many non-singular pseudo-theta series and non-singular pseudo-Eisenstein series. We refer to \S\ref{sec key lemma} for a quick review of the notion of pseudo-theta series introduced in \cite[\S6.2]{YZ}. To explain why the fourth and the fifth lines are pseudo-theta series, take the fourth line as an example. We have the series $$ \sumu \sum_{y\in E^\times} d_{\phi_v}(g,y,u)\ r(g)\phi^v(y,u). $$ Let $B$ be a totally definite quaternion algebra over $F$, with an embedding $E\to B$ and an isomorphism $B\otimes_F \AA^{S_0}\to \BB^{S_0}$ compatible with $E_\AA\to \BB$. Here $S_0$ is the set of places $v$ at which $B_v$ is not isomorphic to $\BB_v$. Then $\phi^{S_0\cup \{v\}}$ is also viewed as a Schwartz function on $B_{\AA^{S_0\cup \{v\}}}\times (\AA^{S_0\cup \{v\}})^\times$. It follows that we can view the series as a pseudo-theta series sitting on quadratic spaces $0\subset E\subset B$. Note that neither the fourth line nor the fifth line contributes to the result after Lemma \ref{pseudo} because they are \textit{degenerate} pseudo-theta series. So we only recall the other lines. Recall that \begin{eqnarray*} &&\CK^{(v)}_\phi(g,(t_1,t_2))-\CM^{(v)}_\phi(g,(t_1,t_2))(\log N_v)\\ &=&\sum_{u\in \mu_U^2\bs F\cross}\sum _{y\in B(v)-E} r(g,(t_1,t_2))\phi^v (y, u) \bar k_{r(t_1,t_2)\phi _v}(g,y, u) \end{eqnarray*} where $$ \bar k_{\phi_v}(g,y,u)= k_{\phi_v}(g,y,u)-m_{r(g)\phi_v}(y,u)\log N_v. $$ In particular, $$ \bar k_{\phi_v}(y,u)= k_{\phi_v}(1,y,u)-m_{\phi_v}(y,u)\log N_v $$ extends to a Schwartz function in $\overline\CS(B(v)_v\times F_v\cross)$. Similarly, \begin{eqnarray*} j_{\bar v}(Z_*(g,\phi)_Ut_1,t_2) &=&\sum_{u\in \mu_U^2\bs F\cross}\sum _{y\in B(v)^\times} r(g,(t_1,t_2))\phi^v (y, u) r(t_1,t_2)l_{r(g)\phi _v}(y, u) \end{eqnarray*} where $l_{\phi_v}(y,u)$ extends to a Schwartz function in $\overline\CS(B(v)_v\times F_v\cross)$. The contribution of $\pr' I'(0, g,\phi)_U-2\pair{Z_*(g,\phi)_U 1, 1}$ after Lemma \ref{pseudo} is \begin{eqnarray} && -2 \sum_{v\nmid\infty \ \nonsplit} \barint_{C_U} \theta(g,(t,t), \bar k_{\phi_v}\otimes\phi^v) dt \nonumber\\ && +\sum_{v\in \Sigma_f} (2\log N_v) \barint_{C_U} \theta(g,(t,t), l_{\phi_v}\otimes\phi^v) dt \nonumber\\ && -[F:\QQ] (\gamma+\log(4\pi)-2) E_*(0,g,\phi). \label{line1} \end{eqnarray} \subsubsection*{The sum} As a conclusion, the difference $\mathcal D(g,\phi)$ is the sum of finitely many non-singular pseudo-Eisenstein series and finitely many non-singular pseudo-theta series. We are finally ready to apply Lemma \ref{pseudo}.
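Before doing so, we record, for the convenience of the reader, a small bookkeeping check of the constant in the last line of (\ref{line1}): the two extra multiples of $E_*(0,g,\phi)$ appearing in the above expression of $\pr' I'(0, g,\phi)_U-2\pair{Z_*(g,\phi)_U 1, 1}$ combine as
$$
[F:\QQ]\, E_*(0,g,\phi)-[F:\QQ] (\gamma+\log(4\pi)-1)\, E_*(0,g,\phi)
=-[F:\QQ] (\gamma+\log(4\pi)-2)\, E_*(0,g,\phi).
$$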
For the conclusion, the sum of (\ref{line3}), (\ref{line2}) and (\ref{line1}) gives \begin{eqnarray} \mathcal D(g,\phi) &=& -2 \sum_{v\nmid\infty \ \nonsplit} \barint_{C_U} \theta(g,(t,t), \bar k_{\phi_v}\otimes\phi^v) dt +\sum_{v\in \Sigma_f} (2\log N_v) \barint_{C_U} \theta(g,(t,t), l_{\phi_v}\otimes\phi^v) dt \nonumber\\ &&+ \sum_{v\nmid\infty} C(0, g,\phi)(v) - 2 \sum_{v\nmid\infty} \big(E(0,g, \phi_v^+\otimes\phi^v)+E(0,g, \phi_v^-\otimes\phi^v)\big) \nonumber\\ &&+c_4\cdot E(0,g,\phi)_U, \label{final D} \end{eqnarray} where $$c_4=c_0+c_3'-2\kappa_U^\circ \pair{1, \xi}+\kappa_U^\circ \pair{\xi_1, \xi_1} +2\sum_{v\notin \Sigma} \left(\zeta_v'(2)/\zeta_v(2) - \log|d_v|\right) -[F:\QQ] (\gamma+\log(4\pi)-2).$$ Here we have used the identity $ \pair{1, \xi}= \pair{1, \xi_1}$, which holds by considering geometrically connected components. Moreover, we have the following result. \begin{lem} $$ \kappa_U^\circ \pair{\xi_1, \xi_1}=-2\, h_{\ol \CL_U}(X_U), \qquad \kappa_U^\circ \pair{1, \xi}=- h_{\ol\CL_U}(P_U)+[F:\QQ]. $$ \end{lem} \begin{proof} Denote by $|\pi_0|$ the number of geometrically connected components of $X_U$. The first result goes as follows: $$ \kappa_U^\circ \pair{\xi_1, \xi_1} = \frac{\kappa_U^\circ}{|\pi_0|}\pair{\xi, \xi} = \frac{\kappa_U^\circ}{|\pi_0|} \frac{1}{(\kappa_U^\circ)^2} \pair{\ol\CL_U, \ol\CL_U} = \frac{1}{\deg(L_U)} \pair{\ol\CL_U, \ol\CL_U} =- \frac{1}{\deg(L_U)} \wh\deg(\hat c_1(\ol\CL_U)^2). $$ The first equality holds by considering geometrically connected components of $X_U$, and the other equalities hold by definition. Note that the negative sign of the last equality is due to different normalizations of the intersection numbers, which originates from the negative sign in the arithmetic Hodge index theorem. The second result goes as follows: $$ \kappa_U^\circ \pair{1, \xi} = \pair{1, \ol\CL_U} = \pair{\bar P_U, \ol\CL_U}+[F:\QQ] = - h_{\ol\CL_U}(P_U)+[F:\QQ]. $$ Here the second equality follows from Lemma \ref{int of Green}. \end{proof} By the lemma, $c_4$ contains the height $h_{\ol \CL_U}(X_U)$, which we need to compute. For $h_{\ol\CL_U}(P_U)$, by Theorem \ref{height CM}, the main result of Part II of \cite{YZ}, $$ h_{\ol\CL_U}(P_U) =-\frac{L_f'(0,\eta)}{L_f(0,\eta)} +\frac 12 \log \frac{d_\BB}{d_{E/F}}. $$ It cancels the main part of $$c_0=2\frac{L'(0,\eta)}{L(0,\eta)} +\log|d_E/d_F| =2\frac{L_f'(0,\eta)}{L_f(0,\eta)} +\log|d_E/d_F| - [F:\BQ](\gamma+\log 4\pi ).$$ More precisely, $$ c_0+2\,h_{\ol\CL_U}(P_U)=\log|d_\BB d_F| - [F:\BQ](\gamma+\log 4\pi ). $$ By $c_3'=(1+\log 4)[F:\QQ]$, we further have $$ c_0+c_3'+2\,h_{\ol\CL_U}(P_U)=\log|d_\BB d_F| - [F:\BQ](\gamma+\log \pi-1). $$ Hence, we can simplify $c_4$ to get $$ c_4=-2\, h_{\ol \CL_U}(X_U) +2\sum_{v\notin \Sigma} \left(\zeta_v'(2)/\zeta_v(2) - \log|d_v|\right) +\log|d_\BB d_F| -[F:\QQ] (2\gamma+2\log(2\pi)-1). $$ \subsubsection*{The constant terms} Note that $\mathcal D(g,\phi)$ is a cusp form, so its constant term must be 0. Then the constant terms of the right-hand side of (\ref{final D}) should be 0. This will give the result we need. In the following, we first treat the case $|\Sigma|>1$ and then mention the difference for the easier case $|\Sigma|=1$. While it is straightforward to write down the constant terms of the theta series, it takes a little extra effort to treat those for the Eisenstein series.
We claim that the constant terms of the Eisenstein series $$ E(0,g,\phi)_U,\qquad C(0, g,\phi)(v), \qquad E(0,g, \phi_v^+\otimes\phi^v)+E(0,g, \phi_v^-\otimes\phi^v), $$ are respectively equal to \begin{eqnarray*} && \sumu r(g)\phi(0,u), \\ && \sumu c_{\phi_v}(g,0,u)\ r(g^v)\phi^v(0,u),\\ && \sumu (r(g_v)\phi_v^+(0,u)+r(g_v)\phi_v^-(0,u))\ r(g^v)\phi^v(0,u). \end{eqnarray*} In other words, the contribution from the intertwining part at $s=0$ is 0. The claim is a consequence of the assumption $|\Sigma|>1$. In fact, the result for $E(0,g,\phi)_U$ is an immediate consequence of \cite[Proposition 2.9(3)]{YZZ}. The other two Eisenstein series are similar, and we take $C(0, g,\phi)(v)$ as an example. Recall from \S\ref{sec 3.1} that $$C(s, g, \phi)(v)= \sumu C(s, g, u,\phi)(v)$$ with $$C(s, g, u, \phi)(v)= \sum _{\gamma \in P^1(F)\bs \SL_2(F)} \delta(\gamma g)^s \Psi(\gamma g, u),$$ where $$\Psi(g,u) =c_{\phi_v}(g_v,0,u)\ r(g^v)\phi^v(0,u) $$ is a principal series in the sense that $$ \Psi(m(a)n(b)g,u)=|a|_\AA^{2}\Psi(g,u), \quad a\in \AA^\times, \ b\in \AA. $$ The constant term $$ C_0(s, g, u, \phi)(v)=\delta(g)^s \Psi(g, u) + W_0(s,g,u,\Psi) $$ with the intertwining part $$ W_0(s,g,u,\Psi)=\int_\AA \delta(wn(b) g)^s \Psi(wn(b) g, u) db. $$ Note that $\Psi=\otimes_w \Psi_w$ is naturally a product of local terms, and we can define $W_{0,w}(s,g,u,\Psi_w)$ similarly. Following \cite[\S2.5.2]{YZZ}, for the sake of analytic continuation at $s=0$, we write $$ W_0(s,g,u,\Psi)=\tilde\zeta_F(s+1)\prod_w W_{0,w}^\circ(s,g,u,\Psi_w), $$ where the normalized term $$ W_{0,w}^\circ(s,g,u,\Psi_w)=\frac{1}{\zeta_w(s+1)}W_{0,w}(s,g,u,\Psi_w) $$ is holomorphic at $s=0$ for any $w$. By \cite[Proposition 2.9(1)(a)]{YZZ}, $$ W_{0,w}^\circ(0,g,u,\Psi_w)=0, \quad w\in \Sigma\setminus\{v\}.$$ In other words, $W_{0,w}^\circ(s,g,u,\Psi_w)$ has a zero at $s=0$ for any $w\in \Sigma\setminus\{v\}$. On the other hand, $\tilde\zeta_F(s+1)$ has a simple pole at $s=0$. By the above product expression, $W_0(s,g,u,\Psi)$ has a zero at $s=0$ of order at least $|\Sigma\setminus\{v\}|-1\geq 1$ by $|\Sigma|\geq 3$. It follows that $W_0(0,g,u,\Psi)=0$. This proves the claim. Taking the constant terms of (\ref{final D}), we end up with \begin{eqnarray*} 0 &=& -2 \sum_{v\nmid\infty \ \nonsplit} \sumu r(g)(\bar k_{\phi_v}\otimes\phi^v)(0,u) \\ &&+\sum_{v\in \Sigma_f} (2\log N_v) \sumu r(g)(l_{\phi_v}\otimes\phi^v)(0,u)\\ &&+ \sum_{v\nmid\infty} \sumu c_{\phi_v}(g,0,u)\ r(g^v)\phi^v(0,u)\\ &&- 2 \sum_{v\nmid\infty} \sumu (r(g_v)\phi_v^+(0,u)+r(g_v)\phi_v^-(0,u))\ r(g^v)\phi^v(0,u)\\ &&+c_4 \sumu r(g)\phi(0,u). \end{eqnarray*} The goal is to get a formula for $c_4$ from the expression. Then it suffices to take a specific $g\in \gla$ such that $$\sumu r(g)\phi(0,u)\neq 0.$$ Note that $g=1$ does not work since $\phi_v(0,u)=0$ for any $v\in \Sigma_f$. Define $g=(g_v)_v\in\gla$ by $$ g_v=\begin{cases} w=\matrixx{}{1}{-1}{}, & v\in \Sigma_f, \\ 1, & v\notin \Sigma_f. \end{cases} $$ Now we simplify the above equality for this $g$. By the above discussion, we already have $$r(g_v)\phi_v^+(0,u)+r(g_v)\phi_v^-(0,u)=0$$ for any $v\nmid\infty$. It follows that the fourth line of the right-hand side of the equality is 0. The equation becomes \begin{eqnarray} 0 &=& -2 \sum_{v\nmid\infty \ \nonsplit} \sumu r(g)(\bar k_{\phi_v}\otimes\phi^v)(0,u) \nonumber\\ &&+\sum_{v\in \Sigma_f} (2\log N_v) \sumu r(g)(l_{\phi_v}\otimes\phi^v)(0,u) \nonumber\\ &&+ \sum_{v\nmid\infty} \sumu c_{\phi_v}(g,0,u)\ r(g^v)\phi^v(0,u) \nonumber\\ &&+c_4 \sumu r(g)\phi(0,u).
\label{constant term} \end{eqnarray} Here we recall that $$ \bar k_{\phi_v}(y,u)= k_{\phi_v}(1,y,u)-m_{\phi_v}(y,u)\log N_v $$ extends to a Schwartz function in $\overline\CS(B(v)_v\times F_v\cross)$. Note that each of the first three lines of the right-hand side of (\ref{constant term}) has a sum over certain places $v$ of $F$. In the following, for each non-archimedean place $v$, we consider the contribution of this fixed $v$ from these three lines. \begin{itemize} \item[(1)] If $v$ is split in $E$, then only the third line has a contribution from $v$. In this case, by \cite[Lemma 7.6]{YZ}, $$ c_{\phi_v}(1,0,u) =\log |d_v|\ \phi_v(0,u). $$ \item[(2)] If $v$ is nonsplit in $E$ but split in $\BB$, then both the first line and the third line have contributions from $v$. In this case, by \cite[Lemma 7.4, Lemma 7.6, Lemma 8.7]{YZ}, $$ -2 \bar k_{\phi_v}(0,u)+c_{\phi_v}(1,0,u)=\log |d_v|\ \phi_v(0,u). $$ See also \cite[Proposition 9.2]{YZ}. \item[(3)] If $v$ is nonsplit in $\BB$, by Lemma \ref{derivative of intertwining}, Lemma \ref{average k}, and Proposition \ref{averaged j}, $$ -2 r(w)\bar k_{\phi_v}(0,u)+2r(w)l_{\phi_v}(0,u)\log N_v+c_{\phi_v}(w,0,u)=(-\log |d_v|+\alpha_v\log N_v) r(w)\phi_v(0,u),$$ where $$ \alpha_v= 1-\frac{N_v-1}{2(N_v+1)}. $$ Note that the expressions in Lemma \ref{derivative of intertwining} and Lemma \ref{average k} depend on the parity of $v(d_v)$, but their combined expression for $\alpha_v$ happens to be uniform for all $v(d_v)$. This can be explained by the fact that Lemma \ref{derivative of intertwining} treats $W_{0,v}'(0,g,u, \phi_{2,v})$, while Lemma \ref{average k} treats $W_{a,v}'(0,g,u, \phi_{2,v})$. \end{itemize} Taking all these into consideration, the equation becomes \begin{eqnarray*} 0 &=& \left(\sum_{v\notin\Sigma} \log |d_v|+ \sum_{v\in\Sigma_f} (-\log |d_v|+\alpha_v\log N_v) +c_4 \right) \sumu r(g)\phi(0,u). \end{eqnarray*} Note that $$ \sumu r(g)\phi(0,u)>0 $$ by our choice of $g$. We get an equation \begin{eqnarray} \sum_{v\notin\Sigma} \log |d_v|+ \sum_{v\in\Sigma_f} (-\log |d_v|+\alpha_v\log N_v) +c_4=0.\label{c4} \end{eqnarray} This is obtained for the case $|\Sigma|>1$. If $|\Sigma|=1$, we claim that (\ref{c4}) also holds. In this case, the constant terms for $E(0,g,\phi)_U$ and other similar series might contain nonzero intertwining parts by \cite[Proposition 2.9(3)]{YZZ}. We may figure out the effect of this by an extra argument. Alternatively, (\ref{final D}) simply implies $$\mathcal D(g,\phi) =c_4\cdot E(0,g,\phi)_U, $$ since the other terms are zero by the computational results. Comparing the constant terms, we easily have $c_4=0$, since $\mathcal D(g,\phi)$ is cuspidal. This agrees with (\ref{c4}). \subsubsection*{Logarithmic derivative} Recall that $$ c_4=-2\, h_{\ol \CL_U}(X_U) +2\sum_{v\notin \Sigma} \left(\zeta_v'(2)/\zeta_v(2) - \log|d_v|\right) +\log|d_\BB d_F| -[F:\QQ] (2\gamma+2\log(2\pi)-1). $$ Then (\ref{c4}) becomes \begin{eqnarray*} -2\, h_{\ol \CL_U}(X_U) +2\sum_{v\notin \Sigma} \zeta_v'(2)/\zeta_v(2) +\log|d_\BB d_F^2| -[F:\QQ] (2\gamma+2\log(2\pi)-1) +\sum_{v\in\Sigma_f} \alpha_v\log N_v =0. \end{eqnarray*} Note that $$ \frac{\zeta_{F}'(2)}{\zeta_{F}(2)} =\sum_{v\nmid\infty} \frac{\zeta_v'(2)}{\zeta_v(2)}, \qquad \frac{\zeta_v'(2)}{\zeta_v(2)} =-\frac{N_v^{-2}}{1-N_v^{-2}}\log N_v.$$ The first equality holds because the Euler product of $\zeta_{F}(s)$ is absolutely convergent for $\Re(s)>1$.
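For the reader's convenience, the second identity is an elementary computation with the local Euler factor (it is standard and not specific to this paper): since $\zeta_v(s)=(1-N_v^{-s})^{-1}$ for a non-archimedean place $v$, we have $$ \frac{\zeta_v'(s)}{\zeta_v(s)} =\frac{d}{ds}\log \zeta_v(s) =-\frac{d}{ds}\log(1-N_v^{-s}) =-\frac{N_v^{-s}\log N_v}{1-N_v^{-s}}, $$ and the value at $s=2$ is exactly $-\frac{N_v^{-2}}{1-N_v^{-2}}\log N_v$.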
Hence, we finally end up with \begin{eqnarray*} -2\, h_{\ol \CL_U}(X_U) +2\frac{\zeta_{F}'(2)}{\zeta_{F}(2)}+ \sum_{v\in\Sigma_f} \left(\alpha_v+\frac{2N_v^{-2}}{1-N_v^{-2}}+1\right)\log N_v +\log|d_F^2| -[F:\QQ] (2\gamma+2\log(2\pi)-1)=0. \end{eqnarray*} Here the local term $$ \alpha_v+\frac{2N_v^{-2}}{1-N_v^{-2}}+1 =1-\frac{N_v-1}{2(N_v+1)}+\frac{2N_v^{-2}}{1-N_v^{-2}}+1 =\frac32+\frac{1}{N_v-1} =\frac{3N_v-1}{2(N_v-1)}. $$ Therefore, \begin{eqnarray*} h_{\ol \CL_U}(X_U) =\frac{\zeta_{F}'(2)}{\zeta_{F}(2)}+ \sum_{v\in\Sigma_f} \frac{3N_v-1}{4(N_v-1)}\log N_v +\log|d_F|-(\gamma+\log(2\pi)-\frac12)[F:\QQ]. \end{eqnarray*} \subsubsection*{Functional equation} We can convert the logarithmic derivative at $2$ to that at $-1$ by the functional equation. In fact, the completed Dedekind zeta function $$ \tilde\zeta_{F}(s)=\tilde\zeta_{F,\infty}(s)\zeta_{F}(s)$$ with the gamma factor $$ \tilde\zeta_{F,\infty}(s)=(\pi^{-s/2}\Gamma(s/2))^{[F:\BQ]} $$ has functional equation $$ \tilde\zeta_{F}(1-s)=|d_F|^{s-\frac12} \tilde\zeta_{F}(s). $$ Note that \begin{align*} \frac{\tilde\zeta_{F,\infty}'(2)}{\tilde\zeta_{F,\infty}(2)} =& - \frac{1}{2}(\gamma+\log \pi )[F:\QQ],\\ \frac{\tilde\zeta_{F,\infty}'(-1)}{\tilde\zeta_{F,\infty}(-1)} =& - \frac{1}{2}(\gamma+\log (4\pi) )[F:\QQ]+[F:\QQ]. \end{align*} It follows that \begin{eqnarray*} h_{\ol \CL_U}(X_U) &=&\frac{\tilde\zeta_{F}'(2)}{\tilde\zeta_{F}(2)} +\log|d_F| -\frac12[F:\QQ] (\gamma+\log(4\pi)-1) +\sum_{v\in\Sigma_f} \frac{3N_v-1}{4(N_v-1)}\log N_v\\ &=&-\frac{\tilde\zeta_{F}'(-1)}{\tilde\zeta_{F}(-1)} -\frac12[F:\QQ] (\gamma+\log(4\pi)-1) +\sum_{v\in\Sigma_f} \frac{3N_v-1}{4(N_v-1)}\log N_v\\ &=&-\frac{\zeta_{F}'(-1)}{\zeta_{F}(-1)} -\frac12[F:\QQ] +\sum_{v\in\Sigma_f} \frac{3N_v-1}{4(N_v-1)}\log N_v. \end{eqnarray*} This proves Theorem \ref{main}. \begin{thebibliography}{[AB]} \bibitem[AAR]{AAR} G. E. Andrews, R. Askey, R. Roy, \emph{Special Functions}, Encyclopedia of Mathematics and its Applications 71, Cambridge University Press. \bibitem[AGHM]{AGHM} F. Andr\'eatta, E. Goren, B. Howard, and K. Madapusi-Pera, {\em Faltings heights of abelian varieties with complex multiplication}, Ann. of Math. (2) 187 (2018), no. 2, 391--531. \bibitem[BBK]{BBK} J. H. Bruinier, J. I. Burgos Gil, U. K\"uhn, \emph{Borcherds products and arithmetic intersection theory on Hilbert modular surfaces}, Duke Math. J. 139 (2007), 1--88. \bibitem[BC]{BC} J.-F. Boutot, H. Carayol, {\em Uniformisation p-adique des courbes de Shimura: les th\'eor\`emes de \v Cerednik et de Drinfel'd. } Courbes modulaires et courbes de Shimura (Orsay, 1987/1988). Ast\'erisque No. 196-197 (1991), 7, 45--158 (1992). \bibitem[BH]{BH} J. H. Bruinier, B. Howard, \emph{Arithmetic volumes of unitary Shimura varieties}, arXiv:2105.11274. \bibitem[Bo]{Bo} J. -B. Bost, \emph{Potential theory and Lefschetz theorems for arithmetic surfaces}, Ann. Sci. \'Ecole Norm. Sup. 32 (1999), 241--312. \bibitem[BZ]{BZ} J. -F. Boutot, T. Zink, \emph{The p-adic uniformization of Shimura curves,} available at \url{https://www.math.uni-bielefeld.de/~zink/z_publ.html}. \bibitem[Ca]{Ca} H. Carayol, {\em Sur la mauvaise r\'eduction des courbes de Shimura}, Compositio Mathematica 59.2 (1986): 151-230. \bibitem[CS]{CS} S. Chowla and A. Selberg, {\em On Epstein's zeta-function}, J. Reine Angew. Math. 227 (1967), 86--110. \bibitem[DR]{DR} P. Deligne, M. Rapoport, Les sch\'emas de modules de courbes elliptiques. Modular Functions of One Variable II, Proc. Internat. Summer School, Univ. Antwerp 1972, Lect. Notes Math.
349 (1973), 143--316. \bibitem[FS]{FS} G. Freixas i Montplet, S. Sankaran, \emph{Twisted Hilbert modular surfaces, arithmetic intersections and the Jacquet-Langlands correspondence}, Adv. in Math. 329 (2018), 1--84. \bibitem[GS]{GS} H. Gillet, C. Soul\'e, \emph{Arithmetic intersection theory}, Publ. Math. IHES, 72 (1990), 93--174. \bibitem[GZ]{GZ}{B. Gross, D. Zagier, \emph{Heegner points and derivatives of $L$-series}, Invent. Math. 84 (1986), no. 2, 225--320. } \bibitem[Ho]{Ho} F. H\"ormann, \emph{The geometric and arithmetic volume of Shimura varieties of orthogonal type}, CRM Monograph Series 35, American Mathematical Society, Providence, RI, 2014. \bibitem[KRY1]{KRY1} S. Kudla, M. Rapoport, T. Yang, \emph{Derivatives of Eisenstein series and Faltings heights}, Compos. Math. 140 (2004), no. 4, 887--951. \bibitem[KRY2]{KRY2}{ S. Kudla, M. Rapoport, and T. Yang, \emph{Modular forms and special cycles on Shimura curves}, Annals of Math. Studies series, vol 161, Princeton Univ. Publ., 2006.} \bibitem[Kud]{Kud}{S. Kudla, \emph{Central derivatives of Eisenstein series and height pairings}, Ann. of Math. (2) 146 (1997), no. 3, 545--646. } \bibitem[Kuh]{Kuh} U. K\"uhn, \emph{Generalized arithmetic intersection numbers}, J. reine angew. Math. 534 (2001), 209--236. \bibitem[MR1]{MR1} V. Maillot, D. R\"ossler, \emph{Conjectures sur les d\'eriv\'ees logarithmiques des fonctions L d'Artin aux entiers n\'egatifs}, Math. Res. Lett. 9 (2002), no. 5-6, 715--724. \bibitem[MR2]{MR2} V. Maillot, D. R\"ossler, \emph{Conjectures on the logarithmic derivatives of Artin L-functions II}, arXiv:1808.03068. \bibitem[Mu]{Mu} D. Mumford, Abelian varieties. With appendices by C. P. Ramanujam and Yuri Manin. Corrected reprint of the second (1974) edition. Tata Inst. Fund. Res. Stud. Math., 5. Published for the Tata Institute of Fundamental Research, Bombay; by Hindustan Book Agency, New Delhi, 2008. \bibitem[OT]{OT} T. Oda, M. Tsuzuki, \emph{Automorphic Green functions associated with the secondary spherical functions}, Publications of the Research Institute for Mathematical Sciences 39.3 (2003): 451--533. \bibitem[Qi]{Qi} C. Qiu, \emph{Arithmetic modularity of special divisors and arithmetic mixed Siegel-Weil formula} (with an appendix by Yujie Xu), arXiv:2204.13457. \bibitem[Ra]{Ra} M. Raynaud, $p$-groupes et r\'eduction semi-stable des courbes, In: P. Cartier et al. (eds), The Grothendieck Festschrift, III. Progr. in Math. 88, Birkh\"auser, Basel, 1990, pp. 179--197. \bibitem[Ts]{Ts} J. Tsimerman, {\em The Andr\'e-Oort conjecture for $\CA_g$, } Ann. of Math. (2) 187 (2018), no. 2, 379--390. \bibitem[Vi]{Vi}{M. Vign\'eras, \emph{Arithm\'etique des alg\`ebres de quaternions}, Lecture Notes in Mathematics, 800. Springer, Berlin, 1980.} \bibitem[Wa]{Wa}{J. Waldspurger, {\em Sur les valeurs de certaines fonctions L automorphes en leur centre de sym\'etrie}, Compositio Math. 54 (1985), no. 2, 173--242.} \bibitem[We]{We} A. Weil, {\em Elliptic functions according to Eisenstein and Kronecker}, Ergebnisse der Math. und ihrer Grenzgebiete 88, Springer-Verlag, 1976. \bibitem[YZ]{YZ} X. Yuan, S. Zhang, {\em On the averaged Colmez conjecture}, Ann. of Math. (2) 187 (2018), no. 2, 533--638. \bibitem[YZZ]{YZZ} X. Yuan, S. Zhang, W. Zhang, {\em The Gross--Zagier formula on Shimura curves}, Annals of Mathematics Studies, No. 184, Princeton University Press, 2012. \bibitem[Yu1]{Yu1} X.
Yuan, \emph{On Faltings heights of abelian varieties with complex multiplication}, Proceedings of the Seventh International Congress of Chinese Mathematicians, Vol. I, 521-536, Adv. Lect. Math. (ALM), 43, Int. Press, Somerville, MA, 2019. \bibitem[Yu2]{Yu2} X. Yuan, \emph{Explicit Kodaira--Spencer map over Shimura curves}, arXiv:2205.11334. \bibitem[Zh1]{Zh1}{S. Zhang, \textit{Heights of Heegner points on Shimura curves}, Ann. of Math. (2) 153 (2001), no. 1, 27--147. } \bibitem[Zh2]{Zh2} S. Zhang, {\em Standard Conjectures and Height Pairings}, arXiv:2009.07089. \end{thebibliography} \ \noindent \small{Beijing International Center for Mathematical Research, Peking University, Beijing 100871, China} \noindent \small{\it Email: [email protected]} \end{document}
2205.13823v7
http://arxiv.org/abs/2205.13823v7
Decomposable Fourier Multipliers and an Operator-Algebraic Characterization of Amenability
\documentclass[leqno]{article} \usepackage[frenchb,english]{babel} \usepackage[T1]{fontenc} \usepackage{amsmath} \usepackage{amssymb} \usepackage{amsfonts} \usepackage{enumerate} \usepackage{vmargin} \usepackage[all]{xy} \usepackage{mathrsfs} \usepackage{mathtools} \usepackage{lmodern} \usepackage[colorlinks=true,linkcolor=blue,pagebackref=true]{hyperref}\usepackage{ulem} \usepackage[all]{xy} \usepackage{comment} \usepackage{centernot} \setmarginsrb{3cm}{3cm}{3.5cm}{3cm}{0cm}{0cm}{1.5cm}{3cm} \footskip1.3cm \newcommand{\A}{\ensuremath{\mathcal{A}}} \newcommand{\B}{\mathrm{B}} \newcommand{\C}{\mathrm{C}} \newcommand{\E}{\ensuremath{\mathbb{E}}} \newcommand{\F}{\ensuremath{\mathbb{F}}} \newcommand{\Fc}{\ensuremath{\mathcal{F}}} \newcommand{\G}{\ensuremath{\mathbb{G}}} \newcommand{\K}{\ensuremath{\mathbb{K}}} \newcommand{\I}{\mathrm{I}} \newcommand{\W}{\mathrm{W}} \let\L\relax \newcommand{\L}{\mathrm{L}} \newcommand{\SL}{\mathrm{SL}} \newcommand{\M}{\mathrm{M}} \newcommand{\g}{\mathfrak{g}} \newcommand{\ad}{\mathrm{ad}} \newcommand{\Mat}{\ensuremath{\mathbb{M}}} \newcommand{\N}{\ensuremath{\mathbb{N}}} \newcommand{\Q}{\ensuremath{\mathbb{Q}}} \newcommand{\R}{\ensuremath{\mathbb{R}}} \newcommand{\T}{\ensuremath{\mathbb{T}}} \newcommand{\Z}{\ensuremath{\mathbb{Z}}} \newcommand{\X}{\ensuremath{\mathrm{X}}} \newcommand{\trans}[1]{\prescript{t}{}{#1}} \newcommand{\vect}{\ensuremath{\mathop{\rm Span\,}\nolimits}} \newcommand{\ran}{\ensuremath{\mathop{\rm Im\,}}} \newcommand{\QG}{\mathbb{G}} \newcommand{\QH}{\mathbb{H}} \newcommand{\hQG}{\widehat{\QG}} \renewcommand{\leq}{\ensuremath{\leqslant}} \renewcommand{\geq}{\ensuremath{\geqslant}} \newcommand{\qed}{\hfill \vrule height6pt width6pt depth0pt} \newcommand{\bnorm}[1]{ \big\| #1 \big\|} \newcommand{\Bnorm}[1]{ \Big\| #1 \Big\|} \newcommand{\bgnorm}[1]{ \bigg\| #1 \bigg\|} \newcommand{\Bgnorm}[1]{ \Bigg\| #1 \Bigg\|} \newcommand{\norm}[1]{\left\Vert#1\right\Vert} \newcommand{\scr}{\mathscr} \newcommand{\xra}{\xrightarrow} \newcommand{\otp}{\widehat{\ot}} \newcommand{\otpb}{\hat{\ot}} \newcommand{\ot}{\otimes} \newcommand{\epsi}{\varepsilon} \newcommand{\ovl}{\overline} \newcommand{\otvn}{\ovl\ot} \newcommand{\ul}{\mathcal{U}} \newcommand{\dsp}{\displaystyle} \newcommand{\la}{\langle} \newcommand{\ra}{\rangle} \newcommand{\co}{\colon} \renewcommand{\d}{\mathop{}\mathopen{}\mathrm{d}} \let\i\relax \newcommand{\i}{\mathrm{i}} \newcommand{\w}{\mathrm{w}} \newcommand{\exc}{\mathrm{exc}} \newcommand{\ov}{\overset} \let\cal\relax \newcommand{\cal}{\mathcal} \newcommand{\Rad}{\mathrm{Rad}} \newcommand{\rad}{\mathrm{rad}} \newcommand{\col}{\mathrm{Col}} \newcommand{\row}{\mathrm{Row}} \newcommand{\dec}{\mathrm{dec}} \newcommand{\Dec}{\mathrm{Dec}} \newcommand{\reg}{\mathrm{reg}} \newcommand{\Aut}{\mathrm{Aut}} \newcommand{\Ad}{\mathrm{Ad}} \newcommand{\Reg}{\mathrm{Reg}} \newcommand{\isom}{\mathrm{Isom}} \newcommand{\QWEP}{\mathrm{QWEP}} \newcommand{\CBAP}{\mathrm{CBAP}} \newcommand{\CCAP}{\mathrm{CCAP}} \newcommand{\OAP}{\mathrm{OAP}} \newcommand{\AP}{\mathrm{AP}} \newcommand{\sign}{\mathrm{sign}} \newcommand{\res}{\mathrm{res}} \newcommand{\disc}{\mathrm{disc}} \newcommand{\dist}{\mathrm{dist}} \newcommand{\Id}{\mathrm{Id}} \newcommand{\VN}{\mathrm{VN}} \newcommand{\CB}{\mathrm{CB}} \newcommand{\SAIN}{\mathrm{(SAIN)}} \newcommand{\SIN}{\mathrm{(SIN)}} \newcommand{\SO}{\mathrm{SO}} \newcommand{\IF}{\mathrm{(IF)}} \newcommand{\sIF}{\mathrm{(sIF)}} \newcommand{\loc}{\mathrm{loc}} \newcommand{\op}{\mathrm{op}} \newcommand{\e}{\mathrm{e}} \let\ker\relax 
\DeclareMathOperator{\ker}{Ker} \DeclareMathOperator{\Ran}{Ran} \DeclareMathOperator{\weak}{w-} \DeclareMathOperator{\ind}{ind} \DeclareMathOperator{\tr}{Tr} \DeclareMathOperator{\weakstar}{w*-} \DeclareMathOperator{\supp}{supp} \DeclareMathOperator{\card}{card} \DeclareMathOperator{\Span}{span} \newcommand{\cb}{\mathrm{cb}} \newcommand{\cp}{\mathrm{cp}} \newcommand{\CV}{\mathrm{CV}} \newcommand{\CP}{\mathrm{CP}} \newcommand{\HS}{\mathrm{HS}} \newcommand{\tree}{\mathcal{T}} \newcommand{\CC}{\mathrm{CC}} \newcommand{\St}{\mathrm{St}} \newcommand{\ALSS}{\mathrm{ALSS}} \newcommand{\ALS}{\mathrm{ALS}} \newcommand{\ADS}{\mathrm{ADS}} \newcommand{\sing}{\mathrm{sing}} \newcommand{\diam}{\mathrm{diam}} \newcommand{\inner}{\mathrm{int}} \renewcommand{\subseteq}{\subset} \selectlanguage{english} \newtheorem{thm}{Theorem}[section] \newtheorem{defi}[thm]{Definition} \newtheorem{quest}[thm]{Question} \newtheorem{prop}[thm]{Proposition} \newtheorem{conj}[thm]{Conjecture} \newtheorem{Property}[thm]{Property} \newtheorem{cor}[thm]{Corollary} \newtheorem{lemma}[thm]{Lemma} \newtheorem{fact}[thm]{Fact} \newtheorem{prob}[thm]{Problem} \newtheorem{example}[thm]{Example} \newtheorem{remark}[thm]{Remark} \newenvironment{rk}{\begin{remark}\textrm }{\end{remark}} \newenvironment{proof}[1][]{\noindent {\it Proof #1} : }{\hbox{~}\qed \smallskip } \usepackage{tocloft} \setlength{\cftbeforesecskip}{0pt} \numberwithin{equation}{section} \usepackage[nottoc,notlot,notlof]{tocbibind} \let\OLDthebibliography\thebibliography \renewcommand\thebibliography[1]{ \OLDthebibliography{#1} \setlength{\parskip}{0pt} \setlength{\itemsep}{0pt plus 0.3ex} } \begin{document} \selectlanguage{english} \title{\bfseries{Decomposable Fourier Multipliers and an Operator-Algebraic Characterization of Amenability}} \date{} \author{\bfseries{C\'edric Arhancet - Christoph Kriegler}} \maketitle \begin{abstract} We study the algebra $\mathfrak{M}^{\infty,\mathrm{dec}}(G)$ of decomposable Fourier multipliers on the group von Neumann algebra $\mathrm{VN}(G)$ of a locally compact group $G$, and its relation to the Fourier–Stieltjes algebra $\mathrm{B}(G)$. For discrete groups, we prove that these two algebras coincide isometrically. In contrast, we show that the identity $\mathfrak{M}^{\infty,\mathrm{dec}}(G) = \mathrm{B}(G)$ fails for various classes of non-discrete groups, and that, among second-countable unimodular groups, inner amenability ensures the equality. Our approach relies on the existence of contractive projections preserving complete positivity from the space of completely bounded weak* continuous operators on $\mathrm{VN}(G)$ onto the subspace of completely bounded Fourier multipliers. We show that such projections exist in the inner amenable case. As an application, we obtain a new operator-algebraic characterization of amenability. We also investigate the analogous problem for the space of completely bounded Fourier multipliers on the noncommutative $\mathrm{L}^p$-spaces $\mathrm{L}^p(\mathrm{VN}(G))$, for $1 \leq p \leq \infty$. Using Lie group theory and results stemming from the solution to Hilbert's fifth problem, we prove that second-countable unimodular finite-dimensional amenable locally compact groups admit compatible projections at \( p = 1 \) and \( p = \infty \). These results reveal new structural links between harmonic analysis, operator algebras, and the geometry of locally compact groups. 
\end{abstract} \makeatletter \renewcommand{\@makefntext}[1]{#1} \makeatother \footnotetext{\noindent 2020 {\it Mathematics subject classification:} 46L51, 43A15, 46L07, 43A07. \\ {\it Key words}: Fourier-Stieltjes algebras, von Neumann algebras, decomposable operators, Fourier multipliers, Schur multipliers, inner amenability, amenability, complementations, groupoids, operator spaces.} \tableofcontents \section{Introduction} \label{sec:Introduction} \subsection{Context and motivation} The theory of Fourier–Stieltjes algebras and Fourier multipliers has played a central role in abstract harmonic analysis since the foundational works of Eymard \cite{Eym64}, Haagerup \cite{Haa79} and de Canniere--Haagerup \cite{DCH85}. The theory of Fourier multipliers on general (possibly non-abelian) groups has seen significant developments \cite{CGPT23}, \cite{CoH89}, \cite{HaL13}, \cite{JuR03}, \cite{JMP14}, \cite{JMP18}, \cite{LaS11}, \cite{MeR17}, \cite{PRS22}, \cite{PST25} particularly in relation to weak amenability, related approximation properties of von Neumann algebras, noncommutative $\L^p$-spaces and Schur multipliers. In the setting of group von Neumann algebras, the space of completely bounded Fourier multipliers provides a natural extension of the classical Fourier--Stieltjes algebra. The aim of this paper is to explore the structural relation between the Fourier-Stieltjes algebra $\B(G)$ and the algebra $\frak{M}^{\infty,\dec}(G)$ of decomposable Fourier multipliers on the group von Neumann algebra $\VN(G)$ of a locally compact group $G$, and to use this relation to characterize various forms of amenability in locally compact groups. Our results offer a new analytic perspective on inner amenability and decomposability, and provide an operator-algebraic route to understanding the harmonic analytic properties of $G$. We prove that, for discrete groups, these two algebras coincide isometrically. In contrast, we show that the identity $\frak{M}^{\infty,\dec}(G) = \B(G)$ fails in general for non-discrete groups. One of our main results establishes that the equality is a consequence of inner amenability among second-countable unimodular locally compact groups. Our approach involves a detailed study of bounded projections from the space of completely bounded operators on the von Neumann algebra $\VN(G)$ preserving complete positivity onto the space of completely bounded Fourier multipliers. We show that the existence of such projections is intimately related to structural properties of the group, such as (inner) amenability. This leads to a new analytic characterization of amenability, formulated in operator-algebraic terms. We further investigate the setting of noncommutative $\L^p$-spaces, using tools from geometric group theory and the structure theory of locally compact groups. In particular, we also study the existence of bounded projections from the space of completely bounded operators on the noncommutative $\L^p$-space $\L^p(\VN(G))$ onto the space of completely bounded $\L^p$-Fourier multipliers. Recall that the Fourier-Stieltjes algebra $\B(G)$ of a locally compact group $G$ is a generalization of the algebra of bounded regular complex Borel measures of an abelian locally compact group to non-abelian groups. Since its introduction by Eymard in \cite{Eym64}, this commutative unital Banach algebra has become a central object in noncommutative harmonic analysis and is closely related to the unitary representation theory of $G$. 
More precisely, the elements of $\B(G)$ are exactly the matrix coefficients of continuous unitary representations of $G$ on complex Hilbert spaces, i.e. \begin{equation} \label{BG-as-entries} \B(G) \ov{\mathrm{def}}{=} \big\{ \langle \pi(\cdot)\xi,\eta \rangle_H : \pi \text{ is a unitary representation of } G \text{ on $H$ and } \xi,\eta \in H \big\}. \end{equation} The norm is defined by \begin{equation} \label{Norm-BG} \norm{\varphi}_{\B(G)} \ov{\mathrm{def}}{=} \inf \norm{\xi}\norm{\eta}, \end{equation} where the infimum is taken over all $\pi, \xi,\eta$ such that $\varphi=\langle\pi(\cdot)\xi,\eta\rangle_H $. The operations of this algebra are pointwise multiplication and addition. Also note that $\B(G)$ is a complete invariant of $G$, i.e.~$\B(G_1)$ and $\B(G_2)$ are isometrically isomorphic as Banach algebras if and only if $G_1$ and $G_2$ are topologically isomorphic as locally compact groups, as proved by Walter in \cite{Wal74} (see also \cite{Wal70} and \cite[Theorem 3.2.5 p.~99]{KaL18}). Decomposable maps form a class of operators between $\mathrm{C}^*$-algebras generalizing completely positive maps. The class of decomposable maps is perhaps the most general class of tractable operators. If $A$ and $B$ are $\mathrm{C}^*$-algebras, recall that a linear map $T \co A \to B$ is called decomposable \cite{Haa85} if there exist linear maps $v_1,v_2 \co A \to B$ such that the linear map \begin{equation} \label{Matrice-2-2-Phi} \Phi=\begin{bmatrix} v_1 & T \\ T^\circ & v_2 \\ \end{bmatrix} \co \M_2(A) \to \M_2(B), \quad \begin{bmatrix} a & b \\ c & d \\ \end{bmatrix}\mapsto \begin{bmatrix} v_1(a) & T(b) \\ T^\circ(c) & v_2(d) \\ \end{bmatrix} \end{equation} is completely positive, where $T^\circ(c) \ov{\mathrm{def}}{=} T(c^*)^*$. In this case, the maps $v_1$ and $v_2$ are completely positive and the decomposable norm of $T$ is defined by \begin{equation} \label{Norm-dec} \norm{T}_{\dec,A \to B} \ov{\mathrm{def}}{=} \inf\big\{\max\{\norm{v_1},\norm{v_2}\}\big\}, \end{equation} where the infimum is taken over all maps $v_1$ and $v_2$. See the books \cite{BlM04}, \cite{EfR00} and \cite{Pis03} for more information on this classical notion. We also refer to \cite{ArK23} and \cite{JuR04} for the analogous notion for operators acting on a noncommutative $\L^p$-space $\L^p(\mathcal{M})$ associated to a von Neumann algebra $\cal{M}$ endowed with a normal semifinite faithful trace, for any $1 \leq p \leq \infty$. If $\cal{M}$ is approximately finite-dimensional (which is equivalent to being injective), it is known that we have the isometric complex interpolation formula \begin{equation} \label{Regular-as-interpolation-space} \Dec(\L^p(\mathcal{M})) =(\CB(\mathcal{M}),\CB(\L^1(\mathcal{M})))^\frac{1}{p}, \end{equation} for the Banach space $\Dec(\L^p(\mathcal{M}))$ of decomposable operators acting on the noncommutative $\L^p$-space $\L^p(\mathcal{M})$, which is a combination of \cite[Theorem 3.7]{Pis95} and the isometric identification \cite[Theorem 3.24 p.~41]{ArK23} between regular and decomposable operators. Here $\CB(\L^1(\mathcal{M}))$ is the space of completely bounded operators acting on the Banach space $\L^1(\mathcal{M})$ and the space $\CB(\mathcal{M})$ is defined similarly. Recall that the group von Neumann algebra $\VN(G)$ of a locally compact group $G$ is the von Neumann algebra generated by the range $\lambda(G)$ of the left regular representation $\lambda$ of $G$ on the complex Hilbert space $\L^2(G)$ and that the subspace $\Span \{\lambda_s : s \in G\}$ is weak* dense in $\VN(G)$.
If $G$ is abelian, then the von Neumann algebra $\VN(G)$ is $*$-isomorphic to the algebra $\L^\infty(\hat{G})$ of essentially bounded functions on the Pontryagin dual $\hat{G}$ of $G$. As fundamental models of quantum groups, these algebras play a crucial role in operator algebras. A Fourier multiplier acting on $\VN(G)$ is a weak* continuous linear operator $T \co \VN(G) \to \VN(G)$ that satisfies $T(\lambda_s)=\varphi_s\lambda_s$ for all $s \in G$, for some measurable function $\varphi \co G \to \mathbb{C}$. In this case, we let $M_\varphi \ov{\mathrm{def}}{=} T$. Our first result, proved in Corollary \ref{dec-vs-B(G)-discrete-group} is the following statement. This identification provides a concrete realization of the abstract space $\B(G)$ in terms of decomposable Fourier multipliers on $\VN(G)$, thereby bridging representation theory and decomposable operators. \begin{thm} The Fourier-Stieltjes algebra $\B(G)$ of a discrete group $G$ is canonically isometrically isomorphic to the algebra $\frak{M}^{\infty,\dec}(G)$ of decomposable Fourier multipliers on the group von Neumann algebra $\VN(G)$ via the map $\varphi \to M_\varphi$. \end{thm} This identification further highlights the ubiquity of the Fourier-Stieltjes algebra $\B(G)$ for a discrete group $G$. Indeed, we will show in Proposition \ref{prop-B(G)-inclus-dec} that for any locally compact group $G$ there exists a well-defined injective \textit{contractive} map from the Fourier-Stieltjes algebra $\B(G)$ into the space $\frak{M}^{\infty,\dec}(G)$ of decomposable Fourier multipliers and we will also examine the surjectivity of this map. We will show that the following property\footnote{\thefootnote. The subscript w* means <<weak* continuous>>. With the projection provided by \cite[Proposition 3.1 p.~24]{ArK23}, we could replace the space $\CB_{\w^*}(\VN(G))$ by the space $\CB(\VN(G))$ in this definition.} plays an important role in this problem. \begin{defi} \label{Defi-tilde-kappa} Let $G$ be a locally compact group. We say that $G$ has property $(\kappa_\infty)$ if there exists a bounded projection $P_{G}^\infty \co \CB_{\w^*}(\VN(G)) \to \CB_{\w^*}(\VN(G))$ preserving the complete positivity onto the space $\mathfrak{M}^{\infty,\cb}(G)$ of completely bounded Fourier multipliers on the von Neumann algebra $\VN(G)$. In this case, the infimum of bounds of such projections will be denoted $\kappa_\infty(G)$: \begin{equation} \label{Kprime-def} \kappa_\infty(G) \ov{\mathrm{def}}{=} \inf \norm{P_{G}^\infty}_{\CB_{\w^*}(\VN(G)) \to \CB_{\w^*}(\VN(G))}. \end{equation} Finally, we let $\kappa_\infty(G) \ov{\mathrm{def}}{=} \infty$ if the locally compact group $G$ does not have $(\kappa_\infty)$. \end{defi} The constant $\kappa_\infty(G)$ is a variant of the relative projection constant $$ \inf \big\{\norm{P}_{X \to X} : P\text{ is a bounded projection from $X$ onto }Y \big\} $$ of a closed subspace $Y$ of a Banach space $X$, e.g.~\cite[Definition 4.b.1 p.~231]{Kon86} or \cite[p.~112]{Wot91}. The property $(\kappa_\infty)$ means in particular that the space\footnote{\thefootnote. This space is denoted sometimes $\M_0\mathrm{A}(G)$ or $\M_\cb \mathrm{A}(G)$.} $\mathfrak{M}^{\infty,\cb}(G)$ is complemented in the space $\CB_{\w^*}(\VN(G))$ of weak* continuous completely bounded operators acting on $\VN(G)$. Indeed, in Proposition \ref{conj-1-1-correspondance}, we will prove that this property suffices to ensure that the previous inclusion $\B(G) \hookrightarrow \frak{M}^{\infty,\dec}(G)$ is a bijection. 
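To give a concrete feel for property $(\kappa_\infty)$, let us sketch, only as an illustration, the shape that such a projection takes when $G$ is a discrete group; the precise statement we actually use is the average trick of Haagerup recalled later in this introduction, see \cite[Section 4.2]{ArK23}. Writing $\tau$ for the canonical normalized trace on $\VN(G)$, determined by $\tau(\lambda_s)=\delta_{s,e}$, one can associate with every weak* continuous completely bounded map $T \co \VN(G) \to \VN(G)$ the function $$ \varphi_T(s) \ov{\mathrm{def}}{=} \tau\big(T(\lambda_s)\lambda_s^*\big), \quad s \in G. $$ If $T=M_\psi$ is already a Fourier multiplier, then $\varphi_T(s)=\psi(s)\tau(\lambda_s\lambda_s^*)=\psi(s)$, so the assignment $T \mapsto M_{\varphi_T}$ fixes Fourier multipliers. That $\varphi_T$ is indeed the symbol of a completely bounded Fourier multiplier with $\norm{M_{\varphi_T}}_{\cb} \leq \norm{T}_{\cb}$, and that the assignment preserves complete positivity, is the content of the average argument discussed below.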
In order to prove that this map is an isometry, we need a matricial generalization of property $(\kappa_\infty)$ (satisfied for any discrete group $G$) and surprisingly (at first sight) the use of results on \textit{groupoids}. Note also that the existence of non-discrete and non-abelian locally compact groups with $(\kappa_\infty)$ (and even with the stronger property $(\kappa)$ of Definition \ref{Defi-complementation-G}) was a rather surprising result of the paper \cite{ArK23} since the proof of property $(\kappa_\infty)$ of a discrete group $G$ is an average argument relying on the compactness of the compact quantum group $(\VN(G),\Delta)$ defined by the group von Neumann algebra $\VN(G)$ and its canonical coproduct $\Delta$. According to \cite[Theorem 6.38 p.~121]{ArK23}, a second-countable pro-discrete locally compact group $G$ satisfies $\kappa_\infty(G)=1$. With sharp contrast, we will observe in this paper (see Example \ref{example-SL}), as announced in \cite{ArK23}, that the unimodular locally compact group $G=\SL_2(\R)$ does not have $(\kappa_\infty)$. If $G$ is a locally compact group, with our result we can insert the space $\frak{M}^{\infty,\dec}(G)$ of decomposable Fourier multipliers acting on the von Neumann algebra $\VN(G)$ in the classical contractive inclusion $\B(G) \subseteq \frak{M}^{\infty,\cb}(G)$: \begin{equation} \label{Inclusions} \B(G) \subseteq \frak{M}^{\infty,\dec}(G) \subseteq \frak{M}^{\infty,\cb}(G). \end{equation} It is known \cite[p.~54]{Pis01} that the equality $\B(G) = \frak{M}^{\infty,\cb}(G)$ characterizes amenability for locally compact groups. This observation allows us to revisit another nice characterization of amenability of Lau and Paterson \cite[Corollary 3.2 p.~161]{LaP91} \cite[p.~85]{Pat88a}, which is described by the next theorem. \begin{thm}[Lau-Paterson] \label{Th-Lau-Paterson} Let $G$ be a locally compact group. The following properties are equivalent. \begin{enumerate} \item The group von Neumann algebra $\VN(G)$ is injective and $G$ is inner amenable. \item $G$ is amenable. \end{enumerate} \end{thm} Recall that a locally compact group $G$ equipped with a left Haar measure is inner amenable if there exists a conjugation-invariant state on the algebra $\L^\infty(G)$. We introduce the following conjecture. \begin{conj} \label{conj} Let $G$ be a locally compact group. \begin{enumerate} \item $G$ is inner amenable if and only if we have the equality $\B(G) = \frak{M}^{\infty,\dec}(G)$. \item The von Neumann algebra $\VN(G)$ is injective if and only if we have $\frak{M}^{\infty,\dec}(G)= \frak{M}^{\infty,\cb}(G)$. \end{enumerate} \end{conj} We will prove the <<only if>> part of the first assertion for second-countable unimodular locally compact groups by showing in Theorem \ref{thm-SAIN-tilde-kappa} that inner amenability implies $\kappa_\infty(G)=1$, and hence $\B(G) = \frak{M}^{\infty,\dec}(G)$. We refer to Section \ref{Sec-approach} for a detailed presentation of our approach. This is our first main result. \begin{thm} Let $G$ be a second-countable unimodular locally compact group. If $G$ is inner amenable then we have $\B(G) = \frak{M}^{\infty,\dec}(G)$. \end{thm} The <<only if>> part of the second assertion of Conjecture \ref{conj} is true by a classical result of Haagerup \cite[Corollary 2.8 p.~201]{Haa85}. 
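For orientation, note that with this definition every discrete group is inner amenable: the evaluation $f \mapsto f(e)$ at the identity element is a state on $\L^\infty(G)=\ell^\infty(G)$, and it is conjugation-invariant since $f(s^{-1}es)=f(e)$ for every $s \in G$. This standard observation is recalled here only to illustrate the definition.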
A consequence of our results is that the second point of Conjecture \ref{conj} is true for discrete groups, see Theorem \ref{Thm-conj-discrete-case}, and also for second-countable unimodular inner amenable locally compact groups (see Corollary \ref{cor-inner-66}), i.e.~we can state the following result. \begin{thm} Let $G$ be a discrete group or a second-countable unimodular inner amenable locally compact group. Then the von Neumann algebra $\VN(G)$ is injective if and only if we have $\frak{M}^{\infty,\dec}(G)= \frak{M}^{\infty,\cb}(G)$. \end{thm} As a byproduct, we also obtain in Theorem \ref{thm-links-K-injective} the following new characterization of amenability, which is in the same spirit as the characterization of Lau and Paterson, previously discussed in Theorem \ref{Th-Lau-Paterson}. \begin{thm} \label{thm-links-K-injective-intro} Let $G$ be a second-countable unimodular locally compact group. Then the following are equivalent. \begin{enumerate} \item $\VN(G)$ is injective and $G$ has $(\kappa_\infty)$. \item $G$ is amenable. \end{enumerate} \end{thm} Finally, we will observe in Example \ref{Example-SL2} that the first inclusion in \eqref{Inclusions} can also be strict, e.g.~for $G=\SL_2(\R)$. The converses of these results will need further investigations. We also give in Section \ref{subsec-inner-Folner} other characterizations of inner amenability for unimodular locally compact groups and we will use one of these in the proofs of our results. Note that if $G$ is a unimodular locally compact group, there exists a canonical normal semifinite faithful trace on the group von Neumann algebra $\VN(G)$, allowing to introduce the associated noncommutative $\L^p$-space $\L^p(\VN(G))$ for any $1 \leq p < \infty$. In this context, we can introduce the space $\mathfrak{M}^{p,\cb}(G)$ of completely bounded Fourier multipliers acting on the noncommutative $\L^p$-space $\L^p(\VN(G))$. We describe a new class of locally compact groups with the following property introduced in \cite[Definition 1.1 p.~3]{ArK23} which requires a bounded projection at the level $p=\infty$ and a compatible\footnote{\thefootnote. The following remark is important to note. If $P_G^\infty \co \CB_{\w^*}(\VN(G)) \to \CB_{\w^*}(\VN(G))$ is a bounded projection onto the subspace $\mathfrak{M}^{\infty,\cb}(G)$ then we can define a map $P_{G}^1 \co \CB(\L^1(\VN(G))) \to \CB(\L^1(\VN(G)))$ by $$ P_G^1(T) \ov{\textrm{def}}{=} (P_{G}^\infty(T^*)_*), \quad T \in \CB(\L^1(\VN(G))). $$ It is then easy to check that $P_G^1$ is a bounded projection preserving complete positivity onto the subspace $\mathfrak{M}^{1,\cb}(G)$ and its norm is equal to the one of $P_G^\infty$. It is important to note that there is no evidence that the maps $P_G^\infty$ and $P_G^1$ are compatible in the sense of interpolation. Consequently, the properties $(\kappa_\infty)$ and $(\kappa)$ seem to be different.} bounded projection at the level $p=1$. The compatibility is taken in the sense of interpolation theory described in the books \cite{BeL76} and \cite{Tri95}. This compatibility property is crucial in a companion paper in order to describe the decomposable norm of Fourier multipliers acting on noncommutative $\L^p$-spaces with the interpolation formula \eqref{Regular-as-interpolation-space} and a classical argument. 
\begin{defi} \label{Defi-complementation-G} We say that a locally compact group $G$ has property $(\kappa)$ if there exist compatible bounded projections $P_{G}^\infty \co \CB_{\w^*}(\VN(G)) \to \CB_{\w^*}(\VN(G))$ and $P_{G}^1 \co \CB(\L^1(\VN(G))) \to \CB(\L^1(\VN(G)))$ onto the subspaces $\mathfrak{M}^{\infty,\cb}(G)$ and $\mathfrak{M}^{1,\cb}(G)$, preserving the complete positivity. In this case, we introduce the constant \begin{equation} \label{Kappa-eq-def} \kappa(G) \ov{\mathrm{def}}{=} \inf \max\Big\{\norm{P_G^\infty}_{\CB_{\w^*}(\VN(G)) \to \CB_{\w^*}(\VN(G))},\norm{P_G^1}_{\CB(\L^1(\VN(G))) \to \CB(\L^1(\VN(G)))} \Big\}, \end{equation} where the infimum is taken on all admissible couples $(P_G^\infty,P_G^1)$ of projections. Finally, we let $\kappa(G) \ov{\mathrm{def}}{=} \infty$ if the locally compact group $G$ does not have $(\kappa)$. \end{defi} The well-known average trick \cite[proof of Lemma 2.5]{Haa16} of Haagerup essentially implies that $\kappa(G)=1$ for any discrete group $G$, see \cite[Section 4.2]{ArK23}. In \cite[Proposition 6.43 p.~125]{ArK23} and \cite[Theorem 6.38 p.~121]{ArK23}, it is proved that an abelian locally compact group satisfies $\kappa(G)=1$ and that a second-countable pro-discrete locally compact group $G$ satisfies $\kappa(G)=1$. It is equally proved in \cite[Theorem 6.16 p.~96]{ArK23} that some class of second-countable unimodular locally compact groups approximable by lattice subgroups have $(\kappa)$. Another very significant result that we obtain in this paper is described in the following statement, see Corollary \ref{cor-the-compatible-complementation}. Let us first recall that the concept of dimension of a \textit{suitable} topological space can be defined using the small inductive dimension, the large inductive dimension, or the covering dimension. In the case of a locally compact group $G$, these three notions of dimension coincide. We refer to Section \ref{Sec-finite-dim} for more background. \begin{thm} \label{th-intro-kappa} A second-countable unimodular finite-dimensional amenable locally compact group $G$ has property $(\kappa)$. \end{thm} An upper estimate of $\kappa(G)$ is possible for some groups. For example, in the case of a second-countable unimodular totally disconnected amenable locally compact group, our method gives $\kappa(G)=1$, which is a sharp result. Note that these results complement the result of our previous paper \cite{ArK23}. From this point of view, totally disconnected locally compact groups behave better than Lie groups, phenomenon that we already noticed in \cite{ArK23}. We refer to Section \ref{Sec-approach} for a detailed presentation of our approach. The proof relies on the structure of finite-dimensional locally compact groups extracted from the solution to Hilbert's fifth problem. More precisely, we use a version of Iwasawa's local splitting theorem, which says that an $n$-dimensional second-countable locally compact group is \textit{locally} isomorphic to the product of a totally disconnected compact group $K$ and a Lie group $L$ of dimension $n$, to reduce the problem to totally disconnected groups and to connected Lie groups. It allows us to use doubling constants of the Carnot-Carath\'eodory metric of connected Lie groups for small balls to construct special suitable <<noncommutative functions>>, which are crucial for our proof. 
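For orientation, and only to illustrate the scope of Theorem \ref{th-intro-kappa}, note that its hypotheses are satisfied for instance by $\R^n$, by compact Lie groups, by the additive group $\Q_p$ (a totally disconnected, hence zero-dimensional, group), and by unimodular amenable connected Lie groups such as the real Heisenberg group.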
We will prove in Corollary \ref{cor-the-full-referees-complementation} a different result for the case of a second-countable unimodular amenable locally compact group $G$, using some other special <<noncommutative functions>>. We obtain that the space $\mathfrak{M}^{p,\cb}(G)$ of completely bounded Fourier multipliers on the noncommutative $\L^p$-space $\L^p(\VN(G))$ is contractively complemented in the space $\CB(\L^p(\VN(G)))$ of completely bounded operators acting on the Banach space $\L^p(\VN(G))$, by a contractive projection preserving the complete positivity. Note that this map is \textit{contractive} which is better than the \textit{boundedness} of the maps $P_G^1$ and $P_G^\infty$ provided by Theorem \ref{th-intro-kappa} (when it applies), but only for \textit{one} value of $p$. Note also that this property does not characterize amenability if $1<p<\infty$ since a discrete group $G$ such that the von Neumann algebra $\VN(G)$ is $\QWEP$\footnote{\thefootnote. It is not clear if this assumption is removable or not. It is required by the use of vector-valued noncommutative $\L^p$-spaces and the most general known theory needs the $\QWEP$ assumption.} also satisfies this property, see \cite[p.~334]{JuR03} and \cite[Theorem 4.2 p.~62]{ArK23}. \begin{thm} \label{thm-the-full-referees-intro} Let $G$ be a second-countable unimodular amenable locally compact group. Let $1 < p < \infty$ such that $\frac{p}{p^*}$ is rational. Then there exists a contractive projection \[ P^p_G \co \CB(\L^p(\VN(G))) \to \CB(\L^p(\VN(G))) \] onto the subspace $\mathfrak{M}^{p,\cb}(G)$, preserving the complete positivity. \end{thm} In contrast to Theorem \ref{th-intro-kappa}, this result cannot be used to characterize the norms of decomposable multipliers on noncommutative $\L^p$-spaces. Our results deepen the connection between the structural theory of locally compact groups and the analytic properties of their associated operator algebras, with new characterizations of amenability emerging from the perspective of Fourier multipliers. \subsection{Structure of the paper} To facilitate access to individual topics, each section is made as self-contained as possible. The paper is structured as follows. Section \ref{Overview-kappa} provides background on Fourier-Stieltjes algebras, groupoids, and operator algebras. In Proposition \ref{Prop-Ruan-Dec}, we demonstrate that the space $\Dec(A,B)$ of decomposable operators between $\C^*$-algebras admits a canonical operator space structure. In Section \ref{Sec-FS-and-dec}, we prove in Proposition \ref{prop-B(G)-inclus-dec} that for any locally compact group $G$, there exists a well-defined injective completely contractive map from the Fourier-Stieltjes algebra $\B(G)$ into the space $\mathfrak{M}^{\infty,\dec}(G)$, consisting of decomposable Fourier multipliers on the group von Neumann algebra $\VN(G)$. Furthermore, we explore in Proposition \ref{conj-1-1-correspondance} the relationship between the equality $\B(G) = \mathfrak{M}^{\infty,\dec}(G)$ and property $(\kappa_{\infty})$. We also show in Theorem \ref{dec-vs-B(G)-discrete-group} that the Fourier-Stieltjes algebra $\B(G)$ of a discrete group $G$ is isometrically isomorphic to the algebra $\mathfrak{M}^{\infty,\dec}(G)$ of decomposable Fourier multipliers. In Example \ref{Example-SL2}, we demonstrate that for $G = \SL_2(\R)$, the inclusion $\B(G) \subset \mathfrak{M}^{\infty,\dec}(G)$ is strict. In Section \ref{Sec-prelim-inner}, we provide background on inner amenability and amenability. 
In Section \ref{subsec-inner-Folner}, we establish various characterizations of inner amenability for unimodular locally compact groups, using asymptotically central nets of functions or inner F\o{}lner nets. These characterizations are employed in Section \ref{Sec-Herz-Schur}. Section \ref{Sec-prel-complet} offers background on measurable Schur multipliers and Plancherel weights on group von Neumann algebras. In Section \ref{Sec-approach}, we outline the technical approach of this paper. Section \ref{Mappings} presents the construction of some Schur multipliers derived from a (weak* continuous if $p = \infty$) completely bounded map $T \co \L^p(\VN(G)) \to \L^p(\VN(G))$, acting on the noncommutative $\L^p$-space $\L^p(\VN(G))$ of a second-countable unimodular locally compact group $G$. In Section \ref{Sec-Herz-Schur}, we show that the symbol can be chosen as a Herz-Schur symbol if the group $G$ is inner amenable. Section \ref{Section-p=1-p-infty} examines the symbols of these Schur multipliers for $p=1$ and $p=\infty$. Section \ref{Sec-convergence-continuous} explores the convergence of the symbols of Schur multipliers, while Section \ref{Sec-finite-dim} focuses on Lie groups and totally disconnected locally compact groups, culminating in the proof in Section \ref{Sec-Th-complementation} that unimodular finite-dimensional amenable locally compact groups have property $(\kappa)$. In Section \ref{Section-Schur}, we construct a contractive projection from the space of completely bounded Schur multipliers $\mathfrak{M}^{p,\cb}_G$ onto the subspace $\mathfrak{M}^{p,\cb,\HS}_G$ of Herz-Schur multipliers, in the case where $G$ is amenable. This result will be used in Section \ref{Sec-Th-complementation}, which contains our main complementation results. In Section \ref{Sec-charac-amen}, Theorem \ref{thm-links-K-injective} presents a new characterization of amenability for second-countable unimodular locally compact groups. In Example \ref{example-SL}, we observe that the unimodular locally compact group $G=\SL_2(\R)$ does not have property $(\kappa_\infty)$. Finally, in Section \ref{Sec-Herz}, we show that if there exists a bounded projection $Q \co \mathfrak{M}^{\infty}_G \to \mathfrak{M}^{\infty}_G$ onto the space of completely bounded Herz-Schur multipliers $\mathfrak{M}^{\infty,\HS}_G$ over the space $\cal{B}(\L^2(G))$ of bounded operators on the Hilbert space $\L^2(G)$, preserving the complete positivity, for some second-countable unimodular locally compact group $G$ such that the von Neumann algebra $\VN(G)$ is injective, then $G$ must be amenable. \section{Fourier-Stieltjes algebras and decomposable multipliers on $\VN(G)$} \label{sec-Divers} \subsection{Preliminaries} \label{Overview-kappa} \paragraph{Decomposable maps} Recall that the notion of decomposable map is defined in \eqref{Matrice-2-2-Phi}. Consider some $\mathrm{C}^*$-algebras $A$, $B$ and $C$. Let $T_1 \co A \to B$ and $T_2 \co B \to C$ be some decomposable maps. Then it follows from \cite[Proposition 1.3 (5) p.~177]{Haa85} that the composition $T_2 \circ T_1 \co A \to C$ is decomposable and that \begin{equation} \label{Composition-dec} \norm{T_2 \circ T_1}_{\dec, A \to C} \leq \norm{T_2}_{\dec, B \to C} \norm{T_1}_{\dec, A \to B}. \end{equation} By \cite[Proposition 1.3 (4) p.~177]{Haa85}, any completely positive map $T \co A \to B$ between $\C^*$-algebras is decomposable and we have \begin{equation} \label{dec-et-cp} \norm{T}_{\dec,A \to B} =\norm{T}_{\cb,A \to B} =\norm{T}_{A \to B}.
\end{equation} It is known that the space $\Dec(A, B)$ of decomposable maps is a Banach space by \cite[Proposition 1.4 p.~182]{Haa85} and coincides with the span of completely positive maps, see \cite[p.~175]{Haa85}. By \cite[Proposition 1.3 (3) p.~177]{Haa85} or \cite[Lemma 5.4.3 p.~96]{EfR00}, any decomposable map $T \co A \to B$ is completely bounded with $\norm{T}_{\cb,A \to B} \leq \norm{T}_{\dec,A \to B}$. Moreover, if $B$ is injective, then according to \cite[Theorem 1.6 p.~184]{Haa85}, we have \begin{equation} \label{dec=cb} \norm{T}_{\dec,A \to B} =\norm{T}_{\cb,A \to B}. \end{equation} We will use in the first proof of Proposition \ref{prop-B(G)-inclus-dec} the following elementary lemma. \begin{lemma} \label{Lemma-tensor-dec-2} Let $\cal{M}_1,\cal{M}_2$ and $\cal{N}$ be von Neumann algebras and let $T \co \cal{M}_1 \to \cal{M}_2$ be a weak* continuous decomposable map. Then we have a well-defined weak* continuous decomposable map $\Id_\cal{N} \ot T \co \cal{N} \otvn \cal{M}_1 \to \cal{N} \otvn \cal{M}_2$ and \begin{equation} \label{dec-tensor-2} \norm{\Id_\cal{N} \ot T}_{\dec,\cal{N} \otvn \cal{M}_1 \to \cal{N} \otvn \cal{M}_2} \leq \norm{T}_{\dec,\cal{M}_1 \to \cal{M}_2}. \end{equation} \end{lemma} \begin{proof} Note that the decomposable map $T$ is completely bounded by \cite[Proposition 1.3 (3) p.~177]{Haa85}. By \cite[p.~40]{BlM04}, we infer that we have a well-defined weak* continuous completely bounded map $\Id_\cal{N} \ot T \co \cal{N} \otvn \cal{M}_1 \to \cal{N} \otvn \cal{M}_2$. By \cite[Remark 1.5 p.~183]{Haa85}, the infimum in the definition of the decomposable norm given in \eqref{Norm-dec} is actually a minimum. Consequently, there exist some linear maps $v_1,v_2 \co \cal{M}_1 \to \cal{M}_2$ such that the map $ \begin{bmatrix} v_1 & T \\ T^\circ & v_2 \end{bmatrix} \co \M_2(\cal{M}_1) \to \M_2(\cal{M}_2)$ is completely positive with $\max\{\norm{v_1},\norm{v_2}\} = \norm{T}_{\dec,\cal{M}_1 \to \cal{M}_2}$. It is not difficult to see that we can suppose that $v_1$ and $v_2$ are weak* continuous by using \cite[Proposition 3.1 p.~24]{ArK23} as in the proof of \cite[Proposition 3.4 p.~26]{ArK23}. Then by \cite[Proposition 4.3.7 p.~225]{Li92} the tensor product $$ \begin{bmatrix} \Id_\cal{N} \ot v_1 & \Id_\cal{N} \ot T \\ \Id_\cal{N} \ot T^\circ & \Id_\cal{N} \ot v_2 \end{bmatrix} =\Id_{\cal{N}} \ot \begin{bmatrix} v_1 & T \\ T^\circ & v_2 \end{bmatrix} \co \M_2(\cal{N} \otvn \cal{M}_1) \to \M_2(\cal{N} \otvn \cal{M}_2) $$ is a well-defined completely positive map. We deduce that the map $\Id_\cal{N} \ot T \co \cal{N} \otvn \cal{M}_1 \to \cal{N} \otvn \cal{M}_2$ is decomposable with \begin{align*} \MoveEqLeft \norm{\Id_\cal{N} \ot T}_{\dec} \ov{\eqref{Norm-dec}}{\leq} \max\{\norm{\Id_\cal{N} \ot v_1},\norm{\Id_\cal{N} \ot v_2 }\} \leq \max\{\norm{v_1}_{\cb},\norm{v_2}_{\cb}\} \\ &\ov{\eqref{dec-et-cp}}{=} \max\{\norm{v_1},\norm{v_2}\} = \norm{T}_{\dec,\cal{M}_1 \to \cal{M}_2}, \end{align*} where we use in the first equality the complete positivity of the linear maps $v_1$ and $v_2$. \end{proof} Finally, if $A$ and $B$ are $\mathrm{C}^*$-algebras, with $B$ unital, we will show that the space $\Dec(A, B)$ of decomposable maps can be endowed with an operator space structure. To demonstrate this, suppose that $[T_{ij}]$ belongs to the matrix space $\M_{n}(\Dec(A, B))$, where $n \geq 1$ is an integer. We identify the matrix $[T_{ij}]$ with the map $A \mapsto \M_{n}(B)$, $x \mapsto [T_{ij}(x)]$. 
We define a norm on the space $\M_{n}(\Dec(A,B))$ by setting \begin{equation} \label{Norms-dec} \bnorm{[T_{i j}]}_{\M_{n}(\Dec(A,B))} \ov{\mathrm{def}}{=} \bnorm{x \mapsto [T_{ij}(x)]}_{\Dec(A,\M_{n}(B))}. \end{equation} In short, we make the identification $\M_{n}(\Dec(A,B)) = \Dec(A,\M_{n}(B))$. \begin{prop} \label{Prop-Ruan-Dec} Let $A$ and $B$ be $\mathrm{C}^*$-algebras, with $B$ unital. When endowed with the matricial norms from \eqref{Norms-dec}, the Banach space $\Dec(A, B)$ acquires the structure of an operator space. \end{prop} \begin{proof} Let $X,Y \in \M_{n}$ and $[T_{i j}] \in \M_{n}(\Dec(A,B))$ for some integer $n \geq 1$. Note that by \cite[Exercise 12.1 p.~251]{Pis03} the two-sided multiplication map $u \co \M_{n}(B) \to \M_{n}(B)$, $y \mapsto (X \ot 1_B)y(Y \ot 1_B)$ is decomposable with $\norm{u}_{\dec,\M_{n}(B) \to \M_{n}(B)} \leq \norm{X \ot 1_B}_{\M_{n}(B)} \norm{Y \ot 1_B}_{\M_{n}(B)}$. Using this observation, we obtain \begin{align*} \MoveEqLeft \bnorm{X[T_{i j}]Y}_{\M_{n}(\Dec(A,B))} \ov{\eqref{Norms-dec}}{=} \bnorm{x \mapsto X [T_{ij}(x)]Y}_{\Dec(A,\M_{n}(B))} \\ &=\bnorm{x \mapsto (X \ot 1_B) [T_{ij}(x)](Y \ot 1_B)}_{\Dec(A,\M_{n}(B))} \\ &\leq \norm{X \ot 1_B}_{\M_{n}(B)} \norm{Y \ot 1_B}_{\M_{n}(B)} \bnorm{x \mapsto [T_{ij}(x)]}_{\Dec(A,\M_{n}(B))} \\ &\ov{\eqref{Norms-dec}}{=} \norm{X}_{\M_{n}} \bnorm{[T_{i j}]}_{\M_{n}(\Dec(A,B))} \norm{Y}_{\M_{n}}. \end{align*} Let $[T_{i j}] \in \M_{n}(\Dec(A,B))$ and $[S_{kl}] \in \M_{m}(\Dec(A,B))$ for some integers $n,m \geq 1$. Using \cite[Lemma 6.8 p.~118]{Pis20} in the second equality, we have \begin{align*} \MoveEqLeft \bnorm{[T_{i j}] \oplus [S_{kl}]}_{\M_{n+m}(\Dec(A,B))} \ov{\eqref{Norms-dec}}{=} \bnorm{x \mapsto [T_{ij}(x)] \oplus [S_{kl}(x)]}_{\Dec(A,\M_{n+m}(B))} \\ &=\max\Big\{\bnorm{x \mapsto [T_{ij}(x)]}_{\Dec(A,\M_{n}(B))},\bnorm{x \mapsto [S_{kl}(x)]}_{\Dec(A,\M_{m}(B))} \Big\} \\ &\ov{\eqref{Norms-dec}}{=} \max\Big\{\norm{[T_{i j}]}_{\M_{n}(\Dec(A,B))}, \norm{[S_{kl}]}_{\M_{m}(\Dec(A,B))} \Big\}. \end{align*} Now, it suffices to use Ruan's theorem \cite[p.~35]{Pis03} or \cite[Proposition 2.3.6 p.~34]{EfR00}. \end{proof} We finish by providing another formula for the decomposable norm. \begin{prop} \label{prop-dec-sqrt} Consider a decomposable map $T \co A \to B$ between $\C^*$-algebras. Then \begin{equation} \label{Norm-dec-sqrt} \norm{T}_{\dec,A \to B} = \inf\left\{ \norm{v_1}^{\frac12} \norm{v_2}^{\frac12} \right\}, \end{equation} where the infimum is taken over all maps $v_1$ and $v_2$ such that the operator $\Phi$ introduced in \eqref{Matrice-2-2-Phi} is completely positive. \end{prop} \begin{proof} The inequality $\geq$ is obvious. Now, we show the reverse inequality. Assume that the operator $\Phi \ov{\mathrm{def}}{=} \begin{bmatrix} v_1 & T \\ T^\circ & v_2 \\ \end{bmatrix}$ of \eqref{Matrice-2-2-Phi} is completely positive for some linear maps $v_1,v_2$. Suppose that $v_1=0$. Consider a positive element $x \in A$. Since the element $\begin{bmatrix} x & x \\ x & x \\ \end{bmatrix}$ in $\M_2(A)$ is positive, we see that $\left(\begin{bmatrix} 0 & T \\ T^\circ & v_2 \\ \end{bmatrix}\right)\left(\begin{bmatrix} x & x \\ x & x \\ \end{bmatrix}\right)=\begin{bmatrix} 0 & T(x) \\ T^\circ(x) & v_2(x) \\ \end{bmatrix}$ is positive. By \cite[Proposition 1.3.2 p.~13]{Bha07}, we infer that $\norm{T(x)} \leq \norm{0}\bnorm{v_2(x)^{\frac{1}{2}}}$. As every element of $A$ is a linear combination of positive elements \cite[p.~17]{BlM04}, we conclude that $T=0$.
%The situation is identical if $v_2=0$. So we can suppose that $v_1 \not =0$. For any $t > 0$ we define the positive matrix $A_t \ov{\mathrm{def}}{=} \begin{pmatrix} \sqrt{t} & 0 \\ 0 & \frac{1}{\sqrt{t}} \end{pmatrix}$ in $\M_2$. Then the linear map $\Phi_t \co \M_2(A) \to \M_2(B)$, $x \mapsto A_t \Phi(x) A_t$ is also completely positive. For any $t > 0$, observe that $ \Phi_t = \begin{pmatrix} t v_1 & T \\ T^\circ & \frac{1}{t} v_2 \end{pmatrix} $. Hence $\norm{T}_{\dec,A \to B} \ov{\eqref{Norm-dec}}{\leq} \inf_{t > 0} \max \{ t \norm{v_1}, t^{-1} \norm{v_2} \} $. The choice $t = \norm{v_2}^{\frac12} \norm{v_1}^{-\frac12}$ gives the inequality $\norm{T}_{\dec,A \to B} \leq \norm{v_1}^{\frac12} \norm{v_2}^{\frac12}$. Taking the infimum on $v_1,v_2$, we obtain $\norm{T}_{\dec,A \to B} \leq \inf\left\{ \norm{v_1}^{\frac12} \norm{v_2}^{\frac12} \right\}$. \end{proof} \paragraph{Full group $\mathrm{C}^*$-algebras} Let $G$ be a locally compact group equipped with a left Haar measure $\mu_G$. Consider the direct sum $U$ of all equivalence classes of cyclic continuous unitary representations of $G$. We denote by $H$ the associated Hilbert space. Following \cite[Definition 8.B.1 p.~243]{BeH20}, we define the full group $\C^*$-algebra $\C^*(G)$ to be the norm closure in $\mathcal{B}(H)$ of $U(\L^1(G))$, where $U \co \L^1(G) \to \cal{B}(H)$, $f \mapsto \int_G f(s)U_s \d\mu_G(s)$ denotes the (injective) integrated representation associated to $U$. Here the latter integral is understood in the weak operator sense. So we can identify $\L^1(G)$ as a dense subspace of the algebra $\mathrm{C}^*(G)$. Recall that there is a one-to-one correspondence between the continuous unitary representations of the group $G$ and the non-degenerate representations of the $\mathrm{C}^*$-algebra $\mathrm{C}^*(G)$, see \cite[Theorem 13.9.3 p.~303]{Dix77} and \cite[Theorem 12.4.1 p.~1383]{Pal01} for details. Finally, we denote by $\W^*(G)$ the enveloping von Neumann algebra of the $\mathrm{C}^*$-algebra $\mathrm{C}^*(G)$, introduced in \cite{Ern64}, under the name <<big group algebra>>. This means that this von Neumann algebra is the weak closure of $\pi(\mathrm{C}^*(G))$, where $\pi$ is the universal representation of the $\C^*$-algebra $\mathrm{C}^*(G)$. By \cite[p.~265]{Dix77}, we have a canonical isometric isomorphism $\W^*(G)=\C^*(G)^{**}$, which is bicontinuous for the weak operator topology on $\W^*(G)$ and the weak* topology on the bidual $\C^*(G)^{**}$. \begin{example} \normalfont If the locally compact group $G$ is \textit{abelian} then by \cite[Example p.~225]{Fol16} the $\mathrm{C}^*$-algebra $\mathrm{C}^*(G)$ is $*$-isomorphic to the $\mathrm{C}^*$-algebra $\C_0(\hat{G})$, where $\hat{G}$ is the Pontryagin dual of $G$. \end{example} \paragraph{Fourier-Stieltjes algebras} Recall that we defined the Fourier-Stieltjes algebra $\B(G)$ of a locally compact group $G$ in \eqref{BG-as-entries}. It is known that it is the complex linear span of the set of all continuous positive definite functions on $G$, see \cite[Definition 2.1.5 p.~40]{KaL18}. Equipped with pointwise multiplication and addition $\B(G)$ becomes a commutative unital Banach algebra by \cite[Theorem 2.1.11 p.~44]{KaL18}. Let $\mu_G$ be a left Haar measure of $G$. 
If $\varphi \in \B(G)$ then by \cite[p.~193]{Eym64} the linear form $\omega_\varphi \co \L^1(G) \to \mathbb{C}$ defined by \begin{equation} \label{Def-omega-varphi} \omega_\varphi(f) \ov{\mathrm{def}}{=} \int_G \varphi(s)f(s) \d\mu_G(s) \end{equation} extends to a bounded linear form $\omega_\varphi \co \mathrm{C}^*(G) \to \mathbb{C}$ with $\norm{\varphi}_{\B(G)}=\norm{\omega_\varphi}$. It is well-known that each bounded linear form on $\mathrm{C}^*(G)$ satisfies this description, i.e.~we have $\B(G)=\mathrm{C}^*(G)^*$ isometrically, see \cite[p.~192]{Eym64} or \cite[p.~40]{KaL18}. Moreover, by \cite[Lemma 1.4 p.~370]{Fel60} and \cite[Theorem 1.6.1 p.~29]{KaL18} the linear form $\L^1(G) \to \mathbb{C}$, $f \mapsto \int_G \varphi(s)f(s) \d\mu_G(s)$ extends to a positive linear form on the $\mathrm{C}^*$-algebra $\mathrm{C}^*(G)$ if and only if $\varphi$ is a continuous positive definite function. \begin{example} \normalfont \label{norm-B(G)-commutatif} If the locally compact group $G$ is \textit{abelian}, recall that the Fourier transform $\hat{\mu} \co G \to \mathbb{C}$ of a bounded regular complex Borel measure $\mu \in \M(\hat{G})$ on the Pontryagin dual $\hat{G}$ is given by $\hat{\mu}(s) \ov{\mathrm{def}}{=} \int_{\hat{G}} \ovl{\chi(s)}\d\mu(\chi)$ where $s \in G$. According to \cite[Exemple p.~92]{Eym64}, $\B(G)$ is the space of Fourier transforms $\varphi=\hat{\mu}$ of bounded regular complex Borel measures $\mu \in \M(\hat{G})$ and $\norm{\varphi}_{\B(G)}=\norm{\mu}_{\M(\hat{G})}$. \end{example} We will use the next observation written without proof in \cite[p.~188]{Eym64}. For the sake of completeness, we give a proof. \begin{prop} \label{Conj-pos-def} Let $G$ be a locally compact group. Let $\varphi \co G \to \mathbb{C}$ be a continuous positive definite function. We have \begin{equation} \label{norm-pos-def} \norm{\varphi}_{\B(G)} =\varphi(e). \end{equation} \end{prop} \begin{proof} \textit{First proof if $G$ is discrete.} Let $G$ be a discrete group. By \cite[13.9.2 p.~303]{Dix77}, the full $\mathrm{C}^*$-algebra $\mathrm{C}^*(G)$ of $G$ is unital\footnote{\thefootnote. Actually, by \cite{Mil71} the full $\mathrm{C}^*$-algebra $\mathrm{C}^*(G)$ of a locally compact group $G$ is unital if and only if $G$ is discrete.}. By \eqref{Def-omega-varphi}, we have a positive linear form $\omega_\varphi \co \mathrm{C}^*(G) \to \mathbb{C}$, $U(s) \mapsto \varphi(s)$. So using \cite[Theorem 4.3.2 p.~256]{KaR97} in the second equality, we conclude that $$ \norm{\varphi}_{\B(G)} =\norm{\omega_\varphi}_{\mathrm{C}^*(G)^*} =\omega_\varphi(1) =\omega_\varphi(U(e)) =\varphi(e). $$ \noindent\textit{Second proof if $G$ is locally compact.} Using \cite[Remark 2.1.10 p.~43]{KaL18}, we know that $\norm{\varphi}_{\B(G)} \geq \norm{\varphi}_{\L^\infty(G)} \geq \varphi(e)$. Furthermore, with \cite[Theorem 13.4.5 p.~288]{Dix77}, we can write $\varphi=\langle \pi(\cdot)\xi,\xi \rangle_H$, where $\pi$ is a continuous unitary representation of $G$ on some complex Hilbert space $H$ and $\xi \in H$. We deduce that $\varphi(e)=\langle \xi,\xi \rangle_H=\norm{\xi}_H^2 \ov{\eqref{Norm-BG}}{\geq} \norm{\varphi}_{\B(G)}$. \end{proof} \paragraph{Fell's absorption principle} Let $G$ be a locally compact group. Let $\pi \co G \to \mathcal{B}(H)$ be any continuous unitary representation of $G$. Recall Fell's absorption principle, e.g.~\cite[Lemma 5.5.3 p.~187]{KaL18} (see also \cite[Proposition 8.1 p.~149]{Pis03} for the discrete case). 
If $1_H \co G \to \mathcal{B}(H)$, $s \mapsto \Id_H$ is the identity representation, we have a unitary equivalence \begin{equation} \label{Fell} \lambda \ot \pi \approx\lambda \ot 1_H. \end{equation} \paragraph{Groupoids} We refer to \cite{Hah78}, \cite{Muh90}, \cite{Pat03}, \cite{Pat04}, \cite{Pat99}, \cite{Ren97} \cite{Ren80} and \cite{RaW97} for background on groupoids. A groupoid is a set $G$ together with a distinguished subset $G^{(2)} \subseteq G \times G$, a multiplication map $G^{(2)} \to G$, $(s,t) \mapsto st$ and an inverse map $G \to G$, $s \mapsto s^{-1}$ such that \begin{enumerate} \item\label{it:gpd0} for any $s \in G$ we have $(s^{-1})^{-1} = s$, \item\label{it:gpd1} If $(s,t), (t,r) \in G^{(2)}$ then $(st,r)$ and $(s,tr)$ belong to $G^{(2)}$ and $(st)r = s(tr)$, \item\label{it:gpd2} for any $s \in G$ we have $(s,s^{-1}) \in G^{(2)}$ and if $s,r \in G$ satisfies $(s,r) \in G^{(2)}$, we have $s^{-1} (s r) = r$ and $(sr)r^{-1} = s$. \end{enumerate} We say that $G^{(2)}$ is the set of composable pairs. Second axiom shows that for products of three groupoid elements, there is no ambiguity in dropping the parentheses, and simply writing $str$ for $(st)r$. A groupoid $G$ is a group if and only if its unit space $G^{(0)}$ is a singleton. Given a groupoid $G$ we shall write $G^{(0)} \ov{\mathrm{def}}{=} \{s^{-1}s : s \in G\}$ and refer to elements of $G^{(0)}$ as units and to $G^{(0)}$ itself as the unit space. Since $(s^{-1})^{-1} = s$ for any $s \in G$, we also have $G^{(0)} = \{ss^{-1} : s \in G\}$. We define the range and domain maps $r,d \co G \to G^{(0)}$ by $$ r(s) \ov{\mathrm{def}}{=} ss^{-1} \qquad\text{ and }\qquad d(s) \ov{\mathrm{def}}{=} s^{-1}s, \quad s \in G. $$ For any $s,t \in G$ we have $(s,t) \in G^{(2)}$ if and only if $d(s) = r(t)$. For any unit $u \in G^{(0)}$, we let $G^u \ov{\mathrm{def}}{=} r^{-1}(\{u\})$ and $G_u \ov{\mathrm{def}}{=} d^{-1}(\{u\})$. \paragraph{Measured groupoids} A locally compact groupoid is a groupoid $G$ equipped with a locally compact topology, where the inversion map $s \mapsto s^{-1}$ is continuous, and the multiplication map $(s,t) \mapsto st$ is continuous with respect to the relative topology on $G^{(2)}$, considered as a subset of $G \times G$. Following \cite[Definition 2.2 p.~16]{Ren80} and \cite[Definition 2.28 p.~24]{Muh90}, a left Haar system for $G$ is defined as being a family $(\nu^u)_{u \in G^{(0)}}$ of positive Radon measures on $G$ such that \begin{enumerate} \item the support $\supp \nu^u$ of the measure $\nu^u$ is $G^u$, \item for any function $f \in \C_c(G)$, the map $G^{(0)} \to \mathbb{C}$, $u \mapsto \int_G f \d\nu^u$ is continuous, \item for any function $f \in \C_c(G)$ and any $s \in G$ we have $\int_G f(st) \d \nu^{d(s)}(t)=\int_G f(t) \d \nu^{r(s)}(t)$. \end{enumerate} Roughly speaking, to each unit $u$ we associate a measure $\nu^u$ supported on $G^u$. With such system, the space $\C_c(G)$ of continuous functions with compact support, endowed with the operations $$ (f*g)(s) \ov{\mathrm{def}}{=} \int_G f(t)g(t^{-1}s) \d\nu^{r(s)}(t), \quad f^*(s) \ov{\mathrm{def}}{=} \ovl{f(s^{-1})}, \quad s \in G, $$ is a $*$-algebra, according to \cite[p.~38]{Pat99}. Let us additionally consider a positive Radon measure $\mu$ on the unit space $G^{(0)}$. Following \cite[Definition 3.1 p.~22]{Ren80} and \cite[p.~86]{Pat99}, we can introduce the measure $\nu \ov{\mathrm{def}}{=} \int_{G^{(0)}} \nu^u\d\mu(u)$ induced on $G$ by $\mu$. 
The measure $\mu$ is said to be quasi-invariant if the measure $\nu$ is equivalent to its image by the inversion map $G \to G$, $s \mapsto s^{-1}$. A measured groupoid $(G,\nu,\mu)$ is a locally compact groupoid equipped with a left Haar system $\nu$ and a quasi-invariant measure\footnote{\thefootnote. Strictly speaking, only the class on the measure is important. But we do not need this point in this paper.} $\mu$. \begin{example} \normalfont Every locally compact group $G$ can be viewed as a locally compact groupoid, with $G^{(0)} = \{e\}$, multiplication given by the group operation, and inversion by the usual group inverse. We obtain a measured groupoid with a left Haar measure and the Dirac measure as a quasi-invariant measure on $G^{(0)}$. \end{example} \begin{example} \normalfont Let $X$ be a locally compact space. Set $G \ov{\mathrm{def}}{=} X \times X$ and $G^{(2)}\ov{\mathrm{def}}{=} \{((x,y),(y,z)) : x,y,z \in X\}$. Moreover, for any $x,y,z \in X$ we define $(x,y)(y,z) \ov{\mathrm{def}}{=} (x,z)$ and $(x,y)^{-1} \ov{\mathrm{def}}{=} (y,x)$. We obtain the pair groupoid (or Brandt groupoid). We have $G^{0}=\{(x,x) : x \in X\}$, which can be identified with $X$. Moreover, for any $x,y \in X$ we have $r(x,y)=x$ and $d(x,y)= y$. For any unit $u \in X$, we have $G_u=X \times \{u\}$ and $G^u=\{u\} \times X$. If we equip $X$ with a positive Radon measure $\mu$, we can define for any unit $u \in X$ the measure $\nu^u \ov{\mathrm{def}}{=} \delta_u \ot \mu$ on $G$, where $\delta_u$ is the unit measure at $u$. In this case, the measure $\mu$ is quasi-invariant. If we consider the discrete space $X=\{1,\ldots,n\}$ for some integer $n \geq 1$, equipped with the counting measure $\mu_n$, we denote by $\mathrm{P}_n$ the associated measured groupoid. \end{example} \begin{example} \normalfont If $G_1$ and $G_2$ are groupoids, it is clear that the product $G_1 \times G_2$ has a canonical structure of groupoid with $(G_1 \times G_2)^{(2)}=\big\{((x_1,y_1),(x_2,y_2)) : (x_1,x_2) \in G_1^{(2)}, (y_1,y_2) \in G_2^{(2)}\big\}$, \begin{equation} \label{compo-product} (x,y)^{-1}=(x^{-1},y^{-1}) \quad \text{and} \quad (x_1,y_1)(x_2,y_2)=(x_1x_2,y_1y_2). \end{equation} We have $(G_1 \times G_2)^{(0)}=G_1^{(0)} \times G_2^{(0)}$, $d(x,y)=(d(x),d(y))$ and $r(x,y)=(r(x),r(y))$ If $G_1=\mathrm{P}_n$ and $G_2=G$ is a group, an element of the product $\mathrm{P}_n \times G$ can be written under the form $(i,j,s)$ with $i,j \in \{1,\ldots,n\}$ and $s \in G$. We can see a complex function $F \co \mathrm{P}_n \times G \to \mathbb{C}$ as a $n \times n$ matrix-valued function $[F_{ij}]_{1 \leq i,j \leq n}$ on the group $G$, where $F_{ij} \co G \to \mathbb{C}$, $s \mapsto F(i,j,s)$. \end{example} \paragraph{von Neumann algebras and multipliers} The von Neumann algebra of a measured groupoid $(G,\lambda,\mu)$ is the von Neumann algebra generated by $\lambda(\C_c(G))$, where $\lambda$ is the regular representation defined in \cite[p.~55]{Ren80} and \cite[pp.~93-94]{Pat99} of the measured groupoid $(G,\nu,\mu)$. If $G=\mathrm{P}_n$ for some integer $n \geq 1$ then it is easy to check that the von Neumann algebra $\VN(G)$ is $*$-isomorphic to the matrix algebra $\M_n$. Following \cite[Definition 3.1 p.~475]{Ren97}, we say that a function $\varphi \in \L^\infty(G)$ induces a bounded Fourier multiplier if it induces a weak* continuous\footnote{\thefootnote. 
In \cite[Proposition 3.1 p.~474]{Ren97}, <<bounded>> must be replaced with <<weak* continuous>> to ensure the correctness of the statement.} operator $\VN(G) \to \VN(G)$, $\lambda(f) \mapsto \lambda(\varphi f)$. \paragraph{Positive definite functions} Let $(G,\nu,\mu)$ be a measured groupoid. By \cite[Proposition 1.1 p.~457 and Definition 1.1 p.~458]{Ren97}, a function $\varphi \in \L^\infty(G)$ is said to be positive definite if for any integer $n \geq 1$ and any complex numbers $\alpha_1,\ldots ,\alpha_n \in \mathbb{C}$, the inequality \begin{equation} \label{def-pos-def} \sum_{k,l=1}^n \alpha_{k} \ovl{\alpha_{l}} \varphi(\gamma_{k}^{-1}\gamma_{l}) \geq 0 \end{equation} holds for $\mu$-almost all $u \in G^{(0)}$ and $\nu^u$-almost all $\gamma_1,\ldots ,\gamma_n \in G^{u}$. We now naturally relate this condition to \cite[Proposition 8.4 p.~166]{ArK23}, where the proof holds for the case of a locally compact group endowed with a trivial cocycle. \begin{lemma} \label{Lemma-Bloc-def-pos} Let $G$ be a locally compact group and let $n \geq 1$ be an integer. The $n \times n$ matrix-valued function $F=[F_{ij}]_{1 \leq i,j \leq n}$ in the space $\L^\infty(\mathrm{P}_n \times G)$ defines a positive definite function on the groupoid $\mathrm{P}_n \times G$ if and only if for any integer $m \geq 1$, any elements $i_1,\ldots, i_m \in \{1,,\ldots,n\}$, any $s_1,\ldots,s_m \in G$ and any complex numbers $\alpha_1,\ldots,\alpha_m \in \mathbb{C}$, we have the inequality \begin{equation} \label{Condition-ArK} \sum_{k,l=1}^{m} \alpha_k \ovl{\alpha_{l}} F_{i_{k}i_{l}}(s_{k}^{-1}s_{l}) \geq 0. \end{equation} \end{lemma} \begin{proof} Note that the unit space of the groupoid $\mathrm{P}_n \times G$ identifies to $(\mathrm{P}_n \times G)^{(0)}=\mathrm{P}_n^{(0)} \times G^{(0)}=\{1,\ldots,n\}$. Fix some $q \in \{1,\ldots,n\}$. If $\gamma_k=(q,i_k,s_k)$ and $\gamma_l=(q,j_l,s_l)$ are elements of the groupoid $\mathrm{P}_n \times G$ with $i_k,j_l \in \{1,\ldots,n\}$ and $s_k,s_l \in G$ then we have $$ \gamma_{k}^{-1}\gamma_{l} =(q,i_k,s_k)^{-1}(q,j_l,s_l) \ov{\eqref{compo-product}}{=} (i_k,q,s_k^{-1})(q,j_l,s_l) \ov{\eqref{compo-product}}{=} (i_k,j_l,s_k^{-1}s_l). $$ So the condition \eqref{def-pos-def} translates to \begin{equation} \sum_{k,l=1}^m \alpha_{k} \ovl{\alpha_{l}} F(i_k,j_l,s_k^{-1}s_l) \geq 0 \quad \text{i.e.} \sum_{k,l=1}^m \alpha_{k} \ovl{\alpha_{l}} F_{i_k,j_l}(s_k^{-1}s_l) \geq 0 \end{equation} \end{proof} \paragraph{A characterization of functions of Fourier-Stieltjes algebras} In the case of groupoids, we caution the reader that there exist three notions of Fourier-Stieltjes algebra, introduced in the papers \cite{Pat04}, \cite{Ren97} and \cite{RaW97}. We refer to the excellent survey \cite{Pat03} for more information. We require a specific case (for groups) of a result, essentially stated in \cite[Proposition 1.3 p.~459 and Lemma 1.1 p.~460]{Ren97} and \cite[Proposition 5 p.~1266]{Pat04} that is more generally stated for Fourier-Stieltjes algebras associated with measured groupoids. Unfortunately, the proof of \cite[Proposition 1.3 p.~459]{Ren97} is false\footnote{\thefootnote. The operator $L'(\gamma)$ of \cite[Proposition 1.3 p.~459]{Ren97} is not a unitary.} (the result \cite[Proposition 5 p.~1266]{Pat04} is incomplete) and must be corrected. Consequently, we provide an argument sufficient for our purposes. 
For the proof, we will use the notion of $G$-Hilbert bundle on a locally compact groupoid $G$, which is a Hilbert bundle $\cal{H}$ over its unit space $G^{(0)}$ such that there is a linear unitary operator $\pi_s \co \cal{H}_{d(s)} \to \cal{H}_{r(s)}$ for each $s \in G$ such that for all continuous bounded sections $\xi$ and $\eta$ of $\cal{H}$, the map $(\xi, \eta) \co G \to \cal{B}(\cal{H}_{d(s)},\cal{H}_{r(s)})$, $s \mapsto \big\la \pi_s\xi(d(s)), \eta(r(s)) \big\ra_{\cal{H}_{r(s)}}$ is continuous, and the map $s \mapsto \pi_s$ is a groupoid homomorphism from $G$ into the isomorphism groupoid of the fibered set $\cup_{u \in G^{(0)}} \cal{H}_u$, see \cite[Chapter~1]{Muh90}. Finally, recall that if $\phi \co G \to \mathbb{C}$ is a continuous function then $\phi$ is positive definite if and only if $\phi$ is of the form $(\xi,\xi)$ for some $G$-Hilbert bundle. This result is proved in \cite[Theorem 1 p.~1264]{Pat04}. \begin{prop} \label{Prop-carac-BG-2-2} Let $G$ be a locally compact group. A continuous function $\varphi \co G \to \mathbb{C}$ belongs to the Fourier-Stieltjes algebra $\B(G)$ if and only if there exists continuous positive definite functions $\psi_1,\psi_2 \co G \to \mathbb{C}$ such that the matrix $\begin{bmatrix} \psi_1 & \varphi \\ \check{\ovl{\varphi}} & \psi_2 \end{bmatrix}$ defines a continuous positive definite function on the measured groupoid $\mathrm{P}_2 \times G$. In this case, we have \begin{equation} \label{Norm-B-G-utile} \norm{\varphi}_{\B(G)} = \inf \norm{\psi_1}_{\L^\infty(G)}^{\frac{1}{2}} \norm{\psi_2}_{\L^\infty(G)}^{\frac{1}{2}} , \end{equation} where the infimum is taken over all $\psi_1$ and $\psi_2$ satisfying the previous condition. \end{prop} \begin{proof} $\Rightarrow$: Let $\epsi > 0$. Using \eqref{BG-as-entries} and \eqref{Norm-BG}, we can write $\varphi=\la \pi(\cdot)\xi_1,\xi_2 \ra_{H}$ for some vectors $\xi_1$ and $\xi_2$ in a complex Hilbert space $H$ and some continuous unitary representation $\pi$ of $G$ on $H$ with $\norm{\xi_1}_{H} \norm{\xi_2}_{H} \leq \norm{\varphi}_{\B(G)} + \epsi$. For any $s \in G$, we have $$ \check{\ovl{\varphi}}(s) =\ovl{\la \pi(s^{-1})\xi_1,\xi_2 \ra_{H}} =\la \xi_2, \pi(s)^*\xi_1\ra_{H} =\la \pi(s)\xi_2, \xi_1\ra_{H}. $$ Hence $\check{\ovl{\varphi}}=\la \pi(\cdot)\xi_2,\xi_1 \ra_{H}$. Now, we introduce the continuous positive definite functions $\psi_1 \ov{\mathrm{def}}{=} \la\pi(\cdot)\xi_1,\xi_1 \ra_H$ and $\psi_2 \ov{\mathrm{def}}{=} \la \pi(\cdot)\xi_2,\xi_2 \ra_H$ on the group $G$. Now, we consider the Hilbert $(\mathrm{P}_2 \times G)$-bundle $\cal{H}$ over the discrete space $(\mathrm{P}_2 \times G)^{(0)}=\{1,2\}$ defined by $\cal{H}_{1} \ov{\mathrm{def}}{=} H$ and $\cal{H}_{2} \ov{\mathrm{def}}{=} H$ and $\pi_{i,j,s} \ov{\mathrm{def}}{=} \pi(s) \co \cal{H}_{j} \to \cal{H}_{i}$ for any $i,j \in \{1,2\}$ and any $s \in G$. For any $i \in \{1,2\}$, we introduce the vector $\zeta(i) \ov{\mathrm{def}}{=} \xi_i$. This defines a section $\zeta$ of the bundle $\cal{H}$. For any $i,j \in \{1,2\}$ and any $s \in G$, we obtain \begin{align} \MoveEqLeft \label{blabla-34} (\zeta,\zeta)(i,j,s) = \big\la \pi_{i,j,s}\zeta(d(i,j,s)), \zeta(r(i,j,s)) \big\ra_{\cal{H}_i} =\big\la \pi_{i,j,s}\zeta(j), \zeta(i) \big\ra_{\cal{H}_i} \\ &=\big\la \pi(s)\xi_j, \xi_i \big\ra_{\cal{H}_i} =\la \pi(s)\xi_j, \xi_i \ra_{H}. 
\nonumber \end{align} We deduce that \begin{align*} \MoveEqLeft \begin{bmatrix} \psi_1 & \varphi \\ \check{\ovl{\varphi}} & \psi_2 \end{bmatrix} =\begin{bmatrix} \la\pi(\cdot)\xi_1,\xi_1 \ra_H & \la \pi(\cdot)\xi_1,\xi_2 \ra_{H} \\ \la \pi(\cdot)\xi_2,\xi_1 \ra_{H} & \la\pi(\cdot)\xi_2,\xi_2 \ra_H \end{bmatrix} \ov{\eqref{blabla-34}}{=} (\zeta,\zeta). \end{align*} Consequently by \cite[Theorem 1 p.~1264]{Pat04}, the continuous function $\begin{bmatrix} \psi_1 & \varphi \\ \check{\ovl{\varphi}} & \psi_2 \end{bmatrix}$ is positive definite on the measured groupoid $\mathrm{P}_2 \times G$. Moreover, we have \begin{align*} \MoveEqLeft \norm{\psi_1}_{\L^\infty(G)}^{\frac{1}{2}} \norm{\psi_2}_{\L^\infty(G)}^{\frac{1}{2}} \ov{\eqref{norm-pos-def}}{=} \psi_1(e)^{\frac{1}{2}}\psi_2(e)^{\frac{1}{2}} =\la\pi(e) \xi_1 ,\xi_1 \ra_H^{\frac{1}{2}} \la\pi(e) \xi_2,\xi_2 \ra_H^{\frac{1}{2}} \\ &= \norm{\xi_1}_H \norm{\xi_2}_H \leq \norm{\varphi}_{\B(G)} + \epsi. \end{align*} Since $\epsi > 0$ is arbitrary, we conclude that $\inf \norm{\psi_1}_{\L^\infty(G)}^{\frac{1}{2}} \norm{\psi_2}_{\L^\infty(G)}^{\frac{1}{2}} \leq \norm{\varphi}_{\B(G)}$. $\Leftarrow$: Suppose that there exists some continuous positive definite functions $\psi_1,\psi_2 \co G \to \mathbb{C}$ such that the matrix $F \ov{\mathrm{def}}{=} \begin{bmatrix} \psi_1 & \varphi \\ \check{\ovl{\varphi}} & \psi_2 \end{bmatrix}$ defines a continuous positive definite function on the measured groupoid $\mathrm{P}_2 \times G$. By \cite[Theorem 1 p.~1264]{Pat04}, there exists a Hilbert $(\mathrm{P}_2 \times G)$-bundle $\cal{H}$ over the discrete space $(\mathrm{P}_2 \times G)^{(0)}=\{1,2\}$, with groupoid homomorphism $(i,j,s) \mapsto \pi_{(i,j,s)}$, and a section $\zeta \co \{1,2\} \to $ of $\cal{H}$ such that $\begin{bmatrix} \psi_1 & \varphi \\ \check{\ovl{\varphi}} & \psi_2 \end{bmatrix} = (\zeta,\zeta)$. So we have two complex Hilbert spaces $\cal{H}_1$ and $\cal{H}_2$. Here $\pi_{(i,j,s)} \co \cal{H}_j \to \cal{H}_i$ is a unitary operator. Note that $\pi_{(1,1,e)}=\Id_{\cal{H}_1}$ and $\pi_{(2,2,e)}=\Id_{\cal{H}_2}$. We consider the operator \begin{equation} \label{} P \ov{\mathrm{def}}{=} \frac{1}{2}\begin{bmatrix} \Id_{\cal{H}_1} & \pi_{(1,2,e)} \\ \pi_{(2,1,e)} & \Id_{\cal{H}_2} \end{bmatrix} \end{equation} acting on the Hilbert space $\cal{H}_1 \oplus \cal{H}_2$. It is easy to see that $P$ is a selfadjoint projection and that it commutes with each operator $\frac{1}{2}\begin{bmatrix} \pi_{(1,1,s)} & \pi_{(1,2,s)} \\ \pi_{(2,1,s)} & \pi_{(2,2,s)} \end{bmatrix}$. We introduce the complex Hilbert space $H \ov{\mathrm{def}}{=} P(\cal{H}_1 \oplus \cal{H}_2)$. Observe that an element $(x,y)$ in $\cal{H}_1 \oplus \cal{H}_2$ belongs to the subspace $H$ if and only if $\pi_{(1,2,e)}(y)=x$ and $\pi_{(2,1,e)}(x)=y$. Consequently, we can consider the operator $$ \tilde{\pi}_{s} \ov{\mathrm{def}}{=} \frac{1}{2}\begin{bmatrix} \pi_{(1,1,s)} & \pi_{(1,2,s)} \\ \pi_{(2,1,s)} & \pi_{(2,2,s)} \end{bmatrix}|_{H}, \quad s \in G. $$ It is easy to check\footnote{\thefootnote. For any $s,t \in G$, we have $$ \bigg(\frac{1}{2}\begin{bmatrix} \pi_{(1,1,s)} & \pi_{(1,2,s)} \\ \pi_{(2,1,s)} & \pi_{(2,2,s)} \end{bmatrix}\bigg) \bigg(\frac{1}{2}\begin{bmatrix} \pi_{(1,1,t)} & \pi_{(1,2,t)} \\ \pi_{(2,1,t)} & \pi_{(2,2,t)} \end{bmatrix}\bigg) =\frac{1}{2}\begin{bmatrix} \pi_{(1,1,st)} & \pi_{(1,2,st)} \\ \pi_{(2,1,st)} & \pi_{(2,2,st)} \end{bmatrix}. 
$$ Thus we also have $$ \bigg(\frac{1}{2}\begin{bmatrix} \pi_{(1,1,s)} & \pi_{(1,2,s)} \\ \pi_{(2,1,s)} & \pi_{(2,2,s)} \end{bmatrix}\bigg) \bigg(\frac{1}{2}\begin{bmatrix} \pi_{(1,1,s^{-1})} & \pi_{(1,2,s^{-1})} \\ \pi_{(2,1,s^{-1})} & \pi_{(2,2,s^{-1})} \end{bmatrix}\bigg) =\frac{1}{2}\begin{bmatrix} \pi_{(1,1,e)} & \pi_{(1,2,e)} \\ \pi_{(2,1,e)} & \pi_{(2,2,e)} \end{bmatrix} = P = \Id_H. $$} that we have a continuous unitary representation of $G$ on the Hilbert space $H$. Indeed, for any $(x,y) \in H$, we have \begin{align*} \MoveEqLeft \norm{\tilde{\pi}_{s}(x,y)}_H^2 =\frac14 \left( \norm{\pi_{1,1,s}(x) + \pi_{1,2,s}(y)}_{\cal{H}_1}^2 + \norm{\pi_{2,1,s}(x) + \pi_{2,2,s}(y)}_{\cal{H}_2}^2 \right) \\ &=\frac14 \left( \norm{\pi_{1,1,s}\big( x+ \pi_{1,2,e}(y))}_{\cal{H}_1}^2 + \norm{\pi_{2,2,s}(\pi_{2,1,e}(x)+ y)}_{\cal{H}_2}^2 \right)\\ &=\frac14 \left( \norm{ x+ \pi_{1,2,e}(y)}_{\cal{H}_1}^2 + \norm{\pi_{2,1,e}(x)+ y}_{\cal{H}_2}^2 \right) =\frac14 \left( \norm{2x}_{\cal{H}_1}^2 + \norm{2y}_{\cal{H}_2}^2 \right) \\ &= \norm{x}_{\cal{H}_1}^2 +\norm{y}_{\cal{H}_2}^2 =\norm{(x,y)}_H^2. \end{align*} We consider the vectors $\xi \ov{\mathrm{def}}{=} \sqrt{2} P(0,\zeta(2))$ and $\eta \ov{\mathrm{def}}{=} \sqrt{2} P(\zeta(1),0)$ in the space $H$. Now, for any $s \in G$ we observe that \begin{align*} \MoveEqLeft \varphi(s) =(\zeta,\zeta)(1,2,s) =\big\la \pi_{(1,2,s)}\zeta(d(1,2,s)), \zeta(r(1,2,s)) \big\ra \\ &=\big\la \pi_{(1,2,s)}\zeta(2), \zeta(1) \big\ra \\ &=\bigg\la\frac{1}{2}\frac{1}{2}\begin{bmatrix} \Id_{\cal{H}_1} & \pi_{(1,2,e)} \\ \pi_{(2,1,e)} & \Id_{\cal{H}_2} \end{bmatrix} \begin{bmatrix} \pi_{(1,1,s)} & \pi_{(1,2,s)} \\ \pi_{(2,1,s)} & \pi_{(2,2,s)} \end{bmatrix}(0,\zeta(2)),(\zeta(1),0) \bigg\ra\\ &=2\la \tilde{\pi}_{s}P(0,\zeta(2)),P(\zeta(1),0) \ra_H = \la \tilde{\pi}_s \sqrt{2} P(0,\zeta(2)),\sqrt{2}P(\zeta(1),0) \ra_H = \la \tilde{\pi}_{s}\xi , \eta \ra_H. \end{align*} Hence the function $\varphi$ belongs to the Fourier-Stieltjes algebra $\B(G)$. Furthermore, if $i \in \{1,2\}$ we have \begin{align} \MoveEqLeft \label{inter-989} \norm{\psi_i}_{\L^\infty(G)} \ov{\eqref{norm-pos-def}}{=} \psi_i(e) =(\zeta,\zeta)(i,i,e) =\big\la \pi_{(i,i,e)}\zeta(d(i,i,e)), \zeta(r(i,i,e)) \big\ra \\ &=\big\la \pi_{(i,i,e)}\zeta(i), \zeta(i) \big\ra = \norm{\zeta(i)}_{\cal{H}_i}^2. \nonumber \end{align} Next, observe that \begin{align*} \MoveEqLeft \norm{\xi}_H^2 = \sqrt{2}^2 \norm{P(0,\zeta(2))}_H^2 = 2 \left( \frac14 \norm{\pi_{1,2,e}(\zeta(2))}_{\cal{H}_1}^2 + \frac14 \norm{\zeta(2)}_{\cal{H}_2}^2 \right) \\ & = \frac12 \left(\norm{\zeta(2)}_{\cal{H}_2}^2 + \norm{\zeta(2)}_{\cal{H}_2}^2\right) = \norm{\zeta(2)}_{\cal{H}_2}^2. \end{align*} In the same way, we have $\norm{\eta}_H^2 = \norm{\zeta(1)}_{\cal{H}_1}^2$. Moreover, we have $$ \norm{\varphi}_{\B(G)} \ov{\eqref{Norm-BG}}{\leq} \norm{\xi}_{H} \norm{\eta}_{H} = \norm{\zeta(2)}_{\cal{H}_2} \norm{\zeta(1)}_{\cal{H}_1} \ov{\eqref{inter-989}}{=} \norm{\psi_1}_{\L^\infty(G)}^{\frac{1}{2}} \norm{\psi_2}_{\L^\infty(G)}^{\frac{1}{2}}. $$ \end{proof} \subsection{Links between Fourier-Stieltjes algebras and decomposable multipliers} \label{Sec-FS-and-dec} Let $G$ be a locally compact group. Recall that by \cite[Corollary 1.8 (i) p.~465]{DCH85} or \cite[Corollary 5.4.11 p.~185]{KaL18} we have a contractive inclusion $\B(G) \subseteq \frak{M}^{\infty,\cb}(G)$. 
This is even a complete contraction by \cite[Corollary 4.3 p.~179]{Spr04}, where we equip the Fourier-Stieltjes algebra $\B(G)$ with the dual operator space structure induced by the equality $\mathrm{C}^*(G)^*=\B(G)$. In the next result, we strengthen this result by replacing the space $\frak{M}^{\infty,\cb}(G)$ of completely bounded Fourier multipliers on the von Neumann algebra $\VN(G)$ by the space $\frak{M}^{\infty,\dec}(G)$ of decomposable Fourier multipliers on $\VN(G)$. While the inclusion $\B(G) \subseteq \frak{M}^{\infty,\dec}(G)$ is straightforward\footnote{\thefootnote. Indeed, if $\varphi \in \B(G)$ then we can write $\varphi=\varphi_1-\varphi_2+\i \varphi_3-\varphi_4$, where each $\varphi_i$ is a continuous positive definite function. By \cite[Proposition 5.4.9 p.~184]{KaL18}, each Fourier multiplier $M_{\varphi_i} \co \VN(G)\to \VN(G)$ is completely positive. Then it is immediate that the Fourier multiplier $$ M_\varphi =M_{\varphi_1-\varphi_2+\i(\varphi_3-\varphi_4)} =M_{\varphi_1}-M_{\varphi_2}+\i(M_{\varphi_3}-M_{\varphi_4}) $$ is decomposable.}, the \textit{contractivity} of the inclusion $\B(G) \subseteq \frak{M}^{\infty,\dec}(G)$ is new, even in the case where $G$ is discrete. Here, we equip the space $\frak{M}^{\infty,\dec}(G)$ with the operator space structure induced by the one of the operator space $\Dec(\VN(G))$. For the proof, we will use the notion of a quasi-complete locally convex space. Recall that a locally convex space $X$ is called quasi-complete if every bounded Cauchy net in $X$ converges \cite[Definition 4.23 p.~107]{Osb14}. \begin{prop} \label{prop-B(G)-inclus-dec} Let $G$ be a locally compact group. The map $\B(G) \to \frak{M}^{\infty,\dec}(G)$, $\varphi \mapsto M_\varphi$ is a well-defined injective complete contraction from the Fourier-Stieltjes algebra $\B(G)$ into the space $\frak{M}^{\infty,\dec}(G)$ of decomposable Fourier multipliers. \end{prop} \begin{proof} We will present two distinct proofs. \noindent\textit{First proof.} We begin with a purely group-theoretic argument. Let $\varphi \in \B(G)$. By homogeneity, we can suppose that $\norm{\varphi}_{\B(G)}=1$. We will use the associated linear form $\omega_\varphi \co \C^*(G) \to \mathbb{C}$, $\int_G f(s) U_s \d\mu_G(s) \mapsto \int_G \varphi(s)f(s) \d\mu_G(s)$ defined in \eqref{Def-omega-varphi}. By \cite[Lemma A.2.2 p.~360]{BlM04}, we can consider the unique weak* continuous extension $\tilde{\omega}_\varphi \co \W^*(G) \to \mathbb{C}$ on the von Neumann algebra $\W^*(G)$, where we use here (and only in this step) the identification $\W^*(G)=\C^*(G)^{**}$ of \cite[p.~265]{Dix77}. We will prove that for any $s \in G$ the element $U_s$ belongs to the von Neumann algebra $\W^*(G)$ and the equality \begin{equation} \label{magic-equality-1} \tilde{\omega}_\varphi(U(s)) =\varphi(s), \quad s \in G. \end{equation} Let $s \in G$ and let $\mathfrak{B}$ be a neighbourhood basis at $s$ constituted of compact neighbourhoods. For any $V \in \mathfrak{B}$, consider a positive continuous function $f_V \co G \to \R^+$ on $G$ such that $\int_G f_V \d \mu_G=1$ with support contained in $V$. Then by \cite[ Corollary 3, VIII.17]{Bou04b} the net $(\int_G f_V(t)U_t \d\mu_G(t))$ converges to $U_s$ in the strong operator topology, and therefore also in the weak operator topology. Moreover, by \cite[VIII.15]{Bou04b} for any $V$ we have the estimate $$ \norm{\int_G f_V(t)U_t \d\mu_G(t)} \leq \int_G f_V \d\mu_G =1. $$ So the net $(\int_G f_V(t)U_t \d\mu_G(t))_V$ is bounded. 
We deduce that the net $(\int_G f_V(t)U_t \d\mu_G(t))_V$ converges to $U_s$ in the weak* topology by \cite[Lemma 2.5 p.~69]{Tak02}. In particular, we deduce that $U_s$ belongs to the von Neumann algebra $\W^*(G)$. Moreover, on the one hand, we infer by weak* continuity of the linear form $\tilde{\omega}_\varphi$ that the net $(\tilde{\omega}_\varphi(\int_G f_V(t)U_t \d\mu_G(t))_V)$ converges to $\tilde{\omega}_\varphi(U_s)$. On the other hand, using the continuity of the function $\varphi \co G \to \mathbb{C}$ and \cite[Corollary 2, VIII.17]{Bou04b} in the limit process, we obtain $$ \tilde{\omega}_\varphi\bigg(\int_G f_V(t)U_t \d\mu_G(t)\bigg) =\omega_\varphi\bigg(\int_G f_V(t)U_t \d\mu_G(t)\bigg) \ov{\eqref{Def-omega-varphi}}{=} \int_G \varphi(t)f_V(t) \d\mu_G(t) \xra[V]{} \varphi(s). $$ By uniqueness of the limit, we conclude that \eqref{magic-equality-1} is true. By Fell's absorption principle \eqref{Fell} applied to the representation $U \co G \to \mathcal{B}(H)$ instead of $\pi$, there exists a unitary $W \co \L^2(G,H) \to \L^2(G,H)$ such that for any $s \in G$ $$ W(\lambda_s \ot \Id_H)W^* =\lambda_s \ot U_s. $$ By \cite[p.~9 and p.~25]{Dix81}, we deduce that there exists a normal unital $*$-homomorphism $\Delta \co \VN(G) \to \VN(G) \otvn \W^*(G)$, $\lambda_s \mapsto \lambda_s \ot U_s$. Since any Banach space is barreled \cite[Theorem 4.5 p.~97]{Osb14}, we see by \cite[Corollary 4.25 (b) p.~107]{Osb14} that the weak* topology on the dual Banach space $\VN(G)$ is quasi-complete. Consequently, by \cite[Corollary 2, III p.~38]{Bou04a}, for any function $f \in \C_c(G)$, the integral $\int_G f(s)\lambda_s \d \mu_G(s)$ is a well-defined weak* integral. Using the weak* continuity of $\Delta$ together with \cite[Proposition 1, VI.3]{Bou04a} in the first equality, we deduce that \begin{align} \MoveEqLeft \label{Eq-1356} \Delta\bigg(\int_G f(s)\lambda_s \d \mu_G(s)\bigg) =\int_G f(s)\Delta(\lambda_s) \d \mu_G(s) =\int_G f(s) (\lambda_s \ot U_s) \d \mu_G(s). \end{align} Now, for any $s \in G$, we obtain again with \cite[Proposition 1, VI.3]{Bou04b} that \begin{align*} \MoveEqLeft (\Id \ot \tilde{\omega}_\varphi) \circ \Delta\bigg(\int_G f(s)\lambda_s \d \mu_G(s)\bigg) \ov{\eqref{Eq-1356}}{=} (\Id \ot \tilde{\omega}_\varphi)\bigg(\int_G f(s) (\lambda_s \ot U_s) \d \mu_G(s)\bigg)\\ &=\bigg(\int_G f(s) (\Id \ot \tilde{\omega}_\varphi)(\lambda_s \ot U_s) \d \mu_G(s)\bigg) \ov{\eqref{magic-equality-1}}{=}\int_G \varphi(s)f(s)\lambda_s \d \mu_G(s). \end{align*} We conclude that the weak* continuous map $(\Id \ot \tilde{\omega}_\varphi) \circ \Delta$ is the Fourier multiplier $M_\varphi$ of symbol $\varphi$. Note that the $*$-homomorphism $\Delta$ is decomposable since it is completely positive. According to \cite[Lemma 5.4.3 p.~96]{EfR00}, the linear form $\tilde{\omega}_\varphi$ is equally decomposable with \begin{equation} \label{inter-998} \norm{\tilde{\omega}_\varphi}_{\dec} \ov{\eqref{dec=cb}}{=} \norm{\tilde{\omega}_\varphi}_{\cb} =\norm{\tilde{\omega}_\varphi} =1, \end{equation} where we use \cite[Corollary 2.2.3 p.~24]{EfR00} in the second equality. By Lemma \ref{Lemma-tensor-dec-2}, we deduce that we have a well-defined weak* continuous decomposable map $\Id \ot \tilde{\omega}_\varphi \co \VN(G) \otvn \W^*(G) \to \VN(G)$. 
We conclude by composition that the linear map $M_\varphi=(\Id \ot \tilde{\omega}_\varphi) \circ \Delta \co \VN(G) \to \VN(G)$ is decomposable and that \begin{align*} \MoveEqLeft \norm{M_\varphi}_{\dec} =\bnorm{(\Id \ot \tilde{\omega}_\varphi) \circ \Delta}_{\dec} \ov{\eqref{Composition-dec}}{\leq} \norm{\Id \ot \tilde{\omega}_\varphi}_\dec \norm{\Delta}_\dec \\ &\ov{\eqref{dec-tensor-2}}{\leq} \norm{\tilde{\omega}_\varphi}_\dec \norm{\Delta}_\dec \ov{\eqref{inter-998}}{=} \norm{\Delta}_\dec \ov{\eqref{dec-et-cp}}{\leq} 1. \end{align*} Finally, it is easy to check that the map $\B(G) \to \frak{M}^{\infty,\dec}(G)$, $\varphi \mapsto M_\varphi$ is also injective. For the complete contractivity, the argument is similar. Consider a matrix $[\varphi_{ij}] \in \M_{n}(\B(G))$. We have a completely bounded map $[\omega_{\varphi_{ij}}] \co \C^*(G) \to \M_n$. Using \cite[1.4.8 p.~24]{BlM04}, its unique weak* continuous extension $[\tilde{\omega}_{\varphi_{ij}}] \co \W^*(G) \to \M_n$ is completely bounded with the same completely bounded norm. Note that this linear map is decomposable and its decomposable norm coincides with its completely bounded norm by \eqref{dec=cb}. Finally, we can write $[M_{\varphi_{ij}}]=(\Id \ot [\tilde{\omega}_{\varphi_{ij}}]) \circ (\Id_{\M_n} \ot \Delta)$. \noindent\textit{Second proof of the contractivity.} Now, we give a second proof using groupoids. Let $\varphi \in \B(G)$ and $\epsi >0$. By Proposition \ref{Prop-carac-BG-2-2}, there exists continuous positive definite functions $\psi_1$ and $\psi_2$ (hence bounded by \cite[Proposition C.4.2 p.~351]{BHV08}) such that the matrix $\begin{bmatrix} \psi_1 & \varphi \\ \check{\ovl{\varphi}} & \psi_2 \end{bmatrix}$ defines a continuous positive definite function $F$ on the groupoid $\mathrm{P}_2\times G$ with \begin{equation} \label{ine-epsi} \norm{\psi_1}_{\L^\infty(G)}^{\frac{1}{2}} \norm{\psi_2}_{\L^\infty(G)}^{\frac{1}{2}} \leq \norm{\varphi}_{\B(G)}+\epsi. \end{equation} Note the identification $\VN(\mathrm{P}_2 \times G)=\VN(\mathrm{P}_2) \otvn \VN(G)=\M_2 \otvn \VN(G)=\M_2(\VN(G))$. By generalizing the very transparent argument of \cite[Proposition 5.6.16 p.~206]{BrO08} with \cite[Proposition 4.12 and Remark 4.12]{Arh24}, we see that $F$ induces a completely positive multiplier on the von Neumann algebra $\VN( \mathrm{P}_2 \times G)$. This completely positive multiplier identifies to the map $\begin{bmatrix} M_{\psi_1} & M_\varphi \\ M_{\check{\ovl{\varphi}}} & M_{\psi_2} \end{bmatrix}=\begin{bmatrix} M_{\psi_1} & M_\varphi \\ M_\varphi^\circ & M_{\psi_2} \end{bmatrix} \co \M_2(\VN(G)) \to \M_2(\VN(G))$. Note that the Fourier multipliers $M_{\psi_1}$ and $M_{\psi_2}$ are completely positive. We conclude that the Fourier multiplier $M_\varphi \co \VN(G) \to \VN(G)$ is decomposable with \begin{align*} \MoveEqLeft \norm{M_\varphi}_{\dec,\VN(G) \to \VN(G)} \ov{\eqref{Norm-dec-sqrt}}{\leq} \norm{M_{\psi_1}}_{\VN(G) \to \VN(G)}^{\frac{1}{2}} \norm{M_{\psi_2}}_{\VN(G) \to \VN(G)}^{\frac{1}{2}} \\ &=\norm{\psi_1}_{\L^\infty(G)}^{\frac{1}{2}} \norm{\psi_2}_{\L^\infty(G)}^{\frac{1}{2}} \ov{\eqref{ine-epsi}}{\leq} \norm{\varphi}_{\B(G)}+\epsi. \end{align*} \end{proof} Now, we study the converse of Proposition \ref{prop-B(G)-inclus-dec} in Proposition \ref{conj-1-1-correspondance} and in Theorem \ref{dec-vs-B(G)-discrete-group}. We need the following result, which gives a description of the norm of the Fourier-Stieltjes algebra $\B(G)$ for some suitable functions. 
Here, we denote by $\mathrm{P}(G)$ the set of continuous positive definite functions on $G$, following \cite[Definition 1.4.18 p.~23]{KaL18}. \begin{prop} Let $G$ be a locally compact group. Let $\varphi \in \B(G)$ such that $\check{\varphi}=\ovl{\varphi}$. We have \begin{equation} \label{norm-B(G)} \norm{\varphi}_{\B(G)} =\inf \big\{\varphi_1(e)+\varphi_2(e): \varphi=\varphi_1-\varphi_2, \varphi_1,\varphi_2 \in \mathrm{P}(G) \big\}. \end{equation} \end{prop} \begin{proof} Suppose that $\varphi=\varphi_1-\varphi_2$ for some continuous positive definite functions $\varphi_1,\varphi_2 \co G \to \mathbb{C}$. We have $$ \norm{\varphi}_{\B(G)} =\norm{\varphi_1-\varphi_2}_{\B(G)} \leq \norm{\varphi_1}_{\B(G)} + \norm{\varphi_2}_{\B(G)} \ov{\eqref{norm-pos-def}}{=} \varphi_1(e)+\varphi_2(e). $$ Passing to the infimum, we obtain that $\norm{\varphi}_{\B(G)} \leq \inf \big\{\varphi_1(e)+\varphi_2(e): \varphi=\varphi_1-\varphi_2, \varphi_1,\varphi_2 \in \mathrm{P}(G) \big\}$. Indeed, by \cite[(2.7) p.~193]{Eym64} (or \cite[p.~41]{KaL18}\footnote{\thefootnote. Note that in this reference, the assumption ``$\check{u}=\ovl{u}$'' is missing.}) we have an equality in this last inequality and the infimum is a minimum. \end{proof} \begin{remark} \normalfont Suppose that the locally compact $G$ is \textit{abelian}. For any real bounded regular Borel measure $\mu$ on the dual group $\hat{G}$, the previous result combined with Example \ref{norm-B(G)-commutatif} and \eqref{norm-pos-def} implies that \begin{equation} \label{norm-M(G)} \norm{\mu}_{\M(\hat{G})} =\inf \big\{\norm{\mu_1}_{\M(\hat{G})}+\norm{\mu_2}_{\M(\hat{G})}: \mu=\mu_1-\mu_2, \mu_1,\mu_2 \geq 0 \big\}. \end{equation} We can replace the group $\hat{G}$ by a locally compact space $X$. Indeed, for any \textit{real} bounded regular Borel measure $\mu$, we can decompose the measure $\mu$ with \cite[III \S1.~8 Corollary 2]{Bou04a} as $\mu=\mu^+ -\mu^-$ for some bounded positive regular Borel measures $\mu_+$ and $\mu_-$ on $X$ with $\norm{\mu}_{\M(X)}=\norm{\mu^+}_{\M(X)}+\norm{\mu^-}_{\M(X)}$ and we can use a similar reasoning. \end{remark} By adding property $(\kappa_\infty)$ to the group $G$, we obtain a partial converse to Proposition \ref{prop-B(G)-inclus-dec}. For the proof, we will use the folklore fact that says that the symbol of any bounded multiplier on the von Neumann algebra $\VN(G)$ of a locally compact group $G$ is almost everywhere equal to a continuous function. This follows from the <<regularity>> of the Fourier algebra, established in \cite[Theorem 2.3.8 p.~53]{KaL18}. \begin{prop} \label{conj-1-1-correspondance} Let $G$ be a locally compact group. If $G$ has property $(\kappa_\infty)$, then the linear map $\B(G) \to \frak{M}^{\infty,\dec}(G)$, $\varphi \mapsto M_\varphi$ is a bijection from the Fourier-Stieltjes algebra $\B(G)$ onto the space $\frak{M}^{\infty,\dec}(G)$ of decomposable multipliers. Moreover, if $\kappa_\infty(G)=1$ and if the function $\varphi$ belongs to $\B(G)$ and satisfies $\check{\varphi}=\ovl{\varphi}$, we have $\norm{\varphi}_{\B(G)}=\norm{M_\varphi}_{\dec,\VN(G) \to \VN(G)}$. \end{prop} \begin{proof} In Proposition \ref{prop-B(G)-inclus-dec}, we established a (completely) contractive inclusion $\B(G) \subseteq \frak{M}^{\infty,\dec}(G)$. We show the reverse inclusion. Suppose that $M_\varphi \co \VN(G) \to \VN(G)$ is a decomposable Fourier multiplier (hence weak* continuous) with continuous symbol $\varphi \co G \to \mathbb{C}$. 
We can write \begin{equation} \label{in-543} M_\varphi =T_1 + T_2 + \i(T_3 - T_4) \end{equation} for some completely positive maps $T_1,T_2,T_3,T_4 \co \VN(G) \to \VN(G)$. By using the contractive projection $P_{\w^*} \co \cal{B}(\VN(G)) \to \cal{B}(\VN(G))$ of \cite[Proposition 3.1 p.~24]{ArK23}, which preserves the complete positivity, as in the proof of \cite[Proposition 3.4 p.~26]{ArK23}, we can suppose that these maps $T_1,T_2,T_3,T_4$ are weak* continuous since $P_{\w^*}(M_\varphi)=M_\varphi$. Using the bounded projection $P_{G}^\infty \co \CB_{\w^*}(\VN(G)) \to \CB_{\w^*}(\VN(G))$ provided by property $(\kappa_\infty)$, we obtain $$ M_\varphi =P_{G}^\infty(M_\varphi) \ov{\eqref{in-543}}{=} P_{G}^\infty\big(T_1-T_2+\i(T_3-T_4)\big) =P_{G}^\infty(T_1)-P_{G}^\infty(T_2)+\i(P_{G}^\infty(T_3)-P_{G}^\infty(T_4)), $$ where each $P_{G}^\infty(T_i) \co \VN(G) \to \VN(G)$ is a completely positive Fourier multiplier for some symbol $\varphi_i \co G \to \mathbb{C}$, i.e.~$P_{G}^\infty(T_i)=M_{\varphi_i}$. By \cite[Proposition 4.2 p.~487]{DCH85}, the function $\varphi_i$ is continuous and positive definite. We deduce that $$ M_\varphi =M_{\varphi_1}-M_{\varphi_2}+\i(M_{\varphi_3}-M_{\varphi_4}) =M_{\varphi_1-\varphi_2+\i(\varphi_3-\varphi_4)}. $$ We infer that $\varphi=\varphi_1-\varphi_2+\i \varphi_3-\varphi_4$. We conclude that the function $\varphi$ belongs to the Fourier-Stieltjes algebra $\B(G)$. Hence we have an inclusion $\frak{M}^{\infty,\dec}(G) \subset \B(G)$. Now, we prove the second part of the statement assuming $\kappa_\infty(G)=1$. Suppose that the function $\varphi$ belongs to the Fourier-Stieltjes algebra $\B(G)$ and satisfies $\check{\varphi}=\ovl{\varphi}$. This last condition means that the Fourier multiplier $M_\varphi \co \VN(G) \to \VN(G)$ is adjoint preserving, i.e.~$M_\varphi(x^*)=(M_\varphi(x))^*$ for any $x \in \VN(G)$. Let $\epsi > 0$. By \cite[Proposition 1.3 (1) p.~177]{Haa85} and \cite[p.~184]{Haa85}, there exists some completely positive operators $T_1,T_2 \co \VN(G) \to \VN(G)$ such that \begin{equation} \label{eqa-456} M_\varphi=T_1-T_2 \quad \text{with} \quad \norm{T_1+T_2} = \norm{M_\varphi}_{\dec,\VN(G) \to \VN(G)}. \end{equation} By using the contractive projection $P_{\w^*} \co \cal{B}(\VN(G)) \to \cal{B}(\VN(G))$ of \cite[Proposition 3.1 p.~24]{ArK23}, which preserves the complete positivity, it is easy to check that we can suppose that the linear maps $T_1$ and $T_2$ are weak* continuous since $P_{\w^*}(M_\varphi)=M_\varphi$. Let $\epsi>0$. Since $\kappa_\infty(G)=1$, we can consider a bounded projection $P_{G}^\infty \co \CB_{\w^*}(\VN(G)) \to \CB_{\w^*}(\VN(G))$ of norm $\leq 1+\epsi$, preserving the complete positivity. We deduce that \begin{equation} \label{Decompo-magic} M_\varphi =P_{G}^\infty(M_\varphi) \ov{\eqref{eqa-456}}{=} P_{G}^\infty(T_1-T_2) =P_{G}^\infty(T_1)-P_{G}^\infty(T_2). \end{equation} We denote by $\varphi_1$ and $\varphi_2$ the continuous symbols of the completely positive Fourier multipliers $P_{G}^\infty(T_1)$ and $P_{G}^\infty(T_2)$. These functions are positive definite again by \cite[Proposition 4.2 p.~487]{DCH85}. The equality \eqref{Decompo-magic} gives $\varphi=\varphi_1-\varphi_2$. 
By using \cite[Proposition 4.3 p.~489]{DCH85} in the second equality, we see that \begin{align*} \MoveEqLeft \norm{\varphi}_{\B(G)} \ov{\eqref{norm-B(G)}}{\leq} \varphi_1(e)+\varphi_2(e) =(\varphi_1+\varphi_2)(e) =\norm{M_{\varphi_1+\varphi_2}}_{\VN(G) \to \VN(G)} \\ &=\norm{M_{\varphi_1} + M_{\varphi_2}}_{\VN(G) \to \VN(G)} =\norm{P_{G}^\infty(T_1)+P_{G}^\infty(T_2)} = \norm{P_{G}^\infty(T_1+T_2)} \\ &\leq (1+\epsi)\norm{T_1+T_2} \ov{\eqref{eqa-456}}{=} (1+\epsi) \norm{M_\varphi}_{\dec,\VN(G) \to \VN(G)}. \end{align*} Since $\epsi>0$ is arbitrary, we deduce that $\norm{\varphi}_{\B(G)} \leq \norm{M_\varphi}_{\dec,\VN(G) \to \VN(G)}$. Combining with Proposition \ref{prop-B(G)-inclus-dec}, we conclude that $\norm{\varphi}_{\B(G)}=\norm{M_\varphi}_{\dec,\VN(G) \to \VN(G)}$. \end{proof} Now, we observe that the first inclusion in \eqref{Inclusions} can be strict. \begin{prop} \label{prop-groups-with-bad-multiplier} Let $G$ be a non-amenable locally compact group such that the von Neumann algebra $\VN(G)$ is injective. Then there exists a decomposable Fourier multiplier $T \co \VN(G) \to \VN(G)$, which is not induced by an element $\varphi \in \B(G)$. \end{prop} \begin{proof} Since the von Neumann algebra $\VN(G)$ is injective, we have by \cite[Theorem 1.6 p.~184]{Haa85} the equality $\frak{M}^{\infty,\cb}(G)=\frak{M}^{\infty,\dec}(G)$ isometrically. Since the group $G$ is not amenable, we know by an unpublished result of Ruan stated in \cite[p.~54]{Pis01} and \cite[p.~190]{Spr04} that $\B(G) \varsubsetneq \frak{M}^{\infty,\cb}(G)$. We conclude that $\B(G) \varsubsetneq \frak{M}^{\infty,\dec}(G)$. \end{proof} \begin{example} \normalfont \label{Example-SL2} By \cite[Corollary 7 p.~75]{Con76}, the von Neumann algebra $\VN(G)$ of a second-countable connected locally compact group $G$ is injective. This result applies for example to the locally compact group $G=\SL_2(\R)$, which is non-amenable by \cite[Example G.2.4 (i) p.~426]{BHV08}. We conclude that $\B(G) \varsubsetneq \frak{M}^{\infty,\dec}(G)$ in this case. \end{example} For discrete groups, a matricial improvement of property $(\kappa_\infty)$ is available in \cite[Theorem 4.2 p.~62]{ArK23}. Consequently, we can establish the following isometric result. \begin{thm} \label{dec-vs-B(G)-discrete-group} Let $G$ be a discrete group. The map $\B(G) \to \frak{M}^{\infty,\dec}(G)$, $\varphi \mapsto M_\varphi$ is an isometric isomorphism from the Fourier-Stieltjes $\B(G)$ onto the algebra $\frak{M}^{\infty,\dec}(G)$ of decomposable multipliers on the von Neumann algebra $\VN(G)$. \end{thm} \begin{proof} In Proposition \ref{prop-B(G)-inclus-dec}, we have seen that we have a contractive inclusion $\B(G) \subseteq \frak{M}^{\infty,\dec}(G)$. It suffices to show the reverse inclusion. Suppose that the Fourier multiplier $M_\varphi \co \VN(G) \to \VN(G)$ is decomposable. By \cite[Remark 1.5 p.~183]{Haa85}, there exist some linear maps $v_1,v_2 \co \VN(G) \to \VN(G)$ such that the linear map $ \begin{bmatrix} v_1 & M_\varphi \\ M_{\check{\ovl{\varphi}}} & v_2 \end{bmatrix} \co \M_2(\VN(G)) \to \M_2(\VN(G)) $ is completely positive with $\max\{\norm{v_1},\norm{v_2}\}=\norm{M_\varphi}_{\dec,\VN(G) \to \VN(G)}$. We can suppose that the completely positive maps $v_1$ and $v_2$ are in addition weak* continuous by using \cite[Proposition 3.1 p.~24]{ArK23}. 
Now, we consider the projection $P_{\{1,2\},G}^\infty \co \CB_{\w^*}(\M_2(\VN(G))) \to \CB_{\w^*}(\M_2(\VN(G)))$, preserving the complete positivity and contractive, provided by \cite[Theorem 4.2 p.~62]{ArK23}. The proof shows that in case it is applied to an element of special structure as $\begin{bmatrix} v_1 & M_\varphi \\ M_{\check{\ovl{\varphi}}} & v_2 \end{bmatrix}$, the mapping is $P_{\{1,2\},G}^\infty=\begin{bmatrix} P_{G}^\infty & P_{G}^\infty \\ P_{G}^\infty & P_{G}^\infty \\ \end{bmatrix}$, where $P_{G}^\infty \co \CB_{\w^*}(\VN(G)) \to \CB_{\w^*}(\VN(G))$ is the contractive projection onto the space of completely bounded Fourier multipliers, provided by \cite[Theorem 4.2 p.~62]{ArK23}. We obtain that the map \begin{align} \MoveEqLeft \label{Map-2x2-ttt-discrete} \begin{bmatrix} P_G^\infty(v_1) & M_\varphi \\ M_{\check{\ovl{\varphi}}} & P_G^\infty(v_2) \\ \end{bmatrix} = \begin{bmatrix} P_G^\infty(v_1) & P_G^\infty(M_\varphi) \\ P_G^\infty(M_{\check{\ovl{\varphi}}}) & P_G^\infty(v_2) \\ \end{bmatrix} =P_{\{1,2\},G}^\infty\left( \begin{bmatrix} v_1 & M_\varphi \\ M_{\check{\ovl{\varphi}}} & v_2 \end{bmatrix} \right) \end{align} is completely positive. Moreover, we have \begin{align} \label{Useful-estimation} \MoveEqLeft \max\big\{\norm{P_G^\infty(v_1)},\norm{P_G^\infty(v_2)}\big\} \leq \norm{P_G^\infty} \max\big\{\norm{v_1},\norm{v_2}\big\} =\norm{M_\varphi}_{\dec,\VN(G) \to \VN(G)}. \end{align} We can write $P_G^\infty(v_1)=M_{\psi_1}$ and $P_G^\infty(v_2)=M_{\psi_2}$ for some continuous positive definite functions $\psi_1,\psi_2 \co G \to \mathbb{C}$. By \cite[Proposition 8.4 p.~166]{ArK23}, the condition \eqref{Condition-ArK} is satisfied with $\begin{bmatrix} \psi_1 & \varphi \\ \check{\ovl{\varphi}} & \psi_2 \end{bmatrix}$ instead of $\begin{bmatrix} F_{11} & F_{12}\\ F_{21} & F_{22} \end{bmatrix}$. By Lemma \ref{Lemma-Bloc-def-pos}, we conclude that $F \ov{\mathrm{def}}{=} \begin{bmatrix} \psi_1 & \varphi \\ \check{\ovl{\varphi}} & \psi_2 \end{bmatrix}$ identifies to a continuous positive definite function on the groupoid $\mathrm{P}_2 \times G$. According to Proposition \ref{Prop-carac-BG-2-2}, we obtain that the function $\varphi$ belongs to the Fourier-Stieltjes algebra $\B(G)$. Moreover, using the well-known contractive inclusion $\frak{M}^\infty(G) \subseteq \L^\infty(G)$ of \cite[Proposition 5.1.2 p.~154]{KaL18} in the first inequality, we infer that \begin{align*} \MoveEqLeft \norm{\varphi}_{\B(G)} \ov{\eqref{Norm-B-G-utile}}{\leq} \norm{\psi_1}_{\L^\infty(G)}^{\frac{1}{2}} \norm{\psi_2}_{\L^\infty(G)}^{\frac{1}{2}} \leq \max\big\{\norm{M_{\psi_1}},\norm{M_{\psi_2}}\big\} \\ &= \max\big\{\norm{P_G^\infty(v_1)},\norm{P_G^\infty(v_2)}\big\} \ov{\eqref{Useful-estimation}}{\leq} \norm{M_\varphi}_{\dec,\VN(G) \to \VN(G)}. \end{align*} \end{proof} Finally, we prove the second part of Conjecture \ref{conj} in the discrete case. This result improves \cite[Proposition 3.32 (1) p.~51]{ArK23} which says that the second inclusion of \eqref{Inclusions} is strict for any non-amenable weakly amenable discrete group $G$. This result can be seen as a new characterization of amenability for discrete groups. \begin{thm} \label{Thm-conj-discrete-case} Let $G$ be a discrete group. The von Neumann algebra $\VN(G)$ is injective if and only if we have $\frak{M}^{\infty,\dec}(G)= \frak{M}^{\infty,\cb}(G)$. \end{thm} \begin{proof} By Corollary \ref{dec-vs-B(G)-discrete-group}, we have an isometric isomorphism $\frak{M}^{\infty,\dec}(G)=\B(G)$. 
It suffices to use the result stated in \cite[p.~54]{Pis01}, which says that $\B(G) = \frak{M}^{\infty,\cb}(G)$ if and only if the group $G$ is amenable. For a discrete group $G$, the amenability is equivalent to the injectivity of the von Neumann algebra $\VN(G)$ by \cite[Theorem 3.8.2 p.~51]{SiS08} (or Theorem \ref{Th-Lau-Paterson}). \end{proof} \section{Inner amenability} \label{Inner-amenability} \subsection{Background on inner amenability and amenability} \label{Sec-prelim-inner} We warn the reader that different notions of inner amenability coexist in the literature, see \cite[p.~84]{Pat88a} for more information. We say that a locally compact group $G$, equipped with a left Haar measure $\mu_G$, is inner amenable if there exists a state $m$ on the algebra $\L^\infty(G)$ such that \begin{equation} \label{inner-mean} m(\inner_s f) =m(f) \end{equation} for any $s \in G$, where \begin{equation} \label{def-conj-functions} (\inner_sf)(t) \ov{\mathrm{def}}{=} f\big(s^{-1}ts\big), \quad s,t \in G. \end{equation} It is worth noting that by \cite[Proposition 3.2 p.~2527]{CrT17}, a locally compact group $G$ is inner amenable if and only if there exists a state $m$ on the group von Neumann algebra $\VN(G)$ such that \begin{equation*} \label{state-G-invariant} m(\lambda_s^* x\lambda_s) =m(x), \quad s \in G, x \in \VN(G). \end{equation*} Such a state is said to be $G$-invariant. According to \cite[Proposition 3.3 p.~2528]{CrT17}, any closed subgroup $H$ of an inner amenable locally compact group $G$ is inner amenable. If in addition $H$ is normal then the group $G/H$ is also inner amenable by \cite[Proposition 6.2 p.~168]{LaP91}. \begin{example} \normalfont \label{Ex-inner-2} Every amenable locally compact group $G$ is inner amenable. Indeed, by \cite[Theorem 4.19 p.~36]{Pie84} there exists a state $m$ on $\L^\infty(G)$, which is two-sided invariant. \end{example} \begin{example} \normalfont \label{Ex-inner-5} Following \cite[p.~1273]{Pal01}, we say that a locally compact group $G$ is said to have an invariant neighborhood if there exists a compact neighbourhood $V$ of the identity $e$ in $G$ such that $V$ is stable under all inner automorphisms of $G$, i.e.~$s^{-1}Vs = V$ for all $s \in G$. Such a group is said to be an IN-group. By \cite[Proposition 12.1.9 p.~1273]{Pal01}, any IN-group $G$ is unimodular. Note that if $\mu_G$ is a Haar measure on an IN-group $G$, it is clear using \cite[(31) and (33) VII.13]{Bou04b} that the state $m \co \L^\infty(G) \to \mathbb{C}$, $f \mapsto \frac{1}{\mu_G(V)}\int_V f$ satisfies the equation \eqref{inner-mean}. Hence an IN-group is inner amenable. By \cite[Proposition 6.36 p.~119]{ArK23}, a locally compact group $G$ is pro-discrete if and only if it admits a basis $(X_j)$ of neighborhoods of the identity $e$ consisting of open compact normal subgroups. Consequently, pro-discrete locally compact groups are IN-groups. Moreover, according to \cite[Proposition 12.1.9 p.~1273]{Pal01}, compact groups, locally compact abelian groups and discrete groups groups are IN-groups. These groups are therefore all inner amenable. In particular, inner amenability is significantly weaker than amenability. \end{example} \begin{example} \normalfont \label{Contre-example} Recall that a topological group $G$ is of type I \cite[Definition 6.D.1 p.~196 and Proposition 7.C.I p.~219]{BeH20} if for any continuous unitary representation $\pi$ of $G$, the von Neumann algebra $\pi(G)''$ is of type I, hence injective by \cite[Proposition 10.23 p.~144]{Str81}. 
In particular, by Theorem \ref{Th-Lau-Paterson} a second-countable locally compact group $G$ of type I is inner amenable if and only if it is amenable. We refer to \cite[Theorem 6.E.19 p.~208 and Theorem 6.E.20 p.~209]{BeH20} for an extensive list of locally compact groups of type I, including connected nilpotent locally compact groups and linear algebraic groups over a local field of characteristic 0. \end{example} \begin{example} \normalfont \label{Example-almost} If a locally compact group $G$ is almost connected, i.e.~$G/G_e$ is compact if $G_e$ is the connected component of the identity $e$, then its von Neumann algebra $\VN(G)$ is injective by \cite[p.~228]{Pat88b}. Again, by Theorem \ref{Th-Lau-Paterson} such a group is inner amenable if and only if it is amenable. This result in the connected case was first proved by Losert and Rindler in \cite[Theorem 1 p.~222]{LoR87} and proven again in \cite[Corollary 3.4 p.~161]{LaP91}. \end{example} If $A$ and $B$ are two subsets of a set $E$, the notation $A \Delta B\ov{\mathrm{def}}{=}(A-B) \cup (B-A)$ denotes here the symmetric difference of $A$ and $B$. Recall that \begin{equation} \label{Indicator-formula} |1_A-1_B| =1_{A \Delta B}. \end{equation} We will use the following reformulation of \cite[Lemma 8.6 p.~43]{CPPR15}, which is actually and essentially a variant of a classical trick in amenability theory used in \cite[pp.~364-365]{EiW17}, \cite[pp.~441-442]{BHV08} and \cite[p.~410]{Fre13}. We give the two lines of calculus for the sake of completeness. \begin{lemma} \label{Lemma-CPPR} Let $G$ be a locally compact group equipped with a left Haar measure $\mu_G$. Let $\epsi >0$ and consider some positive functions $f,g_1,\ldots,g_n$ in the space $\L^1(G)$ satisfying the inequality $\sum_{k=1}^{n} \norm{f-g_k}_{\L^1(G)} < \epsi$ and $\norm{f}_{\L^1(G)}=1$. Then there exists $t > 0$ such that \begin{equation} \label{Equa-CPPR} \sum_{k=1}^{n} \mu_G\big(\{f >t\} \Delta \{g_k >t\} \big) < \epsi \mu_G(\{f > t\}) . \end{equation} \end{lemma} \begin{proof} For any $s \in G$ and integer $1 \leq k \leq n$, we have by \cite[Lemma G.5.2 p.~441]{BHV08} and \eqref{Indicator-formula} the equalities \begin{equation} \label{BHV08} \norm{f}_{\L^1(G)} =\int_0^\infty \mu_G(\{f > t\}) \d t \quad \text{and} \quad \norm{f - g_k}_{\L^1(G)} =\int_0^\infty \mu_G\big(\{f >t\} \Delta \{g_k >t\}\big) \d t. \end{equation} We deduce that \begin{align*} \MoveEqLeft \int_0^\infty \sum_{k=1}^n \mu_G\big(\{f >t\} \Delta \{g_k >t\}\big) \d t =\sum_{k=1}^{n}\int_0^\infty \mu_G\big(\{f >t\} \Delta \{g_k >t\}\big) \d t \\ &\ov{\eqref{BHV08}}{=} \sum_{k=1}^n \norm{ f - g_k }_{\L^1(G)} < \epsi \norm{f}_{\L^1(G)} \ov{\eqref{BHV08}}{=} \epsi \int_0^\infty \mu_G(\{f > t\}) \d t . \end{align*} The conclusion is obvious. \end{proof} \paragraph{Convolution} If $G$ is a \textsl{unimodular} locally compact group equipped with a Haar measure $\mu_G$, recall that the convolution product of two functions $f$ and $g$ is given, when it exists, by \begin{equation} \label{Convolution-formulas} (f*g)(s) \ov{\mathrm{def}}{=} \int_G f(r)g(r^{-1}s) \d\mu_G(r) =\int_G f(sr^{-1})g(r) \d\mu_G(r). \end{equation} \subsection{Some characterizations of inner amenability} \label{subsec-inner-Folner} Now, we introduce the following definition which is an <<inner variant>> of the well-known definition of the notion of <<F\o{}lner net>> in amenability theory. 
\begin{defi} \label{def-IF} A locally compact group $G$ is said to be inner F\o{}lner (in short $G \in \IF$) if for every finite subset $F$ of $G$ there exists a net $(V_j^F)_j$ of measurable subsets of $G$ such that $\mu(V_j^F) \in (0,\infty)$, with the property that for all $s \in F$, \begin{equation} \label{Inner-Folner} \frac{\mu(V_j^F \Delta (s^{-1}V_j^Fs))}{\mu(V_j^F)} \xra[j \to \infty]{} 0. \end{equation} \end{defi} Now, we give different characterizations of inner amenability for unimodular locally compact groups. The equivalence between the first and the second point is sketched in \cite[Proposition 1 p.~222]{LoR87}. For the sake of completeness, we give a complete proof. \begin{thm} \label{thm-inner-amenable-Folner} Let $G$ be a unimodular locally compact group. The following are equivalent. \begin{enumerate} \item $G$ is inner amenable. \item There exists an asymptotically central net $(f_j)$ of functions in the space $\L^1(G)$, i.e.~for any $s \in G$, we have \begin{equation} \label{asymt-central} \frac{\norm{f_j-\inner_sf_j}_{\L^1(G)}}{\norm{f_j}_{\L^1(G)}} \xra[j ]{} 0. \end{equation} \item $G$ is inner F\o{}lner. \item There exists a net $(f_j)$ of positive functions in the space $\L^1(G)$ with $\int_G f_j \d\mu = 1$ such that for all $s \in G$, we have $\norm{f_j - \inner_s f_j}_{\L^1(G)} \xra[j]{} 0$. \item The same property as before, but the $f_j$'s belong in addition to the space $\C_c(G)$ and are positive definite. \item $G$ is inner F\o{}lner and in addition the sets $V_j^F$ can be chosen to be symmetric, open and containing $e$. \end{enumerate} Finally, the net $(V_j^F)_j$ in the previous definition of inner F\o{}lner can be chosen to be a sequence. \end{thm} \begin{proof} 1. $\Longrightarrow$ 4.: Let $m \in \L^\infty(G)^*$ be an inner invariant mean. By \cite[Proposition 3.3 p.~25]{Pie84} (see also \cite[Lemma 10.16 p.~366]{EiW17}), we can approximate $m$ in the weak* topology by a net $(f_j)$ of functions in $\L^1(G)$ with $f_j \geq 0$ and $\norm{f_j}_{\L^1(G)}=1$. For any $s \in G$ and any $g \in \L^\infty(G)$, we have \begin{align*} \MoveEqLeft \la \inner_s(f_j), g \ra_{\L^1(G),\L^\infty(G)} = \la f_j, \inner_{s^{-1}}(g) \ra_{\L^1(G),\L^\infty(G)} \xra[j \to \infty]{} \la m, \inner_{s^{-1}}(g) \ra =\la m, g \ra. \end{align*} and $\la f_j, g \ra_{\L^1(G),\L^\infty(G)} \to \la m, g \ra_{\L^\infty(G)^*,\L^\infty(G)}$. With an $\frac{\epsi}{2}$-argument, it follows that for any $s \in G$ we have $\w-\lim_j (\inner_sf_j-f_j)=0$. Since for convex sets the weak closure coincides with the norm closure \cite[Theorem 2.5.16 p.~216]{Meg98}, we can replace $f_j$ by some convex combinations to get $\lim_j \norm{\inner_s(f_j) -f_j}_{\L^1(G)}=0$. This replacement can be seen in the following way. Let $F \ov{\mathrm{def}}{=} \{ s_1, \ldots, s_n\}$ be a finite set of $G$. According to the above, $(0,\ldots,0)$ belongs to the weak-closure of the convex hull of $\{(\inner_{s_1}(f_j) - f_j, \inner_{s_2}(f_j) - f_j, \ldots, \inner_{s_n}(f_j) - f_j) : j \}$, hence to the $\L^1(G)^n$ norm closure of this convex hull. Thus there exists a sequence $(g_k)_k$ in this convex hull converging to $(0,\ldots,0)$ in norm. 
For any $k \in \N$, we can write \begin{align*} \MoveEqLeft g_k =\sum_{\ell=1}^L \lambda_\ell \big(\inner_{s_1}(f_{j_\ell}) - f_{j_\ell},\ldots, \inner_{s_n}(f_{j_\ell}) - f_{j_\ell}\big) \\ &= \bigg(\inner_{s_1}\bigg(\sum_{\ell=1}^L \lambda_\ell f_{j_\ell}\bigg) - \sum_{\ell=1}^L \lambda_\ell f_{j_\ell},\ldots, \inner_{s_n}\bigg(\sum_{\ell=1}^L \lambda_\ell f_{j_\ell}\bigg) - \sum_{\ell=1}^L \lambda_\ell f_{j_\ell}\bigg) \\ &= \big(\inner_{s_1}(h_k) - h_k, \ldots, \inner_{s_n}( h_k) - h_k\big), \end{align*} where $\lambda_\ell \geq 0$, $\sum_{\ell=1}^L \lambda_\ell = 1$ and where $h_k \ov{\mathrm{def}}{=} \sum_{\ell=1}^L \lambda_\ell f_{j_\ell}$ is again a positive normalized element in $\L^1(G)$. We can suppose that $\norm{\inner_{s}(h_k) - h_k}_1 \leq \frac{1}{k}$ for any $s \in F$. Now write $h_k = h_{k,F}$, let $F$ vary in the set of finite subsets of $G$ directed by inclusion, so that $(h_{k,F})_{k,F}$ becomes a net in $\L^1(G)$ such that $\norm{\inner_s(h_{k,F}) - h_{k,F}}_1 \to 0$ as $(k,F) \to \infty$ for any $s \in G$. 4. $\Longrightarrow$ 1.: Note that we have an isometric inclusion $\L^1(G) \subseteq \L^\infty(G)^*$. Consider a weak* cluster point $m \in \L^\infty(G)^*$ of this net, which is positive and clearly satisfies $m(1)=1$. For any $f \in \L^\infty(G)$ and any $s \in G$, we have \begin{align*} \MoveEqLeft \left|\la f_j, f \ra_{\L^1(G),\L^\infty(G)}-\big\la f_j,\inner_s f\big\ra_{\L^1(G),\L^\infty(G)}\right| =\left|\la f_j, f \ra-\big\la \inner_{s^{-1}} f_j,f \big\ra\right| =\left|\big\la f_j-\inner_{s^{-1}} f_j, f \big\ra \right| \\ &\leq \norm{f_j-\inner_{s^{-1}} f_j}_{\L^1(G)} \norm{f}_{\L^\infty(G)}\xra[j]{} 0. \end{align*} With a $\frac{\epsi}{3}$-argument we conclude that \eqref{inner-mean} is satisfied. 3. $\Longrightarrow$ 2.: Let $F$ be a finite subset of $G$. According to the assumption, for any $j$ there exists a subset $V_j^F$ such that $\frac{\mu(V_j^F \Delta (s^{-1}V_j^Fs))}{\mu(V_j^F)} \leq \frac{1}{j}$ for any $s \in F$. Putting $f_j^F \ov{\mathrm{def}}{=} 1_{V_j^F}$ and using \eqref{Indicator-formula}, we obtain from Definition \ref{def-IF} that $\norm{f_j^F - \inner_s f_j^F}_1 / \norm{f_j^F}_1 \leq \frac{1}{j}$ for any $s \in F$. Directing the subsets $F$ by inclusion and the $j$ in the usual manner, we obtain a net of positive functions $f_j^F$ in $\L^1(G)$ as in \eqref{asymt-central}. \noindent 2. $\Longrightarrow$ 4.: Using a normalization, it suffices to see that by the elementary inequality $\int_G \big||f|-|g|\big| \leq \int_G |f-g|$, the $f_j$'s in \eqref{asymt-central} can be chosen positive. \noindent 4. $\Longrightarrow$ 5.: Let $(f_j)_j$ be the net as in 4. For $\epsi > 0$, choose some $f_{j,\epsi} \in \C_c(G)$ such that $f_{j,\epsi} \geq 0$, and $\norm{f_{j,\epsi} - f_j}_1 < \epsi$. Since the map $\inner_s \co \L^1(G) \to \L^1(G)$ is isometric, for any $s \in G$ we have \[ \norm{f_{j,\epsi} - \inner_s f_{j,\epsi}}_1 \leq \norm{f_{j,\epsi}-f_j}_1 + \norm{f_j - \inner_s f_j}_1 + \norm{\inner_s f_j - \inner_s f_{j,\epsi}}_1 \leq \epsi + \norm{f_j - \inner_s f_j}_1 + \epsi. \] We can suppose that $\int_G f_{j,\epsi} \d\mu = 1$. Replacing the index $j$ by $(j,\epsi)$ and equipping it with the suitable order, we obtain a net $(f_{j,\epsi})_{j,\epsi}$ of positive normalized continuous functions with compact support with the convergence property from the fourth point. We may and do thus assume now that the net $(f_j)_j$ in the fourth point consists of continuous functions with compact support.
For any $j$, put now $g_j \ov{\mathrm{def}}{=} f_j \ast \check{f}_j$ where $\check{f}_j (s) \ov{\mathrm{def}}{=} f_j(s^{-1})$. For any $s \in G$ we have \begin{equation} \label{Def-de-g} g_j(s) =(f_j \ast \check{f}_j)(s) \ov{\eqref{Convolution-formulas}}{=} \int_G f_j(st^{-1})\check{f}_j(t) \d \mu(t) =\int_G f_j(sr)f_j(r) \d \mu(r) \geq 0. \end{equation} Then for any $j$ \begin{align*} \MoveEqLeft \norm{g_j}_1 =\int_G g_j(s) \d \mu(s) \ov{\eqref{Def-de-g}}{=} \int_G \int_G f_j(sr) f_j(r) \d\mu(r) \d\mu(s) =\int_G \bigg(\int_G f_j(sr) \d\mu(s)\bigg) f_j(r) \d\mu(r) \\ &=\int_G \bigg(\int_G f_j(u) \d\mu(u)\bigg) f_j(r) \d\mu(r) = \norm{f_j}_1^2 = 1. \end{align*} Moreover, for any $t \in G$, we have \begin{equation} \label{divers-33455} (\inner_t g_j)(s) \ov{\eqref{def-conj-functions}}{=} g_j(t^{-1}st) \ov{\eqref{Def-de-g}}{=} \int_G f_j(t^{-1}st r) f_j(r) \d\mu(r) = \int_G f_j(t^{-1}srt) f_j(t^{-1}rt) \d\mu(r). \end{equation} Thus we obtain \begin{align*} \MoveEqLeft \norm{g_j - \inner_t g_j}_1 =\int_G |g_j(s) - (\inner_t g_j)(s)| \d\mu(s) \\ &\ov{\eqref{Def-de-g} \eqref{divers-33455}}{=} \int_G \left| \int_G f_j(sr) f_j(r) - f_j(t^{-1}srt) f_j(t^{-1}rt) \d\mu(r) \right| \d\mu(s) \\ & \leq \int_G \left| \int_G f_j(sr) [f_j(r) - f_j(t^{-1}rt) ] \d\mu(r) \right| + \left| \int_G [ f_j(sr) - f_j(t^{-1}srt) ] f_j(t^{-1}rt) \d\mu(r) \right| \d\mu(s) \\ &\leq \int_{G \times G} f_j(sr) |f_j(r) - f_j(t^{-1}rt)| \d\mu(r)\d\mu(s) \\ &+ \int_{G \times G} | f_j(sr) - f_j(t^{-1}srt)|f_j(t^{-1}rt) \d\mu(r) \d\mu(s) \\ &\leq \norm{f_j}_1 \norm{f_j - \inner_t f_j}_1 + \norm{f_j - \inner_t f_j}_1 \norm{\inner_t f_j}_1 = 2 \norm{f_j - \inner_t f_j}_1. \end{align*} This shows that the $g_j$'s have the same normalisation and convergence property as the $f_j$'s. Moreover, by \cite[p.~281]{HeR70} the $g_j$'s are continuous positive definite functions with compact support. \noindent 5. $\Longrightarrow$ 6.: Let $F = \{ s_1, \ldots, s_n \}$ be a finite subset of $G$ and $\epsi > 0$. According to the fifth point and \eqref{asymt-central}, choose some positive definite functions $f_j \in \C_c(G)$ such that $\norm{f_j - \inner_s f_j}_1 / \norm{f_j}_1 < \epsi / \card F$ for all $s \in F$. Using Lemma \ref{Lemma-CPPR} with $n = \card F$, $f = f_j$ and $g_k = \inner_{s_k^{-1}} f_j$ and the subset $V \ov{\mathrm{def}}{=} \{ f_j> t \}$ of $G$, we deduce that for some suitable $t > 0$, \begin{align*} \MoveEqLeft \sum_{s \in F} \mu\big(V \Delta(s^{-1} V s)\big) = \sum_{s \in F} \mu\big(\{f_j > t\} \Delta \{\inner_{s^{-1}} f_j > t\}\big) \ov{\eqref{Equa-CPPR}}{<} \epsi \mu(\{f_j > t\}) = \epsi \mu(V). \end{align*} Therefore, the group $G$ is inner F\o{}lner. Moreover, since $f_j$ is continuous, $V$ is an open subset of $G$. Furthermore, since the function $f_j$ is positive definite, we have $\norm{f_j}_\infty = f_j(e_G)$ by \cite[p.~23]{KaL18}. We deduce that $e_G$ belongs to $V$ since otherwise we would have $V = \emptyset$ and the previous strict inequality could not hold. Finally, by \cite[Proposition 1.4.16 (ii) p.~22]{KaL18}, we have $f_j=\check{f_j}$ since $f_j \geq 0$. We conclude that $V$ is symmetric. \noindent 6. $\Longrightarrow$ 3.: This implication is trivial. We turn to the last sentence of the statement.
So we assume that $G$ is an inner F\o{}lner group: for any finite subset $F$ of $G$ there exists a net $(V_\alpha^F)_\alpha$ of measurable subsets of $G$ such that $\mu(V_\alpha^F) \in (0,\infty)$, with the property that for all $s \in F$, \begin{equation} \label{equ-comment-11122024} \frac{\mu(V_\alpha^F \Delta (s^{-1}V_\alpha^Fs))}{\mu(V_\alpha^F)} \xra[\alpha \to \infty]{} 0. \end{equation} We will construct a sequence $(W_j^F)_j$, indexed by $j \in \N$, that satisfies the same convergence property \eqref{equ-comment-11122024} as the $(V_\alpha^F)_\alpha$. Start by putting $\epsi = 1$. By \eqref{equ-comment-11122024}, for all $s \in F$, there exists some $\alpha(1,s)$ such that if $\alpha \geq \alpha(1,s)$, then \[ \frac{\mu(V_\alpha^F \Delta (s^{-1}V_\alpha^Fs))}{\mu(V_\alpha^F)} \leq 1 . \] Choose some $\alpha(1) \geq \alpha(1,s)$ for all $s \in F$ (directed set property) and put $W_1^F \ov{\mathrm{def}}{=} V_{\alpha(1)}^F$. Now, let $\epsi = \frac12$. Again by \eqref{equ-comment-11122024}, for all $s \in F$, there exists some $\alpha(\frac12,s)$ such that if $\alpha \geq \alpha(\frac12,s)$, then \[ \frac{\mu(V_\alpha^F \Delta (s^{-1}V_\alpha^Fs))}{\mu(V_\alpha^F)} \leq \frac12 . \] Choose some $\alpha(\frac12) \geq \alpha(\frac12,s)$ for all $s \in F$ and put $W_2^F \ov{\mathrm{def}}{=} V_{\alpha(\frac12)}^F$. Continue with $\epsi = \frac14,\frac18,\ldots$ and obtain a sequence of subsets $W_j^F \ov{\mathrm{def}}{=} V_{\alpha(\frac{1}{2^{j-1}})}^F$ such that for all $s \in F$ we have \[ \frac{\mu(W_j^F \Delta (s^{-1}W_j^Fs))}{\mu(W_j^F)} = \frac{\mu(V_{\alpha(\frac{1}{2^{j-1}})}^F \Delta (s^{-1}V_{\alpha(\frac{1}{2^{j-1}})}^Fs))}{\mu(V_{\alpha(\frac{1}{2^{j-1}})}^F)} \leq \frac{1}{2^{j-1}}. \] For any $s \in F$, we infer that \[ \frac{\mu(W_j^F \Delta (s^{-1}W_j^Fs))}{\mu(W_j^F)} \xra[j \to \infty]{} 0. \] \end{proof} \section{Projections on the space of completely bounded Fourier multipliers} \label{Sec-complementation} \subsection{Preliminaries} \label{Sec-prel-complet} \paragraph{Hilbert-Schmidt operators} Let $\Omega$ be a $\sigma$-finite measure space. We will use the space $S^\infty_\Omega \ov{\mathrm{def}}{=} S^\infty(\L^2(\Omega))$ of compact operators, its dual $S^1_\Omega$ and the space $\cal{B}(\L^2(\Omega))$ of bounded operators on the complex Hilbert space $\L^2(\Omega)$. If $f \in \L^2(\Omega \times \Omega)$, we denote the associated Hilbert-Schmidt operator by \begin{equation} \label{Def-de-Kf} \begin{array}{cccc} K_f \co & \L^2(\Omega) & \longrightarrow & \L^2(\Omega) \\ & \xi & \longmapsto & \int_{\Omega} f(\cdot,y)\xi(y) \d y \\ \end{array}. \end{equation} Using the notation $\check{f}(x,y) \ov{\mathrm{def}}{=} f(y,x)$, we have $(K_f)^*=K_{\check{\ovl{f}}}$. Note that the linear map $\L^2(\Omega \times \Omega) \to S^2_\Omega$, $f \mapsto K_f$ is an isometry from the Hilbert space $\L^2(\Omega \times \Omega)$ onto the Hilbert space $S^2_\Omega$ of Hilbert-Schmidt operators acting on the Hilbert space $\L^2(\Omega)$. Moreover, we have \begin{equation} \label{dual-trace} \tr(K_f K_g) =\int_{\Omega \times \Omega} f \check{g}, \quad f,g \in \L^2(\Omega \times \Omega). \end{equation} \paragraph{Schur multipliers acting on $S^p_\Omega$} Suppose that $1 \leq p \leq \infty$.
We say that a measurable function $\varphi \co \Omega \times \Omega \to \mathbb{C}$ induces a bounded Schur multiplier on the Schatten class $S^p_\Omega \ov{\mathrm{def}}{=} S^p(\L^2(\Omega))$ if for any $f \in \L^2(\Omega \times \Omega)$ satisfying $K_f \in S^p_\Omega$ we have $K_{\varphi f} \in S^p_\Omega$ and if the map $S^2_\Omega \cap S^p_\Omega \to S^p_\Omega$, $K_f \mapsto K_{\varphi f}$ extends to a bounded map $M_\varphi$ from $S^p_\Omega$ into $S^p_\Omega$ called the Schur multiplier associated with $\varphi$. It is well-known \cite[Remark 1.4 p.~77]{LaS11} that in this case $\varphi \in \L^\infty(\Omega \times \Omega)$ and that \begin{equation} \label{ine-infty} \norm{\varphi}_{\L^\infty(\Omega \times \Omega)} \leq \norm{M_\varphi}_{S^p_\Omega \to S^p_\Omega}. \end{equation} We denote by $\mathfrak{M}_{\Omega}^{p}$ the space of bounded Schur multipliers on $S^p_\Omega$ and by $\mathfrak{M}_{\Omega}^{p,\cb}$ the subspace of completely bounded ones. \paragraph{Schur multipliers acting on $\cal{B}(\L^2(\Omega))$} We say that a function $\varphi \in \L^\infty(\Omega \times \Omega)$ induces a Schur multiplier on $\cal{B}(\L^2(\Omega))$ if the map $S^2_\Omega \mapsto \cal{B}(\L^2(\Omega))$, $K_{f} \mapsto K_{\varphi f}$ induces a bounded operator from $S^\infty_\Omega$ into $\cal{B}(\L^2(\Omega))$. In this case, the operator $S^\infty_\Omega \mapsto \cal{B}(\L^2(\Omega))$, $K_{f}\mapsto K_{\varphi f}$ admits by \cite[Lemma A.2.2 p.~360]{BlM04} a unique weak* extension $M_\varphi \co \cal{B}(\L^2(\Omega)) \to \cal{B}(\L^2(\Omega))$ called the Schur multiplier associated with $\varphi$. It is known that $M_\varphi$ induces a bounded map $M_\varphi \co S^p_\Omega \to S^p_\Omega$ for any $1 \leq p \leq \infty$. We refer to the surveys \cite{ToT10} and \cite{Tod15} for more information. See also the papers \cite{Arh24} and \cite{Spr04}. \begin{example} \normalfont If the set $\Omega=\{1,\ldots,n\}$ is equipped with the counting measure, we can identify the space $\cal{B}(\L^2(\Omega))$ with the matrix algebra $\M_n$. Then each operator $K_{f}$ identifies to the matrix $[f(i,j)]$. A Schur multiplier is given by a map $M_\varphi \co \M_n \to \M_n$, $[f(i,j)] \mapsto [\varphi(i,j)f(i,j)]$. \end{example} By \cite[Proposition 4.3]{Arh24}, the map $S^2_\Omega \to S^2_\Omega$, $K_f \mapsto K_{\check{f}}$ extends to an involutive normal $*$-antiautomorphism $R \co \cal{B}(\L^2(\Omega)) \to \cal{B}(\L^2(\Omega))$. We introduce the following duality bracket \begin{equation} \label{Duality-bracket} \langle z,y \rangle_{\cal{B}(\L^2(\Omega)), S^1_\Omega} \ov{\mathrm{def}}{=} \tr(R(z)y), \quad z \in \cal{B}(\L^2(\Omega)), y \in S^1_\Omega, \end{equation} which is more suitable than the bracket $\langle z,y \rangle= \tr(zy)$ since we have \begin{equation} \label{auto-adjoint} \big\langle M_{\varphi}(z),y \big\rangle_{\cal{B}(\L^2(\Omega)), S^1_\Omega} =\big\langle z,M_{\varphi}(y) \big\rangle_{\cal{B}(\L^2(\Omega)), S^1_\Omega}, \quad z \in \cal{B}(\L^2(\Omega)), y \in S^1_\Omega \end{equation} for any Schur multiplier $M_\varphi$ and since the operator space duality requires taking the opposite structure into account. \paragraph{Herz-Schur multipliers} Let $G$ be a (second-countable) unimodular locally compact group. Following \cite[p.~179]{Spr04}, a bounded Schur multiplier $M_\varphi \co \cal{B}(\L^2(G)) \to \cal{B}(\L^2(G))$ is a Herz-Schur multiplier if for any $r \in G$ we have $\varphi(sr,t)=\varphi(s,tr^{-1})$ for marginally almost all $(s,t)$ in $G \times G$. 
We define similarly the notion of Herz-Schur multiplier on $S^p_G$. We denote by $\frak{M}^{p,\cb,\HS}_G$ the subspace of $\frak{M}^{p,\cb}_G$ of completely bounded Herz-Schur multipliers. We define similarly $\frak{M}^{p,\HS}_G$. If $\varphi \co G \to \mathbb{C}$, we introduce the function $\varphi^\HS \co G \times G \to \mathbb{C}$, $(s,t) \mapsto \varphi(st^{-1})$. By \cite{BoF84} and \cite[Theorem 5.3 p.~181]{Spr04}, the linear map $\frak{M}^{\infty,\cb}(G) \to \frak{M}^{\infty,\cb,\HS}_G=\frak{M}^{\infty,\HS}_G$, $M_\varphi \mapsto M_{\varphi^\HS}$ is a surjective isometry. We let $M_{\varphi}^{\HS} \ov{\mathrm{def}}{=} M_{\varphi^\HS}$. \paragraph{Plancherel weights} Let $G$ be a locally compact group. A function $g \in \L^2(G)$ is called left bounded \cite[Definition 2.1]{Haa78b} if the convolution operator $\lambda(g) \co f\mapsto g*f$, defined for $f \in \C_c(G)$, induces a bounded operator on the Hilbert space $\L^2(G)$. The Plancherel weight $\tau_G \co \VN(G)^+\to [0,\infty]$ is\footnote{\thefootnote. This is the natural weight associated with the left Hilbert algebra $\C_c(G)$.} defined by the formula $$ \tau_G(x) = \begin{cases} \norm{g}^2_{\L^2(G)} & \text{if }x^{\frac{1}{2}}=\lambda(g) \text{ for some left bounded function } g \in \L^2(G)\\ +\infty & \text{otherwise} \end{cases}. $$ By \cite[Proposition 2.9 p.~129]{Haa78b} (see also \cite[Theorem 7.2.7 p.~236]{Ped79}), the canonical left ideal $\mathfrak{n}_{\tau_G}=\big\{x \in \VN(G)\ : \ \tau_G(x^*x) <\infty\big\}$ is given by $$ \mathfrak{n}_{\tau_G} =\big\{\lambda(g)\ :\ g \in \L^2(G)\text{ is left bounded}\big\}. $$ Recall that $\mathfrak{m}_{\tau_G}^+$ denotes the set $\big\{x \in \VN(G)^+ : \tau_G(x)<\infty\big\}$ and that $\mathfrak{m}_{\tau_G}$ is the complex linear span of $\mathfrak{m}_{\tau_G}^+$, which is a two-sided ideal of the group von Neumann algebra $\VN(G)$. By \cite[Proposition 2.9 p.~129]{Haa78b} and \cite[Proposition p.~280]{Str81}, we have $$ \mathfrak{m}_{\tau_G}^+ =\big\{\lambda(g) : g \in \L^2(G) \text{ continuous and left bounded}, \ \lambda(g)\geq 0\big\}. $$ By \cite[Proposition 7.2.8 p.~237]{Ped79}, the Plancherel weight $\tau_G$ on the von Neumann algebra $\VN(G)$ is tracial if and only if the locally compact group $G$ is unimodular, which means that the left Haar measure of $G$ and the right Haar measure of $G$ coincide. From now on, we suppose that the locally compact group $G$ is unimodular. We will use the involution $f^*(t) \ov{\mathrm{def}}{=} \ovl{f(t^{-1})}$. By \cite[Theorem 4 p.~530]{Kun58}, if the functions $f,g \in \L^2(G)$ are left bounded then $f*g$ and $f^*$ are left bounded and we have \begin{equation} \label{composition-et-lambda} \lambda(f)\lambda(g) =\lambda(f*g) \quad \text{and} \quad \lambda(f)^*=\lambda(f^*). \end{equation} If $f,g \in \L^2(G)$ it is well-known \cite[VIII pp.~39-40]{Bou04b} that the function $f*g$ is continuous and that we have $(f*g)(e)=(g*f)(e)=\int_G \check{g} f \d\mu_G$, where $e$ denotes the identity element of $G$ and where $\check{g}(s) \ov{\mathrm{def}}{=} g(s^{-1})$. By \cite[(4) p.~282]{StZ75}, if $f,g \in \L^2(G)$ are left bounded, the operator $\lambda(g)^*\lambda(f)$ belongs to $\mathfrak{m}_{\tau_G}$ and we have the fundamental <<noncommutative Plancherel formula>> \begin{equation} \label{Formule-Plancherel} \tau_G\big(\lambda(g)^*\lambda(f)\big) =\langle g,f\rangle_{\L^2(G)}, \quad \text{which gives} \quad \tau_G\big(\lambda(g)\lambda(f)\big) =\int_G \check{g} f \d\mu_G =(g*f)(e).
\end{equation} In particular, this formula can be used with any functions belonging to the space $\L^1(G) \cap \L^2(G)$. If we introduce the subset $ \C_e(G) \ov{\mathrm{def}}{=} \Span\big\{g^* * f : g,f \in \L^2(G)\text{ left bounded}\big\} $ of the space $\C(G)$ considered in \cite[p.~238]{Ped79}, then we have \begin{equation} \label{Def-mtauG} \mathfrak{m}_{\tau_G} =\lambda(\C_e(G)). \end{equation} In this context, $\tau_G$ can be interpreted as the functional that evaluates functions of $\C_e(G)$ at the identity element $e_G$. While the formula $\tau_G(\lambda(h)) = h(e)$ appears meaningful for every function $h$ in $\C_c(G)$, we caution the reader that, in general, it is not true that $\lambda(\C_c(G)) \subset \mathfrak{m}_{\tau_G}$. Unfortunately, this misconception is frequently encountered in the literature. \paragraph{Noncommutative $\L^p$-spaces} In this paper, we focus on noncommutative $\L^{p}$-spaces associated to semifinite von Neumann algebras. Let $\cal{M}$ be a semifinite von Neumann algebra equipped with a normal semifinite faithful trace $\tau$. Let $\cal{S}^{+}$ be the set of all $x \in \cal{M}_{+}$ such that $\tau(\supp(x))<\infty$, where $\supp(x)$ denotes the support of $x$. Let $\cal{S}$ be the linear span of $\cal{S}^{+}$, then $\cal{S}$ is weak* dense $*$-subalgebra of $\cal{M}$. Suppose that $1 \leq p < \infty$. For any $x \in \cal{S}$, the operator $\vert x\vert^p$ belongs to $\cal{S}_+$ and we set \begin{equation} \label{Def-norm-Lp} \norm{x}_{\L^p(\cal{M})} \ov{\mathrm{def}}{=} \bigl(\tau(\vert x\vert^p)\bigr)^{\frac{1}{p}}. \end{equation} Here $\vert x \vert \ov{\mathrm{def}}{=}(x^*x)^{\frac{1}{2}}$ denotes the modulus of $x$. It turns out that $\norm{\cdot}_{\L^p(\cal{M})}$ is a norm on $\cal{S}$. By definition, the noncommutative $\L^p$-space $\L^p(\cal{M})$ associated with $(\cal{M},\tau)$ is the completion of $(\cal{S},\norm{\cdot}_{\L^p(\cal{M})})$. For convenience, we also set $\L^{\infty}(\cal{M}) \ov{\mathrm{def}}{=} \cal{M}$ equipped with its operator norm. Note that by definition, $\L^p(\cal{M}) \cap \cal{M}$ is dense in $\L^p(\cal{M})$ for any $1 \leq p < \infty$. See \cite{PiX03} for more information on noncommutative $\L^{p}$-spaces. Furthermore, the trace $\tau$ uniquely extends to a bounded linear functional on the Banach space $\L^1(\cal{M})$, still denoted by $\tau$. Actually, we have \begin{equation} \label{trace-continuity} \vert\tau(x)\vert \leq \norm{x}_{\L^1(\cal{M})}, \quad x \in \L^1(\cal{M}). \end{equation} Recall the noncommutative H\"older's inequality. If $1 \leq p,q,r \leq \infty$ satisfy $\frac{1}{r}=\frac{1}{p}+\frac{1}{q}$ then \begin{equation} \label{Holder} \norm{xy} _{\L^r(\cal{M})} \leq \norm{x}_{\L^p(\cal{M})} \norm{y}_{\L^q(\cal{M})},\qquad x\in \L^p(\cal{M}), y \in \L^q(\cal{M}). \end{equation} For any $1 \leq p < \infty$, let $p^* \ov{\mathrm{def}}{=} \frac{p}{p-1}$ be the conjugate number of $p$. Applying \eqref{Holder} with $q=p^*$ and $r=1$ together with \eqref{trace-continuity}, we obtain a linear map $\L^{p^*}(\cal{M}) \to (\L^p(\cal{M}))^*$, $y \mapsto \tau(xy)$, which induces an isometric isomorphism \begin{equation} (\L^p(\cal{M}))^* =\L^{p^*}(\cal{M}),\qquad 1 \leq p <\infty,\quad \frac{1}{p} +\frac{1}{p^*} =1. \end{equation} In particular, we may identify the Banach space $\L^1(\cal{M})$ with the unique predual $\cal{M}_*$ of the von Neumann algebra $\cal{M}$. \paragraph{Operator theory} Suppose that $1 \leq p < \infty$. Let $T \co \L^p(\cal{M}) \to \L^p(\cal{M})$ be any bounded operator. 
We will denote by $T^{*}$ the adjoint of $T$ defined by $$ \tau(T(x)y) =\tau(xT^*(y)),\qquad x\in \L^p(\cal{M}), y\in \L^{p^*}(\cal{M}). $$ For any $1 \leq p \leq \infty$ and any $T \co \L^p(\cal{M}) \to \L^p(\cal{M})$, we can consider the map $T^{\circ} \co \L^p(\cal{M}) \to \L^p(\cal{M})$ defined by \begin{equation} \label{2circ} T^\circ(x) \ov{\mathrm{def}}{=} T(x^{*})^{*},\qquad x \in \L^p(\cal{M}). \end{equation} If $p=2$ and if we denote by $T^{\dag} \co \L^2(\cal{M}) \to \L^2(\cal{M})$ the adjoint of $T\co \L^2(\cal{M}) \to \L^2(\cal{M})$ in the usual sense of Hilbertian operator theory, that is $$ \tau\bigl(T(x)y^{*}\bigr) =\tau\bigl(x(T^{\dag}(y))^{*}\bigr),\qquad x, y \in \L^2(\cal{M}), $$ we see that \begin{equation} \label{2dual4} T^{\dag} = T^{*\circ}. \end{equation} \subsection{Overview of the method} \label{Sec-approach} Suppose that $1 \leq p \leq \infty$ and let $G$ be a locally compact group. In this section, we present an approach for obtaining some bounded projections $P_G^p \co \CB(\L^p(\VN(G))) \to\CB(\L^p(\VN(G)))$ onto the subspace $\mathfrak{M}^{p,\cb}(G)$ of completely bounded Fourier multipliers on $\L^p(\VN(G))$, beyond the case of discrete groups, for suitable locally compact groups. The methods are different from the ones of \cite{ArK23} and complement the results of that paper. If $G$ is a locally compact group, we will use the fundamental unitary $W \co \L^2 (G \times G) \to \L^2(G \times G)$ in $\cal{B}(\L^2(G)) \otvn \VN(G)$ and its inverse $W^{-1}$ defined in \cite[Example 2.2.10 p.~26]{Vae01} (see also \cite[Remark 5.16 p.~150]{Kus05}) by \begin{equation} \label{Def-fund-unitary} (W\xi)(s,t) \ov{\mathrm{def}}{=} \xi(s,s^{-1}t), \quad (W^{-1}\xi)(s,t)= \xi(s,st), \quad s,t \in G, \xi \in \L^2(G \times G). \end{equation} Before going into the details, let us briefly present the roadmap of the proofs of the results of Section \ref{Sec-Th-complementation}. Suppose that the group $G$ is discrete and recall the well-known construction. Consider the coproduct $\Delta \co \VN(G) \to \VN(G) \otvn \VN(G)$, $\lambda_s \mapsto \lambda_s \ot \lambda_s$. This \textit{trace preserving} normal unital injective $*$-homomorphism extends to a completely positive isometric map $\Delta_p \co \L^p(\VN(G)) \to \L^p(\VN(G) \otvn \VN(G))$ for any $1 \leq p \leq \infty$. With the adjoint $(\Delta_{p^*})^* \co \L^p(\VN(G) \otvn \VN(G)) \to \L^p(\VN(G))$, the map $P_G^p \co \CB(\L^p(\VN(G))) \to \CB(\L^p(\VN(G)))$ defined by \begin{equation} \label{Projection-discrete-case} P_G^p(T) =(\Delta_{p^*})^* (\Id_{\L^p(\VN(G))} \ot T)\Delta_p, \quad T \in \CB(\L^p(\VN(G))) \end{equation} is a contractive projection from the Banach space $\CB(\L^p(\VN(G)))$ onto the subspace $\mathfrak{M}^{p,\cb}(G)$ of completely bounded Fourier multipliers acting on $\L^p(\VN(G))$, which preserves the complete positivity (in the case $p=\infty$ replace $\CB(\L^p(\VN(G)))$ by the space $\CB_{\w^*}(\VN(G))$). By \cite[p.~26]{Vae01} and \cite[p.~267]{Str74}, we can factorize\footnote{\thefootnote. Indeed, this factorization is the definition of the coproduct.} the coproduct as $$ \Delta(x) =W(x \ot 1)W^{-1}, \quad x \in \VN(G).
$$ If $u,v \in \VN(G)$, we can therefore rewrite the formula \eqref{Projection-discrete-case} as $$ \big\langle P_G^p(T)u,v \big\rangle_{\L^{p}(\VN(G)),\L^{p^*}(\VN(G))} =\big\langle (\Id \ot T)\Delta_p(u),\Delta_{p^*}(v) \big\rangle_{\L^p,\L^{p^*}} $$ and finally \begin{equation} \label{Magic-equa-1} \big\langle P_G^p(T)u,v \big\rangle =\big\langle (\Id \ot T)(W (u \ot 1)W^{-1}),W (v\ot 1)W^{-1} \big\rangle_{\L^p,\L^{p^*}}. \end{equation} Now, if $G$ is a (second-countable unimodular) locally compact group and if $T \co \L^p(\VN(G)) \to \L^p(\VN(G))$ is again a completely bounded map, we wish to replace one or both units 1 of the formula \eqref{Magic-equa-1} by suitable sequences $(x_j)$ and $(y_j)$ of elements which approximate 1 in some sense. Actually, we start by replacing in \eqref{Magic-equa-1} the elements 1 by elements $x,y \in \L^1(\VN(G)) \cap \VN(G)$ and $u,v$ by elements $u \in S^p_G$ and $v \in S^{p^*}_G$. We will show that there exists a completely bounded Schur multiplier $M_{x,y,T} \co S^p_G \to S^p_G$ (replace the Schatten class $S^p_G$ by the von Neumann algebra $\cal{B}(\L^2(G))$ if $p=\infty$) such that \begin{equation} \label{MxyT} \big\langle M_{x,y,T}(u),v\big\rangle_{S^p_G, S^{p^*}_G} =\big\langle (\Id \ot T)(W (u \ot x)W^{-1}),W (v \ot y)W^{-1} \big\rangle_{S^p_G(\L^p(\VN(G))),S^{p^*}_G(\L^{p^*}(\VN(G)))} \end{equation} for any suitable elements $u \in S^p_G$ and $v \in S^{p^*}_G$. Note that $x \in \L^p(\VN(G))$, $y \in \L^{p^*}(\VN(G))$ and that $W,W^{-1} \in \cal{B}(\L^2(G)) \otvn \VN(G)$. Moreover, we will compute the symbol $\varphi_{x,y,T}$, belonging to $\L^\infty(G \times G)$, of the Schur multiplier $M_{x,y,T}$ and we will get \begin{equation} \label{symbol-phixyT} \varphi_{x,y,T}(s,t) =\tau_G\big(\lambda_ty \lambda_{s^{-1}} T(\lambda_s x \lambda_{t^{-1}}) \big) \quad s,t \in G. \end{equation} In the particular case of \textit{finite} groups, these assertions are straightforward and we refer to the end of this section for a short proof of \eqref{MxyT} and \eqref{symbol-phixyT}. For the case of a locally compact group, this step unfortunately uses a painful approximation procedure described in Section \ref{Mappings} relying on a sequence $(M_{\phi_n})$ of completely bounded Fourier multipliers $M_{\phi_n} \co \L^p(\VN(G)) \to \L^2(\VN(G))$ which allows us to consider the completely bounded maps $M_{\phi_n}T \co \L^p(\VN(G)) \to \L^2(\VN(G))$ in order to reduce the problem to the level $p=2$. We therefore obtain a map $P_{x,y} \co \CB(\L^p(\VN(G))) \to \CB(S^p_G)$, $T \mapsto M_{x,y,T}$ and it is easy to check that this map preserves the complete positivity. Introducing suitable sequences $(x_j)$ and $(y_j)$ of elements in $\L^1(\VN(G)) \cap \VN(G)$ which approximate the element 1 we obtain a sequence $(P_j)$ of linear maps $P_j \ov{\mathrm{def}}{=} P_{x_j,y_j} \co \CB(\L^p(\VN(G))) \to \CB(S^p_G)$. One of the difficulties in this area is to construct suitable sequences with the chosen assumptions on the group $G$. Essentially, in the sequel we capture a cluster point of the bounded family $(P_j)$ and we obtain a bounded map $P^{(1)} \co \CB(\L^p(\VN(G))) \to \CB(S^p_G)$. Each map $P^{(1)}(T)$ is a completely bounded Schur multiplier. 
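The reader may find it helpful to check the finite-group identities announced above numerically. The following short sketch (in Python with numpy; the choice of the cyclic group, the variable names and the helper functions are ours and serve only as an illustration, not as part of the argument) verifies the conjugation formula $W (e_{st} \ot \lambda_u)W^{-1} = e_{st} \ot \lambda_{sut^{-1}}$ of \eqref{calcul-890}, established at the end of this subsection, for the cyclic group $G = \mathbb{Z}/n$ written additively, where $sut^{-1}$ becomes $s+u-t$.
\begin{verbatim}
import itertools
import numpy as np

n = 5  # the cyclic group Z/n, written additively

def lam(u):
    # left translation lambda_u on l^2(Z/n): lambda_u e_a = e_{a+u}
    return np.roll(np.eye(n, dtype=int), u, axis=0)

def unit(s, t):
    # matrix unit e_{st} = e_s e_t^* acting on l^2(Z/n)
    E = np.zeros((n, n), dtype=int)
    E[s, t] = 1
    return E

# fundamental unitary W(e_t (x) e_r) = e_t (x) e_{t+r} on l^2(Z/n x Z/n)
W = np.zeros((n * n, n * n), dtype=int)
for t, r in itertools.product(range(n), repeat=2):
    W[t * n + (t + r) % n, t * n + r] = 1
W_inv = W.T  # W is a permutation matrix, so its inverse is its transpose

# check W (e_{st} (x) lambda_u) W^{-1} = e_{st} (x) lambda_{s+u-t}
for s, t, u in itertools.product(range(n), repeat=3):
    lhs = W @ np.kron(unit(s, t), lam(u)) @ W_inv
    rhs = np.kron(unit(s, t), lam((s + u - t) % n))
    assert np.array_equal(lhs, rhs)
print("conjugation formula verified for n =", n)
\end{verbatim}
Of course, this is only a sanity check in the simplest possible situation; the general locally compact case requires the approximation procedure of Section \ref{Mappings}.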
\paragraph{Case where $G$ is inner amenable and $p=\infty$} With a \textit{suitable} choice of the sequences $(x_j)$ and $(y_j)$ provided by the inner amenability of $G$, the map $P^{(1)} \co \CB_{\w^*}(\VN(G)) \to \CB(\cal{B}(\L^2(G)))$ is \textit{contractive} and the Schur multiplier $P^{(1)}(T) \co \cal{B}(\L^2(G)) \to \cal{B}(\L^2(G))$ is a \textit{Herz-Schur} multiplier for all weak* continuous completely bounded maps $T \co \VN(G) \to \VN(G)$. So, we can see the linear map $P^{(1)}$ as a map $P^{(1)} \co \CB_{\w^*}(\VN(G)) \to \mathfrak{M}^{\infty,\HS}_G=\mathfrak{M}^{\infty,\cb,\HS}_G$. Now, it suffices to identify (completely) bounded Herz-Schur multipliers acting on the space $\cal{B}(\L^2(G))$ isometrically with completely bounded Fourier multipliers acting on $\VN(G)$, while preserving the complete positivity. This step is well-known \cite{BoF84} and true for any locally compact group $G$ \textit{without} amenability assumption. Denoting by $I \co \mathfrak{M}^{\infty,\HS}_G \to \CB(\VN(G))$ the associated isometry with range $\mathfrak{M}^{\infty,\cb}(G)$, the final contractive projection will be $P_G^\infty \ov{\mathrm{def}}{=} I \circ P^{(1)}$. Indeed, in the case where $T = M_\phi \co \VN(G) \to \VN(G)$ is a Fourier multiplier we will prove that the symbol $\phi_{j,T}$ of the Schur multiplier $P_{j}(M_\phi) \co \cal{B}(\L^2(G)) \to \cal{B}(\L^2(G))$ is equal to the symbol $\phi^\HS \co (s,t) \mapsto \phi(st^{-1})$ for any $j$. By passing to the limit, $P^{(1)}(M_\phi) = M_{\phi^\HS}$ and finally $$ P_G^\infty(M_\phi) =I \circ P^{(1)}(M_\phi) = I\big(M_{\phi^\HS}\big) = M_\phi. $$ So we obtain the property $(\kappa_\infty)$ of Definition \ref{Defi-tilde-kappa} for these groups with constant $\kappa_\infty(G)=1$. \paragraph{Case where $G$ is finite-dimensional and amenable, simultaneously for $p=1$ and $p=\infty$} In the case where the group $G$ is in addition finite-dimensional and amenable, replacing the sequences $(x_j)$ and $(y_j)$ of the proof of the last case by new ones, we obtain linear maps $P_p^{(1)} \co \CB(\L^p(\VN(G))) \to \CB(S^p_G)$ for $p=1$ and $p=\infty$ (replace $\CB(\L^\infty(\VN(G)))$ by $\CB_{\w^*}(\VN(G))$ here and in the sequel), which we regard as maps $P_p^{(1)} \co \CB(\L^p(\VN(G))) \to \mathfrak{M}^{p,\cb}_G$. The cost of this replacement of sequences is the non-contractivity of $P_p^{(1)}$, but we obtain the compatibility of the maps $P_\infty^{(1)}(T)$ and $P_1^{(1)}(T)$. For the construction of the sequences $(x_j)$ and $(y_j)$, our approach relies on the structure theory of locally compact groups coming from the solution to Hilbert's fifth problem, which brings connected Lie groups into play, and on the use of Carnot-Carath\'eodory metrics on connected Lie groups. Now, we construct and use a contractive map $Q \co \mathfrak{M}^{p,\cb}_G \to \mathfrak{M}^{p,\cb,\HS}_G$ from the space $\mathfrak{M}^{p,\cb}_G$ of Schur multipliers onto the subspace of Herz-Schur multipliers, which preserves the complete positivity and fixes the Herz-Schur multipliers\footnote{\thefootnote. We can see $Q$ as a contractive projection $Q \co \mathfrak{M}^{p,\cb}_G \to \mathfrak{M}^{p,\cb}_G$ onto the subspace $\mathfrak{M}^{p,\cb,\HS}_G$ of completely bounded Herz-Schur multipliers.}. In this essentially folklore step, we need the amenability of the group $G$, in sharp contrast with our previous work \cite{ArK23}. Then put $P^{(2)}_p \ov{\mathrm{def}}{=} Q \circ P_p^{(1)} \co \CB(\L^p(\VN(G))) \to \mathfrak{M}^{p,\cb,\HS}_G$.
Now, using \cite{CaS15}, it suffices to identify completely bounded Herz-Schur multipliers isometrically with completely bounded Fourier multipliers, preserving the complete positivity. Denoting by $I \co \mathfrak{M}^{p,\cb,\HS}_G \to \CB(\L^p(\VN(G)))$ the associated isometry\footnote{\thefootnote. Actually, it is shown in \cite{CaS15} that the map $I$ is a contraction (when $G$ is amenable), which is an isometry on a large subspace.} with range $\mathfrak{M}^{p,\cb}(G)$, the final bounded projection will be $$ P_G^p \ov{\mathrm{def}}{=} I \circ P^{(2)}_p = I \circ Q \circ P^{(1)}_p. $$ In the case where $T = M_\phi$ is a Fourier multiplier, we will prove that the symbol $\phi_{j,T}$, an element of $\L^\infty(G \times G)$, of the completely bounded Schur multiplier $P_{j}(T)$ converges to the symbol $\phi^\HS \co (s,t) \mapsto \phi(st^{-1})$ for the weak* topology of the dual Banach space $\L^\infty(G \times G)$. From this, we deduce that the limit $P^{(1)}_p(M_\phi)$ of the sequence $(P_j(M_\phi))$ also admits the symbol $\phi^\HS$. We conclude that $$ P_G^p(M_\phi) =I \circ Q \circ P^{(1)}_p(M_\phi) =I \circ Q\big(M_{\phi^\HS}\big) =I \big(M_{\phi^\HS}\big) =M_\phi. $$ Hence we obtain the property $(\kappa)$ of Definition \ref{Defi-complementation-G} for these groups. For totally disconnected groups, the method gives the sharp result $\kappa(G)=1$. \paragraph{Case where $G$ is amenable and $1 < p < \infty$ with $\frac{p}{p^*}$ being rational} In the case where the group $G$ is amenable, using some sequences $(x_j)$ and $(y_j)$, we obtain a \textit{contractive} linear map $P_p^{(1)} \co \CB(\L^p(\VN(G))) \to \CB(S^p_G)$, which is stronger than the mere \textit{boundedness} obtained in the previous case, but only for \textit{one} value of $p$. The method is similar to the previous case but we use \cite{CaS15} (see also \cite{NeR11}) instead of \cite{BoF84} to identify completely bounded Herz-Schur multipliers isometrically with completely bounded Fourier multipliers (which requires the amenability of $G$ once again). \paragraph{Particular case of finite groups: proof of \eqref{MxyT} and \eqref{symbol-phixyT}} If the group $G$ is finite and if $(e_t)_{t \in G}$ denotes the canonical orthonormal basis of the Hilbert space $\ell^2_G$, then \eqref{Def-fund-unitary} translates to \begin{equation} \label{W-discret} W(e_t \ot e_r) =e_t \ot e_{tr}, \quad W^{-1}(e_t \ot e_r) =e_t \ot e_{t^{-1}r}, \quad t,r \in G. \end{equation} For any $i,j,s,t,u \in G$, we have \begin{align*} \MoveEqLeft W (e_{st} \ot \lambda_u)W^{-1}(e_i \ot e_j) \ov{\eqref{W-discret}}{=} W (e_{st} \ot \lambda_u)(e_i \ot e_{i^{-1}j}) =W\big(e_{st}e_i \ot \lambda_u(e_{i^{-1}j})\big) \\ &= \delta_{t=i}W\big(e_s \ot e_{ui^{-1}j}\big) \ov{\eqref{W-discret}}{=} \delta_{t=i} e_s \ot e_{sut^{-1}j}. \end{align*} Hence in $\cal{B}(\ell^2(G)) \otvn \VN(G)$, we have \begin{equation} \label{calcul-890} W (e_{st} \ot \lambda_u)W^{-1} = e_{st} \ot \lambda_{sut^{-1}}. \end{equation} We deduce that \begin{equation} \label{Equa-456} (\Id \ot T)\big(W (e_{st} \ot \lambda_u)W^{-1}\big) =e_{st} \ot T(\lambda_{sut^{-1}}).
\end{equation} We infer that \begin{align*} \MoveEqLeft (\tr \ot \tau_G)\big[(\Id \ot T)\big(W (e_{st} \ot \lambda_u)W^{-1}\big) (W (e_{ij} \ot \lambda_{r})W^{-1}) \big] \\ &\ov{\eqref{Equa-456}\eqref{calcul-890}}{=} (\tr \ot \tau_G)\big[(e_{st} \ot T(\lambda_{sut^{-1}})) (e_{ij} \ot \lambda_{irj^{-1}}) \big] \\ &=\tr(e_{st}e_{ij}) \tau_G\big(T(\lambda_{sut^{-1}})\lambda_{irj^{-1}} \big) =\delta_{t=i}\delta_{s=j}\tau_G\big(\lambda_{i}\lambda_{r}\lambda_{j^{-1}}T(\lambda_{sut^{-1}}) \big). \end{align*} By linearity, we deduce on the one hand for any $x \in \L^p(\VN(G))$ and any $y \in \L^{p^*}(\VN(G))$ $$ (\tr \ot \tau_G)\big[(\Id \ot T)\big(W (e_{st} \ot x)W^{-1}\big) (W (e_{ij} \ot y)W^{-1}) \big] =\delta_{t=i}\delta_{s=j}\tau_G\big(\lambda_{i}y\lambda_{j^{-1}}T(\lambda_{s} x \lambda_{t^{-1}}) \big). $$ On the other hand, if we consider the Schur multiplier $M_{x,y,T} \co S^p_G \to S^p_G$ with symbol \eqref{symbol-phixyT}, we have \begin{align*} \MoveEqLeft \big\langle M_{x,y,T}(e_{st} ),e_{ij}\big\rangle_{S^p_G, S^{p^*}_G} \ov{\eqref{symbol-phixyT}}{=} \delta_{t=i}\delta_{s=j} \tau_G\big(\lambda_ty \lambda_{s^{-1}} T(\lambda_s x \lambda_{t^{-1}}) \big). \end{align*} \begin{remark} \normalfont Note that with $x=y=1$, the Schur multiplier $M_{x,y,T}$ is a Herz-Schur multiplier. See Section \ref{Sec-Herz-Schur} for a generalization of this crucial observation. \end{remark} \subsection{Step 1: construction of the maps $P_j(T)$} \label{Mappings} In this section, we establish \eqref{MxyT} and \eqref{symbol-phixyT}. We caution the reader that while this part is technically involved, the underlying idea is quite simple. Specifically, we reduce the computation to the case $p = 2$, where Parseval's identity can be applied. Let $G$ be a unimodular locally compact group. We denote by $\tr_G$ and $\tau_G$ the canonical traces of the von Neumann algebras $\cal{B}(\L^2(G))$ and $\VN(G)$. Suppose that $G$ is second-countable and fix an orthonormal basis $(e_i)$ of the Hilbert space $\L^2(G)$ such that each function $e_i$ is continuous with compact support\footnote{\thefootnote. To demonstrate the existence of such an orthonormal basis, consider a sequence of continuous functions with compact support that is dense in $\L^2(G)$, and apply the Gram-Schmidt procedure.}. Note that by \cite[p.~40]{BlM04} we have a canonical identification $\cal{B}(\L^2(G)) \otvn \VN(G)=\M_\infty(\VN(G))$. That means that an element $X$ belonging to the von Neumann tensor product $\cal{B}(\L^2(G)) \otvn \VN(G)$ identifies with a matrix $[x_{ij}]$ with entries in the von Neumann algebra $\VN(G)$. For any $h \in \L^2(G)$ and any integer $k$, note that \begin{equation} \label{eval} \big(x_{kk}(h)\big)(w) =\int_G \big(X(e_k \ot h)\big)(s,w) \ovl{e_k(s)} \d \mu_G(s), \quad \text{a.e. } w \in G. \end{equation} If $1 \leq p <\infty$, we have by \cite{Pis98} a similar isometry $\L^p(\cal{B}(\L^2(G)) \otvn \VN(G))=S^p_G(\L^p(\VN(G)))$. Moreover, if $X$ belongs to the intersection $\L^1(\cal{B}(\L^2(G)) \otvn \VN(G)) \cap \big[\cal{B}(\L^2(G)) \otvn \VN(G)\big]$ we have $x_{kk} \in \L^1(\VN(G)) \cap \VN(G)$ for any integer $k$ and \begin{equation} \label{trace-Xn} (\tr_G \ot \tau_G)(X) = \sum_{k=1}^{\infty} \tau_G(x_{kk}). \end{equation} In the next result, we use the operator $W$ in $\cal{B}(\L^2(G)) \otvn \VN(G)$ and its inverse from \eqref{Def-fund-unitary}. \begin{lemma} \label{lem-referee-proof-step-1-calcul-du-symbole-coefficients-L2} Let $G$ be a second-countable unimodular locally compact group.
\begin{enumerate} \item Let $\phi \in \L^2(G \times G)$ such that $K_\phi$ belongs to $S^1_G$ and $x \in \L^1(\VN(G)) \cap \VN(G)$. Then $W(K_\phi \ot x)W^{-1}$ belongs to $\L^1(\cal{B}(\L^2(G)) \otvn \VN(G))$ and to $\cal{B}(\L^2(G)) \otvn \VN(G)$. \item If $f$ belongs to the space $\C_c(G)$ and if $g$ belongs to the space $\C_c(G)*\C_c(G)$, we have for any integers $i,j$ \begin{align} \label{equ-referee-proof-step-1-calcul-du-symbole-coefficients-L2} \MoveEqLeft (\tr_G \ot \tau_G)\big[W( K_\phi \ot \lambda(g) )W^{-1} \cdot ( e_{ij}^* \ot \lambda(f))\big] \\ & =\int_G \int_G \phi(s,t) \tau_G\big[\lambda_s \lambda(g) \lambda_{t^{-1}} \lambda(f)\big] \ovl{e_i(s)}e_j(t) \d \mu_G(s)\d \mu_G(t). \nonumber \end{align} \end{enumerate} \end{lemma} \begin{proof} 1. The element $K_\phi \ot x$ belongs to $S^1_G \ot [ \L^1(\VN(G)) \cap \VN(G) ]$, hence to the space $\L^1(\cal{B}(\L^2(G)) \otvn \VN(G)) \cap \big[\cal{B}(\L^2(G)) \otvn \VN(G)\big]$. Then the claim follows since $W$ and $W^{-1}$ belong to the space $\cal{B}(\L^2(G)) \otvn \VN(G)$ and since $\L^1(\cal{B}(\L^2(G)) \otvn \VN(G)) \cap \big[\cal{B}(\L^2(G)) \otvn \VN(G)\big]$ is an ideal of the von Neumann algebra $\cal{B}(\L^2(G)) \otvn \VN(G)$. 2. By the first part, observe that the element $X \ov{\mathrm{def}}{=} W( K_\phi \ot \lambda(g) ) W^{-1} \cdot \big(e_{ij}^* \ot \lambda(f)\big)$ belongs to the intersection $\L^1(\cal{B}(\L^2(G)) \otvn \VN(G)) \cap \big[\cal{B}(\L^2(G)) \otvn \VN(G)\big]$. According to \eqref{trace-Xn}, we have $$ (\tr_G \ot \tau_G)\big[W( K_\phi \ot \lambda(g) )W^{-1} (e_{ij}^* \ot \lambda(f))\big] \ov{\eqref{trace-Xn}}{=} \sum_{k=1}^{\infty} \tau_G(x_{kk}). $$ Now, we want to compute $\tau_G(x_{kk})$ with \eqref{eval}. If $k \neq i$, we have $$ X(e_k \ot h) = W(K_\phi \ot \lambda(g))W^{-1}\big(e_{ji} \ot \lambda(f)\big)(e_k \ot h) = 0. $$ Hence $x_{kk}=0$ in this case and therefore $\tau_G(x_{kk})=0$. Thus, we only need to consider $k = i$ in the sequel. Then, for $h \in \C_c(G)$, we have (replacing $r$ by $tv^{-1}s^{-1}r$ in the last equality) \begin{align} \MoveEqLeft \label{Eq-10987} \big(X(e_i \ot h)\big)(s,w) = \big(W(K_\phi \ot \lambda(g))W^{-1}(e_{ji} \ot \lambda(f) )(e_i \ot h)\big)(s,w) \\ &=\big(W(K_\phi \ot \lambda(g))W^{-1}(e_j \ot \lambda(f)(h))\big)(s,w) \nonumber \\ &\ov{\eqref{Def-fund-unitary}}{=} \big((K_\phi \ot \lambda(g))W^{-1}(e_j \ot \lambda(f)h)\big)(s,s^{-1}w) \nonumber\\ & \ov{\eqref{Def-de-Kf} \eqref{Convolution-formulas} }{=} \int_G \int_G \phi(s,t) g(v) W^{-1}(e_j \ot \lambda(f)h)(t,v^{-1}s^{-1}w) \d \mu_G(t) \d \mu_G(v) \nonumber\\ & \ov{\eqref{Def-fund-unitary}}{=} \int_G \int_G \phi(s,t) g(v)(e_j \ot \lambda(f)h)(t,tv^{-1}s^{-1}w) \d \mu_G(t) \d \mu_G(v) \nonumber\\ & \ov{\eqref{Convolution-formulas}}{=} \int_G \int_G \int_G \phi(s,t)e_j(t) g(v) f(r) h(r^{-1} t v^{-1}s^{-1}w) \d \mu_G(t) \d \mu_G(v) \d \mu_G(r)\nonumber\\ &=\int_G \int_G \int_G \phi(s,t) g(v) f(tv^{-1}s^{-1}r) h(r^{-1}w) e_j(t)\d \mu_G(t) \d \mu_G(v) \d \mu_G(r). \nonumber \end{align} Hence for almost all $w \in G$ \begin{align*} \MoveEqLeft \big(x_{ii}(h)\big)(w) \ov{\eqref{eval}}{=}\int_G \big(X(e_i \ot h)\big)(s,w) \ovl{e_i(s)} \d \mu_G(s) \\ &\ov{\eqref{Eq-10987}}{=} \int_G \int_G \int_G \int_G \phi(s,t) g(v) f(tv^{-1}s^{-1}r) h(r^{-1}w) \ovl{e_i(s)} e_j(t)\d \mu_G(t) \d \mu_G(v) \d \mu_G(r) \d \mu_G(s). \end{align*} So we obtain \[ x_{ii} = \int_G \int_G \int_G \int_G \phi(s,t) g(v) f(tv^{-1}s^{-1}r) \ovl{e_i(s)}e_j(t) \lambda_{r} \d \mu_G(t) \d \mu_G(v) \d \mu_G(r) \d \mu_G(s)
\] which identifies with the convolution operator $\lambda(k)$, where $k$ is the function defined by \begin{equation} \label{Function-k} k(r) \ov{\mathrm{def}}{=} \int_G \int_G \int_G \phi(s,t) g(v) f(tv^{-1}s^{-1}r) \ovl{e_i(s)} e_j(t) \d \mu_G(t) \d \mu_G(v) \d \mu_G(s). \end{equation} We can easily evaluate the trace of the diagonal entry $x_{ii}$, which is an element of the space $\L^1(\VN(G)) \cap \VN(G)$. Indeed, replacing $v$ by $s^{-1}v$ in the second equality, we have \begin{align*} \MoveEqLeft \tau_G(x_{ii}) =k(e) \ov{\eqref{Function-k}}{=} \int_G \int_G \int_G \phi(s,t) g(v) f(tv^{-1}s^{-1}) \ovl{e_i(s)}e_j(t) \d \mu_G(t) \d \mu_G(v) \d \mu_G(s) \\ &= \int_G \int_G \int_G \phi(s,t) g(s^{-1}v) f(tv^{-1}) \ovl{e_i(s)} e_j(t) \d \mu_G(t) \d \mu_G(v) \d \mu_G(s) \\ &=\int_G \int_G \phi(s,t) \bigg(\int_G g(s^{-1}v) f(tv^{-1}) \d \mu_G(v)\bigg) \ovl{e_i(s)} e_j(t) \d \mu_G(t) \d \mu_G(s)\\ &\ov{\eqref{Formule-Plancherel}}{=} \int_G \int_G \phi(s,t) \tau_G\big[\lambda_s \lambda(g) \lambda_{t^{-1}} \lambda(f)\big] \ovl{e_i(s)} e_j(t) \d \mu_G(t) \d \mu_G(s). \end{align*} \end{proof} In a similar way to Lemma \ref{lem-referee-proof-step-1-calcul-du-symbole-coefficients-L2}, we have the following result. \begin{lemma} \label{lem-referee-proof-step-1-calcul-du-symbole-coefficients-L2-petite-extension} Let $G$ be a second-countable unimodular locally compact group. Let $\phi \in \L^2(G \times G)$ such that $K_\phi \in S^1_G$ and $x \in \L^1(\VN(G)) \cap \VN(G)$. Suppose that $1 \leq p \leq 2$. Let $T \co \L^p(\VN(G)) \to \L^2(\VN(G))$ be a completely bounded operator. Then $(\Id \ot T)(W(K_\phi \ot x)W^{-1})$ belongs to $\L^2(\cal{B}(\L^2(G)) \otvn \VN(G))$. If $g \in \C_c(G) * \C_c(G)$ we have for any integers $i,j$ and any $f \in \C_c(G)$ \begin{align} \MoveEqLeft \label{Lp-L2} (\tr_G \ot \tau_G)\big[(\Id \ot T)(W( K_\phi \ot \lambda(g) )W^{-1}) \cdot (e_{ij}^* \ot \lambda(f) )\big] \\ & =\int_G \int_G \phi(s,t) \tau_G\big[\lambda_s \lambda(g) \lambda_{t^{-1}} T^*(\lambda(f))\big] \ovl{e_i(s)}e_j(t) \d \mu_G(s) \d \mu_G(t). \nonumber \end{align} \end{lemma} \begin{proof} According to Lemma \ref{lem-referee-proof-step-1-calcul-du-symbole-coefficients-L2}, the element $W (K_\phi \ot \lambda(g)) W^{-1}$ belongs to the space $\L^1(\cal{B}(\L^2(G)) \otvn \VN(G)) \cap \big[\cal{B}(\L^2(G)) \otvn \VN(G)\big]$, hence to the Banach space $$ \L^p(\cal{B}(\L^2(G)) \otvn \VN(G)) = S^p_G(\L^p(\VN(G))). $$ By the complete boundedness of $T \co \L^p(\VN(G)) \to \L^2(\VN(G))$ and \cite[Lemma 1.7 p.~23]{Pis98}, we infer that the element $(\Id_{S^p_G} \ot T)(W (K_\phi \ot x)W^{-1})$ belongs to the space $S^p_G(\L^2(\VN(G)))$. Since $p \leq 2$, it belongs to the Banach space $S^2_G(\L^2(\VN(G))) = \L^2(\cal{B}(\L^2(G))\otvn \VN(G))$. We have immediately \begin{align*} \MoveEqLeft (\tr_G \ot \tau_G)\big[(\Id \ot T)(W( K_\phi \ot \lambda(g) )W^{-1}) \cdot (e_{ij}^* \ot \lambda(f))\big] \\ &= (\tr_G \ot \tau_G)\big[(W(K_\phi \ot \lambda(g))W^{-1}) \cdot (e_{ij}^* \ot T^* \lambda(f))\big]. \end{align*} Now, it suffices to show that \eqref{equ-referee-proof-step-1-calcul-du-symbole-coefficients-L2} holds for generic elements of $\L^{p^*}(\VN(G))$ instead of $\lambda(f)$. This is indeed the case by density since both sides of \eqref{equ-referee-proof-step-1-calcul-du-symbole-coefficients-L2} are continuous as functions of $\lambda(f) \in \L^{p^*}(\VN(G))$. \end{proof} \begin{lemma} \label{lem-referee-proof-step-1-cacul-du-symbol-approximation} Let $G$ be a second-countable unimodular locally compact group.
Suppose that $1 \leq p \leq 2$. Let $T \co \L^p(\VN(G)) \to \L^p(\VN(G))$ be a completely bounded map. There exists a sequence $(M_{\phi_n})$ of bounded Fourier multipliers $M_{\phi_n} \co \VN(G) \to \VN(G)$ such that $\phi_n \in \C_c(G)$, $\norm{\phi_n}_{\infty} \leq 1$, each $M_{\phi_n} \co \L^p(\VN(G)) \to \L^2(\VN(G))$ is completely bounded, and such that for any $g \in \C_c(G)$, any $\phi \in \C_c(G \times G)$ with $K_\phi \in S^1_G$ and any sufficiently large $n$ $$ M_{\phi_n}(\lambda(g)) = \lambda(g) \quad \text{and} \quad (\Id \ot M_{\phi_n})(W (K_\phi \ot \lambda(g)) W^{-1}) = W (K_\phi \ot \lambda(g) ) W^{-1}. $$ \end{lemma} \begin{proof} Since the group $G$ is second-countable, we can consider a sequence $(K_n)$ of \textit{symmetric} compact subsets of $G$ such that for any compact subset $K$ of $G$, one has $K \subseteq K_n$ for all sufficiently large $n$. By \cite[Proposition 2.3.2 p.~50]{KaL18}, for any $n$ there exists a function $\phi_n \co G \to \mathbb{C}$ which is a finite linear combination of continuous positive definite functions with compact support with $0 \leq \phi_n \leq 1$ and $\phi_n(s) = 1$ for any $s \in K_n$. Essentially by \cite[Proposition 5.4.9 p.~184]{KaL18}, each function $\phi_n$ induces a completely bounded Fourier multiplier $M_{\phi_n} \co \VN(G) \to \VN(G)$. Furthermore, since $\phi_n \in \L^2(G)$, each map $M_{\phi_n} \co \L^1(\VN(G)) \to \L^2(\VN(G))$ is completely bounded by \cite[Remark 2.4 p.~899]{GJP17} and duality. Now, it suffices to interpolate with $M_{\phi_n} \co \L^2(\VN(G)) \to \L^2(\VN(G))$ to obtain a completely bounded Fourier multiplier $M_{\phi_n} \co \L^p(\VN(G)) \to \L^2(\VN(G))$. Now, let $g \in \C_c(G)$ and $\phi \in \C_c(G \times G)$ such that $K_\phi \in S^1_G$. Consider the compact set $K \ov{\mathrm{def}}{=} \supp g$ and some compact subsets $L_1,L_2$ of $G$ such that $\supp \phi \subseteq L_1 \times L_2$ and let $L \ov{\mathrm{def}}{=} L_1 \cdot K \cdot L_2^{-1}$, which is also compact. Then for any sufficiently large $n$ such that $K \subseteq K_n$, \[ M_{\phi_n}(\lambda(g)) = \lambda(\phi_n g) = \lambda(g). \] Moreover, consider some sufficiently large $n$ such that $L \subseteq K_n=\check{K}_n$. For any $s \in L_1$ and any $t \in L_2$, the element $\lambda_s \lambda(g) \lambda_{t^{-1}}$ has its Fourier support in $L_1 \cdot K \cdot L_2^{-1} = L$. Thus, $M_{\check{\phi}_n}(\lambda_s \lambda(g) \lambda_{t^{-1}}) = \lambda_s \lambda(g) \lambda_{t^{-1}}$. Note that $M_{\phi_n}^*=M_{\check{\phi}_n}$. Hence for any integers $i,j$ and any function $f \in \C_c(G)*\C_c(G)$, we have \begin{align*} \MoveEqLeft (\tr_G \ot \tau_G)\big[(\Id \ot M_{\phi_n})(W( K_\phi \ot \lambda(g) )W^{-1}) \cdot (e_{ij}^* \ot \lambda(f))\big] \\ & \ov{\eqref{Lp-L2}}{=} \int_G \int_G \phi(s,t) \tau_G\big[\lambda_s \lambda(g) \lambda_{t^{-1}} M_{\phi_n}^*(\lambda(f))\big] \ovl{e_i(s)}e_j(t) \d \mu_G(s) \d \mu_G(t) \\ & = \int_{L_1} \int_{L_2} \phi(s,t) \tau_G\big[ M_{\check{\phi}_n}(\lambda_s \lambda(g) \lambda_{t^{-1}}) \lambda(f) \big] \ovl{e_i(s)} e_j(t) \d \mu_G(s)\d \mu_G(t) \\ & = \int_{L_1} \int_{L_2} \phi(s,t) \tau_G\big[ \lambda_s \lambda(g) \lambda_{t^{-1}} \lambda(f) \big] \ovl{e_i(s)} e_j(t) \d \mu_G(s) \d \mu_G(t) \\ & \ov{\eqref{equ-referee-proof-step-1-calcul-du-symbole-coefficients-L2}}{=} (\tr_G \ot \tau_G)\big[W( K_\phi \ot \lambda(g) )W^{-1} \cdot(e_{ij}^* \ot \lambda(f))\big]. \end{align*} By density of the $e_{ij}^* \ot \lambda(f)$'s, we infer that $$ (\Id \ot M_{\phi_n})(W( K_\phi \ot \lambda(g) )W^{-1}) = W( K_\phi \ot \lambda(g) )W^{-1}.
$$ \end{proof} Recall that $(e_n)$ is an orthonormal basis of the Hilbert space $\L^2(G)$ such that each function $e_n$ is continuous with compact support. So the family $(\lambda(e_k))$ is an orthonormal basis of $\L^2(\VN(G))$ and $(e_{ij} \ot \lambda(e_k))_{i,j,k}$ is an orthonormal basis of the Hilbert space $\L^2(\cal{B}(\L^2(G)) \otvn \VN(G))$. \begin{prop} \label{prop-referee-proof-step-1-calcul-du-symbol-avec-coefficients-L2} Let $G$ be a second-countable unimodular locally compact group. Suppose that $1 \leq p \leq \infty$. Let $T \co \L^p(\VN(G)) \to \L^p(\VN(G))$ be a completely bounded operator (normal if $p = \infty$). Let $\phi,\psi \in \L^2(G \times G)$ such that $K_\phi,K_\psi \in S^1_G$, and $x,y \in \L^1(\VN(G)) \cap \VN(G)$. With the symbol \begin{equation} \label{Def-symbol-varphi-1} \varphi_{x,y,T}(s,t) \ov{\mathrm{def}}{=} \tau_G\big(\lambda_ty \lambda_{s^{-1}} T(\lambda_s x \lambda_{t^{-1}}) \big). \end{equation} we have \begin{equation} \label{MxyT-bis} \big\langle (\Id \ot T)(W (K_\phi \ot x)W^{-1}),W (K_\psi \ot y)W^{-1} \big\rangle_{S^p_G(\L^p(\VN(G))),S^{p^*}_G(\L^{p^*}(\VN(G)))} =\big\langle M_{\varphi_{x,y,T}}(K_\phi),K_\psi\big\rangle_{S^p_G, S^{p^*}_G}. \end{equation} Finally, if $p = \infty$, the same holds for any $x \in \VN(G)$ and if $p = 1$, the same holds for any $y \in \VN(G)$. \end{prop} \begin{proof} Note first that by a simple duality argument, we can suppose that $1 \leq p \leq 2$ and that the functions $\phi$ and $\psi$ belong to the space $\C_c(G \times G)$. We start with the case where the operator $T$ also induces a completely bounded map $T \co \L^p(\VN(G)) \to \L^2(\VN(G))$. Then by Lemma \ref{lem-referee-proof-step-1-calcul-du-symbole-coefficients-L2} and Lemma \ref{lem-referee-proof-step-1-calcul-du-symbole-coefficients-L2-petite-extension}, the elements $W (K_\psi \ot y)W^{-1}$ and $(\Id \ot T)(W (K_\phi \ot x)W^{-1})$ belong to the Hilbert space $\L^2(\cal{B}(\L^2(G)) \otvn \VN(G))$. So the left-hand side of \eqref{equ-prop-referee-proof-step-1-calcul-du-symbol-avec-coefficients-L2} is well-defined, and can be calculated with Parseval's formula and the orthonormal basis $(e_{ij} \ot \lambda(e_k))_{i,j,k}$. With Lemma \ref{lem-referee-proof-step-1-calcul-du-symbole-coefficients-L2-petite-extension}, we get \begin{align*} \MoveEqLeft \big\langle (\Id \ot T)(W (K_\phi \ot x)W^{-1}),W (K_\psi \ot y)W^{-1} \big\rangle_{S^2_G(\L^2(\VN(G)))} \\ & = \sum_{i,j,k} (\tr_G \ot \tau_G)\big[(\Id \ot T)(W(K_\phi \ot x)W^{-1}) (e_{ij}^* \ot \lambda(e_k^*))\big] \\ &\times \ovl{(\tr_G \ot \tau_G)\big[W(K_\psi \ot y)W^{-1} (e_{ij}^* \ot \lambda(e_k^*))\big]} \\ &\ov{\eqref{Lp-L2}\eqref{equ-referee-proof-step-1-calcul-du-symbole-coefficients-L2} }{=} \sum_{i,j,k} \int_G \int_G \phi(s,t) \tau_G\big[ \lambda_s x \lambda_{t^{-1}}T^*(\lambda(e_k^*)) \big] \ovl{e_i(s)} e_j(t) \d \mu_G(s) \d \mu_G(t) \\ & \times \ovl{\int_G \int_G \psi(s,t) \tau_G\big[\lambda_{s} y \lambda_{t^{-1}} \lambda(e_k^*) \big] \ovl{e_i(s)} e_j(t) \d \mu_G(s)\d \mu_G(t)}. 
\end{align*} Since the functions $(s,t) \mapsto \tau_G\big[ \lambda_s x \lambda_{t^{-1}}T^*(\lambda(e_k^*)) \big] \phi(s,t)$ and $(s,t) \mapsto \tau_G\big[\lambda_{s} y \lambda_{t^{-1}} \lambda(e_k^*) \big] \psi(s,t)$ belong to the Hilbert space $\L^2(G \times G)$, we can use Parseval's identity with respect to the orthonormal basis $(\ovl{e_i} \ot e_j)_{i,j}$ of the space $\L^2(G \times G)$ to replace, in the previous expression, the sum over $i,j$ together with the integrals over $s,t$ by a single integral, which then becomes \[ \sum_k \int_G \int_G \phi(s,t) \tau_G\big[ \lambda_s x \lambda_{t^{-1}}T^*(\lambda(e_k^*)) \big] \ovl{\psi(s,t) \tau_G\big[\lambda_{s} y \lambda_{t^{-1}} \lambda(e_k^*) \big] } \d \mu_G(s) \d\mu_G(t). \] Recall that $T^{*\circ} \ov{\eqref{2dual4}}{=} T^{\dag}$, where $T^{\dag}$ is the adjoint in the Hilbert space sense. We fix $s,t \in G$ and calculate using Parseval's identity in the fourth equality \begin{align*} \MoveEqLeft \sum_k \tau_G\big[ \lambda_s x \lambda_{t^{-1}}T^*(\lambda(e_k^*)) \big] \ovl{\tau_G\big[\lambda_{s} y \lambda_{t^{-1}} \lambda(e_k^*) \big]} = \sum_k \big\langle \lambda_s x \lambda_{t^{-1}},(T^*(\lambda(e_k)^*))^* \big\rangle_{\L^2} \ovl{\big\langle \lambda_{s} y \lambda_{t^{-1}}, \lambda(e_k)\big\rangle}_{\L^2} \\ &=\sum_k \big\langle \lambda_s x \lambda_{t^{-1}},T^{\dag}(\lambda(e_k)) \big\rangle_{\L^2} \ovl{\big\langle \lambda_{s} y \lambda_{t^{-1}}, \lambda(e_k)\big\rangle_{\L^2}} =\sum_k \big\langle T(\lambda_s x \lambda_{t^{-1}}),\lambda(e_k) \big\rangle \ \ovl{\big\langle \lambda_{s} y \lambda_{t^{-1}}, \lambda(e_k)\big\rangle} \\ &=\big\langle T(\lambda_s x \lambda_{t^{-1}}), \lambda_s y \lambda_{t^{-1}} \big\rangle_{\L^2(\VN(G))} =\tau_G\big(\lambda_t y^* \lambda_{s^{-1}} T(\lambda_s x \lambda_{t^{-1}})\big). \end{align*} Note that the estimates $\norm{T(\lambda_s x \lambda_{t^{-1}})}_2 \leq C$ and $\norm{\lambda_s y \lambda_{t^{-1}}}_2 \leq C$ justify that we can integrate over $s$ and $t$. Consequently, we obtain \begin{align*} \MoveEqLeft \big\langle (\Id \ot T)(W (K_\phi \ot x)W^{-1}),W (K_\psi \ot y)W^{-1} \big\rangle_{S^2_G(\L^2(\VN(G)))} \\ &=\int_{G \times G} \tau_G\big(\lambda_t y^* \lambda_{s^{-1}} T(\lambda_s x \lambda_{t^{-1}})\big)\phi(s,t) \ovl{\psi(s,t)} \d \mu_G(s) \d\mu_G(t) \\ &\ov{\eqref{dual-trace}}{=} \tr(K_{\varphi_{x,y,T}\phi} K_\psi^*) =\big\langle M_{\varphi_{x,y,T}}(K_\phi), K_\psi \big\rangle_{S^2_G}. \end{align*} Thus we have shown the formula \begin{align} \MoveEqLeft \label{equ-prop-referee-proof-step-1-calcul-du-symbol-avec-coefficients-L2} \big\langle (\Id \ot T)(W (K_\phi \ot x)W^{-1}),W (K_\psi \ot y)W^{-1} \big\rangle_{S^2_G(\L^2(\VN(G)))} =\big\langle M_{\varphi_{x,y,T}}(K_\phi), K_\psi \big\rangle_{S^2_G}, \end{align} under the additional assumption that the linear map $T$ defines a completely bounded operator $\L^p(\VN(G)) \to \L^2(\VN(G))$. Rewriting both brackets, this formula translates to \eqref{MxyT-bis}. For the general case, we use Lemma \ref{lem-referee-proof-step-1-cacul-du-symbol-approximation} and its approximation sequence $(M_{\phi_n})$ of Fourier multipliers $M_{\phi_n} \co \VN(G) \to \VN(G)$. By a density argument, we can assume that $y \in \lambda(\C_c(G))$. According to Lemma \ref{lem-referee-proof-step-1-cacul-du-symbol-approximation}, the composition $M_{\phi_n} T \co \L^p(\VN(G)) \to \L^2(\VN(G))$ is completely bounded. So the first part of the proof applies to this operator.
We obtain \[ \big\langle (\Id \ot M_{\phi_n}T)(W(K_\phi \ot x)W^{-1}),W(K_\psi \ot y)W^{-1} \big\rangle =\big\langle M_{\varphi_n}(K_\phi), K_\psi \big\rangle_{S^2_G}, \] with $\varphi_n(s,t) \ov{\mathrm{def}}{=} \tau_G\big(\lambda_ty^* \lambda_{s^{-1}} M_{\phi_n}T(\lambda_s x \lambda_{t^{-1}}) \big)$. By the approximation from Lemma \ref{lem-referee-proof-step-1-cacul-du-symbol-approximation}, we have \begin{align*} \MoveEqLeft \big\langle (\Id \ot M_{\phi_n}T)(W(K_\phi \ot x)W^{-1}),W(K_\psi \ot y)W^{-1} \big\rangle \\ &= \big\langle (\Id \ot T)(W(K_\phi \ot x)W^{-1}), (\Id \ot M_{\phi_n}^*) (W(K_\psi \ot y)W^{-1}) \big\rangle \\ & \xra[n \to \infty]{} \big\langle (\Id \ot T)(W(K_\phi \ot x)W^{-1}),W(K_\psi \ot y)W^{-1} \big\rangle. \end{align*} Again with Lemma \ref{lem-referee-proof-step-1-cacul-du-symbol-approximation}, we have \begin{align*} \MoveEqLeft \varphi_n(s,t) =\big\langle M_{\phi_n} T(\lambda_s x \lambda_{t^{-1}}) , \lambda_s y \lambda_{t^{-1}} \big\rangle \\ & = \big\langle T(\lambda_s x \lambda_{t^{-1}}) , M_{\phi_n}^*(\lambda_s y \lambda_{t^{-1}}) \big\rangle \xra[n \to \infty]{} \big\langle T(\lambda_s x \lambda_{t^{-1}}) , \lambda_s y \lambda_{t^{-1}} \big\rangle \ov{\eqref{Def-symbol-varphi-1}}{=} \varphi_{x,y,T}(s,t), \end{align*} pointwise in $s,t \in G$. In fact, if $s,t$ vary in given compact sets, $\varphi_n(s,t) = \varphi_{x,y,T}(s,t)$ for $n$ sufficiently large. Since the functions $\phi$ and $\psi$ are assumed to belong to the space $\C_c(G\times G)$, we then obtain $\langle M_{\varphi_n}(K_\phi), K_\psi \rangle = \langle M_{\varphi_{x,y,T}}(K_\phi), K_\psi \rangle$ for $n$ sufficiently large. In summary, we have established the formula \[ \big\langle (\Id \ot T)(W(K_\phi \ot x)W^{-1}),W(K_\psi \ot y)W^{-1} \big\rangle = \langle M_{\varphi_{x,y,T}}(K_\phi), K_\psi \rangle. \] \end{proof} \begin{lemma} \label{Lemma-estimation-cb} Suppose that $1 \leq p \leq \infty$. Let $T \co \L^p(\VN(G)) \to \L^p(\VN(G))$ be a completely bounded operator (weak* continuous if $p = \infty$). For any elements $x$ and $y$ in the space $\L^1(\VN(G)) \cap \VN(G)$, we have the estimate \begin{equation} \label{div-987} \norm{M_{\varphi_{x,y,T}}}_{\cb,S^p_G \to S^p_G} \leq\norm{T}_{\cb, \L^p(\VN(G)) \to \L^p(\VN(G))} \norm{x}_{\L^p(\VN(G))} \norm{y}_{\L^{p^*}(\VN(G))}, \end{equation} with the usual convention if $p=1$ or $p=\infty$. If $p=\infty$ (resp. $p = 1$), we can also take $x \in \VN(G)$ (resp. $y \in \VN(G)$). Moreover, if the linear map $T$ is completely positive then the Schur multiplier $M_{\varphi_{x,y,T}}$ is also completely positive. \end{lemma} \begin{proof} By \cite[Definition 2.1]{Pis95}, the duality \cite[Theorem 4.7 p.~49]{Pis98} and the Plancherel formula \eqref{Formule-Plancherel}, we have according to Proposition \ref{prop-referee-proof-step-1-calcul-du-symbol-avec-coefficients-L2}, \begin{align*} \MoveEqLeft \norm{M_{\varphi_{x,y,T}}}_{\cb,S^p_G \to S^p_G} \\ &\leq \sup \left \{ \left| \sum_{ij} \big\langle (\Id \ot T)(W(K_{\phi_{ij}} \ot x)W^{-1}), W (K_{\psi_{ij}} \ot y) W^{-1} \big\rangle \right| \right. :\norm{[K_{\phi_{ij}}]}_p,\\ & \bigg. \norm{[K_{\psi_{ij}}]}_{p^*} \leq 1 \Bigg\} \\ & \leq \norm{\Id \ot \Id \ot T}_{\cal{B}(S^p(S^p_G(\L^p(\VN(G)))))} \norm{W}_\infty \norm{x}_p\norm{W^{-1}}_\infty \norm{W}_\infty \norm{y}_{p^*} \norm{W^{-1}}_\infty \\ & \leq \norm{T}_{\cb, \L^p(\VN(G)) \to \L^p(\VN(G))} \norm{x}_{\L^p(\VN(G))} \norm{y}_{\L^{p^*}(\VN(G))}.
\end{align*} If the linear map $T \co \L^p(\VN(G)) \to \L^p(\VN(G))$ is completely positive (and weak* continuous if $p = \infty$), then for any positive elements $[K_{\phi_{ij}}]$ and $[K_{\psi_{ij}}]$ we have \[ \sum_{ij} \big\langle M_{\varphi_{x,y,T}}(K_{\phi_{ij}}), K_{\psi_{ij}} \big\rangle = \sum_{ij} \big\langle (\Id \ot T)(W(K_{\phi_{ij}} \ot x)W^{-1}), W (K_{\psi_{ij}} \ot y) W^{-1} \big\rangle \geq 0 . \] Indeed, the map $\Id \ot \Id \ot T$ preserves the positivity and $[W(K_{\phi_{ij}} \ot x)W^{-1}]$ and $[W (K_{\psi_{ij}} \ot y) W^{-1}]$ are positive. We infer by \cite[Lemma 2.6 p.~13]{ArK23} that $[M_{\varphi_{x,y,T}}(K_{\phi_{ij}})]$ is positive, hence the map $M_{\varphi_{x,y,T}}$ is completely positive. \end{proof} \subsection{Step 2: the symbol of $P_j(T)$ is Herz-Schur when $G$ is inner amenable} \label{Sec-Herz-Schur} In the following result, we show that if the group $G$ is inner amenable, then we can produce Herz-Schur multipliers. Recall that $\varphi_{x,y,T}$ is defined in \eqref{Def-symbol-varphi-1}. \begin{lemma} \label{lem-SAIN-Herz-Schur} Let $G$ be a second-countable unimodular inner amenable locally compact group. Let $F$ be a finite subset of $G$ and let $(V_j^F)_j$ be a sequence of subsets of $G$ satisfying the last point of Theorem \ref{thm-inner-amenable-Folner}. Consider a weak* continuous completely bounded map $T \co \VN(G) \to \VN(G)$. With the notation \eqref{Def-symbol-varphi-1}, we let \begin{equation} \label{Def-ds-inner} y_j^F \ov{\mathrm{def}}{=} c_j^F |\lambda(1_{V_j^F})|^2 \quad \text{and} \quad \phi_{j,T}^F \ov{\mathrm{def}}{=} \varphi_{1,y_j^F,T}, \end{equation} where $c_j^F > 0$ is the normalisation constant chosen so that $\norm{y_j^F}_{\L^1(\VN(G))} = 1$. Then any weak* cluster point $\phi_T^F$ of the sequence $(\phi_{j,T}^F)_j$ satisfies \[ \phi_{T}^F(sr,tr) = \phi_{T}^F(s,t), \quad s,t \in G, \: r \in F. \] Moreover, any weak* cluster point of such $(\phi_T^F)_F$, where the finite subsets $F$ of $G$ are directed by inclusion, is also a Herz-Schur symbol. \end{lemma} \begin{proof} For any $s,t \in G$ and any $r \in F$, we have \begin{align*} \MoveEqLeft \phi_{j,T}^F(sr,tr) - \phi_{j,T}^F(s,t) \ov{\eqref{Def-ds-inner}}{=} \varphi_{1,y_j^F,T}(sr,tr)-\varphi_{1,y_j^F,T}(s,t) \\ &\ov{\eqref{Def-symbol-varphi-1}}{=}\tau_G\big(\lambda_{tr}y_j^F \lambda_{(sr)^{-1}} T(\lambda_{sr} \lambda_{(tr)^{-1}}) \big)-\tau_G\big(\lambda_{t}y_j^F \lambda_{s^{-1}} T(\lambda_s \lambda_{t^{-1}}) \big)\\ &=\tau_G\big(\lambda_t \lambda_r y_j^F \lambda_{r^{-1}} \lambda_{s^{-1}} T(\lambda_{st^{-1}})\big) - \tau_G\big(\lambda_t y_j^F \lambda_{s^{-1}} T(\lambda_{st^{-1}})\big) \\ &= \tau_G\left(\lambda_t (\lambda_r y_j^F \lambda_{r^{-1}} - y_j^F) \lambda_{s^{-1}} T(\lambda_{st^{-1}}) \right). \end{align*} If we can show that \begin{equation} \label{equ-1-proof-lemma-SAIN-Herz-Schur} \norm{\lambda_r y_j^F \lambda_{r^{-1}} - y_j^F}_{\L^1(\VN(G))} \xra[j \to \infty]{} 0, \end{equation} then we will obtain the pointwise convergence $\phi_{j,T}^F(sr,tr) - \phi_{j,T}^F(s,t) \to 0$ as $j \to \infty$, for fixed $s,t \in G$ and $r \in F$. Since $\phi_{j,T}^F(sr,tr) - \phi_{j,T}^F(s,t)$ is uniformly bounded in the Banach space $\L^\infty(G \times G)$, it follows by dominated convergence that this sequence converges for the weak* topology to $0$ in the space $\L^\infty(G \times G)$.
Thus, if $\phi_T^F$ is a cluster point of $(\phi_{j,T}^F)_{j}$, it is easy to check by a $\frac{\epsi}{3}$-argument, using the weak* continuity of translations on $\L^\infty$, that $\phi_T^F(sr,tr) = \phi_T^F(s,t)$ for any $s,t \in G$ and $r \in F$. It remains to show \eqref{equ-1-proof-lemma-SAIN-Herz-Schur}. First, for any $j$ we have \begin{equation} \label{equal-cjF} (c_j^F)^{-1} = \bnorm{|\lambda(1_{V_j^F})|^2}_{1} = \bnorm{\lambda(1_{V_j^F})}_{\L^2(\VN(G))}^2 = \bnorm{1_{V_j^F}}_{\L^2(G)}^2 = \mu\big(V_j^F\big). \end{equation} Now, using unimodularity in the second equality, we observe that \begin{align*} \MoveEqLeft \bnorm{\lambda_r |\lambda(1_{V_j^F})|^2 \lambda_{r^{-1}} - |\lambda(1_{V_j^F})|^2 }_1 \ov{\eqref{composition-et-lambda}}{=} \bnorm{\lambda_r \lambda\big(1_{V_j^F} \ast 1_{V_j^F}\big) \lambda_{r^{-1}} - \lambda\big(1_{V_j^F} \ast 1_{V_j^F}\big) }_1 \\ &=\bnorm{\lambda\big( 1_{V_j^F} \ast 1_{V_j^F}(r^{-1} (\cdot) r) - 1_{V_j^F} \ast 1_{V_j^F} \big)}_1 \\ &\leq \bnorm{\lambda\big( 1_{V_j^F} \ast 1_{V_j^F}(r^{-1} (\cdot) r)-1_{V_j^F} \ast 1_{rV_j^Fr^{-1}}+1_{V_j^F} \ast 1_{rV_j^Fr^{-1}}- 1_{V_j^F} \ast 1_{V_j^F} \big)}_1 \\ &\leq \bnorm{\lambda\big(1_{V_j^F} \ast 1_{rV_j^Fr^{-1}} - 1_{V_j^F} \ast 1_{V_j^F}(r^{-1}(\cdot)r)\big)}_1+\bnorm{\lambda\big(1_{V_j^F} \ast (1_{rV_j^Fr^{-1}} - 1_{V_j^F})\big)}_1. \end{align*} We estimate the second summand with unimodularity by \begin{align*} \MoveEqLeft \bnorm{1_{V_j^F}}_{\L^2(G)} \bnorm{1_{rV_j^Fr^{-1}} - 1_{V_j^F}}_{\L^2(G)} \ov{\eqref{Indicator-formula}}{=} \mu\big(V_j^F\big)^{\frac12} \mu\big(rV_j^Fr^{-1} \Delta V_j^F\big)^{\frac12}. \end{align*} Now, we manipulate the first summand. Replacing $t$ by $tr^{-1}$ and $t$ by $r^{-1}t$ in the third equality, we obtain \begin{align} \MoveEqLeft \label{Infinite-34} 1_{V_j^F} \ast 1_{rV_j^Fr^{-1}}(s) - 1_{V_j^F} \ast 1_{V_j^F}(r^{-1}sr) \\ &\ov{\eqref{Convolution-formulas}}{=} \int_G 1_{V_j^F}(t) 1_{rV_j^Fr^{-1}}(t^{-1}s) - 1_{V_j^F}(t) 1_{V_j^F}(t^{-1}r^{-1}sr) \d\mu_G(t) \nonumber\\ &=\int_G 1_{V_j^F}(t) 1_{V_j^Fr^{-1}}(r^{-1}t^{-1}s)\d \mu_G(t) -\int_G 1_{V_j^F}(t) 1_{V_j^Fr^{-1}}(t^{-1}r^{-1}s) \d \mu_G(t) \nonumber \\ &= \int_G 1_{V_j^F}(tr^{-1}) 1_{V_j^Fr^{-1}}(t^{-1}s) \d\mu_G(t)-\int_G 1_{V_j^F}(r^{-1}t) 1_{V_j^Fr^{-1}}(t^{-1}s) \d\mu_G(t) \nonumber\\ &= \int_G \big(1_{V_j^Fr} - 1_{r V_j^F}\big)(t) 1_{V_j^Fr^{-1}}(t^{-1}s) \d\mu_G(t) \nonumber \ov{\eqref{Convolution-formulas}}{=} \big(1_{V_j^Fr} - 1_{r V_j^F}\big) \ast 1_{V_j^F r^{-1}}(s).\nonumber \end{align} Using the invariance of $\mu_G$, we therefore obtain the following estimate for the first summand: \begin{align*} \MoveEqLeft \bnorm{\lambda\big(1_{V_j^F} \ast 1_{rV_j^Fr^{-1}} - 1_{V_j^F} \ast 1_{V_j^F}(r^{-1}(\cdot)r)\big)}_1 \ov{\eqref{Infinite-34}}{\leq} \bnorm{1_{V_j^Fr} - 1_{rV_j^F}}_{\L^2(G)} \bnorm{1_{V_j^F r^{-1}}}_{\L^2(G)} \\ & \ov{\eqref{Indicator-formula}}{=} \mu\big(V_j^Fr \Delta r V_j^F\big)^{\frac12} \mu\big(V_j^Fr^{-1}\big)^{\frac12} = \mu\big(V_j^F \Delta r V_j^F r^{-1}\big)^{\frac12} \mu\big(V_j^F\big)^{\frac12}. \end{align*} Combining the two estimates, we obtain \begin{align*} \MoveEqLeft \norm{\lambda_r y_j^F \lambda_{r^{-1}} - y_j^F}_1 \leq 2 c_j^F \mu\big(V_j^F \Delta r V_j^F r^{-1}\big)^{\frac12} \mu\big(V_j^F\big)^{\frac12} \\ &\ov{\eqref{equal-cjF}}{=} 2 \left[ \frac{\mu(V_j^F \Delta r V_j^F r^{-1})}{\mu(V_j^F)} \right]^{\frac12} \xra[j]{\eqref{Inner-Folner}} 0, \end{align*} according to the inner amenability assumption.
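We record in passing a trivial special case, included only for orientation and not needed in the sequel: if the group $G$ is abelian, then the von Neumann algebra $\VN(G)$ is commutative, so that \[ \lambda_r y_j^F \lambda_{r^{-1}} = y_j^F, \quad r \in G, \: j \geq 1, \] and the left-hand side of \eqref{equ-1-proof-lemma-SAIN-Herz-Schur} vanishes identically; in this case each symbol $\phi_{j,T}^F$ already satisfies $\phi_{j,T}^F(sr,tr) = \phi_{j,T}^F(s,t)$ for all $r,s,t \in G$.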
Now, for any finite subset $F$ of the group $G$, we fix a weak* cluster point $\phi_T^F$ of the net $(\phi_{j,T}^F)_j$. Let $\phi_T$ be a weak* cluster point of $(\phi_T^F)_F$. Then for any function $f \in \L^1(G \times G)$ and any $r \in G$, the function $f(\cdot\, r^{-1},\cdot\, r^{-1})$ belongs to the space $\L^1(G \times G)$. Moreover, using unimodularity in the first and third steps, and the first part of the proof (which is valid for $F$ containing $\{r\}$) in the fourth step, we obtain \begin{align*} \MoveEqLeft \big\langle \phi_T(\cdot\, r, \cdot\, r) , f \big\rangle_{\L^\infty(G \times G),\L^1(G \times G)} = \big\langle \phi_T, f(\cdot\, r^{-1}, \cdot\, r^{-1}) \big\rangle_{\L^\infty,\L^1} = \lim_{F \to \infty} \big\langle \phi_T^F, f(\cdot\, r^{-1}, \cdot\, r^{-1}) \big\rangle_{\L^\infty,\L^1} \\ & = \lim_{F \to \infty} \big\langle \phi_T^F(\cdot\, r, \cdot\, r), f \big\rangle_{\L^\infty,\L^1} = \lim_{F \to \infty} \big\langle \phi_T^F, f \big\rangle_{\L^\infty,\L^1} = \langle \phi_T, f \rangle_{\L^\infty(G \times G),\L^1(G \times G)}. \end{align*} We deduce that the function $\phi_T$ is a Herz-Schur symbol. \end{proof} \subsection{Step 2: the symbol of $P_j(T)$ for a Fourier multiplier $T$ if $p=\infty$ or $p=1$} \label{Section-p=1-p-infty} We start with the case $p=\infty$. Let $T = M_\phi \co \VN(G) \to \VN(G)$ be a completely bounded Fourier multiplier. If $x \in \VN(G)$ and $y \in \L^1(\VN(G)) \cap \VN(G)$, recall that the symbol $\varphi_{x,y,T}$ is defined in \eqref{Def-symbol-varphi-1}. \begin{lemma} \label{lemma-symbol-step-1-p=infty} Let $G$ be a second-countable unimodular locally compact group. Consider a completely bounded Fourier multiplier $T = M_\phi \co \VN(G) \to \VN(G)$. Let $y$ be a positive element in the space $\L^1(\VN(G)) \cap \VN(G)$ such that $\tau_G(y) = 1$. We have \begin{equation} \varphi_{1,y,T}(s,t) =\phi(st^{-1}), \quad s,t \in G. \end{equation} \end{lemma} \begin{proof} For any $s,t \in G$, we have \begin{align*} \MoveEqLeft \varphi_{1,y,T}(s,t) \ov{\eqref{Def-symbol-varphi-1}}{=} \tau_G(y \lambda_{s^{-1}} M_\phi(\lambda_s \lambda_{t^{-1}}) \lambda_t) =\phi(st^{-1})\tau_G(\lambda_ty \lambda_{s^{-1}} \lambda_s \lambda_{t^{-1}} ) \\ &=\phi(st^{-1})\tau_G(y) =\phi(st^{-1}). \end{align*} \end{proof} \begin{example} \normalfont \label{example-p=infy} Let $g$ be a continuous function with compact support on $G$ with $\norm{g}_{\L^2(G)} = 1$. With $y \ov{\mathrm{def}}{=} \lambda(g^* \ast g)$, the assumptions of Lemma \ref{lemma-symbol-step-1-p=infty} are satisfied, by \eqref{composition-et-lambda} and since $ \tau_G(y) \ov{\eqref{composition-et-lambda}}{=} \tau_G(\lambda(g)^*\lambda(g)) \ov{\eqref{Formule-Plancherel}}{=} \norm{g}_{\L^2(G)}^2 =1$. \end{example} We continue with the case $p=1$. We can prove the following similar result. \begin{lemma} \label{lemma-symbol-step-1-p=1} Let $G$ be a second-countable unimodular locally compact group. Consider a completely bounded Fourier multiplier $T = M_\phi \co \L^1(\VN(G)) \to \L^1(\VN(G))$. Let $x$ be a positive element in the space $\L^1(\VN(G)) \cap \VN(G)$ such that $\tau_G(x) = 1$. We have \begin{equation} \varphi_{x,1,T}(s,t) =\phi(st^{-1}), \quad s,t \in G.
\end{equation} \end{lemma} \begin{proof} For any $s,t \in G$, we have \begin{align*} \MoveEqLeft \varphi_{x,1,T}(s,t) \ov{\eqref{Def-symbol-varphi-1}}{=} \tau_G\big(\lambda_t\lambda_{s^{-1}} M_\phi(\lambda_s x\lambda_{t^{-1}}) \big) \\ &=\tau_G\big( M_{\check{\phi}}(\lambda_{ts^{-1}}) \lambda_s x\lambda_{t^{-1}} \big) =\phi(st^{-1})\tau_G\big(\lambda_{ts^{-1}} \lambda_s x\lambda_{t^{-1}} \big) =\phi(st^{-1})\tau_G(x) =\phi(st^{-1}). \end{align*} \end{proof} \subsection{Step 2: convergence of the symbols for a multiplier $T$ with arbitrary symbol} \label{Sec-convergence-continuous} We show that for a suitable choice of sequences of functions, we obtain the convergence of symbols to the desired Herz-Schur symbol. \begin{prop} \label{th-convergence} Let $G$ be a second-countable unimodular locally compact group. Suppose that $1 \leq p \leq \infty$. Consider some completely bounded Fourier multiplier $T = M_\phi \co \L^p(\VN(G)) \to \L^p(\VN(G))$. Let $(f_j)$ and $(g_j)$ be nets of positive functions with compact support belonging to the space $\C_e(G)$ such that if $x_j \ov{\mathrm{def}}{=} \lambda(f_j)$, $y_j \ov{\mathrm{def}}{=} \lambda(g_j)$ we have \begin{itemize} \item $\norm{x_j}_{\L^p(\VN(G))} \norm{y_j}_{\L^{p^*}(\VN(G))} \leq C$ for all $j$ for some positive constant $C$, \item $\tau_G(x_j y_j) = 1$ for all $j$, \item $\supp f_j \to \{e\}$ or $\supp g_j \to \{e\}$. \end{itemize} Moreover, let \begin{equation} \label{def-symbol-phi-alpha} \phi_{j,T}(s,t) \ov{\mathrm{def}}{=} \varphi_{x_j,y_j,T}(s,t) \ov{\eqref{Def-symbol-varphi-1}}{=} \tau_G \big(\lambda_ty_j \lambda_{s^{-1}} T(\lambda_s x_j \lambda_{t^{-1}}) \big), \quad s,t \in G. \end{equation} Then the net $(\phi_{j,T})_j$ of elements in the space $\L^\infty(G \times G)$ converges for the weak* topology to the function $\phi^\HS \co (s,t) \mapsto \phi(st^{-1})$. \end{prop} \begin{proof} For any $j$ and almost all $s,t \in G$, we have using a change of variables in the last equality \begin{align} \MoveEqLeft \label{Calcul-symbole} \phi_{j,T}(s,t) \ov{\eqref{def-symbol-phi-alpha}}{=} \tau_G \big(y_j \lambda_{s^{-1}} T(\lambda_s x_j \lambda_{t^{-1}}) \lambda_t\big) =\tau_G \big(\lambda_t\lambda(g_j) \lambda_{s^{-1}} T(\lambda_s \lambda(f_j) \lambda_{t^{-1}}) \big)\\ &=\tau_G \big(\lambda(g_j(t^{-1}\cdot s)) M_\phi(\lambda(f_j(s^{-1}\cdot t))) \big) =\tau_G \big(\lambda(g_j(t^{-1}\cdot s)) \lambda\big(\phi f_j(s^{-1}\cdot t) \big)\big) \nonumber\\ &\ov{\eqref{Formule-Plancherel}}{=} \int_G g_j(t^{-1}u^{-1} s) \phi(u) f_j(s^{-1}u t) \d\mu_G(u) =\int_G \phi(sut^{-1}) g_j(u^{-1})f_j(u) \d\mu_G(u). \nonumber \end{align} By Lemma \ref{Lemma-estimation-cb} and \eqref{symbol-phixyT}, we deduce that $\bnorm{M_{\phi_{j,T}}}_{\cb,S^p_G \to S^p_G} \leq C$ for any $j$. Using the inequality \eqref{ine-infty}, we see that the net $(\phi_{j,T})_j$ of functions is uniformly bounded in the Banach space $\L^\infty(G \times G)$. Thus to check the claimed weak* convergence, it suffices by \cite[Proposition 1.21 p.~8]{Dou98} to test against a function $h \in \C_c(G \times G)$. We suppose that $\supp f_j \to \{e\}$.
Since $\int_G \check{g}_jf_j \d \mu_G \ov{\eqref{Formule-Plancherel}}{=} \tau_G\big(\lambda(g_j)\lambda(f_j)\big) =\tau_G(x_jy_j)=1$, we have, using unimodularity in a change of variables, and with the notation $K_j = \supp h \cup \{(s,t):\: \exists \: u \in \supp f_j:\: (su^{-1},t) \in \supp h\}$ \begin{align*} \MoveEqLeft \left|\int_{G \times G} \big(\phi_{j,T}(s,t) -\phi^\HS(s,t)\big) h(s,t) \d\mu_G(s)\d\mu_G(t)\right| \\ & \ov{\eqref{Calcul-symbole}}{=} \left|\int_G \int_G \int_G \big(\phi(sut^{-1}) -\phi(st^{-1})\big)\check{g}_j(u)f_j(u) h(s,t) \d\mu_G(u)\d\mu_G(s)\d\mu_G(t)\right| \\ &=\left|\int_G \int_G \int_G \phi(st^{-1}) \check{g}_j(u)f_j(u) \big( h(su^{-1},t) - h(s,t) \big) \d\mu_G(u) \d\mu_G(s) \d\mu_G(t) \right| \\ &\leq \sup \left\{ |h(su^{-1},t) - h(s,t)| : \:u \in \supp f_j,(s,t) \in \supp h, (su^{-1},t) \in \supp h \right\} \cdot \\ & \cdot \int_{K_j} \int_G |\phi(st^{-1})| f_j(u)\check{g}_j(u)\d\mu_G(u)\d\mu_G(s)\d\mu_G(t) \\ & = \sup \left\{ |h(su^{-1},t) - h(s,t)| : \:u \in \supp f_j,(s,t) \in \supp h, (su^{-1},t) \in \supp h \right\} \cdot \\ & \cdot \int_{K_j} |\phi(st^{-1})| \d\mu_G(s)\d\mu_G(t) \xra[j]{} 0, \end{align*} since $K_j$ is contained in a fixed compact, so that the last integral is uniformly bounded in $j$, and $h$ was supposed to be continuous. We can use a similar reasoning if $\supp g_j \to \{e\}$. \end{proof} \begin{example} \normalfont \label{Essai} Let $G$ be a second-countable unimodular locally compact group. Consider some value $p \in (1,\infty)$ and assume that $\frac{p}{p^*}$ is rational. Since $p = \frac{p}{p^*} + 1$, this means that $p$ is rational, which implies that both $\frac{1}{p}$ and $\frac{1}{p^*}$ are also rational. Therefore, there exist integers $l,m,n \geq 1$ such that $\frac{1}{p}=\frac{m}{n}$ and $\frac{1}{p^*}=\frac{l}{n}$. Consequently, $\frac{n}{p}=m$ and $\frac{n}{p^*}=l$ are integers. (For instance, if $p=4$ then $p^* = \frac{4}{3}$, and one can take $n=4$, $m=1$ and $l=3$.) Consider a sequence $(k_j)$ of positive functions belonging to the space $\C_c(G)$ with $\supp k_j \to \{e\}$. For each integer $j$, we define the function $h_j \ov{\mathrm{def}}{=} k_j^* \ast k_j$. We can suppose that $\norm{\lambda(h_j)}_{\L^{n}(\VN(G))} = 1$. We let \begin{equation} \label{xj-yj} x_j \ov{\mathrm{def}}{=} (\lambda(h_j))^m \quad \text{and} \quad y_j \ov{\mathrm{def}}{=} (\lambda(h_j))^l. \end{equation} Note that by \eqref{composition-et-lambda} these elements belong to $\mathfrak{m}_{\tau_G}$, as defined in \eqref{Def-mtauG}. Then the sequences $(x_j)$ and $(y_j)$ satisfy the assumptions of Proposition \ref{th-convergence}. Indeed, the $x_j$ and $y_j$ are positive and we have $$ \norm{x_j}_p \ov{\eqref{xj-yj}}{=}\bnorm{(\lambda(h_j))^m}_p = \bnorm{\lambda(h_j)^{\frac{n}{p}}}_p =\norm{\lambda(h_j)}_{n}^{\frac{n}{p}} = 1 $$ and similarly $$ \norm{y_j}_{p^*} \ov{\eqref{xj-yj}}{=} \bnorm{(\lambda(h_j))^{l}}_{p^*} = \bnorm{\lambda(h_j)^{\frac{n}{p^*}}}_{p^*} =\norm{\lambda(h_j)}_{n}^{\frac{n}{p^*}} = 1. $$ Finally, we observe that $$ \tau_G(x_j y_j) = \tau_G\big( \lambda(h_j)^{m}\lambda(h_j)^{l}\big) =\tau_G\big( \lambda(h_j)^{n}\big) =\norm{\lambda(h_j)}_{n}^{n} = 1. $$ Note that these sequences depend on $p$. \end{example} \subsection{Step 2: the case of totally disconnected and finite-dimensional groups} \label{Sec-finite-dim} In order to achieve a complementation that ensures the compatibility of the resulting projection $P^p_G$ for different values of $p$, one needs to select sequences different from those defined in Example \ref{Essai}.
This will be achieved in Corollary \ref{Cor-38} in the case where the locally compact group $G$ is finite-dimensional. \paragraph{Dimensions of topological spaces} Recall that three notions of dimension of a \textit{suitable} topological space $X$ exist: the small inductive dimension, the large inductive dimension and the covering dimension. These dimensions are defined, for example, in \cite[Chapter 7]{Eng89}. Recall the definition of the small inductive dimension. Let $X$ be a regular topological space. We say that $\ind X = -1$ if $X$ is empty. If $n$ is a natural number, then we say that $\ind X \leq n$ if for every point $x \in X$ and every neighborhood $V$ of $x$ in $X$ there exists an open set $U$ contained in $V$ such that $x \in U$ and such that the boundary $\partial U$ satisfies $\ind \partial U \leq n-1$. We say that $\ind X=n$ if $\ind X \leq n$ and $\ind X \leq n-1$ does not hold. Finally, we say that $\ind X=\infty$ if the inequality $\ind X \leq n$ does not hold for any integer $n$. If two regular topological spaces $X$ and $Y$ are homeomorphic then $\ind X=\ind Y$. We refer to the book \cite{Eng89} for more information. By \cite[Theorem 7.3.3, p.~404]{Eng89}, these notions coincide when $X$ is metrizable and separable. Note that a second-countable locally compact group $G$ satisfies this property\footnote{\thefootnote. Such a group is metrizable by \cite[Theorem 2.B.2 p.~20]{CoH16} and second-countable topological spaces are separable by \cite[Corollary 1.3.8 p.~25]{Eng89}. See also \cite[Theorem 2.A.10 p.~15]{CoH16}, which presents a characterization of locally compact spaces which are second-countable.}. In fact, Arhangel'skii and Pasynkov showed in \cite{Arh60} and \cite{Pas60} that these notions coincide for an \textit{arbitrary} locally compact group $G$. We refer to the survey \cite[p.~205]{ArM18} for more information. \begin{example} \normalfont \label{0-dim-space} According to \cite[p.~360]{Eng89}, a topological space $X$ is called zero-dimensional if it is a non-empty $T_1$-space with a basis of open-and-closed subsets. If $X$ is locally compact and paracompact, it is equivalent, by \cite[Theorem 6.2.10, p.~362]{Eng89}, to say that $X$ is totally disconnected\footnote{\thefootnote. In \cite[p.~360]{Eng89}, the term <<hereditarily disconnected>> is used for this notion.}, meaning it contains no connected subspace with more than one point. Furthermore, \cite[Theorem 7.1.12, p.~388]{Eng89} shows that this is also equivalent to $\ind X=0$. It is worth noting that every metrizable space is paracompact, as stated in \cite[Theorem 5.1.3, p.~300]{Eng89}. \end{example} \begin{example} \normalfont \label{ex-finite-loc-compact} By \cite[Remark 39.5 (d) p.~283]{Str06}, a finite-dimensional locally compact group $G$ is a Lie group if and only if it is locally connected\footnote{\thefootnote. This result is stronger than \cite[Exercise 1.6.9 p.~122]{Tao14}, which says without proof that a locally compact group $G$ is a Lie group if and only if it is first-countable, locally connected and finite-dimensional. Moreover, the notion of dimension of \cite[Exercise 1.6.9 p.~122]{Tao14} is different.}. See also \cite[Theorem 70, p.~337]{Pon66} for the compact case. \end{example} We need background on local isomorphisms since we will use Iwasawa's local splitting theorem, which provides a local isomorphism.
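Before recalling the precise definition below, we record for orientation a standard example of locally isomorphic groups (an illustration only, not used in the sequel): the locally compact groups $\R$ and $\R/\Z$ are locally isomorphic, although they are not isomorphic. Indeed, the map \[ f \co \big(-\tfrac{1}{4},\tfrac{1}{4}\big) \to \R/\Z, \quad t \mapsto t + \Z, \] is a homeomorphism onto an open neighborhood of the identity of $\R/\Z$ which satisfies $f(x+y)=f(x)+f(y)$ whenever $x$, $y$ and $x+y$ belong to $\big(-\tfrac{1}{4},\tfrac{1}{4}\big)$, and its inverse has the analogous property.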
\paragraph{Local isomorphisms} Recall that two topological groups $G$ and $H$ are said to be locally isomorphic \cite[p.~224]{Bou98} if there exist open neighborhoods $V$ and $W$ of the identity elements $e_G$ and $e_H$ and a homeomorphism $f \co V \to W$ satisfying $f(xy)=f(x)f(y)$ for all $x,y\in V$ such that $xy \in V$, and such that the inverse mapping $g$ of $f$ satisfies $g(x'y') = g(x') g(y')$ for each pair of points $x', y'$ in $W$ such that $x'y' \in W$. We say that $f$ is a local isomorphism of $G$ with $H$. The following result from \cite[pp.~18-19]{Bou04b} describes the relationship between Haar measures and local isomorphisms. \begin{lemma} \label{Lemma-locally-isomorphic-1} Let $G$ and $G'$ be locally isomorphic locally compact groups via a local isomorphism $f \co V \to W$. Consider a left Haar measure $\mu_G$ of $G$ and its restriction $\mu_G^{V}$ on $V$. Then the image measure $f(\mu_G^V)$ is the restriction to $W$ of a unique left Haar measure on $G'$. \end{lemma} We caution the reader that the property of being unimodular is not preserved under local isomorphisms. For an example of a non-unimodular locally compact group $G$ that is locally isomorphic to the unimodular locally compact group $\R$, see \cite[Exercise 5, VII.78]{Bou04b}. \paragraph{Splitting theorem} We will use the following form \cite[Theorem B p.~92]{Glu60} of Iwasawa's local splitting theorem. See also \cite[Exercise 1.6.8 p.~122]{Tao14} and \cite[Theorem 70 p.~337]{Pon66} for a version for the particular case of compact groups. \begin{thm} \label{cor-spitting-2} Every second-countable finite-dimensional locally compact group is locally isomorphic to the product of a totally disconnected compact group and a connected Lie group. \end{thm} \paragraph{Doubling metric measure spaces} A Borel regular measure $\mu$ on a metric space $(X,\dist)$ is called a doubling measure \cite[p.~76]{HKST15} if every ball in $X$ has positive and finite measure and if there exists a constant $c \geq 1$ such that \begin{equation} \label{doubling-def} \mu(B(x , 2r)) \leq c\,\mu(B(x,r)), \quad x \in X,\, r >0. \end{equation} Here $B(x,r) \ov{\mathrm{def}}{=} \{y \in X : \dist(x, y) < r\}$ is the open ball with radius $r$ centred at $x$. We call the triple $(X,\dist,\mu)$ a doubling metric measure space if $\mu$ is a doubling measure on $X$. Such a space $X$ is separable as a topological space by \cite[p.~76]{HKST15}. We refer to the paper \cite{SoT19} for more information on the least doubling constant $\inf\{ c \text{ as in } \eqref{doubling-def}: \mu\text{ doubling measure on }(X,\dist)\}$ of a metric space $(X,\dist)$. We introduce and will use the weaker notion of <<doubling measure for small balls>>, obtained by replacing the inequality \eqref{doubling-def} by \begin{equation} \label{doubling-def-local} \mu(B(x,2r)) \leq c\, \mu(B(x,r)), \quad x \in X,\, r \in (0,\tfrac{1}{2}]. \end{equation} \paragraph{Carnot-Caratheodory distances} Consider a connected Lie group $G$ equipped with a left Haar measure $\mu_G$ and identity element $e$. We consider a finite sequence $X \ov{\mathrm{def}}{=}(X_1,\ldots,X_m)$ of left invariant vector fields whose generated Lie algebra is the Lie algebra $\frak{g}$ of the Lie group $G$ and such that the vectors $X_1(e),\ldots, X_m(e)$ are linearly independent. We say that $X$ is a family of left invariant H\"ormander vector fields.
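For instance (a simple special case, recorded only for orientation), on the abelian Lie group $G = \R^n$ equipped with Lebesgue measure one may take $m = n$ and $X_k = \partial_{x_k}$ for $1 \leq k \leq n$; the Carnot-Carath\'eodory distance \eqref{distance-Carnot} constructed below is then the Euclidean distance, so that \[ \mu_G(B(e,r)) = \omega_n\, r^n, \quad r > 0, \] where $\omega_n$ denotes the Lebesgue measure of the Euclidean unit ball of $\R^n$, and the estimate \eqref{Equivalence-measure-ball} below holds with local dimension $d = n$ and $c_1 = c_2 = \omega_n$.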
Let $\gamma \colon [0,1] \to G$ be an absolutely continuous path such that $\dot\gamma(t)$ belongs to the subspace $\Span \{ X_1|_{\gamma(t)}, \ldots, X_m|_{\gamma(t)} \}$ for almost every $t \in [0,1]$. If $\dot\gamma(t) = \sum_{k=1}^m \dot\gamma_k(t) \, X_k|_{\gamma(t)}$ for almost every $t \in [0,1]$, where each function $\dot\gamma_k$ is measurable, we can define the length of $\gamma$ by $$ \ell(\gamma) \ov{\mathrm{def}}{=} \int_0^1 \Big( \sum_{k=1}^m |\dot\gamma_k(t)|^2 \Big)^{1/2} \d t, $$ which belongs to $[0,\infty]$. For any $s,s' \in G$, there exists such a path $\gamma \co [0,1] \to G$ with finite length such that $\gamma(0) = s$ and $\gamma(1) = s'$. If $s,s' \in G$ then we define the Carnot-Carath\'eodory distance \begin{equation} \label{distance-Carnot} \dist_\CC(s,s') \ov{\mathrm{def}}{=} \inf_{\gamma(0)=s,\gamma(1)=s'} \ell(\gamma) \end{equation} between $s$ and $s'$ to be the infimum of the lengths of all such paths with $\gamma(0) = s$ and $\gamma(1) = s'$. Then it is known that $\dist_\CC$ is a left invariant distance on $G$, inducing the same topology as the one of $G$, see \cite[Proposition III.4.1 p.~39]{VSCC92} and \cite[pp.~22-23]{DtER03}. By \cite[p.~124]{VSCC92} there exist $c_1,c_2> 0$ and $d \in \N$ such that for all $r \in (0,1]$ we have \begin{equation} \label{Equivalence-measure-ball} c_1 \, r^d \leq \mu_G(B(e,r)) \leq c_2 \, r^d. \end{equation} The integer $d$ is called the local dimension of $(G,X)$. We infer that there exists $c > 0$ such that \eqref{doubling-def-local} is satisfied, i.e.~$\mu_G$ is a doubling measure for small balls. Indeed, by left invariance we have $\mu_G(B(x,r)) = \mu_G(B(e,r))$ for any $x \in G$ and, for any $r \in (0,\tfrac{1}{2}]$, we have $\mu_G(B(e,2r)) \ov{\eqref{Equivalence-measure-ball}}{\leq} c_2 (2r)^d \ov{\eqref{Equivalence-measure-ball}}{\leq} \frac{2^d c_2}{c_1}\, \mu_G(B(e,r))$. By \cite[Proposition 2.4 p.~199]{BEM13}, the metric measure space $(G,\dist_{\CC},\mu_G)$ is a doubling metric measure space if and only if the Lie group $G$ has polynomial growth. Recall finally that the connected component of a Lie group is second-countable by \cite[Proposition 9.1.15 p.~293]{HiN12}. \paragraph{Construction of some neighborhoods} We start with a technical result. \begin{lemma} \label{lem-Lie-group-estimate} Let $G$ be a second-countable locally compact group equipped with a left invariant distance $\dist$ and a left Haar measure $\mu_G$ which is doubling for small balls. There exists a sequence $(B_j)$ of open balls $B_j \ov{\mathrm{def}}{=} B(e,r_j)$ with decreasing radius $r_j \to 0$ satisfying \begin{equation} \label{Lie-estimate} \mu_G(B_j)^3 \leq c^3\int_{B_j} \mu_G(B_j \cap sB_j)^2 \d \mu_G(s), \quad j \geq 1, \end{equation} where $c$ is a constant satisfying \eqref{doubling-def-local}. \end{lemma} \begin{proof} Assume that $0 < \epsi < 1$. For any integer $j \geq 1$, we introduce the ball $B_j \ov{\mathrm{def}}{=} B \big(e,\frac{\epsi}{j}\big)$. We have \begin{equation} \label{mesure-Vj} \mu_G(B_{j}) = \mu_G\big(B \big(e,\tfrac{\epsi}{j}\big)\big) \ov{\eqref{doubling-def-local}}{\leq} c\,\mu_G\big(B \big(e,\tfrac{\epsi}{2j}\big)\big) =c \,\mu_G(B_{2j}). \end{equation} For any element $s$ in the open ball $B_{2j}=B(e,\frac{\epsi}{2j})$, we will show that \begin{equation} \label{inclusion-balls} B\big(e,\tfrac{\epsi}{2j}\big) \subset B\big(e,\tfrac{\epsi}{j}\big) \cap sB\big(e,\tfrac{\epsi}{j}\big), \quad \text{i.e.} \quad B_{2j} \subset B_j \cap sB_j. \end{equation} Indeed, if $r \in G$ satisfies $\dist(e,r) < \frac{\epsi}{2j}$ we have obviously $r \in B(e,\frac{\epsi}{j})$ and using left invariance of the distance, we obtain $$ \dist(e,s^{-1}r) =\dist(s,r) \leq \dist(s,e) + \dist(e,r) < \frac{\epsi}{2j} + \frac{\epsi}{2j} =\frac{\epsi}{j}.
$$ Hence $s^{-1}r \in B(e,\frac{\epsi}{j})$ and consequently $r \in sB(e,\frac{\epsi}{j})$, so the claim \eqref{inclusion-balls} is proved. For any integer $j \geq 1$, we deduce that \begin{align*} \MoveEqLeft c^3\int_{B_j} \mu_G(B_j \cap sB_j)^2 \d \mu_G(s) \geq c^3\int_{B_{2j}} \mu_G(B_j \cap sB_j)^2 \d \mu_G(s) \\ &\ov{\eqref{inclusion-balls}}{\geq} c^3\int_{B_{2j}} \mu_G(B_{2j})^2 \d\mu_G(s) = c^3\,\mu_G(B_{2j})^3 \ov{\eqref{mesure-Vj}}{\geq} \mu_G(B_j)^3. \end{align*} \end{proof} We continue by proving another technical result for totally disconnected groups. \begin{lemma} \label{lem-disconnected-group-estimate} Let $G$ be a second-countable totally disconnected locally compact group equipped with a left Haar measure $\mu_G$. Then there exists a basis $(K_j)$ of symmetric open compact neighborhoods $K_j$ of $e$ such that \begin{equation} \label{disco-estim} \mu_G(K_j)^3 =\int_{K_j} \mu_G(K_j \cap s K_j)^2 \d \mu_G(s), \quad j \geq 1. \end{equation} \end{lemma} \begin{proof} According to Van Dantzig's theorem \cite[(7.7) Theorem p.~62]{HeR79} or \cite[Theorem 2.E.6 p.~44]{CoH16}, $G$ admits a basis $(K_j)$ of open compact subgroups. Clearly, each $K_j$ is symmetric (being a subgroup), is a neighborhood of $e$ (being open) and has finite measure (being compact). Since we have assumed $G$ to be second-countable, it follows from the proof of \cite[Theorem 2.E.6 p.~44]{CoH16} that the basis can be chosen as a sequence. Since $K_j$ is a subgroup, we have $K_j \cap sK_j = \emptyset$ for $s \not\in K_j$ and $K_j \cap sK_j = K_j$ for $s \in K_j$. Thus, \[ \int_{K_j} \mu_G\left(K_j \cap sK_j \right)^2 \d\mu_G(s) = \int_{K_j} \mu_G(K_j)^2 \d\mu_G(s) = \mu_G(K_j)^3 . \] \end{proof} Using a version of Iwasawa's local splitting theorem, we are now able to obtain a result for \textit{finite-dimensional} locally compact groups. \begin{lemma} \label{lem-finite-dimensional-group-estimate} Let $G$ be a second-countable finite-dimensional locally compact group equipped with a left Haar measure $\mu_G$. Then there exists a basis $(V_j)$ of symmetric open neighborhoods $V_j$ of $e$ and a constant $c > 0$ such that \begin{equation} \label{subtil-estimate} \mu_G(V_j)^3 \leq c^3\int_{V_j} \mu_G(V_j \cap sV_j)^2 \d \mu_G(s), \quad j \geq 1. \end{equation} \end{lemma} \begin{proof} We denote by $n$ the dimension of $G$. According to Theorem \ref{cor-spitting-2}, $G$ is locally isomorphic to the product of a totally disconnected compact group $K$ and a connected Lie group $L$ of dimension $n$. So there exists a neighborhood $V$ of $e_G$ which is homeomorphic to the direct product $W \times U$ of a neighborhood $W$ of the neutral element $e_K$ and an open neighborhood $U$ of the neutral element $e_L$. We identify $V$ with $W \times U$. By Lemma \ref{Lemma-locally-isomorphic-1}, we can choose left Haar measures $\mu_K$ and $\mu_L$ on the groups $K$ and $L$ such that \begin{equation} \label{equ-1-proof-lem-finite-dimensional-group-estimate} \mu_G(A \times B) = \mu_K(A) \mu_L(B), \quad A \subseteq W, \: B \subseteq U. \end{equation} Next, we consider the left invariant metric on the connected Lie group $L$ given by the Carnot-Carath\'eodory distance \eqref{distance-Carnot} with respect to some fixed family of left invariant H\"ormander vector fields. We consider a neighborhood basis sequence $(K_j)$ of open compact subgroups of $K$ whose existence is guaranteed by Van Dantzig's theorem, see the proof of Lemma \ref{lem-disconnected-group-estimate}.
Furthermore, we let $(B_j)$ be a sequence as in Lemma \ref{lem-Lie-group-estimate} for the Lie group $L$. For any integer $j \geq 1$, we put \begin{equation} \label{Def-V_j} V_j \ov{\mathrm{def}}{=} K_j \times B_j \end{equation} and we can suppose that $K_j \subseteq W$ and that $B_j \subseteq U$ for any integer $j \geq 1$. Recall that each $K_j$ (resp. each ball $B_j$) is symmetric, being a subgroup (resp. a ball with respect to a left invariant metric\footnote{\thefootnote. Note that $\dist_\CC(e,s)=\dist_\CC(s^{-1},e)$.}). We conclude that $V_j$ is symmetric as well. In view of the previous local product structure of the Haar measure described in \eqref{equ-1-proof-lem-finite-dimensional-group-estimate}, we deduce that for any integer $j$ \begin{align*} \MoveEqLeft c^3\int_{V_j} \mu_G(V_j \cap sV_j)^2 \d \mu_G(s) \ov{\eqref{Def-V_j}}{=} c^3\int_{K_j \times B_j} \mu_G(V_j \cap sV_j)^2 \d\mu_{K \times L}(s) \\ &\ov{\eqref{equ-1-proof-lem-finite-dimensional-group-estimate}, \eqref{Def-V_j}}{=} c^3\int_{K_j \times B_j} \mu_G\left(\big(K_j \times B_j\big) \cap (r,t)\big(K_j \times B_j\big)\right)^2 \d\mu_K(r) \d \mu_L(t) \\ &\ov{\eqref{equ-1-proof-lem-finite-dimensional-group-estimate}}{=} c^3 \int_{K_j \times B_j} \mu_K(K_j \cap rK_j)^2 \mu_L\left(B_j \cap t B_j\right)^2 \d\mu_K(r) \d\mu_L(t) \\ &= c^3\int_{K_j} \mu_K(K_j\cap rK_j)^2 \d\mu_K(r) \int_{B_j}\mu_L\left(B_j \cap t B_j\right)^2 \d\mu_L(t) \\ &\ov{\eqref{Lie-estimate}, \eqref{disco-estim}}{\geq} \mu_K(K_j)^3 \mu_L(B_j)^3 \ov{\eqref{equ-1-proof-lem-finite-dimensional-group-estimate}}{=} \mu_G\left(K_j \times B_j\right)^3 \ov{\eqref{Def-V_j}}{=} \mu_G(V_j)^3. \end{align*} \end{proof} Now, we show the usefulness of the previous lemmas. \begin{prop} \label{cor-2-referees-proof-step-1-weak-star-convergence-bis} Let $G$ be a second-countable unimodular locally compact group. Suppose that $1 \leq p \leq \infty$. Let $(V_j)$ be a basis of symmetric neighborhoods of $e$ and let $c > 0$ be a constant such that \begin{equation} \label{subtil-estimate-bis} \mu_G(V_j)^3 \leq c^3\int_{V_j} \mu_G(V_j \cap sV_j)^2 \d \mu_G(s), \quad j \geq 1. \end{equation} Moreover, we put \begin{equation} \label{def-fj} f_j \ov{\mathrm{def}}{=} 1_{V_j} \ast 1_{V_j},\quad x_j \ov{\mathrm{def}}{=} a_j \lambda(f_j) \quad \text{and} \quad y_j \ov{\mathrm{def}}{=} b_j \lambda(f_j) \end{equation} with \begin{equation} \label{aj} a_j \ov{\mathrm{def}}{=} \norm{\lambda(f_j)}_{p^*} \norm{\lambda(f_j)}_2^{-2} \quad \text{and} \quad b_j \ov{\mathrm{def}}{=} \norm{\lambda(f_j)}_{p^*}^{-1} . \end{equation} Then the sequences $(x_j)$ and $(y_j)$ satisfy the assumptions from Proposition \ref{th-convergence}. More precisely, we have $\norm{x_j}_p \leq c^3$ and $\norm{y_j}_{p^*} =1$. \end{prop} \begin{proof} We denote by $\mu_G$ a Haar measure on the group $G$. Consider any measurable subset $V$ of $G$ of measure $\mu_G(V) \in (0,\infty)$. Recall the isometric complex interpolation formula $\L^{p}(\VN(G))=(\L^\infty(\VN(G)), \L^2(\VN(G)))_{\frac{2}{p}}$ of \cite[(2.1) p.~1466]{PiX03}. In particular, by \cite[Corollary 2.8 p.~53]{Lun18}, we have the inequality $\norm{\cdot}_{\L^{p}(\VN(G))} \leq \norm{\cdot}_{\L^\infty(\VN(G))}^{1-\frac{2}{p}} \norm{\cdot}_{\L^2(\VN(G))}^{\frac{2}{p}} $.
Using this inequality and Young's inequality \cite[Corollary 20.14 p.~293]{HeR79} in the second inequality, we obtain \begin{align*} \MoveEqLeft \norm{\lambda(1_{V^{-1}} \ast 1_{V})}_p \ov{\eqref{composition-et-lambda}}{=} \norm{\lambda(1_{V})^*\lambda(1_{V})}_p =\norm{|\lambda(1_{V})|^2}_p = \norm{ \lambda(1_V)}_{2p}^2 \leq \norm{\lambda(1_V)}_\infty^{\frac{2}{p^*}}\norm{\lambda(1_V)}_2^{\frac{2}{p}} \\ &=\norm{\lambda(1_V)}_{\L^2(G) \to \L^2(G)}^{\frac{2}{p^*}} \norm{\lambda(1_V)}_2^{\frac{2}{p}} \leq \mu_G(V)^{\frac{2}{p^*}} \cdot \mu_G(V)^{\frac{1}{p}} = \mu_G(V)^{1 + \frac{1}{p^*}}. \end{align*} Taking $V = V_j$ and using the previous calculation also for $p^*$ in place of $p$, we deduce with Lemma \ref{lem-finite-dimensional-group-estimate} that \begin{align} \label{equ-1-proof-cor-2-referees-proof-step-1-weak-star-convergence} \MoveEqLeft \norm{\lambda(1_{V_j} \ast 1_{V_j})}_p \cdot \norm{\lambda(1_{V_j} \ast 1_{V_j})}_{p^*} \leq \mu_G(V_j)^{1 + \frac{1}{p^*} + 1 + \frac{1}{p}} \\ &= \mu_G(V_j)^{3} \ov{\eqref{subtil-estimate-bis}}{\leq} c^3\int_{V_j} \mu_G(V_j \cap sV_j)^2 \d \mu_G(s).\nonumber \end{align} On the other hand, using $V_j = V_j^{-1}$, we obtain \begin{align} \MoveEqLeft \norm{\lambda(1_{V_j} \ast 1_{V_j})}_2^2 \ov{\eqref{Convolution-formulas}}{=} \int_G \left| \int_G 1_{V_j}(t)1_{V_j}(t^{-1}s) \d \mu_G(t) \right|^2 \d \mu_G(s) \nonumber\\ &= \int_G \left| \int_G 1_{V_j}(t)1_{V_js^{-1}}(t^{-1}) \d \mu_G(t) \right|^2 \d \mu_G(s) =\int_G \left| \int_G 1_{V_j}(t)1_{sV_j}(t) \d \mu_G(t) \right|^2 \d \mu_G(s) \nonumber \\ &= \int_G \mu_G(V_j \cap sV_j)^2 \d\mu_G(s). \label{equ-1-proof-cor-2-referees-proof-step-1-weak-star} \end{align} Combining \eqref{equ-1-proof-cor-2-referees-proof-step-1-weak-star-convergence} and \eqref{equ-1-proof-cor-2-referees-proof-step-1-weak-star}, we see that \begin{equation} \label{divers-500} \norm{\lambda(1_{V_j} \ast 1_{V_j})}_p \cdot \norm{\lambda(1_{V_j} \ast 1_{V_j})}_{p^*} \leq c^3 \norm{\lambda(1_{V_j} \ast 1_{V_j})}_2^2. \end{equation} Note that with the choice of $a_j$ and $b_j$, we finally obtain \[ \norm{x_j}_p \ov{\eqref{def-fj}}{=} \norm{a_j \lambda(f_j)}_p \ov{\eqref{aj}}{=} \norm{\lambda(f_j)}_{p^*} \norm{\lambda(f_j)}_2^{-2}\norm{\lambda(f_j)}_p \ov{\eqref{def-fj} \eqref{divers-500}}{\leq} c^3 \] and $\norm{y_j}_{p^*} \ov{\eqref{def-fj}}{=} \norm{b_j \lambda(f_j)}_{p^*} = 1$, as well as $\tau_G(x_j y_j) \ov{\eqref{aj}}{=} \norm{\lambda(f_j)}_2^{-2} \tau_G(\lambda(f_j)^2 ) = 1$. \end{proof} A combination of Proposition \ref{cor-2-referees-proof-step-1-weak-star-convergence-bis} and the previous lemmas gives the next result, which is the main result of this section. \begin{cor} \label{Cor-38} \begin{enumerate} \item Let $G$ be a second-countable finite-dimensional unimodular locally compact group equipped with a Haar measure $\mu_G$. Then there exist sequences $(x_j)$ and $(y_j)$ satisfying the assumptions from Proposition \ref{th-convergence}. More precisely, we have $\norm{x_j}_p \leq c^3$ and $\norm{y_j}_{p^*} =1$ for some constant $c>0$ and any integer $j$. \item Let $G$ be a second-countable totally disconnected unimodular locally compact group equipped with a Haar measure $\mu_G$. Then there exist sequences $(x_j)$ and $(y_j)$ satisfying the assumptions from Proposition \ref{th-convergence} with $\norm{x_j}_p \leq 1$ and $\norm{y_j}_{p^*} =1$ for any integer $j$.
\end{enumerate} \end{cor} \subsection{Step 3: the projection on the space of Herz-Schur multipliers} \label{Section-Schur} The first part of the following result says that the unit ball of the space $\mathfrak{M}^{p}_\Omega$ of measurable Schur multipliers is closed for the weak* topology of the dual Banach space $\L^\infty(\Omega \times \Omega)$. \begin{lemma} \label{Lemma-symbol-weak} Let $\Omega$ be a $\sigma$-finite measure space. Suppose that $1 \leq p \leq \infty$. Let $(M_{\phi_j})$ be a bounded net of bounded Schur multipliers on the Schatten class $S^p_\Omega$ and suppose that $\phi$ is an element in $\L^\infty(\Omega \times \Omega)$ such that the net $(\phi_j)$ converges to $\phi$ for the weak* topology of $\L^\infty(\Omega \times \Omega)$. Then the function $\phi$ induces a bounded Schur multiplier on $S^p_\Omega$. Moreover, the net $(M_{\phi_j})$ converges to the operator $M_{\phi}$ for the weak operator topology of the space $\cal{B}(S^p_\Omega)$ (point weak* topology if $p = 1$) and \begin{equation} \label{estim-divers-35} \norm{M_{\phi}}_{S^p_\Omega \to S^p_\Omega} \leq \liminf_{j \to \infty} \norm{M_{\phi_j}}_{S^p_\Omega \to S^p_\Omega}. \end{equation} A similar statement is true upon replacing <<bounded>> by <<completely bounded>> and the norm $\norm{\cdot}_{S^p_\Omega \to S^p_\Omega}$ by the norm $\norm{\cdot }_{\cb, S^p_\Omega \to S^p_\Omega}$. \end{lemma} \begin{proof} Consider some functions $f,g \in \L^2(\Omega \times \Omega)$ such that $K_{f} \in S^p_\Omega$ and $K_{g} \in S^{p^*}_\Omega$. Note that we have $f\check{g} \in \L^1(\Omega \times \Omega)$. For any $j$, we have \begin{align*} \MoveEqLeft \left| \int_{\Omega \times \Omega} \phi_j f \check{g} \right| \ov{\eqref{dual-trace}}{=}\left| \big\langle M_{\phi_j}(K_f) , K_g \big\rangle_{S^p_\Omega,S^{p^*}_\Omega} \right| \leq \bnorm{M_{\phi_j}(K_f)}_{S^p_\Omega} \bnorm{K_g}_{S^{p^*}_\Omega} \\ &\leq \norm{M_{\phi_j}}_{S^p_\Omega \to S^p_\Omega} \bnorm{K_f}_{S^p_\Omega} \bnorm{K_g}_{S^{p^*}_\Omega}. \end{align*} Passing to the limit, we obtain \begin{align*} \MoveEqLeft \left| \big\langle K_{\phi f} ,K_g\big\rangle_{S^p_\Omega,S^{p^*}_\Omega} \right|\ov{\eqref{dual-trace}}{=} \left| \int_{\Omega \times \Omega} \phi f \check{g} \right| \leq \liminf_{j \to \infty} \norm{M_{\phi_j}}_{S^p_\Omega \to S^p_\Omega} \bnorm{K_f}_{S^p_\Omega} \bnorm{K_g}_{S^{p^*}_\Omega}. \end{align*} By density, we conclude that the function $\phi$ induces a bounded Schur multiplier on $S^p_\Omega$ with the estimate \eqref{estim-divers-35} on the norm of this operator. Using again the weak* convergence of the net $(\phi_j)$, we see that for any functions $f, g \in \L^2(\Omega \times \Omega)$ such that $K_f \in S^p_\Omega$ and $K_g \in S^{p^*}_\Omega$ \begin{align*} \MoveEqLeft \tr\big((M_{\phi}-M_{\phi_j})(K_f)K_g\big) =\tr\big(K_{(\phi-\phi_j)f} K_g\big) \ov{\eqref{dual-trace}}{=} \iint_{\Omega \times \Omega} (\phi-\phi_j) f\check{g} \\ &=\big\langle \phi-\phi_j,f\check{g} \big\rangle_{\L^\infty(\Omega \times \Omega),\L^1(\Omega \times \Omega)} \xra[\ j \ ]{} 0. \end{align*} By density, using an $\frac{\epsi}{4}$-argument and the boundedness of the net, we conclude\footnote{\thefootnote.
More precisely, if $X$ is a Banach space, if $E_1$ is a dense subset of $X$, if $E_2$ is a dense subset of $X^*$ and if $(T_j)$ is a bounded net in $\cal{B}(X)$ and $T$ is an element of $\cal{B}(X)$ such that $\langle T_j(x),x^*\rangle \xra[j]{} \langle T(x), x^*\rangle$ for any $x \in E_1$ and any $x^* \in E_2$, then the net $(T_j)$ converges to $T$ for the weak operator topology of the space $\cal{B}(X)$.} that the net $(M_{\phi_j})$ converges to the operator $M_{\phi}$ for the weak operator topology of $\cal{B}(S^p_\Omega)$ (point weak* topology if $p = 1$). Now, we prove the last sentence. For any functions $f_{kl},g_{kl} \in \L^2(\Omega \times \Omega)$ where $1 \leq k,l \leq N$, we have $f_{kl} \check{g}_{kl}\in \L^1(\Omega \times \Omega)$. For any $j$, we infer by \cite[Theorem 4.7 p.~49]{Pis98} that \begin{align*} \MoveEqLeft \left| \big\langle \big[ M_{\phi_{j}}(K_{f_{kl}})\big] , \big[K_{g_{kl}}\big] \big\rangle_{\M_N(S^p_\Omega),S^1_N(S^{p^*}_\Omega)} \right| \leq \norm{M_{\phi_j}}_{\cb,S^p_\Omega \to S^p_\Omega} \bnorm{\big[K_{f_{kl}}\big]}_{\M_N(S^p_\Omega)} \bnorm{\big[K_{g_{kl}}\big]}_{S^1_N(S^{p^*}_\Omega)}, \end{align*} that is, \begin{align*} \MoveEqLeft \left|\sum_{k,l=1}^N \int_{\Omega \times \Omega} \phi_j f_{kl} \check{g}_{kl} \right| \leq \norm{M_{\phi_j}}_{\cb,S^p_\Omega \to S^p_\Omega} \bnorm{\big[K_{f_{kl}}\big]}_{\M_N(S^p_\Omega)} \bnorm{\big[K_{g_{kl}}\big]}_{S^1_N(S^{p^*}_\Omega)}. \end{align*} Passing to the limit, we obtain \begin{align*} \MoveEqLeft \left|\sum_{k,l=1}^N \int_{\Omega \times \Omega} \phi f_{kl} \check{g}_{kl} \right| \leq \liminf_{j \to \infty} \norm{M_{\phi_j}}_{\cb,S^p_\Omega \to S^p_\Omega} \bnorm{\big[K_{f_{kl}}\big]}_{\M_N(S^p_\Omega)} \bnorm{\big[K_{g_{kl}}\big]}_{S^1_N(S^{p^*}_\Omega)}. \end{align*} We deduce that the function $\phi$ induces a completely bounded Schur multiplier on the Schatten space $S^p_\Omega$ with the suitable estimate on the completely bounded norm. \end{proof} If $1 \leq p < \infty$, note that the Schatten space $S^p_\Omega$ is a dual Banach space. So the Banach space $\CB(S^p_\Omega)$ is also a dual space with predual $S^p_\Omega \widehat{\ot} S^{p^*}_\Omega$, where $\widehat{\ot}$ denotes the operator space projective tensor product and the duality bracket is given by \begin{equation} \label{Belle-dualite} \langle T, x \ot y \rangle_{\CB(S^p_\Omega),S^p_\Omega \widehat{\ot} S^{p^*}_\Omega} =\big\langle T(x), y \big\rangle_{S^p_\Omega, S^{p^*}_\Omega}. \end{equation} \begin{lemma} \label{lem-Schur-weak-star-closed} Let $\Omega$ be a $\sigma$-finite measure space. \begin{enumerate} \item Let $1 \leq p < \infty$. Then the space $\mathfrak{M}^{p,\cb}_\Omega$ of completely bounded Schur multipliers is weak* closed in $\CB(S^p_\Omega)$ and the space $\mathfrak{M}^{p}_\Omega$ of bounded Schur multipliers is weak* closed in $\cal{B}(S^p_\Omega)$. \item The space $\mathfrak{M}^{\infty,\cb}_\Omega=\mathfrak{M}^{\infty}_\Omega$ of (completely) bounded Schur multipliers is weak* closed in the space $\CB(S^\infty_\Omega,\cal{B}(\L^2(\Omega)))$.\end{enumerate} \end{lemma} \begin{proof} We start by proving the first assertion. By the Banach-Dieudonn\'e theorem \cite[p.~154]{Hol75}, it suffices to show that the closed unit ball of the space $\mathfrak{M}^{p,\cb}_\Omega$ is weak* closed in the space $\CB(S^p_\Omega)$. Let $(M_{\phi_j})$ be a net in that unit ball converging for the weak* topology to some completely bounded map $T \co S^p_\Omega \to S^p_\Omega$.
We have for any $j$ the inequality $$ \norm{\phi_j}_{\L^\infty(\Omega \times \Omega)} \ov{\eqref{ine-infty}}{\leq} \norm{M_{\phi_j}}_{\cb,S^p_\Omega \to S^p_\Omega} \leq 1. $$ By Banach-Alaoglu's theorem, there exists a subnet of $(\phi_j)$ converging for the weak* topology to some function $\phi \in \L^\infty(\Omega \times \Omega)$. It remains to show that $T=M_\phi$. By \eqref{Belle-dualite}, we have $\big\langle M_{\phi_j}(x), y\big\rangle \xra[j]{} \langle T(x),y \rangle$ for any $x \in S^p_\Omega$ and any $y \in S^{p^*}_\Omega$. That means that the net $(M_{\phi_j})$ converges to $T$ for the weak operator topology (point weak* topology if $p = 1$). By Lemma \ref{Lemma-symbol-weak}, the net $(M_{\phi_j})$ converges to $M_\phi$. We conclude by uniqueness of the limit that $T = M_\phi$. The statement on the space $\mathfrak{M}^{p}_\Omega$ can be proved in a similar manner, using the predual $S^p_\Omega \hat{\ot} S^{p^*}_\Omega$ of the dual Banach space $\cal{B}(S^p_\Omega)$, where $\hat{\ot}$ denotes the Banach space projective tensor product. The second point is also similar. \end{proof} The following is essentially folklore. The case $p=\infty$ is explicitly proved in \cite[Proposition 5.2 p.~375]{SpT02} and \cite[Corollary 5.4 p.~183]{Spr04} with a slightly different method relying on the use of an invariant mean. We sketch a proof since it is important for us. \begin{prop} \label{prop-referee-step-2} Let $G$ be an amenable unimodular locally compact group. Suppose that $1 \leq p \leq \infty$. Then there exists a contractive projection $Q \co \mathfrak{M}^{p,\cb}_G \to \mathfrak{M}^{p,\cb}_{G}$ onto the space $\mathfrak{M}^{p,\cb,\HS}_{G}$ of completely bounded Herz-Schur multipliers acting on the Schatten space $S^p_G$ ($\cal{B}(\L^2(G))$ if $p=\infty$), preserving the complete positivity. Moreover, the obtained projections are compatible for all different values of $1 \leq p \leq \infty$. \end{prop} \begin{proof} Let $(F_j)$ be a F\o{}lner net in $G$ provided by the amenability of the group $G$. For any $K_f \in S^2_G \cap S^p_G$, the map $G \to S^p_G$, $r \mapsto \Ad(\rho_r) (K_f)$ is continuous since the composition of operators is strongly continuous on bounded sets by \cite[Proposition C.19 p.~517]{EFHN15} (recall the notation $\Ad(\rho_s)(x)=\rho_s x \rho_{s^{-1}}$). Similarly for any $M_\phi \in \mathfrak{M}^{p,\cb}_G$, the map $G \to S^p_G$, $r \mapsto \big[\Ad(\rho_r^*) M_\phi \Ad(\rho_r) \big](K_f)$ is also continuous, hence Bochner integrable on the compact $F_j$. Now, for any $K_f \in S^2_G \cap S^p_G$ and any $M_\phi \in \mathfrak{M}^{p,\cb}_G$, put \begin{equation} \label{Equa33} Q_j(M_\phi)(K_f) =\frac{1}{\mu_G(F_j)} \int_{F_j} \big[\Ad(\rho_r^*) M_\phi \Ad(\rho_r) \big](K_f) \d\mu_G(r). \end{equation} For any $K_f \in S^2_G \cap S^p_G$ and any completely bounded Schur multiplier $M_\phi \co S^p_G \to S^p_G$, we have \begin{align*} \MoveEqLeft \bnorm{Q_j(M_\phi)(K_f)}_{S^p_G} =\frac{1}{\mu_G(F_j)}\norm{\int_{F_j} \big[\Ad(\rho_r^*) M_\phi \Ad(\rho_r) \big](K_f)\d\mu_G(r)}_{S^p_G}\\ &\leq \frac{1}{\mu_G(F_j)}\int_{F_j} \norm{\big[\Ad(\rho_r^*) M_\phi \Ad(\rho_r) \big](K_f)}_{S^p_G} \d\mu_G(r) \leq \norm{M_\phi}_{S^p_G \to S^p_G}\norm{K_f}_{S^p_G}. \end{align*} A similar argument shows that $\norm{Q_j(M_\phi)}_{\cb, S^p_G \to S^p_G}\leq \norm{M_\phi}_{\cb, S^p_G \to S^p_G}$. Consequently, we have a well-defined contractive map $Q_j \co \mathfrak{M}^{p,\cb}_G \to \CB(S^p_G)$, $M_\phi \mapsto \frac{1}{\mu_G(F_j)} \int_{F_j} \Ad(\rho_r^*) M_\phi \Ad(\rho_r) \d\mu_G(r)$. 
If the linear map $M_\phi$ is completely positive then observe that the map $\Ad(\rho_r^*) M_\phi \Ad(\rho_r)$ is also completely positive. Thus the map $Q_j$ preserves the complete positivity. It is easy to check that $Q_j(M_\phi)$ is a Schur multiplier with symbol \begin{equation} \label{Divers-234} \phi_j(s,t) \ov{\mathrm{def}}{=} \frac{1}{\mu_G(F_j)} \int_{F_j}\phi(sr,tr) \d\mu_G(r) \end{equation} (Gelfand integral in $\L^\infty(G \times G)$). We continue with the case $1 \leq p < \infty$. Since the space $\mathfrak{M}^{p,\cb}_G$ is weak* closed in $\CB(S^p_G)$, the space $\mathfrak{M}^{p,\cb}_G$ is a dual Banach space. Hence $\cal{B}( \mathfrak{M}^{p,\cb}_G,\mathfrak{M}^{p,\cb}_G )$ is a dual space. By Banach-Alaoglu's theorem, the uniformly bounded net $(Q_j)$ admits a weak* accumulation point that we denote by $Q$ which is obviously a contraction. So we can suppose that $Q_j \to Q$ for the weak* topology. So, for each completely bounded Schur multiplier $M_\phi \co S^p_G \to S^p_G$ this implies that $Q(M_\phi) = \lim_{j} Q_{j}(M_\phi)$ in the weak operator topology. Recall that the weak* topology on $\CB(S^p_G)$ coincides on bounded subsets with the point weak* topology. Since $Q_{j}(M_\phi)$ belongs to the space $\mathfrak{M}^{p,\cb}_G$ and since the latter space is weak* closed in $\CB(S^p_G)$ according to Lemma \ref{lem-Schur-weak-star-closed}, we obtain that $Q(M_\phi)$ also belongs to the space $\mathfrak{M}^{p,\cb}_G$. Since each map $Q_j$ preserves complete positivity, by \cite[Lemma 2.10 2.~p.~15]{ArK23}, the map $Q$ also preserves complete positivity. For any completely bounded Schur multiplier $M_\phi \co S^p_G \to S^p_G$, it remains to show that $Q(M_\phi)$ is in addition a Herz-Schur multiplier. That is, for any $r_0 \in G$ we have to show that $\lim_{j} M_{\phi_{j}(sr_0,tr_0)} = \lim_{j} M_{\phi_{j}(s,t)}$. Fix some $r_0 \in G$ and some $j$. Using the F\o{}lner condition in the last line, we have \begin{align*} \MoveEqLeft \norm{M_{\phi_{j}(s,t)} - M_{\phi_{j}(sr_0,tr_0)} }_{\cb,S^p_G \to S^p_G} \\ &\ov{\eqref{Divers-234}}{=} \norm{\frac{1}{\mu_G(F_j)} \int_{F_j} M_{\phi(sr,tr)} \d\mu_G(r) - \frac{1}{\mu_G(F_j)} \int_{F_j} M_{\phi(sr_0u,tr_0u)} \d\mu_G(u) }_{\cb,S^p_G \to S^p_G} \\ & = \frac{1}{\mu_G(F_j)} \norm{\int_{F_j} M_{\phi(sr,tr)} \d\mu_G(r) - \int_{r_0 F_j} M_{\phi(sr,tr)} \d\mu_G(r)}_{\cb,S^p_G \to S^p_G} \\ & \leq \frac{1}{\mu_G(F_j)} \int_{F_j \bigtriangleup r_0 F_j} \norm{M_{\phi(sr,tr)}}_{\cb,S^p_G \to S^p_G} \d\mu_G(r) \\ &= \frac{\mu_G(F_j \bigtriangleup r_0 F_j)}{\mu_G(F_j)} \norm{M_\phi}_{\cb,S^p_G \to S^p_G} \xra[j \to \infty]{} 0. \end{align*} Using the weak* lower semicontinuity of the norm \cite[Theorem 2.6.14 p.~227]{Meg98}, we infer that \[ \norm{Q(M_\phi)-Q(M_{\phi_{(\cdot r_0, \cdot r_0)}})}_{\cb,S^p_G \to S^p_G} \leq \liminf_{j} \norm{M_{\phi_{j}(s,t)} - M_{\phi_{j}(sr_0,tr_0)}}_{\cb,S^p_G \to S^p_G} = 0. \] Finally, it is easy to see that $Q(M_{\phi_{(\cdot r_0, \cdot r_0)}})=Q(M_{\phi})_{(\cdot r_0, \cdot r_0)}$. The case $p = \infty$ is similar. In order to obtain that the mappings $Q^{(p)} \co \mathfrak{M}^{p,\cb}_G \to \mathfrak{M}^{p,\cb}_G$ are compatible for different values of $1 \leq p \leq \infty$, it suffices to observe that we can choose the indices $j'$ in the converging subnet $Q_{j'}^{(p)}$ independent of $p$, in the same manner as done in the proof of Corollary \ref{cor-the-compatible-complementation} below by means of an argument relying on Tychonoff's theorem. The proof is complete.
\end{proof} \begin{remark} \normalfont \label{Remark-Herz-Schur-amenability} We have a similar result for spaces of bounded Schur multipliers. \end{remark} \begin{remark} \normalfont \label{Remark-Herz-Schur-amenability-bis} If $G$ is compact, the proof is simpler. We do not need to use an approximation procedure. See \cite[Proposition 2.3 p.~365]{SpT02} for the case $p=\infty$. \end{remark} \subsection{Combining Steps 1-3: Complementation theorems} \label{Sec-Th-complementation} Let $G$ be a locally compact group. Note that the space $\cal{B}(\CB(\VN(G)),\CB(S^\infty_G,\cal{B}(\L^2(G))))$ is a dual space and admits the predual \begin{equation*} \label{equ-predual-bracket} \CB(\VN(G)) \hat \ot \big(S^\infty_G \widehat{\ot} S^1_G \big), \end{equation*} where $\hat{\ot}$ denotes the Banach space projective tensor product and where $\widehat{\ot}$ denotes the operator space projective tensor product. The duality bracket is given by \begin{equation} \label{Duality-bracket-gros} \big\langle P , T \ot (x \ot y) \big\rangle =\big\langle P(T) x, y \big\rangle_{\cal{B}(\L^2(G)),S^1_G}. \end{equation} Now, we prove one of our main results. \begin{thm} \label{thm-SAIN-tilde-kappa} Let $G$ be a second-countable unimodular inner amenable locally compact group. Then $G$ has property $(\kappa_\infty)$ with $\kappa_\infty(G) = 1$. More precisely, there exists a contractive projection $P_{G}^\infty \co \CB_{\w^*}(\VN(G)) \to \CB_{\w^*}(\VN(G))$ preserving the complete positivity onto the space $\mathfrak{M}^{\infty,\cb}(G)$ of completely bounded Fourier multipliers on the group von Neumann algebra $\VN(G)$. \end{thm} \begin{proof} Fix some finite subset $F$ of the group $G$. We can consider a sequence $(V_j^F)_j$ of subsets of $G$ satisfying the last point of Theorem \ref{thm-inner-amenable-Folner}. As in Lemma \ref{lem-SAIN-Herz-Schur}, if $T \co \VN(G) \to \VN(G)$ is a weak* continuous completely bounded map then we consider the elements $y_j^F \ov{\eqref{Def-ds-inner}}{=} c_j^F |\lambda(1_{V_j^F}) |^2$ in $\L^1(\VN(G)) \cap \VN(G)$ and the symbol $\phi_{j,T}^F \ov{\eqref{Def-ds-inner}}{=} \varphi_{1,y_j^F,T}$. Recall that $\norm{y_j^F}_{\L^1(\VN(G))} = 1$. \paragraph{Step 1} Consider the mapping $P_j^F \co \CB(\VN(G)) \to \CB(S^\infty_G,\cal{B}(\L^2(G)))$, $T \mapsto M_{\phi_{j,P_{\w^*}(T)}^F}$, where the projection $P_{\w^*} \co \CB(\VN(G)) \to \CB(\VN(G))$, preserving the complete positivity, is defined in \cite[Proposition 3.1 p.~24]{ArK23}. By Lemma \ref{Lemma-estimation-cb}, we have the estimate $$ \bnorm{M_{\phi_{j,T}^F}}_{\cb,S^\infty_G \to \cal{B}(\L^2(G))} =\bnorm{M_{\varphi_{1,y_j^F,T}}}_{\cb,S^\infty_G \to \cal{B}(\L^2(G))} \ov{\eqref{div-987}}{\leq} \norm{T}_{\cb,\VN(G) \to \VN(G)}. $$ Hence the maps $P_j^F$ belong to the unit ball of the space $\cal{B}(\CB(\VN(G)),\CB(S^\infty_G,\cal{B}(\L^2(G))))$. By Banach-Alaoglu's theorem, we can introduce a weak* accumulation point $P^{F} \co \CB(\VN(G)) \to \CB(S^\infty_G,\cal{B}(\L^2(G)))$. So, we have a net $(P_{j(k)}^F)$ which converges to $P^F$ in the weak* topology. Taking into account \eqref{Duality-bracket-gros}, this implies that the bounded net $(P_{j(k)}^F(T))$, that is $\big(M_{\phi_{j(k),P_{\w^*}(T)}^F}\big)$, converges in the point weak* topology of the space $\CB(S^\infty_G,\cal{B}(\L^2(G)))$ to $P^F(T)$. 
Since the weak* topology on the space $\CB(S^\infty_G,\cal{B}(\L^2(G)))$ coincides, essentially by the same proof as the one of \cite[Lemma 7.2 p.~85]{Pau02}, on bounded subsets with the point weak* topology, we conclude by the second part of Lemma \ref{lem-Schur-weak-star-closed} that the map $P^F(T) \co S^\infty_G \to \cal{B}(\L^2(G))$ is itself a Schur multiplier. Note that by the weak* lower semicontinuity of the norm \cite[Theorem 2.6.14 p.~227]{Meg98}, we have $$ \bnorm{P^F}_{\CB(\VN(G)) \to \CB(S^\infty_G,\cal{B}(\L^2(G)))} \leq \liminf_{k \to \infty} \bnorm{P_{j(k)}^F}_{\CB(\VN(G)) \to \CB(S^\infty_G,\cal{B}(\L^2(G)))} \leq 1. $$ We next show that the map $P^F$ preserves the complete positivity. Suppose that the map $T$ is completely positive. Using Lemma \ref{Lemma-estimation-cb}, we see that each map $M_{\phi_{j,P_{\w^*}(T)}^F}$ is completely positive. Since $P^F(T)$ is the limit in the point weak* topology of the $M_{\phi_{j(k),P_{\w^*}(T)}^F}$'s, the complete positivity of the maps $M_{\phi_{j(k),P_{\w^*}(T)}^F}$ carries over to that of $P^F(T)$ by \cite[Lemma 2.10 p.~15]{ArK23}. Now, we consider a weak* accumulation point $P^{(1)} \co \CB(\VN(G)) \to \CB(S^\infty_G,\cal{B}(\L^2(G)))$ of the net $(P^F)_F$ and, by the same reasoning as before, the map $P^{(1)}(T) \co S^\infty_G \to \cal{B}(\L^2(G))$ is again a completely bounded Schur multiplier and the map $P^{(1)}$ preserves the complete positivity. The map $P^{(1)}$ is contractive. \paragraph{Step 2} For any weak* continuous completely bounded map $T \co \VN(G) \to \VN(G)$, we claim that the map $P^{(1)}(T) \co S^\infty_G \to \cal{B}(\L^2(G))$ is in fact a Herz-Schur multiplier. It is easy to check that the weak* convergence of a subnet of $(M_{\phi_{j,P_{\w^*}(T)}^F})_j$ to $M_{\varphi^F}\ov{\mathrm{def}}{=} P^{F}(T)$ implies that $\varphi^F$ is a cluster point of $(\phi_{j,P_{\w^*}(T)}^F)_j$ for the weak* topology of the dual space $\L^\infty(G \times G)$. In the same manner, the symbol $\varphi$ of the Schur multiplier $P^{(1)}(T)$ is a cluster point of $(\varphi^F)_F$ for the weak* topology of $\L^\infty(G \times G)$. Thus according to Lemma \ref{lem-SAIN-Herz-Schur}, the function $\varphi$ is a Herz-Schur symbol. This is the step where we use the assumption of inner amenability on the group $G$. \paragraph{Step 3} By \cite{BoF84} and \cite[Theorem 5.3 p.~181]{Spr04}, we have an isometric map $I \co \mathfrak{M}^{\infty,\cb,\HS}_G \to \CB_{\w^*}(\VN(G))$, $M_{\varphi}^\HS \mapsto M_\varphi$ with range $\mathfrak{M}^{\infty,\cb}(G)$, preserving the complete positivity. Indeed, on the one hand, recall that by \cite[Proposition 6.11 p.~90]{ArK23}, a Fourier multiplier $M_\varphi \co \VN(G) \to \VN(G)$ is completely positive if and only if $\varphi$ is equal almost everywhere to a continuous positive definite function, i.e.~the kernel $(s,t) \mapsto \varphi(st^{-1})$ is of positive type \cite[p.~351]{BHV08} and that a bounded Fourier multiplier necessarily has a symbol that is equal almost everywhere to a continuous function (see the discussion \cite[p.~85]{ArK23}). On the other hand, the Herz-Schur multiplier $M_\varphi^\HS$ is completely positive if and only if $\varphi^\HS \co (s,t) \mapsto \varphi(st^{-1})$ is equal almost everywhere to a bounded and measurable function of positive type (see \cite[Proposition 3.3 p.~781 and Remark 4.8 p.~785]{Arh24}).
We introduce the linear map $P \ov{\mathrm{def}}{=} I \circ P^{(1)} \co \CB_{\w^*}(\VN(G)) \to \CB_{\w^*}(\VN(G))$ with values in the space $\mathfrak{M}^{\infty,\cb}(G)$ of completely bounded Fourier multipliers. By composition, this map is contractive and preserves the complete positivity. Finally, if $T = M_\varphi \co \VN(G) \to \VN(G)$ is a completely bounded Fourier multiplier, then for any $j$ the symbol $\phi_{j,T}^F$ of the Schur multiplier $P_j^F(T) \co S^\infty_G \to \cal{B}(\L^2(G))$ is given by \begin{align*} \MoveEqLeft \phi_{j,T}^F(s,t) \ov{\eqref{Def-ds-inner}}{=} \varphi_{1,y_j^F,T}(s,t) \ov{\eqref{Def-symbol-varphi-1}}{=} \tau_G\big( y_j^F \lambda_{s^{-1}} T(\lambda_{st^{-1}})\lambda_t\big) = \varphi(st^{-1}) \tau_G\big( y_j^F \lambda_{s^{-1}} \lambda_{st^{-1}}\lambda_t\big) \\ & = \varphi(st^{-1}) \tau_G\big(y_j^F\big) = \varphi(st^{-1}). \end{align*} Thus $P^{(1)}(T) = P^F(T) = M_{\varphi}^\HS$ and $P(T) = I\big(P^{(1)}(T)\big)= I(M_{\varphi}^\HS) = M_\varphi = T$. \end{proof}
Now, we state the following general complementation theorem for the space of completely bounded Fourier multipliers. The proof uses in a crucial way the fact that completely bounded Herz-Schur multipliers acting on $S^p_G$ are in one-to-one (linear, norm and order) correspondence with completely bounded $\L^p$-Fourier multipliers, thanks to the amenability of the group $G$.
\begin{thm} \label{thm-general-complementation} Let $G$ be a second-countable unimodular amenable locally compact group. Suppose that $1 < p < \infty$. Let $(f_j)$ and $(g_j)$ be nets of positive functions with compact support belonging to the space $\C_e(G)$ such that, with $x_j \ov{\mathrm{def}}{=} \lambda(f_j)$ and $y_j \ov{\mathrm{def}}{=} \lambda(g_j)$, we have \begin{itemize} \item for some positive constant $C$ we have $\norm{x_j}_{\L^p(\VN(G))}, \norm{y_j}_{\L^{p^*}(\VN(G))} \leq C$ for all $j$, \item $\tau_G(x_j y_j) = 1$ for all $j$, \item $\supp f_j \to \{e\}$ or $\supp g_j \to \{e\}$. \end{itemize} Then there exists a bounded projection $P_G^p \co \CB(\L^p(\VN(G))) \to \CB(\L^p(\VN(G)))$ onto the space $\mathfrak{M}^{p,\cb}(G)$ of completely bounded Fourier multipliers with the properties \begin{enumerate} \item $\norm{P_G^p} \leq C^2$, \item $P_G^p(T)$ is completely positive whenever $T$ is completely positive. \end{enumerate} \end{thm}
\begin{proof} The proof consists of several steps.
\paragraph{Step 1} Recall that the function $\phi_{j,T} \ov{\mathrm{def}}{=} \varphi_{x_j,y_j,T}$ belonging to the space $\L^\infty(G \times G)$ is defined in \eqref{def-symbol-phi-alpha}. Consider the linear map $P_j \co \CB(\L^p(\VN(G))) \to \CB(S^p_G)$, $T \mapsto M_{\phi_{j,T}}$. Using Lemma \ref{Lemma-estimation-cb}, we obtain the estimate \begin{align*} \MoveEqLeft \norm{P_j(T)}_{\cb,S^p_G \to S^p_G} \ov{\eqref{div-987}}{\leq} \norm{T}_{\cb, \L^p(\VN(G)) \to \L^p(\VN(G))} \norm{x_j}_{\L^p(\VN(G))} \norm{y_j}_{\L^{p^*}(\VN(G))} \\ &\leq C^2 \norm{T}_{\cb,\L^p(\VN(G)) \to \L^p(\VN(G))}, \end{align*} according to the assumptions of the theorem. Then $(P_j)$ is a bounded net in the space $\cal{B}(\CB(\L^p(\VN(G))),\CB(S^p_G))$, which is a dual space with predual \begin{equation*} \label{equ-predual-bracket-bis} \CB(\L^p(\VN(G))) \hat \ot \big(S^p_G \widehat{\ot} S^{p^*}_G \big), \end{equation*} where $\hat{\ot}$ denotes the Banach space projective tensor product and where $\widehat{\ot}$ denotes the operator space projective tensor product.
The duality bracket is given by \begin{equation} \label{Derniere-eq} \big\langle P , T \ot (x \ot y) \big\rangle =\big\langle P(T) x, y \big\rangle_{S^p_G,S^{p^*}_G}. \end{equation} By the Banach-Alaoglu theorem, the net $(P_j)$ admits a subnet $(P_{j(k)})$ which converges to some element $P^{(1)}$ in the weak* topology. With \eqref{Derniere-eq}, we see that for any $T \in \CB(\L^p(\VN(G)))$ the net $(P_{j(k)}(T))$ converges to $P^{(1)}(T)$ in the weak operator topology. Observe that the weak* topology on the space $\CB(S^p_G)$ coincides on bounded subsets with the weak operator topology by \cite[Lemma 7.2 p.~85]{Pau02}. We conclude by Lemma \ref{lem-Schur-weak-star-closed} that $P^{(1)}(T)$ is a Schur multiplier. Lemma \ref{Lemma-estimation-cb} says that the Schur multiplier $P_j(T) \co S^p_G \to S^p_G$ is completely positive if $T$ is completely positive. By \cite[Lemma 2.10 p.~15]{ArK23}, the weak operator limit $P^{(1)}(T)$ is also completely positive if $T$ is so. Suppose that $T = M_\phi$ is a Fourier multiplier. By Proposition \ref{th-convergence}, the net $(\phi_{j,T})_j$ of elements of $\L^\infty(G \times G)$ converges in the weak* topology to the function $\phi^\HS \co (s,t) \mapsto \phi(st^{-1})$. By Lemma \ref{Lemma-symbol-weak}, we deduce that the net $(P_{j}(T))$ converges to the Schur multiplier $M_{\phi^\HS}$ in the weak operator topology. So the symbol of the Schur multiplier $P^{(1)}(T)$ is $\phi^\HS$.
\paragraph{Step 2} We will use the contractive projection onto the space $\mathfrak{M}^{p,\cb,\HS}_{G}$ of completely bounded Herz-Schur multipliers of Proposition \ref{prop-referee-step-2}. Seeing this projection as a map $Q \co \mathfrak{M}^{p,\cb}_{G} \to \mathfrak{M}^{p,\cb,\HS}_{G}$, we let $P^{(2)}\ov{\mathrm{def}}{=} Q \circ P^{(1)} \co \CB(\L^p(\VN(G))) \to \mathfrak{M}^{p,\cb,\HS}_{G}$. Then, by this proposition together with Step 1, the map $P^{(2)}$ satisfies $\bnorm{P^{(2)}} \leq C^2$, preserves the complete positivity, and $P^{(2)}(M_{\varphi})$ again has symbol $(s,t) \mapsto \varphi(st^{-1})$.
\paragraph{Step 3} With the result of the paper \cite[Corollary 5.3 p.~7008]{CaS15} (see also \cite{NeR11}), we can consider the contractive\footnote{\thefootnote. The result \cite[Corollary 5.3 p.~7008]{CaS15} is stated for a particular class of symbols. However, the proof shows that the result is true for any completely bounded Herz-Schur multiplier on $S^p_G$.} map $I \co \mathfrak{M}^{p,\cb,\HS}_G \to \CB(\L^p(\VN(G)))$, $M_\varphi^\HS \mapsto M_\varphi$, with range the space $\mathfrak{M}^{p,\cb}(G)$ of completely bounded Fourier multipliers. As in the proof of Theorem \ref{thm-SAIN-tilde-kappa}, if the Herz-Schur multiplier $M_\varphi^\HS \co S^p_G \to S^p_G$ is completely positive, it is easy to see that the Fourier multiplier $I(M_\varphi^\HS)$ is completely positive. We finally put $P_G^p \ov{\mathrm{def}}{=} I \circ P^{(2)} \co \CB(\L^p(\VN(G))) \to \CB(\L^p(\VN(G)))$ and need to check the claimed properties. First, we have the estimate $\norm{P_G^p} \leq \norm{I} \norm{P^{(2)}} \leq C^2$. Second, if the operator $T$ is completely positive, then $P^{(2)}(T)$ is also completely positive, and consequently, $P_G^p(T) = I\big(P^{(2)}(T)\big)$ inherits this property. Third, if $T = M_{\varphi}$ is itself a Fourier multiplier, then $P^{(2)}(T)$ is the Herz-Schur multiplier with symbol $(s,t) \mapsto \varphi(st^{-1})$. So the map $P_G^p(T)$ is again the Fourier multiplier with symbol $\varphi$.
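Schematically, on a completely bounded Fourier multiplier $T = M_\varphi$ the three steps act as
$$
T = M_\varphi \;\longmapsto\; P^{(1)}(T) = M_{\varphi}^\HS \;\longmapsto\; P^{(2)}(T) = Q\big(M_{\varphi}^\HS\big) = M_{\varphi}^\HS \;\longmapsto\; P_G^p(T) = I\big(M_{\varphi}^\HS\big) = M_\varphi = T,
$$
where $M_\varphi^\HS$ denotes the Herz-Schur multiplier with symbol $(s,t) \mapsto \varphi(st^{-1})$ and where $Q$ fixes $M_\varphi^\HS$ since the latter already belongs to $\mathfrak{M}^{p,\cb,\HS}_G$. This shows that $P_G^p$ fixes $\mathfrak{M}^{p,\cb}(G)$ and is therefore a projection onto this space.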
\end{proof} Note that the proof of the previous result can be adapted to the cases $p=1$ and $p=\infty$ using Section \ref{Section-p=1-p-infty}. It is worth mentioning that the construction of the projection in the case $p=\infty$ differs from that in Theorem \ref{thm-SAIN-tilde-kappa}. Combining this result with Example \ref{Essai}, we obtain the following corollary.
\begin{cor}\label{cor-the-full-referees-complementation} Let $G$ be a second-countable unimodular amenable locally compact group. Let $1 < p < \infty$ be such that $\frac{p}{p^*}$ is rational. Then there exists a contractive projection \[ P^p_G \co \CB(\L^p(\VN(G))) \to \CB(\L^p(\VN(G))) \] onto the space $\mathfrak{M}^{p,\cb}(G)$ of completely bounded Fourier multipliers such that the map $P^p_G(T)$ is completely positive whenever $T$ is completely positive. \end{cor}
\begin{remark}\normalfont Going through the different steps of the proof, one sees that the projection in Corollary \ref{cor-the-full-referees-complementation} has the property that $P^p_G(T)^* = P^{p^*}_G(T^*)$ for any completely bounded map $T \co \L^p(\VN(G)) \to \L^p(\VN(G))$ and any $1 \leq p < \infty$. Here, we emphasize that $P = P^p_G$ depends on $1 \leq p \leq \infty$. However, it is not clear that the maps $P^p_G(T)$ and $P^q_G(T)$ coincide for $1 \leq p,q \leq \infty$ and an operator $T$ acting on both $\L^p(\VN(G))$ and $\L^q(\VN(G))$. \end{remark}
Using Theorem \ref{thm-general-complementation}, we obtain the following important compatibility property of the projections.
\begin{cor} \label{cor-the-compatible-complementation} Any second-countable unimodular finite-dimensional amenable locally compact group $G$ has property $(\kappa)$. More precisely, for all $1 \leq p \leq \infty$ there exists a bounded projection \[ P^p_G \co \CB(\L^p(\VN(G))) \to \CB(\L^p(\VN(G))) \] onto the subspace $\mathfrak{M}^{p,\cb}(G)$ (resp.\ $P^\infty_G \co \CB_{\w^*}(\VN(G)) \to \CB_{\w^*}(\VN(G))$ onto $\mathfrak{M}^{\infty,\cb}(G)$) with the properties \begin{enumerate} \item $\norm{P_G^p} \leq C$, where the constant $C$ depends on $G$ but not on $p$. \item $P^p_G(T)$ is completely positive whenever the map $T$ is completely positive. \item If $T$ belongs to $\CB(\L^p(\VN(G)))$ and to $\CB(\L^q(\VN(G)))$ for two values $1 \leq p, q \leq \infty$, then the Fourier multipliers $P^p_G(T)$ and $P^q_G(T)$ are compatible mappings coinciding on $\L^p(\VN(G)) \cap \L^q(\VN(G))$, i.e.~have the same symbol. \end{enumerate} \end{cor}
\begin{proof} Consider first the case $1 < p \leq \infty$. It suffices to pick the sequence $(f_j)$ in $\C_c(G)$ and the associated $x_j \ov{\eqref{def-fj}}{=} a_j \lambda(f_j)$, $y_j \ov{\eqref{def-fj}}{=} b_j \lambda(f_j)$ from Corollary \ref{cor-2-referees-proof-step-1-weak-star-convergence-bis}. Then apply this corollary together with Theorem \ref{thm-general-complementation} to deduce the first two points. Let us now show point 3. Due to the specific choice of $x_j = a_j \lambda(f_j)$ and $y_j = b_j \lambda(f_j)$ with $a_j \cdot b_j$ being independent of $p$ (see Corollary \ref{cor-2-referees-proof-step-1-weak-star-convergence-bis}), we deduce that $P_j^p(T)K_\phi = P_j^q(T)K_\phi$ for $K_\phi \in S^p \cap S^q$, where $P_j^p$ and $P_j^q$ denote the mappings from Step 1 of the proof of Theorem \ref{thm-general-complementation}.
Indeed, the symbol of the Schur multiplier $P_j^p(T)$ is $$ \phi_{j,T}(s,t) \ov{\eqref{def-symbol-phi-alpha}}{=} a_j b_j\tau_G \big(\lambda_t \lambda(f_j) \lambda_{s^{-1}} T(\lambda_s \lambda(f_j) \lambda_{t^{-1}}) \big), \quad s,t \in G, $$ and this expression does not depend on $p$ since the product $a_j b_j$ is independent of $p$. Consider the product $$ X \ov{\mathrm{def}}{=} \prod_{1 \leq p < \infty} \cal{B}(\CB(\L^p(\VN(G))),\CB(S^p_G)) \times \cal{B}(\CB(\VN(G)),\CB(S^\infty_G,\cal{B}(\L^2(G)))), $$ which is a topological space when equipped with the product of the weak* topologies. For every $j$, the tuple $P_j = (P_j^p)_{1 \leq p \leq \infty}$ lies in a fixed compact subspace of $X$, since $\norm{P_j^p} \leq C$ with a constant independent of $p$ and since Tychonoff's theorem on products of compact spaces applies here. Thus the net $(P_j)$ admits an accumulation point in $X$; that is, there is a single subnet $(j(k))$ such that for every $p$ we have $P^{(1),p} = \lim_{j(k)} P_{j(k)}^{p}$. We infer that $P^{(1),p}(T) K_\phi = P^{(1),q}(T) K_\phi$ for any $K_\phi \in S^p \cap S^q$, where $P^{(1),p}$ and $P^{(1),q}$ denote the mappings from Step 1 of the proof of Theorem \ref{thm-general-complementation}. Since the mapping $Q$ from Proposition \ref{prop-referee-step-2} is compatible for different values of $p$ and $q$, the maps $P^{(2),p}$ and $P^{(2),q}$ from Step 2 of the proof of Theorem \ref{thm-general-complementation} are also compatible. Finally, since the mappings $I=I_p$ from Step 3 of the proof of Theorem \ref{thm-general-complementation} are compatible for different values of $p$ and $q$, the mappings $P^p_G(T)$ and $P^q_G(T)$ are compatible for any $T$. \end{proof}
\begin{example} \normalfont \label{ex-totally-disconnected-contractive-complementation} Let $G$ be a second-countable unimodular finite-dimensional amenable locally compact group, so that Corollary \ref{cor-the-compatible-complementation} applies. Suppose that $G$ is in addition totally disconnected. Then the projections $P_G^p$ in that result are in fact contractions. Indeed, an inspection of the proof of Corollary \ref{cor-the-compatible-complementation} (see also Step 1 of the proof of Theorem \ref{thm-general-complementation}) shows that $\norm{P^p_G} \leq \sup_j \norm{x_j}_p \norm{y_j}_{p^*}$. According to Corollary \ref{Cor-38}, the right-hand side is at most $1$. Therefore, Corollary \ref{cor-the-compatible-complementation} gives a variant of \cite[Theorem 6.38 p.~121]{ArK23}. Note that, in contrast to \cite[Theorem 6.38 p.~121]{ArK23}, Corollary \ref{cor-the-compatible-complementation} requires the amenability of the group $G$ in the cases $p=\infty$ and $p=1$. \end{example}
\begin{remark} \normalfont \label{rem-K2} We have resisted the temptation to write a matricial version of Theorem \ref{thm-SAIN-tilde-kappa} in the spirit of \cite[Theorem 4.2 p.~62]{ArK23}. It is likely that the same method works. \end{remark}
\begin{remark} \normalfont \label{non-unimodular} We have made no attempt to treat non-unimodular locally compact groups. It is likely that the same strategy works in the case $p=\infty$. We leave this case as an exercise for the reader. \end{remark}
\section{Final remarks} \label{Sec-applications}
\subsection{A characterization of the amenability of unimodular locally compact groups} \label{Sec-charac-amen} We present a new characterization of amenability, in the same spirit as that of Theorem \ref{Th-Lau-Paterson}. Recall that property $(\kappa_\infty)$ is defined in Definition \ref{Defi-tilde-kappa}. \begin{thm} \label{thm-links-K-injective} Let $G$ be a second-countable unimodular locally compact group.
Then the following are equivalent. \begin{enumerate} \item The von Neumann algebra $\VN(G)$ is injective and $G$ has property $(\kappa_\infty)$. \item The group $G$ is amenable. \end{enumerate} Moreover, the implication 1. $\Rightarrow$ 2. is true without the assumption ``second-countable unimodular''. \end{thm}
\begin{proof} 1. $\Rightarrow$ 2. Since the locally compact group $G$ has property $(\kappa_\infty)$, Proposition \ref{conj-1-1-correspondance} gives the equality $$ \frak{M}^{\infty,\dec}(G) =\B(G). $$ Since $\VN(G)$ is injective, by \cite[Theorem 1.6 p.~184]{Haa85} each completely bounded operator $T \co \VN(G) \to \VN(G)$ is decomposable with $\norm{T}_{\cb,\VN(G) \to \VN(G)} \ov{\eqref{dec=cb}}{=} \norm{T}_{\dec,\VN(G) \to \VN(G)}$. In particular, we have $\frak{M}^{\infty,\cb}(G)=\frak{M}^{\infty,\dec}(G)$ isometrically. We deduce that $$ \frak{M}^{\infty,\cb}(G) =\B(G). $$ Using a result stated in \cite[p.~54]{Pis01} (see also \cite[p.~190]{Spr04}), which says that this equality is equivalent to the amenability of $G$, we conclude that the group $G$ is amenable. 2. $\Rightarrow$ 1. If the locally compact group $G$ is amenable, then by Theorem \ref{Th-Lau-Paterson} the von Neumann algebra $\VN(G)$ is injective. Note that the group $G$ is inner amenable by Example \ref{Ex-inner-2}. Consequently, for the second property, it suffices to use Theorem \ref{thm-SAIN-tilde-kappa}. \end{proof}
As a result, we can provide explicit examples of locally compact groups that do not satisfy property $(\kappa_\infty)$. \begin{cor} \label{Cor-gropes-without-K} Any non-amenable second-countable connected locally compact group $G$ does not have property $(\kappa_\infty)$. \end{cor} \begin{proof} By \cite[Corollary 7 p.~75]{Con76}, the von Neumann algebra $\VN(G)$ of a second-countable connected locally compact group is injective. Suppose that the group $G$ has property $(\kappa_\infty)$. By the implication 1. $\Rightarrow$ 2. of Theorem \ref{thm-links-K-injective}, we obtain that $G$ is amenable, which is a contradiction. \end{proof}
\begin{example} \normalfont \label{example-SL} This result applies for example to the connected locally compact group $\SL_2(\R)$, which is non-amenable by \cite[Example G.2.4 (i) p.~426]{BHV08} and unimodular by \cite[p.~4]{Lan75}, contradicting\footnote{\thefootnote. We would like to thank Adam Skalski for confirming this problem by email, \textit{on his own initiative}. The results of the nice paper \cite{DFSW16} remain correct.} the observation \cite[Remark 7.6 p.~24]{DFSW16}, stated for unimodular locally compact quantum groups. \end{example}
The following result generalizes Theorem \ref{Thm-conj-discrete-case} when second countability is not assumed. \begin{cor} \label{cor-inner-66} Let $G$ be a second-countable unimodular inner amenable locally compact group. Then the von Neumann algebra $\VN(G)$ is injective if and only if we have $\frak{M}^{\infty,\dec}(G)= \frak{M}^{\infty,\cb}(G)$. \end{cor} \begin{proof} The ``only if'' part is true by a result of Haagerup \cite[Corollary 2.8 p.~201]{Haa85}. Now, assume that $\frak{M}^{\infty,\dec}(G)= \frak{M}^{\infty,\cb}(G)$. By Proposition \ref{conj-1-1-correspondance} and Theorem \ref{thm-SAIN-tilde-kappa}, we have a bijection from the space $\frak{M}^{\infty,\cb}(G)$ onto the Fourier-Stieltjes algebra $\B(G)$. So the group $G$ is amenable by a result stated in \cite[p.~54]{Pis01}. We conclude that the von Neumann algebra $\VN(G)$ is injective by Theorem \ref{Th-Lau-Paterson}.
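In summary, the ``if'' part can be depicted as the chain
$$
\frak{M}^{\infty,\cb}(G) = \frak{M}^{\infty,\dec}(G) = \B(G) \;\Longrightarrow\; G \text{ is amenable} \;\Longrightarrow\; \VN(G) \text{ is injective},
$$
where the identification $\frak{M}^{\infty,\dec}(G) = \B(G)$ comes from Proposition \ref{conj-1-1-correspondance} combined with property $(\kappa_\infty)$, which $G$ has by Theorem \ref{thm-SAIN-tilde-kappa}.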
\end{proof}
\subsection{Bounded Herz-Schur multipliers and their relation to amenability} \label{Sec-Herz} Consider a locally compact group $G$. Whether the amenability of $G$ is characterized by the complementation of the space $\mathfrak{M}^{\infty,\HS}_{G}$ of (completely) bounded Herz-Schur multipliers in the space $\mathfrak{M}^{\infty}_G$ of (completely) bounded Schur multipliers over $\cal{B}(\L^2(G))$ is a well-known open question, explicitly stated in \cite[p.~184]{Spr04}. Here, we present the first progress on this classical question.
\begin{prop} Let $G$ be a second-countable unimodular locally compact group such that the von Neumann algebra $\VN(G)$ is injective. Suppose that there exists a bounded projection $Q \co \mathfrak{M}^{\infty}_G \to \mathfrak{M}^{\infty}_{G}$ onto the space $\mathfrak{M}^{\infty,\HS}_{G}$ of bounded Herz-Schur multipliers over the space $\cal{B}(\L^2(G))$ which preserves the complete positivity. Then the group $G$ is amenable. Conversely, if $G$ is amenable, then such a $Q$ exists according to Proposition \ref{prop-referee-step-2}. \end{prop}
\begin{proof} Since the von Neumann algebra $\VN(G)$ is injective, it suffices by Theorem \ref{thm-links-K-injective} to show that the group $G$ has property $(\kappa_\infty)$. Now, we follow the proof of Theorem \ref{thm-general-complementation}. For the first step, we use Lemma \ref{lemma-symbol-step-1-p=infty} and we define the linear map $P^{(1)}_G \co \CB_{\w^*}(\VN(G)) \to \CB(\cal{B}(\L^2(G)))$, $T \mapsto M_{\varphi_{1,y,T}}$, for some positive element $y$ in the space $\L^1(\VN(G)) \cap \VN(G)$ such that $\tau_G(y) = 1$. The second step of the proof is handled using the existence of the projection $Q$. In the case $p=\infty$, the third step does not require amenability. So we obtain property $(\kappa_\infty)$. \end{proof}
\begin{example} \normalfont By Example \ref{Example-almost}, the von Neumann algebra $\VN(G)$ of a second-countable almost connected unimodular locally compact group is injective. In particular, the previous result applies to any connected unimodular Lie group. Such a group is amenable if and only if there exists a bounded projection $Q \co \mathfrak{M}^{\infty}_G \to \mathfrak{M}^{\infty}_G$ onto the subspace $\mathfrak{M}^{\infty,\HS}_{G}$ that preserves complete positivity. \end{example}
\paragraph{Declaration of interest} None.
\paragraph{Competing interests} The authors declare that they have no competing interests.
\paragraph{Acknowledgment} The authors are deeply grateful to the anonymous referee of \cite{ArK23}. His report suggested a promising research direction which, when combined with unpublished results, new ideas, and further work, led to the development of this paper. We extend our thanks to Claire Anantharaman-Delaroche and Martin Walter for brief but valuable discussions, as well as to Matthew Daws, Adam Skalski, and Ami Viselter for earlier discussions and their continued encouragement. We are also grateful to Marco Peloso for his insights regarding \eqref{Equivalence-measure-ball} in the non-unimodular case. This work was supported by the ANR-18-CE40-0021 grant (HASCON project). The second author also acknowledges partial support from the ANR-17-CE40-0021 grant (FRONT project), funded by the French National Research Agency.
\small \begin{thebibliography}{79} \nocite{*} \bibitem[Arh60]{Arh60} A. Arhangel'skii. \newblock On the identity of the dimension $\ind G$ and $\dim G$ for locally bicompact groups. \newblock Dokl. Akad.
Nauk SSSR 132 980--981 (Russian); translated as Soviet Math. Dokl. 1 1960 670--671. \bibitem[Arh24]{Arh24} C. Arhancet. \newblock Dilations of markovian semigroups of measurable Schur multipliers. \newblock Canad. J. Math. 76 (2024), no. 3, 774--797. \bibitem[ArK23]{ArK23} C. Arhancet and C. Kriegler. \newblock Projections, multipliers and decomposable maps on noncommutative $\L^p$-spaces. \newblock M\'emoires de la Soci\'et\'e Math\'ematique de France (N.S.) (2023), no. 177. \bibitem[ArM18]{ArM18} A. V. Arhangel'skii and J. van Mill. \newblock Some aspects of dimension theory for topological groups. \newblock Indag. Math. (N.S.) 29 (2018), no. 1, 202--225. \bibitem[BEM13]{BEM13} L. Bandara, A. F. M. ter Elst and A. McIntosh. \newblock Square roots of perturbed subelliptic operators on Lie groups. \newblock Studia Math. 216 (2013), no. 3, 193--217. \bibitem[BHV08]{BHV08} B. Bekka, P. de la Harpe and A. Valette. \newblock Kazhdan's property (T). \newblock New Mathematical Monographs, 11. Cambridge University Press, Cambridge, 2008. \bibitem[BeH20]{BeH20} B. Bekka and P. de la Harpe. \newblock Unitary representations of groups, duals, and characters. \newblock Math. Surveys Monogr., 250. American Mathematical Society, Providence, RI, 2020. \bibitem[BeL76]{BeL76} J. Bergh and J. L\"ofstr\"om. \newblock Interpolation spaces. An Introduction. \newblock Springer-Verlag, Berlin, Heidelberg, New York, 1976. \bibitem[Bha07]{Bha07} R. Bhatia. \newblock Positive definite matrices. \newblock Princeton Series in Applied Mathematics. Princeton University Press, Princeton, NJ, 2007. \bibitem[BlM04]{BlM04} D. Blecher and C. Le Merdy. \newblock Operator algebras and their modules--an operator space approach. \newblock London Mathematical Society Monographs. New Series, 30. Oxford Science Publications. The Clarendon Press, Oxford University Press, Oxford, 2004. \bibitem[Bou04a]{Bou04a} N. Bourbaki. \newblock Integration. I. Chapters 1--6. Translated from the 1959, 1965 and 1967 French originals by Sterling K. Berberian. Elements of Mathematics. \newblock Springer-Verlag, Berlin, 2004. \bibitem[Bou04b]{Bou04b} N. Bourbaki. \newblock Integration. II. Chapters 7--9. Translated from the 1963 and 1969 French originals by Sterling K. Berberian. Elements of Mathematics (Berlin). \newblock Springer-Verlag, Berlin, 2004. \bibitem[Bou98]{Bou98} N. Bourbaki. \newblock General topology. Chapters 1--4. Translated from the French. Reprint of the 1989 English translation. Elements of Mathematics (Berlin). \newblock Springer-Verlag, Berlin, 1998. \bibitem[BoF84]{BoF84} M. Bo\.{z}ejko and G. Fendler. \newblock Herz-Schur multipliers and completely bounded multipliers of the Fourier algebra of a locally compact group. \newblock Boll. Un. Mat. Ital. A (6) 3 (1984), no. 2, 297--302. \bibitem[BrO08]{BrO08} N. P. Brown and N. Ozawa. \newblock $C^*$-algebras and finite-dimensional approximations. \newblock Graduate Studies in Mathematics, 88. American Mathematical Society, Providence, RI, 2008. \bibitem[CaS15]{CaS15} M. Caspers and M. de la Salle. \newblock Schur and Fourier multipliers of an amenable group acting on non-commutative $L^p$-spaces. \newblock Trans. Amer. Math. Soc. 367 (2015), no. 10, 6997--7013. \bibitem[CPPR15]{CPPR15} M. Caspers, J. Parcet, M. Perrin and \'E. Ricard. \newblock Noncommutative de Leeuw theorems. \newblock Forum Math. Sigma 3 (2015), e21. \bibitem[CGPT23]{CGPT23} J. M. Conde-Alonso, A. M. Gonzalez-Perez, J. Parcet and E. Tablate. \newblock Schur multipliers in Schatten-von Neumann classes.
\newblock Ann. of Math. (2) 198 (2023), no. 3, 1229--1260. \bibitem[Con76]{Con76} A. Connes. \newblock Classification of injective factors. Cases $\mathrm{II}_1$, $\mathrm{II}_\infty$, $\mathrm{III}_\lambda$, $\lambda \not=1$. \newblock Ann. of Math. (2) 104 (1976), no. 1, 73--115. \bibitem[CoH16]{CoH16} Y. Cornulier and P. de la Harpe. \newblock Metric Geometry of Locally Compact Groups. \newblock European Mathematical Society, 2016. \bibitem[CoH89]{CoH89} M. Cowling and U. Haagerup. \newblock Completely bounded multipliers of the Fourier algebra of a simple Lie group of real rank one. \newblock Invent. Math. 96 (1989), no. 3, 507--549. \bibitem[CrT17]{CrT17} J. Crann and Z. Tanko. \newblock On the operator homology of the Fourier algebra and its $cb$-multiplier completion. \newblock J. Funct. Anal. 273 (2017), no. 7, 2521--2545. \bibitem[DFSW16]{DFSW16} M. Daws, P. Fima, A. Skalski and S. White. \newblock The Haagerup property for locally compact quantum groups. \newblock J. Reine Angew. Math. 711 (2016), 189--229. \bibitem[DCH85]{DCH85} J. de Canni\`ere and U. Haagerup. \newblock Multipliers of the Fourier algebras of some simple Lie groups and their discrete subgroups. \newblock Amer. J. Math. 107 (1985), no. 2, 455--500. \bibitem[Dix77]{Dix77} J. Dixmier. \newblock $C\sp*$-algebras. Translated from the French by Francis Jellett. \newblock North-Holland Mathematical Library, Vol. 15. North-Holland Publishing Co., Amsterdam-New York-Oxford, 1977. \bibitem[Dix81]{Dix81} J. Dixmier. \newblock Von Neumann algebras. With a preface by E. C. Lance. Translated from the second French edition by F. Jellett \newblock North-Holland Mathematical Library, 27. North-Holland Publishing Co., Amsterdam-New York, 1981. \bibitem[Dou98]{Dou98} R. G. Douglas. \newblock Banach algebra techniques in operator theory. \newblock Grad. Texts in Math., 179. Springer-Verlag, New York, 1998. \bibitem[DtER03]{DtER03} N. Dungey, A.F.M. ter Elst and D. Robinson. \newblock Analysis on Lie groups with polynomial growth. \newblock Progress in Mathematics, 214. Birkh\"auser Boston, Inc., Boston, MA, 2003. \bibitem[EfR00]{EfR00} E. Effros and Z.-J. Ruan. \newblock Operator spaces. \newblock Oxford University Press, 2000. \bibitem[EiW17]{EiW17} M. Einsiedler and T. Ward. \newblock Functional analysis, spectral theory, and applications. \newblock Graduate Texts in Mathematics, 276. Springer, Cham, 2017. \bibitem[EFHN15]{EFHN15} T. Eisner, B. Farkas, M. Haase and R. Nagel. \newblock Operator theoretic aspects of ergodic theory. \newblock Graduate Texts in Mathematics, 272. Springer, Cham, 2015. \bibitem[Eng89]{Eng89} R. Engelking. \newblock General topology. \newblock Translated from the Polish by the author. Second edition. Sigma Series in Pure Mathematics, 6. Heldermann Verlag, Berlin, 1989. \bibitem[Ern64]{Ern64} J. Ernest. \newblock A new group algebra for locally compact groups. \newblock Amer. J. Math. 86 (1964), 467--492. \bibitem[Eym64]{Eym64} P. Eymard. \newblock L'alg\`ebre de Fourier d'un groupe localement compact (French). \newblock Bull. Soc. Math. France 92 (1964), 181--236. \bibitem[Fel60]{Fel60} J. M. G. Fell. \newblock The dual spaces of $C^*$-algebras. \newblock Trans. Amer. Math. Soc. 94 (1960), 365--403. \bibitem[Fol16]{Fol16} G. Folland. \newblock A course in abstract harmonic analysis. \newblock Second edition. Textbooks in Mathematics. CRC Press, Boca Raton, FL, 2016. \bibitem[Fre13]{Fre13} D. H. Fremlin. \newblock Measure theory. Vol. 4. Topological measure spaces. Part I. 
Second edition of the 2003 original. \newblock Torres Fremlin, Colchester, 2013. \bibitem[Glu60]{Glu60} V. M. Gluskov \newblock The structure of locally compact groups and Hilbert's fifth problem. \newblock Amer. Math. Soc. Transl. (2) 15 1960 55--93. \bibitem[GJP17]{GJP17} A. Gonz\'alez-P\'erez, M. Junge and J. Parcet. \newblock Smooth Fourier multipliers in group algebras via Sobolev dimension. \newblock Ann. Sci. \'Ec. Norm. Sup\'er. (4) 50 (2017), no. 4, 879--925. \bibitem[Haa78b]{Haa78b} U. Haagerup. \newblock On the dual weights for crossed products of von Neumann algebras. II. Application of operator-valued weights. \newblock Math. Scand. 43 (1978/79), no. 1, 119--140. \bibitem[Haa79]{Haa79} U. Haagerup. \newblock An Example of a nonnuclear $C^*$-Algebra, which has the metric approximation property. \newblock Invent. Math. 50 (1978/79), no. 3, 279--293. \bibitem[Haa85]{Haa85} U. Haagerup. \newblock Injectivity and decomposition of completely bounded maps. \newblock Operator algebras and their connections with topology and ergodic theory (Bu\c{s}teni, 1983), 170--222, Lecture Notes in Math., 1132, Springer, Berlin, 1985. \bibitem[Haa16]{Haa16} U. Haagerup. \newblock Group $\mathrm{C}^*$-algebras without the completely bounded approximation property. \newblock J. Lie Theory 26 (2016), no. 3, 861--887. \bibitem[HaL13]{HaL13} U. Haagerup and T. de Laat. \newblock Simple Lie groups without the approximation property. \newblock Duke Math. J. 162 (2013), no. 5, 925--964. \bibitem[Hah78]{Hah78} P. Hahn. \newblock The regular representations of measure groupoids. \newblock Trans. Amer. Math. Soc. 242 (1978), 35--72. \bibitem[HKST15]{HKST15} J. Heinonen, P. Koskela, N. Shanmugalingam and J. T. Tyson. \newblock Sobolev spaces on metric measure spaces. An approach based on upper gradients. \newblock New Mathematical Monographs, 27. Cambridge University Press, Cambridge, 2015. \bibitem[HeR70]{HeR70} E. Hewitt and K. A. Ross. \newblock Abstract harmonic analysis. Vol. II: Structure and analysis for compact groups. Analysis on locally compact Abelian groups. \newblock Die Grundlehren der mathematischen Wissenschaften, Band 152, Springer-Verlag, New York-Berlin, 1970. \bibitem[HeR79]{HeR79} E. Hewitt and K. A. Ross. \newblock Abstract harmonic analysis. Vol. I. Structure of topological groups, integration theory, group representations. Second edition. \newblock Grundlehren der Mathematischen Wissenschaften, 115. Springer-Verlag, Berlin-New York, 1979. \bibitem[HiN12]{HiN12} J. Hilgert and K.-H. Neeb. \newblock Structure and geometry of Lie groups. \newblock Springer Monogr. Math., New York, 2012. \bibitem[Hol75]{Hol75} R. B. Holmes. \newblock Geometric functional analysis and its applications. \newblock Graduate Texts in Mathematics, No. 24. Springer-Verlag, New York-Heidelberg, 1975. \bibitem[JMP14]{JMP14} M. Junge, T. Mei and J. Parcet. \newblock Smooth Fourier multipliers on group von Neumann algebras. \newblock Geom. Funct. Anal. 24 (2014), no. 6, 1913--1980. \bibitem[JMP18]{JMP18} M. Junge, T. Mei and J. Parcet. \newblock Noncommutative Riesz transforms-dimension free bounds and Fourier multipliers. \newblock J. Eur. Math. Soc. 20 (2018), no. 3, 529--595. \bibitem[JuR03]{JuR03} M. Junge and Z.-J. Ruan. \newblock Approximation properties for noncommutative $L_p$-spaces associated with discrete groups. \newblock Duke Math. J. 117 (2003), no. 2, 313--341. \bibitem[JuR04]{JuR04} M. Junge and Z.-J. Ruan. \newblock Decomposable maps on non-commutative $L_p$-spaces. 
\newblock Operator algebras, quantization, and noncommutative geometry, 355--381, Contemp. Math., 365, Amer. Math. Soc., Providence, RI, 2004. \bibitem[KaR97]{KaR97} R. V. Kadison and J. R. Ringrose. \newblock Fundamentals of the theory of operator algebras. Vol. I. Elementary theory. \newblock Reprint of the 1983 original. Graduate Studies in Mathematics, 15. American Mathematical Society, Providence, RI, 1997. \bibitem[KaL18]{KaL18} E. Kaniuth and A. T.-M. Lau. \newblock Fourier and Fourier-Stieltjes algebras on locally compact groups. \newblock Mathematical Surveys and Monographs, 231. American Mathematical Society, Providence, RI, 2018. \bibitem[Kon86]{Kon86} H. K\"onig. \newblock Eigenvalue distribution of compact operators. \newblock Oper. Theory Adv. Appl., 16. Birkh\"auser Verlag, Basel, 1986. \bibitem[Kun58]{Kun58} R. A. Kunze. \newblock $\L_p$ Fourier transforms on locally compact unimodular groups. \newblock Trans. Amer. Math. Soc. 89 (1958), 519--540. \bibitem[Kus05]{Kus05} J. Kustermans. \newblock Locally compact quantum groups. \newblock Quantum independent increment processes. I, 99--180, Lecture Notes in Math., 1865, Springer, Berlin, 2005. \bibitem[LaS11]{LaS11} V. Lafforgue and M. de la Salle. \newblock Noncommutative $L^p$-spaces without the completely bounded approximation property. \newblock Duke Math. J. 160 (2011), no. 1, 71--116. \bibitem[Lan75]{Lan75} S. Lang. \newblock $\mathrm{SL}_2(\R)$. \newblock Addison-Wesley Publishing Co., Reading, Mass.-London-Amsterdam, 1975. \bibitem[LaP91]{LaP91} A. T.-M. Lau and A. L. T. Paterson. \newblock Inner amenable locally compact groups. \newblock Trans. Amer. Math. Soc. 325 (1991), no. 1, 155--169. \bibitem[Li92]{Li92} B.-R. Li. \newblock Introduction to operator algebras. \newblock World Scientific Publishing, 1992. \bibitem[LoR87]{LoR87} V. Losert and H. Rindler. \newblock Conjugation-invariant means. \newblock Colloq. Math. 51 (1987), 221--225. \bibitem[Lun18]{Lun18} A. Lunardi. \newblock Interpolation theory. \newblock Appunti. Sc. Norm. Super. Pisa (N. S.), 16 [Lecture Notes. Scuola Normale Superiore di Pisa (New Series)]. Edizioni della Normale, Pisa, 2018. \bibitem[Meg98]{Meg98} R. E. Megginson. \newblock An introduction to Banach space theory. \newblock Graduate Texts in Mathematics, 183. Springer-Verlag, New York, 1998. \bibitem[MeR17]{MeR17} T. Mei and \'E. Ricard. \newblock Free Hilbert Transforms. \newblock Duke Math. J. 166, no. 11 (2017), 2153--2182. \bibitem[Mil71]{Mil71} P. Milnes. \newblock Identities of group algebras. \newblock Proc. Amer. Math. Soc. 29 (1971), 421--422. \bibitem[Muh90]{Muh90} P. S. Muhly. \newblock Coordinates in operator algebras. \newblock Preprint. \bibitem[NeR11]{NeR11} S. Neuwirth and \'E. Ricard. \newblock Transfer of Fourier multipliers into Schur multipliers and sumsets in a discrete group. \newblock Canad. J. Math. 63 (2011), no. 5, 1161--1187. \bibitem[Osb14]{Osb14} M. S. Osborne. \newblock Locally convex spaces. \newblock Graduate Texts in Mathematics, 269. Springer, Cham, 2014. \bibitem[Pal01]{Pal01} T. W. Palmer. \newblock Banach algebras and the general theory of $*$-algebras. Vol. 2. \newblock Encyclopedia of Mathematics and its Applications, 79. Cambridge University Press, Cambridge, 2001. \bibitem[PRS22]{PRS22} J. Parcet, \'E. Ricard, M. de la Salle. \newblock Fourier multipliers in $\SL_n(\mathbf{R})$. \newblock Duke Math. J. 171 (2022), no. 6, 1235--1297. \bibitem[PST25]{PST25} J. Parcet, M. de la Salle and E. Tablate. 
\newblock The local geometry of idempotent Schur multipliers. \newblock Forum Math. Pi 13 (2025), Paper No. e14. \bibitem[Pas60]{Pas60} B. Pasynkov. \newblock The coincidence of various definitions of dimensionality for locally bicompact groups. \newblock Dokl. Akad. Nauk SSSR 132 1035--1037 (Russian); translated as Soviet Math. Dokl. 1 1960 720--722. \bibitem[Pat88a]{Pat88a} A. L. T. Paterson. \newblock Amenability. \newblock Mathematical Surveys and Monographs, 29. American Mathematical Society, Providence, RI, 1988. \bibitem[Pat88b]{Pat88b} A. L. T. Paterson. \newblock The class of locally compact groups $G$ for which $C^*(G)$ is amenable. \newblock Harmonic analysis (Luxembourg, 1987), 226--237, Lecture Notes in Math., 1359, Springer, Berlin, 1988. \bibitem[Pat99]{Pat99} A. L. T. Paterson. \newblock Groupoids, inverse semigroups, and their operator algebras. \newblock Progress in Mathematics, 170. Birkh\"auser Boston, Inc., Boston, MA, 1999. \bibitem[Pat03]{Pat03} A. L. T. Paterson. \newblock The Fourier-Stieltjes and Fourier algebras for locally compact groupoids. \newblock Trends in Banach spaces and operator theory (Memphis, TN, 2001), 223--237, Contemp. Math., 321, Amer. Math. Soc., Providence, RI, 2003. \bibitem[Pat04]{Pat04} A. L. T. Paterson. \newblock The Fourier algebra for locally compact groupoids. \newblock Canad. J. Math. 56 (2004), no. 6, 1259--1289. \bibitem[Pau02]{Pau02} V. Paulsen. \newblock Completely bounded maps and operator algebras. \newblock Cambridge Univ. Press, 2002. \bibitem[Ped79]{Ped79} G. K. Pedersen. \newblock $C^{\ast}$-algebras and their automorphism groups. \newblock London Mathematical Society Monographs, 14. Academic Press, Inc. [Harcourt Brace Jovanovich, Publishers], London-New York, 1979. \bibitem[Pie84]{Pie84} J.-P. Pier. \newblock Amenable locally compact groups. \newblock Pure and Applied Mathematics (New York). A Wiley-Interscience Publication. John Wiley \& Sons, Inc., New York, 1984. \bibitem[Pis95]{Pis95} G. Pisier. \newblock Regular operators between non-commutative $L_p$-spaces. \newblock Bull. Sci. Math. 119 (1995), no. 2, 95--118. \bibitem[Pis98]{Pis98} G. Pisier. \newblock Non-commutative vector valued $L_p$-spaces and completely $p$-summing maps. \newblock Ast\'erisque, 247, 1998. \bibitem[Pis01]{Pis01} G. Pisier. \newblock Similarity problems and completely bounded maps, volume 1618 of Lecture Notes in Mathematics. \newblock Springer-Verlag, expanded edition, 2001. \bibitem[Pis03]{Pis03} G. Pisier. \newblock Introduction to operator space theory. \newblock Cambridge University Press, Cambridge, 2003. \bibitem[Pis20]{Pis20} G. Pisier. \newblock Tensor products of $C^*$-algebras and operator spaces--the Connes-Kirchberg problem. \newblock London Math. Soc. Stud. Texts, 96. Cambridge University Press, Cambridge, 2020. \bibitem[PiX03]{PiX03} G. Pisier and Q. Xu. \newblock Non-commutative $L^p$-spaces. \newblock 1459--1517 in Handbook of the Geometry of Banach Spaces, Vol. II, edited by W.B. Johnson and J. Lindenstrauss, Elsevier, 2003. \bibitem[Pon66]{Pon66} L. S. Pontryagin. \newblock Topological groups. Second edition. \newblock Gordon and Breach, 1966. \bibitem[RaW97]{RaW97} A. Ramsay and M. E. Walter. \newblock Fourier-Stieltjes algebras of locally compact groupoids. \newblock J. Funct. Anal. 148 (1997), no. 2, 314--367. \bibitem[Ren80]{Ren80} J. Renault. \newblock A groupoid approach to $C^*$-algebras. \newblock Lecture Notes in Mathematics, 793. Springer, Berlin, 1980.
\bibitem[Ren97]{Ren97} J. Renault. \newblock The Fourier algebra of a measured groupoid and its multipliers. \newblock J. Funct. Anal. 145 (1997), no. 2, 455--490. \bibitem[SiS08]{SiS08} A. Sinclair and R. Smith. \newblock Finite von Neumann algebras and masas. \newblock London Mathematical Society Lecture Note Series, 351. Cambridge University Press, Cambridge, 2008. \bibitem[SoT19]{SoT19} J. Soria and P. Tradacete. \newblock The least doubling constant of a metric measure space. \newblock Ann. Acad. Sci. Fenn. Math. 44 (2019), no. 2, 1015--1030. \bibitem[Spr04]{Spr04} N. Spronk. \newblock Measurable Schur multipliers and completely bounded multipliers of the Fourier algebras. \newblock Proc. London Math. Soc. (3) 89 (2004), no. 1, 161--192. \bibitem[SpT02]{SpT02} N. Spronk and L. Turowska. \newblock Spectral synthesis and operator synthesis for compact groups. \newblock J. London Math. Soc. (2) 66 (2002), no. 2, 361--376. \bibitem[Str81]{Str81} S. Stratila, \newblock Modular theory in operator algebras. \newblock Translated from the Romanian by the author. Editura Academiei Republicii Socialiste Rom\^ania, Bucharest, Abacus Press, Tunbridge Wells, 1981. \bibitem[StZ75]{StZ75} S. Stratila and L. Zsido. \newblock Lectures on von Neumann algebras. \newblock Translated from the Romanian by Silviu Teleman. Editura Academiei, Bucharest; Abacus Press, Tunbridge Wells, 1975. \bibitem[Str06]{Str06} M. Stroppel. \newblock Locally compact groups. \newblock EMS Textbooks in Mathematics. European Mathematical Society (EMS), Z{\"u}rich, 2006. \bibitem[Str74]{Str74} R. A. Struble. \newblock Metrics in locally compact groups. \newblock Compositio Math. 28 (1974), 217--222. \bibitem[Tak02]{Tak02} M. Takesaki. \newblock Theory of operator algebras. I. Reprint of the first (1979) edition. \newblock Encyclopaedia of Mathematical Sciences, 124. Operator Algebras and Non-commutative Geometry, 5. Springer-Verlag, Berlin, 2002. \bibitem[Tao14]{Tao14} T. Tao. \newblock Hilbert's fifth problem and related topics. \newblock Graduate Studies in Mathematics, 153. American Mathematical Society, Providence, RI, 2014. \bibitem[ToT10]{ToT10} I. G. Todorov and L. Turowska. \newblock Schur and operator multipliers. \newblock Banach algebras 2009, 385--410, Banach Center Publ., 91, Polish Acad. Sci. Inst. Math., Warsaw, 2010. \bibitem[Tod15]{Tod15} I. G. Todorov. \newblock Interactions between harmonic analysis and operator theory. \newblock Serdica Math. J. 41 (2015), no. 1, 13--34. \bibitem[Tri95]{Tri95} H. Triebel. \newblock Interpolation theory, function spaces, differential operators. Second edition. \newblock Johann Ambrosius Barth, Heidelberg, 1995. \bibitem[Vae01]{Vae01} S. Vaes. \newblock Locally compact quantum groups. With Johan Kustermans, Leonid Vainerman, Alfons Van Daele and Stanislaw Woronowicz. \newblock Lecture Notes School/Conference on Noncommutative Geometry and Quantum groups, Warsaw, 2001, Banach Centre Publication. \bibitem[VSCC92]{VSCC92} N. Varopoulos, L. Saloff-Coste and T. Coulhon. \newblock Analysis and geometry on groups. \newblock Cambridge Tracts in Mathematics, 100. Cambridge University Press, Cambridge, 1992. \bibitem[Wal70]{Wal70} M. E. Walter. \newblock Group duality and isomorphisms of Fourier and Fourier-Stieltjes algebras from a $W^*$-algebra point of view. \newblock Bull. Amer. Math. Soc. 76 (1970), 1321--1325. \bibitem[Wal74]{Wal74} M. E. Walter. \newblock A duality between locally compact groups and certain Banach algebras. \newblock J. Funct. Anal. 17 (1974), 131--160. 
\bibitem[Wot91]{Wot91} P. Wojtaszczyk. \newblock Banach spaces for analysts. \newblock Cambridge Studies in Advanced Mathematics, 25. Cambridge University Press, Cambridge, 1991. \end{thebibliography} {\footnotesize \vspace{0.1cm} \noindent C\'edric Arhancet\\ 6 rue Didier Daurat \\ 81000 ALBI \\ FRANCE\\ URL: \href{http://sites.google.com/site/cedricarhancet}{https://sites.google.com/site/cedricarhancet}\\ [email protected]\\ ORCID: 0000-0002-5179-6972 \\ \noindent Christoph Kriegler\\ Universit\'e Clermont Auvergne\\ CNRS\\ LMBP\\ F-63000 CLERMONT-FERRAND\\ FRANCE \\ URL: \href{https://lmbp.uca.fr/~kriegler/indexenglish.html}{https://lmbp.uca.fr/{\raise.17ex\hbox{$\scriptstyle\sim$}}\hspace{-0.1cm} kriegler/indexenglish.html}\\ [email protected] \\ ORCID: 0000-0001-8120-6251 } \end{document}
2205.13592v1
http://arxiv.org/abs/2205.13592v1
Generalized Riemann Functions, Their Weights, and the Complete Graph
\documentclass[10pt]{amsart} \setcounter{secnumdepth}{3} \usepackage{epsf,latexsym,amsmath,amssymb,amscd,datetime} \usepackage{amsmath,amsthm,amssymb,enumerate,eucal,url,calligra,mathrsfs} \usepackage{subcaption} \usepackage{graphicx} \usepackage{color} \newenvironment{jfnote}{ \bgroup \color{red} }{\egroup} \newenvironment{nfnote}{ \bgroup \color[rgb]{0.0,0.5,0.0} }{\egroup} \newenvironment{gennote}{ \bgroup \color{blue} }{\egroup} \newcommand{\ourv}[1]{{\bf #1}} \DeclareMathOperator{\Sky}{Sky} \DeclareMathOperator{\CoSky}{CoSky} \DeclareMathOperator{\Yext}{Yext} \DeclareMathOperator{\Rank}{Rank} \DeclareMathOperator{\MatRank}{MatRank} \newcommand{\red}{\color[rgb]{1.0,0.2,0.2}} \DeclareMathOperator{\SHom}{\mathscr{H}\text{\kern -3pt {\calligra\large om}}\,} \DeclareMathOperator{\SExt}{\mathscr{E}\text{\kern -2pt {\calligra\large xt}}\,\,} \newcommand{\cpxd}{\bullet} \newcommand{\opp}{{\rm opp}} \DeclareMathOperator{\Funct}{Funct} \DeclareMathOperator{\Derb}{{\cD}^{\rm b}} \newcommand{\catzero}{{\bf [0]}} \newcommand{\catone}{{\bf [1]}} \newcommand{\cattwo}{{\bf [2]}} \newcommand{\SupSky}{{\rm SupSky}} \newcommand{\Pre}{{\bf Pre}} \DeclareMathOperator{\Star}{Star} eld_{\{0\}}}} eld}} eld_{\{1\}}}} \DeclareMathOperator{\Ob}{Ob} \DeclareMathOperator{\Fl}{Fl} \DeclareMathOperator{\Lder}{{\bf L}} \DeclareMathOperator{\Rder}{{\bf R}} \IfFileExists{my_xrefs}{\input my_xrefs}{} \DeclareMathOperator{\VisitedSubgraph}{VisSub} \DeclareMathOperator{\Subgraph}{Subgraph} \newcommand{\Type}{{\rm Type}} \newcommand{\nontrav}{{\rm nontrav}} \newcommand{\trav}{{\rm trav}} \newcommand{\tangle}{{\rm tangle}} \newcommand{\subgr}{{\rm subgr}} \newcommand{\simple}{{\rm simple}} \newcommand{\var}{{\rm var}} x}{{\rm fix}} \newcommand{\orig}{{\rm orig}} \newcommand{\term}{{\rm term}} \newcommand{\orient}{{\rm or}} \newcommand{\walk}{{\rm walk}} \newcommand{\one}{{\rm one}} \newcommand{\ba}{\xi} \newcommand{\bba}{{\bec\xi}} \newcommand{\Ba}{\Xi} \DeclareMathOperator{\Subgr}{Subgr} \DeclareMathOperator{\Inc}{Inc} \newcommand{\naturals}{{\mathbb N}} \newcommand{\iodc}{{i.o.d.}} \newcommand{\Eor}{E^{\mathrm{or}}} \newcommand{\mec}[1]{{\bf #1}} \newcommand{\bec}[1]{{\boldsymbol #1}} \DeclareMathOperator{\Inj}{Inj} \newcommand{\injection}{\hookrightarrow} \newcommand{\surjection}{\twoheadrightarrow} \newcommand{\numb}[1]{\#\,#1} \DeclareMathOperator{\TraceIn}{TraceIn} \DeclareMathOperator{\trace}{Trace} \DeclareMathOperator{\Trace}{Trace} \DeclareMathOperator{\Perm}{Perm} \DeclareMathOperator{\PermSI}{PermSI} \DeclareMathOperator{\Cycle}{Cycle} \DeclareMathOperator{\CycleSI}{CycleSI} \newcommand{\intersect}{\cap} \newcommand{\union}{\cup} \newcommand{\Rexcept}{{R_{\rm except}}} \newcommand{\realeigs}{{\rm RSpec}} \DeclareMathOperator{\logO}{LogO} \DeclareMathOperator{\LogO}{LogO} \newcommand{\CNB}{{\rm ClosedNonBack}} \newcommand{\wt}{\widetilde} \newcommand{\anarboreal}{anarboreal} \newcommand{\Gr}{{\rm Gr}} \newcommand{\RowSpace}{{\rm RowSpace}} \newcommand{\GrassCo}{{\rm GrassCo}} \newcommand{\LocSub}{{\rm LocSub}} \newcommand{\weight}{{\rm Weight}} \newcommand{\double}{{\rm Double}} \newcommand{\excess}{{\rm excess}} \newcommand{\me}{{\bf m.e.}} \newcommand{\dme}{{\bf d.m.e.}} \newcommand{\lme}{{\bf l.m.e.}} \newcommand{\loce}{{\bf l.e.}} \newcommand{\gkd}{{\bf g.k.d.}} \newcommand{\gap}{{\rm gap}} \newcommand{\Schreier}{{\rm Sch}} \newcommand{\Sch}{{\rm Sch}} \newcommand{\realsToThe}[1]{\reals^{#1}} \newcommand{\vleft}[1]{V_{#1,{\rm L}}} \newcommand{\vright}[1]{V_{#1,{\rm R}}} 
\DeclareMathOperator{\TypeGraph}{TypeGraph} \newcommand{\dmax}{d_{\max{}}} \newcommand{\rhonew}{\rho^{\mathrm{new}}} \newcommand{\specnew}{\Spec^{\mathrm{new}}} \newcommand{\Specnew}{\Spec^{\mathrm{new}}} \newcommand{\Gnd}{\mathcal{G}_{n,d}} \newcommand{\probb}[1]{\Prob_{G\in \cC_n(B)}\left[ #1 \right]} \newcommand{\probw}[1]{\Prob_{G\in \cC_n(W_{d/2})}\left[ #1 \right]} \newcommand{\expectb}[1]{\EE_{G\in \cC_n(B)}\left[ #1 \right]} \newcommand{\expectw}[1]{\EE_{G\in \cC_n(W_{d/2})}\left[ #1 \right]} \newcommand{\rhoroot}[1]{\rho^{1/2}(H_{#1})} \usepackage{mathrsfs} \usepackage{amssymb} \usepackage{dsfont} \usepackage{verbatim} \usepackage{url} \newcommand{\PF}{{\rm PF}} \newcommand{\Edir}{E^{\mathrm{dir}}} \newcommand{\Fdir}{F^{\mathrm{dir}}} \newcommand{\Pdir}{P^{\mathrm{dir}}} \newcommand{\etafund}{{\eta_{\rm \,fund}}} \newcommand{\bscat}[1]{{\mathcal BS}_{#1}} \newcommand{\TF}{{\rm TF}} \newcommand{\MT}{{\rm ModHashTr}} \newcommand{\SNB}{{\rm StrNonBack}} \theoremstyle{plain} \newtheorem{theorem}{Theorem}[section] \newtheorem{lemma}[theorem]{Lemma} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{conjecture}[theorem]{Conjecture} \newtheorem{sublemma}[theorem]{Sublemma} \theoremstyle{definition} \newtheorem{definition}[theorem]{Definition} \newtheorem{exercise}[theorem]{Exercise} \newtheorem{problem}[theorem]{Problem} \newtheorem{xca}{Exercise}[section] \newenvironment{subxca} { \renewcommand{\theenumi}{\arabic{section}.\arabic{xca}(\alph{enumi})} \renewcommand{\labelenumi}{\theenumi} \begin{enumerate} } { \end{enumerate} \renewcommand{\theenumi}{\arabic{enumi}} \renewcommand{\labelenumi}{\theenumi} } \newenvironment{hardxca}{ \renewcommand{\thexca}{\arabic{section}.\arabic{xca}$^*$} \begin{xca} } { \end{xca}\renewcommand{\thexca}{\arabic{section}.\arabic{xca}} } \newcommand{\hardsubxca}{ \renewcommand{\theenumi}{\arabic{section}.\arabic{xca}(\alph{enumi})$^*$} \item \renewcommand{\theenumi}{\arabic{section}.\arabic{xca}(\alph{enumi})} } \newtheorem{predefinition}[theorem]{Pre-Definition} \newtheorem{setting}[theorem]{Setting} \newtheorem{summary}[theorem]{Summary} \newtheorem{definitions}[theorem]{Definitions} \newtheorem{example}[theorem]{Example} \newtheorem{observations}[theorem]{Observations} \newtheorem{examples}[theorem]{Examples} \newtheorem{notation}[theorem]{Notation} \newtheorem{notrev}[theorem]{Notation and Review} \newtheorem{convention}[theorem]{Convention} \newtheorem{conventions}[theorem]{Conventions} \newtheorem{open}[theorem]{Some Open Problems} \newtheorem{remark}[theorem]{Remark} \newtheorem{conjectures}[theorem]{Conjectures} \newtheorem{question}[theorem]{Question} \newtheorem{remarks}[theorem]{Remarks} \newtheorem{conclusions}[theorem]{Conclusions} \newtheorem{outline}[theorem]{Outline} \newtheorem{background}[theorem]{Background} \newtheorem{hypotheses}[theorem]{Hypotheses} \newtheorem{review}[theorem]{Review} \newcommand{\threeAsterisks}{\medskip\centerline{*\hbox{\hskip1truein}*{\hskip1truein}*}\smallskip} \newcommand{\lara}[1]{\left\langle #1 \right\rangle} \newcommand{\mt}{\widetilde} \newcommand{\free}{{\rm Free}\,} \newcommand{\kone}{L} \newcommand{\twist}{{\rm twist}} \newcommand{\zeroed}[2]{{{#1}[{#2}]}} \newcommand{\subspace}{\subset} \newcommand{\subgraph}{\subset} \newcommand{\markcom}[1]{{\complex}_{#1}} \newcommand{\mydot}{{\bullet}} \newcommand{\comp}{{\rm comp}} \newcommand{\kernel}{{\rm ker}} \newcommand{\image}{{\rm im}} \newcommand{\vs}[1]{\myfield\left( {#1} \right)} \newcommand{\LR}{{{\rm 
L}\to{\rm R}}} \newcommand{\RL}{{{\rm R}\to{\rm L}}} \newcommand{\Cat}[1]{{{\rm Cat}(#1)}} \newcommand{\Top}[1]{{{\rm Top}(#1)}} \newcommand{\objects}[1]{{{\rm Ob}\left( {#1} \right)}} \newcommand{\morphisms}[1]{{{\rm Fl}\left( {#1} \right)}} \newcommand{\fleches}[2]{{{\rm Fl}^{#1}\left( {#2} \right)}} \newcommand{\underfleches}[2]{{{\underline {\rm Fl}}^{#1}\left( {#2} \right)}} \newcommand{\twoleftarrows}{\; \mbox{\vbox{\hbox{$\leftarrow$}\vskip-.35truecm\hbox{$\leftarrow$} \vskip-.05truecm}}\;} \newcommand{\threeleftarrows}{\; \mbox{\vbox{\hbox{$\leftarrow$}\vskip-.35truecm\hbox{$\leftarrow$} \vskip-.35truecm\hbox{$\leftarrow$}\vskip-.15truecm}}\;} \newcommand{\tworightarrows}{\; \mbox{\vbox{\hbox{$\rightarrow$}\vskip-.35truecm\hbox{$\rightarrow$} \vskip-.05truecm}}\;} \newcommand{\threerightarrows}{\; \mbox{\vbox{\hbox{$\rightarrow$}\vskip-.35truecm\hbox{$\rightarrow$} \vskip-.35truecm\hbox{$\rightarrow$}\vskip-.15truecm}}\;} \newcommand{\kbig}{{K_{\rm big}}} \newcommand{\ksmall}{{K_{\rm small}}} \newcommand{\zbig}{{Z_{\rm big}}} \newcommand{\zsmall}{{Z_{\rm small}}} \newcommand{\isom}{\simeq} \newcommand{\scl}[1]{{\rm sc}\left({#1}\right)} \newcommand{\tcl}[1]{{\rm tc}\left({#1}\right)} \newcommand{\shriek}{{ATTENTION!!!! ATTENTION!!!! ATTENTION!!!! }} \newcommand{\ignore}[1]{} \newcommand{\espace}{{\em espace \'etal\'e}} \newcommand{\espaces}{{\em espaces \'etal\'es}} \newcommand{\floor}[1]{\left\lfloor #1\right\rfloor} \newcommand{\Hom}{{\rm Hom}} \newcommand{\simexp}[2]{{\rm SHom}\left({#1},{#2}\right)} \newcommand{\rder}{{\underline{\underline{ R}}}} \newcommand{\lder}{{\underline{\underline{ L}}}} \newcommand{\cat}[1]{{\Delta_{#1}}} \newcommand{\dercat}[1]{{\cdb(\myfield({#1}))}} \newcommand{\cohcomp}{{\rm cc}} \renewcommand{\complement}[1]{#1^{\rm c}} \newcommand{\lin}{{\rm Lin}} \newcommand{\fdvs}{{\rm FDVS}} \newcommand{\affine}{{\mathbb A}} eld}{{\mathbb F}} \newcommand{\mono}{{\mathbb M}} \newcommand{\bool}{{\mathbb B}} \newcommand{\reals}{{\mathbb R}} \newcommand{\rreals}{{\mathbb R}} \newcommand{\projective}{{\mathbb P}} \newcommand{\integers}{{\mathbb Z}} \newcommand{\rationals}{{\mathbb Q}} \newcommand{\complex}{{\mathbb C}} \newcommand{\csphere}{\complex\cup\{\infty\}} \newcommand{\zero}{{\vec 0}} \newcommand{\E}[1]{\mbox{E}\left[#1\right] } \newcommand{\prob}[2]{{\PP}_{#1}{\left[\; #2\; \right]}} \newcommand\BB{\mathbb{B}} \newcommand\CC{\mathbb{C}} \newcommand\DD{\mathbb{D}} \newcommand\EE{\mathbb{E}} \newcommand\FF{\mathbb{F}} \newcommand\GG{\mathbb{G}} \newcommand\HH{\mathbb{H}} \newcommand\II{\mathbb{I}} \newcommand\JJ{\mathbb{J}} \newcommand\KK{\mathbb{K}} \newcommand\MM{\mathbb{M}} \newcommand\NN{\mathbb{N}} \newcommand\OO{\mathbb{O}} \newcommand\PP{\mathbb{P}} \newcommand\QQ{\mathbb{Q}} \newcommand\RR{\mathbb{R}} \newcommand\TT{\mathbb{T}} \newcommand\UU{\mathbb{U}} \newcommand\VV{\mathbb{V}} \newcommand\WW{\mathbb{W}} \newcommand\XX{\mathbb{X}} \newcommand\YY{\mathbb{Y}} \newcommand\ZZ{\mathbb{Z}} \newcommand\bA{\mathbf{A}} \newcommand\bB{\mathbf{B}} \newcommand\bC{\mathbf{C}} \newcommand\bD{\mathbf{D}} \newcommand\bE{\mathbf{E}} \newcommand\bF{\mathbf{F}} \newcommand\bG{\mathbf{G}} \newcommand\bH{\mathbf{H}} \newcommand\bI{\mathbf{I}} \newcommand\bJ{\mathbf{J}} \newcommand\bK{\mathbf{K}} \newcommand\bL{\mathbf{L}} \newcommand\bM{\mathbf{M}} \newcommand\bN{\mathbf{N}} \newcommand\bO{\mathbf{O}} \newcommand\bP{\mathbf{P}} \newcommand\bQ{\mathbf{Q}} \newcommand\bR{\mathbf{R}} \newcommand\bS{\mathbf{S}} \newcommand\bT{\mathbf{T}} \newcommand\bU{\mathbf{U}} 
\newcommand\bV{\mathbf{V}} \newcommand\bW{\mathbf{W}} \newcommand\bX{\mathbf{X}} \newcommand\bY{\mathbf{Y}} \newcommand\bZ{\mathbf{Z}} \DeclareMathAlphabet{\mathcal}{OMS}{cmsy}{m}{n} \newcommand\cA{\mathcal{A}} \newcommand\cB{\mathcal{B}} \newcommand\cC{\mathcal{C}} \newcommand\cD{\mathcal{D}} \newcommand\cE{\mathcal{E}} \newcommand\cF{\mathcal{F}} \newcommand\cG{\mathcal{G}} \newcommand\cH{\mathcal{H}} \newcommand\cI{\mathcal{I}} \newcommand\cJ{\mathcal{J}} \newcommand\cK{\mathcal{K}} \newcommand\cL{\mathcal{L}} \newcommand\cM{\mathcal{M}} \newcommand\cN{\mathcal{N}} \newcommand\cO{\mathcal{O}} \newcommand\cP{\mathcal{P}} \newcommand\cQ{\mathcal{Q}} \newcommand\cR{\mathcal{R}} \newcommand\cS{\mathcal{S}} \newcommand\cT{\mathcal{T}} \newcommand\cU{\mathcal{U}} \newcommand\cV{\mathcal{V}} \newcommand\cW{\mathcal{W}} \newcommand\cX{\mathcal{X}} \newcommand\cY{\mathcal{Y}} \newcommand\cZ{\mathcal{Z}} \newcommand\sA{\mathscr{A}} \newcommand\sB{\mathscr{B}} \newcommand\sC{\mathscr{C}} \newcommand\sD{\mathscr{D}} \newcommand\sE{\mathscr{E}} \newcommand\sF{\mathscr{F}} \newcommand\sG{\mathscr{G}} \newcommand\sH{\mathscr{H}} \newcommand\sI{\mathscr{I}} \newcommand\sJ{\mathscr{J}} \newcommand\sK{\mathscr{K}} \newcommand\sL{\mathscr{L}} \newcommand\sM{\mathscr{M}} \newcommand\sN{\mathscr{N}} \newcommand\sO{\mathscr{O}} \newcommand\sP{\mathscr{P}} \newcommand\sQ{\mathscr{Q}} \newcommand\sR{\mathscr{R}} \newcommand\sS{\mathscr{S}} \newcommand\sT{\mathscr{T}} \newcommand\sU{\mathscr{U}} \newcommand\sV{\mathscr{V}} \newcommand\sW{\mathscr{W}} \newcommand\sX{\mathscr{X}} \newcommand\sY{\mathscr{Y}} \newcommand\sZ{\mathscr{Z}} \newcommand\frakS{\mathfrak{S}} \newcommand\frakT{\mathfrak{T}} \newcommand\frakM{\mathfrak{M}} \newcommand\fraka{\mathfrak{a}} \newcommand\frakb{\mathfrak{b}} \newcommand\frakc{\mathfrak{c}} \newcommand\frakd{\mathfrak{d}} \newcommand\frake{\mathfrak{e}} \newcommand\frakf{\mathfrak{f}} \newcommand\frakg{\mathfrak{g}} \newcommand\frakh{\mathfrak{h}} \newcommand\fraki{\mathfrak{i}} \newcommand\frakj{\mathfrak{j}} \newcommand\frakk{\mathfrak{k}} \newcommand\frakl{\mathfrak{l}} \newcommand\frakm{\mathfrak{m}} \newcommand\frakn{\mathfrak{n}} \newcommand\frako{\mathfrak{o}} \newcommand\frakp{\mathfrak{p}} \newcommand\frakq{\mathfrak{q}} \newcommand\frakr{\mathfrak{r}} \newcommand\fraks{\mathfrak{s}} \newcommand\frakt{\mathfrak{t}} \newcommand\fraku{\mathfrak{u}} \newcommand\frakv{\mathfrak{v}} \newcommand\frakw{\mathfrak{w}} \newcommand\frakx{\mathfrak{x}} \newcommand\fraky{\mathfrak{y}} \newcommand\frakz{\mathfrak{z}} \newcommand{\expect}[2]{{\EE}_{#1} \left[ {#2} \right] } \newcommand{\Expect}[2]{{\EE}_{#1} \left[ {#2} \right] } \newcommand{\Exp}{\mathbb{E}} \DeclareMathOperator{\Walk}{Walk} \DeclareMathOperator{\Walks}{Walks} \DeclareMathOperator{\WalkSum}{WalkSum} \DeclareMathOperator{\error}{error} \DeclareMathOperator{\CertTr}{CertTr} \DeclareMathOperator{\Prob}{Prob} \DeclareMathOperator{\Cone}{Cone} \DeclareMathOperator{\VLG}{VLG} \DeclareMathOperator{\Minimal}{Minimal} \DeclareMathOperator{\Tangle}{Tangle} \DeclareMathOperator{\Types}{Types} \DeclareMathOperator{\Line}{Line} \DeclareMathOperator{\Graph}{Graph} \DeclareMathOperator{\support}{support} \DeclareMathOperator{\Occurs}{Occurs} \DeclareMathOperator{\Irred}{Irred} \DeclareMathOperator{\Ball}{Ball} \DeclareMathOperator{\SNBC}{SNBC} \DeclareMathOperator{\SNBCL}{SNBC-Len} \DeclareMathOperator{\SNBCM}{SNBC-Mult} \DeclareMathOperator{\NBM}{NB-Mult} \DeclareMathOperator{\CertSNBC}{CertSNBC} \DeclareMathOperator{\Term}{Term} 
\def\N{\mathbb {N}} \def\Z{\mathbb {Z}} \def\Q{\mathbb {Q}} \def\R{\mathbb {R}} \def\C{\mathbb {C}} \newcommand\restrict{\!\upharpoonright} \def\actson{\curvearrowright} \def\comp{\urcorner} \def\embed{\hookrightarrow} \def\from{\colon} \def\onto{\twoheadrightarrow} \def\ideal{\lhd} \def\isom{\simeq} \def\lap{\triangle} \def\Lap{\lap} \def\nisom{{\not\simeq}} \def\subgp{<} \def\surj{\onto} \def\tensor{\otimes} \def\eqdef{\overset{\text{def}}{=}} \def\bs{\backslash} \def\supp{\qopname\relax o{supp}} \DeclareMathOperator{\Ad}{Ad} \DeclareMathOperator{\ad}{ad} \DeclareMathOperator{\aff}{aff} \DeclareMathOperator{\Alt}{Alt} \def\Ann{\qopname\relax o{Ann}} \DeclareMathOperator{\Area}{Area} \DeclareMathOperator{\Aut}{Aut} \DeclareMathOperator{\Br}{Br} \DeclareMathOperator{\Cay}{Cay} \DeclareMathOperator{\Chr}{chr} \DeclareMathOperator{\chr}{char} \DeclareMathOperator{\codim}{codim} \DeclareMathOperator{\Coker}{Coker} \DeclareMathOperator{\coker}{coker} \DeclareMathOperator{\Comm}{Comm} \DeclareMathOperator{\cond}{cond} \DeclareMathOperator{\conv}{conv} \DeclareMathOperator{\Cov}{Cov} \DeclareMathOperator{\diag}{diag} \DeclareMathOperator{\diam}{diam} \DeclareMathOperator{\dist}{dist} \DeclareMathOperator{\Der}{Der} \DeclareMathOperator{\Diff}{Diff} \DeclareMathOperator{\Discr}{Discr} \DeclareMathOperator{\discr}{discr} \DeclareMathOperator{\Div}{Div} \DeclareMathOperator{\Dom}{Dom} \DeclareMathOperator{\divi}{div} \DeclareMathOperator{\End}{End} \DeclareMathOperator{\Ext}{Ext} \DeclareMathOperator{\esssup}{\mathrm{ess}\sup} \def\Fix{\qopname\relax o{Fix}} \def\Frob{\qopname\relax o{Frob}} \DeclareMathOperator{\Gal}{Gal} \def\Hom{\qopname\relax o{Hom}} \DeclareMathOperator{\id}{id} \DeclareMathOperator{\Id}{Id} \def\Image{\qopname\relax o{Im}} \def\ind{\qopname\relax o{ind}} \def\Ind{\qopname\relax o{Ind}} \DeclareMathOperator{\Inf}{Inf} \DeclareMathOperator{\Inn}{Inn} \DeclareMathOperator{\Isom}{Isom} \def\Ker{\qopname\relax o{Ker}} \DeclareMathOperator{\lcm}{lcm} \def\Li{\qopname\relax o{Li}} \DeclareMathOperator{\Lk}{Lk} \DeclareMathOperator{\Lie}{Lie} \DeclareMathOperator{\Lip}{Lip} \DeclareMathOperator{\Mor}{Mor} \def\Norm{\qopname\relax o{N}} \DeclareMathOperator{\Op}{Op} \newcommand{\Oph}{\Op_\hbar} \newcommand{\OpW}{\Oph^\textrm{W}} \DeclareMathOperator{\ord}{ord} \DeclareMathOperator{\Out}{Out} \DeclareMathOperator{\Pic}{Pic} \DeclareMathOperator{\pr}{pr} \DeclareMathOperator{\Rad}{Rad} \DeclareMathOperator{\Ran}{Ran} \DeclareMathOperator{\rank}{rank} \DeclareMathOperator{\Rep}{Rep} \def\res{\qopname\relax o{res}} \def\Res{\qopname\relax o{Res}} \DeclareMathOperator{\rk}{rk} \def\sgn{\qopname\relax o{sgn}} \def\sinc{\qopname\relax o{sinc}} \DeclareMathOperator{\Span}{Span} \DeclareMathOperator{\Spec}{Spec} \DeclareMathOperator{\Stab}{Stab} \DeclareMathOperator{\sym}{sym} \DeclareMathOperator{\Sym}{Sym} \DeclareMathOperator{\tr}{tr} \DeclareMathOperator{\Tr}{Tr} \DeclareMathOperator{\typ}{typ} \DeclareMathOperator{\Var}{Var} \DeclareMathOperator{\Vol}{Vol} \DeclareMathOperator{\vol}{vol} \DeclareMathOperator{\Zcl}{Zcl} \def\implies{\Rightarrow} \newcommand\sups[1]{\raisebox{+1ex}{\small #1}} \newcommand\subs[1]{\raisebox{-1ex}{\small #1}} \newcommand\leftexp[2]{{\vphantom{#2}}^{#1}{#2}} \newcommand\abs[1]{\left| {#1} \right|} \newcommand\norm[1]{\left\Vert {#1} \right\Vert} \newcommand\nlip[1]{{\norm{#1}}_\mathrm{Lip}} \newcommand\nHS[1]{{\norm{#1}}_\mathrm{HS}} \newcommand{\wklim}[1]{\xrightarrow[#1]{\textrm{wk-*}}} \newcommand\bra[1]{\left\langle {#1} \right|} \newcommand\ket[1]{\left| 
{#1} \right\rangle} \def\hf{\frac{1}{2}} \def\cprime{\ensuremath{'}} \DeclareRobustCommand \rddots{\mathinner{\mkern1mu\raise\p@ \vbox{\kern7\p@\hbox{.}}\mkern2mu \raise4\p@\hbox{.}\mkern2mu\raise7\p@\hbox{.}\mkern1mu}} \def\Ps{\mathcal{P}} \newcommand{\Cc}{C_{\mathrm{c}}} \newcommand{\Cb}{C_{\mathrm{b}}} \newcommand{\Ci}{C^{\infty}} \newcommand{\Cic}{\Cc^{\infty}} \newcommand\xhookrightarrow[2][]{\ext@arrow 0062{\hookrightarrowfill@}{#1}{#2}} \def\hookrightarrowfill@{\arrowfill@\lhook\relbar\rightarrow} \newcommand\SL{\mathrm{SL}} \newcommand\GL{\mathrm{GL}} \newcommand\PSL{\mathrm{PSL}} \newcommand\PGL{\mathrm{PGL}} \newcommand\SU{\mathrm{SU}} \newcommand\SO{\mathrm{SO}} \newcommand\Sp{\mathrm{Sp}} \newcommand\GSp{\mathrm{GSp}} \newcommand\gO{\mathrm{O}} \newcommand\gU{\mathrm{U}} \newcommand\GU{\mathrm{GU}} \newcommand\PGO{\mathrm{PGO}} \newcommand\GSpin{\mathrm{GSpin}} \newcommand\gE{\mathrm{E}} \newcommand\Aff{\mathrm{Aff}} \newcommand\lsl{\mathfrak{sl}} \newcommand\gl{\mathfrak{gl}} \newcommand\pgl{\mathfrak{pgl}} \newcommand\su{\mathfrak{su}} \newcommand\so{\mathfrak{so}} \newcommand\lsp{\mathfrak{sp}} \newcommand\gsp{\mathfrak{gsp}} \newcommand\lo{\mathfrak{o}} \newcommand\lu{\mathfrak{u}} \newcommand\Ga{\mathbb{G}_\textrm{a}} \newcommand\Gm{\mathbb{G}_\textrm{m}} \newcommand\liea{\mathfrak{a}} \newcommand\lieb{\mathfrak{b}} \newcommand\liec{\mathfrak{c}} \newcommand\lied{\mathfrak{d}} \newcommand\liee{\mathfrak{e}} \newcommand\lief{\mathfrak{f}} \newcommand\lieg{\mathfrak{g}} \newcommand\lieh{\mathfrak{h}} \newcommand\liei{\mathfrak{i}} \newcommand\liej{\mathfrak{j}} \newcommand\liek{\mathfrak{k}} \newcommand\liel{\mathfrak{l}} \newcommand\liem{\mathfrak{m}} \newcommand\lien{\mathfrak{n}} \newcommand\lieo{\mathfrak{o}} \newcommand\liep{\mathfrak{p}} \newcommand\lieq{\mathfrak{q}} \newcommand\lier{\mathfrak{r}} \newcommand\lies{\mathfrak{s}} \newcommand\liet{\mathfrak{t}} \newcommand\lieu{\mathfrak{u}} \newcommand\liev{\mathfrak{v}} \newcommand\liew{\mathfrak{w}} \newcommand\liex{\mathfrak{x}} \newcommand\liey{\mathfrak{y}} \newcommand\liez{\mathfrak{z}} \newcommand\lienb{\bar\lien} \newcommand\ars{\liea_{\R}^{*}} \newcommand\acs{\liea_{\C}^{*}} \newcommand\diff{\mathop{}\!\mathrm{d}} \newcommand\da{\diff a} \newcommand\db{\diff b} \newcommand\dc{\diff c} \newcommand\dd{\diff d} \newcommand\de{\diff e} \newcommand\df{\diff f} \newcommand\dg{\diff g} \renewcommand\dh{\diff h} \newcommand\di{\diff i} \newcommand\dk{\diff k} \newcommand\dl{\diff l} \newcommand\dm{\diff l} \newcommand\dn{\diff n} \newcommand\dq{\diff q} \newcommand\dr{\diff r} \newcommand\ds{\diff s} \newcommand\dt{\diff t} \newcommand\du{\diff u} \newcommand\dv{\diff v} \newcommand\dw{\diff w} \newcommand\dx{\diff x} \newcommand\dy{\diff y} \newcommand\dz{\diff z} \newcommand\dmu{\diff\mu} \newcommand\dnu{\diff\nu} \newcommand\dth{\diff\theta} \newcommand\dvol{\diff\vol} \newcommand\dVol{\diff\Vol} \newcommand\Adele{\mathbb{A}} \newcommand\AQ{\Adele} \newcommand\AF{\Adele_F} \newcommand\Af{\Adele_\mathrm{f}} \newcommand\GQ{\GG(\Q)} \newcommand\GF{\GG(F)} \newcommand\GA{\GG(\Adele)} \newcommand\GAF{\GG(\AF)} \newcommand\GAf{\GG(\Af)} \newcommand\Kf{K_\mathrm{f}} \newcommand\Ki{K_\infty} \newcommand\OF{\cO_F} \newcommand\gf{g_\mathrm{f}} \newcommand\gi{g_\infty} \newcommand\kf{k_\mathrm{f}} \newcommand\Lcusp{L^2_\textrm{cusp}} \newcommand\Ldisc{L^2_\textrm{disc}} \newcommand\Lcts{L^2_\textrm{cts}} \newcommand\HdR{H_\textrm{dR}} \def\aas{a.a.s.\ } \def\ae{a.e.\ } \def\cf{cf.\ } \def\eg{e.g.\ } \def\Eg{E.g.\ } 
\def\fg{f.g.\ } \def\ie{i.e.\ } \def\Ie{I.e.\ } \def\lub{l.u.b.\ } \def\onb{o.n.b.\ } \def\resp{resp.\ } \def\st{s.t.\ } \def\viz{viz.\ } \def\wlg{w.l.g.\ } \def\Wlg{W.l.g.\ } \def\wrt{w.r.t.\ } \newcommand{\enuref}[1]{(\ref{enu:#1})} \newcommand{\thmref}[1]{Theorem \ref{thm:#1}} \newcommand{\lemref}[1]{Lemma \ref{lem:#1}} \newcommand{\propref}[1]{Proposition \ref{prop:#1}} \newcommand{\corref}[1]{Corollary \ref{cor:#1}} \newcommand{\remref}[1]{Remark \ref{rem:#1}} \newcommand{\secref}[1]{Section \ref{sec:#1}} \newcommand{\probref}[1]{Problem \ref{prob:#1}} \newcommand\Vect{\textrm{Vect}} \newcommand\Sheaf{\textrm{Shv}} \newcommand\CdV{Colin de Verdière} \newcommand\Shrd{Schrödinger} \newcommand\Erdos{Erd\H{o}s} \usepackage{relsize} \usepackage{tikz} \usetikzlibrary{matrix,arrows,decorations.pathmorphing} \usepackage{tikz-cd} \usetikzlibrary{cd} \usepackage[pdftex,colorlinks,linkcolor=blue,citecolor=brown]{hyperref} \usepackage{blkarray} \usepackage{array} \usetikzlibrary{shapes.misc} \tikzset{cross/.style={cross out, draw=black, minimum size=2*(#1-\pgflinewidth), inner sep=0pt, outer sep=0pt}, cross/.default={1pt}} \tolerance=10000 \newcommand{\CTwoV}{{\cC_{\rm 2V}}} \newcommand{\XTwoV}{X_{\rm 2V}} \newcommand{\kIndAD}{{\underline k}_{\II(\mec a\le\mec d)}} \newcommand{\WSumIndD}{{\underline k}_{\II(\cdot\le\mec d)}^{W(\cdot)}} \newcommand{\DiracDeltaKAD}{\delta_{{\underline k},\mec a,\le\mec d}} \newcommand{\ConvDiracDeltaKW}{\delta_{{\underline k},\le\mec d}^W} \tolerance=2000 \begin{document} \title[Riemann Functions and Their Weights] {Generalized Riemann Functions, Their Weights, and the Complete Graph} \author{Nicolas Folinsbee} \address{Department of Mathematics, University of British Columbia, Vancouver, BC\ \ V6T 1Z2, CANADA. } \curraddr{} \email{{\tt [email protected]}} \thanks{Research supported in part by an NSERC grant.} \author{Joel Friedman} \address{Department of Computer Science, University of British Columbia, Vancouver, BC\ \ V6T 1Z4, CANADA. } \curraddr{} \email{{\tt [email protected]}} \thanks{Research supported in part by an NSERC grant.} \date{\today} \subjclass[2010]{Primary: 05C99.} \keywords{} \begin{abstract} By a {\em Riemann function} we mean a function $f\from\integers^n\to\integers$ such that $f(\mec d)$ equals $0$ for $d_1+\cdots+d_n$ sufficiently small, and equals $d_1+\cdots+d_n+C$, for some constant $C$, for $d_1+\cdots+d_n$ sufficiently large. By adding $1$ to the Baker-Norine rank function of a graph, one gets an equivalent Riemann function, and similarly for related rank functions. To each Riemann function we associate a related function $W\from\integers^n\to\integers$ via M\"obius inversion that we call the {\em weight} of the Riemann function. We give evidence that the weight seems to organize the structure of a Riemann function in a simpler way: first, a Riemann function $f$ satisfies a Riemann-Roch formula iff its weight satisfies a simpler symmetry condition. Second, we calculate the weight of the Baker-Norine rank for certain graphs and show that the weight function is quite simple to describe; we do this for graphs on two vertices and for the complete graph. For the complete graph, we build on the work of Cori and Le Borgne, who gave a linear-time method to compute the Baker-Norine rank of the complete graph. The associated weight function has a simple formula and is extremely sparse (i.e., mostly zero).
Our computation of the weight function leads to another linear time algorithm to compute the Baker-Norine rank, via a formula likely related to one of Cori and Le Borgne, but seemingly simpler, namely $$ r_{{\rm BN},K_n}(\mec d) = -1+\biggl| \biggl\{ i=0,\ldots,\deg(\mec d) \ \Bigm| \ \sum_{j=1}^{n-2} \bigl( (d_j-d_{n-1}+i) \bmod n \bigr) \le \deg(\mec d)-i \biggr\} \biggr|. $$ Our study of weight functions leads to a natural generalization of Riemann functions, with many of the same properties exhibited by Riemann functions. \end{abstract} \maketitle \setcounter{tocdepth}{3} \tableofcontents \newcommand{\axiscubism}{ \begin{center} \begin{tikzpicture}[scale=0.5] \coordinate (Origin) at (0,0); \coordinate (XAxisMin) at (-5,0); \coordinate (XAxisMax) at (5,0); \coordinate (YAxisMin) at (0,-5); \coordinate (YAxisMax) at (0,5); \draw [thin, gray,-latex] (XAxisMin) -- (XAxisMax); \draw [thin, gray,-latex] (YAxisMin) -- (YAxisMax); \foreach \x in {-5,...,5}{ \foreach \y in {-5,-4,...,5}{ \node[draw,circle,inner sep=0.8pt,fill] at (1*\x,1*\y) {}; } } \node[draw=none,fill=none] at (0.5,.5) {$1$}; \node[draw=none,fill=none] at (-0.5,.5) {$1$}; \node[draw=none,fill=none] at (0.5,-.5) {$1$}; \node[draw=none,fill=none] at (-0.5,-.5) {$1$}; \node[draw=none,fill=none] at (1.5,.5) {$2$}; \node[draw=none,fill=none] at (.5,1.5) {$2$}; \node[draw=none,fill=none] at (-.5,1.5) {$2$}; \node[draw=none,fill=none] at (-1.5,.5) {$2$}; \node[draw=none,fill=none] at (.5,-1.5) {$2$}; \node[draw=none,fill=none] at (1.5,-.5) {$2$}; \node[draw=none,fill=none] at (-.5,-1.5) {$2$}; \node[draw=none,fill=none] at (-1.5,-.5) {$2$}; \node[draw=none,fill=none] at (2.5,.5) {$3$}; \node[draw=none,fill=none] at (1.5,1.5) {$3$}; \node[draw=none,fill=none] at (.5,2.5) {$3$}; \node[draw=none,fill=none] at (-2.5,.5) {$3$}; \node[draw=none,fill=none] at (-1.5,1.5) {$3$}; \node[draw=none,fill=none] at (-.5,2.5) {$3$}; \node[draw=none,fill=none] at (2.5,-.5) {$3$}; \node[draw=none,fill=none] at (1.5,-1.5) {$3$}; \node[draw=none,fill=none] at (.5,-2.5) {$3$}; \node[draw=none,fill=none] at (-2.5,-.5) {$3$}; \node[draw=none,fill=none] at (-1.5,-1.5) {$3$}; \node[draw=none,fill=none] at (-.5,-2.5) {$3$}; \draw[blue,thick] (-3,-1) -- (3,-1); \draw[blue,thick] (-3,0) -- (3,0); \draw[blue,thick] (-3,1) -- (3,1); \draw[blue,thick] (-2,2) -- (2,2); \draw[blue,thick] (-2,-2) -- (2,-2); \draw[blue,thick] (-1,3) -- (1,3); \draw[blue,thick] (-1,-3) -- (1,-3); \draw[blue,thick] (-1,-3) -- (-1,3); \draw[blue,thick] (0,-3) -- (0,3); \draw[blue,thick] (1,-3) -- (1,3); \draw[blue,thick] (2,-2) -- (2,2); \draw[blue,thick] (-2,-2) -- (-2,2); \draw[blue,thick] (-3,1) -- (-3,-1); \draw[blue,thick] (3,1) -- (3,-1); \end{tikzpicture} \end{center} } \newcommand{\degreecubism}{ \begin{center} \begin{tikzpicture}[scale=0.5] \coordinate (Origin) at (0,0); \coordinate (XAxisMin) at (-5,0); \coordinate (XAxisMax) at (5,0); \coordinate (YAxisMin) at (0,-5); \coordinate (YAxisMax) at (0,5); \draw [thin, gray,-latex] (XAxisMin) -- (XAxisMax); \draw [thin, gray,-latex] (YAxisMin) -- (YAxisMax); \foreach \x in {-5,...,5}{ \foreach \y in {-5,-4,...,5}{ \node[draw,circle,inner sep=0.8pt,fill] at (1*\x,1*\y) {}; } } \node[draw=none,fill=none] at (0.5,.5) {$1$}; \node[draw=none,fill=none] at (-0.5,.5) {$1$}; \node[draw=none,fill=none] at (0.5,-.5) {$1$}; \node[draw=none,fill=none] at (-0.5,-.5) {$1$}; \node[draw=none,fill=none] at (1.5,-1.5) {$2$}; \node[draw=none,fill=none] at (.5,-1.5) {$2$}; \node[draw=none,fill=none] at (1.5,-0.5) {$2$}; \node[draw=none,fill=none] at 
(-1.5,1.5) {$2$}; \node[draw=none,fill=none] at (-.5,1.5) {$2$}; \node[draw=none,fill=none] at (-1.5,0.5) {$2$}; \node[draw=none,fill=none] at (-2.5,2.5) {$3$}; \node[draw=none,fill=none] at (-1.5,2.5) {$3$}; \node[draw=none,fill=none] at (-2.5,1.5) {$3$}; \node[draw=none,fill=none] at (1.5,.5) {$3$}; \node[draw=none,fill=none] at (.5,1.5) {$3$}; \node[draw=none,fill=none] at (-1.5,-.5) {$3$}; \node[draw=none,fill=none] at (-.5,-1.5) {$3$}; \node[draw=none,fill=none] at (2.5,-2.5) {$3$}; \node[draw=none,fill=none] at (1.5,-2.5) {$3$}; \node[draw=none,fill=none] at (2.5,-1.5) {$3$}; \draw[blue,thick] (-3,3) -- (-1,3); \draw[blue,thick] (-3,2) -- (1,2); \draw[blue,thick] (-3,1) -- (2,1); \draw[blue,thick] (-2,0) -- (2,0); \draw[blue,thick] (-2,-1) -- (3,-1); \draw[blue,thick] (-1,-2) -- (3,-2); \draw[blue,thick] (1,-3) -- (3,-3); \draw[blue,thick] (3,-3) -- (3,-1); \draw[blue,thick] (2,-3) -- (2,1); \draw[blue,thick] (1,-3) -- (1,2); \draw[blue,thick] (0,-2) -- (0,2); \draw[blue,thick] (-1,-2) -- (-1,3); \draw[blue,thick] (-2,-1) -- (-2,3); \draw[blue,thick] (-3,1) -- (-3,3); \end{tikzpicture} \end{center} } \newcommand{\PicCubeZero}{ \begin{tikzpicture}[scale=0.5] \coordinate (Origin) at (0,0); \coordinate (XAxisMin) at (-5,0); \coordinate (XAxisMax) at (5,0); \coordinate (YAxisMin) at (0,-5); \coordinate (YAxisMax) at (0,5); \draw [thin, gray,-latex] (XAxisMin) -- (XAxisMax); \draw [thin, gray,-latex] (YAxisMin) -- (YAxisMax); \foreach \x in {-5,...,5}{ \foreach \y in {-5,-4,...,5}{ \node[draw,circle,inner sep=0.8pt,fill] at (1*\x,1*\y) {}; } } ll[red] (-5,0) circle (6pt); ll[red] (-4,0) circle (6pt); ll[red] (-3,0) circle (6pt); ll[red] (-2,0) circle (6pt); ll[red] (-1,0) circle (6pt); ll[red] (0,0) circle (6pt); ll[red] (1,0) circle (6pt); ll[red] (2,0) circle (6pt); ll[red] (3,0) circle (6pt); ll[red] (4,0) circle (6pt); ll[red] (5,0) circle (6pt); ll[red] (0,-5) circle (6pt); ll[red] (0,-4) circle (6pt); ll[red] (0,-3) circle (6pt); ll[red] (0,-2) circle (6pt); ll[red] (0,-1) circle (6pt); ll[red] (0,0) circle (6pt); ll[red] (0,1) circle (6pt); ll[red] (0,2) circle (6pt); ll[red] (0,3) circle (6pt); ll[red] (0,4) circle (6pt); ll[red] (0,5) circle (6pt); \end{tikzpicture} } \newcommand{\PicCubeOne}{ \begin{tikzpicture}[scale=0.5] \coordinate (Origin) at (0,0); \coordinate (XAxisMin) at (-5,0); \coordinate (XAxisMax) at (5,0); \coordinate (YAxisMin) at (0,-5); \coordinate (YAxisMax) at (0,5); \draw [thin, gray,-latex] (XAxisMin) -- (XAxisMax); \draw [thin, gray,-latex] (YAxisMin) -- (YAxisMax); \foreach \x in {-5,...,5}{ \foreach \y in {-5,-4,...,5}{ \node[draw,circle,inner sep=0.8pt,fill] at (1*\x,1*\y) {}; } } ll[blue] (-5,0) circle (6pt); ll[blue] (-4,0) circle (6pt); ll[blue] (-3,0) circle (6pt); ll[blue] (-2,0) circle (6pt); ll[blue] (-1,0) circle (6pt); ll[blue] (0,0) circle (6pt); ll[blue] (1,0) circle (6pt); ll[blue] (2,0) circle (6pt); ll[blue] (3,0) circle (6pt); ll[blue] (4,0) circle (6pt); ll[blue] (5,0) circle (6pt); ll[blue] (0,-5) circle (6pt); ll[blue] (0,-4) circle (6pt); ll[blue] (0,-3) circle (6pt); ll[blue] (0,-2) circle (6pt); ll[blue] (0,-1) circle (6pt); ll[blue] (0,0) circle (6pt); ll[blue] (0,1) circle (6pt); ll[blue] (0,2) circle (6pt); ll[blue] (0,3) circle (6pt); ll[blue] (0,4) circle (6pt); ll[blue] (0,5) circle (6pt); ll[red] (1,1) circle (6pt); ll[red] (1,-1) circle (6pt); ll[red] (-1,1) circle (6pt); ll[red] (-1,-1) circle (6pt); \end{tikzpicture} } \newcommand{\PicCubeTwo}{ \begin{tikzpicture}[scale=0.5] \coordinate (Origin) at (0,0); \coordinate 
(XAxisMin) at (-5,0); \coordinate (XAxisMax) at (5,0); \coordinate (YAxisMin) at (0,-5); \coordinate (YAxisMax) at (0,5); \draw [thin, gray,-latex] (XAxisMin) -- (XAxisMax); \draw [thin, gray,-latex] (YAxisMin) -- (YAxisMax); \foreach \x in {-5,...,5}{ \foreach \y in {-5,-4,...,5}{ \node[draw,circle,inner sep=0.8pt,fill] at (1*\x,1*\y) {}; } } ll[blue] (-5,0) circle (6pt); ll[blue] (-4,0) circle (6pt); ll[blue] (-3,0) circle (6pt); ll[blue] (-2,0) circle (6pt); ll[blue] (-1,0) circle (6pt); ll[blue] (0,0) circle (6pt); ll[blue] (1,0) circle (6pt); ll[blue] (2,0) circle (6pt); ll[blue] (3,0) circle (6pt); ll[blue] (4,0) circle (6pt); ll[blue] (5,0) circle (6pt); ll[blue] (0,-5) circle (6pt); ll[blue] (0,-4) circle (6pt); ll[blue] (0,-3) circle (6pt); ll[blue] (0,-2) circle (6pt); ll[blue] (0,-1) circle (6pt); ll[blue] (0,0) circle (6pt); ll[blue] (0,1) circle (6pt); ll[blue] (0,2) circle (6pt); ll[blue] (0,3) circle (6pt); ll[blue] (0,4) circle (6pt); ll[blue] (0,5) circle (6pt); ll[blue] (1,1) circle (6pt); ll[blue] (1,-1) circle (6pt); ll[blue] (-1,1) circle (6pt); ll[blue] (-1,-1) circle (6pt); ll[red] (2,1) circle (6pt); ll[red] (1,2) circle (6pt); ll[red] (2,-1) circle (6pt); ll[red] (1,-2) circle (6pt); ll[red] (-2,1) circle (6pt); ll[red] (-1,2) circle (6pt); ll[red] (-2,-1) circle (6pt); ll[red] (-1,-2) circle (6pt); \end{tikzpicture} } \newcommand{\PicCubeThree}{ \begin{tikzpicture}[scale=0.5] \coordinate (Origin) at (0,0); \coordinate (XAxisMin) at (-5,0); \coordinate (XAxisMax) at (5,0); \coordinate (YAxisMin) at (0,-5); \coordinate (YAxisMax) at (0,5); \draw [thin, gray,-latex] (XAxisMin) -- (XAxisMax); \draw [thin, gray,-latex] (YAxisMin) -- (YAxisMax); \foreach \x in {-5,...,5}{ \foreach \y in {-5,-4,...,5}{ \node[draw,circle,inner sep=0.8pt,fill] at (1*\x,1*\y) {}; } } ll[blue] (-5,0) circle (6pt); ll[blue] (-4,0) circle (6pt); ll[blue] (-3,0) circle (6pt); ll[blue] (-2,0) circle (6pt); ll[blue] (-1,0) circle (6pt); ll[blue] (0,0) circle (6pt); ll[blue] (1,0) circle (6pt); ll[blue] (2,0) circle (6pt); ll[blue] (3,0) circle (6pt); ll[blue] (4,0) circle (6pt); ll[blue] (5,0) circle (6pt); ll[blue] (0,-5) circle (6pt); ll[blue] (0,-4) circle (6pt); ll[blue] (0,-3) circle (6pt); ll[blue] (0,-2) circle (6pt); ll[blue] (0,-1) circle (6pt); ll[blue] (0,0) circle (6pt); ll[blue] (0,1) circle (6pt); ll[blue] (0,2) circle (6pt); ll[blue] (0,3) circle (6pt); ll[blue] (0,4) circle (6pt); ll[blue] (0,5) circle (6pt); ll[blue] (1,1) circle (6pt); ll[blue] (1,-1) circle (6pt); ll[blue] (-1,1) circle (6pt); ll[blue] (-1,-1) circle (6pt); ll[blue] (2,1) circle (6pt); ll[blue] (1,2) circle (6pt); ll[blue] (2,-1) circle (6pt); ll[blue] (1,-2) circle (6pt); ll[blue] (-2,1) circle (6pt); ll[blue] (-1,2) circle (6pt); ll[blue] (-2,-1) circle (6pt); ll[blue] (-1,-2) circle (6pt); ll[red] (3,1) circle (6pt); ll[red] (2,2) circle (6pt); ll[red] (1,3) circle (6pt); ll[red] (3,-1) circle (6pt); ll[red] (2,-2) circle (6pt); ll[red] (1,-3) circle (6pt); ll[red] (-3,1) circle (6pt); ll[red] (-2,2) circle (6pt); ll[red] (-1,3) circle (6pt); ll[red] (-3,-1) circle (6pt); ll[red] (-2,-2) circle (6pt); ll[red] (-1,-3) circle (6pt); \end{tikzpicture} } \newcommand{\PicDegCubeZero}{ \begin{tikzpicture}[scale=0.5] \coordinate (Origin) at (0,0); \coordinate (XAxisMin) at (-5,0); \coordinate (XAxisMax) at (5,0); \coordinate (YAxisMin) at (0,-5); \coordinate (YAxisMax) at (0,5); \draw [thin, gray,-latex] (XAxisMin) -- (XAxisMax); \draw [thin, gray,-latex] (YAxisMin) -- (YAxisMax); \foreach \x in {-5,...,5}{ 
\foreach \y in {-5,-4,...,5}{ \node[draw,circle,inner sep=0.8pt,fill] at (1*\x,1*\y) {}; } } ll[red] (0,0) circle (6pt); ll[red] (-4,5) circle (6pt); ll[red] (-3,4) circle (6pt); ll[red] (-2,3) circle (6pt); ll[red] (-1,2) circle (6pt); ll[red] (0,1) circle (6pt); ll[red] (1,0) circle (6pt); ll[red] (2,-1) circle (6pt); ll[red] (3,-2) circle (6pt); ll[red] (4,-3) circle (6pt); ll[red] (5,-4) circle (6pt); ll[red] (-5,4) circle (6pt); ll[red] (-4,3) circle (6pt); ll[red] (-3,2) circle (6pt); ll[red] (-2,1) circle (6pt); ll[red] (-1,0) circle (6pt); ll[red] (0,-1) circle (6pt); ll[red] (1,-2) circle (6pt); ll[red] (2,-3) circle (6pt); ll[red] (3,-4) circle (6pt); ll[red] (4,-5) circle (6pt); \end{tikzpicture} } \newcommand{\PicDegCubeOne}{ \begin{tikzpicture}[scale=0.5] \coordinate (Origin) at (0,0); \coordinate (XAxisMin) at (-5,0); \coordinate (XAxisMax) at (5,0); \coordinate (YAxisMin) at (0,-5); \coordinate (YAxisMax) at (0,5); \draw [thin, gray,-latex] (XAxisMin) -- (XAxisMax); \draw [thin, gray,-latex] (YAxisMin) -- (YAxisMax); \foreach \x in {-5,...,5}{ \foreach \y in {-5,-4,...,5}{ \node[draw,circle,inner sep=0.8pt,fill] at (1*\x,1*\y) {}; } } ll[blue] (0,0) circle (6pt); ll[blue] (-4,5) circle (6pt); ll[blue] (-3,4) circle (6pt); ll[blue] (-2,3) circle (6pt); ll[blue] (-1,2) circle (6pt); ll[blue] (0,1) circle (6pt); ll[blue] (1,0) circle (6pt); ll[blue] (2,-1) circle (6pt); ll[blue] (3,-2) circle (6pt); ll[blue] (4,-3) circle (6pt); ll[blue] (5,-4) circle (6pt); ll[blue] (-5,4) circle (6pt); ll[blue] (-4,3) circle (6pt); ll[blue] (-3,2) circle (6pt); ll[blue] (-2,1) circle (6pt); ll[blue] (-1,0) circle (6pt); ll[blue] (0,-1) circle (6pt); ll[blue] (1,-2) circle (6pt); ll[blue] (2,-3) circle (6pt); ll[blue] (3,-4) circle (6pt); ll[blue] (4,-5) circle (6pt); ll[red] (-1,1) circle (6pt); ll[red] (1,-1) circle (6pt); ll[red] (1,1) circle (6pt); ll[red] (-1,-1) circle (6pt); \end{tikzpicture} } \newcommand{\PicDegCubeTwo}{ \begin{tikzpicture}[scale=0.5] \coordinate (Origin) at (0,0); \coordinate (XAxisMin) at (-5,0); \coordinate (XAxisMax) at (5,0); \coordinate (YAxisMin) at (0,-5); \coordinate (YAxisMax) at (0,5); \draw [thin, gray,-latex] (XAxisMin) -- (XAxisMax); \draw [thin, gray,-latex] (YAxisMin) -- (YAxisMax); \foreach \x in {-5,...,5}{ \foreach \y in {-5,-4,...,5}{ \node[draw,circle,inner sep=0.8pt,fill] at (1*\x,1*\y) {}; } } ll[blue] (0,0) circle (6pt); ll[blue] (-4,5) circle (6pt); ll[blue] (-3,4) circle (6pt); ll[blue] (-2,3) circle (6pt); ll[blue] (-1,2) circle (6pt); ll[blue] (0,1) circle (6pt); ll[blue] (1,0) circle (6pt); ll[blue] (2,-1) circle (6pt); ll[blue] (3,-2) circle (6pt); ll[blue] (4,-3) circle (6pt); ll[blue] (5,-4) circle (6pt); ll[blue] (-5,4) circle (6pt); ll[blue] (-4,3) circle (6pt); ll[blue] (-3,2) circle (6pt); ll[blue] (-2,1) circle (6pt); ll[blue] (-1,0) circle (6pt); ll[blue] (0,-1) circle (6pt); ll[blue] (1,-2) circle (6pt); ll[blue] (2,-3) circle (6pt); ll[blue] (3,-4) circle (6pt); ll[blue] (4,-5) circle (6pt); ll[blue] (-1,1) circle (6pt); ll[blue] (1,-1) circle (6pt); ll[blue] (1,1) circle (6pt); ll[blue] (-1,-1) circle (6pt); ll[red] (-2,2) circle (6pt); ll[red] (2,-2) circle (6pt); ll[red] (0,2) circle (6pt); ll[red] (2,0) circle (6pt); ll[red] (0,-2) circle (6pt); ll[red] (-2,0) circle (6pt); \end{tikzpicture} } \newcommand{\PicDegCubeThree}{ \begin{tikzpicture}[scale=0.5] \coordinate (Origin) at (0,0); \coordinate (XAxisMin) at (-5,0); \coordinate (XAxisMax) at (5,0); \coordinate (YAxisMin) at (0,-5); \coordinate (YAxisMax) at (0,5); \draw 
[thin, gray,-latex] (XAxisMin) -- (XAxisMax); \draw [thin, gray,-latex] (YAxisMin) -- (YAxisMax); \foreach \x in {-5,...,5}{ \foreach \y in {-5,-4,...,5}{ \node[draw,circle,inner sep=0.8pt,fill] at (1*\x,1*\y) {}; } } ll[blue] (0,0) circle (6pt); ll[blue] (-4,5) circle (6pt); ll[blue] (-3,4) circle (6pt); ll[blue] (-2,3) circle (6pt); ll[blue] (-1,2) circle (6pt); ll[blue] (0,1) circle (6pt); ll[blue] (1,0) circle (6pt); ll[blue] (2,-1) circle (6pt); ll[blue] (3,-2) circle (6pt); ll[blue] (4,-3) circle (6pt); ll[blue] (5,-4) circle (6pt); ll[blue] (-5,4) circle (6pt); ll[blue] (-4,3) circle (6pt); ll[blue] (-3,2) circle (6pt); ll[blue] (-2,1) circle (6pt); ll[blue] (-1,0) circle (6pt); ll[blue] (0,-1) circle (6pt); ll[blue] (1,-2) circle (6pt); ll[blue] (2,-3) circle (6pt); ll[blue] (3,-4) circle (6pt); ll[blue] (4,-5) circle (6pt); ll[blue] (-1,1) circle (6pt); ll[blue] (1,-1) circle (6pt); ll[blue] (1,1) circle (6pt); ll[blue] (-1,-1) circle (6pt); ll[blue] (-2,2) circle (6pt); ll[blue] (2,-2) circle (6pt); ll[blue] (-2,0) circle (6pt); ll[blue] (2,0) circle (6pt); ll[blue] (0,2) circle (6pt); ll[blue] (0,-2) circle (6pt); ll[red] (-3,3) circle (6pt); ll[red] (3,-3) circle (6pt); ll[red] (-1,3) circle (6pt); ll[red] (1,2) circle (6pt); ll[red] (2,1) circle (6pt); ll[red] (1,-3) circle (6pt); ll[red] (-1,-2) circle (6pt); ll[red] (-2,-1) circle (6pt); ll[red] (-3,1) circle (6pt); ll[red] (3,-1) circle (6pt); \end{tikzpicture} } \newcommand{\ronegraph}{ \begin{center} \begin{tikzpicture} \tikzset{vertex/.style = {shape = circle,fill=black,minimum size=0.1cm}} \node[vertex] (ta) at (-3,2) {}; \node[vertex] (tb) at (-2,2) {}; \node[vertex] (tc) at (-1,2) {}; \node[vertex] (td) at (0,2) {}; \node[vertex] (te) at (1,2) {}; \node[vertex] (tf) at (2,2) {}; \node[vertex] (tg) at (3,2) {}; \node[vertex] (ba) at (-3,0) {}; \node[vertex] (bb) at (-2,0) {}; \node[vertex] (bc) at (-1,0) {}; \node[vertex] (bd) at (0,0) {}; \node[vertex] (be) at (1,0) {}; \node[vertex] (bf) at (2,0) {}; \node[vertex] (bg) at (3,0) {}; \draw[red] (ta) to (bg); \draw[red] (tb) to (bf); \draw[red] (tc) to (be); \draw[red] (td) to (bd); \draw[red] (te) to (bc); \draw[red] (tf) to (bb); \draw[red] (tg) to (ba); \node[draw=none,fill=none] at (-3.6,2) {$\boldsymbol{\cdots} $}; \node[draw=none,fill=none] at (-3,2.5) {$-3$}; \node[draw=none,fill=none] at (-2,2.5) {$-2$}; \node[draw=none,fill=none] at (-1,2.5) {$-1$}; \node[draw=none,fill=none] at (0,2.5) {$0$}; \node[draw=none,fill=none] at (1,2.5) {$1$}; \node[draw=none,fill=none] at (2,2.5) {$2$}; \node[draw=none,fill=none] at (3,2.5) {$3$}; \node[draw=none,fill=none] at (3.7,2) {$\boldsymbol{\cdots} $}; \node[draw=none,fill=none] at (-3.6,0) {$\boldsymbol{\cdots} $}; \node[draw=none,fill=none] at (-3,-0.5) {$-3$}; \node[draw=none,fill=none] at (-2,-0.5) {$-2$}; \node[draw=none,fill=none] at (-1,-0.5) {$-1$}; \node[draw=none,fill=none] at (0,-0.5) {$0$}; \node[draw=none,fill=none] at (1,-0.5) {$1$}; \node[draw=none,fill=none] at (2,-0.5) {$2$}; \node[draw=none,fill=none] at (3,-0.5) {$3$}; \node[draw=none,fill=none] at (3.7,0) {$\boldsymbol{\cdots} $}; \end{tikzpicture} \end{center} } \newcommand{\rtwograph}{ \begin{center} \begin{tikzpicture} \tikzset{vertex/.style = {shape = circle,fill=black,minimum size=0.1cm}} \node[vertex] (tb) at (-2,2) {}; \node[vertex] (tc) at (-1,2) {}; \node[vertex] (td) at (0,2) {}; \node[vertex] (te) at (1,2) {}; \node[vertex] (tf) at (2,2) {}; \node[vertex] (tg) at (3,2) {}; \node[vertex] (bb) at (-2,0) {}; \node[vertex] (bc) at (-1,0) {}; 
\node[vertex] (bd) at (0,0) {}; \node[vertex] (be) at (1,0) {}; \node[vertex] (bf) at (2,0) {}; \node[vertex] (bg) at (3,0) {}; \draw[red] (tb) to (bg); \draw[red] (tc) to (bf); \draw[red] (td) to (bd); \draw[red] (te) to (be); \draw[red] (tf) to (bc); \draw[red] (tg) to (bb); \node[draw=none,fill=none] at (-2.6,2) {$\boldsymbol{\cdots} $}; \node[draw=none,fill=none] at (-2,2.5) {$-2$}; \node[draw=none,fill=none] at (-1,2.5) {$-1$}; \node[draw=none,fill=none] at (0,2.5) {$0$}; \node[draw=none,fill=none] at (1,2.5) {$1$}; \node[draw=none,fill=none] at (2,2.5) {$2$}; \node[draw=none,fill=none] at (3,2.5) {$3$}; \node[draw=none,fill=none] at (3.7,2) {$\boldsymbol{\cdots} $}; \node[draw=none,fill=none] at (-2.6,0) {$\boldsymbol{\cdots} $}; \node[draw=none,fill=none] at (-2,-0.5) {$-2$}; \node[draw=none,fill=none] at (-1,-0.5) {$-1$}; \node[draw=none,fill=none] at (0,-0.5) {$0$}; \node[draw=none,fill=none] at (1,-0.5) {$1$}; \node[draw=none,fill=none] at (2,-0.5) {$2$}; \node[draw=none,fill=none] at (3,-0.5) {$3$}; \node[draw=none,fill=none] at (3.7,0) {$\boldsymbol{\cdots} $}; \end{tikzpicture} \end{center} } \newcommand{\diagramone} { \begin{tikzpicture}[scale=0.5] \node (A) at (0,0) {A}; \node (B) at (0,3) {B}; \node (C) at (3,3) {C}; \node (D) at (3,0) {D}; \node (E) at (-6,-5) {E}; \node (F) at (-6,-2) {F}; \node (G) at (-3,-2) {G}; \node (H) at (-3,-5) {H}; \node (I) at (6,-5) {I}; \node (J) at (6,-2) {J}; \node (K) at (9,-2) {K}; \node (L) at (9,-5) {L}; \path [->,blue] (A) edge node[left,black] {$e_j$} (B); \path [->,red] (B) edge node[above,black] {$e_i$} (C); \path [->,red] (A) edge node {$.$} (D); \path [->,blue] (D) edge node {$.$} (C); \path [->,blue] (E) edge node {$.$} (F); \path [->,red] (F) edge node {$.$} (G); \path [->,red] (E) edge node {$.$} (H); \path [->,blue] (H) edge node {$.$} (G); \path [->,blue] (I) edge node {$.$} (J); \path [->,red] (J) edge node {$.$} (K); \path [->,red] (I) edge node {$.$} (L); \path [->,blue] (L) edge node {$.$} (K); \path [->,teal] (E) edge node {$.$} (A); \path [->,teal] (F) edge node[above,black] {$e_k$} (B); \path [->,teal] (G) edge node {$.$} (C); \path [->,teal] (H) edge node {$.$} (D); \path [->,orange] (I) edge node {$.$} (A); \path [->,orange] (J) edge node {$.$} (B); \path [->,orange] (K) edge node[above,black] {$e_{k'}$} (C); \path [->,orange] (L) edge node {$.$} (D); \end{tikzpicture} } \newcommand{\diagramtwo} { \begin{tikzpicture}[scale=0.5] \node (A) at (0,0) {a}; \node (B) at (0,3) {a+1}; \node (C) at (3,3) {a+1}; \node (D) at (3,0) {a+1}; \node (E) at (-6,-5) {a-1}; \node[text=red] (F) at (-6,-2) {a}; \node (G) at (-3,-2) {*}; \node[text=red] (H) at (-3,-5) {a}; \node (I) at (6,-5) {**}; \node[text=red] (J) at (6,-2) {a}; \node (K) at (9,-2) {a}; \node[text=red] (L) at (9,-5) {a}; \path [->,blue] (A) edge node {$.$} (B); \path [->,red] (B) edge node {$.$} (C); \path [->,red] (A) edge node {$.$} (D); \path [->,blue] (D) edge node {$.$} (C); \path [->,blue] (E) edge node {$.$} (F); \path [->,red] (F) edge node {$.$} (G); \path [->,red] (E) edge node {$.$} (H); \path [->,blue] (H) edge node {$.$} (G); \path [->,blue] (I) edge node {$.$} (J); \path [->,red] (J) edge node {$.$} (K); \path [->,red] (I) edge node {$.$} (L); \path [->,blue] (L) edge node {$.$} (K); \path [->,teal] (E) edge node {$.$} (A); \path [->,teal] (F) edge node {$.$} (B); \path [->,teal] (G) edge node {$.$} (C); \path [->,teal] (H) edge node {$.$} (D); \path [->,orange] (I) edge node {$.$} (A); \path [->,orange] (J) edge node {$.$} (B); \path [->,orange] (K) edge 
node {$.$} (C); \path [->,orange] (L) edge node {$.$} (D); \end{tikzpicture} } \newcommand{\diagramthree} { \begin{tikzpicture}[scale=0.5] \node (A) at (0,0) {a}; \node (B) at (0,3) {a+1}; \node (C) at (3,3) {a+1}; \node (D) at (3,0) {a+1}; \node (E) at (-6,-5) {a-1}; \node (F) at (-6,-2) {a}; \node (G) at (-3,-2) {a}; \node (H) at (-3,-5) {a+1}; \node (I) at (6,-5) {a}; \node (J) at (6,-2) {a}; \node (K) at (9,-2) {a}; \node (L) at (9,-5) {a}; \node[text=red] (M) at (0,-10) {a-1}; \node (N) at (0,-7) {*}; \node[text=red] (O) at (3,-7) {a}; \node (P) at (3,-10) {**}; \path [->,blue] (A) edge node {$.$} (B); \path [->,red] (B) edge node {$.$} (C); \path [->,red] (A) edge node {$.$} (D); \path [->,blue] (D) edge node {$.$} (C); \path [->,blue] (E) edge node {$.$} (F); \path [->,red] (F) edge node {$.$} (G); \path [->,red] (E) edge node {$.$} (H); \path [->,blue] (H) edge node {$.$} (G); \path [->,blue] (I) edge node {$.$} (J); \path [->,red] (J) edge node {$.$} (K); \path [->,red] (I) edge node {$.$} (L); \path [->,blue] (L) edge node {$.$} (K); \path [->,blue] (M) edge node {$.$} (N); \path [->,red] (N) edge node {$.$} (O); \path [->,red] (M) edge node {$.$} (P); \path [->,blue] (P) edge node {$.$} (O); \path [->,teal] (E) edge node {$.$} (A); \path [->,teal] (F) edge node {$.$} (B); \path [->,teal] (G) edge node {$.$} (C); \path [->,teal] (H) edge node {$.$} (D); \path [->,orange] (I) edge node {$.$} (A); \path [->,orange] (J) edge node {$.$} (B); \path [->,orange] (K) edge node {$.$} (C); \path [->,orange] (L) edge node {$.$} (D); \path [->,orange] (M) edge node {$.$} (E); \path [->,orange] (N) edge node {$.$} (F); \path [->,orange] (O) edge node {$.$} (G); \path [->,orange] (P) edge node {$.$} (H); \path [->,teal] (M) edge node {$.$} (I); \path [->,teal] (N) edge node {$.$} (J); \path [->,teal] (O) edge node {$.$} (K); \path [->,teal] (P) edge node {$.$} (L); \end{tikzpicture} } \newcommand{\DiagramCDOne}{ \begin{tikzpicture}[scale=0.5] \coordinate (Origin) at (0,0); \coordinate (XAxisMin) at (-4,0); \coordinate (XAxisMax) at (6,0); \coordinate (YAxisMin) at (0,-4); \coordinate (YAxisMax) at (0,6); \draw [thin, black,-latex] (XAxisMin) -- (XAxisMax); \draw [thin, black,-latex] (YAxisMin) -- (YAxisMax); \clip (-5,-5) rectangle (10cm,10cm); \foreach \x in {-4,-3,...,6}{ \foreach \y in {-4,-3,...,6}{ \node[draw,circle,inner sep=1.2pt,fill] at (1*\x,1*\y) {}; } } ll[red] (-2,4) circle (7pt); ll[red] (1,1) circle (7pt); ll[red] (4,-2) circle (7pt); ll[red] (-3,6) circle (7pt); ll[red] (0,3) circle (7pt); ll[red] (3,0) circle (7pt); ll[red] (6,-3) circle (7pt); ll[red] (-4,5) circle (7pt); ll[red] (-1,2) circle (7pt); ll[red] (2,-1) circle (7pt); ll[red] (5,-4) circle (7pt); \end{tikzpicture} } \newcommand{\DiagramCDTwo}{ \begin{tikzpicture}[scale=0.5] \coordinate (Origin) at (0,0); \coordinate (XAxisMin) at (-4,0); \coordinate (XAxisMax) at (6,0); \coordinate (YAxisMin) at (0,-4); \coordinate (YAxisMax) at (0,6); \draw [thin, black,-latex] (XAxisMin) -- (XAxisMax); \draw [thin, black,-latex] (YAxisMin) -- (YAxisMax); \clip (-5,-5) rectangle (10cm,10cm); \foreach \x in {-4,-3,...,6}{ \foreach \y in {-4,-3,...,6}{ \node[draw,circle,inner sep=1.2pt,fill] at (1*\x,1*\y) {}; } } ll[red] (-2,4) circle (7pt); ll[red] (1,1) circle (7pt); ll[red] (4,-2) circle (7pt); ll[red] (-3,6) circle (7pt); ll[red] (0,3) circle (7pt); ll[red] (3,0) circle (7pt); ll[red] (6,-3) circle (7pt); ll[red] (-4,5) circle (7pt); ll[red] (-1,2) circle (7pt); ll[red] (2,-1) circle (7pt); ll[red] (5,-4) circle (7pt); 
\end{tikzpicture} } \newcommand{\ThreeVertex}{ \begin{center} \begin{tikzpicture} \tikzset{vertex/.style = {shape = circle,fill=black,minimum size=0.1cm}} \tikzset{edge/.style = {-,> = latex'}} \node[vertex] (b) at (1.4,2) {}; \node[vertex] (a) at (-1.4,2) {}; \node[vertex] (c) at (0,0) {}; \draw[edge] (b) to[bend left=10] (c); \draw[edge] (b) to[bend left=20] node[below right] {t} (c); \draw[edge] (b) to[bend right=10] node[below,rotate=50] {$\mathellipsis$} (c); \draw[edge] (b) to[bend right=20] (c); \draw[edge] (a) to[bend left=10] (b); \draw[edge] (a) to[bend left=20] node[above] {r} (b); \draw[edge] (a) to[bend right=10] node[above] {$\mathellipsis$} (b); \draw[edge] (a) to[bend right=20] (b); \draw[edge] (a) to[bend left=10] (c); \draw[edge] (a) to[bend left=20] (c); \draw[edge] (a) to[bend right=10] node[above,rotate=-50] {$\mathellipsis$} (c); \draw[edge] (a) to[bend right=20] node[below left]{s} (c); \node[draw=none,fill=none] at (0.3,-0.3) {$v_3$}; \node[draw=none,fill=none] at (-1.7,2.3) {$v_1$}; \node[draw=none,fill=none] at (1.7,2.3) {$v_2$}; \end{tikzpicture} \end{center} } \newcommand{\DiagramCDThree}{ \begin{tikzpicture}[scale=0.65] \draw[fill=blue!15!white,blue!15!white] (5,3)--(-4,3)--(-4,-10)--(5,-10)--cycle; \draw[fill=green!15!white,green!15!white] (2,5)--(-4,5)--(-4,-10)--(2,-10)--cycle; \draw[fill=teal!15!white,teal!15!white] (2,3)--(-4,3)--(-4,-10)--(2,-10)--cycle; \coordinate (Origin) at (0,0); \coordinate (XAxisMin) at (-4,0); \coordinate (XAxisMax) at (10,0); \coordinate (YAxisMin) at (0,-10); \coordinate (YAxisMax) at (0,10); \draw [thin, gray,-latex] (XAxisMin) -- (XAxisMax); \draw [thin, gray,-latex] (YAxisMin) -- (YAxisMax); \foreach \x in {-4,-3,...,10}{ \foreach \y in {-10,-9,...,10}{ \node[draw,circle,inner sep=0.8pt,fill] at (1*\x,1*\y) {}; } } ll[red] (0,0) circle (6pt); ll[red] (1,2) circle (6pt); ll[red] (2,3) circle (6pt); ll[red] (3,-2) circle (6pt); ll[red] (4,1) circle (6pt); ll[red] (5,-1) circle (6pt); ll[red] (6,-4) circle (6pt); ll[red] (7,-3) circle (6pt); ll[red] (8,-8) circle (6pt); ll[red] (9,-6) circle (6pt); ll[red] (10,-5) circle (6pt); ll[red] (-1,5) circle (6pt); ll[red] (-2,4) circle (6pt); ll[red] (-3,7) circle (6pt); ll[red] (-4,9) circle (6pt); \node[draw=none,fill=none] at (2,5.3) {\footnotesize $f(2,5)$}; \node[draw=none,fill=none] at (5,3.3) {\footnotesize $g(2,5)$}; \end{tikzpicture} } \newcommand{\ThreeVertexTwo}{ \begin{center} \begin{tikzpicture} \tikzset{vertex/.style = {shape = circle,fill=black,minimum size=0.1cm}} \tikzset{edge/.style = {-,> = latex'}} \node[vertex] (b) at (1.4,2) {}; \node[vertex] (a) at (-1.4,2) {}; \node[vertex] (c) at (0,0) {}; \draw[edge] (b) to[bend left=10] (c); \draw[edge] (b) to[bend right=10] (c); \draw[edge] (a) to[bend left=10] (b); \draw[edge] (a) to[bend right=10] (b); \draw[edge] (a) to (c); \node[draw=none,fill=none] at (0.3,-0.3) {$v_3$}; \node[draw=none,fill=none] at (-1.7,2.3) {$v_1$}; \node[draw=none,fill=none] at (1.7,2.3) {$v_2$}; \end{tikzpicture} \end{center} } \newcommand{\FourVertex}{ \begin{center} \begin{tikzpicture} \tikzset{vertex/.style = {shape = circle,fill=black,minimum size=0.1cm}} \tikzset{edge/.style = {-,> = latex'}} \node[vertex] (a) at (0,2) {}; \node[vertex] (b) at (0,0) {}; \node[vertex] (c) at (2,2) {}; \node[vertex] (d) at (2,0) {}; \draw[edge] (a) to (c); \draw[edge] (a) to (b); \draw[edge] (a) to (d); \draw[edge] (b) to (c); \draw[edge] (b) to[bend left=10] (d); \draw[edge] (b) to[bend right=10] (d); \node[draw=none,fill=none] at (-0.3,2.3) {$v_1$}; 
\node[draw=none,fill=none] at (-0.3,-0.3) {$v_2$}; \node[draw=none,fill=none] at (2.3,2.3) {$v_3$}; \node[draw=none,fill=none] at (2.3,-0.3) {$v_4$}; \end{tikzpicture} \end{center} } \newcommand{\DiagramCDFour}{ \begin{tikzpicture}[scale=0.65] \coordinate (Origin) at (0,0); \coordinate (XAxisMin) at (-4,0); \coordinate (XAxisMax) at (10,0); \coordinate (YAxisMin) at (0,-10); \coordinate (YAxisMax) at (0,10); \draw [thin, gray,-latex] (XAxisMin) -- (XAxisMax); \draw [thin, gray,-latex] (YAxisMin) -- (YAxisMax); \foreach \x in {-4,-3,...,10}{ \foreach \y in {-10,-9,...,10}{ \node[draw,circle,inner sep=0.8pt,fill] at (1*\x,1*\y) {}; } } ll[red] (0,0) circle (6pt); ll[red] (1,2) circle (6pt); ll[red] (2,3) circle (6pt); ll[red] (3,-2) circle (6pt); ll[red] (5,1) circle (6pt); ll[red] (4,-1) circle (6pt); ll[red] (6,-4) circle (6pt); ll[red] (7,-3) circle (6pt); ll[red] (8,-8) circle (6pt); ll[red] (9,-6) circle (6pt); ll[red] (10,-5) circle (6pt); ll[red] (-1,5) circle (6pt); ll[red] (-2,4) circle (6pt); ll[red] (-3,7) circle (6pt); ll[red] (-4,9) circle (6pt); \end{tikzpicture} } \newcommand{\DiagramCDFive}{ \begin{tikzpicture}[scale=0.4] \coordinate (Origin) at (0,0); \coordinate (XAxisMin) at (-3,0); \coordinate (XAxisMax) at (12,0); \coordinate (YAxisMin) at (0,-10); \coordinate (YAxisMax) at (0,12); \draw [thin, gray,-latex] (XAxisMin) -- (XAxisMax); \draw [thin, gray,-latex] (YAxisMin) -- (YAxisMax); \foreach \x in {-3,...,11}{ \foreach \y in {-10,-9,...,11}{ \node[draw,circle,inner sep=0.8pt,fill] at (1*\x,1*\y) {}; } } ll[red] (-3,6) circle (6pt); ll[red] (-2,7) circle (6pt); ll[red] (-1,8) circle (6pt); ll[red] (0,0) circle (6pt); ll[red] (1,10) circle (6pt); ll[red] (2,11) circle (6pt); ll[red] (3,3) circle (6pt); ll[red] (4,4) circle (6pt); ll[red] (5,5) circle (6pt); ll[red] (6,-3) circle (6pt); ll[red] (7,-2) circle (6pt); ll[red] (8,-1) circle (6pt); ll[red] (9,-9) circle (6pt); ll[red] (10,1) circle (6pt); ll[red] (11,2) circle (6pt); \end{tikzpicture} } \newcommand{\DiagramCDEight}{ \begin{tikzpicture}[scale=0.4] \coordinate (Origin) at (0,0); \coordinate (XAxisMin) at (-3,0); \coordinate (XAxisMax) at (12,0); \coordinate (YAxisMin) at (0,-10); \coordinate (YAxisMax) at (0,12); \draw [thin, gray,-latex] (XAxisMin) -- (XAxisMax); \draw [thin, gray,-latex] (YAxisMin) -- (YAxisMax); \foreach \x in {-3,...,11}{ \foreach \y in {-10,-9,...,11}{ \node[draw,circle,inner sep=0.8pt,fill] at (1*\x,1*\y) {}; } } ll[red] (-3,6) circle (6pt); ll[red] (-2,7) circle (6pt); ll[red] (-1,8) circle (6pt); ll[red] (0,0) circle (6pt); ll[red] (1,1) circle (6pt); ll[red] (2,11) circle (6pt); ll[red] (3,3) circle (6pt); ll[red] (4,4) circle (6pt); ll[red] (5,5) circle (6pt); ll[red] (6,-3) circle (6pt); ll[red] (7,-2) circle (6pt); ll[red] (8,-1) circle (6pt); ll[red] (9,-9) circle (6pt); ll[red] (10,-8) circle (6pt); ll[red] (11,2) circle (6pt); \end{tikzpicture} } \newcommand{\DiagramCDNine}{ \begin{tikzpicture}[scale=0.4] \coordinate (Origin) at (0,0); \coordinate (XAxisMin) at (-3,0); \coordinate (XAxisMax) at (12,0); \coordinate (YAxisMin) at (0,-10); \coordinate (YAxisMax) at (0,12); \draw [thin, gray,-latex] (XAxisMin) -- (XAxisMax); \draw [thin, gray,-latex] (YAxisMin) -- (YAxisMax); \foreach \x in {-3,...,11}{ \foreach \y in {-10,-9,...,11}{ \node[draw,circle,inner sep=0.8pt,fill] at (1*\x,1*\y) {}; } } ll[red] (-3,6) circle (6pt); ll[red] (-2,7) circle (6pt); ll[red] (-1,8) circle (6pt); ll[red] (0,0) circle (6pt); ll[red] (1,1) circle (6pt); ll[red] (2,2) circle (6pt); ll[red] (3,3) circle 
(6pt); ll[red] (4,4) circle (6pt); ll[red] (5,5) circle (6pt); ll[red] (6,-3) circle (6pt); ll[red] (7,-2) circle (6pt); ll[red] (8,-1) circle (6pt); ll[red] (9,-9) circle (6pt); ll[red] (10,-8) circle (6pt); ll[red] (11,-7) circle (6pt); \end{tikzpicture} } \newcommand{\DiagramCDSeven}{ \begin{tikzpicture}[scale=0.65] \coordinate (Origin) at (0,0); \coordinate (XAxisMin) at (-2,0); \coordinate (XAxisMax) at (9,0); \coordinate (YAxisMin) at (0,-6); \coordinate (YAxisMax) at (0,5); \draw [thin, gray,-latex] (XAxisMin) -- (XAxisMax); \draw [thin, gray,-latex] (YAxisMin) -- (YAxisMax); \foreach \x in {-2,...,9}{ \foreach \y in {-6,-5,...,5}{ \node[draw,circle,inner sep=0.8pt,fill] at (1*\x,1*\y) {}; } } ll[red] (-2,4) circle (6pt); ll[red] (-1,5) circle (6pt); ll[red] (0,0) circle (6pt); ll[red] (1,1) circle (6pt); ll[red] (2,2) circle (6pt); ll[red] (3,3) circle (6pt); ll[red] (4,-2) circle (6pt); ll[red] (5,-1) circle (6pt); ll[red] (6,-6) circle (6pt); ll[red] (7,-5) circle (6pt); ll[red] (8,-4) circle (6pt); \end{tikzpicture} } \newcommand{\DiagramCDTen}{ \begin{tikzpicture}[scale=0.7] \draw[fill=blue!15!white,green!15!white] (3,2)--(-3,2)--(-3,-10)--(3,-10)--cycle; \draw[fill=green!15!white,blue!15!white] (4,3)--(11,3)--(11,11)--(4,11)--cycle; \draw[fill=green!15!white,gray!15!white] (3,3)--(3,11)--(-3,11)--(-3,3)--cycle; \draw[fill=green!15!white,gray!15!white] (4,2)--(11,2)--(11,-10)--(4,-10)--cycle; \coordinate (Origin) at (0,0); \coordinate (XAxisMin) at (-3,0); \coordinate (XAxisMax) at (12,0); \coordinate (YAxisMin) at (0,-10); \coordinate (YAxisMax) at (0,12); \draw [thin, gray,-latex] (XAxisMin) -- (XAxisMax); \draw [thin, gray,-latex] (YAxisMin) -- (YAxisMax); \foreach \x in {-3,...,11}{ \foreach \y in {-10,-9,...,11}{ \node[draw,circle,inner sep=0.8pt,fill] at (1*\x,1*\y) {}; } } ll[red] (-3,6) circle (6pt); ll[red] (-2,7) circle (6pt); ll[red] (-1,8) circle (6pt); ll[red] (0,0) circle (6pt); ll[red] (1,1) circle (6pt); ll[red] (2,2) circle (6pt); ll[red] (3,3) circle (6pt); ll[red] (4,4) circle (6pt); ll[red] (5,5) circle (6pt); ll[red] (6,-3) circle (6pt); ll[red] (7,-2) circle (6pt); ll[red] (8,-1) circle (6pt); ll[red] (9,-9) circle (6pt); ll[red] (10,-8) circle (6pt); ll[red] (11,-7) circle (6pt); \end{tikzpicture} } \section{Introduction} The main goal of this article is to give a combinatorial study of what we call {\em Riemann functions} and their {\em weights}. Our main motivation is to gain insight into the special case that is the Graph Riemann-Roch fomula of Baker and Norine \cite{baker_norine}; the Baker-Norine formula has received a lot of recent attention \cite{cori_le_borgne,backman, Mohammadi,Caporaso}, as has its generalization to {\em tropical curves} and other settings in recent years \cite{backman,Gathmann, Hladk, James, amini2013, manjunath2012, amini_manjunath,Cools}. We were first interested in weights to address a question posed in \cite{baker_norine} regarding whether or not their Graph Riemann-Roch formula could be understood as an Euler characteristic equation; this is partially answered in \cite{folinsbee_friedman_Euler_characteristics}. However, weights are interesting for a number of purely combinatorial reasons: first, a Riemann-Roch formula is simpler to express in terms of the weight of the Riemann function. Second, the weights of the Riemann-Roch functions of certain graphs are very simple to write down. 
For example, in this article we build on the methods of Cori and Le Borgne \cite{cori_le_borgne} to give a very simple formula for the weights of the Baker-Norine rank function of a complete graph; this also allows us to give a likely simpler variant of their algorithm for computing the values of this rank function. Furthermore, for the above reasons, as well as its connections to sheaves and Euler characteristics in \cite{folinsbee_friedman_Euler_characteristics}, we suspect that weights may be a useful way to describe many Riemann functions. This article has two types of results: foundational results on Riemann functions and Riemann-Roch type formulas, and calculations of the weights of Baker-Norine rank functions of two types of graphs. Let us briefly summarize the results, assuming some terminology that will be made precise in Section~\ref{se_basic_Riemann}. \subsection{Riemann Functions and Weights} \label{su_Riemann_Functions_and_Weights} By a {\em Riemann function} we mean a function $f\from\integers^n\to\integers$ such that $f(\mec d)=f(d_1,\ldots,d_n)$ is {\em initially zero}, meaning $f(\mec d)=0$ for $\deg(\mec d)= d_1+\cdots+d_n$ sufficiently small, and {\em eventually}---meaning for $\deg(\mec d)$ sufficiently large---equals $\deg(\mec d)+C$ for a constant $C\in\integers$, which we call the {\em offset of $f$}. By adding $1$ to the Baker-Norine rank function of a graph, one gets an equivalent Riemann function, and similarly for related rank functions. If $f\from\integers^n\to\integers$ is any function that is initially zero, then there is a unique, initially zero $W$ such that $$ f(\mec d) = \sum_{\mec d' \le \mec d} W(\mec d') $$ where $\le$ is the usual partial order on $\integers^n$ (i.e., $\mec d'\le\mec d$ means $d'_i\le d_i$ for all $i=1,\ldots,n$); we call $W$ the {\em weight} of $f$. If $f$ is a Riemann function, then $W$ is also eventually zero; much of what we prove about Riemann functions also holds for {\em generalized Riemann functions}, which we define as any initially zero function $f$ whose weight is eventually zero. Returning to a Riemann function $f\from\integers^n\to\integers$ with offset $C$, for any $\mec K\in\integers^n$ there exists a unique function $f^\wedge_{\mec K}$ such that for all $\mec d\in\integers^n$ we have \begin{equation}\label{eq_intro_generalized_Riemann_Roch} f(\mec d) - f^\wedge_{\mec K}(\mec K - \mec d) = \deg(\mec d)+C, \end{equation} which we refer to as a {\em generalized Riemann-Roch formula}; $f^\wedge_{\mec K}$ is also a Riemann function. Furthermore, if $f^\wedge_{\mec K}=f$ for some $\mec K$, then the formula reads $$ f(\mec d) - f(\mec K - \mec d) = \deg(\mec d)+C, $$ which is the usual type of Riemann-Roch formula, encompassing both the classical Riemann-Roch formula and the Baker-Norine analog. Hence, our view of Riemann-Roch formulas is more ``happy-go-lucky'' than is common in the literature: for each $f,\mec K$ there is a generalized Riemann-Roch formula \eqref{eq_intro_generalized_Riemann_Roch}; we study any such formula, and view the case where $f^\wedge_{\mec K}=f$ as a special case which we call {\em self-duality}. We are interested in weight functions, $W$, for a number of reasons: \begin{enumerate} \item the weights of the Baker-Norine rank (plus $1$) of the graphs we study in this article turn out to be simple to describe and very sparse (i.e., mostly $0$); by contrast, at least for the complete graph, the Baker-Norine function is more difficult to compute.
Hence the weights may be a more efficient way to encode certain Riemann functions of interest. \item For a Riemann function $f\from\integers^n\to\integers$, the weight of $f^\wedge_{\mec K}$ turns out to equal $(-1)^n W^*_{\mec L}$, where $\mec L=\mec K+\mec 1$ (with $\mec 1=(1,\ldots,1)$), and $W^*_{\mec L}$ is the function $W^*_{\mec L}(\mec d)=W(\mec L-\mec d)$; hence it seems easier to check self-duality on the weight, $W$, than directly on $f$. \item In \cite{folinsbee_friedman_Euler_characteristics}, we model Riemann functions by restricting $f\from\integers^n\to\integers$ to two of its variables, while holding the other $n-2$ variables fixed; if $f$ satisfies self-duality, a two-variable restriction, $\widetilde f\from\integers^2\to\integers$, of $f$ will generally not be self-dual; however, for suitable $\widetilde{\mec K}\in\integers^2$, the dual $\widetilde f^\wedge_{\widetilde{\mec K}}$ can be described as a restriction of $f^\wedge_{\mec K}$ (for any $\mec K\in\integers^n$). Since self-duality is not preserved under restriction, while generalized Riemann-Roch formulas do behave well under restriction, it seems essential to work with generalized Riemann-Roch formulas \eqref{eq_intro_generalized_Riemann_Roch} in \cite{folinsbee_friedman_Euler_characteristics}, or whenever we wish to work with restrictions of Riemann functions to a subset of their variables. \item For certain Riemann functions of interest, such as those considered by Amini and Manjunath \cite{amini_manjunath}, self-duality does not generally hold, and yet one can always work with weights and generalized Riemann-Roch formulas. \item The formalism of weights applies to generalized Riemann functions, which form a much wider class of functions and which we believe are likely to be useful in future work to model other interesting functions. In this case \eqref{eq_intro_generalized_Riemann_Roch} is replaced by $$ f(\mec d) - f^\wedge_{\mec K}(\mec K - \mec d) = h(\mec d), $$ where $h$ is the unique {\em modular function} that eventually equals $f$ (see Section~\ref{se_generalized_Riemann}). One might expect such formulas to hold when, for example, $f=f(\mec d)$ is the sum of even Betti numbers of a sheaf depending on a parameter $\mec d\in\integers^n$, whose Euler characteristic equals a modular function $h$. \end{enumerate} \subsection{The Weight of the Baker-Norine Rank for Two Types of Graphs} The second type of result in this article concerns the weights of the Baker-Norine rank function (plus $1$) for two types of graphs, namely graphs on two vertices and the complete graph, $K_n$, on $n$ vertices. Both types of weight functions are quite simple and very sparse (i.e., mostly $0$). For $K_n$ we build on the ideas of Cori and Le Borgne \cite{cori_le_borgne} to compute the weight of the Baker-Norine rank. A side effect of this computation is a formula for the Baker-Norine rank: $$ r_{{\rm BN},K_n}(\mec d) = -1+\biggl| \biggl\{ i=0,\ldots,\deg(\mec d) \ \Bigm| \ \sum_{j=1}^{n-2} \bigl( (d_j-d_{n-1}+i) \bmod n \bigr) \le \deg(\mec d)-i \biggr\} \biggr|, $$ where the ``mod'' function above returns a value in $\{0,\ldots,n-1\}$; this looks related to a formula given by Cori and Le Borgne. We also explain that---like the Cori and Le Borgne algorithm---there is an algorithm that computes this function in time $O(n)$. Our proof of this formula is self-contained, although it uses some of the observations of Cori and Le Borgne, including one short and rather ingenious idea of theirs regarding the Baker-Norine function on a complete graph.
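For concreteness, the displayed formula can be evaluated directly. The short routine below (a naive sketch in Python, included only for illustration; the function name is ours, and this is not the $O(n)$ algorithm just mentioned) simply enumerates $i=0,\ldots,\deg(\mec d)$ and therefore runs in time $O(n\,\deg(\mec d))$; when $\deg(\mec d)<0$ the enumeration is empty and the routine returns $-1$.
\begin{verbatim}
def baker_norine_rank_Kn(d):
    # Naive transcription of the displayed formula for r_{BN,K_n}(d).
    # Here d = (d_1, ..., d_n) is given as a 0-indexed tuple/list of
    # integers, so the d_{n-1} of the formula is d[n-2] below.
    n = len(d)
    deg = sum(d)
    count = 0
    for i in range(deg + 1):  # i = 0, ..., deg(d); empty when deg(d) < 0
        s = sum((d[j] - d[n - 2] + i) % n for j in range(n - 2))
        if s <= deg - i:
            count += 1
    return count - 1  # the leading "-1" in the formula
\end{verbatim}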
\subsection{Organization of this Article} The rest of this article is organized as follows. In Section~\ref{se_basic_Riemann} we give some basic terminology, including the definition of a {\em Riemann function} and some examples, which (after subtracting $1$) includes the Baker-Norine rank. In Section~\ref{se_generalized_Riemann} we discuss what we mean by the {\em weight} of a Riemann function; this leads to a notation of {\em generalized Riemann functions}, which share many of the properties of Riemann functions. In Section~\ref{se_riemann_roch_formulas} we define what we mean by a Riemann-Roch formula; we describe the equivalent condition on weights, which is simpler; these ideas generalize in a natural way to the setting of generalized Riemann functions. In Section~\ref{se_two_vertices} we compute the weight of the Baker-Norine rank for graphs on two vertices, joined by any number of edges. In Section~\ref{se_completegraph} we compute the weight of the Baker-Norine rank for a complete graph on $n$ vertices, and we give a formula for the Baker-Norine rank, which---like a related formula of Cori and Le Borgne---allows the rank to be computed in linear time in $n$. In Section~\ref{se_fundamental_domains} we prove our main theorems---stated earlier---that characterize {\em modular functions} used to define generalized Riemann functions. \newcommand{\nEqualsFourTables}{ \begin{tabular}{|*3{c|}} \hline $i=0$ & $i=1$ & $i=2$ \\ \hline $$ \begin{blockarray}{ccccc} \phantom{} & 0 & 1 & 2 & 3 \\ \begin{block}{c(cccc)} 0 & 1 & 0 & 0 & 0 \\ 1 & 0 & 0 & 0 & 0 \\ 2 & 0 & 0 & 0 & 0 \\ 3 & 0 & 0 & 0 & 0 \\ \end{block} \end{blockarray} $$ & $$ \begin{blockarray}{ccccc} \phantom{} & 0 & 1 & 2 & 3 \\ \begin{block}{c(cccc)} 0 & 0 & 1 & 0 & 0 \\ 1 & 1 & 0 & 0 & 0 \\ 2 & 0 & 0 & 0 & 0 \\ 3 & 0 & 0 & 0 & 0 \\ \end{block} \end{blockarray} $$ & $$ \begin{blockarray}{ccccc} \phantom{} & 0 & 1 & 2 & 3 \\ \begin{block}{c(cccc)} 0 & 0 & 0 & 1 & 0 \\ 1 & 0 & 1 & 0 & 0 \\ 2 & 1 & 0 & 0 & 0 \\ 3 & 0 & 0 & 0 & 0 \\ \end{block} \end{blockarray} $$ \\ \hline $i=3$ & $i=4$ & $i=5$ \\ \hline $$ \begin{blockarray}{ccccc} \phantom{} & 0 & 1 & 2 & 3 \\ \begin{block}{c(cccc)} 0 & 0 & 0 & 0 & 1 \\ 1 & 0 & 0 & 1 & 0 \\ 2 & 0 & 1 & 0 & 0 \\ 3 & 1 & 0 & 0 & 0 \\ \end{block} \end{blockarray} $$ & $$ \begin{blockarray}{ccccc} \phantom{} & 0 & 1 & 2 & 3 \\ \begin{block}{c(cccc)} 0 & 0 & 0 & 0 & 0 \\ 1 & 0 & 0 & 0 & 1 \\ 2 & 0 & 0 & 1 & 0 \\ 3 & 0 & 1 & 0 & 0 \\ \end{block} \end{blockarray} $$ & $$ \begin{blockarray}{ccccc} \phantom{} & 0 & 1 & 2 & 3 \\ \begin{block}{c(cccc)} 0 & 0 & 0 & 0 & 0 \\ 1 & 0 & 0 & 0 & 0 \\ 2 & 0 & 0 & 0 & 1 \\ 3 & 0 & 0 & 1 & 0 \\ \end{block} \end{blockarray} $$ \\ \hline $i=6$ & & \\ \hline \hline $$ \begin{blockarray}{ccccc} \phantom{} & 0 & 1 & 2 & 3 \\ \begin{block}{c(cccc)} 0 & 0 & 0 & 0 & 0 \\ 1 & 0 & 0 & 0 & 0 \\ 2 & 0 & 0 & 0 & 0 \\ 3 & 0 & 0 & 0 & 1 \\ \end{block} \end{blockarray} $$ & & \\ \hline \end{tabular} } \newcommand{\nBoolThreeCubeBCoords}{ \tikzset{every picture/.style={line width=0.75pt}} \begin{tikzpicture}[x=0.75pt,y=0.75pt,yscale=-1,xscale=1] \draw (495.94,177.48) -- (495.12,36.54) ; \draw [shift={(495.11,34.54)}, rotate = 449.67] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. controls (3.31,0.3) and (6.95,1.4) .. 
(10.93,3.29) ; \draw (236.77,114.04) node [font=\small] {$< b-e_{1} -e_{\ 2} ,i-2 >$}; \draw (337.74,19.68) node [font=\small] {$< b-e_{1} ,i-1 >$}; \draw (236.77,300.58) node [font=\footnotesize] {$< b-e_{1} -e_{2} -e_{\ 3} ,i-3 >$}; \draw (492.81,19.93) node {$b$}; \draw (418.22,114.04) node [font=\small] {$< b-e_{2} ,i-2 >$}; \draw (337.57,187.2) node [font=\footnotesize] {$< b-e_{1} -e_{\ 3} ,i-2 >$}; \draw (499.56,186.98) node [font=\footnotesize] {$< b-e_{\ 3} ,i-1 >$}; \draw (420.29,300.58) node [font=\footnotesize] {$< b-e_{2} -e_{\ 3} ,i-2 >$}; \draw (395.74,19.77) -- (481.81,19.91) ; \draw [shift={(483.81,19.92)}, rotate = 180.09] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. controls (3.31,0.3) and (6.95,1.4) .. (10.93,3.29) ; \draw (249.61,102.04) -- (323.43,33.04) ; \draw [shift={(324.9,31.68)}, rotate = 496.94] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. controls (3.31,0.3) and (6.95,1.4) .. (10.93,3.29) ; \draw (309.77,114.04) -- (358.22,114.04) ; \draw [shift={(360.22,114.04)}, rotate = 180] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. controls (3.31,0.3) and (6.95,1.4) .. (10.93,3.29) ; \draw (427.73,102.04) -- (482.57,32.85) ; \draw [shift={(483.81,31.28)}, rotate = 488.4] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. controls (3.31,0.3) and (6.95,1.4) .. (10.93,3.29) ; \draw (236.77,128.04) -- (236.77,289.58) ; \draw [shift={(236.77,126.04)}, rotate = 90] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. controls (3.31,0.3) and (6.95,1.4) .. (10.93,3.29) ; \draw (314.77,300.58) -- (352.79,300.58) ; \draw [shift={(354.79,300.58)}, rotate = 180] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. controls (3.31,0.3) and (6.95,1.4) .. (10.93,3.29) ; \draw (427.96,289.58) -- (490.74,199.62) ; \draw [shift={(491.89,197.98)}, rotate = 484.91] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. controls (3.31,0.3) and (6.95,1.4) .. (10.93,3.29) ; \draw (246.55,289.58) -- (326.47,199.7) ; \draw [shift={(327.79,198.2)}, rotate = 491.64] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. controls (3.31,0.3) and (6.95,1.4) .. (10.93,3.29) ; \draw (403.07,187.11) -- (417.74,187.09)(425.74,187.08) -- (444.56,187.06) ; \draw [shift={(446.56,187.05)}, rotate = 539.9200000000001] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. controls (3.31,0.3) and (6.95,1.4) .. (10.93,3.29) ; \draw (337.72,33.68) -- (337.65,109.49)(337.64,117.49) -- (337.58,176.2) ; \draw [shift={(337.72,31.68)}, rotate = 90.06] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. controls (3.31,0.3) and (6.95,1.4) .. (10.93,3.29) ; \draw (418.37,128.04) -- (420.17,289.58) ; \draw [shift={(418.35,126.04)}, rotate = 89.36] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. 
controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. controls (3.31,0.3) and (6.95,1.4) .. (10.93,3.29) ; \end{tikzpicture} } \newcommand{\nBoolThreeCubeIValues}{ \tikzset{every picture/.style={line width=0.75pt}} \begin{tikzpicture}[x=0.75pt,y=0.75pt,yscale=-1,xscale=1] \draw (407,120.5) -- (407,40.5) ; \draw [shift={(407,38.5)}, rotate = 450] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. controls (3.31,0.3) and (6.95,1.4) .. (10.93,3.29) ; \draw (235.84,83.25) node {$i=5$}; \draw (304.46,26.49) node {$i=10$}; \draw (237.16,200.1) node {$i=0$}; \draw (410.03,26.49) node {$i=15$}; \draw (357.24,84.41) node {$i=10$}; \draw (304.46,130.75) node {$i=5$}; \draw (410.03,130.75) node {$i=10$}; \draw (357.24,200.25) node {$i=5$}; \draw (329.96,26.49) -- (382.53,26.49) ; \draw [shift={(384.53,26.49)}, rotate = 180] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. controls (3.31,0.3) and (6.95,1.4) .. (10.93,3.29) ; \draw (250.35,71.25) -- (288.41,39.76) ; \draw [shift={(289.95,38.49)}, rotate = 500.4] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. controls (3.31,0.3) and (6.95,1.4) .. (10.93,3.29) ; \draw (257.34,83.46) -- (325.74,84.11) ; \draw [shift={(327.74,84.13)}, rotate = 180.55] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. controls (3.31,0.3) and (6.95,1.4) .. (10.93,3.29) ; \draw (369.09,71.41) -- (397.74,39.96) ; \draw [shift={(399.09,38.49)}, rotate = 492.34] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. controls (3.31,0.3) and (6.95,1.4) .. (10.93,3.29) ; \draw (236,97.25) -- (237.03,188.1) ; \draw [shift={(235.98,95.25)}, rotate = 89.35] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. controls (3.31,0.3) and (6.95,1.4) .. (10.93,3.29) ; \draw (258.66,200.12) -- (333.74,200.22) ; \draw [shift={(335.74,200.23)}, rotate = 180.08] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. controls (3.31,0.3) and (6.95,1.4) .. (10.93,3.29) ; \draw (366.36,188.25) -- (399.7,144.34) ; \draw [shift={(400.91,142.75)}, rotate = 487.21] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. controls (3.31,0.3) and (6.95,1.4) .. (10.93,3.29) ; \draw (248.81,188.1) -- (290.45,145.18) ; \draw [shift={(291.85,143.75)}, rotate = 494.14] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. controls (3.31,0.3) and (6.95,1.4) .. (10.93,3.29) ; \draw (329.46,130.75) -- (349.34,130.75)(357.34,130.75) -- (382.53,130.75) ; \draw [shift={(384.53,130.75)}, rotate = 180] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. controls (3.31,0.3) and (6.95,1.4) .. (10.93,3.29) ; \draw (304.46,40.49) -- (304.46,79.76)(304.46,87.76) -- (304.46,117.75) ; \draw [shift={(304.46,38.49)}, rotate = 90] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. 
controls (3.31,0.3) and (6.95,1.4) .. (10.93,3.29) ; \draw (357.24,99.41) -- (357.24,188.25) ; \draw [shift={(357.24,97.41)}, rotate = 90] [color={rgb, 255:red, 0; green, 0; blue, 0 } ][line width=0.75] (10.93,-3.29) .. controls (6.95,-1.4) and (3.31,-0.3) .. (0,0) .. controls (3.31,0.3) and (6.95,1.4) .. (10.93,3.29) ; \end{tikzpicture} } \section{Basic Terminology and Riemann Functions} \label{se_basic_Riemann} In this section we introduce some basic terminology and define the notion of a Riemann function. Then we give some examples of Riemann functions. \subsection{Basic Notation} We use $\integers,\naturals$ to denote the integers and positive integers; for $a\in\integers$, we use $\integers_{\le a}$ to denote the integers less than or equal to $a$, and similarly for the subscript $\ge a$. For $n\in\naturals$ we use $[n]$ to denote $\{1,\ldots,n\}$. We use bold face $\mec d=(d_1,\ldots,d_n)$ to denote elements of $\integers^n$, using plain face for the components of $\mec d$; by the {\em degree} of $\mec d$, denoted $\deg(\mec d)$ or at times $|\mec d|$, we mean $d_1+\ldots+d_n$. We set $$ \integers^n_{\deg 0} = \{ \mec d\in\integers^n \ | \ \deg(\mec d)=0 \}, $$ and for $a\in \integers$ we similarly set $$ \integers^n_{\deg a} = \{ \mec d\in\integers^n \ | \ \deg(\mec d)=a \}, \quad \integers^n_{\deg \le a} = \{ \mec d\in\integers^n \ | \ \deg(\mec d)\le a \}. $$ We use $\mec e_i\in\integers^n$ (with $n$ understood) be the $i$-th standard basis vector (i.e., whose $j$-th component is $1$ if $j=i$ and $0$ otherwise), and for $I\subset [n]$ (with $n$ understood) we set \begin{equation}\label{eq_e_I_notation} \mec e_I = \sum_{i\in I} \mec e_i; \end{equation} hence in case $I=\emptyset$ is the empty set, then $\mec e_\emptyset=\mec 0=(0,\ldots,0)$, and similarly $e_{[n]}=\mec 1=(1,\ldots,1)$. For $n\in\naturals$, we endow $\integers^n$ with the usual partial order, that is $$ \mec d'\le \mec d \quad\mbox{iff}\quad d'_i\le d_i\ \forall i\in[n], $$ where $[n]=\{1,2,\ldots,n\}$. \subsection{Riemann Functions} \label{se_riemann_functions} In this section we define {\em Riemann functions} and give examples that have appeared in the literature. \begin{definition} We say that a function $f\from\integers^n\to\integers$ is a Riemann function if for some $C,a,b\in\integers$ we have \begin{enumerate} \item $f(\mec d)=0$ if $\deg(\mec d)\le a$; and \item $f(\mec d)=\deg(\mec d)+C$ if $\deg(\mec d)\ge b$; \end{enumerate} we refer to $C$ as the {\em offset} of $f$. \end{definition} In our study of Riemann functions, it will be useful to introduce the following terminology. \begin{definition} If $f,g$ are functions $\integers^n\to\integers$, we say that {\em $f$ equals $g$ initially} (respectively, {\em eventually}) if $f(\mec d)=g(\mec d)$ for $\deg(\mec d)$ sufficiently small (respectively, sufficiently large); similarly, we say that that $f$ is {\em initially zero} (respectively {\em eventually zero}) if $f(\mec d)=0$ for $\deg(\mec d)$ sufficiently small (respectively, sufficiently large). \end{definition} Therefore $f\from \integers^n\to\integers$ is a Riemann function iff it is initially zero and it eventually equals the function $\deg(\mec d)+C$, where $C$ is the offset of $f$. \subsection{The Baker-Norine Rank and Riemann-Roch Formula} In this article we study examples of the Baker-Norine rank for various graphs. In this subsection we briefly review its definition and its properties; for more details, see \cite{baker_norine}. 
We will consider graphs, $G=(V,E)$, that are connected and may have multiple edges but no self-loops. Recall that if $G=(V,E)$ is any graph, then its {\em Laplacian}, $\Delta_G$, equals $D_G-A_G$ where $D_G$ is the diagonal degree counting matrix of $G$, and $A_G$ is the adjacency matrix of $G$. \begin{definition}[The Baker-Norine rank function of a graph] \label{de_baker_norine_rank} Let $G=(V,E)$ be a connected graph without self-loops (but possibly multiple edges) on $n$ vertices that are ordered as $v_1,\ldots,v_n$. Hence we view its Laplacian, $\Delta_G$, as a map $\integers^n\to\integers^n$. Let $L={\rm Image}(\Delta_G)$. We say that $\mec d,\mec d'\in\integers^n$ are {\em equivalent}, written $\mec d\sim\mec d'$, if $\mec d-\mec d'\in L$, and say that $\mec d$ is {\em effective} if $\mec d\ge \mec 0$. Let $\cN$ be the elements of $\integers^n$ that are not equivalent to an effective element of $\integers^n$; in particular $$ \deg(\mec d)<0 \implies \mec d\in \cN. $$ Consider \begin{equation}\label{eq_f_distance_cN} f(\mec d)=\rho_{L^1}(\mec d,\cN)=\min_{\mec d'\in\cN}\|\mec d-\mec d'\|_{L^1}, \end{equation} where $\|\, \cdot\, \|_{L^1}$ is the usual $L^1$-norm $$ \| (x_1,\ldots,x_n)\|_{L^1}=|x_1|+\cdots + |x_n|. $$ We also write $f=f_G$, to emphasize the graph $G$, although its definition as a function $\integers^n\to\integers$ also depends on the ordering $v_1,\ldots,v_n$ of its vertices. The {\em Baker-Norine rank} of $\mec d$, denoted $r_{\rm BN}(\mec d)$, is $f(\mec d)-1$. \end{definition} Since $f(\mec d)=0$ iff $\mec d\in \cN$, which is the case if $\deg(\mec d)<0$, it follows that $f$ is initially zero, and hence $r_{\rm BN}(\mec d)$ initially equals $-1$. We remark that for $\mec d$ with $f(\mec d)\ge 1$ we easily see that both: \begin{enumerate} \item $r_{\rm BN}(\mec d)=f(\mec d)-1$ equals the largest integer $m\ge 0$ such that for any $\mec a\ge\mec 0$ of degree $m$ we have that $\mec d-\mec a$ is equivalent to an effective element of $\integers^n$, and \item $f(\mec d)=1+\min_{i\in[n]} f(\mec d-\mec e_i)$. \end{enumerate} The Baker-Norine {\em Graph Riemann-Roch} formula states that for all $\mec d$ we have \begin{equation}\label{eq_baker_norine_formula} r_{\rm BN}(\mec d) - r_{\rm BN}(\mec K -\mec d) = \deg(\mec d)+1-g \end{equation} where \begin{enumerate} \item $g=1 + |E| - |V|$ (which is non-negative since $G$ is connected), and \item $\mec K= \bigl( \deg_G(v_1)-2,\ldots,\deg_G(v_n)-2 \bigr)$, where $\deg_G(v)$ is the degree of $v$ in $G$, i.e., the number of edges incident upon $v$ in $G$. \end{enumerate} It follows that for all $\mec d\in\integers^n$ \begin{equation}\label{eq_baker_norine_formula_for_f} f(\mec d) - f(\mec K -\mec d) = \deg(\mec d)+1-g. \end{equation} It follows that for $\mec d$ such that $$ \deg(\mec d)>\deg(\mec K)=\sum_i \Bigl( \deg_G(v_i)-2 \Bigr)=2|E|-2|V| $$ we have $f(\mec K -\mec d)=0$; hence \begin{equation}\label{eq_when_BN_rank_is_linear} \deg(\mec d)> 2|E|-2|V| \quad\implies\quad f(\mec d) = \deg(\mec d)+1-g , \end{equation} i.e., $f(\mec d)$ eventually equals $\deg(\mec d)+1-g$. Hence $f$ is a Riemann function with offset $C=1-g$. The Baker-Norine formula is an analog of the classical Riemann-Roch formula for algebraic curves or Riemann surfaces; we briefly discuss this in Subsection~\ref{su_classical_RR}. \subsection{Generalizations of the Baker-Norine Rank} Many variants of the Baker-Norine rank have been studied.
We remark that in the literature that generalizes the Baker-Norine rank, e.g., \cite{amini_manjunath}, one typically studies the function $r=f-1$ where $f$ is as in \eqref{eq_f_distance_cN} for various $\cN$, and hence $r$ is initially $-1$ instead of initially $0$. \begin{example}\label{ex_amini_manjunath} Amini and Manjunath \cite{amini_manjunath} generalized Definition~\ref{de_baker_norine_rank} by taking $L\subset\integers^n_{\deg 0}$ to be any lattice of full rank in $\integers^n_{\deg 0}$ (i.e., rank $n-1$); in this case the definitions of ``equivalent,'' ``effective,'' and of $\cN$ in Definition~\ref{de_baker_norine_rank} carry over; they show that $f$ as in \eqref{eq_f_distance_cN} is a Riemann function with offset $1-g_{\max}(L)$, with $g_{\max}(L)$ as defined on page~5 there. They also give conditions on $L$ so that a Riemann-Roch analog \eqref{eq_baker_norine_formula_for_f} holds; one of their conditions is that all maximal points of $\cN$ have the same degree (i.e., $g_{\min}=g_{\max}$ as in \cite{amini_manjunath}); they give a second, more technical condition. \end{example} To generalize the above examples, let us give some conditions on a subset $\cN\subset\integers^n$ which ensure that $f$ in \eqref{eq_f_distance_cN} gives a Riemann function. \begin{proposition}\label{pr_cN} Let $n\in\naturals$ and $\cN\subset \integers^n$ such that \begin{enumerate} \item for some $m,m'\in\integers$ we have \begin{equation}\label{eq_cN_bounded} \integers^n_{\deg\le m} \subset \cN \subset \integers^n_{\deg\le m'}, \end{equation} and \item \label{it_largest_degree_elt_N} setting $M$ to be the largest degree of an element of $\cN$, there exists a $C$ such that if $\mec d\in\integers^n_{\deg M}$, then some $\mec d'\in\cN \cap \integers^n_{\deg M}$ has $\|\mec d-\mec d'\|_{L^1} \le C$. \end{enumerate} Then $f$ as in \eqref{eq_f_distance_cN} is a Riemann function with offset $-M$. \end{proposition} \begin{proof} Since $\mec d\in\cN$ for $\deg(\mec d)\le m$, we have that $f$ is initially zero. By induction on $\deg(\mec d)$, we easily show that for any $\mec d$ with $\deg(\mec d)>M$, the $L^1$ distance from $\mec d$ to $\integers^n_{\deg\le M}$ is at least $\deg(\mec d)-M$. Hence \begin{equation}\label{eq_f_upper_bound_M} f(\mec d) \ge \deg(\mec d)-M; \end{equation} let us show that equality holds for $\deg(\mec d)\ge M+Cn$. Say that $\mec d\in\integers^n$ satisfies $\deg(\mec d)\ge M+Cn$. Then setting $b=\deg(\mec d)-M-Cn\ge 0$ we have $$ \widetilde{\mec d}=\mec d - C \mec 1 - b \mec e_1 $$ has degree $M$; hence for some $\mec d'\in\cN\cap\integers^n_{\deg M}$ we have $$ \widetilde{\mec d}-\mec d' = \mec a $$ where $$ |a_1|+\cdots+|a_n| \le C; $$ hence $|a_i|\le C$ for all $i$. It follows that setting $\mec a'$ to be $$ \mec a' = \mec d - \mec d' = \mec d - (\widetilde{\mec d}-\mec a) = C\mec 1 + b\mec e_1 +\mec a, $$ we have $a_1'=C+a_1+b$ and for $i\ge 2$, $a_i'=C+a_i$, and hence all $a_i'\ge 0$ (since $|a_i|\le C$ and $b\ge 0$). Hence the $L^1$ distance of $\mec d$ to $\mec d'$ is at most $$ a_1'+\cdots+a_n' = \deg(\mec d)-\deg(\mec d')=\deg(\mec d)-M, $$ and hence $f(\mec d)\le \deg(\mec d)-M$. Hence, \eqref{eq_f_upper_bound_M} holds with equality whenever $\deg(\mec d)\ge M+Cn$. \end{proof} Let us give a simple illustration of Proposition~\ref{pr_cN}, and then make some further remarks on the examples it provides.
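As a simple illustration, take $\cN=\integers^n_{\deg\le -1}$: then \eqref{eq_cN_bounded} holds with $m=m'=-1$, condition~\eqref{it_largest_degree_elt_N} holds with $M=-1$ and $C=0$ (each $\mec d\in\integers^n_{\deg M}$ lies in $\cN$ itself), and the resulting $f$ in \eqref{eq_f_distance_cN} is $f(\mec d)=\max\bigl(0,\deg(\mec d)+1\bigr)$, a Riemann function with offset $-M=1$.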
\begin{remark} Condition~\eqref{it_largest_degree_elt_N} of Proposition~\ref{pr_cN} on $\cN$ above follows from the following stronger condition: for any $\cN\subset\integers^n$, say that $\mec d\in\integers^n$ is an {\em invariant translation of $\cN$} if for all $\mec d'\in\integers^n$, $\mec d'\in\cN$ iff $\mec d+\mec d'\in\cN$. We easily see that the set, $T=T(\cN)$ of all invariant translations is a subgroup of the additive group $\integers^n$, and that \eqref{eq_cN_bounded} implies that $T\subset\integers^n_{\deg 0}$. If $T$ is a full rank subgroup of $\integers^n_{\deg 0}$ (i.e., of rank $n-1$), then condition~\eqref{it_largest_degree_elt_N} of Proposition~\ref{pr_cN} is automatically satisfied. \end{remark} \begin{remark} In typical examples $\cN$ above is a {\em downset}, i.e., $\mec d\in\cN$ and $\mec d'\le\mec d$ implies that $\mec d'\in\cN$. In this case if the closest point in $\cN$ to some $\mec d\in\integers^n$ is $\mec d'\in\cN$, then clearly (1) $\mec d'\le\mec d$, and (2) with $f$ as in \eqref{eq_f_distance_cN}, $f(\mec d)=\deg(\mec d-\mec d')$; we easily verify the converse, i.e., $$ f(\mec d)= \min\{ \deg(\mec d-\mec d') \ | \ \mec d'\in\cN, \ \mec d'\le\mec d\} $$ $$ =\min\{ \deg(\mec d-\mec d') \ | \ f(\mec d')=0\}. $$ Furthermore, if $\cN$ is a downset, then for any $i\in [n]$, any path from a $\mec d\in\integers^n$ to a $\mec d'\in \cN$ translates to a path of the same length from $\mec d-\mec e_i$ to $\mec d'-\mec e_i$, which again lies in $\cN$. Hence if $\cN$ is a downset, then $f=f(\mec d)$ as in \eqref{eq_f_distance_cN} is a non-decreasing function of $\mec d$. \end{remark} \begin{remark} We remark that if $L\subset\integers^n_{\deg 0}$ is not of full rank in Example~\ref{ex_amini_manjunath}, then condition~(2) of Proposition~\ref{pr_cN} fails to hold, and we easily see that $f$ in \eqref{eq_f_distance_cN} fails to be a Riemann function. \end{remark} \subsection{Examples Based on Riemann's Theorem} \label{su_classical_RR} All the above discussion is based on the classical {\em Riemann's theorem} and {\em Riemann-Roch theorem}. However, we use these examples only for illustration, and they are not essential to our discussion of the Baker-Norine rank functions and of most of the rest of this article. Let $X$ be an algebraic curve over an algebraically closed field $k$, and $K$ be its function field; one understands either (1) $K$ is a finite extension of $k(x)$ where $x$ is an indeterminate (i.e., transcendental) and $X$ is its set of discrete valuations (e.g., \cite{lang_algebraic_functions}, Section~1.2), or (2) $X$ is projective curve in the usual sense (e.g., \cite{hartshorne}, Section~4.1), and $K$ is its function field. (For $k=\complex$ one can also view $X$ as a compact Riemann surface, and $K$ as its field of meromorphic functions.) To each $f\in K\setminus\{0\}$ one associates the divisor (i.e., Weil divisor) equal to $(f)=\sum_{v\in X}{\rm ord}_v(f) v$ \cite{lang_algebraic_functions}\footnote{ Here ${\rm ord}_v(f)$ is (1) $0$ if $f(v)$ is finite and non-zero, (2) the multiplicity of the zero at $v$ if $f(v)=0$, and (3) minus the multiplicity of the pole at $v$ if $f(v)=\infty$. }. For each divisor $D$ one sets $$ L(D) = \{0\}\cup \{ f\in K \ | \ (f)\ge -D\}, $$ where we regard $0\in K$ as having divisor $(0)\ge -D$ for all $D$; this makes $L(D)\subset K$ a $k$-linear subspace, and we set $$ l(D) = \dim_k L(D). $$ For a divisor $D$, we use $\deg(D)$ to denote the sum of the $\integers$-coefficients in $D$. 
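For instance, if $X$ is the projective line over $k$, so that $K=k(x)$, and $D=2\cdot\infty$ where $\infty$ denotes the pole of $x$, then $L(D)$ is spanned by $1,x,x^2$, so $l(D)=3$ and $\deg(D)=2$.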
For $f\in K\setminus\{0\}$, $f$ has the same number of zeroes and poles, counted with multiplicity, i.e., $\deg((f))=0$. It follows that $l(D)=0$ when $\deg(D)<0$. {\em Riemann's theorem} says that for the {\em genus} $g\in\integers_{\ge 0}$ of $X$, for any divisor $D$ with $\deg(D)$ sufficiently large, $$ l(D) = \deg(D)+1-g. $$ Hence for any points $P_1,\ldots,P_n\in X$, the function \begin{equation}\label{eq_f_based_on_classical_RR} f(\mec d)\eqdef l(d_1P_1+\cdots+d_n P_n) \end{equation} is a Riemann function. The Riemann-Roch formula states that $$ l(D) = l(\omega-D)+\deg(D)+1-g $$ where $\omega$ is the {\em canonical divisor}, i.e., the divisor associated to any $1$-form. \begin{example} Let $X$ be an elliptic curve, i.e., a curve of genus $g=1$, and $P_1,P_2$ two points of the curve. The Riemann-Roch theorem implies that $f(\mec d)=0$ if $\deg(\mec d)<0$ and $f(\mec d)=\deg(\mec d)$ if $\deg(\mec d)>0$. Hence it remains to determine $f(\mec d)$ for $\mec d=(d_1,-d_1)$ of degree $0$, and $f(d_1,-d_1)$ is either $0$ or $1$. If $P_1-P_2$ has infinite order in the group law (which, for fixed $P_1$, holds for all but countably many $P_2$), then $f(d_1,-d_1)=1$ iff $d_1=0$; by contrast, if $P_1-P_2$ has order $r\in\naturals$, then $f(d_1,-d_1)=1$ iff $d_1$ is divisible by $r$. \end{example} \subsection{Riemann Functions from Other Riemann Functions} \begin{example} If for some $k,n\in\naturals$, $f_1,\ldots,f_{2k+1}$ are Riemann functions, then so is $$ f_1 - f_2 + f_3 - \cdots - f_{2k}+f_{2k+1}. $$ \end{example} One can restrict any Riemann function to a subset of its variables, the others taking fixed values, to get a Riemann function on fewer variables. In \cite{folinsbee_friedman_Euler_characteristics} the restriction to two variables is the most important. Let us define the appropriate notation. \begin{example}\label{ex_Riemann_function_restriction} Let $f\from\integers^n\to\integers$ be any Riemann function with $f(\mec d)=\deg(\mec d)+C$ for $\deg(\mec d)$ sufficiently large. Then for any distinct $i,j\in[n]$ and $\mec d\in\integers^n$, the function $f_{i,j,\mec d}\from\integers^2\to\integers$ given as \begin{equation}\label{eq_two_variable_restriction} f_{i,j,\mec d}(a_i,a_j) = f\bigl(\mec d + a_i\mec e_i + a_j\mec e_j \bigr) \end{equation} is a Riemann function $\integers^2\to\integers$, and for $a_i+a_j$ large we have \begin{equation}\label{eq_two_variable_restriction_constant} f_{i,j,\mec d}(a_i,a_j) = a_i+a_j+ C',\quad\mbox{where}\quad C'=\deg(\mec d)+ C. \end{equation} We call $f_{i,j,\mec d}$ a {\em two-variable restriction} of $f$; we may similarly restrict $f$ to one variable or three or more variables, and any such restriction is clearly a Riemann function. \end{example} [It turns out that in \cite{folinsbee_friedman_Euler_characteristics}, it is important that $C'$ depends only on $\mec d$ and not on $i,j$.] \subsection{Typical Properties of Riemann Functions} Let us describe some typical properties of the Riemann functions above. \begin{definition}\label{de_slowly_growing_and_periodic} We say that a function $f\from\integers^n\to\integers$ is \begin{enumerate} \item {\em slowly growing} if for all $\mec d\in\integers^n$ and $i\in[n]$ we have $$ f(\mec d)\le f(\mec d+\mec e_i) \le f(\mec d)+1, $$ and \item {\em $p$-periodic} for a $p\in\naturals$ if for all $i,j\in[n]$ and all $\mec d\in\integers^n$ we have $$ f(\mec d+p\,\mec e_i-p\,\mec e_j)=f(\mec d).
$$ \end{enumerate} \end{definition} We easily see: \begin{enumerate} \item $f$ in \eqref{eq_f_based_on_classical_RR} is always slowly growing, but not generally periodic; \item $f$ in \eqref{eq_f_distance_cN} is slowly growing whenever $\cN$ is a {\em downset} (as remarked above); \item in Example~\ref{ex_amini_manjunath}, $f$ is $p$-periodic for any $p$ such that each element of $\integers^n_{\deg 0}/L$ has order divisible by $p$ (hence this holds for $p=|\integers^n_{\deg 0}/L|$); \item in Example~\ref{ex_Riemann_function_restriction}, if $f\from\integers^n\to\integers$ is either slowly growing or $p$-periodic for some $p$, then the same holds of any restriction of $f$ to two (or any number) of its variables. \end{enumerate} \section{The Weight of a Riemann Function, and Generalized Riemann Functions} \label{se_generalized_Riemann} In this section we define the {\em weight} of a Riemann function, a notion central to this article. Since a Riemann function $\integers^2\to\integers$ eventually equals $d_1+d_2+C$, one may consider that one possible generalization of this notion for a function $\integers^3\to\integers$ might be a function that eventually equals a polynomial of degree two in $d_1,d_2,d_3$. In fact, almost everything we say about Riemann functions holds for a much larger class of functions $\integers^n\to\integers$ which we call {\em generalized Riemann functions}; these are the initially zero functions that eventually equal a {\em modular function} (defined below), a class that includes all polynomials in $d_1,\ldots,d_n$ of degree at most $n-1$, and many more functions besides. \subsection{Weights and M\"obius Inversion} If $f\from\integers^n\to\integers$ is initially zero, then there is a unique initially zero $W\from\integers^n\to\integers$ for which \begin{equation}\label{eq_define_sigma} f(\mec d)=\sum_{\mec d'\le\mec d} W(\mec d'), \end{equation} since we can determine $W(\mec d)$ inductively on $\deg(\mec d)$ by setting \begin{equation}\label{eq_inductively_define_W_from_f} W(\mec d) = f(\mec d)-\sum_{\mec d'\le\mec d,\ \mec d'\ne \mec d} W(\mec d'). \end{equation} Recall from \eqref{eq_e_I_notation} the notation $\mec e_I$ for $I\subset [n]$. \begin{proposition}\label{pr_Mobius_inversion} Consider the operator $\frakm$ on functions $f\from\integers^n\to\integers$ defined via \begin{equation}\label{eq_define_mu} (\frakm f)(\mec d) = \sum_{I\subset [n]} (-1)^{|I|} f(\mec d-\mec e_I), \end{equation} and the operator $\fraks$ on functions $W\from\integers^n\to\integers$ that are initially zero given by \begin{equation}\label{eq_define_s} (\fraks W)(\mec d) = \sum_{\mec d'\le\mec d} W(\mec d'). \end{equation} Then if $f$ is any initially zero function, and $W$ is given by the equation $f=\fraks W$ (i.e., $W$ is defined inductively by \eqref{eq_inductively_define_W_from_f}), then $W=\frakm f$. \end{proposition} The above can be viewed as the M\"obius inversion formula for the partial order $\le$ on $\integers^n$. \begin{proof} We have $f(\mec d)=0$ whenever $\deg(\mec d)\le b$ for some $b$, and then \eqref{eq_define_mu} shows that $(\frakm f)(\mec d)=0$ for $\deg(\mec d)\le b$ as well. Since there is a unique initially zero $W$ with $\fraks W=f$, it suffices to show that $\fraks\frakm f=f$.
Since $f$ is initially zero, for any $\mec d\in\integers^n$ write $(\fraks\frakm f)(\mec d)$ as $$ (\fraks\frakm f)(\mec d) = \sum_{\mec d'\le \mec d} \sum_{I\subset [n]} (-1)^{|I|} f(\mec d'-\mec e_I) $$ which is a double sum of finitely many terms since $f$ is initially zero; hence we may rearrange terms, set $\mec d''=\mec d'-\mec e_I$ and write this double sum as $$ \sum_{\mec d''\le \mec d} f(\mec d'') \,a_{\mec d''}, \quad\mbox{where}\quad a_{\mec d''}=\sum_{I\ {\rm s.t.}\ \mec d''+\mec e_I \le \mec d} (-1)^{|I|}; $$ to compute $a_{\mec d''}$, setting $J = \{ j \in [n] \ | \ d_j'' < d_j \}$, we have $$ \sum_{I\ {\rm s.t.}\ \mec d''+\mec e_I \le \mec d} (-1)^{|I|} = \sum_{I\subset J}(-1)^{|I|} $$ which equals $1$ if $J=\emptyset$ and otherwise equals $0$. It follows that $a_{\mec d}=1$, and for $\mec d''\ne\mec d$, we have $a_{\mec d''}=0$. \end{proof} \begin{definition} Throughout this article we reserve the symbols $\frakm,\fraks$ for their meanings in \eqref{eq_define_mu} and \eqref{eq_define_sigma}. If $f,W$ are initially zero functions $\integers^n\to\integers$ with $f=\fraks W$, we say that $f$ {\em counts} $W$ and that $W$ is the {\em weight} of $f$. A function $h\from\integers^n\to\integers$ is {\em modular} if $h\in\ker\frakm$ (i.e., $\frakm h$ is the zero function). We say that $f\from\integers^n\to\integers$ is a {\em generalized Riemann function} if \begin{enumerate} \item $f$ is initially zero, and \item $f$ eventually equals a modular function, i.e., for some $h\in\ker\frakm$ we have $f(\mec d)=h(\mec d)$ for $\deg(\mec d)$ sufficiently large. \end{enumerate} \end{definition} \subsection{Weights of Riemann Functions $\integers^2\to\integers$} We will be especially interested in Riemann functions $\integers^2\to\integers$ and their weights $W=\frakm f$. It is useful to notice that for such functions we have that for any fixed $d_1$ and $d_2$ sufficiently large, $$ f(d_1,d_2)-f(d_1-1,d_2) = 1, $$ and hence, for fixed $d_1$, \begin{equation}\label{eq_two_dim_row_sums} \sum_{d_2=-\infty}^\infty W(d_1,d_2) = 1, \end{equation} and similarly, for fixed $d_2$ we have \begin{equation}\label{eq_two_dim_col_sums} \sum_{d_1=-\infty}^\infty W(d_1,d_2) = 1. \end{equation} Viewing $W$ as a two-dimensional infinite array of numbers indexed in $\integers\times\integers$, one can therefore say that $W\from\integers^2\to\integers$ is a Riemann weight iff all its ``row sums'' \eqref{eq_two_dim_row_sums} and all its ``column sums'' \eqref{eq_two_dim_col_sums} equal one. \subsection{Examples and Classification of Generalized Riemann Functions} At times it is convenient to write $\frakm$ using the ``downward shift operators,'' $\frakt_i$ for $i\in[n]$, where $\frakt_i$ is the operator on functions $\integers^n\to\integers$ given by \begin{equation}\label{eq_frakt_def} (\frakt_i f)(\mec d) = f(\mec d - \mec e_i); \end{equation} one easily verifies that the $\frakt_i$ commute with one another, and that $$ \frakm = (1-\frakt_1)\ldots(1-\frakt_n), $$ (where 1 is the identity operator). In particular, it follows that if $f=f(\mec d)$ is independent of its $i$-th variable, then $(1-\frakt_i)f=0$, and hence $\frakm f=0$. Hence $\frakm f=0$ if (1) $f$ is a sum of functions, each of which is independent of some variable, and, in particular, (2) if $f$ is a polynomial of degree at most $n-1$. Hence $\deg(\mec d)+C$ is a modular function for any $n\ge 2$, and hence a Riemann function is, indeed, a generalized Riemann function. We now characterize modular functions in two different ways.
\begin{theorem}\label{th_modular_function_as_sum} A function $h\from\integers^n\to\integers$ is modular iff it can be written as a sum of functions each of which depends on only $n-1$ of its $n$ variables. \end{theorem} We postpone its proof to Section~\ref{se_fundamental_domains}. The following description of modular functions will be needed when we discuss what we call {\em Riemann-Roch formulas}. \begin{theorem}\label{th_modular_function_from_strip} If $a\in\integers$, $n\in\naturals$, and $h$ is any integer-valued function defined on $\mec d\in\integers^n$ with $a\le \deg(\mec d)\le a+n-1$, then $h$ has a unique extension to a modular function $\integers^n\to\integers$. \end{theorem} We also postpone the proof of this theorem to Section~\ref{se_fundamental_domains}. According to this theorem, if $h_1,h_2$ are two modular functions that are eventually equal (i.e., $h_1(\mec d)=h_2(\mec d)$ for $\deg(\mec d)$ sufficiently large), then $h_1=h_2$. In particular, if $f\from\integers^n\to\integers$ is a generalized Riemann function, then the modular function $h$ that is eventually equal to $f$ is uniquely determined. \subsection{The Weight of the Baker-Norine Rank and Other Functions Initially Equal to $-1$} \label{su_weight_of_Baker_Norine} Since the Baker-Norine rank and many similar functions are initially equal to $-1$, we make the following convention. \begin{definition}\label{de_weight_Baker_Norine} If $r\from\integers^n\to\integers$ is a function that is initially equal to $-1$, by the {\em weight} of $r$ we mean the function $\frakm r$, which clearly equals $\frakm f$ with $f=1+r$. \end{definition} We also note that in the above definition, for any $i\in[n]$ we have $(1-\frakt_i)r=(1-\frakt_i)f$. Hence, as soon as we apply either all of $\frakm$, or merely one of its factors $1-\frakt_i$, there is no difference in working with $r$ or $f$. When computing the weight of Baker-Norine type functions, we often use the more suggestive $r_{\rm BN}$ rather than $f=1+r_{\rm BN}$. \section{Riemann-Roch Formulas and Self-Duality} \label{se_riemann_roch_formulas} In this section we express Riemann-Roch formulas more simply in terms of the weight of the Riemann function. \begin{definition}\label{de_generalized_Riemann_Roch_formula} Let $f\from\integers^n\to\integers$ be a generalized Riemann function, and $h$ the modular function eventually equal to $f$. For $\mec K\in\integers^n$, the {\em $\mec K$-dual of $f$}, denoted $f^\wedge_{\mec K}$, refers to the function $\integers^n\to\integers$ given by \begin{equation}\label{eq_first_dual_formulation} f^{\wedge}_{\mec K}(\mec d)=f(\mec K-\mec d)-h(\mec K-\mec d). \end{equation} We equivalently write \begin{equation}\label{eq_generalized_riemann_roch} f(\mec d) - f^{\wedge}_{\mec K}(\mec K-\mec d) = h(\mec d) \end{equation} and refer to this equation as a {\em generalized Riemann-Roch formula}. \end{definition} In particular, if $f$ is a Riemann function with offset $C$, then $h(\mec d)=\deg(\mec d)+C$, and \eqref{eq_generalized_riemann_roch} means that \begin{equation}\label{eq_riemann_roch} f(\mec d) - f^{\wedge}_{\mec K}(\mec K-\mec d) = \deg(\mec d)+C. \end{equation} The usual Riemann-Roch formulas---the classical one and the Baker-Norine formula---are cases where $f^\wedge_{\mec K}=f$ for some $f,\mec K$.
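To illustrate the $\mec K$-dual in a simple case not arising from a graph or a curve, consider, for $n\ge 2$, the Riemann function $f(\mec d)=\max\bigl(0,\deg(\mec d)+1\bigr)$, whose eventually-equal modular function is $h(\mec d)=\deg(\mec d)+1$; then \eqref{eq_first_dual_formulation} gives $$ f^\wedge_{\mec K}(\mec d)=\max\bigl(0,\deg(\mec K-\mec d)+1\bigr)-\bigl(\deg(\mec K-\mec d)+1\bigr) = \max\bigl(0,\deg(\mec d)-\deg(\mec K)-1\bigr), $$ and hence $f^\wedge_{\mec K}=f$ precisely for those $\mec K$ with $\deg(\mec K)=-2$.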
Hence the above definition is very loose: it says that for any generalized Riemann function, $f$, and any $\mec K\in\integers^n$, there is always a ``generalized Riemann-Roch formula;'' we refer to the special cases where $f=f^\wedge_{\mec K}$ for some $\mec K$ as {\em self-duality} in Definition~\ref{de_self_dual_generalized_Riemann_function} below. In Subsection~\ref{su_Riemann_Functions_and_Weights} we explained some reasons we work with generalized Riemann-Roch formulas; briefly, these reasons are: (1) requiring self-duality would eliminate many interesting Riemann functions, such as the general ones considered by \cite{amini_manjunath}, and likely some interesting generalized Riemann functions; and (2) self-duality does not behave well under fixing some of the variables of a Riemann function and considering the resulting restriction. We now give remarks, a theorem, and examples regarding generalized Riemann-Roch formulas. \begin{definition} If $W\from\integers^n\to\integers$ is any function and $\mec L\in\integers^n$, the {\em $\mec L$-dual weight of $W$}, denoted $W^*_{\mec L}$, refers to the function given by $$ W^*_{\mec L}(\mec d)=W(\mec L-\mec d). $$ \end{definition} It is immediate that $(W^*_{\mec L})^*_{\mec L}=W$. \begin{theorem}\label{th_easy_dual_functions_theorem} Let $f\from\integers^n\to\integers$ be a generalized Riemann function, and $W=\frakm f$. Let $\mec K\in\integers^n$ and let $\mec L = \mec K + \mec 1$. \begin{enumerate} \item we have \begin{equation}\label{eq_dual_weight_equation} \frakm\bigl(f^\wedge_{\mec K}\bigr) = (-1)^n W^*_{\mec L} = (-1)^n (\frakm f)^*_{\mec L}. \end{equation} \item $f^\wedge_{\mec K}$ is a generalized Riemann function, and a Riemann function if $f$ is. \item $(f^\wedge_{\mec K})^\wedge_{\mec K}=f$. \item $f^\wedge_{\mec K}=f$ iff $W^*_{\mec L}=(-1)^n W$. \end{enumerate} \end{theorem} \begin{proof} Proof of~(1): applying $\frakm$ to \eqref{eq_first_dual_formulation} we have \begin{equation}\label{eq_first_step_in_K_to_L} ( \frakm \bigl(f^\wedge_{\mec K}\bigr) )(\mec d) = \sum_{I\subset[n]} (-1)^{|I|} f^\wedge_{\mec K}(\mec d-\mec e_I) \end{equation} which, in view of \eqref{eq_first_dual_formulation}, equals \begin{equation}\label{eq_second_step_in_K_to_L} \sum_{I\subset[n]} (-1)^{|I|} \Bigl( f(\mec K-\mec d+\mec e_I) - h(\mec K-\mec d+\mec e_I) \Bigr). \end{equation} Substituting $J=[n]\setminus I$, for any $g\from\integers^n\to\integers$ we can write $$ \sum_{I\subset[n]} (-1)^{|I|} g(\mec K-\mec d+\mec e_I) = \sum_{J\subset[n]} (-1)^{n-|J|} g(\mec K-\mec d+\mec 1 - \mec e_J) $$ $$ = (-1)^n \sum_{J\subset[n]} (-1)^{|J|} g(\mec K-\mec d+\mec 1 - \mec e_J) = (-1)^n (\frakm g)(\mec K-\mec d+\mec 1) = (-1)^n (\frakm g)^*_{\mec L}(\mec d). $$ Taking $g=f-h$, and using $\frakm f=W$ and $\frakm h=0$, we have that \eqref{eq_second_step_in_K_to_L} equals $(-1)^n W^*_{\mec L}(\mec d)$, and since this also equals \eqref{eq_first_step_in_K_to_L} we get \eqref{eq_dual_weight_equation}. Proof of~(2): $f$ is a generalized Riemann function iff $W=\frakm f$ is both initially and eventually zero (the ``only if'' direction is immediate, since $(\frakm f)(\mec d)$ depends only on the values of $f$ at points of degree at least $\deg(\mec d)-n$, and the ``if'' direction follows from Theorem~\ref{th_modular_function_from_strip}); this condition holds for $W$ iff it holds for $W^*_{\mec L}$. Since $f^\wedge_{\mec K}$ is initially zero (for $\deg(\mec d)$ sufficiently small, $\deg(\mec K-\mec d)$ is large and hence $f(\mec K-\mec d)=h(\mec K-\mec d)$), part~(1) shows that $f$ is a generalized Riemann function iff $f^\wedge_{\mec K}$ is.
Moreover, $f$ is a Riemann function iff in addition \eqref{eq_generalized_riemann_roch} has $h(\mec d)=\deg(\mec d)+C$; in this case \eqref{eq_riemann_roch} with $\mec d$ replaced with $\mec K-\mec d$ is equivalent to $$ f(\mec K-\mec d) - f^{\wedge}_{\mec K}(\mec d) = h(\mec K-\mec d) $$ for all $\mec d$, which reversing the sign gives $$ f^{\wedge}_{\mec K}(\mec d) - f(\mec K-\mec d) = - h(\mec K-\mec d) = -\deg(\mec K-\mec d)-C = \deg(\mec d)+C', $$ where $C' = -C-\deg(\mec K)$; since $f$ is initially zero, $f(\mec K-\mec d)=0$ for $\deg(\mec d)$ sufficiently large, and hence $f^{\wedge}_{\mec K}$ eventually equals $\deg(\mec d)+C'$, i.e., $f^{\wedge}_{\mec K}$ is a Riemann function with offset $C'$. Proof of~(3): we may write \eqref{eq_dual_weight_equation} as $$ f^\wedge_{\mec K}=\fraks (-1)^n (\frakm f)^*_{\mec L}, $$ and hence $$ (f^\wedge_{\mec K})^\wedge_{\mec K} =\fraks (-1)^n (\frakm f^\wedge_{\mec K})^*_{\mec L} =\fraks (-1)^n \bigl( (-1)^n W^*_{\mec L}\bigr)^*_{\mec L} =\fraks W = f. $$ Proof of~(4): $f^\wedge_{\mec K}=f$ (since both functions are initially zero) iff $\frakm f^\wedge_{\mec K}=\frakm f$, and by \eqref{eq_dual_weight_equation} this is equivalent to $(-1)^n W^*_{\mec L}=W$. \end{proof} \begin{definition}\label{de_self_dual_generalized_Riemann_function} We say that a generalized Riemann function $f\from\integers^n\to\integers$ is {\em self-dual} if either of the equivalent conditions holds: \begin{enumerate} \item for some $\mec K\in\integers^n$, $f^\wedge_{\mec K}=f$; \item for some $\mec L\in\integers^n$, $W^*_{\mec L}=(-1)^n W$. \end{enumerate} \end{definition} Let us remark on the uniqueness of $\mec K$ and $\mec L$ in the above definition: if $W^*_{\mec L_1}=W^*_{\mec L_2}$, it follows that for all $\mec d\in\integers^n$, $$ W(\mec d) = \bigl( (W^*_{\mec L_2})^*_{\mec L_2}\bigr) (\mec d) = \bigl( (W^*_{\mec L_1})^*_{\mec L_2}\bigr) (\mec d) = W^*_{\mec L_1}(\mec L_2-\mec d) = W(\mec L_1 - \mec L_2 +\mec d), $$ and therefore $W$ is translation invariant by $\mec L_1-\mec L_2$; since $f=\fraks W$, and $\fraks$ commutes with translation, $f$ is also translation invariant by $\mec L_1-\mec L_2$. Similarly, if $f^\wedge_{\mec K_1}=f^\wedge_{\mec K_2}$, then $W^*_{\mec L_1}=W^*_{\mec L_2}$ where $\mec L_j=\mec K_j+\mec 1$, and $\mec L_1-\mec L_2=\mec K_1-\mec K_2$, and hence $f$ and $W$ are both translation invariant by ${\mec K_1} - {\mec K_2}$. Hence $f$ and $W$ have the same set of invariant translations, $T\subset\integers^n_{\deg 0}$. Hence $\mec K$ and $\mec L$ in Definition~\ref{de_self_dual_generalized_Riemann_function} are unique up to a translation by the set $T$. We remark that the condition $(-1)^n W^*_{\mec L}=W$ seems to have more direct symmetry than the equivalent condition $f^\wedge_{\mec K}=f$; furthermore, in the examples of the $W$ that we compute in Sections~\ref{se_two_vertices} and~\ref{se_completegraph}, the $W$ are very sparse (i.e., mostly $0$), and so verifying $(-1)^n W^*_{\mec L}=W$ seems simpler. Of course, the classical or Graph Riemann-Roch formulas, in terms of our Definition~\ref{de_self_dual_generalized_Riemann_function}, are assertions that self-duality holds in these cases. \begin{example} The Baker-Norine \cite{baker_norine} Graph Riemann-Roch theorem for a graph, $G=(V,E)$, with $V=\{v_1,\ldots,v_n\}$ can be stated as $$ r_{{\rm BN},G}(\mec d)-r_{{\rm BN},G}(\mec K-\mec d) = \deg(\mec d)+1-g, $$ where $g=|E|-|V|+1$ and $\mec K=\sum_i\mec e_i (\deg_G(v_i)-2)$. Since $f=r_{{\rm BN},G}+1$ is the associated Riemann function, the left-hand side above equals $f(\mec d)-f(\mec K-\mec d)$, while \eqref{eq_generalized_riemann_roch} says that $f(\mec d)-f^\wedge_{\mec K}(\mec K-\mec d)$ equals the same right-hand side $\deg(\mec d)+1-g$; hence $f^\wedge_{\mec K}=f$, i.e., $f$ is self-dual.
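For instance, for the complete graph $K_3$ we have $g=3-3+1=1$ and $\mec K=(0,0,0)$, so the formula reads $r_{{\rm BN},K_3}(\mec d)-r_{{\rm BN},K_3}(-\mec d)=\deg(\mec d)$; equivalently, $f=1+r_{{\rm BN},K_3}$ satisfies $f^\wedge_{\mec 0}=f$.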
\end{example} \begin{example} Amini and Manjunath \cite{amini_manjunath} give conditions for $f$ as in \eqref{eq_f_distance_cN} with $\cN$ as in Example~\ref{ex_amini_manjunath} to satisfy self-duality. The first is that all maximal points of $\cN$ have the same degree ($g_{\min}=g_{\max}$ in \cite{amini_manjunath}); the second is more technical. However, to us these Riemann functions seem interesting to study, whether or not self-duality holds. \end{example} \section{The Weight of Two Vertex Graphs and Riemann Functions of Two Variables} \label{se_two_vertices} In this section we prove the following theorem. \begin{theorem}\label{th_two_vertices} Let $G$ be a graph on two vertices, $v_1,v_2$, with $r\ge 1$ edges joining $v_1$ and $v_2$. Let $r_{\rm BN}\from\integers^2\to\integers$ be the Baker-Norine rank, and let $f=1+r_{\rm BN}$, i.e., $f$ is as in \eqref{eq_f_distance_cN} in Definition~\ref{de_baker_norine_rank}. Then $\mec d$ is in the image of the Laplacian iff $\mec d$ is an integral multiple of $(r,-r)$. Let $W=\frakm f$ be the weight of $f$. Then $$ W(0,0)=W(1,1)=\ldots = W(r-1,r-1)=1; $$ furthermore $W(\mec d)=1$ if $\mec d$ is equivalent to one of $(i,i)$ with $i=0,\ldots,r-1$, and otherwise $W(\mec d)=0$. \end{theorem} \subsection{Perfect Matchings and Slowly Growing Riemann Functions} In this subsection we make some remarks on weights that we call ``perfect matchings.'' \begin{definition} Let $W$ be a function $\integers^2\to\integers$ that is initially and eventually zero. We say that $W$ is a {\em perfect matching} if there exists a permutation (i.e., a bijection) $\pi\from\integers\to\integers$ such that \begin{equation}\label{eq_W_perfect_and_pi} W(i,j) = \left\{ \begin{array}{ll} 1 & \mbox{if $j=\pi(i)$, and} \\ 0 & \mbox{otherwise.} \end{array} \right. \end{equation} \end{definition} It follows that for $\pi$ as above, $\pi(i)+i$ is bounded above and below, since $W$ is initially and eventually $0$. Of course, if $W$ is $r$-periodic, i.e., for all $\mec d\in\integers^2$, $W(\mec d)=W(\mec d+(r,-r))$, then $\pi$ is {\em skew-periodic} in the sense that $\pi(i+r)=\pi(i)-r$ for all $i\in\integers$. \begin{proposition}\label{pr_W_either_zero_one_minus_one} Let $f\from\integers^2\to\integers$ be a slowly growing Riemann function, i.e., for $i=1,2$ and any $\mec d\in\integers^2$ we have $$ f(\mec d) \le f(\mec d+\mec e_i) \le f(\mec d)+1. $$ Let $W=\frakm f$ be the weight of $f$. Then $W$ takes only the values $0$ and $\pm 1$. Furthermore, for any $\mec d\in\integers^2$, setting $a=f(\mec d)$ we have \begin{equation}\label{eq_W_is_one} W(\mec d)=1 \iff f(\mec d-\mec e_1)=f(\mec d-\mec e_2)=f(\mec d - \mec e_1 - \mec e_2)=a-1, \end{equation} and \begin{equation}\label{eq_W_is_minus_one} W(\mec d)=-1 \iff f(\mec d-\mec e_1)=f(\mec d-\mec e_2)=a=f(\mec d - \mec e_1 - \mec e_2)+1. \end{equation} We say that $f$ is {\em supermodular} when $W(\mec d)\ge 0$ for all $\mec d$; in this case $W$ is a perfect matching. \end{proposition} \begin{proof} For $\mec d\in\integers^2$, let $a=f(\mec d)$. Then $f(\mec d - \mec e_1 - \mec e_2)$ is between $a-2$ and $a$, since $f$ is slowly growing. We proceed by a case analysis: \begin{enumerate} \item if $f(\mec d - \mec e_1 - \mec e_2)=a-2$, then $f(\mec d-\mec e_1)$ differs by at most $1$ from both $a$ and $a-2$, and hence $f(\mec d-\mec e_1)=a-1$; similarly $f(\mec d-\mec e_2)=a-1$, and so $W(\mec d)=0$.
\item if $f(\mec d - \mec e_1 - \mec e_2)=a$, then since $f$ is non-decreasing we have $f(\mec d-\mec e_i)=a$ for $i=1,2$, and hence $W(\mec d)=0$; \item if $f(\mec d - \mec e_1 - \mec e_2)=a-1$, then since $f$ is non-decreasing we have that for each $i=1,2$, $f(\mec d-\mec e_i)$ is either $a$ or $a-1$; this gives four cases to check, which imply \eqref{eq_W_is_one} and \eqref{eq_W_is_minus_one}. \end{enumerate} If $W$ never takes the value $-1$, then \eqref{eq_two_dim_row_sums} implies that for each $d_1$ there is a unique $d_2$ with $W(d_1,d_2)=1$, so setting $\pi(d_1)=d_2$ gives a map $\pi\from\integers\to\integers$; then \eqref{eq_two_dim_col_sums} implies that $\pi$ has an inverse. \end{proof} \begin{proof}[Proof of Theorem~\ref{th_two_vertices}] The rows of the Laplacian of $G$ are $(r,-r)$ and $(-r,r)$, and hence the image, $L$, of the Laplacian equals the integer multiples of $(r,-r)$. First let us prove that $f$ is supermodular by a case analysis: indeed, \begin{enumerate} \item if $f(\mec d)=0$, then $f(\mec d')=0$ for $\mec d'\le\mec d$ and hence $W(\mec d)=0$; \item if $f(\mec d)\ge 1$, then there is a path from $\mec d$ to $\cN$ as in \eqref{eq_f_distance_cN} of positive length through the points of $\integers^2$, and hence for some $i=1,2$ we have $f(\mec d-\mec e_i)=f(\mec d)-1$; then Proposition~\ref{pr_W_either_zero_one_minus_one} implies that $W(\mec d)\ge 0$. \end{enumerate} It follows that $W$ is a perfect matching, and hence $W$ is given by \eqref{eq_W_perfect_and_pi} for some permutation $\pi$; since $f$ is $r$-periodic, it suffices to determine $\pi(i)$ for $i=0,1,\ldots,r-1$. Let us do so by finding some values of $f$. Since $(0,0)\in L$, we have $f(0,0)=1$, and for all $i\ge 0$, $f(i,0)\ge 1$. But $(i,0)-\mec e_2$ cannot be equivalent to an effective element for $0\le i\le r-1$, since then for some $m\in\integers$ we would have $(i,-1)\ge m(r,-r)$, which implies both $m\le i/r<1$ and $m\ge 1/r>0$, which is impossible. Hence for $0\le i\le r-1$ we have $f(i,0)=1$. On the other hand, we can prove that for $i\ge 0$ we have $f(i,i)\ge i+1$, using induction on $i$: for $i=0$ we have $f(0,0)=1$, and for the inductive claim with $i\ge 1$, since $(i,i)$ is effective we have $$ f(i,i) = 1 + \min\bigl( f(i-1,i),f(i,i-1) \bigr) \ge 1+f(i-1,i-1)\ge 1+i $$ by the inductive hypothesis. For $0\le i\le r-1$, since $f(i,0)=1$ and $f(i,i)\ge i+1$, the fact that $f$ is slowly growing implies that $f(i,j)=j+1$ for $0\le j\le i$. Similarly, for $0\le i\le j\le r-1$, $f(i,j)=i+1$. Using this, it follows that for $i=0,\ldots,r-1$ we have $$ W(i,i) = f(i,i)-2 f(i,i-1) + f(i-1,i-1) = (i+1) - 2i + i = 1. $$ It follows that $\pi(i)=i$ for $0\le i\le r-1$, and the theorem follows. \end{proof} Notice that this computation proves the Riemann-Roch formula in this case: it shows that $W=W^*_{\mec L}$ for $\mec L=(r-1,r-1)$. Hence $f=f^{\wedge}_{\mec K}$ for $\mec K=(r-2,r-2)$, and therefore $$ f(\mec d) - f(\mec K-\mec d) = \deg(\mec d)+C $$ for some $C$. Taking $\mec d=\mec 0$ and using $f(0,0)=1$ we get $$ 1-f(\mec K)=C, $$ and taking $\mec d=\mec K$ we get $$ f(\mec K)-1 = \deg(\mec K)+C = 2(r-2)+C; $$ adding these last two equations, the $f(\mec K)$ cancels and we get $0=2(r-2)+2C$, and so $C=2-r$ is the offset. Hence $$ f(\mec d) - f(\mec K-\mec d) = \deg(\mec d)-r+2.
$$ \section{The Weight of the Riemann-Roch Rank of the Complete Graph and Related Graphs} \label{se_completegraph} The point of this subsection is to give a self-contained computation of the remarkably simple and sparse weight function of the Baker-Norine rank for the complete graph. Our proof uses many standard ideas in the graph Riemann-Roch literature \cite{baker_norine,backman,amini_manjunath,cori_le_borgne}, but also one rather ingenious idea of Cori and Le Borgne \cite{cori_le_borgne}. \subsection{Proof Overview and Computer-Aided Computations} \label{su_first_and_second_coords_summary} Our analysis of the weights for the complete graph and the resulting formula of the Baker-Norine function is based on seeing some remarkable patterns in computer-aided computation. Explaining this also serves as an overview for our proofs below, and motivates the notation that we introduce. Let $G$ be a graph on $n$-vertices ordered $v_1,\ldots,v_n$. To compute the Baker-Norine function, $r_{\rm BN}$ of a graph (and the resulting weight, $W$), we note tht $r_{\rm BN}(\mec d)=-1$ if $\deg(\mec d)<0$; it suffices to compute $r_{\rm BN}(\mec d)$ on $\integers^n_{\deg 0}$, then on $\integers^n_{\deg 1}$, then $\integers^n_{\deg 2}$, etc. Since $r_{\rm BN}$ and $W$ are invariant under the image of the Laplacian, $\Delta_G$, it suffices to determine the value of $r_{\rm BN}$ on a set of representatives of $$ \Pic_i(G) = \integers^n_{\deg i}/{\rm Image}(\Delta_G) $$ for $i=0,1,\ldots$. To do so, it is natural to: find a set of ``convenient coordinates'' for $\Pic_0(G)=\integers^n_{\deg 0}/{\rm Image}(\Delta_G)$, meaning a set $\cB$ and a bijection $\iota\from\cB\to \Pic_0(G)$ such that the computations below are easy to do for $i=0,1,\ldots$, namely: \begin{enumerate} \item for all $\mec b\in\cB$, determine if $\iota(\mec b)+i\mec e_n$ is not effective, i.e., if $r_{\rm BN}(\iota(\mec b)+i\mec e_n)=-1$; and \item for all other $\mec b\in \cB$ we compute $r_{\rm BN}(\mec b+i\mec e_n)$ via the formula $$ r_{\rm BN}(b+i\mec e_n) = 1 + \min_{j\in[n]}\, r_{\rm BN}(\mec b+i\mec e_n-\mec e_j); $$ hence we need a reasonably fast algorithm to determine the element of $\cB$ that is equivalent to $\iota^{-1}(\mec b+\mec e_n-\mec e_j)$. [We are finished when $i\ge \deg(\mec L)$ where $\mec L=\mec K+\mec 1$ where $K$ is the Baker-Norine canonical divisor, and hence when $i\ge 2(|E|-|V|)+|V|=2|E|-|V|$; we may use $W=(-1)^nW^*_{\mec L}$ to finish when $i\ge |E|+(1-|V|)/2$.] \end{enumerate} Of course, one can replace $\mec e_n$ above by any of $\mec e_1,\ldots, \mec e_{n-1}$, or, more generally, any element of $\integers^n$ of degree $1$; our choice of $\mec e_n$ is convenient for the representatives of $\cB$ below. It turns out that there is a very convenient choice for $\cB$ suggested in \cite{cori_le_borgne}: namely, we give their proof that every element of $\integers^n$ is equivalent to a unique element of $\cA$ given by $$ \cA=\bigl\{ \mec a \ | \ a_1,\ldots,a_{n-2}\in\{0,\ldots,n-1\}, a_{n-1}=0 \bigr\}, $$ i.e., some element of the form $$ (a_1,\ldots,a_n) \in \cA=\{0,\ldots,n-1\}^{n-2}\times\{0\}\times \integers \subset\integers^n $$ The only problem is that the group law in $\Pic(K_n)$ is a bit tricky to write down, since if $\mec a,\mec a'\in\cA$, then the element of $\cA$ that is equivalent to $\mec a+\mec a'$ has, for all $i\le n-2$, its $i$-th coordinate equal to $(a_i+a_i')\bmod n$, but the $n$-th coordinate needs to take into account the number of $i$ such that $a_i+a_i'\ge n$. 
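For instance, when $n=3$ we have $\cA=\{0,1,2\}\times\{0\}\times\integers$, and the elements $(2,0,0),(2,0,1)\in\cA$ sum to $(4,0,1)$, whose equivalent element of $\cA$ is $(1,0,4)$ (subtract $3(\mec e_1-\mec e_3)$, which lies in the image of the Laplacian of $K_3$): the first coordinate is $(2+2)\bmod 3=1$, while the last coordinate is $0+1+3=4$, the extra $3$ coming from the one index $i$ with $a_i+a_i'\ge 3$.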
In other words, the addition law on the first $n-2$ coordinates of $\cA$ is that of $(\integers/n\integers)^{n-2}$ (and the $(n-1)$-th coordinate is always $0$), but addition on the $n$-th coordinate depends on the first $n-2$ coordinates; in other words, the addition law on $\cA$ induced by the law on $\Pic$ gives an isomorphism between $\cA$ and a semidirect product $(\integers/n\integers)^{n-2} \ltimes \integers$. Of course, since $\cA\subset\integers^n$, this type of complicated addition law cannot be helped: the order of any nonzero element of $\integers^n$ is infinite, whereas the order of each element in $\Pic_0$ is finite; hence if $\Pic_0$ is nontrivial (or, equivalently, $G$ is not a tree), then no set of representatives of $\Pic$ can have a simple addition law. To get a simpler addition law, we define a second set of coordinates: namely, we set $\cB=\{0,\ldots,n-1\}^{n-2}$ and define $\iota\from \cB\to\Pic_0$ via $$ \iota(\mec b) = \bigl(b_1,\ldots,b_{n-2},0,-b_1-\cdots-b_{n-2}\bigr)\in\integers^n_{\deg 0}. $$ In order to avoid writing $\iota$ all the time, for $(\mec b,i)\in\cB\times\integers$ we set $$ \langle \mec b,i \rangle = \iota(\mec b) + i \mec e_n, $$ which equals $$ \bigl(b_1,\ldots,b_{n-2},0,i-b_1-\cdots-b_{n-2} \bigr)\in\integers^n_{\deg i}. $$ Hence we leave the first $n-1$ coordinates as they are in $\cA$, but we form $\langle \mec b,i\rangle$ to have degree $i$. In this way $$ \langle \mec b,i \rangle + \langle \mec b',i' \rangle $$ has degree $i+i'$, has $(n-1)$-th coordinate $0$, and has the first $n-2$ coordinates given by addition in $(\integers/n\integers)^{n-2}$; hence the addition law in $\Pic$ in the second coordinates $(\mec b,i)$ is just addition on $(\integers/n\integers)^{n-2}\times\integers$. The theorems we give below simply reflect the patterns that we saw, namely: we first noticed that the weights $W=\frakm r_{\rm BN}$ for the complete graph were very sparse, i.e., mostly $0$'s, and the non-zero values of $W$ followed a simple pattern. Then, since $$ \frakm = (1 - \frakt_1)\ldots(1-\frakt_n) $$ (recall that $\frakt_i$ is the ``downward shift operator'' given in \eqref{eq_frakt_def}), we tried computing some subset of the $1-\frakt_i$ applied to $r_{\rm BN}$ to find a simple pattern. After a number of unsuccessful attempts, we discovered that $(1-\frakt_{n-1})r_{\rm BN}$ had a remarkably simple pattern, namely that for small $n$, $$ (1-\frakt_{n-1})r_{\rm BN}\bigl(\langle \mec b,i \rangle\bigr) = \left\{ \begin{array}{ll} 1 & \mbox{if $b_1+\cdots+b_{n-2}\le i$} \\ 0 & \mbox{otherwise.} \end{array}\right. $$ From this one also easily sees the pattern $$ (1-\frakt_n) (1-\frakt_{n-1})r_{\rm BN}\bigl( \langle \mec b,i \rangle\bigr) = \left\{ \begin{array}{ll} 1 & \mbox{if $b_1+\cdots+b_{n-2}=i$} \\ 0 & \mbox{otherwise.} \end{array}\right. $$ The rest of this section is devoted to proving that the patterns above, which we observed for small $n$, indeed hold for all $n$. Our starting point for the proof requires some important techniques of \cite{cori_le_borgne}, which are more simply stated in terms of the representatives $\cA$ of $\Pic(K_n)=\integers^n/{\rm Image}(\Delta_{K_n})$ used in \cite{cori_le_borgne}. \subsection{Maximal Decrease} The following is a standard tool used in studying the graph Riemann-Roch rank, used by Baker-Norine \cite{baker_norine} and many subsequent papers. It is valid in the general setting of \eqref{eq_f_distance_cN} when $\cN$ is a downset.
Recall from Definition~\ref{de_slowly_growing_and_periodic} that $f\from\integers^n\to\integers$ is {\em slowly growing} if for all $j\in[n]$ and $\mec d\in\integers^n$ we have $$ f(\mec d)\le f(\mec d+\mec e_j) \le f(\mec d)+1. $$ If $f$ is slowly growing, an easy induction argument (on $\deg(\mec d-\mec d')$) shows that if $\mec d',\mec d\in\integers^n$ with $\mec d'\le\mec d$, then \begin{equation}\label{eq_maximally_decreasing} f(\mec d') \ge f(\mec d) - \deg(\mec d-\mec d'). \end{equation} \begin{definition} Let $f\from\integers^n\to\integers$ be slowly growing. Let $\mec d',\mec d\in\integers^n$ with $\mec d'\le\mec d$. We say that {\em $f$ is maximally decreasing from $\mec d$ to $\mec d'$} if equality holds in \eqref{eq_maximally_decreasing}, or equivalently $$ f(\mec d) = f(\mec d') + \deg(\mec d-\mec d'). $$ \end{definition} The following is Lemma~5 of \cite{cori_le_borgne}, but is used in most papers we have seen involving the Baker-Norine rank, e.g., \cite{baker_norine,backman,amini_manjunath}. \begin{proposition} Let $f\from\integers^n\to\integers$ be slowly growing. Then for any $\mec d'',\mec d',\mec d\in\integers^n$ with $\mec d''\le\mec d'\le\mec d$, $f$ is maximally decreasing from $\mec d$ to $\mec d''$ iff it is maximally decreasing from both $\mec d$ to $\mec d'$ and from $\mec d'$ to $\mec d''$. \end{proposition} The proof is immediate from the fact that the two inequalities \begin{align*} f(\mec d) - f(\mec d') & \le \deg(\mec d-\mec d'), \\ f(\mec d') - f(\mec d'') & \le \deg(\mec d'-\mec d'') \end{align*} both hold with equality iff their sum does, and their sum is $$ f(\mec d) - f(\mec d'') \le \deg(\mec d-\mec d'') . $$ We remark that $f$ is slowly growing whenever it is of the form \eqref{eq_f_distance_cN} where $\cN$ is a downset such that $\integers^n_{\deg \le m}\subset \cN$ for some $m$ (so that $f$ takes on finite values). We also remark that in this case, if $\mec d\in\integers^n$ and $\mec d''\in\cN$ is such that $$ \| \mec d-\mec d''\|_{L^1} = \min_{\mec d'\in\cN} \| \mec d-\mec d'\|_{L^1}, $$ then $\mec d''\le\mec d$ (otherwise the coordinatewise minimum of $\mec d$ and $\mec d''$ would lie in $\cN$ and be strictly closer to $\mec d$), and $f$ is maximally decreasing from $\mec d$ to $\mec d''$. \subsection{A Generalization of a Fundamental Lemma of Cori and Le Borgne} Next we give an elegant and rather ingenious observation of \cite{cori_le_borgne} (half of the proof of Proposition~10 there) that is the starting point of their (and our) study of the Baker-Norine rank for the complete graph; we state their observation in slightly more general terms. \begin{lemma}\label{le_cori_borgne_generalization} Fix $n\in\naturals$, and let $K_n=(V,E)$ be the complete graph on vertex set $V=[n]$, i.e., $E$ consists of exactly one edge joining any two distinct vertices. Consider the Baker-Norine rank $r_{\rm BN}\from\integers^n\to\integers$ on $K_n$. If $\mec a\ge\mec 0$ then \begin{equation}\label{eq_strictly_decreasing_a_when_zero} a_{n-1}=0 \quad\implies\quad r_{\rm BN}(\mec a) =r_{\rm BN}(\mec a-\mec e_{n-1})+1. \end{equation} \end{lemma} Of course, by symmetry \eqref{eq_strictly_decreasing_a_when_zero} holds with both occurrences of $n-1$ replaced by any $j\in[n]$. \begin{proof} Since $\mec a\ge\mec 0$, $r_{\rm BN}(\mec a)\ge 0$, and hence $r_{\rm BN}$ is maximally decreasing from $\mec a$ to $\mec a-\mec b$ for some $\mec b\ge\mec 0$ with $r_{\rm BN}(\mec a-\mec b)=-1$. Since $r_{\rm BN}(\mec a-\mec b)=-1$, we must have $a_j-b_j\le -1$ for some $j\in[n]$; fix any such $j$. Then $b_j\ge a_j+1\ge 1$; setting $\mec a'=\mec a-b_j\mec e_j$ we have $$ \mec a - \mec b \le \mec a' \le \mec a, $$ and hence $r_{\rm BN}$ is maximally decreasing from $\mec a$ to $\mec a'$.
But the vector \begin{equation}\label{eq_mec_a_prime_prime} \mec a''=\mec a-a_j\mec e_j - (b_j-a_j)\mec e_{n-1} \end{equation} is merely the vector $\mec a'$ followed by an exchange of the $(n-1)$-th and $j$-th coordinates (if $j=n-1$, then $\mec a''=\mec a'$). Hence $\mec a'',\mec a'$ have the same degree and same value of $r_{\rm BN}$; hence $f$ is also maximally decreasing from $\mec a$ to $\mec a''$. Since $b_j-a_j\ge 1$, \eqref{eq_mec_a_prime_prime} implies $$ \mec a''\le\mec a-\mec e_{n-1}\le \mec a ; $$ since $f$ is maximally decreasing from $\mec a$ to $\mec a''$, $f$ is maximally decreasing from $\mec a$ to $\mec a-\mec e_{n-1}$ as well, and hence \eqref{eq_strictly_decreasing_a_when_zero} holds. \end{proof} \begin{remark}\label{re_cori_leborne_generalized} If $n,m\in\naturals$, we use $K_n^m=(V,E)$ to denote the graph with $V=[n]$ and $m$ edges between any two vertices (so $K_n^1=K_n$). Then $r_{{\rm BN},K_n^m}(\mec d)$ is again a symmetric function of its variables $(d_1,\ldots,d_n)=\mec d$, and the same argument shows that for any $b\in\integers_{\ge 0}$, $\mec a\ge b\mec 1$ and $a_{n-1}=b$ implies that $f(\mec d)=f(\mec d-\mec e_{n-1})+1$. We believe it is possible to use this observation, specifically for $b=m$, to give an analog of Theorem~\ref{th_complete_graph_sigma_n_minus_one} below regarding $K_n^m$. \end{remark} \subsection{The First Coordinates for Pic, D'apr\`es Cori-Le Borgne} Let us recall some more standard graph Riemann-Roch terminology (see, e.g., \cite{baker_norine,cori_le_borgne}, and then give our first set of coordinates for the {\em Picard group} of a graph. These coordinates are those found in the Algorithm at the end of Section~2.1 of \cite{cori_le_borgne}. Recall $\integers^n_{\deg i}$ consists of the elements of $\integers^n$ of degree $i$. Recall \cite{baker_norine} the {\em Picard group} of a graph, $G$, with $n$ vertices $v_1,\ldots,v_n$ is defined as $$ \Pic(G) = \integers^n/{\rm Image}(\Delta_G); $$ since ${\rm Image}(\Delta_G)$ consists entirely of vectors of degree $0$, $\Pic(G)$ is the union over $i\in\integers$ of \begin{equation}\label{eq_pic_i_def} \Pic_i(G) = \integers^n_{\deg i} / {\rm Image}(\Delta_G). \end{equation} It is known that for all $i$, $|\Pic_i(G)|$ equals $(1/n)\det'(\Delta_G)$, where $\det'$ denotes the product of the nonzero eigenvalues of $\Delta_G$ (and Kirchoff's theorem says that this is the number of unrooted spanning trees of $G$). For $G=K_n$ it is a standard fact that this number of trees is $n^{n-2}$, i.e., \begin{equation}\label{eq_Pic_zero_size_K_n} |\Pic_i(K_n)| = n^{n-2}. \end{equation} Next we pick a convenient set of representatives for each class in $\integers^n/{\rm Image}(\Delta_{K_n})$. \begin{notation}\label{no_first_coordinates} For any $n\in\naturals$, we let \begin{equation}\label{eq_cA_pic_rep} \cA=\cA(n) =\{ \mec a\in\integers^n \ | \ a_1,\ldots,a_{n-2}\in\{0,\ldots,n-1\}, a_{n-1}=0 \} \end{equation} $$ =\{0,\ldots,n-1\}^{n-2}\times\{0\}\times\integers $$ (we usually simply write $\cA$ since $n$ will be understood and fixed); in addition, for $i\in\integers$, we use $\cA_{\deg i}$ to denote the set $$ \cA_{\deg i} \eqdef \cA\cap\integers^n_{\deg i}=\{\mec a\in\cA\ | \ \deg(\mec a)=i\}. 
$$
\end{notation}
In the above notation, note that
$$
\mec a \in\cA_{\deg i}
\quad
\iff
\quad
a_n = i - a_1-\cdots-a_{n-2}
$$
and hence
\begin{align}
\label{eq_a_n_and_sum_versus_i_ge}
\mec a \in\cA_{\deg i} \ \implies\ &
\Bigl( a_n \ge 0 \iff a_1+\cdots+a_{n-2} \le i \Bigr)
\\
\label{eq_a_n_and_sum_versus_i_eq}
\mec a \in\cA_{\deg i} \ \implies\ &
\Bigl( a_n = 0 \iff a_1+\cdots+a_{n-2} = i \Bigr)
\end{align}
\begin{lemma}\label{le_first_coordinates}
Fix $n\in\naturals$, and let $K_n=(V,E)$ be the complete graph on vertex set $V=[n]$. Then for all $\mec d\in\integers^n$ there exists a unique $\mec a\in\cA=\cA(n)$ with $\mec d\sim\mec a$ (i.e., $\mec d-\mec a\in{\rm Image}(\Delta_{K_n})$), given by: for $j\in[n-2]$, $a_j=(d_j-d_{n-1})\bmod n$, i.e., $a_j$ is the element of $\{0,\ldots,n-1\}$ congruent to $d_j-d_{n-1}$ modulo $n$, $a_{n-1}=0$, and $a_n=\deg(\mec d)-a_1-\cdots-a_{n-2}$.
\end{lemma}
\begin{proof}
Existence is shown in the ``Algorithm'' at the end of Section~2.1 of \cite{cori_le_borgne}: we note that the image of $\Delta_{K_n}$ contains $(1,\ldots,1,1-n)$ and, for any $j\in[n]$, $n(\mec e_j-\mec e_n)$. For any $\mec d$ we get an equivalent vector with $(n-1)$-th coordinate $0$ by subtracting multiples of $(1,\ldots,1,1-n)$; then we find an equivalent vector with the first $n-2$ coordinates between $0$ and $n-1$ by subtracting multiples of $n(\mec e_j-\mec e_n)$ for $j\in[n-2]$.
Note that the above algorithm determines a map $\mu\from\integers^n\to\cA$ such that
\begin{equation}\label{eq_mu_takes_mec_d_to_equivalent}
\forall\mec d\in\integers^n,\quad
\mec d\sim \mu(\mec d) ,
\end{equation}
i.e., $\mec d$ and $\mu(\mec d)$ are equivalent modulo ${\rm Image}(\Delta_{K_n})$.
To prove that each $\mec d$ is equivalent to a unique element of $\cA$, we need to show that if $\mec a,\mec a'\in\cA$ are equivalent, i.e., $\mec a-\mec a'\in{\rm Image}(\Delta_{K_n})$, then we must have $\mec a=\mec a'$. Note that if $\mec a,\mec a'$ are equivalent, then they have the same degree and hence both lie in $\cA_{\deg i}$ for the same $i$. Hence it suffices to show that each element of $\cA_{\deg i}$ is in a distinct class of $\Pic_i(K_n)$.
Let us rephrase this condition. Note that since $\cA_{\deg i}\subset\integers^n_{\deg i}$, the quotient map
$$
\integers^n_{\deg i}\to \integers^n_{\deg i}/{\rm Image}(\Delta_{K_n}) = \Pic_i(K_n)
$$
restricts to a map
$$
\nu_i\from\cA_{\deg i}\to\Pic_i(K_n) .
$$
To say that each element of $\cA_{\deg i}$ is in its own class of $\Pic_i(K_n)$ simply means that $\nu_i$ is injective. Let us prove this.
So fix an $i\in\integers$, and choose a set of representatives $\cP_i\subset\integers^n_{\deg i}$ for $\Pic_i$; in view of \eqref{eq_mu_takes_mec_d_to_equivalent}, $\mu$ restricted to $\cP_i$ gives a map of sets $\mu|_{\cP_i}\from\cP_i\to\cA_{\deg i}$ that takes each element in the domain to a vector equivalent to it; hence this gives a map of sets $\mu_i\from \Pic_i\to\cA_{\deg i}$ such that $\mu_i$ takes each $p\in \Pic_i$ to an element that lies in $p$. It follows that the map $\nu_i\mu_i$ is the identity map on $\Pic_i$.
But we easily see that $\cA_{\deg i}$ has size $n^{n-2}$, since if $\mec a=(a_1,\ldots,a_n)\in\cA_{\deg i}$ then $a_1,\ldots,a_{n-2}\in\{0,\ldots,n-1\}$, and any $a_1,\ldots,a_{n-2}\in\{0,\ldots,n-1\}$ determine the values of $a_{n-1},a_n$, namely
$$
a_{n-1}=0,\quad a_n = i-a_1-\cdots-a_{n-2}.
$$
Since $\nu_i\mu_i$ is the identity map on $\Pic_i$, and this map factors through the set $\cA_{\deg i}$, which by \eqref{eq_Pic_zero_size_K_n} has the same size $n^{n-2}$ as $\Pic_i$, both $\nu_i$ and $\mu_i$ must be bijections.
Hence $\nu_i$ is an injection, which proves the desired uniqueness property.
\end{proof}
Here is how we often use the above lemma.
\begin{corollary}
Fix an $n\in\naturals$. For each $i\in\integers$, $\cA_{\deg i}$ is a set of representatives of the classes $\Pic_i(K_n)$ in $\integers^n_{\deg i}$. Similarly, for any $\mec d\in\integers^n$, as $\mec a$ ranges over $\cA_{\deg i}$, $\mec a-\mec d$ ranges over a set of representatives of the classes $\Pic_{i'}(K_n)$ where $i'=i-\deg(\mec d)$.
\end{corollary}
\subsection{An Intermediate Weight Calculation: $(1-\frakt_{n-1})r_{\rm BN}$}
In this subsection we prove that the pattern we noticed in computer-aided calculations for small values of $n$ holds for all $n$.
\begin{theorem}\label{th_complete_graph_sigma_n_minus_one_intermediate}
Fix $n\in\naturals$, and let $K_n=(V,E)$ be the complete graph on vertex set $V=[n]$. Consider the Baker-Norine rank $r_{\rm BN}\from\integers^n\to\integers$ on $K_n$. For any $\mec a\in\cA_{\deg i}$,
\begin{equation}\label{eq_generalize_cori_le_borgne}
a_1+\cdots+a_{n-2}\le i
\ \iff\
a_n\ge 0
\ \iff\
r_{\rm BN}(\mec a)=r_{\rm BN}(\mec a-\mec e_{n-1})+1.
\end{equation}
\end{theorem}
We remark that \eqref{eq_generalize_cori_le_borgne} generalizes Proposition~10 of \cite{cori_le_borgne}.
\begin{proof}
For all $\mec a\in\cA$, $\mec a\ge \mec 0$ iff $a_n\ge 0$, since all other coordinates of $\mec a$ are non-negative. For $\mec a\in\cA_{\deg i}$, in view of \eqref{eq_a_n_and_sum_versus_i_ge} we get
$$
\mec a\ge \mec 0
\ \iff a_n\ge 0\ \iff\ a_1+\cdots+a_{n-2}\le i.
$$
Hence Lemma~\ref{le_cori_borgne_generalization} implies that for $\mec a\in\cA_{\deg i}$,
\begin{equation}\label{eq_implies_decrease_n_minus_one}
a_1+\cdots+a_{n-2} \le i
\quad\implies\quad
r_{\rm BN}(\mec a)=r_{\rm BN}(\mec a-\mec e_{n-1})+1 .
\end{equation}
We now prove the reverse implication by, roughly speaking, giving a calculation that shows that there is ``no more room'' for $r_{\rm BN}(\mec a)-r_{\rm BN}(\mec a-\mec e_{n-1})$ to be $1$ otherwise, given that we know the offset of $1+r_{{\rm BN},K_n}$. Let us make this precise.
For any $i\in\integers$, let
$$
M_i = \bigl| \{ \mec a\in\cA_{\deg i} \ |\ r_{\rm BN}(\mec a)= r_{\rm BN}(\mec a-\mec e_{n-1}) + 1 \} \bigr|
$$
and let
$$
N_i = \bigl| \{ \mec a\in\cA_{\deg i} \ | \ a_1+\cdots+a_{n-2}\le i \} \bigr| .
$$
Then \eqref{eq_implies_decrease_n_minus_one} implies $M_i\ge N_i$, and \eqref{eq_generalize_cori_le_borgne} holds provided that we can show $M_i=N_i$ for all $i$.
Since $\mec a\in\cA$ implies that $a_1,\ldots,a_{n-2}\ge 0$, it follows that for $i\le -1$ we have $N_i=0$; for such $i$ we also have $M_i=0$, since for $\deg(\mec a)\le -1$ both $r_{\rm BN}(\mec a)$ and $r_{\rm BN}(\mec a-\mec e_{n-1})$ equal $-1$. Similarly, since $a_1,\ldots,a_{n-2}\le n-1$ for $\mec a\in\cA$, we have $a_1+\cdots+a_{n-2}\le (n-1)(n-2)$; hence for $i\ge n(n-2)$ we have
$$
a_1+\cdots+a_{n-2} \le n(n-2) \le i,
$$
and hence for such $i$ we have $N_i=|\Pic_i|=n^{n-2}$, and hence $M_i=n^{n-2}$ as well (since $N_i\le M_i\le |\cA_{\deg i}|=n^{n-2}$).
Our strategy will be to show that for sufficiently large $\ell\in\naturals$ we have
$$
M_0+\cdots+M_\ell = N_0+\cdots+N_\ell;
$$
if so, then the inequalities $M_i\ge N_i$ must hold with equality (i.e., there is ``no room'' for some $N_i$ to be strictly smaller than $M_i$).
Let us take a large $\ell\in\naturals$, and consider $M_0+\cdots+M_\ell$: for each $\mec a\in \cA_{\deg\ell}$ we have $r_{\rm BN}(\mec a)=\ell-g$ (by the graph Riemann-Roch theorem \cite{baker_norine}, since $\ell$ is large) and $r_{\rm BN}\bigl(\mec a-\mec e_{n-1}(\ell+1)\bigr)=-1$ (since this vector has degree $-1$), and hence
\begin{equation}\label{eq_cancelling_r_BN_diff}
\sum_{i=0}^{\ell}
\bigl(
r_{\rm BN}(\mec a-i\mec e_{n-1}) - r_{\rm BN}(\mec a-(i+1)\mec e_{n-1})
\bigr)
=
r_{\rm BN}(\mec a) - r_{\rm BN}\bigl(\mec a-\mec e_{n-1}(\ell+1)\bigr)
= \ell - g + 1.
\end{equation}
But for all $j$, $\cA_{\deg j}$ is a set of representatives of $\Pic_j$; hence for fixed $i$, as $\mec a$ varies over $\cA_{\deg\ell}$, $\mec a-i\mec e_{n-1}$ varies over a set of representatives of $\Pic_{\ell-i}$; hence
\begin{align*}
\sum_{\mec a\in \cA_{\deg\ell}}
\bigl(
r_{\rm BN}(\mec a-i\mec e_{n-1}) &- r_{\rm BN}(\mec a-(i+1)\mec e_{n-1})
\bigr) \\
&=
\sum_{p\in \Pic_{\ell-i}}
\bigl(
r_{\rm BN}(p) - r_{\rm BN}(p-\mec e_{n-1})
\bigr) \\
&=
\sum_{\mec a'\in \cA_{\deg(\ell-i)}}
\bigl(
r_{\rm BN}(\mec a') - r_{\rm BN}(\mec a'-\mec e_{n-1})
\bigr) \\
&= M_{\ell-i}
\end{align*}
(since $r_{\rm BN}(\mec a')-r_{\rm BN}(\mec a'-\mec e_{n-1})$ is either $0$ or $1$, and $M_{\ell-i}$ counts the total number equal to $1$). Hence summing \eqref{eq_cancelling_r_BN_diff} over all $\mec a\in \cA_{\deg\ell}$ we get
\begin{equation}\label{eq_sum_of_M_is}
M_\ell+M_{\ell-1}+\cdots+M_0 = n^{n-2} (\ell-g+1).
\end{equation}
Next consider $N_0+\cdots+N_\ell$ for $\ell$ large: note that for all $(a_1,\ldots,a_{n-2})\in \{0,\ldots,n-1\}^{n-2}$ and $i\in\integers$, we have
\begin{align*}
\mbox{either}\quad a_1+\cdots+a_{n-2} & \le i \\
\mbox{or}\quad a_1+\cdots+a_{n-2} & \ge i+1
\end{align*}
(i.e., exactly one of the two inequalities above holds), and hence
\begin{align*}
\mbox{either}\quad a_1+\cdots+a_{n-2} & \le i \\
\quad\mbox{or}\quad (n-1-a_1)+\cdots+(n-1-a_{n-2}) & \le (n-1)(n-2)-i-1.
\end{align*}
Since $(a_1,\ldots,a_{n-2})\mapsto (n-1-a_1,\ldots,n-1-a_{n-2})$ is a bijection of $\{0,\ldots,n-1\}^{n-2}$ to itself, it follows that for all $i$ and all $a_1,\ldots,a_{n-2}\in\{0,\ldots,n-1\}$, either $(a_1,\ldots,a_{n-2})$ is counted once in $N_i$, or $(n-1-a_1,\ldots,n-1-a_{n-2})$ is counted once in $N_{(n-2)(n-1)-i-1}$, but not both; hence
$$
N_i+N_{(n-2)(n-1)-i-1}=n^{n-2}.
$$
Hence, summing over $i=0,\ldots,(n-2)(n-1)-1$, we have
$$
N_0+\cdots+N_{(n-2)(n-1)-1} = \frac{(n-2)(n-1)n^{n-2}}{2},
$$
and for $\ell\ge (n-1)(n-2)-1$ we have
\begin{align*}
N_0+\ldots+N_\ell
=&
\frac{(n-2)(n-1)n^{n-2}}{2}
+ n^{n-2}\bigl(\ell-(n-1)(n-2)+1\bigr) \\
=&
n^{n-2}
\left(
\frac{(n-1)(n-2)}{2}
+ \ell - (n-1)(n-2)+1
\right)
\\
=& n^{n-2}(\ell-g+1),
\end{align*}
in view of the fact that
$$
g = 1+|E|-|V|=1+\frac{n(n-1)}{2}-n=\frac{2+n^2-n-2n}{2}=\frac{(n-1)(n-2)}{2}.
$$
Hence, from \eqref{eq_sum_of_M_is} we have
$$
N_0+\ldots+N_\ell = n^{n-2} (\ell-g+1) = M_0+\cdots+M_\ell
$$
for $\ell$ large. But since $M_i\ge N_i$ for all $i$, we must have $N_i=M_i$ for all $0\le i\le \ell$; hence $N_i=M_i$ for all $i$.
\end{proof}
\subsection{A New Rank Formula for the Complete Graph and an Algorithm}
Cori and Le Borgne \cite{cori_le_borgne} (after Proposition~6, bottom of page~9, and in \cite{cori_le_borgne2}, Proposition~13) describe an $O(n)$ algorithm that computes $r_{\rm BN}(\mec d)$ for the complete graph $K_n$.
Also, they show (see Theorem~12 of \cite{cori_le_borgne2}) that when $\mec d$ is a {\em sorted parking configuration}, meaning that $0\le d_i<i$ for $i<n$ and $d_1\le d_2\le \cdots\le d_{n-1}$ (and $d_n$ is unconstrained), then setting
$$
q = \lfloor (d_n+1)/(n-1) \rfloor , \quad
r = (d_n+1) \bmod (n-1)
$$
one has
$$
r_{\rm BN}(\mec d) = -1 + \sum_{i=1}^n \max\Bigl(0, q - i + 1 + d_i+ \chi\bigl( i\le r \bigr) \Bigr),
$$
where $\chi(P)$ is $1$ if $P$ is true, and $0$ if $P$ is false.
Here we give another formula for the rank, perhaps related to the above formula; by contrast, our formula holds for $\mec a\in\cA$, but easily generalizes to all $\mec d\in\integers^n$. The formula is a corollary to Theorem~\ref{th_complete_graph_sigma_n_minus_one_intermediate}.
\begin{corollary}\label{co_complete_graph_rank_formula}
Let $n\in\naturals$, and $\cA$ be as in \eqref{eq_cA_pic_rep}. For any $\mec a\in\cA$ we have
\begin{equation}\label{eq_f_complete_graph}
r_{{\rm BN},K_n}(\mec a) =
-1+\biggl|
\biggl\{ i=0,\ldots,\deg(\mec a) \ \biggm| \
\sum_{j=1}^{n-2} \bigl( (a_j+i) \bmod n \bigr) \le \deg(\mec a)-i
\biggr\}
\biggr|.
\end{equation}
In particular, for any $\mec d\in\integers^n$ we have
\begin{equation}\label{eq_f_complete_graph_mec_d_unconstrained}
r_{{\rm BN},K_n}(\mec d) =
-1+\biggl|
\biggl\{ i=0,\ldots,\deg(\mec d) \ \biggm| \
\sum_{j=1}^{n-2} \bigl( (d_j-d_{n-1}+i) \bmod n \bigr) \le \deg(\mec d)-i
\biggr\}
\biggr|.
\end{equation}
\end{corollary}
\begin{proof}
Since $\mec a - (\deg(\mec a)+1) \mec e_{n-1}$ has negative degree, we have
\begin{equation}\label{eq_r_BN_telescoping_sum}
\sum_{i=0}^{\deg(\mec a)}
\Bigl(
r_{\rm BN}(\mec a - i \mec e_{n-1}) - r_{\rm BN}(\mec a - (i+1) \mec e_{n-1})
\Bigr)
=
r_{\rm BN}(\mec a) - (-1).
\end{equation}
According to Theorem~\ref{th_complete_graph_sigma_n_minus_one_intermediate}, for a fixed $i$,
$$
r_{\rm BN}(\mec a - i \mec e_{n-1}) - r_{\rm BN}(\mec a - (i+1) \mec e_{n-1})
$$
equals $1$ or $0$ according to whether or not the unique $\mec a'\in\cA$ that is equivalent to $\mec a-i\mec e_{n-1}$ satisfies
\begin{equation}\label{eq_mec_a_prime_condition}
a_1'+\cdots+a_{n-2}' \le \deg(\mec a').
\end{equation}
According to Lemma~\ref{le_first_coordinates}, since the $(n-1)$-th component of $\mec a-i\mec e_{n-1}$ is $-i$, $\mec a'$ is given as
$$
\forall j\in [n-2], \quad
a_j' = (a_j+i) \bmod n,
$$
with $a_{n-1}'=0$ and $\deg(\mec a')=\deg(\mec a)-i$. Hence \eqref{eq_mec_a_prime_condition} holds iff
$$
\sum_{j=1}^{n-2} \bigl( (a_j+i) \bmod n \bigr) \le \deg(\mec a)-i.
$$
Hence, in view of \eqref{eq_r_BN_telescoping_sum} we have \eqref{eq_f_complete_graph}.
To prove \eqref{eq_f_complete_graph_mec_d_unconstrained}, we note that any $\mec d\in\integers^n$ is equivalent to $\mec a\in\cA$, where
$$
a_j = (d_j-d_{n-1})\bmod n
$$
for $j\le n-2$, and $\deg(\mec a)=\deg(\mec d)$.
\end{proof}
\begin{remark}
In the proof above we are making use of the fact that if $f\from\integers^n\to\integers$ is any function that is initially zero, then
$$
f(\mec d) =
\Bigl( \bigl( (1-\frakt_{n-1}) + (1-\frakt_{n-1})\frakt_{n-1} + (1-\frakt_{n-1})\frakt_{n-1}^2 + \cdots \bigr) f \Bigr)(\mec d)
$$
(in the proof above this is applied, in effect, to the initially zero function $1+r_{\rm BN}$), where the right-hand-side represents a finite sum, since for any fixed $\mec d$, for sufficiently large $m\in\naturals$ we have
$$
\bigl( (1-\frakt_{n-1})\frakt_{n-1}^m f \bigr) (\mec d) = 0.
$$
One can similarly write, for any $i\in[n]$,
$$
(1-\frakt_i)^{-1} = 1 + \frakt_i + \frakt_i^2 + \cdots
$$
with the right-hand-side representing a finite sum when applied to an initially vanishing function $f$ at any given value $\mec d$. It follows that if $f$ and $h$ are initially zero, then
\begin{equation}\label{eq_inverse_one_minus_frakt_i}
(1-\frakt_i)f=h
\quad\iff\quad
f=(1+ \frakt_i + \frakt_i^2 + \cdots)h.
\end{equation}
At times one of the two conditions above is easier to show than the other, at times not. For example, Theorem~\ref{th_complete_graph_sigma_n_minus_one_intermediate} above gives us a formula for $f=(1-\frakt_{n-1}) r_{\rm BN}$ over $\mec a\in\cA$; in Theorem~\ref{th_complete_graph_with_frakt_n} we determine $h=(1-\frakt_n)f$, but it is just as easy to apply either side of \eqref{eq_inverse_one_minus_frakt_i} with $i=n$. On the other hand, to compute the weight of $r_{\rm BN}$ in Theorem~\ref{th_complete_graph_sigma_n_minus_one}, with $h$ as above and
$$
W = (1-\frakt_1)\ldots (1-\frakt_{n-2}) h,
$$
the above formula seems easier to verify than the equivalent
$$
h = (1+\frakt_1+\frakt_1^2+\cdots)\ldots (1+\frakt_{n-2}+\frakt_{n-2}^2+\cdots) W.
$$
\end{remark}
Next we briefly give a linear time algorithm to compute $r_{\rm BN}$ of the complete graph based on \eqref{eq_f_complete_graph} or \eqref{eq_f_complete_graph_mec_d_unconstrained} in Corollary~\ref{co_complete_graph_rank_formula}.
First, for simplicity, take an arbitrary $\mec d\in\integers^n$ and note that the equivalent $\mec a\in\cA$ has $a_i=(d_i-d_{n-1})\bmod n$ for $i\le n-2$ and $\deg(\mec a)=\deg(\mec d)$. Hence it suffices to show how to compute \eqref{eq_f_complete_graph} with $\mec a\in\cA$.
Setting
$$
g(i)=\sum_{j=1}^{n-2} \bigl( (a_j+i) \bmod n \bigr)
$$
we have that $g(i+n)=g(i)$ for all $i$, and, for $i\in\{0,\ldots,n-1\}$,
\begin{equation}\label{eq_convenient_way_to_compute_g}
g(i) = (n-2)i - m_i n + \sum_{j=1}^{n-2} a_j ,
\end{equation}
where $m_i$ is the number of $j\in[n-2]$ such that $a_j + i \ge n$, i.e., with $a_j\ge n-i$ (for such $i$ and $j$ we have $0\le a_j+i\le 2n-2$, so reducing $a_j+i$ modulo $n$ subtracts $n$ exactly when $a_j+i\ge n$).
Next, we claim that we can compute $m_0,\ldots,m_{n-1}$ in linear time: indeed, by a single pass through $a_1,\ldots,a_{n-2}$, one can count for each $k=1,\ldots,n-1$ the number
$$
m'_k = \bigl| \{ j\in[n-2] \ | \ a_j=k \} \bigr|,
$$
i.e., the number of $j$ for which $a_j=k$; then one computes $m_0,\ldots,m_{n-1}$ by setting $m_0=0$ and for $k=1,\ldots,n-1$ setting $m_k=m'_{n-k}+m_{k-1}$.
Once we compute $m_0,\ldots,m_{n-1}$, we can compute $g(0),\ldots,g(n-1)$ in linear time by computing $\sum_j a_j$ (once) and then applying \eqref{eq_convenient_way_to_compute_g} for each $i=0,\ldots,n-1$.
Now note that for $k\in\{0,\ldots,n-1\}$ and any $i\in\{0,\ldots,\deg(\mec a)\}$ with $i\bmod n=k$, we have $g(i)=g(k)$, and hence the condition
$$
\sum_{j=1}^{n-2} \bigl( (a_j+i) \bmod n \bigr) \le \deg(\mec a)-i
$$
is equivalent to
$$
i+ g(k) \le \deg(\mec a) ;
$$
since the $i\ge 0$ with $i\bmod n=k$ are $k,k+n,k+2n,\ldots$, the number of such $i$, for $k$ fixed, is
$$
\max\Bigl(0,\ \Bigl\lfloor \bigl( \deg(\mec a)-g(k)-k+n\bigr)/n \Bigr\rfloor \Bigr).
$$
Hence one can write
$$
r_{\rm BN}(\mec a) = -1 +
\sum_{k=0}^{n-1} \max\Bigl(0,\ \Bigl\lfloor \bigl( \deg(\mec a)-g(k)-k+n\bigr)/n \Bigr\rfloor \Bigr),
$$
which completes an $O(n)$ time algorithm to compute $r_{\rm BN}$.
\subsection{The Second Coordinates for Pic}
To complete our computation of the weight of $r_{\rm BN}$ of the complete graph, we use a new set of coordinates.
As explained in Subsection~\ref{su_first_and_second_coords_summary}, the second coordinates turn out to represent Pic as a product \begin{equation}\label{eq_semidirect} {\rm Pic} = (\integers/n\integers)^{n-2} \times \integers . \end{equation} \begin{notation}\label{no_second_coordinates} For any $n\in\naturals$ and $i\in\integers$, we use \begin{enumerate} \item $\cB=\cB(n)$ to denote the set $\{0,\ldots,n-1\}^{n-2}$ (and usually we just write $\cB$ since $n$ will be fixed); and \item for any $\mec b \in\cB$ and $i\in\integers$, we use $\langle \mec b,i \rangle$ to denote \begin{equation}\label{eq_define_b_pic_coords} \langle \mec b,i \rangle = (b_1,\ldots,b_{n-2},0,i-b_1-\cdots-b_{n-2}) \in \cA_{\deg i} \subset\integers^n_{\deg i}\subset\integers^n . \end{equation} \item if $\mec c\in\integers^{n-2}$, we use $\mec c \bmod n$ to denote the component-wise application of $\bmod n$, i.e., $$ \mec c \bmod n = \bigl( c_1 \bmod n,\ldots,c_{n-2}\bmod n \bigr) \in \cB=\{0,\ldots,n-1\}^{n-2}. $$ \end{enumerate} \end{notation} \begin{definition} For fixed $n\in\integers$, we refer to $\cB=\cB(n)$ and the map $\cB\times\integers\to\integers^n$ in \eqref{eq_define_b_pic_coords} as the {\em second coordinates} of $\Pic(K_n)$ representatives. \end{definition} \begin{proposition} Let $n\in\naturals$, and let notation be as in Notation~\ref{no_first_coordinates} and~\ref{no_second_coordinates}. Consider the complete graph, $K_n$, and equivalence modulo ${\rm Image}(\Delta_{K_n})$. Then: \begin{enumerate} \item for each $\mec b\in\cB$ and $i\in\integers$, $$ \langle (b_1,\ldots,b_{n-2}),i \rangle = (a_1,\ldots,a_n), $$ where $$ a_1=b_1,\ \ldots,\ a_{n-2}=b_{n-2}, \ a_{n-1}=0, $$ and $$ a_n = i - b_1 -\cdots - b_{n-2}. $$ \item For all $i\in \integers$, the set $\cB\times \{i\}$ is taken via $\langle \cdot,\cdot\rangle$ bijectively to $\cA_{\deg i}$, and hence to a set of representatives of $\Pic_i$. \item For all $i\in\integers$, each $\mec d\in\integers^n_{\deg i}$ is equivalent to a unique element of the form $\langle \mec b,i\rangle$ with $\mec b\in\cB$, namely with $$ \mec b = \bigl(d_1-d_{n-1},\ldots, d_{n-2}-d_{n-1} \bigr) \bmod n, $$ where $\bmod\ n$ is the component-wise application of $\bmod\ n$, i.e., $b_i = (d_i-d_{n-1})\bmod n\in\{0,\ldots,n-1\}$. \item For any $\mec b,\mec b'\in\cB=\{0,\ldots,n-1\}^{n-2}$ and any $i,i'\in\integers$, we have $$ \langle \mec b,i \rangle + \langle \mec b',i' \rangle \sim \langle (\mec b+\mec b')\bmod n, i+i' \rangle. $$ Similarly for subtraction, i.e., with $-$ everywhere replacing $+$. \end{enumerate} \end{proposition} \begin{proof} (1)~is immediate from the notation. (2)~follows from~(1). (3)~follows from~(1) and Lemma~\ref{le_first_coordinates}. (4)~follows from(3). \end{proof} \begin{example}\label{ex_second_coordinates_standard_basis_vectors} Applying the above proposition, we see that \begin{equation}\label{eq_e_i_in_second_coordinates} \mec e_1 \sim \langle \mec e_1,1\rangle, \ \ldots, \mec e_{n-2} \sim \langle \mec e_{n-2},1\rangle, \ \mec e_{n-1} \sim \langle (n-1)\mec 1,1\rangle, \ \mec e_n \sim \langle \mec 0,1 \rangle, \end{equation} where we use $\mec e_i$ to denote the vector in $\integers^n$ or in $\integers^{n-2}$, as appropriate. Moreover, equality holds in all the above, except for $\mec e_{n-1}$, where $$ \mec e_{n-1} \sim \langle (n-1)\mec 1,1\rangle = \bigl( n-1,\ldots,n-1,0,1-(n-2)(n-1) \bigr) . 
$$
\end{example}
\subsection{Computation of $(1-\frakt_n)(1-\frakt_{n-1})r_{\rm BN}$}
\begin{theorem}\label{th_complete_graph_with_frakt_n}
Fix $n\in\naturals$, and let $K_n=(V,E)$ be the complete graph on vertex set $V=[n]$, i.e., $E$ consists of exactly one edge joining any two distinct vertices. Consider the Baker-Norine rank $r_{\rm BN}\from\integers^n\to\integers$ on $K_n$.
\begin{enumerate}
\item
If $\mec a\in\cA_{\deg i}$, then
\begin{equation}\label{eq_sigma_n_sigma_n_minus_one_pre}
(1-\frakt_n)(1-\frakt_{n-1})r_{{\rm BN},K_n}(\mec a) =
\left\{\begin{array}{ll}
1 & \mbox{if $a_1+\cdots+a_{n-2} = i$, and} \\
0 & \mbox{otherwise.}
\end{array}\right.
\end{equation}
\item
For all $\mec b \in\cB$ and $i\in\integers$,
\begin{equation}\label{eq_sigma_n_sigma_n_minus_one}
(1-\frakt_n)(1-\frakt_{n-1})r_{{\rm BN},K_n}(\langle \mec b,i\rangle) =
\left\{\begin{array}{ll}
1 & \mbox{if $b_1+\cdots+b_{n-2} = i$, and} \\
0 & \mbox{otherwise.}
\end{array}\right.
\end{equation}
\end{enumerate}
\end{theorem}
\begin{proof}
The left-hand-side of \eqref{eq_sigma_n_sigma_n_minus_one_pre} equals
$$
(1-\frakt_n)(1-\frakt_{n-1})r_{{\rm BN},K_n}(\mec a)
=
(1-\frakt_{n-1})r_{{\rm BN},K_n}(\mec a)
-
(1-\frakt_{n-1})r_{{\rm BN},K_n}(\mec a - \mec e_n).
$$
Note that if $\mec a\in\cA_{\deg i}$, then
$$
\mec a - \mec e_n = (a_1,\ldots,a_{n-2},0,i-1-a_1-\cdots-a_{n-2})\in \cA_{\deg (i-1)}.
$$
By Theorem~\ref{th_complete_graph_sigma_n_minus_one_intermediate}, $(1-\frakt_{n-1})r_{{\rm BN},K_n}(\mec a)$ is $1$ or $0$ according to whether or not $a_1+\cdots+a_{n-2}\le i$, and similarly with $\mec a$ replaced by $\mec a - \mec e_n\in\cA_{\deg (i-1)}$, according to whether or not $a_1+\cdots+a_{n-2}\le i-1$. Hence we conclude \eqref{eq_sigma_n_sigma_n_minus_one_pre}.
(2)~(i.e., \eqref{eq_sigma_n_sigma_n_minus_one}) follows immediately from~(1) (i.e., \eqref{eq_sigma_n_sigma_n_minus_one_pre}).
\end{proof}
When going through the weight calculations in the next two subsections, it may be helpful to visualize consequences of Theorem~\ref{th_complete_graph_sigma_n_minus_one_intermediate} in the case $n=4$, and to consider what \eqref{eq_sigma_n_sigma_n_minus_one} means in terms of the $\langle \mec b,i\rangle$ coordinates, namely that the value is $1$ precisely when $b_1+b_2=i$; see Figure~\ref{fi_nEqualsFourTables}.
\begin{figure}
\nEqualsFourTables
\caption{The non-zero values of $(1-\frakt_{n-1})(1-\frakt_n) r_{\rm BN} (\langle \mec b,i\rangle)$ for $n=4$, $\mec b=(b_1,b_2)\in\{0,1,2,3\}^2$, namely $1$ if $b_1+b_2=i$, and $0$ otherwise.}
\label{fi_nEqualsFourTables}
\end{figure}
\subsection{A Generalization of the Weight Calculation}
To compute the weight of the Baker-Norine rank on $K_n$, we need to apply
$$
(1-\frakt_1)\ldots (1-\frakt_{n-2}).
$$
However, \eqref{eq_sigma_n_sigma_n_minus_one} implies that
$$
(1-\frakt_n)(1-\frakt_{n-1})r_{{\rm BN},K_n}(\langle \mec b,i\rangle) =
g(b_1+\cdots+b_{n-2}-i),
$$
for some function $g$ (namely the ``Dirac delta function at $0$,'' i.e., the function that is $1$ at $0$ and otherwise $0$).
We find it conceptually simpler to prove a theorem that applies
$$
(1-\frakt_1)\ldots (1-\frakt_{n-2})
$$
to any function of $\langle \mec b,i\rangle$ of the form
$$
g(b_1+\cdots+b_{n-2}-i).
$$
Here is the result. It will be helpful to introduce the following ``tensor'' notation: if $J\subset [n-2]$, then set
\begin{equation}\label{eq_frakt_J_tensor_notation}
\frakt_J = \prod_{j\in J} \frakt_{j} .
\end{equation}
\begin{proposition}\label{pr_preliminary_clarifying}
Let $h\from\integers^n\to\integers$ be any function that is invariant under translation by the image of the Laplacian of the complete graph. Suppose that for all $(\mec b,i)\in\cB\times \integers$, $h(\langle \mec b,i\rangle) = g(b_1+\cdots+b_{n-2}-i)$ for some function $g$, i.e., $h$ depends only on the value of $b_1+\cdots+b_{n-2}-i$. Then
\begin{enumerate}
\item
if $j\in[n-2]$ and $\mec b\in\cB=\{0,\ldots,n-1\}^{n-2}$ has $b_j>0$, then for all $i\in\integers$ we have
\begin{equation}\label{eq_vanishing_b_j_positive}
((1-\frakt_j)h)(\langle \mec b,i\rangle )=0 ;
\end{equation}
\item
let $j\in [n-2]$ and $J'\subset[n-2]$ with $j\notin J'$; if $\mec b\in\cB=\{0,\ldots,n-1\}^{n-2}$ has $b_j>0$, then for all $i\in\integers$ we have
\begin{equation}\label{eq_vanishing_b_j_positive_shift_J_prime}
\bigl((1-\frakt_j)\frakt_{J'} h\bigr)(\langle \mec b,i\rangle )=0
\end{equation}
(using the ``tensor'' notation \eqref{eq_frakt_J_tensor_notation});
\item
if $\mec b\in \cB$ with $\mec b\ne\mec 0$ (hence $b_j>0$ for some $j\in[n-2]$), then
\begin{equation}\label{eq_at_least_one_positive_b_j}
\bigl( (1-\frakt_1)\ldots (1-\frakt_{n-2})h \bigr)
(\langle \mec b,i\rangle) = 0;
\end{equation}
and
\item
(in the remaining case, $\mec b=\mec 0$)
\begin{equation}\label{eq_all_b_j_zero}
\bigl( (1-\frakt_1)\ldots (1-\frakt_{n-2})h \bigr)
(\langle \mec 0,i\rangle) =
\sum_{k=0}^{n-2} (-1)^k \binom{n-2}{k} g(kn-i) .
\end{equation}
\end{enumerate}
\end{proposition}
We remark that the proof below shows that claims~(1) and~(2) above hold, more generally, whenever
$$
h(\langle \mec b,i\rangle) = g(b_1,\ldots,b_{j-1},b_j-i,b_{j+1},\ldots,b_{n-2})
$$
for some $g$, i.e., $h$ is an arbitrary function, except that its dependence on $b_j$ and $i$ is only on $b_j-i$ and the rest of the $b_{j'}$ with $j'\ne j$.
\begin{proof}
Our proof will constantly use \eqref{eq_e_i_in_second_coordinates}.
Proof of~(1): if $b_j>0$, then $\mec b-\mec e_j\in\cB$, and hence
$$
\langle \mec b,i\rangle - \mec e_j = \langle \mec b-\mec e_j,i-1\rangle ,
$$
and hence
$$
\bigl((1-\frakt_j)h\bigr)(\langle \mec b,i\rangle )= h(\langle \mec b,i\rangle) - h( \langle \mec b-\mec e_j,i-1\rangle)
$$
$$
= g\bigl( (b_1+\cdots+b_{n-2})-i \bigr) -
g\bigl( (b_1+\cdots+b_{n-2}-1 )-(i-1) \bigr) = 0.
$$
This gives \eqref{eq_vanishing_b_j_positive}.
Proof of~(2): let
$$
\mec b' = ( \mec b - \mec e_{J'} ) \bmod n.
$$
Since $j\notin J'$ we have $b'_j=b_j>0$, and hence $\mec b'-\mec e_j\in\cB$. Hence
\begin{align*}
\bigl( \frakt_{J'} h \bigr)(\langle \mec b,i\rangle ) & =
h \bigl( \langle \mec b', i-|J'| \rangle \bigr) \\
\bigl( \frakt_j \frakt_{J'} h \bigr)(\langle \mec b,i\rangle ) & =
h \bigl( \langle \mec b'-\mec e_j, i-|J'|-1 \rangle \bigr).
\end{align*}
Hence the same calculation as in the previous paragraph (with $\mec b'$ replacing $\mec b$ and $i-|J'|$ replacing $i$) gives \eqref{eq_vanishing_b_j_positive_shift_J_prime}.
Proof of~(3): we have
$$
(1-\frakt_1)\ldots (1-\frakt_{n-2})
=
\sum_{J'\subset [n-2]\setminus\{j\}}
(-1)^{|J'|}
(1-\frakt_j) \frakt_{J'} ,
$$
and so \eqref{eq_vanishing_b_j_positive_shift_J_prime} implies~\eqref{eq_at_least_one_positive_b_j}.
Proof of~(4): for any $J\subset[n-2]$, using \eqref{eq_e_i_in_second_coordinates} we have
$$
\langle \mec 0,i \rangle - \mec e_J \sim
\langle (n-1)\mec e_J , i-|J| \rangle,
$$
and hence
$$
h\bigl( \langle \mec 0,i \rangle - \mec e_J \bigr)
=
h\bigl( \langle (n-1)\mec e_J , i-|J| \rangle \bigr)
= g\bigl( (n-1)|J| - (i-|J|) \bigr) = g(n|J|-i).
$$
Since
$$
(1-\frakt_1)\ldots (1-\frakt_{n-2})
=
\sum_{J\subset [n-2]}
(-1)^{|J|} \frakt_J ,
$$
we get
$$
\bigl( (1-\frakt_1)\ldots (1-\frakt_{n-2}) h \bigr) \bigl( \langle \mec 0,i \rangle \bigr)
=
\sum_{J\subset [n-2]}
(-1)^{|J|} g\bigl( n|J| - i \bigr)
$$
and \eqref{eq_all_b_j_zero} follows.
\end{proof}
\subsection{Computation of $W$}
\begin{theorem}\label{th_complete_graph_sigma_n_minus_one}
Fix $n\in\naturals$, and let $K_n=(V,E)$ be the complete graph on vertex set $V=[n]$. Consider the Baker-Norine rank $r_{\rm BN}\from\integers^n\to\integers$ on $K_n$. The weight, $W=\frakm (r_{{\rm BN},K_n})$, is given by
\begin{equation}\label{eq_weights_complete_graph}
W(\langle\mec b,i\rangle)=
\left\{
\begin{array}{ll}
(-1)^\ell \binom{n-2}{\ell} & \mbox{if $\mec b=\mec 0$ and $i=n\ell$ for some $\ell=0,\ldots,n-2$, and} \\
0 & \mbox{otherwise.}
\end{array}
\right.
\end{equation}
\end{theorem}
\begin{proof}
Setting
$$
h( \langle \mec b,i\rangle) =
\bigl( (1-\frakt_{n-1})(1-\frakt_n) r_{\rm BN} \bigr) ( \langle \mec b,i\rangle) ,
$$
\eqref{eq_sigma_n_sigma_n_minus_one} shows that
$$
h( \langle \mec b,i\rangle) = g(b_1+\cdots+b_{n-2}-i),
$$
where $g(0)=1$ and elsewhere $g$ vanishes. Since
$$
W = (1-\frakt_1)\cdots(1-\frakt_{n-2})h,
$$
we may apply Proposition~\ref{pr_preliminary_clarifying} and conclude: (1) if $\mec b\in\cB$ is nonzero, then \eqref{eq_at_least_one_positive_b_j} implies that
$$
W( \langle \mec b,i \rangle ) = 0,
$$
and (2) if $\mec b=\mec 0$, then
$$
W( \langle \mec 0,i \rangle ) =
\sum_{k=0}^{n-2} (-1)^k \binom{n-2}{k} g(nk-i).
$$
Hence $W( \langle \mec 0,i \rangle )=0$ unless $i$ is of the form $nk$, with $0\le k\le n-2$, in which case
$$
W( \langle \mec 0,nk \rangle ) = (-1)^k \binom{n-2}{k}.
$$
\end{proof}
\subsection{Remark on Theorem~\ref{th_complete_graph_sigma_n_minus_one}}
Another important consequence of Theorem~\ref{th_complete_graph_sigma_n_minus_one} is that, by symmetry, for any $\mec d\in\integers^n$, and any distinct $i,j\in[n]$ we have
$$
\bigl( (1-\frakt_i)(1-\frakt_j) W \bigr) (\mec d)\ge 0.
$$
In \cite{folinsbee_friedman_Euler_characteristics} this will imply that we can model $f=1+r_{{\rm BN},K_n}$ as Euler characteristics of a family of sheaves, in a sense explained there.
\section{Fundamental Domains and the Proofs of Theorems~\ref{th_modular_function_as_sum} and~\ref{th_modular_function_from_strip}}
\label{se_fundamental_domains}
In this section we prove Theorems~\ref{th_modular_function_as_sum} and~\ref{th_modular_function_from_strip}. We do so with a tool that we call a {\em cubism} of $\integers^n$. However, Theorem~\ref{th_modular_function_as_sum} has a more direct proof without using cubisms, so we first give the direct proof. In fact, the direct proof will motivate the definition of a cubism.
\subsection{Proof of Theorem~\ref{th_modular_function_as_sum} Without Reference to Cubisms}
\begin{lemma}\label{le_coord_domain_formula}
Let $n\in\naturals$, and let $\cD^n_{\rm coord}\subset\integers^n$ be given by
\begin{equation}\label{eq_coordinate_domain}
\cD^n_{\rm coord}=\{ \mec d \ | \mbox{$d_i=0$ for at least one $i\in[n]$} \}.
\end{equation}
Then for any $f\from\cD^n_{\rm coord}\to\integers$, there exist functions $h_i\from\integers^n\to\integers$ for each $i\in[n]$ such that
\begin{enumerate}
\item
$h_i=h_i(\mec d)$ is independent of the $i$-th variable, $d_i$, and
\item
\begin{equation}\label{eq_write_f_as_h_i_indep_of_ith_var}
\forall \mec d\in\cD^n_{\rm coord}, \quad
f(\mec d) = \sum_{i=1}^n h_i(\mec d).
\end{equation}
\end{enumerate}
\end{lemma}
Hence the function $\sum_i h_i$ above is an extension of $f$ to all of $\integers^n$ such that each $h_i$ is independent of its $i$-th variable.
Before giving the formal proof, let us explain the ideas for small $n$. The case $n=1$ is immediate. The proof for $n=2$ is as follows: consider
\begin{equation}\label{eq_g_as_alt_sum}
g(d_1,d_2) = f(d_1,0) + f(0,d_2) - f(0,0):
\end{equation}
since
$$
g(d_1,0) = f(d_1,0) + f(0,0) - f(0,0) = f(d_1,0)
$$
we have $f(\mec d)=g(\mec d)$ whenever $d_2=0$; by symmetry, the same is true if $d_1=0$; hence $g=f$ on all of $\cD^2_{\rm coord}$. But we easily write the right-hand-side of \eqref{eq_g_as_alt_sum} as $h_1(d_2)+h_2(d_1)$, by setting, say, $h_1(d_2)=f(0,d_2)-f(0,0)$ and setting $h_2(d_1)=f(d_1,0)$.
Similarly for $n=3$, with
$$
g(d_1,d_2,d_3) = f(d_1,d_2,0)+ f(d_1,0,d_3) + f(0,d_2,d_3)
- f(d_1,0,0)- f(0,d_2,0) - f(0,0,d_3)
+ f(0,0,0).
$$
For all $n\ge 4$, we simply need to introduce convenient notation.
\begin{proof}
For $\mec d\in\integers^n$ and $I\subset[n]$, introduce the notation
$$
\mec d_I = \sum_{i\in I} d_i \mec e_i.
$$
Consider the function $g\from\integers^n\to\integers$ given by
\begin{equation}\label{eq_extend_h_on_coord_to_modular}
g(\mec d) = \sum_{I\subset [n],\ I\ne[n]} f(\mec d_I)(-1)^{n-1-|I|}
\end{equation}
(which makes sense, since $\mec d_I\in\cD^n_{\rm coord}$ whenever $I\ne[n]$).
We claim that $g=f$ when restricted to $\mec d\in \cD^n_{\rm coord}$; by symmetry it suffices to check the case $d_n=0$, whereupon each term $f(\mec d_I)$ with $n\notin I$ cancels the term corresponding to $I\cup\{n\}$, except for the single remaining term where $I=\{1,\ldots,n-1\}$ (whose partner $I\cup\{n\}=[n]$ is excluded from the sum). Hence for $d_n=0$, $g(\mec d)=f(\mec d_{\{1,\ldots,n-1\}})=f(\mec d)$, and, by symmetry, $g=f$ on all of $\cD^n_{\rm coord}$.
Now we see that the right-hand-side of \eqref{eq_extend_h_on_coord_to_modular} is of the desired form $\sum_i h_i$ as in the statement of the lemma, by setting
$$
h_i = \sum_{i\notin I,\ 1,\ldots,i-1\in I} f(\mec d_I)(-1)^{n-1-|I|};
$$
since for each $I\subset[n]$ with $I\ne[n]$ there is a unique $i\in[n]$ such that $i\notin I$ but $1,\ldots,i-1\in I$ (namely the lowest value of $i$ not in $I$), we see that $\sum_i h_i$ equals the right-hand-side of \eqref{eq_extend_h_on_coord_to_modular}; moreover each $h_i$ is indeed independent of $d_i$, since $i\notin I$ in each of its terms.
\end{proof}
\begin{theorem}\label{th_coordinate_axes_function_unique_ext_to_mod}
Let $n\in\naturals$ and $\cD^n_{\rm coord}$ be as in \eqref{eq_coordinate_domain}. Then any function $f\from \cD^n_{\rm coord}\to\integers$ has a unique extension to a modular function $h\from\integers^n\to\integers$.
\end{theorem}
\begin{proof}
The existence of such an extension is guaranteed by Lemma~\ref{le_coord_domain_formula}, since a sum of functions, each of which is independent of one of the variables, is modular.
Let us prove uniqueness. By symmetry it suffices to show that the values of $h$ on the set
$$
\naturals^n = \{ \mec d \ | \ \mbox{$d_i>0$ for all $i\in[n]$} \}
$$
are uniquely determined. But if $h$ is modular, then
\begin{equation}\label{eq_h_mec_d_given_by_lower_values}
h(\mec d) = \sum_{I\subset [n],\ I\ne \emptyset}
(-1)^{|I|+1} h(\mec d-\mec e_I).
\end{equation}
Now we prove by induction on $m$ that for all $m\ge n$, if $\mec d\in\naturals^n$ and $\deg(\mec d)=m$, then $h(\mec d)$ is uniquely determined. The base case is $m=n$, where the only element of degree $n$ in $\naturals^n$ is $\mec d=\mec 1$. But for each $I\subset[n]$ with $I\ne\emptyset$, $\mec 1-\mec e_I\in \cD^n_{\rm coord}$; hence \eqref{eq_h_mec_d_given_by_lower_values} uniquely determines $h(\mec 1)$.
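For instance, when $n=2$, equation \eqref{eq_h_mec_d_given_by_lower_values} at $\mec d=\mec 1$ reads
$$
h(1,1) = h(1,0) + h(0,1) - h(0,0),
$$
and each value on the right-hand side is a value of $h$ on $\cD^2_{\rm coord}$, i.e., a value of $f$.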
To prove the inductive claim: let $\mec d\in\naturals^n$ with $\deg(\mec d)=m$; for all $I\subset [n]$ with $I\ne\emptyset$, we have $\mec d-\mec e_I\ge \mec 0$, and $\mec d-\mec e_I$ has degree less than $m$. Hence \eqref{eq_h_mec_d_given_by_lower_values} determines $h(\mec d)$ in terms of values of $h$ at points that either lie in $\cD^n_{\rm coord}$ or lie in $\naturals^n$ and have degree less than $m$; all such values have, by hypothesis or by induction, already been determined.
\end{proof}
\begin{proof}[Proof of Theorem~\ref{th_modular_function_as_sum}]
One direction is immediate; it suffices to show that any modular function, $h$, can be written as a sum of functions, each of which depends on only $n-1$ of its variables. So consider the restriction of $h$ to $\cD^n_{\rm coord}$; by Theorem~\ref{th_coordinate_axes_function_unique_ext_to_mod}, this restriction has a unique extension to a modular function, which must therefore be $h$. By Lemma~\ref{le_coord_domain_formula}, this restriction also equals $\sum_i h_i$ on $\cD^n_{\rm coord}$, where each $h_i$ is independent of its $i$-th variable; since $\sum_i h_i$ is modular, the uniqueness implies that $h=\sum_i h_i$.
\end{proof}
\subsection{Fundamental Modular Domains}
Let us restate what we proved in the previous subsection.
\begin{definition}
Let $\cD\subset\integers^n$. We call $\cD$ a {\em fundamental modular domain} (respectively {\em subfundamental}, {\em superfundamental}) if for every function $f\from\cD\to\integers$ there exists a unique (respectively, at least one, at most one) modular function $h\from\integers^n\to\integers$ such that $f=h$ on $\cD$.
\end{definition}
We remark that our terminology results from the following almost immediate facts: a subset of a subfundamental modular domain is subfundamental, and a strict subset of a fundamental domain is not fundamental; similarly for supersets and superfundamental domains.
In the last subsection, Theorem~\ref{th_modular_function_as_sum} was proven via Theorem~\ref{th_coordinate_axes_function_unique_ext_to_mod}, which proved that $\cD^n_{\rm coord}$ is a fundamental modular domain. Theorem~\ref{th_modular_function_from_strip} essentially states that for any $n\in\naturals$ and $a\in\integers$,
$$
\cD = \{\mec d \in\integers^n \ | \ a\le \deg(\mec d)\le a+n-1 \}
$$
is a fundamental modular domain. We can prove both of these facts by the method of a {\em cubism}, which we now explain.
\subsection{Cubisms: Motivation, Definition, and Implication of Domain Fundamentality}
The proof of Theorem~\ref{th_coordinate_axes_function_unique_ext_to_mod} can be viewed as follows: we ordered the elements of $\naturals^n$ by a function
$$
{\rm rank}(\mec d)=d_1+\cdots+d_n-(n-1),
$$
(so the minimum rank of an element of $\naturals^n$ is $1$), and proved by induction on $m\ge 1$ that there is a unique extension of a function $h\from\cD^n_{\rm coord}\to\integers$ to all points of rank at most $m$ so that $(\frakm h)(\mec d)=0$ for all $\mec d$ of rank at most $m$. Let us generalize this idea.
\begin{definition}
For $\mec d\in\integers^n$, the {\em $\mec d$-cube} refers to the set
$$
{\rm Cube}(\mec d) = \{\mec d'\in\integers^n \ | \ \mec d-\mec 1\le\mec d'\le \mec d \}.
$$
We refer to the set of all $\mec d$-cubes as the set of $n$-cubes.
If $\cD\subset \integers^n$, we say that a function $r\from\integers^n\to\naturals$ is a {\em cubism of $\cD$} if, setting
\begin{equation}\label{eq_cal_D_sub_m}
\cD_m = \cD\cup \bigcup_{r(\mec d)\le m} {\rm Cube}(\mec d)
\end{equation}
for $m\in\integers_{\ge 0}$ (hence $\cD_0=\cD$), we have
\begin{enumerate}
\item if $m\ge 1$ and $\mec d\ne\mec d'$ with $r(\mec d)=r(\mec d')=m$, then
\begin{equation}\label{eq_rank_m_cubes_intersection_lower_rank}
{\rm Cube}(\mec d)\cap{\rm Cube}(\mec d')\subset \cD_{m-1},
\end{equation}
and
\item for all $m\ge 1$ and $\mec d\in\integers^n$ with $r(\mec d)=m$ we have
\begin{equation}\label{eq_singleton_in_rank}
\bigl| {\rm Cube}(\mec d)\setminus \cD_{m-1} \bigr| = 1 .
\end{equation}
\end{enumerate}
\end{definition}
In the last paragraph of this section we remark that in some cubisms it is more convenient to replace the partial ordering of the $n$-cubes induced by the function $r\from\integers^n\to\naturals$ above with, more generally, a well-ordering or a partial ordering such that each nonempty subset has a minimal element.
\begin{example}
In Figure~\ref{fi_axis_cubism} we illustrate an example of a cubism of $\cD$, with $\cD=\cD_{\rm coord}$ as above, suggested by the above proof of Theorem~\ref{th_coordinate_axes_function_unique_ext_to_mod} with $n=2$ (so the $n$-cubes are really squares).
\end{example}
\begin{figure}
\centering
\begin{subfigure}[b]{0.3\textwidth}
\centering
\hspace*{-4cm}
\begin{tabular}{|*2{c|}}
\hline
$\cD_i,\ i=0$ & $\cD_i,\ i=1$
\\ \hline
$$ \PicCubeZero $$ & $$ \PicCubeOne $$
\\ \hline
$\cD_i,\ i=2$ & $\cD_i,\ i=3$
\\ \hline
$$ \PicCubeTwo $$ & $$ \PicCubeThree $$
\\ \hline
\end{tabular}
\caption{New points $\cD_i\setminus\cD_{i-1}$ in red, old points $\cD_{i-1}$ in blue}
\end{subfigure}
\begin{subfigure}[b]{0.3\textwidth}
\centering
\axiscubism
\caption{The cubism after 4 steps.}
\end{subfigure}
\caption{A cubism for $\cD_{\rm coord}^n$ with $n=2$.}
\label{fi_axis_cubism}
\end{figure}
\begin{proposition}
If $\cD\subset\integers^n$ has a cubism, then $\cD$ is fundamental.
\end{proposition}
\begin{proof}
Fix a function $f\from\cD\to\integers$, and set $g_0=f$. Let us prove by induction on $m\in\naturals$ that there is a unique function $g_m\from\cD_m\to\integers$ such that
\begin{enumerate}
\item $(\frakm g_m)(\mec d)=0$ for all $\mec d$ with $r(\mec d)\le m$;
\item the restriction of $g_m$ to $\cD_{m-1}$ equals $g_{m-1}$; and
\item the value of $g_m$ on each $\mec c\in \cD_m\setminus \cD_{m-1}$ is determined by the equation $(\frakm g_m)(\mec d)=0$ for a unique $\mec d$ with $r(\mec d)=m$ such that $\mec c\in {\rm Cube}(\mec d)\setminus\cD_{m-1}$, via the equation
\begin{equation}\label{eq_g_m_value_determined_by_cubism}
- g_m(\mec c) (-1)^{\deg(\mec d-\mec c)}
=
\sum_{\mec c'\in {\rm Cube}(\mec d)\setminus\{\mec c\}}
g_{m-1}(\mec c') (-1)^{\deg(\mec d-\mec c')} .
\end{equation}
\end{enumerate}
The base case $m=1$ is argued almost exactly as the inductive claim from $m-1$ to $m$; so we will prove the base case $m=1$, keeping the notation $m$ throughout.
For $m=1$, we have that $\cD_{m-1}=\cD_0=\cD$, and \eqref{eq_singleton_in_rank} implies that for each $\mec d$ with $r(\mec d)=m$, there is a unique $\tilde{\mec d}\notin \cD_{m-1}$ in ${\rm Cube}(\mec d)$; the equation $(\frakm g_m)(\mec d)=0$ is equivalent to
\begin{equation}\label{eq_frakm_as_sum_over_c_in_d_Cube}
\sum_{\mec c\in{\rm Cube}(\mec d)} g_m(\mec c) (-1)^{\deg(\mec d-\mec c)} = 0.
\end{equation}
This determines $g_m(\tilde{\mec d})$ via \eqref{eq_g_m_value_determined_by_cubism} with $\mec c=\tilde{\mec d}$, since every other element of ${\rm Cube}(\mec d)$ appearing in the sum \eqref{eq_frakm_as_sum_over_c_in_d_Cube} lies in $\cD_{m-1}$; \eqref{eq_rank_m_cubes_intersection_lower_rank} shows that for distinct $\mec d,\mec d'$ of rank $m$, the corresponding $\tilde{\mec d},\tilde{\mec d'}$ are distinct, so that it is possible to set the value of $g_m$ as required on all $\tilde{\mec d}$ that are the unique element of ${\rm Cube}(\mec d)\setminus\cD_{m-1}$ for some $\mec d$ of rank $m$.
For the inductive step, we assume the claim holds for $m-1$, and we repeat the same argument above.
This shows that $g_m\from\cD_m\to\integers$ exists for all $m$ with the desired properties.
Now define $h\from\integers^n\to\integers$ as follows: for any $\mec d\in\integers^n$, we have $\mec d\in{\rm Cube}(\mec d)\subset\cD_m$, where $m=r(\mec d)$; hence $g_m(\mec d)$ is defined; set $h(\mec d)=g_m(\mec d)$.
We claim that $h$ above is modular: indeed, for any $\mec d\in\integers^n$, if $m=r(\mec d)$, then $\frakm g_m(\mec d)=0$ and $\cD_m$ contains ${\rm Cube}(\mec d)$; since $g_{m+1},g_{m+2},\ldots$ are all extensions of $g_m$, we have $\frakm h(\mec d)=\frakm g_m(\mec d)=0$.
Now we claim that $h$ is the unique modular function $\integers^n\to\integers$ whose restriction to $\cD$ is $f$: indeed, assume that $h'$ is another such modular function, and that $h\ne h'$; then the definition of $h$ implies that there exists an $m$ such that $g_m$ does not equal the restriction of $h'$ to $\cD_m$; consider the smallest such $m$. Since the restrictions of $h$ and $h'$ to $\cD_0=\cD$ both equal $f$, we must have $m\ge 1$.
It follows that $h(\mec c)\ne h'(\mec c)$ for some $\mec c\in\cD_m\setminus \cD_{m-1}$ with $m\ge 1$; fix such a $\mec c$. By condition~(3) on $g_m$ (i.e., \eqref{eq_g_m_value_determined_by_cubism} and above), there is some $\mec d$ with $r(\mec d)=m$ for which $\mec c$ is the unique element of ${\rm Cube}(\mec d)\setminus\cD_{m-1}$. But since $h$ and $h'$ both agree with $g_{m-1}$ on $\cD_{m-1}$, we have
\begin{align*}
(\frakm h')(\mec d) & =
h'(\mec c) (-1)^{\deg(\mec d-\mec c)}
+
\sum_{\mec c'\in {\rm Cube}(\mec d)\setminus\{\mec c\}}
g_{m-1}(\mec c') (-1)^{\deg(\mec d-\mec c')}
\\
& \ne
h(\mec c) (-1)^{\deg(\mec d-\mec c)}
+
\sum_{\mec c'\in {\rm Cube}(\mec d)\setminus\{\mec c\}}
g_{m-1}(\mec c') (-1)^{\deg(\mec d-\mec c')} = 0,
\end{align*}
and hence $(\frakm h')(\mec d)\ne 0$; hence $h'$ is not modular, a contradiction.
\end{proof}
[Straying a bit, one could define a subcubism by replacing the $=1$ in \eqref{eq_singleton_in_rank} by $\ge 1$, and the same proof shows that a $\cD$ with a subcubism is subfundamental; similarly for supercubism and $\le 1$.]
\subsection{Second Proof of Theorem~\ref{th_coordinate_axes_function_unique_ext_to_mod}}
The proof of Theorem~\ref{th_coordinate_axes_function_unique_ext_to_mod} above can be viewed as giving a cubism (e.g., Figure~\ref{fi_axis_cubism} for $n=2$). Let us formalize this.
\begin{proof}[Second proof of Theorem~\ref{th_coordinate_axes_function_unique_ext_to_mod}] For each $\mec d\in\integers^n$, let $$ r(\mec d) =|d_1|+\cdots+|d_n| + \bigl| \{ i\in[n] \ | \ d_i\le 0\}\bigr|-n+1 ; $$ more intuitively, $r(\mec d)$ is just the $L^1$ distance of the furthest point in ${\rm Cube}(\mec d)$ to $\cD^n_{\rm coord}$, since if all $d_i\ge 1$ then the furthest point is just $\mec d$, and $r(\mec d)$ is just $d_1+\cdots+d_n-n+1$, and otherwise we need minor corrections for those $d_i\le 0$. Now we claim that $r$ is a cubism. To show that $r$ attains only positive integer values, we can write $r$ as $$ r(\mec d) = 1+\sum_{i=1}^n \max(d_i-1,-d_i); $$ since $\max(d_i-1,-d_i)$ is non-negative for any $d_i\in\integers$, $r$ attains only positive values. We leave the verification of~(1) and~(2) in the definition of a cubism to the reader. \end{proof} We also remark that---unlike the above example---there is no need for $r^{-1}(\{m\})$ to be finite; in fact, the next example shows that it can be convenient for $r^{-1}\{m\}$ to be infinite. \subsection{Other Examples of Cubisms and the Proof of Theorem~\ref{th_modular_function_from_strip}} \begin{proof}[Proof of Theorem~\ref{th_modular_function_from_strip}] Let $$ \cD = \{\mec d \ | \ a\le \deg(\mec d)\le a+n-1 \}. $$ Define $r\from\integers^n\to\naturals$ as $$ r(\mec d) = \left\{ \begin{array}{ll} \deg(\mec d)-a+n+1 & \mbox{if $\deg(\mec d)\ge a+n$, and} \\ a+n-\deg(\mec d) & \mbox{if $\deg(\mec d)< a+n$.} \end{array} \right. $$ Setting $\cD_0=\cD$ and, for $m\in\naturals$, $\cD_m$ as in \eqref{eq_cal_D_sub_m}, we easily see that that if $r(\mec d)=m$ then ${\rm Cube}(\mec d)\setminus\cD_{m-1}$ consists of a single point, namely $\mec d$ if $\deg(\mec d)\ge a+n$, and otherwise the single point $\mec d-\mec 1$. We easily see that these single points are distinct as $\mec d$ varies over all $\mec d\notin\cD$, and it follows that $r$ is a cubism of $\cD$. \end{proof} \begin{example}\label{ex_strange_fund_dom} One can show by a cubism argument that the set $\cD\subset\integers^2$ given by $$ \{(0,0)\}\cup\{\mec d\in\integers^2 \ | \ \deg(\mec d)=\pm 1 \} $$ is fundamental, by defining $r(\mec d)$ to be $|d_1|$ if $\deg(\mec d)=1$ and otherwise $||\deg(\mec d)|-1|$; we depict this cubism in Figure~\ref{fi_degree_cubism}. It follows that any subset of $\cD$ is subfundamental (e.g., removing $(0,0)$), and any superset of $\cD$ is superfundamental. \end{example} It is intriguing---but not relevant to this article---to consider the various other fundamental modular domains of $\integers^n$. \begin{figure} \centering \begin{subfigure}[b]{0.3\textwidth} \centering \hspace*{-4cm} \begin{tabular}{|*2{c|}} \hline $\cD_i,\ i=0$ & $\cD_i,\ i=1$ \\ \hline $$ \PicDegCubeZero $$ & $$ \PicDegCubeOne $$ \\ \hline $\cD_i,\ i=2$ & $\cD_i,\ i=3$ \\ \hline $$ \PicDegCubeTwo $$ & $$ \PicDegCubeThree $$ \\ \hline \end{tabular} \caption{New points in red, old points in blue} \label{fi_points degree cubism} \end{subfigure} \begin{subfigure}[b]{0.3\textwidth} \centering \degreecubism \caption{The cubism after 4 steps.} \label{fi_squares axis} \end{subfigure} \caption{A Cubism for Example~\ref{ex_strange_fund_dom}.} \label{fi_degree_cubism} \end{figure} We also note that in Example~\ref{ex_strange_fund_dom}, it may be simpler to first extend a function $\cD\to\integers$ along all points of degree $0$, whereupon the extension is defined on all points of degree between $-1$ and $1$, and then further extend the function to all of $\integers^n$. 
In this case one can view the set of $2$-cubes as a well-ordered set, where all points of degree $0$ are ordered before all points of degrees not between $-1$ and $1$. One can therefore define a more general cubism as any well-ordering of the $n$-cubes of $\integers^n$, or, more generally, any partial ordering such that each subset of $n$-cubes has a minimal element. The proofs of all theorems easily generalize to these more general notions of a cubism.
\newpage
\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace}
\providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR }
\providecommand{\MRhref}[2]{
\href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2}
}
\providecommand{\href}[2]{#2}
\begin{thebibliography}{CDPR12}
\bibitem[AC13]{amini2013} Omid Amini and Lucia Caporaso, \emph{Riemann-{R}och theory for weighted graphs and tropical curves}, Adv. Math. \textbf{240} (2013), 1--23. \MR{3046301}
\bibitem[AM10]{amini_manjunath} Omid Amini and Madhusudan Manjunath, \emph{Riemann-{R}och for sub-lattices of the root lattice {$A_n$}}, Electron. J. Combin. \textbf{17} (2010), no.~1, Research Paper 124, 50. \MR{2729373}
\bibitem[Bac17]{backman} Spencer Backman, \emph{Riemann-{R}och theory for graph orientations}, Adv. Math. \textbf{309} (2017), 655--691. \MR{3607288}
\bibitem[BN07]{baker_norine} Matthew Baker and Serguei Norine, \emph{Riemann-{R}och and {A}bel-{J}acobi theory on a finite graph}, Adv. Math. \textbf{215} (2007), no.~2, 766--788. \MR{2355607}
\bibitem[CB13]{cori_le_borgne} Robert Cori and Yvan~Le Borgne, \emph{The {R}iemann-{R}och theorem for graphs and the rank in complete graphs}, 2013, available at \url{https://arxiv.org/abs/1308.5325}.
\bibitem[CDPR12]{Cools} Filip Cools, Jan Draisma, Sam Payne, and Elina Robeva, \emph{A tropical proof of the {B}rill-{N}oether theorem}, Adv. Math. \textbf{230} (2012), no.~2, 759--776. \MR{2914965}
\bibitem[CLB16]{cori_le_borgne2} Robert Cori and Yvan Le~Borgne, \emph{On computation of {B}aker and {N}orine's rank on complete graphs}, Electron. J. Combin. \textbf{23} (2016), no.~1, Paper 1.31, 47. \MR{3484736}
\bibitem[CLM15]{Caporaso} Lucia Caporaso, Yoav Len, and Margarida Melo, \emph{Algebraic and combinatorial rank of divisors on finite graphs}, J. Math. Pures Appl. (9) \textbf{104} (2015), no.~2, 227--257. \MR{3365828}
\bibitem[FF]{folinsbee_friedman_Euler_characteristics} Nicolas Folinsbee and Joel Friedman, \emph{Modeling {R}iemann functions and {R}iemann-{R}och formulas as {E}uler characteristics}, to appear.
\bibitem[GK08]{Gathmann} Andreas Gathmann and Michael Kerber, \emph{A {R}iemann-{R}och theorem in tropical geometry}, Math. Z. \textbf{259} (2008), no.~1, 217--230. \MR{2377750}
\bibitem[Har77]{hartshorne} Robin Hartshorne, \emph{Algebraic geometry}, Springer-Verlag, New York, 1977, Graduate Texts in Mathematics, No. 52. \MR{57 \#3116}
\bibitem[HKN13]{Hladk} Jan Hladk\'{y}, Daniel Kr\'{a}\v{l}, and Serguei Norine, \emph{Rank of divisors on tropical curves}, J. Combin. Theory Ser. A \textbf{120} (2013), no.~7, 1521--1538. \MR{3092681}
\bibitem[JM13]{James} Rodney James and Rick Miranda, \emph{A {R}iemann-{R}och theorem for edge-weighted graphs}, Proc. Amer. Math. Soc. \textbf{141} (2013), no.~11, 3793--3802. \MR{3091769}
\bibitem[Lan82]{lang_algebraic_functions} Serge Lang, \emph{Introduction to algebraic and abelian functions}, second ed., Graduate Texts in Mathematics, vol.~89, Springer-Verlag, New York-Berlin, 1982. \MR{681120}
\bibitem[MS13]{manjunath2012} Madhusudan Manjunath and Bernd Sturmfels, \emph{Monomials, binomials and {R}iemann-{R}och}, J. Algebraic Combin.
\textbf{37} (2013), no.~4, 737--756. \MR{3047017} \bibitem[MS14]{Mohammadi} Fatemeh Mohammadi and Farbod Shokrieh, \emph{Divisors on graphs, connected flags, and syzygies}, Int. Math. Res. Not. IMRN (2014), no.~24, 6839--6905. \MR{3291642} \end{thebibliography} \end{document}
2205.13581v2
http://arxiv.org/abs/2205.13581v2
Combinatorial constructions of generating functions of cylindric partitions with small profiles into unrestricted or distinct parts
\documentclass[12pt]{amsart} \usepackage{latexsym} \usepackage{psfrag} \usepackage{amsmath} \usepackage{amssymb} \usepackage{epsfig} \usepackage{amsfonts} \usepackage{amscd} \usepackage{mathrsfs} \usepackage{graphicx} \usepackage{enumerate} \usepackage[autostyle=false, style=english]{csquotes} \MakeOuterQuote{"} \usepackage{ragged2e} \usepackage[all]{xy} \usepackage{mathtools} \newlength\ubwidth \newcommand\parunderbrace[2]{\settowidth\ubwidth{$#1$}\underbrace{#1}_{\parbox{\ubwidth}{\scriptsize\RaggedRight#2}}} \usepackage[dvipsnames]{xcolor} \newcommand{\red}{\color{red}} \usepackage{placeins} \usepackage{tikz} \usetikzlibrary{shapes, positioning} \usetikzlibrary{matrix,shapes.geometric,calc,backgrounds} \oddsidemargin 0in \textwidth 6.5in \evensidemargin 0in \topmargin -.4in \textheight 9in \parindent0pt \parskip1.6ex \newtheorem{theorem}{Theorem}\newtheorem{lemma}[theorem]{Lemma} \newtheorem{obs}[theorem]{Observation} \newtheorem{ex}[theorem]{Example} \newtheorem{claim}[theorem]{Claim} \newtheorem{cor}[theorem]{Corollary} \newtheorem{prop}[theorem]{Proposition} \newtheorem{conj}[theorem]{Conjecture} \newtheorem{defn}[theorem]{Definition} \newtheorem{question}[theorem]{Question} \newtheorem{alg}[theorem]{Procedure} \usepackage{amsmath,amssymb,amsthm} \usepackage[dvipsnames]{xcolor} \usepackage[colorlinks=true,citecolor=RoyalBlue,linkcolor=red,breaklinks=true]{hyperref} \usepackage{graphicx} \usepackage{mathdots} \usepackage[margin=2.5cm]{geometry} \usepackage{ytableau} \newcommand*\circled[1]{\tikz[baseline=(char.base)]{ \node[shape=circle,draw,inner sep=2pt] (char) {#1};}} \newcommand{\mathcolorbox}[2]{ \colorbox{#1}{$\displaystyle#2$}} \begin{document} \title[Construction of cylindric partitions with small profiles] {Combinatorial constructions of generating functions of cylindric partitions with small profiles into unrestricted or distinct parts} \author[Kur\c{s}ung\"{o}z]{Ka\u{g}an Kur\c{s}ung\"{o}z} \address{Ka\u{g}an Kur\c{s}ung\"{o}z, Faculty of Engineering and Natural Sciences, Sabanc{\i} University, Tuzla, Istanbul 34956, Turkey} \email{[email protected]} \author[\"{O}mr\"{u}uzun Seyrek]{Hal\.{ı}me \"{O}mr\"{u}uzun Seyrek} \address{Hal\.{ı}me \"{O}mr\"{u}uzun Seyrek, Faculty of Engineering and Natural Sciences, Sabanc{\i} University, Tuzla, Istanbul 34956, Turkey} \email{[email protected]} \subjclass[2010]{05A17, 05A15, 11P84} \keywords{integer partitions, cylindric partitions, partition generating function} \date{2022} \begin{abstract} In this paper, cylindric partitions into profiles $c=(1,1)$ and $c=(2,0)$ are considered. The generating functions into unrestricted cylindric partitions and cylindric partitions into distinct parts with these profiles are constructed. The constructions are combinatorial and they connect the cylindric partitions with ordinary partitions. \end{abstract} \maketitle \section{Introduction} Cylindric partitions were introduced by Gessel and Krattenthaler \cite{GesselKrattenthaler}. \begin{defn}\label{def:cylin} Let $k$ and $\ell$ be positive integers. Let $c=(c_1,c_2,\dots, c_k)$ be a composition, where $c_1+c_2+\dots+c_k=\ell$. 
A \emph{cylindric partition with profile $c$} is a vector partition $\Lambda = (\lambda^{(1)},\lambda^{(2)},\dots,\lambda^{(k)})$, where each $\lambda^{(i)} = \lambda^{(i)}_1+\lambda^{(i)}_2 + \cdots +\lambda^{(i)}_{s_i}$ is a partition, such that for all $i$ and $j$, $$\lambda^{(i)}_j\geq \lambda^{(i+1)}_{j+c_{i+1}} \quad \text{and} \quad \lambda^{(k)}_{j}\geq\lambda^{(1)}_{j+c_1}.$$ \end{defn} For example, the sequence $\Lambda=((6,5,4,4),(8,8,5,3),(7,6,4,2))$ is a cylindric partition with profile $(1,2,0)$. One can check that for all $j$, $\lambda^{(1)}_j\ge \lambda^{(2)}_{j+2}$, $\lambda^{(2)}_j\ge \lambda^{(3)}_{j}$ and $\lambda^{(3)}_j\ge \lambda^{(1)}_{j+1}$. We can visualize the required inequalities by writing the partitions in subsequent rows repeating the first row below the last one, and shifting the rows below as much as necessary to the left. Thus, the inequalities become the weakly decreasing of the parts to the right in each row, and downward in each column. \[ \begin{array}{ccc ccc ccc} & & & 6 & 5 & 4 & 4\\ & 8 & 8 & 5 & 3 & \\ & 7 & 6 & 4 & 2& \\ \textcolor{lightgray}{6} & \textcolor{lightgray}{5} & \textcolor{lightgray}{4} & \textcolor{lightgray}{4} \end{array} \] The repeated first row is shown in gray. The size $|\Lambda|$ of a cylindric partition $\Lambda = (\lambda^{(1)},\lambda^{(2)},\dots,\lambda^{(k)})$ is defined to be the sum of all the parts in the partitions $\lambda^{(1)},\lambda^{(2)},\dots,\lambda^{(k)}$. The largest part of a cylindric partition $\Lambda$ is defined to be the maximum part among all the partitions in $\Lambda$, and it is denoted by $\max(\Lambda)$. The following generating function $$F_c(z,q):=\sum_{\Lambda\in \mathcal{P}_c} z^{\max{(\Lambda)}}q^{|\Lambda |}$$ is the generating function for cylindric partitions, where $\mathcal{P}_c$ denotes the set of all cylindric partitions with profile $c$. In 2007, Borodin \cite{Borodin} showed that when one sets $z=1$ to this generating function, it turns out to be a very nice infinite product. \begin{theorem}[Borodin, 2007] \label{theorem-Borodin} Let $k$ and $\ell$ be positive integers, and let $c=(c_1,c_2,\dots,c_k)$ be a composition of $\ell$. Define $t:=k+\ell$ and $s(i,j) := c_i+c_{i+1}+\dots+ c_j$. Then, \begin{equation} \label{BorodinProd} F_c(1,q) = \frac{1}{(q^t;q^t)_\infty} \prod_{i=1}^k \prod_{j=i}^k \prod_{m=1}^{c_i} \frac{1}{(q^{m+j-i+s(i+1,j)};q^t)_\infty} \prod_{i=2}^k \prod_{j=2}^i \prod_{m=1}^{c_i} \frac{1}{(q^{t-m+j-i-s(j,i-1)};q^t)_\infty}. \end{equation} \end{theorem} The identity (\refeq{BorodinProd}) is a very strong tool to find product representation of generating functions of cylindric partitions with a given profile explicitly. Cylindric partitions have been studied intensively since their introduction \cite{GesselKrattenthaler}. Prominent examples are constructing Andrews-Gordon~\cite{Andrews-PNAS} type evidently positive multiple series companions to some cases in Borodin's theorem~\cite{CDU, CW, OW}, or even connections with theoretical physics~\cite{IKS}. The purpose of this paper is to construct generating functions of cylindric partitions with small profiles into unrestricted or distinct parts. In Section 2, we combinatorially reprove generating functions for cylindric partitions with profiles $c=(1,1)$ and $c=(2,0)$. The construction is based on the fact that if we have a cylindric partition with profile $c=(1,1)$ or $c=(2,0)$, then it can be decomposed into a pair of partitions $(\mu,\beta)$ by a series of combinatorial moves. 
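As a small illustration of Theorem~\ref{theorem-Borodin} in the first case treated below, take the profile $c=(1,1)$, so that $k=\ell=2$ and $t=4$; reading the empty sums $s(i,j)$ with $i>j$ as $0$, the product (\ref{BorodinProd}) becomes
$$
F_c(1,q) = \frac{1}{(q;q^4)_\infty^2\,(q^3;q^4)_\infty^2\,(q^4;q^4)_\infty},
$$
which agrees with (\ref{c=(1,1)}) below, since $(q;q)_\infty=(q;q^4)_\infty(q^2;q^4)_\infty(q^3;q^4)_\infty(q^4;q^4)_\infty$ and $(-q;q^2)_\infty\,(q;q^2)_\infty=(q^2;q^4)_\infty$.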
The results in Section \ref{secGenFuncsUnrestricted} are limiting cases, therefore corollaries, of~\cite[eq. (7.25)]{Warnaar}. The proof techniques are different, though. The approach in Section \ref{secGenFuncsUnrestricted} seems to apply in~\cite[eq. (7.25)]{Warnaar} for $k = 1$ and $s=$ 1 or 2. In Section 3, we consider cylindric partitions with small profiles into distinct parts. We construct generating functions for such partitions with profiles $c=(1,1)$ or $c=(2,0)$, which turn out to be combinations of infinite products. We refer the reader to \cite{BU}, where cylindric partitions into distinct parts are also studied. We conclude by constructing an evidently positive series generating function for cylindric partitions with small profiles into odd parts. \section{Generating Functions of Cylindric Partitions With Profiles \\ $c=(1,1)$ and $c=(2,0)$} \label{secGenFuncsUnrestricted} By using (\ref{BorodinProd}), one can easily show that \begin{equation} \label{c=(1,1)} F_c(1,q) = \frac{(-q;q^2)_\infty}{(q;q)_\infty}, \end{equation} where $c=(1,1)$. In the following theorem, we will give a combinatorial proof of identity (\ref{c=(1,1)}). \begin{theorem} \label{Fc(1,q) when c=(1,1)} Let $c=(1,1)$. Then the generating function of cylindric partitions with profile $c$ is given by \begin{equation*} F_c(1,q) = \frac{(-q;q^2)_\infty}{(q;q)_\infty}. \end{equation*} \end{theorem} \begin{proof} We will show that each cylindric partition $\lambda$ with profile $c=(1,1)$ corresponds to a unique pair of partitions $(\mu,\beta)$, where $\mu$ is an ordinary partition and $\beta$ is a partition with distinct odd parts. Conversely, we will show that each pair of partitions $(\mu,\beta)$ will correspond to a unique cylindric partition with profile $c=(1,1)$, where $\mu$ is an ordinary partition and $\beta$ is a partition into distinct odd parts. In this way, we will get the desired generating function for cylindric partitions with profile $c=(1,1)$. \begin{align} \nonumber F_c(1,q) = \sum_{\lambda} q^{\vert \lambda \vert} = \sum_{(\mu, \beta)} q^{\vert \mu \vert + \vert \beta \vert} = \left( \sum_{\mu} q^{\vert \mu \vert} \right) \, \left( \sum_{\beta} q^{\vert \beta \vert} \right) = \frac{1}{ (q; q)_\infty } \, (-q; q^2)_\infty, \end{align} where $\lambda$, $\mu$, and $\beta$ are as described above. The first identity is the definition of $F_c(1,q)$. The second identity will be proven below. The third follows from the fact that $\mu$ and $\beta$ are independent, and the last one because unrestricted partitions and partitions into distinct odd parts have the displayed infinite product generating functions~\cite{TheBlueBook}. Let $\lambda$ be a cylindric partition with profile $c=(1,1)$. Then $\lambda$ has the following form: \begin{align} \nonumber \begin{array}{ccc ccc ccc} & & a_1 & a_2 & a_3 & \ldots &a_{r-1} & a_r \\ & b_1 & b_2 & b_3 & \ldots & b_s \\ \textcolor{lightgray}{a_1} & \textcolor{lightgray}{a_2} & \textcolor{lightgray}{a_3} & \textcolor{lightgray}{\ldots} & \textcolor{lightgray}{a_r} & \end{array}, \end{align} where $r-1 \leq s \leq r+1$. The last line is a repetition of the first one, and the parts are weakly decreasing from left to right and downward. If we allow zeros at the end of partitions, we can take $s = r$. Namely, if $s = r-1$, then we append $b_r = 0$; and if $s = r+1$, then we both append $a_{r+1} = 0$ and update $r+1$ as $r$. 
So, without loss of generality, our cylindric partition with profile $c=(1,1)$ looks like \begin{align} \nonumber \lambda = \begin{array}{ccc ccc ccc} & & a_1 & a_2 & a_3 & \ldots & a_{r-1} & a_r \\ & b_1 & b_2 & b_3 & \ldots & b_{r-1} & b_r \\ \textcolor{lightgray}{a_1} & \textcolor{lightgray}{a_2} & \textcolor{lightgray}{a_3} & \textcolor{lightgray}{\ldots} & \textcolor{lightgray}{a_{r-1}} & \textcolor{lightgray}{a_r} & \end{array}. \end{align} At this point, only $a_r$ or $b_r$ may be zero, but not both. Therefore either all parts or all parts but one in $\lambda$ are positive. During the process of obtaining $\mu$ and $\beta$, some or all parts of $\lambda$ may become zero. It is possible that $\mu$ is a partition consisting entirely of zeros, i.e., the empty partition. But that does not create a problem because $r$ is determined at the beginning, and it is fixed. Our goal is to transform $\lambda$ into another cylindric partition $\widetilde{\lambda}$ of the same profile \begin{align} \label{cylPtnLambdaTilde} \widetilde{\lambda} = \begin{array}{ccc ccc ccc} & & \widetilde{a}_1 & \widetilde{a}_2 & \widetilde{a}_3 & \ldots & \widetilde{a}_{r-1} & \widetilde{a}_r \\ & \widetilde{b}_1 & \widetilde{b}_2 & \widetilde{b}_3 & \ldots & \widetilde{b}_{r-1} & \widetilde{b}_r \\ \textcolor{lightgray}{\widetilde{a}_1} & \textcolor{lightgray}{\widetilde{a}_2} & \textcolor{lightgray}{\widetilde{a}_3} & \textcolor{lightgray}{\ldots} & \textcolor{lightgray}{\widetilde{a}_{r-1}} & \textcolor{lightgray}{\widetilde{a}_r} & \end{array} \end{align} with the additional property that $\widetilde{b}_j \geq \widetilde{a}_j$ for all $j = 1, 2, \ldots, r$, allowing zeros at the end. Then, parts of $\widetilde{\lambda}$ can be listed as \begin{align} \nonumber \mu = ( \widetilde{b}_1, \widetilde{a}_1, \widetilde{b}_2, \widetilde{a}_2, \ldots, \widetilde{b}_r, \widetilde{a}_r ) \end{align} to obtain the promised unrestricted partition $\mu$. The remaining inequalities $ \widetilde{a}_j \geq \widetilde{b}_{j+1} $ for $j = 1, 2, \ldots, (r-1)$ are ensured by the fact that $\widetilde{\lambda}$ is a cylindric partition with profile $c=(1,1)$. We will do this by a series of transformations on $\lambda$ which will be recorded as a partition $\beta$ into distinct odd parts. We will then argue that $ \vert \lambda \vert = \vert \mu \vert + \vert \beta \vert$. We read the parts of the cylindric partition \begin{align} \nonumber \lambda = \begin{array}{ccc ccc ccc} & & a_1 & a_2 & a_3 & \ldots & a_{r-1} & a_r \\ & b_1 & b_2 & b_3 & \ldots & b_{r-1} & b_r \\ \textcolor{lightgray}{a_1} & \textcolor{lightgray}{a_2} & \textcolor{lightgray}{a_3} & \textcolor{lightgray}{\ldots} & \textcolor{lightgray}{a_{r-1}} & \textcolor{lightgray}{a_r} & \end{array} \end{align} as the pairs: $[b_1,a_1], [b_2,a_2], [b_3,a_3], \ldots, [b_r,a_r]$. We start with the rightmost pair $[b_r,a_r]$. If $b_r \geq a_r$, there's nothing to do. We simply set $\widetilde{b}_r = b_r$, $\widetilde{a}_r = a_r$, and do not add any parts to $\beta$ yet. If $b_r < a_r$, then we \begin{itemize} \item switch places of $a_r$ and $b_r$, \item subtract 1 from each of the parts $a_1$, $a_2$, \ldots, $a_r$, $b_1$, $b_2$, \ldots $b_{r-1}$, \item set $\widetilde{b}_r = a_{r}-1$ and $\widetilde{a}_r = b_r$, \item add the part $(2r-1)$ to $\beta$. \end{itemize} We need to perform several checks here. First, we will show that at each of the steps listed above, the intermediate cylindric partition satisfies the weakly decreasing condition across rows and down columns. 
The affected parts are highlighted. \begin{align} \nonumber \begin{array}{ccc ccc ccc} & & a_1 & a_2 & a_3 & \ldots & \mathcolorbox{yellow!50}{a_{r-1}} & \mathcolorbox{yellow!50}{a_r} \\ & b_1 & b_2 & b_3 & \ldots & \mathcolorbox{yellow!50}{b_{r-1}} & \mathcolorbox{yellow!50}{b_r} \\ \textcolor{lightgray}{a_1} & \textcolor{lightgray}{a_2} & \textcolor{lightgray}{a_3} & \textcolor{lightgray}{\ldots} & \mathcolorbox{yellow!25}{\textcolor{lightgray}{a_{r-1}}} & \mathcolorbox{yellow!25}{\textcolor{lightgray}{a_r}} & \end{array} \end{align} \begin{align*} \Bigg\downarrow \textrm{ after switching places of } a_r \textrm{ and } b_r \end{align*} \begin{align} \nonumber \begin{array}{ccc ccc ccc} & & a_1 & a_2 & a_3 & \ldots & \mathcolorbox{yellow!50}{a_{r-1}} & \mathcolorbox{yellow!50}{b_r} \\ & b_1 & b_2 & b_3 & \ldots & \mathcolorbox{yellow!50}{b_{r-1}} & \mathcolorbox{yellow!50}{a_r} \\ \textcolor{lightgray}{a_1} & \textcolor{lightgray}{a_2} & \textcolor{lightgray}{a_3} & \textcolor{lightgray}{\ldots} & \mathcolorbox{yellow!25}{\textcolor{lightgray}{a_{r-1}}} & \mathcolorbox{yellow!25}{\textcolor{lightgray}{b_r}} & \end{array} \end{align} The inequalities $a_{r-1} \geq b_r$ and $a_{r-1} \geq a_r$ carry over from the original cylindric partition. The inequalities $b_{r-1} \geq a_r$ and $b_{r-1} \geq b_r$ are also two of the inequalities implied by the original cylindric partition. All other inequalities are untouched. At this point, we have not altered the weight of the cylindric partition yet. \begin{align*} \Bigg\downarrow \textrm{ after subtracting 1 from the listed parts } \end{align*} \begin{align} \nonumber \begin{array}{ccc ccc ccc} & & (a_1 - 1) & (a_2 - 1) & (a_3 - 1) & \ldots & \mathcolorbox{yellow!50}{(a_{r-1} - 1)} & \mathcolorbox{yellow!50}{b_r} \\ & (b_1 - 1) & (b_2 - 1) & (b_3 - 1) & \ldots & \mathcolorbox{yellow!50}{(b_{r-1} - 1)} & \mathcolorbox{yellow!50}{(a_r - 1)} \\ \textcolor{lightgray}{(a_1 - 1)} & \textcolor{lightgray}{(a_2 - 1)} & \textcolor{lightgray}{(a_3 - 1)} & \textcolor{lightgray}{\ldots} & \mathcolorbox{yellow!25}{\textcolor{lightgray}{(a_{r-1} - 1)}} & \mathcolorbox{yellow!25}{\textcolor{lightgray}{b_r}} & \end{array} \end{align} We argue that this is still a valid cylindric partition. The only inequalities that need to be verified are $a_{r-1} - 1 \geq b_r$ and $b_{r-1} - 1 \geq b_r$. Because of the original cylindric partition, we have $a_{r-1} \geq a_r$ and $b_{r-1} \geq a_r$. Because of the case we are examining $a_r > b_r$, so that $a_r - 1 \geq b_r$, both being integers. Combining $a_{r-1} - 1 \geq a_r -1$, $b_{r-1} - 1 \geq a_r - 1$ and $a_r - 1 \geq b_r$ yield the desired inequalities. \begin{align*} \Bigg\downarrow \textrm{ after relabeling } \end{align*} \begin{align} \nonumber \begin{array}{ccc ccc ccc} & & (a_1 - 1) & (a_2 - 1) & (a_3 - 1) & \ldots & {(a_{r-1} - 1)} & {\widetilde{a}_r} \\ & (b_1 - 1) & (b_2 - 1) & (b_3 - 1) & \ldots & {(b_{r-1} - 1)} & {\widetilde{b}_r} \\ \textcolor{lightgray}{(a_1 - 1)} & \textcolor{lightgray}{(a_2 - 1)} & \textcolor{lightgray}{(a_3 - 1)} & \textcolor{lightgray}{\ldots} & \textcolor{lightgray}{(a_{r-1} - 1)} & {\textcolor{lightgray}{\widetilde{a}_r}} & \end{array} \end{align} Now we have ${\widetilde{b}_r} \geq {\widetilde{a}_r}$ since $a_r - 1 \geq b_r$. Also, we subtracted 1 from exactly $2r-1$ parts. We add this $(2r-1)$ as a part in $\beta$. At the beginning, $\beta$ was the empty partition, so it is a partition into distinct odd parts both before and after this transformation. 
The sum of the weight of $\beta$ and the weight of the cylindric partition remains constant. It is possible that either or both $\widetilde{a}_r$ and $\widetilde{b}_r$ may be zero, along with some other parts. For example, in the extreme case that $a_1 = a_2 = \cdots = a_r = 1$, $b_1 = b_2 = \cdots = b_{r-1} = 1$ and $b_r = 0$, the cylindric partition becomes the empty partition after the transformation we illustrated. We should mention that after this point there is no harm in renaming $(a_i - 1)$'s $a_i$'s and $(b_i - 1)$'s $b_i$'s, where applicable. This will lead to the cleaner exposition down below. There is no loss of information, since the subtracted 1's are recorded as a part in $\beta$ already. Then, we repeat the following process for $j = (r-1), (r-2), \ldots, 2, 1$ in the given order. At the beginning of the $j$th step, we have the intermediate cylindric partition \begin{align} \nonumber \begin{array}{ccc ccc ccc c} & & a_1 & a_2 & \cdots & a_{j-1} & a_j & \widetilde{a}_{j+1} & \cdots & \widetilde{a}_r \\ & b_1 & b_2 & \cdots & b_{j-1} & b_j & \widetilde{b}_{j+1} & \cdots & \widetilde{b}_r & \\ \textcolor{lightgray}{a_1} & \textcolor{lightgray}{a_2} & \textcolor{lightgray}{\cdots} & \textcolor{lightgray}{a_{j-1}} & \textcolor{lightgray}{a_j} & \textcolor{lightgray}{\widetilde{a}_{j+1}} & \textcolor{lightgray}{\cdots} & \textcolor{lightgray}{\widetilde{a}_r} & & \end{array}. \end{align} The parts weakly decrease from left to right and downward, and the third line is a repetition of the first one. This intermediate cylindric partition satisfies the additional inequalities \begin{align} \nonumber \widetilde{b}_{j+1} \geq \widetilde{a}_{j+1}, \quad \widetilde{b}_{j+2} \geq \widetilde{a}_{j+2}, \quad \cdots \quad \widetilde{b}_{r} \geq \widetilde{a}_{r}. \end{align} Some or all parts in this intermediate partition may be zero. We focus on the $j$th pair $[b_j, a_j]$. If $b_j \geq a_j$ already, then we do not alter either the intermediate cylindric partition or the partition $\beta$ into distinct odd parts. We just relabel $b_j$ as $\widetilde{b}_j$, $a_j$ as $\widetilde{a}_j$, and move on to the $(j-1)$th pair. In the other case $a_j > b_j$, we \begin{itemize} \item switch places of $a_j$ and $b_j$, \item subtract 1 from each of the parts $a_1$, $a_2$, \ldots, $a_j$, $b_1$, $b_2$, \ldots $b_{j-1}$, \item set $\widetilde{b}_j = a_{j}-1$ and $\widetilde{a}_j = b_j$, \item add the part $(2j-1)$ to $\beta$. \end{itemize} We again perform several checks as in the $r$th case, but this time there are inequalities that involve parts that lie to the right of $a_j$ and $b_j$. We first show that the listed operations do not violate the weakly decreasing condition on the cylindric partition across rows and down columns. The affected parts are highlighted. 
We switch the places of $a_j$ and $b_j$ to obtain \begin{align} \nonumber \begin{array}{ccc ccc ccc c} & & a_1 & a_2 & \cdots & \mathcolorbox{yellow!50}{a_{j-1}} & \mathcolorbox{yellow!50}{b_j} & \mathcolorbox{yellow!50}{\widetilde{a}_{j+1}} & \cdots & \widetilde{a}_r \\ & b_1 & b_2 & \cdots & \mathcolorbox{yellow!50}{b_{j-1}} & \mathcolorbox{yellow!50}{a_j} & \mathcolorbox{yellow!50}{\widetilde{b}_{j+1}} & \cdots & \widetilde{b}_r & \\ \textcolor{lightgray}{a_1} & \textcolor{lightgray}{a_2} & \textcolor{lightgray}{\cdots} & \mathcolorbox{yellow!25}{\textcolor{lightgray}{a_{j-1}}} & \mathcolorbox{yellow!25}{\textcolor{lightgray}{b_j}} & \mathcolorbox{yellow!25}{\textcolor{lightgray}{\widetilde{a}_{j+1}}} & \textcolor{lightgray}{\cdots} & \textcolor{lightgray}{\widetilde{a}_r} & & \end{array}. \end{align} Each of the required inequalities \begin{align} \nonumber a_{j-1} \geq b_j \geq \widetilde{a}_{j+1}, \quad b_{j-1} \geq a_j \geq \widetilde{b}_{j+1}, \quad b_{j-1} \geq b_j, \quad a_{j-1} \geq a_j \geq \widetilde{a}_{j+1}, \quad \textrm{ and } \quad b_{j} \geq \widetilde{b}_{j+1} \end{align} is already implied by the cylindric partition before the change. The inequalities between the non-highlighted parts carry over. We then subtract one from each of the listed parts. The inequalities we need to verify are \begin{align} \nonumber a_{j-1} - 1 \geq b_j, \quad a_j - 1 \geq \widetilde{b}_{j+1}, \quad b_{j-1}-1 \geq b_j, \quad \textrm{ and } \quad a_j - 1 \geq \widetilde{a}_{j+1}. \end{align} By the cylindric partition two steps ago, we have \begin{align} \nonumber a_{j-1} \geq a_j, \quad b_j \geq \widetilde{b}_{j+1}, \quad b_{j-1} \geq a_j, \textrm{ and } \quad b_j \geq \widetilde{a}_{j+1}. \end{align} By the hypothesis, $a_j > b_j$, so $a_j-1 \geq b_j$. This last inequality, combined with the four displayed inequalities above, yields the inequalities we wanted. Then we relabel $b_j$ as $\widetilde{a}_j$ and $a_j$ as $\widetilde{b}_j$ in their respective new places. We have $\widetilde{b}_j \geq \widetilde{a}_j$, since $a_j-1 \geq b_j$. On the other hand, we subtracted a total of $(2j-1)$ 1's from the parts of the intermediate cylindric partition, and now we add $(2j-1)$ to $\beta$. $\beta$ still has distinct odd parts, because the smallest part previously added to $\beta$ is at least $(2j+1)$. It is also possible that $\beta$ was empty before adding $(2j-1)$. We should note that $(2j-1)$ is the smallest part in $\beta$ at the moment. In any case, we have \begin{align} \nonumber \vert \lambda \vert = \vert \beta \vert + \textrm{the weight of the intermediate cylindric partition}, \end{align} where $\lambda$ is the original cylindric partition, before any changes. As after the $r$th step, there is no danger in renaming the $(a_i - 1)$'s $a_i$'s and the $(b_i - 1)$'s $b_i$'s, where necessary. Once this process is finished, we have the cylindric partition $\widetilde{\lambda}$ as given in \eqref{cylPtnLambdaTilde}. The nonzero parts of $\widetilde{\lambda}$ are listed as parts of the unrestricted partition $\mu$, the alterations made in obtaining $\widetilde{\lambda}$ are recorded as parts of the partition $\beta$ into distinct odd parts, and one direction of the proof is complete. Next, given $(\mu, \beta)$, where $\mu$ is an unrestricted partition and $\beta$ is a partition into distinct odd parts, we will produce a unique cylindric partition $\lambda$ with profile $c=(1,1)$ such that \begin{align} \nonumber \vert \lambda \vert = \vert \mu \vert + \vert \beta \vert.
\end{align} The parts of $\mu$ in their respective order are relabeled as: \begin{align} \nonumber \mu = & \mu_1 + \mu_2 + \cdots + \mu_l \\ = & \widetilde{b}_1 + \widetilde{a}_1 + \cdots + \widetilde{b}_s + \widetilde{a}_s. \end{align} The relabeling requires an even number of parts, which can be solved by appending a zero at the end of $\mu$ if necessary. Then, the $\widetilde{b}$'s and $\widetilde{a}$'s are arranged as the cylindric partition \begin{align} \nonumber \widetilde{\lambda} = \begin{array}{ccc ccc} & & \widetilde{a}_1 & \widetilde{a}_2 & \cdots & \widetilde{a}_s \\ & \widetilde{b}_1 & \widetilde{b}_2 & \cdots & \widetilde{b}_s & \\ \textcolor{lightgray}{\widetilde{a}_1} & \textcolor{lightgray}{\widetilde{a}_2} & \textcolor{lightgray}{\cdots} & \textcolor{lightgray}{\widetilde{a}_s} & & \end{array}. \end{align} All of the required inequalities $\widetilde{a}_j \geq \widetilde{a}_{j+1}$, $\widetilde{b}_j \geq \widetilde{b}_{j+1}$, $\widetilde{a}_j \geq \widetilde{b}_{j+1}$, and $\widetilde{b}_j \geq \widetilde{a}_{j+1}$ for $j = 1, 2, \ldots, s-1$ are implied by the inequalities between parts of $\mu$. $\widetilde{\lambda}$ has the additional property that $\widetilde{b}_j \geq \widetilde{a}_j$ for $j = 1, 2, \ldots, s$. This is the $\widetilde{\lambda}$ we obtained in the first half of the proof, except for the possibly different number of zeros at the end(s). The positive parts and their positions are the same. For the smallest part $(2j-1)$ in $\beta$, we do the following. \begin{itemize} \item delete the part from $\beta$, \item add 1 to all parts $a_1, \ldots, \widetilde{a}_{j-1}, b_1, \ldots, \widetilde{b}_j$, \item switch places of $\widetilde{a}_j$ and $(\widetilde{b}_j + 1)$, \item rename $(a_1 + 1)$, \ldots, $(\widetilde{a}_{j-1} + 1)$, $(\widetilde{b}_j + 1)$, $(b_1 + 1)$, \ldots, $(\widetilde{b}_{j-1} + 1)$, $\widetilde{a_j}$, in their respective order as $a_1$, \ldots, $a_{j-1}$, $a_j$, $b_1$, \ldots, $b_{j-1}$, $b_j$. \end{itemize} We repeat this procedure until $\beta$ becomes the empty partition, at which time $\widetilde{\lambda}$ has evolved into $\lambda$, the cylindric partition with profile $c=(1,1)$ we have been aiming at. There are a few details to clarify, including the notation. We start by verifying that the inequalities required by the cylindric partition are satisfied at each step. The affected parts are highlighted. We start with the cylindric partition just before the transformations. We add one to each of the listed parts. The required inequalities are naturally satisfied here, because the parts which are supposed to be weakly greater are increased. Then, we switch places of $\widetilde{a}_j$ and $\widetilde{b}_j + 1$. 
\begin{align} \nonumber \begin{array}{ccc ccc ccc} & & (a_1 + 1) & \cdots & \mathcolorbox{yellow!50}{ (\widetilde{a}_{j-1} + 1) } & \mathcolorbox{yellow!50}{ (\widetilde{b}_j + 1) } & \mathcolorbox{yellow!50}{\widetilde{a}_{j+1}} & \ldots & \widetilde{a}_s \\ & (b_1 + 1) & \cdots & \mathcolorbox{yellow!50}{ (\widetilde{b}_{j-1} + 1) } & \mathcolorbox{yellow!50}{ \widetilde{a}_j } & \mathcolorbox{yellow!50}{\widetilde{b}_{j+1}} & \ldots & \widetilde{b}_s & \\ \textcolor{lightgray}{ (a_1 + 1) } & \textcolor{lightgray}{\cdots} & \mathcolorbox{yellow!25}{ \textcolor{lightgray}{ (\widetilde{a}_{j-1} + 1) } } & \mathcolorbox{yellow!25}{ \textcolor{lightgray}{ (\widetilde{b}_j + 1) } } & \mathcolorbox{yellow!25}{ \textcolor{lightgray}{\widetilde{a}_{j+1}} } & \ldots & \widetilde{a}_s & & \end{array} \end{align} Again, the required inequalities are implied by the cylindric partition in the previous step. At the beginning of the first run, we do not have $a_1$ or $b_1$ in the cylindric partition, but rather $\widetilde{a}_1$ or $\widetilde{b}_1$, respectively. However, at the end of each run, the appropriate number of leftmost parts in the first and the second rows of the cylindric partition are labeled $a_1$, $b_1$, etc. Because we deleted $(2j-1)$ from $\beta$ and we added 1 to exactly $(2j-1)$ of the parts in the intermediate cylindric partition, the sum of the weights of $\beta$ and of the intermediate cylindric partition remains constant. It equals the sum of the weights of $\mu$ and the original $\beta$. The relabeling of $(a_1 + 1)$ as $a_1$, etc., does not interfere with any of the operations before it, and certainly not with any of the possible operations that come after it; therefore, it should not cause any confusion. We tacitly assumed that $j < s$ in the displayed cylindric partition above. This does not have to be the case, as $\beta$ may have a part greater than the length of $\mu$. The remedy is to append zeros and increase $s$ as much as necessary. This takes care of the extreme case of $\widetilde{\lambda}$ being the empty partition. All of the arguments above apply to non-negative parts as well as strictly positive parts. We also implicitly assumed that $\beta$ is nonempty to start with. If $\beta$ is the empty partition, we do not need to perform any operations on $\widetilde{\lambda}$ at all; we simply call the $\widetilde{a}_j$'s $a_j$'s and the $\widetilde{b}_j$'s $b_j$'s. Once all parts of $\beta$ are exhausted, we clear the trailing pairs of zeros in the cylindric partition at hand, and we declare the obtained cylindric partition to be $\lambda$. Because the sum of the weights of $\beta$ and of the intermediate cylindric partition remained constant at each step of the transformation and $\beta$ is the empty partition at the end, we have \begin{align} \nonumber \vert \lambda \vert = \vert \mu \vert + \vert \textrm{(the original)} \beta \vert. \end{align} The additions and subtractions of 1's and the additions and deletions of parts of $\beta$ in the two phases are performed in exact reverse order, and, apart from the relabelings, they are clearly inverse operations of each other. Hence the process is reversible, and the cylindric partitions $\lambda$ with profile $c=(1,1)$ are in one-to-one correspondence with the pairs $(\mu, \beta)$ of an unrestricted partition and a partition into distinct odd parts. The relabelings in the two phases of the proof are consistent at the beginning and at the end of the transformation, and between the rounds of operations. This concludes the proof. \end{proof}
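The first half of the proof is an explicit algorithm, and it may help to record it in executable form. The following Python sketch is an illustration we add for convenience, not part of the proof; the function and variable names are ours. It implements the map $\lambda \mapsto (\mu,\beta)$ for profile $c=(1,1)$: it scans the pairs $[b_j,a_j]$ from right to left, performs the switch-and-subtract move whenever $a_j>b_j$, and records each subtracted amount $2j-1$ as a part of $\beta$.
\begin{verbatim}
def decompose(top, bottom):
    # map a profile-(1,1) cylindric partition, given by its two rows,
    # to the pair (mu, beta) constructed in the proof above
    a, b = list(top), list(bottom)
    r = max(len(a), len(b))
    a += [0] * (r - len(a))          # pad so that both rows have r entries
    b += [0] * (r - len(b))
    beta = []
    for j in range(r, 0, -1):        # pairs [b_j, a_j], from right to left
        if a[j - 1] > b[j - 1]:
            a[j - 1], b[j - 1] = b[j - 1], a[j - 1]  # switch a_j and b_j
            for i in range(j - 1):   # subtract 1 from a_1..a_{j-1}, b_1..b_{j-1}
                a[i] -= 1
                b[i] -= 1
            b[j - 1] -= 1            # the new b_j is a_j - 1
            beta.append(2 * j - 1)   # exactly 2j-1 ones were subtracted
    mu = [p for pair in zip(b, a) for p in pair if p > 0]
    return mu, beta                  # beta is already in decreasing order

# the first example below: rows (7,4,4,3) and (6,5,4)
# yield mu = [5,5,4,3,3,3,2] and beta = [7,1]
print(decompose([7, 4, 4, 3], [6, 5, 4]))
\end{verbatim}
The inverse map is obtained by running the same moves backwards, as in the second half of the proof.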
The following example demonstrates how we construct the pair of partitions $(\mu, \beta)$ if we are given a cylindric partition $\lambda$ with profile $c=(1,1)$. \begin{ex} \normalfont Let $\lambda$ be the following cylindric partition with profile $c=(1,1)$: \[ \begin{array}{ccc ccc ccc} & & & 7 & 4 & 4 & 3 & \\ & & 6 & 5 & 4 & \end{array} \] We read the parts of $\lambda$ as pairs: $[6,7], [5,4], [4,4]$ and $[0,3]$. \[ \begin{array}{ccc ccc ccc} & & & 7 & 4 & 4 & 3 & \\ & & 6 & 5 & 4 & 0 \end{array} \] We now start to perform the moves defined in the proof of Theorem \ref{Fc(1,q) when c=(1,1)}. We first switch the places of $0$ and $3$ in the rightmost pair and we get the following intermediate partition: \[ \begin{array}{ccc ccc ccc} & & & 7 & 4 & 4 & 0 & \\ & & 6 & 5 & 4 & \circled{3} \end{array} \] \begin{center} $\Big\downarrow \scriptsize\parbox{7cm}{subtract $1$ from the circled 3 and from the parts located above it and to its left}$ \end{center} \[ \begin{array}{ccc ccc ccc} & & & 6 & 3 & 3 & 0 \\ & & 5 & 4 & 3 & 2 \end{array} \] We changed the total weight by $7$, so we have $\beta_1=7$. We do not touch the pairs $[3,3]$ and $[4,3]$ since $3 \geq 3$ and $4 \geq 3$. We now correct the places of $6$ and $5$, and then we perform the last possible move: \[ \begin{array}{ccc ccc ccc} & & & 5 & 3 & 3 & 0 \\ & & \circled{6} & 4 & 3 & 2 \end{array} \] \hspace{70mm} $\Big\downarrow \scriptsize\parbox{7cm}{subtract $1$ from the circled 6 }$ \[ \begin{array}{ccc ccc ccc} & & & 5 & 3 & 3 & 0 \\ & & 5 & 4 & 3 & 2 \end{array} \] We changed the total weight by $1$, so we have $\beta_2=1$. Therefore, we decomposed $\lambda$ into the pair of partitions $(\mu, \beta)$, where $\beta=7+1$ and $\mu=5+5+4+3+3+3+2$. \end{ex} The following example demonstrates how we construct a unique cylindric partition $\lambda$ with profile $c=(1,1)$ if we are given a pair of partitions $(\mu,\beta)$ as described in the proof of Theorem \ref{Fc(1,q) when c=(1,1)}. \begin{ex}\normalfont Let $\mu=6+5+5+3+1$ and $\beta=9+7+3$. We read the parts of $\mu$ as follows: \[ \begin{array}{ccc ccc ccc} & & & 5 & 3 & 0 & 0 & 0 & \\ & & 6 & 5 & 1 & 0 & 0 & \end{array} \] The first part of $\beta$ is $9$. Since we want to increase the weight by $9$, we append as many $0$'s as we need when we construct the pairs.
\[ \begin{array}{ccc ccc ccc} & & & 5 & 3 & 0 & 0 & 0 & \\ & & 6 & 5 & 1 & 0 & \circled{0} & \end{array} \] \hspace{70mm} $\Big\downarrow \scriptsize\parbox{7cm}{increase by $1$ circled 0 and all corresponding parts }$ \[ \begin{array}{ccc ccc ccc} & & & 6 & 4 & 1 & 1 & 0 & \\ & & 7 & 6 & 2 & 1& 1 & \end{array} \] \hspace{70mm} $\Big\downarrow \scriptsize\parbox{7cm}{correct the places of parts in the last pair }$ \[ \begin{array}{ccc ccc ccc} & & & 6 & 4 & 1 & 1 & 1 & \\ & & 7 & 6 & 2 & \circled{1}& 0 & \end{array} \] \hspace{70mm} $\Big\downarrow \scriptsize\parbox{7cm}{increase by $1$ circled 0 and all corresponding parts }$ \[ \begin{array}{ccc ccc ccc} & & & 7 & 5 & 2 & 1 & 1 & \\ & & 8 & 7 & 3 & 2& 0 & \end{array} \] \hspace{70mm} $\Big\downarrow \scriptsize\parbox{7cm}{correct the places of parts in the second pair from the right }$ \[ \begin{array}{ccc ccc ccc} & & & 7 & 5 & 2 & 2 & 1 & \\ & & 8 & \circled{7} & 3 & 1& 0 & \end{array} \] \hspace{70mm} $\Big\downarrow \scriptsize\parbox{7cm}{increase by $1$ circled 7 and all corresponding parts }$ \[ \begin{array}{ccc ccc ccc} & & & 8 & 5 & 2 & 2 & 1 & \\ & &9 & 8 & 3 & 1& 0 & \end{array} \] \hspace{70mm} $\Big\downarrow \scriptsize\parbox{7cm}{correct the places of parts in the fourth pair from the right }$ \[ \begin{array}{ccc ccc ccc} \lambda= & & & 8 & 8 & 2 & 2 & 1 & \\ & &9 & 5 & 3 & 1& 0 & \end{array} \] $\lambda$ is the unique cylindric partition with profile $c=(1,1)$ corresponding to the pair of partitions $(\mu,\beta)$. \end{ex} \begin{theorem} Let $c=(1,1)$. Then the generating function of cylindric partitions with profile $c$ is given by \begin{equation*} F_c(z,q) = \frac{(-zq;q^2)_\infty}{(zq;q)_\infty}. \end{equation*} where the exponent of variable $z$ keeps track of the largest part of the cylindric partitions. \begin{proof} In the proof of Theorem \ref{Fc(1,q) when c=(1,1)}, we show that there is a one-to-one correspondence between the cylindric partitions with profile $c=(1,1)$ and the pairs of partitions $(\mu,\beta)$ such that $\mu$ is an ordinary partition and $\beta$ is a partition into distinct odd parts. For the proof, we will use this correspondence. If we take a pair of partitions $(\mu,\beta)$, then during the construction of $\lambda$, each part of $\beta$ increases the largest part of $\mu$ by $1$. Hence, when the whole procedure is done, the largest part of $\mu$ is increased by the number of parts in $\beta$. Because of that fact, we write the generating function of $\beta$ by keeping track of the number of parts, which gives $(-zq;q^2)_\infty$. The partition $\mu$ is an ordinary partition and the generating function of ordinary partitions such that the largest part is $M$ is given by \begin{align*} \frac{q^M}{(1-q)\ldots(1-q^M)}. \end{align*} If we take sum over all $M$ by keeping track of the largest parts with the exponent of $z$, we get \begin{align*} \sum_{M\geq0}\frac{z^Mq^M}{(1-q)\ldots(1-q^M)}=\sum_{M\geq0}\frac{(zq)^M}{(q;q)_M}=\frac{1}{(zq;q)_\infty}. \end{align*} The second identity follows from Euler's identity~\cite{TheBlueBook}. There is a one-to-one correspondence between the partitions with exactly $k$ parts and the partitions with largest part equals to $k$ via conjugation \cite{TheBlueBook}. Thus, the latter generating function can also be considered as the generating function of ordinary partitions, where the exponent of $z$ keeps track of the number of parts. Finally, since $\mu$ and $\beta$ are two independent partitions, we get the desired generating function. 
\end{proof} \end{theorem} \begin{theorem} \label{Fc(1,q) when c=(2,0)} Let $c=(2,0)$. Then the generating function of cylindric partitions with profile $c$ is given by \begin{equation*} F_c(1,q) = \frac{(-q^2;q^2)_\infty}{(q; q)_\infty}. \end{equation*} \end{theorem} \begin{proof} The proof is very similar to the proof of Theorem \ref{Fc(1,q) when c=(1,1)}. We read the parts of the cylindric partition \begin{align} \nonumber \lambda = \begin{array}{ccc ccc ccc} & & a_0 & a_1 & a_2 & \ldots & a_{r-1} & a_r \\ & &b_1 & b_2 & \ldots & b_{r-1} & b_{r} \\ \textcolor{lightgray}{a_0} & \textcolor{lightgray}{a_1} & \textcolor{lightgray}{a_2} & \textcolor{lightgray}{a_3} & \textcolor{lightgray}{\ldots} & \textcolor{lightgray}{a_r} & \end{array}. \end{align} as the pairs: $[b_1,a_1], [b_2,a_2], [b_3,a_3], \ldots, [b_r,a_r]$. We note that the largest part of the cylindric partition $\lambda$, namely, $a_0$ is not contained in any pairs. We consider it as a single part. $a_0$ is not switched with any part, but it is increased or decreased accordingly when we construct or incorporate $\beta$ as in the proof of Theorem \ref{Fc(1,q) when c=(1,1)}. Thus, $\beta$ consists of distinct even parts, as opposed to distinct odd parts. \end{proof} If we construct the generating function of cylindric partitions with profile $c=(2,0)$ by using \eqref{BorodinProd}, we get \begin{equation*} F_c(1,q) = \frac{1}{(q;q)_\infty(q^2;q^4)_\infty}. \end{equation*} If we compare that generating function with the generating function in Theorem \ref{Fc(1,q) when c=(2,0)}, we see that they are equal. Both generating functions have the factor $(q;q)_\infty$ in the denominators. If we cancel that factor, we should check whether \begin{equation} \label{Borodin-check} \frac{1}{(q^2;q^4)_\infty}=(-q^2;q^2)_\infty \end{equation} or not. This identity holds due to the beautiful identity of Euler which states that the number of partitions of a non-negative integer $n$ into odd parts is equal to the number of partitions of $n$ into distinct parts. To obtain \eqref{Borodin-check}, we make the substitution $q^2 \rightarrow q$ in Euler's identity. \section{Cylindric partitions into distinct parts} If all parts in a cylindric partition with profile $c=(1,1)$ are distinct, then the inequalities between parts are strict. Given such a partition, if we label the parts in the top row as $a_1, a_2, \ldots$, and the parts in the bottom row as $b_1, b_2, \ldots$, \begin{align} \label{ptnC11CylDistPartGeneric} \begin{array}{ccc ccc ccc} & & a_1 & a_2 & \cdots & a_{r-1} & a_r & \cdots & a_n \\ & b_1 & b_2 & \cdots & b_{r-1} & b_r & \cdots & b_n & \\ \textcolor{lightgray}{a_1} & \textcolor{lightgray}{a_2} & \textcolor{lightgray}\cdots & \textcolor{lightgray}{a_{r-1}} & \textcolor{lightgray}{a_r} & \textcolor{lightgray}\cdots & \textcolor{lightgray}{a_n} & & \end{array}, \end{align} we have the inequalities \begin{align} \nonumber a_r > a_{r+1}, \qquad b_r > b_{r+1}, \qquad a_r > b_{r+1}, \qquad \textrm{ and } \qquad b_r > a_{r+1} \end{align} for $r = 1, 2, \ldots, n-1$. In particular, \begin{align} \label{ineqDistC11} \mathrm{min}\{ a_r, b_r \} > \mathrm{max}\{ a_{r+1}, b_{r+1} \} \end{align} for $r = 1, 2, \ldots, n-1$. As in the proof of Theorem \ref{Fc(1,q) when c=(1,1)}, we lose no generality by assuming that the top row and the bottom row have equal number of parts. We achieve this by allowing one of $a_n$ or $b_n$ to be zero. 
The inequality \eqref{ineqDistC11} ensures that we can switch the places of $a_r$ and $b_r$ without violating the condition for cylindric partition with profile $c=(1, 1)$ for $r = 1, 2, \ldots, n$. There are $2^n$ ways to do this. Therefore, given a cylindric partition into $2n$ distinct parts with profile $c=(1, 1)$, we can switch places of $a_r$ and $b_r$ to make $b_r > a_r$ for $r = 1, 2, \ldots, n$ so that \begin{align} \label{ineqDistC11aug} b_1 > a_1 > b_2 > a_2 > \cdots > b_n > a_n, \end{align} where $a_n$ is possibly zero. In other words, we obtain a partition into $2n$ distinct parts in which the smallest part is allowed to be zero. Conversely, if we start with a partition into $2n$ distinct parts in which the smallest part can be zero, we can label the parts as in \eqref{ineqDistC11aug}, then place them as in \eqref{ptnC11CylDistPartGeneric}, and allow switching places of $a_r$ and $b_r$ for $r = 1, 2, \ldots, n$; then we will have generated a cylindric partition into $2n$ distinct parts with profile $c=(1,1)$, where one of the parts is allowed to be zero. It is clear that any such cylindric partition corresponds to a unique partition into an even number of distinct parts, and any partition into $2n$ distinct parts gives rise to $2^n$ cylindric partitions. We have almost proved the following lemma. \begin{lemma} \label{lemmaGenFuncCylPtnC11Dist} Let $d_{(1,1)}(m, n)$ denote the number of cylindric partitions of $n$ into distinct parts with profile $c = (1, 1)$ and the largest part equal to $m$. Then, \begin{align} \nonumber D_{(1,1)}(t, q) = \sum_{n, m \geq 0} d_{(1,1)}(m, n) t^m q^n = \sum_{n \geq 0} \frac{ q^{\binom{2n}{2}} t^{2n-1} 2^n }{ (tq; q)_{2n} }. \end{align} \end{lemma} \begin{proof} We build the proof on the discussion preceding the statement of the lemma. A partition into $2n$ distinct parts is generated by \begin{align} \nonumber \frac{q^{\binom{2n}{2}}}{ (q; q)_{2n} }, \end{align} where the smallest part is allowed to be zero. When we want to keep track of the largest part, we start by the minimal partition into $2n$ distinct parts \begin{align} \nonumber (2n-1), (2n-2), \ldots, 1, 0, \end{align} hence the $t^{2n-1}$ in the numerator in the rightmost sum in the Lemma. Then, for $j = 1, 2, \ldots, 2n$, the factor $(1 - tq^j)$ in the denominator contributes to the $j$ largest parts in the partition into distinct parts. \end{proof} A similar discussion ensues for cylindric partitions into distinct parts with profile $c=(2, 0)$. The generic cylindric partition is \begin{align} \label{ptnC20CylDistPartGeneric} \begin{array}{ccc ccc ccc c} & & a_0 & a_1 & a_2 & \cdots & a_{r-1} & a_r & \cdots & a_n \\ & & b_1 & b_2 & \cdots & b_{r-1} & b_r & \cdots & b_n & \\ \textcolor{lightgray}{a_0} & \textcolor{lightgray}{a_1} & \textcolor{lightgray}{a_2} & \textcolor{lightgray}\cdots & \textcolor{lightgray}{a_{r-1}} & \textcolor{lightgray}{a_r} & \textcolor{lightgray}\cdots & \textcolor{lightgray}{a_n} & & \end{array}, \end{align} where the top row contains $n+1$ parts, the bottom row contains $n$ parts, and one of $a_n$ or $b_n$ is allowed to be zero. We still have the inequalities \eqref{ineqDistC11}. In addition, $a_0$ is the absolute largest part. Needless to say that $a_0$ is zero if and only if we have the empty cylindric partition. 
$a_r$ and $b_r$ can switch places for $r = 1, 2, \ldots, n$ to obtain the augmented chain of inequalities \begin{align} \nonumber a_0 > b_1 > a_1 > b_2 > a_2 > \cdots > b_n > a_n, \end{align} to get a partition into $(2n+1)$ distinct parts, where the smallest part is allowed to be zero. Conversely, any partition into $(2n+1)$ distinct parts in which the smallest part is allowed to be zero gives rise to exactly $2^n$ cylindric partitions into distinct parts with profile $c = (2, 0)$. We have again almost proved the following lemma. \begin{lemma} \label{lemmaGenFuncCylPtnC20Dist} Let $d_{(2,0)}(m, n)$ denote the number of cylindric partitions of $n$ into distinct parts with profile $c = (2, 0)$ and the largest part equal to $m$. Then, \begin{align} \nonumber D_{(2,0)}(t, q) = \sum_{n, m \geq 0} d_{(2,0)}(m, n) t^m q^n = \sum_{n \geq 0} \frac{ q^{\binom{2n+1}{2}} t^{2n} 2^n }{ (tq; q)_{2n+1} }. \end{align} \end{lemma} The proof is almost the same as that of Lemma \ref{lemmaGenFuncCylPtnC11Dist}, and is skipped. Next, we dissect one of Euler's $q$-series identities~\cite{TheBlueBook} \begin{align} \nonumber \sum_{n \geq 0} \frac{ q^{\binom{n}{2}} a^n }{ (q; q)_n } = (-a; q)_\infty \end{align} to separate odd and even powers of $a$. \begin{align} \nonumber \sum_{n \geq 0} \frac{ q^{\binom{2n}{2}} (a^2)^n }{ (q; q)_{2n} } = \frac{ (-a; q)_\infty + (a; q)_\infty }{2}, \qquad a \sum_{n \geq 0} \frac{ q^{\binom{2n+1}{2}} (a^2)^n }{ (q; q)_{2n+1} } = \frac{ (-a; q)_\infty - (a; q)_\infty }{2}. \end{align} If we plug in $t = 1$ in Lemmas \ref{lemmaGenFuncCylPtnC11Dist} and \ref{lemmaGenFuncCylPtnC20Dist}, and $a = \sqrt{2}$ in the above formulas, we obtain the following theorem. We repeat the descriptions of the partition enumerants for ease of reference. \begin{theorem} \label{thmGenFuncCylPtnDistC11C20} Let $d_{(1,1)}(m, n)$ and $d_{(2, 0)}(m, n)$ be the number of cylindric partitions of $n$ into distinct parts where the largest part is $m$ with profiles $c=(1,1)$ and $c=(2,0)$, respectively. Let \begin{align} \nonumber D_{(1, 1)}(t, q) = \sum_{m, n \geq 0} d_{(1,1)}(m, n) t^m q^n, \quad \textrm{ and } \quad D_{(2, 0)}(t, q) = \sum_{m, n \geq 0} d_{(2,0)}(m, n) t^m q^n \end{align} be the respective generating functions. Then, \begin{align} \nonumber D_{(1,1)}(1, q) & = \frac{ (-\sqrt{2}; q)_\infty + ( \sqrt{2}; q)_\infty }{ 2 }, \\ \nonumber D_{(2,0)}(1, q) & = \frac{ (-\sqrt{2}; q)_\infty - ( \sqrt{2}; q)_\infty }{ 2 \sqrt{2} }, \\ \nonumber D_{(1,1)}(1, q) + \sqrt{2} \; D_{(2,0)}(1, q) & = (-\sqrt{2}; q)_\infty. \end{align} \end{theorem} \section{Discussion} In Theorem \ref{Fc(1,q) when c=(1,1)}, to construct the desired generating function for cylindric partitions with profile $c=(1,1)$, we decompose each cylindric partition $\lambda$ with profile $c=(1,1)$ into a pair of partitions $(\mu,\beta)$, where $\mu$ is an ordinary partition and $\beta$ is a partition with distinct odd parts. Conversely, if a pair $(\mu,\beta)$ is given, then we find a unique cylindric partition $\lambda$ with profile $c=(1,1)$. In that way, we find a one-to-one correspondence between $\lambda$'s and $(\mu,\beta)$'s. Here, the partitions $\mu$ and $\beta$ are two independent partitions. In the following theorem, we consider the pair of partitions $(\mu,\beta)$ such that $\mu$ and $\beta$ are dependent partitions and we construct a family of cylindric partitions with profile $c=(1,1)$ corresponding to the pairs $(\mu,\beta)$. 
\begin{theorem} Let $O_c(1,q)$ be the generating function of cylindric partitions with profile $c=(1,1)$ such that all parts are odd. Then, \begin{align} \label{O_c(1,q)} O_c(1,q)=&\sum_{k \geq 0}\frac{q^{2k}}{(q^2;q^2)_{2k}}.(-q^2;q^4)_{k}+\sum_{k \geq 0}\frac{q^{2k+1}}{(q^2;q^2)_{2k+1}}.(-q^2;q^4)_{k+1} \\ =&\sum_{k \geq 0}\frac{q^{2k}(-q^2;q^4)_{k}(1+q-q^{4k+2}+q^{4k+3})}{(q^2;q^2)_{2k+1}} \nonumber. \end{align} \end{theorem} \begin{proof} We will construct a one-to-one correspondence between the cylindric partitions with profile $c=(1,1)$ such that all parts are odd and the pairs of partitions $(\mu,\beta)$, where $\mu$ is a partition into odd parts, $\beta$ is a partition into distinct odd parts such that each part is counted twice. Moreover, $\mu$ and $\beta$ are dependent on each other with respect to the number of parts in $\mu$ and the largest odd part in $\beta$ as follows: \begin{enumerate}[(a)] \item if $\mu$ has $2k$ parts, the largest odd part in $\beta$ is $2k-1$, \item if $\mu$ has $2k+1$ parts, the largest odd part in $\beta$ is $2k+1$. \end{enumerate} By using exactly the same construction in the proof of Theorem \ref{Fc(1,q) when c=(1,1)}, whenever a pair of partitions $(\mu,\beta)$ as in case $(a)$ or $(b)$, we may construct a unique cylindric partition $\lambda$ with profile $c=(1,1)$ such that all parts are odd. The only change in the construction is that we increase/decrease the weight of each part in the intermediate cylindric partition by $2$ instead of $1$. It is clear that the parts of the cylindric partition have to be odd, since all parts of the partition $\mu$ are odd and the parts of $\beta$ are counted twice, i.e., we do not change the parity of the parts during the transformations. Conversely, if we are given a cylindric partition $\lambda$ with profile $c=(1,1)$ such that all parts are odd, then we may find a unique pair of partitions $(\mu,\beta)$ just described as above. The first term in the sum in \eqref{O_c(1,q)} is the generating function of pairs $(\mu,\beta)$ having the property $(a)$ and the latter term in the summation is the generating function of pairs $(\mu,\beta)$ having the property $(b)$. \end{proof} A natural question is to ask if similar constructions to the proof of Theorems \ref{Fc(1,q) when c=(1,1)} and \ref{Fc(1,q) when c=(2,0)} could be done for cylindric partitions with larger profiles. Another natural question is to ask if similar infinite product generating functions to Theorem \ref{thmGenFuncCylPtnDistC11C20} could be discovered for cylindric partitions with larger profiles into distinct parts. \section*{Acknowledgements} The authors are indebted to the anonymous referee for careful scrutinization of the paper, for helpful suggestions to improve the exposition, and for pointing out~\cite{BU}. The authors also thank Ole Warnaar for notifying them of the connections between Section \ref{secGenFuncsUnrestricted} and~\cite{Warnaar}. \bibliographystyle{amsplain} \begin{thebibliography}{10} \bibitem{Andrews-PNAS} George E. Andrews, An analytic generalization of the Rogers-Ramanujan identities for odd moduli, \emph{Proceedings of the National Academy of Sciences}, {\bf 71} (10):4082--4085, 1974. \url{https://doi.org/10.1073/pnas.71.10.4082} \bibitem{TheBlueBook} George E. Andrews, \emph{The theory of partitions}, No. 2. Cambridge university press, 1998. \bibitem {Borodin} Alexei Borodin, Periodic Schur process and cylindric partitions, \emph{Duke Math. J.}, {\bf 140} (3):391--468, 2007. 
\url{https://doi.org/10.1215/S0012-7094-07-14031-6} \bibitem{BU} Walter Bridges and Ali Kemal Uncu, Weighted cylindric partitions, \emph{Journal of Algebraic Combinatorics}, {\bf 56}:1309--1337, 2022. \url{https://doi.org/10.1007/s10801-022-01156-9} \bibitem{CDU} Sylvie Corteel, Jehanne Dousse and Ali Kemal Uncu, Cylindric partitions and some new Rogers–Ramanujan identities, \emph{Proc. Amer. Math. Soc.}, {\bf 150} (2022):481--497, 2021. \url{https://doi.org/10.1090/proc/15570} \bibitem{CW} Sylvie Corteel and Trevor Welsh, The $A_2$ Rogers–Ramanujan Identities Revisited, \emph{Ann. Comb.}, {\bf 23}:683--694, 2019. \url{https://doi.org/10.1007/s00026-019-00446-7} \bibitem{OW} Omar Foda and Trevor A. Welsh, Cylindric partitions, ${{\boldsymbol{ \mathcal W }}}_{r}$ characters and the Andrews–Gordon–Bressoud identities, \emph{Journal of Physics A: Mathematical and Theoretical}, {\bf 49} (16):164004, 2016. \url{https://doi.org/10.1088/1751-8113/49/16/164004} \bibitem {GesselKrattenthaler} Ira M. Gessel and C. Krattenthaler, Cylindric partitions, \emph{Trans. Amer. Math. Soc.}, {\bf 349} (2):429--479, 1997. \url{http://dx.doi.org/10.1090/S0002-9947-97-01791-1} \bibitem{IKS} Amer Iqbal, Can Kozçaz and Khurram Shabbir, Refined topological vertex, cylindric partitions and U(1) adjoint theory, \emph{Nuclear Physics B}, {\bf 838} (3):422--457, 2010. \url{https://doi.org/10.1016/j.nuclphysb.2010.06.010} \bibitem{Warnaar} S. Ole Warnaar, The $A_2$ Andrews-Gordon identities and cylindric partitions, {\tt arXiv:2111.07550 [math.CO]}. \end{thebibliography} \end{document}
2205.13406v1
http://arxiv.org/abs/2205.13406v1
Differentially Private Formation Control: Privacy and Network Co-Design
\documentclass[10pt,draftcls]{IEEEtran} \usepackage[utf8]{inputenc} \usepackage{amsmath,amsfonts} \usepackage{amssymb} \usepackage{bbold} \usepackage{graphicx} \usepackage{physics} \usepackage{caption} \usepackage{subcaption} \usepackage{enumerate} \usepackage{tikz} \usepackage{diagbox} \usepackage{cite} \usetikzlibrary{calc} \usetikzlibrary{matrix} \usetikzlibrary{shapes} \usetikzlibrary{positioning} \usetikzlibrary{backgrounds} \usetikzlibrary{arrows} \tikzset{main node/.style={circle,fill=blue!20,draw,minimum size=1cm,inner sep=0pt},} \newcommand{\fatone}{\mathbb{1}} \newcommand{\mhmargin}[1]{\marginpar{\textcolor{red}{\tiny MH: #1}}} \newcommand{\red}[1]{\textcolor{red}{#1}} \newcommand{\blue}[1]{\textcolor{blue}{#1}} \newcommand{\cove}{\Sigma_{e}} \ignorespaces} \newtheorem{corollary}{Corollary} \newtheorem{remark}{Remark} \newtheorem{lemma}{Lemma} \newtheorem{theorem}{Theorem} \newtheorem{assumption}{Assumption} \newtheorem{definition}{Definition} \newtheorem{problem}{Problem} \newcommand{\mh}[1]{\marginpar{\tiny \red{#1}}} \usepackage{mathtools} \mathtoolsset{showonlyrefs=true} \title{Differentially Private Formation Control: Privacy and Network Co-Design} \author{Calvin Hawkins and Matthew Hale$^*$\thanks{ $^*$The authors are with the Department of Mechanical and Aerospace Engineering, Herbert Wertheim College of Engineering, University of Florida. Emails: \texttt{\{calvin.hawkins,matthewhale\}@ufl.edu}. This work was supported by AFOSR under grant~FA9550-19-1-0169, by ONR under grant~N00014-21-1-2502, and by NSF CAREER grant~1943275. } } \date{January 2021} \begin{document} \maketitle \begin{abstract} As multi-agent systems proliferate, there is increasing need for coordination protocols that protect agents’ sensitive information while still allowing them to collaborate. Often, a network system and controller are first designed and implemented, and then privacy is only incorporated after that. However, the absence of privacy from the design process can make it difficult to implement without significantly harming system performance. To address this need, this paper presents a co-design framework for multi-agent networks and private controllers that we apply to the problem of private formation control. Agents’ state trajectories are protected using differential privacy, which is a statistical notion of privacy that protects data by adding noise to it. Privacy noise alters the performance of the network, which we quantify by computing a bound on the steady-state mean square error for private formations. Then, we analyze trade-offs between privacy level, system performance, and connectedness of the network’s communication topology. These trade-offs are used to formulate a co-design optimization framework to design the optimal communication topology and privacy parameters for a network running private formation control. Simulation results illustrate the scalability of our proposed privacy/network co-design problem, as well as the high quality of formations one can attain, even with privacy implemented. \end{abstract} \section{Introduction} \label{sec:intro} Multi-agent systems, such as robotic swarms and social networks, require agents to share information to collaborate. In some cases, the information shared between agents may be sensitive. For example, self-driving cars may share location data to be routed to a destination. 
Geo-location data and other data streams can be quite revealing about users and sensitive data should be protected, though this data must still be useful for multi-agent coordination. Thus, privacy in multi-agent control must simultaneously protect agents’ sensitive data while guaranteeing that privatized data enables the network to achieve a common task. This type of privacy has recently been achieved using differential privacy. Differential privacy comes from the computer science literature, where it was originally used to protect sensitive data when databases are queried \cite{dwork2014algorithmic,dwork2006calibrating}. Differential privacy is appealing because it is immune to post-processing and robust to side information \cite{dwork2014algorithmic}. These properties mean that privacy guarantees are not compromised by performing operations on differentially private data, and that they are not weakened by much by an adversary with additional information about data-producing agents~\cite{kasiviswanathan2014semantics}. Recently, differential privacy has been applied to dynamic systems~\cite{le2013differentially,yazdani2018differentially,hale2017cloud,le2017differentially,jones2019towards,mitraC,geirEnt, xu2020differentially, wang2016differentially}. One form of differential privacy in dynamic systems protects sensitive trajectory-valued data, and this is the notion of differential privacy used in this paper. Privacy of this form ensures that an adversary is unlikely to learn much about the state trajectory of a system by observing its outputs. In multi-agent control, this lets an agent share its outputs with other agents while protecting its state trajectory from those agents and eavesdroppers~\cite{le2013differentially,yazdani2018differentially,hale2017cloud,le2017differentially}. In this paper, we develop a framework for private multi-agent formation control using differential privacy. Formation control is a well-studied network control problem that can represent, e.g., robots physically assembling into geometric shapes or non-physical agents maintaining relative state offsets. For differential privacy, agents add privacy noise to their states before sharing them with other agents. The other agents use privatized states in their update laws, and then this process repeats at every time step. This paper focuses on private formation control, though the methods presented can be used (with only minor modifications) to design and analyze private consensus-style protocols, which underlie many multi-agent control and optimization algorithms, as well as coverage controllers and others with linear Laplacian dynamics~\cite{mesbahi2010graph,nedic18}. The private formation control protocol we present can be implemented in a completely distributed manner, and, contrary to some existing privacy approaches, it does not require a central coordinator. In many control applications, privacy is only a post-hoc concern that is incorporated after a network and/or a controller is designed, which can make privacy difficult to implement. Therefore, this paper formulates a co-design problem to design a network topology and a differential privacy implementation together. This problem accounts for (i) the strength of privacy protections, (ii) the formation control error induced by privacy, and (iii) the topology of the network that runs the formation control protocol. 
The benefits of co-design have been illustrated for problems of security in control systems~\cite{hashemi2020co}, and the co-design framework in this paper brings these same benefits to problems in privacy. A preliminary version of this paper appeared in \cite{hawkins2020differentially}. This paper adds the co-design framework, a closed-form solution for the steady-state formation error covariance, new simulations, and proofs of all results. The rest of this paper is organized as follows. Section~\ref{sec:background} gives graph theory and differential privacy background. Section~\ref{sec:probform} provides formal problem statements and outlines how privacy can be implemented in formation control. Section~\ref{sec:closedform} analyzes the performance of the private formation control protocol. In Section~\ref{sec:codesign}, we define, analyze, and provide methods to solve the privacy/network co-design problem. Next, in Section~\ref{sec:sims} we provide numerical examples of privacy/network co-design, and Section~\ref{sec:conclusions} provides concluding remarks. \noindent\textbf{Notation } $I_a\in\mathbb{R}^{a\times a}$ is the identity matrix in $a$ dimensions, and $\fatone$ is the vector of all ones in $\mathbb{R}^N.$ Other symbols are defined as they are used. \section{Background and Preliminaries} \label{sec:background} In this section we briefly review the required background on graph theory and differential privacy. \subsection{Graph Theory Background} A graph $\mathcal{G} = (V,E)$ is defined over a set of nodes $V$, and its edges are contained in the set $E$. For $N$ nodes, $V$ is indexed over $\{1,...,N\}$. The edge set of $\mathcal{G}$ is a subset $E \subseteq V \times V$, where the pair $(i,j) \in E$ if nodes $i$ and $j$ share a connection and $(i,j) \notin E$ if they do not. This paper considers undirected, weighted, simple graphs. Undirectedness means that an edge $(i,j) \in E$ is not distinguished from $(j,i) \in E$. Simplicity means that $(i,i) \notin E$ for all $i \in V$. Weightedness means that the edge $(i,j) \in E$ has a weight $w_{ij} = w_{ji} >0$. Of particular interest are connected graphs. \begin{definition}[Connected Graph] A graph $\mathcal{G}$ is connected if, for all $i,j \in \{1,...,N\}$, $i \neq j$, there is a sequence of edges one can traverse from node $i$ to node $j$.\hfill $\triangle$ \end{definition} This paper uses the weighted graph Laplacian, which is defined with weighted adjacency and weighted degree matrices. The weighted adjacency matrix $A(\mathcal{G}) \in \mathbb{R}^{N \times N}$ of $\mathcal{G}$ is defined element-wise as \begin{equation*} A(\mathcal{G})_{ij} = \begin{cases} w_{ij} & (i,j) \in E \\ 0 & \text{otherwise} \end{cases}. \end{equation*} Because we only consider undirected graphs, $A(\mathcal{G})$ is symmetric. The weighted degree of node $i \in V$ is defined as ${d_i = \sum_{j \mid (i,j) \in E} w_{ij}.}$ The maximum degree is~${d_{max} = \max_i d_i}$. The degree matrix $D(\mathcal{G}) \in \mathbb{R}^{N \times N}$ is the diagonal matrix $D(\mathcal{G}) = \textnormal{diag}(d_1,...,d_N)$. The weighted Laplacian of $\mathcal{G}$ is then defined as $L(\mathcal{G}) = D(\mathcal{G}) - A(\mathcal{G})$. Let $\lambda_k(\cdot)$ be the $k^{th}$ smallest eigenvalue of a matrix. By definition, $\lambda_1(L(\mathcal{G})) = 0$ for all graph Laplacians and $$ 0 = \lambda_1(L(\mathcal{G})) \leq \lambda_2(L(\mathcal{G})) \leq \dots \leq \lambda_N(L(\mathcal{G})).$$ The value of $\lambda_2(L(\mathcal{G}))$ plays a key role in this paper.
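As a concrete illustration of these constructions (added here for convenience; the example graph, weights, and variable names are hypothetical and not taken from the paper), the matrices $A(\mathcal{G})$, $D(\mathcal{G})$, and $L(\mathcal{G})$ of a small weighted graph, together with the ordered eigenvalues of the Laplacian, can be computed as follows.
\begin{verbatim}
import numpy as np

# hypothetical 4-node example with symmetric positive weights w_ij = w_ji
N = 4
edges = {(0, 1): 1.0, (1, 2): 2.0, (2, 3): 1.5, (0, 3): 0.5}

A = np.zeros((N, N))                 # weighted adjacency matrix A(G)
for (i, j), w in edges.items():
    A[i, j] = A[j, i] = w

D = np.diag(A.sum(axis=1))           # weighted degree matrix D(G)
L = D - A                            # weighted Laplacian L(G)

eigvals = np.sort(np.linalg.eigvalsh(L))
print(eigvals[0])                    # lambda_1(L(G)), numerically zero
print(eigvals[1])                    # lambda_2(L(G)), the quantity highlighted above
\end{verbatim}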
\begin{definition}[Algebraic Connectivity \cite{fiedler1973algebraic}] The algebraic connectivity of a graph $\mathcal{G}$ is the second smallest eigenvalue of its Laplacian and $\mathcal{G}$ is connected if and only if $\lambda_2(L(\mathcal{G})) > 0$. \hfill $\triangle$ \end{definition} Node $i$'s neighborhood set $N_i$ is the set of all agents that agent~$i$ communicates with, denoted $N_i = \{j \mid (i,j) \in E \}$. \subsection{Differential Privacy Background} This section provides a brief description of the differential privacy background needed for the remainder of the paper. More complete expositions can be found in \cite{le2013differentially,cynthia2006differential}. Overall, the goal of differential privacy is to make similar pieces of data appear approximately indistinguishable from one another. Differential privacy is appealing because its privacy guarantees are immune to post-processing \cite{cynthia2006differential}. For example, private data can be filtered without threatening its privacy guarantees \cite{le2013differentially, 9147779}. More generally, arbitrary post-hoc computations on private data do not harm differential privacy. In addition, after differential privacy is implemented, an adversary with complete knowledge of the mechanism used to implement privacy has no advantage over another adversary without mechanism knowledge \cite{dwork2014algorithmic,dwork2006calibrating}. In this paper we use differential privacy to privatize state trajectories of mobile autonomous agents. We consider vector-valued trajectories of the form ${Z = (Z(1),Z(2),...,Z(k),...),}$ where $Z(k) \in \mathbb{R}^d$ for all $k$. The $\ell_2$ norm of $Z$ is defined as $\| Z \|_{\ell_2} = \left(\sum_{k=1}^{\infty} \|Z(k)\|^2_2 \right)^{\frac{1}{2}}$, where $\|\cdot\|_2$ is the ordinary $2$-norm on $\mathbb{R}^d$. We consider privacy over the set of trajectories \begin{equation*} \tilde{\ell}_2^d = \{ Z \mid \|Z(k)\|_2 < \infty \text{ for all } k\}. \end{equation*} This set is similar to the ordinary~$\ell_2$-space, except that the entire trajectory need not have finite~$\ell_2$-norm. Instead, only each entry of a trajectory must have finite~$2$-norm in~$\mathbb{R}^d$. Thus, the set~$\tilde{\ell}_2^d$ contains trajectories that do not converge, which admits a wide variety of trajectories seen in control systems. We consider a network of $N$ agents, where agent $i$'s state trajectory is denoted by $x_i$. The $k^{th}$ element of agent $i$'s state trajectory is $x_i(k) \in \mathbb{R}^d$ for $d \in \mathbb{N}$, and agent $i$'s state trajectory belongs to $\tilde{\ell}_2^{d}$. The goal of differential privacy is to make ``similar'' pieces of data approximately indistinguishable, and an adjacency relation is used to quantify when pieces of data are ``similar.'' In this work, we provide privacy to trajectories of single agents. That is, each agent is only concerned with its own privacy and agents will privatize their own state trajectories before they are ever shared. To reflect this setup, our choice of adjacency relation is defined for single agents. This is in contrast to some other works that privatize collections of trajectories at once. The approach we consider, which is sometimes called \emph{input perturbation} in the literature~\cite{le2013differentially}, has also been widely used, and it amounts to privatizing data, then using it in some computation, rather than performing some computation with sensitive data and then privatizing its output. 
In this work we use a parameterized adjacency relation with parameter $b_i$. \begin{definition}[Adjacency] Fix an adjacency parameter $b_i > 0$ for agent $i$. $\text{Adj}_{b_i}: \tilde{\ell}_2^d \times \tilde{\ell}_2^d \xrightarrow{} \{0,1\}$ is defined as \begin{equation*} \text{Adj}_{b_i}(v_i,w_i) = \begin{cases} 1 & \|v_i - w_i\|_{\ell_2} \leq b_i \\ 0 & \text{otherwise.}\tag*{$\triangle$} \end{cases} \end{equation*} \end{definition} In words, two state trajectories that agent $i$ could produce are adjacent if and only if the $\ell_2$-norm of their difference is upper bounded by $b_i$. This means that every state trajectory within distance $b_i$ from agent $i$'s actual state trajectory must be made approximately indistinguishable from it to enforce differential privacy. To calibrate differential privacy's protections, agent $i$ selects privacy parameters $\epsilon_i$ and $\delta_i$. Typically, $\epsilon_i \in [0.1, \ln{3}]$ and $\delta_i \leq 0.05$ for all $i$ \cite{yazdani2018differentially}. The value of $\delta_i$ can be regarded as the probability that differential privacy fails for agent $i$, while $\epsilon_i$ can be regarded as the information leakage about agent $i$. This work provides differential privacy for each agent individually using input perturbation, i.e., by adding noise to sensitive data directly. Noise is added by a privacy mechanism, which is a randomized map. We next provide a formal definition of differential privacy. First, fix a probability space $(\Omega, \mathcal{F},\mathbb{P})$. We consider outputs in $\tilde{\ell}_2^{d}$ and use a $\sigma$-algebra over $\tilde{\ell}_2^{d}$, denoted $\Sigma_2^{d}$ \cite{hajek2015random}. \begin{definition}[Differential Privacy] Let $\epsilon_i > 0$ and $\delta_i \in [0,\frac{1}{2})$ be given. A mechanism $M: \tilde{\ell}_2^{d} \times \Omega \xrightarrow{}\tilde{\ell}_2^{d}$ is $(\epsilon_i,\delta_i)$-differentially private if, for all adjacent $x_i,x_i^\prime \in \tilde{\ell}_2^{d}$, we have \begin{equation*} \mathbb{P}[M(x_i) \in S] \leq e^{\epsilon_i}\mathbb{P}[M(x_i^\prime) \in S] + \delta_i \text{ for all } S \in \Sigma_2^{d}.\tag*{$\triangle$} \end{equation*} \end{definition} The Gaussian mechanism will be used to implement differential privacy in this work. The Gaussian mechanism adds zero-mean i.i.d. noise drawn from a Gaussian distribution pointwise in time. Stating the required distribution uses the $Q$-function, defined as $Q(y) = \frac{1}{\sqrt{2\pi}} \int_y^{\infty} e^{-\frac{z^2}{2}}dz.$ \begin{lemma}[Gaussian Mechanism \cite{le2013differentially}]\label{lem:gausmech} Let~$b_i > 0$, $\epsilon_i > 0$, and $\delta_i \in (0,\frac{1}{2})$ be given, fix the adjacency relation~$\textnormal{Adj}_{b_i}$, and let $x_i \in \tilde{\ell}_2^{d}$. When sharing the state trajectory~$x_i$ itself, the Gaussian mechanism takes the form ${\tilde{x}_i(k) = x_i(k) + v_i(k)}$. Here $v_i$ is a stochastic process with $v_i(k) \sim \mathcal{N}(0, \Sigma_{v_i})$, where $\Sigma_{v_i}=\sigma^2_i I_{d}$ with $$\sigma_i \geq \frac{b_i}{2\epsilon_i}(K_{\delta_i} + \sqrt{K_{\delta_i}^{2} + 2\epsilon_i})$$ and~${K_{\delta_i} = Q^{-1}(\delta_i).}$ This mechanism provides $(\epsilon_i,\delta_i)$-differential privacy to $x_i$.\hfill $\blacksquare$ \end{lemma} For convenience, let ${\kappa(\delta_i,\epsilon_i) = \frac{1}{2\epsilon_i}(K_{\delta_i} + \sqrt{K_{\delta_i}^{2} + 2\epsilon_i})}$. We next formally define the problems that are the focus of the rest of the paper.
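Before doing so, we note that the calibration in Lemma~\ref{lem:gausmech} is straightforward to carry out numerically. The following Python sketch (an illustration under our notation; the function names and the use of \texttt{numpy}/\texttt{scipy} are choices made here for exposition and are not part of the implementation discussed later in the paper) computes $\kappa(\delta_i,\epsilon_i)$ through the inverse $Q$-function and adds the corresponding Gaussian noise to a finite prefix of a trajectory.
\begin{verbatim}
import numpy as np
from scipy.stats import norm

def kappa(delta, epsilon):
    # kappa(delta, eps) = (K + sqrt(K^2 + 2*eps)) / (2*eps), with
    # K = Q^{-1}(delta), the upper-tail Gaussian quantile.
    K = norm.isf(delta)
    return (K + np.sqrt(K**2 + 2.0 * epsilon)) / (2.0 * epsilon)

def gaussian_mechanism(x, b, epsilon, delta, rng=None):
    # x is a T-by-d array holding a finite prefix of the trajectory x_i;
    # the noise scale satisfies sigma_i >= kappa(delta_i, epsilon_i) * b_i.
    rng = np.random.default_rng() if rng is None else rng
    sigma = kappa(delta, epsilon) * b
    return x + rng.normal(0.0, sigma, size=x.shape)

# Example: epsilon_i = ln(3), delta_i = 0.05, adjacency parameter b_i = 1.
x = np.zeros((50, 2))                  # placeholder trajectory prefix in R^2
x_tilde = gaussian_mechanism(x, b=1.0, epsilon=np.log(3), delta=0.05)
\end{verbatim}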
\section{Problem Formulation} \label{sec:probform} In this section we state and analyze the differentially private formation control problem. We begin with the problem statement itself, then elaborate on the underlying technical details. \subsection{Problem Statement} \begin{problem} \label{prob:main} Consider a network of $N$ agents with communication topology modeled by the undirected, simple, connected, and weighted graph $\mathcal{G}$. Let $x_i(k) \in \mathbb{R}^d$ be agent $i$'s state at time $k$, $N_i$ be agent $i$'s neighborhood set, $\gamma > 0$ be a stepsize, $w_{ij}$ be a positive weight on the edge~$(i, j) \in E$, and $n_i(k)$ be the process noise in agent~$i$'s state dynamics at time~$k$. We define $\Delta_{ij} \in \mathbb{R}^d$ for all $ (i,j) \in E$ as the desired relative state offset between agents $i$ and $j$. Do each of the following: \begin{enumerate}[i.] \item Implement the formation control protocol \begin{equation} x_i(k+1) = x_i(k) + \gamma \sum_{j \in N_i}w_{ij}(x_j(k) - x_i(k) - \Delta_{ij})+n_i(k),\label{nodelevel_no_noise} \end{equation} in a differentially private, decentralized manner. \label{prob1} \item Bound the performance of the network in terms of the privacy parameters of each agent and the algebraic connectivity of the underlying communication topology; use those bounds to quantify tradeoffs between privacy, connectedness, and network performance.\label{prob2} \item Use those tradeoffs to formulate an optimization problem to co-design the communication topology and privacy parameters of the network. \label{prob3} \hfill{$\triangle$} \end{enumerate} \end{problem} Before solving Problem~\ref{prob:main}, we give the necessary definitions for formation control. First, we define agent- and network-level dynamics and detail how each agent will enforce differential privacy. Then, we explain how differentially private communications affect the performance of a formation control protocol and how to quantify the quality of a formation. \subsection{Multi-Agent Formation Control} The goal of formation control is for agents in a network to assemble into some geometric shape or set of relative states. Multi-agent formation control is a well-researched problem and there are several mathematical formulations one can use to achieve similar results \cite{jadbabaie2015scaling,krick2009stabilisation,ren2007information,ren2007consensus,fax2004information,olfati2007consensus,mesbahi2010graph}. We define relative offsets between agents that communicate, and the control objective is for all agents to maintain these relative offsets with each of their neighbors. This approach is similar to that of~\cite{ren2007information}. For the formation to be feasible, we require $\Delta_{ij}=-\Delta_{ji}$ for all $(i,j)\in E$. The network control objective is driving ${\lim_{k\xrightarrow{}\infty}(x_{j}(k)-x_{i}(k))=\Delta_{ij}}$ for all $(i,j)\in E.$ The formation can be centered around any point in $\mathbb{R}^{d}$ and meet this requirement, i.e., we allow formations to be translationally invariant \cite{mesbahi2010graph}. Now we define the agents' update law. We model agents as single integrators, i.e., \begin{equation} x_i(k+1)=x_i(k)+u_i(k)+n_i(k), \end{equation} where $n_i(k)\sim\mathcal{N}(0,s_i^2I_d)$ is process noise and $u_i(k)\in\mathbb{R}^d$ is agent $i$'s input.
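In particular, the protocol in~\eqref{nodelevel_no_noise} corresponds to the feedback choice \begin{equation*} u_i(k) = \gamma\sum_{j\in N_i} w_{ij}\big(x_j(k) - x_i(k) - \Delta_{ij}\big), \end{equation*} so that substituting this input into the single-integrator dynamics above recovers~\eqref{nodelevel_no_noise}.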
Let $\{p_{1},...,p_{N}\}$ be any collection of points in the formation such that $p_{j}-p_{i}=\Delta_{ij}$ for all $(i,j)\in E$, and let $p=(p_{1}^{T},\dots,p_{N}^{T})^{T}\in\mathbb{R}^{Nd}$ be the network-level formation specification. We consider the formation control protocol in~\eqref{nodelevel_no_noise}. At the network level, let $x(k)=(x_{1}(k)^{T},...,x_{N}(k)^{T})^{T}\in\mathbb{R}^{Nd},$ $n(k)=(n_{1}(k)^{T},...,n_{N}(k)^{T})^{T}\in\mathbb{R}^{Nd},$ and let ${\bar{x}(k)=x(k)-p}$ with~$\bar{x}_i(k) = x_i(k) - p_i$. Then we analyze \begin{equation} \bar{x}(k+1)=\Big(\big(I_N-\gamma L(\mathcal{G})\big)\otimes I_{d}\Big)\bar{x}(k) +n(k).\label{networklevel_nonoise} \end{equation} Let $\bar{x}_{i_{[l]}}$ be the $l^{th}$ scalar element of $\bar{x}_{i}$. Then \begin{equation} \bar{x}_{[l]} = [\bar{x}_{1_{[l]}},\dots,\bar{x}_{N_{[l]}}]^{T}\label{statediml} \end{equation} is the vector of all agents' states in the $l^{th}$ dimension, and \begin{equation} n_{[l]} = [n_{1_{[l]}},\dots,n_{N_{[l]}}]^{T} \end{equation} is the vector of corresponding noise terms. The protocol in~\eqref{networklevel_nonoise} is equivalent to running the protocol \begin{equation} \bar{x}_{[l]}(k+1)=(I_N-\gamma L(\mathcal{G}))\bar{x}_{[l]}(k)+n_{[l]}(k)\label{dimJproto} \end{equation} for all $l\in\{1,\dots,d\}$ simultaneously. \subsection{Private Communications for Formations (Solution to Problem~\ref{prob:main}.\ref{prob1})} To privately implement the protocol in~\eqref{nodelevel_no_noise}, agent $j$ starts by selecting privacy parameters $\epsilon_{j}>0$, $\delta_{j}\in(0,\frac{1}{2})$, and adjacency relation $\text{Adj}_{b_{j}}$ with $b_{j}>0$. Then, agent $j$ privatizes its state trajectory $x_{j}$ with the Gaussian mechanism. Let $\tilde{x}_{j}$ denote the differentially private version of $x_{j}$, where, pointwise in time, $\tilde{x}_{j}(k)=x_{j}(k)+v_{j}(k),$ with $v_{j}(k)\sim\mathcal{N}(0,\Sigma_{v_{j}}),$ where $\Sigma_{v_{j}}=\sigma_{j}^{2}I_{d}$ and $\sigma_{j}\geq\kappa(\delta_{j},\epsilon_{j})b_{j}$. Lemma~\ref{lem:gausmech} shows that this setup keeps agent~$j$'s state trajectory~$(\epsilon_j, \delta_j)$-differentially private. Agent $j$ then shares $\tilde{\bar{x}}_{j}(k)=\tilde{x}_{j}(k)-p_{j}$ with its neighbors, and this is also~$(\epsilon_j,\delta_j)$-differentially private because subtracting $p_{j}$ is merely post-processing \cite{cynthia2006differential}. This process is shown in Figure~\ref{fig:privacy_mechanism}. In this privacy implementation, each agent is concerned with privatizing its own trajectory rather than implementing privacy for the network-level trajectory $x.$ That is, each agent privatizes its own information and then shares it with the other agents. In the privacy literature, the protection of one agent's information is sometimes referred to as \emph{local differential privacy}~\cite{yang2020local}, and it means that privacy guarantees are provided at the agent level. We emphasize here that our differential privacy implementation differs from existing works that privatize each state value individually. In particular, we implement the trajectory-level notion of differential privacy used in~\cite{le2013differentially} and~\cite{hall2013differential}. This form of differential privacy protects elements of~$\tilde{\ell}^d_2$, which are infinite-length trajectories. It does not seek to protect single states in~$\mathbb{R}^d$ as was done in~\cite{huang2015differentially} and other works.
In those other works, correlations among state values over time cause privacy to weaken. However, the trajectory-level privacy that we use is designed to mask differences between entire infinite-length trajectories that an agent's dynamics could produce, and it does not weaken over time. \begin{figure} \centering \includegraphics[width=.4\textwidth]{Figure_for_Co_Design.pdf} \caption{To implement privacy, at each time~$k$ agent $j$ adds privacy noise $v_j(k)$ to its state $x_j(k)$ to obtain~$\tilde{x}_j(k).$ Then at each time~$k$, $\tilde{\bar{x}}_j(k) = \tilde{x}_j(k) - p_j$ is shared with agent $j$'s neighbors in $N_j$. Each agent uses this same setup to implement differential privacy for its own state trajectory. } \label{fig:privacy_mechanism} \end{figure} When each agent implements privacy as illustrated above, agent $i$ only has access to $\tilde{\bar{x}}_{j}(k)$ for $j\in N_i$. Plugging this into the node-level formation control protocol in \eqref{nodelevel_no_noise} gives \begin{equation} \bar{x}_{i}(k+1)=\bar{x}_{i}(k)+\gamma\sum_{j\in N_i}w_{ij}(\tilde{\bar{x}}_{j}(k)-\bar{x}_{i}(k))+n_i(k).\label{DP_node_level} \end{equation} This solves Problem~\ref{prob:main}.\ref{prob1}. To solve Problems~\ref{prob:main}.\ref{prob2} and~\ref{prob:main}.\ref{prob3}, we will analyze this protocol at the network level. For analysis, let $x_{[l]}=[x_{1_{[l]}},\dots,x_{N_{[l]}}]^{T}$, $p_{[l]}=[p_{1_{[l]}},\dots,p_{N_{[l]}}]^{T},$ and $\bar{x}_{[l]}=x_{[l]}-p_{[l]}$. Also let ${\Sigma_v=\textnormal{diag}(\sigma_1^2,\dots,\sigma_N^2)}$ and $\Sigma_n=\textnormal{diag}(s_1^2,\dots,s_N^2)$. We begin by formulating the network-level dynamics in each coordinate. \begin{lemma} \label{lem:ntwrk_dyn} Let a network of $N$ agents communicate over the weighted, simple graph $\mathcal{G}$ with Laplacian $L(\mathcal{G})$ and adjacency matrix $A(\mathcal{G})$. Suppose that agent~$j$ uses privacy parameters~$\epsilon_j > 0$ and~$\delta_j \in (0, 1/2)$ and the adjacency parameter~$b_j > 0$. Suppose it uses the Gaussian mechanism to generate private states via~$\tilde{x}_j(k)=x_j(k)+v_j(k)$, where $v_j(k)\sim\mathcal{N}(0,\sigma_j^2I_d)$ and $\sigma_j\geq\kappa(\delta_j,\epsilon_j)b_j$ as in Lemma~\ref{lem:gausmech}. Then when each agent implements the protocol in~\eqref{DP_node_level}, the network-level dynamics are \[ \bar{x}_{[l]}(k+1)=(I_N-\gamma L(\mathcal{G}))\bar{x}_{[l]}(k)+z_{[l]}(k), \] for each $l \in\{1,\dots,d\}$, where $z_{[l]}(k)\sim\mathcal{N}(0,\Sigma_{z})$, ${\Sigma_{z}=\gamma^{2}A(\mathcal{G})\Sigma_{v}A(\mathcal{G})+\Sigma_n}$, $\Sigma_v=\textnormal{diag}(\sigma_1^2,\dots,\sigma_N^2)$, and $\Sigma_n=\textnormal{diag}(s_1^2,\dots,s_N^2)$. \end{lemma} \begin{proof} See Appendix~\ref{sec:ntwrk_dyn_proof}. \end{proof} To analyze network-level performance, let $\beta_{[l]}(k):=\frac{1}{N}\fatone^{T}\bar{x}_{[l]}(k)\fatone,$ which is the state vector the protocol in \eqref{dimJproto} would converge to with initial state $\bar{x}_{[l]}(k)$ and without privacy or process noise. Also let \begin{equation} e_{[l]}(k)=\bar{x}_{[l]}(k)-\beta_{[l]}(k),\label{errordiml} \end{equation} which is the offset of the current state from the state the protocol would converge to without noise. We analyze this error term in the next section. \section{Performance of Differentially Private Formation Control} \label{sec:closedform} In this section we solve Problem \ref{prob:main}.\ref{prob2}.
First, in Section~\ref{sec:lyap} we derive network-level error dynamics and show that the total mean square error, $e_{ss},$ can be computed using the trace of a solution to a Lyapunov equation. Then, we use these results in Section~\ref{sec:error_bounds} to derive several performance bounds that are functions of the underlying graph topology and each agent's privacy parameters. In this work we use the total mean square error of the network at steady-state, denoted $e_{ss}$, to quantify performance. Agent $i$'s private formation control protocol in~\eqref{DP_node_level} is in~$\mathbb{R}^d$, and each agent in the network runs this protocol. This is equivalent to running $d$ identical copies of \eqref{dimJproto}, which is in $\mathbb{R}^N$. Thus using~\eqref{dimJproto}, we can compute the mean square error in dimension $l$ and then multiply by $d$ to compute $e_{ss}$. The mean-square error in dimension~$l$ is equal to~$\lim_{k \to \infty} \frac{1}{N} \sum_{i=1}^{N} E[e^2_{[l],i}(k)],$ where $e_{[l],i}(k)$ is the $i^{th}$ element of $e_{[l]}(k).$ Then we have \[ e_{ss}:=\lim_{k\to\infty}\frac{d}{N}\sum_{i=1}^{N}E\left[e_{[l],i}^{2}(k)\right]. \] \subsection{Connections with the Lyapunov Equation} \label{sec:lyap} The main error bound in this paper uses the fact that we can represent the total error in the system as the trace of a covariance matrix. We define $\Sigma_{e_{[l]}}(k)=E\left[e_{[l]}(k)e_{[l]}(k)^{T}\right]$ and $\Sigma_{\infty}=\lim_{k\to\infty}\Sigma_{e_{[l]}}(k).$ Then we have $e_{ss}=\frac{d}{N}Tr(\Sigma_{\infty}).$ Now we will analyze the dynamics of $e_{[l]}(k)$ and $\Sigma_{e_{[l]}}(k)$. For a given~$\gamma > 0$ and a given graph~$\mathcal{G}$, let~${\mathcal{M}=I_N-\gamma L(\mathcal{G})-\frac{1}{N}\fatone\fatone^T}.$ Then we have the following. \begin{lemma} \label{lem:jbberror} Let~$N$ agents communicate over a given weighted, undirected, simple graph~$\mathcal{G}$ with Laplacian $L(\mathcal{G})$ and adjacency matrix $A(\mathcal{G})$. Suppose for all $i$ that agent~$i$ implements differential privacy using the Gaussian mechanism in~Lemma~\ref{lem:gausmech} with privacy parameters $\epsilon_i>0$ and $\delta_i\in(0,\frac{1}{2}).$ When agent $i$ implements the private formation control protocol~\eqref{DP_node_level}, the network level error $e_{[l]}(k)$ in~\eqref{errordiml} evolves via \begin{equation} e_{[l]}(k+1)=\mathcal{M}e_{[l]}(k)+(I_N-\frac{1}{N}\fatone\fatone^T)z_{[l]}(k),\label{error_dyn} \end{equation} $\Sigma_{e_{[l]}}(k)$ evolves according to \begin{equation} \Sigma_{e_{[l]}}(k+1)=\mathcal{M}\Sigma_{e_{[l]}}(k)\mathcal{M}+(I_N-\frac{1}{N}\fatone\fatone^T)\Sigma_{z}(I_N-\frac{1}{N}\fatone\fatone^T),\label{cov_dyn} \end{equation} and $\Sigma_{e_{[l]}}(k)$ can be computed via \begin{equation} \Sigma_{e_{[l]}}(k)=\sum_{i=0}^{k-1}\mathcal{M}^{i}(I_N-\frac{1}{N}\fatone\fatone^T)\Sigma_{z}(I_N-\frac{1}{N}\fatone\fatone^T)\mathcal{M}^{i}. \label{cov_expand} \end{equation} \end{lemma} \begin{proof} See Appendix~\ref{sec:error_dyn_proof}. 
\end{proof} \begin{lemma} \label{lem:M_properties} Let~$N$ agents communicate over a given weighted, undirected, simple, connected graph~$\mathcal{G}$ with Laplacian $L(\mathcal{G})$ and ${\mathcal{M}=I_N-\gamma L(\mathcal{G})-\frac{1}{N}\fatone\fatone^T}.$ Then the eigenvalues of $\mathcal{M}$ are strictly less than $1.$ Furthermore, the maximum singular value of $\mathcal{M}$ is given by \begin{equation} \sigma_{\max}(\mathcal{M})=1-\gamma\lambda_{2}(L).\label{eq:sing_val} \end{equation} \end{lemma} \begin{proof} See Appendix~\ref{sec:M_proof}. \end{proof} We now show that the steady-state error covariance matrix, $\Sigma_{\infty},$ is the solution to a discrete-time Lyapunov equation. \begin{theorem} \label{thm:lyap} Let~$N$ agents communicate over a given undirected, simple graph~$\mathcal{G}$ with weighted Laplacian $L(\mathcal{G})$ and adjacency matrix $A(\mathcal{G})$. Suppose that agent~$i$ implements differential privacy using the Gaussian mechanism with privacy parameters $\epsilon_i>0$ and $\delta_i\in(0,\frac{1}{2})$ and implements the private formation control protocol~\eqref{DP_node_level}. If the underlying graph $\mathcal{G}$ is connected, $\Sigma_{\infty}$ is equal to the unique solution to the discrete time Lyapunov equation \begin{equation} \Sigma_{\infty}=Q+\mathcal{M}\Sigma_{\infty}\mathcal{M}, \label{lyap_eq} \end{equation} where $Q=(I_N-\frac{1}{N}\fatone\fatone^T)\Sigma_{z}(I_N-\frac{1}{N}\fatone\fatone^T)$. \end{theorem} \begin{proof} From Lemma~\ref{lem:jbberror}, we have \begin{align} \Sigma_{\infty}&=\lim_{k\to\infty}\Sigma_{e_{[l]}}(k)\\ &=\sum_{i=0}^{\infty}\mathcal{M}^{i}(I_N-\frac{1}{N}\fatone\fatone^T)\Sigma_{z}(I_N-\frac{1}{N}\fatone\fatone^T)\mathcal{M}^{i}. \end{align} Taking the $i=0$ term out of the sum gives \begin{equation} \Sigma_{\infty}=Q+\sum_{i=1}^{\infty}\mathcal{M}^{i}Q\mathcal{M}^{i}. \end{equation} Factoring out $\mathcal{M}$ on both sides of the sum gives \begin{equation*} \Sigma_{\infty}=Q+\mathcal{M}\left(\sum_{i=1}^{\infty}\mathcal{M}^{i-1}Q\mathcal{M}^{i-1}\right)\mathcal{M}. \end{equation*} The remaining infinite sum is precisely $\Sigma_{\infty}$. Thus, we arrive at the equation $\Sigma_{\infty}=Q+\mathcal{M}\Sigma_{\infty}\mathcal{M},$ which is the discrete-time Lyapunov equation. From Lemma~\ref{lem:M_properties}, the eigenvalues of $\mathcal{M}$ lie strictly inside the unit disk for any undirected, connected graph $\mathcal{G}$, and $Q$ is positive semidefinite. Using Proposition 2.1 from \cite{gahinet1990sensitivity}, \eqref{lyap_eq} has a unique, symmetric solution $\Sigma_{\infty}.$ \end{proof} Theorem~\ref{thm:lyap}, along with the fact that $e_{ss}=\frac{d}{N}Tr(\Sigma_{\infty})$, allows us to solve for $e_{ss}$. That is, given a communication topology $\mathcal{G}$ and set of privacy parameters $\{(\epsilon_i,\delta_i)\}_{i=1}^N,$ we can determine the performance of the network, encoded by $e_{ss},$ before runtime. In this work, we are interested in designing a communication topology that allows agents to be as private as possible while meeting performance constraints. These performance constraints will take the form of $e_{ss}\leq e_R$, where $e_R$ is the maximum allowable error at steady-state. While Theorem~\ref{thm:lyap} provides a means to evaluate the performance of a given network, it does not help us in designing a network to meet a specified performance requirement of the form $e_{ss}\leq e_R$.
For example, if we are given a communication topology $\mathcal{G}$ and a set of privacy parameters $\{(\epsilon_i,\delta_i)\}_{i=1}^N,$ we can use Theorem~\ref{thm:lyap} to compute $e_{ss}$ and can check if $e_{ss}\leq e_R,$ but Theorem~\ref{thm:lyap} does not provide a direct way to design a network that achieves $e_{ss}\leq e_R$ if the bound is not already met. In the next subsection, we find a scalar bound on $e_{ss}$ that will be used to solve network design problems of this type in Section~\ref{sec:codesign}. \subsection{Analytical Result and Bounds} \label{sec:error_bounds} In this section we find a scalar bound on $e_{ss}$ in terms of the privacy parameters $\{(\epsilon_i,\delta_i)\}_{i=1}^N$ and properties of the communication topology $\mathcal{G}$, which will allow us to design networks that achieve given performance constraints. In the following theorem, the main bound is a result of $\Sigma_{\infty}$ being the solution to a Lyapunov equation as shown in Theorem~\ref{thm:lyap}. Several bounds and properties of Lyapunov equations have been explored in the literature, and some of these results have been surveyed in~\cite{kwon1996bounds}. We have the following bound on $e_{ss}.$ \begin{theorem} \label{thm:error_bound} Let all the conditions from Theorem~\ref{thm:lyap} hold. With $$e_{ss}:=\lim_{k\to\infty}\frac{d}{N}\sum_{i=1}^{N}E\left[e_{[l],i}^{2}(k)\right],$$ we have \[ e_{ss}\leq\frac{\gamma d \sum_{i=1}^{N}\left(\sum_{j=1}^{N}w_{ij}^{2} -\frac{d_i^2}{N}\right)\sigma_i^2 +\frac{N-1}{N}\sum_{i=1}^N s_i^2}{N\lambda_{2}(L)\left(2-\gamma\lambda_{2}(L)\right)}. \] \end{theorem} \begin{proof} See Appendix~\ref{sec:big_proof}. \end{proof} Theorem~\ref{thm:error_bound} solves Problem~\ref{prob:main}.\ref{prob2}. This result gives a scalar bound on $e_{ss}$ that depends on the privacy parameters $\{(\epsilon_i,\delta_i)\}_{i=1}^N$ through $\sigma_i,$ where $\sigma_i\geq\kappa(\delta_i,\epsilon_i)b_i$; on the edge weights $w_{ij}$; on the weighted degree of each agent; on the algebraic connectivity $\lambda_2(L)$; and on the process noise variances $s_i^2.$ In the previous subsection we detailed how Theorem~\ref{thm:lyap} was not sufficient for designing networks to meet performance constraints of the form $e_{ss}\leq e_R.$ Theorem~\ref{thm:error_bound} provides a method to enforce these constraints. For example, if the network must achieve $e_{ss}\leq e_R$, this can be enforced by requiring that \begin{equation} \frac{\gamma d \sum_{i=1}^{N}\left(\sum_{j=1}^{N}w_{ij}^{2} -\frac{d_i^2}{N}\right)\sigma_i^2 +\frac{N-1}{N}\sum_{i=1}^N s_i^2}{N\lambda_{2}(L)\left(2-\gamma\lambda_{2}(L)\right)}\leq e_R. \end{equation} This requirement imposes constraints on the communication topology $\mathcal{G},$ its edge weights $w_{ij},$ and the privacy parameters $\{(\epsilon_i,\delta_i)\}_{i=1}^N.$ Overall, the bound on $e_{ss}$ in Theorem~\ref{thm:error_bound} gives us a method to translate a performance requirement into a joint constraint on the communication topology and privacy parameters. In the next section, we use this constraint to formulate an optimization problem, with the privacy parameters and communication topology as decision variables, that designs a private formation control network that allows agents to be as private as possible while meeting global performance requirements. \section{Privacy and Network Co-design} \label{sec:codesign} In this section we solve Problem~\ref{prob:main}.\ref{prob3}.
Given the aforementioned bounds on formation error, we now focus on designing networks for performing private formation control. Our goal is to design a network, through selecting entries of $L(\mathcal{G})$, and privacy scheme, through selecting the values of $\{\epsilon_i\}_{i=1}^N$, that meet global performance requirements, agent-level privacy requirements, and other constraints. Here we keep $\delta_i$ fixed and tune $\epsilon_i$ to achieve the desired level of privacy. Since $\delta_i$ is the probability that differential privacy fails, the existing privacy literature is primarily focused on tuning $\epsilon_i$ and we do so here. The key tradeoff in designing a private formation control network is balancing agent-level privacy requirements with global performance. For example, if some agents use very strong privacy, then the high-variance privacy noise they use will make global performance poor, even if many other agents have only weak privacy requirements. These effects can also be amplified or attenuated by the communication topology of the network, e.g., if an agent with strong privacy sends very noisy messages to other agents along heavily weighted edges. Privacy/network co-design thus requires unifying and balancing these tradeoffs while designing a weighted, undirected graph and the privacy levels that agents use. \subsection{Co-Design Search Space} In this section we consider the following setup. We are given $N$ agents that wish to implement private formation control and each agent has a minimum strength of privacy that it will accept. We are tasked with designing a network that allows agents to be as private as possible while meeting a global performance requirement. In many settings, each agent's neighborhood set will be fixed \emph{a priori} based on hardware compatibility or physical location, and these neighborhoods specify an unweighted, undirected graph of edges that can be used. As network designers, we must design the privacy parameters for each agent and all edge weights for the given graph. That is, we can assign a zero or non-zero weight to each edge that is present, but we cannot attempt to assign non-zero weight to an edge that is absent. We denote the given unweighted graph by~$\mathcal{G}_0$ and denote its unweighted Laplacian by~$L_0$. We define~$\mathcal{L}(L_{0})$ as the space of all weighted graph Laplacians~$L$ such that~$L_{ij} = 0$ if~$L_{0,ij} = 0$. Thus, if the original undirected graph has $|E|$ edges, we must select $|E|+N$ parameters, namely one weight for each edge and the values of~$\{\epsilon_i\}_{i=1}^{N}$. Designing these parameters manually for large networks will be infeasible and thus we develop a numerical framework for their design. \subsection{Co-Design Problem Statement} Below we formally state the privacy/network co-design problem and then describe its features. \begin{problem}[Co-design Problem] \label{prob:codesign} Given an input undirected, unweighted, simple graph $\mathcal{G}_{0}$ with Laplacian~$L_0,$ a required global error bound $e_R,$ a minimum connectivity parameter $\lambda_{2_L},$ weighting factor $\vartheta,$ and a minimum level of privacy $\epsilon_i^{max}$ for agent $i$ for all $i\in[N]$, to co-design privacy and agents' communication topology, solve \begin{align} &\min_{L(\mathcal{G})\in\mathcal{L}(L_{0}),\{\epsilon_{i}\}_{i=1}^N} \quad\textnormal{Tr}(L(\mathcal{G}))+\vartheta\sum_{i=1}^N\epsilon_{i}^{2}\\ \text{subject to } &\frac{\gamma d \sum_{i=1}^{N} \!\left(\sum_{j=1}^{N}\!w_{ij}^{2} \!-\! 
\frac{d_i^2}{N}\right)\sigma_i^2 \!+\!\frac{N-1}{N}\!\sum_{i=1}^N \!s_i^2}{N\lambda_{2}(L)\left(2-\gamma\lambda_{2}(L)\right)} \!\leq\! e_{R}\\ \quad & \quad\epsilon_{i}\leq\epsilon_{i}^{max}\quad\textnormal{for all }i\\ \quad & \lambda_{2}(L(\mathcal{G}))\geq\lambda_{2_{L}}.\label{codesign_opt} \end{align}\hfill $\triangle$ \end{problem} First, we describe the objective function of Problem~\ref{prob:codesign}. The purpose of the term $\textnormal{Tr}(L(\mathcal{G}))$ is to produce a network that uses as little edge weight as possible; in Theorem~\ref{thm:error_bound}, the numerator of the error bound grows as the weights $w_{ij}$ grow. Since $\textnormal{Tr}(L(\mathcal{G}))=\sum_{i\in[N]}d_{i},$ minimizing this term produces a solution in which each agent has a small weighted degree, which promotes better network-level performance. If edge weights represent or are correlated with a monetary cost, minimizing $\textnormal{Tr}(L(\mathcal{G}))$ also produces a solution that uses as little cost as possible. Minimizing the other term in the objective function, $\sum_{i\in[N]}\epsilon_{i}^{2},$ makes each agent's privacy level as strong as possible; since the strength of privacy grows as $\epsilon_{i}$ shrinks, this term pushes the solution toward a strong level of privacy for each agent. The weighting factor $\vartheta\in\mathbb{R}$ allows the user to prioritize a solution with a strong level of privacy or with small degrees for each agent. We now describe the constraints of Problem~\ref{prob:codesign}. The first constraint requires that the total mean square error of the network, $e_{ss},$ is at most some user-defined value $e_{R}$. A sufficient condition for $e_{ss}\leq e_{R}$ is that the bound in Theorem~\ref{thm:error_bound} is at most $e_{R},$ which is what we implement here. The second constraint requires that each agent's privacy level is at least as strong as that agent's weakest acceptable privacy level, which is set by $\epsilon_{i}^{max}$. The last constraint requires that the solution produces a connected graph. Connectivity for the weighted graph can be ensured by $\lambda_{2}(L(\mathcal{G}))>0$, but we implement the constraint $\lambda_{2}(L(\mathcal{G}))\geq\lambda_{2_{L}}$ for some user-defined $\lambda_{2_{L}}>0$ so that the user has some control over how connected the resulting graph is. For example, $\lambda_{2_{L}}$ can be set to the algebraic connectivity of the path (line) graph on $N$ nodes, which has the least algebraic connectivity among connected graphs on $N$ nodes. Overall, solving Problem~\ref{prob:codesign} produces a communication topology and privacy parameters that meet global performance and connectivity constraints while allowing agents to be as private as possible. This solves Problem~\ref{prob:main}.\ref{prob3}. \subsection{Numerically Solving Problem~\ref{prob:codesign}} We have built a MATLAB program to solve Problem~\ref{prob:codesign}, which is available on GitHub \cite{hawkingithub}. Due to the nonlinearities in the problem, e.g., some of the constraints depend on the second smallest eigenvalue of one of the decision variables, we found MATLAB and \texttt{fmincon} to perform well for problems of this kind. When optimizing over $L(\mathcal{G})\in \mathcal{L}(L_0),$ since we only consider undirected, symmetrically weighted graphs, we need only optimize over the upper-triangular off-diagonal entries of $L(\mathcal{G})$, which helps reduce computation time. Further commentary and examples are provided in the next section.
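To illustrate the structure of Problem~\ref{prob:codesign}, the following Python sketch sets up a small, purely illustrative instance and solves it with \texttt{scipy.optimize}; the toy graph, parameter values, function names, and solver choice are assumptions made here for exposition only and are not the MATLAB implementation in \cite{hawkingithub}. The performance constraint uses the bound from Theorem~\ref{thm:error_bound}, and the final lines evaluate the resulting $e_{ss}$ exactly by solving the Lyapunov equation from Theorem~\ref{thm:lyap}.
\begin{verbatim}
import numpy as np
from scipy.linalg import solve_discrete_lyapunov
from scipy.optimize import minimize
from scipy.stats import norm

# Toy instance (all values below are illustrative choices).
N, d = 4, 2                                # number of agents, state dimension
edges = [(0, 1), (1, 2), (2, 3), (0, 3)]   # candidate edges from G_0
gamma, vartheta = 1.0 / (2 * N), 10.0      # stepsize and weighting factor
e_R, lam2_min = 2.0, 0.2                   # error and connectivity requirements
delta, b = 0.05, np.ones(N)                # delta_i and adjacency parameters b_i
s = 0.1 * np.ones(N)                       # process-noise standard deviations s_i
eps_max = 0.5 * np.ones(N)                 # weakest acceptable privacy levels

def kappa(delta, eps):
    K = norm.isf(delta)                    # K_delta = Q^{-1}(delta)
    return (K + np.sqrt(K**2 + 2.0 * eps)) / (2.0 * eps)

def unpack(z):                             # decision vector -> (weights, epsilons)
    return z[:len(edges)], z[len(edges):]

def laplacian(w):
    L = np.zeros((N, N))
    for wij, (i, j) in zip(w, edges):
        L[i, j] -= wij; L[j, i] -= wij
        L[i, i] += wij; L[j, j] += wij
    return L

def lambda2(z):
    return np.linalg.eigvalsh(laplacian(unpack(z)[0]))[1]

def error_bound(z):
    # Bound from Theorem 2 evaluated at the current weights and epsilons.
    w, eps = unpack(z)
    L = laplacian(w)
    A = np.diag(np.diag(L)) - L            # weighted adjacency matrix
    deg = np.diag(L)                       # weighted degrees d_i
    sigma2 = (kappa(delta, eps) * b) ** 2
    num = gamma * d * np.sum((np.sum(A**2, axis=1) - deg**2 / N) * sigma2) \
        + (N - 1) / N * np.sum(s**2)
    lam2 = np.linalg.eigvalsh(L)[1]
    return num / (N * lam2 * (2.0 - gamma * lam2))

def objective(z):
    w, eps = unpack(z)
    return np.trace(laplacian(w)) + vartheta * np.sum(eps**2)

z0 = np.concatenate([np.ones(len(edges)), 0.9 * eps_max])
bounds = [(0.0, None)] * len(edges) + [(1e-3, em) for em in eps_max]
cons = [{"type": "ineq", "fun": lambda z: e_R - error_bound(z)},
        {"type": "ineq", "fun": lambda z: lambda2(z) - lam2_min}]
res = minimize(objective, z0, method="SLSQP", bounds=bounds, constraints=cons)
w_opt, eps_opt = unpack(res.x)

# Exact steady-state error of the designed network via Theorem 1.
L = laplacian(w_opt)
A = np.diag(np.diag(L)) - L
P = np.eye(N) - np.ones((N, N)) / N
Sigma_z = gamma**2 * A @ np.diag((kappa(delta, eps_opt) * b) ** 2) @ A + np.diag(s**2)
M = np.eye(N) - gamma * L - np.ones((N, N)) / N
e_ss = d / N * np.trace(solve_discrete_lyapunov(M, P @ Sigma_z @ P))
\end{verbatim}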
\input{new_sims} \section{Conclusions}\label{sec:conclusions} In this paper we have studied the problem of differentially private formation control. This work enables agents to collaboratively assemble into formations with bounded steady state error and provides methods for solving for the error covariance matrix at steady-state. This work also develops and solves an optimization problem to design the optimal network and privacy parameters for differentially private formation control. Future work includes generalizing to other privacy/performance co-design problems and implementation on mobile robots. \bibliographystyle{IEEEtran} \bibliography{mybib} \input{appendix} \end{document} \section{Simulations} \label{sec:sims} \begin{figure} \centering \includegraphics[width=0.3\textwidth]{initial_topology.pdf} \caption{The fixed input topology used for the simulation results presented in Section~\ref{sec:sims}. This topology contains $10$ nodes and specifies the edges that can be present, though the edge weights are not specified and will be optimized over.} \label{FixedTopology} \end{figure} In this section we provide simulation results for optimal privacy/network co-design. There are four main parameters that we have control over in Problem~\ref{prob:codesign}: (i) $e_R,$ to control the performance of the system, (ii) $\epsilon^{max}_i,$ to specify the weakest allowable privacy level for agent $i$, (iii) $\lambda_{2L},$ to control the connectivity of the designed network, and (iv) $\vartheta,$ which weights optimizing performance versus privacy. In this section, we first define an input topology as the undirected, simple graph $\mathcal{G}$ shown in Figure~\ref{FixedTopology}. Then, we manually tune the parameters $e_R,\epsilon_i^{max},\lambda_{2_L},$ and $\vartheta,$ and run privacy/network co-design for various sets of parameters to obtain a weighted graph and set of privacy parameters $\{\epsilon_i\}_{i=1}^N$. Throughout this section the smaller a node is drawn the more private it is, i.e., $\epsilon_i$ gets smaller as node $i$ shrinks, and the thicker an edge is drawn the more weight it has. In simulation, edges with weights satisfying $w_{ij}<10^{-4}$ are considered deleted. We begin by manually adjusting $e_R$ with all other parameters fixed. \emph{Example 1: (Trading off privacy and performance)} Fix the input graph $\mathcal{G}$ shown in Figure~\ref{FixedTopology}. Fix $$\epsilon^{max}= [0.4 ,0.9, 0.55, 0.35, 0.8, 0.45, 0.7, 0.5, 0.52, 0.58]^T,$$ $\gamma=1/2N,\vartheta=10,$ $\lambda_{2L}=0.2,$ $\delta_i=0.05,$ and $b_i=1$ for all $i.$ Now let $e_R$ take on the values $$e_R\in\{2,4,8,16,32,64\}.$$ For each of these values, privacy/network co-design was used to design the weighted graphs shown in Figure~\ref{fig:er_tune} and the privacy parameters shown in Figure~\ref{fig:eR_tune_bar}. In Figure~\ref{fig:er_tune}, as we allow weaker performance, quantified by larger $e_R,$ the agents are able to use a stronger level of privacy. This is illustrated by the nodes shrinking as $e_R$ increases from Figure~\ref{fig:biggest_er} to~\ref{fig:smallest_er}. Furthermore, privacy/network co-design makes the resulting graph less connected when we allow weaker performance. For example, when comparing Figure~\ref{fig:biggest_er} to Figure~\ref{fig:smallest_er}, the output network topology has fewer edges when weaker performance is allowed. In Figure~\ref{fig:eR_tune_bar}, we can see that as the required level of performance decreases, co-design allows the agents to be more private. 
This trend persists for all agents in varying magnitude which is influenced by the topology and each $\epsilon_i^{max}.$ We can also see that $\epsilon_i$ decreases rapidly from $e_R=2$ to $e_R=8,$ and then decreases slower for $e_R>8.$ This shows that relatively small changes in $e_R$ can lead to large changes in the resulting privacy level, i.e., if we relax performance slightly it is possible to gain a much stronger level of privacy, which occurs when $\epsilon_i$ is small for each agent.\hfill$\triangle$ \begin{figure*} \centering \begin{subfigure}[t]{\textwidth} \centering \subfloat[$e_R= 2$\label{fig:biggest_er}]{ \includegraphics[width=0.35\linewidth]{ess_2.pdf}} \quad\quad\quad \subfloat[$e_R=4$]{ \includegraphics[width=0.35\linewidth]{ess_4.pdf}} \end{subfigure} \begin{subfigure}[t]{\textwidth} \centering \subfloat[$e_R=8$]{ \includegraphics[width=0.35\linewidth]{ess_8.pdf}} \quad\quad\quad \subfloat[$e_R=16$]{ \includegraphics[width=0.35\linewidth]{ess_16.pdf}} \end{subfigure} \begin{subfigure}[t]{\textwidth} \centering \subfloat[$e_R= 32$]{ \includegraphics[width=0.35\linewidth]{ess_32.pdf}} \quad\quad\quad \subfloat[$e_R=64$\label{fig:smallest_er}]{ \includegraphics[width=0.35\linewidth]{ess_64.pdf}} \end{subfigure} \caption{The outputs of privacy/network co-design with fixed parameters specified in Example $1$ for different values of $e_R\in\{2,4,8,16,32,64\}.$ The smaller a node is drawn, the more private it is, i.e., its value of $\epsilon_i$ is smaller. The thicker an edge is drawn, the larger the edge weight, i.e., $w_{ij}$ is larger. We can see that when we allow weaker performance, indicated by larger $e_R,$ each agent has a stronger level of privacy and the topology uses less weight as illustrated by the nodes shrinking from Figure~\ref{fig:biggest_er} to Figure~\ref{fig:smallest_er}. Furthermore, privacy/network co-design makes the resulting graph less connected when we allow weaker performance. For example, when comparing Figure~\ref{fig:biggest_er} to Figure~\ref{fig:smallest_er}, the output network topology has fewer edges when weaker performance is allowed. } \label{fig:er_tune} \end{figure*} \begin{figure} \centering \includegraphics[width=.5\textwidth]{ess_tune_bars_1.pdf} \caption{The numerical values of privacy parameters as designed by privacy/network co-design with fixed parameters specified in Example $1$ and $e_R\in\{2,16,64\}.$ As the required level of performance decreases, co-design allows the agents to be more private. This trend persists for all agents.} \label{fig:eR_tune_bar} \end{figure} \emph{Example 2: (The Maximum Level of Privacy)} Here we fix all of the parameters other than $\epsilon_i^{max}.$ Specifically, fix ${\gamma=1/2N},\lambda_{2L}=0.2,$ $\vartheta=10,e_R=2,\delta_i=0.05,$ and $b_i=1$ for all $i.$ We consider the homogeneous case where each agent has the same required level of privacy, i.e., $\epsilon_i^{max}=\epsilon^{max}$ for $i\in\{1,\dots,N\}.$ Privacy/network co-design was run for $\epsilon^{max}\in\{0.03,0.04,0.05\}.$ Figure~\ref{fig:eps_max_tune} presents the output communication topologies and Figure~\ref{fig:eps_max_bars} shows the output privacy parameters for each agent as designed by privacy/network co-design. In Figure~\ref{fig:eps_max_tune}, we can see that as we allow each agent to be less private, co-design actually removes edges. 
Specifically, the edges $(8,9)$, $(2,4),$ and $(4,5)$ that are present in Figure~\ref{fig:smallest_eps} when $\epsilon^{max}=0.03$ are not present in Figure~\ref{fig:biggest_eps} when $\epsilon^{max}=0.05.$ This shows that co-design is trying to use as little edge weight or as few edges as possible to meet the constraints. In other words, when each agent uses weaker privacy, i.e., larger $\epsilon_i$ and $\epsilon^{max}$ for each agent $i,$ co-design produces a network with less communication to achieve the same level of performance. In Figure~\ref{fig:eps_max_bars}, we can see that when $\epsilon_i^{max}=0.03,$ each $\epsilon_i$ is close to $\epsilon_i^{max},$ i.e., the agents are using the weakest privacy possible. This occurs because, with $e_R=2,$ we require relatively strong performance, which limits the strength of agents' privacy. As a result, when $\epsilon_i^{max}=0.03$ privacy/network co-design finds the optimal network to be one in which each agent uses the weakest privacy possible, i.e., $\epsilon_i$ is close to $\epsilon_i^{max}$ for each $i.$ This is because a stronger level of privacy would require the communication topology to be more connected, which would cause $\textnormal{Tr}(L(\mathcal{G}))$ in the objective function of Problem~\ref{prob:codesign} to grow. However, when $\epsilon_i^{max}\in\{0.04,0.05\}$ the agents use stronger privacy than the weakest level they specified, as the resulting $\epsilon_i$'s are not at their constrained maximum.\hfill$\triangle$ \begin{figure*} \centering \subfloat[$\epsilon^{max}=0.03$\label{1a}]{\label{fig:smallest_eps} \includegraphics[width=0.3\linewidth]{eps_03.pdf}} \subfloat[$\epsilon^{max}=0.04$\label{1b}]{ \includegraphics[width=0.3\linewidth]{eps_04.pdf}} \subfloat[$\epsilon^{max}=0.05$\label{1c}]{\label{fig:biggest_eps} \includegraphics[width=0.3\linewidth]{eps_05.pdf}} \caption{The outputs of privacy/network co-design with fixed parameters specified in Example $2$ for different values of $\epsilon_i^{max}\in\{0.03,0.04,0.05\}.$ The smaller a node is drawn, the more private it is, i.e., its value of $\epsilon_i$ is smaller. The thicker an edge is drawn, the larger the edge weight, i.e., $w_{ij}$ is larger. As we allow each agent to be less private, co-design actually removes edges. Specifically, the edges $(8,9),(2,4),$ and $(4,5)$ in Figure~\ref{fig:smallest_eps} are not present in Figure~\ref{fig:biggest_eps}. This shows that co-design is trying to use as little edge weight or as few edges as possible to meet the constraints, and when agents are less private they need less communication to achieve the same level of performance. } \label{fig:eps_max_tune} \end{figure*} \begin{figure} \centering \includegraphics[width=.5\textwidth]{eps_max_tune_bars.pdf} \caption{The numerical values of privacy parameters as designed by privacy/network co-design with fixed parameters specified in Example $2$ and $\epsilon_i^{max}\in\{0.03,0.04,0.05\}.$ When $\epsilon_i^{max}=0.03,$ the agents are using the weakest privacy possible, which occurs because $e_R=2$ requires relatively strong performance, and agents must share more information to facilitate this.
However, when $\epsilon_i^{max}\in\{0.04,0.05\}$ the agents are using stronger privacy than the weakest level they specified, as the resulting $\epsilon_i$'s are not at their constrained maximum.} \label{fig:eps_max_bars} \end{figure} \emph{Example 3: (The Minimum Level of Connectivity)} Here we fix all of the parameters other than $\lambda_{2L}.$ Specifically, fix $\gamma=1/2N,$ $\vartheta=10,$ $e_R=1,$ $\delta_i=0.05,$ $b_i=1$ for all $i,$ and $$\epsilon^{max}= [0.4 ,0.9, 0.55, 0.35, 0.8, 0.45, 0.7, 0.5, 0.52, 0.58]^T.$$ Then we solve the privacy/network co-design problem for $\lambda_{2L}\in\{0.1,0.5,1\}.$ Figure~\ref{fig:tune_lambda2l} gives the output communication topologies and Figure~\ref{fig:lamdba2l_bars} shows the output privacy parameters for each agent. In Figure~\ref{fig:tune_lambda2l}, we can see that as we require the output network to be more connected, more edge weight is used and each agent uses weaker privacy, as illustrated by the growing nodes and edges from Figure~\ref{fig:smallest_lambda} to Figure~\ref{fig:biggest_lambda}. We can also see that co-design adds weight to certain edges more than others. Specifically, the weight of the edge $(1,7)$ drastically increases while the weight of the edge $(9,10)$ does not increase much. Agents $9$ and $10$ are more private than agents $1$ and $7$ when $\lambda_{2L}=1.0$ as illustrated by the size of the nodes in Figure~\ref{fig:tune_lambda2l}. Thus, privacy/network co-design uses smaller edge weights for agents with stronger privacy. This makes intuitive sense since agents with strong privacy inject higher-variance noise into the formation control protocol than agents with weaker privacy, and reducing the weights of the edges connected to those agents helps mitigate this impact. In Figure~\ref{fig:lamdba2l_bars}, as $\lambda_{2L}$ is increased, $\epsilon_i$ increases for most agents, i.e., as the network is required to be more connected, the agents use weaker privacy. Here privacy/network co-design adds the necessary edge weight to meet the connectivity constraint, and then weakens privacy to meet the performance constraint. Thus privacy/network co-design trades off privacy and performance as desired.\hfill$\triangle$ \begin{figure*} \centering \subfloat[$\lambda_{2L}=0.1$\label{fig:smallest_lambda}]{ \includegraphics[width=0.3\linewidth]{lambda2l_p1.pdf}} \subfloat[$\lambda_{2L}=0.5$]{ \includegraphics[width=0.3\linewidth]{lambda2l_p5.pdf}} \subfloat[$\lambda_{2L}=1.0$\label{fig:biggest_lambda}]{ \includegraphics[width=0.3\linewidth]{lambda2l_1.pdf}} \caption{ The outputs of privacy/network co-design with fixed parameters specified in Example $3$ for different values of $\lambda_{2L}\in\{0.1,0.5,1\}.$ The smaller a node is drawn, the more private it is, i.e., its value of $\epsilon_i$ is smaller. The thicker an edge is drawn, the larger the edge weight, i.e., $w_{ij}$ is larger. As we require the output network to be more connected, more edge weight is used and each agent uses weaker privacy. When $\lambda_{2L}=1,$ the output $\epsilon_i$ is close to $\epsilon_i^{max}$ for each $i.$ Here privacy/network co-design adds edge weight to meet the connectivity constraint, and then weakens privacy to meet the performance constraint, thus trading off privacy and performance as desired.
} \label{fig:tune_lambda2l} \end{figure*} \begin{figure} \centering \includegraphics[width=.5\textwidth]{lambda2l_tune_bars.pdf} \caption{The numerical values of privacy parameters as designed by privacy/network co-design with fixed parameters specified in Example $3$ and $\lambda_{2L}\in\{0.1,0.5,1\}.$ As the output is required to be more connected, increasing $\lambda_{2L},$ each agent uses weaker privacy. When $\lambda_{2L}=1,$ the largest value, the agents are using nearly their weakest acceptable level of privacy in order to meet the performance requirement.} \label{fig:lamdba2l_bars} \end{figure} \emph{Example 4: (Tuning the Objective Function)} Here we fix all parameters other than $\vartheta.$ Specifically, fix $\gamma=1/2N,$ $\lambda_{2L}=0.05,$ $e_R=1,$ $\delta_i=0.05,$ and $b_i=1$ for all $i,$ and $$\epsilon^{max}= [0.4 ,0.9, 0.55, 0.35, 0.8, 0.45, 0.7, 0.5, 0.52, 0.58]^T.$$ Then we solve the co-design problem for $\vartheta\in\{1,100,1000\}.$ Figure~\ref{fig:vartheta_tune} presents the output communication topologies and Figure~\ref{fig:vartheta_bars} shows the output privacy parameters for each agent. In Figure~\ref{fig:vartheta_tune}, we can see that as we increase $\vartheta$ from $\vartheta=1$ in Figure~\ref{fig:smallest_var} to $\vartheta=1000$ in Figure~\ref{fig:biggest_var}, each of the edge weights increases slightly. Intuitively, as we increase $\vartheta,$ we are prioritizing minimizing $\sum_{i=1}^N \epsilon_i^2$ rather than $\textnormal{Tr}(L(\mathcal{G})).$ As $\vartheta$ changes, the weights do not change very much, though the privacy levels change drastically as illustrated in Figure~\ref{fig:vartheta_bars}. In Figure~\ref{fig:vartheta_bars}, we can see that as $\vartheta$ is increased, the output privacy parameters are much lower for each agent. When $\vartheta=1,$ the privacy parameters $\epsilon_i$ are near their constrained maxima, $\epsilon_i^{max}.$ When $\vartheta=1000,$ we are prioritizing minimizing $\sum_i \epsilon_i^2$ rather than $\textnormal{Tr}(L(\mathcal{G})),$ and thus the output privacy parameters are much smaller. Overall, this shows that increasing $\vartheta$ allows the agents to achieve a stronger level of privacy.\hfill$\triangle$ \begin{figure*} \centering \subfloat[$\vartheta=1$\label{fig:smallest_var}]{ \includegraphics[width=0.3\linewidth]{vartheta_1.pdf}} \subfloat[$\vartheta=100$]{ \includegraphics[width=0.3\linewidth]{vartheta_100.pdf}} \subfloat[$\vartheta=1000$\label{fig:biggest_var}]{ \includegraphics[width=0.3\linewidth]{vartheta_1000.pdf}} \caption{The outputs of privacy/network co-design with fixed parameters specified in Example $4$ for different values of $\vartheta\in\{1,100,1000\}.$ The smaller a node is drawn, the more private it is, i.e., its value of $\epsilon_i$ is smaller. The thicker an edge is drawn, the larger the edge weight, i.e., $w_{ij}$ is larger. As we increase $\vartheta,$ the edge weights increase. This is intuitive because as we increase $\vartheta$ we are prioritizing minimizing $\sum_i \epsilon_i^2$ rather than $\textnormal{Tr}(L(\mathcal{G})).$ Each agent also uses much stronger privacy as $\vartheta$ is increased, as illustrated by the size of each node shrinking from Figure~\ref{fig:smallest_var} to Figure~\ref{fig:biggest_var} and further illustrated in Figure~\ref{fig:vartheta_bars}.
} \label{fig:vartheta_tune} \end{figure*} \begin{figure} \centering \includegraphics[width=.5\textwidth]{vartheta_tune_bars.pdf} \caption{The numerical values of privacy parameters as designed by privacy/network co-design with fixed parameters specified in Example $4$ and $\vartheta\in\{1,100,1000\}.$ As $\vartheta$ is increased, the output privacy parameters are much lower for each agent. When $\vartheta=1,$ the privacy parameters $\epsilon_i$ are near their constrained maxima, $\epsilon_i^{max}.$ When $\vartheta=1000,$ we prioritize minimizing $\sum_i \epsilon_i^2$ rather than $\textnormal{Tr}(L(\mathcal{G})),$ and thus the output privacy parameters are much smaller. This further illustrates the flexibility of privacy/network co-design.} \label{fig:vartheta_bars} \end{figure} \appendix \subsection{Proof of Lemma~\ref{lem:ntwrk_dyn}} \label{sec:ntwrk_dyn_proof} From~\eqref{DP_node_level} we have the node-level protocol \[ \bar{x}_{i}(k+1)=\bar{x}_{i}(k)+\gamma\sum_{j\in N_i}w_{ij}(\bar{x}_{j}(k)+v_{j}(k)-\bar{x}_{i}(k))+n_i(k), \] which we factor as \begin{align*} \bar{x}_{i}(k+1)=\bar{x}_{i}(k)+\gamma\sum_{j\in N_i}w_{ij}(\bar{x}_{j}(k)-\bar{x}_{i}(k))\\ +\gamma\sum_{j\in N_i}w_{ij}v_{j}(k)+n_i(k). \end{align*} Let $z_{i}(k)=\gamma\sum_{j\in N_i}w_{ij}v_{j}(k)+n_i(k)$ and note that \[ z_{i}(k)=\gamma \big([A(\mathcal{G})]_{row\ i}\otimes I_d \big)v(k) +n_i(k), \] where~$v(k)=(v_{1}(k)^{T},\dots,v_{N}(k)^{T})^{T}\in\mathbb{R}^{Nd}$, $[A(\mathcal{G})]_{row\ i} \in \mathbb{R}^{1 \times N}$ is the~$i^{th}$ row of~$A(\mathcal{G})$, and~$\otimes$ is the Kronecker product. Next we define the vector~$z_{[l]}(k)=[z_{1_{[l]}}(k),\dots,z_{N_{[l]}}(k)]^{T}\in\mathbb{R}^N$, and we have $z_{[l]}(k)=\gamma A(\mathcal{G})v_{[l]}(k)+n_{[l]}(k)$ at the network level. Since $v_{[l]}(k)$ and $n_{[l]}(k)$ are zero mean, $z_{[l]}(k)$ is zero mean. The covariance of $z_{[l]}(k)$ is calculated as \begin{align} \Sigma_{z} &= E[z_{[l]}(k)z_{[l]}(k)^{T}]\\ &= E[\left(\gamma A(\mathcal{G})v_{[l]}(k)+n_{[l]}(k)\right)\left(\gamma A(\mathcal{G})v_{[l]}(k)+n_{[l]}(k)\right)^{T}]. \end{align} Then, since $v_{[l]}(k)$ and $n_{[l]}(k)$ are statistically independent, $E[v_{[l]}(k)n_{[l]}(k)^T]=E[n_{[l]}(k)v_{[l]}(k)^T]=0$. Applying this fact, the linearity of expectation, and the symmetry of~$A(\mathcal{G})$ gives \begin{align} \Sigma_{z} & =\gamma^{2}A(\mathcal{G})E[v_{[l]}(k)v_{[l]}(k)^{T}]A(\mathcal{G})+E[n_{[l]}(k)n_{[l]}(k)^{T}]\\ & =\gamma^{2}A(\mathcal{G})\Sigma_{v}A(\mathcal{G})+\Sigma_n. \end{align} Then $z_{[l]}(k)\sim\mathcal{N}(0,\gamma^{2}A(\mathcal{G})\Sigma_{v}A(\mathcal{G})+\Sigma_n).$ \hfill $\blacksquare$ \subsection{Proof of Lemma~\ref{lem:jbberror}} \label{sec:error_dyn_proof} First, we expand $e_{[l]}(k+1)$: \begin{align*} e_{[l]}(k+1) & =\bar{x}_{[l]}(k+1)-\beta_{[l]}(k+1)\\ & =\bar{x}_{[l]}(k+1)-\frac{1}{N}\mathbb{1}^{T}\bar{x}_{[l]}(k+1)\mathbb{1}\\ & =\left(I_N-\frac{1}{N}\mathbb{1}\mathbb{1}^{T}\right)\bar{x}_{[l]}(k+1)\\ & =\left(I_N-\frac{1}{N}\mathbb{1}\mathbb{1}^{T}\right)\left((I_N-\gamma L(\mathcal{G}))\bar{x}_{[l]}(k)+z_{[l]}(k)\right). \end{align*} Expanding further gives \begin{align} e_{[l]}(k+1) &=\left(I_N-\gamma L(\mathcal{G})-\frac{1}{N}\mathbb{1}\mathbb{1}^{T}\right)\bar{x}_{[l]}(k)\\&+\left(I_N-\frac{1}{N}\mathbb{1}\mathbb{1}^{T}\right)z_{[l]}(k), \end{align} where we have used that $\fatone^{T}L(\mathcal{G})=0,$ which follows from $L(\mathcal{G})\fatone=0$ and the symmetry of~$L(\mathcal{G}).$ Let ${\mathcal{M}=I_N-\gamma L(\mathcal{G})-\frac{1}{N}\fatone\fatone^T}$ and note that \[ \mathcal{M}\beta_{[l]}(k)=\left(I_N-\frac{1}{N}\mathbb{1}\mathbb{1}^{T}\right)\beta_{[l]}(k)=0.
\] Then we have \begin{align*} e_{[l]}(k+1) & =\mathcal{M}\bar{x}_{[l]}(k)+\left(I_N-\frac{1}{N}\mathbb{1}\mathbb{1}^{T}\right)z_{[l]}(k)\\ & =\mathcal{M}\left(\bar{x}_{[l]}(k)-\beta_{[l]}(k)\right)+\left(I_N-\frac{1}{N}\mathbb{1}\mathbb{1}^{T}\right)z_{[l]}(k)\\ & =\mathcal{M}e_{[l]}(k)+\left(I_N-\frac{1}{N}\mathbb{1}\mathbb{1}^{T}\right)z_{[l]}(k), \end{align*} which proves~\eqref{error_dyn}. Plugging this into the definition of $\Sigma_{e_{[l]}}(k+1)$ gives \begin{align*} \Sigma_{e_{[l]}}(k+1) & =E\left[e(k+1)e(k+1)^{T}\right]\\ & =\mathcal{M}E\left[e_{[l]}(k)e_{[l]}(k)^{T}\right]\mathcal{M}\\&+\left(I_N-\frac{1}{N}\mathbb{1}\mathbb{1}^{T}\right)\Sigma_z \left(I_N-\frac{1}{N}\mathbb{1}\mathbb{1}^{T}\right)\end{align*} \begin{align*} & =\mathcal{M}\Sigma_{e_{[l]}}(k)\mathcal{M}+\left(I_N-\frac{1}{N}\mathbb{1}\mathbb{1}^{T}\right)\Sigma_{z}\left(I_N-\frac{1}{N}\mathbb{1}\mathbb{1}^{T}\right), \end{align*} which proves~\eqref{cov_dyn}. Then~\eqref{cov_expand} follows by applying~\eqref{cov_dyn} recursively.\hfill$\blacksquare$ \subsection{Proof of Lemma~\ref{lem:M_properties}} \label{sec:M_proof} We begin by analyzing the eigenvalues of $\mathcal{M}.$ Note that $\mathcal{M}\fatone=0,$ and thus $\mathcal{M}$ has eigenvalue $0$ with eigenvector $\fatone.$ Now let $(\lambda,\ v)$ be an eigenpair of $\mathcal{M}$ with $\lambda\neq0.$ We now show that $\lambda\neq0$ implies that $(\lambda,\ v)$ is also an eigenpair of $I-\gamma L(\mathcal{G}).$ Since $\mathcal{G}$ is connected and undirected, $I-\gamma L(\mathcal{G})$ is a doubly stochastic matrix, it has one eigenvalue of modulus $1$ which is $\lambda(I-\gamma L(\mathcal{G}))=1$ with an eigenvector of $\fatone$, and the rest of the eigenvalues are positive and lie strictly in the unit disk \cite[Lemma 3]{olfati2007consensus}. Using $\fatone^{T}(I_N-\gamma L(\mathcal{G}))=\fatone^{T},$ we have \[ \fatone^{T}v=\fatone^{T}(I_N-\gamma L(\mathcal{G}))v. \] Adding and subtracting $\fatone^{T}(\frac{1}{N}\fatone\fatone^T)v$ gives \begin{align*} \fatone^{T}v & =\fatone^{T}(I_N-\gamma L(\mathcal{G}))v-\fatone^{T}(\frac{1}{N}\fatone\fatone^T)v+\fatone^{T}(\frac{1}{N}\fatone\fatone^T)v\\ & =\fatone^{T}\mathcal{M}v+\fatone^{T}(\frac{1}{N}\fatone\fatone^T)v, \end{align*} then plugging in $\mathcal{M}v=\lambda v$ and $\fatone^{T}(\frac{1}{N}\fatone\fatone^T)=\fatone^{T}$ gives \begin{align*} \fatone^{T}v & =\lambda\fatone^{T}v+\fatone^{T}v\\ & =(\lambda+1)\fatone^{T}v. \end{align*} Since $\lambda\neq0,$ we must have $\fatone^{T}v=0$ for the above to be true. This implies that $v$ is orthogonal to $\fatone.$ Furthermore we have that $(\frac{1}{N}\fatone\fatone^T)v=0,$ so \begin{align*} \lambda v &=\mathcal{M}v =(I_N-\gamma L(\mathcal{G})-\frac{1}{N}\fatone\fatone^T)v\\ & =(I_N-\gamma L(\mathcal{G}))v. 
\end{align*} This means that any non-zero eigenvalue of $\mathcal{M}$ is also an eigenvalue of $I-\gamma L(\mathcal{G})$ and the associated eigenvector $v$ is orthogonal to $\fatone.$ Furthermore, $L(\mathcal{G})$ and $I_N-\gamma L(\mathcal{G})$ have the same eigenvectors and we have that $$\lambda_i(I_N-\gamma L(\mathcal{G}))=1-\gamma\lambda_i(L(\mathcal{G})).$$ Thus, $\lambda_i(\mathcal{M})=1-\gamma\lambda_i(L(\mathcal{G}))$ for ${i\in\{2,\dots,N\}}.$ With the sorting $\lambda_2(L(\mathcal{G}))\leq\dots\leq\lambda_N(L(\mathcal{G})),$ we have that $\lambda_{\max}(\mathcal{M})=1-\gamma \lambda_2(L(\mathcal{G}))<1.$\footnote{While $\lambda_1(L(\mathcal{G}))=0,$ we still have $\lambda_{\max}(\mathcal{M})=1-\gamma \lambda_2(L(\mathcal{G}))$ since $\lambda_1(L(\mathcal{G}))$ has a corresponding eigenvector of $\fatone$ and the non-zero eigenvalues of $\mathcal{M}$ must have eigenvectors orthogonal to $\fatone.$} This implies that all eigenvalues of $\mathcal{M}$ lie strictly in the unit disk. Now $\sigma_{\max}(\mathcal{M})=(1-\gamma\lambda_{2}(L))$ follows from the fact that \begin{align} \left[\sigma_{\max}(\mathcal{M})\right]^{2}& =\left[\left(\lambda_{\max}\left((\mathcal{M})(\mathcal{M})\right)\right)^{1/2}\right]^{2}\\ & =\lambda_{\max}\left((\mathcal{M})^{2}\right)\\ & =\lambda_{\max}\left((\mathcal{M})\right)^{2}\\ & =\left(1-\gamma\lambda_{2}(L(\mathcal{G}))\right)^{2}.\label{sing_val_expr} \end{align} \hfill$\blacksquare$ \subsection{Proof of Theorem~\ref{thm:error_bound}} \label{sec:big_proof} First, with Theorem~\ref{thm:lyap} and~\cite[Equation 151]{kwon1996bounds}~\cite{1102905} we can bound $Tr\left(\Sigma_{\infty}\right)$ as \begin{equation} Tr\left(\Sigma_{\infty}\right)\leq\frac{Tr(Q)}{1-\left(\sigma_{\max}(\mathcal{M})\right)^{2}},\label{survey_bound} \end{equation} where $\sigma_{\max}(\mathcal{M})$ denotes the maximum singular value of $\mathcal{M}.$ We start by expanding $Tr(Q)$ in~\eqref{survey_bound}, which gives \begin{align*} Tr(Q) & =Tr\left((I_N-\frac{1}{N}\fatone\fatone^{T})\Sigma_z(I_N-\frac{1}{N}\fatone\fatone^{T})\right). \end{align*} Applying cyclic permutation of the trace gives \begin{align} Tr(Q) &=Tr\left((I_N-\frac{1}{N}\fatone\fatone^{T})(I_N-\frac{1}{N}\fatone\fatone^{T})\Sigma_z\right).\label{trace_temp} \end{align} Note that $(I_N-\frac{1}{N}\fatone\fatone^{T})(I_N-\frac{1}{N}\fatone\fatone^{T})=I_N-\frac{1}{N}\fatone\fatone^{T}.$ Plugging this and $\Sigma_z=\gamma^{2}A(G)\Sigma_{v}A(G)+\Sigma_{n}$ into~\eqref{trace_temp} gives \begin{align} Tr(Q)& =Tr\left((I_N-\frac{1}{N}\fatone\fatone^{T})\left(\gamma^{2}A(G)\Sigma_{v}A(G)+\Sigma_{n}\right)\right)\\ & =\gamma^{2}Tr\left((I_N-\frac{1}{N}\fatone\fatone^{T})A(G)\Sigma_{v}A(G)\right)\\&+Tr\left((I_N-\frac{1}{N}\fatone\fatone^{T})\Sigma_{n}\right).\label{total_trace} \end{align} We now simplify $Tr\left((I_N-\frac{1}{N}\fatone\fatone^{T})\Sigma_{n}\right).$ First, with $\Sigma_{n}=\textnormal{diag}(s_{1}^{2},\dots,s_{N}^{2})$ the $i^{th}$ diagonal term of $(I_N-\frac{1}{N}\fatone\fatone^{T})\Sigma_{n}$ is given by \[ \left[(I_N-\frac{1}{N}\fatone\fatone^{T})\Sigma_{n}\right]_{ii}=(1-\frac{1}{N})s_{i}^{2}. \] Then we have that \begin{equation} Tr\left((I_N-\frac{1}{N}\fatone\fatone^{T})\Sigma_{n}\right)=\frac{N-1}{N}\sum_{i=1}^N s_{i}^{2}. 
\label{first_trace} \end{equation} To simplify $\gamma^{2}Tr\left((I_N-\frac{1}{N}\fatone\fatone^{T})A(G)\Sigma_{v}A(G)\right)$, the cyclic property of the trace gives \begin{align} &\gamma^{2}Tr\left((I_N-\frac{1}{N}\fatone\fatone^{T})A(G)\Sigma_{v}A(G)\right) \\ &=\gamma^{2}Tr\left(A(G)(I_N-\frac{1}{N}\fatone\fatone^{T})A(G)\Sigma_{v}\right). \end{align} Now, $(I_N-\frac{1}{N}\fatone\fatone^{T})A(G)$ has the form \[ (I_N-\frac{1}{N}\fatone\fatone^{T})A(G)=A(G)-\frac{1}{N}\fatone\fatone^{T}A(G), \] and note that \[ \frac{1}{N}\fatone\fatone^{T}A(G)=\frac{1}{N}\begin{bmatrix}d_{1} & \dots & d_{N}\\ & \ddots\\ d_{1} & \dots & d_{N} \end{bmatrix}. \] Thus \begin{align} &(I_N-\frac{1}{N}\fatone\fatone^{T})A(G)=\\&\begin{bmatrix}-\frac{1}{N}d_{1} & w_{12}-\frac{1}{N}d_{2} & \dots & w_{1N}-\frac{1}{N}d_{N}\\ w_{21}-\frac{1}{N}d_{1} & -\frac{1}{N}d_{2} & & w_{2N}-\frac{1}{N}d_{N}\\ & & \ddots\\ w_{N1}-\frac{1}{N}d_{1} & & \dots & -\frac{1}{N}d_{N} \end{bmatrix}. \end{align} Now, it follows that the $i^{th}$ diagonal term of $A(G)(I_N-\frac{1}{N}\fatone\fatone^{T})A(G)$ is \begin{align*} \left[A(G)(I_N-\frac{1}{N}\fatone\fatone^{T})A(G)\right]_{ii} & =\sum_{j=1}^N w_{ij}^{2}-\frac{1}{N}d_{i}\sum_{j=1}^N w_{ij}\\ & =\sum_{j=1}^N w_{ij}^{2}-\frac{d_{i}^{2}}{N}, \end{align*} and \begin{align} &\gamma^{2}Tr\left(A(G)(I_N-\frac{1}{N}\fatone\fatone^{T})A(G)\Sigma_{v}\right)\\&=\gamma^{2}\sum_{i=1}^N \left(\sum_{j=1}^N w_{ij}^{2}-\frac{d_{i}^{2}}{N}\right)\sigma_{i}^{2}.\label{second_trace} \end{align} Plugging~\eqref{first_trace} and~\eqref{second_trace} into~\eqref{total_trace} gives \[ Tr(Q)=\gamma^{2}\sum_{i=1}^N \left(\sum_{j=1}^N w_{ij}^{2}-\frac{d_{i}^{2}}{N}\right)\sigma_{i}^{2}+\frac{N-1}{N}\sum_{i=1}^N s_{i}^{2}, \] and plugging this into~\eqref{survey_bound} gives \begin{equation} Tr\left(\Sigma_{\infty}\right)\leq\frac{\gamma^{2}\sum_{i=1}^N \left(\sum_{j=1}^N w_{ij}^{2}-\frac{d_{i}^{2}}{N}\right)\sigma_{i}^{2}+\frac{N-1}{N}\sum_{i=1}^N s_{i}^{2}}{1-\left(\sigma_{\max}(\mathcal{M})\right)^{2}}.\label{bound_simp_trace} \end{equation} We now simplify $\left(\sigma_{\max}(\mathcal{M})\right)^{2}$: plugging in~\eqref{eq:sing_val} from Lemma~\ref{lem:M_properties} gives \begin{equation} Tr\left(\Sigma_{\infty}\right)\leq\frac{\gamma^{2}\sum_{i=1}^N\left(\sum_{j}w_{ij}^{2}-\frac{d_{i}^{2}}{N}\right)\sigma_{i}^{2}+\frac{N-1}{N}\sum_{i=1}^Ns_{i}^{2}}{1-\left(1-\gamma\lambda_{2}(L)\right)^{2}}. \end{equation} Expanding the denominator and simplifying gives \begin{align*} Tr(\Sigma_{\infty}) & \leq\frac{\gamma^{2}\sum_{i=1}^{N}\left(\sum_{j=1}^{N}w_{ij}^{2} -\frac{d_i^2}{N}\right)\sigma_i^2 +\frac{N-1}{N}\sum_{i=1}^N s_i^2}{1-\left(1-2\gamma\lambda_{2}(L)+\gamma^{2}\lambda_{2}(L)^{2}\right)}\\ & =\frac{\gamma\sum_{i=1}^{N}\left(\sum_{j=1}^{N}w_{ij}^{2} -\frac{d_i^2}{N}\right)\sigma_i^2 +\frac{N-1}{\gamma N}\sum_{i=1}^N s_i^2}{\lambda_{2}(L)\left(2-\gamma\lambda_{2}(L)\right)}. \end{align*} Then using $e_{ss}=\frac{d}{N}Tr(\Sigma_{\infty})$ we arrive at the expression of interest.\hfill$\blacksquare$
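For a concrete check of the above bound, the quantities involved can be computed numerically. The following is a minimal sketch (not part of the proof; the graph weights, step size $\gamma$, and noise variances are illustrative choices of ours) that forms $\mathcal{M}=I_N-\gamma L(\mathcal{G})-\frac{1}{N}\fatone\fatone^{T}$, verifies $\sigma_{\max}(\mathcal{M})=1-\gamma\lambda_{2}(L(\mathcal{G}))$ from Lemma~\ref{lem:M_properties}, iterates the covariance recursion~\eqref{cov_dyn}, and compares $Tr(\Sigma_{\infty})$ with the bound~\eqref{bound_simp_trace}.
\begin{verbatim}
import numpy as np

# Numerical sanity check of the steady-state trace bound (illustrative values).
rng = np.random.default_rng(0)
N = 6
W = rng.uniform(0.5, 1.5, size=(N, N))
W = np.triu(W, 1); W = W + W.T           # symmetric weights w_ij, zero diagonal
A = W                                    # weighted adjacency A(G)
d = A.sum(axis=1)                        # degrees d_i
L = np.diag(d) - A                       # graph Laplacian L(G)
gamma = 0.9 / np.linalg.eigvalsh(L)[-1]  # step size with gamma * lambda_N(L) < 1
J = np.ones((N, N)) / N
M = np.eye(N) - gamma * L - J

lam = np.sort(np.linalg.eigvalsh(L))
print(np.linalg.svd(M, compute_uv=False)[0], 1 - gamma * lam[1])  # should agree

sigma2 = rng.uniform(0.1, 0.5, N)        # variances sigma_i^2 entering Sigma_v
s2 = rng.uniform(0.1, 0.5, N)            # variances s_i^2 entering Sigma_n
Sigma_z = gamma**2 * A @ np.diag(sigma2) @ A + np.diag(s2)
Q = (np.eye(N) - J) @ Sigma_z @ (np.eye(N) - J)

Sigma = np.zeros((N, N))
for _ in range(5000):                    # iterate the covariance recursion
    Sigma = M @ Sigma @ M + Q
bound = (gamma**2 * ((W**2).sum(axis=1) - d**2 / N) @ sigma2
         + (N - 1) / N * s2.sum()) / (1 - (1 - gamma * lam[1])**2)
print(np.trace(Sigma), bound)            # the trace should not exceed the bound
\end{verbatim}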
2205.13403v2
http://arxiv.org/abs/2205.13403v2
Mean Field Games of Controls: Propagation of Monotonicities
\documentclass[11pt]{article} \usepackage{amsmath} \usepackage{amssymb} \usepackage{latexsym} \usepackage{enumitem} \usepackage{mathrsfs} \usepackage{comment} \usepackage{color} \usepackage[colorlinks=true,urlcolor=blue, citecolor=red,linkcolor=blue,linktocpage,pdfpagelabels,bookmarksnumbered,bookmarksopen]{hyperref} \def\red#1{{\textcolor{red}{#1}}} \def\blue#1{{\textcolor{blue}{#1}}} \renewcommand{\baselinestretch}{1.3} \setlength{\textheight}{8.65in} \setlength{\textwidth}{6.35in} \setlength{\evensidemargin}{0.1in} \setlength{\oddsidemargin}{0.1in} \setlength{\headheight}{0in} \setlength{\headsep}{0in} \newtheorem{definition}{Definition} \newtheorem{theorem}{Theorem} \newtheorem{corollary}{Corollary} \newtheorem{lemma}{Lemma} \newtheorem{remark}{Remark} \newtheorem{example}{Example} \newtheorem{exercise}{Exercise} \newtheorem{assumption}{Assumption} \def\qed{ \ \vrule width.2cm height.2cm depth0cm\smallskip} \newenvironment{proof}{\noindent {\bf Proof.\/}}{$\qed$\vskip 0.1in} \newenvironment{proofof}[1]{\noindent {\bf Proof of #1.\/}}{$\qed$\vskip 0.1in} \newcommand{\la}{\langle} \newcommand{\ra}{\rangle} \newcommand{\hP}{\hat\dbP} \newcommand{\esssup}{\operatornamewithlimits{ess sup}} \newcommand{\ol}{\overline} \newcommand{\ul}{\underline} \newcommand{\eps}{\varepsilon} \newcommand{\LDP}{{{LDP }}} \newcommand{\MDP}{{{MDP }}} \newcommand{\LDPx}{{{LDP}}} \newcommand{\Las}{{\La^*}} \newcommand{\ba}{\begin{array}} \newcommand{\ea}{\end{array}} \newcommand{\be}{\begin{equation}} \newcommand{\ee}{\end{equation}} \newcommand{\bea}{\begin{eqnarray}} \newcommand{\eea}{\end{eqnarray}} \newcommand{\beaa}{\begin{eqnarray*}} \newcommand{\eeaa}{\end{eqnarray*}} \newcommand{\Remark}{\noindent{\bf Remark:}\ } \newcommand{\Remarks}{\noindent{\bf Remarks:}\ } \newcommand{\essinf}{\operatornamewithlimits{essinf}} \def\neg{\negthinspace} \def\dbB{\mathbb{B}} \def\dbC{\mathbb{C}} \def\dbD{\mathbb{D}} \def\dbE{\mathbb{E}} \def\dbF{\mathbb{F}} \def\dbG{\mathbb{G}} \def\dbH{\mathbb{H}} \def\dbI{\mathbb{I}} \def\dbJ{\mathbb{J}} \def\dbK{\mathbb{K}} \def\dbL{\mathbb{L}} \def\dbM{\mathbb{M}} \def\dbN{\mathbb{N}} \def\dbP{\mathbb{P}} \def\dbR{\mathbb{R}} \def\dbS{\mathbb{S}} \def\dbT{\mathbb{T}} \def\dbQ{\mathbb{Q}} \def\dbZ{\mathbb{Z}} \def\Dom{{\rm dom}} \def\sL{\mathscr{L}} \def\sN{\mathscr{N}} \def\a{\alpha} \def\b{\beta} \def\g{\gamma} \def\d{\delta} \def\e{\varepsilon} \def\z{\zeta} \def\k{\kappa} \def\l{\lambda} \def\m{\mu} \def\n{\nu} \def\si{\sigma} \def\t{\tau} \def\f{\varphi} \def\th{\theta} \def\o{\omega} \def\h{\widehat} \def\G{\Gamma} \def\D{\Delta} \def\Th{\Theta} \def\L{\Lambda} \def\Si{\Sigma} \def\F{\Phi} \def\O{\Omega} \def\cA{{\cal A}} \def\cB{{\cal B}} \def\cC{{\cal C}} \def\cD{{\cal D}} \def\cE{{\cal E}} \def\cF{{\cal F}} \def\cG{{\cal G}} \def\cH{{\cal H}} \def\cI{{\cal I}} \def\cJ{{\cal J}} \def\cK{{\cal K}} \def\cL{{\cal L}} \def\cM{{\cal M}} \def\cN{{\cal N}} \def\cO{{\cal O}} \def\cP{{\cal P}} \def\cQ{{\cal Q}} \def\cR{{\cal R}} \def\cS{{\cal S}} \def\cT{{\cal T}} \def\cU{{\cal U}} \def\cV{{\cal V}} \def\cW{{\cal W}} \def\cX{{\cal X}} \def\cY{{\cal Y}} \def\cZ{{\cal Z}} \def\ch{\textsc{h}} \def\no{\noindent} \def\eq{\eqalign} \def\ss{\smallskip} \def\ms{\medskip} \def\bs{\bigskip} \def\q{\quad} \def\qq{\qquad} \def\hb{\hbox} \def\pa{\partial} \def\cd{\cdot} \def\cds{\cdots} \def\lan{\langle} \def\ran{\rangle} \def\td{\nabla} \def\bD{{\bf D}} \def\bF{{\bf F}} \def\bG{{\bf G}} \def\tr{\hbox{\rm tr}} \def\qed{ \hfill \vrule width.25cm height.25cm depth0cm\smallskip} 
\newcommand{\dfnn}{\stackrel{\triangle}{=}} \newcommand{\basa}{\begin{assumption}} \newcommand{\easa}{\end{assumption}} \newcommand{\tbar}{\overline{t}} \newcommand{\xbar}{\overline{x}} \newcommand{\bas}{\begin{assum}} \newcommand{\eas}{\end{assum}} \newcommand{\lime}{\lim_{\epsilon \rightarrow 0}} \newcommand{\zep}{z^\epsilon} \newcommand{\bep}{b^\epsilon} \newcommand{\hbep}{\hat{b}^\epsilon} \def\limsup{\mathop{\overline{\rm lim}}} \def\liminf{\mathop{\underline{\rm lim}}} \def\ua{\mathop{\uparrow}} \def\da{\mathop{\downarrow}} \def\Ra{\mathop{\Rightarrow}} \def\La{\mathop{\Leftarrow}} \def\lan{\mathop{\langle}} \def\ran{\mathop{\rangle}} \def\embed{\mathop{\hookrightarrow}} \def\esup{\mathop{\rm ess\;sup}} \def\einf{\mathop{\rm ess\;inf}} \def\limw{\mathop{\buildrel w\over\rightharpoonup}} \def\limws{\mathop{\buildrel *\over\rightharpoonup}} \def\lims{\mathop{\buildrel s\over\rightarrow}} \def\lq{\leqno} \def\rq{\eqno} \def\pa{\partial} \def\h{\widehat} \def\wt{\widetilde} \def\vr{\vrule width 1.7 pt height 6.8 pt depth 2.5pt} \def\cd{\cdot} \def\cds{\cdots} \def\ae{\hbox{\rm -a.e.{ }}} \def\as{\hbox{\rm -a.s.{ }}} \def\sgn{\hbox{\rm sgn$\,$}} \def\meas{\hbox{\rm meas$\,$}} \def\supp{\hbox{\rm supp$\,$}} \def\co{\mathop{{\rm co}}} \def\coh{\mathop{\overline{\rm co}}} \def\cl{\overline} \def\codim{\hbox{\rm codim$\,$}} \def\Int{\hbox{\rm Int$\,$}} \def\diam{\hbox{\rm diam$\,$}} \def\deq{\mathop{\buildrel\D\over=}} \def\tr{\hbox{\rm tr$\,$}} \def\deq{\mathop{\buildrel\D\over=}} \def\Re{\hbox{\rm Re$\,$}} \def\bnm{{\,|\neg\neg|\neg\neg|\neg\neg|\,}} \def\dis{\displaystyle} \def\wt{\widetilde} \def\wh{\widehat} \def\dh{\dot{h}} \def\dF{\dot{F}} \def\bF{{\bf F}} \def\bx{{\bf x}} \def\cad{c\`{a}dl\`{a}g} \def\cag{c\`{a}gl\`{a}d~} \def\bP{{\bf P}} \def\1{{\bf 1}} \def\by{{\bf y}} \def\hSM{\widehat {\cS\!\cM}^2} \def\:{\!:\!} \def\reff{\eqref} \def \proof{{\noindent \bf Proof.\quad}} \def \dbf{{\mathbf{d}}} \def \Usup{\overline{\cU}} \def \Usub{\underline{\cU}} \font\nrm=cmr10 at 9pt \font\nit=cmti10 at 9pt \font\nsl=cmsl10 at 9pt \definecolor{alp}{rgb}{0.0, 0.5, 0.0} \def\spt{\mathrm{spt}} \def\R{\mathbb{R}} \newtheorem{thm}{Theorem}[section] \newtheorem{lem}[thm]{Lemma} \newtheorem{cor}[thm]{Corollary} \newtheorem{prop}[thm]{Proposition} \newtheorem{rem}[thm]{Remark} \newtheorem{eg}[thm]{Example} \newtheorem{defn}[thm]{Definition} \newtheorem{assum}[thm]{Assumption} \renewcommand {\theequation}{\arabic{section}.\arabic{equation}} \def\thesection{\arabic{section}} \begin{document} \title{\bf Mean Field Games of Controls: Propagation of Monotonicities} \author{Chenchen Mou\thanks{\noindent Dept. of Math., City University of Hong Kong. E-mail: \href{mailto:[email protected]}{[email protected]}. This author is supported in part by CityU Start-up Grant 7200684 and Hong Kong RGC Grant ECS 9048215.} ~ and ~ Jianfeng Zhang\thanks{\noindent Dept. of Math., University of Southern California. E-mail: \href{mailto:[email protected]}{[email protected]}. This author is supported in part by NSF grant DMS-1908665 and and DMS-2205972. } } \date{} \maketitle \begin{abstract} The theory of Mean Field Game of Controls considers a class of mean field games where the interaction is through the joint distribution of the state and control. It is well known that, for standard mean field games, certain monotonicity condition is crucial to guarantee the uniqueness of mean field equilibria and then the global wellposedness for master equations. 
In the literature, the monotonicity condition could be the Lasry-Lions monotonicity, the displacement monotonicity, or the anti-monotonicity condition. In this paper, we investigate all three types of monotonicity conditions for Mean Field Games of Controls and show their propagation along the solutions to the master equations with common noises. In particular, we extend the displacement monotonicity to semi-monotonicity, whose propagation result\footnote{We would like to thank Gangbo and M\'esz\'aros for helpful discussions on this part.} is new even for standard mean field games. This is the first step towards the global wellposedness theory for master equations of Mean Field Games of Controls. \end{abstract} \no{\bf Keywords.} Mean field game of controls, master equation, Lasry-Lions monotonicity, displacement semi-monotonicity, anti-monotonicity \ms \no{\it 2020 AMS Mathematics subject classification:} 35R15, 49N80, 60H30, 91A16, 93E20 \vfill\eject \section{Introduction} \label{sect-Introduction} \setcounter{equation}{0} The theory of Mean Field Games (MFGs) was introduced independently by Huang-Caines-Malham\'e \cite{HCM06} and Lasry-Lions \cite{LL07a}. Since then, the literature has witnessed a vast increase in various directions, and the theory has turned out to be extremely rich in applications, including economics \cite{ALLM,LLG}, engineering \cite{HCM071,HCM072}, finance \cite{LackerSoret, LackerZariphopoulou}, social science \cite{BTB,Gelfand} and many others. We refer to Lions \cite{Lions}, Cardaliaguet \cite{Cardaliaguet} and Bensoussan-Frehse-Yam \cite{BFY} for introductions to the subject in its early stage, and to Carmona-Delarue \cite{CD1, CD2} and Cardaliaguet-Porretta \cite{CP} for more recent developments. Such problems consider the limit behavior of large systems where the agents interact with each other in some symmetric way, with systemic risk as a notable application. The master equation, introduced by Lions \cite{Lions}, characterizes the value of the MFG provided there is a unique mean field equilibrium. It plays the role of the PDE in the standard literature of controls/games, and is a powerful tool in the mean field framework. The main feature of the master equation is that its state variables include a probability measure $\mu$, representing the distribution of the population, so it can be viewed as a PDE on the Wasserstein space of probability measures. In a standard MFG, the interaction is only through the law of the state. In many applications, however, the interaction could be through the joint law of the state and the control. Such a game is called a Mean Field Game of Controls (MFGC), also termed an extended MFG in the early literature. To be precise, let $B$ and $B^0$ stand for the idiosyncratic and common noises, respectively. Given an $\mathbb F^{B^0}$-adapted stochastic measure flow $\{\nu_\cd\}=\{\nu_t\}_{t\in [0,T]}\subset \mathcal{P}_2(\mathbb R^{2d})$, we denote its first marginal by $\mu_t:={\pi_1}_\#\nu_t\in\mathcal{P}_2(\mathbb R^d)$, where $\pi_1(x,a):=x$, $(x,a)\in\mathbb R^{d}\times\mathbb R^d$, is the projection onto the first component.
Given above $\{\nu_\cd\}$, we would like to minimize the following cost functional over all admissible controls $\alpha:[0,T]\times\mathbb R^d\times C([0,T];\mathbb R^d) \to\mathbb R^d$: for any $\xi\in \dbL^2(\cF_0, \dbR^d)$, \begin{equation}\label{eq:MFGCint} J(\xi,\{\nu_\cd\};\alpha):=\mathbb E\Big[ G(X_T^{\xi, \{\nu_\cd\},\alpha},\mu_T)+\int_0^Tf(X_t^{\xi,\{\nu_\cd\},\alpha},\alpha(t,X_t^{\xi,\{\nu_\cd\},\alpha},B_{[0,t]}^0),\nu_t)dt\Big], \end{equation} where, for a constant $\b\ge 0$, \begin{equation}\label{eq:Xint} X_t^{\xi,\{\nu_\cd\},\alpha}=\xi+\int_{0}^tb(X_s^{\xi,\{\nu_\cd\},\alpha},\alpha(s,X_s^{\xi,\{\nu_\cd\},\alpha}, B_{[0,s]}^0),\nu_s)ds+ B_t+\beta B_t^0. \end{equation} Here the running drift and cost $b, f$ depend on the joint law of the state and control, while the terminal cost $G$ depends on the law of the state only. We call $(\a^*,\{\nu^*_\cd\})$ a Nash equilibrium if \beaa \alpha^*\in\arg\min_{\alpha}J(\xi,\{\nu_\cd^*\};\alpha),\q\mbox{and}\q \nu_t^*=\mathcal{L}_{(X_t^{\xi, \{\nu^*_\cd\},\alpha^*},\a^*_t)|\cF^{B^0}_t}. \eeaa Introduce the Hamiltonian $H$: \begin{equation}\label{eq:Hint} H(x,p,\nu):=\inf_{a\in\mathbb R^d}\big[p\cdot b(x,a, \nu)+f(x,a,\nu)\big],~\mbox{with an optimal argument}~ a^* = \phi(x, p, \nu). \end{equation} The above problem leads to the following MFGC system of forward-backward stochastic partial differential equations (FBSPDEs) with a solution $(\{\mu_\cd\}, \{\nu_\cd\}, u, v)$: \begin{equation} \label{SPDEint} \left.\begin{array}{lll} d\mu_t(x) = \big[\frac{\hat \beta^2}{2} \tr(\partial_{xx} \mu_t(x)) - div\big(\mu_t(x) \partial_p H(x,\partial_x u(t,x),\nu_t)\big)\big]dt-\beta\partial_x\mu_t(x)\cdot d B_t^0;\ms\\ d u(t, x)= v(t,x)\cdot dB_t^0 - \big[\frac{\hat\beta^2}{2} \tr(\partial_{xx} u(t,x))+\beta\tr(\partial_x v^\top(t,x)) + H(x,\partial_x u(t,x),\nu_t) \big]dt;\ms\\ \nu_t=(id, \phi(\cdot,\pa_x u(t,\cdot),\nu_t))_{\#}\mu_t;\qq \hat \b^2= 1+\b^2; \ms\\ \mu_0 = \cL_\xi,\quad u(T,x) = G(x, \mu_T). \end{array}\right. \end{equation} The wellposedness of the above MFGC system has been investigated by many authors in recent years, essentially in the case $\b=0$ and $b(x,a,\nu) = a$. For example, Gomes-Patrizi-Voskanyan \cite{GPV}, Kobeissi \cite{K1}, Graber-Mayorga \cite{GraberMayorga} investigated the system under some smallness conditions, and the global wellposedness (especially the uniqueness) was studied by Gomes-Voskanyan \cite{GV0,GV1}, Carmona-Lacker \cite{CarmonaLacker}, Carmona-Delarue \cite{CD1}, Cardaliaguet-Lehalle \cite{CardaliaguetLehalle}, Kobeissi \cite{K0}, under the crucial Lasry-Lions monotonicity condition. We also refer to Djete \cite{Djete} for some convergence analysis from $N$-player games to MFGCs and Achdou-Kobeissi \cite{AK} for some numerical studies of MFGCs, without requiring the uniqueness of the equilibria. However, to our best knowledge, the wellposedness of master equations for MFGCs remains completely open. We recall that the master equation is the PDE to characterize the value function $V$ of the MFGC, provided the equilibrium is unique, and it also serves as the decoupling function $V$ of the MFGC system \reff{SPDEint}: \beaa u(t,x) = V(t,x, \mu_t). \eeaa The monotonicity condition is used to guarantee the uniqueness of the mean field equilibria, and then the global wellposedness of MFG master equations. There are three types of monotonicity conditions in the literature for master equations of standard MFGs: the Lasry-Lions monotonicity, the displacement monotonicity and the anti-monotonicity. 
The Lasry-Lions monotonicity, introduced by Lions \cite{Lions} and extensively used in the literature, can be formulated as follows: for any $\xi,\eta\in \dbL^2(\cF_T^1; \dbR^d)$ and their independent copies $\tilde \xi, \tilde \eta$ in the probability space $(\tilde \Omega, \tilde \dbF,\tilde \dbP)$ (see their definitions in Section 2), \begin{equation}\label{eq:LLint} \tilde{\mathbb E}\Big[\langle\pa_{x\mu}G(\xi,\mathcal{L}_{\xi},\tilde\xi)\tilde{\eta},\eta\rangle\Big]\geq 0. \end{equation} The displacement monotonicity, originating in Ahuja \cite{Ahuja}, is \begin{equation}\label{eq:disint} \tilde{\mathbb E}\Big[\langle\pa_{x\mu}G(\xi,\mathcal{L}_{\xi},\tilde\xi)\tilde{\eta},\eta\rangle + \langle\pa_{xx}G(\xi,\mathcal{L}_{\xi})\eta,\eta\rangle\Big]\geq 0, \end{equation} which can be further weakened to the displacement semi-monotonicity: for some constant $\l\ge 0$, \begin{equation}\label{eq:dissemiint} \tilde{\mathbb E}\Big[\langle\pa_{x\mu}G(\xi,\mathcal{L}_{\xi},\tilde\xi)\tilde{\eta},\eta\rangle + \langle\pa_{xx}G(\xi,\mathcal{L}_{\xi})\eta,\eta\rangle + \l |\eta|^2\Big]\geq 0. \end{equation} See, e.g., Bensoussan-Graber-Yam \cite{BGY2} and Gangbo-Meszaros-Mou-Zhang \cite{GMMZ}. It is worth noting that if $G$ is Lasry-Lions monotone and $\pa_{xx}G$ is bounded, then $G$ is displacement semi-monotone. The anti-monotonicity, recently introduced by the authors \cite{MZ3}, takes the following form: \begin{equation} \label{eq:antiint} \left.\begin{array}{lll} \qq\,\,\,\,\tilde{\mathbb E}\Big[\l_0\langle\pa_{xx}G(\xi,\mathcal{L}_{\xi})\eta,\eta\rangle+\l_1\langle\pa_{x\mu}G(\xi,\mathcal{L}_{\xi},\tilde \xi)\tilde\eta,\eta\rangle\\ +|\pa_{xx}G(\xi,\mathcal{L}_{\xi})\eta|^2+\l_2\Big|\tilde{\mathbb E}[\pa_{x\mu}G(\xi,\mathcal{L}_{\xi},\tilde\xi)\tilde\eta]\Big|^2-\l_3|\eta|^2\Big]\leq 0, \end{array}\right. \end{equation} for some appropriate constants $\l_0>0$, $\l_1\in\mathbb R$, $\l_2>0$, $\l_3\geq 0$. In \cite{GMMZ, MZ3} we made a simple but important observation: the propagation of a monotonicity is crucial for the global wellposedness of the (standard) MFG master equations. That is, provided the terminal condition $G$ satisfies one of the above three types of monotonicity conditions, if one can show a priori that any classical solution $V$ of the master equation satisfies the same type of monotonicity for all time $t$, then one can establish the global wellposedness of the master equation, which in turn will imply the uniqueness of mean field equilibria and the convergence from the $N$-player game to the MFG. Our goal is to extend all these results to MFGCs, but in this paper we focus only on the propagation of these three types of monotonicities. That is, we shall follow the approach in \cite{GMMZ, MZ3} to find sufficient conditions on the Hamiltonian $H$ (or alternatively on $b$ and $f$) so that the monotonicity of $G$ can be propagated along $V(t, \cd,\cd)$, provided the master equation has a classical solution $V$. We shall leave the global wellposedness of the master equations and the convergence of the $N$-player games to an accompanying paper. The Lasry-Lions monotonicity condition has already been used to study the MFGC system \reff{SPDEint}, as mentioned earlier. It is observed in \cite{GMMZ} that, for standard MFGs with non-separable $f$, the Lasry-Lions monotonicity can hardly be propagated. The extra dependence on the law of the control actually helps in propagating the Lasry-Lions monotonicity; in particular, the separability of $f$ is no longer required.
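For a concrete feel for conditions \eqref{eq:LLint}--\eqref{eq:antiint}, the quantities involved can be evaluated by Monte Carlo for explicit costs. The following minimal sketch (an illustration of ours, not used in the sequel) takes the hypothetical quadratic cost $G(x,\mu)=\frac{a}{2}|x|^2+c\,x\int y\,\mu(dy)$ with $d=1$, for which $\pa_{xx}G\equiv a$ and $\pa_{x\mu}G\equiv c$ are constants, so that \eqref{eq:LLint} reduces to $c\,|\dbE[\eta]|^2\ge 0$ and \eqref{eq:dissemiint} to $c\,|\dbE[\eta]|^2+(a+\l)\,\dbE[|\eta|^2]\ge 0$.
\begin{verbatim}
import numpy as np

# Monte Carlo evaluation of the monotonicity quantities for the illustrative
# cost G(x, mu) = (a/2)|x|^2 + c * x * mean(mu) in d = 1, where
# d_xx G = a and d_xmu G = c are constants (so xi drops out of the expressions).
rng = np.random.default_rng(1)
a, c, lam = -0.3, 0.5, 0.4                     # illustrative constants
eta = rng.normal(size=100_000)                 # a mean-zero perturbation direction

lasry_lions  = c * np.mean(eta) ** 2                     # left side of (eq:LLint)
displacement = lasry_lions + a * np.mean(eta ** 2)       # left side of (eq:disint)
semi         = displacement + lam * np.mean(eta ** 2)    # left side of (eq:dissemiint)
print(lasry_lions >= 0, displacement >= 0, semi >= 0)
# Expected output: True, False, True.  This G is Lasry-Lions monotone (c >= 0)
# and displacement semi-monotone (any lam >= -a), but not displacement monotone.
\end{verbatim}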
The displacement semi-monotonicity condition was introduced in \cite{GMMZ}; however, only the propagation of displacement monotonicity is established there. In this paper, we manage to propagate the displacement semi-monotonicity for MFGCs, which improves the result of \cite{GMMZ} even for standard MFGs. In particular, by combining with the arguments in \cite{GMMZ}, we easily obtain the global wellposedness of standard MFG master equations under displacement semi-monotonicity conditions. We remark again that the displacement semi-monotonicity is weaker than both displacement monotonicity and Lasry-Lions monotonicity (provided $\pa_{xx} V$ is bounded, which is typically the case), so in this sense our result provides a unified framework for the wellposedness theory of master equations under the Lasry-Lions monotonicity and displacement monotonicity conditions. Another feature of our results is that we allow for a general form of the drift $b$. In the literature, one typically sets $b(\cd, a, \cd) = a$ (or slightly more general forms), and then focuses on appropriate monotonicity conditions on $f$ to ensure the uniqueness of the mean field equilibria and/or the wellposedness of the master equations. However, for a general $b$, especially when $b$ depends on the law (of the state and/or the control), it does not make sense to propose a monotonicity condition on $f$ alone. A conceivable notion of monotonicity for a general $b$ has never been studied, to the best of our knowledge. Our approach works on the Hamiltonian $H$ directly, which encodes the combined impact of $b$ and $f$. Again, our results are new in this aspect even for standard MFGs. The rest of the paper is organized as follows. In Section \ref{sect-MFGC} we introduce MFGCs. In Section \ref{sect-master} we introduce the master equation and the notions of monotonicities. In Sections \ref{sect-LL}, \ref{sect-displacement}, and \ref{sect-anti} we propagate the three types of monotonicities, one in each section. In particular, in Subsection \ref{sect-wellposedness} we also establish the global wellposedness of standard MFG master equations under displacement semi-monotonicity conditions. Finally, some technical proofs are postponed to the Appendix. \section{Mean Field Games of Controls} \label{sect-MFGC} \setcounter{equation}{0} We consider the setting in \cite{GMMZ}. Let $d$ be a dimension and $[0, T]$ a fixed finite time horizon. Let $(\O_0, \dbF^0, \dbP_0)$ and $(\O_1, \dbF^1, \dbP_1)$ be two filtered probability spaces, on which are defined $d$-dimensional Brownian motions $B^0$ and $B$, respectively. For $\dbF^i =\{\cF^i_t\}_{0\le t\le T}$, $i=0,1$, we assume $\cF^0_t=\cF^{B^0}_t$, $\cF^1_t =\cF^1_0 \vee \cF^{B}_t$, and $\dbP_1$ has no atom in $\cF^1_0$ so it can support any measure on $\dbR^d$ with finite second order moment. Consider the product spaces \begin{equation}\label{product} \O := \O_0 \times \O_1,\q \dbF = \{\cF_t\}_{0\le t\le T} := \{\cF^0_t \otimes \cF^1_t\}_{0\le t\le T},\q \dbP := \dbP_0\otimes \dbP_1,\q \dbE:= \dbE^\dbP. \end{equation} In particular, $\cF_t := \si\{A_0\times A_1: A_0\in \cF^0_t, A_1\in \cF^1_t\}$ and $\dbP(A_0\times A_1) = \dbP_0(A_0) \dbP_1(A_1)$. We shall automatically extend $B^0, B, \dbF^0, \dbF^1$ to the product space in the obvious sense, but using the same notation. Note that $B^0$ and $B$ are independent $\dbP$-Brownian motions and are independent of $\cF_0$.
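As a concrete illustration of this setup, the conditional law given the common noise can be approximated by particles: many copies driven by independent idiosyncratic Brownian motions share a single common path $B^0$. The following minimal sketch (our own illustration; the linear drift is a hypothetical stand-in for $b$) approximates $\cL_{X_T|\cF^0_T}$ empirically.
\begin{verbatim}
import numpy as np

# Particle approximation of the conditional law L(X_T | F^0_T): M idiosyncratic
# copies, driven by independent increments dB, share one common-noise path B^0.
# The drift b(x) = -x is a hypothetical choice for illustration only.
rng = np.random.default_rng(2)
M, T, n_steps, beta = 5000, 1.0, 200, 0.5
dt = T / n_steps
X = rng.normal(size=M)                           # xi, sampled from mu_0
dB0 = np.sqrt(dt) * rng.normal(size=n_steps)     # one common path B^0

for k in range(n_steps):
    dB = np.sqrt(dt) * rng.normal(size=M)        # idiosyncratic increments
    X = X + (-X) * dt + dB + beta * dB0[k]       # dX = b(X) dt + dB + beta dB^0

# Empirical mean and variance of mu_T = L(X_T | F^0_T) along this path of B^0:
print(X.mean(), X.var())
\end{verbatim}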
It is convenient to introduce another filtered probability space $(\tilde \O_1, \tilde \dbF^1, \tilde B, \tilde \dbP_1)$ in the same manner as $(\O_1, \dbF^1, B, \dbP_1)$, and consider the larger filtered probability space given by \begin{equation}\label{product2} \tilde \O := \O\times \tilde\O_1 ,\q \tilde\dbF = \{\tilde \cF_t\}_{0\le t\le T} := \{\cF_t \otimes \tilde \cF^1_t\}_{0\le t\le T},\q \tilde \dbP := \dbP\otimes \tilde\dbP_1,\q \tilde \dbE:= \dbE^{\tilde \dbP}. \end{equation} Given an $\cF_t$-measurable random variable $\xi = \xi(\o^0, \o^1)$, we say $\tilde \xi = \tilde \xi(\o^0, \tilde \o^1)$ is a conditionally independent copy of $\xi$ if, for each $\o^0$, the $\dbP_1$-distribution of $\xi(\o^0, \cd)$ is equal to the $\tilde\dbP_1$-distribution of $\tilde\xi(\o^0, \cd)$. That is, conditional on $\cF^0_t$, by extending to $\tilde \O$ the random variables $\xi$ and $\tilde \xi$ are conditionally independent and have the same conditional distribution under $\tilde \dbP$. Note that, for any appropriate deterministic function $\f$, \bea \label{conditional expectation} \left.\ba{c} \dis \tilde \dbE_{\cF^0_t} \big[ \f(\xi, \tilde \xi)\big] (\o^0) = \dbE^{\dbP_1\otimes \tilde\dbP_1}\Big[\f\big(\xi(\o^0, \cdot), \tilde\xi(\o^0, \tilde\cdot)\big)\Big],\q \dbP_0-\mbox{a.e.}~\o^0;\\ \dis \tilde \dbE_{\cF_t} \big[ \f(\xi, \tilde \xi)\big] (\o^0,\o^1) = \dbE^{\tilde\dbP_1}\Big[\f\big(\xi(\o^0, \o^1), \tilde\xi(\o^0, \tilde \cdot)\big)\Big],\q \dbP-\mbox{a.e.}~(\o^0, \o^1). \ea\right. \eea Here $ \dbE^{\tilde\dbP_1}$ is the expectation on $\tilde \o^1$, and $\dbE^{\dbP_1\times \tilde\dbP_1}$ is on $(\o^1, \tilde \o^1)$. Throughout the paper, we will use the probability space $(\O, \dbF, \dbP)$. However, when conditionally independent copies of random variables or processes are needed, we will tacitly use the extension to the larger space $(\tilde \O, \tilde \dbF, \tilde \dbP)$ without mentioning. When we need two conditionally independent copies, we introduce further $(\bar \O_1, \bar \dbF^1, \bar B, \bar \dbP_1)$ and the product space $(\bar\O, \bar \dbF, \bar\dbP, \bar\dbE)$ as in \reff{product2}, and set the joint product space \begin{align}\label{product3} \bar{\tilde \O} := \O\times \tilde\O_1\times \bar\O_1 ,~ \bar{\tilde \dbF} = \{\bar{\tilde \cF}_t\}_{0\le t\le T} := \{\cF_t\otimes\tilde\cF^1_t \otimes \bar \cF^1_t\}_{0\le t\le T}, ~\bar {\tilde \dbP} := \dbP\otimes \tilde\dbP_1\otimes \bar\dbP_1,~ \bar{\tilde \dbE}:= \dbE^{\bar{\tilde \dbP}}. \end{align} For any dimension $k$ and any constant $p\ge 1$, let $\cP(\dbR^k)$ denote the set of probability measures on $\dbR^k$, and $\cP_p(\dbR^k)$ the subset of $\mu\in \cP(\dbR^k)$ with finite $p$-th moment, equipped with the $p$-Wasserstein distance $W_p$. Moreover, for any sub-$\sigma$-algebra $\mathcal{G}\subset \mathcal{F}_T$, $\mathbb L^p(\mathcal{G})$ denotes the set of $\mathbb R^k$-valued, $\mathcal{G}$-measurable, and $p$-integrable random variables; and for any $\mu\in \cP_p(\dbR^k)$, $\mathbb L^p(\mathcal{G};\mu)$ denotes the set of $\xi\in\mathbb L^p(\mathcal{G})$ with law $\mathcal{L}_{\xi}=\mu$. Similarly, for any sub-filtration $\dbG \subset \dbF$, $\dbL(\dbG; \dbR^k)$ denotes the set of $\dbG$-progressively measurable $\dbR^k$-valued processes. For a continuous function $U:\cP_2(\dbR^k)\to\dbR$, we recall its linear functional derivative ${\d U\over \d\mu}: \cP_2(\dbR^k)\times\dbR^k\to\dbR$ and Lions derivative $\pa_\mu U: \cP_2(\dbR^k)\times\dbR^k\to\dbR^k$. 
We say $U\in\cC^1(\cP_2(\dbR^k))$ if $\pa_\mu U$ exists and is continuous on $\cP_2(\dbR^k)\times\dbR^k$, and we note that $\pa_\mu U(\mu, \tilde x) = \pa_{\tilde x} {\d U\over \d\mu}(\mu, \tilde x)$. Similarly we can define the second order derivative $\pa_{\mu\mu} U(\mu,\tilde x,\bar x)$, and we say $U\in \cC^2(\cP_2(\dbR^k))$ if $\pa_\mu U$, $\pa_{\tilde x\mu}U$ and $\pa_{\mu\mu}U$ exist and are continuous. We refer to \cite[Chapter 5]{CD1} or \cite{GT} for more details. Our mean field game of controls (MFGC) will depend on the following data: \beaa b: \mathbb R^{2d}\times\mathcal{P}_{2}(\mathbb R^{2d})\to \dbR^d;\q f: \mathbb R^{2d}\times\mathcal{P}_2(\mathbb R^{2d})\to \dbR;\q G: \mathbb R^{d}\times\mathcal{P}_2(\mathbb R^{d})\to\mathbb R;~\mbox{and}~ \b \in [0, \infty). \eeaa We shall always assume appropriate technical conditions so that all the equations in this section are wellposed and all the involved random variables are integrable. Given $t_0\in [0,T]$, denote $B_t^{t_0}:=B_t-B_{t_0}$, $B_t^{0,t_0}:=B_t^0-B_{t_0}^0$, $t\in [t_0,T]$. Let $\cA_{t_0}$ denote the set of admissible controls $\a: [t_0,T]\times \mathbb R^{d}\times C([t_0,T];\mathbb R^d)\to\mathbb R^d$ which are progressively measurable, adapted in the path variable, and square integrable; and let $\dbL^2(\dbF^{B^{0, t_0}}; \cP_2(\dbR^{2d}))$ denote the set of $\dbF^{B^{0, t_0}}$-progressively measurable stochastic measure flows $\{\nu_\cdot\}=\{\nu_t\}_{t\in [t_0,T]}\subset \cP_2(\mathbb R^{2d})$. Here for notational simplicity we assume the controls also take values in $\dbR^d$, and $b$ and $f$ do not depend on time, but one can remove these constraints without any difficulty. Given $t_0\in [0,T]$, $x\in\mathbb R^d$, $\alpha\in\mathcal{A}_{t_0}$, and $\{\nu_\cd\}\in \dbL^2(\dbF^{B^{0, t_0}}; \cP_2(\dbR^{2d}))$, the state of the agent satisfies the following controlled SDE on $[t_0,T]$: \bea \label{Xx} \left.\ba{c} \dis X_t^{ \{\nu_\cdot\},\a}=x+\int_{t_0}^{t}b(X_s^{ \{\nu_\cdot\},\a},\alpha_s, \nu_s)ds+B_t^{t_0}+\beta B_t^{0,t_0};\\ \dis \mbox{where}\q X^{ \{\nu_\cdot\},\a} = X^{t_0, \{\nu_{\cdot}\}; x, \alpha},\q \a_s := \alpha (s,X_s^{ \{\nu_\cdot\},\a}, B^{0,t_0}_{[t_0,s]}). \ea\right. \eea Consider the expected cost for the MFGC: denoting by ${\pi_1}_{\#} \nu_{t} $ the first marginal of $\nu_{t}$, \bea \label{Ja} J(t_0,x; \{\nu_\cdot\},\a):= \mathbb E\Big[G(X_{T}^{ \{\nu_\cdot\},\a},{\pi_1}_{\#} \nu_T) +\int_{t_0}^Tf(X_t^{ \{\nu_\cdot\},\a}, \alpha_t, \nu_t)dt\Big]. \eea \begin{defn} For any $(t_0,\mu)\in [0,T]\times\mathcal{P}_2(\mathbb R^d)$, we say $(\alpha^*,\{\nu_\cdot^*\})\in \cA_{t_0} \times \dbL^2(\dbF^{B^{0, t_0}}; \cP_2(\dbR^{2d}))$ is a mean field equilibrium (MFE) at $(t_0,\mu)$ if \bea\label{MFE} \left.\ba{c} \dis J(t_0,x;\{\nu_\cd^*\},\alpha^*) = \inf_{\alpha\in\mathcal{A}_{t_0}}J(t_0,x;\{\nu_\cd^*\},\alpha),\q \text{for $\mu$-a.e. $x\in\mathbb R^d$};\\ \dis \pi_{1\#}\nu_{t_0}^* =\mu,\q \nu_s^*:=\mathcal{L}_{(X_s^*,\alpha^* (s,X_s^*, B^{0,t_0}_{[t_0,s]}))|\cF^0_s}, \q\mbox{where}\\ \dis X_t^*=\xi+\int_{t_0}^tb(X_s^*,\alpha^* (s,X_s^{*}, B^{0,t_0}_{[t_0,s]}),\nu_s^*)ds+B_t^{t_0}+\beta B_t^{0,t_0},\q \xi\in \dbL^2(\cF^1_{t_0}, \mu). \ea\right.
\eea \end{defn} When there is a unique MFE for each $(t,\mu)\in [0,T]\times\mathcal{P}_2(\mathbb R^d)$, denoted by $(\alpha^*(t,\mu; \cd),\{\nu_\cd^*(t,\mu)\})$, the game problem leads to the following value function for the agent: \begin{equation}\label{value} V(t,x,\mu):=J(t,x;\{\nu_\cd^*(t,\mu)\}, \alpha^*(t,\mu; \cd))\quad\text{for any $x\in\mathbb R^d$}. \end{equation} We remark that, by \reff{MFE}, the above $V$ is well defined only for $\mu$-a.e. $x$. However, for each $t$, its continuous extension to $\dbR^d\times \cP_2(\dbR^d)$ is unique, and we shall always consider this continuous extension. Our goal is to study the master equation for the value function $V(t,x,\mu)$. For this purpose, we introduce the Hamiltonian: for $(x, p, \nu)\in \dbR^d\times \dbR^d\times \cP_2(\dbR^{2d})$, \bea \label{H} H(x,p,\nu):= \inf_{a\in \dbR^d} h(x,p, \nu, a),\q h(x,p, \nu, a):= p\cdot b(x,a,\nu)+f(x,a,\nu). \eea Note that $H$ depends on $\nu$, while $V$ depends only on $\mu = {\pi_1}_{\#} \nu$. We also remark that the Hamiltonian in \cite{GMMZ, MZ3} is $-H$. To introduce the master equation, which we will do in the next section, we need the following fixed point. \begin{assum} \label{assum-fix} (i) The infimum in \reff{H} is attained at a unique minimizer $a^* = \phi(x, p, \nu)$, namely \bea \label{I} H(x,p,\nu) = h(x,p, \nu, \phi(x, p, \nu)). \eea (ii) For any $\xi \in \dbL^2(\cF)$ and $\eta \in \dbL^2(\si(\xi))$, the following mapping on $\cP_2(\dbR^{2d})$: \bea \label{cI} \cI^{\xi,\eta}(\nu) := \cL_{(\xi, \phi(\xi, \eta, \nu))} \eea has a unique fixed point $\nu^*$: $\cI^{\xi,\eta}(\nu^*)=\nu^*$, and we shall denote it as $\Phi(\cL_{(\xi, \eta)})$. \end{assum} We refer to \cite[Lemma 4.60]{CD1} for some sufficient conditions on the existence of $\Phi$. By \reff{I} one can easily check that \bea \label{Hp} b(x,\phi(x,p,\nu),\nu)=\pa_pH(x,p,\nu), \q f(x,\phi(x,p,\nu),\nu)= H(x, p, \nu) - p\cd \pa_pH(x,p,\nu). \eea As in the standard MFG theory, provided $V$ is smooth, $p$ corresponds to $\pa_x V(t, x, \mu)$. Consequently, later on the above fixed point will be applied as follows: given $(t, \mu)$ and $\xi \in \dbL^2(\cF^1_t, \mu)$, \bea \label{optimal} \eta = \pa_x V(t, \xi, \mu),\q \nu^* := \Phi(\cL_{(\xi, \pa_x V(t, \xi, \mu))}),\q \a^* := \phi(\xi, \pa_x V(t, \xi, \mu), \Phi(\cL_{(\xi, \pa_x V(t, \xi, \mu))})). \eea Plugging these into \reff{MFE} we obtain the following McKean-Vlasov SDE: recalling \reff{Hp}, \bea \label{X*} \left.\ba{c} \dis X_t^{*}=\xi+\int_{t_0}^{t} \pa_p H\big(X_s^{*}, \pa_x V(s, X^*_s, \mu^*_s), \nu^*_s\big)ds+B_t^{t_0}+\beta B_t^{0,t_0},\\ \dis\mbox{where}\q \mu^*_s := \cL_{X^*_s|\cF^0_s},\q \nu^*_s := \Phi\big(\cL_{(X^*_s, \pa_x V(s, X^*_s, \mu^*_s))|\cF^0_s}\big). \ea\right. \eea That is, if $V$ is smooth, then under Assumption \ref{assum-fix} we may obtain the unique MFE $\a^*$ through \reff{optimal} and \reff{X*} (by abusing the notation $\a^*$): given $(t_0, \mu)$ and $\xi\in \dbL^2(\cF^1_{t_0}, \mu)$, \bea \label{a*} \a^*(s, x, B^{0, t_0}_{[t_0, s]}) = \phi(x, \pa_x V(s, x, \mu^*_s), \nu^*_s). \eea Here we used the fact that $\mu^*_s, \nu^*_s$ are actually adapted to the shifted filtration generated by $B^{0, t_0}$. Assumption \ref{assum-fix} (i) is more or less standard in the literature, holding for example when $h$ in \reff{H} is convex in $a$. In particular, when $b(x,a,\nu) = a$, which is often the case in the literature, we have $\phi = \pa_p H$. We next provide two examples for Assumption \ref{assum-fix} (ii).
\begin{eg} \label{eg-separable} Assume $b, f$ are separable in the following sense: \bea \label{separable} b(x, a, \nu) = b_0(x, a, {\pi_1}_{\#} \nu) + b_1(x, \nu),\q f(x, a, \nu) = f_0(x, a, {\pi_1}_{\#} \nu) + f_1(x, \nu). \eea In this case \reff{H} becomes: \beaa &\dis H(x,p,\nu) = H_0(x, p, {\pi_1}_{\#} \nu) + H_1(x, p, \nu),\q \mbox{where}\\ &\dis H_0(x,p,\mu):=\inf_{a\in \dbR^d} \big[p\cdot b_0(x,a,\mu)+f_0(x,a,\mu)\big],\q H_1(x,p, \nu):= p\cd b_1(x, \nu) + f_1(x, \nu). \eeaa Assume Assumption \ref{assum-fix} (i) holds, and clearly in this case we have $a^* = \phi(x, p, \mu)$, with the dependence on $\nu$ only through its first component $\mu={\pi_1}_{\#} \nu$. Then $\cI^{\xi,\eta}(\nu) := \cL_{(\xi, \phi(\xi, \eta, {\pi_1}_{\#} \nu))}$. Notice further that the fixed point requires ${\pi_1}_{\#} \nu^* = \cL_\xi$. Then it is obvious that Assumption \ref{assum-fix} (ii) holds with $\Phi(\cL_{(\xi, \eta)}) = \cL_{(\xi, \phi(\xi, \eta, \cL_\xi))}$. \end{eg} We note that the above $f$ satisfies the conditions in \cite[Lemma 4.60]{CD1}, while the drift $b$ is more general. The next example, however, is out of the scope of \cite[Lemma 4.60]{CD1}. \begin{eg} \label{eg-nonseparable} Assume $d=1$ and, by writing $\dbE_\nu[\a]$ to indicate expectation under law $\cL_{\a} = {\pi_2}_{\#} \nu$, \bea \label{nonseparable} b(x, a, \nu) = -b_0(x, {\pi_1}_{\#} \nu) a + b_1(x, \nu), \q f(x, a, \nu) = {|a|^2\over 2} - a f_0(x, {\pi_1}_{\#} \nu, \dbE_{\nu}[\a]) + f_1(x, \nu). \eea One can easily see that $\phi(x, p, \nu) = f_0(x, {\pi_1}_{\#} \nu, \dbE_\nu[\a])+p b_0(x, {\pi_1}_{\#} \nu)$ and thus \beaa \cI^{\xi,\eta}(\nu) := \cL_{(\xi, ~ f_0(\xi, {\pi_1}_{\#} \nu, \dbE_\nu[\a])+ b_0(\xi, {\pi_1}_{\#} \nu)\eta)}. \eeaa Then $\cI^{\xi,\eta}$ has a fixed point if and only if the following mapping has a fixed point: \bea \label{psi} m\in \dbR \to \psi^{\xi, \eta}(m):= \dbE\big[ f_0(\xi, \cL_\xi, m)+ b_0(\xi, \cL_\xi)\eta\Big]. \eea Assume $\pa_m f_0 \le 1-\e$ for some $\e>0$, in particular if $f_0$ is decreasing in $m$, then $\pa_m \psi^{\xi, \eta}\le 1-\e$ and thus $\psi^{\xi, \eta}$ has a unique fixed point $m^* = \f(\cL_{(\xi, \eta)})$. Therefore, $\cI^{\xi,\eta}$ has a unique fixed point: \bea \label{fixpsi} \Phi(\cL_{(\xi, \eta)}) = \cL_{(\xi, ~ f_0(\xi, \cL_\xi, \f(\cL_{(\xi, \eta)}))+ b_0(\xi, \cL_\xi)\eta)}. \eea \end{eg} \subsection{Derivatives of measure valued functions} Note that $\Phi$ is a mapping from $\cP_2(\dbR^{2d})$ to $\cP_2(\dbR^{2d})$. Consider an arbitrary dimension $k$. In this subsection we introduce the linear functional derivative of functions mapping from $\cP_2(\dbR^k)$ to $\cP_2(\dbR^k)$, which is interesting in its own right. We refer to \cite[Eq. (5.52)]{CD1} for the linear functional derivative of functions mapping from $\cP_2(\dbR^k)$ to $\dbR$. Let $\mathcal{S}(\mathbb R^k)$ denote the Schwartz space, namely the set of smooth functions $u\in \cC^\infty(\dbR^k; \dbR)$ such that $u$ and all its derivatives decrease rapidly when $|x|\to \infty$, and $\cS'(\dbR^k)$ its dual space, namely the space of tempered distributions. \begin{defn} \label{defn-derivative} Consider a mapping $\Phi: \cP_2(\dbR^k) \to \cP_2(\dbR^k)$. 
We say ${\d \Phi\over \d\rho}: \cP_2(\dbR^k) \times \dbR^k \to \mathcal{S}'(\mathbb R^k)$ is the linear functional derivative of $\Phi$ if, for any $\psi \in \mathcal{S}(\mathbb R^k)$, \bea \label{derivative} \frac{\delta \Psi}{\delta\rho} (\rho, x) = \Big\la {\d \Phi\over \d\rho}(\rho, x), ~ \psi\Big\ra,\q\mbox{where}\q \Psi(\rho) := \int_{\dbR^k} \psi(x) \Phi(\rho; dx). \eea \end{defn} We note that $ {\d \Phi\over \d\rho}(\rho, x)$ is well defined for $\rho$-a.e. $x$. For the applications later, we will require ${\d \Phi\over \d\rho}$ to have stronger properties. For this purpose, let $\mathcal{SM}_2(\mathbb R^k)$ denote the set of the square integrable signed measures of bounded variation on $\mathbb R^k$. That is, $m$ has the unique decomposition $m=m_1-m_2$ and $\int_{\mathbb R^k}(1+|y|^2)|m|(dy)<\infty$, where $m_1,m_2$ are mutually singular non-negative measures on $\mathbb R^k$, and $|m|(dy):=m_1(dy)+m_2(dy)$, see e.g. \cite{Bogachev} for details. Moreover, for any $n\ge 0$, let $\mathcal{DSM}^n_2(\dbR^k)\subset \cS'(\dbR^k)$ denote the linear span of generalized derivatives of signed measures in $\mathcal{SM}_2(\mathbb R^k)$ up to order $n$, namely the span of terms taking the form $\pa_{y_1}^{j_1}\cds \pa_{y_k}^{j_k} m$, where $m\in \mathcal{SM}_2(\mathbb R^k)$ and $\sum_{i=1}^k j_i \le n$. On the other hand, let $\cC^n_2(\dbR^k)$ denote the set of functions $\psi: \dbR^k \to \dbR$ such that $\psi$ has continuous derivatives up to order $n$ and \bea \label{n-norm} \|\psi\|_n := \sup_{y\in \dbR^k} \sum_{j_1+\cds+j_k\le n} {|\pa_{y_1}^{j_1}\cds \pa_{y_k}^{j_k} \psi(y)|\over 1+|y|^2} <\infty. \eea Then clearly $\mathcal{DSM}^n_2(\dbR^k)$ is in the dual space of $\cC^n_2(\dbR^k)$ in the sense that \bea \label{integration-by-parts} \Big\la \pa_{x_1}^{j_1}\cds \pa_{x_k}^{j_k} m,~ \psi \Big\ra = (-1)^{\sum_{i=1}^k j_i} \int_{\dbR^k} \pa_{y_1}^{j_1}\cds \pa_{y_k}^{j_k} \psi(y) m(dy). \eea Now if ${\d \Phi\over \d\rho}(\rho, x) \in \mathcal{DSM}^n_2(\dbR^k)$, then we may extend \reff{derivative} to all $\psi \in \cC^n_2(\dbR^k)$, and we shall write \beaa \int_{\dbR^k} \psi(y) {\d \Phi\over \d\rho}(\rho, x; dy):= \big\la {\d \Phi\over \d\rho}(\rho, x), ~\psi\big\ra,\q \forall \psi\in \cC^n_2(\dbR^k), \eeaa where the right side is in the sense of \reff{integration-by-parts}. We next show two examples. \begin{eg} \label{eg-trivial} Let $\Phi(\rho)=\rho$ for any $\rho\in\cP_2(\mathbb R^k)$. Then $\frac{\delta\Phi}{\delta\rho}(\rho,x; dy)=\delta_x(dy)$, namely $ \frac{\delta\Phi}{\delta\rho}(\rho,x) \in {\cal SM}_2(\dbR^k) = {\cal DSM}^0_2(\dbR^k)$ for all $\rho\in\mathcal{P}_2(\mathbb R^d)$ and $x\in\mathbb R^k$. \end{eg} \proof For any $\psi\in\mathcal{S}(\mathbb R^k)$, by \eqref{derivative} we have $\Psi(\rho) = \int_{\dbR^d} \psi(x) \rho(dx)$. Then $ \frac{\delta \Psi}{\delta\rho}(\rho,x)=\psi(x)=\int_{\mathbb R^k}\psi(y)\delta_x(dy), $ and thus $\frac{\delta\Phi}{\delta\rho}(\rho,x)=\delta_x\in\mathcal{SM}_2(\dbR^k)$. \qed \begin{eg} \label{eg-derivative} Set $\Phi(\cL_{(\xi, \eta)}) := \cL_{(\xi, \eta + c\dbE[\eta])}$, $\forall \xi, \eta\in \dbL^2(\cF; \dbR^d)$, for some constant $c\in \dbR$. Then $ \frac{\delta\Phi}{\delta\rho}(\rho,x, p) \in {\cal DSM}^1_2(\dbR^{2d})$. More precisely, letting $\dbE_\rho$ denote expectation under law $\rho=\cL_{(\xi, \eta)}$, \bea \label{derivative-example} \dis {\d \Phi\over \d\rho}(\rho, x, p; d\tilde x, d\tilde p) = \d_x (d\tilde x) \d_{p+ c\dbE_\rho[\eta]}(d\tilde p) - c ~\pa_{\tilde p}\Phi(\rho)(d\tilde x, d\tilde p)\cdot p. 
\eea \end{eg} \proof For any $\psi \in \cS(\dbR^{2d})$, we have $\Psi(\rho)= \dbE_{\rho}\big[\psi(\xi, \eta + c\dbE_{\rho}[\eta])\big]$. Then \beaa \frac{\delta\Psi}{\delta\rho}(\rho,x,p) &=& \psi\big(x,p+c\dbE_{\rho}[\eta]\big)+ c\dbE_{\rho}\big[\pa_p\psi(\xi, \eta + c\dbE_{\rho}[\eta])\big]\cdot p\\ &=& \psi\big(x,p+c\dbE_{\rho}[\eta]\big)+ cp \cdot \int_{\dbR^k} \pa_{\tilde p} \psi(\tilde x, \tilde p) \Phi(\rho)(d\tilde x, d \tilde p). \eeaa Compare this with \reff{derivative}, we obtain \reff{derivative-example} immediately. \qed Our main result of this part is the following chain rule. We shall use the notation $\nu = \Phi(\rho)$. \begin{prop}\label{prop-chain} Let $\Phi: \cP_2(\dbR^k)\to \cP_2(\dbR^k)$, $U: \cP_2(\dbR^k)\to \dbR$. Assume (i) $\Phi$ has a linear functional derivative $\frac{\delta\Phi}{\delta\rho}(\rho,x)\in\mathcal{DSM}^n_2(\dbR^k)$ for all $(\rho,x)\in\cP_2(\dbR^k)\times\dbR^k$; $\frac{\delta\Phi}{\delta\rho}(\rho,x)$ is continuous in $(\rho, x)$ under the weak topology, that is, for any $\psi \in \cC^n_2(\dbR^k)$, the mapping $(\rho, x)\to \big\la\frac{\delta\Phi}{\delta\rho}(\rho,x), \psi\big\ra$ is continuous (under $\cW_2$ for $\rho$); and, for any compact set $K \subset \cP_2(\dbR^k)$, there exists a constant $C_K>0$ such that \bea \label{Phiquadratic} \sup_{\rho\in K}\Big|\big\la \frac{\delta\Phi}{\delta\rho}(\rho, x),~ \psi\big\ra\Big| \le C_K \|\psi\|_n [1+|x|^2],\q \forall \psi \in \cC^n_2(\dbR^k). \eea (ii) $U$ has a linear functional derivative $\frac{\delta U}{\delta \nu}$; for each $\nu \in \cP_2(\dbR^k)$, $\frac{\delta U}{\delta\nu}(\nu, \cd) \in \cC^n_2(\dbR^k)$; and, by equipping $\cC^n_2(\dbR^k)$ with the norm $\|\cd\|_n$ in \reff{n-norm}, the mapping $\nu \to \frac{\delta U}{\delta\nu}(\nu, \cd)$ is continuous. Then the composite function $\wh U:= U\circ \Phi:\cP_2(\dbR^k)\to\mathbb R$ has a linear functional derivative: \bea \label{linearchain} \frac{\delta \wh U}{\delta\rho} (\rho, x) = \int_{\dbR^k} \frac{\delta U}{\delta \nu} (\Phi(\rho), y) {\d \Phi\over \d\rho}(\rho, x; dy). \eea \end{prop} \proof Fix $\rho, \rho' \in \cP_2(\dbR^k)$. For $0< \e<1$, denote $\rho_\e:= \rho + \e(\rho'-\rho)$. By the definition of ${\d U\over \d\nu}$ we have \beaa &\dis \wh U(\rho_\e) - \wh U(\rho) = U(\Phi(\rho_\e)) - U(\Phi(\rho)) =\int_0^1 \Big[\Psi_\th(\rho_\e) - \Psi_\th(\rho)\Big] d\th,\\ &\dis \mbox{where}\q \psi_\th(x) := {\d U\over \d \nu}\Big(\th \Phi(\rho_\e) +(1-\th) \Phi(\rho), x\Big),\q \Psi_\th(\tilde\rho) := \int_{\dbR^k} \psi_\th(x) \Phi(\tilde\rho; dx),~ \forall \tilde \rho\in \cP_2(\dbR^k). \eeaa Then, by \reff{derivative} we have \beaa \frac{\delta \Psi_\th}{\delta\rho}(\tilde \rho, x) = \int_{\dbR^k} \psi_\th(y) {\d \Phi\over \d\rho}(\tilde\rho, x; dy) = \int_{\dbR^k} \frac{\delta U}{\delta\nu} \Big(\th \Phi(\rho_\e) +(1-\th) \Phi(\rho), y\Big) {\d \Phi\over \d\rho}(\tilde\rho, x; dy). \eeaa Note that $\rho + \tilde \th (\rho_\e - \rho) = \rho_{\tilde \th \e}$, then \beaa \left.\ba{lll} \dis {1\over \e}\big[ \wh U(\rho_\e) - \wh U(\rho) \big] = {1\over \e} \int_0^1 \int_0^1 \int_{\dbR^k}{\d \Psi_\th\over \d \rho} (\rho_{\tilde \th\e}, x) (\rho_\e - \rho)(dx) d\tilde \th d\th\ms\\ \dis = \int_0^1 \int_0^1 \int_{\dbR^k} \int_{\dbR^k} \frac{\delta U}{\delta\nu} \Big(\th \Phi(\rho_\e) +(1-\th) \Phi(\rho), y\Big) {\d \Phi\over \d\rho}(\rho_{\tilde \th \e}, x; dy) (\rho' - \rho)(dx) d\tilde \th d\th\ms\\ \dis= I_1(\e) + I_2(\e), \ea\right. 
\eeaa where \beaa I_1(\e) &:=& \int_0^1 \int_{\dbR^k} \int_{\dbR^k} \frac{\delta U}{\delta\nu} \Big(\Phi(\rho), y\Big) {\d \Phi\over \d\rho}(\rho_{\tilde \th \e}, x; dy) (\rho' - \rho)(dx) d\tilde \th;\\ I_2(\e) &:=& \int_0^1 \int_0^1 \int_{\dbR^k} \int_{\dbR^k} \Big[\frac{\delta U}{\delta\nu} \Big(\th \Phi(\rho_\e) +(1-\th) \Phi(\rho), y\Big)- \frac{\delta U}{\delta\nu} \big(\Phi(\rho), y\big)\Big] \times\\ && {\d \Phi\over \d\rho}(\rho_{\tilde \th \e}, x; dy) (\rho' - \rho)(dx) d\tilde \th d\th.\ \eeaa Clearly $\lim_{\e\to 0}W_2(\rho_{\tilde \th \e}, \rho)=0$. By the continuity of ${\d \Phi\over \d\rho}$ we have \beaa \lim_{\e\to 0}\int_{\dbR^k} \frac{\delta U}{\delta\nu} \Big(\Phi(\rho), y\Big) {\d \Phi\over \d\rho}(\rho_{\tilde \th \e}, x; dy)=\int_{\dbR^k} \frac{\delta U}{\delta\nu} \Big(\Phi(\rho), y\Big) {\d \Phi\over \d\rho}(\rho, x; dy),\q \forall \tilde \th, x. \eeaa Moreover, note that $K:= \{\rho_\e: 0\le \e\le 1\} \subset \cP_2(\dbR^k)$ is compact. Then by \reff{Phiquadratic} we have \beaa \Big|\int_{\dbR^k} \frac{\delta U}{\delta\nu} \Big(\Phi(\rho), y\Big) {\d \Phi\over \d\rho}(\rho_{\tilde \th \e}, x; dy)\Big| \le C\|\frac{\delta U}{\delta\nu} \big(\Phi(\rho), \cd\big)\|_n [1+|x|^2]. \eeaa Now it follows from the dominated convergence theorem that \bea \label{I1e} \lim_{\e\to 0} I_1(\e) =\int_{\dbR^k}\int_{\dbR^k} \frac{\delta U}{\delta\nu} \Big(\Phi(\rho), y\Big) {\d \Phi\over \d\rho}(\rho, x; dy) (\rho'-\rho)(dx). \eea Moreover, by \reff{Phiquadratic} again we have \beaa &&\dis \Big|\int_{\dbR^k} \Big[\frac{\delta U}{\delta\nu} \Big(\th \Phi(\rho_\e) +(1-\th) \Phi(\rho), y\Big)- \frac{\delta U}{\delta\nu} \big( \Phi(\rho), y\big)\Big] {\d \Phi\over \d\rho}(\rho_{\tilde \th \e}, x; dy) \Big|\\ &&\dis\le C\|\frac{\delta U}{\delta\nu} \Big(\th \Phi(\rho_\e) +(1-\th) \Phi(\rho), \cd\Big)- \frac{\delta U}{\delta\nu} \big( \Phi(\rho), \cd\big)\|_n [1+|x|^2]. \eeaa Then \beaa |I_2(\e)| &\le& C\int_0^1 \int_{\dbR^k}\|\frac{\delta U}{\delta\nu} \Big(\th \Phi(\rho_\e) +(1-\th) \Phi(\rho), \cd\Big)- \frac{\delta U}{\delta\nu} \big( \Phi(\rho), \cd\big)\|_n [1+|x|^2] (\rho'+\rho)(dx) d\th\\ &\le& C\int_0^1 \|\frac{\delta U}{\delta\nu} \Big(\th \Phi(\rho_\e) +(1-\th) \Phi(\rho), \cd\Big)- \frac{\delta U}{\delta\nu} \big( \Phi(\rho), \cd\big)\|_n d\th \to 0, ~ \mbox{as}~ \e\to 0, \eeaa thanks to the continuity of ${\d U\over \d\nu}$ in $\nu$ under $\|\cd\|_n$. This, together with \reff{I1e}, leads to \beaa \lim_{\e\to 0}{1\over \e}\big[ \wh U(\rho_\e) - \wh U(\rho) \big] = \int_{\dbR^k}\int_{\dbR^k} \frac{\delta U}{\delta\nu} \Big(\Phi(\rho), y\Big) {\d \Phi\over \d\rho}(\rho, x; dy) (\rho'-\rho)(dx), \eeaa which implies \reff{linearchain} immediately. \qed \begin{rem} \label{rem-derivative} By considering generalized derivatives in appropriate dual space, we may define higher order derivatives of $\Phi$, including the Lions derivative $\pa_\rho \Phi(\rho, x):= \pa_x {\d \Phi\over \d \rho}(\rho, x)$. Alternatively, since later on we will always consider certain composite function $\wh U$, we may define higher order derivatives through the left side of \reff{linearchain}. \end{rem} \section{The master equation and the monotonicities} \label{sect-master} \setcounter{equation}{0} Throughout the paper, Assumption \ref{assum-fix} will always be in force. Denote \bea \label{barH} \wh H(x, p, \rho) := H(x, p, \Phi(\rho)),\q (x, p, \rho)\in \dbR^d\times \dbR^d \times \cP_2(\dbR^{2d}). 
\eea The derivatives of $\wh H$ with respect to $\rho$ are understood as in Proposition \ref{prop-chain} and Remark \ref{rem-derivative}. Then \reff{X*} becomes \bea \label{barX*} \left.\ba{c} \dis X_t^{*}=\xi+\int_{t_0}^{t} \pa_p \wh H\Big(X_s^{*}, \pa_x V(s, X^*_s, \mu^*_s), \rho^*_s\Big)ds+B_t^{t_0}+\beta B_t^{0,t_0},\\ \mbox{where}\q \mu^*_s:= \cL_{X^*_s|\cF^0_s},\q \rho^*_s:= \cL_{(X^*_s, \pa_x V(s, X^*_s, \mu^*_s))|\cF^0_s}. \ea\right. \eea On the other hand, it follows from the standard stochastic control theory that, for given $t_0, \mu$, the optimization \reff{MFE} is associated with the following Backward SDE: recalling \reff{Hp}, \bea \label{Y*} \left.\ba{c} \dis Y_t^{*}= G(X^*_T, \mu_T^*) - \int_t^T Z^*_s dB_s -\int_t^T Z^{0,*}_s dB_s^0\\ \dis +\int_{t}^T \Big[\wh H(\cd) - \pa_x V(s, X^*_s, \mu^*_s) \cd \pa_p \wh H(\cd)\Big]\Big(X_s^{*}, \pa_x V(s, X^*_s,\mu^*_s), \rho^*_s\Big)ds, \ea\right. \eea which, together with \reff{barX*} form the MFGC system. We note that this is the SDE counterpart of the MFGC system \reff{SPDEint}. In particular, we have \bea \label{YVX} Y^*_t = V\big(t, X^*_t, \mu_t^*\big). \eea Then, by applying the It\^{o}'s formula (c.f. \cite[Theorem 4.17]{CD2},\cite{BLPR,CCD}) we obtain \bea &&d V(t, X^*_t, \mu^*_t) = \Big[\pa_t V + \pa_x V\cdot \pa_p \wh H(X^*_t, \pa_x V, \rho^*_t) + \frac{1+\b^2}{2} \tr\big(\pa_{xx} V \big)\Big](t, X^*_t, \mu^*_t) dt \nonumber\\ &&+\pa_xV(t,X^*_t,\mu^*_t)\cd dB_t + \beta\Big[\pa_xV(t,X^*_t,\mu^*_t) + \tilde{\mathbb E}_{\cF_t}\big[\pa_\mu V(t,X^*_t,\mu^*_t,\tilde X^*_t) \big]\Big]\cd dB_t^0 \nonumber\\ &&+\tr \Big(\tilde \dbE_{\cF_t}\big[\pa_\mu V(t,X^*_t,\mu_t^*,\tilde X_t^*) (\pa_p\wh H(t,\tilde X_t^*, \pa_x V(t, \tilde X_t, \mu_t^*), \rho^*_t))^\top\big]\Big) dt \label{Ito}\\ &&+ \tr \Big(\b^2\tilde \dbE_{\cF_t}\big[\pa_x\pa_\mu V(t,X^*_t,\mu^*_t,\tilde X^*_t)+\frac{1+\beta^2}{2} \pa_{\tilde x}\pa_\mu V(t, X^*_t, \mu_t^*, \tilde X^*_t)\big]\nonumber\\ &&\qq +\frac{\beta^2}{2}\bar{\tilde \dbE}_{\cF_t}\big[\pa_{\mu\mu}V(t,X^*_t,\mu^*_t,\tilde X^*_t,\bar X^*_t) \big]\Big)dt.\nonumber \eea Here as usual $\tilde X^*,\bar X^*$ are conditionally independent copies of $X^*$, conditional on $\dbF^0$. Comparing this with \eqref{Y*}, we derive the master equation: for independent copies $\xi, \tilde \xi, \bar \xi$ with law $\mu$, \bea \label{master} \left.\ba{c} \dis \cL V(t,x,\mu) :=\pa_t V + \frac{\h\b^2}{2} \tr(\pa_{xx} V) + \wh H(x,\partial_x V,\mathcal{L}_{(\xi,\pa_xV(t,\xi,\mu))}) + \cM V =0, \\ \dis V(T,x,\mu) = G(x,\mu),\q\mbox{where}\\ \dis \cM V(t,x,\mu) := \tr\Big( \bar{\tilde \dbE}\Big[\frac{\h\b^2}{2} \pa_{\tilde x} \pa_\mu V(t,x, \mu, \tilde \xi) +\b^2\pa_x\pa_\mu V(t,x,\mu,\tilde \xi)+\frac{\b^2}{2}\pa_{\mu\mu}V(t,x,\mu,\bar\xi,\tilde\xi) \\ \dis + \pa_\mu V(t, x, \mu, \tilde \xi)(\pa_p\wh H)^\top(\tilde \xi,\pa_x V(t, \tilde \xi, \mu), \mathcal{L}_{(\xi,\pa_xV(t,\xi,\mu))}) \Big]\Big), \q\mbox{and}\q \h \b^2 := 1+\b^2. \ea\right. \eea In addition to Assumption \ref{assum-fix}, we assume \begin{assum} \label{assum-regH} $\wh H\in \cC^2(\mathbb R^{2d}\times\mathcal{P}_2(\mathbb R^{2d}))$ with bounded $\pa_{xp}\wh H, \pa_{xx}\wh H, \pa_{pp}\wh H, \pa_{x\mu}\wh H, \pa_{p\mu}\wh H$. \end{assum} Since we will work on the master equation, here we impose our conditions directly on $\wh H$, rather than on $b, f$. It is not hard to find some sufficient conditions on $b$ and $f$ to ensure these. 
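For intuition about how the fixed point map $\Phi$ enters $\wh H$, it can be computed explicitly in simple cases. The following minimal sketch (our own illustration; the coefficients $f_0$, $b_0$ and the sampled law are hypothetical choices) approximates $\Phi(\cL_{(\xi,\eta)})$ for the one-dimensional Example~\ref{eg-nonseparable} by iterating the scalar map $\psi^{\xi,\eta}$ from \eqref{psi} on samples, and then evaluates $\wh H(x,p,\rho)=H(x,p,\Phi(\rho))$ at the resulting fixed point (taking $b_1=f_1=0$, so that $H(x,p,\nu)=-\frac12 |f_0(x,\dbE_\nu[\a])+p\, b_0(x)|^2$).
\begin{verbatim}
import numpy as np

# Fixed point of the nonseparable example in d = 1 with hypothetical coefficients
# f0(x, m) = 0.5*m - x and b0(x) = 1 (so df0/dm = 0.5 < 1 and psi is a contraction),
# and b1 = f1 = 0.  The law rho = L_(xi, eta) is represented by samples.
rng = np.random.default_rng(3)
xi = rng.normal(loc=0.2, scale=1.0, size=50_000)      # samples of xi
eta = 0.5 * xi + rng.normal(size=xi.size)             # samples of eta

f0 = lambda x, m: 0.5 * m - x
b0 = lambda x: 1.0 + 0.0 * x

m = 0.0                         # iterate m <- psi(m) = E[f0(xi, m) + b0(xi)*eta]
for _ in range(50):
    m = np.mean(f0(xi, m) + b0(xi) * eta)
alpha = f0(xi, m) + b0(xi) * eta      # control marginal of nu* = Phi(L_(xi, eta))
print(m, alpha.mean())                # at the fixed point these two agree

x, p = 0.3, -0.1                # evaluate hatH(x, p, rho) = H(x, p, Phi(rho))
hatH = -0.5 * (f0(x, m) + p * b0(x)) ** 2
print(hatH)
\end{verbatim}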
\subsection{The monotonicities} In this subsection we introduce three types of monotonicity conditions: Lasry-Lions monotonicity, displacement semi-monotonicity, and anti-monotonicity. \begin{defn} Assume $U\in\mathcal{C}^1(\mathbb R^d\times\mathcal{P}_2(\mathbb R^d))$ and $\pa_\mu U(\cd,\mu,\tilde x)\in \cC^1(\mathbb R^d)$ for all $(\mu,\tilde x)\in\mathcal{P}_2(\mathbb R^d)\times\mathbb R^d$. We say $U$ is Lasry-Lions monotone if \begin{equation} \label{LL} MON^{LL} U(\xi, \eta):=\tilde \dbE\Big[\big\langle \pa_{x\mu }U(\xi,\mathcal{L}_\xi,\tilde \xi)\tilde\eta,\eta\big\rangle\Big]\ge 0,\q \forall \xi,\eta\in \mathbb L^2(\cF^1_T). \end{equation} \end{defn} We note that, since $(\xi,\eta)$ is $\cF^1_T$-measurable, so here $(\tilde \xi, \tilde \eta)$ is an independent copy (instead of conditionally independent copy). \begin{defn} Assume $U,\pa_x U\in\mathcal{C}^1(\mathbb R^d\times\mathcal{P}_2(\mathbb R^d))$. For any $\l\ge 0$, we say $U$ is displacement $\l$-monotone if, for all $\xi,\eta\in \mathbb L^2(\cF^1_T)$ \begin{equation} \label{lambdadisplacementsm} MON^{disp}_\l U(\xi, \eta):=\tilde\dbE\Big[\big\langle \pa_{x\mu }U(\xi,\mathcal{L}_\xi,\tilde \xi)\tilde\eta,\eta\big\rangle+\big\langle\pa_{xx}U(\xi,\mathcal{L}_{\xi})\eta,\eta\big\rangle + \l |\eta|^2\Big] \ge 0. \end{equation} In particular, we say $U$ is displacement monotone when $\l=0$, and displacement semi-monotone if it is displacement $\l$-monotone for some $\l>0$. \end{defn} Moreover, denote \bea \label{D4} D_4 := \Big\{\vec \l = (\l_0, \l_1, \l_2, \l_3): \l_0>0, \l_1\in \dbR, \l_2 >0, \l_3\ge 0\Big\}. \eea \begin{defn} \label{defn-anti} Let $U\in \cC^2(\dbR^d\times \cP_2(\mathbb R^d))$ and $\vec\l\in D_4$. We say $U$ is \mbox{$\vec\l$-anti-monotone} if, \bea \label{anti} \left.\ba{c} MON^{anti}_{\vec \l} U(\xi, \eta):= \tilde\dbE\bigg[\l_0\langle\pa_{xx}U(\xi,\mathcal{L}_{\xi})\eta,\eta\rangle+\l_1\langle\pa_{x\mu}U(\xi,\mathcal{L}_{\xi},\tilde\xi)\tilde\eta,\eta\rangle+\left|\pa_{xx}U(\xi,\mathcal{L}_{\xi})\eta\right|^2\\ +\l_2\big|\tilde{\mathbb E}_\cF[\pa_{x\mu}U(\xi,\mathcal{L}_{\xi},\tilde\xi)\tilde \eta]\big|^2 - \l_3|\eta|^2 \bigg]\leq 0,\q \forall \xi,\eta\in \mathbb L^2(\cF^1_T). \ea\right. \eea \end{defn} \begin{rem} \label{rem-mon} (i) By \cite[Remark 2.4]{GMMZ}, Lasry-Lions monotonicity and displacement monotonicity are equivalent to the following forms, respectively, which are more often seen in the literature: \beaa &\dis \dbE\Big[ U(\xi_1, \cL_{\xi_1}) + U(\xi_2, \cL_{\xi_2}) - U(\xi_1, \cL_{\xi_2}) -U(\xi_2, \cL_{\xi_1}) \Big] \ge 0,\q \forall \xi_1, \xi_2\in \dbL^2(\cF^1_T);\\ &\dis \dbE\Big[ \big\langle \pa_xU(\xi_1, \cL_{\xi_1}) -\pa_x U(\xi_2, \cL_{\xi_2}),~ \xi_1-\xi_2\big\rangle \Big] \ge 0, \q \forall \xi_1, \xi_2\in \dbL^2(\cF^1_T). \eeaa (ii) Consider the case that $\pa_x U(x, \mu) = \pa_\mu \cU(\mu, x)$ for some $\mathcal{U}\in\cC^2(\cP_2(\dbR^d))$. Then the Lasry-Lions monotonicity of $U$ is equivalent to the convexity of the mapping $\mu \in \cP_2(\dbR^d) \mapsto \cU(\mu)$, and the displacement monotonicity of $U$ is equivalent to the convexity of the mapping $\xi \in \dbL^2(\cF^1_T) \mapsto \cU(\cL_\xi)$, see e.g. \cite{CDLL,CD1}. (iii) Both the Lasry-Lions monotonicity (provided $\pa_{xx} U$ is bounded) and the displacement monotonicity imply the displacement semi-monotonicity. However, the Lasry-Lions monotonicity and the displacement monotonicity do not imply each other, see \cite[Remark 2.5]{GMMZ}. 
(iv) By setting $\l_0=\l_1=\l_2=1$ and $\l_3=0$, \reff{anti} implies \beaa \tilde\dbE\bigg[\langle\pa_{xx}U(\xi,\mathcal{L}_{\xi})\eta,\eta\rangle+\langle\pa_{x\mu}U(\xi,\mathcal{L}_{\xi},\tilde\xi)\tilde\eta,\eta\rangle \bigg]\leq 0, \eeaa which is in the opposite direction of \reff{lambdadisplacementsm} with $\l=0$. Moreover, if $\pa_{xx}U$ is non-negative definite, then we further have \beaa \tilde\dbE\bigg[\langle\pa_{x\mu}U(\xi,\mathcal{L}_{\xi},\tilde\xi)\tilde\eta,\eta\rangle \bigg]\leq 0, \eeaa which is in the opposite direction of \eqref{LL}. This is why we call \reff{anti} anti-monotonicity. (v) If $U$ satisfies \eqref{lambdadisplacementsm} for some $\l\geq 0$, then $\pa_{xx}U+\l I$ is non-negative definite, see \cite[Lemma 2.6]{GMMZ}. \end{rem} \subsection{A road map towards the global wellposedness} \label{sect-strategy} Our ultimate goal is to establish the global wellposedness of the master equation \reff{master}. We shall adopt the strategy in \cite{GMMZ, MZ3}, which consists of three steps: \vskip 2pt {\it Step 1.} Introduce an appropriate monotonicity condition on the data which ensures the propagation of the monotonicity, one of the three types introduced in the previous subsection, along any classical solution to the master equation. \vskip 2pt {\it Step 2.} Show that the monotonicity of $V(t,\cdot,\cdot)$ implies an (a priori) uniform Lipschitz continuity of $V$ in the measure variable $\mu$. \vskip 2pt {\it Step 3.} Combine the local wellposedness of classical solutions and the above uniform Lipschitz continuity to obtain the global wellposedness of classical solutions. Moreover, following \cite{CDLL} one may continue to investigate the convergence problem: \vskip 2pt {\it Step 4.} Use the classical solution $V$ to prove the convergence of the related $N$-player game. \vskip 2pt In this paper we shall focus on {\it Step 1} only, and we leave the remaining three steps to future research. We emphasize that {\it Step 1} (and {\it Step 2}) concerns a priori estimates, and thus throughout the paper we shall also assume: \begin{assum} \label{assum-regV} $V\in \mathcal{C}^{1,2,2}([0,T]\times\mathbb R^d\times\mathcal{P}_2(\mathbb R^d))$ is a classical solution of the master equation \reff{master} such that $\pa_{xx}V(t,\cd,\cd)\in\mathcal{C}^2(\dbR^d\times\mathcal{P}_2(\mathbb R^d))$, $\pa_{x\mu}V(t,\cd,\cd,\cd)\in\mathcal{C}^2(\dbR^d\times\mathcal{P}_2(\mathbb R^d)\times \dbR^d)$, and all the second and higher order derivatives of $V$ involved above are uniformly bounded and continuous in $t$. \end{assum} We note that we do not require $V$ or its first order derivatives to be bounded. Moreover, since $G = V(T,\cd,\cd)$, the above assumption also ensures the regularity of $G$. We shall also remark that Assumption \ref{assum-regV} is about the existence of classical solutions of the master equation \reff{master}, which implies the uniqueness of the mean field equilibrium (cf. \cite[Remark 2.10 (ii)]{GMMZ}). The uniqueness of classical solutions satisfying the desired Lipschitz continuity is standard; see, e.g., the arguments in \cite[Theorem 6.3]{GMMZ}. \section{Propagation of Lasry-Lions monotonicity} \label{sect-LL} \setcounter{equation}{0} To propagate the Lasry-Lions monotonicity of $V$, we impose the following assumption on $\wh H$.
\begin{assum}\label{assum-LL} For any $\xi, \eta, \g, \zeta\in\mathbb L^2(\cF^1_T)$ and $\varphi: \dbR^d\to \dbR^d$ Lipschitz continuous, \bea \label{HLL} \left.\ba{c} \dis \tilde\dbE\bigg[ \Big\langle \zeta, \wh H_{pp}(\xi)\zeta\Big\rangle - \Big\langle \eta, ~\wh H_{x\rho_1}(\xi,\tilde \xi) \tilde \eta+ \wh H_{x\rho_2}(\xi,\tilde \xi)[ \tilde\g +\tilde \zeta]\Big\rangle \\ \dis\qq -\Big\langle \g -\zeta, ~\wh H_{p\rho_1}(\xi, \tilde \xi) \tilde \eta+\wh H_{p\rho_2}(\xi,\tilde \xi)[ \tilde \g+\tilde \zeta] \Big\rangle \bigg] \le 0, \ea\right. \eea where $\wh H_{pp}(x) := \pa_{pp} \wh H\Big(x, \f(x), \cL_{(\xi, \f(\xi))}\Big)$, $\wh H_{x\rho}(x,\tilde x) := \pa_{x\rho}\wh H\Big(x, \f(x), \cL_{(\xi, \f(\xi))}, \tilde x, \f(\tilde x)\Big)$, and similarly for $\wh H_{p\rho}(x,\tilde x)$. \end{assum} The main result of this section is: \begin{thm}\label{thm:LL} Let Assumptions \ref{assum-fix}, \ref{assum-regH}, \ref{assum-regV}, and \ref{assum-LL} hold. If $G$ satisfies the Lasry-Lions monotonicity \reff{LL}, then $V(t,\cd,\cd)$ satisfies \reff{LL} for all $t\in[0,T]$. \end{thm} \proof Without loss of generality, we shall prove the theorem only for $t=0$. For any $\xi,\eta\in\mathbb L^2(\mathcal{F}_0)$, inspired by \reff{barX*} we consider the following system of McKean-Vlasov SDEs, which clearly has a unique solution $(X,\delta X)$ under Assumptions \ref{assum-regH} and \ref{assum-regV}: \bea \label{XY} \left.\ba{lll} \dis X_t = \xi +\int_0^t \wh H_p(X_s, \pa_x V(s, X_s, \mu_s), \rho_s) ds + B_t+\beta B_t^0;\\ \dis \delta X_t = \eta +\int_0^t \Big[\wh H_{px} (X_s) \delta X_s + \wh H_{pp}(X_s)[\Gamma_s+\Upsilon_s ] + N_s\Big]ds;\q\mbox{where}\\ \dis \mu_t:=\mathcal{L}_{X_t|\mathcal{F}_t^{0}},\q \rho_t := \cL_{(X_t,\pa_xV(t,X_t,\mu_t))|\mathcal{F}_t^{0}};\ms\\ \dis \Gamma_t:=\pa_{xx}V(X_t)\delta X_t,\q \Upsilon_t:=\tilde \dbE_{\mathcal{F}_t}[\pa_{x\mu}V(X_t,\tilde X_t)\delta \tilde X_t];\ms\\ \dis N_t :=\tilde \dbE_{\mathcal{F}_t}\Big[ \wh H_{p\rho_1}(X_t,\tilde X_t) \delta \tilde X_t+ \wh H_{p\rho_2}(X_t,\tilde X_t) \big[\tilde\Gamma_t+\tilde \Upsilon_t\big]\Big]. \ea\right. \eea Here $(\tilde X, \d\tilde X, \tilde \G, \tilde \Upsilon)$ is a conditionally independent copy of $(X, \d X, \G, \Upsilon)$, conditional on $\dbF^0$. Moreover, here and in the sequel, for simplicity of notation, we omit the variables $(t, \mu_t)$ inside $V$ and its derivatives, and omit $\rho_t$ and $\partial_x V$ inside $\wh H$ and its derivatives, for example, \bea \label{omit} \left.\ba{c} \pa_{x\mu} V(X_t, \tilde X_t) = \pa_{x\mu}V(t, X_t, \mu_t, \tilde X_t),\q \wh H_p(X_t) := \pa_p \wh H(X_t, \pa_x V(t, X_t, \mu_t) ,\rho_t ), \\ \wh H_{p\rho}(X_t, \tilde X_t):= (\wh H_{p\rho_1},\wh H_{p\rho_2})(X_t, \tilde X_t):= \pa_{p\rho}\wh H(X_t,\pa_x V(t, X_t, \mu_t), \rho_t, \tilde X_t, \pa_x V(t, \tilde X_t, \mu_t)). \ea\right. \eea Introduce \bea \label{It} I(t):=\mathbb E[\langle\Upsilon_t,\delta X_t\rangle] = MON^{LL} V(t,\cd,\cd)(X_t, \d X_t). \eea Applying the It\^{o} formula \reff{Ito} and using the fact that $V$ satisfies the master equation \reff{master}, we get \bea\label{dtI-LL} \left.\ba{c} \dis{d\over dt} I(t)=\tilde\dbE\bigg[ \Big\langle \Upsilon_t, ~\wh H_{pp}(X_t)\Upsilon_t\Big\rangle - \Big\langle \delta X_t, ~\wh H_{x\rho_1}(X_t,\tilde X_t) \delta \tilde X_t+ \wh H_{x\rho_2}(X_t,\tilde X_t)[\tilde \Gamma_t+\tilde \Upsilon_t]\Big\rangle \\ \dis -\Big\langle \Gamma_t- \Upsilon_t, ~\wh H_{p\rho_1}(X_t, \tilde X_t)\delta \tilde X_t+\wh H_{p\rho_2}(X_t,\tilde X_t)\big[\tilde\Gamma_t+\tilde \Upsilon_t\big] \Big\rangle \bigg].
\ea\right. \eea The calculation is lengthy but quite straightforward; we postpone the details to the Appendix. Taking the conditional expectation with respect to $\cF^0_t$, by the desired conditional independence we may apply \eqref{HLL} to obtain: \bea \label{dI} {d\over dt} I(t) \le 0. \eea Note that, by the Lasry-Lions monotonicity of $G = V(T,\cd,\cd)$, we have $I(T)\ge 0$. Then \reff{dI} clearly implies $I(0) \ge 0$, and hence $V(0,\cd,\cd)$ satisfies the Lasry-Lions monotonicity \reff{LL}. \qed \begin{rem} In \eqref{XY} $X$ is the agent's state process along the (unique) mean field equilibrium, and $\d X$ is the gradient of $X$ when its initial condition $\xi$ is perturbed along the direction $\eta$. \end{rem} \begin{rem} \label{rem-LL} Note that \reff{dtI-LL} is an equality, so our condition \reff{HLL} is essentially sharp for the propagation of Lasry-Lions monotonicity, in particular for \reff{dI}. In \cite{CardaliaguetLehalle,CD1, K0} the uniqueness of the mean field game system is obtained when $b(\cdot,a,\cdot) = a$ (or a slightly more general form), and $f$ satisfies the Lasry-Lions monotonicity in the following sense: for any $\xi^i, \a^i\in \dbL^2(\cF)$, $i=1,2$, \bea \label{fmon} \mathbb E\big[f(\xi^1,\alpha^1,\mathcal{L}_{(\xi^1,\alpha^1)})+f(\xi^2,\alpha^2,\mathcal{L}_{(\xi^2,\alpha^2)})-f(\xi^1,\alpha^1,\mathcal{L}_{(\xi^2,\alpha^2)})-f(\xi^2,\alpha^2,\mathcal{L}_{(\xi^1,\alpha^1)})\big]\geq 0. \eea We claim that in this case \reff{dI} holds true, and hence the Lasry-Lions monotonicity propagates. We postpone its proof to the Appendix. \end{rem} \begin{rem} \label{rem-LL-separable} For the standard MFG with $b(x,a,\nu) = a$ (and $f= f(x, a, \mu)$), it is observed in \cite{GMMZ} that it is hard to propagate the Lasry-Lions monotonicity unless $f$ is separable: $f(x, a, \mu) = f_0(x, a) + f_1(x, \mu)$. The dependence on the law of $\a$ in MFGC actually helps with the propagation of the Lasry-Lions monotonicity. In particular, in this case we do not require $f$ to be separable. \end{rem} We next provide an example with a more general $b$, which does not seem to be covered by the analysis of mean field game systems (or master equations) in the literature. \begin{eg} \label{eg-LasryLions} We consider a special case of \reff{nonseparable} with $d=1$: \bea \label{nonseparable2} \left.\ba{c} \dis b(x, a, \cL_{(\xi,\a)}) = -a + b_1(\dbE[\xi],\dbE[\a]) + b_2(x), \ms\\ \dis f(x, a, \cL_{(\xi,\a)}) = {|a|^2\over 2} - c_1 a \dbE[\a] + c_2 x\dbE[\xi] + c_3 x \dbE[\a] + f_1(x), \ea\right. \eea where $0<c_1<1$ and $c_2, c_3>0$ are constants. Assume the matrix \bea \label{matrix1} \begin{bmatrix} 1-[\bar c_1\pa_{m_2} b_1 -\hat c_1]& 0& {1\over 2}[\hat c_3 - \pa_{m_1} b_1 ] \\ 0 & [\bar c_1\pa_{m_2} b_1 -\hat c_1] & {1\over 2}[\hat c_3 + \pa_{m_1} b_1 ]\\ {1\over 2}[\hat c_3 - \pa_{m_1} b_1 ]& {1\over 2}[\hat c_3 + \pa_{m_1} b_1 ]& c_2 \end{bmatrix} \ge 0, \eea where $ \hat c_1 := {c_1\over 1-c_1}$, $\bar c_1:=\frac{1}{1-c_1}$, $\hat c_3 := {c_3\over 1-c_1}$, and $m_1, m_2$ stand for $\dbE[\xi], \dbE[\a]$. Then \reff{HLL} holds true. \end{eg} \proof By Example \ref{eg-nonseparable} we see that \beaa &\dis \Phi(\cL_{(\xi, \eta)}) = \cL_{(\xi, ~ \hat c_1\dbE[\eta] + \eta)},\\ &\dis H(x, p, \cL_{(\xi,\a)}) = -{1\over 2} \big|c_1\dbE[\a] + p\big|^2 + p\big[b_1(\dbE[\xi], \dbE[\a]) + b_2(x)\big] +c_2 x\dbE[\xi] + c_3 x \dbE[\a] + f_1(x). \eeaa Note that $\dbE[\a] = [1+\hat c_1] \dbE[\eta] = \bar c_1 \dbE[\eta]$.
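Indeed, in view of the expression of $\Phi$ above, $\a$ is identified with $\hat c_1\dbE[\eta] + \eta$, so that $\dbE[\a] = [1+\hat c_1]\dbE[\eta]$, while $1+\hat c_1 = 1 + {c_1\over 1-c_1} = {1\over 1-c_1} = \bar c_1$.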
Then \beaa \wh H(x, p, \cL_{(\xi, \eta)}) = -{1\over 2} \big|\hat c_1\dbE[\eta] +p\big|^2 +p\big[ b_1(\dbE[\xi], \bar c_1\dbE[\eta])+b_2(x)\big] +c_2 x\dbE[\xi]+ \hat c_3 x \dbE[\eta] + f_1(x). \eeaa One may compute straightforwardly that \bea \label{whHpp} \wh H_{pp} = -1,\q \wh H_{x\rho_1} = c_2,\q \wh H_{x\rho_2} = \hat c_3,\q \wh H_{p \rho_1}= \pa_{m_1} b_1,\q \wh H_{p\rho_2}=\bar c_1\pa_{m_2}b_1-\hat c_1. \eea Then, noting that $ \pa_{m_1} b_1$ and $ \pa_{m_2} b_1$ are deterministic, \beaa \left.\ba{lll} \dis \tilde\dbE\Big[ - \wh H_{pp}(\xi)|\zeta|^2 + \eta\big[\wh H_{x\rho_1}(\xi,\tilde \xi) \tilde \eta+ \wh H_{x\rho_2}(\xi,\tilde \xi)[ \tilde\g +\tilde \zeta]\big] \\ \dis\qq +[\g -\zeta]\big[\wh H_{p\rho_1}(\xi, \tilde \xi) \tilde \eta+\wh H_{p\rho_2}(\xi,\tilde \xi)[ \tilde \g+\tilde \zeta] \big] \Big] \\ \dis=\tilde\dbE\Big[|\zeta|^2 + c_2 \eta \tilde \eta + \hat c_3 \eta [ \tilde\g +\tilde \zeta] +[\g -\zeta]\big[\pa_{m_1} b_1\tilde \eta+ [\bar c_1\pa_{m_2} b_1 -\hat c_1][ \tilde \g+\tilde \zeta] \big] \Big]\ms\\ \dis = \dbE[|\zeta|^2] + c_2 \big|\dbE[\eta]\big|^2 + \hat c_3 \dbE[\eta] \big[\dbE[\g] + \dbE[\zeta]\big] + \pa_{m_1} b_1 \dbE[\eta]\big[\dbE[\g]-\dbE[\zeta]\big]\ms \\ \dis\qq +[\bar c_1\pa_{m_2} b_1 -\hat c_1] \big[ |\dbE[\g]|^2 - |\dbE[\zeta]|^2\big]\ms\\ \dis \ge \big[1- [\bar c_1\pa_{m_2} b_1 -\hat c_1]\big] \big|\dbE[\zeta]\big|^2 + [\bar c_1\pa_{m_2} b_1 -\hat c_1] \big|\dbE[\g]\big|^2 + c_2 \big|\dbE[\eta]\big|^2\ms\\ \dis \q + \big[\hat c_3 + \pa_{m_1} b_1 \big] \dbE[\eta] \dbE[\g] + \big[\hat c_3 - \pa_{m_1} b_1 \big] \dbE[\eta] \dbE[\zeta]. \ea\right. \eeaa This, together with \reff{matrix1}, clearly implies \reff{HLL}. \qed \section{Propagation of displacement $\l$-monotonicity} \label{sect-displacement} \setcounter{equation}{0} In this section we fix a constant $\l\ge 0$. \begin{assum} \label{assum-displacement} For any $\xi, \eta, \g, \zeta\in\mathbb L^2(\cF^1_T)$ and $\varphi: \dbR^d\to \dbR^d$ Lipschitz continuous, \bea \label{Hdisplacement} \left.\ba{lll} \dis \tilde\dbE\bigg[ \Big\langle\g + \zeta, \wh H_{pp}(\xi)[\g + \zeta]\Big\rangle - \Big\langle \eta, [\wh H_{xx}(\xi)-2\l \wh H_{px}(\xi)]\eta\Big\rangle \\ \dis+\Big\langle \g + \zeta, ~\big[\wh H_{p\rho_1}(\xi, \tilde \xi)+\wh H_{\rho_2 x}(\tilde \xi,\xi)+2\l \wh H_{\rho_2 p}(\tilde \xi,\xi)\big]\tilde\eta+2\l \wh H_{pp}(\xi)\eta\Big\rangle\\ \dis +\Big\langle\g + \zeta, \wh H_{p\rho_2}(\xi,\tilde \xi)[\tilde \g + \tilde \zeta]\Big\rangle- \Big\langle \eta, ~[\wh H_{x\rho_1}(\xi,\tilde \xi)-2\l \wh H_{p\rho_1}(\xi,\tilde \xi)] \tilde \eta\Big\rangle\bigg]\leq 0, \ea\right. \eea where $\wh H_{pp}, \wh H_{x\rho}, \wh H_{p\rho}$ are as in Assumption \ref{assum-LL}. \end{assum} \begin{thm}\label{thm:displacement} Let Assumptions \ref{assum-fix}, \ref{assum-regH}, \ref{assum-regV}, and \ref{assum-displacement} hold. If $G$ satisfies the displacement $\l$-monotonicity \reff{lambdadisplacementsm}, then $V(t,\cd,\cd)$ satisfies \reff{lambdadisplacementsm} for all $t\in[0,T]$. \end{thm} \proof Without loss of generality, we shall prove the theorem only for $t=0$. We will continue to use the notation in the proof of Theorem \ref{thm:LL}. Introduce \begin{equation}\label{barI} \bar I(t):=\mathbb E[\langle\Gamma_t,\delta X_t\rangle],\q\mbox{and thus}\q I(t)+\bar I(t) + \l\mathbb E[|\delta X_t|^2] = MON^{disp}_\l V(t,\cd,\cd)(X_t, \d X_t).
\end{equation} Similarly to \reff{dtI-LL} we can show (see again the Appendix for more details) that \bea\label{cdbarI} \left.\ba{c} \dis {d\over dt} {\bar I}(t) =\tilde\dbE\bigg[ \Big\langle \wh H_{pp}(X_t)\Gamma_t, \Gamma_t\Big\rangle+ 2 \Big\langle \wh H_{pp}(X_t) \Gamma_t, \Upsilon_t\Big\rangle\\ \dis+ 2\Big\langle\Gamma_t, \wh H_{p\rho_1}(X_t, \tilde X_t)\delta \tilde X_t+\wh H_{p\rho_2}(X_t,\tilde X_t)[\tilde \Gamma_t+\tilde \Upsilon_t]\Big\rangle - \Big\langle \wh H_{xx}(X_t) \delta X_t, \delta X_t\Big\rangle\bigg]. \ea\right. \eea Moreover, by \eqref{XY} we have \begin{equation}\label{deltaX2} \frac{d}{dt}\mathbb E\left[|\delta X_t|^2\right]=2\mathbb E\Big[\Big\langle \wh H_{px}(X_t)\delta X_t+\wh H_{pp}(X_t)[ \Upsilon_t+\Gamma_t]+N_t,~\delta X_t \Big\rangle\Big]. \end{equation} Combining \eqref{dtI-LL}, \eqref{cdbarI}, and \eqref{deltaX2}, and recalling the $N$ in \reff{XY}, we deduce that \bea \label{cdI} \left.\ba{lll} \dis {d\over dt} \Big[MON^{disp}_\l V(t,\cd,\cd)(X_t, \d X_t)\Big] = {d\over dt}\Big[I(t)+\bar I(t) + \l\mathbb E[|\delta X_t|^2]\Big]\\ \dis =\tilde\dbE\bigg[ \Big\langle \Upsilon_t+\Gamma_t, \wh H_{pp}(X_t)[\Upsilon_t+\Gamma_t]+\wh H_{p\rho_2}(X_t,\tilde X_t)[\tilde \Gamma_t+\tilde \Upsilon_t]\Big\rangle \\ \dis\q +\Big\langle \Upsilon_t+ \Gamma_t, [\wh H_{p\rho_1}(X_t, \tilde X_t)+\wh H_{\rho_2x}(\tilde X_t,X_t)+2\l \wh H_{\rho_2p}(\tilde X_t,X_t)]\delta \tilde X_t+2\l \wh H_{pp}(X_t)\delta X_t\Big\rangle\\ \dis \q- \Big\langle \delta X_t, [\wh H_{x\rho_1}(X_t,\tilde X_t)-2\l \wh H_{p\rho_1}(X_t,\tilde X_t)] \delta \tilde X_t+[\wh H_{xx}(X_t)-2\l \wh H_{px}(X_t)]\delta X_t\Big\rangle\bigg]. \ea\right. \eea Then, by the desired conditional independence of the involved processes above, conditional on $\cF^0_t$, we have \bea \label{dIbar} {d\over dt} \Big[MON^{disp}_\l V(t,\cd,\cd)(X_t, \d X_t)\Big] \le 0. \eea Since $V(T,\cd, \cd)=G$ satisfies \reff{lambdadisplacementsm}, clearly $V(0,\cd,\cd)$ also satisfies \reff{lambdadisplacementsm}. \qed \ms We next provide a sufficient condition for Assumption \ref{assum-displacement}. Denote, for any $A\in \dbR^{d\times d}$, \bea\label{kappaA} \left.\ba{c} \dis |A| := \sup_{|x|=|y|=1} \langle Ax, y\rangle,\q \underline \k(A) := \inf_{|x|=1} \langle Ax, x\rangle = \mbox{the smallest eigenvalue of ${1\over 2}[A+A^\top]$},\\ \dis \ol \k(A):=\sup_{|x|=1} \langle Ax, x\rangle=-\ul\k(-A). \ea\right. \eea \begin{prop} \label{prop-dissufficient} Assume there exists a constant $c_0\ge 0$ such that $|\pa_{p\rho_2}\wh H|\le c_0$, and $\wh H_{pp} < - c_0 I_d$, where $I_d$ denotes the $d\times d$ identity matrix. Then the following condition implies \reff{Hdisplacement}: \bea \label{lambdaHH} \left.\ba{c} \dis\tilde\dbE\bigg[ \Big\langle \eta, [\wh H_{xx}(\xi)-2\l \wh H_{px}(\xi)]\eta\Big\rangle + \Big\langle \eta, ~[\wh H_{x\rho_1}(\xi,\tilde \xi)-2\l \wh H_{p\rho_1}(\xi,\tilde \xi)] \tilde \eta\Big\rangle - {|\L(\xi, \eta)|^2\over 4}\bigg] \ge 0, \\ \dis \mbox{where}\q \L(\xi, \eta) := (- \wh H_{pp}(\xi) - c_0I_d)^{-{1\over 2}} \times\ms\\ \dis \Big[ \tilde \dbE_{\cF^1_T}\big[[\wh H_{p\rho_1}(\xi, \tilde \xi)+\wh H_{\rho_2 x}(\tilde \xi,\xi)+2\l \wh H_{\rho_2 p}(\tilde \xi,\xi)]\tilde\eta\big]+2\l \wh H_{pp}(\xi)\eta\Big], \ea\right. \eea for all $\xi, \eta\in \dbL^2(\cF_T^1)$.
In particular, when $\l=0$, the above reduces to: \bea \label{lambdaHH2} \left.\ba{c} \dis\tilde\dbE\bigg[ \Big\langle \eta, \wh H_{xx}(\xi)\eta\Big\rangle + \Big\langle \eta, \wh H_{x\rho_1}(\xi,\tilde \xi) \tilde \eta\Big\rangle - {|\L(\xi, \eta)|^2\over 4}\bigg] \ge 0, \ms\\ \dis \mbox{where}\q \L(\xi, \eta) := (- \wh H_{pp}(\xi) - c_0I_d)^{-{1\over 2}} \tilde \dbE_{\cF_T^1}\Big[[\wh H_{p\rho_1}(\xi, \tilde \xi)+\wh H_{\rho_2 x}(\tilde \xi,\xi)]\tilde\eta\Big]. \ea\right. \eea \end{prop} \proof Denote $\Xi:= \big[\wh H_{p\rho_1}(\xi, \tilde \xi)+\wh H_{\rho_2 x}(\tilde \xi,\xi)+2\l \wh H_{\rho_2 p}(\tilde \xi,\xi)\big]\tilde\eta$. Note that \beaa &&\dis \tilde\dbE\bigg[ \Big\langle\g + \zeta, \wh H_{pp}(\xi)[\g + \zeta]\Big\rangle+\Big\langle \g + \zeta, \Xi+2\l \wh H_{pp}(\xi)\eta\Big\rangle+\Big\langle\g + \zeta, \wh H_{p\rho_2}(\xi,\tilde \xi)[\tilde \g + \tilde \zeta]\Big\rangle\bigg]\\ &&\dis \le \tilde\dbE\bigg[\Big\langle\g + \zeta, \wh H_{pp}(\xi)[\g + \zeta]\Big\rangle+\Big\langle \g + \zeta, \Xi+2\l \wh H_{pp}(\xi)\eta\Big\rangle+{c_0\over 2}[|\g + \zeta|^2+|\tilde \g + \tilde \zeta|^2]\bigg]\\ &&\dis = \dbE\bigg[\Big\langle\g + \zeta, [\wh H_{pp}(\xi)+ c_0I_d][\g + \zeta]\Big\rangle+\Big\langle \g + \zeta, \tilde \dbE_{\cF^1_T}[\Xi]+2\l \wh H_{pp}(\xi)\eta\Big\rangle\bigg]\\ &&\dis = \dbE\bigg[- \Big|(-\wh H_{pp}(\xi)- c_0I_d)^{1\over 2}[\g + \zeta]- {1\over 2}\L(\xi,\eta)\Big|^2 + {1\over 4} |\L(\xi,\eta)|^2\bigg]\\ &&\dis \le {1\over 4} \dbE\big[|\L(\xi,\eta)|^2\big]. \eeaa Then clearly \reff{lambdaHH} implies \reff{Hdisplacement}. \qed \begin{rem} \label{rem-displacement} For standard MFGs where $b, f$ do not depend on the law of $\a$, we have $\wh H(x, p, \rho) = H(x, p, \mu)$ where $\mu = {\pi_1}_\# \rho$, and thus $\pa_{\rho_1} \wh H = \pa_\mu H$, $\pa_{\rho_2} \wh H = 0$, $c_0=0$. Note that $H$ is concave in $p$. We shall assume it is strictly concave and thus $H_{pp} <0$. Then \reff{lambdaHH} reduces to \bea \label{Hdisplacement3} \left.\ba{c} \dis \tilde\dbE\bigg[ \Big\langle \eta, [ H_{xx}(\xi)-2\l H_{px}(\xi)]\eta\Big\rangle + \Big\langle \eta, ~[ H_{x\mu}(\xi,\tilde \xi)-2\l H_{p\mu}(\xi,\tilde \xi)] \tilde \eta\Big\rangle \\ \dis - {1\over 4} \Big|(-H_{pp}(\xi))^{-{1\over 2}}\big[\tilde \dbE_{\cF^1_T}[H_{p\mu}(\xi, \tilde \xi)\tilde\eta]+2\l H_{pp}(\xi)\eta\big]\Big|^2\bigg]\geq 0. \ea\right. \eea Moreover, when $\l=0$, \reff{Hdisplacement3} (and \reff{lambdaHH2}) reduces further to \bea \label{Hdisplacement4} \dis \tilde\dbE\bigg[ \Big\langle \eta, H_{xx}(\xi)\eta\Big\rangle + \Big\langle \eta, ~H_{x\mu}(\xi,\tilde \xi) \tilde \eta\Big\rangle - {1\over 4} \Big|(-H_{pp}(\xi))^{-{1\over 2}}\tilde \dbE_{\cF^1_T}[H_{p\mu}(\xi, \tilde \xi)\tilde\eta]\Big|^2\bigg]\geq 0. \eea This is exactly the condition in \cite[Definition 3.4]{GMMZ}, except that \cite{GMMZ} uses $-H$ instead of $H$. \end{rem} We now present an example which satisfies \reff{lambdaHH2}, and hence \reff{Hdisplacement} with $\l=0$. \begin{eg} \label{eg-displacement} We consider a special case of \reff{nonseparable} with $d=1$: for some constant $0<c<1$, \bea \label{nonseparable3} \left.\ba{c} \dis b(x, a, \cL_{(\xi,\a)}) = -a + b_1(\cL_\xi, \dbE[\a]), \q \dis f(x, a, \cL_{(\xi,\a)}) = {|a|^2\over 2} - c a \dbE[\a] + f_1(x, \cL_{(\xi, \a)}). \ea\right.
\eea Assume there exist constants $0\le c_0<1$ and $\k>0$ such that \bea \label{lambdaHH3} |\bar c \pa_{m_2} b_1 -\hat c|\le c_0,\q \pa_{xx} f_1 \ge \k \ge \|\pa_{x \nu_1} f_1\| + {1\over 4(1-c_0)}\Big(\|\pa_{m_1} b_1\|+[1+ \hat c]\|\pa_{x \nu_2} f_1\|\Big)^2, \eea where $\hat c:= {c\over 1-c}$, $\bar c:=\frac{1}{1-c}$, $m_1, m_2$ stand for $\dbE[\xi]$ and $\dbE[\a]$, respectively, and $\|\cd\|$ denotes the supremum norm of the function over all variables. Then \reff{lambdaHH2} holds true. \end{eg} \proof By Example \ref{eg-nonseparable} we see that \beaa &\dis \Phi(\cL_{(\xi, \eta)}) = \cL_{(\xi, ~ \hat c \dbE[\eta] + \eta)},\\ &\dis H(x, p, \cL_{(\xi,\a)}) = -{1\over 2} \Big|c\dbE[\a] + p\Big|^2 + p b_1(\cL_\xi, \dbE[\a]) +f_1(x, \cL_{(\xi,\a)}),\\ &\dis \wh H(x, p, \cL_{(\xi, \eta)}) = -{1\over 2} \Big|\hat c\dbE[\eta] +p\Big|^2 +p b_1(\cL_\xi, \bar c\dbE[\eta]) + f_1(x, \cL_{(\xi, ~ \hat c \dbE[\eta] + \eta)}). \eeaa Applying Proposition \ref{prop-chain} and Example \ref{eg-derivative} we have, for $\wh f_1(x,\rho):= f_1(x, \Phi(\rho))$ where $\rho=\cL_{(\xi, \eta)}$, \beaa &\dis \pa_{\rho_1} \wh f_1(x, \rho, \tilde x, \tilde p) = \pa_{\nu_1} f_1(x, \Phi(\rho), \tilde x, \tilde p + \hat c \dbE_{\rho}[\eta]),\\ &\dis \pa_{\rho_2} \wh f_1(x, \rho, \tilde x, \tilde p) = \pa_{\nu_2} f_1(x, \Phi(\rho), \tilde x, \tilde p + \hat c \dbE_{\rho}[\eta]) + \hat c \dbE_{\rho}\Big[ \pa_{\nu_2} f_1(x, \Phi(\rho), \xi, \eta + \hat c \dbE_{\rho}[\eta])\Big]. \eeaa Then one may compute straightforwardly that \beaa &\wh H_{pp} = -1,\q \wh H_{xx} = \pa_{xx} f_1, \q \wh H_{x\rho_1} = \pa_{x \nu_1} f_1,\q \wh H_{p \rho_1}= \pa_{m_1} b_1,\\ &\wh H_{x\rho_2} = \pa_{x\nu_2} f_1 + \hat c \dbE_{\rho}[ \pa_{x\nu_2} f_1],\q \wh H_{p\rho_2}= \bar c \pa_{m_2} b_1 -\hat c. \eeaa Then $|\wh H_{p\rho_2}|\le c_0$, and \reff{lambdaHH2} becomes \bea \label{lambdaHH4} \left.\ba{c} \dis \tilde\dbE_{\rho}\Big[ \pa_{xx} f_1|\eta|^2 + \pa_{x\nu_1}f_1 \eta\tilde\eta - {|\L(\xi)|^2\over 4(1-c_0)}\Big] \ge 0,\q \mbox{where}\\ \dis \L(x) := \bar{\tilde\dbE}_\rho\bigg[\tilde \eta \Big[\pa_{m_1} b_1\big(\cL_\xi, \tilde \xi, \bar c \dbE_{\rho}[\eta]\big) + \pa_{x\nu_2} f_1\big(\tilde \xi, \Phi(\rho), x, \f(x) + \hat c \dbE_{\rho}[\f(\xi)]\big) \\ \dis + \hat c \pa_{x\nu_2} f_1(\tilde \xi, \Phi(\rho), \bar \xi, \f(\bar \xi) + \hat c \dbE_{\rho}[\f(\xi)])\Big]\bigg]. \ea\right. \eea Clearly \reff{lambdaHH3} implies \reff{lambdaHH4}, and hence \reff{lambdaHH2}. \qed \subsection{Global wellposedness for master equations of standard MFGs} \label{sect-wellposedness} For standard MFGs, by combining Proposition \ref{prop-dissufficient} and the strategy in \cite{GMMZ}, see also Subsection \ref{sect-strategy}, one can easily establish the following global wellposedness result for the master equation under displacement semi-monotonicity, which generalizes \cite[Theorem 6.3]{GMMZ}. We remark again that, for MFGC master equations, we shall investigate their global wellposedness in future research. \begin{thm} \label{thm-global} Assume $\l\geq 0$, $b(x, a, \nu)=a$ and $f(x, a, \nu) = f(x, a, \mu)$. 
Assume further that: (i) $H$ and $G$ have the regularity: \beaa &\dis H,\pa_{xx}H,\pa_{xp}H,\pa_{pp}H,\pa_{xxp}H,\pa_{xpp}H,\pa_{ppp}H\in \cC^2(\dbR^{2d}\times\cP_2(\dbR^d)),\\ &\dis \pa_{x\mu}H,\pa_{p\mu}H,\pa_{xp\mu}H,\pa_{pp\mu}H\in \cC^2(\dbR^{2d}\times\cP_2(\dbR^d)\times\dbR^d),\\ &\dis G,\pa_{xx}G\in \cC^2(\dbR^d\times\cP_2(\dbR^d)),\qq \pa_{x\mu}G\in\cC^2(\dbR^d\times\cP_2(\dbR^d)\times\dbR^d), \eeaa and all the second and higher order derivatives of $H$ and $G$ involved above are uniformly bounded; (ii) $H$ is uniformly concave in $p$: $\pa_{pp}H\leq -c_0 I_d$ for some constant $c_0>0$. (iii) \eqref{Hdisplacement3} holds for $H$ and \eqref{lambdadisplacementsm} holds for $G$. \no Then the master equation \eqref{master} on $[0,T]$ admits a unique classical solution $V$ with bounded $\pa_{xx}V$ and $\pa_{x\mu}V$. \end{thm} \proof We shall follow the road map given in Subsection \ref{sect-strategy} to show the global wellposedness. Since the arguments are very similar to those in \cite{GMMZ, MZ3}, below we only sketch the proof. {\it Step 1.} We apply Theorem \ref{thm:displacement} to show that, if $V$ is a classical solution of the master equation \eqref{master} and $V$ satisfies Assumption \ref{assum-regV}, then $V$ propagates the displacement $\l$-monotonicity, i.e. $V(t,\cdot,\cdot)$ satisfies \eqref{lambdadisplacementsm} for all $t\in[0,T]$. {\it Step 2.} We shall follow the same proof as the one in \cite[Theorem 5.1]{GMMZ} to show an a priori uniform $\cW_2$-Lipschitz continuity of $\pa_xV$ in $\mu$, uniformly in $(t,x)\in[0,T]\times\dbR^d$. We note that $V$ might not be uniformly $\cW_2$-Lipschitz continuous in $\mu$ under our (weaker) assumptions. The key assumption we used in \cite[Theorem 5.1]{GMMZ} is the boundedness of $\pa_{xx}V$, which was proved using the first order derivatives of $H$ and $G$ in \cite[Proposition 6.1]{GMMZ}. This is no longer the case here. To show it, we first apply Theorem \ref{thm:displacement} to prove that $V(t,\cd,\cd)$ satisfies \reff{lambdadisplacementsm} for all $t\in[0,T]$. By Remark \ref{rem-mon}-(v), $V$ is then uniformly semi-convex in $x$, i.e. $\pa_{xx}V\ge -\l I_d$, uniformly in $(t,\mu)\in [0,T]\times\cP_2(\dbR^d)$. It is standard to obtain the uniform semi-concavity of $V$ in $x$ from the boundedness of the second order derivatives of $H$ and $G$ by the classical control theory. Thus, we obtain the a priori boundedness of $\pa_{xx}V$. Then we obtain the uniform $\cW_2$-Lipschitz continuity of $\pa_x V$ in $\mu$. By \cite[Proposition 6.2]{GMMZ}, we can further strengthen the above a priori $\cW_2$-Lipschitz continuity to an a priori $\cW_1$-Lipschitz continuity for $\pa_{x}V$ in $\mu$. {\it Step 3.} We shall follow the same proof as the one in \cite[Theorem 7.1]{MZ3} to show the global wellposedness of the master equation \eqref{master}. The desired regularity of the solution $V$ is a byproduct of {\it Step 2}. However, we cannot show the wellposedness of the master equation directly, due to the lack of the a priori Lipschitz continuity of $V$ in $x$ and $\mu$; we thus use the approach in \cite[Section 7]{MZ3}. That is, we first use the a priori Lipschitz estimate of $\pa_xV$ established in {\it Step 2} to show the wellposedness of the vectorial master equation for $\vec U := \pa_x V$. We then utilize the solution to the vectorial master equation to establish the wellposedness of the master equation \eqref{master}. \qed \begin{rem} If $G$ satisfies the Lasry-Lions monotonicity and $\pa_{xx}G$ is bounded by $\l$, then $G$ is displacement semi-monotone.
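Indeed, by \reff{LL} and \reff{lambdadisplacementsm} we have, for all $\xi, \eta\in \dbL^2(\cF^1_T)$, \beaa MON^{disp}_\l G(\xi, \eta) = MON^{LL} G(\xi, \eta) + \dbE\Big[\big\langle [\pa_{xx}G(\xi,\cL_\xi)+\l I_d]\eta, \eta\big\rangle\Big] \ge 0, \eeaa since both terms on the right hand side are non-negative under the stated conditions.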
Therefore, we obtain that, if $H$ and $G$ satisfy the assumptions (i), (ii) in Theorem \ref{thm-global}, $H$ satisfies \eqref{Hdisplacement3} and $G$ is Lasry-Lions monotone, then the master equation is wellposed on $[0,T]$. In this sense, Theorem \ref{thm-global} unifies the wellposedness results under the Lasry-Lions monotonicity and the displacement monotonicity. We shall remark though, even when $G$ is Lasry-Lions monotone, $V$ propagates the displacement semi-monotonicity, not necessarily the Lasry-Lions monotonicity (when $f$ is non-separable). \end{rem} \section{Propagation of anti-monotonicity} \label{sect-anti} \setcounter{equation}{0} In this section we fix $\vec \l \in D_4$. Recall \reff{kappaA}. \begin{assum}\label{assum-antidisplacement} (i) $\wh H\in \cC^2(\mathbb R^{d}\times \dbR^d\times\mathcal{P}_2(\mathbb R^{2d}))$ and there exist constants $\ol L, L_0>0$ and $\overline \g >\underline \g >0$ such that \bea \label{Hbound} &\dis |\pa_{xp}\wh H|\leq \overline \g L_0,\quad |\pa_{xx}\wh H|\leq \overline\g L_0,\q |\pa_{pp}\wh H|,|\pa_{x\rho_1}\wh H|, |\pa_{x\rho_2}\wh H|, |\pa_{p\rho_1}\wh H|, |\pa_{p\rho_2}\wh H|\le \ol L;\\ \label{antiH1} &\dis \underline\k(-\pa_{xp}\wh H)\geq L_0,\q \underline\k(-\pa_{xx}\wh H) \geq\underline \g L_0. \eea (ii) There exists a constant $L^u_{xx}>0$ such that \bea \label{theta} \th_1 := { \overline \g [1+L^u_{xx}] \over \sqrt{4( \underline \g \l_0+2\l_3)}} <1,\q\mbox{and}\q \ol L \ol \k(A_1^{-1}A_2) \le L_0, \eea where: \bea \label{A} &&\dis A_1:= \begin{bmatrix} 4[1-\th_1] & 0& 0 \\ 0 & 2\l_2 & 0\\ 0& 0& [1-\th_1][\l_0\underline \g+2\l_3 ] \end{bmatrix},\nonumber\\ &&\dis A_2:= B_1L_{xx}^u+B_2:= \begin{bmatrix} 2 & 2+\l_2 & 1\\ 2+\l_2 & 4\l_2 &\l_2\\ 1 & \l_2& 0 \end{bmatrix} L_{xx}^u \\ &&\dis +\begin{bmatrix} \l_0+2|\l_0-\l_1| & \l_0+|\l_0-\frac{1}{2}\l_1|+\frac{1}{2}|\l_1|+\l_2 & |\l_0-{1\over 2}\l_1|+\frac{1}{2}|\l_1|+2\l_3 \\ \l_0+|\l_0-\frac{1}{2}\l_1|+\frac{1}{2}|\l_1|+\l_2 & 2| \l_1|+2\l_2 &|\l_1|+ \l_2 +2\l_3 \\ |\l_0-{1\over 2}\l_1|+\frac{1}{2}|\l_1|+2\l_3 & |\l_1|+ \l_2 +2\l_3 & |\l_1|+2\l_3 \end{bmatrix}.\nonumber \eea \end{assum} \ms \begin{thm}\label{thm:anti} Let Assumptions \ref{assum-fix}, \ref{assum-regH}, \ref{assum-regV}, and \ref{assum-antidisplacement} hold. Assume further that, for the constant $L^u_{xx} $ in Assumption \ref{assum-antidisplacement} (ii), \bea \label{Vxx} |\pa_{xx} V|\le L^u_{xx}. \eea If $G$ satisfies the $\vec \l$-anti-monotonicity \reff{anti}, then $V(t,\cd,\cd)$ satisfies \reff{anti} for all $t\in[0,T]$. \end{thm} We remark that the bound $L^u_{xx}$ of $\pa_{xx} V$ can be estimated a priori by using the HJB equation or the backward SDE in the mean field game system, see \cite[Section 6]{MZ3} for more details. \proof Without loss of generality, we shall prove the theorem only for $t=0$. We will continue to use the notation in the proofs of Theorem \ref{thm:LL} and \ref{thm:displacement}. Introduce: \beaa \Xi_t:=\l_0\bar I(t)+\l_1 I(t)+\mathbb E\big[|\Gamma_t|^2+\l_2|\Upsilon_t|^2-\l_3|\delta X_t|^2\big] = MON^{anti}_{\vec \l} V(t,\cd, \cd)(X_t, \d X_t). \eeaa Then it is sufficient to show that \bea \label{dXi} {d\over dt} \Xi_t \ge 0. 
\eea Following the calculation in \cite[Theorem 4.1]{MZ3} we have \bea\label{dUpsilon} \left.\ba{lll} \dis d\Upsilon_t = \big[ -K_1(t) \Upsilon_t - K_2(t)\big] dt + (dB_t)^\top K_3(t) + \b (dB^0_t)^\top K_4(t);\\ d\G_t = \big[ -2\wh H_{xp}(X_t) \G_t+\pa_{xx}V(X_t)\wh H_{pp}(X_t)\Upsilon_t - \bar K_1(t)\big] dt + (dB_t)^\top \bar K_2(t) + \b (dB^0_t)^\top \bar K_3(t), \ea\right. \eea where \bea \label{K} \left.\ba{lll} K_1(t) := \wh H_{xp}(X_t) + \pa_{xx} V(X_t) \wh H_{pp}(X_t),\\ K_2(t) := \tilde\dbE_{\mathcal{F}_t}\Big[ \big[\wh H_{x\rho_1}(X_t,\tilde X_t) \delta\tilde X_t+\wh H_{x\rho_2}(X_t,\tilde X_t)[\tilde \Gamma_t+\tilde \Upsilon_t]\big] \\ \qq\qq\qq\q+ \pa_{xx} V(X_t)\big[\wh H_{p\rho_1}(X_t,\tilde X_t) \delta\tilde X_t+\wh H_{p\rho_2}(X_t,\tilde X_t)[\tilde \Gamma_t+\tilde \Upsilon_t]\big]\Big], \\ K_3(t) := \tilde{\mathbb E}_{\mathcal{F}_t}\big[\pa_{xx\mu}V(X_t,\tilde X_t)\delta \tilde X_t\big],\\ K_4(t):= K_3(t) + \bar{\tilde \dbE}_{\mathcal{F}_t}\Big[\big[(\pa_{\mu x\mu}V)(X_t,\bar X_t,\tilde X_t) +\pa_{\tilde xx\mu}V(X_t,\tilde X_t)\big]\delta\tilde X_t\Big],\\ \bar K_1(t) :=[\wh H_{xx}(X_t)-\pa_{xx}V(X_t)\wh H_{px}(X_t)]\delta X_t\\ \qq\qq- \pa_{xx}V(X_t)\tilde\dbE_{\mathcal{F}_t} \big[\wh H_{p\rho_1}(X_t,\tilde X_t)\delta \tilde X_t+\wh H_{p\rho_2}(X_t,\tilde X_t)[\tilde \Gamma_t+\tilde \Upsilon_t]\big], \\ \bar K_2(t) := \pa_{xxx}V(X_t)\delta X_t,\\ \bar K_3(t):= \bar K_2(t) + \tilde \dbE_{\mathcal{F}_t}\Big[(\pa_{\mu xx}V)(X_t,\tilde X_t)\delta\tilde X_t\Big]. \ea\right. \eea In particular, this implies that \bea\label{dUpsilon2} \left.\ba{lll} \dis {d\over dt} \dbE[|\Upsilon_t|^2] \ge 2\dbE\Big[\big\langle \Upsilon_t,~ -K_1(t) \Upsilon_t - K_2(t)\big\rangle\Big];\ss\\ \dis {d\over dt} \dbE[|\G_t|^2] \ge 2 \dbE\Big[\big\langle \G_t ,~ -2\wh H_{xp}(X_t) \G_t+\pa_{xx}V(X_t)\wh H_{pp}(X_t)\Upsilon_t - \bar K_1(t)\big\rangle\Big]. \ea\right. 
\eea Thus, combining \reff{dtI-LL}, \reff{cdbarI}, and \reff{deltaX2}, and recalling the $N$ in \reff{XY}, we have \beaa \left.\ba{lll} \dis {d\over dt}\Xi_t \ge \l_0 \tilde\dbE\Big[\big\langle \wh H_{pp}(X_t)\G_t,\G_t\big\rangle+2\big\langle \wh H_{pp}(X_t)\G_t, \Upsilon_t\big\rangle\\ \dis\qq\qq + 2\big\langle\G_t, \wh H_{p\rho_1}(X_t,\tilde X_t)\delta \tilde X_t+\wh H_{p\rho_2}(X_t,\tilde X_t)[\tilde\Gamma_t+\tilde \Upsilon_t]\big\rangle-\big\langle \wh H_{xx}(X_t)\delta X_t,\delta X_t\big\rangle \Big] \\ \dis ~ + \l_1 \tilde\dbE\Big[\big\langle \wh H_{pp}(X_t) \Upsilon_t,~\Upsilon_t\big\rangle - \big\langle \wh H_{x\rho_1}(X_t,\tilde X_t) \delta \tilde X_t+\wh H_{x\rho_2}(X_t,\tilde X_t)[\tilde\G_t+\tilde\Upsilon_t], \delta X_t\big\rangle\\ \dis\qq\qq -\big\langle \wh H_{p\rho_1}(X_t,\tilde X_t)\delta \tilde X_t+\wh H_{p\rho_2}(X_t,\tilde X_t)[\tilde\G_t+\tilde \Upsilon_t], \G_t-\Upsilon_t \big\rangle \Big]\\ \dis ~+2\tilde\dbE\Big[ \big\langle \G_t, \big[ -2\wh H_{xp}(X_t) \G_t+\pa_{xx}V(X_t)\wh H_{pp}(X_t)\Upsilon_t - \bar K_1(t)\big] \big\rangle +\l_2\big\langle\Upsilon_t, \big[ -K_1(t) \Upsilon_t - K_2(t)\big] \big\rangle\Big]\\ \dis~ -2\l_3\tilde\dbE\Big[\big\langle \wh H_{px}(X_t)\delta X_t+\wh H_{p\rho_1}(X_t,\tilde{X_t})\delta \tilde X_t+\wh H_{p\rho_2}(X_t,\tilde X_t)[\tilde \G_t+\tilde\Upsilon_t]+\wh H_{pp}(X_t)[ \Upsilon_t+\G_t],\delta X_t \big\rangle\Big]\\ \dis = \tilde\dbE\bigg[\Big\langle \big[ \l_0 \wh H_{pp}(X_t) -4 \wh H_{xp}(X_t)\big] \G_t, ~\G_t\Big\rangle + \Big\langle \big[\l_1\wh H_{pp}(X_t)- 2\l_2K_1(t)] \Upsilon_t, \Upsilon_t \Big\rangle\\ \dis ~ + \Big\langle \big[-\l_0 \wh H_{xx}(X_t) -2\l_3\wh H_{px}(X_t)\big] \d X_t, \d X_t\Big\rangle +\Big\langle \big[2\l_0-\l_1+2\pa_{xx}V(X_t)\big]\wh H_{p\rho_2}(X_t,\tilde X_t)\tilde \Gamma_t, \G_t \Big\rangle\\ \dis ~ +\Big \langle\big[[\l_1-2\l_2\pa_{xx}V(X_t)]\wh H_{p\rho_2}(X_t,\tilde X_t)-2\l_2\wh H_{x\rho_2}(X_t,\tilde X_t)\big]\tilde \Upsilon_t, \Upsilon_t\Big\rangle \\ \dis~ -\Big\langle \big[ \l_1\wh H_{x\rho_1}(X_t,\tilde X_t) +2\l_3\wh H_{p\rho_1}(X_t,\tilde X_t)\big]\delta \tilde X_t, \d X_t\Big\rangle\\ \dis~ +\Big\langle 2 [\l_0 \wh H_{pp}(X_t) + \pa_{xx} V(X_t) \wh H_{pp}(X_t)]\Upsilon_t+[2\l_0-\l_1+2\pa_{xx}V(X_t)]\wh H_{p\rho_2}(X_t,\tilde X_t)\tilde \Upsilon_t,\G_t\Big\rangle \\ \dis~+\Big\langle \big[[ \l_1-2\l_2\pa_{xx}V(X_t)]\wh H_{p\rho_2}(X_t,\tilde X_t) -2\l_2\wh H_{x\rho_2}(X_t,\tilde X_t)\big]\tilde \Gamma_t,\Upsilon_t\Big\rangle\\ \dis~ +\Big\langle \big[ [2\l_0-\l_1+2\pa_{xx}V(X_t)] \wh H_{p\rho_1}(X_t,\tilde X_t) -\l_1\wh H_{\rho_2x}(\tilde X_t,X_t)-2\l_3\wh H_{\rho_2p}(\tilde X_t,X_t)\big]\delta\tilde X_t\\ \dis~\q\,\,+2\big[-\wh H_{xx}(X_t)+\pa_{xx}V(X_t)\wh H_{px}(X_t)-\l_3\wh H_{pp}(X_t)\big]\delta X_t, \G_t\Big\rangle \\ \dis~ + \Big\langle \big[[\l_1-2\l_2\pa_{xx}V(X_t)] \wh H_{p\rho_1}(X_t,\tilde X_t) -\l_1\wh H_{\rho_2x}(\tilde X_t,X_t)\\ \dis~\q-2\l_2\wh H_{x\rho_1}(X_t,\tilde X_t)-2\l_3\wh H_{\rho_2 p}(\tilde X_t, X_t)\big]\delta \tilde X_t-2\l_3\wh H_{pp}(X_t)\delta X_t ,\Upsilon_t\Big\rangle \bigg]. \ea\right. 
\eeaa Recall \reff{D4}, \reff{kappaA}, and \reff{K}, by \reff{Hbound} and \reff{antiH1} we have \beaa \left.\ba{lll} \dis {d\over dt}\Xi_t \ge \big[ 4 L_0 - \l_0\ol L\big] \dbE[|\G_t|^2] + \big[ 2\l_2 L_0 - |\l_1|\ol L - 2\l_2 L^u_{xx} \ol L\big] \dbE[|\Upsilon_t|^2] \\ \dis \qq + \big[\l_0 \ul \g L_0 + 2\l_3 L_0\big] \dbE[|\d X_t|^2] -\big[ |2\l_0-\l_1| + 2L^u_{xx}\big] \ol L \big(\dbE[|\G_t|]\big)^2 \\ \dis\qq -\big[|\l_1|+2\l_2L^u_{xx} +2\l_2\big]\ol L \big(\dbE[|\Upsilon_t|]\big)^2 -\big[ |\l_1| +2\l_3\big]\ol L\big(\dbE[|\d X_t|]\big)^2\\ \dis\qq -\big[2 [\l_0 + L^u_{xx}] +|2\l_0-\l_1| + 2L^u_{xx} + |\l_1|+2\l_2 L^u_{xx} +2\l_2 \big]\ol L \dbE[|\G_t|] \dbE[| \Upsilon_t|] \\ \dis\qq - \big[ [|2\l_0 -\l_1|+2L^u_{xx} + |\l_1| + 2\l_3] \ol L +2[\ol \g L_0 + L^u_{xx} \ol \g L_0 + \l_3 \ol L]\big] \dbE[|\d X_t|] \dbE[|\G_t|] \\ \dis\qq - \big[[|\l_1|+2\l_2L^u_{xx} + |\l_1| + 2\l_2 +2\l_3 +2\l_3] \ol L \dbE[|\delta X_t|]\dbE[|\Upsilon_t|]\\ \dis\q \ge \Big[ 4 L_0 - \big[\l_0+ |2\l_0-\l_1| + 2L^u_{xx}\big] \ol L\Big] \big(\dbE[|\G_t|]\big)^2 \\ \dis \qq + \Big[ 2\l_2 L_0 - \big[ 2|\l_1| + 4\l_2 L^u_{xx}+2\l_2\big] \ol L\Big] \big(\dbE[|\Upsilon_t|]\big)^2 \\ \dis \qq + \Big[[\l_0 \ul \g + 2\l_3 ]L_0 -\big[ |\l_1| +2\l_3\big]\ol L\Big]\big(\dbE[|\d X_t|]\big)^2\\ \dis\qq - 2\Big[\l_0 +|\l_0-{\l_1\over 2}| + {|\l_1|\over 2}+ \l_2 + [2+\l_2] L^u_{xx} \Big]\ol L \dbE[|\G_t|] \dbE[| \Upsilon_t|] \\ \dis\qq - 2\Big[ \overline \g [1+ L^u_{xx}] L_0 + \big[|\l_0-{\l_1\over 2}| + {|\l_1|\over 2}+2\l_3+L^{u}_{xx}]\ol L\Big] \dbE[|\d X_t|] \dbE[|\G_t|] \\ \dis\qq - 2\Big[|\l_1|+ \l_2 +\l_2 L^u_{xx} +2\l_3\Big] \ol L \dbE[|\delta X_t|]\dbE[|\Upsilon_t|]\\ \dis\q \ge \Big[ 4 [1-\th_1]L_0 - \big[\l_0+ |2\l_0-\l_1| + 2L^u_{xx}\big] \ol L\Big] \big(\dbE[|\G_t|]\big)^2 \\ \dis \qq + \Big[ 2\l_2 L_0 - \big[ 2|\l_1| + 4\l_2 L^u_{xx}+2\l_2\big] \ol L\Big] \big(\dbE[|\Upsilon_t|]\big)^2 \\ \dis \qq + \Big[(1-\th_1)[\l_0 \ul \g + 2\l_3 ]L_0 -\big[ |\l_1| +2\l_3\big]\ol L\Big]\big(\dbE[|\d X_t|]\big)^2\\ \dis\qq - 2\Big[\l_0 +|\l_0-{\l_1\over 2}| + {|\l_1|\over 2}+ \l_2 + [2+\l_2] L^u_{xx} \Big]\ol L \dbE[|\G_t|] \dbE[| \Upsilon_t|] \\ \dis\qq - 2\Big[ \big[|\l_0-{\l_1\over 2}| + {|\l_1|\over 2}+2\l_3+L^{u}_{xx}]\ol L\Big] \dbE[|\d X_t|] \dbE[|\G_t|] \\ \dis\qq - 2\Big[|\l_1|+ \l_2 +\l_2 L^u_{xx} +2\l_3\Big] \ol L \dbE[|\delta X_t|]\dbE[|\Upsilon_t|], \ea\right. \eeaa where in the last step we used the fact that: recalling the $\th_1$ in \reff{theta}, \beaa 2\overline \g [1+ L^u_{xx}] \dbE[|\d X_t|] \dbE[|\G_t|] \le 4 \th_1 \big(\dbE[|\G_t|]\big)^2 + \th_1 [\l_0\underline \g +2\l_3] \big(\dbE[|\d X_t|]\big)^2, \eeaa Then, recalling \reff{A} and denoting $e := \big( \dbE[|\G_t|],~ \dbE[|\Upsilon_t|],~ \dbE[|\d X_t|]\big)$, we have \beaa {d\over dt} \Xi_t \ge e \big[ A_1 L_0 - A_2 \ol L\big] e^\top \ge 0, \eeaa thanks to \reff{theta} and the fact that $A_1 > 0$. \qed \begin{eg} \label{eg-anti} Again we consider a special case of \reff{nonseparable} with $d=1$: \beaa \left.\ba{c} \dis b(x, a, \cL_{(\xi,\a)}) = -a -L_0 x+ b_1(\cL_\xi, \dbE[\a]), ~ \dis f(x, a, \cL_{(\xi,\a)}) = {|a|^2\over 2} - c a \dbE[\a] - {\gamma L_0\over 2} x^2+ f_1(x, \cL_{(\xi, \a)}), \ea\right. \eeaa for some constants $0<c<1$, $\gamma>0$, $L_0>0$. For any $L^u_{xx}>0$, when $L_0$ is large enough, there exist appropriate $\ol L>0$, $\ol \g > \ul \g >0$, and $\vec{\l}\in D_4$ such that Assumption \ref{assum-antidisplacement} holds true. 
\end{eg} \proof By Example \ref{eg-nonseparable} and recalling the notations $\hat c, \bar c$ in Example \ref{eg-displacement}, we see that \beaa &\dis \Phi(\cL_{(\xi, \eta)}) = \cL_{(\xi, ~ \hat c \dbE[\eta] + \eta)},\\ &\dis H(x, p, \cL_{(\xi,\a)}) = -{1\over 2} \Big|c\dbE[\a] + p\Big|^2 - L_0 xp+ p b_1(\cL_\xi, \dbE[\a]) - {\gamma L_0\over 2} x^2 +f_1(x, \cL_{(\xi,\a)}),\\ &\dis \wh H(x, p, \cL_{(\xi, \eta)}) = -{1\over 2} \Big|\hat c\dbE[\eta] +p\Big|^2 -L_0 xp +p b_1(\cL_\xi,\bar c\dbE[\eta])- {\gamma L_0\over 2} x^2 + f_1(x, \cL_{(\xi, ~ \hat c \dbE[\eta] + \eta)}). \eeaa Following the same calculation as that in Example \ref{eg-displacement}, \beaa &\wh H_{pp} = -1,\q \wh H_{xp}=- L_0,\q \wh H_{xx} = -\gamma L_0 +\pa_{xx}f_1, \q \wh H_{x\rho_1} = \pa_{x \nu_1} f_1,\\ &\wh H_{x\rho_2} = \pa_{x\nu_2} f_1 + \hat c \dbE[ \pa_{x\nu_2} f_1],\q \wh H_{p \rho_1}= \pa_{m_1} b_1,\q \wh H_{p\rho_2}= \bar c \pa_{m_2} b_1 -\hat c. \eeaa For given functions $f_1, b_1$, clearly there exists a fixed constant $\ol L>0$ such that \[ |\pa_{pp}\wh H|,|\pa_{x\rho_1}\wh H|, |\pa_{x\rho_2}\wh H|, |\pa_{p\rho_1}\wh H|, |\pa_{p\rho_2}\wh H|\le \ol L. \] Set $\ul \g := {\g\over 2}$ and $\ol \g := \g + 1$. Then, for $L_0$ sufficiently large, we have \beaa &\ul \k (-\wh H_{xp}) = L_0,\qq |\wh H_{xp}| = L_0 \le \ol \g L_0;\\ &|\pa_{xx} \wh H| - \ol \g L_0 \le \g L_0 + \ol L - \ol \g L_0 = \ol L - L_0 \le 0;\\ & \ul\k (-\pa_{xx} \wh H) - \ul \g L_0 \ge \g L_0 - \ol L - \ul \g L_0 = {\g\over 2} L_0 - \ol L \ge 0. \eeaa That is, \eqref{Hbound} and \eqref{antiH1} hold true. We next fix arbitrary $\l_0, \l_2 >0$ and $\l_1 \in \dbR$. Choosing $\l_3> 0$ sufficiently large, we have $\th_1 \le {1\over 2}$. Finally, setting $L_0$ sufficiently large such that $L_0 \ge \ol L\, \ol \k(A_1^{-1}A_2)$, we verify \reff{theta} as well. \qed We point out, though, that by \reff{Vxx} $L^u_{xx}$ may in turn depend on $L_0$, so extra effort is needed in order to ensure full compatibility of our conditions. This, however, requires the a priori estimate for $\pa_{xx} V$, which is not carried out in this paper. We thus leave it to our accompanying paper on global wellposedness of MFGC master equations. We remark that we have a complete result in \cite{MZ3} for standard MFG master equations. \section{Appendix} \label{sect-proof} \setcounter{equation}{0} \no {\bf Proof of \reff{dtI-LL}}.
We first apply the It\^o's formula \reff{Ito} on $\pa_{x\mu}V(t, X_t, \cL_{X_t|\cF^0_t}, \tilde X_t)$ to obtain \begin{equation}\label{eq:ito_double1} {d\over dt} I(t)= I_1 + I_2 + I_3, \end{equation} where, by using $\hat X$ to denote another conditionally independent copy, \beaa I_1&:=& \bar{\tilde \dbE}\bigg[\bigg\langle \Big\{\pa_{tx\mu} V(X_t, \tilde X_t) + {\widehat \beta^2\over 2} ((\tr\pa_{xx})\pa_{x\mu} V)(X_t, \tilde X_t) +\wh H_p (X_t)^\top\pa_{xx\mu} V(X_t, \tilde X_t)\\ &&+\beta^2(\tr(\pa_{x\mu})\pa_{x\mu}V)(X_t,\bar X_t,\tilde X_t)+\beta^2(\tr(\pa_{\tilde x\mu})\pa_{x\mu}V)(X_t,\bar X_t,\tilde X_t)\\ &&+\beta^2(\tr(\pa_{\tilde x x})\pa_{x\mu}V)(X_t,\tilde X_t)+\frac{\beta^2}{2}(\tr(\pa_{\mu\mu})\pa_{x\mu}V)(X_t,\hat X_t,\bar X_t,\tilde X_t) \\ &&+ {\widehat\beta^2\over 2}(\tr(\pa_{\bar x\mu}) \pa_{ x\mu} V)(X_t, \bar X_t, \tilde X_t) + \wh H_p(\bar X_t)^\top\pa_{\mu x\mu} V(X_t, \bar X_t, \tilde X_t)\\ && + {\widehat\beta^2\over 2} (\tr(\pa_{\tilde x\tilde x})\pa_{x\mu} V)(X_t, \tilde X_t) + \wh H_p(\tilde X_t)^\top\pa_{\tilde xx\mu} V(X_t, \tilde X_t) \Big\}\delta \tilde X_t, ~ \delta X_t\bigg\rangle \bigg];\\ I_2&:=&- \hat{\bar{\tilde \dbE}}\bigg[\Big\langle \pa_{\mu x} V(X_t, \tilde X_t) \Big\{ \big[\wh H_{px} (X_t) + \wh H_{pp} (X_t)\pa_{xx} V(X_t) \big]\delta X_t \\ && + \Big[ \wh H_{p\rho_1}(X_t, \bar X_t)+\wh H_{p\rho_2}(X_t,\bar X_t)\pa_{xx}V(\bar X_t) + \wh H_{pp}(X_t)\pa_{x\mu} V(X_t, \bar X_t) \Big]\delta \bar X_t\\ && + \wh H_{p\rho_2}(X_t,\bar X_t)\pa_{x\mu}V(\bar X_t,\hat X_t)\delta \hat X_t \Big\}, ~ \delta \tilde X_t\Big\rangle \bigg];\\ I_3&:=&- \hat{\bar{\tilde \dbE}}\bigg[\Big\langle \pa_{x\mu} V(X_t, \tilde X_t) \Big\{ \big[\wh H_{px}(\tilde X_t)+ \wh H_{pp}(\tilde X_t)\pa_{xx} V(\tilde X_t)\big] \delta \tilde X_t \\ && + \Big[\wh H_{p\rho_1}(\tilde X_t, \bar X_t)+\wh H_{p\rho_2}(\tilde X_t, \bar X_t)\pa_{xx}V(\bar X_t)+\wh H_{pp}(\tilde X_t) \pa_{x\mu} V(\tilde X_t,\bar X_t) \Big]\delta \bar X_t\\ && + \wh H_{p\rho_2}(\tilde X_t,\bar X_t)\pa_{x\mu}V(\bar X_t,\hat X_t)\delta \hat X_t\Big\}, \delta X_t\Big\rangle\bigg]. 
\eeaa On the other hand, applying $\partial_{x\mu}$ to \reff{master} we obtain \begin{equation}\label{paxmucLV} 0 = (\pa_{x\mu} \sL V)(t, x, \mu, \tilde x) = J_1+ J_2 +J_3, \end{equation} where \beaa J_1&:=& \pa_{tx\mu } V (x, \tilde x)+ {\widehat \beta^2\over 2}(\tr({\pa_{xx}})\pa_{x\mu} V)(x, \tilde x) + \wh H_{x\rho_1}(x,\tilde x) \\ && + \wh H_{x\rho_2}(x,\tilde x)\pa_{xx}V(\tilde x)+\bar\dbE[\wh H_{x\rho_2}(x,\bar\xi)\pa_{x\mu}V(\bar \xi,\tilde x)]\\ &&+ \pa_{xx} V(x) \Big[\wh H_{p\rho_1}(x,\tilde x)+\wh H_{p\rho_2}(x,\tilde x)\pa_{xx}V(\tilde x)+ \bar\dbE[\wh H_{p\rho_2}(x,\bar\xi)\pa_{x\mu}V(\bar \xi,\tilde x)]\Big]\\ && +\Big[\wh H_{xp}(x) +\pa_{xx} V(x) \wh H_{pp}(x) \Big] \pa_{x\mu} V(x, \tilde x) + \wh H_p(x)^\top \pa_{xx\mu} V(x, \tilde x);\\ J_2 &:=& {{\widehat\beta^2\over 2} (\pa_{x \tilde x } \tr(\pa_{\tilde x\mu})V)(x, \tilde x)} + \pa_{x\mu} V(x,\tilde x) \Big[\wh H_{px} (\tilde x) + \wh H_{pp}(\tilde x) \pa_{xx} V(\tilde x)\Big] \\ &&+ \wh H_p(\tilde x)^\top\pa_{\tilde x x \mu} V(x,\tilde x)+ \beta^2(\pa_{x\tilde x}\tr(\pa_{x\mu})V)(x,\tilde x)+\beta^2\bar\dbE\big[(\pa_{x\tilde x}\tr(\pa_{\mu\mu})V)(x,\bar\xi,\tilde x)\big];\\ J_3 &:=& \hat{\bar \dbE}\bigg[{\widehat \beta^2\over 2}(\tr(\pa_{\bar x\mu}) \pa_{x \mu } V)(x, \tilde x, \bar \xi) + \wh H_p(\bar \xi)^\top \pa_{\mu x \mu} V(x, \tilde x, \bar \xi)\\ &&+ \pa_{x\mu} V(x, \bar \xi) \Big[ \wh H_{p\rho_1}(\bar\xi, \tilde x)+\wh H_{p\rho_2}(\bar \xi,\tilde x)\pa_{xx}V(\tilde x)+\wh H_{p\rho_2}(\bar\xi,\hat \xi)\pa_{x\mu}V(\hat \xi,\tilde x) + \wh H_{pp} (\bar \xi) \pa_{x\mu} V(\bar \xi, \tilde x)\Big]\\ &&+\beta^2(\tr(\pa_{x\mu})\pa_{x\mu}V)(x,\tilde x,\bar \xi)+\frac{\beta^2}{2}(\tr(\pa_{\mu\mu})\pa_{x\mu}V)(x,\tilde x,\hat\xi,\bar\xi)\bigg]. \eeaa Evaluate \eqref{paxmucLV} along $(X_t,\mu_t,\tilde X_t)$ and plug into \eqref{eq:ito_double1}. As in \cite[Theorem 4.1]{GMMZ}, by straightforward calculation we obtain \reff{dtI-LL}. \qed \no {\bf Proof of Remark \ref{rem-LL}}. Given $\xi_i\in \dbL^2(\cF_0)$, $i=1,2$, let $X^i$ solve the McKean-Vlasov SDE: \bea\label{Xi} \left.\ba{c} \dis X_t^i = \xi_i +\int_0^t \pa_p\wh H(X_s^i, \pa_x V(s, X_s^i, \mu_s^i), \rho_s^i) ds + B_t+\beta B_t^0,\\ \dis \mbox{where}\q \mu_t^i:=\mathcal{L}_{X_t^i|\mathcal{F}_t^{0}},\q \rho_t^i := \cL_{(X_t^i,\pa_xV(t,X_t^i,\mu_t^i))|\mathcal{F}_t^{0}}. \ea\right. \eea It is standard that the optimal control is $ \alpha^{i}_s:=\pa_p\wh H(X_s^i, \pa_x V(s, X_s^i, \mu_s^i), \rho_s^i), $ and thus \bea \label{DPP1} \dbE[V(t,X_t^i,\mu^i_t)] &=& \mathbb E\Big[V(t_\d ,X_{t_\d}^i,\mu^i_{t_\d})+\int_t^{t_\d} f(X_s^i,\alpha_s^{i},\rho^i_s)ds\Big], \eea where $ t_\delta:=t+\delta$. Let $\alpha^\delta$ be any admissible control in $\mathcal{A}_{t_\delta}$. Consider \beaa \left.\ba{c} \dis \alpha^{i,\delta}(s,x):=\left\{\ba{lll}\pa_p\wh H(x,\pa_xV(s,x,\mu_s^i),\rho_s^i) ,\quad s\in[t,t_\delta);\\ \alpha^\delta(s,x),\qq\qq\qq\qq\, s\in [t_\delta,T]. \ea\right.\ms\\ \dis X_s^{i,\delta} = X_t^i +\int_t^s \alpha^{i,\delta}(s,X_s^{i,\delta})ds + B_s^t+\beta B_s^{0,t},\quad s\in[t,T]. \ea\right. \eeaa Since $b(\cdot,a,\cdot)=a$, we have $X_s^{i,\delta}=X_s^i$ and $\alpha_s^i=\alpha^{i,\delta}(s,X_s^{i,\delta})$ for any $s\in[t,t_\delta]$. Moreover \beaa \dis X_s^{i,\delta} = X_{t_\delta}^i +\int_{t_\delta}^s \alpha^{\delta}(s,X_s^{i,\delta})ds + B_s^{t_\delta}+\beta B_s^{0,t_\delta},\quad s\in[t_\delta,T]. 
\eeaa Thus, for $i, j=1,2$ with $i\neq j$, \beaa \dis \dbE[V(t,X_t^j,\mu^i_t)] &\leq& \mathbb E\Big[G(X_T^{j,\delta},\mu_T^i)+\int_t^Tf(X_s^{j,\delta},\alpha^{j,\delta}(s,X_s^{j,\delta}),\rho_s^i)ds\Big]\\ &=& \mathbb E\Big[G(X_T^{j,\delta},\mu_T^i)+\int_{t_\delta}^{T}f(X_s^{j,\delta},\alpha^{\delta}(s,X_s^{j,\delta}),\rho_s^i)ds+\int_t^{t_\d}f(X_s^j,\alpha_s^j,\rho^i_s)ds\Big]. \eeaa Taking the infimum over all admissible controls $\alpha^\delta$ in $\mathcal{A}_{t_\delta}$ above, we have \bea \label{DPP2} \dbE[V(t,X_t^j,\mu^i_t)] &\leq& \mathbb E\Big[V(t_\d,X_{t_\d}^j,\mu^i_{t_\d})+\int_t^{t_\d}f(X_s^j,\alpha_s^j,\rho^i_s)ds\Big]. \eea Therefore, by \reff{DPP1}, \reff{DPP2}, and \reff{fmon}, \beaa &&\mathbb E\Big[V(t_\d,X_{t_\d}^1,\mu_{t_\d}^1)+V(t_\d,X_{t_\d}^2,\mu^2_{t_\d})-V(t_\d,X_{t_\d}^1,\mu^2_{t_\d})-V(t_\d,X_{t_\d}^2,\mu^1_{t_\d})\Big]\\ &&\q -\mathbb E\Big[V(t,X_t^1,\mu^1_t)+V(t,X_t^2,\mu^2_t)-V(t,X_t^1,\mu^2_t)-V(t,X_t^2,\mu^1_t)\Big]\\ &&\leq-\mathbb E\Big[\int_t^{t+\d} \big[f(X_s^1,\alpha_s^1,\rho^1_s)+f(X_s^2,\alpha_s^2,\rho^2_s) -f(X_s^1,\alpha_s^1,\rho^2_s)-f(X_s^2,\alpha_s^2,\rho^1_s)\big]ds\Big]\le 0. \eeaa Dividing both sides by $\d$ and then sending $\d\to 0$, we obtain \begin{eqnarray*} \frac{d}{dt}\dbE \Big[V(t,X_t^1,\mu^1_t)+V(t,X_t^2,\mu^2_t)-V(t,X_t^1,\mu^2_t)-V(t,X_t^2,\mu^1_t)\Big]\leq 0, \end{eqnarray*} which implies that, denoting $\D X_t := X^2_t - X^1_t$, \bea \label{Xiest} \frac{d}{dt}\tilde \dbE\Big[\int_{0}^1\big\langle \pa_{x\mu}V(t,X_t^1+\theta \Delta X_t,\mathcal{L}_{(X_t^1+\theta \Delta X_t)|\cF^0_t},\tilde X_t^1+\theta\Delta \tilde X_t)\Delta \tilde X_t,~\Delta X_t\big\rangle d\theta\Big]\leq 0. \eea Now fix $\xi, \eta\in \dbL^2(\cF_0)$ and set $\xi_1:= \xi$, $\xi_2 := \xi + \e \eta$. Then $X^1$ coincides with the $X$ in \reff{XY}, and by denoting $X^\e = X^2$, one can verify that $\lim_{\e\to 0} {1\over \e}[X^\e_t - X_t] = \d X_t$, where the limit is in the $\dbL^2$ sense and $\d X$ is defined in \reff{XY}. Then, by dividing \reff{Xiest} by $\e^2$ and sending $\e\to 0$, it follows from the regularity of $V$ that \beaa \frac{d}{dt}\tilde\dbE\Big[\big\langle \pa_{x\mu}V(t,X_t,\mathcal{L}_{X_t|\cF^0_t},\tilde X_t)\tilde\d X_t,~\d X_t\big\rangle \Big]\leq 0. \eeaa This is exactly \reff{dI}. \qed \no {\bf Proof of \reff{cdbarI}}. We first apply the It\^o formula \reff{Ito} to obtain \begin{equation}\label{eq:ito_double2} {d\over dt} {\bar I}(t)= \bar I_1 +\bar I_2 +\bar I_3, \end{equation} where \beaa &&\bar I_1:= \tilde\dbE\bigg[\Big\langle\Big\{\pa_{txx} V(X_t) + {\widehat\beta^2\over 2} (\tr(\pa_{xx})\pa_{xx} V)(X_t) +\wh H_p(X_t)^\top \pa_{xxx} V(X_t)\\ && \qq +\beta^2(\tr(\pa_{x\mu})\pa_{xx}V)(X_t,\tilde X_t)\Big\} \delta X_t, \delta X_t \Big\rangle \bigg],\\ &&\overline{I}_2 :=\bar{\tilde\dbE}\bigg[\Big\langle \Big\{\frac{\beta^2}{2}(\tr(\pa_{\mu\mu})\pa_{xx}V)(X_t,\tilde X_t,\bar X_t)\\ &&\qq + {\widehat\beta^2\over 2} (\tr(\pa_{\tilde x\mu})\pa_{ xx} V)(X_t,\tilde X_t) + \wh H_p(\tilde X_t)^\top\pa_{\mu xx} V(X_t, \tilde X_t)\Big\} \delta X_t, \delta X_t\Big\rangle\bigg],\\ &&\overline{I}_3:= 2\bar{\tilde\dbE}\bigg[ \Big\langle\pa_{xx} V(X_t) \Big\{ \big[\wh H_{px} (X_t) + \wh H_{pp} (X_t)\pa_{xx} V(X_t) \big]\delta X_t\\ &&\qq + \big[ \wh H_{p\rho_1}(X_t, \tilde X_t)+\wh H_{p\rho_2}(X_t,\tilde X_t)\pa_{xx}V(\tilde X_t) + \wh H_{pp}(X_t)\pa_{x\mu} V(X_t, \tilde X_t) \big] \delta \tilde X_t\\ &&\qq + \wh H_{p\rho_2}(X_t,\tilde X_t)\pa_{x\mu}V(\tilde X_t,\bar X_t)\delta \bar X_t\Big\},~ \delta X_t \Big\rangle \bigg].
\eeaa On the other hand, applying $\partial_{xx}$ to \reff{master} we obtain \begin{equation}\label{paxxVmaster} 0= (\pa_{xx} \sL V)(t, x, \mu)= \bar J_1 + \bar{J_2}, \end{equation} \no where \begin{align*} {\bar J_1}&:= \pa_{txx} V + {\widehat\beta^2\over 2}(\tr(\pa_{xx})\pa_{xx} V) + \wh H_{xx}(x) + 2\wh H_{xp}(x)\pa_{xx} V(x)\\ &+ \pa_{xx} V(x)\wh H_{pp}(x) \pa_{xx} V(x)+\wh H_p(x)^{\top} \pa_{xxx} V(x),\\ {\bar J_2}&:= \bar{\tilde\dbE}\Big[{\widehat\beta^2\over 2} (\tr(\pa_{\tilde x\mu})\pa_{xx} V)(x, \tilde \xi) + \wh H_p( \tilde \xi)^\top\pa_{\mu xx} V(x, \tilde \xi)\\ &+\beta^2(\tr(\pa_{x\mu})\pa_{xx}V)(x,\tilde\xi)+\frac{\beta^2}{2}(\tr(\pa_{\mu\mu})\pa_{xx}V)(x,\bar\xi,\tilde\xi)\Big]. \end{align*} \normalsize Evaluate \reff{paxxVmaster} along $(X_t,\mu_t)$, and plug into \reff{eq:ito_double2}, we obtain \reff{cdbarI} straightforwardly. \qed \begin{thebibliography}{99} \bibitem{ALLM} Achdou, Y., Han, J., Lasry, J.M., Lions, P.L. and Moll, B., Income and wealth distribution in macroeconomics: a countinuous-time approach, {\sl The Review of Economic Studies} 89 (2022), no.1., 45--86. \bibitem{AK} Achdou, Y. and Kobeissi, Z., Mean field games of controls: finite difference approximations, {\sl Math. Eng.} 3 (2021), no.3., Paper No. 024, 35pp. \bibitem{Ahuja} Ahuja, S., Wellposedness of mean field games with common noise under a weak monotonicity condition, {\sl SIAM J. Control Optim.} 54 (2016), 30--48. \bibitem{BTB} Bauso, D., Tembine, H. and Basar, T., Opinion dynamics in social networks through mean-field games, {\sl SIAM J. Control Optim.} 54 (2016), no.6, 3225--3257. \bibitem{BFY} Bensoussan, A., Frehse, J., and Yam, S.C.P., {\sl Mean field games and mean field type control theory}, Springer Briefs in Mathematics. Springer, New York, (2013). \bibitem{BGY2} Bensoussan, A., Graber, P.J. and Yam, S.C.P., Control on Hilbert spaces and application to mean field type control theory, preprint, arXiv:2005.10770. \bibitem{Bogachev} Bogachev, V.I., {\sl Measure theory}, vol. 1. Springer, Berlin Heidelberg, (2007). \bibitem{BLPR} Buckdahn, R., Li, J., Peng, S. and Rainer, C., Mean-field stochastic differential equations and associated PDEs, {\sl Ann. Probab.} 45 (2017), 824--878. \bibitem{HCM06} Huang, M., Caines, P. E. and Malham\'e, R. P., Large population stochastic dynamic games: closed-loop McKean-Vlasov systems and the Nash certainty equivalence principle, {\sl Commun. Inf. Syst.} 6 (2006), no. 3, 221--251. \bibitem{HCM071} Huang, M., Caines, P. E. and Malham\'e, R. P., Large-population cost-coupled LQG problems with nonuniform agents: individual-mass behavior and decentralized $\epsilon$-Nash equilibria, {\sl IEEE Transactions on Automatic Control} 52 (2007), no.9, 1560--1571. \bibitem{HCM072} Huang, M., Caines, P. E. and Malham\'e, R. P., The Nash certainty equivalence principle and McKean-Vlasov systems: an invariance principle and entry adaptation, {\sl 46th IEEE Conference on Decision and Control} 121--123, 2007. \bibitem{Cardaliaguet} Cardaliaguet, P., {\it Notes on mean field games}, lectures by P.L. Lions, Coll\`ege de France, (2010). \bibitem{CardaliaguetLehalle} Cardaliaguet, P. and Lehalle C.A., Mean field game of controls and an application to trade crowding, {\sl Math. Financ. Econ.} 12 (2018), no.3, 335--363. \bibitem{CP} Cardaliaguet, P. and Porretta, A., {\it An introduction to mean field game theory}, Lecture Notes in Mathematics, Vol. 2281 (2020), 1--158. \bibitem{CDLL} Cardaliaguet, P., Delarue, F., Lasry, J.M. 
and Lions, P.L., {\it The master equation and the convergence problem in mean field games}, Annals of Mathematics Studies, 201. Princeton University Press, Princeton, NJ, (2019). x+212 pp. \bibitem{CD1} Carmona, R. and Delarue, F., {\sl Probabilistic theory of mean field games with applications I - Mean field FBSDEs, control, and games}, Probability Theory and Stochastic Modeling, 83. Springer, Cham, (2018). \bibitem{CD2} Carmona, R. and Delarue, F., {\sl Probabilistic theory of mean field games with applications II - Mean field games with common noise and master equations}, Probability Theory and Stochastic Modeling, 84. Springer, Cham, (2018). xxv+697 pp. \bibitem{CarmonaLacker} Carmona, R. and Lacker, D., A probabilistic weak formulation of mean field games and applications, {\sl Ann. Appl. Probab.} 25 (2015), no. 3, 1189--1231. \bibitem{CCD} Chassagneux, J.F., Crisan, D. and Delarue, F., A probabilistic approach to classical solutions of the master equation for large population equilibria, accepted in {\sl Mem. Amer. Math. Soc.}, arXiv: 1411.3009. \bibitem{Djete} Djete, M.F., Large population games with interactions through controls and common noise: convergence results and equivalence between open-loop and closed-loop controls, preprint, arXiv:2108.02992. \bibitem{GMMZ} Gangbo, W., M\'esz\'aros, A.R., Mou, C. and Zhang, J., Mean field games master equations with non-separable Hamiltonians and displacement monotonicity, accepted in {\sl Ann. Probab.}, arXiv:2101.12362. \bibitem{Gelfand} Gelfand, M.J., Explaining the puzzle of human diversity, {\sl Science} 366 (2019), 686--687. \bibitem{GT} Gangbo, W. and Tudorascu, A., On differentiability in the Wasserstein space and well-posedness for Hamilton-Jacobi equations, {\sl J. Math. Pures et Appliqu\'ees} 125 (2019), 119--174. \bibitem{GPV} Gomes, D.A., Patrizi, S. and Voskanyan, V.K., On the existence of classical solutions for stationary extended mean field games, {\sl Nonlinear Anal.} 99 (2014), 49--79. \bibitem{GV0} Gomes, D.A. and Voskanyan, V.K., Extended mean field games, {\sl Izv. Nats. Akad. Nauk Armenii Mat.} 48 (2013), no.2, 63--76. \bibitem{GV1} Gomes, D.A. and Voskanyan, V.K., Extended deterministic mean-field games, {\sl SIAM J. Control Optim.} 54 (2016), no.2, 1030--1055. \bibitem{GraberMayorga} Graber, J. and Mayorga, S., A note on mean field games of controls with state constraints: existence of mild solutions, preprint, arXiv:2109.11655. \bibitem{K0} Kobeissi, Z., Mean field games with monotonous interactions through the law of states and controls of the agents, preprint, arXiv:2006.12949. \bibitem{K1} Kobeissi, Z., On classical solutions to mean field game system of controls, {\sl Comm. Partial Differential Equations} 47 (2022), no.3, 453--488. \bibitem{LackerSoret} Lacker, D. and Soret, A., Many-player games of optimal consumption and investment under relative performance criteria, {\sl Math. Financ. Econ.} (2020), 1--19. \bibitem{LackerZariphopoulou} Lacker, D. and Zariphopoulou, T., Mean field and $N$-agent games for optimal investment under relative performance criteria, {\sl Math. Finance} 29 (2019), 1003--1038. \bibitem{LL07a} Lasry, J.M. and Lions, P.L., Mean field games, {\sl Jpn. J. Math.} 2 (2007), 229--260. \bibitem{LLG} Lasry, J.M., Lions, P.L. and Gu\'eant, O., Applications of mean field games to growth theory, preprint, HAL Id: hal-00348376. \bibitem{Lions} Lions, P.L., {\it Cours au Coll\`ege de France}, http://www.college-de-france.fr. \bibitem{MZ3} Mou, C. 
and Zhang, J., Mean field game master equations with anti-monotonicity conditions, preprint, arXiv:2201.10762. \end{thebibliography} \end{document}
2205.13381v1
http://arxiv.org/abs/2205.13381v1
Equivariant symplectic homology, linearized contact homology and the Lagrangian capacity
\documentclass[a4paper,12pt,twoside,openright]{report} \input{thesis_preamble} \input{thesis_macros} \title{Equivariant symplectic homology, linearized contact homology and the Lagrangian capacity} \author{Miguel Barbosa Pereira} \date{\today} \hypersetup{ pdftitle={\thetitle}, pdfauthor={\theauthor}, pdflang={en-GB} } \begin{document} \pagenumbering{roman} \input{front_matter/title_page.tex} \input{front_matter/prufung.tex} \input{front_matter/abstract.tex} \input{front_matter/acknowledgements.tex} \cleardoublepage\pdfbookmark{\contentsname}{contents} \tableofcontents \newpage \pagenumbering{arabic} \input{./chapters/1_introduction.tex} \input{./chapters/2_symplectic_manifolds.tex} \input{./chapters/3_indices.tex} \input{./chapters/4_holomorphic_curves.tex} \input{./chapters/5_floer_homology.tex} \input{./chapters/6_symplectic_capacities.tex} \input{./chapters/7_contact_homology.tex} \AtEndDocument{ \bibliographystyle{alpha} \bibliography{thesis} } \end{document} \usepackage[utf8]{inputenc} \usepackage[T1]{fontenc} \usepackage{lmodern} \usepackage{geometry} \usepackage[square,numbers]{natbib} \usepackage[nottoc,notlot,notlof]{tocbibind} \usepackage{enumitem} \usepackage{xparse} \usepackage{xstring} \usepackage{etoolbox} \usepackage{parskip} \usepackage{titling} \usepackage{mathtools} \usepackage{amssymb} \usepackage{amsthm} \usepackage{IEEEtrantools} \usepackage{tensor} \usepackage{tikz} \usepackage{hyperref} \usepackage{bookmark} \usepackage[capitalise]{cleveref} \usepackage[all]{hypcap} \apptocmd{\sloppy}{\hbadness 10000\relax}{}{} \renewcommand\theequationdis{\normalfont\normalcolor(\theequation)} \allowdisplaybreaks \graphicspath{{./figures/}} \newlength{\alphabet} \settowidth{\alphabet}{\normalfont abcdefghijklmnopqrstuvwxyz} \geometry{textwidth=3\alphabet,textheight=4.5\alphabet,hcentering} \setlist[description]{font=\normalfont} \setlist[enumerate]{font=\normalfont} \setlist[enumerate,1]{label = {(\arabic*)}} \setlist[enumerate,2]{label = {(\arabic{enumi}.\arabic*)}} \newcounter{dummy} \makeatletter \newcommand\myitem[1][]{\item[#1]\refstepcounter{dummy}\def\@currentlabel{#1}} \makeatother \usetikzlibrary{decorations.pathreplacing} \usetikzlibrary{math} \usetikzlibrary{calc} \usetikzlibrary{cd} \tikzset{ symbol/.style={ draw=none, every to/.append style={ edge node={node [sloped, allow upside down, auto=false]{$#1$}} }, }, } \hypersetup{ bookmarksnumbered=true, colorlinks=true, linkcolor=blue, citecolor=blue, urlcolor=blue } \theoremstyle{plain} \newtheorem{theorem} {Theorem} [chapter] \newtheorem{proposition} [theorem] {Proposition} \newtheorem{lemma} [theorem] {Lemma} \newtheorem{corollary} [theorem] {Corollary} \newtheorem{conjecture} [theorem] {Conjecture} \theoremstyle{definition} \newtheorem{exercise} [theorem] {Exercise} \newtheorem{definition} [theorem] {Definition} \newtheorem{example} [theorem] {Example} \newtheorem{remark} [theorem] {Remark} \newtheorem{assumption} [theorem] {Assumption} \NewDocumentCommand{\plabel}{m}{\phantomsection\label{#1}} \newcommand{\rmn}[1]{\mathrm{\MakeUppercase{\romannumeral #1}}} \NewDocumentCommand{\signature}{}{\operatorname{sign}} \NewDocumentCommand{\symp} { }{\mathbf{Symp}} \NewDocumentCommand{\liouvndg} { }{\mathbf{Liouv}_{\mathrm{ndg}}^{\mathrm{gle}}}\NewDocumentCommand{\liouvle} { }{\mathbf{Liouv}_{\mathrm{ndg}}}\NewDocumentCommand{\liouvgle} { }{\mathbf{Liouv}^{\mathrm{gle}}}\NewDocumentCommand{\modl} { }{\mathbf{Mod}} \NewDocumentCommand{\komp} { }{\mathbf{Comp}} \NewDocumentCommand{\comp} { }{\mathbf{hComp}} 
\NewDocumentCommand{\admissible}{m}{\mathbf{I}_{#1}} \NewDocumentCommand{\stair} {m}{\mathbf{I}_{#1}} \NewDocumentCommand{\admstair} {m}{\mathbf{K}_{#1}} \NewDocumentCommand {\cgh} {m} {c^{\mathrm{GH}}_{#1}} \NewDocumentCommand {\csh} {m} {c^{S^1}_{#1}} \NewDocumentCommand{\Pvb}{}{P} \NewDocumentCommand{\Ivb}{}{I} \NewDocumentCommand{\shf}{}{S} \NewDocumentCommand{\inc}{}{\tilde{i}} \NewDocumentCommand{\union} { }{\cup} \NewDocumentCommand{\bigunion} { }{\bigcup} \NewDocumentCommand{\intersection} { }{\cap} \NewDocumentCommand{\bigintersection} { }{\bigcap} \NewDocumentCommand{\product} { }{\mathbin{\rotatebox[origin=c]{180}{$\amalg$}}} \NewDocumentCommand{\bigproduct} { }{\prod} \NewDocumentCommand{\coproduct} { }{\amalg} \NewDocumentCommand{\bigcoproduct} { }{\coprod} \NewDocumentCommand{\tensorpr} { }{\otimes} \NewDocumentCommand{\bigtensorpr} { }{\bigotimes} \NewDocumentCommand{\directsum} { }{\oplus} \NewDocumentCommand{\bigdirectsum} { }{\bigoplus} \NewDocumentCommand{\N}{}{\mathbb{N}} \NewDocumentCommand{\Z}{}{\mathbb{Z}} \NewDocumentCommand{\Q}{}{\mathbb{Q}} \NewDocumentCommand{\R}{}{\mathbb{R}} \NewDocumentCommand{\C}{}{\mathbb{C}} \NewDocumentCommand{\imag}{ }{\operatorname{Im}} \NewDocumentCommand{\real}{ }{\operatorname{Re}} \NewDocumentCommand{\cnjg}{m}{\overline{#1}} \NewDocumentCommand{\id} {}{\operatorname{id}} \NewDocumentCommand{\img}{}{\operatorname{im}} \RenewDocumentCommand{\emptyset}{}{\varnothing} \NewDocumentCommand{\idm}{}{I} \NewDocumentCommand{\Hom} { }{\operatorname{Hom}} \NewDocumentCommand{\End} { }{\operatorname{End}} \NewDocumentCommand{\Aut} { }{\operatorname{Aut}} \NewDocumentCommand{\coker} { }{\operatorname{coker}} \NewDocumentCommand{\codim} { }{\operatorname{codim}} \NewDocumentCommand{\colim} { }{\operatorname{colim}} \NewDocumentCommand{\spn} { }{\operatorname{span}} \NewDocumentCommand{\Ann} { }{\operatorname{Ann}} \NewDocumentCommand{\itr} {}{\operatorname{int}} \NewDocumentCommand{\cl} {}{\operatorname{cl}} \NewDocumentCommand{\supp}{}{\operatorname{supp}} \NewDocumentCommand{\duality}{}{\operatorname{D}} \NewDocumentCommand{\capp} {}{\frown} \NewDocumentCommand{\cupp} {}{\smile} \NewDocumentCommand{\intp} {}{\cdot} \NewDocumentCommand {\critpt} { } {\operatorname{CritPt}} \NewDocumentCommand {\critval} { } {\operatorname{CritVal}} \NewDocumentCommand {\regpt} { } {\operatorname{RegPt}} \NewDocumentCommand {\regval} { } {\operatorname{RegVal}} \NewDocumentCommand {\dv} {} {\mathrm{D}} \NewDocumentCommand {\odv} {m m} {\frac{\mathrm{d} #1}{\mathrm{d} #2}} \NewDocumentCommand {\pdv} {m m} {\frac{\partial #1}{\partial #2}} \NewDocumentCommand {\edv} {} {\mathrm{d}} \NewDocumentCommand {\ldv} {m} {{L}_{#1}} \NewDocumentCommand {\cdv} {m} {\nabla_{#1}} \NewDocumentCommand {\del} {} {\partial} \NewDocumentCommand {\delbar} {} {\overline{\partial}} \DeclareMathOperator {\grad} {grad} \DeclareMathOperator {\hess} {Hess} \NewDocumentCommand{\ind} {}{\mu} \NewDocumentCommand{\fredholm} {}{\operatorname{ind}} \NewDocumentCommand{\morse} {}{\mu_{\operatorname{M}}} \NewDocumentCommand{\maslov} {}{\mu} \NewDocumentCommand{\conleyzehnder}{}{\mu_{\operatorname{CZ}}} \NewDocumentCommand{\robbinsalamon}{}{\operatorname{RS}} \newcommand{\lpar}{(} \newcommand{\rpar}{)} \newcommand{\lsize}{} \newcommand{\rsize}{} \NewDocumentCommand{\SetParenthesisTypeSize}{m m}{ \renewcommand{\lpar}{(} \renewcommand{\rpar}{)} \renewcommand{\lsize}{} \renewcommand{\rsize}{} \IfEq{#1}{(} { \renewcommand{\lpar}{(} \renewcommand{\rpar}{)} }{} \IfEq{#1}{()}{ \renewcommand{\lpar}{(} 
\renewcommand{\rpar}{)} }{} \IfEq{#1}{c} { \renewcommand{\lpar}{\{} \renewcommand{\rpar}{\}} }{} \IfEq{#1}{<} { \renewcommand{\lpar}{\langle} \renewcommand{\rpar}{\rangle} }{} \IfEq{#1}{[} { \renewcommand{\lpar}{[} \renewcommand{\rpar}{]} }{} \IfEq{#1}{[]}{ \renewcommand{\lpar}{[} \renewcommand{\rpar}{]} }{} \IfEq{#1}{|} { \renewcommand{\lpar}{\lvert} \renewcommand{\rpar}{\rvert} }{} \IfEq{#1}{||}{ \renewcommand{\lpar}{\lVert} \renewcommand{\rpar}{\rVert} }{} \IfEq{#1}{L} { \renewcommand{\lpar}{\lfloor} \renewcommand{\rpar}{\rfloor} }{} \IfEq{#1}{T} { \renewcommand{\lpar}{\lceil} \renewcommand{\rpar}{\rceil} }{} \IfEq{#2}{0}{ \renewcommand{\lsize}{} \renewcommand{\rsize}{} }{} \IfEq{#2}{1}{ \renewcommand{\lsize}{\bigl} \renewcommand{\rsize}{\bigr} }{} \IfEq{#2}{2}{ \renewcommand{\lsize}{\Bigl} \renewcommand{\rsize}{\Bigr} }{} \IfEq{#2}{3}{ \renewcommand{\lsize}{\biggl} \renewcommand{\rsize}{\biggr} }{} \IfEq{#2}{4}{ \renewcommand{\lsize}{\Biggl} \renewcommand{\rsize}{\Biggr} }{} \IfEq{#2}{a}{ \renewcommand{\lsize}{\left} \renewcommand{\rsize}{\right} }{} } \NewDocumentCommand{\p}{m m m}{ \IfEq{#1}{n}{}{\SetParenthesisTypeSize{#1}{#2} \lsize \lpar} #3 \IfEq{#1}{n}{}{\SetParenthesisTypeSize{#1}{#2} \rsize \rpar} } \NewDocumentCommand{\sbn}{o m m}{ \IfValueF{#1}{ \{ #2 \ | \ #3 \} }{} \IfValueT{#1}{ \IfEq{#1}{0}{ \{ #2 \ | \ #3 \} }{} \IfEq{#1}{1}{ \bigl \{ #2 \ \big | \ #3 \bigr \} }{} \IfEq{#1}{2}{ \Bigl \{ #2 \ \Big | \ #3 \Bigr \} }{} \IfEq{#1}{3}{ \biggl \{ #2 \ \bigg | \ #3 \biggr \} }{} \IfEq{#1}{4}{ \Biggl \{ #2 \ \Bigg | \ #3 \Biggr \} }{} }{} } \newcommand {\modifier} {} \newcommand {\equivariant} {} \newcommand {\manifold} {} \newcommand {\theory} {} \newcommand {\complex} {} ltration} {} \newcommand {\grading} {} \NewDocumentCommand{\homology}{m m m m m m m}{ \renewcommand {\modifier} {} \renewcommand {\equivariant} {} \renewcommand {\manifold} {} \renewcommand {\theory} {} \renewcommand {\complex} {} ltration} {} \renewcommand {\grading} {} \renewcommand {\modifier} {#1} \renewcommand {\equivariant} {#2} \renewcommand {\manifold} {#3} \renewcommand {\theory} {#4} \renewcommand {\complex} {#5} ltration} {#6} \renewcommand {\grading} {#7} \IfEq {#1} {} {} {\renewcommand {\equivariant} {#1}} \IfEq {#1} {L} {\renewcommand {\equivariant} {}} {}ltration} {\star}} {} ltration} {\dagger}} {} ltration_\grading} ltration}_{\manifold\grading}} } \NewDocumentEnvironment{copiedtheorem} {o m} { \theoremstyle{plain} \newtheorem*{copytheorem:#2}{\cref{#2}} \IfNoValueTF{#1} { \begin{copytheorem:#2} } { \begin{copytheorem:#2}[{#1}] } } { \end{copytheorem:#2} } \NewDocumentEnvironment{secondcopy} {o m} { \IfNoValueTF{#1} { \begin{copytheorem:#2} } { \begin{copytheorem:#2}[{#1}] } } { \end{copytheorem:#2} } \begin{titlepage} \centering \hspace{0pt} \vfill {\LARGE\bfseries \thetitle\par} \vspace{1.5cm} {\Large\bfseries Dissertation\par} \vspace{1.5cm} {\large zur Erlangung des akademischen Grades\par Dr. rer. nat.\par} \vspace{1.5cm} {\large eingereicht an der\par Mathematisch-Naturwissenschaftlich-Technischen Fakultät\par der Universität Augsburg\par} \vspace{1.5cm} {\large von\par} {\large\bfseries \theauthor\par} \vspace{2cm} {\large Augsburg, März 2022\par} \vspace{1cm} \includegraphics{chapters/unia_logo.pdf} \end{titlepage} { \centering \hspace{0pt} \vfill \begin{tabular}{ r l } Betreuer: & Prof. Dr. Kai Cieliebak, Universität Augsburg \\ Gutachter: & Prof. Dr. Urs Frauenfelder, Universität Augsburg \\ & Prof. Dr. 
Klaus Mohnke, Humboldt-Universität zu Berlin \\ \\ \end{tabular} \newline \begin{tabular}{ r l } Tag der mündlichen Prüfung: & 20.05.2022 \end{tabular} } \cleardoublepage\pdfbookmark{Abstract}{abstract} \chapter*{Abstract} We establish computational results concerning the Lagrangian capacity from \cite{cieliebakPuncturedHolomorphicCurves2018}. More precisely, we show that the Lagrangian capacity of a 4-dimensional convex toric domain is equal to its diagonal. The proof involves comparisons between the Lagrangian capacity, the McDuff--Siegel capacities from \cite{mcduffSymplecticCapacitiesUnperturbed2022}, and the Gutt--Hutchings capacities from \cite{guttSymplecticCapacitiesPositive2018}. Working under the assumption that there is a suitable virtual perturbation scheme which defines the curve counts of linearized contact homology, we extend the previous result to toric domains which are convex or concave and of any dimension. For this, we use the higher symplectic capacities from \cite{siegelHigherSymplecticCapacities2020}. The key step is showing that moduli spaces of asymptotically cylindrical holomorphic curves in ellipsoids are transversely cut out. \cleardoublepage\pdfbookmark{Acknowledgements}{acknowledgements} \chapter*{Acknowledgements} First and foremost, I would like to thank my advisor, Kai Cieliebak, for introducing me to this topic and for the guidance he gave me during this project. We had many fruitful discussions about the various details of this problem and I am very grateful for that. Next, I want to thank my Mom Manuela, my Dad Manuel, and my brother Pedro for their continued support during my PhD and their belief in me. Finally, I want to thank the mathematical community at the University of Augsburg for making it a pleasant place to work. Special thanks go to Kathrin Helmsauer and Robert Nicholls for their help on several occasions, and to Yannis Bähni, Marián Poppr, Frederic Wagner, Thorsten Hertl, and Artem Nepechiy for listening to my talks about this subject and giving valuable feedback. I am also grateful to Kyler Siegel for productive discussions, and to Urs Frauenfelder and Klaus Mohnke for reading and refereeing my thesis. \chapter{Introduction} \section{Symplectic capacities and their uses} A \textbf{symplectic manifold} is a pair $(X, \omega)$, where $X$ is a manifold and $\omega \in \Omega^2(X)$ is a closed and nondegenerate $2$-form on $X$. An example of a symplectic manifold is $\C^{n}$ with the canonical symplectic structure \begin{IEEEeqnarray*}{c} \omega_0 \coloneqq \sum_{j=1}^{n} \edv x^j \wedge \edv y^j. \end{IEEEeqnarray*} An embedding $\phi \colon (X, \omega_X) \longrightarrow (Y, \omega_Y)$ between symplectic manifolds is \textbf{symplectic} if $\phi^* \omega_Y = \omega_X$. A \textbf{symplectomorphism} is a symplectic diffeomorphism. \textbf{Darboux' theorem} implies that any symplectic manifold $(X, \omega)$ is locally symplectomorphic to $(\C^n, \omega_0)$. We point out that the analogue of this theorem in Riemannian geometry is clearly false: such a theorem would imply that every Riemannian manifold is flat. On the other hand, Darboux' theorem also implies that it is not possible to define local invariants of symplectic manifolds that are analogues of the curvature of a Riemannian manifold. There are, however, examples of global invariants of symplectic manifolds, for example symplectic capacities.
A \textbf{symplectic capacity} is a function $c$ that assigns to every symplectic manifold $(X,\omega)$ (in a restricted subclass of all symplectic manifolds) a number $c(X,\omega) \in [0,+\infty]$, satisfying \begin{description} \item[(Monotonicity)] If there exists a symplectic embedding (possibly in a restricted subset of all symplectic embeddings) $(X, \omega_X) \longrightarrow (Y, \omega_Y)$, then $c(X, \omega_X) \leq c(Y, \omega_Y)$; \item[(Conformality)] If $\alpha > 0$ then $c(X, \alpha \omega_X) = \alpha \, c(X, \omega_X)$. \end{description} By the monotonicity property, symplectic capacities are symplectomorphism invariants of symplectic manifolds. There are many examples of symplectic capacities, a simple one being the \textbf{volume capacity} $c_{\mathrm{vol}}$, defined as follows for a $2n$-dimensional symplectic manifold $(X, \omega)$. Since $\omega$ is nondegenerate, $\omega^n / n!$ is a volume form on $X$. Define \begin{IEEEeqnarray*}{rCl} \operatorname{vol}(X) & \coloneqq & \int_{X} \frac{\omega^n}{n!}, \\ c_{\mathrm{vol}}(X) & \coloneqq & \p{}{2}{\frac{\operatorname{vol}(X)}{\operatorname{vol}(B)}}^{1/n}, \end{IEEEeqnarray*} where $B = \{z \in \C^n \mid \pi |z|^2 \leq 1 \}$. Symplectic capacities are especially relevant when discussing symplectic embedding problems. Notice that by the monotonicity property, a symplectic capacity can provide an obstruction to the existence of a symplectic embedding. We provide an example from physics. A \textbf{classical mechanical system} is a symplectic manifold $(X, \omega)$ together with a function $H$ called the \textbf{Hamiltonian}. The \textbf{Hamiltonian vector field} of $H$ is the unique vector field $X_H$ on $X$ such that \begin{IEEEeqnarray*}{c} \edv H = - \iota_{X_H} \omega. \end{IEEEeqnarray*} Denote by $\phi^t_H$ the flow of $X_H$, which is a symplectomorphism. \textbf{Liouville's theorem} for a classical mechanical system says that for any subset $O \subset X$, the symplectic volume $c_{\mathrm{vol}}(\phi^t_H(O))$ is independent of $t$. The proof of this statement works for any capacity $c$ replacing the volume capacity. So, for every symplectic capacity we get a theorem analogous to Liouville's theorem, giving restrictions on what regions of the phase space flow onto other regions. In more generality, one could say that \textbf{a symplectic capacity is a quantitative encoding of some specific property of symplectic manifolds}. To make this statement less vague, let us mention some symplectic capacities we will be working with in this thesis. \begin{enumerate} \item If $(X, \omega)$ is a $2n$-dimensional symplectic manifold, a submanifold $L \subset (X, \omega)$ is \textbf{Lagrangian} if $\dim L = n$ and $\omega|_L = 0$. The \textbf{minimal symplectic area} of $L$ is given by \begin{IEEEeqnarray*}{c} A_{\mathrm{min}}(L) \coloneqq \inf \{ \omega(\sigma) \mid \sigma \in \pi_2(X,L), \, \omega(\sigma) > 0 \}. \end{IEEEeqnarray*} Cieliebak--Mohnke \cite[Section 1.2]{cieliebakPuncturedHolomorphicCurves2018} define the \textbf{Lagrangian capacity} of $(X, \omega)$ by \begin{IEEEeqnarray*}{c} c_L(X,\omega) \coloneqq \sup \{ A_{\mathrm{min}}(L) \mid L \subset X \text{ is an embedded Lagrangian torus}\}. 
\end{IEEEeqnarray*} \item If $(X, \lambda)$ is a nondegenerate \textbf{Liouville domain} (this means in particular that $X$ is a compact manifold with boundary and $\lambda$ is a $1$-form on $X$ such that $(X, \edv \lambda)$ is symplectic, see \cref{def:liouville domain}), one can define its \textbf{$S^1$-equivariant symplectic homology}, denoted $\homology{}{S^1}{}{S}{H}{}{}(X,\lambda)$ (see \cref{sec:Floer homology}). This is a $\Q$-module which comes equipped with a filtration, i.e. for every $a \in \R$ we have a $\Q$-module $\homology{}{S^1}{}{S}{H}{a}{}(X,\lambda)$ and a map \begin{equation*} \iota^a \colon \homology{}{S^1}{}{S}{H}{a}{}(X,\lambda) \longrightarrow \homology{}{S^1}{}{S}{H}{}{}(X,\lambda). \end{equation*} In particular, we can define the $S^1$-equivariant symplectic homology associated to intervals $(a,b] \subset \R$ and $(a, +\infty) \subset \R$ by taking the quotient: \begin{IEEEeqnarray*}{rCl} \homology{}{S^1}{}{S}{H}{(a,b]}{}(X,\lambda) & \coloneqq & \homology{}{S^1}{}{S}{H}{b}{}(X,\lambda) / \iota^{b,a}(\homology{}{S^1}{}{S}{H}{a}{}(X,\lambda)), \\ \homology{}{S^1}{}{S}{H}{(a,+\infty)}{}(X,\lambda) & \coloneqq & \homology{}{S^1}{}{S}{H}{}{} (X,\lambda) / \iota^{a}(\homology{}{S^1}{}{S}{H}{a}{}(X,\lambda)), \end{IEEEeqnarray*} where $\iota^{b,a} \colon \homology{}{S^1}{}{S}{H}{a}{}(X,\lambda) \longrightarrow \homology{}{S^1}{}{S}{H}{b}{}(X,\lambda)$ denotes the map induced by the filtration. The \textbf{positive $S^1$-equivariant symplectic homology} is given by $\homology{}{S^1}{}{S}{H}{+}{}(X,\lambda) = \homology{}{S^1}{}{S}{H}{(\varepsilon, + \infty)}{}(X,\lambda)$, where $\varepsilon > 0$ is a small number. The $S^1$-equivariant symplectic homology also comes with maps $U$ and $\delta$, which can be composed to obtain the map \begin{equation*} \delta \circ U^{k-1} \circ \iota^a \colon \homology{}{S^1}{}{S}{H}{(\varepsilon,a]}{}(X) \longrightarrow H_\bullet(BS^1;\Q) \otimes H_\bullet(X, \partial X;\Q). \end{equation*} The $k$th \textbf{Gutt--Hutchings} capacity of $(X,\lambda)$ (\cite[Definition 4.1]{guttSymplecticCapacitiesPositive2018}) is given by \begin{IEEEeqnarray*}{c} \cgh{k}(X) \coloneqq \inf \{ a > 0 \mid [\mathrm{pt}] \otimes [X] \in \img (\delta \circ U^{k-1} \circ \iota^a) \}. \end{IEEEeqnarray*} \item Let $(X,\lambda)$ be a nondegenerate Liouville domain. There is a map \begin{equation*} \iota^{a,\varepsilon} \circ \alpha^{-1} \colon H_\bullet(BS^1;\Q) \otimes H_\bullet(X, \partial X;\Q) \longrightarrow \homology{}{S^1}{}{S}{H}{a}{}(X). \end{equation*} The $k$th \textbf{$S^1$-equivariant symplectic homology capacity} was defined by Irie in \cite[Section 2.5]{irieSymplecticHomologyFiberwise2021}, and it is given by \begin{IEEEeqnarray*}{c} \csh{k}(X) \coloneqq \inf \{ a > 0 \mid \iota^{a,\varepsilon} \circ \alpha^{-1}([\C P^{k-1}] \otimes [X]) = 0 \}. \end{IEEEeqnarray*} \item Let $(X, \lambda)$ be a nondegenerate Liouville domain. Choose a point $x \in \itr X$ and a \textbf{symplectic divisor} (germ of a symplectic submanifold of codimension 2) $D \subset X$ through $x$. The boundary $(\partial X, \lambda|_{\partial X})$ is a \textbf{contact manifold} (\cref{def:contact manifold}) and therefore has a \textbf{Reeb vector field} (\cref{def:Reeb vector field}). The \textbf{completion} of $(X, \lambda)$ (\cref{def:completion of a Liouville domain}) is the exact symplectic manifold \begin{equation*} (\hat{X}, \hat{\lambda}) \coloneqq (X, \lambda) \cup_{\partial X} (\R_{\geq 0} \times \partial X, e^r \lambda|_{\partial X}).
\end{equation*} Let $\mathcal{M}_X^J(\Gamma)\p{<}{}{\mathcal{T}^{(k)}x}$ denote the moduli space of $J$-holomorphic curves in $\hat{X}$ which are positively asymptotic to the tuple of Reeb orbits $\Gamma = (\gamma_1, \ldots, \gamma_p)$ and which have contact order $k$ to $D$ at $x$. Finally, for $\ell, k \in \Z_{\geq 1}$, the \textbf{McDuff--Siegel} capacities of $(X,\lambda)$ (\cite[Definition 3.3.1]{mcduffSymplecticCapacitiesUnperturbed2022}) are given by \begin{IEEEeqnarray*}{c} \tilde{\mathfrak{g}}^{\leq \ell}_k(X) \coloneqq \sup_{J \in \mathcal{J}(X,D)} \mathop{\inf\vphantom{\mathrm{sup}}}_{\Gamma_1, \dots, \Gamma_p} \sum_{i=1}^{p} \mathcal{A}(\Gamma_i), \end{IEEEeqnarray*} where $\mathcal{J}(X,D)$ is a set of almost complex structures on $\hat{X}$ which are cylindrical at infinity and compatible with $D$ (see \cref{sec:moduli spaces of holomorphic curves}) and the infimum is over tuples of Reeb orbits $\Gamma_1, \ldots, \Gamma_p$ such that there exist $k_1, \ldots, k_p \in \Z_{\geq 1}$ with \begin{IEEEeqnarray*}{c+x*} \sum_{i=1}^{p} \# \Gamma_i \leq \ell, \qquad \sum_{i=1}^{p} k_i \geq k, \qquad \bigproduct_{i=1}^{p} \mathcal{M}_X^J(\Gamma_i)\p{<}{}{\mathcal{T}^{(k_i)}x} \neq \varnothing. \end{IEEEeqnarray*} \item Let $(X, \lambda)$ be a nondegenerate Liouville domain. If one assumes the existence of a suitable virtual perturbation scheme, one can define the \textbf{linearized contact homology} $\mathcal{L}_{\infty}$-algebra of $(X,\lambda)$, denoted $CC(X)[-1]$ (see \cref{def:l infinity algebra,def:linearized contact homology,def:lch l infinity}). We can then consider its \textbf{bar complex} $\mathcal{B}(CC(X)[-1])$ (see \cref{def:bar complex}) and the homology of the bar complex, $H(\mathcal{B}(CC(X)[-1]))$. There is an \textbf{augmentation map} (see \cref{def:augmentation map}) \begin{IEEEeqnarray*}{c+x*} {\epsilon}_k \colon \mathcal{B}(CC(X)[-1]) \longrightarrow \Q \end{IEEEeqnarray*} which counts $J$-holomorphic curves satisfying a tangency constraint. For $\ell, k \in \Z_{\geq 1}$, Siegel \cite[Section 6.1]{siegelHigherSymplecticCapacities2020} defines the \textbf{higher symplectic capacities} by\footnote{To be precise, the definition we give may be slightly different from the one given in \cite{siegelHigherSymplecticCapacities2020}. This is due to the fact that we use an action filtration to define $\mathfrak{g}^{\leq \ell}_k(X)$, while the definition given in \cite{siegelHigherSymplecticCapacities2020} uses coefficients in a Novikov ring. See \cref{rmk:novikov coefficients} for further discussion.} \begin{IEEEeqnarray*}{c} \mathfrak{g}^{\leq \ell}_k(X) \coloneqq \inf \{ a > 0 \mid \epsilon_k \colon H(\mathcal{A}^{\leq a} \mathcal{B}^{\leq \ell}(CC(X)[-1])) \longrightarrow \Q \text{ is nonzero} \}, \end{IEEEeqnarray*} where $\mathcal{A}^{\leq a}$ denotes the action filtration (\cref{def:action filtration lch}) and $\mathcal{B}^{\leq \ell}$ denotes the word length filtration (\cref{def:word length filtration}). \end{enumerate} The previous examples illustrate the fact that capacities can be defined using many tools that exist in symplectic geometry. If a capacity encodes a quantitative property between symplectic manifolds, then an inequality between two capacities encodes a relationship between said properties. So, capacities are also an efficient language to describe quantitative relations between properties of symplectic manifolds. Consider also that one can chain inequalities together to obtain new inequalities. 
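As a simple illustration of how such definitions are used, consider again the volume capacity. For the ball $B^{2n}(a) = \{ z \in \C^n \mid \pi |z|^2 \leq a \}$ one computes $\operatorname{vol}(B^{2n}(a)) = a^n / n!$, and therefore \begin{IEEEeqnarray*}{c+x*} c_{\mathrm{vol}}(B^{2n}(a)) = \p{}{2}{\frac{a^n / n!}{1/n!}}^{1/n} = a. \end{IEEEeqnarray*} By the monotonicity property, the existence of a symplectic embedding $B^{2n}(a) \longrightarrow B^{2n}(b)$ therefore implies $a \leq b$. The capacities listed above encode much finer information than the volume, but they are used in the same manner: a symplectic embedding produces an inequality between capacities, and chaining such inequalities produces computations.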
In fact, one of the main goals of this thesis is to compute the Lagrangian capacity of convex or concave toric domains (a toric domain is a special type of Liouville domain, see \cref{def:toric domain}). We give two results in this direction (\cref{lem:computation of cl,thm:my main theorem}), and the proof of both results consists in composing together several inequalities between capacities (namely the capacities $\cgh{k}$, $\tilde{\mathfrak{g}}^{\leq 1}_k$ and $\mathfrak{g}^{\leq 1}_k$ which were defined above), where each of those inequalities is proven separately. Notice that in this case, we are able to compute the Lagrangian capacity of (some) toric domains, whose definition only concerns Lagrangian submanifolds, by considering other capacities whose definition concerns holomorphic curves in the toric domain. \section{Historical remarks} The first symplectic capacity, the \textbf{Gromov width}, was constructed by Gromov \cite{gromovPseudoHolomorphicCurves1985}, although at this time the nomenclature of ``symplectic capacity'' had not been introduced. The notion of symplectic capacity was first introduced by Ekeland--Hofer in \cite{ekelandSymplecticTopologyHamiltonian1989}. In the sequel \cite{ekelandSymplecticTopologyHamiltonian1990}, the authors define the \textbf{Ekeland--Hofer capacities} $c_k^{\mathrm{EH}}$ (for every $k \in \Z_{\geq 1}$) using variational techniques for the symplectic action functional. The \textbf{Hofer--Zehnder capacity} \cite{hoferNewCapacitySymplectic1990,hoferSymplecticInvariantsHamiltonian2011} is another example of a capacity which can be defined by considering Hamiltonian systems. One can consider \textbf{spectral capacities}, which are generally defined as a minimal or maximal action of an orbit (Hamiltonian or Reeb) which is ``topologically visible''. The Gutt--Hutchings capacities \cite{guttSymplecticCapacitiesPositive2018}, $S^1$-equivariant symplectic homology capacities \cite{irieSymplecticHomologyFiberwise2021}, and Siegel's higher symplectic capacities \cite{siegelHigherSymplecticCapacities2020} mentioned above are examples of this principle. Other authors have used constructions like this, namely Hofer \cite{hoferEstimatesEnergySymplectic1993}, Viterbo \cite{viterboSymplecticTopologyGeometry1992,viterboFunctorsComputationsFloer1999}, Schwarz \cite{schwarzActionSpectrumClosed2000}, Oh \cite{ohChainLevelFloer2002,ohMinimaxTheorySpectral2002,ohSpectralInvariantsLength2005}, Frauenfelder--Schlenk \cite{frauenfelderHamiltonianDynamicsConvex2007}, Schlenk \cite{schlenkEmbeddingProblemsSymplectic2008} and Ginzburg--Shon \cite{ginzburgFilteredSymplecticHomology2018}. Using embedded contact homology (ECH), Hutchings \cite{hutchingsQuantitativeEmbeddedContact2011} defines the \textbf{ECH capacities} $c_k^{\mathrm{ECH}}$ (for every $k \in \Z_{\geq 1}$). \section{Main results} As explained before, one of the main goals of this thesis is to compute the Lagrangian capacity of (some) toric domains. A \textbf{toric domain} is a Liouville domain of the form $X_{\Omega} \coloneqq \mu^{-1}(\Omega) \subset \C^n$, where $\Omega \subset \R^n_{\geq 0}$ and $\mu(z_1,\ldots,z_n) = \pi(|z_1|^2,\ldots,|z_n|^2)$. 
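For example, taking $\Omega = [0,a_1] \times \cdots \times [0,a_n]$ yields the polydisc $P(a_1,\ldots,a_n) = \{ z \in \C^n \mid \pi |z_j|^2 \leq a_j \text{ for every } j = 1, \ldots, n \}$, which is a convex toric domain.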
The \textbf{ball}, the \textbf{cylinder} and the \textbf{ellipsoid}, which are defined by \begin{IEEEeqnarray*}{rCrClCl} B^{2n}(a) & \coloneqq & \{ z & = & (z_1,\ldots,z_n) \in \C^n & \mid & \pi |z|^2 \leq a \}, \\ Z^{2n}(a) & \coloneqq & \{ z & = & (z_1,\ldots,z_n) \in \C^n & \mid & \pi |z_1|^2 \leq a \}, \\ E^{2n}(a_1,\ldots,a_n) & \coloneqq & \Big\{ z & = & (z_1,\ldots,z_n) \in \C^n & \Big| & \sum_{j=1}^{n} \frac{\pi |z_j|^2}{a_j} \leq 1 \Big\}, \end{IEEEeqnarray*} are examples of toric domains.\footnote{Strictly speaking, the cylinder is noncompact, so it is not a toric domain. We will mostly ignore this small discrepancy in nomenclature, but sometimes we will refer to spaces like the cylinder as ``noncompact toric domains''.} The \textbf{diagonal} of a toric domain $X_{\Omega}$ is \begin{IEEEeqnarray*}{c} \delta_\Omega \coloneqq \max \{ a \mid (a,\ldots,a) \in \Omega \}. \end{IEEEeqnarray*} It is easy to show (see \cref{lem:c square leq c lag,lem:c square geq delta}) that $c_L(X_\Omega) \geq \delta_\Omega$ for any convex or concave toric domain $X_{\Omega}$. Cieliebak--Mohnke give the following results for the Lagrangian capacity of the ball and the cylinder. \begin{copiedtheorem}[{\cite[Corollary 1.3]{cieliebakPuncturedHolomorphicCurves2018}}]{prp:cl of ball} The Lagrangian capacity of the ball is \begin{IEEEeqnarray*}{c+x*} c_L(B^{2n}(1)) = \frac{1}{n}.\footnote{In this introduction, we will be showcasing many results from the main text. The theorems appear here as they do in the main text, in particular with the same numbering. The numbers of the theorems in the introduction have hyperlinks to their corresponding location in the main text.} \end{IEEEeqnarray*} \end{copiedtheorem} \begin{copiedtheorem}[{\cite[p.~215--216]{cieliebakPuncturedHolomorphicCurves2018}}]{prp:cl of cylinder} The Lagrangian capacity of the cylinder is \begin{IEEEeqnarray*}{c+x*} c_L(Z^{2n}(1)) = 1. \end{IEEEeqnarray*} \end{copiedtheorem} In other words, if $X_{\Omega}$ is the ball or the cylinder then $c_L(X_{\Omega}) = \delta_\Omega$. This motivates the following conjecture by Cieliebak--Mohnke. \begin{copiedtheorem}[{\cite[Conjecture 1.5]{cieliebakPuncturedHolomorphicCurves2018}}]{conj:cl of ellipsoid} The Lagrangian capacity of the ellipsoid is \begin{equation*} c_L(E(a_1,\ldots,a_n)) = \p{}{2}{\frac{1}{a_1} + \cdots + \frac{1}{a_n}}^{-1}. \end{equation*} \end{copiedtheorem} A more general form of the previous conjecture is the following. \begin{copiedtheorem}{conj:the conjecture} If $X_{\Omega}$ is a convex or concave toric domain then \begin{IEEEeqnarray*}{c+x*} c_L(X_{\Omega}) = \delta_\Omega. \end{IEEEeqnarray*} \end{copiedtheorem} The goal of this project is to prove \cref{conj:the conjecture}. We will offer two main results in this direction. \begin{enumerate} \item In \cref{lem:computation of cl}, we prove that $c_L(X_\Omega) = \delta_\Omega$ whenever $X_{\Omega}$ is convex and $4$-dimensional. \item In \cref{thm:my main theorem}, using techniques from contact homology we prove that $c_L(X_\Omega) = \delta_\Omega$ for any convex or concave toric domain $X_{\Omega}$. More specifically, in this case we are working under the assumption that there is a virtual perturbation scheme such that the linearized contact homology of a nondegenerate Liouville domain can be defined (see \cref{sec:assumptions of virtual perturbation scheme}). \end{enumerate} Notice that by the previous discussion, we only need to prove the hard inequality $c_L(X_{\Omega}) \leq \delta_\Omega$.
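Notice also that \cref{conj:cl of ellipsoid} is the special case of \cref{conj:the conjecture} where $X_{\Omega}$ is an ellipsoid. Indeed, $E^{2n}(a_1,\ldots,a_n) = X_{\Omega}$ for $\Omega = \{ x \in \R^n_{\geq 0} \mid \sum_{j=1}^{n} x_j / a_j \leq 1 \}$, and the point $(\delta,\ldots,\delta)$ belongs to this $\Omega$ if and only if $\delta \sum_{j=1}^{n} 1/a_j \leq 1$, so \begin{IEEEeqnarray*}{c+x*} \delta_\Omega = \p{}{2}{\frac{1}{a_1} + \cdots + \frac{1}{a_n}}^{-1}. \end{IEEEeqnarray*} In particular, for the ball $B^{2n}(a) = E^{2n}(a,\ldots,a)$ we get $\delta_\Omega = a/n$, and for the cylinder $Z^{2n}(a)$ we get $\delta_\Omega = a$, which is consistent with \cref{prp:cl of ball,prp:cl of cylinder}.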
We now describe our results concerning the capacities mentioned so far. The key step in proving $c_L(X_{\Omega}) \leq \delta_\Omega$ is the following inequality between $c_L$ and $\tilde{\mathfrak{g}}^{\leq 1}_k$. \begin{copiedtheorem}{thm:lagrangian vs g tilde} If $(X, \lambda)$ is a Liouville domain then \begin{IEEEeqnarray*}{c+x*} c_L(X) \leq \inf_k^{} \frac{\tilde{\mathfrak{g}}_k^{\leq 1}(X)}{k}. \end{IEEEeqnarray*} \end{copiedtheorem} Indeed, this result can be combined with the following results from \cite{mcduffSymplecticCapacitiesUnperturbed2022} and \cite{guttSymplecticCapacitiesPositive2018}. \begin{copiedtheorem}[{\cite[Proposition 5.6.1]{mcduffSymplecticCapacitiesUnperturbed2022}}]{prp:g tilde and cgh} If $X_{\Omega}$ is a $4$-dimensional convex toric domain then \begin{IEEEeqnarray*}{c+x*} \tilde{\mathfrak{g}}^{\leq 1}_k(X_\Omega) = \cgh{k}(X_\Omega). \end{IEEEeqnarray*} \end{copiedtheorem} \begin{copiedtheorem}[{\cite[Lemma 1.19]{guttSymplecticCapacitiesPositive2018}}]{lem:cgh of nondisjoint union of cylinders} $\cgh{k}(N^{2n}(\delta)) = \delta \, (k + n - 1)$. \end{copiedtheorem} Here, \begin{IEEEeqnarray*}{c} N^{2n}(\delta) \coloneqq \p{c}{2}{ (z_1,\ldots,z_n) \in \C^n \ \Big| \ \exists j=1,\ldots,n \colon \frac{\pi |z_j|^2}{\delta} \leq 1 } \end{IEEEeqnarray*} is the \textbf{nondisjoint union of cylinders}. Combining the three previous results, we get the following particular case of \cref{conj:the conjecture}. Since the proof is short, we present it here as well. \begin{copiedtheorem}{lem:computation of cl} If $X_{\Omega}$ is a $4$-dimensional convex toric domain then \begin{IEEEeqnarray*}{c+x*} c_L(X_{\Omega}) = \delta_\Omega. \end{IEEEeqnarray*} \end{copiedtheorem} \begin{proof} For every $k \in \Z_{\geq 1}$, \begin{IEEEeqnarray*}{rCls+x*} \delta_\Omega & \leq & c_L(X_{\Omega}) & \quad [\text{by \cref{lem:c square geq delta,lem:c square leq c lag}}] \\ & \leq & \frac{\tilde{\mathfrak{g}}^{\leq 1}_{k}(X_{\Omega})}{k} & \quad [\text{by \cref{thm:lagrangian vs g tilde}}] \\ & = & \frac{\cgh{k}(X_{\Omega})}{k} & \quad [\text{by \cref{prp:g tilde and cgh}}] \\ & \leq & \frac{\cgh{k}(N(\delta_\Omega))}{k} & \quad [\text{$X_{\Omega}$ is convex, hence $X_{\Omega} \subset N(\delta_\Omega)$}] \\ & = & \frac{\delta_\Omega(k+1)}{k} & \quad [\text{by \cref{lem:cgh of nondisjoint union of cylinders}}]. \end{IEEEeqnarray*} The result follows by taking the infimum over $k$. \end{proof} Notice that in the proof of this result, we used the Gutt--Hutchings capacities because the value $\cgh{k}(N^{2n}(\delta))$ is known and provides the desired upper bound for $c_L(X_{\Omega})$. Notice also that the hypothesis of the toric domain being convex and $4$-dimensional is present because we wish to use \cref{prp:g tilde and cgh} to compare $\tilde{\mathfrak{g}}^{\leq 1}_k$ and $\cgh{k}$. This suggests that we try to compare $c_L$ and $\cgh{k}$ directly. \begin{copiedtheorem}{thm:main theorem} If $X$ is a Liouville domain, $\pi_1(X) = 0$ and $c_1(TX)|_{\pi_2(X)} = 0$, then \begin{equation*} c_L(X,\lambda) \leq \inf_k \frac{\cgh{k}(X,\lambda)}{k}. \end{equation*} \end{copiedtheorem} We will try to prove \cref{thm:main theorem} by mimicking the proof of \cref{thm:lagrangian vs g tilde}. Unfortunately we will be unsuccessful, because we run into difficulties coming from the fact that in $S^1$-equivariant symplectic homology, the Hamiltonians and almost complex structures can depend on the domain and on a high dimensional sphere $S^{2N+1}$. 
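Notice that, arguing as in the proof of \cref{lem:computation of cl}, \cref{thm:main theorem} would imply \cref{conj:the conjecture} for convex toric domains of any dimension: a convex toric domain $X_{\Omega} \subset \C^n$ is star-shaped, so the hypotheses $\pi_1(X) = 0$ and $c_1(TX)|_{\pi_2(X)} = 0$ are satisfied, and the inclusion $X_{\Omega} \subset N^{2n}(\delta_\Omega)$ still provides the required upper bound for $\cgh{k}(X_{\Omega})$.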
Before we move on to the discussion about computations using contact homology, we show one final result which uses only the properties of $S^1$-equivariant symplectic homology. \begin{copiedtheorem}{thm:ghc and s1eshc} If $(X, \lambda)$ is a Liouville domain, then \begin{enumerate} \item $\cgh{k}(X) \leq \csh{k}(X)$; \item $\cgh{k}(X) = \csh{k}(X)$ provided that $X$ is star-shaped. \end{enumerate} \end{copiedtheorem} We now present another approach that can be used to compute $c_L$, using linearized contact homology. This has the disadvantage that at the time of writing, linearized contact homology has not yet been defined in the generality that we need (see \cref{sec:assumptions of virtual perturbation scheme} and more specifically \cref{assumption}). Using linearized contact homology, one can define the higher symplectic capacities $\mathfrak{g}^{\leq \ell}_k$. The definition of $\mathfrak{g}^{\leq \ell}_k$ for any $\ell \in \Z_{\geq 1}$ relies on the $\mathcal{L}_{\infty}$-algebra structure of the linearized contact homology chain complex, as well as an $\mathcal{L}_{\infty}$-augmentation map $\epsilon_k$. However, to prove that $c_L(X_{\Omega}) \leq \delta_\Omega$, we will only need the capacity $\mathfrak{g}^{\leq 1}_k$, and for this the $\mathcal{L}_{\infty}$-algebra structure is not necessary. The key idea is that the capacities $\mathfrak{g}^{\leq 1}_k$ can be compared to $\tilde{\mathfrak{g}}^{\leq 1}_k$ and $\cgh{k}$. \begin{copiedtheorem}[{\cite[Section 3.4]{mcduffSymplecticCapacitiesUnperturbed2022}}]{thm:g tilde vs g hat} If $X$ is a Liouville domain then \begin{IEEEeqnarray*}{c+x*} \tilde{\mathfrak{g}}^{\leq \ell}_k(X) \leq {\mathfrak{g}}^{\leq \ell}_k(X). \end{IEEEeqnarray*} \end{copiedtheorem} \begin{copiedtheorem}{thm:g hat vs gh} If $X$ is a Liouville domain such that $\pi_1(X) = 0$ and $2 c_1(TX) = 0$ then \begin{IEEEeqnarray*}{c+x*} {\mathfrak{g}}^{\leq 1}_k(X) = \cgh{k}(X). \end{IEEEeqnarray*} \end{copiedtheorem} These two results show that $\tilde{\mathfrak{g}}^{\leq 1}_k(X_\Omega) \leq \cgh{k}(X_\Omega)$ (under \cref{assumption}). Using the same proof as before, we conclude that $c_L(X_{\Omega}) = \delta_\Omega$. \begin{copiedtheorem}{thm:my main theorem} Under \cref{assumption}, if $X_\Omega$ is a convex or concave toric domain then \begin{IEEEeqnarray*}{c+x*} c_L(X_{\Omega}) = \delta_\Omega. \end{IEEEeqnarray*} \end{copiedtheorem} \section{Proof sketches} In the previous section, we explained our proof of $c_L(X_{\Omega}) = \delta_\Omega$ (first in the case where $X_{\Omega}$ is convex and $4$-dimensional, and second assuming that \cref{assumption} holds). In this section, we explain the proofs of the relations \begin{IEEEeqnarray*}{rCls+x*} c_L(X) & \leq & \inf_k \frac{\tilde{\mathfrak{g}}^{\leq 1}_k(X)}{k}, \\ \tilde{\mathfrak{g}}^{\leq \ell}_k(X) & \leq & \mathfrak{g}^{\leq \ell}_k(X), \\ \mathfrak{g}_k^{\leq 1}(X) & = & \cgh{k}(X), \end{IEEEeqnarray*} which were mentioned without proof in the previous section. Each of these relations will be proved in the main text, so the proof sketches in this section serve to showcase the technical tools that will be required for our purposes. In \cref{sec:symplectic capacities}, we study the question of extending the domain of a symplectic capacity from the class of nondegenerate Liouville domains to the class of Liouville domains which are possibly degenerate. By this discussion, it suffices to prove each theorem for nondegenerate Liouville domains only.
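For future reference, under \cref{assumption} the proof of \cref{thm:my main theorem} consists of the chain of (in)equalities \begin{IEEEeqnarray*}{c+x*} \delta_\Omega \leq c_L(X_{\Omega}) \leq \frac{\tilde{\mathfrak{g}}^{\leq 1}_{k}(X_{\Omega})}{k} \leq \frac{{\mathfrak{g}}^{\leq 1}_{k}(X_{\Omega})}{k} = \frac{\cgh{k}(X_{\Omega})}{k} \leq \frac{\cgh{k}(N^{2n}(\delta_\Omega))}{k} = \frac{\delta_\Omega (k + n - 1)}{k}, \end{IEEEeqnarray*} valid for every $k \in \Z_{\geq 1}$, followed by taking the infimum over $k$. The second, third and fourth steps are exactly the three relations listed above; the remaining steps are proved as in \cref{lem:computation of cl}.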
\begin{secondcopy}{thm:lagrangian vs g tilde} If $(X, \lambda)$ is a Liouville domain then \begin{IEEEeqnarray*}{c+x*} c_L(X) \leq \inf_k^{} \frac{\tilde{\mathfrak{g}}_k^{\leq 1}(X)}{k}. \end{IEEEeqnarray*} \end{secondcopy} \begin{proof}[Proof sketch] Let $k \in \Z_{\geq 1}$ and $L \subset \itr X$ be an embedded Lagrangian torus. Denote $a \coloneqq \tilde{\mathfrak{g}}_k^{\leq 1}(X)$. We wish to show that there exists $\sigma \in \pi_2(X,L)$ such that $0 < \omega(\sigma) \leq a / k$. Choose a suitable Riemannian metric on $L$, given by \cref{lem:geodesics lemma CM abs} (which is a restatement of \cite[Lemma 2.2]{cieliebakPuncturedHolomorphicCurves2018}). Now, consider the unit cotangent bundle $S^* L$ of $L$. Choose a point $x$ inside the unit codisk bundle $D^* L$, a symplectic divisor $D$ through $x$, and a sequence $(J_t)_{t \in [0,1)}$ of almost complex structures on $\hat{X}$ realizing SFT neck stretching along $S^* L$. By definition of $\tilde{\mathfrak{g}}_k^{\leq 1}(X) \eqqcolon a$, there exists a Reeb orbit $\gamma_0$ together with a sequence $(u_t)_t$ of $J_t$-holomorphic curves $u_t \in \mathcal{M}^{J_t}_X(\gamma_0)\p{<}{}{\mathcal{T}^{(k)}x}$. By the SFT-compactness theorem, the sequence $(u_t)_{t}$ converges to a holomorphic building $F = (F^1,\ldots,F^N)$, where each $F^{\nu}$ is a holomorphic curve. Denote by $C$ the component of $F^1 \subset T^* L$ which carries the tangency constraint. The almost complex structures $J_t$ can be chosen in such a way that the simple curve corresponding to $C$ is regular, i.e. it is an element of a moduli space which is a manifold. Using the dimension formula for this moduli space, it is possible to conclude that $C$ must have at least $k + 1$ punctures (see \cref{thm:transversality with tangency,lem:punctures and tangency simple,lem:punctures and tangency}). This implies that $C$ gives rise to at least $k > 0$ disks $D_1, \ldots, D_k$ in $X$ with boundary on $L$. The total energy of the disks is less than or equal to $a$. Therefore, one of the disks must have energy less than or equal to $a/k$. We now address a small imprecision in the proof we just described. We need to show that $\omega(D_i) \leq a/k$ for some $i = 1, \ldots, k$. However, the above proof actually shows that $\tilde{\omega}(D_i) \leq a/k$, where $\tilde{\omega}$ is a piecewise smooth $2$-form on $\hat{X} \setminus L$ given as in \cref{def:energy of a asy cylindrical holomorphic curve}. This form has the property that $\omega = \tilde{\omega}$ outside $S^* L$. The solution then is to neck stretch along $S_{\delta}^* L$ for some small $\delta > 0$. In this case, one can bound $\omega(D_i)$ by $\tilde{\omega}(D_i)$ times a function of $\delta$ (see \cref{lem:energy wrt different forms}), and we can still obtain the desired bound for $\omega(D_i)$. \end{proof} \begin{secondcopy}[\cite[Section 3.4]{mcduffSymplecticCapacitiesUnperturbed2022}]{thm:g tilde vs g hat} If $X$ is a Liouville domain then \begin{IEEEeqnarray*}{c+x*} \tilde{\mathfrak{g}}^{\leq \ell}_k(X) \leq {\mathfrak{g}}^{\leq \ell}_k(X). \end{IEEEeqnarray*} \end{secondcopy} \begin{proof}[Proof sketch] Choose a point $x \in \itr X$ and a symplectic divisor $D$ through $x$. Let $J \in \mathcal{J}(X,D)$ and consider the bar complex $\mathcal{B}(CC(X)[-1])$, computed with respect to $J$. Suppose that $a > 0$ and $\beta \in H(\mathcal{A}^{\leq a} \mathcal{B}^{\leq \ell}(CC(X)[-1]))$ is such that $\epsilon_k(\beta) \neq 0$.
By \cref{thm:g tilde two definitions}, \begin{IEEEeqnarray*}{c+x*} \tilde{\mathfrak{g}}^{\leq \ell}_k(X) = \sup_{J \in \mathcal{J}(X,D)} \mathop{\inf\vphantom{\mathrm{sup}}}_{\Gamma} \mathcal{A}(\Gamma), \end{IEEEeqnarray*} where the infimum is taken over tuples of Reeb orbits $\Gamma = (\gamma_1, \ldots, \gamma_p)$ such that $p \leq \ell$ and $\overline{\mathcal{M}}^{J}_{X}(\Gamma)\p{<}{}{\mathcal{T}^{(k)}x} \neq \varnothing$. The class $\beta$ is a linear combination of words of Reeb orbits $\Gamma$ such that $\# \Gamma \leq \ell$ and $\mathcal{A}(\Gamma) \leq a$. Since $\epsilon_k(\beta) \neq 0$, one of the words in this linear combination, say $\Gamma$, is such that the virtual count of $\overline{\mathcal{M}}^{J}_{X}(\Gamma)\p{<}{}{\mathcal{T}^{(k)}x}$ is nonzero. By assumption on the virtual perturbation scheme, $\overline{\mathcal{M}}^{J}_{X}(\Gamma)\p{<}{}{\mathcal{T}^{(k)}x}$ is nonempty, which is the condition in the definition of $\tilde{\mathfrak{g}}^{\leq \ell}_k(X)$. This shows that, for this $J$, the infimum in the formula above is at most $a$; since $J$ was arbitrary, $\tilde{\mathfrak{g}}^{\leq \ell}_k(X) \leq a$, and taking the infimum over all such $a$ gives the desired inequality. \end{proof} \begin{secondcopy}{thm:g hat vs gh} If $X$ is a Liouville domain such that $\pi_1(X) = 0$ and $2 c_1(TX) = 0$ then \begin{IEEEeqnarray*}{c+x*} {\mathfrak{g}}^{\leq 1}_k(X) = \cgh{k}(X). \end{IEEEeqnarray*} \end{secondcopy} \begin{proof}[Proof sketch] Choose a small ellipsoid $E$ such that there exists a strict exact symplectic embedding $\phi \colon E \longrightarrow X$. There are associated Viterbo transfer maps (see \cref{sec:viterbo transfer map of liouville embedding,sec:viterbo transfer map of exact symplectic embedding}, where we define the Viterbo transfer map of $S^1$-equivariant symplectic homology) \begin{IEEEeqnarray*}{rCls+x*} \phi_!^{S^1} \colon \homology{}{S^1}{}{S}{H}{}{}(X) & \longrightarrow & \homology{}{S^1}{}{S}{H}{}{}(E), \\ \phi_! \colon CH(X) & \longrightarrow & CH(E). \end{IEEEeqnarray*} Because of the topological conditions on $X$, the $S^1$-equivariant symplectic homology and the linearized contact homology have $\Z$-gradings given by the Conley--Zehnder index. In this context, one can offer an alternative definition of the Gutt--Hutchings capacities via the Viterbo transfer map, namely $\cgh{k}(X)$ is the infimum over $a$ such that the map \begin{equation*} \begin{tikzcd} \homology{}{S^1}{}{S}{H}{(\varepsilon,a]}{n - 1 + 2k}(X) \ar[r, "\iota^{S^1,a}"] & \homology{}{S^1}{}{S}{H}{+}{n - 1 + 2k}(X) \ar[r, "\phi_!^{S^1}"] & \homology{}{S^1}{}{S}{H}{+}{n - 1 + 2k}(E) \end{tikzcd} \end{equation*} is nonzero (see \cref{def:ck alternative}). Bourgeois--Oancea \cite{bourgeoisEquivariantSymplecticHomology2016} define an isomorphism \begin{IEEEeqnarray*}{c+x*} \Phi_{\mathrm{BO}} \colon \homology{}{S^1}{}{S}{H}{+}{}(X) \longrightarrow CH(X) \end{IEEEeqnarray*} between positive $S^1$-equivariant symplectic homology and linearized contact homology (whenever the latter is defined). All the maps we have just described assemble into the following commutative diagram.
\begin{equation*} \begin{tikzcd} SH^{S^1,(\varepsilon,a]}_{n - 1 + 2k}(X) \ar[r, "\iota^{S^1,a}"] \ar[d, hook, two heads, swap, "\Phi_{\mathrm{BO}}^a"] & SH^{S^1,+}_{n - 1 + 2k}(X) \ar[r, "\phi_!^{S^1}"] \ar[d, hook, two heads, "\Phi_{\mathrm{BO}}"] & SH^{S^1,+}_{n - 1 + 2k}(E) \ar[d, hook, two heads, "\Phi_{\mathrm{BO}}"] \\ CH^{a}_{n - 1 + 2k}(X) \ar[r, "\iota^{a}"] \ar[d, equals] & CH_{n - 1 + 2k}(X) \ar[r, "\phi_!"] \ar[d, equals] & CH_{n - 1 + 2k}(E) \ar[d, "{\epsilon}^E_k"] \\ CH^{a}_{n - 1 + 2k}(X) \ar[r, swap, "\iota^{a}"] & CH_{n - 1 + 2k}(X) \ar[r, swap, "{\epsilon}_k^X"] & \Q \end{tikzcd} \end{equation*} Here, the vertical arrows between the top two rows are the Bourgeois--Oancea isomorphism and the maps $\epsilon_k^X$ and $\epsilon_k^E$ are the augmentation maps of $X$ and $E$. Using this information, we can show that $\cgh{k}(X) \leq \mathfrak{g}^{\leq 1}_k(X)$: \begin{IEEEeqnarray*}{rCls+x*} \cgh{k}(X) & = & \inf \{ a > 0 \mid \phi_!^{S^1} \circ \iota^{S^1,a} \neq 0 \} & \quad [\text{by the alternative definition of $\cgh{k}$}] \\ & \leq & \inf \{ a > 0 \mid {\epsilon}_k^X \circ \iota^{a} \neq 0 \} & \quad [\text{since the diagram commutes}] \\ & = & {\mathfrak{g}}^{\leq 1}_k(X) & \quad [\text{by definition of $\mathfrak{g}^{\leq 1}_k$}]. \end{IEEEeqnarray*} In this computation, the inequality in the second line is an equality if $\epsilon^E_k$ is an isomorphism. The proof of this statement is done in \cref{sec:augmentation map of an ellipsoid}, using the techniques from \cref{sec:cr operators,sec:functional analytic setup}. The key ideas are the following. One can show that $CH_{n - 1 + 2k}(E) \cong \Q$ (see \cref{lem:lch of ellipsoid}), and therefore it is enough to show that $\epsilon_k^E$ is nonzero. Recall that $\epsilon_k^E$ is given by the virtual count of holomorphic curves in $X$ satisfying a tangency constraint. We count those curves explicitly in \cref{lem:moduli spaces of ellipsoids have 1 element}. Notice that here we need to justify that the virtual count of curves equals the usual signed count. This follows by assumption on the virtual perturbation scheme and because in \cref{sec:augmentation map of an ellipsoid}, we also show that the moduli spaces are transversely cut out. \end{proof} \section{Outline of the thesis} We now give a chapter by chapter outline of this thesis. In \textbf{\cref{chp:symplectic manifolds}} we review the various types of manifolds that will show up in this thesis, i.e. symplectic manifolds and contact manifolds. We talk about the various types of vector fields in these manifolds (Hamiltonian vector field, Liouville vector field, Reeb vector field) and mention the properties of their flows. We give the definition of special types of symplectic manifolds, from less to more specific: Liouville domains, star-shaped domains, toric domains. Finally, we explain two constructions which will be present throughout: the symplectization of a contact manifold, and the completion of a Liouville domain. In \textbf{\cref{chp:indices}} we give a review of the Conley--Zehnder indices. In order to list the properties of the Conley--Zehnder index, one needs to mention the Maslov index and the first Chern class, so we offer a review of those as well. We explain how to define the Conley--Zehnder index of an orbit in a symplectic or contact manifold by defining an induced path of symplectic matrices via a trivialization. Finally, we study the Conley--Zehnder index of a Reeb orbit in a unit cotangent bundle. 
The Conley--Zehnder index is needed for our purposes because it provides the grading of $S^1$-equivariant symplectic homology and of linearized contact homology. \textbf{\cref{chp:holomorphic curves}} is about the analytic properties of holomorphic curves and Floer trajectories. We define punctured Riemann surfaces as the domains for such curves, and symplectic cobordisms as the targets for such curves. We prove the energy identity for holomorphic curves, as well as the maximum principle. Then, we discuss the known compactness and transversality for moduli spaces of asymptotically cylindrical holomorphic curves (these are the moduli spaces which are considered in linearized contact homology). The second half of this chapter is about solutions of the ``parametrized Floer equation'' (solutions to this equation are the trajectories which are counted in the differential of the $S^1$-equivariant Floer chain complex). We prove an energy inequality for Floer trajectories, as well as three ``confinement lemmas'': the maximum principle, the asymptotic behaviour lemma, and the no escape lemma. Finally, we prove compactness and transversality for moduli spaces of solutions of the parametrized Floer equation using the corresponding results for moduli spaces of solutions of the Floer equation. In \textbf{\cref{chp:floer}} we define the $S^1$-equivariant symplectic homology and establish its structural properties. First we define the $S^1$-equivariant Floer chain complex and its homology. The $S^1$-equivariant symplectic homology is then defined by taking the limit of the $S^1$-equivariant Floer homology with respect to an increasing sequence of Hamiltonians. We devote two sections to showing that $S^1$-equivariant symplectic homology is a functor, which amounts to defining the Viterbo transfer maps and proving their properties. Finally, we define a $\delta$ map, which enters the definition of the Gutt--Hutchings capacities. \textbf{\cref{chp:symplectic capacities}} is about symplectic capacities. The first section deals with generalities about symplectic capacities. We show how to extend a capacity of nondegenerate Liouville domains to a capacity of (possibly degenerate) Liouville domains. The next three sections are each devoted to defining and proving the properties of a specific capacity, namely the Lagrangian capacity $c_L$, the Gutt--Hutchings capacities $\cgh{k}$ and the $S^1$-equivariant symplectic homology capacities $\csh{k}$, and finally the McDuff--Siegel capacities $\tilde{\mathfrak{g}}^{\leq \ell}_k$. In the section about the Lagrangian capacity, we also state the conjecture that we will try to prove in the remainder of the thesis, i.e. $c_L(X_{\Omega}) = \delta_\Omega$ for a convex or concave toric domain $X_{\Omega}$. The final section is devoted to computations. We show that $c_L(X) \leq \inf_k^{} \tilde{\mathfrak{g}}^{\leq 1}_k(X) / k$. We use this result to prove the conjecture in the case where $X_{\Omega}$ is $4$-dimensional and convex. \textbf{\cref{chp:contact homology}} introduces the linearized contact homology of a nondegenerate Liouville domain. The idea is that using the linearized contact homology, one can define the higher symplectic capacities, which will allow us to prove $c_L(X_{\Omega}) = \delta_\Omega$ for any convex or concave toric domain $X_{\Omega}$ (but under the assumption that linearized contact homology and the augmentation map are well-defined).
We give a review of real linear Cauchy--Riemann operators on complex vector bundles, with a special emphasis on criteria for surjectivity in the case where the bundle has complex rank $1$. We use this theory to prove that moduli spaces of curves in ellipsoids are transversely cut out and in particular that the augmentation map of an ellipsoid is an isomorphism. The final section is devoted to computations. We show that $\mathfrak{g}^{\leq 1}_k(X) = \cgh{k}(X)$, and use this result to prove our conjecture (again, under \cref{assumption}). \chapter{Symplectic and contact manifolds} \label{chp:symplectic manifolds} \section{Symplectic manifolds} In this section, we recall some basics about symplectic manifolds. \begin{definition} \label{def:symplectic manifold} A \textbf{symplectic manifold} is a manifold $X$ together with a $2$-form $\omega$ which is closed and nondegenerate. In this case we say that $\omega$ is a \textbf{symplectic form}. An \textbf{exact symplectic manifold} is a manifold $X$ together with a $1$-form $\lambda$ such that $\omega = \edv \lambda$ is a symplectic form. In this case we call $\lambda$ a \textbf{symplectic potential} for $\omega$. \end{definition} \begin{example} \label{exa:cn symplectic} Consider $\C^n$ with coordinates $(x^1, \ldots, x^n, y^1, \ldots, y^n)$, where $z^j = x^j + i y^j$ for every $j = 1, \ldots, n$. We define \begin{IEEEeqnarray*}{rCls+x*} \lambda & \coloneqq & \frac{1}{2} \sum_{j=1}^{n} (x^j \edv y^j - y^j \edv x^j), \\ \omega & \coloneqq & \edv \lambda = \sum_{j=1}^{n} \edv x^j \wedge \edv y^j. \end{IEEEeqnarray*} Then, $(\C^n, \lambda)$ is an exact symplectic manifold. \end{example} \begin{example} \label{exa:cotangent bundle} Let $L$ be a manifold and consider the \textbf{cotangent bundle} of $L$, which is a vector bundle $\pi \colon T^*L \longrightarrow L$. As a set, $T^*L = \bigunion_{q \in L}^{} T^*_qL$. As a vector bundle, $T^*L$ is given as follows. For each coordinate chart $(U,q^1,\ldots,q^n)$ on $L$, there is a coordinate chart $(\pi ^{-1}(U),q^1 \circ \pi,\ldots,q^n \circ \pi,p_1,\ldots,p_n)$ on $T^*L$, where the $p_i$ are given by \begin{IEEEeqnarray*}{c} p_i(u) \coloneqq u \p{}{2}{ \pdv{}{q^i} \Big|_{\pi(u)} } \end{IEEEeqnarray*} for $u \in T^*L$. For simplicity, denote $q^i = q^i \circ \pi$. Define a 1-form $\lambda$ on $T^*L$, called the \textbf{canonical symplectic potential} or \textbf{Liouville $1$-form}, as follows. For each $u \in T^*L$, the linear map $\lambda _{u} \colon T _{u} T^*L \longrightarrow \R$ is given by $\lambda_{u} \coloneqq u \circ \dv \pi(u)$. The form $\omega \coloneqq \edv \lambda$ is the \textbf{canonical symplectic form}. In coordinates, \begin{IEEEeqnarray*}{rCls+x*} \lambda & = & \sum_{i=1}^{n} p_i \edv q^i, \\ \omega & = & \sum_{i=1}^{n} \edv p_i \wedge \edv q^i. \end{IEEEeqnarray*} Then, $(T^*L,\lambda)$ is an exact symplectic manifold. \end{example} If $(X, \omega)$ is a symplectic manifold, then using symplectic linear algebra we conclude that $X$ must be even dimensional, i.e. $\dim X = 2n$ for some $n$ (see for example \cite[Theorem 1.1]{silvaLecturesSymplecticGeometry2008}). In particular, $\omega^n$ is a volume form on $X$. \begin{definition} \label{def:types of embeddings} Let $(X,\omega_X)$, $(Y,\omega_Y)$ be symplectic manifolds and $\varphi \colon X \longrightarrow Y$ be an embedding. Then, $\varphi$ is \textbf{symplectic} if $\varphi^* \omega_Y = \omega_X$. A \textbf{symplectomorphism} is a symplectic embedding which is a diffeomorphism. 
We say that $\varphi$ is \textbf{strict} if $\varphi(X) \subset \itr Y$. If $(X,\lambda_X)$, $(Y,\lambda_Y)$ are exact, then we say that $\varphi$ is: \begin{enumerate} \item \label{def:types of embeddings 1} \textbf{symplectic} if $\varphi^* \lambda_Y - \lambda_X$ is closed (this is equivalent to the previous definition); \item \label{def:types of embeddings 2} \textbf{generalized Liouville} if $\varphi^* \lambda_Y - \lambda_X$ is closed and $(\varphi^* \lambda_Y - \lambda_X)|_{\partial X}$ is exact; \item \label{def:types of embeddings 3} \textbf{exact symplectic} if $\varphi^* \lambda_Y - \lambda_X$ is exact; \item \label{def:types of embeddings 4} \textbf{Liouville} if $\varphi^* \lambda_Y - \lambda_X = 0$. \end{enumerate} \end{definition} \begin{remark} \label{rmk:closed equivalent to exact} In the context of \cref{def:types of embeddings}, if $H^1_{\mathrm{dR}}(X) = 0$ then \ref{def:types of embeddings 1} $\Longleftrightarrow$ \ref{def:types of embeddings 2} $\Longleftrightarrow$ \ref{def:types of embeddings 3}. \end{remark} \begin{remark} The composition of generalized Liouville embeddings is not necessarily a generalized Liouville embedding. This means that exact symplectic manifolds together with generalized Liouville embeddings do not form a category. \end{remark} \begin{definition} Let $(X,\omega)$ be a symplectic manifold of dimension $2n$ and $\iota \colon L \longrightarrow X$ be an immersed submanifold of dimension $n$. Then, $L$ is \textbf{Lagrangian} if $\iota^* \omega = 0$. If $(X,\lambda)$ is exact, then we say that $L$ is: \begin{enumerate} \item \textbf{Lagrangian} if $\iota^* \lambda$ is closed (this is equivalent to the previous definition); \item \textbf{exact Lagrangian} if $\iota^* \lambda$ is exact. \end{enumerate} \end{definition} \begin{example} Let $L$ be a manifold and consider its cotangent bundle, $T^*L$. Then, the zero section $z \colon L \longrightarrow T^*L$ is an exact Lagrangian. In fact, $z^* \lambda = 0$. \end{example} \begin{lemma}[Moser's trick] \label{lem:mosers trick} Let $X$ be a manifold, $\alpha_t$ be a smooth $1$-parameter family of forms on $X$ and $Y_t$ be a complete time dependent vector field on $X$ with flow $\phi_t$. Then, \begin{equation*} \phi^*_t \alpha_t^{} - \alpha_0^{} = \int_{0}^{t} \phi^*_s \p{}{1}{ \dot{\alpha}_s + \ldv{Y_s} \alpha_s } \edv s = \int_{0}^{t} \phi^*_s \p{}{1}{ \dot{\alpha}_s + \edv \iota _{Y_s} \alpha_s + \iota _{Y_s} \edv \alpha_s } \edv s. \end{equation*} \end{lemma} \begin{proof} \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\phi^*_t \alpha_t^{} - \alpha_0^{}}\\ \quad & = & \phi^*_t \alpha_t^{} - \phi^*_0 \alpha_0^{} & \quad [\text{since $\phi_0 = \id$}] \\ & = & \int_{0}^{t} \odv{}{s} \phi^*_s \alpha_s \, \edv s & \quad [\text{by the fundamental theorem of calculus}] \\ & = & \int_{0}^{t} \phi^*_s \p{}{1}{ \dot{\alpha}_s + \ldv{Y_s} \alpha_s } \edv s & \quad [\text{by definition of Lie derivative}] \\ & = & \int_{0}^{t} \phi^*_s \p{}{1}{ \dot{\alpha}_s + \edv \iota _{Y_s} \alpha_s + \iota _{Y_s} \edv \alpha_s } \edv s & \quad [\text{by the Cartan magic formula}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{theorem}[Darboux] Let $(X,\omega)$ be a symplectic manifold. Then, for every $p \in X$, there exists a coordinate neighbourhood $(U,x^1,\ldots,x^n,y^1,\ldots,y^n)$ of $p$ such that \begin{equation*} \omega = \sum_{i=1}^{n} \edv x^i \wedge \edv y^i. 
\end{equation*} \end{theorem} \begin{proof} Taking a coordinate chart on $X$, it is enough to assume that $\omega_0$, $\omega_1$ are symplectic forms on a neighbourhood of $0$ in $\C^n$ and to prove that there exists a local diffeomorphism $\phi$ of $\C^n$ such that $\phi^* \omega_1 = \omega_0$. Choosing the initial coordinate chart carefully, we may assume in addition that $\omega_j$ has a primitive $\lambda_j$, i.e. $\omega_j = \edv \lambda_j$, for $j = 0, 1$, and also that $\omega_0$ and $\omega_1$ are equal at $0 \in \C^n$, i.e. $\omega_0|_0 = \omega_1|_0$. Let \begin{IEEEeqnarray*}{rCls+x*} \lambda_t & \coloneqq & \lambda_0 + t (\lambda_1 - \lambda_0), \\ \omega_t & \coloneqq & \edv \lambda_t = \omega_0 + t (\omega_1 - \omega_0). \end{IEEEeqnarray*} Since $\omega_t|_0 = \omega_0|_0$ is symplectic, possibly after passing to a smaller neighbourhood of $0$ we may assume that $\omega_t$ is symplectic. Let $Y_t$ be the unique time-dependent vector field such that $\dot{\lambda}_t + \iota_{Y_t} \omega_t = 0$ and denote by $\phi_t$ the flow of $Y_t$. Then, \begin{IEEEeqnarray*}{rCls+x*} \phi^*_t \omega_t^{} - \omega_0^{} & = & \int_{0}^{t} \phi^*_s \p{}{}{ \dot{\omega}_s + \edv \iota _{Y_s} \omega_s + \iota _{Y_s} \edv \omega_s } \edv s & \quad [\text{by Moser's trick (\cref{lem:mosers trick})}] \\ & = & \int_{0}^{t} \phi^*_s \edv \p{}{}{ \dot{\lambda}_s + \edv \iota _{Y_s} \omega_s } \edv s & \quad [\text{since $\omega_t = \edv \lambda_t$}] \\ & = & 0 & \quad [\text{by definition of $Y_t$}], \end{IEEEeqnarray*} which shows that $\phi_1$ is the desired local diffeomorphism. \end{proof} \begin{definition} \label{def:liouville vf} If $(X,\lambda)$ is an exact symplectic manifold, then the \textbf{Liouville vector field} of $(X,\lambda)$ is the unique vector field $Z$ such that \begin{IEEEeqnarray*}{c} \lambda = \iota_Z \omega. \end{IEEEeqnarray*} \end{definition} \begin{lemma} \label{lem:liouville vf} The Liouville vector field satisfies \begin{IEEEeqnarray*}{c} \ldv{Z} \lambda = \lambda. \end{IEEEeqnarray*} \end{lemma} \begin{proof} \begin{IEEEeqnarray*}{rCls+x*} \ldv{Z} \lambda & = & \edv \iota_Z \lambda + \iota_Z \edv \lambda & \quad [\text{by the Cartan magic formula}] \\ & = & \edv \iota_Z \lambda + \iota_Z \omega & \quad [\text{since $\omega = \edv \lambda$}] \\ & = & \edv \iota_Z \iota_Z \omega + \lambda & \quad [\text{by definition of Liouville vector field, $\lambda = \iota_Z \omega$}] \\ & = & \lambda & \quad [\text{since $\omega$ is antisymmetric, $\iota_Z \iota_Z \omega = 0$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{definition} \label{def:Hamiltonian v field} Let $H \in C^\infty(X,\R)$ be a function on $X$. The \textbf{Hamiltonian vector field} of $H$, denoted $X_H$, is the unique vector field on $X$ satisfying \begin{IEEEeqnarray*}{c} \edv H = -\iota _{X_H} \omega. \end{IEEEeqnarray*} \end{definition} \begin{proposition} \phantomsection\label{lem:hamiltonian vector field preserves symplectic form} The Hamiltonian vector field preserves the symplectic form, i.e. \begin{IEEEeqnarray*}{c} \ldv{X_H} \omega = 0. \end{IEEEeqnarray*} \end{proposition} \begin{proof} \begin{IEEEeqnarray*}{rCls+x*} \ldv{X_H} \omega & = & \edv \iota_{X_H} \omega + \iota_{X_H} \edv \omega & \quad [\text{by the Cartan magic formula}] \\ & = & \edv \iota_{X_H} \omega & \quad [\text{since $\omega$ is closed}] \\ & = & - \edv^2 H & \quad [\text{by definition of $X_H$}] \\ & = & 0 & \quad [\text{since $\edv^2 = 0$}].
& \qedhere \end{IEEEeqnarray*} \end{proof} \begin{proposition}[Liouville's theorem] The Hamiltonian vector field preserves the symplectic volume form, i.e. \begin{equation*} \ldv{X_H} \p{}{2}{\frac{\omega^n}{n!}} = 0. \end{equation*} \end{proposition} \begin{proof} By \cref{lem:hamiltonian vector field preserves symplectic form} and the fact that Lie derivatives obey the Leibniz rule. \end{proof} \begin{proposition}[conservation of energy] \label{lem:conservation of energy} The Hamiltonian is constant along the Hamiltonian vector field, i.e. \begin{IEEEeqnarray*}{c} X_H(H) = 0. \end{IEEEeqnarray*} \end{proposition} \begin{proof} \begin{IEEEeqnarray*}{rCls+x*} X_H(H) & = & \edv H(X_H) & \quad [\text{by definition of exterior derivative}] \\ & = & - \iota_{X_H} \omega (X_H) & \quad [\text{by definition of $X_H$}] \\ & = & - \omega(X_H, X_H) & \quad [\text{by definition of interior product}] \\ & = & 0 & \quad [\text{since $\omega$ is antisymmetric}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \section{Contact manifolds} In this section, we recall some basics about contact manifolds. \begin{definition} \label{def:contact manifold} A \textbf{contact manifold} is a pair $(M,\xi)$, where $M$ is a smooth manifold and $\xi$ is a distribution on $M$ of codimension 1, called the \textbf{contact structure}, such that for all locally defining forms $\alpha \in \Omega^1(U)$ for $\xi$ (i.e. such that $\xi = \ker \alpha$), $\edv \alpha |_{\xi}$ is nondegenerate. In this case we call $\alpha$ a \textbf{local contact form} for $M$. In the case where $\alpha \in \Omega^1(M)$ we say that $\alpha$ is a \textbf{global contact form} for $M$. A \textbf{strict contact manifold} is a pair $(M,\alpha)$ such that $(M,\ker \alpha)$ is a contact manifold. \end{definition} The following lemma characterizes the linear algebra of contact manifolds. \begin{lemma} \label{lem:contact manifold} Let $M$ be an $m$-dimensional manifold, $\alpha \in \Omega^1(M)$ be nonvanishing and $\xi = \ker \alpha$. Then, the following are equivalent: \begin{enumerate} \item \label{lem:contact manifold 1} The form $\edv \alpha |_{\xi}$ is nondegenerate, i.e. $(M,\alpha)$ is a contact manifold; \item \label{lem:contact manifold 3} The tangent bundle of $M$ decomposes as $T M = \ker \edv \alpha \directsum \ker \alpha$; \item \label{lem:contact manifold 2} There exists an $n \in \Z_{\geq 0}$ such that $m = 2n + 1$ and $\alpha \wedge (\edv \alpha)^{n}$ is a volume form. \end{enumerate} \end{lemma} \begin{proof} {\ref{lem:contact manifold 1}} $\Longrightarrow$ {\ref{lem:contact manifold 3}}: We show that $\ker \edv \alpha \cap \ker \alpha = 0$. For this, it suffices to assume that $v \in \ker \edv \alpha \cap \ker \alpha$ and to prove that $v = 0$. Since $\edv \alpha|_{\ker \alpha}(v) = 0$ and $\edv \alpha|_{\ker \alpha}$ is nondegenerate we conclude that $v = 0$. We show that $\dim TM = \dim \ker \edv \alpha + \dim \ker \alpha$. Since $\alpha$ is nonvanishing, $\dim \ker \alpha = \dim TM - 1$. Since $\ker \edv \alpha \cap \ker \alpha = 0$, this implies that $\dim \ker \edv \alpha \in \{0,1\}$. Since $\edv \alpha|_{\ker \alpha}$ is nondegenerate, $\ker \alpha$ is even dimensional, and hence $\dim TM = \dim \ker \alpha + 1$ is odd. An antisymmetric bilinear form on an odd dimensional vector space is necessarily degenerate, so $\edv \alpha|_{TM}$ is degenerate and $\dim \ker \edv \alpha \geq 1$. Therefore, $\dim \ker \edv \alpha = 1$. {\ref{lem:contact manifold 3}} $\Longrightarrow$ {\ref{lem:contact manifold 2}}: Since $T M = \ker \edv \alpha \oplus \ker \alpha$, we conclude that the forms $\alpha|_{\ker \edv \alpha}$ and $\edv \alpha|_{\ker \alpha}$ are nondegenerate.
In particular, $\ker \alpha$ is even dimensional, i.e. $\dim \ker \alpha = 2n$ for some $n$, and $(\edv \alpha|_{\ker \alpha})^n$ is a volume form on $\ker \alpha$. So, $\alpha \wedge (\edv \alpha)^n$ is a volume form on $M$. {\ref{lem:contact manifold 2}} $\Longrightarrow$ {\ref{lem:contact manifold 1}}: If $v \in \xi = \ker \alpha$ is such that $v \in \ker \edv \alpha|_{\xi}$, then $\iota_v (\alpha \wedge (\edv \alpha)^n) = 0$, which implies that $v = 0$. \end{proof} \begin{definition} Let $(M,\xi_M)$, $(N,\xi_N)$ be contact manifolds. A \textbf{contactomorphism} from $M$ to $N$ is a diffeomorphism $\phi \colon M \longrightarrow N$ such that $T \phi(\xi_M) = \xi_N$. If $(M,\alpha_M)$, $(N,\alpha_N)$ are strict contact manifolds, a \textbf{strict contactomorphism} from $M$ to $N$ is a diffeomorphism $\phi \colon M \longrightarrow N$ such that $\phi^* \alpha_N = \alpha_M$. \end{definition} \begin{remark} We will consider only strict contact manifolds and strict contactomorphisms, and for simplicity we will drop the word ``strict'' from our nomenclature. \end{remark} \begin{definition} \label{def:Reeb vector field} The \textbf{Reeb vector field} of $(M,\alpha)$ is the unique vector field $R$ satisfying \begin{IEEEeqnarray*}{rCls+x*} \iota_R \edv \alpha & = & 0, \\ \iota_R \alpha & = & 1. \end{IEEEeqnarray*} \end{definition} \begin{remark} \cref{lem:contact manifold} {\ref{lem:contact manifold 3}} can also be written as $TM = \p{<}{}{R} \directsum \xi$. \end{remark} \begin{lemma} \label{lem:reeb vf preserves contact form} The Reeb vector field preserves the contact form, i.e. \begin{IEEEeqnarray*}{c+x*} \ldv{R} \alpha = 0. \end{IEEEeqnarray*} \end{lemma} \begin{proof} \begin{IEEEeqnarray*}{rCls+x*} \ldv{R} \alpha & = & \iota _{R} \edv \alpha + \edv \iota _{R} \alpha & \quad [\text{by the Cartan magic formula}] \\ & = & 0 + \edv 1 & \quad [\text{by definition of $R$}] \\ & = & 0. & \quad & \qedhere \end{IEEEeqnarray*} \end{proof} We now consider contact manifolds which are hypersurfaces of symplectic manifolds. \begin{definition} \label{def:hypersurface of contact type} Let $(X,\omega)$ be a symplectic manifold of dimension $2n$, $(M, \alpha)$ be a contact manifold of dimension $2n - 1$ such that $M \subset X$, and denote by $\iota \colon M \longrightarrow X$ the inclusion. We say that $M$ is a \textbf{hypersurface of contact type} if $\edv \alpha = \iota^* \omega$. In this case, the \textbf{Liouville vector field} is the unique vector field $Z \in C^{\infty}(\iota^* TX)$ such that \begin{IEEEeqnarray*}{c+x*} \iota_Z \omega = \alpha. \end{IEEEeqnarray*} \end{definition} \begin{example} Let $(L,g)$ be a Riemannian manifold. Recall that $(T^*L, \lambda)$ is an exact symplectic manifold. Consider the \textbf{unit cotangent bundle} \begin{IEEEeqnarray*}{c+x*} S^* L \coloneqq \{ u \in T^* L \mid \| u \| = 1 \}. \end{IEEEeqnarray*} The form $\alpha \coloneqq \lambda|_{S^*L}$ is a contact form on $S^* L$. Therefore, $(S^*L, \alpha) \subset (T^*L, \lambda)$ is a hypersurface of contact type. More generally, we can also define the cotangent bundle of radius $r > 0$ by $S^*_r L \coloneqq \{ u \in T^* L \mid \| u \| = r \}$, which is also a hypersurface of contact type. \end{example} \begin{lemma} \label{lem:decomposition coming from contact hypersurface} We have the decompositions \begin{IEEEeqnarray*}{rCls+x*} \iota^* TX & = & \p{<}{}{Z} \directsum \p{<}{}{R} \directsum \xi, \\ TM & = & \p{<}{}{R} \directsum \xi, \\ \xi^\perp & = & \p{<}{}{Z} \directsum \p{<}{}{R}. 
\end{IEEEeqnarray*} \end{lemma} \begin{proof} By \cref{lem:contact manifold}, we have that $TM = \p{<}{}{R} \directsum \xi$. To show that $\xi^\perp = \p{<}{}{Z} \directsum \p{<}{}{R}$, by considering the rank of the vector bundles it suffices to show that $\p{<}{}{Z} \directsum \p{<}{}{R} \subset \xi^\perp$. Let $v \in \xi_p = \ker \alpha_p$. We wish to show that $\omega(Z_p, v) = 0$ and $\omega(R_p, v) = 0$. \begin{IEEEeqnarray*}{rCls+x*} \omega(Z_p, v) & = & \alpha(v) & \quad [\text{by definition of $Z$}] \\ & = & 0 & \quad [\text{since $v \in \ker \alpha_p$}], \\ \\ \omega(R_p, v) & = & \edv \alpha(R_p, v) & \quad [\text{by definition of hypersurface of contact type}] \\ & = & 0 & \quad [\text{by definition of Reeb vector field}]. \end{IEEEeqnarray*} Then, as oriented vector bundles, $\iota^* TX = \xi^\perp \directsum \xi = \p{<}{}{Z} \directsum \p{<}{}{R} \directsum \xi$. \end{proof} \begin{lemma} \label{lem:HR flow} Let $H \colon X \longrightarrow \R$ and assume that $M$ is the preimage of $H$ under a regular value $c \in \R$, i.e. $M = H^{-1}(c)$. Then, there exists a unique vector field $X_H^M$ on $M$ which is $\iota$-related to $X_H$. In addition, $X_H^M = \alpha(X_H^M) R$. \end{lemma} \begin{proof} To prove the first statement, it suffices to show that $X_H|_p \in T_p M$ for every $p \in M$. By conservation of energy (\cref{lem:conservation of energy}), we have that \begin{IEEEeqnarray*}{rCls+x*} X_H|_p & \in & \ker \edv H(p) \\ & = & T_p (H ^{-1}(c)) \\ & = & T_p M. \end{IEEEeqnarray*} We now show that $\iota_{X_H^M} \edv \alpha = 0$. \begin{IEEEeqnarray*}{rCls+x*} \iota _{X_H^ M} \edv \alpha & = & \iota _{X_H^ M} \iota^* \omega & \quad [\text{by definition of hypersurface of contact type}] \\ & = & \iota^* \iota _{X_H} \omega & \quad [\text{since $X_H^M$ is $\iota$-related to $X_H$}] \\ & = & - \iota^* \edv H & \quad [\text{by definition of Hamiltonian vector field}] \\ & = & - \edv \iota^* H & \quad [\text{by naturality of $\edv$}] \\ & = & 0 & \quad [\text{since $H$ is constant equal to $c$ on $M$}]. \end{IEEEeqnarray*} By definition of Reeb vector field, we conclude that $X_H^M$ and $R$ are collinear, and in particular $X_H^M = \alpha(X_H^M) R$. \end{proof} We now compare the dynamics from the points of view of Riemannian, symplectic and contact geometry. Let $(L,g)$ be a Riemannian manifold of dimension $n$. The manifold $L$ has a tangent bundle $TL$ and a cotangent bundle $T^*L$, and the map $\tilde{g} \colon TL \longrightarrow T^*L$ given by $\tilde{g}(v) = g(v,\cdot)$ is a vector bundle isomorphism. Consider the unit cotangent bundle $\iota \colon S^*L \longrightarrow T^*L$, which has a Reeb vector field $R$, and the function \begin{IEEEeqnarray*}{rrCl} H \colon & T^*L & \longrightarrow & \R \\ & u & \longmapsto & \frac{1}{2} \p{||}{}{u}_{}^2. \end{IEEEeqnarray*} \begin{definition} We define a vector field $G$ on $TL$, called the \textbf{geodesic field}, as follows. At $v \in TL$, $G _{v}$ is given by \begin{equation*} G _{v} \coloneqq \odv{}{t}\Big|_{t=0} \dot{\gamma}(t), \end{equation*} where $\gamma \colon I \longrightarrow L$ is the unique geodesic with $\dot{\gamma}(0) = v$ and $\dot{\gamma} \colon I \longrightarrow TL$ is the lift of $\gamma$. \end{definition} A curve $\gamma$ in $L$ is a geodesic if and only if its lift $\dot{\gamma}$ to $TL$ is a flow line of $G$. \begin{theorem} \label{thm:flow geodesic vs hamiltonian} The vector field $G$ is $\tilde{g}$-related to $X_H$. 
\end{theorem} \begin{proof} See for example \cite[Theorem 1.5.2]{geigesIntroductionContactTopology2008} or \cite[Theorem 2.3.1]{frauenfelderRestrictedThreeBodyProblem2018}. \end{proof} \begin{theorem} \label{thm:flow reeb vs hamiltonian} The vector field $R$ is $\iota$-related to $X_H$. \end{theorem} \begin{proof} Notice that $S^*L = H^{-1}(1/2)$. By \cref{lem:HR flow}, it suffices to show that $\lambda(X_H) \circ \iota = 1$. Let $(q^1, \ldots, q^n)$ be coordinates on $L$, with induced coordinates $(q^1, \ldots, q^n, p_1, \ldots, p_n)$ on $T^* L$. With respect to these coordinates, $X_H$ can be written as \begin{IEEEeqnarray}{rCls+x*} X_H & = & \sum_{i = 1}^{n} \p{}{2}{ \pdv{H}{p_i} \pdv{}{q^i} - \pdv{H}{q^i} \pdv{}{p_i} } \IEEEnonumber \\ & = & \sum_{i = 1}^{n} \p{}{2}{ \sum_{j=1}^{n} g^{ij} p_j \pdv{}{q^i} - \frac{1}{2} \sum_{j,k=1}^{n} \pdv{g^{jk}}{q^i} p_j p_k \pdv{}{p_i} }. \plabel{eq:hamiltonian vector field in coordinates} \end{IEEEeqnarray} We show that $\p{<}{}{\dv \pi(u) X_H|_{u}, \cdot } = u$. \begin{IEEEeqnarray*}{rCls+x*} \p{<}{}{\dv \pi (u) X_{H}|_{u}, v} & = & \sum_{i,j=1}^{n} g _{ij} (\dv \pi (u) X_{H}|_{u})^i v^j \\ & = & \sum_{i,j,k=1}^{n} g _{ij} g ^{ik} p_k v^j \\ & = & \sum_{j,k=1}^{n} \delta^k_j p_k v^j \\ & = & \sum_{j=1}^{n} p_j v^j \\ & = & \sum_{i=1}^{n} p_i \edv q^i \p{}{2}{ \sum_{j=1}^{n} v^j \pdv{}{q^j} } \\ & = & u(v). \end{IEEEeqnarray*} We show that $\lambda(X_H) = 2 H$: \begin{IEEEeqnarray*}{rCls+x*} \lambda(X_{H})|_{u} & = & u (\dv \pi (u) X_{H}|_{u}) & \quad [\text{by definition of $\lambda$}] \\ & = & \p{<}{}{ \dv \pi (u) X_{H}|_{u},\dv \pi (u) X_{H}|_{u} } & \quad [\text{since $u = \p{<}{}{\dv \pi(u) X_H|_{u}, \cdot }$}] \\ & = & \p{||}{}{ \dv \pi (u) X_{H}|_{u} }^2 & \quad [\text{by definition of the norm}] \\ & = & \p{||}{}{u}^2 & \quad [\text{since $u = \p{<}{}{\dv \pi(u) X_H|_{u}, \cdot }$}] \\ & = & 2 H (u) & \quad [\text{by definition of $H$}]. \end{IEEEeqnarray*} By definition of $H$, this implies that $\lambda(X_H) \circ \iota = 1$, as desired. \end{proof} \section{Liouville domains} In this section we introduce Liouville domains, which will be the main type of symplectic manifold we work with. \begin{definition} \label{def:liouville domain} A \textbf{Liouville domain} is a pair $(X,\lambda)$, where $X$ is a compact, connected smooth manifold with boundary $\del X$ and $\lambda \in \Omega^1(X)$ is such that $\edv \lambda \in \Omega^2(X)$ is symplectic, $\lambda|_{\del X}$ is contact and the orientations on $\del X$ coming from $(X,\edv \lambda)$ and coming from $\lambda|_{\del X}$ are equal. \end{definition} \begin{example} Let $(L,g)$ be a Riemannian manifold. The \textbf{unit codisk bundle}, \begin{IEEEeqnarray*}{c+x*} D^* L \coloneqq \{ u \in T^*L \mid \| u \| \leq 1 \}, \end{IEEEeqnarray*} is a Liouville domain. More generally, we can define the codisk bundle of radius $r > 0$ by $D^*_r L \coloneqq \{ u \in T^*L \mid \| u \| \leq r \}$, which is also a Liouville domain. \end{example} \begin{definition} \label{def:star shaped} A \textbf{star-shaped domain} is a compact, connected $2n$-dimensional submanifold $X$ of $\C^{n}$ with boundary $\del X$ such that $(X,\lambda)$ is a Liouville domain, where $\lambda$ is the symplectic potential of \cref{exa:cn symplectic}. \end{definition} \begin{definition} \label{def:moment map} The \textbf{moment map} is the map $\mu \colon \C^n \longrightarrow \R^n _{\geq 0}$ given by \begin{IEEEeqnarray*}{c+x*} \mu(z_1,\ldots,z_n) \coloneqq \pi(|z_1|^2,\ldots,|z_n|^2).
\end{IEEEeqnarray*} Define also \begin{IEEEeqnarray*}{rCrClClrCl} \Omega_X & \coloneqq & \Omega(X) & \coloneqq & \hphantom{{}^{-1}} \mu(X) \subset \R_{\geq 0}^n, & \qquad & \text{for every } & X & \subset & \C^n, \\ X_{\Omega} & \coloneqq & X(\Omega) & \coloneqq & \mu^{-1}(\Omega) \subset \C^n, & \qquad & \text{for every } & \Omega & \subset & \R^{n}_{\geq 0}, \\ \delta_{\Omega} & \coloneqq & \delta(\Omega) & \coloneqq & \sup \{ a \mid (a, \ldots, a) \in \Omega \}, & \qquad & \text{for every } & \Omega & \subset & \R^{n}_{\geq 0}. \end{IEEEeqnarray*} We call $\delta_\Omega$ the \textbf{diagonal} of $\Omega$. \end{definition} \begin{definition} \label{def:toric domain} A \textbf{toric domain} is a star-shaped domain $X$ such that $X = X(\Omega(X))$. A toric domain $X = X _{\Omega}$ is \begin{enumerate} \item \textbf{convex} if $\hat{\Omega} \coloneqq \{ (x_1, \ldots, x_n) \in \R^n \mid (|x_1|,\ldots,|x_n|) \in \Omega \} $ is convex; \item \textbf{concave} if $\R^n _{\geq 0} \setminus \Omega$ is convex. \end{enumerate} \end{definition} \begin{example} \phantomsection\label{exa:toric domains} Here we give some examples of toric domains. See \cref{fig:Toric domains} for a picture of the examples given below. \begin{enumerate} \item The \textbf{ellipsoid} is the convex and concave toric domain given by \begin{IEEEeqnarray*}{rCls+x*} E(a_1,\ldots,a_n) & \coloneqq & \p{c}{2}{ (z_1,\ldots,z_n) \in \C^n \ \Big| \ \sum_{j=1}^{n} \frac{\pi |z_j|^2}{a_j} \leq 1 } \\ \Omega_E(a_1,\ldots,a_n) & \coloneqq & \p{c}{2}{ (x_1,\ldots,x_n) \in \R^n _{\geq 0} \ \Big| \ \sum_{j=1}^{n} \frac{x_j}{a_j} \leq 1 }. \end{IEEEeqnarray*} The special case $B^{2n}(a) \coloneqq B(a) \coloneqq E(a,\ldots,a)$ is called the \textbf{ball}. \item The \textbf{polydisk} is the convex ``toric domain with corners'' given by \begin{IEEEeqnarray*}{rCls+x*} P(a_1,\ldots,a_n) & \coloneqq & \p{c}{2}{ (z_1,\ldots,z_n) \in \C^n \ \Big| \ \forall j=1,\ldots,n \colon \frac{\pi |z_j|^2}{a_j} \leq 1 } \\ \Omega_P(a_1,\ldots,a_n) & \coloneqq & \p{c}{2}{ (x_1,\ldots,x_n) \in \R^n _{\geq 0} \ \Big| \ \forall j=1,\ldots,n \colon \frac{x_j}{a_j} \leq 1 }. \end{IEEEeqnarray*} The special case $P^{2n}(a) \coloneqq P(a) \coloneqq P(a,\ldots,a)$ is called the \textbf{cube}. \item The \textbf{nondisjoint union of cylinders} is the concave ``noncompact toric domain with corners'' given by \begin{IEEEeqnarray*}{rCls+x*} N(a_1,\ldots,a_n) & \coloneqq & \p{c}{2}{ (z_1,\ldots,z_n) \in \C^n \ \Big| \ \exists j=1,\ldots,n \colon \frac{\pi |z_j|^2}{a_j} \leq 1 } \\ \Omega_N(a_1,\ldots,a_n) & \coloneqq & \p{c}{2}{ (x_1,\ldots,x_n) \in \R^n _{\geq 0} \ \Big| \ \exists j=1,\ldots,n \colon \frac{x_j}{a_j} \leq 1 }. \end{IEEEeqnarray*} In the special case where all the parameters are equal we write $N^{2n}(a) \coloneqq N(a) \coloneqq N(a,\ldots,a)$. \item The \textbf{cylinder} is the convex and concave ``noncompact toric domain'' given by \begin{IEEEeqnarray*}{rCls+x*} Z(a) & \coloneqq & \p{c}{2}{ (z_1,\ldots,z_n) \in \C^n \ \Big| \ \frac{\pi |z_1|^2}{a} \leq 1 } \\ \Omega_Z(a) & \coloneqq & \p{c}{2}{ (x_1,\ldots,x_n) \in \R^n _{\geq 0} \ \Big| \ \frac{x_1}{a} \leq 1 }. \end{IEEEeqnarray*} Note that $Z^{2n}(a) \coloneqq Z(a) = E(a,\infty,\ldots,\infty) = P(a,\infty,\ldots,\infty)$.
\end{enumerate} \end{example} \begin{figure}[ht] \centering \begin{tikzpicture} [ nn/.style={thick, color = gray}, zz/.style={thick, color = gray}, pp/.style={thick, color = gray}, bb/.style={thick, color = gray} ] \tikzmath{ \x = 1.5; \y = 3; \z = 1.0; coordinate \o, \a, \b, \c, \d, \e, \r, \s, \q; \o{ball} = (0 , 0 ) + 0*(\y+\z,0); \a{ball} = (\x, 0 ) + 0*(\y+\z,0); \b{ball} = (0 , \x) + 0*(\y+\z,0); \c{ball} = (\x, \x) + 0*(\y+\z,0); \d{ball} = (\x, \y) + 0*(\y+\z,0); \e{ball} = (\y, \x) + 0*(\y+\z,0); \r{ball} = (\y, 0 ) + 0*(\y+\z,0); \s{ball} = (0 , \y) + 0*(\y+\z,0); \q{ball} = (\y, \y) + 0*(\y+\z,0); \o{cube} = (0 , 0 ) + 1*(\y+\z,0); \a{cube} = (\x, 0 ) + 1*(\y+\z,0); \b{cube} = (0 , \x) + 1*(\y+\z,0); \c{cube} = (\x, \x) + 1*(\y+\z,0); \d{cube} = (\x, \y) + 1*(\y+\z,0); \e{cube} = (\y, \x) + 1*(\y+\z,0); \r{cube} = (\y, 0 ) + 1*(\y+\z,0); \s{cube} = (0 , \y) + 1*(\y+\z,0); \q{cube} = (\y, \y) + 1*(\y+\z,0); \o{cyld} = (0 , 0 ) + 2*(\y+\z,0); \a{cyld} = (\x, 0 ) + 2*(\y+\z,0); \b{cyld} = (0 , \x) + 2*(\y+\z,0); \c{cyld} = (\x, \x) + 2*(\y+\z,0); \d{cyld} = (\x, \y) + 2*(\y+\z,0); \e{cyld} = (\y, \x) + 2*(\y+\z,0); \r{cyld} = (\y, 0 ) + 2*(\y+\z,0); \s{cyld} = (0 , \y) + 2*(\y+\z,0); \q{cyld} = (\y, \y) + 2*(\y+\z,0); \o{ndju} = (0 , 0 ) + 3*(\y+\z,0); \a{ndju} = (\x, 0 ) + 3*(\y+\z,0); \b{ndju} = (0 , \x) + 3*(\y+\z,0); \c{ndju} = (\x, \x) + 3*(\y+\z,0); \d{ndju} = (\x, \y) + 3*(\y+\z,0); \e{ndju} = (\y, \x) + 3*(\y+\z,0); \r{ndju} = (\y, 0 ) + 3*(\y+\z,0); \s{ndju} = (0 , \y) + 3*(\y+\z,0); \q{ndju} = (\y, \y) + 3*(\y+\z,0); } \foreach \domain in {ball, cube, cyld, ndju}{ \draw[->] (\o{\domain}) -- (\r{\domain}); \draw[->] (\o{\domain}) -- (\s{\domain}); \node[anchor = north] at (\a{\domain}) {$1$}; \node[anchor = east] at (\b{\domain}) {$1$}; } \node[anchor = north east] at (\q{ball}) {$\Omega_B(1)$}; \fill[bb, opacity=0.5] (\o{ball}) -- (\a{ball}) -- (\b{ball}) -- cycle; \draw[bb] (\o{ball}) -- (\a{ball}) -- (\b{ball}) -- cycle; \node[anchor = north east] at (\q{cube}) {$\Omega_P(1)$}; \fill[pp, opacity=0.5] (\o{cube}) -- (\a{cube}) -- (\c{cube}) -- (\b{cube}) -- cycle; \draw[pp] (\o{cube}) -- (\a{cube}) -- (\c{cube}) -- (\b{cube}) -- cycle; \node[anchor = north east] at (\q{cyld}) {$\Omega_Z(1)$}; \fill[zz, opacity=0.5] (\o{cyld}) -- (\a{cyld}) -- (\d{cyld}) -- (\s{cyld}); \draw[zz] (\s{cyld}) -- (\o{cyld}) -- (\a{cyld}) -- (\d{cyld}); \node[anchor = north east] at (\q{ndju}) {$\Omega_N(1)$}; \fill[nn, opacity=0.5] (\o{ndju}) -- (\s{ndju}) -- (\d{ndju}) -- (\c{ndju}) -- (\e{ndju}) -- (\r{ndju}) -- cycle; \draw[nn] (\d{ndju}) -- (\c{ndju}) -- (\e{ndju}); \draw[nn] (\s{ndju}) -- (\o{ndju}) -- (\r{ndju}); \end{tikzpicture} \caption{Toric domains} \label{fig:Toric domains} \end{figure} \section{Symplectization of a contact manifold} Let $(M,\alpha)$ be a contact $(2n - 1)$-dimensional manifold. \begin{definition} \label{def:symplectization} The \textbf{symplectization} of $(M,\alpha)$ is the exact symplectic manifold $(\R \times M, e^r \alpha)$, where $r$ is the coordinate on $\R$. \end{definition} \begin{lemma} \label{lem:symplectization form} The form $\edv (e^r \alpha)$ is symplectic. \end{lemma} \begin{proof} The form $\edv (e^r \alpha)$ is exact, so it is closed. We show that $\edv (e^r \alpha)$ is nondegenerate.
\begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{( \edv (e^r \alpha) )^n}\\ \quad & = & ( e^r \edv r \wedge \alpha + e^r \edv \alpha )^n & \quad [\text{by the Leibniz rule}] \\ & = & e^{nr} \sum_{k=0}^{n} \binom{n}{k} ( \edv r \wedge \alpha)^k \wedge (\edv \alpha)^{n-k} & \quad [\text{by the binomial theorem}] \\ & = & n e^{n r} \edv r \wedge \alpha \wedge (\edv \alpha)^{n-1} & \quad [\text{since $\alpha^2 = 0$ and $(\edv \alpha)^n = 0$}] \\ & \neq & 0 & \quad [\text{since $\alpha \wedge (\edv \alpha)^{n-1}$ is a volume form on $M$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{lemma} \label{lem:symplectization lvf} The Liouville vector field of $(\R \times M, e^r \alpha)$ is $Z = \partial_r$. \end{lemma} \begin{proof} By definition of Liouville vector field, we need to show that $\iota_{\partial_r} \edv (e^r \alpha) = e^r \alpha$. \begin{IEEEeqnarray*}{rCls+x*} \iota_{\partial_r} \edv (e^r \alpha) & = & \iota_{\partial_r} (e^r \edv r \wedge \alpha + e^r \edv \alpha) & \quad [\text{by the Leibniz rule}] \\ & = & e^r (\edv r (\partial_r) \alpha - \alpha(\partial_r) \edv r + \iota_{\partial_r} \edv \alpha) & \quad [\text{since $\iota_{\partial_r}$ is a derivation}] \\ & = & e^r \alpha & \quad [\text{since $\alpha$ is a form on $M$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{example} Let $(L,g)$ be a Riemannian manifold. Recall that $(T^*L,\lambda)$ is an exact symplectic manifold and that $(S^*L, \alpha)$ is a hypersurface of contact type. Consider the symplectization of $S^*L$, which is $(\R \times S^*L, e^r \alpha)$. Then, the map $\R \times S^*L \longrightarrow T^*L \setminus L$ given by $(r,u) \longmapsto e^r u$ is a Liouville diffeomorphism. \end{example} Defining $R_{(r,x)} = R_x$ we can view the Reeb vector field of $M$ as a vector field in $\R \times M$. Analogously, we define a distribution $\xi$ on $\R \times M$ by $\xi_{(r,x)} = \xi_x$. Then, $T(\R \times M) = \p{<}{}{Z} \directsum \p{<}{}{R} \directsum \xi$. Let $H \colon \R \times M \longrightarrow \R$ be a function which depends only on the $\R$-coordinate (i.e. $H(r,x) = H(r)$). Define $h \coloneqq H \circ \exp^{-1} \colon \R_{> 0} \longrightarrow \R$ and $T(r) \coloneqq H'(r) / e^r = h'(e^r)$. \begin{lemma} \label{lem:reeb equals hamiltonian on symplectization} The Hamiltonian vector field of $H$ satisfies $\alpha(X_H) = T$ and $X_H = T R$. \end{lemma} \begin{proof} By \cref{lem:HR flow}, $X_H$ and $R$ are collinear. By definition of Reeb vector field, this implies that $X_H = \alpha(X_H) R$. It remains to show that $\alpha(X_H) = T$. For this, we compute \begin{IEEEeqnarray*}{rCls+x*} H' \edv r & = & \edv H & \quad [\text{by definition of exterior derivative}] \\ & = & - \iota _{X_H} \edv (e^r \alpha) & \quad [\text{by definition of Hamiltonian v.f.}] \\ & = & - \iota _{X_H} (e^r \edv r \wedge \alpha + e^r \edv \alpha) & \quad [\text{Leibniz rule for exterior derivative}] \\ & = & - e^r (\edv r(X_H) \alpha - \alpha(X_H) \edv r + \iota _{X_H} \edv \alpha) & \quad [\text{interior product is a derivation}]. \end{IEEEeqnarray*} Therefore, $H' \edv r = e^r \alpha(X_H) \edv r$, which implies that $\alpha(X_H) = H'/\exp = T$. \end{proof} \begin{corollary} \phantomsection\label{cor:hamiltonian orbits are reeb orbits} Suppose that $\gamma = (r,\rho) \colon S^1 \longrightarrow \R \times M$ is a $1$-periodic orbit of $X_H$, i.e. $\dot{\gamma}(t) = X_H(\gamma(t))$. Then: \begin{enumerate} \item $r \colon S^1 \longrightarrow \R$ is constant; \item $\rho \colon S^1 \longrightarrow M$ is a $T(r)$-periodic orbit of $R$, i.e.
$\dot{\rho}(t) = T(r) R(\rho(t))$. \end{enumerate} \end{corollary} \begin{proof} The function $r \colon S^1 \longrightarrow \R$ is constant because $X_H$ is tangent to $\{r\} \times M$. Since $\dot{\gamma}(t) = X_H(\gamma(t))$ and by \cref{lem:reeb equals hamiltonian on symplectization}, we conclude that $\dot{\rho}(t) = T(r) R(\rho(t))$. \end{proof} \begin{lemma} \label{lem:action in symplectization} Let $\gamma = (r,\rho) \colon S^1 \longrightarrow \R \times M$ be a $1$-periodic orbit of $X_H$ and consider its action, given by \begin{IEEEeqnarray*}{c+x*} \mathcal{A}_H(\gamma) = \int_{S^1}^{} \gamma^* (e^r \alpha) - \int_{S^1}^{} H(\gamma(t)) \, \edv t. \end{IEEEeqnarray*} Then, $\mathcal{A}_H(\gamma) \eqqcolon \mathcal{A}_H(r)$ only depends on $r$, and we have the following formulas for $\mathcal{A}_H$ and $\mathcal{A}'_H$ (as functions of $r$): \begin{IEEEeqnarray*}{rClCl} \mathcal{A}_H (r) & = & H' (r) - H (r) & = & e^{ r} h' (e^r) - h(e^r), \\ \mathcal{A}'_H(r) & = & H''(r) - H'(r) & = & e^{2r} h''(e^r). \end{IEEEeqnarray*} \end{lemma} \begin{proof} We show only that $\mathcal{A}_H(\gamma) = H'(r) - H(r)$, since the other formulas follow from this one by elementary calculus. \begin{IEEEeqnarray*}{rCls+x*} \mathcal{A}_H(\gamma) & = & \int_{S^1}^{} \gamma^* ( e^r \alpha) - \int_{S^1}^{} H(\gamma(t)) \, \edv t & \quad [\text{by definition of action}] \\ & = & \int_{S^1}^{} e^r \rho^* \alpha - \int_{0}^{1} H(r, \rho(t)) \, \edv t & \quad [\text{since $\gamma(t) = (r, \rho(t))$}] \\ & = & e^r \int_{S^1}^{} \rho^* \alpha - \int_{0}^{1} H(r) \, \edv t & \quad [\text{since $H = H(r)$}] \\ & = & e^r T(r) - H(r) & \quad [\text{by \cref{cor:hamiltonian orbits are reeb orbits}}] \\ & = & H'(r) - H(r) & \quad [\text{by definition of $T$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{definition} \label{def:J cylindrical} Let $J$ be an almost complex structure on $(\R \times M, e^r \alpha)$. We say that $J$ is \textbf{cylindrical} if $J(\partial_r) = R$, if $J(\xi) \subset \xi$, and if the almost complex structure $J \colon \xi \longrightarrow \xi$ is compatible with $\edv \alpha$ and independent of $r$. We denote by $\mathcal{J}(M)$ the set of such $J$. \end{definition} \begin{lemma} \label{lem:J cylindrical forms} If $J$ is cylindrical then $\alpha \circ J = \edv r$. \end{lemma} \begin{proof} It suffices to show that $\alpha \circ J = \edv r$ on $\partial_r$, $R$ and $V \in \xi$. \begin{IEEEeqnarray*}{rCrClCl+x*} \alpha \circ J (\partial_r) & = & \alpha (R) & = & 1 & = & \edv r (\partial_r) \\ \alpha \circ J (R) & = & - \alpha (\partial_r) & = & 0 & = & \edv r (R) \\ \alpha \circ J (V) & = & \alpha(J(V)) & = & 0 & = & \edv r (V). & \qedhere \end{IEEEeqnarray*} \end{proof} \section{Completion of a Liouville domain} \label{sec:completion of liouville domain} Let $(X,\lambda)$ be a Liouville domain and $\omega = \edv \lambda$. Our goal in this section is to define the completion of $(X,\lambda)$, which is an exact symplectic manifold denoted by $(\hat{X}, \hat{\lambda})$. Recall that $(\del X, \lambda|_{\del X})$ is contact. Consider the symplectization $(\R \times \del X, e^r \lambda|_{\del X})$ of $(\del X, \lambda|_{\del X})$. Let $Z$ be the Liouville vector field of $(X, \lambda)$, which is given by $\lambda = \iota_Z \omega$. Denote the flow of $Z$ by \begin{IEEEeqnarray*}{rrCl} \Phi_Z \colon & \R_{\leq 0} \times \del X & \longrightarrow & X \\ & (t,x) & \longmapsto & \phi^t_Z(x).
\end{IEEEeqnarray*} Since the vector field $Z$ is outward pointing at $\partial X$, the map $\Phi_Z$ is well-defined. Also, since $\Phi_Z$ is given by flowing along the vector field $Z$, it is an embedding. \begin{lemma} \label{lem:flow of liouville} The map $\Phi_Z$ is a Liouville embedding, i.e. $\Phi_Z^* \lambda = e^r \lambda|_{\del X}$. \end{lemma} \begin{proof} If $(t,x) \in \R_{\leq 0} \times \partial X$ and $(u,v) \in T_{(t,x)} (\R_{\leq 0} \times \partial X) = \R \oplus T_x \partial X$, then \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{(\Phi_Z^* \lambda)(u,v)} \\ \quad & = & \lambda(\dv \Phi_Z(t,x)(u,v)) & \quad [\text{by definition of pullback}] \\ & = & \lambda(\dv \Phi_Z(t,x)(0,v)) + \lambda(\dv \Phi_Z(t,x)(u,0)) & \quad [\text{by linearity of the derivative}] \\ & = & \lambda(\dv \phi^t_Z (x)(v)) + u \, \lambda(Z_{\phi^t_Z(x)}) & \quad [\text{by definition of $\Phi_Z$}]\\ & = & \lambda(\dv \phi^t_Z (x)(v)) + u \, \omega(Z_{\phi^t_Z(x)},Z_{\phi^t_Z(x)}) & \quad [\text{by definition of $Z$}] \\ & = & \lambda(\dv \phi^t_Z (x)(v)) & \quad [\text{since $\omega$ is antisymmetric}]\\ & = & ((\phi^t_Z)^* \lambda)(v) & \quad [\text{by definition of pullback}] \\ & = & e^t \lambda (v) & \quad [\text{by \cref{lem:mosers trick,lem:liouville vf}}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{definition} \label{def:completion of a Liouville domain} We define an exact symplectic manifold $(\hat{X},\hat{\lambda})$ called the \textbf{completion} of $(X,\lambda)$, as follows. As a smooth manifold, $\hat{X}$ is the gluing of $X$ and $\R \times \del X$ along the map $\Phi _{Z} \colon \R_{\leq 0} \times \del X \longrightarrow \Phi_Z(\R_{\leq 0} \times \del X)$. This gluing comes with embeddings \begin{IEEEeqnarray*}{rCls+x*} \iota_X \colon X & \longrightarrow & \hat{X}, \\ \iota_{\R \times \del X} \colon \R \times \del X & \longrightarrow & \hat{X}. \end{IEEEeqnarray*} The form $\hat{\lambda}$ is the unique $1$-form on $\hat{X}$ such that \begin{IEEEeqnarray*}{rCls+x*} \iota_X^* \hat{\lambda} & = & \lambda, \\ \iota _{\R \times \del X}^* \hat{\lambda} & = & e^r \lambda|_{\del X}. \end{IEEEeqnarray*} The symplectic form of $\hat{X}$ is given by $\hat{\omega} \coloneqq \edv \hat{\lambda}$, which satisfies \begin{IEEEeqnarray*}{rCls+x*} \iota_X^* \hat{\omega} & = & \omega, \\ \iota _{\R \times \del X}^* \hat{\omega} & = & \edv (e^r \lambda|_{\del X}). \end{IEEEeqnarray*} The Liouville vector field of $\hat{X}$ is the unique vector field $\hat{Z}$ such that $\iota_{\hat{Z}} \hat{\omega} = \hat{\lambda}$, which satisfies \begin{IEEEeqnarray*}{rRls+x*} Z & \text{ is $\iota_X$-related to } & \hat{Z}, \\ \partial_r & \text{ is $\iota_{\R \times \partial X}$-related to } & \hat{Z}. \end{IEEEeqnarray*} \end{definition} \begin{example} Let $(L,g)$ be a Riemannian manifold. Recall that $T^*L$ is an exact symplectic manifold, $S^*L$ is a hypersurface of contact type and that $D^*L$ is a Liouville domain. Also recall that there is a Liouville embedding $\varphi \colon \R \times S^* L \longrightarrow T^*L$ given by $\varphi(r,u) = e^r u$. 
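One can check directly that $\varphi^* \lambda = e^r \alpha$, using only the description of the canonical $1$-form as $\lambda_u = u \circ \dv \pi(u)$ (as in the proof of \cref{thm:flow reeb vs hamiltonian}), where $\pi \colon T^*L \longrightarrow L$ is the projection. Since $\pi \circ \varphi(r,u) = \pi(u)$, for every $(r,u) \in \R \times S^*L$ and every $w \in T_{(r,u)}(\R \times S^*L)$ we have
\begin{IEEEeqnarray*}{rCls+x*}
(\varphi^* \lambda)_{(r,u)}(w) & = & \lambda_{e^r u}(\dv \varphi(r,u)(w)) & \quad [\text{by definition of pullback}] \\
& = & e^r \, u (\dv (\pi \circ \varphi)(r,u)(w)) & \quad [\text{by definition of $\lambda$ and the chain rule}] \\
& = & e^r \, \alpha_u (\dv \operatorname{pr}(r,u)(w)) & \quad [\text{since $\pi \circ \varphi = \pi \circ \operatorname{pr}$ and $\alpha = \lambda|_{S^*L}$}] \\
& = & (e^r \alpha)_{(r,u)}(w) & \quad [\text{by definition of the symplectization form $e^r \alpha$}],
\end{IEEEeqnarray*}
where $\operatorname{pr} \colon \R \times S^*L \longrightarrow S^*L$ denotes the projection.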
Then, we can define a Liouville diffeomorphism $\hat{\varphi} \colon \widehat{D^*L} \longrightarrow T^*L$ as the unique map such that the following diagram commutes: \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \widehat{D^* L} \ar[dr, hook, two heads, "\hat{\varphi}"] & \R \times S^* L \ar[l, hook'] \ar[d, hook, "\varphi"] \\ D^* L \ar[u, hook] \ar[r, hook] & T^* L \end{tikzcd} \end{IEEEeqnarray*} \end{example} \begin{lemma} \label{lem:properties of completion} The diagram \begin{IEEEeqnarray*}{c} \begin{tikzcd}[ampersand replacement = \&] \R_{\leq 0} \times \del X \ar[d, swap, hook, "\Phi_Z"] \ar[r, hookrightarrow] \& \R \times \del X \ar[d, hookrightarrow, "\iota _{\R \times \del X}"] \ar[r, hookrightarrow] \& \R \times \hat{X} \ar[d, two heads, "\Phi _{\hat{Z}}"] \\ X \ar[r, swap, hookrightarrow, "\iota_X"] \& \hat{X} \ar[r, equals] \& \hat{X} \end{tikzcd} \end{IEEEeqnarray*} commutes. \end{lemma} \begin{proof} The left square commutes by definition of $\hat{X}$. To prove that the right square commutes, let $(t,x) \in \R \times \del X$. We wish to show that $\Phi_{\hat{Z}}(t,x) = \iota_{\R \times \del X}(t,x)$. \begin{IEEEeqnarray*}{rCls+x*} \iota_{\R \times \partial X} (t, x) & = & \iota_{\R \times \partial X} \circ \phi^t_{\partial_r} (0, x) & \quad [\text{by definition of flow of $\partial_r$}] \\ & = & \phi^t_{\hat{Z}} \circ \iota_{\R \times \partial X}(0, x) & \quad [\text{since $\partial_r$ is $\iota_{\R \times \partial X}$-related to $\hat{Z}$}] \\ & = & \phi^t_{\hat{Z}} \circ \iota_X(x) & \quad [\text{by definition of completion}] \\ & = & \Phi_{\hat{Z}}(t,x) & \quad [\text{by definition of $\Phi_{\hat{Z}}$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{lemma} \label{lem:codim 0 liouville emb preserves lvf} If $(X, \lambda_X)$ and $(Y, \lambda_Y)$ are Liouville domains and $\varphi \colon X \longrightarrow Y$ is a Liouville embedding of codimension $0$ then $Z_X$ is $\varphi$-related to $Z_Y$. \end{lemma} \begin{proof} For any $x \in X$ and $v \in T_x X$, \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\omega_Y (\dv \varphi(x) (Z_X|_x) - Z_Y|_{\varphi(x)}, \dv \varphi(x)(v))}\\ \quad & = & (\iota_{Z_X} \varphi^* \omega_Y - \varphi^* \iota_{Z_Y} \omega_Y) (v) & \quad [\text{by the definitions of $\iota_{Z_X}$, $\iota_{Z_Y}$, and $\varphi^*$}] \\ \quad & = & (\iota_{Z_X} \omega_X - \varphi^* \iota_{Z_Y} \omega_Y) (v) & \quad [\text{since $\varphi$ is a Liouville embedding}] \\ \quad & = & (\lambda_X - \varphi^* \lambda_Y) (v) & \quad [\text{by definition of Liouville vector field}] \\ \quad & = & 0 & \quad [\text{since $\varphi$ is a Liouville embedding}]. \end{IEEEeqnarray*} Since $\omega_Y$ is nondegenerate and $\varphi$ is a $0$-codimensional embedding, the result follows. \end{proof} We will now explain how to view the construction of taking the completion of a Liouville domain as a functor. Let $(X,\lambda_X)$, $(Y,\lambda_Y)$ be Liouville domains and $\varphi \colon X \longrightarrow Y$ be a Liouville embedding such that $Z_X$ is $\varphi$-related to $Z_Y$ (by \cref{lem:codim 0 liouville emb preserves lvf}, this is true whenever $\varphi$ is $0$-codimensional, although here we assume only that the Liouville vector fields are related).
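Before giving the general definition, it may help to keep a basic instance in mind. For $0 < r \leq 1$, the inclusion $\varphi \colon D^*_r L \longrightarrow D^* L$ is a Liouville embedding of codimension $0$, so by \cref{lem:codim 0 liouville emb preserves lvf} the Liouville vector fields of $D^*_r L$ and $D^* L$ are $\varphi$-related. Arguing as in the example above, both completions can be identified with $T^*L$, and under these identifications one can check that the induced embedding $\hat{\varphi} \colon \widehat{D^*_r L} \longrightarrow \widehat{D^* L}$ constructed below is simply the identity of $T^*L$: on $D^*_r L$ it is the inclusion, and on $\R \times S^*_r L$ it sends $(t,u)$ to the time-$t$ Liouville flow $e^t u$ in $T^*L$, which is exactly how $\R \times S^*_r L$ is glued into $T^*L$ in the first place.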
We wish to define an embedding $\varphi \colon \hat{X} \longrightarrow \hat{Y}$, using the following diagram as a guide (we will show that this diagram commutes in \cref{lem:diagram for map on completions commutes}): \begin{IEEEeqnarray}{c} \plabel{eq:diagram for induced map on completions} \begin{tikzcd}[ampersand replacement = \&, row sep=scriptsize, column sep=0.2em] \& \R_{\leq 0} \times \del X \ar[dd, near end, swap, "\Phi_{Z_X}"] \ar[rr, "{\iota _{\R_{\leq 0}} \times \id_{\del X}}"] \& \& \R \times \del X \ar[dd, near start, swap, "{\iota _{\R \times \del X}}"] \ar[rr, "{\id \times \iota _{ \del X }}"] \& \& \R \times X \ar[ld, swap, "\id \times {\varphi}"] \ar[dd, near end] \ar[rr, "{\id \times \iota_X}"] \& \& \R \times \hat{X} \ar[ld,swap, "\id \times \hat{\varphi}"]\ar[dd, "\Phi _{\hat{Z}_X}"] \\ \R_{\leq 0} \times \del Y \ar[dd, swap, "\Phi_{Z_Y}"] \ar[rr, crossing over] \& \& \R \times \del Y \ar[rr, crossing over] \& \& \R \times Y \ar[rr, crossing over, near end, "\hphantom{-}\id \times \iota_Y"] \& \& \R \times \hat{Y} \& \\ \& X \ar[ld, "{\varphi}"] \ar[rr, near end, "\iota_X"] \& \& \hat{X} \ar[ld, "\hat{\varphi}"] \ar[rr, equals] \& \& \hat{X} \ar[ld, "\hat{\varphi}"]\ar[rr, equals] \& \& \hat{X} \ar[ld, "\hat{\varphi}"]\\ Y \ar[rr, swap, "\iota_Y"] \& \& \hat{Y} \ar[uu, crossing over, near start, leftarrow, "{\iota _{\R \times \del Y}}"]\ar[rr, equals] \& \& \hat{Y} \ar[uu, near start, crossing over, leftarrow]\ar[rr, equals] \& \& \hat{Y} \ar[uu, near start, crossing over, leftarrow, "\Phi _{\hat{Z}_Y}"]\& \end{tikzcd} \IEEEeqnarraynumspace \end{IEEEeqnarray} \begin{definition} \label{def:embedding on completions coming from Liouville embedding} We define an embedding $\hat{\varphi} \colon \hat{X} \longrightarrow \hat{Y}$ by \begin{IEEEeqnarray*}{rCls+x*} \hat{\varphi} \circ \iota_X & \coloneqq & \iota_Y \circ \varphi, \\ \hat{\varphi} \circ \iota_{\R \times \del X} & \coloneqq & \Phi_{\hat{Z}_Y} \circ (\id_ \R \times (\iota_Y \circ \varphi \circ \iota_{\partial X})). \end{IEEEeqnarray*} \end{definition} For $\hat{\varphi}$ to be well-defined, we need to check that the definitions of $\varphi$ on each region agree on the overlap. \begin{lemma} \label{def:map on completions is well defined} The map $\hat{\varphi}$ is well-defined, i.e. \begin{IEEEeqnarray*}{c} \iota_Y \circ \varphi \circ \Phi _{Z_X} = \Phi_{\hat{Z}_Y} \circ (\id_ \R \times (\iota_Y \circ \varphi \circ \iota_{\partial X})) \circ (\iota _{\R_{\leq 0}} \times \id _{\del X}). \end{IEEEeqnarray*} \end{lemma} \begin{proof} It suffices to assume that $(t,x) \in \R_{\leq 0} \times \del X$ and to prove that $\iota_Y \circ \varphi \circ \Phi _{Z_X}(t,x) = \Phi _{\hat{Z}_Y}(t,\iota_Y(\varphi(x)))$. \begin{IEEEeqnarray*}{rCls+x*} \iota_Y \circ \varphi \circ \Phi _{Z_X}(t,x) & = & \iota_Y \circ \varphi \circ \phi^t _{Z_X}(x) & \quad [\text{by definition of $\Phi _{Z_X}$}] \\ & = & \iota_Y \circ \phi^t _{Z_Y} \circ \varphi(x) & \quad [\text{since $Z_X$ is $\varphi$-related to $Z_Y$}] \\ & = & \phi^t _{\hat{Z}_Y} \circ \iota_Y \circ \varphi(x) & \quad [\text{since $Z_Y$ is $\iota_Y$-related to $\hat{Z}_Y$}] \\ & = & \Phi _{\hat{Z}_Y}(t,\iota_Y(\varphi(x))) & \quad [\text{by definition of $\Phi _{\hat{Z}_Y}$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{lemma} \label{def:map on completions is liouville embedding} The map $\hat{\varphi}$ is a Liouville embedding, i.e. $\hat{\varphi}^* \hat{\lambda}_Y = \hat{\lambda}_X$. 
\end{lemma} \begin{proof} We need to show that $\hat{\varphi}^* \hat{\lambda}_Y = \hat{\lambda}_X$, which is equivalent to \begin{IEEEeqnarray}{rCls+x*} \iota_X^* \hat{\varphi}^* \hat{\lambda}_Y & = & \iota_X^* \hat{\lambda}_X, \plabel{eq:map on completion is liouville embedding 1} \\ \iota_{\R \times \del X}^* \hat{\varphi}^* \hat{\lambda}_Y & = & \iota_{\R \times \del X}^* \hat{\lambda}_X. \plabel{eq:map on completion is liouville embedding 2} \end{IEEEeqnarray} We prove Equation \eqref{eq:map on completion is liouville embedding 1}. \begin{IEEEeqnarray*}{rCls+x*} \iota_X^* \hat{\varphi}^* \hat{\lambda}_Y & = & (\hat{\varphi} \circ \iota_X)^* \hat{\lambda}_Y & \quad [\text{by functoriality of pullbacks}] \\ & = & (\iota_Y \circ \varphi)^* \hat{\lambda}_Y & \quad [\text{by definition of $\hat{\varphi}$}] \\ & = & \varphi^* \iota_Y^* \hat{\lambda}_Y & \quad [\text{by functoriality of pullbacks}] \\ & = & \varphi^* \lambda_Y & \quad [\text{by definition of $\hat{\lambda}_Y$}] \\ & = & \lambda_X & \quad [\text{since $\varphi$ is a Liouville embedding}] \\ & = & \iota_X^* \hat{\lambda}_X & \quad [\text{by definition of $\hat{\lambda}_X$}]. \end{IEEEeqnarray*} We prove Equation \eqref{eq:map on completion is liouville embedding 2}. \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\iota _{\R \times \del X}^* \hat{\varphi}^* \hat{\lambda}_Y}\\ \quad & = & (\hat{\varphi} \circ \iota _{\R \times \del X})^* \hat{\lambda}_Y & \quad [\text{by functoriality of pullbacks}] \\ & = & ( \Phi _{\hat{Z}_Y} \circ (\id_ \R \times (\iota_Y \circ \varphi \circ \iota _{\del X})) )^* \hat{\lambda}_Y & \quad [\text{by definition of $\hat{\varphi}$}] \\ & = & (\id_ \R \times (\iota_Y \circ \varphi \circ \iota _{\del X}))^* \Phi _{\hat{Z}_Y}^* \hat{\lambda}_Y & \quad [\text{by functoriality of pullbacks}] \\ & = & (\id_ \R \times (\iota_Y \circ \varphi \circ \iota _{\del X}))^* e^r \hat{\lambda}_Y & \quad [\text{by \cref{lem:mosers trick,lem:liouville vf}}] \\ & = & e^r \iota _{\del X}^* \varphi^* \iota_Y^* \hat{\lambda}_Y & \quad [\text{by functoriality of pullbacks}] \\ & = & e^r \iota _{\del X}^* \varphi^* \lambda_Y & \quad [\text{by definition of $\hat{\lambda}_Y$}] \\ & = & e^r \iota _{\del X}^* \lambda_X & \quad [\text{since $\varphi$ is a Liouville embedding}] \\ & = & \iota^* _{\R \times \del X} \hat{\lambda}_X & \quad [\text{by definition of $\hat{\lambda}_X$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{lemma} \label{lem:liouville vector fields on completion are related} The Liouville vector fields $\hat{Z}_X$ and $\hat{Z}_Y$ are $\hat{\varphi}$-related. \end{lemma} \begin{proof} We need to show that \begin{IEEEeqnarray}{Rls+x*} Z_X \text{ is $(\iota_Y \circ \varphi)$-related to } & \hat{Z}_Y, \plabel{eq:liouville vector fields on completion are related 1} \\ \partial_r \text{ is $(\Phi_{\hat{Z}_Y} \circ (\id_ \R \times (\iota_Y \circ \varphi \circ \iota_{\partial X})))$-related to } & \hat{Z}_Y. \plabel{eq:liouville vector fields on completion are related 2} \end{IEEEeqnarray} Here, \eqref{eq:liouville vector fields on completion are related 1}, follows because $Z_X$ is $\varphi$-related to $Z_Y$. 
To prove \eqref{eq:liouville vector fields on completion are related 2}, notice that for every $(t,x) \in \R \times \partial X$, we have $\partial_r = (1,0) \in \R \oplus T_x \partial X$ and therefore \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\dv ( \Phi_{\hat{Z}_Y} \circ (\id_ \R \times (\iota_Y \circ \varphi \circ \iota_{\partial X})) )(t,x) (1,0)}\\ \quad & = & \dv \Phi_{\hat{Z}_Y} (t, \varphi(x)) (1, 0) & \quad [\text{by the chain rule}] \\ & = & \hat{Z}_Y(t, \varphi(x)) & \quad [\text{by definition of $\Phi_{\hat{Z}_Y}$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{lemma} \label{lem:diagram for map on completions commutes} Diagram \eqref{eq:diagram for induced map on completions} commutes. \end{lemma} \begin{proof} We have already proven in \cref{lem:properties of completion} that the squares on the front and back commute. The first square on the bottom commutes by definition of $\hat{\varphi}$. The other two squares on the bottom commute trivially. The top square commutes because $\hat{\varphi} \circ \iota_X = \iota_Y \circ \varphi$ by definition of $\hat{\varphi}$. We prove that the right square commutes. For $(t,x) \in \R \times \hat{X}$, \begin{IEEEeqnarray*}{rCls+x*} \hat{\varphi} \circ \Phi _{\hat{Z}_X}(t,x) & = & \hat{\varphi} \circ \phi^t _{\hat{Z}_X}(x) & \quad [\text{by definition of $\Phi _{\hat{Z}_X}$}] \\ & = & \phi^t _{\hat{Z}_Y} \circ \hat{\varphi} (x) & \quad [\text{by \cref{lem:liouville vector fields on completion are related}}] \\ & = & \Phi _{\hat{Z}_Y} (t, \hat{\varphi}(x)) & \quad [\text{by definition of $\Phi _{\hat{Z}_Y}$}] \\ & = & \Phi _{\hat{Z}_Y} \circ (\id_ \R \times \hat{\varphi})(x) & \quad [\text{by definition of $\id_ \R \times \hat{\varphi}$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} Finally, we check that the induced maps on the completions behave nicely with respect to compositions. \begin{proposition} \phantomsection\label{prop:completion is a functor} The operation of taking the completion is a functor. \end{proposition} \begin{proof} We show that identities are preserved. Let $(X,\lambda)$ be a Liouville domain. We wish to prove that $\widehat{\id_X} = \id _{\hat{X}} \colon \hat{X} \longrightarrow \hat{X}$, which is equivalent to \begin{IEEEeqnarray}{rCls+x*} \widehat{\id_X} \circ \iota_X & = & \id_{\hat{X}} \circ \iota_X, \plabel{eq:completion functor identity 1} \\ \widehat{\id_X} \circ \iota_{\R \times \del X} & = & \id_{\hat{X}} \circ \iota_{\R \times \del X}. \plabel{eq:completion functor identity 2} \end{IEEEeqnarray} We prove Equation \eqref{eq:completion functor identity 1}. \begin{IEEEeqnarray*}{rCls+x*} \widehat{\id_X} \circ \iota_X & = & \iota_X \circ \id_X & \quad [\text{by definition of $\widehat{\id_X}$}] \\ & = & \iota_X & \quad [\text{since $\id_X$ is the identity map}] \\ & = & \id _{\hat{X}} \circ \iota_X & \quad [\text{since $\id_{\hat{X}}$ is the identity map}]. \end{IEEEeqnarray*} We prove Equation \eqref{eq:completion functor identity 2}. \begin{IEEEeqnarray*}{rCls+x*} \widehat{\id_X} \circ \iota _{\R \times \del X} & = & \Phi_{\hat{Z}} \circ (\id_\R \times (\iota_X \circ \id_X \circ \iota_{\partial X})) & \quad [\text{by definition of $\widehat{\id_X}$}] \\ & = & \id_{\hat{X}} \circ \iota_{\R \times \del X} & \quad [\text{by \cref{lem:properties of completion}}]. \end{IEEEeqnarray*} Now, we prove that compositions are preserved. Let $(X,\lambda_X)$, $(Y,\lambda_Y)$ and $(W,\lambda_W)$ be Liouville domains and $f \colon X \longrightarrow Y$ and $g \colon Y \longrightarrow W$ be Liouville embeddings. 
We wish to prove that $\widehat{g \circ f} = \hat{g} \circ \hat{f}$, which is equivalent to \begin{IEEEeqnarray}{rCls+x*} \widehat{g \circ f} \circ \iota_X & = & \hat{g} \circ \hat{f} \circ \iota_X, \plabel{eq:completion functor composition 1} \\ \widehat{g \circ f} \circ \iota_{\R \times \del X} & = & \hat{g} \circ \hat{f} \circ \iota_{\R \times \del X}. \plabel{eq:completion functor composition 2} \end{IEEEeqnarray} We prove Equation \eqref{eq:completion functor composition 1}. \begin{IEEEeqnarray*}{rCls+x*} \widehat{g \circ f} \circ \iota_X & = & \iota_W \circ g \circ f & \quad [\text{by definition of $\widehat{g \circ f}$}] \\ & = & \hat{g} \circ \iota_Y \circ f & \quad [\text{by definition of $\hat{g}$}]\\ & = & \hat{g} \circ \hat{f} \circ \iota_X & \quad [\text{by definition of $\hat{f}$}]. \end{IEEEeqnarray*} We prove Equation \eqref{eq:completion functor composition 2}. \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\widehat{g \circ f} \circ \iota _{\R \times \del X}} \\ \quad & = & \Phi_{\hat{Z}_W} \circ (\id_{\R} \times (\iota_W \circ g \circ f \circ \iota_{\partial X})) & \quad [\text{by definition of $\widehat{g \circ f}$}] \\ & = & \Phi_{\hat{Z}_W} \circ (\id_{\R} \times (\hat{g} \circ \iota_Y \circ f \circ \iota_{\partial X})) & \quad [\text{by definition of $\hat{g}$}]\\ & = & \Phi_{\hat{Z}_W} \circ (\id_{\R} \times \hat{g}) \circ (\id_{\R} \times (\iota_Y \circ f \circ \iota_{\partial X})) & \\ & = & \hat{g} \circ \Phi_{\hat{Z}_Y} \circ (\id_{\R} \times (\iota_Y \circ f \circ \iota_{\partial X})) & \quad [\text{by diagram \eqref{eq:diagram for induced map on completions}}] \\ & = & \hat{g} \circ \hat{f} \circ \iota _{\R \times \del X} & \quad [\text{by definition of $\hat{f}$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \chapter{Indices} \label{chp:indices} \section{Maslov indices} \label{sec:maslov indices} In this section, our goal is to define the Maslov index of a loop of symplectic matrices and the Maslov index of a loop of Lagrangian subspaces. Our presentation is based on \cite{mcduffIntroductionSymplecticTopology2017}. We start by recalling relevant facts and notation about symplectic linear algebra. Let $V$ be a finite dimensional vector space. The vector spaces $V \directsum V^*$ and $V^* \oplus V$ admit symplectic structures given by \begin{IEEEeqnarray*}{rCls+x*} \omega_{V \directsum V^*}((a,\alpha),(b,\beta)) & = & \beta(a) - \alpha(b), \\ \omega_{V^* \directsum V}((\alpha,a),(\beta,b)) & = & \alpha(b) - \beta(a). \end{IEEEeqnarray*} If $V$ has an inner product $\p{<}{}{\cdot,\cdot}$, then we define a symplectic structure on $V \directsum V$ by \begin{IEEEeqnarray}{c+x*} \plabel{eq:symplectic structure on v + v} \omega_{V \directsum V}((u,v),(x,y)) = \p{<}{}{u,y} - \p{<}{}{v,x}. \end{IEEEeqnarray} In this case, the maps \begin{IEEEeqnarray*}{rrClCrrCl} \phi \colon & V \directsum V & \longrightarrow & V \directsum V^* & \qquad & \psi \colon & V \directsum V & \longrightarrow & V^* \directsum V \\ & (x,y) & \longmapsto & (x,\p{<}{}{y,\cdot}), & & & (x,y) & \longmapsto & (\p{<}{}{x,\cdot},y) \end{IEEEeqnarray*} are isomorphisms of symplectic vector spaces. For each $n$, define the $2n \times 2n$ matrices \begin{IEEEeqnarray*}{c+x*} J_0 = \begin{bmatrix} 0 & -\idm \\ \idm & 0 \end{bmatrix}, \quad \Omega_0 = \begin{bmatrix} 0 & \idm \\ -\idm & 0 \end{bmatrix}. 
\end{IEEEeqnarray*} The canonical symplectic structure of $\R^{2n} = \R^n \directsum \R^n$, denoted $\omega_0$, is defined as in Equation \eqref{eq:symplectic structure on v + v} (where we use the Euclidean inner product). For $\mathbf{u} = (u,v) \in \R^{2n}$ and $\mathbf{x} = (x,y) \in \R^{2n}$, $\omega_0(\mathbf{u},\mathbf{x})$ is given by \begin{IEEEeqnarray*}{rCls+x*} \omega_0((u,v),(x,y)) & = & \p{<}{}{u,y} - \p{<}{}{v,x} \\ & = & \mathbf{u}^T \Omega_0 \mathbf{x}. \end{IEEEeqnarray*} The \textbf{symplectic group} is given by \begin{IEEEeqnarray*}{c+x*} \operatorname{Sp}(2n) \coloneqq \{ A \in \operatorname{GL}(2n,\R) \ | \ A^T \Omega_0 A = \Omega_0 \}. \end{IEEEeqnarray*} Denote by $C(S^1,\operatorname{Sp}(2n))$ the set of continuous maps from $S^1$ to $\operatorname{Sp}(2n)$, i.e. the set of loops of symplectic matrices. \begin{theorem}[{\cite[Theorem 2.2.12]{mcduffIntroductionSymplecticTopology2017}}] \phantomsection\label{thm:maslov sympl properties} There exists a unique function \begin{IEEEeqnarray*}{c+x*} \maslov \colon C(S^1,\operatorname{Sp}(2n)) \longrightarrow \Z, \end{IEEEeqnarray*} called the \emph{\textbf{Maslov index}}, which satisfies the following properties: \begin{description} \item[(Homotopy)] The Maslov index descends to an isomorphism $\maslov \colon \pi_1(\operatorname{Sp}(2n)) \longrightarrow \Z$. \item[(Product)] If $A_1,A_2 \in C(S^1, \operatorname{Sp}(2n))$ then $\maslov(A_1 A_2) = \maslov(A_1) + \maslov(A_2)$. \item[(Direct sum)] If $A_i \in C(S^1, \operatorname{Sp}(2 n_i))$ for $i=1,2$ then $\maslov(A_1 \directsum A_2) = \maslov(A_1) + \maslov(A_2)$. \item[(Normalization)] If $A \in C(S^1, \operatorname{Sp}(2))$ is given by \begin{IEEEeqnarray*}{c+x*} A(t) = \begin{bmatrix} \cos(2 \pi t) & -\sin(2 \pi t) \\ \sin(2 \pi t) & \cos(2 \pi t) \end{bmatrix} \end{IEEEeqnarray*} then $\maslov(A) = 1$. \end{description} \end{theorem} Let $(V,\omega)$ be a symplectic vector space. A subspace $W$ of $V$ is \textbf{Lagrangian} if $\dim W = 1/2 \dim V$ and $\omega|_W = 0$. The \textbf{Lagrangian Grassmannian} of $(V,\omega)$, denoted $\mathcal{L}(V,\omega)$, is the set of Lagrangian subspaces of $(V,\omega)$. Denote $\mathcal{L}(n) = \mathcal{L}(\R ^{2n},\omega_0)$. \begin{theorem}[{\cite[Theorem 2.3.7]{mcduffIntroductionSymplecticTopology2017}}] \label{thm:maslov lagrangian properties} There exists a unique function \begin{IEEEeqnarray*}{c+x*} \maslov \colon C(S^1,\mathcal{L}(n)) \longrightarrow \Z, \end{IEEEeqnarray*} called the \emph{\textbf{Maslov index}}, which satisfies the following properties: \begin{description} \item[(Homotopy)] The Maslov index descends to an isomorphism $\maslov \colon \pi_1(\mathcal{L}(n)) \longrightarrow \Z$. \item[(Product)] If $W \in C(S^1,\mathcal{L}(n))$ and $A \in C(S^1,\operatorname{Sp}(2 n))$ then $\mu(AW) = \mu(W) + 2 \mu(A)$. \item[(Direct sum)] If $W_i \in C(S^1,\mathcal{L}(n_i))$ for $i = 1,2$ then $\mu(W_1 \directsum W_2) = \mu(W_1) + \mu(W_2)$. \item[(Normalization)] If $W \in C(S^1, \mathcal{L}(n))$ is given by $W(t) = e^{\pi i t} \R \subset \C$ then $\mu(W) = 1$. \item[(Zero)] A constant loop has Maslov index zero. \end{description} \end{theorem} \section{Conley--Zehnder index} In this section we define the Conley--Zehnder index of a path of symplectic matrices.
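As a warm-up, and to fix intuition for the loop property appearing below, note that \cref{thm:maslov sympl properties} already determines the Maslov index of the powers of the normalization loop: if $A \in C(S^1, \operatorname{Sp}(2))$ is the loop of rotations from the normalization property and $k \in \Z$, then the pointwise power $A^k(t) \coloneqq A(t)^k$ is the loop of rotations by angle $2 \pi k t$, and the product and homotopy properties give
\begin{IEEEeqnarray*}{c+x*}
\maslov(A^k) = k \, \maslov(A) = k,
\end{IEEEeqnarray*}
so the Maslov index of a loop of rotations simply records its winding number.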
We define \begin{IEEEeqnarray*}{rCls+x*} \operatorname{Sp}^\star(2n) & \coloneqq & \{ A \in \operatorname{Sp}(2n) \ | \ \det(A - \idm) \neq 0 \}, \\ \mathrm{SP}(n) & \coloneqq & \left\{ A \colon [0,1] \longrightarrow \mathrm{Sp}(2n) \ \middle\vert \begin{array}{l} A \text{ is continuous, } \\ A(0) = \idm, \\ A(1) \in \mathrm{Sp}^{\star}(2n) \end{array} \right\}. \end{IEEEeqnarray*} The following theorem characterizes the Conley--Zehnder index of a path of symplectic matrices. Originally, this result has appeared in \cite{salamonMorseTheoryPeriodic1992} and \cite{salamonLecturesFloerHomology1999}. However, we will use a restatement from \cite{guttConleyZehnderIndex2012}. Recall that if $S$ is a symmetric matrix, its \textbf{signature}, denoted by $\signature S$, is the number of positive eigenvalues of $S$ minus the number of negative eigenvalues of $S$. \begin{theorem}[{\cite[Propositions 35 and 37]{guttConleyZehnderIndex2012}}] \phantomsection\label{thm:properties of cz} There exists a unique function \begin{IEEEeqnarray*}{c+x*} \conleyzehnder \colon \operatorname{SP}(n) \longrightarrow \Z, \end{IEEEeqnarray*} called the \emph{\textbf{Conley--Zehnder index}}, which satisfies the following properties: \begin{description} \item[(Naturality)] If $B \colon [0,1] \longrightarrow \operatorname{Sp}(2n)$ is a continuous path, then $\conleyzehnder(B A B ^{-1}) = \conleyzehnder(A)$; \item[(Homotopy)] $\conleyzehnder$ is constant on each component of $\operatorname{SP}(n)$; \item[(Zero)] If $A(s)$ has no eigenvalue on the unit circle for $s > 0$ then $\conleyzehnder(A) = 0$; \item[(Product)] If $A_i \in \operatorname{SP}(n_i)$ for $i=1,2$ then $\conleyzehnder(A_1 \directsum A_2) = \conleyzehnder(A_1) + \conleyzehnder(A_2)$; \item[(Loop)] If $B \in C(S^1, \operatorname{Sp}(2n))$ and $B(0) = B(1) = \idm$ then $\conleyzehnder(B A) = \conleyzehnder(A) + 2 \maslov(B)$. \item[(Signature)] If $S$ is a symmetric nondegenerate $2n \times 2n$-matrix with operator norm $\p{||}{}{S} < 2 \pi$ and $A(t) = \exp(J_0 S t)$, then $\conleyzehnder(A) = \frac{1}{2} \signature (S)$; \item[(Determinant)] ${n - \conleyzehnder(A)}$ is even if and only if $\det (\idm - A(1)) > 0$; \item[(Inverse)] $\conleyzehnder(A ^{-1}) = \conleyzehnder (A^T) = - \conleyzehnder(A)$. \end{description} \end{theorem} \begin{remark} By \cite[Proposition 37]{guttConleyZehnderIndex2012}, the homotopy, loop and signature properties are enough to determine the Conley--Zehnder index uniquely. \end{remark} We finish this section with a result which we will use later on to compute a Conley--Zehnder index. \begin{proposition}[{\cite[Proposition 41]{guttConleyZehnderIndex2012}}] \label{prp:gutts cz formula} Let $S$ be a symmetric, nondegenerate $2 \times 2$-matrix and $T > 0$ be such that $\exp(T J_0 S) \neq \idm$. Consider the path of symplectic matrices $A \colon [0,T] \longrightarrow \operatorname{Sp}(2)$ given by \begin{IEEEeqnarray*}{c+x*} A(t) \coloneqq \exp(t J_0 S). \end{IEEEeqnarray*} Let $a_1$ and $a_2$ be the eigenvalues of $S$ and $\signature S$ be its signature. Then, \begin{IEEEeqnarray*}{c+x*} \conleyzehnder(A) = \begin{cases} \p{}{1}{\frac{1}{2} + \p{L}{1}{\frac{\sqrt{a_1 a_2} T}{2 \pi}}} \signature S & \text{if } \signature S \neq 0, \\ 0 & \text{if } \signature S = 0. \end{cases} \end{IEEEeqnarray*} \end{proposition} \section{First Chern class} Denote by $\mathbf{Man}^2$ the category of manifolds which are $2$-dimensional, connected, compact, oriented and with empty boundary. 
We will give a definition of the first Chern class of a symplectic vector bundle $E \longrightarrow \Sigma$ where $\Sigma \in \mathbf{Man}^2$. Our presentation is based on \cite{mcduffIntroductionSymplecticTopology2017}. We will start by setting up some categorical language. Define a contravariant functor $\mathbf{Man}^2 \longrightarrow \mathbf{Set}$: \begin{IEEEeqnarray*}{rrCl} \mathcal{E} \colon & \mathbf{Man}^2 & \longrightarrow & \mathbf{Set} \\ & \Sigma & \longmapsto & \mathcal{E}(\Sigma) \coloneqq \{ \text{symplectic vector bundles with base $\Sigma$} \}/\sim \\ & f \downarrow & \longmapsto & \uparrow f^* \\ & \Sigma' & \longmapsto & \mathcal{E}(\Sigma') \coloneqq \{ \text{symplectic vector bundles with base $\Sigma'$} \}/\sim, \end{IEEEeqnarray*} where $\sim$ is the equivalence relation coming from isomorphisms of symplectic vector bundles. Define also the following contravariant functors $\mathbf{Man}^2 \longrightarrow \mathbf{Set}$: \begin{IEEEeqnarray*}{rrCl} H^2 \coloneqq H^2(-;\Z) \colon & \mathbf{Man}^2 & \longrightarrow & \mathbf{Set}, \\ \\ H_2^* \coloneqq \operatorname{Hom}(H_2(-;\Z),\Z) \colon & \mathbf{Man}^2 & \longrightarrow & \mathbf{Set}, \\ \\ \mathcal{Z} \colon & \mathbf{Man}^2 & \longrightarrow & \mathbf{Set} \\ & \Sigma & \longmapsto & \mathcal{Z}(\Sigma) \coloneqq \Z \\ & f \downarrow & \longmapsto & \uparrow \times \deg f \\ & \Sigma' & \longmapsto & \mathcal{Z}(\Sigma') \coloneqq \Z. \end{IEEEeqnarray*} We have a natural transformation $\alpha \colon H^2 \longrightarrow H_2^*$ which is given by \begin{IEEEeqnarray*}{rrCl} \alpha_\Sigma \colon & H^2(\Sigma;\Z) & \longrightarrow & \operatorname{Hom}(H_2(\Sigma;\Z),\Z) \\ & [\omega] & \longmapsto & \alpha_\Sigma([\omega]), \end{IEEEeqnarray*} where $\alpha_\Sigma([\omega])([\sigma]) = [\omega(\sigma)]$. By the universal coefficient theorem for cohomology (see for example \cite{rotmanIntroductionHomologicalAlgebra2009}), $\alpha_\Sigma$ is surjective. Both $H^2(\Sigma;\Z)$ and $\operatorname{Hom}(H_2(\Sigma;\Z),\Z)$ are isomorphic to $\Z$, since $\Sigma \in \mathbf{Man}^2$. Therefore, $\alpha$ is a natural isomorphism. We also have a natural isomorphism $\operatorname{ev} \colon H_2^* \longrightarrow \mathcal{Z}$, given by \begin{IEEEeqnarray*}{rrCl} \operatorname{ev}_\Sigma \colon & \operatorname{Hom}(H_2(\Sigma;\Z),\Z) & \longrightarrow & \Z \\ & \phi & \longmapsto & \phi([\Sigma]). \end{IEEEeqnarray*} As we will see, the first Chern class is a natural transformation $c_1 \colon \mathcal{E} \longrightarrow H^2$ and the first Chern number is a natural transformation (which we denote by the same symbol) $c_1 \colon \mathcal{E} \longrightarrow \mathcal{Z}$. These functors and natural transformations will all fit into the following commutative diagram: \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd}[ampersand replacement = \&] \mathcal{E} \ar[r, "c_1"] \ar[rrr, bend right=50, swap, "c_1"] \& H^2 \ar[r, hook, two heads, "\alpha"] \& H_2^* \ar[r, hook, two heads, "\operatorname{ev}"] \& \mathcal{Z}. \end{tikzcd} \end{IEEEeqnarray*} Therefore, the first Chern class determines and is determined by the first Chern number. More precisely, if $E \longrightarrow \Sigma$ is a symplectic vector bundle then the first Chern number of $E$ equals the first Chern class of $E$ evaluated on $\Sigma$: \begin{IEEEeqnarray}{c+x*} \plabel{eq:first chern class vs number} c_1(E) = c_1(E)[\Sigma]. 
\end{IEEEeqnarray} \begin{definition}[{\cite[Section 2.7]{mcduffIntroductionSymplecticTopology2017}}] \label{def:c1} Let $\Sigma \in \mathbf{Man}^2$ (i.e. $\Sigma$ is $2$-dimensional, connected, compact, oriented, with empty boundary) and $E \longrightarrow \Sigma$ be a symplectic vector bundle. We define the \textbf{first Chern number} of $E$, $c_1(E) \in \Z$, as follows. Choose embedded codimension-$0$ submanifolds $\Sigma_1$ and $\Sigma_2$ of $\Sigma$ such that \begin{IEEEeqnarray*}{c+x*} S \coloneqq \del \Sigma_1 = \del \Sigma_2 = \Sigma_1 \cap \Sigma_2 \end{IEEEeqnarray*} and $\Sigma$ is the gluing of $\Sigma_1$ and $\Sigma_2$ along $S$. Orient $S$ as the boundary of $\Sigma_1$. For $i=1,2$, denote by $\iota_i \colon \Sigma_i \longrightarrow \Sigma$ the inclusion and choose a symplectic trivialization \begin{IEEEeqnarray*}{c+x*} \tau^i \colon \iota_i^* E \longrightarrow \Sigma_i \times \R ^{2n}. \end{IEEEeqnarray*} Define the overlap map $A \colon S \longrightarrow \operatorname{Sp}(2n)$ by $A(x) = \tau^1_x \circ (\tau^2_x)^{-1}$. Denote by $S_1, \ldots, S_k$ the connected components of $S$ and parametrize each component by a loop $\gamma_i \colon S^1 \longrightarrow S_i$ such that $\dot{\gamma}_i(t)$ is positively oriented. Finally, let \begin{IEEEeqnarray*}{c+x*} c_1(E) \coloneqq \sum_{i=1}^{k} \mu(A \circ \gamma_i), \end{IEEEeqnarray*} where $\mu$ is the Maslov index as in \cref{thm:maslov sympl properties}. \end{definition} \begin{theorem}[{\cite[Theorem 2.7.1]{mcduffIntroductionSymplecticTopology2017}}] The first Chern number is well-defined and it is the unique natural transformation $c_1 \colon \mathcal{E} \longrightarrow \mathcal{Z}$ which satisfies the following properties: \begin{description} \item[(Classification)] If $E, E' \in \mathcal{E}(\Sigma)$ then $E$ and $E'$ are isomorphic if and only if $\operatorname{rank} E = \operatorname{rank} E'$ and $c_1(E) = c_1(E')$. \item[(Naturality)] If $f \colon \Sigma \longrightarrow \Sigma'$ is a smooth map and $E \in \mathcal{E}(\Sigma)$ then $c_1(f^*E) = \deg(f) c_1(E)$. \item[(Additivity)] If $E, E' \in \mathcal{E}(\Sigma)$ then $c_1(E \directsum E') = c_1(E) + c_1(E')$. \item[(Normalization)] The first Chern number of $T \Sigma$ is $c_1(T\Sigma) = 2 - 2g$, where $g$ is the genus of $\Sigma$. \end{description} \end{theorem} \section{Conley--Zehnder index of a periodic orbit} Let $(X,\omega)$ be a symplectic manifold of dimension $2n$ and $H \colon S^1 \times X \longrightarrow \R$ be a time-dependent Hamiltonian. For each $t \in S^1$ we denote by $H_t$ the map $H_t = H(t,\cdot) \colon X \longrightarrow \R$. The Hamiltonian $H$ has a corresponding time-dependent Hamiltonian vector field $X_H$ which is uniquely determined by \begin{IEEEeqnarray*}{c+x*} \edv H_t = - \iota_{X_{H_t}} \omega. \end{IEEEeqnarray*} We denote by $\phi^t_{X_H}$ the time-dependent flow of $X_{H}$. \begin{definition} \label{def:orbit of hamiltonian} A \textbf{$1$-periodic orbit} of $H$ is a map $\gamma \colon S^1 \longrightarrow X$ such that \begin{IEEEeqnarray*}{c+x*} \dot{\gamma}(t) = X_{H_t} (\gamma(t)) \end{IEEEeqnarray*} for every $t \in S^1$. If $\lambda$ is a symplectic potential for $(X,\omega)$, then the \textbf{action} of $\gamma$ is \begin{IEEEeqnarray*}{c+x*} \mathcal{A}_H(\gamma) \coloneqq \int_{S^1}^{} \gamma^* \lambda - \int_{S^1}^{} H(t, \gamma(t)) \edv t. \end{IEEEeqnarray*} \end{definition} \begin{definition} \label{def:nondegenerate hamiltonian orbit} Let $\gamma$ be a $1$-periodic orbit of $H$.
We say that $\gamma$ is \textbf{nondegenerate} if the linear map \begin{IEEEeqnarray*}{c+x*} \dv \phi^{1}_{X_H} \colon T_{\gamma(0)} X \longrightarrow T_{\gamma(1)} X = T_{\gamma(0)} X \end{IEEEeqnarray*} does not have $1$ as an eigenvalue. We say that the Hamiltonian $H$ is \textbf{nondegenerate} if every $1$-periodic orbit of $H$ is nondegenerate. \end{definition} \begin{definition} \phantomsection\label{def:cz of hamiltonian orbit wrt trivialization} Let $\gamma$ be a $1$-periodic orbit of $H$ and $\tau$ be a symplectic trivialization of $\gamma^* TX$. We define the \textbf{Conley--Zehnder index} of $\gamma$ with respect to $\tau$, denoted $\conleyzehnder^{\tau}(\gamma)$, as follows. First, define a path of symplectic matrices $A^{\gamma,\tau} \colon [0,1] \longrightarrow \operatorname{Sp}(2n)$ by the equation $A^{\gamma,\tau}(t) \coloneqq \tau_t \circ \dv \phi^t_{X_H}(\gamma(0)) \circ \tau_{0}^{-1}$. In other words, $A^{\gamma,\tau}(t)$ is the unique linear map such that the diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} T_{\gamma(0)} X \ar[d, swap, "\dv \phi^t_{X_{H}}(\gamma(0))"] \ar[r, "\tau_0"] & \R^{2n} \ar[d, "A^{\gamma,\tau}(t)"] \\ T_{\gamma(t)} X \ar[r, swap, "\tau_t"] & \R^{2n} \end{tikzcd} \end{IEEEeqnarray*} commutes. Notice that since $\gamma$ is nondegenerate, $A^{\gamma,\tau} \in \operatorname{SP}(n)$. Then, define \begin{IEEEeqnarray*}{c+x*} \conleyzehnder^{\tau}(\gamma) \coloneqq \conleyzehnder(A^{\gamma,\tau}). \end{IEEEeqnarray*} \end{definition} Let $D = \{ z \in \C \mid |z| \leq 1 \}$ be the disk and denote by $\iota_{D,S^1} \colon S^1 \longrightarrow D$ the inclusion on the boundary, i.e. $\iota_{D,S^1}(t) = e^{2 \pi i t}$. \begin{lemma} \label{lem:cz of hamiltonian is independent of triv over filling disk} Let $\gamma$ be a $1$-periodic orbit of $H$. For $i = 1,2$, let $u_i \colon D \longrightarrow X$ be a filling disk for $\gamma$ (i.e. $\gamma = u_i \circ \iota_{D,S^1}$) and $\tau^i$ be a symplectic trivialization of $u_i^* TX$. If $c_1(TX)|_{\pi_2(X)} = 0$, then \begin{IEEEeqnarray*}{c+x*} \conleyzehnder^{\tau^1}(\gamma) = \conleyzehnder^{\tau^2}(\gamma). \end{IEEEeqnarray*} \end{lemma} \begin{proof} Consider the diagram \begin{IEEEeqnarray}{c+x*} \plabel{eq:diagram cz indep choices} \begin{tikzcd} \R^{2n} \ar[d, swap, "A^{\gamma,\tau^1}(t)"] & T_{\gamma(0)} X \ar[d, "\dv \phi^t_{X_H}(\gamma(0))"] \ar[l, swap, "\tau^1_0"] \ar[r, "\tau^2_0"] & \R ^{2n} \ar[ll, bend right=50, swap, "B(0)"] \ar[d, "A^{\gamma,\tau^2}(t)"] \\ \R^{2n} & T_{\gamma(t)} X \ar[l, "\tau^1_t"] \ar[r, swap, "\tau^2_t"] & \R ^{2n} \ar[ll, bend left=50, "B(t)"] \\ \end{tikzcd} \end{IEEEeqnarray} where we have defined $B(t) \coloneqq \tau^1_t \circ (\tau^2_t)^{-1}$. Let $\sigma \colon S^2 \longrightarrow X$ be the gluing of the disks $u_1$ and $u_2$ along their common boundary $\gamma$.
Then, \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\conleyzehnder^{\tau^1}(\gamma) - \conleyzehnder^{\tau^2}(\gamma)}\\ \quad & = & \conleyzehnder(A^{\gamma,\tau^1}) - \conleyzehnder(A^{\gamma,\tau^2}) & \quad [\text{by \cref{def:cz of hamiltonian orbit wrt trivialization}}]\\ & = & \conleyzehnder(B A^{\gamma,\tau^2} B(0)^{-1}) - \conleyzehnder(A^{\gamma,\tau^2}) & \quad [\text{by diagram \eqref{eq:diagram cz indep choices}}] \\ & = & \conleyzehnder(B(0)^{-1} B A^{\gamma,\tau^2}) - \conleyzehnder(A^{\gamma,\tau^2}) & \quad [\text{by naturality of $\conleyzehnder$}] \\ & = & 2 \mu(B(0)^{-1} B) & \quad [\text{by the loop property of $\conleyzehnder$}] \\ & = & 2 \mu(B) & \quad [\text{by homotopy invariance of $\maslov$}] \\ & = & 2 c_1(\sigma^* TX) & \quad [\text{by definition of the first Chern number}] \\ & = & 2 c_1 (TX) ([\sigma]) & \quad [\text{by Equation \eqref{eq:first chern class vs number}}] \\ & = & 0 & \quad [\text{by assumption}]. & \qedhere \end{IEEEeqnarray*} \end{proof} Let $(M,\alpha)$ be a contact manifold of dimension $2n + 1$ with Reeb vector field $R$. Our goal is to repeat the discussion of the first part of this section in the context of periodic orbits of $R$. \begin{definition} A \textbf{Reeb orbit} is a map $\gamma \colon \R / T \Z \longrightarrow M$ such that \begin{IEEEeqnarray*}{c+x*} \dot{\gamma}(t) = R(\gamma(t)) \end{IEEEeqnarray*} for every $t \in \R / T \Z$. In this case, we call $T$ the \textbf{period} of $\gamma$. The \textbf{multiplicity} of $\gamma$, which we will usually denote by $m$, is the degree of the map $\gamma \colon \R / T \Z \longrightarrow \img \gamma$. The \textbf{action} of $\gamma$ is \begin{IEEEeqnarray*}{c+x*} \mathcal{A}(\gamma) \coloneqq \int_{0}^{T} \gamma^* \alpha = T. \end{IEEEeqnarray*} \end{definition} \begin{remark} Alternatively, a $T$-periodic Reeb orbit can be seen as a map $\gamma \colon S^1 \longrightarrow M$ such that $\dot{\gamma}(t) = T R(\gamma(t))$. We will use the two possible descriptions interchangeably. \end{remark} Since $\ldv{R} \alpha = 0$ (by \cref{lem:reeb vf preserves contact form}) and using \cref{lem:mosers trick}, we conclude that $(\phi^t_R)^* \alpha = \alpha$. In particular, $\dv \phi^t_R(p) (\xi_p) \subset \xi_{\phi^t_R(p)}$ and \begin{IEEEeqnarray*}{c+x*} \dv \phi^t_R(p) \colon \xi_p \longrightarrow \xi_{\phi^t_R(p)} \end{IEEEeqnarray*} is a symplectic linear map. \begin{definition} A Reeb orbit $\gamma$ of $M$ with period $T$ is \textbf{nondegenerate} if the linear map \begin{IEEEeqnarray*}{c+x*} \dv \phi^T_R(\gamma(0)) \colon \xi_{\gamma(0)} \longrightarrow \xi_{\gamma(T)} = \xi_{\gamma(0)} \end{IEEEeqnarray*} does not have $1$ as an eigenvalue. We say that $(M, \alpha)$ is \textbf{nondegenerate} if every Reeb orbit in $M$ is nondegenerate. If $(X, \lambda)$ is a Liouville domain, then $(X, \lambda)$ is \textbf{nondegenerate} if $(\partial X, \lambda|_{\partial X})$ is nondegenerate. \end{definition} \begin{definition} \label{def:cz of reeb orbit wrt trivialization} Let $\gamma \colon S^1 \longrightarrow M$ be a Reeb orbit of period $T$ and $\tau$ be a symplectic trivialization of $\gamma^* \xi$. The \textbf{Conley--Zehnder index} of $\gamma$ is given by \begin{IEEEeqnarray*}{c+x*} \conleyzehnder^{\tau}(\gamma) \coloneqq \conleyzehnder(A^{\gamma,\tau}), \end{IEEEeqnarray*} where $A^{\gamma,\tau} \colon [0,1] \longrightarrow \operatorname{Sp}(2n)$ is the path of symplectic matrices given by the equation $A^{\gamma,\tau}(t) \coloneqq \tau_t \circ \dv \phi^{t T}_{R}(\gamma(0)) \circ \tau_{0}^{-1}$.
\end{definition} \begin{lemma} \label{lem:cz of reeb is independent of triv over filling disk} Let $(X, \lambda)$ be a Liouville domain and $\gamma \colon S^1 \longrightarrow \partial X$ be a Reeb orbit. For $i = 1,2$, let $u_i \colon D \longrightarrow X$ be a filling disk for $\gamma$ (i.e. $\iota_{X,\partial X} \circ \gamma = u_i \circ \iota_{D,S^1}$). Let $\tau^i$ be a symplectic trivialization of $u_i^* TX$ and denote also by $\tau^i$ the induced trivialization of $(\iota_{X,\partial X} \circ \gamma)^* TX$. Assume that \begin{IEEEeqnarray*}{rClCl} \tau^i_{t}(Z_{\gamma(t)}) & = & e_1 & \in & \R^{2n}, \\ \tau^i_{t}(R_{\gamma(t)}) & = & e_{n+1} & \in & \R^{2n}, \end{IEEEeqnarray*} for every $t \in S^1$. If $2 c_1(TX) = 0$, then \begin{IEEEeqnarray*}{c+x*} \conleyzehnder^{\tau^1}(\gamma) = \conleyzehnder^{\tau^2}(\gamma). \end{IEEEeqnarray*} \end{lemma} \begin{proof} By the assumptions on $\tau^i$, the diagram \begin{IEEEeqnarray}{c+x*} \plabel{eq:diagram cz reeb indep triv} \begin{tikzcd} \xi_{\gamma(t)} \ar[r] \ar[d, swap, "\tau^i_t"] & T_{\gamma(t)} X \ar[d, "\tau^i_t"] & \xi^{\perp}_{\gamma(t)} \ar[d, "\tau^i_t"] \ar[l] \\ \R^{2n-2} \ar[r, swap, "\iota_{\R^{2n-2}}"] & \R^{2n} & \R^{2} \ar[l, "\iota_{\R^{2}}"] \end{tikzcd} \end{IEEEeqnarray} commutes, where \begin{IEEEeqnarray*}{rCls+x*} \iota_{\R^{2n-2}}(x^2,\ldots,x^n,y^2,\ldots,y^n) & = & (0,x^2,\ldots,x^n,0,y^2,\ldots,y^n), \\ \iota_{\R^{2}}(x,y) & = & (x,0,\ldots,0,y,0,\ldots,0). \end{IEEEeqnarray*} Define \begin{IEEEeqnarray*}{rCcCrCl} B^{2n}(t) & \coloneqq & \tau^1_t \circ (\tau^2_t)^{-1} & \colon & \R^{2n} & \longrightarrow & \R^{2n}, \\ B^{2n-2}(t) & \coloneqq & \tau^1_t \circ (\tau^2_t)^{-1} & \colon & \R^{2n-2} & \longrightarrow & \R^{2n-2}. \end{IEEEeqnarray*} By the assumptions on $\tau^i$, and diagram \eqref{eq:diagram cz reeb indep triv}, \begin{IEEEeqnarray}{c+x*} \plabel{eq:decomposition of b} B^{2n}(t) = \begin{bmatrix} \id_{\R^2} & 0 \\ 0 & B^{2n-2}(t) \end{bmatrix}. \end{IEEEeqnarray} Let $\sigma \colon S^2 \longrightarrow X$ be the gluing of the disks $u_1$ and $u_2$ along their common boundary $\gamma$. Finally, we compute \begin{IEEEeqnarray*}{rCls+x*} \conleyzehnder^{\tau^1}(\gamma) - \conleyzehnder^{\tau^2}(\gamma) & = & 2 \mu (B^{2n-2}) & \quad [\text{by the same computation as in \cref{lem:cz of hamiltonian is independent of triv over filling disk}}] \\ & = & 2 \mu (B^{2n}) & \quad [\text{by Equation \eqref{eq:decomposition of b} and \cref{thm:maslov sympl properties}}] \\ & = & 2 c_1(\sigma^* TX) & \quad [\text{by definition of the first Chern number}] \\ & = & 0 & \quad [\text{by assumption}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{remark} \label{rmk:notation for tuples of orbits} Suppose that $\Gamma = (\gamma_1, \ldots, \gamma_p)$ is a tuple of (Hamiltonian or Reeb) orbits and $\tau$ is a trivialization of the relevant symplectic vector bundle over each orbit. We will frequently use the following notation: \begin{IEEEeqnarray*}{rCls+x*} \mathcal{A}(\Gamma) & \coloneqq & \sum_{i=1}^{p} \mathcal{A}(\gamma_i), \\ \conleyzehnder^{\tau}(\Gamma) & \coloneqq & \sum_{i=1}^{p} \conleyzehnder^{\tau}(\gamma_i). \end{IEEEeqnarray*} If $\beta = \sum_{i=1}^{m} a_i \Gamma_i$ is a formal linear combination of tuples of orbits, then we denote \begin{IEEEeqnarray*}{c+x*} \mathcal{A}(\beta) \coloneqq \max_{i = 1, \ldots, m} \mathcal{A}(\Gamma_i).
\end{IEEEeqnarray*} The action of a formal linear combination is going to be relevant only in \cref{chp:contact homology}, where we will consider the action filtration on linearized contact homology. \end{remark} \section{Periodic Reeb orbits in a unit cotangent bundle} Let $(L, g)$ be an orientable Riemannian manifold of dimension $n$. Recall that $L$ has a cotangent bundle $\pi \colon T^* L \longrightarrow L$, which is an exact symplectic manifold with symplectic potential $\lambda \in \Omega^1(T^* L)$, symplectic form $\omega \coloneqq \edv \lambda$ and Liouville vector field $Z$ given by $\iota_Z \omega = \lambda$. We will denote by $z \colon L \longrightarrow T^*L$ the zero section. Consider the unit cotangent bundle $\pi \colon S^* L \longrightarrow L$ and denote by $\iota \colon S^* L \longrightarrow T^* L$ the inclusion. Then, $\alpha \coloneqq \iota^* \lambda$ is a contact form on $S^* L$, with associated contact distribution $\xi = \ker \alpha \subset T S^* L$ and Reeb vector field $R \in \mathfrak{X}(S^* L)$. The Riemannian metric $g$ defines a vector bundle isomorphism $\tilde{g} \colon TL \longrightarrow T^*L$ given by $\tilde{g}(v) = g(v, \cdot)$. Let $\ell > 0$ and $c \colon \R / \ell \Z \longrightarrow L$ be a curve which is parametrized by arclength. Define $\gamma \coloneqq \tilde{g} \circ \dot{c} \colon \R / \ell \Z \longrightarrow S^* L$. Then, by \cref{thm:flow geodesic vs hamiltonian,thm:flow reeb vs hamiltonian}, the curve $c$ is a geodesic (of length $\ell$) if and only if $\gamma$ is a Reeb orbit (of period $\ell$). We will assume that this is the case. The goal of this section is to study specific sets of trivializations and maps between these sets (see diagram \eqref{eq:diagram of maps of trivializations}), which can be used to define the Conley--Zehnder index of $\gamma$ (see \cref{thm:index of geodesic or reeb orbit isometric triv}). Since $T^* L$ is a symplectic manifold, $T T^* L \longrightarrow T^* L$ is a symplectic vector bundle. The hyperplane distribution $\xi$ is a symplectic subbundle of $\iota^* T T^* L \longrightarrow S^* L$. We can consider the symplectic complement of $\xi$, which by \cref{lem:decomposition coming from contact hypersurface} is given by \begin{IEEEeqnarray*}{c+x*} \xi^{\perp}_{u} = \p{<}{}{Z_u} \oplus \p{<}{}{R_u} \end{IEEEeqnarray*} for every $u \in S^* L$. Finally, $T^* L \oplus T L \longrightarrow L$ is a symplectic vector bundle, with symplectic structure given by \begin{IEEEeqnarray*}{c+x*} \omega_{T^* L \oplus TL}((u,v), (x,y)) = u(y) - x(v). \end{IEEEeqnarray*} \begin{remark} \label{rmk:connections} Let $\pi \colon E \longrightarrow B$ be a vector bundle. Consider the vector bundles $\pi^* E$, $TE$ and $\pi^* TB$ over $E$. There is a short exact sequence \begin{IEEEeqnarray*}{c+x*} \phantomsection\label{eq:short exact sequence of vector bundles} \begin{tikzcd} 0 \ar[r] & \pi^* E \ar[r, "I^V"] & TE \ar[r, "P^H"] & \pi^* T B \ar[r] & 0 \end{tikzcd} \end{IEEEeqnarray*} of vector bundles over $E$, where \begin{IEEEeqnarray*}{rClCrClCl} I^V_e & \coloneqq & \dv \iota_e(e) & \colon & E_{\pi(e)} & \longrightarrow & T_e E, & \quad & \text{where } \iota_e \colon E_{\pi(e)} \longrightarrow E \text{ is the inclusion,} \\ P^H_e & \coloneqq & \dv \parbox{\widthof{$\iota_e$}}{$\pi$} (e) & \colon & T_e E & \longrightarrow & T_{\pi(e)} B, \end{IEEEeqnarray*} for every $e \in E$.
Recall that a \textbf{Koszul connection} on $E$ is a map \begin{IEEEeqnarray*}{c+x*} \nabla \colon \mathfrak{X}(B) \times \Gamma(E) \longrightarrow \Gamma(E) \end{IEEEeqnarray*} which is $C^{\infty}$-linear on $\mathfrak{X}(B)$ and satisfies the Leibniz rule on $\Gamma(E)$. A \textbf{linear Ehresmann connection} on $E$ is a vector bundle map $P^V \colon TE \longrightarrow \pi^* E$ such that $P^V \circ I^V = \id_{\pi^* E}$ and $P^V \circ T m_{\lambda} = m_{\lambda} \circ P^V$ for every $\lambda \in \R$, where $m_{\lambda} \colon E \longrightarrow E$ is the map which multiplies by $\lambda$. The sets of Koszul connections on $E$ and of linear Ehresmann connections on $E$ are in bijection. If $\nabla$ is a Koszul connection on $E$, the corresponding linear Ehresmann connection is given as follows. Let $I^H \colon \pi^* TB \longrightarrow TE$ be the map which is given by \begin{IEEEeqnarray*}{c+x*} I^H_e(u) \coloneqq \dv s (\pi(e)) u - I^V_e(\nabla_u^{} s) \end{IEEEeqnarray*} for every $e \in E$ and $u \in T_{\pi(e)} B$, where $s$ is any choice of section of $\pi \colon E \longrightarrow B$ such that $s(\pi(e)) = e$. The map $I^H$ is independent of the choice of section $s$ and satisfies $P^H \circ I^H = \id_{\pi^* TB}$. Let $P^V \colon TE \longrightarrow \pi^* E$ be the map which is given by \begin{IEEEeqnarray*}{c+x*} P^V_e(w) \coloneqq (I^V_e)^{-1} (w - I^H_e \circ P^H_e (w)) \end{IEEEeqnarray*} for every $e \in E$ and $w \in T_e E$. We point out that this definition is well-posed, since $w - I^H_e \circ P^H_e (w) \in \ker P^H_e = \img I^V_e$. As before, $P^V \circ I^V = \id_{\pi^* E}$. Finally, the maps \begin{IEEEeqnarray*}{rCrCrCl} I & \coloneqq & I^V & \oplus & I^H & \colon & \pi^* E \oplus \pi^* T B \longrightarrow TE, \\ P & \coloneqq & P^V & \times & P^H & \colon & TE \longrightarrow \pi^* E \oplus \pi^* T B, \end{IEEEeqnarray*} are isomorphisms and inverses of one another. \end{remark} Consider the Levi-Civita connection on $L$, which is a Koszul connection on $T L$. There is an induced Koszul connection on $T^* L$ given by \begin{IEEEeqnarray*}{c+x*} (\nabla_X \beta)(Y) \coloneqq X(\beta(Y)) - \beta(\nabla_X Y), \end{IEEEeqnarray*} for every $X, Y \in \mathfrak{X}(L)$ and $\beta \in \Gamma(T^* L) = \Omega^1(L)$. By \cref{rmk:connections} (with $B = L$ and $E = T^*L$), there is an induced linear Ehresmann connection on $\pi \colon T^*L \longrightarrow L$ which is given by maps \begin{IEEEeqnarray*}{rCrCrCl} I & \coloneqq & I^V & \oplus & I^H & \colon & \pi^* T^* L \oplus \pi^* T L \longrightarrow T T^* L, \\ P & \coloneqq & P^V & \times & P^H & \colon & T T^* L \longrightarrow \pi^* T^* L \oplus \pi^* T L. \end{IEEEeqnarray*} \begin{lemma} \label{prop:properties of p} The maps $I$ and $P$ are isomorphisms of symplectic vector bundles. Moreover, \begin{IEEEeqnarray}{rClCl} P(Z_u) & = & (u,0), & \quad & \text{ for every } u \in T^* L, \plabel{eq:p of vfs 1} \\ P(R_u) & = & (0,\tilde{g}^{-1}(u)), & \quad & \text{ for every } u \in S^* L. \plabel{eq:p of vfs 2} \end{IEEEeqnarray} \end{lemma} \begin{proof} Let $q \coloneqq \pi(u)$ and choose normal coordinates $(q^1,\ldots,q^n)$ on $L$ centred at $q$ (this means that with respect to these coordinates, $g_{ij}(q) = \delta_{ij}$ and $\partial_k g_{ij} (q) = 0$). Let $(q^1, \ldots, q^n, p_1, \ldots, p_n)$ be the induced coordinates on $T^* L$.
Then, the vector spaces $T_u T^*L$ and $T^*_q L \directsum T_q L$ have the following symplectic bases: \begin{IEEEeqnarray}{rCls+x*} T_ u T^*L & = & \spn \p{c}{2}{ \pdv{}{p_1}\Big|_{u}, \cdots, \pdv{}{p_n}\Big|_{u}, \pdv{}{q^1}\Big|_{u}, \cdots, \pdv{}{q^n}\Big|_{u} }, \plabel{eq:basis 1} \\ T^*_q L \directsum T_q L & = & \spn \p{c}{1}{ \edv q^1|_q, \ldots, \edv q^n|_q } \directsum \spn \p{c}{2}{ \pdv{}{q^1}\Big|_{q}, \cdots, \pdv{}{q^n}\Big|_{q} }. \plabel{eq:basis 2} \end{IEEEeqnarray} By the definitions of $P$ and $I$ in \cref{rmk:connections}, we have \begin{IEEEeqnarray}{rCls+x*} I^V_u (\edv q^i|_q) & = & \pdv{}{p_i}\Big|_u, \IEEEnonumber\\ P^H_u \p{}{2}{ \pdv{}{q^i}\Big|_{u} } & = & \pdv{}{q^i}\Big|_{q}, \plabel{eq:p horizontal in coordinates} \\ P^V_u \p{}{2}{ \pdv{}{p_i}\Big|_{u} } & = & P^V_u \circ I^V_u (\edv q^i|_{q}) = \edv q^i|_q, \plabel{eq:p vertical in coordinates} \end{IEEEeqnarray} which implies that $P$ is the identity matrix when written with respect to the bases \eqref{eq:basis 1} and \eqref{eq:basis 2}. Since these bases are symplectic, $P$ is a symplectic linear map. With respect to the coordinates $(q^1, \ldots, q^n, p_1, \ldots, p_n)$, the Liouville vector field is given by \begin{IEEEeqnarray}{c+x*} Z = \sum_{i=1}^{n} p_i \pdv{}{p_i}. \plabel{eq:liouville vector field in coordinates} \end{IEEEeqnarray} By \cref{thm:flow reeb vs hamiltonian} and Equation \eqref{eq:hamiltonian vector field in coordinates}, and since the coordinates are normal, the Reeb vector field is given by \begin{IEEEeqnarray}{rCl} R_u & = & \sum_{i=1}^{n} p_i(u) \pdv{}{q^i}\Big|_{u}. \plabel{eq:reeb vector field in coordinates} \end{IEEEeqnarray} Equations \eqref{eq:liouville vector field in coordinates} and \eqref{eq:reeb vector field in coordinates} together with equations \eqref{eq:p horizontal in coordinates} and \eqref{eq:p vertical in coordinates} imply Equations \eqref{eq:p of vfs 1} and \eqref{eq:p of vfs 2}. \end{proof} Define \begin{IEEEeqnarray*}{rCls+x*} \mathcal{T}(c^* TL) & \coloneqq & \left\{ \kappa \ \middle\vert \begin{array}{l} \kappa \text{ is an isometric trivialization of } c^* TL \\ \text{such that } \kappa_t (\dot{c}(t)) = e_1 \in \R^n \text{ for every } t \in \R / \ell \Z \end{array} \right\}, \\ \mathcal{T}(\gamma^* \xi) & \coloneqq & \{ \tau \mid \tau \text{ is a symplectic trivialization of } \gamma^* \xi \}, \\ \mathcal{T}((z \circ c)^* T T^* L) & \coloneqq & \{ \sigma \mid \sigma \text{ is a symplectic trivialization of } (z \circ c)^* T T^* L \}. \end{IEEEeqnarray*} We will define maps $\tau$, $\sigma_0$ and $\sigma$ (see \cref{def:map of trivializations tau,def:map of trivializations sigma 0,def:map of trivializations sigma}) which fit into the following diagram. \begin{IEEEeqnarray}{c+x*} \plabel{eq:diagram of maps of trivializations} \begin{tikzcd} \mathcal{T}(c^* TL) \ar[d, swap, "\tau"] \ar[dr, "\sigma"] \\ \mathcal{T}(\gamma^* \xi) \ar[r, swap, "\sigma_0"] & \mathcal{T}((z \circ c)^* T T^* L) \end{tikzcd} \end{IEEEeqnarray} We will check that this diagram commutes in \cref{lem:diagram of maps of trivalizations commutes}. Consider the following diagram of symplectic vector spaces and symplectic linear maps. 
\begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \xi_{\gamma(t)}^{} \ar[r, "\iota_{\xi_{\gamma(t)}}"] & \xi^{\perp}_{\gamma(t)} \oplus \xi_{\gamma(t)}^{} \ar[r, equals] & T_{\gamma(t)}^{} T^* L \ar[r, "P_{\gamma(t)}"] & T^*_{c(t)} L \oplus T_{c(t)}^{} L & T_{z \circ c(t)}^{} T^* L \ar[l, swap, "P_{z \circ c(t)}"] \end{tikzcd} \end{IEEEeqnarray*} We now define the maps $\tau$, $\sigma_0$ and $\sigma$. \begin{definition} \phantomsection\label{def:map of trivializations tau} For every $\kappa \in \mathcal{T}(c^* TL)$, we define $\tau(\kappa) \in \mathcal{T}(\gamma^* \xi)$ by \begin{IEEEeqnarray*}{c+x*} \tau(\kappa)_t \coloneqq \pi_{\R^{2n-2}} \circ \tilde{\kappa}_t \circ P_{\gamma(t)} \circ \iota_{\xi_{\gamma(t)}}, \end{IEEEeqnarray*} where $\tilde{\kappa}_t \colon T^*_{c(t)} L \oplus T_{c(t)}^{} L \longrightarrow \R^n \oplus \R^n$ and $\pi_{\R^{2n-2}} \colon \R^{2n} \longrightarrow \R^{2n-2}$ are given by \begin{IEEEeqnarray*}{rCl} \tilde{\kappa}_t(u,v) & \coloneqq & (\kappa_t \circ \tilde{g}^{-1}_{c(t)}(u), \kappa_t(v)), \\ \pi_{\R^{2n-2}}(x^1,\ldots,x^n,y^1,\ldots,y^n) & \coloneqq & (x^2,\ldots,x^n,y^2,\ldots,y^n). \end{IEEEeqnarray*} \end{definition} For \cref{def:map of trivializations tau} to be well-posed, we need $\tilde{\kappa}_t$ to be a symplectic linear map. We check this in \cref{lem:kappa tl is symplectic} below. \begin{definition} \phantomsection\label{def:map of trivializations sigma 0} For every $\tau \in \mathcal{T}(\gamma^* \xi)$, we define $\sigma_0(\tau) \in \mathcal{T}((z \circ c)^* T T^*L)$ by \begin{IEEEeqnarray*}{c+x*} \sigma_0 (\tau)_t \coloneqq \tilde{\tau}_t \circ P^{-1}_{\gamma(t)} \circ P_{z \circ c(t)}, \end{IEEEeqnarray*} where $\tilde{\tau}_t \colon \xi^{\perp}_{\gamma(t)} \oplus \xi_{\gamma(t)}^{} \longrightarrow \R^{2n}$ is the symplectic linear map given by \begin{IEEEeqnarray*}{rCls+x*} \tilde{\tau}_t (Z_{\gamma(t)}) & = & e_1, \\ \tilde{\tau}_t (R_{\gamma(t)}) & = & e_{n+1}, \\ \tilde{\tau}_t (v) & = & \iota_{\R^{2n-2}} \circ \tau_t(v), \quad \text{for every } v \in \xi_{\gamma(t)}, \end{IEEEeqnarray*} and $\iota_{\R^{2n-2}} \colon \R^{2n-2} \longrightarrow \R^{2n}$ is given by \begin{IEEEeqnarray*}{c+x*} \iota_{\R^{2n-2}}(x^2,\ldots,x^n,y^2,\ldots,y^n) = (0,x^2,\ldots,x^n,0,y^2,\ldots,y^n). \end{IEEEeqnarray*} \end{definition} \begin{definition} \label{def:map of trivializations sigma} For every $\kappa \in \mathcal{T}(c^* TL)$, we define $\sigma(\kappa) \in \mathcal{T}((z \circ c)^* T T^*L)$ by \begin{IEEEeqnarray*}{c+x*} \sigma(\kappa)_t \coloneqq \tilde{\kappa}_t \circ P_{z \circ c(t)}. \end{IEEEeqnarray*} \end{definition} \begin{lemma} \label{lem:kappa tl is symplectic} The map $\tilde{\kappa}_t$ from \cref{def:map of trivializations tau,def:map of trivializations sigma} is symplectic. 
\end{lemma} \begin{proof} For $(u,v), (x,y) \in T^*_{c(t)} L \oplus T_{c(t)}^{} L$, we have \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\omega_{\R^n \oplus \R^n} \p{}{1}{ \tilde{\kappa}_t \p{}{}{u,v}, \tilde{\kappa}_t \p{}{}{x,y} } }\\ \ & = & \omega_{\R^n \oplus \R^n} \p{}{1}{ \p{}{1}{ \kappa_t \circ \tilde{g}_{c(t)}^{-1} (u), \kappa_t (v)}, \p{}{1}{ \kappa_t \circ \tilde{g}_{c(t)}^{-1} (x), \kappa_t (y)} } & \quad [\text{by definition of $\tilde{\kappa}_t$}] \\ & = & \p{<}{1}{ \kappa_t \circ \tilde{g}_{c(t)}^{-1} (u), \kappa_t (y) }_{\R^n} - \p{<}{1}{ \kappa_t \circ \tilde{g}_{c(t)}^{-1} (x), \kappa_t (v) }_{\R^n} & \quad [\text{by definition of $\omega_{\R^n \oplus \R^n}$}] \\ & = & \p{<}{1}{ \tilde{g}_{c(t)}^{-1} (u), y }_{TL} - \p{<}{1}{ \tilde{g}_{c(t)}^{-1} (x), v }_{TL} & \quad [\text{since $\kappa_t$ is an isometry}] \\ & = & u(y) - x(v) & \quad [\text{by definition of $\tilde{g}$}] \\ & = & \omega_{T^*L \oplus TL} \p{}{1}{(u,v),(x,y)} & \quad [\text{by definition of $\omega_{T^*L \oplus TL}$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{lemma} \label{lem:diagram of maps of trivalizations commutes} Diagram \eqref{eq:diagram of maps of trivializations} commutes, i.e. $\sigma = \sigma_0 \circ \tau$. \end{lemma} \begin{proof} By \cref{def:map of trivializations tau,def:map of trivializations sigma 0,def:map of trivializations sigma}, \begin{IEEEeqnarray*}{rCls+x*} \sigma(\kappa)_t & = & \tilde{\kappa}_t \circ P_{z \circ c(t)}, \\ \sigma_0(\tau(\kappa))_t & = & \widetilde{\tau(\kappa)}_t \circ P_{\gamma(t)}^{-1} \circ P_{z \circ c(t)}. \end{IEEEeqnarray*} Therefore, it is enough to show that $\tilde{\kappa}_t \circ P_{\gamma(t)} = \widetilde{\tau(\kappa)}_t \colon T_{\gamma(t)} T^*L \longrightarrow \R^{2n}$. We show that $\tilde{\kappa}_t \circ P_{\gamma(t)}(Z_{\gamma(t)}) = \widetilde{\tau(\kappa)}_t(Z_{\gamma(t)})$. \begin{IEEEeqnarray*}{rCls+x*} \tilde{\kappa}_{t} \circ P_{\gamma(t)} (Z_{\gamma(t)}) & = & \tilde{\kappa}_t(\gamma(t), 0) & \quad [\text{by \cref{prop:properties of p}}] \\ & = & (\kappa_t \circ \tilde{g}^{-1}_{c(t)}(\gamma(t)), 0) & \quad [\text{by definition of $\tilde{\kappa}_t$}] \\ & = & (\kappa_t(\dot{c}(t)), 0) & \quad [\text{by definition of $\gamma$}] \\ & = & (e_1,0) & \quad [\text{since $\kappa \in \mathcal{T}(c^* TL)$}] \\ & = & \widetilde{\tau(\kappa)}_t (Z_{\gamma(t)}) & \quad [\text{by definition of $\widetilde{\tau(\kappa)}_t$}]. \end{IEEEeqnarray*} We show that $\tilde{\kappa}_t \circ P_{\gamma(t)}(R_{\gamma(t)}) = \widetilde{\tau(\kappa)}_t(R_{\gamma(t)})$. \begin{IEEEeqnarray*}{rCls+x*} \tilde{\kappa}_{t} \circ P_{\gamma(t)} (R_{\gamma(t)}) & = & \tilde{\kappa}_t(0, \tilde{g}^{-1}_{c(t)}(\gamma(t))) & \quad [\text{by \cref{prop:properties of p}}] \\ & = & (0, \kappa_t \circ \tilde{g}^{-1}_{c(t)}(\gamma(t))) & \quad [\text{by definition of $\tilde{\kappa}_t$}] \\ & = & (0, \kappa_t(\dot{c}(t))) & \quad [\text{by definition of $\gamma$}] \\ & = & (0,e_1) & \quad [\text{since $\kappa \in \mathcal{T}(c^* TL)$}] \\ & = & \widetilde{\tau(\kappa)}_t (R_{\gamma(t)}) & \quad [\text{by definition of $\widetilde{\tau(\kappa)}_t$}]. \end{IEEEeqnarray*} The previous computations show that \begin{IEEEeqnarray*}{c+x*} \tilde{\kappa}_t \circ P_{\gamma(t)} (\xi_{\gamma(t)}^{\perp}) = \ker \pi_{\R^{2n-2}}, \end{IEEEeqnarray*} which, since $\tilde{\kappa}_t \circ P_{\gamma(t)}$ is a symplectic linear map, in turn implies that \begin{IEEEeqnarray}{c+x*} \plabel{eq:image of p kappa} \tilde{\kappa}_t \circ P_{\gamma(t)} (\xi_{\gamma(t)}) = (\ker \pi_{\R^{2n-2}})^{\perp} = \img \iota_{\R^{2n - 2}}.
\end{IEEEeqnarray} Finally, we show that $\tilde{\kappa}_t \circ P_{\gamma(t)}(v) = \widetilde{\tau(\kappa)}_t(v)$ for every $v \in \xi_{\gamma(t)}$. \begin{IEEEeqnarray*}{rCls+x*} \widetilde{\tau(\kappa)}_t (v) & = & \iota_{\R^{2n-2}} \circ \tau(\kappa)_t (v) & \quad [\text{by definition of $\widetilde{\tau(\kappa)}_t$}] \\ & = & \iota_{\R^{2n-2}} \circ \pi_{\R^{2n-2}} \circ \tilde{\kappa}_t \circ P_{\gamma(t)} \circ \iota_{\xi_{\gamma(t)}} (v) & \quad [\text{by definition of $\tau$}] \\ & = & \tilde{\kappa}_t \circ P_{\gamma(t)}(v) & \quad [\text{by Equation \eqref{eq:image of p kappa}}]. & \qedhere \end{IEEEeqnarray*} \end{proof} This finishes the ``construction'' of diagram \eqref{eq:diagram of maps of trivializations}. Our goal is to show that $\conleyzehnder^{\tau(\kappa)}(\gamma)$ is independent of the choice of $\kappa \in \mathcal{T}(c^* TL)$ (see \cref{thm:index of geodesic or reeb orbit isometric triv}). Indeed, we will actually show that $\conleyzehnder^{\tau(\kappa)}(\gamma) = \morse(c)$. To make sense of this statement, we start by explaining the meaning of the Morse index of a geodesic. \begin{remark} \label{rmk:morse theory for geodesics} Define $X \coloneqq W^{1,2}(\R / \ell \Z,L)$ (maps from $\R / \ell \Z$ to $L$ of Sobolev class $W ^{1,2}$). Then, $X$ is a Hilbert manifold. At $c \in X$, the tangent space of $X$ is \begin{IEEEeqnarray*}{c+x*} T_{c} X = W ^{1,2}(\R / \ell \Z,c^* TL), \end{IEEEeqnarray*} which is a Hilbert space. We can define the \textbf{Energy functional} by \begin{IEEEeqnarray*}{rrCl} E \colon & X & \longrightarrow & \R \\ & c & \longmapsto & \frac{1}{2} \int_{\R / \ell \Z}^{} \p{||}{}{ \dot{c}(t) }^2 \edv t. \end{IEEEeqnarray*} Then, $c \in X$ is a critical point of $E$ if and only if $c$ is smooth and a geodesic in $L$. We say that $c$ is \textbf{nondegenerate} if the kernel of the map \begin{IEEEeqnarray*}{c+x*} \operatorname{Hess} E (c) \colon T _{c} X \longrightarrow T _{c}^* X \end{IEEEeqnarray*} is $\ker \operatorname{Hess} E(c) = \p{<}{}{\dot{c}}$. If $c$ is a critical point of $E$, i.e. a geodesic, then we define the \textbf{Morse index} of $c$ by \begin{IEEEeqnarray*}{c+x*} \morse(c) = \sup \left\{ \dim V \ \middle\vert \begin{array}{l} V \text{ is a subspace of } T _{c} X, \\ \operatorname{Hess} E (c)|_V \colon V \times V \longrightarrow \R \text{ is negative definite} \end{array} \right\}. \end{IEEEeqnarray*} Recall that $c$ is a geodesic if and only if $\gamma \coloneqq \tilde{g} \circ \dot{c}$ is a Reeb orbit. In this case, $c$ is a nondegenerate critical point of $E$ if and only if ${\gamma}$ is a nondegenerate Reeb orbit. \end{remark} \begin{definition} \phantomsection\label{lem:maslov index of a geodesic} For $\sigma \in \mathcal{T}((z \circ c)^* T T^* L)$, we define the \textbf{Maslov index} of $c$ with respect to $\sigma$, denoted $\maslov^{\sigma}(c)$, as follows. First, let $W^{c,\sigma}$ be the loop of Lagrangian subspaces of $\R^{2n}$ given by \begin{IEEEeqnarray*}{c+x*} W^{c,\sigma}(t) \coloneqq \sigma_t \circ \dv z(c(t)) (T_{c(t)} L). \end{IEEEeqnarray*} Then, define $\maslov^{\sigma}(c)$ to be the Maslov index of $W^{c,\sigma}$ in the sense of \cref{thm:maslov lagrangian properties}. \end{definition} \begin{lemma} \label{lem:maslov index of a geodesic is zero} For any $\kappa \in \mathcal{T}(c^* TL)$, \begin{IEEEeqnarray*}{c+x*} \maslov^{\sigma(\kappa)}(c) = 0. \end{IEEEeqnarray*} \end{lemma} \begin{proof} We will show that $W^{c,\sigma(\kappa)} = \{0\} \oplus \R^{n}$. 
By the zero property of the Maslov index for a path of Lagrangian subspaces, this implies the result. We start by showing that $P^V_{z(x)} \circ \dv z(x) = 0$ for any $x \in L$. For any $w \in T_x L$, \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{P^V_{z(x)} \circ \dv z(x) w}\\ \quad & = & (I^V_{z(x)})^{-1} (\dv z(x) w - I^H_{z(x)} \circ P^H_{z(x)} (\dv z(x) w)) & \quad [\text{by definition of $P^V$}] \\ & = & (I^V_{z(x)})^{-1} (\dv z(x) w - \dv z(x) \circ \dv \pi (z(x)) \circ \dv z(x) w) & \quad [\text{by definition of $I^H$ and $P^H$}] \\ & = & 0 & \quad [\text{since $\pi \circ z = \id_L$}]. \end{IEEEeqnarray*} We compute $W^{c,\sigma(\kappa)}$. \begin{IEEEeqnarray*}{rCls+x*} W^{c,\sigma(\kappa)}(t) & = & \sigma(\kappa)_t \circ \dv z(c(t)) (T_{c(t)} L) & \quad [\text{by definition of $W^{c,\sigma(\kappa)}$}] \\ & = & \tilde{\kappa}_t \circ P_{z \circ c(t)} \circ \dv z(c(t))(T_{c(t)} L) & \quad [\text{by definition of $\sigma(\kappa)$}] \\ & = & \tilde{\kappa}_t (0, P^H_{z \circ c(t)} \circ \dv z(c(t)) (T_{c(t)} L) ) & \quad [\text{since $P^V_{z(c(t))} \circ \dv z(c(t)) = 0$}] \\ & = & (0, \kappa_t \circ P^H_{z \circ c(t)} \circ \dv z(c(t)) (T_{c(t)} L) ) & \quad [\text{by definition of $\tilde{\kappa}_t$}] \\ & = & (0, \kappa_t(T_{c(t)} L)) & \quad [\text{since $P^H_{z \circ c(t)} = \dv \pi(z \circ c(t))$}] \\ & = & \{0\} \oplus \R^n & \quad [\text{since $\kappa_t$ is an isomorphism}]. & \qedhere \end{IEEEeqnarray*} \end{proof} The following theorem was originally proven in \cite{viterboNewObstructionEmbedding1990}, but we will use a restatement of it from \cite{cieliebakPuncturedHolomorphicCurves2018}. \begin{theorem}[{\cite[Lemma 2.1]{cieliebakPuncturedHolomorphicCurves2018}}] \label{thm:index of geod reeb} For any $\tau \in \mathcal{T}(\gamma^* \xi)$, \begin{IEEEeqnarray*}{c+x*} \conleyzehnder^{\tau}({\gamma}) + \maslov^{\sigma_0(\tau)}(c) = \morse(c). \end{IEEEeqnarray*} \end{theorem} \begin{theorem} \label{thm:index of geodesic or reeb orbit isometric triv} For any $\kappa \in \mathcal{T}(c^* TL)$, \begin{IEEEeqnarray*}{c+x*} \conleyzehnder^{\tau(\kappa)}({\gamma}) = \morse(c). \end{IEEEeqnarray*} \end{theorem} \begin{proof} By \cref{lem:diagram of maps of trivalizations commutes,lem:maslov index of a geodesic is zero,thm:index of geod reeb}. \end{proof} Finally, we state a result which will be necessary to prove \cref{thm:lagrangian vs g tilde}. \begin{lemma}[{\cite[Lemma 2.2]{cieliebakPuncturedHolomorphicCurves2018}}] \label{lem:geodesics lemma CM abs} Let $L$ be a compact $n$-dimensional manifold without boundary. Let $\mathrm{Riem}(L)$ be the set of Riemannian metrics on $L$, equipped with the $C^2$-topology. If $g_0 \in \mathrm{Riem}(L)$ is a Riemannian metric of nonpositive sectional curvature and $\mathcal{U} \subset \mathrm{Riem}(L)$ is an open neighbourhood of $g_0$, then for all $\ell_0 > 0$ there exists a Riemannian metric $g \in \mathcal{U}$ on $L$ such that with respect to $g$, any closed geodesic $c$ in $L$ of length $\ell(c) \leq \ell_0$ is noncontractible, nondegenerate, and such that $0 \leq \morse(c) \leq n - 1$. \end{lemma} \chapter{Holomorphic curves} \label{chp:holomorphic curves} \section{Holomorphic curves} In this section we define asymptotically cylindrical holomorphic curves (see \cref{def:asy cyl holomorphic curve}). The domain of such a curve is a punctured Riemann surface (see \cref{def:punctures asy markers cyl ends}), and the target is the completion of a symplectic cobordism (see \cref{def:symplectic cobordism}).
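Before giving the definitions, we record the simplest example to keep in mind, the trivial cylinder over a Reeb orbit; it lives in a symplectization rather than in the completion of a general symplectic cobordism, and it uses the notions defined below.
\begin{example}
Let $(M, \alpha)$ be a contact manifold with Reeb vector field $R$, let $J$ be a cylindrical almost complex structure on $\R \times M$ and let $\gamma \colon \R / T \Z \longrightarrow M$ be a Reeb orbit of period $T$. The \textbf{trivial cylinder} over $\gamma$ is the map
\begin{IEEEeqnarray*}{rrCl}
u \colon & \R \times S^1 & \longrightarrow & \R \times M \\
& (s,t) & \longmapsto & (T s, \gamma(T t)).
\end{IEEEeqnarray*}
Denote by $r$ the coordinate on the $\R$ factor of $\R \times M$. Since $J$ is cylindrical, it satisfies $J(\partial_r) = R$, and therefore
\begin{IEEEeqnarray*}{c+x*}
\dv u \circ j (\partial_s) = \dv u (\partial_t) = T R(\gamma(T t)) = T J (\partial_r) = J \circ \dv u (\partial_s),
\end{IEEEeqnarray*}
and similarly for $\partial_t$, so $u$ is holomorphic. Viewing the domain as the Riemann sphere punctured at $0$ and $\infty$, the trivial cylinder is an asymptotically cylindrical holomorphic curve in $\R \times M$ which is positively and negatively asymptotic to $\gamma$. Moreover, writing $u = (a,f)$, we have $\dv f(\partial_s) = 0$, so $f^* \edv \alpha = 0$, in agreement with \cref{lem:holomorphic curves in symplectizations} below.
\end{example}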
\begin{definition} \label{def:punctures asy markers cyl ends} Let $(\Sigma, j)$ be a Riemann surface. A \textbf{puncture} on $\Sigma$ is a point $z \in \Sigma$. Denote by $D$ the closed unit disk in $\C$ and by $Z^{\pm}$ the positive or negative half-cylinders: \begin{IEEEeqnarray*}{rCls+x*} Z^+ & \coloneqq & \R_{\geq 0} \times S^1, \\ Z^- & \coloneqq & \R_{\leq 0} \times S^1, \end{IEEEeqnarray*} with coordinates $(s,t) \in Z^{\pm}$ and complex structure $j$ given by $j(\partial_s) = \partial_t$. Consider the holomorphic maps \begin{IEEEeqnarray*}{rClCrCl} \psi^\pm \colon Z^{\pm} & \longrightarrow & D \setminus \{0\}, & \quad & \psi^\pm(s,t) & = & \exp(\mp 2 \pi (s + i t)). \end{IEEEeqnarray*} A positive or negative \textbf{cylindrical end} near $z$ is a holomorphic embedding $\phi^{\pm} \colon Z^{\pm} \longrightarrow \Sigma \setminus \{z\}$ of the form $\phi^{\pm} \coloneqq \varphi \circ \psi^\pm$, where $\varphi \colon D \longrightarrow \Sigma$ is a holomorphic embedding such that $\varphi(0) = z$. In this case, we say that $(s,t)$ are \textbf{cylindrical coordinates} near $z$. A \textbf{punctured Riemann surface} is a Riemann surface $(\Sigma, j)$ together with sets \begin{IEEEeqnarray*}{rClCrCl} \mathbf{z} & = & \mathbf{z}^+ \cup \mathbf{z}^-, & \quad & \mathbf{z}^{\pm} & = & \{z^{\pm}_1,\ldots,z^{\pm}_{p^{\pm}}\} \subset \Sigma, \quad \mathbf{z}^+ \cap \mathbf{z}^- = \varnothing, \end{IEEEeqnarray*} of positive and negative punctures. In this case, we denote $\dot{\Sigma} \coloneqq \Sigma \setminus \mathbf{z}$. Whenever we talk about cylindrical coordinates near a puncture, it is implicit that we mean the cylindrical coordinates induced from a positive or negative cylindrical end, according to whether the puncture is positive or negative. \end{definition} \begin{definition} \label{def:symplectic cobordism} A \textbf{symplectic cobordism} is a compact symplectic manifold $(X, \omega)$ with boundary $\partial X$, together with a $1$-form $\lambda$ defined on an open neighbourhood of $\partial X$, such that $\edv \lambda = \omega$ and the restriction of $\lambda$ to $\partial X$ is a contact form. Let $\partial^+ X$ (respectively $\partial^- X$) be the subset of $\partial X$ where the orientation defined by $\lambda|_{\partial X}$ as a contact form agrees with the boundary orientation (respectively negative boundary orientation). \end{definition} \begin{definition} \phantomsection\label{def:liouville cobordism} A \textbf{Liouville cobordism} is a symplectic cobordism $(X,\omega,\lambda)$ such that $\lambda$ is defined on all of $X$. \end{definition} \begin{example} A Liouville domain is a Liouville cobordism whose negative boundary is empty. \end{example} \begin{remark} We can define the completion of a symplectic cobordism $(X,\omega,\lambda)$ as in \cref{sec:completion of liouville domain}, with the difference that now we attach half-symplectizations to the negative and positive boundaries: \begin{IEEEeqnarray*}{c+x*} \hat{X} \coloneqq \R_{\leq 0} \times \partial^- X \cup_{\partial^- X} X \cup_{\partial^+ X} \R_{\geq 0} \times \partial^+ X. \end{IEEEeqnarray*} \end{remark} \begin{definition} \label{def:admissible} Let $(X,\omega,\lambda)$ be a symplectic cobordism and consider its completion $\hat{X}$. An almost complex structure $J$ on $\hat{X}$ is \textbf{cylindrical} if $J$ is compatible with $\hat{\omega}$ and $J$ is cylindrical (in the sense of \cref{def:J cylindrical}) on $\R_{\geq 0} \times \partial^+ X$ and $\R_{\leq 0} \times \partial^- X$. Denote by $\mathcal{J}(X)$ the set of such $J$.
\end{definition} \begin{definition} \label{def:asy cyl holomorphic curve} Let $(X, \omega, \lambda)$ be a symplectic cobordism, $J \in \mathcal{J}(X)$ be a cylindrical almost complex structure on $\hat{X}$ and $\Gamma^{\pm} = (\gamma^{\pm}_1, \ldots, \gamma^{\pm}_{p^{\pm}})$ be tuples of Reeb orbits in $\partial^{\pm} X$. Let $T_{i}^{\pm}$ denote the period of $\gamma_i^{\pm}$. An \textbf{asymptotically cylindrical holomorphic curve} in $\hat{X}$ from $\Gamma^-$ to $\Gamma^+$ is given by a Riemann surface $(\Sigma, j)$ with punctures $\mathbf{z}^{\pm} = \{z_1^{\pm}, \ldots, z^{\pm}_{p^{\pm}}\}$ together with a $J$-holomorphic map $u \colon \dot{\Sigma} \longrightarrow \hat{X}$, such that: \begin{enumerate} \item $u$ is positively asymptotic to $\gamma^{+}_i$ at $z^{+}_{i}$, i.e. there exist cylindrical coordinates $(s,t)$ near $z_i^+$ such that $u(s,t) \in \R_{\geq 0} \times \partial^+ X$ for $s$ big enough and \begin{IEEEeqnarray*}{rrCl} \lim_{s \to + \infty} & \pi_{\R} \circ u(s,t) & = & + \infty, \\ \lim_{s \to + \infty} & \pi_{\partial^+ X} \circ u(s,t) & = & \gamma^+_i(t T^+_i); \end{IEEEeqnarray*} \item $u$ is negatively asymptotic to $\gamma^{-}_i$ at $z^{-}_{i}$, i.e. there exist cylindrical coordinates $(s,t)$ near $z_i^-$ such that $u(s,t) \in \R_{\leq 0} \times \partial^- X$ for $s$ small enough and \begin{IEEEeqnarray*}{rrCl} \lim_{s \to - \infty} & \pi_{\R} \circ u(s,t) & = & - \infty, \\ \lim_{s \to - \infty} & \pi_{\partial^- X} \circ u(s,t) & = & \gamma^-_i(t T^-_i). \end{IEEEeqnarray*} \end{enumerate} \end{definition} We now explain some analytical properties of asymptotically cylindrical holomorphic curves. The key results are the maximum principle (\cref{thm:maximum principle holomorphic}) and a lemma comparing the energy of such a curve and the action of the asymptotic Reeb orbits (\cref{lem:action energy for holomorphic}). The following lemma is an auxiliary result which will allow us to prove that the energy (see \cref{def:energy of a asy cylindrical holomorphic curve}) is a nonnegative number. \begin{lemma} \label{lem:holomorphic curves in symplectizations} Let $(M, \alpha)$ be a contact manifold and $J$ be a cylindrical almost complex structure on $\R \times M$. If $u = (a, f) \colon \dot{\Sigma} \longrightarrow \R \times M$ is a holomorphic curve, then $f^* \edv \alpha \geq 0$ and \begin{IEEEeqnarray}{rCls+x*} - \edv a \circ j & = & f^* \alpha \plabel{eq:holomorphic curves in symplectizations 1} \\ \pi_{\xi} \circ \dv f \circ j & = & J_{\xi}({f}) \circ \pi_{\xi} \circ \dv f. \plabel{eq:holomorphic curves in symplectizations 2} \end{IEEEeqnarray} \end{lemma} \begin{proof} We prove equation \eqref{eq:holomorphic curves in symplectizations 1}: \begin{IEEEeqnarray*}{rCls+x*} - \edv a \circ j & = & - \edv r \circ \dv u \circ j & \quad [\text{by definition of $a$}] \\ & = & - \edv r \circ J({u}) \circ \dv u & \quad [\text{${u}$ is holomorphic}] \\ & = & \alpha \circ \dv u & \quad [\text{by \cref{lem:J cylindrical forms}}] \\ & = & f^* \alpha & \quad [\text{by definition of pullback}]. \end{IEEEeqnarray*} Equation \eqref{eq:holomorphic curves in symplectizations 2} follows by applying $\pi_{\xi} \colon T(\R \times M) \longrightarrow \xi$ to the equation $J \circ Tu = Tu \circ j$. 
We show that $f^* \edv \alpha \geq 0$: \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{f^*\edv \alpha(S, j (S))}\\ \quad & = & \edv \alpha (\dv f (S), \dv f \circ j (S)) & \quad [\text{by definition of pullback}] \\ & = & \edv \alpha (\pi_{\xi} \circ \dv f (S), \pi_{\xi} \circ \dv f \circ j (S)) & \quad [\text{$TM = \p{<}{}{R} \directsum \xi = \ker \edv \alpha \directsum \ker \alpha$}] \\ & = & \edv \alpha (\pi_{\xi} \circ \dv f (S), J_{\xi}(f) \circ \pi_{\xi} \circ \dv f (S)) & \quad [\text{by Equation \eqref{eq:holomorphic curves in symplectizations 2}}] \\ & = & \| \pi_{\xi} \circ \dv f (S) \|^2_{J_{\xi}({f}), \edv \alpha} & \quad [\text{since $J$ is cylindrical}] \\ & \geq & 0. & & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{lemma} \label{lem:laplacian} Let $\omega_{\dot{\Sigma}}$ be a symplectic form on $\dot{\Sigma}$ such that $g_{\dot{\Sigma}} \coloneqq \omega_{\dot{\Sigma}}(\cdot, j \cdot)$ is a Riemannian metric. Denote by $\operatorname{dvol}_{\dot{\Sigma}}$ the Riemannian volume element of $\dot{\Sigma}$. Let $a$ be a function on $\dot{\Sigma}$ and consider the Laplacian of $a$, $\Delta a \coloneqq \operatorname{div} (\nabla a)$. Then, $\omega_{\dot{\Sigma}} = \operatorname{dvol}_{\dot{\Sigma}}$ and \begin{IEEEeqnarray*}{c+x*} \Delta a \, \omega_{\dot{\Sigma}} = - \edv (\edv a \circ j). \end{IEEEeqnarray*} \end{lemma} \begin{proof} For any unit vector $S \in T \dot{\Sigma}$, if we define $T \coloneqq j (S)$ then $\{S, T\}$ is an orthonormal basis of $T \dot{\Sigma}$ and $\omega_{\dot{\Sigma}}(S, T) = 1$, which implies $\omega_{\dot{\Sigma}} = \operatorname{dvol}_{\dot{\Sigma}}$. We now prove the formula for the Laplacian. \begin{IEEEeqnarray*}{rCls+x*} \Delta a \, \omega_{\dot{\Sigma}} & = & \operatorname{div} (\nabla a) \omega_{\dot{\Sigma}} & \quad [\text{by definition of Laplacian}] \\ & = & \ldv{\nabla a} \omega_{\dot{\Sigma}} & \quad [\text{by definition of divergence and $\omega_{\dot{\Sigma}} = \operatorname{dvol}_{\dot{\Sigma}}$}] \\ & = & \edv \iota_{\nabla a} \omega_{\dot{\Sigma}} & \quad [\text{by the Cartan magic formula}]. \end{IEEEeqnarray*} It remains to show that $\iota_{\nabla a} \omega_{\dot{\Sigma}} = - \edv a \circ j$. \begin{IEEEeqnarray*}{rCls+x*} \iota_{\nabla a} \omega_{\dot{\Sigma}} (S) & = & \omega_{\dot{\Sigma}} (\nabla a, S) & \quad [\text{by definition of interior product}] \\ & = & - \omega_{\dot{\Sigma}} (\nabla a, j \circ j (S)) & \quad [\text{by definition of almost complex structure}] \\ & = & - g_{\dot{\Sigma}} (\nabla a, j (S)) & \quad [\text{by definition of $g_{\dot{\Sigma}}$}] \\ & = & - \edv a \circ j (S) & \quad [\text{by definition of gradient}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{lemma}[maximum principle] \label{thm:maximum principle holomorphic} Assume that $\dot{\Sigma}$ is connected. Let $(M, \alpha)$ be a contact manifold and $J$ be a cylindrical almost complex structure on $\R \times M$. If \begin{IEEEeqnarray*}{c+x*} u = (a, f) \colon \dot{\Sigma} \longrightarrow \R \times M \end{IEEEeqnarray*} is a holomorphic curve and $a \colon \dot{\Sigma} \longrightarrow \R$ has a local maximum then $a$ is constant. \end{lemma} \begin{proof} Define $L = -\Delta$. The operator $L$ is a linear elliptic partial differential operator (as in \cite[p.~312]{evansPartialDifferentialEquations2010}). We show that $L a \leq 0$. For this, choose $\omega_{\dot{\Sigma}}$ a symplectic structure on $\dot{\Sigma}$ such that $g_{\dot{\Sigma}} \coloneqq \omega_{\dot{\Sigma}}(\cdot, j \cdot)$ is a Riemannian metric. 
\begin{IEEEeqnarray*}{rCls+x*} L a \, \omega_{\dot{\Sigma}} & = & - \Delta a \, \omega_{\dot{\Sigma}} & \quad [\text{by definition of $L$}] \\ & = & \edv (\edv a \circ j) & \quad [\text{by \cref{lem:laplacian}}] \\ & = & - \edv f^* \alpha & \quad [\text{by \cref{lem:holomorphic curves in symplectizations}}] \\ & = & - f^* \edv \alpha & \quad [\text{by naturality of exterior derivative}] \\ & \leq & 0 & \quad [\text{by \cref{lem:holomorphic curves in symplectizations}}]. \end{IEEEeqnarray*} This shows that $L a \leq 0$. By the strong maximum principle for elliptic partial differential operators in \cite[p.~349-350]{evansPartialDifferentialEquations2010}, if $a$ has a local maximum then $a$ is constant. \end{proof} \begin{lemma} \label{lem:integrand of energy is well-defined} Let $(V,j)$ be a complex vector space of real dimension 2, $(W,J,\omega,g)$ be a complex vector space with a symplectic form $\omega$ and inner product $g = \omega(\cdot,J \cdot)$, and $\phi \colon V \longrightarrow W$ be a linear map. For each choice of $s \in V$, define \begin{IEEEeqnarray*}{rCls+x*} t & \coloneqq & js, \\ \{\sigma, \tau\} & \coloneqq & \text{basis of } V^* \text{ dual to } \{s,t\}, \\ \omega_V & \coloneqq & \sigma \wedge \tau, \\ \| \phi \|^2 & \coloneqq & \| \phi s \|^2 + \|\phi t\|^2. \end{IEEEeqnarray*} Then, \begin{IEEEeqnarray*}{c+x*} \frac{1}{2} \| \phi \|^2 \omega_V = (\phi ^{1,0})^* \omega - (\phi ^{0,1})^* \omega, \end{IEEEeqnarray*} which is independent of the choice of $s$. \end{lemma} \begin{proof} Recall the definitions of $\phi^{1,0}$ and $\phi^{0,1}$: \begin{IEEEeqnarray*}{rCls+x*} \phi^{1,0} & \coloneqq & \frac{1}{2} (\phi - J \circ \phi \circ j), \\ \phi^{0,1} & \coloneqq & \frac{1}{2} (\phi + J \circ \phi \circ j). \end{IEEEeqnarray*} These equations imply that $\phi^{1,0}$ is holomorphic, while $\phi^{0,1}$ is anti-holomorphic: \begin{IEEEeqnarray}{c+x*} \plabel{eq:phi holo and anti holo} \phi^{1,0} \circ j = J \circ \phi^{1,0}, \qquad \phi^{0,1} \circ j = - J \circ \phi^{0,1}. \end{IEEEeqnarray} Finally, we compute \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\| \phi \|^2 \omega_V(s,js)} \\ \quad & = & \| \phi (s) \|^2 + \| \phi \circ j (s) \|^2 & \quad [\text{definitions of $\|\phi\|$, $\omega_V$}] \\ & = & \| \phi ^{1,0} (s) + \phi ^{0,1} (s) \|^2 + \| \phi ^{1,0} \circ j (s) + \phi ^{0,1} \circ j (s) \|^2 & \quad [\text{since $\phi = \phi^{1,0} + \phi^{0,1}$}] \\ & = & \| \phi ^{1,0} (s) + \phi ^{0,1} (s) \|^2 + \| J \circ \phi ^{1,0} (s) - J \circ \phi ^{0,1} (s) \|^2 & \quad [\text{by \eqref{eq:phi holo and anti holo}}] \\ & = & \| \phi ^{1,0} (s) + \phi ^{0,1} (s) \|^2 + \| \phi ^{1,0} (s) - \phi ^{0,1} (s) \|^2 & \quad [\text{since $g = \omega(\cdot, J \cdot)$}] \\ & = & 2 \| \phi ^{1,0} (s) \|^2 + 2 \| \phi ^{0,1} (s) \|^2 & \quad [\text{by the parallelogram law}] \\ & = & 2 \omega (\phi ^{1,0} (s), J \circ \phi ^{1,0} (s)) + 2 \omega (\phi ^{0,1} (s), J \circ \phi ^{0,1} (s)) & \quad [\text{since $g = \omega(\cdot, J \cdot)$}] \\ & = & 2 \omega (\phi ^{1,0} (s), \phi ^{1,0} \circ j (s)) - 2 \omega (\phi ^{0,1} (s), \phi ^{0,1} \circ j (s)) & \quad [\text{by \eqref{eq:phi holo and anti holo}}] \\ & = & 2 (\phi ^{1,0})^* \omega (s,js) - 2 (\phi ^{0,1})^* \omega (s,js) & \quad [\text{by definition of pullback}]. 
& \qedhere \end{IEEEeqnarray*} \end{proof} \begin{definition} \phantomsection\label{def:energy of a asy cylindrical holomorphic curve} Define a piecewise smooth $2$-form $\tilde{\omega} \in \Omega^2(\hat{X})$ by \begin{IEEEeqnarray*}{c+x*} \tilde{\omega} \coloneqq \begin{cases} \edv \lambda|_{\partial^+ X} & \text{on } \R_{\geq 0} \times \partial^+ X, \\ \omega & \text{on } X, \\ \edv \lambda|_{\partial^- X} & \text{on } \R_{\leq 0} \times \partial^- X. \end{cases} \end{IEEEeqnarray*} If $u$ is an asymptotically cylindrical holomorphic curve, its \textbf{energies} are given by \begin{IEEEeqnarray*}{rClCl} E_{\hat{\omega}}(u) & \coloneqq & \int_{\dot{\Sigma}}^{} u^* \hat{\omega}, \\ E_{\tilde{\omega}}(u) & \coloneqq & \int_{\dot{\Sigma}}^{} u^* \tilde{\omega}. \end{IEEEeqnarray*} \end{definition} We point out that if $u$ has positive punctures, then $E_{\hat{\omega}}(u) = + \infty$. Whenever we talk about the energy of an asymptotically cylindrical holomorphic curve, we mean the $E_{\tilde{\omega}}$ energy, unless otherwise specified. We included $E_{\hat{\omega}}$ in the definition above because we will need to use it in \cref{thm:lagrangian vs g tilde} to compare the Lagrangian and the McDuff--Siegel capacities. In \cref{lem:energy wrt different forms}, we compare $E_{\hat{\omega}}$ and $E_{\tilde{\omega}}$. \begin{lemma} \label{lem:action energy for holomorphic} If $(X, \omega, \lambda)$ is a Liouville cobordism then \begin{IEEEeqnarray*}{c+x*} 0 \leq E_{\tilde{\omega}}(u) = \mathcal{A}(\Gamma^+) - \mathcal{A}(\Gamma^-). \end{IEEEeqnarray*} \end{lemma} \begin{proof} Since $(X, \omega, \lambda)$ is a Liouville cobordism, $E_{\tilde{\omega}}(u)$ is given by \begin{IEEEeqnarray*}{rCls+x*} E_{\tilde{\omega}}(u) & = & \int_{\dot{\Sigma}}^{} u^* \tilde{\omega} \\ & = & \int_{u^{-1}(\R_{\leq 0} \times \partial^- X)} u^* \edv \lambda|_{\partial^- X} + \int_{u^{-1}(X)} u^* \edv \lambda + \int_{u^{-1}(\R_{\geq 0} \times \partial^+ X)} u^* \edv \lambda|_{\partial^+ X}. \end{IEEEeqnarray*} Here, the first and third terms are nonnegative by \cref{lem:holomorphic curves in symplectizations}, while the second term is nonnegative by \cref{lem:integrand of energy is well-defined}. This shows that $E_{\tilde{\omega}}(u) \geq 0$. Since $u$ is asymptotic to $\Gamma^{\pm}$ and by Stokes' theorem, $E_{\tilde{\omega}}(u) = \mathcal{A}(\Gamma^+) - \mathcal{A}(\Gamma^-)$. \end{proof} \begin{lemma} \label{lem:energy wrt different forms} Assume that $\Sigma$ has no positive punctures. Let $(X, \omega, \lambda)$ be a symplectic cobordism, and $J \in \mathcal{J}(X)$ be a cylindrical almost complex structure on $\hat{X}$. Assume that the canonical symplectic embedding \begin{align*} (\R_{\leq 0} \times \partial^- X, \edv (e^r \lambda|_{\partial^- X})) \longrightarrow (\hat{X}, \hat{\omega}) & \\ \intertext{can be extended to a symplectic embedding} (\R_{\leq K} \times \partial^- X, \edv (e^r \lambda|_{\partial^- X})) \longrightarrow (\hat{X}, \hat{\omega}) & \end{align*} for some $K > 0$. Let $u \colon \dot{\Sigma} \longrightarrow \hat{X}$ be a $J$-holomorphic curve which is negatively asymptotic to a tuple of Reeb orbits $\Gamma$ of $\partial^- X$. Consider the energies $E_{\hat{\omega}}(u)$ and $E_{\tilde{\omega}}(u)$ of \cref{def:energy of a asy cylindrical holomorphic curve}. Then, \begin{IEEEeqnarray}{rCls+x*} \mathcal{A}(\Gamma) & \leq & \frac{1 }{e^K - 1} E_{\tilde{\omega}}(u), \plabel{eq:action is bounded by vertical energy} \\ E_{\hat{\omega}}(u) & \leq & \frac{e^K}{e^K - 1} E_{\tilde{\omega}}(u). 
\plabel{eq:energy is bounded by vertical energy} \end{IEEEeqnarray} \end{lemma} \begin{proof} It is enough to show that \begin{IEEEeqnarray}{rCls+x*} E_{\hat{\omega}}(u) - E_{\tilde{\omega}}(u) & = & \mathcal{A}(\Gamma), \plabel{eq:vertical energy bounds 1} \\ E_{\hat{\omega}}(u) & \geq & e^K \mathcal{A}(\Gamma), \plabel{eq:vertical energy bounds 2} \end{IEEEeqnarray} since these equations imply Equations \eqref{eq:action is bounded by vertical energy} and \eqref{eq:energy is bounded by vertical energy}. Since $u$ has no positive punctures, the maximum principle (\cref{thm:maximum principle holomorphic}) implies that the image of $u$ is contained in $\R_{\leq 0} \times \partial^- X \cup X$. We prove Equation \eqref{eq:vertical energy bounds 1}. For simplicity, denote $M = \partial^- X$ and $\alpha = \lambda|_{\partial^- X}$. \begin{IEEEeqnarray*}{rCls+x*} E_{\hat{\omega}}(u) - E_{\tilde{\omega}}(u) & = & \int_{\dot{\Sigma}}^{} u^* (\hat{\omega} - \tilde{\omega}) & \quad [\text{by definition of $E_{\hat{\omega}}$ and $E_{\tilde{\omega}}$}] \\ & = & \int_{u^{-1}(\R_{\leq 0} \times M)}^{} u^* \edv ((e^r - 1) \alpha) & \quad [\text{by definition of $\hat{\omega}$ and $\tilde{\omega}$}] \\ & = & \mathcal{A}(\Gamma) & \quad [\text{by Stokes' theorem}]. \end{IEEEeqnarray*} We prove Equation \eqref{eq:vertical energy bounds 2}. \begin{IEEEeqnarray*}{rCls+x*} E_{\hat{\omega}}(u) & = & \int_{\dot{\Sigma}}^{} u^* \hat{\omega} & \quad [\text{by definition of $E_{\hat{\omega}}$}] \\ & \geq & \int_{u^{-1}(\R_{\leq K} \times M)}^{} u^* \edv (e^r \alpha) & \quad [\text{by definition of $\hat{\omega}$ and $u^* \hat{\omega} \geq 0$}] \\ & = & e^K \int_{u^{-1}( \{K\} \times M)}^{} u^* \alpha & \quad [\text{by Stokes' theorem}] \\ & = & e^K \int_{u^{-1}( \R_{\leq K} \times M)}^{} u^* \edv \alpha + e^K \mathcal{A}(\Gamma) & \quad [\text{by Stokes' theorem}] \\ & \geq & e^K \mathcal{A}(\Gamma) & \quad [\text{by \cref{lem:holomorphic curves in symplectizations}}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \section{Moduli spaces of holomorphic curves} \label{sec:moduli spaces of holomorphic curves} If $(M, \alpha)$ is a contact manifold, we denote by $\mathcal{J}(M)$ the set of cylindrical almost complex structures on $\R \times M$ (see \cref{def:J cylindrical}). If $(X, \omega, \lambda)$ is a symplectic cobordism, we denote by $\mathcal{J}(X)$ the set of cylindrical almost complex structures on $\hat{X}$ (see \cref{def:admissible}). If $J^{\pm} \in \mathcal{J}(\partial^{\pm} X)$ is a cylindrical almost complex structure on $\R \times \partial^{\pm} X$, then we define the following subsets of $\mathcal{J}(X)$: \begin{IEEEeqnarray*}{rCls+x*} \mathcal{J}^{J^+}(X) & \coloneqq & \{ J \in \mathcal{J}(X) \mid J = J^{+} \text{ on } \R_{\geq 0} \times \partial^+ X \}, \\ \mathcal{J}_{J^-}^{\hphantom{J^+}}(X) & \coloneqq & \{ J \in \mathcal{J}(X) \mid J = J^{-} \text{ on } \R_{\leq 0} \times \partial^- X \}, \\ \mathcal{J}^{J^+}_{J^-}(X) & \coloneqq & \{ J \in \mathcal{J}(X) \mid J = J^{+} \text{ on } \R_{\geq 0} \times \partial^+ X \text{ and } J = J^{-} \text{ on } \R_{\leq 0} \times \partial^- X \}. \end{IEEEeqnarray*} Let $\Gamma^{\pm} = (\gamma^{\pm}_1, \ldots, \gamma^{\pm}_{p ^{\pm}})$ be tuples of Reeb orbits in $\partial^{\pm} X$ and $J \in \mathcal{J}(X)$ be a cylindrical almost complex structure on $\hat{X}$.
Define a moduli space \begin{IEEEeqnarray*}{c+x*} \mathcal{M}^{J}_{X}(\Gamma^+, \Gamma^-) \coloneqq \left\{ (\Sigma, u) \ \middle\vert \begin{array}{l} \Sigma \text{ is a connected closed Riemann surface} \\ \text{of genus $0$ with punctures $\mathbf{z}^{\pm} = \{z^{\pm}_1, \ldots, z^{\pm}_{p ^{\pm}}\}$,} \\ u \colon \dot{\Sigma} \longrightarrow \hat{X} \text{ is as in \cref{def:asy cyl holomorphic curve}} \end{array} \right\} / \sim, \end{IEEEeqnarray*} where $(\Sigma_0, u_0) \sim (\Sigma_1, u_1)$ if and only if there exists a biholomorphism $\phi \colon \Sigma_0 \longrightarrow \Sigma_1$ such that $u_1 \circ \phi = u_0$ and $\phi(z^{\pm}_{0,i}) = z^{\pm}_{1,i}$ for every $i = 1,\ldots,p ^{\pm}$. If $\Gamma^{\pm} = (\gamma^{\pm}_1, \ldots, \gamma^{\pm}_{p ^{\pm}})$ is a tuple of Reeb orbits on a contact manifold $M$ and $J \in \mathcal{J}(M)$, we define a moduli space $\mathcal{M}_{M}^{J}(\Gamma^+, \Gamma^-)$ of holomorphic curves in $\R \times M$ analogously. Since $J$ is invariant with respect to translations in the $\R$ direction, $\mathcal{M}_{M}^{J}(\Gamma^+, \Gamma^-)$ admits an action of $\R$ by composition on the target by a translation. One can try to show that the moduli space $\mathcal{M}_{X}^{J}(\Gamma^+, \Gamma^-)$ is transversely cut out by showing that the relevant linearized Cauchy--Riemann operator is surjective at every point of the moduli space. In this case, the moduli space is an orbifold whose dimension is given by the Fredholm index of the linearized Cauchy--Riemann operator. However, since the curves in $\mathcal{M}_{X}^{J}(\Gamma^+, \Gamma^-)$ are not necessarily simple, this proof will in general not work, and we cannot say that the moduli space is an orbifold. However, the Fredholm theory part of the proof still works, which means that we still have a dimension formula. In this case the expected dimension given by the Fredholm theory is usually called a virtual dimension. For the moduli space above, the virtual dimension at a point $u$ is given by (see \cite[Section 4]{bourgeoisCoherentOrientationsSymplectic2004}) \begin{IEEEeqnarray*}{c} \operatorname{virdim}_u \mathcal{M}_{X}^{J}(\Gamma^+, \Gamma^-) = (n - 3)(2 - p^+ - p^-) + c_1^{\tau}(u^* T \hat{X}) + \conleyzehnder^{\tau} (\Gamma^+) - \conleyzehnder^{\tau} (\Gamma^-), \end{IEEEeqnarray*} where $\tau$ is a unitary trivialization of the contact distribution over each Reeb orbit. We now discuss curves satisfying a tangency constraint. Our presentation is based on \cite[Section 2.2]{mcduffSymplecticCapacitiesUnperturbed2022} and \cite[Section 3]{cieliebakPuncturedHolomorphicCurves2018}. Let $(X,\omega,\lambda)$ be a symplectic cobordism and $x \in \itr X$. A \textbf{symplectic divisor} through $x$ is a germ of a $2$-codimensional symplectic submanifold $D \subset X$ containing $x$. A cylindrical almost complex structure $J \in \mathcal{J}(X)$ is \textbf{compatible} with $D$ if $J$ is integrable near $x$ and $D$ is holomorphic with respect to $J$. We denote by $\mathcal{J}(X,D)$ the set of such almost complex structures. In this case, there are complex coordinates $(z^1, \ldots, z^n)$ near $x$ such that $D$ is given by $h(z_1,\ldots,z_n) = 0$, where $h(z_1,\ldots,z_n) = z_1$. Let $u \colon \Sigma \longrightarrow X$ be a $J$-holomorphic curve together with a marked point $w \in \Sigma$. 
For $k \geq 1$, we say that $u$ has \textbf{contact order $k$} to $D$ at $x$ if $u(w) = x$ and\begin{IEEEeqnarray*}{c+x*} (h \circ u \circ \varphi)^{(1)}(0) = \cdots = (h \circ u \circ \varphi)^{(k-1)}(0) = 0, \end{IEEEeqnarray*} for some local biholomorphism $\varphi \colon (\C,0) \longrightarrow (\Sigma, w)$. We point out that the condition of having ``contact order $k$'' as written above is equal to the condition of being ``tangent of order $k-1$'' as defined in \cite[Section 3]{cieliebakPuncturedHolomorphicCurves2018}. Following \cite{mcduffSymplecticCapacitiesUnperturbed2022}, we will use the notation $\p{<}{}{\mathcal{T}^{(k)}x}$ to denote moduli spaces of curves which have contact order $k$, i.e. we will denote them by $\mathcal{M}_{X}^{J}(\Gamma^+, \Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x}$ and $\mathcal{M}_{M}^{J}(\Gamma^+, \Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x}$. The virtual dimension is given by (see \cite[Equation (2.2.1)]{mcduffSymplecticCapacitiesUnperturbed2022}) \begin{IEEEeqnarray*}{l} \operatorname{virdim}_u \mathcal{M}_{X}^{J}(\Gamma^+, \Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x} \\ \quad = (n - 3)(2 - p^+ - p^-) + c_1^{\tau}(u^* T \hat{X}) + \conleyzehnder^{\tau} (\Gamma^+) - \conleyzehnder^{\tau} (\Gamma^-) - 2n - 2k + 4. \end{IEEEeqnarray*} The following theorem says that moduli spaces of simple, asymptotically cylindrical holomorphic curves are transversely cut out. \begin{theorem}[{\cite[Proposition 6.9]{cieliebakSymplecticHypersurfacesTransversality2007}}] \label{thm:transversality with tangency} Let $(X,\omega,\lambda)$ be a symplectic cobordism, $x \in \itr X$ and $D$ be a symplectic divisor at $x$. There exists a comeagre set $\mathcal{J}_{\mathrm{reg}}(X,D) \subset \mathcal{J}(X,D)$ with the following property. If $J \in \mathcal{J}_{\mathrm{reg}}(X,D)$ is a regular almost complex structure, $\Gamma^{\pm} = (\gamma^\pm_1,\ldots,\gamma^\pm_{p^{\pm}})$ is a tuple of Reeb orbits of $\partial^{\pm} X$ and $A \in H_2(X,\Gamma^+ \cup \Gamma^-)$, then the moduli space $\mathcal{M}_{X,A,s}^J(\Gamma^+,\Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x} \subset \mathcal{M}_{X}^J(\Gamma^+,\Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x}$ of simple curves representing the homology class $A$ is a manifold of dimension \begin{IEEEeqnarray*}{l} \dim \mathcal{M}_{X,A,s}^J(\Gamma^+,\Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x} \\ \quad = (n-3)(2 - p^+ - p^-) + 2 c_1^{\tau}(TX) \cdot A + \conleyzehnder^{\tau}(\Gamma^+) - \conleyzehnder^{\tau}(\Gamma^-) - 2n - 2k + 4. \end{IEEEeqnarray*} \end{theorem} We will now use this transversality result to state two lemmas from \cite{cieliebakPuncturedHolomorphicCurves2018}, namely \cref{lem:punctures and tangency,lem:punctures and tangency simple}, which we will use in the proof of \cref{thm:lagrangian vs g tilde}. For the sake of completeness, we will also give proofs of the results. We point out that in order to achieve the conditions in the statement of the lemmas, we can use a metric as in \cref{lem:geodesics lemma CM abs}. Finally, notice that \cref{lem:punctures and tangency} generalizes \cref{lem:punctures and tangency simple} to the case where the curve is not necessarily simple. \begin{lemma}[{\cite[Lemma 3.2]{cieliebakPuncturedHolomorphicCurves2018}}] \phantomsection\label{lem:punctures and tangency simple} Let $(L,g)$ be an $n$-dimensional Riemannian manifold with the property that for some $\ell_0 > 0$, all closed geodesics $\gamma$ of length $\ell(\gamma) \leq \ell_0$ are noncontractible and nondegenerate and have Morse index $\morse(\gamma) \leq n - 1$. 
Let $x \in T^*L$ and $D$ be a symplectic divisor through $x$. For generic $J$ every simple punctured $J$-holomorphic sphere $C$ in $T^*L$ which is asymptotic at the punctures to geodesics of length $\leq \ell_0$ and which has contact order $k$ to $D$ at $x$ must have at least $k + 1$ punctures. \end{lemma} \begin{proof} Let $(\gamma_1, \ldots, \gamma_p)$ be the tuple of asymptotic Reeb orbits of $C$, which have corresponding geodesics also denoted by $(\gamma_1, \ldots, \gamma_p)$. By assumption, $\morse(\gamma_i) \leq n - 1$ for every $i = 1,\ldots,p$. Choose a trivialization $\tau$ of $C^* T T^*L$ such that the induced trivialization over the asymptotic Reeb orbits is as in \cref{thm:index of geodesic or reeb orbit isometric triv}. We show that $p \geq k + 1$. \begin{IEEEeqnarray*}{rCls+x*} 0 & \leq & \dim_{C} \mathcal{M}_{X,s}^J(\Gamma^+,\Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x} \\ & = & (n-3)(2-p) + 2 c_1^{\tau}(TX) \cdot [C] + \sum_{i=1}^{p} \conleyzehnder^{\tau}(\gamma_i) - 2n - 2k + 4 \\ & = & (n-3)(2-p) + \sum_{i=1}^{p} \morse(\gamma_i) - 2n - 2k + 4 \\ & \leq & (n-3)(2-p) + \sum_{i=1}^{p} (n-1) - 2n - 2k + 4 \\ & = & 2 (p - 1 - k). & & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{lemma}[{\cite[Corollary 3.3]{cieliebakPuncturedHolomorphicCurves2018}}] \label{lem:punctures and tangency} Let $(L,g)$ be an $n$-dimensional Riemannian manifold with the property that for some $\ell_0 > 0$, all closed geodesics $\gamma$ of length $\ell(\gamma) \leq \ell_0$ are noncontractible and nondegenerate and have Morse index $\morse(\gamma) \leq n - 1$. Let $x \in T^*L$ and $D$ be a symplectic divisor through $x$. For generic $J$ every (not necessarily simple) punctured $J$-holomorphic sphere $\tilde{C}$ in $T^*L$ which is asymptotic at the punctures to geodesics of length $\leq \ell_0$ and which has contact order $\tilde{k}$ to $D$ at $x$ must have at least $\tilde{k} + 1$ punctures. \end{lemma} \begin{proof} Let $\tilde{z}_1,\ldots,\tilde{z}_{\tilde{p}}$ be the punctures of $\tilde{C}$. Then $\tilde{C}$ is a map $\tilde{C} \colon S^2 \setminus \{\tilde{z}_1,\ldots,\tilde{z}_{\tilde{p}}\} \longrightarrow T^*L$ which has contact order $\tilde{k}$ at $\tilde{z}_0$ to $D$, for some $\tilde{z}_0 \in S^2 \setminus \{\tilde{z}_1,\ldots,\tilde{z}_{\tilde{p}}\}$. There exists a $d$-fold branched cover $\phi \colon S^2 \longrightarrow S^2$ and a simple punctured $J$-holomorphic sphere $C$ with $p$ punctures $\{z_1,\ldots,z_p\}$ which has contact order $k$ at $z_0 = \phi(\tilde{z}_0)$ to $D$, such that the following diagram commutes: \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} S^2 \setminus \{\tilde{z}_1,\ldots,\tilde{z}_{\tilde{p}}\} \ar[d, swap, "\phi"] \ar[rd, "\tilde{C}"] \\ S^2 \setminus \{z_1,\ldots,z_p\} \ar[r, swap, "C"] & T^*L \end{tikzcd} \end{IEEEeqnarray*} Define $b = \operatorname{ord}(\tilde{z}_0)$. Since the asymptotic Reeb orbits of $\tilde{C}$ are multiples of the asymptotic Reeb orbits of $C$, we have that the Reeb orbits of $C$ all have period less or equal to $\ell_0$. Therefore, applying \cref{lem:punctures and tangency simple} to $C$ we conclude that $p - 1 \geq k$. We show that $k b \geq \tilde{k}$. For this, choose holomorphic coordinates centred at $z_0 \in S^2$, $\tilde{z}_0 \in S^2$, and $x \in X$ such that $D$ is given by $h(z_1,\ldots,z_n) = 0$, where $h(z_1,\ldots,z_n) = z_1$. 
Then, with respect to these coordinates
\begin{IEEEeqnarray*}{rCls+x*}
	\phi(z)      & = & z^b, \\
	h \circ C(z) & = & \sum_{j=1}^{+\infty} a_j z^j,
\end{IEEEeqnarray*}
and therefore
\begin{IEEEeqnarray*}{c+x*}
	h \circ \tilde{C}(z) = h \circ C \circ \phi(z) = \sum_{j=1}^{+\infty} a_j z^{b j}.
\end{IEEEeqnarray*}
Since $\tilde{C}$ has contact order $\tilde{k}$ to $D$,
\begin{IEEEeqnarray*}{c+x*}
	0 = (h \circ \tilde{C})^{(r)}(0) = \sum_{j \colon b j \geq r} a_j \, \frac{(b j)!}{(b j - r)!} \, z^{b j - r} \Big|_{z = 0}
\end{IEEEeqnarray*}
for every $r = 1,\ldots,\tilde{k}-1$. Evaluating at $z = 0$, the only term which can survive is the one with $b j = r$. Therefore, for every $j \in \Z_{\geq 1}$, if there exists $r = 1,\ldots,\tilde{k}-1$ such that $b j = r$, then $a_j = 0$. In other words $a_1 = \cdots = a_\ell = 0$, where
\begin{IEEEeqnarray*}{rCll}
	\ell & = & \max & \{ j \in \Z_{\geq 1} \mid b j \leq \tilde{k} - 1 \} \\
	     & = & \min & \{ j \in \Z_{\geq 1} \mid b (j+1) \geq \tilde{k} \}.
\end{IEEEeqnarray*}
In particular, $C$ has contact order at least $\ell + 1$ to $D$ at $x$, so $k \geq \ell + 1$ and we conclude that $b k \geq b (\ell + 1) \geq \tilde{k}$.
We show that $\tilde{p} \geq (p - 2) d + b + 1$.
\begin{IEEEeqnarray*}{rCls+x*}
	2 d - 2 & = & \sum_{\tilde{z} \in S^2}^{} (\operatorname{ord}(\tilde{z}) - 1) & \quad [\text{by the Riemann--Hurwitz formula}] \\
	& \geq & \sum_{i=1}^{\tilde{p}} (\operatorname{ord}(\tilde{z}_i) - 1) + \operatorname{ord}(\tilde{z}_0) - 1 & \quad [\text{since $\operatorname{ord}(z) \geq 1$ for every $z \in S^2$}] \\
	& = & p d - \tilde{p} + \operatorname{ord}(\tilde{z}_0) - 1 & \quad [\text{since $\phi(\{\tilde{z}_1,\ldots,\tilde{z}_{\tilde{p}}\}) = \{z_1,\ldots,z_p\}$}] \\
	& = & p d - \tilde{p} + b - 1 & \quad [\text{by definition of $b$}].
\end{IEEEeqnarray*}
Since $\phi$ is a $d$-fold covering, $d \geq b$. Combining all the facts which we have proven, we conclude that
\begin{IEEEeqnarray*}{rCls+x*}
	\tilde{p} & \geq & (p-2)d + b + 1 & \quad [\text{by the last computation}] \\
	& \geq & (k-1)d + b + 1 & \quad [\text{since $p - 1 \geq k$}] \\
	& \geq & k b + 1 & \quad [\text{since $d \geq b$}] \\
	& \geq & \tilde{k} + 1 & \quad [\text{since $k b \geq \tilde{k}$}]. & \qedhere
\end{IEEEeqnarray*}
\end{proof}
\section{SFT compactness}
\label{sec:sft compactness}
In this section we present the SFT compactness theorem, which describes the compactifications of the moduli spaces of the previous section. This theorem was first proven by Bourgeois--Eliashberg--Hofer--Wysocki--Zehnder \cite{bourgeoisCompactnessResultsSymplectic2003}. Cieliebak--Mohnke \cite{cieliebakCompactnessPuncturedHolomorphic2005} have given a proof of this theorem using different methods. Our presentation is based primarily on \cite{cieliebakPuncturedHolomorphicCurves2018} and \cite{mcduffSymplecticCapacitiesUnperturbed2022}.
\begin{definition}
	\label{def:nodal riemann surface}
	A \textbf{nodal Riemann surface} is a Riemann surface $(\Sigma, j)$ together with a set $\mathbf{n}$ of \textbf{nodes} of the form $\mathbf{n} = \{n_1^+, n_1^-, \ldots, n_k^+, n_k^-\}$.
\end{definition}
\begin{definition}
	\label{def:nodal holomorphic curve}
	Let $(\Sigma, j)$ be a Riemann surface with a set $\mathbf{n} = \{n_1^+, n_1^-, \ldots, n_k^+, n_k^-\}$ of nodes and $(X, J)$ be an almost complex manifold. A \textbf{nodal $J$-holomorphic curve} is a $J$-holomorphic curve $u \colon (\Sigma, j) \longrightarrow (X, J)$ such that $u(n^+_i) = u(n^-_i)$ for every $i = 1, \ldots, k$.
\end{definition}
Let $(X, \omega, \lambda)$ be a symplectic cobordism and choose almost complex structures $J^{\pm} \in \mathcal{J}(\partial^{\pm} X)$ and $J \in \mathcal{J}^{J^+}_{J^-}(X)$.
Let $\Gamma^{\pm} = (\gamma^{\pm}_1, \ldots, \gamma^{\pm}_{p ^{\pm}})$ be a tuple of Reeb orbits in $\partial^{\pm} X$. \begin{definition} \label{def:sft compactification} For $1 \leq L \leq N$, let $\alpha^{\pm} \coloneqq \lambda|_{\partial^{\pm} X}$ and define \begin{IEEEeqnarray*}{rCl} (X^{\nu}, \omega^\nu, \tilde{\omega}^{\nu}, J^{\nu}) & \coloneqq & \begin{cases} (\R \times \partial^- X, \edv(e^r \alpha^-), \edv \alpha^- , J^-) & \text{if } \nu = 1 , \ldots, L - 1, \\ (\hat{X} , \hat{\omega} , \tilde{\omega} , J ) & \text{if } \nu = L , \\ (\R \times \partial^+ X, \edv(e^r \alpha^+), \edv \alpha^+ , J^+) & \text{if } \nu = L+1 ,\ldots ,N , \end{cases} \\ (X^*, \omega^*, \tilde{\omega}^*, J^*) & \coloneqq & \bigcoproduct_{\nu = 1}^N (X^{\nu}, \omega^\nu, \tilde{\omega}^{\nu}, J^{\nu}). \end{IEEEeqnarray*} The moduli space of \textbf{holomorphic buildings}, denoted $\overline{\mathcal{M}}^{J}_X(\Gamma^+, \Gamma^-)$, is the set of tuples $F = (F^1, \ldots, F^N)$, where $F^{\nu} \colon \dot{\Sigma}^\nu \longrightarrow X^\nu$ is an asymptotically cylindrical nodal $J^{\nu}$-holomorphic curve in $X^{\nu}$ with sets of asymptotic Reeb orbits $\Gamma^{\pm}_{\nu}$. Here, each $F^{\nu}$ is possibly disconnected and if $X^{\nu}$ is a symplectization then $F^{\nu}$ is only defined up to translation in the $\R$ direction. We assume in addition that $F$ satisfies the following conditions. \begin{enumerate} \item The sets of asymptotic Reeb orbits $\Gamma_{\nu}^{\pm}$ are such that \begin{IEEEeqnarray*}{rCls+x*} \Gamma^+_{\nu} & = & \Gamma^-_{\nu + 1} \quad \text{for every } \nu = 1, \ldots, N - 1, \\ \Gamma^-_1 & = & \Gamma^-, \\ \Gamma^+_N & = & \Gamma^+. \end{IEEEeqnarray*} \item Define the graph of $F$ to be the graph whose vertices are the components of $F^1, \ldots, F^N$ and whose edges are determined by the asymptotic Reeb orbits. Then the graph of $F$ is a tree. \item The building $F$ has no symplectization levels consisting entirely of trivial cylinders, and any constant component of $F$ has negative Euler characteristic after removing all special points. \end{enumerate} \end{definition} \begin{definition} The \textbf{energy} of a holomorphic building $F = (F^1, \ldots, F^N)$ is \begin{IEEEeqnarray*}{c+x*} E_{\tilde{\omega}^*}(F) \coloneqq \sum_{\nu = 1}^{N} E_{\tilde{\omega}^{\nu}}(F^{\nu}), \end{IEEEeqnarray*} where $E_{\tilde{\omega}^{\nu}}(F^{\nu})$ is given as in \cref{def:energy of a asy cylindrical holomorphic curve}. \end{definition} The moduli space $\overline{\mathcal{M}}_X^J(\Gamma^+, \Gamma^-)$ admits a metrizable topology (see \cite[Appendix B]{bourgeoisEquivariantSymplecticHomology2016}). With this language, the SFT compactness theorem can be stated as follows. \begin{theorem}[SFT compactness] The moduli space $\overline{\mathcal{M}}_X^J(\Gamma^+, \Gamma^-)$ is compact.\end{theorem} We now consider the case where the almost complex structure on $\hat{X}$ is replaced by a family of almost complex structures obtained via \textbf{neck stretching}. Let $(X^{\pm}, \omega^{\pm}, \lambda^{\pm})$ be symplectic cobordisms with common boundary \begin{IEEEeqnarray*}{c+x*} (M, \alpha) = (\partial^- X^{+}, \lambda^+|_{\partial^- X^+}) = (\partial^+ X^-, \lambda^-|_{\partial^+ X^-}). 
\end{IEEEeqnarray*} Choose almost complex structures \begin{IEEEeqnarray*}{rCls+x*} J_M & \in & \mathcal{J}(M), \\ J_+ & \in & \mathcal{J}_{J_M}(X^+), \\ J_- & \in & \mathcal{J}^{J_M}(X^-), \end{IEEEeqnarray*} and denote by $J_{\partial^{\pm} X^{\pm}} \in \mathcal{J}(\partial^{\pm} X^{\pm})$ the induced cylindrical almost complex structure on $\R \times \partial^{\pm} X^{\pm}$. Let $(X, \omega, \lambda) \coloneqq (X^-, \omega^-, \lambda^-) \circledcirc (X^+, \omega^+, \lambda^+)$ be the gluing of $X^-$ and $X^+$ along $M$. We wish to define a family of almost complex structures $(J_t)_{t \in \R_{\geq 0}} \subset \mathcal{J}(X)$. For every $t \geq 0$, let \begin{IEEEeqnarray*}{c+x*} X_t \coloneqq X^- \cup_M [-t, 0] \times M \cup_M X^+. \end{IEEEeqnarray*} There exists a canonical diffeomorphism $\phi_t \colon X \longrightarrow X_t$. Define an almost complex structure $J_t$ on $X_t$ by \begin{IEEEeqnarray*}{c+x*} J_t \coloneqq \begin{cases} J^{\pm} & \text{on } X^{\pm}, \\ J_M & \text{on } [-t, 0] \times M. \end{cases} \end{IEEEeqnarray*} Denote also by $J_t$ the pullback of $J_t$ to ${X}$, as well as the induced almost complex structure on the completion $\hat{X}$. Finally, consider the moduli space \begin{IEEEeqnarray*}{c+x*} \mathcal{M}_X^{(J_t)_t}(\Gamma^+, \Gamma^-) \coloneqq \bigcoproduct_{t \in \R_{\geq 0}} \mathcal{M}^{J_t}_{X}(\Gamma^+, \Gamma^-). \end{IEEEeqnarray*} \begin{definition} \phantomsection\label{def:sft compactification neck stretching} For $1 \leq L^- < L^+ \leq N$, let $\alpha^{\pm} \coloneqq \lambda^{\pm}|_{\partial^{\pm} X^\pm}$ and define \begin{IEEEeqnarray*}{rCls+x*} (X^{\nu}, \omega^\nu, \tilde{\omega}^{\nu}, J^{\nu}) & \coloneqq & \begin{cases} (\R \times \partial^- X^-, \edv(e^r \alpha^-) , \edv \alpha^- , J_{\partial^- X^-}) & \text{if } \nu = 1 , \ldots, L^- - 1, \\ (X^- , \omega^- , \tilde{\omega}^-, J^-) & \text{if } \nu = L^-, \\ (\R \times M , \edv(e^r \alpha) , \edv \alpha , J_M) & \text{if } \nu = L^- + 1 , \ldots, L^+ - 1, \\ (X^+ , \omega^+ , \tilde{\omega}^+, J^+) & \text{if } \nu = L^+, \\ (\R \times \partial^+ X^+, \edv (e^r \alpha^+) , \edv \alpha^+ , J_{\partial^+ X^+}) & \text{if } \nu = L^+ + 1 , \ldots, N , \\ \end{cases} \\ (X^*, \omega^*, \tilde{\omega}^*, J^*) & \coloneqq & \bigcoproduct_{\nu = 1}^N (X^{\nu}, \omega^\nu, \tilde{\omega}^{\nu}, J^{\nu}). \end{IEEEeqnarray*} Define $\overline{\mathcal{M}}^{(J_t)_t}_X(\Gamma^+, \Gamma^-)$ to be the set of tuples $F = (F^1, \ldots, F^N)$, where $F^{\nu} \colon \dot{\Sigma}^\nu \longrightarrow X^\nu$ is an asymptotically cylindrical nodal $J^{\nu}$-holomorphic curve in $X^{\nu}$ with sets of asymptotic Reeb orbits $\Gamma^{\pm}_{\nu}$, such that $F$ satisfies conditions analogous to those of \cref{def:sft compactification}. \end{definition} \begin{theorem}[SFT compactness] The moduli space $\overline{\mathcal{M}}^{(J_t)_t}_X(\Gamma^+, \Gamma^-)$ is compact.\end{theorem} \begin{remark} \label{rmk:compactifications with tangency} The discussion above also applies to compactifications of moduli spaces of curves satisfying tangency constraints. The compactification $\overline{\mathcal{M}}^{J}_{X}(\Gamma^+,\Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x}$ consists of buildings $F = (F^1, \ldots, F^N) \in \overline{\mathcal{M}}^J_X(\Gamma^+, \Gamma^-)$ such that exactly one component $C$ of $F$ inherits the tangency constraint $\p{<}{}{\mathcal{T}^{(k)}x}$, and which satisfy the following additional condition. Consider the graph obtained from the graph of $F$ by collapsing adjacent constant components to a point. 
Let $C_1, \ldots, C_p$ be the (necessarily nonconstant) components of $F$ which are adjacent to $C$ in the new graph. Then we require that there exist $k_1, \ldots, k_p \in \Z_{\geq 1}$ such that $k_1 + \cdots + k_p \geq k$ and $C_i$ satisfies the constraint $\p{<}{}{\mathcal{T}^{(k_i)}x}$ for every $i = 1, \ldots, p$. This definition is natural to consider by \cite[Lemma 7.2]{cieliebakSymplecticHypersurfacesTransversality2007}. We can define $\overline{\mathcal{M}}^{(J_t)_t}_X(\Gamma^+, \Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x}$ analogously. \end{remark} \begin{remark} We point out that in \cite[Definition 2.2.1]{mcduffSymplecticCapacitiesUnperturbed2022}, the compactification of \cref{rmk:compactifications with tangency} is denoted by $\overline{\overline{\mathcal{M}}}^{J}_{X}(\Gamma^+,\Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x}$, while the notation $\overline{\mathcal{M}}^{J}_{X}(\Gamma^+,\Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x}$ is used to denote the moduli space of buildings $F = (F^1, \ldots, F^N) \in \overline{\mathcal{M}}^J_X(\Gamma^+, \Gamma^-)$ such that exactly one component $C$ of $F$ inherits the tangency constraint $\p{<}{}{\mathcal{T}^{(k)}x}$, but which do not necessarily satisfy the additional condition of \cref{rmk:compactifications with tangency}. \end{remark} \begin{lemma} \label{lem:no nodes} Suppose that $\Gamma^- = \varnothing$ and $\Gamma^+ = (\gamma)$ consists of a single Reeb orbit. Let $F$ be a holomorphic building of genus $0$ in any of the following compactified moduli spaces: \begin{IEEEeqnarray*}{lCl} \overline{\mathcal{M}}^J_X(\gamma), & \quad & \overline{\mathcal{M}}^J_X(\gamma)\p{<}{}{\mathcal{T}^{(k)}x}, \\ \overline{\mathcal{M}}^{(J_t)_t}_X(\gamma), & \quad & \overline{\mathcal{M}}^{(J_t)_t}_X(\gamma)\p{<}{}{\mathcal{T}^{(k)}x}. \end{IEEEeqnarray*} Then $F$ has no nodes. \end{lemma} \begin{proof} Assume by contradiction that $F$ has a node. Let $\overline{\Sigma}$ be the topological space obtained by gluing the $\Sigma^{\nu}$ along the matching punctures. Let $\overline{X}$ be the topological space obtained by gluing the $X^{\nu}$ along the matching ends. The space $\overline{X}$ is homeomorphic to $\hat{X}$, and therefore we can identify homology classes in $\overline{X}$ and $\hat{X}$. The holomorphic building $F$ defines a continuous map $\overline{F} \colon \overline{\Sigma} \longrightarrow \overline{X}$ (for more details on the definitions of $\overline{F} \colon \overline{\Sigma} \longrightarrow \overline{X}$, see \cite[Section 2.6]{cieliebakPuncturedHolomorphicCurves2018}). By the assumptions on $F$ and since $F$ has a node, it is possible to decompose $\overline{F}$ along the node into two continuous maps \begin{IEEEeqnarray*}{rCls+x*} \overline{F}_0 \colon \overline{\Sigma}_0 & \longrightarrow & \overline{X}, \\ \overline{F}_1 \colon \overline{\Sigma}_1 & \longrightarrow & \overline{X}, \end{IEEEeqnarray*} where $\overline{F}_0$ is a plane and $\overline{F}_1$ is a sphere. Since $\overline{F}_1$ is a sphere, it defines a homology class $[\overline{F}_1] \in H_2(\hat{X}; \Z)$. Then, \begin{IEEEeqnarray*}{rCls+x*} 0 & = & \edv \hat{\lambda}([\overline{F}_1]) & \quad [\text{since $\edv \hat{\lambda} = 0 \in H^2_{\mathrm{dR}}(\hat{X})$}] \\ & > & 0 & \quad [\text{by \cite[Lemma 2.8]{cieliebakPuncturedHolomorphicCurves2018}}], \end{IEEEeqnarray*} which gives the desired contradiction. 
\end{proof} \section{Solutions of the parametrized Floer equation} \label{sec:floer trajectories} The goal of this section is to introduce the trajectories that appear in $S^1$-equivariant symplectic homology (see \cref{def:floer trajectory abstract}). We will write these trajectories as maps whose domain is any punctured Riemann surface, but we point out that in \cref{chp:floer}, where we discuss $S^1$-equivariant symplectic homology, all trajectories have as domain the cylinder $\R \times S^1$. Let $(\Sigma, j)$ be a Riemann surface with punctures \begin{IEEEeqnarray*}{c+x*} \mathbf{z} = \mathbf{z}^+ \cup \mathbf{z}^-, \qquad \mathbf{z}^{\pm} = \{z^{\pm}_1, \ldots, z^{\pm}_{p^{\pm}}\}. \end{IEEEeqnarray*} We assume that near every puncture $z$, there are cylindrical coordinates $(s,t)$ as in \cref{def:punctures asy markers cyl ends}. Let $\sigma, \tau \in \Omega^1(\dot{\Sigma})$ be $1$-forms such that for every (positive or negative) puncture $z$, if we denote by $(s,t)$ the coordinates on the cylindrical end of $\dot{\Sigma}$ near $z$, then\begin{IEEEeqnarray*}{rCls+x*} \sigma & = & A \, \edv s, \\ \tau & = & B \, \edv t, \end{IEEEeqnarray*} for some $A, B > 0$. Finally, we assume that there is an action \begin{IEEEeqnarray*}{c+x*} S^1 \times \dot{\Sigma} \longrightarrow \dot{\Sigma} \end{IEEEeqnarray*} of $S^1$ on $\dot{\Sigma}$ which preserves $j$, $\sigma$ and $\tau$ and such that if $t' \in S^1$ and $(s,t)$ belongs to any cylindrical coordinate neighbourhood, then \begin{IEEEeqnarray*}{c+x*} t' \cdot (s, t) = (s, t + t'). \end{IEEEeqnarray*} \begin{example} \label{exa:sphere and cylinder} Consider the cylinder $\R \times S^1$ with coordinates $(s,t)$ and almost complex structure given by $j(\partial_s) = \partial_t$. We have the $1$-forms $\sigma \coloneqq \edv s$ and $\tau \coloneqq \edv t$. The cylinder is biholomorphic to the sphere $S^2$ with the north and south poles removed. There is an action of $S^1$ on $\R \times S^1$ given by $t' \cdot (s,t) = (s,t + t')$. Therefore, $\R \times S^1$ can be seen as a special case of the assumptions above. In this case, we will typically denote $\dot{\Sigma} = \R \times S^1$ and $\Sigma = S^2$. \end{example} Let $(S,g^S)$ be a Riemannian manifold together with an action $S^1 \times S \longrightarrow S$ which is free, proper and by isometries. Define $C = S / S^1$ and denote the projection by $\pi \colon S \longrightarrow C$. Since the action is by isometries, there exists a unique Riemannian metric $g^C$ on $C$ such that $\pi \colon S \longrightarrow C$ is a Riemannian submersion. Let $f \colon C \longrightarrow \R$ be a Morse function and define $\tilde{f} \coloneqq f \circ \pi \colon S \longrightarrow \R$, which is Morse--Bott. \begin{example} For $N \in \Z_{\geq 1}$, let \begin{IEEEeqnarray*}{rCls+x*} S & \coloneqq & S^{2N+1}, \\ C & \coloneqq & \C P^N, \\ f & \coloneqq & f_N, \end{IEEEeqnarray*} where \begin{IEEEeqnarray*}{c+x*} f_N([w_0:\cdots:w_N]) \coloneqq \frac{ \sum_{j=0}^{N} j |w_j|^2 }{ \sum_{j=0}^{N} |w_j|^2 }. \end{IEEEeqnarray*} As we will discuss in \cref{sec:action functional}, $S$, $C$ and $f$ given above are as in the previous paragraph. \end{example} Finally, let $(X,\lambda)$ be a Liouville domain. 
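\begin{remark}
	For orientation, we record a standard computation concerning the function $f_N$ from the example above; it is included only as an illustration, and the notation $e_k$ is introduced solely for this purpose. Let $e_k \coloneqq [0 : \cdots : 0 : 1 : 0 : \cdots : 0] \in \C P^N$ be the point whose only nonzero homogeneous coordinate is $w_k = 1$. In the affine chart $\{w_k = 1\}$ centred at $e_k$,
	\begin{IEEEeqnarray*}{c+x*}
		f_N(w) = \frac{k + \sum_{j \neq k} j |w_j|^2}{1 + \sum_{j \neq k} |w_j|^2} = k + \sum_{j \neq k} (j - k) |w_j|^2 + O(|w|^4),
	\end{IEEEeqnarray*}
	so $e_k$ is a nondegenerate critical point of $f_N$ with critical value $f_N(e_k) = k$ and Morse index $\morse(e_k) = 2k$. The points $e_0, \ldots, e_N$ are the only critical points of $f_N$, and the critical manifolds of $\tilde{f}_N \coloneqq f_N \circ \pi$ are the circles $\pi^{-1}(e_k) \subset S^{2N+1}$.
\end{remark}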
\begin{definition}
	\label{def:admissible hamiltonian abstract}
	An \textbf{admissible Hamiltonian} is a map $H \colon \dot{\Sigma} \times S \times \hat{X} \longrightarrow \R$ such that:
	\begin{enumerate}
		\item \label{def:admissible hamiltonian abstract 1} For every puncture $z$, the restriction of $H$ to the cylindrical end near $z$ is independent of $s$ for $|s|$ large enough. In other words, there is a map $H_z \colon S^1 \times S \times \hat{X} \longrightarrow \R$ such that $H(s,t,w,x) = H_z(t,w,x)$ for $|s|$ large enough.
		\item \label{def:admissible hamiltonian abstract 2} For every critical point $w$ of $\tilde{f}$, there exists a neighbourhood $V$ of $w$ in $S$ such that the restriction $H \colon \dot{\Sigma} \times V \times \hat{X} \longrightarrow \R$ does not depend on the point of $V$.
		\item Consider the action of $S^1$ on $\dot{\Sigma} \times S \times \hat{X}$ given by $t \cdot (z, w, x) = (t \cdot z, t \cdot w, x)$. Then, the Hamiltonian $H$ is invariant under the action of $S^1$.
		\item For every puncture $z$, there exist $D \in \R$, $C \in \R_{> 0} \setminus \operatorname{Spec}(\partial X, \lambda|_{\partial X})$ and $\delta > 0$ such that on $S^1 \times S \times [\delta,+\infty) \times \partial X$, we have that $H_z(t,w,r,x) = C e^r + D$.
		\item For every puncture $z$ and critical point $w$ of $\tilde{f}$ the Hamiltonian $H_{z,w} \colon S^1 \times \hat{X} \longrightarrow \R$ is nondegenerate.
		\item \label{def:admissible hamiltonian abstract 3} For every $(z,w,x) \in \dot{\Sigma} \times S \times \hat{X}$ we have
		\begin{IEEEeqnarray*}{rCls+x*}
			H_{w,x} \, \edv \tau & \leq & 0, \\
			\edv_{\dot{\Sigma}} H_{w,x} \wedge \tau & \leq & 0, \\
			\p{<}{}{ \nabla_S H_{z,x}(w), \nabla \tilde{f} (w) } \, \sigma_z \wedge \tau_z & \leq & 0.
		\end{IEEEeqnarray*}
	\end{enumerate}
\end{definition}
\begin{definition}
	\label{def:admissible acs abstract}
	An \textbf{admissible almost complex structure} on $\hat{X}$ is a section $J \colon \dot{\Sigma} \times S \times \hat{X} \longrightarrow \End(T \hat{X})$ such that $J^2 = - \id_{T \hat{X}}$ and:
	\begin{enumerate}
		\item \label{def:admissible acs abstract 1} For every puncture $z$, the restriction of $J$ to the cylindrical end near $z$ is independent of $s$ for $|s|$ large enough. In other words, there is a function $J_z \colon S^1 \times S \times \hat{X} \longrightarrow \End(T \hat{X})$ such that $J(s,t,w,x) = J_z(t,w,x)$ for $|s|$ large enough.
		\item \label{def:admissible acs abstract 2} For every critical point $w$ of $\tilde{f}$, there exists a neighbourhood $V$ of $w$ in $S$ such that the restriction $J \colon \dot{\Sigma} \times V \times \hat{X} \longrightarrow \End(T \hat{X})$ does not depend on the point of $V$.
		\item The almost complex structure $J$ is $S^1$-invariant.
		\item $J$ is \textbf{compatible}, i.e. $g \coloneqq \omega(\cdot, J \cdot) \colon \dot{\Sigma} \times S \times \hat{X} \longrightarrow T^* \hat{X} \otimes T^* \hat{X}$ is a Riemannian metric on $\hat{X}$ parametrized by $\dot{\Sigma} \times S$.
		\item $J$ is \textbf{cylindrical}, i.e. if $(z,w) \in \dot{\Sigma} \times S$ then $J_{z,w}$ is cylindrical on $\R_{\geq 0} \times \partial X$.
	\end{enumerate}
\end{definition}
\begin{definition}
	\label{def:floer trajectory abstract}
	Let $w \colon \dot{\Sigma} \longrightarrow S$ and $u \colon \dot{\Sigma} \longrightarrow \hat{X}$ be maps. We will denote by $\mathbf{u}$ the map $\mathbf{u} \coloneqq (\id_{\dot{\Sigma}}, w, u) \colon \dot{\Sigma} \longrightarrow \dot{\Sigma} \times S \times \hat{X}$.
We say that $(w,u)$ is a solution of the \textbf{parametrized Floer equation} if \begin{IEEEeqnarray}{rCls+x*} \dv w - \nabla \tilde{f} (w) \otimes \sigma & = & 0, \phantomsection\label{eq:parametrized floer equation 1} \\ (\dv u - X_H(\mathbf{u}) \otimes \tau)^{0,1}_{J(\mathbf{u}), j} & = & 0. \phantomsection\label{eq:parametrized floer equation 2} \end{IEEEeqnarray} \end{definition} \begin{example} Suppose that $(\dot{\Sigma}, j, \sigma, \tau) = (\R \times S^1, j, \edv s, \edv t)$ is the cylinder from \cref{exa:sphere and cylinder}. Then, $(w,u)$ is a solution of the parametrized Floer equation if and only if $w \colon \R \times S^1 \longrightarrow S$ is independent of $t \in S^1$, thus defining a map $w \colon \R \longrightarrow S$, and \begin{IEEEeqnarray*}{rCls+x*} \pdv{w}{s}(s) & = & \nabla \tilde{f}(w(s)), \\ \pdv{u}{s}(s,t) & = & - J(s, t, w(s), u(s,t)) \p{}{2}{ \pdv{u}{t}(s,t) - X_{H}(s, t,w(s),u(s,t)) }. \end{IEEEeqnarray*} \end{example} \begin{definition} \label{def:1 periodic orbit abstract} Let $z$ be a puncture and $B > 0$ be such that $\tau = B \, \edv t$, where $(s,t)$ are the cylindrical coordinates near $z$. A \textbf{$1$-periodic orbit} of $H$ at $z$ is a pair $(w ,\gamma)$ such that $w \in S$ is a critical point of $\tilde{f}$ and $\gamma$ is a $1$-periodic orbit of $H_{z,w} \colon S^1 \times \hat{X} \longrightarrow \R$. Denote by $\mathcal{P}(H,z)$ the set of such pairs. The \textbf{action} of $(w, \gamma)$ is \begin{IEEEeqnarray*}{c+x*} \mathcal{A}_{H}(w,\gamma) \coloneqq \mathcal{A}_{B H_{z,w}}(\gamma) = \int_{S^1}^{} \gamma^* \hat{\lambda} - B \int_{S^1}^{} H_{z,w} (t, \gamma(t)) \edv t. \end{IEEEeqnarray*} \end{definition} \begin{definition} \label{def:asymptotic} Let $(w,u)$ be a solution of the parametrized Floer equation. We say that $(w,u)$ is \textbf{asymptotic} at $z^{\pm}_i$ to $(w^{\pm}_i, \gamma^{\pm}_i) \in \mathcal{P}(H, z^{\pm}_i)$ if \begin{IEEEeqnarray*}{rCls+x*} \lim_{s \to \pm \infty} w(s) & = & w^{\pm}_i, \\ \lim_{s \to \pm \infty} u(s,t) & = & \gamma^{\pm}_i, \end{IEEEeqnarray*} where $(s,t)$ are the cylindrical coordinates near $z^{\pm}_i$. \end{definition} \begin{definition} \label{def:energy of floer trajectory} The \textbf{energy} of $(w,u)$ is \begin{IEEEeqnarray*}{c+x*} E(u) \coloneqq \frac{1}{2} \int_{\dot{\Sigma}}^{} \| \dv u - X_H(\mathbf{u}) \otimes \tau \|^2_{J(\mathbf{u}), \hat{\omega}} \, \omega_{\Sigma}. \end{IEEEeqnarray*} \end{definition} We will now state the analytical results about solutions of the parametrized Floer equation. Some results we will state are analogous to previous results about solutions of a pseudoholomorphic curve equation. Namely, in \cref{lem:action energy for floer trajectories} we compare the energy of a solution with the action at the asymptotes, and in \cref{lem:maximum principle} we show that solutions satisfy a maximum principle. \begin{lemma} \phantomsection\label{lem:action energy for floer trajectories} If $(w,u)$ is a solution of the parametrized Floer equation which is asymptotic at $z^{\pm}_i$ to $(w^{\pm}_i, \gamma^{\pm}_i) \in \mathcal{P}(H, z^{\pm}_i)$, then \begin{IEEEeqnarray*}{c+x*} 0 \leq E(u) \leq \sum_{i=1}^{p^+} \mathcal{A}_H(w^+_i, \gamma^+_i) - \sum_{i=1}^{p^-} \mathcal{A}_H(w^-_i, \gamma^-_i). \end{IEEEeqnarray*} \end{lemma} \begin{proof} We show that $1/2 \| \dv u - X_H(\mathbf{u}) \otimes \tau \|^{2}_{J(\mathbf{u}),j} \, \omega_{\dot{\Sigma}} = u^* \hat{\omega} - u^* \edv_{\hat{X}} H(\mathbf{u}) \wedge \tau$. 
\begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\frac{1}{2} \| \dv u - X_H(\mathbf{u}) \otimes \tau \|^{2}_{J(\mathbf{u}), \hat{\omega}} \, \omega_{\dot{\Sigma}}(S, T)}\\ \quad & = & (\dv u - X_H(\mathbf{u}) \otimes \tau)^* \hat{\omega}(S, T) \\ & = & \hat{\omega}(\dv u (S) - X_{H}(\mathbf{u}) \tau(S), \dv u (T) - X_{H}(\mathbf{u}) \tau(T)) \\ & = & \hat{\omega} (\dv u (S), \dv u (T)) - \hat{\omega} (\dv u (S), X_{H}(\mathbf{u})) \tau(T) - \hat{\omega} (X_{H}(\mathbf{u}), \dv u (T)) \tau(S) \\ & = & u^* \hat{\omega} (S,T) + u^* \iota_{X_H(\mathbf{u})} \hat{\omega} \wedge \tau (S,T) \\ \quad & = & u^* \hat{\omega} (S,T) - u^* \edv_{\hat{X}} H(\mathbf{u}) \wedge \tau (S,T), \end{IEEEeqnarray*} Where in the first equality we used \cref{lem:integrand of energy is well-defined} and the fact that $\dv u - X_H(\mathbf{u}) \otimes \tau$ is holomorphic, and in the last equality we used the definition of Hamiltonian vector field. We show that $u^* \hat{\omega} - u^* \edv_{\hat{X}} H (\mathbf{u}) \wedge \tau \leq u^* \hat{\omega} - \edv(\mathbf{u}^* H \wedge \tau)$. \begin{IEEEeqnarray*}{rCls+x*} \edv (\mathbf{u}^* H \wedge \tau) & = & \mathbf{u}^* H \wedge \edv \tau + \mathbf{u}^* \edv H \wedge \tau \\ & = & \mathbf{u}^* H \wedge \edv \tau + \edv_{\dot{\Sigma}} H (\mathbf{u}) \wedge \tau + w^* \edv_S H(\mathbf{u}) \wedge \tau + u^* \edv_{\hat{X}} H(\mathbf{u}) \wedge \tau \\ & = & \mathbf{u}^* H \wedge \edv \tau + \edv_{\dot{\Sigma}} H (\mathbf{u}) \wedge \tau + \p{<}{}{\nabla_S H(\mathbf{u}), \nabla \tilde{f}(w)} \, \sigma \wedge \tau + u^* \edv_{\hat{X}} H(\mathbf{u}) \wedge \tau \\ & \leq & u^* \edv_{\hat{X}} H (\mathbf{u}) \wedge \tau \end{IEEEeqnarray*} Here, in the third equality we used Equation \eqref{eq:parametrized floer equation 1} and in the last line of the computation we used the fact that $H$ is admissible. Combining these results, \begin{IEEEeqnarray*}{rCls+x*} 0 & \leq & E(u) \\ & \leq & \int_{\dot{\Sigma}}^{} u^* \edv \hat{\lambda} - \int_{\dot{\Sigma}}^{} \edv (\mathbf{u}^* H \wedge \tau) \\ & = & \sum_{i=1}^{p^+} \mathcal{A}_H(w^+_i, \gamma^+_i) - \sum_{i=1}^{p^-} \mathcal{A}_H(w^-_i, \gamma^-_i), \end{IEEEeqnarray*} where in the last line we used Stokes' theorem. \end{proof} \begin{lemma} \label{lem:floer eq proj} Suppose that $(M, \alpha)$ is a contact manifold, $H \colon \dot{\Sigma} \times S \times \R \times M \longrightarrow \R$ is a Hamiltonian which is independent of $M$ and $J \colon \dot{\Sigma} \times S \times \R \times M \longrightarrow \End(T(\R \times M))$ is a cylindrical almost complex structure. If \begin{IEEEeqnarray*}{c+x*} \mathbf{u} = (\id_{\dot{\Sigma}}, w, u) = (\id_{\dot{\Sigma}}, w, (a, f)) \colon \dot{\Sigma} \longrightarrow \dot{\Sigma} \times S \times \R \times M \end{IEEEeqnarray*} is a solution of the parametrized Floer equation, then $f^* \edv \alpha \geq 0$ and \begin{IEEEeqnarray}{rCls+x*} - \edv a \circ j & = & f^* \alpha - \alpha(X_H(\mathbf{u})) \tau \plabel{eq:floer eq proj 1} \\ \pi_{\xi} \circ \dv f \circ j & = & J_{\xi}(\mathbf{u}) \circ \pi_{\xi} \circ \dv f. 
\plabel{eq:floer eq proj 2} \end{IEEEeqnarray} \end{lemma} \begin{proof} We prove equation \eqref{eq:floer eq proj 1}: \begin{IEEEeqnarray*}{rCls+x*} - \edv a \circ j & = & - \edv r \circ \dv u \circ j & \quad [\text{by definition of $a$}] \\ & = & - \edv r \circ (\dv u - X_H(\mathbf{u}) \tensorpr \tau) \circ j & \quad [\text{$H$ is independent of $M$}] \\ & = & - \edv r \circ J(\mathbf{u}) \circ (\dv u - X_H(\mathbf{u}) \tensorpr \tau) & \quad [\text{$\dv u - X_H(\mathbf{u}) \tensorpr \tau$ is holomorphic}] \\ & = & \alpha \circ (\dv u - X_H(\mathbf{u}) \tensorpr \tau) & \quad [\text{by \cref{lem:J cylindrical forms}}] \\ & = & f^* \alpha - \alpha(X_H(\mathbf{u})) \tau & \quad [\text{by definition of pullback}]. \end{IEEEeqnarray*} Equation \eqref{eq:floer eq proj 2} follows by applying $\pi_{\xi} \colon T(\R \times M) \longrightarrow \xi$ to $(\dv u - X_H(\mathbf{u}) \tensorpr \tau)^{0,1}_{J(\mathbf{u}),j} = 0$. The proof of $f^* \edv \alpha \geq 0$ is equal to the one presented in \cref{lem:holomorphic curves in symplectizations}. \end{proof} The following is an adaptation to solutions of the parametrized Floer equation of the maximum principle from \cref{thm:maximum principle holomorphic}. Other authors have proven similar results about solutions of a Floer equation satisfying a maximum principle, namely Viterbo \cite[Lemma 1.8]{viterboFunctorsComputationsFloer1999}, Oancea \cite[Lemma 1.5]{oanceaSurveyFloerHomology2004}, Seidel \cite[Section 3]{seidelBiasedViewSymplectic2008} and Ritter \cite[Lemma D.1]{ritterTopologicalQuantumField2013}. \begin{lemma}[maximum principle] \label{lem:maximum principle} Under the assumptions of \cref{lem:floer eq proj}, define \begin{IEEEeqnarray*}{rClCrCl} h \colon \dot{\Sigma} \times S \times \R & \longrightarrow & \R, & \quad & h(z,w,\rho) & = & H(z,w,\ln(\rho)), \\ \rho \colon \dot{\Sigma} & \longrightarrow & \R, & \quad & \rho & = & \exp \circ a. \end{IEEEeqnarray*} If \begin{IEEEeqnarray}{rCl} \partial_{\rho} h(z,w,\rho) \, \edv \tau & \leq & 0, \plabel{eq:maximum principle 1} \\ \edv_{\dot{\Sigma}} (\partial_{\rho} h(z,w,\rho)) \wedge \tau & \leq & 0, \plabel{eq:maximum principle 2} \\ \p{<}{}{\nabla_{S} \partial_{\rho} h(z,w,\rho), \nabla \tilde{f} (w) } \, \sigma \wedge \tau & \leq & 0, \plabel{eq:maximum principle 3} \end{IEEEeqnarray} and $a \colon \dot{\Sigma} \longrightarrow \R$ has a local maximum then $a$ is constant. \end{lemma} \begin{proof} Choose a symplectic structure $\omega_{\dot{\Sigma}}$ on $\dot{\Sigma}$ such that $g_{\dot{\Sigma}} \coloneqq \omega_{\dot{\Sigma}}(\cdot, j \cdot)$ is a Riemannian metric. Define $L \colon C^{\infty}(\dot{\Sigma}, \R) \longrightarrow C^{\infty}(\dot{\Sigma}, \R)$ by \begin{IEEEeqnarray*}{c+x*} L \nu = - \Delta \nu - \rho \, \partial^2_{\rho} h (z,w,\rho) \frac{\edv \nu \wedge \tau}{\omega_{\dot{\Sigma}}}, \end{IEEEeqnarray*} for every $\nu \in C^{\infty}(\dot{\Sigma}, \R)$. The map $L$ is a linear elliptic partial differential operator (as in \cite[p.~312]{evansPartialDifferentialEquations2010}). We wish to show that $L \rho \leq 0$. For this, we start by computing $\Delta \rho \, \omega_{\dot{\Sigma}}$. 
\begin{IEEEeqnarray*}{rCls+x*}
	- \Delta \rho \, \omega_{\dot{\Sigma}} & = & \edv (\edv \rho \circ j) & \quad [\text{by \cref{lem:laplacian}}] \\
	& = & - \edv (u^*(e^r \alpha) - \rho \, \alpha(X_H(\mathbf{u})) \, \tau) & \quad [\text{by \cref{lem:floer eq proj}}] \\
	& = & - u^* \edv (e^r \alpha) + \edv (\rho \, \partial_{\rho} h (z,w,\rho) \, \tau) & \quad [\text{by \cref{lem:reeb equals hamiltonian on symplectization}}] \\
	& = & - u^* \edv (e^r \alpha) + \partial_{\rho} h (z,w,\rho) \, \edv \rho \wedge \tau & \quad [\text{by the Leibniz rule}] \\
	& & \hphantom{- u^* \edv (e^r \alpha)} + \rho \, \edv (\partial_{\rho} h (z,w,\rho)) \wedge \tau \\
	& & \hphantom{- u^* \edv (e^r \alpha)} + \rho \, \partial_{\rho} h (z,w,\rho) \, \edv \tau.
\end{IEEEeqnarray*}
By Equation \eqref{eq:maximum principle 1}, the last term on the right is nonpositive. We show that the sum of the first two terms on the right is nonpositive.
\begin{IEEEeqnarray*}{rCls+x*}
	\IEEEeqnarraymulticol{3}{l}{- u^* \edv (e^r \alpha) + \partial_{\rho} h (z,w,\rho) \, \edv \rho \wedge \tau}\\
	\quad & = & - u^* \edv (e^r \alpha) + u^* \edv_{\R \times M} H(\mathbf{u}) \wedge \tau & \quad [\text{by definition of $h$}] \\
	& = & - \frac{1}{2} \| \dv u - X_H(\mathbf{u}) \otimes \tau \|^2_{J(\mathbf{u}), \edv(e^r \alpha)} \, \omega_{\dot{\Sigma}} & \quad [\text{by the computation in \cref{lem:action energy for floer trajectories}}] \\
	& \leq & 0.
\end{IEEEeqnarray*}
Finally, we show that $\rho \, \edv (\partial_{\rho} h (z,w,\rho)) \wedge \tau \leq \rho \, \partial^2_{\rho} h(z,w,\rho) \, \edv \rho \wedge \tau$:
\begin{IEEEeqnarray*}{rCls+x*}
	\IEEEeqnarraymulticol{3}{l}{\rho \, \edv (\partial_{\rho} h (z,w,\rho)) \wedge \tau}\\
	\quad & = & \rho \, \edv_{\dot{\Sigma}} \partial_{\rho} h(z,w,\rho) \wedge \tau + \rho \, \p{<}{}{\nabla_{S} \partial_{\rho} h(z,w,\rho), \nabla \tilde{f}(w)} \, \sigma \wedge \tau + \rho \, \partial^2_{\rho} h(z,w,\rho) \, \edv \rho \wedge \tau \\
	& \leq & \rho \, \partial^2_{\rho} h(z,w,\rho) \, \edv \rho \wedge \tau,
\end{IEEEeqnarray*}
where in the last line we used Equations \eqref{eq:maximum principle 2} and \eqref{eq:maximum principle 3}. This shows that $L \rho \leq 0$. By the strong maximum principle in \cite[p.~349-350]{evansPartialDifferentialEquations2010}, if $\rho$ has a local maximum then $\rho$ is constant. Since $\rho = \exp \circ a$, the same is true for $a$.
\end{proof}
The next lemma is an adaptation to our setup of an argument by Bourgeois--Oancea which first appeared in \cite[p.~654-655]{bourgeoisExactSequenceContact2009}. The same argument was also used by Cieliebak--Oancea \cite[Lemma 2.3]{cieliebakSymplecticHomologyEilenberg2018} in a different setup.
\begin{lemma}[asymptotic behaviour]
	\label{lem:asymptotic behaviour}
	Consider the half-cylinder $Z^{\pm}$ of \cref{def:punctures asy markers cyl ends}, with $1$-forms $\sigma \coloneqq \edv s$ and $\tau \coloneqq \edv t$. Assume the same conditions as in \cref{lem:floer eq proj}, but with $\dot{\Sigma}$ replaced by $Z^{\pm}$. Suppose that $\mathbf{u}$ is asymptotic at $\pm \infty$ to a $1$-periodic orbit $(z_{\pm}, \gamma_{\pm})$ of $H_{\pm \infty}$ of the form $\gamma_{\pm}(t) = (r_{\pm}, \rho_{\pm}(t))$, where $z_{\pm}$ is a critical point of $\tilde{f}$, $r_{\pm} \in \R$ and $\rho_{\pm} \colon S^1 \longrightarrow M$ is a periodic Reeb orbit in $M$. Define $h \colon Z^{\pm} \times S \times \R \longrightarrow \R$ by $h(s,t,z,r) = H(s,t,z,\ln(r))$ (recall that $H$ is independent of $M$).
If \begin{IEEEeqnarray}{rCls+x*} \pm \del_r^2 h(s,t,z_{\pm},e^{r_{\pm}}) & < & 0 \plabel{lem:asymptotic behaviour gen 1} \\ \p{<}{}{ \nabla_S \del_r h(s, t, z_{\pm}, e^{r_{\pm}}), \nabla \tilde{f}(z_{\pm}) } & < & 0 \plabel{lem:asymptotic behaviour gen 2} \\ \del_s \del_r h(s,t,z_{\pm},e^{r_{\pm}}) & \leq & 0, \plabel{lem:asymptotic behaviour gen 3} \end{IEEEeqnarray} then either there exists $(s_0,t_0) \in Z^{\pm}$ such that $a(s_0, t_0) > r_{\pm}$ or $\mathbf{u}$ is of the form $\mathbf{u}(s,t) = (s,t, w(s), r_{\pm}, \rho_{\pm}(t))$. \end{lemma} \begin{proof} It suffices to assume that $a(s,t) \leq r_{\pm}$ for all $(s,t) \in Z^{\pm}$ and to prove that $a(s,t) = r_{\pm}$ and $f(s,t) = \rho_{\pm}(t)$ for all $(s,t) \in Z^{\pm}$. After replacing $Z^{\pm}$ by a smaller half-cylinder we may assume the following analogues of \eqref{lem:asymptotic behaviour gen 1} and \eqref{lem:asymptotic behaviour gen 2}: \begin{IEEEeqnarray}{rCls+x*} \pm \del_r^2 h(s,t,w(s),e^{a(s,t)}) & \leq & 0, \plabel{lem:asymptotic behaviour gen 1b} \\ \p{<}{}{ \nabla_S \del_r h(s, t, w(s), e^{r_{\pm}}), \nabla \tilde{f}(w(s)) } & \leq & 0. \plabel{lem:asymptotic behaviour gen 2b} \end{IEEEeqnarray} Define the average of $a$, which we denote by $\overline{a} \colon \R^{\pm}_0 \longrightarrow \R$, by \begin{IEEEeqnarray*}{c+x*} \overline{a}(s) \coloneqq \int_{0}^{1} a(s,t) \edv t. \end{IEEEeqnarray*} Then, \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\pm \del_s \overline{a}(s)}\\ \quad & = & \pm \int_{0}^{1} \del_s a(s,t) \edv t & \quad [\text{by definition of $\overline{a}$}] \\ & = & \pm \int_{0}^{1} f_s^* \alpha \mp \int_{0}^{1} \alpha(X_H(\mathbf{u}(s,t))) \edv t & \quad [\text{by \cref{lem:floer eq proj}}] \\ & = & \pm \int_{0}^{1} \rho_{\pm}^* \alpha \mp \int_{s}^{\pm \infty} \int_{0}^{1} f^* \edv \alpha \mp \int_{0}^{1} \alpha(X_H(\mathbf{u}(s,t))) \edv t & \quad [\text{by Stokes' theorem}] \\ & \leq & \pm \int_{0}^{1} \rho_{\pm}^* \alpha \mp \int_{0}^{1} \alpha(X_H(\mathbf{u}(s,t))) \edv t & \quad [\text{since $f^* \edv \alpha \geq 0$}] \\ & = & \pm \int_{0}^{1} \del_r h(\pm \infty, t, z_{\pm}, e^{r_{\pm}}) \edv t \mp \int_{0}^{1} \del_r h(s, t, w(s), e^{a(s,t)}) \edv t & \quad [\text{by \cref{lem:reeb equals hamiltonian on symplectization}}] \\ & \leq & \pm \int_{0}^{1} \del_r h(\pm \infty, t, z_{\pm}, e^{r_{\pm}}) \edv t \mp \int_{0}^{1} \del_r h(s, t, w(s), e^{r_{\pm}}) \edv t & \quad [\text{by Equation \eqref{lem:asymptotic behaviour gen 1b}}] \\ & \leq & \pm \int_{0}^{1} \del_r h(\pm \infty, t, z_{\pm}, e^{r_{\pm}}) \edv t \mp \int_{0}^{1} \del_r h(s, t, z_{\pm}, e^{r_{\pm}}) \edv t & \quad [\text{by Equation \eqref{lem:asymptotic behaviour gen 2b}}] \\ & \leq & 0 & \quad [\text{by Equation \eqref{lem:asymptotic behaviour gen 3}}]. \end{IEEEeqnarray*} Since $\pm \del_s \overline{a}(s) \leq 0$ and $\overline{a}(\pm \infty) = r_{\pm}$, we have that $\overline{a}(s) \geq r_{\pm}$ for all $s$. By assumption, $a(s,t) \leq r_{\pm}$, and therefore $a(s,t) = r_{\pm}$ for all $(s,t) \in Z^{\pm}$. This implies that every inequality in the previous computation is an equality, and in particular $f^* \edv \alpha = 0$. Therefore, $f$ is independent of $s$ and $f(s,t) = \rho_{\pm}(t)$ for all $(s,t) \in Z^{\pm}$. \end{proof} The following lemma is an adaptation of a result originally proven by Abouzaid--Seidel \cite[Lemma 7.2]{abouzaidOpenStringAnalogue2010}. 
Other authors have proven variations of this result, namely Ritter \cite[Lemma D.3]{ritterTopologicalQuantumField2013}, Gutt \cite[Theorem 3.1.6]{guttMinimalNumberPeriodic2014} and Cieliebak--Oancea \cite[Lemma 2.2]{cieliebakSymplecticHomologyEilenberg2018}. \begin{lemma}[no escape] \label{lem:no escape} Let $V \subset (X, \lambda)$ be a Liouville domain such that $\iota \colon V \longrightarrow (X, \lambda)$ is a strict Liouville embedding, $H \colon \dot{\Sigma} \times S \times \hat{X} \longrightarrow \R$ be an admissible Hamiltonian, $J \colon \dot{\Sigma} \times S \times \hat{X} \longrightarrow \End(T \hat{X})$ be a compatible almost complex structure and $\mathbf{u} = (\id_{\dot{\Sigma}}, w, u) \colon \dot{\Sigma} \longrightarrow \dot{\Sigma} \times S \times \hat{X}$ be a solution of the parametrized Floer equation such that all the asymptotic $1$-periodic orbits of $\mathbf{u}$ are inside $V$. Assume that there exists $\varepsilon > 0$ such that: \begin{enumerate} \item The restriction of $H$ to $\dot{\Sigma} \times S \times (-\varepsilon, \varepsilon) \times \del V$ is independent of $\del V$. \item The restriction of \parbox{\widthof{$H$}}{$J$} to $\dot{\Sigma} \times S \times (-\varepsilon, \varepsilon) \times \del V$ is cylindrical. \item If $\mathcal{A}_{H} \colon \dot{\Sigma} \times S \times (-\varepsilon,\varepsilon) \longrightarrow \R$ is given by $\mathcal{A}_H(z,w,r) \coloneqq \lambda(X_H)(z,w,r) - H(z,w,r)$, then for every $(z,w,r) \in \dot{\Sigma} \times S \times (-\varepsilon,\varepsilon)$, \begin{IEEEeqnarray*}{rCls+x*} \mathcal{A}_H(z,w,r) \, \edv \tau & \leq & 0, \plabel{eq:no escape eq 1} \\ \edv_{\dot{\Sigma}} \mathcal{A}_H(z,w,r) \wedge \tau & \leq & 0, \plabel{eq:no escape eq 2} \\ \p{<}{}{\nabla_S \mathcal{A}_H(z,w,r), \nabla \tilde{f}(w)} \, \sigma \wedge \tau & \leq & 0. \plabel{eq:no escape eq 3} \end{IEEEeqnarray*} \end{enumerate} Then, $\img u \subset V$. \end{lemma} \begin{proof} Assume by contradiction that $\img u$ is not contained in $V$. After changing $V$ to $\hat{V} \setminus \{ (r,x) \in \R \times \del V \mid r > r_0 \}$, for some $r_0 \in (-\varepsilon,\varepsilon)$, we may assume without loss of generality that $\img u$ is not contained in $V$ and that $u$ is transverse to $\del V$. Then, ${\Sigma_V} \coloneqq u ^{-1}(\hat{X} \setminus \itr V)$ is a compact surface with boundary. We show that $E({u}|_{\Sigma_V}) = 0$. 
\begin{IEEEeqnarray*}{rCls+x*} 0 & \leq & \frac{1}{2} \int_{\Sigma_V}^{} \| \dv u - X_{H} (\mathbf{u}) \tensorpr \tau \|^2_{J(\mathbf{u}), \edv \lambda} \, \omega _{\Sigma_V} & \quad [\text{by positivity of norms}] \\ & \leq & \int_{{\Sigma_V}} \edv (u^* \lambda - H(\mathbf{u}) \, \tau) & \quad [\text{by the computation in \cref{lem:action energy for floer trajectories}}] \\ & = & \int_{\del {\Sigma_V}}^{} u^* \lambda - H(\mathbf{u}) \, \tau & \quad [\text{by Stokes' theorem}] \\ & \leq & \int_{\del {\Sigma_V}}^{} u^* \lambda - \lambda(X_H(\mathbf{u})) \, \tau & \quad [\text{(a), proven below}] \\ & = & \int_{\del {\Sigma_V}}^{} \lambda \circ (\dv u - X_H(\mathbf{u}) \tensorpr \tau) & \quad [\text{by definition of pullback}] \\ & = & - \int_{\del {\Sigma_V}}^{} \lambda \circ J(\mathbf{u}) \circ (\dv u - X_H(\mathbf{u}) \tensorpr \tau) \circ j & \quad [\text{$\dv u - X_H(\mathbf{u}) \tensorpr \tau$ is holomorphic}] \\ & = & - \int_{\del {\Sigma_V}}^{} \edv \exp \circ (\dv u - X_H(\mathbf{u}) \tensorpr \tau) \circ j & \quad [\text{$J$ is cylindrical near $u(\del {\Sigma_V}) \subset \del V$}] \\ & = & - \int_{\del {\Sigma_V}}^{} \edv \exp \circ \dv u \circ j & \quad [\text{$H$ is independent of $\del V$}] \\ & \leq & 0 & \quad [\text{(b), proven below}]. \end{IEEEeqnarray*} The proof of (a) is the computation \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\int_{\del {\Sigma_V}}^{} ( \lambda(X_H(\mathbf{u})) - H(\mathbf{u}) ) \, \tau}\\ \quad & = & \int_{\del {\Sigma_V}}^{} \mathcal{A}_H(z,w,r_0) \, \tau & \quad [\text{by definition of $\mathcal{A}_H$ and $u(\del {\Sigma_V}) \subset \del V$}] \\ & = & \int_{{\Sigma_V}}^{} \edv_{{\Sigma_V}} (\mathcal{A}_H(z,w,r_0) \, \tau) & \quad [\text{by Stokes' theorem}] \\ & \leq & 0 & \quad [\text{by the assumptions on $\mathcal{A}_H$}]. \end{IEEEeqnarray*} Statement (b) is true because if $\xi$ is a vector tangent to $\del {\Sigma_V}$ giving the boundary orientation, then $j (\xi)$ points into ${\Sigma_V}$, therefore $\dv u \circ j (\xi)$ points out of $V$. Then, we conclude that $E({u}|_{\Sigma_V}) = 0$ and that $\dv u = X_H(\mathbf{u}) \tensorpr \tau$, and since $X_H(\mathbf{u})$ is tangent to $\del V$ it follows that $\img u \subset \del V$. This contradicts the fact that $u$ is not contained in $V$. \end{proof} \section{Compactness for solutions of the parametrized Floer equation} In this section, we assume that $(\dot{\Sigma}, j, \sigma, \tau) = (\R \times S^1, j, \edv s, \edv t)$ is the cylinder from \cref{exa:sphere and cylinder}. Suppose that $H \colon \dot{\Sigma} \times S \times \hat{X} \longrightarrow \R$ is an admissible Hamiltonian as in \cref{def:admissible hamiltonian abstract}. In this case, there exist Hamiltonians $H^{\pm} \colon S^1 \times S \times \hat{X} \longrightarrow \R$ such that $H(s,t,w,x) = H^{\pm}(t,w,x)$ for $\pm s \geq s_0$. Assume also that $J \colon \dot{\Sigma} \times S \times \hat{X} \longrightarrow \End(T \hat{X})$ is an admissible almost complex structure as in \cref{def:admissible acs abstract}, which has associated limit almost complex structures $J^{\pm} \colon S^1 \times S \times \hat{X} \longrightarrow \End(T \hat{X})$. Note that since $\dot{\Sigma} = \R \times S^1$, we can also view $H^{\pm}$ and $J^{\pm}$ as maps whose domain is $\dot{\Sigma}$. 
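To fix ideas, we note that the Floer data $(H^{L,\nu}, J^{L,\nu})$ introduced in the next paragraph simply place the (possibly $s$-dependent) data $(H, J)$ at a distinguished level $L$, and the $s$-independent limit data $(H^{\pm}, J^{\pm})$ at the levels above and below it. For example, taking $N = 3$ and $L = 2$ in the definition below yields
\begin{IEEEeqnarray*}{c+x*}
	(H^{2,1}, J^{2,1}) = (H^-, J^-), \qquad (H^{2,2}, J^{2,2}) = (H, J), \qquad (H^{2,3}, J^{2,3}) = (H^+, J^+).
\end{IEEEeqnarray*}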
For $N \in \Z_{\geq 1}$ and $L, \nu = 1,\ldots,N$, define
\begin{IEEEeqnarray*}{c+x*}
	H^{L,\nu} \coloneqq
	\begin{cases}
		H^{+} & \text{if } \nu > L, \\
		H     & \text{if } \nu = L, \\
		H^{-} & \text{if } \nu < L,
	\end{cases}
	\quad
	J^{L,\nu} \coloneqq
	\begin{cases}
		J^{+} & \text{if } \nu > L, \\
		J     & \text{if } \nu = L, \\
		J^{-} & \text{if } \nu < L.
	\end{cases}
\end{IEEEeqnarray*}
Finally, let $(H_m)_m$ be a sequence of admissible Hamiltonians converging to $H$, $(J_m)_m$ be a sequence of admissible almost complex structures converging to $J$, and for every $m \in \Z_{\geq 1}$ let $(w_m, u_m)$ be a solution of the parametrized Floer equation with respect to $H_m, J_m$ with asymptotes $(z^\pm_m, \gamma^\pm_m)$.
\begin{definition}
	\label{def:broken floer cylinder}
	Let $(z^{\pm}, \gamma^{\pm})$ be Hamiltonian $1$-periodic orbits of $H^{\pm}$. A \textbf{broken Floer trajectory} from $(z^-, \gamma^-)$ to $(z^+, \gamma^+)$ is given by:
	\begin{enumerate}
		\item Numbers $N \in \Z_{\geq 1}$ and $L = 1, \ldots, N$;
		\item Hamiltonian $1$-periodic orbits $(z^-, \gamma^-) = (z^1, \gamma^1), \ldots, (z^L, \gamma^L)$ of $H^-$ and Hamiltonian $1$-periodic orbits $(z^{L+1}, \gamma^{L+1}), \ldots, (z^{N+1}, \gamma^{N+1}) = (z^+, \gamma^+)$ of $H^+$;
		\item For every $\nu = 1, \ldots, N$, a Floer trajectory $(w^\nu,u^\nu)$ with respect to $H^{L,\nu}, J^{L,\nu}$ with negative asymptote $(z^\nu, \gamma^\nu)$ and positive asymptote $(z^{\nu+1}, \gamma^{\nu+1})$.
	\end{enumerate}
\end{definition}
\begin{definition}
	We say that $(w_m, u_m)_{m}$ \textbf{converges} to $(w^{\nu}, u^{\nu})_{\nu}$ if there exist numbers $s^1_m \leq \cdots \leq s^N_m$ such that
	\begin{IEEEeqnarray*}{rCls+x*}
		\lim_{m \to +\infty} s^L_m & \in & \R, \\
		\lim_{m \to +\infty} (s^{\nu + 1}_m - s^\nu_m) & = & + \infty, \\
		\lim_{m \to +\infty} w_m( \cdot + s^\nu_m) & = & w^\nu, \\
		\lim_{m \to +\infty} u_m( \cdot + s^\nu_m, \cdot) & = & u^\nu.
	\end{IEEEeqnarray*}
\end{definition}
\begin{theorem}
	\label{thm:compactness in s1eft}
	There exists a subsequence (whose index we still denote by $m$) and a broken Floer trajectory $(w^{\nu}, u^{\nu})_{\nu}$ such that $(w_m, u_m)_m$ converges to $(w^{\nu}, u^{\nu})_{\nu}$.
\end{theorem}
\begin{proof}
	Since $f \colon C \longrightarrow \R$ is Morse and $H_{z,w} \colon S^1 \times \hat{X} \longrightarrow \R$ is nondegenerate for every puncture $z$ and critical point $w$ of $\tilde{f}$, we conclude that we can pass to a subsequence such that $(z_m^{\pm}, \gamma_m^{\pm})$ converges to $(z^{\pm}, \gamma^{\pm})$. By compactness in Morse theory, there exists a further subsequence and a broken Morse trajectory $(w^\nu)_{\nu = 1,\ldots,N}$, where $w^{\nu} \colon \R \longrightarrow S$ is a Morse trajectory from $z^{\nu}$ to $z^{\nu + 1}$, $z^1 = z^-$ and $z^{N+1} = z^+$, such that $(w_m)_m$ converges in the sense of Morse theory to $(w^{\nu})_{\nu}$. More precisely, this means that there exist numbers $s^1_m \leq \cdots \leq s^N_m$ and $L \leq N$ such that
	\begin{IEEEeqnarray*}{rCls+x*}
		\lim_{m \to +\infty} s^L_m & \in & \R, \\
		\lim_{m \to +\infty} (s^{\nu+1}_m - s^\nu_m) & = & + \infty, \\
		\lim_{m \to +\infty} w_m(\cdot + s^\nu_m) & = & w^\nu.
	\end{IEEEeqnarray*}
	Possibly after reparametrizing the $w^\nu$, we may assume that $s^L_m = 0$ for every $m$.
Now, for $\nu = 1,\ldots,N$, define
	\begin{IEEEeqnarray*}{rCLCRCl}
		u^\nu_m \colon \R \times S^1 & \longrightarrow & \hat{X}, & \quad & u^\nu_m(s,t) & = & u_m(s + s^\nu_m, t), \\
		H^\nu_m \colon \R \times S^1 \times \hat{X} & \longrightarrow & \R, & \quad & H^\nu_m(s,t,x) & = & H_m(s + s^\nu_m, t, w_m(s + s^\nu_m), x), \\
		H^\nu \colon \R \times S^1 \times \hat{X} & \longrightarrow & \R, & \quad & H^\nu(s,t,x) & = & H^{L,\nu}(s, t, w^\nu(s), x), \\
		J^\nu_m \colon \R \times S^1 \times \hat{X} & \longrightarrow & \End(T \hat{X}), & \quad & J^\nu_m(s,t,x) & = & J_m(s + s^\nu_m, t, w_m(s + s^\nu_m), x), \\
		J^\nu \colon \R \times S^1 \times \hat{X} & \longrightarrow & \End(T \hat{X}), & \quad & J^\nu(s,t,x) & = & J^{L,\nu}(s, t, w^\nu(s), x).
	\end{IEEEeqnarray*}
	Then, $u^\nu_m$ is a solution of the equation
	\begin{IEEEeqnarray*}{c+x*}
		\pdv{u^\nu_m}{s} = - J^\nu_m(s,t,u^\nu_m) \p{}{2}{ \pdv{u^\nu_m}{t} - X_{H^\nu_m}(s,t,u^\nu_m) },
	\end{IEEEeqnarray*}
	and
	\begin{IEEEeqnarray*}{rCls+x*}
		\lim_{m \to + \infty} H^\nu_m & = & H^\nu, \\
		\lim_{m \to + \infty} J^\nu_m & = & J^\nu.
	\end{IEEEeqnarray*}
	By compactness in Floer theory, there exists a further subsequence such that for every $\nu = 1,\ldots,N$ there exists a broken Floer trajectory $(u^{\nu,\mu})_{\mu = 1,\ldots,M_{\nu}}$ with respect to $(H^\nu, J^\nu)$, where $u^{\nu,\mu}$ is a Floer trajectory from $\gamma^{\nu,\mu}$ to $\gamma^{\nu,\mu+1}$, such that
	\begin{IEEEeqnarray*}{rCls+x*}
		\gamma^{1,1} & = & \gamma^-, \\
		\gamma^{N,M_{N}} & = & \gamma^+,
	\end{IEEEeqnarray*}
	and $(u^\nu_m)_m^{}$ converges to $(u^{\nu,\mu})_{\mu}$. More precisely, this means that there exist $L_\nu = 1,\ldots,M_\nu$ and numbers $s_m^{\nu,1} \leq \cdots \leq s_m^{\nu,M_\nu}$ such that
	\begin{IEEEeqnarray*}{rCls+x*}
		\lim_{m \to +\infty} s_m^{\nu,L_\nu} & \in & \R, \\
		\lim_{m \to +\infty} (s_m^{\nu,\mu+1} - s_m^{\nu,\mu}) & = & + \infty, \\
		\lim_{m \to +\infty} u^{\nu}_m(\cdot + s^{\nu,\mu}_m, \cdot) & = & u^{\nu,\mu}.
	\end{IEEEeqnarray*}
	Consider the list $(w^\nu, u^{\nu,\mu})_{\nu,\mu}$ ordered according to the dictionary order of the indices $\nu, \mu$. In this list, if two elements $(w^\nu, u^{\nu,\mu})$, $(w^{\nu'}, u^{\nu',\mu'})$ are equal then they must be adjacent. The list obtained from $(w^\nu, u^{\nu,\mu})_{\nu,\mu}$ by removing duplicate elements is the desired broken Floer trajectory.
\end{proof}
\section{Transversality for solutions of the parametrized Floer equation}
In this section, let $(\dot{\Sigma}, j, \sigma, \tau) = (\R \times S^1, j, \edv s, \edv t)$ be the cylinder from \cref{exa:sphere and cylinder} and $(X, \lambda)$ be a nondegenerate Liouville domain. Let $H \colon S^1 \times S \times \hat{X} \longrightarrow \R$ be a function such that the pullback $H \colon \R \times S^1 \times S \times \hat{X} \longrightarrow \R$ is as in \cref{def:admissible hamiltonian abstract}. Define $\mathcal{J}$ to be the set of almost complex structures $J \colon S^1 \times S \times \hat{X} \longrightarrow \End(T \hat{X})$ such that the pullback $J \colon \R \times S^1 \times S \times \hat{X} \longrightarrow \End(T \hat{X})$ is as in \cref{def:admissible acs abstract}. The set $\mathcal{J}$ admits the structure of a smooth Fréchet manifold, and therefore the tangent space $T_{J} \mathcal{J}$ at $J$ is a Fréchet space. Let $(z^{\pm}, \gamma^{\pm})$ be $1$-periodic orbits of $H$, i.e. $z^{\pm} \in S$ is a critical point of $\tilde{f}$ and $\gamma^{\pm}$ is a $1$-periodic orbit of $H_{z^{\pm}} \colon S^1 \times \hat{X} \longrightarrow \R$.
If $w \colon \R \longrightarrow S$ and $u \colon \R \times S^1 \longrightarrow \hat{X}$ are maps, we will denote by $\mathbf{u}$ the map \begin{IEEEeqnarray*}{c+x*} \mathbf{u} \colon \R \times S^1 \longrightarrow S^1 \times S \times \hat{X}, \qquad \mathbf{u}(s,t) \coloneqq (t, w(s), u(s,t)). \end{IEEEeqnarray*} The pair $(w,u)$ is a solution of the parametrized Floer equation if \begin{IEEEeqnarray*}{rCls+x*} \partial_s w - \nabla \tilde{f}(w) & = & 0, \\ (\dv u - X_H(\mathbf{u}) \otimes \tau)^{0,1}_{J(\mathbf{u}), j} & = & 0. \end{IEEEeqnarray*} Define $[z^{\pm}, \gamma^{\pm}]$ to be the equivalence class \begin{IEEEeqnarray*}{rCls+x*} [z^{\pm}, \gamma^{\pm}] & \coloneqq & \{ t \cdot (z^{\pm}, \gamma^{\pm}) \mid t \in S^1 \} \\ & = & \{ (t \cdot z^{\pm}, \gamma^{\pm}(\cdot + t)) \mid t \in S^1 \}, \end{IEEEeqnarray*} and denote by $\hat{\mathcal{M}}(X,H,J,[z^+,\gamma^+],[z^-,\gamma^-])$ the moduli space of solutions $(w,u) \in C^{\infty}(\R, S) \times C^{\infty}(\R \times S^1, \hat{X})$ of the parametrized Floer equation such that \begin{IEEEeqnarray*}{c+x*} \lim_{s \to \pm \infty} (w(s), u(s, \cdot)) \in [z^{\pm}, \gamma^{\pm}]. \end{IEEEeqnarray*} Denote by $\mathcal{M}$ the moduli space of gradient flow lines $w \colon \R \longrightarrow S$ of $\tilde{f}$ such that \begin{IEEEeqnarray*}{c+x*} \lim_{s \to \pm \infty} w(s) \in [z^{\pm}]. \end{IEEEeqnarray*} By the assumptions on $(S, g^{S}, \tilde{f})$ explained in \cref{sec:floer trajectories} and \cite[Section 3.2]{austinMorseBottTheoryEquivariant1995}, the space $\mathcal{M}$ is a smooth finite dimensional manifold. Moreover, \begin{IEEEeqnarray}{c+x*} \dim \mathcal{M} = \morse(z^+) - \morse(z^-) + 1. \plabel{eq:dimension of m} \end{IEEEeqnarray} Let $\varepsilon = (\varepsilon_{\ell})_{\ell \in \Z_{\geq 0}}$ be a sequence of positive numbers $\varepsilon_{\ell}$ such that $\lim_{\ell \to +\infty} \varepsilon_{\ell} = 0$. Define a function \begin{IEEEeqnarray*}{rrCl} \| \cdot \|^{\varepsilon} \colon & T_{J_{\mathrm{ref}}} \mathcal{J} & \longrightarrow & [0, + \infty] \\ & Y & \longmapsto & \sum_{\ell=0}^{+ \infty} \varepsilon_{\ell} \| Y \|_{C^{\ell}(S^1 \times S \times X)}, \end{IEEEeqnarray*} where $\| \cdot \|_{C^{\ell}(S^1 \times S \times X)}$ is the $C^{\ell}$-norm which is determined by some finite covering of $T {X} \longrightarrow S^1 \times S \times X$ by coordinate charts and local trivializations. Define \begin{IEEEeqnarray*}{c+x*} T^{\varepsilon}_{J_{\mathrm{ref}}} \mathcal{J} \coloneqq \{ Y \in T_{J_{\mathrm{ref}}} \mathcal{J} \mid \| Y \|^{\varepsilon} < + \infty \}. \end{IEEEeqnarray*} By \cite[Lemma 5.1]{floerUnregularizedGradientFlow1988}, $(T^{\varepsilon}_{J_{\mathrm{ref}}} \mathcal{J}, \| \cdot \|^{\varepsilon})$ is a Banach space consisting of smooth sections and containing sections with support in arbitrarily small sets. For every $Y \in T_{J_{\mathrm{ref}}}^{\varepsilon} \mathcal{J}$, define \begin{IEEEeqnarray*}{c+x*} \exp_{J_{\mathrm{ref}}}(Y) \coloneqq J_{Y} \coloneqq \p{}{2}{1 + \frac{1}{2} J_{\mathrm{ref}} Y} J_{\mathrm{ref}} \p{}{2}{1 + \frac{1}{2} J_{\mathrm{ref}} Y}^{-1}. \end{IEEEeqnarray*} There exists a neighbourhood $\mathcal{O} \subset T_{J_{\mathrm{ref}}}^{\varepsilon} \mathcal{J}$ of $0$ such that $\exp_{J_{\mathrm{ref}}}^{} \colon \mathcal{O} \longrightarrow \mathcal{J}$ is injective.
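\begin{remark} As a consistency check on the formula for $\exp_{J_{\mathrm{ref}}}^{}$, note that whenever $1 + \frac{1}{2} J_{\mathrm{ref}} Y$ is invertible, the operator $J_Y$ is conjugate to $J_{\mathrm{ref}}$ and therefore squares to minus the identity: \begin{IEEEeqnarray*}{c+x*} J_Y^2 = \p{}{2}{1 + \frac{1}{2} J_{\mathrm{ref}} Y} J_{\mathrm{ref}}^2 \p{}{2}{1 + \frac{1}{2} J_{\mathrm{ref}} Y}^{-1} = -1. \end{IEEEeqnarray*} We do not verify here that $J_Y$ also satisfies the remaining conditions in the definition of $\mathcal{J}$ for $Y$ in a small enough neighbourhood of $0$; this is implicit in the assertion that $\exp_{J_{\mathrm{ref}}}^{}$ takes values in $\mathcal{J}$. \end{remark}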
Define $\mathcal{J}^{\varepsilon} \coloneqq \exp_{J_{\mathrm{ref}}}^{}(\mathcal{O})$, which is automatically a Banach manifold with one global parametrization $\exp_{J_{\mathrm{ref}}}^{} \colon \mathcal{O} \longrightarrow \mathcal{J}^{\varepsilon}$. The tangent space of $\mathcal{J}^{\varepsilon}$ at $J_{\mathrm{ref}}$ is given by \begin{IEEEeqnarray*}{c+x*} T_{J_{\mathrm{ref}}} \mathcal{J}^{\varepsilon} = T_{J_{\mathrm{ref}}}^{\varepsilon} \mathcal{J}. \end{IEEEeqnarray*} Notice that the definition of $\mathcal{J}^{\varepsilon}$ involved making several choices, namely the sequence $\varepsilon$, the choices necessary to define the $C^{\ell}$-norm, and a reference almost complex structure $J_{\mathrm{ref}}$. \begin{definition} For $w \in \mathcal{M}$, let $\mathcal{F}_w$ be the Banach manifold of maps $u \colon \R \times S^1 \longrightarrow \hat{X}$ of the form \begin{IEEEeqnarray*}{c+x*} u(s,t) = \exp_{u_0(s,t)} \xi(s,t), \end{IEEEeqnarray*} where \begin{IEEEeqnarray*}{rCls+x*} u_0 & \in & C^{\infty}(\R \times S^1, \hat{X}) \text{ is such that } \lim_{s \to \pm \infty} (w(s), u_0(s, \cdot)) \in [z^{\pm}, \gamma^{\pm}], \\ \xi & \in & W^{1,p}(\R \times S^1, u_0^* T \hat{X}). \end{IEEEeqnarray*} \end{definition} \begin{definition} For $J \in \mathcal{J}^{\varepsilon}$, we define a bundle $\pi^J \colon \mathcal{E}^J \longrightarrow \mathcal{B}$ as follows. The base, fibre and total space are given by \begin{IEEEeqnarray*}{rCls+x*} \mathcal{B} & \coloneqq & \{ (w,u) \mid w \in \mathcal{M}, \, u \in \mathcal{F}_w \}, \\ \mathcal{E}^J_{(w,u)} & \coloneqq & L^p(\Hom^{0,1}_{J(\mathbf{u}), j} (T \dot{\Sigma}, u^* T \hat{X})), \\ \mathcal{E}^J & \coloneqq & \{ (w,u,\xi) \mid (w,u) \in \mathcal{B}, \, \xi \in \mathcal{E}^J_{(w,u)} \}. \end{IEEEeqnarray*} The projection is given by $\pi^J(w,u,\xi) \coloneqq (w,u)$. The \textbf{Cauchy--Riemann operator} is the section $\delbar\vphantom{\partial}^J \colon \mathcal{B} \longrightarrow \mathcal{E}^J$ given by \begin{IEEEeqnarray*}{c+x*} \delbar\vphantom{\partial}^J(w,u) \coloneqq (\dv u - X_H(\mathbf{u}) \otimes \tau)^{0,1}_{J(\mathbf{u}),j} \in \mathcal{E}^J_{(w,u)}. \end{IEEEeqnarray*} \end{definition} With this definition, $(\delbar\vphantom{\partial}^J)^{-1}(0) = \hat{\mathcal{M}}(X,H,J,[z^+,\gamma^+],[z^-,\gamma^-])$. \begin{definition} Define the universal bundle, $\pi \colon \mathcal{E} \longrightarrow \mathcal{B} \times \mathcal{J}^{\varepsilon}$, and the \textbf{universal Cauchy--Riemann operator}, $\delbar \colon \mathcal{B} \times \mathcal{J}^{\varepsilon} \longrightarrow \mathcal{E}$, by \begin{IEEEeqnarray*}{rCls+x*} \mathcal{E} & \coloneqq & \{ (w,u,J,\xi) \mid (w,u) \in \mathcal{B}, \, J \in \mathcal{J}^{\varepsilon}, \, \xi \in \mathcal{E}^{J}_{(w,u)} \}, \\ \pi & \colon & \mathcal{E} \longrightarrow \mathcal{B} \times \mathcal{J}^{\varepsilon}, \qquad \pi(w,u,J,\xi) \coloneqq (w,u,J), \\ \delbar & \colon & \mathcal{B} \times \mathcal{J}^{\varepsilon} \longrightarrow \mathcal{E}, \qquad \delbar(w,u,J) \coloneqq \delbar\vphantom{\partial}^J(w,u). \end{IEEEeqnarray*} \end{definition} For $(w,u,J)$ such that $\delbar(w,u,J) = 0$, choose a splitting $T_{(w,u)} \mathcal{B} = T_w \mathcal{M} \oplus T_u \mathcal{F}_w$. 
The sections $\delbar\vphantom{\partial}^J$ and $\delbar$ have corresponding linearized operators, which we denote by \begin{IEEEeqnarray*}{rCls+x*} \mathbf{D}_{(w,u,J)} & \colon & T_w \mathcal{M} \oplus T_u \mathcal{F}_w \longrightarrow \mathcal{E}^J_{(w,u)}, \\ \mathbf{L}_{(w,u,J)} & \colon & T_w \mathcal{M} \oplus T_u \mathcal{F}_w \oplus T_J \mathcal{J}^{\varepsilon} \longrightarrow \mathcal{E}^J_{(w,u)}, \end{IEEEeqnarray*} respectively. We can write these operators with respect to the decompositions above as block matrices \begin{IEEEeqnarray}{rCl} \mathbf{D}_{(w,u,J)} & = & \begin{bmatrix} \mathbf{D}^{\mathcal{M}}_{(w,u,J)} & \mathbf{D}^{\mathcal{F}}_{(w,u,J)} \end{bmatrix}, \plabel{eq:splitting linearized ops 1} \\ \mathbf{L}_{(w,u,J)} & = & \begin{bmatrix} \mathbf{D}^{\mathcal{M}}_{(w,u,J)} & \mathbf{D}^{\mathcal{F}}_{(w,u,J)} & \mathbf{J}_{(w,u,J)} \end{bmatrix}. \plabel{eq:splitting linearized ops 2} \end{IEEEeqnarray} Let $\tau$ be a trivialization of $u^* T \hat{X}$ and denote also by $\tau$ the induced trivializations of $(\gamma^{\pm})^* T \hat{X}$. We can consider the Conley--Zehnder indices $\conleyzehnder^{\tau}(\gamma^{\pm})$ of $\gamma^{\pm}$ computed with respect to $\tau$. We denote $\ind^{\tau}(z^{\pm}, \gamma^{\pm}) \coloneqq \morse(z^\pm) + \conleyzehnder^{\tau}(\gamma^{\pm})$. \begin{theorem} \phantomsection\label{thm:s1eft d is fredholm} The operators $\mathbf{D}^{\mathcal{F}}_{(w,u,J)}$ and $\mathbf{D}_{(w,u,J)}$ are Fredholm and \begin{IEEEeqnarray}{rCls+x*} \operatorname{ind} \mathbf{D}^{\mathcal{F}}_{(w,u,J)} & = & \conleyzehnder^{\tau}(\gamma^+) - \conleyzehnder^{\tau}(\gamma^-), \plabel{eq:s1eft fredholm ind 1} \\ \operatorname{ind} \mathbf{D}_{(w,u,J)} & = & \ind^{\tau}(z^+, \gamma^+) - \ind^{\tau}(z^-,\gamma^-) + 1. \plabel{eq:s1eft fredholm ind 2} \end{IEEEeqnarray} \end{theorem} \begin{proof} The operator $\mathbf{D}^{\mathcal{F}}_{(w,u,J)}$ is the linearized operator in Floer theory, which is Fredholm and has index given by Equation \eqref{eq:s1eft fredholm ind 1}. Therefore, \begin{IEEEeqnarray*}{c+x*} 0 \oplus \mathbf{D}^{\mathcal{F}}_{(w,u,J)} \colon T_w \mathcal{M} \oplus T_u \mathcal{F}_w \longrightarrow \mathcal{E}^J_{(w,u)} \end{IEEEeqnarray*} is Fredholm and \begin{IEEEeqnarray}{c+x*} \operatorname{ind} (0 \oplus \mathbf{D}^{\mathcal{F}}_{(w,u,J)}) = \dim T_w \mathcal{M} + \operatorname{ind} \mathbf{D}^{\mathcal{F}}_{(w,u,J)}. 
\plabel{eq:index of operator floer} \end{IEEEeqnarray} Since $\mathbf{D}^{\mathcal{M}}_{(w,u,J)} \oplus 0 \colon T_w \mathcal{M} \oplus T_u \mathcal{F}_w \longrightarrow \mathcal{E}^J_{(w,u)}$ is compact, the operator \begin{IEEEeqnarray*}{c+x*} \mathbf{D}_{(w,u,J)} = \mathbf{D}^{\mathcal{M}}_{(w,u,J)} \oplus \mathbf{D}^{\mathcal{F}}_{(w,u,J)} = \mathbf{D}^{\mathcal{M}}_{(w,u,J)} \oplus 0 + 0 \oplus \mathbf{D}^{\mathcal{F}}_{(w,u,J)} \end{IEEEeqnarray*} is Fredholm and \begin{IEEEeqnarray*}{rCls+x*} \operatorname{ind} \mathbf{D}_{(w,u,J)} & = & \operatorname{ind} (\mathbf{D}^{\mathcal{M}}_{(w,u,J)} \oplus \mathbf{D}^{\mathcal{F}}_{(w,u,J)}) & \quad [\text{by Equation \eqref{eq:splitting linearized ops 1}}] \\ & = & \operatorname{ind} (0 \oplus \mathbf{D}^{\mathcal{F}}_{(w,u,J)}) & \quad [\text{since $\mathbf{D}^{\mathcal{M}}_{(w,u,J)}$ is compact}] \\ & = & \dim T_w \mathcal{M} + \operatorname{ind} \mathbf{D}^{\mathcal{F}}_{(w,u,J)} & \quad [\text{by Equation \eqref{eq:index of operator floer}}] \\ & = & \ind^{\tau}(z^+, \gamma^+) - \ind^{\tau}(z^-,\gamma^-) + 1 & \quad [\text{by Equations \eqref{eq:dimension of m} and \eqref{eq:s1eft fredholm ind 1}}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{theorem} \label{thm:s1eft l is surjective} The operator $\mathbf{L}_{(w,u,J)}$ is surjective. \end{theorem} \begin{proof} It suffices to prove that \begin{IEEEeqnarray*}{c+x*} \mathbf{L}^{\mathcal{F}}_{(w,u,J)} \coloneqq \mathbf{D}^{\mathcal{F}}_{(w,u,J)} \oplus \mathbf{J}_{(w,u,J)} \colon T_u \mathcal{F}_w \oplus T_J \mathcal{J}^{\varepsilon} \longrightarrow \mathcal{E}^{J}_{(w,u)} \end{IEEEeqnarray*} is surjective. Since $\mathbf{D}^{\mathcal{F}}_{(w,u,J)}$ is Fredholm (by \cref{thm:s1eft d is fredholm}), its image is closed and has finite codimension. This implies that $\img \mathbf{L}^{\mathcal{F}}_{(w,u,J)}$ is also of finite codimension and closed. So, it suffices to show that $\img \mathbf{L}^{\mathcal{F}}_{(w,u,J)}$ is dense, which is equivalent to showing that the annihilator $\Ann \img \mathbf{L}^{\mathcal{F}}_{(w,u,J)}$ is zero. Let $\eta \in \Ann \img \mathbf{L}^{\mathcal{F}}_{(w,u,J)}$, i.e. \begin{IEEEeqnarray*}{c+x*} \eta \in L^q(\Hom^{0,1}_{J(\mathbf{u}), j} (T \dot{\Sigma}, u^* T \hat{X})) \end{IEEEeqnarray*} is such that \begin{IEEEeqnarray}{rClCsrCl} 0 & = & \p{<}{}{\eta, \mathbf{D}^{\mathcal{F}}_{(w,u,J)}(\xi)}_{L^2} & \quad & \text{ for all } & \xi & \in & T_u \mathcal{F}_w, \plabel{eq:element in annihilator 1} \\ 0 & = & \p{<}{}{\eta, \mathbf{J} _{(w,u,J)}(Y )}_{L^2} & \quad & \text{ for all } & Y & \in & T_J \mathcal{J}^{\varepsilon}. \plabel{eq:element in annihilator 2} \end{IEEEeqnarray} By Equation \eqref{eq:element in annihilator 1}, $\eta$ satisfies the Cauchy--Riemann type equation $(\mathbf{D}^{\mathcal{F}}_{(w,u,J)})^{*} \eta = 0$, and therefore $\eta$ is smooth (by elliptic regularity) and satisfies unique continuation. We prove that $\eta = 0$ in the case where $w$ is constant. In this case, write $w(s) \eqqcolon w_0$ for every $s$; we can view $\gamma^{\pm}$ as $1$-periodic orbits of $H_{w_0}$ (after a reparametrization) and $u$ is a solution of the Floer equation: \begin{IEEEeqnarray*}{c+x*} \pdv{u}{s}(s,t) + J_{w_0}(t,u(s,t)) \p{}{2}{ \pdv{u}{t}(s,t) - X_{H_{w_0}}(t,u(s,t)) } = 0. \end{IEEEeqnarray*} Let $R(u)$ be the set of regular points of $u$, i.e. points $z = (s,t)$ such that \begin{IEEEeqnarray}{c+x*} \plabel{eq:set of regular points of u} \pdv{u}{s}(s,t) \neq 0, \qquad u(s,t) \neq \gamma^{\pm}(t), \qquad u(s,t) \notin u(\R - \{s\}, t).
\end{IEEEeqnarray} By \cite[Theorem 4.3]{floerTransversalityEllipticMorse1995}, $R(u)$ is open. By unique continuation, it is enough to show that $\eta$ vanishes in $R(u)$. Let $z_0 = (s_0,t_0) \in R(u)$ and assume by contradiction that $\eta(z_0) \neq 0$. By \cite[Lemma 3.2.2]{mcduffHolomorphicCurvesSymplectic2012}, there exists $Y \in T_J \mathcal{J}$ such that \begin{IEEEeqnarray}{c+x*} \plabel{eq:variation of acs before cut off} \p{<}{}{\eta(z_0), Y(\mathbf{u}(z_0)) \circ (\dv u(z_0) - X_H(\mathbf{u}(z_0)) \otimes \tau_{z_0}) \circ j_{z_0} } > 0. \end{IEEEeqnarray} Choose a neighbourhood $V = V_{\R} \times V_{S^1}$ of $z_0 = (s_0,t_0)$ in $\dot{\Sigma} = \R \times S^1$ such that \begin{IEEEeqnarray}{c+x*} \plabel{eq:inner product bigger than 0 in v} \p{<}{}{\eta, Y(\mathbf{u}) \circ (\dv u - X_H(\mathbf{u}) \otimes \tau) \circ j }|_V > 0. \end{IEEEeqnarray} Since $z_0$ is as in \eqref{eq:set of regular points of u}, there exists a neighbourhood $U_{\hat{X}}$ of $u(z_0)$ in $\hat{X}$ such that \begin{IEEEeqnarray*}{c+x*} u(s,t) \in U_{\hat{X}} \Longrightarrow s \in V_{\R}. \end{IEEEeqnarray*} Choose a slice $A \subset S^1 \times S$ which contains $(t_0, w_0)$ and which is transverse to the action of $S^1$ on $S^1 \times S$. Define $U_{S^1 \times S} = S^1 \cdot A$. For $A$ chosen small enough, \begin{IEEEeqnarray*}{c+x*} (t, w_0) \in U_{S^1 \times S} \Longrightarrow t \in V_{S^1}. \end{IEEEeqnarray*} Then, defining $U \coloneqq U_{S^1 \times S} \times U_{\hat{X}}$ we have that $\mathbf{u}^{-1}(U) \subset V$. Choose an $S^1$-invariant function $\beta \colon S^1 \times S \times \hat{X} \longrightarrow [0,1]$ such that \begin{IEEEeqnarray}{c+x*} \plabel{eq:bump function for transversality} \supp \beta \subset U, \qquad \beta(\mathbf{u}(z_0)) = 1, \qquad \beta Y \in T_J \mathcal{J}^{\varepsilon}. \end{IEEEeqnarray} Here, we can achieve that $\beta Y$ is of class $C^{\varepsilon}$ by \cite[Theorem B.6]{wendlLecturesSymplecticField2016}. Since $\mathbf{u}^{-1}(U) \subset V$ and $\supp \beta \subset U$, we have that $\supp (\beta \circ \mathbf{u}) \subset V$. Then, \begin{IEEEeqnarray*}{rCls+x*} 0 & = & \p{<}{}{\eta, \mathbf{J}_{(w,u,J)}(\beta Y)}_{L^2} & \quad [\text{by Equation \eqref{eq:element in annihilator 2}}] \\ & = & \p{<}{}{\eta, \beta(\mathbf{u}) \, \mathbf{J}_{(w,u,J)}(Y)}_{L^2} & \quad [\text{since $\mathbf{J}_{(w,u,J)}$ is $C^\infty$-linear}] \\ & = & \p{<}{}{\eta, \beta(\mathbf{u}) \, \mathbf{J}_{(w,u,J)}(Y)}_{L^2(V)} & \quad [\text{since $\supp (\beta \circ \mathbf{u}) \subset V$}] \\ & > & 0 & \quad [\text{by Equation \eqref{eq:inner product bigger than 0 in v}}], \end{IEEEeqnarray*} which is the desired contradiction. We prove that $\eta = 0$ in the case where $w$ is not constant. Let $z_0 = (s_0, t_0) \in \R \times S^1$ and assume by contradiction that $\eta(z_0) \neq 0$. Choose $Y$ as in \eqref{eq:variation of acs before cut off} and $V$ as in \eqref{eq:inner product bigger than 0 in v}. Choose a slice $A \subset S^1 \times S$ which contains $(t_0, w(s_0))$ and which is transverse to the action of $S^1$ on $S^1 \times S$. Define $U_{S^1 \times S} = S^1 \cdot A$. Since $w$ is orthogonal to the infinitesimal action on $S$, for $A$ chosen small enough we have \begin{IEEEeqnarray*}{c+x*} (t, w(s)) \in U_{S^1 \times S} \Longrightarrow (s,t) \in V. \end{IEEEeqnarray*} Defining $U = U_{S^1 \times S} \times \hat{X}$, we have that $\mathbf{u}^{-1}(U) \subset V$.
Choosing $\beta$ as in \eqref{eq:bump function for transversality}, we obtain a contradiction in the same way as in the previous case. \end{proof} \begin{remark} We recall some terminology related to the Baire category theorem (we use the terminology from \cite[Section 10.2]{roydenRealAnalysis2010}). Let $X$ be a complete metric space and $E \subset X$. Then, $E$ is \textbf{meagre} or of the \textbf{first category} if $E$ is a countable union of nowhere dense subsets of $X$. We say that $E$ is \textbf{nonmeagre} or of the \textbf{second category} if $E$ is not meagre. We say that $E$ is \textbf{comeagre} or \textbf{residual} if $X \setminus E$ is meagre. Hence, a countable intersection of comeagre sets is comeagre. With this terminology, the Baire category theorem (see \cite[Section 10.2]{roydenRealAnalysis2010}) says that if $E$ is comeagre then $E$ is dense. The Sard--Smale theorem (see \cite[Theorem 1.3]{smaleInfiniteDimensionalVersion1965}) says that if $f \colon M \longrightarrow N$ is a Fredholm map between separable connected Banach manifolds of class $C^q$, for some $q > \max \{0, \operatorname{ind} f \}$, then the set of regular values of $f$ is comeagre. \end{remark} \begin{theorem} \label{thm:transversality in s1eft} There exists a dense subset $\mathcal{J}_{\mathrm{reg}} \subset \mathcal{J}$ with the following property. Let $J \in \mathcal{J}_{\mathrm{reg}}$ be an almost complex structure, $[z^{\pm}, \gamma^{\pm}]$ be equivalence classes of $1$-periodic orbits of $H$, and $(w,u) \in \hat{\mathcal{M}}(X, H, J, [z^+, \gamma^+], [z^-, \gamma^-])$. Then, near $(w,u)$ the space $\hat{\mathcal{M}}(X, H, J, [z^+, \gamma^+], [z^-, \gamma^-])$ is a manifold of dimension \begin{IEEEeqnarray*}{c+x*} \dim_{(w,u)} \hat{\mathcal{M}}(X, H, J, [z^+, \gamma^+], [z^-, \gamma^-]) = \ind^{\tau}(z^+, \gamma^+) - \ind^{\tau}(z^-, \gamma^-) + 1. \end{IEEEeqnarray*} \end{theorem} \begin{proof} Recall that the space $\mathcal{J}^{\varepsilon}$ is defined with respect to a reference almost complex structure $J_{\mathrm{ref}}$. We will now emphasize this fact using the notation $\mathcal{J}^{\varepsilon}(J_{\mathrm{ref}})$. As a first step, we show that for every $[z^{\pm}, \gamma^{\pm}]$ and every reference almost complex structure $J_{\mathrm{ref}}$ there exists a comeagre set $\mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{}, [z^{\pm}, \gamma^{\pm}]) \subset \mathcal{J}^{\varepsilon}(J_{\mathrm{ref}})$ such that every $J \in \mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{}, [z^{\pm}, \gamma^{\pm}])$ has the property in the statement of the theorem. For shortness, for every $J$ let $\hat{\mathcal{M}}(J,[z^{\pm}, \gamma^{\pm}]) \coloneqq \hat{\mathcal{M}}(X, H, J, [z^+, \gamma^+], [z^-, \gamma^-])$. By \cref{thm:s1eft l is surjective} and the implicit function theorem \cite[Theorem A.3.3]{mcduffHolomorphicCurvesSymplectic2012}, the universal moduli space \begin{IEEEeqnarray*}{c+x*} \hat{\mathcal{M}}([z^{\pm}, \gamma^{\pm}]) \coloneqq \{ (w,u,J) \mid J \in \mathcal{J}^{\varepsilon}(J_{\mathrm{ref}}), \, (w,u) \in \hat{\mathcal{M}}(J, [z^{\pm}, \gamma^{\pm}]) \} \end{IEEEeqnarray*} is a smooth Banach manifold. Consider the smooth map \begin{IEEEeqnarray*}{c} \pi \colon \hat{\mathcal{M}}([z^{\pm}, \gamma^{\pm}]) \longrightarrow \mathcal{J}^{\varepsilon}(J_{\mathrm{ref}}), \qquad \pi(w,u,J) = J. 
\end{IEEEeqnarray*} By \cite[Lemma A.3.6]{mcduffHolomorphicCurvesSymplectic2012}, \begin{IEEEeqnarray}{rCr} \ker \dv \pi(w,u,J) & \cong & \ker \mathbf{D}_{(w,u,J)} \plabel{eq:d pi and d u have isomorphic kernels}, \\ \coker \dv \pi(w,u,J) & \cong & \coker \mathbf{D}_{(w,u,J)} \plabel{eq:d pi and d u have isomorphic cokernels}. \end{IEEEeqnarray} Therefore, $\dv \pi (w,u,J)$ is Fredholm and has the same index as $\mathbf{D}_{(w,u,J)}$. By the Sard--Smale theorem, the set $\mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{}, [z^{\pm}, \gamma^{\pm}]) \subset \mathcal{J}^{\varepsilon}(J_{\mathrm{ref}})$ of regular values of $\pi$ is comeagre. By Equation \eqref{eq:d pi and d u have isomorphic cokernels}, $J \in \mathcal{J}^{\varepsilon}(J_{\mathrm{ref}})$ is a regular value of $\pi$ if and only if $\mathbf{D}_{(w,u,J)}$ is surjective for every $(w,u) \in (\delbar\vphantom{\partial}^{J})^{-1}(0)$. Therefore, by the implicit function theorem, for every $J \in \mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{}, [z^{\pm}, \gamma^{\pm}])$ the set $\hat{\mathcal{M}}(J,[z^{\pm},\gamma^{\pm}]) = (\delbar\vphantom{\partial}^J)^{-1}(0) \subset \mathcal{B}$ is a manifold of dimension \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\dim_{(w,u)} \hat{\mathcal{M}}(J,[z^{\pm},\gamma^{\pm}])}\\ \quad & = & \dim \ker \mathbf{D}_{(w,u,J)} & \quad [\text{by the implicit function theorem}] \\ & = & \operatorname{ind} \mathbf{D}_{(w,u,J)} & \quad [\text{since $\mathbf{D}_{(w,u,J)}$ is surjective}] \\ & = & \ind^{\tau}(z^+, \gamma^+) - \ind^{\tau}(z^-, \gamma^-) + 1 & \quad [\text{by \cref{thm:s1eft d is fredholm}}]. \end{IEEEeqnarray*} As a second step, we show that we can switch the order of the quantifiers in the first step, i.e. that for every reference almost complex structure $J_{\mathrm{ref}}$ there exists a comeagre set $\mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{}) \subset \mathcal{J}^{\varepsilon}(J_{\mathrm{ref}})$ such that for every $J \in \mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{})$ and every $[z^{\pm}, \gamma^{\pm}]$, the property in the statement of the theorem holds. For this, define \begin{IEEEeqnarray*}{c+x*} \mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{}) \coloneqq \bigcap_{[z^{\pm}, \gamma^{\pm}]} \mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{}, [z^{\pm}, \gamma^{\pm}]). \end{IEEEeqnarray*} Since $H$ is nondegenerate, in the above expression we are taking an intersection over a finite set of data, and hence $\mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{})$ is comeagre. This finishes the proof of the second step. By the Baire category theorem, $\mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{}) \subset \mathcal{J}^{\varepsilon}(J_{\mathrm{ref}}^{})$ is dense. Finally, define \begin{IEEEeqnarray*}{c+x*} \mathcal{J}_{\mathrm{reg}} \coloneqq \bigcup_{J_{\mathrm{ref}} \in \mathcal{J}} \mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{}). \end{IEEEeqnarray*} Then $\mathcal{J}_{\mathrm{reg}}$ is the desired set of almost complex structures: every $J \in \mathcal{J}_{\mathrm{reg}}$ has the property in the statement of the theorem by the first two steps, and $\mathcal{J}_{\mathrm{reg}}$ is dense in $\mathcal{J}$ because, for every $J_{\mathrm{ref}} \in \mathcal{J}$, the set $\mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{})$ is dense in $\mathcal{J}^{\varepsilon}(J_{\mathrm{ref}}^{})$, which contains $J_{\mathrm{ref}}^{} = \exp_{J_{\mathrm{ref}}}^{}(0)$. \end{proof} \chapter{\texorpdfstring{$S^1$}{S1}-equivariant Floer homology} \label{chp:floer} \section{Categorical setup} In this section, we define categories that will allow us to express the constructions of this chapter as functors. We will define a category of complexes (see \cref{def:category complexes,def:category of complexes up to homotopy}) and a category of modules (see \cref{def:category modules}).
Associated to these, there is a Homology functor between the two categories (\cref{def:homology functor}). \begin{remark} Recall that a \textbf{preorder} on a set $S$ is a binary relation $\leq$ which is reflexive and transitive. A preordered set $(S,\leq)$ can be seen as a category $S$ by declaring that objects of $S$ are elements of the set $S$ and that there exists a unique morphism from $a$ to $b$ if and only if $a \leq b$, for $a, b \in S$. Throughout this thesis, we will view $\R$ as a category in this sense. \end{remark} \begin{definition} Let $\mathbf{C}$ be a category. A \textbf{filtered object} in $\mathbf{C}$ is a functor $V \colon \R \longrightarrow \mathbf{C}$. A \textbf{morphism} of filtered objects from $V$ to $W$ is a natural transformation $\phi \colon V \longrightarrow W$. We denote by $\Hom(\R, \mathbf{C})$ the category of filtered objects in $\mathbf{C}$. In this case, we will use the following notation. If $a \in \R$, we denote by $V^a$ the corresponding object of $\mathbf{C}$. If $\mathbf{C}$ is abelian and $a \leq b \in \R$, we denote $V^{(a,b]} \coloneqq V^b / V^a \coloneqq \coker (\iota^{b,a} \colon V^a \longrightarrow V^b)$. \end{definition} \begin{definition} \label{def:category complexes} Denote by $\tensor[_\Q]{\mathbf{Mod}}{}$ the category of $\Q$-modules. We define a category $\komp$ as follows. An object of $\komp$ is a triple $(C,\del,U)$, where $C \in \Hom(\R, \tensor[_\Q]{\mathbf{Mod}}{})$ is a filtered $\Q$-module and $\partial, U \colon C \longrightarrow C$ are natural transformations such that \begin{IEEEeqnarray*}{lCls+x*} \partial \circ \partial & = & 0, \\ \partial \circ U & = & U \circ \partial. \end{IEEEeqnarray*} A morphism in $\komp$ from $(C,\del^C,U^C)$ to $(D,\del^D,U^D)$ is a natural transformation $\phi \colon C \longrightarrow D$ for which there exists a natural transformation $T \colon C \longrightarrow D$ such that \begin{IEEEeqnarray*}{rCrCl} \partial^D & \circ \phi - \phi \circ & \partial^C & = & 0, \\ U^D & \circ \phi - \phi \circ & U^C & = & \partial^D \circ T + T \circ \partial^C. \end{IEEEeqnarray*} \end{definition} \begin{definition} \phantomsection\label{def:category of complexes up to homotopy} Let $\phi, \psi \colon (C, \partial^C, U^C) \longrightarrow (D, \partial^D, U^D)$ be morphisms in $\komp$. A \textbf{chain homotopy} from $\phi$ to $\psi$ is a natural transformation $T \colon C \longrightarrow D$ such that \begin{IEEEeqnarray*}{c+x*} \psi - \phi = \partial^D \circ T + T \circ \partial^C. \end{IEEEeqnarray*} The notion of chain homotopy defines an equivalence relation $\sim$ on each set of morphisms in $\komp$. We denote the quotient category (see for example \cite[Theorem 0.4]{rotmanIntroductionAlgebraicTopology1988}) by \begin{IEEEeqnarray*}{c+x*} \comp \coloneqq \komp / \sim. \end{IEEEeqnarray*} \end{definition} As we will see in \cref{sec:Floer homology}, the $S^1$-equivariant Floer chain complex of $X$ (with respect to a Hamiltonian $H$ and almost complex structure $J$) is an object \begin{IEEEeqnarray*}{c+x*} \homology{}{S^1}{}{F}{C}{}{}(X,H,J) \in \comp. \end{IEEEeqnarray*} \begin{definition} \label{def:category modules} We define a category $\modl$ as follows. An object of $\modl$ is a pair $(C,U)$, where $C \in \Hom(\R, \tensor[_\Q]{\mathbf{Mod}}{})$ is a filtered $\Q$-module and $U \colon C \longrightarrow C$ is a natural transformation. A morphism in $\modl$ from $(C,U^C)$ to $(D,U^D)$ is a natural transformation $\phi \colon C \longrightarrow D$ such that $\phi \circ U^C = U^D \circ \phi$. 
\end{definition} In \cref{sec:Floer homology}, we will show that the $S^1$-equivariant Floer homology of $X$ (with respect to a Hamiltonian $H$ and almost complex structure $J$) and the $S^1$-equivariant symplectic homology of $X$ are objects of $\modl$: \begin{IEEEeqnarray*}{rCls+x*} \homology{}{S^1}{}{F}{H}{}{}(X,H,J) & \in & \modl, \\ \homology{}{S^1}{}{S}{H}{}{}(X) & \in & \modl. \end{IEEEeqnarray*} \begin{lemma} The category $\modl$ is abelian, complete and cocomplete. \end{lemma} \begin{proof} Recall the relevant definitions: a category $\mathbf{I}$ is small if the class of morphisms of $\mathbf{I}$ is a set, and a category $\mathbf{C}$ is (co)complete if for every small category $\mathbf{I}$ and every functor $F \colon \mathbf{I} \longrightarrow \mathbf{C}$, the (co)limit of $F$ exists. By \cite[Theorem 3.4.12]{riehlCategoryTheoryContext2016}, it suffices to show that $\modl$ has products, equalizers, coproducts and coequalizers. Since $\modl$ is preadditive, equalizers and coequalizers can be computed as kernels and cokernels of differences of morphisms, so it is enough to exhibit kernels, cokernels, products and coproducts. First, notice that $\tensor[_\Q]{\mathbf{Mod}}{}$ is abelian, complete and cocomplete. Therefore, the same is true for $\Hom(\R, \tensor[_\Q]{\mathbf{Mod}}{})$. Let $f \colon C \longrightarrow D$ be a morphism in $\modl$. Then $f$ has a kernel and a cokernel in $\Hom(\R, \tensor[_\Q]{\mathbf{Mod}}{})$. We need to show that the kernel and the cokernel are objects of $\modl$, i.e. that they come equipped with a $U$ map. The $U$ maps for $\ker f, \coker f$ are the unique maps (coming from the universal property of the (co)kernel) such that the diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \ker f \ar[r] \ar[d, swap, dashed, "\exists ! U_{\ker f}"] & C \ar[d, "U_C"] \ar[r, "f"] & D \ar[d, "U_D"] \ar[r] & \coker f \ar[d, dashed, "\exists ! U_{\coker f}"] \\ {\ker f} \ar[r] & {C} \ar[r, "f"] & {D} \ar[r] & {\coker f} \end{tikzcd} \end{IEEEeqnarray*} commutes. Let $C_i$, for $i \in I$, be a family of objects in $\modl$. Then, the product $\prod_{i \in I}^{} C_i$ and the coproduct $\bigoplus_{i \in I}^{} C_i$ exist in $\Hom(\R, \tensor[_\Q]{\mathbf{Mod}}{})$. Again, we need to show that the product and coproduct come equipped with a $U$ map. The $U$ maps for the product and coproduct are the maps \begin{IEEEeqnarray*}{LCRRCRCL+x*} U_{\bigproduct_{i \in I}^{} C_i} & = & \bigproduct_{i \in I}^{} U_{C_i} \colon & \bigproduct_{i \in I}^{} C_i & \longrightarrow & \bigproduct_{i \in I}^{} C_i, \\ U_{\bigdirectsum_{i \in I}^{} C_i} & = & \bigdirectsum_{i \in I}^{} U_{C_i} \colon & \bigdirectsum_{i \in I}^{} C_i & \longrightarrow & \bigdirectsum_{i \in I}^{} C_i, \end{IEEEeqnarray*} coming from the respective universal properties. \end{proof} \begin{definition} \label{def:homology functor} Let $(C,\partial,U) \in \comp$. The \textbf{homology} of $(C,\partial,U)$ is the object of $\modl$ given by $H(C, \partial, U) \coloneqq (H(C, \partial), H(U))$, where $H(C, \partial) = \ker \partial / \img \partial$ and $H(U)$ is the unique map such that the diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \img \partial \ar[r] \ar[d, swap, "U"] & \ker \partial \ar[r] \ar[d, "U"] & \ker \partial / \img \partial \ar[d, dashed, "\exists !"] \ar[r, equals] & H(C, \partial) \ar[d, "H(U)"] \\ \img \partial \ar[r] & \ker \partial \ar[r] & \ker \partial / \img \partial \ar[r, equals] & H(C, \partial) \end{tikzcd} \end{IEEEeqnarray*} commutes.
If $\phi \colon (C, \partial^C, U^C) \longrightarrow (D, \partial^D, U^D)$ is a morphism in $\comp$, we define the induced morphism on homology, $H(\phi) \colon H(C, \partial^C) \longrightarrow H(D, \partial^D)$, to be the unique map such that the diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \img \partial^C \ar[r] \ar[d, swap, "\phi"] & \ker \partial^C \ar[r] \ar[d, "\phi"] & \ker \partial^C / \img \partial^C \ar[d, dashed, "\exists !"] \ar[r, equals] & H(C, \partial^C) \ar[d, "H(\phi)"] \\ \img \partial^D \ar[r] & \ker \partial^D \ar[r] & \ker \partial^D / \img \partial^D \ar[r, equals] & H(D, \partial^D) \end{tikzcd} \end{IEEEeqnarray*} commutes. With these definitions, homology is a functor $H \colon \comp \longrightarrow \modl$. \end{definition} \section{Action functional} \label{sec:action functional} Our goal in this section is to establish the definitions that we will need later to define the $S^1$-equivariant Floer chain complex. We define suitable families of admissible Hamiltonians (\cref{def:hamiltonians}) and almost complex structures (\cref{def:acs}). The key points of this section are \cref{def:generators}, where we define the set of generators of the $S^1$-equivariant Floer chain complex, and \cref{def:flow lines}, where we define the trajectories that are counted in the differential of the $S^1$-equivariant Floer chain complex. We also define the action of a generator (\cref{def:action functional}), which will induce a filtration on the $S^1$-equivariant Floer chain complex. We will assume that $(X,\lambda)$ is a nondegenerate Liouville domain with completion $(\hat{X},\hat{\lambda})$. Let $\varepsilon \coloneqq \frac{1}{2} \min \operatorname{Spec}(\partial X,\lambda|_{\partial X})$. We start by recalling some basic facts about $S^{2N+1}$ and $\C P^N$. For each $N \in \Z_{\geq 1}$ we denote\begin{IEEEeqnarray*}{c+x*} S^{2N + 1} \coloneqq \{ (z_0,\ldots,z_N) \in \C ^{N+1} \ | \ |z_0|^2 + \cdots + |z_N|^2 = 1 \}. \end{IEEEeqnarray*} There is an action $S^1 \times S^{2N + 1} \longrightarrow S^{2N + 1}$ given by $(t,z) \longmapsto e ^{2 \pi i t} z$. This action is free and proper, so we can consider the quotient manifold $S^{2N+1}/S^1$. The Riemannian metric of $\C ^{N+1} = \R ^{2(N+1)}$ pulls back to a Riemannian metric on $S^{2N + 1}$. The action of $S^1$ on $S^{2N + 1}$ is by isometries, so there exists a unique Riemannian metric on $S^{2N+1}/S^1$ such that the projection $S^{2N+1} \longrightarrow S^{2N+1}/S^1$ is a Riemannian submersion. The set $\C \setminus \{0\}$ is a group with respect to multiplication, and it acts on $\C ^{N+1} \setminus \{0\}$ by multiplication. This action is free and proper, so we can form the quotient \begin{IEEEeqnarray*}{c+x*} \C P^{N} \coloneqq (\C ^{N+1} \setminus \{0\})/(\C \setminus \{0\}). \end{IEEEeqnarray*} By the universal property of the quotient, there exists a unique map $S^{2N+1}/S^1 \longrightarrow \C P^N$ such that the following diagram commutes: \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} S^{2N + 1} \ar[r, hook] \ar[d, two heads] & \C ^{N+1} \setminus \{0\} \ar[d, two heads] \\ S^{2N + 1} / S^1 \ar[r, hook, two heads, dashed, swap, "\exists !"] & \C P^N \end{tikzcd} \end{IEEEeqnarray*} The map $S^{2N + 1} / S^1 \longrightarrow \C P^N$ is a diffeomorphism. Define the Fubini--Study metric on $\C P^N$ to be the unique Riemannian metric on $\C P^N$ such that $S^{2N + 1} / S^1 \longrightarrow \C P^N$ is an isometry. We will now consider a special family of functions on $S^{2N+1}$ and $\C P^N$.
Define a function\begin{IEEEeqnarray*}{rrCl} f_N \colon & \C P^N & \longrightarrow & \R \\ & [w] & \longmapsto & \frac{\sum_{j=0}^{N} j|w_j|^2}{\sum_{j=0}^{N} |w_j|^2}. \end{IEEEeqnarray*} Define $\tilde{f}_N$ to be the pullback of $f_N$ to $S^{2N+1}$. Let $e_0,\ldots,e_N$ be the canonical basis of $\C ^{N+1}$ (as a vector space over $\C$). Then, \begin{IEEEeqnarray*}{rCls+x*} \critpt \tilde{f}_N & = & \{ e^{2 \pi i t} e_j \mid t \in S^1, j = 0,\ldots,N \}, \\ \critpt f_N & = & \{[e_0],\ldots,[e_N]\}. \end{IEEEeqnarray*} The function $f_N$ is Morse, while $\tilde{f}_N$ is Morse--Bott. The Morse indices are given by \begin{IEEEeqnarray*}{rCll} \morse([e_j],f_N) & = & 2j, & \quad \text{for all } j=0,\ldots,N, \\ \morse(z,\tilde{f}_N) & = & \morse([z], f_N), & \quad \text{for all } z \in \critpt \tilde{f}_N. \end{IEEEeqnarray*} We will use the notation $\morse(z) \coloneqq \morse(z,\tilde{f}_N) = \morse([z], f_N)$. We now study the relation between $\tilde{f}_{N^-}$ and $\tilde{f}_{N^+}$ for $N^- \geq N^+$. For every $k$ such that $0 \leq k \leq N^- - N^+$, define maps \begin{IEEEeqnarray*}{rrCl} \inc^{N^-,N^+}_k \colon & S^{2N^++1} & \longrightarrow & S^{2N^-+1} \\ & (z_0,\ldots,z_{N^+}) & \longmapsto & (\underbrace{0,\ldots,0}_k,z_0,\ldots,z_{N^+},0,\ldots,0). \end{IEEEeqnarray*} Let $I_k \colon \R \longrightarrow \R$ be given by $I_k(x) = x + k$. Then, the following diagram commutes: \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd}[row sep=scriptsize, column sep={{{{6em,between origins}}}}] & S^{2N^+ + 1} \arrow[dl, swap, "\inc_{k}^{N^-,N^+}"] \arrow[rr, "\tilde{f}_{N^+}"] \arrow[dd] & & \R \arrow[dl, "I_k"] \arrow[dd, equals] \\ S^{2N^- + 1} \arrow[rr, crossing over, near end, "\tilde{f}_{N^-}"] \arrow[dd] & & \R \\ & \C P^{N^+} \arrow[dl, dashed, swap, outer sep = -4pt, "\exists ! i_{k}^{N^-,N^+}"] \arrow[rr, near start, "f_{N^+}"] & & \R \arrow[dl, "I_k"] \\ \C P ^{N^-} \arrow[rr, swap, "f_{N^-}"] & & \R \arrow[from=uu, crossing over, equals] \end{tikzcd} \end{IEEEeqnarray*} The vector fields $\nabla \tilde{f}_{N^+}$ and $\nabla \tilde{f}_{N^-}$ are $\inc_{k}^{N^-,N^+}$-related, and analogously the vector fields $\nabla {f}_{N^+}$ and $\nabla {f}_{N^-}$ are ${i}_{k}^{N^-,N^+}$-related. For $t \in \R$, denote by $\phi^t_{\tilde{f}_{N^-}}$ the time-$t$ gradient flow of $\tilde{f}_{N^-}$, and analogously for $\tilde{f}_{N^+}$, $f_{N^-}$ and $f_{N^+}$. Then, the following diagram commutes: \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd}[row sep=scriptsize, column sep={{{{6em,between origins}}}}] & S^{2N^+ + 1} \arrow[dl, swap, "{\inc_k^{N^-,N^+}}"] \arrow[rr, "\phi^t_{\tilde{f}_{N^+}}"] \arrow[dd] & & S^{2N^+ + 1} \arrow[dl, near end, "\inc_k^{N^-,N^+}"] \arrow[dd] \\ S^{2N^- + 1} \arrow[rr, crossing over, near end, "\phi^t_{\tilde{f}_{N^-}}"] \arrow[dd] & & S^{2N^- + 1} \\ & \C P^{N^+} \arrow[dl, swap, "i_k^{N^-,N^+}"] \arrow[rr, near start, "\phi^t_{f_{N^+}}"] & & \C P^{N^+} \arrow[dl, "i_k^{N^-,N^+}"] \\ \C P ^{N^-} \arrow[rr, swap, "\phi^t_{f_{N^-}}"] & & \C P^{N^-} \arrow[from=uu, crossing over] \end{tikzcd} \end{IEEEeqnarray*} \begin{definition} \label{def:hamiltonians} A parametrized Hamiltonian $H \colon S^1 \times S^{2N+1} \times \hat{X} \longrightarrow \R$ is \textbf{admissible} if it satisfies the conditions in \cref{item:invariant,item:profile,item:ndg,item:flow lines,item:pullbacks}. We denote the set of such $H$ by $\mathcal{H}(X,N)$.
\begin{enumerate} \item \label{item:profile} There exist $D \in \R$, $C \in \R_{>0} \setminus \operatorname{Spec}(\del X, \lambda|_{\del X})$ and $\delta > 0$ such that: \begin{enumerate}[label=(\Roman*)] \item on $S^1 \times S^{2N+1} \times X$, we have that $- \varepsilon < H < 0$, $H$ is $S^1$-independent and $H$ is $C^2$-small (so that there are no nonconstant $1$-periodic orbits); \item on $S^1 \times S^{2N+1} \times [0,\delta] \times \del X$, we have that $-\varepsilon < H < \varepsilon$ and $H$ is $C^2$-close to $(t,z,r,x) \longmapsto h(e^r)$, where $h \colon [1,e ^{\delta}] \longrightarrow \R$ is increasing and strictly convex; \item[(S)] on $S^1 \times S^{2N+1} \times [\delta, + \infty) \times \del X$, we have that $H(t,z,r,x) = C e^r + D$. \end{enumerate} \item \label{item:invariant} Consider the action of $S^1$ on $S^1 \times S^{2N+1} \times \hat{X}$ given by $t' \cdot (t,z,x) = (t' + t, e ^{2 \pi i t'} z, x)$. Then $H$ is invariant under this action, i.e. $H(t'+ t, e ^{2 \pi i t'} z, x) = H(t,z,x)$. \item \label{item:ndg} If $z$ is a critical point of $\tilde{f}_N$ then $H_z$ is nondegenerate. \item \label{item:flow lines} For every $(t,z,x) \in S^1 \times S^{2N+1} \times \hat{X}$ we have $\p{<}{}{\nabla_{S^{2N+1}}H(t,z,x), \nabla \tilde{f}_N(z)} \leq 0$. \item \label{item:pullbacks} There exists $E \geq 0$ such that $(\inc^{N,N-1}_0)^* H = (\inc^{N,N-1}_1)^* H + E$. \end{enumerate} \end{definition} \begin{definition} \label{def:acs} A parametrized almost complex structure $J \colon S^1 \times S^{2N+1} \times \hat{X} \longrightarrow \End(T \hat{X})$ is \textbf{admissible} if it satisfies the conditions in \cref{def:acs 1,def:acs 2,def:acs 3,def:acs 4}. We denote the set of such $J$ by $\mathcal{J}(X,N)$. \begin{enumerate} \item \label{def:acs 1} $J$ is $S^1$-invariant, i.e. $J(t' + t, e ^{2 \pi i t'} z, x) = J(t, z, x)$ for every $t' \in S^1$ and $(t,z,x) \in S^1 \times S^{2N+1} \times \hat{X}$. \item \label{def:acs 2} $J$ is $\hat{\omega}$-compatible. \item \label{def:acs 3} The restriction of $J$ to $S^1 \times S^{2N+1} \times \R_{\geq 0} \times \del X$ is cylindrical. \item \label{def:acs 4} $(\inc_0^{N,N-1})^* J = (\inc_1^{N,N-1})^* J$. \end{enumerate} \end{definition} \begin{definition} Denote by $\admissible{X}$ the set of tuples \begin{IEEEeqnarray*}{c+x*} (H,J) \in \bigcoproduct_{N \in \Z_{\geq 1}}^{} \mathcal{H}(X,N) \times \mathcal{J}(X,N) \end{IEEEeqnarray*} which are regular, where ``regular'' means that the moduli spaces of \cref{def:flow lines} are transversely cut out. Define a preorder $\leq$ on $\admissible{X}$ by \begin{IEEEeqnarray*}{rCl} (H^+,J^+) \leq (H^-,J^-) & \mathrel{\mathop:}\Longleftrightarrow & N^+ \leq N^- \text{ and } H^+ \leq (\inc_0 ^{N^-,N^+})^* H^-. \end{IEEEeqnarray*} \end{definition} \begin{definition} \label{def:generators} Let $N \in \Z_{\geq 1}$ and $H \in \mathcal{H}(X,N)$. Define \begin{IEEEeqnarray*}{c+x*} \hat{\mathcal{P}}(H) \coloneqq \left\{ (z, \gamma) \ \middle\vert \begin{array}{l} z \in S^{2N+1} \text{ is a critical point of } \tilde{f}_N, \\ \gamma \in C^{\infty}(S^1, \hat{X}) \text{ is a $1$-periodic orbit of } H_z \end{array} \right\}. \end{IEEEeqnarray*} There is an action of $S^1$ on $\hat{\mathcal{P}}(H)$ given by $t \cdot (z,\gamma) \coloneqq (e ^{2 \pi i t} z, \gamma(\cdot - t))$. Define the quotient \begin{IEEEeqnarray*}{c+x*} \mathcal{P}(H) \coloneqq \hat{\mathcal{P}}(H) / S^1.
\end{IEEEeqnarray*} \end{definition} \begin{remark} \label{rmk:types of orbits} If $(z, \gamma) \in \hat{\mathcal{P}}(H)$, then either $\img \gamma$ is in region $\rmn{1}$ and $\gamma$ is constant or $\img \gamma$ is in region $\rmn{2}$ and $\gamma$ is nonconstant. In the slope region, i.e. region S, there are no $1$-periodic orbits of $H$ because $C$ is not in $\operatorname{Spec}(\del X, \lambda|_{\del X})$ and by \cref{cor:hamiltonian orbits are reeb orbits}. \end{remark} \begin{definition} \label{def:flow lines} Let $N \in \Z_{\geq 1}$, $H \in \mathcal{H}(X,N)$ and $J \in \mathcal{J}(X,N)$. A pair $(w,u)$, where $w \colon \R \longrightarrow S^{2N+1}$ and $u \colon \R \times S^1 \longrightarrow \hat{X}$ is a solution of the \textbf{parametrized Floer equation} if \begin{equation*} \left\{ \, \begin{IEEEeqnarraybox}[ \IEEEeqnarraystrutmode \IEEEeqnarraystrutsizeadd{7pt} {7pt}][c]{rCl} \dot{w}(s) & = & \nabla \tilde{f}_N(w(s)) \\ \pdv{u}{s}(s,t) & = & - J^t_{w(s)}(u(s,t)) \p{}{2}{ \pdv{u}{t}(s,t) - X_{H^t_{w(s)}} (u(s,t)) }. \end{IEEEeqnarraybox} \right. \end{equation*} For $[z^+,\gamma^+], [z^-,\gamma^-] \in \mathcal{P}(H)$, define $\hat{\mathcal{M}}(H,J,[z^+,\gamma^+],[z^-,\gamma^-])$ to be the moduli space of solutions $(w,u)$ of the parametrized Floer equation such that $(w(s),u(s,\cdot))$ converges as $s \to \pm \infty$ to an element in the equivalence class $[z^\pm,\gamma^\pm]$. We define the following two group actions. \begin{IEEEeqnarray*}{rsrsrCl} \R & \quad \text{acts on} \quad & \hat{\mathcal{M}}(H,J,[z^+,\gamma^+],[z^-,\gamma^-]) & \quad \text{by} \quad & s \cdot (w,u) & \coloneqq & (w(\cdot - s), u(\cdot-s, \cdot)), \\ S^1 & \quad \text{acts on} \quad & \hat{\mathcal{M}}(H,J,[z^+,\gamma^+],[z^-,\gamma^-]) & \quad \text{by} \quad & t \cdot (w,u) & \coloneqq & (e ^{2 \pi i t} w, u(\cdot, \cdot - t)). \end{IEEEeqnarray*} The actions of $\R$ and $S^1$ on $\hat{\mathcal{M}}(H,J,[z^+,\gamma^+],[z^-,\gamma^-])$ commute, so they define an action of $\R \times S^1$ on $\hat{\mathcal{M}}(H,J,[z^+,\gamma^+],[z^-,\gamma^-])$. Finally, let \begin{IEEEeqnarray*}{c+x*} \mathcal{M}(H,J,[z^+,\gamma^+],[z^-,\gamma^-]) \coloneqq \hat{\mathcal{M}}(H,J,[z^+,\gamma^+],[z^-,\gamma^-]) / \R \times S^1. \end{IEEEeqnarray*} \end{definition} \begin{definition} \phantomsection\label{def:action functional} For $(z, \gamma) \in \hat{\mathcal{P}}(H)$, the \textbf{action} of $(z, \gamma)$, denoted $\mathcal{A}_H(z, \gamma)$, is given by \begin{IEEEeqnarray*}{c+x*} \mathcal{A}_{H}(z,\gamma) \coloneqq \mathcal{A}_{H_z}(\gamma) = \int_{S^1}^{} \gamma^* \hat{\lambda} - \int_{S^1}^{} H(t,z,\gamma(t)) \edv t. \end{IEEEeqnarray*} The action functional is a map $\mathcal{A}_H \colon \hat{\mathcal{P}}(H) \longrightarrow \R$. Since $H$ is $S^1$-invariant, $\mathcal{A}_H$ is $S^1$-invariant as well, and therefore there is a corresponding map $\mathcal{A}_H$ whose domain is $\mathcal{P}(H)$. \end{definition} \begin{lemma} \label{lem:action admissible} The actions of $1$-periodic orbits of $H$ are ordered according to \begin{IEEEeqnarray*}{c+x*} 0 < \mathcal{A}_H(\rmn{1}) < \varepsilon < \mathcal{A}_H(\rmn{2}). \end{IEEEeqnarray*} \end{lemma} \begin{proof} Consider \cref{fig:action ordering 1}. By \cref{lem:action in symplectization,def:hamiltonians}, we have that $\mathcal{A}_H$ is constant equal to $-H$ in regions $\rmn{1}$ and S and $\mathcal{A}_H$ is strictly increasing in region $\rmn{2}$. 
We remark that strictly speaking, the Hamiltonian plotted in the picture is not $H$ but instead a Hamiltonian which is $C^2$-close to $H$. However, it suffices to prove the statement for the Hamiltonian which approximates $H$. From this discussion, we conclude that $0 < \mathcal{A}_H(\rmn{1}) < \varepsilon$. We show that $\mathcal{A}_H(\rmn{2}) > \varepsilon$. \begin{IEEEeqnarray*}{rCls+x*} \mathcal{A}_H(\rmn{2}) & = & e^r T(r) - H(r) & \quad [\text{by \cref{lem:action in symplectization}}] \\ & \geq & 2 \varepsilon e^r - H(r) & \quad [\text{$2 \varepsilon = \min \operatorname{Spec}(\del X, \lambda|_{\del X})$ and $T(r) \in \operatorname{Spec}(\del X, \lambda|_{\del X})$}] \\ & > & \varepsilon (2 e^r - 1) & \quad [\text{$H(r) < \varepsilon$}] \\ & > & \varepsilon & \quad [\text{$r > 0$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{figure}[ht] \centering \begin{tikzpicture} [ help lines/.style={thin, draw = black!50}, Hamiltonian/.style={thick}, action/.style={thick} ] \tikzmath{ \a = 4; \b = 1; \c = 3; \d = 1; \h = 0.5; \sml = 0.05; \y = -0.3; \z = -0.1; \f = \c + \d; \m = - 12 * (-\y + \z) / (-1+exp(\d))^4; \n = 2 * (-1 + 3 * exp(\d)) * (-\y + \z) / (-1+exp(\d))^4; \o = ( -2 * exp(\d) * \y + 6 * exp(2 * \d) * \y - 4 * exp(3 * \d) * \y + exp(4 * \d) * \y + \z - 2 * exp(\d) * \z ) / (-1+exp(\d))^4; \u = -2 * (\y - \z) / (-1+exp(\d)); \v = (2 * exp(\d) * \y - \z - exp(\d) * \z) / (-1+exp(\d)); function h1 (\r) { return \y; }; function h2 (\r) { return {\o + \n * \r + 1/2 * exp(\d) * \m * \r^2 + 1/6 * (-1 - exp(\d)) * \m * \r^3 + 1/12 * \m * \r^4 }; }; function h2p(\r) { return {\n + 1/6 * \m * \r * (-3 * exp(\d) * (-2 + \r) + \r * (-3 + 2 * \r))}; }; function hs (\r) { return { \u * \r + \v }; }; function H1(\r) { return { \y }; }; function H2(\r) { return { h2(exp(\r)) }; }; function Hs(\r) { return { hs(exp(\r)) }; }; function a1(\r) { return { -\y }; }; function a2(\r) { return { exp(\r) * h2p(exp(\r)) - H2(\r) }; }; function as(\r) { return { -\v }; }; \e = ln((\a-\v)/\u) - \d; \g = \f + \e; } \draw[->] (0 , 0) -- (\g, 0); \draw[->] (0 ,-\b) -- (0 ,\a) node[above] {$\R$}; \draw[->] (\c,-\b) node[below] {$0$} -- (\c,\a) node[above] {$\R$}; \draw[help lines] (0 , \h) node[left] {$+\varepsilon$} -- (\g, \h); \draw[help lines] (0 ,-\h) node[left] {$-\varepsilon$} -- (\g,-\h); \draw[help lines] (\f,-\b) node[below] {$\delta$} -- (\f, \a); \draw[Hamiltonian, domain = 0:\c] plot (\x, {H1(\x - \c)}); \draw[Hamiltonian, domain = \c:\f] plot (\x, {H2(\x - \c)}); \draw[Hamiltonian, domain = \f:\g] plot (\x, {Hs(\x - \c)}) node[right] {$H$}; \draw[action, domain = 0:\c] plot (\x, {a1(\x - \c)}); \draw[action, domain = \c:\f] plot (\x, {a2(\x - \c)}); \draw[action, domain = \f:\g] plot (\x, {as(\x - \c)}) node[right] {$\mathcal{A}_H$}; \draw (\c/2 ,\a) node[below] {$\mathrm{I}$}; \draw (\c + \d/2 ,\a) node[below] {$\mathrm{II}$}; \draw (\c + 3*\d/2,\a) node[below] {$\mathrm{S}$}; \draw[help lines, decoration = {brace, mirror, raise=5pt}, decorate] (0,-\b-.75) -- node[below=6pt] {$X$} (\c - \sml,-\b-.75); \draw[help lines, decoration = {brace, mirror, raise=5pt}, decorate] (\c + \sml,-\b-.75) -- node[below=6pt] {$\R_{\geq 0} \times \del X$} (\g,-\b-.75); \end{tikzpicture} \caption{Action of a $1$-periodic orbit of $H$} \label{fig:action ordering 1} \end{figure} \begin{remark} Denote by $\critpt \mathcal{A}_{H} \subset S^{2N+1} \times C^\infty(S^1,\hat{X})$ the set of critical points of the action functional. 
Then, $\hat{\mathcal{P}}(H) = \critpt \mathcal{A}_{H}$, as is usual for various Floer theories. However, if $(w,u)$ is a path in $S^{2N+1} \times C^\infty(S^1,\hat{X})$, it is not true that $(w,u)$ is a gradient flow line of $\mathcal{A}_{H}$ if and only if $(w,u)$ is a solution of the parametrized Floer equation. \end{remark} \section{\texorpdfstring{$S^1$}{S1}-equivariant Floer homology} \label{sec:Floer homology} Let $(X,\lambda)$ be a nondegenerate Liouville domain. In this section, we define the $S^1$-equivariant Floer chain complex of $(X,\lambda)$ and other related invariants, namely the $S^1$-equivariant Floer homology, the positive $S^1$-equivariant Floer homology, the $S^1$-equivariant symplectic homology and the positive $S^1$-equivariant symplectic homology. The presentation we give is based on \cite{guttSymplecticCapacitiesPositive2018}. Other references discussing $S^1$-equivariant symplectic homology are \cite{guttMinimalNumberPeriodic2014,guttPositiveEquivariantSymplectic2017,bourgeoisGysinExactSequence2013,bourgeoisFredholmTheoryTransversality2010,bourgeoisEquivariantSymplecticHomology2016,seidelBiasedViewSymplectic2008}. The $S^1$-equivariant Floer complex of $X$ depends on the additional data of $(H,J) \in \admissible{X}$. More precisely, it can be encoded in a functor $\homology{}{S^1}{X}{F}{C}{}{} \colon \admissible{X}^{} \longrightarrow \comp$. We start by defining this functor on objects. For each $I = (H,J) \in \admissible{X}$, we need to specify $\homology{}{S^1}{X}{F}{C}{}{}(H,J) \coloneqq \homology{}{S^1}{}{F}{C}{}{}(X,H,J) \in \comp$. \begin{definition} We define $\homology{}{S^1}{}{F}{C}{}{}(X,H,J)$ to be the free $\Q$-module generated by the elements of $\mathcal{P}(H)$. Define $\homology{}{S^1}{}{F}{C}{a}{}(X,H,J)$ to be the subspace generated by the elements $[z,\gamma]$ of $\mathcal{P}(H)$ such that $\mathcal{A}_{H}(z,\gamma) \leq a$. These modules come equipped with inclusion maps \begin{IEEEeqnarray*}{rCls+x*} \iota^{a} \colon \homology{}{S^1}{}{F}{C}{a}{}(X,H,J) & \longrightarrow & \homology{}{S^1}{}{F}{C}{}{}(X,H,J), & \quad for $a \in \R$, \\ \iota^{b,a} \colon \homology{}{S^1}{}{F}{C}{a}{}(X,H,J) & \longrightarrow & \homology{}{S^1}{}{F}{C}{b}{}(X,H,J), & \quad for $a \leq b$. \end{IEEEeqnarray*} \end{definition} For $[z^\pm,\gamma^\pm] \in \mathcal{P}(H)$, consider the moduli space $\mathcal{M}(H,J,[z^+,\gamma^+],[z^-,\gamma^-])$. Near a point $(w,u) \in \mathcal{M}(H,J,[z^+,\gamma^+],[z^-,\gamma^-])$, this space is a manifold (see \cref{thm:transversality in s1eft}) of dimension \begin{IEEEeqnarray}{c+x*} \plabel{eq:dimension for ms} \dim_{(w,u)} \mathcal{M}(H,J,[z^+,\gamma^+],[z^-,\gamma^-]) = \ind^{\tau^+}(z^+,\gamma^+) - \ind^{\tau^-}(z^-,\gamma^-) - 1, \end{IEEEeqnarray} where \begin{IEEEeqnarray*}{c+x*} \ind^{\tau^\pm}(z^\pm,\gamma^\pm) \coloneqq \morse(z^{\pm}) + \conleyzehnder^{\tau^{\pm}}(\gamma^{\pm}) \end{IEEEeqnarray*} and $\tau^{\pm}$ are symplectic trivializations of $(\gamma^{\pm})^* T \hat{X}$ which extend to a symplectic trivialization $\tau$ of $u^* T \hat{X}$. With $\tau^{\pm}$ chosen like this, even though each individual term on the right-hand side of Equation \eqref{eq:dimension for ms} depends on $\tau^{\pm}$, the right-hand side is independent of the choice of $\tau$. Throughout this chapter, if $\mathcal{M}$ is a moduli space of solutions of the parametrized Floer equation, we will denote by $\# \mathcal{M}$ the signed count of points $(w,u)$ in $\mathcal{M}$ such that $\dim_{(w,u)} \mathcal{M} = 0$.
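\begin{remark} We spell out a consequence of Equation \eqref{eq:dimension for ms} for the counts $\# \mathcal{M}$ used in the definitions below: assuming transversality, a point $(w,u)$ can contribute to $\# \mathcal{M}(H,J,[z^+,\gamma^+],[z^-,\gamma^-])$ only if \begin{IEEEeqnarray*}{c+x*} 0 = \dim_{(w,u)} \mathcal{M}(H,J,[z^+,\gamma^+],[z^-,\gamma^-]) = \ind^{\tau^+}(z^+,\gamma^+) - \ind^{\tau^-}(z^-,\gamma^-) - 1, \end{IEEEeqnarray*} i.e. only if $\ind^{\tau^+}(z^+,\gamma^+) - \ind^{\tau^-}(z^-,\gamma^-) = 1$. This is consistent with the fact that the differential of \cref{def:differential} below has degree $-1$ with respect to the grading of \cref{rmk:grading for s1esh}. \end{remark}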
\begin{definition} \label{def:differential} We define $\del \colon \homology{}{S^1}{}{F}{C}{}{}(X,H,J) \longrightarrow \homology{}{S^1}{}{F}{C}{}{}(X,H,J)$ by \begin{IEEEeqnarray*}{c+x*} \del ([z^+,\gamma^+]) \coloneqq \sum_{[z^-,\gamma^-] \in \mathcal{P}(H)}^{} \# \mathcal{M}_{\vphantom{0}}(H,J,[z^+,\gamma^+],[z^-,\gamma^-]) \cdot [z^-,\gamma^-], \end{IEEEeqnarray*} for each $[z^+,\gamma^+] \in \mathcal{P}(H)$.\end{definition} By \cref{lem:action energy for floer trajectories}, the differential respects the action filtration, i.e. the differential $\del$ maps $\homology{}{S^1}{}{F}{C}{a}{}(X,H,J)$ to itself. By \cite[Proposition 2.2]{bourgeoisEquivariantSymplecticHomology2016}, $\partial \circ \partial = 0$. \begin{definition} \phantomsection\label{def:U map} We define a map $U \colon \homology{}{S^1}{}{F}{C}{}{}(X,H,J) \longrightarrow \homology{}{S^1}{}{F}{C}{}{}(X,H,J)$ as follows. First, recall that a critical point $z$ of $\tilde{f}_N$ is of the form $z = e^{2 \pi i t} e_j$, for $t \in S^1$ and $j = 0, \ldots, N$. If $j \geq 1$, let $\shf(e^{2 \pi i t} e_j) \coloneqq e^{2 \pi i t} e_{j-1}$. Finally, define \begin{IEEEeqnarray*}{c+x*} U ([z,\gamma]) \coloneqq \begin{cases} [\shf(z),\gamma] & \text{if } \morse(z) \geq 2, \\ 0 & \text{if } \morse(z) = 0, \end{cases} \end{IEEEeqnarray*} for $[z,\gamma] \in \mathcal{P}(H)$. \end{definition} The definition of $U$ is well-posed because by \cref{def:hamiltonians} \ref{item:pullbacks}, the Hamiltonians $H_{e_j}$ and $H_{e_{j-1}}$ differ by a constant. Therefore, if $\gamma$ is a $1$-periodic orbit of $H_{e_j}$ then it is also a $1$-periodic orbit of $H_{e_{j-1}}$. By \cite[Section 6.3]{guttSymplecticCapacitiesPositive2018}, $U$ is a chain map, i.e. $U \circ \partial = \partial \circ U$. \begin{lemma} The map $U \colon \homology{}{S^1}{}{F}{C}{}{}(X,H,J) \longrightarrow \homology{}{S^1}{}{F}{C}{}{}(X,H,J)$ respects the filtration. \end{lemma} \begin{proof} Let $[z,\gamma] \in \mathcal{P}(H)$ be such that $\morse(z) \geq 2$ and $\mathcal{A}_{H}(z,\gamma) \leq a$. We wish to show that $\mathcal{A}_{H}(\shf(z),\gamma) \leq \mathcal{A}_{H}(z,\gamma) \leq a$. Assumption \ref{item:pullbacks} of \cref{def:hamiltonians} implies that $H_{\shf(z)} = H_z + E$, where $E \geq 0$. Then, \begin{IEEEeqnarray*}{rCls+x*} \mathcal{A}_{H}(\shf(z),\gamma) & = & \int_{S^1}^{} \gamma^* \hat{\lambda} - \int_{0}^{1} H(t,\shf(z),\gamma(t)) \edv t & \quad [\text{by definition of $\mathcal{A}_{H}$}] \\ & = & \int_{S^1}^{} \gamma^* \hat{\lambda} - \int_{0}^{1} H(t,z,\gamma(t)) \edv t - E & \quad [\text{since $H_{\shf(z)} = H_z + E$}] \\ & = & \mathcal{A}_{H}(z,\gamma) - E & \quad [\text{by definition of $\mathcal{A}_{H}$}] \\ & \leq & \mathcal{A}_{H}(z,\gamma) & \quad [\text{since $E \geq 0$}] \\ & \leq & a & \quad [\text{by assumption on $[z,\gamma]$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} We will now define the continuation maps. For $(H^+,J^+) \leq (H^-,J^-) \in \admissible{X}$, we want to define a morphism $\phi^{-,+} \colon \homology{}{S^1}{}{F}{C}{}{}(X,H^+,J^+) \longrightarrow \homology{}{S^1}{}{F}{C}{}{}(X,H^-,J^-)$. Consider the map \begin{IEEEeqnarray*}{rrCl} \inc^{N^-,N^+}_k \colon & \hat{\mathcal{P}}((\inc_k ^{N^-,N^+})^* H^-) & \longrightarrow & \hat{\mathcal{P}}(H^-) \\ & (z,\gamma) & \longmapsto & (\inc^{N^-,N^+}_k(z),\gamma). 
\end{IEEEeqnarray*} This map fits into the commutative diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd}[row sep=scriptsize, column sep={{{{6em,between origins}}}}] & \hat{\mathcal{P}}((\inc_k^{N^-,N^+})^* H^-) \arrow[dl, "\inc^{N^-,N^+}_k"] \arrow[rr] \arrow[dd] & & \critpt (\tilde{f}_{N^+}) \arrow[dl, "\inc^{N^-,N^+}_k"] \arrow[dd] \\ \hat{\mathcal{P}}(H^-) \arrow[rr, crossing over, near end] \arrow[dd] & & \critpt (\tilde{f}_{N^-}) & \\ & \mathcal{P}((\inc_k^{N^-,N^+})^* H^-) \arrow[dl, dashed, "\exists ! i^{N^-,N^+}_k"] \arrow[rr] & & \critpt (f_{N^+}) \arrow[dl, "i^{N^-,N^+}_k"] \\ \mathcal{P}(H^-) \arrow[rr] \ar[uu, leftarrow, crossing over] & & \critpt (f_{N^-}) \ar[uu, leftarrow, crossing over] & \end{tikzcd} \end{IEEEeqnarray*} \begin{definition} An \textbf{admissible} homotopy of parametrized Hamiltonians from $H^-$ to $H^+$ is a map $H \colon \R \times S^1 \times S^{2N^+ +1} \times \hat{X} \longrightarrow \R$ which satisfies the conditions in \cref{item:homotopy h 1,item:homotopy h 2,item:homotopy h 3}, where $H_s(t,z,x) = H(s,t,z,x)$. We denote the set of such $H$ by $\mathcal{H}(H^+,H^-)$. \begin{enumerate} \item \label{item:homotopy h 3} For every $s \in \R$, we have that $H_s$ satisfies all the assumptions in \cref{def:hamiltonians}, with the exceptions that $C_s$ may be in $\operatorname{Spec}(\del X,\lambda|_{\del X})$, and it is not necessarily true that $z \in \critpt \tilde{f}_N$ implies that $H_{s,z}$ is nondegenerate. \item \label{item:homotopy h 1} There exists $s_0 > 0$ such that if $\pm s > s_0$ then $H_s = (\inc^{N^\pm,N^+}_0)^* H^\pm$. \item \label{item:homotopy h 2} For every $(s,t,z,x) \in \R \times S^1 \times S^{2N^+ + 1} \times \hat{X}$ we have that $\del_s H(s,t,x,z) \leq 0$. \end{enumerate} \end{definition} \begin{definition} An \textbf{admissible} homotopy of parametrized almost complex structures from $J^-$ to $J^+$ is a map $J \colon \R \times S^1 \times S^{2N^+ +1} \times \hat{X} \longrightarrow \End(T \hat{X})$ which satisfies the conditions in \cref{item:homotopy j 1,item:homotopy j 3}, where $J_s(t,z,x) = J(s,t,z,x)$. We denote the set of such $J$ by $\mathcal{J}(J^+,J^-)$. \begin{enumerate} \item \label{item:homotopy j 3} For every $s \in \R$, we have that $J_s$ satisfies all the assumptions in \cref{def:acs}. \item \label{item:homotopy j 1} There exists $s_0 > 0$ such that if $\pm s > s_0$ then $J_s = (\inc^{N^\pm,N^+}_0)^* J^\pm$. \end{enumerate} \end{definition} \begin{definition} Let $[z^\pm,\gamma^\pm] \in \mathcal{P}((\inc^{N^\pm,N^+}_0)^* H^\pm)$ and $(H,J)$ be a homotopy from $(H^-,J^-)$ to $(H^+,J^+)$. A pair $(w,u)$, where $w \colon \R \longrightarrow S^{2N^+ +1}$ and $u \colon \R \times S^1 \longrightarrow \hat{X}$ is a solution of the \textbf{parametrized Floer equation} (with respect to $(H, J)$) if \begin{equation*} \left\{ \, \begin{IEEEeqnarraybox}[ \IEEEeqnarraystrutmode \IEEEeqnarraystrutsizeadd{7pt} {7pt}][c]{rCl} \dot{w}(s) & = & \nabla \tilde{f}_N(w(s)) \\ \pdv{u}{s}(s,t) & = & - J^t_{s,w(s)}(u(s,t)) \p{}{2}{ \pdv{u}{t}(s,t) - X_{H^t_{s,w(s)}} (u(s,t)) }. \end{IEEEeqnarraybox} \right. \end{equation*} Define $\hat{\mathcal{M}}(H,J,[z^+,\gamma^+],[z^-,\gamma^-])$ to be the moduli space of solutions $(w,u)$ of the pa\-ra\-me\-trized Floer equation such that $(w(s),u(s,\cdot))$ converges as $s \to \pm \infty$ to an element in the equivalence class $[z^\pm,\gamma^\pm]$. 
Define an action of $S^1$ on $\hat{\mathcal{M}}(H,J,[z^+,\gamma^+],[z^-,\gamma^-])$ by \begin{IEEEeqnarray*}{c+x*} t \cdot (w,u) = (e ^{2 \pi i t} w, u(\cdot, \cdot - t)). \end{IEEEeqnarray*} Finally, let $\mathcal{M}(H,J,[z^+,\gamma^+],[z^-,\gamma^-]) \coloneqq \hat{\mathcal{M}}(H,J,[z^+,\gamma^+],[z^-,\gamma^-])/S^1$. \end{definition} \begin{definition} \label{def:continuation map} The \textbf{continuation map} is the map \begin{IEEEeqnarray*}{c+x*} \phi^{-,+} \colon \homology{}{S^1}{}{F}{C}{}{}(X,H^+,J^+) \longrightarrow \homology{}{S^1}{}{F}{C}{}{}(X,H^-,J^-) \end{IEEEeqnarray*} given as follows. Choose a regular homotopy $(H, J)$ from $(H^-,J^-)$ to $(H^+,J^+)$. Then, for every $[z^+, \gamma^+] \in \mathcal{P}(H^+)$, \begin{IEEEeqnarray*}{c} \phi^{-,+}([z^+,\gamma^+]) \coloneqq \sum_{[z^-,\gamma^-] \in \mathcal{P}((\inc_0 ^{N^-,N^+})^* H^-)} \# \mathcal{M}_{\vphantom{0}}(H,J,[z^+,\gamma^+],[z^-,\gamma^-]) \cdot [\inc^{N^-,N^+}_0 (z^-),\gamma^-]. \end{IEEEeqnarray*} \end{definition} \begin{lemma} The map $\phi^{-,+}$ respects the action filtrations. \end{lemma} \begin{proof} Assume that $[z^\pm,\gamma^\pm] \in \mathcal{P}((\inc_0 ^{N^\pm,N^+})^* H^\pm)$ is such that $\mathcal{A}_{H^+}(z^+,\gamma^+) \leq a$ and $\mathcal{M}(H,J,[z^+,\gamma^+],[z^-,\gamma^-])$ is nonempty. We wish to show that $\mathcal{A}_{H^-}(\inc^{N^-,N^+}_0(z^-),\gamma^-) \leq a$. The proof is the following computation. \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\mathcal{A}_{H^-}(\inc^{N^-,N^+}_0(z^-),\gamma^-)}\\ \quad & = & \int_{S^1}^{} \gamma^* \hat{\lambda} - \int_{0}^{1} H^-(t, \inc^{N^-,N^+}_0(z^-),\gamma^-(t)) \edv t & \quad [\text{definition of action functional}] \\ & = & \int_{S^1}^{} \gamma^* \hat{\lambda} - \int_{0}^{1} ((\inc_0 ^{N^-,N^+})^* H^-)(t, z^-,\gamma^-(t)) \edv t & \quad [\text{definition of $\inc^{N^-,N^+}_0$}] \\ & = & \mathcal{A}_{(\inc_0 ^{N^-,N^+})^* H^-}(z^-,\gamma^-) & \quad [\text{definition of action functional}] \\ & \leq & \mathcal{A}_{H^+}(z^+,\gamma^+) & \quad [\text{by \cref{lem:action energy for floer trajectories}}] \\ & \leq & a & \quad [\text{by assumption}]. & \qedhere \end{IEEEeqnarray*} \end{proof} By \cite[Section 2.4]{bourgeoisEquivariantSymplecticHomology2016}, the $U$ maps and the continuation maps commute. Moreover, by the usual arguments in Floer theory, we have (see also \cite[Section 5.3]{guttSymplecticCapacitiesPositive2018}): \begin{enumerate} \item The continuation map $\phi^{-,+}$ is a chain map, i.e. $\phi^{-,+} \circ \del^+ = \del^- \circ \phi^{-,+}$. \item The continuation map $\phi^{-,+}$ is independent (up to chain homotopy, i.e. as a morphism in $\comp$) of the choice of regular homotopy $(H, J)$. \item The continuation maps are functorial, i.e. if $(H^0,J^0) \leq (H^1,J^1) \leq (H^2,J^2) \in \admissible{X}$ then $\phi^{2,1} \circ \phi^{1,0} = \phi^{2,0}$. \end{enumerate} \begin{remark} \label{rmk:grading for s1esh} By the determinant property of \cref{thm:properties of cz}, the parity of the Conley--Zehnder index of a Hamiltonian $1$-periodic orbit is independent of the choice of trivialization. Therefore, $\homology{}{S^1}{}{F}{C}{}{}(X,H,J)$ has a $\Z_{2}$-grading given by \begin{IEEEeqnarray}{c} \deg([z,\gamma]) \coloneqq \mu([z,\gamma]) \coloneqq \morse(z) + \conleyzehnder(\gamma). \plabel{eq:grading s1esh} \end{IEEEeqnarray} If $\pi_1(X) = 0$ and $c_1(TX)|_{\pi_2(X)} = 0$, then by \cref{lem:cz of hamiltonian is independent of triv over filling disk} we have well-defined Conley--Zehnder indices in $\Z$.
Therefore, Equation \eqref{eq:grading s1esh} defines a $\Z$-grading on $\homology{}{S^1}{}{F}{C}{}{}(X,H,J)$. With respect to this grading, \begin{IEEEeqnarray*}{rCls+x*} \deg(\partial) & = & -1, \\ \deg(U) & = & -2, \\ \deg(\phi^{-,+}) & = & 0. \end{IEEEeqnarray*} \end{remark} \begin{definition} If $(X,\lambda)$ is a nondegenerate Liouville domain, the \textbf{$S^1$-equivariant Floer chain complex} of $X$ is the functor \begin{IEEEeqnarray*}{rrCl} \homology{}{S^1}{X}{F}{C}{}{} \colon & \admissible{X} & \longrightarrow & \comp \\ & (H^+,J^+) & \longmapsto & (\homology{}{S^1}{}{F}{C}{}{}(X,H^+,J^+), \del^+, U^+) \\ & \downarrow & \longmapsto & \downarrow \phi^{-,+} \\ & (H^-,J^-) & \longmapsto & (\homology{}{S^1}{}{F}{C}{}{}(X,H^-,J^-), \del^-, U^-). \end{IEEEeqnarray*} The \textbf{$S^1$-equivariant Floer homology} of $X$ is the functor $\homology{}{S^1}{X}{F}{H}{}{} = H \circ \homology{}{S^1}{X}{F}{C}{}{}$. The \textbf{positive $S^1$-equivariant Floer homology} of $X$ is the functor $\homology{}{S^1}{X}{F}{H}{+}{}$ given by \begin{IEEEeqnarray*}{rCls+x*} \homology{}{S^1}{X}{F}{H}{+}{}(H,J) & \coloneqq & \homology{}{S^1}{}{F}{H}{(\varepsilon, +\infty)}{}(X,H,J) \\ & = & \homology{}{S^1}{}{F}{H}{}{}(X,H,J) / \homology{}{S^1}{}{F}{H}{\varepsilon}{}(X,H,J). \end{IEEEeqnarray*} \end{definition} \begin{definition} If $(X,\lambda)$ is a nondegenerate Liouville domain, the \textbf{$S^1$-equivariant symplectic homology} of $X$ is the object in $\modl$ given by $\homology{}{S^1}{}{S}{H}{}{}(X,\lambda) \coloneqq \colim \homology{}{S^1}{X}{F}{H}{}{}$. The \textbf{positive $S^1$-equivariant symplectic homology} of $X$ is given by \begin{IEEEeqnarray*}{rCls+x*} \homology{}{S^1}{}{S}{H}{+}{}(X,\lambda) & \coloneqq & \colim \homology{}{S^1}{X}{F}{H}{+}{} \\ & = & \homology{}{S^1}{}{S}{H}{(\varepsilon, +\infty)}{}(X, \lambda) \\ & = & \homology{}{S^1}{}{S}{H}{}{}(X, \lambda) / \homology{}{S^1}{}{S}{H}{\varepsilon}{}(X, \lambda). \end{IEEEeqnarray*} \end{definition} \section{Viterbo transfer map of a Liouville embedding} \label{sec:viterbo transfer map of liouville embedding} Our goal is to prove that $\homology{}{S^1}{}{S}{H}{}{}$ is a contravariant functor from a suitable category of Liouville domains to $\modl$. More specifically, suppose that $(V,\lambda_V)$ and $(W,\lambda_W)$ are nondegenerate Liouville domains and $\varphi \colon (V,\lambda_V) \longrightarrow (W,\lambda_W)$ is a $0$-codimensional strict generalized Liouville embedding. We will define a \textbf{Viterbo transfer map} \begin{IEEEeqnarray*}{rrCl} \varphi_! \colon & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W) & \longrightarrow & \homology{}{S^1}{}{S}{H}{}{}(V,\lambda_V), \\ \varphi^+_! \colon & \homology{}{S^1}{}{S}{H}{+}{}(W,\lambda_W) & \longrightarrow & \homology{}{S^1}{}{S}{H}{+}{}(V,\lambda_V), \end{IEEEeqnarray*} each of which is a morphism in $\modl$. We will start by defining the Viterbo transfer map in the case where $\varphi$ is a Liouville embedding instead of just a generalized Liouville embedding. Consider the completions $\hat{V}$ and $\hat{W}$ of $V$ and $W$ respectively, as well as the induced map $\hat{\varphi} \colon \hat{V} \longrightarrow \hat{W}$. Choose $R > 0$ so small that $\hat{\varphi}(V \union ([0,R] \times \del V)) \subset W$. We define \begin{IEEEeqnarray*}{rCls+x*} \varepsilon_V & \coloneqq & \frac{1}{2} \min \operatorname{Spec}(\del V, \lambda_V), \\ \varepsilon_W & \coloneqq & \frac{1}{2} \min \operatorname{Spec}(\del W, \lambda_W), \\ \varepsilon & \coloneqq & \min \{ \varepsilon_V, \varepsilon_W \}.
\end{IEEEeqnarray*} \begin{definition} \label{def:stair hamiltonians} A \textbf{stair} parametrized Hamiltonian is a map $\overline{H} \colon S^1 \times S^{2N+1} \times \hat{W} \longrightarrow \R$ such that $\overline{H}$ satisfies the conditions in \cref{item:invariant,item:flow lines,item:pullbacks,item:ndg} from \cref{def:hamiltonians} as well as the conditions in the items below. We denote the set of such $\overline{H}$ by $\mathcal{H}(W,V,N)$. \begin{enumerate}[label=(\Roman*)] \item \label{item:stair 1} On $S^1 \times S^{2N+1} \times V$, we have that $\hat{\varphi}^* \overline{H}$ has values in $(-\varepsilon, 0)$, is $S^1$-independent and is $C^2$-close to a constant. \item \label{item:stair 2} On $S^1 \times S^{2N+1} \times [0, \delta_V] \times \del V$, we have that $-\varepsilon < \hat{\varphi}^* \overline{H} < \varepsilon$ and $\hat{\varphi}^* \overline{H}$ is $C^2$-close to $(t,z,r,x) \longmapsto h_{\rmn{2}}(e^r)$, where $h_{\rmn{2}} \colon [1,e^{\delta_V}] \longrightarrow \R$ is increasing and strictly convex. \myitem[($\mathrm{S}_{V}$)] \plabel{item:stair v} On $S^1 \times S^{2N+1} \times [\delta_V, R - \delta_V] \times \del V$, we have that $\hat{\varphi}^* \overline{H}(t,z,r,x) = C_V e^r + D_V$, for $D_V \in \R$ and $C_V \in \R_{>0} \setminus (\operatorname{Spec}(\del V, \lambda_V|_{\del V}) \union \operatorname{Spec}(\del W, \lambda_W|_{\del W}))$. \item \label{item:stair 3} On $S^1 \times S^{2N+1} \times [R - \delta_V, R] \times \del V$, we have that $\hat{\varphi}^* \overline{H}$ is $C^2$-close to the function $(t,z,r,x) \longmapsto h_{\rmn{3}}(e^r)$, where $h_{\rmn{3}} \colon [e^{R - \delta_V},e^{R}] \longrightarrow \R$ is increasing and strictly concave. \item \label{item:stair 4} On $S^1 \times S^{2N+1} \times (W \setminus \hat{\varphi}(V \union [0, R] \times \del V))$, the function $\overline{H}$ is $C^2$-close to a constant. \item \label{item:stair 5} On $S^1 \times S^{2N+1} \times [0, \delta_W] \times \del W$, we have that $\overline{H}$ is $C^2$-close to $(t,z,r,x) \longmapsto h_{\rmn{5}}(e^r)$, where $h_{\rmn{5}} \colon [1,e^{\delta_W}] \longrightarrow \R$ is increasing and strictly convex. \myitem[($\mathrm{S}_{W}$)] \plabel{item:stair w} On $S^1 \times S^{2N+1} \times [\delta_W, +\infty) \times \del W$, we have that $\overline{H}(t,z,r,x) = C_W e^r + D_W$, for $D_W \in \R$ and $C_W \in \R_{>0} \setminus (\operatorname{Spec}(\del V, \lambda_V|_{\del V}) \union \operatorname{Spec}(\del W, \lambda_W|_{\del W}))$ such that $C_W < e^{-\delta_W}(C_V e^{R - \delta_V} + D_V)$. \end{enumerate} \end{definition} \begin{remark} If $(z, \gamma) \in \hat{\mathcal{P}}(\overline{H})$, then either $\gamma$ is nonconstant and $\img \gamma$ is in region $\rmn{2}$, $\rmn{3}$ or $\rmn{5}$, or $\gamma$ is constant and $\img \gamma$ is in region $\rmn{1}$ or $\rmn{4}$. There are no $1$-periodic orbits in the slope regions $\mathrm{S}_{V}$ and $\mathrm{S}_{W}$. \end{remark} \begin{lemma} \label{lem:action stair} The actions of $1$-periodic orbits of $\overline{H}$ are ordered according to \begin{IEEEeqnarray*}{c+x*} \mathcal{A}_{\overline{H}}(\rmn{4}) < \mathcal{A}_{\overline{H}}(\rmn{5}) < 0 < \mathcal{A}_{\overline{H}}(\rmn{1}) < \varepsilon < \mathcal{A}_{\overline{H}}(\rmn{2}). \end{IEEEeqnarray*} \end{lemma} \begin{proof} Consider \cref{fig:action stair}.
By \cref{lem:action in symplectization,def:stair hamiltonians}, we have that $\mathcal{A}_{\overline{H}}$ is constant in regions $\rmn{1}$, $\mathrm{S}_{V}$, $\rmn{4}$ and $\mathrm{S}_{W}$, $\mathcal{A}_{\overline{H}}$ is strictly increasing in regions $\rmn{2}$ and $\rmn{5}$, and $\mathcal{A}_{\overline{H}}$ is strictly decreasing in region $\rmn{3}$. From this reasoning, we conclude that $\mathcal{A}_{\overline{H}}(\rmn{4}) < \mathcal{A}_{\overline{H}}(\rmn{5})$ and $0 < \mathcal{A}_{\overline{H}}(\rmn{1}) < \varepsilon$. By the same argument as in the proof of \cref{lem:action admissible}, we conclude that $\varepsilon < \mathcal{A}_{\overline{H}}(\rmn{2})$. We show that $\mathcal{A}_{\overline{H}}(\rmn{5}) < 0$. \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\mathcal{A}_{\overline{H}}(\rmn{5})}\\ \quad & = & e^{r_W} T(r_W) - H(r_W) & \quad [\text{by \cref{lem:action in symplectization}}] \\ \quad & < & e^{r_W} C_W - H(r_W) & \quad [\text{$T(\delta_W) = C_W$ and $T' = \exp \cdot h_{\rmn{5}}'' \circ \exp > 0$}] \\ \quad & < & e^{r_W} C_W - (C_V e^{R-\delta_V} + D_V) & \quad [\text{$H(r_W) > H(R - \delta_V) = C_V e^{R-\delta_V} + D_V$}] \\ \quad & < & e^{\delta_W} C_W - (C_V e^{R-\delta_V} + D_V) & \quad [\text{since $r_W < \delta_W$}] \\ \quad & < & 0 & \quad [\text{since $C_W < e^{-\delta_W}(C_V e^{R - \delta_V} + D_V)$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{figure}[ht] \centering \begin{tikzpicture} [ help lines/.style={thin, draw = black!50}, Hamiltonian/.style={thick}, action/.style={thick}, axisv/.style={}, axisw/.style={} ] \tikzmath{ \a = 4; \b = 3; \c = 3; \d = 0.5; \e = 3; \f = 3; \g = 1; \h = 0.4; \sml = 0.05; \dOne = -0.3; \dFour = 2.5; \vFive = 2.6; \mTwo = -(12 * (-\dOne + \dFour) * exp(\d))/((-1 + exp(\d))^3 * (1 + exp(\d)) * (-exp(\d) + exp(\e))); \n = (2 * (-\dOne + \dFour) * exp(\d) * (-1 + 3 * exp(\d)))/((-1 + exp(\d))^3 * (1 + exp(\d)) * (-exp(\d) + exp(\e))); \o = (\dFour * exp(1)^\d - 2 * \dFour * exp(2 * \d) + 2 * \dOne * exp(4 * \d) - \dOne * exp(5 * \d) - \dOne * exp(\e) + 2 * \dOne * exp(\d + \e) - 2 * \dOne * exp(3 * \d + \e) + \dOne * exp(4 * \d + \e))/((-1 + exp(\d))^3 * (1 + exp(\d)) * (-exp(\d) + exp(\e))); \uv = (2 * (-\dOne + \dFour) * exp(\d))/((1 + exp(\d)) * (-exp(\d) + exp(\e))) ; \vv = (\dFour * exp(\d) - \dOne * exp(\e))/(exp(\d) - exp(\e)) ; \mThree = -(12 * (-\dOne + \dFour) * exp(4 * \d + \e))/((-1 + exp(\d))^3 * (1 + exp(\d)) * (exp(\d) - exp(\e))); \q = - (2 * (-\dOne + \dFour) * exp(3 * \d + \e) * (-3 + exp(\d)))/((-1 + exp(\d))^3 * (1 + exp(\d)) * (exp(\d) - exp(\e))); \s = (-\dFour * exp(\d) + 2 * \dFour * exp(2 * \d) - 2 * \dFour * exp(4 * \d) + \dFour * exp(5 * \d) + \dFour * exp(\e) - 2 * \dFour * exp(\d + \e) + 2 * \dOne * exp(3 * \d + \e) - \dOne * exp(4 * \d + \e))/((-1 + exp(\d))^3 * (1 + exp(\d)) * (exp(\d) - exp(\e))); \uw = -2 * (\dFour - \vFive) / (-1+exp(\g)); \vw = (2 * exp(\g) * \dFour - \vFive - exp(\g) * \vFive) / (-1+exp(\g)); \jj = - 12 * (-\dFour + \vFive) / (-1+exp(\g))^4; \kk = 2 * (-1 + 3 * exp(\g)) * (-\dFour + \vFive) / (-1+exp(\g))^4; \la = ( -2 * exp(\g) * \dFour + 6 * exp(2 * \g) * \dFour - 4 * exp(3 * \g) * \dFour + exp(4 * \g) * \dFour + \vFive - 2 * exp(\g) * \vFive ) / (-1+exp(\g))^4; function h2 (\r) { return {\o + \n * \r + 1/2 * exp(\d) * \mTwo * \r^2 + 1/6 * (-1 - exp(\d)) * \mTwo * \r^3 + (\mTwo * \r^4)/12}; }; function dh2 (\r) { return {\n + 1/6 * \mTwo * \r * (-3 * exp(\d) * (-2 + \r) + \r * (-3 + 2 * \r))}; }; function h3 (\r) { return {\s + \q * \r - (1/6) * exp(-\d) * \mThree 
* (-3 + \r) * \r^2 + 1/12 * \mThree * (-2 + \r) * \r^3}; }; function dh3 (\r) { return {\q + (1/6) * exp(-\d) * \mThree * \r * (6 - 3 * (1 + exp(\d)) * \r + 2 * exp(\d) * \r^2) }; }; function h5 (\r) { return {\la + \kk * \r + 1/2 * exp(\g) * \jj * \r^2 + 1/6 * (-1 - exp(\g)) * \jj * \r^3 + 1/12 * \jj * \r^4 }; }; function dh5 (\r) { return {\kk + 1/6 * \jj * \r * (-3 * exp(\g) * (-2 + \r) + \r * (-3 + 2 * \r))}; }; function hsv (\r) { return {\uv * \r + \vv}; }; function hsw (\r) { return {\uw * \r + \vw}; }; function H2 (\r) { return {h2 (exp(\r))}; }; function H3 (\r) { return {h3 (exp(\r))}; }; function H5 (\r) { return {h5 (exp(\r))}; }; function Hsv (\r) { return {hsv(exp(\r))}; }; function Hsw (\r) { return {hsw(exp(\r))}; }; function a2 (\r) { return { exp(\r) * dh2(exp(\r)) - H2(\r) }; }; function a3 (\r) { return { exp(\r) * dh3(exp(\r)) - H3(\r) }; }; function a5 (\r) { return { exp(\r) * dh5(exp(\r)) - H5(\r) }; }; \i = ln((\a-\vw)/\uw) - \g; \test = -\uw + exp(-\g) * (\uv * exp(\e-\d) + \vv); } \draw[Hamiltonian, domain = 0 :\c ] plot (\x, {\dOne}); \draw[Hamiltonian, domain = \c :\c+\d ] plot (\x, {H2(\x - \c)}); \draw[Hamiltonian, domain = \c+\d :\c+\e-\d ] plot (\x, {Hsv(\x - \c)}); \draw[Hamiltonian, domain = \c+\e-\d :\c+\e ] plot (\x, {H3(\x - \c - \e)}); \draw[Hamiltonian, domain = \c+\e :\c+\e+\f ] plot (\x, {\dFour}); \draw[Hamiltonian, domain = \c+\e+\f :\c+\e+\f+\g ] plot (\x, {H5(\x - \c - \e - \f)}); \draw[Hamiltonian, domain = \c+\e+\f+\g:\c+\e+\f+\g+\i] plot (\x, {Hsw(\x - \c - \e - \f)}) node[right] {$\overline{H}$}; \draw[action, domain = 0 :\c ] plot (\x, {-\dOne}); \draw[action, domain = \c :\c+\d ] plot (\x, {a2(\x - \c)}); \draw[action, domain = \c+\d :\c+\e-\d ] plot (\x, {-\vv}); \draw[action, domain = \c+\e-\d :\c+\e ] plot (\x, {a3(\x - \c - \e)}); \draw[action, domain = \c+\e :\c+\e+\f ] plot (\x, {-\dFour}); \draw[action, domain = \c+\e+\f :\c+\e+\f+\g ] plot (\x, {a5(\x - \c - \e - \f)}); \draw[action, domain = \c+\e+\f+\g:\c+\e+\f+\g+\i] plot (\x, {-\vw}) node[right] {$\mathcal{A}_{\overline{H}}$}; \draw[help lines] (0,\h) node[left] {$+\varepsilon$} -- (\c+\e+\f+\g+\i,\h); \draw[help lines] (0,-\h) node[left] {$-\varepsilon$} -- (\c+\e+\f+\g+\i,-\h); \draw[help lines] (\c+\d,-\b) node[below, axisv] {$\delta_V$} -- (\c+\d,\a); \draw[help lines] (\c+\e-\d,-\b) node[below, axisv] {$R-\delta_V\hspace{1.5em}$} -- (\c+\e-\d,\a); \draw[help lines] (\c+\e,-\b) node[below, axisv] {$\hspace{0.5em}R$} -- (\c+\e,\a); \draw[help lines] (\c+\e+\f+\g,-\b) node[below, axisw] {$\delta_W$} -- (\c+\e+\f+\g,\a); \draw[->] (0,-\b) -- (0,\a) node[above] {$\R$}; \draw (0,0) -- (\c,0); \draw[->, axisw] (\c+\e+\f,0) -- (\c+\e+\f+\g+\i,0); \draw[->, axisw] (\c+\e+\f,-\b) node[below] {$0$} -- (\c+\e+\f,\a) node[above] {$\R$}; \draw[->, axisv] (\c,0) -- (\c+\e+\f,0); \draw[->, axisv] (\c,-\b) node[below] {$0$} -- (\c,\a) node[above] {$\R$}; \draw (\c/2,\a) node[below] {$\mathrm{I}$}; \draw (\c+\d/2,\a) node[below] {$\mathrm{II}$}; \draw (\c+\e/2,\a) node[below] {$\mathrm{S}_{{V}}$}; \draw (\c+\e-\d/2,\a) node[below] {$\mathrm{III}$}; \draw (\c+\e+\f/2,\a) node[below] {$\mathrm{IV}$}; \draw (\c+\e+\f+\g/2,\a) node[below] {$\mathrm{V}$}; \draw (\c+\e+\f+\g+1,\a) node[below] {$\mathrm{S}_{{W}}$}; \draw[help lines, decoration = {brace, mirror, raise=5pt}, decorate] (0,-\b-.75) -- node[below=6pt] {\scriptsize $V$} (\c - \sml,-\b-.75); \draw[help lines, decoration = {brace, mirror, raise=5pt}, decorate] (\c+\sml,-\b-.75) -- node[below=6pt] {\scriptsize $[0,R] \times \del V$} (\c + 
\e - \sml,-\b-.75); \draw[help lines, decoration = {brace, mirror, raise=5pt}, decorate] (\c+\e+\sml,-\b-.75) -- node[below=6pt] {\scriptsize ${W \setminus \hat{\varphi} (V \union [0,R] \times \del V)}$} (\c + \e + \f - \sml,-\b-.75); \draw[help lines, decoration = {brace, mirror, raise=5pt}, decorate] (\c+\e+\f+\sml,-\b-.75) -- node[below=6pt] {\scriptsize $\R_{\geq 0} \times \del W$} (\c+\e+\f+\g+\i,-\b-.75); \end{tikzpicture} \caption{Action of a $1$-periodic orbit of $\overline{H}$} \label{fig:action stair} \end{figure} \begin{definition} \phantomsection\label{def:stair acs} A \textbf{stair} parametrized almost complex structure is a map $\overline{J} \colon S^1 \times S^{2N+1} \times \hat{W} \longrightarrow \End(T \hat{W})$ satisfying the conditions in \cref{def:stair acs 1,def:stair acs 2,def:stair acs 3,def:stair acs 4} below. We denote the set of such $\overline{J}$ by $\mathcal{J}(W,V,N)$. \begin{enumerate} \item \label{def:stair acs 1} $\overline{J}$ is $S^1$-invariant. \item \label{def:stair acs 2} $\overline{J}$ is $\hat{\omega}$-compatible. \item \label{def:stair acs 3} $\overline{J}$ is cylindrical on $S^1 \times S^{2N+1} \times [0, \delta] \times \del V$ and on $S^1 \times S^{2N+1} \times \R_{\geq 0} \times \del W$. \item \label{def:stair acs 4} $(\tilde{\iota}_0^{N,N-1})^* \overline{J} = (\tilde{\iota}_1^{N,N-1})^* \overline{J}$. \end{enumerate} \end{definition} \begin{definition} Define sets \begin{IEEEeqnarray*}{rCls+x*} \stair{W,V} & \coloneqq & \left\{ (\overline{H}, \overline{J}) \ \middle\vert \begin{array}{l} \overline{H} \in \mathcal{H}(W,V,N) \text{ and } \overline{J} \in \mathcal{J}(W,V,N) \text{ for some }N, \\ (\overline{H}, \overline{J}) \text{ is regular} \end{array} \right\}, \\ \admstair{W,V} & \coloneqq & \left\{ (H,J,\overline{H}, \overline{J}) \ \middle\vert \begin{array}{l} H \in \mathcal{H}(W,N), J \in \mathcal{J}(W,N), \\ \overline{H} \in \mathcal{H}(W,V,N) \text{ and } \overline{J} \in \mathcal{J}(W,V,N) \text{ for some }N, \\ H \leq \overline{H}, \text{ and } (H,J) \text{ and } (\overline{H}, \overline{J}) \text{ are regular} \end{array} \right\}. \end{IEEEeqnarray*} Define preorders on $\stair{W,V}$ and $\admstair{W,V}$ by \begin{IEEEeqnarray*}{rCls+x*} (\overline{H}^+,\overline{J}^+) \leq (\overline{H}^-,\overline{J}^-) & \mathrel{\mathop:}\Longleftrightarrow & \left\{ \begin{array}{l} N^+ \leq N^-, \\ \overline{H}^+ \leq (\inc_0 ^{N^-,N^+})^* \overline{H}^-, \end{array} \right. \\ (H^+,J^+,\overline{H}^+,\overline{J}^+) \leq (H^-,J^-,\overline{H}^-,\overline{J}^-) & \mathrel{\mathop:}\Longleftrightarrow & \left\{ \begin{array}{l} N^+ \leq N^-, \\ H^+ \leq (\inc_0 ^{N^-,N^+})^* H^-, \\ \overline{H}^+ \leq (\inc_0 ^{N^-,N^+})^* \overline{H}^-. \end{array} \right. \end{IEEEeqnarray*} \end{definition} \begin{definition} Define a function $\pi^{\mathcal{H}}_{W,V,N} \colon \mathcal{H}(W,V,N) \longrightarrow \mathcal{H}(V,N)$ by $\pi_{W,V,N}^{\mathcal{H}}(\overline{H}) = \overline{H}_V$, where \begin{IEEEeqnarray*}{c+x*} \overline{H}_V(t,z,x) \coloneqq \begin{cases} \overline{H}(t,z,\hat{\varphi}(x)) & \text{if } x \in V \union ([0,R] \times \del V), \\ C_V e^r + D_V & \text{if } x = (r,y) \in [R, +\infty) \times \del V. 
\end{cases} \end{IEEEeqnarray*} Define a function $\pi^{\mathcal{J}}_{W,V,N} \colon \mathcal{J}(W,V,N) \longrightarrow \mathcal{J}(V,N)$ by $\pi_{W,V,N}^{\mathcal{J}}(\overline{J}) = \overline{J}_V$, where \begin{IEEEeqnarray*}{c+x*} \overline{J}_V(t,z,x) \coloneqq \begin{cases} \dv \hat{\varphi}^{-1}(\hat{\varphi}(x)) \circ \overline{J}(t,z,\hat{\varphi}(x)) \circ \dv \hat{\varphi}(x) & \text{if } x \in V \union ([0,R] \times \del V), \\ \dv \hat{\varphi}^{-1}(\hat{\varphi}(0,y)) \circ \overline{J}(t,z,\hat{\varphi}(0,y)) \circ \dv \hat{\varphi}(0,y) & \text{if } x = (r,y) \in [0, +\infty) \times \del V. \end{cases} \end{IEEEeqnarray*} \end{definition} \begin{definition} Define the functors \begin{IEEEeqnarray*}{rrClCl} \pi_W \colon & \admstair{W,V} & \longrightarrow & \admissible{W}, & \text{ given by } & \pi_W(H,J,\overline{H},\overline{J}) \coloneqq (H,J), \\ \pi_{W,V} \colon & \admstair{W,V} & \longrightarrow & \stair{W,V}, & \text{ given by } & \pi_{W,V}(H,J,\overline{H},\overline{J}) \coloneqq (\overline{H}, \overline{J}), \\ \pi_{W,V}^{\mathcal{H} \times \mathcal{J}} \colon & \stair{W,V} & \longrightarrow & \admissible{V}, & \text{ given by } & \pi_{W,V}^{\mathcal{H} \times \mathcal{J}}(\overline{H},\overline{J}) \coloneqq (\pi^{\mathcal{H}}_{W,V,N}(\overline{H}),\pi^{\mathcal{J}}_{W,V,N}(\overline{J})) = (\overline{H}_V, \overline{J}_V), \end{IEEEeqnarray*} for $(\overline{H}, \overline{J}) \in \mathcal{H}(W,V,N) \times \mathcal{J}(W,V,N)$. Let $\pi_V^{} \coloneqq \pi_{W,V}^{\mathcal{H} \times \mathcal{J}} \circ \pi_{W,V}^{} \colon \admstair{W,V}^{} \longrightarrow \admissible{V}^{}$. \end{definition} \begin{definition} \phantomsection\label{def:homotopy stair to admissible hamiltonian} Let $H^+ \in \mathcal{H}(W,N^+)$ be an admissible parametrized Hamiltonian and $H^- \in \mathcal{H}(W,V,N^-)$ be a stair parametrized Hamiltonian. Assume that $N^+ \leq N^-$ and $H^+ \leq (\tilde{i}_0^{N^-,N^+})^* H^-$. An \textbf{admissible} homotopy of parametrized Hamiltonians from $H^-$ to $H^+$ is a map $H \colon \R \times S^1 \times S^{2 N^+ + 1} \times \hat{W} \longrightarrow \R$ which satisfies the conditions in \cref{item:homotopy stair to admissible hamiltonian 1,item:homotopy stair to admissible hamiltonian 2,item:homotopy stair to admissible hamiltonian 3} for some $s_0 > 0$, where $H_s(t,z,x) = H(s,t,z,x)$. We denote the set of such $H$ by $\mathcal{H}(H^+,H^-)$. \begin{enumerate} \item \label{item:homotopy stair to admissible hamiltonian 1} For every $s \in (-s_0, s_0)$, we have that $H_s$ satisfies all the conditions in \cref{def:stair hamiltonians} with the exceptions that $C_{W,s}$ and $C_{V,s}$ are possibly in $\operatorname{Spec}(\del W, \lambda_W|_{\del W}) \union \operatorname{Spec}(\del V, \lambda_V|_{\del V})$ and $H_{s,z}$ is not necessarily nondegenerate for $z \in \critpt \tilde{f}_{N^+}$. \item \label{item:homotopy stair to admissible hamiltonian 2} For every $s$, if $\pm s \geq s_0$ then $H_s = (\tilde{i}_0^{N^\pm, N^+})^* H^\pm$. \item \label{item:homotopy stair to admissible hamiltonian 3} For every $(s,t,z,x) \in \R \times S^1 \times S^{2 N^+ + 1} \times \hat{W}$ we have $\del_s H(s,t,z,x) \leq 0$. \end{enumerate} \end{definition} \begin{remark} In \cref{def:homotopy stair to admissible hamiltonian}, the parameters of $H_s$ depend on $s$. In particular, the ``constant'' value that $H_s$ takes in regions $\rmn{1}$ and $\rmn{4}$ depends on $s$. However, the parameter $R$ does not depend on $s$.
\end{remark} \begin{definition} \label{def:homotopy stair to admissible acs} Let $J^+ \in \mathcal{J}(W,N^+)$ be an admissible parametrized almost complex structure and $J^- \in \mathcal{J}(W,V,N^-)$ be a stair parametrized almost complex structure. An \textbf{admissible} homotopy of parametrized almost complex structures from $J^-$ to $J^+$ is a map $J \colon \R \times S^1 \times S^{2 N^+ + 1} \times \hat{W} \longrightarrow \End(T \hat{W})$ which satisfies the conditions in \cref{item:homotopy stair to admissible acs 1,item:homotopy stair to admissible acs 2} for some $s_0 > 0$, where $J_s(t,z,x) = J(s,t,z,x)$. We denote the set of such $J$ by $\mathcal{J}(J^+,J^-)$. \begin{enumerate} \item \label{item:homotopy stair to admissible acs 1} For every $s \in (-s_0, s_0)$, we have that $J_s$ satisfies all the conditions in \cref{def:stair acs}. \item \label{item:homotopy stair to admissible acs 2} For every $s$, if $\pm s \geq s_0$ then $J_s = (\tilde{i}_0^{N^\pm, N^+})^* J^\pm$. \end{enumerate} \end{definition} \begin{remark} \label{rmk:floer complex wrt stair} Let $K = (H,J,\overline{H},\overline{J}) \in \admstair{W,V}$ and consider $\pi_W(K) = (H,J) \in \admissible{W}$ and $\pi_{W,V}(K) = (\overline{H},\overline{J}) \in \stair{W,V}$. In \cref{sec:Floer homology} we defined $\homology{}{S^1}{}{F}{C}{}{}(W,H,J)$, the Floer chain complex of $W$ with respect to the auxiliary data $(H,J)$, for every $(H,J) \in \admissible{W}$. Despite the fact that $(\overline{H}, \overline{J})$ is not an element of $\admissible{W}$, the Floer chain complex $\homology{}{S^1}{}{F}{C}{}{}(W,\overline{H}, \overline{J})$ of $W$ with respect to the auxiliary data $(\overline{H}, \overline{J})$ is well-defined. More precisely, it is possible to replicate the results of \cref{sec:Floer homology} but with the category $\stair{W,V}$ instead of $\admissible{W}$. Then, we can define a functor \begin{IEEEeqnarray*}{rrCl} \homology{\mathrm{I-V}}{S^1}{W}{F}{C}{}{} \colon & \stair{W,V} & \longrightarrow & \comp \\ & (\overline{H}, \overline{J}) & \longmapsto & \homology{\mathrm{I-V}}{S^1}{W}{F}{C}{}{}(\overline{H},\overline{J}) \coloneqq \homology{}{S^1}{}{F}{C}{}{}(W,\overline{H}, \overline{J}). \end{IEEEeqnarray*} For every $(H^+, J^+, H^-, J^-) \in \admstair{W,V}$, we have that $H^+ \leq {H}^-$, and therefore we can define a continuation map $\phi^{-,+} \colon \homology{}{S^1}{}{F}{C}{}{}(W,H^+,J^+) \longrightarrow \homology{}{S^1}{}{F}{C}{}{}(W,H^-,J^-)$ which is given by counting solutions of the Floer equation with respect to $H \in \mathcal{H}(H^+,H^-)$ and $J \in \mathcal{J}(J^+,J^-)$. These continuation maps assemble into a natural transformation \begin{IEEEeqnarray*}{c+x*} \phi \colon \homology{}{S^1}{W}{F}{C}{}{} \circ \pi_W^{} \longrightarrow \homology{\mathrm{I-V}}{S^1}{W}{F}{C}{}{} \circ \pi_{W,V}^{}. \end{IEEEeqnarray*} \end{remark} \begin{definition} \label{def:subcomplex} We define a functor $\homology{\mathrm{III,IV,V}}{S^1}{W}{F}{C}{}{} \colon \stair{W,V}^{} \longrightarrow \comp$ as follows. If $(\overline{H},\overline{J}) \in \stair{W,V}$, then the module $\homology{\mathrm{III,IV,V}}{S^1}{W}{F}{C}{}{}(\overline{H}, \overline{J}) \coloneqq \homology{\mathrm{III,IV,V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J})$ is the submodule of $\homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J})$ which is generated by (equivalence classes of) $1$-periodic orbits $[z, \gamma]$ of $\overline{H}$ such that $\img \gamma$ is in region $\rmn{3}$, $\rmn{4}$ or $\rmn{5}$.
The maps \begin{IEEEeqnarray*}{rrCl} \del \colon & \homology{\mathrm{III,IV,V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}) & \longrightarrow & \homology{\mathrm{III,IV,V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}), \\ U \colon & \homology{\mathrm{III,IV,V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}) & \longrightarrow & \homology{\mathrm{III,IV,V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}), \\ \phi^{-,+} \colon & \homology{\mathrm{III,IV,V}}{S^1}{}{F}{C}{}{}(W,\overline{H}^+,\overline{J}^+) & \longrightarrow & \homology{\mathrm{III,IV,V}}{S^1}{}{F}{C}{}{}(W,\overline{H}^-,\overline{J}^-) \end{IEEEeqnarray*} are the restrictions (see \cref{lem:maps restrict to subcomplex}) of the maps \begin{IEEEeqnarray*}{rrCl} \del \colon & \homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}) & \longrightarrow & \homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}), \\ U \colon & \homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}) & \longrightarrow & \homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}), \\ \phi^{-,+} \colon & \homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H}^+,\overline{J}^+) & \longrightarrow & \homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H}^-,\overline{J}^-). \end{IEEEeqnarray*} This completes the definition of $\homology{\mathrm{III,IV,V}}{S^1}{W}{F}{C}{}{}$. Since $\homology{\mathrm{III,IV,V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J})$ is a subcomplex of $\homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J})$, we have an inclusion natural transformation $\iota \colon \homology{\mathrm{III,IV,V}}{S^1}{W}{F}{C}{}{} \longrightarrow \homology{\mathrm{I-V}}{S^1}{W}{F}{C}{}{}$. \end{definition} \begin{lemma} \label{lem:maps restrict to subcomplex} In \cref{def:subcomplex}, the maps $\del, U$ and $\phi^{-,+}$ restrict to maps on $\homology{\mathrm{III,IV,V}}{S^1}{W}{F}{C}{}{}$. \end{lemma} \begin{proof} To show that $U$ restricts to a map on $\homology{\mathrm{III,IV,V}}{S^1}{W}{F}{C}{}{}$, we simply note that by definition $U$ affects only $z$ and not $\gamma$. We show that $\del$ restricts to a map on $\homology{\mathrm{III,IV,V}}{S^1}{W}{F}{C}{}{}$. For this, let $[z^{\pm}, \gamma^{\pm}] \in \mathcal{P}(\overline{H})$ be such that $\img \gamma^+$ is in region $\rmn{3}$, $\rmn{4}$ or $\rmn{5}$ and assume that there exists a Floer trajectory from $[z^+, \gamma^+]$ to $[z^-, \gamma^-]$ with respect to $(\overline{H}, \overline{J})$. We need to show that $\img \gamma^-$ is in region $\rmn{3}$, $\rmn{4}$ or $\rmn{5}$. Assume by contradiction that $\img \gamma^-$ is in region $\rmn{1}$ or $\rmn{2}$. In the case where $\img \gamma^+$ is in region $\rmn{4}$ or $\rmn{5}$, the computation \begin{IEEEeqnarray*}{rCls+x*} 0 & < & \mathcal{A}_{\overline{H}}(z^-,\gamma^-) & \quad [\text{by \cref{lem:action stair}}] \\ & \leq & \mathcal{A}_{\overline{H}}(z^+,\gamma^+) & \quad [\text{by \cref{lem:action energy for floer trajectories}}] \\ & < & 0 & \quad [\text{by \cref{lem:action stair}}] \end{IEEEeqnarray*} gives a contradiction. It remains to derive a contradiction in the case where $\img \gamma^+$ is in region $\rmn{3}$. By \cref{cor:hamiltonian orbits are reeb orbits}, $\gamma^+$ is (approximately) of the form $\gamma^+(t) = (r^+, \rho^+(t))$ for some Reeb orbit $\rho^+$ in $(\del V, \lambda_V|_{\del V})$.
The ``no escape'' lemma (\cref{lem:no escape}) implies that the Floer trajectory is inside $\hat{\varphi}(V \union [0, r^+] \times \del V)$, while the ``asymptotic behaviour'' lemma (\cref{lem:asymptotic behaviour}) implies that the Floer trajectory must leave $\hat{\varphi}(V \union [0, r^+] \times \del V)$. This completes the proof that $\del$ restricts to a map on $\homology{\mathrm{III,IV,V}}{S^1}{W}{F}{C}{}{}$. To show that $\phi^{-,+}$ restricts to a map on $\homology{\mathrm{III,IV,V}}{S^1}{W}{F}{C}{}{}$, we would use a proof analogous to that of $\del$. The key difference is that now the Floer trajectory would be defined with respect to homotopies of Hamiltonians and almost complex structures. This does not affect the proof because \cref{lem:action energy for floer trajectories,lem:asymptotic behaviour,lem:no escape} also apply to homotopies. \end{proof} \begin{definition} \label{def:quotient complex} Define a functor $\homology{\mathrm{I,II}}{S^1}{W}{F}{C}{}{} \colon \stair{W,V}^{} \longrightarrow \comp$ as follows. For $(\overline{H},\overline{J}) \in \stair{W,V}$, the module $\homology{\mathrm{I,II}}{S^1}{W}{F}{C}{}{}(\overline{H}, \overline{J}) \coloneqq \homology{\mathrm{I,II}}{S^1}{}{F}{C}{}{}(W,\overline{H}, \overline{J})$ is given by the quotient \begin{IEEEeqnarray*}{rCls+x*} \homology{\mathrm{I,II}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}) & \coloneqq & \homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H}, \overline{J}) / \homology{\mathrm{III,IV,V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}). \end{IEEEeqnarray*} For $(\overline{H}^+,\overline{J}^+) \leq (\overline{H}^{-},\overline{J}^-) \in \stair{W,V}$, the continuation map $\phi^{-,+} \colon \homology{\mathrm{I,II}}{S^1}{}{F}{C}{}{}(W,\overline{H}^+,\overline{J}^+) \longrightarrow \homology{\mathrm{I,II}}{S^1}{}{F}{C}{}{}(W,\overline{H}^-,\overline{J}^-)$ is the unique map such that the diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \homology{\mathrm{III,IV,V}}{S^1}{}{F}{C}{}{}(W,\overline{H}^+,\overline{J}^+) \ar[r, hookrightarrow, "\iota^{+}"] \ar[d, swap, "\phi^{-,+}"] & \homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H}^+,\overline{J}^+) \ar[d, "\phi^{-,+}"] \ar[r, two heads, "\pi^{+}"] & \homology{\mathrm{I,II}}{S^1}{}{F}{C}{}{}(W,\overline{H}^+,\overline{J}^+) \ar[d, dashed, "\exists ! \phi^{-,+}"]\\ \homology{\mathrm{III,IV,V}}{S^1}{}{F}{C}{}{}(W,\overline{H}^-,\overline{J}^-) \ar[r, hookrightarrow, swap, "\iota^{-}"] & \homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H}^-,\overline{J}^-) \ar[r, two heads, swap, "\pi^{-}"] & \homology{\mathrm{I,II}}{S^1}{}{F}{C}{}{}(W,\overline{H}^-,\overline{J}^-) \end{tikzcd} \end{IEEEeqnarray*} commutes. There is a projection natural transformation $\pi \colon \homology{\mathrm{I-V}}{S^1}{W}{F}{C}{}{} \longrightarrow \homology{\mathrm{I,II}}{S^1}{W}{F}{C}{}{}$. \end{definition} \begin{definition} \label{def:v with respect to stair nt} We define a natural transformation $\eta \colon \homology{}{S^1}{V}{F}{C}{}{} \circ \pi^{\mathcal{H} \times \mathcal{J}}_{W,V} \longrightarrow \homology{\mathrm{I,II}}{S^1}{W}{F}{C}{}{}$ as follows. For $(\overline{H},\overline{J}) \in \stair{W,V}$, the map $\eta^{\overline{H},\overline{J}} \colon \homology{}{S^1}{}{F}{C}{}{}(V,\overline{H}_V, \overline{J}_V) \longrightarrow \homology{\mathrm{I,II}}{S^1}{}{F}{C}{}{}(W,\overline{H}, \overline{J})$ is given by $\eta^{\overline{H},\overline{J}}([z,\gamma]) \coloneqq [z, \hat{\varphi} \circ \gamma]$. 
\end{definition} \begin{lemma} \cref{def:v with respect to stair nt} is well posed, i.e.: \begin{enumerate} \item \label{lem:v with respect to stair nt 1} $\eta^{\overline{H},\overline{J}}$ is well-defined and it is a morphism of filtered modules. \item \label{lem:v with respect to stair nt 2} $\eta^{\overline{H},\overline{J}}$ commutes with the $U$ map. \item \label{lem:v with respect to stair nt 3} $\eta^{\overline{H},\overline{J}}$ is a chain map. \item \label{lem:v with respect to stair nt 4} The maps $\eta^{\overline{H},\overline{J}}$ assemble into a natural transformation. \end{enumerate} \end{lemma} \begin{proof} \ref{lem:v with respect to stair nt 1}: Since $\hat{\varphi}$ is a Liouville embedding, if $[z,\gamma] \in \mathcal{P}(\overline{H}_V)$ then $[z,\hat{\varphi} \circ \gamma] \in \mathcal{P}(\overline{H})$ and $\mathcal{A}_{\overline{H}}(z,\hat{\varphi} \circ \gamma) = \mathcal{A}_{\overline{H}_V}(z,\gamma)$. \ref{lem:v with respect to stair nt 2}: We need to show that $U^{}_W \circ \eta^{\overline{H},\overline{J}}([z,\gamma]) = \eta^{\overline{H},\overline{J}} \circ U ^{}_V ([z,\gamma])$, for $[z,\gamma] \in \mathcal{P}(\overline{H}_V)$. If $\morse(z) = 0$, then both sides of the equation are $0$. If $\morse(z) > 0$, then \begin{IEEEeqnarray*}{rCls+x*} U^{}_W \circ \eta^{\overline{H},\overline{J}}([z,\gamma]) & = & U^{}_W ([z,\hat{\varphi} \circ \gamma]) & \quad [\text{by definition of $\eta$}] \\ & = & [\shf(z),\hat{\varphi} \circ \gamma] & \quad [\text{by definition of $U$}] \\ & = & \eta^{\overline{H},\overline{J}} [\shf(z),\gamma] & \quad [\text{by definition of $\eta$}] \\ & = & \eta^{\overline{H},\overline{J}} \circ U ^{}_V ([z,\gamma]) & \quad [\text{by definition of $U$}]. \end{IEEEeqnarray*} \ref{lem:v with respect to stair nt 3}: We need to show that $\eta^{\overline{H},\overline{J}} \circ \del ^{}_V([z^+,\gamma^+]) = \del ^{}_W \circ \eta^{\overline{H},\overline{J}}([z^+,\gamma^+])$, for every $[z^+,\gamma^+] \in \mathcal{P}(\overline{H}_V)$. By the ``no escape'' lemma (\cref{lem:no escape}), if $[z^-,\gamma^-] \in \mathcal{P}(\overline{H}_V)$ then the map \begin{IEEEeqnarray*}{rrCl} & \mathcal{M}_{\vphantom{0}}(\overline{H}_V,\overline{J}_V,[z^+,\gamma^+],[z^-,\gamma^-]) & \longrightarrow & \mathcal{M}_{\vphantom{0}}(\overline{H},\overline{J},[z^+,\hat{\varphi} \circ \gamma^+],[z^-,\hat{\varphi} \circ \gamma^-]) \\ & [w,u] & \longmapsto & [w,\hat{\varphi} \circ u] \end{IEEEeqnarray*} is an orientation preserving diffeomorphism. 
Then, we compute \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\eta^{\overline{H},\overline{J}} \circ \del ^{}_V([z^+,\gamma^+])}\\ \quad & = & \sum_{[z^-,\gamma^-] \in \mathcal{P}(\overline{H}_V) } \# \mathcal{M}_{\vphantom{0}}(\overline{H}_V, \overline{J}_V, [z^+,\gamma^+] , [z^-,\gamma^-] ) \cdot \eta^{\overline{H},\overline{J}} ([z^-,\gamma^-]) \\ \quad & = & \sum_{[z^-,\gamma^-] \in \mathcal{P}(\overline{H}_V) } \# \mathcal{M}_{\vphantom{0}}(\overline{H}_V, \overline{J}_V, [z^+,\gamma^+] , [z^-,\gamma^-] ) \cdot [z^-,\hat{\varphi} \circ \gamma^-] \\ \quad & = & \sum_{[z^-,\gamma^-] \in \mathcal{P}(\overline{H}_V) } \# \mathcal{M}_{\vphantom{0}}(\overline{H} , \overline{J} , [z^+,\hat{\varphi} \circ \gamma^+], [z^-,\hat{\varphi} \circ \gamma^-]) \cdot [z^-,\hat{\varphi} \circ \gamma^-] \\ \quad & = & \sum_{[z^-,\gamma^-_W] \in \mathcal{P}^{\mathrm{I,II}}(\overline{H})} \# \mathcal{M}_{\vphantom{0}}(\overline{H} , \overline{J} , [z^+,\gamma^+_W] , [z^-,\gamma^-_W]) \cdot [z^-,\gamma^-_W] \\ \quad & = & \sum_{[z^-,\gamma^-_W] \in \mathcal{P}(\overline{H}) } \# \mathcal{M}_{\vphantom{0}}(\overline{H} , \overline{J} , [z^+,\gamma^+_W] , [z^-,\gamma^-_W]) \cdot [z^-,\gamma^-_W] \\ \quad & = & \del ^{}_W ([z^+,\hat{\varphi} \circ \gamma^+]) \\ \quad & = & \del ^{}_W \circ \eta^{\overline{H},\overline{J}}([z^+,\gamma^+]). \end{IEEEeqnarray*} In this computation, in the third equality we used the orientation preserving diffeomorphism defined above, in the fourth equality we performed the variable change $[z^-,\gamma^-_W] \coloneqq [z^-,\hat{\varphi} \circ \gamma^-] \in \mathcal{P}^{\mathrm{I,II}}(\overline{H})$ (writing also $\gamma^+_W \coloneqq \hat{\varphi} \circ \gamma^+$) and in the fifth equality we used the fact that if $[z^-,\gamma^-_W] \in \mathcal{P}^{\mathrm{III,IV,V}}(\overline{H})$ then $[z^-,\gamma^-_W] = 0$ as an element of $\homology{\mathrm{I,II}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J})$. \ref{lem:v with respect to stair nt 4}: This proof is analogous to that of \ref{lem:v with respect to stair nt 3}. \end{proof} \begin{proposition} The map $\eta \colon \homology{}{S^1}{V}{F}{C}{}{} \circ \pi^{\mathcal{H} \times \mathcal{J}}_{W,V} \longrightarrow \homology{\mathrm{I,II}}{S^1}{W}{F}{C}{}{}$ is a natural isomorphism. \end{proposition} \begin{proof} It suffices to show that $\eta^{\overline{H},\overline{J}} \colon \homology{}{S^1}{}{F}{C}{}{}(V,\overline{H}_V,\overline{J}_V) \longrightarrow \homology{\mathrm{I,II}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J})$ admits an inverse as a map of $\Q$-modules. Define $\nu^{\overline{H},\overline{J}} \colon \homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}) \longrightarrow \homology{}{S^1}{}{F}{C}{}{}(V,\overline{H}_V,\overline{J}_V)$ by \begin{IEEEeqnarray*}{c+x*} \nu^{\overline{H},\overline{J}}([z,\gamma]) = \begin{cases} [z,\hat{\varphi}^{-1} \circ \gamma] & \text{if } [z,\gamma] \in \mathcal{P}^{\mathrm{I,II}}(\overline{H}), \\ 0 & \text{if } [z,\gamma] \in \mathcal{P}^{\mathrm{III,IV,V}}(\overline{H}). \end{cases} \end{IEEEeqnarray*} Then, by the universal property of the quotient of $\Q$-modules, $\nu^{\overline{H},\overline{J}}$ descends to a map $\nu^{\overline{H},\overline{J}} \colon \homology{\mathrm{I,II}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}) \longrightarrow \homology{}{S^1}{}{F}{C}{}{}(V,\overline{H}_V,\overline{J}_V)$, which is the inverse of $\eta^{\overline{H},\overline{J}}$. \end{proof} \begin{definition} \label{def:viterbo transfer map} The \textbf{Viterbo transfer map}, $\varphi_!
\colon \homology{}{S^1}{}{S}{H}{}{}(W, \lambda_W) \longrightarrow \homology{}{S^1}{}{S}{H}{}{}(V, \lambda_V)$, is given as follows. Consider the following diagram in the category of functors from $\admstair{W,V}$ to $\comp$: \begin{IEEEeqnarray}{c+x*} \plabel{eq:viterbo transfer map diagram} \begin{tikzcd} \homology{\mathrm{III,IV,V}}{S^1}{W}{F}{C}{}{} \circ \pi_{W,V}^{} \ar[r, hook, "\iota \circ \pi_{W,V}"] & \homology{\mathrm{I-V}}{S^1}{W}{F}{C}{}{} \circ \pi_{W,V}^{} \ar[r, two heads, "\pi \circ \pi_{W,V}"] & \homology{\mathrm{I,II}}{S^1}{W}{F}{C}{}{} \circ \pi_{W,V}^{} \\ & \homology{}{S^1}{W}{F}{C}{}{} \circ \pi_{W}^{} \ar[u, "\phi"] \ar[r, dashed, swap, "\exists ! \varphi"] & \homology{}{S^1}{V}{F}{C}{}{} \circ \pi_{V}^{} \ar[u, swap, two heads, hook, "\eta \circ \pi_{W,V}"] \end{tikzcd} \end{IEEEeqnarray} Passing to homology, we get a natural transformation $H \varphi \colon \homology{}{S^1}{W}{F}{H}{}{} \circ \pi_{W}^{} \longrightarrow \homology{}{S^1}{V}{F}{H}{}{} \circ \pi_{V}^{}$. Then, $\varphi_!$ is the unique map such that the following diagram commutes: \begin{IEEEeqnarray}{c+x*} \plabel{eq:viterbo transfer map} \begin{tikzcd} \homology{}{S^1}{W}{F}{H}{}{} \circ \pi_W^{} \ar[d, "H \varphi"] \ar[r] & \colim \homology{}{S^1}{W}{F}{H}{}{} \circ \pi_W^{} \ar[r, equal] \ar[d, dashed, "\exists ! \varphi_! = \colim H \varphi"] & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W) \ar[d, dashed, "\exists ! \varphi_!"] \\ \homology{}{S^1}{V}{F}{H}{}{} \circ \pi_V^{} \ar[r] & \colim \homology{}{S^1}{V}{F}{H}{}{} \circ \pi_V^{} \ar[r, equal] & \homology{}{S^1}{}{S}{H}{}{}(V,\lambda_V) \end{tikzcd} \end{IEEEeqnarray} We define the \textbf{Viterbo transfer map} on positive $S^1$-equivariant symplectic homology by declaring it to be the unique map such that the following diagram commutes: \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \homology{}{S^1}{}{S}{H}{\varepsilon}{}(W,\lambda_W) \ar[r] \ar[d, swap, "\varphi^\varepsilon_!"] & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W) \ar[r] \ar[d, "\varphi_!"] & \homology{}{S^1}{}{S}{H}{+}{}(W,\lambda_W) \ar[d, dashed, "\exists ! \varphi^+_!"] \\ \homology{}{S^1}{}{S}{H}{\varepsilon}{}(V,\lambda_V) \ar[r] & \homology{}{S^1}{}{S}{H}{}{}(V,\lambda_V) \ar[r] & \homology{}{S^1}{}{S}{H}{+}{}(V,\lambda_V) \end{tikzcd} \end{IEEEeqnarray*} \end{definition} \begin{remark} \label{rmk:viterbo transfer map def} We have the following observations about \cref{def:viterbo transfer map}. \begin{enumerate} \item In diagram \eqref{eq:viterbo transfer map}, we view $\colim \homology{}{S^1}{W}{F}{H}{}{} \circ \pi_W$ and $\colim \homology{}{S^1}{V}{F}{H}{}{} \circ \pi_V$ as constant functors, and we view $\varphi_! \colon \colim \homology{}{S^1}{W}{F}{H}{}{} \circ \pi_W \longrightarrow \colim \homology{}{S^1}{V}{F}{H}{}{} \circ \pi_V$ as a constant natural transformation, which is just a map. Existence and uniqueness of $\varphi_!$ come from the universal property of colimits. \item Since $\pi_W ( \admstair{W,V} )$ is a cofinal subset of $\admissible{W}$, we have $\homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W) = \colim \homology{}{S^1}{W}{F}{H}{}{} = \colim \homology{}{S^1}{W}{F}{H}{}{} \circ \pi_W$, and analogously for $V$. \item We are also using the fact that \begin{IEEEeqnarray*}{rCls+x*} \homology{}{S^1}{}{S}{H}{+}{}(W,\lambda_W) & = & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W) / \homology{}{S^1}{}{S}{H}{\varepsilon_W}{} (W,\lambda_W) \\ & = & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W) / \homology{}{S^1}{}{S}{H}{\varepsilon}{}(W,\lambda_W).
\end{IEEEeqnarray*} This is true because $\homology{}{S^1}{}{S}{H}{}{}$ is obtained as a direct limit of Floer homologies for increasing Hamiltonians, and for $(H,J) \in \admissible{W}$ with $H$ big enough we have that $H$ restricted to the interior of $W$ takes values in $(-\varepsilon,0) \subset (-\varepsilon_W,0)$ (and analogously for $V$). \end{enumerate} \end{remark} Let $\liouvle$ be the category whose objects are nondegenerate Liouville domains and whose morphisms are $0$-codimensional Liouville embeddings which are either strict or diffeomorphisms. \begin{theorem}[{\cite[Theorem 3.1.16]{guttMinimalNumberPeriodic2014}}] \label{thm:sh is functor not generalized} The following are contravariant functors: \begin{IEEEeqnarray*}{rrClCrrCl} \homology{}{S^1}{}{S}{H}{}{} \colon & \liouvle & \longrightarrow & \modl & \qquad & \homology{}{S^1}{}{S}{H}{+}{} \colon & \liouvle & \longrightarrow & \modl \\ & (V,\lambda_V) & \longmapsto & \homology{}{S^1}{}{S}{H}{}{}(V,\lambda_V) & \qquad & & (V,\lambda_V) & \longmapsto & \homology{}{S^1}{}{S}{H}{+}{}(V,\lambda_V) \\ & \varphi \downarrow & \longmapsto & \uparrow \varphi_! & \qquad & & \varphi \downarrow & \longmapsto & \uparrow \varphi_!^+ \\ & (W,\lambda_W) & \longmapsto & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W), & \qquad & & (W,\lambda_W) & \longmapsto & \homology{}{S^1}{}{S}{H}{+}{}(W,\lambda_W). \end{IEEEeqnarray*} \end{theorem} \section{Viterbo transfer map of a generalized Liouville embedding} \label{sec:viterbo transfer map of exact symplectic embedding} We now define the Viterbo transfer map in the case where $\varphi \colon (V,\lambda_V) \longrightarrow (W,\lambda_W)$ is a generalized Liouville embedding, i.e. $\varphi^* \edv \lambda_W = \edv \lambda_V$ and $(\varphi^* \lambda_W - \lambda_V)|_{\partial V}$ is exact. \begin{lemma}[{\cite[Lemma 7.5]{guttSymplecticCapacitiesPositive2018}}] \label{lem:exists deformed form} If $\phi \colon (V,\lambda_V) \longrightarrow (W, \lambda_W)$ is a $0$-codimensional strict generalized Liouville embedding, then there exists a $1$-form $\lambda'_W$ on $W$ such that $\edv \lambda'_W = \edv \lambda_W^{}$, $\lambda'_W = \lambda_W^{}$ near $\partial W$ and $\phi^* \lambda'_W = \lambda_V^{}$. \end{lemma} \begin{lemma} \phantomsection\label{lem:sh indep of potential} Let $(X,\lambda_X)$ and $(Y,\lambda_Y)$ be nondegenerate Liouville domains and assume that $\phi \colon (X,\lambda_X) \longrightarrow (Y, \lambda_Y)$ is a $0$-codimensional strict Liouville embedding. Suppose that $\lambda'_X \in \Omega^1(X)$ and $\lambda'_Y \in \Omega^1(Y)$ are $1$-forms such that \begin{IEEEeqnarray*}{rClCrCl} \edv \lambda'_X & = & \edv \lambda_X^{}, & \quad & \lambda'_X & = & \lambda_X^{} \text{ near } \partial X, \\ \edv \lambda'_Y & = & \edv \lambda_Y^{}, & \quad & \lambda'_Y & = & \lambda_Y^{} \text{ near } \partial Y, \\ \phi^* \lambda'_Y & = & \lambda'_X. 
\end{IEEEeqnarray*} Then, \begin{IEEEeqnarray*}{rClCl} \homology{}{S^1}{}{S}{H}{}{}(X,\lambda_X) & = & \homology{}{S^1}{}{S}{H}{}{}(X,\lambda'_X), \\ \homology{}{S^1}{}{S}{H}{+}{}(X,\lambda_X) & = & \homology{}{S^1}{}{S}{H}{+}{}(X,\lambda'_X), \end{IEEEeqnarray*} and the diagrams \begin{IEEEeqnarray}{c+x*} \plabel{eq:viterbo transfer map indep potential} \begin{tikzcd} \homology{}{S^1}{}{S}{H}{}{}(Y,\lambda_Y) \ar[r, equal] \ar[d, swap, "\phi_!"] & \homology{}{S^1}{}{S}{H}{}{}(Y,\lambda'_Y) \ar[d, "\phi'_!"] \\ \homology{}{S^1}{}{S}{H}{}{}(X,\lambda_X) \ar[r, equal] & \homology{}{S^1}{}{S}{H}{}{}(X,\lambda'_X) \end{tikzcd} \quad \begin{tikzcd} \homology{}{S^1}{}{S}{H}{+}{}(Y,\lambda_Y) \ar[r, equal] \ar[d, swap, "\phi_!^+"] & \homology{}{S^1}{}{S}{H}{+}{}(Y,\lambda'_Y) \ar[d, "{\phi'}_!^+"] \\ \homology{}{S^1}{}{S}{H}{+}{}(X,\lambda_X) \ar[r, equal] & \homology{}{S^1}{}{S}{H}{+}{}(X,\lambda'_X) \end{tikzcd} \end{IEEEeqnarray} commute. \end{lemma} \begin{proof} We note that the following concepts only depend on $\edv \lambda_X$ and on $\lambda_X$ near $\del X$: the set of admissible Hamiltonians and admissible almost complex structures, the Hamiltonian vector field, action, the module which underlies the Floer complex (by all the previous statements), the Floer equation and the notion of Floer trajectories (also by the previous statements), the $U$ map, the differential and the continuation maps. All the statements follow immediately from the definitions given in \cref{sec:Floer homology}, except the fact that the action actually only depends on $\edv \lambda_X$ and on $\lambda_X|_{\partial X}$. To prove this, it is enough to show that \begin{IEEEeqnarray}{c+x*} \phantomsection\label{eq:action indep form} \int_{S^1}^{} \gamma^* (\hat{\lambda}_X^{} - \hat{\lambda}'_X) = 0. \end{IEEEeqnarray} Since $\hat{\lambda}_X^{} - \hat{\lambda}'_X$ is closed, it defines a cohomology class $[\hat{\lambda}_X^{} - \hat{\lambda}'_X] \in H^1_{\mathrm{dR}}(\hat{X})$. The orbit $\gamma$ also defines a homology class $[\gamma] \coloneqq \gamma_* [S^1] \in H_1(\hat{X};\Z)$. Equation \eqref{eq:action indep form} can be restated as \begin{IEEEeqnarray}{c+x*} \phantomsection\label{eq:action indep form topology} [\hat{\lambda}_X^{} - \hat{\lambda}'_X]([\gamma]) = 0. \end{IEEEeqnarray} If $\gamma$ is contractible, then Equation \eqref{eq:action indep form topology} holds. If $\gamma$ is noncontractible, $\gamma$ must have an associated Reeb orbit $\rho \in C^{\infty}(S^1, \partial X)$. Denote by $\iota \colon \partial X \longrightarrow \hat{X}$ the inclusion. \begin{IEEEeqnarray*}{rCls+x*} [\hat{\lambda}_X^{} - \hat{\lambda}'_X]([\gamma]) & = & [\hat{\lambda}_X^{} - \hat{\lambda}'_X](\iota_* [\rho]) & \quad [\text{since $\gamma$ and $\iota \circ \rho$ are homotopic}] \\ & = & (\iota^*[\hat{\lambda}_X^{} - \hat{\lambda}'_X])([\rho]) & \quad [\text{by definition of pullback}] \\ & = & 0 & \quad [\text{since $\lambda'_X = \lambda_X^{}$ near $\partial X$}]. \end{IEEEeqnarray*} Since the functors and natural transformations in diagram \eqref{eq:viterbo transfer map diagram} only depend on $\edv \lambda_X, \edv \lambda_Y$ and on $\lambda_X, \lambda_Y$ near the boundaries, the diagrams \eqref{eq:viterbo transfer map indep potential} commute. 
\end{proof} \begin{definition}[{\cite[Definition 7.6]{guttSymplecticCapacitiesPositive2018}}] \phantomsection\label{def:viterbo transfer generalized} If $\varphi \colon (V,\lambda_V) \longrightarrow (W,\lambda_W)$ is a strict generalized Liouville embedding of codimension $0$, then the \textbf{Viterbo transfer map} of $\varphi$ is defined as follows. Choose $\lambda'_W \in \Omega^1(W)$ as in \cref{lem:exists deformed form}. Denote by $\varphi' \colon (V,\lambda_V) \longrightarrow (W,\lambda'_W)$ the Liouville embedding which as a map of sets coincides with $\varphi$. Then, define \begin{IEEEeqnarray*}{rRCRCl} \varphi_! \colon & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W) & = & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda'_W) & \xrightarrow{\varphi'_!} & \homology{}{S^1}{}{S}{H}{}{}(V,\lambda_V), \\ \varphi^+_! \colon & \homology{}{S^1}{}{S}{H}{+}{}(W,\lambda_W) & = & \homology{}{S^1}{}{S}{H}{+}{}(W,\lambda'_W) & \xrightarrow{\varphi'_!} & \homology{}{S^1}{}{S}{H}{+}{}(V,\lambda_V), \end{IEEEeqnarray*} where the equality was explained in \cref{lem:sh indep of potential} and the arrows are the Viterbo transfer maps of a Liouville embedding as in \cref{def:viterbo transfer map}.\end{definition} \begin{lemma} In \cref{def:viterbo transfer generalized}, $\varphi_!$ and $\varphi_!^+$ are independent of the choice of $\lambda'_W$. \end{lemma} \begin{proof} Let $\lambda'_W$ and $\lambda''_W$ be $1$-forms as in \cref{lem:exists deformed form}, and denote the corresponding Liouville embeddings by $\varphi' \colon (V,\lambda_V) \longrightarrow (W,\lambda'_W)$ and $\varphi'' \colon (V,\lambda_V) \longrightarrow (W,\lambda''_W)$ (note that as set-theoretic maps, $\varphi' = \varphi'' = \varphi$). Then, by \cref{lem:sh indep of potential}, the following diagram commutes: \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W) \ar[r, equals] \ar[d, equals] & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda'_W) \ar[d, equals] \ar[r, "\varphi'_!"] & \homology{}{S^1}{}{S}{H}{}{}(V,\lambda_V) \ar[d, equals] \\ \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W) \ar[r, equals] & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda''_W) \ar[r, "\varphi''_!"] & \homology{}{S^1}{}{S}{H}{}{}(V,\lambda_V) \end{tikzcd} \end{IEEEeqnarray*} In this diagram, the top row composes to the Viterbo transfer map defined with respect to $\lambda'_W$ and the bottom row composes to the Viterbo transfer map defined with respect to $\lambda''_W$. \end{proof} Let $\liouvndg$ be the ``category'' whose objects are nondegenerate Liouville domains and whose morphisms are $0$-codimensional generalized Liouville embeddings which are either strict or diffeomorphisms. Strictly speaking, since composition of generalized Liouville embeddings is not in general a generalized Liouville embedding, this is not a category. However, $\liouvndg$ does fit into the notion of \textbf{categroid} (see \cref{def:categroid}), which is like a category except that composition is only partially defined. One can then talk about functors between categroids. \begin{theorem} The assignments \begin{IEEEeqnarray*}{rrClCrrCl} \homology{}{S^1}{}{S}{H}{}{} \colon & \liouvndg & \longrightarrow & \modl & \qquad & \homology{}{S^1}{}{S}{H}{+}{} \colon & \liouvndg & \longrightarrow & \modl \\ & (V,\lambda_V) & \longmapsto & \homology{}{S^1}{}{S}{H}{}{}(V,\lambda_V) & \qquad & & (V,\lambda_V) & \longmapsto & \homology{}{S^1}{}{S}{H}{+}{}(V,\lambda_V) \\ & \varphi \downarrow & \longmapsto & \uparrow \varphi_!
& \qquad & & \varphi \downarrow & \longmapsto & \uparrow \varphi_!^+ \\ & (W,\lambda_W) & \longmapsto & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W), & \qquad & & (W,\lambda_W) & \longmapsto & \homology{}{S^1}{}{S}{H}{+}{}(W,\lambda_W) \end{IEEEeqnarray*} are contravariant functors. \end{theorem} \begin{proof} We prove the result only for $\homology{}{S^1}{}{S}{H}{}{}$, since the proof for $\homology{}{S^1}{}{S}{H}{+}{}$ is analogous. It suffices to assume that $\varphi \colon (V, \lambda_V) \longrightarrow (W, \lambda_W)$ and $\psi \colon (W, \lambda_W) \longrightarrow (Z, \lambda_Z)$ are composable strict generalized Liouville embeddings of codimension 0 and to prove that $(\psi \circ \varphi)_! = \varphi_! \circ \psi_!$. Here, ``composable'' means that the composition $\psi \circ \varphi$ is also a generalized Liouville embedding. We start by choosing \begin{IEEEeqnarray*}{rClCrClrCllCrCl} \lambda'_W & \in & \Omega^1(W) & \quad\text{such that}\quad & \edv \lambda'_W & = & \edv \lambda_W^{},\quad & \lambda'_W & = & \lambda_W^{} & \text{ near } \partial W, & \quad\text{and}\quad & \varphi^* \lambda'_W & = & \lambda_V^{}, \\ \lambda'_Z & \in & \Omega^1(Z) & \quad\text{such that}\quad & \edv \lambda'_Z & = & \edv \lambda_Z^{},\quad & \lambda'_Z & = & \lambda_Z^{} & \text{ near } \partial Z, & \quad\text{and}\quad & \psi^* \lambda'_Z & = & \lambda_W^{}, \\ \lambda''_Z & \in & \Omega^1(Z) & \quad\text{such that}\quad & \edv \lambda''_Z & = & \edv \lambda'_Z, \quad & \lambda''_Z & = & \lambda'_Z & \text{ near } \partial Z, & \quad\text{and}\quad & \psi^* \lambda''_Z & = & \lambda'_W. \end{IEEEeqnarray*} Therefore, we have Liouville embeddings \begin{IEEEeqnarray*}{rCrCl} \varphi' & \colon & (V,\lambda_V^{}) & \longrightarrow & (W, \lambda'_W), \\ \psi' & \colon & (W,\lambda_W^{}) & \longrightarrow & (Z, \lambda'_Z), \\ \psi'' & \colon & (W,\lambda'_W) & \longrightarrow & (Z, \lambda''_Z). \end{IEEEeqnarray*} We can define the Viterbo transfer maps \begin{IEEEeqnarray*}{rLCLCl} \varphi_! \colon & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W) & = & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda'_W) & \xrightarrow{\varphi'_!} & \homology{}{S^1}{}{S}{H}{}{}(V,\lambda_V), \\ \psi_! \colon & \homology{}{S^1}{}{S}{H}{}{}(Z,\lambda_Z) & = & \homology{}{S^1}{}{S}{H}{}{}(Z,\lambda'_Z) & \xrightarrow{\psi'_!} & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W), \\ (\psi \circ \varphi)_! \colon & \homology{}{S^1}{}{S}{H}{}{}(Z,\lambda_Z) & = & \homology{}{S^1}{}{S}{H}{}{}(Z,\lambda''_Z) & \xrightarrow{(\psi'' \circ \varphi')_!} & \homology{}{S^1}{}{S}{H}{}{}(V,\lambda_V). \end{IEEEeqnarray*} Consider the following commutative diagram: \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \homology{}{S^1}{}{S}{H}{}{}(Z,\lambda_Z) \ar[r, equals] \ar[dr, dashed, swap, "\psi_!"] \ar[drdr, dashed, bend right, swap, "(\psi \circ \varphi)_!"] & \homology{}{S^1}{}{S}{H}{}{}(Z,\lambda'_Z) \ar[d, "\psi'_!"] \ar[r, equals] & \homology{}{S^1}{}{S}{H}{}{}(Z,\lambda''_Z) \ar[d, "\psi''_!"] \ar[dd, bend left=90, "(\psi'' \circ \varphi')_!"] \\ & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W) \ar[r, equals] \ar[dr, swap, dashed, "\varphi_!"] & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda'_W) \ar[d, "\varphi'_!"] \\ & & \homology{}{S^1}{}{S}{H}{}{}(V,\lambda_V) \end{tikzcd} \end{IEEEeqnarray*} Here, the two small triangles and the outside arrows commute by definition of the Viterbo transfer map of a generalized Liouville embedding, the square commutes by \cref{lem:sh indep of potential}, and $(\psi'' \circ \varphi')_! = \varphi'_!
\circ \psi''_!$ by \cref{thm:sh is functor not generalized}. Therefore, $(\psi \circ \varphi)_! = \varphi_! \circ \psi_!$. \end{proof} \section{\texorpdfstring{$\delta$}{Delta} map} \label{sec:delta map} Let $(X,\lambda)$ be a nondegenerate Liouville domain. Our goal in this section is to define a map $\delta \colon \homology{}{S^1}{}{S}{H}{+}{}(X) \longrightarrow H_\bullet(BS^1;\Q) \otimes H_\bullet(X,\partial X; \Q)$. As we will see, $\delta = \alpha \circ \delta_0$, where $\delta_0 \colon \homology{}{S^1}{}{S}{H}{+}{}(X) \longrightarrow \homology{}{S^1}{}{S}{H}{\varepsilon}{}(X)$ is the continuation map associated to a long exact sequence in homology (see \cref{def:delta map}) and $\alpha \colon \homology{}{S^1}{}{S}{H}{\varepsilon}{}(X) \longrightarrow H_\bullet(BS^1;\Q) \otimes H_\bullet(X,\partial X; \Q)$ is an isomorphism which we define in several steps (see \cref{lem:iso floer and alt floer,lem:iso from floer to morse,lem:iso from floer to singular,lem:iso from symplectic to singular}). For every $(H,J) \in \admissible{X}$, define \begin{IEEEeqnarray*}{rCrCrCls+x*} H' & \coloneqq & H_{e_0} & \colon & S^1 \times \hat{X} & \longrightarrow & \R, \\ J' & \coloneqq & J_{e_0} & \colon & S^1 \times \hat{X} & \longrightarrow & \End(T \hat{X}), \end{IEEEeqnarray*} where $e_0 \in S^{2N+1} \subset \C^{N+1}$ is the first vector in the canonical basis of $\C^{N+1}$. We start by giving an alternative definition of the $S^1$-equivariant Floer chain complex. \begin{definition}[{\cite[Remark 5.15]{guttSymplecticCapacitiesPositive2018}}] We define a chain complex $\homology{}{S^1}{}{F}{C}{}{}(X,H,J)_{\mathrm{alt}}$ as follows. Let $u$ be a formal variable of degree $2$ and consider $\Q \{1,\ldots,u^N\}$, the $\Q$-module of polynomials in $u$ of degree less or equal to $2N$. As a $\Q$-module, \begin{IEEEeqnarray*}{c+x*} \homology{}{S^1}{}{F}{C}{}{}(X,H,J)_{\mathrm{alt}} \coloneqq \Q \{1,\ldots,u^N\} \otimes \homology{}{}{}{F}{C}{}{}(X,H',J'), \end{IEEEeqnarray*} where $\homology{}{}{}{F}{C}{}{}(X,H',J')$ is the Floer chain complex (not $S^1$-equivariant) of $X$ with respect to $(H',J')$, with $\Q$ coefficients. We will now define a differential $\partial_{\mathrm{alt}}$ on $\homology{}{S^1}{}{F}{C}{}{}(X,H,J)_{\mathrm{alt}}$. For every $j = 0,\ldots,N$, define a map $\varphi_j \colon \homology{}{}{}{F}{C}{}{}(X,H',J') \longrightarrow \homology{}{}{}{F}{C}{}{}(X,H',J')$ by \begin{IEEEeqnarray*}{c+x*} \varphi_j(\gamma^+) \coloneqq \sum_{\gamma^- \in \mathcal{P}(H')} \# \mathcal{M}_{\vphantom{0}}(H,J,[e_j,\gamma^+],[e_0,\gamma^-]) \cdot \gamma^-, \end{IEEEeqnarray*} for every $\gamma^+ \in \mathcal{P}(H')$. Note that $\varphi_0 \colon \homology{}{}{}{F}{C}{}{}(X,H',J') \longrightarrow \homology{}{}{}{F}{C}{}{}(X,H',J')$ is the usual differential of the Floer chain complex. Finally, we define \begin{IEEEeqnarray*}{rrCl} \del_{\mathrm{alt}} \colon & \Q \{1,\ldots,u^N\} \tensorpr \homology{}{}{}{F}{C}{}{}(X,H',J') & \longrightarrow & \Q \{1,\ldots,u^N\} \tensorpr \homology{}{}{}{F}{C}{}{}(X,H',J') \\ & u^k \tensorpr \gamma & \longmapsto & \sum_{j=0}^{k} u ^{k-j} \tensorpr \varphi_j(\gamma). \end{IEEEeqnarray*} \end{definition} \begin{lemma}[{\cite[Section 2.3]{bourgeoisEquivariantSymplecticHomology2016}}] \label{lem:iso floer and alt floer} The map \begin{IEEEeqnarray*}{rCl} \homology{}{S^1}{}{F}{C}{}{}(X,H,J) & \longrightarrow & \homology{}{S^1}{}{F}{C}{}{}(X,H,J)_{\mathrm{alt}} \\ {[e_j, \gamma]} & \longmapsto & u^j \otimes \gamma \end{IEEEeqnarray*} is an isomorphism of chain complexes. 
\end{lemma} Recall that in $X$, the Hamiltonian $H$ is assumed to be $C^2$-small and $S^1$-independent. Therefore, if $\gamma \colon S^1 \longrightarrow \hat{X}$ is a $1$-periodic orbit of $H'$ and $\img \gamma \subset X$, then $\gamma$ is constant with value $x \in X$, where $x$ is a critical point of $H'$. We will now assume that the Hamiltonian $H$ is chosen such that if $x^{\pm}$ are critical points of $H'$, then \begin{IEEEeqnarray}{c+x*} \plabel{eq:self indexing} H'(x^+) \leq H'(x^-) \Longrightarrow \morse(x^+,H') \geq \morse(x^-,H'). \end{IEEEeqnarray} We will denote by $(MC(X,H'), \partial^M)$ the Morse complex of $X$ with respect to $H'$, defined with the following conventions. As a vector space, $MC(X,H')$ is the vector space over $\Q$ generated by the critical points of $H'$. If $x^\pm$ are critical points of $H'$, the coefficient $\p{<}{}{\partial^{M} (x^+), x^-}$ is the count of gradient flow lines of $H'$ from $x^-$ to $x^+$. Finally, the degree of a critical point $x$ is the Morse index of $x$. \begin{lemma} \label{lem:iso from floer to morse} There is a canonical isomorphism of chain complexes \begin{IEEEeqnarray*}{c+x*} (\homology{}{S^1}{}{F}{C}{\varepsilon}{}(X,H,J), \partial_{\mathrm{alt}}) = (\Q \{1,\ldots,u^N\} \otimes MC(X,H'), \id \otimes \partial^M). \end{IEEEeqnarray*} \end{lemma} \begin{proof} By \cref{rmk:types of orbits,lem:action admissible,lem:iso floer and alt floer}, there is a canonical isomorphism of $\Q$-modules \begin{IEEEeqnarray*}{c+x*} \homology{}{S^1}{}{F}{C}{\varepsilon}{}(X,H,J) = \Q \{1,\ldots,u^N\} \otimes MC(X,H'). \end{IEEEeqnarray*} We show that this isomorphism is a chain map. We claim that if $j \geq 1$ and $x^+, x^-$ are critical points of $H'$, then $\dim_{(w,u)} \mathcal{M}(H,J,[e_j,x^+],[e_0,x^-]) \geq 1$. To see this, we compute \begin{IEEEeqnarray*}{rCls+x*} \dim_{(w,u)} \mathcal{M}(H,J,[e_j,x^+],[e_0,x^-]) & = & \ind(e_j, x^+) - \ind(e_0, x^-) - 1 \\ & = & \morse(e_j) - \morse(e_0) + \morse(x^+,H') - \morse(x^-,H') - 1 \\ & = & 2 j + \morse(x^+,H') - \morse(x^-,H') - 1 \\ & \geq & 2 j - 1 \\ & \geq & 1, \end{IEEEeqnarray*} where in the fourth line we used \cref{lem:action energy for floer trajectories} and Equation \eqref{eq:self indexing}. Therefore, if $j \geq 1$ and $x^+$ is a critical point of $H'$ then $\varphi_j(x^+) = 0$. This implies that \begin{IEEEeqnarray*}{c+x*} \partial_{\mathrm{alt}}(u^k \otimes x^+) = u^k \otimes \varphi_0(x^+), \end{IEEEeqnarray*} where $\varphi_0(x^+) = \partial^M(x^+)$ is the Morse theory differential applied to $x^+$. \end{proof} \begin{lemma} \label{lem:iso from floer to singular} There is a canonical isomorphism \begin{IEEEeqnarray*}{c+x*} \homology{}{S^1}{}{F}{H}{\varepsilon}{}(X,H,J) = \Q \{1,\ldots,u^N\} \otimes H_\bullet(X, \partial X; \Q). \end{IEEEeqnarray*} \end{lemma} \begin{proof} \begin{IEEEeqnarray*}{rCls+x*} \homology{}{S^1}{}{F}{H}{\varepsilon}{}(X,H,J) & = & H(\Q \{1,\ldots,u^N\} \otimes MC(X,H')) \\ & = & \Q \{1,\ldots,u^N\} \otimes MH_\bullet(X,H') \\ & = & \Q \{1,\ldots,u^N\} \otimes H_{\bullet}(X, \partial X; \Q), \end{IEEEeqnarray*} where in the first equality we used \cref{lem:iso from floer to morse}, in the second equality we used the definition of the differential of $\Q \{1,\ldots,u^N\} \otimes MC(X,H')$, and in the third equality we used the isomorphism between Morse homology and singular homology. 
\end{proof} \begin{lemma} \label{lem:iso from symplectic to singular} There is a canonical isomorphism \begin{IEEEeqnarray*}{c+x*} \alpha \colon \homology{}{S^1}{}{S}{H}{\varepsilon}{}(X) \longrightarrow H_\bullet(BS^1;\Q) \otimes H_\bullet(X,\partial X; \Q). \end{IEEEeqnarray*} \end{lemma} \begin{proof} \begin{IEEEeqnarray*}{rCls+x*} \homology{}{S^1}{}{S}{H}{\varepsilon}{}(X) & = & \varinjlim_{N,H,J} \homology{}{S^1}{}{F}{H}{\varepsilon}{}(X,H,J) \\ & = & \varinjlim_{N,H,J} \Q \{1,\ldots,u^N\} \otimes H_\bullet(X, \partial X; \Q) \\ & = & \Q[u] \otimes H_\bullet(X, \partial X; \Q) \\ & = & H_\bullet(BS^1; \Q) \otimes H_\bullet(X, \partial X; \Q), \end{IEEEeqnarray*} where in the first equality we used the definition of $S^1$-equivariant symplectic homology and in the second equality we used \cref{lem:iso from floer to singular}. \end{proof} \begin{definition} \phantomsection\label{def:delta map} We define a map $\delta \colon \homology{}{S^1}{}{S}{H}{+}{}(X) \longrightarrow H_\bullet(BS^1;\Q) \otimes H_\bullet(X,\partial X; \Q)$ as follows. For every $(H,J) \in \admissible{X}$, consider the short exact sequence of complexes \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} 0 \ar[r] & \homology{}{S^1}{}{F}{C}{\varepsilon}{}(X,H,J) \ar[r] & \homology{}{S^1}{}{F}{C}{}{}(X,H,J) \ar[r] & \homology{}{S^1}{}{F}{C}{+}{}(X,H,J) \ar[r] & 0 \end{tikzcd} \end{IEEEeqnarray*} There is an associated long exact sequence in homology \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \cdots \ar[r] & \homology{}{S^1}{}{F}{H}{}{}(X,H,J) \ar[r] & \homology{}{S^1}{}{F}{H}{+}{}(X,H,J) \ar[r, "\delta^{H,J}"] & \homology{}{S^1}{}{F}{H}{\varepsilon}{}(X,H,J) \ar[r] & \cdots \end{tikzcd} \end{IEEEeqnarray*} Passing to the colimit, we obtain a sequence \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \cdots \ar[r] & \homology{}{S^1}{}{S}{H}{}{}(X) \ar[r] & \homology{}{S^1}{}{S}{H}{+}{}(X) \ar[r, "\delta_0"] & \homology{}{S^1}{}{S}{H}{\varepsilon}{}(X) \ar[r] & \cdots \end{tikzcd} \end{IEEEeqnarray*} Finally, define $\delta \coloneqq \alpha \circ \delta_0 \colon \homology{}{S^1}{}{S}{H}{+}{}(X) \longrightarrow H_\bullet(BS^1;\Q) \otimes H_\bullet(X,\partial X; \Q)$, where $\alpha$ is the isomorphism from \cref{lem:iso from symplectic to singular}. \end{definition} Let $\varphi \colon (X,\lambda_X) \longrightarrow (Y, \lambda_Y)$ be a $0$-codimensional strict generalized Liouville embedding. Define $\rho \colon H_\bullet(Y,\partial Y; \Q) \longrightarrow H_\bullet(X,\partial X; \Q)$ to be the unique map such that the diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} H_\bullet(X,\del X; \Q) \ar[r, hook, two heads, "\varphi_*"] & H_\bullet(\varphi(X),\varphi(\del X); \Q) \ar[d, hook, two heads] \\ H_\bullet(Y,\del Y; \Q) \ar[r] \ar[u, dashed, "\exists ! \rho"] & H_\bullet(Y, Y \setminus \varphi(\itr X); \Q) \end{tikzcd} \end{IEEEeqnarray*} commutes, where $\varphi_*$ is an isomorphism by functoriality of homology and the vertical arrow on the right is an isomorphism by excision. The map $\rho$ is such that $\rho([Y]) = [X]$. \begin{proposition}[{\cite[Proposition 3.3]{guttSymplecticCapacitiesPositive2018}}] The diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \homology{}{S^1}{}{S}{H}{+}{}(Y) \ar[r, "\delta_Y"] \ar[d, swap, "\varphi_!"] & H_\bullet(BS^1;\Q) \otimes H_\bullet(Y,\partial Y; \Q) \ar[d, "\id \otimes \rho"] \\ \homology{}{S^1}{}{S}{H}{+}{}(X) \ar[r, swap, "\delta_X"] & H_\bullet(BS^1;\Q) \otimes H_\bullet(X,\partial X; \Q) \end{tikzcd} \end{IEEEeqnarray*} commutes. 
\end{proposition} \chapter{Symplectic capacities} \label{chp:symplectic capacities} \section{Symplectic capacities} \label{sec:symplectic capacities} In this section we define the notion of symplectic capacity (\cref{def:symplectic capacity}). A capacity is a function $c$ which assigns to every symplectic manifold $X$ (in a restricted subclass) a number $c(X) \in [0,+\infty]$, and which is functorial with respect to symplectic embeddings (in a restricted subclass). In the remaining sections of this chapter, we will define various capacities, namely the Lagrangian capacity (\cref{def:lagrangian capacity}), the Gutt--Hutchings capacities (\cref{def:gutt hutchings capacities}) and the McDuff--Siegel capacities (\cref{def:g tilde}). In this section we also deal with two small technicalities: \begin{enumerate} \item Most of the capacities we will deal with in this thesis are functorial with respect to generalized Liouville embeddings, which do not form a category. However, they form an object which is like a category but has only partially defined composition of morphisms. We will use the nomenclature of \cite{andersenTQFTQuantumTeichmuller2014} and call such an object a categroid (\cref{def:categroid}). \item As we will see, some capacities we will consider are defined on the class of nondegenerate Liouville domains. In the last part of this section, we will see how such a capacity can be extended uniquely to a capacity of Liouville domains. \end{enumerate} \begin{definition}[{\cite[Definition 22]{andersenTQFTQuantumTeichmuller2014}}] \label{def:categroid} A \textbf{categroid} $\mathbf{C}$ consists of a family of objects $\operatorname{Obj}(\mathbf{C})$ and for any pair of objects $A,B \in \mathbf{C}$ a set $\Hom_{\mathbf{C}}(A,B)$ such that the following holds. \begin{enumerate} \item For any three objects $A$, $B$, $C$ there is a subset $\operatorname{Comp}_{\mathbf{C}}(A,B,C) \subset \Hom_{\mathbf{C}}(B,C) \times \Hom_{\mathbf{C}}(A,B)$ of \textbf{composable morphisms} and an associated \textbf{composition map} \begin{IEEEeqnarray*}{c+x*} \circ \colon \operatorname{Comp}_{\mathbf{C}}(A,B,C) \longrightarrow \Hom_{\mathbf{C}}(A,C) \end{IEEEeqnarray*} such that composition of composable morphisms is associative. \item For any object $A$ there exists an \textbf{identity morphism} $\id_A \in \Hom_{\mathbf{C}}(A,A)$ which is composable with any morphism $f \in \Hom_{\mathbf{C}}(A,B)$ or $g \in \Hom_{\mathbf{C}}(B,A)$ and satisfies \begin{IEEEeqnarray*}{rCls+x*} f \circ \id_A & = & f, \\ \id_A \circ g & = & g. \end{IEEEeqnarray*} \end{enumerate} \end{definition} In this context, one has obvious definitions of subcategroids and also of functors between categroids. Denote by $\symp$ the category of symplectic manifolds, where morphisms are $0$-codimensional symplectic embeddings. \begin{definition} \label{def:symplectic categroid} A \textbf{symplectic categroid} is a subcategroid $\mathbf{C}$ of $\symp$ such that $(X,\omega) \in \mathbf{C}$ implies $(X,\alpha \omega) \in \mathbf{C}$ for all $\alpha > 0$. \end{definition} \begin{definition} \label{def:symplectic capacity} Let $\mathbf{C}$ be a symplectic categroid. A \textbf{symplectic capacity} is a functor $c \colon \mathbf{C} \longrightarrow [0,+\infty]$ satisfying \begin{description} \item[(Monotonicity)] If $(X,\omega_X) \longrightarrow (Y, \omega_Y)$ is a morphism in $\mathbf{C}$ then $c(X,\omega_X) \leq c(Y,\omega_Y)$; \item[(Conformality)] If $\alpha > 0$ then $c(X,\alpha \omega) = \alpha \, c(X, \omega)$. 
\end{description}
\end{definition}
Notice that the monotonicity property is just a restatement of the fact that $c$ is a functor.
\begin{definition} \label{def:nontrivial}
Let $c \colon \mathbf{C} \longrightarrow [0, +\infty]$ be a symplectic capacity with the property that $B^{2n}(1), Z^{2n}(1) \in \mathbf{C}$ for every $n$. We say that $c$ is \textbf{nontrivial} if it satisfies the (Nontriviality) property below, and \textbf{normalized} if it satisfies the (Normalization) property below.
\begin{description}
\item[(Nontriviality)] $0 < c(B^{2n}(1)) \leq c(Z^{2n}(1)) < + \infty$;
\item[(Normalization)] $0 < c(B^{2n}(1)) = 1 = c(Z^{2n}(1)) < + \infty$.
\end{description}
\end{definition}
\begin{example}
Let $(X, \omega)$ be a $2n$-dimensional symplectic manifold. Recall that the \textbf{symplectic volume} of $X$ is given by
\begin{IEEEeqnarray*}{c+x*}
\operatorname{vol}(X) \coloneqq \int_{X}^{} \frac{\omega^n}{n!}.
\end{IEEEeqnarray*}
The \textbf{volume capacity} of $X$ is given by
\begin{IEEEeqnarray*}{c+x*}
c_{\mathrm{vol}}(X) \coloneqq \p{}{2}{\frac{\operatorname{vol}(X)}{\operatorname{vol}(B)}}^{1/n},
\end{IEEEeqnarray*}
where $B \coloneqq B^{2n}(1) \coloneqq \{z \in \C^{n} \mid \pi |z|^2 \leq 1 \}$.
\end{example}
\begin{example}
Let $(Y,\Omega)$ be a symplectic manifold. We define the \textbf{embedding capacities}, denoted by $c_{(Y,\Omega)}$ and $c^{(Y,\Omega)}$, by
\begin{IEEEeqnarray*}{rCll}
c_{(Y,\Omega)}(X, \omega) & \coloneqq & \sup & \{ a > 0 \mid \text{there exists a symplectic embedding } (Y, a \Omega) \longrightarrow (X, \omega) \}, \\
c^{(Y,\Omega)}(X, \omega) & \coloneqq & \inf & \{ a > 0 \mid \text{there exists a symplectic embedding } (X, \omega) \longrightarrow (Y, a \Omega) \},
\end{IEEEeqnarray*}
for any symplectic manifold $(X, \omega)$. Let $\omega_0$ denote the canonical symplectic structure of $\C^n$. In the case where $(Y, \Omega) = (B^{2n}(1), \omega_0)$ or $(Y, \Omega) = (P^{2n}(1), \omega_0)$, we denote
\begin{IEEEeqnarray*}{lClCl}
c_B(X,\omega) & \coloneqq & c_{(B^{2n}(1), \omega_0)}(X, \omega) & = & \sup \{ a \ | \ \text{$\exists$ symplectic embedding } B^{2n}(a) \longrightarrow X \}, \\
c_P(X,\omega) & \coloneqq & c_{(P^{2n}(1), \omega_0)}(X, \omega) & = & \sup \{ a \ | \ \text{$\exists$ symplectic embedding } P^{2n}(a) \longrightarrow X \}.
\end{IEEEeqnarray*}
Embedding capacities tend to be hard to compute, since computing them amounts to solving a (generally difficult) embedding problem. For example, a restatement of Gromov's nonsqueezing theorem \cite{gromovPseudoHolomorphicCurves1985} is that $c_B$ is a normalized symplectic capacity. The capacity $c_B$ is also called \textbf{Gromov width}.
\end{example}
\begin{definition}[{\cite[Section 4.2]{guttSymplecticCapacitiesPositive2018}}]
\phantomsection\label{def:perturbation of liouville domain}
If $(X,\lambda)$ is a Liouville domain and $f \colon \partial X \longrightarrow \R$ is a smooth function, we define a new Liouville domain $(X_f,\lambda_f)$ as follows. Consider the completion $\hat{X}$, which has as subsets $X \subset \hat{X}$ and $\R \times \partial X \subset \hat{X}$. Then,
\begin{IEEEeqnarray*}{c+x*}
X_f \coloneqq \hat{X} \setminus \{ (\rho,y) \in \R \times \partial X \mid \rho > f(y) \}
\end{IEEEeqnarray*}
and $\lambda_f$ is the restriction of $\hat{\lambda}$ to $X_f$. Define $\mathcal{F}_{X}^{\pm}$ to be the set of $f^{\pm} \colon \partial X \longrightarrow \R^\pm$ such that $(X_{f^\pm}, \lambda_{f^\pm})$ is nondegenerate.
\end{definition} \begin{definition} \label{def:liouville categroid} A \textbf{Liouville categroid} is a subcategroid $\mathbf{L}$ of $\symp$ such that \begin{enumerate} \item Every object of $\mathbf{L}$ is a Liouville domain. \item If $X \in \mathbf{L}$ and $f^{+} \in \mathcal{F}^{+}_X$ then $X_{f^{+}} \in \mathbf{L}$ and the inclusion $X \longrightarrow X_{f^+}$ is a morphism in $\mathbf{L}$ which is composable with any other morphisms $Y \longrightarrow X$ or $X_{f^+} \longrightarrow Z$ in $\mathbf{L}$. \item If $X \in \mathbf{L}$ and $f^{-} \in \mathcal{F}^{-}_X$ then $X_{f^{-}} \in \mathbf{L}$ and the inclusion $X_{f^-} \longrightarrow X$ is a morphism in $\mathbf{L}$ which is composable with any other morphisms $Y \longrightarrow X_{f^-}$ or $X \longrightarrow Z$ in $\mathbf{L}$. \end{enumerate} \end{definition} \begin{example} Let $\liouvgle$ be the categroid whose objects are Liouville domains and whose morphisms are $0$-codimensional generalized Liouville embeddings. Then $\liouvgle$ is a Liouville categroid. \end{example} \begin{lemma} \label{lem:c is the unique extension to lvds} Let $\mathbf{L}$ be a Liouville categroid. Let $\mathbf{L}_{\mathrm{ndg}}$ be the full subcategroid of $\mathbf{L}$ of nondegenerate Liouville domains (i.e., if $X, Y \in \mathbf{L}_{\mathrm{ndg}}$ then $\Hom_{\mathbf{L}_{\mathrm{ndg}}}(X,Y) = \Hom_{\mathbf{L}}(X,Y)$). If $c \colon \mathbf{L}_{\mathrm{ndg}} \longrightarrow [0, +\infty]$ is a symplectic capacity, then there exists a unique symplectic capacity $\overline{c} \colon \mathbf{L} \longrightarrow [0, + \infty]$ such that the following diagram commutes: \begin{IEEEeqnarray}{c+x*} \plabel{eq:diagram extend cap liouv} \begin{tikzcd} \mathbf{L}_{\mathrm{ndg}} \ar[d] \ar[dr, "c"] & \\ \mathbf{L} \ar[r, swap, "\overline{c}"] & {[0,+\infty]} \end{tikzcd} \end{IEEEeqnarray} \end{lemma} \begin{proof} This proof is based on \cite[Section 4.2]{guttSymplecticCapacitiesPositive2018}. We claim that if $\varepsilon > 0$ and $(X, \lambda)$ is a nondegenerate Liouville domain in $\mathbf{L}_{\mathrm{ndg}}$, then $(X_{\varepsilon}, \lambda_{\varepsilon})$ is nondegenerate and \begin{IEEEeqnarray}{c+x*} \plabel{eq:capacity of deformed domain} c(X_\varepsilon, \lambda_\varepsilon) = e^{\varepsilon} c (X, \lambda). \end{IEEEeqnarray} To see this, notice that the time $\varepsilon$ flow of the Liouville vector field $Z$ of $\hat{X}$ restricts to a Liouville embedding $\phi \colon (X, e^{\varepsilon} \lambda) \longrightarrow (X_\varepsilon, \lambda_\varepsilon)$ and also to a contactomorphism $\phi \colon (\partial X, e^{\varepsilon} \lambda|_{\partial X}) \longrightarrow (\partial X_\varepsilon, \partial \lambda_\varepsilon|_{\partial X_\varepsilon})$. This shows that $(X_\varepsilon, \lambda_\varepsilon)$ is nondegenerate. In particular, $(X_\varepsilon, \lambda_\varepsilon) \in \mathbf{L}_{\mathrm{ndg}}$. Finally, \begin{IEEEeqnarray*}{rCls+x*} c(X_\varepsilon, \lambda_\varepsilon) & = & c(X, e^{\varepsilon} \lambda) & \quad [\text{by functoriality of $c$}] \\ & = & e^{\varepsilon} c(X,\lambda) & \quad [\text{by conformality}]. & \end{IEEEeqnarray*} This finishes the proof of Equation \eqref{eq:capacity of deformed domain}. Define functions $c^{\pm} \colon \mathbf{L} \longrightarrow [0,+\infty]$ by \begin{IEEEeqnarray*}{rCls+x*} c^+(X) & \coloneqq & \inf_{f^+ \in \mathcal{F}^+_X} c(X_{f^+}), \\ c^-(X) & \coloneqq & \sup_{f^- \in \mathcal{F}^-_X} c(X_{f^-}). 
\end{IEEEeqnarray*} We claim that if $(X, \lambda) \in \mathbf{L}$ is a Liouville domain then \begin{IEEEeqnarray}{c+x*} \plabel{eq:c minus equals c plus} c^-(X) = c^+(X). \end{IEEEeqnarray} Monotonicity of $c$ implies $c^-(X) \leq c^+(X)$. To show the reverse inequality, it is enough to show that $c^+(X) \leq e^{\varepsilon} c^-(X)$ for every $\varepsilon > 0$. For this, choose $f^- \in \mathcal{F}^{-}_X$ such that $\img f^- \subset (- \varepsilon, 0)$ and define $f^+ = f^- + \varepsilon$. By the previous discussion, $(X_{f^+}, \lambda_{f^+})$ is nondegenerate and $f^+ \in \mathcal{F}^+_X$. Then, \begin{IEEEeqnarray*}{rCls+x*} c^+(X) & = & \inf_{g^+ \in \mathcal{F}^+_X} c(X_{g^+}) & \quad [\text{by definition of $c^+$}] \\ & \leq & c(X_{f^+}) & \quad [\text{since $f^+ \in \mathcal{F}^+_X$}] \\ & = & e^{\varepsilon} c(X_{f^-}) & \quad [\text{by Equation \eqref{eq:capacity of deformed domain}}] \\ & \leq & e^{\varepsilon} \sup_{g^- \in \mathcal{F}^-_X} c(X_{g^-}) & \quad [\text{since $f^- \in \mathcal{F}^-_X$}] \\ & = & e^{\varepsilon} c^-(X) & \quad [\text{by definition of $c^-$}], \end{IEEEeqnarray*} which finishes the proof of Equation \eqref{eq:c minus equals c plus}. Moreover, if $(X, \lambda) \in \mathbf{L}_{\mathrm{ndg}}$ is nondegenerate, then $c^-(X) \leq c(X) \leq c^+(X) = c^-(X)$, which implies \begin{IEEEeqnarray*}{c+x*} c^-(X) = c(X) = c^+(X). \end{IEEEeqnarray*} We now show that $c^{\pm}$ are symplectic capacities. The conformality property is immediate. To prove monotonicity, let $X \longrightarrow Y$ be a morphism in $\mathbf{L}$. \begin{IEEEeqnarray*}{rCls+x*} c^-(X) & = & \sup_{f^- \in \mathcal{F}^-_X} c(X_{f^-}) & \quad [\text{by definition of $c^-$}] \\ & \leq & \inf_{g^+ \in \mathcal{F}^+_Y} c(Y_{g^+}) & \quad [\text{since $X_{f^-} \subset X \longrightarrow Y \subset Y_{g^+}$ and by monotonicity of $c$}] \\ & = & c^+(Y) & \quad [\text{by definition of $c^+$}]. \end{IEEEeqnarray*} The result follows from Equation \eqref{eq:c minus equals c plus}. To prove existence, simply notice that by the above discussion, the function $\overline{c} \coloneqq c^- = c^+ \colon \mathbf{L} \longrightarrow [0, +\infty]$ has all the desired properties. To prove uniqueness, let $\overline{c}$ be any function as in the statement of the lemma. We wish to show that $\overline{c} \coloneqq c^- = c^+$. We start by showing that $c^-(X) \leq \overline{c}(X)$. \begin{IEEEeqnarray*}{rCls+x*} c^-(X) & = & \sup_{f^- \in \mathcal{F}^-_X} c(X_{f^-}) & \quad [\text{by definition of $c^-$}] \\ & = & \sup_{f^- \in \mathcal{F}^-_X} \overline{c}(X_{f^-}) & \quad [\text{by assumption on $\overline{c}$}] \\ & \leq & \sup_{f^- \in \mathcal{F}^-_X} \overline{c}(X) & \quad [\text{by monotonicity of $\overline{c}$}] \\ & = & \overline{c}(X). \end{IEEEeqnarray*} Analogously, we can show that $c^+(X) \geq \overline{c}(X)$, which concludes the proof.\end{proof} \begin{lemma} \label{lem:can prove ineqs for ndg} For $i = 0,1$, let $c_i \colon \mathbf{L}_{\mathrm{ndg}} \rightarrow [0, +\infty]$ be symplectic capacities with extensions $\overline{c}_i \colon \mathbf{L} \rightarrow [0, +\infty]$ as in \cref{lem:c is the unique extension to lvds}. If $c_0(Y) \leq c_1(Y)$ for every nondegenerate Liouville domain $Y \in \mathbf{L}_{\mathrm{ndg}}$ then $\overline{c}_0(X) \leq \overline{c}_1(X)$ for every Liouville domain $X \in \mathbf{L}$. 
\end{lemma} \begin{proof} \begin{IEEEeqnarray*}{rCls+x*} \overline{c}_0(X) & = & \sup_{f^- \in \mathcal{F}^-_X} c_0(X_{f^-}) & \quad [\text{by the definition of $\overline{c}_0$ in \cref{lem:c is the unique extension to lvds}}] \\ & \leq & \sup_{f^- \in \mathcal{F}^-_X} c_1(X_{f^-}) & \quad [\text{by assumption on $c_0$ and $c_1$}] \\ & = & \overline{c}_1(X) & \quad [\text{by the definition of $\overline{c}_1$ in \cref{lem:c is the unique extension to lvds}}]. & \qedhere \end{IEEEeqnarray*} \end{proof} By the exposition above, if $c$ is a capacity of nondegenerate Liouville domains then it can be extended to a capacity of Liouville domains. In particular, $c(X)$ is defined for any star-shaped domain $X$. However, it will be useful to us to compute capacities of the cube $P(r)$ and of the nondisjoint union of cylinders $N(r)$. These spaces are not quite star-shaped domains, because they have corners and $N(r)$ is noncompact. So we will consider a further extension of the capacity $c$. Let $\mathbf{Star}$ be the category of star-shaped domains, where there is a unique morphism $X \longrightarrow Y$ if and only if $X \subset Y$. Denote by $\mathbf{Star}_{\mathrm{ncp}}$ the category of ``star-shaped domains'' which are possibly noncompact or possibly have corners, with the same notion of morphisms. \begin{lemma} \label{lem:c is the smallest extension to ss} Let $c \colon \mathbf{Star} \longrightarrow [0, +\infty]$ be a symplectic capacity. Define a symplectic capacity $\overline{c} \colon \mathbf{Star}_{\mathrm{ncp}} \longrightarrow [0, +\infty]$ by \begin{IEEEeqnarray*}{c+x*} \overline{c}(X) = \sup_{Y \subset X} c(Y), \end{IEEEeqnarray*} where the supremum is taken over star-shaped domains $Y \subset X$ which are compact and have smooth boundary. Then, the diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \mathbf{Star} \ar[dr, "c"] \ar[d] \\ \mathbf{Star}_{\mathrm{ncp}} \ar[r, swap, "\overline{c}"] & {[0, + \infty]} \end{tikzcd} \end{IEEEeqnarray*} commutes. Moreover, $\overline{c}$ is the smallest capacity making this diagram commute. \end{lemma} \begin{proof} It is immediate that $\overline{c}$ is a symplectic capacity. We show that the diagram commutes. If $X$ is a compact star-shaped domain with smooth boundary, then \begin{IEEEeqnarray*}{rCls+x*} c(X) & \leq & \sup_{Y \subset X} c(Y) & \quad [\text{since $X$ is compact and has smooth boundary}] \\ & \leq & c(X) & \quad [\text{by monotonicity}]. \end{IEEEeqnarray*} If $\tilde{c} \colon \mathbf{Star}_{\mathrm{ncp}} \longrightarrow [0, +\infty]$ is another capacity making the diagram commute, then \begin{IEEEeqnarray*}{rCls+x*} \overline{c}(X) & = & \sup_{Y \subset X} c(Y) & \quad [\text{by definition of $\overline{c}$}] \\ & = & \sup_{Y \subset X} \tilde{c}(Y) & \quad [\text{since $\tilde{c}$ makes the diagram commute}] \\ & \leq & \tilde{c}(X) & \quad [\text{by monotonicity of $\tilde{c}$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{remark} We will always assume that every capacity of nondegenerate Liouville domains that we define is extended as in \cref{lem:c is the unique extension to lvds,lem:c is the smallest extension to ss} to possibly degenerate Liouville domains and to ``star-shaped domains'' which are possibly noncompact or possibly have corners. \end{remark} \section{Lagrangian capacity} Here, we define the Lagrangian capacity (\cref{def:lagrangian capacity}) and state its properties (\cref{prop:properties of cL}). 
One of the main goals of this thesis is to study whether the Lagrangian capacity can be computed in some cases, for example for toric domains. At the end of the section, we state some easy inequalities concerning the Lagrangian capacity (\cref{lem:c square leq c lag,lem:c square geq delta}), known computations (\cref{prp:cl of ball,prp:cl of cylinder}) and finally the main conjecture of this thesis (\cref{conj:the conjecture}), which is inspired by all the previous results. The Lagrangian capacity is defined in terms of the minimal area of Lagrangian submanifolds, which we now define.
\begin{definition}
Let $(X,\omega)$ be a symplectic manifold. If $L$ is a Lagrangian submanifold of $X$, then we define the \textbf{minimal symplectic area of} $L$, denoted $A_{\mathrm{min}}(L)$, by
\begin{IEEEeqnarray*}{c+x*}
A_{\mathrm{min}}(L) \coloneqq \inf \{ \omega(\sigma) \mid \sigma \in \pi_2(X,L), \, \omega(\sigma) > 0 \}.
\end{IEEEeqnarray*}
\end{definition}
\begin{lemma} \label{lem:properties of minimal area}
Let $\iota \colon (X,\omega) \longrightarrow (X',\omega')$ be a symplectic embedding, $L \subset X$ be an embedded Lagrangian submanifold and $L' = \iota(L)$. In this case,
\begin{enumerate}
\item \label{lem:properties of minimal area 1} $A_{\mathrm{min}}(L) \geq A_{\mathrm{min}}(L')$;
\item \label{lem:properties of minimal area 2} $A_{\mathrm{min}}(L) = A_{\mathrm{min}}(L')$, provided that $\pi_2(X',\iota(X)) = 0$.
\end{enumerate}
\end{lemma}
\begin{proof}
\ref{lem:properties of minimal area 1}: The diagram
\begin{IEEEeqnarray}{c+x*}
\plabel{eq:diag minimal area}
\begin{tikzcd}[ampersand replacement = \&]
\pi_2(X,L) \ar[d, swap, "\iota_*"] \ar[dr, "\omega"] \\
\pi_2(X',L') \ar[r, swap, "\omega'"] \& \R
\end{tikzcd}
\end{IEEEeqnarray}
commutes, so $\{ \omega(\sigma) \mid \sigma \in \pi_2(X,L), \, \omega(\sigma) > 0 \} \subset \{ \omega'(\tau) \mid \tau \in \pi_2(X',L'), \, \omega'(\tau) > 0 \}$. Therefore, the infimum defining $A_{\mathrm{min}}(L')$ is taken over a larger set than the one defining $A_{\mathrm{min}}(L)$, which gives $A_{\mathrm{min}}(L) \geq A_{\mathrm{min}}(L')$.
\ref{lem:properties of minimal area 2}: Considering the long exact sequence of the triple $(X',\iota(X),L')$,
\begin{IEEEeqnarray*}{c+x*}
\begin{tikzcd}[ampersand replacement = \&]
\cdots \ar[r] \& \pi_2(\iota(X),L') \ar[r] \& \pi_2(X',L') \ar[r] \& \pi_2(X',\iota(X)) = 0
\end{tikzcd}
\end{IEEEeqnarray*}
we conclude that $\iota_{*} \colon \pi_2(X,L) \longrightarrow \pi_2(X',L')$ is surjective. Again by diagram \eqref{eq:diag minimal area}, surjectivity of $\iota_*$ implies that the two sets of positive symplectic areas considered above coincide, and therefore $A_{\mathrm{min}}(L) = A_{\mathrm{min}}(L')$.
\end{proof}
\begin{lemma} \label{lem:a min with exact symplectic manifold}
Let $(X,\lambda)$ be an exact symplectic manifold and $L \subset X$ be a Lagrangian submanifold. If $\pi_1(X) = 0$, then
\begin{IEEEeqnarray*}{c+x*}
A _{\mathrm{min}}(L) = \inf \left\{ \lambda(\rho) \ | \ \rho \in \pi_1(L), \ \lambda(\rho) > 0 \right\}.
\end{IEEEeqnarray*}
\end{lemma}
\begin{proof}
The diagram
\begin{IEEEeqnarray*}{c+x*}
\begin{tikzcd}[ampersand replacement = \&]
\pi_2(L) \ar[d, swap, "0"] \ar[r] \& \pi_2(X) \ar[d, "\omega"] \ar[r] \& \pi_2(X,L) \ar[d, "\omega"] \ar[r, two heads,"\del"] \& \pi_1(L) \ar[d, "\lambda"] \ar[r, "0"] \& \pi_1(X) \ar[d, "\lambda"] \\
\R \ar[r, equals] \& \R \ar[r, equals] \& \R \ar[r, equals] \& \R \ar[r, equals] \& \R
\end{tikzcd}
\end{IEEEeqnarray*}
commutes, where $\del([\sigma]) = [\sigma|_{S^1}]$, and the top row is exact. Since $\pi_1(X) = 0$, exactness implies that $\del$ is surjective, and commutativity of the diagram gives $\omega(\sigma) = \lambda(\del \sigma)$ for every $\sigma \in \pi_2(X,L)$. Therefore, the sets $\{ \omega(\sigma) \mid \sigma \in \pi_2(X,L), \, \omega(\sigma) > 0 \}$ and $\{ \lambda(\rho) \mid \rho \in \pi_1(L), \, \lambda(\rho) > 0 \}$ coincide, and so do their infima.
\end{proof}
\begin{definition}[{\cite[Section 1.2]{cieliebakPuncturedHolomorphicCurves2018}}]
\phantomsection\label{def:lagrangian capacity}
Let $(X,\omega)$ be a symplectic manifold.
We define the \textbf{Lagrangian capacity} of $(X,\omega)$, denoted $c_L(X,\omega)$, by \begin{IEEEeqnarray*}{c} c_L(X,\omega) \coloneqq \sup \{ A_{\mathrm{min}}(L) \mid L \subset X \text{ is an embedded Lagrangian torus}\}. \end{IEEEeqnarray*} \end{definition} \begin{proposition}[{\cite[Section 1.2]{cieliebakPuncturedHolomorphicCurves2018}}] \label{prop:properties of cL} The Lagrangian capacity $c_L$ satisfies: \begin{description} \item[(Monotonicity)] If $(X,\omega) \longrightarrow (X',\omega')$ is a symplectic embedding with $\pi_2(X',\iota(X)) = 0$, then $c_L(X,\omega) \leq c_L(X',\omega')$. \item[(Conformality)] If $\alpha \neq 0$, then $c_L(X,\alpha \omega) = |\alpha| \, c_L(X,\omega)$. \end{description} \end{proposition} \begin{proof} We prove monotonicity. \begin{IEEEeqnarray*}{rCls+x*} c_L(X,\omega) & = & \sup _{L \subset X} A _{\min}(L) & \quad [\text{by definition of $c_L$}] \\ & \leq & \sup _{L' \subset X'} A _{\min}(L') & \quad [\text{by \cref{lem:properties of minimal area}}] \\ & = & c_L(X',\omega') & \quad [\text{by definition of $c_L$}]. \end{IEEEeqnarray*} We prove conformality. Note that a submanifold $L \subset X$ is Lagrangian with respect to $\omega$ if and only if it is Lagrangian with respect to $\alpha \omega$. \begin{IEEEeqnarray*}{rCls+x*} c_L(X,\alpha \omega) & = & \sup _{L \subset (X,\alpha \omega)} A _{\mathrm{min}}(L,\alpha \omega) & \quad [\text{by definition of $c_L$}] \\ & = & \sup _{L \subset (X,\omega) } |\alpha| A _{\mathrm{min}}(L, \omega) & \quad [\text{by definition of minimal area}] \\ & = & |\alpha| \, c_L(X,\omega) & \quad [\text{by definition of $c_L$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{lemma} \label{lem:c square leq c lag} If $X$ is a star-shaped domain, then $c_L(X) \geq c_P(X)$. \end{lemma} \begin{proof} Let $\iota \colon P(a) \longrightarrow X$ be a symplectic embedding, for some $a > 0$. We want to show that $c_L(X) \geq a$. Define $T = \{ z \in \C^n \mid |z_1|^2 = a/\pi, \ldots, |z_n|^2 = a/ \pi \} \subset \partial P(a)$ and $L = \iota(T)$. Then, \begin{IEEEeqnarray*}{rCls+x*} c_L(X) & \geq & A_{\mathrm{min}}(L) & \quad [\text{by definition of $c_L$}] \\ & = & A_{\mathrm{min}}(T) & \quad [\text{by \cref{lem:properties of minimal area}}] \\ & = & a & \quad [\text{by \cref{lem:a min with exact symplectic manifold}}]. & \qedhere \end{IEEEeqnarray*} \end{proof} Recall that if $X_{\Omega}$ is a toric domain, its diagonal is given by $\delta_{\Omega} \coloneqq \sup \{ a \mid (a, \ldots, a) \in \Omega \}$ (see \cref{def:moment map}). \begin{lemma} \label{lem:c square geq delta} If $X_{\Omega}$ is a convex or concave toric domain, then $c_P(X_{\Omega}) \geq \delta_\Omega$. \end{lemma} \begin{proof} Since $X_{\Omega}$ is a convex or concave toric domain, we have that $P(\delta_\Omega) \subset X_{\Omega}$. The result follows by definition of $c_P$. \end{proof} Actually, Gutt--Hutchings show that $c_P(X_{\Omega}) = \delta_\Omega$ for any convex or concave toric domain $X_{\Omega}$ (\cite[Theorem 1.18]{guttSymplecticCapacitiesPositive2018}). However, for our purposes we will only need the inequality in \cref{lem:c square geq delta}. We now consider the results by Cieliebak--Mohnke for the Lagrangian capacity of the ball and the cylinder. \begin{proposition}[{\cite[Corollary 1.3]{cieliebakPuncturedHolomorphicCurves2018}}] \phantomsection\label{prp:cl of ball} The Lagrangian capacity of the ball is \begin{IEEEeqnarray*}{c+x*} c_L(B^{2n}(1)) = \frac{1}{n}. 
\end{IEEEeqnarray*} \end{proposition} \begin{proposition}[{\cite[p.~215-216]{cieliebakPuncturedHolomorphicCurves2018}}] \label{prp:cl of cylinder} The Lagrangian capacity of the cylinder is \begin{IEEEeqnarray*}{c+x*} c_L(Z^{2n}(1)) = 1. \end{IEEEeqnarray*} \end{proposition} By \cref{lem:c square leq c lag,lem:c square geq delta}, if $X_{\Omega}$ is a convex or concave toric domain then $c_L(X_\Omega) \geq \delta_\Omega$. But as we have seen in \cref{prp:cl of ball,prp:cl of cylinder}, if $X_\Omega$ is the ball or the cylinder then $c_L(X_\Omega) = \delta_\Omega$. This motivates \cref{conj:cl of ellipsoid} below for the Lagrangian capacity of an ellipsoid, and more generally \cref{conj:the conjecture} below for the Lagrangian capacity of any convex or concave toric domain. \begin{conjecture}[{\cite[Conjecture 1.5]{cieliebakPuncturedHolomorphicCurves2018}}] \label{conj:cl of ellipsoid} The Lagrangian capacity of the ellipsoid is \begin{IEEEeqnarray*}{c+x*} c_L(E(a_1,\ldots,a_n)) = \p{}{2}{\frac{1}{a_1} + \cdots + \frac{1}{a_n}}^{-1}. \end{IEEEeqnarray*} \end{conjecture} \begin{conjecture} \label{conj:the conjecture} If $X_{\Omega}$ is a convex or concave toric domain then \begin{IEEEeqnarray*}{c+x*} c_L(X_{\Omega}) = \delta_\Omega. \end{IEEEeqnarray*} \end{conjecture} In \cref{lem:computation of cl,thm:my main theorem} we present our results concerning \cref{conj:the conjecture}. \section{Gutt--Hutchings capacities} \label{sec:equivariant capacities} In this section we will define the Gutt--Hutchings capacities (\cref{def:gutt hutchings capacities}) and the $S^1$-equivariant symplectic homology capacities (\cref{def:s1esh capacities}), and list their properties (\cref{thm:properties of gutt-hutchings capacities,prp:properties of s1esh capacities} respectively). We will also compare the two capacities (\cref{thm:ghc and s1eshc}). The definition of these capacities relies on $S^1$-equivariant symplectic homology. In the commutative diagram below, we display the modules and maps which will play a role in this section, for a nondegenerate Liouville domain $X$. \begin{IEEEeqnarray}{c+x*} \plabel{eq:diagram for s1esh capacities} \begin{tikzcd} \homology{}{S^1}{}{S}{H}{(\varepsilon,a]}{}(X) \ar[r, "\delta^a_0"] \ar[d, swap, "\iota^a"] & \homology{}{S^1}{}{S}{H}{\varepsilon}{}(X) \ar[d, two heads, hook, "\alpha"] \ar[r, "\iota^{a,\varepsilon}"] & \homology{}{S^1}{}{S}{H}{a}{}(X) \\ \homology{}{S^1}{}{S}{H}{+}{}(X) \ar[ur, "\delta_0"] \ar[r, swap, "\delta"] & H_\bullet(BS^1;\Q) \otimes H_\bullet(X, \partial X;\Q) \end{tikzcd} \end{IEEEeqnarray} Here, $\iota^a$ and $\iota^{a, \varepsilon}$ are the maps induced by the action filtration, $\delta_0$ and $\delta$ are the maps from \cref{def:delta map} and $\alpha$ is the isomorphism from \cref{lem:iso from symplectic to singular}. We point out that every vertex in the above diagram has a $U$ map and every map in the diagram commutes with this $U$ map. Specifically, all the $S^1$-equivariant symplectic homologies have the $U$ map given as in \cref{def:U map} and $H_\bullet(BS^1;\Q) \otimes H_\bullet(X, \partial X;\Q) \cong \Q[u] \otimes H_\bullet(X, \partial X;\Q)$ has the map $U \coloneqq u^{-1} \otimes \id$. We will also make use of a version of diagram \eqref{eq:diagram for s1esh capacities} in the case where $X$ is star-shaped, namely diagram \eqref{eq:diagram for s1esh capacities case ss} below. In this case, the modules in the diagram admit gradings and every map is considered to be a map in a specific degree. 
By \cite[Proposition 3.1]{guttSymplecticCapacitiesPositive2018}, $\delta$ and $\delta_0$ are isomorphisms. \begin{IEEEeqnarray}{c+x*} \plabel{eq:diagram for s1esh capacities case ss} \begin{tikzcd} \homology{}{S^1}{}{S}{H}{(\varepsilon,a]}{n - 1 + 2k}(X) \ar[r, "\delta^a_0"] \ar[d, swap, "\iota^a"] & \homology{}{S^1}{}{S}{H}{\varepsilon}{n - 2 + 2k}(X) \ar[d, two heads, hook, "\alpha"] \ar[r, "\iota^{a,\varepsilon}"] & \homology{}{S^1}{}{S}{H}{a}{n - 2 + 2k}(X) \\ \homology{}{S^1}{}{S}{H}{+}{n - 1 + 2k}(X) \ar[ur, two heads, hook, "\delta_0"] \ar[r, swap, two heads, hook, "\delta"] & H_{2k-2}(BS^1;\Q) \otimes H_{2n}(X, \partial X;\Q) \end{tikzcd} \end{IEEEeqnarray} \begin{definition}[{\cite[Definition 4.1]{guttSymplecticCapacitiesPositive2018}}] \label{def:gutt hutchings capacities} If $k \in \Z_{\geq 1}$ and $(X,\lambda)$ is a nondegenerate Liouville domain, the \textbf{Gutt--Hutchings capacities} of $X$, denoted $\cgh{k}(X)$, are defined as follows. Consider the map \begin{IEEEeqnarray*}{c+x*} \delta \circ U^{k-1} \circ \iota^a \colon \homology{}{S^1}{}{S}{H}{(\varepsilon,a]}{}(X) \longrightarrow H_\bullet(BS^1;\Q) \otimes H_\bullet(X, \partial X;\Q) \end{IEEEeqnarray*} from diagram \eqref{eq:diagram for s1esh capacities}. Then, we define \begin{IEEEeqnarray*}{c+x*} \cgh{k}(X) \coloneqq \inf \{ a > 0 \mid [\mathrm{pt}] \otimes [X] \in \img (\delta \circ U^{k-1} \circ \iota^a) \}. \end{IEEEeqnarray*} \end{definition} \begin{theorem}[{\cite[Theorem 1.24]{guttSymplecticCapacitiesPositive2018}}] \label{thm:properties of gutt-hutchings capacities} The functions $\cgh{k}$ of Liouville domains satisfy the following axioms, for all equidimensional Liouville domains $(X,\lambda_X)$ and $(Y,\lambda_Y)$: \begin{description} \item[(Monotonicity)] If $X \longrightarrow Y$ is a generalized Liouville embedding then $\cgh{k}(X) \leq \cgh{k}(Y)$. \item[(Conformality)] If $\alpha > 0$ then $\cgh{k}(X, \alpha \lambda_X) = \alpha \, \cgh{k}(X, \lambda_X)$. \item[(Nondecreasing)] $\cgh{1}(X) \leq \cgh{2}(X) \leq \cdots \leq +\infty$. \item[(Reeb orbits)] If $\cgh{k}(X) < + \infty$, then $\cgh{k}(X) = \mathcal{A}(\gamma)$ for some Reeb orbit $\gamma$ which is contractible in $X$. \end{description} \end{theorem} The following lemma provides an alternative definition of $\cgh{k}$, in the spirit of \cite{floerApplicationsSymplecticHomology1994}. \begin{lemma} \label{def:ck alternative} Let $(X,\lambda)$ be a nondegenerate Liouville domain such that $\pi_1(X) = 0$ and $c_1(TX)|_{\pi_2(X)} = 0$. Let $E \subset \C^n$ be a nondegenerate star-shaped domain and suppose that $\phi \colon E \longrightarrow X$ is a symplectic embedding. Consider the map \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \homology{}{S^1}{}{S}{H}{(\varepsilon,a]}{n - 1 + 2k}(X) \ar[r, "\iota^a"] & \homology{}{S^1}{}{S}{H}{+}{n - 1 + 2k}(X) \ar[r, "\phi_!"] & \homology{}{S^1}{}{S}{H}{+}{n - 1 + 2k}(E) \end{tikzcd} \end{IEEEeqnarray*} Then, $\cgh{k}(X) = \inf \{ a > 0 \mid \phi_! \circ \iota^a \text{ is nonzero} \}$. 
\end{lemma} \begin{proof} For every $a \in \R$ consider the following commutative diagram: \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \homology{}{S^1}{}{S}{H}{(\varepsilon, a]}{n - 1 + 2k}(X) \ar[r, "\iota^a_X"] \ar[d, swap, "\phi_!^a"] & \homology{}{S^1}{}{S}{H}{+}{n - 1 + 2k}(X) \ar[r, "U ^{k-1}_X"] \ar[d, "\phi_!"] & \homology{}{S^1}{}{S}{H}{+}{n+1}(X) \ar[r, "\delta_X"] \ar[d, "\phi_!"] & H_0(BS^1) \tensorpr H_{2n}(X,\del X) \ar[d, hook, two heads, "\id \tensorpr \rho"] \\ \homology{}{S^1}{}{S}{H}{(\varepsilon, a]}{n - 1 + 2k}(E) \ar[r, swap, "\iota^a_E"] & \homology{}{S^1}{}{S}{H}{+}{n - 1 + 2k}(E) \ar[r, swap, hook, two heads, "U ^{k-1}_E"] & \homology{}{S^1}{}{S}{H}{+}{n+1}(E) \ar[r, swap, hook, two heads, "\delta_E"] & H_0(BS^1) \tensorpr H_{2n}(E,\del E) \end{tikzcd} \end{IEEEeqnarray*} By \cite[Proposition 3.1]{guttSymplecticCapacitiesPositive2018} and since $E$ is star-shaped, the maps $U_E$ and $\delta_E$ are isomorphisms. Since $\rho([X]) = [E]$, the map $\rho$ is an isomorphism. By definition, $\cgh{k}$ is the infimum over $a$ such that the top arrow is surjective. This condition is equivalent to $\phi_! \circ \iota^a_X$ being nonzero. \end{proof} The following computation will be useful to us in the proofs of \cref{lem:computation of cl,thm:my main theorem}. \begin{lemma}[{\cite[Lemma 1.19]{guttSymplecticCapacitiesPositive2018}}] \label{lem:cgh of nondisjoint union of cylinders} $\cgh{k}(N^{2n}(\delta)) = \delta \, (k + n - 1)$. \end{lemma} We now consider other capacities which can be defined using $S^1$-equivariant symplectic homology. \begin{definition}[{\cite[Section 2.5]{irieSymplecticHomologyFiberwise2021}}] \label{def:s1esh capacities} If $k \in \Z_{\geq 1}$ and $(X,\lambda)$ is a nondegenerate Liouville domain, the \textbf{$S^1$-equivariant symplectic homology capacities} of $X$, denoted $\csh{k}(X)$, are defined as follows. Consider the map \begin{IEEEeqnarray*}{c+x*} \iota^{a,\varepsilon} \circ \alpha^{-1} \colon H_\bullet(BS^1;\Q) \otimes H_\bullet(X, \partial X;\Q) \longrightarrow \homology{}{S^1}{}{S}{H}{a}{}(X) \end{IEEEeqnarray*} from diagram \eqref{eq:diagram for s1esh capacities}. Then, we define \begin{IEEEeqnarray*}{c+x*} \csh{k}(X) \coloneqq \inf \{ a > 0 \mid \iota^{a,\varepsilon} \circ \alpha^{-1}([\C P^{k-1}] \otimes [X]) = 0 \}. \end{IEEEeqnarray*} \end{definition} \begin{theorem} \label{prp:properties of s1esh capacities} The functions $\csh{k}$ of Liouville domains satisfy the following axioms, for all Liouville domains $(X,\lambda_X)$ and $(Y,\lambda_Y)$ of the same dimension: \begin{description} \item[(Monotonicity)] If $X \longrightarrow Y$ is a generalized Liouville embedding then $\csh{k}(X) \leq \csh{k}(Y)$. \item[(Conformality)] If $\mu > 0$ then $\csh{k}(X, \mu \lambda_X) = \mu \, \csh{k}(X, \lambda_X)$. \item[(Nondecreasing)] $\csh{1}(X) \leq \csh{2}(X) \leq \cdots \leq +\infty$. \end{description} \end{theorem} \begin{proof} We prove monotonicity. 
Consider the following commutative diagram: \begin{IEEEeqnarray}{c+x*} \plabel{eq:s1eshc diagram} \begin{tikzcd} H_\bullet(BS^1;\Q) \otimes H_\bullet(Y, \partial Y;\Q) \ar[d, swap, "\id \otimes \rho"] & \homology{}{S^1}{}{S}{H}{\varepsilon}{}(Y) \ar[l, swap, hook', two heads, "\alpha_Y"] \ar[r, "\iota^{a, \varepsilon}_Y"] \ar[d, "\phi_!^\varepsilon"] & \homology{}{S^1}{}{S}{H}{a}{}(Y) \ar[d, "\phi^a_!"] \\ H_\bullet(BS^1;\Q) \otimes H_\bullet(X, \partial X;\Q) & \homology{}{S^1}{}{S}{H}{\varepsilon}{}(X) \ar[l, hook', two heads, "\alpha_X"] \ar[r, swap, "\iota^{a, \varepsilon}_X"] & \homology{}{S^1}{}{S}{H}{a}{}(X) \end{tikzcd} \end{IEEEeqnarray} If $\iota_Y^{a,\varepsilon} \circ \alpha_Y^{-1}([\C P^{k-1}] \otimes [Y]) = 0$, then \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\iota_X^{a,\varepsilon} \circ \alpha_X^{-1}([\C P^{k-1}] \otimes [X])} \\ \quad & = & \iota_X^{a,\varepsilon} \circ \alpha_X^{-1} \circ (\id \otimes \rho)([\C P^{k-1}] \otimes [Y]) & \quad [\text{since $\rho([Y]) = [X]$}] \\ & = & \phi_! \circ \iota_Y^{a,\varepsilon} \circ \alpha_{Y}^{-1} ([\C P^{k-1}] \otimes [Y]) & \quad [\text{by diagram \eqref{eq:s1eshc diagram}}] \\ & = & 0 & \quad [\text{by assumption}]. \end{IEEEeqnarray*} To prove conformality, choose $\varepsilon > 0$ such that $\varepsilon, \mu \varepsilon < \min \operatorname{Spec}(\partial X, \lambda|_{\partial X})$. Since the diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} H_\bullet(BS^1;\Q) \otimes H_\bullet(X, \partial X;\Q) \ar[d, equals] & \homology{}{S^1}{}{S}{H}{\varepsilon}{}(X, \lambda) \ar[d, equals] \ar[l, swap, hook', two heads, "\alpha_{\lambda}"] \ar[r, "\iota^{a, \varepsilon}_\lambda"] & \homology{}{S^1}{}{S}{H}{a}{}(X, \lambda) \ar[d, equals] \\ H_\bullet(BS^1;\Q) \otimes H_\bullet(X, \partial X;\Q) & \homology{}{S^1}{}{S}{H}{\mu \varepsilon}{}(X, \mu \lambda) \ar[l, hook', two heads, "\alpha_{\mu \lambda}"] \ar[r, swap, "\iota^{\mu a, \mu \varepsilon}_{\mu \lambda}"] & \homology{}{S^1}{}{S}{H}{\mu a}{}(X, \mu \lambda) \end{tikzcd} \end{IEEEeqnarray*} commutes (by \cite[Proposition 3.1]{guttSymplecticCapacitiesPositive2018}), the result follows. To prove the nondecreasing property, note that if $\iota^{a,\varepsilon} \circ \alpha^{-1}([\C P ^{k}] \otimes [X]) = 0$, then \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\iota^{a,\varepsilon} \circ \alpha^{-1}([\C P ^{k-1}] \otimes [X])}\\ \quad & = & \iota^{a,\varepsilon} \circ \alpha^{-1} \circ U ([\C P ^{k}] \otimes [X]) & \quad [\text{since $U([\C P^k] \otimes [X]) = [\C P^{k-1}] \otimes [X]$}] \\ & = & U^{a} \circ \iota^{a,\varepsilon} \circ \alpha^{-1} ([\C P ^{k}] \otimes [X]) & \quad [\text{since $\iota^{a,\varepsilon}$ and $\alpha$ commute with $U$}] \\ & = & 0 & \quad [\text{by assumption}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{theorem} \label{thm:ghc and s1eshc} If $(X, \lambda)$ is a Liouville domain, then \begin{enumerate} \item \label{thm:comparison cgh csh 1} $\cgh{k}(X) \leq \csh{k}(X)$; \item \label{thm:comparison cgh csh 2} $\cgh{k}(X) = \csh{k}(X)$ provided that $X$ is star-shaped. \end{enumerate} \end{theorem} \begin{proof} By \cref{lem:can prove ineqs for ndg}, we may assume that $X$ is nondegenerate. 
Since \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\iota^{a,\varepsilon} \circ \alpha^{-1}([\C P ^{k-1}] \otimes [X]) = 0}\\ \quad & \Longleftrightarrow & \alpha^{-1}([\C P ^{k-1}] \otimes [X]) \in \ker \iota^{a,\varepsilon} & \quad [\text{by definition of kernel}] \\ \quad & \Longleftrightarrow & \alpha^{-1}([\C P ^{k-1}] \otimes [X]) \in \img \delta^a_0 & \quad [\text{since the top row of \eqref{eq:diagram for s1esh capacities} is exact}] \\ \quad & \Longleftrightarrow & [\C P ^{k-1}] \otimes [X] \in \img (\alpha \circ \delta^a_0) & \quad [\text{by definition of image}] \\ \quad & \Longleftrightarrow & [\C P ^{k-1}] \otimes [X] \in \img (\delta \circ \iota^a) & \quad [\text{since diagram \eqref{eq:diagram for s1esh capacities} commutes}] \\ \quad & \Longrightarrow & [\mathrm{pt}] \otimes [X] \in \img (U^{k-1} \circ \delta \circ \iota^a) & \quad [\text{since $U^{k-1}([\C P ^{k-1}] \otimes [X]) = [\mathrm{pt}] \otimes [X]$}] \\ \quad & \Longleftrightarrow & [\mathrm{pt}] \otimes [X] \in \img (\delta \circ U^{k-1} \circ \iota^a) & \quad [\text{since $\delta$ and $U$ commute}], \end{IEEEeqnarray*} we have that $\cgh{k}(X) \leq \csh{k}(X)$. If $X$ is a star-shaped domain, we can view the maps of the computation above as being the maps in diagram \eqref{eq:diagram for s1esh capacities case ss}, i.e. they are defined in a specific degree. In this case, $U^{k-1} \colon H_{2k-2}(BS^1) \otimes H_{2n}(X, \partial X) \longrightarrow H_{0}(BS^1) \otimes H_{2n}(X, \partial X)$ is an isomorphism, and therefore the implication in the previous computation is actually an equivalence. \end{proof} \begin{remark} The capacities $\cgh{k}$ and $\csh{k}$ are defined in terms of a certain homology class being in the kernel or in the image of a map with domain or target the $S^1$-equivariant symplectic homology. Other authors have constructed capacities in an analogous manner, for example Viterbo \cite[Definition 2.1]{viterboSymplecticTopologyGeometry1992} and \cite[Section 5.3]{viterboFunctorsComputationsFloer1999}, Schwarz \cite[Definition 2.6]{schwarzActionSpectrumClosed2000} and Ginzburg--Shon \cite[Section 3.1]{ginzburgFilteredSymplecticHomology2018}. \end{remark} \section{McDuff--Siegel capacities} We now define the McDuff--Siegel capacities. These will assist us in our goal of proving \cref{conj:the conjecture} (at least in particular cases) because they can be compared with the Lagrangian capacity (\cref{thm:lagrangian vs g tilde}) and with the Gutt--Hutchings capacities (\cref{prp:g tilde and cgh}). \begin{definition}[{\cite[Definition 3.3.1]{mcduffSymplecticCapacitiesUnperturbed2022}}] \label{def:g tilde} Let $(X,\lambda)$ be a nondegenerate Liouville domain. For $\ell, k \in \Z_{\geq 1}$, we define the \textbf{McDuff--Siegel capacities} of $X$, denoted $\tilde{\mathfrak{g}}^{\leq \ell}_k(X)$, as follows. Choose $x \in \itr X$ and $D$ a symplectic divisor at $x$. Then, \begin{IEEEeqnarray*}{c+x*} \tilde{\mathfrak{g}}^{\leq \ell}_k(X) \coloneqq \sup_{J \in \mathcal{J}(X,D)} \mathop{\inf\vphantom{\sup}}_{\Gamma_1, \ldots, \Gamma_p} \sum_{i=1}^{p} \mathcal{A}(\Gamma_i), \end{IEEEeqnarray*} where the infimum is over tuples of Reeb orbits $\Gamma_1, \ldots, \Gamma_p$ such that there exist integers $k_1, \ldots, k_p \geq 1$ with \begin{IEEEeqnarray}{c+x*} \phantomsection\label{eq:g tilde two definitions conditions} \sum_{i=1}^{p} \# \Gamma_i \leq \ell, \qquad \sum_{i=1}^{p} k_i \geq k, \qquad \bigproduct_{i=1}^{p} \mathcal{M}_X^J(\Gamma_i)\p{<}{}{\mathcal{T}^{(k_i)}x} \neq \varnothing. 
\end{IEEEeqnarray}
\end{definition}
The following theorem shows that the definition of $\tilde{\mathfrak{g}}^{\leq \ell}_k$ we give in \cref{def:g tilde} and the one given in \cite[Definition 3.3.1]{mcduffSymplecticCapacitiesUnperturbed2022} are equal.
\begin{theorem}[{\cite[Remark 3.1.2]{mcduffSymplecticCapacitiesUnperturbed2022}}]
\label{thm:g tilde two definitions}
If $(X, \lambda)$ is a nondegenerate Liouville domain, $\ell, k \in \Z_{\geq 1}$, $x \in \itr X$ and $D$ is a symplectic divisor through $x$, then
\begin{IEEEeqnarray*}{c+x*}
\tilde{\mathfrak{g}}^{\leq \ell}_k(X) = \sup_{J \in \mathcal{J}(X,D)} \mathop{\inf\vphantom{\sup}}_{\Gamma} \mathcal{A}(\Gamma),
\end{IEEEeqnarray*}
where the infimum is taken over tuples of Reeb orbits $\Gamma = (\gamma_1, \ldots, \gamma_p)$ such that $p \leq \ell$ and $\overline{\mathcal{M}}^{J}_{X}(\Gamma)\p{<}{}{\mathcal{T}^{(k)}x} \neq \varnothing$.
\end{theorem}
\begin{proof}
$(\geq)$: Let $\Gamma_1, \ldots, \Gamma_p$ and $k_1, \ldots, k_p$ be as in \eqref{eq:g tilde two definitions conditions}. We wish to show that there exists a tuple of Reeb orbits $\Gamma$ such that
\begin{IEEEeqnarray*}{c+x*}
\# \Gamma \leq \ell, \qquad \mathcal{A}(\Gamma) \leq \sum_{i=1}^{p} \mathcal{A}(\Gamma_i), \qquad \overline{\mathcal{M}}_X^J(\Gamma)\p{<}{}{\mathcal{T}^{(k)}x} \neq \varnothing.
\end{IEEEeqnarray*}
By \cref{rmk:compactifications with tangency}, the tuple $\Gamma = \Gamma_1 \cup \cdots \cup \Gamma_p$ is as desired.
$(\leq)$: Let $\Gamma^+$ be a tuple of Reeb orbits such that $\# \Gamma^+ \leq \ell$ and $\overline{\mathcal{M}}^{J}_{X}(\Gamma^+)\p{<}{}{\mathcal{T}^{(k)}x} \neq \varnothing$. We wish to show that there exist tuples of Reeb orbits $\Gamma^-_1, \ldots, \Gamma^-_p$ and numbers $k_1, \ldots, k_p$ satisfying \eqref{eq:g tilde two definitions conditions} and
\begin{IEEEeqnarray*}{c+x*}
\sum_{i=1}^{p} \mathcal{A}(\Gamma^-_i) \leq \mathcal{A}(\Gamma^+).
\end{IEEEeqnarray*}
Choose $F = (F^1, \ldots, F^N) \in \overline{\mathcal{M}}^J_X(\Gamma^+)\p{<}{}{\mathcal{T}^{(k)}x}$ and let $C$ be the component of $F$ which inherits the constraint $\p{<}{}{\mathcal{T}^{(k)}x}$. First, we prove the result in the case where $C$ is nonconstant. In this case, $C \in \mathcal{M}^J_X(\Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x}$ for some tuple of Reeb orbits $\Gamma^-$. By \cref{lem:action energy for holomorphic}, $\mathcal{A}(\Gamma^-) \leq \mathcal{A}(\Gamma^+)$. We show that $\# \Gamma^- \leq \# \Gamma^+ \leq \ell$. Let $\mathbf{n}$ be the set of nodal points of $C$. Since the graph of $F$ is a tree, for every $\gamma \in \Gamma^+$ there exists a unique $f(\gamma) \in \Gamma^- \cup \mathbf{n}$ such that the subtree of $F$ emanating from $C$ at $f(\gamma)$ is positively asymptotic to $\gamma$. By the maximum principle (\cref{thm:maximum principle holomorphic}), $f \colon \Gamma^+ \longrightarrow \Gamma^- \cup \mathbf{n}$ is surjective, and therefore $\# \Gamma^- \leq \# \Gamma^+ \leq \ell$. Hence, the choice $p = 1$, $\Gamma^-_1 = \Gamma^-$ and $k_1 = k$ has the required properties.
Now, we prove the result in the case where $C$ is constant. Let $C_1, \ldots, C_p$ be the nonconstant components near $C$ as in \cref{rmk:compactifications with tangency}. There exist tuples of Reeb orbits $\Gamma_1^-, \ldots, \Gamma_p^-$ and $k_1, \ldots, k_p \in \Z_{\geq 1}$ such that
\begin{IEEEeqnarray*}{c+x*}
\sum_{i=1}^{p} \mathcal{A}(\Gamma_i^-) \leq \mathcal{A}(\Gamma^+), \qquad \sum_{i=1}^{p} k_i \geq k, \qquad C_i \in \mathcal{M}^J_X(\Gamma_i^-)\p{<}{}{\mathcal{T}^{(k_i)}x} \neq \varnothing.
\end{IEEEeqnarray*} By a reasoning similar to the previous case, $\sum_{i=1}^{p} \# \Gamma_i^- \leq \# \Gamma^+ \leq \ell$. \end{proof} \begin{remark} \phantomsection\label{cor:g tilde 1} If $(X, \lambda)$ is a nondegenerate Liouville domain, $k \in \Z_{\geq 1}$, $x \in \itr X$ and $D$ is a symplectic divisor through $x$, then \begin{IEEEeqnarray*}{c+x*} \tilde{\mathfrak{g}}^{\leq 1}_k(X) = \sup_{J \in \mathcal{J}(X,D)} \mathop{\inf\vphantom{\sup}}_{\gamma} \mathcal{A}(\gamma), \end{IEEEeqnarray*} where the infimum is over Reeb orbits $\gamma$ such that $\mathcal{M}^J_X(\gamma)\p{<}{}{\mathcal{T}^{(k)}x} \neq \varnothing$. \end{remark} \begin{theorem}[{\cite[Theorem 3.3.2]{mcduffSymplecticCapacitiesUnperturbed2022}}] \label{thm:properties of g tilde} The functions $\tilde{\mathfrak{g}}^{\leq \ell}_k$ are independent of the choices of $x$ and $D$ and satisfy the following properties, for all nondegenerate Liouville domains $(X,\lambda_X)$ and $(Y,\lambda_Y)$ of the same dimension: \begin{description} \item[(Monotonicity)] If $X \longrightarrow Y$ is a generalized Liouville embedding then $\tilde{\mathfrak{g}}^{\leq \ell}_k(X) \leq \tilde{\mathfrak{g}}^{\leq \ell}_k(Y)$. \item[(Conformality)] If $\alpha > 0$ then $\tilde{\mathfrak{g}}^{\leq \ell}_k(X, \alpha \lambda_X) = \alpha \, \tilde{\mathfrak{g}}^{\leq \ell}_k(X, \lambda_X)$. \item[(Nondecreasing)] $\tilde{\mathfrak{g}}^{\leq \ell}_1(X) \leq \tilde{\mathfrak{g}}^{\leq \ell}_{2}(X) \leq \cdots \leq +\infty$. \end{description} \end{theorem} We now state a result comparing the McDuff--Siegel capacities and the Gutt--Hutchings capacities. We will later apply this result to show that $c_L(X_{\Omega}) = \delta_\Omega$ for every $4$-dimensional convex toric domain $X_{\Omega}$ (\cref{lem:computation of cl}). \begin{proposition}[{\cite[Proposition 5.6.1]{mcduffSymplecticCapacitiesUnperturbed2022}}] \label{prp:g tilde and cgh} If $X_{\Omega}$ is a $4$-dimensional convex toric domain then \begin{IEEEeqnarray*}{c+x*} \tilde{\mathfrak{g}}^{\leq 1}_k(X_\Omega) = \cgh{k}(X_\Omega). \end{IEEEeqnarray*} \end{proposition} Finally, we state two stabilization results which we will use in \cref{sec:augmentation map of an ellipsoid}. \begin{lemma}[{\cite[Lemma 3.6.2]{mcduffSymplecticCapacitiesUnperturbed2022}}] \label{lem:stabilization 1} Let $(X, \lambda)$ be a Liouville domain. For any $c, \varepsilon \in \R_{> 0}$, there is a subdomain with smooth boundary $\tilde{X} \subset X \times B^2(c)$ such that: \begin{enumerate} \item The Liouville vector field $Z_{\tilde{X}} = Z_{X} + Z_{B^2(c)}$ is outwardly transverse along $\partial \tilde{X}$. \item $X \times \{0\} \subset \tilde{X}$ and the Reeb vector field of $\partial \tilde{X}$ is tangent to $\partial X \times \{0\}$. \item Any Reeb orbit of the contact form $(\lambda + \lambda_0)|_{\partial \tilde{X}}$ (where $\lambda_0 = 1/2 (x \edv y - y \edv x)$) with action less than $c - \varepsilon$ is entirely contained in $\partial X \times \{0\}$ and has normal Conley--Zehnder index equal to $1$. \end{enumerate} \end{lemma} \begin{lemma}[{\cite[Lemma 3.6.3]{mcduffSymplecticCapacitiesUnperturbed2022}}] \label{lem:stabilization 2} Let $X$ be a Liouville domain, and let $\tilde{X}$ be a smoothing of $X \times B^2(c)$ as in \cref{lem:stabilization 1}. \begin{enumerate} \item Let $J \in \mathcal{J}(\tilde{X})$ be a cylindrical almost complex structure on the completion of $\tilde{X}$ for which $\hat{X} \times \{0\}$ is $J$-holomorphic. 
Let $C$ be an asymptotically cylindrical $J$-holomorphic curve in $\hat{X}$, all of whose asymptotic Reeb orbits are nondegenerate and lie in $\partial X \times \{0\}$ with normal Conley--Zehnder index $1$. Then $C$ is either disjoint from the slice $\hat{X} \times \{0\}$ or entirely contained in it. \item Let $J \in \mathcal{J}(\partial \tilde{X})$ be a cylindrical almost complex structure on the symplectization of $\partial \tilde{X}$ for which $\R \times \partial X \times \{0\}$ is $J$-holomorphic. Let $C$ be an asymptotically cylindrical $J$-holomorphic curve in $\R \times \partial \tilde{X}$, all of whose asymptotic Reeb orbits are nondegenerate and lie in $\partial X \times \{0\}$ with normal Conley--Zehnder index $1$. Then $C$ is either disjoint from the slice $\R \times \partial X \times \{0\}$ or entirely contained in it. Moreover, only the latter is possible if $C$ has at least one negative puncture. \end{enumerate} \end{lemma} \section{Computations not requiring contact homology} We now state and prove one of our main theorems, which is going to be a key step in proving that $c_L(X_{\Omega}) = \delta_{\Omega}$. The proof uses techniques similar to those used in the proof of \cite[Theorem 1.1]{cieliebakPuncturedHolomorphicCurves2018}. \begin{theorem} \label{thm:lagrangian vs g tilde} If $(X, \lambda)$ is a Liouville domain then \begin{IEEEeqnarray*}{c+x*} c_L(X) \leq \inf_k^{} \frac{\tilde{\mathfrak{g}}_k^{\leq 1}(X)}{k}. \end{IEEEeqnarray*} \end{theorem} \begin{proof} By \cref{lem:can prove ineqs for ndg}, we may assume that $X$ is nondegenerate. Let $k \in \Z_{\geq 1}$ and $L \subset \itr X$ be an embedded Lagrangian torus. We wish to show that for every $\varepsilon > 0$ there exists $\sigma \in \pi_2(X,L)$ such that $0 < \omega(\sigma) \leq \tilde{\mathfrak{g}}_k^{\leq 1}(X) / k + \varepsilon$. Define \begin{IEEEeqnarray*}{rCls+x*} a & \coloneqq & \tilde{\mathfrak{g}}_k^{\leq 1}(X), \\ K_1 & \coloneqq & \ln(2), \\ K_2 & \coloneqq & \ln(1 + a / \varepsilon k), \\ K & \coloneqq & \max \{K_1, K_2\}, \\ \delta & \coloneqq & e^{-K}, \\ \ell_0 & \coloneqq & a / \delta. \end{IEEEeqnarray*} By \cref{lem:geodesics lemma CM abs} and the Lagrangian neighbourhood theorem, there exists a Riemannian metric $g$ on $L$ and a symplectic embedding $\phi \colon D^*L \longrightarrow X$ such that $\phi(D^*L) \subset \itr X$, $\phi|_L = \id_L$ and such that if $\gamma$ is a closed geodesic in $L$ with length $\ell(\gamma) \leq \ell_0$ then $\gamma$ is noncontractible, nondegenerate and satisfies $0 \leq \morse(\gamma) \leq n - 1$. Let $D^*_{\delta} L$ be the codisk bundle of radius $\delta$. Notice that $\delta$ has been chosen in such a way that the symplectic embedding $\phi \colon D^* L \longrightarrow X$ can be seen as an embedding like that of \cref{lem:energy wrt different forms}. We will now use the notation of \cref{sec:sft compactness}. Define symplectic cobordisms \begin{IEEEeqnarray*}{rCl} (X^+, \omega^+) & \coloneqq & (X \setminus \phi(D^*_{\delta} L), \omega), \\ (X^-, \omega^-) & \coloneqq & (D^*_{\delta} L, \edv \lambda_{T^* L}), \end{IEEEeqnarray*} which have the common contact boundary \begin{IEEEeqnarray*}{c+x*} (M, \alpha) \coloneqq (S^*_{\delta} L, \lambda_{T^* L}). \end{IEEEeqnarray*} Here, it is implicit that we are considering the restriction of the form $\lambda_{T^*L}$ on $T^* L$ to $D^*_{\delta} L$ or $S^*_{\delta} L$. Then, $(X,\omega) = (X^-, \omega^-) \circledcirc (X^+, \omega^+)$. 
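Note, for later use, how the constants $K_1$ and $K_2$ enter the argument: since $K \geq K_1 = \ln(2)$ we have $e^K - 1 \geq 1$, and since $K \geq K_2 = \ln(1 + a/\varepsilon k)$ we have $e^K - 1 \geq a / \varepsilon k$, and therefore \begin{IEEEeqnarray*}{c+x*} \frac{e^K}{e^K - 1} \, \frac{a}{k} = \frac{a}{k} + \frac{a}{k (e^K - 1)} \leq \frac{a}{k} + \varepsilon. \end{IEEEeqnarray*} These two estimates will be used at the end of the proof.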
Recall that there are piecewise smooth $2$-forms $\tilde{\omega} \in \Omega^2(\hat{X})$ and $\tilde{\omega}^{\pm} \in \Omega^2(\hat{X}^{\pm})$ which are given as in \cref{def:energy of a asy cylindrical holomorphic curve}. Choose $x \in \itr \phi(D^*_{\delta} L)$ and let $D \subset \phi(D^*_{\delta} L)$ be a symplectic divisor through $x$. Choose also generic almost complex structures \begin{IEEEeqnarray*}{rCls+x*} J_M & \in & \mathcal{J}(M), \\ J^+ & \in & \mathcal{J}_{J_M}(X^+), \\ J^- & \in & \mathcal{J}^{J_M}(X^-, D), \end{IEEEeqnarray*} and denote by $J_{\partial X} \in \mathcal{J}(\partial X)$ the ``restriction'' of $J^+$ to $\R \times \partial X$. Let $(J_t)_{t} \subset \mathcal{J}(X, D)$ be the corresponding neck stretching family of almost complex structures. Since $a = \tilde{\mathfrak{g}}_k^{\leq 1}(X)$ and by \cref{cor:g tilde 1}, for every $t$ there exists a Reeb orbit $\gamma_t$ in $\partial X = \partial^+ X^+$ and a $J_t$-holomorphic curve $u_t \in \mathcal{M}_X^{J_t}(\gamma_t)\p{<}{}{\mathcal{T}^{(k)}x}$ such that $\mathcal{A}(\gamma_t) \leq a$. Since $\partial X$ has nondegenerate Reeb orbits, there are only finitely many Reeb orbits in $\partial X$ with action less than $a$. Therefore, possibly after passing to a subsequence, we may assume that $\gamma_t \eqqcolon \gamma_0$ is independent of $t$. The curves $u_t$ satisfy the energy bound $E_{\tilde{\omega}}(u_t) \leq a$. By the SFT compactness theorem, the sequence $(u_t)_{t}$ converges to a holomorphic building \begin{IEEEeqnarray*}{c+x*} F = (F^1, \ldots, F^{L_0-1}, F^{L_0}, F^{{L_0}+1}, \ldots, F^N) \in \overline{\mathcal{M}}_X^{(J_t)_{t}}(\gamma_0)\p{<}{}{\mathcal{T}^{(k)}x}, \end{IEEEeqnarray*} where \begin{IEEEeqnarray*}{rCls+x*} (X^{\nu}, \omega^\nu, \tilde{\omega}^{\nu}, J^{\nu}) & \coloneqq & \begin{cases} (T^* L , \edv \lambda_{T^* L} , \tilde{\omega}^- , J^-) & \text{if } \nu = 1 , \\ (\R \times M , \edv(e^r \alpha) , \edv \alpha , J_M) & \text{if } \nu = 2 , \ldots, {L_0} - 1, \\ (\hat{X} \setminus L , \hat{\omega} , \tilde{\omega}^+ , J^+) & \text{if } \nu = {L_0} , \\ (\R \times \partial X, \edv (e^r \lambda|_{\partial X}) , \edv \lambda|_{\partial X} , J_{\partial X}) & \text{if } \nu = {L_0} + 1, \ldots, N , \\ \end{cases} \\ (X^*, \omega^*, \tilde{\omega}^*, J^*) & \coloneqq & \bigcoproduct_{\nu = 1}^N (X^{\nu}, \omega^\nu, \tilde{\omega}^{\nu}, J^{\nu}), \end{IEEEeqnarray*} and $F^{\nu}$ is a $J^\nu$-holomorphic curve in $X^{\nu}$ with asymptotic Reeb orbits $\Gamma^{\pm}_{\nu}$ (see \cref{fig:holomorphic building in the proof}). The holomorphic building $F$ satisfies the energy bound \begin{IEEEeqnarray}{c+x*} \plabel{eq:energy of holo building in proof} E_{\tilde{\omega}^*}(F) \coloneqq \sum_{\nu = 1}^{N} E_{\tilde{\omega}^{\nu}}(F^{\nu}) \leq a. \end{IEEEeqnarray} \begin{figure}[ht] \centering \begin{tikzpicture} [ scale = 0.5, help/.style = {very thin, draw = black!50}, curve/.style = {thick} ] \tikzmath{ \rx = 0.6; \ry = 0.25; } \node[anchor=west] at (20, 13.5) {$F^3 \subset X^3 = X^+ = \hat{X} \setminus L$}; \draw (0,6) rectangle (19,11); \node[anchor=west] at (20, 8.5) {$F^2 \subset X^2 = \R \times M$}; \draw (0,11) rectangle (19,16); \node[anchor=west] at (20, 3) {$F^1 \subset X^1 = X^- = T^* L$}; \draw (0,3) -- (0,6) -- (19,6) -- (19,3); \draw (0,3) .. controls (0,-1) and (19,-1) .. 
(19,3); \coordinate (G0) at ( 2,16); \coordinate (G1) at ( 2, 6); \coordinate (G2) at ( 8, 6); \coordinate (G3) at (11, 6); \coordinate (F1) at ( 2,11); \coordinate (F2) at ( 8,11); \coordinate (F3) at (11,11); \coordinate (F4) at ( 5,11); \coordinate (F5) at (14,11); \coordinate (F6) at (17,11); \coordinate (L) at (-\rx,0); \coordinate (R) at (+\rx,0); \coordinate (G0L) at ($ (G0) + (L) $); \coordinate (G1L) at ($ (G1) + (L) $); \coordinate (G2L) at ($ (G2) + (L) $); \coordinate (G3L) at ($ (G3) + (L) $); \coordinate (F1L) at ($ (F1) + (L) $); \coordinate (F2L) at ($ (F2) + (L) $); \coordinate (F3L) at ($ (F3) + (L) $); \coordinate (F4L) at ($ (F4) + (L) $); \coordinate (F5L) at ($ (F5) + (L) $); \coordinate (F6L) at ($ (F6) + (L) $); \coordinate (G0R) at ($ (G0) + (R) $); \coordinate (G1R) at ($ (G1) + (R) $); \coordinate (G2R) at ($ (G2) + (R) $); \coordinate (G3R) at ($ (G3) + (R) $); \coordinate (F1R) at ($ (F1) + (R) $); \coordinate (F2R) at ($ (F2) + (R) $); \coordinate (F3R) at ($ (F3) + (R) $); \coordinate (F4R) at ($ (F4) + (R) $); \coordinate (F5R) at ($ (F5) + (R) $); \coordinate (F6R) at ($ (F6) + (R) $); \coordinate (P) at (9,3); \coordinate (D) at (3,1); \draw[curve] (G0) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_0$}; \draw[curve] (G1) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_1$}; \draw[curve] (G2) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_2$}; \draw[curve] (G3) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_3$}; \draw[curve] (F1) ellipse [x radius = \rx, y radius = \ry]; \draw[curve] (F2) ellipse [x radius = \rx, y radius = \ry]; \draw[curve] (F3) ellipse [x radius = \rx, y radius = \ry]; \draw[curve] (F4) ellipse [x radius = \rx, y radius = \ry]; \draw[curve] (F5) ellipse [x radius = \rx, y radius = \ry]; \draw[curve] (F6) ellipse [x radius = \rx, y radius = \ry]; ll (P) circle (2pt) node[anchor = north west] {$x$}; \draw[curve] ($ (P) - (D) $) -- ( $ (P) + (D) $ ) node[anchor = west] {$D$}; \draw[curve] (G1L) -- (G0L); \draw[curve] (F1R) -- (G0R); \draw[curve] (G2L) -- (F2L); \draw[curve] (G2R) -- (F2R); \draw[curve] (G3L) -- (F3L); \draw[curve] (G3R) -- (F3R); \draw[curve] (F4L) .. controls ($ (F4L) + (0,2) $) and ($ (F4R) + (0,2) $) .. (F4R); \draw[curve] (F2L) .. controls ($ (F2L) + (0,2) $) and ($ (F2R) + (0,2) $) .. (F2R); \draw[curve] (F6L) .. controls ($ (F6L) + (0,2) $) and ($ (F6R) + (0,2) $) .. (F6R); \draw[curve] (F3R) .. controls ($ (F3R) + (0,1) $) and ($ (F5L) + (0,1) $) .. (F5L); \draw[curve] (F5R) .. controls ($ (F5R) - (0,1) $) and ($ (F6L) - (0,1) $) .. (F6L); \draw[curve] (F3L) .. controls ($ (F3L) + (0,2.5) $) and ($ (F5R) + (0,2.5) $) .. (F5R); \draw[curve] (F5L) .. controls ($ (F5L) - (0,2.5) $) and ($ (F6R) - (0,2.5) $) .. (F6R); \draw[curve] (F1R) .. controls ($ (F1R) - (0,1) $) and ($ (F4L) - (0,1) $) .. (F4L); \draw[curve] (G1R) .. controls ($ (G1R) + (0,2) $) and ($ (F4R) - (0,2) $) .. (F4R); \draw[curve] (G1R) .. controls ($ (G1R) - (0,1.5) $) and ($ (G2L) - (0,1.5) $) .. (G2L); \draw[curve] (G2R) .. controls ($ (G2R) - (0,1) $) and ($ (G3L) - (0,1) $) .. (G3L); \draw[curve] (G1L) .. controls ($ (G1L) - (0,2) $) and ($ (P) - (D) $) .. (P); \draw[curve] (G3R) .. controls ($ (G3R) - (0,1) $) and ($ (P) + (D) $) .. 
(P); \node at ($ (F2) + (0,2) $) {$D_2$}; \node at ($ (F6) + (0,2) $) {$D_3$}; \end{tikzpicture} \caption{The holomorphic building $F = (F^1, \ldots, F^N)$ in the case ${L_0} = N = p = 3$} \label{fig:holomorphic building in the proof} \end{figure} Moreover, by \cref{lem:no nodes}, $F$ has no nodes. Let $C$ be the component of $F$ in $X^-$ which carries the tangency constraint $\p{<}{}{\mathcal{T}^{(k)}x}$. Then, $C$ is positively asymptotic to Reeb orbits $(\gamma_1, \ldots, \gamma_p)$ of $M$. For $\mu = 1, \ldots, p$, let $C_\mu$ be the subtree emanating from $C$ at $\gamma_\mu$. For exactly one $\mu = 1, \ldots, p$, the top level of the subtree $C_\mu$ is positively asymptotic to $\gamma_0$, and we may assume without loss of generality that this is true for $\mu = 1$. By the maximum principle, $C_\mu$ has a component in $X^{L_0} = \hat{X} \setminus L$ for every $\mu = 2, \ldots, p$. Also by the maximum principle, there do not exist components of $C_\mu$ in $X^{L_0} = \hat{X} \setminus L$ which intersect $\R_{\geq 0} \times \partial X$ or components of $C_\mu$ in the top symplectization layers $X^{{L_0}+1}, \ldots, X^N$, for every $\mu = 2, \ldots, p$. We claim that if $\gamma$ is a Reeb orbit in $M$ which is an asymptote of $F^\nu$ for some $\nu = 2,\ldots,{L_0}-1$, then $\mathcal{A}(\gamma) \leq a$. To see this, notice that \begin{IEEEeqnarray*}{rCls+x*} a & \geq & E_{\tilde{\omega}^*}(F) & \quad [\text{by Equation \eqref{eq:energy of holo building in proof}}] \\ & \geq & E_{\tilde{\omega}^N}(F^N) & \quad [\text{by monotonicity of $E$}] \\ & \geq & (e^K - 1) \mathcal{A}(\Gamma^-_N) & \quad [\text{by \cref{lem:energy wrt different forms}}] \\ & \geq & \mathcal{A}(\Gamma^-_N) & \quad [\text{since $K \geq K_1$}] \\ & \geq & \mathcal{A}(\Gamma^-_\nu) & \quad [\text{by \cref{lem:action energy for holomorphic}}] \end{IEEEeqnarray*} for every $\nu = 2, \ldots, {L_0}-1$. Every such $\gamma$ has a corresponding geodesic in $L$ (which by abuse of notation we denote also by $\gamma$) such that $\ell(\gamma) = \mathcal{A}(\gamma)/\delta \leq a / \delta = \ell_0$. Hence, by our choice of Riemannian metric, the geodesic $\gamma$ is noncontractible, nondegenerate and such that $\morse(\gamma) \leq n - 1$. Therefore, the Reeb orbit $\gamma$ is noncontractible, nondegenerate and such that $\conleyzehnder(\gamma) \leq n - 1$. We claim that if $D$ is a component of $C_\mu$ for some $\mu = 2,\ldots,p$ and $D$ is a plane, then $D$ is in $X^{L_0} = \hat{X} \setminus L$. Assume by contradiction otherwise. Notice that since $D$ is a plane, $D$ is asymptotic to a unique Reeb orbit $\gamma$ in $M = S^*_{\delta} L$ with corresponding noncontractible geodesic $\gamma$ in $L$. We will derive a contradiction by defining a filling disk for $\gamma$. If $D$ is in a symplectization layer $\R \times S^*_\delta L$, then the map $\pi \circ D$, where $\pi \colon \R \times S^*_{\delta} L \longrightarrow L$ is the projection, is a filling disk for the geodesic $\gamma$. If $D$ is in the bottom level, i.e. $X^1 = T^*L$, then the map $\pi \circ D$, where $\pi \colon T^*L \longrightarrow L$ is the projection, is also a filling disk. This proves the claim. So, summarizing our previous results, we know that for every $\mu = 2,\ldots,p$ there is a holomorphic plane $D_\mu$ in $X^{L_0} \setminus (\R_{\geq 0} \times \partial X) = X \setminus L$. For each plane $D_\mu$ there is a corresponding disk in $X$ with boundary on $L$, which we denote also by $D_\mu$. 
It is enough to show that $E_{\omega}(D_{\mu_0}) \leq a/k + \varepsilon$ for some $\mu_0 = 2,\ldots,p$. By \cref{lem:punctures and tangency}, $p \geq k + 1 \geq 2$. By definition of average, there exists $\mu_0 = 2,\ldots,p$ such that \begin{IEEEeqnarray*}{rCls+x*} E_{\omega}(D_{\mu_0}) & \leq & \frac{1}{p-1} \sum_{\mu=2}^{p} E_{\omega}(D_{\mu}) & \quad [\text{by definition of average}] \\ & = & \frac{E_{\omega}(D_2 \cup \cdots \cup D_p)}{p-1} & \quad [\text{since energy is additive}] \\ & \leq & \frac{e^K}{e^K - 1} \frac{E_{\tilde{\omega}}(D_2 \cup \cdots \cup D_p)}{p-1} & \quad [\text{by \cref{lem:energy wrt different forms}}] \\ & \leq & \frac{e^K}{e^K - 1} \frac{a}{p-1} & \quad [\text{by Equation \eqref{eq:energy of holo building in proof}}] \\ & \leq & \frac{e^K}{e^K - 1} \frac{a}{k} & \quad [\text{since $p \geq k + 1$}] \\ & \leq & \frac{a}{k} + \varepsilon & \quad [\text{since $K \geq K_2$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{theorem} \label{lem:computation of cl} If $X_{\Omega}$ is a $4$-dimensional convex toric domain then \begin{IEEEeqnarray*}{c+x*} c_L(X_{\Omega}) = \delta_\Omega. \end{IEEEeqnarray*} \end{theorem} \begin{proof} For every $k \in \Z_{\geq 1}$, \begin{IEEEeqnarray*}{rCls+x*} \delta_\Omega & \leq & c_P(X_{\Omega}) & \quad [\text{by \cref{lem:c square geq delta}}] \\ & \leq & c_L(X_{\Omega}) & \quad [\text{by \cref{lem:c square leq c lag}}] \\ & \leq & \frac{\tilde{\mathfrak{g}}^{\leq 1}_{k}(X_{\Omega})}{k} & \quad [\text{by \cref{thm:lagrangian vs g tilde}}] \\ & = & \frac{\cgh{k}(X_{\Omega})}{k} & \quad [\text{by \cref{prp:g tilde and cgh}}] \\ & \leq & \frac{\cgh{k}(N(\delta_\Omega))}{k} & \quad [\text{$X_{\Omega}$ is convex, hence $X_{\Omega} \subset N(\delta_\Omega)$}] \\ & = & \frac{\delta_\Omega(k+1)}{k} & \quad [\text{by \cref{lem:cgh of nondisjoint union of cylinders}}]. \end{IEEEeqnarray*} The result follows by taking the infimum over $k$. \end{proof} The proof of \cref{lem:computation of cl} suggests the following conjecture. Notice that \cref{thm:main theorem} implies \cref{conj:the conjecture}. \begin{conjecture} \label{thm:main theorem} If $X$ is a Liouville domain, $\pi_1(X) = 0$ and $c_1(TX)|_{\pi_2(X)} = 0$, then \begin{IEEEeqnarray*}{c+x*} c_L(X,\lambda) \leq \inf_k \frac{\cgh{k}(X,\lambda)}{k}. \end{IEEEeqnarray*} \end{conjecture} \begin{proof}[Proof attempt] By \cref{lem:can prove ineqs for ndg}, we may assume that $X$ is nondegenerate. Let $k \in \Z_{\geq 1}$ and $L \subset \itr X$ be an embedded Lagrangian torus. Let also $a > \cgh{k}(X)$. We wish to show that for every $\varepsilon > 0$ there exists $\sigma \in \pi_2(X,L)$ such that $0 < \omega(\sigma) \leq a / k + \varepsilon$. Start by replicating word by word the proof of \cref{thm:lagrangian vs g tilde} until the point where we choose $x \in \phi(D^*_{\delta} L)$. Instead of choosing $x$, choose a nondegenerate star-shaped domain $E \subset \C^n$ and an exact symplectic embedding $\varphi \colon E \longrightarrow X$ such that $\varphi(E) \subset \itr \phi(D^*_{\delta} L)$. Since $a > \cgh{k}(X)$ and by \cref{def:ck alternative}, the map \begin{IEEEeqnarray}{c+x*} \plabel{eq:nonzero map in proof of cl leq cgh} \begin{tikzcd} \homology{}{S^1}{}{S}{H}{(\varepsilon,a]}{n - 1 + 2k}(X) \ar[r, "\iota^a"] & \homology{}{S^1}{}{S}{H}{+}{n - 1 + 2k}(X) \ar[r, "\varphi_!"] & \homology{}{S^1}{}{S}{H}{+}{n - 1 + 2k}(E) \end{tikzcd} \end{IEEEeqnarray} is nonzero. 
Choose Hamiltonians \begin{IEEEeqnarray*}{rClCrClCs} H^+ \colon S^1 \times S^{2N+1} \times \hat{X} & \longrightarrow & \R, & \quad & H^+ & \in & \mathcal{H}(X,N), & \quad & (see \cref{def:hamiltonians}), \\ H^- \colon S^1 \times S^{2N+1} \times \hat{X} & \longrightarrow & \R, & \quad & H^- & \in & \mathcal{H}(X,E,N), & \quad & (see \cref{def:stair hamiltonians}), \\ H \colon \R \times S^1 \times S^{2N+1} \times \hat{X} & \longrightarrow & \R, & \quad & H & \in & \mathcal{H}(H^+, H^-), & \quad & (see \cref{def:homotopy stair to admissible hamiltonian}). \end{IEEEeqnarray*} Choose also an almost complex structure \begin{IEEEeqnarray*}{rClCrClCs} J \colon S^1 \times S^{2N+1} \times \hat{X} & \longrightarrow & \End(T \hat{X}), & \quad & J & \in & \mathcal{J}(X, E, N), & \quad & (see \cref{def:stair acs}). \end{IEEEeqnarray*} The almost complex structure $J$ defines a neck stretching family of almost complex structures \begin{IEEEeqnarray*}{rClCrClCs} J_m \colon S^1 \times S^{2N+1} \times \hat{X} & \longrightarrow & \End(T \hat{X}), & \quad & J_m & \in & \mathcal{J}(X, E, N), \end{IEEEeqnarray*} for $m \in \Z_{\geq 1}$. Since the map \eqref{eq:nonzero map in proof of cl leq cgh} is nonzero and by definition of the Viterbo transfer map, if $N, H^\pm, H$ are chosen big enough (in the sense of the partial orders defined in \cref{sec:Floer homology,sec:viterbo transfer map of liouville embedding}) then for every $m$ there exist $(z^{\pm}_m, \gamma^{\pm}_m) \in \hat{\mathcal{P}}(H^{\pm})$ and a Floer trajectory $(w_m, u_m)$ with respect to $H, J_m$ from $(z^-_m, \gamma^-_m)$ to $(z^+_m, \gamma^+_m)$, such that \begin{enumerate} \item $\img \gamma^+_m$ is near $\partial X$ and $\mathcal{A}_{H^+}(z^+_m, \gamma^+_m) \leq a$; \item $\img \gamma^-_m$ is near \parbox{\widthof{$\partial X$}}{$\partial E$} and $\ind (z^-_m, \gamma^-_m) \geq n - 1 + 2k$. \end{enumerate} By \cref{lem:action energy for floer trajectories}, we have the energy bound $E(w_m, u_m) \leq a$. Possibly after passing to a subsequence, we may assume that $(z^{\pm}_m, \gamma^{\pm}_m)$ converges to $(z_0^{\pm}, \gamma^{\pm}_0) \in \hat{\mathcal{P}}(H^{\pm})$. Now we come to the first challenge of the proof. We would like to use an adaptation of the SFT compactness theorem to take the limit of the sequence $(w_m, u_m)_m$. We will assume that such a theorem can be proven, and that we get a resulting limit $F = (F^1, \ldots, F^N)$ as in the proof of \cref{thm:lagrangian vs g tilde}, but where each $F^{\nu} = (w^\nu, u^\nu) \colon \dot{\Sigma}^\nu \longrightarrow S^{2 N + 1} \times X^{\nu}$ is a solution of the parametrized Floer equation (\cref{def:floer trajectory abstract}). Let $C$ be the component of $F$ in $X^-$ which is negatively asymptotic to $(z_0^-, \gamma_0^-)$. Notice that near $X \setminus \phi(D^*_{\delta} L)$, the Hamiltonian $H$ is independent of the point in $\hat{X}$. Therefore, in the intermediate symplectization levels (i.e. for $\nu = 2,\ldots,{L_0}-1$) the map $u^{\nu} \colon \dot{\Sigma}^{\nu} \longrightarrow X^{\nu}$ is $J^{\nu}_{w^{\nu}}$-holomorphic, where $J^{\nu}_{w^{\nu}} \colon \dot{\Sigma}^{\nu} \times X^{\nu} \longrightarrow \End(T X^{\nu})$ is a domain dependent almost complex structure obtained from composing an almost complex structure $J^{\nu} \colon \dot{\Sigma}^{\nu} \times S^{2 N + 1} \times X^{\nu} \longrightarrow \End(T X^{\nu})$ with $w^\nu$.
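Explicitly, this means that $J^{\nu}_{w^{\nu}}(z, x) = J^{\nu}(z, w^{\nu}(z), x)$ for every $(z, x) \in \dot{\Sigma}^{\nu} \times X^{\nu}$.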
Hence, as in the proof of \cref{thm:lagrangian vs g tilde}, the component $C$ has $p$ positive punctures asymptotic to Reeb orbits $(\gamma_1, \ldots, \gamma_p)$ and for every $\mu = 2, \ldots, p$ there is a disk $D_{\mu}$ in $X$ with boundary on $L$. At this point, we need to show that $p \geq k + 1$, which brings us to the main difficulty in the proof. In the proof of \cref{thm:lagrangian vs g tilde}, we chose a generic almost complex structure so that $C$ would be regular. Then, the index formula for $C$ implied that $p \geq k + 1$ (see \cref{thm:transversality with tangency,lem:punctures and tangency simple,lem:punctures and tangency}). In line with this reasoning, we wish to show that $p \geq k + 1$ using the following computation: \begin{IEEEeqnarray*}{rCls+x*} 0 & \leq & \operatorname{ind}(C) \\ & = & (n - 3)(1 - p) + \sum_{\mu=1}^{p} \conleyzehnder(\gamma_\mu) - \ind(z^-_0, \gamma^-_0) \\ & \leq & (n - 3)(1 - p) + \sum_{\mu=1}^{p} (n - 1) - (n - 1 + 2k) \\ & = & 2 (p - k - 1), \end{IEEEeqnarray*} where in the first line we would need to use a transversality theorem which applies to $C$, and in the second line we would need to use a Fredholm theory theorem which gives us the desired index formula for $C$. We point out a few difficulties that arise with this approach. \begin{enumerate} \item Because of the domain dependence of the almost complex structures and Hamiltonians, it is not clear how to choose the initial almost complex structure $J \colon S^1 \times S^{2N+1} \times \hat{X} \longrightarrow \End(T \hat{X})$ in such a way that the resulting almost complex structure $J^1 \colon \dot{\Sigma}^1 \times S^{2N+1} \times X^1 \longrightarrow \End(T X^1)$ is regular. \item We are working under the assumption that the analogue of the SFT compactness theorem which applies to solutions of the parametrized Floer equation produces a building $F$ whose symplectization levels are asymptotic to Reeb orbits. More specifically, this means that the gradient flow line in $S^{2N+1}$ corresponding to $C$ is not asymptotic at the punctures to critical points of $\tilde{f}_N$. Therefore, in this case the linearized operator corresponding to the gradient flow line equation on $S^{2N+1}$ will not be Fredholm. \item However, the assumption in the previous item could be wrong. Another reasonable possibility is that the analogue of the SFT compactness theorem which applies to solutions of the parametrized Floer equation produces a building $F$ whose bottom component is positively asymptotic to pairs $(z_\mu, \gamma_\mu)$, where $z_{\mu} \in S^{2N+1}$ is a critical point of $\tilde{f}_N$ and $\gamma_\mu$ is a Reeb orbit. In this case, one would expect that the relevant operator is Fredholm. However, the Morse index of the critical points $z_{\mu}$ would appear in the index formula, and the previous computation would no longer imply that $p \geq k + 1$. \end{enumerate} Finally, we point out that if $p \geq k + 1$, then by the same computation as in the proof of \cref{thm:lagrangian vs g tilde}, we have the desired energy bound \begin{IEEEeqnarray*}{c+x*} E_{\omega}(D_{\mu_0}) \leq \frac{a}{k} + \varepsilon \end{IEEEeqnarray*} for some $\mu_0 = 2, \ldots, p$. This finishes the proof attempt. \end{proof} \chapter{Contact homology} \label{chp:contact homology} \section{Assumptions on virtual perturbation scheme} \label{sec:assumptions of virtual perturbation scheme} In this chapter, we wish to use techniques from contact homology to prove \cref{conj:the conjecture}. 
Consider the proof of \cref{lem:computation of cl}: to prove the inequality $c_L(X_{\Omega}) \leq \delta_\Omega$, we needed to use the fact that $\tilde{\mathfrak{g}}^{\leq 1}_k(X_{\Omega}) \leq \cgh{k}(X_{\Omega})$ (which is true if $X_{\Omega}$ is convex and $4$-dimensional). Our approach here will be to consider the capacities $\mathfrak{g}^{\leq \ell}_{k}$ from \cite{siegelHigherSymplecticCapacities2020}, which satisfy $\tilde{\mathfrak{g}}^{\leq 1}_k(X) \leq {\mathfrak{g}}^{\leq 1}_k(X) = \cgh{k}(X)$. As we will see, $\mathfrak{g}^{\leq \ell}_{k}(X)$ is defined using the linearized contact homology of $X$, where $X$ is any nondegenerate Liouville domain. Very briefly, the linearized contact homology chain complex, denoted $CC(X)$, is generated by the good Reeb orbits of $\partial X$, and therefore maps whose domain is $CC(X)$ should count holomorphic curves which are asymptotic to Reeb orbits. The ``naive'' way to define such counts of holomorphic curves would be to show that they are the elements of a moduli space which is a compact, $0$-dimensional orbifold. However, there is the possibility that a curve is multiply covered. This means that in general it is no longer possible to show that the moduli spaces are transversely cut out, and therefore we do not have access to counts of moduli spaces of holomorphic curves (or at least not in the usual sense of the notion of signed count). In the case where the Liouville domain is $4$-dimensional, there exists the possibility of using automatic transversality techniques to show that the moduli spaces are regular. This is the approach taken by Wendl \cite{wendlAutomaticTransversalityOrbifolds2010}. Nelson \cite{nelsonAutomaticTransversalityContact2015}, Hutchings--Nelson \cite{hutchingsCylindricalContactHomology2016} and Bao--Honda \cite{baoDefinitionCylindricalContact2018} use automatic transversality to define cylindrical contact homology. In order to define contact homology in more general contexts, one needs to replace the notion of count by a suitable notion of virtual count, which is obtained through a virtual perturbation scheme. This was done by Pardon \cite{pardonAlgebraicApproachVirtual2016,pardonContactHomologyVirtual2019} to define contact homology in greater generality. The theory of polyfolds by Hofer--Wysocki--Zehnder \cite{hoferPolyfoldFredholmTheory2021} can also be used to define virtual moduli counts. Alternative approaches using Kuranishi structures have been given by Ishikawa \cite{ishikawaConstructionGeneralSymplectic2018} and Bao--Honda \cite{baoSemiglobalKuranishiCharts2021}. Unfortunately, linearized contact homology is not yet defined in the generality we need. \begin{enumerate} \item In order to prove \cref{conj:the conjecture}, we only need the capacities $\mathfrak{g}^{\leq \ell}_k$ for $\ell = 1$. These are defined using the linearized contact homology (as a chain complex) and an augmentation map which counts curves satisfying a tangency constraint. As far as we know, the current work on defining virtual moduli counts does not yet deal with moduli spaces of curves satisfying tangency constraints. \item In addition to \cref{conj:the conjecture}, in this chapter we will also prove some properties of the capacities $\mathfrak{g}^{\leq \ell}_k$ for $\ell > 1$. The definition of these capacities for $\ell > 1$ requires the structure of an $\mathcal{L}_{\infty}$-algebra on the linearized contact homology as well as an $\mathcal{L}_{\infty}$-augmentation map counting curves which satisfy a tangency constraint. 
\end{enumerate} So, during this chapter, we will work under the assumption that it is possible to define a virtual perturbation scheme which makes the invariants and maps described above well-defined (this is expected to be the case). \begin{assumption} \label{assumption} We assume the existence of a virtual perturbation scheme which to every compactified moduli space $\overline{\mathcal{M}}$ of asymptotically cylindrical holomorphic curves (in a symplectization or in a Liouville cobordism, possibly satisfying a tangency constraint) assigns a virtual count $\#^{\mathrm{vir}} \overline{\mathcal{M}}$. We will assume in addition that the virtual perturbation scheme has the following properties. \begin{enumerate} \item If $\#^{\mathrm{vir}} \overline{\mathcal{M}} \neq 0$ then $\operatorname{virdim} \overline{\mathcal{M}} = 0$; \item If $\overline{\mathcal{M}}$ is transversely cut out then $\#^{\mathrm{vir}} \overline{\mathcal{M}} = \# \overline{\mathcal{M}}$. In particular, if $\overline{\mathcal{M}}$ is empty then $\#^{\mathrm{vir}} \overline{\mathcal{M}} = 0$; \item The virtual count of the boundary of a moduli space (defined as a sum of virtual counts of the moduli spaces that constitute the codimension one boundary strata) is zero. In particular, the expected algebraic identities ($\partial^2 = 0$ for differentials, $\varepsilon \circ \partial = 0$ for augmentations) hold, as well as independence of auxiliary choices of almost complex structure and symplectic divisor. \end{enumerate} \end{assumption} \section{\texorpdfstring{$\mathcal{L}_{\infty}$-}{L infinity }algebras} In this section, we give a brief review of the algebraic definitions which will play a role in the rest of this chapter. Our main reference is \cite[Section 2]{siegelHigherSymplecticCapacities2020}. The key definitions are that of $\mathcal{L}_{\infty}$-algebra (\cref{def:l infinity algebra}) and its associated bar complex (\cref{def:bar complex}). We start by defining the suspension of a graded vector space. The purpose of this definition is to define $\mathcal{L}_{\infty}$-algebras in such a way that the $\mathcal{L}_{\infty}$-relations do not have extra signs (these extra signs are ``absorbed'' by the degree shift in the suspension). \begin{definition} Let $V = \bigoplus_{k \in \Z} V^k$ be a graded vector space over a field $K$. The \textbf{suspension} of $V$ is the graded vector space $V[+1] = \bigoplus_{k \in \Z} (V[+1])^k$ given by $(V[+1])^k = V^{k+1}$. Define $s \colon V \longrightarrow V[+1]$ to be the linear map of degree $-1$ given by $s(v) = v$. \end{definition} \begin{remark} We use the Koszul sign convention, i.e. if $f,g \colon V \longrightarrow V$ are linear maps and $x, y \in V$ then $(f \otimes g)(x \otimes y) = (-1)^{\deg(x) \deg(g)} f(x) \otimes g(y)$. \end{remark} \begin{definition} Let $k \in \Z_{\geq 1}$ and denote by $\operatorname{Sym}(k)$ the symmetric group on $k$ elements. Let $V$ be a graded vector space over a field $K$. We define an action of $\operatorname{Sym}(k)$ on $\bigotimes_{j=1}^{k} V$ as follows. For $\sigma \in \operatorname{Sym}(k)$ and $v_1, \ldots, v_k \in V$, let \begin{IEEEeqnarray*}{rCls+x*} \operatorname{sign}(\sigma, v_1, \ldots, v_k) & \coloneqq & (-1)^{\operatorname{sum} \{ \deg(v_i) \deg(v_j) \, \mid \, 1 \leq i < j \leq k , \sigma(i) > \sigma(j) \} }, \\ \sigma \cdot (v_1 \otimes \cdots \otimes v_k) & \coloneqq & \operatorname{sign}(\sigma, v_1, \ldots, v_k) \, v_{\sigma(1)} \otimes \cdots \otimes v_{\sigma(k)}.
\end{IEEEeqnarray*} Define $\bigodot_{j=1}^k V \coloneqq \bigotimes_{j=1}^{k} V / \operatorname{Sym}(k)$ and denote by $v_1 \odot \cdots \odot v_k$ the equivalence class of $v_1 \otimes \cdots \otimes v_k$. \end{definition} We come to the main definition of this section, which encodes the algebraic structure of linearized contact homology (see \cref{def:lch l infinity}). \begin{definition} \label{def:l infinity algebra} An \textbf{$\mathcal{L}_{\infty}$-algebra} is a graded vector space $V = \bigoplus_{k \in \Z} V^k$ together with a family $\ell = (\ell^k)_{k \in \Z_{\geq 1}}$ of maps $\ell^k \colon \bigodot_{j=1}^{k} V[+1] \longrightarrow V[+1]$ of degree $1$, satisfying the \textbf{$\mathcal{L}_{\infty}$-relations}, i.e. \begin{IEEEeqnarray*}{l} 0 = \sum_{k=1}^{n} \sum_{\sigma \in \operatorname{Sh}(k,n-k)} \operatorname{sign}(\sigma, s v_1, \ldots, s v_n) \\ \hphantom{0 = \sum_{k=1}^{n} \sum_{\sigma \in \operatorname{Sh}(k,n-k)} \quad} \ell^{n-k+1} ( \ell^k ( s v_{\sigma(1)} \odot \cdots \odot s v_{\sigma(k)} ) \odot s v_{\sigma(k+1)} \odot \cdots \odot s v_{\sigma(n)} ) \end{IEEEeqnarray*} for every $v_1,\ldots,v_n \in V$. Here, $\operatorname{Sh}(k,n-k) \subset \operatorname{Sym}(n)$ is the subgroup of permutations $\sigma$ such that $\sigma(1) < \cdots < \sigma(k)$ and $\sigma(k+1) < \cdots < \sigma(n)$. \end{definition} The definition of $\mathcal{L}_{\infty}$-algebra can be expressed more compactly via the notion of bar complex. Indeed, the family of maps $(\ell^k)_{k \in \Z_{\geq 1}}$ satisfies the $\mathcal{L}_{\infty}$-relations if and only if the map $\hat{\ell}$ defined below is a differential, i.e. $\hat{\ell} \circ \hat{\ell} = 0$. \begin{definition} \label{def:bar complex} Let $(V,\ell)$ be an $\mathcal{L}_{\infty}$-algebra. The \textbf{bar complex} of $(V,\ell)$ is the vector space $\mathcal{B} V = \bigoplus_{k = 1}^{+\infty} \bigodot_{j=1}^k V[+1]$ together with the degree $1$ differential $\hat{\ell} \colon \mathcal{B} V \longrightarrow \mathcal{B} V$ given by \begin{IEEEeqnarray*}{rCl} \IEEEeqnarraymulticol{3}{l}{\hat{\ell}(v_1 \odot \cdots \odot v_n)}\\ \quad & = & \sum_{k=1}^{n} \sum_{\sigma \in \operatorname{Sh}(k,n-k)} \operatorname{sign}(\sigma, v_1, \ldots, v_n) \, \ell^k ( v_{\sigma(1)} \odot \cdots \odot v_{\sigma(k)} ) \odot v_{\sigma(k+1)} \odot \cdots \odot v_{\sigma(n)}. \end{IEEEeqnarray*} \end{definition} \begin{definition} Let $(V,\ell)$ be an $\mathcal{L}_{\infty}$-algebra. A \textbf{filtration} on $V$ is a family $(\mathcal{F}^{\leq a} V)_{a \in \R}$ of subspaces $\mathcal{F}^{\leq a} V \subset V$, satisfying the following properties: \begin{enumerate} \item if $a \leq b$ then $\mathcal{F}^{\leq a} V \subset \mathcal{F}^{\leq b} V$; \item $\bigcup_{a \in \R} \mathcal{F}^{\leq a} V = V$; \item $\ell^k( \mathcal{F}^{\leq a_1} V[+1] \odot \cdots \odot \mathcal{F}^{\leq a_k} V[+1] ) \subset \mathcal{F}^{\leq a_1 + \cdots + a_k} V[+1]$. \end{enumerate} \end{definition} \begin{definition} Let $(V, \ell)$ be an $\mathcal{L}_{\infty}$-algebra together with a filtration $(\mathcal{F}^{\leq a} V)_{a \in \R}$. 
The \textbf{induced filtration} on the bar complex is the family of complexes $(\mathcal{F}^{\leq a} \mathcal{B} V, \hat{\ell})_{a \in \R}$, where \begin{IEEEeqnarray*}{c+x*} \mathcal{F}^{\leq a} \mathcal{B} V \coloneqq \bigoplus_{k=1}^{+\infty} \, \bigcup_{a_1 + \cdots + a_k \leq a} \, \bigodot_{j=1}^{k} \mathcal{F}^{\leq a_j} V[+1] \end{IEEEeqnarray*} and $\hat{\ell} \colon \mathcal{F}^{\leq a} \mathcal{B} V \longrightarrow \mathcal{F}^{\leq a} \mathcal{B} V$ is the restriction of $\hat{\ell} \colon \mathcal{B} V \longrightarrow \mathcal{B} V$. \end{definition} The linearized contact homology will have a filtration induced by the action of the Reeb orbits (see \cref{def:action filtration lch}). Also, the bar complex of any $\mathcal{L}_{\infty}$-algebra has a filtration by word length, which is defined below. \begin{definition} \phantomsection\label{def:word length filtration} Let $(V, \ell)$ be an $\mathcal{L}_{\infty}$-algebra and consider its bar complex $(\mathcal{B}V, \hat{\ell})$. The \textbf{word length filtration} of $(\mathcal{B}V, \hat{\ell})$ is the family of complexes $(\mathcal{B}^{\leq m} V, \hat{\ell})_{m \in \Z_{\geq 1}}$, where $\mathcal{B}^{\leq m} V \coloneqq \bigoplus_{k=1}^{m} \bigodot_{j=1}^{k} V[+1]$ and $\hat{\ell} \colon \mathcal{B}^{\leq m} V \longrightarrow \mathcal{B}^{\leq m} V$ is the restriction of $\hat{\ell} \colon \mathcal{B}V \longrightarrow \mathcal{B}V$. \end{definition} \section{Contact homology} In this section, we define the linearized contact homology of a nondegenerate Liouville domain $X$. This is the homology of a chain complex $CC(X)$, which is described in \cref{def:linearized contact homology}. This complex has additional structure, namely it is also an $\mathcal{L}_{\infty}$-algebra (\cref{def:lch l infinity}) and it admits a filtration by action (\cref{def:action filtration lch}). We also define an augmentation map (\cref{def:augmentation map}), which is necessary to define the capacities $\mathfrak{g}^{\leq \ell}_k$. \begin{definition} Let $(M,\alpha)$ be a contact manifold and $\gamma$ be a Reeb orbit in $M$. We say that $\gamma$ is \textbf{bad} if $\conleyzehnder(\gamma) - \conleyzehnder(\gamma_0)$ is odd, where $\gamma_0$ is the simple Reeb orbit that corresponds to $\gamma$. We say that $\gamma$ is \textbf{good} if it is not bad. \end{definition} Since the parity of the Conley--Zehnder index of a Reeb orbit is independent of the choice of trivialization, the definition above is well posed. \begin{definition} \label{def:linearized contact homology} If $(X,\lambda)$ is a nondegenerate Liouville domain, the \textbf{linearized contact homology chain complex} of $X$, denoted $CC(X)$, is a chain complex given as follows. First, let $CC(X)$ be the vector space over $\Q$ generated by the set of good Reeb orbits of $(\partial X, \lambda|_{\partial X})$. The differential of $CC(X)$, denoted $\partial$, is given as follows. Choose $J \in \mathcal{J}(X)$. 
If $\gamma$ is a good Reeb orbit of $\partial X$, we define \begin{IEEEeqnarray*}{c+x*} \partial \gamma = \sum_{\eta} \p{<}{}{\partial \gamma, \eta} \, \eta, \end{IEEEeqnarray*} where $\p{<}{}{\partial \gamma, \eta}$ is the virtual count (with combinatorial weights) of holomorphic curves in $\R \times \partial X$ with one positive asymptote $\gamma$, one negative asymptote $\eta$, and $k \geq 0$ extra negative asymptotes $\alpha_1,\ldots,\alpha_k$ (called \textbf{anchors}), each weighted by the count of holomorphic planes in $\hat{X}$ asymptotic to $\alpha_j$ (see \cref{fig:differential of lch}). \end{definition} \begin{figure}[htp] \centering \begin{tikzpicture} [ scale = 0.5, help/.style = {very thin, draw = black!50}, curve/.style = {thick} ] \tikzmath{ \rx = 0.75; \ry = 0.25; } \node[anchor=west] at (13,9) {$\R \times \partial X$}; \draw (0,6) rectangle (12,12); \node[anchor=west] at (13,3) {$\hat{X}$}; \draw (0,3) -- (0,6) -- (12,6) -- (12,3); \draw (0,3) .. controls (0,-1) and (12,-1) .. (12,3); \coordinate (G) at ( 2,12); \coordinate (E) at ( 2, 6); \coordinate (A) at ( 6, 6); \coordinate (B) at (10, 6); \coordinate (L) at (-\rx,0); \coordinate (R) at (+\rx,0); \coordinate (GL) at ($ (G) + (L) $); \coordinate (EL) at ($ (E) + (L) $); \coordinate (AL) at ($ (A) + (L) $); \coordinate (BL) at ($ (B) + (L) $); \coordinate (GR) at ($ (G) + (R) $); \coordinate (ER) at ($ (E) + (R) $); \coordinate (AR) at ($ (A) + (R) $); \coordinate (BR) at ($ (B) + (R) $); \draw[curve] (G) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma$}; \draw[curve] (E) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\eta$}; \draw[curve] (A) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\alpha_1$}; \draw[curve] (B) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\alpha_2$}; \draw[curve] (ER) .. controls ($ (ER) + (0,2) $) and ($ (AL) + (0,2) $) .. (AL); \draw[curve] (AR) .. controls ($ (AR) + (0,2) $) and ($ (BL) + (0,2) $) .. (BL); \draw[curve] (AL) .. controls ($ (AL) - (0,2) $) and ($ (AR) - (0,2) $) .. (AR); \draw[curve] (BL) .. controls ($ (BL) - (0,2) $) and ($ (BR) - (0,2) $) .. (BR); \draw[curve] (GR) .. controls ($ (GR) - (0,5) $) and ($ (BR) + (0,5) $) .. (BR); \coordinate (C) at ($ (E) + (0,3) $); \draw[curve] (EL) .. controls ($ (EL) + (0,1) $) and ($ (C) - (0,1) $) .. (C); \draw[curve] (GL) .. controls ($ (GL) - (0,1) $) and ($ (C) + (0,1) $) .. (C); \end{tikzpicture} \caption{A holomorphic curve with anchors contributing to the coefficient $\p{<}{}{\partial \gamma, \eta}$} \label{fig:differential of lch} \end{figure} By assumption on the virtual perturbation scheme, $\partial \circ \partial = 0$ and $CC(X)$ is independent (up to chain homotopy equivalence) of the choice of almost complex structure $J$. In general, $CC(X)$ is not $\Z$-graded but only $\Z_2$-graded (see \cref{rmk:grading for lch}). We wish to define a structure of $\mathcal{L}_{\infty}$-algebra on $CC(X)[-1]$. Notice that the definition of $\mathcal{L}_{\infty}$-structure on a vector space (\cref{def:l infinity algebra}) also makes sense when the vector space is only $\Z_2$-graded. \begin{definition} \label{def:lch l infinity} We define a structure of $\mathcal{L}_{\infty}$-algebra on $CC(X)[-1]$, given by maps $\ell^k \colon \bigodot^k CC(X) \longrightarrow CC(X)$, as follows. Choose an almost complex structure $J \in \mathcal{J}(X)$. 
If $\Gamma = (\gamma_1,\ldots,\gamma_k)$ is a tuple of good Reeb orbits, we define \begin{IEEEeqnarray*}{c+x*} \ell^{k} (\gamma_1 \odot \cdots \odot \gamma_{k}) = \sum_{\eta} \p{<}{}{\ell^{k} (\gamma_1 \odot \cdots \odot \gamma_{k}) , \eta} \, \eta, \end{IEEEeqnarray*} where $\p{<}{}{\ell^{k} (\gamma_1 \odot \cdots \odot \gamma_{k}) , \eta}$ is the virtual count of holomorphic curves in $\R \times \partial X$ with positive asymptotes $\gamma_1, \ldots, \gamma_k$, one negative asymptote $\eta$, and a number of extra negative asymptotes with anchors in $\hat{X}$, such that exactly one of the components in the symplectization level is nontrivial (see \cref{fig:l infinity ops of lch}). \end{definition} \begin{figure}[htp] \centering \begin{tikzpicture} [ scale = 0.5, help/.style = {very thin, draw = black!50}, curve/.style = {thick} ] \tikzmath{ \rx = 0.75; \ry = 0.25; } \node[anchor=west] at (17,9) {$\R \times \partial X$}; \draw (0,6) rectangle (16,12); \node[anchor=west] at (17,3) {$\hat{X}$}; \draw (0,3) -- (0,6) -- (16,6) -- (16,3); \draw (0,3) .. controls (0,-1) and (16,-1) .. (16,3); \coordinate (G1) at ( 3,12); \coordinate (G2) at ( 7,12); \coordinate (G3) at (11,12); \coordinate (G4) at (14,12); \coordinate (F3) at (11, 6); \coordinate (F4) at (14, 6); \coordinate (E0) at ( 2, 6); \coordinate (A1) at ( 5, 6); \coordinate (A2) at ( 8, 6); \coordinate (L) at (-\rx,0); \coordinate (R) at (+\rx,0); \coordinate (G1L) at ($ (G1) + (L) $); \coordinate (G2L) at ($ (G2) + (L) $); \coordinate (G3L) at ($ (G3) + (L) $); \coordinate (G4L) at ($ (G4) + (L) $); \coordinate (F3L) at ($ (F3) + (L) $); \coordinate (F4L) at ($ (F4) + (L) $); \coordinate (E0L) at ($ (E0) + (L) $); \coordinate (A1L) at ($ (A1) + (L) $); \coordinate (A2L) at ($ (A2) + (L) $); \coordinate (G1R) at ($ (G1) + (R) $); \coordinate (G2R) at ($ (G2) + (R) $); \coordinate (G3R) at ($ (G3) + (R) $); \coordinate (G4R) at ($ (G4) + (R) $); \coordinate (F3R) at ($ (F3) + (R) $); \coordinate (F4R) at ($ (F4) + (R) $); \coordinate (E0R) at ($ (E0) + (R) $); \coordinate (A1R) at ($ (A1) + (R) $); \coordinate (A2R) at ($ (A2) + (R) $); \draw[curve] (G1) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_1$}; \draw[curve] (G2) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_2$}; \draw[curve] (G3) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_3$}; \draw[curve] (G4) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_4$}; \draw[curve] (F3) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_3$}; \draw[curve] (F4) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_4$}; \draw[curve] (E0) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\eta$}; \draw[curve] (A1) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\alpha_1$}; \draw[curve] (A2) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\alpha_2$}; \draw[curve] (G1R) .. controls ($ (G1R) - (0,2) $) and ($ (G2L) - (0,2) $) .. (G2L); \draw[curve] (E0R) .. controls ($ (E0R) + (0,2) $) and ($ (A1L) + (0,2) $) .. (A1L); \draw[curve] (A1R) .. controls ($ (A1R) + (0,2) $) and ($ (A2L) + (0,2) $) .. (A2L); \draw[curve] (A1L) .. controls ($ (A1L) - (0,3) $) and ($ (A1R) - (0,3) $) .. (A1R); \draw[curve] (A2L) .. controls ($ (A2L) - (0,3) $) and ($ (F4R) - (0,3) $) .. (F4R); \draw[curve] (A2R) .. controls ($ (A2R) - (0,1) $) and ($ (F3L) - (0,1) $) .. (F3L); \draw[curve] (F3R) .. controls ($ (F3R) - (0,1) $) and ($ (F4L) - (0,1) $) .. (F4L); \draw[curve] (E0L) .. 
controls ($ (E0L) + (0,2) $) and ($ (G1L) - (0,2) $) .. (G1L); \draw[curve] (A2R) .. controls ($ (A2R) + (0,2) $) and ($ (G2R) - (0,2) $) .. (G2R); \draw[curve] (F3L) -- (G3L); \draw[curve] (F3R) -- (G3R); \draw[curve] (F4L) -- (G4L); \draw[curve] (F4R) -- (G4R); \node[rotate = 90] at ($ (F3) + (0,3) $) {trivial}; \node[rotate = 90] at ($ (F4) + (0,3) $) {trivial}; \end{tikzpicture} \caption{A holomorphic building contributing to the coefficient $\p{<}{}{ \ell^4 (\gamma_1 \odot \cdots \odot \gamma_4), \eta}$} \label{fig:l infinity ops of lch} \end{figure} By the assumptions on the virtual perturbation scheme, the maps $\ell^k$ satisfy the $\mathcal{L}_{\infty}$-relations and $CC(X)$ is independent (as an $\mathcal{L}_{\infty}$-algebra, up to $\mathcal{L}_{\infty}$-homotopy equivalence) of the choice of $J$. We point out that the first $\mathcal{L}_{\infty}$-operation is equal to the differential of linearized contact homology, i.e. $\ell^1 = \partial$. \begin{remark} \label{rmk:grading for lch} In general, the Conley--Zehnder index of a Reeb orbit is well-defined as an element in $\Z_2$. Therefore, the complex $CC(X)$ has a $\Z_{2}$-grading given by $\deg(\gamma) \coloneqq n - 3 - \conleyzehnder(\gamma)$, and with respect to this definition of degree every $\mathcal{L}_{\infty}$-operation $\ell^k$ has degree $1$. If $\pi_1(X) = 0$ and $2 c_1(TX) = 0$, then by \cref{lem:cz of reeb is independent of triv over filling disk} we have well-defined Conley--Zehnder indices in $\Z$, which means that $CC(X)$ is $\Z$-graded. For some purposes, it will be enough to consider only the chain complex structure on $CC(X)$ and not the $\mathcal{L}_{\infty}$-algebra structure (namely, when we consider only the capacity $\mathfrak{g}^{\leq 1}_{k}$ instead of the higher capacities $\mathfrak{g}^{\leq \ell}_{k}$). In this case, to make comparisons with $S^1$-equivariant symplectic homology simpler, we define the grading instead by $\deg(\gamma) \coloneqq \conleyzehnder(\gamma)$, which implies that $\partial$ has degree $-1$. \end{remark} \begin{definition} \label{def:action filtration lch} For every $a \in \R$, we denote by $\mathcal{A}^{\leq a} CC(X)[-1]$ the submodule of $CC(X)[-1]$ generated by the good Reeb orbits $\gamma$ with action $\mathcal{A}(\gamma) \leq a$. We call this filtration the \textbf{action filtration} of $CC[-1]$. \end{definition} In the next lemma, we check that this filtration is compatible with the $\mathcal{L}_{\infty}$-structure. \begin{lemma} \label{lem:action filtration of lch} $\ell^k ( \mathcal{A}^{\leq a_1} CC(X) \odot \cdots \odot \mathcal{A}^{\leq a_k} CC(X) ) \subset \mathcal{A}^{\leq a_1 + \cdots + a_k} CC(X)$. \end{lemma} \begin{proof} Let $\gamma_1^+, \ldots, \gamma_k^+, \eta$ be good Reeb orbits such that \begin{IEEEeqnarray*}{rCls+x*} \mathcal{A}(\gamma_i^+) & \leq & a_i, \\ \p{<}{}{\ell^k(\gamma_1^+ \odot \cdots \odot \gamma^+_k), \eta} & \neq & 0. \end{IEEEeqnarray*} We wish to show that $\mathcal{A}(\eta) \leq a_1 + \cdots + a_k$. Since $\p{<}{}{\ell^k(\gamma_1^+ \odot \cdots \odot \gamma^+_k), \eta} \neq 0$ and by assumption on the virtual perturbation scheme, there exists a tuple of Reeb orbits $\Gamma^-$ and a (nontrivial) punctured $J$-holomorphic sphere in $\R \times \partial X$ with asymptotes $\Gamma^\pm$, such that $\eta \in \Gamma^-$ and $\Gamma^+ \subset (\gamma^+_1,\ldots,\gamma^+_k)$. 
Then, \begin{IEEEeqnarray*}{rCls+x*} \mathcal{A}(\eta) & \leq & \mathcal{A}(\Gamma^-) & \quad [\text{since $\eta \in \Gamma^-$}] \\ & \leq & \mathcal{A}(\Gamma^+) & \quad [\text{by \cref{lem:action energy for holomorphic}}] \\ & \leq & \mathcal{A}(\gamma^+_1, \ldots, \gamma^+_k) & \quad [\text{since $\Gamma^+ \subset (\gamma^+_1,\ldots,\gamma^+_k)$}] \\ & \leq & a_1 + \cdots + a_k & \quad [\text{by definition of the action of a tuple}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{definition} \label{def:augmentation map} Consider the bar complex $(\mathcal{B}(CC(X)[-1]), \hat{\ell})$. For each $k \in \Z_{\geq 1}$, we define an augmentation ${\epsilon}_k \colon \mathcal{B}(CC(X)[-1]) \longrightarrow \Q$ as follows. Choose $x \in \itr X$, a symplectic divisor $D$ at $x$, and an almost complex structure $J \in \mathcal{J}(X,D)$. Then, for every tuple of good Reeb orbits $\Gamma = (\gamma_1, \ldots, \gamma_p)$ define ${\epsilon}_k (\gamma_1 \odot \cdots \odot \gamma_p)$ to be the virtual count of $J$-holomorphic curves in $\hat{X}$ which are positively asymptotic to $\Gamma$ and have contact order $k$ to $D$ at $x$ (see \cref{fig:augmentation of lch}). \end{definition} \begin{figure}[htp] \centering \begin{tikzpicture} [ scale = 0.5, help/.style = {very thin, draw = black!50}, curve/.style = {thick} ] \tikzmath{ \rx = 0.75; \ry = 0.25; } \node[anchor=west] at (13,3) {$\hat{X}$}; \draw (0,3) -- (0,6) -- (12,6) -- (12,3); \draw (0,3) .. controls (0,-1) and (12,-1) .. (12,3); \coordinate (G1) at (4,6); \coordinate (G2) at (8,6); \coordinate (L) at (-\rx,0); \coordinate (R) at (+\rx,0); \coordinate (G1L) at ($ (G1) + (L) $); \coordinate (G2L) at ($ (G2) + (L) $); \coordinate (G1R) at ($ (G1) + (R) $); \coordinate (G2R) at ($ (G2) + (R) $); \coordinate (P) at (7,3); \coordinate (D) at (2,1); \draw[curve] (G1) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_1$}; \draw[curve] (G2) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_2$}; \fill (P) circle (2pt) node[anchor = north west] {$x$}; \draw[curve] ($ (P) - (D) $) -- ( $ (P) + (D) $ ) node[anchor = west] {$D$}; \draw[curve] (G1R) .. controls ($ (G1R) - (0,2) $) and ($ (G2L) - (0,2) $) .. (G2L); \draw[curve] (G1L) .. controls ($ (G1L) - (0,2) $) and ($ (P) - (D) $) .. (P); \draw[curve] (G2R) .. controls ($ (G2R) - (0,2) $) and ($ (P) + (D) $) .. (P); \end{tikzpicture} \caption{A holomorphic curve contributing to the count $\epsilon_k(\gamma_1 \odot \gamma_2)$} \label{fig:augmentation of lch} \end{figure} By assumption on the virtual perturbation scheme, ${\epsilon}_k$ is an augmentation, i.e. ${\epsilon}_k \circ \hat{\ell} = 0$. In addition, ${\epsilon}_k$ is independent (up to chain homotopy) of the choices of $x, D, J$. \section{Higher symplectic capacities} Here we define the symplectic capacities $\mathfrak{g}^{\leq \ell}_k$ from \cite{siegelHigherSymplecticCapacities2020}. We will prove the usual properties of symplectic capacities (see \cref{thm:properties of hsc}), namely monotonicity and conformality. In addition, we prove that the value of the capacities $\mathfrak{g}^{\leq \ell}_k$ can be represented by the action of a tuple of Reeb orbits. In \cref{rmk:computations using reeb orbits property} we show how this property could in principle be combined with results from \cite{guttSymplecticCapacitiesPositive2018} to compare the capacities $\mathfrak{g}^{\leq 1}_k(X_{\Omega})$ and $\cgh{k}(X_{\Omega})$ when $X_{\Omega}$ is a convex or concave toric domain.
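Before giving the definition, we spell out the complexes which appear in it: by \cref{def:word length filtration} and \cref{def:action filtration lch}, the subcomplex $\mathcal{A}^{\leq a} \mathcal{B}^{\leq \ell}(CC(X)[-1])$ of the bar complex is spanned by words $\gamma_1 \odot \cdots \odot \gamma_p$ of good Reeb orbits with $p \leq \ell$ and $\mathcal{A}(\gamma_1) + \cdots + \mathcal{A}(\gamma_p) \leq a$. Informally, then, $\mathfrak{g}^{\leq \ell}_k(X)$ is the smallest total action of such a word which the augmentation $\epsilon_k$ detects in homology.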
\begin{definition}[{\cite[Section 6.1]{siegelHigherSymplecticCapacities2020}}] \label{def:capacities glk} Let $k, \ell \in \Z_{\geq 1}$ and $(X,\lambda)$ be a nondegenerate Liouville domain. The \textbf{higher symplectic capacities} of $X$ are given by \begin{IEEEeqnarray*}{c+x*} \mathfrak{g}^{\leq \ell}_k(X) \coloneqq \inf \{ a > 0 \mid \epsilon_k \colon H(\mathcal{A}^{\leq a} \mathcal{B}^{\leq \ell}(CC(X)[-1])) \longrightarrow \Q \text{ is nonzero} \}. \end{IEEEeqnarray*} \end{definition} The capacities $\mathfrak{g}^{\leq \ell}_{k}$ will be useful to us because they have similarities with the McDuff--Siegel capacities $\tilde{\mathfrak{g}}^{\leq \ell}_k$, but also with the Gutt--Hutchings capacities $\cgh{k}$ (for $\ell = 1$). More specifically: \begin{enumerate} \item Both $\mathfrak{g}^{\leq \ell}_{k}$ and $\tilde{\mathfrak{g}}^{\leq \ell}_k$ are related to the energy of holomorphic curves in $X$ which are asymptotic to a word of $p \leq \ell$ Reeb orbits and satisfy a tangency constraint. In \cref{thm:g tilde vs g hat}, we will actually show that $\tilde{\mathfrak{g}}^{\leq \ell}_k(X) \leq {\mathfrak{g}}^{\leq \ell}_k(X)$. The capacities $\mathfrak{g}^{\leq \ell}_k$ can be thought of as the SFT counterparts of $\tilde{\mathfrak{g}}^{\leq \ell}_k$, or alternatively the capacities $\tilde{\mathfrak{g}}^{\leq \ell}_k$ can be thought of as the counterparts of $\mathfrak{g}^{\leq \ell}_k$ whose definition does not require the holomorphic curves to be regular. \item Both $\mathfrak{g}^{\leq 1}_{k}$ and $\cgh{k}$ are defined in terms of a map in homology being nonzero. In the case of $\mathfrak{g}^{\leq 1}_{k}$, we consider the linearized contact homology, and in the case of $\cgh{k}$ the invariant in question is $S^1$-equivariant symplectic homology. Taking into consideration the Bourgeois--Oancea isomorphism (see \cite{bourgeoisEquivariantSymplecticHomology2016}) between linearized contact homology and positive $S^1$-equivariant symplectic homology, one can think of $\mathfrak{g}^{\leq 1}_{k}$ and $\cgh{k}$ as restatements of one another under this isomorphism. This is the idea behind the proof of \cref{thm:g hat vs gh}, where we show that $\mathfrak{g}^{\leq 1}_{k}(X) = \cgh{k}(X)$. \end{enumerate} \begin{remark} \label{rmk:novikov coefficients} In the case where $X$ is only an exact symplectic manifold instead of a Liouville domain, the proof of \cref{lem:action filtration of lch} does not work. In this case, we do not have access to an action filtration on $CC(X)$. However, it is possible to define linearized contact homology with coefficients in a Novikov ring $\Lambda_{\geq 0}$, in which case a coefficient in $\Lambda_{\geq 0}$ encodes the energy of a holomorphic curve. This is the approach taken in \cite{siegelHigherSymplecticCapacities2020} to define the capacities $\mathfrak{g}^{\leq \ell}_{k}$. It is not obvious that the definition of $\mathfrak{g}^{\leq \ell}_k$ we give and the one in \cite{siegelHigherSymplecticCapacities2020} are equivalent. However, \cref{def:capacities glk} seems to be the natural analogue when we have access to an action filtration, and in addition the definition we provide will be enough for our purposes. 
\end{remark} \begin{theorem} \label{thm:properties of hsc} The functions ${\mathfrak{g}}^{\leq \ell}_k$ satisfy the following properties, for all nondegenerate Liouville domains $(X,\lambda_X)$ and $(Y,\lambda_Y)$ of the same dimension: \begin{description} \item[(Monotonicity)] If $X \longrightarrow Y$ is an exact symplectic embedding then $\mathfrak{g}^{\leq \ell}_k(X) \leq \mathfrak{g}^{\leq \ell}_k(Y)$. \item[(Conformality)] If $\mu > 0$ then ${\mathfrak{g}}^{\leq \ell}_k(X, \mu \lambda_X) = \mu \, {\mathfrak{g}}^{\leq \ell}_k(X, \lambda_X)$. \item[(Reeb orbits)] If $\pi_1(X) = 0$, $2 c_1(TX) = 0$ and ${\mathfrak{g}}^{\leq \ell}_k(X) < + \infty$, then there exists a tuple $\Gamma = (\gamma_1, \ldots, \gamma_p)$ of Reeb orbits such that \begin{enumerate} \item ${\mathfrak{g}}^{\leq \ell}_k(X) = \mathcal{A}(\Gamma)$; \item $\conleyzehnder(\Gamma) = p (n - 3) + 2 (k + 1)$; \item $1 \leq p \leq \ell$. \end{enumerate} \end{description} \end{theorem} \begin{proof} We prove monotonicity. If $(X, \lambda_X) \longrightarrow (Y, \lambda_Y)$ is an exact symplectic embedding, then it is possible to define a Viterbo transfer map $H(\mathcal{B}(CC(Y)[-1])) \longrightarrow H(\mathcal{B}(CC(X)[-1]))$. This map respects the action filtration as well as the augmentation maps, i.e. the diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} H(\mathcal{A}^{\leq a} \mathcal{B}^{\leq \ell} (CC(Y)[-1])) \ar[d] \ar[r] & H(\mathcal{B} (CC(Y)[-1])) \ar[d] \ar[r, "{\epsilon}_{k}^Y"] & \Q \ar[d, equals] \\ H(\mathcal{A}^{\leq a} \mathcal{B}^{\leq \ell} (CC(X)[-1])) \ar[r] & H(\mathcal{B} (CC(X)[-1])) \ar[r, swap, "{\epsilon}_{k}^X"] & \Q \end{tikzcd} \end{IEEEeqnarray*} commutes. The result then follows by definition of $\mathfrak{g}^{\leq \ell}_k$. We prove conformality. If $\gamma$ is a Reeb orbit of $(\partial X, \lambda|_{\partial X})$ of action $\mathcal{A}_{\lambda}(\gamma)$ then $\gamma$ is a Reeb orbit of $(\partial X, \mu \lambda|_{\partial X})$ of action $\mathcal{A}_{\mu \lambda}(\gamma) = \mu \mathcal{A}_{\lambda}(\gamma)$. Therefore, there is a commutative diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} H(\mathcal{A}^{\leq a} \mathcal{B}^{\leq \ell} (CC(X, \lambda)[-1])) \ar[d, equals] \ar[r] & H(\mathcal{B} (CC(X, \lambda)[-1])) \ar[d, equals] \ar[r, "{\epsilon}_{k}^{\lambda}"] & \Q \ar[d, equals] \\ H(\mathcal{A}^{\leq \mu a} \mathcal{B}^{\leq \ell} (CC(X, \mu \lambda)[-1])) \ar[r] & H(\mathcal{B} (CC(X, \mu \lambda)[-1])) \ar[r, swap, "{\epsilon}_{k}^{\mu \lambda}"] & \Q \end{tikzcd} \end{IEEEeqnarray*} Again, the result follows by definition of $\mathfrak{g}^{\leq \ell}_{k}$. We prove the Reeb orbits property. Choose a point $x \in \itr X$, a symplectic divisor $D$ through $x$ and an almost complex structure $J \in \mathcal{J}(X,D)$. Consider the bar complex $\mathcal{B}^{\leq \ell} (CC(X)[-1])$, computed with respect to $J$.
By assumption and definition of $\mathfrak{g}^{\leq \ell}_{k}$, \begin{IEEEeqnarray*}{rCls+x*} + \infty & > & {\mathfrak{g}}^{\leq \ell}_k(X) \\ & = & \inf \{ a > 0 \mid \epsilon_k \colon H(\mathcal{A}^{\leq a} \mathcal{B}^{\leq \ell}(CC(X)[-1])) \longrightarrow \Q \text{ is nonzero} \} \\ & = & \inf \{ a > 0 \mid \text{there exists } \beta \in H(\mathcal{A}^{\leq a} \mathcal{B}^{\leq \ell}(CC(X)[-1])) \text{ such that } {\epsilon}_k (\beta) \neq 0 \} \\ & = & \inf \{ \mathcal{A}(\beta) \mid \beta \in H(\mathcal{B}^{\leq \ell}(CC(X)[-1])) \text{ such that } {\epsilon}_k (\beta) \neq 0 \}, \end{IEEEeqnarray*} where $\mathcal{A}(\beta)$ is given as in \cref{rmk:notation for tuples of orbits}. Since the action spectrum of $(\partial X, \lambda|_{\partial X})$ is a discrete subset of $\R$, we conclude that in the above expression the infimum is a minimum. More precisely, there exists $\beta \in H(\mathcal{B}^{\leq \ell}(CC(X)[-1]))$ such that $\epsilon_k(\beta) \neq 0$ and ${\mathfrak{g}}^{\leq \ell}_k(X) = \mathcal{A}(\beta)$. The element $\beta$ can be written as a finite linear combination of words of Reeb orbits $\Gamma = (\gamma_1, \ldots, \gamma_p)$, where every word has length $p \leq \ell$ and Conley--Zehnder index equal to $p(n-3) + 2(k+1)$. Here, the statement about the Conley--Zehnder index follows from the computation \begin{IEEEeqnarray*}{rCls+x*} 0 & = & \operatorname{virdim} \overline{\mathcal{M}}^J_X(\Gamma)\p{<}{}{\mathcal{T}^{(k)}x} \\ & = & (n-3)(2 - p) + \conleyzehnder(\Gamma) - 2n - 2k + 4 \\ & = & \conleyzehnder(\Gamma) - p(n-3) - 2(k+1). \end{IEEEeqnarray*} One of the words in this linear combination is such that $\mathcal{A}(\Gamma) = \mathcal{A}(\beta) = {\mathfrak{g}}^{\leq \ell}_k(X)$. \end{proof} \begin{remark} \label{rmk:computations using reeb orbits property} In \cite[Theorem 1.6]{guttSymplecticCapacitiesPositive2018} (respectively \cite[Theorem 1.14]{guttSymplecticCapacitiesPositive2018}) Gutt--Hutchings give formulas for $\cgh{k}$ of a convex (respectively concave) toric domain. However, the given proofs only depend on specific properties of the Gutt--Hutchings capacity and not on the definition of the capacity itself. These properties are monotonicity, conformality, a Reeb orbits property similar to the one of \cref{thm:properties of hsc}, and finally that the capacity be finite on star-shaped domains. If we showed that $\mathfrak{g}^{\leq 1}_{k}$ is finite on star-shaped domains, we would conclude that $\mathfrak{g}^{\leq 1}_{k} = \cgh{k}$ on convex or concave toric domains, because in this case both capacities would be given by the formulas in the previously mentioned theorems. Showing that $\mathfrak{g}^{\leq 1}_{k}$ is finite boils down to showing that the augmentation map is nonzero, which we will do in \cref{sec:augmentation map of an ellipsoid}. However, in \cref{thm:g hat vs gh} we will use this information in combination with the Bourgeois--Oancea isomorphism to conclude that $\mathfrak{g}^{\leq 1}_{k}(X) = \cgh{k}(X)$ for any nondegenerate Liouville domain $X$. Therefore, the proof suggested above will not be necessary, although it is a proof of $\mathfrak{g}^{\leq 1}_{k}(X) = \cgh{k}(X)$ alternative to that of \cref{thm:g hat vs gh} when $X$ is a convex or concave toric domain. 
\end{remark} \section{Cauchy--Riemann operators on bundles} \label{sec:cr operators} In order to show that $\mathfrak{g}^{\leq 1}_{k}(X) = \cgh{k}(X)$, we will need to show that the augmentation map of a small ellipsoid in $X$ is nonzero (see the proof of \cref{thm:g hat vs gh}). Recall that the augmentation map counts holomorphic curves satisfying a tangency constraint. In \cref{sec:augmentation map of an ellipsoid}, we will explicitly compute how many such holomorphic curves there are. However, a count obtained by explicit methods will not necessarily agree with the virtual count that appears in the definition of the augmentation map. By assumption on the virtual perturbation scheme, it does agree if the relevant moduli space is transversely cut out. Therefore, in this section and the next we will describe the framework that allows us to show that this moduli space is transversely cut out. This section deals with the theory of real linear Cauchy--Riemann operators on line bundles, and our main reference is \cite{wendlAutomaticTransversalityOrbifolds2010}. The outline is as follows. First, we review the basic definitions about real linear Cauchy--Riemann operators (\cref{def:real linear cauchy riemann operator}). By the Riemann-Roch theorem (\cref{thm:riemann roch with punctures}), these operators are Fredholm and their index can be computed from a number of topological quantities associated to them. We will make special use of a criterion by Wendl (\cref{prp:wen D surjective injective criterion}) which guarantees that a real linear Cauchy--Riemann operator defined on a complex line bundle is surjective. For our purposes, we will also need an adaptation of this result to the case where the operator is accompanied by an evaluation map, which we state in \cref{lem:D plus E is surjective}. We now state the assumptions for the rest of this section. Let $(\Sigma, j)$ be a compact Riemann surface without boundary, of genus $g$, with sets of positive and negative punctures $\mathbf{z}^{\pm} = \{z^{\pm}_1,\ldots,z^{\pm}_{p^{\pm}}\}$. Denote $\mathbf{z} = \mathbf{z}^{+} \cup \mathbf{z}^{-}$ and $\dot{\Sigma} = \Sigma \setminus \mathbf{z}$. Choose cylindrical coordinates $(s,t)$ near each puncture $z \in \mathbf{z}$ and denote $\mathcal{U}_z \subset \dot{\Sigma}$ the domain of the coordinates $(s,t)$. \begin{definition} \label{def:asymptotically hermitian vector bundle} An \textbf{asymptotically Hermitian vector bundle} over $\dot{\Sigma}$ is given by a complex vector bundle $(E, J) \longrightarrow \dot{\Sigma}$ and for each $z \in \mathbf{z}$ a Hermitian vector bundle $(E_z, J_z, \omega_z) \longrightarrow S^1$ together with a complex vector bundle isomorphism $\Phi_z^{} \colon \pi^*_z E_z^{} \longrightarrow \iota_z^* E$, where $\iota_z \colon \mathcal{U}_z \longrightarrow \dot{\Sigma}$ is the inclusion and $\pi_{z} \colon \mathcal{U}_z \longrightarrow S^1$ is given by $\pi_{z}(w) = t(w)$: \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} E_z^{} \ar[d] & \pi_z^* E_z^{} \ar[r, "\Phi_z"] \ar[d] \ar[l] & \iota_z^* E \ar[r] \ar[d] & E \ar[d] \\ S^1 & \mathcal{U}_z \ar[r, equals] \ar[l, "\pi_z"] & \mathcal{U}_z \ar[r, swap, "\iota_z"] & \dot{\Sigma} \end{tikzcd} \end{IEEEeqnarray*} \end{definition} From now until the end of this section, we will assume that $E$ is an asymptotically Hermitian vector bundle over $\dot{\Sigma}$ of complex rank $n$. 
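For instance, the trivial bundle $E = \dot{\Sigma} \times \C^n$ with its standard Hermitian structure is asymptotically Hermitian: for every puncture $z \in \mathbf{z}$ one can take $E_z = S^1 \times \C^n$ with its standard Hermitian structure and let $\Phi_z \colon \pi_z^* E_z = \mathcal{U}_z \times \C^n \longrightarrow \iota_z^* E = \mathcal{U}_z \times \C^n$ be the identity. The main example in later sections will be $E = u^* T \hat{X}$ for an asymptotically cylindrical map $u \colon \dot{\Sigma} \longrightarrow \hat{X}$ (see \cref{sec:functional analytic setup}).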
\begin{definition} \label{def:asymptotic trivialization} An \textbf{asymptotic trivialization} of an asymptotically Hermitian vector bundle $E$ is a family $\tau = (\tau_z)_{z \in \mathbf{z}}$ of unitary trivializations $\tau_z$ of $(E_z, J_z, \omega_z)$. By \cref{def:asymptotically hermitian vector bundle}, every such $\tau_z$ defines a complex trivialization of $\iota^*_z E$. If $\tau$ is an asymptotic trivialization, we will typically denote each $\tau_z$ also by $\tau$. \end{definition} \begin{definition} \label{def:sobolev spaces} Let $E$ be an asymptotically Hermitian vector bundle over $\dot{\Sigma}$, together with an asymptotic trivialization $\tau$. If $\eta$ is a section of $E$ and $z$ is a puncture, denote by $\eta_z \colon Z^{\pm} \longrightarrow \R^{2n}$ the map $\eta$ written with respect to the trivialization $\tau$ and cylindrical coordinates near $z$. The \textbf{Sobolev space} of sections of $E$ is \begin{IEEEeqnarray*}{c+x*} W^{k,p}(E) \coloneqq \{ \eta \in W^{k,p}_{\mathrm{loc}}(E) \mid \eta_z \in W^{k,p}(Z^{\pm}, \R^{2n}) \text{ for every } z \in \mathbf{z}^{\pm} \}. \end{IEEEeqnarray*} If $\delta > 0$, the \textbf{weighted Sobolev space} of sections of $E$ is \begin{IEEEeqnarray*}{c+x*} W^{k,p,\delta}(E) \coloneqq \{ \eta \in W^{k,p}_{\mathrm{loc}}(E) \mid e^{\pm \delta s} \eta_z \in W^{k,p}(Z^{\pm}, \R^{2n}) \text{ for every } z \in \mathbf{z}^{\pm} \}. \end{IEEEeqnarray*} \end{definition} \begin{definition} \label{def:real linear cauchy riemann operator} A \textbf{real linear Cauchy--Riemann operator} is a map \begin{IEEEeqnarray*}{c+x*} \mathbf{D} \colon W^{1,p}(\dot{\Sigma}, E) \longrightarrow L^p(\dot{\Sigma}, \Hom^{0,1}(T \dot{\Sigma}, E)) \end{IEEEeqnarray*} such that $\mathbf{D}$ is linear as a map of vector spaces over $\R$ and $\mathbf{D}$ satisfies the Leibniz rule, i.e. if $v \in W^{1,p}(\dot{\Sigma}, E)$ and $f \in C^{\infty}(\dot{\Sigma}, \R)$ then $\mathbf{D}(f v) = f \mathbf{D} v + v \otimes \overline{\partial} f$. \end{definition} We now consider the asymptotic operators of $\mathbf{D}$. Their relevance comes from the fact that the Fredholm index of $\mathbf{D}$ is determined by the asymptotic operators at the punctures. \begin{definition} An \textbf{asymptotic operator} at $z \in \mathbf{z}$ is a bounded linear operator $\mathbf{A} \colon H^1(E_z) \longrightarrow L^2(E_z)$ such that when written with respect to a unitary trivialization of $E_z$, $\mathbf{A}$ takes the form \begin{IEEEeqnarray*}{rrCl} & H^1(S^1,\R^{2n}) & \longrightarrow & L^2(S^1,\R^{2n}) \\ & \eta & \longmapsto & - J_0 \dot{\eta} - S \eta, \end{IEEEeqnarray*} where $S \colon S^1 \longrightarrow \End(\R^{2n})$ is a loop of symmetric $2n \times 2n$ matrices. We say that $\mathbf{A}$ is nondegenerate if its spectrum does not contain $0$. \end{definition} \begin{definition} Let $\mathbf{D}$ be a real linear Cauchy--Riemann operator and $\mathbf{A}$ be an asymptotic operator at $z \in \mathbf{z}$. We say that $\mathbf{D}$ is \textbf{asymptotic} to $\mathbf{A}$ at $z$ if the expressions for $\mathbf{D}$ and $\mathbf{A}$ with respect to an asymptotic trivialization near $z$ are of the form \begin{IEEEeqnarray*}{rCls+x*} (\mathbf{D} \xi)(s,t) & = & \partial_s \xi (s,t) + J_0 \partial_t \xi (s,t) + S(s,t) \xi(s,t) \\ (\mathbf{A} \eta)(t) & = & - J_0 \partial_t \eta (t) - S(t) \eta(t), \end{IEEEeqnarray*} where $S(s,t)$ converges to $S(t)$ uniformly as $s \to \pm \infty$. \end{definition} \begin{remark} Suppose that $E$ splits as a direct sum of complex vector bundles $E = E_1 \oplus E_2$. 
In this case, there are canonical inclusions \begin{IEEEeqnarray*}{rCls+x*} W^{1,p}(\dot{\Sigma}, E_i) & \subset & W^{1,p}(\dot{\Sigma}, E), \\ L^p(\dot{\Sigma}, \Hom^{0,1}(T \dot{\Sigma}, E_i)) & \subset & L^p(\dot{\Sigma}, \Hom^{0,1}(T \dot{\Sigma}, E)) \end{IEEEeqnarray*} for $i = 1,2$, and we have the following decompositions: \begin{IEEEeqnarray*}{rCls+x*} W^{1,p}(\dot{\Sigma}, E) & = & W^{1,p}(\dot{\Sigma}, E_1) \oplus W^{1,p}(\dot{\Sigma}, E_2), \\ L^p(\dot{\Sigma}, \Hom^{0,1}(T \dot{\Sigma}, E)) & = & L^p(\dot{\Sigma}, \Hom^{0,1}(T \dot{\Sigma}, E_1)) \oplus L^p(\dot{\Sigma}, \Hom^{0,1}(T \dot{\Sigma}, E_2)). \end{IEEEeqnarray*} We can write $\mathbf{D}$ with respect to these decompositions as a block matrix: \begin{IEEEeqnarray*}{c+x*} \mathbf{D} = \begin{bmatrix} \mathbf{D}_{11} & \mathbf{D}_{12} \\ \mathbf{D}_{21} & \mathbf{D}_{22} \end{bmatrix}. \end{IEEEeqnarray*} By \cite[Exercise 7.8]{wendlLecturesSymplecticField2016}, the diagonal terms $\mathbf{D}_{11}$ and $\mathbf{D}_{22}$ are real linear Cauchy--Riemann operators, while the off diagonal terms $\mathbf{D}_{12}$ and $\mathbf{D}_{21}$ are tensorial. \end{remark} Let $\mathbf{D}$ be a real linear Cauchy--Riemann operator and for every puncture $z \in \mathbf{z}$ let $\mathbf{A}_z$ be a nondegenerate asymptotic operator at $z$. By the Riemann-Roch theorem with punctures (\cref{thm:riemann roch with punctures}), $\mathbf{D}$ is a Fredholm operator. We now explain how to compute the Fredholm index of $\mathbf{D}$. Choose an asymptotic trivialization $\tau$ as in \cref{def:asymptotic trivialization}. First, recall that the \textbf{Euler characteristic} of $\dot{\Sigma}$ is given by $\chi(\dot{\Sigma}) = 2 - 2 g - \# \mathbf{z}$, where $g$ is the genus of $\Sigma$. \begin{definition}[{\cite[Definition 5.1]{wendlLecturesSymplecticField2016}}] \label{def:relative first chern number} Let $S$ be a compact oriented surface with boundary and $(E,J)$ be a complex vector bundle over $S$. Let $\tau$ be a complex trivialization of $E|_{\partial S}$. The \textbf{relative first Chern number} of $E$ with respect to $\tau$, denoted $c_1^{\tau}(E) \in \Z$, is defined by the following properties. \begin{enumerate} \item If $E$ has complex rank $1$, then $c_1^{\tau}(E)$ is the signed count of zeros of a generic smooth section $\eta \colon S \longrightarrow E$ such that $\tau \circ \eta|_{\partial S} \colon \partial S \longrightarrow \C$ is constant. \item If $E_1$ and $E_2$ are complex vector bundles over $S$ with trivializations $\tau_1$ and $\tau_2$ over $\partial S$, then $c_1^{\tau_1 \oplus \tau_2}(E_1 \oplus E_2) = c_1^{\tau_1}(E_1) + c_1^{\tau_2}(E_2)$. \end{enumerate} \end{definition} The definition of relative first Chern number extends to the class of asymptotically Hermitian vector bundles over punctured surfaces. \begin{definition} The \textbf{Conley--Zehnder index} of an asymptotic operator $\mathbf{A}_z$ is given as follows. Let $(\mathbf{A}_z \eta)(t) = -J_0 \partial_t \eta(t) - S(t) \eta(t)$ be the expression of $\mathbf{A}_z$ with respect to $\tau$. Let $\Psi \colon [0,1] \longrightarrow \operatorname{Sp}(2n)$ be the unique path of symplectic matrices such that \begin{IEEEeqnarray*}{rCls+x*} \Psi(0) & = & \id_{\R^{2n}}, \\ \dot{\Psi}(t) & = & J_0 S(t) \Psi(t). \end{IEEEeqnarray*} Since $\mathbf{A}_z$ is nondegenerate, $\Psi$ is an element of $\operatorname{SP}(n)$. Finally, define $\conleyzehnder^{\tau}(\mathbf{A}_z) \coloneqq \conleyzehnder(\Psi)$.
\end{definition} \begin{theorem}[Riemann-Roch, {\cite[Theorem 5.4]{wendlLecturesSymplecticField2016}}] \label{thm:riemann roch with punctures} The operator $\mathbf{D}$ is Fredholm and its (real) Fredholm index is given by \begin{IEEEeqnarray*}{c+x*} \operatorname{ind} \mathbf{D} = n \chi (\dot{\Sigma}) + 2 c_1^{\tau}(E) + \sum_{z \in \mathbf{z}^+} \conleyzehnder^{\tau}(\mathbf{A}_z) - \sum_{z \in \mathbf{z}^-} \conleyzehnder^{\tau}(\mathbf{A}_z). \end{IEEEeqnarray*} \end{theorem} For the rest of this section, we restrict ourselves to the case where $n = \operatorname{rank}_{\C} E = 1$. We retain the assumption that $\mathbf{D}$ is a real linear Cauchy--Riemann operator and $\mathbf{A}_{z}$ is a nondegenerate asymptotic operator for every puncture $z \in \mathbf{z}$. Our goal is to state a criterion that guarantees surjectivity of $\mathbf{D}$. This criterion depends on other topological quantities which we now define. For every $\lambda$ in the spectrum of $\mathbf{A}_z$, let $w^{\tau}(\lambda)$ be the winding number of any nontrivial section in the $\lambda$-eigenspace of $\mathbf{A}_z$ (computed with respect to the trivialization $\tau$). Define the \textbf{winding numbers} \begin{IEEEeqnarray*}{rClls+x*} \alpha_-^{\tau}(\mathbf{A}_z) & \coloneqq & \max & \{ w^{\tau}(\lambda) \mid \lambda < 0 \text{ is in the spectrum of }\mathbf{A}_z \}, \\ \alpha_+^{\tau}(\mathbf{A}_z) & \coloneqq & \min & \{ w^{\tau}(\lambda) \mid \lambda > 0 \text{ is in the spectrum of }\mathbf{A}_z \}. \end{IEEEeqnarray*} The \textbf{parity} (the reason for this name is Equation \eqref{eq:cz winding parity} below) and associated sets of even and odd punctures are given by \begin{IEEEeqnarray*}{rCls+x*} p(\mathbf{A}_{z}) & \coloneqq & \alpha_{+}^{\tau}(\mathbf{A}_z) - \alpha^{\tau}_{-}(\mathbf{A}_z) \in \{0,1\}, \\ \mathbf{z}_0 & \coloneqq & \{ z \in \mathbf{z} \mid p(\mathbf{A}_z) = 0 \}, \\ \mathbf{z}_1 & \coloneqq & \{ z \in \mathbf{z} \mid p(\mathbf{A}_z) = 1 \}. \end{IEEEeqnarray*} Finally, the \textbf{adjusted first Chern number} is given by \begin{IEEEeqnarray*}{c+x*} c_1(E,\mathbf{A}_{\mathbf{z}}) = c_1^{\tau}(E) + \sum_{z \in \mathbf{z}^+} \alpha_-^{\tau}(\mathbf{A}_z) - \sum_{z \in \mathbf{z}^-} \alpha_+^{\tau}(\mathbf{A}_z). \end{IEEEeqnarray*} These quantities satisfy the following equations. \begin{IEEEeqnarray}{rCls+x*} \conleyzehnder^{\tau}(\mathbf{A}_z) & = & 2 \alpha_{-}^{\tau}(\mathbf{A_z}) + p(\mathbf{A}_z) = 2 \alpha_{+}^{\tau}(\mathbf{A_z}) - p(\mathbf{A}_z), \plabel{eq:cz winding parity} \\ 2 c_1 (E,\mathbf{A}_{\mathbf{z}}) & = & \operatorname{ind} \mathbf{D} - 2 + 2g + \# \mathbf{z}_0. \plabel{eq:chern and index} \end{IEEEeqnarray} \begin{proposition}[{\cite[Proposition 2.2]{wendlAutomaticTransversalityOrbifolds2010}}] \phantomsection\label{prp:wen D surjective injective criterion} \begin{enumerate} \item[] \item If $\operatorname{ind} \mathbf{D} \leq 0$ and $c_1(E, \mathbf{A}_{\mathbf{z}}) < 0$ then $\mathbf{D}$ is injective. \item If $\operatorname{ind} \mathbf{D} \geq 0$ and $c_1(E, \mathbf{A}_{\mathbf{z}}) < \operatorname{ind} \mathbf{D}$ then $\mathbf{D}$ is surjective. \end{enumerate} \end{proposition} We will apply the proposition above to moduli spaces of punctured spheres which have no even punctures. The following lemma is just a restatement of the previous proposition in this simpler case. \begin{lemma} \label{lem:conditions for D surjective genus zero} Assume that $g = 0$ and $\# \mathbf{z}_0 = 0$.
Then, \begin{enumerate} \item If $\operatorname{ind} \mathbf{D} \leq 0$ then $\mathbf{D}$ is injective. \item If $\operatorname{ind} \mathbf{D} \geq 0$ then $\mathbf{D}$ is surjective. \end{enumerate} \end{lemma} \begin{proof} Since $g = 0$ and $\# \mathbf{z}_0 = 0$, Equation \eqref{eq:chern and index} gives $2 c_1(E, \mathbf{A}_{\mathbf{z}}) = \operatorname{ind} \mathbf{D} - 2$, so both statements follow from \cref{prp:wen D surjective injective criterion}. \end{proof} We now wish to deal with the case where $\mathbf{D}$ is taken together with an evaluation map (see \cref{lem:D plus E is surjective} below). The tools we need to prove this result are explained in the following remark. \begin{remark} \label{rmk:formulas for xi in ker nonzero} Suppose that $\ker \mathbf{D} \neq \{0\}$. If $\xi \in \ker \mathbf{D} \setminus \{0\}$, it is possible to show that $\xi$ has only a finite number of zeros, all of positive order, i.e. if $w$ is a zero of $\xi$ then $\operatorname{ord}(\xi;w) > 0$. For every $z \in \mathbf{z}$, there is an \textbf{asymptotic winding number} $\operatorname{wind}_z^{\tau}(\xi) \in \Z$, which has the properties \begin{IEEEeqnarray*}{rCls+x*} z \in \mathbf{z}^+ & \Longrightarrow & \operatorname{wind}_z^{\tau}(\xi) \leq \alpha_-^{\tau}(\mathbf{A}_z), \\ z \in \mathbf{z}^- & \Longrightarrow & \operatorname{wind}_z^{\tau}(\xi) \geq \alpha_+^{\tau}(\mathbf{A}_z). \end{IEEEeqnarray*} Define the \textbf{asymptotic vanishing} of $\xi$, denoted $Z_{\infty}(\xi)$, and the \textbf{count of zeros}, denoted $Z(\xi)$, by \begin{IEEEeqnarray*}{rCls+x*} Z_{\infty}(\xi) & \coloneqq & \sum_{z \in \mathbf{z}^+} \p{}{1}{\alpha_-^{\tau}(\mathbf{A}_z) - \operatorname{wind}_z^{\tau}(\xi)} + \sum_{z \in \mathbf{z}^-} \p{}{1}{\operatorname{wind}_z^{\tau}(\xi) - \alpha_+^{\tau}(\mathbf{A}_z)} \in \Z_{\geq 0}, \\ Z(\xi) & \coloneqq & \sum_{w \in \xi^{-1}(0)} \operatorname{ord}(\xi;w) \in \Z_{\geq 0}. \end{IEEEeqnarray*} In this case, we have the formula (see \cite[Equation 2.7]{wendlAutomaticTransversalityOrbifolds2010}) \begin{IEEEeqnarray}{c} \plabel{eq:c1 and asy vanishing} c_1(E,\mathbf{A}_{\mathbf{z}}) = Z(\xi) + Z_{\infty}(\xi). \end{IEEEeqnarray} \end{remark} \begin{lemma} \label{lem:D plus E is surjective} Let $w \in \dot{\Sigma}$ be a point and $\mathbf{E} \colon W^{1,p}(\dot{\Sigma}, E) \longrightarrow E_w$ be the evaluation map at $w$, i.e. $\mathbf{E}(\xi) = \xi_w$. Assume that $g = 0$ and $\# \mathbf{z}_0 = 0$. If $\operatorname{ind} \mathbf{D} = 2$ then $\mathbf{D} \oplus \mathbf{E} \colon W^{1,p}(\dot{\Sigma}, E) \longrightarrow L^p(\dot{\Sigma}, \Hom^{0,1}(T \dot{\Sigma}, E)) \oplus E_w$ is surjective. \end{lemma} \begin{proof} It is enough to show that the maps \begin{IEEEeqnarray*}{rCls+x*} \mathbf{D} \colon W^{1,p}(\dot{\Sigma}, E) & \longrightarrow & L^p(\dot{\Sigma}, \Hom^{0,1}(T \dot{\Sigma}, E)), \\ \mathbf{E}|_{\ker \mathbf{D}} \colon \ker \mathbf{D} & \longrightarrow & E_w \end{IEEEeqnarray*} are surjective. By \cref{lem:conditions for D surjective genus zero}, $\mathbf{D}$ is surjective. Since $\dim \ker \mathbf{D} = \operatorname{ind} \mathbf{D} = 2$ and $\dim_{\R} E_w = 2$, the map $\mathbf{E}|_{\ker \mathbf{D}}$ is surjective if and only if it is injective. So, we show that $\ker(\mathbf{E}|_{\ker \mathbf{D}}) = \ker \mathbf{E} \cap \ker \mathbf{D} = \{0\}$. For this, let $\xi \in \ker \mathbf{E} \cap \ker \mathbf{D}$ and assume by contradiction that $\xi \neq 0$. Consider the quantities defined in \cref{rmk:formulas for xi in ker nonzero}.
We compute \begin{IEEEeqnarray*}{rCls+x*} 0 & = & \operatorname{ind} \mathbf{D} - 2 & \quad [\text{by assumption}] \\ & = & 2 c_1(E,\mathbf{A}_{\mathbf{z}}) & \quad [\text{by Equation \eqref{eq:chern and index}}] \\ & = & 2 Z(\xi) + 2 Z_{\infty}(\xi) & \quad [\text{by Equation \eqref{eq:c1 and asy vanishing}}] \\ & \geq & 0 & \quad [\text{by definition of $Z$ and $Z_{\infty}$}], \end{IEEEeqnarray*} which implies that $Z(\xi) = 0$. This gives the desired contradiction, because \begin{IEEEeqnarray*}{rCls+x*} 0 & = & Z(\xi) & \quad [\text{by the previous computation}] \\ & = & \sum_{z \in \xi^{-1}(0)} \operatorname{ord}(\xi;z) & \quad [\text{by definition of $Z$}] \\ & \geq & \operatorname{ord}(\xi;w) & \quad [\text{since $\xi_w = \mathbf{E}(\xi) = 0$}] \\ & > & 0 & \quad [\text{by \cref{rmk:formulas for xi in ker nonzero}}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \section{Cauchy--Riemann operators as sections} \label{sec:functional analytic setup} In this section, we phrase the notion of a map $u \colon \dot{\Sigma} \longrightarrow \hat{X}$ being holomorphic in terms of $u$ being in the zero set of a section $\overline{\partial} \colon \mathcal{T} \times \mathcal{B} \longrightarrow \mathcal{E}$ (see \cref{def:bundle for cr op,def:cauchy riemann operator}). The advantage of this point of view is that we can then think of moduli spaces of holomorphic curves in $\hat{X}$ as the zero set of the section $\overline{\partial}$. To see if such a moduli space is regular near $(j, u)$, one needs to consider the linearization $\mathbf{L}_{(j,u)}$ of $\overline{\partial}$ at $(j,u)$ (see \cref{def:linearized cr op}), and prove that it is surjective. We will see that a suitable restriction of $\mathbf{L}_{(j,u)}$ is a real linear Cauchy--Riemann operator (\cref{lem:D is a rlcro}), and therefore we can use the theory from the last section to show that $\mathbf{L}_{(j,u)}$ is surjective in some particular cases (\cref{lem:Du is surjective case n is 1,lem:DX surj implies DY surj}). \begin{definition} \label{def:asymptotic marker} Let $(\Sigma,j)$ be a Riemann surface and $z \in \Sigma$ be a puncture. An \textbf{asymptotic marker} at $z$ is a half-line $v \in (T_z \Sigma \setminus \{0\}) / \R_{> 0}$. \end{definition} \begin{definition} \label{def:moduli space of curves with asymtotic marker} Let $(X, \omega, \lambda)$ be a symplectic cobordism, $J \in \mathcal{J}(X)$ be a cylindrical almost complex structure on $\hat{X}$, and $\Gamma^{\pm} = (\gamma^{\pm}_1, \ldots, \gamma^{\pm}_{p^{\pm}})$ be tuples of Reeb orbits on $\partial^{\pm} X$. Let $\mathcal{M}^{\$,J}_X(\Gamma^+, \Gamma^-)$ be the moduli space of (equivalence classes of) tuples \begin{IEEEeqnarray*}{c+x*} (\Sigma, j, \mathbf{z}, \mathbf{v}, u), \qquad \mathbf{z} = \mathbf{z}^+ \cup \mathbf{z}^-, \qquad \mathbf{v} = \mathbf{v}^+ \cup \mathbf{v}^{-} \end{IEEEeqnarray*} where $(\Sigma, j, \mathbf{z}, u)$ is as in \cref{def:asy cyl holomorphic curve} and $\mathbf{v}^{\pm} = \{v^{\pm}_1, \ldots, v^{\pm}_{p^{\pm}}\}$ is a set of asymptotic markers on $\mathbf{z}^{\pm} = \{z^{\pm}_1, \ldots, z^{\pm}_{p^{\pm}}\}$ such that \begin{IEEEeqnarray*}{c+x*} \lim_{t \to 0^+} u(c(t)) = (\pm \infty, \gamma^{\pm}_i(0)) \end{IEEEeqnarray*} for every $i = 1, \ldots, p^{\pm}$ and every path $c$ in $\Sigma$ with $c(0) = z^{\pm}_i$ and $\dot{c}(0) = v^{\pm}_i$.
Two such tuples $(\Sigma_0, j_0, \mathbf{z}_0, \mathbf{v}_0, u_0)$ and $(\Sigma_1, j_1, \mathbf{z}_1, \mathbf{v}_1, u_1)$ are equivalent if there exists a biholomorphism $\phi \colon \Sigma_0 \longrightarrow \Sigma_1$ such that \begin{IEEEeqnarray*}{rCls+x*} u_1 \circ \phi & = & u_0, \\ \phi(z^{\pm}_{0,i}) & = & z^{\pm}_{1,i}, \\ \dv \phi (z^{\pm}_{0,i}) v_{0,i}^{\pm} & = & v_{1,i}^{\pm}. \end{IEEEeqnarray*} \end{definition} \begin{remark} \label{rmk:moduli space may assume sigma is sphere} Consider the sphere $S^2$, without any specified almost complex structure. Let $\mathbf{z}^{\pm} = \{z^{\pm}_1, \ldots, z^{\pm}_{p^{\pm}}\} \subset S^2$ be sets of punctures and $\mathbf{v}^{\pm} = \{v^{\pm}_1, \ldots, v^{\pm}_{p^{\pm}}\}$ be corresponding sets of asymptotic markers. Then, \begin{IEEEeqnarray*}{c+x*} \mathcal{M}^{\$, J}_{X}(\Gamma^+, \Gamma^-) \cong \left\{ (j, u) \ \middle\vert \begin{array}{l} j \text{ is an almost complex structure on }S^2, \\ u \colon (\dot{S}^2, j) \longrightarrow (\hat{X}, J) \text{ is as in \cref{def:asy cyl holomorphic curve}} \end{array} \right\} / \sim, \end{IEEEeqnarray*} where two tuples $(j_0, u_0)$ and $(j_1, u_1)$ are equivalent if there exists a biholomorphism $\phi \colon (S^2, j_0) \longrightarrow (S^2, j_1)$ such that \begin{IEEEeqnarray*}{rCls+x*} u_1 \circ \phi & = & u_0, \\ \phi(z^{\pm}_{i}) & = & z^{\pm}_{i}, \\ \dv \phi (z^{\pm}_{i}) v_{i}^{\pm} & = & v_{i}^{\pm}. \end{IEEEeqnarray*} \end{remark} \begin{remark} \label{rmk:counts of moduli spaces with or without asy markers} There is a surjective map $\pi^{\$} \colon \mathcal{M}^{\$, J}_{X}(\Gamma^+, \Gamma^-) \longrightarrow \mathcal{M}^{J}_{X}(\Gamma^+, \Gamma^-)$ given by forgetting the asymptotic markers. By \cite[Proposition 11.1]{wendlLecturesSymplecticField2016}, for every $u \in \mathcal{M}^{J}_{X}(\Gamma^+, \Gamma^-)$ the preimage $(\pi^{\$})^{-1}(u)$ contains exactly \begin{IEEEeqnarray*}{c+x*} \frac{\bigproduct_{\gamma \in \Gamma^+ \cup \Gamma^-} m(\gamma)}{|\operatorname{Aut}(u)|} \end{IEEEeqnarray*} elements, where $m(\gamma)$ is the multiplicity of the Reeb orbit $\gamma$ and $\operatorname{Aut}(u)$ is the automorphism group of $u = (\Sigma, j, \mathbf{z}, u)$, i.e. an element of $\operatorname{Aut}(u)$ is a biholomorphism $\phi \colon \Sigma \longrightarrow \Sigma$ such that $u \circ \phi = u$ and $\phi(z_i^{\pm}) = z_i^{\pm}$ for every $i$. \end{remark} We will work with the following assumptions. Let $\Sigma = S^2$, (without any specified almost complex structure). Let $\mathbf{z} = \{z_1, \ldots, z_p\} \subset \Sigma$ be a set of punctures and $\mathbf{v} = \{v_1, \ldots, v_p\}$ be a corresponding set of asymptotic markers. Assume also that we have a set $\mathbf{j} = \{j_1, \ldots, j_p\}$, where $j_i$ is an almost complex structure defined on a neighbourhood of $z_i$ for every $i = 1, \ldots,p$. For every $i$, there are cylindrical coordinates $(s, t)$ on $\dot{\Sigma}$ near $z_i$ as in \cref{def:punctures asy markers cyl ends}, with the additional property that $v_i$ agrees with the direction $t = 0$. We will also assume that $\mathcal{T} \subset \mathcal{J}(\Sigma)$ is a Teichmüller slice as in \cite[Section 3.1]{wendlAutomaticTransversalityOrbifolds2010}, where $\mathcal{J}(\Sigma)$ denotes the set of almost complex structures on $\Sigma = S^2$. Finally, let $(X, \lambda)$ be a nondegenerate Liouville domain of dimension $2n$ and $J \in \mathcal{J}(X)$ be an admissible almost complex structure on $\hat{X}$. 
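For example, in the situation of \cref{rmk:counts of moduli spaces with or without asy markers}, if $u$ has trivial automorphism group, a single positive asymptotic Reeb orbit $\gamma$ and no negative punctures, then the fibre $(\pi^{\$})^{-1}(u)$ has exactly $m(\gamma)$ elements.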
\begin{definition} Let $\gamma$ be an unparametrized simple Reeb orbit of $\partial X$. An \textbf{admissible parametrization} near $\gamma$ is a diffeomorphism $\phi \colon S^1 \times D^{2n-2} \longrightarrow O$, where $O \subset \partial X$ is an open neighbourhood of $\gamma$ and \begin{IEEEeqnarray*}{c+x*} D^{2n-2} \coloneqq \{(z^1,\ldots,z^{n-1}) \in \C^{n-1} \mid |z^1| < 1, \ldots, |z^{n-1}| < 1 \} \end{IEEEeqnarray*} is the polydisk, such that $t \longmapsto \phi(t,0)$ is a parametrization of $\gamma$. In this case, we denote by $(\vartheta, \zeta) = \phi^{-1} \colon O \longrightarrow S^1 \times D^{2n-2}$ the coordinates near $\gamma$. \end{definition} Let $\Gamma = (\gamma_{1},\ldots,\gamma_{p})$ be a tuple of (unparametrized) Reeb orbits in $\partial X$. Denote by $m_i$ the multiplicity of $\gamma_i$ and by $T_i$ the period of the simple Reeb orbit underlying $\gamma_i$ (so, the period of $\gamma_i$ is $m_i T_i$). For every $i = 1,\ldots,p $, choose once and for all an admissible parametrization $\phi_i \colon S^1 \times D^{2n-2} \longrightarrow O_i$ near the simple Reeb orbit underlying $\gamma_i$. \begin{definition} \label{def:bundle for cr op} We define a vector bundle $\pi \colon \mathcal{E} \longrightarrow \mathcal{T} \times \mathcal{B}$ as follows. Let $\mathcal{B}$ be the set of maps $u \colon \dot{\Sigma} \longrightarrow \hat{X}$ of class $W^{k,p}_{\mathrm{loc}}$ satisfying the following property for every puncture $z_i$. Write $u$ with respect to the cylindrical coordinates $(s,t)$ defined from $(z_i, v_i)$. First, we require that $u(s,t) \in \R_{\geq 0} \times O_i$ for $s$ big enough. Write $u$ with respect to the coordinates $(\vartheta, \zeta)$ near $\gamma$ on the target and cylindrical coordinates $(s,t)$ on the domain: \begin{IEEEeqnarray*}{rCls+x*} u(s,t) & = & (\pi_{\R} \circ u(s,t), \pi_{\partial X} \circ u (s,t)) \\ & = & (\pi_{\R} \circ u(s,t), \vartheta(s,t), \zeta(s,t)). \end{IEEEeqnarray*} Finally, we require that there exists $a \in \R$ such that the map \begin{IEEEeqnarray*}{c+x*} (s,t) \longmapsto (\pi_{\R} \circ u(s,t), \vartheta(s,t), \zeta(s,t)) - (m_i T_i s + a, m_i T_i t, 0) \end{IEEEeqnarray*} is of class $W^{k,p,\delta}$. The fibre, total space, projection and zero section are defined by \begin{IEEEeqnarray*}{rCls+x*} \mathcal{E}_{(j,u)} & \coloneqq & W^{k-1,p,\delta}(\Hom^{0,1}((T \dot{\Sigma}, j), (u^* T \hat{X}, J))), \quad \text{for every } (j,u) \in \mathcal{T} \times \mathcal{B}, \\ \mathcal{E} & \coloneqq & \bigcoproduct_{(j,u) \in \mathcal{T} \times \mathcal{B}} \mathcal{E}_{(j,u)} = \{ (j, u, \xi) \mid (j,u) \in \mathcal{T} \times \mathcal{B}, \, \xi \in \mathcal{E}_{(j,u)} \}, \\ \pi(j,u, \eta) & \coloneqq & (j,u), \\ z(j,u) & \coloneqq & (j,u,0). \end{IEEEeqnarray*} \end{definition} \begin{definition} \label{def:cauchy riemann operator} The \textbf{Cauchy--Riemann operators} are the sections \begin{IEEEeqnarray*}{rClCrCl} \overline{\partial}_j \colon \mathcal{B} & \longrightarrow & \mathcal{E}, & \qquad & \overline{\partial}_j(u) & \coloneqq & \frac{1}{2} (T u + J \circ Tu \circ j) \in \mathcal{E}_{(j,u)}, \\ \overline{\partial} \colon \mathcal{T} \times \mathcal{B} & \longrightarrow & \mathcal{E}, & \qquad & \overline{\partial}(j,u) & \coloneqq & \overline{\partial}_j(u). \end{IEEEeqnarray*} \end{definition} \begin{definition} \label{def:linearized cr op} Let $(j,u) \in \mathcal{T} \times \mathcal{B}$ be such that $\overline{\partial}(j ,u) = 0$. 
Define the \textbf{vertical projection} \begin{IEEEeqnarray*}{c+x*} P_{(j,u)} \colon T_{(j,u,0)} \mathcal{E} \longrightarrow \mathcal{E}_{(j,u)}, \qquad P_{(j,u)} (\eta) \coloneqq \eta - \dv (z \circ \pi)(j,u,0) \eta. \end{IEEEeqnarray*} The \textbf{linearized Cauchy--Riemann operators} are the linear maps \begin{IEEEeqnarray*}{rCls+x*} \mathbf{D}_{(j,u)} & \coloneqq & P_{(j,u)} \circ \dv (\overline{\partial}_j)(u) \colon T_u \mathcal{B} \longrightarrow \mathcal{E}_{(j,u)}, \\ \mathbf{L}_{(j,u)} & \coloneqq & P_{(j,u)} \circ \dv (\overline{\partial})(j,u) \colon T_j \mathcal{T} \oplus T_u \mathcal{B} \longrightarrow \mathcal{E}_{(j,u)}. \end{IEEEeqnarray*} Define also the restriction \begin{IEEEeqnarray*}{c+x*} \mathbf{F}_{(j,u)} \coloneqq \mathbf{L}_{(j,u)}|_{T_j \mathcal{T}} \colon T_j \mathcal{T} \longrightarrow \mathcal{E}_{(j,u)}. \end{IEEEeqnarray*} \end{definition} \begin{remark} \label{rmk:tangent of base of bundle} Choose a smooth function $\beta \colon \R \longrightarrow [0,1]$ such that $\beta(s) = 0$ if $s < 0$, $\beta(s) = 1$ if $s > 1$ and $0 \leq \beta'(s) \leq 2$. Consider the Liouville vector field $\hat{Z}^{X} \in \mathfrak{X}(\hat{X})$ and the Reeb vector field $R^{\partial X} \in \mathfrak{X}(\partial X)$. For every puncture $z$, let $(s,t)$ be the cylindrical coordinates near $z$ and define sections \begin{IEEEeqnarray*}{rClCrCl} \hat{Z}^X_z & \in & \Gamma(u^* T \hat{X}), & \quad & \hat{Z}^X_z(s,t) & = & \beta(s) \hat{Z}^X(u(s,t)), \\ R^{\partial X}_z & \in & \Gamma(u^* T \hat{X}), & \quad & R^{\partial X}_z(s,t) & = & \beta(s) R^{\partial X}(u(s,t)). \end{IEEEeqnarray*} Denote $V = \bigoplus_{i=1}^{p} \spn \{\hat{Z}^X_{z_i}, R^{\partial X}_{z_i}\}$. Then, the tangent space of $\mathcal{B}$ is given by \begin{IEEEeqnarray*}{c+x*} T_u \mathcal{B} = V \oplus W^{k,p,\delta}(\dot{\Sigma}, u^* T \hat{X}). \end{IEEEeqnarray*} \end{remark} \begin{definition} \label{def:conjugate and restriction operators} Let $(j,u) \in \mathcal{T} \times \mathcal{B}$ be such that $\overline{\partial}(j,u) = 0$ and consider the linearized Cauchy--Riemann operator $\mathbf{D}_{(j,u)}$. Choose a smooth function $f \colon \dot{\Sigma} \longrightarrow \R$ such that $f(s,t) = \delta s$ on every cylindrical end of $\dot{\Sigma}$. Define the \textbf{restriction} of $\mathbf{D}_{(j,u)}$, denoted $\mathbf{D}_{\delta}$, and the \textbf{conjugation} of $\mathbf{D}_{(j,u)}$, denoted $\mathbf{D}_0$, to be the unique maps such that the diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} T_u \mathcal{B} \ar[d, swap, "\mathbf{D}_{(j,u)}"] & W^{k,p,\delta}(u^* T \hat{X}) \ar[d, "\mathbf{D}_{\delta}"] \ar[l, hook'] \ar[r, hook, two heads, "\xi \mapsto e^f \xi"] & W^{k,p}(u^* T \hat{X}) \ar[d, "\mathbf{D}_0"] \\ \mathcal{E}_{(j,u)} \ar[r, equals] & W^{k-1,p,\delta}(\Hom^{0,1}(T \dot{\Sigma}, u^* T \hat{X})) \ar[r, hook, two heads, swap, "\eta \mapsto e^f \eta"] & W^{k-1,p}(\Hom^{0,1}(T \dot{\Sigma}, u^* T \hat{X})) \end{tikzcd} \end{IEEEeqnarray*} commutes. \end{definition} \begin{lemma} \label{lem:D is a rlcro} The maps $\mathbf{D}_\delta$ and $\mathbf{D}_0$ are real linear Cauchy--Riemann operators. 
\end{lemma} \begin{proof} By \cite[Proposition 3.1.1]{mcduffHolomorphicCurvesSymplectic2012}, the map $\mathbf{D}_{\delta}$ is given by the equation \begin{IEEEeqnarray*}{c+x*} \mathbf{D}_{\delta} \xi = \frac{1}{2} \p{}{}{\nabla \xi + J(u) \nabla \xi \circ j} - \frac{1}{2} J(u) (\nabla_{\xi} J)(u) \partial(u), \end{IEEEeqnarray*} where $\nabla$ is the Levi-Civita connection on $\hat{X}$ associated to the Riemannian metric determined by $J$ and $\edv \hat{\lambda}$. Since $\nabla \colon \mathfrak{X}(\dot{\Sigma}) \times \Gamma(u^* T \hat{X}) \longrightarrow \Gamma(u^* T \hat{X})$ satisfies the Leibniz rule with respect to the $\Gamma(u^* T \hat{X})$ argument, $\mathbf{D}_{\delta}$ is a real linear Cauchy--Riemann operator. We show that $\mathbf{D}_0$ satisfies the Leibniz rule. \begin{IEEEeqnarray*}{rCls+x*} \mathbf{D}_0 (g \xi) & = & e^f \mathbf{D}_{\delta} (e^{-f} g \xi) & \quad [\text{by definition of $\mathbf{D}_{0}$}] \\ & = & g e^f \mathbf{D}_{\delta} (e^{-f} \xi) + \xi \otimes \overline{\partial} g & \quad [\text{$\mathbf{D}_{\delta}$ obeys the Leibniz rule}] \\ & = & g \mathbf{D}_{0} (\xi) + \xi \otimes \overline{\partial} g & \quad [\text{by definition of $\mathbf{D}_{0}$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{lemma} \label{lem:Du is surjective case n is 1} If $n=1$ then $\mathbf{L}_{(j,u)}$ is surjective. \end{lemma} \begin{proof} Let $\tau_1$ be a global complex trivialization of $u^* T \hat{X}$ extending to an asymptotic unitary trivialization near the punctures. Let $\tau_2$ be the unitary trivialization of $u^* T \hat{X}$ near the punctures which is induced from the decomposition $T_{(r,x)}(\R \times \partial X) = \p{<}{}{\partial_r} \oplus \p{<}{}{R^{\partial X}_x}$. It is shown in the proof of \cite[Lemma 7.10]{wendlLecturesSymplecticField2016} that the operator $\mathbf{D}_0$ is asymptotic at $z_i$ to $- J \partial_t + \delta$, which is nondegenerate and has Conley--Zehnder index $\conleyzehnder^{\tau_2}(- J \partial_t + \delta) = -1$. Therefore, every $z_i$ is an odd puncture and $\# \mathbf{z}_0 = 0$. We show that $c_1^{\tau_2}(u^* T \hat{X}) = \sum_{i=1}^{p} m_i$, where $m_i$ is the multiplicity of the asymptotic Reeb orbit $\gamma_i$: \begin{IEEEeqnarray*}{rCls+x*} c_1^{\tau_2}(u^* T \hat{X}) & = & c_1^{\tau_1}(u^* T \hat{X}) + \sum_{i=1}^{p} \deg(\tau_1|_{E_{z_i}} \circ (\tau_2|_{E_{z_i}})^{-1}) & \quad [\text{by \cite[Exercise 5.3]{wendlLecturesSymplecticField2016}}] \\ & = & \sum_{i=1}^{p} \deg(\tau_1|_{E_{z_i}} \circ (\tau_2|_{E_{z_i}})^{-1}) & \quad [\text{by \cref{def:relative first chern number}}] \\ & = & \sum_{i=1}^{p} m_i, \end{IEEEeqnarray*} where in the last equality we have used the fact that if $(s,t)$ are the cylindrical coordinates near $z_i$, then for $s$ large enough the map $t \longmapsto \tau_1|_{u(s,t)} \circ (\tau_2|_{u(s,t)})^{-1}$ winds around the origin $m_i$ times. We show that $\operatorname{ind} \mathbf{D}_0 \geq 2$. \begin{IEEEeqnarray*}{rCls+x*} \operatorname{ind} \mathbf{D}_0 & = & n \chi(\dot{\Sigma}) + 2 c_1^{\tau_2}(u^* T \hat{X}) + \sum_{i=1}^{p} \conleyzehnder^{\tau_2}(- J \partial_t + \delta) & \quad [\text{by \cref{thm:riemann roch with punctures}}] \\ & = & 2 + 2 \sum_{i=1}^{p} (m_i - 1) & \quad [\text{since $n = 1$ and $g = 0$}] \\ & \geq & 2 & \quad [\text{since $m_i \geq 1$ for every $i$}]. \end{IEEEeqnarray*} By \cref{lem:conditions for D surjective genus zero}, this implies that $\mathbf{D}_0$ is surjective.
By \cref{def:conjugate and restriction operators}, the operator $\mathbf{D}_{(j,u)}$ is also surjective. Therefore, $\mathbf{L}_{(j,u)} = \mathbf{F}_{(j,u)} + \mathbf{D}_{(j,u)}$ is also surjective. \end{proof} From now until the end of this section, let $(X, \lambda^X)$ be a Liouville domain of dimension $2n$ and $(Y, \lambda^Y)$ be a Liouville domain of dimension $2n + 2$ such that \begin{enumerate} \item $X \subset Y$ and $\partial X \subset \partial Y$; \item the inclusion $\iota \colon X \longrightarrow Y$ is a Liouville embedding; \item if $x \in X$ then $Z_x^{X} = Z_x^{Y}$; \item if $x \in \partial X$ then $R_x^{\partial X} = R^{\partial Y}_x$. \end{enumerate} In this case, we have an inclusion of completions $\hat{X} \subset \hat{Y}$ as sets. By assumption, $Z^X$ is $\iota$-related to $Z^Y$, which implies that there is a map $\hat{\iota} \colon \hat{X} \longrightarrow \hat{Y}$ on the level of completions. Since in this case $\hat{X} \subset \hat{Y}$ and by \cref{def:embedding on completions coming from Liouville embedding}, $\hat{\iota}$ is the inclusion. Assume that $J^X \in \mathcal{J}(X)$ and $J^Y \in \mathcal{J}(Y)$ are almost complex structures on $\hat{X}$ and $\hat{Y}$ respectively, such that $\hat{\iota} \colon \hat{X} \longrightarrow \hat{Y}$ is holomorphic. As before, let $\Gamma = (\gamma_{1},\ldots,\gamma_{p})$ be a tuple of unparametrized Reeb orbits in $\partial X$. Notice that each $\gamma_i$ can also be seen as a Reeb orbit in $\partial Y$. For every $i = 1,\ldots,p$, choose once and for all admissible parametrizations $\phi_i^X \colon S^1 \times D^{2n-2} \longrightarrow O_i^X$ and $\phi_i^Y \colon S^1 \times D^{2n} \longrightarrow O_i^Y$ near $\gamma_i$ with the property that the diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} S^1 \times D^{2n - 2} \ar[r, hook, two heads, "\phi^X_i"] \ar[d, hook] & O^X_i \ar[r, hook] \ar[d, hook, dashed, "\exists !"] & \partial X \ar[d, hook, "\iota_{\partial Y, \partial X}"] \\ S^1 \times D^{2n} \ar[r, hook, two heads, "\phi^Y_i"] & O^Y_i \ar[r, hook] & \partial Y \end{tikzcd} \end{IEEEeqnarray*} commutes. We will consider the bundle of \cref{def:bundle for cr op} as well as the Cauchy--Riemann operator and its linearization for both $X$ and $Y$. We will use the notation \begin{IEEEeqnarray*}{rClCrClCrCl} \pi^X \colon \mathcal{E}X & \longrightarrow & \mathcal{T} \times \mathcal{B}X, & \qquad & \overline{\partial}\vphantom{\partial}^X \colon \mathcal{T} \times \mathcal{B}X & \longrightarrow & \mathcal{E} X, & \qquad & \mathbf{L}^X_{(j,u)} \colon T_j \mathcal{T} \oplus T_u \mathcal{B} X & \longrightarrow & \mathcal{E}_{(j,u)} X, \\ \pi^Y \colon \mathcal{E}Y & \longrightarrow & \mathcal{T} \times \mathcal{B}Y, & \qquad & \overline{\partial}\vphantom{\partial}^Y \colon \mathcal{T} \times \mathcal{B}Y & \longrightarrow & \mathcal{E} Y, & \qquad & \mathbf{L}^Y_{(j,w)} \colon T_j \mathcal{T} \oplus T_w \mathcal{B} Y & \longrightarrow & \mathcal{E}_{(j,w)} Y \end{IEEEeqnarray*} to distinguish the bundles and maps for $X$ and $Y$. Define maps \begin{IEEEeqnarray*}{rClCrCl} \mathcal{B}\iota \colon \mathcal{B} X & \longrightarrow & \mathcal{B}Y, & \quad & \mathcal{B}\iota(u) & \coloneqq & \hat{\iota} \circ u, \\ \mathcal{E}\iota \colon \mathcal{E} X & \longrightarrow & \mathcal{E}Y, & \quad & \mathcal{E}\iota(j,u,\eta) & \coloneqq & (j, \hat{\iota} \circ u, T \hat{\iota} \circ \eta). 
\end{IEEEeqnarray*} Then, the diagrams \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \mathcal{E}X \ar[r, "\pi^X"] \ar[d, swap, "\mathcal{E}\iota"] & \mathcal{T} \times \mathcal{B}X \ar[d, "\id_{\mathcal{T}} \times \mathcal{B}\iota"] & & \mathcal{T} \times \mathcal{B}X \ar[d, swap, "\id_{\mathcal{T}} \times \mathcal{B}\iota"] \ar[r, "z^X"] & \mathcal{E}X \ar[d, "\mathcal{E}\iota"] \\ \mathcal{E}Y \ar[r, swap, "\pi^Y"] & \mathcal{T} \times \mathcal{B}Y & & \mathcal{T} \times \mathcal{B}Y \ar[r, swap, "z^Y"] & \mathcal{E}Y \\ \mathcal{T} \times \mathcal{B}X \ar[r, "\overline{\partial}\vphantom{\partial}^X"] \ar[d, swap, "\id_{\mathcal{T}} \times \mathcal{B}\iota"] & \mathcal{E}X \ar[d, "\mathcal{E}\iota"] & & (z^X)^* T \mathcal{E} X \ar[r, "P^X"] \ar[d, swap, "T \mathcal{E} \iota"] & \mathcal{E} X \ar[d, "\mathcal{E} \iota"] \\ \mathcal{T} \times \mathcal{B}Y \ar[r, swap, "\overline{\partial}\vphantom{\partial}^Y"] & \mathcal{E}Y & & (z^Y)^* T \mathcal{E} Y \ar[r, swap, "P^Y"] & \mathcal{E} Y \end{tikzcd} \end{IEEEeqnarray*} commute. By the chain rule, the diagram \begin{IEEEeqnarray}{c+x*} \plabel{eq:diag naturality of lcro} \begin{tikzcd} T_u \mathcal{B} X \ar[rr, bend left = 40, "\mathbf{D}^X_{(j,u)}"] \ar[r, "\dv \overline{\partial}\vphantom{\partial}^X_j(u)"] \ar[d, swap, "\dv(\mathcal{B} \iota)(u)"] & T_{(j,u,0)} \mathcal{E} X \ar[r, "P_{(j,u)}^X"] \ar[d, "\dv(\mathcal{E}\iota)(\overline{\partial}\vphantom{\partial}^X_j(u))"] & \mathcal{E}_{(j,u)} X \ar[d, "\mathcal{E}_{(j,u)} \iota"] \\ T_{\hat{\iota} \circ u} \mathcal{B} Y \ar[rr, swap, bend right = 40, "\mathbf{D}^Y_{(j,\hat{\iota} \circ u)}"] \ar[r, swap, "\dv \overline{\partial}\vphantom{\partial}^Y_j(\hat{\iota} \circ u)"] & T_{(j, \hat{\iota} \circ u, 0)} \mathcal{E} Y \ar[r, swap, "P^Y_{(j,\hat{\iota} \circ u)}"] & \mathcal{E}_{(j, \hat{\iota} \circ u)} Y \end{tikzcd} \end{IEEEeqnarray} is also commutative whenever $\overline{\partial}\vphantom{\partial}^X(j,u) = 0$. \begin{remark} \label{rmk:splittings of B and E} Consider the formula for the tangent space of $\mathcal{B}X$ from \cref{rmk:tangent of base of bundle}. By the assumptions on the Liouville domains $X$ and $Y$, we have that $V^X = V^Y$. Also, the diagrams \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} T_u \mathcal{B} X \ar[r, hook] & T_{u} \mathcal{B} Y & W^{k,p,\delta}(u^* (T \hat{X})^{\perp}) \ar[l, hook'] \ar[d, equals] \\ W^{k,p,\delta}(u^* T \hat{X}) \ar[r, hook] \ar[d, two heads, hook] \ar[u, hook] & W^{k,p,\delta}(u^* T \hat{Y}) \ar[u, hook] \ar[d, two heads, hook] & W^{k,p,\delta}(u^* (T \hat{X})^{\perp}) \ar[l, hook'] \ar[d, two heads, hook] \\ W^{k,p}(u^* T \hat{X}) \ar[r, hook] & W^{k,p}(u^* T \hat{Y}) & W^{k,p}(u^* (T \hat{X})^{\perp}) \ar[l, hook'] \end{tikzcd} \\ \begin{tikzcd} \mathcal{E}_{(j,u)} X \ar[r, hook] \ar[d, hook, two heads] & \mathcal{E}_{(j,u)} Y \ar[d, hook, two heads] & \Omega^{k-1,p,\delta}_j(u^*(T \hat{X})^{\perp}) \ar[d, hook, two heads] \ar[l, hook'] \\ \Omega^{k-1,p}_j(u^*T \hat{X}) \ar[r, hook] & \Omega^{k-1,p}_j(u^*T \hat{Y}) & \Omega^{k-1,p}_j(u^*(T \hat{X})^{\perp}) \ar[l, hook'] \end{tikzcd} \end{IEEEeqnarray*} commute, where for shortness we are using the notation \begin{IEEEeqnarray*}{c+x*} \Omega^{k,p}_{j}(E) = W^{k,p}(\Hom^{0,1}((T \dot{\Sigma}, j), (E, J))) \end{IEEEeqnarray*} for any complex vector bundle $(E, J) \longrightarrow \dot{\Sigma}$. In both diagrams, the middle term of every row is the direct sum of the left and right terms. 
In addition, the vertical maps in the middle of both diagrams are block diagonal when written with respect to these decompositions. \end{remark} \begin{definition} Let $z_0 \in \dot{\Sigma}$. Define the \textbf{evaluation map} \begin{IEEEeqnarray*}{rrCl} \operatorname{ev}^X \colon & \mathcal{B} X & \longrightarrow & \hat{X} \\ & u & \longmapsto & u(z_0) \end{IEEEeqnarray*} as well as its derivative $\mathbf{E}^X_u \coloneqq \dv (\operatorname{ev}^{X})(u) \colon T_u \mathcal{B} X \longrightarrow T_{u(z_0)} \hat{X}$. \end{definition} In the following lemma, we show that if a holomorphic curve $u$ in $X$ is regular (in $X$) then the corresponding holomorphic curve $\hat{\iota} \circ u$ in $Y$ is also regular. See also \cite[Proposition A.1]{mcduffSymplecticCapacitiesUnperturbed2022} for a similar result. \begin{lemma} \label{lem:DX surj implies DY surj} Let $u \in \mathcal{B}X$ be holomorphic and denote $\hat{\iota} \circ u \in \mathcal{B} Y$ simply by $u$. Assume that the normal Conley--Zehnder index of every asymptotic Reeb orbit $\gamma_i$ is $1$. \begin{enumerate} \item \label{lem:DX surj implies DY surj 1} If $\mathbf{L}_{(j,u)}^X$ is surjective then so is $\mathbf{L}^Y_{(j,u)}$. \item \label{lem:DX surj implies DY surj 2} If $\mathbf{L}_{(j,u)}^X \oplus \mathbf{E}^X_u$ is surjective then so is $\mathbf{L}^Y_{(j,u)} \oplus \mathbf{E}^Y_u$. \end{enumerate} \end{lemma} \begin{proof} Consider the decomposition $T_x \hat{Y} = T_x \hat{X} \oplus (T_x \hat{X})^{\perp}$ for $x \in \hat{X}$. Let $\tau$ be a global complex trivialization of $u^* T \hat{Y}$, extending to an asymptotic unitary trivialization near the punctures, and such that $\tau$ restricts to a trivialization of $u^* T \hat{X}$ and $u^* (T \hat{X})^{\perp}$. By \cref{rmk:splittings of B and E}, there are splittings \begin{IEEEeqnarray*}{rCls+x*} T_u \mathcal{B} Y & = & T_u \mathcal{B} X \oplus T_u^{\perp} \mathcal{B} X, \\ \mathcal{E}_{(j,u)} Y & = & \mathcal{E}_{(j,u)} X \oplus \mathcal{E}_{(j,u)}^{\perp} X. 
\end{IEEEeqnarray*} We can write the maps \begin{IEEEeqnarray*}{rCl} \mathbf{L}_{(j,u)}^Y & \colon & T_j \mathcal{T} \oplus T_u \mathcal{B} X \oplus T_u^{\perp} \mathcal{B} X \longrightarrow \mathcal{E}_{(j,u)} X \oplus \mathcal{E}_{(j,u)}^{\perp} X, \\ \mathbf{D}_{(j,u)}^Y & \colon & T_u \mathcal{B} X \oplus T_u^{\perp} \mathcal{B} X \longrightarrow \mathcal{E}_{(j,u)} X \oplus \mathcal{E}_{(j,u)}^{\perp} X, \\ \mathbf{L}_{(j,u)}^X & \colon & T_j \mathcal{T} \oplus T_u \mathcal{B} X \longrightarrow \mathcal{E}_{(j,u)} X, \\ \mathbf{F}_{(j,u)}^Y & \colon & T_j \mathcal{T} \longrightarrow \mathcal{E}_{(j,u)} X \oplus \mathcal{E}_{(j,u)}^{\perp} X, \\ \mathbf{E}_{u}^Y & \colon & T_u \mathcal{B} X \oplus T_u^{\perp} \mathcal{B} X \longrightarrow T_x \hat{X} \oplus (T_x \hat{X})^{\perp} \end{IEEEeqnarray*} as block matrices \begin{IEEEeqnarray}{rCl} \mathbf{L}_{(j,u)}^Y & = & \begin{bmatrix} \mathbf{F}^X_{(j,u)} & \mathbf{D}^X_{(j,u)} & \mathbf{D}^{TN}_{(j,u)} \\ 0 & 0 & \mathbf{D}^{NN}_{(j,u)} \end{bmatrix}, \plabel{eq:decomposition of cr ops 1}\\ \mathbf{D}_{(j,u)}^Y & = & \begin{bmatrix} \mathbf{D}^X_{(j,u)} & \mathbf{D}^{TN}_{(j,u)} \\ 0 & \mathbf{D}^{NN}_{(j,u)} \end{bmatrix}, \plabel{eq:decomposition of cr ops 2}\\ \mathbf{L}_{(j,u)}^X & = & \begin{bmatrix} \mathbf{F}^X_{(j,u)} & \mathbf{D}^X_{(j,u)} \end{bmatrix}, \plabel{eq:decomposition of cr ops 3}\\ \mathbf{F}_{(j,u)}^Y & = & \begin{bmatrix} \mathbf{F}^X_{(j,u)} \\ 0 \end{bmatrix}, \plabel{eq:decomposition of cr ops 4}\\ \mathbf{E}_{u}^Y & = & \begin{bmatrix} \mathbf{E}^X_{u} & 0 \\ 0 & \mathbf{E}^{NN}_{u} \end{bmatrix}, \plabel{eq:decomposition of cr ops 5} \end{IEEEeqnarray} where \eqref{eq:decomposition of cr ops 5} follows by definition of the evaluation map, \eqref{eq:decomposition of cr ops 4} is true since $\mathbf{F}^{Y}_{(j,u)}$ is given by the formula $\mathbf{F}^{Y}_{(j,u)}(y) = \frac{1}{2} (J \circ T u \circ y)$, \eqref{eq:decomposition of cr ops 2} follows because diagram \eqref{eq:diag naturality of lcro} commutes, and \eqref{eq:decomposition of cr ops 3} and \eqref{eq:decomposition of cr ops 1} then follow by \cref{def:linearized cr op}. Let $\mathbf{D}^{NN}_\delta$ be the restriction and $\mathbf{D}_0^{NN}$ be the conjugation of $\mathbf{D}^{NN}_{(j,u)}$ (as in \cref{def:conjugate and restriction operators}). Denote by $\mathbf{B}^{NN}_{\gamma_i}$ the asymptotic operator of $\mathbf{D}^{NN}_{\delta}$ at $z_i$. Then the asymptotic operator of $\mathbf{D}^{NN}_0$ at $z_i$ is $\mathbf{B}^{NN}_{\gamma_i} + \delta$, which by assumption has Conley--Zehnder index equal to $1$. We show that $\operatorname{ind} \mathbf{D}_0^{NN} = 2$. \begin{IEEEeqnarray*}{rCls+x*} \operatorname{ind} \mathbf{D}_0^{NN} & = & \chi(\dot{\Sigma}) + 2 c_1^{\tau}(u^* (T \hat{X})^{\perp}) + \sum_{i=1}^{p} \conleyzehnder^{\tau}(\mathbf{B}^{NN}_{{\gamma_i}} + \delta) & \quad [\text{by \cref{thm:riemann roch with punctures}}] \\ & = & 2 & \quad [\text{since $c_1^{\tau}(u^* (T \hat{X})^{\perp}) = 0$, $\chi(\dot{\Sigma}) = 2 - p$ and $\conleyzehnder^{\tau}(\mathbf{B}^{NN}_{{\gamma_i}} + \delta) = 1$}]. \end{IEEEeqnarray*} We prove \ref{lem:DX surj implies DY surj 1}.
\begin{IEEEeqnarray*}{rCls+x*} \operatorname{ind} \mathbf{D}_0^{NN} = 2 & \Longrightarrow & \mathbf{D}_0^{NN} \text{ is surjective} & \quad [\text{by \cref{lem:conditions for D surjective genus zero}}] \\ & \Longrightarrow & \mathbf{D}_\delta^{NN} \text{ is surjective} & \quad [\text{$\mathbf{D}_0^{NN}$ and $\mathbf{D}_{\delta}^{NN}$ are conjugated}] \\ & \Longrightarrow & \mathbf{D}_{(j,u)}^{NN} \text{ is surjective} & \quad [\text{$\mathbf{D}_{\delta}^Y$ is a restriction of $\mathbf{D}_{(j,u)}^Y$}] \\ & \Longrightarrow & \mathbf{L}_{(j,u)}^Y \text{ is surjective} & \quad [\text{$\mathbf{L}_{(j,u)}^X$ is surjective by assumption}]. \end{IEEEeqnarray*} We prove \ref{lem:DX surj implies DY surj 2}. \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\operatorname{ind} \mathbf{D}_0^{NN} = 2}\\ \quad & \Longrightarrow & \mathbf{D}_0^{NN} \oplus \mathbf{E}_u^{NN} \text{ is surjective} & \quad [\text{by \cref{lem:D plus E is surjective}}] \\ & \Longrightarrow & \mathbf{D}_\delta^{NN} \oplus \mathbf{E}_u^{NN} \text{ is surjective} & \quad [\text{$\mathbf{D}_0^{NN} \oplus \mathbf{E}^{NN}_u$ and $\mathbf{D}_{\delta}^{NN} \oplus \mathbf{E}^{NN}_{u}$ are conjugated}] \\ & \Longrightarrow & \mathbf{D}_{(j,u)}^{NN} \oplus \mathbf{E}_u^{NN} \text{ is surjective} & \quad [\text{$\mathbf{D}_{\delta}^Y \oplus \mathbf{E}^{Y}_{u}$ is a restriction of $\mathbf{D}_{(j,u)}^Y \oplus \mathbf{E}^{Y}_u$}] \\ & \Longrightarrow & \mathbf{L}_{(j,u)}^Y \oplus \mathbf{E}_u^{Y} \text{ is surjective} & \quad [\text{$\mathbf{L}_{(j,u)}^X \oplus \mathbf{E}_u^{X}$ is surjective by assumption}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \section{Moduli spaces of curves in ellipsoids} \label{sec:augmentation map of an ellipsoid} We now use the techniques explained in the past two sections to compute the augmentation map of an ellipsoid (\cref{thm:augmentation is nonzero}). The proof of this theorem consists in an explicit count of curves in the ellipsoid satisfying a tangency constraint (\cref{lem:moduli spaces of ellipsoids have 1 element}) together with the fact that the moduli space of such curves is transversely cut out (\cref{prp:moduli spaces without point constraint are tco,prp:moduli spaces w point are tco,prp:moduli spaces w tangency are tco}). Therefore, the explicit count agrees with the virtual count. We now state the assumptions for this section. Let $a_1 < \cdots < a_n \in \R_{> 0}$ be rationally linearly independent and consider the ellipsoid $E(a_1,\ldots,a_n) \subset \C^n$. By \cite[Section 2.1]{guttSymplecticCapacitiesPositive2018}, $\partial E(a_1, \ldots, a_n)$ has exactly $n$ simple Reeb orbits $\gamma_1, \ldots, \gamma_n$, which satisfy \begin{IEEEeqnarray}{rCls+x*} \gamma_j(t) & = & \sqrt{\frac{a_j}{\pi}} e^{\frac{2 \pi i t}{a_j}} e_j, \\ \mathcal{A}(\gamma^m_j) & = & m a_j, \\ \conleyzehnder(\gamma^m_j) & = & n - 1 + 2 \sum_{i=1}^{n} \p{L}{2}{\frac{m a_j}{a_i}}, \plabel{eq:cz of reeb in ellipsoid} \end{IEEEeqnarray} where $\gamma_j \colon \R / a_j \Z \longrightarrow \partial E(a_1, \ldots, a_n)$ and $e_j$ is the $j$th vector of the canonical basis of $\C^n$ as a vector space over $\C$. For simplicity, for every $\ell = 1, \ldots, n$ denote $E_\ell = E(a_1,\ldots,a_\ell) \subset \C^\ell$. Notice that $\gamma_1$ is a Reeb orbit of $\partial E_1, \ldots, \partial E_n$. 
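To illustrate equation \eqref{eq:cz of reeb in ellipsoid} (the specific values below are chosen for illustration only), consider $n = 2$, $a_1 = 1$, $a_2 = \sqrt{7}$ and $m = 2$, so that $m a_1 < a_2$. Then \begin{IEEEeqnarray*}{rCls+x*} \conleyzehnder(\gamma^2_1) & = & 1 + 2 \p{}{2}{\p{L}{2}{\frac{2}{1}} + \p{L}{2}{\frac{2}{\sqrt{7}}}} = 1 + 2 (2 + 0) = 5, \\ \conleyzehnder(\gamma_2) & = & 1 + 2 \p{}{2}{\p{L}{2}{\frac{\sqrt{7}}{1}} + \p{L}{2}{\frac{\sqrt{7}}{\sqrt{7}}}} = 1 + 2 (2 + 1) = 7, \end{IEEEeqnarray*} while $\mathcal{A}(\gamma^2_1) = 2$ and $\mathcal{A}(\gamma_2) = \sqrt{7}$. In particular, $\conleyzehnder(\gamma^2_1) = n - 1 + 2m$, in accordance with \cref{lem:unique reeb orbit with cz equal to} below.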
Define maps \begin{IEEEeqnarray*}{rClCrCl} \iota_{\ell} \colon \C^{\ell} & \longrightarrow & \C^{\ell + 1}, & \quad & \iota_\ell(z_1,\ldots,z_\ell) & \coloneqq & (z_1,\ldots,z_\ell,0) \\ h_{\ell} \colon \C^{\ell} & \longrightarrow & \C, & \quad & h_\ell(z_1,\ldots,z_\ell) & \coloneqq & z_1. \end{IEEEeqnarray*} The maps $\iota_{\ell} \colon E_\ell \longrightarrow E_{\ell+1}$ are Liouville embeddings satisfying the assumptions in \cref{sec:functional analytic setup}. Define also \begin{IEEEeqnarray*}{rCls+x*} x_\ell & \coloneqq & 0 \in \C^\ell, \\ D_{\ell} & \coloneqq & \{ (z_1,\ldots,z_\ell) \in \C^{\ell} \mid z_1 = 0 \} = h_{\ell}^{-1}(0). \end{IEEEeqnarray*} Choose an admissible almost complex structure $J_{\ell} \in \mathcal{J}(E_\ell, D_\ell)$ on $\hat{E}_{\ell}$ such that $J_{\ell}$ is the canonical almost complex structure of $\C^\ell$ near $0$. We assume that the almost complex structures are chosen in such a way that $\hat{\iota}_{\ell} \colon \hat{E}_{\ell} \longrightarrow \hat{E}_{\ell + 1}$ is holomorphic and also such that there exists a biholomorphism $\varphi \colon \hat{E}_1 \longrightarrow \C$ such that $\varphi(z) = z$ for $z$ near $0 \in \C$ (see \cref{lem:biholomorphism explicit} below). Let $m \in \Z_{\geq 1}$ and assume that $m a_1 < a_2 < \cdots < a_n$. Consider the sphere $S^2$, without any specified almost complex structure, with a puncture $z_1 \in S^2$ and an asymptotic marker $v_1 \in (T_{z_1} S^2 \setminus \{0\}) / \R_{> 0}$, and also a marked point $z_0 \in \dot{S}^2 = S^2 \setminus \{z_1\}$. For $k \in \Z_{\geq 0}$, denote\begin{IEEEeqnarray*}{lCls+x*} \mathcal{M}^{\ell,(k)}_{\mathrm{p}} & \coloneqq & \mathcal{M}_{E_{\ell}}^{\$, J_{\ell}}(\gamma^m_1)\p{<}{}{\mathcal{T}^{(k)}x_\ell}_{\mathrm{p}} \\ & \coloneqq & \left\{ (j, u) \ \middle\vert \begin{array}{l} j \text{ is an almost complex structure on }S^2, \\ u \colon (\dot{S}^2, j) \longrightarrow (\hat{E}_\ell, J_\ell) \text{ is as in \cref{def:asy cyl holomorphic curve}}, \\ u(z_0) = x_\ell \text{ and $u$ has contact order $k$ to $D_\ell$ at $x_\ell$} \end{array} \right\}. \end{IEEEeqnarray*} Here, the subscript $\mathrm{p}$ means that the moduli space consists of parametrized curves, i.e. we are not quotienting by biholomorphisms. Denote the moduli spaces of regular curves and of unparametrized curves by \begin{IEEEeqnarray*}{lCls+x*} \mathcal{M}^{\ell,(k)}_{\mathrm{p,reg}} & \coloneqq & \mathcal{M}_{E_{\ell}}^{\$, J_{\ell}}(\gamma^m_1)\p{<}{}{\mathcal{T}^{(k)}x_\ell}_{\mathrm{p,reg}}, \\ \mathcal{M}^{\ell,(k)} & \coloneqq & \mathcal{M}_{E_{\ell}}^{\$, J_{\ell}}(\gamma^m_1)\p{<}{}{\mathcal{T}^{(k)}x_\ell} \coloneqq \mathcal{M}^{\ell,(k)}_{\mathrm{p}} / \sim. \end{IEEEeqnarray*} Here, $\mathcal{M}^{\ell,(0)} \coloneqq \mathcal{M}_{E_{\ell}}^{\$, J_{\ell}}(\gamma^m_1)\p{<}{}{\mathcal{T}^{(0)}x_\ell} \coloneqq \mathcal{M}_{E_{\ell}}^{\$, J_{\ell}}(\gamma^m_1)$ and analogously for $\mathcal{M}^{\ell,(0)}_{\mathrm{p,reg}}$ and $\mathcal{M}^{\ell,(0)}_{\mathrm{p}}$. 
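Before proceeding, we record the expected dimension of these moduli spaces. Combining the dimension formula used in the proof of \cref{thm:properties of hsc} (applied with $\Gamma = (\gamma^m_1)$, so $p = 1$) with equation \eqref{eq:cz of reeb in ellipsoid} and the assumption $m a_1 < a_2 < \cdots < a_n$, which gives $\conleyzehnder(\gamma^m_1) = \ell - 1 + 2m$, we obtain \begin{IEEEeqnarray*}{rCls+x*} \operatorname{virdim} \mathcal{M}^{\ell,(k)} & = & (\ell - 3)(2 - 1) + \conleyzehnder(\gamma^m_1) - 2 \ell - 2 k + 4 \\ & = & (\ell - 3) + (\ell - 1 + 2 m) - 2 \ell - 2 k + 4 \\ & = & 2 (m - k), \end{IEEEeqnarray*} which vanishes precisely when $k = m$. (Forgetting the asymptotic markers is a finite-to-one map by \cref{rmk:counts of moduli spaces with or without asy markers}, so the markers do not affect this dimension.)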
\begin{lemma} \phantomsection\label{lem:biholomorphism explicit} For any $a > 0$, there exists an almost complex structure $J$ on $\hat{B}(a)$ and a biholomorphism $\varphi \colon \hat{B}(a) \longrightarrow \C$ such that \begin{enumerate} \item \label{lem:biholomorphism explicit 1} $J$ is cylindrical on $\R_{\geq 0} \times \partial B(a)$; \item \label{lem:biholomorphism explicit 2} $J$ is the canonical almost complex structure of $\C$ near $0 \in B(a) \subset \C$; \item \label{lem:biholomorphism explicit 3} $\varphi(z) = z$ for $z$ near $0 \in B(a) \subset \C$. \end{enumerate} \end{lemma} \begin{proof} Choose $\rho_0 < 0$ and let $g \colon \R \longrightarrow \R_{>0}$ be a function such that $g(\rho) = a/4 \pi$ for $\rho \leq \rho_0$ and $g(\rho) = 1$ for $\rho \geq 0$. For $(\rho, w) \in \R \times \partial B(a)$, define \begin{IEEEeqnarray*}{rCls+x*} f(\rho) & \coloneqq & \exp \p{}{2}{\frac{\rho_0}{2} + \frac{2 \pi}{a} \int_{\rho_0}^{\rho} g(\sigma) \edv \sigma}, \\ J_{(\rho, w)} (\partial_{\rho}) & \coloneqq & g (\rho) R^{\partial B(a)}_{w}, \\ \varphi(\rho, w) & \coloneqq & f(\rho) w. \end{IEEEeqnarray*} Property \ref{lem:biholomorphism explicit 1} follows from the fact that $g(\rho) = 1$ for $\rho \geq 0$. Consider the Liouville vector field of $\C$, which is denoted by $Z$ and given by $Z(w) = w/2$. Let $\Phi \colon \R \times \partial B(a) \longrightarrow \C$ be the map given by $\Phi(\rho, w) = \phi^\rho_Z(w) = \exp(\rho/2) w$. By definition of completion, $\Phi|_{B(a) \setminus \{0\}} \colon B(a) \setminus \{0\} \longrightarrow \C$ is the inclusion. To prove property \ref{lem:biholomorphism explicit 3}, it suffices to show that $\varphi(\rho, w) = \Phi(\rho, w)$ for every $(\rho, w) \in \R_{\leq \rho_0} \times \partial B(a)$. For this, simply note that \begin{IEEEeqnarray*}{rCls+x*} f(\rho) & = & \exp \p{}{2}{\frac{\rho_0}{2} + \frac{2 \pi}{a} \int_{\rho_0}^{\rho} g(\sigma) \edv \sigma} & \quad [\text{by definition of $f$}] \\ & = & \exp \p{}{2}{\frac{\rho_0}{2} + \frac{2 \pi}{a} (\rho - \rho_0) \frac{a}{4 \pi} } & \quad [\text{$\rho \leq \rho_0$ implies $g(\rho) = a / 4 \pi$}] \\ & = & \exp \p{}{2}{\frac{\rho}{2}}. \end{IEEEeqnarray*} Therefore, $\varphi(z) = z$ for $z$ near $0 \in B(a) \subset \C$, and in particular $\varphi$ can be extended smoothly to a map $\varphi \colon \hat{B}(a) \longrightarrow \C$. We show that $\varphi$ is holomorphic. \begin{IEEEeqnarray*}{rCls+x*} j \circ \dv \varphi(\rho, w) (\partial_{\rho}) & = & j \p{}{2}{\pdv{}{\rho} \p{}{1}{f(\rho) |w|} \pdv{}{r}\Big|_{\varphi(\rho, w)}} & \quad [\text{by definition of $\varphi$}] \\ & = & \frac{2 \pi}{a} \, g(\rho) \, j \p{}{2}{ f(\rho) |w| \pdv{}{r}\Big|_{\varphi(\rho, w)}} & \quad [\text{by definition of $f$}] \\ & = & \frac{2 \pi}{a} \, g(\rho) \, j \p{}{2}{ |\varphi(\rho,w)| \pdv{}{r}\Big|_{\varphi(\rho, w)}} & \quad [\text{by definition of $\varphi$}] \\ & = & \frac{2 \pi}{a} \, g(\rho) \, \pdv{}{\theta}\Big|_{\varphi(\rho, w)} & \quad [\text{by definition of $j$}] \\ & = & g(\rho) \, \dv \varphi(\rho, w) (R^{\partial B(a)}_w) & \quad [\text{by \cite[Equation (2.2)]{guttSymplecticCapacitiesPositive2018}}] \\ & = & \dv \varphi(\rho, w) \circ J (\partial_{\rho}) & \quad [\text{by definition of $J$}], \end{IEEEeqnarray*} Where $(r, \theta)$ are the polar coordinates of $\C$. Since $\varphi$ is holomorphic and $\varphi$ is the identity near the origin, we conclude that $J$ is the canonical almost complex structure of $\C$ near the origin. 
In particular, $J$ can be extended smoothly to an almost complex structure on $\hat{B}(a)$, which proves \ref{lem:biholomorphism explicit 2}. Finally, we show that $\varphi$ is a diffeomorphism. For this, it suffices to show that $\Phi^{-1} \circ \varphi \colon \R \times \partial B(a) \longrightarrow \R \times \partial B(a)$ is a diffeomorphism. This map is given by $\Phi^{-1} \circ \varphi(\rho, w) = (2 \ln(f(\rho)), w)$. Since \begin{IEEEeqnarray*}{c+x*} \odv{}{\rho} (2 \ln(f(\rho))) = 2 \frac{f'(\rho)}{f(\rho)} = \frac{4 \pi}{a} g(\rho) > 0, \end{IEEEeqnarray*} $\varphi$ is a diffeomorphism. \end{proof} \begin{lemma} \label{lem:psi j} Let $\operatorname{inv} \colon \overline{\C} \longrightarrow \overline{\C}$ be the map given by $\operatorname{inv}(z) = 1/z$ and consider the vector $V \coloneqq \dv \operatorname{inv}(0) \partial_x \in T_{\infty} \overline{\C}$. For every $j \in \mathcal{T}$ there exists a unique biholomorphism $\psi_j \colon (\overline{\C}, j_0) \longrightarrow (S^2, j)$ such that \begin{IEEEeqnarray*}{c+x*} \psi_j(0) = z_0, \qquad \psi_j(\infty) = z_1, \qquad \dv \psi_j(\infty) V = \frac{v_1}{\| v_1 \|}, \end{IEEEeqnarray*} where $\| \cdot \|$ is the norm coming from the canonical Riemannian metric on $S^2$ as the sphere of radius $1$ in $\R^3$. \end{lemma} \begin{proof} By the uniformization theorem \cite[Theorem XII.0.1]{desaint-gervaisUniformizationRiemannSurfaces2016}, there exists a biholomorphism $\phi \colon (S^2, j) \longrightarrow (\overline{\C}, j_0)$. Since there exists a unique Möbius transformation $\psi_0 \colon (\overline{\C}, j_0) \longrightarrow (\overline{\C}, j_0)$ such that \begin{IEEEeqnarray*}{c+x*} \psi_0(0) = \phi(z_0), \qquad \psi_0(\infty) = \phi(z_1), \qquad \dv \psi_0 (\infty) V = \dv \phi(z_1) \frac{v_1}{\| v_1 \|}, \end{IEEEeqnarray*} the result follows by taking $\psi_j \coloneqq \phi^{-1} \circ \psi_0$. \end{proof} We will also denote by $\psi_j$ the restriction $\psi_j \colon (\C, j_0) \longrightarrow (S^2, j)$. \begin{lemma} \label{lem:u is a polynomial} If $(j,u) \in \mathcal{M}^{1,(0)}$ then $\varphi \circ u \circ \psi_j \colon \C \longrightarrow \C$ is a polynomial of degree $m$. \end{lemma} \begin{proof} Since $u$ is positively asymptotic to $\gamma^m_1$, the map $\varphi \circ u \circ \psi_j$ goes to $\infty$ as $z$ goes to $\infty$. Therefore, $\varphi \circ u \circ \psi_j$ is an entire function with a pole at $\infty$, hence a polynomial. Again using the fact that $u$ is positively asymptotic to $\gamma^m_1$, we conclude that for $r$ big enough the path $\theta \longmapsto \varphi \circ u \circ \psi_j(r e^{i \theta})$ winds around the origin $m$ times. This implies that the degree of $\varphi \circ u \circ \psi_j$ is $m$. \end{proof} \begin{lemma} \label{lem:normal cz is one} For every $\ell = 1,\ldots,n-1$, view $\gamma^m_1$ as a Reeb orbit of $\partial E_{\ell} \subset \partial E_{\ell + 1}$. The normal Conley--Zehnder index of $\gamma^m_1$ is $1$. \end{lemma} \begin{proof} By \cite[Equation (2.2)]{guttSymplecticCapacitiesPositive2018}, the Reeb vector field of $\partial E_{\ell + 1}$ is given by \begin{IEEEeqnarray*}{c+x*} R^{\partial E_{\ell + 1}} = 2 \pi \sum_{j=1}^{\ell+1} \frac{1}{a_j} \pdv{}{\theta_{j}}, \end{IEEEeqnarray*} where $\theta_j$ denotes the angular polar coordinate of the $j$th summand of $\C^{\ell+1}$. Therefore, the flow of $R^{\partial E_{\ell + 1}}$ is given by \begin{IEEEeqnarray*}{rrCl} \phi^{t}_{R} \colon & \partial E_{\ell+1} & \longrightarrow & \partial E_{\ell+1} \\ & (z_1,\ldots,z_{\ell+1}) & \longmapsto & \p{}{2}{e^{\frac{2 \pi i t}{a_1}} z_1, \ldots, e^{\frac{2 \pi i t}{a_{\ell+1}}} z_{\ell+1}}.
\end{IEEEeqnarray*} The diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \xi^{\partial E_{\ell}}_{\gamma^m_1(0)} \ar[r] \ar[d, swap, "\dv \phi^t_{R}(\gamma^m_1(0))"] & \xi^{\partial E_{\ell+1}}_{\gamma^m_1(0)} \ar[d, "\dv \phi^t_{R}(\gamma^m_1(0))"] & \big(\xi^{\partial E_{\ell+1}}_{\gamma^m_1(0)}\big)^{\perp} \ar[l] \ar[d, "\dv \phi^t_{R}(\gamma^m_1(0))"] \ar[r, equals] & \C \ar[d, "\times \exp \p{}{1}{\frac{2 \pi i t}{a_{\ell+1}}}"] \\ \xi^{\partial E_{\ell}}_{\gamma^m_1(t)} \ar[r] & \xi^{\partial E_{\ell+1}}_{\gamma^m_1(t)} & \big(\xi^{\partial E_{\ell+1}}_{\gamma^m_1(t)}\big)^{\perp} \ar[l] \ar[r, equals] & \C \end{tikzcd} \end{IEEEeqnarray*} commutes. Define a path $A_{\gamma^m_1} \colon [0,m a_1] \longrightarrow \operatorname{Sp}(2)$ by $A_{\gamma^m_1}(t) = \exp (t J_0 S)$, where \begin{IEEEeqnarray*}{c+x*} S = \frac{2 \pi}{a_{\ell + 1}} \begin{bmatrix} 1 & 0 \\ 0 & 1 \end{bmatrix}. \end{IEEEeqnarray*} The only eigenvalue of $S$ is $2 \pi / a_{\ell+1}$, which has multiplicity $2$. Therefore, the signature of $S$ is $\signature S = 2$. These facts allow us to compute $\conleyzehnder^{\perp}(\gamma^m_1)$ using \cref{prp:gutts cz formula}: \begin{IEEEeqnarray*}{rCls+x*} \conleyzehnder^{\perp}(\gamma^m_1) & = & \conleyzehnder(A_{\gamma^m_1}) & \quad [\text{by definition of $\conleyzehnder^{\perp}$}] \\ & = & \p{}{2}{\frac{1}{2} + \p{L}{2}{\sqrt{\frac{2 \pi}{a_{\ell + 1}}\frac{2 \pi}{a_{\ell + 1}}} \frac{m a_1}{2 \pi}}} \signature S & \quad [\text{by \cref{prp:gutts cz formula}}] \\ & = & \frac{1}{2} \signature S & \quad [\text{since $m a_1 < a_2 < \cdots < a_n$}] \\ & = & 1 & \quad [\text{by the discussion above}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{lemma} \label{lem:unique reeb orbit with cz equal to} If $\ell = 1,\ldots,n$ then $\gamma^m_1$ is the unique Reeb orbit of $\partial E_{\ell}$ such that $\conleyzehnder(\gamma^m_1) = \ell - 1 + 2m$. \end{lemma} \begin{proof} First, notice that \begin{IEEEeqnarray*}{rCls+x*} \conleyzehnder(\gamma^m_1) & = & \ell - 1 + 2 \sum_{j=1}^{\ell} \p{L}{2}{\frac{m a_1}{a_j}} & \quad [\text{by equation \eqref{eq:cz of reeb in ellipsoid}}] \\ & = & \ell - 1 + 2 m & \quad [\text{since $m a_1 < a_2 < \cdots < a_n$}]. \end{IEEEeqnarray*} Conversely, let $\gamma = \gamma^k_i$ be a Reeb orbit of $\partial E_\ell$ with $\conleyzehnder(\gamma) = \ell - 1 + 2m$. By equation \eqref{eq:cz of reeb in ellipsoid}, this implies that \begin{IEEEeqnarray}{c+x*} \label{eq:k is sum of floors} m = \sum_{j=1}^{\ell} \p{L}{2}{\frac{k a_i}{a_j}}. \end{IEEEeqnarray} We show that $i = 1$. Assume by contradiction otherwise. Then \begin{IEEEeqnarray*}{rCls+x*} m & = & \sum_{1 \leq j \leq \ell} \p{L}{2}{\frac{k a_i}{a_j}} & \quad [\text{by equation \eqref{eq:k is sum of floors}}] \\ & \geq & \sum_{1 \leq j \leq i} \p{L}{2}{\frac{k a_i}{a_j}} & \quad [\text{since every term in the sum is $\geq 0$}] \\ & = & \p{L}{2}{\frac{k a_i}{a_1}} + \sum_{1 < j < i} \p{L}{2}{\frac{k a_i}{a_j}} + k & \quad [\text{since by assumption, $i > 1$}] \\ & \geq & (m + i - 1) k & \quad [\text{$m a_1 < a_2 < \cdots < a_i$}] \\ & > & m k & \quad [\text{since by assumption, $i > 1$}], \end{IEEEeqnarray*} which is a contradiction, and therefore $i = 1$. We show that $k = m$, using the fact that $m \geq \lfloor k a_i / a_1 \rfloor = k$. 
\begin{IEEEeqnarray*}{rCls+x*} m & = & \sum_{1 \leq j \leq \ell} \p{L}{2}{\frac{k a_1}{a_j}} & \quad [\text{by equation \eqref{eq:k is sum of floors} and since $i = 1$}] \\ & = & k + \sum_{2 \leq j \leq \ell} \p{L}{2}{\frac{k a_1}{a_j}} & \\ & = & k & \quad [\text{since $k \leq m$ and $k a_1 \leq m a_1 < a_2 < \cdots < a_n$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{lemma} \label{lem:lch of ellipsoid} The module $CH_{n - 1 + 2m}(E_n)$ is the free $\Q$-module generated by $\gamma^m_1$. \end{lemma} \begin{proof} By equation \eqref{eq:cz of reeb in ellipsoid}, every Reeb orbit of $\partial E_n$ is good. We claim that the differential $\partial \colon CC(E_n) \longrightarrow CC(E_n)$ is zero. Assume by contradiction that there exists a Reeb orbit $\gamma$ such that $\partial \gamma \neq 0$. By definition of $\partial$, this implies that there exist Reeb orbits $\eta, \alpha_1, \ldots, \alpha_p$ such that \begin{IEEEeqnarray*}{rCls+x*} 0 & \neq & \#^{\mathrm{vir}} \overline{\mathcal{M}}^{J_n}_{\partial E_n}(\gamma; \eta, \alpha_1, \ldots, \alpha_p), \\ 0 & \neq & \#^{\mathrm{vir}} \overline{\mathcal{M}}^{J_n}_{E_n}(\alpha_j), \quad \text{for } j=1,\ldots,p. \end{IEEEeqnarray*} By assumption on the virtual perturbation scheme, \begin{IEEEeqnarray*}{rCls+x*} 0 & = & \operatorname{virdim} \overline{\mathcal{M}}^{J_n}_{E_n}(\alpha_j) = n - 3 + \conleyzehnder(\alpha_j) \quad \text{for every } j = 1,\ldots,p, \\ \\ 0 & = & \operatorname{virdim} \overline{\mathcal{M}}^{J_n}_{\partial E_n}(\gamma; \eta, \alpha_1, \ldots, \alpha_p) \\ & = & (n-3)(2 - (2+p)) + \conleyzehnder(\gamma) - \conleyzehnder(\eta) - \sum_{j=1}^{p} \conleyzehnder(\alpha_j) - 1 \\ & = & \conleyzehnder(\gamma) - \conleyzehnder(\eta) - 1 \\ & \in & 1 + 2 \Z, \end{IEEEeqnarray*} where in the last line we used equation \eqref{eq:cz of reeb in ellipsoid}. This gives the desired contradiction, and we conclude that $\partial \colon CC(E_n) \longrightarrow CC(E_n)$ is zero. Therefore, $CH(E_n) = CC(E_n)$ is the free $\Q$-module generated by the Reeb orbits of $\partial E_n$. By \cref{lem:unique reeb orbit with cz equal to}, $\gamma^m_1$ is the unique Reeb orbit of $\partial E_n$ with $\conleyzehnder(\gamma^m_1) = n - 1 + 2m$, from which the result follows. \end{proof} \begin{lemma} \phantomsection\label{lem:moduli spaces of ellipsoids are all equal} If $\ell = 1,\ldots,n$ and $k \in \Z_{\geq 1}$ then $\mathcal{M}^{\ell,(k)}_{\mathrm{p}} = \mathcal{M}^{1,(k)}_{\mathrm{p}}$ and $\mathcal{M}^{\ell,(k)} = \mathcal{M}^{1,(k)}$. \end{lemma} \begin{proof} It suffices to show that $\mathcal{M}^{\ell,(k)}_{\mathrm{p}} = \mathcal{M}^{\ell+1,(k)}_{\mathrm{p}}$ for every $\ell = 1,\ldots,n-1$. The inclusion $\mathcal{M}^{\ell,(k)}_{\mathrm{p}} \subset \mathcal{M}^{\ell+1,(k)}_{\mathrm{p}}$ follows from the fact that the inclusion $\hat{E}_\ell \hookrightarrow \hat{E}_{\ell+1}$ is holomorphic and the assumptions on the symplectic divisors. To prove that $\mathcal{M}^{\ell+1,(k)}_{\mathrm{p}} \subset \mathcal{M}^{\ell,(k)}_{\mathrm{p}}$, it suffices to assume that $(j,u) \in \mathcal{M}^{\ell+1,(k)}_{\mathrm{p}}$ and to show that the image of $u$ is contained in $\hat{E}_\ell \subset \hat{E}_{\ell+1}$. Since $u$ has contact order $k$ to $D_{\ell+1}$ at $x_{\ell+1} = \iota_{\ell}(x_{\ell})$, we conclude that $u$ is not disjoint from $\hat{E}_\ell$. By \cref{lem:stabilization 2}, $u$ is contained in $\hat{E}_\ell$. \end{proof} We now prove that the moduli spaces $\mathcal{M}^{\ell,(k)}$ are regular. The proof strategy is as follows.
\begin{enumerate} \item \cref{prp:moduli spaces without point constraint are tco} deals with the moduli spaces $\mathcal{M}^{1,(0)}$. We show that the linearized Cauchy--Riemann operator is surjective using \cref{lem:Du is surjective case n is 1}. \item \cref{prp:moduli spaces w point are tco} deals with the moduli spaces $\mathcal{M}^{\ell,(1)}$. Here, we need to consider the linearized Cauchy--Riemann operator together with an evaluation map. We show inductively that this map is surjective using \cref{lem:DX surj implies DY surj}. \item Finally, \cref{prp:moduli spaces w tangency are tco} deals with the moduli spaces $\mathcal{M}^{\ell,(k)}$. We now need to consider the jet evaluation map. We prove inductively that this map is surjective by writing it explicitly. \end{enumerate} \begin{proposition} \label{prp:moduli spaces without point constraint are tco} The moduli spaces $\mathcal{M}^{1,(0)}_{\mathrm{p}}$ and $\mathcal{M}^{1,(0)}$ are transversely cut out. \end{proposition} \begin{proof} It is enough to show that $\mathcal{M}^{1,(0)}_{\mathrm{p}}$ is transversely cut out, since this implies that $\mathcal{M}^{1,(0)}$ is transversely cut out as well. Recall that $\mathcal{M}^{1,(0)}_{\mathrm{p}}$ can be written as the zero set of the Cauchy--Riemann operator $\overline{\partial}\vphantom{\partial}^{1} \colon \mathcal{T} \times \mathcal{B} E_{1} \longrightarrow \mathcal{E} E_{1}$. It suffices to assume that $(j,u) \in (\overline{\partial}\vphantom{\partial}^{1})^{-1}(0)$ and to prove that the linearization \begin{IEEEeqnarray*}{c+x*} \mathbf{L}_{(j,u)}^1 \colon T_j \mathcal{T} \oplus T_u \mathcal{B} E_1 \longrightarrow \mathcal{E}_{(j,u)} E_1 \end{IEEEeqnarray*} is surjective. This follows from \cref{lem:Du is surjective case n is 1}. \end{proof} \begin{proposition} \label{prp:moduli spaces w point are tco} If $\ell = 1,\ldots,n$ then $\mathcal{M}^{\ell,(1)}_{\mathrm{p}}$ and $\mathcal{M}^{\ell,(1)}$ are transversely cut out. \end{proposition} \begin{proof} We will use the notation of \cref{sec:functional analytic setup} with $X = E_{\ell}$ and $Y = E_{\ell + 1}$. We will show by induction on $\ell$ that $\mathcal{M}^{\ell,(1)}_{\mathrm{p}}$ is transversely cut out. This implies that $\mathcal{M}^{\ell,(1)}$ is transversely cut out as well. We prove the base case. By \cref{prp:moduli spaces without point constraint are tco}, $\mathcal{M}^{1,(0)}_{\mathrm{p}}$ is a smooth manifold. Consider the evaluation map \begin{IEEEeqnarray*}{rrCl} \operatorname{ev}^{1} \colon & \mathcal{M}^{1,(0)}_{\mathrm{p}} & \longrightarrow & \hat{E}_1 \\ & (j,u) & \longmapsto & u(z_0). \end{IEEEeqnarray*} Notice that $\mathcal{M}^{1,(1)}_{\mathrm{p}} = (\operatorname{ev}^1)^{-1}(x_1)$. We wish to show that the linearized evaluation map $\mathbf{E}^1_{(j,u)} = \dv (\operatorname{ev}^1)(j,u) \colon T_{(j,u)} \mathcal{M}^{1,(0)}_{\mathrm{p}} \longrightarrow T_{u(z_0)} \hat{E}_1$ is surjective whenever $u(z_0) = \operatorname{ev}^{1}(j,u) = x_1$. 
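Before writing down the precise argument, we record the idea, purely as motivation for the diagrams below: by \cref{lem:u is a polynomial}, a curve $(j,u) \in \mathcal{M}^{1,(0)}_{\mathrm{p}}$ determines a polynomial \begin{IEEEeqnarray*}{c+x*} \varphi \circ u \circ \psi_j (z) = a_0 + a_1 z + \cdots + a_m z^m, \qquad a_m \neq 0, \end{IEEEeqnarray*} and since $\psi_j(0) = z_0$, under this identification $\operatorname{ev}^1$ corresponds to evaluation of the polynomial at $0$, i.e. to the coefficient $a_0$, which can be varied freely.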
There are commutative diagrams \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \mathcal{M}^{1,(0)}_{\mathrm{p}} \ar[r, two heads, "\Phi"] \ar[d, swap, "\operatorname{ev}^1"] & \mathcal{M} \ar[d, "\operatorname{ev}_{\mathcal{M}}"] & \mathcal{C} \ar[l, swap, hook', two heads, "\mathcal{P}"] \ar[d, "\operatorname{ev}_{\mathcal{C}}"] & & T_{(j,u)} \mathcal{M}^{1,(0)}_{\mathrm{p}} \ar[r, two heads, "{\dv \Phi(j,u)}"] \ar[d, swap, "{\mathbf{E}^1_{(j,u)}}"] & T_f \mathcal{M} \ar[d, "\mathbf{E}_{\mathcal{M}}"] & \C^{m+1} \ar[l, swap, hook', two heads, "\dv \mathcal{P}(a)"] \ar[d, "\mathbf{E}_{\mathcal{C}}"] \\ \hat{E}_1 \ar[r, hook, two heads, swap, "\varphi"] & \C \ar[r, equals] & \C & & T_{x_1} \hat{E}_1 \ar[r, hook, two heads, swap, "\dv \varphi(x_1)"] & \C \ar[r, equals] & \C \end{tikzcd} \end{IEEEeqnarray*} where \begin{IEEEeqnarray*}{rCls+x*} \mathcal{M} & \coloneqq & \{f \colon \C \longrightarrow \C \mid f \text{ is a polynomial of degree }m \}, \\ \mathcal{C} & \coloneqq & \{(a_0,\ldots,a_m) \in \C^{m+1} \mid a_m \neq 0\}, \\ \Phi(j,u) & \coloneqq & \varphi \circ u \circ \psi_j, \\ \operatorname{ev}_{\mathcal{M}}(f) & \coloneqq & f(0), \\ \operatorname{ev}_{\mathcal{C}}(a_0,\ldots,a_m) & \coloneqq & a_0, \\ \mathcal{P}(a_0,\ldots,a_m)(z) & \coloneqq & a_0 + a_1 z + \cdots + a_m z^m, \end{IEEEeqnarray*} and the diagram on the right is obtained by linearizing the one on the left. The map $\Phi$ is well-defined by \cref{lem:u is a polynomial}. Since $\mathbf{E}_{\mathcal{C}}(a_0,\ldots,a_m) = a_0$ is surjective, $\mathbf{E}^1_u$ is surjective as well. This finishes the proof of the base case. We prove the induction step, i.e. that if $\mathcal{M}^{\ell,(1)}_p$ is transversely cut out then so is $\mathcal{M}^{\ell+1,(1)}_p$. We prove that $\mathcal{M}^{\ell,(1)}_{\mathrm{p,reg}} \subset \mathcal{M}^{\ell+1,(1)}_{\mathrm{p,reg}}$. For this, assume that $(j,u) \in \mathcal{M}^{\ell,(1)}_{\mathrm{p}}$ is such that $\mathbf{L}_{(j,u)}^\ell \oplus \mathbf{E}_u^\ell \colon T_j \mathcal{T} \oplus T_{u} \mathcal{B} E_\ell \longrightarrow \mathcal{E}_{(j,u)} E_\ell \oplus T_{x_\ell} \hat{E}_\ell$ is surjective. By \cref{lem:DX surj implies DY surj}, \begin{IEEEeqnarray*}{c+x*} \mathbf{L}_{(j,u)}^{\ell+1} \oplus \mathbf{E}_u^{\ell+1} \colon T_j \mathcal{T} \oplus T_{u} \mathcal{B} E_{\ell+1} \longrightarrow \mathcal{E}_{(j,u)} E_{\ell+1} \oplus T_{x_{\ell+1}} \hat{E}_{\ell+1} \end{IEEEeqnarray*} is also surjective, which means that $(j,u) \in \mathcal{M}^{\ell+1,(1)}_{\mathrm{p,reg}}$. This concludes the proof of $\mathcal{M}^{\ell,(1)}_{\mathrm{p,reg}} \subset \mathcal{M}^{\ell+1,(1)}_{\mathrm{p,reg}}$. Finally, we show that $\mathcal{M}^{\ell+1,(1)}_{\mathrm{p,reg}} = \mathcal{M}^{\ell+1,(1)}_{\mathrm{p}}$. \begin{IEEEeqnarray*}{rCls+x*} \mathcal{M}^{\ell+1,(1)}_{\mathrm{p,reg}} & \subset & \mathcal{M}^{\ell+1,(1)}_{\mathrm{p}} & \quad [\text{since regular curves form a subset}] \\ & = & \mathcal{M}^{\ell,(1)}_{\mathrm{p}} & \quad [\text{by \cref{lem:moduli spaces of ellipsoids are all equal}}] \\ & = & \mathcal{M}^{\ell,(1)}_{\mathrm{p,reg}} & \quad [\text{by the induction hypothesis}] \\ & \subset & \mathcal{M}^{\ell+1,(1)}_{\mathrm{p,reg}} & \quad [\text{proven above}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{proposition} \label{prp:moduli spaces w tangency are tco} If $\ell = 1,\ldots, n$ and $k = 1,\ldots,m$ then $\mathcal{M}^{\ell,(k)}_{\mathrm{p}}$ and $\mathcal{M}^{\ell,(k)}$ are transversely cut out. 
\end{proposition} \begin{proof} By \cref{prp:moduli spaces w point are tco}, $\mathcal{M}^{\ell,(1)}_{\mathrm{p}}$ is a smooth manifold. Consider the jet evaluation map \begin{IEEEeqnarray*}{rrCl} j^{\ell,(k)} \colon & \mathcal{M}^{\ell,(1)}_{\mathrm{p}} & \longrightarrow & \C^{k-1} \\ & (j,u) & \longmapsto & ((h_{\ell} \circ u \circ \psi_j)^{(1)}(0), \ldots, (h_{\ell} \circ u \circ \psi_j)^{(k-1)}(0)). \end{IEEEeqnarray*} The moduli space $\mathcal{M}^{\ell,(k)}_{\mathrm{p}}$ is given by $\mathcal{M}^{\ell,(k)}_{\mathrm{p}} = (j^{\ell,(k)})^{-1}(0)$. We will prove by induction on $\ell$ that $\mathcal{M}^{\ell,(k)}_{\mathrm{p}}$ is transversely cut out. This shows that $\mathcal{M}^{\ell,(k)}$ is transversely cut out as well. Define $\mathbf{J}^{\ell,(k)}_{(j,u)} \coloneqq \dv(j^{\ell,(k)})(j,u) \colon T_{(j,u)} \mathcal{M}^{\ell,(1)}_{\mathrm{p}} \longrightarrow \C^{k-1}$. We prove the base case, i.e. that $\mathcal{M}^{1,(k)}_{\mathrm{p}}$ is transversely cut out. For this, it suffices to assume that $(j,u) \in \mathcal{M}^{1,(1)}_{\mathrm{p}}$ is such that $j^{1,(k)}(j,u) = 0$ and to prove that $\mathbf{J}^{1,(k)}_{(j,u)}$ is surjective. There are commutative diagrams \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \mathcal{M}^{1,(1)}_{\mathrm{p}} \ar[r, two heads, "\Phi"] \ar[d, swap, "j^{1,(k)}"] & \mathcal{M} \ar[d, "j^{(k)}_{\mathcal{M}}"] & \mathcal{C} \ar[l, swap, hook', two heads, "\mathcal{P}"] \ar[d, "j^{(k)}_{\mathcal{C}}"] & & T_{(j,u)} \mathcal{M}^{1,(1)}_{\mathrm{p}} \ar[r, two heads, "{\dv \Phi(j,u)}"] \ar[d, swap, "{\mathbf{J}^{1,(k)}_{(j,u)}}"] & T_f \mathcal{M} \ar[d, "\mathbf{J}^{(k)}_{\mathcal{M}}"] & \C^{m} \ar[l, swap, hook', two heads, "\dv \mathcal{P}(a)"] \ar[d, "\mathbf{J}^{(k)}_{\mathcal{C}}"] \\ \C^{k-1} \ar[r, equals] & \C^{k-1} \ar[r, equals] & \C^{k-1} & & \C^{k-1} \ar[r, equals] & \C^{k-1} \ar[r, equals] & \C^{k-1} \end{tikzcd} \end{IEEEeqnarray*} where \begin{IEEEeqnarray*}{rCls+x*} \mathcal{M} & \coloneqq & \{f \colon \C \longrightarrow \C \mid f \text{ is a polynomial of degree }m \text{ with }f(0)=0 \}, \\ \mathcal{C} & \coloneqq & \{(a_1,\ldots,a_m) \in \C^{m} \mid a_m \neq 0\}, \\ \Phi(j,u) & \coloneqq & \varphi \circ u \circ \psi_j, \\ j^{(k)}_{\mathcal{M}}(f) & \coloneqq & (f^{(1)}(0),\ldots,f^{(k-1)}(0)), \\ j^{(k)}_{\mathcal{C}}(a_1,\ldots,a_m) & \coloneqq & (a_1,\ldots,(k-1)! a_{k-1}), \\ \mathcal{P}(a_1,\ldots,a_m)(z) & \coloneqq & a_1 z + \cdots + a_m z^m, \end{IEEEeqnarray*} and the diagram on the right is obtained by linearizing the one on the left. The map $\Phi$ is well-defined by \cref{lem:u is a polynomial}. Since $\mathbf{J}^{(k)}_{\mathcal{C}}(a_1,\ldots,a_m) = (a_1,\ldots,(k-1)! a_{k-1})$ is surjective, $\mathbf{J}^{1,(k)}_u$ is surjective as well. This finishes the proof of the base case. We prove the induction step, i.e. that if $\mathcal{M}^{\ell,(k)}_{\mathrm{p}}$ is transversely cut out then so is $\mathcal{M}^{\ell+1,(k)}_{\mathrm{p}}$. We show that $\mathcal{M}^{\ell,(k)}_{\mathrm{p,reg}} \subset \mathcal{M}^{\ell+1,(k)}_{\mathrm{p,reg}}$. For this, it suffices to assume that $(j,u) \in \mathcal{M}^{\ell,(k)}_{\mathrm{p}}$ is such that $\mathbf{J}^{\ell,(k)}_{(j,u)}$ is surjective, and to prove that $\mathbf{J}^{\ell+1,(k)}_{(j,u)}$ is surjective as well. 
This follows because the diagrams \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \mathcal{M}^{\ell,(1)}_{\mathrm{p}} \ar[d] \ar[dr, "j^{\ell,(k)}"] & & & T_{(j,u)} \mathcal{M}^{\ell,(1)}_{\mathrm{p}} \ar[d] \ar[dr, "\mathbf{J}^{\ell,(k)}_u"] \\ \mathcal{M}^{\ell+1,(1)}_{\mathrm{p}} \ar[r, swap, "j^{\ell+1,(k)}"] & \C^{k-1} & & T_{(j,u)} \mathcal{M}^{\ell+1,(1)}_{\mathrm{p}} \ar[r, swap, "\mathbf{J}_u^{\ell+1,(k)}"] & \C^{k-1} \end{tikzcd} \end{IEEEeqnarray*} commute. Finally, we show that $\mathcal{M}^{\ell+1,(k)}_{\mathrm{p,reg}} = \mathcal{M}^{\ell+1,(k)}_{\mathrm{p}}$. \begin{IEEEeqnarray*}{rCls+x*} \mathcal{M}^{\ell+1,(k)}_{\mathrm{p,reg}} & \subset & \mathcal{M}^{\ell+1,(k)}_{\mathrm{p}} & \quad [\text{since regular curves form a subset}] \\ & = & \mathcal{M}^{\ell,(k)}_{\mathrm{p}} & \quad [\text{by \cref{lem:moduli spaces of ellipsoids are all equal}}] \\ & = & \mathcal{M}^{\ell,(k)}_{\mathrm{p,reg}} & \quad [\text{by the induction hypothesis}] \\ & \subset & \mathcal{M}^{\ell+1,(k)}_{\mathrm{p,reg}} & \quad [\text{proven above}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{proposition} \label{lem:moduli spaces of ellipsoids have 1 element} If $\ell = 1,\ldots,n$ then $\#^{\mathrm{vir}} \overline{\mathcal{M}}^{\ell,(m)} = \# \overline{\mathcal{M}}^{\ell,(m)} = 1$. \end{proposition} \begin{proof} By assumption on the perturbation scheme and \cref{prp:moduli spaces w tangency are tco}, $\#^{\mathrm{vir}} \overline{\mathcal{M}}^{\ell,(m)} = \# \overline{\mathcal{M}}^{\ell,(m)}$. Again by \cref{prp:moduli spaces w tangency are tco}, the moduli space $\mathcal{M}^{\ell,(m)}$ is transversely cut out and \begin{IEEEeqnarray*}{c} \dim \mathcal{M}^{\ell,(m)} = (\ell - 3)(2 - 1) + \conleyzehnder(\gamma_1^m) - 2 \ell - 2 m + 4 = 0, \end{IEEEeqnarray*} where in the second equality we have used \cref{lem:unique reeb orbit with cz equal to}. This implies that $\mathcal{M}^{\ell,(m)}$ is compact, and in particular $\# \overline{\mathcal{M}}^{\ell,(m)} = \# \mathcal{M}^{\ell,(m)}$. By \cref{lem:moduli spaces of ellipsoids are all equal}, $\# \mathcal{M}^{\ell,(m)} = \# \mathcal{M}^{1,(m)}$. It remains to show that $\# \mathcal{M}^{1,(m)} = 1$. For this, notice that $\mathcal{M}^{1,(m)}$ is the set of equivalence classes of pairs $(j,u)$, where $j$ is an almost complex structure on $\Sigma = S^2$ and $u \colon (\dot{\Sigma}, j) \longrightarrow (\hat{E}_1, J_1)$ is a holomorphic map such that \begin{enumerate} \item $u(z_0) = x_1$ and $u$ has contact order $m$ to $D_1$ at $x_1$; \item if $(s,t)$ are the cylindrical coordinates on $\dot{\Sigma}$ near $z_1$ such that $v_1$ agrees with the direction $t = 0$, then \begin{IEEEeqnarray*}{rrCls+x*} \lim_{s \to +\infty} & \pi_{\R} \circ u(s,t) & = & + \infty, \\ \lim_{s \to +\infty} & \pi_{\partial E_1} \circ u(s,t) & = & \gamma_1 (a_1 m t). \end{IEEEeqnarray*} \end{enumerate} Here, two pairs $(j_0, u_0)$ and $(j_1, u_1)$ are equivalent if there exists a biholomorphism $\phi \colon (\Sigma, j_0) \longrightarrow (\Sigma, j_1)$ such that \begin{IEEEeqnarray*}{c+x*} \phi(z_0) = z_0, \qquad \phi(z_1) = z_1, \qquad \dv \phi(z_1) v_1 = v_1. \end{IEEEeqnarray*} We claim that any two pairs $(j_0, u_0)$ and $(j_1, u_1)$ are equivalent. By \cref{lem:u is a polynomial}, the maps $\varphi \circ u_0 \circ \psi_{j_0}$ and $\varphi \circ u_1 \circ \psi_{j_1}$ are polynomials of degree $m$: \begin{IEEEeqnarray*}{rCls+x*} \varphi \circ u_0 \circ \psi_{j_0} (z) & = & a_0 + \cdots + a_m z^m, \\ \varphi \circ u_1 \circ \psi_{j_1} (z) & = & b_0 + \cdots + b_m z^m.
\end{IEEEeqnarray*} Since $u_0$ and $u_1$ have contact order $m$ to $D_1$ at $x_1$, for every $\nu = 0,\ldots,m-1$ we have \begin{IEEEeqnarray*}{rCls+x*} 0 & = & (\varphi \circ u_0 \circ \psi_{j_0})^{(\nu)}(0) = \nu! a_{\nu}, \\ 0 & = & (\varphi \circ u_1 \circ \psi_{j_1})^{(\nu)}(0) = \nu! b_{\nu}. \end{IEEEeqnarray*} Since $u_0$ and $u_1$ have the same asymptotic behaviour, $\operatorname{arg}(a_m) = \operatorname{arg}(b_m)$. Hence, there exists $\lambda \in \R_{>0}$ such that $\lambda^m b_m = a_m$. Then, \begin{IEEEeqnarray*}{c+x*} u_1 \circ \psi_{j_1} (\lambda z) = u_0 \circ \psi_{j_0} (z). \end{IEEEeqnarray*} Therefore, $(j_0, u_0)$ and $(j_1, u_1)$ are equivalent and $\# \mathcal{M}^{1,(m)} = 1$. \end{proof} \begin{remark} In \cite[Proposition 3.4]{cieliebakPuncturedHolomorphicCurves2018}, Cieliebak and Mohnke show that the signed count of the moduli space of holomorphic curves in $\C P^n$ in the homology class $[\C P^1]$ which satisfy a tangency condition $\p{<}{}{\mathcal{T}^{(n)}x}$ equals $(n-1)!$. It is unclear how this count relates to the one of \cref{lem:moduli spaces of ellipsoids have 1 element}. \end{remark} Finally, we will use the results of this section to compute the augmentation map of the ellipsoid $E_n$. \begin{theorem} \label{thm:augmentation is nonzero} The augmentation map $\epsilon_m \colon CH_{n - 1 + 2m}(E_n) \longrightarrow \Q$ is an isomorphism. \end{theorem} \begin{proof} By \cref{lem:moduli spaces of ellipsoids have 1 element}, \cref{rmk:counts of moduli spaces with or without asy markers} and definition of the augmentation map, we have $\epsilon_m(\gamma^m_1) \neq 0$. By \cref{lem:lch of ellipsoid}, $\epsilon_m$ is an isomorphism. \end{proof} \section{Computations using contact homology} Finally, we use the tools developed in this chapter to prove \cref{conj:the conjecture} (see \cref{thm:my main theorem}). The proof we give is the same as that of \cref{lem:computation of cl}, with the update that we will use the capacity $\mathfrak{g}^{\leq 1}_{k}$ to prove that \begin{IEEEeqnarray*}{c+x*} \tilde{\mathfrak{g}}^{\leq 1}_k(X) \leq \mathfrak{g}^{\leq 1}_k(X) = \cgh{k}(X) \end{IEEEeqnarray*} for any nondegenerate Liouville domain $X$. Notice that in \cref{lem:computation of cl}, $\tilde{\mathfrak{g}}^{\leq 1}_k(X) \leq \cgh{k}(X)$ held because by assumption $X$ was a $4$-dimensional convex toric domain. We start by showing that $\tilde{\mathfrak{g}}^{\leq \ell}_k(X) \leq \mathfrak{g}^{\leq \ell}_k(X)$. This result has already been proven in \cite[Section 3.4]{mcduffSymplecticCapacitiesUnperturbed2022}, but we include a proof for the sake of completeness. \begin{theorem}[{\cite[Section 3.4]{mcduffSymplecticCapacitiesUnperturbed2022}}] \phantomsection\label{thm:g tilde vs g hat} If $X$ is a Liouville domain then \begin{IEEEeqnarray*}{c+x*} \tilde{\mathfrak{g}}^{\leq \ell}_k(X) \leq {\mathfrak{g}}^{\leq \ell}_k(X). \end{IEEEeqnarray*} \end{theorem} \begin{proof} By \cref{lem:can prove ineqs for ndg}, we may assume that $X$ is nondegenerate. Choose a point $x \in \itr X$ and a symplectic divisor $D$ through $x$. Let $J \in \mathcal{J}(X,D)$ be an almost complex structure on $\hat{X}$ and consider the bar complex $\mathcal{B}(CC(X)[-1])$, computed with respect to $J$. Suppose that $a > 0$ is such that the augmentation map \begin{IEEEeqnarray*}{c+x*} \epsilon_k \colon H(\mathcal{A}^{\leq a} \mathcal{B}^{\leq \ell}(CC(X)[-1])) \longrightarrow \Q \end{IEEEeqnarray*} is nonzero. 
By \cref{thm:g tilde two definitions}, it is enough to show that there exists a word of Reeb orbits $\Gamma = (\gamma_1,\ldots,\gamma_p)$ such that \begin{IEEEeqnarray*}{c+x*} p \leq \ell, \qquad \mathcal{A}(\Gamma) \leq a, \qquad \overline{\mathcal{M}}^{J}_{X}(\Gamma)\p{<}{}{\mathcal{T}^{(k)}x} \neq \varnothing. \end{IEEEeqnarray*} Choose a homology class $\beta \in H(\mathcal{A}^{\leq a} \mathcal{B}^{\leq \ell}(CC(X)[-1]))$ such that $\epsilon_k(\beta) \neq 0$. The element $\beta$ can be written as a finite linear combination of words of Reeb orbits $\Gamma = (\gamma_1,\ldots,\gamma_p)$, where every word has length $p \leq \ell$ and action $\mathcal{A}(\Gamma) \leq a$. One of the words in this linear combination, say $\Gamma = (\gamma_1,\ldots,\gamma_{p})$, is such that $\#^{\mathrm{vir}} \overline{\mathcal{M}}^{J}_{X}(\Gamma)\p{<}{}{\mathcal{T}^{(k)}x} \neq 0$. By assumption on the virtual perturbation scheme, $\overline{\mathcal{M}}^{J}_{X}(\Gamma)\p{<}{}{\mathcal{T}^{(k)}x}$ is nonempty. \end{proof} \begin{theorem} \label{thm:g hat vs gh} If $X$ is a Liouville domain such that $\pi_1(X) = 0$ and $2 c_1(TX) = 0$ then \begin{IEEEeqnarray*}{c+x*} {\mathfrak{g}}^{\leq 1}_k(X) = \cgh{k}(X). \end{IEEEeqnarray*} \end{theorem} \begin{proof} By \cref{lem:can prove ineqs for ndg}, we may assume that $X$ is nondegenerate. Let $E = E(a_1,\ldots,a_n)$ be an ellipsoid as in \cref{sec:augmentation map of an ellipsoid} such that there exists a strict exact symplectic embedding $\phi \colon E \longrightarrow X$. In \cite{bourgeoisEquivariantSymplecticHomology2016}, Bourgeois--Oancea define an isomorphism between linearized contact homology and positive $S^1$-equivariant symplectic homology, which we will denote by $\Phi_{\mathrm{BO}}$. This isomorphism commutes with the Viterbo transfer maps and respects the action filtration. In addition, the Viterbo transfer maps in linearized contact homology commute with the augmentation maps of \cref{def:augmentation map}. Therefore, there is a commutative diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} SH^{S^1,(\varepsilon,a]}_{n - 1 + 2k}(X) \ar[r, "\iota^{S^1,a}"] \ar[d, hook, two heads, swap, "\Phi_{\mathrm{BO}}^a"] & SH^{S^1,+}_{n - 1 + 2k}(X) \ar[r, "\phi_!^{S^1}"] \ar[d, hook, two heads, "\Phi_{\mathrm{BO}}"] & SH^{S^1,+}_{n - 1 + 2k}(E) \ar[d, hook, two heads, "\Phi_{\mathrm{BO}}"] \\ CH^{a}_{n - 1 + 2k}(X) \ar[r, "\iota^{a}"] \ar[d, equals] & CH_{n - 1 + 2k}(X) \ar[r, "\phi_{!}"] \ar[d, equals] & CH_{n - 1 + 2k}(E) \ar[d, hook, two heads, "{\epsilon}^E_k"] \\ CH^{a}_{n - 1 + 2k}(X) \ar[r, swap, "\iota^{a}"] & CH_{n - 1 + 2k}(X) \ar[r, swap, "{\epsilon}_k^X"] & \Q \end{tikzcd} \end{IEEEeqnarray*} Here, the map ${\epsilon}_k^E$ is nonzero, or equivalently an isomorphism, by \cref{thm:augmentation is nonzero}. Then, \begin{IEEEeqnarray*}{rCls+x*} \cgh{k}(X) & = & \inf \{ a > 0 \mid \phi_!^{S^1} \circ \iota^{S^1,a} \neq 0 \} & \quad [\text{by \cref{def:ck alternative}}] \\ & = & \inf \{ a > 0 \mid {\epsilon}_k^X \circ \iota^{a} \neq 0 \} & \quad [\text{since the diagram commutes}] \\ & = & {\mathfrak{g}}^{\leq 1}_k(X) & \quad [\text{by \cref{def:capacities glk}}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{theorem} \phantomsection\label{thm:my main theorem} Under \cref{assumption}, if $X_\Omega$ is a convex or concave toric domain then \begin{IEEEeqnarray*}{c+x*} c_L(X_{\Omega}) = \delta_\Omega. \end{IEEEeqnarray*} \end{theorem} \begin{proof} Since $X_{\Omega}$ is concave or convex, we have $X_{\Omega} \subset N(\delta_\Omega)$.
For every $k \in \Z_{\geq 1}$, \begin{IEEEeqnarray*}{rCls+x*} \delta_\Omega & \leq & c_P(X_{\Omega}) & \quad [\text{by \cref{lem:c square geq delta}}] \\ & \leq & c_L(X_{\Omega}) & \quad [\text{by \cref{lem:c square leq c lag}}] \\ & \leq & \frac{\tilde{\mathfrak{g}}^{\leq 1}_{k}(X_{\Omega})}{k} & \quad [\text{by \cref{thm:lagrangian vs g tilde}}] \\ & \leq & \frac{{\mathfrak{g}}^{\leq 1}_{k}(X_{\Omega})}{k} & \quad [\text{by \cref{thm:g tilde vs g hat}}] \\ & = & \frac{\cgh{k}(X_{\Omega})}{k} & \quad [\text{by \cref{thm:g hat vs gh}}] \\ & \leq & \frac{\cgh{k}(N(\delta_\Omega))}{k} & \quad [\text{since $X_{\Omega} \subset N(\delta_\Omega)$}] \\ & = & \frac{\delta_\Omega(k+n-1)}{k} & \quad [\text{by \cref{lem:cgh of nondisjoint union of cylinders}}]. \end{IEEEeqnarray*} The result follows by taking the infimum over $k$. \end{proof} \chapter{Contact homology} \label{chp:contact homology} \section{Assumptions on virtual perturbation scheme} \label{sec:assumptions of virtual perturbation scheme} In this chapter, we wish to use techniques from contact homology to prove \cref{conj:the conjecture}. Consider the proof of \cref{lem:computation of cl}: to prove the inequality $c_L(X_{\Omega}) \leq \delta_\Omega$, we needed to use the fact that $\tilde{\mathfrak{g}}^{\leq 1}_k(X_{\Omega}) \leq \cgh{k}(X_{\Omega})$ (which is true if $X_{\Omega}$ is convex and $4$-dimensional). Our approach here will be to consider the capacities $\mathfrak{g}^{\leq \ell}_{k}$ from \cite{siegelHigherSymplecticCapacities2020}, which satisfy $\tilde{\mathfrak{g}}^{\leq 1}_k(X) \leq {\mathfrak{g}}^{\leq 1}_k(X) = \cgh{k}(X)$. As we will see, $\mathfrak{g}^{\leq \ell}_{k}(X)$ is defined using the linearized contact homology of $X$, where $X$ is any nondegenerate Liouville domain. Very briefly, the linearized contact homology chain complex, denoted $CC(X)$, is generated by the good Reeb orbits of $\partial X$, and therefore maps whose domain is $CC(X)$ should count holomorphic curves which are asymptotic to Reeb orbits. The ``naive'' way to define such counts of holomorphic curves would be to show that they are the elements of a moduli space which is a compact, $0$-dimensional orbifold. However, there is the possibility that a curve is multiply covered. This means that in general it is no longer possible to show that the moduli spaces are transversely cut out, and therefore we do not have access to counts of moduli spaces of holomorphic curves (or at least not in the usual sense of the notion of signed count). In the case where the Liouville domain is $4$-dimensional, there exists the possibility of using automatic transversality techniques to show that the moduli spaces are regular. This is the approach taken by Wendl \cite{wendlAutomaticTransversalityOrbifolds2010}. Nelson \cite{nelsonAutomaticTransversalityContact2015}, Hutchings--Nelson \cite{hutchingsCylindricalContactHomology2016} and Bao--Honda \cite{baoDefinitionCylindricalContact2018} use automatic transversality to define cylindrical contact homology. In order to define contact homology in more general contexts, one needs to replace the notion of count by a suitable notion of virtual count, which is obtained through a virtual perturbation scheme. This was done by Pardon \cite{pardonAlgebraicApproachVirtual2016,pardonContactHomologyVirtual2019} to define contact homology in greater generality. The theory of polyfolds by Hofer--Wysocki--Zehnder \cite{hoferPolyfoldFredholmTheory2021} can also be used to define virtual moduli counts. 
Alternative approaches using Kuranishi structures have been given by Ishikawa \cite{ishikawaConstructionGeneralSymplectic2018} and Bao--Honda \cite{baoSemiglobalKuranishiCharts2021}. Unfortunately, linearized contact homology is not yet defined in the generality we need, for the following reasons. \begin{enumerate} \item In order to prove \cref{conj:the conjecture}, we only need the capacities $\mathfrak{g}^{\leq \ell}_k$ for $\ell = 1$. These are defined using the linearized contact homology (as a chain complex) and an augmentation map which counts curves satisfying a tangency constraint. As far as we know, the current work on defining virtual moduli counts does not yet deal with moduli spaces of curves satisfying tangency constraints. \item In addition to \cref{conj:the conjecture}, in this chapter we will also prove some properties of the capacities $\mathfrak{g}^{\leq \ell}_k$ for $\ell > 1$. The definition of these capacities for $\ell > 1$ requires the structure of an $\mathcal{L}_{\infty}$-algebra on the linearized contact homology as well as an $\mathcal{L}_{\infty}$-augmentation map counting curves which satisfy a tangency constraint. \end{enumerate} So, during this chapter, we will work under the assumption that it is possible to define a virtual perturbation scheme which makes the invariants and maps described above well-defined (this is expected to be the case). \begin{assumption} \label{assumption} We assume the existence of a virtual perturbation scheme which to every compactified moduli space $\overline{\mathcal{M}}$ of asymptotically cylindrical holomorphic curves (in a symplectization or in a Liouville cobordism, possibly satisfying a tangency constraint) assigns a virtual count $\#^{\mathrm{vir}} \overline{\mathcal{M}}$. We will assume in addition that the virtual perturbation scheme has the following properties. \begin{enumerate} \item If $\#^{\mathrm{vir}} \overline{\mathcal{M}} \neq 0$ then $\operatorname{virdim} \overline{\mathcal{M}} = 0$; \item If $\overline{\mathcal{M}}$ is transversely cut out then $\#^{\mathrm{vir}} \overline{\mathcal{M}} = \# \overline{\mathcal{M}}$. In particular, if $\overline{\mathcal{M}}$ is empty then $\#^{\mathrm{vir}} \overline{\mathcal{M}} = 0$; \item The virtual count of the boundary of a moduli space (defined as a sum of virtual counts of the moduli spaces that constitute the codimension one boundary strata) is zero. In particular, the expected algebraic identities ($\partial^2 = 0$ for differentials, $\varepsilon \circ \partial = 0$ for augmentations) hold, as well as independence of auxiliary choices of almost complex structure and symplectic divisor. \end{enumerate} \end{assumption} \section{\texorpdfstring{$\mathcal{L}_{\infty}$-}{L infinity }algebras} In this section, we give a brief review of the algebraic definitions which will play a role in this chapter. Our main reference is \cite[Section 2]{siegelHigherSymplecticCapacities2020}. The key definitions are that of $\mathcal{L}_{\infty}$-algebra (\cref{def:l infinity algebra}) and its associated bar complex (\cref{def:bar complex}). We start by defining the suspension of a graded vector space. The purpose of this definition is to define $\mathcal{L}_{\infty}$-algebras in such a way that the $\mathcal{L}_{\infty}$-relations do not have extra signs (these extra signs are ``absorbed'' by the degree shift in the suspension). \begin{definition} Let $V = \bigoplus_{k \in \Z} V^k$ be a graded vector space over a field $K$.
The \textbf{suspension} of $V$ is the graded vector space $V[+1] = \bigoplus_{k \in \Z} (V[+1])^k$ given by $(V[+1])^k = V^{k+1}$. Define $s \colon V \longrightarrow V[+1]$ to be the linear map of degree $-1$ given by $s(v) = v$. \end{definition} \begin{remark} We use the Koszul sign convention, i.e. if $f,g \colon V \longrightarrow V$ are linear maps and $x, y \in V$ then $(f \otimes g)(x \otimes y) = (-1)^{\deg(x) \deg(g)} f(x) \otimes g(y)$. \end{remark} \begin{definition} Let $k \in \Z_{\geq 1}$ and denote by $\operatorname{Sym}(k)$ the symmetric group on $k$ elements. Let $V$ be a graded vector space over a field $K$. We define an action of $\operatorname{Sym}(k)$ on $\bigotimes_{j=1}^{k} V$ as follows. For $\sigma \in \operatorname{Sym}(k)$ and $v_1, \ldots, v_k \in V$, let \begin{IEEEeqnarray*}{rCls+x*} \operatorname{sign}(\sigma, v_1, \ldots, v_k) & \coloneqq & (-1)^{\operatorname{sum} \{ \deg(v_i) \deg(v_j) \, \mid \, 1 \leq i < j \leq k , \sigma(i) > \sigma(j) \} }, \\ \sigma \cdot (v_1 \otimes \cdots \otimes v_k) & \coloneqq & \operatorname{sign}(\sigma, v_1, \ldots, v_k) \, v_{\sigma(1)} \otimes \cdots \otimes v_{\sigma(k)}. \end{IEEEeqnarray*} Define $\bigodot_{j=1}^k V \coloneqq \bigotimes_{j=1}^{k} V / \operatorname{Sym}(k)$ and denote by $v_1 \odot \cdots \odot v_k$ the equivalence class of $v_1 \otimes \cdots \otimes v_k$. \end{definition} We come to the main definition of this section, which encodes the algebraic structure of linearized contact homology (see \cref{def:lch l infinity}). \begin{definition} \label{def:l infinity algebra} An \textbf{$\mathcal{L}_{\infty}$-algebra} is a graded vector space $V = \bigoplus_{k \in \Z} V^k$ together with a family $\ell = (\ell^k)_{k \in \Z_{\geq 1}}$ of maps $\ell^k \colon \bigodot_{j=1}^{k} V[+1] \longrightarrow V[+1]$ of degree $1$, satisfying the \textbf{$\mathcal{L}_{\infty}$-relations}, i.e. \begin{IEEEeqnarray*}{l} 0 = \sum_{k=1}^{n} \sum_{\sigma \in \operatorname{Sh}(k,n-k)} \operatorname{sign}(\sigma, s v_1, \ldots, s v_n) \\ \hphantom{0 = \sum_{k=1}^{n} \sum_{\sigma \in \operatorname{Sh}(k,n-k)} \quad} \ell^{n-k+1} ( \ell^k ( s v_{\sigma(1)} \odot \cdots \odot s v_{\sigma(k)} ) \odot s v_{\sigma(k+1)} \odot \cdots \odot s v_{\sigma(n)} ) \end{IEEEeqnarray*} for every $v_1,\ldots,v_n \in V$. Here, $\operatorname{Sh}(k,n-k) \subset \operatorname{Sym}(n)$ is the subset of permutations $\sigma$ such that $\sigma(1) < \cdots < \sigma(k)$ and $\sigma(k+1) < \cdots < \sigma(n)$. \end{definition} The definition of $\mathcal{L}_{\infty}$-algebra can be expressed more compactly via the notion of bar complex. Indeed, the family of maps $(\ell^k)_{k \in \Z_{\geq 1}}$ satisfies the $\mathcal{L}_{\infty}$-relations if and only if the map $\hat{\ell}$ defined below is a differential, i.e. $\hat{\ell} \circ \hat{\ell} = 0$. \begin{definition} \label{def:bar complex} Let $(V,\ell)$ be an $\mathcal{L}_{\infty}$-algebra. The \textbf{bar complex} of $(V,\ell)$ is the vector space $\mathcal{B} V = \bigoplus_{k = 1}^{+\infty} \bigodot_{j=1}^k V[+1]$ together with the degree $1$ differential $\hat{\ell} \colon \mathcal{B} V \longrightarrow \mathcal{B} V$ given by \begin{IEEEeqnarray*}{rCl} \IEEEeqnarraymulticol{3}{l}{\hat{\ell}(v_1 \odot \cdots \odot v_n)}\\ \quad & = & \sum_{k=1}^{n} \sum_{\sigma \in \operatorname{Sh}(k,n-k)} \operatorname{sign}(\sigma, v_1, \ldots, v_n) \, \ell^k ( v_{\sigma(1)} \odot \cdots \odot v_{\sigma(k)} ) \odot v_{\sigma(k+1)} \odot \cdots \odot v_{\sigma(n)}.
\end{IEEEeqnarray*} \end{definition} \begin{definition} Let $(V,\ell)$ be an $\mathcal{L}_{\infty}$-algebra. A \textbf{filtration} on $V$ is a family $(\mathcal{F}^{\leq a} V)_{a \in \R}$ of subspaces $\mathcal{F}^{\leq a} V \subset V$, satisfying the following properties: \begin{enumerate} \item if $a \leq b$ then $\mathcal{F}^{\leq a} V \subset \mathcal{F}^{\leq b} V$; \item $\bigcup_{a \in \R} \mathcal{F}^{\leq a} V = V$; \item $\ell^k( \mathcal{F}^{\leq a_1} V[+1] \odot \cdots \odot \mathcal{F}^{\leq a_k} V[+1] ) \subset \mathcal{F}^{\leq a_1 + \cdots + a_k} V[+1]$. \end{enumerate} \end{definition} \begin{definition} Let $(V, \ell)$ be an $\mathcal{L}_{\infty}$-algebra together with a filtration $(\mathcal{F}^{\leq a} V)_{a \in \R}$. The \textbf{induced filtration} on the bar complex is the family of complexes $(\mathcal{F}^{\leq a} \mathcal{B} V, \hat{\ell})_{a \in \R}$, where \begin{IEEEeqnarray*}{c+x*} \mathcal{F}^{\leq a} \mathcal{B} V \coloneqq \bigoplus_{k=1}^{+\infty} \, \bigcup_{a_1 + \cdots + a_k \leq a} \, \bigodot_{j=1}^{k} \mathcal{F}^{\leq a_j} V[+1] \end{IEEEeqnarray*} and $\hat{\ell} \colon \mathcal{F}^{\leq a} \mathcal{B} V \longrightarrow \mathcal{F}^{\leq a} \mathcal{B} V$ is the restriction of $\hat{\ell} \colon \mathcal{B} V \longrightarrow \mathcal{B} V$. \end{definition} The linearized contact homology will have a filtration induced by the action of the Reeb orbits (see \cref{def:action filtration lch}). Also, the bar complex of any $\mathcal{L}_{\infty}$-algebra has a filtration by word length, which is defined below. \begin{definition} \phantomsection\label{def:word length filtration} Let $(V, \ell)$ be an $\mathcal{L}_{\infty}$-algebra and consider its bar complex $(\mathcal{B}V, \hat{\ell})$. The \textbf{word length filtration} of $(\mathcal{B}V, \hat{\ell})$ is the family of complexes $(\mathcal{B}^{\leq m} V, \hat{\ell})_{m \in \Z_{\geq 1}}$, where $\mathcal{B}^{\leq m} V \coloneqq \bigoplus_{k=1}^{m} \bigodot_{j=1}^{k} V[+1]$ and $\hat{\ell} \colon \mathcal{B}^{\leq m} V \longrightarrow \mathcal{B}^{\leq m} V$ is the restriction of $\hat{\ell} \colon \mathcal{B}V \longrightarrow \mathcal{B}V$. \end{definition} \section{Contact homology} In this section, we define the linearized contact homology of a nondegenerate Liouville domain $X$. This is the homology of a chain complex $CC(X)$, which is described in \cref{def:linearized contact homology}. This complex has additional structure, namely it is also an $\mathcal{L}_{\infty}$-algebra (\cref{def:lch l infinity}) and it admits a filtration by action (\cref{def:action filtration lch}). We also define an augmentation map (\cref{def:augmentation map}), which is necessary to define the capacities $\mathfrak{g}^{\leq \ell}_k$. \begin{definition} Let $(M,\alpha)$ be a contact manifold and $\gamma$ be a Reeb orbit in $M$. We say that $\gamma$ is \textbf{bad} if $\conleyzehnder(\gamma) - \conleyzehnder(\gamma_0)$ is odd, where $\gamma_0$ is the simple Reeb orbit that corresponds to $\gamma$. We say that $\gamma$ is \textbf{good} if it is not bad. \end{definition} Since the parity of the Conley--Zehnder index of a Reeb orbit is independent of the choice of trivialization, the definition above is well posed. \begin{definition} \label{def:linearized contact homology} If $(X,\lambda)$ is a nondegenerate Liouville domain, the \textbf{linearized contact homology chain complex} of $X$, denoted $CC(X)$, is a chain complex given as follows. 
First, let $CC(X)$ be the vector space over $\Q$ generated by the set of good Reeb orbits of $(\partial X, \lambda|_{\partial X})$. The differential of $CC(X)$, denoted $\partial$, is given as follows. Choose $J \in \mathcal{J}(X)$. If $\gamma$ is a good Reeb orbit of $\partial X$, we define \begin{IEEEeqnarray*}{c+x*} \partial \gamma = \sum_{\eta} \p{<}{}{\partial \gamma, \eta} \, \eta, \end{IEEEeqnarray*} where $\p{<}{}{\partial \gamma, \eta}$ is the virtual count (with combinatorial weights) of holomorphic curves in $\R \times \partial X$ with one positive asymptote $\gamma$, one negative asymptote $\eta$, and $k \geq 0$ extra negative asymptotes $\alpha_1,\ldots,\alpha_k$ (called \textbf{anchors}), each weighted by the count of holomorphic planes in $\hat{X}$ asymptotic to $\alpha_j$ (see \cref{fig:differential of lch}). \end{definition} \begin{figure}[htp] \centering \begin{tikzpicture} [ scale = 0.5, help/.style = {very thin, draw = black!50}, curve/.style = {thick} ] \tikzmath{ \rx = 0.75; \ry = 0.25; } \node[anchor=west] at (13,9) {$\R \times \partial X$}; \draw (0,6) rectangle (12,12); \node[anchor=west] at (13,3) {$\hat{X}$}; \draw (0,3) -- (0,6) -- (12,6) -- (12,3); \draw (0,3) .. controls (0,-1) and (12,-1) .. (12,3); \coordinate (G) at ( 2,12); \coordinate (E) at ( 2, 6); \coordinate (A) at ( 6, 6); \coordinate (B) at (10, 6); \coordinate (L) at (-\rx,0); \coordinate (R) at (+\rx,0); \coordinate (GL) at ($ (G) + (L) $); \coordinate (EL) at ($ (E) + (L) $); \coordinate (AL) at ($ (A) + (L) $); \coordinate (BL) at ($ (B) + (L) $); \coordinate (GR) at ($ (G) + (R) $); \coordinate (ER) at ($ (E) + (R) $); \coordinate (AR) at ($ (A) + (R) $); \coordinate (BR) at ($ (B) + (R) $); \draw[curve] (G) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma$}; \draw[curve] (E) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\eta$}; \draw[curve] (A) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\alpha_1$}; \draw[curve] (B) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\alpha_2$}; \draw[curve] (ER) .. controls ($ (ER) + (0,2) $) and ($ (AL) + (0,2) $) .. (AL); \draw[curve] (AR) .. controls ($ (AR) + (0,2) $) and ($ (BL) + (0,2) $) .. (BL); \draw[curve] (AL) .. controls ($ (AL) - (0,2) $) and ($ (AR) - (0,2) $) .. (AR); \draw[curve] (BL) .. controls ($ (BL) - (0,2) $) and ($ (BR) - (0,2) $) .. (BR); \draw[curve] (GR) .. controls ($ (GR) - (0,5) $) and ($ (BR) + (0,5) $) .. (BR); \coordinate (C) at ($ (E) + (0,3) $); \draw[curve] (EL) .. controls ($ (EL) + (0,1) $) and ($ (C) - (0,1) $) .. (C); \draw[curve] (GL) .. controls ($ (GL) - (0,1) $) and ($ (C) + (0,1) $) .. (C); \end{tikzpicture} \caption{A holomorphic curve with anchors contributing to the coefficient $\p{<}{}{\partial \gamma, \eta}$} \label{fig:differential of lch} \end{figure} By assumption on the virtual perturbation scheme, $\partial \circ \partial = 0$ and $CC(X)$ is independent (up to chain homotopy equivalence) of the choice of almost complex structure $J$. In general, $CC(X)$ is not $\Z$-graded but only $\Z_2$-graded (see \cref{rmk:grading for lch}). We wish to define a structure of $\mathcal{L}_{\infty}$-algebra on $CC(X)[-1]$. Notice that the definition of $\mathcal{L}_{\infty}$-structure on a vector space (\cref{def:l infinity algebra}) also makes sense when the vector space is only $\Z_2$-graded. 
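For orientation, and purely as an illustration of the conventions of \cref{def:l infinity algebra}, note that in the two lowest arities the $\mathcal{L}_{\infty}$-relations read \begin{IEEEeqnarray*}{rCls+x*} 0 & = & \ell^1 ( \ell^1 ( s v_1 ) ), \\ 0 & = & \ell^1 ( \ell^2 ( s v_1 \odot s v_2 ) ) + \ell^2 ( \ell^1 ( s v_1 ) \odot s v_2 ) + (-1)^{\deg(s v_1) \deg(s v_2)} \, \ell^2 ( \ell^1 ( s v_2 ) \odot s v_1 ), \end{IEEEeqnarray*} i.e. $\ell^1$ is a differential and $\ell^2$ satisfies a Leibniz type compatibility with respect to $\ell^1$.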
\begin{definition} \label{def:lch l infinity} We define a structure of $\mathcal{L}_{\infty}$-algebra on $CC(X)[-1]$, given by maps $\ell^k \colon \bigodot^k CC(X) \longrightarrow CC(X)$, as follows. Choose an almost complex structure $J \in \mathcal{J}(X)$. If $\Gamma = (\gamma_1,\ldots,\gamma_k)$ is a tuple of good Reeb orbits, we define \begin{IEEEeqnarray*}{c+x*} \ell^{k} (\gamma_1 \odot \cdots \odot \gamma_{k}) = \sum_{\eta} \p{<}{}{\ell^{k} (\gamma_1 \odot \cdots \odot \gamma_{k}) , \eta} \, \eta, \end{IEEEeqnarray*} where $\p{<}{}{\ell^{k} (\gamma_1 \odot \cdots \odot \gamma_{k}) , \eta}$ is the virtual count of holomorphic curves in $\R \times \partial X$ with positive asymptotes $\gamma_1, \ldots, \gamma_k$, one negative asymptote $\eta$, and a number of extra negative asymptotes with anchors in $\hat{X}$, such that exactly one of the components in the symplectization level is nontrivial (see \cref{fig:l infinity ops of lch}). \end{definition} \begin{figure}[htp] \centering \begin{tikzpicture} [ scale = 0.5, help/.style = {very thin, draw = black!50}, curve/.style = {thick} ] \tikzmath{ \rx = 0.75; \ry = 0.25; } \node[anchor=west] at (17,9) {$\R \times \partial X$}; \draw (0,6) rectangle (16,12); \node[anchor=west] at (17,3) {$\hat{X}$}; \draw (0,3) -- (0,6) -- (16,6) -- (16,3); \draw (0,3) .. controls (0,-1) and (16,-1) .. (16,3); \coordinate (G1) at ( 3,12); \coordinate (G2) at ( 7,12); \coordinate (G3) at (11,12); \coordinate (G4) at (14,12); \coordinate (F3) at (11, 6); \coordinate (F4) at (14, 6); \coordinate (E0) at ( 2, 6); \coordinate (A1) at ( 5, 6); \coordinate (A2) at ( 8, 6); \coordinate (L) at (-\rx,0); \coordinate (R) at (+\rx,0); \coordinate (G1L) at ($ (G1) + (L) $); \coordinate (G2L) at ($ (G2) + (L) $); \coordinate (G3L) at ($ (G3) + (L) $); \coordinate (G4L) at ($ (G4) + (L) $); \coordinate (F3L) at ($ (F3) + (L) $); \coordinate (F4L) at ($ (F4) + (L) $); \coordinate (E0L) at ($ (E0) + (L) $); \coordinate (A1L) at ($ (A1) + (L) $); \coordinate (A2L) at ($ (A2) + (L) $); \coordinate (G1R) at ($ (G1) + (R) $); \coordinate (G2R) at ($ (G2) + (R) $); \coordinate (G3R) at ($ (G3) + (R) $); \coordinate (G4R) at ($ (G4) + (R) $); \coordinate (F3R) at ($ (F3) + (R) $); \coordinate (F4R) at ($ (F4) + (R) $); \coordinate (E0R) at ($ (E0) + (R) $); \coordinate (A1R) at ($ (A1) + (R) $); \coordinate (A2R) at ($ (A2) + (R) $); \draw[curve] (G1) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_1$}; \draw[curve] (G2) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_2$}; \draw[curve] (G3) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_3$}; \draw[curve] (G4) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_4$}; \draw[curve] (F3) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_3$}; \draw[curve] (F4) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_4$}; \draw[curve] (E0) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\eta$}; \draw[curve] (A1) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\alpha_1$}; \draw[curve] (A2) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\alpha_2$}; \draw[curve] (G1R) .. controls ($ (G1R) - (0,2) $) and ($ (G2L) - (0,2) $) .. (G2L); \draw[curve] (E0R) .. controls ($ (E0R) + (0,2) $) and ($ (A1L) + (0,2) $) .. (A1L); \draw[curve] (A1R) .. controls ($ (A1R) + (0,2) $) and ($ (A2L) + (0,2) $) .. (A2L); \draw[curve] (A1L) .. controls ($ (A1L) - (0,3) $) and ($ (A1R) - (0,3) $) .. (A1R); \draw[curve] (A2L) .. 
controls ($ (A2L) - (0,3) $) and ($ (F4R) - (0,3) $) .. (F4R); \draw[curve] (A2R) .. controls ($ (A2R) - (0,1) $) and ($ (F3L) - (0,1) $) .. (F3L); \draw[curve] (F3R) .. controls ($ (F3R) - (0,1) $) and ($ (F4L) - (0,1) $) .. (F4L); \draw[curve] (E0L) .. controls ($ (E0L) + (0,2) $) and ($ (G1L) - (0,2) $) .. (G1L); \draw[curve] (A2R) .. controls ($ (A2R) + (0,2) $) and ($ (G2R) - (0,2) $) .. (G2R); \draw[curve] (F3L) -- (G3L); \draw[curve] (F3R) -- (G3R); \draw[curve] (F4L) -- (G4L); \draw[curve] (F4R) -- (G4R); \node[rotate = 90] at ($ (F3) + (0,3) $) {trivial}; \node[rotate = 90] at ($ (F4) + (0,3) $) {trivial}; \end{tikzpicture} \caption{A holomorphic building contributing to the coefficient $\p{<}{}{ \ell^4 (\gamma_1 \odot \cdots \odot \gamma_4), \eta}$} \label{fig:l infinity ops of lch} \end{figure} By the assumptions on the virtual perturbation scheme, the maps $\ell^k$ satisfy the $\mathcal{L}_{\infty}$-relations and $CC(X)$ is independent (as an $\mathcal{L}_{\infty}$-algebra, up to $\mathcal{L}_{\infty}$-homotopy equivalence) of the choice of $J$. We point out that the first $\mathcal{L}_{\infty}$-operation is equal to the differential of linearized contact homology, i.e. $\ell^1 = \partial$. \begin{remark} \label{rmk:grading for lch} In general, the Conley--Zehnder index of a Reeb orbit is well-defined as an element in $\Z_2$. Therefore, the complex $CC(X)$ has a $\Z_{2}$-grading given by $\deg(\gamma) \coloneqq n - 3 - \conleyzehnder(\gamma)$, and with respect to this definition of degree every $\mathcal{L}_{\infty}$-operation $\ell^k$ has degree $1$. If $\pi_1(X) = 0$ and $2 c_1(TX) = 0$, then by \cref{lem:cz of reeb is independent of triv over filling disk} we have well-defined Conley--Zehnder indices in $\Z$, which means that $CC(X)$ is $\Z$-graded. For some purposes, it will be enough to consider only the chain complex structure on $CC(X)$ and not the $\mathcal{L}_{\infty}$-algebra structure (namely, when we consider only the capacity $\mathfrak{g}^{\leq 1}_{k}$ instead of the higher capacities $\mathfrak{g}^{\leq \ell}_{k}$). In this case, to make comparisons with $S^1$-equivariant symplectic homology simpler, we define the grading instead by $\deg(\gamma) \coloneqq \conleyzehnder(\gamma)$, which implies that $\partial$ has degree $-1$. \end{remark} \begin{definition} \label{def:action filtration lch} For every $a \in \R$, we denote by $\mathcal{A}^{\leq a} CC(X)[-1]$ the submodule of $CC(X)[-1]$ generated by the good Reeb orbits $\gamma$ with action $\mathcal{A}(\gamma) \leq a$. We call this filtration the \textbf{action filtration} of $CC[-1]$. \end{definition} In the next lemma, we check that this filtration is compatible with the $\mathcal{L}_{\infty}$-structure. \begin{lemma} \label{lem:action filtration of lch} $\ell^k ( \mathcal{A}^{\leq a_1} CC(X) \odot \cdots \odot \mathcal{A}^{\leq a_k} CC(X) ) \subset \mathcal{A}^{\leq a_1 + \cdots + a_k} CC(X)$. \end{lemma} \begin{proof} Let $\gamma_1^+, \ldots, \gamma_k^+, \eta$ be good Reeb orbits such that \begin{IEEEeqnarray*}{rCls+x*} \mathcal{A}(\gamma_i^+) & \leq & a_i, \\ \p{<}{}{\ell^k(\gamma_1^+ \odot \cdots \odot \gamma^+_k), \eta} & \neq & 0. \end{IEEEeqnarray*} We wish to show that $\mathcal{A}(\eta) \leq a_1 + \cdots + a_k$. 
Since $\p{<}{}{\ell^k(\gamma_1^+ \odot \cdots \odot \gamma^+_k), \eta} \neq 0$ and by assumption on the virtual perturbation scheme, there exists a tuple of Reeb orbits $\Gamma^-$ and a (nontrivial) punctured $J$-holomorphic sphere in $\R \times \partial X$ with asymptotes $\Gamma^\pm$, such that $\eta \in \Gamma^-$ and $\Gamma^+ \subset (\gamma^+_1,\ldots,\gamma^+_k)$. Then, \begin{IEEEeqnarray*}{rCls+x*} \mathcal{A}(\eta) & \leq & \mathcal{A}(\Gamma^-) & \quad [\text{since $\eta \in \Gamma^-$}] \\ & \leq & \mathcal{A}(\Gamma^+) & \quad [\text{by \cref{lem:action energy for holomorphic}}] \\ & \leq & \mathcal{A}(\gamma^+_1, \ldots, \gamma^+_k) & \quad [\text{since $\Gamma^+ \subset (\gamma^+_1,\ldots,\gamma^+_k)$}] \\ & \leq & a_1 + \cdots + a_k & \quad [\text{by definition of action of a tuple}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{definition} \label{def:augmentation map} Consider the bar complex $(\mathcal{B}(CC(X)[-1]), \hat{\ell})$. For each $k \in \Z_{\geq 1}$, we define an augmentation ${\epsilon}_k \colon \mathcal{B}(CC(X)[-1]) \longrightarrow \Q$ as follows. Choose $x \in \itr X$, a symplectic divisor $D$ at $x$, and an almost complex structure $J \in \mathcal{J}(X,D)$. Then, for every tuple of good Reeb orbits $\Gamma = (\gamma_1, \ldots, \gamma_p)$ define ${\epsilon}_k (\gamma_1 \odot \cdots \odot \gamma_p)$ to be the virtual count of $J$-holomorphic planes in $\hat{X}$ which are positively asymptotic to $\Gamma$ and have contact order $k$ to $D$ at $x$ (see \cref{fig:augmentation of lch}). \end{definition} \begin{figure}[htp] \centering \begin{tikzpicture} [ scale = 0.5, help/.style = {very thin, draw = black!50}, curve/.style = {thick} ] \tikzmath{ \rx = 0.75; \ry = 0.25; } \node[anchor=west] at (13,3) {$\hat{X}$}; \draw (0,3) -- (0,6) -- (12,6) -- (12,3); \draw (0,3) .. controls (0,-1) and (12,-1) .. (12,3); \coordinate (G1) at (4,6); \coordinate (G2) at (8,6); \coordinate (L) at (-\rx,0); \coordinate (R) at (+\rx,0); \coordinate (G1L) at ($ (G1) + (L) $); \coordinate (G2L) at ($ (G2) + (L) $); \coordinate (G1R) at ($ (G1) + (R) $); \coordinate (G2R) at ($ (G2) + (R) $); \coordinate (P) at (7,3); \coordinate (D) at (2,1); \draw[curve] (G1) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_1$}; \draw[curve] (G2) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_2$}; \fill (P) circle (2pt) node[anchor = north west] {$x$}; \draw[curve] ($ (P) - (D) $) -- ( $ (P) + (D) $ ) node[anchor = west] {$D$}; \draw[curve] (G1R) .. controls ($ (G1R) - (0,2) $) and ($ (G2L) - (0,2) $) .. (G2L); \draw[curve] (G1L) .. controls ($ (G1L) - (0,2) $) and ($ (P) - (D) $) .. (P); \draw[curve] (G2R) .. controls ($ (G2R) - (0,2) $) and ($ (P) + (D) $) .. (P); \end{tikzpicture} \caption{A holomorphic curve contributing to the count $\epsilon_k(\gamma_1 \odot \gamma_2)$} \label{fig:augmentation of lch} \end{figure} By assumption on the virtual perturbation scheme, ${\epsilon}_k$ is an augmentation, i.e. ${\epsilon}_k \circ \hat{\ell} = 0$. In addition, ${\epsilon}_k$ is independent (up to chain homotopy) of the choices of $x, D, J$. \section{Higher symplectic capacities} Here we define the symplectic capacities $\mathfrak{g}^{\leq \ell}_k$ from \cite{siegelHigherSymplecticCapacities2020}. We will prove the usual properties of symplectic capacities (see \cref{thm:properties of hsc}), namely monotonicity and conformality.
In addition, we prove that the value of the capacities $\mathfrak{g}^{\leq \ell}_k$ can be represented by the action of a tuple of Reeb orbits. In \cref{rmk:computations using reeb orbits property} we show how this property could in principle be combined with results from \cite{guttSymplecticCapacitiesPositive2018} to compare the capacities $\mathfrak{g}^{\leq 1}_k(X_{\Omega})$ and $\cgh{k}(X_{\Omega})$ when $X_{\Omega}$ is a convex or concave toric domain. \begin{definition}[{\cite[Section 6.1]{siegelHigherSymplecticCapacities2020}}] \label{def:capacities glk} Let $k, \ell \in \Z_{\geq 1}$ and $(X,\lambda)$ be a nondegenerate Liouville domain. The \textbf{higher symplectic capacities} of $X$ are given by \begin{IEEEeqnarray*}{c+x*} \mathfrak{g}^{\leq \ell}_k(X) \coloneqq \inf \{ a > 0 \mid \epsilon_k \colon H(\mathcal{A}^{\leq a} \mathcal{B}^{\leq \ell}(CC(X)[-1])) \longrightarrow \Q \text{ is nonzero} \}. \end{IEEEeqnarray*} \end{definition} The capacities $\mathfrak{g}^{\leq \ell}_{k}$ will be useful to us because they have similarities with the McDuff--Siegel capacities $\tilde{\mathfrak{g}}^{\leq \ell}_k$, but also with the Gutt--Hutchings capacities $\cgh{k}$ (for $\ell = 1$). More specifically: \begin{enumerate} \item Both $\mathfrak{g}^{\leq \ell}_{k}$ and $\tilde{\mathfrak{g}}^{\leq \ell}_k$ are related to the energy of holomorphic curves in $X$ which are asymptotic to a word of $p \leq \ell$ Reeb orbits and satisfy a tangency constraint. In \cref{thm:g tilde vs g hat}, we will actually show that $\tilde{\mathfrak{g}}^{\leq \ell}_k(X) \leq {\mathfrak{g}}^{\leq \ell}_k(X)$. The capacities $\mathfrak{g}^{\leq \ell}_k$ can be thought of as the SFT counterparts of $\tilde{\mathfrak{g}}^{\leq \ell}_k$, or alternatively the capacities $\tilde{\mathfrak{g}}^{\leq \ell}_k$ can be thought of as the counterparts of $\mathfrak{g}^{\leq \ell}_k$ whose definition does not require the holomorphic curves to be regular. \item Both $\mathfrak{g}^{\leq 1}_{k}$ and $\cgh{k}$ are defined in terms of a map in homology being nonzero. In the case of $\mathfrak{g}^{\leq 1}_{k}$, we consider the linearized contact homology, and in the case of $\cgh{k}$ the invariant in question is $S^1$-equivariant symplectic homology. Taking into consideration the Bourgeois--Oancea isomorphism (see \cite{bourgeoisEquivariantSymplecticHomology2016}) between linearized contact homology and positive $S^1$-equivariant symplectic homology, one can think of $\mathfrak{g}^{\leq 1}_{k}$ and $\cgh{k}$ as restatements of one another under this isomorphism. This is the idea behind the proof of \cref{thm:g hat vs gh}, where we show that $\mathfrak{g}^{\leq 1}_{k}(X) = \cgh{k}(X)$. \end{enumerate} \begin{remark} \label{rmk:novikov coefficients} In the case where $X$ is only an exact symplectic manifold instead of a Liouville domain, the proof of \cref{lem:action filtration of lch} does not work. In this case, we do not have access to an action filtration on $CC(X)$. However, it is possible to define linearized contact homology with coefficients in a Novikov ring $\Lambda_{\geq 0}$, in which case a coefficient in $\Lambda_{\geq 0}$ encodes the energy of a holomorphic curve. This is the approach taken in \cite{siegelHigherSymplecticCapacities2020} to define the capacities $\mathfrak{g}^{\leq \ell}_{k}$. It is not obvious that the definition of $\mathfrak{g}^{\leq \ell}_k$ we give and the one in \cite{siegelHigherSymplecticCapacities2020} are equivalent. 
However, \cref{def:capacities glk} seems to be the natural analogue when we have access to an action filtration, and in addition the definition we provide will be enough for our purposes. \end{remark} \begin{theorem} \label{thm:properties of hsc} The functions ${\mathfrak{g}}^{\leq \ell}_k$ satisfy the following properties, for all nondegenerate Liouville domains $(X,\lambda_X)$ and $(Y,\lambda_Y)$ of the same dimension: \begin{description} \item[(Monotonicity)] If $X \longrightarrow Y$ is an exact symplectic embedding then $\mathfrak{g}^{\leq \ell}_k(X) \leq \mathfrak{g}^{\leq \ell}_k(Y)$. \item[(Conformality)] If $\mu > 0$ then ${\mathfrak{g}}^{\leq \ell}_k(X, \mu \lambda_X) = \mu \, {\mathfrak{g}}^{\leq \ell}_k(X, \lambda_X)$. \item[(Reeb orbits)] If $\pi_1(X) = 0$, $2 c_1(TX) = 0$ and ${\mathfrak{g}}^{\leq \ell}_k(X) < + \infty$, then there exists a tuple $\Gamma = (\gamma_1, \ldots, \gamma_p)$ of Reeb orbits such that \begin{enumerate} \item ${\mathfrak{g}}^{\leq \ell}_k(X) = \mathcal{A}(\Gamma)$; \item $\conleyzehnder(\Gamma) = p (n - 3) + 2 (k + 1)$; \item $1 \leq p \leq \ell$. \end{enumerate} \end{description} \end{theorem} \begin{proof} We prove monotonicity. If $(X, \lambda^X) \longrightarrow (Y, \lambda^Y)$ is an exact symplectic embedding, then it is possible to define a Viterbo transfer map $H(\mathcal{B}(CC(Y)[-1])) \longrightarrow H(\mathcal{B}(CC(X)[-1]))$. This map respects the action filtration as well as the augmentation maps, i.e. the diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} H(\mathcal{A}^{\leq a} \mathcal{B}^{\leq \ell} (CC(Y)[-1])) \ar[d] \ar[r] & H(\mathcal{B} (CC(Y)[-1])) \ar[d] \ar[r, "{\epsilon}_{k}^Y"] & \Q \ar[d, equals] \\ H(\mathcal{A}^{\leq a} \mathcal{B}^{\leq \ell} (CC(X)[-1])) \ar[r] & H(\mathcal{B} (CC(X)[-1])) \ar[r, swap, "{\epsilon}_{k}^X"] & \Q \end{tikzcd} \end{IEEEeqnarray*} commutes. The result then follows by definition of ${\mathfrak{g}}^{\leq \ell}_k$. We prove conformality. If $\gamma$ is a Reeb orbit of $(\partial X, \lambda|_{\partial X})$ of action $\mathcal{A}_{\lambda}(\gamma)$ then $\gamma$ is a Reeb orbit of $(\partial X, \mu \lambda|_{\partial X})$ of action $\mathcal{A}_{\mu \lambda}(\gamma) = \mu \mathcal{A}_{\lambda}(\gamma)$. Therefore, there is a commutative diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} H(\mathcal{A}^{\leq a} \mathcal{B}^{\leq \ell} (CC(X, \lambda)[-1])) \ar[d, equals] \ar[r] & H(\mathcal{B} (CC(X, \lambda)[-1])) \ar[d, equals] \ar[r, "{\epsilon}_{k}^{\lambda}"] & \Q \ar[d, equals] \\ H(\mathcal{A}^{\leq \mu a} \mathcal{B}^{\leq \ell} (CC(X, \mu \lambda)[-1])) \ar[r] & H(\mathcal{B} (CC(X, \mu \lambda)[-1])) \ar[r, swap, "{\epsilon}_{k}^{\mu \lambda}"] & \Q \end{tikzcd} \end{IEEEeqnarray*} Again, the result follows by definition of $\mathfrak{g}^{\leq \ell}_{k}$. We prove the Reeb orbits property. Choose a point $x \in \itr X$, a symplectic divisor $D$ through $x$ and an almost complex structure $J \in \mathcal{J}(X,D)$. Consider the bar complex $\mathcal{B}^{\leq \ell} (CC(X)[-1])$, computed with respect to $J$.
By assumption and definition of $\mathfrak{g}^{\leq \ell}_{k}$, \begin{IEEEeqnarray*}{rCls+x*} + \infty & > & {\mathfrak{g}}^{\leq \ell}_k(X) \\ & = & \inf \{ a > 0 \mid \epsilon_k \colon H(\mathcal{A}^{\leq a} \mathcal{B}^{\leq \ell}(CC(X)[-1])) \longrightarrow \Q \text{ is nonzero} \} \\ & = & \inf \{ a > 0 \mid \text{there exists } \beta \in H(\mathcal{A}^{\leq a} \mathcal{B}^{\leq \ell}(CC(X)[-1])) \text{ such that } {\epsilon}_k (\beta) \neq 0 \} \\ & = & \inf \{ \mathcal{A}(\beta) \mid \beta \in H(\mathcal{B}^{\leq \ell}(CC(X)[-1])) \text{ such that } {\epsilon}_k (\beta) \neq 0 \}, \end{IEEEeqnarray*} where $\mathcal{A}(\beta)$ is given as in \cref{rmk:notation for tuples of orbits}. Since the action spectrum of $(\partial X, \lambda|_{\partial X})$ is a discrete subset of $\R$, we conclude that in the above expression the infimum is a minimum. More precisely, there exists $\beta \in H(\mathcal{B}^{\leq \ell}(CC(X)[-1]))$ such that $\epsilon_k(\beta) \neq 0$ and ${\mathfrak{g}}^{\leq \ell}_k(X) = \mathcal{A}(\beta)$. The element $\beta$ can be written as a finite linear combination of words of Reeb orbits $\Gamma = (\gamma_1, \ldots, \gamma_p)$, where every word has length $p \leq \ell$ and Conley--Zehnder index equal to $p(n-3) + 2(k+1)$. Here, the statement about the Conley--Zehnder index follows from the computation \begin{IEEEeqnarray*}{rCls+x*} 0 & = & \operatorname{virdim} \overline{\mathcal{M}}^J_X(\Gamma)\p{<}{}{\mathcal{T}^{(k)}x} \\ & = & (n-3)(2 - p) + \conleyzehnder(\Gamma) - 2n - 2k + 4 \\ & = & \conleyzehnder(\Gamma) - p(n-3) - 2(k+1). \end{IEEEeqnarray*} One of the words in this linear combination is such that $\mathcal{A}(\Gamma) = \mathcal{A}(\beta) = {\mathfrak{g}}^{\leq \ell}_k(X)$. \end{proof} \begin{remark} \label{rmk:computations using reeb orbits property} In \cite[Theorem 1.6]{guttSymplecticCapacitiesPositive2018} (respectively \cite[Theorem 1.14]{guttSymplecticCapacitiesPositive2018}) Gutt--Hutchings give formulas for $\cgh{k}$ of a convex (respectively concave) toric domain. However, the given proofs only depend on specific properties of the Gutt--Hutchings capacity and not on the definition of the capacity itself. These properties are monotonicity, conformality, a Reeb orbits property similar to the one of \cref{thm:properties of hsc}, and finally that the capacity be finite on star-shaped domains. If we showed that $\mathfrak{g}^{\leq 1}_{k}$ is finite on star-shaped domains, we would conclude that $\mathfrak{g}^{\leq 1}_{k} = \cgh{k}$ on convex or concave toric domains, because in this case both capacities would be given by the formulas in the previously mentioned theorems. Showing that $\mathfrak{g}^{\leq 1}_{k}$ is finite boils down to showing that the augmentation map is nonzero, which we will do in \cref{sec:augmentation map of an ellipsoid}. However, in \cref{thm:g hat vs gh} we will use this information in combination with the Bourgeois--Oancea isomorphism to conclude that $\mathfrak{g}^{\leq 1}_{k}(X) = \cgh{k}(X)$ for any nondegenerate Liouville domain $X$. Therefore, the proof suggested above will not be necessary, although it is a proof of $\mathfrak{g}^{\leq 1}_{k}(X) = \cgh{k}(X)$ alternative to that of \cref{thm:g hat vs gh} when $X$ is a convex or concave toric domain. 
\end{remark} \section{Cauchy--Riemann operators on bundles} \label{sec:cr operators} In order to show that $\mathfrak{g}^{\leq 1}_{k}(X) = \cgh{k}(X)$, we will need to show that the augmentation map of a small ellipsoid in $X$ is nonzero (see the proof of \cref{thm:g hat vs gh}). Recall that the augmentation map counts holomorphic curves satisfying a tangency constraint. In \cref{sec:augmentation map of an ellipsoid}, we will explicitly compute how many such holomorphic curves there are. However, a count obtained by explicit methods will not necessarily agree with the virtual count that appears in the definition of the augmentation map. By assumption on the virtual perturbation scheme, it does agree if the relevant moduli space is transversely cut out. Therefore, in this section and the next we will describe the framework that allows us to show that this moduli space is transversely cut out. This section deals with the theory of real linear Cauchy--Riemann operators on line bundles, and our main reference is \cite{wendlAutomaticTransversalityOrbifolds2010}. The outline is as follows. First, we review the basic definitions about real linear Cauchy--Riemann operators (\cref{def:real linear cauchy riemann operator}). By the Riemann-Roch theorem (\cref{thm:riemann roch with punctures}), these operators are Fredholm and their index can be computed from a number of topological quantities associated to them. We will make special use of a criterion by Wendl (\cref{prp:wen D surjective injective criterion}) which guarantees that a real linear Cauchy--Riemann operator defined on a complex line bundle is surjective. For our purposes, we will also need an adaptation of this result to the case where the operator is accompanied by an evaluation map, which we state in \cref{lem:D plus E is surjective}. We now state the assumptions for the rest of this section. Let $(\Sigma, j)$ be a compact Riemann surface without boundary, of genus $g$, with sets of positive and negative punctures $\mathbf{z}^{\pm} = \{z^{\pm}_1,\ldots,z^{\pm}_{p^{\pm}}\}$. Denote $\mathbf{z} = \mathbf{z}^{+} \cup \mathbf{z}^{-}$ and $\dot{\Sigma} = \Sigma \setminus \mathbf{z}$. Choose cylindrical coordinates $(s,t)$ near each puncture $z \in \mathbf{z}$ and denote $\mathcal{U}_z \subset \dot{\Sigma}$ the domain of the coordinates $(s,t)$. \begin{definition} \label{def:asymptotically hermitian vector bundle} An \textbf{asymptotically Hermitian vector bundle} over $\dot{\Sigma}$ is given by a complex vector bundle $(E, J) \longrightarrow \dot{\Sigma}$ and for each $z \in \mathbf{z}$ a Hermitian vector bundle $(E_z, J_z, \omega_z) \longrightarrow S^1$ together with a complex vector bundle isomorphism $\Phi_z^{} \colon \pi^*_z E_z^{} \longrightarrow \iota_z^* E$, where $\iota_z \colon \mathcal{U}_z \longrightarrow \dot{\Sigma}$ is the inclusion and $\pi_{z} \colon \mathcal{U}_z \longrightarrow S^1$ is given by $\pi_{z}(w) = t(w)$: \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} E_z^{} \ar[d] & \pi_z^* E_z^{} \ar[r, "\Phi_z"] \ar[d] \ar[l] & \iota_z^* E \ar[r] \ar[d] & E \ar[d] \\ S^1 & \mathcal{U}_z \ar[r, equals] \ar[l, "\pi_z"] & \mathcal{U}_z \ar[r, swap, "\iota_z"] & \dot{\Sigma} \end{tikzcd} \end{IEEEeqnarray*} \end{definition} From now until the end of this section, we will assume that $E$ is an asymptotically Hermitian vector bundle over $\dot{\Sigma}$ of complex rank $n$. 
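As a guiding example (stated informally here, and not needed for the proofs of this section), suppose that $u \colon \dot{\Sigma} \longrightarrow \hat{X}$ is an asymptotically cylindrical map into the completion of a Liouville domain $X$, asymptotic at each puncture $z \in \mathbf{z}$ to a Reeb orbit $\gamma_z$ of $\partial X$. Then the pullback $E = u^* T \hat{X}$ is an asymptotically Hermitian vector bundle over $\dot{\Sigma}$: as the asymptotic Hermitian bundle at $z$ one can take
\begin{IEEEeqnarray*}{c+x*}
    E_z \coloneqq \gamma_z^* \, T(\R \times \partial X) \longrightarrow S^1,
\end{IEEEeqnarray*}
with $\Phi_z$ obtained from the convergence of $u$ to the trivial cylinder over $\gamma_z$ on the cylindrical end near $z$. This is the type of bundle to which the results of this section will be applied in \cref{sec:functional analytic setup}.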
\begin{definition} \label{def:asymptotic trivialization} An \textbf{asymptotic trivialization} of an asymptotically Hermitian vector bundle $E$ is a family $\tau = (\tau_z)_{z \in \mathbf{z}}$ of unitary trivializations $\tau_z$ of $(E_z, J_z, \omega_z)$. By \cref{def:asymptotically hermitian vector bundle}, every such $\tau_z$ defines a complex trivialization of $\iota^*_z E$. If $\tau$ is an asymptotic trivialization, we will typically denote each $\tau_z$ also by $\tau$. \end{definition} \begin{definition} \label{def:sobolev spaces} Let $E$ be an asymptotically Hermitian vector bundle over $\dot{\Sigma}$, together with an asymptotic trivialization $\tau$. If $\eta$ is a section of $E$ and $z$ is a puncture, denote by $\eta_z \colon Z^{\pm} \longrightarrow \R^{2n}$ the map $\eta$ written with respect to the trivialization $\tau$ and cylindrical coordinates near $z$. The \textbf{Sobolev space} of sections of $E$ is \begin{IEEEeqnarray*}{c+x*} W^{k,p}(E) \coloneqq \{ \eta \in W^{k,p}_{\mathrm{loc}}(E) \mid \eta_z \in W^{k,p}(Z^{\pm}, \R^{2n}) \text{ for every } z \in \mathbf{z}^{\pm} \}. \end{IEEEeqnarray*} If $\delta > 0$, the \textbf{weighted Sobolev space} of sections of $E$ is \begin{IEEEeqnarray*}{c+x*} W^{k,p,\delta}(E) \coloneqq \{ \eta \in W^{k,p}_{\mathrm{loc}}(E) \mid e^{\pm \delta s} \eta_z \in W^{k,p}(Z^{\pm}, \R^{2n}) \text{ for every } z \in \mathbf{z}^{\pm} \}. \end{IEEEeqnarray*} \end{definition} \begin{definition} \label{def:real linear cauchy riemann operator} A \textbf{real linear Cauchy--Riemann operator} is a map \begin{IEEEeqnarray*}{c+x*} \mathbf{D} \colon W^{1,p}(\dot{\Sigma}, E) \longrightarrow L^p(\dot{\Sigma}, \Hom^{0,1}(T \dot{\Sigma}, E)) \end{IEEEeqnarray*} such that $\mathbf{D}$ is linear as a map of vector spaces over $\R$ and $\mathbf{D}$ satisfies the Leibniz rule, i.e. if $v \in W^{1,p}(\dot{\Sigma}, E)$ and $f \in C^{\infty}(\dot{\Sigma}, \R)$ then $\mathbf{D}(f v) = f \mathbf{D} v + v \otimes \overline{\partial} f$. \end{definition} We now consider the asymptotic operators of $\mathbf{D}$. Their relevance comes from the fact that the Fredholm index of $\mathbf{D}$ is determined by the asymptotic operators at the punctures. \begin{definition} An \textbf{asymptotic operator} at $z \in \mathbf{z}$ is a bounded linear operator $\mathbf{A} \colon H^1(E_z) \longrightarrow L^2(E_z)$ such that when written with respect to a unitary trivialization of $E_z$, $\mathbf{A}$ takes the form \begin{IEEEeqnarray*}{rrCl} & H^1(S^1,\R^{2n}) & \longrightarrow & L^2(S^1,\R^{2n}) \\ & \eta & \longmapsto & - J_0 \dot{\eta} - S \eta, \end{IEEEeqnarray*} where $S \colon S^1 \longrightarrow \End(\R^{2n})$ is a loop of symmetric $2n \times 2n$ matrices. We say that $\mathbf{A}$ is nondegenerate if its spectrum does not contain $0$. \end{definition} \begin{definition} Let $\mathbf{D}$ be a real linear Cauchy--Riemann operator and $\mathbf{A}$ be an asymptotic operator at $z \in \mathbf{z}$. We say that $\mathbf{D}$ is \textbf{asymptotic} to $\mathbf{A}$ at $z$ if the expressions for $\mathbf{D}$ and $\mathbf{A}$ with respect to an asymptotic trivialization near $z$ are of the form \begin{IEEEeqnarray*}{rCls+x*} (\mathbf{D} \xi)(s,t) & = & \partial_s \xi (s,t) + J_0 \partial_t \xi (s,t) + S(s,t) \xi(s,t) \\ (\mathbf{A} \eta)(t) & = & - J_0 \partial_t \eta (t) - S(t) \eta(t), \end{IEEEeqnarray*} where $S(s,t)$ converges to $S(t)$ uniformly as $s \to \pm \infty$. \end{definition} \begin{remark} Suppose that $E$ splits as a direct sum of complex vector bundles $E = E_1 \oplus E_2$. 
In this case, there are canonical inclusions \begin{IEEEeqnarray*}{rCls+x*} W^{1,p}(\dot{\Sigma}, E_i) & \subset & W^{1,p}(\dot{\Sigma}, E), \\ L^p(\dot{\Sigma}, \Hom^{0,1}(T \dot{\Sigma}, E_i)) & \subset & L^p(\dot{\Sigma}, \Hom^{0,1}(T \dot{\Sigma}, E)) \end{IEEEeqnarray*} for $i = 1,2$, and we have the following decompositions: \begin{IEEEeqnarray*}{rCls+x*} W^{1,p}(\dot{\Sigma}, E) & = & W^{1,p}(\dot{\Sigma}, E_1) \oplus W^{1,p}(\dot{\Sigma}, E_2), \\ L^p(\dot{\Sigma}, \Hom^{0,1}(T \dot{\Sigma}, E)) & = & L^p(\dot{\Sigma}, \Hom^{0,1}(T \dot{\Sigma}, E_1)) \oplus L^p(\dot{\Sigma}, \Hom^{0,1}(T \dot{\Sigma}, E_2)) \end{IEEEeqnarray*} We can write $\mathbf{D}$ with respect to these decompositions as a block matrix: \begin{IEEEeqnarray*}{c+x*} \mathbf{D} = \begin{bmatrix} \mathbf{D}_{11} & \mathbf{D}_{12} \\ \mathbf{D}_{21} & \mathbf{D}_{22} \end{bmatrix}. \end{IEEEeqnarray*} By \cite[Exercise 7.8]{wendlLecturesSymplecticField2016}, the diagonal terms $\mathbf{D}_{11}$ and $\mathbf{D}_{22}$ are real linear Cauchy--Riemann operators, while the off diagonal terms $\mathbf{D}_{12}$ and $\mathbf{D}_{21}$ are tensorial. \end{remark} Let $\mathbf{D}$ be a real linear Cauchy--Riemann operator and for every puncture $z \in \mathbf{z}$ let $\mathbf{A}_z$ be a nondegenerate asymptotic operator at $z$. By the Riemann-Roch theorem with punctures (\cref{thm:riemann roch with punctures}), $\mathbf{D}$ is a Fredholm operator. We now explain how to compute the Fredholm index of $\mathbf{D}$. Choose an asymptotic trivialization $\tau$ as in \cref{def:asymptotic trivialization}. First, recall that the \textbf{Euler characteristic} of $\dot{\Sigma}$ is given by $\chi(\dot{\Sigma}) = 2 - 2 g - \# \mathbf{z}$, where $g$ is the genus of $\Sigma$. \begin{definition}[{\cite[Definition 5.1]{wendlLecturesSymplecticField2016}}] \label{def:relative first chern number} Let $S$ be a compact oriented surface with boundary and $(E,J)$ be a complex vector bundle over $S$. Let $\tau$ be a complex trivialization of $E|_{\partial S}$. The \textbf{relative first Chern number} of $E$ with respect to $\tau$, denoted $c_1^{\tau}(E) \in \Z$, is defined by the following properties. \begin{enumerate} \item If $E$ has complex rank $1$, then $c_1^{\tau}(E)$ is the signed count of zeros of a generic smooth section $\eta \colon S \longrightarrow E$ such that $\tau \circ \eta|_{\partial S} \colon \partial S \longrightarrow \C$ is constant. \item If $E_1$ and $E_2$ are complex vector bundles over $S$ with trivializations $\tau_1$ and $\tau_2$ over $\partial S$, then $c_1^{\tau_1 \oplus \tau_2}(E_1 \oplus E_2) = c_1^{\tau_1}(E_1) + c_1^{\tau_2}(E_2)$. \end{enumerate} \end{definition} The definition of relative first Chern number extends to the class of asymptotically Hermitian vector bundles over punctured surfaces. \begin{definition} The \textbf{Conley--Zehnder} index of an asymptotic operator $\mathbf{A}_z$ is given as follows. Let $(\mathbf{A}_z \eta)(t) = -J_0 \partial_t \eta(t) - S(t) \eta(t)$ be the expression of $\mathbf{A}_z$ with respect to $\tau$. Let $\Psi \colon [0,1] \longrightarrow \operatorname{Sp}(2n)$ be the unique path of symplectic matrices such that \begin{IEEEeqnarray*}{rCls+x*} \Psi(0) & = & \id_{\R^{2n}}, \\ \dot{\Psi}(t) & = & J_0 S(t) \Psi(t). \end{IEEEeqnarray*} Since $\mathbf{A}_z$ is nondegenerate, $\Psi$ is an element of $\operatorname{SP}(n)$. Finally, define $\conleyzehnder^{\tau}(\mathbf{A}_z) \coloneqq \conleyzehnder(\Psi)$.
\end{definition} \begin{theorem}[Riemann-Roch, {\cite[Theorem 5.4]{wendlLecturesSymplecticField2016}}] \label{thm:riemann roch with punctures} The operator $\mathbf{D}$ is Fredholm and its (real) Fredholm index is given by \begin{IEEEeqnarray*}{c+x*} \operatorname{ind} \mathbf{D} = n \chi (\dot{\Sigma}) + 2 c_1^{\tau}(E) + \sum_{z \in \mathbf{z}^+} \conleyzehnder^{\tau}(\mathbf{A}_z) - \sum_{z \in \mathbf{z}^-} \conleyzehnder^{\tau}(\mathbf{A}_z). \end{IEEEeqnarray*} \end{theorem} For the rest of this section, we restrict ourselves to the case where $n = \operatorname{rank}_{\C} E = 1$. We retain the assumption that $\mathbf{D}$ is a real linear Cauchy--Riemann operator and $\mathbf{A}_{z}$ is a nondegenerate asymptotic operator for every puncture $z \in \mathbf{z}$. Our goal is to state a criterion that guarantees surjectivity of $\mathbf{D}$. This criterion depends on other topological quantities which we now define. For every $\lambda$ in the spectrum of $\mathbf{A}_z$, let $w^{\tau}(\lambda)$ be the winding number of any nontrivial section in the $\lambda$-eigenspace of $\mathbf{A}_z$ (computed with respect to the trivialization $\tau$). Define the \textbf{winding numbers} \begin{IEEEeqnarray*}{rClls+x*} \alpha_-^{\tau}(\mathbf{A}_z) & \coloneqq & \max & \{ w^{\tau}(\lambda) \mid \lambda < 0 \text{ is in the spectrum of }\mathbf{A}_z \}, \\ \alpha_+^{\tau}(\mathbf{A}_z) & \coloneqq & \min & \{ w^{\tau}(\lambda) \mid \lambda > 0 \text{ is in the spectrum of }\mathbf{A}_z \}. \end{IEEEeqnarray*} The \textbf{parity} (the reason for this name is Equation \eqref{eq:cz winding parity} below) and associated sets of even and odd punctures are given by \begin{IEEEeqnarray*}{rCls+x*} p(\mathbf{A}_{z}) & \coloneqq & \alpha_{+}^{\tau}(\mathbf{A}_z) - \alpha^{\tau}_{-}(\mathbf{A}_z) \in \{0,1\}, \\ \mathbf{z}_0 & \coloneqq & \{ z \in \mathbf{z} \mid p(\mathbf{A}_z) = 0 \}, \\ \mathbf{z}_1 & \coloneqq & \{ z \in \mathbf{z} \mid p(\mathbf{A}_z) = 1 \}. \end{IEEEeqnarray*} Finally, the \textbf{adjusted first Chern number} is given by \begin{IEEEeqnarray*}{c+x*} c_1(E,\mathbf{A}_{\mathbf{z}}) = c_1^{\tau}(E) + \sum_{z \in \mathbf{z}^+} \alpha_-^{\tau}(\mathbf{A}_z) - \sum_{z \in \mathbf{z}^-} \alpha_-^{\tau}(\mathbf{A}_z). \end{IEEEeqnarray*} These quantities satisfy the following equations. \begin{IEEEeqnarray}{rCls+x*} \conleyzehnder^{\tau}(\mathbf{A}_z) & = & 2 \alpha_{-}^{\tau}(\mathbf{A_z}) + p(\mathbf{A}_z) = 2 \alpha_{+}^{\tau}(\mathbf{A_z}) - p(\mathbf{A}_z), \plabel{eq:cz winding parity} \\ 2 c_1 (E,\mathbf{A}_{\mathbf{z}}) & = & \operatorname{ind} \mathbf{D} - 2 - 2g + \# \mathbf{z}_0. \plabel{eq:chern and index} \end{IEEEeqnarray} \begin{proposition}[{\cite[Proposition 2.2]{wendlAutomaticTransversalityOrbifolds2010}}] \phantomsection\label{prp:wen D surjective injective criterion} \begin{enumerate} \item[] \item If $\operatorname{ind} \mathbf{D} \leq 0$ and $c_1(E, \mathbf{A}_{\mathbf{z}}) < 0$ then $\mathbf{D}$ is injective. \item If $\operatorname{ind} \mathbf{D} \geq 0$ and $c_1(E, \mathbf{A}_{\mathbf{z}}) < \operatorname{ind} \mathbf{D}$ then $\mathbf{D}$ is surjective. \end{enumerate} \end{proposition} We will apply the proposition above to moduli spaces of punctured spheres which have no even punctures. The following lemma is just a restatement of the previous proposition in this simpler case. \begin{lemma} \label{lem:conditions for D surjective genus zero} Assume that $g = 0$ and $\# \mathbf{z}_0 = 0$. 
Then, \begin{enumerate} \item If $\operatorname{ind} \mathbf{D} \leq 0$ then $\mathbf{D}$ is injective. \item If $\operatorname{ind} \mathbf{D} \geq 0$ then $\mathbf{D}$ is surjective. \end{enumerate} \end{lemma} \begin{proof} By \cref{prp:wen D surjective injective criterion} and Equation \eqref{eq:chern and index}. \end{proof} We now wish to deal with the case where $\mathbf{D}$ is taken together with an evaluation map (see \cref{lem:D plus E is surjective} below). The tools we need to prove this result are explained in the following remark. \begin{remark} \label{rmk:formulas for xi in ker nonzero} Suppose that $\ker \mathbf{D} \neq \{0\}$. If $\xi \in \ker \mathbf{D} \setminus \{0\}$, it is possible to show that $\xi$ has only a finite number of zeros, all of positive order, i.e. if $w$ is a zero of $\xi$ then $\operatorname{ord}(\xi;w) > 0$. For every $z \in \mathbf{z}$, there is an \textbf{asymptotic winding number} $\operatorname{wind}_z^{\tau}(\xi) \in \Z$, which has the properties \begin{IEEEeqnarray*}{rCls+x*} z \in \mathbf{z}^+ & \Longrightarrow & \operatorname{wind}_z^{\tau}(\xi) \leq \alpha_-^{\tau}(\mathbf{A}_z), \\ z \in \mathbf{z}^- & \Longrightarrow & \operatorname{wind}_z^{\tau}(\xi) \geq \alpha_+^{\tau}(\mathbf{A}_z). \end{IEEEeqnarray*} Define the \textbf{asymptotic vanishing} of $\xi$, denoted $Z_{\infty}(\xi)$, and the \textbf{count of zeros}, denoted $Z(\xi)$, by \begin{IEEEeqnarray*}{rCls+x*} Z_{\infty}(\xi) & \coloneqq & \sum_{z \in \mathbf{z}^+} \p{}{1}{\alpha_-^{\tau}(\mathbf{A}_z) - \operatorname{wind}_z^{\tau}(\xi)} + \sum_{z \in \mathbf{z}^-} \p{}{1}{\operatorname{wind}_z^{\tau}(\xi) - \alpha_+^{\tau}(\mathbf{A}_z)} \in \Z_{\geq 0}, \\ Z(\xi) & \coloneqq & \sum_{w \in \xi^{-1}(0)} \operatorname{ord}(\xi;w) \in \Z_{\geq 0}. \end{IEEEeqnarray*} In this case, we have the formula (see \cite[Equation 2.7]{wendlAutomaticTransversalityOrbifolds2010}) \begin{IEEEeqnarray}{c} \plabel{eq:c1 and asy vanishing} c_1(E,\mathbf{A}_{\mathbf{z}}) = Z(\xi) + Z_{\infty}(\xi). \end{IEEEeqnarray} \end{remark} \begin{lemma} \label{lem:D plus E is surjective} Let $w \in \dot{\Sigma}$ be a point and $\mathbf{E} \colon W^{1,p}(\dot{\Sigma}, E) \longrightarrow E_w$ be the evaluation map at $w$, i.e. $\mathbf{E}(\xi) = \xi_w$. Assume that $g = 0$ and $\# \mathbf{z}_0 = 0$. If $\operatorname{ind} \mathbf{D} = 2$ then $\mathbf{D} \oplus \mathbf{E} \colon W^{1,p}(\dot{\Sigma}, E) \longrightarrow L^p(\dot{\Sigma}, \Hom^{0,1}(T \dot{\Sigma}, E)) \oplus E_w$ is surjective. \end{lemma} \begin{proof} It is enough to show that the maps \begin{IEEEeqnarray*}{rCls+x*} \mathbf{D} \colon W^{1,p}(\dot{\Sigma}, E) & \longrightarrow & L^p(\dot{\Sigma}, \Hom^{0,1}(T \dot{\Sigma}, E)), \\ \mathbf{E}|_{\ker \mathbf{D}} \colon \ker \mathbf{D} & \longrightarrow & E_w \end{IEEEeqnarray*} are surjective. By \cref{lem:conditions for D surjective genus zero}, $\mathbf{D}$ is surjective. Since $\dim \ker \mathbf{D} = \operatorname{ind} \mathbf{D} = 2$ and $\dim_{\R} E_w = 2$, the map $\mathbf{E}|_{\ker \mathbf{D}}$ is surjective if and only if it is injective. So, we show that $\ker(\mathbf{E}|_{\ker \mathbf{D}}) = \ker \mathbf{E} \cap \ker \mathbf{D} = \{0\}$. For this, let $\xi \in \ker \mathbf{E} \cap \ker \mathbf{D}$ and assume by contradiction that $\xi \neq 0$. Consider the quantities defined in \cref{rmk:formulas for xi in ker nonzero}.
We compute \begin{IEEEeqnarray*}{rCls+x*} 0 & = & \operatorname{ind} \mathbf{D} - 2 & \quad [\text{by assumption}] \\ & = & 2 c_1(E,\mathbf{A}_{\mathbf{z}}) & \quad [\text{by Equation \eqref{eq:chern and index}}] \\ & = & 2 Z(\xi) + 2 Z_{\infty}(\xi) & \quad [\text{by Equation \eqref{eq:c1 and asy vanishing}}] \\ & \geq & 0 & \quad [\text{by definition of $Z$ and $Z_{\infty}$}], \end{IEEEeqnarray*} which implies that $Z(\xi) = 0$. This gives the desired contradiction, because \begin{IEEEeqnarray*}{rCls+x*} 0 & = & Z(\xi) & \quad [\text{by the previous computation}] \\ & = & \sum_{z \in \xi^{-1}(0)} \operatorname{ord}(\xi;z) & \quad [\text{by definition of $Z$}] \\ & \geq & \operatorname{ord}(\xi;w) & \quad [\text{since $\xi_w = \mathbf{E}(\xi) = 0$}] \\ & > & 0 & \quad [\text{by \cref{rmk:formulas for xi in ker nonzero}}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \section{Cauchy--Riemann operators as sections} \label{sec:functional analytic setup} In this section, we phrase the notion of a map $u \colon \dot{\Sigma} \longrightarrow \hat{X}$ being holomorphic in terms of $u$ being in the zero set of a section $\overline{\partial} \colon \mathcal{T} \times \mathcal{B} \longrightarrow \mathcal{E}$ (see \cref{def:bundle for cr op,def:cauchy riemann operator}). The advantage of this point of view is that we can then think of moduli spaces of holomorphic curves in $\hat{X}$ as the zero set of the section $\overline{\partial}$. To see if such a moduli space is regular near $(j, u)$, one needs to consider the linearization $\mathbf{L}_{(j,u)}$ of $\overline{\partial}$ at $(j,u)$ (see \cref{def:linearized cr op}), and prove that it is surjective. We will see that a suitable restriction of $\mathbf{L}_{(j,u)}$ is a real linear Cauchy--Riemann operator (\cref{lem:D is a rlcro}), and therefore we can use the theory from the last section to show that $\mathbf{L}_{(j,u)}$ is surjective in some particular cases (\cref{lem:Du is surjective case n is 1,lem:DX surj implies DY surj}). \begin{definition} \label{def:asymptotic marker} Let $(\Sigma,j)$ be a Riemann surface and $z \in \Sigma$ be a puncture. An \textbf{asymptotic marker} at $z$ is a half-line $v \in (T_z \Sigma \setminus \{0\}) / \R_{> 0}$. \end{definition} \begin{definition} \label{def:moduli space of curves with asymtotic marker} Let $(X, \omega, \lambda)$ be a symplectic cobordism, $J \in \mathcal{J}(X)$ be a cylindrical almost complex structure on $\hat{X}$, and $\Gamma^{\pm} = (\gamma^{\pm}_1, \ldots, \gamma^{\pm}_{p^{\pm}})$ be tuples of Reeb orbits on $\partial^{\pm} X$. Let $\mathcal{M}^{\$,J}_X(\Gamma^+, \Gamma^-)$ be the moduli space of (equivalence classes of) tuples \begin{IEEEeqnarray*}{c+x*} (\Sigma, j, \mathbf{z}, \mathbf{v}, u), \qquad \mathbf{z} = \mathbf{z}^+ \cup \mathbf{z}^-, \qquad \mathbf{v} = \mathbf{v}^+ \cup \mathbf{v}^{-} \end{IEEEeqnarray*} where $(\Sigma, j, \mathbf{z}, u)$ is as in \cref{def:asy cyl holomorphic curve} and $\mathbf{v}^{\pm} = \{v^{\pm}_1, \ldots, v^{\pm}_{p^{\pm}}\}$ is a set of asymptotic markers on $\mathbf{z}^{\pm} = \{z^{\pm}_1, \ldots, z^{\pm}_{p^{\pm}}\}$ such that \begin{IEEEeqnarray*}{c+x*} \lim_{t \to 0^+} u(c(t)) = (\pm \infty, \gamma^{\pm}_i(0)) \end{IEEEeqnarray*} for every $i = 1, \ldots, p^{\pm}$ and every path $c$ in $\Sigma$ with $c(0) = z^{\pm}_i$ and $\dot{c}(0) = v^{\pm}_i$.
Two such tuples $(\Sigma_0, j_0, \mathbf{z}_0, \mathbf{v}_0, u_0)$ and $(\Sigma_1, j_1, \mathbf{z}_1, \mathbf{v}_1, u_1)$ are equivalent if there exists a biholomorphism $\phi \colon \Sigma_0 \longrightarrow \Sigma_1$ such that \begin{IEEEeqnarray*}{rCls+x*} u_1 \circ \phi & = & u_0, \\ \phi(z^{\pm}_{0,i}) & = & z^{\pm}_{1,i}, \\ \dv \phi (z^{\pm}_{0,i}) v_{0,i}^{\pm} & = & v_{1,i}^{\pm}. \end{IEEEeqnarray*} \end{definition} \begin{remark} \label{rmk:moduli space may assume sigma is sphere} Consider the sphere $S^2$, without any specified almost complex structure. Let $\mathbf{z}^{\pm} = \{z^{\pm}_1, \ldots, z^{\pm}_{p^{\pm}}\} \subset S^2$ be sets of punctures and $\mathbf{v}^{\pm} = \{v^{\pm}_1, \ldots, v^{\pm}_{p^{\pm}}\}$ be corresponding sets of asymptotic markers. Then, \begin{IEEEeqnarray*}{c+x*} \mathcal{M}^{\$, J}_{X}(\Gamma^+, \Gamma^-) \cong \left\{ (j, u) \ \middle\vert \begin{array}{l} j \text{ is an almost complex structure on }S^2, \\ u \colon (\dot{S}^2, j) \longrightarrow (\hat{X}, J) \text{ is as in \cref{def:asy cyl holomorphic curve}} \end{array} \right\} / \sim, \end{IEEEeqnarray*} where two tuples $(j_0, u_0)$ and $(j_1, u_1)$ are equivalent if there exists a biholomorphism $\phi \colon (S^2, j_0) \longrightarrow (S^2, j_1)$ such that \begin{IEEEeqnarray*}{rCls+x*} u_1 \circ \phi & = & u_0, \\ \phi(z^{\pm}_{i}) & = & z^{\pm}_{i}, \\ \dv \phi (z^{\pm}_{i}) v_{i}^{\pm} & = & v_{i}^{\pm}. \end{IEEEeqnarray*} \end{remark} \begin{remark} \label{rmk:counts of moduli spaces with or without asy markers} There is a surjective map $\pi^{\$} \colon \mathcal{M}^{\$, J}_{X}(\Gamma^+, \Gamma^-) \longrightarrow \mathcal{M}^{J}_{X}(\Gamma^+, \Gamma^-)$ given by forgetting the asymptotic markers. By \cite[Proposition 11.1]{wendlLecturesSymplecticField2016}, for every $u \in \mathcal{M}^{J}_{X}(\Gamma^+, \Gamma^-)$ the preimage $(\pi^{\$})^{-1}(u)$ contains exactly \begin{IEEEeqnarray*}{c+x*} \frac{\bigproduct_{\gamma \in \Gamma^+ \cup \Gamma^-} m(\gamma)}{|\operatorname{Aut}(u)|} \end{IEEEeqnarray*} elements, where $m(\gamma)$ is the multiplicity of the Reeb orbit $\gamma$ and $\operatorname{Aut}(u)$ is the automorphism group of $u = (\Sigma, j, \mathbf{z}, u)$, i.e. an element of $\operatorname{Aut}(u)$ is a biholomorphism $\phi \colon \Sigma \longrightarrow \Sigma$ such that $u \circ \phi = u$ and $\phi(z_i^{\pm}) = z_i^{\pm}$ for every $i$. \end{remark} We will work with the following assumptions. Let $\Sigma = S^2$, (without any specified almost complex structure). Let $\mathbf{z} = \{z_1, \ldots, z_p\} \subset \Sigma$ be a set of punctures and $\mathbf{v} = \{v_1, \ldots, v_p\}$ be a corresponding set of asymptotic markers. Assume also that we have a set $\mathbf{j} = \{j_1, \ldots, j_p\}$, where $j_i$ is an almost complex structure defined on a neighbourhood of $z_i$ for every $i = 1, \ldots,p$. For every $i$, there are cylindrical coordinates $(s, t)$ on $\dot{\Sigma}$ near $z_i$ as in \cref{def:punctures asy markers cyl ends}, with the additional property that $v_i$ agrees with the direction $t = 0$. We will also assume that $\mathcal{T} \subset \mathcal{J}(\Sigma)$ is a Teichmüller slice as in \cite[Section 3.1]{wendlAutomaticTransversalityOrbifolds2010}, where $\mathcal{J}(\Sigma)$ denotes the set of almost complex structures on $\Sigma = S^2$. Finally, let $(X, \lambda)$ be a nondegenerate Liouville domain of dimension $2n$ and $J \in \mathcal{J}(X)$ be an admissible almost complex structure on $\hat{X}$. 
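Before introducing the relevant Cauchy--Riemann operators, we record an illustrative special case of \cref{rmk:counts of moduli spaces with or without asy markers} (under the additional assumptions stated here): if $u \in \mathcal{M}^{J}_{X}(\Gamma^+, \Gamma^-)$ is somewhere injective (so that $\operatorname{Aut}(u)$ is trivial) and its only asymptote is a positive one at $\gamma^m$, the $m$-fold cover of a simple Reeb orbit $\gamma$, then
\begin{IEEEeqnarray*}{c+x*}
    \# (\pi^{\$})^{-1}(u) = \frac{m(\gamma^m)}{|\operatorname{Aut}(u)|} = m,
\end{IEEEeqnarray*}
the $m$ elements corresponding to the $m$ times at which $\gamma^m$ passes through the point $\gamma^m(0)$.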
\begin{definition} Let $\gamma$ be an unparametrized simple Reeb orbit of $\partial X$. An \textbf{admissible parametrization} near $\gamma$ is a diffeomorphism $\phi \colon S^1 \times D^{2n-2} \longrightarrow O$, where $O \subset \partial X$ is an open neighbourhood of $\gamma$ and \begin{IEEEeqnarray*}{c+x*} D^{2n-2} \coloneqq \{(z^1,\ldots,z^{n-1}) \in \C^{n-1} \mid |z^1| < 1, \ldots, |z^{n-1}| < 1 \} \end{IEEEeqnarray*} is the polydisk, such that $t \longmapsto \phi(t,0)$ is a parametrization of $\gamma$. In this case, we denote by $(\vartheta, \zeta) = \phi^{-1} \colon O \longrightarrow S^1 \times D^{2n-2}$ the coordinates near $\gamma$. \end{definition} Let $\Gamma = (\gamma_{1},\ldots,\gamma_{p})$ be a tuple of (unparametrized) Reeb orbits in $\partial X$. Denote by $m_i$ the multiplicity of $\gamma_i$ and by $T_i$ the period of the simple Reeb orbit underlying $\gamma_i$ (so, the period of $\gamma_i$ is $m_i T_i$). For every $i = 1,\ldots,p $, choose once and for all an admissible parametrization $\phi_i \colon S^1 \times D^{2n-2} \longrightarrow O_i$ near the simple Reeb orbit underlying $\gamma_i$. \begin{definition} \label{def:bundle for cr op} We define a vector bundle $\pi \colon \mathcal{E} \longrightarrow \mathcal{T} \times \mathcal{B}$ as follows. Let $\mathcal{B}$ be the set of maps $u \colon \dot{\Sigma} \longrightarrow \hat{X}$ of class $W^{k,p}_{\mathrm{loc}}$ satisfying the following property for every puncture $z_i$. Write $u$ with respect to the cylindrical coordinates $(s,t)$ defined from $(z_i, v_i)$. First, we require that $u(s,t) \in \R_{\geq 0} \times O_i$ for $s$ big enough. Write $u$ with respect to the coordinates $(\vartheta, \zeta)$ near $\gamma$ on the target and cylindrical coordinates $(s,t)$ on the domain: \begin{IEEEeqnarray*}{rCls+x*} u(s,t) & = & (\pi_{\R} \circ u(s,t), \pi_{\partial X} \circ u (s,t)) \\ & = & (\pi_{\R} \circ u(s,t), \vartheta(s,t), \zeta(s,t)). \end{IEEEeqnarray*} Finally, we require that there exists $a \in \R$ such that the map \begin{IEEEeqnarray*}{c+x*} (s,t) \longmapsto (\pi_{\R} \circ u(s,t), \vartheta(s,t), \zeta(s,t)) - (m_i T_i s + a, m_i T_i t, 0) \end{IEEEeqnarray*} is of class $W^{k,p,\delta}$. The fibre, total space, projection and zero section are defined by \begin{IEEEeqnarray*}{rCls+x*} \mathcal{E}_{(j,u)} & \coloneqq & W^{k-1,p,\delta}(\Hom^{0,1}((T \dot{\Sigma}, j), (u^* T \hat{X}, J))), \quad \text{for every } (j,u) \in \mathcal{T} \times \mathcal{B}, \\ \mathcal{E} & \coloneqq & \bigcoproduct_{(j,u) \in \mathcal{T} \times \mathcal{B}} \mathcal{E}_{(j,u)} = \{ (j, u, \xi) \mid (j,u) \in \mathcal{T} \times \mathcal{B}, \, \xi \in \mathcal{E}_{(j,u)} \}, \\ \pi(j,u, \eta) & \coloneqq & (j,u), \\ z(j,u) & \coloneqq & (j,u,0). \end{IEEEeqnarray*} \end{definition} \begin{definition} \label{def:cauchy riemann operator} The \textbf{Cauchy--Riemann operators} are the sections \begin{IEEEeqnarray*}{rClCrCl} \overline{\partial}_j \colon \mathcal{B} & \longrightarrow & \mathcal{E}, & \qquad & \overline{\partial}_j(u) & \coloneqq & \frac{1}{2} (T u + J \circ Tu \circ j) \in \mathcal{E}_{(j,u)}, \\ \overline{\partial} \colon \mathcal{T} \times \mathcal{B} & \longrightarrow & \mathcal{E}, & \qquad & \overline{\partial}(j,u) & \coloneqq & \overline{\partial}_j(u). \end{IEEEeqnarray*} \end{definition} \begin{definition} \label{def:linearized cr op} Let $(j,u) \in \mathcal{T} \times \mathcal{B}$ be such that $\overline{\partial}(j ,u) = 0$. 
Define the \textbf{vertical projection} \begin{IEEEeqnarray*}{c+x*} P_{(j,u)} \colon T_{(j,u,0)} \mathcal{E} \longrightarrow \mathcal{E}_{(j,u)}, \qquad P_{(j,u)} (\eta) \coloneqq \eta - \dv (z \circ \pi)(j,u,0) \eta. \end{IEEEeqnarray*} The \textbf{linearized Cauchy--Riemann operators} are the linear maps \begin{IEEEeqnarray*}{rCls+x*} \mathbf{D}_{(j,u)} & \coloneqq & P_{(j,u)} \circ \dv (\overline{\partial}_j)(u) \colon T_u \mathcal{B} \longrightarrow \mathcal{E}_{(j,u)}, \\ \mathbf{L}_{(j,u)} & \coloneqq & P_{(j,u)} \circ \dv (\overline{\partial})(j,u) \colon T_j \mathcal{T} \oplus T_u \mathcal{B} \longrightarrow \mathcal{E}_{(j,u)}. \end{IEEEeqnarray*} Define also the restriction \begin{IEEEeqnarray*}{c+x*} \mathbf{F}_{(j,u)} \coloneqq \mathbf{L}_{(j,u)}|_{T_j \mathcal{T}} \colon T_j \mathcal{T} \longrightarrow \mathcal{E}_{(j,u)}. \end{IEEEeqnarray*} \end{definition} \begin{remark} \label{rmk:tangent of base of bundle} Choose a smooth function $\beta \colon \R \longrightarrow [0,1]$ such that $\beta(s) = 0$ if $s < 0$, $\beta(s) = 1$ if $s > 1$ and $0 \leq \beta'(s) \leq 2$. Consider the Liouville vector field $\hat{Z}^{X} \in \mathfrak{X}(\hat{X})$ and the Reeb vector field $R^{\partial X} \in \mathfrak{X}(\partial X)$. For every puncture $z$, let $(s,t)$ be the cylindrical coordinates near $z$ and define sections \begin{IEEEeqnarray*}{rClCrCl} \hat{Z}^X_z & \in & \Gamma(u^* T \hat{X}), & \quad & \hat{Z}^X_z(s,t) & = & \beta(s) \hat{Z}^X(u(s,t)), \\ R^{\partial X}_z & \in & \Gamma(u^* T \hat{X}), & \quad & R^{\partial X}_z(s,t) & = & \beta(s) R^{\partial X}(u(s,t)). \end{IEEEeqnarray*} Denote $V = \bigoplus_{i=1}^{p} \spn \{\hat{Z}^X_{z_i}, R^{\partial X}_{z_i}\}$. Then, the tangent space of $\mathcal{B}$ is given by \begin{IEEEeqnarray*}{c+x*} T_u \mathcal{B} = V \oplus W^{k,p,\delta}(\dot{\Sigma}, u^* T \hat{X}). \end{IEEEeqnarray*} \end{remark} \begin{definition} \label{def:conjugate and restriction operators} Let $(j,u) \in \mathcal{T} \times \mathcal{B}$ be such that $\overline{\partial}(j,u) = 0$ and consider the linearized Cauchy--Riemann operator $\mathbf{D}_{(j,u)}$. Choose a smooth function $f \colon \dot{\Sigma} \longrightarrow \R$ such that $f(s,t) = \delta s$ on every cylindrical end of $\dot{\Sigma}$. Define the \textbf{restriction} of $\mathbf{D}_{(j,u)}$, denoted $\mathbf{D}_{\delta}$, and the \textbf{conjugation} of $\mathbf{D}_{(j,u)}$, denoted $\mathbf{D}_0$, to be the unique maps such that the diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} T_u \mathcal{B} \ar[d, swap, "\mathbf{D}_{(j,u)}"] & W^{k,p,\delta}(u^* T \hat{X}) \ar[d, "\mathbf{D}_{\delta}"] \ar[l, hook'] \ar[r, hook, two heads, "\xi \mapsto e^f \xi"] & W^{k,p}(u^* T \hat{X}) \ar[d, "\mathbf{D}_0"] \\ \mathcal{E}_{(j,u)} \ar[r, equals] & W^{k-1,p,\delta}(\Hom^{0,1}(T \dot{\Sigma}, u^* T \hat{X})) \ar[r, hook, two heads, swap, "\eta \mapsto e^f \eta"] & W^{k-1,p}(\Hom^{0,1}(T \dot{\Sigma}, u^* T \hat{X})) \end{tikzcd} \end{IEEEeqnarray*} commutes. \end{definition} \begin{lemma} \label{lem:D is a rlcro} The maps $\mathbf{D}_\delta$ and $\mathbf{D}_0$ are real linear Cauchy--Riemann operators. 
\end{lemma} \begin{proof} By \cite[Proposition 3.1.1]{mcduffHolomorphicCurvesSymplectic2012}, the map $\mathbf{D}_{\delta}$ is given by the equation \begin{IEEEeqnarray*}{c+x*} \mathbf{D}_{\delta} \xi = \frac{1}{2} \p{}{}{\nabla \xi + J(u) \nabla \xi \circ j} - \frac{1}{2} J(u) (\nabla_{\xi} J)(u) \partial(u), \end{IEEEeqnarray*} where $\nabla$ is the Levi-Civita connection on $\hat{X}$ associated to the Riemannian metric determined by $J$ and $\edv \hat{\lambda}$. Since $\nabla \colon \mathfrak{X}(\Sigma) \times \Gamma(u^* T \hat{X}) \longrightarrow \Gamma(u^* T \hat{X})$ satisfies the Leibniz rule with respect to the $\Gamma(u^* T \hat{X})$ argument, $\mathbf{D}_{\delta}$ is a real linear Cauchy--Riemann operator. We show that $\mathbf{D}_0$ satisfies the Leibniz rule. \begin{IEEEeqnarray*}{rCls+x*} \mathbf{D}_0 (g \xi) & = & e^f \mathbf{D}_{\delta} (e^{-f} g \xi) & \quad [\text{by definition of $\mathbf{D}_{\delta}$}] \\ & = & g e^f \mathbf{D}_{\delta} (e^{-f} \xi) + \xi \otimes \overline{\partial} g & \quad [\text{$\mathbf{D}_{\delta}$ obeys the Leibniz rule}] \\ & = & g \mathbf{D}_{0} (\xi) + \xi \otimes \overline{\partial} g & \quad [\text{by definition of $\mathbf{D}_{\delta}$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{lemma} \label{lem:Du is surjective case n is 1} If $n=1$ then $\mathbf{L}_{(j,u)}$ is surjective. \end{lemma} \begin{proof} Let $\tau_1$ be a global complex trivialization of $u^* T \hat{X}$ extending to an asymptotic unitary trivialization near the punctures. Let $\tau_2$ be the unitary trivialization of $u^* T \hat{X}$ near the punctures which is induced from the decomposition $T_{(r,x)}(\R \times \partial X) = \p{<}{}{\partial_r} \oplus \p{<}{}{R^{\partial X}_x}$. It is shown in the proof of \cite[Lemma 7.10]{wendlLecturesSymplecticField2016} that the operator $\mathbf{D}_0$ is asymptotic at $z_i$ to $- J \partial_t + \delta$, which is nondegenerate and has Conley--Zehnder index $\conleyzehnder^{\tau_2}(- J \partial_t + \delta) = -1$. Therefore, every $z_i$ is an odd puncture and $\# \mathbf{z}_0 = 0$. We show that $c_1^{\tau_2}(u^* T \hat{X}) = \sum_{i=1}^{p} m_i$, where $m_i$ is the multiplicity of the asymptotic Reeb orbit $\gamma_i$: \begin{IEEEeqnarray*}{rCls+x*} c_1^{\tau_2}(u^* T \hat{X}) & = & c_1^{\tau_1}(u^* T \hat{X}) + \sum_{i=1}^{p} \deg(\tau_1|_{E_{z_i}} \circ (\tau_2|_{E_{z_i}})^{-1}) & \quad [\text{by \cite[Exercise 5.3]{wendlLecturesSymplecticField2016}}] \\ & = & \sum_{i=1}^{p} \deg(\tau_1|_{E_{z_i}} \circ (\tau_2|_{E_{z_i}})^{-1}) & \quad [\text{by \cref{def:relative first chern number}}] \\ & = & \sum_{i=1}^{p} m_i, \end{IEEEeqnarray*} where in the last equality we have used the fact that if $(s,t)$ are the cylindrical coordinates near $z_i$, then for $s$ large enough the map $t \longmapsto \tau_1|_{u(s,t)} \circ (\tau_2|_{u(s,t)})^{-1}$ winds around the origin $m_i$ times. We show that $\operatorname{ind} \mathbf{D}_0 \geq 2$. \begin{IEEEeqnarray*}{rCls+x*} \operatorname{ind} \mathbf{D}_0 & = & n \chi(\dot{\Sigma}) + 2 c_1^{\tau_2}(u^* T \hat{X}) + \sum_{i=1}^{p} \conleyzehnder^{\tau_2}(- J \partial_t + \delta) & \quad [\text{by \cref{thm:riemann roch with punctures}}] \\ & = & 2 + 2 \sum_{i=1}^{p} (m_i - 1) & \quad [\text{since $n = 1$ and $g = 0$}] \\ & \geq & 2 & \quad [\text{since $m_i \geq 1$ for every $i$}]. \end{IEEEeqnarray*} By \cref{lem:conditions for D surjective genus zero}, this implies that $\mathbf{D}_0$ is surjective. 
By \cref{def:conjugate and restriction operators}, the operator $\mathbf{D}_{(j,u)}$ is also surjective. Therefore, $\mathbf{L}_{(j,u)} = \mathbf{F}_{(j,u)} + \mathbf{D}_{(j,u)}$ is also surjective. \end{proof} From now until the end of this section, let $(X, \lambda^X)$ be a Liouville domain of dimension $2n$ and $(Y, \lambda^Y)$ be a Liouville domain of dimension $2n + 2$ such that \begin{enumerate} \item $X \subset Y$ and $\partial X \subset \partial Y$; \item the inclusion $\iota \colon X \longrightarrow Y$ is a Liouville embedding; \item if $x \in X$ then $Z_x^{X} = Z_x^{Y}$; \item if $x \in \partial X$ then $R_x^{\partial X} = R^{\partial Y}_x$. \end{enumerate} In this case, we have an inclusion of completions $\hat{X} \subset \hat{Y}$ as sets. By assumption, $Z^X$ is $\iota$-related to $Z^Y$, which implies that there is a map $\hat{\iota} \colon \hat{X} \longrightarrow \hat{Y}$ on the level of completions. Since in this case $\hat{X} \subset \hat{Y}$ and by \cref{def:embedding on completions coming from Liouville embedding}, $\hat{\iota}$ is the inclusion. Assume that $J^X \in \mathcal{J}(X)$ and $J^Y \in \mathcal{J}(Y)$ are almost complex structures on $\hat{X}$ and $\hat{Y}$ respectively, such that $\hat{\iota} \colon \hat{X} \longrightarrow \hat{Y}$ is holomorphic. As before, let $\Gamma = (\gamma_{1},\ldots,\gamma_{p})$ be a tuple of unparametrized Reeb orbits in $\partial X$. Notice that each $\gamma_i$ can also be seen as a Reeb orbit in $\partial Y$. For every $i = 1,\ldots,p$, choose once and for all admissible parametrizations $\phi_i^X \colon S^1 \times D^{2n-2} \longrightarrow O_i^X$ and $\phi_i^Y \colon S^1 \times D^{2n} \longrightarrow O_i^Y$ near $\gamma_i$ with the property that the diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} S^1 \times D^{2n - 2} \ar[r, hook, two heads, "\phi^X_i"] \ar[d, hook] & O^X_i \ar[r, hook] \ar[d, hook, dashed, "\exists !"] & \partial X \ar[d, hook, "\iota_{\partial Y, \partial X}"] \\ S^1 \times D^{2n} \ar[r, hook, two heads, "\phi^Y_i"] & O^Y_i \ar[r, hook] & \partial Y \end{tikzcd} \end{IEEEeqnarray*} commutes. We will consider the bundle of \cref{def:bundle for cr op} as well as the Cauchy--Riemann operator and its linearization for both $X$ and $Y$. We will use the notation \begin{IEEEeqnarray*}{rClCrClCrCl} \pi^X \colon \mathcal{E}X & \longrightarrow & \mathcal{T} \times \mathcal{B}X, & \qquad & \overline{\partial}\vphantom{\partial}^X \colon \mathcal{T} \times \mathcal{B}X & \longrightarrow & \mathcal{E} X, & \qquad & \mathbf{L}^X_{(j,u)} \colon T_j \mathcal{T} \oplus T_u \mathcal{B} X & \longrightarrow & \mathcal{E}_{(j,u)} X, \\ \pi^Y \colon \mathcal{E}Y & \longrightarrow & \mathcal{T} \times \mathcal{B}Y, & \qquad & \overline{\partial}\vphantom{\partial}^Y \colon \mathcal{T} \times \mathcal{B}Y & \longrightarrow & \mathcal{E} Y, & \qquad & \mathbf{L}^Y_{(j,w)} \colon T_j \mathcal{T} \oplus T_w \mathcal{B} Y & \longrightarrow & \mathcal{E}_{(j,w)} Y \end{IEEEeqnarray*} to distinguish the bundles and maps for $X$ and $Y$. Define maps \begin{IEEEeqnarray*}{rClCrCl} \mathcal{B}\iota \colon \mathcal{B} X & \longrightarrow & \mathcal{B}Y, & \quad & \mathcal{B}\iota(u) & \coloneqq & \hat{\iota} \circ u, \\ \mathcal{E}\iota \colon \mathcal{E} X & \longrightarrow & \mathcal{E}Y, & \quad & \mathcal{E}\iota(j,u,\eta) & \coloneqq & (j, \hat{\iota} \circ u, T \hat{\iota} \circ \eta). 
\end{IEEEeqnarray*} Then, the diagrams \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \mathcal{E}X \ar[r, "\pi^X"] \ar[d, swap, "\mathcal{E}\iota"] & \mathcal{T} \times \mathcal{B}X \ar[d, "\id_{\mathcal{T}} \times \mathcal{B}\iota"] & & \mathcal{T} \times \mathcal{B}X \ar[d, swap, "\id_{\mathcal{T}} \times \mathcal{B}\iota"] \ar[r, "z^X"] & \mathcal{E}X \ar[d, "\mathcal{E}\iota"] \\ \mathcal{E}Y \ar[r, swap, "\pi^Y"] & \mathcal{T} \times \mathcal{B}Y & & \mathcal{T} \times \mathcal{B}Y \ar[r, swap, "z^Y"] & \mathcal{E}Y \\ \mathcal{T} \times \mathcal{B}X \ar[r, "\overline{\partial}\vphantom{\partial}^X"] \ar[d, swap, "\id_{\mathcal{T}} \times \mathcal{B}\iota"] & \mathcal{E}X \ar[d, "\mathcal{E}\iota"] & & (z^X)^* T \mathcal{E} X \ar[r, "P^X"] \ar[d, swap, "T \mathcal{E} \iota"] & \mathcal{E} X \ar[d, "\mathcal{E} \iota"] \\ \mathcal{T} \times \mathcal{B}Y \ar[r, swap, "\overline{\partial}\vphantom{\partial}^Y"] & \mathcal{E}Y & & (z^Y)^* T \mathcal{E} Y \ar[r, swap, "P^Y"] & \mathcal{E} Y \end{tikzcd} \end{IEEEeqnarray*} commute. By the chain rule, the diagram \begin{IEEEeqnarray}{c+x*} \plabel{eq:diag naturality of lcro} \begin{tikzcd} T_u \mathcal{B} X \ar[rr, bend left = 40, "\mathbf{D}^X_{(j,u)}"] \ar[r, "\dv \overline{\partial}\vphantom{\partial}^X_j(u)"] \ar[d, swap, "\dv(\mathcal{B} \iota)(u)"] & T_{(j,u,0)} \mathcal{E} X \ar[r, "P_{(j,u)}^X"] \ar[d, "\dv(\mathcal{E}\iota)(\overline{\partial}\vphantom{\partial}^X_j(u))"] & \mathcal{E}_{(j,u)} X \ar[d, "\mathcal{E}_{(j,u)} \iota"] \\ T_{\hat{\iota} \circ u} \mathcal{B} Y \ar[rr, swap, bend right = 40, "\mathbf{D}^Y_{(j,\hat{\iota} \circ u)}"] \ar[r, swap, "\dv \overline{\partial}\vphantom{\partial}^Y_j(\hat{\iota} \circ u)"] & T_{(j, \hat{\iota} \circ u, 0)} \mathcal{E} Y \ar[r, swap, "P^Y_{(j,\hat{\iota} \circ u)}"] & \mathcal{E}_{(j, \hat{\iota} \circ u)} Y \end{tikzcd} \end{IEEEeqnarray} is also commutative whenever $\overline{\partial}\vphantom{\partial}^X(j,u) = 0$. \begin{remark} \label{rmk:splittings of B and E} Consider the formula for the tangent space of $\mathcal{B}X$ from \cref{rmk:tangent of base of bundle}. By the assumptions on the Liouville domains $X$ and $Y$, we have that $V^X = V^Y$. Also, the diagrams \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} T_u \mathcal{B} X \ar[r, hook] & T_{u} \mathcal{B} Y & W^{k,p,\delta}(u^* (T \hat{X})^{\perp}) \ar[l, hook'] \ar[d, equals] \\ W^{k,p,\delta}(u^* T \hat{X}) \ar[r, hook] \ar[d, two heads, hook] \ar[u, hook] & W^{k,p,\delta}(u^* T \hat{Y}) \ar[u, hook] \ar[d, two heads, hook] & W^{k,p,\delta}(u^* (T \hat{X})^{\perp}) \ar[l, hook'] \ar[d, two heads, hook] \\ W^{k,p}(u^* T \hat{X}) \ar[r, hook] & W^{k,p}(u^* T \hat{Y}) & W^{k,p}(u^* (T \hat{X})^{\perp}) \ar[l, hook'] \end{tikzcd} \\ \begin{tikzcd} \mathcal{E}_{(j,u)} X \ar[r, hook] \ar[d, hook, two heads] & \mathcal{E}_{(j,u)} Y \ar[d, hook, two heads] & \Omega^{k-1,p,\delta}_j(u^*(T \hat{X})^{\perp}) \ar[d, hook, two heads] \ar[l, hook'] \\ \Omega^{k-1,p}_j(u^*T \hat{X}) \ar[r, hook] & \Omega^{k-1,p}_j(u^*T \hat{Y}) & \Omega^{k-1,p}_j(u^*(T \hat{X})^{\perp}) \ar[l, hook'] \end{tikzcd} \end{IEEEeqnarray*} commute, where for shortness we are using the notation \begin{IEEEeqnarray*}{c+x*} \Omega^{k,p}_{j}(E) = W^{k,p}(\Hom^{0,1}((T \dot{\Sigma}, j), (E, J))) \end{IEEEeqnarray*} for any complex vector bundle $(E, J) \longrightarrow \dot{\Sigma}$. In both diagrams, the middle term of every row is the direct sum of the left and right terms. 
In addition, the vertical maps in the middle of both diagrams are block diagonal when written with respect to these decompositions. \end{remark} \begin{definition} Let $z_0 \in \dot{\Sigma}$. Define the \textbf{evaluation map} \begin{IEEEeqnarray*}{rrCl} \operatorname{ev}^X \colon & \mathcal{B} X & \longrightarrow & \hat{X} \\ & u & \longmapsto & u(z_0) \end{IEEEeqnarray*} as well as its derivative $\mathbf{E}^X_u \coloneqq \dv (\operatorname{ev}^{X})(u) \colon T_u \mathcal{B} X \longrightarrow T_{u(z_0)} \hat{X}$. \end{definition} In the following lemma, we show that if a holomorphic curve $u$ in $X$ is regular (in $X$) then the corresponding holomorphic curve $\hat{\iota} \circ u$ in $Y$ is also regular. See also \cite[Proposition A.1]{mcduffSymplecticCapacitiesUnperturbed2022} for a similar result. \begin{lemma} \label{lem:DX surj implies DY surj} Let $u \in \mathcal{B}X$ be holomorphic and denote $\hat{\iota} \circ u \in \mathcal{B} Y$ simply by $u$. Assume that the normal Conley--Zehnder index of every asymptotic Reeb orbit $\gamma_i$ is $1$. \begin{enumerate} \item \label{lem:DX surj implies DY surj 1} If $\mathbf{L}_{(j,u)}^X$ is surjective then so is $\mathbf{L}^Y_{(j,u)}$. \item \label{lem:DX surj implies DY surj 2} If $\mathbf{L}_{(j,u)}^X \oplus \mathbf{E}^X_u$ is surjective then so is $\mathbf{L}^Y_{(j,u)} \oplus \mathbf{E}^Y_u$. \end{enumerate} \end{lemma} \begin{proof} Consider the decomposition $T_x \hat{Y} = T_x \hat{X} \oplus (T_x \hat{X})^{\perp}$ for $x \in \hat{X}$. Let $\tau$ be a global complex trivialization of $u^* T \hat{Y}$, extending to an asymptotic unitary trivialization near the punctures, and such that $\tau$ restricts to a trivialization of $u^* T \hat{X}$ and $u^* (T \hat{X})^{\perp}$. By \cref{rmk:splittings of B and E}, there are splittings \begin{IEEEeqnarray*}{rCls+x*} T_u \mathcal{B} Y & = & T_u \mathcal{B} X \oplus T_u^{\perp} \mathcal{B} X, \\ \mathcal{E}_{(j,u)} Y & = & \mathcal{E}_{(j,u)} X \oplus \mathcal{E}_{(j,u)}^{\perp} X. 
\end{IEEEeqnarray*} We can write the maps \begin{IEEEeqnarray*}{rCl} \mathbf{L}_{(j,u)}^Y & \colon & T_j \mathcal{T} \oplus T_u \mathcal{B} X \oplus T_u^{\perp} \mathcal{B} X \longrightarrow \mathcal{E}_{(j,u)} X \oplus \mathcal{E}_{(j,u)}^{\perp} X, \\ \mathbf{D}_{(j,u)}^Y & \colon & T_u \mathcal{B} X \oplus T_u^{\perp} \mathcal{B} X \longrightarrow \mathcal{E}_{(j,u)} X \oplus \mathcal{E}_{(j,u)}^{\perp} X, \\ \mathbf{L}_{(j,u)}^X & \colon & T_j \mathcal{T} \oplus T_u \mathcal{B} X \longrightarrow \mathcal{E}_{(j,u)} X, \\ \mathbf{F}_{(j,u)}^Y & \colon & T_j \mathcal{T} \longrightarrow \mathcal{E}_{(j,u)} X \oplus \mathcal{E}_{(j,u)}^{\perp} X, \\ \mathbf{E}_{u}^Y & \colon & T_u \mathcal{B} X \oplus T_u^{\perp} \mathcal{B} X \longrightarrow T_x \hat{X} \oplus (T_x \hat{X})^{\perp} \end{IEEEeqnarray*} as block matrices \begin{IEEEeqnarray}{rCl} \mathbf{L}_{(j,u)}^Y & = & \begin{bmatrix} \mathbf{F}^X_{(j,u)} & \mathbf{D}^X_{(j,u)} & \mathbf{D}^{TN}_{(j,u)} \\ 0 & 0 & \mathbf{D}^{NN}_{(j,u)} \end{bmatrix}, \plabel{eq:decomposition of cr ops 1}\\ \mathbf{D}_{(j,u)}^Y & = & \begin{bmatrix} \mathbf{D}^X_{(j,u)} & \mathbf{D}^{TN}_{(j,u)} \\ 0 & \mathbf{D}^{NN}_{(j,u)} \end{bmatrix}, \plabel{eq:decomposition of cr ops 2}\\ \mathbf{L}_{(j,u)}^X & = & \begin{bmatrix} \mathbf{F}^X_{(j,u)} & \mathbf{D}^X_{(j,u)} \end{bmatrix}, \plabel{eq:decomposition of cr ops 3}\\ \mathbf{F}_{(j,u)}^Y & = & \begin{bmatrix} \mathbf{F}^X_{(j,u)} \\ 0 \end{bmatrix}, \plabel{eq:decomposition of cr ops 4}\\ \mathbf{E}_{u}^Y & = & \begin{bmatrix} \mathbf{E}^X_{u} & 0 \\ 0 & \mathbf{E}^{NN}_{u} \end{bmatrix}, \plabel{eq:decomposition of cr ops 5} \end{IEEEeqnarray} where \eqref{eq:decomposition of cr ops 5} follows by definition of the evaluation map, \eqref{eq:decomposition of cr ops 4} is true since $\mathbf{F}^{Y}_{(j,u)}$ is given by the formula $\mathbf{F}^{Y}_{(j,u)}(y) = \frac{1}{2} (J \circ T u \circ y)$, \eqref{eq:decomposition of cr ops 2} follows because diagram \eqref{eq:diag naturality of lcro} commutes, and \eqref{eq:decomposition of cr ops 3} and \eqref{eq:decomposition of cr ops 1} then follow by \cref{def:linearized cr op}. Let $\mathbf{D}^{NN}_\delta$ be the restriction and $\mathbf{D}_0^{NN}$ be the conjugation of $\mathbf{D}^{NN}_{(j,u)}$ (as in \cref{def:conjugate and restriction operators}). Denote by $\mathbf{B}^{NN}_{\gamma_i}$ the asymptotic operator of $\mathbf{D}^{NN}_{\delta}$ at $z_i$. Then the asymptotic operator of $\mathbf{D}^{NN}_0$ at $z_i$ is $\mathbf{B}^{NN}_{\gamma_i} + \delta$, which by assumption has Conley--Zehnder index equal to $1$. We show that $\operatorname{ind} \mathbf{D}_0^{NN} = 2$. \begin{IEEEeqnarray*}{rCls+x*} \operatorname{ind} \mathbf{D}_0^{NN} & = & \chi(\dot{\Sigma}) + 2 c_1^{\tau}(u^* T \hat{X}) + \sum_{i=1}^{p} \conleyzehnder^{\tau}(\mathbf{B}^{NN}_{{\gamma_i}} + \delta) & \quad [\text{by \cref{thm:riemann roch with punctures}}] \\ & = & 2 & \quad [\text{since $\conleyzehnder^{\tau}(\mathbf{B}^{NN}_{{\gamma_i}} + \delta) = 1$}]. \end{IEEEeqnarray*} We prove \ref{lem:DX surj implies DY surj 1}. 
\begin{IEEEeqnarray*}{rCls+x*} \operatorname{ind} \mathbf{D}_0^{NN} = 2 & \Longrightarrow & \mathbf{D}_0^{NN} \text{ is surjective} & \quad [\text{by \cref{lem:conditions for D surjective genus zero}}] \\ & \Longrightarrow & \mathbf{D}_\delta^{NN} \text{ is surjective} & \quad [\text{$\mathbf{D}_0^{NN}$ and $\mathbf{D}_{\delta}^{NN}$ are conjugated}] \\ & \Longrightarrow & \mathbf{D}_{(j,u)}^{NN} \text{ is surjective} & \quad [\text{$\mathbf{D}_{\delta}^Y$ is a restriction of $\mathbf{D}_{(j,u)}^Y$}] \\ & \Longrightarrow & \mathbf{L}_{(j,u)}^Y \text{ is surjective} & \quad [\text{$\mathbf{L}_{(j,u)}^X$ is surjective by assumption}]. \end{IEEEeqnarray*} We prove \ref{lem:DX surj implies DY surj 2}. \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\operatorname{ind} \mathbf{D}_0^{NN} = 2}\\ \quad & \Longrightarrow & \mathbf{D}_0^{NN} \oplus \mathbf{E}_u^{NN} \text{ is surjective} & \quad [\text{by \cref{lem:D plus E is surjective}}] \\ & \Longrightarrow & \mathbf{D}_\delta^{NN} \oplus \mathbf{E}_u^{NN} \text{ is surjective} & \quad [\text{$\mathbf{D}_0^{NN} \oplus \mathbf{E}^{NN}_u$ and $\mathbf{D}_{\delta}^{NN} \oplus \mathbf{E}^{NN}_{u}$ are conjugated}] \\ & \Longrightarrow & \mathbf{D}_{(j,u)}^{NN} \oplus \mathbf{E}_u^{NN} \text{ is surjective} & \quad [\text{$\mathbf{D}_{\delta}^Y \oplus \mathbf{E}^{Y}_{u}$ is a restriction of $\mathbf{D}_{(j,u)}^Y \oplus \mathbf{E}^{Y}_u$}] \\ & \Longrightarrow & \mathbf{L}_{(j,u)}^Y \oplus \mathbf{E}_u^{Y} \text{ is surjective} & \quad [\text{$\mathbf{L}_{(j,u)}^X \oplus \mathbf{E}_u^{X}$ is surjective by assumption}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \section{Moduli spaces of curves in ellipsoids} \label{sec:augmentation map of an ellipsoid} We now use the techniques explained in the past two sections to compute the augmentation map of an ellipsoid (\cref{thm:augmentation is nonzero}). The proof of this theorem consists in an explicit count of curves in the ellipsoid satisfying a tangency constraint (\cref{lem:moduli spaces of ellipsoids have 1 element}) together with the fact that the moduli space of such curves is transversely cut out (\cref{prp:moduli spaces without point constraint are tco,prp:moduli spaces w point are tco,prp:moduli spaces w tangency are tco}). Therefore, the explicit count agrees with the virtual count. We now state the assumptions for this section. Let $a_1 < \cdots < a_n \in \R_{> 0}$ be rationally linearly independent and consider the ellipsoid $E(a_1,\ldots,a_n) \subset \C^n$. By \cite[Section 2.1]{guttSymplecticCapacitiesPositive2018}, $\partial E(a_1, \ldots, a_n)$ has exactly $n$ simple Reeb orbits $\gamma_1, \ldots, \gamma_n$, which satisfy \begin{IEEEeqnarray}{rCls+x*} \gamma_j(t) & = & \sqrt{\frac{a_j}{\pi}} e^{\frac{2 \pi i t}{a_j}} e_j, \\ \mathcal{A}(\gamma^m_j) & = & m a_j, \\ \conleyzehnder(\gamma^m_j) & = & n - 1 + 2 \sum_{i=1}^{n} \p{L}{2}{\frac{m a_j}{a_i}}, \plabel{eq:cz of reeb in ellipsoid} \end{IEEEeqnarray} where $\gamma_j \colon \R / a_j \Z \longrightarrow \partial E(a_1, \ldots, a_n)$ and $e_j$ is the $j$th vector of the canonical basis of $\C^n$ as a vector space over $\C$. For simplicity, for every $\ell = 1, \ldots, n$ denote $E_\ell = E(a_1,\ldots,a_\ell) \subset \C^\ell$. Notice that $\gamma_1$ is a Reeb orbit of $\partial E_1, \ldots, \partial E_n$. 
Define maps \begin{IEEEeqnarray*}{rClCrCl} \iota_{\ell} \colon \C^{\ell} & \longrightarrow & \C^{\ell + 1}, & \quad & \iota_\ell(z_1,\ldots,z_\ell) & \coloneqq & (z_1,\ldots,z_\ell,0) \\ h_{\ell} \colon \C^{\ell} & \longrightarrow & \C, & \quad & h_\ell(z_1,\ldots,z_\ell) & \coloneqq & z_1. \end{IEEEeqnarray*} The maps $\iota_{\ell} \colon E_\ell \longrightarrow E_{\ell+1}$ are Liouville embeddings satisfying the assumptions in \cref{sec:functional analytic setup}. Define also \begin{IEEEeqnarray*}{rCls+x*} x_\ell & \coloneqq & 0 \in \C^\ell, \\ D_{\ell} & \coloneqq & \{ (z_1,\ldots,z_\ell) \in \C^{\ell} \mid z_1 = 0 \} = h_{\ell}^{-1}(0). \end{IEEEeqnarray*} Choose an admissible almost complex structure $J_{\ell} \in \mathcal{J}(E_\ell, D_\ell)$ on $\hat{E}_{\ell}$ such that $J_{\ell}$ is the canonical almost complex structure of $\C^\ell$ near $0$. We assume that the almost complex structures are chosen in such a way that $\hat{\iota}_{\ell} \colon \hat{E}_{\ell} \longrightarrow \hat{E}_{\ell + 1}$ is holomorphic and also such that there exists a biholomorphism $\varphi \colon \hat{E}_1 \longrightarrow \C$ such that $\varphi(z) = z$ for $z$ near $0 \in \C$ (see \cref{lem:biholomorphism explicit} below). Let $m \in \Z_{\geq 1}$ and assume that $m a_1 < a_2 < \cdots < a_n$. Consider the sphere $S^2$, without any specified almost complex structure, with a puncture $z_1 \in S^2$ and an asymptotic marker $v_1 \in (T_{z_1} S^2 \setminus \{0\}) / \R_{> 0}$, and also a marked point $z_0 \in \dot{S}^2 = S^2 \setminus \{z_1\}$. For $k \in \Z_{\geq 0}$, denote\begin{IEEEeqnarray*}{lCls+x*} \mathcal{M}^{\ell,(k)}_{\mathrm{p}} & \coloneqq & \mathcal{M}_{E_{\ell}}^{\$, J_{\ell}}(\gamma^m_1)\p{<}{}{\mathcal{T}^{(k)}x_\ell}_{\mathrm{p}} \\ & \coloneqq & \left\{ (j, u) \ \middle\vert \begin{array}{l} j \text{ is an almost complex structure on }S^2, \\ u \colon (\dot{S}^2, j) \longrightarrow (\hat{E}_\ell, J_\ell) \text{ is as in \cref{def:asy cyl holomorphic curve}}, \\ u(z_0) = x_\ell \text{ and $u$ has contact order $k$ to $D_\ell$ at $x_\ell$} \end{array} \right\}. \end{IEEEeqnarray*} Here, the subscript $\mathrm{p}$ means that the moduli space consists of parametrized curves, i.e. we are not quotienting by biholomorphisms. Denote the moduli spaces of regular curves and of unparametrized curves by \begin{IEEEeqnarray*}{lCls+x*} \mathcal{M}^{\ell,(k)}_{\mathrm{p,reg}} & \coloneqq & \mathcal{M}_{E_{\ell}}^{\$, J_{\ell}}(\gamma^m_1)\p{<}{}{\mathcal{T}^{(k)}x_\ell}_{\mathrm{p,reg}}, \\ \mathcal{M}^{\ell,(k)} & \coloneqq & \mathcal{M}_{E_{\ell}}^{\$, J_{\ell}}(\gamma^m_1)\p{<}{}{\mathcal{T}^{(k)}x_\ell} \coloneqq \mathcal{M}^{\ell,(k)}_{\mathrm{p}} / \sim. \end{IEEEeqnarray*} Here, $\mathcal{M}^{\ell,(0)} \coloneqq \mathcal{M}_{E_{\ell}}^{\$, J_{\ell}}(\gamma^m_1)\p{<}{}{\mathcal{T}^{(0)}x_\ell} \coloneqq \mathcal{M}_{E_{\ell}}^{\$, J_{\ell}}(\gamma^m_1)$ and analogously for $\mathcal{M}^{\ell,(0)}_{\mathrm{p,reg}}$ and $\mathcal{M}^{\ell,(0)}_{\mathrm{p}}$. 
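Note that, since $m a_1 < a_2 < \cdots < a_n$, Equation \eqref{eq:cz of reeb in ellipsoid} implies that for every $\ell = 1, \ldots, n$, viewing $\gamma^m_1$ as a Reeb orbit of $\partial E_\ell$,
\begin{IEEEeqnarray*}{c+x*}
    \conleyzehnder(\gamma^m_1) = \ell - 1 + 2 \sum_{i=1}^{\ell} \p{L}{2}{\frac{m a_1}{a_i}} = \ell - 1 + 2 m,
\end{IEEEeqnarray*}
because $\p{L}{1}{m a_1 / a_1} = m$ and $\p{L}{1}{m a_1 / a_i} = 0$ for $i = 2, \ldots, \ell$. In particular, by the virtual dimension formula used in the proof of \cref{thm:properties of hsc} (with one positive asymptote, i.e. $p = 1$), the expected dimension of $\mathcal{M}^{\ell,(k)}$ is $(\ell - 3)(2 - 1) + (\ell - 1 + 2m) - 2 \ell - 2 k + 4 = 2(m - k)$, which vanishes exactly when $k = m$.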
\begin{lemma} \phantomsection\label{lem:biholomorphism explicit} For any $a > 0$, there exists an almost complex structure $J$ on $\hat{B}(a)$ and a biholomorphism $\varphi \colon \hat{B}(a) \longrightarrow \C$ such that \begin{enumerate} \item \label{lem:biholomorphism explicit 1} $J$ is cylindrical on $\R_{\geq 0} \times \partial B(a)$; \item \label{lem:biholomorphism explicit 2} $J$ is the canonical almost complex structure of $\C$ near $0 \in B(a) \subset \C$; \item \label{lem:biholomorphism explicit 3} $\varphi(z) = z$ for $z$ near $0 \in B(a) \subset \C$. \end{enumerate} \end{lemma} \begin{proof} Choose $\rho_0 < 0$ and let $g \colon \R \longrightarrow \R_{>0}$ be a function such that $g(\rho) = a/4 \pi$ for $\rho \leq \rho_0$ and $g(\rho) = 1$ for $\rho \geq 0$. For $(\rho, w) \in \R \times \partial B(a)$, define \begin{IEEEeqnarray*}{rCls+x*} f(\rho) & \coloneqq & \exp \p{}{2}{\frac{\rho_0}{2} + \frac{2 \pi}{a} \int_{\rho_0}^{\rho} g(\sigma) \edv \sigma}, \\ J_{(\rho, w)} (\partial_{\rho}) & \coloneqq & g (\rho) R^{\partial B(a)}_{w}, \\ \varphi(\rho, w) & \coloneqq & f(\rho) w. \end{IEEEeqnarray*} Property \ref{lem:biholomorphism explicit 1} follows from the fact that $g(\rho) = 1$ for $\rho \geq 0$. Consider the Liouville vector field of $\C$, which is denoted by $Z$ and given by $Z(w) = w/2$. Let $\Phi \colon \R \times \partial B(a) \longrightarrow \C$ be the map given by $\Phi(\rho, w) = \phi^\rho_Z(w) = \exp(\rho/2) w$. By definition of completion, $\Phi|_{B(a) \setminus \{0\}} \colon B(a) \setminus \{0\} \longrightarrow \C$ is the inclusion. To prove property \ref{lem:biholomorphism explicit 3}, it suffices to show that $\varphi(\rho, w) = \Phi(\rho, w)$ for every $(\rho, w) \in \R_{\leq \rho_0} \times \partial B(a)$. For this, simply note that \begin{IEEEeqnarray*}{rCls+x*} f(\rho) & = & \exp \p{}{2}{\frac{\rho_0}{2} + \frac{2 \pi}{a} \int_{\rho_0}^{\rho} g(\sigma) \edv \sigma} & \quad [\text{by definition of $f$}] \\ & = & \exp \p{}{2}{\frac{\rho_0}{2} + \frac{2 \pi}{a} (\rho - \rho_0) \frac{a}{4 \pi} } & \quad [\text{$\rho \leq \rho_0$ implies $g(\rho) = a / 4 \pi$}] \\ & = & \exp \p{}{2}{\frac{\rho}{2}}. \end{IEEEeqnarray*} Therefore, $\varphi(z) = z$ for $z$ near $0 \in B(a) \subset \C$, and in particular $\varphi$ can be extended smoothly to a map $\varphi \colon \hat{B}(a) \longrightarrow \C$. We show that $\varphi$ is holomorphic. \begin{IEEEeqnarray*}{rCls+x*} j \circ \dv \varphi(\rho, w) (\partial_{\rho}) & = & j \p{}{2}{\pdv{}{\rho} \p{}{1}{f(\rho) |w|} \pdv{}{r}\Big|_{\varphi(\rho, w)}} & \quad [\text{by definition of $\varphi$}] \\ & = & \frac{2 \pi}{a} \, g(\rho) \, j \p{}{2}{ f(\rho) |w| \pdv{}{r}\Big|_{\varphi(\rho, w)}} & \quad [\text{by definition of $f$}] \\ & = & \frac{2 \pi}{a} \, g(\rho) \, j \p{}{2}{ |\varphi(\rho,w)| \pdv{}{r}\Big|_{\varphi(\rho, w)}} & \quad [\text{by definition of $\varphi$}] \\ & = & \frac{2 \pi}{a} \, g(\rho) \, \pdv{}{\theta}\Big|_{\varphi(\rho, w)} & \quad [\text{by definition of $j$}] \\ & = & g(\rho) \, \dv \varphi(\rho, w) (R^{\partial B(a)}_w) & \quad [\text{by \cite[Equation (2.2)]{guttSymplecticCapacitiesPositive2018}}] \\ & = & \dv \varphi(\rho, w) \circ J (\partial_{\rho}) & \quad [\text{by definition of $J$}], \end{IEEEeqnarray*} where $(r, \theta)$ are the polar coordinates of $\C$. Since $\varphi$ is holomorphic and $\varphi$ is the identity near the origin, we conclude that $J$ is the canonical almost complex structure of $\C$ near the origin. 
In particular, $J$ can be extended smoothly to an almost complex structure on $\hat{B}(a)$, which proves \ref{lem:biholomorphism explicit 2}. Finally, we show that $\varphi$ is a diffeomorphism. For this, it suffices to show that $\Phi^{-1} \circ \varphi \colon \R \times \partial B(a) \longrightarrow \R \times \partial B(a)$ is a diffeomorphism. This map is given by $\Phi^{-1} \circ \varphi(\rho, w) = (2 \ln(f(\rho)), w)$. Since \begin{IEEEeqnarray*}{c+x*} \odv{}{\rho} (2 \ln(f(\rho))) = 2 \frac{f'(\rho)}{f(\rho)} = \frac{4 \pi}{a} g(\rho) > 0, \end{IEEEeqnarray*} $\varphi$ is a diffeomorphism. \end{proof} \begin{lemma} \label{lem:psi j} Let $\operatorname{inv} \colon \overline{\C} \longrightarrow \overline{\C}$ be the map given by $\operatorname{inv}(z) = 1/z$ and consider the vector $V \coloneqq \dv \operatorname{inv}(0) \partial_x \in T_{\infty} \overline{\C}$. For every $j \in \mathcal{T}$ there exists a unique biholomorphism $\psi_j \colon (\overline{\C}, j_0) \longrightarrow (S^2, j)$ such that \begin{IEEEeqnarray*}{c+x*} \psi_j(0) = z_0, \qquad \psi_j(\infty) = z_1, \qquad \dv \psi_j(\infty) V = \frac{v_1}{\| v_1 \|}, \end{IEEEeqnarray*} where $\| \cdot \|$ is the norm coming from the canonical Riemannian metric on $S^2$ as the sphere of radius $1$ in $\R^3$. \end{lemma} \begin{proof} By the uniformization theorem \cite[Theorem XII.0.1]{desaint-gervaisUniformizationRiemannSurfaces2016}, there exists a biholomorphism $\phi \colon (S^2, j) \longrightarrow (\overline{\C}, j_0)$. Since there exists a unique Möbius transformation $\psi_0 \colon (\overline{\C}, j_0) \longrightarrow (\overline{\C}, j_0)$ such that \begin{IEEEeqnarray*}{c+x*} \psi_0(0) = \phi(z_0), \qquad \psi_0(\infty) = \phi(z_1), \qquad \dv \psi_0 (\infty) V = \dv \phi(z_1) \frac{v_1}{\| v_1 \|}, \end{IEEEeqnarray*} the result follows by setting $\psi_j \coloneqq \phi^{-1} \circ \psi_0$. \end{proof} We will denote also by $\psi_j$ the restriction $\psi_j \colon (\C, j_0) \longrightarrow (S^2, j)$. \begin{lemma} \label{lem:u is a polynomial} If $(j,u) \in \mathcal{M}^{1,(0)}$ then $\varphi \circ u \circ \psi_j \colon \C \longrightarrow \C$ is a polynomial of degree $m$. \end{lemma} \begin{proof} Since $u$ is positively asymptotic to $\gamma^m_1$, the map $\varphi \circ u \circ \psi_j$ goes to $\infty$ as $z$ goes to $\infty$. Therefore, $\varphi \circ u \circ \psi_j$ is an entire function with a pole at infinity, and hence a polynomial. Again using the fact that $u$ is positively asymptotic to $\gamma^m_1$, we conclude that for $r$ big enough the path $\theta \longmapsto \varphi \circ u \circ \psi_j(r e^{i \theta})$ winds around the origin $m$ times. This implies that the degree of $\varphi \circ u \circ \psi_j$ is $m$. \end{proof} \begin{lemma} \label{lem:normal cz is one} For every $\ell = 1,\ldots,n-1$, view $\gamma^m_1$ as a Reeb orbit of $\partial E_{\ell} \subset \partial E_{\ell + 1}$. The normal Conley--Zehnder index of $\gamma^m_1$ is $1$. \end{lemma} \begin{proof} By \cite[Equation (2.2)]{guttSymplecticCapacitiesPositive2018}, the Reeb vector field of $\partial E_{\ell + 1}$ is given by \begin{IEEEeqnarray*}{c+x*} R^{\partial E_{\ell + 1}} = 2 \pi \sum_{j=1}^{\ell+1} \frac{1}{a_j} \pdv{}{\theta_{j}}, \end{IEEEeqnarray*} where $\theta_j$ denotes the angular polar coordinate of the $j$th summand of $\C^{\ell+1}$. Therefore, the flow of $R^{\partial E_{\ell + 1}}$ is given by \begin{IEEEeqnarray*}{rrCl} \phi^{t}_{R} \colon & \partial E_{\ell+1} & \longrightarrow & \partial E_{\ell+1} \\ & (z_1,\ldots,z_{\ell+1}) & \longmapsto & \p{}{2}{e^{\frac{2 \pi i t}{a_1}} z_1, \ldots, e^{\frac{2 \pi i t}{a_{\ell+1}}} z_{\ell+1}}. 
\end{IEEEeqnarray*} The diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \xi^{\partial E_{\ell}}_{\gamma^m_1(0)} \ar[r] \ar[d, swap, "\dv \phi^t_{R}(\gamma^m_1(0))"] & \xi^{\partial E_{\ell+1}}_{\gamma^m_1(0)} \ar[d, "\dv \phi^t_{R}(\gamma^m_1(0))"] & \big(\xi^{\partial E_{\ell+1}}_{\gamma^m_1(0)}\big)^{\perp} \ar[l] \ar[d, "\dv \phi^t_{R}(\gamma^m_1(0))"] \ar[r, equals] & \C \ar[d, "\times \exp \p{}{1}{\frac{2 \pi i t}{a_{\ell+1}}}"] \\ \xi^{\partial E_{\ell}}_{\gamma^m_1(t)} \ar[r] & \xi^{\partial E_{\ell+1}}_{\gamma^m_1(t)} & \big(\xi^{\partial E_{\ell+1}}_{\gamma^m_1(t)}\big)^{\perp} \ar[l] \ar[r, equals] & \C \end{tikzcd} \end{IEEEeqnarray*} commutes. Define a path $A_{\gamma^m_1} \colon [0,m a_1] \longrightarrow \operatorname{Sp}(2)$ by $A_{\gamma^m_1}(t) = \exp (t J_0 S)$, where \begin{IEEEeqnarray*}{c+x*} S = \frac{2 \pi}{a_{\ell + 1}} \begin{bmatrix} 1 & 0 \\ 0 & 1 \end{bmatrix}. \end{IEEEeqnarray*} The only eigenvalue of $S$ is $2 \pi / a_{\ell+1}$, which has multiplicity $2$. Therefore, the signature of $S$ is $\signature S = 2$. These facts allow us to compute $\conleyzehnder^{\perp}(\gamma^m_1)$ using \cref{prp:gutts cz formula}: \begin{IEEEeqnarray*}{rCls+x*} \conleyzehnder^{\perp}(\gamma^m_1) & = & \conleyzehnder(A_{\gamma^m_1}) & \quad [\text{by definition of $\conleyzehnder^{\perp}$}] \\ & = & \p{}{2}{\frac{1}{2} + \p{L}{2}{\sqrt{\frac{2 \pi}{a_{\ell + 1}}\frac{2 \pi}{a_{\ell + 1}}} \frac{m a_1}{2 \pi}}} \signature S & \quad [\text{by \cref{prp:gutts cz formula}}] \\ & = & \frac{1}{2} \signature S & \quad [\text{since $m a_1 < a_2 < \cdots < a_n$}] \\ & = & 1 & \quad [\text{by the discussion above}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{lemma} \label{lem:unique reeb orbit with cz equal to} If $\ell = 1,\ldots,n$ then $\gamma^m_1$ is the unique Reeb orbit of $\partial E_{\ell}$ such that $\conleyzehnder(\gamma^m_1) = \ell - 1 + 2m$. \end{lemma} \begin{proof} First, notice that \begin{IEEEeqnarray*}{rCls+x*} \conleyzehnder(\gamma^m_1) & = & \ell - 1 + 2 \sum_{j=1}^{\ell} \p{L}{2}{\frac{m a_1}{a_j}} & \quad [\text{by equation \eqref{eq:cz of reeb in ellipsoid}}] \\ & = & \ell - 1 + 2 m & \quad [\text{since $m a_1 < a_2 < \cdots < a_n$}]. \end{IEEEeqnarray*} Conversely, let $\gamma = \gamma^k_i$ be a Reeb orbit of $\partial E_\ell$ with $\conleyzehnder(\gamma) = \ell - 1 + 2m$. By equation \eqref{eq:cz of reeb in ellipsoid}, this implies that \begin{IEEEeqnarray}{c+x*} \label{eq:k is sum of floors} m = \sum_{j=1}^{\ell} \p{L}{2}{\frac{k a_i}{a_j}}. \end{IEEEeqnarray} We show that $i = 1$. Assume by contradiction otherwise. Then \begin{IEEEeqnarray*}{rCls+x*} m & = & \sum_{1 \leq j \leq \ell} \p{L}{2}{\frac{k a_i}{a_j}} & \quad [\text{by equation \eqref{eq:k is sum of floors}}] \\ & \geq & \sum_{1 \leq j \leq i} \p{L}{2}{\frac{k a_i}{a_j}} & \quad [\text{since every term in the sum is $\geq 0$}] \\ & = & \p{L}{2}{\frac{k a_i}{a_1}} + \sum_{1 < j < i} \p{L}{2}{\frac{k a_i}{a_j}} + k & \quad [\text{since by assumption, $i > 1$}] \\ & \geq & (m + i - 1) k & \quad [\text{$m a_1 < a_2 < \cdots < a_i$}] \\ & > & m k & \quad [\text{since by assumption, $i > 1$}], \end{IEEEeqnarray*} which is a contradiction, and therefore $i = 1$. We show that $k = m$, using the fact that $m \geq \lfloor k a_i / a_1 \rfloor = k$. 
\begin{IEEEeqnarray*}{rCls+x*} m & = & \sum_{1 \leq j \leq \ell} \p{L}{2}{\frac{k a_1}{a_j}} & \quad [\text{by equation \eqref{eq:k is sum of floors} and since $i = 1$}] \\ & = & k + \sum_{2 \leq j \leq \ell} \p{L}{2}{\frac{k a_1}{a_j}} & \\ & = & k & \quad [\text{since $k \leq m$ and $k a_1 \leq m a_1 < a_2 < \cdots < a_n$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{lemma} \label{lem:lch of ellipsoid} The module $CH_{n - 1 + 2m}(E_n)$ is the free $\Q$-module generated by $\gamma^m_1$. \end{lemma} \begin{proof} By equation \eqref{eq:cz of reeb in ellipsoid}, every Reeb orbit of $\partial E_n$ is good. We claim that the differential $\partial \colon CC(E_n) \longrightarrow CC(E_n)$ is zero. Assume by contradiction that there exists a Reeb orbit $\gamma$ such that $\partial \gamma \neq 0$. By definition of $\partial$, this implies that there exist Reeb orbits $\eta, \alpha_1, \ldots, \alpha_p$ such that \begin{IEEEeqnarray*}{rCls+x*} 0 & \neq & \#^{\mathrm{vir}} \overline{\mathcal{M}}^{J_n}_{\partial E_n}(\gamma; \eta, \alpha_1, \ldots, \alpha_p), \\ 0 & \neq & \#^{\mathrm{vir}} \overline{\mathcal{M}}^{J_n}_{E_n}(\alpha_j), \quad \text{for } j=1,\ldots,p. \end{IEEEeqnarray*} By assumption on the virtual perturbation scheme, \begin{IEEEeqnarray*}{rCls+x*} 0 & = & \operatorname{virdim} \overline{\mathcal{M}}^{J_n}_{E_n}(\alpha_j) = n - 3 + \conleyzehnder(\alpha_j) \quad \text{for every } j = 1,\ldots,p, \\ \\ 0 & = & \operatorname{virdim} \overline{\mathcal{M}}^{J_n}_{\partial E_n}(\gamma; \eta, \alpha_1, \ldots, \alpha_p) \\ & = & (n-3)(2 - (2+p)) + \conleyzehnder(\gamma) - \conleyzehnder(\eta) - \sum_{j=1}^{p} \conleyzehnder(\alpha_j) - 1 \\ & = & \conleyzehnder(\gamma) - \conleyzehnder(\eta) - 1 \\ & \in & 1 + 2 \Z, \end{IEEEeqnarray*} where in the last line we used equation \eqref{eq:cz of reeb in ellipsoid}. This gives the desired contradiction, and we conclude that $\partial \colon CC(E_n) \longrightarrow CC(E_n)$ is zero. Therefore, $CH(E_n) = CC(E_n)$ is the free $\Q$-module generated by the Reeb orbits of $\partial E_n$. By \cref{lem:unique reeb orbit with cz equal to}, $\gamma^m_1$ is the unique Reeb orbit of $\partial E_n$ with $\conleyzehnder(\gamma^m_1) = n - 1 + 2m$, from which the result follows. \end{proof} \begin{lemma} \phantomsection\label{lem:moduli spaces of ellipsoids are all equal} If $\ell = 1,\ldots,n$ and $k \in \Z_{\geq 1}$ then $\mathcal{M}^{\ell,(k)}_{\mathrm{p}} = \mathcal{M}^{1,(k)}_{\mathrm{p}}$ and $\mathcal{M}^{\ell,(k)} = \mathcal{M}^{1,(k)}$. \end{lemma} \begin{proof} It suffices to show that $\mathcal{M}^{\ell,(k)}_{\mathrm{p}} = \mathcal{M}^{\ell+1,(k)}_{\mathrm{p}}$ for every $\ell = 1,\ldots,n-1$. The inclusion $\mathcal{M}^{\ell,(k)}_{\mathrm{p}} \subset \mathcal{M}^{\ell+1,(k)}_{\mathrm{p}}$ follows from the fact that the inclusion $\hat{E}_\ell \hookrightarrow \hat{E}_{\ell+1}$ is holomorphic and the assumptions on the symplectic divisors. To prove that $\mathcal{M}^{\ell+1,(k)}_{\mathrm{p}} \subset \mathcal{M}^{\ell,(k)}_{\mathrm{p}}$, it suffices to assume that $(j,u) \in \mathcal{M}^{\ell+1,(k)}_{\mathrm{p}}$ and to show that the image of $u$ is contained in $\hat{E}_\ell \subset \hat{E}_{\ell+1}$. Since $u$ has contact order $k$ to $D_{\ell+1}$ at $x_{\ell+1} = \iota_{\ell}(x_{\ell})$, we conclude that $u$ is not disjoint from $\hat{E}_\ell$. By \cref{lem:stabilization 2}, $u$ is contained in $\hat{E}_\ell$. \end{proof} We now prove that the moduli spaces $\mathcal{M}^{\ell,(k)}$ are regular. The proof strategy is as follows. 
\begin{enumerate} \item \cref{prp:moduli spaces without point constraint are tco} deals with the moduli spaces $\mathcal{M}^{1,(0)}$. We show that the linearized Cauchy--Riemann operator is surjective using \cref{lem:Du is surjective case n is 1}. \item \cref{prp:moduli spaces w point are tco} deals with the moduli spaces $\mathcal{M}^{\ell,(1)}$. Here, we need to consider the linearized Cauchy--Riemann operator together with an evaluation map. We show inductively that this map is surjective using \cref{lem:DX surj implies DY surj}. \item Finally, \cref{prp:moduli spaces w tangency are tco} deals with the moduli spaces $\mathcal{M}^{\ell,(k)}$. We now need to consider the jet evaluation map. We prove inductively that this map is surjective by writing it explicitly. \end{enumerate} \begin{proposition} \label{prp:moduli spaces without point constraint are tco} The moduli spaces $\mathcal{M}^{1,(0)}_{\mathrm{p}}$ and $\mathcal{M}^{1,(0)}$ are transversely cut out. \end{proposition} \begin{proof} It is enough to show that $\mathcal{M}^{1,(0)}_{\mathrm{p}}$ is transversely cut out, since this implies that $\mathcal{M}^{1,(0)}$ is transversely cut out as well. Recall that $\mathcal{M}^{1,(0)}_{\mathrm{p}}$ can be written as the zero set of the Cauchy--Riemann operator $\overline{\partial}\vphantom{\partial}^{1} \colon \mathcal{T} \times \mathcal{B} E_{1} \longrightarrow \mathcal{E} E_{1}$. It suffices to assume that $(j,u) \in (\overline{\partial}\vphantom{\partial}^{1})^{-1}(0)$ and to prove that the linearization \begin{IEEEeqnarray*}{c+x*} \mathbf{L}_{(j,u)}^1 \colon T_j \mathcal{T} \oplus T_u \mathcal{B} E_1 \longrightarrow \mathcal{E}_{(j,u)} E_1 \end{IEEEeqnarray*} is surjective. This follows from \cref{lem:Du is surjective case n is 1}. \end{proof} \begin{proposition} \label{prp:moduli spaces w point are tco} If $\ell = 1,\ldots,n$ then $\mathcal{M}^{\ell,(1)}_{\mathrm{p}}$ and $\mathcal{M}^{\ell,(1)}$ are transversely cut out. \end{proposition} \begin{proof} We will use the notation of \cref{sec:functional analytic setup} with $X = E_{\ell}$ and $Y = E_{\ell + 1}$. We will show by induction on $\ell$ that $\mathcal{M}^{\ell,(1)}_{\mathrm{p}}$ is transversely cut out. This implies that $\mathcal{M}^{\ell,(1)}$ is transversely cut out as well. We prove the base case. By \cref{prp:moduli spaces without point constraint are tco}, $\mathcal{M}^{1,(0)}_{\mathrm{p}}$ is a smooth manifold. Consider the evaluation map \begin{IEEEeqnarray*}{rrCl} \operatorname{ev}^{1} \colon & \mathcal{M}^{1,(0)}_{\mathrm{p}} & \longrightarrow & \hat{E}_1 \\ & (j,u) & \longmapsto & u(z_0). \end{IEEEeqnarray*} Notice that $\mathcal{M}^{1,(1)}_{\mathrm{p}} = (\operatorname{ev}^1)^{-1}(x_1)$. We wish to show that the linearized evaluation map $\mathbf{E}^1_{(j,u)} = \dv (\operatorname{ev}^1)(j,u) \colon T_{(j,u)} \mathcal{M}^{1,(0)}_{\mathrm{p}} \longrightarrow T_{u(z_0)} \hat{E}_1$ is surjective whenever $u(z_0) = \operatorname{ev}^{1}(j,u) = x_1$. 
There are commutative diagrams \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \mathcal{M}^{1,(0)}_{\mathrm{p}} \ar[r, two heads, "\Phi"] \ar[d, swap, "\operatorname{ev}^1"] & \mathcal{M} \ar[d, "\operatorname{ev}_{\mathcal{M}}"] & \mathcal{C} \ar[l, swap, hook', two heads, "\mathcal{P}"] \ar[d, "\operatorname{ev}_{\mathcal{C}}"] & & T_{(j,u)} \mathcal{M}^{1,(0)}_{\mathrm{p}} \ar[r, two heads, "{\dv \Phi(j,u)}"] \ar[d, swap, "{\mathbf{E}^1_{(j,u)}}"] & T_f \mathcal{M} \ar[d, "\mathbf{E}_{\mathcal{M}}"] & \C^{m+1} \ar[l, swap, hook', two heads, "\dv \mathcal{P}(a)"] \ar[d, "\mathbf{E}_{\mathcal{C}}"] \\ \hat{E}_1 \ar[r, hook, two heads, swap, "\varphi"] & \C \ar[r, equals] & \C & & T_{x_1} \hat{E}_1 \ar[r, hook, two heads, swap, "\dv \varphi(x_1)"] & \C \ar[r, equals] & \C \end{tikzcd} \end{IEEEeqnarray*} where \begin{IEEEeqnarray*}{rCls+x*} \mathcal{M} & \coloneqq & \{f \colon \C \longrightarrow \C \mid f \text{ is a polynomial of degree }m \}, \\ \mathcal{C} & \coloneqq & \{(a_0,\ldots,a_m) \in \C^{m+1} \mid a_m \neq 0\}, \\ \Phi(j,u) & \coloneqq & \varphi \circ u \circ \psi_j, \\ \operatorname{ev}_{\mathcal{M}}(f) & \coloneqq & f(0), \\ \operatorname{ev}_{\mathcal{C}}(a_0,\ldots,a_m) & \coloneqq & a_0, \\ \mathcal{P}(a_0,\ldots,a_m)(z) & \coloneqq & a_0 + a_1 z + \cdots + a_m z^m, \end{IEEEeqnarray*} and the diagram on the right is obtained by linearizing the one on the left. The map $\Phi$ is well-defined by \cref{lem:u is a polynomial}. Since $\mathbf{E}_{\mathcal{C}}(a_0,\ldots,a_m) = a_0$ is surjective, $\mathbf{E}^1_u$ is surjective as well. This finishes the proof of the base case. We prove the induction step, i.e. that if $\mathcal{M}^{\ell,(1)}_p$ is transversely cut out then so is $\mathcal{M}^{\ell+1,(1)}_p$. We prove that $\mathcal{M}^{\ell,(1)}_{\mathrm{p,reg}} \subset \mathcal{M}^{\ell+1,(1)}_{\mathrm{p,reg}}$. For this, assume that $(j,u) \in \mathcal{M}^{\ell,(1)}_{\mathrm{p}}$ is such that $\mathbf{L}_{(j,u)}^\ell \oplus \mathbf{E}_u^\ell \colon T_j \mathcal{T} \oplus T_{u} \mathcal{B} E_\ell \longrightarrow \mathcal{E}_{(j,u)} E_\ell \oplus T_{x_\ell} \hat{E}_\ell$ is surjective. By \cref{lem:DX surj implies DY surj}, \begin{IEEEeqnarray*}{c+x*} \mathbf{L}_{(j,u)}^{\ell+1} \oplus \mathbf{E}_u^{\ell+1} \colon T_j \mathcal{T} \oplus T_{u} \mathcal{B} E_{\ell+1} \longrightarrow \mathcal{E}_{(j,u)} E_{\ell+1} \oplus T_{x_{\ell+1}} \hat{E}_{\ell+1} \end{IEEEeqnarray*} is also surjective, which means that $(j,u) \in \mathcal{M}^{\ell+1,(1)}_{\mathrm{p,reg}}$. This concludes the proof of $\mathcal{M}^{\ell,(1)}_{\mathrm{p,reg}} \subset \mathcal{M}^{\ell+1,(1)}_{\mathrm{p,reg}}$. Finally, we show that $\mathcal{M}^{\ell+1,(1)}_{\mathrm{p,reg}} = \mathcal{M}^{\ell+1,(1)}_{\mathrm{p}}$. \begin{IEEEeqnarray*}{rCls+x*} \mathcal{M}^{\ell+1,(1)}_{\mathrm{p,reg}} & \subset & \mathcal{M}^{\ell+1,(1)}_{\mathrm{p}} & \quad [\text{since regular curves form a subset}] \\ & = & \mathcal{M}^{\ell,(1)}_{\mathrm{p}} & \quad [\text{by \cref{lem:moduli spaces of ellipsoids are all equal}}] \\ & = & \mathcal{M}^{\ell,(1)}_{\mathrm{p,reg}} & \quad [\text{by the induction hypothesis}] \\ & \subset & \mathcal{M}^{\ell+1,(1)}_{\mathrm{p,reg}} & \quad [\text{proven above}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{proposition} \label{prp:moduli spaces w tangency are tco} If $\ell = 1,\ldots, n$ and $k = 1,\ldots,m$ then $\mathcal{M}^{\ell,(k)}_{\mathrm{p}}$ and $\mathcal{M}^{\ell,(k)}$ are transversely cut out. 
\end{proposition} \begin{proof} By \cref{prp:moduli spaces w point are tco}, $\mathcal{M}^{\ell,(1)}_{\mathrm{p}}$ is a smooth manifold. Consider the jet evaluation map \begin{IEEEeqnarray*}{rrCl} j^{\ell,(k)} \colon & \mathcal{M}^{\ell,(1)}_{\mathrm{p}} & \longrightarrow & \C^{k-1} \\ & (j,u) & \longmapsto & ((h_{\ell} \circ u \circ \psi_j)^{(1)}(0), \ldots, (h_{\ell} \circ u \circ \psi_j)^{(k-1)}(0)). \end{IEEEeqnarray*} The moduli space $\mathcal{M}^{\ell,(k)}_{\mathrm{p}}$ is given by $\mathcal{M}^{\ell,(k)}_{\mathrm{p}} = (j^{\ell,(k)})^{-1}(0)$. We will prove by induction on $\ell$ that $\mathcal{M}^{\ell,(k)}_{\mathrm{p}}$ is transversely cut out. This shows that $\mathcal{M}^{\ell,(k)}$ is transversely cut out as well. Define $\mathbf{J}^{\ell,(k)}_{(j,u)} \coloneqq \dv(j^{\ell,(k)})(j,u) \colon T_{(j,u)} \mathcal{M}^{\ell,(1)}_{\mathrm{p}} \longrightarrow \C^{k-1}$. We prove the base case, i.e. that $\mathcal{M}^{1,(k)}_{\mathrm{p}}$ is transversely cut out. For this, it suffices to assume that $(j,u) \in \mathcal{M}^{1,(1)}_{\mathrm{p}}$ is such that $j^{1,(k)}(j,u) = 0$ and to prove that $\mathbf{J}^{1,(k)}_{(j,u)}$ is surjective. There are commutative diagrams \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \mathcal{M}^{1,(1)}_{\mathrm{p}} \ar[r, two heads, "\Phi"] \ar[d, swap, "j^{1,(k)}"] & \mathcal{M} \ar[d, "j^{(k)}_{\mathcal{M}}"] & \mathcal{C} \ar[l, swap, hook', two heads, "\mathcal{P}"] \ar[d, "j^{(k)}_{\mathcal{C}}"] & & T_{(j,u)} \mathcal{M}^{1,(1)}_{\mathrm{p}} \ar[r, two heads, "{\dv \Phi(j,u)}"] \ar[d, swap, "{\mathbf{J}^{1,(k)}_{(j,u)}}"] & T_f \mathcal{M} \ar[d, "\mathbf{J}^{(k)}_{\mathcal{M}}"] & \C^{m} \ar[l, swap, hook', two heads, "\dv \mathcal{P}(a)"] \ar[d, "\mathbf{J}^{(k)}_{\mathcal{C}}"] \\ \C^{k-1} \ar[r, equals] & \C^{k-1} \ar[r, equals] & \C^{k-1} & & \C^{k-1} \ar[r, equals] & \C^{k-1} \ar[r, equals] & \C^{k-1} \end{tikzcd} \end{IEEEeqnarray*} where \begin{IEEEeqnarray*}{rCls+x*} \mathcal{M} & \coloneqq & \{f \colon \C \longrightarrow \C \mid f \text{ is a polynomial of degree }m \text{ with }f(0)=0 \}, \\ \mathcal{C} & \coloneqq & \{(a_1,\ldots,a_m) \in \C^{m} \mid a_m \neq 0\}, \\ \Phi(j,u) & \coloneqq & \varphi \circ u \circ \psi_j, \\ j^{(k)}_{\mathcal{M}}(f) & \coloneqq & (f^{(1)}(0),\ldots,f^{(k-1)}(0)), \\ j^{(k)}_{\mathcal{C}}(a_1,\ldots,a_m) & \coloneqq & (a_1,\ldots,(k-1)! a_{k-1}), \\ \mathcal{P}(a_1,\ldots,a_m)(z) & \coloneqq & a_1 z + \cdots + a_m z^m, \end{IEEEeqnarray*} and the diagram on the right is obtained by linearizing the one on the left. The map $\Phi$ is well-defined by \cref{lem:u is a polynomial}. Since $\mathbf{J}^{(k)}_{\mathcal{C}}(a_1,\ldots,a_m) = (a_1,\ldots,(k-1)! a_{k-1})$ is surjective, $\mathbf{J}^{1,(k)}_u$ is surjective as well. This finishes the proof of the base case. We prove the induction step, i.e. that if $\mathcal{M}^{\ell,(k)}_{\mathrm{p}}$ is transversely cut out then so is $\mathcal{M}^{\ell+1,(k)}_{\mathrm{p}}$. We show that $\mathcal{M}^{\ell,(k)}_{\mathrm{p,reg}} \subset \mathcal{M}^{\ell+1,(k)}_{\mathrm{p,reg}}$. For this, it suffices to assume that $(j,u) \in \mathcal{M}^{\ell,(k)}_{\mathrm{p}}$ is such that $\mathbf{J}^{\ell,(k)}_{(j,u)}$ is surjective, and to prove that $\mathbf{J}^{\ell+1,(k)}_{(j,u)}$ is surjective as well. 
This follows because the diagrams \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \mathcal{M}^{\ell,(1)}_{\mathrm{p}} \ar[d] \ar[dr, "j^{\ell,(k)}"] & & & T_{(j,u)} \mathcal{M}^{\ell,(1)}_{\mathrm{p}} \ar[d] \ar[dr, "\mathbf{J}^{\ell,(k)}_u"] \\ \mathcal{M}^{\ell+1,(1)}_{\mathrm{p}} \ar[r, swap, "j^{\ell+1,(k)}"] & \C^{k-1} & & T_{(j,u)} \mathcal{M}^{\ell+1,(1)}_{\mathrm{p}} \ar[r, swap, "\mathbf{J}_u^{\ell+1,(k)}"] & \C^{k-1} \end{tikzcd} \end{IEEEeqnarray*} commute. Finally, we show that $\mathcal{M}^{\ell+1,(k)}_{\mathrm{p,reg}} = \mathcal{M}^{\ell+1,(k)}_{\mathrm{p}}$. \begin{IEEEeqnarray*}{rCls+x*} \mathcal{M}^{\ell+1,(k)}_{\mathrm{p,reg}} & \subset & \mathcal{M}^{\ell+1,(k)}_{\mathrm{p}} & \quad [\text{since regular curves form a subset}] \\ & = & \mathcal{M}^{\ell,(k)}_{\mathrm{p}} & \quad [\text{by \cref{lem:moduli spaces of ellipsoids are all equal}}] \\ & = & \mathcal{M}^{\ell,(k)}_{\mathrm{p,reg}} & \quad [\text{by the induction hypothesis}] \\ & \subset & \mathcal{M}^{\ell+1,(k)}_{\mathrm{p,reg}} & \quad [\text{proven above}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{proposition} \label{lem:moduli spaces of ellipsoids have 1 element} If $\ell = 1,\ldots,n$ then $\#^{\mathrm{vir}} \overline{\mathcal{M}}^{\ell,(m)} = \# \overline{\mathcal{M}}^{\ell,(m)} = 1$. \end{proposition} \begin{proof} By assumption on the perturbation scheme and \cref{prp:moduli spaces w tangency are tco}, $\#^{\mathrm{vir}} \overline{\mathcal{M}}^{\ell,(m)} = \# \overline{\mathcal{M}}^{\ell,(m)}$. Again by \cref{prp:moduli spaces w tangency are tco}, the moduli space $\mathcal{M}^{\ell,(m)}$ is transversely cut out and \begin{IEEEeqnarray*}{c} \dim \mathcal{M}^{\ell,(m)} = (\ell - 3)(2 - 1) + \conleyzehnder(\gamma_1^m) - 2 \ell - 2 m + 4 = 0, \end{IEEEeqnarray*} where in the second equality we have used \cref{lem:unique reeb orbit with cz equal to}. This implies that $\mathcal{M}^{\ell,(m)}$ is compact, and in particular $\# \overline{\mathcal{M}}^{\ell,(m)} = \# \mathcal{M}^{\ell,(m)}$. By \cref{lem:moduli spaces of ellipsoids are all equal}, $\# \mathcal{M}^{\ell,(m)} = \# \mathcal{M}^{1,(m)}$. It remains to show that $\# \mathcal{M}^{1,(m)} = 1$. For this, notice that $\mathcal{M}^{1,(m)}$ is the set of equivalence classes of pairs $(j,u)$, where $j$ is an almost complex structure on $\Sigma = S^2$ and $u \colon (\dot{\Sigma}, j) \longrightarrow (\hat{E}_1, J_1)$ is a holomorphic map such that \begin{enumerate} \item $u(z_0) = x_1$ and $u$ has contact order $m$ to $D_1$ at $x_1$; \item if $(s,t)$ are the cylindrical coordinates on $\dot{\Sigma}$ near $z_1$ such that $v_1$ agrees with the direction $t = 0$, then \begin{IEEEeqnarray*}{rrCls+x*} \lim_{s \to +\infty} & \pi_{\R} \circ u(s,t) & = & + \infty, \\ \lim_{s \to +\infty} & \pi_{\partial E_1} \circ u(s,t) & = & \gamma_1 (a_1 m t). \end{IEEEeqnarray*} \end{enumerate} Here, two pairs $(j_0, u_0)$ and $(j_1, u_1)$ are equivalent if there exists a biholomorphism $\phi \colon (\Sigma, j_0) \longrightarrow (\Sigma, j_1)$ such that \begin{IEEEeqnarray*}{c+x*} \phi(z_0) = z_0, \qquad \phi(z_1) = z_1, \qquad \dv \phi(z_1) v_1 = v_1. \end{IEEEeqnarray*} We claim that any two pairs $(j_0, u_0)$ and $(j_1, u_1)$ are equivalent. By \cref{lem:u is a polynomial}, the maps $\varphi \circ u_0 \circ \psi_{j_0}$ and $\varphi \circ u_1 \circ \psi_{j_1}$ are polynomials of degree $m$: \begin{IEEEeqnarray*}{rCls+x*} \varphi \circ u_0 \circ \psi_{j_0} (z) & = & a_0 + \cdots + a_m z^m, \\ \varphi \circ u_1 \circ \psi_{j_1} (z) & = & b_0 + \cdots + b_m z^m. 
\end{IEEEeqnarray*} Since $u_0$ and $u_1$ have contact order $m$ to $D_1$ at $x_1$, for every $\nu = 0,\ldots,m-1$ we have \begin{IEEEeqnarray*}{rCls+x*} 0 & = & (\varphi \circ u_0 \circ \psi_{j_0})^{(\nu)}(0) = \nu! a_{\nu}, \\ 0 & = & (\varphi \circ u_1 \circ \psi_{j_1})^{(\nu)}(0) = \nu! b_{\nu}. \end{IEEEeqnarray*} Since $u_0$ and $u_1$ have the same asymptotic behaviour, $\operatorname{arg}(a_m) = \operatorname{arg}(b_m)$. Hence, there exists $\lambda \in \R_{>0}$ such that $\lambda^m b_m = a_m$. Then, \begin{IEEEeqnarray*}{c+x*} u_1 \circ \psi_{j_1} (\lambda z) = u_0 \circ \psi_{j_0} (z). \end{IEEEeqnarray*} Therefore, $(j_0, u_0)$ and $(j_1, u_1)$ are equivalent and $\# \mathcal{M}^{1,(m)} = 1$. \end{proof} \begin{remark} In \cite[Proposition 3.4]{cieliebakPuncturedHolomorphicCurves2018}, Cieliebak and Mohnke show that the signed count of the moduli space of holomorphic curves in $\C P^n$ in the homology class $[\C P^1]$ which satisfy a tangency condition $\p{<}{}{\mathcal{T}^{(n)}x}$ equals $(n-1)!$. It is unclear how this count relates to the one of \cref{lem:moduli spaces of ellipsoids have 1 element}. \end{remark} Finally, we will use the results of this section to compute the augmentation map of the ellipsoid $E_n$. \begin{theorem} \label{thm:augmentation is nonzero} The augmentation map $\epsilon_m \colon CH_{n - 1 + 2m}(E_n) \longrightarrow \Q$ is an isomorphism. \end{theorem} \begin{proof} By \cref{lem:moduli spaces of ellipsoids have 1 element}, \cref{rmk:counts of moduli spaces with or without asy markers} and definition of the augmentation map, we have $\epsilon_m(\gamma^m_1) \neq 0$. By \cref{lem:lch of ellipsoid}, $\epsilon_m$ is an isomorphism. \end{proof} \section{Computations using contact homology} Finally, we use the tools developed in this chapter to prove \cref{conj:the conjecture} (see \cref{thm:my main theorem}). The proof we give is the same as that of \cref{lem:computation of cl}, with the update that we will use the capacity $\mathfrak{g}^{\leq 1}_{k}$ to prove that \begin{IEEEeqnarray*}{c+x*} \tilde{\mathfrak{g}}^{\leq 1}_k(X) \leq \mathfrak{g}^{\leq 1}_k(X) = \cgh{k}(X) \end{IEEEeqnarray*} for any nondegenerate Liouville domain $X$. Notice that in \cref{lem:computation of cl}, $\tilde{\mathfrak{g}}^{\leq 1}_k(X) \leq \cgh{k}(X)$ held because by assumption $X$ was a $4$-dimensional convex toric domain. We start by showing that $\tilde{\mathfrak{g}}^{\leq \ell}_k(X) \leq \mathfrak{g}^{\leq \ell}_k(X)$. This result has already been proven in \cite[Section 3.4]{mcduffSymplecticCapacitiesUnperturbed2022}, but we include a proof for the sake of completeness. \begin{theorem}[{\cite[Section 3.4]{mcduffSymplecticCapacitiesUnperturbed2022}}] \phantomsection\label{thm:g tilde vs g hat} If $X$ is a Liouville domain then \begin{IEEEeqnarray*}{c+x*} \tilde{\mathfrak{g}}^{\leq \ell}_k(X) \leq {\mathfrak{g}}^{\leq \ell}_k(X). \end{IEEEeqnarray*} \end{theorem} \begin{proof} By \cref{lem:can prove ineqs for ndg}, we may assume that $X$ is nondegenerate. Choose a point $x \in \itr X$ and a symplectic divisor $D$ through $x$. Let $J \in \mathcal{J}(X,D)$ be an almost complex structure on $\hat{X}$ and consider the bar complex $\mathcal{B}(CC(X)[-1])$, computed with respect to $J$. Suppose that $a > 0$ is such that the augmentation map \begin{IEEEeqnarray*}{c+x*} \epsilon_k \colon H(\mathcal{A}^{\leq a} \mathcal{B}^{\leq \ell}(CC(X)[-1])) \longrightarrow \Q \end{IEEEeqnarray*} is nonzero. 
By \cref{thm:g tilde two definitions}, it is enough to show that there exists a word of Reeb orbits $\Gamma = (\gamma_1,\ldots,\gamma_p)$ such that \begin{IEEEeqnarray*}{c+x*} p \leq \ell, \qquad \mathcal{A}(\Gamma) \leq a, \qquad \overline{\mathcal{M}}^{J}_{X}(\Gamma)\p{<}{}{\mathcal{T}^{(k)}x} \neq \varnothing. \end{IEEEeqnarray*} Choose a homology class $\beta \in H(\mathcal{A}^{\leq a} \mathcal{B}^{\leq \ell}(CC(X)[-1]))$ such that $\epsilon_k(\beta) \neq 0$. The element $\beta$ can be written as a finite linear combination of words of Reeb orbits $\Gamma = (\gamma_1,\ldots,\gamma_p)$, where every word has length $p \leq \ell$ and action $\mathcal{A}(\Gamma) \leq a$. One of the words in this linear combination, say $\Gamma = (\gamma_1,\ldots,\gamma_{p})$, is such that $\#^{\mathrm{vir}} \overline{\mathcal{M}}^{J}_{X}(\Gamma)\p{<}{}{\mathcal{T}^{(k)}x} \neq 0$, since otherwise we would have $\epsilon_k(\beta) = 0$. By assumption on the virtual perturbation scheme, $\overline{\mathcal{M}}^{J}_{X}(\Gamma)\p{<}{}{\mathcal{T}^{(k)}x}$ is nonempty. \end{proof} \begin{theorem} \label{thm:g hat vs gh} If $X$ is a Liouville domain such that $\pi_1(X) = 0$ and $2 c_1(TX) = 0$ then \begin{IEEEeqnarray*}{c+x*} {\mathfrak{g}}^{\leq 1}_k(X) = \cgh{k}(X). \end{IEEEeqnarray*} \end{theorem} \begin{proof} By \cref{lem:can prove ineqs for ndg}, we may assume that $X$ is nondegenerate. Let $E = E(a_1,\ldots,a_n)$ be an ellipsoid as in \cref{sec:augmentation map of an ellipsoid} such that there exists a strict exact symplectic embedding $\phi \colon E \longrightarrow X$. In \cite{bourgeoisEquivariantSymplecticHomology2016}, Bourgeois--Oancea define an isomorphism between linearized contact homology and positive $S^1$-equivariant symplectic homology, which we will denote by $\Phi_{\mathrm{BO}}$. This isomorphism commutes with the Viterbo transfer maps and respects the action filtration. In addition, the Viterbo transfer maps in linearized contact homology commute with the augmentation maps of \cref{def:augmentation map}. Therefore, there is a commutative diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} SH^{S^1,(\varepsilon,a]}_{n - 1 + 2k}(X) \ar[r, "\iota^{S^1,a}"] \ar[d, hook, two heads, swap, "\Phi_{\mathrm{BO}}^a"] & SH^{S^1,+}_{n - 1 + 2k}(X) \ar[r, "\phi_!^{S^1}"] \ar[d, hook, two heads, "\Phi_{\mathrm{BO}}"] & SH^{S^1,+}_{n - 1 + 2k}(E) \ar[d, hook, two heads, "\Phi_{\mathrm{BO}}"] \\ CH^{a}_{n - 1 + 2k}(X) \ar[r, "\iota^{a}"] \ar[d, equals] & CH_{n - 1 + 2k}(X) \ar[r, "\phi_{!}"] \ar[d, equals] & CH_{n - 1 + 2k}(E) \ar[d, hook, two heads, "{\epsilon}^E_k"] \\ CH^{a}_{n - 1 + 2k}(X) \ar[r, swap, "\iota^{a}"] & CH_{n - 1 + 2k}(X) \ar[r, swap, "{\epsilon}_k^X"] & \Q \end{tikzcd} \end{IEEEeqnarray*} Here, the map ${\epsilon}_k^E$ is nonzero, or equivalently an isomorphism, by \cref{thm:augmentation is nonzero}. Then, \begin{IEEEeqnarray*}{rCls+x*} \cgh{k}(X) & = & \inf \{ a > 0 \mid \phi_!^{S^1} \circ \iota^{S^1,a} \neq 0 \} & \quad [\text{by \cref{def:ck alternative}}] \\ & = & \inf \{ a > 0 \mid {\epsilon}_k^X \circ \iota^{a} \neq 0 \} & \quad [\text{since the diagram commutes}] \\ & = & {\mathfrak{g}}^{\leq 1}_k(X) & \quad [\text{by \cref{def:capacities glk}}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{theorem} \phantomsection\label{thm:my main theorem} Under \cref{assumption}, if $X_\Omega$ is a convex or concave toric domain then \begin{IEEEeqnarray*}{c+x*} c_L(X_{\Omega}) = \delta_\Omega. \end{IEEEeqnarray*} \end{theorem} \begin{proof} Since $X_{\Omega}$ is concave or convex, we have $X_{\Omega} \subset N(\delta_\Omega)$. 
For every $k \in \Z_{\geq 1}$, \begin{IEEEeqnarray*}{rCls+x*} \delta_\Omega & \leq & c_P(X_{\Omega}) & \quad [\text{by \cref{lem:c square geq delta}}] \\ & \leq & c_L(X_{\Omega}) & \quad [\text{by \cref{lem:c square leq c lag}}] \\ & \leq & \frac{\tilde{\mathfrak{g}}^{\leq 1}_{k}(X_{\Omega})}{k} & \quad [\text{by \cref{thm:lagrangian vs g tilde}}] \\ & \leq & \frac{{\mathfrak{g}}^{\leq 1}_{k}(X_{\Omega})}{k} & \quad [\text{by \cref{thm:g tilde vs g hat}}] \\ & = & \frac{\cgh{k}(X_{\Omega})}{k} & \quad [\text{by \cref{thm:g hat vs gh}}] \\ & \leq & \frac{\cgh{k}(N(\delta_\Omega))}{k} & \quad [\text{since $X_{\Omega} \subset N(\delta_\Omega)$}] \\ & = & \frac{\delta_\Omega(k+n-1)}{k} & \quad [\text{by \cref{lem:cgh of nondisjoint union of cylinders}}]. \end{IEEEeqnarray*} The result follows by taking the infimum over $k$. \end{proof} \chapter{Symplectic and contact manifolds} \label{chp:symplectic manifolds} \section{Symplectic manifolds} In this section, we recall some basics about symplectic manifolds. \begin{definition} \label{def:symplectic manifold} A \textbf{symplectic manifold} is a manifold $X$ together with a $2$-form $\omega$ which is closed and nondegenerate. In this case we say that $\omega$ is a \textbf{symplectic form}. An \textbf{exact symplectic manifold} is a manifold $X$ together with a $1$-form $\lambda$ such that $\omega = \edv \lambda$ is a symplectic form. In this case we call $\lambda$ a \textbf{symplectic potential} for $\omega$. \end{definition} \begin{example} \label{exa:cn symplectic} Consider $\C^n$ with coordinates $(x^1, \ldots, x^n, y^1, \ldots, y^n)$, where $z^j = x^j + i y^j$ for every $j = 1, \ldots, n$. We define \begin{IEEEeqnarray*}{rCls+x*} \lambda & \coloneqq & \frac{1}{2} \sum_{j=1}^{n} (x^j \edv y^j - y^j \edv x^j), \\ \omega & \coloneqq & \edv \lambda = \sum_{j=1}^{n} \edv x^j \wedge \edv y^j. \end{IEEEeqnarray*} Then, $(\C^n, \lambda)$ is an exact symplectic manifold. \end{example} \begin{example} \label{exa:cotangent bundle} Let $L$ be a manifold and consider the \textbf{cotangent bundle} of $L$, which is a vector bundle $\pi \colon T^*L \longrightarrow L$. As a set, $T^*L = \bigunion_{q \in L}^{} T^*_qL$. As a vector bundle, $T^*L$ is given as follows. For each coordinate chart $(U,q^1,\ldots,q^n)$ on $L$, there is a coordinate chart $(\pi ^{-1}(U),q^1 \circ \pi,\ldots,q^n \circ \pi,p_1,\ldots,p_n)$ on $T^*L$, where the $p_i$ are given by \begin{IEEEeqnarray*}{c} p_i(u) \coloneqq u \p{}{2}{ \pdv{}{q^i} \Big|_{\pi(u)} } \end{IEEEeqnarray*} for $u \in T^*L$. For simplicity, denote $q^i = q^i \circ \pi$. Define a 1-form $\lambda$ on $T^*L$, called the \textbf{canonical symplectic potential} or \textbf{Liouville $1$-form}, as follows. For each $u \in T^*L$, the linear map $\lambda _{u} \colon T _{u} T^*L \longrightarrow \R$ is given by $\lambda_{u} \coloneqq u \circ \dv \pi(u)$. The form $\omega \coloneqq \edv \lambda$ is the \textbf{canonical symplectic form}. In coordinates, \begin{IEEEeqnarray*}{rCls+x*} \lambda & = & \sum_{i=1}^{n} p_i \edv q^i, \\ \omega & = & \sum_{i=1}^{n} \edv p_i \wedge \edv q^i. \end{IEEEeqnarray*} Then, $(T^*L,\lambda)$ is an exact symplectic manifold. \end{example} If $(X, \omega)$ is a symplectic manifold, then using symplectic linear algebra we conclude that $X$ must be even dimensional, i.e. $\dim X = 2n$ for some $n$ (see for example \cite[Theorem 1.1]{silvaLecturesSymplecticGeometry2008}). In particular, $\omega^n$ is a volume form on $X$. 
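As a concrete illustration of \cref{exa:cn symplectic} (a small numerical sketch, not used elsewhere in the text), the matrix of $\omega = \sum_{j=1}^{n} \edv x^j \wedge \edv y^j$ in the basis $(\partial_{x^1}, \ldots, \partial_{x^n}, \partial_{y^1}, \ldots, \partial_{y^n})$ is the standard block matrix $\bigl(\begin{smallmatrix} 0 & I \\ -I & 0 \end{smallmatrix}\bigr)$, whose determinant equals $1$; this nondegeneracy is exactly what makes $\omega^n$ a volume form.
\begin{verbatim}
import numpy as np

def omega_matrix(n):
    # matrix of omega = sum_j dx^j wedge dy^j in the basis
    # (d/dx^1, ..., d/dx^n, d/dy^1, ..., d/dy^n)
    M = np.zeros((2 * n, 2 * n))
    M[:n, n:] = np.eye(n)
    M[n:, :n] = -np.eye(n)
    return M

for n in (1, 2, 3):
    # prints 1.0 (up to rounding): omega is nondegenerate for every n
    print(n, np.linalg.det(omega_matrix(n)))
\end{verbatim}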
\begin{definition} \label{def:types of embeddings} Let $(X,\omega_X)$, $(Y,\omega_Y)$ be symplectic manifolds and $\varphi \colon X \longrightarrow Y$ be an embedding. Then, $\varphi$ is \textbf{symplectic} if $\varphi^* \omega_Y = \omega_X$. A \textbf{symplectomorphism} is a symplectic embedding which is a diffeomorphism. We say that $\varphi$ is \textbf{strict} if $\varphi(X) \subset \itr Y$. If $(X,\lambda_X)$, $(Y,\lambda_Y)$ are exact, then we say that $\varphi$ is: \begin{enumerate} \item \label{def:types of embeddings 1} \textbf{symplectic} if $\varphi^* \lambda_Y - \lambda_X$ is closed (this is equivalent to the previous definition); \item \label{def:types of embeddings 2} \textbf{generalized Liouville} if $\varphi^* \lambda_Y - \lambda_X$ is closed and $(\varphi^* \lambda_Y - \lambda_X)|_{\partial X}$ is exact; \item \label{def:types of embeddings 3} \textbf{exact symplectic} if $\varphi^* \lambda_Y - \lambda_X$ is exact; \item \label{def:types of embeddings 4} \textbf{Liouville} if $\varphi^* \lambda_Y - \lambda_X = 0$. \end{enumerate} \end{definition} \begin{remark} \label{rmk:closed equivalent to exact} In the context of \cref{def:types of embeddings}, if $H^1_{\mathrm{dR}}(X) = 0$ then \ref{def:types of embeddings 1} $\Longleftrightarrow$ \ref{def:types of embeddings 2} $\Longleftrightarrow$ \ref{def:types of embeddings 3}. \end{remark} \begin{remark} The composition of generalized Liouville embeddings is not necessarily a generalized Liouville embedding. This means that exact symplectic manifolds together with generalized Liouville embeddings do not form a category. \end{remark} \begin{definition} Let $(X,\omega)$ be a symplectic manifold of dimension $2n$ and $\iota \colon L \longrightarrow X$ be an immersed submanifold of dimension $n$. Then, $L$ is \textbf{Lagrangian} if $\iota^* \omega = 0$. If $(X,\lambda)$ is exact, then we say that $L$ is: \begin{enumerate} \item \textbf{Lagrangian} if $\iota^* \lambda$ is closed (this is equivalent to the previous definition); \item \textbf{exact Lagrangian} if $\iota^* \lambda$ is exact. \end{enumerate} \end{definition} \begin{example} Let $L$ be a manifold and consider its cotangent bundle, $T^*L$. Then, the zero section $z \colon L \longrightarrow T^*L$ is an exact Lagrangian. In fact, $z^* \lambda = 0$. \end{example} \begin{lemma}[Moser's trick] \label{lem:mosers trick} Let $X$ be a manifold, $\alpha_t$ be a smooth $1$-parameter family of forms on $X$ and $Y_t$ be a complete time dependent vector field on $X$ with flow $\phi_t$. Then, \begin{equation*} \phi^*_t \alpha_t^{} - \alpha_0^{} = \int_{0}^{t} \phi^*_s \p{}{1}{ \dot{\alpha}_s + \ldv{Y_s} \alpha_s } \edv s = \int_{0}^{t} \phi^*_s \p{}{1}{ \dot{\alpha}_s + \edv \iota _{Y_s} \alpha_s + \iota _{Y_s} \edv \alpha_s } \edv s. \end{equation*} \end{lemma} \begin{proof} \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\phi^*_t \alpha_t^{} - \alpha_0^{}}\\ \quad & = & \phi^*_t \alpha_t^{} - \phi^*_0 \alpha_0^{} & \quad [\text{since $\phi_0 = \id$}] \\ & = & \int_{0}^{t} \odv{}{s} \phi^*_s \alpha_s \, \edv s & \quad [\text{by the fundamental theorem of calculus}] \\ & = & \int_{0}^{t} \phi^*_s \p{}{1}{ \dot{\alpha}_s + \ldv{Y_s} \alpha_s } \edv s & \quad [\text{by definition of Lie derivative}] \\ & = & \int_{0}^{t} \phi^*_s \p{}{1}{ \dot{\alpha}_s + \edv \iota _{Y_s} \alpha_s + \iota _{Y_s} \edv \alpha_s } \edv s & \quad [\text{by the Cartan magic formula}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{theorem}[Darboux] Let $(X,\omega)$ be a symplectic manifold. 
Then, for every $p \in X$, there exists a coordinate neighbourhood $(U,x^1,\ldots,x^n,y^1,\ldots,y^n)$ of $p$ such that \begin{equation*} \omega = \sum_{i=1}^{n} \edv x^i \wedge \edv y^i. \end{equation*} \end{theorem} \begin{proof} Taking a coordinate chart on $X$, it is enough to assume that $\omega_0$, $\omega_1$ are symplectic forms on a neighbourhood of $0$ in $\C^n$ and to prove that there exists a local diffeomorphism $\phi$ of $\C^n$ such that $\phi^* \omega_1 = \omega_0$. Choosing the initial coordinate chart carefully, we may assume in addition that $\omega_j$ has a primitive $\lambda_j$, i.e. $\omega_j = \edv \lambda_j$, for $j = 0, 1$, and also that $\omega_0$ and $\omega_1$ are equal at $0 \in \C^n$, i.e. $\omega_0|_0 = \omega_1|_0$. Let \begin{IEEEeqnarray*}{rCls+x*} \lambda_t & \coloneqq & \lambda_0 + t (\lambda_1 - \lambda_0), \\ \omega_t & \coloneqq & \edv \lambda_t = \omega_0 + t (\omega_1 - \omega_0). \end{IEEEeqnarray*} Since $\omega_t|_0 = \omega_0|_0$ is symplectic, possibly after passing to a smaller neighbourhood of $0$ we may assume that $\omega_t$ is symplectic. Let $Y_t$ be the unique time-dependent vector field such that $\dot{\lambda}_t + \iota_{Y_t} \omega_t = 0$ and denote by $\phi_t$ the flow of $Y_t$. Then, \begin{IEEEeqnarray*}{rCls+x*} \phi^*_t \omega_t^{} - \omega_0^{} & = & \int_{0}^{t} \phi^*_s \p{}{}{ \dot{\omega}_s + \edv \iota _{Y_s} \omega_s + \iota _{Y_s} \edv \omega_s } \edv s & \quad [\text{by Moser's trick (\cref{lem:mosers trick})}] \\ & = & \int_{0}^{t} \phi^*_s \edv \p{}{}{ \dot{\lambda}_s + \iota _{Y_s} \omega_s } \edv s & \quad [\text{since $\omega_s = \edv \lambda_s$}] \\ & = & 0 & \quad [\text{by definition of $Y_t$}], \end{IEEEeqnarray*} which shows that $\phi_1$ is the desired local diffeomorphism. \end{proof} \begin{definition} \label{def:liouville vf} If $(X,\lambda)$ is an exact symplectic manifold, then the \textbf{Liouville vector field} of $(X,\lambda)$ is the unique vector field $Z$ such that \begin{IEEEeqnarray*}{c} \lambda = \iota_Z \omega. \end{IEEEeqnarray*} \end{definition} \begin{lemma} \label{lem:liouville vf} The Liouville vector field satisfies \begin{IEEEeqnarray*}{c} \ldv{Z} \lambda = \lambda. \end{IEEEeqnarray*} \end{lemma} \begin{proof} \begin{IEEEeqnarray*}{rCls+x*} \ldv{Z} \lambda & = & \edv \iota_Z \lambda + \iota_Z \edv \lambda & \quad [\text{by the Cartan magic formula}] \\ & = & \edv \iota_Z \lambda + \iota_Z \omega & \quad [\text{since $\omega = \edv \lambda$}] \\ & = & \edv \iota_Z \iota_Z \omega + \lambda & \quad [\text{by definition of Liouville vector field, $\lambda = \iota_Z \omega$}] \\ & = & \lambda & \quad [\text{since $\omega$ is antisymmetric, $\iota_Z \iota_Z \omega = 0$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{definition} \label{def:Hamiltonian v field} Let $H \in C^\infty(X,\R)$ be a function on $X$. The \textbf{Hamiltonian vector field} of $H$, denoted $X_H$, is the unique vector field on $X$ satisfying \begin{IEEEeqnarray*}{c} \edv H = -\iota _{X_H} \omega. \end{IEEEeqnarray*} \end{definition} \begin{proposition} \phantomsection\label{lem:hamiltonian vector field preserves symplectic form} The Hamiltonian vector field preserves the symplectic form, i.e. \begin{IEEEeqnarray*}{c} \ldv{X_H} \omega = 0. 
\end{IEEEeqnarray*} \end{proposition} \begin{proof} \begin{IEEEeqnarray*}{rCls+x*} \ldv{X_H} \omega & = & \edv \iota_{X_H} \omega + \iota_{X_H} \edv \omega & \quad [\text{by the Cartan magic formula}] \\ & = & \edv \iota_{X_H} \omega & \quad [\text{since $\omega$ is closed}] \\ & = & - \edv^2 H & \quad [\text{by definition of $X_H$}] \\ & = & 0 & \quad [\text{since $\edv^2 = 0$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{proposition}[Liouville's theorem] The Hamiltonian vector field preserves the symplectic volume form, i.e. \begin{equation*} \ldv{X_H} \p{}{2}{\frac{\omega^n}{n!}} = 0. \end{equation*} \end{proposition} \begin{proof} By \cref{lem:hamiltonian vector field preserves symplectic form} and the fact that Lie derivatives obey the Leibniz rule. \end{proof} \begin{proposition}[conservation of energy] \label{lem:conservation of energy} The Hamiltonian is constant along the Hamiltonian vector field, i.e. \begin{IEEEeqnarray*}{c} X_H(H) = 0. \end{IEEEeqnarray*} \end{proposition} \begin{proof} \begin{IEEEeqnarray*}{rCls+x*} X_H(H) & = & \edv H(X_H) & \quad [\text{by definition of exterior derivative}] \\ & = & - \iota_{X_H} \omega (X_H) & \quad [\text{by definition of $X_H$}] \\ & = & - \omega(X_H, X_H) & \quad [\text{by definition of interior product}] \\ & = & 0 & \quad [\text{since $\omega$ is antisymmetric}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \section{Contact manifolds} In this section, we recall some basics about contact manifolds. \begin{definition} \label{def:contact manifold} A \textbf{contact manifold} is a pair $(M,\xi)$, where $M$ is a smooth manifold and $\xi$ is a distribution on $M$ of codimension 1, called the \textbf{contact structure}, such that for all locally defining forms $\alpha \in \Omega^1(U)$ for $\xi$ (i.e. such that $\xi = \ker \alpha$), $\edv \alpha |_{\xi}$ is nondegenerate. In this case we call $\alpha$ a \textbf{local contact form} for $M$. In the case where $\alpha \in \Omega^1(M)$ we say that $\alpha$ is a \textbf{global contact form} for $M$. A \textbf{strict contact manifold} is a pair $(M,\alpha)$ such that $(M,\ker \alpha)$ is a contact manifold. \end{definition} The following lemma characterizes the linear algebra of contact manifolds. \begin{lemma} \label{lem:contact manifold} Let $M$ be an $m$-dimensional manifold, $\alpha \in \Omega^1(M)$ be nonvanishing and $\xi = \ker \alpha$. Then, the following are equivalent: \begin{enumerate} \item \label{lem:contact manifold 1} The form $\edv \alpha |_{\xi}$ is nondegenerate, i.e. $(M,\alpha)$ is a contact manifold; \item \label{lem:contact manifold 3} The tangent bundle of $M$ decomposes as $T M = \ker \edv \alpha \directsum \ker \alpha$; \item \label{lem:contact manifold 2} There exists an $n \in \Z_{\geq 0}$ such that $m = 2n + 1$ and $\alpha \wedge (\edv \alpha)^{n}$ is a volume form. \end{enumerate} \end{lemma} \begin{proof} {\ref{lem:contact manifold 1}} $\Longrightarrow$ {\ref{lem:contact manifold 3}}: We show that $\ker \edv \alpha \cap \ker \alpha = 0$. For this, it suffices to assume that $v \in \ker \edv \alpha \cap \ker \alpha$ and to prove that $v = 0$. Since $\edv \alpha|_{\ker \alpha}(v) = 0$ and $\edv \alpha|_{\ker \alpha}$ is nondegenerate we conclude that $v = 0$. We show that $\dim TM = \dim \ker \edv \alpha + \dim \ker \alpha$. Since $\alpha$ is nonvanishing, $\dim \ker \alpha = \dim TM - 1$. Since $\ker \edv \alpha \cap \ker \alpha = 0$, this implies that $\dim \ker \edv \alpha \in \{0,1\}$. 
Since $\edv \alpha|_{\ker \alpha}$ is nondegenerate, $\ker \alpha$ is even dimensional, and therefore $\dim TM = \dim \ker \alpha + 1$ is odd. Since every antisymmetric bilinear form on an odd dimensional vector space is degenerate, we conclude that $\edv \alpha|_{TM}$ is degenerate. Therefore, $\dim \ker \edv \alpha = 1$. {\ref{lem:contact manifold 3}} $\Longrightarrow$ {\ref{lem:contact manifold 2}}: Since $T M = \ker \edv \alpha \oplus \ker \alpha$, we conclude that the forms $\alpha|_{\ker \edv \alpha}$ and $\edv \alpha|_{\ker \alpha}$ are nondegenerate. In particular, $\ker \alpha$ is even dimensional, i.e. $\dim \ker \alpha = 2n$ for some $n$, and $(\edv \alpha|_{\ker \alpha})^n$ is a volume form on $\ker \alpha$. So, $\alpha \wedge (\edv \alpha)^n$ is a volume form on $M$. {\ref{lem:contact manifold 2}} $\Longrightarrow$ {\ref{lem:contact manifold 1}}: If $v \in \xi = \ker \alpha$ is such that $v \in \ker \edv \alpha|_{\xi}$, then $\iota_v (\alpha \wedge (\edv \alpha)^n) = 0$, which implies that $v = 0$. \end{proof} \begin{definition} Let $(M,\xi_M)$, $(N,\xi_N)$ be contact manifolds. A \textbf{contactomorphism} from $M$ to $N$ is a diffeomorphism $\phi \colon M \longrightarrow N$ such that $T \phi(\xi_M) = \xi_N$. If $(M,\alpha_M)$, $(N,\alpha_N)$ are strict contact manifolds, a \textbf{strict contactomorphism} from $M$ to $N$ is a diffeomorphism $\phi \colon M \longrightarrow N$ such that $\phi^* \alpha_N = \alpha_M$. \end{definition} \begin{remark} We will consider only strict contact manifolds and strict contactomorphisms, and for simplicity we will drop the word ``strict'' from our nomenclature. \end{remark} \begin{definition} \label{def:Reeb vector field} The \textbf{Reeb vector field} of $(M,\alpha)$ is the unique vector field $R$ satisfying \begin{IEEEeqnarray*}{rCls+x*} \iota_R \edv \alpha & = & 0, \\ \iota_R \alpha & = & 1. \end{IEEEeqnarray*} \end{definition} \begin{remark} \cref{lem:contact manifold} {\ref{lem:contact manifold 3}} can also be written as $TM = \p{<}{}{R} \directsum \xi$. \end{remark} \begin{lemma} \label{lem:reeb vf preserves contact form} The Reeb vector field preserves the contact form, i.e. \begin{IEEEeqnarray*}{c+x*} \ldv{R} \alpha = 0. \end{IEEEeqnarray*} \end{lemma} \begin{proof} \begin{IEEEeqnarray*}{rCls+x*} \ldv{R} \alpha & = & \iota _{R} \edv \alpha + \edv \iota _{R} \alpha & \quad [\text{by the Cartan magic formula}] \\ & = & 0 + \edv 1 & \quad [\text{by definition of $R$}] \\ & = & 0. & \quad & \qedhere \end{IEEEeqnarray*} \end{proof} We now consider contact manifolds which are hypersurfaces of symplectic manifolds. \begin{definition} \label{def:hypersurface of contact type} Let $(X,\omega)$ be a symplectic manifold of dimension $2n$, $(M, \alpha)$ be a contact manifold of dimension $2n - 1$ such that $M \subset X$, and denote by $\iota \colon M \longrightarrow X$ the inclusion. We say that $M$ is a \textbf{hypersurface of contact type} if $\edv \alpha = \iota^* \omega$. In this case, the \textbf{Liouville vector field} is the unique vector field $Z \in C^{\infty}(\iota^* TX)$ such that \begin{IEEEeqnarray*}{c+x*} \iota_Z \omega = \alpha. \end{IEEEeqnarray*} \end{definition} \begin{example} Let $(L,g)$ be a Riemannian manifold. Recall that $(T^*L, \lambda)$ is an exact symplectic manifold. Consider the \textbf{unit cotangent bundle} \begin{IEEEeqnarray*}{c+x*} S^* L \coloneqq \{ u \in T^* L \mid \| u \| = 1 \}. \end{IEEEeqnarray*} The form $\alpha \coloneqq \lambda|_{S^*L}$ is a contact form on $S^* L$. Therefore, $(S^*L, \alpha) \subset (T^*L, \lambda)$ is a hypersurface of contact type. 
More generally, for every $r > 0$ we can also define the cotangent sphere bundle of radius $r$ by $S^*_r L \coloneqq \{ u \in T^* L \mid \| u \| = r \}$, which is also a hypersurface of contact type. \end{example} \begin{lemma} \label{lem:decomposition coming from contact hypersurface} We have the decompositions \begin{IEEEeqnarray*}{rCls+x*} \iota^* TX & = & \p{<}{}{Z} \directsum \p{<}{}{R} \directsum \xi, \\ TM & = & \p{<}{}{R} \directsum \xi, \\ \xi^\perp & = & \p{<}{}{Z} \directsum \p{<}{}{R}. \end{IEEEeqnarray*} \end{lemma} \begin{proof} By \cref{lem:contact manifold}, we have that $TM = \p{<}{}{R} \directsum \xi$. To show that $\xi^\perp = \p{<}{}{Z} \directsum \p{<}{}{R}$, by considering the rank of the vector bundles it suffices to show that $\p{<}{}{Z} \directsum \p{<}{}{R} \subset \xi^\perp$. Let $v \in \xi_p = \ker \alpha_p$. We wish to show that $\omega(Z_p, v) = 0$ and $\omega(R_p, v) = 0$. \begin{IEEEeqnarray*}{rCls+x*} \omega(Z_p, v) & = & \alpha(v) & \quad [\text{by definition of $Z$}] \\ & = & 0 & \quad [\text{since $v \in \ker \alpha_p$}], \\ \\ \omega(R_p, v) & = & \edv \alpha(R_p, v) & \quad [\text{by definition of hypersurface of contact type}] \\ & = & 0 & \quad [\text{by definition of Reeb vector field}]. \end{IEEEeqnarray*} Then, as oriented vector bundles, $\iota^* TX = \xi^\perp \directsum \xi = \p{<}{}{Z} \directsum \p{<}{}{R} \directsum \xi$. \end{proof} \begin{lemma} \label{lem:HR flow} Let $H \colon X \longrightarrow \R$ and assume that $M$ is the preimage under $H$ of a regular value $c \in \R$, i.e. $M = H^{-1}(c)$. Then, there exists a unique vector field $X_H^M$ on $M$ which is $\iota$-related to $X_H$. In addition, $X_H^M = \alpha(X_H^M) R$. \end{lemma} \begin{proof} To prove the first statement, it suffices to show that $X_H|_p \in T_p M$ for every $p \in M$. By conservation of energy (\cref{lem:conservation of energy}), we have that \begin{IEEEeqnarray*}{rCls+x*} X_H|_p & \in & \ker \edv H(p) \\ & = & T_p (H ^{-1}(c)) \\ & = & T_p M. \end{IEEEeqnarray*} We now show that $\iota_{X_H^M} \edv \alpha = 0$. \begin{IEEEeqnarray*}{rCls+x*} \iota _{X_H^ M} \edv \alpha & = & \iota _{X_H^ M} \iota^* \omega & \quad [\text{by definition of hypersurface of contact type}] \\ & = & \iota^* \iota _{X_H} \omega & \quad [\text{since $X_H^M$ is $\iota$-related to $X_H$}] \\ & = & - \iota^* \edv H & \quad [\text{by definition of Hamiltonian vector field}] \\ & = & - \edv \iota^* H & \quad [\text{by naturality of $\edv$}] \\ & = & 0 & \quad [\text{since $H$ is constant equal to $c$ on $M$}]. \end{IEEEeqnarray*} By definition of Reeb vector field, we conclude that $X_H^M$ and $R$ are collinear, and in particular $X_H^M = \alpha(X_H^M) R$. \end{proof} We now compare the dynamics from the points of view of Riemannian, symplectic and contact geometry. Let $(L,g)$ be a Riemannian manifold of dimension $n$. The manifold $L$ has a tangent bundle $TL$ and a cotangent bundle $T^*L$, and the map $\tilde{g} \colon TL \longrightarrow T^*L$ given by $\tilde{g}(v) = g(v,\cdot)$ is a vector bundle isomorphism. Consider the unit cotangent bundle $\iota \colon S^*L \longrightarrow T^*L$, which has a Reeb vector field $R$, and the function \begin{IEEEeqnarray*}{rrCl} H \colon & T^*L & \longrightarrow & \R \\ & u & \longmapsto & \frac{1}{2} \p{||}{}{u}_{}^2. \end{IEEEeqnarray*} \begin{definition} We define a vector field $G$ on $TL$, called the \textbf{geodesic field}, as follows. 
At $v \in TL$, $G _{v}$ is given by \begin{equation*} G _{v} \coloneqq \odv{}{t}\Big|_{t=0} \dot{\gamma}(t), \end{equation*} where $\gamma \colon I \longrightarrow L$ is the unique geodesic with $\dot{\gamma}(0) = v$ and $\dot{\gamma} \colon I \longrightarrow TL$ is the lift of $\gamma$. \end{definition} A curve $\gamma$ in $L$ is a geodesic if and only if its lift $\dot{\gamma}$ to $TL$ is a flow line of $G$. \begin{theorem} \label{thm:flow geodesic vs hamiltonian} The vector field $G$ is $\tilde{g}$-related to $X_H$. \end{theorem} \begin{proof} See for example \cite[Theorem 1.5.2]{geigesIntroductionContactTopology2008} or \cite[Theorem 2.3.1]{frauenfelderRestrictedThreeBodyProblem2018}. \end{proof} \begin{theorem} \label{thm:flow reeb vs hamiltonian} The vector field $R$ is $\iota$-related to $X_H$. \end{theorem} \begin{proof} Notice that $S^*L = H^{-1}(1/2)$. By \cref{lem:HR flow}, it suffices to show that $\lambda(X_H) \circ \iota = 1$. Let $(q^1, \ldots, q^n)$ be coordinates on $L$, with induced coordinates $(q^1, \ldots, q^n, p_1, \ldots, p_n)$ on $T^* L$. With respect to these coordinates, $X_H$ can be written as \begin{IEEEeqnarray}{rCls+x*} X_H & = & \sum_{i = 1}^{n} \p{}{2}{ \pdv{H}{p_i} \pdv{}{q^i} - \pdv{H}{q^i} \pdv{}{p_i} } \IEEEnonumber \\ & = & \sum_{i = 1}^{n} \p{}{2}{ \sum_{j=1}^{n} g^{ij} p_j \pdv{}{q^i} - \frac{1}{2} \sum_{j,k=1}^{n} \pdv{g^{jk}}{q^i} p_j p_k \pdv{}{p_i} }. \plabel{eq:hamiltonian vector field in coordinates} \end{IEEEeqnarray} We show that $\p{<}{}{\dv \pi(u) X_H|_{u}, \cdot } = u$. \begin{IEEEeqnarray*}{rCls+x*} \p{<}{}{\dv \pi (u) X_{H}|_{u}, v} & = & \sum_{i,j=1}^{n} g _{ij} (\dv \pi (u) X_{H}|_{u})^i v^j \\ & = & \sum_{i,j,k=1}^{n} g _{ij} g ^{ik} p_k v^j \\ & = & \sum_{j,k=1}^{n} \delta^k_j p_k v^j \\ & = & \sum_{j=1}^{n} p_j v^j \\ & = & \sum_{i=1}^{n} p_i \edv q^i \p{}{2}{ \sum_{j=1}^{n} v^j \pdv{}{q^j} } \\ & = & u(v). \end{IEEEeqnarray*} We show that $\lambda(X_H) = 2 H$: \begin{IEEEeqnarray*}{rCls+x*} \lambda(X_{H})|_{u} & = & u (\dv \pi (u) X_{H}|_{u}) & \quad [\text{by definition of $\lambda$}] \\ & = & \p{<}{}{ \dv \pi (u) X_{H}|_{u},\dv \pi (u) X_{H}|_{u} } & \quad [\text{since $u = \p{<}{}{\dv \pi(u) X_H|_{u}, \cdot }$}] \\ & = & \p{||}{}{ \dv \pi (u) X_{H}|_{u} }^2 & \quad [\text{by definition of the norm}] \\ & = & \p{||}{}{u}^2 & \quad [\text{since $u = \p{<}{}{\dv \pi(u) X_H|_{u}, \cdot }$}] \\ & = & 2 H (u) & \quad [\text{by definition of $H$}]. \end{IEEEeqnarray*} By definition of $H$, this implies that $\lambda(X_H) \circ \iota = 1$, as desired. \end{proof} \section{Liouville domains} In this section we introduce Liouville domains, which are going to be the main type of symplectic manifold we will work with. \begin{definition} \label{def:liouville domain} A \textbf{Liouville domain} is a pair $(X,\lambda)$, where $X$ is a compact, connected smooth manifold with boundary $\del X$ and $\lambda \in \Omega^1(X)$ is such that $\edv \lambda \in \Omega^2(X)$ is symplectic, $\lambda|_{\del X}$ is contact and the orientations on $\del X$ coming from $(X,\edv \lambda)$ and coming from $\lambda|_{\del X}$ are equal. \end{definition} \begin{example} Let $(L,g)$ be a Riemannian manifold. The \textbf{unit codisk bundle}, \begin{IEEEeqnarray*}{c+x*} D^* L \coloneqq \{ u \in T^*L \mid \| u \| \leq 1 \}, \end{IEEEeqnarray*} is a Liouville domain. More generally, we can define the codisk bundle of radius $r > 0$ by $D^*_r L \coloneqq \{ u \in T^*L \mid \| u \| \leq r \}$, which is also a Liouville domain. 
\end{example} \begin{definition} \label{def:star shaped} A \textbf{star-shaped domain} is a compact, connected $2n$-dimensional submanifold $X$ of $\C^{n}$ with boundary $\del X$ such that $(X,\lambda)$ is a Liouville domain, where $\lambda$ is the symplectic potential of \cref{exa:cn symplectic}. \end{definition} \begin{definition} \label{def:moment map} The \textbf{moment map} is the map $\mu \colon \C^n \longrightarrow \R^n _{\geq 0}$ given by \begin{IEEEeqnarray*}{c+x*} \mu(z_1,\ldots,z_n) \coloneqq \pi(|z_1|^2,\ldots,|z_n|^2). \end{IEEEeqnarray*} Define also \begin{IEEEeqnarray*}{rCrClClrCl} \Omega_X & \coloneqq & \Omega(X) & \coloneqq & \hphantom{{}^{-1}} \mu(X) \subset \R_{\geq 0}^n, & \qquad & \text{for every } & X & \subset & \C^n, \\ X_{\Omega} & \coloneqq & X(\Omega) & \coloneqq & \mu^{-1}(\Omega) \subset \C^n, & \qquad & \text{for every } & \Omega & \subset & \R^{n}_{\geq 0}, \\ \delta_{\Omega} & \coloneqq & \delta(\Omega) & \coloneqq & \sup \{ a \mid (a, \ldots, a) \in \Omega \}, & \qquad & \text{for every } & \Omega & \subset & \R^{n}_{\geq 0}. \end{IEEEeqnarray*} We call $\delta_\Omega$ the \textbf{diagonal} of $\Omega$. \end{definition} \begin{definition} \label{def:toric domain} A \textbf{toric domain} is a star-shaped domain $X$ such that $X = X(\Omega(X))$. A toric domain $X = X _{\Omega}$ is \begin{enumerate} \item \textbf{convex} if $\hat{\Omega} \coloneqq \{ (x_1, \ldots, x_n) \in \R^n \mid (|x_1|,\ldots,|x_n|) \in \Omega \} $ is convex; \item \textbf{concave} if $\R^n _{\geq 0} \setminus \Omega$ is convex. \end{enumerate} \end{definition} \begin{example} \phantomsection\label{exa:toric domains} Here we give some examples of toric domains. See \cref{fig:Toric domains} for a picture of the examples given below. \begin{enumerate} \item The \textbf{ellipsoid} is the convex and concave toric domain given by \begin{IEEEeqnarray*}{rCls+x*} E(a_1,\ldots,a_n) & \coloneqq & \p{c}{2}{ (z_1,\ldots,z_n) \in \C^n \ \Big| \ \sum_{j=1}^{n} \frac{\pi |z_j|^2}{a_j} \leq 1 } \\ \Omega_E(a_1,\ldots,a_n) & \coloneqq & \p{c}{2}{ (x_1,\ldots,x_n) \in \R^n _{\geq 0} \ \Big| \ \sum_{j=1}^{n} \frac{x_j}{a_j} \leq 1 }. \end{IEEEeqnarray*} Its limit shape, the \textbf{ball}, is $B^{2n}(a) \coloneqq B(a) \coloneqq E(a,\ldots,a)$. \item The \textbf{polydisk} is the convex ``toric domain with corners'' given by \begin{IEEEeqnarray*}{rCls+x*} P(a_1,\ldots,a_n) & \coloneqq & \p{c}{2}{ (z_1,\ldots,z_n) \in \C^n \ \Big| \ \forall j=1,\ldots,n \colon \frac{\pi |z_j|^2}{a_j} \leq 1 } \\ \Omega_P(a_1,\ldots,a_n) & \coloneqq & \p{c}{2}{ (x_1,\ldots,x_n) \in \R^n _{\geq 0} \ \Big| \ \forall j=1,\ldots,n \colon \frac{x_j}{a_j} \leq 1 }. \end{IEEEeqnarray*} Its limit shape, the \textbf{cube}, is $P^{2n}(a) \coloneqq P(a) \coloneqq P(a,\ldots,a)$. \item The \textbf{nondisjoint union of cylinders} is the concave ``noncompact toric domain with corners'' given by \begin{IEEEeqnarray*}{rCls+x*} N(a_1,\ldots,a_n) & \coloneqq & \p{c}{2}{ (z_1,\ldots,z_n) \in \C^n \ \Big| \ \exists j=1,\ldots,n \colon \frac{\pi |z_j|^2}{a_j} \leq 1 } \\ \Omega_N(a_1,\ldots,a_n) & \coloneqq & \p{c}{2}{ (x_1,\ldots,x_n) \in \R^n _{\geq 0} \ \Big| \ \exists j=1,\ldots,n \colon \frac{x_j}{a_j} \leq 1 }. \end{IEEEeqnarray*} Its limit shape is denoted $N^{2n}(a) \coloneqq N(a) \coloneqq N(a,\ldots,a)$. 
\item The \textbf{cylinder} is the convex and concave ``noncompact toric domain'' given by
\begin{IEEEeqnarray*}{rCls+x*}
Z(a) & \coloneqq & \p{c}{2}{ (z_1,\ldots,z_n) \in \C^n \ \Big| \ \frac{\pi |z_1|^2}{a} \leq 1 } \\
\Omega_Z(a) & \coloneqq & \p{c}{2}{ (x_1,\ldots,x_n) \in \R^n _{\geq 0} \ \Big| \ \frac{x_1}{a} \leq 1 }.
\end{IEEEeqnarray*}
Note that $Z^{2n}(a) \coloneqq Z(a) = E(a,\infty,\ldots,\infty) = P(a,\infty,\ldots,\infty)$.
\end{enumerate}
\end{example}
\begin{figure}[ht]
\centering
\begin{tikzpicture}
[
nn/.style={thick, color = gray},
zz/.style={thick, color = gray},
pp/.style={thick, color = gray},
bb/.style={thick, color = gray}
]
\tikzmath{
\x = 1.5; \y = 3; \z = 1.0;
coordinate \o, \a, \b, \c, \d, \e, \r, \s, \q;
\o{ball} = (0 , 0 ) + 0*(\y+\z,0); \a{ball} = (\x, 0 ) + 0*(\y+\z,0); \b{ball} = (0 , \x) + 0*(\y+\z,0); \c{ball} = (\x, \x) + 0*(\y+\z,0); \d{ball} = (\x, \y) + 0*(\y+\z,0); \e{ball} = (\y, \x) + 0*(\y+\z,0); \r{ball} = (\y, 0 ) + 0*(\y+\z,0); \s{ball} = (0 , \y) + 0*(\y+\z,0); \q{ball} = (\y, \y) + 0*(\y+\z,0);
\o{cube} = (0 , 0 ) + 1*(\y+\z,0); \a{cube} = (\x, 0 ) + 1*(\y+\z,0); \b{cube} = (0 , \x) + 1*(\y+\z,0); \c{cube} = (\x, \x) + 1*(\y+\z,0); \d{cube} = (\x, \y) + 1*(\y+\z,0); \e{cube} = (\y, \x) + 1*(\y+\z,0); \r{cube} = (\y, 0 ) + 1*(\y+\z,0); \s{cube} = (0 , \y) + 1*(\y+\z,0); \q{cube} = (\y, \y) + 1*(\y+\z,0);
\o{cyld} = (0 , 0 ) + 2*(\y+\z,0); \a{cyld} = (\x, 0 ) + 2*(\y+\z,0); \b{cyld} = (0 , \x) + 2*(\y+\z,0); \c{cyld} = (\x, \x) + 2*(\y+\z,0); \d{cyld} = (\x, \y) + 2*(\y+\z,0); \e{cyld} = (\y, \x) + 2*(\y+\z,0); \r{cyld} = (\y, 0 ) + 2*(\y+\z,0); \s{cyld} = (0 , \y) + 2*(\y+\z,0); \q{cyld} = (\y, \y) + 2*(\y+\z,0);
\o{ndju} = (0 , 0 ) + 3*(\y+\z,0); \a{ndju} = (\x, 0 ) + 3*(\y+\z,0); \b{ndju} = (0 , \x) + 3*(\y+\z,0); \c{ndju} = (\x, \x) + 3*(\y+\z,0); \d{ndju} = (\x, \y) + 3*(\y+\z,0); \e{ndju} = (\y, \x) + 3*(\y+\z,0); \r{ndju} = (\y, 0 ) + 3*(\y+\z,0); \s{ndju} = (0 , \y) + 3*(\y+\z,0); \q{ndju} = (\y, \y) + 3*(\y+\z,0);
}
\foreach \domain in {ball, cube, cyld, ndju}{
\draw[->] (\o{\domain}) -- (\r{\domain});
\draw[->] (\o{\domain}) -- (\s{\domain});
\node[anchor = north] at (\a{\domain}) {$1$};
\node[anchor = east] at (\b{\domain}) {$1$};
}
\node[anchor = north east] at (\q{ball}) {$\Omega_B(1)$};
\fill[bb, opacity=0.5] (\o{ball}) -- (\a{ball}) -- (\b{ball}) -- cycle;
\draw[bb] (\o{ball}) -- (\a{ball}) -- (\b{ball}) -- cycle;
\node[anchor = north east] at (\q{cube}) {$\Omega_P(1)$};
\fill[pp, opacity=0.5] (\o{cube}) -- (\a{cube}) -- (\c{cube}) -- (\b{cube}) -- cycle;
\draw[pp] (\o{cube}) -- (\a{cube}) -- (\c{cube}) -- (\b{cube}) -- cycle;
\node[anchor = north east] at (\q{cyld}) {$\Omega_Z(1)$};
\fill[zz, opacity=0.5] (\o{cyld}) -- (\a{cyld}) -- (\d{cyld}) -- (\s{cyld});
\draw[zz] (\s{cyld}) -- (\o{cyld}) -- (\a{cyld}) -- (\d{cyld});
\node[anchor = north east] at (\q{ndju}) {$\Omega_N(1)$};
\fill[nn, opacity=0.5] (\o{ndju}) -- (\s{ndju}) -- (\d{ndju}) -- (\c{ndju}) -- (\e{ndju}) -- (\r{ndju}) -- cycle;
\draw[nn] (\d{ndju}) -- (\c{ndju}) -- (\e{ndju});
\draw[nn] (\s{ndju}) -- (\o{ndju}) -- (\r{ndju});
\end{tikzpicture}
\caption{Toric domains}
\label{fig:Toric domains}
\end{figure}
\section{Symplectization of a contact manifold}
Let $(M,\alpha)$ be a contact $(2n - 1)$-dimensional manifold.
\begin{definition}
\label{def:symplectization}
The \textbf{symplectization} of $(M,\alpha)$ is the exact symplectic manifold $(\R \times M, e^r \alpha)$, where $r$ is the coordinate on $\R$.
\end{definition}
\begin{lemma}
\label{lem:symplectization form}
The form $\edv (e^r \alpha)$ is symplectic.
\end{lemma}
\begin{proof}
The form $\edv (e^r \alpha)$ is exact, so it is closed. We show that $\edv (e^r \alpha)$ is nondegenerate.
\begin{IEEEeqnarray*}{rCls+x*}
\IEEEeqnarraymulticol{3}{l}{( \edv (e^r \alpha) )^n}\\
\quad & = & ( e^r \edv r \wedge \alpha + e^r \edv \alpha )^n & \quad [\text{by the Leibniz rule}] \\
& = & e^{nr} \sum_{k=0}^{n} \binom{n}{k} ( \edv r \wedge \alpha)^k \wedge (\edv \alpha)^{n-k} & \quad [\text{by the binomial theorem}] \\
& = & n e^{n r} \edv r \wedge \alpha \wedge (\edv \alpha)^{n-1} & \quad [\text{since $\alpha^2 = 0$ and $(\edv \alpha)^n = 0$}] \\
& \neq & 0 & \quad [\text{since $\alpha \wedge (\edv \alpha)^{n-1}$ is a volume form on $M$}]. & \qedhere
\end{IEEEeqnarray*}
\end{proof}
\begin{lemma}
\label{lem:symplectization lvf}
The Liouville vector field of $(\R \times M, e^r \alpha)$ is $Z = \partial_r$.
\end{lemma}
\begin{proof}
By definition of Liouville vector field, we need to show that $\iota_{\partial_r} \edv (e^r \alpha) = e^r \alpha$.
\begin{IEEEeqnarray*}{rCls+x*}
\iota_{\partial_r} \edv (e^r \alpha) & = & \iota_{\partial_r} (e^r \edv r \wedge \alpha + e^r \edv \alpha) & \quad [\text{by the Leibniz rule}] \\
& = & e^r (\edv r (\partial_r) \alpha - \alpha(\partial_r) \edv r + \iota_{\partial_r} \edv \alpha) & \quad [\text{since $\iota_Z$ is a derivation}] \\
& = & e^r \alpha & \quad [\text{since $\alpha$ is a form on $M$}]. & \qedhere
\end{IEEEeqnarray*}
\end{proof}
\begin{example}
Let $(L,g)$ be a Riemannian manifold. Recall that $(T^*L,\lambda)$ is an exact symplectic manifold and that $(S^*L, \alpha)$ is a hypersurface of contact type. Consider the symplectization of $S^*L$, which is $(\R \times S^*L, e^r \alpha)$. Then, the map $\R \times S^*L \longrightarrow T^*L \setminus L$ (where $L \subset T^*L$ denotes the zero section) given by $(r,u) \longmapsto e^r u$ is a Liouville diffeomorphism.
\end{example}
Defining $R_{(r,x)} = R_x$ we can view the Reeb vector field of $M$ as a vector field on $\R \times M$. Analogously, we define a distribution $\xi$ on $\R \times M$ by $\xi_{(r,x)} = \xi_x$. Then, $T(\R \times M) = \p{<}{}{Z} \directsum \p{<}{}{R} \directsum \xi$. Let $H \colon \R \times M \longrightarrow \R$ be a function which depends only on the $\R$ coordinate, i.e. $H(r,x) = H(r)$. Define $h \coloneqq H \circ \exp^{-1} \colon \R_{> 0} \longrightarrow \R$ and $T(r) \coloneqq H'(r) / e^r = h'(e^r)$.
\begin{lemma}
\label{lem:reeb equals hamiltonian on symplectization}
The Hamiltonian vector field of $H$ satisfies $\alpha(X_H) = T$ and $X_H = T R$.
\end{lemma}
\begin{proof}
By \cref{lem:HR flow}, $X_H$ and $R$ are collinear. By definition of Reeb vector field, this implies that $X_H = \alpha(X_H) R$. It remains to show that $\alpha(X_H) = T$. For this, we compute
\begin{IEEEeqnarray*}{rCls+x*}
H' \edv r & = & \edv H & \quad [\text{by definition of exterior derivative}] \\
& = & - \iota _{X_H} \edv (e^r \alpha) & \quad [\text{by definition of Hamiltonian v.f.}] \\
& = & - \iota _{X_H} (e^r \edv r \wedge \alpha + e^r \edv \alpha) & \quad [\text{Leibniz rule for exterior derivative}] \\
& = & - e^r (\edv r(X_H) \alpha - \alpha(X_H) \edv r + \iota _{X_H} \edv \alpha) & \quad [\text{interior product is a derivation}].
\end{IEEEeqnarray*}
Therefore, $H' \edv r = e^r \alpha(X_H) \edv r$, which implies that $\alpha(X_H) = H'/\exp = T$.
\end{proof}
\begin{corollary}
\phantomsection\label{cor:hamiltonian orbits are reeb orbits}
Suppose that $\gamma = (r,\rho) \colon S^1 \longrightarrow \R \times M$ is a $1$-periodic orbit of $X_H$, i.e. $\dot{\gamma}(t) = X_H(\gamma(t))$. Then:
\begin{enumerate}
\item $r \colon S^1 \longrightarrow \R$ is constant;
\item $\rho \colon S^1 \longrightarrow M$ is a $T(r)$-periodic orbit of $R$, i.e. $\dot{\rho}(t) = T(r) R(\rho(t))$.
\end{enumerate}
\end{corollary}
\begin{proof}
The function $r \colon S^1 \longrightarrow \R$ is constant because $X_H$ is tangent to $\{r\} \times M$. Since $\dot{\gamma}(t) = X_H(\gamma(t))$ and by \cref{lem:reeb equals hamiltonian on symplectization}, we conclude that $\dot{\rho}(t) = T(r) R(\rho(t))$.
\end{proof}
\begin{lemma}
\label{lem:action in symplectization}
Let $\gamma = (r,\rho) \colon S^1 \longrightarrow \R \times M$ be a $1$-periodic orbit of $X_H$ and consider its action, given by
\begin{IEEEeqnarray*}{c+x*}
\mathcal{A}_H(\gamma) = \int_{S^1}^{} \gamma^* (e^r \alpha) - \int_{S^1}^{} H(\gamma(t)) \, \edv t.
\end{IEEEeqnarray*}
Then, $\mathcal{A}_H(\gamma) \eqqcolon \mathcal{A}_H(r)$ only depends on $r$, and we have the following formulas for $\mathcal{A}_H$ and $\mathcal{A}'_H$ (as functions of $r$):
\begin{IEEEeqnarray*}{rClCl}
\mathcal{A}_H (r) & = & H' (r) - H (r) & = & e^{r} h' (e^r) - h(e^r), \\
\mathcal{A}'_H(r) & = & H''(r) - H'(r) & = & e^{2r} h''(e^r).
\end{IEEEeqnarray*}
\end{lemma}
\begin{proof}
We show only that $\mathcal{A}_H(\gamma) = H'(r) - H(r)$, since the other formulas follow from this one by elementary calculus.
\begin{IEEEeqnarray*}{rCls+x*}
\mathcal{A}_H(\gamma) & = & \int_{S^1}^{} \gamma^* ( e^r \alpha) - \int_{S^1}^{} H(\gamma(t)) \, \edv t & \quad [\text{by definition of action}] \\
& = & \int_{S^1}^{} e^r \rho^* \alpha - \int_{0}^{1} H(r, \rho(t)) \, \edv t & \quad [\text{since $\gamma(t) = (r, \rho(t))$}] \\
& = & e^r \int_{S^1}^{} \rho^* \alpha - \int_{0}^{1} H(r) \, \edv t & \quad [\text{since $H = H(r)$}] \\
& = & e^r T(r) - H(r) & \quad [\text{by \cref{cor:hamiltonian orbits are reeb orbits}}] \\
& = & H'(r) - H(r) & \quad [\text{by definition of $T$}]. & \qedhere
\end{IEEEeqnarray*}
\end{proof}
\begin{definition}
\label{def:J cylindrical}
Let $J$ be an almost complex structure on $(\R \times M, e^r \alpha)$. We say that $J$ is \textbf{cylindrical} if $J(\partial_r) = R$, if $J(\xi) \subset \xi$, and if the almost complex structure $J \colon \xi \longrightarrow \xi$ is compatible with $\edv \alpha$ and independent of $r$. We denote by $\mathcal{J}(M)$ the set of such $J$.
\end{definition}
\begin{lemma}
\label{lem:J cylindrical forms}
If $J$ is cylindrical then $\alpha \circ J = \edv r$.
\end{lemma}
\begin{proof}
It suffices to show that $\alpha \circ J = \edv r$ on $\partial_r$, $R$ and $V \in \xi$.
\begin{IEEEeqnarray*}{rCrClCl+x*}
\alpha \circ J (\partial_r) & = & \alpha (R) & = & 1 & = & \edv r (\partial_r) \\
\alpha \circ J (R) & = & - \alpha (\partial_r) & = & 0 & = & \edv r (R) \\
\alpha \circ J (V) & = & \alpha(J(V)) & = & 0 & = & \edv r (V). & \qedhere
\end{IEEEeqnarray*}
\end{proof}
\section{Completion of a Liouville domain}
\label{sec:completion of liouville domain}
Let $(X,\lambda)$ be a Liouville domain and $\omega = \edv \lambda$. Our goal in this section is to define the completion of $(X,\lambda)$, which is an exact symplectic manifold denoted by $(\hat{X}, \hat{\lambda})$. Recall that $(\del X, \lambda|_{\del X})$ is contact.
Consider the symplectization $(\R \times \del X, e^r \lambda|_{\del X})$ of $(\del X, \lambda|_{\del X})$. Let $Z$ be the Liouville vector field of $(X, \lambda)$, which is given by $\lambda = \iota_Z \omega$. Denote the flow of $Z$ by \begin{IEEEeqnarray*}{rrCl} \Phi_Z \colon & \R_{\leq 0} \times \del X & \longrightarrow & X \\ & (t,x) & \longmapsto & \phi^t_Z(x). \end{IEEEeqnarray*} Since the vector field $Z$ is outward pointing at $\partial X$, the map $\Phi_Z$ is well-defined. Also, since $\Phi_Z$ is given by flowing along the vector field $Z$, it is an embedding. \begin{lemma} \label{lem:flow of liouville} The map $\Phi_Z$ is a Liouville embedding, i.e. $\Phi_Z^* \lambda = e^r \lambda|_{\del X}$. \end{lemma} \begin{proof} If $(t,x) \in \R_{\leq 0} \times \partial X$ and $(u,v) \in T_{(t,x)} (\R_{\leq 0} \times \partial X) = \R \oplus T_x \partial X$, then \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{(\Phi_Z^* \lambda)(u,v)} \\ \quad & = & \lambda(\dv \Phi_Z(t,x)(u,v)) & \quad [\text{by definition of pullback}] \\ & = & \lambda(\dv \Phi_Z(t,x)(0,v)) + \lambda(\dv \Phi_Z(t,x)(u,0)) & \quad [\text{by linearity of the derivative}] \\ & = & \lambda(\dv \phi^t_Z (x)(v)) + u \, \lambda(Z_{\phi^t_Z(x)}) & \quad [\text{by definition of $\Phi_Z$}]\\ & = & \lambda(\dv \phi^t_Z (x)(v)) + u \, \omega(Z_{\phi^t_Z(x)},Z_{\phi^t_Z(x)}) & \quad [\text{by definition of $Z$}] \\ & = & \lambda(\dv \phi^t_Z (x)(v)) & \quad [\text{since $\omega$ is antisymmetric}]\\ & = & ((\phi^t_Z)^* \lambda)(v) & \quad [\text{by definition of pullback}] \\ & = & e^t \lambda (v) & \quad [\text{by \cref{lem:mosers trick,lem:liouville vf}}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{definition} \label{def:completion of a Liouville domain} We define an exact symplectic manifold $(\hat{X},\hat{\lambda})$ called the \textbf{completion} of $(X,\lambda)$, as follows. As a smooth manifold, $\hat{X}$ is the gluing of $X$ and $\R \times \del X$ along the map $\Phi _{Z} \colon \R_{\leq 0} \times \del X \longrightarrow \Phi_Z(\R_{\leq 0} \times \del X)$. This gluing comes with embeddings \begin{IEEEeqnarray*}{rCls+x*} \iota_X \colon X & \longrightarrow & \hat{X}, \\ \iota_{\R \times \del X} \colon \R \times \del X & \longrightarrow & \hat{X}. \end{IEEEeqnarray*} The form $\hat{\lambda}$ is the unique $1$-form on $\hat{X}$ such that \begin{IEEEeqnarray*}{rCls+x*} \iota_X^* \hat{\lambda} & = & \lambda, \\ \iota _{\R \times \del X}^* \hat{\lambda} & = & e^r \lambda|_{\del X}. \end{IEEEeqnarray*} The symplectic form of $\hat{X}$ is given by $\hat{\omega} \coloneqq \edv \hat{\lambda}$, which satisfies \begin{IEEEeqnarray*}{rCls+x*} \iota_X^* \hat{\omega} & = & \omega, \\ \iota _{\R \times \del X}^* \hat{\omega} & = & \edv (e^r \lambda|_{\del X}). \end{IEEEeqnarray*} The Liouville vector field of $\hat{X}$ is the unique vector field $\hat{Z}$ such that $\iota_{\hat{Z}} \hat{\omega} = \hat{\lambda}$, which satisfies \begin{IEEEeqnarray*}{rRls+x*} Z & \text{ is $\iota_X$-related to } & \hat{Z}, \\ \partial_r & \text{ is $\iota_{\R \times \partial X}$-related to } & \hat{Z}. \end{IEEEeqnarray*} \end{definition} \begin{example} Let $(L,g)$ be a Riemannian manifold. Recall that $T^*L$ is an exact symplectic manifold, $S^*L$ is a hypersurface of contact type and that $D^*L$ is a Liouville domain. Also recall that there is a Liouville embedding $\varphi \colon \R \times S^* L \longrightarrow T^*L$ given by $\varphi(r,u) = e^r u$. 
Then, we can define a Liouville diffeomorphism $\hat{\varphi} \colon \widehat{D^*L} \longrightarrow T^*L$ as the unique map such that the following diagram commutes:
\begin{IEEEeqnarray*}{c+x*}
\begin{tikzcd}
\widehat{D^* L} \ar[dr, hook, two heads, "\hat{\varphi}"] & \R \times S^* L \ar[l, hook'] \ar[d, hook, "\varphi"] \\
D^* L \ar[u, hook] \ar[r, hook] & T^* L
\end{tikzcd}
\end{IEEEeqnarray*}
\end{example}
\begin{lemma}
\label{lem:properties of completion}
The diagram
\begin{IEEEeqnarray*}{c}
\begin{tikzcd}[ampersand replacement = \&]
\R_{\leq 0} \times \del X \ar[d, swap, hook, "\Phi_Z"] \ar[r, hookrightarrow] \& \R \times \del X \ar[d, hookrightarrow, "\iota _{\R \times \del X}"] \ar[r, hookrightarrow] \& \R \times \hat{X} \ar[d, two heads, "\Phi _{\hat{Z}}"] \\
X \ar[r, swap, hookrightarrow, "\iota_X"] \& \hat{X} \ar[r, equals] \& \hat{X}
\end{tikzcd}
\end{IEEEeqnarray*}
commutes.
\end{lemma}
\begin{proof}
The left square commutes by definition of $\hat{X}$. To prove that the right square commutes, let $(t,x) \in \R \times \del X$. We wish to show that $\Phi_{\hat{Z}}(t,x) = \iota_{\R \times \del X}(t,x)$.
\begin{IEEEeqnarray*}{rCls+x*}
\iota_{\R \times \partial X} (t, x) & = & \iota_{\R \times \partial X} \circ \phi^t_{\partial_r} (0, x) & \quad [\text{by definition of flow of $\partial_r$}] \\
& = & \phi^t_{\hat{Z}} \circ \iota_{\R \times \partial X}(0, x) & \quad [\text{since $\partial_r$ is $\iota_{\R \times \partial X}$-related to $\hat{Z}$}] \\
& = & \phi^t_{\hat{Z}} \circ \iota_X(x) & \quad [\text{by definition of completion}] \\
& = & \Phi_{\hat{Z}}(t,x) & \quad [\text{by definition of $\Phi_{\hat{Z}}$}]. & \qedhere
\end{IEEEeqnarray*}
\end{proof}
\begin{lemma}
\label{lem:codim 0 liouville emb preserves lvf}
If $(X, \lambda_X)$ and $(Y, \lambda_Y)$ are Liouville domains and $\varphi \colon X \longrightarrow Y$ is a Liouville embedding of codimension $0$ then $Z_X$ is $\varphi$-related to $Z_Y$.
\end{lemma}
\begin{proof}
For any $x \in X$ and $v \in T_x X$,
\begin{IEEEeqnarray*}{rCls+x*}
\IEEEeqnarraymulticol{3}{l}{\omega_Y (\dv \varphi(x) (Z_X|_x) - Z_Y|_{\varphi(x)}, \dv \varphi(x)(v))}\\
\quad & = & (\iota_{Z_X} \varphi^* \omega_Y - \varphi^* \iota_{Z_Y} \omega_Y) (v) & \quad [\text{by the definitions of $\iota_{Z_X}$, $\iota_{Z_Y}$, and $\varphi^*$}] \\
\quad & = & (\iota_{Z_X} \omega_X - \varphi^* \iota_{Z_Y} \omega_Y) (v) & \quad [\text{since $\varphi$ is a Liouville embedding}] \\
\quad & = & (\lambda_X - \varphi^* \lambda_Y) (v) & \quad [\text{by definition of Liouville vector field}] \\
\quad & = & 0 & \quad [\text{since $\varphi$ is a Liouville embedding}].
\end{IEEEeqnarray*}
Since $\omega_Y$ is nondegenerate and $\varphi$ is a $0$-codimensional embedding, the result follows.
\end{proof}
We will now explain how to view the construction of taking the completion of a Liouville domain as a functor. Let $(X,\lambda_X)$, $(Y,\lambda_Y)$ be Liouville domains and $\varphi \colon X \longrightarrow Y$ be a Liouville embedding such that $Z_X$ is $\varphi$-related to $Z_Y$ (by \cref{lem:codim 0 liouville emb preserves lvf}, this is true whenever $\varphi$ is $0$-codimensional, although here we assume only that the Liouville vector fields are related).
We wish to define an embedding $\hat{\varphi} \colon \hat{X} \longrightarrow \hat{Y}$, using the following diagram as a guide (we will show that this diagram commutes in \cref{lem:diagram for map on completions commutes}):
\begin{IEEEeqnarray}{c}
\plabel{eq:diagram for induced map on completions}
\begin{tikzcd}[ampersand replacement = \&, row sep=scriptsize, column sep=0.2em]
\& \R_{\leq 0} \times \del X \ar[dd, near end, swap, "\Phi_{Z_X}"] \ar[rr, "{\iota _{\R_{\leq 0}} \times \id_{\del X}}"] \& \& \R \times \del X \ar[dd, near start, swap, "{\iota _{\R \times \del X}}"] \ar[rr, "{\id \times \iota _{ \del X }}"] \& \& \R \times X \ar[ld, swap, "\id \times {\varphi}"] \ar[dd, near end] \ar[rr, "{\id \times \iota_X}"] \& \& \R \times \hat{X} \ar[ld,swap, "\id \times \hat{\varphi}"]\ar[dd, "\Phi _{\hat{Z}_X}"] \\
\R_{\leq 0} \times \del Y \ar[dd, swap, "\Phi_{Z_Y}"] \ar[rr, crossing over] \& \& \R \times \del Y \ar[rr, crossing over] \& \& \R \times Y \ar[rr, crossing over, near end, "\hphantom{-}\id \times \iota_Y"] \& \& \R \times \hat{Y} \& \\
\& X \ar[ld, "{\varphi}"] \ar[rr, near end, "\iota_X"] \& \& \hat{X} \ar[ld, "\hat{\varphi}"] \ar[rr, equals] \& \& \hat{X} \ar[ld, "\hat{\varphi}"]\ar[rr, equals] \& \& \hat{X} \ar[ld, "\hat{\varphi}"]\\
Y \ar[rr, swap, "\iota_Y"] \& \& \hat{Y} \ar[uu, crossing over, near start, leftarrow, "{\iota _{\R \times \del Y}}"]\ar[rr, equals] \& \& \hat{Y} \ar[uu, near start, crossing over, leftarrow]\ar[rr, equals] \& \& \hat{Y} \ar[uu, near start, crossing over, leftarrow, "\Phi _{\hat{Z}_Y}"]\&
\end{tikzcd}
\IEEEeqnarraynumspace
\end{IEEEeqnarray}
\begin{definition}
\label{def:embedding on completions coming from Liouville embedding}
We define an embedding $\hat{\varphi} \colon \hat{X} \longrightarrow \hat{Y}$ by
\begin{IEEEeqnarray*}{rCls+x*}
\hat{\varphi} \circ \iota_X & \coloneqq & \iota_Y \circ \varphi, \\
\hat{\varphi} \circ \iota_{\R \times \del X} & \coloneqq & \Phi_{\hat{Z}_Y} \circ (\id_ \R \times (\iota_Y \circ \varphi \circ \iota_{\partial X})).
\end{IEEEeqnarray*}
\end{definition}
For $\hat{\varphi}$ to be well-defined, we need to check that the definitions of $\hat{\varphi}$ on each region agree on the overlap.
\begin{lemma}
\label{def:map on completions is well defined}
The map $\hat{\varphi}$ is well-defined, i.e.
\begin{IEEEeqnarray*}{c}
\iota_Y \circ \varphi \circ \Phi _{Z_X} = \Phi_{\hat{Z}_Y} \circ (\id_ \R \times (\iota_Y \circ \varphi \circ \iota_{\partial X})) \circ (\iota _{\R_{\leq 0}} \times \id _{\del X}).
\end{IEEEeqnarray*}
\end{lemma}
\begin{proof}
Let $(t,x) \in \R_{\leq 0} \times \del X$. We prove that $\iota_Y \circ \varphi \circ \Phi _{Z_X}(t,x) = \Phi _{\hat{Z}_Y}(t,\iota_Y(\varphi(x)))$.
\begin{IEEEeqnarray*}{rCls+x*}
\iota_Y \circ \varphi \circ \Phi _{Z_X}(t,x) & = & \iota_Y \circ \varphi \circ \phi^t _{Z_X}(x) & \quad [\text{by definition of $\Phi _{Z_X}$}] \\
& = & \iota_Y \circ \phi^t _{Z_Y} \circ \varphi(x) & \quad [\text{since $Z_X$ is $\varphi$-related to $Z_Y$}] \\
& = & \phi^t _{\hat{Z}_Y} \circ \iota_Y \circ \varphi(x) & \quad [\text{since $Z_Y$ is $\iota_Y$-related to $\hat{Z}_Y$}] \\
& = & \Phi _{\hat{Z}_Y}(t,\iota_Y(\varphi(x))) & \quad [\text{by definition of $\Phi _{\hat{Z}_Y}$}]. & \qedhere
\end{IEEEeqnarray*}
\end{proof}
\begin{lemma}
\label{def:map on completions is liouville embedding}
The map $\hat{\varphi}$ is a Liouville embedding, i.e. $\hat{\varphi}^* \hat{\lambda}_Y = \hat{\lambda}_X$.
\end{lemma} \begin{proof} We need to show that $\hat{\varphi}^* \hat{\lambda}_Y = \hat{\lambda}_X$, which is equivalent to \begin{IEEEeqnarray}{rCls+x*} \iota_X^* \hat{\varphi}^* \hat{\lambda}_Y & = & \iota_X^* \hat{\lambda}_X, \plabel{eq:map on completion is liouville embedding 1} \\ \iota_{\R \times \del X}^* \hat{\varphi}^* \hat{\lambda}_Y & = & \iota_{\R \times \del X}^* \hat{\lambda}_X. \plabel{eq:map on completion is liouville embedding 2} \end{IEEEeqnarray} We prove Equation \eqref{eq:map on completion is liouville embedding 1}. \begin{IEEEeqnarray*}{rCls+x*} \iota_X^* \hat{\varphi}^* \hat{\lambda}_Y & = & (\hat{\varphi} \circ \iota_X)^* \hat{\lambda}_Y & \quad [\text{by functoriality of pullbacks}] \\ & = & (\iota_Y \circ \varphi)^* \hat{\lambda}_Y & \quad [\text{by definition of $\hat{\varphi}$}] \\ & = & \varphi^* \iota_Y^* \hat{\lambda}_Y & \quad [\text{by functoriality of pullbacks}] \\ & = & \varphi^* \lambda_Y & \quad [\text{by definition of $\hat{\lambda}_Y$}] \\ & = & \lambda_X & \quad [\text{since $\varphi$ is a Liouville embedding}] \\ & = & \iota_X^* \hat{\lambda}_X & \quad [\text{by definition of $\hat{\lambda}_X$}]. \end{IEEEeqnarray*} We prove Equation \eqref{eq:map on completion is liouville embedding 2}. \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\iota _{\R \times \del X}^* \hat{\varphi}^* \hat{\lambda}_Y}\\ \quad & = & (\hat{\varphi} \circ \iota _{\R \times \del X})^* \hat{\lambda}_Y & \quad [\text{by functoriality of pullbacks}] \\ & = & ( \Phi _{\hat{Z}_Y} \circ (\id_ \R \times (\iota_Y \circ \varphi \circ \iota _{\del X})) )^* \hat{\lambda}_Y & \quad [\text{by definition of $\hat{\varphi}$}] \\ & = & (\id_ \R \times (\iota_Y \circ \varphi \circ \iota _{\del X}))^* \Phi _{\hat{Z}_Y}^* \hat{\lambda}_Y & \quad [\text{by functoriality of pullbacks}] \\ & = & (\id_ \R \times (\iota_Y \circ \varphi \circ \iota _{\del X}))^* e^r \hat{\lambda}_Y & \quad [\text{by \cref{lem:mosers trick,lem:liouville vf}}] \\ & = & e^r \iota _{\del X}^* \varphi^* \iota_Y^* \hat{\lambda}_Y & \quad [\text{by functoriality of pullbacks}] \\ & = & e^r \iota _{\del X}^* \varphi^* \lambda_Y & \quad [\text{by definition of $\hat{\lambda}_Y$}] \\ & = & e^r \iota _{\del X}^* \lambda_X & \quad [\text{since $\varphi$ is a Liouville embedding}] \\ & = & \iota^* _{\R \times \del X} \hat{\lambda}_X & \quad [\text{by definition of $\hat{\lambda}_X$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{lemma} \label{lem:liouville vector fields on completion are related} The Liouville vector fields $\hat{Z}_X$ and $\hat{Z}_Y$ are $\hat{\varphi}$-related. \end{lemma} \begin{proof} We need to show that \begin{IEEEeqnarray}{Rls+x*} Z_X \text{ is $(\iota_Y \circ \varphi)$-related to } & \hat{Z}_Y, \plabel{eq:liouville vector fields on completion are related 1} \\ \partial_r \text{ is $(\Phi_{\hat{Z}_Y} \circ (\id_ \R \times (\iota_Y \circ \varphi \circ \iota_{\partial X})))$-related to } & \hat{Z}_Y. \plabel{eq:liouville vector fields on completion are related 2} \end{IEEEeqnarray} Here, \eqref{eq:liouville vector fields on completion are related 1}, follows because $Z_X$ is $\varphi$-related to $Z_Y$. 
To prove \eqref{eq:liouville vector fields on completion are related 2}, notice that for every $(t,x) \in \R \times \partial X$, we have $\partial_r = (1,0) \in \R \oplus T_x \partial X$ and therefore \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\dv ( \Phi_{\hat{Z}_Y} \circ (\id_ \R \times (\iota_Y \circ \varphi \circ \iota_{\partial X})) )(t,x) (1,0)}\\ \quad & = & \dv \Phi_{\hat{Z}_Y} (t, \varphi(x)) (1, 0) & \quad [\text{by the chain rule}] \\ & = & \hat{Z}_Y(t, \varphi(x)) & \quad [\text{by definition of $\Phi_{\hat{Z}_Y}$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{lemma} \label{lem:diagram for map on completions commutes} Diagram \eqref{eq:diagram for induced map on completions} commutes. \end{lemma} \begin{proof} We have already proven in \cref{lem:properties of completion} that the squares on the front and back commute. The first square on the bottom commutes by definition of $\hat{\varphi}$. The other two squares on the bottom commute trivially. The top square commutes because $\hat{\varphi} \circ \iota_X = \iota_Y \circ \varphi$ by definition of $\hat{\varphi}$. We prove that the right square commutes. For $(t,x) \in \R \times \hat{X}$, \begin{IEEEeqnarray*}{rCls+x*} \hat{\varphi} \circ \Phi _{\hat{Z}_X}(t,x) & = & \hat{\varphi} \circ \phi^t _{\hat{Z}_X}(x) & \quad [\text{by definition of $\Phi _{\hat{Z}_X}$}] \\ & = & \phi^t _{\hat{Z}_Y} \circ \hat{\varphi} (x) & \quad [\text{by \cref{lem:liouville vector fields on completion are related}}] \\ & = & \Phi _{\hat{Z}_Y} (t, \hat{\varphi}(x)) & \quad [\text{by definition of $\Phi _{\hat{Z}_Y}$}] \\ & = & \Phi _{\hat{Z}_Y} \circ (\id_ \R \times \hat{\varphi})(x) & \quad [\text{by definition of $\id_ \R \times \hat{\varphi}$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} Finally, we check that the induced maps on the completions behave nicely with respect to compositions. \begin{proposition} \phantomsection\label{prop:completion is a functor} The operation of taking the completion is a functor. \end{proposition} \begin{proof} We show that identities are preserved. Let $(X,\lambda)$ be a Liouville domain. We wish to prove that $\widehat{\id_X} = \id _{\hat{X}} \colon \hat{X} \longrightarrow \hat{X}$, which is equivalent to \begin{IEEEeqnarray}{rCls+x*} \widehat{\id_X} \circ \iota_X & = & \id_{\hat{X}} \circ \iota_X, \plabel{eq:completion functor identity 1} \\ \widehat{\id_X} \circ \iota_{\R \times \del X} & = & \id_{\hat{X}} \circ \iota_{\R \times \del X}. \plabel{eq:completion functor identity 2} \end{IEEEeqnarray} We prove Equation \eqref{eq:completion functor identity 1}. \begin{IEEEeqnarray*}{rCls+x*} \widehat{\id_X} \circ \iota_X & = & \iota_X \circ \id_X & \quad [\text{by definition of $\widehat{\id_X}$}] \\ & = & \iota_X & \quad [\text{since $\id_X$ is the identity map}] \\ & = & \id _{\hat{X}} \circ \iota_X & \quad [\text{since $\id_{\hat{X}}$ is the identity map}]. \end{IEEEeqnarray*} We prove Equation \eqref{eq:completion functor identity 2}. \begin{IEEEeqnarray*}{rCls+x*} \widehat{\id_X} \circ \iota _{\R \times \del X} & = & \Phi_{\hat{Z}} \circ (\id_\R \times (\iota_X \circ \id_X \circ \iota_{\partial X})) & \quad [\text{by definition of $\widehat{\id_X}$}] \\ & = & \id_{\hat{X}} \circ \iota_{\R \times \del X} & \quad [\text{by \cref{lem:properties of completion}}]. \end{IEEEeqnarray*} Now, we prove that compositions are preserved. Let $(X,\lambda_X)$, $(Y,\lambda_Y)$ and $(W,\lambda_W)$ be Liouville domains and $f \colon X \longrightarrow Y$ and $g \colon Y \longrightarrow W$ be Liouville embeddings. 
We wish to prove that $\widehat{g \circ f} = \hat{g} \circ \hat{f}$, which is equivalent to \begin{IEEEeqnarray}{rCls+x*} \widehat{g \circ f} \circ \iota_X & = & \hat{g} \circ \hat{f} \circ \iota_X, \plabel{eq:completion functor composition 1} \\ \widehat{g \circ f} \circ \iota_{\R \times \del X} & = & \hat{g} \circ \hat{f} \circ \iota_{\R \times \del X}. \plabel{eq:completion functor composition 2} \end{IEEEeqnarray} We prove Equation \eqref{eq:completion functor composition 1}. \begin{IEEEeqnarray*}{rCls+x*} \widehat{g \circ f} \circ \iota_X & = & \iota_W \circ g \circ f & \quad [\text{by definition of $\widehat{g \circ f}$}] \\ & = & \hat{g} \circ \iota_Y \circ f & \quad [\text{by definition of $\hat{g}$}]\\ & = & \hat{g} \circ \hat{f} \circ \iota_X & \quad [\text{by definition of $\hat{f}$}]. \end{IEEEeqnarray*} We prove Equation \eqref{eq:completion functor composition 2}. \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\widehat{g \circ f} \circ \iota _{\R \times \del X}} \\ \quad & = & \Phi_{\hat{Z}_W} \circ (\id_{\R} \times (\iota_W \circ g \circ f \circ \iota_{\partial X})) & \quad [\text{by definition of $\widehat{g \circ f}$}] \\ & = & \Phi_{\hat{Z}_W} \circ (\id_{\R} \times (\hat{g} \circ \iota_Y \circ f \circ \iota_{\partial X})) & \quad [\text{by definition of $\hat{g}$}]\\ & = & \Phi_{\hat{Z}_W} \circ (\id_{\R} \times \hat{g}) \circ (\id_{\R} \times (\iota_Y \circ f \circ \iota_{\partial X})) & \\ & = & \hat{g} \circ \Phi_{\hat{Z}_Y} \circ (\id_{\R} \times (\iota_Y \circ f \circ \iota_{\partial X})) & \quad [\text{by diagram \eqref{eq:diagram for induced map on completions}}] \\ & = & \hat{g} \circ \hat{f} \circ \iota _{\R \times \del X} & \quad [\text{by definition of $\hat{f}$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \chapter{Introduction} \section{Symplectic capacities and their uses} A \textbf{symplectic manifold} is a pair $(X, \omega)$, where $X$ is a manifold and $\omega \in \Omega^2(X)$ is a closed and nondegenerate $2$-form on $X$. An example of a symplectic manifold is $\C^{n}$ with the canonical symplectic structure \begin{IEEEeqnarray*}{c} \omega_0 \coloneqq \sum_{j=1}^{n} \edv x^j \wedge \edv y^j. \end{IEEEeqnarray*} An embedding $\phi \colon (X, \omega_X) \longrightarrow (Y, \omega_Y)$ between symplectic manifolds is \textbf{symplectic} if $\phi^* \omega_Y = \omega_X$. A \textbf{symplectomorphism} is a symplectic diffeomorphism. \textbf{Darboux' theorem} implies that any symplectic manifold $(X, \omega)$ is locally symplectomorphic to $(\C^n, \omega_0)$. We point out that the analogue of this theorem in Riemannian geometry is clearly false: such a theorem would imply that every Riemannian manifold is flat. Conversely, Darboux' theorem also implies that it is not possible to define local invariants of symplectic manifolds that are analogues of the curvature of a Riemannian manifold. There are, however, examples of global invariants of symplectic manifolds, for example symplectic capacities. A \textbf{symplectic capacity} is a function $c$ that assigns to every symplectic manifold $(X,\omega)$ (in a restricted subclass of all symplectic manifolds) a number $c(X,\omega) \in [0,+\infty]$, satisfying \begin{description} \item[(Monotonicity)] If there exists a symplectic embedding (possibly in a restricted subset of all symplectic embeddings) $(X, \omega_X) \longrightarrow (Y, \omega_Y)$, then $c(X, \omega_X) \leq c(Y, \omega_Y)$; \item[(Conformality)] If $\alpha > 0$ then $c(X, \alpha \omega_X) = \alpha \, c(X, \omega_X)$. 
\end{description} By the monotonicity property, symplectic capacities are symplectomorphism invariants of symplectic manifolds. There are many examples of symplectic capacities, a simple one being the \textbf{volume capacity} $c_{\mathrm{vol}}$, defined as follows for a $2n$-dimensional symplectic manifold $(X, \omega)$. Since $\omega$ is nondegenerate, $\omega^n / n!$ is a volume form on $X$. Define \begin{IEEEeqnarray*}{rCl} \operatorname{vol}(X) & \coloneqq & \int_{X} \frac{\omega^n}{n!}, \\ c_{\mathrm{vol}}(X) & \coloneqq & \p{}{2}{\frac{\operatorname{vol}(X)}{\operatorname{vol}(B)}}^{1/n}, \end{IEEEeqnarray*} where $B = \{z \in \C^n \mid \pi |z|^2 \leq 1 \}$. Symplectic capacities are especially relevant when discussing symplectic embedding problems. Notice that by the monotonicity property, a symplectic capacity can provide an obstruction to the existence of a symplectic embedding. We provide an example from physics. A \textbf{classical mechanical system} is a symplectic manifold $(X, \omega)$ together with a function $H$ called the \textbf{Hamiltonian}. The \textbf{Hamiltonian vector field} of $H$ is the unique vector field $X_H$ on $X$ such that \begin{IEEEeqnarray*}{c} \edv H = - \iota_{X_H} \omega. \end{IEEEeqnarray*} Denote by $\phi^t_H$ the flow of $X_H$, which is a symplectomorphism. \textbf{Liouville's theorem} for a classical mechanical system says that for any subset $O \subset X$, the symplectic volume $c_{\mathrm{vol}}(\phi^t_H(O))$ is independent of $t$. The proof of this statement works for any capacity $c$ replacing the volume capacity. So, for every symplectic capacity we get a theorem analogous to Liouville's theorem, giving restrictions on what regions of the phase space flow onto other regions. In more generality, one could say that \textbf{a symplectic capacity is a quantitative encoding of some specific property of symplectic manifolds}. To make this statement less vague, let us mention some symplectic capacities we will be working with in this thesis. \begin{enumerate} \item If $(X, \omega)$ is a $2n$-dimensional symplectic manifold, a submanifold $L \subset (X, \omega)$ is \textbf{Lagrangian} if $\dim L = n$ and $\omega|_L = 0$. The \textbf{minimal symplectic area} of $L$ is given by \begin{IEEEeqnarray*}{c} A_{\mathrm{min}}(L) \coloneqq \inf \{ \omega(\sigma) \mid \sigma \in \pi_2(X,L), \, \omega(\sigma) > 0 \}. \end{IEEEeqnarray*} Cieliebak--Mohnke \cite[Section 1.2]{cieliebakPuncturedHolomorphicCurves2018} define the \textbf{Lagrangian capacity} of $(X, \omega)$ by \begin{IEEEeqnarray*}{c} c_L(X,\omega) \coloneqq \sup \{ A_{\mathrm{min}}(L) \mid L \subset X \text{ is an embedded Lagrangian torus}\}. \end{IEEEeqnarray*} \item If $(X, \lambda)$ is a nondegenerate \textbf{Liouville domain} (this implies that $X$ is a compact manifold with boundary together with a $1$-form $\lambda$ such that $(X, \edv \lambda)$ is symplectic, see \cref{def:liouville domain}), one can define its \textbf{$S^1$-equivariant symplectic homology}, denoted $\homology{}{S^1}{}{S}{H}{}{}(X,\lambda)$ (see \cref{sec:Floer homology}). This is a $\Q$-module which comes equipped with a filtration, i.e. for every $a \in \R$ we have a $\Q$-module $\homology{}{S^1}{}{S}{H}{a}{}(X,\lambda)$ and a map \begin{equation*} \iota^a \colon \homology{}{S^1}{}{S}{H}{a}{}(X,\lambda) \longrightarrow \homology{}{S^1}{}{S}{H}{}{}(X,\lambda). 
\end{equation*}
In particular, we can define the $S^1$-equivariant symplectic homology associated to intervals $(a,b] \subset \R$ and $(a, +\infty) \subset \R$ by taking the quotient:
\begin{IEEEeqnarray*}{rCl}
\homology{}{S^1}{}{S}{H}{(a,b]}{}(X,\lambda) & \coloneqq & \homology{}{S^1}{}{S}{H}{b}{}(X,\lambda) / \iota^{b,a}(\homology{}{S^1}{}{S}{H}{a}{}(X,\lambda)), \\
\homology{}{S^1}{}{S}{H}{(a,+\infty)}{}(X,\lambda) & \coloneqq & \homology{}{S^1}{}{S}{H}{}{} (X,\lambda) / \iota^{a}(\homology{}{S^1}{}{S}{H}{a}{}(X,\lambda)).
\end{IEEEeqnarray*}
The \textbf{positive $S^1$-equivariant symplectic homology} is given by $\homology{}{S^1}{}{S}{H}{+}{}(X,\lambda) = \homology{}{S^1}{}{S}{H}{(\varepsilon, + \infty)}{}(X,\lambda)$, where $\varepsilon > 0$ is a small number. The $S^1$-equivariant symplectic homology also comes with maps $U$ and $\delta$, which can be composed to obtain the map
\begin{equation*}
\delta \circ U^{k-1} \circ \iota^a \colon \homology{}{S^1}{}{S}{H}{(\varepsilon,a]}{}(X) \longrightarrow H_\bullet(BS^1;\Q) \otimes H_\bullet(X, \partial X;\Q).
\end{equation*}
The $k$th \textbf{Gutt--Hutchings} capacity of $(X,\lambda)$ (\cite[Definition 4.1]{guttSymplecticCapacitiesPositive2018}) is given by
\begin{IEEEeqnarray*}{c}
\cgh{k}(X) \coloneqq \inf \{ a > 0 \mid [\mathrm{pt}] \otimes [X] \in \img (\delta \circ U^{k-1} \circ \iota^a) \}.
\end{IEEEeqnarray*}
\item Let $(X,\lambda)$ be a nondegenerate Liouville domain. There is a map
\begin{equation*}
\iota^{a,\varepsilon} \circ \alpha^{-1} \colon H_\bullet(BS^1;\Q) \otimes H_\bullet(X, \partial X;\Q) \longrightarrow \homology{}{S^1}{}{S}{H}{a}{}(X).
\end{equation*}
The $k$th \textbf{$S^1$-equivariant symplectic homology capacity} was defined by Irie in \cite[Section 2.5]{irieSymplecticHomologyFiberwise2021}, and it is given by
\begin{IEEEeqnarray*}{c}
\csh{k}(X) \coloneqq \inf \{ a > 0 \mid \iota^{a,\varepsilon} \circ \alpha^{-1}([\C P^{k-1}] \otimes [X]) = 0 \}.
\end{IEEEeqnarray*}
\item Let $(X, \lambda)$ be a nondegenerate Liouville domain. Choose a point $x \in \itr X$ and a \textbf{symplectic divisor} (germ of a symplectic submanifold of codimension 2) $D \subset X$ through $x$. The boundary $(\partial X, \lambda|_{\partial X})$ is a \textbf{contact manifold} (\cref{def:contact manifold}) and therefore has a \textbf{Reeb vector field} (\cref{def:Reeb vector field}). The \textbf{completion} of $(X, \lambda)$ (\cref{def:completion of a Liouville domain}) is the exact symplectic manifold
\begin{equation*}
(\hat{X}, \hat{\lambda}) \coloneqq (X, \lambda) \cup_{\partial X} (\R_{\geq 0} \times \partial X, e^r \lambda|_{\partial X}).
\end{equation*}
Let $\mathcal{M}_X^J(\Gamma)\p{<}{}{\mathcal{T}^{(k)}x}$ denote the moduli space of $J$-holomorphic curves in $\hat{X}$ which are positively asymptotic to the tuple of Reeb orbits $\Gamma = (\gamma_1, \ldots, \gamma_p)$ and which have contact order $k$ to $D$ at $x$.
Finally, for $\ell, k \in \Z_{\geq 1}$, the \textbf{McDuff--Siegel} capacities of $(X,\lambda)$ (\cite[Definition 3.3.1]{mcduffSymplecticCapacitiesUnperturbed2022}) are given by \begin{IEEEeqnarray*}{c} \tilde{\mathfrak{g}}^{\leq \ell}_k(X) \coloneqq \sup_{J \in \mathcal{J}(X,D)} \mathop{\inf\vphantom{\mathrm{sup}}}_{\Gamma_1, \dots, \Gamma_p} \sum_{i=1}^{p} \mathcal{A}(\Gamma_i), \end{IEEEeqnarray*} where $\mathcal{J}(X,D)$ is a set of almost complex structures on $\hat{X}$ which are cylindrical at infinity and compatible with $D$ (see \cref{sec:moduli spaces of holomorphic curves}) and the infimum is over tuples of Reeb orbits $\Gamma_1, \ldots, \Gamma_p$ such that there exist $k_1, \ldots, k_p \in \Z_{\geq 1}$ with \begin{IEEEeqnarray*}{c+x*} \sum_{i=1}^{p} \# \Gamma_i \leq \ell, \qquad \sum_{i=1}^{p} k_i \geq k, \qquad \bigproduct_{i=1}^{p} \mathcal{M}_X^J(\Gamma_i)\p{<}{}{\mathcal{T}^{(k_i)}x} \neq \varnothing. \end{IEEEeqnarray*} \item Let $(X, \lambda)$ be a nondegenerate Liouville domain. If one assumes the existence of a suitable virtual perturbation scheme, one can define the \textbf{linearized contact homology} $\mathcal{L}_{\infty}$-algebra of $(X,\lambda)$, denoted $CC(X)[-1]$ (see \cref{def:l infinity algebra,def:linearized contact homology,def:lch l infinity}). We can then consider its \textbf{bar complex} $\mathcal{B}(CC(X)[-1])$ (see \cref{def:bar complex}) and the homology of the bar complex, $H(\mathcal{B}(CC(X)[-1]))$. There is an \textbf{augmentation map} (see \cref{def:augmentation map}) \begin{IEEEeqnarray*}{c+x*} {\epsilon}_k \colon \mathcal{B}(CC(X)[-1]) \longrightarrow \Q \end{IEEEeqnarray*} which counts $J$-holomorphic curves satisfying a tangency constraint. For $\ell, k \in \Z_{\geq 1}$, Siegel \cite[Section 6.1]{siegelHigherSymplecticCapacities2020} defines the \textbf{higher symplectic capacities} by\footnote{To be precise, the definition we give may be slightly different from the one given in \cite{siegelHigherSymplecticCapacities2020}. This is due to the fact that we use an action filtration to define $\mathfrak{g}^{\leq \ell}_k(X)$, while the definition given in \cite{siegelHigherSymplecticCapacities2020} uses coefficients in a Novikov ring. See \cref{rmk:novikov coefficients} for further discussion.} \begin{IEEEeqnarray*}{c} \mathfrak{g}^{\leq \ell}_k(X) \coloneqq \inf \{ a > 0 \mid \epsilon_k \colon H(\mathcal{A}^{\leq a} \mathcal{B}^{\leq \ell}(CC(X)[-1])) \longrightarrow \Q \text{ is nonzero} \}, \end{IEEEeqnarray*} where $\mathcal{A}^{\leq a}$ denotes the action filtration (\cref{def:action filtration lch}) and $\mathcal{B}^{\leq \ell}$ denotes the word length filtration (\cref{def:word length filtration}). \end{enumerate} The previous examples illustrate the fact that capacities can be defined using many tools that exist in symplectic geometry. If a capacity encodes a quantitative property between symplectic manifolds, then an inequality between two capacities encodes a relationship between said properties. So, capacities are also an efficient language to describe quantitative relations between properties of symplectic manifolds. Consider also that one can chain inequalities together to obtain new inequalities. In fact, one of the main goals of this thesis is to compute the Lagrangian capacity of convex or concave toric domains (a toric domain is a special type of Liouville domain, see \cref{def:toric domain}). 
We give two results in this direction (\cref{lem:computation of cl,thm:my main theorem}), and the proof of both results consists in composing together several inequalities between capacities (namely the capacities $\cgh{k}$, $\tilde{\mathfrak{g}}^{\leq 1}_k$ and $\mathfrak{g}^{\leq 1}_k$ which were defined above), where each of those inequalities is proven separately. Notice that in this case, we are able to compute the Lagrangian capacity of (some) toric domains, whose definition only concerns Lagrangian submanifolds, by considering other capacities whose definition concerns holomorphic curves in the toric domain. \section{Historical remarks} The first symplectic capacity, the \textbf{Gromov width}, was constructed by Gromov \cite{gromovPseudoHolomorphicCurves1985}, although at this time the nomenclature of ``symplectic capacity'' had not been introduced. The notion of symplectic capacity was first introduced by Ekeland--Hofer in \cite{ekelandSymplecticTopologyHamiltonian1989}. In the sequel \cite{ekelandSymplecticTopologyHamiltonian1990}, the authors define the \textbf{Ekeland--Hofer capacities} $c_k^{\mathrm{EH}}$ (for every $k \in \Z_{\geq 1}$) using variational techniques for the symplectic action functional. The \textbf{Hofer--Zehnder capacity} \cite{hoferNewCapacitySymplectic1990,hoferSymplecticInvariantsHamiltonian2011} is another example of a capacity which can be defined by considering Hamiltonian systems. One can consider \textbf{spectral capacities}, which are generally defined as a minimal or maximal action of an orbit (Hamiltonian or Reeb) which is ``topologically visible''. The Gutt--Hutchings capacities \cite{guttSymplecticCapacitiesPositive2018}, $S^1$-equivariant symplectic homology capacities \cite{irieSymplecticHomologyFiberwise2021}, and Siegel's higher symplectic capacities \cite{siegelHigherSymplecticCapacities2020} mentioned above are examples of this principle. Other authors have used constructions like this, namely Hofer \cite{hoferEstimatesEnergySymplectic1993}, Viterbo \cite{viterboSymplecticTopologyGeometry1992,viterboFunctorsComputationsFloer1999}, Schwarz \cite{schwarzActionSpectrumClosed2000}, Oh \cite{ohChainLevelFloer2002,ohMinimaxTheorySpectral2002,ohSpectralInvariantsLength2005}, Frauenfelder--Schlenk \cite{frauenfelderHamiltonianDynamicsConvex2007}, Schlenk \cite{schlenkEmbeddingProblemsSymplectic2008} and Ginzburg--Shon \cite{ginzburgFilteredSymplecticHomology2018}. Using embedded contact homology (ECH), Hutchings \cite{hutchingsQuantitativeEmbeddedContact2011} defines the \textbf{ECH capacities} $c_k^{\mathrm{ECH}}$ (for every $k \in \Z_{\geq 1}$). \section{Main results} As explained before, one of the main goals of this thesis is to compute the Lagrangian capacity of (some) toric domains. A \textbf{toric domain} is a Liouville domain of the form $X_{\Omega} \coloneqq \mu^{-1}(\Omega) \subset \C^n$, where $\Omega \subset \R^n_{\geq 0}$ and $\mu(z_1,\ldots,z_n) = \pi(|z_1|^2,\ldots,|z_n|^2)$. 
The \textbf{ball}, the \textbf{cylinder} and the \textbf{ellipsoid}, which are defined by \begin{IEEEeqnarray*}{rCrClCl} B^{2n}(a) & \coloneqq & \{ z & = & (z_1,\ldots,z_n) \in \C^n & \mid & \pi |z|^2 \leq a \}, \\ Z^{2n}(a) & \coloneqq & \{ z & = & (z_1,\ldots,z_n) \in \C^n & \mid & \pi |z_1|^2 \leq a \}, \\ E^{2n}(a_1,\ldots,a_n) & \coloneqq & \Big\{ z & = & (z_1,\ldots,z_n) \in \C^n & \Big| & \sum_{j=1}^{n} \frac{\pi |z_j|^2}{a_j} \leq 1 \Big\}, \end{IEEEeqnarray*} are examples of toric domains.\footnote{Strictly speaking, the cylinder is noncompact, so it is not a toric domain. We will mostly ignore this small discrepancy in nomenclature, but sometimes we will refer to spaces like the cylinder as ``noncompact toric domains''.} The \textbf{diagonal} of a toric domain $X_{\Omega}$ is \begin{IEEEeqnarray*}{c} \delta_\Omega \coloneqq \max \{ a \mid (a,\ldots,a) \in \Omega \}. \end{IEEEeqnarray*} It is easy to show (see \cref{lem:c square leq c lag,lem:c square geq delta}) that $c_L(X_\Omega) \geq \delta_\Omega$ for any convex or concave toric domain $X_{\Omega}$. Cieliebak--Mohnke give the following results for the Lagrangian capacity of the ball and the cylinder. \begin{copiedtheorem}[{\cite[Corollary 1.3]{cieliebakPuncturedHolomorphicCurves2018}}]{prp:cl of ball} The Lagrangian capacity of the ball is \begin{IEEEeqnarray*}{c+x*} c_L(B^{2n}(1)) = \frac{1}{n}.\footnote{In this introduction, we will be showcasing many results from the main text. The theorems appear here as they do on the main text, in particular with the same numbering. The numbers of the theorems in the introduction have hyperlinks to their corresponding location in the main text.} \end{IEEEeqnarray*} \end{copiedtheorem} \begin{copiedtheorem}[{\cite[p.~215-216]{cieliebakPuncturedHolomorphicCurves2018}}]{prp:cl of cylinder} The Lagrangian capacity of the cylinder is \begin{IEEEeqnarray*}{c+x*} c_L(Z^{2n}(1)) = 1. \end{IEEEeqnarray*} \end{copiedtheorem} In other words, if $X_{\Omega}$ is the ball or the cylinder then $c_L(X_{\Omega}) = \delta_\Omega$. This motivates the following conjecture by Cieliebak--Mohnke. \begin{copiedtheorem}[{\cite[Conjecture 1.5]{cieliebakPuncturedHolomorphicCurves2018}}]{conj:cl of ellipsoid} The Lagrangian capacity of the ellipsoid is \begin{equation*} c_L(E(a_1,\ldots,a_n)) = \p{}{2}{\frac{1}{a_1} + \cdots + \frac{1}{a_n}}^{-1}. \end{equation*} \end{copiedtheorem} A more general form of the previous conjecture is the following. \begin{copiedtheorem}{conj:the conjecture} If $X_{\Omega}$ is a convex or concave toric domain then \begin{IEEEeqnarray*}{c+x*} c_L(X_{\Omega}) = \delta_\Omega. \end{IEEEeqnarray*} \end{copiedtheorem} The goal of this project is to prove \cref{conj:the conjecture}. We will offer two main results in this direction. \begin{enumerate} \item In \cref{lem:computation of cl}, we prove that $c_L(X_\Omega) = \delta_\Omega$ whenever $X_{\Omega}$ is convex and $4$-dimensional. \item In \cref{thm:my main theorem}, using techniques from contact homology we prove that $c_L(X_\Omega) = \delta_\Omega$ for any convex or concave toric domain $X_{\Omega}$. More specifically, in this case we are working under the assumption that there is a virtual perturbation scheme such that the linearized contact homology of a nondegenerate Liouville domain can be defined (see \cref{sec:assumptions of virtual perturbation scheme}). \end{enumerate} Notice that by the previous discussion, we only need to prove the hard inequality $c_L(X_{\Omega}) \leq \delta_\Omega$. 
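As a quick consistency check, the diagonals of these model domains can be computed directly from the definition: $(a, \ldots, a) \in \Omega_E(a_1, \ldots, a_n)$ if and only if $a \sum_{j=1}^{n} 1/a_j \leq 1$, and therefore
\begin{IEEEeqnarray*}{c+x*}
\delta_{\Omega_E(a_1, \ldots, a_n)} = \p{}{2}{\frac{1}{a_1} + \cdots + \frac{1}{a_n}}^{-1}, \qquad \delta_{\Omega_B(1)} = \frac{1}{n}, \qquad \delta_{\Omega_Z(1)} = 1.
\end{IEEEeqnarray*}
In particular, \cref{prp:cl of ball,prp:cl of cylinder} verify \cref{conj:the conjecture} for the ball and the cylinder, and \cref{conj:cl of ellipsoid} is exactly the special case of \cref{conj:the conjecture} where $X_{\Omega}$ is an ellipsoid.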
We now describe our results concerning the capacities mentioned so far. The key step in proving $c_L(X_{\Omega}) \leq \delta_\Omega$ is the following inequality between $c_L$ and $\tilde{\mathfrak{g}}^{\leq 1}_k$. \begin{copiedtheorem}{thm:lagrangian vs g tilde} If $(X, \lambda)$ is a Liouville domain then \begin{IEEEeqnarray*}{c+x*} c_L(X) \leq \inf_k^{} \frac{\tilde{\mathfrak{g}}_k^{\leq 1}(X)}{k}. \end{IEEEeqnarray*} \end{copiedtheorem} Indeed, this result can be combined with the following results from \cite{mcduffSymplecticCapacitiesUnperturbed2022} and \cite{guttSymplecticCapacitiesPositive2018}. \begin{copiedtheorem}[{\cite[Proposition 5.6.1]{mcduffSymplecticCapacitiesUnperturbed2022}}]{prp:g tilde and cgh} If $X_{\Omega}$ is a $4$-dimensional convex toric domain then \begin{IEEEeqnarray*}{c+x*} \tilde{\mathfrak{g}}^{\leq 1}_k(X_\Omega) = \cgh{k}(X_\Omega). \end{IEEEeqnarray*} \end{copiedtheorem} \begin{copiedtheorem}[{\cite[Lemma 1.19]{guttSymplecticCapacitiesPositive2018}}]{lem:cgh of nondisjoint union of cylinders} $\cgh{k}(N^{2n}(\delta)) = \delta \, (k + n - 1)$. \end{copiedtheorem} Here, \begin{IEEEeqnarray*}{c} N^{2n}(\delta) \coloneqq \p{c}{2}{ (z_1,\ldots,z_n) \in \C^n \ \Big| \ \exists j=1,\ldots,n \colon \frac{\pi |z_j|^2}{\delta} \leq 1 } \end{IEEEeqnarray*} is the \textbf{nondisjoint union of cylinders}. Combining the three previous results, we get the following particular case of \cref{conj:the conjecture}. Since the proof is short, we present it here as well. \begin{copiedtheorem}{lem:computation of cl} If $X_{\Omega}$ is a $4$-dimensional convex toric domain then \begin{IEEEeqnarray*}{c+x*} c_L(X_{\Omega}) = \delta_\Omega. \end{IEEEeqnarray*} \end{copiedtheorem} \begin{proof} For every $k \in \Z_{\geq 1}$, \begin{IEEEeqnarray*}{rCls+x*} \delta_\Omega & \leq & c_L(X_{\Omega}) & \quad [\text{by \cref{lem:c square geq delta,lem:c square leq c lag}}] \\ & \leq & \frac{\tilde{\mathfrak{g}}^{\leq 1}_{k}(X_{\Omega})}{k} & \quad [\text{by \cref{thm:lagrangian vs g tilde}}] \\ & = & \frac{\cgh{k}(X_{\Omega})}{k} & \quad [\text{by \cref{prp:g tilde and cgh}}] \\ & \leq & \frac{\cgh{k}(N(\delta_\Omega))}{k} & \quad [\text{$X_{\Omega}$ is convex, hence $X_{\Omega} \subset N(\delta_\Omega)$}] \\ & = & \frac{\delta_\Omega(k+1)}{k} & \quad [\text{by \cref{lem:cgh of nondisjoint union of cylinders}}]. \end{IEEEeqnarray*} The result follows by taking the infimum over $k$. \end{proof} Notice that in the proof of this result, we used the Gutt--Hutchings capacities because the value $\cgh{k}(N^{2n}(\delta))$ is known and provides the desired upper bound for $c_L(X_{\Omega})$. Notice also that the hypothesis of the toric domain being convex and $4$-dimensional is present because we wish to use \cref{prp:g tilde and cgh} to compare $\tilde{\mathfrak{g}}^{\leq 1}_k$ and $\cgh{k}$. This suggests that we try to compare $c_L$ and $\cgh{k}$ directly. \begin{copiedtheorem}{thm:main theorem} If $X$ is a Liouville domain, $\pi_1(X) = 0$ and $c_1(TX)|_{\pi_2(X)} = 0$, then \begin{equation*} c_L(X,\lambda) \leq \inf_k \frac{\cgh{k}(X,\lambda)}{k}. \end{equation*} \end{copiedtheorem} We will try to prove \cref{thm:main theorem} by mimicking the proof of \cref{thm:lagrangian vs g tilde}. Unfortunately we will be unsuccessful, because we run into difficulties coming from the fact that in $S^1$-equivariant symplectic homology, the Hamiltonians and almost complex structures can depend on the domain and on a high dimensional sphere $S^{2N+1}$. 
Before we move on to the discussion about computations using contact homology, we show one final result which uses only the properties of $S^1$-equivariant symplectic homology.
\begin{copiedtheorem}{thm:ghc and s1eshc}
If $(X, \lambda)$ is a Liouville domain, then
\begin{enumerate}
\item $\cgh{k}(X) \leq \csh{k}(X)$;
\item $\cgh{k}(X) = \csh{k}(X)$ provided that $X$ is star-shaped.
\end{enumerate}
\end{copiedtheorem}
We now present another approach that can be used to compute $c_L$, using linearized contact homology. This has the disadvantage that at the time of writing, linearized contact homology has not yet been defined in the generality that we need (see \cref{sec:assumptions of virtual perturbation scheme} and more specifically \cref{assumption}). Using linearized contact homology, one can define the higher symplectic capacities $\mathfrak{g}^{\leq \ell}_k$. The definition of $\mathfrak{g}^{\leq \ell}_k$ for any $\ell \in \Z_{\geq 1}$ relies on the $\mathcal{L}_{\infty}$-algebra structure of the linearized contact homology chain complex, as well as an $\mathcal{L}_{\infty}$-augmentation map $\epsilon_k$. However, to prove that $c_L(X_{\Omega}) \leq \delta_\Omega$, we will only need the capacity $\mathfrak{g}^{\leq 1}_k$, and for this the $\mathcal{L}_{\infty}$-algebra structure is not necessary. The key idea is that the capacities $\mathfrak{g}^{\leq 1}_k$ can be compared to $\tilde{\mathfrak{g}}^{\leq 1}_k$ and $\cgh{k}$.
\begin{copiedtheorem}[{\cite[Section 3.4]{mcduffSymplecticCapacitiesUnperturbed2022}}]{thm:g tilde vs g hat}
If $X$ is a Liouville domain then
\begin{IEEEeqnarray*}{c+x*}
\tilde{\mathfrak{g}}^{\leq \ell}_k(X) \leq {\mathfrak{g}}^{\leq \ell}_k(X).
\end{IEEEeqnarray*}
\end{copiedtheorem}
\begin{copiedtheorem}{thm:g hat vs gh}
If $X$ is a Liouville domain such that $\pi_1(X) = 0$ and $2 c_1(TX) = 0$ then
\begin{IEEEeqnarray*}{c+x*}
{\mathfrak{g}}^{\leq 1}_k(X) = \cgh{k}(X).
\end{IEEEeqnarray*}
\end{copiedtheorem}
These two results show that $\tilde{\mathfrak{g}}^{\leq 1}_k(X_\Omega) \leq \cgh{k}(X_\Omega)$ (under \cref{assumption}). Using the same proof as before, we conclude that $c_L(X_{\Omega}) = \delta_\Omega$.
\begin{copiedtheorem}{thm:my main theorem}
Under \cref{assumption}, if $X_\Omega$ is a convex or concave toric domain then
\begin{IEEEeqnarray*}{c+x*}
c_L(X_{\Omega}) = \delta_\Omega.
\end{IEEEeqnarray*}
\end{copiedtheorem}
\section{Proof sketches}
In the last section, we explained our proof of $c_L(X_{\Omega}) = \delta_\Omega$ (first in the case where $X_{\Omega}$ is convex and $4$-dimensional, and second assuming that \cref{assumption} holds). In this section, we explain the proofs of the relations
\begin{IEEEeqnarray*}{rCls+x*}
c_L(X) & \leq & \inf_k \frac{\tilde{\mathfrak{g}}^{\leq 1}_k(X)}{k}, \\
\tilde{\mathfrak{g}}^{\leq \ell}_k(X) & \leq & \mathfrak{g}^{\leq \ell}_k(X), \\
\mathfrak{g}_k^{\leq 1}(X) & = & \cgh{k}(X),
\end{IEEEeqnarray*}
which were mentioned without proof in the last section. Each of these relations will be proved in the main text, so the proof sketches of this section act as a way of showcasing what technical tools will be required for our purposes. In \cref{sec:symplectic capacities}, we study the question of extending the domain of a symplectic capacity from the class of nondegenerate Liouville domains to the class of Liouville domains which are possibly degenerate. By this discussion, it suffices to prove each theorem for nondegenerate Liouville domains only.
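Schematically, and under \cref{assumption}, these three relations combine with the monotonicity property of $\cgh{k}$ and with \cref{lem:cgh of nondisjoint union of cylinders}: whenever $X_{\Omega} \subset N^{2n}(\delta_\Omega)$ (as is the case for convex toric domains, see the proof of \cref{lem:computation of cl}), we get, for every $k \in \Z_{\geq 1}$,
\begin{IEEEeqnarray*}{c+x*}
c_L(X_{\Omega}) \leq \frac{\tilde{\mathfrak{g}}^{\leq 1}_k(X_{\Omega})}{k} \leq \frac{\mathfrak{g}^{\leq 1}_k(X_{\Omega})}{k} = \frac{\cgh{k}(X_{\Omega})}{k} \leq \frac{\cgh{k}(N^{2n}(\delta_\Omega))}{k} = \frac{\delta_\Omega \, (k + n - 1)}{k},
\end{IEEEeqnarray*}
where the topological hypotheses of \cref{thm:g hat vs gh} are satisfied because $X_{\Omega}$ is a star-shaped domain in $\C^n$. Letting $k \longrightarrow \infty$ then yields the hard inequality $c_L(X_{\Omega}) \leq \delta_\Omega$.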
\begin{secondcopy}{thm:lagrangian vs g tilde} If $(X, \lambda)$ is a Liouville domain then \begin{IEEEeqnarray*}{c+x*} c_L(X) \leq \inf_k^{} \frac{\tilde{\mathfrak{g}}_k^{\leq 1}(X)}{k}. \end{IEEEeqnarray*} \end{secondcopy} \begin{proof}[Proof sketch] Let $k \in \Z_{\geq 1}$ and $L \subset \itr X$ be an embedded Lagrangian torus. Denote $a \coloneqq \tilde{\mathfrak{g}}_k^{\leq 1}(X)$. We wish to show that there exists $\sigma \in \pi_2(X,L)$ such that $0 < \omega(\sigma) \leq a / k$. Choose a suitable Riemannian metric on $L$, given by \cref{lem:geodesics lemma CM abs} (which is a restatement of \cite[Lemma 2.2]{cieliebakPuncturedHolomorphicCurves2018}). Now, consider the unit cotangent bundle $S^* L$ of $L$. Choose a point $x$ inside the unit codisk bundle $D^* L$, a symplectic divisor $D$ through $x$, and a sequence $(J_t)_{t \in [0,1)}$ of almost complex structures on $\hat{X}$ realizing SFT neck stretching along $S^* L$. By definition of $\tilde{\mathfrak{g}}_k^{\leq 1}(X) \eqqcolon a$, there exists a Reeb orbit $\gamma_0$ together with a sequence $(u_t)_t$ of $J_t$-holomorphic curves $u_t \in \mathcal{M}^{J_t}_X(\gamma_0)\p{<}{}{\mathcal{T}^{(k)}x}$. By the SFT-compactness theorem, the sequence $(u_t)_{t}$ converges to a holomorphic building $F = (F^1,\ldots,F^N)$, where each $F^{\nu}$ is a holomorphic curve. Denote by $C$ the component of $F^1 \subset T^* L$ which carries the tangency constraint. The choices of almost complex structures $J_t$ can be done in such a way that the simple curve corresponding to $C$ is regular, i.e. it is an element of a moduli space which is a manifold. Using the dimension formula for this moduli space, it is possible to conclude that $C$ must have at least $k + 1$ punctures (see \cref{thm:transversality with tangency,lem:punctures and tangency simple,lem:punctures and tangency}). This implies that $C$ gives rise to at least $k > 0$ disks $D_1, \ldots, D_k$ in $X$ with boundary on $L$. The total energy of the disks is less or equal to $a$. Therefore, one of the disks must have energy less or equal to $a/k$. We now address a small imprecision in the proof we just described. We need to show that $\omega(D_i) \leq a$ for some $i = 1, \ldots, k$. However, the above proof actually shows that $\tilde{\omega}(D_i) \leq a$, where $\tilde{\omega}$ is a piecewise smooth $2$-form on $\hat{X} \setminus L$ given as in \cref{def:energy of a asy cylindrical holomorphic curve}. This form has the property that $\omega = \tilde{\omega}$ outside $S^* L$. The solution then is to neck stretch along $S_{\delta}^* L$ for some small $\delta > 0$. In this case, one can bound $\omega(D_i)$ by $\tilde{\omega}(D_i)$ times a function of $\delta$ (see \cref{lem:energy wrt different forms}), and we can still obtain the desired bound for $\omega(D_i)$. \end{proof} \begin{secondcopy}[\cite[Section 3.4]{mcduffSymplecticCapacitiesUnperturbed2022}]{thm:g tilde vs g hat} If $X$ is a Liouville domain then \begin{IEEEeqnarray*}{c+x*} \tilde{\mathfrak{g}}^{\leq \ell}_k(X) \leq {\mathfrak{g}}^{\leq \ell}_k(X). \end{IEEEeqnarray*} \end{secondcopy} \begin{proof}[Proof sketch] Choose a point $x \in \itr X$ and a symplectic divisor $D$ through $x$. Let $J \in \mathcal{J}(X,D)$ and consider the bar complex $\mathcal{B}(CC(X)[-1])$, computed with respect to $J$. Suppose that $a > 0$ and $\beta \in H(\mathcal{A}^{\leq a} \mathcal{B}^{\leq \ell}(CC(X)[-1]))$ is such that $\epsilon_k(\beta) \neq 0$. 
By \cref{thm:g tilde two definitions}, \begin{IEEEeqnarray*}{c+x*} \tilde{\mathfrak{g}}^{\leq \ell}_k(X) = \sup_{J \in \mathcal{J}(X,D)} \mathop{\inf\vphantom{\mathrm{sup}}}_{\Gamma} \mathcal{A}(\Gamma), \end{IEEEeqnarray*} where the infimum is taken over tuples of Reeb orbits $\Gamma = (\gamma_1, \ldots, \gamma_p)$ such that $p \leq \ell$ and $\overline{\mathcal{M}}^{J}_{X}(\Gamma)\p{<}{}{\mathcal{T}^{(k)}x} \neq \varnothing$. The class $\beta$ is a linear combination of words of Reeb orbits $\Gamma$ such that $\# \Gamma \leq \ell$ and $\mathcal{A}(\Gamma) \leq a$. Since $\epsilon_k(\beta) \neq 0$, one of the words in this linear combination, say $\Gamma$, is such that the virtual count of $\overline{\mathcal{M}}^{J}_{X}(\Gamma)\p{<}{}{\mathcal{T}^{(k)}x}$ is nonzero. By assumption on the virtual perturbation scheme, $\overline{\mathcal{M}}^{J}_{X}(\Gamma)\p{<}{}{\mathcal{T}^{(k)}x}$ is nonempty, which is the condition in the definition of $\tilde{\mathfrak{g}}^{\leq \ell}_k(X)$. \end{proof} \begin{secondcopy}{thm:g hat vs gh} If $X$ is a Liouville domain such that $\pi_1(X) = 0$ and $2 c_1(TX) = 0$ then \begin{IEEEeqnarray*}{c+x*} {\mathfrak{g}}^{\leq 1}_k(X) = \cgh{k}(X). \end{IEEEeqnarray*} \end{secondcopy} \begin{proof}[Proof sketch] Choose a small ellipsoid $E$ such that there exists a strict exact symplectic embedding $\phi \colon E \longrightarrow X$. There are associated Viterbo transfer maps (see \cref{sec:viterbo transfer map of liouville embedding,sec:viterbo transfer map of exact symplectic embedding}, where we define the Viterbo transfer map of $S^1$-equivariant symplectic homology) \begin{IEEEeqnarray*}{rCls+x*} \phi_!^{S^1} \colon \homology{}{S^1}{}{S}{H}{}{}(X) & \longrightarrow & \homology{}{S^1}{}{S}{H}{}{}(E), \\ \phi_! \colon CH(X) & \longrightarrow & CH(E). \end{IEEEeqnarray*} Because of the topological conditions on $X$, the $S^1$-equivariant symplectic homology and the linearized contact homology have $\Z$-gradings given by the Conley--Zehnder index. In this context, one can offer an alternative definition of the Gutt--Hutchings capacities via the Viterbo transfer map, namely $\cgh{k}(X)$ is the infimum over $a$ such that the map \begin{equation*} \begin{tikzcd} \homology{}{S^1}{}{S}{H}{(\varepsilon,a]}{n - 1 + 2k}(X) \ar[r, "\iota^{S^1,a}"] & \homology{}{S^1}{}{S}{H}{+}{n - 1 + 2k}(X) \ar[r, "\phi_!^{S^1}"] & \homology{}{S^1}{}{S}{H}{+}{n - 1 + 2k}(E) \end{tikzcd} \end{equation*} is nonzero (see \cref{def:ck alternative}). Bourgeois--Oancea \cite{bourgeoisEquivariantSymplecticHomology2016} define an isomorphism \begin{IEEEeqnarray*}{c+x*} \Phi_{\mathrm{BO}} \colon \homology{}{S^1}{}{S}{H}{+}{}(X) \longrightarrow CH(X) \end{IEEEeqnarray*} between positive $S^1$-equivariant symplectic homology and linearized contact homology (whenever the latter is defined). All the maps we have just described assemble into the following commutative diagram.
\begin{equation*} \begin{tikzcd} SH^{S^1,(\varepsilon,a]}_{n - 1 + 2k}(X) \ar[r, "\iota^{S^1,a}"] \ar[d, hook, two heads, swap, "\Phi_{\mathrm{BO}}^a"] & SH^{S^1,+}_{n - 1 + 2k}(X) \ar[r, "\phi_!^{S^1}"] \ar[d, hook, two heads, "\Phi_{\mathrm{BO}}"] & SH^{S^1,+}_{n - 1 + 2k}(E) \ar[d, hook, two heads, "\Phi_{\mathrm{BO}}"] \\ CH^{a}_{n - 1 + 2k}(X) \ar[r, "\iota^{a}"] \ar[d, equals] & CH_{n - 1 + 2k}(X) \ar[r, "\phi_!"] \ar[d, equals] & CH_{n - 1 + 2k}(E) \ar[d, "{\epsilon}^E_k"] \\ CH^{a}_{n - 1 + 2k}(X) \ar[r, swap, "\iota^{a}"] & CH_{n - 1 + 2k}(X) \ar[r, swap, "{\epsilon}_k^X"] & \Q \end{tikzcd} \end{equation*} Here, the vertical arrows between the top two rows are the Bourgeois--Oancea isomorphism, and the maps $\epsilon_k^X$ and $\epsilon_k^E$ are the augmentation maps of $X$ and $E$. Using this information, we can show that $\cgh{k}(X) \leq \mathfrak{g}^{\leq 1}_k(X)$: \begin{IEEEeqnarray*}{rCls+x*} \cgh{k}(X) & = & \inf \{ a > 0 \mid \phi_!^{S^1} \circ \iota^{S^1,a} \neq 0 \} & \quad [\text{by the alternative definition of $\cgh{k}$}] \\ & \leq & \inf \{ a > 0 \mid {\epsilon}_k^X \circ \iota^{a} \neq 0 \} & \quad [\text{since the diagram commutes}] \\ & = & {\mathfrak{g}}^{\leq 1}_k(X) & \quad [\text{by definition of $\mathfrak{g}^{\leq 1}_k$}]. \end{IEEEeqnarray*} In this computation, the inequality in the second line is an equality if $\epsilon^E_k$ is an isomorphism. The proof of this statement is done in \cref{sec:augmentation map of an ellipsoid}, using the techniques from \cref{sec:cr operators,sec:functional analytic setup}. The key ideas are the following. One can show that $CH_{n - 1 + 2k}(E) \cong \Q$ (see \cref{lem:lch of ellipsoid}), and therefore it is enough to show that $\epsilon_k^E$ is nonzero. Recall that $\epsilon_k^E$ is given by the virtual count of holomorphic curves in $E$ satisfying a tangency constraint. We count those curves explicitly in \cref{lem:moduli spaces of ellipsoids have 1 element}. Notice that here we need to justify that the virtual count of curves equals the usual signed count. This follows by assumption on the virtual perturbation scheme and because in \cref{sec:augmentation map of an ellipsoid}, we also show that the moduli spaces are transversely cut out. \end{proof} \section{Outline of the thesis} We now give a chapter-by-chapter outline of this thesis. In \textbf{\cref{chp:symplectic manifolds}} we review the various types of manifolds that will show up in this thesis, i.e. symplectic manifolds and contact manifolds. We talk about the various types of vector fields in these manifolds (Hamiltonian vector field, Liouville vector field, Reeb vector field) and mention the properties of their flows. We give the definition of special types of symplectic manifolds, from less to more specific: Liouville domains, star-shaped domains, toric domains. Finally, we explain two constructions which will be present throughout: the symplectization of a contact manifold, and the completion of a Liouville domain. In \textbf{\cref{chp:indices}} we give a review of the Conley--Zehnder indices. In order to list the properties of the Conley--Zehnder index, one needs to mention the Maslov index and the first Chern class, so we offer a review of those as well. We explain how to define the Conley--Zehnder index of an orbit in a symplectic or contact manifold by defining an induced path of symplectic matrices via a trivialization. Finally, we study the Conley--Zehnder index of a Reeb orbit in a unit cotangent bundle.
The Conley--Zehnder index is needed for our purposes because it provides the grading of $S^1$-equivariant symplectic homology and of linearized contact homology. \textbf{\cref{chp:holomorphic curves}} is about the analytic properties of holomorphic curves and Floer trajectories. We define punctured Riemann surfaces as the domains for such curves, and symplectic cobordisms as the targets for such curves. We prove the energy identity for holomorphic curves, as well as the maximum principle. Then, we discuss the known compactness and transversality for moduli spaces of asymptotically cylindrical holomorphic curves (these are the moduli spaces which are considered in linearized contact homology). The second half of this chapter is about solutions of the ``parametrized Floer equation'' (solutions to this equation are the trajectories which are counted in the differential of $S^1$-equivariant Floer chain complex). We prove an energy inequality for Floer trajectories, as well as three ``confinement lemmas'': the maximum principle, the asymptotic behaviour lemma, and the no escape lemma. Finally, we prove compactness and transversality for moduli spaces of solutions of the parametrized Floer equation using the corresponding results for moduli spaces of solutions of the Floer equation. In \textbf{\cref{chp:floer}} we define the $S^1$-equivariant symplectic homology and establish its structural properties. First we define the $S^1$-equivariant Floer chain complex and its homology. The $S^1$-equivariant symplectic homology is then defined by taking the limit with respect to an increasing sequence of Hamiltonians of the $S^1$-equivariant Floer homology. We devote two sections to showing that $S^1$-equivariant symplectic homology is a functor, which amounts to defining the Viterbo transfer maps and proving their properties. Finally, we define a $\delta$ map, which enters the definition of the Gutt--Hutchings capacities. \textbf{\cref{chp:symplectic capacities}} is about symplectic capacities. The first section is about generalities about symplectic capacities. We show how to extend a capacity of nondegenerate Liouville domains to a capacity of (possibly degenerate) Liouville domains. The next three sections are each devoted to defining and proving the properties of a specific capacity, namely the Lagrangian capacity $c_L$, the Gutt--Hutchings capacities $\cgh{k}$ and the $S^1$-equivariant symplectic homology capacities $\csh{k}$, and finally the McDuff--Siegel capacities $\tilde{\mathfrak{g}}^{\leq \ell}_k$. In the section about the Lagrangian capacity, we also state the conjecture that we will try to solve in the remainder of the thesis, i.e. $c_L(X_{\Omega}) = \delta_\Omega$ for a convex or concave toric domain $X_{\Omega}$. The final section is devoted to computations. We show that $c_L(X) \leq \inf_k^{} \tilde{\mathfrak{g}}^{\leq 1}_k(X) / k$. We use this result to prove the conjecture in the case where $X_{\Omega}$ is $4$-dimensional and convex. \textbf{\cref{chp:contact homology}} introduces the linearized contact homology of a nondegenerate Liouville domain. The idea is that using the linearized contact homology, one can define the higher symplectic capacities, which will allow us to prove $c_L(X_{\Omega}) = \delta_\Omega$ for any convex or concave toric domain $X_{\Omega}$ (but under the assumption that linearized contact homology and the augmentation map are well-defined). 
We give a review of real linear Cauchy--Riemann operators on complex vector bundles, with a special emphasis on criteria for surjectivity in the case where the bundle has complex rank $1$. We use this theory to prove that moduli spaces of curves in ellipsoids are transversely cut out and in particular that the augmentation map of an ellipsoid is an isomorphism. The final section is devoted to computations. We show that $\mathfrak{g}^{\leq 1}_k(X) = \cgh{k}(X)$, and use this result to prove our conjecture (again, under \cref{assumption}). \chapter{\texorpdfstring{$S^1$}{S1}-equivariant Floer homology} \label{chp:floer} \section{Categorical setup} In this section, we define categories that will allow us to express the constructions of this chapter as functors. We will define a category of complexes (see \cref{def:category complexes,def:category of complexes up to homotopy}) and a category of modules (see \cref{def:category modules}). Associated to these, there is a Homology functor between the two categories (\cref{def:homology functor}). \begin{remark} Recall that a \textbf{preorder} on a set $S$ is a binary relation $\leq$ which is reflexive and transitive. A preordered set $(S,\leq)$ can be seen as a category $S$ by declaring that objects of $S$ are elements of the set $S$ and that there exists a unique morphism from $a$ to $b$ if and only if $a \leq b$, for $a, b \in S$. Throughout this thesis, we will view $\R$ as a category in this sense. \end{remark} \begin{definition} Let $\mathbf{C}$ be a category. A \textbf{filtered object} in $\mathbf{C}$ is a functor $V \colon \R \longrightarrow \mathbf{C}$. A \textbf{morphism} of filtered objects from $V$ to $W$ is a natural transformation $\phi \colon V \longrightarrow W$. We denote by $\Hom(\R, \mathbf{C})$ the category of filtered objects in $\mathbf{C}$. In this case, we will use the following notation. If $a \in \R$, we denote by $V^a$ the corresponding object of $\mathbf{C}$. If $\mathbf{C}$ is abelian and $a \leq b \in \R$, we denote $V^{(a,b]} \coloneqq V^b / V^a \coloneqq \coker (\iota^{b,a} \colon V^a \longrightarrow V^b)$. \end{definition} \begin{definition} \label{def:category complexes} Denote by $\tensor[_\Q]{\mathbf{Mod}}{}$ the category of $\Q$-modules. We define a category $\komp$ as follows. An object of $\komp$ is a triple $(C,\del,U)$, where $C \in \Hom(\R, \tensor[_\Q]{\mathbf{Mod}}{})$ is a filtered $\Q$-module and $\partial, U \colon C \longrightarrow C$ are natural transformations such that \begin{IEEEeqnarray*}{lCls+x*} \partial \circ \partial & = & 0, \\ \partial \circ U & = & U \circ \partial. \end{IEEEeqnarray*} A morphism in $\komp$ from $(C,\del^C,U^C)$ to $(D,\del^D,U^D)$ is a natural transformation $\phi \colon C \longrightarrow D$ for which there exists a natural transformation $T \colon C \longrightarrow D$ such that \begin{IEEEeqnarray*}{rCrCl} \partial^D & \circ \phi - \phi \circ & \partial^C & = & 0, \\ U^D & \circ \phi - \phi \circ & U^C & = & \partial^D \circ T + T \circ \partial^C. \end{IEEEeqnarray*} \end{definition} \begin{definition} \phantomsection\label{def:category of complexes up to homotopy} Let $\phi, \psi \colon (C, \partial^C, U^C) \longrightarrow (D, \partial^D, U^D)$ be morphisms in $\komp$. A \textbf{chain homotopy} from $\phi$ to $\psi$ is a natural transformation $T \colon C \longrightarrow D$ such that \begin{IEEEeqnarray*}{c+x*} \psi - \phi = \partial^D \circ T + T \circ \partial^C. 
\end{IEEEeqnarray*} The notion of chain homotopy defines an equivalence relation $\sim$ on each set of morphisms in $\komp$. We denote the quotient category (see for example \cite[Theorem 0.4]{rotmanIntroductionAlgebraicTopology1988}) by \begin{IEEEeqnarray*}{c+x*} \comp \coloneqq \komp / \sim. \end{IEEEeqnarray*} \end{definition} As we will see in \cref{sec:Floer homology}, the $S^1$-equivariant Floer chain complex of $X$ (with respect to a Hamiltonian $H$ and almost complex structure $J$) is an object \begin{IEEEeqnarray*}{c+x*} \homology{}{S^1}{}{F}{C}{}{}(X,H,J) \in \comp. \end{IEEEeqnarray*} \begin{definition} \label{def:category modules} We define a category $\modl$ as follows. An object of $\modl$ is a pair $(C,U)$, where $C \in \Hom(\R, \tensor[_\Q]{\mathbf{Mod}}{})$ is a filtered $\Q$-module and $U \colon C \longrightarrow C$ is a natural transformation. A morphism in $\modl$ from $(C,U^C)$ to $(D,U^D)$ is a natural transformation $\phi \colon C \longrightarrow D$ such that $\phi \circ U^C = U^D \circ \phi$. \end{definition} In \cref{sec:Floer homology}, we will show that the $S^1$-equivariant Floer homology of $X$ (with respect to a Hamiltonian $H$ and almost complex structure $J$) and the $S^1$-equivariant symplectic homology of $X$ are objects of $\modl$: \begin{IEEEeqnarray*}{rCls+x*} \homology{}{S^1}{}{F}{H}{}{}(X,H,J) & \in & \modl, \\ \homology{}{S^1}{}{S}{H}{}{}(X) & \in & \modl. \end{IEEEeqnarray*} \begin{lemma} The category $\modl$ is abelian, complete and cocomplete. \end{lemma} \begin{proof} Recall the relevant definitions: a category $\mathbf{I}$ is small if the class of morphisms of $\mathbf{I}$ is a set, and a category is (co)complete if for any small category $\mathbf{I}$ and any functor $F \colon \mathbf{I} \longrightarrow \modl$, the (co)limit of $F$ exists. By \cite[Theorem 3.4.12]{riehlCategoryTheoryContext2016}, it suffices to show that $\modl$ has products, equalizers, coproducts and coequalizers. Since the morphism sets of $\modl$ are $\Q$-vector spaces and equalizers and coequalizers can be computed as kernels and cokernels of differences, it is enough to show that $\modl$ has products, coproducts, kernels and cokernels. First, notice that $\tensor[_\Q]{\mathbf{Mod}}{}$ is abelian, complete and cocomplete. Therefore, the same is true for $\Hom(\R, \tensor[_\Q]{\mathbf{Mod}}{})$. Let $f \colon C \longrightarrow D$ be a morphism in $\modl$. Then $f$ has a kernel and a cokernel in $\Hom(\R, \tensor[_\Q]{\mathbf{Mod}}{})$. We need to show that the kernel and the cokernel are objects of $\modl$, i.e. that they come equipped with a $U$ map. The $U$ maps for $\ker f, \coker f$ are the unique maps (coming from the universal property of the (co)kernel) such that the diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \ker f \ar[r] \ar[d, swap, dashed, "\exists ! U_{\ker f}"] & C \ar[d, "U_C"] \ar[r, "f"] & D \ar[d, "U_D"] \ar[r] & \coker f \ar[d, dashed, "\exists ! U_{\coker f}"] \\ {\ker f} \ar[r] & {C} \ar[r, "f"] & {D} \ar[r] & {\coker f} \end{tikzcd} \end{IEEEeqnarray*} commutes. Let $C_i$, for $i \in I$, be a family of objects in $\modl$. Then, the product $\prod_{i \in I}^{} C_i$ and the coproduct $\bigoplus_{i \in I}^{} C_i$ exist in $\Hom(\R, \tensor[_\Q]{\mathbf{Mod}}{})$. Again, we need to show that the product and coproduct come equipped with a $U$ map.
The $U$ maps for the product and coproduct are the maps \begin{IEEEeqnarray*}{LCRRCRCL+x*} U_{\bigproduct_{i \in I}^{} C_i} & = & \bigproduct_{i \in I}^{} U_{C_i} \colon & \bigproduct_{i \in I}^{} C_i & \longrightarrow & \bigproduct_{i \in I}^{} C_i, \\ U_{\bigdirectsum_{i \in I}^{} C_i} & = & \bigdirectsum_{i \in I}^{} U_{C_i} \colon & \bigdirectsum_{i \in I}^{} C_i & \longrightarrow & \bigdirectsum_{i \in I}^{} C_i, \end{IEEEeqnarray*} coming from the respective universal properties. \end{proof} \begin{definition} \label{def:homology functor} Let $(C,\partial,U) \in \comp$. The \textbf{homology} of $(C,\partial,U)$ is the object of $\modl$ given by $H(C, \partial, U) \coloneqq (H(C, \partial), H(U))$, where $H(C, \partial) = \ker \partial / \img \partial$ and $H(U)$ is the unique map such that the diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \img \partial \ar[r] \ar[d, swap, "U"] & \ker \partial \ar[r] \ar[d, "U"] & \ker \partial / \img \partial \ar[d, dashed, "\exists !"] \ar[r, equals] & H(C, \partial) \ar[d, "H(U)"] \\ \img \partial \ar[r] & \ker \partial \ar[r] & \ker \partial / \img \partial \ar[r, equals] & H(C, \partial) \end{tikzcd} \end{IEEEeqnarray*} commutes. If $\phi \colon (C, \partial^C, U^C) \longrightarrow (D, \partial^D, U^D)$ is a morphism in $\comp$, we define the induced morphism on homology, $H(\phi) \colon H(C, \partial^C) \longrightarrow H(D, \partial^D)$, to be the unique map such that the diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \img \partial^C \ar[r] \ar[d, swap, "\phi"] & \ker \partial^C \ar[r] \ar[d, "\phi"] & \ker \partial^C / \img \partial^C \ar[d, dashed, "\exists !"] \ar[r, equals] & H(C, \partial^C) \ar[d, "H(\phi)"] \\ \img \partial^D \ar[r] & \ker \partial^D \ar[r] & \ker \partial^D / \img \partial^D \ar[r, equals] & H(D, \partial^D) \end{tikzcd} \end{IEEEeqnarray*} commutes. With these definitions, homology is a functor $H \colon \comp \longrightarrow \modl$. \end{definition} \section{Action functional} \label{sec:action functional} Our goal in this section is to establish the definitions that we will need to later define the $S^1$-equivariant Floer chain complex. We define suitable families of admissible Hamiltonians (\cref{def:hamiltonians}) and almost complex structures (\cref{def:acs}). The key points of this section are \cref{def:generators}, where we define the set of generators of the $S^1$-equivariant Floer chain complex, and \cref{def:flow lines}, where we define the trajectories that are counted in the differential of the $S^1$-equivariant Floer chain complex. We also define the action of a generator (\cref{def:action functional}), which will induce a filtration on the $S^1$-equivariant Floer chain complex. We will assume that $(X,\lambda)$ is a nondegenerate Liouville domain with completion $(\hat{X},\hat{\lambda})$. Let $\varepsilon \coloneqq \frac{1}{2} \min \operatorname{Spec}(\partial X,\lambda|_{\partial X})$. We start by recalling some basic facts about $S^{2N+1}$ and $\C P^N$. For each $N \in \Z_{\geq 1}$ we denote\begin{IEEEeqnarray*}{c+x*} S^{2N + 1} \coloneqq \{ (z_0,\ldots,z_N) \in \C ^{N+1} \ | \ |z_0|^2 + \cdots + |z_N|^2 = 1 \}. \end{IEEEeqnarray*} There is an action $S^1 \times S^{2N + 1} \longrightarrow S^{2N + 1}$ given by $(t,z) \longmapsto e ^{2 \pi i t} z$. This action is free and proper, so we can consider the quotient manifold $S^{2N+1}/S^1$. The Riemannian metric of $\C ^{N+1} = \R ^{2(N+1)}$ pulls back to a Riemannian metric on $S^{2N + 1}$.
The action of $S^1$ on $S^{2N + 1}$ is by isometries, so there exists a unique Riemannian metric on $S^{2N+1}/S^1$ such that the projection $S^{2N+1} \longrightarrow S^{2N+1}/S^1$ is a Riemannian submersion. The set $\C \setminus \{0\}$ is a group with respect to multiplication, and it acts on $\C ^{N+1} \setminus \{0\}$ by multiplication. This action is free and proper, so we can form the quotient \begin{IEEEeqnarray*}{c+x*} \C P^{N} \coloneqq (\C ^{N+1} \setminus \{0\})/(\C \setminus \{0\}). \end{IEEEeqnarray*} By the universal property of the quotient, there exists a unique map $S^{2N+1}/S^1 \longrightarrow \C P^N$ such that the following diagram commutes: \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} S^{2N + 1} \ar[r, hook] \ar[d, two heads] & \C ^{N+1} \setminus \{0\} \ar[d, two heads] \\ S^{2N + 1} / S^1 \ar[r, hook, two heads, dashed, swap, "\exists !"] & \C P^N \end{tikzcd} \end{IEEEeqnarray*} The map $S^{2N + 1} / S^1 \longrightarrow \C P^N$ is a diffeomorphism. Define the Fubini--Study metric on $\C P^N$ to be the unique Riemannian metric on $\C P^N$ such that $S^{2N + 1} / S^1 \longrightarrow \C P^N$ is an isometry. We will now consider a special family of functions on $S^{2N+1}$ and $\C P^N$. Define a function\begin{IEEEeqnarray*}{rrCl} f_N \colon & \C P^N & \longrightarrow & \R \\ & [w] & \longmapsto & \frac{\sum_{j=0}^{N} j|w_j|^2}{\sum_{j=0}^{N} |w_j|^2}. \end{IEEEeqnarray*} Define $\tilde{f}_N$ to be the pullback of $f_N$ to $S^{2N+1}$. Let $e_0,\ldots,e_N$ be the canonical basis of $\C ^{N+1}$ (as a vector space over $\C$). Then, \begin{IEEEeqnarray*}{rCls+x*} \critpt \tilde{f}_N & = & \{ e^{2 \pi i t} e_j \mid t \in S^1, j = 0,\ldots,N \}, \\ \critpt f_N & = & \{[e_0],\ldots,[e_N]\}. \end{IEEEeqnarray*} The function $f_N$ is Morse, while $\tilde{f}_N$ is Morse--Bott. The Morse indices are given by \begin{IEEEeqnarray*}{rCll} \morse([e_j],f_N) & = & 2j, & \quad \text{for all } j=0,\ldots,N, \\ \morse(z,\tilde{f}_N) & = & \morse([z], f_N), & \quad \text{for all } z \in \critpt \tilde{f}_N. \end{IEEEeqnarray*} We will use the notation $\morse(z) \coloneqq \morse(z,\tilde{f}_N) = \morse([z], f_N)$. We now study the relation between $\tilde{f}_{N^-}$ and $\tilde{f}_{N^+}$ for $N^- \geq N^+$. For every $k$ such that $0 \leq k \leq N^- - N^+$, define maps \begin{IEEEeqnarray*}{rrCl} \inc^{N^-,N^+}_k \colon & S^{2N^++1} & \longrightarrow & S^{2N^-+1} \\ & (z_0,\ldots,z_{N^+}) & \longmapsto & (\underbrace{0,\ldots,0}_k,z_0,\ldots,z_{N^+},0,\ldots,0). \end{IEEEeqnarray*} Let $I_k \colon \R \longrightarrow \R$ be given by $I_k(x) = x + k$. Then, the following diagram commutes: \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd}[row sep=scriptsize, column sep={{{{6em,between origins}}}}] & S^{2N^+ + 1} \arrow[dl, swap, "\inc_{k}^{N^-,N^+}"] \arrow[rr, "\tilde{f}_{N^+}"] \arrow[dd] & & \R \arrow[dl, "I_k"] \arrow[dd, equals] \\ S^{2N^- + 1} \arrow[rr, crossing over, near end, "\tilde{f}_{N^-}"] \arrow[dd] & & \R \\ & \C P^{N^+} \arrow[dl, dashed, swap, outer sep = -4pt, "\exists ! i_{k}^{N^-,N^+}"] \arrow[rr, near start, "f_{N^+}"] & & \R \arrow[dl, "I_k"] \\ \C P ^{N^-} \arrow[rr, swap, "f_{N^-}"] & & \R \arrow[from=uu, crossing over, equals] \end{tikzcd} \end{IEEEeqnarray*} The vector fields $\nabla \tilde{f}_{N^+}$ and $\nabla \tilde{f}_{N^-}$ are $\inc_{k}^{N^-,N^+}$-related, and analogously the vector fields $\nabla {f}_{N^+}$ and $\nabla {f}_{N^-}$ are ${i}_{k}^{N^-,N^+}$-related.
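To make the objects above concrete, we record the smallest case (this example is included only as an illustration and follows directly from the formulas above). For $N = 1$ we have $f_1([w]) = |w_1|^2 / (|w_0|^2 + |w_1|^2)$, whose critical points are the minimum $[e_0]$, with $\morse([e_0], f_1) = 0$, and the maximum $[e_1]$, with $\morse([e_1], f_1) = 2$, while $\critpt \tilde{f}_1 \subset S^3$ consists of the two circles $\{ e^{2 \pi i t} e_0 \mid t \in S^1 \}$ and $\{ e^{2 \pi i t} e_1 \mid t \in S^1 \}$. Moreover, for $N^+ = 1$, $N^- = 2$ and $k = 1$, the inclusion $\inc^{2,1}_1(z_0,z_1) = (0,z_0,z_1)$ satisfies
\begin{IEEEeqnarray*}{c+x*}
  \tilde{f}_2(0,z_0,z_1) = \frac{|z_0|^2 + 2 |z_1|^2}{|z_0|^2 + |z_1|^2} = \tilde{f}_1(z_0,z_1) + 1 = (I_1 \circ \tilde{f}_1)(z_0,z_1),
\end{IEEEeqnarray*}
as predicted by the commutativity of the diagram above.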
For $t \in \R$, denote by $\phi^t_{\tilde{f}_{N^-}}$ the time-$t$ gradient flow of $\tilde{f}_{N^-}$ and analogously for $\phi^t_{f_{N^+}}$. Then, the following diagram commutes: \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd}[row sep=scriptsize, column sep={{{{6em,between origins}}}}] & S^{2N^+ + 1} \arrow[dl, swap, "{\inc_k^{N^-,N^+}}"] \arrow[rr, "\phi^t_{\tilde{f}_N}"] \arrow[dd] & & S^{2N^+ + 1} \arrow[dl, near end, "\inc_k^{N^-,N^+}"] \arrow[dd] \\ S^{2N^- + 1} \arrow[rr, crossing over, near end, "\phi^t_{\tilde{f}_{N^-}}"] \arrow[dd] & & S^{2N^- + 1} \\ & \C P^{N^+} \arrow[dl, swap, "i_k^{N^-,N^+}"] \arrow[rr, near start, "\phi^t_{f_{N^+}}"] & & \C P^{N^+} \arrow[dl, "i_k^{N^-,N^+}"] \\ \C P ^{N^-} \arrow[rr, swap, "\phi^t_{f_{N^-}}"] & & \C P^{N^-} \arrow[from=uu, crossing over] \end{tikzcd} \end{IEEEeqnarray*} \begin{definition} \label{def:hamiltonians} A parametrized Hamiltonian $H \colon S^1 \times S^{2N+1} \times \hat{X} \longrightarrow \R$ is \textbf{admissible} if it satisfies the conditions in \cref{item:invariant,item:profile,item:ndg,item:flow lines,item:pullbacks}. We denote the set of such $H$ by $\mathcal{H}(X,N)$. \begin{enumerate} \item \label{item:profile} There exist $D \in \R$, $C \in \R_{>0} \setminus \operatorname{Spec}(\del X, \lambda|_{\del X})$ and $\delta > 0$ such that: \begin{enumerate}[label=(\Roman*)] \item on $S^1 \times S^{2N+1} \times X$, we have that $- \varepsilon < H < 0$, $H$ is $S^1$-independent and $H$ is $C^2$-small (so that there are no nonconstant $1$-periodic orbits); \item on $S^1 \times S^{2N+1} \times [0,\delta] \times \del X$, we have that $-\varepsilon < H < \varepsilon$ and $H$ is $C^2$-close to $(t,z,r,x) \longmapsto h(e^r)$, where $h \colon [1,e ^{\delta}] \longrightarrow \R$ is increasing and strictly convex; \item[(S)] on $S^1 \times S^{2N+1} \times [\delta, + \infty) \times \del X$, we have that $H(t,z,r,x) = C e^r + D$. \end{enumerate} \item \label{item:invariant} Consider the action of $S^1$ on $S^1 \times S^{2N+1} \times \hat{X}$ given by $t' \cdot (t,z,x) = (t' + t, e ^{2 \pi i t'} z, x)$. Then $H$ is invariant under this action, i.e. $H(t'+ t, e ^{2 \pi i t'} z, x) = H(t,z,x)$. \item \label{item:ndg} If $z$ is a critical point of $\tilde{f}_N$ then $H_z$ is nondegenerate. \item \label{item:flow lines} For every $(t,z,x) \in S^1 \times S^{2N+1} \times \hat{X}$ we have $\p{<}{}{\nabla_{S^{2N+1}}H(t,z,x), \nabla \tilde{f}_N(z)} \leq 0$. \item \label{item:pullbacks} There exists $E \geq 0$ such that $(\inc^{N,N-1}_0)^* H = (\inc^{N,N-1}_1)^* H + E$. \end{enumerate} \end{definition} \begin{definition} \label{def:acs} A parametrized almost complex structure $J \colon S^1 \times S^{2N+1} \times \hat{X} \longrightarrow \End(T \hat{X})$ is \textbf{admissible} if it satisfies the conditions in \cref{def:acs 1,def:acs 2,def:acs 3,def:acs 4}. We denote the set of such $J$ by $\mathcal{J}(X,N)$. \begin{enumerate} \item \label{def:acs 1} $J$ is $S^1$-invariant, i.e. $J(t' + t, e ^{2 \pi i t'} z, x) = J(t, z, x)$ for every $t' \in S^1$ and $(t,z,x) \in S^1 \times S^{2N+1} \times \hat{X}$. \item \label{def:acs 2} $J$ is $\hat{\omega}$-compatible. \item \label{def:acs 3} The restriction of $J$ to $S^1 \times S^{2N+1} \times \R_{\geq 0} \times \del X$ is cylindrical. \item \label{def:acs 4} $(\inc_0^{N,N-1})^* J = (\inc_1^{N,N-1})^* J$. 
\end{enumerate} \end{definition} \begin{definition} Denote by $\admissible{X}$ the set of tuples \begin{IEEEeqnarray*}{c+x*} (H,J) \in \bigcoproduct_{N \in \Z_{\geq 1}}^{} \mathcal{H}(X,N) \times \mathcal{J}(X,N) \end{IEEEeqnarray*} which are regular, where ``regular'' means that the moduli spaces of \cref{def:flow lines} are transversely cut out. Define a preorder $\leq$ on $\admissible{X}$ by \begin{IEEEeqnarray*}{rCl} (H^+,J^+) \leq (H^-,J^-) & \mathrel{\mathop:}\Longleftrightarrow & N^+ \leq N^- \text{ and } H^+ \leq (\inc_0 ^{N^-,N^+})^* H^-. \end{IEEEeqnarray*} \end{definition} \begin{definition} \label{def:generators} Let $N \in \Z_{\geq 1}$ and $H \in \mathcal{H}(X,N)$. Define \begin{IEEEeqnarray*}{c+x*} \hat{\mathcal{P}}(H) \coloneqq \left\{ (z, \gamma) \ \middle\vert \begin{array}{l} z \in S^{2N+1} \text{ is a critical point of } \tilde{f}_N, \\ \gamma \in C^{\infty}(S^1, \hat{X}) \text{ is a $1$-periodic orbit of } H_z \end{array} \right\}. \end{IEEEeqnarray*} There is an action of $S^1$ on $\hat{\mathcal{P}}(H)$ given by $t \cdot (z,\gamma) \coloneqq (e ^{2 \pi i t} z, \gamma(\cdot - t))$. Define the quotient \begin{IEEEeqnarray*}{c+x*} \mathcal{P}(H) \coloneqq \hat{\mathcal{P}}(H) / S^1. \end{IEEEeqnarray*} \end{definition} \begin{remark} \label{rmk:types of orbits} If $(z, \gamma) \in \hat{\mathcal{P}}(H)$, then either $\img \gamma$ is in region $\rmn{1}$ and $\gamma$ is constant, or $\img \gamma$ is in region $\rmn{2}$ and $\gamma$ is nonconstant. In the slope region, i.e. region S, there are no $1$-periodic orbits of $H$ because $C$ is not in $\operatorname{Spec}(\del X, \lambda|_{\del X})$ and by \cref{cor:hamiltonian orbits are reeb orbits}. \end{remark} \begin{definition} \label{def:flow lines} Let $N \in \Z_{\geq 1}$, $H \in \mathcal{H}(X,N)$ and $J \in \mathcal{J}(X,N)$. A pair $(w,u)$, where $w \colon \R \longrightarrow S^{2N+1}$ and $u \colon \R \times S^1 \longrightarrow \hat{X}$, is a solution of the \textbf{parametrized Floer equation} if \begin{equation*} \left\{ \, \begin{IEEEeqnarraybox}[ \IEEEeqnarraystrutmode \IEEEeqnarraystrutsizeadd{7pt} {7pt}][c]{rCl} \dot{w}(s) & = & \nabla \tilde{f}_N(w(s)) \\ \pdv{u}{s}(s,t) & = & - J^t_{w(s)}(u(s,t)) \p{}{2}{ \pdv{u}{t}(s,t) - X_{H^t_{w(s)}} (u(s,t)) }. \end{IEEEeqnarraybox} \right. \end{equation*} For $[z^+,\gamma^+], [z^-,\gamma^-] \in \mathcal{P}(H)$, define $\hat{\mathcal{M}}(H,J,[z^+,\gamma^+],[z^-,\gamma^-])$ to be the moduli space of solutions $(w,u)$ of the parametrized Floer equation such that $(w(s),u(s,\cdot))$ converges as $s \to \pm \infty$ to an element in the equivalence class $[z^\pm,\gamma^\pm]$. We define the following two group actions. \begin{IEEEeqnarray*}{rsrsrCl} \R & \quad \text{acts on} \quad & \hat{\mathcal{M}}(H,J,[z^+,\gamma^+],[z^-,\gamma^-]) & \quad \text{by} \quad & s \cdot (w,u) & \coloneqq & (w(\cdot - s), u(\cdot-s, \cdot)), \\ S^1 & \quad \text{acts on} \quad & \hat{\mathcal{M}}(H,J,[z^+,\gamma^+],[z^-,\gamma^-]) & \quad \text{by} \quad & t \cdot (w,u) & \coloneqq & (e ^{2 \pi i t} w, u(\cdot, \cdot - t)). \end{IEEEeqnarray*} The actions of $\R$ and $S^1$ on $\hat{\mathcal{M}}(H,J,[z^+,\gamma^+],[z^-,\gamma^-])$ commute, so they define an action of $\R \times S^1$ on $\hat{\mathcal{M}}(H,J,[z^+,\gamma^+],[z^-,\gamma^-])$. Finally, let \begin{IEEEeqnarray*}{c+x*} \mathcal{M}(H,J,[z^+,\gamma^+],[z^-,\gamma^-]) \coloneqq \hat{\mathcal{M}}(H,J,[z^+,\gamma^+],[z^-,\gamma^-]) / (\R \times S^1).
\end{IEEEeqnarray*} \end{definition} \begin{definition} \phantomsection\label{def:action functional} For $(z, \gamma) \in \hat{\mathcal{P}}(H)$, the \textbf{action} of $(z, \gamma)$, denoted $\mathcal{A}_H(z, \gamma)$, is given by \begin{IEEEeqnarray*}{c+x*} \mathcal{A}_{H}(z,\gamma) \coloneqq \mathcal{A}_{H_z}(\gamma) = \int_{S^1}^{} \gamma^* \hat{\lambda} - \int_{S^1}^{} H(t,z,\gamma(t)) \edv t. \end{IEEEeqnarray*} The action functional is a map $\mathcal{A}_H \colon \hat{\mathcal{P}}(H) \longrightarrow \R$. Since $H$ is $S^1$-invariant, $\mathcal{A}_H$ is $S^1$-invariant as well, and therefore there is a corresponding map $\mathcal{A}_H$ whose domain is $\mathcal{P}(H)$. \end{definition} \begin{lemma} \label{lem:action admissible} The actions of $1$-periodic orbits of $H$ are ordered according to \begin{IEEEeqnarray*}{c+x*} 0 < \mathcal{A}_H(\rmn{1}) < \varepsilon < \mathcal{A}_H(\rmn{2}). \end{IEEEeqnarray*} \end{lemma} \begin{proof} Consider \cref{fig:action ordering 1}. By \cref{lem:action in symplectization,def:hamiltonians}, we have that $\mathcal{A}_H$ is constant equal to $-H$ in regions $\rmn{1}$ and S and $\mathcal{A}_H$ is strictly increasing in region $\rmn{2}$. We remark that strictly speaking, the Hamiltonian plotted in the picture is not $H$ but instead a Hamiltonian which is $C^2$-close to $H$. However, it suffices to prove the statement for the Hamiltonian which approximates $H$. From this discussion, we conclude that $0 < \mathcal{A}_H(\rmn{1}) < \varepsilon$. We show that $\mathcal{A}_H(\rmn{2}) > \varepsilon$. \begin{IEEEeqnarray*}{rCls+x*} \mathcal{A}_H(\rmn{2}) & = & e^r T(r) - H(r) & \quad [\text{by \cref{lem:action in symplectization}}] \\ & \geq & 2 \varepsilon e^r - H(r) & \quad [\text{$2 \varepsilon = \min \operatorname{Spec}(\del X, \lambda|_{\del X})$ and $T(r) \in \operatorname{Spec}(\del X, \lambda|_{\del X})$}] \\ & > & \varepsilon (2 e^r - 1) & \quad [\text{$H(r) < \varepsilon$}] \\ & > & \varepsilon & \quad [\text{$r > 0$}]. 
& \qedhere \end{IEEEeqnarray*} \end{proof} \begin{figure}[ht] \centering \begin{tikzpicture} [ help lines/.style={thin, draw = black!50}, Hamiltonian/.style={thick}, action/.style={thick} ] \tikzmath{ \a = 4; \b = 1; \c = 3; \d = 1; \h = 0.5; \sml = 0.05; \y = -0.3; \z = -0.1; \f = \c + \d; \m = - 12 * (-\y + \z) / (-1+exp(\d))^4; \n = 2 * (-1 + 3 * exp(\d)) * (-\y + \z) / (-1+exp(\d))^4; \o = ( -2 * exp(\d) * \y + 6 * exp(2 * \d) * \y - 4 * exp(3 * \d) * \y + exp(4 * \d) * \y + \z - 2 * exp(\d) * \z ) / (-1+exp(\d))^4; \u = -2 * (\y - \z) / (-1+exp(\d)); \v = (2 * exp(\d) * \y - \z - exp(\d) * \z) / (-1+exp(\d)); function h1 (\r) { return \y; }; function h2 (\r) { return {\o + \n * \r + 1/2 * exp(\d) * \m * \r^2 + 1/6 * (-1 - exp(\d)) * \m * \r^3 + 1/12 * \m * \r^4 }; }; function h2p(\r) { return {\n + 1/6 * \m * \r * (-3 * exp(\d) * (-2 + \r) + \r * (-3 + 2 * \r))}; }; function hs (\r) { return { \u * \r + \v }; }; function H1(\r) { return { \y }; }; function H2(\r) { return { h2(exp(\r)) }; }; function Hs(\r) { return { hs(exp(\r)) }; }; function a1(\r) { return { -\y }; }; function a2(\r) { return { exp(\r) * h2p(exp(\r)) - H2(\r) }; }; function as(\r) { return { -\v }; }; \e = ln((\a-\v)/\u) - \d; \g = \f + \e; } \draw[->] (0 , 0) -- (\g, 0); \draw[->] (0 ,-\b) -- (0 ,\a) node[above] {$\R$}; \draw[->] (\c,-\b) node[below] {$0$} -- (\c,\a) node[above] {$\R$}; \draw[help lines] (0 , \h) node[left] {$+\varepsilon$} -- (\g, \h); \draw[help lines] (0 ,-\h) node[left] {$-\varepsilon$} -- (\g,-\h); \draw[help lines] (\f,-\b) node[below] {$\delta$} -- (\f, \a); \draw[Hamiltonian, domain = 0:\c] plot (\x, {H1(\x - \c)}); \draw[Hamiltonian, domain = \c:\f] plot (\x, {H2(\x - \c)}); \draw[Hamiltonian, domain = \f:\g] plot (\x, {Hs(\x - \c)}) node[right] {$H$}; \draw[action, domain = 0:\c] plot (\x, {a1(\x - \c)}); \draw[action, domain = \c:\f] plot (\x, {a2(\x - \c)}); \draw[action, domain = \f:\g] plot (\x, {as(\x - \c)}) node[right] {$\mathcal{A}_H$}; \draw (\c/2 ,\a) node[below] {$\mathrm{I}$}; \draw (\c + \d/2 ,\a) node[below] {$\mathrm{II}$}; \draw (\c + 3*\d/2,\a) node[below] {$\mathrm{S}$}; \draw[help lines, decoration = {brace, mirror, raise=5pt}, decorate] (0,-\b-.75) -- node[below=6pt] {$X$} (\c - \sml,-\b-.75); \draw[help lines, decoration = {brace, mirror, raise=5pt}, decorate] (\c + \sml,-\b-.75) -- node[below=6pt] {$\R_{\geq 0} \times \del X$} (\g,-\b-.75); \end{tikzpicture} \caption{Action of a $1$-periodic orbit of $H$} \label{fig:action ordering 1} \end{figure} \begin{remark} Denote by $\critpt \mathcal{A}_{H} \subset S^{2N+1} \times C^\infty(S^1,\hat{X})$ the set of critical points of the action functional. Then, $\hat{\mathcal{P}}(H) = \critpt \mathcal{A}_{H}$, as is usual for various Floer theories. However, if $(w,u)$ is a path in $S^{2N+1} \times C^\infty(S^1,\hat{X})$, it is not true that $(w,u)$ is a gradient flow line of $\mathcal{A}_{H}$ if and only if $(w,u)$ is a solution of the parametrized Floer equations. \end{remark} \section{\texorpdfstring{$S^1$}{S1}-equivariant Floer homology} \label{sec:Floer homology} Let $(X,\lambda)$ be a nondegenerate Liouville domain. In this section, we define the $S^1$-equivariant Floer chain complex of $(X,\lambda)$ and other related invariants, namely the $S^1$-equivariant Floer homology, the positive $S^1$-equivariant Floer homology, the $S^1$-equivariant symplectic homology and the positive $S^1$-equivariant symplectic homology. The presentation we will give will be based on \cite{guttSymplecticCapacitiesPositive2018}. 
Other references discussing $S^1$-equivariant symplectic homology are \cite{guttMinimalNumberPeriodic2014,guttPositiveEquivariantSymplectic2017,bourgeoisGysinExactSequence2013,bourgeoisFredholmTheoryTransversality2010,bourgeoisEquivariantSymplecticHomology2016,seidelBiasedViewSymplectic2008}. The $S^1$-equivariant Floer complex of $X$ depends on the additional data of $(H,J) \in \admissible{X}$. More precisely, it can be encoded in a functor $\homology{}{S^1}{X}{F}{C}{}{} \colon \admissible{X}^{} \longrightarrow \comp$. We start by defining this functor on objects. For each $I = (H,J) \in \admissible{X}$, we need to say what is $\homology{}{S^1}{X}{F}{C}{}{}(H,J) \coloneqq \homology{}{S^1}{}{F}{C}{}{}(X,H,J) \in \comp$. \begin{definition} We define $\homology{}{S^1}{}{F}{C}{}{}(X,H,J)$ to be the free $\Q$-module generated by the elements of $\mathcal{P}(H)$. Define $\homology{}{S^1}{}{F}{C}{a}{}(X,H,J)$ to be the subspace generated by the elements $[z,\gamma]$ of $\mathcal{P}(H)$ such that $\mathcal{A}_{H}(z,\gamma) \leq a$. These modules come equipped with inclusion maps \begin{IEEEeqnarray*}{rCls+x*} \iota^{a} \colon \homology{}{S^1}{}{F}{C}{a}{}(X,H,J) & \longrightarrow & \homology{}{S^1}{}{F}{C}{}{}(X,H,J), & \quad for $a \in \R$, \\ \iota^{b,a} \colon \homology{}{S^1}{}{F}{C}{a}{}(X,H,J) & \longrightarrow & \homology{}{S^1}{}{F}{C}{b}{}(X,H,J), & \quad for $a \leq b$. \end{IEEEeqnarray*} \end{definition} For $[z^\pm,\gamma^\pm] \in \mathcal{P}(H)$, consider the moduli space $\mathcal{M}(H,J,[z^+,\gamma^+],[z^-,\gamma^-])$. Near a point $(w,u) \in \mathcal{M}(H,J,[z^+,\gamma^+],[z^-,\gamma^-])$, this space is a manifold (see \cref{thm:transversality in s1eft}) of dimension \begin{IEEEeqnarray}{c+x*} \plabel{eq:dimension for ms} \dim_{(w,u)} \mathcal{M}(H,J,[z^+,\gamma^+],[z^-,\gamma^-]) = \ind^{\tau^+}(z^+,\gamma^+) - \ind^{\tau^-}(z^-,\gamma^-) - 1, \end{IEEEeqnarray} where \begin{IEEEeqnarray*}{c+x*} \ind^{\tau^\pm}(z^\pm,\gamma^\pm) \coloneqq \morse(z^{\pm}) + \conleyzehnder^{\tau^{\pm}}(\gamma^{\pm}) \end{IEEEeqnarray*} and $\tau^{\pm}$ are symplectic trivializations of $(\gamma^{\pm})^* T \hat{X}$ which extend to a symplectic trivialization $\tau$ of $u^* T \hat{X}$. With $\tau^{\pm}$ chosen like this, even though each individual term on the right-hand side of Equation \eqref{eq:dimension for ms} depends on $\tau^{\pm}$, the right-hand side is independent of the choice of $\tau$. Throughout this chapter, if $\mathcal{M}$ is a moduli space of solutions of the parametrized Floer equation, we will denote by $\# \mathcal{M}$ the signed count of points $(w,u)$ in $\mathcal{M}$ such that $\dim_{(w,u)} \mathcal{M} = 0$. \begin{definition} \label{def:differential} We define $\del \colon \homology{}{S^1}{}{F}{C}{}{}(X,H,J) \longrightarrow \homology{}{S^1}{}{F}{C}{}{}(X,H,J)$ by \begin{IEEEeqnarray*}{c+x*} \del ([z^+,\gamma^+]) \coloneqq \sum_{[z^-,\gamma^-] \in \mathcal{P}(H)}^{} \# \mathcal{M}_{\vphantom{0}}(H,J,[z^+,\gamma^+],[z^-,\gamma^-]) \cdot [z^-,\gamma^-], \end{IEEEeqnarray*} for each $[z^+,\gamma^+] \in \mathcal{P}(H)$.\end{definition} By \cref{lem:action energy for floer trajectories}, the differential respects the action filtration, i.e. the differential $\del$ maps $\homology{}{S^1}{}{F}{C}{a}{}(X,H,J)$ to itself. By \cite[Proposition 2.2]{bourgeoisEquivariantSymplecticHomology2016}, $\partial \circ \partial = 0$. 
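We record an immediate consequence of Equation \eqref{eq:dimension for ms}, included here only as a consistency check. Since $\del$ counts elements $(w,u)$ of moduli spaces with $\dim_{(w,u)} \mathcal{M}(H,J,[z^+,\gamma^+],[z^-,\gamma^-]) = 0$, a generator $[z^-,\gamma^-]$ can appear in $\del([z^+,\gamma^+])$ with nonzero coefficient only if
\begin{IEEEeqnarray*}{c+x*}
  \ind^{\tau^-}(z^-,\gamma^-) = \ind^{\tau^+}(z^+,\gamma^+) - 1
\end{IEEEeqnarray*}
for trivializations $\tau^{\pm}$ as above, which is consistent with the fact that $\deg(\del) = -1$ with respect to the grading of \cref{rmk:grading for s1esh}.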
\begin{definition} \phantomsection\label{def:U map} We define a map $U \colon \homology{}{S^1}{}{F}{C}{}{}(X,H,J) \longrightarrow \homology{}{S^1}{}{F}{C}{}{}(X,H,J)$ as follows. First, recall that a critical point $z$ of $\tilde{f}_N$ is of the form $z = e^{2 \pi i t} e_j$, for $t \in S^1$ and $j = 0, \ldots, N$. If $j \geq 1$, let $\shf(e^{2 \pi i t} e_j) \coloneqq e^{2 \pi i t} e_{j-1}$. Finally, define \begin{IEEEeqnarray*}{c+x*} U ([z,\gamma]) \coloneqq \begin{cases} [\shf(z),\gamma] & \text{if } \morse(z) \geq 2, \\ 0 & \text{if } \morse(z) = 0, \end{cases} \end{IEEEeqnarray*} for $[z,\gamma] \in \mathcal{P}(H)$. \end{definition} The definition of $U$ is well-posed because by \cref{def:hamiltonians} \ref{item:pullbacks}, the Hamiltonians $H_{e_j}$ and $H_{e_{j-1}}$ differ by a constant. Therefore, if $\gamma$ is a $1$-periodic orbit of $H_{e_j}$ then it is also a $1$-periodic orbit of $H_{e_{j-1}}$. By \cite[Section 6.3]{guttSymplecticCapacitiesPositive2018}, $U$ is a chain map, i.e. $U \circ \partial = \partial \circ U$. \begin{lemma} The map $U \colon \homology{}{S^1}{}{F}{C}{}{}(X,H,J) \longrightarrow \homology{}{S^1}{}{F}{C}{}{}(X,H,J)$ respects the filtration. \end{lemma} \begin{proof} Let $[z,\gamma] \in \mathcal{P}(H)$ be such that $\morse(z) \geq 2$ and $\mathcal{A}_{H}(z,\gamma) \leq a$. We wish to show that $\mathcal{A}_{H}(\shf(z),\gamma) \leq \mathcal{A}_{H}(z,\gamma) \leq a$. Assumption \ref{item:pullbacks} of \cref{def:hamiltonians} implies that $H_{\shf(z)} = H_z + E$, where $E \geq 0$. Then, \begin{IEEEeqnarray*}{rCls+x*} \mathcal{A}_{H}(\shf(z),\gamma) & = & \int_{S^1}^{} \gamma^* \hat{\lambda} - \int_{0}^{1} H(t,\shf(z),\gamma(t)) \edv t & \quad [\text{by definition of $\mathcal{A}_{H}$}] \\ & = & \int_{S^1}^{} \gamma^* \hat{\lambda} - \int_{0}^{1} H(t,z,\gamma(t)) \edv t - E & \quad [\text{since $H_{\shf(z)} = H_z + E$}] \\ & = & \mathcal{A}_{H}(z,\gamma) - E & \quad [\text{by definition of $\mathcal{A}_{H}$}] \\ & \leq & \mathcal{A}_{H}(z,\gamma) & \quad [\text{since $E \geq 0$}] \\ & \leq & a & \quad [\text{by assumption on $[z,\gamma]$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} We will now define the continuation maps. For $(H^+,J^+) \leq (H^-,J^-) \in \admissible{X}$, we want to define a morphism $\phi^{-,+} \colon \homology{}{S^1}{}{F}{C}{}{}(X,H^+,J^+) \longrightarrow \homology{}{S^1}{}{F}{C}{}{}(X,H^-,J^-)$. Consider the map \begin{IEEEeqnarray*}{rrCl} \inc^{N^-,N^+}_k \colon & \hat{\mathcal{P}}((\inc_k ^{N^-,N^+})^* H^-) & \longrightarrow & \hat{\mathcal{P}}(H^-) \\ & (z,\gamma) & \longmapsto & (\inc^{N^-,N^+}_k(z),\gamma). \end{IEEEeqnarray*} This map fits into the commutative diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd}[row sep=scriptsize, column sep={{{{6em,between origins}}}}] & \hat{\mathcal{P}}((\inc_k^{N^-,N^+})^* H^-) \arrow[dl, "\inc^{N^-,N^+}_k"] \arrow[rr] \arrow[dd] & & \critpt (\tilde{f}_{N^+}) \arrow[dl, "\inc^{N^-,N^+}_k"] \arrow[dd] \\ \hat{\mathcal{P}}(H^-) \arrow[rr, crossing over, near end] \arrow[dd] & & \critpt (\tilde{f}_{N^-}) & \\ & \mathcal{P}((\inc_k^{N^-,N^+})^* H^-) \arrow[dl, dashed, "\exists ! 
i^{N^-,N^+}_k"] \arrow[rr] & & \critpt (f_{N^+}) \arrow[dl, "i^{N^-,N^+}_k"] \\ \mathcal{P}(H^-) \arrow[rr] \ar[uu, leftarrow, crossing over] & & \critpt (f_{N^-}) \ar[uu, leftarrow, crossing over] & \end{tikzcd} \end{IEEEeqnarray*} \begin{definition} An \textbf{admissible} homotopy of parametrized Hamiltonians from $H^-$ to $H^+$ is a map $H \colon \R \times S^1 \times S^{2N^+ +1} \times \hat{X} \longrightarrow \R$ which satisfies the conditions in \cref{item:homotopy h 1,item:homotopy h 2,item:homotopy h 3}, where $H_s(t,z,x) = H(s,t,z,x)$. We denote the set of such $H$ by $\mathcal{H}(H^+,H^-)$. \begin{enumerate} \item \label{item:homotopy h 3} For every $s \in \R$, we have that $H_s$ satisfies all the assumptions in \cref{def:hamiltonians}, with the exceptions that $C_s$ may be in $\operatorname{Spec}(\del X,\lambda|_{\del X})$, and it is not necessarily true that $z \in \critpt \tilde{f}_N$ implies that $H_{s,z}$ is nondegenerate. \item \label{item:homotopy h 1} There exists $s_0 > 0$ such that if $\pm s > s_0$ then $H_s = (\inc^{N^\pm,N^+}_0)^* H^\pm$. \item \label{item:homotopy h 2} For every $(s,t,z,x) \in \R \times S^1 \times S^{2N^+ + 1} \times \hat{X}$ we have that $\del_s H(s,t,x,z) \leq 0$. \end{enumerate} \end{definition} \begin{definition} An \textbf{admissible} homotopy of parametrized almost complex structures from $J^-$ to $J^+$ is a map $J \colon \R \times S^1 \times S^{2N^+ +1} \times \hat{X} \longrightarrow \End(T \hat{X})$ which satisfies the conditions in \cref{item:homotopy j 1,item:homotopy j 3}, where $J_s(t,z,x) = J(s,t,z,x)$. We denote the set of such $J$ by $\mathcal{J}(J^+,J^-)$. \begin{enumerate} \item \label{item:homotopy j 3} For every $s \in \R$, we have that $J_s$ satisfies all the assumptions in \cref{def:acs}. \item \label{item:homotopy j 1} There exists $s_0 > 0$ such that if $\pm s > s_0$ then $J_s = (\inc^{N^\pm,N^+}_0)^* J^\pm$. \end{enumerate} \end{definition} \begin{definition} Let $[z^\pm,\gamma^\pm] \in \mathcal{P}((\inc^{N^\pm,N^+}_0)^* H^\pm)$ and $(H,J)$ be a homotopy from $(H^-,J^-)$ to $(H^+,J^+)$. A pair $(w,u)$, where $w \colon \R \longrightarrow S^{2N^+ +1}$ and $u \colon \R \times S^1 \longrightarrow \hat{X}$ is a solution of the \textbf{parametrized Floer equation} (with respect to $(H, J)$) if \begin{equation*} \left\{ \, \begin{IEEEeqnarraybox}[ \IEEEeqnarraystrutmode \IEEEeqnarraystrutsizeadd{7pt} {7pt}][c]{rCl} \dot{w}(s) & = & \nabla \tilde{f}_N(w(s)) \\ \pdv{u}{s}(s,t) & = & - J^t_{s,w(s)}(u(s,t)) \p{}{2}{ \pdv{u}{t}(s,t) - X_{H^t_{s,w(s)}} (u(s,t)) }. \end{IEEEeqnarraybox} \right. \end{equation*} Define $\hat{\mathcal{M}}(H,J,[z^+,\gamma^+],[z^-,\gamma^-])$ to be the moduli space of solutions $(w,u)$ of the pa\-ra\-me\-trized Floer equation such that $(w(s),u(s,\cdot))$ converges as $s \to \pm \infty$ to an element in the equivalence class $[z^\pm,\gamma^\pm]$. Define an action of $S^1$ on $\hat{\mathcal{M}}(H,J,[z^+,\gamma^+],[z^-,\gamma^-])$ by \begin{IEEEeqnarray*}{c+x*} t \cdot (w,u) = (e ^{2 \pi i t} w, u(\cdot, \cdot - t)). \end{IEEEeqnarray*} Finally, let $\mathcal{M}(H,J,[z^+,\gamma^+],[z^-,\gamma^-]) \coloneqq \hat{\mathcal{M}}(H,J,[z^+,\gamma^+],[z^-,\gamma^-])/S^1$. \end{definition} \begin{definition} \label{def:continuation map} The \textbf{continuation map} is the map \begin{IEEEeqnarray*}{c+x*} \phi^{-,+} \colon \homology{}{S^1}{}{F}{C}{}{}(X,H^+,J^+) \longrightarrow \homology{}{S^1}{}{F}{C}{}{}(X,H^-,J^-) \end{IEEEeqnarray*} given as follows. Choose a regular homotopy $(H, J)$ from $(H^-,J^-)$ to $(H^+,J^+)$. 
Then, for every $[z^+, \gamma^+] \in \mathcal{P}(H^+)$, \begin{IEEEeqnarray*}{c} \phi^{-,+}([z^+,\gamma^+]) \coloneqq \sum_{[z^-,\gamma^-] \in \mathcal{P}((\inc_0 ^{N^-,N^+})^* H^-)} \# \mathcal{M}_{\vphantom{0}}(H,J,[z^+,\gamma^+],[z^-,\gamma^-]) \cdot [\inc^{N^-,N^+}_0 (z^-),\gamma^-]. \end{IEEEeqnarray*} \end{definition} \begin{lemma} The map $\phi^{-,+}$ respects the action filtrations. \end{lemma} \begin{proof} Assume that $[z^\pm,\gamma^\pm] \in \mathcal{P}((\inc_0 ^{N^\pm,N^+})^* H^\pm)$ is such that $\mathcal{A}_{H^+}(z^+,\gamma^+) \leq a$ and $\mathcal{M}(H,J,[z^+,\gamma^+],[z^-,\gamma^-])$ is nonempty. We wish to show that $\mathcal{A}_{H^-}(\inc^{N^-,N^+}_0(z^-),\gamma^-) \leq a$. The proof is the following computation. \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\mathcal{A}_{H^-}(\inc^{N^-,N^+}_0(z^-),\gamma^-)}\\ \quad & = & \int_{S^1}^{} \gamma^* \hat{\lambda} - \int_{0}^{1} H^-(t, \inc^{N^-,N^+}_0(z^-),\gamma^-(t)) \edv t & \quad [\text{definition of action functional}] \\ & = & \int_{S^1}^{} \gamma^* \hat{\lambda} - \int_{0}^{1} ((\inc_0 ^{N^-,N^+})^* H^-)(t, z^-,\gamma^-(t)) \edv t & \quad [\text{definition of $\inc^{N^-,N^+}_0$}] \\ & = & \mathcal{A}_{(\inc_0 ^{N^-,N^+})^* H^-}(z^-,\gamma^-) & \quad [\text{definition of action functional}] \\ & \leq & \mathcal{A}_{H^+}(z^+,\gamma^+) & \quad [\text{by \cref{lem:action energy for floer trajectories}}] \\ & \leq & a & \quad [\text{by assumption}]. & \qedhere \end{IEEEeqnarray*} \end{proof} By \cite[Section 2.4]{bourgeoisEquivariantSymplecticHomology2016}, the $U$ maps and the continuation maps commute. Moreover, by the usual arguments in Floer theory, we have (see also \cite[Section 5.3]{guttSymplecticCapacitiesPositive2018}): \begin{enumerate} \item The continuation map $\phi^{-,+}$ is a chain map, i.e. $\phi^{-,+} \circ \del^+ = \del^- \circ \phi^{-,+}$. \item The continuation map $\phi^{-,+}$ is independent (up to chain homotopy, i.e. as a morphism in $\comp$) on the choice of regular homotopy $(H, J)$. \item The continuation maps are functorial, i.e. if $(H^0,J^0) \leq (H^1,J^1) \leq (H^2,J^2) \in \admissible{X}$ then $\phi^{2,1} \circ \phi^{1,0} = \phi^{2,0}$. \end{enumerate} \begin{remark} \label{rmk:grading for s1esh} By the determinant property of \cref{thm:properties of cz}, the parity of the Conley--Zehnder index of a Hamiltonian $1$-periodic orbit is independent of the choice of trivialization. Therefore, $\homology{}{S^1}{}{F}{C}{}{}(X,H,J)$ has a $\Z_{2}$-grading given by \begin{IEEEeqnarray}{c} \deg([z,\gamma]) \coloneqq \mu([z,\gamma]) \coloneqq \morse(z) + \conleyzehnder(\gamma). \plabel{eq:grading s1esh} \end{IEEEeqnarray} If $\pi_1(X) = 0$ and $c_1(TX)|_{\pi_2(X)} = 0$, then by \cref{lem:cz of hamiltonian is independent of triv over filling disk} we have well-defined Conley--Zehnder indices in $\Z$. Therefore, Equation \eqref{eq:grading s1esh} defines a $\Z$-grading on $\homology{}{S^1}{}{F}{C}{}{}(X,H,J)$. With respect to this grading, \begin{IEEEeqnarray*}{rCls+x*} \deg(\partial) & = & -1, \\ \deg(U) & = & -2, \\ \deg(\phi^{-,+}) & = & 0. 
\end{IEEEeqnarray*} \end{remark} \begin{definition} If $(X,\lambda)$ is a nondegenerate Liouville domain, the \textbf{$S^1$-equivariant Floer chain complex} of $X$ is the functor \begin{IEEEeqnarray*}{rrCl} \homology{}{S^1}{X}{F}{C}{}{} \colon & \admissible{X} & \longrightarrow & \comp \\ & (H^+,J^+) & \longmapsto & (\homology{}{S^1}{}{F}{C}{}{}(X,H^+,J^+), \del^+, U^+) \\ & \downarrow & \longmapsto & \downarrow \phi^{-,+} \\ & (H^-,J^-) & \longmapsto & (\homology{}{S^1}{}{F}{C}{}{}(X,H^-,J^-), \del^-, U^-). \end{IEEEeqnarray*} The \textbf{$S^1$-equivariant Floer homology} of $X$ is the functor $\homology{}{S^1}{X}{F}{H}{}{} = H \circ \homology{}{S^1}{X}{F}{C}{}{}$. The \textbf{positive $S^1$-equivariant Floer homology} of $X$ is the functor $\homology{}{S^1}{X}{F}{H}{+}{}$ given by \begin{IEEEeqnarray*}{rCls+x*} \homology{}{S^1}{X}{F}{H}{+}{}(H,J) & \coloneqq & \homology{}{S^1}{}{F}{H}{(\varepsilon, +\infty)}{}(X,H,J) \\ & = & \homology{}{S^1}{}{F}{H}{}{}(X,H,J) / \homology{}{S^1}{}{F}{H}{\varepsilon}{}(X,H,J). \end{IEEEeqnarray*} \end{definition} \begin{definition} If $(X,\lambda)$ is a nondegenerate Liouville domain, the \textbf{$S^1$-equivariant symplectic homology} of $X$ is the object in $\modl$ given by $\homology{}{S^1}{}{S}{H}{}{}(X,\lambda) \coloneqq \colim \homology{}{S^1}{X}{F}{H}{}{}$. The \textbf{positive $S^1$-equivariant symplectic homology} of $X$ is given by \begin{IEEEeqnarray*}{rCls+x*} \homology{}{S^1}{}{S}{H}{+}{}(X,\lambda) & \coloneqq & \colim \homology{}{S^1}{X}{F}{H}{+}{} \\ & = & \homology{}{S^1}{}{S}{H}{(\varepsilon, +\infty)}{}(X, \lambda) \\ & = & \homology{}{S^1}{}{S}{H}{}{}(X, \lambda) / \homology{}{S^1}{}{S}{H}{\varepsilon}{}(X, \lambda). \end{IEEEeqnarray*} \end{definition} \section{Viterbo transfer map of a Liouville embedding} \label{sec:viterbo transfer map of liouville embedding} Our goal is to prove that $\homology{}{S^1}{}{S}{H}{}{}$ is a contravariant functor from a suitable category of Liouville domains to $\modl$. More specifically, suppose that $(V,\lambda_V)$ and $(W,\lambda_W)$ are nondegenerate Liouville domains and $\varphi \colon (V,\lambda_V) \longrightarrow (W,\lambda_W)$ is a $0$-codimensional strict generalized Liouville embedding. We will define a \textbf{Viterbo transfer map} \begin{IEEEeqnarray*}{rrCl} \varphi_! \colon & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W) & \longrightarrow & \homology{}{S^1}{}{S}{H}{}{}(V,\lambda_V), \\ \varphi_! \colon & \homology{}{S^1}{}{S}{H}{+}{}(W,\lambda_W) & \longrightarrow & \homology{}{S^1}{}{S}{H}{+}{}(V,\lambda_V), \end{IEEEeqnarray*} which is a morphism in $\modl$. We will start by defining the Viterbo transfer map in the case where $\varphi$ is a Liouville embedding instead of just a generalized Liouville embedding. Consider the completions $\hat{V}$ and $\hat{W}$ of $V$ and $W$ respectively, as well as the induced map $\hat{\varphi} \colon \hat{V} \longrightarrow \hat{W}$. Choose $R$ so small that $\hat{\varphi}(V \union ([0,R] \times \del V)) \subset W$. We define \begin{IEEEeqnarray*}{rCls+x*} \varepsilon_V & \coloneqq & \frac{1}{2} \min \operatorname{Spec}(\del V, \lambda_V), \\ \varepsilon_W & \coloneqq & \frac{1}{2} \min \operatorname{Spec}(\del W, \lambda_W), \\ \varepsilon & \coloneqq & \min \{ \varepsilon_V, \varepsilon_W \}.
\end{IEEEeqnarray*} \begin{definition} \label{def:stair hamiltonians} A \textbf{stair} parametrized Hamiltonian is a map $\overline{H} \colon S^1 \times S^{2N+1} \times \hat{W} \longrightarrow \R$ such that $\overline{H}$ satisfies the conditions in \cref{item:invariant,item:flow lines,item:pullbacks,item:ndg} from \cref{def:hamiltonians} as well as the conditions in the items below. We denote the set of such $\overline{H}$ by $\mathcal{H}(W,V,N)$. \begin{enumerate}[label=(\Roman*)] \item \label{item:stair 1} On $S^1 \times S^{2N+1} \times V$, we have that $\hat{\varphi}^* \overline{H}$ has values in $(0, \varepsilon)$, is $S^1$-independent and is $C^2$-close to a constant. \item \label{item:stair 2} On $S^1 \times S^{2N+1} \times [0, \delta_V] \times \del V$, we have that $-\varepsilon < \hat{\varphi}^* \overline{H} < \varepsilon$ and $\hat{\varphi}^* \overline{H}$ is $C^2$-close to $(t,z,r,x) \longmapsto h_{\rmn{2}}(e^r)$, where $h_{\rmn{2}} \colon [1,e^{\delta_V}] \longrightarrow \R$ is increasing and strictly convex. \myitem[($\mathrm{S}_{V}$)] \plabel{item:stair v} On $S^1 \times S^{2N+1} \times [\delta_V, R - \delta_V] \times \del V$, we have that $\hat{\varphi}^* \overline{H}(t,z,r,x) = C_V e^r + D_V$, for $D_V \in \R$ and $C_V \in \R_{>0} \setminus (\operatorname{Spec}(\del V, \lambda_V|_{\del V}) \union \operatorname{Spec}(\del W, \lambda_W|_{\del W}))$. \item \label{item:stair 3} On $S^1 \times S^{2N+1} \times [R - \delta_V, R] \times \del V$, we have that $\hat{\varphi}^* \overline{H}$ is $C^2$-close to the function $(t,z,r,x) \longmapsto h_{\rmn{3}}(e^r)$, where $h_{\rmn{3}} \colon [e^{R - \delta_V},e^{R}] \longrightarrow \R$ is increasing and strictly concave. \item \label{item:stair 4} On $S^1 \times S^{2N+1} \times (W \setminus \hat{\varphi}(V \union ([0, R] \times \del V)))$, the function $\overline{H}$ is $C^2$-close to a constant. \item \label{item:stair 5} On $S^1 \times S^{2N+1} \times [0, \delta_W] \times \del W$, we have that $\overline{H}$ is $C^2$-close to $(t,z,r,x) \longmapsto h_{\rmn{5}}(e^r)$, where $h_{\rmn{5}} \colon [1,e^{\delta_W}] \longrightarrow \R$ is increasing and strictly convex. \myitem[($\mathrm{S}_{W}$)] \plabel{item:stair w} On $S^1 \times S^{2N+1} \times [\delta_W, +\infty) \times \del W$, we have that $\overline{H}(t,z,r,x) = C_W e^r + D_W$, for $D_W \in \R$ and $C_W \in \R_{>0} \setminus (\operatorname{Spec}(\del V, \lambda_V|_{\del V}) \union \operatorname{Spec}(\del W, \lambda_W|_{\del W}))$ such that $C_W < e^{-\delta_W}(C_V e^{R - \delta_V} + D_V)$. \end{enumerate} \end{definition} \begin{remark} If $(z, \gamma) \in \hat{\mathcal{P}}(\overline{H})$, then either $\gamma$ is nonconstant and $\img \gamma$ is in region $\rmn{2}$, $\rmn{3}$ or $\rmn{5}$, or $\gamma$ is constant and $\img \gamma$ is in region $\rmn{1}$ or $\rmn{4}$. There are no $1$-periodic orbits in the slope regions $\mathrm{S}_{V}$ and $\mathrm{S}_{W}$. \end{remark} \begin{lemma} \label{lem:action stair} The actions of $1$-periodic orbits of $\overline{H}$ are ordered according to \begin{IEEEeqnarray*}{c+x*} \mathcal{A}_{\overline{H}}(\rmn{4}) < \mathcal{A}_{\overline{H}}(\rmn{5}) < 0 < \mathcal{A}_{\overline{H}}(\rmn{1}) < \varepsilon < \mathcal{A}_{\overline{H}}(\rmn{2}). \end{IEEEeqnarray*} \end{lemma} \begin{proof} Consider \cref{fig:action stair}.
By \cref{lem:action in symplectization,def:stair hamiltonians}, we have that $\mathcal{A}_{\overline{H}}$ is constant in regions $\rmn{1}$, $\mathrm{S}_{V}$, $\rmn{4}$ and $\mathrm{S}_{W}$, $\mathcal{A}_{\overline{H}}$ is strictly increasing in regions $\rmn{2}$ and $\rmn{5}$, and $\mathcal{A}_{\overline{H}}$ is strictly decreasing in region $\rmn{3}$. From this reasoning, we conclude that $\mathcal{A}_{\overline{H}}(\rmn{4}) < \mathcal{A}_{\overline{H}}(\rmn{5})$ and $0 < \mathcal{A}_{\overline{H}}(\rmn{1}) < \varepsilon$. By the same argument as in the proof of \cref{lem:action admissible}, we conclude that $\varepsilon < \mathcal{A}_{\overline{H}}(\rmn{2})$. We show that $\mathcal{A}_{\overline{H}}(\rmn{5}) < 0$. \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\mathcal{A}_{\overline{H}}(\rmn{5})}\\ \quad & = & e^{r_W} T(r_W) - H(r_W) & \quad [\text{by \cref{lem:action in symplectization}}] \\ \quad & < & e^{r_W} C_W - H(r_W) & \quad [\text{$T(\delta_W) = C_W$ and $T' = \exp \cdot h_{\rmn{5}}'' \circ \exp > 0$}] \\ \quad & < & e^{r_W} C_W - (C_V e^{R-\delta_V} + D_V) & \quad [\text{$H(r_W) > H(R - \delta_V) = C_V e^{R-\delta_V} + D_V$}] \\ \quad & < & e^{\delta_W} C_W - (C_V e^{R-\delta_V} + D_V) & \quad [\text{since $r_W < \delta_W$}] \\ \quad & < & 0 & \quad [\text{since $C_W < e^{-\delta_W}(C_V e^{R - \delta_V} + D_V)$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{figure}[ht] \centering \begin{tikzpicture} [ help lines/.style={thin, draw = black!50}, Hamiltonian/.style={thick}, action/.style={thick}, axisv/.style={}, axisw/.style={} ] \tikzmath{ \a = 4; \b = 3; \c = 3; \d = 0.5; \e = 3; \f = 3; \g = 1; \h = 0.4; \sml = 0.05; \dOne = -0.3; \dFour = 2.5; \vFive = 2.6; \mTwo = -(12 * (-\dOne + \dFour) * exp(\d))/((-1 + exp(\d))^3 * (1 + exp(\d)) * (-exp(\d) + exp(\e))); \n = (2 * (-\dOne + \dFour) * exp(\d) * (-1 + 3 * exp(\d)))/((-1 + exp(\d))^3 * (1 + exp(\d)) * (-exp(\d) + exp(\e))); \o = (\dFour * exp(1)^\d - 2 * \dFour * exp(2 * \d) + 2 * \dOne * exp(4 * \d) - \dOne * exp(5 * \d) - \dOne * exp(\e) + 2 * \dOne * exp(\d + \e) - 2 * \dOne * exp(3 * \d + \e) + \dOne * exp(4 * \d + \e))/((-1 + exp(\d))^3 * (1 + exp(\d)) * (-exp(\d) + exp(\e))); \uv = (2 * (-\dOne + \dFour) * exp(\d))/((1 + exp(\d)) * (-exp(\d) + exp(\e))) ; \vv = (\dFour * exp(\d) - \dOne * exp(\e))/(exp(\d) - exp(\e)) ; \mThree = -(12 * (-\dOne + \dFour) * exp(4 * \d + \e))/((-1 + exp(\d))^3 * (1 + exp(\d)) * (exp(\d) - exp(\e))); \q = - (2 * (-\dOne + \dFour) * exp(3 * \d + \e) * (-3 + exp(\d)))/((-1 + exp(\d))^3 * (1 + exp(\d)) * (exp(\d) - exp(\e))); \s = (-\dFour * exp(\d) + 2 * \dFour * exp(2 * \d) - 2 * \dFour * exp(4 * \d) + \dFour * exp(5 * \d) + \dFour * exp(\e) - 2 * \dFour * exp(\d + \e) + 2 * \dOne * exp(3 * \d + \e) - \dOne * exp(4 * \d + \e))/((-1 + exp(\d))^3 * (1 + exp(\d)) * (exp(\d) - exp(\e))); \uw = -2 * (\dFour - \vFive) / (-1+exp(\g)); \vw = (2 * exp(\g) * \dFour - \vFive - exp(\g) * \vFive) / (-1+exp(\g)); \jj = - 12 * (-\dFour + \vFive) / (-1+exp(\g))^4; \kk = 2 * (-1 + 3 * exp(\g)) * (-\dFour + \vFive) / (-1+exp(\g))^4; \la = ( -2 * exp(\g) * \dFour + 6 * exp(2 * \g) * \dFour - 4 * exp(3 * \g) * \dFour + exp(4 * \g) * \dFour + \vFive - 2 * exp(\g) * \vFive ) / (-1+exp(\g))^4; function h2 (\r) { return {\o + \n * \r + 1/2 * exp(\d) * \mTwo * \r^2 + 1/6 * (-1 - exp(\d)) * \mTwo * \r^3 + (\mTwo * \r^4)/12}; }; function dh2 (\r) { return {\n + 1/6 * \mTwo * \r * (-3 * exp(\d) * (-2 + \r) + \r * (-3 + 2 * \r))}; }; function h3 (\r) { return {\s + \q * \r - (1/6) * exp(-\d) * \mThree 
* (-3 + \r) * \r^2 + 1/12 * \mThree * (-2 + \r) * \r^3}; }; function dh3 (\r) { return {\q + (1/6) * exp(-\d) * \mThree * \r * (6 - 3 * (1 + exp(\d)) * \r + 2 * exp(\d) * \r^2) }; }; function h5 (\r) { return {\la + \kk * \r + 1/2 * exp(\g) * \jj * \r^2 + 1/6 * (-1 - exp(\g)) * \jj * \r^3 + 1/12 * \jj * \r^4 }; }; function dh5 (\r) { return {\kk + 1/6 * \jj * \r * (-3 * exp(\g) * (-2 + \r) + \r * (-3 + 2 * \r))}; }; function hsv (\r) { return {\uv * \r + \vv}; }; function hsw (\r) { return {\uw * \r + \vw}; }; function H2 (\r) { return {h2 (exp(\r))}; }; function H3 (\r) { return {h3 (exp(\r))}; }; function H5 (\r) { return {h5 (exp(\r))}; }; function Hsv (\r) { return {hsv(exp(\r))}; }; function Hsw (\r) { return {hsw(exp(\r))}; }; function a2 (\r) { return { exp(\r) * dh2(exp(\r)) - H2(\r) }; }; function a3 (\r) { return { exp(\r) * dh3(exp(\r)) - H3(\r) }; }; function a5 (\r) { return { exp(\r) * dh5(exp(\r)) - H5(\r) }; }; \i = ln((\a-\vw)/\uw) - \g; \test = -\uw + exp(-\g) * (\uv * exp(\e-\d) + \vv); } \draw[Hamiltonian, domain = 0 :\c ] plot (\x, {\dOne}); \draw[Hamiltonian, domain = \c :\c+\d ] plot (\x, {H2(\x - \c)}); \draw[Hamiltonian, domain = \c+\d :\c+\e-\d ] plot (\x, {Hsv(\x - \c)}); \draw[Hamiltonian, domain = \c+\e-\d :\c+\e ] plot (\x, {H3(\x - \c - \e)}); \draw[Hamiltonian, domain = \c+\e :\c+\e+\f ] plot (\x, {\dFour}); \draw[Hamiltonian, domain = \c+\e+\f :\c+\e+\f+\g ] plot (\x, {H5(\x - \c - \e - \f)}); \draw[Hamiltonian, domain = \c+\e+\f+\g:\c+\e+\f+\g+\i] plot (\x, {Hsw(\x - \c - \e - \f)}) node[right] {$\overline{H}$}; \draw[action, domain = 0 :\c ] plot (\x, {-\dOne}); \draw[action, domain = \c :\c+\d ] plot (\x, {a2(\x - \c)}); \draw[action, domain = \c+\d :\c+\e-\d ] plot (\x, {-\vv}); \draw[action, domain = \c+\e-\d :\c+\e ] plot (\x, {a3(\x - \c - \e)}); \draw[action, domain = \c+\e :\c+\e+\f ] plot (\x, {-\dFour}); \draw[action, domain = \c+\e+\f :\c+\e+\f+\g ] plot (\x, {a5(\x - \c - \e - \f)}); \draw[action, domain = \c+\e+\f+\g:\c+\e+\f+\g+\i] plot (\x, {-\vw}) node[right] {$\mathcal{A}_{\overline{H}}$}; \draw[help lines] (0,\h) node[left] {$+\varepsilon$} -- (\c+\e+\f+\g+\i,\h); \draw[help lines] (0,-\h) node[left] {$-\varepsilon$} -- (\c+\e+\f+\g+\i,-\h); \draw[help lines] (\c+\d,-\b) node[below, axisv] {$\delta_V$} -- (\c+\d,\a); \draw[help lines] (\c+\e-\d,-\b) node[below, axisv] {$R-\delta_V\hspace{1.5em}$} -- (\c+\e-\d,\a); \draw[help lines] (\c+\e,-\b) node[below, axisv] {$\hspace{0.5em}R$} -- (\c+\e,\a); \draw[help lines] (\c+\e+\f+\g,-\b) node[below, axisw] {$\delta_W$} -- (\c+\e+\f+\g,\a); \draw[->] (0,-\b) -- (0,\a) node[above] {$\R$}; \draw (0,0) -- (\c,0); \draw[->, axisw] (\c+\e+\f,0) -- (\c+\e+\f+\g+\i,0); \draw[->, axisw] (\c+\e+\f,-\b) node[below] {$0$} -- (\c+\e+\f,\a) node[above] {$\R$}; \draw[->, axisv] (\c,0) -- (\c+\e+\f,0); \draw[->, axisv] (\c,-\b) node[below] {$0$} -- (\c,\a) node[above] {$\R$}; \draw (\c/2,\a) node[below] {$\mathrm{I}$}; \draw (\c+\d/2,\a) node[below] {$\mathrm{II}$}; \draw (\c+\e/2,\a) node[below] {$\mathrm{S}_{{V}}$}; \draw (\c+\e-\d/2,\a) node[below] {$\mathrm{III}$}; \draw (\c+\e+\f/2,\a) node[below] {$\mathrm{IV}$}; \draw (\c+\e+\f+\g/2,\a) node[below] {$\mathrm{V}$}; \draw (\c+\e+\f+\g+1,\a) node[below] {$\mathrm{S}_{{W}}$}; \draw[help lines, decoration = {brace, mirror, raise=5pt}, decorate] (0,-\b-.75) -- node[below=6pt] {\scriptsize $V$} (\c - \sml,-\b-.75); \draw[help lines, decoration = {brace, mirror, raise=5pt}, decorate] (\c+\sml,-\b-.75) -- node[below=6pt] {\scriptsize $[0,R] \times \del V$} (\c + 
\e - \sml,-\b-.75); \draw[help lines, decoration = {brace, mirror, raise=5pt}, decorate] (\c+\e+\sml,-\b-.75) -- node[below=6pt] {\scriptsize ${W \setminus \hat{\varphi} (V \union [0,R] \times \del V)}$} (\c + \e + \f - \sml,-\b-.75); \draw[help lines, decoration = {brace, mirror, raise=5pt}, decorate] (\c+\e+\f+\sml,-\b-.75) -- node[below=6pt] {\scriptsize $\R_{\geq 0} \times \del W$} (\c+\e+\f+\g+\i,-\b-.75); \end{tikzpicture} \caption{Action of a $1$-periodic orbit of $\overline{H}$} \label{fig:action stair} \end{figure} \begin{definition} \phantomsection\label{def:stair acs} A \textbf{stair} parametrized almost complex structure is a map $\overline{J} \colon S^1 \times S^{2N+1} \times \hat{W} \longrightarrow \End(T \hat{W})$ satisfying the conditions in \cref{def:stair acs 1,def:stair acs 2,def:stair acs 3,def:stair acs 4} below. We denote the set of such $\overline{J}$ by $\mathcal{J}(W,V,N)$. \begin{enumerate} \item \label{def:stair acs 1} $\overline{J}$ is $S^1$-invariant. \item \label{def:stair acs 2} $\overline{J}$ is $\hat{\omega}$-compatible. \item \label{def:stair acs 3} $\overline{J}$ is cylindrical on $S^1 \times S^{2N+1} \times [0, \delta] \times \del V$ and on $S^1 \times S^{2N+1} \times \R_{\geq 0} \times \del W$. \item \label{def:stair acs 4} $(\tilde{\iota}_0^{N,N-1})^* \overline{J} = (\tilde{\iota}_1^{N,N-1})^* \overline{J}$. \end{enumerate} \end{definition} \begin{definition} Define sets \begin{IEEEeqnarray*}{rCls+x*} \stair{W,V} & \coloneqq & \left\{ (\overline{H}, \overline{J}) \ \middle\vert \begin{array}{l} \overline{H} \in \mathcal{H}(W,V,N) \text{ and } \overline{J} \in \mathcal{J}(W,V,N) \text{ for some }N, \\ (\overline{H}, \overline{J}) \text{ is regular} \end{array} \right\}, \\ \admstair{W,V} & \coloneqq & \left\{ (H,J,\overline{H}, \overline{J}) \ \middle\vert \begin{array}{l} H \in \mathcal{H}(W,N), J \in \mathcal{J}(W,N), \\ \overline{H} \in \mathcal{H}(W,V,N) \text{ and } \overline{J} \in \mathcal{J}(W,V,N) \text{ for some }N, \\ H \leq \overline{H}, \text{ and } (H,J) \text{ and } (\overline{H}, \overline{J}) \text{ are regular} \end{array} \right\}. \end{IEEEeqnarray*} Define preorders on $\stair{W,V}$ and $\admstair{W,V}$ by \begin{IEEEeqnarray*}{rCls+x*} (\overline{H}^+,\overline{J}^+) \leq (\overline{H}^-,\overline{J}^-) & \mathrel{\mathop:}\Longleftrightarrow & \left\{ \begin{array}{l} N^+ \leq N^-, \\ \overline{H}^+ \leq (\inc_0 ^{N^-,N^+})^* \overline{H}^-, \end{array} \right. \\ (H^+,J^+,\overline{H}^+,\overline{J}^+) \leq (H^-,J^-,\overline{H}^-,\overline{J}^-) & \mathrel{\mathop:}\Longleftrightarrow & \left\{ \begin{array}{l} N^+ \leq N^-, \\ H^+ \leq (\inc_0 ^{N^-,N^+})^* H^-, \\ \overline{H}^+ \leq (\inc_0 ^{N^-,N^+})^* \overline{H}^-. \end{array} \right. \end{IEEEeqnarray*} \end{definition} \begin{definition} Define a function $\pi^{\mathcal{H}}_{W,V,N} \colon \mathcal{H}(W,V,N) \longrightarrow \mathcal{H}(V,N)$ by $\pi_{W,V,N}^{\mathcal{H}}(\overline{H}) = \overline{H}_V$, where \begin{IEEEeqnarray*}{c+x*} \overline{H}_V(t,z,x) \coloneqq \begin{cases} \overline{H}(t,z,\hat{\varphi}(x)) & \text{if } x \in V \union ([0,R] \times \del V), \\ C_V e^r + D_V & \text{if } x = (r,y) \in [R, +\infty) \times \del V. 
\end{cases} \end{IEEEeqnarray*} Define a function $\pi^{\mathcal{J}}_{W,V,N} \colon \mathcal{J}(W,V,N) \longrightarrow \mathcal{J}(V,N)$ by $\pi_{W,V,N}^{\mathcal{J}}(\overline{J}) = \overline{J}_V$, where \begin{IEEEeqnarray*}{c+x*} \overline{J}_V(t,z,x) \coloneqq \begin{cases} \dv \hat{\varphi}^{-1}(\hat{\varphi}(x)) \circ \overline{J}(t,z,\hat{\varphi}(x)) \circ \dv \hat{\varphi}(x) & \text{if } x \in V \union ([0,R] \times \del V), \\ \dv \hat{\varphi}^{-1}(\hat{\varphi}(0,y)) \circ \overline{J}(t,z,\hat{\varphi}(0,y)) \circ \dv \hat{\varphi}(0,y) & \text{if } x = (r,y) \in [0, +\infty) \times \del V. \end{cases} \end{IEEEeqnarray*} \end{definition} \begin{definition} Define the functors \begin{IEEEeqnarray*}{rrClCl} \pi_W \colon & \admstair{W,V} & \longrightarrow & \admissible{W}, & \text{ given by } & \pi_W(H,J,\overline{H},\overline{J}) \coloneqq (H,J), \\ \pi_{W,V} \colon & \admstair{W,V} & \longrightarrow & \stair{W,V}, & \text{ given by } & \pi_{W,V}(H,J,\overline{H},\overline{J}) \coloneqq (\overline{H}, \overline{J}), \\ \pi_{W,V}^{\mathcal{H} \times \mathcal{J}} \colon & \stair{W,V} & \longrightarrow & \admissible{V}, & \text{ given by } & \pi_{W,V}^{\mathcal{H} \times \mathcal{J}}(\overline{H},\overline{J}) \coloneqq (\pi^{\mathcal{H}}_{W,V,N}(\overline{H}),\pi^{\mathcal{J}}_{W,V,N}(\overline{J})) = (\overline{H}_V, \overline{J}_V), \end{IEEEeqnarray*} for $(\overline{H}, \overline{J}) \in \mathcal{H}(W,V,N) \times \mathcal{J}(W,V,N)$. Let $\pi_V^{} \coloneqq \pi_{W,V}^{\mathcal{H} \times \mathcal{J}} \circ \pi_{W,V}^{} \colon \admstair{W,V}^{} \longrightarrow \admissible{V}^{}$. \end{definition} \begin{definition} \phantomsection\label{def:homotopy stair to admissible hamiltonian} Let $H^+ \in \mathcal{H}(W,N^+)$ be an admissible parametrized Hamiltonian and $H^- \in \mathcal{H}(W,V,N^-)$ be a stair parametrized Hamiltonian. Assume that $N^+ \leq N^-$ and $H^+ \leq (\tilde{i}_0^{N^-,N^+})^* H^-$. An \textbf{admissible} homotopy of parametrized Hamiltonians from $H^-$ to $H^+$ is a map $H \colon \R \times S^1 \times S^{2 N^+ + 1} \times \hat{W} \longrightarrow \R$ which satisfies the conditions in \cref{item:homotopy stair to admissible hamiltonian 1,item:homotopy stair to admissible hamiltonian 2,item:homotopy stair to admissible hamiltonian 3} for some $s_0 > 0$, where $H_s(t,z,x) = H(s,t,z,x)$. We denote the set of such $H$ by $\mathcal{H}(H^+,H^-)$. \begin{enumerate} \item \label{item:homotopy stair to admissible hamiltonian 1} For every $s \in (-s_0, s_0)$, we have that $H_s$ satisfies all the conditions in \cref{def:stair hamiltonians} with the exceptions that $C_{W,s}$ and $C_{V,s}$ are possibly in $\operatorname{Spec}(\del W, \lambda_W|_{\del W}) \union \operatorname{Spec}(\del V, \lambda_V|_{\del V})$ and $H_{s,z}$ is not necessarily nondegenerate for $z \in \critpt \tilde{f}_{N^+}$. \item \label{item:homotopy stair to admissible hamiltonian 2} For every $s$, if $\pm s \geq s_0$ then $H_s = (\tilde{i}_0^{N^\pm, N^+})^* H^\pm$. \item \label{item:homotopy stair to admissible hamiltonian 3} For every $(s,t,z,x) \in \R \times S^1 \times S^{2 N^+ + 1} \times \hat{W}$ we have $\del_s H(s,t,z,x) \leq 0$. \end{enumerate} \end{definition} \begin{remark} In \cref{def:homotopy stair to admissible hamiltonian}, the parameters of $H_s$ depend on $s$. In particular, the ``constant'' value that $H_s$ takes in regions $\rmn{1}$ and $\rmn{4}$ is dependent on $s$. However, the parameter $R$ does not depend on $s$.
\end{remark} \begin{definition} \label{def:homotopy stair to admissible acs} Let $J^+ \in \mathcal{J}(W,N^+)$ be an admissible parametrized almost complex structure and $J^- \in \mathcal{J}(W,V,N^-)$ be a stair parametrized almost complex structure. An \textbf{admissible} homotopy of parametrized almost complex structures from $J^-$ to $J^+$ is a map $J \colon \R \times S^1 \times S^{2 N^+ + 1} \times \hat{W} \longrightarrow \End(T \hat{W})$ which satisfies the conditions in \cref{item:homotopy stair to admissible acs 1,item:homotopy stair to admissible acs 2} for some $s_0 > 0$, where $J_s(t,z,x) = J(s,t,z,x)$. We denote the set of such $J$ by $\mathcal{J}(J^+,J^-)$. \begin{enumerate} \item \label{item:homotopy stair to admissible acs 1} For every $s \in (-s_0, s_0)$, we have that $J_s$ satisfies all the conditions in \cref{def:stair acs}. \item \label{item:homotopy stair to admissible acs 2} For every $s$, if $\pm s \geq s_0$ then $J_s = (\tilde{i}_0^{N^\pm, N^+})^* J^\pm$. \end{enumerate} \end{definition} \begin{remark} \label{rmk:floer complex wrt stair} Let $K = (H,J,\overline{H},\overline{J}) \in \admstair{W,V}$ and consider $\pi_W(K) = (H,J) \in \admissible{W}$ and $\pi_{W,V}(K) = (\overline{H},\overline{J}) \in \stair{W,V}$. In \cref{sec:Floer homology} we defined $\homology{}{S^1}{}{F}{C}{}{}(W,H,J)$, the Floer chain complex of $W$ with respect to the auxiliary data $(H,J)$, for every $(H,J) \in \admissible{W}$. Despite the fact that $(\overline{H}, \overline{J})$ is not an element of $\admissible{W}$, the Floer chain complex $\homology{}{S^1}{}{F}{C}{}{}(W,\overline{H}, \overline{J})$ of $W$ with respect to the auxiliary data $(\overline{H}, \overline{J})$ is well-defined. More precisely, it is possible to replicate the results of \cref{sec:Floer homology} but with the category $\stair{W,V}$ instead of $\admissible{W}$. Then, we can define a functor \begin{IEEEeqnarray*}{rrCl} \homology{\mathrm{I-V}}{S^1}{W}{F}{C}{}{} \colon & \stair{W,V} & \longrightarrow & \comp \\ & (\overline{H}, \overline{J}) & \longmapsto & \homology{\mathrm{I-V}}{S^1}{W}{F}{C}{}{}(\overline{H},\overline{J}) \coloneqq \homology{}{S^1}{}{F}{C}{}{}(W,\overline{H}, \overline{J}). \end{IEEEeqnarray*} For every $(H^+, J^+, H^-, J^-) \in \admstair{W,V}$ (where $(H^-,J^-)$ denotes the stair pair), we have that $H^+ \leq {H}^-$, and therefore we can define a continuation map $\phi^{-,+} \colon \homology{}{S^1}{}{F}{C}{}{}(W,H^+,J^+) \longrightarrow \homology{}{S^1}{}{F}{C}{}{}(W,H^-,J^-)$ which is given by counting solutions of the Floer equation with respect to $H \in \mathcal{H}(H^+,H^-)$ and $J \in \mathcal{J}(J^+,J^-)$. These continuation maps assemble into a natural transformation \begin{IEEEeqnarray*}{c+x*} \phi \colon \homology{}{S^1}{W}{F}{C}{}{} \circ \pi_W^{} \longrightarrow \homology{\mathrm{I-V}}{S^1}{W}{F}{C}{}{} \circ \pi_{W,V}^{}. \end{IEEEeqnarray*} \end{remark} \begin{definition} \label{def:subcomplex} We define a functor $\homology{\mathrm{III,IV,V}}{S^1}{W}{F}{C}{}{} \colon \stair{W,V}^{} \longrightarrow \comp$ as follows. If $(\overline{H},\overline{J}) \in \stair{W,V}$, then the module $\homology{\mathrm{III,IV,V}}{S^1}{W}{F}{C}{}{}(\overline{H}, \overline{J}) \coloneqq \homology{\mathrm{III,IV,V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J})$ is the submodule of $\homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J})$ which is generated by (equivalence classes of) $1$-periodic orbits $[z, \gamma]$ of $\overline{H}$ such that $\img \gamma$ is in region $\rmn{3}$, $\rmn{4}$ or $\rmn{5}$.
The maps \begin{IEEEeqnarray*}{rrCl} \del \colon & \homology{\mathrm{III,IV,V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}) & \longrightarrow & \homology{\mathrm{III,IV,V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}), \\ U \colon & \homology{\mathrm{III,IV,V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}) & \longrightarrow & \homology{\mathrm{III,IV,V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}), \\ \phi^{-,+} \colon & \homology{\mathrm{III,IV,V}}{S^1}{}{F}{C}{}{}(W,\overline{H}^+,\overline{J}^+) & \longrightarrow & \homology{\mathrm{III,IV,V}}{S^1}{}{F}{C}{}{}(W,\overline{H}^-,\overline{J}^-). \end{IEEEeqnarray*} are the restrictions (see \cref{lem:maps restrict to subcomplex}) of the maps \begin{IEEEeqnarray*}{rrCl} \del \colon & \homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}) & \longrightarrow & \homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}), \\ U \colon & \homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}) & \longrightarrow & \homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}), \\ \phi^{-,+} \colon & \homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H}^+,\overline{J}^+) & \longrightarrow & \homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H}^-,\overline{J}^-), \end{IEEEeqnarray*} This completes the definition of $\homology{\mathrm{III,IV,V}}{S^1}{W}{F}{C}{}{}$. Since $\homology{\mathrm{III,IV,V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J})$ is a subcomplex of $\homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J})$, we have an inclusion natural transformation $\iota \colon \homology{\mathrm{III,IV,V}}{S^1}{W}{F}{C}{}{} \longrightarrow \homology{\mathrm{I-V}}{S^1}{W}{F}{C}{}{}$. \end{definition} \begin{lemma} \label{lem:maps restrict to subcomplex} In \cref{def:subcomplex}, the maps $\del, U$ and $\phi^{-,+}$ restrict to maps on $\homology{\mathrm{III,IV,V}}{S^1}{W}{F}{C}{}{}$. \end{lemma} \begin{proof} To show that $U$ restricts to a map on $\homology{\mathrm{III,IV,V}}{S^1}{W}{F}{C}{}{}$, we simply note that by definition $U$ affects only $z$ and not $\gamma$. We show that $\del$ restricts to a map on $\homology{\mathrm{III,IV,V}}{S^1}{W}{F}{C}{}{}$. For this, let $[z^{\pm}, \gamma^{\pm}] \in \mathcal{P}(\overline{H})$ be such that $\img \gamma^+$ is in region $\rmn{3}$, $\rmn{4}$ or $\rmn{5}$ and assume that there exists a Floer trajectory from $[z^+, \gamma^+]$ to $[z^-, \gamma^-]$ with respect to $(\overline{H}, \overline{J})$. We need to show that $\img \gamma^-$ is in region $\rmn{3}$, $\rmn{4}$ or $\rmn{5}$. Assume by contradiction that $\img \gamma^-$ is in region $\rmn{1}$ or $\rmn{2}$. In the case where $\img \gamma^+$ is in region $\rmn{4}$ or $\rmn{5}$, the computation \begin{IEEEeqnarray*}{rCls+x*} 0 & < & \mathcal{A}_{\overline{H}}(z^-,\gamma^-) & \quad [\text{by \cref{lem:action stair}}] \\ & \leq & \mathcal{A}_{\overline{H}}(z^+,\gamma^+) & \quad [\text{by \cref{lem:action energy for floer trajectories}}] \\ & < & 0 & \quad [\text{by \cref{lem:action stair}}] \end{IEEEeqnarray*} gives a contradiction. It remains to derive a contradiction in the case where $\img \gamma^+$ is in region $\rmn{3}$. By \cref{cor:hamiltonian orbits are reeb orbits}, $\gamma^+$ is (approximately) of the form $\gamma^+(t) = (r^+, \rho^+(t))$ for some Reeb orbit $\rho^+$ in $(\del V, \lambda_V|_{\del V})$. 
The ``no escape'' lemma (\cref{lem:no escape}) implies that the Floer trajectory is inside $\hat{\varphi}(V \union [0, r^+] \times \del V)$, while the ``asymptotic behaviour'' lemma (\cref{lem:asymptotic behaviour}) implies that the Floer trajectory must leave $\hat{\varphi}(V \union [0, r^+] \times \del V)$. This completes the proof that $\del$ restricts to a map on $\homology{\mathrm{III,IV,V}}{S^1}{W}{F}{C}{}{}$. To show that $\phi^{-,+}$ restricts to a map on $\homology{\mathrm{III,IV,V}}{S^1}{W}{F}{C}{}{}$, we would use a proof analogous to that of $\del$. The key difference is that now the Floer trajectory would be defined with respect to homotopies of Hamiltonians and almost complex structures. This does not affect the proof because \cref{lem:action energy for floer trajectories,lem:asymptotic behaviour,lem:no escape} also apply to homotopies. \end{proof} \begin{definition} \label{def:quotient complex} Define a functor $\homology{\mathrm{I,II}}{S^1}{W}{F}{C}{}{} \colon \stair{W,V}^{} \longrightarrow \comp$ as follows. For $(\overline{H},\overline{J}) \in \stair{W,V}$, the module $\homology{\mathrm{I,II}}{S^1}{W}{F}{C}{}{}(\overline{H}, \overline{J}) \coloneqq \homology{\mathrm{I,II}}{S^1}{}{F}{C}{}{}(W,\overline{H}, \overline{J})$ is given by the quotient \begin{IEEEeqnarray*}{rCls+x*} \homology{\mathrm{I,II}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}) & \coloneqq & \homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H}, \overline{J}) / \homology{\mathrm{III,IV,V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}). \end{IEEEeqnarray*} For $(\overline{H}^+,\overline{J}^+) \leq (\overline{H}^{-},\overline{J}^-) \in \stair{W,V}$, the continuation map $\phi^{-,+} \colon \homology{\mathrm{I,II}}{S^1}{}{F}{C}{}{}(W,\overline{H}^+,\overline{J}^+) \longrightarrow \homology{\mathrm{I,II}}{S^1}{}{F}{C}{}{}(W,\overline{H}^-,\overline{J}^-)$ is the unique map such that the diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \homology{\mathrm{III,IV,V}}{S^1}{}{F}{C}{}{}(W,\overline{H}^+,\overline{J}^+) \ar[r, hookrightarrow, "\iota^{+}"] \ar[d, swap, "\phi^{-,+}"] & \homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H}^+,\overline{J}^+) \ar[d, "\phi^{-,+}"] \ar[r, two heads, "\pi^{+}"] & \homology{\mathrm{I,II}}{S^1}{}{F}{C}{}{}(W,\overline{H}^+,\overline{J}^+) \ar[d, dashed, "\exists ! \phi^{-,+}"]\\ \homology{\mathrm{III,IV,V}}{S^1}{}{F}{C}{}{}(W,\overline{H}^-,\overline{J}^-) \ar[r, hookrightarrow, swap, "\iota^{-}"] & \homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H}^-,\overline{J}^-) \ar[r, two heads, swap, "\pi^{-}"] & \homology{\mathrm{I,II}}{S^1}{}{F}{C}{}{}(W,\overline{H}^-,\overline{J}^-) \end{tikzcd} \end{IEEEeqnarray*} commutes. There is a projection natural transformation $\pi \colon \homology{\mathrm{I-V}}{S^1}{W}{F}{C}{}{} \longrightarrow \homology{\mathrm{I,II}}{S^1}{W}{F}{C}{}{}$. \end{definition} \begin{definition} \label{def:v with respect to stair nt} We define a natural transformation $\eta \colon \homology{}{S^1}{V}{F}{C}{}{} \circ \pi^{\mathcal{H} \times \mathcal{J}}_{W,V} \longrightarrow \homology{\mathrm{I,II}}{S^1}{W}{F}{C}{}{}$ as follows. For $(\overline{H},\overline{J}) \in \stair{W,V}$, the map $\eta^{\overline{H},\overline{J}} \colon \homology{}{S^1}{}{F}{C}{}{}(V,\overline{H}_V, \overline{J}_V) \longrightarrow \homology{\mathrm{I,II}}{S^1}{}{F}{C}{}{}(W,\overline{H}, \overline{J})$ is given by $\eta^{\overline{H},\overline{J}}([z,\gamma]) \coloneqq [z, \hat{\varphi} \circ \gamma]$. 
\end{definition} \begin{lemma} \cref{def:v with respect to stair nt} is well posed, i.e.: \begin{enumerate} \item \label{lem:v with respect to stair nt 1} $\eta^{\overline{H},\overline{J}}$ is well-defined and it is a morphism of filtered modules. \item \label{lem:v with respect to stair nt 2} $\eta^{\overline{H},\overline{J}}$ commutes with the $U$ map. \item \label{lem:v with respect to stair nt 3} $\eta^{\overline{H},\overline{J}}$ is a chain map. \item \label{lem:v with respect to stair nt 4} The maps $\eta^{\overline{H},\overline{J}}$ assemble into a natural transformation. \end{enumerate} \end{lemma} \begin{proof} \ref{lem:v with respect to stair nt 1}: Since $\hat{\varphi}$ is a Liouville embedding, if $[z,\gamma] \in \mathcal{P}(\overline{H}_V)$ then $[z,\hat{\varphi} \circ \gamma] \in \mathcal{P}(\overline{H})$ and $\mathcal{A}_{\overline{H}}(z,\hat{\varphi} \circ \gamma) = \mathcal{A}_{\overline{H}_V}(z,\gamma)$. \ref{lem:v with respect to stair nt 2}: We need to show that $U^{}_W \circ \eta^{\overline{H},\overline{J}}([z,\gamma]) = \eta^{\overline{H},\overline{J}} \circ U ^{}_V ([z,\gamma])$, for $[z,\gamma] \in \mathcal{P}(\overline{H}_V)$. If $\morse(z) = 0$, then both sides of the equation are $0$. If $\morse(z) > 0$, then \begin{IEEEeqnarray*}{rCls+x*} U^{}_W \circ \eta^{\overline{H},\overline{J}}([z,\gamma]) & = & U^{}_W ([z,\hat{\varphi} \circ \gamma]) & \quad [\text{by definition of $\eta$}] \\ & = & [\shf(z),\hat{\varphi} \circ \gamma] & \quad [\text{by definition of $U$}] \\ & = & \eta^{\overline{H},\overline{J}} [\shf(z),\gamma] & \quad [\text{by definition of $\eta$}] \\ & = & \eta^{\overline{H},\overline{J}} \circ U ^{}_V ([z,\gamma]) & \quad [\text{by definition of $U$}]. \end{IEEEeqnarray*} \ref{lem:v with respect to stair nt 3}: We need to show that $\eta^{\overline{H},\overline{J}} \circ \del ^{}_V([z^+,\gamma^+]) = \del ^{}_W \circ \eta^{\overline{H},\overline{J}}([z^+,\gamma^+])$, for every $[z^+,\gamma^+] \in \mathcal{P}(\overline{H}_V)$. By the ``no escape'' lemma (\cref{lem:no escape}), if $[z^-,\gamma^-] \in \mathcal{P}(\overline{H}_V)$ then the map \begin{IEEEeqnarray*}{rrCl} & \mathcal{M}_{\vphantom{0}}(\overline{H}_V,\overline{J}_V,[z^+,\gamma^+],[z^-,\gamma^-]) & \longrightarrow & \mathcal{M}_{\vphantom{0}}(\overline{H},\overline{J},[z^+,\hat{\varphi} \circ \gamma^+],[z^-,\hat{\varphi} \circ \gamma^-]) \\ & [w,u] & \longmapsto & [w,\hat{\varphi} \circ u] \end{IEEEeqnarray*} is an orientation preserving diffeomorphism. 
Then, we compute \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\eta^{\overline{H},\overline{J}} \circ \del ^{}_V([z^+,\gamma^+])}\\ \quad & = & \sum_{[z^-,\gamma^-] \in \mathcal{P}(\overline{H}_V) } \# \mathcal{M}_{\vphantom{0}}(\overline{H}_V, \overline{J}_V, [z^+,\gamma^+] , [z^-,\gamma^-] ) \cdot \eta^{\overline{H},\overline{J}} ([z^-,\gamma^-]) \\ \quad & = & \sum_{[z^-,\gamma^-] \in \mathcal{P}(\overline{H}_V) } \# \mathcal{M}_{\vphantom{0}}(\overline{H}_V, \overline{J}_V, [z^+,\gamma^+] , [z^-,\gamma^-] ) \cdot [z^-,\hat{\varphi} \circ \gamma^-] \\ \quad & = & \sum_{[z^-,\gamma^-] \in \mathcal{P}(\overline{H}_V) } \# \mathcal{M}_{\vphantom{0}}(\overline{H} , \overline{J} , [z^+,\hat{\varphi} \circ \gamma^+], [z^-,\hat{\varphi} \circ \gamma^-]) \cdot [z^-,\hat{\varphi} \circ \gamma^-] \\ \quad & = & \sum_{[z^-,\gamma^-_W] \in \mathcal{P}^{\mathrm{I,II}}(\overline{H})} \# \mathcal{M}_{\vphantom{0}}(\overline{H} , \overline{J} , [z^+,\gamma^+_W] , [z^-,\gamma^-_W]) \cdot [z^-,\gamma^-_W] \\ \quad & = & \sum_{[z^-,\gamma^-_W] \in \mathcal{P}(\overline{H}) } \# \mathcal{M}_{\vphantom{0}}(\overline{H} , \overline{J} , [z^+,\gamma^+_W] , [z^-,\gamma^-_W]) \cdot [z^-,\gamma^-_W] \\ \quad & = & \del ^{}_W ([z^+,\hat{\varphi} \circ \gamma^+]) \\ \quad & = & \del ^{}_W \circ \eta^{\overline{H},\overline{J}}([z^+,\gamma^+]). \end{IEEEeqnarray*} In this computation, in the third equality we used the orientation preserving diffeomorphism defined above, in the fourth equality we performed the variable change $[z^-,\gamma^-_W] \coloneqq [z^-,\hat{\varphi} \circ \gamma^-] \in \mathcal{P}^{\mathrm{I,II}}(\overline{H})$ (writing also $\gamma^+_W \coloneqq \hat{\varphi} \circ \gamma^+$), and in the fifth equality we used the fact that if $[z^-,\gamma^-_W] \in \mathcal{P}^{\mathrm{III,IV,V}}(\overline{H})$ then $[z^-,\gamma^-_W] = 0$ as an element of $\homology{\mathrm{I,II}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J})$. \ref{lem:v with respect to stair nt 4}: This proof is analogous to that of \ref{lem:v with respect to stair nt 3}. \end{proof} \begin{proposition} The map $\eta \colon \homology{}{S^1}{V}{F}{C}{}{} \circ \pi^{\mathcal{H} \times \mathcal{J}}_{W,V} \longrightarrow \homology{\mathrm{I,II}}{S^1}{W}{F}{C}{}{}$ is a natural isomorphism. \end{proposition} \begin{proof} It suffices to show that $\eta^{\overline{H},\overline{J}} \colon \homology{}{S^1}{}{F}{C}{}{}(V,\overline{H}_V,\overline{J}_V) \longrightarrow \homology{\mathrm{I,II}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J})$ admits an inverse as a map of $\Q$-modules. Define $\nu^{\overline{H},\overline{J}} \colon \homology{\mathrm{I-V}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}) \longrightarrow \homology{}{S^1}{}{F}{C}{}{}(V,\overline{H}_V,\overline{J}_V)$ by \begin{IEEEeqnarray*}{c+x*} \nu^{\overline{H},\overline{J}}([z,\gamma]) = \begin{cases} [z,\hat{\varphi}^{-1} \circ \gamma] & \text{if } [z,\gamma] \in \mathcal{P}^{\mathrm{I,II}}(\overline{H}), \\ 0 & \text{if } [z,\gamma] \in \mathcal{P}^{\mathrm{III,IV,V}}(\overline{H}). \end{cases} \end{IEEEeqnarray*} Then, by the universal property of the quotient of $\Q$-modules, $\nu^{\overline{H},\overline{J}}$ descends to a map $\nu^{\overline{H},\overline{J}} \colon \homology{\mathrm{I,II}}{S^1}{}{F}{C}{}{}(W,\overline{H},\overline{J}) \longrightarrow \homology{}{S^1}{}{F}{C}{}{}(V,\overline{H}_V,\overline{J}_V)$, which is the inverse of $\eta^{\overline{H},\overline{J}}$. \end{proof} \begin{definition} \label{def:viterbo transfer map} The \textbf{Viterbo transfer map}, $\varphi_!
\colon \homology{}{S^1}{}{S}{H}{}{}(W, \lambda_W) \longrightarrow \homology{}{S^1}{}{S}{H}{}{}(V, \lambda_V)$, is given as follows. Consider the following diagram in the category of functors from $\admstair{W,V}$ to $\comp$: \begin{IEEEeqnarray}{c+x*} \plabel{eq:viterbo transfer map diagram} \begin{tikzcd} \homology{\mathrm{III,IV,V}}{S^1}{W}{F}{C}{}{} \circ \pi_{W,V}^{} \ar[r, hook, "\iota \circ \pi_{W,V}"] & \homology{\mathrm{I-V}}{S^1}{W}{F}{C}{}{} \circ \pi_{W,V}^{} \ar[r, two heads, "\pi \circ \pi_{W,V}"] & \homology{\mathrm{I,II}}{S^1}{W}{F}{C}{}{} \circ \pi_{W,V}^{} \\ & \homology{}{S^1}{W}{F}{C}{}{} \circ \pi_{W}^{} \ar[u, "\phi"] \ar[r, dashed, swap, "\exists ! \varphi"] & \homology{}{S^1}{V}{F}{C}{}{} \circ \pi_{V}^{} \ar[u, swap, two heads, hook, "\eta \circ \pi_{W,V}"] \end{tikzcd} \end{IEEEeqnarray} Passing to homology, we get a natural transformation $H \varphi \colon \homology{}{S^1}{W}{F}{H}{}{} \circ \pi_{W}^{} \longrightarrow \homology{}{S^1}{V}{F}{H}{}{} \circ \pi_{V}^{}$. Then, $\varphi_!$ is the unique map such that the following diagram commutes: \begin{IEEEeqnarray}{c+x*} \plabel{eq:viterbo transfer map} \begin{tikzcd} \homology{}{S^1}{W}{F}{H}{}{} \circ \pi_W^{} \ar[d, "H \varphi"] \ar[r] & \colim \homology{}{S^1}{W}{F}{H}{}{} \circ \pi_W^{} \ar[r, equal] \ar[d, dashed, "\exists ! \varphi_! = \colim H \varphi"] & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W) \ar[d, dashed, "\exists ! \varphi_!"] \\ \homology{}{S^1}{V}{F}{H}{}{} \circ \pi_V^{} \ar[r] & \colim \homology{}{S^1}{V}{F}{H}{}{} \circ \pi_V^{} \ar[r, equal] & \homology{}{S^1}{}{S}{H}{}{}(V,\lambda_V) \end{tikzcd} \end{IEEEeqnarray} We define the \textbf{Viterbo transfer map} on positive $S^1$-equivariant symplectic homology by declaring it to be the unique map such that the following diagram commutes: \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \homology{}{S^1}{}{S}{H}{\varepsilon}{}(W,\lambda_W) \ar[r] \ar[d, swap, "\varphi^\varepsilon_!"] & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W) \ar[r] \ar[d, "\varphi_!"] & \homology{}{S^1}{}{S}{H}{+}{}(W,\lambda_W) \ar[d, dashed, "\exists ! \varphi^+_!"] \\ \homology{}{S^1}{}{S}{H}{\varepsilon}{}(V,\lambda_V) \ar[r] & \homology{}{S^1}{}{S}{H}{}{}(V,\lambda_V) \ar[r] & \homology{}{S^1}{}{S}{H}{+}{}(V,\lambda_V) \end{tikzcd} \end{IEEEeqnarray*} \end{definition} \begin{remark} \label{rmk:viterbo transfer map def} We have the following observations about \cref{def:viterbo transfer map}. \begin{enumerate} \item In diagram \eqref{eq:viterbo transfer map}, we view $\colim \homology{}{S^1}{W}{F}{H}{}{} \circ \pi_W$ and $\colim \homology{}{S^1}{V}{F}{H}{}{} \circ \pi_V$ as constant functors, and we view $\varphi_! \colon \colim \homology{}{S^1}{W}{F}{H}{}{} \circ \pi_W \longrightarrow \colim \homology{}{S^1}{V}{F}{H}{}{} \circ \pi_V$ as a constant natural transformation, which is just a map. Existence and uniqueness of $\varphi_!$ come from the universal property of colimits. \item Since $\pi_W ( \admstair{W,V} )$ is a cofinal subset of $\admissible{W}$, we have $\homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W) = \colim \homology{}{S^1}{W}{F}{H}{}{} = \colim \homology{}{S^1}{W}{F}{H}{}{} \circ \pi_W$, and analogously for $V$. \item We are also using the fact that \begin{IEEEeqnarray*}{rCls+x*} \homology{}{S^1}{}{S}{H}{+}{}(W,\lambda_W) & = & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W) / \homology{}{S^1}{}{S}{H}{\varepsilon_W}{} (W,\lambda_W) \\ & = & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W) / \homology{}{S^1}{}{S}{H}{\varepsilon}{}(W,\lambda_W).
\end{IEEEeqnarray*} This is true because $\homology{}{S^1}{}{S}{H}{}{}$ is obtained as a direct limit of Floer homologies for increasing Hamiltonians, and for $(H,J) \in \admissible{W}$ with $H$ big enough we have that $H$ restricted to the interior of $W$ takes values in $(-\varepsilon,0) \subset (-\varepsilon_W,0)$ (and analogously for $V$). \end{enumerate} \end{remark} Let $\liouvle$ be the category whose objects are nondegenerate Liouville domains and whose morphisms are $0$-codimensional Liouville embeddings which are either strict or diffeomorphisms. \begin{theorem}[{\cite[Theorem 3.1.16]{guttMinimalNumberPeriodic2014}}] \label{thm:sh is functor not generalized} The following are contravariant functors: \begin{IEEEeqnarray*}{rrClCrrCl} \homology{}{S^1}{}{S}{H}{}{} \colon & \liouvle & \longrightarrow & \modl & \qquad & \homology{}{S^1}{}{S}{H}{+}{} \colon & \liouvle & \longrightarrow & \modl \\ & (V,\lambda_V) & \longmapsto & \homology{}{S^1}{}{S}{H}{}{}(V,\lambda_V) & \qquad & & (V,\lambda_V) & \longmapsto & \homology{}{S^1}{}{S}{H}{+}{}(V,\lambda_V) \\ & \varphi \downarrow & \longmapsto & \uparrow \varphi_! & \qquad & & \varphi \downarrow & \longmapsto & \uparrow \varphi_!^+ \\ & (W,\lambda_W) & \longmapsto & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W), & \qquad & & (W,\lambda_W) & \longmapsto & \homology{}{S^1}{}{S}{H}{+}{}(W,\lambda_W). \end{IEEEeqnarray*} \end{theorem} \section{Viterbo transfer map of a generalized Liouville embedding} \label{sec:viterbo transfer map of exact symplectic embedding} We now define the Viterbo transfer map in the case where $\varphi \colon (V,\lambda_V) \longrightarrow (W,\lambda_W)$ is a generalized Liouville embedding, i.e. $\varphi^* \edv \lambda_W = \edv \lambda_V$ and $(\varphi^* \lambda_W - \lambda_V)|_{\partial V}$ is exact. \begin{lemma}[{\cite[Lemma 7.5]{guttSymplecticCapacitiesPositive2018}}] \label{lem:exists deformed form} If $\phi \colon (V,\lambda_V) \longrightarrow (W, \lambda_W)$ is a $0$-codimensional strict generalized Liouville embedding, then there exists a $1$-form $\lambda'_W$ on $W$ such that $\edv \lambda'_W = \edv \lambda_W^{}$, $\lambda'_W = \lambda_W^{}$ near $\partial W$ and $\phi^* \lambda'_W = \lambda_V^{}$. \end{lemma} \begin{lemma} \phantomsection\label{lem:sh indep of potential} Let $(X,\lambda_X)$ and $(Y,\lambda_Y)$ be nondegenerate Liouville domains and assume that $\phi \colon (X,\lambda_X) \longrightarrow (Y, \lambda_Y)$ is a $0$-codimensional strict Liouville embedding. Suppose that $\lambda'_X \in \Omega^1(X)$ and $\lambda'_Y \in \Omega^1(Y)$ are $1$-forms such that \begin{IEEEeqnarray*}{rClCrCl} \edv \lambda'_X & = & \edv \lambda_X^{}, & \quad & \lambda'_X & = & \lambda_X^{} \text{ near } \partial X, \\ \edv \lambda'_Y & = & \edv \lambda_Y^{}, & \quad & \lambda'_Y & = & \lambda_Y^{} \text{ near } \partial Y, \\ \phi^* \lambda'_Y & = & \lambda'_X. 
\end{IEEEeqnarray*} Then, \begin{IEEEeqnarray*}{rClCl} \homology{}{S^1}{}{S}{H}{}{}(X,\lambda_X) & = & \homology{}{S^1}{}{S}{H}{}{}(X,\lambda'_X), \\ \homology{}{S^1}{}{S}{H}{+}{}(X,\lambda_X) & = & \homology{}{S^1}{}{S}{H}{+}{}(X,\lambda'_X), \end{IEEEeqnarray*} and the diagrams \begin{IEEEeqnarray}{c+x*} \plabel{eq:viterbo transfer map indep potential} \begin{tikzcd} \homology{}{S^1}{}{S}{H}{}{}(Y,\lambda_Y) \ar[r, equal] \ar[d, swap, "\phi_!"] & \homology{}{S^1}{}{S}{H}{}{}(Y,\lambda'_Y) \ar[d, "\phi'_!"] \\ \homology{}{S^1}{}{S}{H}{}{}(X,\lambda_X) \ar[r, equal] & \homology{}{S^1}{}{S}{H}{}{}(X,\lambda'_X) \end{tikzcd} \quad \begin{tikzcd} \homology{}{S^1}{}{S}{H}{+}{}(Y,\lambda_Y) \ar[r, equal] \ar[d, swap, "\phi_!^+"] & \homology{}{S^1}{}{S}{H}{+}{}(Y,\lambda'_Y) \ar[d, "{\phi'}_!^+"] \\ \homology{}{S^1}{}{S}{H}{+}{}(X,\lambda_X) \ar[r, equal] & \homology{}{S^1}{}{S}{H}{+}{}(X,\lambda'_X) \end{tikzcd} \end{IEEEeqnarray} commute. \end{lemma} \begin{proof} We note that the following concepts only depend on $\edv \lambda_X$ and on $\lambda_X$ near $\del X$: the set of admissible Hamiltonians and admissible almost complex structures, the Hamiltonian vector field, action, the module which underlies the Floer complex (by all the previous statements), the Floer equation and the notion of Floer trajectories (also by the previous statements), the $U$ map, the differential and the continuation maps. All the statements follow immediately from the definitions given in \cref{sec:Floer homology}, except the fact that the action actually only depends on $\edv \lambda_X$ and on $\lambda_X|_{\partial X}$. To prove this, it is enough to show that \begin{IEEEeqnarray}{c+x*} \phantomsection\label{eq:action indep form} \int_{S^1}^{} \gamma^* (\hat{\lambda}_X^{} - \hat{\lambda}'_X) = 0. \end{IEEEeqnarray} Since $\hat{\lambda}_X^{} - \hat{\lambda}'_X$ is closed, it defines a cohomology class $[\hat{\lambda}_X^{} - \hat{\lambda}'_X] \in H^1_{\mathrm{dR}}(\hat{X})$. The orbit $\gamma$ also defines a homology class $[\gamma] \coloneqq \gamma_* [S^1] \in H_1(\hat{X};\Z)$. Equation \eqref{eq:action indep form} can be restated as \begin{IEEEeqnarray}{c+x*} \phantomsection\label{eq:action indep form topology} [\hat{\lambda}_X^{} - \hat{\lambda}'_X]([\gamma]) = 0. \end{IEEEeqnarray} If $\gamma$ is contractible, then Equation \eqref{eq:action indep form topology} holds. If $\gamma$ is noncontractible, $\gamma$ must have an associated Reeb orbit $\rho \in C^{\infty}(S^1, \partial X)$. Denote by $\iota \colon \partial X \longrightarrow \hat{X}$ the inclusion. \begin{IEEEeqnarray*}{rCls+x*} [\hat{\lambda}_X^{} - \hat{\lambda}'_X]([\gamma]) & = & [\hat{\lambda}_X^{} - \hat{\lambda}'_X](\iota_* [\rho]) & \quad [\text{since $\gamma$ and $\iota \circ \rho$ are homotopic}] \\ & = & (\iota^*[\hat{\lambda}_X^{} - \hat{\lambda}'_X])([\rho]) & \quad [\text{by definition of pullback}] \\ & = & 0 & \quad [\text{since $\lambda'_X = \lambda_X^{}$ near $\partial X$}]. \end{IEEEeqnarray*} Since the functors and natural transformations in diagram \eqref{eq:viterbo transfer map diagram} only depend on $\edv \lambda_X, \edv \lambda_Y$ and on $\lambda_X, \lambda_Y$ near the boundaries, the diagrams \eqref{eq:viterbo transfer map indep potential} commute. 
\end{proof} \begin{definition}[{\cite[Definition 7.6]{guttSymplecticCapacitiesPositive2018}}] \phantomsection\label{def:viterbo transfer generalized} If $\varphi \colon (V,\lambda_V) \longrightarrow (W,\lambda_W)$ is a strict generalized Liouville embedding of codimension $0$, then the \textbf{Viterbo transfer map} of $\varphi$ is defined as follows. Choose $\lambda'_W \in \Omega^1(W)$ as in \cref{lem:exists deformed form}. Denote by $\varphi' \colon (V,\lambda_V) \longrightarrow (W,\lambda'_W)$ the Liouville embedding which as a map of sets coincides with $\varphi$. Then, define \begin{IEEEeqnarray*}{rRCRCl} \varphi_! \colon & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W) & = & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda'_W) & \xrightarrow{\varphi'_!} & \homology{}{S^1}{}{S}{H}{}{}(V,\lambda_V), \\ \varphi^+_! \colon & \homology{}{S^1}{}{S}{H}{+}{}(W,\lambda_W) & = & \homology{}{S^1}{}{S}{H}{+}{}(W,\lambda'_W) & \xrightarrow{\varphi'_!} & \homology{}{S^1}{}{S}{H}{+}{}(V,\lambda_V), \end{IEEEeqnarray*} where the equality was explained in \cref{lem:sh indep of potential} and the arrows are the Viterbo transfer maps of a Liouville embedding as in \cref{def:viterbo transfer map}.\end{definition} \begin{lemma} In \cref{def:viterbo transfer generalized}, $\varphi_!$ and $\varphi_!^+$ are independent of the choice of $\lambda'_W$. \end{lemma} \begin{proof} Let $\lambda'_W$ and $\lambda''_W$ be $1$-forms as in \cref{lem:exists deformed form}, and denote the corresponding Liouville embeddings by $\varphi' \colon (V,\lambda_V) \longrightarrow (W,\lambda'_W)$ and $\varphi'' \colon (V,\lambda_V) \longrightarrow (W,\lambda''_W)$ (note that as set theoretic maps, $\varphi' = \varphi'' = \varphi$). Then, by \cref{lem:sh indep of potential}, the following diagram commutes: \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W) \ar[r, equals] \ar[d, equals] & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda'_W) \ar[d, equals] \ar[r, "\varphi'_!"] & \homology{}{S^1}{}{S}{H}{}{}(V,\lambda_V) \ar[d, equals] \\ \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W) \ar[r, equals] & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda''_W) \ar[r, "\varphi''_!"] & \homology{}{S^1}{}{S}{H}{}{}(V,\lambda_V) \end{tikzcd} \end{IEEEeqnarray*} In this diagram, the top arrow is the Viterbo transfer map defined with respect to $\lambda'_W$ and the bottom arrow is the Viterbo transfer map defined with respect to $\lambda''_W$. \end{proof} Let $\liouvndg$ be the ``category'' whose objects are nondegenerate Liouville domains and whose morphisms are $0$-codimensional generalized Liouville embeddings which are either strict or diffeomorphisms. Strictly speaking, since composition of generalized Liouville embeddings is not in general a generalized Liouville embedding, this is not a category. However, $\liouvndg$ does fit into the notion of \textbf{categroid} (see \cref{def:categroid}), which is an object like a category with only partially defined compositions. One can then talk about functors between categroids. \begin{theorem} The assignments \begin{IEEEeqnarray*}{rrClCrrCl} \homology{}{S^1}{}{S}{H}{}{} \colon & \liouvndg & \longrightarrow & \modl & \qquad & \homology{}{S^1}{}{S}{H}{+}{} \colon & \liouvndg & \longrightarrow & \modl \\ & (V,\lambda_V) & \longmapsto & \homology{}{S^1}{}{S}{H}{}{}(V,\lambda_V) & \qquad & & (V,\lambda_V) & \longmapsto & \homology{}{S^1}{}{S}{H}{+}{}(V,\lambda_V) \\ & \varphi \downarrow & \longmapsto & \uparrow \varphi_!
& \qquad & & \varphi \downarrow & \longmapsto & \uparrow \varphi_!^+ \\ & (W,\lambda_W) & \longmapsto & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W), & \qquad & & (W,\lambda_W) & \longmapsto & \homology{}{S^1}{}{S}{H}{+}{}(W,\lambda_W) \end{IEEEeqnarray*} are contravariant functors. \end{theorem} \begin{proof} We prove the result only for $\homology{}{S^1}{}{S}{H}{}{}$, since the proof for $\homology{}{S^1}{}{S}{H}{+}{}$ is analogous. It suffices to assume that $\varphi \colon (V, \lambda_V) \longrightarrow (W, \lambda_W)$ and $\psi \colon (W, \lambda_W) \longrightarrow (Z, \lambda_Z)$ are composable strict generalized Liouville embeddings of codimension 0 and to prove that $(\psi \circ \varphi)_! = \varphi_! \circ \psi_!$. Here, ``composable'' means that the composition $\psi \circ \varphi$ is also a generalized Liouville embedding. We start by choosing \begin{IEEEeqnarray*}{rClCrClrCllCrCl} \lambda'_W & \in & \Omega^1(W) & \quad\text{such that}\quad & \edv \lambda'_W & = & \edv \lambda_W^{},\quad & \lambda'_W & = & \lambda_W^{} & \text{ near } \partial W, & \quad\text{and}\quad & \varphi^* \lambda'_W & = & \lambda_V^{}, \\ \lambda'_Z & \in & \Omega^1(Z) & \quad\text{such that}\quad & \edv \lambda'_Z & = & \edv \lambda_Z^{},\quad & \lambda'_Z & = & \lambda_Z^{} & \text{ near } \partial Z, & \quad\text{and}\quad & \psi^* \lambda'_Z & = & \lambda_W^{}, \\ \lambda''_Z & \in & \Omega^1(Z) & \quad\text{such that}\quad & \edv \lambda''_Z & = & \edv \lambda'_Z, \quad & \lambda''_Z & = & \lambda'_Z & \text{ near } \partial Z, & \quad\text{and}\quad & \psi^* \lambda''_Z & = & \lambda'_W. \end{IEEEeqnarray*} Therefore, we have Liouville embeddings \begin{IEEEeqnarray*}{rCrCl} \varphi' & \colon & (V,\lambda_V^{}) & \longrightarrow & (W, \lambda'_W), \\ \psi' & \colon & (W,\lambda_W^{}) & \longrightarrow & (Z, \lambda'_Z), \\ \psi'' & \colon & (W,\lambda'_W) & \longrightarrow & (Z, \lambda''_Z). \end{IEEEeqnarray*} We can define the Viterbo transfer maps \begin{IEEEeqnarray*}{rLCLCl} \varphi_! \colon & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W) & = & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda'_W) & \xrightarrow{\varphi'_!} & \homology{}{S^1}{}{S}{H}{}{}(V,\lambda_V), \\ \psi_! \colon & \homology{}{S^1}{}{S}{H}{}{}(Z,\lambda_Z) & = & \homology{}{S^1}{}{S}{H}{}{}(Z,\lambda'_Z) & \xrightarrow{\psi'_!} & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W), \\ (\psi \circ \varphi)_! \colon & \homology{}{S^1}{}{S}{H}{}{}(Z,\lambda_Z) & = & \homology{}{S^1}{}{S}{H}{}{}(Z,\lambda''_Z) & \xrightarrow{(\psi'' \circ \varphi')_!} & \homology{}{S^1}{}{S}{H}{}{}(V,\lambda_V). \end{IEEEeqnarray*} Consider the following commutative diagram: \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \homology{}{S^1}{}{S}{H}{}{}(Z,\lambda_Z) \ar[r, equals] \ar[dr, dashed, swap, "\psi_!"] \ar[drdr, dashed, bend right, swap, "(\psi \circ \varphi)_!"] & \homology{}{S^1}{}{S}{H}{}{}(Z,\lambda'_Z) \ar[d, "\psi'_!"] \ar[r, equals] & \homology{}{S^1}{}{S}{H}{}{}(Z,\lambda''_Z) \ar[d, "\psi''_!"] \ar[dd, bend left=90, "(\psi'' \circ \varphi')_!"] \\ & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda_W) \ar[r, equals] \ar[dr, swap, dashed, "\varphi_!"] & \homology{}{S^1}{}{S}{H}{}{}(W,\lambda'_W) \ar[d, "\varphi'_!"] \\ & & \homology{}{S^1}{}{S}{H}{}{}(V,\lambda_V) \end{tikzcd} \end{IEEEeqnarray*} Here, the two small triangles and the outside arrows commute by definition of the Viterbo transfer map of a generalized Liouville embedding, the square commutes by \cref{lem:sh indep of potential}, and $(\psi'' \circ \varphi')_! = \varphi'_!
\circ \psi''_!$ by \cref{thm:sh is functor not generalized}. Therefore, $(\psi \circ \varphi)_! = \varphi_! \circ \psi_!$. \end{proof} \section{\texorpdfstring{$\delta$}{Delta} map} \label{sec:delta map} Let $(X,\lambda)$ be a nondegenerate Liouville domain. Our goal in this section is to define a map $\delta \colon \homology{}{S^1}{}{S}{H}{+}{}(X) \longrightarrow H_\bullet(BS^1;\Q) \otimes H_\bullet(X,\partial X; \Q)$. As we will see, $\delta = \alpha \circ \delta_0$, where $\delta_0 \colon \homology{}{S^1}{}{S}{H}{+}{}(X) \longrightarrow \homology{}{S^1}{}{S}{H}{\varepsilon}{}(X)$ is the connecting map associated to a long exact sequence in homology (see \cref{def:delta map}) and $\alpha \colon \homology{}{S^1}{}{S}{H}{\varepsilon}{}(X) \longrightarrow H_\bullet(BS^1;\Q) \otimes H_\bullet(X,\partial X; \Q)$ is an isomorphism which we define in several steps (see \cref{lem:iso floer and alt floer,lem:iso from floer to morse,lem:iso from floer to singular,lem:iso from symplectic to singular}). For every $(H,J) \in \admissible{X}$, define \begin{IEEEeqnarray*}{rCrCrCls+x*} H' & \coloneqq & H_{e_0} & \colon & S^1 \times \hat{X} & \longrightarrow & \R, \\ J' & \coloneqq & J_{e_0} & \colon & S^1 \times \hat{X} & \longrightarrow & \End(T \hat{X}), \end{IEEEeqnarray*} where $e_0 \in S^{2N+1} \subset \C^{N+1}$ is the first vector in the canonical basis of $\C^{N+1}$. We start by giving an alternative definition of the $S^1$-equivariant Floer chain complex. \begin{definition}[{\cite[Remark 5.15]{guttSymplecticCapacitiesPositive2018}}] We define a chain complex $\homology{}{S^1}{}{F}{C}{}{}(X,H,J)_{\mathrm{alt}}$ as follows. Let $u$ be a formal variable of degree $2$ and consider $\Q \{1,\ldots,u^N\}$, the $\Q$-module of polynomials in $u$ of degree less than or equal to $2N$. As a $\Q$-module, \begin{IEEEeqnarray*}{c+x*} \homology{}{S^1}{}{F}{C}{}{}(X,H,J)_{\mathrm{alt}} \coloneqq \Q \{1,\ldots,u^N\} \otimes \homology{}{}{}{F}{C}{}{}(X,H',J'), \end{IEEEeqnarray*} where $\homology{}{}{}{F}{C}{}{}(X,H',J')$ is the Floer chain complex (not $S^1$-equivariant) of $X$ with respect to $(H',J')$, with $\Q$ coefficients. We will now define a differential $\partial_{\mathrm{alt}}$ on $\homology{}{S^1}{}{F}{C}{}{}(X,H,J)_{\mathrm{alt}}$. For every $j = 0,\ldots,N$, define a map $\varphi_j \colon \homology{}{}{}{F}{C}{}{}(X,H',J') \longrightarrow \homology{}{}{}{F}{C}{}{}(X,H',J')$ by \begin{IEEEeqnarray*}{c+x*} \varphi_j(\gamma^+) \coloneqq \sum_{\gamma^- \in \mathcal{P}(H')} \# \mathcal{M}_{\vphantom{0}}(H,J,[e_j,\gamma^+],[e_0,\gamma^-]) \cdot \gamma^-, \end{IEEEeqnarray*} for every $\gamma^+ \in \mathcal{P}(H')$. Note that $\varphi_0 \colon \homology{}{}{}{F}{C}{}{}(X,H',J') \longrightarrow \homology{}{}{}{F}{C}{}{}(X,H',J')$ is the usual differential of the Floer chain complex. Finally, we define \begin{IEEEeqnarray*}{rrCl} \del_{\mathrm{alt}} \colon & \Q \{1,\ldots,u^N\} \tensorpr \homology{}{}{}{F}{C}{}{}(X,H',J') & \longrightarrow & \Q \{1,\ldots,u^N\} \tensorpr \homology{}{}{}{F}{C}{}{}(X,H',J') \\ & u^k \tensorpr \gamma & \longmapsto & \sum_{j=0}^{k} u ^{k-j} \tensorpr \varphi_j(\gamma). \end{IEEEeqnarray*} \end{definition} \begin{lemma}[{\cite[Section 2.3]{bourgeoisEquivariantSymplecticHomology2016}}] \label{lem:iso floer and alt floer} The map \begin{IEEEeqnarray*}{rCl} \homology{}{S^1}{}{F}{C}{}{}(X,H,J) & \longrightarrow & \homology{}{S^1}{}{F}{C}{}{}(X,H,J)_{\mathrm{alt}} \\ {[e_j, \gamma]} & \longmapsto & u^j \otimes \gamma \end{IEEEeqnarray*} is an isomorphism of chain complexes.
\end{lemma} Recall that in $X$, the Hamiltonian $H$ is assumed to be $C^2$-small and $S^1$-independent. Therefore, if $\gamma \colon S^1 \longrightarrow \hat{X}$ is a $1$-periodic orbit of $H'$ and $\img \gamma \subset X$, then $\gamma$ is constant with value $x \in X$, where $x$ is a critical point of $H'$. We will now assume that the Hamiltonian $H$ is chosen such that if $x^{\pm}$ are critical points of $H'$, then \begin{IEEEeqnarray}{c+x*} \plabel{eq:self indexing} H'(x^+) \leq H'(x^-) \Longrightarrow \morse(x^+,H') \geq \morse(x^-,H'). \end{IEEEeqnarray} We will denote by $(MC(X,H'), \partial^M)$ the Morse complex of $X$ with respect to $H'$, defined with the following conventions. As a vector space, $MC(X,H')$ is the vector space over $\Q$ generated by the critical points of $H'$. If $x^\pm$ are critical points of $H'$, the coefficient $\p{<}{}{\partial^{M} (x^+), x^-}$ is the count of gradient flow lines of $H'$ from $x^-$ to $x^+$. Finally, the degree of a critical point $x$ is the Morse index of $x$. \begin{lemma} \label{lem:iso from floer to morse} There is a canonical isomorphism of chain complexes \begin{IEEEeqnarray*}{c+x*} (\homology{}{S^1}{}{F}{C}{\varepsilon}{}(X,H,J), \partial_{\mathrm{alt}}) = (\Q \{1,\ldots,u^N\} \otimes MC(X,H'), \id \otimes \partial^M). \end{IEEEeqnarray*} \end{lemma} \begin{proof} By \cref{rmk:types of orbits,lem:action admissible,lem:iso floer and alt floer}, there is a canonical isomorphism of $\Q$-modules \begin{IEEEeqnarray*}{c+x*} \homology{}{S^1}{}{F}{C}{\varepsilon}{}(X,H,J) = \Q \{1,\ldots,u^N\} \otimes MC(X,H'). \end{IEEEeqnarray*} We show that this isomorphism is a chain map. We claim that if $j \geq 1$ and $x^+, x^-$ are critical points of $H'$, then $\dim_{(w,u)} \mathcal{M}(H,J,[e_j,x^+],[e_0,x^-]) \geq 1$. To see this, we compute \begin{IEEEeqnarray*}{rCls+x*} \dim_{(w,u)} \mathcal{M}(H,J,[e_j,x^+],[e_0,x^-]) & = & \ind(e_j, x^+) - \ind(e_0, x^-) - 1 \\ & = & \morse(e_j) - \morse(e_0) + \morse(x^+,H') - \morse(x^-,H') - 1 \\ & = & 2 j + \morse(x^+,H') - \morse(x^-,H') - 1 \\ & \geq & 2 j - 1 \\ & \geq & 1, \end{IEEEeqnarray*} where in the fourth line we used \cref{lem:action energy for floer trajectories} and Equation \eqref{eq:self indexing}. Therefore, if $j \geq 1$ and $x^+$ is a critical point of $H'$ then $\varphi_j(x^+) = 0$. This implies that \begin{IEEEeqnarray*}{c+x*} \partial_{\mathrm{alt}}(u^k \otimes x^+) = u^k \otimes \varphi_0(x^+), \end{IEEEeqnarray*} where $\varphi_0(x^+) = \partial^M(x^+)$ is the Morse theory differential applied to $x^+$. \end{proof} \begin{lemma} \label{lem:iso from floer to singular} There is a canonical isomorphism \begin{IEEEeqnarray*}{c+x*} \homology{}{S^1}{}{F}{H}{\varepsilon}{}(X,H,J) = \Q \{1,\ldots,u^N\} \otimes H_\bullet(X, \partial X; \Q). \end{IEEEeqnarray*} \end{lemma} \begin{proof} \begin{IEEEeqnarray*}{rCls+x*} \homology{}{S^1}{}{F}{H}{\varepsilon}{}(X,H,J) & = & H(\Q \{1,\ldots,u^N\} \otimes MC(X,H')) \\ & = & \Q \{1,\ldots,u^N\} \otimes MH_\bullet(X,H') \\ & = & \Q \{1,\ldots,u^N\} \otimes H_{\bullet}(X, \partial X; \Q), \end{IEEEeqnarray*} where in the first equality we used \cref{lem:iso from floer to morse}, in the second equality we used the definition of the differential of $\Q \{1,\ldots,u^N\} \otimes MC(X,H')$, and in the third equality we used the isomorphism between Morse homology and singular homology. 
\end{proof} \begin{lemma} \label{lem:iso from symplectic to singular} There is a canonical isomorphism \begin{IEEEeqnarray*}{c+x*} \alpha \colon \homology{}{S^1}{}{S}{H}{\varepsilon}{}(X) \longrightarrow H_\bullet(BS^1;\Q) \otimes H_\bullet(X,\partial X; \Q). \end{IEEEeqnarray*} \end{lemma} \begin{proof} \begin{IEEEeqnarray*}{rCls+x*} \homology{}{S^1}{}{S}{H}{\varepsilon}{}(X) & = & \varinjlim_{N,H,J} \homology{}{S^1}{}{F}{H}{\varepsilon}{}(X,H,J) \\ & = & \varinjlim_{N,H,J} \Q \{1,\ldots,u^N\} \otimes H_\bullet(X, \partial X; \Q) \\ & = & \Q[u] \otimes H_\bullet(X, \partial X; \Q) \\ & = & H_\bullet(BS^1; \Q) \otimes H_\bullet(X, \partial X; \Q), \end{IEEEeqnarray*} where in the first equality we used the definition of $S^1$-equivariant symplectic homology and in the second equality we used \cref{lem:iso from floer to singular}. \end{proof} \begin{definition} \phantomsection\label{def:delta map} We define a map $\delta \colon \homology{}{S^1}{}{S}{H}{+}{}(X) \longrightarrow H_\bullet(BS^1;\Q) \otimes H_\bullet(X,\partial X; \Q)$ as follows. For every $(H,J) \in \admissible{X}$, consider the short exact sequence of complexes \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} 0 \ar[r] & \homology{}{S^1}{}{F}{C}{\varepsilon}{}(X,H,J) \ar[r] & \homology{}{S^1}{}{F}{C}{}{}(X,H,J) \ar[r] & \homology{}{S^1}{}{F}{C}{+}{}(X,H,J) \ar[r] & 0 \end{tikzcd} \end{IEEEeqnarray*} There is an associated long exact sequence in homology \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \cdots \ar[r] & \homology{}{S^1}{}{F}{H}{}{}(X,H,J) \ar[r] & \homology{}{S^1}{}{F}{H}{+}{}(X,H,J) \ar[r, "\delta^{H,J}"] & \homology{}{S^1}{}{F}{H}{\varepsilon}{}(X,H,J) \ar[r] & \cdots \end{tikzcd} \end{IEEEeqnarray*} Passing to the colimit, we obtain a sequence \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \cdots \ar[r] & \homology{}{S^1}{}{S}{H}{}{}(X) \ar[r] & \homology{}{S^1}{}{S}{H}{+}{}(X) \ar[r, "\delta_0"] & \homology{}{S^1}{}{S}{H}{\varepsilon}{}(X) \ar[r] & \cdots \end{tikzcd} \end{IEEEeqnarray*} Finally, define $\delta \coloneqq \alpha \circ \delta_0 \colon \homology{}{S^1}{}{S}{H}{+}{}(X) \longrightarrow H_\bullet(BS^1;\Q) \otimes H_\bullet(X,\partial X; \Q)$, where $\alpha$ is the isomorphism from \cref{lem:iso from symplectic to singular}. \end{definition} Let $\varphi \colon (X,\lambda_X) \longrightarrow (Y, \lambda_Y)$ be a $0$-codimensional strict generalized Liouville embedding. Define $\rho \colon H_\bullet(Y,\partial Y; \Q) \longrightarrow H_\bullet(X,\partial X; \Q)$ to be the unique map such that the diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} H_\bullet(X,\del X; \Q) \ar[r, hook, two heads, "\varphi_*"] & H_\bullet(\varphi(X),\varphi(\del X); \Q) \ar[d, hook, two heads] \\ H_\bullet(Y,\del Y; \Q) \ar[r] \ar[u, dashed, "\exists ! \rho"] & H_\bullet(Y, Y \setminus \varphi(\itr X); \Q) \end{tikzcd} \end{IEEEeqnarray*} commutes, where $\varphi_*$ is an isomorphism by functoriality of homology and the vertical arrow on the right is an isomorphism by excision. The map $\rho$ is such that $\rho([Y]) = [X]$. \begin{proposition}[{\cite[Proposition 3.3]{guttSymplecticCapacitiesPositive2018}}] The diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \homology{}{S^1}{}{S}{H}{+}{}(Y) \ar[r, "\delta_Y"] \ar[d, swap, "\varphi_!"] & H_\bullet(BS^1;\Q) \otimes H_\bullet(Y,\partial Y; \Q) \ar[d, "\id \otimes \rho"] \\ \homology{}{S^1}{}{S}{H}{+}{}(X) \ar[r, swap, "\delta_X"] & H_\bullet(BS^1;\Q) \otimes H_\bullet(X,\partial X; \Q) \end{tikzcd} \end{IEEEeqnarray*} commutes. 
\end{proposition} \chapter{Symplectic capacities} \label{chp:symplectic capacities} \section{Symplectic capacities} \label{sec:symplectic capacities} In this section we define the notion of symplectic capacity (\cref{def:symplectic capacity}). A capacity is a function $c$ which assigns to every symplectic manifold $X$ (in a restricted subclass) a number $c(X) \in [0,+\infty]$, and which is functorial with respect to symplectic embeddings (in a restricted subclass). In the remaining sections of this chapter, we will define various capacities, namely the Lagrangian capacity (\cref{def:lagrangian capacity}), the Gutt--Hutchings capacities (\cref{def:gutt hutchings capacities}) and the McDuff--Siegel capacities (\cref{def:g tilde}). In this section we also deal with two small technicalities: \begin{enumerate} \item Most of the capacities we will deal with in this thesis are functorial with respect to generalized Liouville embeddings, which do not form a category. However, they form an object which is like a category but has only partially defined composition of morphisms. We will use the nomenclature of \cite{andersenTQFTQuantumTeichmuller2014} and call such an object a categroid (\cref{def:categroid}). \item As we will see, some capacities we will consider are defined on the class of nondegenerate Liouville domains. In the last part of this section, we will see how such a capacity can be extended uniquely to a capacity of Liouville domains. \end{enumerate} \begin{definition}[{\cite[Definition 22]{andersenTQFTQuantumTeichmuller2014}}] \label{def:categroid} A \textbf{categroid} $\mathbf{C}$ consists of a family of objects $\operatorname{Obj}(\mathbf{C})$ and for any pair of objects $A,B \in \mathbf{C}$ a set $\Hom_{\mathbf{C}}(A,B)$ such that the following holds. \begin{enumerate} \item For any three objects $A$, $B$, $C$ there is a subset $\operatorname{Comp}_{\mathbf{C}}(A,B,C) \subset \Hom_{\mathbf{C}}(B,C) \times \Hom_{\mathbf{C}}(A,B)$ of \textbf{composable morphisms} and an associated \textbf{composition map} \begin{IEEEeqnarray*}{c+x*} \circ \colon \operatorname{Comp}_{\mathbf{C}}(A,B,C) \longrightarrow \Hom_{\mathbf{C}}(A,C) \end{IEEEeqnarray*} such that composition of composable morphisms is associative. \item For any object $A$ there exists an \textbf{identity morphism} $\id_A \in \Hom_{\mathbf{C}}(A,A)$ which is composable with any morphism $f \in \Hom_{\mathbf{C}}(A,B)$ or $g \in \Hom_{\mathbf{C}}(B,A)$ and satisfies \begin{IEEEeqnarray*}{rCls+x*} f \circ \id_A & = & f, \\ \id_A \circ g & = & g. \end{IEEEeqnarray*} \end{enumerate} \end{definition} In this context, one has obvious definitions of subcategroids and also of functors between categroids. Denote by $\symp$ the category of symplectic manifolds, where morphisms are $0$-codimensional symplectic embeddings. \begin{definition} \label{def:symplectic categroid} A \textbf{symplectic categroid} is a subcategroid $\mathbf{C}$ of $\symp$ such that $(X,\omega) \in \mathbf{C}$ implies $(X,\alpha \omega) \in \mathbf{C}$ for all $\alpha > 0$. \end{definition} \begin{definition} \label{def:symplectic capacity} Let $\mathbf{C}$ be a symplectic categroid. A \textbf{symplectic capacity} is a functor $c \colon \mathbf{C} \longrightarrow [0,+\infty]$ satisfying \begin{description} \item[(Monotonicity)] If $(X,\omega_X) \longrightarrow (Y, \omega_Y)$ is a morphism in $\mathbf{C}$ then $c(X,\omega_X) \leq c(Y,\omega_Y)$; \item[(Conformality)] If $\alpha > 0$ then $c(X,\alpha \omega) = \alpha \, c(X, \omega)$. 
\end{description}
\end{definition}
Notice that the monotonicity property is just a restatement of the fact that $c$ is a functor.
\begin{definition}
  \label{def:nontrivial}
  Let $c \colon \mathbf{C} \longrightarrow [0, +\infty]$ be a symplectic capacity with the property that $B^{2n}(1), Z^{2n}(1) \in \mathbf{C}$ for every $n$. We say that $c$ is \textbf{nontrivial} if it satisfies the first of the following properties, and \textbf{normalized} if it satisfies the second:
  \begin{description}
    \item[(Nontriviality)] $0 < c(B^{2n}(1)) \leq c(Z^{2n}(1)) < + \infty$;
    \item[(Normalization)] $0 < c(B^{2n}(1)) = 1 = c(Z^{2n}(1)) < + \infty$.
  \end{description}
\end{definition}
\begin{example}
  Let $(X, \omega)$ be a $2n$-dimensional symplectic manifold. Recall that the \textbf{symplectic volume} of $X$ is given by
  \begin{IEEEeqnarray*}{c+x*}
    \operatorname{vol}(X) \coloneqq \int_{X}^{} \frac{\omega^n}{n!}.
  \end{IEEEeqnarray*}
  The \textbf{volume capacity} of $X$ is given by
  \begin{IEEEeqnarray*}{c+x*}
    c_{\mathrm{vol}}(X) \coloneqq \p{}{2}{\frac{\operatorname{vol}(X)}{\operatorname{vol}(B)}}^{1/n},
  \end{IEEEeqnarray*}
  where $B \coloneqq B^{2n}(1) \coloneqq \{z \in \C^{n} \mid \pi |z|^2 \leq 1 \}$.
\end{example}
\begin{example}
  Let $(Y,\Omega)$ be a symplectic manifold. We define the \textbf{embedding capacities}, denoted by $c_{(Y,\Omega)}$ and $c^{(Y,\Omega)}$, by
  \begin{IEEEeqnarray*}{rCll}
    c_{(Y,\Omega)}(X, \omega) & \coloneqq & \sup & \{ a > 0 \mid \text{there exists a symplectic embedding } (Y, a \Omega) \longrightarrow (X, \omega) \}, \\
    c^{(Y,\Omega)}(X, \omega) & \coloneqq & \inf & \{ a > 0 \mid \text{there exists a symplectic embedding } (X, \omega) \longrightarrow (Y, a \Omega) \},
  \end{IEEEeqnarray*}
  for any symplectic manifold $(X, \omega)$. Let $\omega_0$ denote the canonical symplectic structure of $\C^n$. In the case where $(Y, \Omega) = (B^{2n}(1), \omega_0)$ or $(Y, \Omega) = (P^{2n}(1), \omega_0)$, we denote
  \begin{IEEEeqnarray*}{lClCl}
    c_B(X,\omega) & \coloneqq & c_{(B^{2n}(1), \omega_0)}(X, \omega) & = & \sup \{ a \ | \ \text{$\exists$ symplectic embedding } B^{2n}(a) \longrightarrow X \}, \\
    c_P(X,\omega) & \coloneqq & c_{(P^{2n}(1), \omega_0)}(X, \omega) & = & \sup \{ a \ | \ \text{$\exists$ symplectic embedding } P^{2n}(a) \longrightarrow X \}.
  \end{IEEEeqnarray*}
  Embedding capacities tend to be hard to compute, since their definition is essentially a restatement of a hard embedding problem. For example, a restatement of Gromov's nonsqueezing theorem \cite{gromovPseudoHolomorphicCurves1985} is that $c_B$ is a normalized symplectic capacity. The capacity $c_B$ is also called the \textbf{Gromov width}.
\end{example}
\begin{definition}[{\cite[Section 4.2]{guttSymplecticCapacitiesPositive2018}}]
  \phantomsection\label{def:perturbation of liouville domain}
  If $(X,\lambda)$ is a Liouville domain and $f \colon \partial X \longrightarrow \R$ is a smooth function, we define a new Liouville domain $(X_f,\lambda_f)$ as follows. Consider the completion $\hat{X}$, which has as subsets $X \subset \hat{X}$ and $\R \times \partial X \subset \hat{X}$. Then,
  \begin{IEEEeqnarray*}{c+x*}
    X_f \coloneqq \hat{X} \setminus \{ (\rho,y) \in \R \times \partial X \mid \rho > f(y) \}
  \end{IEEEeqnarray*}
  and $\lambda_f$ is the restriction of $\hat{\lambda}$ to $X_f$. When $f \equiv \varepsilon$ is a constant function, we write $(X_\varepsilon, \lambda_\varepsilon) \coloneqq (X_f, \lambda_f)$. Define $\mathcal{F}_{X}^{\pm}$ to be the set of $f^{\pm} \colon \partial X \longrightarrow \R^\pm$ such that $(X_{f^\pm}, \lambda_{f^\pm})$ is nondegenerate.
\end{definition} \begin{definition} \label{def:liouville categroid} A \textbf{Liouville categroid} is a subcategroid $\mathbf{L}$ of $\symp$ such that \begin{enumerate} \item Every object of $\mathbf{L}$ is a Liouville domain. \item If $X \in \mathbf{L}$ and $f^{+} \in \mathcal{F}^{+}_X$ then $X_{f^{+}} \in \mathbf{L}$ and the inclusion $X \longrightarrow X_{f^+}$ is a morphism in $\mathbf{L}$ which is composable with any other morphisms $Y \longrightarrow X$ or $X_{f^+} \longrightarrow Z$ in $\mathbf{L}$. \item If $X \in \mathbf{L}$ and $f^{-} \in \mathcal{F}^{-}_X$ then $X_{f^{-}} \in \mathbf{L}$ and the inclusion $X_{f^-} \longrightarrow X$ is a morphism in $\mathbf{L}$ which is composable with any other morphisms $Y \longrightarrow X_{f^-}$ or $X \longrightarrow Z$ in $\mathbf{L}$. \end{enumerate} \end{definition} \begin{example} Let $\liouvgle$ be the categroid whose objects are Liouville domains and whose morphisms are $0$-codimensional generalized Liouville embeddings. Then $\liouvgle$ is a Liouville categroid. \end{example} \begin{lemma} \label{lem:c is the unique extension to lvds} Let $\mathbf{L}$ be a Liouville categroid. Let $\mathbf{L}_{\mathrm{ndg}}$ be the full subcategroid of $\mathbf{L}$ of nondegenerate Liouville domains (i.e., if $X, Y \in \mathbf{L}_{\mathrm{ndg}}$ then $\Hom_{\mathbf{L}_{\mathrm{ndg}}}(X,Y) = \Hom_{\mathbf{L}}(X,Y)$). If $c \colon \mathbf{L}_{\mathrm{ndg}} \longrightarrow [0, +\infty]$ is a symplectic capacity, then there exists a unique symplectic capacity $\overline{c} \colon \mathbf{L} \longrightarrow [0, + \infty]$ such that the following diagram commutes: \begin{IEEEeqnarray}{c+x*} \plabel{eq:diagram extend cap liouv} \begin{tikzcd} \mathbf{L}_{\mathrm{ndg}} \ar[d] \ar[dr, "c"] & \\ \mathbf{L} \ar[r, swap, "\overline{c}"] & {[0,+\infty]} \end{tikzcd} \end{IEEEeqnarray} \end{lemma} \begin{proof} This proof is based on \cite[Section 4.2]{guttSymplecticCapacitiesPositive2018}. We claim that if $\varepsilon > 0$ and $(X, \lambda)$ is a nondegenerate Liouville domain in $\mathbf{L}_{\mathrm{ndg}}$, then $(X_{\varepsilon}, \lambda_{\varepsilon})$ is nondegenerate and \begin{IEEEeqnarray}{c+x*} \plabel{eq:capacity of deformed domain} c(X_\varepsilon, \lambda_\varepsilon) = e^{\varepsilon} c (X, \lambda). \end{IEEEeqnarray} To see this, notice that the time $\varepsilon$ flow of the Liouville vector field $Z$ of $\hat{X}$ restricts to a Liouville embedding $\phi \colon (X, e^{\varepsilon} \lambda) \longrightarrow (X_\varepsilon, \lambda_\varepsilon)$ and also to a contactomorphism $\phi \colon (\partial X, e^{\varepsilon} \lambda|_{\partial X}) \longrightarrow (\partial X_\varepsilon, \partial \lambda_\varepsilon|_{\partial X_\varepsilon})$. This shows that $(X_\varepsilon, \lambda_\varepsilon)$ is nondegenerate. In particular, $(X_\varepsilon, \lambda_\varepsilon) \in \mathbf{L}_{\mathrm{ndg}}$. Finally, \begin{IEEEeqnarray*}{rCls+x*} c(X_\varepsilon, \lambda_\varepsilon) & = & c(X, e^{\varepsilon} \lambda) & \quad [\text{by functoriality of $c$}] \\ & = & e^{\varepsilon} c(X,\lambda) & \quad [\text{by conformality}]. & \end{IEEEeqnarray*} This finishes the proof of Equation \eqref{eq:capacity of deformed domain}. Define functions $c^{\pm} \colon \mathbf{L} \longrightarrow [0,+\infty]$ by \begin{IEEEeqnarray*}{rCls+x*} c^+(X) & \coloneqq & \inf_{f^+ \in \mathcal{F}^+_X} c(X_{f^+}), \\ c^-(X) & \coloneqq & \sup_{f^- \in \mathcal{F}^-_X} c(X_{f^-}). 
\end{IEEEeqnarray*} We claim that if $(X, \lambda) \in \mathbf{L}$ is a Liouville domain then \begin{IEEEeqnarray}{c+x*} \plabel{eq:c minus equals c plus} c^-(X) = c^+(X). \end{IEEEeqnarray} Monotonicity of $c$ implies $c^-(X) \leq c^+(X)$. To show the reverse inequality, it is enough to show that $c^+(X) \leq e^{\varepsilon} c^-(X)$ for every $\varepsilon > 0$. For this, choose $f^- \in \mathcal{F}^{-}_X$ such that $\img f^- \subset (- \varepsilon, 0)$ and define $f^+ = f^- + \varepsilon$. By the previous discussion, $(X_{f^+}, \lambda_{f^+})$ is nondegenerate and $f^+ \in \mathcal{F}^+_X$. Then, \begin{IEEEeqnarray*}{rCls+x*} c^+(X) & = & \inf_{g^+ \in \mathcal{F}^+_X} c(X_{g^+}) & \quad [\text{by definition of $c^+$}] \\ & \leq & c(X_{f^+}) & \quad [\text{since $f^+ \in \mathcal{F}^+_X$}] \\ & = & e^{\varepsilon} c(X_{f^-}) & \quad [\text{by Equation \eqref{eq:capacity of deformed domain}}] \\ & \leq & e^{\varepsilon} \sup_{g^- \in \mathcal{F}^-_X} c(X_{g^-}) & \quad [\text{since $f^- \in \mathcal{F}^-_X$}] \\ & = & e^{\varepsilon} c^-(X) & \quad [\text{by definition of $c^-$}], \end{IEEEeqnarray*} which finishes the proof of Equation \eqref{eq:c minus equals c plus}. Moreover, if $(X, \lambda) \in \mathbf{L}_{\mathrm{ndg}}$ is nondegenerate, then $c^-(X) \leq c(X) \leq c^+(X) = c^-(X)$, which implies \begin{IEEEeqnarray*}{c+x*} c^-(X) = c(X) = c^+(X). \end{IEEEeqnarray*} We now show that $c^{\pm}$ are symplectic capacities. The conformality property is immediate. To prove monotonicity, let $X \longrightarrow Y$ be a morphism in $\mathbf{L}$. \begin{IEEEeqnarray*}{rCls+x*} c^-(X) & = & \sup_{f^- \in \mathcal{F}^-_X} c(X_{f^-}) & \quad [\text{by definition of $c^-$}] \\ & \leq & \inf_{g^+ \in \mathcal{F}^+_Y} c(Y_{g^+}) & \quad [\text{since $X_{f^-} \subset X \longrightarrow Y \subset Y_{g^+}$ and by monotonicity of $c$}] \\ & = & c^+(Y) & \quad [\text{by definition of $c^+$}]. \end{IEEEeqnarray*} The result follows from Equation \eqref{eq:c minus equals c plus}. To prove existence, simply notice that by the above discussion, the function $\overline{c} \coloneqq c^- = c^+ \colon \mathbf{L} \longrightarrow [0, +\infty]$ has all the desired properties. To prove uniqueness, let $\overline{c}$ be any function as in the statement of the lemma. We wish to show that $\overline{c} \coloneqq c^- = c^+$. We start by showing that $c^-(X) \leq \overline{c}(X)$. \begin{IEEEeqnarray*}{rCls+x*} c^-(X) & = & \sup_{f^- \in \mathcal{F}^-_X} c(X_{f^-}) & \quad [\text{by definition of $c^-$}] \\ & = & \sup_{f^- \in \mathcal{F}^-_X} \overline{c}(X_{f^-}) & \quad [\text{by assumption on $\overline{c}$}] \\ & \leq & \sup_{f^- \in \mathcal{F}^-_X} \overline{c}(X) & \quad [\text{by monotonicity of $\overline{c}$}] \\ & = & \overline{c}(X). \end{IEEEeqnarray*} Analogously, we can show that $c^+(X) \geq \overline{c}(X)$, which concludes the proof.\end{proof} \begin{lemma} \label{lem:can prove ineqs for ndg} For $i = 0,1$, let $c_i \colon \mathbf{L}_{\mathrm{ndg}} \rightarrow [0, +\infty]$ be symplectic capacities with extensions $\overline{c}_i \colon \mathbf{L} \rightarrow [0, +\infty]$ as in \cref{lem:c is the unique extension to lvds}. If $c_0(Y) \leq c_1(Y)$ for every nondegenerate Liouville domain $Y \in \mathbf{L}_{\mathrm{ndg}}$ then $\overline{c}_0(X) \leq \overline{c}_1(X)$ for every Liouville domain $X \in \mathbf{L}$. 
\end{lemma} \begin{proof} \begin{IEEEeqnarray*}{rCls+x*} \overline{c}_0(X) & = & \sup_{f^- \in \mathcal{F}^-_X} c_0(X_{f^-}) & \quad [\text{by the definition of $\overline{c}_0$ in \cref{lem:c is the unique extension to lvds}}] \\ & \leq & \sup_{f^- \in \mathcal{F}^-_X} c_1(X_{f^-}) & \quad [\text{by assumption on $c_0$ and $c_1$}] \\ & = & \overline{c}_1(X) & \quad [\text{by the definition of $\overline{c}_1$ in \cref{lem:c is the unique extension to lvds}}]. & \qedhere \end{IEEEeqnarray*} \end{proof} By the exposition above, if $c$ is a capacity of nondegenerate Liouville domains then it can be extended to a capacity of Liouville domains. In particular, $c(X)$ is defined for any star-shaped domain $X$. However, it will be useful to us to compute capacities of the cube $P(r)$ and of the nondisjoint union of cylinders $N(r)$. These spaces are not quite star-shaped domains, because they have corners and $N(r)$ is noncompact. So we will consider a further extension of the capacity $c$. Let $\mathbf{Star}$ be the category of star-shaped domains, where there is a unique morphism $X \longrightarrow Y$ if and only if $X \subset Y$. Denote by $\mathbf{Star}_{\mathrm{ncp}}$ the category of ``star-shaped domains'' which are possibly noncompact or possibly have corners, with the same notion of morphisms. \begin{lemma} \label{lem:c is the smallest extension to ss} Let $c \colon \mathbf{Star} \longrightarrow [0, +\infty]$ be a symplectic capacity. Define a symplectic capacity $\overline{c} \colon \mathbf{Star}_{\mathrm{ncp}} \longrightarrow [0, +\infty]$ by \begin{IEEEeqnarray*}{c+x*} \overline{c}(X) = \sup_{Y \subset X} c(Y), \end{IEEEeqnarray*} where the supremum is taken over star-shaped domains $Y \subset X$ which are compact and have smooth boundary. Then, the diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \mathbf{Star} \ar[dr, "c"] \ar[d] \\ \mathbf{Star}_{\mathrm{ncp}} \ar[r, swap, "\overline{c}"] & {[0, + \infty]} \end{tikzcd} \end{IEEEeqnarray*} commutes. Moreover, $\overline{c}$ is the smallest capacity making this diagram commute. \end{lemma} \begin{proof} It is immediate that $\overline{c}$ is a symplectic capacity. We show that the diagram commutes. If $X$ is a compact star-shaped domain with smooth boundary, then \begin{IEEEeqnarray*}{rCls+x*} c(X) & \leq & \sup_{Y \subset X} c(Y) & \quad [\text{since $X$ is compact and has smooth boundary}] \\ & \leq & c(X) & \quad [\text{by monotonicity}]. \end{IEEEeqnarray*} If $\tilde{c} \colon \mathbf{Star}_{\mathrm{ncp}} \longrightarrow [0, +\infty]$ is another capacity making the diagram commute, then \begin{IEEEeqnarray*}{rCls+x*} \overline{c}(X) & = & \sup_{Y \subset X} c(Y) & \quad [\text{by definition of $\overline{c}$}] \\ & = & \sup_{Y \subset X} \tilde{c}(Y) & \quad [\text{since $\tilde{c}$ makes the diagram commute}] \\ & \leq & \tilde{c}(X) & \quad [\text{by monotonicity of $\tilde{c}$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{remark} We will always assume that every capacity of nondegenerate Liouville domains that we define is extended as in \cref{lem:c is the unique extension to lvds,lem:c is the smallest extension to ss} to possibly degenerate Liouville domains and to ``star-shaped domains'' which are possibly noncompact or possibly have corners. \end{remark} \section{Lagrangian capacity} Here, we define the Lagrangian capacity (\cref{def:lagrangian capacity}) and state its properties (\cref{prop:properties of cL}). 
One of the main goals of this thesis is to study whether the Lagrangian capacity can be computed in some cases, for example for toric domains. In the end of the section, we state some easy inequalities concerning the Lagrangian capacity (\cref{lem:c square leq c lag,lem:c square geq delta}), known computations (\cref{prp:cl of ball,prp:cl of cylinder}) and finally the main conjecture of this thesis (\cref{conj:the conjecture}), which is inspired by all the previous results. The Lagrangian capacity is defined in terms of the minimal area of Lagrangian submanifolds, which we now define. \begin{definition} Let $(X,\omega)$ be a symplectic manifold. If $L$ is a Lagrangian submanifold of $X$, then we define the \textbf{minimal symplectic area of} $L$, denoted $A_{\mathrm{min}}(L)$, by \begin{IEEEeqnarray*}{c+x*} A_{\mathrm{min}}(L) \coloneqq \inf \{ \omega(\sigma) \mid \sigma \in \pi_2(X,L), \, \omega(\sigma) > 0 \}. \end{IEEEeqnarray*} \end{definition} \begin{lemma} \label{lem:properties of minimal area} Let $\iota \colon (X,\omega) \longrightarrow (X',\omega')$ be a symplectic embedding, $L \subset X$ be an embedded Lagrangian submanifold and $L' = \iota(L)$. In this case, \begin{enumerate} \item \label{lem:properties of minimal area 1} $A_{\mathrm{min}}(L) \geq A_{\mathrm{min}}(L')$; \item \label{lem:properties of minimal area 2} $A_{\mathrm{min}}(L) = A_{\mathrm{min}}(L')$, provided that $\pi_2(X',\iota(X)) = 0$. \end{enumerate} \end{lemma} \begin{proof} \ref{lem:properties of minimal area 1}: By definition of minimal area and since the diagram \begin{IEEEeqnarray}{c+x*} \plabel{eq:diag minimal area} \begin{tikzcd}[ampersand replacement = \&] \pi_2(X,L) \ar[d, swap, "\iota_*"] \ar[dr, "\omega"] \\ \pi_2(X',L') \ar[r, swap, "\omega'"] \& \R \end{tikzcd} \end{IEEEeqnarray} commutes. \ref{lem:properties of minimal area 2}: Considering the long exact sequence of the triple $(X',\iota(X),L')$, \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd}[ampersand replacement = \&] \cdots \ar[r] \& \pi_2(\iota(X),L') \ar[r] \& \pi_2(X',L') \ar[r] \& \pi_2(X',\iota(X)) = 0 \end{tikzcd} \end{IEEEeqnarray*} we conclude that $\iota_{*} \colon \pi_2(X,L) \longrightarrow \pi_2(X',L')$ is surjective. Again, the result follows by the definition of minimal area and diagram \eqref{eq:diag minimal area}. \end{proof} \begin{lemma} \label{lem:a min with exact symplectic manifold} Let $(X,\lambda)$ be an exact symplectic manifold and $L \subset X$ be a Lagrangian submanifold. If $\pi_1(X) = 0$, then \begin{IEEEeqnarray*}{c+x*} A _{\mathrm{min}}(L) = \inf \left\{ \lambda(\rho) \ | \ \rho \in \pi_1(L), \ \lambda(\rho) > 0 \right\}. \end{IEEEeqnarray*} \end{lemma} \begin{proof} The diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd}[ampersand replacement = \&] \pi_2(L) \ar[d, swap, "0"] \ar[r] \& \pi_2(X) \ar[d, "\omega"] \ar[r] \& \pi_2(X,L) \ar[d, "\omega"] \ar[r, two heads,"\del"] \& \pi_1(L) \ar[d, "\lambda"] \ar[r, "0"] \& \pi_1(X) \ar[d, "\lambda"] \\ \R \ar[r, equals] \& \R \ar[r, equals] \& \R \ar[r, equals] \& \R \ar[r, equals] \& \R \end{tikzcd} \end{IEEEeqnarray*} commutes, where $\del([\sigma]) = [\sigma|_{S^1}]$, and the top row is exact. \end{proof} \begin{definition}[{\cite[Section 1.2]{cieliebakPuncturedHolomorphicCurves2018}}] \phantomsection\label{def:lagrangian capacity} Let $(X,\omega)$ be a symplectic manifold. 
We define the \textbf{Lagrangian capacity} of $(X,\omega)$, denoted $c_L(X,\omega)$, by \begin{IEEEeqnarray*}{c} c_L(X,\omega) \coloneqq \sup \{ A_{\mathrm{min}}(L) \mid L \subset X \text{ is an embedded Lagrangian torus}\}. \end{IEEEeqnarray*} \end{definition} \begin{proposition}[{\cite[Section 1.2]{cieliebakPuncturedHolomorphicCurves2018}}] \label{prop:properties of cL} The Lagrangian capacity $c_L$ satisfies: \begin{description} \item[(Monotonicity)] If $(X,\omega) \longrightarrow (X',\omega')$ is a symplectic embedding with $\pi_2(X',\iota(X)) = 0$, then $c_L(X,\omega) \leq c_L(X',\omega')$. \item[(Conformality)] If $\alpha \neq 0$, then $c_L(X,\alpha \omega) = |\alpha| \, c_L(X,\omega)$. \end{description} \end{proposition} \begin{proof} We prove monotonicity. \begin{IEEEeqnarray*}{rCls+x*} c_L(X,\omega) & = & \sup _{L \subset X} A _{\min}(L) & \quad [\text{by definition of $c_L$}] \\ & \leq & \sup _{L' \subset X'} A _{\min}(L') & \quad [\text{by \cref{lem:properties of minimal area}}] \\ & = & c_L(X',\omega') & \quad [\text{by definition of $c_L$}]. \end{IEEEeqnarray*} We prove conformality. Note that a submanifold $L \subset X$ is Lagrangian with respect to $\omega$ if and only if it is Lagrangian with respect to $\alpha \omega$. \begin{IEEEeqnarray*}{rCls+x*} c_L(X,\alpha \omega) & = & \sup _{L \subset (X,\alpha \omega)} A _{\mathrm{min}}(L,\alpha \omega) & \quad [\text{by definition of $c_L$}] \\ & = & \sup _{L \subset (X,\omega) } |\alpha| A _{\mathrm{min}}(L, \omega) & \quad [\text{by definition of minimal area}] \\ & = & |\alpha| \, c_L(X,\omega) & \quad [\text{by definition of $c_L$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{lemma} \label{lem:c square leq c lag} If $X$ is a star-shaped domain, then $c_L(X) \geq c_P(X)$. \end{lemma} \begin{proof} Let $\iota \colon P(a) \longrightarrow X$ be a symplectic embedding, for some $a > 0$. We want to show that $c_L(X) \geq a$. Define $T = \{ z \in \C^n \mid |z_1|^2 = a/\pi, \ldots, |z_n|^2 = a/ \pi \} \subset \partial P(a)$ and $L = \iota(T)$. Then, \begin{IEEEeqnarray*}{rCls+x*} c_L(X) & \geq & A_{\mathrm{min}}(L) & \quad [\text{by definition of $c_L$}] \\ & = & A_{\mathrm{min}}(T) & \quad [\text{by \cref{lem:properties of minimal area}}] \\ & = & a & \quad [\text{by \cref{lem:a min with exact symplectic manifold}}]. & \qedhere \end{IEEEeqnarray*} \end{proof} Recall that if $X_{\Omega}$ is a toric domain, its diagonal is given by $\delta_{\Omega} \coloneqq \sup \{ a \mid (a, \ldots, a) \in \Omega \}$ (see \cref{def:moment map}). \begin{lemma} \label{lem:c square geq delta} If $X_{\Omega}$ is a convex or concave toric domain, then $c_P(X_{\Omega}) \geq \delta_\Omega$. \end{lemma} \begin{proof} Since $X_{\Omega}$ is a convex or concave toric domain, we have that $P(\delta_\Omega) \subset X_{\Omega}$. The result follows by definition of $c_P$. \end{proof} Actually, Gutt--Hutchings show that $c_P(X_{\Omega}) = \delta_\Omega$ for any convex or concave toric domain $X_{\Omega}$ (\cite[Theorem 1.18]{guttSymplecticCapacitiesPositive2018}). However, for our purposes we will only need the inequality in \cref{lem:c square geq delta}. We now consider the results by Cieliebak--Mohnke for the Lagrangian capacity of the ball and the cylinder. \begin{proposition}[{\cite[Corollary 1.3]{cieliebakPuncturedHolomorphicCurves2018}}] \phantomsection\label{prp:cl of ball} The Lagrangian capacity of the ball is \begin{IEEEeqnarray*}{c+x*} c_L(B^{2n}(1)) = \frac{1}{n}. 
\end{IEEEeqnarray*} \end{proposition} \begin{proposition}[{\cite[p.~215-216]{cieliebakPuncturedHolomorphicCurves2018}}] \label{prp:cl of cylinder} The Lagrangian capacity of the cylinder is \begin{IEEEeqnarray*}{c+x*} c_L(Z^{2n}(1)) = 1. \end{IEEEeqnarray*} \end{proposition} By \cref{lem:c square leq c lag,lem:c square geq delta}, if $X_{\Omega}$ is a convex or concave toric domain then $c_L(X_\Omega) \geq \delta_\Omega$. But as we have seen in \cref{prp:cl of ball,prp:cl of cylinder}, if $X_\Omega$ is the ball or the cylinder then $c_L(X_\Omega) = \delta_\Omega$. This motivates \cref{conj:cl of ellipsoid} below for the Lagrangian capacity of an ellipsoid, and more generally \cref{conj:the conjecture} below for the Lagrangian capacity of any convex or concave toric domain. \begin{conjecture}[{\cite[Conjecture 1.5]{cieliebakPuncturedHolomorphicCurves2018}}] \label{conj:cl of ellipsoid} The Lagrangian capacity of the ellipsoid is \begin{IEEEeqnarray*}{c+x*} c_L(E(a_1,\ldots,a_n)) = \p{}{2}{\frac{1}{a_1} + \cdots + \frac{1}{a_n}}^{-1}. \end{IEEEeqnarray*} \end{conjecture} \begin{conjecture} \label{conj:the conjecture} If $X_{\Omega}$ is a convex or concave toric domain then \begin{IEEEeqnarray*}{c+x*} c_L(X_{\Omega}) = \delta_\Omega. \end{IEEEeqnarray*} \end{conjecture} In \cref{lem:computation of cl,thm:my main theorem} we present our results concerning \cref{conj:the conjecture}. \section{Gutt--Hutchings capacities} \label{sec:equivariant capacities} In this section we will define the Gutt--Hutchings capacities (\cref{def:gutt hutchings capacities}) and the $S^1$-equivariant symplectic homology capacities (\cref{def:s1esh capacities}), and list their properties (\cref{thm:properties of gutt-hutchings capacities,prp:properties of s1esh capacities} respectively). We will also compare the two capacities (\cref{thm:ghc and s1eshc}). The definition of these capacities relies on $S^1$-equivariant symplectic homology. In the commutative diagram below, we display the modules and maps which will play a role in this section, for a nondegenerate Liouville domain $X$. \begin{IEEEeqnarray}{c+x*} \plabel{eq:diagram for s1esh capacities} \begin{tikzcd} \homology{}{S^1}{}{S}{H}{(\varepsilon,a]}{}(X) \ar[r, "\delta^a_0"] \ar[d, swap, "\iota^a"] & \homology{}{S^1}{}{S}{H}{\varepsilon}{}(X) \ar[d, two heads, hook, "\alpha"] \ar[r, "\iota^{a,\varepsilon}"] & \homology{}{S^1}{}{S}{H}{a}{}(X) \\ \homology{}{S^1}{}{S}{H}{+}{}(X) \ar[ur, "\delta_0"] \ar[r, swap, "\delta"] & H_\bullet(BS^1;\Q) \otimes H_\bullet(X, \partial X;\Q) \end{tikzcd} \end{IEEEeqnarray} Here, $\iota^a$ and $\iota^{a, \varepsilon}$ are the maps induced by the action filtration, $\delta_0$ and $\delta$ are the maps from \cref{def:delta map} and $\alpha$ is the isomorphism from \cref{lem:iso from symplectic to singular}. We point out that every vertex in the above diagram has a $U$ map and every map in the diagram commutes with this $U$ map. Specifically, all the $S^1$-equivariant symplectic homologies have the $U$ map given as in \cref{def:U map} and $H_\bullet(BS^1;\Q) \otimes H_\bullet(X, \partial X;\Q) \cong \Q[u] \otimes H_\bullet(X, \partial X;\Q)$ has the map $U \coloneqq u^{-1} \otimes \id$. We will also make use of a version of diagram \eqref{eq:diagram for s1esh capacities} in the case where $X$ is star-shaped, namely diagram \eqref{eq:diagram for s1esh capacities case ss} below. In this case, the modules in the diagram admit gradings and every map is considered to be a map in a specific degree. 
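For later use, we record explicitly how the $U$ map acts on $H_\bullet(BS^1;\Q) \otimes H_\bullet(X, \partial X;\Q)$. Identifying $H_{2j}(BS^1;\Q)$ with the one-dimensional vector space spanned by $[\C P^j]$, so that $[\C P^j]$ corresponds to $u^j \in \Q[u]$ and $[\mathrm{pt}] = [\C P^0]$ corresponds to $1$, the map $U = u^{-1} \otimes \id$ is given by
\begin{IEEEeqnarray*}{c+x*}
  U([\C P^j] \otimes \beta) =
  \begin{cases}
    [\C P^{j-1}] \otimes \beta & \text{if } j \geq 1, \\
    0 & \text{if } j = 0,
  \end{cases}
\end{IEEEeqnarray*}
for every $\beta \in H_\bullet(X, \partial X; \Q)$. In particular, $U^{k-1}([\C P^{k-1}] \otimes [X]) = [\mathrm{pt}] \otimes [X]$; this is the form in which the $U$ map will be used in the proofs of \cref{prp:properties of s1esh capacities,thm:ghc and s1eshc}.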
By \cite[Proposition 3.1]{guttSymplecticCapacitiesPositive2018}, $\delta$ and $\delta_0$ are isomorphisms. \begin{IEEEeqnarray}{c+x*} \plabel{eq:diagram for s1esh capacities case ss} \begin{tikzcd} \homology{}{S^1}{}{S}{H}{(\varepsilon,a]}{n - 1 + 2k}(X) \ar[r, "\delta^a_0"] \ar[d, swap, "\iota^a"] & \homology{}{S^1}{}{S}{H}{\varepsilon}{n - 2 + 2k}(X) \ar[d, two heads, hook, "\alpha"] \ar[r, "\iota^{a,\varepsilon}"] & \homology{}{S^1}{}{S}{H}{a}{n - 2 + 2k}(X) \\ \homology{}{S^1}{}{S}{H}{+}{n - 1 + 2k}(X) \ar[ur, two heads, hook, "\delta_0"] \ar[r, swap, two heads, hook, "\delta"] & H_{2k-2}(BS^1;\Q) \otimes H_{2n}(X, \partial X;\Q) \end{tikzcd} \end{IEEEeqnarray} \begin{definition}[{\cite[Definition 4.1]{guttSymplecticCapacitiesPositive2018}}] \label{def:gutt hutchings capacities} If $k \in \Z_{\geq 1}$ and $(X,\lambda)$ is a nondegenerate Liouville domain, the \textbf{Gutt--Hutchings capacities} of $X$, denoted $\cgh{k}(X)$, are defined as follows. Consider the map \begin{IEEEeqnarray*}{c+x*} \delta \circ U^{k-1} \circ \iota^a \colon \homology{}{S^1}{}{S}{H}{(\varepsilon,a]}{}(X) \longrightarrow H_\bullet(BS^1;\Q) \otimes H_\bullet(X, \partial X;\Q) \end{IEEEeqnarray*} from diagram \eqref{eq:diagram for s1esh capacities}. Then, we define \begin{IEEEeqnarray*}{c+x*} \cgh{k}(X) \coloneqq \inf \{ a > 0 \mid [\mathrm{pt}] \otimes [X] \in \img (\delta \circ U^{k-1} \circ \iota^a) \}. \end{IEEEeqnarray*} \end{definition} \begin{theorem}[{\cite[Theorem 1.24]{guttSymplecticCapacitiesPositive2018}}] \label{thm:properties of gutt-hutchings capacities} The functions $\cgh{k}$ of Liouville domains satisfy the following axioms, for all equidimensional Liouville domains $(X,\lambda_X)$ and $(Y,\lambda_Y)$: \begin{description} \item[(Monotonicity)] If $X \longrightarrow Y$ is a generalized Liouville embedding then $\cgh{k}(X) \leq \cgh{k}(Y)$. \item[(Conformality)] If $\alpha > 0$ then $\cgh{k}(X, \alpha \lambda_X) = \alpha \, \cgh{k}(X, \lambda_X)$. \item[(Nondecreasing)] $\cgh{1}(X) \leq \cgh{2}(X) \leq \cdots \leq +\infty$. \item[(Reeb orbits)] If $\cgh{k}(X) < + \infty$, then $\cgh{k}(X) = \mathcal{A}(\gamma)$ for some Reeb orbit $\gamma$ which is contractible in $X$. \end{description} \end{theorem} The following lemma provides an alternative definition of $\cgh{k}$, in the spirit of \cite{floerApplicationsSymplecticHomology1994}. \begin{lemma} \label{def:ck alternative} Let $(X,\lambda)$ be a nondegenerate Liouville domain such that $\pi_1(X) = 0$ and $c_1(TX)|_{\pi_2(X)} = 0$. Let $E \subset \C^n$ be a nondegenerate star-shaped domain and suppose that $\phi \colon E \longrightarrow X$ is a symplectic embedding. Consider the map \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \homology{}{S^1}{}{S}{H}{(\varepsilon,a]}{n - 1 + 2k}(X) \ar[r, "\iota^a"] & \homology{}{S^1}{}{S}{H}{+}{n - 1 + 2k}(X) \ar[r, "\phi_!"] & \homology{}{S^1}{}{S}{H}{+}{n - 1 + 2k}(E) \end{tikzcd} \end{IEEEeqnarray*} Then, $\cgh{k}(X) = \inf \{ a > 0 \mid \phi_! \circ \iota^a \text{ is nonzero} \}$. 
\end{lemma} \begin{proof} For every $a \in \R$ consider the following commutative diagram: \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \homology{}{S^1}{}{S}{H}{(\varepsilon, a]}{n - 1 + 2k}(X) \ar[r, "\iota^a_X"] \ar[d, swap, "\phi_!^a"] & \homology{}{S^1}{}{S}{H}{+}{n - 1 + 2k}(X) \ar[r, "U ^{k-1}_X"] \ar[d, "\phi_!"] & \homology{}{S^1}{}{S}{H}{+}{n+1}(X) \ar[r, "\delta_X"] \ar[d, "\phi_!"] & H_0(BS^1) \tensorpr H_{2n}(X,\del X) \ar[d, hook, two heads, "\id \tensorpr \rho"] \\ \homology{}{S^1}{}{S}{H}{(\varepsilon, a]}{n - 1 + 2k}(E) \ar[r, swap, "\iota^a_E"] & \homology{}{S^1}{}{S}{H}{+}{n - 1 + 2k}(E) \ar[r, swap, hook, two heads, "U ^{k-1}_E"] & \homology{}{S^1}{}{S}{H}{+}{n+1}(E) \ar[r, swap, hook, two heads, "\delta_E"] & H_0(BS^1) \tensorpr H_{2n}(E,\del E) \end{tikzcd} \end{IEEEeqnarray*} By \cite[Proposition 3.1]{guttSymplecticCapacitiesPositive2018} and since $E$ is star-shaped, the maps $U_E$ and $\delta_E$ are isomorphisms. Since $\rho([X]) = [E]$, the map $\rho$ is an isomorphism. By definition, $\cgh{k}$ is the infimum over $a$ such that the top arrow is surjective. This condition is equivalent to $\phi_! \circ \iota^a_X$ being nonzero. \end{proof} The following computation will be useful to us in the proofs of \cref{lem:computation of cl,thm:my main theorem}. \begin{lemma}[{\cite[Lemma 1.19]{guttSymplecticCapacitiesPositive2018}}] \label{lem:cgh of nondisjoint union of cylinders} $\cgh{k}(N^{2n}(\delta)) = \delta \, (k + n - 1)$. \end{lemma} We now consider other capacities which can be defined using $S^1$-equivariant symplectic homology. \begin{definition}[{\cite[Section 2.5]{irieSymplecticHomologyFiberwise2021}}] \label{def:s1esh capacities} If $k \in \Z_{\geq 1}$ and $(X,\lambda)$ is a nondegenerate Liouville domain, the \textbf{$S^1$-equivariant symplectic homology capacities} of $X$, denoted $\csh{k}(X)$, are defined as follows. Consider the map \begin{IEEEeqnarray*}{c+x*} \iota^{a,\varepsilon} \circ \alpha^{-1} \colon H_\bullet(BS^1;\Q) \otimes H_\bullet(X, \partial X;\Q) \longrightarrow \homology{}{S^1}{}{S}{H}{a}{}(X) \end{IEEEeqnarray*} from diagram \eqref{eq:diagram for s1esh capacities}. Then, we define \begin{IEEEeqnarray*}{c+x*} \csh{k}(X) \coloneqq \inf \{ a > 0 \mid \iota^{a,\varepsilon} \circ \alpha^{-1}([\C P^{k-1}] \otimes [X]) = 0 \}. \end{IEEEeqnarray*} \end{definition} \begin{theorem} \label{prp:properties of s1esh capacities} The functions $\csh{k}$ of Liouville domains satisfy the following axioms, for all Liouville domains $(X,\lambda_X)$ and $(Y,\lambda_Y)$ of the same dimension: \begin{description} \item[(Monotonicity)] If $X \longrightarrow Y$ is a generalized Liouville embedding then $\csh{k}(X) \leq \csh{k}(Y)$. \item[(Conformality)] If $\mu > 0$ then $\csh{k}(X, \mu \lambda_X) = \mu \, \csh{k}(X, \lambda_X)$. \item[(Nondecreasing)] $\csh{1}(X) \leq \csh{2}(X) \leq \cdots \leq +\infty$. \end{description} \end{theorem} \begin{proof} We prove monotonicity. 
Consider the following commutative diagram: \begin{IEEEeqnarray}{c+x*} \plabel{eq:s1eshc diagram} \begin{tikzcd} H_\bullet(BS^1;\Q) \otimes H_\bullet(Y, \partial Y;\Q) \ar[d, swap, "\id \otimes \rho"] & \homology{}{S^1}{}{S}{H}{\varepsilon}{}(Y) \ar[l, swap, hook', two heads, "\alpha_Y"] \ar[r, "\iota^{a, \varepsilon}_Y"] \ar[d, "\phi_!^\varepsilon"] & \homology{}{S^1}{}{S}{H}{a}{}(Y) \ar[d, "\phi^a_!"] \\ H_\bullet(BS^1;\Q) \otimes H_\bullet(X, \partial X;\Q) & \homology{}{S^1}{}{S}{H}{\varepsilon}{}(X) \ar[l, hook', two heads, "\alpha_X"] \ar[r, swap, "\iota^{a, \varepsilon}_X"] & \homology{}{S^1}{}{S}{H}{a}{}(X) \end{tikzcd} \end{IEEEeqnarray} If $\iota_Y^{a,\varepsilon} \circ \alpha_Y^{-1}([\C P^{k-1}] \otimes [Y]) = 0$, then \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\iota_X^{a,\varepsilon} \circ \alpha_X^{-1}([\C P^{k-1}] \otimes [X])} \\ \quad & = & \iota_X^{a,\varepsilon} \circ \alpha_X^{-1} \circ (\id \otimes \rho)([\C P^{k-1}] \otimes [Y]) & \quad [\text{since $\rho([Y]) = [X]$}] \\ & = & \phi_! \circ \iota_Y^{a,\varepsilon} \circ \alpha_{Y}^{-1} ([\C P^{k-1}] \otimes [Y]) & \quad [\text{by diagram \eqref{eq:s1eshc diagram}}] \\ & = & 0 & \quad [\text{by assumption}]. \end{IEEEeqnarray*} To prove conformality, choose $\varepsilon > 0$ such that $\varepsilon, \mu \varepsilon < \min \operatorname{Spec}(\partial X, \lambda|_{\partial X})$. Since the diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} H_\bullet(BS^1;\Q) \otimes H_\bullet(X, \partial X;\Q) \ar[d, equals] & \homology{}{S^1}{}{S}{H}{\varepsilon}{}(X, \lambda) \ar[d, equals] \ar[l, swap, hook', two heads, "\alpha_{\lambda}"] \ar[r, "\iota^{a, \varepsilon}_\lambda"] & \homology{}{S^1}{}{S}{H}{a}{}(X, \lambda) \ar[d, equals] \\ H_\bullet(BS^1;\Q) \otimes H_\bullet(X, \partial X;\Q) & \homology{}{S^1}{}{S}{H}{\mu \varepsilon}{}(X, \mu \lambda) \ar[l, hook', two heads, "\alpha_{\mu \lambda}"] \ar[r, swap, "\iota^{\mu a, \mu \varepsilon}_{\mu \lambda}"] & \homology{}{S^1}{}{S}{H}{\mu a}{}(X, \mu \lambda) \end{tikzcd} \end{IEEEeqnarray*} commutes (by \cite[Proposition 3.1]{guttSymplecticCapacitiesPositive2018}), the result follows. To prove the nondecreasing property, note that if $\iota^{a,\varepsilon} \circ \alpha^{-1}([\C P ^{k}] \otimes [X]) = 0$, then \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\iota^{a,\varepsilon} \circ \alpha^{-1}([\C P ^{k-1}] \otimes [X])}\\ \quad & = & \iota^{a,\varepsilon} \circ \alpha^{-1} \circ U ([\C P ^{k}] \otimes [X]) & \quad [\text{since $U([\C P^k] \otimes [X]) = [\C P^{k-1}] \otimes [X]$}] \\ & = & U^{a} \circ \iota^{a,\varepsilon} \circ \alpha^{-1} ([\C P ^{k}] \otimes [X]) & \quad [\text{since $\iota^{a,\varepsilon}$ and $\alpha$ commute with $U$}] \\ & = & 0 & \quad [\text{by assumption}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{theorem} \label{thm:ghc and s1eshc} If $(X, \lambda)$ is a Liouville domain, then \begin{enumerate} \item \label{thm:comparison cgh csh 1} $\cgh{k}(X) \leq \csh{k}(X)$; \item \label{thm:comparison cgh csh 2} $\cgh{k}(X) = \csh{k}(X)$ provided that $X$ is star-shaped. \end{enumerate} \end{theorem} \begin{proof} By \cref{lem:can prove ineqs for ndg}, we may assume that $X$ is nondegenerate. 
Since \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\iota^{a,\varepsilon} \circ \alpha^{-1}([\C P ^{k-1}] \otimes [X]) = 0}\\ \quad & \Longleftrightarrow & \alpha^{-1}([\C P ^{k-1}] \otimes [X]) \in \ker \iota^{a,\varepsilon} & \quad [\text{by definition of kernel}] \\ \quad & \Longleftrightarrow & \alpha^{-1}([\C P ^{k-1}] \otimes [X]) \in \img \delta^a_0 & \quad [\text{since the top row of \eqref{eq:diagram for s1esh capacities} is exact}] \\ \quad & \Longleftrightarrow & [\C P ^{k-1}] \otimes [X] \in \img (\alpha \circ \delta^a_0) & \quad [\text{by definition of image}] \\ \quad & \Longleftrightarrow & [\C P ^{k-1}] \otimes [X] \in \img (\delta \circ \iota^a) & \quad [\text{since diagram \eqref{eq:diagram for s1esh capacities} commutes}] \\ \quad & \Longrightarrow & [\mathrm{pt}] \otimes [X] \in \img (U^{k-1} \circ \delta \circ \iota^a) & \quad [\text{since $U^{k-1}([\C P ^{k-1}] \otimes [X]) = [\mathrm{pt}] \otimes [X]$}] \\ \quad & \Longleftrightarrow & [\mathrm{pt}] \otimes [X] \in \img (\delta \circ U^{k-1} \circ \iota^a) & \quad [\text{since $\delta$ and $U$ commute}], \end{IEEEeqnarray*} we have that $\cgh{k}(X) \leq \csh{k}(X)$. If $X$ is a star-shaped domain, we can view the maps of the computation above as being the maps in diagram \eqref{eq:diagram for s1esh capacities case ss}, i.e. they are defined in a specific degree. In this case, $U^{k-1} \colon H_{2k-2}(BS^1) \otimes H_{2n}(X, \partial X) \longrightarrow H_{0}(BS^1) \otimes H_{2n}(X, \partial X)$ is an isomorphism, and therefore the implication in the previous computation is actually an equivalence. \end{proof} \begin{remark} The capacities $\cgh{k}$ and $\csh{k}$ are defined in terms of a certain homology class being in the kernel or in the image of a map with domain or target the $S^1$-equivariant symplectic homology. Other authors have constructed capacities in an analogous manner, for example Viterbo \cite[Definition 2.1]{viterboSymplecticTopologyGeometry1992} and \cite[Section 5.3]{viterboFunctorsComputationsFloer1999}, Schwarz \cite[Definition 2.6]{schwarzActionSpectrumClosed2000} and Ginzburg--Shon \cite[Section 3.1]{ginzburgFilteredSymplecticHomology2018}. \end{remark} \section{McDuff--Siegel capacities} We now define the McDuff--Siegel capacities. These will assist us in our goal of proving \cref{conj:the conjecture} (at least in particular cases) because they can be compared with the Lagrangian capacity (\cref{thm:lagrangian vs g tilde}) and with the Gutt--Hutchings capacities (\cref{prp:g tilde and cgh}). \begin{definition}[{\cite[Definition 3.3.1]{mcduffSymplecticCapacitiesUnperturbed2022}}] \label{def:g tilde} Let $(X,\lambda)$ be a nondegenerate Liouville domain. For $\ell, k \in \Z_{\geq 1}$, we define the \textbf{McDuff--Siegel capacities} of $X$, denoted $\tilde{\mathfrak{g}}^{\leq \ell}_k(X)$, as follows. Choose $x \in \itr X$ and $D$ a symplectic divisor at $x$. Then, \begin{IEEEeqnarray*}{c+x*} \tilde{\mathfrak{g}}^{\leq \ell}_k(X) \coloneqq \sup_{J \in \mathcal{J}(X,D)} \mathop{\inf\vphantom{\sup}}_{\Gamma_1, \ldots, \Gamma_p} \sum_{i=1}^{p} \mathcal{A}(\Gamma_i), \end{IEEEeqnarray*} where the infimum is over tuples of Reeb orbits $\Gamma_1, \ldots, \Gamma_p$ such that there exist integers $k_1, \ldots, k_p \geq 1$ with \begin{IEEEeqnarray}{c+x*} \phantomsection\label{eq:g tilde two definitions conditions} \sum_{i=1}^{p} \# \Gamma_i \leq \ell, \qquad \sum_{i=1}^{p} k_i \geq k, \qquad \bigproduct_{i=1}^{p} \mathcal{M}_X^J(\Gamma_i)\p{<}{}{\mathcal{T}^{(k_i)}x} \neq \varnothing. 
\end{IEEEeqnarray} \end{definition} The following theorem shows that the definition of $\tilde{\mathfrak{g}}^{\leq \ell}_k$ we give in \cref{def:g tilde} and the one given in \cite[Definition 3.3.1]{mcduffSymplecticCapacitiesUnperturbed2022} are equal. \begin{theorem}[{\cite[Remark 3.1.2]{mcduffSymplecticCapacitiesUnperturbed2022}}] \label{thm:g tilde two definitions} If $(X, \lambda)$ is a nondegenerate Liouville domain, $\ell, k \in \Z_{\geq 1}$, $x \in \itr X$ and $D$ is a symplectic divisor through $x$, then \begin{IEEEeqnarray*}{c+x*} \tilde{\mathfrak{g}}^{\leq \ell}_k(X) = \sup_{J \in \mathcal{J}(X,D)} \mathop{\inf\vphantom{\sup}}_{\Gamma} \mathcal{A}(\Gamma), \end{IEEEeqnarray*} where the infimum is taken over tuples of Reeb orbits $\Gamma = (\gamma_1, \ldots, \gamma_p)$ such that $p \leq \ell$ and $\overline{\mathcal{M}}^{J}_{X}(\Gamma)\p{<}{}{\mathcal{T}^{(k)}x} \neq \varnothing$. \end{theorem} \begin{proof} $(\geq)$: Let $\Gamma_1, \ldots, \Gamma_p$ and $k_1, \ldots, k_p$ be as in \eqref{eq:g tilde two definitions conditions}. We wish to show that there exists a tuple of Reeb orbits $\Gamma$ such that \begin{IEEEeqnarray*}{c+x*} \# \Gamma \leq \ell, \qquad \mathcal{A}(\Gamma) \leq \sum_{i=1}^{p} \mathcal{A}(\Gamma_i), \qquad \overline{\mathcal{M}}_X^J(\Gamma)\p{<}{}{\mathcal{T}^{(k)}x} \neq \varnothing. \end{IEEEeqnarray*} By \cref{rmk:compactifications with tangency}, the tuple $\Gamma = \Gamma_1 \cup \cdots \cup \Gamma_p$ is as desired. $(\leq)$: Let $\Gamma^+$ be a tuple of Reeb orbits such that $\# \Gamma^+ \leq \ell$ and $\overline{\mathcal{M}}^{J}_{X}(\Gamma^+)\p{<}{}{\mathcal{T}^{(k)}x} \neq \varnothing$. We wish to show that there exist tuples of Reeb orbits $\Gamma^-_1, \ldots, \Gamma^-_p$ and numbers $k_1, \ldots, k_p$ satisfying \eqref{eq:g tilde two definitions conditions} and \begin{IEEEeqnarray*}{c+x*} \sum_{i=1}^{p} \mathcal{A}(\Gamma_i) \leq \mathcal{A}(\Gamma). \end{IEEEeqnarray*} Choose $F = (F^1, \ldots, F^N) \in \overline{\mathcal{M}}^J_X(\Gamma^+)\p{<}{}{\mathcal{T}^{(k)}x}$ and let $C$ be the component of $F$ which inherits the constraint $\p{<}{}{\mathcal{T}^{(k)}x}$. We prove the result in the case where $C$ is nonconstant. In this case, $C \in \mathcal{M}^J_X(\Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x}$ for some tuple of Reeb orbits $\Gamma^-$. By \cref{lem:action energy for holomorphic}, $\mathcal{A}(\Gamma^-) \leq \mathcal{A}(\Gamma^+)$. We show that $\# \Gamma^- \leq \# \Gamma^+ \leq \ell$. Let $\mathbf{n}$ be the set of nodal points of $C$. Since the graph of $F$ is a tree, for every $\gamma \in \Gamma^+$ there exists a unique $f(\gamma) \in \Gamma^- \cup \mathbf{n}$ such that the subtree of $F$ emanating from $C$ at $f(\gamma)$ is positively asymptotic to $\gamma$. By the maximum principle (\cref{thm:maximum principle holomorphic}), $f \colon \Gamma^+ \longrightarrow \Gamma^- \cup \mathbf{n}$ is surjective, and therefore $\# \Gamma^- \leq \# \Gamma^+ \leq \ell$. We prove the result in the case where $C$ is constant. Let $C_1, \ldots, C_p$ be the nonconstant components near $C$ as in \cref{rmk:compactifications with tangency}. There exist tuples of Reeb orbits $\Gamma_1^-, \ldots, \Gamma_p^-$ and $k_1, \ldots, k_p \in \Z_{\geq 1}$ such that \begin{IEEEeqnarray*}{c+x*} \sum_{i=1}^{p} \mathcal{A}(\Gamma_i^-) \leq \mathcal{A}(\Gamma^+), \qquad \sum_{i=1}^{p} k_i \geq k, \qquad C_i \in \mathcal{M}^J_X(\Gamma_i^-)\p{<}{}{\mathcal{T}^{(k_i)}x} \neq \varnothing. 
\end{IEEEeqnarray*} By a reasoning similar to the previous case, $\sum_{i=1}^{p} \# \Gamma_i^- \leq \# \Gamma^+ \leq \ell$. \end{proof} \begin{remark} \phantomsection\label{cor:g tilde 1} If $(X, \lambda)$ is a nondegenerate Liouville domain, $k \in \Z_{\geq 1}$, $x \in \itr X$ and $D$ is a symplectic divisor through $x$, then \begin{IEEEeqnarray*}{c+x*} \tilde{\mathfrak{g}}^{\leq 1}_k(X) = \sup_{J \in \mathcal{J}(X,D)} \mathop{\inf\vphantom{\sup}}_{\gamma} \mathcal{A}(\gamma), \end{IEEEeqnarray*} where the infimum is over Reeb orbits $\gamma$ such that $\mathcal{M}^J_X(\gamma)\p{<}{}{\mathcal{T}^{(k)}x} \neq \varnothing$. \end{remark} \begin{theorem}[{\cite[Theorem 3.3.2]{mcduffSymplecticCapacitiesUnperturbed2022}}] \label{thm:properties of g tilde} The functions $\tilde{\mathfrak{g}}^{\leq \ell}_k$ are independent of the choices of $x$ and $D$ and satisfy the following properties, for all nondegenerate Liouville domains $(X,\lambda_X)$ and $(Y,\lambda_Y)$ of the same dimension: \begin{description} \item[(Monotonicity)] If $X \longrightarrow Y$ is a generalized Liouville embedding then $\tilde{\mathfrak{g}}^{\leq \ell}_k(X) \leq \tilde{\mathfrak{g}}^{\leq \ell}_k(Y)$. \item[(Conformality)] If $\alpha > 0$ then $\tilde{\mathfrak{g}}^{\leq \ell}_k(X, \alpha \lambda_X) = \alpha \, \tilde{\mathfrak{g}}^{\leq \ell}_k(X, \lambda_X)$. \item[(Nondecreasing)] $\tilde{\mathfrak{g}}^{\leq \ell}_1(X) \leq \tilde{\mathfrak{g}}^{\leq \ell}_{2}(X) \leq \cdots \leq +\infty$. \end{description} \end{theorem} We now state a result comparing the McDuff--Siegel capacities and the Gutt--Hutchings capacities. We will later apply this result to show that $c_L(X_{\Omega}) = \delta_\Omega$ for every $4$-dimensional convex toric domain $X_{\Omega}$ (\cref{lem:computation of cl}). \begin{proposition}[{\cite[Proposition 5.6.1]{mcduffSymplecticCapacitiesUnperturbed2022}}] \label{prp:g tilde and cgh} If $X_{\Omega}$ is a $4$-dimensional convex toric domain then \begin{IEEEeqnarray*}{c+x*} \tilde{\mathfrak{g}}^{\leq 1}_k(X_\Omega) = \cgh{k}(X_\Omega). \end{IEEEeqnarray*} \end{proposition} Finally, we state two stabilization results which we will use in \cref{sec:augmentation map of an ellipsoid}. \begin{lemma}[{\cite[Lemma 3.6.2]{mcduffSymplecticCapacitiesUnperturbed2022}}] \label{lem:stabilization 1} Let $(X, \lambda)$ be a Liouville domain. For any $c, \varepsilon \in \R_{> 0}$, there is a subdomain with smooth boundary $\tilde{X} \subset X \times B^2(c)$ such that: \begin{enumerate} \item The Liouville vector field $Z_{\tilde{X}} = Z_{X} + Z_{B^2(c)}$ is outwardly transverse along $\partial \tilde{X}$. \item $X \times \{0\} \subset \tilde{X}$ and the Reeb vector field of $\partial \tilde{X}$ is tangent to $\partial X \times \{0\}$. \item Any Reeb orbit of the contact form $(\lambda + \lambda_0)|_{\partial \tilde{X}}$ (where $\lambda_0 = 1/2 (x \edv y - y \edv x)$) with action less than $c - \varepsilon$ is entirely contained in $\partial X \times \{0\}$ and has normal Conley--Zehnder index equal to $1$. \end{enumerate} \end{lemma} \begin{lemma}[{\cite[Lemma 3.6.3]{mcduffSymplecticCapacitiesUnperturbed2022}}] \label{lem:stabilization 2} Let $X$ be a Liouville domain, and let $\tilde{X}$ be a smoothing of $X \times B^2(c)$ as in \cref{lem:stabilization 1}. \begin{enumerate} \item Let $J \in \mathcal{J}(\tilde{X})$ be a cylindrical almost complex structure on the completion of $\tilde{X}$ for which $\hat{X} \times \{0\}$ is $J$-holomorphic. 
Let $C$ be an asymptotically cylindrical $J$-holomorphic curve in $\hat{X}$, all of whose asymptotic Reeb orbits are nondegenerate and lie in $\partial X \times \{0\}$ with normal Conley--Zehnder index $1$. Then $C$ is either disjoint from the slice $\hat{X} \times \{0\}$ or entirely contained in it. \item Let $J \in \mathcal{J}(\partial \tilde{X})$ be a cylindrical almost complex structure on the symplectization of $\partial \tilde{X}$ for which $\R \times \partial X \times \{0\}$ is $J$-holomorphic. Let $C$ be an asymptotically cylindrical $J$-holomorphic curve in $\R \times \partial \tilde{X}$, all of whose asymptotic Reeb orbits are nondegenerate and lie in $\partial X \times \{0\}$ with normal Conley--Zehnder index $1$. Then $C$ is either disjoint from the slice $\R \times \partial X \times \{0\}$ or entirely contained in it. Moreover, only the latter is possible if $C$ has at least one negative puncture. \end{enumerate} \end{lemma} \section{Computations not requiring contact homology} We now state and prove one of our main theorems, which is going to be a key step in proving that $c_L(X_{\Omega}) = \delta_{\Omega}$. The proof uses techniques similar to those used in the proof of \cite[Theorem 1.1]{cieliebakPuncturedHolomorphicCurves2018}. \begin{theorem} \label{thm:lagrangian vs g tilde} If $(X, \lambda)$ is a Liouville domain then \begin{IEEEeqnarray*}{c+x*} c_L(X) \leq \inf_k^{} \frac{\tilde{\mathfrak{g}}_k^{\leq 1}(X)}{k}. \end{IEEEeqnarray*} \end{theorem} \begin{proof} By \cref{lem:can prove ineqs for ndg}, we may assume that $X$ is nondegenerate. Let $k \in \Z_{\geq 1}$ and $L \subset \itr X$ be an embedded Lagrangian torus. We wish to show that for every $\varepsilon > 0$ there exists $\sigma \in \pi_2(X,L)$ such that $0 < \omega(\sigma) \leq \tilde{\mathfrak{g}}_k^{\leq 1}(X) / k + \varepsilon$. Define \begin{IEEEeqnarray*}{rCls+x*} a & \coloneqq & \tilde{\mathfrak{g}}_k^{\leq 1}(X), \\ K_1 & \coloneqq & \ln(2), \\ K_2 & \coloneqq & \ln(1 + a / \varepsilon k), \\ K & \coloneqq & \max \{K_1, K_2\}, \\ \delta & \coloneqq & e^{-K}, \\ \ell_0 & \coloneqq & a / \delta. \end{IEEEeqnarray*} By \cref{lem:geodesics lemma CM abs} and the Lagrangian neighbourhood theorem, there exists a Riemannian metric $g$ on $L$ and a symplectic embedding $\phi \colon D^*L \longrightarrow X$ such that $\phi(D^*L) \subset \itr X$, $\phi|_L = \id_L$ and such that if $\gamma$ is a closed geodesic in $L$ with length $\ell(\gamma) \leq \ell_0$ then $\gamma$ is noncontractible, nondegenerate and satisfies $0 \leq \morse(\gamma) \leq n - 1$. Let $D^*_{\delta} L$ be the codisk bundle of radius $\delta$. Notice that $\delta$ has been chosen in such a way that the symplectic embedding $\phi \colon D^* L \longrightarrow X$ can be seen as an embedding like that of \cref{lem:energy wrt different forms}. We will now use the notation of \cref{sec:sft compactness}. Define symplectic cobordisms \begin{IEEEeqnarray*}{rCl} (X^+, \omega^+) & \coloneqq & (X \setminus \phi(D^*_{\delta} L), \omega), \\ (X^-, \omega^-) & \coloneqq & (D^*_{\delta} L, \edv \lambda_{T^* L}), \end{IEEEeqnarray*} which have the common contact boundary \begin{IEEEeqnarray*}{c+x*} (M, \alpha) \coloneqq (S^*_{\delta} L, \lambda_{T^* L}). \end{IEEEeqnarray*} Here, it is implicit that we are considering the restriction of the form $\lambda_{T^*L}$ on $T^* L$ to $D^*_{\delta} L$ or $S^*_{\delta} L$. Then, $(X,\omega) = (X^-, \omega^-) \circledcirc (X^+, \omega^+)$. 
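Before continuing, we record the correspondence between the Reeb dynamics of $(M, \alpha) = (S^*_{\delta} L, \lambda_{T^*L})$ and the geodesics of $(L,g)$, which will be used repeatedly below: the closed Reeb orbits of $M$ correspond to the closed geodesics of $(L,g)$, and if $\gamma$ is a Reeb orbit whose underlying geodesic has length $\ell(\gamma)$, then its action is
\begin{IEEEeqnarray*}{c+x*}
  \mathcal{A}(\gamma) = \int_{\gamma} \lambda_{T^*L} = \delta \, \ell(\gamma).
\end{IEEEeqnarray*}
In particular, Reeb orbits of action at most $a$ correspond to closed geodesics of length at most $a/\delta = \ell_0$, to which the choice of Riemannian metric $g$ above applies.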
Recall that there are piecewise smooth $2$-forms $\tilde{\omega} \in \Omega^2(\hat{X})$ and $\tilde{\omega}^{\pm} \in \Omega^2(\hat{X}^{\pm})$ which are given as in \cref{def:energy of a asy cylindrical holomorphic curve}. Choose $x \in \itr \phi(D^*_{\delta} L)$ and let $D \subset \phi(D^*_{\delta} L)$ be a symplectic divisor through $x$. Choose also generic almost complex structures \begin{IEEEeqnarray*}{rCls+x*} J_M & \in & \mathcal{J}(M), \\ J^+ & \in & \mathcal{J}_{J_M}(X^+), \\ J^- & \in & \mathcal{J}^{J_M}(X^-, D), \end{IEEEeqnarray*} and denote by $J_{\partial X} \in \mathcal{J}(\partial X)$ the ``restriction'' of $J^+$ to $\R \times \partial X$. Let $(J_t)_{t} \subset \mathcal{J}(X, D)$ be the corresponding neck stretching family of almost complex structures. Since $a = \tilde{\mathfrak{g}}_k^{\leq 1}(X)$ and by \cref{cor:g tilde 1}, for every $t$ there exists a Reeb orbit $\gamma_t$ in $\partial X = \partial^+ X^+$ and a $J_t$-holomorphic curve $u_t \in \mathcal{M}_X^{J_t}(\gamma_t)\p{<}{}{\mathcal{T}^{(k)}x}$ such that $\mathcal{A}(\gamma_t) \leq a$. Since $\partial X$ has nondegenerate Reeb orbits, there are only finitely many Reeb orbits in $\partial X$ with action less than $a$. Therefore, possibly after passing to a subsequence, we may assume that $\gamma_t \eqqcolon \gamma_0$ is independent of $t$. The curves $u_t$ satisfy the energy bound $E_{\tilde{\omega}}(u_t) \leq a$. By the SFT compactness theorem, the sequence $(u_t)_{t}$ converges to a holomorphic building \begin{IEEEeqnarray*}{c+x*} F = (F^1, \ldots, F^{L_0-1}, F^{L_0}, F^{{L_0}+1}, \ldots, F^N) \in \overline{\mathcal{M}}_X^{(J_t)_{t}}(\gamma_0)\p{<}{}{\mathcal{T}^{(k)}x}, \end{IEEEeqnarray*} where \begin{IEEEeqnarray*}{rCls+x*} (X^{\nu}, \omega^\nu, \tilde{\omega}^{\nu}, J^{\nu}) & \coloneqq & \begin{cases} (T^* L , \edv \lambda_{T^* L} , \tilde{\omega}^- , J^-) & \text{if } \nu = 1 , \\ (\R \times M , \edv(e^r \alpha) , \edv \alpha , J_M) & \text{if } \nu = 2 , \ldots, {L_0} - 1, \\ (\hat{X} \setminus L , \hat{\omega} , \tilde{\omega}^+ , J^+) & \text{if } \nu = {L_0} , \\ (\R \times \partial X, \edv (e^r \lambda|_{\partial X}) , \edv \lambda|_{\partial X} , J_{\partial X}) & \text{if } \nu = {L_0} + 1, \ldots, N , \\ \end{cases} \\ (X^*, \omega^*, \tilde{\omega}^*, J^*) & \coloneqq & \bigcoproduct_{\nu = 1}^N (X^{\nu}, \omega^\nu, \tilde{\omega}^{\nu}, J^{\nu}), \end{IEEEeqnarray*} and $F^{\nu}$ is a $J^\nu$-holomorphic curve in $X^{\nu}$ with asymptotic Reeb orbits $\Gamma^{\pm}_{\nu}$ (see \cref{fig:holomorphic building in the proof}). The holomorphic building $F$ satisfies the energy bound \begin{IEEEeqnarray}{c+x*} \plabel{eq:energy of holo building in proof} E_{\tilde{\omega}^*}(F) \coloneqq \sum_{\nu = 1}^{N} E_{\tilde{\omega}^{\nu}}(F^{\nu}) \leq a. \end{IEEEeqnarray} \begin{figure}[ht] \centering \begin{tikzpicture} [ scale = 0.5, help/.style = {very thin, draw = black!50}, curve/.style = {thick} ] \tikzmath{ \rx = 0.6; \ry = 0.25; } \node[anchor=west] at (20, 13.5) {$F^3 \subset X^3 = X^+ = \hat{X} \setminus L$}; \draw (0,6) rectangle (19,11); \node[anchor=west] at (20, 8.5) {$F^2 \subset X^2 = \R \times M$}; \draw (0,11) rectangle (19,16); \node[anchor=west] at (20, 3) {$F^1 \subset X^1 = X^- = T^* L$}; \draw (0,3) -- (0,6) -- (19,6) -- (19,3); \draw (0,3) .. controls (0,-1) and (19,-1) .. 
(19,3);
\coordinate (G0) at ( 2,16);
\coordinate (G1) at ( 2, 6);
\coordinate (G2) at ( 8, 6);
\coordinate (G3) at (11, 6);
\coordinate (F1) at ( 2,11);
\coordinate (F2) at ( 8,11);
\coordinate (F3) at (11,11);
\coordinate (F4) at ( 5,11);
\coordinate (F5) at (14,11);
\coordinate (F6) at (17,11);
\coordinate (L) at (-\rx,0);
\coordinate (R) at (+\rx,0);
\coordinate (G0L) at ($ (G0) + (L) $);
\coordinate (G1L) at ($ (G1) + (L) $);
\coordinate (G2L) at ($ (G2) + (L) $);
\coordinate (G3L) at ($ (G3) + (L) $);
\coordinate (F1L) at ($ (F1) + (L) $);
\coordinate (F2L) at ($ (F2) + (L) $);
\coordinate (F3L) at ($ (F3) + (L) $);
\coordinate (F4L) at ($ (F4) + (L) $);
\coordinate (F5L) at ($ (F5) + (L) $);
\coordinate (F6L) at ($ (F6) + (L) $);
\coordinate (G0R) at ($ (G0) + (R) $);
\coordinate (G1R) at ($ (G1) + (R) $);
\coordinate (G2R) at ($ (G2) + (R) $);
\coordinate (G3R) at ($ (G3) + (R) $);
\coordinate (F1R) at ($ (F1) + (R) $);
\coordinate (F2R) at ($ (F2) + (R) $);
\coordinate (F3R) at ($ (F3) + (R) $);
\coordinate (F4R) at ($ (F4) + (R) $);
\coordinate (F5R) at ($ (F5) + (R) $);
\coordinate (F6R) at ($ (F6) + (R) $);
\coordinate (P) at (9,3);
\coordinate (D) at (3,1);
\draw[curve] (G0) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_0$};
\draw[curve] (G1) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_1$};
\draw[curve] (G2) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_2$};
\draw[curve] (G3) ellipse [x radius = \rx, y radius = \ry] node[above = 1] {$\gamma_3$};
\draw[curve] (F1) ellipse [x radius = \rx, y radius = \ry];
\draw[curve] (F2) ellipse [x radius = \rx, y radius = \ry];
\draw[curve] (F3) ellipse [x radius = \rx, y radius = \ry];
\draw[curve] (F4) ellipse [x radius = \rx, y radius = \ry];
\draw[curve] (F5) ellipse [x radius = \rx, y radius = \ry];
\draw[curve] (F6) ellipse [x radius = \rx, y radius = \ry];
\fill (P) circle (2pt) node[anchor = north west] {$x$};
\draw[curve] ($ (P) - (D) $) -- ( $ (P) + (D) $ ) node[anchor = west] {$D$};
\draw[curve] (G1L) -- (G0L);
\draw[curve] (F1R) -- (G0R);
\draw[curve] (G2L) -- (F2L);
\draw[curve] (G2R) -- (F2R);
\draw[curve] (G3L) -- (F3L);
\draw[curve] (G3R) -- (F3R);
\draw[curve] (F4L) .. controls ($ (F4L) + (0,2) $) and ($ (F4R) + (0,2) $) .. (F4R);
\draw[curve] (F2L) .. controls ($ (F2L) + (0,2) $) and ($ (F2R) + (0,2) $) .. (F2R);
\draw[curve] (F6L) .. controls ($ (F6L) + (0,2) $) and ($ (F6R) + (0,2) $) .. (F6R);
\draw[curve] (F3R) .. controls ($ (F3R) + (0,1) $) and ($ (F5L) + (0,1) $) .. (F5L);
\draw[curve] (F5R) .. controls ($ (F5R) - (0,1) $) and ($ (F6L) - (0,1) $) .. (F6L);
\draw[curve] (F3L) .. controls ($ (F3L) + (0,2.5) $) and ($ (F5R) + (0,2.5) $) .. (F5R);
\draw[curve] (F5L) .. controls ($ (F5L) - (0,2.5) $) and ($ (F6R) - (0,2.5) $) .. (F6R);
\draw[curve] (F1R) .. controls ($ (F1R) - (0,1) $) and ($ (F4L) - (0,1) $) .. (F4L);
\draw[curve] (G1R) .. controls ($ (G1R) + (0,2) $) and ($ (F4R) - (0,2) $) .. (F4R);
\draw[curve] (G1R) .. controls ($ (G1R) - (0,1.5) $) and ($ (G2L) - (0,1.5) $) .. (G2L);
\draw[curve] (G2R) .. controls ($ (G2R) - (0,1) $) and ($ (G3L) - (0,1) $) .. (G3L);
\draw[curve] (G1L) .. controls ($ (G1L) - (0,2) $) and ($ (P) - (D) $) .. (P);
\draw[curve] (G3R) .. controls ($ (G3R) - (0,1) $) and ($ (P) + (D) $) ..
(P); \node at ($ (F2) + (0,2) $) {$D_2$}; \node at ($ (F6) + (0,2) $) {$D_3$}; \end{tikzpicture} \caption{The holomorphic building $F = (F^1, \ldots, F^N)$ in the case ${L_0} = N = p = 3$} \label{fig:holomorphic building in the proof} \end{figure} Moreover, by \cref{lem:no nodes}, $F$ has no nodes. Let $C$ be the component of $F$ in $X^-$ which carries the tangency constraint $\p{<}{}{\mathcal{T}^{(k)}x}$. Then, $C$ is positively asymptotic to Reeb orbits $(\gamma_1, \ldots, \gamma_p)$ of $M$. For $\mu = 1, \ldots, p$, let $C_\mu$ be the subtree emanating from $C$ at $\gamma_\mu$. For exactly one $\mu = 1, \ldots, p$, the top level of the subtree $C_\mu$ is positively asymptotic to $\gamma_0$, and we may assume without loss of generality that this is true for $\mu = 1$. By the maximum principle, $C_\mu$ has a component in $X^{L_0} = \hat{X} \setminus L$ for every $\mu = 2, \ldots, p$. Also by the maximum principle, there do not exist components of $C_\mu$ in $X^{L_0} = \hat{X} \setminus L$ which intersect $\R_{\geq 0} \times \partial X$ or components of $C_\mu$ in the top symplectization layers $X^{{L_0}+1}, \ldots, X^N$, for every $\mu = 2, \ldots, p$. We claim that if $\gamma$ is a Reeb orbit in $M$ which is an asymptote of $F^\nu$ for some $\nu = 2,\ldots,{L_0}-1$, then $\mathcal{A}(\gamma) \leq a$. To see this, notice that \begin{IEEEeqnarray*}{rCls+x*} a & \geq & E_{\tilde{\omega}^*}(F) & \quad [\text{by Equation \eqref{eq:energy of holo building in proof}}] \\ & \geq & E_{\tilde{\omega}^N}(F^N) & \quad [\text{by monotonicity of $E$}] \\ & \geq & (e^K - 1) \mathcal{A}(\Gamma^-_N) & \quad [\text{by \cref{lem:energy wrt different forms}}] \\ & \geq & \mathcal{A}(\Gamma^-_N) & \quad [\text{since $K \geq K_1$}] \\ & \geq & \mathcal{A}(\Gamma^-_\nu) & \quad [\text{by \cref{lem:action energy for holomorphic}}] \end{IEEEeqnarray*} for every $\nu = 2, \ldots, {L_0}-1$. Every such $\gamma$ has a corresponding geodesic in $L$ (which by abuse of notation we denote also by $\gamma$) such that $\ell(\gamma) = \mathcal{A}(\gamma)/\delta \leq a / \delta = \ell_0$. Hence, by our choice of Riemannian metric, the geodesic $\gamma$ is noncontractible, nondegenerate and such that $\morse(\gamma) \leq n - 1$. Therefore, the Reeb orbit $\gamma$ is noncontractible, nondegenerate and such that $\conleyzehnder(\gamma) \leq n - 1$. We claim that if $D$ is a component of $C_\mu$ for some $\mu = 2,\ldots,p$ and $D$ is a plane, then $D$ is in $X^{L_0} = \hat{X} \setminus L$. Assume by contradiction otherwise. Notice that since $D$ is a plane, $D$ is asymptotic to a unique Reeb orbit $\gamma$ in $M = S^*_{\delta} L$ with corresponding noncontractible geodesic $\gamma$ in $L$. We will derive a contradiction by defining a filling disk for $\gamma$. If $D$ is in a symplectization layer $\R \times S^*_\delta L$, then the map $\pi \circ D$, where $\pi \colon \R \times S^*_{\delta} L \longrightarrow L$ is the projection, is a filling disk for the geodesic $\gamma$. If $D$ is in the bottom level, i.e. $X^1 = T^*L$, then the map $\pi \circ D$, where $\pi \colon T^*L \longrightarrow L$ is the projection, is also a filling disk. This proves the claim. So, summarizing our previous results, we know that for every $\mu = 2,\ldots,p$ there is a holomorphic plane $D_\mu$ in $X^{L_0} \setminus (\R_{\geq 0} \times \partial X) = X \setminus L$. For each plane $D_\mu$ there is a corresponding disk in $X$ with boundary on $L$, which we denote also by $D_\mu$. 
It is enough to show that $E_{\omega}(D_{\mu_0}) \leq a/k + \varepsilon$ for some $\mu_0 = 2,\ldots,p$. By \cref{lem:punctures and tangency}, $p \geq k + 1 \geq 2$. By definition of average, there exists $\mu_0 = 2,\ldots,p$ such that \begin{IEEEeqnarray*}{rCls+x*} E_{\omega}(D_{\mu_0}) & \leq & \frac{1}{p-1} \sum_{\mu=2}^{p} E_{\omega}(D_{\mu}) & \quad [\text{by definition of average}] \\ & = & \frac{E_{\omega}(D_2 \cup \cdots \cup D_p)}{p-1} & \quad [\text{since energy is additive}] \\ & \leq & \frac{e^K}{e^K - 1} \frac{E_{\tilde{\omega}}(D_2 \cup \cdots \cup D_p)}{p-1} & \quad [\text{by \cref{lem:energy wrt different forms}}] \\ & \leq & \frac{e^K}{e^K - 1} \frac{a}{p-1} & \quad [\text{by Equation \eqref{eq:energy of holo building in proof}}] \\ & \leq & \frac{e^K}{e^K - 1} \frac{a}{k} & \quad [\text{since $p \geq k + 1$}] \\ & \leq & \frac{a}{k} + \varepsilon & \quad [\text{since $K \geq K_2$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{theorem} \label{lem:computation of cl} If $X_{\Omega}$ is a $4$-dimensional convex toric domain then \begin{IEEEeqnarray*}{c+x*} c_L(X_{\Omega}) = \delta_\Omega. \end{IEEEeqnarray*} \end{theorem} \begin{proof} For every $k \in \Z_{\geq 1}$, \begin{IEEEeqnarray*}{rCls+x*} \delta_\Omega & \leq & c_P(X_{\Omega}) & \quad [\text{by \cref{lem:c square geq delta}}] \\ & \leq & c_L(X_{\Omega}) & \quad [\text{by \cref{lem:c square leq c lag}}] \\ & \leq & \frac{\tilde{\mathfrak{g}}^{\leq 1}_{k}(X_{\Omega})}{k} & \quad [\text{by \cref{thm:lagrangian vs g tilde}}] \\ & = & \frac{\cgh{k}(X_{\Omega})}{k} & \quad [\text{by \cref{prp:g tilde and cgh}}] \\ & \leq & \frac{\cgh{k}(N(\delta_\Omega))}{k} & \quad [\text{$X_{\Omega}$ is convex, hence $X_{\Omega} \subset N(\delta_\Omega)$}] \\ & = & \frac{\delta_\Omega(k+1)}{k} & \quad [\text{by \cref{lem:cgh of nondisjoint union of cylinders}}]. \end{IEEEeqnarray*} The result follows by taking the infimum over $k$. \end{proof} The proof of \cref{lem:computation of cl} suggests the following conjecture. Notice that \cref{thm:main theorem} implies \cref{conj:the conjecture}. \begin{conjecture} \label{thm:main theorem} If $X$ is a Liouville domain, $\pi_1(X) = 0$ and $c_1(TX)|_{\pi_2(X)} = 0$, then \begin{IEEEeqnarray*}{c+x*} c_L(X,\lambda) \leq \inf_k \frac{\cgh{k}(X,\lambda)}{k}. \end{IEEEeqnarray*} \end{conjecture} \begin{proof}[Proof attempt] By \cref{lem:can prove ineqs for ndg}, we may assume that $X$ is nondegenerate. Let $k \in \Z_{\geq 1}$ and $L \subset \itr X$ be an embedded Lagrangian torus. Let also $a > \cgh{k}(X)$. We wish to show that for every $\varepsilon > 0$ there exists $\sigma \in \pi_2(X,L)$ such that $0 < \omega(\sigma) \leq a / k + \varepsilon$. Start by replicating word by word the proof of \cref{thm:lagrangian vs g tilde} until the point where we choose $x \in \phi(D^*_{\delta} L)$. Instead of choosing $x$, choose a nondegenerate star-shaped domain $E \subset \C^n$ and an exact symplectic embedding $\varphi \colon E \longrightarrow X$ such that $\varphi(E) \subset \itr \phi(D^*_{\delta} L)$. Since $a > \cgh{k}(X)$ and by \cref{def:ck alternative}, the map \begin{IEEEeqnarray}{c+x*} \plabel{eq:nonzero map in proof of cl leq cgh} \begin{tikzcd} \homology{}{S^1}{}{S}{H}{(\varepsilon,a]}{n - 1 + 2k}(X) \ar[r, "\iota^a"] & \homology{}{S^1}{}{S}{H}{+}{n - 1 + 2k}(X) \ar[r, "\varphi_!"] & \homology{}{S^1}{}{S}{H}{+}{n - 1 + 2k}(E) \end{tikzcd} \end{IEEEeqnarray} is nonzero. 
Choose Hamiltonians \begin{IEEEeqnarray*}{rClCrClCs} H^+ \colon S^1 \times S^{2N+1} \times \hat{X} & \longrightarrow & \R, & \quad & H^+ & \in & \mathcal{H}(X,N), & \quad & (see \cref{def:hamiltonians}), \\ H^- \colon S^1 \times S^{2N+1} \times \hat{X} & \longrightarrow & \R, & \quad & H^- & \in & \mathcal{H}(X,E,N), & \quad & (see \cref{def:stair hamiltonians}), \\ H \colon \R \times S^1 \times S^{2N+1} \times \hat{X} & \longrightarrow & \R, & \quad & H & \in & \mathcal{H}(H^+, H^-), & \quad & (see \cref{def:homotopy stair to admissible hamiltonian}). \end{IEEEeqnarray*} Choose also an almost complex structure \begin{IEEEeqnarray*}{rClCrClCs} J \colon S^1 \times S^{2N+1} \times \hat{X} & \longrightarrow & \End(T \hat{X}), & \quad & J & \in & \mathcal{J}(X, E, N), & \quad & (see \cref{def:stair acs}). \end{IEEEeqnarray*} The almost complex structure $J$ defines a neck stretching family of almost complex structures \begin{IEEEeqnarray*}{rClCrClCs} J_m \colon S^1 \times S^{2N+1} \times \hat{X} & \longrightarrow & \End(T \hat{X}), & \quad & J_m & \in & \mathcal{J}(X, E, N), \end{IEEEeqnarray*} for $m \in \Z_{\geq 1}$. Since the map \eqref{eq:nonzero map in proof of cl leq cgh} is nonzero and by definition of the Viterbo transfer map, if $N, H^\pm, H$ are chosen big enough (in the sense of the partial orders defined in \cref{sec:Floer homology,sec:viterbo transfer map of liouville embedding}) then for every $m$ there exist $(z^{\pm}_m, \gamma^{\pm}_m) \in \hat{\mathcal{P}}(H^{\pm})$ and a Floer trajectory $(w_m, u_m)$ with respect to $H, J_m$ from $(z^-_m, \gamma^-_m)$ to $(z^+_m, \gamma^+_m)$, such that \begin{enumerate} \item $\img \gamma^+_m$ is near $\partial X$ and $\mathcal{A}_{H^+}(z^+_m, \gamma^+_m) \leq a$; \item $\img \gamma^-_m$ is near \parbox{\widthof{$\partial X$}}{$\partial E$} and $\ind (z^-_m, \gamma^-_m) \geq n - 1 + 2k$. \end{enumerate} By \cref{lem:action energy for floer trajectories}, we have the energy bound $E(w_m, u_m) \leq a$. Possibly after passing to a subsequence, we may assume that $(z^{\pm}_m, \gamma^{\pm}_m)$ converges to $(z_0^{\pm}, \gamma^{\pm}_0) \in \hat{\mathcal{P}}(H^{\pm})$. Now we come to the first challenge of the proof. We would like to use an adaptation of the SFT compactness theorem to take the limit of the sequence $(w_m, u_m)_m$. We will assume that such a theorem can be proven, and that we get a resulting limit $F = (F^1, \ldots, F^N)$ as in the proof of \cref{thm:lagrangian vs g tilde}, but where each $F^{\nu} = (w^\nu, u^\nu) \colon \dot{\Sigma}^\nu \longrightarrow S^{2 N + 1} \times X^{\nu}$ is a solution of the parametrized Floer equation (\cref{def:floer trajectory abstract}). Let $C$ be the component of $F$ in $X^-$ which is negatively asymptotic to $(z_0^-, \gamma_0^-)$. Notice that near $X \setminus \phi(D^*_{\delta} L)$, the Hamiltonian $H$ is independent of $\hat{X}$. Therefore, in the intermediate symplectization levels (i.e. for $\nu = 2,\ldots,L-1$) the map $u^{\nu} \colon \dot{\Sigma}^{\nu} \longrightarrow X^{\nu}$ is $J^{\nu}_{w^{\nu}}$-holomorphic, where $J^{\nu}_{w^{\nu}} \colon \dot{\Sigma}^{\nu} \times X^{\nu} \longrightarrow \End(T X^{\nu})$ is a domain dependent almost complex structure obtained from composing an almost complex structure $J^{\nu} \colon \dot{\Sigma}^{\nu} \times S^{2 N + 1} \times X^{\nu} \longrightarrow \End(T X^{\nu})$ with $w^\nu$. 
Hence, as in the proof of \cref{thm:lagrangian vs g tilde}, the component $C$ has $p$ positive punctures asymptotic to Reeb orbits $(\gamma_1, \ldots, \gamma_p)$ and for every $\mu = 2, \ldots, p$ there is a disk $D_{\mu}$ in $X$ with boundary on $L$. At this point, we need to show that $p \geq k + 1$, which brings us to the main difficulty in the proof. In the proof of \cref{thm:lagrangian vs g tilde}, we chose a generic almost complex structure so that $C$ would be regular. Then, the index formula for $C$ implied that $p \geq k + 1$ (see \cref{thm:transversality with tangency,lem:punctures and tangency simple,lem:punctures and tangency}). In line with this reasoning, we wish to show that $p \geq k + 1$ using the following computation: \begin{IEEEeqnarray*}{rCls+x*} 0 & \leq & \operatorname{ind}(C) \\ & = & (n - 3)(1 - p) + \sum_{\mu=1}^{p} \conleyzehnder(\gamma_\mu) - \ind(z^-_0, \gamma^-_0) \\ & \leq & (n - 3)(1 - p) + \sum_{\mu=1}^{p} (n - 1) - (n - 1 + 2k) \\ & = & 2 (p - k - 1), \end{IEEEeqnarray*} where in the first line we would need to use a transversality theorem which applies to $C$, and in the second line we would need to use a Fredholm theory theorem which gives us the desired index formula for $C$. We point out a few difficulties that arise with this approach. \begin{enumerate} \item Because of the domain dependence of the almost complex structures and Hamiltonians, it is not clear how to choose the initial almost complex structure $J \colon S^1 \times S^{2N+1} \times \hat{X} \longrightarrow \End(T \hat{X})$ in such a way that the resulting almost complex structure $J^1 \colon \dot{\Sigma}^1 \times S^{2N+1} \times X^1 \longrightarrow \End(T X^1)$ is regular. \item We are working under the assumption that the analogue of the SFT compactness theorem which applies to solutions of the parametrized Floer equation produces a building $F$ whose symplectization levels are asymptotic to Reeb orbits. More specifically, this means that the gradient flow line in $S^{2N+1}$ corresponding to $C$ is not asymptotic at the punctures to critical points of $\tilde{f}_N$. Therefore, in this case the linearized operator corresponding to the gradient flow line equation on $S^{2N+1}$ will not be Fredholm. \item However, the assumption in the previous item could be wrong. Another reasonable possibility is that the analogue of the SFT compactness theorem which applies to solutions of the parametrized Floer equation produces a building $F$ whose bottom component is positively asymptotic to pairs $(z_\mu, \gamma_\mu)$, where $z_{\mu} \in S^{2N+1}$ is a critical point of $\tilde{f}_N$ and $\gamma_\mu$ is a Reeb orbit. In this case, one would expect that the relevant operator is Fredholm. However, the Morse index of the critical points $z_{\mu}$ would appear in the index formula, and the previous computation would no longer imply that $p \geq k + 1$. \end{enumerate} Finally, we point out that if $p \geq k + 1$, then by the same computation as in the proof of \cref{thm:lagrangian vs g tilde}, we have the desired energy bound \begin{IEEEeqnarray*}{c+x*} E_{\omega}(D_{\mu_0}) \leq \frac{a}{k} + \varepsilon \end{IEEEeqnarray*} for some $\mu_0 = 2, \ldots, p$. This finishes the proof attempt. \end{proof} \chapter{Indices} \label{chp:indices} \section{Maslov indices} \label{sec:maslov indices} In this section, our goal is to define the Maslov index of a loop of symplectic matrices and the Maslov index of a loop of Lagrangian subspaces. 
Our presentation is based on \cite{mcduffIntroductionSymplecticTopology2017}. We start by recalling relevant facts and notation about symplectic linear algebra. Let $V$ be a finite-dimensional vector space. The vector spaces $V \directsum V^*$ and $V^* \directsum V$ admit symplectic structures given by
\begin{IEEEeqnarray*}{rCls+x*}
\omega_{V \directsum V^*}((a,\alpha),(b,\beta)) & = & \beta(a) - \alpha(b), \\
\omega_{V^* \directsum V}((\alpha,a),(\beta,b)) & = & \alpha(b) - \beta(a).
\end{IEEEeqnarray*}
If $V$ has an inner product $\p{<}{}{\cdot,\cdot}$, then we define a symplectic structure on $V \directsum V$ by
\begin{IEEEeqnarray}{c+x*}
\plabel{eq:symplectic structure on v + v}
\omega_{V \directsum V}((u,v),(x,y)) = \p{<}{}{u,y} - \p{<}{}{v,x}.
\end{IEEEeqnarray}
In this case, the maps
\begin{IEEEeqnarray*}{rrClCrrCl}
\phi \colon & V \directsum V & \longrightarrow & V \directsum V^* & \qquad & \psi \colon & V \directsum V & \longrightarrow & V^* \directsum V \\
& (x,y) & \longmapsto & (x,\p{<}{}{y,\cdot}), & & & (x,y) & \longmapsto & (\p{<}{}{x,\cdot},y)
\end{IEEEeqnarray*}
are isomorphisms of symplectic vector spaces. For each $n$, define the $2n \times 2n$ matrices
\begin{IEEEeqnarray*}{c+x*}
J_0 = \begin{bmatrix} 0 & -\idm \\ \idm & 0 \end{bmatrix}, \quad \Omega_0 = \begin{bmatrix} 0 & \idm \\ -\idm & 0 \end{bmatrix}.
\end{IEEEeqnarray*}
The canonical symplectic structure of $\R^{2n} = \R^n \directsum \R^n$, denoted $\omega_0$, is defined as in Equation \eqref{eq:symplectic structure on v + v} (where we use the Euclidean inner product). For $\mathbf{u} = (u,v) \in \R^{2n}$ and $\mathbf{x} = (x,y) \in \R^{2n}$, $\omega_0(\mathbf{u},\mathbf{x})$ is given by
\begin{IEEEeqnarray*}{rCls+x*}
\omega_0((u,v),(x,y)) & = & \p{<}{}{u,y} - \p{<}{}{v,x} \\
& = & \mathbf{u}^T \Omega_0 \mathbf{x}.
\end{IEEEeqnarray*}
The \textbf{symplectic group} is given by
\begin{IEEEeqnarray*}{c+x*}
\operatorname{Sp}(2n) \coloneqq \{ A \in \operatorname{GL}(2n,\R) \ | \ A^T \Omega_0 A = \Omega_0 \}.
\end{IEEEeqnarray*}
Denote by $C(S^1,\operatorname{Sp}(2n))$ the set of continuous maps from $S^1$ to $\operatorname{Sp}(2n)$, i.e. the set of loops of symplectic matrices.
\begin{theorem}[{\cite[Theorem 2.2.12]{mcduffIntroductionSymplecticTopology2017}}]
\phantomsection\label{thm:maslov sympl properties}
There exists a unique function
\begin{IEEEeqnarray*}{c+x*}
\maslov \colon C(S^1,\operatorname{Sp}(2n)) \longrightarrow \Z,
\end{IEEEeqnarray*}
called the \emph{\textbf{Maslov index}}, which satisfies the following properties:
\begin{description}
\item[(Homotopy)] The Maslov index descends to an isomorphism $\maslov \colon \pi_1(\operatorname{Sp}(2n)) \longrightarrow \Z$.
\item[(Product)] If $A_1,A_2 \in C(S^1, \operatorname{Sp}(2n))$ then $\maslov(A_1 A_2) = \maslov(A_1) + \maslov(A_2)$.
\item[(Direct sum)] If $A_i \in C(S^1, \operatorname{Sp}(2 n_i))$ for $i=1,2$ then $\maslov(A_1 \directsum A_2) = \maslov(A_1) + \maslov(A_2)$.
\item[(Normalization)] If $A \in C(S^1, \operatorname{Sp}(2))$ is given by
\begin{IEEEeqnarray*}{c+x*}
A(t) = \begin{bmatrix} \cos(2 \pi t) & -\sin(2 \pi t) \\ \sin(2 \pi t) & \cos(2 \pi t) \end{bmatrix}
\end{IEEEeqnarray*}
then $\maslov(A) = 1$.
\end{description}
\end{theorem}
Let $(V,\omega)$ be a symplectic vector space. A subspace $W$ of $V$ is \textbf{Lagrangian} if $\dim W = 1/2 \dim V$ and $\omega|_W = 0$. The \textbf{Lagrangian Grassmannian} of $(V,\omega)$, denoted $\mathcal{L}(V,\omega)$, is the set of Lagrangian subspaces of $(V,\omega)$.
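\begin{example}
As an illustration of how the properties in \cref{thm:maslov sympl properties} determine the Maslov index (this computation is included only as an illustration), consider for $k \in \Z$ the loop $A_k \in C(S^1, \operatorname{Sp}(2))$ given by
\begin{IEEEeqnarray*}{c+x*}
A_k(t) = \begin{bmatrix} \cos(2 \pi k t) & -\sin(2 \pi k t) \\ \sin(2 \pi k t) & \cos(2 \pi k t) \end{bmatrix}.
\end{IEEEeqnarray*}
For $k \geq 1$, the loop $A_k$ is the pointwise product of $k$ copies of the loop appearing in the normalization property, so the product property gives $\maslov(A_k) = k$. For $k = 0$, the product property gives $\maslov(A_0) = \maslov(A_0 A_0) = 2 \maslov(A_0)$, so constant loops have Maslov index $0$; since $A_k A_{-k} = A_0$, it follows that $\maslov(A_{-k}) = -k$.
\end{example}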
Denote $\mathcal{L}(n) = \mathcal{L}(\R ^{2n},\omega_0)$. \begin{theorem}[{\cite[Theorem 2.3.7]{mcduffIntroductionSymplecticTopology2017}}] \label{thm:maslov lagrangian properties} There exists a unique function \begin{IEEEeqnarray*}{c+x*} \maslov \colon C(S^1,\mathcal{L}(n)) \longrightarrow \Z, \end{IEEEeqnarray*} called the \emph{\textbf{Maslov index}}, which satisfies the following properties: \begin{description} \item[(Homotopy)] The Maslov index descends to an isomorphism $\maslov \colon \pi_1(\mathcal{L}(n)) \longrightarrow \Z$. \item[(Product)] If $W \in C(S^1,\mathcal{L}(n))$ and $A \in C(S^1,\operatorname{Sp}(2 n))$ then $\mu(AW) = \mu(W) + 2 \mu(A)$. \item[(Direct sum)] If $W_i \in C(S^1,\mathcal{L}(n_i))$ for $i = 1,2$ then $\mu(W_1 \directsum W_2) = \mu(W_1) + \mu(W_2)$. \item[(Normalization)] If $W \in C(S^1, \mathcal{L}(n))$ is given by $W(t) = e^{\pi i t} \R \subset \C$ then $\mu(W) = 1$. \item[(Zero)] A constant loop has Maslov index zero. \end{description} \end{theorem} \section{Conley--Zehnder index} In this section we define the Conley--Zehnder index of a path of symplectic matrices. We define \begin{IEEEeqnarray*}{rCls+x*} \operatorname{Sp}^\star(2n) & \coloneqq & \{ A \in \operatorname{Sp}(2n) \ | \ \det(A - \idm) \neq 0 \}, \\ \mathrm{SP}(n) & \coloneqq & \left\{ A \colon [0,1] \longrightarrow \mathrm{Sp}(2n) \ \middle\vert \begin{array}{l} A \text{ is continuous, } \\ A(0) = \idm, \\ A(1) \in \mathrm{Sp}^{\star}(2n) \end{array} \right\}. \end{IEEEeqnarray*} The following theorem characterizes the Conley--Zehnder index of a path of symplectic matrices. Originally, this result has appeared in \cite{salamonMorseTheoryPeriodic1992} and \cite{salamonLecturesFloerHomology1999}. However, we will use a restatement from \cite{guttConleyZehnderIndex2012}. Recall that if $S$ is a symmetric matrix, its \textbf{signature}, denoted by $\signature S$, is the number of positive eigenvalues of $S$ minus the number of negative eigenvalues of $S$. \begin{theorem}[{\cite[Propositions 35 and 37]{guttConleyZehnderIndex2012}}] \phantomsection\label{thm:properties of cz} There exists a unique function \begin{IEEEeqnarray*}{c+x*} \conleyzehnder \colon \operatorname{SP}(n) \longrightarrow \Z, \end{IEEEeqnarray*} called the \emph{\textbf{Conley--Zehnder index}}, which satisfies the following properties: \begin{description} \item[(Naturality)] If $B \colon [0,1] \longrightarrow \operatorname{Sp}(2n)$ is a continuous path, then $\conleyzehnder(B A B ^{-1}) = \conleyzehnder(A)$; \item[(Homotopy)] $\conleyzehnder$ is constant on each component of $\operatorname{SP}(n)$; \item[(Zero)] If $A(s)$ has no eigenvalue on the unit circle for $s > 0$ then $\conleyzehnder(A) = 0$; \item[(Product)] If $A_i \in \operatorname{SP}(n_i)$ for $i=1,2$ then $\conleyzehnder(A_1 \directsum A_2) = \conleyzehnder(A_1) + \conleyzehnder(A_2)$; \item[(Loop)] If $B \in C(S^1, \operatorname{Sp}(2n))$ and $B(0) = B(1) = \idm$ then $\conleyzehnder(B A) = \conleyzehnder(A) + 2 \maslov(B)$. \item[(Signature)] If $S$ is a symmetric nondegenerate $2n \times 2n$-matrix with operator norm $\p{||}{}{S} < 2 \pi$ and $A(t) = \exp(J_0 S t)$, then $\conleyzehnder(A) = \frac{1}{2} \signature (S)$; \item[(Determinant)] ${n - \conleyzehnder(A)}$ is even if and only if $\det (\idm - A(1)) > 0$; \item[(Inverse)] $\conleyzehnder(A ^{-1}) = \conleyzehnder (A^T) = - \conleyzehnder(A)$. 
\end{description} \end{theorem} \begin{remark} By \cite[Proposition 37]{guttConleyZehnderIndex2012}, the homotopy, loop and signature properties are enough to determine the Conley--Zehnder index uniquely. \end{remark} We finish this section with a result which we will use later on to compute a Conley--Zehnder index. \begin{proposition}[{\cite[Proposition 41]{guttConleyZehnderIndex2012}}] \label{prp:gutts cz formula} Let $S$ be a symmetric, nondegenerate $2 \times 2$-matrix and $T > 0$ be such that $\exp(T J_0 S) \neq \idm$. Consider the path of symplectic matrices $A \colon [0,T] \longrightarrow \operatorname{Sp}(2)$ given by \begin{IEEEeqnarray*}{c+x*} A(t) \coloneqq \exp(t J_0 S). \end{IEEEeqnarray*} Let $a_1$ and $a_2$ be the eigenvalues of $S$ and $\signature S$ be its signature. Then, \begin{IEEEeqnarray*}{c+x*} \conleyzehnder(A) = \begin{cases} \p{}{1}{\frac{1}{2} + \p{L}{1}{\frac{\sqrt{a_1 a_2} T}{2 \pi}}} \signature S & \text{if } \signature S \neq 0, \\ 0 & \text{if } \signature S = 0. \end{cases} \end{IEEEeqnarray*} \end{proposition} \section{First Chern class} Denote by $\mathbf{Man}^2$ the category of manifolds which are $2$-dimensional, connected, compact, oriented and with empty boundary. We will give a definition of the first Chern class of a symplectic vector bundle $E \longrightarrow \Sigma$ where $\Sigma \in \mathbf{Man}^2$. Our presentation is based on \cite{mcduffIntroductionSymplecticTopology2017}. We will start by setting up some categorical language. Define a contravariant functor $\mathbf{Man}^2 \longrightarrow \mathbf{Set}$: \begin{IEEEeqnarray*}{rrCl} \mathcal{E} \colon & \mathbf{Man}^2 & \longrightarrow & \mathbf{Set} \\ & \Sigma & \longmapsto & \mathcal{E}(\Sigma) \coloneqq \{ \text{symplectic vector bundles with base $\Sigma$} \}/\sim \\ & f \downarrow & \longmapsto & \uparrow f^* \\ & \Sigma' & \longmapsto & \mathcal{E}(\Sigma') \coloneqq \{ \text{symplectic vector bundles with base $\Sigma'$} \}/\sim, \end{IEEEeqnarray*} where $\sim$ is the equivalence relation coming from isomorphisms of symplectic vector bundles. Define also the following contravariant functors $\mathbf{Man}^2 \longrightarrow \mathbf{Set}$: \begin{IEEEeqnarray*}{rrCl} H^2 \coloneqq H^2(-;\Z) \colon & \mathbf{Man}^2 & \longrightarrow & \mathbf{Set}, \\ \\ H_2^* \coloneqq \operatorname{Hom}(H_2(-;\Z),\Z) \colon & \mathbf{Man}^2 & \longrightarrow & \mathbf{Set}, \\ \\ \mathcal{Z} \colon & \mathbf{Man}^2 & \longrightarrow & \mathbf{Set} \\ & \Sigma & \longmapsto & \mathcal{Z}(\Sigma) \coloneqq \Z \\ & f \downarrow & \longmapsto & \uparrow \times \deg f \\ & \Sigma' & \longmapsto & \mathcal{Z}(\Sigma') \coloneqq \Z. \end{IEEEeqnarray*} We have a natural transformation $\alpha \colon H^2 \longrightarrow H_2^*$ which is given by \begin{IEEEeqnarray*}{rrCl} \alpha_\Sigma \colon & H^2(\Sigma;\Z) & \longrightarrow & \operatorname{Hom}(H_2(\Sigma;\Z),\Z) \\ & [\omega] & \longmapsto & \alpha_\Sigma([\omega]), \end{IEEEeqnarray*} where $\alpha_\Sigma([\omega])([\sigma]) = [\omega(\sigma)]$. By the universal coefficient theorem for cohomology (see for example \cite{rotmanIntroductionHomologicalAlgebra2009}), $\alpha_\Sigma$ is surjective. Both $H^2(\Sigma;\Z)$ and $\operatorname{Hom}(H_2(\Sigma;\Z),\Z)$ are isomorphic to $\Z$, since $\Sigma \in \mathbf{Man}^2$. Therefore, $\alpha$ is a natural isomorphism. 
We also have a natural isomorphism $\operatorname{ev} \colon H_2^* \longrightarrow \mathcal{Z}$, given by
\begin{IEEEeqnarray*}{rrCl}
\operatorname{ev}_\Sigma \colon & \operatorname{Hom}(H_2(\Sigma;\Z),\Z) & \longrightarrow & \Z \\
& \phi & \longmapsto & \phi([\Sigma]).
\end{IEEEeqnarray*}
As we will see, the first Chern class is a natural transformation $c_1 \colon \mathcal{E} \longrightarrow H^2$ and the first Chern number is a natural transformation (which we denote by the same symbol) $c_1 \colon \mathcal{E} \longrightarrow \mathcal{Z}$. These functors and natural transformations will all fit into the following commutative diagram:
\begin{IEEEeqnarray*}{c+x*}
\begin{tikzcd}[ampersand replacement = \&]
\mathcal{E} \ar[r, "c_1"] \ar[rrr, bend right=50, swap, "c_1"] \& H^2 \ar[r, hook, two heads, "\alpha"] \& H_2^* \ar[r, hook, two heads, "\operatorname{ev}"] \& \mathcal{Z}.
\end{tikzcd}
\end{IEEEeqnarray*}
Therefore, the first Chern class determines and is determined by the first Chern number. More precisely, if $E \longrightarrow \Sigma$ is a symplectic vector bundle then the first Chern number of $E$ equals the first Chern class of $E$ evaluated on $\Sigma$:
\begin{IEEEeqnarray}{c+x*}
\plabel{eq:first chern class vs number}
c_1(E) = c_1(E)[\Sigma].
\end{IEEEeqnarray}
\begin{definition}[{\cite[Section 2.7]{mcduffIntroductionSymplecticTopology2017}}]
\label{def:c1}
Let $\Sigma \in \mathbf{Man}^2$ (i.e. $\Sigma$ is $2$-dimensional, connected, compact, oriented, with empty boundary) and $E \longrightarrow \Sigma$ be a symplectic vector bundle. We define the \textbf{first Chern number} of $E$, $c_1(E) \in \Z$, as follows. Choose embedded codimension $0$ submanifolds $\Sigma_1$ and $\Sigma_2$ of $\Sigma$ such that
\begin{IEEEeqnarray*}{c+x*}
S \coloneqq \del \Sigma_1 = \del \Sigma_2 = \Sigma_1 \cap \Sigma_2
\end{IEEEeqnarray*}
and $\Sigma$ is the gluing of $\Sigma_1$ and $\Sigma_2$ along $S$. Orient $S$ as the boundary of $\Sigma_1$. For $i=1,2$, denote by $\iota_i \colon \Sigma_i \longrightarrow \Sigma$ the inclusion and choose a symplectic trivialization
\begin{IEEEeqnarray*}{c+x*}
\tau^i \colon \iota_i^* E \longrightarrow \Sigma_i \times \R ^{2n}.
\end{IEEEeqnarray*}
Define the overlap map $A \colon S \longrightarrow \operatorname{Sp}(2n)$ by $A(x) = \tau^1_x \circ (\tau^2_x)^{-1}$. Denote by $S_1, \ldots, S_k$ the connected components of $S$ and parametrize each component by a loop $\gamma_i \colon S^1 \longrightarrow S_i$ such that $\dot{\gamma}_i(t)$ is positively oriented. Finally, let
\begin{IEEEeqnarray*}{c+x*}
c_1(E) \coloneqq \sum_{i=1}^{k} \mu(A \circ \gamma_i),
\end{IEEEeqnarray*}
where $\mu$ is the Maslov index as in \cref{thm:maslov sympl properties}.
\end{definition}
\begin{theorem}[{\cite[Theorem 2.7.1]{mcduffIntroductionSymplecticTopology2017}}]
The first Chern number is well-defined and it is the unique natural transformation $c_1 \colon \mathcal{E} \longrightarrow \mathcal{Z}$ which satisfies the following properties:
\begin{description}
\item[(Classification)] If $E, E' \in \mathcal{E}(\Sigma)$ then $E$ and $E'$ are isomorphic if and only if $\operatorname{rank} E = \operatorname{rank} E'$ and $c_1(E) = c_1(E')$.
\item[(Naturality)] If $f \colon \Sigma \longrightarrow \Sigma'$ is a smooth map and $E \in \mathcal{E}(\Sigma')$ then $c_1(f^*E) = \deg(f) c_1(E)$.
\item[(Additivity)] If $E, E' \in \mathcal{E}(\Sigma)$ then $c_1(E \directsum E') = c_1(E) + c_1(E')$.
\item[(Normalization)] The first Chern number of $T \Sigma$ is $c_1(T\Sigma) = 2 - 2g$, where $g$ is the genus of $\Sigma$.
\end{description} \end{theorem} \section{Conley--Zehnder index of a periodic orbit} Let $(X,\omega)$ be a symplectic manifold of dimension $2n$ and $H \colon S^1 \times X \longrightarrow \R$ be a time-dependent Hamiltonian. For each $t \in S^1$ we denote by $H_t$ the map $H_t = H(t,\cdot) \colon X \longrightarrow \R$. The Hamiltonian $H$ has a corresponding time-dependent Hamiltonian vector field $X_H$ which is uniquely determined by \begin{IEEEeqnarray*}{c+x*} \edv H_t = - \iota_{X_{H_t}} \omega. \end{IEEEeqnarray*} We denote by $\phi^t_{X_H}$ the time-dependent flow of $X_{H}$. \begin{definition} \label{def:orbit of hamiltonian} A \textbf{$1$-periodic orbit} of $H$ is a map $\gamma \colon S^1 \longrightarrow X$ such that \begin{IEEEeqnarray*}{c+x*} \dot{\gamma}(t) = X_{H_t} (\gamma(t)) \end{IEEEeqnarray*} for every $t \in S^1$. If $\lambda$ is a symplectic potential for $(X,\omega)$, then the \textbf{action} of $\gamma$ is \begin{IEEEeqnarray*}{c+x*} \mathcal{A}_H(\gamma) \coloneqq \int_{S^1}^{} \gamma^* \lambda - \int_{S^1}^{} H(t, \gamma(t)) \edv t. \end{IEEEeqnarray*} \end{definition} \begin{definition} \label{def:nondegenerate hamiltonian orbit} Let $\gamma$ be a $1$-periodic orbit of $H$. We say that $\gamma$ is \textbf{nondegenerate} if the linear map \begin{IEEEeqnarray*}{c+x*} \dv \phi^{1}_{X_H} \colon T_{\gamma(0)} X \longrightarrow T_{\gamma(1)} X = T_{\gamma(0)} X \end{IEEEeqnarray*} does not have $1$ as an eigenvalue. We say that the Hamiltonian $H$ is \textbf{nondegenerate} if every $1$-periodic orbit of $H$ is nondegenerate. \end{definition} \begin{definition} \phantomsection\label{def:cz of hamiltonian orbit wrt trivialization} Let $\gamma$ be a $1$-periodic orbit of $H$ and $\tau$ be a symplectic trivialization of $\gamma^* TX$. We define the \textbf{Conley--Zehnder index} of $\gamma$ with respect to $\tau$, denoted $\conleyzehnder^{\tau}(\gamma)$, as follows. First, define a path of symplectic matrices $A^{\gamma,\tau} \colon [0,1] \longrightarrow \operatorname{Sp}(2n)$ by the equation $A^{\gamma,\tau}(t) \coloneqq \tau_t \circ \dv \phi^t_{X_H}(\gamma(0)) \circ \tau_{0}^{-1}$. In other words, $A^{\gamma,\tau}(t)$ is the unique linear map such that the diagram \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} T_{\gamma(0)} X \ar[d, swap, "\dv \phi^t_{X_{H}}(\gamma(0))"] \ar[r, "\tau_0"] & \R^{2n} \ar[d, "A^{\gamma,\tau}(t)"] \\ T_{\gamma(t)} \ar[r, swap, "\tau_t"] & \R^{2n} \end{tikzcd} \end{IEEEeqnarray*} commutes. Notice that since $\gamma$ is nondegenerate, $A^{\gamma,\tau} \in \operatorname{SP}(n)$. Then, define \begin{IEEEeqnarray*}{c+x*} \conleyzehnder^{\tau}(\gamma) \coloneqq \conleyzehnder(A^{\gamma,\tau}). \end{IEEEeqnarray*} \end{definition} Let $D = \{ z \in \C \mid |z| \leq 1 \}$ be the disk and denote by $\iota_{D,S^1} \colon S^1 \longrightarrow D$ the inclusion on the boundary, i.e. $\iota_{D,S^1}(t) = e^{2 \pi i t}$. \begin{lemma} \label{lem:cz of hamiltonian is independent of triv over filling disk} Let $\gamma$ be a $1$-periodic orbit of $H$. For $i = 1,2$, let $u_i \colon D \longrightarrow X$ be a filling disk for $\gamma$ (i.e. $\gamma = u_i \circ \iota_{D,S^1}$) and $\tau^i$ be a symplectic trivialization of $u_i^* TX$. If $c_1(TX)|_{\pi_2(X)} = 0$, then \begin{IEEEeqnarray*}{c+x*} \conleyzehnder^{\tau^1}(\gamma) = \conleyzehnder^{\tau^2}(\gamma). 
\end{IEEEeqnarray*}
\end{lemma}
\begin{proof}
Consider the diagram
\begin{IEEEeqnarray}{c+x*}
\plabel{eq:diagram cz indep choices}
\begin{tikzcd}
\R^{2n} \ar[d, swap, "A^{\gamma,\tau^1}(t)"] & T_{\gamma(0)} X \ar[d, "\dv \phi^t_{X_H}(\gamma(0))"] \ar[l, swap, "\tau^1_0"] \ar[r, "\tau^2_0"] & \R ^{2n} \ar[ll, bend right=50, swap, "B(0)"] \ar[d, "A^{\gamma,\tau^2}(t)"] \\
\R^{2n} & T_{\gamma(t)} X \ar[l, "\tau^1_t"] \ar[r, swap, "\tau^2_t"] & \R ^{2n} \ar[ll, bend left=50, "B(t)"] \\
\end{tikzcd}
\end{IEEEeqnarray}
where we have defined $B(t) \coloneqq \tau^1_t \circ (\tau^2_t)^{-1}$. Let $\sigma \colon S^2 \longrightarrow X$ be the gluing of the disks $u_1$ and $u_2$ along their common boundary $\gamma$. Then,
\begin{IEEEeqnarray*}{rCls+x*}
\IEEEeqnarraymulticol{3}{l}{\conleyzehnder^{\tau^1}(\gamma) - \conleyzehnder^{\tau^2}(\gamma)}\\
\quad & = & \conleyzehnder(A^{\gamma,\tau^1}) - \conleyzehnder(A^{\gamma,\tau^2}) & \quad [\text{by \cref{def:cz of hamiltonian orbit wrt trivialization}}]\\
& = & \conleyzehnder(B A^{\gamma,\tau^2} B(0)^{-1}) - \conleyzehnder(A^{\gamma,\tau^2}) & \quad [\text{by diagram \eqref{eq:diagram cz indep choices}}] \\
& = & \conleyzehnder(B(0)^{-1} B A^{\gamma,\tau^2}) - \conleyzehnder(A^{\gamma,\tau^2}) & \quad [\text{by naturality of $\conleyzehnder$}] \\
& = & 2 \mu(B(0)^{-1} B) & \quad [\text{by the loop property of $\conleyzehnder$}] \\
& = & 2 \mu(B) & \quad [\text{by homotopy invariance of $\maslov$}] \\
& = & 2 c_1(\sigma^* TX) & \quad [\text{by definition of the first Chern number}] \\
& = & 2 c_1 (TX) ([\sigma]) & \quad [\text{by Equation \eqref{eq:first chern class vs number}}] \\
& = & 0 & \quad [\text{by assumption}]. & \qedhere
\end{IEEEeqnarray*}
\end{proof}
Let $(M,\alpha)$ be a contact manifold of dimension $2n + 1$ with Reeb vector field $R$. Our goal is to repeat the discussion of the first part of this section in the context of periodic orbits of $R$.
\begin{definition}
A \textbf{Reeb orbit} is a map $\gamma \colon \R / T \Z \longrightarrow M$ such that
\begin{IEEEeqnarray*}{c+x*}
\dot{\gamma}(t) = R(\gamma(t))
\end{IEEEeqnarray*}
for every $t \in \R / T \Z$. In this case, we call $T$ the \textbf{period} of $\gamma$. The \textbf{multiplicity} of $\gamma$, which we will usually denote by $m$, is the degree of the map $\gamma \colon \R / T \Z \longrightarrow \img \gamma$. The \textbf{action} of $\gamma$ is
\begin{IEEEeqnarray*}{c+x*}
\mathcal{A}(\gamma) \coloneqq \int_{0}^{T} \gamma^* \alpha = T.
\end{IEEEeqnarray*}
\end{definition}
\begin{remark}
Alternatively, a $T$-periodic Reeb orbit can be seen as a map $\gamma \colon S^1 \longrightarrow M$ such that $\dot{\gamma}(t) = T R(\gamma(t))$. We will use the two possible descriptions interchangeably.
\end{remark}
Since $\ldv{R} \alpha = 0$ (by \cref{lem:reeb vf preserves contact form}) and using \cref{lem:mosers trick}, we conclude that $(\phi^t_R)^* \alpha = \alpha$. In particular, $\dv \phi^t_R(p) (\xi_p) \subset \xi_{\phi^t_R(p)}$ and
\begin{IEEEeqnarray*}{c+x*}
\dv \phi^t_R(p) \colon \xi_p \longrightarrow \xi_{\phi^t_R(p)}
\end{IEEEeqnarray*}
is a symplectic linear map.
\begin{definition}
A Reeb orbit $\gamma$ of $M$ is \textbf{nondegenerate} if the linear map
\begin{IEEEeqnarray*}{c+x*}
\dv \phi^T_R(\gamma(0)) \colon \xi_{\gamma(0)} \longrightarrow \xi_{\gamma(T)} = \xi_{\gamma(0)}
\end{IEEEeqnarray*}
does not have $1$ as an eigenvalue. We say that $(M, \alpha)$ is \textbf{nondegenerate} if every Reeb orbit in $M$ is nondegenerate.
If $(X, \lambda)$ is a Liouville domain, then $(X, \lambda)$ is \textbf{nondegenerate} if $(\partial X, \lambda|_{\partial X})$ is nondegenerate. \end{definition} \begin{definition} \label{def:cz of reeb orbit wrt trivialization} Let $\gamma$ be a periodic orbit of $R$ and $\tau$ be a symplectic trivialization of $\gamma^* \xi$. The \textbf{Conley--Zehnder index} of $\gamma$ is given by \begin{IEEEeqnarray*}{c+x*} \conleyzehnder^{\tau}(\gamma) \coloneqq \conleyzehnder(A^{\gamma,\tau}), \end{IEEEeqnarray*} where $A^{\gamma,\tau} \colon [0,1] \longrightarrow \operatorname{Sp}(2n)$ is the path of symplectic matrices given by the equation $A^{\gamma,\tau}(t) \coloneqq \tau_t \circ \dv \phi^t_{R}(\gamma(0)) \circ \tau_{0}^{-1}$. \end{definition} \begin{lemma} \label{lem:cz of reeb is independent of triv over filling disk} Let $(X, \lambda)$ be a Liouville domain and $\gamma \colon S^1 \longrightarrow \partial X$ be a Reeb orbit. For $i = 1,2$, let $u_i \colon D \longrightarrow X$ be a filling disk for $\gamma$ (i.e. $\iota_{X,\partial X} \circ \gamma = u_i \circ \iota_{D,S^1}$). Let $\tau^i$ be a symplectic trivialization of $u_i^* TX$ and denote also by $\tau^i$ the induced trivialization of $(\iota_{X,\partial X} \circ \gamma)^* TX$. Assume that \begin{IEEEeqnarray*}{rClCl} \tau^i_{t}(Z_{\gamma(t)}) & = & e_1 & \in & \R^{2n}, \\ \tau^i_{t}(R_{\gamma(t)}) & = & e_{n+1} & \in & \R^{2n}, \end{IEEEeqnarray*} for every $t \in S^1$. If $2 c_1(TX) = 0$, then \begin{IEEEeqnarray*}{c+x*} \conleyzehnder^{\tau^1}(\gamma) = \conleyzehnder^{\tau^2}(\gamma). \end{IEEEeqnarray*} \end{lemma} \begin{proof} By the assumptions on $\tau^i$, the diagram \begin{IEEEeqnarray}{c+x*} \plabel{eq:diagram cz reeb indep triv} \begin{tikzcd} \xi_{\gamma(t)} \ar[r] \ar[d, swap, "\tau^i_t"] & T_{\gamma(t)} X \ar[d, "\tau^i_t"] & \xi^{\perp}_{\gamma(t)} \ar[d, "\tau^i_t"] \ar[l] \\ \R^{2n-2} \ar[r, swap, "\iota_{\R^{2n-2}}"] & \R^{2n} & \R^{2} \ar[l, "\iota_{\R^{2}}"] \end{tikzcd} \end{IEEEeqnarray} commutes, where \begin{IEEEeqnarray*}{rCls+x*} \iota_{\R^{2n-2}}(x^2,\ldots,x^n,y^2,\ldots,y^n) & = & (0,x^2,\ldots,x^n,0,y^2,\ldots,y^n), \\ \iota_{\R^{2}}(x,y) & = & (x,0,\ldots,0,y,0,\ldots,0). \end{IEEEeqnarray*} Define \begin{IEEEeqnarray*}{rCcCrCl} B^{2n}(t) & \coloneqq & \tau^1_t \circ (\tau^2_t)^{-1} & \colon & \R^{2n} & \longrightarrow & \R^{2n}, \\ B^{2n-2}(t) & \coloneqq & \tau^1_t \circ (\tau^2_t)^{-1} & \colon & \R^{2n-2} & \longrightarrow & \R^{2n-2}, \end{IEEEeqnarray*} By the assumptions on $\tau^i$, and diagram \eqref{eq:diagram cz reeb indep triv}, \begin{IEEEeqnarray}{c+x*} \plabel{eq:decomposition of b} B^{2n}(t) = \begin{bmatrix} \id_{\R^2} & 0 \\ 0 & B^{2n-2} \end{bmatrix}. \end{IEEEeqnarray} Let $\sigma \colon S^2 \longrightarrow X$ be the gluing of the disks $u_1$ and $u_2$ along their common boundary $\gamma$. Finally, we compute \begin{IEEEeqnarray*}{rCls+x*} \conleyzehnder^{\tau^1}(\gamma) - \conleyzehnder^{\tau^2}(\gamma) & = & 2 \mu (B^{2n-2}) & \quad [\text{by the same computation as in \cref{lem:cz of hamiltonian is independent of triv over filling disk}}] \\ & = & 2 \mu (B^{2n}) & \quad [\text{by Equation \eqref{eq:decomposition of b} and \cref{thm:maslov sympl properties}}] \\ & = & 2 c_1(\sigma^* TX) & \quad [\text{by definition of first Chern class}] \\ & = & 0 & \quad [\text{by assumption}]. 
& \qedhere
\end{IEEEeqnarray*}
\end{proof}
\begin{remark}
\label{rmk:notation for tuples of orbits}
Suppose that $\Gamma = (\gamma_1, \ldots, \gamma_p)$ is a tuple of (Hamiltonian or Reeb) orbits and $\tau$ is a trivialization of the relevant symplectic vector bundle over each orbit. We will frequently use the following notation:
\begin{IEEEeqnarray*}{rCls+x*}
\mathcal{A}(\Gamma) & \coloneqq & \sum_{i=1}^{p} \mathcal{A}(\gamma_i), \\
\conleyzehnder^{\tau}(\Gamma) & \coloneqq & \sum_{i=1}^{p} \conleyzehnder^{\tau}(\gamma_i).
\end{IEEEeqnarray*}
If $\beta = \sum_{i=1}^{m} a_i \Gamma_i$ is a formal linear combination of tuples of orbits, then we denote
\begin{IEEEeqnarray*}{c+x*}
\mathcal{A}(\beta) \coloneqq \max_{i = 1, \ldots, m} \mathcal{A}(\Gamma_i).
\end{IEEEeqnarray*}
The action of a formal linear combination is going to be relevant only in \cref{chp:contact homology}, where we will consider the action filtration on linearized contact homology.
\end{remark}
\section{Periodic Reeb orbits in a unit cotangent bundle}
Let $(L, g)$ be an orientable Riemannian manifold of dimension $n$. Recall that $L$ has a cotangent bundle $\pi \colon T^* L \longrightarrow L$, which is an exact symplectic manifold with symplectic potential $\lambda \in \Omega^1(T^* L)$, symplectic form $\omega \coloneqq \edv \lambda$ and Liouville vector field $Z$ given by $\iota_Z \omega = \lambda$. We will denote by $z \colon L \longrightarrow T^*L$ the zero section. Consider the unit cotangent bundle $\pi \colon S^* L \longrightarrow L$ and denote by $\iota \colon S^* L \longrightarrow T^* L$ the inclusion. Then, $\alpha \coloneqq \iota^* \lambda$ is a contact form on $S^* L$, with associated contact distribution $\xi = \ker \alpha \subset T S^* L$ and Reeb vector field $R \in \mathfrak{X}(S^* L)$. The Riemannian metric $g$ defines a vector bundle isomorphism $\tilde{g} \colon TL \longrightarrow T^*L$ given by $\tilde{g}(v) = g(v, \cdot)$. Let $\ell > 0$ and $c \colon \R / \ell \Z \longrightarrow L$ be a curve which is parametrized by arclength. Define $\gamma \coloneqq \tilde{g} \circ \dot{c} \colon \R / \ell \Z \longrightarrow S^* L$. Then, by \cref{thm:flow geodesic vs hamiltonian,thm:flow reeb vs hamiltonian}, the curve $c$ is a geodesic (of length $\ell$) if and only if $\gamma$ is a Reeb orbit (of period $\ell$). We will assume that this is the case. The goal of this section is to study specific sets of trivializations and maps between these sets (see diagram \eqref{eq:diagram of maps of trivializations}), which can be used to define the Conley--Zehnder index of $\gamma$ (see \cref{thm:index of geodesic or reeb orbit isometric triv}). Since $T^* L$ is a symplectic manifold, $T T^* L \longrightarrow T^* L$ is a symplectic vector bundle. The hyperplane distribution $\xi$ is a symplectic subbundle of $\iota^* T T^* L \longrightarrow S^* L$. We can consider the symplectic complement of $\xi$, which by \cref{lem:decomposition coming from contact hypersurface} is given by
\begin{IEEEeqnarray*}{c+x*}
\xi^{\perp}_{u} = \p{<}{}{Z_u} \oplus \p{<}{}{R_u}
\end{IEEEeqnarray*}
for every $u \in S^* L$. Finally, $T^* L \oplus T L \longrightarrow L$ is a symplectic vector bundle, with symplectic structure given by
\begin{IEEEeqnarray*}{c+x*}
\omega_{T^* L \oplus TL}((u,v), (x,y)) = u(y) - x(v).
\end{IEEEeqnarray*}
\begin{remark}
\label{rmk:connections}
Let $\pi \colon E \longrightarrow B$ be a vector bundle. Consider the vector bundles $\pi^* E$, $TE$ and $\pi^* TB$ over $E$.
There is a short exact sequence
\begin{IEEEeqnarray*}{c+x*}
\phantomsection\label{eq:short exact sequence of vector bundles}
\begin{tikzcd}
0 \ar[r] & \pi^* E \ar[r, "I^V"] & TE \ar[r, "P^H"] & \pi^* T B \ar[r] & 0
\end{tikzcd}
\end{IEEEeqnarray*}
of vector bundles over $E$, where
\begin{IEEEeqnarray*}{rClCrClCl}
I^V_e & \coloneqq & \dv \iota_e(e) & \colon & E_{\pi(e)} & \longrightarrow & T_e E, & \quad & \text{where } \iota_e \colon E_{\pi(e)} \longrightarrow E \text{ is the inclusion,} \\
P^H_e & \coloneqq & \dv \parbox{\widthof{$\iota_e$}}{$\pi$} (e) & \colon & T_e E & \longrightarrow & T_{\pi(e)} B,
\end{IEEEeqnarray*}
for every $e \in E$. Recall that a \textbf{Koszul connection} on $E$ is a map
\begin{IEEEeqnarray*}{c+x*}
\nabla \colon \mathfrak{X}(B) \times \Gamma(E) \longrightarrow \Gamma(E)
\end{IEEEeqnarray*}
which is $C^{\infty}$-linear on $\mathfrak{X}(B)$ and satisfies the Leibniz rule on $\Gamma(E)$. A \textbf{linear Ehresmann connection} on $E$ is a vector bundle map $P^V \colon TE \longrightarrow \pi^* E$ such that $P^V \circ I^V = \id_{\pi^* E}$ and $P^V \circ T m_{\lambda} = m_{\lambda} \circ P^V$ for every $\lambda \in \R$, where $m_{\lambda} \colon E \longrightarrow E$ is the map which multiplies by $\lambda$. The sets of Koszul connections on $E$ and of linear Ehresmann connections on $E$ are in bijection. If $\nabla$ is a Koszul connection on $E$, the corresponding linear Ehresmann connection is given as follows. Let $I^H \colon \pi^* TB \longrightarrow TE$ be the map which is given by
\begin{IEEEeqnarray*}{c+x*}
I^H_e(u) \coloneqq \dv s (\pi(e)) u - I^V_e(\nabla_u^{} s)
\end{IEEEeqnarray*}
for every $e \in E$ and $u \in T_{\pi(e)} B$, where $s$ is any choice of section of $\pi \colon E \longrightarrow B$ such that $s(\pi(e)) = e$. The map $I^H$ is independent of the choice of section $s$ and satisfies $P^H \circ I^H = \id_{\pi^* TB}$. Let $P^V \colon TE \longrightarrow \pi^* E$ be the map which is given by
\begin{IEEEeqnarray*}{c+x*}
P^V_e(w) \coloneqq (I^V_e)^{-1} (w - I^H_e \circ P^H_e (w))
\end{IEEEeqnarray*}
for every $e \in E$ and $w \in T_e E$. We point out that this definition is well-posed, since $w - I^H_e \circ P^H_e (w) \in \ker P^H_e = \img I^V_e$. As before, $P^V \circ I^V = \id_{\pi^* E}$. Finally, the maps
\begin{IEEEeqnarray*}{rCrCrCl}
I & \coloneqq & I^V & \oplus & I^H & \colon & \pi^* E \oplus \pi^* T B \longrightarrow TE, \\
P & \coloneqq & P^V & \times & P^H & \colon & TE \longrightarrow \pi^* E \oplus \pi^* T B,
\end{IEEEeqnarray*}
are isomorphisms and inverses of one another.
\end{remark}
Consider the Levi-Civita connection on $L$, which is a Koszul connection on $T L$. There is an induced Koszul connection on $T^* L$ given by
\begin{IEEEeqnarray*}{c+x*}
(\nabla_X \beta)(Y) \coloneqq X(\beta(Y)) - \beta(\nabla_X Y),
\end{IEEEeqnarray*}
for every $X, Y \in \mathfrak{X}(L)$ and $\beta \in \Gamma(T^* L) = \Omega^1(L)$. By \cref{rmk:connections} (with $B = L$ and $E = T^*L$), there is an induced linear Ehresmann connection on $\pi \colon T^*L \longrightarrow L$ which is given by maps
\begin{IEEEeqnarray*}{rCrCrCl}
I & \coloneqq & I^V & \oplus & I^H & \colon & \pi^* T^* L \oplus \pi^* T L \longrightarrow T T^* L, \\
P & \coloneqq & P^V & \times & P^H & \colon & T T^* L \longrightarrow \pi^* T^* L \oplus \pi^* T L.
\end{IEEEeqnarray*}
\begin{lemma}
\label{prop:properties of p}
The maps $I$ and $P$ are isomorphisms of symplectic vector bundles.
Moreover, \begin{IEEEeqnarray}{rClCl} P(Z_u) & = & (u,0), & \quad & \text{ for every } u \in T^* L, \plabel{eq:p of vfs 1} \\ P(R_u) & = & (0,\tilde{g}^{-1}(u)), & \quad & \text{ for every } u \in S^* L. \plabel{eq:p of vfs 2} \end{IEEEeqnarray} \end{lemma} \begin{proof} Let $q \coloneqq \pi(u)$ and choose normal coordinates $(q^1,\ldots,q^n)$ on $L$ centred at $q$ (this means that with respect to these coordinates, $g_{ij}(q) = \delta_{ij}$ and $\partial_k g_{ij} (q) = 0$). Let $(q^1, \ldots, q^n, p_1, \ldots, p_n)$ be the induced coordinates on $T^* L$. Then, the vector spaces $T_u T^*L$ and $T^*_q L \directsum T_q L$ have the following symplectic bases: \begin{IEEEeqnarray}{rCls+x*} T_ u T^*L & = & \spn \p{c}{2}{ \pdv{}{p_1}\Big|_{u}, \cdots, \pdv{}{p_n}\Big|_{u}, \pdv{}{q^1}\Big|_{u}, \cdots, \pdv{}{q^n}\Big|_{u} }, \plabel{eq:basis 1} \\ T^*_q L \directsum T_q L & = & \spn \p{c}{1}{ \edv q^1|_q, \ldots, \edv q^n|_q } \directsum \spn \p{c}{2}{ \pdv{}{q^1}\Big|_{q}, \cdots, \pdv{}{q^n}\Big|_{q} }. \plabel{eq:basis 2} \end{IEEEeqnarray} By the definitions of $P$ and $I$ in \cref{rmk:connections}, we have \begin{IEEEeqnarray}{rCls+x*} I^V_u (\edv q^i|_q) & = & \pdv{}{p_i}\Big|_u, \IEEEnonumber\\ P^H_u \p{}{2}{ \pdv{}{q^i}\Big|_{u} } & = & \pdv{}{q^i}\Big|_{q}, \plabel{eq:p horizontal in coordinates} \\ P^V_u \p{}{2}{ \pdv{}{p_i}\Big|_{u} } & = & P^V_u \circ I^V_u (\edv q^i|_{q}) = \edv q^i|_q, \plabel{eq:p vertical in coordinates} \end{IEEEeqnarray} which implies that $P$ is the identity matrix when written with respect to the bases \eqref{eq:basis 1} and \eqref{eq:basis 2}. Since these bases are symplectic, $P$ is a symplectic linear map. With respect to the coordinates $(q^1, \ldots, q^n, p_1, \ldots, p_n)$, the Liouville vector field is given by \begin{IEEEeqnarray}{c+x*} Z = \sum_{i=1}^{n} p_i \pdv{}{p_i}. \plabel{eq:liouville vector field in coordinates} \end{IEEEeqnarray} By \cref{thm:flow reeb vs hamiltonian} and Equation \eqref{eq:hamiltonian vector field in coordinates}, and since the coordinates are normal, the Reeb vector field is given by \begin{IEEEeqnarray}{rCl} R_u & = & \sum_{i=1}^{n} p_i(u) \pdv{}{q^i}\Big|_{u}. \plabel{eq:reeb vector field in coordinates} \end{IEEEeqnarray} Equations \eqref{eq:liouville vector field in coordinates} and \eqref{eq:reeb vector field in coordinates} together with equations \eqref{eq:p horizontal in coordinates} and \eqref{eq:p vertical in coordinates} imply Equations \eqref{eq:p of vfs 1} and \eqref{eq:p of vfs 2}. \end{proof} Define \begin{IEEEeqnarray*}{rCls+x*} \mathcal{T}(c^* TL) & \coloneqq & \left\{ \kappa \ \middle\vert \begin{array}{l} \kappa \text{ is an isometric trivialization of } c^* TL \\ \text{such that } \kappa_t (\dot{c}(t)) = e_1 \in \R^n \text{ for every } t \in \R / \ell \Z \end{array} \right\}, \\ \mathcal{T}(\gamma^* \xi) & \coloneqq & \{ \tau \mid \tau \text{ is a symplectic trivialization of } \gamma^* \xi \}, \\ \mathcal{T}((z \circ c)^* T T^* L) & \coloneqq & \{ \sigma \mid \sigma \text{ is a symplectic trivialization of } (z \circ c)^* T T^* L \}. \end{IEEEeqnarray*} We will define maps $\tau$, $\sigma_0$ and $\sigma$ (see \cref{def:map of trivializations tau,def:map of trivializations sigma 0,def:map of trivializations sigma}) which fit into the following diagram. 
\begin{IEEEeqnarray}{c+x*} \plabel{eq:diagram of maps of trivializations} \begin{tikzcd} \mathcal{T}(c^* TL) \ar[d, swap, "\tau"] \ar[dr, "\sigma"] \\ \mathcal{T}(\gamma^* \xi) \ar[r, swap, "\sigma_0"] & \mathcal{T}((z \circ c)^* T T^* L) \end{tikzcd} \end{IEEEeqnarray} We will check that this diagram commutes in \cref{lem:diagram of maps of trivalizations commutes}. Consider the following diagram of symplectic vector spaces and symplectic linear maps. \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} \xi_{\gamma(t)}^{} \ar[r, "\iota_{\xi_{\gamma(t)}}"] & \xi^{\perp}_{\gamma(t)} \oplus \xi_{\gamma(t)}^{} \ar[r, equals] & T_{\gamma(t)}^{} T^* L \ar[r, "P_{\gamma(t)}"] & T^*_{c(t)} L \oplus T_{c(t)}^{} L & T_{z \circ c(t)}^{} T^* L \ar[l, swap, "P_{z \circ c(t)}"] \end{tikzcd} \end{IEEEeqnarray*} We now define the maps $\tau$, $\sigma_0$ and $\sigma$. \begin{definition} \phantomsection\label{def:map of trivializations tau} For every $\kappa \in \mathcal{T}(c^* TL)$, we define $\tau(\kappa) \in \mathcal{T}(\gamma^* \xi)$ by \begin{IEEEeqnarray*}{c+x*} \tau(\kappa)_t \coloneqq \pi_{\R^{2n-2}} \circ \tilde{\kappa}_t \circ P_{\gamma(t)} \circ \iota_{\xi_{\gamma(t)}}, \end{IEEEeqnarray*} where $\tilde{\kappa}_t \colon T^*_{c(t)} L \oplus T_{c(t)}^{} L \longrightarrow \R^n \oplus \R^n$ and $\pi_{\R^{2n-2}} \colon \R^{2n} \longrightarrow \R^{2n-2}$ are given by \begin{IEEEeqnarray*}{rCl} \tilde{\kappa}_t(u,v) & \coloneqq & (\kappa_t \circ \tilde{g}^{-1}_{c(t)}(u), \kappa_t(v)), \\ \pi_{\R^{2n-2}}(x^1,\ldots,x^n,y^1,\ldots,y^n) & \coloneqq & (x^2,\ldots,x^n,y^2,\ldots,y^n). \end{IEEEeqnarray*} \end{definition} For \cref{def:map of trivializations tau} to be well-posed, we need $\tilde{\kappa}_t$ to be a symplectic linear map. We check this in \cref{lem:kappa tl is symplectic} below. \begin{definition} \phantomsection\label{def:map of trivializations sigma 0} For every $\tau \in \mathcal{T}(\gamma^* \xi)$, we define $\sigma_0(\tau) \in \mathcal{T}((z \circ c)^* T T^*L)$ by \begin{IEEEeqnarray*}{c+x*} \sigma_0 (\tau)_t \coloneqq \tilde{\tau}_t \circ P^{-1}_{\gamma(t)} \circ P_{z \circ c(t)}, \end{IEEEeqnarray*} where $\tilde{\tau}_t \colon \xi^{\perp}_{\gamma(t)} \oplus \xi_{\gamma(t)}^{} \longrightarrow \R^{2n}$ is the symplectic linear map given by \begin{IEEEeqnarray*}{rCls+x*} \tilde{\tau}_t (Z_{\gamma(t)}) & = & e_1, \\ \tilde{\tau}_t (R_{\gamma(t)}) & = & e_{n+1}, \\ \tilde{\tau}_t (v) & = & \iota_{\R^{2n-2}} \circ \tau_t(v), \quad \text{for every } v \in \xi_{\gamma(t)}, \end{IEEEeqnarray*} and $\iota_{\R^{2n-2}} \colon \R^{2n-2} \longrightarrow \R^{2n}$ is given by \begin{IEEEeqnarray*}{c+x*} \iota_{\R^{2n-2}}(x^2,\ldots,x^n,y^2,\ldots,y^n) = (0,x^2,\ldots,x^n,0,y^2,\ldots,y^n). \end{IEEEeqnarray*} \end{definition} \begin{definition} \label{def:map of trivializations sigma} For every $\kappa \in \mathcal{T}(c^* TL)$, we define $\sigma(\kappa) \in \mathcal{T}((z \circ c)^* T T^*L)$ by \begin{IEEEeqnarray*}{c+x*} \sigma(\kappa)_t \coloneqq \tilde{\kappa}_t \circ P_{z \circ c(t)}. \end{IEEEeqnarray*} \end{definition} \begin{lemma} \label{lem:kappa tl is symplectic} The map $\tilde{\kappa}_t$ from \cref{def:map of trivializations tau,def:map of trivializations sigma} is symplectic. 
\end{lemma}
\begin{proof}
For $(u,v), (x,y) \in T^*_{c(t)} L \oplus T_{c(t)}^{} L$, we have
\begin{IEEEeqnarray*}{rCls+x*}
\IEEEeqnarraymulticol{3}{l}{\omega_{\R^n \oplus \R^n} \p{}{1}{ \tilde{\kappa}_t \p{}{}{u,v}, \tilde{\kappa}_t \p{}{}{x,y} } }\\
\ & = & \omega_{\R^n \oplus \R^n} \p{}{1}{ \p{}{1}{ \kappa_t \circ \tilde{g}_{c(t)}^{-1} (u), \kappa_t (v)}, \p{}{1}{ \kappa_t \circ \tilde{g}_{c(t)}^{-1} (x), \kappa_t (y)} } & \quad [\text{by definition of $\tilde{\kappa}_t$}] \\
& = & \p{<}{1}{ \kappa_t \circ \tilde{g}_{c(t)}^{-1} (u), \kappa_t (y) }_{\R^n} - \p{<}{1}{ \kappa_t \circ \tilde{g}_{c(t)}^{-1} (x), \kappa_t (v) }_{\R^n} & \quad [\text{by definition of $\omega_{\R^n \oplus \R^n}$}] \\
& = & \p{<}{1}{ \tilde{g}_{c(t)}^{-1} (u), y }_{TL} - \p{<}{1}{ \tilde{g}_{c(t)}^{-1} (x), v }_{TL} & \quad [\text{since $\kappa_t$ is an isometry}] \\
& = & u(y) - x(v) & \quad [\text{by definition of $\tilde{g}$}] \\
& = & \omega_{T^*L \oplus TL} \p{}{1}{(u,v),(x,y)} & \quad [\text{by definition of $\omega_{T^*L \oplus TL}$}]. & \qedhere
\end{IEEEeqnarray*}
\end{proof}
\begin{lemma}
\label{lem:diagram of maps of trivalizations commutes}
Diagram \eqref{eq:diagram of maps of trivializations} commutes, i.e. $\sigma = \sigma_0 \circ \tau$.
\end{lemma}
\begin{proof}
By \cref{def:map of trivializations tau,def:map of trivializations sigma 0,def:map of trivializations sigma},
\begin{IEEEeqnarray*}{rCls+x*}
\sigma(\kappa)_t & = & \tilde{\kappa}_t \circ P_{z \circ c(t)}, \\
\sigma_0(\tau(\kappa))_t & = & \widetilde{\tau(\kappa)}_t \circ P_{\gamma(t)}^{-1} \circ P_{z \circ c(t)}.
\end{IEEEeqnarray*}
Therefore, it is enough to show that $\tilde{\kappa}_t \circ P_{\gamma(t)} = \widetilde{\tau(\kappa)}_t \colon T_{\gamma(t)} T^*L \longrightarrow \R^{2n}$. We show that $\tilde{\kappa}_t \circ P_{\gamma(t)}(Z_{\gamma(t)}) = \widetilde{\tau(\kappa)}_t(Z_{\gamma(t)})$.
\begin{IEEEeqnarray*}{rCls+x*}
\tilde{\kappa}_{t} \circ P_{\gamma(t)} (Z_{\gamma(t)}) & = & \tilde{\kappa}_t(\gamma(t), 0) & \quad [\text{by \cref{prop:properties of p}}] \\
& = & (\kappa_t \circ \tilde{g}^{-1}_{c(t)}(\gamma(t)), 0) & \quad [\text{by definition of $\tilde{\kappa}_t$}] \\
& = & (\kappa_t(\dot{c}(t)), 0) & \quad [\text{by definition of $\gamma$}] \\
& = & (e_1,0) & \quad [\text{since $\kappa \in \mathcal{T}(c^* TL)$}] \\
& = & \widetilde{\tau(\kappa)}_t (Z_{\gamma(t)}) & \quad [\text{by definition of $\widetilde{\tau(\kappa)}_t$}].
\end{IEEEeqnarray*}
We show that $\tilde{\kappa}_t \circ P_{\gamma(t)}(R_{\gamma(t)}) = \widetilde{\tau(\kappa)}_t(R_{\gamma(t)})$.
\begin{IEEEeqnarray*}{rCls+x*}
\tilde{\kappa}_{t} \circ P_{\gamma(t)} (R_{\gamma(t)}) & = & \tilde{\kappa}_t(0, \tilde{g}^{-1}_{c(t)}(\gamma(t))) & \quad [\text{by \cref{prop:properties of p}}] \\
& = & (0, \kappa_t \circ \tilde{g}^{-1}_{c(t)}(\gamma(t))) & \quad [\text{by definition of $\tilde{\kappa}_t$}] \\
& = & (0, \kappa_t(\dot{c}(t))) & \quad [\text{by definition of $\gamma$}] \\
& = & (0,e_1) & \quad [\text{since $\kappa \in \mathcal{T}(c^* TL)$}] \\
& = & \widetilde{\tau(\kappa)}_t (R_{\gamma(t)}) & \quad [\text{by definition of $\widetilde{\tau(\kappa)}_t$}].
\end{IEEEeqnarray*}
The previous computations show that
\begin{IEEEeqnarray*}{c+x*}
\tilde{\kappa}_t \circ P_{\gamma(t)} (\xi_{\gamma(t)}^{\perp}) = \ker \pi_{\R^{2n-2}},
\end{IEEEeqnarray*}
which in turn implies that
\begin{IEEEeqnarray}{c+x*}
\plabel{eq:image of p kappa}
\tilde{\kappa}_t \circ P_{\gamma(t)} (\xi_{\gamma(t)}) = (\ker \pi_{\R^{2n-2}})^{\perp} = \img \iota_{\R^{2n - 2}}.
\end{IEEEeqnarray} Finally, we show that $\tilde{\kappa}_t \circ P_{\gamma(t)}(v) = \widetilde{\tau(\kappa)}_t(v)$ for every $v \in \xi_{\gamma(t)}$. \begin{IEEEeqnarray*}{rCls+x*} \widetilde{\tau(\kappa)}_t (v) & = & \iota_{\R^{2n-2}} \circ \tau(\kappa)_t (v) & \quad [\text{by definition of $\widetilde{\tau(\kappa)}_t$}] \\ & = & \iota_{\R^{2n-2}} \circ \pi_{\R^{2n-2}} \circ \tilde{\kappa}_t \circ P_{\gamma(t)} \circ \iota_{\xi_{\gamma(t)}} (v) & \quad [\text{by definition of $\tau$}] \\ & = & \tilde{\kappa}_t \circ P_{\gamma(t)}(v) & \quad [\text{by Equation \eqref{eq:image of p kappa}}]. & \qedhere \end{IEEEeqnarray*} \end{proof} This finishes the ``construction'' of diagram \eqref{eq:diagram of maps of trivializations}. Our goal is to show that $\conleyzehnder^{\tau(\kappa)}(\gamma)$ is independent of the choice of $\kappa \in \mathcal{T}(c^* TL)$ (see \cref{thm:index of geodesic or reeb orbit isometric triv}). Indeed, we will actually show that $\conleyzehnder^{\tau(\kappa)}(\gamma) = \morse(c)$. To make sense of this statement, we start by explaining the meaning of the Morse index of a geodesic. \begin{remark} \label{rmk:morse theory for geodesics} Define $X \coloneqq W^{1,2}(\R / \ell \Z,L)$ (maps from $\R / \ell \Z$ to $L$ of Sobolev class $W ^{1,2}$). Then, $X$ is a Hilbert manifold. At $c \in X$, the tangent space of $X$ is \begin{IEEEeqnarray*}{c+x*} T_{c} X = W ^{1,2}(\R / \ell \Z,c^* TL), \end{IEEEeqnarray*} which is a Hilbert space. We can define the \textbf{Energy functional} by \begin{IEEEeqnarray*}{rrCl} E \colon & X & \longrightarrow & \R \\ & c & \longmapsto & \frac{1}{2} \int_{\R / \ell \Z}^{} \p{||}{}{ \dot{c}(t) }^2 \edv t. \end{IEEEeqnarray*} Then, $c \in X$ is a critical point of $E$ if and only if $c$ is smooth and a geodesic in $L$. We say that $c$ is \textbf{nondegenerate} if the kernel of the map \begin{IEEEeqnarray*}{c+x*} \operatorname{Hess} E (c) \colon T _{c} X \longrightarrow T _{c}^* X \end{IEEEeqnarray*} is $\ker \operatorname{Hess} E(c) = \p{<}{}{\dot{c}}$. If $c$ is a critical point of $E$, i.e. a geodesic, then we define the \textbf{Morse index} of $c$ by \begin{IEEEeqnarray*}{c+x*} \morse(c) = \sup \left\{ \dim V \ \middle\vert \begin{array}{l} V \text{ is a subspace of } T _{c} X, \\ \operatorname{Hess} E (c)|_V \colon V \times V \longrightarrow \R \text{ is negative definite} \end{array} \right\}. \end{IEEEeqnarray*} Recall that $c$ is a geodesic if and only if $\gamma \coloneqq \tilde{g} \circ \dot{c}$ is a Reeb orbit. In this case, $c$ is a nondegenerate critical point of $E$ if and only if ${\gamma}$ is a nondegenerate Reeb orbit. \end{remark} \begin{definition} \phantomsection\label{lem:maslov index of a geodesic} For $\sigma \in \mathcal{T}((z \circ c)^* T T^* L)$, we define the \textbf{Maslov index} of $c$ with respect to $\sigma$, denoted $\maslov^{\sigma}(c)$, as follows. First, let $W^{c,\sigma}$ be the loop of Lagrangian subspaces of $\R^{2n}$ given by \begin{IEEEeqnarray*}{c+x*} W^{c,\sigma}(t) \coloneqq \sigma_t \circ \dv z(c(t)) (T_{c(t)} L). \end{IEEEeqnarray*} Then, define $\maslov^{\sigma}(c)$ to be the Maslov index of $W^{c,\sigma}$ in the sense of \cref{thm:maslov lagrangian properties}. \end{definition} \begin{lemma} \label{lem:maslov index of a geodesic is zero} For any $\kappa \in \mathcal{T}(c^* TL)$, \begin{IEEEeqnarray*}{c+x*} \maslov^{\sigma(\kappa)}(c) = 0. \end{IEEEeqnarray*} \end{lemma} \begin{proof} We will show that $W^{c,\sigma(\kappa)} = \{0\} \oplus \R^{n}$. 
By the zero property of the Maslov index for a path of Lagrangian subspaces, this implies the result. We start by showing that $P^V_{z(x)} \circ \dv z(x) = 0$ for any $x \in L$. For any $w \in T_x L$, \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{P^V_{z(x)} \circ \dv z(x) w}\\ \quad & = & (I^V_{z(x)})^{-1} (\dv z(x) w - I^H_{z(x)} \circ P^H_{z(x)} (\dv z(x) w)) & \quad [\text{by definition of $P^V$}] \\ & = & (I^V_{z(x)})^{-1} (\dv z(x) w - \dv z(x) \circ \dv \pi (z(x)) \circ \dv z(x) w) & \quad [\text{by definition of $I^H$ and $P^H$}] \\ & = & 0 & \quad [\text{since $\pi \circ z = \id_L$}]. \end{IEEEeqnarray*} We compute $W^{c,\sigma(\kappa)}$. \begin{IEEEeqnarray*}{rCls+x*} W^{c,\sigma(\kappa)} & = & \sigma(\kappa)_t \circ \dv z(c(t)) (T_{c(t)} L) & \quad [\text{by definition of $W^{c,\sigma(\kappa)}$}] \\ & = & \tilde{\kappa}_t \circ P_{z \circ c(t)} \circ \dv z(c(t))(T_{c(t)} L) & \quad [\text{by definition of $\sigma(\kappa)$}] \\ & = & \tilde{\kappa}_t (0, P^H_{z \circ c(t)} \circ \dv z(c(t)) (T_{c(t)} L) ) & \quad [\text{since $P^V_{z(c(t))} \circ \dv z(c(t)) = 0$}] \\ & = & (0, \kappa_t \circ P^H_{z \circ c(t)} \circ \dv z(c(t)) (T_{c(t)} L) ) & \quad [\text{by definition of $\tilde{\kappa}_t$}] \\ & = & (0, \kappa_t(T_{c(t)} L)) & \quad [\text{since $P^H_{z \circ c(t)} = \dv \pi(z \circ c(t))$}] \\ & = & \{0\} \oplus \R^n & \quad [\text{since $\kappa_t$ is an isomorphism}]. & \qedhere \end{IEEEeqnarray*} \end{proof} The following theorem was originally proven in \cite{viterboNewObstructionEmbedding1990}, but we will use a restatement of it from \cite{cieliebakPuncturedHolomorphicCurves2018}. \begin{theorem}[{\cite[Lemma 2.1]{cieliebakPuncturedHolomorphicCurves2018}}] \label{thm:index of geod reeb} For any $\tau \in \mathcal{T}(\gamma^* \xi)$, \begin{IEEEeqnarray*}{c+x*} \conleyzehnder^{\tau}({\gamma}) + \maslov^{\sigma_0(\tau)}(c) = \morse(c). \end{IEEEeqnarray*} \end{theorem} \begin{theorem} \label{thm:index of geodesic or reeb orbit isometric triv} For any $\kappa \in \mathcal{T}(c^* TL)$, \begin{IEEEeqnarray*}{c+x*} \conleyzehnder^{\tau(\kappa)}({\gamma}) = \morse(c). \end{IEEEeqnarray*} \end{theorem} \begin{proof} By \cref{lem:diagram of maps of trivalizations commutes,lem:maslov index of a geodesic is zero,thm:index of geod reeb}. \end{proof} Finally, we state a result which will be necessary to prove \cref{thm:lagrangian vs g tilde}. \begin{lemma}[{\cite[Lemma 2.2]{cieliebakPuncturedHolomorphicCurves2018}}] \label{lem:geodesics lemma CM abs} Let $L$ be a compact $n$-dimensional manifold without boundary. Let $\mathrm{Riem}(L)$ be the set of Riemannian metrics on $L$, equipped with the $C^2$-topology. If $g_0 \in \mathrm{Riem}(L)$ is a Riemannian metric of nonpositive sectional curvature and $\mathcal{U} \subset \mathrm{Riem}(L)$ is an open neighbourhood of $g_0$, then for all $\ell_0 > 0$ there exists a Riemannian metric $g \in \mathcal{U}$ on $L$ such that with respect to $g$, any closed geodesic $c$ in $L$ of length $\ell(c) \leq \ell_0$ is noncontractible, nondegenerate, and such that $0 \leq \morse(c) \leq n - 1$. \end{lemma} \chapter{Holomorphic curves} \label{chp:holomorphic curves} \section{Holomorphic curves} In this section we define asymptotically cylindrical holomorphic curves (see \cref{def:asy cyl holomorphic curve}). The domain of such a curve is a punctured Riemann surface (see \cref{def:punctures asy markers cyl ends}), and the target is a symplectic cobordism (see \cref{def:symplectic cobordism}). 
\begin{definition} \label{def:punctures asy markers cyl ends} Let $(\Sigma, j)$ be a Riemann surface. A \textbf{puncture} on $\Sigma$ is a point $z \in \Sigma$. Denote by $D$ the closed unit disk in $\C$ and by $Z^{\pm}$ the positive or negative half-cylinders: \begin{IEEEeqnarray*}{rCls+x*} Z^+ & \coloneqq & \R_{\geq 0} \times S^1, \\ Z^- & \coloneqq & \R_{\leq 0} \times S^1, \end{IEEEeqnarray*} with coordinates $(s,t) \in Z^{\pm}$ and complex structure $j$ given by $j(\partial_s) = \partial_t$. Consider the holomorphic maps \begin{IEEEeqnarray*}{rClCrCl} \psi^\pm \colon Z^{\pm} & \longrightarrow & D \setminus \{0\}, & \quad & \psi^\pm(s,t) & = & \exp(\mp 2 \pi (s + i t)). \end{IEEEeqnarray*} A positive or negative \textbf{cylindrical end} near $z$ is a holomorphic embedding $\phi^{\pm} \colon Z^{\pm} \longrightarrow \Sigma \setminus \{z\}$ of the form $\phi^{\pm} \coloneqq \varphi \circ \psi^\pm$, where $\varphi \colon D \longrightarrow \Sigma$ is a holomorphic embedding such that $\varphi(0) = z$. In this case, we say that $(s,t)$ are \textbf{cylindrical coordinates} near $z$. A \textbf{punctured Riemann surface} is a Riemann surface $(\Sigma, j)$ together with sets \begin{IEEEeqnarray*}{rClCrCl} \mathbf{z} & = & \mathbf{z}^+ \cup \mathbf{z}^-, & \quad & \mathbf{z}^{\pm} & = & \{z^{\pm}_1,\ldots,z^{\pm}_{p^{\pm}}\} \subset \Sigma, \quad \mathbf{z}^+ \cap \mathbf{z}^- = \varnothing, \end{IEEEeqnarray*} of positive and negative punctures. In this case, we denote $\dot{\Sigma} \coloneqq \Sigma \setminus \mathbf{z}$. Whenever we talk about cylindrical coordinates near a puncture, it is implicit that we mean the cylindrical coordinates induced from a positive or negative cylindrical end, in accordance with whether the puncture is positive or negative. \end{definition} \begin{definition} \label{def:symplectic cobordism} A \textbf{symplectic cobordism} is a compact symplectic manifold $(X, \omega)$ with boundary $\partial X$, together with a $1$-form $\lambda$ defined on an open neighbourhood of $\partial X$, such that $\edv \lambda = \omega$ and the restriction of $\lambda$ to $\partial X$ is a contact form. Let $\partial^+ X$ (respectively $\partial^- X$) be the subset of $\partial X$ where the orientation defined by $\lambda|_{\partial X}$ as a contact form agrees with the boundary orientation (respectively with the negative boundary orientation). \end{definition} \begin{definition} \phantomsection\label{def:liouville cobordism} A \textbf{Liouville cobordism} is a symplectic cobordism $(X,\omega,\lambda)$ such that $\lambda$ is defined on $X$. \end{definition} \begin{example} A Liouville domain is a Liouville cobordism whose negative boundary is empty. \end{example} \begin{remark} We can define the completion of a symplectic cobordism $(X,\omega,\lambda)$ as in \cref{sec:completion of liouville domain}, with the difference that now we attach half-symplectizations to the negative and positive boundaries: \begin{IEEEeqnarray*}{c+x*} \hat{X} \coloneqq \R_{\leq 0} \times \partial^- X \cup_{\partial^- X} X \cup_{\partial^+ X} \R_{\geq 0} \times \partial^+ X. \end{IEEEeqnarray*} \end{remark} \begin{definition} \label{def:admissible} Let $(X,\omega,\lambda)$ be a symplectic cobordism and consider its completion $\hat{X}$. An almost complex structure $J$ on $\hat{X}$ is \textbf{cylindrical} if $J$ is compatible with $\hat{\omega}$ and $J$ is cylindrical on $\R_{\geq 0} \times \partial^+ X$ and $\R_{\leq 0} \times \partial^- X$. Denote by $\mathcal{J}(X)$ the set of such $J$.
\end{definition} \begin{definition} \label{def:asy cyl holomorphic curve} Let $(X, \omega, \lambda)$ be a symplectic cobordism, $J \in \mathcal{J}(X)$ be a cylindrical almost complex structure on $\hat{X}$ and $\Gamma^{\pm} = (\gamma^{\pm}_1, \ldots, \gamma^{\pm}_{p^{\pm}})$ be tuples of Reeb orbits in $\partial^{\pm} X$. Let $T_{i}^{\pm}$ denote the period of $\gamma_i^{\pm}$. An \textbf{asymptotically cylindrical holomorphic curve} in $\hat{X}$ from $\Gamma^-$ to $\Gamma^+$ is given by a Riemann surface $(\Sigma, j)$ with punctures $\mathbf{z}^{\pm} = \{z_1^{\pm}, \ldots, z^{\pm}_{p^{\pm}}\}$ together with a $J$-holomorphic map $u \colon \dot{\Sigma} \longrightarrow \hat{X}$, such that: \begin{enumerate} \item $u$ is positively asymptotic to $\gamma^{+}_i$ at $z^{+}_{i}$, i.e. there exist cylindrical coordinates $(s,t)$ near $z_i^+$ such that $u(s,t) \in \R_{\geq 0} \times \partial^+ X$ for $s$ big enough and \begin{IEEEeqnarray*}{rrCl} \lim_{s \to + \infty} & \pi_{\R} \circ u(s,t) & = & + \infty, \\ \lim_{s \to + \infty} & \pi_{\partial^+ X} \circ u(s,t) & = & \gamma^+_i(t T^+_i); \end{IEEEeqnarray*} \item $u$ is negatively asymptotic to $\gamma^{-}_i$ at $z^{-}_{i}$, i.e. there exist cylindrical coordinates $(s,t)$ near $z_i^-$ such that $u(s,t) \in \R_{\leq 0} \times \partial^- X$ for $s$ small enough and \begin{IEEEeqnarray*}{rrCl} \lim_{s \to - \infty} & \pi_{\R} \circ u(s,t) & = & - \infty, \\ \lim_{s \to - \infty} & \pi_{\partial^- X} \circ u(s,t) & = & \gamma^-_i(t T^-_i). \end{IEEEeqnarray*} \end{enumerate} \end{definition} We now explain some analytical properties of asymptotically cylindrical holomorphic curves. The key results are the maximum principle (\cref{thm:maximum principle holomorphic}) and a lemma comparing the energy of such a curve and the action of the asymptotic Reeb orbits (\cref{lem:action energy for holomorphic}). The following lemma is an auxiliary result which will allow us to prove that the energy (see \cref{def:energy of a asy cylindrical holomorphic curve}) is a nonnegative number. \begin{lemma} \label{lem:holomorphic curves in symplectizations} Let $(M, \alpha)$ be a contact manifold and $J$ be a cylindrical almost complex structure on $\R \times M$. If $u = (a, f) \colon \dot{\Sigma} \longrightarrow \R \times M$ is a holomorphic curve, then $f^* \edv \alpha \geq 0$ and \begin{IEEEeqnarray}{rCls+x*} - \edv a \circ j & = & f^* \alpha \plabel{eq:holomorphic curves in symplectizations 1} \\ \pi_{\xi} \circ \dv f \circ j & = & J_{\xi}({f}) \circ \pi_{\xi} \circ \dv f. \plabel{eq:holomorphic curves in symplectizations 2} \end{IEEEeqnarray} \end{lemma} \begin{proof} We prove equation \eqref{eq:holomorphic curves in symplectizations 1}: \begin{IEEEeqnarray*}{rCls+x*} - \edv a \circ j & = & - \edv r \circ \dv u \circ j & \quad [\text{by definition of $a$}] \\ & = & - \edv r \circ J({u}) \circ \dv u & \quad [\text{${u}$ is holomorphic}] \\ & = & \alpha \circ \dv u & \quad [\text{by \cref{lem:J cylindrical forms}}] \\ & = & f^* \alpha & \quad [\text{by definition of pullback}]. \end{IEEEeqnarray*} Equation \eqref{eq:holomorphic curves in symplectizations 2} follows by applying $\pi_{\xi} \colon T(\R \times M) \longrightarrow \xi$ to the equation $J \circ Tu = Tu \circ j$. 
We show that $f^* \edv \alpha \geq 0$: \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{f^*\edv \alpha(S, j (S))}\\ \quad & = & \edv \alpha (\dv f (S), \dv f \circ j (S)) & \quad [\text{by definition of pullback}] \\ & = & \edv \alpha (\pi_{\xi} \circ \dv f (S), \pi_{\xi} \circ \dv f \circ j (S)) & \quad [\text{$TM = \p{<}{}{R} \directsum \xi = \ker \edv \alpha \directsum \ker \alpha$}] \\ & = & \edv \alpha (\pi_{\xi} \circ \dv f (S), J_{\xi}(f) \circ \pi_{\xi} \circ \dv f (S)) & \quad [\text{by Equation \eqref{eq:holomorphic curves in symplectizations 2}}] \\ & = & \| \pi_{\xi} \circ \dv f (S) \|^2_{J_{\xi}({f}), \edv \alpha} & \quad [\text{since $J$ is cylindrical}] \\ & \geq & 0. & & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{lemma} \label{lem:laplacian} Let $\omega_{\dot{\Sigma}}$ be a symplectic form on $\dot{\Sigma}$ such that $g_{\dot{\Sigma}} \coloneqq \omega_{\dot{\Sigma}}(\cdot, j \cdot)$ is a Riemannian metric. Denote by $\operatorname{dvol}_{\dot{\Sigma}}$ the Riemannian volume element of $\dot{\Sigma}$. Let $a$ be a function on $\dot{\Sigma}$ and consider the Laplacian of $a$, $\Delta a \coloneqq \operatorname{div} (\nabla a)$. Then, $\omega_{\dot{\Sigma}} = \operatorname{dvol}_{\dot{\Sigma}}$ and \begin{IEEEeqnarray*}{c+x*} \Delta a \, \omega_{\dot{\Sigma}} = - \edv (\edv a \circ j). \end{IEEEeqnarray*} \end{lemma} \begin{proof} For any unit vector $S \in T \dot{\Sigma}$, if we define $T \coloneqq j (S)$ then $\{S, T\}$ is an orthonormal basis of $T \dot{\Sigma}$ and $\omega_{\dot{\Sigma}}(S, T) = 1$, which implies $\omega_{\dot{\Sigma}} = \operatorname{dvol}_{\dot{\Sigma}}$. We now prove the formula for the Laplacian. \begin{IEEEeqnarray*}{rCls+x*} \Delta a \, \omega_{\dot{\Sigma}} & = & \operatorname{div} (\nabla a) \omega_{\dot{\Sigma}} & \quad [\text{by definition of Laplacian}] \\ & = & \ldv{\nabla a} \omega_{\dot{\Sigma}} & \quad [\text{by definition of divergence and $\omega_{\dot{\Sigma}} = \operatorname{dvol}_{\dot{\Sigma}}$}] \\ & = & \edv \iota_{\nabla a} \omega_{\dot{\Sigma}} & \quad [\text{by the Cartan magic formula}]. \end{IEEEeqnarray*} It remains to show that $\iota_{\nabla a} \omega_{\dot{\Sigma}} = - \edv a \circ j$. \begin{IEEEeqnarray*}{rCls+x*} \iota_{\nabla a} \omega_{\dot{\Sigma}} (S) & = & \omega_{\dot{\Sigma}} (\nabla a, S) & \quad [\text{by definition of interior product}] \\ & = & - \omega_{\dot{\Sigma}} (\nabla a, j \circ j (S)) & \quad [\text{by definition of almost complex structure}] \\ & = & - g_{\dot{\Sigma}} (\nabla a, j (S)) & \quad [\text{by definition of $g_{\dot{\Sigma}}$}] \\ & = & - \edv a \circ j (S) & \quad [\text{by definition of gradient}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{lemma}[maximum principle] \label{thm:maximum principle holomorphic} Assume that $\dot{\Sigma}$ is connected. Let $(M, \alpha)$ be a contact manifold and $J$ be a cylindrical almost complex structure on $\R \times M$. If \begin{IEEEeqnarray*}{c+x*} u = (a, f) \colon \dot{\Sigma} \longrightarrow \R \times M \end{IEEEeqnarray*} is a holomorphic curve and $a \colon \dot{\Sigma} \longrightarrow \R$ has a local maximum then $a$ is constant. \end{lemma} \begin{proof} Define $L = -\Delta$. The operator $L$ is a linear elliptic partial differential operator (as in \cite[p.~312]{evansPartialDifferentialEquations2010}). We show that $L a \leq 0$. For this, choose $\omega_{\dot{\Sigma}}$ a symplectic structure on $\dot{\Sigma}$ such that $g_{\dot{\Sigma}} \coloneqq \omega_{\dot{\Sigma}}(\cdot, j \cdot)$ is a Riemannian metric. 
\begin{IEEEeqnarray*}{rCls+x*} L a \, \omega_{\dot{\Sigma}} & = & - \Delta a \, \omega_{\dot{\Sigma}} & \quad [\text{by definition of $L$}] \\ & = & \edv (\edv a \circ j) & \quad [\text{by \cref{lem:laplacian}}] \\ & = & - \edv f^* \alpha & \quad [\text{by \cref{lem:holomorphic curves in symplectizations}}] \\ & = & - f^* \edv \alpha & \quad [\text{by naturality of exterior derivative}] \\ & \leq & 0 & \quad [\text{by \cref{lem:holomorphic curves in symplectizations}}]. \end{IEEEeqnarray*} This shows that $L a \leq 0$. By the strong maximum principle for elliptic partial differential operators in \cite[p.~349-350]{evansPartialDifferentialEquations2010}, if $a$ has a local maximum then $a$ is constant. \end{proof} \begin{lemma} \label{lem:integrand of energy is well-defined} Let $(V,j)$ be a complex vector space of real dimension 2, $(W,J,\omega,g)$ be a complex vector space with a symplectic form $\omega$ and inner product $g = \omega(\cdot,J \cdot)$, and $\phi \colon V \longrightarrow W$ be a linear map. For each choice of $s \in V$, define \begin{IEEEeqnarray*}{rCls+x*} t & \coloneqq & js, \\ \{\sigma, \tau\} & \coloneqq & \text{basis of } V^* \text{ dual to } \{s,t\}, \\ \omega_V & \coloneqq & \sigma \wedge \tau, \\ \| \phi \|^2 & \coloneqq & \| \phi s \|^2 + \|\phi t\|^2. \end{IEEEeqnarray*} Then, \begin{IEEEeqnarray*}{c+x*} \frac{1}{2} \| \phi \|^2 \omega_V = (\phi ^{1,0})^* \omega - (\phi ^{0,1})^* \omega, \end{IEEEeqnarray*} which is independent of the choice of $s$. \end{lemma} \begin{proof} Recall the definitions of $\phi^{1,0}$ and $\phi^{0,1}$: \begin{IEEEeqnarray*}{rCls+x*} \phi^{1,0} & \coloneqq & \frac{1}{2} (\phi - J \circ \phi \circ j), \\ \phi^{0,1} & \coloneqq & \frac{1}{2} (\phi + J \circ \phi \circ j). \end{IEEEeqnarray*} These equations imply that $\phi^{1,0}$ is holomorphic, while $\phi^{0,1}$ is anti-holomorphic: \begin{IEEEeqnarray}{c+x*} \plabel{eq:phi holo and anti holo} \phi^{1,0} \circ j = J \circ \phi^{1,0}, \qquad \phi^{0,1} \circ j = - J \circ \phi^{0,1}. \end{IEEEeqnarray} Finally, we compute \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\| \phi \|^2 \omega_V(s,js)} \\ \quad & = & \| \phi (s) \|^2 + \| \phi \circ j (s) \|^2 & \quad [\text{definitions of $\|\phi\|$, $\omega_V$}] \\ & = & \| \phi ^{1,0} (s) + \phi ^{0,1} (s) \|^2 + \| \phi ^{1,0} \circ j (s) + \phi ^{0,1} \circ j (s) \|^2 & \quad [\text{since $\phi = \phi^{1,0} + \phi^{0,1}$}] \\ & = & \| \phi ^{1,0} (s) + \phi ^{0,1} (s) \|^2 + \| J \circ \phi ^{1,0} (s) - J \circ \phi ^{0,1} (s) \|^2 & \quad [\text{by \eqref{eq:phi holo and anti holo}}] \\ & = & \| \phi ^{1,0} (s) + \phi ^{0,1} (s) \|^2 + \| \phi ^{1,0} (s) - \phi ^{0,1} (s) \|^2 & \quad [\text{since $g = \omega(\cdot, J \cdot)$}] \\ & = & 2 \| \phi ^{1,0} (s) \|^2 + 2 \| \phi ^{0,1} (s) \|^2 & \quad [\text{by the parallelogram law}] \\ & = & 2 \omega (\phi ^{1,0} (s), J \circ \phi ^{1,0} (s)) + 2 \omega (\phi ^{0,1} (s), J \circ \phi ^{0,1} (s)) & \quad [\text{since $g = \omega(\cdot, J \cdot)$}] \\ & = & 2 \omega (\phi ^{1,0} (s), \phi ^{1,0} \circ j (s)) - 2 \omega (\phi ^{0,1} (s), \phi ^{0,1} \circ j (s)) & \quad [\text{by \eqref{eq:phi holo and anti holo}}] \\ & = & 2 (\phi ^{1,0})^* \omega (s,js) - 2 (\phi ^{0,1})^* \omega (s,js) & \quad [\text{by definition of pullback}]. 
& \qedhere \end{IEEEeqnarray*} \end{proof} \begin{definition} \phantomsection\label{def:energy of a asy cylindrical holomorphic curve} Define a piecewise smooth $2$-form $\tilde{\omega} \in \Omega^2(\hat{X})$ by \begin{IEEEeqnarray*}{c+x*} \tilde{\omega} \coloneqq \begin{cases} \edv \lambda|_{\partial^+ X} & \text{on } \R_{\geq 0} \times \partial^+ X, \\ \omega & \text{on } X, \\ \edv \lambda|_{\partial^- X} & \text{on } \R_{\leq 0} \times \partial^- X. \end{cases} \end{IEEEeqnarray*} If $u$ is an asymptotically cylindrical holomorphic curve, its \textbf{energies} are given by \begin{IEEEeqnarray*}{rClCl} E_{\hat{\omega}}(u) & \coloneqq & \int_{\dot{\Sigma}}^{} u^* \hat{\omega}, \\ E_{\tilde{\omega}}(u) & \coloneqq & \int_{\dot{\Sigma}}^{} u^* \tilde{\omega}. \end{IEEEeqnarray*} \end{definition} We point out that if $u$ has positive punctures, then $E_{\hat{\omega}}(u) = + \infty$. Whenever we talk about the energy of an asymptotically cylindrical holomorphic curve, we mean the $E_{\tilde{\omega}}$ energy, unless otherwise specified. We included $E_{\hat{\omega}}$ in the definition above because we will need to use it in \cref{thm:lagrangian vs g tilde} to compare the Lagrangian and the McDuff--Siegel capacities. In \cref{lem:energy wrt different forms}, we compare $E_{\hat{\omega}}$ and $E_{\tilde{\omega}}$. \begin{lemma} \label{lem:action energy for holomorphic} If $(X, \omega, \lambda)$ is a Liouville cobordism then \begin{IEEEeqnarray*}{c+x*} 0 \leq E_{\tilde{\omega}}(u) = \mathcal{A}(\Gamma^+) - \mathcal{A}(\Gamma^-). \end{IEEEeqnarray*} \end{lemma} \begin{proof} Since $(X, \omega, \lambda)$ is a Liouville cobordism, $E_{\tilde{\omega}}(u)$ is given by \begin{IEEEeqnarray*}{rCls+x*} E_{\tilde{\omega}}(u) & = & \int_{\dot{\Sigma}}^{} u^* \tilde{\omega} \\ & = & \int_{u^{-1}(\R_{\leq 0} \times \partial^- X)} u^* \edv \lambda|_{\partial^- X} + \int_{u^{-1}(X)} u^* \edv \lambda + \int_{u^{-1}(\R_{\geq 0} \times \partial^+ X)} u^* \edv \lambda|_{\partial^+ X}. \end{IEEEeqnarray*} Here, the first and third terms are nonnegative by \cref{lem:holomorphic curves in symplectizations}, while the second term is nonnegative by \cref{lem:integrand of energy is well-defined}. This shows that $E_{\tilde{\omega}}(u) \geq 0$. Since $u$ is asymptotic to $\Gamma^{\pm}$ and by Stokes' theorem, $E_{\tilde{\omega}}(u) = \mathcal{A}(\Gamma^+) - \mathcal{A}(\Gamma^-)$. \end{proof} \begin{lemma} \label{lem:energy wrt different forms} Assume that $\Sigma$ has no positive punctures. Let $(X, \omega, \lambda)$ be a symplectic cobordism, and $J \in \mathcal{J}(X)$ be a cylindrical almost complex structure on $\hat{X}$. Assume that the canonical symplectic embedding \begin{align*} (\R_{\leq 0} \times \partial^- X, \edv (e^r \lambda|_{\partial^- X})) \longrightarrow (\hat{X}, \hat{\omega}) & \\ \intertext{can be extended to a symplectic embedding} (\R_{\leq K} \times \partial^- X, \edv (e^r \lambda|_{\partial^- X})) \longrightarrow (\hat{X}, \hat{\omega}) & \end{align*} for some $K > 0$. Let $u \colon \dot{\Sigma} \longrightarrow \hat{X}$ be a $J$-holomorphic curve which is negatively asymptotic to a tuple of Reeb orbits $\Gamma$ of $\partial^- X$. Consider the energies $E_{\hat{\omega}}(u)$ and $E_{\tilde{\omega}}(u)$ of \cref{def:energy of a asy cylindrical holomorphic curve}. Then, \begin{IEEEeqnarray}{rCls+x*} \mathcal{A}(\Gamma) & \leq & \frac{1 }{e^K - 1} E_{\tilde{\omega}}(u), \plabel{eq:action is bounded by vertical energy} \\ E_{\hat{\omega}}(u) & \leq & \frac{e^K}{e^K - 1} E_{\tilde{\omega}}(u). 
\plabel{eq:energy is bounded by vertical energy} \end{IEEEeqnarray} \end{lemma} \begin{proof} It is enough to show that \begin{IEEEeqnarray}{rCls+x*} E_{\hat{\omega}}(u) - E_{\tilde{\omega}}(u) & = & \mathcal{A}(\Gamma), \plabel{eq:vertical energy bounds 1} \\ E_{\hat{\omega}}(u) & \geq & e^K \mathcal{A}(\Gamma), \plabel{eq:vertical energy bounds 2} \end{IEEEeqnarray} since these equations imply Equations \eqref{eq:action is bounded by vertical energy} and \eqref{eq:energy is bounded by vertical energy}. Since $u$ has no positive punctures, the maximum principle (\cref{thm:maximum principle holomorphic}) implies that $u$ is contained in $\R_{\leq 0} \times \partial^- X \cup X$. We prove Equation \eqref{eq:vertical energy bounds 1}. For simplicity, denote $M = \partial^- X$ and $\alpha = \lambda|_{\partial^- X}$. \begin{IEEEeqnarray*}{rCls+x*} E_{\hat{\omega}}(u) - E_{\tilde{\omega}}(u) & = & \int_{\dot{\Sigma}}^{} u^* (\hat{\omega} - \tilde{\omega}) & \quad [\text{by definition of $E_{\hat{\omega}}$ and $E_{\tilde{\omega}}$}] \\ & = & \int_{u^{-1}(\R_{\leq 0} \times M)}^{} u^* \edv ((e^r - 1) \alpha) & \quad [\text{by definition of $\hat{\omega}$ and $\tilde{\omega}$}] \\ & = & \mathcal{A}(\Gamma) & \quad [\text{by Stokes' theorem}]. \end{IEEEeqnarray*} We prove Equation \eqref{eq:vertical energy bounds 2}. \begin{IEEEeqnarray*}{rCls+x*} E_{\hat{\omega}}(u) & = & \int_{\dot{\Sigma}}^{} u^* \hat{\omega} & \quad [\text{by definition of $E_{\hat{\omega}}$}] \\ & \geq & \int_{u^{-1}(\R_{\leq K} \times M)}^{} u^* \edv (e^r \alpha) & \quad [\text{by definition of $\hat{\omega}$ and $u^* \hat{\omega} \geq 0$}] \\ & = & e^K \int_{u^{-1}( \{K\} \times M)}^{} u^* \alpha & \quad [\text{by Stokes' theorem}] \\ & = & e^K \int_{u^{-1}( \R_{\leq K} \times M)}^{} u^* \edv \alpha + e^K \mathcal{A}(\Gamma) & \quad [\text{by Stokes' theorem}] \\ & \geq & e^K \mathcal{A}(\Gamma) & \quad [\text{by \cref{lem:holomorphic curves in symplectizations}}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \section{Moduli spaces of Holomorphic curves} \label{sec:moduli spaces of holomorphic curves} If $(M, \alpha)$ is a contact manifold, we denote by $\mathcal{J}(M)$ the set of cylindrical almost complex structures on $\R \times M$ (see \cref{def:J cylindrical}). If $(X, \omega, \lambda)$ is a symplectic cobordism, we denote by $\mathcal{J}(X)$ the set of cylindrical almost complex structures on $\hat{X}$ (see \cref{def:admissible}). If $J^{\pm} \in \mathcal{J}(\partial^{\pm} X)$ is a cylindrical almost complex structure on $\R \times \partial^{\pm} X$, then we define the following subsets of $\mathcal{J}(X)$: \begin{IEEEeqnarray*}{rCls+x*} \mathcal{J}^{J^+}(X) & \coloneqq & \{ J \in \mathcal{J}(X) \mid J = J^{+} \text{ on } \R_{\geq 0} \times \partial^+ X \}, \\ \mathcal{J}_{J^-}^{\hphantom{J^+}}(X) & \coloneqq & \{ J \in \mathcal{J}(X) \mid J = J^{-} \text{ on } \R_{\leq 0} \times \partial^- X \}, \\ \mathcal{J}^{J^+}_{J^-}(X) & \coloneqq & \{ J \in \mathcal{J}(X) \mid J = J^{+} \text{ on } \R_{\geq 0} \times \partial^+ X \text{ and } J = J^{-} \text{ on } \R_{\leq 0} \times \partial^- X \}. \end{IEEEeqnarray*} Let $\Gamma^{\pm} = (\gamma^{\pm}_1, \ldots, \gamma^{\pm}_{p ^{\pm}})$ be a tuple of Reeb orbits in $\partial^{\pm} X$ and $J \in \mathcal{J}(X)$ be a cylindrical almost complex structure on $\hat{X}$. 
Define a moduli space \begin{IEEEeqnarray*}{c+x*} \mathcal{M}^{J}_{X}(\Gamma^+, \Gamma^-) \coloneqq \left\{ (\Sigma, u) \ \middle\vert \begin{array}{l} \Sigma \text{ is a connected closed Riemann surface} \\ \text{of genus $0$ with punctures $\mathbf{z}^{\pm} = \{z^{\pm}_1, \ldots, z^{\pm}_{p ^{\pm}}\}$,} \\ u \colon \dot{\Sigma} \longrightarrow \hat{X} \text{ is as in \cref{def:asy cyl holomorphic curve}} \end{array} \right\} / \sim, \end{IEEEeqnarray*} where $(\Sigma_0, u_0) \sim (\Sigma_1, u_1)$ if and only if there exists a biholomorphism $\phi \colon \Sigma_0 \longrightarrow \Sigma_1$ such that $u_1 \circ \phi = u_0$ and $\phi(z^{\pm}_{0,i}) = z^{\pm}_{1,i}$ for every $i = 1,\ldots,p ^{\pm}$. If $\Gamma^{\pm} = (\gamma^{\pm}_1, \ldots, \gamma^{\pm}_{p ^{\pm}})$ is a tuple of Reeb orbits on a contact manifold $M$ and $J \in \mathcal{J}(M)$, we define a moduli space $\mathcal{M}_{M}^{J}(\Gamma^+, \Gamma^-)$ of holomorphic curves in $\R \times M$ analogously. Since $J$ is invariant with respect to translations in the $\R$ direction, $\mathcal{M}_{M}^{J}(\Gamma^+, \Gamma^-)$ admits an action of $\R$ given by composing curves with translations in the target. One can try to show that the moduli space $\mathcal{M}_{X}^{J}(\Gamma^+, \Gamma^-)$ is transversely cut out by showing that the relevant linearized Cauchy--Riemann operator is surjective at every point of the moduli space. In this case, the moduli space is an orbifold whose dimension is given by the Fredholm index of the linearized Cauchy--Riemann operator. However, since the curves in $\mathcal{M}_{X}^{J}(\Gamma^+, \Gamma^-)$ are not necessarily simple, this argument does not work in general, and we cannot conclude that the moduli space is an orbifold. Nevertheless, the Fredholm theory part of the argument still applies, so we still have a dimension formula. The expected dimension given by the Fredholm index is usually called the virtual dimension. For the moduli space above, the virtual dimension at a point $u$ is given by (see \cite[Section 4]{bourgeoisCoherentOrientationsSymplectic2004}) \begin{IEEEeqnarray*}{c} \operatorname{virdim}_u \mathcal{M}_{X}^{J}(\Gamma^+, \Gamma^-) = (n - 3)(2 - p^+ - p^-) + c_1^{\tau}(u^* T \hat{X}) + \conleyzehnder^{\tau} (\Gamma^+) - \conleyzehnder^{\tau} (\Gamma^-), \end{IEEEeqnarray*} where $\tau$ is a unitary trivialization of the contact distribution over each Reeb orbit. We now discuss curves satisfying a tangency constraint. Our presentation is based on \cite[Section 2.2]{mcduffSymplecticCapacitiesUnperturbed2022} and \cite[Section 3]{cieliebakPuncturedHolomorphicCurves2018}. Let $(X,\omega,\lambda)$ be a symplectic cobordism and $x \in \itr X$. A \textbf{symplectic divisor} through $x$ is a germ of a codimension $2$ symplectic submanifold $D \subset X$ containing $x$. A cylindrical almost complex structure $J \in \mathcal{J}(X)$ is \textbf{compatible} with $D$ if $J$ is integrable near $x$ and $D$ is holomorphic with respect to $J$. We denote by $\mathcal{J}(X,D)$ the set of such almost complex structures. In this case, there are complex coordinates $(z_1, \ldots, z_n)$ near $x$ such that $D$ is given by $h(z_1,\ldots,z_n) = 0$, where $h(z_1,\ldots,z_n) = z_1$. Let $u \colon \Sigma \longrightarrow X$ be a $J$-holomorphic curve together with a marked point $w \in \Sigma$.
For $k \geq 1$, we say that $u$ has \textbf{contact order $k$} to $D$ at $x$ if $u(w) = x$ and\begin{IEEEeqnarray*}{c+x*} (h \circ u \circ \varphi)^{(1)}(0) = \cdots = (h \circ u \circ \varphi)^{(k-1)}(0) = 0, \end{IEEEeqnarray*} for some local biholomorphism $\varphi \colon (\C,0) \longrightarrow (\Sigma, w)$. We point out that the condition of having ``contact order $k$'' as written above is equal to the condition of being ``tangent of order $k-1$'' as defined in \cite[Section 3]{cieliebakPuncturedHolomorphicCurves2018}. Following \cite{mcduffSymplecticCapacitiesUnperturbed2022}, we will use the notation $\p{<}{}{\mathcal{T}^{(k)}x}$ to denote moduli spaces of curves which have contact order $k$, i.e. we will denote them by $\mathcal{M}_{X}^{J}(\Gamma^+, \Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x}$ and $\mathcal{M}_{M}^{J}(\Gamma^+, \Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x}$. The virtual dimension is given by (see \cite[Equation (2.2.1)]{mcduffSymplecticCapacitiesUnperturbed2022}) \begin{IEEEeqnarray*}{l} \operatorname{virdim}_u \mathcal{M}_{X}^{J}(\Gamma^+, \Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x} \\ \quad = (n - 3)(2 - p^+ - p^-) + c_1^{\tau}(u^* T \hat{X}) + \conleyzehnder^{\tau} (\Gamma^+) - \conleyzehnder^{\tau} (\Gamma^-) - 2n - 2k + 4. \end{IEEEeqnarray*} The following theorem says that moduli spaces of simple, asymptotically cylindrical holomorphic curves are transversely cut out. \begin{theorem}[{\cite[Proposition 6.9]{cieliebakSymplecticHypersurfacesTransversality2007}}] \label{thm:transversality with tangency} Let $(X,\omega,\lambda)$ be a symplectic cobordism, $x \in \itr X$ and $D$ be a symplectic divisor at $x$. There exists a comeagre set $\mathcal{J}_{\mathrm{reg}}(X,D) \subset \mathcal{J}(X,D)$ with the following property. If $J \in \mathcal{J}_{\mathrm{reg}}(X,D)$ is a regular almost complex structure, $\Gamma^{\pm} = (\gamma^\pm_1,\ldots,\gamma^\pm_{p^{\pm}})$ is a tuple of Reeb orbits of $\partial^{\pm} X$ and $A \in H_2(X,\Gamma^+ \cup \Gamma^-)$, then the moduli space $\mathcal{M}_{X,A,s}^J(\Gamma^+,\Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x} \subset \mathcal{M}_{X}^J(\Gamma^+,\Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x}$ of simple curves representing the homology class $A$ is a manifold of dimension \begin{IEEEeqnarray*}{l} \dim \mathcal{M}_{X,A,s}^J(\Gamma^+,\Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x} \\ \quad = (n-3)(2 - p^+ - p^-) + 2 c_1^{\tau}(TX) \cdot A + \conleyzehnder^{\tau}(\Gamma^+) - \conleyzehnder^{\tau}(\Gamma^-) - 2n - 2k + 4. \end{IEEEeqnarray*} \end{theorem} We will now use this transversality result to state two lemmas from \cite{cieliebakPuncturedHolomorphicCurves2018}, namely \cref{lem:punctures and tangency,lem:punctures and tangency simple}, which we will use in the proof of \cref{thm:lagrangian vs g tilde}. For the sake of completeness, we will also give proofs of the results. We point out that in order to achieve the conditions in the statement of the lemmas, we can use a metric as in \cref{lem:geodesics lemma CM abs}. Finally, notice that \cref{lem:punctures and tangency} generalizes \cref{lem:punctures and tangency simple} to the case where the curve is not necessarily simple. \begin{lemma}[{\cite[Lemma 3.2]{cieliebakPuncturedHolomorphicCurves2018}}] \phantomsection\label{lem:punctures and tangency simple} Let $(L,g)$ be an $n$-dimensional Riemannian manifold with the property that for some $\ell_0 > 0$, all closed geodesics $\gamma$ of length $\ell(\gamma) \leq \ell_0$ are noncontractible and nondegenerate and have Morse index $\morse(\gamma) \leq n - 1$. 
Let $x \in T^*L$ and $D$ be a symplectic divisor through $x$. For generic $J$, every simple punctured $J$-holomorphic sphere $C$ in $T^*L$ which is asymptotic at the punctures to geodesics of length $\leq \ell_0$ and which has contact order $k$ to $D$ at $x$ must have at least $k + 1$ punctures. \end{lemma} \begin{proof} Let $(\gamma_1, \ldots, \gamma_p)$ be the tuple of asymptotic Reeb orbits of $C$, which have corresponding geodesics also denoted by $(\gamma_1, \ldots, \gamma_p)$. By assumption, $\morse(\gamma_i) \leq n - 1$ for every $i = 1,\ldots,p$. Choose a trivialization $\tau$ of $C^* T T^*L$ such that the induced trivialization over the asymptotic Reeb orbits is as in \cref{thm:index of geodesic or reeb orbit isometric triv}. We show that $p \geq k + 1$. \begin{IEEEeqnarray*}{rCls+x*} 0 & \leq & \dim_{C} \mathcal{M}_{X,s}^J(\Gamma^+,\Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x} & \quad [\text{since $C$ belongs to this moduli space, which is a manifold by \cref{thm:transversality with tangency}}] \\ & = & (n-3)(2-p) + 2 c_1^{\tau}(TX) \cdot [C] + \sum_{i=1}^{p} \conleyzehnder^{\tau}(\gamma_i) - 2n - 2k + 4 & \quad [\text{by \cref{thm:transversality with tangency}}] \\ & = & (n-3)(2-p) + \sum_{i=1}^{p} \morse(\gamma_i) - 2n - 2k + 4 & \quad [\text{since $\tau$ trivializes $C^* T T^* L$ and by \cref{thm:index of geodesic or reeb orbit isometric triv}}] \\ & \leq & (n-3)(2-p) + \sum_{i=1}^{p} (n-1) - 2n - 2k + 4 & \quad [\text{since $\morse(\gamma_i) \leq n - 1$}] \\ & = & 2 (p - 1 - k). & & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{lemma}[{\cite[Corollary 3.3]{cieliebakPuncturedHolomorphicCurves2018}}] \label{lem:punctures and tangency} Let $(L,g)$ be an $n$-dimensional Riemannian manifold with the property that for some $\ell_0 > 0$, all closed geodesics $\gamma$ of length $\ell(\gamma) \leq \ell_0$ are noncontractible and nondegenerate and have Morse index $\morse(\gamma) \leq n - 1$. Let $x \in T^*L$ and $D$ be a symplectic divisor through $x$. For generic $J$, every (not necessarily simple) punctured $J$-holomorphic sphere $\tilde{C}$ in $T^*L$ which is asymptotic at the punctures to geodesics of length $\leq \ell_0$ and which has contact order $\tilde{k}$ to $D$ at $x$ must have at least $\tilde{k} + 1$ punctures. \end{lemma} \begin{proof} Let $\tilde{z}_1,\ldots,\tilde{z}_{\tilde{p}}$ be the punctures of $\tilde{C}$. Then $\tilde{C}$ is a map $\tilde{C} \colon S^2 \setminus \{\tilde{z}_1,\ldots,\tilde{z}_{\tilde{p}}\} \longrightarrow T^*L$ which has contact order $\tilde{k}$ at $\tilde{z}_0$ to $D$, for some $\tilde{z}_0 \in S^2 \setminus \{\tilde{z}_1,\ldots,\tilde{z}_{\tilde{p}}\}$. There exists a $d$-fold branched cover $\phi \colon S^2 \longrightarrow S^2$ and a simple punctured $J$-holomorphic sphere $C$ with $p$ punctures $\{z_1,\ldots,z_p\}$ which has contact order $k$ at $z_0 = \phi(\tilde{z}_0)$ to $D$, such that the following diagram commutes: \begin{IEEEeqnarray*}{c+x*} \begin{tikzcd} S^2 \setminus \{\tilde{z}_1,\ldots,\tilde{z}_{\tilde{p}}\} \ar[d, swap, "\phi"] \ar[rd, "\tilde{C}"] \\ S^2 \setminus \{z_1,\ldots,z_p\} \ar[r, swap, "C"] & T^*L \end{tikzcd} \end{IEEEeqnarray*} Define $b = \operatorname{ord}(\tilde{z}_0)$. Since the asymptotic Reeb orbits of $\tilde{C}$ are multiples of the asymptotic Reeb orbits of $C$, we have that the Reeb orbits of $C$ all have period less than or equal to $\ell_0$. Therefore, applying \cref{lem:punctures and tangency simple} to $C$ we conclude that $p - 1 \geq k$. We show that $k b \geq \tilde{k}$. For this, choose holomorphic coordinates centred at $z_0 \in S^2$, $\tilde{z}_0 \in S^2$, and $x \in X$ such that $D$ is given by $h(z_1,\ldots,z_n) = 0$, where $h(z_1,\ldots,z_n) = z_1$.
Then, with respect to these coordinates \begin{IEEEeqnarray*}{rCls+x*} \phi(z) & = & z^b, \\ h \circ C(z) & = & \sum_{j=1}^{+\infty} a_j z^j, \end{IEEEeqnarray*} and therefore \begin{IEEEeqnarray*}{c+x*} h \circ \tilde{C}(z) = h \circ C \circ \phi(z) = \sum_{j=1}^{+\infty} a_j z^{b j}. \end{IEEEeqnarray*} Since $\tilde{C}$ has contact order $\tilde{k}$ to $D$, \begin{IEEEeqnarray*}{c+x*} 0 = (h \circ \tilde{C})^{(r)}(0) = \sum_{j=1}^{+\infty} a_j \, (z^{b j})^{(r)} \Big|_{z = 0} \end{IEEEeqnarray*} for every $r = 1,\ldots,\tilde{k}-1$. Since $(z^{b j})^{(r)}|_{z = 0}$ is nonzero if and only if $b j = r$, it follows that $a_j = 0$ for every $j \in \Z_{\geq 1}$ such that $b j \leq \tilde{k} - 1$. In other words $a_1 = \cdots = a_\ell = 0$, where \begin{IEEEeqnarray*}{rCll} \ell & = & \max & \{ j \in \Z_{\geq 1} \mid b j \leq \tilde{k} - 1 \} \\ & = & \min & \{ j \in \Z_{\geq 1} \mid b (j+1) \geq \tilde{k} \}. \end{IEEEeqnarray*} Since $a_1 = \cdots = a_{\ell} = 0$, the contact order $k$ of $C$ to $D$ at $z_0$ satisfies $k \geq \ell + 1$. So, we conclude that $b k \geq b (\ell + 1) \geq \tilde{k}$. We show that $\tilde{p} \geq (p - 2) d + b + 1$. \begin{IEEEeqnarray*}{rCls+x*} 2 d - 2 & = & \sum_{\tilde{z} \in S^2}^{} (\operatorname{ord}(\tilde{z}) - 1) & \quad [\text{by the Riemann--Hurwitz formula}] \\ & \geq & \sum_{i=1}^{\tilde{p}} (\operatorname{ord}(\tilde{z}_i) - 1) + \operatorname{ord}(\tilde{z}_0) - 1 & \quad [\text{since $\operatorname{ord}(z) \geq 1$ for every $z \in S^2$}] \\ & = & p d - \tilde{p} + \operatorname{ord}(\tilde{z}_0) - 1 & \quad [\text{since $\phi(\{\tilde{z}_1,\ldots,\tilde{z}_{\tilde{p}}\}) = \{z_1,\ldots,z_p\}$}] \\ & = & p d - \tilde{p} + b - 1 & \quad [\text{by definition of $b$}]. \end{IEEEeqnarray*} Since $\phi$ is a $d$-fold branched cover, $d \geq b$. Combining all the facts which we have proven, we conclude that \begin{IEEEeqnarray*}{rCls+x*} \tilde{p} & \geq & (p-2)d + b + 1 & \quad [\text{by the last computation}] \\ & \geq & (k-1)d + b + 1 & \quad [\text{since $p - 1 \geq k$}] \\ & \geq & k b + 1 & \quad [\text{since $d \geq b$}] \\ & \geq & \tilde{k} + 1 & \quad [\text{since $k b \geq \tilde{k}$}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \section{SFT compactness} \label{sec:sft compactness} In this section we present the SFT compactness theorem, which describes the compactifications of the moduli spaces of the previous section. This theorem was first proven by Bourgeois--Eliashberg--Hofer--Wysocki--Zehnder \cite{bourgeoisCompactnessResultsSymplectic2003}. Cieliebak--Mohnke \cite{cieliebakCompactnessPuncturedHolomorphic2005} have given a proof of this theorem using different methods. Our presentation is based primarily on \cite{cieliebakPuncturedHolomorphicCurves2018} and \cite{mcduffSymplecticCapacitiesUnperturbed2022}. \begin{definition} \label{def:nodal riemann surface} A \textbf{nodal Riemann surface} is a Riemann surface $(\Sigma, j)$ together with a set $\mathbf{n}$ of \textbf{nodes} of the form $\mathbf{n} = \{n_1^+, n_1^-, \ldots, n_k^+, n_k^-\}$. \end{definition} \begin{definition} \label{def:nodal holomorphic curve} Let $(\Sigma, j)$ be a Riemann surface with a set $\mathbf{n} = \{n_1^+, n_1^-, \ldots, n_k^+, n_k^-\}$ of nodes and $(X, J)$ be an almost complex manifold. A \textbf{nodal $J$-holomorphic curve} is a $J$-holomorphic curve $u \colon (\Sigma, j) \longrightarrow (X, J)$ such that $u(n^+_i) = u(n^-_i)$ for every $i = 1, \ldots, k$.\end{definition} Let $(X, \omega, \lambda)$ be a symplectic cobordism and choose almost complex structures $J^{\pm} \in \mathcal{J}(\partial^{\pm} X)$ and $J \in \mathcal{J}^{J^+}_{J^-}(X)$.
Let $\Gamma^{\pm} = (\gamma^{\pm}_1, \ldots, \gamma^{\pm}_{p ^{\pm}})$ be a tuple of Reeb orbits in $\partial^{\pm} X$. \begin{definition} \label{def:sft compactification} For $1 \leq L \leq N$, let $\alpha^{\pm} \coloneqq \lambda|_{\partial^{\pm} X}$ and define \begin{IEEEeqnarray*}{rCl} (X^{\nu}, \omega^\nu, \tilde{\omega}^{\nu}, J^{\nu}) & \coloneqq & \begin{cases} (\R \times \partial^- X, \edv(e^r \alpha^-), \edv \alpha^- , J^-) & \text{if } \nu = 1 , \ldots, L - 1, \\ (\hat{X} , \hat{\omega} , \tilde{\omega} , J ) & \text{if } \nu = L , \\ (\R \times \partial^+ X, \edv(e^r \alpha^+), \edv \alpha^+ , J^+) & \text{if } \nu = L+1 ,\ldots ,N , \end{cases} \\ (X^*, \omega^*, \tilde{\omega}^*, J^*) & \coloneqq & \bigcoproduct_{\nu = 1}^N (X^{\nu}, \omega^\nu, \tilde{\omega}^{\nu}, J^{\nu}). \end{IEEEeqnarray*} The moduli space of \textbf{holomorphic buildings}, denoted $\overline{\mathcal{M}}^{J}_X(\Gamma^+, \Gamma^-)$, is the set of tuples $F = (F^1, \ldots, F^N)$, where $F^{\nu} \colon \dot{\Sigma}^\nu \longrightarrow X^\nu$ is an asymptotically cylindrical nodal $J^{\nu}$-holomorphic curve in $X^{\nu}$ with sets of asymptotic Reeb orbits $\Gamma^{\pm}_{\nu}$. Here, each $F^{\nu}$ is possibly disconnected and if $X^{\nu}$ is a symplectization then $F^{\nu}$ is only defined up to translation in the $\R$ direction. We assume in addition that $F$ satisfies the following conditions. \begin{enumerate} \item The sets of asymptotic Reeb orbits $\Gamma_{\nu}^{\pm}$ are such that \begin{IEEEeqnarray*}{rCls+x*} \Gamma^+_{\nu} & = & \Gamma^-_{\nu + 1} \quad \text{for every } \nu = 1, \ldots, N - 1, \\ \Gamma^-_1 & = & \Gamma^-, \\ \Gamma^+_N & = & \Gamma^+. \end{IEEEeqnarray*} \item Define the graph of $F$ to be the graph whose vertices are the components of $F^1, \ldots, F^N$ and whose edges are determined by the asymptotic Reeb orbits. Then the graph of $F$ is a tree. \item The building $F$ has no symplectization levels consisting entirely of trivial cylinders, and any constant component of $F$ has negative Euler characteristic after removing all special points. \end{enumerate} \end{definition} \begin{definition} The \textbf{energy} of a holomorphic building $F = (F^1, \ldots, F^N)$ is \begin{IEEEeqnarray*}{c+x*} E_{\tilde{\omega}^*}(F) \coloneqq \sum_{\nu = 1}^{N} E_{\tilde{\omega}^{\nu}}(F^{\nu}), \end{IEEEeqnarray*} where $E_{\tilde{\omega}^{\nu}}(F^{\nu})$ is given as in \cref{def:energy of a asy cylindrical holomorphic curve}. \end{definition} The moduli space $\overline{\mathcal{M}}_X^J(\Gamma^+, \Gamma^-)$ admits a metrizable topology (see \cite[Appendix B]{bourgeoisEquivariantSymplecticHomology2016}). With this language, the SFT compactness theorem can be stated as follows. \begin{theorem}[SFT compactness] The moduli space $\overline{\mathcal{M}}_X^J(\Gamma^+, \Gamma^-)$ is compact.\end{theorem} We now consider the case where the almost complex structure on $\hat{X}$ is replaced by a family of almost complex structures obtained via \textbf{neck stretching}. Let $(X^{\pm}, \omega^{\pm}, \lambda^{\pm})$ be symplectic cobordisms with common boundary \begin{IEEEeqnarray*}{c+x*} (M, \alpha) = (\partial^- X^{+}, \lambda^+|_{\partial^- X^+}) = (\partial^+ X^-, \lambda^-|_{\partial^+ X^-}). 
\end{IEEEeqnarray*} Choose almost complex structures \begin{IEEEeqnarray*}{rCls+x*} J_M & \in & \mathcal{J}(M), \\ J_+ & \in & \mathcal{J}_{J_M}(X^+), \\ J_- & \in & \mathcal{J}^{J_M}(X^-), \end{IEEEeqnarray*} and denote by $J_{\partial^{\pm} X^{\pm}} \in \mathcal{J}(\partial^{\pm} X^{\pm})$ the induced cylindrical almost complex structure on $\R \times \partial^{\pm} X^{\pm}$. Let $(X, \omega, \lambda) \coloneqq (X^-, \omega^-, \lambda^-) \circledcirc (X^+, \omega^+, \lambda^+)$ be the gluing of $X^-$ and $X^+$ along $M$. We wish to define a family of almost complex structures $(J_t)_{t \in \R_{\geq 0}} \subset \mathcal{J}(X)$. For every $t \geq 0$, let \begin{IEEEeqnarray*}{c+x*} X_t \coloneqq X^- \cup_M [-t, 0] \times M \cup_M X^+. \end{IEEEeqnarray*} There exists a canonical diffeomorphism $\phi_t \colon X \longrightarrow X_t$. Define an almost complex structure $J_t$ on $X_t$ by \begin{IEEEeqnarray*}{c+x*} J_t \coloneqq \begin{cases} J^{\pm} & \text{on } X^{\pm}, \\ J_M & \text{on } [-t, 0] \times M. \end{cases} \end{IEEEeqnarray*} Denote also by $J_t$ the pullback of $J_t$ to ${X}$, as well as the induced almost complex structure on the completion $\hat{X}$. Finally, consider the moduli space \begin{IEEEeqnarray*}{c+x*} \mathcal{M}_X^{(J_t)_t}(\Gamma^+, \Gamma^-) \coloneqq \bigcoproduct_{t \in \R_{\geq 0}} \mathcal{M}^{J_t}_{X}(\Gamma^+, \Gamma^-). \end{IEEEeqnarray*} \begin{definition} \phantomsection\label{def:sft compactification neck stretching} For $1 \leq L^- < L^+ \leq N$, let $\alpha^{\pm} \coloneqq \lambda^{\pm}|_{\partial^{\pm} X^\pm}$ and define \begin{IEEEeqnarray*}{rCls+x*} (X^{\nu}, \omega^\nu, \tilde{\omega}^{\nu}, J^{\nu}) & \coloneqq & \begin{cases} (\R \times \partial^- X^-, \edv(e^r \alpha^-) , \edv \alpha^- , J_{\partial^- X^-}) & \text{if } \nu = 1 , \ldots, L^- - 1, \\ (X^- , \omega^- , \tilde{\omega}^-, J^-) & \text{if } \nu = L^-, \\ (\R \times M , \edv(e^r \alpha) , \edv \alpha , J_M) & \text{if } \nu = L^- + 1 , \ldots, L^+ - 1, \\ (X^+ , \omega^+ , \tilde{\omega}^+, J^+) & \text{if } \nu = L^+, \\ (\R \times \partial^+ X^+, \edv (e^r \alpha^+) , \edv \alpha^+ , J_{\partial^+ X^+}) & \text{if } \nu = L^+ + 1 , \ldots, N , \\ \end{cases} \\ (X^*, \omega^*, \tilde{\omega}^*, J^*) & \coloneqq & \bigcoproduct_{\nu = 1}^N (X^{\nu}, \omega^\nu, \tilde{\omega}^{\nu}, J^{\nu}). \end{IEEEeqnarray*} Define $\overline{\mathcal{M}}^{(J_t)_t}_X(\Gamma^+, \Gamma^-)$ to be the set of tuples $F = (F^1, \ldots, F^N)$, where $F^{\nu} \colon \dot{\Sigma}^\nu \longrightarrow X^\nu$ is an asymptotically cylindrical nodal $J^{\nu}$-holomorphic curve in $X^{\nu}$ with sets of asymptotic Reeb orbits $\Gamma^{\pm}_{\nu}$, such that $F$ satisfies conditions analogous to those of \cref{def:sft compactification}. \end{definition} \begin{theorem}[SFT compactness] The moduli space $\overline{\mathcal{M}}^{(J_t)_t}_X(\Gamma^+, \Gamma^-)$ is compact.\end{theorem} \begin{remark} \label{rmk:compactifications with tangency} The discussion above also applies to compactifications of moduli spaces of curves satisfying tangency constraints. The compactification $\overline{\mathcal{M}}^{J}_{X}(\Gamma^+,\Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x}$ consists of buildings $F = (F^1, \ldots, F^N) \in \overline{\mathcal{M}}^J_X(\Gamma^+, \Gamma^-)$ such that exactly one component $C$ of $F$ inherits the tangency constraint $\p{<}{}{\mathcal{T}^{(k)}x}$, and which satisfy the following additional condition. Consider the graph obtained from the graph of $F$ by collapsing adjacent constant components to a point. 
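The following special case is only meant to illustrate the notation of the previous definition.
\begin{example}
    Suppose that $N = 2$, $L^- = 1$ and $L^+ = 2$ in \cref{def:sft compactification neck stretching}. Then a holomorphic building $F = (F^1, F^2) \in \overline{\mathcal{M}}^{(J_t)_t}_X(\Gamma^+, \Gamma^-)$ has exactly one level in $X^-$ and one level in $X^+$, and no symplectization levels. In this case, the conditions on the asymptotic Reeb orbits read $\Gamma^-_1 = \Gamma^-$, $\Gamma^+_2 = \Gamma^+$ and $\Gamma^+_1 = \Gamma^-_2$, where $\Gamma^+_1 = \Gamma^-_2$ is a tuple of Reeb orbits in $(M, \alpha)$ to which $F^1$ is positively asymptotic and $F^2$ is negatively asymptotic.
\end{example}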
Let $C_1, \ldots, C_p$ be the (necessarily nonconstant) components of $F$ which are adjacent to $C$ in the new graph. Then we require that there exist $k_1, \ldots, k_p \in \Z_{\geq 1}$ such that $k_1 + \cdots + k_p \geq k$ and $C_i$ satisfies the constraint $\p{<}{}{\mathcal{T}^{(k_i)}x}$ for every $i = 1, \ldots, p$. This definition is natural to consider by \cite[Lemma 7.2]{cieliebakSymplecticHypersurfacesTransversality2007}. We can define $\overline{\mathcal{M}}^{(J_t)_t}_X(\Gamma^+, \Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x}$ analogously. \end{remark} \begin{remark} We point out that in \cite[Definition 2.2.1]{mcduffSymplecticCapacitiesUnperturbed2022}, the compactification of \cref{rmk:compactifications with tangency} is denoted by $\overline{\overline{\mathcal{M}}}^{J}_{X}(\Gamma^+,\Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x}$, while the notation $\overline{\mathcal{M}}^{J}_{X}(\Gamma^+,\Gamma^-)\p{<}{}{\mathcal{T}^{(k)}x}$ is used to denote the moduli space of buildings $F = (F^1, \ldots, F^N) \in \overline{\mathcal{M}}^J_X(\Gamma^+, \Gamma^-)$ such that exactly one component $C$ of $F$ inherits the tangency constraint $\p{<}{}{\mathcal{T}^{(k)}x}$, but which do not necessarily satisfy the additional condition of \cref{rmk:compactifications with tangency}. \end{remark} \begin{lemma} \label{lem:no nodes} Suppose that $\Gamma^- = \varnothing$ and $\Gamma^+ = (\gamma)$ consists of a single Reeb orbit. Let $F$ be a holomorphic building of genus $0$ in any of the following compactified moduli spaces: \begin{IEEEeqnarray*}{lCl} \overline{\mathcal{M}}^J_X(\gamma), & \quad & \overline{\mathcal{M}}^J_X(\gamma)\p{<}{}{\mathcal{T}^{(k)}x}, \\ \overline{\mathcal{M}}^{(J_t)_t}_X(\gamma), & \quad & \overline{\mathcal{M}}^{(J_t)_t}_X(\gamma)\p{<}{}{\mathcal{T}^{(k)}x}. \end{IEEEeqnarray*} Then $F$ has no nodes. \end{lemma} \begin{proof} Assume by contradiction that $F$ has a node. Let $\overline{\Sigma}$ be the topological space obtained by gluing the $\Sigma^{\nu}$ along the matching punctures. Let $\overline{X}$ be the topological space obtained by gluing the $X^{\nu}$ along the matching ends. The space $\overline{X}$ is homeomorphic to $\hat{X}$, and therefore we can identify homology classes in $\overline{X}$ and $\hat{X}$. The holomorphic building $F$ defines a continuous map $\overline{F} \colon \overline{\Sigma} \longrightarrow \overline{X}$ (for more details on the definitions of $\overline{F} \colon \overline{\Sigma} \longrightarrow \overline{X}$, see \cite[Section 2.6]{cieliebakPuncturedHolomorphicCurves2018}). By the assumptions on $F$ and since $F$ has a node, it is possible to decompose $\overline{F}$ along the node into two continuous maps \begin{IEEEeqnarray*}{rCls+x*} \overline{F}_0 \colon \overline{\Sigma}_0 & \longrightarrow & \overline{X}, \\ \overline{F}_1 \colon \overline{\Sigma}_1 & \longrightarrow & \overline{X}, \end{IEEEeqnarray*} where $\overline{F}_0$ is a plane and $\overline{F}_1$ is a sphere. Since $\overline{F}_1$ is a sphere, it defines a homology class $[\overline{F}_1] \in H_2(\hat{X}; \Z)$. Then, \begin{IEEEeqnarray*}{rCls+x*} 0 & = & \edv \hat{\lambda}([\overline{F}_1]) & \quad [\text{since $\edv \hat{\lambda} = 0 \in H^2_{\mathrm{dR}}(\hat{X})$}] \\ & > & 0 & \quad [\text{by \cite[Lemma 2.8]{cieliebakPuncturedHolomorphicCurves2018}}], \end{IEEEeqnarray*} which gives the desired contradiction. 
\end{proof} \section{Solutions of the parametrized Floer equation} \label{sec:floer trajectories} The goal of this section is to introduce the trajectories that appear in $S^1$-equivariant symplectic homology (see \cref{def:floer trajectory abstract}). We will write these trajectories as maps whose domain is any punctured Riemann surface, but we point out that in \cref{chp:floer}, where we discuss $S^1$-equivariant symplectic homology, all trajectories have as domain the cylinder $\R \times S^1$. Let $(\Sigma, j)$ be a Riemann surface with punctures \begin{IEEEeqnarray*}{c+x*} \mathbf{z} = \mathbf{z}^+ \cup \mathbf{z}^-, \qquad \mathbf{z}^{\pm} = \{z^{\pm}_1, \ldots, z^{\pm}_{p^{\pm}}\}. \end{IEEEeqnarray*} We assume that near every puncture $z$, there are cylindrical coordinates $(s,t)$ as in \cref{def:punctures asy markers cyl ends}. Let $\sigma, \tau \in \Omega^1(\dot{\Sigma})$ be $1$-forms such that for every (positive or negative) puncture $z$, if we denote by $(s,t)$ the coordinates on the cylindrical end of $\dot{\Sigma}$ near $z$, then\begin{IEEEeqnarray*}{rCls+x*} \sigma & = & A \, \edv s, \\ \tau & = & B \, \edv t, \end{IEEEeqnarray*} for some $A, B > 0$. Finally, we assume that there is an action \begin{IEEEeqnarray*}{c+x*} S^1 \times \dot{\Sigma} \longrightarrow \dot{\Sigma} \end{IEEEeqnarray*} of $S^1$ on $\dot{\Sigma}$ which preserves $j$, $\sigma$ and $\tau$ and such that if $t' \in S^1$ and $(s,t)$ belongs to any cylindrical coordinate neighbourhood, then \begin{IEEEeqnarray*}{c+x*} t' \cdot (s, t) = (s, t + t'). \end{IEEEeqnarray*} \begin{example} \label{exa:sphere and cylinder} Consider the cylinder $\R \times S^1$ with coordinates $(s,t)$ and almost complex structure given by $j(\partial_s) = \partial_t$. We have the $1$-forms $\sigma \coloneqq \edv s$ and $\tau \coloneqq \edv t$. The cylinder is biholomorphic to the sphere $S^2$ with the north and south poles removed. There is an action of $S^1$ on $\R \times S^1$ given by $t' \cdot (s,t) = (s,t + t')$. Therefore, $\R \times S^1$ can be seen as a special case of the assumptions above. In this case, we will typically denote $\dot{\Sigma} = \R \times S^1$ and $\Sigma = S^2$. \end{example} Let $(S,g^S)$ be a Riemannian manifold together with an action $S^1 \times S \longrightarrow S$ which is free, proper and by isometries. Define $C = S / S^1$ and denote the projection by $\pi \colon S \longrightarrow C$. Since the action is by isometries, there exists a unique Riemannian metric $g^C$ on $C$ such that $\pi \colon S \longrightarrow C$ is a Riemannian submersion. Let $f \colon C \longrightarrow \R$ be a Morse function and define $\tilde{f} \coloneqq f \circ \pi \colon S \longrightarrow \R$, which is Morse--Bott. \begin{example} For $N \in \Z_{\geq 1}$, let \begin{IEEEeqnarray*}{rCls+x*} S & \coloneqq & S^{2N+1}, \\ C & \coloneqq & \C P^N, \\ f & \coloneqq & f_N, \end{IEEEeqnarray*} where \begin{IEEEeqnarray*}{c+x*} f_N([w_0:\cdots:w_N]) \coloneqq \frac{ \sum_{j=0}^{N} j |w_j|^2 }{ \sum_{j=0}^{N} |w_j|^2 }. \end{IEEEeqnarray*} As we will discuss in \cref{sec:action functional}, $S$, $C$ and $f$ given above are as in the previous paragraph. \end{example} Finally, let $(X,\lambda)$ be a Liouville domain. 
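Since the upcoming definitions refer repeatedly to the critical points of $\tilde{f}$, we record the following elementary observation.
\begin{remark}
    Since $\pi \colon S \longrightarrow C$ is a submersion and $\tilde{f} = f \circ \pi$, a point $w \in S$ is a critical point of $\tilde{f}$ if and only if $\pi(w)$ is a critical point of $f$. Therefore, the critical set of $\tilde{f}$ is the union of the $S^1$-orbits lying over the critical points of $f$, which is why $\tilde{f}$ is Morse--Bott rather than Morse. For instance, in the example above one can check that the critical points of $f_N$ are the coordinate points $[0 : \cdots : 0 : 1 : 0 : \cdots : 0] \in \C P^N$, so the critical manifolds of the corresponding function $\tilde{f} = f_N \circ \pi$ on $S^{2N+1}$ are $N + 1$ disjoint circles.
\end{remark}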
\begin{definition} \label{def:admissible hamiltonian abstract} An \textbf{admissible Hamiltonian} is a map $H \colon \dot{\Sigma} \times S \times \hat{X} \longrightarrow \R$ such that: \begin{enumerate} \item \label{def:admissible hamiltonian abstract 1} For every puncture $z$, the restriction of $H$ to the cylindrical end near $z$ is independent of $s$ for $s$ large enough. In other words, there is a map $H_z \colon S^1 \times S \times \hat{X} \longrightarrow \R$ such that $H(s,t,w,x) = H_z(t,w,x)$ for $s$ large enough. \item \label{def:admissible hamiltonian abstract 2} For every critical point $w$ of $\tilde{f}$, there exists a neighbourhood $V$ of $w$ in $S$ such that the restriction of $H$ to $\dot{\Sigma} \times V \times \hat{X}$ does not depend on the point of $V$, i.e. $H(z, w', x) = H(z, w, x)$ for every $z \in \dot{\Sigma}$, $w' \in V$ and $x \in \hat{X}$. \item Consider the action of $S^1$ on $\dot{\Sigma} \times S \times \hat{X}$ given by $t \cdot (z, w, x) = (t \cdot z, t \cdot w, x)$. Then, the Hamiltonian $H$ is invariant under the action of $S^1$. \item For every puncture $z$, there exist $D \in \R$, $C \in \R_{> 0} \setminus \operatorname{Spec}(\partial X, \lambda|_{\partial X})$ and $\delta > 0$ such that on $S^1 \times S \times [\delta,+\infty) \times \partial X$, we have that $H_z(t,w,r,x) = C e^r + D$. \item For every puncture $z$ and critical point $w$ of $\tilde{f}$ the Hamiltonian $H_{z,w} \colon S^1 \times \hat{X} \longrightarrow \R$ is nondegenerate. \item \label{def:admissible hamiltonian abstract 3} For every $(z,w,x) \in \dot{\Sigma} \times S \times \hat{X}$ we have \begin{IEEEeqnarray*}{rCls+x*} H_{w,x} \, \edv \tau & \leq & 0, \\ \edv_{\dot{\Sigma}} H_{w,x} \wedge \tau & \leq & 0, \\ \p{<}{}{ \nabla_S H_{z,x}(w), \nabla \tilde{f} (w) } \, \sigma_z \wedge \tau_z & \leq & 0. \end{IEEEeqnarray*} \end{enumerate} \end{definition} \begin{definition} \label{def:admissible acs abstract} An \textbf{admissible almost complex structure} on $\hat{X}$ is a section $J \colon \dot{\Sigma} \times S \times \hat{X} \longrightarrow \End(T \hat{X})$ such that $J^2 = - \id_{T \hat{X}}$ and: \begin{enumerate} \item \label{def:admissible acs abstract 1} For every puncture $z$, the restriction of $J$ to the cylindrical end near $z$ is independent of $s$ for $s$ large enough. In other words, there is a function $J_z \colon S^1 \times S \times \hat{X} \longrightarrow \End(T \hat{X})$ such that $J(s,t,w,x) = J_z(t,w,x)$ for $s$ large enough. \item \label{def:admissible acs abstract 2} For every critical point $w$ of $\tilde{f}$, there exists a neighbourhood $V$ of $w$ in $S$ such that the restriction of $J$ to $\dot{\Sigma} \times V \times \hat{X}$ does not depend on the point of $V$. \item The almost complex structure $J$ is $S^1$-invariant. \item $J$ is \textbf{compatible}, i.e. $g \coloneqq \hat{\omega}(\cdot, J \cdot) \colon \dot{\Sigma} \times S \times \hat{X} \longrightarrow T^* \hat{X} \otimes T^* \hat{X}$ is a Riemannian metric on $\hat{X}$ parametrized by $\dot{\Sigma} \times S$. \item $J$ is \textbf{cylindrical}, i.e. if $(z,w) \in \dot{\Sigma} \times S$ then $J_{z,w}$ is cylindrical on $\R_{\geq 0} \times \partial X$. \end{enumerate} \end{definition} \begin{definition} \label{def:floer trajectory abstract} Let $w \colon \dot{\Sigma} \longrightarrow S$ and $u \colon \dot{\Sigma} \longrightarrow \hat{X}$ be maps. We will denote by $\mathbf{u}$ the map $\mathbf{u} \coloneqq (\id_{\dot{\Sigma}}, w, u) \colon \dot{\Sigma} \longrightarrow \dot{\Sigma} \times S \times \hat{X}$.
We say that $(w,u)$ is a solution of the \textbf{parametrized Floer equation} if \begin{IEEEeqnarray}{rCls+x*} \dv w - \nabla \tilde{f} (w) \otimes \sigma & = & 0, \phantomsection\label{eq:parametrized floer equation 1} \\ (\dv u - X_H(\mathbf{u}) \otimes \tau)^{0,1}_{J(\mathbf{u}), j} & = & 0. \phantomsection\label{eq:parametrized floer equation 2} \end{IEEEeqnarray} \end{definition} \begin{example} Suppose that $(\dot{\Sigma}, j, \sigma, \tau) = (\R \times S^1, j, \edv s, \edv t)$ is the cylinder from \cref{exa:sphere and cylinder}. Then, $(w,u)$ is a solution of the parametrized Floer equation if and only if $w \colon \R \times S^1 \longrightarrow S$ is independent of $t \in S^1$, thus defining a map $w \colon \R \longrightarrow S$, and \begin{IEEEeqnarray*}{rCls+x*} \pdv{w}{s}(s) & = & \nabla \tilde{f}(w(s)), \\ \pdv{u}{s}(s,t) & = & - J(s, t, w(s), u(s,t)) \p{}{2}{ \pdv{u}{t}(s,t) - X_{H}(s, t,w(s),u(s,t)) }. \end{IEEEeqnarray*} \end{example} \begin{definition} \label{def:1 periodic orbit abstract} Let $z$ be a puncture and $B > 0$ be such that $\tau = B \, \edv t$, where $(s,t)$ are the cylindrical coordinates near $z$. A \textbf{$1$-periodic orbit} of $H$ at $z$ is a pair $(w, \gamma)$ such that $w \in S$ is a critical point of $\tilde{f}$ and $\gamma$ is a $1$-periodic orbit of $H_{z,w} \colon S^1 \times \hat{X} \longrightarrow \R$. Denote by $\mathcal{P}(H,z)$ the set of such pairs. The \textbf{action} of $(w, \gamma)$ is \begin{IEEEeqnarray*}{c+x*} \mathcal{A}_{H}(w,\gamma) \coloneqq \mathcal{A}_{B H_{z,w}}(\gamma) = \int_{S^1}^{} \gamma^* \hat{\lambda} - B \int_{S^1}^{} H_{z,w} (t, \gamma(t)) \edv t. \end{IEEEeqnarray*} \end{definition} \begin{definition} \label{def:asymptotic} Let $(w,u)$ be a solution of the parametrized Floer equation. We say that $(w,u)$ is \textbf{asymptotic} at $z^{\pm}_i$ to $(w^{\pm}_i, \gamma^{\pm}_i) \in \mathcal{P}(H, z^{\pm}_i)$ if \begin{IEEEeqnarray*}{rCls+x*} \lim_{s \to \pm \infty} w(s) & = & w^{\pm}_i, \\ \lim_{s \to \pm \infty} u(s,t) & = & \gamma^{\pm}_i(t), \end{IEEEeqnarray*} where $(s,t)$ are the cylindrical coordinates near $z^{\pm}_i$. \end{definition} \begin{definition} \label{def:energy of floer trajectory} The \textbf{energy} of $(w,u)$ is \begin{IEEEeqnarray*}{c+x*} E(u) \coloneqq \frac{1}{2} \int_{\dot{\Sigma}}^{} \| \dv u - X_H(\mathbf{u}) \otimes \tau \|^2_{J(\mathbf{u}), \hat{\omega}} \, \omega_{\dot{\Sigma}}. \end{IEEEeqnarray*} \end{definition} We will now state the analytical results about solutions of the parametrized Floer equation. Some results we will state are analogous to previous results about solutions of a pseudoholomorphic curve equation. Namely, in \cref{lem:action energy for floer trajectories} we compare the energy of a solution with the action at the asymptotes, and in \cref{lem:maximum principle} we show that solutions satisfy a maximum principle. \begin{lemma} \phantomsection\label{lem:action energy for floer trajectories} If $(w,u)$ is a solution of the parametrized Floer equation which is asymptotic at $z^{\pm}_i$ to $(w^{\pm}_i, \gamma^{\pm}_i) \in \mathcal{P}(H, z^{\pm}_i)$, then \begin{IEEEeqnarray*}{c+x*} 0 \leq E(u) \leq \sum_{i=1}^{p^+} \mathcal{A}_H(w^+_i, \gamma^+_i) - \sum_{i=1}^{p^-} \mathcal{A}_H(w^-_i, \gamma^-_i). \end{IEEEeqnarray*} \end{lemma} \begin{proof} We show that $1/2 \| \dv u - X_H(\mathbf{u}) \otimes \tau \|^{2}_{J(\mathbf{u}), \hat{\omega}} \, \omega_{\dot{\Sigma}} = u^* \hat{\omega} - u^* \edv_{\hat{X}} H(\mathbf{u}) \wedge \tau$.
\begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\frac{1}{2} \| \dv u - X_H(\mathbf{u}) \otimes \tau \|^{2}_{J(\mathbf{u}), \hat{\omega}} \, \omega_{\dot{\Sigma}}(S, T)}\\ \quad & = & (\dv u - X_H(\mathbf{u}) \otimes \tau)^* \hat{\omega}(S, T) \\ & = & \hat{\omega}(\dv u (S) - X_{H}(\mathbf{u}) \tau(S), \dv u (T) - X_{H}(\mathbf{u}) \tau(T)) \\ & = & \hat{\omega} (\dv u (S), \dv u (T)) - \hat{\omega} (\dv u (S), X_{H}(\mathbf{u})) \tau(T) - \hat{\omega} (X_{H}(\mathbf{u}), \dv u (T)) \tau(S) \\ & = & u^* \hat{\omega} (S,T) + u^* \iota_{X_H(\mathbf{u})} \hat{\omega} \wedge \tau (S,T) \\ \quad & = & u^* \hat{\omega} (S,T) - u^* \edv_{\hat{X}} H(\mathbf{u}) \wedge \tau (S,T), \end{IEEEeqnarray*} where in the first equality we used \cref{lem:integrand of energy is well-defined} and the fact that $\dv u - X_H(\mathbf{u}) \otimes \tau$ is holomorphic, and in the last equality we used the definition of Hamiltonian vector field. We show that $u^* \hat{\omega} - u^* \edv_{\hat{X}} H (\mathbf{u}) \wedge \tau \leq u^* \hat{\omega} - \edv(\mathbf{u}^* H \wedge \tau)$. \begin{IEEEeqnarray*}{rCls+x*} \edv (\mathbf{u}^* H \wedge \tau) & = & \mathbf{u}^* H \wedge \edv \tau + \mathbf{u}^* \edv H \wedge \tau \\ & = & \mathbf{u}^* H \wedge \edv \tau + \edv_{\dot{\Sigma}} H (\mathbf{u}) \wedge \tau + w^* \edv_S H(\mathbf{u}) \wedge \tau + u^* \edv_{\hat{X}} H(\mathbf{u}) \wedge \tau \\ & = & \mathbf{u}^* H \wedge \edv \tau + \edv_{\dot{\Sigma}} H (\mathbf{u}) \wedge \tau + \p{<}{}{\nabla_S H(\mathbf{u}), \nabla \tilde{f}(w)} \, \sigma \wedge \tau + u^* \edv_{\hat{X}} H(\mathbf{u}) \wedge \tau \\ & \leq & u^* \edv_{\hat{X}} H (\mathbf{u}) \wedge \tau. \end{IEEEeqnarray*} Here, in the third equality we used Equation \eqref{eq:parametrized floer equation 1} and in the last line of the computation we used the fact that $H$ is admissible. Combining these results, \begin{IEEEeqnarray*}{rCls+x*} 0 & \leq & E(u) \\ & \leq & \int_{\dot{\Sigma}}^{} u^* \edv \hat{\lambda} - \int_{\dot{\Sigma}}^{} \edv (\mathbf{u}^* H \wedge \tau) \\ & = & \sum_{i=1}^{p^+} \mathcal{A}_H(w^+_i, \gamma^+_i) - \sum_{i=1}^{p^-} \mathcal{A}_H(w^-_i, \gamma^-_i), \end{IEEEeqnarray*} where in the last line we used Stokes' theorem. \end{proof}
\plabel{eq:floer eq proj 2} \end{IEEEeqnarray} \end{lemma} \begin{proof} We prove equation \eqref{eq:floer eq proj 1}: \begin{IEEEeqnarray*}{rCls+x*} - \edv a \circ j & = & - \edv r \circ \dv u \circ j & \quad [\text{by definition of $a$}] \\ & = & - \edv r \circ (\dv u - X_H(\mathbf{u}) \tensorpr \tau) \circ j & \quad [\text{$H$ is independent of $M$}] \\ & = & - \edv r \circ J(\mathbf{u}) \circ (\dv u - X_H(\mathbf{u}) \tensorpr \tau) & \quad [\text{$\dv u - X_H(\mathbf{u}) \tensorpr \tau$ is holomorphic}] \\ & = & \alpha \circ (\dv u - X_H(\mathbf{u}) \tensorpr \tau) & \quad [\text{by \cref{lem:J cylindrical forms}}] \\ & = & f^* \alpha - \alpha(X_H(\mathbf{u})) \tau & \quad [\text{by definition of pullback}]. \end{IEEEeqnarray*} Equation \eqref{eq:floer eq proj 2} follows by applying $\pi_{\xi} \colon T(\R \times M) \longrightarrow \xi$ to $(\dv u - X_H(\mathbf{u}) \tensorpr \tau)^{0,1}_{J(\mathbf{u}),j} = 0$. The proof of $f^* \edv \alpha \geq 0$ is equal to the one presented in \cref{lem:holomorphic curves in symplectizations}. \end{proof} The following is an adaptation to solutions of the parametrized Floer equation of the maximum principle from \cref{thm:maximum principle holomorphic}. Other authors have proven similar results about solutions of a Floer equation satisfying a maximum principle, namely Viterbo \cite[Lemma 1.8]{viterboFunctorsComputationsFloer1999}, Oancea \cite[Lemma 1.5]{oanceaSurveyFloerHomology2004}, Seidel \cite[Section 3]{seidelBiasedViewSymplectic2008} and Ritter \cite[Lemma D.1]{ritterTopologicalQuantumField2013}. \begin{lemma}[maximum principle] \label{lem:maximum principle} Under the assumptions of \cref{lem:floer eq proj}, define \begin{IEEEeqnarray*}{rClCrCl} h \colon \dot{\Sigma} \times S \times \R & \longrightarrow & \R, & \quad & h(z,w,\rho) & = & H(z,w,\ln(\rho)), \\ \rho \colon \dot{\Sigma} & \longrightarrow & \R, & \quad & \rho & = & \exp \circ a. \end{IEEEeqnarray*} If \begin{IEEEeqnarray}{rCl} \partial_{\rho} h(z,w,\rho) \, \edv \tau & \leq & 0, \plabel{eq:maximum principle 1} \\ \edv_{\dot{\Sigma}} (\partial_{\rho} h(z,w,\rho)) \wedge \tau & \leq & 0, \plabel{eq:maximum principle 2} \\ \p{<}{}{\nabla_{S} \partial_{\rho} h(z,w,\rho), \nabla \tilde{f} (w) } \, \sigma \wedge \tau & \leq & 0, \plabel{eq:maximum principle 3} \end{IEEEeqnarray} and $a \colon \dot{\Sigma} \longrightarrow \R$ has a local maximum then $a$ is constant. \end{lemma} \begin{proof} Choose a symplectic structure $\omega_{\dot{\Sigma}}$ on $\dot{\Sigma}$ such that $g_{\dot{\Sigma}} \coloneqq \omega_{\dot{\Sigma}}(\cdot, j \cdot)$ is a Riemannian metric. Define $L \colon C^{\infty}(\dot{\Sigma}, \R) \longrightarrow C^{\infty}(\dot{\Sigma}, \R)$ by \begin{IEEEeqnarray*}{c+x*} L \nu = - \Delta \nu - \rho \, \partial^2_{\rho} h (z,w,\rho) \frac{\edv \nu \wedge \tau}{\omega_{\dot{\Sigma}}}, \end{IEEEeqnarray*} for every $\nu \in C^{\infty}(\dot{\Sigma}, \R)$. The map $L$ is a linear elliptic partial differential operator (as in \cite[p.~312]{evansPartialDifferentialEquations2010}). We wish to show that $L \rho \leq 0$. For this, we start by computing $\Delta \rho \, \omega_{\dot{\Sigma}}$. 
\begin{IEEEeqnarray*}{rCls+x*} - \Delta \rho \, \omega_{\dot{\Sigma}} & = & \edv (\edv \rho \circ j) & \quad [\text{by \cref{lem:laplacian}}] \\ & = & - \edv (u^*(e^r \alpha) - \rho \, \alpha(X_H(\mathbf{u})) \, \tau) & \quad [\text{by \cref{lem:floer eq proj}}] \\ & = & - u^* \edv (e^r \alpha) + \edv (\rho \, \partial_{\rho} h (z,w,\rho) \, \tau) & \quad [\text{by \cref{lem:reeb equals hamiltonian on symplectization}}] \\ & = & - u^* \edv (e^r \alpha) + \partial_{\rho} h (z,w,\rho) \, \edv \rho \wedge \tau & \quad [\text{by the Leibniz rule}] \\ & & \hphantom{- u^* \edv (e^r \alpha)} + \rho \, \edv (\partial_{\rho} h (z,w,\rho)) \wedge \tau \\ & & \hphantom{- u^* \edv (e^r \alpha)} + \rho \, \partial_{\rho} h (z,w,\rho) \, \edv \tau. \end{IEEEeqnarray*} By Equation \eqref{eq:maximum principle 1}, the last term on the right is nonpositive. We show that the sum of the first two terms on the right is nonpositive. \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{- u^* \edv (e^r \alpha) + \partial_{\rho} h (z,w,\rho) \, \edv \rho \wedge \tau}\\ \quad & = & - u^* \edv (e^r \alpha) + u^* \edv_{\R \times M} H(\mathbf{u}) \wedge \tau & \quad [\text{by definition of $h$}] \\ & = & - \frac{1}{2} \| \dv u - X_H(\mathbf{u}) \otimes \tau \|^2_{J(\mathbf{u}), \edv(e^r \alpha)} \, \omega_{\dot{\Sigma}} & \quad [\text{by the computation in \cref{lem:action energy for floer trajectories}}] \\ & \leq & 0. \end{IEEEeqnarray*} Finally, we show that $\rho \, \edv (\partial_{\rho} h (z,w,\rho)) \wedge \tau \leq \rho \, \partial^2_{\rho} h(z,w,\rho) \, \edv \rho \wedge \tau$: \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\rho \, \edv (\partial_{\rho} h (z,w,\rho)) \wedge \tau}\\ \quad & = & \rho \, \edv_{\dot{\Sigma}} \partial_{\rho} h(z,w,\rho) \wedge \tau + \rho \, \p{<}{}{\nabla_{S} \partial_{\rho} h(z,w,\rho), \nabla \tilde{f}(w)} \, \sigma \wedge \tau + \rho \, \partial^2_{\rho} h(z,w,\rho) \, \edv \rho \wedge \tau \\ & \leq & \rho \, \partial^2_{\rho} h(z,w,\rho) \, \edv \rho \wedge \tau, \end{IEEEeqnarray*} where in the last line we used Equations \eqref{eq:maximum principle 2} and \eqref{eq:maximum principle 3}. This shows that $L \rho \leq 0$. By the strong maximum principle in \cite[p.~349-350]{evansPartialDifferentialEquations2010}, if $\rho$ has a local maximum then $\rho$ is constant. Since $\rho = \exp \circ a$, the same is true for $a$. \end{proof} The next lemma is an adaptation to our setup of an argument by Bourgeois--Oancea which first appeared in \cite[p.~654-655]{bourgeoisExactSequenceContact2009}. The same argument was also used by Cieliebak--Oancea \cite[Lemma 2.3]{cieliebakSymplecticHomologyEilenberg2018} in a different setup. \begin{lemma}[asymptotic behaviour] \label{lem:asymptotic behaviour} Consider the half-cylinder $Z^{\pm}$ of \cref{def:punctures asy markers cyl ends}, with $1$-forms $\sigma \coloneqq \edv s$ and $\tau \coloneqq \edv t$. Assume the same conditions as in \cref{lem:floer eq proj}, but with $\dot{\Sigma}$ replaced by $Z^{\pm}$. Suppose that $\mathbf{u}$ is asymptotic at $\pm \infty$ to a $1$-periodic orbit $(z_{\pm}, \gamma_{\pm})$ of $H_{\pm \infty}$ of the form $\gamma_{\pm}(t) = (r_{\pm}, \rho_{\pm}(t))$, where $z_{\pm}$ is a critical point of $\tilde{f}$, $r_{\pm} \in \R$ and $\rho_{\pm} \colon S^1 \longrightarrow M$ is a periodic Reeb orbit in $M$. Define $h \colon Z^{\pm} \times S \times \R \longrightarrow \R$ by $h(s,t,z,r) = H(s,t,z,\ln(r))$ (recall that $H$ is independent of $M$).
If \begin{IEEEeqnarray}{rCls+x*} \pm \del_r^2 h(s,t,z_{\pm},e^{r_{\pm}}) & < & 0 \plabel{lem:asymptotic behaviour gen 1} \\ \p{<}{}{ \nabla_S \del_r h(s, t, z_{\pm}, e^{r_{\pm}}), \nabla \tilde{f}(z_{\pm}) } & < & 0 \plabel{lem:asymptotic behaviour gen 2} \\ \del_s \del_r h(s,t,z_{\pm},e^{r_{\pm}}) & \leq & 0, \plabel{lem:asymptotic behaviour gen 3} \end{IEEEeqnarray} then either there exists $(s_0,t_0) \in Z^{\pm}$ such that $a(s_0, t_0) > r_{\pm}$ or $\mathbf{u}$ is of the form $\mathbf{u}(s,t) = (s,t, w(s), r_{\pm}, \rho_{\pm}(t))$. \end{lemma} \begin{proof} It suffices to assume that $a(s,t) \leq r_{\pm}$ for all $(s,t) \in Z^{\pm}$ and to prove that $a(s,t) = r_{\pm}$ and $f(s,t) = \rho_{\pm}(t)$ for all $(s,t) \in Z^{\pm}$. After replacing $Z^{\pm}$ by a smaller half-cylinder we may assume the following analogues of \eqref{lem:asymptotic behaviour gen 1} and \eqref{lem:asymptotic behaviour gen 2}: \begin{IEEEeqnarray}{rCls+x*} \pm \del_r^2 h(s,t,w(s),e^{a(s,t)}) & \leq & 0, \plabel{lem:asymptotic behaviour gen 1b} \\ \p{<}{}{ \nabla_S \del_r h(s, t, w(s), e^{r_{\pm}}), \nabla \tilde{f}(w(s)) } & \leq & 0. \plabel{lem:asymptotic behaviour gen 2b} \end{IEEEeqnarray} Define the average of $a$, which we denote by $\overline{a} \colon \R^{\pm}_0 \longrightarrow \R$, by \begin{IEEEeqnarray*}{c+x*} \overline{a}(s) \coloneqq \int_{0}^{1} a(s,t) \edv t. \end{IEEEeqnarray*} Then, \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\pm \del_s \overline{a}(s)}\\ \quad & = & \pm \int_{0}^{1} \del_s a(s,t) \edv t & \quad [\text{by definition of $\overline{a}$}] \\ & = & \pm \int_{0}^{1} f_s^* \alpha \mp \int_{0}^{1} \alpha(X_H(\mathbf{u}(s,t))) \edv t & \quad [\text{by \cref{lem:floer eq proj}}] \\ & = & \pm \int_{0}^{1} \rho_{\pm}^* \alpha \mp \int_{s}^{\pm \infty} \int_{0}^{1} f^* \edv \alpha \mp \int_{0}^{1} \alpha(X_H(\mathbf{u}(s,t))) \edv t & \quad [\text{by Stokes' theorem}] \\ & \leq & \pm \int_{0}^{1} \rho_{\pm}^* \alpha \mp \int_{0}^{1} \alpha(X_H(\mathbf{u}(s,t))) \edv t & \quad [\text{since $f^* \edv \alpha \geq 0$}] \\ & = & \pm \int_{0}^{1} \del_r h(\pm \infty, t, z_{\pm}, e^{r_{\pm}}) \edv t \mp \int_{0}^{1} \del_r h(s, t, w(s), e^{a(s,t)}) \edv t & \quad [\text{by \cref{lem:reeb equals hamiltonian on symplectization}}] \\ & \leq & \pm \int_{0}^{1} \del_r h(\pm \infty, t, z_{\pm}, e^{r_{\pm}}) \edv t \mp \int_{0}^{1} \del_r h(s, t, w(s), e^{r_{\pm}}) \edv t & \quad [\text{by Equation \eqref{lem:asymptotic behaviour gen 1b}}] \\ & \leq & \pm \int_{0}^{1} \del_r h(\pm \infty, t, z_{\pm}, e^{r_{\pm}}) \edv t \mp \int_{0}^{1} \del_r h(s, t, z_{\pm}, e^{r_{\pm}}) \edv t & \quad [\text{by Equation \eqref{lem:asymptotic behaviour gen 2b}}] \\ & \leq & 0 & \quad [\text{by Equation \eqref{lem:asymptotic behaviour gen 3}}]. \end{IEEEeqnarray*} Since $\pm \del_s \overline{a}(s) \leq 0$ and $\overline{a}(\pm \infty) = r_{\pm}$, we have that $\overline{a}(s) \geq r_{\pm}$ for all $s$. By assumption, $a(s,t) \leq r_{\pm}$, and therefore $a(s,t) = r_{\pm}$ for all $(s,t) \in Z^{\pm}$. This implies that every inequality in the previous computation is an equality, and in particular $f^* \edv \alpha = 0$. Therefore, $f$ is independent of $s$ and $f(s,t) = \rho_{\pm}(t)$ for all $(s,t) \in Z^{\pm}$. \end{proof} The following lemma is an adaptation of a result originally proven by Abouzaid--Seidel \cite[Lemma 7.2]{abouzaidOpenStringAnalogue2010}. 
Other authors have proven variations of this result, namely Ritter \cite[Lemma D.3]{ritterTopologicalQuantumField2013}, Gutt \cite[Theorem 3.1.6]{guttMinimalNumberPeriodic2014} and Cieliebak--Oancea \cite[Lemma 2.2]{cieliebakSymplecticHomologyEilenberg2018}. \begin{lemma}[no escape] \label{lem:no escape} Let $V \subset (X, \lambda)$ be a Liouville domain such that $\iota \colon V \longrightarrow (X, \lambda)$ is a strict Liouville embedding, $H \colon \dot{\Sigma} \times S \times \hat{X} \longrightarrow \R$ be an admissible Hamiltonian, $J \colon \dot{\Sigma} \times S \times \hat{X} \longrightarrow \End(T \hat{X})$ be a compatible almost complex structure and $\mathbf{u} = (\id_{\dot{\Sigma}}, w, u) \colon \dot{\Sigma} \longrightarrow \dot{\Sigma} \times S \times \hat{X}$ be a solution of the parametrized Floer equation such that all the asymptotic $1$-periodic orbits of $\mathbf{u}$ are inside $V$. Assume that there exists $\varepsilon > 0$ such that: \begin{enumerate} \item The restriction of $H$ to $\dot{\Sigma} \times S \times (-\varepsilon, \varepsilon) \times \del V$ is independent of $\del V$. \item The restriction of \parbox{\widthof{$H$}}{$J$} to $\dot{\Sigma} \times S \times (-\varepsilon, \varepsilon) \times \del V$ is cylindrical. \item If $\mathcal{A}_{H} \colon \dot{\Sigma} \times S \times (-\varepsilon,\varepsilon) \longrightarrow \R$ is given by $\mathcal{A}_H(z,w,r) \coloneqq \lambda(X_H)(z,w,r) - H(z,w,r)$, then for every $(z,w,r) \in \dot{\Sigma} \times S \times (-\varepsilon,\varepsilon)$, \begin{IEEEeqnarray*}{rCls+x*} \mathcal{A}_H(z,w,r) \, \edv \tau & \leq & 0, \plabel{eq:no escape eq 1} \\ \edv_{\dot{\Sigma}} \mathcal{A}_H(z,w,r) \wedge \tau & \leq & 0, \plabel{eq:no escape eq 2} \\ \p{<}{}{\nabla_S \mathcal{A}_H(z,w,r), \nabla \tilde{f}(w)} \, \sigma \wedge \tau & \leq & 0. \plabel{eq:no escape eq 3} \end{IEEEeqnarray*} \end{enumerate} Then, $\img u \subset V$. \end{lemma} \begin{proof} Assume by contradiction that $\img u$ is not contained in $V$. After changing $V$ to $\hat{V} \setminus \{ (r,x) \in \R \times \del V \mid r > r_0 \}$, for some $r_0 \in (-\varepsilon,\varepsilon)$, we may assume without loss of generality that $\img u$ is not contained in $V$ and that $u$ is transverse to $\del V$. Then, ${\Sigma_V} \coloneqq u ^{-1}(\hat{X} \setminus \itr V)$ is a compact surface with boundary. We show that $E({u}|_{\Sigma_V}) = 0$. 
\begin{IEEEeqnarray*}{rCls+x*} 0 & \leq & \frac{1}{2} \int_{\Sigma_V}^{} \| \dv u - X_{H} (\mathbf{u}) \tensorpr \tau \|^2_{J(\mathbf{u}), \edv \lambda} \, \omega _{\Sigma_V} & \quad [\text{by positivity of norms}] \\ & \leq & \int_{{\Sigma_V}} \edv (u^* \lambda - H(\mathbf{u}) \, \tau) & \quad [\text{by the computation in \cref{lem:action energy for floer trajectories}}] \\ & = & \int_{\del {\Sigma_V}}^{} u^* \lambda - H(\mathbf{u}) \, \tau & \quad [\text{by Stokes' theorem}] \\ & \leq & \int_{\del {\Sigma_V}}^{} u^* \lambda - \lambda(X_H(\mathbf{u})) \, \tau & \quad [\text{(a), proven below}] \\ & = & \int_{\del {\Sigma_V}}^{} \lambda \circ (\dv u - X_H(\mathbf{u}) \tensorpr \tau) & \quad [\text{by definition of pullback}] \\ & = & - \int_{\del {\Sigma_V}}^{} \lambda \circ J(\mathbf{u}) \circ (\dv u - X_H(\mathbf{u}) \tensorpr \tau) \circ j & \quad [\text{$\dv u - X_H(\mathbf{u}) \tensorpr \tau$ is holomorphic}] \\ & = & - \int_{\del {\Sigma_V}}^{} \edv \exp \circ (\dv u - X_H(\mathbf{u}) \tensorpr \tau) \circ j & \quad [\text{$J$ is cylindrical near $u(\del {\Sigma_V}) \subset \del V$}] \\ & = & - \int_{\del {\Sigma_V}}^{} \edv \exp \circ \dv u \circ j & \quad [\text{$H$ is independent of $\del V$}] \\ & \leq & 0 & \quad [\text{(b), proven below}]. \end{IEEEeqnarray*} The proof of (a) is the computation \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\int_{\del {\Sigma_V}}^{} ( \lambda(X_H(\mathbf{u})) - H(\mathbf{u}) ) \, \tau}\\ \quad & = & \int_{\del {\Sigma_V}}^{} \mathcal{A}_H(z,w,r_0) \, \tau & \quad [\text{by definition of $\mathcal{A}_H$ and $u(\del {\Sigma_V}) \subset \del V$}] \\ & = & \int_{{\Sigma_V}}^{} \edv_{{\Sigma_V}} (\mathcal{A}_H(z,w,r_0) \, \tau) & \quad [\text{by Stokes' theorem}] \\ & \leq & 0 & \quad [\text{by the assumptions on $\mathcal{A}_H$}]. \end{IEEEeqnarray*} Statement (b) is true because if $\xi$ is a vector tangent to $\del {\Sigma_V}$ giving the boundary orientation, then $j (\xi)$ points into ${\Sigma_V}$, therefore $\dv u \circ j (\xi)$ points out of $V$. Then, we conclude that $E({u}|_{\Sigma_V}) = 0$ and that $\dv u = X_H(\mathbf{u}) \tensorpr \tau$, and since $X_H(\mathbf{u})$ is tangent to $\del V$ it follows that $\img u \subset \del V$. This contradicts the fact that $u$ is not contained in $V$. \end{proof} \section{Compactness for solutions of the parametrized Floer equation} In this section, we assume that $(\dot{\Sigma}, j, \sigma, \tau) = (\R \times S^1, j, \edv s, \edv t)$ is the cylinder from \cref{exa:sphere and cylinder}. Suppose that $H \colon \dot{\Sigma} \times S \times \hat{X} \longrightarrow \R$ is an admissible Hamiltonian as in \cref{def:admissible hamiltonian abstract}. In this case, there exist Hamiltonians $H^{\pm} \colon S^1 \times S \times \hat{X} \longrightarrow \R$ such that $H(s,t,w,x) = H^{\pm}(t,w,x)$ for $\pm s \geq s_0$. Assume also that $J \colon \dot{\Sigma} \times S \times \hat{X} \longrightarrow \End(T \hat{X})$ is an admissible almost complex structure as in \cref{def:admissible acs abstract}, which has associated limit almost complex structures $J^{\pm} \colon S^1 \times S \times \hat{X} \longrightarrow \End(T \hat{X})$. Note that since $\dot{\Sigma} = \R \times S^1$, we can also view $H^{\pm}$ and $J^{\pm}$ as maps whose domain is $\dot{\Sigma}$. 
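\begin{example} The following is only an illustrative sketch and is not needed in the sequel; the cutoff $\beta$ and the constant $s_0 > 0$ are auxiliary data introduced here. Suppose that $H^{\pm} \colon S^1 \times S \times \hat{X} \longrightarrow \R$ satisfy $H^+ \leq H^-$ pointwise, and let $\beta \colon \R \longrightarrow [0,1]$ be a smooth nondecreasing function with $\beta(s) = 0$ for $s \leq -s_0$ and $\beta(s) = 1$ for $s \geq s_0$. Then one natural candidate for a Hamiltonian interpolating between $H^-$ and $H^+$ is
\begin{IEEEeqnarray*}{c+x*}
H(s,t,w,x) \coloneqq (1 - \beta(s)) \, H^-(t,w,x) + \beta(s) \, H^+(t,w,x),
\end{IEEEeqnarray*}
for which $\edv \tau = 0$ and $\edv_{\dot{\Sigma}} H \wedge \tau = \beta'(s) \, (H^+ - H^-) \, \edv s \wedge \edv t \leq 0$ with respect to the orientation given by $\edv s \wedge \edv t$. The remaining conditions of \cref{def:admissible hamiltonian abstract} still have to be checked for the specific $H^{\pm}$ at hand.
\end{example}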
For $N \in \Z_{\geq 1}$ and $L, \nu = 1,\ldots,N$, define \begin{IEEEeqnarray*}{c+x*} H^{L,\nu} \coloneqq \begin{cases} H^{+} & \text{if } \nu > L, \\ H & \text{if } \nu = L, \\ H^{-} & \text{if } \nu < L, \end{cases} \quad J^{L,\nu} \coloneqq \begin{cases} J^{+} & \text{if } \nu > L, \\ J & \text{if } \nu = L, \\ J^{-} & \text{if } \nu < L. \end{cases} \end{IEEEeqnarray*} Finally, let $(H_m)_m$ be a sequence of admissible Hamiltonians converging to $H$, $(J_m)_m$ be a sequence of admissible almost complex structures converging to $J$, and for every $m \in \Z_{\geq 1}$ let $(w_m, u_m)$ be a solution of the parametrized Floer equation with respect to $H_m, J_m$ with asymptotes $(z^\pm_m, \gamma^\pm_m)$. \begin{definition} \label{def:broken floer cylinder} Let $(z^{\pm}, \gamma^{\pm})$ be Hamiltonian $1$-periodic orbits of $H^{\pm}$. A \textbf{broken Floer trajectory} from $(z^-, \gamma^-)$ to $(z^+, \gamma^+)$ is given by: \begin{enumerate} \item Numbers $N \in \Z_{\geq 1}$ and $L = 1, \ldots, N$; \item Hamiltonian $1$-periodic orbits $(z^-, \gamma^-) = (z^1, \gamma^1), \ldots, (z^L, \gamma^L)$ of $H^-$ and Hamiltonian $1$-periodic orbits $(z^{L+1}, \gamma^{L+1}), \ldots, (z^{N+1}, \gamma^{N+1}) = (z^+, \gamma^+)$ of $H^+$; \item For every $\nu = 1, \ldots, N$, a Floer trajectory $(w^\nu,u^\nu)$ with respect to $H^{L,\nu}, J^{L,\nu}$ with negative asymptote $(z^\nu, \gamma^\nu)$ and positive asymptote $(z^{\nu+1}, \gamma^{\nu+1})$. \end{enumerate} \end{definition} \begin{definition} We say that $(w_m, u_m)_{m}$ \textbf{converges} to $(w^{\nu}, u^{\nu})_{\nu}$ if there exist numbers $s^1_m \leq \cdots \leq s^N_m$ such that \begin{IEEEeqnarray*}{rCls+x*} \lim_{m \to +\infty} s^L_m & \in & \R, \\ \lim_{m \to +\infty} (s^{\nu + 1}_m - s^\nu_m) & = & + \infty, \\ \lim_{m \to +\infty} w_m( \cdot + s^\nu_m) & = & w^\nu, \\ \lim_{m \to +\infty} u_m( \cdot + s^\nu_m, \cdot) & = & u^\nu. \end{IEEEeqnarray*} \end{definition} \begin{theorem} \label{thm:compactness in s1eft} There exists a subsequence (whose index we still denote by $m$) and a broken Floer trajectory $(w^{\nu}, u^{\nu})_{\nu}$ such that $(w_m, u_m)_m$ converges to $(w^{\nu}, u^{\nu})_{\nu}$. \end{theorem} \begin{proof} Since $f \colon C \longrightarrow \R$ is Morse and $H_{z,w} \colon S^1 \times \hat{X} \longrightarrow \R$ is nondegenerate for every puncture $z$ and critical point $w$ of $\tilde{f}$, we conclude that we can pass to a subsequence such that $(z_m^{\pm}, \gamma_m^{\pm})$ converges to $(z^{\pm}, \gamma^{\pm})$. By compactness in Morse theory, there exists a further subsequence and a broken Morse trajectory $(w^\nu)_{\nu = 1,\ldots,N}$, where $w^{\nu} \colon \R \longrightarrow S$ is a Morse trajectory from $z^{\nu}$ to $z^{\nu + 1}$, $z^1 = z^-$ and $z^{N+1} = z^+$, such that $(w_m)_m$ converges in the sense of Morse theory to $(w^{\nu})_{\nu}$. More precisely, this means that there exist numbers $s^1_m \leq \cdots \leq s^N_m$ and $L \leq N$ such that \begin{IEEEeqnarray*}{rCls+x*} \lim_{m \to +\infty} s^L_m & \in & \R, \\ \lim_{m \to +\infty} (s^{\nu+1}_m - s^\nu_m) & = & + \infty, \\ \lim_{m \to +\infty} w_m(\cdot + s^\nu_m) & = & w^\nu. \end{IEEEeqnarray*} Possibly after reparametrizing the $w^\nu$, we may assume that $s^L_m = 0$ for every $m$.
Now, for $\nu = 1,\ldots,N$, define \begin{IEEEeqnarray*}{rCLCRCl} u^\nu_m \colon \R \times S^1 & \longrightarrow & \hat{X}, & \quad & u^\nu_m(s,t) & = & u_m(s + s^\nu_m, t), \\ H^\nu_m \colon \R \times S^1 \times \hat{X} & \longrightarrow & \R, & \quad & H^\nu_m(s,t,x) & = & H_m(s + s^\nu_m, t, w_m(s + s^\nu_m), x), \\ H^\nu \colon \R \times S^1 \times \hat{X} & \longrightarrow & \R, & \quad & H^\nu(s,t,x) & = & H^{L,\nu}(s, t, w^\nu(s), x), \\ J^\nu_m \colon \R \times S^1 \times \hat{X} & \longrightarrow & \End(T \hat{X}), & \quad & J^\nu_m(s,t,x) & = & J_m(s + s^\nu_m, t, w_m(s + s^\nu_m), x), \\ J^\nu \colon \R \times S^1 \times \hat{X} & \longrightarrow & \End(T \hat{X}), & \quad & J^\nu(s,t,x) & = & J^{L,\nu}(s, t, w^\nu(s), x). \end{IEEEeqnarray*} Then, $u^\nu_m$ is a solution of the equation \begin{IEEEeqnarray*}{c+x*} \pdv{u^\nu_m}{s} = - J^\nu_m(s,t,u^\nu_m) \p{}{2}{ \pdv{u^\nu_m}{t} - X_{H^\nu_m}(s,t,u^\nu_m) }, \end{IEEEeqnarray*} and \begin{IEEEeqnarray*}{rCls+x*} \lim_{m \to + \infty} H^\nu_m & = & H^\nu, \\ \lim_{m \to + \infty} J^\nu_m & = & J^\nu. \end{IEEEeqnarray*} By compactness in Floer theory, there exists a further subsequence such that for every $\nu = 1,\ldots,N$ there exists a broken Floer trajectory $(u^{\nu,\mu})_{\mu = 1,\ldots,M_{\nu}}$ with respect to $(H^\nu, J^\nu)$, where $u^{\nu,\mu}$ is a Floer trajectory from $\gamma^{\nu,\mu}$ to $\gamma^{\nu,\mu+1}$, such that \begin{IEEEeqnarray*}{rCls+x*} \gamma^{1,1} & = & \gamma^-, \\ \gamma^{N,M_{N}+1} & = & \gamma^+, \end{IEEEeqnarray*} and $(u^\nu_m)_m^{}$ converges to $(u^{\nu,\mu})_{\mu}$. More precisely, this means that there exist $L_\nu \in \{1,\ldots,M_\nu\}$ and numbers $s_m^{\nu,1} \leq \cdots \leq s_m^{\nu,M_\nu}$ such that \begin{IEEEeqnarray*}{rCls+x*} \lim_{m \to +\infty} s_m^{\nu,L_\nu} & \in & \R, \\ \lim_{m \to +\infty} (s_m^{\nu,\mu+1} - s_m^{\nu,\mu}) & = & + \infty, \\ \lim_{m \to +\infty} u^{\nu}_m(\cdot + s^{\nu,\mu}_m, \cdot) & = & u^{\nu,\mu}. \end{IEEEeqnarray*} Consider the list $(w^\nu, u^{\nu,\mu})_{\nu,\mu}$ ordered according to the dictionary order of the indices $\nu, \mu$. In this list, if two elements $(w^\nu, u^{\nu,\mu})$, $(w^{\nu'}, u^{\nu',\mu'})$ are equal then they must be adjacent. The list obtained from $(w^\nu, u^{\nu,\mu})_{\nu,\mu}$ by removing duplicate elements is the desired broken Floer trajectory. \end{proof} \section{Transversality for solutions of the parametrized Floer equation} In this section, let $(\dot{\Sigma}, j, \sigma, \tau) = (\R \times S^1, j, \edv s, \edv t)$ be the cylinder from \cref{exa:sphere and cylinder} and $(X, \lambda)$ be a nondegenerate Liouville domain. Let $H \colon S^1 \times S \times \hat{X} \longrightarrow \R$ be a function such that the pullback $H \colon \R \times S^1 \times S \times \hat{X} \longrightarrow \R$ is as in \cref{def:admissible hamiltonian abstract}. Define $\mathcal{J}$ to be the set of almost complex structures $J \colon S^1 \times S \times \hat{X} \longrightarrow \End(T \hat{X})$ such that the pullback $J \colon \R \times S^1 \times S \times \hat{X} \longrightarrow \End(T \hat{X})$ is as in \cref{def:admissible acs abstract}. The set $\mathcal{J}$ admits the structure of a smooth Fréchet manifold, and therefore the tangent space $T_{J} \mathcal{J}$ at $J$ is a Fréchet space. Let $(z^{\pm}, \gamma^{\pm})$ be $1$-periodic orbits of $H$, i.e. $z^{\pm} \in S$ is a critical point of $\tilde{f}$ and $\gamma^{\pm}$ is a $1$-periodic orbit of $H_{z^{\pm}} \colon S^1 \times \hat{X} \longrightarrow \R$.
If $w \colon \R \longrightarrow S$ and $u \colon \R \times S^1 \longrightarrow \hat{X}$ are maps, we will denote by $\mathbf{u}$ the map \begin{IEEEeqnarray*}{c+x*} \mathbf{u} \colon \R \times S^1 \longrightarrow S^1 \times S \times \hat{X}, \qquad \mathbf{u}(s,t) \coloneqq (t, w(s), u(s,t)). \end{IEEEeqnarray*} The pair $(w,u)$ is a solution of the parametrized Floer equation if \begin{IEEEeqnarray*}{rCls+x*} \partial_s w - \nabla \tilde{f}(w) & = & 0, \\ (\dv u - X_H(\mathbf{u}) \otimes \tau)^{0,1}_{J(\mathbf{u}), j} & = & 0. \end{IEEEeqnarray*} Define $[z^{\pm}, \gamma^{\pm}]$ to be the equivalence class \begin{IEEEeqnarray*}{rCls+x*} [z^{\pm}, \gamma^{\pm}] & \coloneqq & \{ t \cdot (z^{\pm}, \gamma^{\pm}) \mid t \in S^1 \} \\ & = & \{ (t \cdot z^{\pm}, \gamma^{\pm}(\cdot + t)) \mid t \in S^1 \}, \end{IEEEeqnarray*} and denote by $\hat{\mathcal{M}}(X,H,J,[z^+,\gamma^+],[z^-,\gamma^-])$ the moduli space of solutions $(w,u) \in C^{\infty}(\R, S) \times C^{\infty}(\R \times S^1, \hat{X})$ of the parametrized Floer equation such that \begin{IEEEeqnarray*}{c+x*} \lim_{s \to \pm \infty} (w(s), u(s, \cdot)) \in [z^{\pm}, \gamma^{\pm}]. \end{IEEEeqnarray*} Denote by $\mathcal{M}$ the moduli space of gradient flow lines $w \colon \R \longrightarrow S$ of $\tilde{f}$ such that \begin{IEEEeqnarray*}{c+x*} \lim_{s \to \pm \infty} w(s) \in [z^{\pm}]. \end{IEEEeqnarray*} By the assumptions on $(S, g^{S}, \tilde{f})$ explained in \cref{sec:floer trajectories} and \cite[Section 3.2]{austinMorseBottTheoryEquivariant1995}, the space $\mathcal{M}$ is a smooth finite dimensional manifold. Moreover, \begin{IEEEeqnarray}{c+x*} \dim \mathcal{M} = \morse(z^+) - \morse(z^-) + 1. \plabel{eq:dimension of m} \end{IEEEeqnarray} Let $\varepsilon = (\varepsilon_{\ell})_{\ell \in \Z_{\geq 0}}$ be a sequence of positive numbers $\varepsilon_{\ell}$ such that $\lim_{\ell \to +\infty} \varepsilon_{\ell} = 0$. Define a function \begin{IEEEeqnarray*}{rrCl} \| \cdot \|^{\varepsilon} \colon & T_{J_{\mathrm{ref}}} \mathcal{J} & \longrightarrow & [0, + \infty] \\ & Y & \longmapsto & \sum_{\ell=0}^{+ \infty} \varepsilon_{\ell} \| Y \|_{C^{\ell}(S^1 \times S \times X)}, \end{IEEEeqnarray*} where $\| \cdot \|_{C^{\ell}(S^1 \times S \times X)}$ is the $C^{\ell}$-norm which is determined by some finite covering of $T {X} \longrightarrow S^1 \times S \times X$ by coordinate charts and local trivializations. Define \begin{IEEEeqnarray*}{c+x*} T^{\varepsilon}_{J_{\mathrm{ref}}} \mathcal{J} \coloneqq \{ Y \in T_{J_{\mathrm{ref}}} \mathcal{J} \mid \| Y \|^{\varepsilon} < + \infty \}. \end{IEEEeqnarray*} By \cite[Lemma 5.1]{floerUnregularizedGradientFlow1988}, $(T^{\varepsilon}_{J_{\mathrm{ref}}} \mathcal{J}, \| \cdot \|^{\varepsilon})$ is a Banach space consisting of smooth sections and containing sections with support in arbitrarily small sets. For every $Y \in T_{J_{\mathrm{ref}}}^{\varepsilon} \mathcal{J}$, define \begin{IEEEeqnarray*}{c+x*} \exp_{J_{\mathrm{ref}}}(Y) \coloneqq J_{Y} \coloneqq \p{}{2}{1 + \frac{1}{2} J_{\mathrm{ref}} Y} J_{\mathrm{ref}} \p{}{2}{1 + \frac{1}{2} J_{\mathrm{ref}} Y}^{-1}. \end{IEEEeqnarray*} There exists a neighbourhood $\mathcal{O} \subset T_{J_{\mathrm{ref}}}^{\varepsilon} \mathcal{J}$ of $0$ such that $\exp_{J_{\mathrm{ref}}}^{} \colon \mathcal{O} \longrightarrow \mathcal{J}$ is injective.
Define $\mathcal{J}^{\varepsilon} \coloneqq \exp_{J_{\mathrm{ref}}}^{}(\mathcal{O})$, which is automatically a Banach manifold with one global parametrization $\exp_{J_{\mathrm{ref}}}^{} \colon \mathcal{O} \longrightarrow \mathcal{J}^{\varepsilon}$. The tangent space of $\mathcal{J}^{\varepsilon}$ at $J_{\mathrm{ref}}$ is given by \begin{IEEEeqnarray*}{c+x*} T_{J_{\mathrm{ref}}} \mathcal{J}^{\varepsilon} = T_{J_{\mathrm{ref}}}^{\varepsilon} \mathcal{J}. \end{IEEEeqnarray*} Notice that the definition of $\mathcal{J}^{\varepsilon}$ involved making several choices, namely the sequence $\varepsilon$, the choices necessary to define the $C^{\ell}$-norm, and a reference almost complex structure $J_{\mathrm{ref}}$. \begin{definition} For $w \in \mathcal{M}$, let $\mathcal{F}_w$ be the Banach manifold of maps $u \colon \R \times S^1 \longrightarrow \hat{X}$ of the form \begin{IEEEeqnarray*}{c+x*} u(s,t) = \exp_{u_0(s,t)} \xi(s,t), \end{IEEEeqnarray*} where \begin{IEEEeqnarray*}{rCls+x*} u_0 & \in & C^{\infty}(\R \times S^1, \hat{X}) \text{ is such that } \lim_{s \to \pm \infty} (w(s), u_0(s, \cdot)) \in [z^{\pm}, \gamma^{\pm}], \\ \xi & \in & W^{1,p}(\R \times S^1, u_0^* T \hat{X}). \end{IEEEeqnarray*} \end{definition} \begin{definition} For $J \in \mathcal{J}^{\varepsilon}$, we define a bundle $\pi^J \colon \mathcal{E}^J \longrightarrow \mathcal{B}$ as follows. The base, fibre and total space are given by \begin{IEEEeqnarray*}{rCls+x*} \mathcal{B} & \coloneqq & \{ (w,u) \mid w \in \mathcal{M}, \, u \in \mathcal{F}_w \}, \\ \mathcal{E}^J_{(w,u)} & \coloneqq & L^p(\Hom^{0,1}_{J(\mathbf{u}), j} (T \dot{\Sigma}, u^* T \hat{X})), \\ \mathcal{E}^J & \coloneqq & \{ (w,u,\xi) \mid (w,u) \in \mathcal{B}, \, \xi \in \mathcal{E}^J_{(w,u)} \}. \end{IEEEeqnarray*} The projection is given by $\pi^J(w,u,\xi) \coloneqq (w,u)$. The \textbf{Cauchy--Riemann operator} is the section $\delbar\vphantom{\partial}^J \colon \mathcal{B} \longrightarrow \mathcal{E}^J$ given by \begin{IEEEeqnarray*}{c+x*} \delbar\vphantom{\partial}^J(w,u) \coloneqq (\dv u - X_H(\mathbf{u}) \otimes \tau)^{0,1}_{J(\mathbf{u}),j} \in \mathcal{E}^J_{(w,u)}. \end{IEEEeqnarray*} \end{definition} With this definition, $(\delbar\vphantom{\partial}^J)^{-1}(0) = \hat{\mathcal{M}}(X,H,J,[z^+,\gamma^+],[z^-,\gamma^-])$. \begin{definition} Define the universal bundle, $\pi \colon \mathcal{E} \longrightarrow \mathcal{B} \times \mathcal{J}^{\varepsilon}$, and the \textbf{universal Cauchy--Riemann operator}, $\delbar \colon \mathcal{B} \times \mathcal{J}^{\varepsilon} \longrightarrow \mathcal{E}$, by \begin{IEEEeqnarray*}{rCls+x*} \mathcal{E} & \coloneqq & \{ (w,u,J,\xi) \mid (w,u) \in \mathcal{B}, \, J \in \mathcal{J}^{\varepsilon}, \, \xi \in \mathcal{E}^{J}_{(w,u)} \}, \\ \pi & \colon & \mathcal{E} \longrightarrow \mathcal{B} \times \mathcal{J}^{\varepsilon}, \qquad \pi(w,u,J,\xi) \coloneqq (w,u,J), \\ \delbar & \colon & \mathcal{B} \times \mathcal{J}^{\varepsilon} \longrightarrow \mathcal{E}, \qquad \delbar(w,u,J) \coloneqq \delbar\vphantom{\partial}^J(w,u). \end{IEEEeqnarray*} \end{definition} For $(w,u,J)$ such that $\delbar(w,u,J) = 0$, choose a splitting $T_{(w,u)} \mathcal{B} = T_w \mathcal{M} \oplus T_u \mathcal{F}_w$. 
The sections $\delbar\vphantom{\partial}^J$ and $\delbar$ have corresponding linearized operators, which we denote by \begin{IEEEeqnarray*}{rCls+x*} \mathbf{D}_{(w,u,J)} & \colon & T_w \mathcal{M} \oplus T_u \mathcal{F}_w \longrightarrow \mathcal{E}^J_{(w,u)}, \\ \mathbf{L}_{(w,u,J)} & \colon & T_w \mathcal{M} \oplus T_u \mathcal{F}_w \oplus T_J \mathcal{J}^{\varepsilon} \longrightarrow \mathcal{E}^J_{(w,u)}, \end{IEEEeqnarray*} respectively. We can write these operators with respect to the decompositions above as block matrices \begin{IEEEeqnarray}{rCl} \mathbf{D}_{(w,u,J)} & = & \begin{bmatrix} \mathbf{D}^{\mathcal{M}}_{(w,u,J)} & \mathbf{D}^{\mathcal{F}}_{(w,u,J)} \end{bmatrix}, \plabel{eq:splitting linearized ops 1} \\ \mathbf{L}_{(w,u,J)} & = & \begin{bmatrix} \mathbf{D}^{\mathcal{M}}_{(w,u,J)} & \mathbf{D}^{\mathcal{F}}_{(w,u,J)} & \mathbf{J}_{(w,u,J)} \end{bmatrix}. \plabel{eq:splitting linearized ops 2} \end{IEEEeqnarray} Let $\tau$ be a trivialization of $u^* T \hat{X}$ and denote also by $\tau$ the induced trivializations of $(\gamma^{\pm})^* T \hat{X}$. We can consider the Conley--Zehnder indices $\conleyzehnder^{\tau}(\gamma^{\pm})$ of $\gamma^{\pm}$ computed with respect to $\tau$. We denote $\ind^{\tau}(z^{\pm}, \gamma^{\pm}) \coloneqq \morse(z^\pm) + \conleyzehnder^{\tau}(\gamma^{\pm})$. \begin{theorem} \phantomsection\label{thm:s1eft d is fredholm} The operators $\mathbf{D}^{\mathcal{F}}_{(w,u,J)}$ and $\mathbf{D}_{(w,u,J)}$ are Fredholm and \begin{IEEEeqnarray}{rCls+x*} \operatorname{ind} \mathbf{D}^{\mathcal{F}}_{(w,u,J)} & = & \conleyzehnder^{\tau}(\gamma^+) - \conleyzehnder^{\tau}(\gamma^-), \plabel{eq:s1eft fredholm ind 1} \\ \operatorname{ind} \mathbf{D}_{(w,u,J)} & = & \ind^{\tau}(z^+, \gamma^+) - \ind^{\tau}(z^-,\gamma^-) + 1. \plabel{eq:s1eft fredholm ind 2} \end{IEEEeqnarray} \end{theorem} \begin{proof} The operator $\mathbf{D}^{\mathcal{F}}_{(w,u,J)}$ is the linearized operator in Floer theory, which is Fredholm and has index given by Equation \eqref{eq:s1eft fredholm ind 1}. Therefore, \begin{IEEEeqnarray*}{c+x*} 0 \oplus \mathbf{D}^{\mathcal{F}}_{(w,u,J)} \colon T_w \mathcal{M} \oplus T_u \mathcal{F}_w \longrightarrow \mathcal{E}^J_{(w,u)} \end{IEEEeqnarray*} is Fredholm and \begin{IEEEeqnarray}{c+x*} \operatorname{ind} (0 \oplus \mathbf{D}^{\mathcal{F}}_{(w,u,J)}) = \dim T_w \mathcal{M} + \operatorname{ind} \mathbf{D}^{\mathcal{F}}_{(w,u,J)}. 
\plabel{eq:index of operator floer} \end{IEEEeqnarray} Since $\mathbf{D}^{\mathcal{M}}_{(w,u,J)} \oplus 0 \colon T_w \mathcal{M} \oplus T_u \mathcal{F}_w \longrightarrow \mathcal{E}^J_{(w,u)}$ is compact, the operator \begin{IEEEeqnarray*}{c+x*} \mathbf{D}_{(w,u,J)} = \mathbf{D}^{\mathcal{M}}_{(w,u,J)} \oplus \mathbf{D}^{\mathcal{F}}_{(w,u,J)} = \mathbf{D}^{\mathcal{M}}_{(w,u,J)} \oplus 0 + 0 \oplus \mathbf{D}^{\mathcal{F}}_{(w,u,J)} \end{IEEEeqnarray*} is Fredholm and \begin{IEEEeqnarray*}{rCls+x*} \operatorname{ind} \mathbf{D}_{(w,u,J)} & = & \operatorname{ind} (\mathbf{D}^{\mathcal{M}}_{(w,u,J)} \oplus \mathbf{D}^{\mathcal{F}}_{(w,u,J)}) & \quad [\text{by Equation \eqref{eq:splitting linearized ops 1}}] \\ & = & \operatorname{ind} (0 \oplus \mathbf{D}^{\mathcal{F}}_{(w,u,J)}) & \quad [\text{since $\mathbf{D}^{\mathcal{M}}_{(w,u,J)} \oplus 0$ is compact}] \\ & = & \dim T_w \mathcal{M} + \operatorname{ind} \mathbf{D}^{\mathcal{F}}_{(w,u,J)} & \quad [\text{by Equation \eqref{eq:index of operator floer}}] \\ & = & \ind^{\tau}(z^+, \gamma^+) - \ind^{\tau}(z^-,\gamma^-) + 1 & \quad [\text{by Equations \eqref{eq:dimension of m} and \eqref{eq:s1eft fredholm ind 1}}]. & \qedhere \end{IEEEeqnarray*} \end{proof} \begin{theorem} \label{thm:s1eft l is surjective} The operator $\mathbf{L}_{(w,u,J)}$ is surjective. \end{theorem} \begin{proof} It suffices to prove that \begin{IEEEeqnarray*}{c+x*} \mathbf{L}^{\mathcal{F}}_{(w,u,J)} \coloneqq \mathbf{D}^{\mathcal{F}}_{(w,u,J)} \oplus \mathbf{J}_{(w,u,J)} \colon T_u \mathcal{F}_w \oplus T_J \mathcal{J}^{\varepsilon} \longrightarrow \mathcal{E}^{J}_{(w,u)} \end{IEEEeqnarray*} is surjective. Since $\mathbf{D}^{\mathcal{F}}_{(w,u,J)}$ is Fredholm (by \cref{thm:s1eft d is fredholm}), its image is closed and has finite codimension. This implies that $\img \mathbf{L}^{\mathcal{F}}_{(w,u,J)}$ is also of finite codimension and closed. So, it suffices to show that $\img \mathbf{L}^{\mathcal{F}}_{(w,u,J)}$ is dense, which is equivalent to showing that the annihilator $\Ann \img \mathbf{L}^{\mathcal{F}}_{(w,u,J)}$ is zero. Let $\eta \in \Ann \img \mathbf{L}^{\mathcal{F}}_{(w,u,J)}$, i.e. \begin{IEEEeqnarray*}{c+x*} \eta \in L^q(\Hom^{0,1}_{J(\mathbf{u}), j} (T \dot{\Sigma}, u^* T \hat{X})) \end{IEEEeqnarray*} is such that \begin{IEEEeqnarray}{rClCsrCl} 0 & = & \p{<}{}{\eta, \mathbf{D}^{\mathcal{F}}_{(w,u,J)}(\xi)}_{L^2} & \quad & \text{ for all } & \xi & \in & T_u \mathcal{F}_w, \plabel{eq:element in annihilator 1} \\ 0 & = & \p{<}{}{\eta, \mathbf{J} _{(w,u,J)}(Y )}_{L^2} & \quad & \text{ for all } & Y & \in & T_J \mathcal{J}^{\varepsilon}. \plabel{eq:element in annihilator 2} \end{IEEEeqnarray} By Equation \eqref{eq:element in annihilator 1}, $\eta$ satisfies the Cauchy--Riemann type equation $(\mathbf{D}^{\mathcal{F}}_{(w,u,J)})^{*} \eta = 0$, and therefore $\eta$ is smooth (by elliptic regularity) and satisfies unique continuation. We prove that $\eta = 0$ in the case where $w$ is constant. In this case, writing $w(s) \eqqcolon w_0$ for every $s$, we can view $\gamma^{\pm}$ as $1$-periodic orbits of $H_{w_0}$ (after a reparametrization) and $u$ is a solution of the Floer equation: \begin{IEEEeqnarray*}{c+x*} \pdv{u}{s}(s,t) + J_{w_0}(t,u(s,t)) \p{}{2}{ \pdv{u}{t}(s,t) - X_{H_{w_0}}(t,u(s,t)) } = 0. \end{IEEEeqnarray*} Let $R(u)$ be the set of regular points of $u$, i.e. points $z = (s,t)$ such that \begin{IEEEeqnarray}{c+x*} \plabel{eq:set of regular points of u} \pdv{u}{s}(s,t) \neq 0, \qquad u(s,t) \neq \gamma^{\pm}(t), \qquad u(s,t) \notin u(\R - \{s\}, t).
\end{IEEEeqnarray} By \cite[Theorem 4.3]{floerTransversalityEllipticMorse1995}, $R(u)$ is open. By unique continuation, it is enough to show that $\eta$ vanishes in $R(u)$. Let $z_0 = (s_0,t_0) \in R(u)$ and assume by contradiction that $\eta(z_0) \neq 0$. By \cite[Lemma 3.2.2]{mcduffHolomorphicCurvesSymplectic2012}, there exists $Y \in T_J \mathcal{J}$ such that \begin{IEEEeqnarray}{c+x*} \plabel{eq:variation of acs before cut off} \p{<}{}{\eta(z_0), Y(\mathbf{u}(z_0)) \circ (\dv u(z_0) - X_H(\mathbf{u}(z_0)) \otimes \tau_{z_0}) \circ j_{z_0} } > 0. \end{IEEEeqnarray} Choose a neighbourhood $V = V_{\R} \times V_{S^1}$ of $z_0 = (s_0,t_0)$ in $\dot{\Sigma} = \R \times S^1$ such that \begin{IEEEeqnarray}{c+x*} \plabel{eq:inner product bigger than 0 in v} \p{<}{}{\eta, Y(\mathbf{u}) \circ (\dv u - X_H(\mathbf{u}) \otimes \tau) \circ j }|_V > 0. \end{IEEEeqnarray} Since $z_0$ is as in \eqref{eq:set of regular points of u}, there exists a neighbourhood $U_{\hat{X}}$ of $u(z_0)$ in $\hat{X}$ such that \begin{IEEEeqnarray*}{c+x*} u(s,t) \in U_{\hat{X}} \Longrightarrow s \in V_{\R}. \end{IEEEeqnarray*} Choose a slice $A \subset S^1 \times S$ which contains $(t_0, w_0)$ and which is transverse to the action of $S^1$ on $S^1 \times S$. Define $U_{S^1 \times S} = S^1 \cdot A$. For $A$ chosen small enough, \begin{IEEEeqnarray*}{c+x*} (t, w_0) \in U_{S^1 \times S} \Longrightarrow t \in V_{S^1}. \end{IEEEeqnarray*} Then, defining $U \coloneqq U_{S^1 \times S} \times U_{\hat{X}}$ we have that $\mathbf{u}^{-1}(U) \subset V$. Choose an $S^1$-invariant function $\beta \colon S^1 \times S \times \hat{X} \longrightarrow [0,1]$ such that \begin{IEEEeqnarray}{c+x*} \plabel{eq:bump function for transversality} \supp \beta \subset U, \qquad \beta(\mathbf{u}(z_0)) = 1, \qquad \beta Y \in T_J \mathcal{J}^{\varepsilon}. \end{IEEEeqnarray} Here, we can achieve that $\beta Y$ is of class $C^{\varepsilon}$ by \cite[Theorem B.6]{wendlLecturesSymplecticField2016}. Since $\mathbf{u}^{-1}(U) \subset V$ and $\supp \beta \subset U$, we have that $\supp (\beta \circ \mathbf{u}) \subset V$. Then, \begin{IEEEeqnarray*}{rCls+x*} 0 & = & \p{<}{}{\eta, \mathbf{J}_{(w,u,J)}(\beta Y)}_{L^2} & \quad [\text{by Equation \eqref{eq:element in annihilator 2}}] \\ & = & \p{<}{}{\eta, \beta(\mathbf{u}) \, \mathbf{J}_{(w,u,J)}(Y)}_{L^2} & \quad [\text{since $\mathbf{J}_{(w,u,J)}$ is $C^\infty$-linear}] \\ & = & \p{<}{}{\eta, \beta(\mathbf{u}) \, \mathbf{J}_{(w,u,J)}(Y)}_{L^2(V)} & \quad [\text{since $\supp (\beta \circ \mathbf{u}) \subset V$}] \\ & > & 0 & \quad [\text{by Equation \eqref{eq:inner product bigger than 0 in v}}], \end{IEEEeqnarray*} which is the desired contradiction. We prove that $\eta = 0$ in the case where $w$ is not constant. Let $z_0 = (t_0, s_0) \in \R \times S^1$ and assume by contradiction that $\eta(z_0) \neq 0$. Choose $Y$ as in \eqref{eq:variation of acs before cut off} and $V$ as in \eqref{eq:inner product bigger than 0 in v}. Choose a slice $A \subset S^1 \times S$ which contains $(t_0, w(0))$ and which is transverse to the action of $S^1$ on $S^1 \times S$. Define $U_{S^1 \times S} = S^1 \cdot A$. Since $w$ is orthogonal to the infinitesimal action on $S$, for $A$ chosen small enough we have \begin{IEEEeqnarray*}{c+x*} (t, w(s)) \in U_{S^1 \times S} \Longrightarrow (s,t) \in V. \end{IEEEeqnarray*} Defining $U = U_{S^1 \times S} \times \hat{X}$, we have that $\mathbf{u}^{-1}(U) \subset V$. 
Choosing $\beta$ as in \eqref{eq:bump function for transversality}, we obtain a contradiction in the same way as in the previous case. \end{proof} \begin{remark} We recall some terminology related to the Baire category theorem (we use the terminology from \cite[Section 10.2]{roydenRealAnalysis2010}). Let $X$ be a complete metric space and $E \subset X$. Then, $E$ is \textbf{meagre} or of the \textbf{first category} if $E$ is a countable union of nowhere dense subsets of $X$. We say that $E$ is \textbf{nonmeagre} or of the \textbf{second category} if $E$ is not meagre. We say that $E$ is \textbf{comeagre} or \textbf{residual} if $X \setminus E$ is meagre. Hence, a countable intersection of comeagre sets is comeagre. With this terminology, the Baire category theorem (see \cite[Section 10.2]{roydenRealAnalysis2010}) says that if $E$ is comeagre then $E$ is dense. The Sard--Smale theorem (see \cite[Theorem 1.3]{smaleInfiniteDimensionalVersion1965}) says that if $f \colon M \longrightarrow N$ is a Fredholm map between separable connected Banach manifolds of class $C^q$, for some $q > \max \{0, \operatorname{ind} f \}$, then the set of regular values of $f$ is comeagre. \end{remark} \begin{theorem} \label{thm:transversality in s1eft} There exists a dense subset $\mathcal{J}_{\mathrm{reg}} \subset \mathcal{J}$ with the following property. Let $J \in \mathcal{J}_{\mathrm{reg}}$ be an almost complex structure, $[z^{\pm}, \gamma^{\pm}]$ be equivalence classes of $1$-periodic orbits of $H$, and $(w,u) \in \hat{\mathcal{M}}(X, H, J, [z^+, \gamma^+], [z^-, \gamma^-])$. Then, near $(w,u)$ the space $\hat{\mathcal{M}}(X, H, J, [z^+, \gamma^+], [z^-, \gamma^-])$ is a manifold of dimension \begin{IEEEeqnarray*}{c+x*} \dim_{(w,u)} \hat{\mathcal{M}}(X, H, J, [z^+, \gamma^+], [z^-, \gamma^-]) = \ind^{\tau}(z^+, \gamma^+) - \ind^{\tau}(z^-, \gamma^-) + 1. \end{IEEEeqnarray*} \end{theorem} \begin{proof} Recall that the space $\mathcal{J}^{\varepsilon}$ is defined with respect to a reference almost complex structure $J_{\mathrm{ref}}$. We will now emphasize this fact using the notation $\mathcal{J}^{\varepsilon}(J_{\mathrm{ref}})$. As a first step, we show that for every $[z^{\pm}, \gamma^{\pm}]$ and every reference almost complex structure $J_{\mathrm{ref}}$ there exists a comeagre set $\mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{}, [z^{\pm}, \gamma^{\pm}]) \subset \mathcal{J}^{\varepsilon}(J_{\mathrm{ref}})$ such that every $J \in \mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{}, [z^{\pm}, \gamma^{\pm}])$ has the property in the statement of the theorem. For shortness, for every $J$ let $\hat{\mathcal{M}}(J,[z^{\pm}, \gamma^{\pm}]) \coloneqq \hat{\mathcal{M}}(X, H, J, [z^+, \gamma^+], [z^-, \gamma^-])$. By \cref{thm:s1eft l is surjective} and the implicit function theorem \cite[Theorem A.3.3]{mcduffHolomorphicCurvesSymplectic2012}, the universal moduli space \begin{IEEEeqnarray*}{c+x*} \hat{\mathcal{M}}([z^{\pm}, \gamma^{\pm}]) \coloneqq \{ (w,u,J) \mid J \in \mathcal{J}^{\varepsilon}(J_{\mathrm{ref}}), \, (w,u) \in \hat{\mathcal{M}}(J, [z^{\pm}, \gamma^{\pm}]) \} \end{IEEEeqnarray*} is a smooth Banach manifold. Consider the smooth map \begin{IEEEeqnarray*}{c} \pi \colon \hat{\mathcal{M}}([z^{\pm}, \gamma^{\pm}]) \longrightarrow \mathcal{J}^{\varepsilon}(J_{\mathrm{ref}}), \qquad \pi(w,u,J) = J. 
\end{IEEEeqnarray*} By \cite[Lemma A.3.6]{mcduffHolomorphicCurvesSymplectic2012}, \begin{IEEEeqnarray}{rCr} \ker \dv \pi(w,u,J) & \cong & \ker \mathbf{D}_{(w,u,J)} \plabel{eq:d pi and d u have isomorphic kernels}, \\ \coker \dv \pi(w,u,J) & \cong & \coker \mathbf{D}_{(w,u,J)} \plabel{eq:d pi and d u have isomorphic cokernels}. \end{IEEEeqnarray} Therefore, $\dv \pi (w,u,J)$ is Fredholm and has the same index as $\mathbf{D}_{(w,u,J)}$. By the Sard--Smale theorem, the set $\mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{}, [z^{\pm}, \gamma^{\pm}]) \subset \mathcal{J}^{\varepsilon}(J_{\mathrm{ref}})$ of regular values of $\pi$ is comeagre. By Equation \eqref{eq:d pi and d u have isomorphic cokernels}, $J \in \mathcal{J}^{\varepsilon}(J_{\mathrm{ref}})$ is a regular value of $\pi$ if and only if $\mathbf{D}_{(w,u,J)}$ is surjective for every $(w,u) \in (\delbar\vphantom{\partial}^{J})^{-1}(0)$. Therefore, by the implicit function theorem, for every $J \in \mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{}, [z^{\pm}, \gamma^{\pm}])$ the set $\hat{\mathcal{M}}(J,[z^{\pm},\gamma^{\pm}]) = (\delbar\vphantom{\partial}^J)^{-1}(0) \subset \mathcal{B}$ is a manifold of dimension \begin{IEEEeqnarray*}{rCls+x*} \IEEEeqnarraymulticol{3}{l}{\dim_{(w,u)} \hat{\mathcal{M}}(J,[z^{\pm},\gamma^{\pm}])}\\ \quad & = & \dim \ker \mathbf{D}_{(w,u,J)} & \quad [\text{by the implicit function theorem}] \\ & = & \operatorname{ind} \mathbf{D}_{(w,u,J)} & \quad [\text{since $\mathbf{D}_{(w,u,J)}$ is surjective}] \\ & = & \ind^{\tau}(z^+, \gamma^+) - \ind^{\tau}(z^-, \gamma^-) + 1 & \quad [\text{by \cref{thm:s1eft d is fredholm}}]. \end{IEEEeqnarray*} As a second step, we show that we can switch the order of the quantifiers in the first step, i.e. that for every reference almost complex structure $J_{\mathrm{ref}}$ there exists a comeagre set $\mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{}) \subset \mathcal{J}^{\varepsilon}(J_{\mathrm{ref}})$ such that for every $J \in \mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{})$ and every $[z^{\pm}, \gamma^{\pm}]$, the property in the statement of the theorem holds. For this, define \begin{IEEEeqnarray*}{c+x*} \mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{}) \coloneqq \bigcap_{[z^{\pm}, \gamma^{\pm}]} \mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{}, [z^{\pm}, \gamma^{\pm}]). \end{IEEEeqnarray*} Since $H$ is nondegenerate, in the above expression we are taking an intersection over a finite set of data, and hence $\mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{})$ is comeagre. This finishes the proof of the second step. By the Baire category theorem, $\mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{}) \subset \mathcal{J}^{\varepsilon}(J_{\mathrm{ref}}^{})$ is dense. Finally, define \begin{IEEEeqnarray*}{c+x*} \mathcal{J}_{\mathrm{reg}} \coloneqq \bigcup_{J_{\mathrm{ref}} \in \mathcal{J}} \mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{}). \end{IEEEeqnarray*} Given any $J_{\mathrm{ref}} \in \mathcal{J}$, since $\mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{})$ is dense in $\mathcal{J}^{\varepsilon}(J_{\mathrm{ref}}^{})$ and since $\varepsilon_{\ell} \, \| \cdot \|_{C^{\ell}(S^1 \times S \times X)} \leq \| \cdot \|^{\varepsilon}$ for every $\ell$, the set $\mathcal{J}^{\varepsilon}_{\mathrm{reg}}(J_{\mathrm{ref}}^{})$ contains almost complex structures which are arbitrarily close to $J_{\mathrm{ref}}$ in $\mathcal{J}$. Therefore $\mathcal{J}_{\mathrm{reg}}$ is dense in $\mathcal{J}$, and it is the desired set of almost complex structures. \end{proof}
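\begin{remark} The following is only an illustrative consistency check, obtained by specializing the formulas already established. If $z^+ = z^-$, then $\morse(z^+) = \morse(z^-)$, so Equation \eqref{eq:dimension of m} gives $\dim \mathcal{M} = 1$ and \cref{thm:transversality in s1eft} gives
\begin{IEEEeqnarray*}{c+x*}
\dim_{(w,u)} \hat{\mathcal{M}}(X, H, J, [z^+, \gamma^+], [z^-, \gamma^-]) = \conleyzehnder^{\tau}(\gamma^+) - \conleyzehnder^{\tau}(\gamma^-) + 1,
\end{IEEEeqnarray*}
i.e. the expected dimension equals the Fredholm index of $\mathbf{D}^{\mathcal{F}}_{(w,u,J)}$ from \cref{thm:s1eft d is fredholm} plus the one extra dimension coming from $\mathcal{M}$.
\end{remark}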
2205.13318v1
http://arxiv.org/abs/2205.13318v1
On the slope inequalities for extremal curves
\documentclass[12pt]{amsart} \usepackage{amsmath} \usepackage{palatino} \usepackage{amsfonts} \usepackage{amsthm} \usepackage{amssymb} \usepackage{amscd} \usepackage[all]{xy} \usepackage{enumerate} \usepackage{graphicx} \usepackage{xcolor} \textheight22truecm \textwidth17truecm \oddsidemargin-0.5truecm \evensidemargin-0.5truecm \keywords{Gonality sequence, gonal scroll, extremal curve, Hirzebruch surface} \subjclass[2010]{Primary 14H45; Secondary 14H51, 14J26} \pagestyle{myheadings} \theoremstyle{plain} \newtheorem{thm}{Theorem}[section] \newtheorem{mainthm}[thm]{Main Theorem} \newtheorem{thmx}{Theorem} \renewcommand{\thethmx}{\!} \newtheorem{prop}[thm]{Proposition} \newtheorem{prope}[thm]{Property} \newtheorem{cor}[thm]{Corollary} \newtheorem{corx}{Corollary} \renewcommand{\thecorx}{\!} \newtheorem{lem}[thm]{Lemma} \newtheorem{cla}[thm]{Claim} \newtheorem*{clann}{Claim} \newtheorem{empthm}[thm]{} \newtheorem{op}[thm]{Operation} \theoremstyle{definition} \newtheorem{defn}[thm]{Definition} \newtheorem{empdefn}[thm]{} \newtheorem{case}[thm]{Case division} \newtheorem{conj}[thm]{Conjecture} \newtheorem{prob}[thm]{Problem} \newtheorem{probs}[thm]{Problems} \newtheorem{que}[thm]{Question} \newtheorem{expl}[thm]{Example} \newtheorem{expls}[thm]{Examples} \newtheorem{assum}[thm]{Assumption} \newtheorem{mainassum}[thm]{Main Assumption} \newtheorem{nota}[thm]{Notation} \newtheorem{const}[thm]{Construction} \newtheorem{conpro}[thm]{Construction and Proposition} \newtheorem{conv}[thm]{Convention} \newtheorem{setup}[thm]{Set-up} \newtheorem*{ackn}{Acknowledgment} \newtheorem{fig}[thm]{Figure} \newtheorem{rmk}[thm]{Remark} \newcommand{\la}{\langle} \newcommand{\ra}{\rangle} \newcommand{\sA}{\mathcal{A}} \newcommand{\sB}{\mathcal{B}} \newcommand{\sC}{\mathcal{C}} \newcommand{\sD}{\mathcal{D}} \newcommand{\sE}{\mathcal{E}} \newcommand{\sF}{\mathcal{F}} \newcommand{\sG}{\mathcal{G}} \newcommand{\sH}{\mathcal{H}} \newcommand{\sI}{\mathcal{I}} \newcommand{\sJ}{\mathcal{J}} \newcommand{\sK}{\mathcal{K}} \newcommand{\sL}{\mathcal{L}} \newcommand{\sN}{\mathcal{N}} \newcommand{\sM}{\mathcal{M}} \newcommand{\sO}{\mathcal{O}} \newcommand{\sP}{\mathcal{P}} \newcommand{\sQ}{\mathcal{Q}} \newcommand{\sR}{\mathcal{R}} \newcommand{\sS}{\mathcal{S}} \newcommand{\sT}{\mathcal{T}} \newcommand{\sU}{\mathcal{U}} \newcommand{\sV}{\mathcal{V}} \newcommand{\sW}{\mathcal{W}} \newcommand{\sX}{\mathcal{X}} \newcommand{\sZ}{\mathcal{Z}} \newcommand{\tA}{{\widetilde{A}}} \newcommand{\mA}{\mathbb{A}} \newcommand{\mC}{\mathbb{C}} \newcommand{\mE}{\mathbb{E}} \newcommand{\mF}{\mathbb{F}} \newcommand{\mR}{\mathbb{R}} \newcommand{\mK}{\mathbb{K}} \newcommand{\mN}{\mathbb{N}} \newcommand{\mP}{\mathbb{P}} \newcommand{\mQ}{\mathbb{Q}} \newcommand{\mV}{\mathbb{V}} \newcommand{\mZ}{\mathbb{Z}} \newcommand{\mW}{\mathbb{W}} \newcommand{\Ima}{\mathrm{Im}\,} \newcommand{\Ker}{\mathrm{Ker}\,} \newcommand{\Alb}{\mathrm{Alb}\,} \newcommand{\ap}{\mathrm{ap}} \newcommand{\Bs}{\mathrm{Bs}\,} \newcommand{\Chow}{\mathrm{Chow}\,} \newcommand{\CP}{\mathrm{CP}} \newcommand{\Div}{\mathrm{Div}\,} \newcommand{\expdim}{\mathrm{expdim}\,} \newcommand{\ord}{\mathrm{ord}\,} \newcommand{\Aut}{\mathrm{Aut}\,} \newcommand{\Hilb}{\mathrm{Hilb}} \newcommand{\Hom}{\mathrm{Hom}} \newcommand{\sHom}{\mathcal{H}{\!}om\,} \newcommand{\Lie}{\mathrm{Lie}\,} \newcommand{\mult}{\mathrm{mult}} \newcommand{\Pic}{\mathrm{Pic}\,} \newcommand{\Spec}{\mathrm{Spec}\,} \newcommand{\Proj}{\mathrm{Proj}\,} \newcommand{\Rhom}{{\mathbb{R}\mathcal{H}{\!}om}\,} \newcommand{\aw}{\mathrm{aw}} 
\newcommand{\exc}{\mathrm{exc}\,} \newcommand{\emb}{\mathrm{emb\text{-}dim}} \newcommand{\codim}{\mathrm{codim}\,} \newcommand{\OG}{\mathrm{OG}} \newcommand{\Sing}{\mathrm{Sing}\,} \newcommand{\Supp}{\mathrm{Supp}\,} \newcommand{\SL}{\mathrm{SL}\,} \newcommand{\Reg}{\mathrm{Reg}\,} \newcommand{\rank}{\mathrm{rank}\,} \newcommand{\VSP}{\mathrm{VSP}\,} \newcommand{\wlp}{\widetilde \Lambda _\pi\,} \newcommand{\wdt}{\widetilde } \newcommand{\Q}{Q} \newcommand{\PGL}{\mathrm{PGL}} \numberwithin{equation}{section} \newcommand{\mfour} {{\mathcal M}^1 _{g,4}} \newcommand{\Val}[1]{{\color{blue}#1}} \newcommand{\Mik}[1]{{\color{red}#1}} \title{On the slope inequalities for extremal curves } \author{Valentina Beorchia and Michela Brundu} \address{Dipartimento di Matematica e Geoscienze, Universit\`a di Trieste\\ via Valerio 12/b, 34127 Trieste, Italy.\\ \texttt{[email protected], [email protected]}} \begin{document} \begin{abstract} The present paper concerns the question of the violation of the $r$-th inequality for extremal curves in $\mP^r$, posed in [KM]. We show that the answer is negative in many cases (Theorem \ref{mainthm} and Corollary \ref{fasciabianca}). The result is obtained by a detailed analysis of the geometry of extremal curves and their canonical model. As a consequence, we show that particular curves on a Hirzebruch surface do not violate the slope inequalities in a certain range (Theorem \ref{verylast}). \end{abstract} \maketitle \markboth{}{On the slope inequalities for extremal curves} \tableofcontents \section{Introduction} Let $X$ be a smooth and connected projective curve of genus $g \ge 3$ defined over an algebraically closed field with characteristic zero. For each integer $r \ge 1$, H. Lange and P. E. Newstead in \cite[Section 4]{LN} introduced the notion of {\it $r$-gonality} $d_r(X)$ of $X$, which is the minimal integer $d$ such that there exists a linear series $g^r_d$ (hence there is a rational map $X \dasharrow \mP^r$ of degree $d$). In particular, for $r=1$ we obtain the classical {\it gonality} $\gamma(X)$ of the curve $X$. The sequence $\{ d_r(X)\}_{r \ge 1}$ is called the {\it gonality sequence of $X$}. For any curve and any $r \ge g$, the numbers $d_r$ are known by the Riemann-Roch Theorem. Hence there are only finitely many interesting numbers in a gonality sequence and in \cite{LN} it is evident that these numbers are deeply related to each other. In particular, in many cases they satisfy the {\em $r$-th slope inequality}, that is \begin{equation}\label{satisfy slope} \frac{d_r(X)}{ r} \ge \frac{d_{r+1}(X) }{ r+1} \end{equation} and this has been widely studied also in \cite{LM}. Observe that if $X$ does not satisfy some slope inequality, then the corresponding Brill--Noether number is negative (see Remark \ref{ossBN}). Consequently, a Brill--Noether general curve must satisfy all slope inequalities. The same occurs also for very special curves like hyperelliptic, trigonal and bielliptic curves (see \cite[Remark 4.5]{LN}). The gonality sequence of a curve $X$ is related to Brill--Noether theory of vector bundles on $X$ (see \cite{LN}). Moreover, if a curve $X$ satisfies the $p$-th slope inequality for any $p < n$, then semistable vector bundles of rank $n$ on X satisfy Mercat's Conjecture, which governs the dimension of their spaces of global sections (cf. \cite[Conjecture 3.1 and Corollary 4.16]{LN}). These results motivate the discussion of slope inequalities for specific classes of curves. 
Some sporadic examples of curves violating some slope inequality can be found in \cite{Ba1}, \cite{Ba2}, \cite{LM}, \cite{KM}, \cite{Pan} and various families have been detected in \cite{LM}. Among such examples there are {\em extremal curves}, that is curves attaining the Castelnuovo bound for the genus. In \cite[Theorem 4.13]{LM} the authors prove that extremal curves of degree $\ge 3r - 1$ in $\mP^r$ do not satisfy all slope inequalities. Moreover, in \cite[Theorem 4.4 and Corollary 4.5]{KM} Kato and Martens prove that an extremal curve in $\mP^3$ of degree $d \ge 10$ satisfies \begin{equation} \label{viol3} \frac{d_3}{ 3 }< \frac{d_{4} }{ {4}}. \end{equation} In the same paper (see the final Questions), the authors propose a new investigation in this direction; more precisely, they pose the following \medskip \noindent {\bf Question -} Is it true that extremal curves of degree $d \ge 3r+1$ in $\mP^r$ satisfy \begin{equation} \label{viol?} \frac{d_r}{ r }< \frac {d_{r+1} }{ {r+1}} \end{equation} for $r \ge 4$? \medskip In the present paper we show that the answer to the above question is, in many cases, negative. The main results are the following: \begin{thmx} Let $X$ be a $\gamma$-gonal extremal curve of degree $d \ge 3r - 1$ in $\mP^r$ ($r \ge 3$) where $\gamma \ge 4$. Then $d_r=d$. If, in addition, $X$ is not isomorphic to a plane curve, then $X$ satisfies the following conditions: \begin{itemize} \item[$i)$] $d_{r+1}\le d+\gamma -1$; \item[$ii)$] under some technical hypotheses on the congruence class of $d$, the $r$-th slope inequality holds, i.e. $$ \frac {d_r} r \ge \frac {d_{r+1} } {r+1}. $$ \end{itemize} \end{thmx} For more details on the assumptions, see Theorem \ref{mainthm}. As a consequence, the following result holds (see Corollary \ref{fasciabianca}): \begin{corx} Let $X$ be a $\gamma$-gonal extremal curve in $\mP^r$, where $\gamma \ge 4$. If the degree $d$ satisfies \begin{equation} r(\gamma-1) \le d \le \gamma(r-1)+1 \end{equation} then the $r$-th slope inequality holds. \end{corx} The technique involved in the proofs relies on the fact that extremal curves are either isomorphic to a smooth plane curve or they lie on a Hirzebruch surface (see Theorem \ref{acgh1}). Then, in order to bound the gonality number in question, we consider the residual divisor to a point in a general hyperplane section. It turns out that the dimension of the linear system associated with such a residual divisor can be estimated by looking at the canonical model of the curve. The organization of the paper is as follows. Section 2 contains some preliminaries about extremal curves and curves on Hirzebruch surfaces. Section 3 summarizes results on the gonality sequence and the slope inequality which can be found in the literature. Section 4 investigates the geometry and gonality of extremal curves. They are particular curves on a Hirzebruch surface, but not so rare. Indeed, thanks to Theorem \ref{everex}, any smooth irreducible curve on a Hirzebruch surface, under a suitable assumption on its class, can be embedded in a projective space as an extremal curve. Next we state and prove the main results, that is Theorem \ref{mainthm} and Corollary \ref{fasciabianca}. In Section 5, we recall a result of \cite{LM}, where the authors show that in degree $d=3r-1$ the $r$-th slope inequality is violated by extremal curves, and we prove, by an {\em ad hoc} argument, that the same holds in degree $d=3r-2$ if $r \ge 5$.
Finally, in Section 6, we focus on fourgonal extremal curves, and we show that certain foursecant curves on a Hirzebruch surface admit several embeddings as an extremal curve in some projective space. This allows us to exhibit specific classes of extremal curves whose $r$-th slope inequality holds for $r$ in a suitable interval (see Theorem \ref{verylast}). \ackn The authors are grateful to the anonymous Referees for the accurate reading of the first version of the present paper, and for the useful comments and suggestions. This research is supported by funds of the Universit\`a degli Studi di Trieste - Finanziamento di Ateneo per progetti di ricerca scientifica - FRA 2022, Project DMG. The first author is supported by italian MIUR funds, PRIN project Moduli Theory and Birational Classification (2017), and is member of GNSAGA of INdAM. \section{Notation and preliminary notions} If $x$ is a positive real number, by $[x]$ we denote the {\em integer part} of $x$, i.e. the largest integer not exceeding $x$. In this paper $\mP^{n}$ denotes the projective space over an algebraically closed field of characteristic zero. We shall also use the following notation: \begin{itemize} \item {} given a projective scheme $Z\subseteq \mP^{n}$, $\langle Z \rangle$ will denote the linear span of $Z$ in $\mP^{n}$; \item {} by a {\em curve} $X$ we shall mean a smooth irreducible curve, unless otherwise specified; \item {} given a linear system $|D|$ on a curve $X$, we will denote by $\varphi _{D}$ the morphism associated with $|D|$ and by $X_{D}= \varphi _{D} (X)$ the image of $X$ under $\varphi _{D}$; in particular, if $X$ is a non--hyperelliptic curve of genus $g$, $X_K \subset \mP^{g-1}$ will denote the {\em canonical model} of $X$; \item {} we say that a linear series $g^r_d$ is {\em simple} if for any divisor $D \in g^r_d$ the induced rational map $\varphi _{D}$ is birational onto its image. \end{itemize} \begin{defn} \label{indeko} If $X$ and $Y$ are two curves, a morphism $\varphi: \; X \rightarrow Y$ is said {\em indecomposable} if it cannot be factorized as $\varphi = \alpha \circ \beta$, where $\alpha$ and $ \beta$ are morphisms of degree bigger than one. In particular, if $Y = \mP^1$, we say that a linear series $|D| = g^1_d$ is {\em indecomposable} if the morphism $\varphi _{D}$ associated with $|D|$ is so. \end{defn} \begin{defn} The {\em gonality} of a curve $X$ is the minimum degree $d$ of a linear series $g^1_d$ on $X$; if $X$ has gonality $\gamma$ then each series $g^1_\gamma$ is called a {\em gonal series}. If $\Gamma \in g^1_\gamma$ denotes a general {\em gonal divisor} of $X$, then the morphism $\varphi_\Gamma: X \rightarrow \mP ^1$ is called a {\em gonal cover}. \end{defn} \medskip Let $X$ be a curve in $\mP ^r$ of degree $d \ge 2r+1$ (the motivation of such a bound will be explained in the forthcoming Remark \ref{remarcoa}). Setting $$ m=m_{d,r}=\left[ \frac {d-1} {r-1} \right], $$ we can write, for a suitable integer $\epsilon=\epsilon_{d,r}$, \begin{equation} \label{degm} d-1= m(r-1)+\epsilon , \quad 0\le \epsilon \le r-2. \end{equation} It is well--known that the genus $g(X)$ of $X$ satisfies the {\em Castelnuovo's bound} i.e. \begin{equation} \label{casbo} g(X) \le \pi(d,r):=m\left( \frac{m-1} 2 (r-1)+\epsilon\right). \end{equation} Clearly, the values of $m$ and $\epsilon$ depend on $d$ and $r$. So we introduce the following notation. \begin{defn} \label{razio} Let $X \subset \mP^r$ a curve of degree $d$. 
The integer $m=m_{d,r}$ in formula (\ref {degm}) will be called {\em $m$-ratio} of $X$ in $\mP^r$. Analogously, the integer $\epsilon=\epsilon_{d,r}$ will be called {\em $\epsilon$-remainder} of $X$ in $\mP^r$. \end{defn} \begin{defn} \label{estre} A curve $X$ is said an {\em extremal curve in $\mP ^r$} if it has a simple linear series $g^r_d$ of degree $d \ge 2r+1$ and $X$ has the maximal genus among all curves admitting such a linear series, i.e. \begin{equation} \label{maxgen} g(X) = \pi(d,r)= m_{d,r}\left( \frac{m_{d,r}-1} 2 (r-1)+\epsilon_{d,r} \right). \end{equation} \end{defn} Observe that the notion above is {\em relative} to the space $\mP^r$ where the curve lies. \medskip Finally, let us recall a few notions about rational ruled surfaces. \begin{defn} We denote by $\mF_n:= \mP (\sO _{\mP^1} \oplus \sO_{\mP^1}(-n))$ a {\em Hirzebruch surface} of invariant $n$, by $C_0$ the unique (if $n >0$) unisecant curve with $C_0^2 <0$ and by $L$ a line of its ruling. \end{defn} It is well-known that $\Pic(\mF_n) = \mZ [C_0] \oplus \mZ [L ]$, where $C_0^2 = -n$, $C_0 \cdot L= 1$ and $L^2 =0$. \begin{rmk} \label{fritto} If $H = C_0 + \beta L$ is a very ample divisor on $\mF_n$, then the associated morphism $\varphi_H$ embeds $\mF_n$ in $\mP^r$ as a rational normal ruled surface $R$ of degree $r-1$, where $r:= H^2 +1$. With an easy computation, one can see that $\beta = (r+n-1)/2$. Finally, we recall that the canonical divisor of $R$ is $K \sim -2H+(r-3)L$. \end{rmk} In the sequel we will treat curves on (possibly embedded) Hirzebruch surfaces. \begin{defn} \label{calcol} If $X \subset \mF_n$ is a $\gamma$-gonal curve, we say that {\em the gonality of $X$ is computed by the ruling of $\mF_n$} if $\gamma = X \cdot L$ (as far as $n \ge 1$). \end{defn} \section {The slope inequalities} Let us recall a definition that generalizes the notion of the gonality of a curve. \begin{defn} The {\it $r$-th gonality} of a curve $X$ is defined to be $$ \begin{aligned} d_r:= & \quad \min\{d \in \mN \;| \; \hbox{$X$ admits a linear series $g^r_d$} \} = \\ {} = & \quad \min\{\deg L \;| \; \hbox{$L$ line bundle on $X$ with $h^0(L) \ge r+1$} \} . \end{aligned} $$ Moreover, $(d_1, d_2, d_3, \dots, d_{g-1})$ is called the {\em gonality sequence} of $X$. \end{defn} Recall that the gonality sequence is strictly increasing and weakly additive (see \cite[Lemma 3.1]{LM}). \begin{lem} \label{incre} For any $\gamma$--gonal curve $X$ of genus $g$ and gonality sequence $(d_1, d_2, \dots, d_{g-1})$, the following properties hold: \begin{itemize} \item $d_1 < d_2 < d_3 < \cdots < d_{g-1}$, where $d_1 = \gamma$ and $d_{g-1}= 2g-2$; \item $d_{r+s} \le d_{r} + d_{s}$ for all $r,s$; \item $d_r \le r \gamma$, for all $r$. \end{itemize} \end{lem} \begin{rmk} The gonality sequence is defined up to $d_{g-1}$, since $d_r \ge 2g-1$ for $r \ge g$. Therefore, by the Riemann-Roch Theorem, $d_r = r+g$, for all $r\ge g$, as observed in \cite[Remark 4.4 (b)]{LN}. \label{gimenuno} \end{rmk} It is clear that, if the bound $d_r \le r \gamma$ is reached for all $r$ up to a certain integer $r_0$, then the sequence $$ \left( \frac{d_r} r \right)_{r=1, \dots, r_0} $$ is constant and equal to the gonality $\gamma$. Otherwise, we can compare the above ratios: let us recall the following notion. \begin{defn} The relation \begin{equation} \label{nonviol} \frac{d_r}{ r }\ge \frac{d_{r+1} } {r+1}. \end{equation} is called the {\it ($r$-th) slope inequality}. 
\end{defn} \begin{rmk} \label{ossBN} Assume that, for some $r$, the slope inequality does not hold, i.e. $$ \frac{d_r}{ r }< \frac{d_{r+1} } {r+1} $$ hence $(r+1) d_r < r d_{r+1}$. Using this inequality in the computation of the corresponding Brill--Noether number (see, for instance, \cite{ACGH}), we obtain that $$ \rho(d_r,r,g) = g-(r+1)(g-d_r+r) < r(d_{r+1} -g-r-1) \le 0. $$ Therefore $\rho(d_r,r,g)$ is strictly negative. \end{rmk} \begin{defn} A curve $X$ of genus $g$ is called {\em Brill--Noether general } if $\rho(d_r,r,g) \ge 0$, for all $1 \le r \le g-1$. \end{defn} As a straightforward consequence, we have the following fact. \begin{prop} Let $X$ be a Brill--Noether general curve of genus $g$ and gonality $\gamma$. Then \begin{equation} \label{catena} \gamma=d_1 \ge \frac{d_2 }{ 2} \ge \frac{d_3 }{ 3} \ge \frac{d_4 }{ 4} \ge \cdots \ge \frac{d_r }{ r} \ge \frac{d_{r+1} }{ {r+1} } \cdots \ge\frac{ d_{g-1} } {g-1} = 2, \end{equation} i.e. all the slope inequalities hold. \end{prop} \begin{rmk} \label{rem2} Nevertheless, also ``special" types of curves satisfy all the slope inequalities. For instance, in \cite[Remark 4.5]{LN}, one can find the explicit values of the gonality sequence of a $\gamma$-gonal curve $X$ in the following cases: - if $\gamma =2$ ($X$ hyperelliptic); - if $\gamma =3$ ($X$ trigonal); - if $\gamma =4$ and $X$ is bielliptic; - if $X$ is the general fourgonal curve. \medskip \noindent In all the cases above, all the slope inequalities hold. \end{rmk} For this reason, from now on we will assume $ \gamma \ge 4$. \section {Extremal curves and gonality} Let us first recall a result (see \cite[III, Theorem 2.5]{ACGH}) that turns out to be important in the sequel since it describes the geometry of extremal curves. Let us keep the notation introduced in Section 2. \begin{thm} \label{acgh1} Let $d$ and $r$ be integers such that $r \ge 3$, $d \ge 2r+1$. Then extremal curves $X \subset \mP^r$ of degree $d$ exist and any such a curve is one of the following: \begin{itemize} \item[(i)] The image of a smooth plane curve of degree $k$ under the Veronese map $ \mP^2 \rightarrow \mP^5$. In this case $r=5$, $d=2k$. \item[(ii)] A non-singular member of the linear system $|mH+L|$ on a rational normal ruled surface. In this case $\epsilon =0$. \item[(iii)] A non-singular member of the linear system $|(m+1)H- (r-\epsilon-2)L|$ on a rational normal ruled surface. \end{itemize} \end{thm} In particular, any irreducible extremal curve is smooth. \begin{rmk} \label{remarcoa} Observe that we assumed from the beginning that $d\ge 2r+1$. Namely, if $d< 2r$, by Clifford Theorem the $g_d^r$ is non-special and we obtain $g=d-r$. In particular, we have $r>g$; by Remark \ref{gimenuno} the gonality sequence is known and the $r$-th slope inequality holds. Moreover, if $d=2r$ then $m=2$ and $\epsilon =1$. Therefore $\pi(d,r) = r+1$. Hence, if $X$ is an extremal curve, then $d_r= d_{g-1}=2g-2=2r$. By Remark \ref{gimenuno}, we have $d_{r+1}= d_g= 2g=2r+2$, hence the $r$-th slope inequality holds. \end{rmk} \begin{rmk} \label{remarcob} In the sequel, we shall not consider the case (i) in the Theorem \ref{acgh1} where the extremal curve is the image of smooth plane curves under the Veronese map, being the gonal sequence of smooth plane curves completely understood by Max Noether's Theorem (see, for instance, \cite[Theorem 3.14]{Cili}). 
More precisely, a plane curve of degree $k \ge 5$ satisfies
$$ d_r = \left\{ \begin{array}{ll} \alpha k - \beta, & {\rm if}\ r < g=\frac{(k-1)(k-2)}{2}\\ r+g, & {\rm if}\ r\ge g,\\ \end{array} \right. $$
where $\alpha$ and $\beta$ are the uniquely determined integers with $\alpha \ge 1$ and $0 \le \beta \le \alpha$ such that $r=\frac{\alpha (\alpha +3)} {2} - \beta$. In particular, as observed in \cite[Proposition 4.3]{LM}, whenever $\beta \neq 0$, the $r$-th slope inequality is satisfied, while if $\beta =0$ and $\alpha \le k-4$, such an inequality is violated. In the case (i) of Theorem \ref{acgh1}, we have $r=5$, so $\alpha=2$ and $\beta =0$. It follows that if $k \ge 6$, the $5$-th slope inequality is violated.
\end{rmk}
The converse of the cases $(ii)$ and $(iii)$ of Theorem \ref{acgh1} holds: namely, the above classes of curves on a ruled surface force the curve to be extremal, as the following result shows.
\begin{prop} \label{fiore} Let $X \subset R \subset \mP^r$, where $X$ is a smooth curve of degree $d$ and $R$ is a rational normal ruled surface. Setting $m= m_{d,r}$ and $\epsilon= \epsilon_{d,r}$, if
$$ X \in \left\{ \begin{matrix} |mH+L| \hfill \\ or \hfill \\ |(m+1)H- (r-\epsilon-2)L|\\ \end{matrix} \right. $$
then $X$ is an extremal curve in $ \mP^r$. \end{prop}
\begin{proof} The canonical divisor $K$ of the surface $R$ can be written as $K \sim -2H + (r-3)L$. Therefore we can apply the Adjunction Formula on $R$ (where $g$ denotes the genus of $X$):
$$ 2g-2 = (K+X) \cdot X. $$
In the first case $X \sim mH+L$ we then obtain
$$ 2g-2 = ((m-2)H + (r-2) L) \cdot (mH+L). $$
Taking into account that $H^2 =r-1$, we finally obtain
$$ 2g = m(m-1)(r-1). $$
On the other hand, $d= \deg(X) = X \cdot H = mH^2+L\cdot H = m(r-1)+1$, hence $d-1=m(r-1)$. Therefore, from \eqref{degm}, we have that $\epsilon =0$ and, so, $\pi(d,r)=g$ as required (see (\ref{casbo})).
In the second case $X \sim (m+1)H- (r-\epsilon-2)L$, so we get
$$ 2g-2 = ((m-1)H + (\epsilon -1) L) \cdot ((m+1)H- (r-\epsilon-2)L). $$
It is immediate to see that
$$ 2g = m(m-1)(r-1) +2m \epsilon, $$
so again $g= \pi(d,r)$. \end{proof}
The two results above characterize embedded extremal curves in terms of rational ruled surfaces. Now we show that any smooth irreducible curve on a Hirzebruch surface, under a certain assumption on its class, can be embedded in a projective space as an extremal curve. To do this, the following known result will be useful (see, for instance, \cite[Ch.V, Corollary 2.18]{H}).
\begin{prop} \label{cuori} Let $D$ be the divisor $aC_0 + b L$ on the rational ruled surface $\mF_n$. Then:
\begin{itemize}
\item[(a)] $D$ is very ample $ \iff D$ is ample $\iff a>0$ and $b>an$;
\item[(b)] the linear system $|D|$ contains an irreducible smooth curve $\iff$ it contains an irreducible curve $\iff a=0, b=1$ or $a=1, b=0$ or $a>0, b>an$ or $a>0, b=an, n>0$.
\end{itemize}
\end{prop}
The Hirzebruch surface $\mF_0$ is isomorphic to $\mP^1 \times \mP^1$, so $\Pic (\mF_0)$ is generated by two lines belonging to distinct rulings. Also in this case, we denote these generators by $ C_0 $ and $L$ (as in the case of $\mF_n$, where $n >0$), even if their roles can be exchanged.
\begin{thm} \label{everex} Let $X \sim \gamma C_0 + \lambda L$ be an irreducible smooth curve on $\mF_n$ with $\gamma \ge 2$ and not isomorphic to a plane curve.
\noindent Dividing $\lambda -n-1$ by $\gamma -2$, let us denote by $\beta$ the quotient and $\epsilon$ the remainder, i.e.
$$ \beta:= \left[ \frac{\lambda -n-1}{\gamma -2}\right] =\frac{\lambda -n-1-\epsilon}{\gamma -2}, \quad 0 \le \epsilon \le \gamma -3. $$ Moreover, set $$ r:=2\beta +1-n \quad \hbox{and} \quad d:=\gamma(\beta - n) + \lambda. $$ Consider the complete linear system $|H|$ on $\mF_n$ given by $$ H \sim C_0 + \beta L. $$ Then \begin{itemize} \item[$i)$] $\beta > n$, for all $n \ge 0$; \item[$ii)$] the morphism $\varphi_H$ embeds $\mF_n$ in $\mP^r$; \item[$iii)$] $\varphi_H(X)$ is a curve of degree $d$. \end{itemize} \noindent Assume in addition that $$ \lambda \ge \frac{\gamma(\gamma +n -2)}{2}. $$ Then \begin{itemize} \item[$iv)$] $m_{d,r} = \gamma -1$ and $ \epsilon_{d,r} = \epsilon$; \item[$v)$] $\varphi_H(X)$ is an extremal curve in $\mP^r$. \end{itemize} \end{thm} \begin{proof} Note first that, by assumption, $X$ is irreducible and smooth. Then, by Proposition \ref{cuori} and the assumption $\gamma \ge 2$, we have $$ \lambda > 0, \quad \hbox{if} \quad n =0 $$ and \begin{equation} \label{disu} \lambda \ge \gamma n, \quad \hbox{if} \quad n >0 \end{equation} \noindent $i)$ If $n =0$, it is clear that $\beta \ge 1 \iff \lambda -1 \ge \gamma -2$. If this is not the case, we observe that we can change the role of $\gamma$ and $\lambda$, since on $\mF_0 \cong \mP^1 \times \mP^1$ we can choose arbitrarily one of the two rulings. \noindent If $n=1$, then $\beta > 1$ if and only if $\lambda -2 > \gamma-2$ and this holds by the assumption that $X$ is not isomorphic to a plane curve. Indeed, in general, any irreducible curve $\gamma C_0 + \lambda L$ on $\mF_1$ satisfies $\lambda \ge \gamma$ by \eqref{disu}. In particular, if $\lambda=\gamma$, we have $\beta =1$ and $H = C_0 + L$. On one hand, it is clear that the linear system $|H|$ does not induce an embedding of $\mF_1$, as it maps surjectively to $\mP^2$ and corresponds to the contraction of the exceptional curve $C_0$; it is well known that this is the blowing up morphism $\mF_1 \to \mP^2$ of the plane in a point. On the other hand, under such a morphism, any smooth irreducible curve $X\sim \gamma (C_0 + L)$ is mapped isomorphically to a smooth degree $\gamma$ plane curve, which contradicts our assumption. \noindent If $n>1$, obviously $\beta > n$ if and only if $$ \lambda -n-1 > n(\gamma-2). $$ i.e. $\lambda > n(\gamma -1) +1$. But this holds since $n > 1$ implies $n\gamma -n+1 < n\gamma$ and, by (\ref{disu}), $n\gamma \le \lambda$. \medskip \noindent $ii)$ Again by Proposition \ref{cuori}, the linear system $|H| = | C_0 + \beta L|$ is very ample if and only if $\beta > 0$ if $n =0$ or $\beta > n$ if $n >0$ and this holds by $(i)$. Hence $\varphi_H$ is an embedding. \noindent Moreover, we have the well--known formula (see, for instance \cite[Proposition 1.8 - (ii)]{BS}): \begin{equation} \label{stellina} r+1 = h^0( \sO_{\mF_n} (C_0 + \beta L)) = 2\beta +2 -n. \end{equation} \medskip \noindent $iii)$ Therefore $\varphi_H$ embeds $X$ in $\mP^r$ as a curve of degree $$ (\gamma C_0+ \lambda L) \cdot (C_0 + \beta L)=\gamma(\beta - n) + \lambda = d, $$ as required. \medskip \noindent $iv)$ In order to show that $m_{d,r} = \gamma -1$ and $ \epsilon_{d,r} = \epsilon$, it is enough to prove that $$ d -1 -( \gamma -1)(r-1) = \epsilon \quad \hbox{and} \quad 0 \le \epsilon \le r-2. $$ Clearly $$ d -1 -( \gamma -1)(r-1) = \gamma(\beta - n) + \lambda -1 - ( \gamma -1)(2\beta -n ) = \lambda -n-1-\beta(\gamma -2) $$ and, substituting the value of $\beta$, we obtain the requested equality. 
In order to show that $\epsilon \le r-2$, note first that, for any $n \ge 0$, we have $$ \epsilon \le r-2 \iff \lambda -n-1-\beta(\gamma -2) \le 2\beta -n -1 \iff \lambda \le \beta \gamma. $$ Since $\epsilon \le \gamma -3$, clearly $$ \beta =\frac{\lambda -n-1-\epsilon}{\gamma -2} \ge \frac{\lambda -n +2 - \gamma}{\gamma -2}, $$ so in order to show that $\lambda \le \beta \gamma$, it is enough to prove that $$ \lambda \le \gamma \frac{\lambda -n +2 - \gamma}{\gamma -2} \quad \iff \quad 2 \lambda \ge \gamma(\gamma +n -2) $$ and this holds by assumption. \medskip \noindent $v)$ In order to prove that $\varphi_H(X) \subset \mP^r$ is an extremal curve, we compute the genus $g$ of $X$ using the Adjunction Formula obtaining \begin{equation} \label{genrig} \begin{array}{ll} 2g-2& =(K_{\mF_n} +X) \cdot X =(-2C_0 -(2+n)L + \gamma C_0+\lambda L) \cdot (\gamma C_0+\lambda L)=\\ &= 2(\lambda \gamma - \lambda -\gamma) - n \gamma (\gamma -1),\\ \end{array} \end{equation} which yields \begin{equation} \label{genecaste} g=\lambda \gamma - \lambda -\gamma +1 - \frac{n}{2} \gamma (\gamma -1) = (\lambda-1)(\gamma -1) - \frac{n}{2} \gamma (\gamma -1). \end{equation} Now we can compute the Castelnuovo bound $$ \pi(d,r) = m_{d,r}\left( \frac{m_{d,r}-1} 2 (r-1)+\epsilon_{d,r} \right) = (\gamma-1)\left( \frac{\gamma -2} 2 (2\beta -n)+\epsilon \right). $$ Since $$ 2\beta -n = 2 \; \frac{\lambda -n-1-\epsilon}{\gamma -2} - n = \frac{2\lambda -2-2\epsilon - n \gamma }{\gamma -2} $$ we obtain $$ \pi(d,r) = (\gamma-1)\left( \frac{1} 2 (2\lambda -2-2\epsilon - n \gamma)+\epsilon \right) = (\gamma-1)\left(\lambda -1-\epsilon -n\gamma/2 + \epsilon \right) $$ and, finally, $$ \pi(d,r) =(\gamma-1)(\lambda -1 -n\gamma/2 ) = (\gamma-1)(\lambda -1) - \frac{n}{2} \gamma (\gamma -1). $$ Comparing this formula with (\ref{genecaste}), we see that $\pi(d,r) = g$ and hence $\varphi_H(X)$ is an extremal curve in $\mP^r$. \end{proof} \begin{rmk} By the irreducibility of $X$, we have $\lambda \ge \gamma n$ from Proposition \ref{cuori}. As a consequence, the additional assumption in Theorem \ref{everex} $$ \lambda \ge \frac{\gamma(\gamma +n -2)}{2}. $$ holds if $n \ge \gamma -2$. \end{rmk} In the sequel we will need to relate the gonality of $X$ with its $m$-ratio. Since Theorem \ref{acgh1} claims that the extremal curves, not isomorphic to plane curves, lie on a rational normal ruled surface, we here recall the following result of Martens (see \cite{M}) which describes such a relationship in the wider case of curves on ruled surfaces (see Definition \ref{calcol}). \begin{thm} \label{marty} Let $X$ be a reduced and irreducible curve on a Hirzebruch surface $\mF_n$ and assume that $X$ is not a fibre. Then the gonality of $X$ is computed by a ruling of the surface, unless $n=1$ and $X \sim \alpha(C_0 + L)$ for some $\alpha \ge 2$, in which case $X$ is isomorphic to a plane curve of degree $\alpha$ and its gonality is $\alpha -1$. \end{thm} \begin{rmk} The exceptional case in Theorem \ref{marty} concerns curves of the type $$ X \sim \alpha (C_0 + L) $$ on $\mF_1$. We observe that such a situation never occurs in the framework of extremal curves of type (ii) and (iii) in Theorem 4.1. Note first that for a rational ruled surface $R \subset \mP^r$ of degree $r-1$ the hyperplane divisor $H$ satisfies $H \sim C_0 + \frac{r-1- C_0^2 }{ 2} L$ by Remark \ref{fritto}. Hence $R \cong \mF_1$ if and only if $$ H \sim C_0 + \frac{r}{ 2} L. 
$$ It follows that, in case (ii), we have $$ X \sim mH+L= m C_0 + \left(\frac{mr} { 2} +1\right)L $$ and, so, $X \sim \alpha C_0 + \alpha L$ if and only if $\alpha=m= \frac{2}{ 2-r}$, which is not possible for $r \ge 2$. \noindent In case (iii) we have $$ X \sim (m+1)H-(r-2-\epsilon)L= (m+1) C_0 +\left(\frac{(m+1)r} { 2}-r+2+\epsilon \right)L. $$ hence $X \sim \alpha C_0 + \alpha L$ if and only if $\alpha=m+1$ and $$ m+1 = \frac{(m+1)r} { 2}-r+2+\epsilon \quad \Rightarrow \quad \epsilon = (2-r) (m-1)/2. $$ But $\epsilon \ge 0$ so we get a contradiction for $r \ge 3$. \end{rmk} The two results recalled above (Theorems \ref{acgh1} and \ref{marty}) lead to the following consequence, whose formulas immediately come from (\ref{degm}) and (\ref{maxgen}). \begin{cor} \label{propc} Let $X$ be a $\gamma$-gonal extremal curve in $\mP^r$ (where $r \ge3$) of degree $d$, genus $g$ and $m$-ratio $m$. If $X$ is not isomorphic to a plane curve, then there exists a rational normal ruled surface $R$ such that $X \subset R \subset \mP^r$ and, setting $\Pic(R) = \mZ[H] \oplus \mZ[L]$, either: \begin{itemize} \item [i)] if $X \in |mH+L|$ on $R$ then $\epsilon =0$ and $m= \gamma$. Consequently, \begin{equation} \label{deggamma0} d= \gamma (r-1) +1 \end{equation} \begin{equation} \label{maxgengamma0} g= \frac {\gamma (\gamma-1)(r-1)} 2 . \end{equation} or \item [ii)] if $X \in |(m+1)H- (r-\epsilon-2)L|$ on $R$ then $m= \gamma -1$. Consequently, \begin{equation} \label{deggamma1} d= (\gamma -1)(r-1)+\epsilon +1 \end{equation} \begin{equation} \label{maxgengamma1} g=(\gamma-1) \left[\frac {\gamma-2} 2 \, (r-1) + \epsilon \right]; \end{equation} \end{itemize} \noindent In particular, the gonal series $g^1_\gamma$ on $X$ comes from the restriction of the fibration $\pi: R \rightarrow \mP^1$ given by the ruling. \end{cor} \begin{rmk} \label{bounddeg} Assume $\gamma \ge 4$ in Corollary \ref{propc} and consider the case $(ii)$: $m =\gamma -1$. If $\epsilon = 0$ and $d \ge 3r -1$ then, from (\ref{deggamma1}), we have $$ d = ( \gamma-1)(r-1) + 1 \ge 3r -1 \quad \Longrightarrow \quad (\gamma-4)r \ge \gamma -3 \quad \Longrightarrow \quad \gamma \ge 5. $$ \end{rmk} We shall need the following result (see \cite[Theorem 4.13 ]{LM}). \begin{thm} \label{LM1} Let $X$ be an extremal curve of degree $d \ge 3r - 1$ in $\mP^r$. Then $d_{r -1} = d - 1$ and $X$ does not satisfy all slope inequalities. \end{thm} Now we can state the main result of this section, which gives a negative answer to the question posed by Kato and Martens in \cite{KM}, i.e. if the $r$-th slope inequality is violated for extremal curves in $\mP^r$ for any $r \ge 4$. \begin{thm} \label{mainthm} Let $X$ be a $\gamma$-gonal extremal curve of degree $d \ge 3r - 1$ in $\mP^r$ ($r \ge 3$) where $\gamma \ge 4$. Then $d_r=d$. If, in addition, $X$ is not isomorphic to a plane curve, then $X$ satisfies the following conditions: \begin{itemize} \item[$i)$] $d_{r+1}\le d+\gamma -1$; \item[$ii)$] by assuming one of the following sets of hypotheses: \begin{itemize} \item[$(a)$] either $\epsilon =0$, $m = \gamma$ and $r \ge \gamma -1$, \item[$(b)$] or $\epsilon \ge \gamma -2$, \end{itemize} \noindent then the $r$-th slope inequality holds, i.e. $$ \frac {d_r} r \ge \frac {d_{r+1} } {r+1}. $$ \end{itemize} \end{thm} \begin{proof} From Theorem \ref{LM1}, we obtain $d_{r-1}= d-1$. Since $d_r > d_{r-1}= d-1$, we get that $d_r \ge d$. On the other hand $X$ possesses a $g^r_d$ by assumption, so $d_r \le d$, hence the first statement is proved. 
\medskip \noindent $(i)$ Since $X \subset \mP^r$ is a curve of degree $d$, denoting by $|H|$ the hyperplane linear system in $\mP^r$, the induced linear system $|H_X|$ on $X$ is a linear series $g^r_d$. From Corollary \ref{propc}, $X$ is a $\gamma$--secant curve on a rational ruled surface $R$ and the gonal series $g^1_\gamma$ on $X$ comes from the restriction of $\pi: R \rightarrow \mP^1$. So, for any $P \in X$, we set $\Gamma_P$ to be the gonal divisor contining $P$, i.e. $\Gamma_P = \pi^{-1}(\pi(P)) \cap X$. Also observe that the general hyperplane $H$ cuts on $R$ an irreducible unisecant curve, say $U_H$. In particular, the general hyperplane $H$ does not contain any line of the ruling. Therefore for a general $H_X \in g^r_d$ and for any $P \in H_X$, we have $$ H_X \cap \Gamma_P = H \cap X \cap \Gamma_P = (H \cap R)\cap X \cap \Gamma_P = U_H \cap X \cap \Gamma_P= \{P\}. $$ Setting $\Gamma_P = P + Q_1 + \cdots+ Q_{\gamma-1}$, let us consider the divisor obtained by adding to $H_X$ the $\gamma -1$ further points of the gonal divisor, i.e. $D = H_X + Q_1 + \cdots+ Q_{\gamma-1}$; we have $$ \deg D = \deg H_X +\gamma -1 = d+ \gamma -1. $$ Now let us consider the canonical model $X_K \subset \mP^{g-1}$; here we can apply the Geometric Riemann-Roch Theorem to all the divisors $\Gamma_P$, $H_X$ and $D$. First we obtain $$ \dim \langle \Gamma_P \rangle = \deg \Gamma_P - h^0({\mathcal O}(\Gamma_P)) = \gamma-2 $$ and $$ \dim \langle H_X \rangle = \deg H_X - h^0({\mathcal O}(H_X)) = d-r-1. $$ Consequently, since the intersection $ \langle \Gamma_P \rangle \cap \langle H_X \rangle $ contains $P$, we have $$ \dim \langle D \rangle \le \dim \langle H_X \rangle +\gamma -2 = d-r+\gamma -3. $$ Hence, again from the Geometric Riemann-Roch Theorem, we get that $$ t+1 := h^0({\mathcal O}(D)) = \deg D - \dim \langle D \rangle \ge d+\gamma -1 -(d-r+\gamma -3) = r+2. $$ Therefore there exists an integer $t \ge r+1$ such that $|D|$ is of the form $g^t_{d+\gamma -1}$. This implies that $d_t \le d+\gamma -1$. From Lemma \ref{incre}, we finally obtain that $d_{r+1} \le d_t \le d+\gamma -1$. \medskip \noindent $(ii)$ From the fact that $d_r=d$ and $(i)$, it is enough to show that $$ \frac {d} r \ge \frac {d+\gamma -1} {r+1}, $$ or, equivalently, \begin{equation} \label{tesina} d \ge r(\gamma -1). \end{equation} $(a)$ Assume $\epsilon =0$, $m = \gamma$ and $r +1 \ge \gamma$. Then, using (\ref {deggamma0}), $$ d= \gamma (r-1) +1 = r \gamma -\gamma +1 \ge r \gamma -(r+1) +1 = r(\gamma -1) $$ i.e. (\ref{tesina}), as required. \noindent $(b)$ Assume $\epsilon \ge \gamma -2 \ge 1$. Then we express the degree using (\ref {deggamma1}), obtaining $$ d= (\gamma -1)(r-1)+\epsilon +1 \ge (\gamma -1)(r-1)+\gamma -1 = r(\gamma -1) $$ so we get (\ref{tesina}), as required. \end{proof} Let us observe that the condition that $X$ is not isomorphic to a plane curve is necessary since, otherwise, the inequality $(i)$ does not hold (see Remark \ref{remarcob}) and, even assuming $(b)$, the $r$-th slope inequality fails. \begin{cor} \label{fasciabianca} Let $X$ be a $\gamma$-gonal extremal curve in $\mP^r$, where $\gamma \ge 4$. If the degree $d$ satisfies \begin{equation} \label{tradue} r(\gamma-1) \le d \le \gamma(r-1)+1 \end{equation} then the $r$-th slope inequality holds. \end{cor} \begin{proof} Let us note first that, under the assumption \eqref{tradue}, the curve $X$ cannot be isomorphic to a plane curve. 
If so, by Theorem \ref{acgh1}, (i), we would have $r=5$, $d=2k$ and $\gamma =k-1$ by Max Noether's Theorem, contradicting \eqref{tradue}. Now observe that the assumption implies
$$ r(\gamma-1) \le \gamma(r-1)+1 \quad \Rightarrow \quad r \ge \gamma -1. $$
Since $X$ is an extremal curve and $X$ is not isomorphic to a plane curve, by Corollary \ref{propc}, we have two possible cases. In the first one, $m=\gamma$, $\epsilon =0$ and $d= \gamma (r-1) +1$ from (\ref{deggamma0}). Since $r \ge \gamma - 1$, by Theorem \ref{mainthm} we obtain that the $r$-th slope inequality holds.
In the second case, $m=\gamma -1$ and $d= (\gamma -1)(r-1)+\epsilon +1$ from (\ref{deggamma1}). The assumption (\ref{tradue}) yields
$$ (\gamma -1)(r-1)+\epsilon +1 \ge r(\gamma-1) \quad \Rightarrow \quad \epsilon \ge \gamma -2. $$
Again by Theorem \ref{mainthm}, we obtain that the $r$-th slope inequality holds. \end{proof}
The study above and, in particular, Corollary \ref{fasciabianca}, can be summarized in the following table, where we shall consider only curves which are not isomorphic to plane curves. The first column lists the degree in increasing order and the last one records whether the $r$-th slope inequality holds. In particular, the case $d=2r$, here omitted, has been described in Remark \ref{remarcoa}. The first four lines of the table will be considered in detail in the next section.
\medskip
\begin{center} \begin{tabular}{|c c c c c |} \hline $d$ & $\quad \gamma \quad$ & $\quad m \quad$ & $\quad \epsilon \quad$ & \quad $r$-th slope \\ [0.8ex] \hline \hline $2r+1 \le d \le 3r-3$ \qquad & 3 & 2 & $2 \le \epsilon\le r-2$ & \quad yes (trigonal) \\ \hline $3r-2$ & 3 & 3 & 0 & \quad yes (trigonal) \\ \hline $3r-2$ & 4 & 3 & 0 & \quad $\star$ \quad \\ \hline $3r-1$ & 4 & 3 & 1 & \quad no\quad \\ \hline $3r \le d \le 4r-4$ \qquad & 4 & 3 & $2 \le \epsilon\le r-2$ & \quad yes \\ \hline $4r-3$ & 4 & 4 & 0 & \quad yes \\ \hline $4r-3$ & 5 & 4 & 0 & \quad \\ \hline $4r-2$ & 5 & 4 & 1 & \quad \\ \hline $4r-1$ & 5 & 4 & 2 & \quad \\ \hline $4r \le d \le 5r-5$ \qquad & 5 & 4 & $3 \le \epsilon\le r-2$ & \quad yes \\ \hline $5r-4$ & 5 & 5 & 0 & \quad yes \\ \hline $5r-4$ & 6 & 5 & 0 & \quad \\ \hline $5r-3$ & 6 & 5 & 1 & \quad \\ \hline $5r-2$ & 6 & 5 & 2 & \quad \\ \hline $5r-1$ & 6 & 5 & 3 & \quad \\ \hline $5r \le d \le 6r-6$ \qquad & 6 & 5 & $4 \le \epsilon\le r-2$ & \quad yes \\ \hline $6r-5$ & 6 & 6 & 0 & \quad yes \\ \hline ... & ... & ... & ... & ... \\ \hline \end{tabular} \end{center}
\medskip
\centerline{Table 1}
\section {Violating cases}
This section concerns the first four lines of Table 1. More precisely, extremal curves whose degree lies in the initial range are forced to have a certain gonality (see Proposition \ref{nostra}). Moreover, the $r$-th slope inequality is violated by extremal curves of degree $d=3r-1$ (see Proposition \ref{altrui}). Finally, we will treat the case of curves of degree $d=3r-2$ in Proposition \ref{casor4} and Theorem \ref{caso3rm2}.
\begin{prop} \label{nostra} Let $X \subset \mP^r$ (where $r \ge 3$) be an extremal curve of degree $d$ and genus $g$, not isomorphic to a plane curve and such that
$$ 2r+1 \le d \le 3r -1. $$
Then:
\begin{itemize}
\item[$i)$] if $2r+1 \le d \le 3r -3$ then $X$ is trigonal ($m=2$, $\epsilon \ge 2$);
\item[$ii)$] if $d = 3r -2$ then $X$ is either trigonal or fourgonal (where $m=3$, $\epsilon =0$);
\item[$iii)$] if $d = 3r -1$ then $X$ is fourgonal (where $m=3$, $\epsilon =1$).
\end{itemize}
\end{prop}
\begin{proof} Let us recall that $d = m(r-1) + \epsilon +1$ where $m =[(d-1) / (r-1)]$.
\noindent $(i)$ In this case $$ \frac{d-1}{r-1} \le \frac {3r-4}{r-1} <3 $$ hence $m=2$. So $d = 2(r-1) + \epsilon +1$ and the bound $d \ge 2r +1$ implies $\epsilon \ge 2$. Therefore we are in the case of Corollary \ref{propc} - (ii), hence $X$ admits a $g^1_3$, i.e. it is a trigonal curve. \noindent $(ii)$ In this case $$ \frac{d-1}{r-1} = \frac {3r-3}{r-1} =3 $$ hence $m=3$. Therefore $d = 3(r-1) + \epsilon +1 = 3r-2$ and, so, $\epsilon =0$. By Corollary \ref{propc} there are two possibilities: in case $(i)$ we have $\gamma = m =3$ so the curve $X$ is trigonal. \noindent Otherwise, in case $(ii)$ of Corollary \ref{propc}, we have that $\gamma = m+1 =4$ so $X$ is fourgonal. \noindent $(iii)$ In this case $$ \frac{d-1}{r-1} = \frac {3r-2}{r-1} $$ hence $m=3$. Therefore $d = 3(r-1) + \epsilon +1 = 3r-1$ and, so, $\epsilon =1$. So the situation is described by Corollary \ref{propc} - (ii) and, in particular, $X$ possesses a $g^1_4$. \end{proof} Concerning the slope inequalities, the cases described above behave as follows: as far as $X$ is trigonal, all the slope inequalities are fulfilled (see Remark \ref{rem2}). The case $d=3r-1$ is described by Lange--Martens (see \cite[Corollary 4.6]{LM}) as follows. \begin{prop} \label{altrui} For any $r \ge 2$ and any extremal curve $X$ of degree $d=3r-1$ in $\mP^r$, we have $$ \frac {d_r} r < \frac {d_{r+1} } {r+1}. $$ \end{prop} Finally, the case $d=3r-2$ is studied in the next Theorem \ref{caso3rm2}. Its proof will involve the following two results (see, respectively, \cite[Proposition 4.10 and Lemma 4.8]{LM}). \begin{prop} \label{LM3} Let $g_\delta^s$ be a very ample linear series on $X$ with $\delta \ge 3s - 1$ and $\epsilon_{\delta,s} \neq 0$. If $g > \pi(\delta, s) - m+ 2$, then $ 2\delta \le g + 3s - 2$. \end{prop} \begin{lem} \label{LM2} Let $X$ be a curve admitting a $g_\delta^s$ with $\delta \ge 2s-1 \ge 3$ such that \begin{enumerate} \item[i)] $d_{s-1} =\delta-1$, and \item[ii)] $ 2\delta \le g + 3s - 2$. \end{enumerate} \noindent Then $d_s = \delta$ and the linear series $g_\delta^s$ is complete and very ample. Moreover, if $g_{\delta'}^{s'} = |K_X - g_\delta^s|$ (hence $\delta' = 2g-2-\delta$ and $s' = g-1-\delta+s$), we have $s' \ge s$, $d_{s' +1} \ge \delta'+3$ and, so, $$ \frac {d_{s'} }{s'} < \frac {d_{{s'} +1} } {{s'} +1}. $$ \end{lem} By Proposition \ref{nostra} extremal curves in $\mP^r$ of degree $d=3r-2$ can be either trigonal or fourgonal. Since we know that trigonal curves satisfy all slope inequalities, we shall focus the fourgonal case. We shall see that for $r=4$ extremal curves of degree $3r-2=10$ satisfy all slope inequalities, while for $r \ge 5$, such curves violate the $r$-th slope inequality. \begin{prop} \label{casor4} Let $X \subset \mP^4$ be a fourgonal extremal curve of degree $10$. Then $X$ satisfies all slope inequalities. \end{prop} \begin{proof} Since $X$ is extremal, we have $g(X)= \pi(10,4)=9$. The curves of genus $g \le 13$ violating some slope inequality have been classified in \cite[Theorem 3.5 (i)]{KM}. In particular, in genus $g=9$ the only examples are the extremal curves of degree $8$ in $\mP^3$. The gonality sequence of such curves has been determined in \cite[Example 4.7]{LM}, and in particular it satisfies $d_4=11$. But in our case $X$ possesses a $g^4_{10}$, so $d_4 \le 10$. It follows that no slope inequality is violated. \end{proof} Now we turn to the case $r\ge 5$. \begin{thm} \label{caso3rm2} Let $X\subset \mP^r$ with $r \ge 5$ be a fourgonal extremal curve of degree $ d= 3r-2. 
$ Then $d_{r+1} = 3r +1$. In particular, $X$ violates the $r$-th slope inequality: $$ \frac{d_r }{ r} < \frac{d_{r+1}}{ {r+1}}. $$ \end{thm} \begin{proof} We note, first, that $X$ is not isomorphic to a plane curve; indeed, in this case we would have $r=5$ and $d=3r-2=13$, while such curves lie on the Veronese surface and hence their degree is even. Therefore $X$ lies on a rational normal surface. As observed in Proposition \ref{nostra}, $m_{d,r}=3$, $\epsilon_{d,r} =0$. Therefore by Theorem \ref{acgh1} and Corollary \ref{propc}, the class of $X$ on a ruled surface $R \subset \mP^r$ (of degree $r-1$) is given by \begin{equation} \label{classe di X} X \sim 4H -(r-2)L \end{equation} and the genus $g$ of $X$ turns out to be \begin{equation}\label{pidr} g= \pi (3r-2,r)= 3(r-1). \end{equation} It is not difficult to verify that the proof of Theorem \ref{mainthm}-$(i)$ applies also to our case, and since $\gamma = 4$, we have $$ d_{r+1} \le d+3 = 3r+1. $$ To prove that $d_{r+1} \ge 3r+1$ for $r\ge 5$, we claim that $X$ admits also an embedding in $\mP^{r-2}$ as an extremal curve. Keeping the notation of Remark \ref{fritto}, if $H \sim C_0 + \beta L$, then it is straightforward to see that $\beta \ge -C_0^2$ and the equality holds if and only if $R$ is a cone (see, for instance, \cite[Theorem 2.5 and Remark (b)]{R}). Since $X$ is irreducible we necessarily have $$ 0\le X \cdot C_0 = (4C_0 +(4\beta -r+2)L)\cdot C_0 = 4C_0^2 + 4\beta -r+2, $$ so that $$ \beta \ge -C_0^2+ \frac{r-2 }{ 4}> -C_0^2, $$ hence \begin{equation}\label{beta} \beta \ge -C_0^2 +1, \end{equation} in particular $R$ is not a cone. Therefore we can consider the projection of $R$ from a line $L$ of its ruling. Hence consider the divisor $H':= H -L$ on $R$; taking into account \eqref{beta} we see that the linear system $|H'|$ maps $R$ in $\mP^{r-2}$ as a degree $r-3$ rational normal surface or a degree $r-3$ rational normal cone (see, for instance, \cite[Theorem 2.5 and Remark (b)]{R}). Under such a morphism the image of $X$ has degree $$ X \cdot H'= (4H -(r-2)L) \cdot (H-L)=4H^2 -(r-2)-4=4(r-1)-r-2=3r-6. $$ As $m_{3r-6,r-2}=\left [ \frac{3r-7 }{ r-3} \right ] =3$ and $\epsilon_{3r-6,r-2}=2$, the maximal genus is in this case $$ \pi(3r-6,r-2)=3((r-3)+2)= 3(r-1)=g(X), $$ which proves the claim. The above construction provides a divisor $H' \in g^s_\delta:= g^{r-2}_{d-4}$, so we can consider the birational morphism $$ \varphi_{H'}: \; X \rightarrow Y:= \varphi_{H'}(X) \subset \mathbb P^s $$ where $Y$ is an extremal curve of genus $g = g(X)= 3r-3$, $$ \deg(Y) = \delta = d-4= 3r-6 \quad \hbox{and} \quad s = r-2. $$ Consequentely, $Y \subset \mathbb P^s$ is smooth and $g^s_\delta$ is very ample. Moreover, since $r \ge 5$ then $s \ge 3$. \noindent Furthermore, since $Y$ is an extremal curve, $\delta \ge 3s -1$ and $\epsilon_{\delta,s} =2$, we can apply both Theorem \ref{LM1} and Proposition \ref{LM3}, obtaining, respectively, that $d_{s-1} = \delta -1$ and $2 \delta \le g +3s -2$. \noindent Therefore all the assumptions of Lemma \ref{LM2} are verified and from it we obtain that $d_s = \delta$ and the Serre dual series $g_{\delta'}^{s'} = |K_X - g_\delta^s|$ of $g^s_\delta$ satisfies the following relation $$ d_{s' +1} \ge \delta'+3, $$ where $$ \delta' = 2g-2-\delta= 2(3r-3)-2 - (3r-6)=3r-2, $$ $$ s' = g-1-\delta+s = 3r-3-1 -(3r-6) + r-2=r. $$ Therefore the above inequality gives $$ d_{r +1} \ge 3r+1, $$ as required. 
Finally, since $d_r \le d$, we have $$ \frac{d_r} {r}\le \frac{d}{r} = \frac {3r-2}{r} < \frac{3r+1}{r+1}= \frac{d_{r+1}}{r+1}. $$ \end{proof} \section {Fourgonal extremal curves} The setting of the current section is the following: $X$ is a fourgonal extremal curve in $\mP^r$ (where $r \ge 3$) of degree $d$, genus $g$ and $m$-ratio $m$. \begin{rmk} Concerning space curves (i.e. the case $r=3$), let us consider the assumptions (a) and (b) of Theorem \ref{mainthm}-$(ii)$. Since $0 \le \epsilon \le r-2=1$, then the only possibility is case (a), where $\epsilon =0$ and $m=\gamma \le r+1 = 4$. For this reason, regarding space curves, Theorem \ref{mainthm} describes only the fourgonal case. More precisely, if $X$ is a fourgonal extremal curve of degree $d$ in $\mP^3$ then it satisfies the following conditions: \begin{itemize} \item[(i)] $d_3 = d$; \item[(ii)] $d_4\le d+3$. \item[(iii)] If, in addition, we assume $\epsilon = 0$ and $m=4$, then the $3$rd slope inequality holds, i.e. $$ \frac {d_3} 3 \ge \frac {d_4 } 4. $$ \end{itemize} Note that this situation is quite specific. Namely, formulas (\ref{deggamma0}) and (\ref {maxgengamma0}) give $d=9$ and $g=12$. So, comparing this fact with (\ref{viol3}) (i.e. the violation of the third slope inequality), which holds for $d \ge 10$, we obtain that this bound on the degree given by Kato--Martens is sharp. Moreover, the above claims fit with the description of the gonal sequence given by the same authors in \cite[Theorem 3.5]{KM}. \end{rmk} It is easy to specialize the results of Section 4 to the following results in the fourgonal case. \begin{cor} \label{mainthm4} Let $X$ be a fourgonal extremal curve of degree $d \ge 3r-1$ in $\mP^r$, where $r \ge 3$. Then $X$ satisfies the following conditions: \begin{itemize} \item[$i)$] $d_r = d$; \item[$ii)$] $d_{r+1}\le d+3$. \item[$iii)$] If, in addition, we assume that $d \ge 3r$ , then the $r$-th slope inequality holds, i.e. $$ \frac {d_r} r \ge \frac {d_{r+1} } {r+1}. $$ \end{itemize} \end{cor} \begin{proof} Note first that $X$ cannot be isomorphic to a plane curve (otherwise we would have $r=5$ and $d =2k \ge 14$, so its gonality would be $k-1\ge 6$). Therefore we can apply Theorem \ref{mainthm} and obtain $(i)$ and $(ii)$. Morever, $d \ge 3r$ and $\gamma=4$ imply that $\epsilon \ne 1$ (see Table 1). Hence either assumption $(a)$ or $(b)$ in Theorem \ref{mainthm} hold. \end{proof} Moreover, a stronger form of Corollary \ref{fasciabianca} holds. \begin{cor} Let $X$ be an extremal curve of degree $d$ in $\mP^r$, where $r \ge 3$. Then the following conditions are equivalent: \begin{itemize} \item[$i)$] the degree satisfies $3r \le d \le 4r-4$; \item[$ii)$] $X$ is fourgonal and the $r$-th slope inequality holds. \end{itemize} \end{cor} \begin{proof} Immediate, from Table 1. \end{proof} As a consequence of the results of the previous sections, we are able to determine the gonal sequence of some extremal curve in a certain interval and to show that all the slope inequalities hold there. Indeed, on one hand, in Theorem \ref{caso3rm2} we have seen that, by projecting an extremal curve lying in $\mP^r$ from a gonal divisor, we may obtain an extremal curve in $\mP^{r-2}$. On the other hand, for an extremal curve in $\mP^r$, the values of $d_{r}$ and $d_{r-1}$ are determined by Theorems \ref{LM1} and \ref{mainthm}. \begin{thm} \label{verylast} Let $$ X \sim 4(C_0 +n L) \subset \mF_n $$ be a smooth irreducible curve, where $n \ge 3$. 
For any integer $a$ such that \begin{equation} \label{questaea} 0\le a \le \left[ \frac{n-3}{ 2}\right], \end{equation} the gonal sequence of $X$ satisfies \begin{equation} \label{seqcp} d_{n+2a} = 4(n+a)-1, \quad d_{n+2a+1}= 4(n+a). \end{equation} \noindent Moreover, the following bound holds \begin{equation} \label{lastineq} d_{n+2\left[\frac{n-3}{ 2}\right]+2} \le 4n+4\left[\frac{n-3}{ 2}\right]+3. \end{equation} Consequentely, for any $n\le r \le n+2\left[ \frac{n-3}{ 2}\right]+1$, the $r$-th slope inequality holds. \end{thm} \begin{proof} For any integer $a$ as above, consider the divisor $H_a$ on $\mF_n$ defined by $$ H_a=C_0 +(n+a)L $$ and set $$ r_a:=n+2a+1. $$ By Proposition \ref{cuori}-(a) and (\ref{stellina}), if $a \neq 0$, then the linear system $|H_a|$ embeds $\mF_n$ isomorphically in ${\mP }^{r_a}$. \noindent If $a=0$, then $\varphi_{H_0} (\mF_n)$ is a ruled surface of degree $H_0^2 =n$ in $\mP^{n+1}$. Moreover $$ \deg(\varphi_{H_0}(C_0)) = C_0 \cdot H_0 = 0 $$ so the unisecant $C_0$ is contracted to a point. Hence $\varphi_{H_0} (\mF_n)$ is a rational normal cone and $\varphi_{H_0}$ is an isomorphism between $\mF_n \setminus C_0$ and its image. In both cases, the corresponding morphism $\varphi_{H_a}$ is an isomorphism on $X$. Namely, if $a \neq 0$ it is clear. If $a = 0$, it follows from $X \cdot C_0 = 4(C_0 + nL) \cdot C_0 =0$, so $X \subset (\mF_n \setminus C_0)$. Therefore, for any $a \ge 0$, the curve $X_a:= \varphi_{H_a}(X) \subset \mP^{r_a}$ has degree $$ \delta_a :=X \cdot H_a = 4(C_0 +nL)\cdot (C_0 +(n+a)L)=4(n+a). $$ We claim that $X_a$ is an extremal curve in $\mP^{r_a}$. In order to show this, we compute first the genus of $X$. As usual, the Adjunction formula gives $$ \begin{array}{ll} 2g-2& =(K_{\mF_n} +X) \cdot X =(-2C_0 -(2+n)L + 4 C_0+4n L) \cdot (4 C_0+4n L)=\\ &= 12 n -8.\\ \end{array} $$ Therefore we obtain \begin{equation} \label{genere di X} g=6n-3. \end{equation} Now we compute the Castelnuovo bound of the genus. Taking into account the bound (\ref{questaea}) of the integer $a$, we obtain that $$ m_{\delta_a,r_a}=\left [ \frac {4n+4a-1}{ n+2a}\right]=3, \quad \epsilon_{\delta_a,r_a}=n-2a-1 $$ and $$ 2 \le \epsilon_{\delta_a, r_a} \le n-1. $$ An immediate computation of the Castelnuovo bound given in (\ref{casbo}) shows that $$ \pi (\delta_a, r_a)= 3(n+2a+\epsilon_{\delta_a,r_a})=3(2n-1)=6n-3, $$ which coincides with the value of $g$ determined in (\ref{genere di X}), so $X_a \subset \mP^{r_a}$ is an extremal curve. In order to prove (\ref{seqcp}), note first that $\epsilon_{\delta_a,r_a} \ge 2$ implies that $n-2a \ge 3$; therefore $\delta_a \ge 3r_a $. So we can apply the cited result of Lange--Martens (see Theorem \ref{LM1}) to the extremal curve $X_a \subset \mP^{r_a}$, obtaining that $$ d_{n+2a}=\delta_a -1 = 4(n+a)-1. $$ Second, since $X_a$ is a fourgonal extremal curve and $r_a \ge3$ (again from (\ref{questaea})), we can apply also Corollary \ref{mainthm4}-$(i)$, obtaining that $$ d_{n+2a+1}=\delta_a = 4(n+a). $$ Therefore (\ref{seqcp}) is proved. As a consequence, for any $0\le a \le \left[ \frac{n-3}{ 2}\right]$, both the $(r_a-1)$-th and $r_a$-th slope inequalities are satisfied. Now consider the highest value $\overline a=\left[\frac{n-3}{ 2}\right]$. Also in this case $X_{\overline a}$ is an extremal fourgonal curve of degree $\delta_{\overline a}$ in $\mP^{r_{\overline a}}$. Hence, by Corollary \ref{mainthm4}-$(ii)$, $$ d_{n+2{\overline a}+2} \le 4(n+{\overline a})+3 $$ hence also (\ref{lastineq}) is proved. 
Finally, as shown before, $\delta_a \ge 3r_a $ and so Corollary \ref{mainthm4}-$(iii)$ yields the $r$-th slope inequality in the considered range of $r$. \end{proof} \begin{rmk} The gonal subsequence (\ref{seqcp}) and the bound (\ref{lastineq}) in Theorem \ref{verylast} can be explicitly written in both the following cases, according to the parity of $n$. If $n$ is even, i.e. $n = 2k$ for some $k$, then $2\left[ \frac{n-3}{ 2}\right] =2(k-2) = n-4$ so $$ d_{n}= 4n-1, \ d_{n+1}= 4n, \ d_{n+2}= 4n+3, \ d_{n+3} = 4n +4, \ \dots, \ d_{2n-4}= 6n-9, \ d_{2n-3}=6n-8 $$ and $d_{2n-2}\le 6n-5$. If $n$ is odd, i.e. $n = 2k+1$ for some $k$, then $2\left[ \frac{n-3}{ 2}\right] =2(k-1) = n-3$ so $$ d_{n}= 4n-1, \ d_{n+1}= 4n, \ d_{n+2}= 4n+3, \ d_{n+3} = 4n +4, \ \dots, \ d_{2n-3}= 6n-7, \ d_{2n-2}=6n-6 $$ and $d_{2n-1}\le 6n-3$. \end{rmk} Theorem \ref{verylast} shows that, as far as $n \ge 5$, there are classes of extremal curves in $\mP^n$ which gonal sequence does not follow the pattern given in \cite[Proposition 4.2]{KM}, for extremal curves in $\mP^3$. Moreover, using the results in \cite{BS2} and \cite{BeS}, it is possible to verify that the curves of Theorem \ref{verylast} are very special curves in the fourgonal moduli space. Therefore the problem of describing the locus of curves violating the slope inequalities is intriguing and deserves further investigations. \begin{thebibliography}{Muk04} \bibitem{ACGH} Arbarello, E.; Cornalba, M.; Griffiths, P. A.; Harris, J.: \emph{Geometry of algebraic curves. Vol. I.}, Grundlehren der Mathematischen Wissenschaften, {\bf 267}, Springer-Verlag, New York (1985), xvi + 386 pp. \bibitem{Ba1} Ballico, E.: \emph {On the gonality sequence of smooth curves}, Arch. Math. (Basel), 99, n. 1 (2012), 25--31. \bibitem{Ba2} Ballico, E.: \emph{On the gonality sequence of smooth curves: normalizations of singular curves in a quadric surface}, Acta Math. Vietnam., 38, n. 4 (2013), 563--573. \bibitem{BeS} Beorchia, V.; Sacchiero, G.: \emph{Weierstrass jump sequences and gonality}, Semigroup Forum 92 (2016), no. 3, 598--632. \bibitem{BS} {Brundu, M.; Sacchiero, G.}: \emph{On the varieties parametrizing trigonal curves with assigned Weierstrass points}, Comm. Algebra, 26(1998), n.10, 3291--3312. \bibitem{BS2} {Brundu, M.; Sacchiero, G.}: \emph{Stratification of the moduli space of four-gonal curves}, Proc. Edinb. Math. Soc. (2) 57 (2014), no. 3, 631--686. \bibitem{Cili} {Ciliberto, C.}: \emph{Alcune applicazioni di un classico procedimento di Castelnuovo}, Sem. di Geom., Dipart. di Matem., Univ. di Bologna (1982-1983), 17--43. \bibitem{Pan} {Cools, F.; D'Adderio, M.; Jensen, D.; Panizzut, M.}: \emph{Brill-Noether theory of curves on $\mP^1 \times \mP^1$: tropical and classical approaches}, Algebr. Comb. 2 (2019), no. 3, 323--341. \bibitem{GH} {Griffiths, Ph.; Harris, J.}: \emph {Principles of algebraic geometry}, Pure and Applied Mathematics, Wiley-Interscience [John Wiley \& Sons], New York, (1978), {xii+813} pp. \bibitem{H} {Hartshorne, R.}: \emph {Algebraic Geometry}, {Graduate Texts in Mathematics, No. 52}, {Springer-Verlag, New York-Heidelberg}, (1977). \bibitem{KM} Kato, T.; Martens, G.: \emph {Algebraic curves violating the slope inequalities}, Osaka J. Math. 52 (2015), 423--437. \bibitem{LM} Lange, H.; Martens,G.: \emph{On the gonality sequence of an algebraic curve}, Manuscripta Math. 137 (2012), no. 3-4, 457--473. 
\bibitem{LN} Lange, H.; Newstead, P.; \emph{Clifford indices for vector bundles on curves}, Affine flag manifolds and principal bundles, Trends Math., Birkh\"auser/Springer Basel AG, Basel (2010),165--202. \bibitem{M} Martens, G.: \emph{The gonality of curves on a {H}irzebruch surface}, Arch. Math. (Basel), 67, n. 4, (1996), {349--352}. \bibitem{R} Reid, M.: \emph{ Chapters on algebraic surfaces}, Complex algebraic geometry (Park City, UT, 1993), IAS/Park City Math. Ser., 3, AMS Providence, (1997), 3--159. \end{thebibliography} \end{document}
2205.13309v1
http://arxiv.org/abs/2205.13309v1
Sizing the White Whale
\NeedsTeXFormat{LaTeX2e}[1994/12/01]\documentclass{ijmart-mod} \chardef\bslash=`\\ \newcommand{\ntt}{\normalfont\ttfamily} \newcommand{\cn}[1]{{\protect\ntt\bslash#1}} \newcommand{\pkg}[1]{{\protect\ntt#1}} \newcommand{\fn}[1]{{\protect\ntt#1}} \newcommand{\env}[1]{{\protect\ntt#1}} \hfuzz1pc \usepackage{graphicx} \usepackage[breaklinks=true]{hyperref} \usepackage{hypcap} \usepackage{mathtools} \usepackage{xcolor} \usepackage[ruled,linesnumbered]{algorithm2e} \usepackage{multirow} \usepackage{array} \newtheorem{thm}{Theorem}[section] \newtheorem{cor}[thm]{Corollary} \newtheorem{lem}[thm]{Lemma} \newtheorem{prop}[thm]{Proposition} \newtheorem{ax}{Axiom} \theoremstyle{definition} \newtheorem{defn}[thm]{Definition} \newtheorem{rem}[thm]{Remark} \newtheorem{qtn}[thm]{Question} \theoremstyle{remark} \newtheorem{step}{Step} \newcommand{\thmref}[1]{Theorem~\ref{#1}} \newcommand{\secref}[1]{\S\ref{#1}} \newcommand{\lemref}[1]{Lemma~\ref{#1}} \newcommand{\interval}[1]{\mathinner{#1}} \newcommand{\eval}[2][\right]{\relax #2#1\rvert} \newcommand{\envert}[1]{\left\lvert#1\right\rvert} \let\abs=\envert \newcommand{\enVert}[1]{\left\lVert#1\right\rVert} \let\norm=\enVert \begin{document} \title{Sizing the White Whale} \author{Antoine Deza} \address{McMaster University, Hamilton, Ontario, Canada} \email{[email protected]} \author{Mingfei Hao} \address{McMaster University, Hamilton, Ontario, Canada} \email{[email protected]} \author{Lionel Pournin} \address{Universit{\'e} Paris 13, Villetaneuse, France} \email{[email protected]} \begin{abstract} We propose a computational, convex hull free framework that takes advantage of the combinatorial structure of a zonotope, as for example its symmetry group, to orbitwise generate all canonical representatives of its vertices. We illustrate the proposed framework by generating all the 1\,955\,230\,985\,997\,140 vertices of the $9$\nobreakdash-dimensional \emph{White Whale}. We also compute the number of edges of this zonotope up to dimension $9$ and exhibit a family of vertices whose degree is exponential in the dimension. The White Whale is the Minkowski sum of all the $2^d-1$ non-zero $0/1$\nobreakdash-valued $d$\nobreakdash-dimensional vectors. The central hyperplane arrangement dual to the White Whale, made up of the hyperplanes normal to these vectors, is called the {\em resonance arrangement} and has been studied in various contexts including algebraic geometry, mathematical physics, economics, psychometrics, and representation theory. \end{abstract} \maketitle \section{Introduction}\label{CZ.sec.introduction} Given a set $G=\{g^1,g^2,\ldots,g^m\}$ of non-zero vectors from $\mathbb{R}^d$, a zonotope $H_G$ can be defined as the convex hull of all the $2^m$ subsums of the vectors in $G$. Equivalently, $H_G$ is the Minkowski sum of the line segments between the origin of $\mathbb{R}^d$ and the extremity of a vector from $G$: $$ H_G=\mathrm{conv}\left\{\sum_{j=1}^{m}\varepsilon_j g^j : \varepsilon_j\in\{0,1\}\right\}\!\mbox{.} $$ Hereafter, the vectors contained in $G$ are referred to as the generators of $H_G$. The associated hyperplane arrangement $\mathcal{A}_{G}$ is made up of the hyperplanes $$ H^j=\{x\in\mathbb{R}^d : x^Tg^j =0\} $$ through the origin of $\mathbb{R}^d$ and orthogonal to a vector in $G$. The chambers, or regions, of $\mathcal{A}_{G}$ are the connected components of the complement in $\mathbb{R}^d$ of the union of the hyperplanes in $\mathcal{A}_{G}$. 
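For concreteness, these objects can be illustrated on a small example. The following sketch is ours and purely illustrative (none of the names appearing in it belong to any implementation discussed in this paper): for the set $G$ of the three non-zero $0/1$-valued vectors of $\mathbb{R}^2$, it lists the subsums whose convex hull is $H_G$ and the normal vectors of the hyperplanes of $\mathcal{A}_G$.
\begin{verbatim}
# Illustrative sketch: the subsums spanning H_G and the normals of A_G
# for the three non-zero 0/1-valued vectors of R^2.
from itertools import product
import numpy as np

G = [np.array(v) for v in product((0, 1), repeat=2) if any(v)]  # (0,1),(1,0),(1,1)

# All 2^m subsums; H_G is their convex hull.
subsums = sorted({tuple(sum(e * g for e, g in zip(eps, G)))
                  for eps in product((0, 1), repeat=len(G))})
print("subsums:", subsums)   # 7 distinct points; 6 of them are vertices of H_G

# The arrangement A_G consists of the hyperplanes x.g = 0, one per generator.
print("normals of A_G:", [tuple(g) for g in G])
\end{verbatim}
In this small case, $H_G$ is a hexagon: the subsum $(1,1)$ arises from two different subsets of $G$ and is not a vertex, while the six remaining subsums are vertices.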
By the duality between zonotopes and hyperplane arrangements, the vertices of $H_G$ and the chambers of $\mathcal{A}_G$ are in one-to-one correspondence. The characteristic polynomial $\chi(\mathcal{A}_{G};t)$ of $\mathcal{A}_{G}$ is defined as $$\chi(\mathcal{A}_{G};t)= b_0(\mathcal{A}_{G})t^d-b_1(\mathcal{A}_{G})t^{d-1}+b_2(\mathcal{A}_{G})t^{d-2}\dots(-1)^d b_d(\mathcal{A}_{G}).$$ where the coefficients $b_i(\mathcal{A}_{G})$ are called the Betti numbers with $b_0(\mathcal{A}_{G})=1$ and $b_1(\mathcal{A}_{G})=m$~\cite{Stanley2012}. The number of chambers of $\mathcal{A}_{G}$, and thus the number of vertices of $H_G$, is equal to $b_0(\mathcal{A}_{G})+b_1(\mathcal{A}_{G})+\dots+b_d(\mathcal{A}_{G})$.\\ We propose a computational framework that goes beyond counting the vertices of $H_G$ as it explicitly generates all of these vertices. Since a zonotope is also a polytope, this can theoretically be achieved from a convex-hull computation. This kind of computation can be performed in a more efficient way by exploiting the potentially large symmetry group of $H_G$. Instead of generating all of the vertices of $H_G$, our framework restricts to generating one canonical representative in the orbit of each vertex under the action of that group. The whole vertex set of $H_G$ can then be recovered by letting the symmetry group of $H_G$ act on these representatives. Minkowski sum computations can be performed via recursive convex hulls by adding the generators one by one. We refer to~\cite{AvisBremnerSeidel1997,AvisFukuda1992,AvisJordan2018,DezaPournin2022,Fukuda2015,GawrilowJoswig2000} and references therein for more details about convex hull computations, orbitwise enumeration algorithms, and Minkowski sum computations. While a number of practical algorithms have been developed, this kind of task is highly computationally expensive. For this reason, our framework is convex hull free. It also exploits the combinatorial properties of Minkowski sums, and involves a linear optimization oracle whose complexity is polynomial in the number $m$ of generators. We establish additional combinatorial properties of a highly structured zonotope---the White Whale~\cite{Billera2019}---that allow for a significant reduction of the number of such linear optimization oracle calls, and thus to perform the orbitwise generation of all the 1 955 230 985 997 140 vertices of the $9$-dimensional White Whale. This zonotope appears in a number of contexts as for example algebraic geometry, mathematical physics, economics, psychometrics, and representation theory~\cite{Kuhne2021,ChromanSinghal2021,Evans1995,GutekunstMeszarosPetersen2019,KamiyaTakemuraTerao2011,Kuhne2020,vanEijck1995,Wang2013} and is a special case of the \emph{primitive zonotopes}, a family of zonotopes originally considered in relation with the question of how large the diameter of a lattice polytope can be \cite{DezaManoussakisOnn2018}. We refer to Fukuda~\cite{Fukuda2015}, Gr\"unbaum~\cite{Grunbaum2003}, and Ziegler~\cite{Ziegler1995} for polytopes and, in particular, zonotopes. In Section~\ref{sec:zonotope}, we present two algorithms that exploit the combinatorial structure of a zonotope to compute its vertices. In Section~\ref{sec:whitewhale}, we give several additional properties of the White Whale that allows for an improved version of these algorithms, making it possible to orbitwise generate the vertices of the $9$\nobreakdash-dimensional White Whale. 
We then explain in Section~\ref{edge-gen} how the number of edges of the White Whale can be recovered from the list of its vertices, and provide these numbers up to dimension $9$. Finally, we study the degrees of its vertices in Section~\ref{sec:degree} and, in particular, we determine the degree in all dimensions of a particular family of vertices, which shows that the degree of some of the vertices of the White Whale is exponential in the dimension. \section{Generating the vertices of a zonotope}\label{sec:zonotope} By its combinatorial structure, linear optimization over a zonotope is polynomial in the number $m$ of its generators. In particular, checking whether a point $p$, given as the sum of a subset $S$ of the generators of $H_G$, is a vertex of $H_G$ is equivalent to checking whether the following system of $m$ inequalities is feasible, which amounts to solving a linear optimization problem. $$ (LO_{S,G})\left\{ \begin{array}{rcl} c^Tg^j\geq1 & \mbox{ for all } & g^j\in S\mbox{,}\\ c^Tg^j\leq-1 & \mbox{ for all } & g^j\in G\mathord{\setminus}S\mbox{.} \end{array} \right. $$ Note that we can assume without loss of generality that no two generators of $H_G$ are collinear. In the sequel, we denote by $p(S)$ the sum of the vectors contained in a subset $S$ of $G$, with the convention that $p(\emptyset)$ is the origin of $\mathbb{R}^d$. Observe that for every vertex $v$ of $H_G$ there is a unique subset $S$ of $G$ such that $v$ is equal to $p(S)$. If $(LO_{S,G})$ is feasible; that is, if there exists a vector $c$ satisfying the above system of $m$ inequalities, then $p(S)$ is the unique point that maximizes $c^T x$ when $x$ ranges within $H_G$. A brute-force linear optimization based approach would essentially consist in calling the oracle $(LO_{S,G})$ on each of the $2^m$ subsets $S$ of $G$. Since any edge of a zonotope is, up to translation, the line segment between the origin and an element of $G$, for any vertex $v=p(S)$ of $H_G$ with $S\neq\emptyset$ there exists a generator $g^i$ in $S$ such that $v$ and $p(S\mathord{\setminus}\{g^i\})$ are the vertices of an edge of $H_G$. Consequently, the brute-force approach can be enhanced by considering the following layered formulation, that results in Algorithm~\ref{LOG}. Consider the layer $\mathcal{L}_k(G)$ made up of the vertices of $H_G$ obtained as the sum of exactly $k$ of its generators. By a slight abuse of notation, we identify from now on a subset $S$ of $G$ such that $p(S)$ is a vertex of $H_G$ with the vertex itself. Recall that two different subsets of $G$ cannot sum to a same vertex of $H_G$. By this identification, $\mathcal{L}_k(G)$ can be written as follows: $$ \mathcal{L}_k(G)=\{S\subseteq G \mbox{ such that } |S|=k \mbox{ and } p(S) \mbox{ is a vertex of } H_G \}\mbox{.} $$ Assuming that $\mathcal{L}_k(G)$ is known, one can consider for each $S$ in $\mathcal{L}_k(G)$ the $m-k$ points $p(S)+g^j$ for $g^j\in G\backslash S$. Calling $(LO_{S,G})$ on all such points $p(S)+g^j$ allows for the determination of all the vertices of $H_G$ that are equal to a subsum of exactly $k+1$ elements of $G$. 
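To make the above discussion concrete, the oracle $(LO_{S,G})$ and the passage from the layer $\mathcal{L}_k(G)$ to the layer $\mathcal{L}_{k+1}(G)$ can be sketched in a few lines as follows. This sketch is ours and purely illustrative; it assumes that SciPy's \texttt{linprog} routine is available, and the function names \texttt{lo\_feasible} and \texttt{next\_layer} are ours, not those of the implementation used for the computations reported in this paper.
\begin{verbatim}
# Illustrative sketch of the oracle (LO_{S,G}) and of one layered step.
import numpy as np
from scipy.optimize import linprog

def lo_feasible(S, G):
    """(LO_{S,G}): is there a c with c.g >= 1 for g in S and
    c.g <= -1 for g in G \ S?  Solved as a feasibility LP."""
    d, S = len(G[0]), set(S)
    A_ub = np.array([-np.asarray(G[j]) if j in S else np.asarray(G[j])
                     for j in range(len(G))], dtype=float)
    b_ub = -np.ones(len(G))
    res = linprog(np.zeros(d), A_ub=A_ub, b_ub=b_ub,
                  bounds=[(None, None)] * d, method="highs")
    return res.success  # feasible  <=>  p(S) is a vertex of H_G

def next_layer(layer_k, G):
    """Extend every S in L_k(G) by one unused generator, keep the vertices."""
    layer_next = set()
    for S in layer_k:
        for j in range(len(G)):
            if j not in S and lo_feasible(S | {j}, G):
                layer_next.add(frozenset(S | {j}))
    return layer_next
\end{verbatim}
Starting from the layer consisting of the empty subsum and iterating \texttt{next\_layer} reproduces the layered generation just described; the symmetry reduction discussed below is not taken into account in this sketch.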
That recursive layered approach allows for a significant speedup as the number of vertices equal to a subsum of exactly $k$ elements of $G$ is in practice much smaller than the upper bound of
$$
{m\choose{k}}
$$
and the number of $(LO_{S,G})$ calls is in practice much smaller than
$$
2^m=\sum_{k=0}^m{m\choose{k}}\!\mbox{.}
$$
In order to compute the layer $\mathcal{L}_{k+1}(G)$, one only needs knowledge of the previous layer $\mathcal{L}_k(G)$. In particular, the memory required by the algorithm is limited to the storage of only two consecutive layers. In Line 10 of Algorithm~\ref{LOG}, the layer $\mathcal{L}_{k+1}(G)$ that has just been computed is stored. At the same time, the layer $\mathcal{L}_k(G)$ can be removed from the memory.
\begin{algorithm}[t]\label{LOG}
\KwIn{the set $G$ of all the $m=|G|$ generators of $H_G$}
$\mathcal{L}_0(G)\leftarrow \{\emptyset\}$
\For{$k=0,\dots,m-1$}{
\For{each $S\in\mathcal{L}_k(G)$}{
\For{each $g^j\in G\backslash S$}{
\If{$(LO_{S\cup \{ g^j\},G})$ is feasible}{
$\mathcal{L}_{k+1}(G)\leftarrow \mathcal{L}_{k+1}(G) \cup \{S\cup \{ g^j \}\}$
}
}
}
Save $\mathcal{L}_{k+1}(G)$
}
\caption{Layered optimization-based vertex generation}
\end{algorithm}
\begin{algorithm}[b]\label{LOOG}
\KwIn{the set $G$ of all the $m=|G|$ generators of $H_G$}
$\widetilde{\mathcal{L}}_0(G)\leftarrow\{\emptyset\}$
\For{$k=0,\dots,\lfloor m/2 \rfloor-1$}{
$i\leftarrow0$
\For{each $S\in\widetilde{\mathcal{L}}_k(G)$}{
\For{each $g^j\in G\backslash S$}{
\If{$(O_{S\cup\{ g^j\},G})$ returns {\sc true}}{
\If{$(LO_{S\cup \{ g^j\},G})$ is feasible}{
$S_{k+1}^i\leftarrow${\em canonical representative of} $S\cup \{ g^j \}$
\If{$S_{k+1}^i$ does not belong to $\widetilde{\mathcal{L}}_{k+1}(G)$}{
$\widetilde{\mathcal{L}}_{k+1}(G)\leftarrow\widetilde{\mathcal{L}}_{k+1}(G)\cup \{S_{k+1}^i\}$
$i\leftarrow{i+1}$
}
}
}
}
}
Save $\widetilde{\mathcal{L}}_{k+1}(G)$
}
\caption{Layered optimization-based orbitwise vertex generation}
\end{algorithm}
It should be noted that Algorithm~\ref{LOG} is a layered version of an algorithm given in \cite{DezaPournin2022}. It can be significantly improved into Algorithm~\ref{LOOG} by exploiting the structural properties of a zonotope $H_G$ as follows.
\begin{rem}
Consider a zonotope $H_G$ with $m=|G|$ generators.
\begin{itemize}
\item[$(i)$] $H_G$ is centrally symmetric with respect to the point
$$
\sigma=\frac{1}{2}p(G)\mbox{.}
$$
The point $p(S)$ is a vertex of $H_G$ if and only if $p(G\backslash S)$ is a vertex of $H_G$. Thus, when considering an orbitwise generation of the vertices of $H_G$, we can assume without loss of generality that $|S|\leq \lfloor m/2 \rfloor$.
\item[$(ii)$] Assuming that $G$ is invariant under the action of a linear transformation group, such as coordinate permutations, an orbitwise generation can be performed by replacing $\mathcal{L}_k(G)$ with the set $\widetilde{\mathcal{L}}_k(G)$ of all canonical representatives of the points from $\mathcal{L}_k(G)$. For coordinate permutations, $\widetilde{\mathcal{L}}_k(G)$ is the set of all the vertices in $\mathcal{L}_k(G)$ such that
$$
p_i(S)\leq p_{i+1}(S)
$$
for all integers $i$ satisfying $1\leq{i}<d$.
\item[$(iii)$] Assuming that an oracle $(O_{S,G})$ certifying that $p(S)$ is not a vertex is available and computationally more efficient than $(LO_{S,G})$, we can further speed the algorithm up by calling $(O_{S,G})$ before calling $(LO_{S,G})$.
Typically, $(O_{S,G})$ is a heuristic that returns {\sc false} if it is able to show that there exists a subset $T$ of $G$ distinct from $S$ such that $p(S)=p(T)$. In this case, $p(S)$ admits two distinct decompositions into subsums of $G$ and therefore cannot be a vertex of $H_G$. If that oracle is able to detect most of the subsums of generators of $H_G$ that do not form a vertex of $H_G$, this results in a significant speedup.
\end{itemize}
\end{rem}
Observe that, in Line 7 of Algorithm~\ref{LOOG}, the subset $S^i_{k+1}$ of $G$ added to $\widetilde{\mathcal{L}}_{k+1}(G)$ should be the one such that $p(S^i_{k+1})$ is the canonical representative in the orbit of $p(S\cup\{g^j\})$ under the action of the chosen group. As was the case with Algorithm~\ref{LOG}, only two consecutive layers need to be kept in memory by Algorithm~\ref{LOOG}. For instance, layer $\widetilde{\mathcal{L}}_k(G)$ can be deleted from memory in Line 17. As we shall see in Section~\ref{edge-gen}, that layered optimization-based vertex generation of $H_G$ also allows for the determination of all the edges of $H_G$.
\section{Generating the vertices of the White Whale}\label{sec:whitewhale}
We first recall a few results concerning the White Whale. Using the notation of~\cite{DezaManoussakisOnn2018,DezaPourninRakotonarivo2021}, the White Whale is the primitive zonotope $H_{\infty}^+(d,1)$ defined as the Minkowski sum of the $2^d-1$ non-zero $0/1$-valued $d$-dimensional vectors. Let us denote by $a(d)$ the number of vertices of $H_{\infty}^+(d,1)$. For example, $H_{\infty}^+(3,1)$ is the zonotope with $a(3)=32$ vertices shown in Figure~\ref{Fig_H3}. Its seven generators are the vectors $(1,0,0)$, $(0,1,0)$, $(0,0,1)$, $(0,1,1)$, $(1,0,1)$, $(1,1,0)$, and $(1,1,1)$. The central arrangement associated to $H_{\infty}^+(d,1)$, the $d$-dimensional resonance arrangement, is denoted by $\mathcal{R}_d$, see~\cite{GutekunstMeszarosPetersen2019} and references therein.
\begin{figure}[b]
\begin{centering}
\includegraphics[scale=1]{Hinftyd1-}
\caption{The 3-dimensional White Whale $H_{\infty}^+(3,1)$.}\label{Fig_H3}
\end{centering}
\end{figure}
\begin{prop}\label{combi}
The White Whale $H_{\infty}^+(d,1)$ has the following properties.
\begin{itemize}
\item[$(i)$] $H_{\infty}^+(d,1)$ is invariant under the symmetries of $\mathbb{R}^d$ that consist in permuting coordinates, see \cite{DezaManoussakisOnn2018}.
\item[$(ii)$] $H_{\infty}^+(d,1)$ is contained in the hypercube $ [0,2^{d-1}]^d$ and the intersection of $H_{\infty}^+(d,1)$ with any facet of that hypercube coincides, up to translation and rotation, with $H_{\infty}^+(d-1,1)$, see \cite{DezaManoussakisOnn2018}.
\item[$(iii)$] The number of vertices $a(d)$ of $H_{\infty}^+(d,1)$ is an even multiple of $d+1$, and satisfies (see \cite{DezaPourninRakotonarivo2021,GutekunstMeszarosPetersen2019,Wang2013})
$$
\frac{d+1}{2^{d+1}}2^{d^2(1-10/\ln d)}\leq a(d)\leq \frac{d+4}{2^{3(d-1)}}2^{d^2}\mbox{.}
$$
\end{itemize}
\end{prop}
In view of assertion $(i)$ in the statement of Proposition~\ref{combi}, we call a vertex $v$ of $H_{\infty}^+(d,1)$ \emph{canonical} when $v_i\leq v_{i+1}$ for $1\leq{i}\leq{d-1}$. The values of $a(d)$ have been determined up to $d=9$ as recorded in sequence A034997 of the On-Line Encyclopedia of Integer Sequences~\cite{OEIS}. We report these values in Table~\ref{Table_a(d)} along with the references where they are obtained.
The authors of the references where $a(d)$ is determined via the characteristic polynomial of $\mathcal{A}_{G}$, that is, by counting, are indicated using {\sc capital letters}.
\begin{rem}
By Proposition~\ref{combi}, $a(d)$ is even and a multiple of $d+1$. Interestingly, when $d$ is equal to $5$, we obtain from Table~\ref{Table_a(d)} that
$$
\frac{a(d)}{2(d+1)}=941\mbox{,}
$$
which is a prime number.
\end{rem}
If the aim is to count but not to generate the vertices of $H_{\infty}^+(d,1)$, the approach proposed by Kamiya, Takemura, and Terao~\cite{KamiyaTakemuraTerao2011} can be applied. It was enhanced by Chroman and Singhal \cite{ChromanSinghal2021} who determined the characteristic polynomial of the $9$-dimensional resonance arrangement $\mathcal{R}_9$. In addition, formulas for the Betti numbers $b_2(\mathcal{R}_d)$ and $b_3(\mathcal{R}_d)$ have been given by K{\"u}hne~\cite{Kuhne2020}, and a formula for $b_4(\mathcal{R}_d)$ by Chroman and Singhal~\cite{ChromanSinghal2021}. Pursuing the characteristic polynomial approach, Brysiewicz, Eble, and K{\"u}hne~\cite{Kuhne2021} computed the Betti numbers for a number of hyperplane arrangements with large symmetry groups and, independently and concurrently, confirmed the value of $a(9)$.
\begin{table}[t]
$$
\begin{array}{c|c|c}
d & a(d) & \mbox{References} \\
\hline
2 & 6 & \mbox{{Evans}~\cite{Evans1995} (1995)} \\
3 & 32 & \mbox{{Evans}~\cite{Evans1995} (1995)} \\
4 & 370 & \mbox{{Evans}~\cite{Evans1995} (1995), {van Eijck}~\cite{vanEijck1995} (1995)} \\
5 & 11\,292 & \mbox{{Evans}~\cite{Evans1995} (1995), {van Eijck}~\cite{vanEijck1995} (1995)} \\
6 & 1\,066\,044 & \mbox{{Evans}~\cite{Evans1995} (1995), {van Eijck}~\cite{vanEijck1995} (1995)} \\
7 & 347\,326\,352 & \mbox{{van Eijck}~\cite{vanEijck1995} (1995), {\sc Kamiya et al.}~\cite{KamiyaTakemuraTerao2011} (2011)} \\
8 & 419\,172\,756\,930 & \mbox{{Evans}~\cite{OEIS} (2011)} \\
9 & 1\,955\,230\,985\,997\,140 & \mbox{{\sc Brysiewicz, Eble, and K{\"u}hne}~\cite{Kuhne2021} (2021)},\\
 & & \mbox{{\sc Chroman and Singhal}~\cite{ChromanSinghal2021} (2021)} \\
\end{array}
$$
\caption{Generating and {\sc counting} the vertices of the White Whale.}\label{Table_a(d)}
\end{table}
From now on, we denote by $G_d$ the set of the $2^d-1$ generators of $H_{\infty}^+(d,1)$. Throughout the article, we will illustrate the proposed methods using the following family of vertices. When $1\leq{k}\leq{d-1}$, denote by $U_d^k$ the set of all the $0/1$-valued $d$-dimensional vectors whose last coordinate is equal to $1$ and that admit at most $k$ non-zero coordinates. For example, when $k=2$,
$$
U_d^2=
\left\{
\left[
\begin{array}{c}
1\\
0\\
0\\
\vdots\\
0\\
1\\
\end{array}\right]\!\mbox{, }
\left[
\begin{array}{c}
0\\
1\\
0\\
\vdots\\
0\\
1\\
\end{array}\right]\!\mbox{, }\ldots\mbox{, }
\left[
\begin{array}{c}
0\\
0\\
\vdots\\
0\\
1\\
1\\
\end{array}\right]\!\mbox{, }
\left[
\begin{array}{c}
0\\
0\\
0\\
\vdots\\
0\\
1\\
\end{array}\right]
\right\}\!\mbox{,}
$$
and $p(U_d^2)$ is equal to $(1,\dots,1,d)$. In general,
$$
p(U_d^k)=\left(\sum_{i=0}^{k-2}{d-2 \choose i},\dots,\sum_{i=0}^{k-2}{d-2 \choose i},\sum_{i=0}^{k-1}{d-1 \choose i}\right)\mbox{.}
$$
Proposition~\ref{sommet} illustrates how $(LO_{S,G_d})$ can be used to identify the vertices of the White Whale in any dimension in the special case of $p(U_d^k)$.
\begin{prop}\label{sommet}
The point $p(U_d^k)$ is a canonical vertex of $H_\infty^+(d,1)$.
\end{prop}
\begin{proof}
As the coordinates of $p(U_d^k)$ are nondecreasing, if this point is a vertex of $H_\infty^+(d,1)$, it must be canonical. We consider the $d$-dimensional vector
$$
c=(-2,\dots,-2,2k-1)
$$
and use $(LO_{S,G_d})$ with $S=U_d^k$ to show that $p(U_d^k)$ is indeed a vertex of $H_\infty^+(d,1)$. If $g$ is a vector in $U_d^k$, then $c^Tg\geq1$. Now if $g$ belongs to $G_d\mathord{\setminus}U_d^k$, then either $g_d=0$ or at least $k$ of its $d-1$ first coordinates are non-zero. In the former case, $c^Tg\leq-2$ because $g$ has at least one non-zero coordinate. In the latter case,
$$
c_1g_1+\dots+c_{d-1}g_{d-1}\leq-2k
$$
and $c_dg_d=2k-1$. Hence $c^Tg\leq-1$ and the result follows.
\end{proof}
Observe that the last coordinate of $p(U_d^k)$ is precisely the number $l$ of elements of $U_d^k$ and thus $p(U_d^k)$ belongs to $\widetilde{\mathcal{L}}_l(G_d)$. Using an approach similar to that of Proposition~\ref{sommet}, one can obtain other families of canonical vertices of the White Whale. For instance, according to Proposition~\ref{sommets}, the sum of the generators belonging to the subset $W_d^k$ of $G_d$ made up of the $2^k-1$ vectors whose first $d-k$ coordinates are equal to zero is a vertex of $H_\infty^+(d,1)$.
\begin{prop}\label{sommets}
$\:$
\begin{itemize}
\item[$(i)$] The point $p(W_d^k)=(0,\dots,0,2^{k-1},\dots,2^{k-1})$ whose first $d-k$ coordinates are equal to $0$ and whose last $k$ coordinates are equal to $2^{k-1}$ is a canonical vertex of $H_\infty^+(d,1)$ that belongs to $\widetilde{\mathcal{L}}_{2^{k}-1}(G_d)$.
\item[$(ii)$] The only non-zero $0/1$-valued canonical vertex of $H_\infty^+(d,1)$ is $(0,\dots,0,1)$ and therefore, $\widetilde{\mathcal{L}}_{1}(G_d)=\{(0,\dots,0,1)\}$.
\end{itemize}
\end{prop}
\begin{proof}
In order to prove assertion $(i)$, consider the vector $c$ whose first $d-k$ coordinates are equal to $0$ and whose last $k$ coordinates are $1$. It suffices to use $(LO_{S,G_d})$ with $S=W_d^k$ to show that $p(W_d^k)$ is a vertex of $H_\infty^+(d,1)$. As the coordinates of this point are nondecreasing, it is a canonical vertex of $H_\infty^+(d,1)$. Observing that there are exactly $2^{k}-1$ vectors $g$ in $G_d$ such that $c\mathord{\cdot}g>0$ further shows that this vertex belongs to $\widetilde{\mathcal{L}}_{2^{k}-1}(G_d)$. Observe that taking $k=1$ in assertion $(i)$ proves that $(0,\dots,0,1)$ is a canonical vertex of $H_\infty^+(d,1)$. In order to prove assertion $(ii)$, recall that a vertex of $H_\infty^+(d,1)$ is the sum of a unique subset of $G_d$. However, any point from $\{0,1\}^d$ with at least two non-zero coordinates can be written as the sum of several different subsets of $G_d$ (as for instance the subset that contains the point itself, and a subset that contains several points with only one non-zero coordinate).
\end{proof}
Lemmas \ref{111} to \ref{edge} below, where ${\bf 1}$ denotes the generator $(1,\dots,1)$, are building blocks for an oracle that efficiently identifies that $p(S)$ is not a vertex of $H_{\infty}^+(d,1)$ for most subsets $S$ of $G_d$, by providing a computationally easy-to-check necessary condition for being a vertex of $H_{\infty}^+(d,1)$.
\begin{lem}\label{111}
Consider a subset $S$ of $G_d$ such that $p(S)$ is a vertex of $H_{\infty}^+(d,1)$. The vector ${\bf 1}$ belongs to $S$ if and only if $|S|\geq 2^{d-1}$.
\begin{proof}
The $2^d-2$ vectors in $G_d\backslash\{{\bf 1}\}$ can be partitioned into $2^{d-1}-1$ unordered pairs $\{g^i,\bar{g}^i\}$ such that $g^i+\bar{g}^i={\bf 1}$.
Assume that ${\bf 1}$ belongs to $S$. If, for some $i$, neither of the vectors in the pair $\{g^i,\bar{g}^i\}$ belonged to $S$, then
$$
p(S)= p([S\mathord{\setminus}\{{\bf 1}\}]\cup\{ g^i, \bar{g}^i\})\mbox{,}
$$
so that $p(S)$ would admit two distinct decompositions, and thus could not be a vertex. It follows that, in addition to ${\bf 1}$, $S$ contains at least $2^{d-1}-1$ generators; that is, $|S|\geq 2^{d-1}$. Conversely, if ${\bf 1}$ does not belong to $S$, then ${\bf 1}$ belongs to $G_d\mathord{\setminus}S$ and, since $p(G_d\mathord{\setminus}S)$ is also a vertex of $H_{\infty}^+(d,1)$, the above argument yields $|G_d\mathord{\setminus}S|\geq 2^{d-1}$; hence $|S|=2^d-1-|G_d\mathord{\setminus}S|\leq 2^{d-1}-1$. Therefore, ${\bf 1}$ belongs to $S$ if and only if $|S|\geq 2^{d-1}$.
\end{proof}
\end{lem}
\begin{lem}\label{edge111}
Any edge of the $d$-dimensional White Whale that coincides, up to translation, with the line segment between the origin of $\mathbb{R}^d$ and the point ${\bf 1}$ connects a vertex that is the sum of exactly $2^{d-1}-1$ generators to a vertex that is the sum of exactly $2^{d-1}$ generators.
\begin{proof}
This is a direct consequence of Lemma~\ref{111}.
\end{proof}
\end{lem}
When $k=d-1$, assertion $(i)$ of Proposition~\ref{sommets} asserts that the point
$$
p(W_d^{d-1})=(0,2^{d-2},\dots,2^{d-2})
$$
is a canonical vertex that belongs to $\mathcal{L}_{2^{d-1}-1}(G_d)$, which provides an illustration of Lemma~\ref{edge111} with the edge of $H_\infty^+(d,1)$ whose endpoints are $p(W_d^{d-1})$ and $p(W_d^{d-1}\cup\{ {\bf 1}\})$. For example, when $d=3$, the segment with vertices $(0,2,2)$ and $(1,3,3)$ is an edge of $H_\infty^+(3,1)$ as shown in Figure~\ref{Fig_H3-L}.
\begin{lem}\label{barg}
Consider a subset $S$ of $G_d$ such that $p(S)$ is a vertex of $H_{\infty}^+(d,1)$ and a vector $g^j$ in $S$. If $|S|< 2^{d-1}$, then ${\bf 1}-g^j$ does not belong to $S$.
\begin{proof}
Assume that $|S|< 2^{d-1}$. By Lemma~\ref{111}, $S$ cannot contain ${\bf 1}$. Assume that both $g^j$ and ${\bf 1}-g^j$ belong to $S$. In this case,
$$
p(S)= p([S\backslash \{g^j,{\bf 1}-g^j\}]\cup\{{\bf 1}\})\mbox{,}
$$
and $p(S)$ would admit two distinct decompositions, a contradiction.
\end{proof}
\end{lem}
Proposition~\ref{sommet}, Proposition~\ref{sommets}, and Lemma~\ref{edge111} are illustrated in Figure~\ref{Fig_H3-L} where the vertices of $H_\infty^+(d,1)$ contained in the layer $\mathcal{L}_{k}(G_d)$ are marked $\circ$ when $k$ is even and $\bullet$ when $k$ is odd. The marks of the canonical vertices of $H_\infty^+(d,1)$ are further circled, and the edges equal, up to translation, to the line segment whose endpoints are the origin of $\mathbb{R}^d$ and the point ${\bf 1}$ are colored red.
\begin{figure}[t]
\begin{centering}
\includegraphics[scale=1]{Hinftyd1}
\caption{The partition into eight layers of the vertex set of the $3$-dimensional White Whale $H_{\infty}^+(3,1)$.}\label{Fig_H3-L}
\end{centering}
\end{figure}
For a generator $g^j\in G_d$, let $\sigma(g^j)$ denote the {\em support} of $g^j$; that is, the number of coordinates of $g^j$ that are equal to $1$. For any subset $S$ of $G_d$ and any vector $g^j$ in $G_d$, consider the following subset of $S$:
$$
S\langle g^j\rangle=\{g\in{S}:\mbox{ if }g_i^j=0\mbox{, then }g_i=0\mbox{ for } 1\leq{i}\leq{d} \}\mbox{,}
$$
or equivalently
$$
S\langle g^j\rangle=\{g\in{S}:g_i\wedge {g_i^j}=g_i\mbox{ for } 1\leq{i}\leq{d} \}\mbox{.}
$$
Lemma~\ref{edge} is a generalization of Lemma~\ref{edge111} that provides an easy-to-check necessary condition to be applied before calling $(LO_{S,G_d})$.
\begin{lem}\label{edge}
Consider a subset $S$ of $G_d$ such that $p(S)$ is a vertex of $H_{\infty}^+(d,1)$ and a vector $g^j$ contained in $G_d\mathord{\setminus}S$. If $|S\langle{g^j}\rangle|$ is not equal to $2^{\sigma(g^j)-1}-1$, then $p(S\cup\{g^j\})$ is not a vertex of $H_{\infty}^+(d,1)$.
\begin{proof}
The $2^{\sigma(g^j)}-2$ vectors in $G_d\langle g^j\rangle\mathord{\setminus}\{ g^j \}$ can be partitioned into $2^{\sigma(g^j)-1}-1$ unordered pairs $\{g^l,\bar{g}^l\}$ such that $g^l+\bar{g}^l=g^j$. If, for some $l$, neither of the vectors in the pair $\{g^l,\bar{g}^l\}$ belongs to $S\langle{g^j}\rangle$, then
$$
p(S\cup\{g^j\})= p(S\cup\{ g^l,\bar{g}^l\})\mbox{.}
$$
In other words, $p(S\cup\{g^j\})$ can be obtained as the sum of two different subsets of $G_d$ and, therefore, it cannot be a vertex of $H_\infty^+(d,1)$. Now assume that, for some $l$, both $g^l$ and $\bar{g}^l$ belong to $S\langle{g^j}\rangle$. Then
$$
p(S)= p([S\mathord{\setminus}\{ g^l,\bar{g}^l\}]\cup\{g^{j}\})\mbox{.}
$$
It follows that $p(S)$ is obtained as the sum of two different subsets of $G_d$ and cannot be a vertex of $H_\infty^+(d,1)$, a contradiction. This shows that, in order for $p(S\cup\{g^j\})$ to be a vertex of $H_\infty^+(d,1)$, it is necessary that $S\langle{g^j}\rangle$ contains exactly one vector from each of the $2^{\sigma(g^j)-1}-1$ unordered pairs $\{g^l,\bar{g}^l\}$ of vectors such that $g^l+\bar{g}^l=g^j$, as desired.
\end{proof}
\end{lem}
Lemma~\ref{edge} immediately results in an oracle $(O_{S\cup\{g^j\},G_d})$ that returns {\sc false} when $S\langle{g^j}\rangle$ does not contain exactly $2^{\sigma (g^j)-1}-1$ vectors; that is, when the point $p(S\cup \{g^j\})$ is certified not to be a vertex of $H_\infty^+(d,1)$. Computationally, calling $(O_{S\cup\{g^j\},G_d})$ first is significantly more efficient than just calling $(LO_{S\cup\{g^j\},G_d})$ because, in practice, it allows one to quickly discard a large number of candidate vertices. Proposition~\ref{L2} illustrates how $(O_{S\cup\{g^j\},G_d})$ can be used to identify vertices of the White Whale in any dimension.
\begin{prop}\label{L2}
For any $d\geq 2$, $\widetilde{\mathcal{L}}_{2}(G_d)$ is equal to $\{(0,\dots,0,1,2)\}$, or equivalently to $\{S_2^1\}$ where $S_2^1=\{(0,\dots,0,1),(0,\dots,0,1,1)\}$.
\begin{proof}
Consider a vertex $p(S)$ in $\widetilde{\mathcal{L}}_{k}(G_d)$ and a vector $g^j$ in $G_d\backslash S$. Since $S\langle{g^j}\rangle$ is a subset of $S$ and $g^j$ does not belong to $S$, the condition that $S\langle{g^j}\rangle\cup\{g^j\}$ admits exactly $2^{\sigma(g^j)-1}$ elements implies
$$
2^{\sigma(g^j)-1}\leq |S|+1\mbox{.}
$$
As, in addition, $S$ contains exactly $k$ elements,
$$
{\sigma(g^j)}\leq 1+\lfloor\log_2(k+1)\rfloor\mbox{.}
$$
Hence, taking $k=1$ yields ${\sigma(g^j)}\leq 2$. By assertion $(ii)$ in the statement of Proposition~\ref{sommets}, $\widetilde{\mathcal{L}}_{1}(G_d)=\{(0,\dots,0,1)\}$ and no $0/1$-valued point with at least two non-zero coordinates is a vertex of $H_{\infty}^+(d,1)$. Consequently, $g^j$ must satisfy $g^j_d=1$. Since ${\sigma(g^j)}\leq 2$, the only possible candidate for $g^j$ is, up to the relabeling of the first $d-1$ coordinates, the vector $(0,\dots,0,1,1)$. Since $(LO_{S,G_d})$ is feasible for any $d\geq2$ and
$$
S=\{(0,\dots,0,1),(0,\dots,0,1,1)\}\mbox{,}
$$
we obtain $\widetilde{\mathcal{L}}_{2}(G_d)=\{(0,\dots,0,1,2)\}$ as desired.
\end{proof}
\end{prop}
Using an approach similar to that of Proposition~\ref{L2}, one obtains the first few canonical vertex layers of the White Whale.
We recall that $S^i_k$ denotes the $i^{th}$ canonical vertex of the layer $\widetilde{\mathcal{L}}_{k}(G_d)$.
\begin{prop}\label{Lk}
The following assertions hold.
\begin{itemize}
\item[$(i)$] For any $d\geq 3$, $\widetilde{\mathcal{L}}_{3}(G_d)$ is equal to $\{(0,\dots,0,2,2),(0,\dots,0,1,1,3)\}$, or equivalently to $\{S^1_3,S^2_3\}$ where
$$
\left\{
\begin{array}{l}
S^1_3=S^1_2\cup \{(0,\dots,0,0,1,0)\}\mbox{,}\\
S^2_3=S^1_2\cup\{(0,\dots,0,1,0,1)\}\mbox{.}\\
\end{array}
\right.
$$
\item[$(ii)$] For any $d\geq 4$, $\widetilde{\mathcal{L}}_{4}(G_d)$ is equal to
$$
\{(0,\dots,0,1,3,3),(0,\dots,0,2,2,4),(0,\dots,0,1,1,1,4)\}\mbox{,}
$$
or equivalently to $\{S^1_4,S^2_4,S^3_4\}$ where
$$
\left\{
\begin{array}{l}
S^1_4=S^1_3\cup\{(0,\dots,0,0,1,1,1)\}\mbox{,}\\
S^2_4=S^2_3\cup\{(0,\dots,0,0,1,1,1)\}\mbox{,}\\
S^3_4=S^2_3\cup\{(0,\dots,0,1,0,0,1)\}\mbox{.}\\
\end{array}
\right.
$$
\end{itemize}
\end{prop}
Lemma~\ref{edge} makes it possible to exploit the structure of the White Whale in order to further enhance Algorithm~\ref{LOOG}, resulting in Algorithm~\ref{LOOGd}, which can be used to efficiently generate all the canonical vertices of the White Whale.
\begin{algorithm}[b]\label{LOOGd}
\KwIn{the dimension $d$}
$\widetilde{\mathcal{L}}_0(G_d)\leftarrow\{\emptyset\}$
\For{$k=0,\dots,2^{d-1}-2$}{
$i\leftarrow0$
\For{each $S\in\widetilde{\mathcal{L}}_k(G_d)$}{
\For{each $g^j\in G_d\backslash S$}{
\If{$(O_{S\cup \{g^j\},G_d})$ returns {\sc true}}{
\If{$(LO_{S\cup \{ g^j\},G_d})$ is feasible}{
$S^i_{k+1}\leftarrow$ {\em canonical representative of} $S\cup \{ g^j \}$
\If{$S^i_{k+1}$ does not belong to $\widetilde{\mathcal{L}}_{k+1}(G_d)$}{
$\widetilde{\mathcal{L}}_{k+1}(G_d)\leftarrow\widetilde{\mathcal{L}}_{k+1}(G_d)\cup \{S^i_{k+1}\}$
$i\leftarrow{i+1}$
}
}
}
}
}
Save $\widetilde{\mathcal{L}}_{k+1}(G_d)$
}
\caption{Orbitwise vertex generation for the White Whale}
\end{algorithm}
Note that in Line 5 of Algorithm~\ref{LOOGd}, we can restrict attention to the vectors $g^j$ in $G_d\mathord{\setminus}S$ distinct from ${\bf 1}$ (by Lemma~\ref{111}), such that ${\bf 1}-g^j$ does not belong to $S$ (by Lemma~\ref{barg}), and such that $g^j_i\leq g^j_{i+1}$ when $p(S)_i=p(S)_{i+1}$ (by assertion $(i)$ of Proposition~\ref{combi}). We benchmarked Algorithm~\ref{LOOGd} by generating all the canonical vertices of $H_{\infty}^+(d,1)$ up to $d=9$. As an illustration, we list all the points in $\widetilde{\mathcal{L}}_{k}(G_d)$ for $0\leq{k}\leq2^{d-1}-1$ when $d=3$ in Table~\ref{a3-vertices} and when $d=4$ in Table~\ref{a4-vertices}, where $|\mathcal{O}_{p(S)}|$ denotes the size of the orbit generated by the action of the symmetry group of $H_{\infty}^+(d,1)$ on a canonical vertex $p(S)$. Different implementations of the algorithm are used depending on the size of the solution space. For $d=1,\dots,8$, the algorithm is directly executed on a \texttt{CPython} interpreter, which is optimized through \texttt{Cython} and accelerated by the \texttt{IBM CPLEX} optimizer. Although layers are calculated sequentially due to their geometrical positions, the vertex candidates are partitioned into bundles and dispatched to multiple processes for further CPU-bound calculations. For $d=9$, the algorithm is implemented as an \texttt{Apache Spark} pipeline. The task distribution, result collection, and deduplication are managed by the underlying computation engine, while the vertex-checking oracles are programmed as a map-reduce step, which is a \texttt{Python} script scheduled by \texttt{Spark} executors.
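To make the vertex-checking step concrete, the sketch below implements, in plain \texttt{Python}, the necessary condition of Lemma~\ref{edge} that the oracle $(O_{S\cup\{g^j\},G_d})$ certifies before any call to $(LO_{S\cup\{g^j\},G_d})$; it is given for illustration only and is not the \texttt{Cython} or \texttt{Spark} code used for the reported computations.
\begin{verbatim}
# Illustrative sketch of the combinatorial oracle (O): certify cheaply,
# before any linear optimization, whether p(S u {g}) can still be a vertex.
# Generators are represented as 0/1 tuples.
def support_size(g):
    # sigma(g): the number of coordinates of g that are equal to 1
    return sum(g)

def restricted_subset(S, g):
    # S<g>: the generators of S whose support is contained in that of g
    return [h for h in S if all(hi <= gi for hi, gi in zip(h, g))]

def oracle_O(S, g):
    """False when p(S u {g}) is certified not to be a vertex; True when the
    necessary condition |S<g>| = 2^(sigma(g)-1) - 1 of the lemma holds."""
    return len(restricted_subset(S, g)) == 2 ** (support_size(g) - 1) - 1

# Example in dimension 3 with S = {(0,0,1), (0,1,1)}, so that p(S) = (0,1,2):
S = [(0, 0, 1), (0, 1, 1)]
print(oracle_O(S, (1, 1, 1)))  # False: (1,2,3) cannot be a vertex
print(oracle_O(S, (0, 1, 0)))  # True:  (0,2,2) remains a candidate
\end{verbatim}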
The computation was run on an Ubuntu 16.04 server with a total of 72 threads ($2\times$Intel\textsuperscript{\tiny\textregistered} Xeon\textsuperscript{\tiny\textregistered} Processor E5-2695 v4) and 300\,GB of memory, and required 3 months of computational time. The output is stored in cloud storage.
\begin{table}[b]
$$
\renewcommand{\arraystretch}{1.2}
\begin{array}{c|c|c|c}
\widetilde{\mathcal{L}}_k(G_3) & S^i_k & p(S^i_k) & |\mathcal{O}_{p(S^i_k)}|\\[0.5\smallskipamount]
\hline
\hline
\widetilde{\mathcal{L}}_0(G_3) & S^1_0=\emptyset & (0,0,0) & 2\\
\hline
\widetilde{\mathcal{L}}_1(G_3) & S^1_1=S^1_0\cup\{(0,0,1)\} & (0,0,1) & 6\\
\hline
\widetilde{\mathcal{L}}_2(G_3) & S^1_2=S^1_1\cup\{(0,1,1)\} & (0,1,2) & 12\\
\hline
\widetilde{\mathcal{L}}_3(G_3) & S^1_3=S^1_2\cup\{(0,1,0)\} & (0,2,2) & 6\\[-\smallskipamount]
 & S^2_3=S^1_2\cup\{(1,0,1)\} & (1,1,3) & 6\\
\hline
\hline
 & & & a(3)=\sum |\mathcal{O}_{p(S^i_k)}|=32\\
\end{array}
$$
\caption{Sizing the $3$-dimensional White Whale}\label{a3-vertices}
\end{table}
\begin{table}[t]
$$
\renewcommand{\arraystretch}{1.2}
\begin{array}{c|c|c|c}
\widetilde{\mathcal{L}}_k(G_4) & S^i_k & p(S^i_k) & |\mathcal{O}_{p(S^i_k)}|\\[0.5\smallskipamount]
\hline
\hline
\widetilde{\mathcal{L}}_0(G_4) & S^1_0=\emptyset & (0,0,0,0) & 2\\
\hline
\widetilde{\mathcal{L}}_1(G_4) & S^1_1=S^1_0\cup\{(0,0,0,1)\} & (0,0,0,1) & 8\\
\hline
\widetilde{\mathcal{L}}_2(G_4) & S^1_2=S^1_1\cup\{(0,0,1,1)\} & (0,0,1,2) & 24\\
\hline
\widetilde{\mathcal{L}}_3(G_4) & S^1_3=S^1_2\cup\{(0,0,1,0)\} & (0,0,2,2) & 12\\[-\smallskipamount]
 & S^2_3=S^1_2\cup\{(0,1,0,1)\} & (0,1,1,3) & 24\\
\hline
\widetilde{\mathcal{L}}_4(G_4) & S^1_4=S^1_3\cup\{(0,1,1,1)\} & (0,1,3,3) & 24\\[-\smallskipamount]
 & S^2_4=S^2_3\cup\{(0,1,1,1)\} & (0,2,2,4) & 24\\[-\smallskipamount]
 & S^3_4=S^2_3\cup\{(1,0,0,1)\} & (1,1,1,4) & 8\\
\hline
\widetilde{\mathcal{L}}_5(G_4) & S^1_5=S^1_4\cup\{(0,1,0,1)\} & (0,2,3,4) & 48\\[-\smallskipamount]
 & S^2_5=S^1_4\cup\{(1,0,1,1)\} & (1,1,4,4) & 12\\[-\smallskipamount]
 & S^3_5=S^2_4\cup\{(1,0,0,1)\} & (1,2,2,5) & 24\\
\hline
\widetilde{\mathcal{L}}_6(G_4) & S^1_6=S^1_5\cup\{(0,1,1,0)\} & (0,3,4,4) & 24\\[-\smallskipamount]
 & S^2_6=S^1_5\cup\{(1,0,1,1)\} & (1,2,4,5) & 48\\[-\smallskipamount]
 & S^3_6=S^3_5\cup\{(1,0,1,1)\} & (2,2,3,6) & 24\\
\hline
\widetilde{\mathcal{L}}_7(G_4) & S^1_7=S^1_6\cup\{(0,1,0,0)\} & (0,4,4,4) & 8\\[-\smallskipamount]
 & S^2_7=S^1_6\cup\{(1,0,1,1)\} & (1,3,5,5) & 24\\[-\smallskipamount]
 & S^3_7=S^2_6\cup\{(1,0,0,1)\} & (2,2,4,6) & 24\\[-\smallskipamount]
 & S^4_7=S^3_6\cup\{(1,1,0,1)\} & (3,3,3,7) & 8\\
\hline
\hline
 & & & a(4)=\sum |\mathcal{O}_{p(S^i_k)}|=370\\
\end{array}
$$
\caption{Sizing the $4$-dimensional White Whale}\label{a4-vertices}
\end{table}
It is convenient to identify a generator $g$ with its binary representation. For example, the generator
$$
g^j=(0,\dots,0,1,0,\dots,0,1)
$$
is identified with the integer $2^j+1$.
Likewise, the set $U_d^2$ of the generators summing up to the vertex
$$
p(U_d^2)=(1,\dots,1,d)
$$
that we considered in Proposition~\ref{sommet} can be identified with the set
$$
\{1,3,5,\dots,2^{d-2}+1,2^{d-1}+1\}
$$
and the set $W_d^k$ of the generators summing up to the vertex
$$
p(W_d^k)=(0,\dots,0,2^{k-1},\dots,2^{k-1})
$$
considered in item $(i)$ of Proposition~\ref{sommets} can be identified with the set
$$
\{1,2,3,\dots,2^{k}-1\}\mbox{.}
$$
Since the generation of the canonical vertices of $H_{\infty}^+(8,1)$ gives the vertices of $\widetilde{\mathcal{L}}_{k}(G_d)$ up to $k=8$ for all $d$, we can slightly warm-start Algorithm~\ref{LOOGd} by beginning the computation from $\widetilde{\mathcal{L}}_{8}(G_9)$. It might be quite speculative to draw any empirical intuition based on data available only up to $d=9$. However, the following pattern may hold at least for the first few values of $d$: the algorithm reaches the layer $\widetilde{\mathcal{L}}_{2^{d-2}+d}(G_d)$ relatively quickly, the last $d$ layers are also relatively easy to compute, and the bulk of the computation results from the determination of the remaining $2^{d-2}-2d$ layers. Over this range, the size of the layers grows almost linearly to reach about $4\%$ of $a(d)$ for $d=7$, $2\%$ for $d=8$, and $1\%$ for $d=9$. Assuming that the same trend continues for $d=10$, Algorithm~\ref{LOOGd} would require the determination of a layer of size $0.5\%$ of $a(10)$, which is currently intractable, as the determination of the largest layer for $d=9$ already requires between one and two days.
\section{The edges of the White Whale}\label{edge-gen}
Consider a subset $S$ of $G_d$ and an element $g$ of $S$. Assume that both $p(S)$ and $p(S\mathord{\setminus}\{g\})$ are vertices of $H_\infty^+(d,1)$. Since $H_\infty^+(d,1)$ is a zonotope, it must then have an edge with vertices $p(S)$ and $p(S\backslash \{g\})$. In other words, any edge of $H_\infty^+(d,1)$ connects a vertex in $\mathcal{L}_{k-1}(G_d)$ to a vertex in $\mathcal{L}_{k}(G_d)$ for some $k$. As the proposed algorithms traverse the edges between two consecutive layers to generate the vertices, these algorithms can be used to generate the edges as well. However, in practice the number of edges can be significantly larger than the number of vertices and thus generating the edges of the White Whale quickly becomes intractable memory-wise. Consequently, we propose an approach that, assuming that the vertices are determined by Algorithm~\ref{LOOGd}, counts the number of edges between $\mathcal{L}_{k-1}(G_d)$ and $\mathcal{L}_{k}(G_d)$ instead of generating them. The total number of edges is then obtained as a sum over $k$. Given a vertex $p(S)$ of $H_\infty^+(d,1)$ distinct from the origin $p(\emptyset)$, let $\delta^-_S$ denote the number of edges between $p(S)$ and a vertex in $\mathcal{L}_{|S|-1}(G_d)$:
$$
\delta^-_S=|\{g\in{S}: p(S\backslash \{g\})\in\mathcal{L}_{|S|-1}(G_d)\}|\mbox{.}
$$
We also set $\delta^-_\emptyset=0$. The quantity $\delta^-_S$ can be seen as the {\em degree from below} of $p(S)$; that is, the number of edges between $p(S)$ and a vertex in the layer immediately below the one containing $p(S)$. Consider for example
$$
S=\{(0,0,1),(0,1,0),(0,1,1)\}\mbox{.}
$$
In that case, $p(S)$ is equal to $(0,2,2)$ and is indeed a vertex of $H_\infty^+(3,1)$. In fact, $p(S)$ is a vertex of the hexagonal facet of $H_\infty^+(3,1)$ contained in the hyperplane of equation $x_1=0$.
In particular, both $p(S\backslash \{(0,0,1)\})$ and $p(S\backslash \{(0,1,0)\})$ are vertices of $H_\infty^+(3,1)$, while $p(S\backslash \{(0,1,1)\})$ is not. Thus $\delta^-_S=2$, as illustrated in Figure~\ref{Fig_H3-L}. By Proposition~\ref{degree-}, the degree from below of a vertex $p(S)$ of $H_\infty^+(d,1)$ is always $1$ when $S$ contains exactly $2^{d-1}$ generators.
\begin{prop}\label{degree-}
If $S$ contains exactly $2^{d-1}$ generators and $p(S)$ is a vertex of $H_\infty^+(d,1)$, then $\delta^-_S=1$. Moreover, exactly $|\mathcal{L}_{2^{d-1}}(G_d)|$ edges of the White Whale are equal to ${\bf 1}$ up to translation.
\begin{proof}
By Lemma~\ref{111}, the vector ${\bf 1}$ belongs to $S$. According to the same lemma, $p(S\backslash\{g\})$ is not a vertex of $H_\infty^+(d,1)$ when $g$ is an element of $S$ other than ${\bf 1}$, since $S\backslash\{g\}$ would still contain ${\bf 1}$ while having fewer than $2^{d-1}$ elements. Thus, $\delta^-_S = 1$ and the set of edges between $\mathcal{L}_{2^{d-1}-1}(G_d)$ and $\mathcal{L}_{2^{d-1}}(G_d)$ consists of exactly $|\mathcal{L}_{2^{d-1}}(G_d)|$ edges equal, up to translation, to ${\bf 1}$, see Lemma~\ref{edge111}. As a consequence, $|\mathcal{L}_{2^{d-1}-1}(G_d)|=|\mathcal{L}_{2^{d-1}}(G_d)|$.
\end{proof}
\end{prop}
Summing up the edges encountered while traversing all the layers of $H_\infty^+(d,1)$ yields that the number $e(d)$ of edges of the White Whale satisfies:
$$
e(d) =\sum_{k=1}^{2^d-1} \sum_{p(S)\in\mathcal{L}_{k}(G_d)} \delta^-_S\mbox{.}
$$
\begin{table}
$$
\renewcommand{\arraystretch}{1.2}
\begin{array}{c|c|c|c|c|c}
\widetilde{\mathcal{L}}_k(G_3) & S^i_k & p(S^i_k) & |\mathcal{O}_{p(S^i_k)}| & \delta^-_{S^i_k} & |\mathcal{O}_{p(S^i_k)}|\delta^-_{S^i_k}\\[\smallskipamount]
\hline
\hline
\widetilde{\mathcal{L}}_1(G_3) & S^1_1=S^1_0\cup\{(0,0,1)\} & (0,0,1) & 6 & 1 & 6\\
\hline
\widetilde{\mathcal{L}}_2(G_3) & S^1_2=S^1_1\cup\{(0,1,1)\} & (0,1,2) & 12 & 1 & 12\\
\hline
\widetilde{\mathcal{L}}_3(G_3) & S^1_3=S^1_2\cup\{(0,1,0)\} & (0,2,2) & 6 & 2 & 12\\[-\smallskipamount]
 & S^2_3=S^1_2\cup\{(1,0,1)\} & (1,1,3) & 6 & 2 & 12\\
\hline
\hline
 & & & & & e(3)=48\\
\end{array}
$$
\caption{Counting the edges of the $3$-dimensional White Whale}\label{a3-edges}
\end{table}
\begin{table}
$$
\renewcommand{\arraystretch}{1.2}
\begin{array}{c|c|c|c|c|c}
\widetilde{\mathcal{L}}_k(G_4) & S^i_k & p(S^i_k) & |\mathcal{O}_{p(S^i_k)}| & \delta^-_{S^i_k} & |\mathcal{O}_{p(S^i_k)}|\delta^-_{S^i_k}\\[\smallskipamount]
\hline
\hline
\widetilde{\mathcal{L}}_1(G_4) & S^1_1=S^1_0\cup\{(0,0,0,1)\} & (0,0,0,1) & 8 & 1 & 8\\
\hline
\widetilde{\mathcal{L}}_2(G_4) & S^1_2=S^1_1\cup\{(0,0,1,1)\} & (0,0,1,2) & 24 & 1 & 24\\
\hline
\widetilde{\mathcal{L}}_3(G_4) & S^1_3=S^1_2\cup\{(0,0,1,0)\} & (0,0,2,2) & 12 & 2 & 24\\[-\smallskipamount]
 & S^2_3=S^1_2\cup\{(0,1,0,1)\} & (0,1,1,3) & 24 & 2 & 48\\
\hline
\widetilde{\mathcal{L}}_4(G_4) & S^1_4=S^1_3\cup\{(0,1,1,1)\} & (0,1,3,3) & 24 & 1 & 24\\[-\smallskipamount]
 & S^2_4=S^2_3\cup\{(0,1,1,1)\} & (0,2,2,4) & 24 & 1 & 24\\[-\smallskipamount]
 & S^3_4=S^2_3\cup\{(1,0,0,1)\} & (1,1,1,4) & 8 & 3 & 24\\
\hline
\widetilde{\mathcal{L}}_5(G_4) & S^1_5=S^1_4\cup\{(0,1,0,1)\} & (0,2,3,4) & 48 & 2 & 96\\[-\smallskipamount]
 & S^2_5=S^1_4\cup\{(1,0,1,1)\} & (1,1,4,4) & 12 & 2 & 24\\[-\smallskipamount]
 & S^3_5=S^2_4\cup\{(1,0,0,1)\} & (1,2,2,5) & 24 & 2 & 48\\
\hline
\widetilde{\mathcal{L}}_6(G_4) & S^1_6=S^1_5\cup\{(0,1,1,0)\} & (0,3,4,4) & 24 & 2 & 48\\[-\smallskipamount]
 & S^2_6=S^1_5\cup\{(1,0,1,1)\} & (1,2,4,5) & 48 & 2 & 96\\[-\smallskipamount]
 & S^3_6=S^3_5\cup\{(1,0,1,1)\} & (2,2,3,6) & 24 & 2 & 48\\
\hline
\widetilde{\mathcal{L}}_7(G_4) & S^1_7=S^1_6\cup\{(0,1,0,0)\} & (0,4,4,4) & 8 & 3 & 24\\[-\smallskipamount]
 & S^2_7=S^1_6\cup\{(1,0,1,1)\} & (1,3,5,5) & 24 & 3 & 72\\[-\smallskipamount]
 & S^3_7=S^2_6\cup\{(1,0,0,1)\} & (2,2,4,6) & 24 & 3 & 72\\[-\smallskipamount]
 & S^4_7=S^3_6\cup\{(1,1,0,1)\} & (3,3,3,7) & 8 & 3 & 24\\
\hline
\hline
 & & & & & e(4)=760\\
\end{array}
$$
\caption{Counting the edges of the $4$-dimensional White Whale}\label{a4-edges}
\end{table}
The White Whale being centrally symmetric, the summation can be done up to $k=2^{d-1}-1$ to account for all the edges except for the $|\mathcal{L}_{2^{d-1}}(G_d)|$ edges between $\mathcal{L}_{2^{d-1}-1}(G_d)$ and $\mathcal{L}_{2^{d-1}}(G_d)$ identified in Proposition~\ref{degree-}. Further exploiting the symmetry group of $H_\infty^+(d,1)$, we obtain
$$
e(d) = \left( \sum_{k=1}^{2^{d-1}-1} \sum_{p(S)\in\widetilde{\mathcal{L}}_{k}(G_d)} |\mathcal{O}_{p(S)}| \: \delta^-_S \right) + \left( \sum_{p(S)\in\widetilde{\mathcal{L}}_{2^{d-1}-1}(G_d)} \frac{|\mathcal{O}_{p(S)}|}{2} \right)
$$
where $|\mathcal{O}_{p(S)}|$ denotes the size of the orbit generated by the action of the symmetry group of $H_{\infty}^+(d,1)$ on a canonical vertex $p(S)$. By this calculation, illustrated in Table~\ref{a3-edges}, the $3$-dimensional White Whale has
$$
(6\times 1+12\times 1+6\times 2 +6\times 2)+\left(\frac{6}{2}+\frac{6}{2}\right)=48
$$
edges, see Figure~\ref{Fig_H3-L}. The corresponding calculation, in the case of the $4$\nobreakdash-dimensional White Whale, is illustrated in Table~\ref{a4-edges}. The values of $e(d)$ are obtained by two rounds of calculation, which are based on the output of the vertex generation and deployed as two sets of \texttt{Spark} pipelines. The first set of pipelines focuses on the connectivity between consecutive layers; its output is then passed to another set of pipelines that produces degree reports for each layer. The resulting numbers of edges are reported in Table \ref{final}.
\section{The vertex degrees of the White Whale}\label{sec:degree}
Similarly to the degree from below defined in Section~\ref{edge-gen}, we denote by $\delta^+_S$ the {\em degree from above} of a vertex $p(S)$ distinct from $p(G_d)$; that is, the number of edges connecting $p(S)$ to a vertex contained in the layer $\mathcal{L}_{|S|+1}(G_d)$:
$$
\delta^+_S=|\{g\notin{S}: p(S\cup \{g\})\in\mathcal{L}_{|S|+1}(G_d)\}|\mbox{.}
$$
In addition, we set $\delta^+_{G_d}$ to $0$. As $H_\infty^+(d,1)$ is centrally symmetric, Proposition~\ref{degree-} can be rewritten as follows.
\begin{prop}\label{degree+}
If a subset $S$ of $G_d$ contains exactly $2^{d-1}-1$ generators and $p(S)$ is a vertex of $H_\infty^+(d,1)$, then $\delta^+_S=1$.
\end{prop}
The degree $\delta_S$ of a vertex $p(S)$; that is, the number of edges of $H_\infty^+(d,1)$ incident to $p(S)$, is given by $\delta_S=\delta^-_S+\delta^+_S$. Note that $\delta_{\emptyset}$ and $\delta_{G_d}$ are both equal to $d$. For example, the $32$ vertices of $H_\infty^+(3,1)$ are all of degree $3$; in other words, $H_\infty^+(3,1)$ is a simple zonotope, see Table~\ref{a3-edges-} and Figure~\ref{Fig_H3-L}.
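These small cases can also be checked by brute force. The following sketch (illustrative \texttt{Python} only, relying on \texttt{scipy} for the feasibility test and not on the pipelines described in Section~\ref{edge-gen}) enumerates all subsets of $G_3$, recovers $a(3)=32$, recomputes $e(3)=48$ as the sum of the degrees from below, and confirms that every vertex has degree $3$.
\begin{verbatim}
# Illustrative brute-force check in dimension 3: recover a(3) = 32,
# e(3) = 48 as the sum of the degrees from below, and the fact that
# every vertex of the 3-dimensional White Whale has degree 3.
from itertools import combinations
import numpy as np
from scipy.optimize import linprog

G3 = [(1,0,0), (0,1,0), (0,0,1), (0,1,1), (1,0,1), (1,1,0), (1,1,1)]

def is_vertex(S):
    A = [[-x for x in g] if g in S else list(g) for g in G3]
    res = linprog(np.zeros(3), A_ub=np.array(A, float), b_ub=-np.ones(len(G3)),
                  bounds=[(None, None)] * 3, method="highs")
    return res.status == 0

vertex_subsets = {frozenset(S) for k in range(len(G3) + 1)
                  for S in combinations(G3, k) if is_vertex(set(S))}
assert len(vertex_subsets) == 32                  # a(3) = 32

def delta_minus(S):
    # degree from below: generators g of S with p(S \ {g}) also a vertex
    return sum(1 for g in S if S - {g} in vertex_subsets)

def delta_plus(S):
    # degree from above: generators g outside S with p(S u {g}) a vertex
    return sum(1 for g in G3 if g not in S and S | {g} in vertex_subsets)

assert sum(delta_minus(S) for S in vertex_subsets) == 48   # e(3) = 48
assert all(delta_minus(S) + delta_plus(S) == 3 for S in vertex_subsets)
\end{verbatim}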
\begin{table}
$$
\renewcommand{\arraystretch}{1.2}
\begin{array}{c|c|c|c|c||c}
\widetilde{\mathcal{L}}_k(G_3) & S^i_k & p(S^i_k) & \delta^-_{S^i_k} & \delta^+_{S^i_k} & \delta_{S^i_k}\\[\smallskipamount]
\hline
\hline
\widetilde{\mathcal{L}}_0(G_3) & S^1_0=\emptyset & (0,0,0) & 0 & 3 & 3\\
\hline
\widetilde{\mathcal{L}}_1(G_3) & S^1_1=S^1_0\cup\{(0,0,1)\} & (0,0,1) & 1 & 2 & 3\\
\hline
\widetilde{\mathcal{L}}_2(G_3) & S^1_2=S^1_1\cup\{(0,1,1)\} & (0,1,2) & 1 & 2 & 3\\
\hline
\widetilde{\mathcal{L}}_3(G_3) & S^1_3=S^1_2\cup\{(0,1,0)\} & (0,2,2) & 2 & 1 & 3\\[-\smallskipamount]
 & S^2_3=S^1_2\cup\{(1,0,1)\} & (1,1,3) & 2 & 1 & 3\\
\end{array}
$$
\caption{The vertex degrees of the $3$-dimensional White Whale}\label{a3-edges-}
\end{table}
\begin{table}
$$
\renewcommand{\arraystretch}{1.2}
\begin{array}{c|c|c|c|c||c}
\widetilde{\mathcal{L}}_k(G_4) & S^i_k & p(S^i_k) & \delta^-_{S^i_k} & \delta^+_{S^i_k} & \delta_{S^i_k}\\[\smallskipamount]
\hline
\hline
\widetilde{\mathcal{L}}_0(G_4) & S^1_0=\emptyset & (0,0,0,0) & 0 & 4 & 4\\
\hline
\widetilde{\mathcal{L}}_1(G_4) & S^1_1=S^1_0\cup\{(0,0,0,1)\} & (0,0,0,1) & 1 & 3 & 4\\
\hline
\widetilde{\mathcal{L}}_2(G_4) & S^1_2=S^1_1\cup\{(0,0,1,1)\} & (0,0,1,2) & 1 & 3 & 4\\
\hline
\widetilde{\mathcal{L}}_3(G_4) & S^1_3=S^1_2\cup\{(0,0,1,0)\} & (0,0,2,2) & 2 & 2 & 4\\[-\smallskipamount]
 & S^2_3=S^1_2\cup\{(0,1,0,1)\} & (0,1,1,3) & 2 & 2 & 4\\
\hline
\widetilde{\mathcal{L}}_4(G_4) & S^1_4=S^1_3\cup\{(0,1,1,1)\} & (0,1,3,3) & 1 & 3 & 4\\[-\smallskipamount]
 & S^2_4=S^2_3\cup\{(0,1,1,1)\} & (0,2,2,4) & 1 & 3 & 4\\[-\smallskipamount]
 & S^3_4=S^2_3\cup\{(1,0,0,1)\} & (1,1,1,4) & 3 & 3 & 6\\
\hline
\widetilde{\mathcal{L}}_5(G_4) & S^1_5=S^1_4\cup\{(0,1,0,1)\} & (0,2,3,4) & 2 & 2 & 4\\[-\smallskipamount]
 & S^2_5=S^1_4\cup\{(1,0,1,1)\} & (1,1,4,4) & 2 & 4 & 6\\[-\smallskipamount]
 & S^3_5=S^2_4\cup\{(1,0,0,1)\} & (1,2,2,5) & 2 & 2 & 4\\
\hline
\widetilde{\mathcal{L}}_6(G_4) & S^1_6=S^1_5\cup\{(0,1,1,0)\} & (0,3,4,4) & 2 & 2 & 4\\[-\smallskipamount]
 & S^2_6=S^1_5\cup\{(1,0,1,1)\} & (1,2,4,5) & 2 & 2 & 4\\[-\smallskipamount]
 & S^3_6=S^3_5\cup\{(1,0,1,1)\} & (2,2,3,6) & 2 & 2 & 4\\
\hline
\widetilde{\mathcal{L}}_7(G_4) & S^1_7=S^1_6\cup\{(0,1,0,0)\} & (0,4,4,4) & 3 & 1 & 4\\[-\smallskipamount]
 & S^2_7=S^1_6\cup\{(1,0,1,1)\} & (1,3,5,5) & 3 & 1 & 4\\[-\smallskipamount]
 & S^3_7=S^2_6\cup\{(1,0,0,1)\} & (2,2,4,6) & 3 & 1 & 4\\[-\smallskipamount]
 & S^4_7=S^3_6\cup\{(1,1,0,1)\} & (3,3,3,7) & 3 & 1 & 4\\
\end{array}
$$
\caption{The vertex degrees of the $4$-dimensional White Whale}\label{a4-edges-}
\end{table}
The calculation of the vertex degrees of the $4$-dimensional White Whale is illustrated in Table~\ref{a4-edges-}. The number $o(d)$ of orbits or, equivalently, the number of canonical vertices, the average vertex degree $2e(d)/a(d)$, and the average size of an orbit $a(d)/o(d)$ are all given up to dimension $9$ in Table~\ref{final}. These initial values may indicate that the average size of an orbit $a(d)/o(d)$ is a large fraction of the largest possible orbit size, $2d!$.
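The orbit sizes $|\mathcal{O}_{p(S)}|$ used throughout the tables can be recovered directly from the canonical vertices: assuming, as above, that the symmetry group is the one generated by the coordinate permutations and the central symmetry, the orbit of $p(S)$ consists of the permutations of $p(S)$ together with the permutations of its centrally symmetric image $p(G_d)-p(S)$. The following illustrative sketch reconstructs $a(3)=32$ from the five canonical vertices of Table~\ref{a3-vertices} in this way.
\begin{verbatim}
# Illustrative check of the orbitwise accounting a(3) = sum |O_{p(S)}|:
# an orbit is obtained from the permutations of a canonical vertex together
# with the permutations of its centrally symmetric image p(G_3) - p(S).
from itertools import permutations

d = 3
p_G = (2 ** (d - 1),) * d                 # p(G_3) = (4, 4, 4)

def orbit_size(v):
    mirror = tuple(a - b for a, b in zip(p_G, v))
    return len(set(permutations(v)) | set(permutations(mirror)))

canonical = [(0, 0, 0), (0, 0, 1), (0, 1, 2), (0, 2, 2), (1, 1, 3)]
print([orbit_size(v) for v in canonical])       # [2, 6, 12, 6, 6]
print(sum(orbit_size(v) for v in canonical))    # a(3) = 32
\end{verbatim}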
\begin{table}[b]
\makebox[\linewidth]{
$
\begin{array}{c|c|c|c|c|c}
d & a(d) & e(d) & \frac{2e(d)}{a(d)} & o(d) & \frac{a(d)}{2d!o(d)} \\
\hline
2 & 6 & 6 & 2 & 2 & 75\%\\
3 & 32 & 48 & 3 & 5 & \approx 53\%\\
4 & 370 & 760 & \approx 4.1 & 18 & \approx 43\%\\
5 & 11\,292 & 30\,540 & \approx 5.4 & 112 & \approx 43\%\\
6 & 1\,066\,044 & 3\,662\,064 & \approx 6.9 & 1\,512 & \approx 49\%\\
7 & 347\,326\,352 & 1\,463\,047\,264 & \approx 8.4 & 56\,220 & \approx 61\%\\
8 & 419\,172\,756\,930 & 2\,105\,325\,742\,608 & \approx 10.0 & 6\,942\,047 & \approx 75\%\\
9 & 1\,955\,230\,985\,997\,140 & 11\,463\,171\,860\,268\,180 & \approx 11.7 & 3\,140\,607\,258 & \approx 86\%
\end{array}
$
}
\smallskip
\caption{Some sizes of the White Whale.}\label{final}
\end{table}
\begin{rem}
All the known values of $e(d)$ are multiples of $d(d+1)$ and, when $d$ is equal to $7$, we obtain from Table~\ref{final} that
$$
\frac{e(d)}{4d(d+1)}= 6\,531\,461\mbox{,}
$$
which is a prime number.
\end{rem}
Let us now turn our attention back to the vertices $p(U_d^k)$ of $H_\infty^+(d,1)$ provided by Proposition~\ref{sommet}. We can determine the degree of these vertices exactly.
\begin{lem}\label{expo0}
The degree of $p(U_d^k)$ from below is $\displaystyle\delta^-_{U_d^k}=\displaystyle{d-1 \choose k-1}$.
\end{lem}
\begin{proof}
We recall that $U_d^k$ is defined when $1\leq{k}\leq{d-1}$. Let us first show that, if $g$ belongs to $U_d^k\mathord{\setminus}U_d^{k-1}$, then $p(U_d^k\mathord{\setminus}\{g\})$ is a vertex of $H_\infty^+(d,1)$. Observe that, when $k=1$, this is immediate as the origin of $\mathbb{R}^d$ is a vertex of $H_\infty^+(d,1)$. Hence we can assume that $k\geq2$. By symmetry, we can moreover assume without loss of generality that $g$ is the generator whose last $k$ coordinates are equal to $1$ and whose first $d-k$ coordinates are equal to $0$. We will use the linear optimization oracle $(LO_{S,G})$ with $S=U_d^k\mathord{\setminus}\{g\}$ and $G=G_d$. Consider the vector $c$ of $\mathbb{R}^d$ whose first $d-k$ coordinates are equal to $2-3k$, whose last coordinate is $3k^2-3k-1$, and whose remaining $k-1$ coordinates are $-3k$. Consider a vector $g'$ in $U_d^k\mathord{\setminus}\{g\}$. As $g'$ is distinct from $g$, either at least one of its $d-k$ first coordinates is non-zero, and
$$
\sum_{i=1}^{d-1}c_ig'_i\geq (2-3k)-3k(k-2)=-3k^2+3k+2\mbox{,}
$$
or at most $k-2$ of its $d-1$ first coordinates are non-zero, and
$$
\sum_{i=1}^{d-1}c_ig'_i\geq -3k(k-2)=-3k^2+6k\mbox{.}
$$
As $c_d=3k^2-3k-1$ and $k\geq1$, both of these inequalities imply that $c^Tg'\geq1$. Now consider a vector $g'$ in $G_d\mathord{\setminus}[U_d^k\mathord{\setminus}\{g\}]$. If $g'_d=0$, then $c^Tg'\leq-1$ because $g'$ has at least one non-zero coordinate and the first $d-1$ coordinates of $c$ are negative. If $g'_d=1$, then either $g'=g$ or at least $k$ of its $d-1$ first coordinates are non-zero. If $g'=g$, then by construction,
$$
c^Tg'=-3k(k-1)+3k^2-3k-1=-1\mbox{.}
$$
If at least $k$ of the $d-1$ first coordinates of $g'$ are non-zero, then
$$
c^Tg'\leq(2-3k)k+3k^2-3k-1<-1\mbox{.}
$$
This proves that $p(U_d^k\mathord{\setminus}\{g\})$ is a vertex of $H_\infty^+(d,1)$, as desired. We now show that, if $g$ belongs to $U_d^{k-1}$, then $p(U_d^k\mathord{\setminus}\{g\})$ is not a vertex of $H_\infty^+(d,1)$. As $U_d^k\mathord{\setminus}U_d^{k-1}$ contains exactly
$$
{d-1 \choose k-1}
$$
vectors, this will prove the lemma. Consider a vector $g$ from $U_d^{k-1}$.
Denote by $j$ the number of non-zero coordinates of $g$ and observe that $1\leq j\leq k-1$. By symmetry, we can assume without loss of generality that the last $j$ coordinates of $g$ are equal to $1$ and that its first $d-j$ coordinates are equal to $0$. Denote by $g'$ the vector in $U_d^k$ whose last $j+1$ coordinates are equal to $1$ and by $g''$ the vector in $G_d\mathord{\setminus}U_d^k$ whose unique non-zero coordinate is $g''_{d-j}$. By construction, $g=g'-g''$ and, as an immediate consequence,
$$
p(U_d^k\mathord{\setminus}\{g\})=p([U_d^k\mathord{\setminus}\{g'\}]\cup\{g''\})\mbox{.}
$$
This proves that $p(U_d^k\mathord{\setminus}\{g\})$ can be decomposed as the sum of two different subsets of $G_d$. Therefore, this point cannot be a vertex of $H_\infty^+(d,1)$.
\end{proof}
\begin{lem}\label{expo1}
The degree of $p(U_d^k)$ from above is $\displaystyle\delta^+_{U_d^k}=\displaystyle{d-1 \choose k}$.
\end{lem}
\begin{proof}
We recall that $U_d^k$ is defined when $1\leq{k}\leq{d-1}$. The proof proceeds as that of Lemma~\ref{expo0}. Consider a vector $g$ that belongs to $U_d^{k+1}\mathord{\setminus}U_d^k$. We show as a first step that $p(U_d^k\cup\{g\})$ is a vertex of $H_\infty^+(d,1)$ by using the oracle $(LO_{S,G})$ with $S=U_d^k\cup\{g\}$ and $G=G_d$. By symmetry, we can assume without loss of generality that the last $k+1$ coordinates of $g$ are non-zero. Consider the vector $c$ of $\mathbb{R}^d$ whose first $d-k-1$ coordinates are equal to $-2k-1$, whose last coordinate is equal to $2k^2-k+1$ and whose other $k$ coordinates are equal to $-2k+1$. Further consider a vector $g'$ in $U_d^k\cup\{g\}$. If $g'$ is equal to $g$, then by construction
$$
c^Tg'=k(-2k+1)+2k^2-k+1=1\mbox{.}
$$
If $g'$ is not equal to $g$, then at most $k-1$ of its first $d-1$ coordinates are non-zero. As a consequence,
$$
\sum_{i=1}^{d-1}c_ig'_i\geq-(k-1)(2k+1)=-2k^2+k+1\mbox{.}
$$
As $c_d=2k^2-k+1$ and $g'_d=1$, this yields $c^Tg'\geq2$. So far, we have shown that $c^Tg'\geq1$ for every $g'$ in $U_d^k\cup\{g\}$. Now let us consider a vector $g'$ in $G_d\mathord{\setminus}[U_d^k\cup\{g\}]$ and show that $c^Tg'\leq-1$. If $g'_d=0$, then $c^Tg'\leq-1$ because $g'$ has at least one non-zero coordinate and the $d-1$ first coordinates of $c$ are at most $-1$. If $g'_d=1$, then $g'$ must have at least $k+1$ non-zero coordinates. As, in addition, $g'$ is distinct from $g$, at least one of its first $d-k-1$ coordinates is equal to $1$. As a consequence,
$$
\sum_{i=1}^{d-1}c_ig'_i\leq-(2k+1)-(k-1)(2k-1)=-2k^2+k-2\mbox{.}
$$
Since $c_d=2k^2-k+1$ and $g'_d=1$, this yields $c^Tg'\leq-1$. Hence, $(LO_{S,G_d})$ is feasible with $S=U_d^k\cup\{g\}$, and the point $p(U_d^k\cup\{g\})$ is a vertex of $H_\infty^+(d,1)$, as desired. Let us now show that for any vector $g$ in $G_d\mathord{\setminus}U_d^{k+1}$, the point $p(U_d^k\cup\{g\})$ is never a vertex of $H_\infty^+(d,1)$. Denote by $j$ the number of non-zero coordinates of $g$ and assume first that $g_d=0$. By symmetry, we can further assume without loss of generality that $g_i=1$ exactly when $d-j\leq{i}\leq{d-1}$. Denote by $g'$ the vector in $G_d\mathord{\setminus}U_d^k$ such that $g'_i=1$ when
$$
d-\max\{j,k\}\leq{i}\leq{d}\mbox{.}
$$
By construction, $g'-g$ belongs to $U_d^k$ but $g'$ does not. Moreover,
$$
p(U_d^k\cup\{g\})=p([U_d^k\mathord{\setminus}\{g'-g\}]\cup\{g'\})\mbox{.}
$$
This shows that $p(U_d^k\cup\{g\})$ admits two decompositions into a sum of vectors from $G_d$ and therefore cannot be a vertex of $H_\infty^+(d,1)$. Finally, assume that $g_d=1$. In this case, $j$ is at least $k+2$.
By symmetry, we can further assume that the last $j$ coordinates of $g$ are non-zero. Denote by $g'$ the vector in $G_d$ whose only non-zero coordinate is $g'_{d-1}$ and observe that $g-g'$ does not belong to $U_d^k$ because it has at least $k+1$ non-zero coordinates. Moreover, $g'$ does not belong to $U_d^k\cup\{g\}$ either, and
$$
p(U_d^k\cup\{g\})=p(U_d^k\cup\{g-g',g'\})\mbox{.}
$$
As above, this shows that $p(U_d^k\cup\{g\})$ admits two decompositions into a sum of vectors from $G_d$. Therefore, it cannot be a vertex of $H_\infty^+(d,1)$. As there are exactly
$$
{d-1 \choose k}
$$
vectors in $U_d^{k+1}\mathord{\setminus}U_d^k$, this proves the lemma.
\end{proof}
\begin{thm}\label{expo}
The degree of $p(U_d^k)$ is $\displaystyle{d \choose k}$.
\end{thm}
\begin{proof}
Theorem~\ref{expo} immediately follows from Lemmas~\ref{expo0} and~\ref{expo1}.
\end{proof}
\begin{cor}
Setting $k=\lfloor d/2 \rfloor$ in Theorem~\ref{expo} yields a vertex of degree
$$
{d \choose \lfloor d/2 \rfloor }\geq\frac{2^d}{d+1}\mbox{,}
$$
thus providing the announced vertex of the White Whale whose degree is exponential in the dimension.
\end{cor}
\begin{rem}
The degree of $p(U_d^{\lfloor d/2 \rfloor})$ is maximal among the degrees of the vertices of $H_\infty^+(d,1)$ when $2\leq{d}\leq9$. We hypothesize that this is the case in any dimension.
\end{rem}
\noindent{\bf Acknowledgement.} The authors thank the anonymous referees, Zachary Chroman, Lukas K\"uhne, and Mihir Singhal for providing valuable comments and suggestions.
\bibliography{WhiteWhale}
\bibliographystyle{ijmart}
\end{document}
\endinput
2205.13303v2
http://arxiv.org/abs/2205.13303v2
Gaussian Universality of Perceptrons with Random Labels
\documentclass[ amsmath,amssymb, aps, floatfix, ]{revtex4-2} \input{macros} \usepackage{dcolumn}\usepackage[utf8]{inputenc} \usepackage[T1]{fontenc} \usepackage[colorlinks=true, linkcolor=blue,citecolor=blue]{hyperref} \usepackage{url} \usepackage{booktabs} \usepackage{amsmath, amssymb, amsthm, amsfonts} \usepackage{nicefrac, xfrac} \usepackage{microtype} \usepackage{xcolor} \usepackage{bm} \usepackage{enumitem} \usepackage{graphicx} \usepackage{algorithm} \usepackage{algorithmic} \usepackage{hyperref} \makeatletter \newtheorem*{rep@theorem}{\rep@title} \newcommand{\newreptheorem}[2]{\newenvironment{rep#1}[1]{ \def\rep@title{#2 \ref{##1}} \begin{rep@theorem}} {\end{rep@theorem}}} \makeatother \newtheorem{theorem}{Theorem} \newreptheorem{theorem}{Theorem} \newtheorem{lemma}{Lemma} \newtheorem{proposition}{Proposition} \newtheorem{definition}{Definition} \newtheorem{corollary}{Corollary} \newtheorem{conjecture}{Conjecture} \newtheorem{assump}{Assumption} \begin{document} \preprint{APS/123-QED} \title{Gaussian Universality of Perceptrons with Random Labels} \author{Federica Gerace$^{1,4}$} \author{Florent Krzakala$^2$}\author{Bruno Loureiro$^{2,3}$} \author{Ludovic Stephan$^2$} \author{Lenka Zdeborov\'a$^4$} \affiliation{ $^1$ International School of Advanced Studies (SISSA). Trieste, Italy. }\affiliation{ $^2$EPFL, Information, Learning and Physics (IdePHICS) lab., Lausanne, Switzerland } \affiliation{ $^3$D\'epartement d'Informatique, \'Ecole Normale Sup\'erieure (ENS) - PSL \& CNRS, F-75230 Paris cedex 05, France} \affiliation{$^4$EPFL Statistical Physics of Computation (SPOC) lab.,Lausanne, Switzerland } \date{\today} \begin{abstract} While classical in many theoretical settings --- and in particular in statistical physics-inspired works--- the assumption of Gaussian {\it i.i.d.} input data is often perceived as a strong limitation in the context of statistics and machine learning. In this study, we redeem this line of work in the case of generalized linear classification, a.k.a. the perceptron model, with random labels. We argue that there is a large universality class of high-dimensional input data for which we obtain the same minimum training loss as for Gaussian data with corresponding data covariance. In the limit of vanishing regularization, we further demonstrate that the training loss is independent of the data covariance. On the theoretical side, we prove this universality for an arbitrary mixture of homogeneous Gaussian clouds. Empirically, we show that the universality holds also for a broad range of real datasets. \end{abstract} \maketitle \section{Introduction} \label{Introduction} Statistical physics studies of artificial neural networks have a long history, including many works that continue to have an impact on the current investigations of deep neural networks. A large fraction of this continuing line of works has focused on Gaussian input data, see \cite{gardner1989three,krauth1989storage,seung1992statistical} for some of the earliest and most influential examples. However, the Gaussian data assumption is not limited to works from statistical physics of learning. Indeed, it is a widespread assumption in the high-dimensional statistics literature, where it is also known under the umbrella of \emph{Gaussian design}, see for example \cite{donoho2009observed,candes2020phase, Bartlett30063}. 
Despite being both common and convenient for doing theory, {\it i.i.d.} Gaussian data might come across as a stringent limitation at first sight, out of touch with the real-world practice where data is structured. Indeed, an important branch of statistical learning theory is data-agnostic and avoids making too specific assumptions on the data distribution \cite{shalev2014understanding}. However, a number of recent observations (both heuristic and rigorous) suggest that the Gaussian assumption is not always that far-fetched for high-dimensional data (see for instance \cite{goldt2019modelling,seddik_2020_random,bordelon2020spectrum,hu2020universality,loureiro2021learning} and references therein). The goal of the present work is to redeem the Gaussian hypothesis for perhaps the simplest, yet deeply fundamental, problem of high-dimensional statistics: the perceptron problem, a.k.a. generalized linear classification, with random labels. Models with random labels are ubiquitous in the theory of machine learning. The problem of how many randomly labelled Gaussian patterns a perceptron model can fit, known as the \emph{storage capacity problem}, is at the root of the historical interest of the statistical physics community in machine learning problems. Indeed, works on this classical subject span more than four decades \cite{gardner1989three,krauth1989storage,brunel1992information,franz2017universality,ding2019capacity,aubin2019storage,abbaras2020rademacher,montanari2021tractability,alaoui2020algorithmic}. The interest in random labels is also not confined to the statistical physics of learning community. They appear in several contexts in statistical learning theory, such as in the definition of Rademacher complexities \cite{shalev2014understanding,vapnik1999nature}, in Wendel/Cover's pioneering studies \cite{wendel1962problem,cover1965geometrical} or in thought-provoking numerical experiments with deep learning \cite{zhang2021understanding,maennel2020neural}. In this work, we ask: how would these theories for random labels change if using a realistic data set instead of a Gaussian one? We consider the training loss of generalized linear classifiers (perceptrons) trained on random labels, including ridge, hinge and logistic classification \cite{james2013introduction}, but also kernel methods \cite{steinwart2008support} and neural networks trained in the lazy regime \cite{chizat2018lazy} (the so-called neural tangent kernel \cite{jacot2018neural}), as well as with engineered features such as the scattering transform \cite{bruna2013invariant}. We focus on the thermodynamic limit (known as the high-dimensional setting in statistics) where both $n$ (the number of training samples) and $p$ (the input dimension) go to infinity at a fixed ratio $\alpha=n/p$. Our main result is to argue that, in the aforementioned setting with random labels, many input data distributions actually have the same learning properties as Gaussian data, thus providing a rather surprising Gaussian universality result for this problem. In particular, the minimum training loss for a wide range of settings is the same as that of a corresponding Gaussian problem with matching data covariance. Furthermore, in the limit of vanishing regularization, we show that Gaussian universality is even stronger, as the minimum training loss is independent of the data covariance (and therefore the same as the one of \emph{i.i.d.} Gaussian data).
In other words: as far as random labels are concerned, it turns out that the theoretical results derived under the Gaussian data assumption capture what is actually happening in practice. Certainly, the value of the interpolation (or capacity) threshold was known to be universal and occurs (for full-rank data) at $n=p$ for ridge regression, and at $n=2p$ for linear classifiers (perceptrons) \cite{cover1965geometrical}; however, the fact that the loss itself is universal is a stronger statement that redeems an entire line of work using the Gaussian data assumption, and in particular a large part of those from statistical physics of learning.
\vspace{-5mm}
\subsection*{Summary of main results}
\vspace{-2mm}
The main points of the present work can be summarized by Figures \ref{fig:all_datasets_zero_reg} and \ref{fig:all_datasets_finite_reg}, which show the training loss on real-world data sets with random labels and various feature maps, compared with the (analytical) prediction derived for Gaussian data with matching covariance. The code used to run these experiments is publicly available in a \href{https://github.com/IdePHICS/RandomLabelsUniversality}{GitHub repository}. As illustrated in these plots, Gaussian universality seems to hold even for finite-dimensional data, and for actual real datasets. Notably, we observe that, when using random labels, the training losses plotted as a function of the ratio $\alpha=n/p$ between the number of samples and the dimension are indistinguishable from the results obtained for Gaussian input data when using MNIST \cite{lecun_1998_gradient}, fashion-MNIST \cite{xiao2017/online}, or CIFAR10 \cite{krizhevsky_2009_learning} preprocessed through various standard feature maps. This conclusion seems robust and holds for different features of the raw data, such as random features \cite{rahimi2007random} or the convolutional scattering transform \cite{bruna2013invariant,andreux2020kymatio}. It also holds, as we prove, if we simply use a synthetic Gaussian mixture model, a classical model for complex multi-modal data. The agreement between the real world and the asymptotic Gaussian theory is striking. While we may expect that such data could be approximated by a multimodal distribution such as a Gaussian mixture with enough modes, it should come as a rather puzzling fact that they lead to the same loss as a single Gaussian cloud. Our main contribution is to provide a rigorous theoretical foundation for these observations, which vindicates the classical line of work on Gaussian design, in particular the one stemming from statistical physics. We list here our \textbf{main results}:
\begin{figure}[t!]
\begin{center}
\centerline{\includegraphics[width=500pt]{Figures/zero_reg.pdf}}
\caption{Training loss as a function of the number of samples $n$ per input dimension $p$ at regularization $\lambda = 10^{-15}$. The left panel shows the square loss and the right panel the hinge loss. The black solid line represents the outcome of the replica calculation for \emph{i.i.d.} Gaussian inputs, namely when the covariance matrix $\Sigma$ corresponds to the identity matrix. Dots refer to numerical simulations on different full-rank datasets.
In particular, blue dots correspond to MNIST with Gaussian random features and error function non-linearity, red dots correspond to fashion-MNIST with wavelet scattering transform, green dots correspond to CIFAR10 in grayscale with Gaussian random features and ReLU non-linearity, and yellow dots correspond to a mixture of Gaussians, with means $\bm{\mu}_{\pm} = \left( \pm 1, 0,...,0 \right)$, covariances $\Sigma_{\pm}$ both equal to the identity matrix, and relative class proportions $\rho_{\pm} = \sfrac{1}{2}$. Finally, black dots correspond to \emph{i.i.d.} Gaussian inputs. } \label{fig:all_datasets_zero_reg} \end{center} \vskip -0.2in \end{figure} \begin{figure}[t!] \begin{center} \centerline{\includegraphics[width=500pt]{Figures/finite_reg.pdf}} \caption{This figure shows the training loss as a function of the number of samples $n$ per dimension $p$ at finite regularization $\lambda$. In the top panel the square loss, and in the bottom panel the hinge loss. The first column refers to MNIST with Gaussian random features and error function non-linearity, the second column corresponds to fashion-MNIST with wavelet scattering transform, the third column corresponds to CIFAR10 in grayscale with Gaussian random features and ReLU non-linearity, and the fourth column corresponds to a mixture of Gaussians, with means $\bm{\mu}_{\pm} = \left( \pm 1, 0,...,0 \right)$, covariances $\Sigma_{\pm}$ both equal to the identity matrix, and relative class proportions $\rho_{\pm} = \sfrac{1}{2}$. Black solid lines correspond to the outcome of the replica calculation, obtained by assigning to $\Sigma$ the covariance matrix of each dataset after the corresponding transformation. The coloured dots correspond to the simulations for different values of $\lambda$, as specified in the plot legend. Simulations are averaged over $10$ samples and the error bars are not visible at the plot scale.} \label{fig:all_datasets_finite_reg} \end{center} \vskip -0.4in \end{figure} \begin{itemize} \item[a)] We provide a strong universality theorem for linear interpolators corresponding to ridgeless regression (with vanishing regularization) in high dimensions with random labels, Theorem \ref{thm:ols}. Informally, we prove that a perceptron trained on randomly labelled Gaussian mixture data (a setting that encompasses complex multi-modal distributions) has the same minimum asymptotic loss as a perceptron trained on randomly labelled Gaussian data with isotropic covariance, that is $\mathcal{E}_{\ell}(\alpha)=\sfrac{1}{2}(1-\sfrac{1}{\alpha})_{+}$. This provides a theoretical explanation for the phenomena illustrated in Fig. \ref{fig:all_datasets_zero_reg} (left). \item[b)] Under an additional homogeneity assumption on the different modes of the data, Gaussian universality can be generalised to {\it any convex loss} (and we conjecture that it is valid for non-convex losses as well), Theorem \ref{theorem:cov}. This provides a theoretical explanation of the phenomena illustrated in Fig. \ref{fig:all_datasets_zero_reg} (right). \item[c)] At finite regularization and under the same homogeneity assumption, we show that the asymptotic training loss depends solely on the {\it data covariance matrix}, so that it is, again, the same loss as the one of a single Gaussian cloud with matching covariance, Theorem \ref{thm:Gauss_universality}. This is illustrated in Fig. \ref{fig:all_datasets_finite_reg}. \end{itemize} The proof technique used to establish these universality theorems is of interest in its own right.
It builds on recent progress in high-dimensional statistics and on mathematical insights drawn from the replica method in statistical physics. In particular, we provide an {\it explicit} matching of the expression (obtained from a rigorous proof of the replica prediction) for the asymptotic minimal loss \cite{thrampoulidis2018precise, montanari2019generalization, loureiro2021learning, loureiro2021learning_gm}. We further demonstrate the strong universality for ridge regression with vanishing regularization, again by showing explicitly that the exact solution \cite{dobriban2018high,hastie2019surprises,loureiro2021learning} reduces to that of the homogeneous Gaussian case. These results are obtained through techniques inspired by statistical and mathematical physics. \subsection*{Further Related work} \paragraph*{The perceptron ---} The question of how many samples can be perfectly fitted by a linear model is a classical one. For a ridge classifier, it amounts to asking whether a linear system of $n$ equations with $p$ unknowns is invertible, so that for full-rank data the transition arises at $n=p$. For the 0/1 loss or its convex surrogates such as the hinge loss, the question of linear separability was famously discussed by \cite{cover1965geometrical}, who showed that for full-rank data the transition is given by $n=2p$. In both cases, the transition is universal and does not depend on details of the data distribution (provided it is full rank, otherwise the rank replaces the dimension). For Gaussian data, such questions have witnessed a large amount of attention in the statistical physics community~\citep{gardner1988optimal,gardner1989three,krauth1989storage,derrida1991finite,brunel1992information,franz2017universality} but also recently in theoretical computer science~\citep{ding2019capacity,aubin2019storage,abbaras2020rademacher,montanari2021tractability,alaoui2020algorithmic}. It is one of our goals to attract attention to these works, given that the Gaussian universality we present shows that their relevance is not limited to idealistic Gaussian data.\looseness=-1 \paragraph*{Random Labels ---} Random labels are a fundamental and useful concept in machine learning. The pioneering work of \citep{zhang2021understanding}, for instance, was instrumental in the modern critique of classical measures of model complexity, including the Rademacher complexity or the VC-dimension. These considerations have driven an entire line of research aiming to find substantial differences between learning with true and random labels, for instance in training time \citep{arpit2017closer,han2018co,zhang2018generalized}, in minima sharpness \citep{keskar2016large,neyshabur2017exploring}, or in what neural networks can actually learn with random labels \citep{maennel2020neural}. It has also been recently claimed \citep{maennel2020neural} that pre-training on random labels under a given initial condition scaling can consistently speed up neural network training on both true and random labels, with respect to training from scratch. \paragraph*{Gaussian Universality ---} There has been much progress on a similar, though more restricted, Gaussian universality for random feature maps on Gaussian input data \cite{rahimi2007random}.
Following early insights by \cite{el2010spectrum}, the authors of \cite{pennington2017nonlinear, mei2019generalization} showed that the empirical spectral distribution of the Gram matrix of random features is asymptotically equivalent to that of a linear model with matched covariance. This was extended to generic convex losses by \cite{gerace2020generalisation} using the heuristic replica method, and proven in \cite{dhifallah2021phase}. A specific \emph{Gaussian Equivalence Principle} \cite{goldt2019modelling} for learning with random features has been proven in a succession of works for convex penalties in \cite{goldt2020gaussian,hu_universality_2021} and some non-convex ones in \cite{montanari2022universality}. Early ideas on Gaussian universality have also appeared in the context of signal processing and compressed sensing in \cite{doi:10.1098/rsta.2009.0152, 8006947, 5730603, NEURIPS2019_dffbb6ef, NIPS2017_136f9513}. These theoretical results, however, fall short when considering realistic datasets as we do in this work. Indeed, these previous works considered only uni-modal Gaussian data (observed through random feature maps), a situation far from realistic multi-modal, complex, real-world datasets. Instead, \cite{seddik_2020_random,louart2018random,futureuniversality} argued that real datasets can be efficiently approximated in high dimensions by a finite {\it mixture of Gaussians}. These, of course, are multi-modal distributions that cannot be approximated by a single Gaussian. Gaussian mixtures will be the starting point of our theory. Finally, we note that the fact that Gaussian data can fit or represent well some real data has been heuristically observed in many situations, but without theoretical justification and often limited to ridge regression, see e.g. \cite{bordelon2020spectrum,li2021statistical,loureiro2021learning,cui2021generalization,Ingrosso2022}. \section{Setting, notation, and asymptotic formulas} \label{section3} The focus of the present work is the analysis of high-dimensional binary linear classification (a.k.a. the perceptron) on a dataset $\mathcal{D} = \left \{ \left(\bm{x}_{\mu}, y_{\mu}\right) \right \}_{\mu = 1}^n$. We shall consider a minimization problem of the form \begin{equation} \label{eq:def_min_problem_main} \widehat\cR_n^*(\bm X, \bm y) = \inf_{\bm \theta} \frac1n\sum_{\mu=1}^n \ell(\bm \theta^\top \bm x_\mu, y_\mu) + \frac{\lambda}{2} \vert\vert \bm{\theta} \vert\vert_2^2 , \end{equation} where the $\bm x_\mu \in \dR^p$ are input vectors and the $y_\mu\in \{-1,+1\}$ are binary labels. We assume that the loss $\ell$ only depends on the inputs $\bm{x}_\mu$ through a one-dimensional projection $\bm \theta^\top \bm x_\mu$, and we work in the so-called \emph{thermodynamic} or \emph{proportional high-dimensional limit}, where $n, p$ go to infinity with \[\frac np \to \alpha > 0. \] In practice, one seldom uses the raw data {\bf x} directly in a linear classifier and usually performs a preprocessing step. For instance, instead of using the raw MNIST images, a classical approach is to use a fixed feature map, and to observe the data as ${\bf x}=\sigma(F {\bf x})$, with $F$ a random matrix. This is called the random feature map \cite{rahimi2007random}, and it has the advantage, among others, that the effective data ${\bf x}$ are full-rank.
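To make this preprocessing step concrete, the following minimal Python sketch builds random features and random Rademacher labels from a raw dataset. It is purely illustrative: the function names and the $1/\sqrt{d}$ normalization of the feature matrix are choices made here for readability, not a description of the code in our repository.
\begin{verbatim}
import numpy as np
from scipy.special import erf

def random_feature_map(Z, p, nonlinearity=erf, seed=0):
    """Map raw inputs Z of shape (n, d) to features X = sigma(Z F),
    with F a fixed Gaussian random matrix of shape (d, p)."""
    rng = np.random.default_rng(seed)
    d = Z.shape[1]
    F = rng.standard_normal((d, p)) / np.sqrt(d)  # 1/sqrt(d) keeps pre-activations O(1)
    return nonlinearity(Z @ F)

def random_labels(n, seed=1):
    """Rademacher labels, drawn independently of the inputs."""
    rng = np.random.default_rng(seed)
    return rng.choice([-1.0, 1.0], size=n)
\end{verbatim}
Once the features and the labels are produced in this way, they are simply fed to the empirical risk minimization of eq.~\eqref{eq:def_min_problem_main}.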
One may use more complicated maps, such as the convolutional scattering transform \cite{brunel1992information,bruna2013invariant}, or even pre-trained neural networks, a situation called transfer learning \cite{torrey2010transfer,gerace2022probing}. We shall likewise apply such transforms to our real data, in order to avoid theoretical pitfalls in direct space (in images some pixels are always zero, for instance, so that the data may not even be full-rank). There is also one more advantage of using fixed features: it corresponds to deep learning (with actual multi-layer nets) in the so-called lazy regime \cite{jacot2018neural, chizat2018lazy}. In this case, the feature matrix is a random matrix. Therefore, our results go beyond linear models and are also relevant to deep learning in the lazy regime. In our numerical experiments, we shall thus not only work with the original data (see appendix \ref{numerical_simulations}, and in particular Fig.~\ref{fig:all_datasets_finite_reg_direct}), but also ---and mainly--- with random feature maps and fixed feature maps (as in Fig.~\ref{fig:all_datasets_zero_reg} and Fig.~\ref{fig:all_datasets_finite_reg}). For the labels, we shall focus in this work on the \emph{random label} model, where the $y_\mu$ are independent of the inputs $\bm{x}_\mu$, and generated independently according to a Rademacher distribution: \begin{equation} y_{\mu} \sim \frac{1}{2}\left( \delta_{-1} + \delta_{+1}\right). \label{eq:y} \end{equation} In our theoretical approach, we shall use mainly two data models: \begin{itemize}[wide = 1pt] \item The simplest one is the {\bf Gaussian covariate model} (GCM), where the inputs $\bm x_\mu\in\mathbb{R}^{p}$ are independently drawn from a Gaussian distribution: \begin{equation} \label{eq:def:gcm} {\bm x}_\mu \sim {\cal N}(\bm 0,\bm\Sigma). \end{equation} The Gaussian covariate model has been the subject of much attention recently~\citep{dobriban2018high,thrampoulidis2018precise, hastie2019surprises,mei2019generalization,wu2020optimal,Bartlett30063,jacot2020kernel,celentano2020lasso,aubin2020generalization, loureiro2021learning, loureiro2022fluc}. In particular, the asymptotic statistics of the minimizer of eq.~\eqref{eq:def_min_problem_main} for different models of the labels can be computed using the replica method, and rigorously proven as well. Specifically, the random label limit relevant to our discussion can be obtained as a limit of the expressions derived using the replica method of statistical physics and mathematically proven in \cite{loureiro2021learning}. We shall use here the following expressions (see also Appendix \ref{sec:app:gcm}): \begin{theorem}[Asymptotics of the GCM for random labels, adapted from \cite{loureiro2021learning}, informal] \label{theorem:GM} Consider the minimization problem in eq.~\eqref{eq:def_min_problem_main}, with the inputs $\bm{x}_\mu$ generated according to a Gaussian covariate model. Assume that the loss $\ell$ is strictly convex (or that $\ell$ is convex and $\lambda > 0$).
Under mild regularity conditions on $\bm\Sigma$, as well as on the loss and regularizer, the asymptotic training performance of the empirical risk minimizer eq.~\eqref{eq:app:erm} for the random label Gaussian covariate model satisfying the scalings (\ref{assump:scalings}) in the proportional high-dimensional limit as $n \to \infty$ is given by: \begin{equation} \begin{split} \widehat \cR_n^*(\bm X, y(\bm X)) &\overset{\dP}{\longrightarrow} \mathcal{E}^{\text{gcm}}_{\ell}(\alpha, \lambda) \coloneqq \frac{1}{2}\sum\limits_{y\in\{-1,+1\}}\mathbb{E}_{\xi\sim\mathcal{N}(0,1)}\left[\ell(\prox_{V^{\star}\ell(\cdot, y)}\left(\sqrt{q^{\star}}\xi\right), y)\right] \end{split} \end{equation} where $\prox_{\tau f(\cdot)}$ is the proximal operator associated with the loss: \begin{equation} \prox_{\tau \ell(\cdot, y)}(x) \coloneqq \underset{z\in\mathcal{C}_{1}}{\argmin}\left[\frac{1}{2\tau}(z-x)^2+\ell(z,y)\right] \end{equation} and the parameters $(V^{\star}, q^{\star})$ are the (unique) fixed point of the following self-consistent equations: \begin{align} &\begin{cases} \hat{V} = \frac{\alpha}{2} \sum\limits_{y\in\{-1,+1\}}\mathbb{E}_{\xi\sim\mathcal{N}(0,1)}\left[\partial_{\omega}f_{\ell}(y, \sqrt{q}\xi, V)\right]\\ \hat{q} = \frac{\alpha}{2} \sum\limits_{y\in\{-1,+1\}}\mathbb{E}_{\xi\sim\mathcal{N}(0,1)}\left[f_{\ell}(y, \sqrt{q}\xi, V)^2\right]\\ \end{cases}, && \begin{cases} V &= \frac{1}{p}\tr\Sigma\left(\lambda\bm{I}_{p}+\hat{V}\Sigma\right)^{-1}\\ q &= \frac{1}{p}\hat{q}\tr\Sigma^2\left(\lambda\bm{I}_{p}+\hat{V}\Sigma\right)^{-2}\\ \end{cases} \end{align} where $f_{\ell}(y,\omega,V) \coloneqq V^{-1}\left(\prox_{V\ell(\cdot, y)}(\omega)-\omega\right)$. \end{theorem} \item A more generic model of data, which has the advantage of being multi-modal and thus able to fit complex situations, is the {\bf Gaussian Mixture Model} (GMM). In this case, the inputs $\bm{x}_{\mu}\in\mathbb{R}^{p}$ are independently generated as: \begin{equation} {\bm x}_\mu \sim \sum_{c\in\mathcal{C}} \rho_{c}\, {\cal N} (\bm{\mu}_{c},\bm{\Sigma}_{c}) \label{GMM-model} \end{equation} \noindent where $\mathcal{C} \coloneqq \{1,\cdots, K\}$ indexes the $K$ Gaussian clouds and $\rho_{c} \in[0,1]$ is the density of points in each cloud, satisfying $\sum_{c\in\mathcal{C}}\rho_{c} = 1$. The analysis of Gaussian mixture models in the high-dimensional regime has been the subject of many works. The exact asymptotic expression for the minimum training loss has been derived for a range of particular cases in, among others, \cite{kini2021phase,sifaou2019phase,mai2019large,mignacco2020role,taheri2020optimality,dobriban2020provable} and in full generality for arbitrary means and covariances in \cite{loureiro2021learning_gm}. We shall thus use the random label limit of their expression in the binary classification case: \begin{theorem}[Asymptotics of the GMM for random labels, adapted from \cite{loureiro2021learning_gm}, informal]\label{thm:app:asymp_error} \label{propGMM} Consider the minimization problem in eq.~\eqref{eq:def_min_problem_main}, with the inputs $\bm{x}_\mu$ generated according to a Gaussian mixture as in \eqref{GMM-model}. Assume that the loss $\ell$ is strictly convex (or that $\ell$ is convex and $\lambda > 0$).
Under mild regularity conditions on the $\bm \mu_c$, $\bm \Sigma_c$, as well as on the loss and regularizer, the training performance of the empirical risk minimizer eq.~\eqref{eq:app:erm} for the random label Gaussian mixture model satisfying the scalings (\ref{assump:scalings}) is given by: \begin{equation}\label{eq:app:asymptotic_risk} \begin{split} \widehat \cR_n^*(\bm X, y(\bm X)) &\overset{\dP}{\longrightarrow} \mathcal{E}^{\text{gmm}}_{\ell}(\alpha, \lambda, K) \coloneqq \frac{1}{2}\sum\limits_{c\in\mathcal{C}}\rho_{c}\sum\limits_{y\in\{-1,+1\}}\mathbb{E}_{\xi\sim\mathcal{N}(0,1)}\left[\ell(\prox_{V^{\star}_{c}\ell(\cdot, y)}\left(m_c^\star + \sqrt{q^{\star}_{c}}\xi\right), y)\right] \end{split} \end{equation} \noindent where $\ell$ is the loss function used in the empirical risk minimization in eq.~\eqref{eq:app:erm}, $\prox_{\tau f(\cdot)}$ is the proximal operator associated with the loss: \begin{equation} \prox_{\tau \ell(\cdot, y)}(x) \coloneqq \underset{z\in\mathcal{C}_{1}}{\argmin}\left[\frac{1}{2\tau}(z-x)^2+\ell(z,y)\right] \end{equation} \noindent and $(m_c^\star, V_{c}^{\star},q^{\star}_{c})_{c\in\mathcal{C}}$ are the \textbf{unique} fixed points of the following self-consistent equations: \begin{equation} \begin{split} \label{eq:app:sp:gmm} &\begin{cases} \hat{V}_{c} = \frac{\alpha}{2} \rho_{c}\sum\limits_{y\in\{-1,+1\}}\mathbb{E}_{\xi\sim\mathcal{N}(0,1)}\left[\partial_{\omega}f_{\ell}(y, m_{c}+\sqrt{q_{c}}\xi, V_{c})\right]\\ \hat{q}_{c} = \frac{\alpha}{2} \rho_{c}\sum\limits_{y\in\{-1,+1\}}\mathbb{E}_{\xi\sim\mathcal{N}(0,1)}\left[f_{\ell}(y, m_{c}+\sqrt{q_{c}}\xi, V_{c})^2\right]\\ \hat{m}_{c} = \frac{\alpha}{2} \rho_{c}\sum\limits_{y\in\{-1,+1\}}\mathbb{E}_{\xi\sim\mathcal{N}(0,1)}\left[f_{\ell}(y, m_{c}+\sqrt{q_{c}}\xi, V_{c})\right] \end{cases}\\ & \begin{cases} V_{c} &= \frac{1}{p}\tr\Sigma_{c}\left(\lambda\bm{I}_{p}+\sum\limits_{c'\in\mathcal{C}}\hat{V}_{c'}\Sigma_{c'}\right)^{-1}\\ q_{c} &= \frac{1}{p}\sum\limits_{c'\in\mathcal{C}}\left[\tr\left(\hat{q}_{c'}\Sigma_{c'}+\hat{m}_{c}\hat{m}_{c'}\bm{\mu}_{c'}\bm{\mu}_{c}^{\top}\right)\Sigma_{c}\left(\lambda\bm{I}_{p}+\sum\limits_{c''\in\mathcal{C}}\hat{V}_{c''}\Sigma_{c''}\right)^{-2}\right]\\ m_{c} &= \frac{1}{p}\sum\limits_{c'\in\mathcal{C}}\hat{m}_{c}\hat{m}_{c'}\left[\tr\bm{\mu}_{c'}\bm{\mu}_{c}^{\top}\left(\lambda\bm{I}_{p}+\sum\limits_{c''\in\mathcal{C}}\hat{V}_{c''}\Sigma_{c''}\right)^{-1}\right] \end{cases} \end{split} \end{equation} \noindent where $f_{\ell}(y,\omega,V) \coloneqq V^{-1}\left(\prox_{V\ell(\cdot, y)}(\omega)-\omega\right)$. \end{theorem} \end{itemize} \section{The main theoretical results: from mixtures to a single Gaussian} In this section, we present the main theoretical results of the present work and discuss their consequences: we show that with random labels, GMM models can be reduced to a single GCM model. This provides an explanation of the universality observed in Figs.~\ref{fig:all_datasets_zero_reg} and \ref{fig:all_datasets_finite_reg}. We state our results as theorems because we were indeed able to establish them with full mathematical rigour. However, the proofs are deferred to the appendices, and the reasoning and derivations presented in this section follow the level of rigour common in theoretical physics. We made this choice to ensure readability for both physics- and mathematics-oriented audiences. The starting point is the Gaussian Mixture Model (GMM). This is a very generic model of data and standard approximation results (e.g.
the Stone-Weierstrass theorem) show in particular that one can approximate any data density to arbitrary precision by Gaussian mixtures. While, in the worst case, this would require a diverging number of Gaussians in the mixture, it can be shown that (as far as the generalized linear model is concerned) a mixture of a small number of Gaussians is actually able to approximate very complex data sets in high dimension \cite{seddik_2020_random,louart2018random,seddik_2021_unexpected}. More precisely, in the proportional high-dimensional regime, data generated by Generative Adversarial Networks (GANs), one of the state-of-the-art techniques to generate realistic-looking data, behave as Gaussian mixtures for such classifiers \cite{futureuniversality}. We shall thus use this model as our benchmark of ``complex'' data distributions. If a mixture model is a good approximation of reality in high dimension, the question remains: {\it why is it that we can fit real datasets with a single Gaussian?} Our main technical question will thus be: if we use random labels, what is the difference between a GMM and a single Gaussian model? \subsection{Mean invariance with random labels} We thus now move to the random label case and show how we can surprisingly use a simple Gaussian distribution instead of the GMM. We are going to use Theorems \ref{theorem:GM} and \ref{thm:app:asymp_error}. Note that the asymptotic value of the energy, or loss, only depends on the probability vector $\bm \rho\in[0,1]^{K}$ (with entries $\rho_{c}$ corresponding to the respective sizes of the $K$ clusters), the matrix of averages $\bm M\in\mathbb{R}^{K\times p}$ (with rows $\bm{\mu_{c}}\in\mathbb{R}^{p}$), and the concatenation of covariances $\bm \Sigma^{\otimes}\in\mathbb{R}^{K\times p\times p}$ (with slices $\bm \Sigma_{c}\in\mathbb{R}^{p\times p}$), and therefore we denote: \[ \mathcal{E}_{\ell} = \mathcal{E}_{\ell}^{\text{gmm}}(\bm \rho, \bm M, \bm \Sigma^\otimes). \] Similarly, for the Gaussian covariate model we define the limiting value \[ \mathcal{E}_{\ell} = \mathcal{E}_{\ell}^{\text{gcm}}(\bm m, \bm \Sigma), \] where in both cases we omitted the explicit dependence on the parameters $(\alpha, \lambda)$. We are now in a position to state a lemma crucial to our first main universality result: \begin{lemma}[Single mean lemma for random labels]\label{lemma:mean_universality} In the random label setting \eqref{eq:y}, assume that the loss $\ell$ is symmetric, in the sense that $\ell(x, y) = \ell(-x, -y)$ for $x, y \in \dR$. Then, the limiting value $\mathcal{E}_{\ell}$ of the risk is independent of the means, i.e. for all choices of $\bm\rho$, $\bm M$ and $\bm\Sigma^\otimes$ we have \[ \mathcal{E}_{\ell}^{\text{gmm}}(\bm \rho, \bm M, \bm \Sigma^\otimes) = \mathcal{E}_{\ell}^{\text{gmm}}(\bm \rho, \bm 0, \bm \Sigma^\otimes)\,. \] \end{lemma} The symmetry condition on the loss is not really restrictive and is satisfied by virtually all losses used in binary classification (in particular margin-based losses of the form $\ell(x, y) = \phi(xy)$). Since a mixture of Gaussians with equal means and covariances is equivalent to a single Gaussian, we can now write the following theorem: \begin{theorem}[Gaussian universality for random labels] \label{thm:Gauss_universality} Consider the same assumptions as in Lemma \ref{lemma:mean_universality}, and assume further that the data is homogeneous, i.e. \[ \bm\Sigma_{c} = \bm \Sigma \quad \text{for all} \quad c\in\mathcal{C}.
\] Then the asymptotic risk is equivalent to that of a single centered Gaussian: \[ \mathcal{E}_{\ell}^{\text{gmm}}(\bm \rho, \bm M, \bm \Sigma^\otimes) = \mathcal{E}_{\ell}^{\text{gcm}}(\bm 0, \bm \Sigma). \] \end{theorem} This is our first main universality theorem: a mixture of homogeneous Gaussians\footnote{Also called homoskedastic Gaussians, as opposed to heteroskedastic Gaussians} can be replaced, when using random labels, by a single Gaussian. This surprising fact, alone, explains the empirical observation presented in Fig.~\ref{fig:all_datasets_zero_reg} and Fig.~\ref{fig:all_datasets_finite_reg}, at least if we accept that the different modes are homogeneous (see discussion in Sec.~\ref{sec:numerics}). \paragraph*{Proof sketch ---} Both Lemma \ref{lemma:mean_universality} and Theorem \ref{thm:Gauss_universality} stem from the detailed analysis of the replica free energy for the GMM \cite{loureiro2021learning_gm}. Indeed, to prove our claims, it suffices to show that the fixed points of the replica equations are the same. This is done in detail in Appendix \ref{proof2}, using the replica equations that we provide in Appendix \ref{formulas}. In a nutshell, we show that the expressions of the GMM reduce to those of the GCM. \hfill \qedsymbol \subsection{Generic loss with vanishing regularisation} Additionally, we note that in Fig.~\ref{fig:all_datasets_zero_reg} at vanishing regularization, we did not even require a matching covariance, and instead used a trivial one. This is because of the following consequence of Lemma \ref{lemma:mean_universality}:\looseness=-1 \begin{theorem}[Gaussian universality for vanishing regularization] \label{theorem:cov} Under the same assumptions as in Theorem \ref{thm:Gauss_universality}, if the minimizer of $\ell$ is unique and the data covariance is full-rank, then the asymptotic minimal loss for Gaussian data does not depend on the covariance when regularization is absent, $\lambda =0$. \end{theorem} \begin{proof} Consider the empirical risk minimization problem in eq.~\eqref{eq:def_min_problem_main} with data from the Gaussian covariate model eq.~\eqref{eq:def:gcm} with random labels. Without loss of generality, we can write $\bm{x}_{\mu} = \bm \Sigma^{1/2}\bm{z}_{\mu}$, with ${\bm z}_{\mu}\sim\mathcal{N}(\bm{0}_{p},\bm{I}_{p})$. Then, making a change of variables $\bm \theta'=\bm \Sigma^{1/2} \bm \theta$, we can write: \begin{align} \widehat\cR_n^*(\bm X, \bm y) = \inf_{\bm \theta} \frac1n\sum_{\mu=1}^n \ell(\bm \theta^\top \bm x_\mu, y_\mu) + \frac{\lambda}{2} \vert\vert \bm{\theta} \vert\vert_2^2 = \inf_{\bm \theta' \in \cS'_p} \frac1n\sum_{\mu=1}^n \ell(\bm \theta'^\top \bm z_\mu, y_\mu) + \frac{\lambda}{2} \vert\vert \bm\Sigma^{-1/2}\bm{\theta}' \vert\vert_2^2\, \notag \end{align} \noindent where $\cS'_p\subset\mathbb{R}^{p}$ is another compact set, and we have used the fact that the $y_{\mu}$ are independent of the $\bm{x}_{\mu}$. Since the minimizer of $\ell$ is unique, the result follows from taking $\lambda\to 0^{+}$. \end{proof} Note that in particular Theorem \ref{theorem:cov} implies that for random labels, the GCM model with a covariance $\Sigma$ is equivalent to a Gaussian i.i.d. model with a different regularization given by the norm $\vert\vert \cdot\vert\vert_{\Sigma^{-1}}$ induced by the inverse covariance matrix $\Sigma^{-1}$. Therefore, in the case in which $\ell$ has several minima, the $\lambda\to 0^{+}$ limit will give the performance of the solution with minimum $\vert\vert \cdot\vert\vert_{\Sigma^{-1}}$ norm.
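The covariance independence stated in Theorem \ref{theorem:cov} is also straightforward to check numerically. The following minimal Python sketch (illustrative only, and not part of our released code) computes the ridgeless, minimum-$\ell_2$-norm least-squares fit of random labels on anisotropic Gaussian data and compares its training loss with the isotropic prediction $\sfrac{1}{2}(1-\sfrac{1}{\alpha})_{+}$.
\begin{verbatim}
import numpy as np

def ridgeless_square_training_loss(n, p, cov_eigs, seed=0):
    """Square-loss training error of the min-norm / least-squares solution
    (the lambda -> 0^+ limit of the ERM problem) on randomly labelled
    Gaussian data with diagonal covariance diag(cov_eigs)."""
    rng = np.random.default_rng(seed)
    X = rng.standard_normal((n, p)) * np.sqrt(cov_eigs)  # rows are the x_mu
    y = rng.choice([-1.0, 1.0], size=n)                  # random labels
    theta = np.linalg.pinv(X) @ y                        # Moore-Penrose solution
    return 0.5 * np.mean((X @ theta - y) ** 2)

alpha, p = 3.0, 300
n = int(alpha * p)
for eigs in [np.ones(p), np.linspace(0.1, 10.0, p)]:
    print(ridgeless_square_training_loss(n, p, eigs), 0.5 * (1 - 1 / alpha))
\end{verbatim}
Up to finite-size fluctuations, both covariances give a training loss close to $\sfrac{1}{2}(1-\sfrac{1}{\alpha})=\sfrac{1}{3}$, in agreement with Theorems \ref{theorem:cov} and \ref{thm:ols}.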
Finally, we note that this analysis also allows us to answer the important question: {\it what is being learned with random labels?}, discussed in particular in the machine learning literature in \cite{maennel2020neural}. For generalized linear models, the answer is that the model is simply fitting the second-order statistics (the total covariance $\bm \Sigma$). \subsection{Ridge regression with vanishing regularization} Even though it seems to be well obeyed in practice, one may wonder if we can in some cases get rid of the homogeneity condition. As we shall see, the answer is no: in general, a mixture of {\it inhomogeneous} Gaussians cannot be strictly replaced by a single one. It turns out, however, that there is one exception: the hypothesis can be lifted for ridge regression with vanishing regularization, i.e. for the squared loss $\ell(x, y) = \frac12(x-y)^2$: \begin{theorem}[Strong universality for ridge loss] \label{thm:ols} In the ridge regression case with vanishing regularization, i.e. when $\lambda \to 0^+$, we have \[\lim\limits_{\lambda\to0^{+}} \mathcal{E}_{\ell}^{\text{gmm}}(\bm \rho, \bm M, \bm \Sigma^\otimes) = \frac12 \left(1 - \frac{1}{\alpha} \right)_+, \] for any choice of $\bm \rho, \bm M$, or $\bm \Sigma^\otimes$. \end{theorem} In particular, this means that in the unregularized limit, any Gaussian mixture behaves in terms of its loss as a single-cluster Gaussian model with identity covariance, whose asymptotic training loss is given by $\lim_{\lambda\to 0^{+}}\mathcal{E}_{\ell}^{\text{gcm}}(\alpha, \lambda)=\sfrac{1}{2}(1-1/\alpha)_{+}$. \paragraph*{Proof sketch ---} The proof of the strong universality, which follows from a rigorous analysis of the replica predictions, amounts to showing that the replica free energy for the GMM reduces to the one of a single Gaussian. Interestingly, although the fixed points of the replica equations differ between the GMM and Gaussian cases, they do give rise to the same free energy. Details can, again, be found in Appendix \ref{sec:app:uni}. \hfill \qedsymbol \section{Numerical experiments} \label{sec:numerics} In this section, we describe in more detail the numerical experiments of Fig.~\ref{fig:all_datasets_zero_reg} and Fig.~\ref{fig:all_datasets_finite_reg}. The coloured dots represent the outcome of the simulations on several full-rank datasets. In particular, the blue and the green dots refer to MNIST and grayscale CIFAR-10, respectively, preprocessed with random Gaussian feature maps \citep{rahimi2007random}. In this case, the input data points are constructed as $\bm{x}_{\mu} = \sigma\left( \bm{z}_{\mu} \bm{F} \right)$, with $\bm{z}_{\mu} \in \mathbb{R}^d$ being a sample from one of the two datasets, $\bm{F} \in \mathbb{R}^{d\times p}$ representing the matrix of random features, whose elements are sampled according to a normal distribution, and $\sigma$ being some point-wise non-linearity, namely $\mathrm{erf}$ for MNIST and $\mathrm{relu}$ for grayscale CIFAR-10. The red dots correspond instead to fashion-MNIST pre-processed with the wavelet scattering transform, an ensemble of engineered features producing rotationally and translationally invariant representations of the input data points \cite{bruna2013invariant}. The orange dots correspond to simulations on the synthetic dataset built as a mixture of two Gaussians, with the data covariances of the two clusters both equal to the identity matrix ($\bm{\Sigma}_1 \!=\! \bm{\Sigma}_2 \!=\! \bm{I}$, $\bm{\mu}_{1/2} \!=\! \left( \pm 1,0,...,0 \right)$ and $\rho_{1/2} \!=\! 1/2$).
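For reference, the synthetic mixture experiment can be reproduced with a few lines of Python. The sketch below is again illustrative (the choice of optimizer and its settings are made here for simplicity, and are not a description of the repository's code): it draws the two-cluster data described above, assigns random labels, and minimizes the $\ell_2$-regularized hinge risk of eq.~\eqref{eq:def_min_problem_main} with a quasi-Newton solver.
\begin{verbatim}
import numpy as np
from scipy.optimize import minimize

def hinge_training_loss(X, y, lam):
    """Minimize (1/n) sum hinge + (lam/2)||theta||^2 and return the training hinge loss."""
    n, p = X.shape
    def obj_and_grad(theta):
        margins = 1.0 - y * (X @ theta)
        active = margins > 0
        value = np.mean(np.maximum(margins, 0.0)) + 0.5 * lam * theta @ theta
        grad = -(X.T @ (y * active)) / n + lam * theta  # a subgradient of the objective
        return value, grad
    theta = minimize(obj_and_grad, np.zeros(p), jac=True, method="L-BFGS-B").x
    return np.mean(np.maximum(1.0 - y * (X @ theta), 0.0))

rng = np.random.default_rng(0)
p, alpha, lam = 400, 3.0, 1e-2
n = int(alpha * p)
X = rng.standard_normal((n, p))              # identity-covariance clusters
X[:, 0] += rng.choice([-1.0, 1.0], size=n)   # means (+1,0,...,0) and (-1,0,...,0)
y = rng.choice([-1.0, 1.0], size=n)          # random labels, independent of the clusters
print(hinge_training_loss(X, y, lam))
\end{verbatim}
Since the hinge loss is only piecewise differentiable, L-BFGS-B is used here merely for simplicity and returns an approximate minimizer; averaging such runs over several seeds gives points directly comparable to the mixture column of Fig.~\ref{fig:all_datasets_finite_reg}.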
Further technical details are given in the appendix. \paragraph{Experiments with finite regularization ---} Fig.~\ref{fig:all_datasets_finite_reg} illustrates the Gaussian universality taking place at finite regularization. The coloured dots correspond to the outcome of the simulations for several values of the regularization strength. As we can see from this set of plots, the theoretical learning curve of a single Gaussian with matching covariance perfectly fits the behaviour of multi-modal and more realistic input data distributions. In fact, even though the experiment is performed for a realistic dataset and finite $n$ and $p$, the asymptotic Gaussian theory gives a perfect fit of the data. \paragraph{Experiments with vanishing regularization ---} Fig.~\ref{fig:all_datasets_zero_reg} provides an illustration of the universality effect occurring at vanishing regularization. Here we use $\lambda \!\to\!0$, and following Theorem \ref{theorem:cov}, we observe a collapse on a single curve given by the asymptotic theory for a single Gaussian with unit covariance. It is quite remarkable that our asymptotic theory, which is valid only in the high-dimensional limit, is validated by such experiments with finite dimension, and finite sample size.\looseness=-1 \paragraph{Homogeneity assumption ---} A remarkable point is that the homogeneity assumption (often called homoskedasticity in statistics) we use in Theorem \ref{thm:Gauss_universality}, which can be relaxed only for ridge regression, does not seem to be that important in practice, as we observed in our experiments on real data. One may thus wonder if the strong universality of Theorem \ref{thm:ols} could be proved in full generality, and not only for the ridge loss. It turns out that the answer is no. Using Theorem~\ref{propGMM}, we can actually construct an artificial mixture of Gaussians, using {\it very different} covariances for each individual Gaussian, and observe small deviations from the strict universality. A mixture of {\it non-homogeneous} Gaussians is not strictly equivalent to a single one with random labels (except, as stated in Theorem \ref{thm:ols}, for least squares, which obeys a strong universality). This is illustrated in Fig.~\ref{fig:no_match_reg_only_three_blocks} where we show the disagreement in the behaviour of the training loss between a single Gaussian and a mixture of two non-homogeneous Gaussians. This is a simple counter-example to the existence of a universal strong form of Gaussian universality, even for ridge regression at finite regularization (see discussions in e.g. \cite{goldt2020gaussian,bordelon2020spectrum,tomasini2022failure, Ingrosso2022, Ba2022}).\looseness=-1 It may thus come as a surprise that real datasets, which certainly will not obey such a strict homogeneity of the different modes, display such a spectacular agreement with the theory. We believe that this is due to two effects: first, the deviations we observed, even in our designed counter-example, are small, so they might not even be seen in practice. Secondly, and especially after observing the data through random or scattering features, it turns out that when we measure the empirical correlation matrices of the different modes, they look quite similar. In fact, it has even been suggested that neural networks are {\it precisely} learning representations that find such homogeneous Gaussian mixtures \cite{papyan2020prevalence}. \begin{figure}[t!]
\begin{center} \centerline{\includegraphics[width=500pt]{Figures/no_match_reg_only_three_blocks.pdf}} \caption{Ridge/square loss (left) and hinge loss (right) for a single Gaussian vs a mixture of inhomogeneous Gaussians at finite $\lambda$. Lines are the asymptotic exact results while dots are simulations ($p\!=\!900$, dark lines for the mixture, lighter ones for the single Gaussian). When the homogeneity assumption is not obeyed, a mixture of two Gaussians does not yield results equal to those of a single Gaussian with matching covariance. (Here, a mixture of two zero-mean Gaussians with block covariances whose diagonal blocks are equal, respectively, to $0.01$, $0.98$ and $0.01$ for the first one, and to $0.495$, $0.01$ and $0.495$ for the second.) Note however that the universality is restored in the ridge case when $\lambda \to 0$, as stated in Theorem \ref{thm:ols}. It is also very well obeyed for large enough $\lambda$, and deviations appear small in general.} \label{fig:no_match_reg_only_three_blocks} \end{center} \vskip -0.2in \end{figure} \paragraph{A remark on Rademacher complexity ---} A final comment is that the discussed universality indicates that, in high dimension, the Rademacher complexity can be effectively replaced by the one for Gaussian i.i.d. data. \emph{Rademacher complexity} is a key quantity appearing in generalization bounds for binary classification problems \cite{shalev2014understanding,vapnik1999nature} that measures the ability of estimators in a hypothesis class $\mathcal{H}$ to fit i.i.d. random labels $y_{\mu}\sim \text{Rad}(\sfrac{1}{2})$: \begin{equation} \text{Rad}_n\left( \mathcal{H}\right) = \mathbb{E} \left[ \underset{h\in \mathcal{H}}{\mbox{sup}} \ \frac{1}{n} \sum_{\mu = 1}^n y_{\mu} h\left( \bm{x}_{\mu} \right) \right]. \end{equation} It is explicitly dependent on the specific distribution of the input data points $\bm{x}_{\mu}$. As discussed in \citep{abbaras2020rademacher}, there exists a direct mapping between the Rademacher complexity and the minimum 0/1 training loss, or ground state energy in statistical physics parlance. Indeed, for a binary hypothesis class $\mathcal{H} = \{h:\mathbb{R}^{p}\to\{-1,+1\}\}$ the two are asymptotically related by the following equation: \begin{equation} \underset{n \rightarrow \infty}{\lim}\underset{h\in\mathcal{H}}{\inf}\frac{1}{n}\sum\limits_{\mu=1}^{n}\mathbb{P}\left(h(\bm{x_{\mu}})\neq y_{\mu}\right) = \frac{\alpha}{2}\left[ 1 - \text{Rad}_n\left( \mathcal{H}\right) \right]. \end{equation} Moreover, \cite{abbaras2020rademacher} discussed how to explicitly compute the Rademacher complexity for Gaussian data using the replica method from statistical physics. This is actually a classical problem, studied by the pioneers of the application of the replica method and spin glass theory to theoretical machine learning~\citep{gardner1988optimal,gardner1989three,krauth1989storage,derrida1991finite}. Given the universality advocated in the present work, these Gaussian results thus seem to be of more relevance than previously thought, and in fact allow us to compute a closed-form asymptotic expression for the Rademacher complexity for realistic data. This is a very interesting outcome of the Gaussian universality with random labels. However, while we prove universality for convex losses, we so far only {\it conjecture it} for non-convex objectives, such as the ones appearing in the definition of the Rademacher complexity. The proof that a Gaussian mixture approximates well real datasets is still valid for non-convex losses.
The identification of these mixtures with a single Gaussian relies, however, on the replica formulas of \cite{loureiro2021learning, loureiro2021learning_gm}, which have been proven only for the case of convex losses. Our conjecture thus depends on proving a similar result for non-convex losses (as well as for cases with replica symmetry breaking). This (and similar questions on multi-layer networks) is left for future work. \section{Conclusion} For the classical problem of fitting random labels with perceptrons, a.k.a. generalized linear models, in high dimensions, we showed that, far from being only a toy example, the Gaussian i.i.d. assumption is an excellent model of reality. The conclusion extends to deep-learning models in the lazy regime, as those are essentially random feature models. There are a number of potentially interesting extensions of this work, including non-convex losses, multi-layer neural networks, and settings beyond the random label case, that should be investigated in the future. These results, we believe, are of special interest given the number of theoretical studies with the Gaussian design and its variants, which are amenable to exact characterization, and which turn out to be less idealistic, and more realistic, than perhaps previously assumed. We believe, in particular, that these considerably strengthen the ensemble of results obtained within the statistical physics community, as well as in the statistical analysis of high-dimensional data. We anticipate that such a redemption of the Gaussian assumption will lead to more work in this direction, both by those using the Gaussian assumption and by those aiming to extend our universality results. \section*{Acknowledgements} We acknowledge funding from the ERC under the European Union’s Horizon 2020 Research and Innovation Program Grant Agreement 714608-SMiLe, as well as from the Swiss National Science Foundation grant SNSF OperaGOST, $200021\_200390$ and the \textit{Choose France - CNRS AI Rising Talents} program.
\bibliographystyle{unsrt} \bibliography{bib} \clearpage \newpage \appendix \section{Exact asymptotic performances of GCM and GMM} \label{formulas} In this appendix we summarize the exact asymptotic formulas for the performance of generalized linear classifiers on random labels for the two structured data models studied in the main body: the Gaussian covariate model (GCM) and the Gaussian mixture model (GMM).
\subsection{Preliminaries: the setting} \label{sec:app:preliminaries} Before moving to the key formulas, let us recap the setting. We are interested in the performance of generalised linear classifiers: \begin{align} \hat{y}(\bm{x}) = \text{sign}(\hat{\bm{\theta}}^{\top}\bm{x}) \end{align} where $\hat{\bm{\theta}}\in\mathbb{R}^{p}$ is trained by minimising the following empirical risk on $n$ independent training samples $(\bm{x}_{\mu}, y_{\mu})_{\mu\in[n]}\in\mathbb{R}^{p}\times\{-1,+1\}$: \begin{align} \label{eq:app:erm} \widehat\cR_n^*(\bm X, \bm y) = \inf_{\bm \theta \in \cS_p} \frac1n\sum_{\mu=1}^n \ell(\bm \theta^\top \bm x_\mu, y_\mu) + \frac{\lambda}{2} \vert\vert \bm{\theta} \vert\vert_2^2 , \end{align} for a compact subset $\cS_p\subset\mathbb{R}^{p}$ and convex loss function $\ell$. In particular, we are interested in the case where the labels $y_{\mu}\in\{-1,+1\}$ are randomized (i.e. not correlated with the inputs $\bm{x}_{\mu}$), \begin{align} y_{\mu} \sim \frac{1}{2}\left( \delta_{-1} + \delta_{+1}\right), \qquad \text{i.i.d.} \end{align} and the inputs are generated independently from one of the following two structured models: \begin{description} \item[Gaussian covariate model (GCM): ] $\bm{x}_{\mu}\sim\mathcal{N}(\bm{0}_{p}, \bm{\Sigma})$, \item[Gaussian mixture model (GMM): ] ${\bm x}_\mu \sim \sum_{c\in\mathcal{C}} \rho_{c}\, {\cal N} (\bm \mu_{c},\bm\Sigma_{c})$, \end{description} where $\mathcal{C} = \{1,\cdots, K\}$ is the label set for the Gaussian clouds and $\rho_{c}\in[0,1]$ is the density of points in each class, satisfying $\sum_{c\in\mathcal{C}}\rho_{c} = 1$. Note that in this random label setting the GCM model is a special case of the GMM, where $K\coloneqq |\mathcal{C}| = 1$ and $\bm{\mu}_{1}=\bm{0}_{p}$. In the following, we will be interested in describing the exact asymptotic limit of the following performance metrics in the proportional high-dimensional limit where $n,p\to\infty$ with the ratio $\alpha \coloneqq \sfrac{n}{p}$ and the number of clusters $K$ kept fixed: \begin{description} \item[Training loss: ] $\hat{\mathcal{E}}_{\ell}(\bm{X},\bm{y}) \coloneqq \frac{1}{n}\sum\limits_{\mu=1}^{n}\ell\left(\hat{\bm{\theta}}^{\top}\bm{x}_{\mu}, y_{\mu}\right)$ \item[0/1 training error: ] $\hat{\mathcal{E}}_{0/1}(\bm{X},\bm{y}) \coloneqq \frac{1}{n}\sum\limits_{\mu=1}^{n}\mathbb{P}\left(\text{sign}(\hat{\bm{\theta}}^{\top}\bm{x}_{\mu})\neq y_{\mu}\right)$ \end{description} \noindent where we have defined the design matrix $\bm{X}\in\mathbb{R}^{p\times n}$ and the label vector $\bm{y}\in\{-1,+1\}^{n}$. Note that for convenience we will focus the discussion in this appendix on these two measures, but all results could have been stated for $\hat{\mathcal{R}}^{\star}_{n}$ instead. In particular, the training loss $\hat{\mathcal{E}}_{\ell}$ differs from the empirical risk $\hat{\mathcal{R}}^{\star}_{n}$ by the regularisation term. \paragraph{Note on scalings --} Although the model above is well defined for any scaling, in the following we focus on the case in which the means and covariances satisfy: \begin{align} ||\bm{\mu}_{c}||_{2}^{2} = O(1), && \tr\bm{\Sigma}_{c} = O(p). \label{assump:scalings} \end{align} This scaling of the means and covariances is indeed the natural one (see e.g. \cite{barkai1994statistical,lesieur2016phase,lelarge2019asymptotic,mignacco2020role,wang2021benign}) as well as the most interesting one in high dimensions. If the means have larger norm, then the problem becomes trivial (i.e.
the Gaussians are trivially completely separable), while if the means are smaller, it is impossible to separate them (i.e. they become trivially indistinguishable from a single Gaussian cloud). \paragraph{Ridge and ordinary least-squares classification --} Note that for the special case of ridge classification, in which $\ell(x,y) = \sfrac{1}{2}(y-x)^2$, the empirical risk minimization problem defined in eq.~\eqref{eq:app:erm} admits a closed-form solution: \begin{align} \label{eq:app:ridge} \hat{\bm{\theta}} = \left(\lambda\bm{I}_{p}+\bm{X}\bm{X}^{\top}\right)^{-1}\bm{X}\bm{y} \end{align} \noindent and therefore the computation of the asymptotic training error or loss boils down to a Random Matrix Theory problem, with a solution equivalent to the one we will discuss shortly below. However, some qualitative features can be drawn just from this expression. First, note that for $\lambda>0$, the ridge estimator above will always have a non-zero training loss because of the bias introduced by the regularization term $\sfrac{1}{2}\lambda||\bm{\theta}||_{2}^{2}$. A zero training loss can only be achieved in the limit of vanishing regularization $\lambda\to 0^{+}$, in which case the ridge estimator simplifies to: \begin{align} \label{eq:app:ln} \hat{\bm{\theta}}_{\text{ols}} \coloneqq (\bm{X}^{\top})^{\dagger}\bm{y} \end{align} where $\bm{X}^{\dagger}\in\mathbb{R}^{n\times p}$ is the Moore--Penrose pseudo-inverse of $\bm{X}$. In the simplest case in which $\bm{X}$ is a full-rank matrix (which ultimately depends on the covariances), it can be explicitly written as: \begin{align} \bm{X}^{\dagger} \coloneqq \begin{cases} (\bm{X}^{\top}\bm{X})^{-1}\bm{X}^{\top} \text{ if } \alpha < 1\\ \bm{X}^{\top}(\bm{X}\bm{X}^{\top})^{-1} \text{ if } \alpha > 1 \end{cases} \end{align} An important property of the estimator in eq.~\eqref{eq:app:ln} is that it corresponds to the least $\ell_2$-norm interpolator when the system is underdetermined. Indeed, in the strict case when $\lambda=0$ (i.e. least-squares regression) the ERM problem in eq.~\eqref{eq:app:erm} is equivalent to inverting a linear system: \begin{align} \bm{y} = \bm{X}^{\top}\bm{\theta} \end{align} i.e. to solving a system of $n$ equations in $p$ unknowns. Again, assuming the data is full-rank\footnote{The general case is given by changing $p$ for the rank of the design matrix.}, for $\alpha = \sfrac{n}{p} < 1$ the system is \emph{underdetermined}, meaning that there are infinitely many solutions that perfectly interpolate the data. Among all of them, $\hat{\bm{\theta}}_{\text{ols}}$ is the one with the lowest $\ell_{2}$-norm. Instead, when $\alpha>1$, the system is overdetermined, and no interpolating (zero-loss) solution exists. \subsection{Gaussian mixture model with general labels} \label{sec:app:gmm} Exact asymptotics of generalized linear classification with Gaussian Mixtures in the proportional regime have been derived under different settings in the literature \cite{kini2021phase,sifaou2019phase,mai2019large,mignacco2020role,taheri2020optimality,dobriban2020provable}. Of particular interest to our work are the formulas proved in \cite{loureiro2021learning_gm} under the most general setting of a multi-class learning problem with convex losses and penalties and generic means and covariances. In their work, the asymptotic performance of the minimiser in eq.~\eqref{eq:app:erm} was proven in the case where the labels are correlated with the means.
The formula we state in the text as Theorem \ref{thm:app:asymp_error} is a straightforward adaptation of their result in the particular case of binary classification with $K$ clusters and randomized labels. \paragraph{Zero mean limit:} Of particular interest for what follows is the zero-mean limit $\bm{\mu_{c}}=\bm{0}_{p}$ of the above equations, which is simply given by: \begin{equation}\label{eq:app:sp:gmm_zeromean} \begin{split} &\begin{cases} \hat m_c = 0\\ \hat{V}_{c} = \frac{\alpha}{2} \rho_{c}\sum\limits_{y\in\{-1,+1\}}\mathbb{E}_{\xi\sim\mathcal{N}(0,1)}\left[\partial_{\omega}f_{\ell}(y, \sqrt{q_{c}}\xi, V_{c})\right]\\ \hat{q}_{c} = \frac{\alpha}{2} \rho_{c}\sum\limits_{y\in\{-1,+1\}}\mathbb{E}_{\xi\sim\mathcal{N}(0,1)}\left[f_{\ell}(y, \sqrt{q_{c}}\xi, V_{c})^2\right]\\ \end{cases}\\ & \begin{cases} m_c = 0\\ V_{c} = \frac{1}{p}\tr\Sigma_{c}\left(\lambda\bm{I}_{p}+\sum\limits_{c'\in\mathcal{C}}\hat{V}_{c'}\Sigma_{c'}\right)^{-1}\\ q_{c} = \frac{1}{p}\sum\limits_{c'\in\mathcal{C}}\left[\hat{q}_{c'}\tr\Sigma_{c'}\Sigma_{c}\left(\lambda\bm{I}_{p}+\sum\limits_{c''\in\mathcal{C}}\hat{V}_{c''}\Sigma_{c''}\right)^{-2}\right]\\ \end{cases} \end{split} \end{equation} \paragraph{A particular case: ridge classification --} The self-consistent equations above crucially depend on the loss function $\ell$. A particular case of interest in this work, and for which the expressions considerably simplify, is ridge regression, where $\ell(x,y)=\sfrac{1}{2}(x-y)^2$. In this case, the proximal operator can be written explicitly as: \begin{align} \prox_{\tau \ell(\cdot, y)}(x) = \frac{x+\tau y}{1+\tau} \quad\Leftrightarrow\quad f_{\ell}(y,\omega,V) = \frac{y-\omega}{1+V} \end{align} \noindent and therefore the asymptotic training loss admits a closed-form expression: \begin{align} \label{eq:app:loss:ridge:gmm} \mathcal{E}^{\text{gmm}}_{\ell} = \sum\limits_{c\in\mathcal{C}}\rho_{c}\frac{1+q^{\star}_{c}}{2(1+V^{\star}_{c})^2} \end{align} for $(V_{c}^{\star},q^{\star}_{c})_{c\in\mathcal{C}}$ solutions of the following simplified self-consistent equations: \begin{align} &\begin{cases} \hat{V}_{c} = \frac{\alpha \rho_{c}}{1+V_{c}}\\ \hat{q}_{c} = \alpha \rho_{c}\frac{1+q_{c}}{(1+V_{c})^2}\\ \end{cases}, && \begin{cases} V_{c} &= \frac{1}{p}\tr\Sigma_{c}\left(\lambda\bm{I}_{p}+\sum\limits_{c'\in\mathcal{C}}\hat{V}_{c'}\Sigma_{c'}\right)^{-1}\\ q_{c} &= \frac{1}{p}\sum\limits_{c'\in\mathcal{C}}\left[\hat{q}_{c'}\tr\Sigma_{c'}\Sigma_{c}\left(\lambda\bm{I}_{p}+\sum\limits_{c''\in\mathcal{C}}\hat{V}_{c''}\Sigma_{c''}\right)^{-2}\right] \end{cases} \end{align} Note that in particular, at the fixed point, we can also express the training loss eq.~\eqref{eq:app:loss:ridge:gmm} as: \begin{align} \label{eq:app:loss:ridge:gmm-flo} \mathcal{E}^{\text{gmm}}_{\ell} = \sum\limits_{c\in\mathcal{C}} \frac{\hat q^*_c}{2 \alpha}. \end{align} \subsection{Gaussian covariate model} \label{sec:app:gcm} The asymptotic training loss for the Gaussian covariate model for a fairly general teacher-student setting was first proven in \cite{loureiro2021learning}. Although the random label limit can be obtained from this work, as discussed in Sec.~\ref{sec:app:preliminaries} the random label Gaussian covariate model can also be seen as a particular case of the general Gaussian mixture model with $K=1$ and $\bm{\mu}_{1}=\bm{0}_{p}$. Therefore, its asymptotic performance is included in the discussion above. This leads to Theorem \ref{theorem:GM} in the main text.
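In practice, theory curves such as those in Figs.~\ref{fig:all_datasets_zero_reg} and \ref{fig:all_datasets_finite_reg} are obtained by solving such self-consistent equations numerically, e.g. by a damped fixed-point iteration. As an illustration (and not a description of the released code), the following minimal Python sketch evaluates the asymptotic training loss for the square-loss GCM, using the simplified equations written out just below and assuming a diagonal covariance specified by its eigenvalues.
\begin{verbatim}
import numpy as np

def gcm_ridge_theory_loss(alpha, lam, spectrum, n_iter=2000, damping=0.5):
    """Damped fixed-point iteration for the square-loss GCM self-consistent
    equations in (V, q, hat V, hat q); `spectrum` holds the eigenvalues of Sigma."""
    s = np.asarray(spectrum, dtype=float)
    V, q = 1.0, 1.0
    for _ in range(n_iter):
        V_hat = alpha / (1.0 + V)
        q_hat = alpha * (1.0 + q) / (1.0 + V) ** 2
        V_new = np.mean(s / (lam + V_hat * s))
        q_new = q_hat * np.mean(s ** 2 / (lam + V_hat * s) ** 2)
        V = damping * V + (1.0 - damping) * V_new
        q = damping * q + (1.0 - damping) * q_new
    return (1.0 + q) / (2.0 * (1.0 + V) ** 2)   # asymptotic training loss

# Sanity check: identity covariance and vanishing regularization,
# which should recover (1/2)(1 - 1/alpha) for alpha > 1.
print(gcm_ridge_theory_loss(alpha=3.0, lam=1e-8, spectrum=np.ones(1000)))
\end{verbatim}
For a real dataset, the array of eigenvalues would simply be replaced by the spectrum of the empirical covariance of the (feature-transformed) data.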
For the square loss, the expressions indeed simplify considerably. The training loss is given by: \begin{align} \mathcal{E}^{\text{gcm}}_{\ell} = \frac{1+q^{\star}}{2(1+V^{\star})^2} \end{align} where $(V^{\star},q^{\star})$ are solutions of the following simplified self-consistent equations: \begin{align} &\begin{cases} \hat{V} = \frac{\alpha}{1+V}\\ \hat{q} = \alpha \frac{1+q}{(1+V)^2}\\ \end{cases}, && \begin{cases} V &= \frac{1}{p}\tr\Sigma\left(\lambda\bm{I}_{p}+\hat{V}\Sigma\right)^{-1}\\ q &= \frac{1}{p}\hat{q}\tr\Sigma^2\left(\lambda\bm{I}_{p}+\hat{V}\Sigma\right)^{-2} \end{cases} \end{align} Since the covariance $\Sigma$ is positive-definite (and therefore invertible), in the overdetermined regime (for which the training loss is non-zero), the limit $\lambda\to 0^{+}$ can be easily taken, and the equations reduce to: \begin{align} &\begin{cases} \hat{V} = \frac{\alpha}{1+V}\\ \hat{q} = \alpha \frac{1+q}{(1+V)^2}\\ \end{cases}, && \begin{cases} V &= \frac{1}{\hat{V}}\\ q &= \frac{\hat{q}}{\hat{V}^2} \end{cases} \end{align} \noindent which is completely independent of the covariance matrix $\Sigma$\footnote{As discussed in Theorem \ref{theorem:cov}, the fact that the loss is independent of $\Sigma$ in this regime can be directly seen from the optimization.}. Moreover, it admits a closed-form solution given by: \begin{align} V^{\star}=q^{\star}=\frac{1}{\alpha-1}, && \hat{V}^{\star} = \hat{q}^{\star} = \alpha-1 \end{align} Therefore, the full training loss is given by: \begin{align} \lim\limits_{\lambda\to 0^{+}}\mathcal{E}_{\ell}^{\text{gcm}}(\alpha, \lambda) = \begin{cases} 0 & \text{ for } \alpha \leq 1\\ \frac{1}{2}\left(1-\frac{1}{\alpha}\right) & \text{ for } \alpha > 1 \end{cases} \end{align} \section{From Gaussian mixture to single Gaussian} \label{proof2} \subsection{Mixture of Gaussians with zero means} \label{sec:app:zeromean} We first prove Lemma \ref{lemma:mean_universality} in the main text. First, by Theorem \ref{thm:app:asymp_error}, the asymptotic loss $\mathcal{E}_{\ell}^{\text{gmm}}(\bm \rho, \bm M, \bm \Sigma^\otimes)$ (resp. $\mathcal{E}_{\ell}^{\text{gmm}}(\bm \rho, \bm 0, \bm \Sigma^\otimes)$) is a deterministic function of $(m_c^\star, q_c^\star, V_c^\star)_{c\in \cC}$, which are the \emph{unique} fixed points of \eqref{eq:app:sp:gmm} (resp.\ \eqref{eq:app:sp:gmm_zeromean}). Since both saddle-point equations differ only by setting $m_c = \hat m_c = 0$, Lemma \ref{lemma:mean_universality} is a consequence of the following: \begin{lemma} Let $(V_c^\star, q_c^\star)_{c\in\cC}$ be the solutions of Eqs. \eqref{eq:app:sp:gmm_zeromean}. Then, $(0, V_c^\star, q_c^\star)_{c \in \cC}$ satisfy the general fixed point equations of \eqref{eq:app:sp:gmm}. \end{lemma} \begin{proof} If we plug in $m_c = \hat m_c = 0$ for all $c\in \cC$, the equations for $V_c, \hat V_c, q_c, \hat q_c$ become identical in \eqref{eq:app:sp:gmm} and \eqref{eq:app:sp:gmm_zeromean}. It is also easy to check that $\hat m_c = 0$ for all $c$ implies that $m_c = 0$; what remains is to show that the equation for $\hat m_c$ holds, i.e. \begin{equation} \frac{\alpha}{2} \rho_{c}\sum\limits_{y\in\{-1,+1\}}\mathbb{E}_{\xi\sim\mathcal{N}(0,1)}\left[f_{\ell}(y, \sqrt{q_{c}^\star}\xi, V_{c}^\star)\right] = 0.
\end{equation} Define the function \[ g(\omega, V) = f_{\ell}(-1,\omega,V) + f_{\ell}(+1,\omega,V), \] so that \[ \hat m_c^\star \propto \dE_{\xi\sim\cN(0, 1)}\left[ g(\sqrt{q_c^\star}\xi, V_c^\star)\right]. \] We shall show that $g$ is odd in $\omega$; since $\xi$ is centered, the lemma will be proven. To do so, we shall show that \[ f_\ell(y, \omega, V) = -f_\ell(-y, -\omega, V), \] for all $y \in \{-1, +1\}$, $\omega\in \dR$, and $V\in \dR$. By definition, we have \[ f_{\ell}(y,\omega,V) = V^{-1}\left(\prox_{V\ell(\cdot, y)}(\omega)-\omega\right), \] and the linear term in $\omega$ is immediate. For the proximal operator, we use the symmetry of $\ell$ and write \begin{align*} \prox_{V\ell(\cdot, y)}(\omega) &= \argmin_{z\in\cC_1} \left[\frac{1}{2V}(z-\omega)^2+\ell(z,y)\right] \\ &= \argmin_{z\in\cC_1} \left[\frac{1}{2V}((-z)-(-\omega))^2+\ell(-z,-y)\right] \\ &= -\prox_{V\ell(\cdot, -y)}(-\omega), \end{align*} which concludes the proof. \end{proof} \subsection{Strong universality of ordinary least-squares} \label{sec:app:uni} We now have all the elements we need to establish the universality of the ordinary least-squares estimator stated in Theorem \ref{thm:ols} in the main text. Our starting point is the ordinary least-squares problem for the Gaussian Mixture Model in the overdetermined regime $\alpha > 1$. In this case, the training loss is given by eq.~\eqref{eq:app:loss:ridge:gmm} with $(V^{\star}_{c},q^{\star}_{c})_{c\in\mathcal{C}}$ unique solutions of the following equations: \begin{align} &\begin{cases} \hat{V}_{c} = \frac{\alpha \rho_{c}}{1+V_{c}}\\ \hat{q}_{c} = \alpha \rho_{c}\frac{1+q_{c}}{(1+V_{c})^2}\\ \end{cases}, && \begin{cases} V_{c} &= \frac{1}{p}\tr\Sigma_{c}\left(\sum\limits_{c'\in\mathcal{C}}\hat{V}_{c'}\Sigma_{c'}\right)^{-1}\\ q_{c} &= \frac{1}{p}\sum\limits_{c'\in\mathcal{C}}\left[\hat{q}_{c'}\tr\Sigma_{c'}\Sigma_{c}\left(\sum\limits_{c''\in\mathcal{C}}\hat{V}_{c''}\Sigma_{c''}\right)^{-2}\right] \end{cases} \label{ref:flo0} \end{align} We shall now show how to reduce these equations to a simple analytical formula, equivalent to the one of a single Gaussian. Combining the equations for $\hat{V}_{c}$ and $V_{c}$, one sees that the fixed point must satisfy the following identity: \begin{align} \sum\limits_{c\in\mathcal{C}}\hat{V}^\star_{c}V^\star_{c} = 1 \end{align} Similarly, multiplying the equation for $q_{c}$ by $\hat{V}_{c}$, summing over $c\in\mathcal{C}$ and doing the same for the equation for $\hat{q}_{c}$ with $V_{c}$, we get a second identity satisfied by the fixed-point: \begin{align} \sum\limits_{c\in\mathcal{C}}\left(\hat{V}_{c}^\star q_{c}^\star-V_{c}^\star\hat{q}_{c}^\star\right) = 0 \end{align} Note that, up to this point, these relations could have been derived for any loss function.
For the specific case of the square loss, further substituting the hat variables, these conditions are equivalent to:
\begin{align}
    \sum\limits_{c\in\mathcal{C}}\rho_{c}\frac{V_{c}^\star}{1+V_{c}^\star} = \frac{1}{\alpha} \label{ref:flo1} \\
    \sum\limits_{c\in\mathcal{C}}\rho_{c}\frac{V_{c}-q_{c}}{(1+V_{c})^{2}} = 0 \label{ref:flo2}
\end{align}
We thus find, combining eq.~\eqref{ref:flo0} for $\hat q_c$ with eq.~\eqref{ref:flo2},
\begin{align}
    \sum\limits_{c\in\mathcal{C}} \hat q_c^\star = \sum\limits_{c\in\mathcal{C}} \alpha \rho_c\frac{1+V^\star_{c}}{(1+V^\star_{c})^2} = \sum\limits_{c\in\mathcal{C}} \alpha \rho_c\frac 1{1+V^\star_{c}}
    \label{ref:flo3}
\end{align}
Our goal is to evaluate the loss at the fixed point, which is given by eq.~\eqref{eq:app:loss:ridge:gmm-flo}:
\begin{align}
    \mathcal{E}^{\text{gmm}}_{\ell} = \sum\limits_{c\in\mathcal{C}} \frac{\hat q^\star_c}{2 \alpha}
\end{align}
Combining this definition with eqs.~\eqref{ref:flo1} and \eqref{ref:flo3}, we find that
\begin{align}
    2\mathcal{E}^{\text{gmm}}_{\ell} + \frac 1{\alpha} = \sum\limits_{c\in\mathcal{C}} \rho_c\frac 1{1+V^\star_{c}} + \sum\limits_{c\in\mathcal{C}}\rho_c\frac{V^\star_{c}}{1+V^\star_{c}} = 1
\end{align}
so that, finally, we reach the promised result:
\begin{align}
    \lim\limits_{\lambda\to 0^{+}} \mathcal{E}^{\text{gmm}}_{\ell}(\alpha,\lambda,K) = \frac{1}{2}\left(1-\frac{1}{\alpha}\right)_{+} = \lim\limits_{\lambda\to 0^{+}}\mathcal{E}_{\ell}^{\text{gcm}}(\alpha,\lambda)
\end{align}
as claimed in Theorem \ref{thm:ols} of the main text.
\section{Numerical Simulations}
\label{numerical_simulations}
In this section, we provide further details on the protocol used to perform the numerical simulations that corroborate the theoretical results presented in the main manuscript. All code is publicly available in the GitHub repository associated with this paper at \href{https://github.com/IdePHICS/RandomLabelsUniversality}{https://github.com/IdePHICS/RandomLabelsUniversality}. For completeness, we illustrate these simulations in Fig. \ref{fig:tiny_imagenet_finite_reg} with a further case beyond those presented in the main text, using a smaller version of the well-known ImageNet benchmark \cite{deng2009imagenet}. It consists of $100{,}000$ natural images, downsampled to $64 \times 64$ pixels each and grouped into $200$ different classes.
\begin{figure}[ht]
\begin{center}
\centerline{\includegraphics[width=500pt]{Figures/tiny-imagenet.pdf}}
\caption{Numerical simulations of universality: as in Fig. \ref{fig:all_datasets_finite_reg}, this figure shows the training loss as a function of the number of samples per dimension $n/p$ at various values of $\lambda$, for an additional dataset included here for completeness: grayscale tiny-ImageNet pre-processed with Gaussian random features and $\mbox{tanh}$ non-linearity. The left panel shows the square loss, the middle panel the logistic loss and the right panel the hinge loss. The coloured dots refer to numerical simulations, while the black solid lines correspond to the theoretical prediction for a single Gaussian with the corresponding input covariance matrices. The numerical simulations are averaged over $10$ different realizations.}
\label{fig:tiny_imagenet_finite_reg}
\end{center}
\vskip -0.2in
\end{figure}
In all the numerical experiments on real datasets shown so far, we have first normalized and then pre-processed the datasets with either random features or the wavelet-scattering transform. Fig.
\ref{fig:all_datasets_finite_reg_direct} compares instead the predictions of the Gaussian theory with the numerical simulations on MNIST, fashion-MNIST, CIFAR10 and tiny ImageNet when no pre-processing is applied. As can be seen, despite the overall good agreement between theory and numerical experiments, we start to observe some (very) small deviations from the Gaussian predictions. Indeed, as shown in sec. \ref{evaluation_data_covariance}, the covariance matrices associated with the different modes of the underlying real data distribution are, in this case, more heterogeneous than those observed when a pre-processing stage is applied. This is consistent with the role of the homogeneity assumption in Theorem \ref{thm:Gauss_universality}, which implies Gaussian universality.
\begin{figure}[t!]
\begin{center}
\centerline{\includegraphics[width=500pt]{Figures/finite_reg_direct.pdf}}
\caption{This figure shows the training loss as a function of the number of samples per dimension $n/p$ at finite regularization $\lambda$. The top row shows the square loss and the bottom row the hinge loss. The first column refers to MNIST, the second to fashion-MNIST, the third to CIFAR10 in grayscale and the fourth to tiny ImageNet in grayscale. Black solid lines correspond to the outcome of the replica calculation, obtained by assigning to $\Sigma$ the covariance matrix of each dataset. The coloured dots correspond to the simulations for different values of $\lambda$, as specified in the plot legend. Simulations are averaged over $10$ realizations; the error bars are not visible at the plot scale.}
\label{fig:all_datasets_finite_reg_direct}
\end{center}
\vskip -0.4in
\end{figure}
\paragraph{Dataset generation.} As discussed in the main manuscript, we deal with three different types of datasets. Two of them are synthetic and correspond to i.i.d.\ Gaussian input data-points and Gaussian mixtures. The remaining one corresponds to real datasets, such as MNIST \cite{deng2012mnist}, fashion-MNIST \cite{xiao2017/online} and CIFAR10 \cite{xiao2017/online} in grayscale, pre-processed with either random feature maps \cite{rahimi2007random} or the wavelet scattering transform \cite{bruna2013invariant}. The procedure used to generate these datasets is described in sec. \ref{sec:numerics}. For the sake of clarity, we summarize it in the pseudo-code of algorithm \ref{alg:app:dataset}.
\begin{algorithm}[H]
\caption{Generating dataset $\mathcal{D} = \{\bm{x}^{\mu}, y^{\mu}\}_{\mu=1}^{n}$}
\label{alg:app:dataset}
\begin{algorithmic}
\STATE {\bfseries Input:} Integer $p$, flag \emph{dataset type}, matrix $F \in \mathbb{R}^{d\times p}$ of random Gaussian features
\STATE {\bfseries If} the \emph{dataset type} is i.i.d.
Gaussian:
\STATE \hspace{2mm} Sample each input data-point as $\bm{x}^{\mu} \sim \mathcal{N}\left(0, \bm{I} \right)$, with $\bm{I} \in \mathbb{R}^{p\times p}$ the identity matrix;
\STATE {\bfseries Else if} the \emph{dataset type} is a Gaussian Mixture:
\STATE \hspace{2mm} Sample each input data-point as $\bm{x}^{\mu} \sim \sum_{k=1}^K \rho_k \ \mathcal{N}\left(\bm{\mu}_k, \Sigma_k \right)$, with $\bm{\mu}_{k}$ being the centroid of
\STATE \hspace{2mm} the $k$-th cluster and $\Sigma_k$ the corresponding covariance matrix;
\STATE {\bfseries Else if} the \emph{dataset type} is a real dataset pre-processed with random Gaussian features:
\STATE \hspace{2mm} Load the real dataset samples $\bm{z}^{\mu} \ \forall \mu = 1,...,n$ with Pytorch dataloaders;
\STATE \hspace{2mm} Assign $\bm{x}^{\mu} \rightarrow \sigma \left( \bm{z}^{\mu} F\right)$;
\STATE {\bfseries Else if} the \emph{dataset type} is a real dataset pre-processed with wavelet scattering:
\STATE \hspace{2mm} Load the real dataset samples $\bm{z}^{\mu} \ \forall \mu = 1,...,n$;
\STATE \hspace{2mm} Assign $\bm{x}^{\mu}$ to the wavelet scattering transform of $\bm{z}^{\mu}$;
\STATE Sample the labels according to the Rademacher distribution as $y^{\mu} \sim \frac{1}{2}\left(\delta_{+1} + \delta_{-1} \right)$
\STATE {\bfseries Return: $\mathcal{D} = \{\bm{x}^{\mu}, y^{\mu}\}_{\mu=1}^{n}$}
\end{algorithmic}
\end{algorithm}
The real datasets are loaded through Pytorch dataloaders \cite{NEURIPS2019_9015}. In particular, the dataloader of CIFAR10 includes a grayscale transformation of the dataset, in order to reduce the three channels of the RGB colour encoding to a single input channel. The wavelet scattering transform is instead implemented by means of the Kymatio Python library \cite{andreux2020kymatio}. Note that, in order to speed up the computation of the learning curves and to reduce fluctuations, the pre-processed real datasets are generated once and for all through algorithm \ref{alg:app:dataset} and then stored in an HDF5 file.
\paragraph{Learning phase.} Given the dataset generated as in algorithm \ref{alg:app:dataset}, the aim is to infer the estimator $\bm{\theta}$ minimizing the empirical risk as in eq. \eqref{eq:def_min_problem_main} of the main manuscript.
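As a first illustration of this step for the simplest of the losses listed below (the square loss), one point of the learning curve can be computed in closed form; the following minimal Python sketch uses i.i.d.\ Gaussian inputs with random labels, and its sizes and regularization value are arbitrary and purely illustrative.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
n, p, lam = 600, 200, 1e-4
X = rng.standard_normal((n, p))                 # i.i.d. Gaussian inputs
y = rng.choice([-1.0, 1.0], size=n)             # random Rademacher labels

# closed-form (ridge) estimator for the square loss, overdetermined case n > p
theta = np.linalg.solve(X.T @ X + lam * np.eye(p), X.T @ y)

train_loss = np.mean((X @ theta - y) ** 2) / 2
print(train_loss, 0.5 * (1 - p / n))            # compare with (1 - 1/alpha)/2
\end{verbatim}
For $n>p$ and random labels, the printed training loss is close to the asymptotic prediction $\frac{1}{2}\left(1-\frac{p}{n}\right)$ discussed in the previous section.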
In the present work we consider three distinct kinds of loss functions:
\begin{enumerate}[wide=2pt]
    \item \textbf{Square Loss.} In this specific case, the goal is to solve the following optimization problem:
    \begin{equation} \label{eq:app:def_min_problem_square_loss}
        \widehat\cR_n^*(\bm X, \bm y) = \inf_{\bm \theta \in \cS_p} \frac{1}{2n}\sum_{\mu=1}^n (\bm \theta^\top \bm x_\mu - y_\mu)^2 + \frac{\lambda}{2} \vert\vert \bm{\theta} \vert\vert_2^2 ,
    \end{equation}
    The estimator can be determined in closed form (reducing to the Moore--Penrose pseudo-inverse as $\lambda \to 0^{+}$), without relying on any iterative learning algorithm:
    \begin{equation}
        \bm \theta = \begin{cases}
        \left( \bm{X}^{\top}\bm{X} + \lambda \bm{I}_{p} \right)^{-1} \bm{X}^{\top} \bm{y} , & \text{if} \ n > p\\\\
        \bm{X}^{\top}\left(\bm{X}\bm{X}^{\top} + \lambda \bm{I}_{n} \right)^{-1} \bm{y} , & \text{if} \ p > n
        \end{cases}
    \end{equation}
    \item \textbf{Logistic Loss.} In this specific case, the goal is to solve the following optimization problem:
    \begin{equation} \label{eq:app:def_min_problem_logistic_loss}
        \widehat\cR_n^*(\bm X, \bm y) = \inf_{\bm \theta \in \cS_p} \frac{1}{n}\sum_{\mu=1}^n \mbox{log}\left(1 + \mbox{exp}(-y_\mu\bm \theta^\top \bm x_\mu)\right) + \frac{\lambda}{2} \vert\vert \bm{\theta} \vert\vert_2^2 ,
    \end{equation}
    Since the estimator of logistic regression cannot be determined through an explicit closed-form expression, we made use of the \emph{lbfgs} solver with \emph{penalty} set to $\ell_2$. This optimizer is a quasi-Newton (second-order) optimization method and is implemented in the LogisticRegression class of the Scikit-Learn Python library \cite{scikit-learn}. The algorithm stops either when a maximum number of iterations has been reached or when the maximum component of the gradient falls below a given threshold. We fixed this tolerance to $10^{-5}$ and the maximum number of iterations to $10^{4}$.
    \item{\textbf{Hinge Loss.}} In this specific case, the goal is to solve the following optimization problem:
    \begin{equation} \label{eq:app:def_min_problem_hinge_loss}
        \widehat\cR_n^*(\bm X, \bm y) = \inf_{\bm \theta \in \cS_p} \frac{1}{n}\sum_{\mu=1}^n \mbox{max}\left(0, 1 - y_\mu\bm \theta^\top \bm x_\mu\right) + \frac{\lambda}{2} \vert\vert \bm{\theta} \vert\vert_2^2 ,
    \end{equation}
    As for logistic regression, in this case we cannot rely on an explicit formula for the estimator; it rather has to be inferred by means of a learning algorithm. In particular, for the simulations at finite regularization strength, we made use of the LinearSVC class provided by Scikit-Learn \cite{scikit-learn}, which implements Support Vector Classification (SVC) with a linear kernel and $\ell_2$ regularization when \emph{penalty} is set to $\ell_2$. In this case, we set the convergence tolerance to $10^{-5}$ and the maximum number of iterations to $5\cdot 10^{5}$. Unfortunately, LinearSVC struggles to converge for vanishing regularization strengths. Therefore, we made use of CVXPY \cite{agrawal2018rewriting,diamond2016cvxpy} in order to perform the simulations at $\lambda = 10^{-15}$. CVXPY is an open-source Python-embedded modeling language for convex optimization problems. We set the \emph{solver} option to None; in this way CVXPY automatically chooses the most suitable solver for the given optimization problem. While being slower than LinearSVC, CVXPY guarantees convergence at vanishing regularization strengths.
\end{enumerate}
At the end of the training process, we evaluate the training loss $\ell$ on the minimizer of the corresponding empirical risk minimization problem. To get the learning curves, we then repeat the whole process over a specified range of $n/p$ and over a certain number of different realizations of the learning problem, as exemplified in algorithm \ref{alg:app:learning}.
\begin{algorithm}[H]
\caption{Learning curve}
\label{alg:app:learning}
\begin{algorithmic}
\STATE {\bfseries Input:} range of $n/p$, flag \emph{dataset type}, flag \emph{which estimator}
\STATE {\bfseries For} $seed$ in a specified number of seeds {\bfseries do}:
\STATE \hspace{2mm} {\bfseries For} $n/p$ in a specified range {\bfseries do}:
\STATE \hspace{6mm} Choose the dataset according to \emph{dataset type};
\STATE \hspace{6mm} Compute the estimator according to the desired optimization problem as in (1)-(3);
\STATE \hspace{6mm} Compute the training loss $\ell$ at fixed $n/p$;
\STATE \hspace{2mm} Update the mean train loss and its standard deviation with the new contribution from the current seed.
\STATE {\bfseries Return:} Mean train loss and standard deviation as a function of $n/p$.
\end{algorithmic}
\end{algorithm}
\section{Empirical evidence of the homogeneity assumption}
\label{evaluation_data_covariance}
As seen in the counter-example illustrated in Fig.~\ref{fig:no_match_reg_only_three_blocks}, in the case of very heterogeneous Gaussian mixtures we can observe small deviations from universality both at zero and at finite regularization. However, this disagreement between the single Gaussian and the Gaussian mixture does not appear in the experiments with real datasets of Fig.~\ref{fig:all_datasets_zero_reg} and Fig.~\ref{fig:all_datasets_finite_reg}, despite their certainly multi-modal and mode-heterogeneous nature. First, we must acknowledge that deviations are, in general, observed to be small with respect to the homogeneous case, and that the data presented in Fig.~\ref{fig:no_match_reg_only_three_blocks} were carefully tuned so that the difference is visible. Additionally, in this section we empirically demonstrate the similarity among the empirical correlation matrices of the various modes characterizing real dataset distributions. Fig.~\ref{fig:true_cifar10_cov} shows the correlation matrices of all grayscale CIFAR-10 images depicting airplanes (leftmost), automobiles (middle) and trucks (rightmost), respectively. The point we wish to convey with this plot is that, although some modes of the CIFAR-10 empirical distribution display a markedly different correlation structure (airplane mode) with respect to the other modes, there exist others which look very similar to each other (automobile and truck modes).
\begin{figure}[ht]
\begin{center}
\centerline{\includegraphics[width=380pt]{Figures/true_cifar10_cov.pdf}}
\vspace{-10mm}
\caption{Input data correlation matrix of grayscale CIFAR10, conditioned on the true labels, namely airplane (leftmost), automobile (middle), truck (rightmost). Lighter colors correspond to stronger correlations.
}
\label{fig:true_cifar10_cov}
\end{center}
\vskip -0.2in
\end{figure}
As can be seen in Fig.~\ref{fig:rf_cifar10_cov} and Fig.~\ref{fig:ws_cifar10_cov}, the structural similarity of the covariance matrices of the various modes is further enhanced when pre-processing grayscale CIFAR-10 with either Gaussian random feature maps or the wavelet scattering transform, to the point that even the least similar modes of the raw dataset conform to the others (see the airplane mode).
\begin{figure}[ht]
\begin{center}
\centerline{\includegraphics[width=380pt]{Figures/rf_cifar10_cov.pdf}}
\vspace{-10mm}
\caption{Input data correlation matrix of grayscale CIFAR10 pre-processed with Gaussian random features and $\mbox{erf}$ non-linearity. The correlation matrices are conditioned on the true labels, namely airplane (leftmost), automobile (middle), truck (rightmost). Lighter colors correspond to stronger correlations.
}
\label{fig:rf_cifar10_cov}
\end{center}
\vskip -0.2in
\end{figure}
\begin{figure}[ht]
\begin{center}
\centerline{\includegraphics[width=380pt]{Figures/ws_cifar10_cov.pdf}}
\vspace{-10mm}
\caption{Input data correlation matrix of grayscale CIFAR10 pre-processed with the wavelet scattering transform. The correlation matrices are conditioned on the true labels, namely airplane (leftmost), automobile (middle), truck (rightmost). Lighter colors correspond to stronger correlations.}
\label{fig:ws_cifar10_cov}
\end{center}
\vskip -0.2in
\end{figure}
\section{Appendix}
\label{appendix}
\subsection{Replica Analysis}
In this section, we provide a detailed derivation of the analytical results of sec. (\ref{section3}). The calculation is tackled by means of the replica method from statistical physics.
\subsubsection{Boltzmann-Gibbs Formulation}
As we have seen in the main manuscript, the replica method allows us to re-frame the learning problem in eq. (\ref{eq:optimization_w}) as an exploration process across the weight space. At equilibrium, we assume the weights to be distributed according to the Boltzmann-Gibbs measure, where the role of the Hamiltonian is played by the loss function defined on the dataset $\mathcal{D}$ of sec. (\ref{Model}):
\begin{align}
    \pi_{\beta}\left( \mathbf{w}, \mathcal{D} \right) &= \frac{1}{\mathcal{Z}_{\beta}}\prod_{\mu = 1}^n e^{-\beta \ell\left(y_{\mu}, f_{\mathbf{w}}\left(\mathbf{x}_{\mu}\right) \right) }\prod_{i = 1}^p e^{ -\frac{\beta\lambda}{2} w^2_i}=\frac{P_{y}\left(Y|X,\mathbf{w} \right) P_w\left(\mathbf{w} \right)}{\mathcal{Z}_{\beta}} \notag\\
    &=\frac{1}{\mathcal{Z}_\beta} e^{-\beta\left( \sum_{\mu = 1}^n \ell\left(y_{\mu}, f_{\mathbf{w}}\left(\mathbf{x}_{\mu}\right)\right) + \frac{\lambda}{2} \vert\vert \mathbf{w} \vert\vert_2^2 \right)}
\end{align}
with $X \in \mathbb{R}^{n\times p}$ being the matrix of all input data-points and $Y \in \mathbb{R}^n$ the vector containing all the $n$ labels. The Boltzmann-Gibbs measure can thus be equivalently interpreted as the posterior of the Bayesian inference problem in eq. (\ref{eq:optimization_w}), whose likelihood and prior are defined by the two probability distributions $P_y\left( \cdot \right)$ and $P_w\left( \cdot \right)$ respectively. In the limit of zero temperature or, in other words, for $\beta \rightarrow \infty$, the Boltzmann-Gibbs distribution concentrates precisely on the minima of the training loss, which are nothing but the solutions of the optimization problem in eq. (\ref{eq:optimization_w}).
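This zero-temperature picture can be illustrated with a minimal sketch: for the square loss with $\ell_2$ regularization the Hamiltonian is quadratic, so the Boltzmann-Gibbs measure is exactly Gaussian, centred on the (ridge) minimizer with covariance proportional to $1/\beta$. The snippet below, which uses arbitrary sizes and ignores the $1/\sqrt{p}$ normalization adopted later in the replica computation, shows samples collapsing onto the minimizer as $\beta$ grows.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
n, p, lam = 40, 10, 0.5
X = rng.standard_normal((n, p))
y = rng.choice([-1.0, 1.0], size=n)              # random labels

# Quadratic Hamiltonian: Gibbs measure is Gaussian with
# mean = ridge estimator and covariance = (beta * (X^T X + lam I))^{-1}.
A = X.T @ X + lam * np.eye(p)
theta_star = np.linalg.solve(A, X.T @ y)

for beta in (1.0, 10.0, 100.0):
    cov = np.linalg.inv(beta * A)
    samples = rng.multivariate_normal(theta_star, cov, size=2000)
    spread = np.mean(np.linalg.norm(samples - theta_star, axis=1))
    print(f"beta = {beta:6.1f}   mean distance from minimizer = {spread:.3f}")
\end{verbatim}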
Re-framing a learning problem in terms of statistical physics would not seem to be a great advantage, considering that sampling from the Boltzmann-Gibbs measure is known to be unfeasible in the high-dimensional limit, i.e. $n,p \rightarrow \infty$ with $\alpha = n/p$ fixed. This is where the replica method comes into play. As we have seen in the main manuscript, the replica trick allows us to compute the typical value, over different realizations of the dataset $\mathcal{D}$, of the free-energy density, namely:
\begin{equation}
    \mathcal{F}_{\beta} = -\underset{p \rightarrow \infty}{\mbox{lim}}\frac{1}{p} \mathbb{E}_{\mathcal{D}}\mbox{log}\mathcal{Z}_{\beta}
\end{equation}
From this quantity, all the other quantities of interest, including the training loss, can be easily computed by following the replica recipe. In particular, the replica method allows us to compute the following parameters:
\begin{equation}
    Q_\star = \frac{\mathbf{\hat{w}}^t \Sigma \mathbf{\hat{w}}}{p} \hspace{10mm} M_\star = \frac{\mathbf{\hat{w}}^t \mathbf{\mu}}{p}
\end{equation}
with $\mathbf{\hat{w}}$ being the solution of the optimization problem in eq. (\ref{eq:optimization_w}) or, equivalently, the ground state of the Boltzmann-Gibbs measure $\pi_{\beta}$. As we will see, the training loss, which in principle is a high-dimensional quantity, can be easily expressed in terms of the simple scalar quantities $Q_\star$ and $M_\star$.
\subsubsection{Replica Calculation}
The replica calculation is based on the fact that the free-energy density in eq. (\ref{eq:free_energy_density-beta}) is a \emph{self-averaging} random variable, meaning that, in the thermodynamic limit, its distribution is sharply peaked around its typical value over different realizations of the training set. As we have seen in the main manuscript, the typical free-energy density can be computed by means of the replica method through the introduction of $r \in \mathbb{N}$ distinct and independent copies of the same learning system:
\begin{equation}
    \mathcal{F}_{\beta} = -\underset{r\rightarrow 0^+}{\lim}\frac{d}{dr}\underset{p\rightarrow \infty}{\lim}\left[ \frac{\mathbb{E}_{\mathcal{D}} \mathcal{Z}^r_{\beta}\left(\mathcal{D}\right)}{p} \right]
\end{equation}
The averaged replicated partition function is therefore the starting point of the replica calculation. Writing it explicitly, we have:
\begin{equation}
    \mathbb{E}_{\mathcal{D}} \mathcal{Z}^r_{\beta}\left(\mathcal{D}\right) = \mathbb{E}_{\mathcal{D}} \left[\int \prod_{a = 1}^r d\mathbf{w}^a \prod_{a = 1}^r P_w\left( \mathbf{w}^a\right) \prod_{\mu = 1}^n \prod_{a = 1}^r P_y\left(y^{\mu}|\frac{\mathbf{x}_{\mu} \cdot \mathbf{w}^a}{\sqrt{p}} \right)\right]
\end{equation}
\paragraph{Average over the training dataset} The first step of the replica recipe is the average over the training dataset.
To this end, we first define the pre-activations as:
\begin{equation}
    h_a^{\mu} = \frac{\mathbf{x}_{\mu} \cdot \mathbf{w}^a}{\sqrt{p}}
\end{equation}
We then express this definition in terms of a Dirac delta and its integral representation as:
\begin{equation}
    1 \propto \int \prod_{a = 1}^r\prod_{\mu = 1}^n dh^{\mu}_a \ \delta\left( h_a^{\mu} - \frac{\mathbf{x}_{\mu} \cdot \mathbf{w}^a}{\sqrt{p}}\right) = \int \prod_{a = 1}^r\prod_{\mu = 1}^n \frac{dh^{\mu}_a d\hat{h}^{\mu}_a}{2\pi} \ \mbox{exp}\left(i\hat{h}_{a}^{\mu}\left( h_a^{\mu} - \frac{\mathbf{x}_{\mu} \cdot \mathbf{w}^a}{\sqrt{p}}\right)\right)
\end{equation}
Finally, we insert this factor of one into the expression of the replicated partition function, thus getting:
\begin{equation}
\begin{split}
    \mathbb{E}_{\mathcal{D}} \mathcal{Z}^r_{\beta}\left(\mathcal{D}\right) &= \int \prod_{a = 1}^r d\mathbf{w}^a \prod_{a = 1}^r P_w\left( \mathbf{w}^a\right) \prod_{\mu = 1}^n \prod_{a = 1}^r \frac{dh^{\mu}_a d\hat{h}^{\mu}_a}{2\pi} \mbox{exp}\left(i\sum_{a=1}^r\hat{h}_{\mu}^a h_{\mu}^a \right) \times\\
    &\times \prod_{\mu = 1}^n \mathbb{E}_{y_\mu} \left[\prod_{a = 1}^r P_y\left(y_{\mu}|h_{\mu}^a \right)\mathbb{E}_{\mathbf{x}_\mu}\left[\mbox{exp}\left(-i\sum_{a = 1}^r\hat{h}_{\mu}^a \frac{\mathbf{x}_{\mu} \cdot \mathbf{w}^a}{\sqrt{p}}\right)\right]\right]
\end{split}
\end{equation}
In the high-dimensional limit, the average over the input data-points $\{ \mathbf{x}_{\mu}\}_{\mu = 1}^n$ can be performed by Taylor-expanding the exponential up to second order in $1/\sqrt{p}$:
\begin{equation}
\begin{split}
    \mathbb{E}_{\mathbf{x}_\mu}\left[\mbox{exp}\left(-i\sum_{a = 1}^r\hat{h}_{\mu}^a \frac{\mathbf{x}_{\mu} \cdot \mathbf{w}^a}{\sqrt{p}}\right)\right] &= 1 -i\sum_{a = 1}^r\hat{h}_{\mu}^a \frac{\mathbb{E}_{\mathbf{x}_{\mu}}\left[\mathbf{x}_{\mu}\right] \cdot \mathbf{w}^a}{\sqrt{p}} - \frac{1}{2} \sum_{a,b = 1}^r \frac{\left(\mathbf{w}^a\right)^t \mathbb{E}_{\mathbf{x}_{\mu}}\left[\mathbf{x}^t_{\mu}\mathbf{x}_{\mu}\right]\mathbf{w}^b}{p}\hat{h}_{\mu}^a \hat{h}_{\mu}^b\\
    & = \mbox{exp}\left( -i\sum_{a = 1}^r \frac{\mathbf{m} \cdot \mathbf{w}^a}{p}\hat{h}_{\mu}^a - \frac{1}{2} \sum_{a,b = 1}^r \frac{\left(\mathbf{w}^a\right)^t \Sigma \mathbf{w}^b}{p}\hat{h}_{\mu}^a \hat{h}_{\mu}^b\right)
\end{split}
\end{equation}
where we have defined $\mathbf{m}/\sqrt{p}$ as the mean of the input data-points and $\Sigma$ as the corresponding covariance matrix. Note that, in performing the average over the inputs, we did not make any particular assumption on the input-data distribution, except that it must have a well-defined mean and covariance. This is, for instance, the case of the Gaussian covariate model and of the Gaussian mixture model defined in sec. (\ref{Model}) of the main manuscript.
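The fact that only the mean and the covariance of the inputs matter for the low-dimensional projections entering the computation can be checked numerically; the following short sketch (with an arbitrary, non-Gaussian uniform input distribution of zero mean and identity covariance, and an arbitrary conjugate variable $t$) compares the empirical characteristic function of a pre-activation with the Gaussian prediction. It is only meant as an illustration of this point.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
p, n_samples, t = 400, 20000, 0.7
w = rng.standard_normal(p)                      # a fixed weight vector

# non-Gaussian inputs with zero mean and identity covariance
x = rng.uniform(-np.sqrt(3), np.sqrt(3), size=(n_samples, p))
proj = x @ w / np.sqrt(p)                       # pre-activation h = x.w / sqrt(p)

empirical = np.mean(np.exp(-1j * t * proj))
gaussian = np.exp(-0.5 * t**2 * (w @ w) / p)    # exp(-t^2 w^T Sigma w / (2p)), m = 0
print(empirical, gaussian)                      # the two should be close
\end{verbatim}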
Having averaged over the input data-points, the replicated partition function turns out to be given by:
\begin{equation}
\begin{split}
    \mathbb{E}_{\mathcal{D}} \mathcal{Z}^r_{\beta}\left(\mathcal{D}\right) &= \int \prod_{a = 1}^r d\mathbf{w}^a \prod_{a = 1}^r P_w\left( \mathbf{w}^a\right) \prod_{\mu = 1}^n \prod_{a = 1}^r \frac{dh^{\mu}_a d\hat{h}^{\mu}_a}{2\pi} \mbox{exp}\left(i\sum_{a=1}^r\hat{h}_{\mu}^a h_{\mu}^a \right) \times\\
    &\times \prod_{\mu = 1}^n \mathbb{E}_{y_\mu} \left[\prod_{a = 1}^r P_y\left(y_{\mu}|h_{\mu}^a \right)\mbox{exp}\left( -i\sum_{a = 1}^r \frac{\mathbf{m} \cdot \mathbf{w}^a}{p}\hat{h}_{\mu}^a - \frac{1}{2} \sum_{a,b = 1}^r \frac{\left(\mathbf{w}^a\right)^t \Sigma \mathbf{w}^b}{p}\hat{h}_{\mu}^a \hat{h}_{\mu}^b\right)\right]
\end{split}
\end{equation}
\paragraph{Introduction of the overlap parameters.} As a direct consequence of the averaging process over the input data-points, the replicas are now interacting. To proceed further in the calculation, and in order to eventually decouple the different copies of the learning system, we once again introduce the following overlap parameters:
\begin{equation}
    M^a = \frac{\mathbf{m} \cdot \mathbf{w}^a}{p} \hspace{15mm} Q^{ab} = \frac{\left(\mathbf{w}^a \right)^t \Sigma \mathbf{w}^b}{p}
\end{equation}
by means of Dirac deltas and their integral representation as:
\begin{equation}
\begin{split}
    1 &\propto \int \prod_{a = 1}^r dM^a \prod_{a = 1}^r \delta\left( M^a - \frac{\mathbf{m} \cdot \mathbf{w}^a}{p}\right) \prod_{a \leq b} dQ^{ab} \prod_{a \leq b} \delta\left( Q^{ab} - \frac{\left(\mathbf{w}^a \right)^t \Sigma \mathbf{w}^b}{p} \right)\\
    &= \int \prod_{a = 1}^r \frac{dM^a d\hat{M}^a}{2\pi} \ \mbox{exp}\left(i\sum_{a = 1}^r\hat{M}^{a}\left( M^a - \frac{\mathbf{m} \cdot \mathbf{w}^a}{p}\right)\right)\int \prod_{a \leq b}^r \frac{dQ^{ab} d\hat{Q}^{ab}}{2\pi} \ \mbox{exp}\left(i\sum_{a \leq b}\hat{Q}^{ab}\left( Q^{ab} - \frac{\left(\mathbf{w}^a \right)^t \Sigma \mathbf{w}^b}{p}\right)\right)
\end{split}
\end{equation}
As before, we can insert this factor of one into the expression of the replicated partition function, thus getting:
\begin{equation}
    \mathbb{E}_{\mathcal{D}} \mathcal{Z}^r_{\beta}\left(\mathcal{D}\right) = \int \prod_{a = 1}^r \frac{dM^a d\hat{M}^a}{2\pi} \int \prod_{a \leq b}^r \frac{dQ^{ab} d\hat{Q}^{ab}}{2\pi} \mbox{exp}\left(p \Psi_\beta^{(r)}\left( \{ M^a, \hat{M}^a \}_{a = 1}^r, \{Q^{ab}, \hat{Q}^{ab} \}_{a\leq b} \right) \right)\label{eq:replica_saddle_point_appendix}
\end{equation}
with the potential $\Psi_\beta^{(r)}\left( \cdot \right)$ given by the sum of the following three terms:
\begin{equation}
\begin{split}
    &\psi = i\sum_{a = 1}^r \hat{M}^a M^a + i\sum_{a\leq b} \hat{Q}^{ab} Q^{ab}\\
    &\psi_w = \frac{1}{p}\mbox{log} \int \prod_{a = 1}^r d\mathbf{w}^a \prod_{a = 1}^r P_w\left( \mathbf{w}^a \right) \mbox{exp}\left(-i\sum_{a = 1}^r \hat{M}^a \mathbf{m}\cdot \mathbf{w}^a - i\sum_{a\leq b} \hat{Q}^{ab} \left( \mathbf{w}^a \right)^t \Sigma \mathbf{w}^b \right)\\
    &\psi_y = \alpha \mbox{log}\ \mathbb{E}_y \left[ \int \prod_{a = 1}^r \frac{dh^a d\hat{h}_a}{2\pi} \mbox{exp}\left(i\sum_{a = 1}^r \hat{h}_a h_a\right) \prod_{a = 1}^r P_y\left(y|h_a \right) \mbox{exp}\left( -i\sum_{a = 1}^r \hat{h}_a M^a - \frac{1}{2}\sum_{a,b = 1}^r \hat{h}_a \hat{h}_b Q^{ab} \right)\right]
\end{split}\label{eq:replica_potentials_appendix}
\end{equation}
The term $\psi_w \left( \cdot \right)$ is often called prior channel or entropic potential because it encodes all the prior information on the weights
of the learning model through the prior $P_w\left( \cdot \right)$, while $\psi_y\left( \cdot \right)$ is often called output channel or energetic potential because it incorporates all the information relative to both the labels and the training loss by means of the likelihood $P_y\left( \cdot \right)$. All the above steps have led us to express the averaged replicated partition function as a saddle-point integral with respect to the input dimension $p$. In the limit $r\rightarrow 0^+$ and $p\rightarrow\infty$, this directly leads to eq. (\ref{eq:saddle-point_integrals_before_replica_ansatz}) in the main manuscript. The drawback of this operation is that we now have to deal with $r$ interacting copies of the learning system. To proceed further in the calculation, we need to postulate a specific replica structure. In the following, we will thus investigate two possible assumptions: the so-called \emph{replica-symmetry} (RS) assumption and the so-called \emph{one-step replica symmetry breaking} (1-RSB) assumption.
\paragraph{Replica Symmetry} As shown in the main manuscript, since all replicas have been introduced independently from each other, with no specific differences among them, it seems natural to assume that they should all play the same role. This naturally leads to the assumption that the overlap parameters and their hatted counterparts should not depend on the specific replica index. In particular, we assume:
\begin{equation}
\begin{split}
    Q^{ab}&= \begin{cases}
    R & \text{if}\ a = b \\
    Q & \text{otherwise}
    \end{cases}
    \hspace{15mm}
    -i\hat{Q}^{ab}= \begin{cases}
    -\frac{1}{2}\hat{R} & \text{if}\ a = b \\
    \hat{Q} & \text{otherwise}
    \end{cases}\\\\
    M^a &= M \hspace{10mm} \forall a \hspace{22mm} -i\hat{M}^a = \hat{M} \\
\end{split}
\end{equation}
Applying this ansatz to eqs. (\ref{eq:replica_saddle_point_appendix})-(\ref{eq:replica_potentials_appendix}), after some steps of algebra we then get the following expressions for the three terms of the $\Psi_{\beta}^{(r)} \left( \cdot \right)$ potential. Concerning the first term we have:
\begin{equation}
    \psi = -rM\hat{M} + \frac{r}{2}R\hat{R} - \frac{1}{2}r\left( r - 1\right) Q\hat{Q}
\end{equation}
Concerning the prior term we instead have:
\begin{equation}
    \psi_w = \frac{1}{p}\mbox{log} \int \prod_{a = 1}^r d\mathbf{w}^a \prod_{a = 1}^r P_w\left( \mathbf{w}^a \right) \mbox{exp}\left(\hat{M}\sum_{a = 1}^r \mathbf{m}\cdot \mathbf{w}^a - \frac{\hat{R}}{2}\sum_{a=1}^r \left( \mathbf{w}^a \right)^t \Sigma \mathbf{w}^a +\frac{\hat{Q}}{2} \sum_{a\leq b} \left( \mathbf{w}^a \right)^t \Sigma \mathbf{w}^b \right)
\end{equation}
In order to decouple the replicas, we can then apply the following Hubbard-Stratonovich transformation:
\begin{equation}
    \mbox{exp}\left(\frac{\hat{Q}}{2} \sum_{a\leq b} \left( \mathbf{w}^a \right)^t \Sigma \mathbf{w}^b \right) = \int \mathcal{D}\boldsymbol{\xi} \ \mbox{exp}\left(\sum_{a = 1}^r\left( \mathbf{w}^a \right)^t\left( \hat{Q}\Sigma \right)^{1/2} \boldsymbol{\xi}\right)
\end{equation}
with $\boldsymbol{\xi} \sim \mathcal{N}\left(0, \mathbf{I}_p \right)$.
We can insert the Hubbard-Stratonovich transformation into the expression of the prior channel which, having decoupled all the replicas, can now be factorized over the replica index:
\begin{equation}
    \psi_w = \frac{1}{p}\mbox{log}\ \mathbb{E}_{\boldsymbol{\xi}} \left[ \left(\int d\mathbf{w} P_w\left( \mathbf{w} \right) \mbox{exp}\left(\hat{M} \mathbf{m}\cdot \mathbf{w}- \frac{\hat{R}}{2} \mathbf{w}^t \Sigma \mathbf{w} + \mathbf{w}^t\left( \hat{Q}\Sigma \right)^{1/2} \boldsymbol{\xi}\right)\right)^r\right]
\end{equation}
Following the same steps, we can express the output channel in the same way as:
\begin{equation}
    \psi_y = \alpha\, \mbox{log}\ \mathbb{E}_{y,\xi} \left[ \left(\int \frac{dhd\hat{h}}{2\pi} P_y\left(y|h \right) \mbox{exp}\left( -\frac{1}{2} \left( R - Q \right)\hat{h}^2 + i \left( h + \sqrt{Q}\xi - M \right) \hat{h}\right)\right)^r \right]
\end{equation}
with $\xi \sim \mathcal{N}\left(0, 1 \right)$. However, in this case we are not yet done, since we can further carry out the Gaussian integral over $\hat{h}$, thus getting:
\begin{equation}
    \psi_y = \alpha\, \mbox{log}\ \mathbb{E}_{y,\xi} \left[ \left(\int \frac{dh}{\sqrt{2\pi\left(R - Q \right)}} P_y\left(y|h \right) \mbox{exp}\left( - \frac{\left( h - \sqrt{Q}\xi - M \right)^2}{2\left( R - Q \right)}\right)\right)^r \right]
\end{equation}
At this point, we can combine these results and take both the limit $r \rightarrow 0^+$ and the limit $p \rightarrow \infty$. This allows us to obtain the following free-energy density under the RS assumption:
\begin{equation}
    \mathcal{F}_{\beta} = -\underset{r\rightarrow 0^+}{\lim}\frac{d}{dr}\underset{p\rightarrow \infty}{\lim}\left[ \frac{\mathbb{E}_{\mathcal{D}} \mathcal{Z}^r_{\beta}\left(\mathcal{D}\right)}{p} \right] = \underset{Q,R,M,\hat{Q},\hat{R},\hat{M}}{\mbox{extr}}\left[ \Psi_\beta^{(0)} \left(Q,R,M,\hat{Q},\hat{R},\hat{M} \right)\right]
    \label{eq:RS_free_energy_finite_beta}
\end{equation}
with the RS-potential $\Psi_\beta^{(0)}\left( \cdot \right)$ given by the sum of the following three potentials:
\begin{equation}
\begin{split}
    &\psi = \frac{1}{2}R\hat{R} + \frac{1}{2}Q\hat{Q} - M\hat{M}\\
    &\psi_w = \underset{p\rightarrow\infty}{\lim} \frac{1}{p} \mathbb{E}_{\boldsymbol{\xi}}\left[ \mbox{log}\int d\mathbf{w} P_w\left( \mathbf{w} \right) \mbox{exp}\left(-\frac{1}{2}\mathbf{w}^t \left( \hat{R} + \hat{Q} \right)\Sigma \mathbf{w} + \mathbf{w}^t\left( \left(\hat{Q}\Sigma \right)^{1/2} \boldsymbol{\xi} + \hat{M}\mathbf{m}\right)\right)\right]\\
    & \psi_y = \alpha \mathbb{E}_{y,\xi} \left[ \mbox{log} \int \frac{dh}{\sqrt{2\pi\left(R - Q \right)}}\mbox{exp}\left( - \frac{\left( h - \sqrt{Q}\xi - M \right)^2}{2\left( R - Q \right)}\right) P_y\left(y|h \right) \right]
\end{split}
\label{eq:RS_potentials_finite_beta}
\end{equation}
Indeed, in this limit, the only contribution to the saddle-point integrals in eqs. (\ref{eq:replica_potentials_appendix})-(\ref{eq:replica_saddle_point_appendix}) comes from the extremizers of the replica-symmetric potential $\Psi_\beta^{(0)}$.
\paragraph{One-step replica symmetry breaking} As pointed out in the main manuscript, the one-step replica symmetry breaking assumption aims to describe all those situations in which the space of solutions of the learning problem in eq. (\ref{eq:optimization_w}) breaks into more than one ensemble of solutions.
Because of that, in this case, it seems more reasonable to introduce one overlap parameter for replicas belonging to the same ensemble and another one for replicas belonging to different ensembles. We therefore assume:
\begin{equation}
\begin{split}
    Q^{ab}&= \begin{cases}
    R & \text{if}\ a = b \\
    Q_1 & \text{if}\ a \neq b \ \mbox{and} \ 0<\vert a - b \vert< x_0\\
    Q_0 &\mbox{otherwise}
    \end{cases}
    \hspace{15mm}
    -i\hat{Q}^{ab}= \begin{cases}
    -\frac{1}{2}\hat{R} & \text{if}\ a = b \\
    \hat{Q}_1 & \text{if}\ a \neq b \ \mbox{and} \ 0<\vert a - b \vert< x_0\\
    \hat{Q}_0 &\mbox{otherwise}
    \end{cases}\\\\
    M^a &= M \hspace{10mm} \forall a \hspace{51mm} -i\hat{M}^a = \hat{M} \\
\end{split}
\end{equation}
In other words, the 1-RSB assumption leads to the following structure for the overlap matrix $O$ and its conjugate $\hat{O}$:
\begin{equation}
    O = \begin{pmatrix}
    B & Q_0 \\
    Q_0 & B
    \end{pmatrix} \in \mathbb{R}^{r\times r}
    \hspace{30mm}
    \hat{O} = \begin{pmatrix}
    \hat{B} & \hat{Q}_0 \\
    \hat{Q}_0 & \hat{B}
    \end{pmatrix} \in \mathbb{R}^{r\times r}
\end{equation}
with the block matrices $B$ and $\hat{B}$ given by:
\begin{equation}
    B = \begin{pmatrix}
    R & Q_1 & \dots & Q_1 \\
    Q_1 & R & \dots & Q_1 \\
    \vdots & \vdots & \ddots & \vdots\\
    Q_1 & Q_1 & \dots & R
    \end{pmatrix} \in \mathbb{R}^{x_0\times x_0}
    \hspace{22mm}
    \hat{B} = \begin{pmatrix}
    -\frac{1}{2}\hat{R} & \hat{Q}_1 & \dots & \hat{Q}_1 \\
    \hat{Q}_1 & -\frac{1}{2}\hat{R} & \dots & \hat{Q}_1 \\
    \vdots & \vdots & \ddots & \vdots\\
    \hat{Q}_1 & \hat{Q}_1 & \dots & -\frac{1}{2}\hat{R}
    \end{pmatrix} \in \mathbb{R}^{x_0\times x_0}
\end{equation}
Given the structure of the two matrices $O$ and $\hat{O}$, we can easily decompose each of them into the sum of three matrices, as follows:
\begin{equation}
\begin{split}
    O &= \left(R - Q_1\right) \mathbb{I}_{r\times r} + \left( Q_1 - Q_0 \right) \begin{pmatrix}
    \mathbf{1}_{x_0 \times x_0} & 0 & \dots & 0 \\
    0 & \mathbf{1}_{x_0 \times x_0} & \dots & 0 \\
    \vdots & \vdots & \ddots & \vdots\\
    0 & 0 & \dots & \mathbf{1}_{x_0 \times x_0}
    \end{pmatrix}_{r\times r} + Q_0\mathbf{1}_{r\times r}\\\\
    \hat{O} &= -\left(\frac{1}{2}\hat{R} + \hat{Q}_1\right) \mathbb{I}_{r\times r} + \left( \hat{Q}_1 - \hat{Q}_0 \right) \begin{pmatrix}
    \mathbf{1}_{x_0 \times x_0} & 0 & \dots & 0 \\
    0 & \mathbf{1}_{x_0 \times x_0} & \dots & 0 \\
    \vdots & \vdots & \ddots & \vdots\\
    0 & 0 & \dots & \mathbf{1}_{x_0 \times x_0}
    \end{pmatrix}_{r\times r} + \hat{Q}_0\mathbf{1}_{r\times r}
\end{split}
\end{equation}
where $\mathbf{1}_{k \times k}$ denotes the $k\times k$ matrix whose entries are all equal to one. Applying this ansatz to eqs. (\ref{eq:replica_saddle_point_appendix})-(\ref{eq:replica_potentials_appendix}), after some steps of algebra, we then get the following expressions for the three terms of the $\Psi_{\beta}^{(r)} \left( \cdot \right)$ potential.
Concerning the first term we have: \begin{equation} \psi = -rM\hat{M} + \frac{1}{2} R\hat{R} - \frac{r}{2}\left(x_0 - 1 \right)Q_1\hat{Q}_1 -\frac{r}{2}\left(r - x_0 \right)Q_0 \hat{Q}_0 \end{equation} Concerning the prior channel $\Psi_w\left(\cdot \right)$ we instead obtain: \begin{equation} \begin{split} \psi_w &= \frac{1}{p}\mbox{log} \int \prod_{a = 1}^r d\mathbf{w}^a \prod_{a = 1}^r P_w\left( \mathbf{w}^a\right)\mbox{exp}\left( \hat{M} \sum_{a = 1}^r \mathbf{m} \cdot \mathbf{w}^a -\frac{\hat{R} + \hat{Q_1}}{2} \sum_{a = 1}^r \left(\mathbf{w}^a\right)^t \Sigma \mathbf{w}^a\right)\times\\ &\times \mbox{exp}\left( \frac{\hat{Q}_0}{2} \sum_{a,b = 1}^r \left( \mathbf{w}^a \right)^t \Sigma \mathbf{w}^b + \frac{\hat{Q}_1 - \hat{Q}_0}{2} \sum_{k = 1}^{r/x_0} \sum_{a,b\in k} \left( \mathbf{w}^a \right)^t \Sigma \mathbf{w}^b\right) \end{split} \end{equation} As in the replica symmetric ansatz, in order to decouple the replicas, we apply the two following Hubbard-Stratonovich transformations: \begin{equation} \begin{split} &\mbox{exp}\left(\frac{\hat{Q}_0}{2} \sum_{a,b = 1}^r \left( \mathbf{w}^a \right)^t \Sigma \mathbf{w}^b \right) = \mathbb{E}_{\boldsymbol{\xi}_0}\left[ \mbox{exp}\left( \sum_{a = 1}^r \left( \mathbf{w}^a \right)^t \left( \hat{Q}_0 \Sigma \right)^{1/2}\boldsymbol{\xi}_0 \right)\right]\\ &\mbox{exp}\left( \frac{\hat{Q}_1 - \hat{Q}_0}{2} \sum_{k = 1}^{r/x_0} \sum_{a,b\in k} \left( \mathbf{w}^a \right)^t \Sigma \mathbf{w}^b \right) = \prod_{k = 1}^{r/x_0} \mathbb{E}_{\boldsymbol{\xi}_1^k}\left[ \mbox{exp}\left( \sum_{a \in k} \left( \mathbf{w}^a \right)^t \left( \left(\hat{Q}_1 - \hat{Q}_0\right) \Sigma \right)^{1/2}\boldsymbol{\xi}_1^k \right)\right] \end{split} \end{equation} with $\mathbf{\xi}_0, \mathbf{\xi}^k_1 \sim \mathcal{N}\left(0, \mathbf{I}_p\right)$. Having decoupled the different copies of the learning system, we can now factorize over the replica index, thus getting: \begin{equation} \begin{split} \psi_w &= \frac{1}{p} \mbox{log} \mathbb{E}_{\boldsymbol{\xi}_0}\left[ \mathbb{E}_{\boldsymbol{\xi}_1} \left[ \int d\mathbf{w} P_w\left( \mathbf{w}\right)\mbox{exp}\left( -\frac{1}{2} \mathbf{w}^t \left( \hat{R} + \hat{Q}_1 \right) \Sigma \mathbf{w} \right) \right. \right. \times\\ &\left. \left. \times \mbox{exp}\left(\mathbf{w}^t \left( \left(\left(\hat{Q}_1 - \hat{Q}_0 \right)\Sigma\right)^{1/2} + \left( \hat{Q}_0 \Sigma \right)^{1/2} \boldsymbol{\xi}_0 + \hat{M}\mathbf{m}\right) \right)\right]^{x_0}\right]^{r/x_0} \end{split} \end{equation} The same steps can be applied for the output channel $\Psi_y$, and, by further solving the Gaussian integral over $\hat{h}$, we then get: \begin{equation} \psi_y = \alpha\mathbb{E}_{y,\xi_0} \mbox{log}\ \left[ \mathbb{E}_{\xi_1} \left[\int \frac{dh}{\sqrt{2\pi\left( R - Q_1 \right)}} \mbox{exp}\left(-\frac{\left( h - \sqrt{Q_0}\xi_0 -\sqrt{Q_1 - Q_0} \xi_1 \right)^2}{2\left( R - Q_1 \right)}\right)P_y\left( y|h\right) \right]^{x_0} \right]^{r/x_0} \end{equation} At this point, we can combine these results and then take both the limit of $r \rightarrow 0^+$ and the limit of $p \rightarrow \infty$. 
This automatically allows to obtain the following free-energy density in the 1-RSB assumption: \begin{equation} \mathcal{F}_{\beta} = -\underset{r\rightarrow 0^+}{\lim}\frac{d}{dr}\underset{p\rightarrow \infty}{\lim}\left[ \frac{\mathbb{E}_{\mathcal{D}} \mathcal{Z}^r_{\beta}\left(\mathcal{D}\right)}{p} \right] = \underset{Q_0,Q_1,R,M,\hat{Q}_0,\hat{Q}_1,\hat{R},\hat{M}}{\mbox{extr}}\left[ \Psi_\beta^{(0)} \left(Q_0,Q_1,R,M,\hat{Q}_0,\hat{Q}_1,\hat{R},\hat{M}\right)\right] \end{equation} with the 1RSB-potential $\Psi_\beta^{(0)}\left( \cdot \right)$ being given by the sum of the following three potentials: \begin{equation} \begin{split} &\psi = \frac{1}{2}R\hat{R} - \frac{1}{2}\left(x_0 - 1 \right)Q_1\hat{Q}_1 + \frac{1}{2}x_0 Q_0 \hat{Q}_0 - M\hat{M}\\ &\psi_w = \underset{p\rightarrow \infty}{\lim} \frac{1}{p x_0} \mathbb{E}_{\boldsymbol{\xi}_0} \mbox{log} \mathbb{E}_{\boldsymbol{\xi}_1} \left[ \int d\mathbf{w} P_w\left( \mathbf{w}\right)\mbox{exp}\left( -\frac{1}{2} \mathbf{w}^t \left( \hat{R} + \hat{Q}_1 \right) \Sigma \mathbf{w} \right) \right. \times\\ &\hspace{8mm}\left. \times \mbox{exp}\left(\mathbf{w}^t \left( \left(\left(\hat{Q}_1 - \hat{Q}_0 \right)\Sigma\right)^{1/2} + \left( \hat{Q}_0 \Sigma \right)^{1/2} \boldsymbol{\xi}_0 + \hat{M}\mathbf{m}\right) \right)\right]^{x_0}\\ & \psi_y = \frac{\alpha}{x_0} \mathbb{E}_{y,\xi_0} \mbox{log} \mathbb{E}_{\xi_1} \left[ \int \frac{dh}{\sqrt{2\pi\left( R - Q_1 \right)}} \mbox{exp}\left(-\frac{\left( h - \sqrt{Q_0}\xi_0 -\sqrt{Q_1 - Q_0} \xi_1 \right)^2}{2\left( R - Q_1 \right)}\right)P_y\left( y|h\right) \right]^{x_0} \end{split} \end{equation} Indeed, in this limit, the only contribution to the saddle-point integrals in eq. (\ref{eq:replica_potentials_appendix})-(\ref{eq:replica_saddle_point_appendix}) comes from the extremizers of the 1-RSB potential $\Psi_\beta^{(0)}$. \subsubsection{Gaussian Prior} As we have pointed out in the main manuscript, the learning problem we are interested in is linear classification with random labels and $l_2$-regularization. In the context of Bayesian inference, the $l_2$-regularization plays the role of a Gaussian prior on the learning weights. In this section, we will thus evaluate the free-energy density in the specific case of Gaussian priors, namely: \begin{equation} P_w\left(\mathbf{w} \right) = \frac{1}{\left(2\pi\right)^{p/2}} \mbox{exp}\left(-\frac{1}{2}\beta \lambda \vert\vert \mathbf{w}\vert \vert_2^2 \right) \label{eq:Gaussian_Prior} \end{equation} \paragraph{Replica Symmetry} Given the free-energy density in eq. (\ref{eq:RS_free_energy_finite_beta})-(\ref{eq:RS_potentials_finite_beta}), we can see that the entropic potential is the only one depending on the prior distribution. When evaluated on the Gaussian prior in eq. (\ref{eq:Gaussian_Prior}), the prior channel acquires the following form: \begin{equation} \psi_w = \underset{p\rightarrow\infty}{\lim} \frac{1}{p} \mathbb{E}_{\boldsymbol{\xi}}\left[ \mbox{log}\int d\mathbf{w} \ \mbox{exp}\left(-\frac{1}{2}\mathbf{w}^t \left(\beta\lambda\mathbb{I}_{p\times p}+\hat{V}\Sigma\right) \mathbf{w} + \mathbf{w}^t\left( \left(\hat{Q}\Sigma \right)^{1/2} \boldsymbol{\xi} + \hat{M}\mathbf{m}\right)\right)\right] \end{equation} where we have defined $\hat{V} = \hat{R} + \hat{Q}$. 
With the choice of a Gaussian prior, the integral over the learning weights can be easily solved, as it is nothing but a standard Gaussian integral, providing the following expression for the entropic potential evaluated on Gaussian priors:
\begin{equation}
\begin{split}
    \psi_w &= -\underset{p \rightarrow \infty}{\mbox{lim}}\frac{1}{2p} \mbox{tr} \ \mbox{log}\left( \beta \lambda \mathbb{I}_{p\times p} + \hat{V} \Sigma \right) +\\
    &+\underset{p \rightarrow \infty}{\mbox{lim}}\frac{1}{2p} \mathbb{E}_{\boldsymbol{\xi}} \left[ \left(\left( \hat{Q}\Sigma\right)^{1/2} \boldsymbol{\xi} + \hat{M}\mathbf{m}\right)^t\left( \beta \lambda \mathbb{I}_{p\times p} + \hat{V} \Sigma \right)^{-1}\left(\left( \hat{Q}\Sigma\right)^{1/2} \boldsymbol{\xi} + \hat{M} \mathbf{m}\right) \right]
\end{split}
\end{equation}
where we have used the fact that the logarithm of the determinant of a matrix equals the trace of the logarithm of the same matrix. By computing the remaining Gaussian integral over $\boldsymbol{\xi}$, we finally get:
\begin{equation}
\begin{split}
    \psi_w &= -\underset{p \rightarrow \infty}{\mbox{lim}}\frac{1}{2p} \mbox{tr} \ \mbox{log}\left( \beta \lambda \mathbb{I}_{p\times p} +\hat{V}\Sigma \right) + \underset{p \rightarrow \infty}{\mbox{lim}}\frac{1}{2p} \mbox{tr} \left( \left( \beta \lambda \mathbb{I}_{p\times p} + \hat{V} \Sigma \right)^{-1}\hat{Q}\Sigma \right) +\\
    &+ \underset{p\rightarrow \infty}{\lim} \frac{1}{2p} \left(\hat{M}\mathbf{m} \right)^t \left( \beta\lambda \mathbb{I}_{p\times p} + \hat{V}\Sigma \right)^{-1}\left(\hat{M}\mathbf{m} \right)
\end{split}
\end{equation}
The resulting free-energy density under the RS assumption for a Gaussian prior is then given by:
\begin{equation}
    \mathcal{F}_{\beta} = \underset{Q,V,M,\hat{Q},\hat{V},\hat{M}}{\mbox{extr}}\left[ \Psi_\beta^{(0)} \left(Q,V,M,\hat{Q},\hat{V},\hat{M} \right)\right]
    \label{eq:RS_free_energy_finite_beta_gp}
\end{equation}
with the RS-potential $\Psi_\beta^{(0)}\left( \cdot \right)$ evaluated on Gaussian priors given by the sum of the following three potentials:
\begin{equation}
\begin{split}
    &\psi = \frac{1}{2}\left(V + Q \right)( \hat{V} - \hat{Q}) + \frac{1}{2}Q\hat{Q} - M\hat{M}\\
    &\psi_w = -\underset{p \rightarrow \infty}{\mbox{lim}}\frac{1}{2p} \mbox{tr} \ \mbox{log}\left( \beta \lambda \mathbb{I}_{p\times p} + \hat{V} \Sigma \right) + \underset{p \rightarrow \infty}{\mbox{lim}}\frac{1}{2p} \mbox{tr} \left( \left( \beta \lambda \mathbb{I}_{p\times p} + \hat{V} \Sigma \right)^{-1}\hat{Q}\Sigma \right)+\\
    &\hspace{9mm}+\underset{p\rightarrow \infty}{\lim} \frac{1}{2p} \left(\hat{M}\mathbf{m} \right)^t \left(\beta\lambda \mathbb{I}_{p\times p} + \hat{V}\Sigma \right)^{-1}\left(\hat{M}\mathbf{m} \right)\\
    & \psi_y = \alpha \mathbb{E}_{y,\xi} \left[\ \mbox{log} \int \frac{dh}{\sqrt{2\pi V}}\mbox{exp}\left( - \frac{\left( h - \sqrt{Q}\xi - M \right)^2}{2V}\right) P_y\left(y|h \right) \right]
\end{split}
\label{eq:RS_potentials_finite_beta}
\end{equation}
where we have defined $V = R - Q$.
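The Gaussian integral over $\boldsymbol{\xi}$ performed above can also be checked numerically. The following minimal sketch (arbitrary values of the order parameters, a random positive-definite $\Sigma$, and a Monte-Carlo estimate) compares $\mathbb{E}_{\boldsymbol{\xi}}\big[\mathbf{b}^t(\beta\lambda\mathbb{I}_{p\times p}+\hat{V}\Sigma)^{-1}\mathbf{b}\big]$, with $\mathbf{b}=(\hat{Q}\Sigma)^{1/2}\boldsymbol{\xi}+\hat{M}\mathbf{m}$, against the trace and quadratic terms entering $\psi_w$.
\begin{verbatim}
import numpy as np
from scipy.linalg import sqrtm

rng = np.random.default_rng(0)
p = 50
G = rng.standard_normal((p, p)) / np.sqrt(p)
Sigma = G @ G.T + 0.5 * np.eye(p)               # positive-definite covariance
m = rng.standard_normal(p)
blam, V_hat, Q_hat, M_hat = 0.3, 1.2, 0.8, 0.5  # beta*lambda and order parameters

A_inv = np.linalg.inv(blam * np.eye(p) + V_hat * Sigma)
S_half = np.real(sqrtm(Q_hat * Sigma))

xi = rng.standard_normal((50000, p))
b = xi @ S_half + M_hat * m                     # b = (Qhat Sigma)^{1/2} xi + Mhat m
mc = np.mean(np.sum((b @ A_inv) * b, axis=1))   # Monte-Carlo E[ b^T A^{-1} b ]

exact = np.trace(A_inv @ (Q_hat * Sigma)) + M_hat**2 * m @ A_inv @ m
print(mc, exact)                                # should agree up to MC error
\end{verbatim}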
\paragraph{Replica Symmetry Breaking} The prior channel under the one-step replica symmetry breaking assumption, when evaluated on Gaussian priors, is given by the following expression:
\begin{equation}
\begin{split}
    &\psi_w = \underset{p\rightarrow \infty}{\lim} \frac{1}{p x_0} \mathbb{E}_{\boldsymbol{\xi}_0} \mbox{log} \mathbb{E}_{\boldsymbol{\xi}_1} \left[ \int d\mathbf{w} \ \mbox{exp}\left( -\frac{1}{2} \mathbf{w}^t \left( \beta \lambda \mathbb{I}_{p\times p} + \hat{V}_1 \Sigma\right) \mathbf{w} \right) \right. \times\\
    &\hspace{8mm}\left. \times \mbox{exp}\left(\mathbf{w}^t \left( \left(\left(\hat{Q}_1 - \hat{Q}_0 \right)\Sigma\right)^{1/2}\boldsymbol{\xi}_1 + \left( \hat{Q}_0 \Sigma \right)^{1/2} \boldsymbol{\xi}_0 + \hat{M}\mathbf{m}\right) \right)\right]^{x_0}
\end{split}
\end{equation}
where we have defined $\hat{V}_1 = \hat{R} + \hat{Q}_1$. The integral over $\mathbf{w}$ can be easily solved, as it is nothing but a standard Gaussian integral, yielding the following expression for the prior channel $\psi_w \left( \cdot \right)$:
\begin{equation}
\begin{split}
    \psi_w &= - \underset{p \rightarrow \infty}{\lim} \frac{1}{2p} \mbox{tr}\ \mbox{log}\left(\beta \lambda \mathbb{I}_{p\times p} + \hat{V}_1 \Sigma \right) + \underset{p \rightarrow \infty}{\lim} \frac{1}{p x_0} \mathbb{E}_{\boldsymbol{\xi}_0}\mbox{log} \left[ \mathbb{E}_{\boldsymbol{\xi}_1}\left[\mbox{exp}\left(\frac{x_0}{2}\, \mathbf{b}^t \left( \beta \lambda \mathbb{I}_{p\times p} + \hat{V}_1 \Sigma\right)^{-1} \mathbf{b}\right)\right]\right],\\
    &\hspace{8mm} \mathbf{b} = \left(\left(\hat{Q}_1 - \hat{Q}_0 \right)\Sigma\right)^{1/2} \boldsymbol{\xi}_1 + \left( \hat{Q}_0 \Sigma \right)^{1/2} \boldsymbol{\xi}_0 + \hat{M}\mathbf{m}
\end{split}
\end{equation}
The remaining integrals over $\boldsymbol{\xi}_0$ and $\boldsymbol{\xi}_1$ are standard Gaussian integrals too.
They can thus be directly solved, leading to the following expression for the prior channel: \begin{equation} \begin{split} \psi_w &= -\underset{p \rightarrow \infty}{\lim}\frac{1}{2p} \mbox{tr} \ \mbox{log}\left( \beta \lambda \mathbb{I}_{p\times p} +\hat{V}_1 \Sigma \right) + \underset{p \rightarrow \infty}{\mbox{lim}}\frac{1}{2p} \mbox{tr} \left( \left( \beta \lambda \mathbb{I}_{p\times p} + \hat{V}_1 \Sigma \right)^{-1}\hat{Q}_0\Sigma \right) +\\ &+\underset{p\rightarrow \infty}{\lim} \frac{1}{2p} \left(\hat{M}\mathbf{m} \right)^t \left( \beta\lambda \mathbb{I}_{p\times p} + \hat{V}_1\Sigma \right)^{-1}\left(\hat{M}\mathbf{m} \right)+\\ & - \underset{p \rightarrow \infty}{\lim}\frac{1}{2p x_0} \mbox{tr}\ \mbox{log} \left( \mathbb{I}_{p\times p} -x_0 \left(\left(\hat{Q}_1 - \hat{Q}_0 \right) \Sigma \right)^{1/2} \left( \beta \lambda \mathbb{I}_{p\times p} + \hat{V}_1 \Sigma \right)^{-1}\left(\left(\hat{Q}_1 - \hat{Q}_0 \right) \Sigma \right)^{1/2} \right) + \underset{p\rightarrow \infty}{\lim} \frac{x_0}{2p} \hat{Q}_0 \left( \hat{Q}_1 - \hat{Q}_0 \right) \times \\ & \times \mbox{tr}\left( \left( \beta \lambda \mathbb{I}_{p\times p} + \hat{V}_1 \Sigma \right)^{-2} \left( \mathbb{I}_{p\times p} -x_0 \left(\left(\hat{Q}_1 - \hat{Q}_0 \right) \Sigma \right)^{1/2} \left( \beta \lambda \mathbb{I}_{p\times p} + \hat{V}_1 \Sigma \right)^{-1}\left(\left(\hat{Q}_1 - \hat{Q}_0 \right) \Sigma \right)^{1/2} \right)\right)^{-1} +\\ & - \underset{p \rightarrow \infty}{\lim}\frac{x_0}{2p} \left( \hat{M} \mathbf{m} \right)^t \left( \left( \hat{Q}_1 - \hat{Q}_0 \right) \Sigma \right)^{1/2} \left( \beta \lambda \mathbb{I}_{p\times p} + \hat{V}_1 \Sigma \right)^{-1}\times \\ &\times \left( \mathbb{I}_{p\times p} -x_0 \left(\left(\hat{Q}_1 - \hat{Q}_0 \right) \Sigma \right)^{1/2} \left( \beta \lambda \mathbb{I}_{p\times p} + \hat{V}_1 \Sigma \right)^{-1}\left(\left(\hat{Q}_1 - \hat{Q}_0 \right) \Sigma \right)^{1/2} \right)^{-1}\times\\ &\times \left( \left( \hat{Q}_1 - \hat{Q}_0 \right) \Sigma \right)^{1/2} \left( \beta \lambda \mathbf{I} + \hat{V}_1 \Sigma \right)^{-1} \hat{M} \mathbf{m} \end{split} \end{equation} The resulting free-energy density under the RS assumption for Gaussian prior is then given by: \begin{equation} \mathcal{F}_{\beta} = \underset{Q_0,Q_1,V_1,M,\hat{Q}_0,\hat{Q}_1,\hat{V}_1,\hat{M}}{\mbox{extr}}\left[ \Psi_\beta^{(0)} \left(Q_0,Q_1,V_1,M,\hat{Q}_0,\hat{V}_1,\hat{R},\hat{M}\right)\right] \end{equation} with the 1RSB-potential $\Psi_\beta^{(0)}\left( \cdot \right)$ being given by the sum of the following three potentials: \begin{equation} \begin{split} &\psi = \frac{1}{2}\left(V_1 +Q_1\right)\left( \hat{V}_1 - \hat{Q}_1\right) - \frac{1}{2}\left(x_0 - 1 \right)Q_1\hat{Q}_1 + \frac{1}{2}x_0 Q_0 \hat{Q}_0 - M\hat{M}\\ & \psi_w = -\underset{p \rightarrow \infty}{\lim}\frac{1}{2p} \mbox{tr} \ \mbox{log}\left( \beta \lambda \mathbb{I}_{p\times p} +\hat{V}_1 \Sigma \right) + \underset{p \rightarrow \infty}{\mbox{lim}}\frac{1}{2p} \mbox{tr} \left( \left( \beta \lambda \mathbb{I}_{p\times p} + \hat{V}_1 \Sigma \right)^{-1}\hat{Q}_0\Sigma \right) +\\ &+\underset{p\rightarrow \infty}{\lim} \frac{1}{2p} \left(\hat{M}\mathbf{m} \right)^t \left( \beta\lambda \mathbb{I}_{p\times p} + \hat{V}_1\Sigma \right)^{-1}\left(\hat{M}\mathbf{m} \right)+\\ & - \underset{p \rightarrow \infty}{\lim}\frac{1}{2p x_0} \mbox{tr}\ \mbox{log} \left( \mathbb{I}_{p\times p} -x_0 \left(\left(\hat{Q}_1 - \hat{Q}_0 \right) \Sigma \right)^{1/2} \left( \beta \lambda \mathbb{I}_{p\times p} + \hat{V}_1 \Sigma 
\right)^{-1}\left(\left(\hat{Q}_1 - \hat{Q}_0 \right) \Sigma \right)^{1/2} \right) + \underset{p\rightarrow \infty}{\lim} \frac{x_0}{2p} \hat{Q}_0 \left( \hat{Q}_1 - \hat{Q}_0 \right) \times \\ & \times \mbox{tr}\left( \left( \beta \lambda \mathbb{I}_{p\times p} + \hat{V}_1 \Sigma \right)^{-2} \left( \mathbb{I}_{p\times p} -x_0 \left(\left(\hat{Q}_1 - \hat{Q}_0 \right) \Sigma \right)^{1/2} \left( \beta \lambda \mathbb{I}_{p\times p} + \hat{V}_1 \Sigma \right)^{-1}\left(\left(\hat{Q}_1 - \hat{Q}_0 \right) \Sigma \right)^{1/2} \right)\right)^{-1} +\\ & - \underset{p \rightarrow \infty}{\lim}\frac{x_0}{2p} \left( \hat{M} \mathbf{m} \right)^t \left( \left( \hat{Q}_1 - \hat{Q}_0 \right) \Sigma \right)^{1/2} \left( \beta \lambda \mathbb{I}_{p\times p} + \hat{V}_1 \Sigma \right)^{-1}\times \\ &\times \left( \mathbb{I}_{p\times p} -x_0 \left(\left(\hat{Q}_1 - \hat{Q}_0 \right) \Sigma \right)^{1/2} \left( \beta \lambda \mathbb{I}_{p\times p} + \hat{V}_1 \Sigma \right)^{-1}\left(\left(\hat{Q}_1 - \hat{Q}_0 \right) \Sigma \right)^{1/2} \right)^{-1}\times\\ &\times \left( \left( \hat{Q}_1 - \hat{Q}_0 \right) \Sigma \right)^{1/2} \left( \beta \lambda \mathbf{I} + \hat{V}_1 \Sigma \right)^{-1} \hat{M} \mathbf{m}\\ & \psi_y = \frac{\alpha}{x_0} \ \mathbb{E}_{y,\xi_0} \mbox{log} \mathbb{E}_{\xi_1} \left[ \int \frac{dh}{\sqrt{2\pi\left( R - Q_1 \right)}} \mbox{exp}\left(-\frac{\left( h - \sqrt{Q_0}\xi_0 -\sqrt{Q_1 - Q_0} \xi_1 \right)^2}{2\left( R - Q_1 \right)}\right)P_y\left( y|h\right) \right]^{x_0} \end{split} \end{equation} where we have defined $V_1 = R - Q_1$. \subsubsection{Zero Temperature Limit} The solutions of the optimization problem in eq. (\ref{eq:optimization_w}) correspond to the minima of the loss function, namely those weight configurations with the highest statistical significance in the zero-temperature limit. With the purpose of dealing with optimization problems, we therefore need to further take the zero-temperature limit of the replica equations in both replica symmetry and one-step replica symmetry breaking scenarios. \paragraph{Replica Symmetry} In the limit of $\beta \rightarrow \infty$, the overlap parameters obey the following scaling with respect to $\beta$: \begin{equation} \begin{split} Q &\sim O\left( 1 \right) \hspace{10mm} \hat{Q} \sim O\left( \beta^{-2}\right)\\ V &\sim O\left( \beta \right) \hspace{10mm} \hat{V} \sim O\left( \beta^{-1}\right)\\ M &\sim O\left( 1 \right) \hspace{10mm} \hat{M} \sim O\left( \beta^{-1}\right)\\ x_0 &\sim O\left( \beta \right) \end{split}\label{eq:beta_scaling} \end{equation} Applying the above scaling in $\beta$ for $\psi$ and $\psi_w$ is quite straightforward since it only requires the identification of the leading terms in $\beta$ and to neglect all the remaining once. 
This operation leads to the following results for the two potentials:
\begin{equation}
\begin{split}
    &\psi = \underset{\beta \rightarrow \infty}{\lim}\frac{\psi}{\beta} = \frac{1}{2}\left(V\hat{Q} - \hat{V}Q \right) - M\hat{M}\\
    &\psi_w =\underset{\beta \rightarrow \infty}{\lim} \frac{\psi_w}{\beta} = \underset{p\rightarrow \infty}{\lim} \frac{1}{2p} \left(\hat{M}\mathbf{m} \right)^t \left(\lambda \mathbb{I}_{p\times p} + \hat{V}\Sigma \right)^{-1}\left(\hat{M}\mathbf{m} \right)+ \underset{p \rightarrow \infty}{\mbox{lim}}\frac{1}{2p} \mbox{tr} \left( \left( \lambda \mathbb{I}_{p\times p} + \hat{V} \Sigma \right)^{-1}\hat{Q}\Sigma \right)
\end{split}
\label{eq:RS_potentials_finite_beta}
\end{equation}
Concerning the energetic potential, in order to identify the leading order in $\beta$ we first need to re-write the output channel in terms of the Boltzmann-Gibbs measure:
\begin{equation}
\begin{split}
    \psi_y &= \alpha \mathbb{E}_{y,\xi}\mbox{log} \left[ \int \frac{dh}{\sqrt{2\pi V}}\mbox{exp}\left( - \frac{\left( h - \sqrt{Q}\xi - M \right)^2}{2V} - \beta \ell\left( y, h\right)\right) \right]\\
    & = \alpha \mathbb{E}_{y,\xi}\mbox{log} \left[ \int \frac{dh}{\sqrt{2\pi V}}\mbox{exp}\left( -\beta \left(\frac{\left( h - \sqrt{Q}\xi - M \right)^2}{2V} + \ell\left( y, h\right)\right)\right) \right]
\end{split}
\label{eq:RS_potentials_finite_beta}
\end{equation}
where in the last equality we have applied the scaling in (\ref{eq:beta_scaling}) for $V$. In the limit of $\beta \rightarrow \infty$, the dominant contribution to the $h$-integral is the one maximizing the argument of the exponential function. Because of that, in this limit, the energetic potential can be written as follows:
\begin{equation}
    \psi_y = \underset{\beta \rightarrow \infty}{\lim}\frac{\psi_y}{\beta} = -\alpha \mathbb{E}_{y,\xi} \left[ \frac{\left( \eta - \omega \right)^2}{2V} + \ell\left( y, \eta\right) \right]\label{eq:psi_y_zero_temperature}
\end{equation}
where we have defined $\omega = \sqrt{Q}\xi + M$, and $\eta$ is the minimizer of the argument of the exponential function:
\begin{equation}
    \eta = \underset{h \in \mathbb{R}}{\mbox{argmin}} \left[\frac{\left(h - \omega\right)^2}{2V} + \ell\left(y, h \right)\right]
    \label{eq:eta}
\end{equation}
By collecting all the different terms we finally get eq. (\ref{eq:free-energy}), with the potentials $\psi$, $\psi_w$ and $\psi_y$ defined as in the main manuscript.
\paragraph{Replica Symmetry Breaking} In the limit of $\beta \rightarrow \infty$, the overlap parameters obey the following scaling with respect to $\beta$:
\begin{equation}
\begin{split}
    Q_1 &\sim O\left( 1 \right) \hspace{10mm} \hat{Q}_1 \sim O\left( \beta^{-2}\right)\\
    Q_0 &\sim O\left( 1 \right) \hspace{10mm} \hat{Q}_0 \sim O\left( \beta^{-2}\right)\\
    V &\sim O\left( \beta \right) \hspace{10mm} \hat{V} \sim O\left( \beta^{-1}\right)\\
    M &\sim O\left( 1 \right) \hspace{10mm} \hat{M} \sim O\left( \beta^{-1}\right)\\
    x_0 &\sim O\left( \beta \right)
\end{split}\label{eq:beta_scaling}
\end{equation}
As in the RS case, applying the above scaling in $\beta$ to $\psi$ and $\psi_w$ is quite straightforward, since it only requires identifying the leading terms in $\beta$ and neglecting all the remaining ones.
This operation leads to the following results for the two potentials: \begin{equation} \begin{split} &\psi = \underset{\beta \rightarrow \infty}{\lim} \frac{1}{\beta} \psi= -\frac{1}{2}\left(V_1\hat{Q}_1 - \hat{V}_1Q_1 + x_0 \left( Q_1\hat{Q}_1 - Q_0\hat{Q}_0 \right)\right) - M\hat{M}\\ & \psi_w = \underset{\beta \rightarrow \infty}{\lim} \frac{1}{\beta} \psi_w = \underset{p \rightarrow \infty}{\mbox{lim}}\frac{1}{2p} \mbox{tr} \left( \left( \lambda \mathbb{I}_{p\times p} + \hat{V}_1 \Sigma \right)^{-1}\hat{Q}_0\Sigma \right) +\underset{p\rightarrow \infty}{\lim} \frac{1}{2p} \left(\hat{M}\mathbf{m} \right)^t \left( \lambda \mathbb{I}_{p\times p} + \hat{V}_1\Sigma \right)^{-1}\left(\hat{M}\mathbf{m} \right)+\\ & + \underset{p\rightarrow \infty}{\lim} \frac{x_0}{2p} \hat{Q}_0 \left( \hat{Q}_1 - \hat{Q}_0 \right) \times \\ & \times \mbox{tr}\left( \left( \lambda \mathbb{I}_{p\times p} + \hat{V}_1 \Sigma \right)^{-2} \left( \mathbb{I}_{p\times p} -x_0 \left(\left(\hat{Q}_1 - \hat{Q}_0 \right) \Sigma \right)^{1/2} \left( \lambda \mathbb{I}_{p\times p} + \hat{V}_1 \Sigma \right)^{-1}\left(\left(\hat{Q}_1 - \hat{Q}_0 \right) \Sigma \right)^{1/2} \right)\right)^{-1} +\\ & - \underset{p \rightarrow \infty}{\lim}\frac{x_0}{2p} \left( \hat{M} \mathbf{m} \right)^t \left( \left( \hat{Q}_1 - \hat{Q}_0 \right) \Sigma \right)^{1/2} \left( \lambda \mathbb{I}_{p\times p} + \hat{V}_1 \Sigma \right)^{-1}\times \\ &\times \left( \mathbb{I}_{p\times p} -x_0 \left(\left(\hat{Q}_1 - \hat{Q}_0 \right) \Sigma \right)^{1/2} \left( \lambda \mathbb{I}_{p\times p} + \hat{V}_1 \Sigma \right)^{-1}\left(\left(\hat{Q}_1 - \hat{Q}_0 \right) \Sigma \right)^{1/2} \right)^{-1}\times\\ &\times \left( \left( \hat{Q}_1 - \hat{Q}_0 \right) \Sigma \right)^{1/2} \left( \lambda \mathbb{I}_{p\times p} + \hat{V}_1 \Sigma \right)^{-1} \left(\hat{M} \mathbf{m}\right)\\ \end{split} \end{equation} As for the energetic potential, we can apply exactly the same reasoning used for replica symmetry in the previous paragraph. This leads to the following expression: \begin{equation} \psi_y = \underset{\beta \rightarrow \infty}{\lim} \frac{\psi_y}{\beta} =\frac{\alpha}{x_0}\mathbb{E}_{y,\xi_0} \left[\mbox{log} \ \mathbb{E}_{\xi_1}\left[ \mbox{exp}\left(-x_0\left( \frac{\left(\eta_1 - \omega_1\right)^2}{2V_1} + \ell\left(y, \eta_1 \right) \right)\right)\right]\right] \end{equation} By combining the expressions of the three potentials in the zero-temperature limit, we finally get the free-energy density of eq.~(\ref{eq:free-energy_1rsb}) in the main manuscript. \subsubsection{Saddle-Point Equations under the Replica Symmetry Assumption} The extremum operation in eq. (\ref{eq:free-energy}) requires the differentiation of the corresponding free-energy density with respect to both the overlap parameters and their conjugates.
This leads to a set of coupled saddle-point equations, which for the overlap parameters are given by: \begin{equation} \begin{split} Q & = -2\frac{\partial \psi_w}{\partial\hat{V}} = -\frac{\hat{M}^2}{p} \mathbf{m}^t \left( \lambda \mathbb{I}_{p\times p} + \hat{V}\Sigma \right)^{-2} \Sigma \mathbf{m} + \mbox{tr}\left( \hat{Q}\Sigma \left( \lambda \mathbb{I}_{p\times p} + \hat{V}\Sigma \right)^{-2} \Sigma \right)\\ V & = 2\frac{\partial\psi_w}{\partial \hat{Q}} = \mbox{tr}\left(\Sigma \left( \lambda\mathbb{I}_{p\times p} + \hat{V}\Sigma \right)^{-1} \right) \\ M &=\frac{\partial \psi_w}{\partial \hat{M}} = \frac{\hat{M}}{p} \mathbf{m}^t \left(\lambda \mathbb{I}_{p\times p} + \hat{V}\Sigma \right)^{-1} \mathbf{m} \end{split} \end{equation} The saddle-point equations involving the conjugate overlap parameters, on the other hand, depend on the specific choice of the training loss, since they involve the energetic potential through partial derivatives with respect to the overlap parameters. In this work we have derived them for the square and the hinge loss. \paragraph{Square Loss} In the specific case of the square loss function, the one-sample loss $\ell\left(y, h \right)$ in eq. (\ref{eq:eta}) is given by: \begin{equation} \ell\left( y, h\right) = \frac{1}{2} \left(y - h \right)^2 \end{equation} By plugging the square one-sample loss into eq. (\ref{eq:eta}), we then get the following expression for the extremizer $\eta$: \begin{equation} \eta = \frac{\omega + yV}{1+V} \end{equation} If we evaluate the energetic potential of eq. (\ref{eq:psi_y_zero_temperature}) in $\eta$, we can then easily solve the Gaussian integral over $\xi$ and the one over the uniformly distributed $y$. The integration gives us the following expression for the output channel: \begin{equation} \psi_y = -\frac{\alpha}{2}\left( \frac{1+Q+M^2}{1+V}\right) \end{equation} From this simple expression of the output channel, we can straightforwardly determine the saddle-point equations for the overlap parameters: \begin{equation} \begin{split} \hat{Q} &= 2\frac{\partial \psi_y}{\partial V} = \frac{1+Q+M^2}{\left( 1 + V\right)^2}\\ \hat{V} &= -2\frac{\partial \psi_y}{\partial Q} = \frac{1}{1+V}\\ \hat{M} &= \frac{\partial \psi_y}{\partial M} = \frac{M}{1+V} \end{split} \end{equation}
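As a quick numerical sanity check of the square-loss channel, the short Python snippet below (ours, purely illustrative; the helper name \texttt{moreau\_min} is not part of the derivation) verifies that $\eta = (\omega + yV)/(1+V)$ indeed minimizes $(h-\omega)^2/(2V) + \ell(y,h)$, and that the minimum equals $(y-\omega)^2/(2(1+V))$, which is what produces the simple expression of the output channel above.
\begin{verbatim}
import numpy as np
from scipy.optimize import minimize_scalar

# Illustrative check only -- not part of the replica derivation.
rng = np.random.default_rng(0)

def moreau_min(y, omega, V):
    # numerically minimise (h - omega)^2 / (2 V) + (y - h)^2 / 2 over h
    obj = lambda h: (h - omega) ** 2 / (2 * V) + 0.5 * (y - h) ** 2
    res = minimize_scalar(obj)
    return res.x, res.fun

for _ in range(5):
    y = rng.choice([-1.0, 1.0])                       # binary label
    Q, M, V = rng.uniform(0.1, 2.0, size=3)
    omega = np.sqrt(Q) * rng.standard_normal() + M    # local field
    eta_num, val_num = moreau_min(y, omega, V)
    eta_cf = (omega + y * V) / (1 + V)                # closed-form extremizer
    val_cf = (y - omega) ** 2 / (2 * (1 + V))         # closed-form minimum
    assert np.isclose(eta_num, eta_cf, atol=1e-5)
    assert np.isclose(val_num, val_cf, atol=1e-7)
print("square-loss extremizer and channel value verified")
\end{verbatim}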
\paragraph{Hinge Loss} In the specific case of the hinge loss function, the one-sample loss $\ell\left(y, h \right)$ in eq. (\ref{eq:eta}) is given by: \begin{equation} \ell\left( y, h\right) = \mbox{max} \left(0, 1- yh \right) \end{equation} By plugging the hinge one-sample loss into eq. (\ref{eq:eta}), we then get the following expression for the extremizer $\eta$ by considering all the possible cases required by the max non-linearity: \begin{equation} \eta = \begin{cases} \omega \ \text{if} \ y\omega \geq 1 \\ y \ \text{if} \ 1-V \leq y\omega \leq 1 \\ \omega + yV\ \text{if} \ y\omega \leq 1-V \\ \end{cases} \end{equation} If we evaluate the energetic potential of eq. (\ref{eq:psi_y_zero_temperature}) in $\eta$, we can then easily solve the Gaussian integral over $\xi$ and the one over the uniformly distributed $y$. The integration gives us the following expression for the output channel: \begin{equation} \psi_y = \psi^+_{y,1} + \psi^+_{y,2} + \psi^-_{y,1} + \psi^-_{y,2} \end{equation} where the above four terms are given by: \begin{equation} \begin{split} \psi_{y,1}^{\pm} &= -\frac{\sqrt{Q}\left( 1 \mp M \right)}{4\sqrt{2\pi}V} \mbox{exp}\left( -\frac{\left( 1 \mp M\right)^2}{2Q}\right) + \frac{\sqrt{Q}\left( 1 \mp M + V\right)}{4\sqrt{2\pi}V} \mbox{exp}\left( -\frac{\left( 1 \mp M - V\right)^2}{2Q}\right)+\\ &-\frac{Q + \left( 1 \mp M\right)^2}{8V} \left( \mbox{erf}\left(\frac{1\mp M}{\sqrt{2Q}}\right) - \mbox{erf}\left(\frac{1\mp M-V}{\sqrt{2Q}} \right) \right)\\ \psi_{y,2}^{\pm} &= -\frac{1}{2}\left( \sqrt{\frac{Q}{2\pi}} \mbox{exp} \left( - \frac{\left( 1 \mp M -V \right)^2}{2Q}\right) + \frac{1\mp M-V/2}{2}\left(1 + \mbox{erf}\left(\frac{1 \mp M -V}{\sqrt{2Q}} \right)\right) \right) \end{split} \end{equation} From this expression of the output channel, we can then determine the corresponding saddle-point equations for the overlap parameters: \begin{itemize} \item saddle-point equation for $\hat{Q}$: \begin{equation} \hat{Q} = 2\frac{\partial \psi_y}{\partial V} = -\partial_V \left[ \psi^+_{y,1} + \psi^+_{y,2} + \psi^-_{y,1} + \psi^-_{y,2} \right] \end{equation} where the partial derivatives are given by: \begin{equation} \begin{split} \partial_V \psi^{\pm}_{y,1} &= \frac{\left(\frac{V^3}{\sqrt{Q}} + \sqrt{Q}\left(1 \mp M + V \right) \right) \mbox{exp}\left(-\frac{\left( 1 \mp M - V\right)^2}{2Q} \right) - \sqrt{Q}\left(1\mp M \right) \mbox{exp}\left(-\frac{\left( 1 \mp M \right)^2}{2Q} \right)}{2\sqrt{2\pi} V^2} +\\ & - \frac{1}{4V^2} \left( \left( 1 \mp M\right)^2 + Q \right)\left( \mbox{erf}\left( \frac{1\mp M}{\sqrt{2Q}}\right) - \mbox{erf}\left(\frac{1\mp M-V}{\sqrt{2Q}} \right) \right)\\ \partial_V \psi^{\pm}_{y,2} &= -\frac{V}{2\sqrt{2\pi Q}} \mbox{exp}\left(-\frac{\left( 1 \mp M -V\right)^2}{2Q} \right) - \frac{1}{4}\left( 1 + \mbox{erf}\left(\frac{1\mp M-V}{\sqrt{2Q}} \right) \right) \end{split} \end{equation} \item saddle-point equation for $\hat{V}$: \begin{equation} \hat{V} =- 2\frac{\partial \psi_y}{\partial Q} = \partial_Q \left[ \psi^+_{y,1} + \psi^+_{y,2} + \psi^-_{y,1} + \psi^-_{y,2} \right] \end{equation} where the partial derivatives are given by: \begin{equation} \begin{split} \partial_Q \psi^{\pm}_{y,1} &= -\frac{\left(2Q - \left( 1 \mp M -V\right)V \right)}{4\sqrt{2\pi}Q^{3/2}}\mbox{exp}\left(-\frac{\left(1 \mp M -V \right)^2}{2Q} \right) +\frac{1}{4V} \left( \mbox{erf}\left(\frac{1\mp M}{\sqrt{2Q}} \right) - \mbox{erf}\left(\frac{1 \mp M-V}{\sqrt{2Q}} \right) \right)\\ \partial_Q \psi^{\pm}_{y,2} &= \frac{2Q - \left(1 \mp M-V \right)V}{4\sqrt{2\pi}Q^{3/2}} \mbox{exp}\left(-\frac{\left(1\mp M-V \right)^2}{2Q} \right) \end{split} \end{equation} \item saddle-point equation for $\hat{M}$: \begin{equation} \hat{M} =- 2\frac{\partial \psi_y}{\partial M} =-\frac{1}{2} \partial_M \left[ \psi^+_{y,1} + \psi^+_{y,2} + \psi^-_{y,1} + \psi^-_{y,2} \right] \end{equation} where the partial derivatives are given by: \begin{equation} \begin{split} \partial_M \psi^{\pm}_{y,1} &= \mp \frac{Q}{\sqrt{2\pi Q}V}\mbox{exp}\left(-\frac{\left( 1\mp M\right)^2}{2Q} \right) + \frac{\left(2Q + V^2 \right)}{2\sqrt{2\pi Q}V} \mbox{exp}\left(-\frac{\left( 1\mp M\right)\left(1\mp M -2V \right)+V^2}{2Q} \right) +\\ &\pm \frac{1\mp M}{2V}\left(\mbox{erf}\left(\frac{1\mp M-V}{\sqrt{2Q}} \right) - \mbox{erf}\left(\frac{1\mp M}{\sqrt{2Q}} \right) \right)\\ \partial_M \psi^{\pm}_{y,2} &= \mp \frac{V}{2\sqrt{2\pi Q}}\mbox{exp}\left(-\frac{\left( 1 \mp M -V\right)^2}{2Q}\right) - \frac{1}{2} \left( 1 + \mbox{erf}\left(\frac{1\mp M-V}{\sqrt{2Q}} \right)\right) \end{split} \end{equation} \end{itemize} \subsection{Technical Details of the Simulations} \section{Mathematical arguments towards universality of Gaussian Mixtures}\label{app:gaussian_universality} We work in a setting slightly more general than the main text; the minimization problem we consider is of the form \begin{equation}\label{eq:def_min_problem} \widehat\cR_n^*(\bm X, \bm y) = \inf_{\bm \Theta \in \cS_p^k} \frac1n\sum_{\mu=1}^n \ell(\bm \Theta^\top \bm x_\mu, y_\mu) + r(\bm\Theta), \end{equation} where the $\bm x_\mu \in \dR^p$ are input vectors, $y_\mu\in \dR$ are one-dimensional labels, and $\cS_p$ is a compact subset of $\dR^p$. We assume that the loss $\ell$ only depends on the $\bm x_\mu$ through $k$ one-dimensional projections $\bm\theta_1^\top \bm x_\mu, \dots, \bm \theta_k^\top \bm x_\mu$, and we work in the so-called proportional high-dimensional limit, where $n, p$ go to infinity with \[\frac np \to \alpha > 0, \] while $k$ stays fixed. Throughout this section, $\norm{}$ will denote the spectral norm of a matrix, while $\norm{}_q$ for $q > 0$ will refer to the element-wise $q$-norms. For a subgaussian random variable $Y$, its subgaussian norm $\norm{Y}_{\psi_2}$ is defined as \[ \norm{Y}_{\psi_2} = \inf \Set*{t > 0 \given \E*{\exp\left(\frac{Y^2}{t^2}\right)} \leq 2}. \] \addtocontents{toc}{\protect\setcounter{tocdepth}{1}} \subsection{State of the art} \addtocontents{toc}{\protect\setcounter{tocdepth}{2}} There has been much recent progress on Gaussian-type low-dimensional CLTs and universality \cite{goldt2020gaussian,hu_universality_2021,montanari2022universality}. We shall leverage these results to prove our first theorem. In particular, the starting point of our mathematical proof is the recent result of \cite{montanari2022universality}, which we now review. Consider the minimization problem \eqref{eq:def_min_problem}, with $(\bm x_\mu, y_\mu)$ i.i.d random variables; the goal is to replace the $\bm x_\mu$ by their Gaussian equivalent model \begin{equation} \bm g_\mu \overset{i.i.d}{\sim} \cN(\bm 0, \bm \Sigma) \qquad \text{where}\qquad \bm \Sigma = \E*{\bm x\bm x^\top}. \end{equation} We make the following assumptions: \begin{assump}[Loss and regularization]\label{assump:loss} The loss function $\ell : \dR^{k+1} \to \dR$ is nonnegative and Lipschitz, and the regularization function $r : \dR^{p\times k} \to \dR$ is locally Lipschitz, with constants independent from $p$. \end{assump} \begin{assump}[Labels]\label{assump:labels} The $y_\mu$ are generated according to \begin{equation}\label{eq:label_definition_montanari} y_\mu = \eta(\bm \Theta^* \bm x_\mu, \eps_\mu), \end{equation} where $\eta : \dR^{k^*+1}\to \dR$ is a Lipschitz function, $\bm \Theta^* \in \cS_p^{k^*}$, and the $\eps_\mu$ are i.i.d subgaussian random variables with \[ \norm{\eps_\mu}_{\psi_2} \leq M \] for some constant $M > 0$. \end{assump} \begin{assump}[Concentration on the directions of $\cS_p$]\label{assump:concentration} We have \begin{equation}\label{eq:isotropy_montanari} \sup_{\bm \theta \in \cS_p, \norm{\bm \theta}_2 \leq 1} \norm{\bm \theta^\top \bm x}_{\psi_2} \leq M \qquand \sup_{\bm \theta \in \cS_p, \norm{\bm \theta}_2 \leq 1} \norm{\bm \Sigma \bm \theta}_{2} \leq M, \end{equation} for some constant $M > 0$.
\end{assump} \begin{assump}[One-dimensional CLT]\label{assump:clt} For any bounded Lipschitz function $\varphi: \dR \to \dR$, \begin{equation} \lim_{p\to \infty} \sup_{\bm \theta\in\cS_p} \E*{\left\lvert\varphi(\bm \theta^\top \bm x) - \varphi(\bm \theta^\top \bm g)\right\rvert} = 0. \end{equation} \end{assump} Building on those assumptions, \cite{montanari2022universality} prove the following: \begin{theorem}[Theorem 1 in \cite{montanari2022universality}] \label{thm:montanari_universality} Suppose that Assumptions \ref{assump:loss}-\ref{assump:clt} hold. Then, for any bounded Lipschitz function $\Phi: \dR \to \dR$, we have \[\lim_{n, p \to \infty} \left| \E*{\Phi\left(\widehat\cR_n^*(\bm X, \bm y(\bm X))\right)} - \E*{\Phi\left(\widehat\cR_n^*(\bm G, \bm y(\bm G))\right)} \right| = 0\] In particular, for any $\rho \in \dR$, \[ \widehat\cR_n^*(\bm X, \bm y(\bm X)) \overset{\dP}{\longrightarrow} \rho \quad \text{if and only if} \quad \widehat\cR_n^*(\bm G, \bm y(\bm G)) \overset{\dP}{\longrightarrow} \rho \] \end{theorem} \subsection{Sketch of proof of Theorem \ref{thm:montanari_universality}, adapted from \cite{montanari2022universality}} \paragraph{Free energy approximation} Define the discretized free energy \begin{equation} f_{\epsilon, \beta}(\bm X) = -\frac1{n\beta}\log\sum_{\bm\Theta\in\cN_\epsilon^k} \exp\left( -n\beta\, \widehat\cR_n(\bm\Theta ; \bm X, \bm y(\bm X) ) \right), \end{equation} where $\widehat\cR_n(\bm\Theta ; \bm X, \bm y(\bm X) )$ is the quantity minimized in \eqref{eq:def_min_problem}, and $\cN_\epsilon$ is a minimal $\epsilon$-net of $\cS_p$. Using classical arguments from both the theory of $\epsilon$-nets and statistical physics, the authors show that \begin{equation}\label{eq:free_energy_approx} \left|f_{\epsilon, \beta}(\bm X) - \widehat\cR_n^*(\bm X, \bm y(\bm X)) \right| \leq C_1(\epsilon) + \frac{C_2(\epsilon)}{\beta}, \end{equation} and the same inequality holds for $\bm G$. Since $C_1, C_2$ do not depend on $n, p$, it is possible to choose first $\epsilon$, then $\beta$ so that the RHS of \eqref{eq:free_energy_approx} is as small as desired, and keep them fixed throughout the rest of the proof. We can therefore focus on studying the free energy approximation $f_{\epsilon, \beta}$ throughout the rest of the proof. \paragraph{Interpolation path} For any $0 \leq t \leq \pi/2$, define \[ \bm U_t = \cos(t)\bm X + \sin(t) \bm G\] Then $\bm U_t$ is a smooth interpolation path with independent columns, ranging from $\bm U_0 = \bm X$ to $\bm U_{\pi/2} = \bm G$. We can write, for any differentiable function $\psi$, \[ \left|\E*{\psi(f_{\epsilon, \beta}(\bm X))} - \E*{\psi(f_{\epsilon, \beta}(\bm G))} \right| \leq \int_0^{\pi/2} \left|\E*{\frac{d \psi(f_{\epsilon, \beta}(\bm U_t))}{dt}}\right| dt,\] and by the dominated convergence theorem it suffices to show that the integrand converges to $0$ for any $t$. The chain rule gives \[ \frac{d \psi(f_{\epsilon, \beta}(\bm U_t))}{dt} = \psi'(f_{\epsilon, \beta}(\bm U_t))\left( \sum_{\mu=1}^n \left(\frac{d\bm u_{t, \mu}}{dt}\right)^\top \nabla_{\bm u_{t, \mu}} f_{\epsilon, \beta}(\bm U_t) \right), \] and the dependency on $\psi$ can be easily controlled.
Since all columns of $\bm U_t$ are i.i.d, we are left with showing \begin{equation}\label{eq:interp_universality} \lim_{n, p \to \infty} n \dE_{(1)}\left[\left(\frac{d\bm u_{t, 1}}{dt}\right)^\top \nabla_{\bm u_{t, 1}} f_{\epsilon, \beta}(\bm U_t)\right] = 0 \quad \text{a.s.}, \end{equation} where $\dE_{(1)}$ denotes the expectation with respect to $(\bm x_1, \bm g_1, \eps_1)$. \paragraph{Showing \eqref{eq:interp_universality}} Imagine for a moment that $\bm x_1$ is Gaussian; then $\bm u_{t, 1}$ and $d\bm u_{t, 1}/dt$ are also jointly Gaussian, and we have \begin{align*} \E*{\left(\frac{d\bm u_{t, 1}}{dt}\right)^\top \bm u_{t, 1}} &= \E*{(-\sin(t)\bm x_1 + \cos(t)\bm g_1)^\top (\cos(t)\bm x_1 + \sin(t)\bm g_1)} \\ &= 0, \end{align*} since $\bm x_1$ and $\bm g_1$ have the same covariance by definition. Therefore, $d\bm u_{t, 1}/dt$ and $\bm u_{t, 1}$ are independent, and we have \begin{align*} \dE_{(1)}\left[\left(\frac{d\bm u_{t, 1}}{dt}\right)^\top \nabla_{\bm u_{t, 1}} f_{\epsilon, \beta}(\bm U_t)\right] = \dE_{(1)}\left[\left(\frac{d\bm u_{t, 1}}{dt}\right)\right]^\top \dE_{(1)}\left[\nabla_{\bm u_{t, 1}} f_{\epsilon, \beta}(\bm U_t)\right] = 0. \end{align*} On the other hand, it is possible to show that $\bm x_1$ only appears in \eqref{eq:interp_universality} through scalar products with $\bm \Theta$ or $\bm \Theta^*$. As a result, we can leverage Assumption \ref{assump:clt} to replace $\bm x_1$ by a Gaussian vector $\bm w$ independent from $\bm g_1$ as $p \to \infty$. Then, the reasoning above can be repeated with $\bm w$ and $\bm g_1$ to conclude the proof. \subsection{Proof of Theorem \ref{thm:main_gaussian_universality}} In order to prove our Theorem \ref{thm:main_gaussian_universality}, we now aim to adapt the proof from \cite{montanari2022universality} to the case where the distribution of $\bm x$ can be a {\it mixture} of several other distributions, each with different mean and covariance. For a discrete set $\cC = \{1, \dots, K\}$, we consider a family of distributions $(\nu_c)_{c\in \cC}$ on $\dR^p$, with means and covariances \[ \bm \mu_c = \dE_{\bm z\sim \nu_c}[\bm z] \qquand \bm \Sigma_c = \dE_{\bm z\sim \nu_c}[\bm z \bm z^\top] \] Given a type assignment $\sigma: [n] \to \cC$, each sample $\bm x_\mu$ is then drawn independently from $\nu_{\sigma(\mu)}$. The equivalent Gaussian model is straightforward: we simply take \[ \bm g_\mu \sim \cN(\bm \mu_{\sigma(\mu)}, \bm \Sigma_{\sigma(\mu)}), \] independently from each other. An important special case of this setting is when $\sigma$ is itself random, independently from the $\bm x_i$ and $\bm g_i$: the law of $\bm g_i$ is then a so-called Gaussian Mixture Model. The assumptions of Theorem \ref{thm:montanari_universality} are modified as follows: \begin{enumerate} \item Assumption \ref{assump:loss} is unchanged, \item We relax \eqref{eq:label_definition_montanari} in Assumption \ref{assump:labels} into \[ y_i = \eta_{\sigma(i)}(\bm \Theta^* \bm x_i, \eps_i), \] for a family $(\eta_c)_{c\in \cC}$ of Lipschitz functions. This allows us in particular to incorporate classification problems in our setting, at no cost in the proof complexity. \item We impose in Assumption \ref{assump:concentration} the stronger condition \begin{equation}\label{eq:app:gaussian_subgaussian} \sup_{\bm \theta \in \cS_p, \norm{\bm \theta}_2 \leq 1} \norm{\bm \theta^\top \bm g}_{\psi_2} \leq M, \end{equation} which is a consequence of \eqref{eq:isotropy_montanari} when $\bm g$ has zero-mean.
This is in practice an additional condition on the means $\bm \mu_c$; indeed, \eqref{eq:app:gaussian_subgaussian} is equivalent to \begin{equation} \sup_{\bm \theta \in \cS_p, \norm{\bm \theta}_2 \leq 1} \langle \bm \mu_c, \bm \theta \rangle \leq M' \quand \sup_{\bm \theta \in \cS_p, \norm{\bm \theta}_2 \leq 1} \norm{\bm \Sigma_c\, \bm \theta}_{2} \leq M', \end{equation} for some different constant $M'$. \item We suppose that Assumptions \ref{assump:concentration} and \ref{assump:clt} hold for any possible distribution $\nu_c$ for $c \in \cC$ and its associated Gaussian equivalent model. \end{enumerate} Our result is then an extension of Theorem \ref{thm:montanari_universality} for mixtures: \begin{reptheorem}{thm:main_gaussian_universality} Suppose that the above modified assumptions hold. Then, for any bounded Lipschitz function $\Phi: \dR \to \dR$, we have \[\lim_{n, p \to \infty} \left| \E*{\Phi\left(\widehat\cR_n^*(\bm X, \bm y(\bm X))\right)} - \E*{\Phi\left(\widehat\cR_n^*(\bm G, \bm y(\bm G))\right)} \right| = 0\] In particular, for any $\rho \in \dR$, \[ \widehat\cR_n^*(\bm X, \bm y(\bm X)) \overset{\dP}{\longrightarrow} \rho \quad \text{if and only if} \quad \widehat\cR_n^*(\bm G, \bm y(\bm G)) \overset{\dP}{\longrightarrow} \rho \] \end{reptheorem} We now go through the proof of the previous section, highlighting the important changes. \paragraph{Free energy approximation} This section goes basically unchanged; the approximation between $\widehat\cR_n^*(\bm X, \bm y(\bm X))$ and $f_{\epsilon, \beta}(\bm X)$ relies on Lipschitz arguments and concentration bounds on the $\bm x_i$ and $\bm g_i$, which are satisfied by our modification of Assumption \ref{assump:concentration}. \paragraph{Interpolation path} Recall that the important property of $\bm U_t$ is that \begin{equation}\label{eq:ut_constant_norm} \E*{\left(\frac{d\bm U_t}{dt}\right)^\top \bm U_t} = 0. \end{equation} To preserve this property, we set \[ \bm u_{t, \mu} = \bm \mu_{\sigma(\mu)} + \cos(t)(\bm x_\mu - \bm \mu_{\sigma(\mu)}) + \sin(t)(\bm g_\mu - \bm \mu_{\sigma(\mu)}), \] and it is easy to check that \eqref{eq:ut_constant_norm} is satisfied. Another problem is that the columns of $\bm U_t$ are not i.i.d anymore, so we have to control \begin{equation}\label{eq:interp_non_iid} \frac 1n \sum_{\mu=1}^n \left| \dE_{(\mu)}\left[\left(\frac{d\bm u_{t, \mu}}{dt}\right)^\top \nabla_{\bm u_{t, \mu}} f_{\epsilon, \beta}(\bm U_t)\right] \right|, \end{equation} where this time $\dE_{(\mu)}$ is the expectation w.r.t.\ $(\bm x_\mu, \bm g_\mu, \eps_\mu)$. However, \eqref{eq:interp_non_iid} is a weighted average over all values of $\sigma(\mu)$, and since $\cC$ is finite it suffices to show \eqref{eq:interp_universality} for any value of $\sigma(1)$. \paragraph{Showing \eqref{eq:interp_universality}} This section again relies on concentration properties of the $\bm x_i$ and $\bm g_i$, as well as Assumption \ref{assump:clt}. The arguments thus translate directly from \cite{montanari2022universality}. \subsection{One-dimensional Gaussian approximation} Although Theorem \ref{thm:montanari_universality} is a powerful result, it still relies on very strong assumptions. In particular, given a distribution $\nu$ for the inputs $\bm x_i$, characterizing the set of vectors $\bm \theta$ such that Assumption \ref{assump:clt} holds is in general a difficult task.
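As an informal illustration of what Assumption \ref{assump:clt} asks for, the following Python snippet (ours; the feature map, dimensions and test function are arbitrary choices, not taken from the works cited in this section) compares, along a fixed delocalised direction $\bm \theta$, the expectation of a bounded Lipschitz function of $\bm \theta^\top \bm x$ for a simple nonlinear feature model with the same quantity under the Gaussian model matching its empirically estimated first two moments.
\begin{verbatim}
import numpy as np

# Illustrative experiment only -- a toy stand-in for the one-dimensional CLT.
rng = np.random.default_rng(1)
p, d, n = 200, 50, 50_000

# non-Gaussian inputs x = tanh(W z) with Gaussian latent z
W = rng.standard_normal((p, d)) / np.sqrt(d)
x = np.tanh(rng.standard_normal((n, d)) @ W.T)

# Gaussian equivalent model: same (estimated) mean and covariance
mu, Sigma = x.mean(axis=0), np.cov(x, rowvar=False)
g = rng.multivariate_normal(mu, Sigma, size=n)

# fixed delocalised direction and bounded Lipschitz test function
theta = rng.standard_normal(p)
theta /= np.linalg.norm(theta)
phi = np.tanh

print("E[phi(theta.x)] ~", phi(x @ theta).mean())
print("E[phi(theta.g)] ~", phi(g @ theta).mean())
\end{verbatim}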
\paragraph{Rigorous results} When the entries of $\bm x$ are i.i.d subgaussian, a classical application of the Lindeberg method \citep{lindeberg_1922_eine} shows that Assumptions \ref{assump:concentration} and \ref{assump:clt} are satisfied with \[ \cS_p = \Set*{\bm \theta \in \dR^p \given \norm{\bm \theta}_\infty = o_p(1)}. \] More recently, this result (often used under the name ``Gaussian Equivalence Theorem'') was extended to general feature models with approximate orthogonality constraints \citep{hu_universality_2021,goldt2020gaussian}, for the same choice of $\cS_p$. \cite{montanari2022universality} also provides a central limit theorem result for the Neural Tangent Kernel of \cite{jacot2018neural}, for a more convoluted parameter set $\cS_p$. While these papers provide a strong basis for the one-dimensional CLT, those rigorous results only concern (so far) a very restricted set of distributions. \paragraph{Concentration of the norm} Another, more informal line of work originating from \cite{seddik_2020_random} argues that most distributions found in the real world satisfy some form of the central limit theorem. The starting point of this analysis is the following theorem, adapted from \cite{bobkov_concentration_2003}: \begin{theorem}[Corollary 2.5 from \cite{bobkov_concentration_2003}]\label{thm:almost_everywhere_clt} Let $\bm x \in\dR^p$ be a random vector, with $\E*{\bm x\bm x^\top} = \bm I_p$, and $\eta_p$ the smallest positive number such that \begin{equation}\label{eq:norm_concentration} \Pb*{\left| \frac{\norm{\bm x}_2}{\sqrt{p}} - 1 \right| \geq \eta_p} \leq \eta_p. \end{equation} Then for any $\delta > 0$, there exists a subset $\cS_p$ of the $p$-sphere $\dS^{p-1}$ of measure at least $1 - 4p^{3/8}e^{-c p\delta^4}$, such that \[ \sup_{\bm \theta\in \cS_p} \sup_{t\in \dR} \left|\Pb{\bm \theta^\top \bm x \leq t} - \Phi(t) \right| \leq \delta + 4\eta_p,\] where $\Phi$ is the cumulative distribution function of a standard Gaussian, and $c$ is a universal constant. \end{theorem} If both $\delta$ and $\eta_p$ are $o(1)$, Theorem \ref{thm:almost_everywhere_clt} implies that Assumption \ref{assump:clt} is satisfied for any compact subset $\cS'_p \subseteq \cS_p$. This suggests that the norm concentration property of \eqref{eq:norm_concentration} is a convenient proxy for one-dimensional CLTs. However, the proof of this theorem uses isoperimetric inequalities, and is thus non-constructive; as a result, characterizing precisely the set $\cS_p$ remains an open and challenging mathematical problem. \paragraph{Concentrated vectors} In \cite{seddik_2020_random}, the authors consider the concept of \emph{concentrated} random variables, as defined in \cite{ledoux_2001_concentration}: \begin{definition}\label{def:concentrated} Let $\bm x\in \dR^p$ be a random vector. $\bm x$ is called (exponentially) concentrated if there exist two constants $C, c$ such that for any 1-Lipschitz function $f: \dR^p \to \dR$, we have \[ \Pb{\left|f(\bm x) - \E{f(\bm x)} \right| \geq t} \leq C e^{-ct^2}.\] \end{definition} Since the norm function is $1$-Lipschitz, it can be shown that any concentrated isotropic vector $\bm x$ satisfies \eqref{eq:norm_concentration}, with \[ \eta_p \propto \left(\frac{\log(p)}{p}\right)^{1/2} \] The converse is obviously not true; an exponential random vector still has $\eta_p \to 0$, but is not concentrated.
However, even if it is stronger than \eqref{eq:norm_concentration}, the concept of concentrated vectors has two important properties: \begin{enumerate} \item a standard Gaussian vector $\bm x \sim \cN(\bm 0, \bm I_p)$ satisfies Definition \ref{def:concentrated} with constants $C, c$ independent from $p$, \item if $\bm x \in \dR^p$ is a concentrated vector with constants $C, c$ and $\Psi: \dR^p \to \dR^{q}$ is an $L$-Lipschitz function, then $\Psi(\bm x)$ is also a concentrated vector, with constants only depending on $c, C$ and $L$. \end{enumerate} \paragraph{Towards real-world datasets} The real-world data considered in machine learning is often composed of very high-dimensional inputs, corresponding to $p \gg 1$ in our setting. However, it is generally accepted that this data actually lies on a low-dimensional manifold of dimension $d_0$: this is the idea behind many dimensionality reduction techniques, from PCA \citep{pearson_1901_lines} to autoencoders \citep{kramer_1991_nonlinear}. Another, more recent line of work (see e.g. \cite{facco2017estimating}) studies the estimation of the latent dimension $d_0$; results for the MNIST dataset ($p=784$) yield $d_0 \approx 15$, while CIFAR-10 ($p=3072$) has estimated intrinsic dimension $d_0\approx 35$ \citep{spigler2019asymptotic}. Following this heuristic, the most widely used method to model realistic data is to learn a map $f: \dR^{d_0} \to \dR^p$, usually through a deep neural network, and then generate the $\bm x_i$ according to \begin{equation} \bm x = f(\bm z) \quad \text{with} \quad \bm z\sim \cN(\bm 0, \bm I_{d_0}) \end{equation} Examples of functions $f$ include GANs \citep{goodfellow2014generative}, variational auto-encoders \citep{kingma2013auto}, or normalizing flows \citep{rezende_2015_variational}. This ansatz has been studied theoretically, and the results compared with real-world datasets, in \cite{goldt2019modelling, loureiro2021learning}; the results indicate significant agreement between generated inputs and actual data. Finally, we argue that for a large class of generative networks, the learned function $f$ is actually Lipschitz, with a bounded constant. This is often even a design choice; indeed, theoretical results such as \cite{bartlett_2017_spectrally} imply that a smaller Lipschitz constant improves the generalization capabilities of a network, or its numerical stability \citep{behrmann_2021_understanding}. As a result, regularizations aimed at controlling the Lipschitz properties of a network are a common occurrence; see e.g. \cite{miyato_2018_spectral} for the spectral regularization of GANs. This indicates that concentrated vectors are indeed a good approximation for real-world data.
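As a small numerical illustration of this last point (ours; the two-layer map below is an arbitrary stand-in for a trained Lipschitz generator, not a model from the cited works), one can push a low-dimensional Gaussian latent through a toy Lipschitz map and inspect the two quantities discussed above: the relative fluctuation of $\norm{\bm x}_2$, i.e.\ the mechanism behind \eqref{eq:norm_concentration}, and the approximate Gaussianity of a fixed one-dimensional projection, judged through its skewness and excess kurtosis.
\begin{verbatim}
import numpy as np

# Illustrative experiment only -- a toy Lipschitz "generator", not a trained model.
rng = np.random.default_rng(2)
d0, p, n = 32, 1000, 10_000

W1 = rng.standard_normal((256, d0)) / np.sqrt(d0)   # two layers with O(1)
W2 = rng.standard_normal((p, 256)) / np.sqrt(256)   # spectral norms
f = lambda z: np.tanh(z @ W1.T) @ W2.T              # Lipschitz map R^{d0} -> R^p

x = f(rng.standard_normal((n, d0)))                 # x = f(z), z ~ N(0, I_{d0})

# relative fluctuation of the norm
r = np.linalg.norm(x, axis=1)
print("std(||x||) / mean(||x||) =", r.std() / r.mean())

# a fixed one-dimensional projection, standardised
theta = rng.standard_normal(p)
theta /= np.linalg.norm(theta)
s = x @ theta
s = (s - s.mean()) / s.std()
print("skewness =", (s**3).mean(), " excess kurtosis =", (s**4).mean() - 3)
\end{verbatim}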
2205.13207v2
http://arxiv.org/abs/2205.13207v2
Vague and weak convergence of signed measures
\documentclass[11pt,a4paper]{article} \usepackage[english]{babel} \usepackage[utf8]{inputenc} \usepackage[T1]{fontenc} \usepackage{amsmath} \usepackage{amsthm} \usepackage{amssymb, color} \usepackage[a4paper,top=1.5in,bottom=1.5in,left=1in,right=1in,marginparwidth=1.75cm]{geometry} \usepackage{graphicx} \usepackage{enumerate} \usepackage{dsfont} \usepackage{mathrsfs} \usepackage{varioref} \usepackage{hyperref} \usepackage[capitalise]{cleveref} \usepackage{hyperref} \usepackage{todonotes} \usepackage{array} \usepackage{makecell} \usepackage{mathtools} \usepackage{comment} \theoremstyle{plain} \newtheorem{theorem}{Theorem}[section] \newtheorem{proposition}[theorem]{Proposition} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{assumption}[theorem]{Assumption} \theoremstyle{definition} \newtheorem{definition}[theorem]{Definition} \newtheorem{example}[theorem]{Example} \newtheorem{remark}[theorem]{Remark} \newcommand{\half}{\frac{1}{2}} \newcommand{\R}{\mathbb{R}} \newcommand{\Rbar}{\overline{\R}} \newcommand{\N}{\mathbb{N}} \newcommand{\Z}{\mathbb{Z}} \newcommand{\Q}{\mathbb{Q}} \newcommand{\C}{\mathbb{C}} \newcommand{\G}{\mathbb{G}} \newcommand{\F}{\mathbb{F}} \newcommand{\K}{\mathbb{K}} \newcommand{\E}{\mathbb{E}} \newcommand{\V}{\mathbb{V}} \newcommand{\prob}{\mathbb{P}} \newcommand{\bB}{\textbf{B}} \newcommand{\bV}{\textbf{V}} \newcommand{\bX}{\textbf{X}} \newcommand{\bY}{\textbf{Y}} \newcommand{\one}{\textbf{1}} \newcommand{\lS}{\mathbb{S}} \newcommand{\bbX}{\textbf{X}} \newcommand{\cA}{\mathcal{A}} \newcommand{\cB}{\mathcal{B}} \newcommand{\cD}{\mathcal{D}} \newcommand{\cJ}{\mathcal{J}} \newcommand{\cK}{\mathcal{K}} \newcommand{\cL}{\mathcal{L}} \newcommand{\cF}{\mathcal{F}} \newcommand{\cO}{\mathcal{O}} \newcommand{\cM}{\mathcal{M}} \newcommand{\cN}{\mathcal{N}} \newcommand{\cC}{\mathcal{C}} \newcommand{\cE}{\mathcal{E}} \newcommand{\cG}{\mathcal{G}} \newcommand{\cP}{\mathcal{P}} \newcommand{\cS}{\mathcal{S}} \newcommand{\cT}{\mathcal{T}} \newcommand{\cV}{\mathcal{V}} \newcommand{\sD}{\mathscr{D}} \newcommand{\sB}{\mathscr{B}} \newcommand{\sE}{\mathscr{E}} \newcommand{\sF}{\mathscr{F}} \newcommand{\sG}{\mathscr{G}} \newcommand{\sI}{\mathscr{I}} \newcommand{\sJ}{\mathscr{J}} \newcommand{\sK}{\mathscr{K}} \newcommand{\sL}{\mathscr{L}} \newcommand{\sM}{\mathscr{M}} \newcommand{\sS}{\mathscr{S}} \newcommand{\sT}{\mathscr{T}} \newcommand{\sV}{\mathscr{V}} \newcommand{\sX}{\mathscr{X}} \newcommand{\au}{\alpha^\mu} \newcommand{\bu}{\beta^\mu} \newcommand{\tm}{\tilde{m}} \newcommand{\tz}{\tilde{\zeta}} \newcommand{\tb}{\tilde{\beta}} \newcommand{\ta}{\tilde{\alpha}} \newcommand{\hz}{\hat{\zeta}} \newcommand{\hb}{\hat{\beta}} \newcommand{\Var}{\textnormal{Var}} \newcommand{\hp}{\hat{\rho}} \newcommand{\tX}{\tilde{X}} \newcommand{\da}{\delta} \newcommand{\s}{\sigma} \newcommand{\e}{\varepsilon} \newcommand{\ph}{\varphi} \newcommand{\tph}{\tilde{\varphi}} \numberwithin{equation}{section} \newcommand{\topology}{\boldsymbol{\tau}} \newcommand{\phn}{\varphi_t^n} \newcommand{\phl}{\varphi_t^\lambda} \newcommand{\phnl}{\varphi_t^{\lambda,n}} \newcommand{\dphnl}{\dot{\ph}_t^{\lambda,n}} \newcommand{\la}{\lambda} \newcommand{\vth}{\vartheta} \newcommand{\xti}{ X_t^{(1)} } \newcommand{\xtii}{ X_t^{(2)} } \newcommand{\Cyl}{\textnormal{Cyl}} \newcommand{\de}{\textnormal{ d}} \newcommand{\den}{\textnormal{d}} \newcommand{\bhat}{\hat{b}} \newcommand{\I}{\mathds{1}} \newcommand{\supply}{\mathbf{S}} \newcommand{\prim}{\textnormal{LiPr}_t} 
\newcommand{\wto}{\rightharpoonup} \newcommand{\triple}{(\Omega, \cF, \prob)} \newcommand{\myeq}{\stackrel{\mathclap{\normalfont\mbox{D}}}{=}} \newcommand{\expenv}[1]{\Big[ #1 \Big]} \newcommand{\squarebrac}[1]{\left[ #1 \right]} \newcommand{\curvebrac}[1]{\left( #1 \right)} \newcommand{\curlybrac}[1]{\left\{ #1 \right\}} \newcommand{\modd}[1]{\left| #1 \right|} \newcommand{\norm}[1]{\left\lVert#1\right\rVert} \newcommand{\alert}[1]{\color{red} #1 \color{black}} \newcommand{\var}{\mathbb{V}\textnormal{ar}} \newcommand{\evague}{\textnormal{v}_{\!0}\textnormal{-lim}} \newcommand{\BV}{\textnormal{BV}} \newcommand{\BVloc}{\textnormal{BV}_{\textnormal{loc}}} \DeclareMathOperator*{\weak}{w-lim} \DeclareMathOperator*{\vague}{v-lim} \definecolor{vargreen}{RGB}{0,150,0} \newcommand{\MH}[1]{{\color{vargreen}{#1}}} \newcommand{\OS}[1]{{\color{purple}{#1}}} \renewcommand{\labelenumi}{{\normalfont (\alph{enumi})}} \renewcommand{\labelenumii}{(\roman{enumii})} \begin{document} \title{ Vague and weak convergence of signed measures\thanks{The authors thank Lutz Mattner for helpful comments on weak convergence for general Hausdorff spaces.}} \author{Martin Herdegen, Gechun Liang, Osian Shelley\thanks{All authors: University of Warwick, Department of Statistics, Coventry, CV4 7AL, UK; \{m.herdegen, g.liang, o.d.shelley\}@warwick.ac.uk;}} \date{\today} \maketitle \begin{abstract} Necessary and sufficient conditions for weak and vague convergence of measures are important for a diverse host of applications. This paper aims to give a comprehensive description of the relationship between the two modes of convergence when the measures are signed, which is largely absent from the literature. Furthermore, when the underlying space is $\R$, we study the relationship between vague convergence of signed measures and the pointwise convergence of their distribution functions. \end{abstract} \bigskip \noindent\textbf{Mathematics Subject Classification (2020):} 28A33, 60B10. \bigskip \noindent\textbf{Keywords:} Weak convergence, vague convergence, signed measures, mass preserving condition. \section{Introduction} In this paper, we aim to provide necessary and sufficient conditions for weak and vague convergence of signed measures. They lie at the heart of key results in probability theory such as Karamata's Tauberian theorem (see e.g.~Feller \cite[XIII.5, Theorem 1]{feller_introduction_1971}), whose proof relies on the equivalence between the vague convergence of finite positive measures and the pointwise convergence of their distribution functions (at continuity points of the limiting measure). Motivated by an application in stochastic control, we extended Karamata's theorem to signed measures in Herdegen et al.~\cite{herdegen_tauberian_2022}. This requires to study the relationship between vague convergence of signed measures and pointwise convergence of their distribution functions. For positive measures, the relationship between weak convergence, vague convergence and convergence of their distribution functions is well understood; see e.g.~Dieudonné and Macdonald~\cite{dieudonne_treatise_1970}, Vere-Jones \cite{daley_introduction_2003} or Kallenberg \cite{kallenberg_random_2017,kallenberg_foundations_2021}. However, the conditions needed to extend this theory to the case of signed measures are seemingly absent from the literature. 
We fill this gap by providing a comprehensive description of the relationship between weak and vague convergence of signed measures (including their Hahn-Jordan decompositions), as well as pointwise convergence of their distribution functions. It turns out that one of the key conditions for the equivalence of different modes of convergence on a metrisable space is to check whether or not mass is preserved in the limit. For example, if \emph{mass is not lost at infinity}, then vague convergence is equivalent to weak convergence. Such a mass preserving condition is usually referred to as a tightness condition in the literature. We further show that if \emph{mass is not lost on compact sets}, then vague convergence implies the convergence of the positive and negative parts in the Hahn-Jordan decomposition. Moreover, if \emph{mass is not lost globally}, then weak convergence of the positive and negative parts in the Hahn-Jordan decomposition also holds. These results are summarised in \Cref{table:1}. When restricted to $\R$, we also provide necessary and sufficient conditions for the equivalence between vague convergence of signed measures and pointwise convergence of their distribution functions (at continuity points of the limiting measure). To this end, we propose a new type of \emph{local (zero) mass preserving condition} in \Cref{def:no_mass}. It prevents the positive and negative parts of the singular decompositions from cancelling in the limit. Using this new condition, we give in Theorem \ref{thm:vague_F_equivalence} a clear characterisation of the relationship between vague convergence of signed measures and pointwise convergence of their distribution functions. \subsection{The definition of vague and weak convergence}\label{subsection:notation_and_defnitinions} Throughout the paper, let $\Omega$ be a metrisable space and $\sB({\Omega})$ its Borel $\sigma$-algebra. Let $C(\Omega)$ be the space of all continuous $\R$-valued functions on $\Omega$, $C_b(\Omega)$ the subspace of all $f \in C(\Omega)$ such that $f$ is bounded, $C_0(\Omega)$ the subspace of all $f \in C(\Omega)$ such that for any $\e>0$, there exists a compact set $K_\e\in \sB(\Omega)$ with $\modd{f} < \e$ on $K_\e^c$, and $C_c(\Omega)$ the subspace of all $f \in C(\Omega)$ such that $f$ has compact support. We have the inclusions $C_c(\Omega) \subseteq C_0(\Omega)\subseteq C_b(\Omega) \subseteq C(\Omega)$. For a signed measure $\mu$ on $(\Omega, \sB({\Omega}))$, we denote its Hahn-Jordan decomposition by $\mu =\mu^+ - \mu^-$, and its associated variation measure by $\modd{\mu}:= \mu^+ + \mu^-$. The \emph{total variation} of a signed measure $\mu$ is denoted by $\norm{\mu}:= \modd{\mu}(\Omega)$, and we say that $\mu$ is \emph{finite} if $\norm{\mu} < \infty$. A finite signed measure $\mu$ on $(\Omega, \sB({\Omega}))$ is called a \emph{finite signed Radon measure} if $\modd{\mu}$ is \emph{inner regular}, i.e., for each $A \in \sB(\Omega)$, \begin{align*} \modd{\mu}(A) &= \sup\{\modd{\mu}(K): K\in \sB(\Omega), K \textnormal{ compact}, K \subset A\}. \end{align*} We denote the set of all finite signed Radon measures on $(\Omega, \sB({\Omega}))$ by $\cM(\Omega)$ and the subset of all finite positive Radon measures by $\cM^+(\Omega)$. We now come to the key definition of this paper. \begin{definition}\label{def:vague} For $\mu \in \cM(\Omega)$, define the map $I_\mu:C_b(\Omega) \to \R$ by \begin{equation*} I_\mu(f) = \int_\Omega f \de \mu.
\end{equation*} We say that a sequence $\{\mu_n\} \subset \cM(\Omega)$ converges to $\mu \in \cM(\Omega)$ \begin{enumerate}[\normalfont(a)] \item \emph{weakly} if $I_{\mu_n}(f) \to I_{\mu}(f)$ for all $f \in C_b(\Omega)$,\footnote{Weak convergence is sometimes referred to as narrow convergence; see \cite[Section 8.1]{bogachev_measure_2007}.} and we write $$\weak_{n \to \infty}\mu_n = \mu;$$ \item \emph{vaguely} if $I_{\mu_n}(f) \to I_{\mu}(f)$ for all $f \in C_c(\Omega)$, and we write $$\vague_{n \to \infty}\mu_n = \mu.$$ \end{enumerate} \end{definition} Before making some comments on our definition of vague convergence, it is useful to recall the famous Riesz-Markov-Kakutani Representation Theorem; see \cite[Theorem 14.14]{aliprantis_infinite_1999} for a proof. \begin{theorem}[Riesz-Markov-Kakutani Representation Theorem]\label{thm:Riesz_Representation} Let $\Omega$ be locally compact. \begin{enumerate}[\normalfont(a)] \item The mapping $\mu \mapsto I_{\mu}$, where $I_{\mu}:C_0(\Omega)\to \R$, is an isometric isomorphism from $\cM(\Omega)$ to $(C_0(\Omega))^*$. \item The mapping $\mu \mapsto I_{\mu}$, where $I_{\mu}:C_c(\Omega)\to \R$, is a surjective isometry from $\cM(\Omega)$ to $(C_c(\Omega))^*$. \end{enumerate} \end{theorem} We also note the following straightforward result that sheds light on the relationship between parts (a) and (b) in Theorem \ref{thm:Riesz_Representation}. It follows directly from the Stone-Weierstraß Theorem \ref{thm:stoneWeierstrass} and the triangle inequality. \begin{proposition} \label{prop:vague convergence Cc vs C0} Let $\Omega$ be locally compact and $\{\mu_n\}\cup \{\mu\} \subset \cM(\Omega)$ with $\sup_{n \in \N}\norm{\mu_n} < \infty$. Then \begin{equation}\label{eq:extended_vague} I_{\mu_n}(f) \to I_\mu(f) \textnormal{ for all } f \in C_0(\Omega) \quad \text{ if and only if } \quad I_{\mu_n}(f) \to I_\mu(f) \textnormal{ for all } f \in C_c(\Omega). \end{equation} \end{proposition} Given that one can find a variety of definitions for vague convergence in the extant literature, some remarks on our definition are in order. \begin{remark} (a) Our definition of vague convergence is the most common one found in the literature; see e.g.~Berg et al \cite[Chapter 2]{berg_harmonic_1984}, Dieudonné and Macdonald \cite[Section XIII.4]{dieudonne_treatise_1970}, Kallenberg \cite[Chapter 5]{kallenberg_foundations_2021} or Klenke \cite[Section 13.2]{klenke_probability_2014}. (b) In a setting where $\Omega$ is locally compact and motivated by \Cref{thm:Riesz_Representation}, vague convergence is defined for test functions in $C_0(\Omega)$ (rather than in $C_c(\Omega)$) by Folland \cite[Section 7.3]{folland_real_1999}. However, in light of Proposition \ref{prop:vague convergence Cc vs C0}, this stronger definition coincides with our definition if the sequence of measures is uniformly bounded. (c) When $\Omega$ is a Polish space (i.e., complete and separable), the vague topology on $\cM^+(\Omega)$ (which characterises vague convergence) has alternatively been defined to be generated by the family of mappings $\pi_f:\cM^+(\Omega) \to \R_+$ where $f $ are nonnegative continuous functions with metric bounded support. This is the approach taken by Kallenberg \cite[Section 4.1]{kallenberg_random_2017} and Daley and Vere-Jones \cite[Section A2.6]{daley_introduction_2003}. Basrak and Planinić \cite{basrak_note_2019} show that this definition coincides with our definition using the theory of boundedness due to Hue \cite{hu_introduction_1966}. 
Moreover, \cite{basrak_note_2019} show explicitly that these vague topologies make $\cM^+(\Omega)$ a Polish space in its own right. In particular, this latter fact convinces us that our definition is the most natural one. \end{remark} \subsection{Organisation of the paper} The remainder of the paper is organised as follows. \cref{section:Vague_convergence} describes the relationship between vague and weak convergence in $\cM(\Omega)$, including the weak and vague convergence of the positive and negative parts in the Hahn-Jordan decomposition. The results are summarised in \Cref{table:1}. In the special case that $\Omega = \R$, \cref{section:BV} studies the relationship between the vague convergence of a sequence of measures $\{\mu_n\} \subset \cM(\R)$ and the pointwise convergence of their distribution functions $\{F_{\mu_n}\}$. \Cref{section:appendix} contains some auxiliary results needed in the main body of the paper. \section{Relationship between vague and weak convergence}\label{section:Vague_convergence} We first revisit the direct relationship between weak and vague convergence for signed measures. As a warm-up, we recall that vague convergence allows for a loss of mass in the limit, while weak convergence does not. \begin{example}\label{example:weak_vs_vague_1} Let $\mu$ be the zero measure and $\{\mu_n\} \subset \cM(\R)$ be such that $\mu_n := \da_{n} - \da_{-n}$, where for $x \in \R$, $\da_{x}$ denotes the Dirac measure at $x$. Then $\textnormal{v-lim}_{n \to \infty}\mu_n=\mu$ since for any $f \in C_c(\R)$, \begin{equation*} \lim_{n \to \infty} I_{\mu_n}(f) = \lim_{n\to \infty} (f(n) - f(-n)) = 0 = I_{\mu}(f). \end{equation*} Moreover, it holds that $\lim_{n \to \infty}\mu_n(\R) = \mu(\R)$, i.e. the \textit{signed mass} is preserved. Now take $f \in C_b(\R)$ such that \begin{equation*} f(x) = \begin{cases} x \hspace{1.5cm}\textnormal{ for } x \in (-1,1),\\ \textnormal{sign}(x)\hspace{0.5cm}\textnormal{ otherwise}. \end{cases} \end{equation*} Then we do not have $\textnormal{w-lim}_{n \to \infty}\mu_n = \mu$ since \begin{equation*} 2 = \lim_{n \to \infty}I_{\mu_n}(f) \neq \lim_{n \to \infty}I_{\mu}(f) = 0. \end{equation*} \end{example} Intuitively, what goes wrong in Example \ref{example:weak_vs_vague_1} is that mass is “sent to infinity”. The precise condition that avoids this is \emph{tightness}. \begin{definition} A sequence $\{\mu_n\} \subset \cM(\Omega)$ is called \emph{tight} if for any $\e>0$ there exists a compact set $K_\e \subset \Omega$ such that \begin{equation} \label{def:tight} \sup_{n \in \N}\modd{\mu_n}(K_\e^c) \leq \e. \end{equation} \end{definition} \begin{remark} \label{rem:tight} Since each $\mu \in \cM(\Omega)$ is tight by inner regularity of $\modd{\mu}$, we can replace \eqref{def:tight} by \begin{equation} \label{eq:tight:limsup} \limsup_{n \to \infty} \modd{\mu_n}(K_\e^c) \leq \e. \end{equation} \end{remark} Tightness is exactly the condition that lifts vague to weak convergence for positive measures. This remains true for signed measures. The proof of the next result follows from Prohorov's theorem for signed measures, see \Cref{thm:Prohorov}.\footnote{A direct proof of \Cref{prop:vague_to_weak_strict}(a) follows also from a generalisation of \cite[Lemma 5.20]{kallenberg_foundations_2021}.} \begin{proposition}\label{prop:vague_to_weak_strict} Let $\{\mu_n\} \cup \{\mu\}\subset \cM(\Omega)$. \begin{enumerate}[\normalfont(a)] \item If $\vague_{n \to \infty}\mu_n = \mu$ and $\{\mu_n\}$ is tight, then $\textnormal{w-}\lim_{n \to \infty}\mu_n = \mu$.
\item If $\textnormal{w-}\lim_{n \to \infty}\mu_n = \mu$, then $\vague_{n \to \infty}\mu_n = \mu$. If in addition $\Omega$ is Polish (i.e., complete and separable), then $\{\mu_n\}$ is tight. \end{enumerate} \end{proposition} If $\Omega$ is locally compact, the heuristic that vague convergence ignores mass “being sent to infinity” leads us to note that vague convergence in $\cM(\Omega)$ (without loss of signed mass) can be viewed as weak convergence in $\cM(\Omega_\infty)$, where $\Omega_\infty$ denotes the one-point compactification of $\Omega$; see \Cref{def:alexandroff_topology}. To this end, note that a measure $\mu \in \cM(\Omega)$ can be canonically extended to a measure $\mu^\infty \in \cM(\Omega_\infty)$ by setting $ \mu^\infty(A) := \mu(A)$ for $A \in \sB(\Omega)$ and $\modd{\mu^\infty}( \{\infty \}) :=0$. We then have the following result, which follows directly from Proposition \ref{prop:vague convergence Cc vs C0} and \Cref{thm:alexandroff_topology}. \begin{proposition}\label{prop:vague_to_weak_compactification} Let $\Omega$ be locally compact and $\{\mu_n\}\cup \{\mu\} \subset \cM(\Omega)$ with $\sup_{n \in \N} \Vert \mu_n \Vert < \infty$. Denote by $\mu^\infty_n$ and $\mu^\infty$ the canonical extension of $\mu_n$ and $\mu$, respectively. Then $\textnormal{v-}\lim_{n \to \infty }\mu_n = \mu$ and $\mu_n(\Omega) \to \mu(\Omega)$ if and only if $\textnormal{w-}\lim_{n \to \infty }\mu^\infty_n = \mu^\infty$. \end{proposition} \begin{remark} For \emph{signed} measures, weak convergence in $\cM(\Omega_\infty)$ is strictly weaker than weak convergence in $\cM(\Omega)$. Indeed, \Cref{example:weak_vs_vague_1} gives an example of $\{\mu_n\}\cup \{\mu\} \subset \cM(\Omega)$ with $\sup_{n \in \N} \Vert \mu_n \Vert < \infty$ such that $\vague_{n \to \infty}\mu_n = \mu$ and $\mu_n(\Omega) \to \mu(\Omega)$ (and hence $\textnormal{w-}\lim_{n \to \infty }\mu^\infty_n = \mu^\infty$), but $\weak_{n \to \infty}\mu_n \neq \mu$. \end{remark} We next investigate under which conditions vague convergence implies the convergence of the positive and negative parts in the Hahn--Jordan decomposition. The following result shows that the necessary and sufficient extra condition is that no mass is lost on compact sets. \begin{proposition}\label{prop:vague_convergence_of_pm} Let $\Omega$ be locally compact and $\{\mu_n\}\cup \{\mu\} \subset \cM(\Omega)$. Then $\vague_{n \to \infty}{\mu_n^\pm} = {\mu}^\pm$ if and only if $\vague_{n \to \infty}\mu_n = \mu$ and \begin{equation}\label{eq:vague_convergence_of_pm_compact_condition} \limsup_{n \to \infty} \modd{\mu_n}(K) \leq \modd{\mu}(K). \end{equation} for every compact set $K \subset \Omega$. \end{proposition} \begin{proof} First, suppose that $\vague_{n \to \infty}{\mu_n^\pm} = {\mu}^\pm$. Then clearly $\vague_{n \to \infty}\mu_n = \mu$, and \eqref{eq:vague_convergence_of_pm_compact_condition} is satisfied due to the Portmanteau Theorem in the form of \Cref{thm:portmanteau_extension}\normalfont(b). Conversely, suppose that $\vague_{n \to \infty}\mu_n = \mu$ and \eqref{eq:vague_convergence_of_pm_compact_condition} is satisfied. By \Cref{thm:appaendix_open}, for every open set $\Theta \subset \Omega$, \begin{equation*} \liminf_{n \to \infty} \modd{\mu_n}(\Theta) \geq \modd{\mu}(\Theta). \end{equation*} Thus, \Cref{thm:portmanteau_extension}\normalfont(b) gives $\vague_{n \to \infty}\modd{\mu_n} = \modd{\mu}$. 
Now $\vague_{n \to \infty}{\mu_n}^\pm = {\mu}^\pm$ follows by noting that \begin{equation*} \mu_n^+ =\half(\modd{\mu_n}+\mu_n)\quad \textnormal{and}\quad\mu_n^- =\half(\modd{\mu_n}-\mu_n). \qedhere \end{equation*} \end{proof} Note that Condition \eqref{eq:vague_convergence_of_pm_compact_condition} does not rule out “total mass being lost at infinity”. By imposing an additional restriction to mitigate this possibility, we can strengthen Proposition \ref{prop:vague_convergence_of_pm} to deduce that $\weak_{n \to \infty}{{\mu_n}^\pm} = {{\mu}^\pm}$. \begin{proposition}\label{prop:weak variation from vague} Let $\Omega$ be locally compact and $\{\mu_n\} \cup \{\mu\}\subset \cM(\Omega)$. Then $\weak_{n \to \infty}{\mu_n^\pm} = {\mu}^\pm$ if and only if $\vague_{n \to \infty}\mu_n = \mu$ and \begin{equation}\label{eq:prop:weak variation from vague} \limsup_{n\to \infty}\norm{\mu_n} \leq \norm{\mu}. \end{equation} \end{proposition} \begin{proof} First, suppose that $\weak_{n \to \infty}{\mu_n^\pm} = {\mu}^\pm$. Then $\weak_{n \to \infty}{\mu_n} = {\mu}$ and $\weak_{n \to \infty}{|\mu_n|} = {|\mu|}$. This implies in particular that $\vague_{n \to \infty}{\mu_n} = {\mu}$ and \begin{equation}\label{eq:lim_agrees} \lim_{n\to \infty}\norm{\mu_n} = \lim_{n\to \infty} \int_\Omega \de |\mu_n| = \int_\Omega \de |\mu| = \norm{\mu}. \end{equation} Conversely, suppose that $\vague_{n \to \infty}\mu_n = \mu$ and \eqref{eq:prop:weak variation from vague} is satisfied. By Propositions \ref{prop:vague_convergence_of_pm} and \ref{prop:vague_to_weak_strict}, it suffices to show that \eqref{eq:vague_convergence_of_pm_compact_condition} is satisfied and the sequence $\{\mu_n\}$ is tight. First, we establish \eqref{eq:vague_convergence_of_pm_compact_condition}. Seeking a contradiction, suppose there exists a compact set $K \subset \Omega$ such that \begin{equation} \label{eq:pf:prop:weak variation from vague:ineq 01} \limsup_{n \to \infty} \modd{\mu_n}(K) > \modd{\mu}(K). \end{equation} Since $K^c$ is open, it follows from Theorem \ref{thm:appaendix_open} that \begin{equation} \label{eq:pf:prop:weak variation from vague:ineq 02} \liminf_{n \to \infty} \modd{\mu_n}(K^c) \geq \modd{\mu}(K^c). \end{equation} Adding \eqref{eq:pf:prop:weak variation from vague:ineq 01} and \eqref{eq:pf:prop:weak variation from vague:ineq 02}, it follows that \begin{align*} \limsup_{n \to \infty} \norm{\mu_n} &= \limsup_{n \to \infty} \modd{\mu_n} (\Omega) \geq \limsup_{n \to \infty} \modd{\mu_n}(K) + \liminf_{n \to \infty} \modd{\mu_n}(K^c) > \modd{\mu}(\Omega)= \norm{\mu}, \end{align*} and we arrive at a contradiction to \eqref{eq:prop:weak variation from vague}. Next, we show that the sequence $\{\mu_n\}$ is tight. Let $\e > 0$. By inner regularity of $\modd{\mu}$, there exists a compact set $K \subset \Omega$ such that $\modd{\mu}(K^c) \leq \e$. By local compactness of $\Omega$, there exists an open set $L \supset K$ such that its closure $\overline{L} =: K_\e$ is compact. Using \eqref{eq:prop:weak variation from vague} and Theorem \ref{thm:appaendix_open}, we obtain \begin{align*} \limsup_{n \to \infty}\modd{\mu_n}(K_\e^c) &= \limsup_{n \to \infty}\curvebrac{ \norm{\mu_n} - \modd{ \mu_n}(K_\e) }\leq \limsup_{n \to \infty}\curvebrac{ \norm{\mu_n} - \modd{\mu_n}(L) } \\ &\leq \norm{\mu} - \liminf_{n \to \infty}\modd{ \mu_n}(L)\leq \norm{\mu} - \modd{\mu}(L)\leq \norm{\mu} - \modd{\mu}(K) = \modd{\mu}(K^c)\leq \e.
\qedhere \end{align*} \end{proof} To summarise, starting from vague convergence $\vague_{n \to \infty}\mu_n = \mu$, \Cref{prop:vague_to_weak_strict} tells us that we get $\weak_{n \to \infty}\mu_n = \mu$ if mass is not “lost at infinity”. \Cref{prop:vague_convergence_of_pm} asserts that if mass is not “lost on compact sets”, then we get $\vague_{n \to \infty}\mu_n^\pm = \mu^\pm$. Finally, \Cref{prop:weak variation from vague} tells us that if mass is not “lost globally”, then we even get $\weak_{n \to \infty}\mu_n^\pm = \mu^\pm$. These results are summarised in \Cref{table:1}. \begin{table}[h!] \centering \caption{{$\Omega$ is a ({Polish$^\star$, locally compact$^{\star\star}$}) metrisable space and $\{\mu_n\}\cup \{\mu\} \subset \cM(\Omega)$. } } \label{table:1} \begin{tabular}{||c c c||} \hline \textbf{Condition(s) A} & & \textbf{Condition(s) B} \\ [0.5ex] \hline\hline \makecell{$\vague_{n \to \infty} \mu_n = \mu$,\\ and $\forall \e > 0$, $\exists$ compact set $K_\e$\\ such that $\limsup_{n \to \infty} \modd{\mu_n}(K_\e^c) \leq \e$} & \makecell{$\mathbf{\Rightarrow}$ \\ ${\stackrel{\star}{\Leftarrow}}$} &$\weak_{n \to \infty} \mu_n = \mu$ \\ \hline \makecell{$\vague_{n \to \infty} \mu_n = \mu$,\\ and $\forall$ compact $K \subset \Omega$ \\ $\limsup_{n \to \infty}\modd{\mu_n}(K) \leq \modd{\mu}(K)$} & ${\stackrel{\star\star}{\Leftrightarrow}}$ & $\vague_{n \to \infty} {\mu_n}^\pm = {\mu}^\pm$ \\ [1ex] \hline \makecell{$\vague_{n \to \infty} \mu_n = \mu$,\\ and $\limsup_{n \to \infty}\norm{\mu_n}\leq \norm{\mu}$} & ${\stackrel{\star\star}{\Leftrightarrow}}$ & $\weak_{n \to \infty} {\mu_n}^\pm = {\mu}^\pm$ \\ [1ex] \hline \end{tabular} \end{table} \section{Vague convergence and convergence of distribution functions}\label{section:BV} {In this section, we study the special case that $\Omega = \R$ (with the usual order topology) and link vague convergence on $\R$ to the pointwise convergence of their distribution functions (at continuity points of the limiting measure). To this end, we first need to introduce some further pieces of notation. Let $\BV(\R)$ denote the space of all functions of bounded variation on $\R$. For $F \in \BV(\R)$ and $x \in \R$, denote the total variation of $F$ on $(-\infty, x]$ by $\bV_F(x)$ and set $F^\uparrow(x) := \half(\bV_{F}(x) + F(x))$ and $F^\downarrow(x) := \half(\bV_{F}(x) - F(x))$. Note that $\bV_F, F^\uparrow, F^\downarrow :\R \to [0,\infty]$ are nondecreasing functions. For any $\alpha \in \R$ and $\mu \in \cM(\R)$, the \emph{distribution function of $\mu$, centred at $\alpha$,} is the function $F_{\mu}^{(\alpha)} \in \BV(\R)$ defined by \begin{equation*} F_{\mu}^{(\alpha)}(x) := \begin{cases} \hspace{.25cm}\mu((\alpha,x])\quad \textnormal{if }x \geq \alpha,\\ -\mu((x,\alpha])\quad\textnormal{if }x < \alpha. \end{cases} \end{equation*} Note that $F_{\mu}^{(\alpha)}$ is right-continuous, and for any $a \leq b$ with $a, b \in \R$, \begin{equation} \label{eq:dist:meas:rel} F_{\mu}^{(\alpha)}(b) - F_{\mu}^{(\alpha)}(a) = \mu((a,b]). \end{equation} The relationship \eqref{eq:dist:meas:rel} between distribution functions and signed measures is bijective, which follows from the following result; for a proof see \cite[Theorem 5.13]{leoni_first_2017}. \begin{theorem}\label{thm:BVloc_to_g_signed_measure} Let $F \in \BV(\R)$ be right-continuous. Then there exists a unique $\mu_F\in \cM(\R)$ such that \begin{equation*} \mu_F((a,b]) = F(b) - F(a) \end{equation*} for all $a \leq b$ with $a,b \in \R$. Moreover, $\modd{\mu_F} = \mu_{\bV_F}$. 
\end{theorem} } Let $[-\infty,\infty]$ be the (affine) extended real line (with the order topology). Any $\mu \in \cM(\R)$ can canonically be extended to $\cM([-\infty,\infty])$ by setting $\modd{\mu}(\{\pm \infty\}) := 0$. Similarly, for $\alpha \in \R$, $F^{(\alpha)}_{\mu}$ can canonically be extended to $[-\infty,\infty]$ by setting $F^{(\alpha)}_\mu(\pm\infty) := \lim_{x \to \pm \infty}F^{(\alpha)}_\mu(x)$. Finally, we can define $F^{(-\infty)}, F^{(+\infty)} \in \BV(\R)$ by \begin{equation*} F_\mu^{(-\infty)}(x) := \mu((-\infty,x]) \quad \text{and} \quad F_\mu^{(+\infty)}(x) := -\mu((x,\infty)), \quad x \in \R, \end{equation*} respectively, which again can canonically be extended to $[-\infty,\infty]$. Note that $F_\mu^{(-\infty)}$ is usually called the distribution function of $\mu$ and denoted by $F_\mu$. Last but not least, we say that $x \in \R$ is a \emph{continuity point} of $\mu \in \cM(\R)$ if $\mu(\{x\}) = 0$. \subsection{Relationship between the convergence of distribution functions and vague convergence} We start our discussion on the relationship between the convergence of distribution functions and vague convergence by recalling the key result for \emph{positive} measures. This type of result is essentially known -- at least under stronger conditions, see e.g.~\cite[Proposition 7.19]{folland_real_1999}. It will follow as a corollary of our main result, Theorem \ref{thm:vague_F_equivalence} below. \begin{theorem} \label{thm:vague:pos} Let $\{\mu_n\}\cup \{\mu\}\subset \cM^+(\R)$ and $\alpha \in \R$ be a continuity point of $\mu$. Then the following are equivalent: \begin{enumerate} \item $F^{(\alpha)}_{\mu_n} \to F^{(\alpha)}_\mu$ at the continuity points of $\mu$. \item $\vague_{n \to \infty}\mu_n = \mu$. \end{enumerate} Moreover, if $\alpha = -\infty$ or $\alpha =+\infty$, the equivalence remains true if we require in addition that $\lim_{K \downarrow -\infty}\squarebrac{\limsup_{n \to \infty} \mu_n((-\infty, K])} =0$ when $\alpha = -\infty$, or $\lim_{K \uparrow \infty}\squarebrac{\limsup_{n \to \infty} \mu_n( (K, \infty))} = 0$ when $\alpha = +\infty$. \end{theorem} \begin{remark} (a) The assumption that $\alpha$ is a continuity point of $\mu$ in Theorem \ref{thm:vague:pos} is necessary. Indeed, let $\mu_n := \da_{1/n}$ and $\mu := \da_0$. Then $\vague_{n \to \infty}\mu_n = \mu$ but \begin{equation*} F^{(0)}_{\mu_n}(x) = 0 \not \to -1 = F^{(0)}_{\mu}(x), \quad x < 0. \end{equation*} (b) As a sanity check, one notes that if $\{\mu_n\} \cup\{\mu\} \subset \cM^+(\R)$ are probability measures, whence $\limsup_{n \to \infty} \Vert \mu_n \Vert = \Vert \mu \Vert = 1$, then \Cref{thm:vague:pos} together with Proposition \ref{prop:weak variation from vague} shows that $\weak_{n \to \infty} \mu_n = \mu$ if and only if $F^{(-\infty)}_{\mu_n} \to F^{(-\infty)}_\mu$ at all continuity points of $\mu$. This is often shown as a consequence of Portmanteau's theorem for weak convergence. \end{remark} Both implications ``(a) $\Rightarrow$ (b)'' and ``(b) $\Rightarrow$ (a)'' in Theorem \ref{thm:vague:pos} are false for signed measures. The first counterexample shows that $F^{(\alpha)}_{\mu_n} \to F^{(\alpha)}_\mu$ at the continuity points of $\mu$ does \emph{not} imply that $\vague_{n \to \infty}\mu_n = \mu$. It relies on $\{F^{(\alpha)}_{\mu_n}\}$ being unbounded on a compact set. 
\begin{example}\label{example:sup_neccesary} Let $F_n:\R\to \R$ be supported on $[0,2/n]$ and linear between the points $\{0,1/n,2/n\}$ such that $F_n(0) := 0 =: F_n(2/n)$ and $F_n(1/n) := 2^n$; see \Cref{figure:sup_neccesary} for a clear visualisation. For $n \in \N$, let $\mu_n := \mu_{F_n}$ according to \Cref{thm:BVloc_to_g_signed_measure} and denote by $\mu$ the zero measure. Then for any $x \in \R$, we have $F^{(0)}_{\mu_n}(x) = F_n(x) \to F^{(0)}_\mu(x)$. Now take $f \in C_c(\R)$ such that \begin{equation*} f(x) := \begin{cases} (1+x)\quad \textnormal{for }x \in [-1,0), \\ (1-x)\quad \textnormal{for } x \in [0,1], \\ ~0 \hspace{1.25cm} \textnormal{for } x \in [-1,1]^c. \end{cases} \end{equation*} Then, for $n \geq 2$, \begin{align*} I_{\mu_n}(f) & = n2^{n}\curlybrac{\int_{0}^{1/n} (1-x) \de x - \int_{1/n}^{2/n} (1-x) \de x}= \frac{2^{n}}{n}. \end{align*} Thus, $I_{\mu_n}(f) \not \to I_\mu(f) =0$. \end{example} \begin{figure}[ht] \centering{ \includegraphics[width=12cm]{example_new} \caption{A visualisation of $F_1$ and $F_2$ defined in \cref{example:sup_neccesary}\label{figure:sup_neccesary}.} } \end{figure} The next counterexample shows that $\vague_{n \to \infty} \mu_n = \mu$ does not imply $F^{(\alpha)}_{\mu_n}\to F^{(\alpha)}_\mu$ at the continuity points of $\mu$ since mass can be lost locally. This happens when the positive and negative parts of the Jordan decompositions of $\{\mu_n\}$ cancel in the limit. \begin{example}\label{example:weak_vs_vague_2} Let $\mu_n := \da_0 - \da_{1/n}$, and let $\mu$ be the zero measure. Then it is straightforward to check that $\vague_{n \to \infty} \mu_n = \mu$ (even $\weak_{n \to \infty} \mu_n = \mu$). However, we do not have $F^{(0)}_{\mu_n} \to F^{(0)}_\mu$ at all continuity points of $\mu$. Indeed, fix $x > 0$. Then for $n \geq \frac{1}{x}$, \begin{equation*} F^{(0)}_{\mu_n}(x) = \da_0((0,x])- \da_{1/n}((0,x]) = -1, \end{equation*} and hence \begin{equation*} -1 = \lim_{n \to \infty}F^{(0)}_{\mu_n}(x) \neq F^{(0)}_{\mu}(x) = 0. \end{equation*} \end{example} Thus, in order to ensure that the distribution functions converge at continuity points, one must ensure that mass is preserved locally. This motivates the following definition. \begin{definition}\label{def:no_mass} Let $\Omega$ be a metrisable space and $\{\mu_n\} \subset \cM(\Omega)$. We say that the sequence $\{\mu_n\}$ \emph{has no mass at a point} $x \in \Omega$, if for any $\e > 0$, there exists an open neighbourhood $N_{x,\e}$ of $x$, such that \begin{equation*} \limsup_{n \to \infty}\modd{\mu_n}(N_{x,\e}) \leq \e. \end{equation*} In the case where $\Omega = \R$, we say that the sequence $\{\mu_n\}$ has no mass at $ +\infty \textnormal{ (resp.}-\infty\textnormal{)}$, when the family of canonical extensions of $\{\mu_n\}$ to $\cM([-\infty,\infty])$ has no mass at $ +\infty \textnormal{ (resp.}-\infty\textnormal{)}$. \end{definition} \begin{remark} By \Cref{def:no_mass}, a family $\{\mu_n\} \subset \cM(\R)$ is tight if and only if it has no mass at $+\infty$ and $-\infty$. \end{remark} For $\{\mu_n\} \subset \cM(\R)$, the preceding discussion leads us to a clear characterisation of vague convergence of $\{\mu_n\}$ in terms of the convergence of $F_{\mu_n}$, and vice versa. \begin{theorem}\label{thm:vague_F_equivalence} Let $\alpha \in \R$ and $\{\mu_n\}\cup \{\mu\}\subset \cM(\R)$. \begin{enumerate}[\normalfont(a)] \item If $F^{(\alpha)}_{\mu_n}(x) \to F^{(\alpha)}_{\mu}(x)$ at all continuity points of $\mu$ and $\{\mu_n\}$ is bounded on compact sets, then $\vague_{n \to \infty}\mu_n = \mu$.
\item If $\vague_{n \to \infty}\mu_n = \mu$, $\alpha$ is a continuity point of $\mu$, and $\{\mu_n\}$ has no mass at the continuity points of $\mu$, then $F^{(\alpha)}_{\mu_n} \to F^{(\alpha)}_\mu$ at the continuity points of $\mu$. \end{enumerate} Moreover, if $\alpha = -\infty$ or $\alpha =+\infty$, both parts remain true if we require in addition in {\normalfont (a)} that $\{\mu_n\}$ is bounded on compact neighbourhoods of $\alpha$ (in the extended order topology) and in {\normalfont (b)} that $\{\mu_n\}$ has no mass at $\alpha$ (for the canonical extensions of $\{\mu_n\}$). \end{theorem} \begin{proof} We only establish the result for $\alpha \in \R$. The extension of the proof to $\alpha \in \{-\infty, \infty\}$ is straightforward. \normalfont(a) First, let $f \in \cC := C^1(\R) \cap C_c(\R)$. Then $f$ is supported by a compact interval $K \subset\R$, and we may assume without loss of generality that $\alpha \in K$. Moreover, $\{F_{\mu_n}^{(\alpha)}\}$ is bounded on $K$ since \begin{equation*} \modd{F_{\mu_n}^{(\alpha)}(x)} \leq \sup_{n \in \N}\modd{\mu_n}(K) < \infty, \quad x \in K. \end{equation*} Furthermore, $F^{(\alpha)}_{\mu_n}\to F^{(\alpha)}_{\mu}$ a.e.~by the fact that $\mu$ has only countably many atoms. Therefore, an integration by parts and the dominated convergence theorem give \begin{equation} \label{eq:thm:vague_F_equivalence:convergence dist} I_{\mu_n}(f) = -\int_K f'(x) F_{\mu_n}^{(\alpha)}(x)~ \den x\rightarrow -\int_K f'(x) F_{\mu}^{(\alpha)}(x)~\den x = I_{\mu}(f). \end{equation} Next, let $f \in C_c(\R)\subset C_0(\R)$ and $\e>0$. Since $\cC$ is a subalgebra of $C_0(\R)$ that separates points and vanishes nowhere, it is dense in $C_0(\R)$ by the Stone-Weierstraß Theorem; see~\cref{thm:stoneWeierstrass}. Thus, there exists $g \in \cC$ such that $\norm{f-g}_\infty < \e$. Then $f$ and $g$ are both supported by some compact interval $L$. Hence, the triangle inequality and \eqref{eq:thm:vague_F_equivalence:convergence dist} give \begin{align*} \limsup_{n \to \infty}\modd{I_{\mu_n}(f) - I_{\mu}(f)} &\leq \limsup_{n \to \infty}\curvebrac{\modd{I_{\mu_n}(f-g)} + \modd{I_{\mu_n}(g) - I_{\mu}(g)} + \modd{I_{\mu}(f-g)}}\\ &\leq \curvebrac{\sup_{n \in \N}\modd{\mu_n}(L) + \norm{\mu}}\e, \end{align*} where the middle term vanishes by \eqref{eq:thm:vague_F_equivalence:convergence dist} since $g \in \cC$. Using that $\{\mu_n\}$ is bounded on compact sets and taking $\e \downarrow 0$ establishes the claim. (b) Let $t \in \R$ be a continuity point of $\mu$. The case when $t = \alpha$ is trivial, so we may assume without loss of generality that $t > \alpha$, since $F_{\mu}^{(\alpha)}(t) = -F_{\mu}^{(t)}(\alpha)$. For $\delta > 0$, define the cut-off function $\rho_\delta \in C_c(\R)$ by \begin{equation*} \rho_\delta(x) = \begin{cases} 0 & \text{if } x \notin (\alpha - \delta, t + \delta), \\ \frac{1}{\delta} (x + \delta - \alpha) & \text{if } x \in (\alpha -\delta, \alpha), \\ 1 & \text{if } x \in [\alpha, t], \\ \frac{1}{\delta} (t + \delta - x) & \text{if } x \in (t, t+\delta),\\ \end{cases} \end{equation*} and, for $x \in \R$, denote the open ball around $x$ of radius $\delta$ by $B_\delta(x)$.
Then \begin{align} &\limsup_{n \to \infty}\curvebrac{\modd{F_{\mu_n}^{(\alpha)}(t) - F_{\mu}^{(\alpha)}(t)}}\nonumber\\ \leq~&\limsup_{n\to \infty}\Big(\modd{\int(\I_{(\alpha,t]} - \rho_\delta)(x)\mu_n(\den x) }+ \modd{\int \rho_\delta(x)\mu_n(\den x) - \int \rho_\delta(x)\mu(\den x) }\nonumber\\ &\quad +\modd{\int(\I_{(\alpha,t]} - \rho_\delta)(x)\mu(\den x) }\Big) \nonumber \\ \leq ~& \limsup_{n \to \infty}\Big( \modd{\mu_n}((\alpha - \da,\alpha]) + \modd{\mu_n}((t,t+\da)) + \modd{\mu}((\alpha - \da,\alpha])+ \modd{\mu}((t,t+\da)) \Big) \nonumber\\ \leq ~& \limsup_{n \to \infty}\modd{\mu_n}(B_\delta(\alpha)) + \limsup_{n \to \infty}\modd{\mu_n}(B_\delta(t))+ \modd{\mu}((\alpha - \da,\alpha]) + \modd{\mu}((t,t+\da)). \label{eq:pf:thm:vague_F_equivalence} \end{align} Now the result follows by taking $\delta \to 0$, noting that the first two terms on the right-hand side of \eqref{eq:pf:thm:vague_F_equivalence} vanish by the fact that $\{\mu_n\}$ has no mass at $t$ and $\alpha$, whereas the last two terms on the right-hand side of \eqref{eq:pf:thm:vague_F_equivalence} vanish by $\sigma$-continuity of $\mu$ and the fact that $\alpha$ is a continuity point of~$\mu$. \end{proof} We proceed to prove Theorem \ref{thm:vague:pos}, which is in fact a corollary to Theorem \ref{thm:vague_F_equivalence}. \begin{proof}[Proof of Theorem \ref{thm:vague:pos}] We only establish the result for $\alpha \in \R$. The extension of the proof to $\alpha \in \{-\infty, \infty\}$ is straightforward. ``(a) $\Rightarrow$ (b)''. By \Cref{thm:vague_F_equivalence}(a), it suffices to show that $\{ \mu_n \}$ is bounded on compact sets. So let $K \subset \R$ be a compact set. Then there exist continuity points $b_1, b_2 \in \R$ of $\mu$ such that $K \subset (b_1, b_2]$. By hypothesis, $\lim_{n \to \infty} F^{(\alpha)}_{\mu_{n}}(b) = F^{(\alpha)}_{\mu}(b)$ for $b \in \{b_1, b_2\}$. Moreover, $\mu_{n}((b_1, b_2]) = F^{(\alpha)}_{\mu_{n}}(b_2)-F^{(\alpha)}_{\mu_{n}}(b_1)$ for each $n \in \N$. Thus, by positivity of $\{ \mu_n \}$, \begin{equation*} \limsup_{n \to \infty}\mu_{n}(K) \leq \lim_{n \to \infty}\mu_{n}((b_1, b_2]) = \lim_{n \to \infty} F^{(\alpha)}_{\mu_{n}}(b_2) -\lim_{n \to \infty} F^{(\alpha)}_{\mu_{n}}(b_1) = F_\mu^{(\alpha)}(b_2) - F_\mu^{(\alpha)}(b_1) < \infty. \end{equation*} ``(b) $\Rightarrow$ (a)''. By \Cref{thm:vague_F_equivalence}(b), it suffices to show that $\{ \mu_n \}$ has no mass at the continuity points of $\mu$. So let $x \in \R$ be a continuity point of $\mu$ and fix $\e > 0$. For $\delta > 0$, denote by $B_\delta(x)$ the open ball around $x$ of radius $\delta$ and by $\overline{B_{\delta}(x)}$ its closure. By $\s$-continuity of $\mu$, there exists $\da >0$ such that $\mu(\overline{B_\da(x)})\leq\e$. Thus, by \Cref{thm:portmanteau_extension}(b), \begin{equation*} \limsup_{n \to \infty}\mu_n\curvebrac{B_{\da}(x)} \leq \limsup_{n \to \infty}\mu_n\curvebrac{\overline{B_{\da}(x)}} \leq \mu \curvebrac{\overline{B_{\da}(x)} } \leq \e. \qedhere \end{equation*} \end{proof} \begin{remark} The direction ``(b) $\Rightarrow$ (a)'' in Theorem \ref{thm:vague:pos} (for $\alpha \in \R$) also follows directly from ``(a) $\Rightarrow$ (c)'' in the vague Portmanteau Theorem; see \Cref{thm:portmanteau_extension}. \end{remark} Compared to Theorem \ref{thm:vague:pos}, parts (a) and (b) in Theorem \ref{thm:vague_F_equivalence} have an extra condition each. One might wonder whether either part implies the hypothesis of the other one.
We first show by a counterexample that part (a) in Theorem \ref{thm:vague_F_equivalence} does not imply the hypothesis of part (b). \begin{example}\label{example:diadic} For $n \in \N$, let $F_n:\R \to \R$ be supported on $[-2^{-n},2^{-n}]$ and linear between the points $\{k 2^{-2n} : k\in \{ -2^n, \ldots, 2^{n} \} \}$ such that \begin{equation*} F_n\curvebrac{k 2^{-2n}} :=(k~\textnormal{mod}(2)) 2^{-n}, \quad k\in \{ -2^n, \ldots, 2^{n} \}; \end{equation*} see \Cref{figure:F_n} for a clear visualisation. Set $\mu_n := \mu_{F_n}$ and let $\mu$ be the zero measure. Then $\{\mu_n\} \subset \cM(\R)$ satisfies the properties: \begin{enumerate}[\normalfont(i)] \item $\mu_n$ is supported on $[-2^{-n},2^{-n}]$, \item $|\mu_n|([-2^{-n},2^{-n}]) = 2$, \item $\modd{F^{(0)}_{\mu_n}(x)} \leq 2^{-n}$ for all $x \in \R$. \end{enumerate} It follows that $F^{(0)}_{\mu_n}(x)\to F^{(0)}_{\mu}(x)$ for all $x \in \R$ and $\{\mu_n\}$ is bounded on compact sets, but $\{\mu_n\}$ has mass at $0$, which is a continuity point of $\mu$. \end{example} \begin{figure}[ht] \centering{ \includegraphics[width=12cm]{example_2_14} \caption{A visualisation of $F_1$ and $F_2$ defined in \cref{example:diadic}\label{figure:F_n}.} } \end{figure} Fortunately, the assumption that $\{\mu_{n}\}$ has no mass at \emph{any} point is sufficient to establish a proper equivalence result. Note that this slightly stronger assumption is equivalent to the original assumption in the important case that $\mu$ does not have any atoms. \begin{theorem}\label{corollary:distil} Let $\{\mu_{n}\} \cup \{\mu\}\subset \cM(\R)$ and $\alpha \in \R$. Suppose that $\{\mu_n\}$ has no mass at any point of $\R$. Then the following are equivalent: \begin{enumerate} \item $F^{(\alpha)}_{\mu_n} \to F^{(\alpha)}_\mu$ at the continuity points of $\mu$. \item $\vague_{n \to \infty}\mu_n = \mu$. \end{enumerate} Moreover, if $\alpha = -\infty$ or $\alpha =+\infty$, the result remains true under the additional assumption that $\{\mu_n\}$ has no mass at $\alpha$ (for the canonical extensions of $\mu_n$). \end{theorem} \begin{proof} We only establish the result for $\alpha \in \R$. The extension of the proof to $\alpha \in \{-\infty, \infty\}$ is straightforward. By \Cref{thm:vague_F_equivalence}, it suffices to show that the assumption that $\{\mu_n\}$ has no mass at any point of $\R$ implies that $\{\mu_n\}$ is bounded on compact sets. So let $K \subset \R$ be a compact set. By hypothesis, for each $x \in \R$, there exists an open neighbourhood $N_x$ of $x$ such that $\limsup_{n \to \infty} |\mu_n|(N_x) \leq 1$. Moreover, by compactness, there exist $x_1, \ldots, x_J\in \R$ such that $K \subset \bigcup_{j = 1}^J N_{x_j}$. It follows that \begin{equation*} \limsup_{n \to \infty} |\mu_n|(K) \leq \limsup_{n \to \infty} |\mu_n|\bigg(\bigcup_{j = 1}^J N_{x_j}\bigg) \leq \sum_{j =1}^J \limsup_{n \to \infty} |\mu_n|(N_{x_j}) \leq J < \infty. \qedhere \end{equation*} \end{proof} We end this section by noting that the assumption that $\{\mu_n\}$ has no mass at any point of $\R$ is not enough to conclude from $\vague_{n \to \infty}\mu_n = \mu$ that $\vague_{n\to \infty}\modd{\mu_n} = \modd{\mu}$. \begin{example}\label{example:diadic_2} For $n \in \N$, let $F_n:\R \to \R$ be supported on $[-1,1]$ and linear between the points $\{k 2^{-n}: k \in \{ -2^n,\dots, 2^{n} \} \}$ such that \begin{equation*} F_n\curvebrac{k 2^{-n}} :=(k~\textnormal{mod}(2)) 2^{-n}, \quad k\in \{ -2^n, \ldots, 2^{n} \}; \end{equation*} see \Cref{figure:F_n_2} for a clear visualisation.
Set $\mu_n := \mu_{F_n}$ and let $\mu$ be the zero measure. Note that $|\mu_n| = |\mu_1|$ for each $n \in \N$. Hence it follows trivially that $\vague_{n \to \infty}|\mu_n| =|\mu_1| \neq |\mu|$. However, using that $\Vert \mu_n \Vert = 2$ and $\modd{F^{(0)}_{\mu_n}} \leq 2^{-n}$ for each $n \in \N$, it follows from \Cref{thm:vague_F_equivalence}(a) that $\vague_{n \to \infty}\mu_n = \mu$. It remains to show that $\{\mu_n\}$ has no mass at any point of $\R$. So fix $x \in \R$ and let $\e > 0$ be given. Let $N_{x,\e}$ be the open ball around $x$ of radius $\e/2$. Then \begin{equation} \limsup_{n \to \infty} \modd{\mu_n}(N_{x,\e}) \leq \e. \end{equation} \end{example} \begin{figure}[ht] \centering{ \includegraphics[width=12cm]{example_2_15} \caption{A visualisation of $F_1$ and $F_2$ defined in \cref{example:diadic_2}\label{figure:F_n_2}.} } \end{figure} \appendix \section{Appendix}\label{section:appendix} \subsection{Key results from Functional Analysis and Measure Theory}\label{section:appendix_weak_to_vague} In this appendix, we collect some key results from Functional Analysis and Measure Theory that are used throughout this paper. First, we recall the classical Stone-Weierstraß Theorem; see e.g.~\cite{de_branges_stone-weierstrass_1959}. To this end, recall that a subset $\cC \subset C_0(\Omega)$ \emph{vanishes nowhere} if for all $x\in \Omega$, there exists some $f \in \cC$ such that $f(x)\neq 0$, and it \emph{separates points} if for each $x,y \in \Omega$ with $x \neq y$, there exists $f \in \cC$ such that $f(x) \neq f(y)$. \begin{theorem}[Stone-Weierstraß Theorem] \label{thm:stoneWeierstrass} Let $\Omega$ be a locally compact Hausdorff space and $\cC$ be a subalgebra of $C_0(\Omega)$. Then $\cC$ is dense in $C_0(\Omega)$ (for the topology of uniform convergence) if and only if it separates points and vanishes nowhere. \end{theorem} Next, we state a \emph{vague} version of Portmanteau's Theorem for \emph{positive} measures. While it is very difficult to pinpoint an exact reference, the proof is very similar to that of the weak version (see e.g.~\cite[Theorem 13.16]{klenke_probability_2014}) and is left to the reader. \begin{theorem}[Vague Portmanteau Theorem for positive measures]\label{thm:portmanteau_extension} Let $\Omega$ be {a locally compact metrisable space} and $\{\mu_n\}\cup \{\mu\} \subset \cM^+(\Omega)$. Then the following are equivalent: \begin{enumerate}[\normalfont(a)] \item $\vague_{n \to \infty}\mu_n = \mu$. \item For any compact set $K \subset \Omega$, \begin{equation*} \limsup_{n \to \infty} \mu_n(K) \leq \mu(K) \end{equation*} and for any open set $\Theta \subset \Omega$, \begin{equation*} \liminf_{n \to \infty} \mu_n(\Theta) \geq \mu(\Theta). \end{equation*} \item For any Borel set $A \subset \Omega$ such that $A \subset K$ for some compact set $K$ and ${\mu}(\partial A) =0 $, \begin{equation*} \lim_{n \to \infty}{\mu_n}(A) = {\mu}(A). \end{equation*} \end{enumerate} \end{theorem} One part of the direction ``(a) $\Rightarrow$ (b)'' in the vague Portmanteau theorem (Theorem \ref{thm:portmanteau_extension}) extends to signed measures. This result is attributed to Varadarajan; see \cite{varadarajan_measures_1965}. For the convenience of the reader, we provide a short modern proof. \begin{theorem}\label{thm:appaendix_open} Let $\Omega$ be a locally compact normal Hausdorff space. Let $\{\mu_n\} \cup \{\mu\}\subset \cM(\Omega)$ and assume that $\vague_{n \to \infty}\mu_n = \mu$.
Then for any open set $\Theta \subset \Omega$, \begin{equation}\label{eq:appendix_open} \modd{\mu}(\Theta) \leq \liminf_{n \to \infty}\modd{\mu_n}(\Theta). \end{equation} In particular, $\norm{\mu} \leq \liminf_{n \to \infty}\norm{\mu_n}$. \end{theorem} \begin{proof} Let $\Theta \subset \Omega$ be open and $\e > 0$. Since $\mu$ is inner regular and $\Omega$ is normal and locally compact, as a consequence of Urysohn's lemma \cite[Lemma 2.46]{aliprantis_infinite_1999}, there exists $f \in C_c(\Omega)$ such that $\modd{f}\leq 1$, $\textnormal{supp}(f) \subset \Theta$ and \begin{equation*} \int f \de\mu \geq \modd{\mu}(\Theta) - \e. \end{equation*} Then by vague convergence of $\{\mu_n\}$, \begin{align*} \modd{\mu}(\Theta) - \e &\leq \int f \de \mu = \lim_{n \to \infty}\int f \de \mu_n \leq \liminf_{n \to \infty}\int |f| \de \modd{\mu_n} \leq \liminf_{n \to \infty}\modd{\mu_n}(\Theta). \end{align*} Now the result follows by letting $\e \downarrow 0$. \end{proof} Finally, we state a version of Prohorov's theorem for \emph{signed} measures. \begin{theorem}[Prohorov's Theorem]\label{thm:Prohorov} Let $\Omega$ be a metrisable space and $\mathbf{M} \subset \cM(\Omega)$ nonempty. \begin{enumerate}[\normalfont(a)] \item If $\mathbf{M}$ is uniformly bounded and tight, then $\mathbf{M}$ is weakly relatively sequentially compact. \item If the space $\Omega$ is Polish and $\mathbf{M}$ is weakly relatively sequentially compact, then $\mathbf{M}$ is uniformly bounded and tight. \end{enumerate} \end{theorem} \begin{proof} \normalfont(a) Take any $\{\mu_n\} \subset \mathbf{M}$. Since $\mathbf{M}$ is uniformly bounded and tight, both $\{\mu_n^+\}$ and $\{\mu_n^-\}$ are uniformly bounded and tight. By \cite[Theorem 13.29]{klenke_probability_2014}, it follows that there exists a subsequence $\{n_k\}$ such that $\weak_{k \to \infty}\mu_{n_k}^+ = \nu$, for some positive measure $\nu \in \cM(\Omega)$. Similarly, there exists a subsequence $\{n_{k_l}\} \subset \{n_k\}$ such that $\weak_{l \to \infty}\mu_{n_{k_l}}^- = \eta$, for some positive measure $\eta \in \cM(\Omega)$. Thus it follows that $\weak_{l \to \infty}\mu_{n_{k_l}} = (\nu - \eta) \in \cM(\Omega)$. \normalfont(b) See \cite[Theorem 8.6.2]{bogachev_measure_2007}. \end{proof} \subsection{One-point compactification} In this appendix, we recall the one-point compactification of a non-compact locally compact Hausdorff space. \begin{definition} \label{def:alexandroff_topology} Let $\Omega$ be a non-compact locally compact Hausdorff space with topology $\boldsymbol{\tau}$. Set $\Omega_\infty := \Omega \cup \{\infty\}$, where $\infty \not \in \Omega$, and let \begin{equation*} \boldsymbol{\tau}_\infty := \boldsymbol{\tau} \cup \{\Omega_\infty \backslash K : K \subset \Omega \textnormal{ is compact}\}. \end{equation*} Then $\Omega_\infty$ (with the topology $\boldsymbol{\tau}_\infty$) is called the \emph{one-point} compactification of $\Omega$. \end{definition} The one-point compactification of a non-compact locally compact Hausdorff space has nice properties; see \cite[Proposition 4.36]{folland_real_1999} for a proof. \begin{theorem}\label{thm:alexandroff_topology} Let $\Omega$ be a non-compact locally compact Hausdorff space. Then $\Omega_\infty$ is a compact Hausdorff space and $\Omega$ is an open dense subset of $\Omega_\infty$. Moreover, $f \in C(\Omega)$ extends continuously to $f_\infty \in C(\Omega_\infty)$ if and only if $f = f_0 +c$ where $f_0\in C_0(\Omega)$ and $c$ is a constant. In this case, the extension satisfies $f_\infty(\infty) =c$.
\end{theorem} \bibliographystyle{amsplain} \bibliography{vague} \end{document}
2205.13198v1
http://arxiv.org/abs/2205.13198v1
Constellation Design for Non-Coherent Fast-Forward Relays to Mitigate Full-Duplex Jamming Attacks
\documentclass[journal,onecolumn,12pt,twoside]{IEEEtran} \usepackage{graphicx,multicol} \usepackage{color,soul} \usepackage{mathtools} \usepackage{amsmath, amssymb} \usepackage{amsthm} \usepackage{mdframed} \usepackage{mathrsfs} \usepackage{cite} \usepackage{soul} \usepackage{algorithmic} \usepackage{array} \usepackage[font=small]{caption} \usepackage{subcaption} \usepackage{cases} \usepackage{multirow} \usepackage[draft]{hyperref} \usepackage[norelsize, linesnumbered, ruled, lined, boxed, commentsnumbered]{algorithm2e} \usepackage{setspace} \usepackage[normalem]{ulem} \usepackage{empheq} \usepackage{tikz,lipsum,lmodern} \usepackage{diagbox} \usepackage[most]{tcolorbox} \SetKwInput{KwInput}{Input} \SetKwInput{KwOutput}{Output} \newtheorem{theorem}{Theorem}\newtheorem{definition}{Definition} \newtheorem{problem}{Problem} \newtheorem{corollary}{Corollary} \newtheorem{proposition}{Proposition} \newtheorem{lemma}{Lemma} \newtheorem{rem}{Remark} \DeclareMathOperator*{\argmin}{\arg\!\min} \newcommand{\blue}{\textcolor{blue}} \newcommand{\red}{\textcolor{red}} \newcommand{\green}{\textcolor{green}} \newcommand{\nn}{\nonumber} \newcommand{\bieee}{\begin{eqnarray}{rCl}} \newcommand{\eieee}{\end{eqnarray}} \newcommand{\xp}{x^{\prime}} \newcommand{\yp}{y^{\prime}} \makeatletter \pretocmd\@bibitem{\color{black}\csname keycolor#1\endcsname}{}{\fail} \newcommand\citecolor[1]{\@namedef{keycolor#1}{\color{blue}}} \makeatother \hyphenation{op-tical net-works semi-conduc-tor} \setstretch{1.49} \linespread{1.478} \begin{document} \title{Constellation Design for Non-Coherent Fast-Forward Relays to Mitigate Full-Duplex Jamming Attacks} \author{Vivek~Chaudhary and Harshan~Jagadeesh\thanks{V. Chaudhary and H. Jagadeesh are with the Department of Electrical Engineering, Indian Institute of Technology, Delhi, 110016, India e-mail: ([email protected], [email protected]).}\thanks{Parts of this work have been presented in IEEE Globecom, 2021, Madrid, Spain \cite{my_GCOM}.}} \maketitle \begin{abstract} With potential applications to short-packet communication, we address the communication of low-latency messages in fast-fading channels in the presence of a reactive jammer. Unlike a traditional jammer, we assume a full-duplex (FD) jammer capable of detecting pre-existing countermeasures and subsequently changing the target frequency band. To facilitate reliable communication amidst a strong adversary, we propose a non-coherent fast-forward full-duplex relaying scheme wherein the victim uses a helper in its vicinity to fast-forward its messages to the base station, in addition to ensuring that the countermeasures are undetected by the FD adversary. Towards designing the constellations for the proposed scheme, we identify that existing non-coherent constellations for fast-fading channels are not applicable owing to the cooperative nature of the fast-forward scheme. As a result, we formulate an optimization problem of designing the non-coherent constellations at the victim and the helper such that the symbol-error-probability at the base station is minimized. We theoretically analyze the optimization problem and propose several strategies to compute near-optimal constellations based on the helper's data-rate and fast-forwarding abilities. We show that the proposed constellations provide near-optimal error performance and help the victim evade jamming. Finally, we also prove the scheme’s efficacy in deceiving the countermeasure detectors at the jammer.
\end{abstract} \begin{IEEEkeywords} \centering Jamming, non-coherent communication, fast-forward relays, full-duplex. \end{IEEEkeywords} \IEEEpeerreviewmaketitle \section{Introduction} The next generation of wireless networks are pitched to enable new services by providing ultra-reliable and low-latency communication links, such as control of critical infrastructure, autonomous vehicles, and medical procedures. These applications often have mission-critical updates and use short-packet communication with low-rate signalling, e.g. control channel messages (PUCCH) in 5G \cite[Sec.6.3.2]{standard}, and status updates in IoT \cite{SP_DnF}. Since these packets have strict latency constraints, they are susceptible to security threats. One popular attack model is the jamming attack, under which the receiver is unable to decode the packet, resulting in deadline violations. Although traditional countermeasures, such as Frequency Hopping (FH), were designed to mitigate jamming attacks, they might not be effective against advanced jamming attacks executed by sophisticated radio devices. Therefore, there is a need to envision new threat models enabled by sophisticated radios and to propose strong countermeasures against them to facilitate low-latency communication for the victim. Among the radio technologies that have emerged in the recent past, the two prominent ones are (i) Full-Duplex (FD) radios with advanced Self-Interference Cancellation (SIC) methods \cite{FD1,FD2,FD3,FD4,FD5,FD6,FD7}, and (ii) Cognitive radios with advanced radio-frequency chains that scan across a wide range of frequency bands. Using these developments, in-band Full-Duplex Cognitive Radios (FDCRs) \cite{FDCR1,FDCR2,FDCR3,FDCR4} have been introduced to simultaneously scan and transmit in vacant frequency bands, thus improving the network throughput. In line with the motivation of our work, FDCRs have also been studied from an adversarial viewpoint. In particular, \cite{my_PIMRC} and \cite{my_TCCN} introduce an attack model, wherein the adversary, with the help of a \emph{jam-and-measure} FDCR, injects jamming energy on the victim's frequency band and also monitors its energy level after the jamming attack. Owing to the use of jam-and-measure FDCRs, \cite{my_PIMRC} and \cite{my_TCCN} also point out that state-of-the-art countermeasures, such as FH, are ineffective, since the attacker can detect that the victim has vacated the jammed frequency band. As a consequence, they also propose several countermeasures wherein the victim node seeks assistance from a Fast-Forward FD (FFFD) \cite{FD8} relay to instantaneously forward its messages to the base station without getting detected by the FDCR. With the use of fast-forward relays, the countermeasures capture the best-case benefits in terms of facilitating low-latency communication for the victim node. Inspired by \cite{my_PIMRC} and \cite{my_TCCN}, we identify that FDCRs can also scan multiple frequencies while executing a \emph{jam-and-measure} attack on the victim's frequency. Subsequently, this can allow the adversary to compute a correlation measure between the symbols on the victim's frequency and other frequencies, thereby detecting repetition coding across frequencies, such as the FFFD-based countermeasures in \cite{my_PIMRC} and \cite{my_TCCN}. Thus, new countermeasures must be designed to mitigate adversaries that can scan multiple frequencies, in addition to monitoring the energy level on the jammed frequency band.
We also point out that the modulation techniques designed as part of the countermeasures depend on the wireless environment. For instance, in slow-fading channels, coherent modulation based countermeasures must be designed by allowing the receiver to learn the Channel State Information (CSI) through pilots. However, acquiring CSI using pilots is difficult when channel conditions vary rapidly over time. As a result, non-coherent modulation based countermeasures must be designed when jam-and-measure attacks are executed in fast-fading channels, thereby allowing the receiver to decode the information symbols without instantaneous CSI. From the above discussion, we identify that the countermeasures proposed in \cite{my_PIMRC} and \cite{my_TCCN} are not applicable to fast-fading channels, thereby opening up new problem statements in designing non-coherent modulation based countermeasures. \subsection{Contribution} In this work, we design non-coherent modulation based countermeasures to mitigate jamming attacks by FDCRs. Amongst various non-coherent modulation techniques, we use energy detection based Amplitude Shift Keying (ASK) due to its higher spectral efficiency. Towards this end, we summarize the contribution of this work as follows: \begin{enumerate} \item We envisage an attack model wherein the adversary uses an FDCR to jam a victim that has low-latency symbols to communicate with the base station. The salient feature of the adversary is that it can scan multiple frequencies in the network while executing a jamming attack on the victim's frequency. In particular, the adversary uses an Energy Detector (ED) and a Correlation Detector (CD) to detect the state-of-the-art countermeasures. (See Sec.~\ref{sec:systemmodel}) \item As a countermeasure against the proposed threat, we propose a Non-Coherent FFFD (NC-FFFD) relaying scheme, wherein an FFFD helper assists the victim by instantaneously fast-forwarding the victim's message along with its own message to the base station. The proposed NC-FFFD scheme also uses a Gold-sequence based scrambler to cooperatively pour energy on the victim's frequency in order to evade detection by the ED and the CD. With On-Off Keying (OOK) at the victim and $M-$ary ASK at the helper, we propose an approximate joint maximum a posteriori decoder and compute a closed-form expression for the Symbol Error Probability (SEP) of the NC-FFFD scheme. We then formulate an optimization problem of minimizing the SEP over the victim's and the helper's energy levels, subject to a modified average energy constraint at the helper. Subsequently, we solve the optimization problem for $M=2$ and then generalise it for $M>2$. (See Sec.~\ref{sec:NCFFFD},~\ref{sec:optimization}) \item We also consider the case when fast-forwarding at the helper is not instantaneous, i.e., imperfect fast-forwarding. Here, we propose a Delay Tolerant NC-FFFD (DT NC-FFFD) scheme, where we solve the optimization problem similarly to the $M\geq 2$ case by upper bounding the energy contributed by the victim by a small number. We show that the error performance of the DT NC-FFFD scheme is independent of the delays introduced due to imperfect fast-forwarding. For all the cases, we provide strong analytical results and, based on these results, we provide a family of algorithms to obtain near-optimal solutions to the optimization problem.
(See Sec.~\ref{sec:DT_NC-FFFD}) \item Finally, through various analytical and simulation results, we show that despite having robust detectors, the adversary cannot detect the proposed mitigating scheme with high probability. (See Sec.~\ref{sec:Covert}) \end{enumerate} \begin{figure} \vspace{-0.25in} \centering \includegraphics[scale = 0.23]{Novelty_venn} \caption{\label{fig:venn} Novelty of our work w.r.t. existing contributions.} \end{figure} \subsection{Related Work and Novelty} FD radios have found applications in aiding \cite{my_PIMRC,my_TCCN,FD8}, \cite{Aid_FD_1,Aid_FD_2,Aid_FD_3} as well as degrading \cite{my_PIMRC}, \cite{my_TCCN}, \cite{Foe_FD_1,Foe_FD_2,Foe_FD_3} a network's performance. Along the lines of \cite{my_PIMRC} and \cite{my_TCCN}, this work also uses FD radios at both the adversary and the helper node. In contrast, however, the threat model of this work is stronger than the one in \cite{my_PIMRC} and \cite{my_TCCN}, as the adversary can scan multiple frequencies to measure the correlation between symbols on different frequencies. Furthermore, the FD radio at the helper in this work implements non-coherent modulation, as against the coherent modulation in \cite{my_PIMRC} and \cite{my_TCCN}. Our work can be viewed as a constellation design problem for an NC-FFFD strategy. In the literature, \cite{ranjan,NC-p2p1,Goldsmith2,NC-p2p2,NC_Gao,new_ref} address the problem of constellation design for point-to-point Single-Input Multiple-Output (SIMO) non-coherent systems. Further, \cite{Goldsmith1}, \cite{Joint-MAC} study constellation design for the non-coherent Multiple Access Channel (MAC). However, due to the distributed setting, our work cannot be viewed as a direct extension of \cite{ranjan,NC-p2p1,Goldsmith2,NC-p2p2,NC_Gao, new_ref,Goldsmith1,Joint-MAC}, as pointed out in Fig.~\ref{fig:venn}. Some preliminary results on the NC-FFFD scheme have been presented by us in \cite{my_GCOM}, where we compute the optimal energy levels at the victim and the helper for $M=2$. In this work, we generalise those results to $M\geq 2$. Further, we provide solutions for imperfect fast-forwarding at the helper and also present an extensive analysis of the covertness of the proposed schemes. \section{System Model} \label{sec:systemmodel} We consider a \emph{crowded} network wherein multiple nodes communicate with a base station on orthogonal frequencies. In the context of this work, a crowded network means that all the nodes use orthogonal frequency bands to communicate with the base station, and that the number of frequency bands is equal to the number of nodes in the network. Fig.~\ref{fig:NCFFFD}a captures one simple instantiation of the network where two nearby nodes, Alice and Charlie, communicate with a multi-antenna base station, Bob. The uplink frequencies of Alice and Charlie are centred around $f_{AB}$ and $f_{CB}$, respectively. Alice is a single-antenna transmitter that has low-rate and low-latency messages to communicate with Bob. In contrast, Charlie, which is a Fast-Forward Full-Duplex (FFFD) node with $N_{C}$ receive-antennas and a single transmit-antenna, has arbitrary data-rate messages to communicate with no latency constraints. Here, fast-forwarding \cite{FD8} refers to Charlie's capability to instantaneously manipulate the received symbols on its uplink frequency and then multiplex them along with its information symbols to the base station.
The mobility conditions of the network are such that the wireless channels from Alice to Bob and from Charlie to Bob experience fast fading with a coherence interval of one symbol duration. Therefore, both Alice and Charlie use non-coherent Amplitude Shift Keying (ASK) for uplink communication. In particular, since Alice has low data-rate messages, she uses the On-Off Keying (OOK) scheme. On the other hand, since Charlie transmits at arbitrary data-rates, he uses an $M$-ary ASK scheme, for some $M = 2^{m}$, with $m \geq 1$. \begin{figure}[t] \vspace{-0.25in} \centering \includegraphics[width = 0.73\textwidth, height = 0.3\textwidth]{NCFFFD} \caption{\label{fig:NCFFFD}(a) A network model consisting of legitimate nodes Alice and Charlie communicating with Bob on $f_{AB}$ and $f_{CB}$, respectively. Dave is the FD adversary, jamming $f_{AB}$. He also measures the energy level on $f_{AB}$ and computes the correlation between the symbols on $f_{AB}$ and $f_{CB}$. (b) System model of the NC-FFFD relaying scheme.} \end{figure} Within the same network, we also consider a cognitive jamming adversary, named Dave, who is equipped with an FD radio that constantly jams $f_{AB}$ and also monitors it to detect any countermeasures. We assume that Dave can learn Alice's frequency band by listening to the radio resource assignment information broadcast from the base station \cite{PRB}. To monitor $f_{AB}$ for any possible countermeasures, Dave uses an Energy Detector (ED), which measures the average energy level on $f_{AB}$. Furthermore, assuming that Dave does not have knowledge of the helper's frequency band, he uses a Correlation Detector (CD) that estimates the correlation between the symbols on $f_{AB}$ and all other frequencies in the network. To envision a practical adversarial model, we assume that Dave's FD radio experiences residual Self-Interference (SI). From the above threat model, we note that Alice cannot use frequency hopping to evade the jamming attack for two reasons: (i) the absence of vacant frequency bands in the uplink, and (ii) the ED at Dave prevents Alice from completely switching her operating frequency. This is because, if Alice switches her operating frequency, Dave measures a significant dip in the energy level of $f_{AB}$, thereby detecting a countermeasure. Other than frequency hopping, if Alice implements repetition coding using frequency-diversity techniques, where she simultaneously replicates her messages on $f_{AB}$ and another frequency (say $f_{CB}$), then the CD at Dave detects a high correlation between the symbols on the two frequencies. Subsequently, a detection by either the ED or the CD compels Dave to jam $f_{CB}$, thereby degrading the network's performance. Therefore, Alice must use a countermeasure that helps her to communicate reliably with Bob while deceiving the ED and the CD at Dave. For ease of understanding, in Table~\ref{tab:notations}, we have provided the notations that appear in the rest of the paper. In the next section, we present a communication setting wherein Alice seeks assistance from Charlie to evade the jamming attack whilst deceiving the ED and the CD at Dave.
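Before doing so, the following toy snippet makes the above argument concrete. It is only an illustrative sketch (assuming \texttt{numpy}; the fading model, noise level, and observation window are placeholders and not part of our system model): vacating $f_{AB}$ produces a pronounced dip in the average energy seen by the ED, while naive repetition coding across $f_{AB}$ and $f_{CB}$ produces a noticeably higher correlation at the CD than independent traffic does.

\begin{verbatim}
# Toy illustration of the ED and CD arguments above (not the paper's analysis).
# Assumes numpy; all parameter values are illustrative.
import numpy as np

rng = np.random.default_rng(0)
L = 10000                                  # symbols observed by Dave
bits = rng.integers(0, 2, L)               # Alice's OOK symbols (energy 0 or 1)

def rx_energy(tx_energy, noise_var=0.1):
    # Received symbol energy over a unit-variance Rayleigh fading channel.
    h = (rng.normal(size=L) + 1j * rng.normal(size=L)) / np.sqrt(2)
    n = np.sqrt(noise_var / 2) * (rng.normal(size=L) + 1j * rng.normal(size=L))
    return np.abs(h * np.sqrt(tx_energy) + n) ** 2

# (i) Frequency hopping: Alice vacates f_AB, so the ED sees a large energy dip.
print("ED: mean energy on f_AB", rx_energy(bits).mean(),
      "->", rx_energy(np.zeros(L)).mean())

# (ii) Repetition coding: the same bits on f_AB and f_CB, so the CD sees a
# clearly higher correlation than it would for independent traffic.
rep_fAB, rep_fCB = rx_energy(bits), rx_energy(bits)
indep_fCB = rx_energy(rng.integers(0, 2, L))
print("CD: corr with repetition ", np.corrcoef(rep_fAB, rep_fCB)[0, 1])
print("CD: corr with independent", np.corrcoef(rep_fAB, indep_fCB)[0, 1])
\end{verbatim}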
\begin{table}[!htb] \caption{\label{tab:notations}FREQUENTLY OCCURRING NOTATIONS} \begin{minipage}[t]{.5\linewidth} \centering \scalebox{0.8}{ \begin{tabular}{ | m{2em} m{8cm} | } \hline $N_{C}$ & Number of receive antennas at Charlie \\ $N_{B}$ & Number of receive antennas at Bob \\ $M$ & Charlie's constellation size \\ $E_{A}$ & Energy of Alice's OOK symbol \\ $E_{C}$ & Energy of Charlie's multiplexed symbol \\ $\mathbf{r}_{C}$ & $N_{C}\times 1$ receive vector at Charlie \\ $\Omega_{i}$ & Energy received at Charlie corresponding to Alice's $i^{th}$ symbol \\ $\mathbf{r}_{B}$ & $N_{B}\times 1$ receive vector at Bob \\ $\mathbf{0}_{N_{C}}$ & $N_{C}\times 1$ vector of zeros \\ $\mathbf{I}_{N_{C}}$ & $N_{C}\times N_{C}$ Identity matrix \\ $S_{\ell}$ & Sum energy received at Bob on $f_{CB}$ \\ \hline \end{tabular} } \end{minipage} \begin{minipage}[t]{.5\linewidth} \centering \scalebox{0.8}{ \begin{tabular}{ | m{2em} m{8cm} | } \hline $\nu$ & Detection threshold at Charlie \\ $\rho_{\ell,\ell^{*}}$ & Detection threshold between $S_{\ell}$ and $S_{\ell^{*}}$ \\ $\lambda$ & Residual self-interference \\ $\alpha$ & Energy splitting factor \\ $L$ & Number of symbols received at Dave \\ $E_{C,f_{AB}}$ & Avg. transmit energy of Charlie on $f_{AB}$ \\ $E_{D,f_{AB}}$ & Avg. receive energy of Dave on $f_{AB}$ \\ $r_{D}(l)$ & $l^{th}$ symbol received at Dave \\ $\mathbf{P}_{FA}$ & Probability of false-alarm at Dave before implementing the countermeasure. \\ $\mathbf{P}_{D}$ & Probability of detection at Dave after implementing the countermeasure. \\ \hline \end{tabular} } \end{minipage} \end{table} \section{Non-Coherent Fast-Forward Full-Duplex Relaying Scheme (NC-FFFD)} \label{sec:NCFFFD} In order to help Alice evade the jamming attack, we propose a Non-Coherent Fast-Forward Full-Duplex (NC-FFFD) relaying scheme, described as follows: Bob directs Alice to broadcast her OOK symbols on $f_{CB}$ with a $(1-\alpha)$ fraction of her energy, where $\alpha\in (0,1)$ is a design parameter. Since Charlie also has symbols to communicate to Bob, he uses his in-band FD radio to receive Alice's symbols on $f_{CB}$, decode them, multiplex them with his own symbols, and then \emph{fast-forward} them on $f_{CB}$, such that the average energy of the multiplexed symbols is a $(1+\alpha)/2$ fraction of his original average energy. As a result, Bob observes a MAC on $f_{CB}$, and attempts to decode Alice's and Charlie's symbols jointly. To deceive the ED at Dave, the average energy level on $f_{AB}$ needs to be the same as before implementing the countermeasure. Therefore, Alice and Charlie use a Gold sequence-based scrambler as a pre-shared key to cooperatively transmit dummy OOK symbols on $f_{AB}$ by using the residual $\alpha/2$ and $(1-\alpha)/2$ fractions of their average energies, respectively. Note that the use of dummy OOK symbols on $f_{AB}$ assists in deceiving the CD at Dave. In the next section, we discuss the signal model of the NC-FFFD scheme on $f_{CB}$ so as to focus on reliable communication of Alice's symbols with the help of Charlie. \subsection{Signal Model} \label{ssec:signalmodel} Before implementing the NC-FFFD relaying scheme, Alice transmits her OOK symbols with energy $E_{A} \in \{0, 1\}$, such that $E_{A}=0$ and $E_{A}=1$ correspond to symbols $i=0$ and $i=1$, respectively. Similarly, Charlie transmits his symbols using an $M-$ary ASK scheme with average energy $1$.
When implementing the NC-FFFD relaying scheme, as illustrated in Fig.~\ref{fig:NCFFFD}b, Alice transmits her OOK symbols with energy $(1-\alpha)E_{A}$, for some $\alpha \in (0, 1)$ on $f_{CB}$. With this modification, the average transmit energy of Alice on $f_{CB}$, denoted by $\mathrm{E}_{A,f_{CB}}$, is $\mathrm{E}_{A,f_{CB}} = (1-\alpha)/2$. Since Charlie is an in-band FD radio, the received baseband vector at Charlie on $f_{CB}$ is, \bieee \mathbf{r}_{C} = \mathbf{h}_{AC}\sqrt{(1-\alpha)E_{A}} + \mathbf{h}_{CC} + \mathbf{n}_{C},\label{eq:rc} \eieee \noindent where $\mathbf{h}_{AC}\sim{\cal CN}\left(\mathbf{0}_{N_{C}},\sigma_{AC}^{2}\mathbf{I}_{N_{C}}\right)$ is $N_{C}\times 1$ channel vector. Further, $\mathbf{h}_{CC}\sim{\cal CN}\left(\mathbf{0}_{N_{C}},\lambda\mathrm{E}_{C,f_{CB}}\mathbf{I}_{N_{C}}\right)$ denotes the SI channel of the FD radio at Charlie \cite{my_TCCN}. Finally, $\mathbf{n}_{C}\sim{\cal CN}\left(\mathbf{0}_{N_{C}},N_{o}\mathbf{I}_{N_{C}}\right)$ is the $N_{C}\times 1$ Additive White Gaussian Noise (AWGN) vector. As a salient feature of the NC-FFFD scheme, Charlie uses $\mathbf{r}_{C}$ to instantaneously decode Alice's information symbol, and then transmits an energy level $E_{C}$ on $f_{CB}$, which is a function of Alice's decoded symbol and its information symbol. If $\hat{i}_{C}$ and $j\in\{1,\cdots,M\}$ denote Alice's decoded symbol and Charlie's information symbol, respectively, then the energy level, $E_{C}$ is given by \begin{equation} E_{C} = \begin{cases} \epsilon_{j} & \text{if } \hat{i}_{C}=0, \\ \eta_{j} & \text{if } \hat{i}_{C}=1. \end{cases} \label{eq:multiplexing_symbol} \end{equation} Here $\{\epsilon_{j}, \eta_{j} ~|~ j = 1, \cdots, M\}$, which represent the set of energy levels corresponding to different combinations of $\hat{i}_{C}$ and $j$, are the parameters under design consideration. Note that Charlie transmits $M$ energy levels corresponding to each value of $\hat{i}_{C}$. Towards designing $\{\epsilon_{j}, \eta_{j}\}$, the energy levels are such that, $0\leq\epsilon_{1}<\cdots<\epsilon_{M}$, $0\leq\eta_{1}<\cdots<\eta_{M}$, and $\epsilon_{j} < \eta_{j}$, if $j$ is odd and $\epsilon_{j} > \eta_{j}$, if $j$ is even. Given that Alice contributes an average energy of $(1-\alpha)/2$ on $f_{CB}$, Charlie is constrained to transmit his multiplexed symbols with an average energy of $(1+\alpha)/2$ so that the average energy on $f_{CB}$ continues to be unity. Thus, the average energy constraint on Charlie, denoted by $\mathrm{E}_{C,f_{CB}}$ is, \bieee \mathrm{E}_{C,f_{CB}} = \dfrac{1}{2M}\sum_{j=1}^{M}(\epsilon_{j}+\eta_{j}) &=& \dfrac{1+\alpha}{2}.\label{eq:new_constaint} \eieee Finally, upon transmission of the energy level $E_{C}$ from Charlie, Bob observes a multiple access channel on $f_{CB}$ from Alice and Charlie. Thus, the $N_{B}\times 1$ receive vector at Bob is, \bieee \mathbf{r}_{B} = \mathbf{h}_{AB}\sqrt{(1-\alpha)E_{A}} + \mathbf{h}_{CB}\sqrt{E_{C}} + \mathbf{n}_{B},\label{eq:rb} \eieee \noindent where $\mathbf{h}_{AB}\sim{\cal CN}\left(\mathbf{0}_{N_{B}},\sigma_{AB}^{2}\mathbf{I}_{N_{B}}\right)$, $\mathbf{h}_{CB}\sim{\cal CN}\left(\mathbf{0}_{N_{B}},\sigma_{CB}^{2}\mathbf{I}_{N_{B}}\right)$, and $\mathbf{n}_{B}\sim{\cal CN}\left(\mathbf{0}_{N_{B}},N_{o}\mathbf{I}_{N_{B}}\right)$ are the Alice-to-Bob link, Charlie-to-Bob link and the AWGN vector at Bob. We assume that all the channel realizations and noise samples are statistically independent. 
We also assume that only the channel statistics and not the instantaneous realizations of $\mathbf{h}_{AB}$ and $\mathbf{h}_{CB}$ are known to Bob. Similarly, only the channel statistics and not the instantaneous realizations of $\mathbf{h}_{AC}$ are known to Charlie. Further, due to the proximity of Alice and Charlie, we assume $\sigma_{AC}^{2}>\sigma_{AB}^{2}$ to capture the higher Signal-to-Noise Ratio (SNR) in the Alice-to-Charlie link compared to the Alice-to-Bob link. Henceforth, throughout the paper, the noise variance at Charlie and Bob is captured through $\text{SNR} = N_{o}^{-1}$, and we set $\sigma_{AB}^{2} = \sigma_{CB}^{2} = 1$. Given that the Alice-to-Bob and Charlie-to-Bob channels are non-coherent, Bob must use $\mathbf{r}_{B}$ in \eqref{eq:rb} to jointly decode the information symbols of both Alice and Charlie. Towards that direction, in the next section, we study the distribution of $\mathbf{r}_{B}$ conditioned on their information symbols. \subsection{The Complementary Energy Levels and Distribution of $\mathbf{r}_{B}$} \label{ssec:com_energy} Based on the MAC in \eqref{eq:rb}, $\mathbf{r}_{B}$ is sampled from an underlying multi-dimensional Gaussian distribution whose parameters depend on $i$, $j$, and $\hat{i}_{C}$. If $e$ denotes the error event at Charlie, then $e=0$ if $i=\hat{i}_{C}$, and $e=1$ if $i\neq \hat{i}_{C}$. Recall that for a given $j$, Charlie transmits $\epsilon_{j}$ or $\eta_{j}$ corresponding to $\hat{i}_{C}=0$ and $\hat{i}_{C}=1$, respectively. Therefore, Bob receives $\mathbf{r}_{B}$ sampled from one of two sets of $2M$ multi-dimensional Gaussian distributions, corresponding to $e=0$ and $e=1$, respectively. For example, assume that Alice transmits symbol $i=1$, and it gets decoded as $\hat{i}_{C}=0$ at Charlie. According to \eqref{eq:multiplexing_symbol}, Charlie transmits the energy level $\epsilon_{j}$, and as a result, each component of $\mathbf{r}_{B}$ is sampled from a circularly symmetric complex Gaussian distribution with mean zero and variance $1-\alpha+\epsilon_{j}+N_{o}$. On the other hand, if Charlie had decoded the symbol correctly, each component of $\mathbf{r}_{B}$ would be sampled from a circularly symmetric complex Gaussian distribution with mean zero and variance $1-\alpha + \eta_{j}+N_{o}$. To obtain these variance values, we have used the fact that $\mathbf{h}_{AB}\sim{\cal CN}\left(\mathbf{0}_{N_{B}},\mathbf{I}_{N_{B}}\right)$, $\mathbf{h}_{CB}\sim{\cal CN}\left(\mathbf{0}_{N_{B}},\mathbf{I}_{N_{B}}\right)$, and $\mathbf{n}_{B}\sim{\cal CN}\left(\mathbf{0}_{N_{B}},N_{o}\mathbf{I}_{N_{B}}\right)$. Overall, using \eqref{eq:rb}, the distribution of $\mathbf{r}_{B}$ is given as, \bieee \mathbf{r}_{B}\sim \begin{cases} {\cal CN}\left(\mathbf{0}_{N_{B}},(\epsilon_{j} + N_{o})\mathbf{I}_{N_{B}}\right) & \text{if } i=0,e=0, \\ {\cal CN}\left(\mathbf{0}_{N_{B}},(\eta_{j} + N_{o})\mathbf{I}_{N_{B}}\right) & \text{if } i=0,e=1, \\ {\cal CN}\left(\mathbf{0}_{N_{B}},(1-\alpha+\eta_{j} + N_{o})\mathbf{I}_{N_{B}}\right) & \text{if } i=1,e=0, \\ {\cal CN}\left(\mathbf{0}_{N_{B}},(1-\alpha+\epsilon_{j} + N_{o})\mathbf{I}_{N_{B}}\right) & \text{if } i=1,e=1, \end{cases} \label{eq:rb_distribution1} \eieee \noindent where we have substituted $E_{A}\!=\!0$ and $E_{A}\!=\!1$, for $i=0$ and $i=1$, respectively, and $\sigma_{AB}^{2}=\sigma_{CB}^{2}=1$ in \eqref{eq:rb}. From \eqref{eq:rb_distribution1}, it is clear that the sum of the energy levels transmitted by Alice and Charlie characterizes all the possible distributions from which $\mathbf{r}_{B}$ is sampled.
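To make this characterisation concrete, the following minimal Monte-Carlo sketch (assuming \texttt{numpy}; the values of $\alpha$, $N_{o}$, $N_{B}$ and $\{\epsilon_{j},\eta_{j}\}$ are purely illustrative and chosen only to satisfy \eqref{eq:new_constaint}) draws $\mathbf{r}_{B}$ according to \eqref{eq:rb} and verifies that, conditioned on $(i,j)$ with $e=0$, the average per-antenna energy of $\mathbf{r}_{B}$ matches the corresponding sum of energy levels in \eqref{eq:rb_distribution1}.

\begin{verbatim}
# Illustrative sketch only; parameter values are placeholders, not the
# constellations designed in this paper.
import numpy as np

rng = np.random.default_rng(1)
N_B, No, alpha = 4, 0.05, 0.6
# Illustrative M = 2 levels satisfying the average-energy constraint (1+alpha)/2.
eps = [0.0, 1.9]   # transmitted by Charlie when i_hat = 0
eta = [0.3, 1.0]   # transmitted by Charlie when i_hat = 1

def energy_rB(i, j, e, trials=200000):
    # Draw r_B^H r_B for Alice's bit i, Charlie's symbol j and error event e.
    E_A = (1 - alpha) * i
    i_hat = i if e == 0 else 1 - i
    E_C = eta[j - 1] if i_hat == 1 else eps[j - 1]
    cn = lambda var: np.sqrt(var / 2) * (rng.normal(size=(trials, N_B))
                                         + 1j * rng.normal(size=(trials, N_B)))
    r_B = cn(1.0) * np.sqrt(E_A) + cn(1.0) * np.sqrt(E_C) + cn(No)
    return (np.abs(r_B) ** 2).sum(axis=1)

for i in (0, 1):
    for j in (1, 2):
        S = (1 - alpha + eta[j - 1]) * i + eps[j - 1] * (1 - i) + No
        est = energy_rB(i, j, e=0).mean() / N_B
        print(f"(i={i}, j={j}): empirical {est:.3f}  vs  sum energy level {S:.2f}")
\end{verbatim}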
We now define an index $\ell$ that is a one-to-one function of the transmit pair $(i,j)$, such that \bieee \ell = \frac{1}{2}\left[4j - 1 + (-1)^{i+j}\right].\label{eq:def_l} \eieee \noindent Since $(i,j)\in\{0,1\}\times\{1,\cdots, M\}$, we have $\ell\in\{1,\cdots, 2M\}$. We also define two sets of energy levels, denoted by $\mathcal{S}=\{S_{\ell}~\vert~\ell = 1,\cdots,2M\}$ and $\mathcal{\overline{S}}=\{\overline{S}_{\ell}~\vert~ \ell=1,\cdots,2M\}$, that correspond to the sum of energy levels jointly contributed by Alice and Charlie, and the AWGN at Bob when $e=0$ and $e=1$, respectively. In particular, the $\ell^{th}$ elements of $\mathcal{S}$ and $\mathcal{\overline{S}}$ are given by \bieee S_{\ell} \triangleq \left(1-\alpha+\eta_{j}\right)i+\epsilon_{j}(1-i)+N_{o} \text{ and } \overline{S}_{\ell} \triangleq \left(1-\alpha+\epsilon_{j}\right)i+\eta_{j}(1-i)+N_{o}.\label{eq:map2} \eieee \noindent Since $\mathcal{\overline{S}}$ corresponds to the sum of energy levels when $e=1$, we refer to $\mathcal{\overline{S}}$ as the set of complementary energy levels. Note that there is a one-to-one correspondence between the elements of $\mathcal{S}$ and $\mathcal{\overline{S}}$, and the distributions in \eqref{eq:rb_distribution1} corresponding to $e=0$ and $e=1$, respectively. Also, note that $\mathcal{S}$ is such that $S_{1}<S_{2}<\cdots<S_{2M-1}<S_{2M}$. To exemplify the sum of energy levels that characterises $\mathbf{r}_{B}$ at Bob, in Fig.~\ref{fig:consexample}, we present the elements of $\mathcal{S}$ and $\mathcal{\overline{S}}$ for $M=2,4$. \begin{figure}[t] \vspace{-0.25in} \centering \includegraphics[scale = 0.35]{cons_example} \caption{\label{fig:consexample}Illustration of multiplexing at Charlie and corresponding energy levels received at Bob.} \end{figure} \subsection{Joint Maximum A Posteriori (JMAP) decoder for NC-FFFD Relaying Scheme} \label{ssec:JMAP} Due to the decode-multiplex-and-forward nature of the NC-FFFD scheme, we first compute the error-rates introduced by Charlie while decoding Alice's symbols, and then compute the joint error-rates at Bob. Since the Alice-to-Charlie link is non-coherent, Charlie uses energy detection to decode Alice's symbols. If $f\left(\mathbf{r}_{C}\vert i\right)$ is the PDF of $\mathbf{r}_{C}$ conditioned on Alice's symbol $i$, then the Maximum Likelihood (ML) decision rule for detection is \bieee \hat{i}_{C} = \arg\underset{i\in\{0,1\}}{\max\ }\ln\left\{f\left(\mathbf{r}_{C}\vert i\right)\right\} = \arg\underset{i\in\{0,1\}}{\min\ }N_{C}\ln(\pi\Omega_{i}) + \dfrac{\mathbf{r}_{C}^{H}\mathbf{r}_{C}}{\Omega_{i}},\label{eq:rule_rc} \eieee \noindent where $(\mathbf{r}_{C}\vert i)\sim{\cal CN}\left(\mathbf{0}_{N_{C}}, \Omega_{i}\mathbf{I}_{N_{C}}\right)$, such that $\Omega_{0} = \left(\lambda\frac{(1+\alpha)}{2}+N_{o}\right)$ and $\Omega_{1} = \left(\sigma_{AC}^{2}(1-\alpha) + \lambda\frac{(1+\alpha)}{2}+N_{o}\right)$ are the variances of the received symbol when $i=0$ and $i=1$, respectively. Here, $(\cdot)^{H}$ denotes the Hermitian operator. From first principles, the energy detection threshold at Charlie, denoted by $\nu$, is given by $\nu = N_{C}\frac{\Omega_{0}\Omega_{1}}{\Omega_{0}-\Omega_{1}}\ln\left(\frac{\Omega_{0}}{\Omega_{1}}\right)$. Using $\nu$, it is straightforward to prove the next theorem that presents the probability of error at Charlie in decoding Alice's symbols.
\begin{theorem} \label{th:P01P10} If $P_{ik}$ denotes the probability of decoding symbol $i$ as symbol $k$, for $i,k=0,1$, then $P_{01} = \frac{\Gamma\left(N_{C}, \frac{\nu}{\Omega_{0}}\right)}{\Gamma(N_{C})}$ and $P_{10} = \frac{\gamma\left(N_{C}, \frac{\nu}{\Omega_{1}}\right)}{\Gamma(N_{C})}$, where $\gamma(\cdot,\cdot)$, $\Gamma(\cdot,\cdot)$, and $\Gamma(\cdot)$ are incomplete lower, incomplete upper, and complete Gamma functions, respectively. \end{theorem} \begin{lemma} \label{lm:P10P01_alpha} The terms $P_{01}$ and $P_{10}$ are increasing functions of $\alpha$ for a given SNR, $N_{C}$, and $\lambda$. \end{lemma} \begin{proof} Consider the expression of $P_{10}$ as given in Theorem~\ref{th:P01P10}. The ratio $\nu/\Omega_{1}$ can be rewritten as $\frac{\nu}{\Omega_{1}} = N_{C}\frac{\ln\left(1+\theta\right)}{\theta}$, where $\theta =\frac{ \left(\Omega_{1}-\Omega_{0}\right)}{\Omega_{0}}$. Differentiating $\theta$ w.r.t. $\alpha$, we get $\frac{d\theta}{d\alpha} = -\frac{\left(N_{o}+\lambda\right)\sigma_{AC}^{2}}{\left(N_{o} + \lambda\frac{1+\alpha}{2}\right)^{2}}$. Since $\frac{d\theta}{d\alpha}<0$, as $\alpha$ increases $\theta$ decreases. Further, when $\theta$ decreases, $N_{C}\frac{\ln(1+\theta)}{\theta}$ increases. Therefore, $\frac{\nu}{\Omega_{1}}$ is an increasing function of $\alpha$. Finally, since $\gamma\left(N_{C}, \frac{\nu}{\Omega_{1}}\right)$ is an increasing function of $\frac{\nu}{\Omega_{1}}$, $P_{10}$ is an increasing function of $\alpha$. Using a similar argument, we can prove that $P_{01}$ is also an increasing function of $\alpha$. \end{proof} Along similar lines to Lemma~\ref{lm:P10P01_alpha}, the following lemma is also straightforward to prove. \begin{lemma} \label{lm:P10P01_nc} The terms $P_{01}$ and $P_{10}$ are decreasing functions of $N_{C}$ for a fixed SNR, $\alpha$, and $\lambda$. \end{lemma} Using $P_{01}$ and $P_{10}$ at Charlie, we study the performance of the non-coherent decoder at Bob. With $i \in \{0, 1\}$ and $j \in \{1, 2, \ldots, M\}$ denoting Alice's and Charlie's information symbols, respectively, we define a transmit pair as the two-tuple $(i,j)$. Based on $\mathbf{r}_{B}$ in \eqref{eq:rb}, the JMAP decoder at Bob is \bieee \hat{i},\hat{j} = \arg\underset{i\in\{0,1\},j\in\{1,\cdots,M\}}{\max}g\left(\mathbf{r}_{B}\vert (i,j)\right),\label{eq:JMAP} \eieee \noindent where $g\left(\mathbf{r}_{B}\vert (i,j)\right)$ is the PDF of $\mathbf{r}_{B}$, conditioned on $i$ and $j$. However, note that due to errors introduced by Charlie in decoding Alice's symbols, $g(\cdot)$ is a Gaussian mixture for each realization of $i$. The conditional PDF $g\left(\mathbf{r}_{B}\vert (i,j)\right)$ for $i = 0,1$ is, \bieee g\left(\mathbf{r}_{B}\vert (i,j)\right) &=& P_{ii}g\left(\mathbf{r}_{B}\vert (i,j), e=0\right)+ P_{i\overline{i}}g\left(\mathbf{r}_{B}\vert (i,j), e=1\right),\label{eq:JMAP_GM1} \eieee \noindent where $g\left(\mathbf{r}_{B}\vert (i,j), e=0\right)$ and $g\left(\mathbf{r}_{B}\vert (i,j), e=1 \right)$ are the PDFs given in \eqref{eq:rb_distribution1} and $\overline{i}$ is the complement of $i$.
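The mixture weights in \eqref{eq:JMAP_GM1} are determined by the cross-over probabilities of Theorem~\ref{th:P01P10}. As a quick numerical sanity check (a sketch only, assuming \texttt{numpy}/\texttt{scipy}; the values of $\sigma_{AC}^{2}$, $\lambda$, SNR and $\alpha$ below are illustrative and not those used in our simulations), the following snippet evaluates the closed forms of $P_{01}$ and $P_{10}$, compares them against Monte-Carlo estimates, and illustrates the monotonicity in $\alpha$ stated in Lemma~\ref{lm:P10P01_alpha}.

\begin{verbatim}
# Illustrative sketch of Theorem 1 and Lemma 1; parameter values are placeholders.
import numpy as np
from scipy.special import gammainc, gammaincc  # regularized lower / upper

def crossover_probs(alpha, N_C=2, sigma_AC2=4.0, lam=0.01, No=10 ** (-14 / 10)):
    Om0 = lam * (1 + alpha) / 2 + No
    Om1 = sigma_AC2 * (1 - alpha) + Om0
    nu = N_C * Om0 * Om1 / (Om0 - Om1) * np.log(Om0 / Om1)  # ED threshold at Charlie
    P01 = gammaincc(N_C, nu / Om0)   # Pr(decide 1 | i = 0)
    P10 = gammainc(N_C, nu / Om1)    # Pr(decide 0 | i = 1)
    return P01, P10, nu, Om0, Om1

# Monte-Carlo cross-check: r_C^H r_C is Gamma(N_C, Omega_i) under hypothesis i.
rng = np.random.default_rng(2)
N_C = 2
P01, P10, nu, Om0, Om1 = crossover_probs(alpha=0.6, N_C=N_C)
E0 = rng.gamma(N_C, Om0, 500000)
E1 = rng.gamma(N_C, Om1, 500000)
print("P01:", P01, "MC:", (E0 > nu).mean())
print("P10:", P10, "MC:", (E1 <= nu).mean())

# Both cross-over probabilities grow with alpha, as stated in the lemma.
for a in (0.2, 0.5, 0.8):
    print("alpha =", a, "->", crossover_probs(a)[:2])
\end{verbatim}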
Since solving the error performance of the JMAP decoder using the Gaussian mixtures in \eqref{eq:JMAP_GM1} is non-trivial, we approximate the JMAP decoder by only considering the dominant terms in the summation of \eqref{eq:JMAP_GM1} \cite{my_TCCN} to obtain \bieee \hat{i},\hat{j} = \arg\underset{i\in\{0,1\},j\in\{1,\cdots,M\}}{\max\ }\tilde{g}\left(\mathbf{r}_{B}\vert (i,j), e=0\right),\label{eq:JD} \eieee \noindent where $\tilde{g}\left(\mathbf{r}_{B}\vert (i,j),e=0\right)$ is the first term on the RHS of \eqref{eq:JMAP_GM1}. Henceforth, we refer to the above decoder as the Joint Dominant (JD) decoder. To showcase the accuracy of the approximation in \eqref{eq:JD}, we tabulate the error-rates for arbitrary energy levels and $\alpha$ for JMAP and JD decoders in Table~\ref{tab:approximation_JMAP_JD}. We compute the relative-error between error-rates of JMAP and JD decoder as, $\left\vert\frac{{P\textsubscript{JMAP}}-{P\textsubscript{JD}}}{{P\textsubscript{JMAP}}}\right\vert$ and show that the maximum relative error is within $5.55\%$. Therefore, in the next section, we discuss the error analysis using JD decoder. \begin{table}[!h] \caption{\label{tab:approximation_JMAP_JD} ERROR-RATES AT BOB WHEN USING JMAP DECODER AND JD DECODER FOR $M=2$} \vspace{-0.25cm} \begin{center} \scalebox{0.85}{ \begin{tabular}{|ccccc|} \hline \multicolumn{5}{|c|}{$N_{C}=1$, $N_{B}=8$} \\ \hline \multicolumn{1}{|c|}{SNR} & \multicolumn{1}{c|}{$\{\epsilon_{1},\epsilon_{2},\eta_{1},\eta_{2},\alpha\}$} & \multicolumn{1}{c|}{$P_{\text{JMAP}}$} & \multicolumn{1}{c|}{$P_{\text{JD}}$} & rel. error \\ \hline \multicolumn{1}{|c|}{5 dB} & \multicolumn{1}{c|}{$\{0, 1\text{e}^{-6},0.3052,2.6421, 0.4736\}$}& \multicolumn{1}{c|}{$3.06\times 10^{-1}$}& \multicolumn{1}{c|}{$3.23\times 10^{-1}$}& $5.55\times 10^{-2}$\\ \hline \multicolumn{1}{|c|}{14 dB} & \multicolumn{1}{c|}{$\{0,1\text{e}^{-6},0.5554,3.0750,0.8152\}$}& \multicolumn{1}{c|}{$8.32\times 10^{-2}$}& \multicolumn{1}{c|}{$8.42\times 10^{-2}$}& $1.20\times 10^{-2}$\\ \hline \multicolumn{1}{|c|}{25 dB} & \multicolumn{1}{c|}{$\{ 0,1\text{e}^{-6},0.4382,3.4008,0.9195\}$} & \multicolumn{1}{c|}{$1.88\times 10^{-2}$}& \multicolumn{1}{c|}{$1.90\times 10^{-2}$} & $1.06\times 10^{-2}$\\ \hline \multicolumn{5}{|c|}{$N_{C}=2$, $N_{B}=4$} \\ \hline \multicolumn{1}{|c|}{SNR} & \multicolumn{1}{c|}{$\{\epsilon_{1},\epsilon_{2},\eta_{1},\eta_{2},\alpha\}$} & \multicolumn{1}{c|}{$P_{\text{JMAP}}$} & \multicolumn{1}{c|}{$P_{\text{JD}}$} & rel. error \\ \hline \multicolumn{1}{|c|}{5 dB} & \multicolumn{1}{c|}{$\{ 0,1\text{e}^{-6},0.4334,2.7135,0.5734\}$}& \multicolumn{1}{c|}{$3.735\times 10^{-1}$}& \multicolumn{1}{c|}{$3.782\times 10^{-1}$}& $1.25\times 10^{-2}$\\ \hline \multicolumn{1}{|c|}{14 dB}& \multicolumn{1}{c|}{$\{0,1\text{e}^{-6},0.5353,3.1645,0.8499\}$}& \multicolumn{1}{c|}{$1.32\times 10^{-1}$} & \multicolumn{1}{c|}{$1.33\times 10^{-1}$}& $7.57\times 10^{-4}$ \\ \hline \multicolumn{1}{|c|}{25 dB} & \multicolumn{1}{c|}{$\{0,1\text{e}^{-6},0.3228,3.6082,0.9655\}$}& \multicolumn{1}{c|}{$2.43\times 10^{-2}$} & \multicolumn{1}{c|}{$2.47\times 10^{-2}$} & $1.64\times 10^{-2}$\\ \hline \end{tabular} } \end{center} \end{table} \subsection{Joint Dominant (JD) Decoder for NC-FFFD Relaying Scheme} \label{ssec:JD} From \eqref{eq:def_l}, we observe that there exist a one-to-one correspondence between $(i, j)$ and $\ell$. Thus, the JD decoder in \eqref{eq:JD} can be rewritten as, $\hat{\ell} = \arg\underset{\ell \in\{1,\ldots, 2M\}}{\max\ }\tilde{g}\left(\mathbf{r}_{B}\vert \ell, e=0\right)$. 
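For completeness, the rewritten decoder admits a very short implementation. The snippet below is only an illustrative sketch (assuming \texttt{numpy}, the illustrative $M=2$ energy levels from the earlier sketch, and dropping the nearly equal a-priori weights $P_{00}\approx P_{11}$): it evaluates the $2M$ dominant log-likelihoods and returns the maximising transmit pair.

\begin{verbatim}
# Illustrative sketch of the JD decoder; the energy levels are placeholders.
import numpy as np

N_B, No, alpha = 4, 0.05, 0.6
eps, eta = [0.0, 1.9], [0.3, 1.0]
# Transmit pairs (i, j) listed so that the dominant (e = 0) variances S_l
# appear in increasing order, l = 1, ..., 2M.
pairs = [(0, 1), (1, 1), (1, 2), (0, 2)]
S = [(1 - alpha + eta[j - 1]) * i + eps[j - 1] * (1 - i) + No for (i, j) in pairs]

def jd_decode(r_B):
    # Joint-dominant decoder: argmax of the e = 0 Gaussian log-likelihoods.
    E = np.sum(np.abs(r_B) ** 2)
    log_like = [-N_B * np.log(np.pi * s) - E / s for s in S]
    return pairs[int(np.argmax(log_like))]

# Example: one received vector drawn under (i, j) = (1, 2) with e = 0.
rng = np.random.default_rng(3)
r_B = np.sqrt(S[2] / 2) * (rng.normal(size=N_B) + 1j * rng.normal(size=N_B))
print(jd_decode(r_B))
\end{verbatim}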
Henceforth, a transmit pair jointly chosen by Alice and Charlie will be denoted by the index $\ell \in \{1, 2, \ldots, 2M\}$. As a consequence, the JD decoder only considers the likelihood functions corresponding to the $2M$ dominant energy levels in $\mathcal{S}$ with the assumption that no decoding error is introduced by Charlie. Let $\bigtriangleup_{\substack{\ell\rightarrow \ell^{*}\\ \ell \neq \ell^{*}}}$ denotes the event when Bob incorrectly decodes an index $\ell$ to $\ell^{*}$ such that $\ell \neq \ell^{*}$. Then, $\Pr\left(\bigtriangleup_{\substack{\ell\rightarrow \ell^{*}\\ \ell \neq \ell^{*}}}\right)=\Pr\left(\tilde{g}\left(\mathbf{r}_{B}\vert\ell, e=0\right)\leq \tilde{g}\left(\mathbf{r}_{B}\vert \ell^{*}, e=0\right)\right)$. To characterize $\Pr\left(\bigtriangleup_{\substack{\ell\rightarrow \ell^{*}\\ \ell \neq \ell^{*}}}\right)$, one should determine the energy detection threshold between the energy levels corresponding to ${\ell}$ and ${\ell^{*}}$. Towards this direction, we use the following lemma that computes the energy detection threshold between $S_{\ell}$ and $S_{\ell^{*}}$. \begin{lemma} \label{lm:rho} If $S_{\ell}$ denotes the energy level jointly contributed by Alice and Charlie corresponding to the transmitted index $\ell$ and $S_{\ell^{*}}$ denotes the energy level corresponding to the decoded index $\ell^{*}$ such that $\ell \neq \ell^{*}$, then the probability of the event $\bigtriangleup_{\substack{\ell\rightarrow \ell^{*}\\ \ell \neq \ell^{*}}}$ is given by $\Pr\left(\bigtriangleup_{\substack{\ell\rightarrow \ell^{*}\\ \ell \neq \ell^{*}}}\right) = \Pr(\mathbf{r}_{B}^{H}\mathbf{r}_{B} \geq \rho_{\ell,\ell^{*}})$, where the threshold $\rho_{\ell,\ell^{*}}$ is given by, $\rho_{\ell,\ell^{*}} \approx N_{B}\frac{S_{\ell}S_{\ell^{*}}}{S_{\ell^{*}}-S_{\ell}}\ln\left(\frac{S_{\ell^{*}}}{S_{\ell}}\right)$. \end{lemma} \begin{proof} Bob uses JD decoder and compares the conditional PDF of $\mathbf{r}_{B}$ conditioned on $\ell$ and $\ell^{*}$ as, \bieee \Pr\left(\bigtriangleup_{\substack{\ell\rightarrow \ell^{*}\\ \ell \neq \ell^{*}}}\right) &=& \Pr\left(\dfrac{\tilde{g}\left(\mathbf{r}_{B}\vert \ell^{*}, e=0\right)}{\tilde{g}\left(\mathbf{r}_{B}\vert \ell, e=0\right)}\leq 1\right)= \Pr\left(\dfrac{\dfrac{P_{i^{*}i^{*}}}{\left(\pi S_{\ell^{*}}\right)^{N_{B}}}\exp\left(-\frac{\mathbf{r}_{B}^{H}\mathbf{r}_{B}}{S_{\ell^{*}}}\right)}{\dfrac{P_{ii}}{\left(\pi S_{\ell}\right)^{N_{B}}}\exp\left(-\frac{\mathbf{r}_{B}^{H}\mathbf{r}_{B}}{S_{\ell}}\right)}\leq 1\right)= \Pr\left(\mathbf{r}_{B}^{H}\mathbf{r}_{B}\geq \rho_{\ell,\ell^{*}}\right),\nn \eieee \noindent where $\rho_{\ell,\ell^{*}}=\frac{S_{\ell}S_{\ell^{*}}}{S_{\ell^{*}}-S_{\ell}}\left(N_{B}\ln\left(\frac{S_{\ell^{*}}}{S_{\ell}}\right) + \ln\left(\frac{P_{ii}}{P_{i^{*}i^{*}}}\right)\right)$ and $P_{ii}$ and $P_{i^{*}i^{*}}$ are a priori probabilities of index $\ell$ and $\ell^{*}$, respectively. It is straightforward that when $i=i^{*}$, $\ln\left(\frac{P_{ii}}{P_{i^{*}i^{*}}}\right)=0$. Further, since $\ln\left(\frac{P_{00}}{P_{11}}\right)\approx 0$ for $N\geq 1$, when $i\neq i^{*}$, we have $\ln\left(\frac{P_{ii}}{P_{i^{*}i^{*}}}\right)=\ln\left(\frac{P_{00}}{P_{11}}\right)\approx 0$ and $\ln\left(\frac{P_{ii}}{P_{i^{*}i^{*}}}\right)=\ln\left(\frac{P_{11}}{P_{00}}\right)\approx 0$, for $i=0$ and $i=1$, respectively. Thus, $\rho_{\ell,\ell^{*}}\approx N_{B}\frac{S_{\ell}S_{\ell^{*}}}{S_{\ell^{*}}-S_{\ell}}\ln\left(\frac{S_{\ell^{*}}}{S_{\ell}}\right)$. 
\end{proof} Since $S_{1}<S_{2}\cdots<S_{2M-1}<S_{2M}$, the set of relevant thresholds for the JD decoder are $\{\rho_{\ell,\ell + 1}, \ell = 1, 2, \ldots, 2M-1\}$. Therefore, based on the received energy $\mathbf{r}_{B}^{H}\mathbf{r}_{B}$, the JD decoder for detecting $\hat{\ell}$ can be realized using an energy detector as, $\hat{\ell}=\ell$, if $\rho_{\ell - 1,\ell} < \mathbf{r}_{B}^{H}\mathbf{r}_{B} \leq \rho_{\ell,\ell + 1 }$, where $\rho_{0,1}=0$ and $\rho_{2M,\infty}=\infty$. Using $\hat{\ell}$, the average Symbol Error Probability (SEP), denoted by $P_{e}$, is given by, $P_{e} = \frac{1}{2M} \sum_{\ell = 1}^{2M} P_{e, \ell}$, where $P_{e,\ell} = \Pr\left(\ell\neq\ell^{*}\right)$ is the probability that Bob decodes a transmitted index $\ell$ as $\ell^{*}$, where $\ell\neq\ell^{*}$. Since, the decision of the energy detector is based on the received energies at Bob, we notice that sum energy levels can be from $\mathcal{S}$, when $e=0$ or $\mathcal{\overline{S}}$, when $e=1$. Therefore, $P_{e,\ell} = \Pr(e=0)\Pr\left(\ell\neq\ell^{*}\vert e=0\right) + \Pr(e=1)\Pr\left(\ell\neq\ell^{*}\vert e=1\right)$. Thus, we have \begin{equation} P_{e, \ell}= \begin{cases} P_{00}P_{e, S_{\ell}} + P_{01}P_{e, \overline{S}_{\ell}} & \text{if }\ell(\mathrm{mod}4)\leq 1, \\ P_{11}P_{e, S_{\ell}} + P_{10}P_{e, \overline{S}_{\ell}} & \text{if } \text{otherwise}, \end{cases} \label{eq:Pe_formal2} \end{equation} \noindent where $P_{e, S_{\ell}}$ and $P_{e, \overline{S}_{\ell}}$ are the terms associated with erroneous decision in decoding $\ell$, when $e=0$ and $e=1$, respectively. Since $\mathbf{r}_{B}^{H}\mathbf{r}_{B}$ is gamma distributed, we get $P_{e, S_{\ell}}$ as given in \eqref{eq:errors_dominant}. \begin{small} \begin{equation} P_{e,S_{\ell}}= \begin{cases} 1-\Pr\left(\mathbf{r}_{B}^{H}\mathbf{r}_{B}\leq\rho_{1, 2}\vert e=0\right) = \dfrac{\Gamma\left(N_{B}, \frac{\rho_{1,2}}{S_{1}}\right)}{\Gamma\left(N_{B}\right)} & \text{for }\ell=1, \\ 1-\Pr\left(\rho_{\ell-1,\ell}\leq\mathbf{r}_{B}^{H}\mathbf{r}_{B}\leq\rho_{\ell, \ell+1}\vert e=0\right) = \dfrac{\gamma\left(N_{B}, \frac{\rho_{\ell-1,\ell}}{S_{\ell}}\right)}{\Gamma\left(N_{B}\right)} + \dfrac{\Gamma\left(N_{B}, \frac{\rho_{\ell,\ell+1}}{S_{\ell}}\right)}{\Gamma\left(N_{B}\right)} & \text{for } 2\leq\ell\leq 2M-1, \\ 1-\Pr\left(\mathbf{r}_{B}^{H}\mathbf{r}_{B}>\rho_{2M-1,2M}\vert e=0\right) = \dfrac{\gamma\left(N_{B}, \frac{\rho_{2M-1,2M}}{S_{2M}}\right)}{\Gamma\left(N_{B}\right)} & \text{for } \ell=2M. \end{cases} \label{eq:errors_dominant} \end{equation} \end{small} \noindent Since Bob uses the same thresholds to compute $P_{e, \overline{S}_{\ell}}$, we obtain the expression of $P_{e, \overline{S}_{\ell}}$, by replacing $S_{\ell}$ by $\overline{S}_{\ell}$ in \eqref{eq:errors_dominant}. Finally, substituting \eqref{eq:Pe_formal2}, \eqref{eq:errors_dominant}, and corresponding $P_{e, \overline{S}_{\ell}}$ in $P_{e}$, we get, \begin{multline} P_{e} = \frac{1}{2M}\left[ \sum_{\ell_{1} = 1}^{M}\left(P_{00}P_{e, S_{\frac{1}{2}\left(4\ell_{1}+(-1)^{\ell_{1}}-1\right)}} + P_{01}P_{e, \overline{S}_{\frac{1}{2}\left(4\ell_{1}+(-1)^{\ell_{1}}-1\right)}}\right)\right.\\ \ \left. 
+ \sum_{\ell_{2} = 1}^{M}\left(P_{11}P_{e, S_{\frac{1}{2}\left((-1)^{\ell_{2}}\left(4(-1)^{\ell_{2}}\ell_{2} + (-1)^{\ell_{2}+1}-1\right)\right)}} + P_{10}P_{e, \overline{S}_{\frac{1}{2}\left((-1)^{\ell_{2}}\left(4(-1)^{\ell_{2}}\ell_{2} + (-1)^{\ell_{2}+1}-1\right)\right)}}\right)\right].\label{eq:Pe} \end{multline} \section{Optimization of Energy Levels} \label{sec:optimization} In this section, we formulate an optimization problem in order to compute the optimal energy levels at Alice and Charlie. In particular, as given in \eqref{opt}, we fix $N_{C}$ and $N_{B}$ and then optimise the energy levels, $\{\epsilon_{j},\eta_{j}\}$, and $\alpha$ so as to minimise the SEP subject to the energy constraint in \eqref{eq:new_constaint}. \begin{mdframed} \bieee \underset{\epsilon_{1},\cdots,\epsilon_{M}, \eta_{1},\cdots,\eta_{M}, \alpha}{\min} \quad & & P_{e}\label{opt}\\ \text{subject to:} \quad & &\sum_{j=1}^{M}(\epsilon_{j}+\eta_{j}) = M(1+\alpha), \epsilon_{1}<\cdots<\epsilon_{M}, \eta_{1}<\cdots<\eta_{M}, 0<\alpha<1, \nn\\ & & \epsilon_{j}<\eta_{j} \text{ for odd } j, \ \epsilon_{j}>\eta_{j} \text{ for even } j, \text{ where } j\in\{1,\cdots, M\}.\nn \eieee \end{mdframed} \noindent One can solve the above optimization problem by first formulating the Lagrangian and then solving the resulting system of $2M+2$ non-linear equations. Since solving a system of non-linear equations is complex in general, we use an alternate approach for minimising $P_{e}$ that exploits its analytical structure, as discussed in the next section. We first discuss the optimization of energy levels for $M=2$ and then propose a generalised approach for $M=2^{m}$ such that $m > 1$. \subsection{Optimization of Energy Levels for $M=2$} \label{ssec:Globecom} The expression of the SEP in \eqref{eq:Pe} when $M=2$ is given as, \bieee P_{e}\! =\! \dfrac{1}{4}\left(P_{00}\left(P_{e,S_{1}}\! +\!P_{e,S_{4}}\right) \!+\! P_{11}\left(P_{e,S_{2}}\! +\!P_{e,S_{3}}\right)\! +\! P_{01}\left(P_{e,\overline{S}_{1}}\! +\!P_{e,\overline{S}_{4}}\right)\! +\! P_{10}\left(P_{e,\overline{S}_{2}}\! +\! P_{e,\overline{S}_{3}}\right)\right).\label{eq:Pe_M2} \eieee Instead of using $P_{e}$ for the optimization problem, we use an upper bound on $P_{e}$, obtained by upper-bounding each of the terms $P_{e,\overline{S}_{1}}$, $P_{e,\overline{S}_{2}}$, $P_{e,\overline{S}_{3}}$, and $P_{e,\overline{S}_{4}}$ by $1$, such that, \bieee P_{e}\leq P_{e}^{\prime}\triangleq \dfrac{1}{4}\left(P_{00}\left(P_{e,S_{1}}\! +\!P_{e,S_{4}}\right) \!+\! P_{11}\left(P_{e,S_{2}}\! +\!P_{e,S_{3}}\right)\! +\! 2\left(P_{01}+P_{10}\right)\right).\label{eq:Pe_M2U} \eieee \noindent Henceforth, we optimise the energy levels $\epsilon_{1}$, $\epsilon_{2}$, $\eta_{1}$, and $\eta_{2}$, and the factor $\alpha$ so as to minimise $P_{e}^{\prime}$.\footnote{Later, through simulation results, we show that optimizing \eqref{eq:Pe_M2U} gives us near-optimal results.} Thus, the modified optimization problem when $M=2$ is, \bieee \underset{\epsilon_{1},\epsilon_{2}, \eta_{1},\eta_{2}, \alpha}{\min} \quad & & P_{e}^{\prime}\label{opt:M2}\\ \text{subject to:} \quad & &\epsilon_{1}+\epsilon_{2}+\eta_{1}+\eta_{2} = 2(1+\alpha), \epsilon_{1}<\epsilon_{2}, \eta_{1}<\eta_{2},0<\alpha<1, \epsilon_{1}<\eta_{1}<\eta_{2}<\epsilon_{2}.\nn \eieee In order to minimise $P_{e}^{\prime}$, it is clear that we must minimise each $P_{e,S_{\ell}}$, for $\ell=1,\cdots,4$, in \eqref{opt:M2}. Towards this direction, in the next lemma, we show that when $\epsilon_{1}=0$, $P_{e,S_{1}}$ is minimum.
\begin{lemma}\label{lm:epsilon1} The expression $P_{e,S_{1}} = \dfrac{\Gamma\left(N_{B}, \frac{\rho_{1,2}}{S_{1}}\right)}{\Gamma\left(N_{B}\right)}$ is minimum when $\epsilon_{1}=0$. \end{lemma} \begin{proof} The expression of $P_{e,S_{1}}$ is an upper incomplete Gamma function. Since the upper incomplete Gamma function is a decreasing function of its second argument, $\Gamma\left(N_{B}, \frac{\rho_{1,2}}{S_{1}}\right)$ is a decreasing function of $\frac{\rho_{1,2}}{S_{1}}$. Therefore, $P_{e,S_{1}}$ is minimum when $\frac{\rho_{1,2}}{S_{1}}$ is maximum, and $\frac{\rho_{1,2}}{S_{1}}$ is maximum when $S_{1}$ is minimum. Since $S_{1}=\epsilon_{1}+N_{o}$, $S_{1}$ is minimum when $\epsilon_{1}=0$. This completes the proof. \end{proof} \begin{lemma} \label{lm:P12P21} At high SNR, $P_{e,S_{1}}\ll 1$ and $P_{e,S_{2}}\approx \dfrac{\Gamma\left(N_{B}, \frac{\rho_{2,3}}{S_{2}}\right)}{\Gamma\left(N_{B}\right)}$. \end{lemma} \begin{proof} We first prove that $P_{e,S_{1}}\ll 1$. We have $P_{e,S_{1}}=\frac{\Gamma\left(N_{B}, \frac{\rho_{1,2}}{S_{1}}\right)}{\Gamma\left(N_{B}\right)}$. The ratio $\frac{\rho_{1,2}}{S_{1}}$ is expressed as $N_{B}\frac{\ln(1+\kappa_{1})}{\kappa_{1}}$, where $\kappa_{1}=(S_{1}-S_{2})/S_{2}$. Further, since $S_{1}<S_{2}$, $-1<\kappa_{1}<0$. Also, the ratio $\frac{\ln(1+\kappa_{1})}{\kappa_{1}}$ follows the inequalities, $\frac{2}{2+\kappa_{1}}\leq\frac{\ln(1+\kappa_{1})}{\kappa_{1}}\leq \frac{2+\kappa_{1}}{2+2\kappa_{1}}$, for $\kappa_{1} > -1$. Therefore, $\frac{\Gamma\left(N_{B}, \frac{2N_{B}}{2+\kappa_{1}}\right)}{\Gamma\left(N_{B}\right)}\geq\frac{\Gamma\left(N_{B}, \frac{\rho_{1,2}}{S_{1}}\right)}{\Gamma\left(N_{B}\right)}\geq \frac{\Gamma\left(N_{B}, N_{B}\frac{2+\kappa_{1}}{2+2\kappa_{1}}\right)}{\Gamma\left(N_{B}\right)}$, where both inequalities hold because $\Gamma\left(N_{B}, \frac{\rho_{1,2}}{S_{1}}\right)$ is a decreasing function of $\frac{\rho_{1,2}}{S_{1}}$. Thus, $\frac{\Gamma\left(N_{B}, \frac{\rho_{1,2}}{S_{1}}\right)}{\Gamma\left(N_{B}\right)}\leq \frac{\Gamma\left(N_{B}, \frac{2N_{B}}{2+\kappa_{1}}\right)}{\Gamma\left(N_{B}\right)} \approx \frac{\Gamma\left(N_{B}, 2N_{B}\right)}{\Gamma\left(N_{B}\right)}\ll 1$, where the approximation holds because $S_{1}\approx 0$ at high SNR, so that $2/(2+\kappa_{1}) = 2S_{2}/(S_{1}+S_{2})\approx 2$. This proves the first part of the lemma. On similar lines, we can prove that at high SNR, $\frac{\gamma\left(N_{B}, \frac{\rho_{1,2}}{S_{2}}\right)}{\Gamma\left(N_{B}\right)}\leq\frac{\gamma\left(N_{B}, \frac{N_{B}}{2}\right)}{\Gamma\left(N_{B}\right)}\ll 1$, and therefore, $P_{e,S_{2}} \approx \frac{\Gamma\left(N_{B}, \frac{\rho_{2,3}}{S_{2}}\right)}{\Gamma\left(N_{B}\right)}$. \end{proof} Using the results of Lemma~\ref{lm:P12P21}, the expression of $P_{e}^{\prime}$ is approximated as, \bieee P_{e}^{\prime}\approx\dfrac{1}{4}\left(P_{00}P_{e,S_{4}} \!+\! P_{11}\left(P_{e,S_{2}}\! +\!P_{e,S_{3}}\right)\! +\! 2\left(P_{01}+P_{10}\right)\right).\label{eq:Pe_app} \eieee From \eqref{opt:M2} we have 5 variables, resulting in a 5-dimensional search space to find the optimal set $\{\epsilon_{1},\epsilon_{2},\eta_{1},\eta_{2},\alpha\}$. Using the result of Lemma~\ref{lm:epsilon1}, we have $\epsilon_{1}=0$. Further, rearranging the sum energy constraint, we express $\epsilon_{2}$ as a function of $\eta_{1}$, $\eta_{2}$, and $\alpha$; therefore, $\epsilon_{2} = 2(1+\alpha)-(\eta_{1}+\eta_{2})$.
Thus, the search space is reduced to 3 dimensions. Through simulations, we observe that when we fix $\eta_{1}$ and $\alpha$, $P_{e}^{\prime}$ exhibits a unimodal behaviour w.r.t. $\eta_{2}$. Similarly, $P_{e}^{\prime}$ is unimodal w.r.t. $\alpha$, when we fix $\eta_{1}$ and $\eta_{2}$. The variation of $P_{e}^{\prime}$, the increasing terms of $P_{e}^{\prime}$, and the decreasing terms of $P_{e}^{\prime}$, w.r.t. $\eta_{2}$ and $\alpha$ are shown in Fig.~\ref{fig:unimodal_eta2} and Fig.~\ref{fig:unimodal_alpha}, respectively. Further, we also observe that the unique mode in both the cases is very close to the intersection of the increasing and decreasing terms of $P_{e}^{\prime}$. Therefore, in the next two theorems, we prove that the increasing and decreasing terms of $P_{e}^{\prime}$ w.r.t. $\eta_{2}$ and $\alpha$ have a unique intersection that is close to the local minimum of $P_{e}^{\prime}$. \begin{figure}[!htb] \vspace{-0.25in} \centering \begin{minipage}[t]{.48\textwidth} \centering \includegraphics[width = 0.66\textwidth, height = 0.6\linewidth]{unimodality_Pe_eta2} \caption{\label{fig:unimodal_eta2} Variation of $P_{e}^{\prime}$, its increasing and decreasing terms as a function of $\eta_{2}$, when $\eta_{1}$ and $\alpha$ are fixed.} \end{minipage} \hfill \begin{minipage}[t]{0.48\textwidth} \centering \includegraphics[width = 0.66\textwidth, height = 0.6\linewidth]{unimodality_Pe_alpha} \caption{\label{fig:unimodal_alpha}Variation of $P_{e}^{\prime}$, its increasing and decreasing terms as a function of $\alpha$, when $\eta_{1}$ and $\eta_{2}$ are fixed.} \end{minipage} \end{figure} \begin{theorem} \label{th:Pe_eta2} For a given $\eta_{1}$ and $\alpha$, the increasing and decreasing terms in $P_{e}^{\prime}$ intersect only once for $\eta_{2}\in\left(\eta_{1},1+\alpha-0.5\eta_{1}\right)$. \end{theorem} \begin{proof} We first determine the increasing and decreasing terms of $P_{e}^{\prime}$. Towards this direction, we first analyse the behaviour of each term in \eqref{eq:Pe_app}, i.e., $P_{e,S_{2}}$, $P_{e,S_{3}}$, and $P_{e,S_{4}}$, as a function of $\eta_{2}$, where \bieee P_{e,S_{2}} = \dfrac{\Gamma\left(N_{B}, \frac{\rho_{2,3}}{S_{2}}\right)}{\Gamma\left(N_{B}\right)},\ \ P_{e,S_{3}} = \dfrac{\Gamma\left(N_{B}, \frac{\rho_{3,4}}{S_{3}}\right)}{\Gamma\left(N_{B}\right)} + \dfrac{\gamma\left(N_{B}, \frac{\rho_{2,3}}{S_{3}}\right)}{\Gamma\left(N_{B}\right)},\ \ P_{e,S_{4}} = \dfrac{\gamma\left(N_{B}, \frac{\rho_{3,4}}{S_{4}}\right)}{\Gamma\left(N_{B}\right)}.\nn \eieee \noindent Consider the term $P_{e,S_{2}}$, where the ratio $\frac{\rho_{2,3}}{S_{2}}$ is given by $N_{B}\frac{\ln(1+\kappa_{3})}{\kappa_{3}}$, where $\kappa_{3}=(S_{2}-S_{3})/S_{3}$. Since $S_{2}<S_{3}$, $\kappa_{3}<0$. Differentiating $\kappa_{3}$ w.r.t. $\eta_{2}$ we get $-S_{2}/S_{3}^{2}$. Therefore, as $\eta_{2}$ increases, $\kappa_{3}$ decreases. Since $\ln(1+\kappa_{3})/\kappa_{3}$ is a decreasing function of $\kappa_{3}$, as $\kappa_{3}$ decreases, $N_{B}{\ln(1+\kappa_{3})}/{\kappa_{3}}$ increases. Finally, since ${\Gamma\left(N_{B}, \frac{\rho_{2,3}}{S_{2}}\right)}/{\Gamma\left(N_{B}\right)}$ is a decreasing function of ${\rho_{2,3}}/{S_{2}}$, $P_{e,S_{2}}$ decreases with increasing ${\ln(1+\kappa_{3})}/{\kappa_{3}}$. Therefore, $P_{e,S_{2}}$ is a decreasing function of $\eta_{2}$. On similar lines, we can prove that $\frac{\gamma\left(N_{B}, \frac{\rho_{2,3}}{S_{3}}\right)}{\Gamma\left(N_{B}\right)}$ is also a decreasing function of $\eta_{2}$.
In contrast, the terms $\frac{\Gamma\left(N_{B}, \frac{\rho_{3,4}}{S_{3}}\right)}{\Gamma\left(N_{B}\right)}$ and $\frac{\gamma\left(N_{B}, \frac{\rho_{3,4}}{S_{4}}\right)}{\Gamma\left(N_{B}\right)}$ are increasing functions of $\eta_{2}$. Since one of these sums is increasing and the other is decreasing in $\eta_{2}$, to prove that they intersect only once, it suffices to show that the order of the increasing and decreasing terms reverses at the extreme values of $\eta_{2}\in(\eta_{1}, (1+\alpha-0.5\eta_{1}))$. Thus, we evaluate the sum of the decreasing terms at the left extreme, i.e., $\eta_{2}\rightarrow\eta_{1}$, and the right extreme, i.e., $\eta_{2}\rightarrow(1+\alpha-0.5\eta_{1})$, \bieee \lim_{\eta_{2}\rightarrow\eta_{1}}\dfrac{\Gamma\left(N_{B}, \frac{\rho_{2,3}}{S_{2}}\right)}{\Gamma\left(N_{B}\right)} + \frac{\gamma\left(N_{B}, \frac{\rho_{2,3}}{S_{3}}\right)}{\Gamma\left(N_{B}\right)} = 1 \text{ and } \lim_{\eta_{2}\rightarrow(1+\alpha-0.5\eta_{1})}\frac{\Gamma\left(N_{B}, \frac{\rho_{2,3}}{S_{2}}\right)}{\Gamma\left(N_{B}\right)} + \frac{\gamma\left(N_{B}, \frac{\rho_{2,3}}{S_{3}}\right)}{\Gamma\left(N_{B}\right)} \ll 1.\nn \eieee \noindent Similarly, we evaluate the sum of the increasing terms at the left and right extremes of $\eta_{2}$, \bieee \lim_{\eta_{2}\rightarrow\eta_{1}}\frac{\Gamma\left(N_{B}, \frac{\rho_{3,4}}{S_{3}}\right)}{\Gamma\left(N_{B}\right)} + \frac{\gamma\left(N_{B}, \frac{\rho_{3,4}}{S_{4}}\right)}{\Gamma\left(N_{B}\right)} \ll 1, \text{ and }\ \lim_{\eta_{2}\rightarrow(1+\alpha-0.5\eta_{1})} \frac{\Gamma\left(N_{B}, \frac{\rho_{3,4}}{S_{3}}\right)}{\Gamma\left(N_{B}\right)} + \frac{\gamma\left(N_{B}, \frac{\rho_{3,4}}{S_{4}}\right)}{\Gamma\left(N_{B}\right)} = 1.\nn \eieee The above discussion is summarised as, \begin{equation*} \begin{cases} \dfrac{\Gamma\left(N_{B}, \frac{\rho_{2,3}}{S_{2}}\right)}{\Gamma\left(N_{B}\right)} + \dfrac{\gamma\left(N_{B}, \frac{\rho_{2,3}}{S_{3}}\right)}{\Gamma\left(N_{B}\right)} > \dfrac{\Gamma\left(N_{B}, \frac{\rho_{3,4}}{S_{3}}\right)}{\Gamma\left(N_{B}\right)} + \dfrac{\gamma\left(N_{B}, \frac{\rho_{3,4}}{S_{4}}\right)}{\Gamma\left(N_{B}\right)}, & \text{if $\eta_{2}\rightarrow\eta_{1}$},\\ \dfrac{\Gamma\left(N_{B}, \frac{\rho_{2,3}}{S_{2}}\right)}{\Gamma\left(N_{B}\right)} + \dfrac{\gamma\left(N_{B}, \frac{\rho_{2,3}}{S_{3}}\right)}{\Gamma\left(N_{B}\right)} < \dfrac{\Gamma\left(N_{B}, \frac{\rho_{3,4}}{S_{3}}\right)}{\Gamma\left(N_{B}\right)} + \dfrac{\gamma\left(N_{B}, \frac{\rho_{3,4}}{S_{4}}\right)}{\Gamma\left(N_{B}\right)}, & \text{if $\eta_{2}\rightarrow(1+\alpha-0.5\eta_{1})$}. \end{cases} \end{equation*} \end{proof} \begin{theorem} \label{th:Pe_alpha} For a given $\eta_{1}$ and $\eta_{2}$, the increasing and decreasing terms in $P_{e}^{\prime}$ intersect only once for $\alpha\in\left(0,1\right)$. \end{theorem} \begin{proof} Since $\alpha$ is variable, we recall Lemma~\ref{lm:P10P01_alpha} to note that $P_{01}$ and $P_{10}$ are increasing functions of $\alpha$. Consequently, $P_{00}$ and $P_{11}$ are decreasing functions of $\alpha$. In addition to these $4$ probabilities, $P_{e,S_{2}}$, $P_{e,S_{3}}$, and $P_{e,S_{4}}$ are also functions of $\alpha$ in \eqref{eq:Pe_app}. Along the same lines as Theorem~\ref{th:Pe_eta2}, we can show that $P_{e,S_{2}}$, $P_{e,S_{3}}$, and $P_{e,S_{4}}$ are decreasing functions of $\alpha$.
Therefore, we observe that $P_{00}P_{e,S_{4}}+ P_{11}\left(P_{e,S_{2}} + P_{e,S_{3}}\right)$ is a decreasing function of $\alpha$; since $P_{00}=P_{11}\approx 0$ when $\alpha\rightarrow 1$, this term approaches $0$ as $\alpha\rightarrow 1$. Further, $2(P_{01}+P_{10})$ is an increasing function of $\alpha$ such that $2(P_{01}+P_{10})\approx 0$ when $\alpha\rightarrow 0$ and $2(P_{01}+P_{10})\approx 2$ when $\alpha\rightarrow 1$. Therefore, it is straightforward to observe that the increasing and decreasing terms of $P_{e}^{\prime}$ reverse their order at the extreme values of $\alpha$. Thus, they have a unique intersection point. \end{proof} In the next section, we use Theorem~\ref{th:Pe_eta2} and Theorem~\ref{th:Pe_alpha} to present a low-complexity algorithm to solve the optimization problem in \eqref{opt:M2}. Using this algorithm, we obtain a local minimum over the variables $\eta_{2}$ and $\alpha$ for a given $\eta_{1}$. \subsubsection{Two-Layer Greedy Descent (TLGD) Algorithm} In this section, we present the Two-Layer Greedy Descent (TLGD) algorithm, given in Algorithm~\ref{Algo:M2}. It first fixes $N_{C}$, $N_{B}$, and SNR and then initialises $\eta_{1} = 0$, and $\eta_{2}$ and $\alpha$ with arbitrary values $\eta_{2}^{o}$ and $\alpha^{o}$, respectively. Using the initial values, it computes $P_{e}^{o}$ using \eqref{eq:Pe_app} and then obtains $\eta_{2}^{i}$ and $\alpha^{i}$ using Theorem~\ref{th:Pe_eta2} and Theorem~\ref{th:Pe_alpha}, respectively. It then evaluates $P_{e}^{\eta_{2}}$, i.e., $P_{e}^{\prime}$ at $\left\{\eta_{1}, \eta_{2}^{i}, \alpha\right\}$, and $P_{e}^{\alpha}$, i.e., $P_{e}^{\prime}$ at $\left\{\eta_{1}, \eta_{2}, \alpha^{i}\right\}$. If, for a given $\eta_{1}$, $\left\vert P_{e}^{\alpha}-P_{e}^{\eta_{2}}\right\vert < \delta_{P_{e}^{\prime}}$, for some $\delta_{P_{e}^{\prime}}>0$, then the algorithm exits the inner while-loop with $P_{e}^{\iota}$ such that $P_{e}^{\iota} = \min\left(P_{e}^{\alpha}, P_{e}^{\eta_{2}}\right)$; otherwise, the algorithm iteratively descends in the steepest direction with new values of $\eta_{2}$ and $\alpha$. After traversing several values of $\eta_{1}$, TLGD finally stops when, for a given $\eta_{1}$, the obtained $P_{e}^{\iota}$ is within $\delta_{P_{e}^{\prime}}$ resolution of the previously computed value. The points at which $P_{e}^{\prime}$ is minimum as computed by TLGD are given by $\eta_{1}^{\star}$, $\eta_{2}^{\star}$, and $\alpha^{\star}$. We rearrange the constraint in~\eqref{opt:M2} to obtain $\epsilon_{2}^{\star}=2(1+\alpha^{\star})-\left(\eta_{1}^{\star} + \eta_{2}^{\star}\right)$. Further, from Lemma~\ref{lm:epsilon1}, we have $\epsilon_{1}=0$, therefore, $\epsilon_{1}^{\star}=0$. Thus, TLGD computes all the 5 variables, i.e., $\epsilon_{1}^{\star}$, $\epsilon_{2}^{\star}$, $\eta_{1}^{\star}$, $\eta_{2}^{\star}$, and $\alpha^{\star}$.
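For concreteness, a minimal Python sketch of the TLGD inner iteration is given below; Algorithm~\ref{Algo:M2} presents the formal pseudo-code. The sketch assumes that the cross-over probabilities $P_{00}$, $P_{01}$, $P_{10}$, and $P_{11}$ are supplied by a user-provided function \texttt{crossover\_probs(alpha)} (their closed forms follow from Lemma~\ref{lm:P10P01_alpha} and are not repeated here), and it replaces the intersection computations of Theorems~\ref{th:Pe_eta2} and~\ref{th:Pe_alpha} by generic bounded one-dimensional searches.
\begin{verbatim}
import numpy as np
from scipy.special import gammainc, gammaincc   # regularised lower/upper inc. Gamma
from scipy.optimize import minimize_scalar

def pe_prime(eta1, eta2, alpha, N_B, N_o, crossover_probs):
    """Approximate objective P_e' of eq. (Pe_app) for M = 2 (a sketch)."""
    eps2 = 2.0 * (1.0 + alpha) - (eta1 + eta2)   # sum-energy constraint with eps1 = 0
    S = np.array([N_o, 1 - alpha + eta1 + N_o, 1 - alpha + eta2 + N_o, eps2 + N_o])
    if np.any(np.diff(S) <= 0):                  # levels must satisfy S_1 < ... < S_4
        return np.inf
    rho = N_B * S[:-1] * S[1:] / (S[1:] - S[:-1]) * np.log(S[1:] / S[:-1])  # Lemma lm:rho
    P00, P01, P10, P11 = crossover_probs(alpha)  # assumed supplied by the caller
    Pe_S2 = gammaincc(N_B, rho[1] / S[1])        # high-SNR approximation (Lemma lm:P12P21)
    Pe_S3 = gammaincc(N_B, rho[2] / S[2]) + gammainc(N_B, rho[1] / S[2])
    Pe_S4 = gammainc(N_B, rho[2] / S[3])
    return 0.25 * (P00 * Pe_S4 + P11 * (Pe_S2 + Pe_S3) + 2.0 * (P01 + P10))

def tlgd_inner(eta1, eta2, alpha, N_B, N_o, cp, delta=1e-6, max_iter=100):
    """Inner loop of TLGD: alternating bounded 1-D searches over eta2 and alpha."""
    f = lambda e2, a: pe_prime(eta1, e2, a, N_B, N_o, cp)
    for _ in range(max_iter):
        eta2_i = minimize_scalar(lambda x: f(x, alpha), method='bounded',
                                 bounds=(eta1 + 1e-6, 1 + alpha - 0.5 * eta1)).x
        alpha_i = minimize_scalar(lambda a: f(eta2, a), method='bounded',
                                  bounds=(1e-3, 1 - 1e-3)).x
        Pe_eta2, Pe_alpha = f(eta2_i, alpha), f(eta2, alpha_i)
        if abs(Pe_alpha - Pe_eta2) < delta:      # exit condition of the inner while-loop
            break
        if Pe_eta2 < Pe_alpha:                   # descend in the steeper direction
            eta2 = eta2_i
        else:
            alpha = alpha_i
    return min((Pe_eta2, eta2_i, alpha), (Pe_alpha, eta2, alpha_i))
\end{verbatim}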
\begin{algorithm} \setstretch{0.33} \DontPrintSemicolon \KwInput{$P_{e}^{\prime}$ from~\eqref{eq:Pe_app}, $\delta_{P_{e}^{\prime}}>0$, $\delta_{\eta_{1}}>0$, $\epsilon_{1}=0$} \KwOutput{$\left\{\eta_{1}^{\star}, \eta_{2}^{\star},\alpha^{\star}\right\}$} Initialize: $\eta_{1}\gets 0$, $\eta_{2}\gets \eta_{2}^{o}$, $\alpha\gets \alpha^{o}$\\ $P_{e}^{o} \gets P_{e}^{\prime}\left(\alpha,\eta_{1},\eta_{2}\right)$\\ \While{true} { \While{true} { Compute $\eta_{2}^{i}$ using Theorem~\ref{th:Pe_eta2} and obtain $P_{e}^{\eta_{2}} \gets P_{e}^{\prime}\left(\eta_{1}, \eta_{2}^{i},\alpha\right)$\\ Compute $\alpha^{i}$ using Theorem~\ref{th:Pe_alpha} and obtain $P_{e}^{\alpha} \gets P_{e}^{\prime}\left(\eta_{1}, \eta_{2},\alpha^{i}\right)$\\ \If{$P_{e}^{\alpha}-P_{e}^{\eta_{2}} \geq \delta_{P_{e}^{\prime}}$} { $\eta_{2} \gets \eta_{2}^{i}$; continue } \ElseIf{$P_{e}^{\alpha}-P_{e}^{\eta_{2}} \leq -\delta_{P_{e}^{\prime}}$} { $\alpha \gets \alpha^{i}$; continue } \ElseIf {$\left\vert P_{e}^{\alpha}-P_{e}^{\eta_{2}}\right\vert<\delta_{P_{e}^{\prime}}$} { $P_{e}^{\iota} = \min\left(P_{e}^{\alpha}, P_{e}^{\eta_{2}}\right)$; break } } \If{$\left(P_{e}^{\iota}-P_{e}^{o}\right) \leq- \delta_{P_{e}^{\prime}}$} { $\eta_{1} \gets \eta_{1} + \delta_{\eta_{1}}$, $P_{e}^{o}\gets P_{e}^{\iota}$; $\alpha^{\ast}\gets \alpha$, $\eta_{2}^{\ast}\gets \eta_{2}$ } \ElseIf{$\left(P_{e}^{\iota}-P_{e}^{o}\right) \geq \delta_{P_{e}^{\prime}}$} { $\eta_{1}^{\star} \gets \eta_{1} - \delta_{\eta_{1}}$, $\eta_{2}^{\star} \gets \eta_{2}^{\ast}$, $\alpha^{\star} \gets \alpha^{\ast}$; break } \ElseIf{$\left\vert P_{e}^{\iota}-P_{e}^{o}\right\vert < \delta_{P_{e}^{\prime}}$} { $\eta_{1}^{\star} \gets \eta_{1}$, $\eta_{2}^{\star} \gets \eta_{2}^{i}$, $\alpha^{\star} \gets \alpha^{i}$; break\\ } } \caption{\label{Algo:M2} Two-Layer Greedy Descent Algorithm} \end{algorithm} \begin{figure}[!htb] \centering \begin{minipage}[t]{.32\textwidth} \centering \includegraphics[width = \textwidth, height = 0.9\textwidth]{Joint_error_performance} \caption{\label{fig:Joint_per} Performance of NC-FFFD using energy levels obtained using TLGD and the exhaustive search.} \end{minipage} \hfill \begin{minipage}[t]{0.32\textwidth} \centering \includegraphics[width = \textwidth, height = 0.9\textwidth]{Pe_SNR_varNc} \caption{\label{fig:Pe_OOK_varNc} Performance of NC-FFFD for fixed $N_{B}=8$ and varying $N_{C}$.} \end{minipage} \hfill \begin{minipage}[t]{0.32\textwidth} \centering \includegraphics[width = \textwidth, height = 0.9\textwidth]{Alice_performance} \caption{\label{fig:Alice_per} Alice's performance when using NC-FFFD scheme for $N_{C}=1$ and $N_{B}=8$.} \end{minipage} \end{figure} In Fig.~\ref{fig:Joint_per}, we plot the error performance of NC-FFFD scheme as a function of SNR and $N_{B}$ using Monte-Carlo simulations. We assume, $\sigma_{AB}^{2}=\sigma_{CB}^{2}=1$, $\lambda=-50$ dB, and $N_{C}=1$. Further, due to vicinity of Alice and Charlie, we assume $\sigma_{AC}^{2}=4$, thus, providing $6$ dB improvement in SNR on Alice-to-Charlie link as compared to Alice-to-Bob link. We compute the error-rates when the optimal energy levels and $\alpha$ are obtained using exhaustive search on \eqref{eq:Pe_M2}. We also compute the error-rates using the proposed algorithm. For both the scenarios, we observe that the error curves approximately overlap, indicating the efficacy of the proposed algorithm, as well as our approach of using \eqref{opt:M2} instead of \eqref{eq:Pe_M2}. 
Further, in Fig.~\ref{fig:Pe_OOK_varNc}, for the same parameters and $N_{B}=8$, we plot the error performance of the NC-FFFD scheme as a function of SNR for various values of $N_{C}$. We observe that the error performance of the NC-FFFD scheme improves as a function of $N_{C}$. Finally, for the same parameters and $N_{B}=8$, in Fig.~\ref{fig:Alice_per}, we show the improvement in Alice's performance when using the NC-FFFD relaying scheme. In terms of feasibility of implementation, the complexity analysis of the TLGD algorithm has been discussed in the conference proceedings of this work \cite{my_GCOM}. \subsection{Optimization of Energy Levels for $M\geq 2$} \label{ssec:gncfffd} In this section, we provide a solution that computes the optimal energy levels, $\{\epsilon_{j},\eta_{j}\}$, and the factor $\alpha$, when $M\geq 2$. Since the average transmit energy of Charlie is constrained to $\mathrm{E}_{C,f_{CB}}$, increasing the data-rate at Charlie results in degraded joint error performance as compared to $M=2$. One way to improve the error performance is by using a large number of receive antennas at Bob. Despite this improvement, it is important to note that the joint error performance is also a function of the SNR of the Alice-to-Charlie link. Therefore, an improved Alice-to-Charlie link can help to improve the overall performance of the scheme. This is also evident from Fig.~\ref{fig:Pe_OOK_varNc}, where we observe that the error performance of the scheme improves as a function of $N_{C}$. This motivates us to solve $P_{e}$ in \eqref{opt} for the optimal $\{\epsilon_{j},\eta_{j}\}$ and $\alpha$ under the assumption that Charlie has a sufficiently large number of receive-antennas. In this section, we take a similar approach as that of Sec.~\ref{ssec:Globecom}, by upper bounding the error terms $P_{e, \overline{S}_{\ell}}$ corresponding to $e=1$ by $1$ to obtain an upper bound on $P_{e}$ given by, \begin{small} \bieee P_{e}\leq P_{e}^{\prime} = \frac{1}{2M}\left[ \sum_{\ell_{1} = 1}^{M}P_{00}P_{e, S_{\frac{1}{2}\left(4\ell_{1}+(-1)^{\ell_{1}}-1\right)}} + \sum_{\ell_{2} = 1}^{M}P_{11}P_{e, S_{\frac{1}{2}\left((-1)^{\ell_{2}}\left(4(-1)^{\ell_{2}}\ell_{2} + (-1)^{\ell_{2}+1}-1\right)\right)}} + M\left(P_{01}+P_{10}\right)\right].\label{eq:Pe_upper} \eieee \end{small} \noindent Since $P_{e}^{\prime}$ is a function of $S_{\ell}$ and $\alpha$, besides $N_{C}$, $N_{B}$, and SNR, in the next theorem, we compute the optimal value of $\alpha\in(0,1)$ that minimises $P_{e}^{\prime}$ when $S_{1},\cdots,S_{2M}$, $N_{C}$, $N_{B}$, and SNR are fixed. \begin{theorem} \label{th:alpha_range} When $S_{1},\cdots,S_{2M}$ are fixed, such that $S_{2}<1$, the optimal value of $\alpha\in(0,1)$ that minimises $P_{e}^{\prime}$ in \eqref{eq:Pe_upper} is given by, $\alpha^{\dagger} = 1-S_{2}$. \end{theorem} \begin{proof} We will first show that $P_{e}^{\prime}$ in \eqref{eq:Pe_upper} is an increasing function of $\alpha$. Then, we compute a lower bound on $\alpha$ considering the feasible energy levels jointly contributed by Alice and Charlie. The expression of $P_{e}^{\prime}$ in \eqref{eq:Pe_upper} is a convex combination of $P_{00}$, $P_{01}$, $P_{10}$, and $P_{11}$. Further, we notice that $P_{00}$ and $P_{11}$ are decreasing functions of $\alpha$ (Lemma~\ref{lm:P10P01_alpha}).
However, since $S_{1},\cdots,S_{2M}$ are fixed, the coefficients of $P_{00}$ and $P_{11}$ are independent of $\alpha$, such that $\sum_{\ell_{1} = 1}^{M}P_{e, S_{\frac{1}{2}\left(4\ell_{1}+(-1)^{\ell_{1}}-1\right)}}\leq M$ and $\sum_{\ell_{2} = 1}^{M}P_{e, S_{\frac{1}{2}\left((-1)^{\ell_{2}}\left(4(-1)^{\ell_{2}}\ell_{2} + (-1)^{\ell_{2}+1}-1\right)\right)}}\leq M$. Further, since $P_{01}$ and $P_{10}$ are increasing functions of $\alpha$, it is straightforward that $P_{e}^{\prime}$ is an increasing function of $\alpha$. This completes the first part of the proof. Although we upper bound the energy levels $\overline{S}_{\ell}$ by $1$, in practice, Bob receives these energy levels when $e=1$ at Charlie. From \eqref{eq:map2}, we have, $\overline{S}_{\frac{1}{2}\left(4\ell_{1} + (-1)^{\ell_{1}}-1\right)} = S_{\frac{1}{2}\left((-1)^{\ell_{1}}\left(4(-1)^{\ell_{1}}\ell_{1}+(-1)^{\ell_{1}+1}-1\right)\right)}-(1-\alpha)$. It is important to note that, if $S_{\frac{1}{2}\left((-1)^{\ell_{1}}\left(4(-1)^{\ell_{1}}\ell_{1}+(-1)^{\ell_{1}+1}-1\right)\right)}<1-\alpha$, then $\overline{S}_{\frac{1}{2}\left(4\ell_{1} + (-1)^{\ell_{1}}-1\right)}<0$. However, since $\overline{S}_{\ell}\in\mathcal{\overline{S}}$ are energy levels, $\overline{S}_{\ell}\geq 0$. Therefore, to achieve $\overline{S}_{\frac{1}{2}\left(4\ell_{1} + (-1)^{\ell_{1}}-1\right)}\geq 0$, we must have $S_{\frac{1}{2}\left((-1)^{\ell_{1}}\left(4(-1)^{\ell_{1}}\ell_{1}+(-1)^{\ell_{1}+1}-1\right)\right)}\geq 1-\alpha$ or $\alpha\geq 1-S_{\frac{1}{2}\left((-1)^{\ell_{1}}\left(4(-1)^{\ell_{1}}\ell_{1}+(-1)^{\ell_{1}+1}-1\right)\right)}$. Therefore, $\alpha\geq\max\left\{1-S_{\frac{1}{2}\left((-1)^{\ell_{1}}\left(4(-1)^{\ell_{1}}\ell_{1}+(-1)^{\ell_{1}+1}-1\right)\right)}\right\}$, where $\ell_{1}=1,\cdots,M$. However, since $S_{1}<\cdots<S_{2M}$, we have $\alpha\geq 1-S_{2}$. Finally, since $P_{e}^{\prime}$ in \eqref{eq:Pe_upper} is an increasing function of $\alpha$ and $\alpha\geq 1-S_{2}$, $P_{e}^{\prime}$ is minimised when $\alpha=\alpha^{\dagger}=1-S_{2}$. \end{proof} The result of Lemma~\ref{lm:P10P01_nc} indicates that $P_{01}$ and $P_{10}$ are decreasing functions of $N_{C}$. Further, $S_{\ell}$, $\ell=1,\cdots,2M$, are independent of $N_{C}$; as a result, each convex combination in \eqref{eq:Pe_upper} decreases as $N_{C}$ increases. Therefore, it is straightforward to prove that $P_{e}^{\prime}$ is a decreasing function of $N_{C}$. \begin{proposition} \label{prop:Pe_nc_dec} For a fixed $\alpha\in(0,1)$, when $N_{C}\rightarrow\infty$, we have $P_{01}=P_{10}\approx 0$ and $P_{00}=P_{11}\approx 1$, and therefore, $P_{e}^{\prime}\geq P_{e,approx} = \frac{1}{2M}\!\left[ \sum_{\ell_{1} = 1}^{M}P_{e, S_{\frac{1}{2}\left(4\ell_{1}+(-1)^{\ell_{1}}-1\right)}} + \sum_{\ell_{2} = 1}^{M}P_{e, S_{\frac{1}{2}\left((-1)^{\ell_{2}}\left(4(-1)^{\ell_{2}}\ell_{2} + (-1)^{\ell_{2}+1}-1\right)\right)}}\right]$. \end{proposition} Motivated by the result of Proposition~\ref{prop:Pe_nc_dec}, instead of solving \eqref{opt} for a sufficiently large $N_{C}$ from first principles, we take an alternate approach, where we first compute $S_{1},\cdots,S_{2M}$ that minimise $P_{e,approx}$ and then compute the respective $\{\epsilon_{j},\eta_{j}\}$ and $\alpha$ using the relation in \eqref{eq:map2}. Towards computing the optimal $S_{1},\cdots,S_{2M}$, we observe that, since an energy level $S_{\ell}$ corresponds to the sum of the energies contributed by Alice, Charlie, and the AWGN at Bob on $f_{CB}$, the sum energy contributed by Alice and Charlie will be $S_{\ell}-N_{o}$.
Furthermore, since the average energy on $f_{CB}$ is $1$, we have the following constraint on $S_{\ell}$: \bieee \dfrac{1}{2M}\sum_{\ell=1}^{2M}\left(S_{\ell} - N_{o}\right) = 1.\label{eq:sum_const} \eieee Finally, we formulate the following optimization problem of computing the optimal $S_{1}, \cdots, S_{2M}$ so as to minimise $P_{e,approx}$, subject to \eqref{eq:sum_const}. \bieee S_{1}^{\star},\cdots,S_{2M}^{\star} = \arg\underset{S_{1},\cdots,S_{2M}}{\min} \quad & & P_{e,approx}\label{opt2}\\ \text{subject to:} \quad & &\dfrac{1}{2M}\sum_{\ell=1}^{2M}\left(S_{\ell} - N_{o}\right) = 1, S_{1}<\cdots < S_{2M}.\nn \eieee While \eqref{opt2} can be solved from first principles, \cite{ranjan} provides a near-optimal solution for \eqref{opt2}. Therefore, we use the results of \cite{ranjan} to compute $S_{1}^{\star},\cdots,S_{2M}^{\star}$. In the next lemma, we prove that when we use $S_{1},\cdots,S_{2M}$ to obtain $\{\epsilon_{j},\eta_{j}\}$, such that $S_{1},\cdots, S_{2M}$ satisfy \eqref{eq:sum_const}, the resulting $\{\epsilon_{j},\eta_{j}\}$ satisfy \eqref{eq:new_constaint}. \begin{lemma} If $S_{1},\cdots,S_{2M}$ are fixed such that \eqref{eq:sum_const} is satisfied, then the average transmit energy of Charlie is given by \eqref{eq:new_constaint}. \end{lemma} \begin{proof} From \eqref{eq:map2}, we have $S_{\frac{1}{2}\left(4\ell_{1} + (-1)^{\ell_{1}}-1\right)} = \epsilon_{\ell_{1}}+N_{o},$ and $S_{\frac{1}{2}\left((-1)^{\ell_{1}}\left(4(-1)^{\ell_{1}}\ell_{1}+(-1)^{\ell_{1}+1}-1\right)\right)} = 1-\alpha + \eta_{\ell_{1}} + N_{o}$ for $i=0,1$, respectively, where $\ell_{1}=1,\cdots,M$. Summing the LHS and RHS of both equations over $\ell_{1}$ and rearranging, we get $\sum_{\ell=1}^{2M}(S_{\ell} - N_{o}) = \sum_{\ell_{1}=1}^{M}\left(\epsilon_{\ell_{1}}+\eta_{\ell_{1}} + (1-\alpha)\right)$. Dividing both sides by $2M$ and rearranging, we get \eqref{eq:new_constaint}. \end{proof} In the next section, we propose the energy backtracking algorithm, where we first solve \eqref{opt2} using \cite{ranjan} to obtain $S_{1}^{\star},\cdots,S_{2M}^{\star}$ and then compute the corresponding $\{\epsilon_{j},\eta_{j}\vert j=1,\cdots,M\}$ and $\alpha$. It is important to note that, since Charlie cannot have $N_{C}\rightarrow\infty$, we must bound the number of receive-antennas at Charlie. Thus, we use a parameter $0<\Delta_{RE}\ll 1$ to bound $N_{C}$. Therefore, we compute the minimum number of receive-antennas at Charlie such that the relative error between $P_{e,approx}^{\star}$ and $P_{e,eval}$ is within $\Delta_{RE}$, where $P_{e,approx}^{\star}$ is $P_{e,approx}$ evaluated at $S_{1}^{\star},\cdots,S_{2M}^{\star}$ and $P_{e,eval}$ is $P_{e}$ evaluated at the optimal $\{\epsilon_{j},\eta_{j}\vert j=1,\cdots,M\}$ and $\alpha$. \subsection{Energy Backtracking (EB) Algorithm} The Energy Backtracking (EB) algorithm first computes the energy levels $S_{1}^{\star},\cdots,S_{2M}^{\star}$ using the semi-analytical results of \cite{ranjan}. It then computes $\alpha^{\dagger}$, $\epsilon_{j}^{\dagger}$, and $\eta_{j}^{\dagger}$ based on Theorem~\ref{th:alpha_range} and the relation in \eqref{eq:map2}, respectively. It then sets $N_{C}=1$ and computes $P_{e,eval}$, i.e., $P_{e}$ at $\alpha^{\dagger}$, $\epsilon_{j}^{\dagger}$, $\eta_{j}^{\dagger}$ for the given $N_{B}$. The algorithm increments $N_{C}$ until the relative error between $P_{e,approx}^{\star}$ and $P_{e,eval}$ is within $\Delta_{RE}$, and exits the while-loop when the relative error is less than or equal to $\Delta_{RE}$.
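Before presenting the pseudo-code, the back-mapping step of the EB algorithm is illustrated by the following Python sketch; it assumes that the levels $S_{1}^{\star},\cdots,S_{2M}^{\star}$ have already been obtained (e.g., via the semi-analytical procedure of \cite{ranjan}) and simply inverts the index relation of \eqref{eq:map2}, with $\alpha^{\dagger}=1-S_{2}^{\star}$ from Theorem~\ref{th:alpha_range}. The function name and interface are illustrative.
\begin{verbatim}
import numpy as np

def eb_backmap(S_opt, N_o):
    """Map the optimal sum-energy levels S*_1 < ... < S*_2M back to Alice's and
    Charlie's energy levels and to alpha (back-mapping step of the EB algorithm)."""
    S = np.asarray(S_opt, dtype=float)            # S[0] = S*_1, ..., S[2M-1] = S*_2M
    M = S.size // 2
    alpha = 1.0 - S[1]                            # Theorem th:alpha_range: alpha = 1 - S*_2
    eps, eta = np.empty(M), np.empty(M)
    for j in range(1, M + 1):
        i_eps = (4 * j + (-1) ** j - 1) // 2      # index of the epsilon-type level
        i_eta = ((-1) ** j * (4 * (-1) ** j * j + (-1) ** (j + 1) - 1)) // 2
        eps[j - 1] = S[i_eps - 1] - N_o                    # S_{.} = eps_j + N_o
        eta[j - 1] = S[i_eta - 1] - (1.0 - alpha) - N_o    # S_{.} = 1 - alpha + eta_j + N_o
    return eps, eta, alpha
\end{verbatim}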
The pseudo-code for the proposed EB algorithm is given in Algorithm~\ref{Algo:Generalised}. \begin{algorithm} \setstretch{0.32} \DontPrintSemicolon \KwInput{$P_{e}$ \eqref{eq:Pe}, $P_{e,approx}$, $\Delta_{RE}>0$, $M$, $N_{B}$, $N_{o}$} \KwOutput{$\epsilon_{1}^{\dagger},\cdots,\epsilon_{M}^{\dagger}$, $\eta_{1}^{\dagger},\cdots,\eta_{M}^{\dagger}$, $N_{C}^{\dagger}$, $\alpha^{\dagger}$} Compute $S_{1}^{\star},\cdots,S_{2M}^{\star}$ using \cite{ranjan} and evaluate $P_{e,approx}^{\star}$.\\ $\alpha^{\dagger} = 1-S_{2}^{\star}$; $\epsilon_{j}^{\dagger} = S_{\frac{1}{2}\left(4j + (-1)^{j}-1\right)}^{\star}-N_{o}$; $\eta_{j}^{\dagger} = S_{\frac{1}{2}\left((-1)^{j}\left(4(-1)^{j}j+(-1)^{j+1}-1\right)\right)}^{\star} - (1-\alpha^{\dagger})-N_{o}, \ j=1,\cdots,M$\\ Set: $N_{C}=1$, $P_{e,eval}=1$\\ \While{$\left\vert\dfrac{P_{e,approx}^{\star}-P_{e,eval}}{P_{e,approx}^{\star}}\right\vert\geq\Delta_{RE}$} { Substitute $S_{1}^{\star},\cdots,S_{2M}^{\star}$, $\alpha^{\dagger}$, $N_{C}$, and $N_{B}$ in \eqref{eq:Pe} and obtain $P_{e,eval}$ \If{$\left\vert\dfrac{P_{e,approx}^{\star}-P_{e,eval}}{P_{e,approx}^{\star}}\right\vert >\Delta_{RE}$} { $N_{C}=N_{C}+1$; continue } \Else { $N_{C}^{\dagger}=N_{C}$; break } } \caption{\label{Algo:Generalised} Energy Backtracking Algorithm} \end{algorithm} \begin{figure}[t] \vspace{-0.15in} \centering \begin{minipage}[t]{0.32\textwidth} \centering \includegraphics[width = \textwidth, height = 0.9\textwidth]{Algo2_M2_SOA} \caption{\label{fig:Algo2_M2} Error performance of NC-FFFD when energy levels are computed using EB algorithm for $M=2$.} \end{minipage} \hfill \begin{minipage}[t]{0.32\textwidth} \centering \includegraphics[width = \textwidth, height = 0.9\textwidth]{Algo2_M4} \caption{\label{fig:Algo2_M4} Error performance of NC-FFFD when energy levels are computed using EB algorithm for $M=4$.} \end{minipage} \hfill \begin{minipage}[t]{0.32\textwidth} \centering \includegraphics[width = \textwidth, height = 0.9\textwidth]{optAnt_SNR} \caption{\label{fig:opt_ant}$N_{C}^{\dagger}$ as a function of SNR for $M=2$ and $M=4$.} \end{minipage} \end{figure} In Fig.~\ref{fig:Algo2_M2} and Fig.~\ref{fig:Algo2_M4}, we plot the error performance of the NC-FFFD relaying scheme when using the EB algorithm for $M=2$ and $M=4$ for various values of $N_{B}$. In addition to the simulation parameters assumed above, we assume $\Delta_{RE}=10^{-2}$ in both cases. In both cases, we observe that the error performance improves as a function of SNR. In Fig.~\ref{fig:Algo2_M2}, we also plot the performance of the NC-FFFD scheme when Charlie uses the optimal energy levels for point-to-point communication obtained using \cite{ranjan} for $N_{B}=2,8$. From the plot, it is clear that although Charlie is using optimal energy levels for point-to-point communication, the corresponding error performance of NC-FFFD is poor. This reinforces that, to minimise $P_{e}$, the energy levels at both the users must be jointly optimised, as proposed in Algorithm~\ref{Algo:Generalised}. Finally, in Fig.~\ref{fig:opt_ant}, we also plot $N_{C}^{\dagger}$ as a function of SNR for various combinations of $M$ and $N_{B}$, and observe that with only tens of antennas at the helper, we can achieve performance close to the large-antenna counterpart. Note that the NC-FFFD scheme provides performance close to $P_{e,approx}^{\star}$ under the assumption that fast-forwarding at Charlie is perfect.
Therefore, the symbols on the direct link, i.e., Alice-to-Bob link and relayed link, i.e., Charlie-to-Bob link, arrive during the same time instant, thereby resulting in the signal model in \eqref{eq:rb}. In the next section, we discuss the case when fast-forwarding at Charlie is imperfect. In particular, we discuss the consequences related to this problem and a possible solution. \section{Delay-Tolerant NC-FFFD (DT NC-FFFD) Relaying Scheme} \label{sec:DT_NC-FFFD} If $nT$ denotes the delay on the relayed link w.r.t. the direct link, such that $n\geq 0$ and $T$ is the symbol duration, then $nT=0$, when fast-forwarding is perfect. However, when fast-forwarding is imperfect, $nT\neq 0$ and $\mathbf{r}_{B}$ must be a function of $nT$. In particular, when $nT\neq 0$, the symbol received at Bob is a function of Alice's current symbol, Charlie's current symbol, and Alice's symbol delayed by $nT$. Although, Charlie's current symbol and Alice's symbol delayed by $nT$ are captured by $E_{C}$, the current symbol of Alice creates an interference in the symbol decoding, thereby degrading the error performance. To illustrate this behaviour, we plot the error performance of NC-FFFD scheme in Fig.~\ref{fig:DT1}, when the symbols on the relayed link arrive one symbol period after the symbols on the direct link. The plot shows that, the error performance degrades as the energy on the direct link interferes when Bob tries to decode symbols using the relayed link. Towards computing the optimal energy levels at Alice and Charlie when $nT\neq 0$, one can formulate a new signal model, where $\mathbf{r}_{B}$ is a function of $nT$ and then compute the optimal energy levels using the first principles. However, we note that, Alice contributes \emph{zero} and $1-\alpha$ energies on the direct link, when she transmits symbol $0$ and symbol $1$, respectively. Thus, in order to reduce the interference from the direct link, we must reduce the term $1-\alpha$. Therefore, if we upper bound the contribution $1-\alpha$ by small value, then we can continue to use the same signal model on $\mathbf{r}_{B}$ as given in \eqref{eq:rb}, thereby making NC-FFFD scheme \emph{Delay Tolerant}. To this end, we propose an upper bound on $1-\alpha$ as, $1-\alpha\leq \Delta_{\text{DT}}N_{o}$, where $0<\Delta_{\text{DT}}\ll 1$ is the design parameter. Since $1-\alpha\leq \Delta_{\text{DT}}N_{o}$, we have the relation $\alpha\geq 1-\Delta_{\text{DT}}N_{o}$. Further, the result of Theorem~\ref{th:alpha_range} shows that $P_{e}^{\prime}$ is an increasing function of $\alpha$, therefore, the optimal choice of $\alpha$ would be, $\alpha= 1-\Delta_{\text{DT}}N_{o}$. However, since $\Delta_{\text{DT}}\ll 1$, $1-S_{2}<1-\Delta_{\text{DT}}N_{o}$ and therefore, using $\alpha=1-\Delta_{\text{DT}}N_{o}$ will degrade the error performance. In the following discussion, we show that we can achieve the same error performance at $\alpha = 1-\Delta_{\text{DT}}N_{o}$ as achieved in Sec.~\ref{ssec:gncfffd} at $\alpha=1-S_{2}$, by increasing the receive-diversity at Charlie. 
\begin{figure}[!htb] \begin{center} \begin{minipage}[t]{0.32\textwidth} \centering \includegraphics[width = \textwidth, height = 0.9\textwidth]{DelayTolerant_Ccomp} \caption{\label{fig:DT1} Error performance for $nT=0$ and $nT=T$.} \end{minipage} \hfill \begin{minipage}[t]{0.32\textwidth} \centering \includegraphics[width = \textwidth, height = 0.9\textwidth]{DelayTolerant_P01_P10} \caption{\label{fig:DT2} Variation of $\frac{P_{01}+P_{10}}{2}$ as a function of $N_{C}$ and $\alpha$.} \end{minipage} \hfill \begin{minipage}[t]{0.32\textwidth} \centering \includegraphics[width = \textwidth, height = 0.9\textwidth]{DT_M2_performance} \caption{\label{fig:DT_M2}Performance of DT NC-FFFD when energy levels are computed using DT-EB algorithm for $M=2$.} \end{minipage} \end{center} \end{figure} Since the terms $P_{00}$, $P_{01}$, $P_{10}$, and $P_{11}$ are functions of $\alpha$ and $N_{C}$ in $P_{e}^{\prime}$, we show that one can achieve the same $P_{00}$, $P_{01}$, $P_{10}$, and $P_{11}$ at different combinations of $\alpha$ and $N_{C}$. The results of Lemma~\ref{lm:P10P01_alpha} show that for a fixed $N_{C}$, $P_{01}$ and $P_{10}$ are increasing functions of $\alpha$. Subsequently, from Lemma~\ref{lm:P10P01_nc}, for a fixed $\alpha$, $P_{01}$ and $P_{10}$ are decreasing functions of $N_{C}$. In Fig.~\ref{fig:DT2}, we plot $\frac{P_{01}+P_{10}}{2}$ as a function of $\alpha$ for various $N_{C}$ at $25$ dB and observe that, for $N_{C}=1$ and $\alpha = 0.9003$, the average probability of error of Alice-to-Charlie link is $9.79\times 10^{-3}$. However, to obtain the same error performance at larger $\alpha$, i.e., $\alpha=0.9733$, we must use $N_{C}=4$. Based on the above discussion, in the next section, we propose a variant of EB algorithm, where we bound the interference from the direct link by $\Delta_{\text{DT}}N_{o}$ and obtain $\{\epsilon_{j},\eta_{j}\}$ and the minimum $N_{C}$, such that the error performance is close to $P_{e,approx}$. \subsection{Delay Tolerant Energy Backtracking (DT-EB) Algorithm} In the Delay Tolerant Energy Backtracking (DT-EB) algorithm, we obtain the optimal energy levels at Alice and Charlie, such that the energy level on the direct link is bounded by $\Delta_{\text{DT}}N_{o}$. To facilitate this, we use the EB algorithm with two variations, i) we set $\alpha=1-\Delta_{\text{DT}}N_{o}$, instead of $\alpha = 1-S_{2}^{\star}$, ii) the effective SNR to compute $S_{1}^{\star},\cdots,S_{2M}^{\star}$ is $\left(N_{o}+\Delta_{\text{DT}}N_{o}\right)^{-1}$. \begin{figure}[!htb] \begin{center} \begin{minipage}[t]{0.32\textwidth} \centering \includegraphics[width = \textwidth, height = 0.9\textwidth]{DT_M4_performance} \caption{\label{fig:DT_M4}Performance of DT NC-FFFD when energy levels are computed using DT-EB algorithm for $M=4$.} \end{minipage} \hfill \begin{minipage}[t]{0.32\textwidth} \centering \includegraphics[width = \textwidth, height = 0.9\textwidth]{optAnt_SNR_DT} \caption{\label{fig:opt_ant_DT} $N_{C}^{\ddagger}$ as a function of SNR for $M=2$ and $M=4$.} \end{minipage} \hfill \begin{minipage}[t]{0.32\textwidth} \centering \includegraphics[width = \textwidth, height = 0.9\textwidth]{DT_comp_NC_FFFD} \caption{\label{fig:DT_comp}DT NC-FFFD scheme, when $nT=0$ and $nT=T$ for $M=2$, $N_{B}=8$, $\Delta_{RE}=10^{-2}$, and $\Delta_{\text{DT}}=10^{-1}$.} \end{minipage} \end{center} \end{figure} We now demonstrate the performance of DT NC-FFFD scheme. 
For all simulation purposes, we assume $\Delta_{RE}=10^{-2}$, and $\Delta_{\text{DT}}=10^{-1}$, in addition to simulation parameters considered in the previous sections. Further, the effective SNR at Bob, denoted by SNR\textsubscript{eff}, is given by SNR\textsubscript{eff} (dB) = $\text{SNR (dB)}-\log\left(1+\Delta_{\text{DT}}\right)$. In Fig.~\ref{fig:DT_M2} and Fig.~\ref{fig:DT_M4}, we plot the error performance of DT NC-FFFD scheme as a function of SNR\textsubscript{eff} for $M=2$ and $M=4$, respectively, when $N_{B}=2,4,8,16$. From these plots, we show that the error performance of DT NC-FFFD improves as a function of SNR\textsubscript{eff}. However, to achieve this performance Charlie must use more receive-antennas as compared to its NC-FFFD counterpart. In Fig.~\ref{fig:opt_ant_DT}, we plot the optimal receive-antennas at Charlie, denoted by $N_{C}^{\ddagger}$, as a function of SNR for various combinations of $M$ and $N_{B}$, and observe that since $\alpha$ is a function of $N_{o}$, the number of receive-antennas required by Charlie is an increasing function of SNR. Further, it is clear from the plot that we need to mount more receive-antennas at Charlie for DT NC-FFFD scheme as compared to NC-FFFD scheme. Furthermore, we also plot the error performances of NC-FFFD and DT NC-FFFD schemes in Fig~\ref{fig:DT_comp}, for the case when $nT=0$ and $nT=T$, when $M=2$ and $N_{B}=8$. From the plots, we find that, when $nT=0$, the error performance of NC-FFFD and DT NC-FFFD exactly overlaps. However, when $nT=T$, the error-rates of DT NC-FFFD are better than the error-rates of NC-FFFD scheme. We also notice a marginal degradation in the performance of DT NC-FFFD when $nT=T$ compared to $nT=0$ due to lower effective SINR in the former case. \section{Covertness Analysis of NC-FFFD Relaying Scheme} \label{sec:Covert} When communicating in the presence of a reactive jamming adversary, it becomes imperative that the communication is covert. In the context of this work, covertness is the ability of Alice and Charlie to communicate without getting detected by Dave's ED or CD. Henceforth, we discuss Dave's capability to detect the proposed countermeasures by focusing on the communication over $f_{AB}$. \subsection{Energy Detector (ED)} After executing the jamming attack, Dave collects a frame of $L$ symbols on $f_{AB}$ and computes their average energy. A countermeasure is detected when the difference between the computed average energy (after the jamming attack) and the average energy (before the jamming attack) is greater than the tolerance limit $\tau$, where $\tau\geq 0$ is a small number of Dave's choice. When no countermeasure is implemented, Dave receives symbols from Alice on $f_{AB}$. Since Dave has single receive-antenna, the $l^{th}$ symbol received by Dave on $f_{AB}$ is, $r_{D}(l) = h_{AD}(l)x(l) + n_{D}(l),\ l = 1,\cdots , L$, where, $h_{AD}(l)\sim{\cal CN}\left(0,1\right)$ is the fading channel on the $l^{th}$ symbol on Alice-to-Dave link, $n_{D}(l)\sim{\cal CN}\left(0, \tilde{N}_{o}\right)$ is the effective AWGN at Dave, such that $\tilde{N}_{o}=N_{o}+\sigma_{DD}^{2}$, where $\sigma_{DD}^{2}$ is the variance of the residual SI at Dave and $N_{o}$ is the variance of the AWGN at Dave. Further, the scalar $x(l)\in\{0,1\}$ is the $l^{th}$ symbol transmitted by Alice. Due to uncoded communication over fast-fading channel, $r_{D}(l)$ is statistically independent over $l$. 
The average energy received by Dave on $f_{AB}$ corresponding to $r_{D}(l)$, $l\in\{1,\cdots,L\}$ is given by, $E_{D,f_{AB}}$, where $E_{D,f_{AB}} = \frac{1}{L}\sum_{l=1}^{L}\left\vert r_{D}(l)\right\vert^{2}$. Since $h_{AD}(l)$ and the AWGN $n_{D}(l)$ are Random Variables (RV), $E_{D,f_{AB}}$ is also a RV. Using weak law of large numbers, $\frac{1}{L}\sum_{l=1}^{L}\left\vert r_{D}(l)\right\vert^{2}\rightarrow E_{f_{AB}}$ in probability, where, $E_{f_{AB}} = \tilde{N}_{o} + 0.5$ denotes the expected energy of $r_{D}(l)$ on $f_{AB}$, before the jamming attack. Since low-latency messages typically have short packet-length, Dave cannot collect a large number of observation samples. Therefore, $L$ is generally small, and with probability $1$, $E_{D,f_{AB}}\neq E_{f_{AB}}$. If $\mathcal{H}_{0}$ and $\mathcal{H}_{1}$ denote the hypothesis of no countermeasure and countermeasure, respectively, then, given $\mathcal{H}_{0}$ is true, false-alarm is an event when $E_{D,f_{AB}}$ deviates from $E_{f_{AB}}$ by an amount greater than $\tau$. We now formally define the probability of false-alarm. \begin{definition}\label{def:pfa} The probability of false-alarm denoted by, $\mathbf{P}_{FA}$ is given as, $\Pr\left(\left.\left\vert E_{D,f_{AB}}- E_{f_{AB}}\right\vert > \tau\right\vert \mathcal{H}_{0}\right)$, for $\tau>0$. \end{definition} \noindent If $u_{l}$ denotes the energy of $l^{th}$ symbol on $f_{AB}$ without any countermeasure, then the RV corresponding to the average energy of $L$ symbols is denoted by, $\mathcal{U}_{L} = \frac{1}{L}\sum_{l=1}^{L}u_{l}$. In order to compute $\mathbf{P}_{FA}$, first we compute the distribution of $\mathcal{U}_{L}$ in the next theorem. \begin{theorem}\label{th:pdf_Um} Given $\mathcal{H}_{0}$ is true, if $\tilde{N}_{o}<<1$, then the PDF of $~\mathcal{U}_{L}$, i.e., $p_{\mathcal{U}_{L}}(\varsigma)$ is $\left(\frac{1}{2}\right)^{L}\sum_{l=0}^{L}{L \choose l}\frac{L^{l} e^{-L\varsigma} \varsigma^{l-1}}{\Gamma(l)}$, $\varsigma>0$. \cite[Theorem 5]{my_TCCN} \end{theorem} From Definition~\ref{def:pfa}, $\mathbf{P}_{FA} = \Pr\left(E_{D,f_{AB}}>E_{f_{AB}} + \tau\right) + \Pr\left(E_{D,f_{AB}}\leq E_{f_{AB}} -\tau\right)$. Therefore, using the PDF of $\mathcal{U}_{L}$ from Theorem~\ref{th:pdf_Um}, the closed-form expression of $\mathbf{P}_{FA}$ is given by, \bieee \mathbf{P}_{FA} &=& \dfrac{1}{2^{L}}\left(\sum_{l=0}^{L}{L \choose l}\dfrac{\Gamma\left(l, L(E_{f_{AB}}+\tau)\right)}{\Gamma(l)} + \sum_{l=0}^{L}{L \choose l}\dfrac{\gamma\left(l, L(E_{f_{AB}}-\tau)\right)}{\Gamma(l)}\right).\label{eq:pfa} \eieee When using NC-FFFD relaying scheme, at the $l^{th}$ symbol instant, Alice and Charlie synchronously transmit dummy OOK bit $b(l)\in\{0,1\}$ with energies $\alpha$ and $1-\alpha$, respectively, on $f_{AB}$, where $b(l)$ is the least significant bit of the pre-shared Gold sequence. The baseband symbol received at Dave is, $r_{D}(l) = h_{AD}(l)\sqrt{\alpha}b(l) + h_{CD}(l)\sqrt{1-\alpha}b(l) + n_{D}(l)$, where, for $l^{th}$ symbol, $h_{AD}(l)\sim{\cal CN}\left(0, 1\right)$ and $h_{CD}(l)\sim{\cal CN}\left(0, (1+\partial)\right)$ are Rayleigh fading channels for Alice-to-Dave and Charlie-to-Dave links, respectively. Since the location of Dave can be arbitrary, the variances of Alice-to-Dave and Charlie-to-Dave links are not identical. Thus, $\partial$ captures the relative difference in the variance. 
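We note that the closed-form expression in \eqref{eq:pfa} is straightforward to evaluate numerically; the following Python sketch does so using the regularised incomplete Gamma functions. The $l=0$ terms of the two summations are handled separately, since they reduce to $0$ and $1$, respectively, for $E_{f_{AB}}-\tau>0$; the routine and its name are illustrative.
\begin{verbatim}
import numpy as np
from scipy.special import comb, gammainc, gammaincc

def prob_false_alarm(L, E_fab, tau):
    """Evaluate P_FA of (eq:pfa) for frame length L, expected energy E_fab, and
    tolerance tau; the l = 0 terms are added separately (0 and 1, respectively)."""
    l = np.arange(1, L + 1)
    upper = np.sum(comb(L, l) * gammaincc(l, L * (E_fab + tau)))   # Gamma(l,.)/Gamma(l)
    lower = np.sum(comb(L, l) * gammainc(l, L * (E_fab - tau)))    # gamma(l,.)/Gamma(l)
    lower += 1.0 if E_fab - tau > 0 else 0.0                       # l = 0 contribution
    return (upper + lower) / 2.0 ** L
\end{verbatim}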
If $E_{D,f_{AB}}^{FF}$ denotes the average energy received at Dave when Alice and Charlie use the NC-FFFD scheme, then, due to the change in the signal model, $E_{D,f_{AB}}^{FF}\neq E_{D,f_{AB}}$. Along similar lines to $\mathbf{P}_{FA}$, we now formally define the probability of detection at Dave when the NC-FFFD scheme is implemented. \begin{definition}\label{def:pd} If $\mathbf{P}_{D}$ denotes the probability of detection at Dave when $\mathcal{H}_{1}$ is true, then for any $\tau>0$, $\mathbf{P}_{D} = \Pr\left(\left.\left\vert E_{D,f_{AB}}^{FF}-E_{f_{AB}}\right\vert > \tau\right\vert \mathcal{H}_{1}\right)$. \end{definition} Further, if $v_{l}$ denotes the energy of the $l^{th}$ received symbol when using the countermeasure, then $\mathcal{V}_{L}$ denotes the average energy of $L$ symbols, where, $\mathcal{V}_{L} = \frac{1}{L}\sum_{l=1}^{L}v_{l}$. We provide the closed-form expression of the PDF of $\mathcal{V}_{L}$ in the next theorem. \begin{theorem} \label{th:pdf_Vm} When $\tilde{N}_{o}\ll 1$ and $\mathcal{H}_{1}$ is true, the PDF of $\mathcal{V}_{L}$, i.e., $p_{\mathcal{V}_{L}}(\varsigma)$, is given by, \bieee \left(\frac{1}{2}\right)^{L}\sum_{l=0}^{L}{L \choose l}\frac{\left(\frac{L}{\mathcal{A}}\right)^{l} e^{-\frac{L}{\mathcal{A}}\varsigma} \varsigma^{l-1}}{\Gamma(l)}, \text{ where $\varsigma>0$ and $\mathcal{A} = \alpha + (1-\alpha)(1+\partial)$.}\label{eq:pfd_Vm} \eieee \end{theorem} \begin{proof} When $\mathcal{H}_{1}$ is true, the received symbol at Dave on $f_{AB}$ is given as, $r_{D}(l) = h_{AD}(l)\sqrt{\alpha}b(l) + h_{CD}(l)\sqrt{1-\alpha}b(l) + n_{D}(l)$. Thus, the PDF of $v_{l}$ is given as, $p_{v_{l}}\left(\varsigma\right) = \frac{1}{2}\left(\dfrac{1}{\tilde{N}_{o}}e^{-\frac{\varsigma}{\tilde{N}_{o}}} + \frac{1}{\tilde{N}_{o}+\mathcal{A}}e^{-\frac{\varsigma}{\tilde{N}_{o}+\mathcal{A}}}\right)$, where $\mathcal{A} = \alpha + (1-\alpha)(1+\partial)$. Computing $\mathcal{V}_{L}$ requires us to sum $L$ independent $v_{l}$ random variables, each scaled by $L$, i.e., to sum $L$ independent ${v_{l}}/{L}$ random variables. Therefore, \bieee p_{v_{l}/L}(\varsigma) = \dfrac{1}{2}\left(\dfrac{L}{\tilde{N}_{o}}e^{-\frac{L\varsigma}{\tilde{N}_{o}}} + \dfrac{L}{\tilde{N}_{o}+\mathcal{A}}e^{-\frac{L\varsigma}{\tilde{N}_{o}+\mathcal{A}}}\right)\approx \dfrac{1}{2}\left(\delta(\varsigma) + \dfrac{L}{\mathcal{A}}e^{-\frac{L}{\mathcal{A}}\varsigma}\right),\nn \eieee \noindent where the approximation follows because $\tilde{N}_{o}\ll 1$. The PDF of $\mathcal{V}_{L}$ is the $L$-fold convolution ($*L$) of $p_{v_{l}/L}(\varsigma)$. For simplification, we use the properties of the Laplace transform ($\mathscr{L}[\cdot]$) and the inverse Laplace transform ($\mathscr{L}^{-1}[\cdot]$) to compute the PDF of $\mathcal{V}_{L}$ as, \bieee p_{\mathcal{V}_{L}}(\varsigma) = \left(p_{v_{l}/L}(\varsigma)\right)^{*L} &=& \mathscr{L}^{-1}\left[\left(\mathscr{L}\left[p_{v_{l}/L}(\varsigma)\right]\right)^{L}\right] = \mathscr{L}^{-1}\left[\left(\dfrac{1}{2}\right)^{L}\left[1 + \frac{L/\mathcal{A}}{\left(s+L/\mathcal{A}\right)}\right]^{L}\right].\nn \eieee Using the binomial expansion, we expand $\left[1 + \frac{L/\mathcal{A}}{\left(s+L/\mathcal{A}\right)}\right]^{L}$ and substitute it in the above to obtain \eqref{eq:pfd_Vm}. \end{proof} Overall, from Definition~\ref{def:pd} we have, $\mathbf{P}_{D} = \Pr\left(E_{D,f_{AB}}^{FF}>E_{f_{AB}} + \tau\right) + \Pr\left(E_{D,f_{AB}}^{FF}\leq E_{f_{AB}} -\tau\right)$. Thus, the probability of miss-detection, $\mathbf{P}_{MD}$, is given by $1-\mathbf{P}_{D}$.
From the result of Theorem~\ref{th:pdf_Vm}, \bieee \mathbf{P}_{MD} &=& 1 -\dfrac{1}{2^{L}}\sum_{l=0}^{L}{L \choose l}\dfrac{\Gamma\left(l, \frac{L}{\mathcal{A}}(E_{f_{AB}}+\tau)\right)}{\Gamma(l)} - \dfrac{1}{2^{L}}\sum_{l=0}^{L}{L \choose l}\dfrac{\gamma\left(l, \frac{L}{\mathcal{A}}(E_{f_{AB}}-\tau)\right)}{\Gamma(l)}.\label{eq:pmd} \eieee Ideally, a low $\mathbf{P}_{FA}$ and a high $\mathbf{P}_{D}$ allow Dave to detect a countermeasure. However, the legitimate nodes would like to drive the sum $\mathbf{P}_{FA}+\mathbf{P}_{MD}$ close to $1$ for any value of $\tau$. \begin{rem} If $\partial=0$ in \eqref{eq:pmd}, then $\mathbf{P}_{FA} + \mathbf{P}_{MD} = 1$ for all $\alpha\in (0,1)$ and arbitrary $L$ and $\tau$. Indeed, $\partial=0$ gives $\mathcal{A}=\alpha+(1-\alpha)=1$, so the PDF in Theorem~\ref{th:pdf_Vm} coincides with that in Theorem~\ref{th:pdf_Um}, whence $\mathbf{P}_{D}=\mathbf{P}_{FA}$ and $\mathbf{P}_{MD}=1-\mathbf{P}_{FA}$. \end{rem} \begin{figure}[!htb] \centering \begin{subfigure}[b]{0.48\textwidth} \centering \includegraphics[width = 0.8\textwidth, height = 5cm]{PFA_PMD_p_01} \end{subfigure} \hfill \begin{subfigure}[b]{0.48\textwidth} \centering \includegraphics[width = 0.8\textwidth, height = 5cm]{PFA_PMD_p_04} \end{subfigure} \caption{\label{fig:PFA_PMD} $\mathbf{P}_{FA} + \mathbf{P}_{MD}$ as a function of $L$ and $\partial$ at 25 dB (including the residual SI), $N_{B}=8$, and $\Delta_{\text{DT}}=0.1$.} \end{figure} While the above result theoretically guarantees $\mathbf{P}_{FA} + \mathbf{P}_{MD} = 1$ only for $\partial=0$, Fig.~\ref{fig:PFA_PMD} shows the simulation results for $\partial=0.1$ and $\partial=0.4$. Since Alice and Charlie transmit $b(l)$ with energies $\alpha$ and $1-\alpha$, respectively, the communication on $f_{AB}$ is independent of $\{\epsilon_{j},\eta_{j}\vert j=1,\cdots,M\}$ and depends only on the value of $\alpha$. In the previous sections, we have computed the values of $\alpha$ using the TLGD, EB, and DT-EB algorithms. Therefore, in Fig.~\ref{fig:PFA_PMD}, we plot the sum $\mathbf{P}_{FA} + \mathbf{P}_{MD}$ at Dave as a function of $L$, when the optimal value of $\alpha$ is computed using the TLGD, EB, and DT-EB algorithms. For $\text{SNR}=25$ dB, $N_{B}=8$, and $\Delta_{\text{DT}}=0.1$, we observe that the sum $\mathbf{P}_{FA} + \mathbf{P}_{MD}\approx 1$, despite a large number of samples at Dave. These simulation results indicate that the ED at Dave is oblivious to the countermeasures implemented by Alice and Charlie. \subsection{Correlation Detector (CD)} To prevent Alice and Charlie from using repetitive coding across the frequencies \cite{my_PIMRC,my_TCCN}, Dave deploys a CD to capture the correlation between the symbols on the jammed frequency and those on the other frequencies in the network. Among the several methods of estimating correlation, Dave uses a CD that measures it in terms of Mutual Information (MI), so as to capture both linear and non-linear correlation between the samples. However, estimating MI requires estimating the underlying marginal and joint PDFs, which is hard in general. Therefore, Dave needs a non-parametric method of MI estimation that does not require him to know the underlying joint and marginal PDFs. KSG estimators \cite{KSG}, which are based on \textbf{k}-nearest neighbours (\textbf{k}-NN), are well known for non-parametric MI estimation owing to their ease of implementation. Thus, Dave uses a KSG estimator to detect the proposed countermeasures. Since the information symbols on the frequency bands other than $f_{CB}$ are independent of the symbols on $f_{AB}$, we focus only on estimating the correlation between the symbols on $f_{AB}$ and $f_{CB}$, as $f_{CB}$ is the helper's frequency band.
In the context of this work, Dave estimates the MI between the energies of the samples on $f_{AB}$ and $f_{CB}$. We first show the effect of transmitting the dummy OOK bits $b\in\{0,1\}$ of the pre-shared Gold sequence on $f_{AB}$. When no countermeasure is used, and Alice and Charlie transmit independent symbols on $f_{AB}$ and $f_{CB}$, respectively, the energy scatter-plot is as shown in Fig.~\ref{fig:CD}a. If Alice and Charlie use repetitive coding across the frequencies, the energy samples are clustered around only a few centres, as shown in Fig.~\ref{fig:CD}b. Further, when Alice and Charlie cooperatively use Gold-sequence bits, the scatter-plot is as shown in Fig.~\ref{fig:CD}c. It can be observed that the scatter-plots in Fig.~\ref{fig:CD}a and Fig.~\ref{fig:CD}c are more randomised than that in Fig.~\ref{fig:CD}b. This suggests that, when Alice and Charlie transmit bits from a Gold-sequence based scrambler on $f_{AB}$, the observations at Dave resemble those obtained when independent symbols are transmitted. \begin{figure}[!hbt] \vspace{-0.25in} \centering \includegraphics[width = 0.73\textwidth, height = 8.5cm]{CD_image} \caption{\label{fig:CD}Scatter-plots representing the energy pairs received at Dave for SNR = 25 dB, $N_{B}=8$, $L=50$, when (a) Dave is not jamming. (b) Alice and Charlie use repetitive coding across $f_{AB}$ and $f_{CB}$. (c) Alice and Charlie cooperatively use the Gold sequence. (d) MI before jamming and after using NC-FFFD with the Gold sequence and with repetitive coding, as a function of $L$ at SNR = 25 dB, $\mathbf{k}=2$, and $N_{B}=8$. (e) $\mathbf{P}_{\text{D,CD}}$ when NC-FFFD is implemented with repetitive coding and with the Gold sequence, for $L=150$ at 25 dB, $N_{B}=8$, and $\mathbf{k}=2$.} \end{figure} To formally measure the correlation, let $U$ and $V$ denote the RVs corresponding to the energies of the samples on $f_{AB}$ and $f_{CB}$, respectively. Before the jamming attack, since Alice and Charlie transmit independent symbols, the MI estimate, denoted by $\tilde{I}(U,V)$, should be zero. However, owing to the small number of samples $L$, $\tilde{I}(U,V)$ takes a small non-zero value, which approaches zero as the number of samples increases. In Fig.~\ref{fig:CD}d, for 25 dB, $N_{B}=8$, and $\mathbf{k}=2$, we plot the estimated MI at Dave before the jamming attack and after implementing the NC-FFFD scheme with repetitive coding and with the Gold sequence. We use $10^{3}$ iterations for each value of $L$ to compute $\tilde{I}(U,V)$ before the jamming attack, because Dave wants a good estimate of the MI. However, after the jamming attack, Dave cannot use multiple iterations to estimate $\tilde{I}(U,V)$ for a given $L$. From Fig.~\ref{fig:CD}d, it is clear that the estimated MI for NC-FFFD is high and an increasing function of $L$ when repetitive coding is used. In contrast, when the Gold sequence is used, the estimated MI oscillates near the MI estimate before the jamming attack. If $\tau_{\text{CD}}$ denotes the resolution of Dave's CD, then the probability of detection, denoted by $\mathbf{P}_{\text{D,CD}}$, is given as $\mathbf{P}_{\text{D,CD}} = \Pr\left(\left.\left\vert\mathbb{E}\left[\tilde{I}_{\text{J}}(U,V)\right]-\tilde{I}_{\text{CJ}}(U,V)\right\vert \geq \tau_{\text{CD}}\right\vert\mathcal{H}_{1}\right)$, where $\mathbb{E}\left[\tilde{I}_{\text{J}}(U,V)\right]$ denotes the long-term estimate of the MI before the jamming attack and $\tilde{I}_{\text{CJ}}(U,V)$ denotes the estimate of the MI after implementing NC-FFFD with the pre-shared Gold-sequence bits.
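Since the KSG estimator is only referenced above, the following minimal Python sketch (ours, not taken from \cite{KSG} or the authors' simulations; the toy data are assumptions) illustrates how Dave could estimate $\tilde{I}(U,V)$ from $L$ energy pairs using \textbf{k}-NN statistics.
\begin{verbatim}
# Sketch: minimal KSG (algorithm 1) estimate of I(U;V) from energy samples.
import numpy as np
from scipy.spatial import cKDTree
from scipy.special import digamma

def ksg_mi(u, v, k=2):
    # KSG estimator for two 1-D samples of equal length
    u = u.reshape(-1, 1); v = v.reshape(-1, 1)
    n = len(u)
    joint = np.hstack([u, v])
    # distance (max-norm) to the k-th neighbour in the joint space
    dist, _ = cKDTree(joint).query(joint, k=k + 1, p=np.inf)
    eps = dist[:, -1]
    # numbers of strictly closer neighbours in each marginal space
    nu = cKDTree(u).query_ball_point(u, eps - 1e-12, p=np.inf, return_length=True) - 1
    nv = cKDTree(v).query_ball_point(v, eps - 1e-12, p=np.inf, return_length=True) - 1
    return digamma(k) + digamma(n) - np.mean(digamma(nu + 1) + digamma(nv + 1))

rng = np.random.default_rng(1)
L = 150
u = rng.exponential(1.0, L)                       # toy energies on f_AB
v_indep = rng.exponential(1.0, L)                 # independent energies on f_CB
v_repet = u + 0.05 * rng.exponential(1.0, L)      # strongly correlated (repetitive coding)
print("independent symbols:", ksg_mi(u, v_indep, k=2))   # close to zero
print("repetitive coding  :", ksg_mi(u, v_repet, k=2))   # clearly positive
\end{verbatim}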
In Fig.~\ref{fig:CD}e, we plot $\mathbf{P}_{\text{D,CD}}$ at SNR = 25 dB, $N_{B}=8$, and $L=150$ samples, where the optimal value of $\alpha$ is computed using the EB algorithm. Since $\tau_{\text{CD}}$ determines the accuracy of the CD, we assume $0\leq\tau_{\text{CD}}\leq 0.20$, because a very large resolution results in poor detection. When NC-FFFD is implemented and Alice uses repetitive coding, $\mathbf{P}_{\text{D,CD}}$ is close to $1$ over the given range of $\tau_{\text{CD}}$. However, when NC-FFFD is implemented using the Gold-sequence bits $b$, $\mathbf{P}_{\text{D,CD}}$ decreases as a function of $\tau_{\text{CD}}$. Thus, when Alice and Charlie use Gold-sequence bits to transmit on $f_{AB}$, the probability of detecting the countermeasure at Dave is small, as the symbols on $f_{AB}$ and $f_{CB}$ are independent by design. \section{Conclusion} In this work, we have envisaged a strong FD adversary who injects jamming energy onto the low-latency messages of the victim in a fast-fading environment. Unlike the reactive adversaries in the literature, the adversary in our model uses an energy detector and a correlation detector to prevent the use of any pre-existing countermeasures. Against this threat model, we have proposed the NC-FFFD relaying scheme, in which the victim seeks the help of a helper node to fast-forward its symbols to the base station. Based on the helper's data rate, we have derived analytical results on the joint error performance, and have then proposed a family of algorithms to compute near-optimal energy levels at the victim and helper nodes. Further, we have shown that, with high probability, the proposed scheme keeps the reactive adversary engaged on the jammed frequency. Overall, this is the first work of its kind to address security threats from a reactive adversary in a fast-fading environment. \ifCLASSOPTIONcaptionsoff \newpage \fi \bibliography{Ref} \bibliographystyle{IEEEtran} \end{document}
2205.13180v3
http://arxiv.org/abs/2205.13180v3
When is a polarised abelian variety determined by its $\boldsymbol{p}$-divisible group?
\documentclass[12pt]{amsart} \setcounter{tocdepth}{1} \usepackage{etex} \usepackage[usenames,dvipsnames]{pstricks} \usepackage{epsfig} \usepackage{graphicx,color} \usepackage{geometry} \geometry{a4paper} \usepackage[all]{xy} \usepackage{amssymb,amscd} \usepackage{cite} \usepackage{fullpage} \usepackage{marvosym} \xyoption{poly} \usepackage{url} \usepackage{comment} \usepackage{float} \usepackage{tikz} \usepackage{tikz-cd} \usetikzlibrary{decorations.pathmorphing} \newtheorem{introtheorem}{Theorem} \renewcommand{\theintrotheorem}{\Alph{introtheorem}} \newtheorem{introproposition}[introtheorem]{Proposition} \newtheorem{introcorollary}[introtheorem]{Corollary} \newtheorem{theorem}{Theorem}[section] \newtheorem{lemma}[theorem]{Lemma} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{corollary}[theorem]{Corollary} \theoremstyle{definition} \newtheorem{definition}[theorem]{Definition} \newtheorem{notations}[theorem]{Notations} \newtheorem{notation}[theorem]{Notation} \newtheorem{remark}[theorem]{Remark} \newtheorem{remarks}[theorem]{Remarks} \newtheorem{example}[theorem]{Example} \newtheorem{question}[theorem]{Question} \newtheorem*{question*}{Question} \newtheorem{questions}[theorem]{Questions} \newtheorem*{questions*}{Questions} \newtheorem{steps}[theorem]{Answer/steps} \newtheorem*{steps*}{Answer/steps} \newtheorem{progress}[theorem]{Progress} \newtheorem*{progress*}{Progress} \newtheorem{classification}[theorem]{Classification} \newtheorem*{classification*}{Classification} \newtheorem{construction}[theorem]{Construction} \newtheorem*{construction*}{Classification} \newtheorem*{example*}{Example} \newtheorem{examples}[theorem]{Examples} \newtheorem{se}[theorem]{} \newtheorem*{remark*}{Remark} \newtheorem*{remarks*}{Remarks} \newtheorem*{definition*}{Definition} \usepackage{calrsfs} \usepackage{url} \usepackage{longtable} \usepackage[OT2, T1]{fontenc} \usepackage{textcomp} \usepackage{times} \usepackage[scaled=0.92]{helvet} \renewcommand{\tilde}{\widetilde} \newcommand{\isomto}{\overset{\sim}{\rightarrow}} \newcommand{\C}{\mathbb{C}} \newcommand{\Q}{\mathbb{Q}} \newcommand{\I}{\mathcal{I}} \newcommand{\R}{\mathbb{R}} \newcommand{\Z}{\mathbb{Z}} \newcommand{\N}{\mathbb{N}} \newcommand{\F}{\mathbb{F}} \newcommand{\PP}{\mathbb{P}} \newcommand{\cZ}{\mathcal{Z}} \newcommand{\cN}{\mathcal{N}} \newcommand{\cS}{\mathcal{S}} \newcommand{\cR}{\mathcal{R}} \newcommand{\cO}{\mathcal{O}} \newcommand{\cC}{\mathcal{C}} \newcommand{\M}{\mathcal{M}} \newcommand{\T}{\mathbf{T}} \newcommand{\PSL}{\mathrm{PSL}} \newcommand{\PGL}{\mathrm{PGL}} \newcommand{\Isom}{\mathrm{Isom}} \DeclareMathOperator{\dlog}{{dlog}} \newcommand{\na}{\circ} \newcommand{\f}{f} \newcommand{\nn}{\nonumber} \newcommand{\X}{\mathcal{X}} \DeclareMathOperator{\Res}{Res} \DeclareMathOperator{\SL}{SL} \DeclareMathOperator{\GL}{GL} \DeclareMathOperator{\V}{V} \DeclareMathOperator{\E}{E} \DeclareMathOperator{\Aut}{Aut} \DeclareMathOperator{\dirac}{dirac} \DeclareMathOperator{\Cay}{Cay} \DeclareMathOperator{\Coc}{C_{har}} \DeclareMathOperator{\Sp}{S} \DeclareMathOperator{\Gal}{Gal} \DeclareMathOperator{\sgn}{sgn} \DeclareMathOperator{\supp}{supp} \DeclareSymbolFont{cyrletters}{OT2}{wncyr}{m}{n} \DeclareMathSymbol{\Sha}{\mathalpha}{cyrletters}{"58} \makeatletter \def\greekbolds#1{ \@for\next:=#1\do{ \def\X##1;{ \expandafter\def\csname V##1\endcsname{\boldsymbol{\csname##1\endcsname}} } \expandafter\X\next; } } \greekbolds{alpha,beta,iota,gamma,lambda,nu,eta,Gamma,varsigma,Lambda} \def\make@bb#1{\expandafter\def \csname 
bb#1\endcsname{{\mathbb{#1}}}\ignorespaces} \def\make@bbm#1{\expandafter\def \csname bb#1\endcsname{{\mathbbm{#1}}}\ignorespaces} \def\make@bf#1{\expandafter\def\csname bf#1\endcsname{{\bf #1}}\ignorespaces} \def\make@gr#1{\expandafter\def \csname gr#1\endcsname{{\mathfrak{#1}}}\ignorespaces} \def\make@scr#1{\expandafter\def \csname scr#1\endcsname{{\mathscr{#1}}}\ignorespaces} \def\make@cal#1{\expandafter\def\csname cal#1\endcsname{{\mathcal #1}}\ignorespaces} \def\do@Letters#1{#1A #1B #1C #1D #1E #1F #1G #1H #1I #1J #1K #1L #1M #1N #1O #1P #1Q #1R #1S #1T #1U #1V #1W #1X #1Y #1Z} \def\do@letters#1{#1a #1b #1c #1d #1e #1f #1g #1h #1i #1j #1k #1l #1m #1n #1o #1p #1q #1r #1s #1t #1u #1v #1w #1x #1y #1z} \do@Letters\make@bb \do@letters\make@bbm \do@Letters\make@cal \do@Letters\make@scr \do@Letters\make@bf \do@letters\make@bf \do@Letters\make@gr \do@letters\make@gr \makeatother \def\ol{\overline} \def\wt{\widetilde} \def\opp{\mathrm{opp}} \def\ul{\underline} \def\onto{\twoheadrightarrow} \def\der{{\rm der}} \def\wh{\widehat} \newcommand{\per}{\bot} \newcommand{\<}{\langle} \renewcommand{\>}{\rangle} \newcommand{\then}{\Rightarrow} \newcommand{\oneone}{\mbox{$\longleftrightarrow$}} \newcommand{\isoto}{\stackrel{\sim}{\longrightarrow}} \newcommand{\embed}{\hookrightarrow} \def\Spec{{\rm Spec}\,} \def\Fqbar{\overline{\bbF}_q} \def\Fpbar{\overline{\bbF}_p} \def\Fp{{\bbF}_p} \def\Fq{{\bbF}_q} \def\Qlbar{\overline{{\bbQ}_{\ell}}} \def\Ql{{\bbQ}_{\ell}} \def\Qpbar{\overline{{\bbQ}_p}} \def\Qp{{\bbQ}_p} \def\Zpbar{\overline{{\bbZ}_p}} \def\Zp{{\bbZ}_p} \def\Qbar{\overline{\bbQ}} \def\ch{characteristic\ } \def\Gm{{\bbG_m}} \renewcommand{\H}{\mathbb H} \newcommand{\A}{\mathbb A} \newcommand{\G}{\mathbb G} \renewcommand{\O}{\mathbb O} \def\makeop#1{\expandafter\def\csname#1\endcsname {\mathop{\rm #1}\nolimits}\ignorespaces} \makeop{Hom} \makeop{End} \makeop{Aut} \makeop{Isom} \makeop{Pic} \makeop{Gal} \makeop{ord} \makeop{Char} \makeop{Div} \makeop{Lie} \makeop{PGL} \makeop{Corr} \makeop{PSL} \makeop{sgn} \makeop{Spf} \makeop{Spec} \makeop{Tr} \makeop{Nr} \makeop{Fr} \makeop{disc} \makeop{Proj} \makeop{supp} \makeop{ker} \makeop{im} \makeop{dom} \makeop{coker} \makeop{Stab} \makeop{SO} \makeop{SL} \makeop{SL} \makeop{Cl} \makeop{cond} \makeop{Br} \makeop{inv} \makeop{rank} \makeop{id} \makeop{Fil} \makeop{Frac} \makeop{GL} \makeop{SU} \makeop{Nrd} \makeop{Sp} \makeop{Tr} \makeop{Trd} \makeop{diag} \makeop{Res} \makeop{ind} \makeop{depth} \makeop{Tr} \makeop{st} \makeop{Ad} \makeop{Int} \makeop{tr} \makeop{Sym} \makeop{can} \makeop{length}\makeop{SO} \makeop{torsion} \makeop{GSp} \makeop{Ker} \makeop{Adm} \makeop{Mat} \DeclareMathOperator{\PSU}{PSU} \DeclareMathOperator{\PSp}{PSp} \DeclareMathOperator{\Spin}{Spin} \DeclareMathOperator{\Hdg}{Hdg} \DeclareMathOperator{\MT}{MT} \def\invlim{{\displaystyle{\lim_{\leftarrow}}}} \DeclareMathOperator{\Isog}{Isog} \DeclareMathOperator{\Mass}{Mass} \DeclareMathOperator{\Ell}{Ell} \newcommand{\dieu}{Dieudonn\'{e} } \DeclareMathOperator{\Alt}{Alt} \DeclareMathOperator{\Ind}{Ind} \DeclareMathOperator{\Rad}{Rad} \DeclareMathOperator{\Nil}{Nil} \DeclareMathOperator{\Tor}{Tor} \DeclareMathOperator{\Ext}{Ext} \DeclareMathOperator{\Id}{Id} \DeclareMathOperator{\Mor}{Mor} \DeclareMathOperator{\Nm}{N} \DeclareMathOperator{\coh}{H} \DeclareMathOperator{\Frob}{Frob} \DeclareMathOperator{\Span}{Span} \DeclareMathOperator{\LCM}{LCM} \DeclareMathSymbol{\twoheadrightarrow} {\mathrel}{AMSa}{"10} \DeclareMathOperator{\ad}{ad} \DeclareMathOperator{\cl}{cl} 
\DeclareMathOperator{\img}{img} \DeclareMathOperator{\lcm}{lcm} \DeclareMathOperator{\pr}{pr} \DeclareMathOperator{\rk}{rank} \DeclareMathOperator{\mult}{mult} \DeclareMathOperator{\fchar}{char} \DeclareMathOperator{\sign}{sign} \DeclareMathOperator{\res}{res} \DeclareMathOperator{\ddiv}{div} \DeclareMathOperator{\vol}{vol} \def\Max{\mathrm{max}} \newcommand{\Lsymb}[2]{\genfrac{(}{)}{}{}{#1}{#2}} \newcommand{\ulm}{{\underline{m}}} \newcommand{\uln}{{\underline{n}}} \newcommand{\brN}{\breve{\mathbb{N}}} \newcommand{\abs}[1]{\lvert #1 \rvert} \newcommand{\zmod}[1]{\mathbb{Z}/ #1 \mathbb{Z}} \newcommand{\umod}[1]{(\mathbb{Z}/ #1 \mathbb{Z})^\times} \newcommand{\fl}[1]{\left\lfloor #1 \right\rfloor} \newcommand{\dangle}[1]{\left\langle #1 \right\rangle} \def\red{\mathrm{red}} \def\sep{\mathrm{sep}} \def\alg{\mathrm{alg}} \def\ss{\mathrm{ss}} \def\op{\mathrm{op}} \def\sfF{\mathsf{F}} \def\sfV{\mathsf{V}} \def\red{\color{red}} \begin{document} \title{When is a polarised abelian variety determined by its $\boldsymbol{p}$-divisible group?} \author{Tomoyoshi Ibukiyama} \address{Department of Mathematics, Graduate School of Science, Osaka University, Toyonaka, Japan} \email{[email protected]} \author{Valentijn Karemaker} \address{Mathematical Institute, Utrecht University, Utrecht, The Netherlands} \email{[email protected]} \author{Chia-Fu Yu} \address{Institute of Mathematics, Academia Sinica and National Center for Theoretic Sciences, Taipei, Taiwan} \email{[email protected]} \keywords{Gauss problem, Hermitian lattices, abelian varieties, central leaves, mass formula} \subjclass{14K10 (14K15, 11G10, 11E41, 16H20)} \begin{abstract} We study the Siegel modular variety $\mathcal{A}_g\otimes \overline{\mathbb{F}}_p$ of genus $g$ and its supersingular locus~$\mathcal{S}_g$. As our main result we determine precisely when $\mathcal{S}_g$ is irreducible, and we list all~$x$ in~$\mathcal{A}_g\otimes \overline{\mathbb{F}}_p$ for which the corresponding central leaf $\mathcal{C}(x)$ consists of one point, that is, for which~$x$ corresponds to a polarised abelian variety which is uniquely determined by its associated polarised $p$-divisible group. The first problem translates to a class number one problem for quaternion Hermitian lattices. The second problem also translates to a class number one problem, whose solution involves mass formulae, automorphism groups, and a careful analysis of Ekedahl-Oort strata in genus $g=4$. \end{abstract} \maketitle \setcounter{tocdepth}{2} \section{Introduction} \def\pol{{\rm pol}} \def\opp{{\rm opp}} \def\LatR{{\rm Lat}_R} \def\RLat{{}_{R}{\rm Lat}} \def\RoLat{{}_{R^{\rm opp}}{\rm Lat}} Throughout this paper, let $p$ denote a prime number and let $k$ be an algebraically closed field of characteristic $p$. Let $(X_1,\lambda_1)$ and $(X_2,\lambda_2)$ be two principally polarised abelian varieties over~$k$. Then \begin{equation}\label{eq:Q} (X_1,\lambda_1)\simeq (X_2,\lambda_2) \implies (X_1,\lambda_1)[p^\infty]\simeq (X_2,\lambda_2)[p^\infty], \end{equation} where $(X_i,\lambda_i)[p^\infty]$ denotes the polarised $p$-divisible group associated to $(X_i,\lambda_i)$. The converse is generally not true. Indeed, the goal of this paper is to determine precisely when the converse to~\eqref{eq:Q} is true. We treat this problem by putting it in a geometric context, by considering the moduli space of abelian varieties. So let $\calA_g$ denote the moduli space over $\Fpbar$ of principally polarised abelian varieties of dimension $g\ge 1$. 
For a point $x=[(X_0,\lambda_0)]\in \calA_{g}(k)$, denote by \[ \calC(x):=\{[(X,\lambda)]\in \calA_{g}(k) : (X,\lambda)[p^\infty]\simeq (X_0,\lambda_0)[p^\infty] \} \] the central leaf of $\calA_{g}$ passing through $x$ introduced in \cite{oort:foliation}. Then the problem becomes a very simple question: when does a central leaf $\calC(x)$ consist of only the point $x$ itself?\\ Chai and Oort \cite{COirr} proved the Hecke orbit conjecture, stating that the prime-to-$p$ Hecke orbit $\calH^{(p)}(x)$ of any point $x\in \calA_g(k)$ is Zariski dense in the ambient central leaf $\calC(x)$. They also proved that every non-supersingular Newton stratum is irreducible and that every non-supersingular central leaf is irreducible. Furthermore, it follows from a result of Chai \cite[Proposition~1]{chai}, cf.~Proposition~\ref{prop:chai}, that if $x=[(X_0,\lambda_0)]$ is not supersingular, then $\calC(x)$ has positive dimension. If $x$ is supersingular, then the central leaf is finite. Hence, the converse to~\eqref{eq:Q} can be true only when $X_0$ is a supersingular abelian variety, that is, when $X_0$ is isogenous to a product of supersingular elliptic curves.\\ In this paper we prove supersingular analogues of the results of Chai and Chai--Oort. That is, we determine precisely when a supersingular central leaf $\calC(x)$ (i.e., $x\in \calS_g(k))$ is irreducible (i.e., $\mathcal{C}(x) = \{x \}$), and when the supersingular locus $\calS_g \subseteq \mathcal{A}_g$ is irreducible. When $g=1$, it is well known that the supersingular locus $\calS_1$ is the same as the unique supersingular central leaf~$\calC(x)$, whose cardinality is the class number of the quaternion $\Q$-algebra ramified at $\{p,\infty\}$. Then $\calS_1=\calC(x)$ is irreducible if and only if $p\in \{2, 3, 5, 7, 13\}$. When $g>1$, we will see in Subsection~\ref{ssec:4first} that the size of $\calC(x)$ is again equal to a class number of a certain reductive group, so the question is a type of Gauss problem or class number one problem. To solve this problem, we also answer fundamental questions on arithmetic properties of the polarised abelian varieties in question. These answers have applications in particular to determining the geometric endomorphism rings and automorphism groups of polarised abelian varieties in the Ekedahl-Oort strata that are entirely contained in the supersingular locus.\\ For any abelian variety $X$ over $k$, the $a$-number of $X$ is defined by $a(X):=\dim_k \Hom(\alpha_p, X)$, where $\alpha_p$ is the kernel of the Frobenius morphism on the additive group $\bbG_a$. The $a$-number of the abelian variety corresponding to a point $x \in \calA_{g}(k)$ is denoted by $a(x)$. Our main result is the following theorem. \begin{introtheorem}\label{thm:main} (Theorem~\ref{thm:main2}) \begin{enumerate} \item The supersingular locus $\calS_g$ is geometrically irreducible if and only if one of the following three cases holds: \begin{itemize} \item [(i)] $g=1$ and $p\in \{2,3,5,7,13\}$; \item [(ii)] $g=2$ and $p\in \{ 2, 3, 5, 7, 11\}$; \item [(iii)] $(g, p)=(3,2)$ or $(g,p)=(4,2)$. \end{itemize} \item Let $\calC(x)$ be the central leaf of $\calA_{g}$ passing through a point $x=[X_0,\lambda_0]\in \calS_{g}(k)$. Then $\calC(x)$ consists of one element if and only if one of the following three cases holds: \begin{itemize} \item [(i)] $g=1$ and $p\in \{2,3,5,7,13\}$; \item [(ii)] $g=2$ and $p=2,3$; \item [(iii)] $g=3$, $p=2$ and $a(x)\ge 2$. 
\end{itemize} \end{enumerate} \end{introtheorem} \begin{introcorollary} A principally polarised abelian variety $(X,\lambda)$ over $k$ is uniquely determined by its polarised $p$-divisible group if and only if $X$ is supersingular, $g=\dim X \leq 3$, and one of (i), (ii), (iii) of Theorem~\ref{thm:main}.(2) holds. \end{introcorollary} We first comment on Theorem~\ref{thm:main}.(2). As mentioned above, Case~(i) is well-known; Case~(ii) is a result due to the first author~\cite{ibukiyama}. In both cases, the result is independent of the point $x$. In Section~\ref{sec:proof} we prove the remaining cases; namely, we show that $\vert \calC(x) \vert >1$ for $g\geq 4$, and that when $g=3$, (iii) lists the only cases such that $|\calC(x)|=1$. When $g=3$ and $a(x)=3$ (the \emph{principal genus} case), the class number one result is known due to Hashimoto \cite{hashimoto:g=3}. Hashimoto first computes an explicit class number formula in the principal genus case and proves the class number one result as a direct consequence. Our method instead uses mass formulae and the automorphism groups of certain abelian varieties, which is much simpler than proving explicit class number formulae. Mass formulae for dimension $g=3$ were very recently provided by F.~Yobuko and the second and third-named authors~\cite{karemaker-yobuko-yu}. In addition, we perform a careful analysis of the Ekedahl-Oort strata in dimension $g=4$; in Proposition~\ref{prop:EO} we show precisely how the Ekedahl-Oort strata and Newton strata intersect. It is worth mentioning that we do not use any computers in this paper (unlike most papers that treat class number one problems); the only numerical data we use is the well-known table above Lemma~\ref{lem:vn} in Subsection~\ref{ssec:Gaussarith}. In the course of our proof of Theorem~\ref{thm:main}.(2), in Subsection~\ref{ssec:Eisog} we define the notion of minimal $E$-isogenies (Definition~\ref{def:minE}), where $E$ is any elliptic curve (not necessarily supersingular) over any field~$K$. This generalises the notion of minimal isogenies for supersingular abelian varieties in the sense of Oort \cite[Section 1.8]{lioort}. This new construction of minimal isogenies even has a new (and stronger) universal property since the test object is not required to be an isogeny, cf.~Remark~\ref{rem:min_isog}. We also extend the results of Jordan et al.~\cite{JKPRST} on abelian varieties isogenous to a power of an elliptic curve to those with a polarisation in Subsections~\ref{ssec:powers}--\ref{ssec:powerAV}, cf.~Proposition~\ref{prop:equiv}. These results can be paraphrased as follows: \begin{introtheorem}\label{thm:B}\ Let $E$ be any elliptic curve over any field $K$, let $R = \mathrm{End}(E)$ and denote by $\mathrm{Lat}_R$ (resp.~$\mathrm{Lat}_R^H$) the category of right $R$-lattices (resp.~positive-definite Hermitian such lattices). Also let $\mathcal{A}_E$ (resp.~$\mathcal{A}_E^{\mathrm{pol}}$) denote the category of abelian varieties over $K$ isogenous to a power of $E$ (resp.~fractionally polarised such varieties) and let $\mathcal{A}_{E,\mathrm{ess}}$ (resp.~$\mathcal{A}^{\mathrm{pol}}_{E,\mathrm{ess}}$) be the essential image of the sheaf Hom functor ${\mathcal Hom}_R(-, E): \RLat^\opp \to \calA_E$ constructed in \cite{JKPRST} with inverse $\mathrm{Hom}(-,E)$ (resp.~its fractionally polarised elements). 
\begin{enumerate} \item (Proposition~\ref{prop:equiv}, Corollary~\ref{cor:Aut}.(1)) There exists an equivalence of categories $\mathcal{A}^{\mathrm{pol}}_{E,\mathrm{ess}} \longrightarrow \mathrm{Lat}_R^H$. Hence, for any $(X,\lambda) \in \mathcal{A}^{\mathrm{pol}}_{E,\mathrm{ess}}$ there exists a unique decomposition of $\mathrm{Aut}(X,\lambda)$ determined by the unique orthogonal decomposition of its associated lattice. \item (Corollary~\ref{cor:JKPRST}, Corollary~\ref{cor:Aut}.(2)) Suppose that $K = \mathbb{F}_q$ and that either $E$ is ordinary with $R = \mathbb{Z}[\pi]$, or $E$ is supersingular with $K = \mathbb{F}_p$ and $R = \mathbb{Z}[\pi]$, or $E$ is supersingular with $K = \mathbb{F}_{p^2}$ and $R$ has rank $4$ over $\mathbb{Z}$. Then all results in~(1) hold upon replacing $\mathcal{A}^{\mathrm{pol}}_{E,\mathrm{ess}}$ with $\mathcal{A}^{\mathrm{pol}}_{E}$. \item (Theorem~\ref{thm:pol+JKPRST}) All results in~(1) hold when $E$ is any abelian variety over $K~=~\mathbb{F}_p$ with minimal endomorphism ring $R = \mathbb{Z}[\pi, \bar{\pi}]$ and commutative endomorphism algebra. \end{enumerate} \end{introtheorem} Finally, we comment on Theorem~\ref{thm:main}.(1). It was proven in \cite[Theorem 4.9]{lioort} that the number of irreducible components of $\mathcal{S}_g$ is a class number of a genus of maximal quaternion Hermitian lattices, namely the class number $H_g(p,1)$ of the principal genus if $g$ is odd and the class number $H_g(1,p)$ of the non-principal genus if $g$ is even. Thus, Theorem~\ref{thm:main}.(1) also solves a Gauss problem or class number one problem. Indeed, the above indicates a clear connection between the arithmetic (\ref{thm:main}.(1)) and geometric (\ref{thm:main}.(2)) class number one problems we are considering. More precisely, let $B$ be a definite quaternion $\Q$-algebra and let $O$ be a maximal order in $B$. Let $V$ be a left $B$-module of rank $n$, and $f:V\times V\to B$ be a positive-definite quaternion Hermitian form with respect to the canonical involution $x\mapsto \bar x$. For each left $O$-lattice $L$ in $V$ denote by $h(L,f)$ the class number of the isomorphism classes in the genus containing $L$. As the main result of the arithmetic part of this paper (Section~\ref{sec:Arith}), in Theorem~\ref{thm:mainarith} we determine precisely when $h(L,f)=1$ for all maximal left $O$-lattices $L$. This is a special case, with a different proof, of the results of \cite[Chapter~9]{KirschmerHab}, cf.~Remark~\ref{rem:Kirschmer}. For the rank one case, the list of definite quaternion $\Z$-orders of class number one has been determined by Brzezinski~\cite{brzezinski:h=1} in 1995; this was generalised to class number at most two by Kirschmer-Lorch~\cite{KirschmerLorch}.\\ The structure of the paper is as follows. The arithmetic theory (Theorem~\ref{thm:main}.(1)) is treated in Section~2, building up to the class number one result in Theorem~\ref{thm:mainarith}. Theorem~\ref{orthogonal} is the unique orthogonal decomposition result for lattices, and Corollary~\ref{autodecomposition} gives its consequence for automorphism groups of such lattices. The geometric theory starts in Section~\ref{sec:GMF}, which recalls mass formulae due to the second and third authors as well as other authors. Section~\ref{sec:aut} treats automorphism groups (cf.~Corollary~\ref{cor:Aut}), through the results collected in Theorem~\ref{thm:B}. 
Minimal $E$-isogenies are introduced in Subsection~\ref{ssec:Eisog}, and Subsection~\ref{ssec:uniquedec} provides the geometric analogue of Theorem~\ref{orthogonal}. Finally, Section~\ref{sec:proof} solves the geometric class number one problem for central leaves (Theorem~\ref{thm:main}.(2)), using mass formulae for the case $g=3$ (Subsection~\ref{ssec:g3}) and explicit computations on Ekedahl-Oort strata for the hardest case $g = 4$ (Subsection~\ref{ssec:g4}). In future work, we plan to extend the techniques of this work to prove that every geometric generic polarised supersingular abelian varieties of dimension $g>1$ in odd characteristic has automorphism group equal to $\{\pm 1\}$, known as a conjecture of Oort. \subsection*{Acknowledgements} The first author is supported by JSPS Kakenhi Grants JP19K03424 and JP20H00115. The second author is supported by the Dutch Research Council (NWO) through grants VI.Veni.192.038 and VI.Vidi.223.028. The third author is partially supported by the MoST grant 109-2115-M-001-002-MY3 and Academia Sinica grant AS-IA-112-M01. We thank Brendan Hassett and Akio Tamagawa for helpful discussions. The authors thank the referees for their careful reading and helpful comments that have improved the manuscript significantly. \section{The arithmetic theory}\label{sec:Arith} \subsection{Uniqueness of orthogonal decomposition}\label{ssec:RSarith}\ Let $F$ be a totally real algebraic number field, and let $B$ be either $F$ itself, a CM field over~$F$ (i.e., a totally imaginary quadratic extension of $F$), or a totally definite quaternion algebra central over~$F$ (i.e., such that any simple component of $B\otimes \R$ is a division algebra). These~$B$ are typical $\Q$-algebras for considering positive-definite Hermitian $B$-modules. We refer to Remark~\ref{rem:fromintro} for more general algebras $B$ that one may consider. We may regard~$B^n$ as a left $B$-vector space. As a vector space over $F$, we see that $B^n$ can be identified with~$F^{en}$, where $e=1$, $2$, or $4$ according to the choice of $B$ made above. Let $O_F$ be the ring of integers of $F$. A lattice in $B^n$ is a finitely generated $\Z$-submodule $L \subseteq B^n$ such that $\Q L=B^n$ (i.e., $L$ contains a basis of $B^n$ over $\Q$); it is called an $O_F$-lattice if $O_F L \subseteq L$. A subring $\cO$ of~$B$ is called an order of $B$ if it is a lattice in $B$; $\cO$ is called an $O_F$-order if $\cO$ also contains~$O_F$. Any element of $\cO$ is integral over $O_F$. We fix an order $\cO$ of $B$. Put $V=B^n$ and let $f:V\times V\rightarrow B$ be a quadratic form, a Hermitian form, or a quaternion Hermitian form according to whether $B=F$, $B$ is CM, or $B$ is quaternionic. This means that $f$ satisfies \begin{equation}\label{eq:hermitian} \begin{split} f(ax,y) & =af(x,y) \qquad \text{ for any $x$, $y\in V$, $a\in B$}, \\ f(x_1+x_2,y)& =f(x_1,y)+f(x_2,y) \quad \text{ for any $x_i$, $y \in V$},\\ f(y,x) & = \ol{f(x,y)} \qquad \text{ for any $x$, $y \in V$}, \end{split} \end{equation} where $x\mapsto \bar x$ is the canonical involution of $B$ over $F$, that is, the trivial map for $F$, the complex conjugation for a fixed embedding $B \subseteq \C$ if $B$ is a CM field, or the anti-automorphism of $B$ of order~$2$ such that $x+\bar x=\mathrm{Tr}_{B/F}(x)$ for the reduced trace $\mathrm{Tr}_{B/F}$. By the above properties, we have $f(x,x)\in F$ for any $x\in V$. 
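As a simple illustration of these conditions, consider the following example.
\begin{example*}
Take $n=1$ and $f(x,y)=x\bar y$ on $V=B$. The properties in \eqref{eq:hermitian} are immediate, and $f(x,x)=x\bar x$ equals $x^2$ if $B=F$, the norm $N_{B/F}(x)$ if $B$ is a CM field, and the reduced norm $\mathrm{Nr}_{B/F}(x)$ if $B$ is quaternionic; in each case $f(x,x)\in F$, and it is totally positive in the sense defined next because $B$ is totally real, CM, or totally definite, respectively.
\end{example*}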
We assume that $f$ is totally positive, that is, for any $x\in V$ and for any embedding $\sigma:F\rightarrow \R$, we have $f(x,x)^{\sigma}>0$ unless $x=0$. A lattice $L\subseteq V$ is said to be a left $\cO$-lattice if $\cO L\subseteq L$. An $\cO$-submodule~$M$ of an $\cO$-lattice $L$ is called an $\cO$-sublattice of $L$; in this case, $M$ is an $\cO$-lattice in the $B$-module $B M$ of possibly smaller rank. We say that a left $\cO$-lattice $L\neq 0$ is indecomposable if whenever $L=L_1+L_2$ and $f(L_1,L_2)=0$ for some left $\cO$-lattices $L_1$ and $L_2$, then $L_1=0$ or $L_2=0$. For quadratic forms over $\Q$, the following theorem is in \cite[Theorem 6.7.1, p.~169]{kitaoka} and \cite[Satz 27.2]{kneser}. The proof for the general case is almost the same and was also given in \cite[Theorem~2.4.9] {KirschmerHab} where the order $\cO$ is maximal. \begin{theorem}\label{orthogonal} Assumptions and notation being as above, any left $\cO$-lattice $L\subseteq B^n$ has an orthogonal decomposition \[ L=L_1\perp \cdots \perp L_r \] for some indecomposable left $\cO$-sublattices $L_i$. The set of lattices $\{L_i\}_{1\leq i\leq r}$ is uniquely determined by $L$. \end{theorem} \begin{proof}Any non-zero $x \in L$ is called primitive if there are no $y$,$z\in L$ such that $y\neq 0$, $z\neq 0$, and $x=y+z$ with $f(y,z)=0$. First we see that any $0\neq x\in L$ is a finite sum of primitive elements of $L$. If $x$ is not primitive, then we have $x=y+z$ with $0\neq y$, $z\in L$ with $f(y,z)=0$. So we have $f(x,x)=f(y,y)+f(z,z)$ and hence \[ \mathrm{Tr}_{F/\Q}(f(x,x))=\mathrm{Tr}_{F/\Q}(f(y,y))+\mathrm{Tr}_{F/\Q}(f(z,z)). \] Since $f$ is totally positive, we have $\mathrm{Tr}_{F/\Q}(f(x,x))=\sum_{\sigma:F\rightarrow \R}f(x,x)^{\sigma}=0$ if and only if $x=0$. So we have $\mathrm{Tr}_{F/\Q}(f(y,y))<\mathrm{Tr}_{F/\Q}(f(x,x))$. If $y$ is not primitive, we continue the same process. We claim that this process terminates after finitely many steps. Since $L\neq 0$ is a finitely generated $\Z$-module, $f(L,L)$ is a non-zero finitely generated $\Z$-module. So the module $\mathrm{Tr}_{F/\Q}(f(L,L))$ is a fractional ideal of $\Z$ and we have $\mathrm{Tr}_{F/\Q}(f(L,L))=e\Z$ for some $0<e\in \Q$. This means that $\mathrm{Tr}_{F/\Q}(f(x,x))\in e\Z_{>0}$ for any $x \in L$. So after finitely many iterations, $\mathrm{Tr}_{F/\Q}(f(y,y))$ becomes $0$ and the claim is proved. We say that primitive elements $x$, $y\in L$ are \emph{connected} if there are primitive elements $z_1$, $z_2$, \ldots, $z_r \in L$ such that $x=z_0$, $y=z_r$, and $f(z_{i-1},z_{i})\neq 0$ for $i=1$,\ldots, $r$. This is an equivalence relation. We denote by $K_{\lambda}$, for $\lambda \in \Lambda$, the equivalence classes of primitive elements in $L$. By definition, elements of $K_{\lambda_1}$ and $K_{\lambda_2}$ for $\lambda_1\neq \lambda_2$ are orthogonal. We denote by $L_{\lambda}$ the left $\cO$-module spanned by elements of $K_{\lambda}$. Then we have \[ L=\perp_{\lambda\in \Lambda}L_{\lambda}. \] Since $F\cO=B$, we see that $V_{\lambda}:=FL_{\lambda}$ is a left $B$-vector space and $L_{\lambda}$ is an $\cO$-lattice in $V_{\lambda}$. Since $\dim_B \sum_{\lambda\in \Lambda}V_{\lambda}=n$, we see that $\Lambda$ is a finite set. Hence any primitive element in $L_{\lambda}$ belongs to $K_{\lambda}$. Indeed, if $y\in L_{\lambda}\subseteq L$ is primitive, then $y\in K_{\mu}$ for some $\mu\in \Lambda$, but if $\lambda\neq \mu$, then $y\in K_{\mu}\subseteq L_{\mu}$, so $y=0$, a contradiction. 
Now if $L_{\lambda}=N_1\perp N_2$ for some left $\cO$-modules $N_1\neq 0$, $N_2\neq 0$, then whenever $x+y$ with $x\in N_1$, $y\in N_2$ is primitive, we have $x=0$ or $y=0$. So if $0\neq x \in N_1$ is primitive and if $f(x,z_1)\neq 0$ for some primitive element $z_1\in L_{\lambda}$, then $z_1 \in N_1$. Repeating the process, any $y\in K_{\lambda}$ belongs to $N_1$, so that $N_1=L_{\lambda}$, and hence, $L_{\lambda}$ is indecomposable. Now if $L=\perp_{\kappa \in K}M_{\kappa}$ for other indecomposable lattices $M_{\kappa}$ (indexed by the set $K$), then any primitive element $x$ of $L$ is contained in some $M_{\kappa}$ by the definition of primitivity. By the same reasoning as before, if $x \in M_{\kappa}$ is primitive, then any primitive $y\in L$ connected to $x$ belongs to $M_{\kappa}$. This means that there is an injection $\iota:\Lambda\rightarrow K$ such that $L_{\lambda}\subseteq M_{\iota(\lambda)}$. Since \[ L=\perp_{\lambda\in \Lambda}L_{\lambda}\subseteq \perp_{\lambda\in \Lambda} M_{\iota(\lambda)}\subseteq L \] we have $L_{\lambda}=M_{\iota(\lambda)}$ and $\iota$ is a bijection. \end{proof} \begin{corollary}\label{autodecomposition} Assumptions and notation being as before, suppose that $L$ has an orthogonal decomposition \[ L=\perp_{i=1}^{r}M_i \] where $M_i=\perp_{j=1}^{e_i}L_{ij}$ for some indecomposable left $\cO$-lattices $L_{ij}$ such that $L_{ij}$ and $L_{ij'}$ are isometric for any $j$, $j'$, but $L_{ij}$ and $L_{i'j'}$ are not isometric for $i\neq i'$. Then we have \[ \Aut(L)\cong \prod_{i=1}^{r}\Aut(L_{i1})^{e_i}\cdot S_{e_i} \] where $S_{e_i}$ is the symmetric group on $e_i$ letters and $\Aut(L_{i1})^{e_i}\cdot S_{e_i}$ is a semi-direct product where $S_{e_i}$ normalises $\Aut(L_{i1})^{e_i}$. \end{corollary} \begin{proof} By Theorem \ref{orthogonal}, we see that for any element $\epsilon \in \Aut(L)$, there exists $\tau\in S_{e_i}$ such that $\epsilon(L_{i1})=L_{i\tau(1)}$, so the result follows. \end{proof} \begin{remark}\label{rem:product} The proof of Theorem~\ref{orthogonal} also works in the following more general setting: $B=\prod_i B_i$ is a finite product of $\Q$-algebras $B_i$, where $B_i$ is either a totally real field $F_i$, a CM field over $F_i$, or a totally definite quaternion algebra over $F_i$. Denote by $\bar\cdot$ the canonical involution on~$B$ and $F=\prod_i F_i$ the subalgebra fixed by $\bar\cdot$. Let $\calO$ be any order in $B$, and let $V$ be a faithful left $B$-module equipped with a totally positive Hermitian form $f$, which satisfies the conditions in~\eqref{eq:hermitian} and is totally positive on each factor in $V=\oplus V_i$ with respect to $F=\prod_i F_i$. \end{remark} \begin{remark}\label{rem:fromintro} By the Albert classification of division algebras, the endomorphism algebra $B = \End^0(A)$ of any simple abelian variety $A$ over any field $K$ is either a totally real field~$F$, a quaternion algebra over $F$ (totally definite or totally indefinite), or a central division algebra over a CM field over~$F$. The results in this subsection apply to all these classes of algebras, except for totally indefinite quaternion algebras and non-commutative central division algebras over a CM field. Indeed, Theorem~\ref{orthogonal} provides a very general statement about unique orthogonal decomposition of lattices, which enables us to compute the automorphism groups of such lattices via Corollary~\ref{autodecomposition}. 
On the geometric side however, in this paper we will be mostly interested in supersingular abelian varieties, which are by definition isogenous to a power of a supersingular elliptic curve; hence, the most important algebras for us to study are the definite quaternion $\Q$-algebras $B = \End^0(E)$ for some supersingular elliptic curve $E$ over an algebraically closed field. We specialise to these algebras in the next subsections (Subsections~\ref{ssec:massarith} and~\ref{ssec:Gaussarith}) and solve a class number one problem for these in Theorem~\ref{thm:mainarith}. And indeed, in Theorem~\ref{thm:main2} we will solve the Gauss problem for the central leaves of all supersingular abelian varieties. Allowing $B$ to be a more general definite quaternion $\Q$-algebra (that is, not necessarily ramified only at $\{p,\infty\}$) would prove an extension of the Gauss problem for central leaves from Siegel modular varieties to quaternionic Shimura varieties of higher degree, which are direct generalisations of Shimura curves (that is, fake modular curves).\\ \end{remark} \subsection{Quaternionic Hermitian groups and mass formulae}\label{ssec:massarith}\ For the rest of this section, we let $B$ be a definite quaternion $\Q$-algebra central over $\Q$ with discriminant $D$ and let $O$ be a maximal order in $B$. Then $D=q_1\cdots q_t$ is a product of $t$ primes, where $t$ is an odd positive integer. The canonical involution on $B$ is denoted by $x\mapsto \bar x$. Let $(V,f)$ be a positive-definite quaternion Hermitian space over $B$ of rank $n$. That is, $f$ satisfies the properties in Equation~\eqref{eq:hermitian} and $f(x,x)\ge 0$ for all $x\in V$ and $f(x,x)=0$ only when $x=0$. The isomorphism class of $(V,f)$ over $B$ is uniquely determined by $\dim_B V$. We denote by $G=G(V,f)$ the group of all similitudes on $(V,f)$; namely, \[ G=\{\alpha\in \GL_B(V): f(x \alpha,y \alpha)=n(\alpha)f(x,y) \quad \forall\, x,y\in V\ \}, \] where $n(\alpha)\in \Q^\times$ is a scalar depending only on $\alpha$. For each prime $p$, we write $O_p:=O\otimes_\Z \Zp$, $B_p:=B\otimes_\Q \Qp$ and $V_p:=V\otimes_\Q \Qp$, and let $G_p=G(V_p,f_p)$ be the group of all similitudes on the local quaternion Hermitian space $(V_p,f_p)$. Two $O$-lattices $L_1$ and $L_2$ are said to be equivalent, denoted $L_1\sim L_2$, if there exists an element $\alpha\in G$ such that $L_2=L_1 \alpha$; the equivalence of two $O_p$-lattices is defined analogously. Two $O$-lattices $L_1$ and $L_2$ are said to be in the same genus if $(L_1)_p\sim (L_2)_p$ for all primes~$p$. The norm $N(L)$ of an $O$-lattice $L$ is defined to be the two-sided fractional $O$-ideal generated by $f(x,y)$ for all $x,y\in L$. If $L$ is maximal among the $O$-lattices having the same norm $N(L)$, then it is called a maximal $O$-lattice. The notion of maximal $O_p$-lattices in~$V_p$ is defined analogously. Then an $O$-lattice $L$ is maximal if and only if the $O_p$-lattice $L_p:=L\otimes_\Z \Zp$ is maximal for all prime numbers $p$. For each prime $p$, if $p\nmid D$, then there is only one equivalence class of maximal $O_p$-lattices in $V_p$, represented by the standard unimodular lattice $(O_p^n, f=\bbI_n)$. 
If $p|D$, then there are two equivalence classes of maximal $O_p$-lattices in $V_p$, represented by the principal lattice $(O_p^n,f=~\bbI_n)$ and a non-principal lattice $((\Pi_p O_p)^{\oplus (n-c)}\oplus O_p^{\oplus c},\bbJ_n)$, respectively, where $c=~\lfloor n/2\rfloor$, and $\Pi_p$ is a uniformising element in $O_p$ with $\Pi_p \ol \Pi_p=p$, and $\bbJ_n=\text{anti-diag}(1,\dots, 1)$ is the anti-diagonal matrix of size $n$. Thus, there are $2^t$ genera of maximal $O$-lattices in $V$ when $n\geq 2$. For each positive integer $n$ and a pair $(D_1,D_2)$ of positive integers with $D=D_1D_2$, denote by $\calL_n(D_1,D_2)$ the genus consisting of maximal $O$-lattices in $(V,f)$ of rank $n$ such that for all primes $p|D_1$ (resp.~$p|D_2$) the $O_p$-lattice $(L_p,f)$ belongs to the principal class (resp.~ the non-principal class). We denote by $[\calL_n(D_1,D_2)]$ the set of equivalence classes of lattices in $\calL_n(D_1,D_2)$ and by $H_n(D_1,D_2):=\# [\calL_n(D_1,D_2)]$ the class number of the genus $\calL_n(D_1,D_2)$. The mass $M_n(D_1,D_2)$ of $[\calL_n(D_1,D_2)]$ is defined by \begin{equation} \label{eq:Mass} M_n(D_1,D_2)=\Mass([\calL_n(D_1,D_2)]):=\sum_{L\in [\calL_n(D_1,D_2)]} \frac{1}{|\Aut(L)|}, \end{equation} where $\Aut(L):=\{\alpha\in G: L\alpha=L\}$. Note that if $\alpha\in \Aut(L)$ then $n(\alpha)=1$, because $n(\alpha)>0$ and $n(\alpha)\in \Z^\times=\{\pm 1 \}$. Let $G^1:=\{\alpha\in G: n(\alpha)=1\}$. The class number and mass for a $G^1$-genus of $O$-lattices are defined analogously to the case of $G$: two $O$-lattices $L_1$ and $L_2$ are said to be isomorphic, denoted $L_1\simeq L_2$, if there exists an element $\alpha\in G^1$ such that $L_2=L_1 \alpha$; similarly, two $O_p$-lattices $L_{1,p}$ and $L_{2,p}$ are said to be isomorphic, denoted $L_{1,p}\simeq L_{2,p}$ if there exists an element $\alpha_p\in G^1_p$ such that $L_{2,p}=L_{1,p} \alpha_p$. Two $O$-lattices $L_1$ and $L_2$ are said to be in the same $G^1$-genus if $(L_1)_p\simeq (L_2)_p$ for all primes $p$. We denote by $\calL_n^1(D_1,D_2)$ the $G^1$-genus which consists of maximal $O$-lattices in $(V,f)$ of rank $n$ satisfying \[ (V_p,f_p)\simeq \begin{cases} (O_p^n,\bbI_n) & \text{for $p\nmid D_2$}; \\ ((\Pi_p O_p)^{n-c}\oplus O_p^c,\bbJ_n) & \text{for $p\mid D_2$}, \\ \end{cases} \] where $c:=\lfloor n/2\rfloor$. We denote by $[\calL_n^1(D_1,D_2)]$ the set of isomorphism classes of $O$-lattices in $\calL_n^1(D_1,D_2)$ and by $H^1_n(D_1,D_2):=\# [\calL^1_n(D_1,D_2)]$ the class number of the $G^1$-genus $\calL_n^1(D_1,D_2)$. Similarly, the mass $M^1_n(D_1,D_2)$ of $[\calL^1_n(D_1,D_2)]$ is defined by \begin{equation} \label{eq:Mass1} M^1_n(D_1,D_2)=\Mass([\calL^1_n(D_1,D_2)]):=\sum_{L\in [\calL^1_n(D_1,D_2)]} \frac{1}{|\Aut_{G^1}(L)|}, \end{equation} where $\Aut_{G^1}(L):=\{\alpha\in G^1: L\alpha=L\}$, which is also equal to $\Aut(L)$. \begin{lemma}\label{lm:GvsG1} The natural map $\iota:[\calL^1_n(D_1,D_2)]\to [\calL_n(D_1,D_2)]$ is a bijection. In particular, we have the equalities \begin{equation} \label{eq:GvsG1} M^1_n(D_1,D_2)=M_n(D_1,D_2) \quad \text{and}\quad H^1_n(D_1,D_2)=H_n(D_1,D_2). \end{equation} \end{lemma} \begin{proof} Fix an $O$-lattice $L_0$ in $\calL_n(D_1,D_2)$ and regard $G$ and $G^1$ as algebraic groups over $\Q$. Denote by $\wh \Z=\prod_{\ell} \Z_\ell$ the profinite completion of $\Z$ and by $\A_f=\wh \Z\otimes_{\Z} \Q$ the finite adele ring of $\Q$. 
By the definition of $G$-genera, the right action of $G(\A_f)$ on $\calL_n(D_1,D_2)$ is transitive, and it induces an isomorphism $\calL_n(D_1,D_2)\simeq U_{D_1,D_2} \backslash G(\A_f)$, where $U_{D_1,D_2}$ is the stabiliser of $L_0\otimes \wh \Z$ in $G(\A_f)$. Since two lattices are isomorphic if and only if they differ by the action of an element in $G(\Q)$, we obtain an isomorphism of pointed sets \[ [\calL_n(D_1,D_2)]\simeq U_{D_1,D_2} \backslash G(\A_f)/G(\Q). \] Similarly, we also obtain an isomorphism \[ [\calL^1_n(D_1,D_2)]\simeq U_{D_1,D_2}^1 \backslash G^1(\A_f)/G^1(\Q), \] where $U_{D_1,D_2}^1:=U_{D_1,D_2}\cap G^1(\A_f)$. By the construction of these isomorphisms, the natural map $\iota:[\calL^1_n(D_1,D_2)]\to [\calL_n(D_1,D_2)]$ is nothing but the map \[ \iota: U_{D_1,D_2}^1 \backslash G^1(\A_f)/G^1(\Q) \to U_{D_1,D_2} \backslash G(\A_f)/G(\Q) \] induced by the inclusion map $G^1(\A_f)\embed G(\A_f)$. The map $n$ induces a surjective map $U_{D_1,D_2} \backslash G(\A_f)/G(\Q)\to n(U_{D_1,D_2})\backslash \A_f^\times/\Q^\times_+$. One shows that $n(U_{D_1,D_2})=\wh \Z^\times$ so the latter term is trivial. Then every double coset in $U_{D_1,D_2} \backslash G(\A_f)/G(\Q)$ is represented by an element of norm one. Therefore, $\iota$ is surjective. Let $g_1,g_2\in G^1(\A_f)$ such that $\iota [g_1]=\iota[g_2]$ in the $G$-double coset space. Then $g_1=u g_2 \gamma $ for some $u\in U_{D_1,D_2}$ and $\gamma\in G(\Q)$. Applying $n$, one obtains $n(\gamma)=1$ and hence $n(u)=1$. This proves the injectivity of $\iota$. \end{proof} For each $n\geq 1$, define \begin{equation} \label{eq:vn} v_n:=\prod_{i=1}^n \frac{|\zeta(1-2i)|}{2}, \end{equation} where $\zeta(s)$ is the Riemann zeta function. For each prime $p$ and $n\ge 1$, define \begin{equation} \label{eq:Lnp} L_n(p,1):=\prod_{i=1}^n (p^i+(-1)^i) \end{equation} and \begin{equation} \label{eq:L*np} L_n(1,p):= \begin{cases} \prod_{i=1}^c (p^{4i-2}-1) & \text{if $n=2c$ is even;} \\ \frac{(p-1) (p^{4c+2}-1)}{p^2-1} \cdot \prod_{i=1}^c (p^{4i-2}-1) & \text{if $n=2c+1$ is odd.} \end{cases} \end{equation} \begin{proposition}\label{prop:max_lattice} We have \begin{equation} \label{eq:Massformula} M_n(D_1,D_2)=v_n \cdot \prod_{p|D_1} L_n(p,1) \cdot \prod_{p|D_2} L_n(1,p). \end{equation} \end{proposition} \begin{proof} When $(D_1,D_2)=(D,1)$, the formula \eqref{eq:Massformula} is proved in \cite[Proposition~9]{hashimoto-ibukiyama:1}. By Lemma~\ref{lm:GvsG1}, we may replace $M_n(D_1,D_2)$ by $M^1_n(D_1,D_2)$ in \eqref{eq:Massformula}. Using the definition, the mass $M^1_n(D_1,D_2)$ can be also interpreted as the volume of the compact set $G^1(\A_f)/G^1(\Q)$ with respect to the Haar measure of $G^1(\A_f)$ which takes the value one on $U_{D_1,D_2}^1$. Using this property, we obtain \[ \frac{M^1_n(D_1,D_2)}{M^1_n(D,1)}=\frac{\vol(U^1_{D,1})}{\vol(U^1_{D_1,D_2})} \] for any Haar measure on $G^1(\A_f)$. It follows that \begin{equation} \label{eq:massquot} \frac{M^1_n(D_1,D_2)}{M^1_n(D,1)}=\prod_{p|D_2} \frac{\vol(\Aut_{G^1_p}(O_p^n,\bbI_n))}{\vol(\Aut_{G^1_p}((\Pi_pO_p)^{n-c}\oplus O_p^c,\bbJ_n))}, \end{equation} where $c=\lfloor n/2\rfloor$ and where $\vol(U_p^1)$ denotes the volume of an open compact subgroup $U_p^1\subseteq G^1_p$ for a Haar measure on $G^1_p$. The right hand side of \eqref{eq:massquot} also does not depend on the choice of the Haar measure. It is easy to see that the dual lattice $((\Pi_pO_p)^{n-c}\oplus O_p^c)^\vee$ of $(\Pi_pO_p)^{n-c}\oplus O_p^c$ with respect to $\bbJ_n$ is equal to $O_p^{c}\oplus (\Pi_p^{-1} O_p)^{n-c}$. 
Therefore, \[ \Aut_{G^1_p}((\Pi_pO_p)^{n-c}\oplus O_p^c,\bbJ_n)= \Aut_{G^1_p}((\Pi_pO_p)^{c}\oplus O_p^{n-c},\bbJ_n). \] In Subsection~\ref{ssec:sspmass} we shall see a connection between $M^1_n(p,1)$ or $M^1_n(1,p)$ and certain masses in geometric terms. In the notation of Theorem~\ref{thm:sspmass}, which is a reformulation of \cite[Proposition~3.5.2]{harashita}, we have \begin{equation} \label{eq:localquot} \frac{\vol(\Aut_{G^1_p}(O_p^n,\bbI_n))}{\vol(\Aut_{G^1_p}((\Pi_pO_p)^{c}\oplus O_p^{n-c},\bbJ_n))}=\frac{\Mass(\Lambda_{n,p^c})}{\Mass(\Lambda_{n,p^0})} =\frac{L_{n,p^c}}{L_{n,p^0}}=\frac{L_n(1,p)}{L_n(p,1)} \end{equation} by \eqref{eq:npgc}. Then Equation~\eqref{eq:Massformula} follows from \eqref{eq:massquot}, \eqref{eq:localquot}, and \eqref{eq:Massformula} for $(D_1,D_2)=(D,1)$. \end{proof} \subsection{The Gauss problem for definite quaternion Hermitian maximal lattices}\label{ssec:Gaussarith}\ In this subsection we determine for which $n$ and $(D_1,D_2)$ the class number $H_n(D_1,D_2)$ is equal to one. The Bernoulli numbers $B_n$ are defined by (cf. \cite[p.~91]{serre:arith}) \begin{equation} \label{eq:Bernoulli} \frac{t}{e^t-1}=1-\frac{t}{2} +\sum_{n=1}^\infty B_{2n} \frac{t^{2n}}{(2n)!}. \end{equation} For each $n\ge 1$, we have \begin{equation} \label{eq:zeta2n} B_{2n}=(-1)^{(n+1)} \frac{2 (2n)!}{(2\pi)^{2n}} \zeta(2n) \end{equation} and \begin{equation} \label{eq:zeta1-2n} \frac{|\zeta(1-2n)|}{2} = \frac{|B_{2n}|}{4n}=\frac{(2n-1)!\zeta(2n)}{(2\pi)^{2n}} . \end{equation} Below is a table of values of $|B_{2n}|$ and $|\zeta(1-2n)|/2$: \begin{center} \begin{tabular}{|c|c|c|c|c|c|c|c|c|c|c|c|c|} \hline $n$ & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 & 9 & 10 & 11 & 12 \\ \hline $|B_{2n}|$ & $\frac{1}{6}$ & $\frac{1}{30}$ & $\frac{1}{42}$ & $\frac{1}{30}$ & $\frac{5}{66}$ & $\frac{691}{2730}$ & $\frac{7}{6}$ & $\frac{3617}{510}$ & $\frac{43867}{798}$ & $\frac{174611}{330}$ & $\frac{864513}{138}$ & $\frac{236364091}{2730}$ \\ \hline $\frac{|\zeta(1-2n)|}{2}$ & $\frac{1}{24}$ & $\frac{1}{240}$ & $\frac{1}{504}$ & $\frac{1}{480}$ & $\frac{1}{264}$ & $\frac{691}{2730\cdot 24}$ & $\frac{1}{24}$ & $\frac{3617}{510\cdot 32}$ & $\frac{43867}{798\cdot 36 }$ & $\frac{174611}{330\cdot 40}$ & $\frac{864513}{138\cdot 44}$ & $\frac{236364091}{2730\cdot 48}$ \\ \hline \end{tabular} \end{center} We have (cf.~\eqref{eq:vn}) \begin{equation} \label{eq:valuevn} \begin{split} &v_1=\frac{1}{2^3\cdot 3}, \quad v_2=\frac{1}{2^7\cdot 3^2\cdot 5}, \quad v_3=\frac{1}{2^{10}\cdot 3^4 \cdot 5\cdot 7}, \\ &v_4=\frac{1}{2^{15}\cdot 3^5 \cdot 5^2\cdot 7}, \quad v_5=\frac{1}{2^{18}\cdot 3^6 \cdot 5^2\cdot 7\cdot 11}. \end{split} \end{equation} \begin{lemma}\label{lem:vn} If $n\geq 6$, then either the numerator of $v_n$ is not one or $v_n>1$. \end{lemma} \begin{proof} Put $A_n=|\zeta(1-2n)|/2$. First, by \[ \zeta(2n)<1+\int_{2}^\infty \frac{1}{x^{2n}}dx=1+\frac{2^{1-2n}}{2n-1}, \] and since $\zeta(2n+2) > 1$, we have \[ \frac{A_{n+1}}{A_n}> \frac{(2n+1)(2n)}{(2\pi)^2\cdot \zeta(2n)}> \left (\frac{2n}{2\pi}\right )^2 \cdot \frac{1+\frac{1}{2n}}{1+\frac{2^{1-2n}}{2n-1}}>1 \quad \text{for $n\ge 4$}. \] From the table and the fact that $A_n$ is increasing for $n\ge 4$ which we have just proved, we have \[ v_n=\prod_{i=1}^6 A_i \cdot \prod_{i=7}^{11} A_i \cdot \prod_{i=12}^n A_i > \frac{1}{504^6}\cdot 1 \cdot (1803)^{n-11} \quad \text{for $n\ge 12$,} \] since it follows from the table that $A_1, \ldots, A_6 \ge \frac{1}{504}$ and $A_{12} > 1803$. Thus, $v_n>1$ for $n\geq 17$. 
By a classical result of Clausen and von Staudt (see \cite[Theorem 3.1, p.~41]{AIK14}), $B_{2n}\equiv -\sum_{(p-1)|2n} (1/p) \mod 1$ where $p$ are primes. So if $n\le 17$ (even for $n\le 344$), then $B_{2n}$ has denominators only for primes such that $p-1\le 34$ (or $p-1 \le 344\cdot 2$) and this does not include $691$. Thus, for $6\le n\le 344$, we have $691|v_n$. This proves the lemma. \end{proof} \begin{corollary}\label{cor:ge6} For $n\geq 6$, we have $H_n(D_1,D_2)>1$. \end{corollary} \begin{proof} By Lemma~\ref{lem:vn}, either $v_n>1$ or the numerator of $v_n$ is not one. From the mass formula \eqref{eq:Mass}, either $M_n(D_1,D_2)>1$ or the numerator of $M_n(D_1,D_2)$ is not one. Therefore, $H_n(D_1,D_2)>1$. \end{proof} \begin{proposition}\label{prop:np2} We have $H_3(2,1)=1$, $H_3(1,2)=1$, and $H_4(1,2)=1$. \end{proposition} \begin{proof} It follows from Proposition~\ref{prop:max_lattice} and Equations~\eqref{eq:L*np} and~\eqref{eq:valuevn} that \[ M_3(1,2) = \frac{1}{2^{10} \cdot 3^2 \cdot 5} \qquad \text{ and } \qquad M_4(1,2) = \frac{1}{2^{15}\cdot 3^2 \cdot 5^2}. \] It follows from \cite[p.~699]{hashimoto-ibukiyama:2}, cf.~\cite[Section 5]{ibukiyama}, that the unique lattice $(L,h)$ in the non-principal genus $H_2(1,2)$ has an automorphism group of cardinality $1920 = 2^7 \cdot 3 \cdot 5$. Consider the lattice $(O,p\mathbb{I}_1) \oplus (L, h)$ contained in $\calL_3(1,2)$. By Corollary~\ref{autodecomposition} we see that \[ \Aut((O,p\mathbb{I}_1) \oplus (L, h)) \simeq \Aut((O,p\mathbb{I}_1)) \cdot \Aut((L, h)) = O^{\times} \cdot \Aut((L,h)). \] Since $O^{\times} = E_{24} \simeq \SL_2(\F_3)$ has cardinality $24$ (cf.~\cite[Equation~(57)]{karemaker-yobuko-yu}), it follows that \[ \vert \Aut((O,p\mathbb{I}_1) \oplus (L, h)) \vert = 24 \cdot 1920 = 2^{10} \cdot 3^2 \cdot 5 = \frac{1}{M_3(1,2)}, \] showing that the lattice $(O,p\mathbb{I}_1) \oplus (L, h)$ is unique and hence that $H_3(1,2) = 1$. Next, consider the lattice $(L, h)^{\oplus 2}$ contained in $\calL_4(1,2)$. Again by Corollary~\ref{autodecomposition} we see that \[ \Aut((L, h)^{\oplus 2}) \simeq \Aut((L, h))^2 \cdot C_2 \] which has cardinality \[ 1920^2 \cdot 2 = 2^{15} \cdot 3^2 \cdot 5^2 = \frac{1}{M_4(1,2)}, \] showing that also $(L, h)^{\oplus 2}$ is unique and therefore $H_4(1,2) = 1$. Finally, we compute that \[ M_3(2,1)=\frac{1}{2^{10}\cdot 3^4}=\frac{1}{24^3 \cdot 3!}=\frac{1}{|\Aut(O^3,\bbI_3)|}, \ \text{and therefore}\ H_3(2,1)=1. \] \end{proof} \begin{theorem}\label{thm:mainarith} The class number $H_n(D_1,D_2)$ is equal to one if and only if $D=p$ is a prime number and one of the following holds: \begin{enumerate} \item $n=1$, $(D_1,D_2)=(p,1)$ and $p\in \{2,3,5,7,13\}$; \item $n=2$, and either $(D_1,D_2)=(p,1)$ with $p=2,3$ or $(D_1,D_2)=(1,p)$ with $p \in \{2,3,5,7,11\}$; \item $n=3$, and either $(D_1,D_2)=(2,1)$ or $(D_1,D_2)=(1,2)$; \item $n=4$ and $(D_1,D_2)=(1,2)$. \end{enumerate} \end{theorem} \begin{proof} \begin{enumerate} \item When $n=1$ we only have the principal genus class number and $H_1(D,1)$ is the class number $h(B)$ of $B$. The corresponding Gauss problem is a classical result: $h(B)=1$ if and only if $D\in \{2,3,5,7,13\}$; see the list in \cite[p.~155]{vigneras}. We give an alternative proof of this fact for the reader's convenience. Suppose that $H_1(D,1)=1$ and $[\calL_n(D,1)]$ is represented by $L$. 
Then
\begin{equation}
\label{eq:M1}
M_1(D,1)=\frac{\prod_{p|D} (p-1)}{24} =\frac{1}{m}, \quad \text{where $m= \vert \Aut(L)\vert \in 2\bbN $.}
\end{equation}
The discriminant $D$ has an odd number of prime divisors, since $B$ is a definite quaternion algebra. That the numerator of $M_1(D,1)$ is $1$ implies that every prime factor $p$ of~$D$ must satisfy $(p-1)|24$ and hence $p\in\{2,3,5,7,13\}$. Suppose that $D$ has more than one prime divisor; then $D$ is a product of at least three distinct primes from $\{2,3,5,7,13\}$, and the condition \eqref{eq:M1}, namely that $\prod_{p|D}(p-1)$ divides $24$ with an even quotient $m$, forces $D=2\cdot 3\cdot 7=42$. Using the class number formula (see \cite{eichler-CNF-1938, vigneras}, cf. Pizer~\cite[Theorem 16, p.~68]{pizer:arith})
\[ H_1(D,1)=\frac{\prod_{p|D} (p-1)}{12} +\frac{1}{4} \prod_{p|D} \left ( 1-\left (\frac{-4}{p} \right ) \right )+\frac{1}{3} \prod_{p|D} \left ( 1-\left (\frac{-3}{p} \right ) \right ), \]
we calculate that $H_1(42,1)=2$. Hence, $D$ must be a prime $p$, which is in $\{2,3,5,7,13\}$. Conversely, we check that $H_1(p,1)=1$ for these primes.
\item See Hashimoto-Ibukiyama \cite[p.~595]{hashimoto-ibukiyama:1}, \cite[p.~696]{hashimoto-ibukiyama:2}. It remains to verify that $H_2(D_1,D_2)>1$ for the pairs $(D_1,D_2)$ not covered by the data there. Using the class number formula in \cite{hashimoto-ibukiyama:2} we compute that $M_2(1,2\cdot 3\cdot 11)=1/2$ and $H_2(1,2\cdot 3 \cdot 11)=9$. For the remaining cases, one can show that either the numerator of $M_2(D_1,D_2)$ is not equal to $1$ or $M_2(D_1,D_2)>1$, by the same argument as that used below for $n \geq 3$.
\item[(3)+(4)] The principal genus part for $n=3$ with $D=p$ a prime is due to Hashimoto \cite{hashimoto:g=3}, based on an explicit class number formula. We shall prove directly that for $n\geq 3$, (3) and (4) are the only cases for which $H_n(D_1,D_2)=1$. In particular, our proof of the principal genus part of (3) is independent of Hashimoto's result.
By Corollary~\ref{cor:ge6}, it is enough to treat the cases $n=3,4,5$, so we assume this. We have $L_{n+1}(p,1)=L_n(p,1)(p^{n+1}+(-1)^{n+1})$, and
\[ L_2(1,p)=(p^2-1), \quad L_3(1,p)=(p-1)(p^6-1), \]
\[ L_4(1,p)=(p^2-1)(p^6-1), \quad L_5(1,p)=(p-1)(p^6-1)(p^{10}-1). \]
In particular, $(p^3-1)$ divides both $L_n(p,1)$ and $L_n(1,p)$ for $n=3,4,5$.
Observe that if $p|D_1$ and $L_n(p,1)$ has a prime factor greater than $11$, or if $p|D_2$ and $L_n(1,p)$ has a prime factor greater than $11$, then $H_n(D_1,D_2)>1$; this follows from Proposition~\ref{prop:max_lattice} and \eqref{eq:valuevn}. We list a prime factor $d$ of $p^3-1$ which is greater than $11$:
\begin{center}
\begin{tabular}{ |c|c|c|c|c|c| }
\hline
$p$ & 3 & 5 & 7 & 11 & 13 \\
\hline
$d|p^3-1$ & 13 & 31 & 19 & 19 & 61 \\
\hline
\end{tabular}
\end{center}
Thus, $H_n(D_1,D_2)>1$ for $n=3,4,5$ and $p|D$ for some prime $p$ with $3\le p \le 13$.
It remains to treat the cases $p\ge 17$ and $p=2$. We compute that $M_3(17,1) \doteq 7.85$ and $M_4(1,17) \doteq 4.99$. One sees that $M_3(1,17)>M_3(17,1)$, $M_5(17,1)>M_3(17,1)$, $M_4(17,1)>M_4(1,17)$, and $M_5(1,17)>M_4(1,17)$. Since each of these masses increases with $p$, it follows that $M_n(p,1)>1$ and $M_n(1,p)>1$ for all $p\ge 17$. Thus, for $n=3,4,5$, $H_n(D_1,D_2)=1$ implies that $D=2$. One checks that $31|L_5(2,1)$, $31|L_5(1,2)$ and $17|L_4(2,1)$. Thus
\[ H_5(2,1)>1, \quad H_5(1,2)>1, \quad \text{and} \quad H_4(2,1)>1. \]
It remains to show that $H_3(2,1)=1$, $H_3(1,2)=1$ and $H_4(1,2)=1$, which is done in Proposition~\ref{prop:np2}.
\end{enumerate} \end{proof} \begin{remark}\label{rem:Kirschmer} After completing this paper it came to our attention that Kirschmer also proved the unique orthogonal decomposition result (Theorem~\ref{orthogonal}) by adapting Kneser's proof, in Theorem 2.4.9 of his Habilitation \cite{KirschmerHab}. Moreover, in \cite[Chapter~9]{KirschmerHab}, he obtained more general results than Theorem~\ref{thm:mainarith}, which hold over any totally real algebraic number field $F$. When considering only maximal lattices over $F=\Q$ our result agrees with his results, although our method is different. For $n\geq 3$, we do not compute genus symbols and class numbers; instead we only use mass formulae and analyse the size and the numerator of the mass in question. This simplifies the computation and allows us to give a computer-free proof of Theorem~\ref{thm:mainarith} (of course based on earlier known results for $n\leq 2$). The same strategy is also applied in our geometric setting in Sections~\ref{sec:GMF}-\ref{sec:proof}. For this reason, we decided to keep our more elementary proof for interested readers. \end{remark} \section{The geometric theory: mass formulae and class numbers}\label{sec:GMF} \subsection{Set-up and definition of masses}\label{ssec:not}\ For the remainder of this paper, let $p$ be a prime number, let $g$ be a positive integer, and let $k$ be an algebraically closed field of characteristic $p$. Unless stated otherwise, $k$ will be the field of definition of abelian varieties. The cardinality of a finite set $S$ will be denoted by $\vert S\vert $. Let $\alpha_p$ be the unique local-local finite group scheme of order $p$ over $\Fp$; it is defined to be the kernel of the Frobenius morphism on the additive group $\G_a$ over $\Fp$. As before, denote by $\wh \Z=\prod_{\ell} \Z_\ell$ the profinite completion of $\Z$ and by $\A_f=\wh \Z\otimes_{\Z} \Q$ the finite adele ring of $\Q$. Let $B_{p,\infty}$ denote the definite quaternion $\Q$-algebra of discriminant $p$. Fix a quaternion Hermitian $B_{p,\infty}$-space $(V,f)$ of rank $g$, let $G=G(V,f)$ be the quaternion Hermitian group associated to $(V,f)$ which by definition is the group of unitary similitudes of $(V,f)$, and $G^1\subseteq G$ the subgroup consisting of elements $g \in G$ of norm $n(g)=1$. We regard $G^1$ and $G$ as algebraic groups over $\Q$. For any integer $d\ge 1$, let $\calA_{g,d}$ denote the (coarse) moduli space over $\Fpbar$ of $g$-dimensional polarised abelian varieties $(X,\lambda)$ with polarisation degree $\deg(\lambda)=d^2$. An abelian variety over~$k$ is said to be \emph{supersingular} if it is isogenous to a product of supersingular elliptic curves; it is said to be \emph{superspecial} if it is isomorphic to a product of supersingular elliptic curves. For any $m \geq 0$, let $\calS_{g,p^m}$ be the supersingular locus of $\calA_{g,p^m}$, which consists of all polarised supersingular abelian varieties in $\calA_{g,p^m}$. Then $\calS_g:=\mathcal{S}_{g,1}$ is the moduli space of $g$-dimensional principally polarised supersingular abelian varieties. If $S$ is a finite set of objects with finite automorphism groups in a specified category, the \emph{mass} of $S$ is defined to be the weighted sum \[ \Mass(S):=\sum_{s\in S} \frac{1}{\vert \Aut(s)\vert }. 
\] For any $x = (X_0, \lambda_0) \in \mathcal{S}_{g,p^m}(k)$, we define \begin{equation}\label{eq:Lambdax} \Lambda_{x} = \{ (X,\lambda) \in \mathcal{S}_{g,p^m}(k) : (X,\lambda)[p^{\infty}] \simeq (X_0, \lambda_0)[p^{\infty}] \}, \end{equation} where $(X,\lambda)[p^{\infty}]$ denotes the polarised $p$-divisible group associated to $(X,\lambda)$. We define a group scheme $G_x$ over $\Z$ as follows. For any commutative ring $R$, the group of its $R$-valued points is defined by \begin{equation}\label{eq:aut} G_{x}(R) = \{ \alpha \in (\text{End}(X_0)\otimes _{\mathbb{Z}}R)^{\times} : \alpha^t \lambda_0 \alpha = \lambda_0\}. \end{equation} Since any two polarised supersingular abelian varieties are isogenous, i.e., there exists a quasi-isogeny $\varphi: X_1\to X_2$ such that $\varphi^* \lambda_2=\lambda_1$, the algebraic group $G_x\otimes \Q$ is independent of~$x$ (up to isomorphism) and it is known to be isomorphic to $G^1$. We shall fix an isomorphism $G_x\otimes \Q \simeq G^1$ over $\Q$ and regard $U_x:=G_x(\wh \Z)$ as an open compact subgroup of $G^1(\A_f)$. By \cite[Theorem 2.1]{yu:2005}, there is a natural bijection between the following pointed sets: \begin{equation} \label{eq:smf:1} \Lambda_x \simeq G^1(\Q)\backslash G^1(\A_f)/U_x. \end{equation} In particular, $\Lambda_x$ is a finite set. The mass of $\Lambda_x$ is then defined as \begin{equation} \label{eq:Massx} \mathrm{Mass}(\Lambda_{x}) = \sum_{(X,\lambda) \in \Lambda_{x}} \frac{1}{\vert \mathrm{Aut}(X,\lambda)\vert}. \end{equation} If $U$ is an open compact subgroup of $G^1(\A_f)$, the \emph{arithmetic mass} for $(G^1,U)$ is defined by \begin{equation} \label{eq:arithmass} \Mass(G^1,U):=\sum_{i=1}^h \frac{1}{|\Gamma_i|}, \quad \Gamma_i:=G^1(\Q)\cap c_i U c_i^{-1}, \end{equation} where $\{c_i\}_{i=1,\ldots, h}$ is a complete set of representatives of the double coset space $ G^1(\Q)\backslash G^1(\A_f)/U$. The definition of $\Mass(G^1,U)$ is independent of the choices of representatives $\{c_i\}_i$. Then we have the equality (cf.~ \cite[Corollary 2.5]{yu:2005}) \begin{equation} \label{eq:smf:2} \Mass(\Lambda_x)=\Mass(G^1,U). \end{equation} \subsection{Superspecial mass formulae}\label{ssec:sspmass}\ For each integer $c$ with $0 \leq c \leq \lfloor g/2 \rfloor$, let $\Lambda_{g,p^c}$ denote the set of isomorphism classes of $g$-dimensional polarised superspecial abelian varieties $(X, \lambda)$ whose polarisation $\lambda$ satisfies $\ker(\lambda) \simeq \alpha_p^{2c}$. The mass of $\Lambda_{g,p^c}$ is \[ \mathrm{Mass}(\Lambda_{g,p^c}) = \sum_{(X,\lambda)\in \Lambda_{g,p^c}} \frac{1}{\vert \mathrm{Aut}(X,\lambda) \vert}. \] Note that the $p$-divisible group of a superspecial abelian variety of given dimension is unique up to isomorphism. Furthermore, the polarised $p$-divisible group associated to any member in~$\Lambda_{g,p^c}$ is unique up to isomorphism, cf.~\cite[Proposition 6.1]{lioort}. Therefore, if $x = (X_0, \lambda_0)$ is any member in $\Lambda_{g,p^c}$, then we have $\Lambda_x = \Lambda_{g,p^c}$ (cf.~\eqref{eq:Lambdax}). In particular, the mass $\Mass(\Lambda_{g,p^c})$ of the superspecial locus $\Lambda_{g,p^c}$ is a special case of $\Mass(\Lambda_x)$. We fix a supersingular elliptic curve $E$ over $\mathbb{F}_{p^2}$ such that its Frobenius endomorphism $\pi_E$ satisfies $\pi_E=-p$, and let ${E_k}=E\otimes_{\mathbb{F}_{p^2}} k$ (note that $k \supseteq \mathbb{F}_{p^2}$). 
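Before stating the superspecial mass formula, we note that it is straightforward to evaluate numerically. The following script is a purely illustrative sanity check, a sketch assuming Python with the SymPy library (used only to compute Bernoulli numbers, and hence the constants $v_g$ of \eqref{eq:valuevn}, exactly); it evaluates the local factor $L_{g,p^c}$ of Theorem~\ref{thm:sspmass} below and recovers, for instance, $\Mass(\Lambda_{3,p^0})=\frac{1}{2^{10}\cdot 3^4}$ for $p=2$, in agreement with the value of $M_3(2,1)$ in Proposition~\ref{prop:np2}, as well as the factorisation \eqref{eq:Lambda5p}. It is not used anywhere in our arguments.
\begin{verbatim}
# Illustrative sketch only (assumes Python with SymPy); not used in any proof.
from functools import reduce
from operator import mul
from sympy import Rational, bernoulli

def product(factors):
    return reduce(mul, factors, 1)

def v(g):
    # v_g = prod_{i=1}^{g} |zeta(1-2i)|/2 = prod_{i=1}^{g} |B_{2i}|/(4i)
    return product(abs(bernoulli(2 * i)) / (4 * i) for i in range(1, g + 1))

def L(g, c, p):
    # the local factor L_{g,p^c} of the superspecial mass formula
    A = Rational(product(p**(2 * i) - 1 for i in range(1, g + 1)),
                 product(p**(2 * i) - 1 for i in range(1, 2 * c + 1))
                 * product(p**(2 * i) - 1 for i in range(1, g - 2 * c + 1)))
    return (product(p**i + (-1)**i for i in range(1, g - 2 * c + 1))
            * product(p**(4 * i - 2) - 1 for i in range(1, c + 1)) * A)

def superspecial_mass(g, c, p):
    return v(g) * L(g, c, p)

assert v(5) == Rational(1, 2**18 * 3**6 * 5**2 * 7 * 11)        # cf. (valuevn)
assert superspecial_mass(3, 0, 2) == Rational(1, 2**10 * 3**4)  # = M_3(2,1)
p = 7   # any prime; compare with the displayed formula for g = 5, c = 1
assert L(5, 1, p) == (p-1)*(p**2+1)*(p**3-1)*(p**4+1)*(p**10-1)
\end{verbatim}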
It is known that every polarisation on ${E^g_k}$ is defined over $\mathbb{F}_{p^2}$, that is, it descends uniquely to a polarisation on $E^g$ over~$\F_{p^2}$. For each integer~$c$ with $0\leq c \leq \lfloor g/2 \rfloor$, we denote by $P_{p^c}(E^g)$ the set of isomorphism classes of polarisations $\mu$ on $E^g$ such that $\mathrm{ker}(\mu) \simeq \alpha_p^{2c}$; we define $P_{p^c}({E^g_k})$ similarly, and have the identification $P_{p^c}({E^g_k})=P_{p^c}(E^g)$. As superspecial abelian varieties of dimension $g>1$ are unique up to isomorphism, there is a bijection $P_{p^c}(E^g) \simeq \Lambda_{g,p^c}$ when $g>1$. For brevity, we shall also write $P(E^g)$ for $P_1(E^g)$. \begin{theorem}\label{thm:sspmass} For any $g \ge 1$ and $0 \leq c \leq \lfloor g/2 \rfloor$, we have \[ \mathrm{Mass}(\Lambda_{g,p^c})=v_g \cdot L_{g,p^c},\] where $v_g$ is defined in \eqref{eq:vn} and where \begin{equation} \label{eq:Lgpc} L_{g,p^c} =\prod_{i=1}^{g-2c} (p^i + (-1)^i)\cdot \prod_{i=1}^c (p^{4i-2}-1) \cdot \frac{\prod_{i=1}^g (p^{2i}-1)}{\prod_{i=1}^{2c}(p^{2i}-1)\prod_{i=1}^{g-2c} (p^{2i}-1)}. \end{equation} \end{theorem} \begin{proof} This follows from \cite[Proposition 3.5.2]{harashita} by the functional equation for $\zeta(s)$. See \cite[p.~159]{ekedahl} and \cite[Proposition 9]{hashimoto-ibukiyama:1} for the case where $c=0$ (the principal genus case). See also \cite{yu2} for a geometric proof in the case where $g=2c$ (the non-principal genus case). \end{proof} Clearly, $L_{g,p^0}=L_g(p,1)$ (see~\eqref{eq:Lnp}). One can also see from \eqref{eq:Lgpc} that for $c= \lfloor g/2 \rfloor$, \begin{equation} \label{eq:npgc} L_{g,p^c}= \begin{cases} \prod_{i=1}^c (p^{4i-2}-1) & \text{if $g=2c$ is even;} \\ \frac{(p-1) (p^{4c+2}-1)}{p^2-1} \cdot \prod_{i=1}^c (p^{4i-2}-1) & \text{if $g=2c+1$ is odd,} \end{cases} \end{equation} and therefore $L_{g,p^c}=L_g(1,p)$, cf.~\eqref{eq:L*np}. For $g=5$ and $c=1$, one has \begin{equation} \label{eq:Lambda5p} \Mass(\Lambda_{5,p})=v_5 \cdot (p-1)(p^2+1)(p^3-1)(p^4+1)(p^{10}-1), \end{equation} noting that this case is different from either the principal genus or the non-principal genus case. \begin{lemma}\label{lem:poly} For any $g \ge 1$ and $0 \leq c \leq \lfloor g/2 \rfloor$, the local component $L_{g,p^c}$ in \eqref{eq:Lgpc} is a polynomial in $p$ over $\Z$ of degree $(g^2+4gc-8c^2+g-2c)/2$. Furthermore, the minimal degree occurs precisely when $c=0$ if $g$ is odd and when $c= g/2$ if $g$ is even. \end{lemma} \begin{proof} It suffices to show that the term \[ A:=\frac{\prod_{i=1}^g (p^{2i}-1)}{\prod_{i=1}^{2c}(p^{2i}-1)\prod_{i=1}^{g-2c} (p^{2i}-1)} \] is a polynomial in $p$ with coefficients in $\Z$. Notice that $A=[g;2c]_{p^2}$, where \[ [n;k]_q:=\frac{\prod_{i=1}^n (q^i-1)}{\prod_{i=1}^k(q^i-1)\cdot \prod_{i=1}^{n-k}(q^i-1)}, \quad n\in \bbN, \ k=0,\dots, n. \] It is known that $[n;k]_q\in \Z[q]$; cf.~\cite{exton}. Alternatively, one considers the recursive relation $[n~+~1~;~k]_q=[n;k]_q+q^{n-k+1} [n;k-1]_q$ and concludes that $[n;k]_q\in \Z[q]$ by induction. The degree of $L_{g, p^c}$ is \begin{equation} \label{eq:degree} \begin{split} &\sum_{i=1}^{g-2c} i + \sum_{i=1}^c (4i-2) + \sum_{i=g-2c+1} ^g 2i - \sum_{i=1}^{2c} 2i \\ &= \frac{1}{2}\left [(g-2c)(g-2c+1)+c\cdot 4c+2c\cdot(4g-4c+2)-2c(4c+2) \right ] \\ &= \frac{1}{2}\left [ g^2+4gc-8c^2+g-2c \right ]. \end{split} \end{equation} The degree is a polynomial function of degree 2 in $c$ with negative leading coefficient. 
So the minimum occurs either at $c=0$ or at $c=\lfloor g/2 \rfloor$; the former happens if $g$ is odd and the latter happens if $g$ is even. \end{proof} If $g=2m$ is even, then the polynomial $L_{g,1}$ has degree $g(g+1)/2=2m^2+m$ and $L_{g,p^m}$ has degree $2m^2$. \subsection{Mass formulae and class number formulae for supersingular abelian surfaces and threefolds} \subsubsection{Non-superspecial supersingular abelian surfaces}\label{ssec:cng2}\ Let $x=(X_0,\lambda_0)$ be a principally polarised supersingular abelian surface over $k$. If $X_0$ is superspecial, then $\Lambda_x=\Lambda_{2,p^0}$ and the class number formula for $|\Lambda_{2,p^0}|$ is obtained in \cite{hashimoto-ibukiyama:1}. We assume that $X_0$ is not superspecial, that is, $a(X_0)=1$. In this case there is a unique (up to isomorphism) polarised superspecial abelian surface $(Y_1,\lambda_1)$ such that $\ker(\lambda_1) \simeq \alpha_p^2$ and an isogeny $\phi:(Y_1,\lambda_1)\to (X_0,\lambda_0)$ of degree $p$ which is compatible with polarisations. Furthermore, there is a unique polarisation $\mu_1$ on $E^2$ such that $\ker(\mu_1) \simeq \alpha_p^2$ and $(Y_1,\lambda_1)\simeq (E^2,\mu_1)\otimes_{\F_{p^2}} k$. Then $x$ corresponds to a point $t$ in $\bbP^1(k)=\bbP^1_{\mu_1}(k):=\{\phi_1:(E^2,\mu_1)\otimes k \to (X,\lambda) \text{ an isogeny of degree $p$} \}$, called the Moret-Bailly parameter for $(X_0,\lambda_0)$. The condition $a(X_0)=1$ implies that $t\in \bbP^1(k)\setminus \bbP^1(\F_{p^2})=k \setminus \F_{p^2}$. We consider two different cases, corresponding to the structures of $\End(X_0)$: the case $t\in k\setminus \F_{p^4}$, which we call the first case (I), and the case $t\in \F_{p^4} \setminus \F_{p^2}$, called the second case (II). The following explicit formula for the class number of a non-superspecial supersingular ``genus'' $\Lambda_x$ is due to the first-named author \cite{ibukiyama}. \begin{theorem}\label{thm:nsspg2} Let $x=(X_0,\lambda_0)$ be a principally polarised supersingular abelian surface over~$k$ with $a(X_0)=1$ and let $h$ be the cardinality of $\Lambda_x$. \begin{enumerate} \item In case (I), i.e., when $t\in \mathbb{P}^1(k) \setminus \mathbb{P}(\F_{p^4})$, we have \[ h= \begin{cases} 1 & \text{if $p=2$}; \\ \frac{p^2(p^4-1)(p^2-1)}{5760} & \text{if $p\ge 3$}. \end{cases} \] \item In case (II), i.e., when $t\in \mathbb{P}(\F_{p^4}) \setminus \mathbb{P}(\F_{p^2})$, we have \[ h= \begin{cases} 1 & \text{if $p=2$}; \\ \frac{p^2(p^2-1)^2}{2880} & \text{if } p\equiv \pm 1 \bmod 5 \text{ or } p=5; \\ 1+\frac{(p-3)(p+3)(p^2-3p+8)(p^2+3p+8)}{2880} & \text{if } p\equiv \pm 2 \bmod 5. \\ \end{cases} \] \item For each case, we have $h=1$ if and only if $p=2,3$. \end{enumerate} \end{theorem} \begin{proof} Parts (1) and (2) follow from Theorems 1.1 and 3.6 of \cite{ibukiyama}. Part (3) follows from the table in Section 1 of \cite{ibukiyama}. \end{proof} \begin{theorem}\label{thm:massg2} Let $x=(X_0,\lambda_0)$ and $t\in \bbP^1(k)$ be as in Theorem~\ref{thm:nsspg2}. Then \begin{equation} \label{eq:massg2} \Mass(\Lambda_{x})=\frac{L_p}{5760} , \end{equation} with \[ L_p= \begin{cases} (p^2-1)(p^4-p^2), & \text{ if } t\in \bbP^1(\F_{p^4}) \setminus \bbP^1(\F_{p^2});\\ 2^{-e(p)}(p^4-1)(p^4-p^2) & \text{ if }t\in \bbP^1(k) \setminus \bbP^1(\F_{p^4}),\\ \end{cases} \] where $e(p)=0$ if $p=2$ and $e(p)=1$ if $p>2$. \end{theorem} \begin{proof} See \cite[Theorem 1.1]{yuyu}; also cf.~\cite[Proposition 3.3]{ibukiyama}. 
\end{proof} \begin{corollary}\label{cor:p2g2aut} Let $x=(X_0,\lambda_0)$ and $t\in \bbP^1(k)$ be as in Theorem~\ref{thm:nsspg2}. Assume that $p=2$. Then \begin{equation} \label{eq:autg2} \vert \Aut(X_0,\lambda_0) \vert= \begin{cases} 160 & \text{ if } t\in \bbP^1(\F_{p^4}) \setminus \bbP^1(\F_{p^2});\\ 32 & \text{ if } t \in \bbP^1(k) \setminus \bbP^1(\F_{p^4}). \end{cases} \end{equation} \end{corollary} \begin{proof} By Theorem~\ref{thm:nsspg2}, we have $|\Lambda_{x}|=1$ in both cases. The mass formula (cf.~Theorem~\ref{thm:massg2}) for $p=2$ yields \[ \Mass(\Lambda_{x})= \begin{cases} 1/160 & \text{ if } t\in \bbP^1(\F_{p^4}) \setminus \bbP^1(\F_{p^2});\\ 1/32 & \text{ if } t\in \bbP^1(k) \setminus \bbP^1(\F_{p^4}).\\ \end{cases} \] This proves \eqref{eq:autg2}. \end{proof} \subsubsection{Supersingular abelian threefolds}\label{ssec:mfg3}\ We briefly describe the framework of polarised flag type quotients as developed in \cite{lioort}. Let $E/\F_{p^2}$ be the elliptic curve fixed in Subsection~\ref{ssec:sspmass}. An $\alpha$-group of rank $r$ over an $\Fp$-scheme~$S$ is a finite flat group scheme which is Zariski-locally isomorphic to $\alpha_p^r$ over an open subset. For an abelian scheme $X$ over $S$, put $X^{(p)}:=X\times_{S,F_S} S$, where $F_S:S\to S$ denotes the absolute Frobenius morphism on $S$. Denote by $F_{X/S}:X\to X^{(p)}$ and $V_{X/S}: X^{(p)}\to X$ the relative Frobenius and Verschiebung morphisms, respectively. If $f:X\to Y$ is a morphism of abelian varieties, we also write $X[f]$ for $\ker(f)$. \begin{definition}\label{def:PFTQ}(cf.~\cite[Section 3]{lioort}) Let $g$ be a positive integer. \begin{enumerate} \item For any polarisation $\mu$ on $E^g$ such that $\ker(\mu)=E^g[F]$ if $g$ is even and $\ker(\mu) = 0$ otherwise, a $g$-dimensional \emph{polarised flag type quotient (PFTQ)} with respect to $\mu$ is a chain of polarised abelian varieties over a base $\F_{p^2}$-scheme $S$ \[ (Y_\bullet,\rho_\bullet):(Y_{g-1},\lambda_{g-1}) \xrightarrow{\rho_{g-1}} (Y_{g-2},\lambda_{g-2})\cdots \xrightarrow{\rho_{2}}(Y_1,\lambda_1) \xrightarrow{\rho_1} (Y_0, \lambda_0),\] such that: \begin{itemize} \item [(i)] $(Y_{g-1},\lambda_{g-1}) = ({E^g}, p^{\lfloor (g-1)/2 \rfloor}\mu)\times_{\Spec \F_{p^2}} S$; \item [(ii)] $\ker(\rho_i)$ is an $\alpha$-group of rank $i$ for $1\le i\le g-1$; \item [(iii)] $\ker(\lambda_i) \subseteq Y_i [\sfV^j \circ \sfF^{i-j}]$ for $0\le i\le g-1$ and $0\le j\le \lfloor i/2 \rfloor$, where $\sfF=F_{Y_i/S}$ and $\sfV=V_{Y_i/S}$. \end{itemize} An isomorphism of $g$-dimensional polarised flag type quotients is a chain of isomorphisms $(\alpha_i)_{0\le i \le g-1}$ of polarised abelian varieties such that $\alpha_{g-1}={\rm id}_{Y_{g-1}}$. \item A $g$-dimensional polarised flag type quotient $(Y_\bullet,\rho_\bullet)$ is said to be \emph{rigid} if \[ \ker(Y_{g-1}\to Y_i)=\ker (Y_{g-1}\to Y_0)\cap Y_{g-1}[\sfF^{g-1-i}], \quad \text{for $1\le i \le g-1$}. \] \item Let $\mathcal{P}_{\mu}$ (resp.~$\calP'_\mu$) denote the moduli space over $\F_{p^2}$ of $g$-dimensional (resp.~rigid) polarised flag type quotients with respect to $\mu$. \end{enumerate} \end{definition} We introduce the notation and some properties of minimal isogenies for supersingular abelian varieties. \begin{lemma}\label{lem:minisog} Let $X$ be a supersingular abelian variety over $k$. 
Then there exists a pair $(Y,\varphi)$, where $Y$ is a superspecial abelian variety and $\varphi: Y\to X$ is an isogeny, such that for any pair $(Y',\varphi')$ as above there exists a unique isogeny $\rho: Y'\to Y$ such that $\varphi'=\varphi\circ \rho$. Dually, there exists a pair $(Z,\gamma)$, where $Z$ is a superspecial abelian variety and $\gamma: X\to Z$ is an isogeny, such that for any pair $(Z',\gamma')$ as above there exists a unique isogeny $\rho: Z\to Z'$ such that $\gamma'=\rho\circ \gamma$. \end{lemma} \begin{proof} See \cite[Lemma 1.8]{lioort}; also see \cite[Corollary 4.3]{yu:mrl2010} for an independent proof. \end{proof} \begin{definition}\label{def:minisog} Let $X$ be a supersingular abelian variety over $k$. We call the pair $(Y,\varphi:Y\to~X)$ or the pair $(Z,\gamma:X\to Z)$ as in Lemma~\ref{lem:minisog} \emph{the minimal isogeny} of $X$. \end{definition} \begin{proposition}\label{prop:minisoglift} Let $\varphi: Y\to X$ be the minimal isogeny of a supersingular abelian variety~$X$. Then every endomorphism $\sigma$ of $X$ lifts uniquely to an endomorphism $\sigma'$ of $Y$. \end{proposition} \begin{proof} This follows from the (local) statement \cite[Proposition 4.8] {yu:mrl2010}. Indeed, the element $\sigma':= \varphi^{-1} \sigma \varphi$ in $\End^0(Y)$ belongs to $\End(Y)$ if and only if $\sigma'$ belongs to $\End(Y[p^\infty])$, and the latter follows from \cite[Proposition 4.8]{yu:mrl2010}. \end{proof} Now let $g=3$. According to \cite[Section 9.4]{lioort}, $\mathcal{P}_{\mu}$ is a two-dimensional geometrically irreducible scheme over $\mathbb{F}_{p^2}$. The projection to the last member gives a proper ${\mathbb{F}}_{p^2}$-morphism \begin{align*} \mathrm{pr}_0 : \mathcal{P}_{\mu} & \to \mathcal{S}_{3,1}, \\ (Y_\bullet, \rho_\bullet) & \mapsto (Y_0, \lambda_0). \end{align*} Moreover, for each principally polarised supersingular abelian threefold $(X,\lambda)$ there exist a principal polarisation $\mu \in P(E^3)$ and a polarised flag type quotient $y \in \mathcal{P}_{\mu}$ such that $\mathrm{pr}_0(y) = [(X, \lambda)] \in \mathcal{S}_{3,1}$, cf.~\cite[Proposition 5.4]{katsuraoort}. Put differently, the morphism \begin{equation}\label{eq:moduli} \mathrm{pr}_0: \coprod _{\mu \in P(E^3)}\mathcal{P}_{\mu} \rightarrow \mathcal{S}_{3,1} \end{equation} is surjective and generically finite. We define the mass function on $\calP_\mu(k)$ as follows: \begin{equation} \label{eq:massfcn} \Mass: \calP_\mu(k) \to \Q, \quad \Mass(y):=\Mass(\Lambda_x), \ x=\mathrm{pr}_0(y). \end{equation} We now describe the geometry of $\calP_{\mu}$. First of all, the geometric structure is independent of the choice of $\mu$; see \cite[Section 3.10]{lioort}. The truncated map \[ \pi: ((Y_2,\lambda_2) \to (Y_1,\lambda_1) \to (Y_0, \lambda_0)) \mapsto ((Y_2,\lambda_2) \to (Y_1,\lambda_1)) \] induces a morphism $\pi: \calP_\mu \to \bbP^2$, since the target space is the family of subgroups of order $p$ of $Y_2[\sfF]=E^3[\sfF]=\alpha_p^3$, which is isomorphic to $\bbP^2$. The image of $\pi$ is isomorphic to the Fermat curve $C$ defined by the equation $X_{1}^{p+1}+X_{2}^{p+1}+X_{3}^{p+1} = 0$. Moreover, as a fibre space over $C$, $\calP_\mu$ is isomorphic to $\mathbb{P}_{C}(\mathcal{O}(-1)\oplus \mathcal{O}(1))$; see \cite[Sections 9.3-9.4]{lioort} and~\cite[Proposition 3.5]{karemaker-yobuko-yu}. According to \cite[Section 9.4]{lioort} (cf.~\cite[Definition 3.14]{karemaker-yobuko-yu}), there is a section $s:C\isoto T\subseteq \calP_\mu$ of $\pi$. Furthermore, one has $\calP_\mu'=\calP_\mu \setminus T$. 
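The stratifications discussed below are governed by whether the parameter $t$ lies in $C(\F_{p^2})$ or $C(\F_{p^6})$, and by the position of the fibre coordinate $u$. As a purely illustrative aside, not used in any argument, the following sketch (assuming Python with only the standard library) counts the points of the Fermat curve $C\colon X_1^{p+1}+X_2^{p+1}+X_3^{p+1}=0$ over $\F_{p^2}$ by brute force; for $p=2$ and $p=3$ one finds $9$ and $28$ points, that is, $p^3+1$ points in each case.
\begin{verbatim}
# Illustrative sketch only (standard-library Python); not used in any proof.
# Brute-force count of C(F_{p^2}) for C : X1^(p+1) + X2^(p+1) + X3^(p+1) = 0.
# F_{p^2} is modelled as F_p[x]/(x^2 + f1*x + f0) for a fixed irreducible
# quadratic; field elements are pairs (a0, a1) representing a0 + a1*x.

def field_ops(p, f1, f0):
    def add(a, b):
        return ((a[0] + b[0]) % p, (a[1] + b[1]) % p)
    def mul(a, b):
        c0, c1, c2 = a[0]*b[0], a[0]*b[1] + a[1]*b[0], a[1]*b[1]
        return ((c0 - c2*f0) % p, (c1 - c2*f1) % p)  # reduce x^2 = -f1*x - f0
    def power(a, e):
        r = (1, 0)
        for _ in range(e):
            r = mul(r, a)
        return r
    elems = [(a0, a1) for a0 in range(p) for a1 in range(p)]
    return elems, add, power

def count_fermat_points(p, f1, f0):
    elems, add, power = field_ops(p, f1, f0)
    zero, one = (0, 0), (1, 0)
    # projective representatives: first non-zero coordinate normalised to 1
    reps  = [(one, y, z) for y in elems for z in elems]
    reps += [(zero, one, z) for z in elems]
    reps += [(zero, zero, one)]
    count = 0
    for point in reps:
        s = zero
        for coord in point:
            s = add(s, power(coord, p + 1))
        if s == zero:
            count += 1
    return count

assert count_fermat_points(2, 1, 1) == 9    # x^2 + x + 1 irreducible over F_2
assert count_fermat_points(3, 0, 1) == 28   # x^2 + 1     irreducible over F_3
\end{verbatim}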
We pull back the $a$-number from points of $\calS_{3}$ to points of $\calP_\mu$ by setting $a(y):=a(\pr_0(y))$ for $y\in \calP_\mu(k)$. We shall write a point $y\in \calP_\mu(k)$ as $(t,u)$, where $t=\pi(y)$ and $u\in \pi^{-1}(t)=: \bbP^1_t(k)$.

\begin{lemma}\label{lm:a_strata}
Let $y=(t,u) \in \calP_\mu(k)$ be a point corresponding to a PFTQ.
\begin{enumerate}
\item If $y\in T$ then $a(y)=3$.
\item If $t\in C(\F_{p^2})$, then $a(y)\ge 2$. Moreover, $a(y)=3$ if and only if $u\in \bbP^1_t(\F_{p^2})$.
\item We have $a(y)=1$ if and only if $y\notin T$ and $t\not\in C(\F_{p^2})$.
\end{enumerate}
\end{lemma}

\begin{proof}
See \cite[Sections 9.3-9.4]{lioort}.
\end{proof}

\begin{theorem}\label{introthm:a2}
Let $y = (t,u) \in \mathcal{P}_{\mu}(k)$ be a point such that $t\in C(\F_{p^2})$. Then
\[ \mathrm{Mass}(y)=\frac{L_p}{2^{10}\cdot 3^4\cdot 5\cdot 7}, \]
where
\[ L_p=
\begin{cases}
(p-1)(p^2+1)(p^3-1) & \text{if } u\in \mathbb{P}_t^1(\mathbb{F}_{p^2}); \\
(p-1)(p^3+1)(p^3-1)(p^4-p^2) & \text{if } u\in\mathbb{P}_t^1(\mathbb{F}_{p^4})\setminus \mathbb{P}_t^1(\mathbb{F}_{p^2}); \\
2^{-e(p)}(p-1)(p^3+1)(p^3-1) p^2(p^4-1) & \text{ if } u \not\in \mathbb{P}_t^1(\mathbb{F}_{p^4}),
\end{cases}
\]
where $e(p)=0$ if $p=2$ and $e(p)=1$ if $p>2$.
\end{theorem}

\begin{proof}
See \cite[Theorem A]{karemaker-yobuko-yu}.
\end{proof}

Theorem~\ref{introthm:a2} gives the mass formula for points with $a$-number greater than or equal to $2$. To describe the mass formula for points with $a$-number $1$, we need the construction of an auxiliary divisor $\calD\subseteq \calP'_\mu$, cf.~\cite[Definition 5.16]{karemaker-yobuko-yu}, and a function $d:C(k) \setminus C(\F_{p^2})\to \{3,4,5,6\}$, cf.~\cite[Definition 5.12]{karemaker-yobuko-yu}, which is proven in \cite[Proposition 5.13]{karemaker-yobuko-yu} to be related to the field of definition of the parameter $t$. The function $d$ is surjective when $p\neq 2$, and it only takes the value $3$ when $p=2$.

\begin{theorem}\label{introthm:a1}
Let $y = (t,u) \in \mathcal{P}'_{\mu}(k)$ be a point such that $t\not\in C(\F_{p^2})$. Then
\[ \mathrm{Mass}(y)=\frac{p^3 L_p}{2^{10}\cdot 3^4\cdot 5\cdot 7}, \]
where
\[
\begin{split}
L_p =
\begin{cases}
2^{-e(p)}p^{2d(t)}(p^2-1)(p^4-1)(p^6-1) & \text{ if } y \notin \calD; \\
p^{2d(t)}(p-1)(p^4-1)(p^6-1) & \text{ if } t \notin C(\mathbb{F}_{p^6}) \text{ and } y \in \calD; \\
p^6(p^2-1)(p^3-1)(p^4-1) & \text{ if } t \in C(\mathbb{F}_{p^6}) \text{ and } y \in \calD.
\end{cases}
\end{split}
\]
\end{theorem}

\begin{proof}
See \cite[Theorem B]{karemaker-yobuko-yu}.
\end{proof}

\begin{remark}
In \cite{karemaker-yobuko-yu} the authors define a stratification on $\calP_\mu$ and $\calS_{3}$ which is the coarsest one such that the mass function is constant on each stratum. Using Theorem~\ref{introthm:a2}, the locus of $\calS_{3}$ with $a$-number $\ge 2$ decomposes into three strata: one stratum with $a$-number $3$ and two strata with $a$-number~$2$. In the locus with $a$-number $1$, the stratification depends on $p$. When $p=2$, the $d$-value is always $3$ and Theorem~\ref{introthm:a1} gives three strata, which are of dimension $0$, $1$, $2$, respectively. When $p\neq 2$, the $d$-value $d(t)=3$ if and only if $t\in C(\F_{p^6})$, cf. \cite[Proposition 5.13]{karemaker-yobuko-yu}. In this case, Theorem~\ref{introthm:a1} says that the mass function depends only on the $d$-value of $t$ and on whether or not $y\in \calD$, and hence it gives eight strata.
The largest stratum is the open subset whose preimage consists of points $y=(t,u)$ with $d(t)=6$ and $y\not\in \calD$, and the smallest mass-value stratum is the zero-dimensional locus whose preimage consists of points $y=(t,u)$ with $d(t)=3$ and $y\in \calD$. Note that the mass-value strata for which the points $y=(t,u)$ have $d$-value less than~6 and are in the divisor $\calD$ are also zero-dimensional. Besides the superspecial locus, in which points have $a$-number three, the smaller mass-value stratum with $a$-number $2$ also has dimension $0$.
For every point $x$ in the largest stratum, one has
\begin{equation}
\label{eq:asympopen}
\Mass(\Lambda_x)\sim \frac{p^{27}}{2^{11}\cdot 3^4\cdot 5\cdot 7} \quad \text{as $p\to \infty$.}
\end{equation}
On the other hand, for every point $x$ in the superspecial locus, one has
\begin{equation}
\label{eq:asympsp}
\Mass(\Lambda_x)\sim \frac{p^{6}}{2^{10}\cdot 3^4\cdot 5\cdot 7} \quad \text{as $p\to \infty$.}
\end{equation}
\end{remark}

From all known examples, we observe that the mass $\Mass(\Lambda_x)$ is a polynomial function in $p$ with $\Q$-coefficients. It is plausible to expect that this holds true as well for any $x$ in $\calS_g$ and for arbitrary~$g$. Under this assumption, it is of interest to determine the largest degree of $\Mass(\Lambda_x)$, viewed as a polynomial in $p$. For $g\le 3$, it is known \cite{yuyu,karemaker-yobuko-yu} that the smallest degree is $g(g+1)/2$, which occurs when $x$ is superspecial. This is expected to be true for general $g$.

\section{The geometric theory: automorphism groups of polarised abelian varieties}\label{sec:aut}

\subsection{Powers of an elliptic curve}\label{ssec:powers}\ Let $E$ be an elliptic curve over a field $K$ with canonical polarisation $\lambda_E$ and let $(X_0, \lambda_0) = (E^n, \lambda_{\mathrm{can}})$, where $\lambda_{\mathrm{can}} = \lambda_E^n$ equals the product polarisation on $E^n$. Denote by $R:=\End(E)$ the endomorphism ring of $E$ over $K$ and by $B=\End^0(E)$ its endomorphism algebra; $B$ carries the canonical involution $a \mapsto \bar a$. Then $B$ is either $\Q$, an imaginary quadratic field, or the definite quaternion $\Q$-algebra $B_{p,\infty}$ of prime discriminant $p$.
We identify the endomorphism ring $\End(E^t)=\{a^t: a\in \End(E)\}$ with $\End(E)^\opp$. Via the isomorphism $\lambda_E$, the (anti-) isomorphism $\End(E^t)=\End(E)^\opp \isoto \End(E)$ maps $a^t$ to $\lambda_E^{-1} a^t \lambda_E=\bar a$. In other words, using the polarisation $\lambda_E$ we identify $\End(E)^\opp=\End(E^t)$ with $\{\bar a: a \in \End(E)\}$.
The set $\Hom(E,X_0)=R^n$ is a free right $R$-module whose elements we view as column vectors. It carries a left $\End(X_0)$-module structure and it follows that $\End(X_0)=\Mat_n(R)=\End_{R}(R^n)$ and $\End^0(X_0)=\Mat_n(B)=\End_{B}(B^n)$, where $B^n$ naturally identifies with $\Hom(E,X_0)\otimes \Q$. The map $\End(X_0) \to \End(X_0^t)$, sending $a$ to its dual $a^t$, induces an isomorphism of rings $\End(X_0)^{\rm opp}\simeq \End(X_0)$. The Rosati involution on $\End^0(X_0)=\Mat_n(B)$ induced by $\lambda_0$ is given by $A \mapsto A^* = \bar{A}^T$.
Let $\calH_n(B)$ be the set of positive-definite Hermitian\footnote{Strictly speaking, one should call such a matrix $H$ symmetric, Hermitian or quaternion Hermitian according to whether $B$ is $\Q$, an imaginary quadratic field, or $B_{p,\infty}$.} matrices $H$ in $\Mat_n(B)$, satisfying $H=H^*$ and $v^* H v>0$ for every non-zero vector $v\in B^n$.
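For the quaternionic case these objects are easy to experiment with on a computer. The following sketch is included purely as an illustration: it assumes Python with only the standard library and specialises to $B=B_{2,\infty}$, realised as the Hamilton quaternions over $\Q$, with $R$ the Hurwitz maximal order. It implements quaternion conjugation, and hence the conjugate-transpose $A\mapsto A^*=\bar A^T$ entrywise, lists the $24$ units of $R$, and checks that each unit $u$ satisfies $\bar u u=1$; cf.\ the description of $\Aut(R^n,\mathbb{I}_n)\simeq (R^\times)^n\cdot S_n$ below.
\begin{verbatim}
# Illustrative sketch only (standard-library Python); not used in any proof.
# B = B_{2,infinity} is realised as the Hamilton quaternions over Q and R as
# the Hurwitz order.  A quaternion a + b*i + c*j + d*k is a 4-tuple (a,b,c,d).
from fractions import Fraction
from itertools import product

def qmul(x, y):                       # Hamilton product
    a, b, c, d = x
    e, f, g, h = y
    return (a*e - b*f - c*g - d*h,
            a*f + b*e + c*h - d*g,
            a*g - b*h + c*e + d*f,
            a*h + b*g - c*f + d*e)

def qconj(x):                         # canonical involution a -> a-bar
    a, b, c, d = x
    return (a, -b, -c, -d)

def qnorm(x):                         # reduced norm N(x) = x * conj(x)
    return sum(t * t for t in x)

half = Fraction(1, 2)
units = []
for pos in range(4):                  # the 8 units +-1, +-i, +-j, +-k
    for sign in (1, -1):
        u = [Fraction(0)] * 4
        u[pos] = Fraction(sign)
        units.append(tuple(u))
for signs in product((1, -1), repeat=4):   # the 16 units (+-1 +-i +-j +-k)/2
    units.append(tuple(s * half for s in signs))

assert len(units) == 24               # R^x = E_24 has order 24
one = (Fraction(1), Fraction(0), Fraction(0), Fraction(0))
for u in units:
    # each unit is unitary for the form I_1:  conj(u) * u = 1
    assert qnorm(u) == 1 and qmul(qconj(u), u) == one
\end{verbatim}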
A \emph{fractional polarisation} on an abelian variety $X$ is an element $\lambda$ in $\Hom(X,X^t)\otimes_\Z \Q$ such that there exists a positive integer $N$ for which $N \lambda$ is a polarisation on $X$. Let $\calP(X_0)_{\Q}$ denote the set of fractional polarisations on $X_0$. \begin{lemma}\label{lm:PH} The map $\lambda \mapsto \lambda_0^{-1} \lambda$ gives a bijection $\calP(X_0)_\Q \isoto \calH_n(B)$, under which $\lambda_0$ corresponds to the identity~${\mathbb I}_n$. \end{lemma} \begin{proof} This is shown in \cite[7.12-7.14]{OortEO} for the case where $X_0$ is a superspecial abelian variety over an algebraically closed field of characteristic $p$ and the same argument holds for the present situation. \end{proof} For each $H\in \calH_n(B)$, we define a Hermitian form on $B^n$ by \begin{equation} \label{eq:hBn} h: B^n\times B^n \to B, \quad h(v_1, v_2):= v_1 ^* \cdot H \cdot v_2, \quad \text{$v_1, v_2\in B^n$}. \end{equation} If $H=\lambda_0^{-1} \lambda$ is the corresponding Hermitian form for $\lambda$, then the Rosati involution induced by~$\lambda$ is the adjoint of $h$: $A\mapsto H^{-1} \cdot A^* \cdot H$. The correspondence mentioned above induces an identification of automorphism groups \begin{equation}\label{eq:AutAut0} \mathrm{Aut}(X_0, \lambda) = \mathrm{Aut}(R^n, H):= \{ A \in \mathrm{GL}_n(B) : A(R^n) = R^n \text{ and } A^* \cdot H \cdot A = H \}. \end{equation} In particular, the identification \eqref{eq:AutAut0} induces an identification of automorphism groups \begin{equation}\label{eq:AutAut1} \mathrm{Aut}(X_0, \lambda_0) = \mathrm{Aut}(R^n, \mathbb{I}_n), \end{equation} and we know that \begin{equation}\label{eq:AutAut2} \begin{split} \mathrm{Aut}(R^n, \mathbb{I}_n) &= \{ A \in \mathrm{GL}_n(R) : A^* \cdot A = \mathbb{I}_n \} \\ &\simeq (R^\times)^n \cdot S_n, \end{split} \end{equation} where the last equality follows from the analogous result in \cite[Theorem 6.1]{karemaker-yobuko-yu}. \\ If $E$ is the unique supersingular elliptic curve over $\ol \F_2$ up to isomorphism, then $R^\times=E_{24}\simeq \SL_2(\F_3)$, where $E_{24}$ is the binary tetrahedral group of order 24 (see \cite[Theorem 3.7, p.~17]{vigneras}, cf. \cite[(57)]{karemaker-yobuko-yu}). Then the automorphism group $\Aut(X_0,\lambda_0)$ has $(24)^n n!$ elements by \eqref{eq:AutAut1} and \eqref{eq:AutAut2}. We expect that this is the maximal size of $\Aut(X,\lambda)$ for any $n$-dimensional principally polarised abelian variety $(X,\lambda)$ over any field $K$. We show a partial result towards confirming this expectation. \begin{proposition}\label{prop:maxsizeaut} For $n\leq 3$, the number $(24)^n n!$ is the maximal order of the automorphism group of an $n$-dimensional principally polarised abelian variety $(X,\lambda)$ over any field $K$. \end{proposition} \begin{proof} Since any principally polarised abelian variety$(X,\lambda)$ is of finite type over the prime field of $K$, it admits a model $(X_1,\lambda_1)$ over a finitely generated $\Z$-algebra $S$ such that $\Aut_{K}(X,\lambda)=\Aut_S(X_1,\lambda_1)$. Taking any $\Fpbar$-point $s$ of $S$ with residue field $k(s)$, one has $\Aut_{S}(X_1,\lambda_1)\subseteq \Aut_{\Fpbar}((X_1, \lambda_1) \otimes_S k(s))$. Thus, without loss of generality we may assume that the ground field~$K$ is the algebraically closed field $\Fpbar$ for some prime $p$. Further we can assume that $(X,\lambda)$ is defined over a finite field $\Fq$ with $\End(X)=\End(X\otimes \Fpbar)$. 
Note that $\Aut(X,\lambda)$ is a finite subgroup of $\Aut(X)$ and hence a finite subgroup of $\End^0(X)^\times$. We will bound the size of $\Aut(X,\lambda)$ by a maximal finite subgroup $G'$ of $\End^0(X)^\times$. When $n=1$, it is well known that $24$ is the maximal cardinality of $\Aut(E)$ of an elliptic curve $E$ over $\Fpbar$ for some prime $p$ and it is realised by the supersingular elliptic curve over $\ol \F_2$, cf.~\cite[V. Proposition 3.1, p. 145]{vigneras} . Suppose $n=2$. If $X$ simple, then $X$ is either ordinary or almost ordinary. By Tate's Theorem, the endomorphism algebra $\End^0(X)$ is a CM field and $G'$ consists of its roots of unity, so $|G'|\le 12$. If $X$ is isogenous to $E_1\times E_2$ where $E_1$ is not isogenous to $E_2$, then $\End^0(E_1\times E_2)=\End^0(E_1)\times \End^0(E_2)$ and any maximal finite subgroup $G'$ of $\End^0(E_1)^\times \times \End^0(E_2)^\times$ is of the form $\Aut(E_1')\times \Aut(E_2')$ for elliptic curves $E_i'$ isogenous to $E_i$. This reduces to the case $n=1$ and hence $|G'|\le 24^2$. Suppose now that $X\sim E^2$. If $L=\End^0(E)$ is imaginary quadratic, then $\End^0(X)\simeq \Mat_2(L)\simeq \End^0(\wt E^2)$ for a complex elliptic curve~$\wt E$ with CM by $L$. By \cite{birkenhake-lange}, $G'$ has order $\le 96$. Thus, we may assume that $E$ is supersingular so that $L$ is a quaternion algebra. If $X$ is superspecial, then the classification of $\Aut(X,\lambda)$ has been studied by Katsura and Oort; we have $|\Aut(X,\lambda)| \le 1152$ by \cite[Table 1, p. 137]{katsuraoort:compos87}. If~$X$ is non-superspecial, then $\Aut(X,\lambda) < \Aut(\wt X,\wt \lambda)$, where $(\wt X,\wt \lambda)$ is the superspecial abelian variety determined by the minimal isogeny of $(X,\lambda)$. The classification of $\Aut(\wt X,\wt \lambda)$ has been studied by the first author in~\cite{ibukiyama:autgp1989}. By \cite[Lemma 2.1, p.~132 and Remark 1, p.~343]{ibukiyama:autgp1989}, $\Aut(\wt X,\wt \lambda)$ has order $\le 720$ if $p>2$ and has order $1920$ if $p=2$. For the case $p=2$ and $a(X)=1$, the automorphism group $\Aut(X,\lambda)$ has order either $32$ or $160$ by Corollary~\ref{cor:p2g2aut}. This proves the case $n=2$. Now let $n=3$. Write $X\sim \prod_{i=1}^r X_i^{n_i}$ as the product of isotypic components up to isogeny, where the $X_i$'s are mutually non-isogenous simple factors. By induction and by the same argument as for $n=2$, we reduce to the case where $r=1$, that is, $X$ is elementary. Thus, we need to bound the size of maximal finite subgroups $G'$ in the simple $\Q$-algebra $\End^0(X)$. Finite subgroups in a division ring or in a certain simple $\Q$-algebra have been studied by Amitsur~\cite{amitsur:55} and Nebe~\cite{nebe:98}. A convenient list for our case is given by Hwang-Im-Kim~\cite[Section 5]{hwang-im-kim:g3}. From this list we see that $|G'|\le 24^3\cdot 6$ and that equality occurs exactly when $\End^0(X)\simeq\Mat_3(B_{2,\infty})$; see Theorem 5.13 of \emph{loc.~cit.} This proves the proposition. \end{proof} \begin{remark}\label{rem:Autchar0} Similarly, if $E$ is the unique elliptic curve with CM by $\Z[\zeta_3]$ over $\C$ up to isomorphism, then $R^\times=\Z[\zeta_3]^\times=\mu_6$ and the automorphism group $\Aut(X_0,\lambda_0)$ has $6^n n!$ elements by \eqref{eq:AutAut1} and \eqref{eq:AutAut2}. We expect that this is the maximal size of $\Aut(X,\lambda)$ for any $n$-dimensional principally polarised abelian variety $(X,\lambda)$ over any field $K$ of characteristic \emph{zero}. 
\end{remark} \subsection{Abelian varieties isogenous to a power of an elliptic curve}\label{ssec:isogpowers}\ Let $E/K$, $R = \End(E)$ and $B = \End^0(E)$ be as in the previous subsection. Let $\calA$ denote the category of abelian varieties over $K$ and $\calA^\pol$ denote that of abelian varieties $(X,\lambda)$ together with a fractional polarisation over $K$; we call $(X,\lambda)$ a $\Q$-polarised abelian variety. Let $\calA_E$ (resp.~$\calA^\pol_E$) be the full subcategory of $\calA$ (resp.~of $\calA^\pol$) consisting of abelian varieties that are isogenous to a power of $E$ over $K$. By an $R$-lattice we mean a finitely presented torsion-free $R$-module. Denote by $\LatR$ and $\RLat$ the categories of right $R$-lattices and left $R$-lattices, respectively. We may write $R^\opp=\{a^T: a\in R\}$ with multiplication $a^T b^T:=(ba)^T$. For a right $R$-module $M$, we write $M^\opp:=\{m^T: m\in M\}$ for the left $R^\opp$-module defined by $a^T m^T=(ma)^T$ for $a\in R$ and $m\in M$. The functor $I:M\mapsto M^\opp$ induces an equivalence of categories from $\LatR$ to $\RoLat$. A Hermitian form on $M$ here will mean a non-degenerate Hermitian form $h: M_\Q \times M_\Q \to B$ in the usual sense, where $M_ \Q:=M\otimes \Q$. A Hermitian $R$-lattice is an $R$-lattice together with a Hermitian form. If $h$ takes $R$-values on $M$, we say $h$ is integral. Let $\LatR^{\rm H}$ (resp. $\RoLat^{\rm H}$) denote the category of positive-definite Hermitian right $R$-lattices (resp.~left $R^\opp$-lattices). The functor \[ I:\LatR^{\rm H}\to \RoLat^{\rm H} \] induces an equivalence of categories. To each $\Q$-polarised abelian variety $(X,\lambda)$ in $\calA^\pol_E$, we associate a pair $(M,h)$, where \begin{equation}\label{eq:M} M:=\Hom(E,X) \end{equation} is a right $R$-lattice, and where \begin{equation}\label{eq:h} h=h_\lambda: M_\Q \otimes M_\Q \to B, \quad h_\lambda(f_1,f_2):=\lambda_E^{-1} f_1^t \lambda f_2 \end{equation} is a pairing on $M_\Q$. \begin{lemma}\label{lm:Mh} \begin{enumerate} \item The pair $(M,h)$ constructed above is a positive-definite Hermitian $R$-lattice. The Hermitian form $h$ is integral on $M$ if and only if $\lambda$ is a polarisation, and it is perfect if and only if $\lambda$ is a principal polarisation. \item Let $\lambda$ be a fractional polarisation on $X_0=E^n$. Then the associated Hermitian form $h_\lambda$ on $M:=\Hom(E,X_0)=R^n$ defined in \eqref{eq:h} is the Hermitian form defined in \eqref{eq:hBn}. \end{enumerate} \end{lemma} \begin{proof} \begin{enumerate} \item One checks that \begin{equation}\label{eq:h1} h(f_1 a,f_2)=\lambda_E^{-1} a^t f_1^t \lambda f_2 =(\lambda_E^{-1} a^t \lambda_E) \lambda_E^{-1} f_1^t \lambda f_2=\bar a h(f_1,f_2) \end{equation} and $h(f_1, f_2 a)=h(f_1,f_2)a$. Moreover, \begin{equation}\label{eq:h2} \ol{h(f_1,f_2)}=\lambda_E^{-1} ( \lambda_E^{-1} f_1^t \lambda f_2 )^t \lambda_E= \lambda_E^{-1} f_2^t \lambda f_1 \lambda_E^{-1} \lambda_E=h(f_2,f_1), \end{equation} so the $R$-lattice is indeed Hermitian. For $f\neq 0\in M$, we have $h(f,f)=\lambda_E^{-1} f^*\lambda$. Since $f^*\lambda$ is a fractional polarisation on $E$, the composition $\lambda_E^{-1} f^*\lambda$ is a positive element in $B$, which is a positive rational number in our case. This shows that $h$ is positive-definite. The last two statements are clear as the polarisation $\lambda_E$ is principal. \item For $f_1, f_2 \in \Hom(E,X)_\Q=B^n$, we have $h(f_1,f_2)=\lambda_E^{-1} f_1^t \lambda_0 \lambda_0^{-1} \lambda f_2$. 
If we write $f_1=(a_1, \dots, a_n)^T\in B^n$ and $\lambda_0^{-1} \lambda f_2 =(b_1, \dots, b_n)^T=:\ul b$, then $\lambda_E^{-1} f_1^t \lambda_0=(\bar a_1, \dots, \bar a_n)$ and $h(f_1,f_2)=\sum_{i=1}^n \bar a_i b_i= f_1^* \cdot\ul b= f_1^*\cdot H\cdot f_2$ for $H = \lambda_0^{-1} \lambda$. \end{enumerate} \end{proof} \def\calHom{\mathcal{Hom}} The sheaf Hom functor ${\mathcal Hom}_R(-, E):\RLat^\opp \to \calA_E$ produces a fully faithful functor whose essential image will be denoted by $\calA_{E,\mathrm{ess}}$. We refer to \cite{JKPRST} for the construction and properties of ${\mathcal Hom}_R(-, E)$. The functor $\Hom(-, E): \calA_E \to \RLat^\opp$ provides the inverse on $\calA_{E,\mathrm{ess}}$. The following result can be regarded as a polarised version of the construction in \cite{JKPRST}. \begin{proposition}\label{prop:equiv} The functor $(X,\lambda)\mapsto (M,h)$ introduced in Equations~\eqref{eq:M} and~\eqref{eq:h} induces an equivalence of categories \[ \calA_{E,\mathrm{ess}}^\pol \longrightarrow \LatR^H. \] Moreover, $\lambda$ is a polarisation if and only if $h$ is integral, and it is a principal polarisation if and only if $h$ is a perfect pairing on $M$. \end{proposition} \begin{proof} Let $T: \calA_E \to \calA_{E^t}$ be the functor sending $X$ to $X^t$; it induces an anti-equivalence of categories. The composition $\Hom(-, E^t)\circ T$ sends $X$ to $\Hom(X^t,E^t)$ and $I\circ \Hom(E,-)$ sends $X$ to $\Hom(E,X)^\opp$. The map that sends $f\in \Hom(E,X)$ to $f^t\in \Hom(X^t,E^t)$ gives a natural isomorphism $I\circ \Hom(E,-)\to \Hom(-, E^t)\circ T$. Restricted to $\calA_{E,\mathrm{ess}}$, the functor $\Hom(-, E^t)\circ T$ is an equivalence of categories. Therefore, $\Hom(E,-)$ induces an equivalence of categories from $\calA_{E,\mathrm{ess}}$ to $\LatR$. The dual $M^t:=\Hom_R(M,R)$ of a right $R$-lattice $M$, which a priori is a left $R$-lattice, may be regarded as a right $R$-lattice via $f\cdot a:=\bar a f$. This is simply the right $R^\opp$-module $(M^t)^\opp$ with the identification $R^\opp=\{\bar a: a\in R\}$. Suppose that $M = \Hom(E,X)$ is in the essential image of the equivalence, coming from some $(X,\lambda) \in \calA_{E,\mathrm{ess}}$. We claim that the map \begin{equation} \label{eq:Mt} \begin{split} \varphi: \Hom(E,X^t) &\to M^t \\ \alpha & \mapsto (\varphi_{\alpha}: m \mapsto \lambda_E^{-1} \alpha^t m) \end{split} \end{equation} is an isomorphism of right $R$-lattices. Indeed, it is injective by construction. For surjectivity, pick any $\psi \in M^t$. Since $\psi \in \Hom(\Hom(E,X),\Hom(E,E))$ and the functor $\Hom(E,-)$ is fully faithful, there exists a unique map $\tilde{\psi}\in \Hom(X,E)$ such that $\psi(f)=\tilde{\psi} \circ f$ for all maps $f\in \Hom(E, X)=M$. \begin{figure}[H]\label{fig:psitilde} \begin{center} \begin{tikzcd} E \arrow["\psi(f)"]{r} \arrow["f"]{d} & E \\ X \arrow["\tilde{\psi}"]{ur} & \end{tikzcd} \end{center} \end{figure} Then $\psi(m) = \tilde{\psi} m$ and we have $\tilde{\psi}^t \in \Hom(E^t, X^t)$. Considering $\alpha = \tilde{\psi}^t \lambda_E \in \Hom(E,X^t)$, it follows from the construction that \[ \varphi_{\alpha}(m) = \lambda_E^{-1} \alpha^t m = \lambda_E^{-1} \lambda_E \tilde{\psi} m = \psi(m) \] for all $m \in M$, hence $\psi = \varphi_{\alpha}$, which proves the claim. 
To prove the proposition, it remains to show that for any $X \in \calA_{E,\mathrm{ess}}$ we have a bijection between fractional polarisations on $X$ in $\Hom(X,X^t)\otimes \Q$ and positive-definite Hermitian forms on $M_{\mathbb{Q}}$ in $\Hom(M,M^t) \otimes \mathbb{Q}$. By the definition $\Hom(E,X) =M$, the isomorphism $\Hom(E,X^t)\simeq M^t$, and the fact that the functor $\Hom(E,-)$ is fully faithful, the natural map $\Hom(X, X^t) \to \Hom(M, M^t)$ is an isomorphism. Note that the induced isomorphism $\Hom(X, X^t) \otimes \Q \to \Hom(M, M^t)\otimes\Q$ is the same as the construction in Equation~\eqref{eq:h}. Hence, for every positive-definite Hermitian form $h$ on $M_{\mathbb{Q}}$, there exists a unique symmetric element $\lambda_1\in \Hom(X,X^t)_\Q$ such that $h_{\lambda_1} = h$ and it suffices to show that $\lambda_1$ is a fractional polarisation on $X$. Any quasi-isogeny $\beta: X \to E^n$ induces an isomorphism $\beta_* :M_\Q \to \Hom(E,E^n) \otimes \Q=B^n$ of $B$-modules. Let $\lambda := \beta_* \lambda_1$ be the pushforward map in $\Hom(E^n,(E^n)^t) \otimes \Q$, and let $h_\lambda: B^n \times B^n \to B$ be the Hermitian form defined by \eqref{eq:h}. Then $\beta_*: (M_\Q,h) \to (B^n, h_\lambda)$ is an isomorphism of $B$-modules with pairings. Since $h$ is a positive-definite Hermitian form by assumption, so is the pairing $h_\lambda$. Let $H\in \calH_n(B)$ be the positive-definite Hermitian matrix corresponding to $h_\lambda$ with respect to the standard basis. By Lemma~\ref{lm:Mh}.(2), $H$ is equal to $\lambda_{\rm can}^{-1} \lambda$, where $\lambda_{\rm can}$ is as defined in Subsection 4.1. Since $H\in \calH_n(B)$, by Lemma~\ref{lm:PH} the map $\lambda$ is a fractional polarisation and therefore $\lambda_1$ is a fractional polarisation, as required. \end{proof} By \cite[Theorem 1.1]{JKPRST} we obtain the following consequence. The main improvement to \cite{JKPRST} is dealing with polarisations. \begin{corollary}\label{cor:JKPRST} Let $E$ be an elliptic curve over a finite field $K=\Fq$ with Frobenius endomorphism $\pi$ and endomorphism ring $R = \mathrm{End}(E)$. The functor $\Hom(E,-): \calA_E^\pol \to \LatR^H$ induces an equivalence of categories if and only if one of the following holds: \begin{itemize} \item $E$ is ordinary and $\Z[\pi]=R$; \item $E$ is supersingular, $K=\Fp$ and $\Z[\pi]=R$; or \item $E$ is supersingular, $K=\F_{p^2}$ and $R$ has rank $4$ over $\mathbb{Z}$. \end{itemize} \end{corollary} \begin{remark} A few results similar to Proposition~\ref{prop:equiv} exist in the literature. The first case of Corollary~\ref{cor:JKPRST} is proven in \cite{KNRR}. More precisely, when $E$ is ordinary and $R = \mathbb{Z}[\pi]$, in \cite[Theorem 3.3]{KNRR} the constructions of \cite{JKPRST} are used to derive an equivalence of categories between $\mathcal{A}^{\mathrm{pol}}_E$ and $\mathrm{Lat}_R^H$.\\ When $R = \mathbb{Z}[\pi]$, Serre's tensor construction (cf.~\cite{Lauter}) gives an analogue of Corollary~\ref{cor:JKPRST} in some cases, when replacing ${\mathcal Hom}$ with $\otimes_R E$. 
The tensor construction is used in \cite[Theorem A]{Amir} for a ring $R$ with positive involution, a projective finitely presented right $R$-module $M$ with an $R$-linear map $h: M \to M^t$, and an abelian scheme $A$ over a base $S$ with $R$-action via $\iota: R \hookrightarrow \End_S(A)$ and an $R$-linear polarisation $\lambda: A \to A^t$, to prove that $h \otimes \lambda: M \otimes_R A \to M^t \otimes_R A^t$ is a polarisation if and only if $h$ is a positive-definite $R$-valued Hermitian form. Also, for a superspecial abelian variety $X$ over an algebraically closed field $k$ of characteristic $p$ it is shown in \cite[7.12-7.14]{OortEO} that the functors $X \mapsto M = \Hom(E,X)$ and $M \mapsto M \otimes_R E = X$ yield bijections between principal polarisations on $X$ and positive-definite perfect Hermitian forms on $M$. \end{remark} For any elliptic curve $E$ over a field $K$, we know that $B = \End^0(E)$ satisfies the conditions in Section~\ref{sec:Arith} and in particular those of Corollary~\ref{autodecomposition}. This means that when $E$ is defined over a finite field and is in one of the cases of Corollary~\ref{cor:JKPRST}, then we may apply the categorical constructions above to automorphism groups, in order to obtain the following result. \begin{corollary}\label{cor:Aut} \begin{enumerate} \item For any $(X,\lambda) \in \calA^{\mathrm{pol}}_{E,\mathrm{ess}}$, the lattice $(M,h)$ associated to $(X,\lambda)$ admits a unique orthogonal decomposition \[ M = \perp_{i=1}^r \left (\perp_{j=1}^{e_i} M_{ij} \right). \] for which $M_{ij}$ is isomorphic to $M_{i'j'}$ if and only if $i=i'$. Hence, we have that \begin{equation} \label{eq:AutXl} \Aut(X,\lambda) \simeq \Aut(M,h) \simeq \prod_{i=1}^r \Aut(M_{i1}, h|_{M_{i1}})^{e_i} \cdot S_{e_i}. \end{equation} \item Let $E$ be an elliptic curve over a finite field $K=\mathbb{F}_q$ such that Corollary~\ref{cor:JKPRST} applies. Then for any $(X,\lambda) \in \calA^{\mathrm{pol}}_{E}$, the automorphism group $\Aut(X,\lambda)$ can be computed as in Equation~\eqref{eq:AutXl}. \end{enumerate} \end{corollary} \begin{corollary}\label{cor:Autsp} Let $R$ be a maximal order in the definite quaternion $\Q$-algebra $B_{p,\infty}$. Let ${\rm Sp}^{\rm pol}$ be the category of fractionally polarised superspecial abelian varieties over an algebraically closed field $k$ of characteristic $p$. Then there is an equivalence of categories between ${\rm Sp}^{\rm pol}$ and $\LatR^{\rm H}$. Moreover, for any object $(X,\lambda)$ in ${\rm Sp}^{\rm pol}$, the automorphism group $\Aut(X,\lambda)$ can be computed as in Equation~\eqref{eq:AutXl}. \end{corollary} \begin{proof} Choose an elliptic curve $E$ over $\F_{p^2}$ with Frobenius endomorphism $\pi=-p$ and endomorphism ring $\End(E)\simeq R$. Then the category $\calA_{E}$ is the same as that of superspecial abelian varieties over $\F_{p^2}$ with Frobenius endomorphism $-p$, because every supersingular abelian variety $X/\F_{p^2}$ with Frobenius endomorphism $-p$ is superspecial. To see this, we use contravariant \dieu theory. Indeed, let $M$ be the (contravariant) \dieu module of $X$; then we have $\sfF^2 M=pM$, which implies that $\sfF M=\sfV M$ and that $a(M)=g$, and hence that $M$ is superspecial. The functor sending each object $X$ in $\calA_E$ to $X\otimes_{\F_{p^2}} k$ induces an equivalence of categories between $\calA_E$ and the category of superspecial abelian varieties over $k$ (cf.~\cite[Proposition 5.1]{yu:iumj18}). Thus, it induces an equivalence of categories between $\calA_E^{\rm pol}$ and $\Sp^{\rm pol}$. 
By Corollary~\ref{cor:JKPRST}, there is an equivalence of categories between $\Sp^{\rm pol}$ and $\LatR^{\mathrm{H}}$. The last statement of the corollary follows from Corollary~\ref{cor:Aut}.
\end{proof}

\subsection{Abelian varieties that are quotients of a power of an abelian variety over $\Fp$}\label{ssec:powerAV} \ In this subsection only, we let $E$ denote an abelian variety over $K=\Fp$ such that its endomorphism algebra $B=\End^0(E)$ is commutative, and we put $R=\End(E)$ as before. This assumption on~$B$ means that $E$ does not have a repeated simple factor (i.e., it is squarefree) nor a factor that is a supersingular abelian surface with Frobenius endomorphism $\sqrt{p}$. Since every abelian variety over a finite field is of CM type, the algebra $B$ is a product of CM fields. Denote again by $a\mapsto \bar a$ the canonical involution of $B$. Fix a polarisation~$\lambda_E$ on~$E$.
We will use the same notation and terminology as in previous subsections, except that we let $\calA_E$ (resp.~$\calA_E^\pol$) be the full subcategory of $\calA$ (resp.~$\calA^{\pol}$) consisting of abelian varieties which are quotients of a power of $E$ over $\Fp$.
Recall that an $R$-module $M$ is called \emph{reflexive} if the canonical map $M\to (M^{t})^{t}$ is an isomorphism, where $M^t:=\Hom_R(M,R)$. If $\Z[\pi_E,\bar \pi_E]=R$, where $\pi_E$ denotes the Frobenius endomorphism of $E$, then $R$ is Gorenstein and every $R$-lattice is automatically reflexive \cite[Theorem~11 and Lemma~13]{CS15}.

\begin{theorem}\label{thm:CS+JKPRST} {\rm (\!\cite[Theorem 8.1]{JKPRST}, \cite[Theorem 25]{CS15}) }
Let $E$ be an abelian variety over $\Fp$ as above and assume that $\Z[\pi_E,\bar \pi_E]=R$. Then the functor ${\mathcal Hom}_R(-,E)$ induces an anti-equivalence of categories
\begin{equation}
\label{eq:avqEn}
\RLat \longrightarrow \calA_{E}
\end{equation}
and $\Hom(-,E)$ is its inverse functor. Moreover, the functor ${\mathcal Hom}_R(-,E)$ is exact, and it is isomorphic to the Serre tensor functor $M \mapsto M^t\otimes_R E$.
\end{theorem}

Also see \cite[Theorem 3.1]{yu:jpaa2012} for a construction of a bijection from the set of isomorphism classes in $\RLat$ to that in $\calA_E$. The category $\calA_E$ contains more objects than those which are isogenous to a power of $E$ in the case where $E$ is not simple. Note that an abelian variety $X/\Fp$ lies in $\calA_E$ if and only if there is a $\Q$-algebra homomorphism $\Q[\pi_E]\to \Q[\pi_X]$ mapping $\pi_E$ to the Frobenius endomorphism $\pi_X$ of $X$.
Let $\RLat^{\rm f}$ (resp.~$\LatR^{\rm f}$) denote the full subcategory consisting of left (resp.~right) $R$-lattices $M$ such that $M_\Q$ is a free $B$-module of finite rank. Similarly, let $\RLat^{\mathrm{f},H}\subseteq \RLat^{H}$ (resp.~$\LatR^{\mathrm{f},H}\subseteq \LatR^{H}$) be the full subcategory of positive-definite Hermitian left (resp.~right) $R$-lattices $(M,h)$ with free $B$-module $M_\Q$. The functor ${\mathcal Hom}_R(-,E)$ induces an anti-equivalence of categories from $\RLat^{\mathrm{f}}$ to the subcategory $\calA_E^{\mathrm{f}}$ consisting of abelian varieties isogenous to a power of $E$. Moreover, we prove the following result about polarised varieties.

\begin{theorem}\label{thm:pol+JKPRST}
Let $(E,\lambda_E)$ be a principally polarised abelian variety over $\Fp$ with the assumptions as in Theorem~\ref{thm:CS+JKPRST}. Then the following hold.
\begin{enumerate} \item The functor $(X,\lambda)\mapsto (M,h)$ introduced in Equations~\eqref{eq:M} and~\eqref{eq:h} induces an equivalence of categories \[ \calA_{E}^\pol \longrightarrow \LatR^H. \] \item For any $\Q$-polarised abelian variety $(X,\lambda)$ over $\Fp$ in $\calA_E^\pol$, the automorphism group $\Aut(X,\lambda)$ can be computed as in Equation~\eqref{eq:AutXl}. \end{enumerate} \end{theorem} \begin{proof} \begin{itemize} \item[(1)] We first show that $(M,h)$ is a positive-definite Hermitian $R$-lattice. By Equations \eqref{eq:h1} and \eqref{eq:h2} in the proof of Lemma~\ref{lm:Mh}, $h$ is Hermitian and it remains to show that $h$ is positive-definite. Let $E_i$ ($1\le i\le r$) be the simple abelian subvarieties of $E$ and let $\varphi=\sum_{i} \iota_i: \prod_{i=1}^r E_i \to E$ be the canonical isogeny with inclusions $\iota_i:E_i \subseteq E$. Then we have an inclusion $M\subseteq \bigoplus_{i=1}^r M_i$, where $M_i=\Hom(E_i, X)$. Let $\lambda_{E_i}$ be the restriction of the polarisation $\lambda_E$ to~$E_i$. The isogeny~$\varphi$ induces an isomorphism from $B$ onto a product $ \prod_{i} B_i$ of CM fields $B_i=\End^0(E_i)$, and the decomposition $M_\Q=\bigoplus_{i=1}^r M_{i,\Q}$ respects the decomposition $B\simeq \prod_{i} B_i$. Moreover, we have $(M_\Q,h)= \perp_{i=1}^r (M_{i,\Q}, h_i)$, where $h_i$ is the restriction of $h$, which is also induced from the polarisation $\lambda_{E_i}$. Let $f=(f_i)\in M_\Q$ be a non-zero vector. Then $h(f,f)=(h_i(f_i,f_i))_i=(\lambda_{E_i}^{-1} f_i^*\lambda )_i$ and $\lambda_{E_i}^{-1} f_i^*\lambda$ is a totally positive element whenever $f_i\neq 0$. This shows that $h$ is positive-definite. Then the same argument as in Proposition~\ref{prop:equiv} proves the equivalence. Note that the principal polarisation $\lambda_E$ ensures there is a natural isomorphism $\Hom(E,X^t)\simeq M^t$. \item[(2)] This follows from Theorem~\ref{orthogonal} and Corollary~\ref{autodecomposition} in the extended setting where $B$ is a product of CM fields; see Remark~\ref{rem:product}. \end{itemize} \end{proof} \subsection{Minimal $\boldsymbol{E}$-isogenies}\label{ssec:Eisog}\ As in Subsections~\ref{ssec:powers} and~\ref{ssec:isogpowers}, we again let $E$ be an elliptic curve over a field $K$ with canonical polarisation $\lambda_E$, and $R:=\End(E)$ the endomorphism ring of $E$ over $K$, and $B=\End^0(E)$ its endomorphism algebra. In particular, we again let $\calA_E$ (resp.~$\calA^\pol_E$) be the full subcategory of $\calA$ (resp.~of $\calA^\pol$) consisting of abelian varieties that are isogenous to a power of $E$ over $K$. In this subsection, we define a notion of a minimal $E$-isogeny, generalising that of a minimal isogeny as introduced by Li-Oort (cf.~\cite[Section 1.8]{lioort}, also see Definition~\ref{def:minisog}), and satisfying a stronger universal property. \begin{lemma}\label{lm:minE} Let $X$ be an object in $\calA_E$. Then there exist an object $\wt X$ in $\calA_{E, {\rm ess}}$ and an isogeny $\gamma: X\to \wt X$ such that for any morphism $\phi: X \to Y$ with object $Y$ in $\calA_{E, {\rm ess}}$, there exists a unique morphism $\alpha: \wt X \to Y$ such that $\alpha\circ \gamma=\phi$. Dually, there exist an object $\wt X$ in $\calA_{E, {\rm ess}}$ and an isogeny $\varphi: \wt X\to X$ that satisfy the analogous universal property. \end{lemma} \begin{proof} We first construct a morphism $\gamma: X \to \wt X$, where $\wt X$ is an object in $\calA_{E,{\rm ess}}$. It will be more convenient to adopt the contravariant functors. 
Let $M:=\Hom(X,E)$ and let $\wt X:= {\mathcal Hom}_R(M,E)$. The abelian variety $\wt X$ represents the functor \[ S \mapsto \Hom_R(M,E(S)), \] for any $K$-scheme $S$. Define a morphism $\gamma:X\to \wt X$ by \begin{equation}\label{eq:minEisog} \gamma: X(S)\to \wt X(S)=\Hom_R(M,E(S))\quad \text{ mapping } \quad x \mapsto \left (\gamma_x: f \mapsto f(x)\in E(S) \right ), \end{equation} for all $f\in M=\Hom(X,E)$. Now let $Y$ be an object in $\calA_{E, {\rm ess}}$ and $\phi: X \to Y$ be a morphism. Using \eqref{eq:minEisog}, we also have a morphism $\gamma_Y: Y\to \wt Y$ which is an isomorphism as the functor $\Hom(-,E)$ induces an equivalence on $\calA_{E, {\rm ess}}$. The morphism $\phi: X \to Y$ induces a map $M_Y:=\Hom(Y,E) \to M=\Hom(X,E)$ by precomposition with $\phi$. This map also induces, after applying the functor ${\mathcal Hom}_R(-,E)$, a morphism $\beta: \wt X \to \wt Y$. We claim that the diagram \begin{equation}\label{eq:min_cd} \begin{CD} X @>{\gamma}>> \wt X \\ @VV{\phi}V @VV{\beta}V \\ Y @>{\gamma_Y}>{\sim}> \wt Y \end{CD} \end{equation} commutes; we will show this by proving it on $S$-points for any $K$-scheme $S$. Let $x\in X(S)$ and $g:Y\to E$. We have $\beta (\gamma_x)(g)=\gamma_x(g \circ \phi)=g(\phi(x))$. On the other hand $\gamma_Y(\phi(x))(g)=g(\phi(x))$. This shows the claim. Let $\alpha:=\gamma_Y^{-1} \circ \beta: \wt X \to Y$, so we have $\alpha\circ \gamma=\phi$ by commutativity. Finally, take $Y = E^n$ and any isogeny $\phi: X \to E^n$ and let $\alpha:\wt X \to E^n$ be the unique morphism satisfying $\alpha\circ \gamma=\phi$. Since $\dim X=\dim \wt X=\dim E^n = n$, it follows that $\gamma$ is an isogeny. The dual construction is entirely analogous. \end{proof} \begin{definition}\label{def:minE} Let $X$ be an object in $\calA_E$. We call the isogeny $\gamma: X \to \wt X$ (resp. $\varphi:\wt X \to X$) constructed in Lemma~\ref{lm:minE} the \emph{minimal $E$-isogeny of $X$} and the abelian variety $\wt X$ the \emph{$E$-hull} of $X$. \end{definition} \begin{remark} \label{rem:min_isog} If $E/K$ is a supersingular elliptic curve over an algebraically closed field $K=k$ of characteristic $p$, then $\calA_E$ is the category of supersingular abelian varieties over $k$ and $\calA_{E,{\rm ess}}$ is the category of superspecial abelian varieties over $k$. In this case, a minimal $E$-isogeny $\gamma:X\to \wt X$ or $\varphi:\wt X \to X$ of a supersingular abelian variety $X$ is precisely the minimal isogeny of $X$ in the sense of Oort, cf.~\cite[Definition~2.11]{karemaker-yobuko-yu}. By Lemma~\ref{lm:minE}, the minimal isogeny $(X, \gamma: X\to \wt X)$ satisfies the stronger universal property where the test object $\phi:X\to Y$ does not have to be an isogeny. \end{remark} \begin{lemma}\label{lm:product_compatibility} Let $X$ be an object in $\calA_{E,{\rm ess}}$. Suppose there are abelian varieties $X_1, \dots, X_r$ in~$\calA_E$ and there is an isomorphism $\phi:X_1\times \dots \times X_r \isoto X$. Then each abelian variety $X_i$ lies in $\calA_{E,{\rm ess}}$. \end{lemma} \begin{proof} According to the construction of minimal $E$-isogenies, let \[ M:=\Hom(X,E)\simeq \prod_{i=1}^r M_i \quad \text{and} \quad \wt X:={\mathcal Hom}_R(M,E)\simeq \prod_{i=1}^r \wt X_i, \] where \[ M_i:=\Hom(X_i,E)\quad \text{and} \quad \wt X_i:={\mathcal Hom}_R(M_i,E). 
\] By the definition of $\gamma$ in Equation~\eqref{eq:minEisog}, we have \[ \gamma=(\gamma_i)_i: X \simeq X_1\times \dots \times X_r \longrightarrow \wt X\simeq \wt X_1 \times \dots \times \wt X_r, \quad \text{where} \quad \gamma_i:X_i \to \wt X_i. \] By applying the universal property of the minimal $E$-isogeny with $Y = X$ and $\phi = \mathrm{id}$, there is a unique isogeny $\alpha: \wt X \to X$ such that $\alpha\circ \gamma=\mathrm{id}$. This shows that $\gamma$ is an isomorphism, which means that each $\gamma_i:X_i \to \wt X_i$ is an isomorphism. In particular, every abelian variety $X_i$ lies in $\calA_{E,{\rm ess}}$. \end{proof} \begin{lemma}\label{lm:prod_compt2} Let $X_1,\dots, X_r$ be objects in $\calA_{E}$. Then $(\gamma_i)_i: X=\prod_{i=1}^r X_i \to \prod_{i=1}^r \wt X_i$ is the minimal $E$-isogeny of $X$. \end{lemma} \begin{proof} For any $Y\in \calA_{E,\mathrm{ess}}$, as in Equation~\eqref{eq:min_cd}, we obtain the following commutative diagram: \begin{equation}\label{eq:min_cd2} \begin{CD} \prod_{i=1}^r X_i @>{(\gamma_i)_i}>> \prod_{i=1}^r \wt X_i \\ @VV{\sum_i \phi_i}V @VV{\beta=\sum_i\beta_i}V \\ Y @>{\gamma_Y}>{\sim}> \wt Y. \end{CD} \end{equation} Then the unique morphism $\alpha = \gamma_Y^{-1} \circ \beta$ satisfies the desired property $\alpha\circ {(\gamma_i)_i}=\sum_i \phi_i$. \end{proof} \begin{proposition} \label{prop:AutX2} Let $(X,\lambda)\in \calA_{E}^{\mathrm{pol}}$ and let $\varphi:(\wt X, \wt \lambda)\to (X,\lambda)$ be the minimal $E$-isogeny of~$(X,\lambda)$, where $\wt \lambda$ is chosen to be $\varphi^* \lambda$. Then \begin{equation}\label{eq:AutX2} \Aut(X,\lambda)=\{\alpha\in \Aut(\wt X, \wt \lambda): \alpha(H)=H\}, \end{equation} where $H:=\ker(\varphi)$. \end{proposition} \begin{proof} By the universal property of minimal $E$-isogenies, every $\sigma_0\in\Aut(X,\lambda)$ uniquely lifts to an automorphism $\sigma\in \Aut(\wt X, \wt \lambda)$. To see this, note that we have $\sigma_0 \varphi = \varphi \sigma$, and $\wt \lambda=\varphi^* \lambda$, and $\sigma_0^*\lambda=\lambda$, so \[ \sigma^* \wt \lambda=\sigma^* \varphi^* \lambda = \varphi^* \sigma_0^* \lambda=\varphi^* \lambda =\wt \lambda. \] Since $X=\wt X/H$, an element $\sigma\in \Aut(\wt X, \wt \lambda)$ descends to an element $\sigma_0 \in \Aut(X,\lambda)$ if and only if $\sigma(H)=H$. \end{proof} \subsection{Unique decomposition property}\label{ssec:uniquedec} \begin{definition} Let $(X,\lambda)$ be a $\Q$-polarised abelian variety over $K$. We say that $(X,\lambda)$ is \emph{indecomposable} if, whenever we have an isomorphism $(X_1,\lambda_1)\times (X_2,\lambda_2)=(X_1\times X_2, \lambda_1\times \lambda_2) \simeq (X,\lambda)$, either $\dim X_1=0$ or $\dim X_2=0$. \end{definition} By induction on the dimension of $X$, every object $(X,\lambda)$ in $\calA^\pol$ decomposes into a product of indecomposable objects. \begin{definition}\label{def:RS} An object $(X,\lambda)$ in $\calA^\pol$ is said to have \emph{the Remak-Schmidt property} if for any two decompositions into indecomposable objects $\phi=(\phi_i)_i: \prod_{i=1}^r (X_i,\lambda_i)\isoto (X,\lambda)$ and $\phi'=(\phi'_j)_j: \prod_{j=1}^s(X_j',\lambda_j')\isoto(X,\lambda)$, we have $r=s$ and there exist a permutation $\sigma\in S_r$ and isomorphisms $\alpha_i: (X_i,\lambda_i)\isoto (X_{\sigma(i)}', \lambda_{\sigma(i)}')$ such that $\phi_i=\phi_{\sigma(i)}' \circ \alpha_i$ for every $1 \le i \le r$.
\end{definition} \begin{theorem} Any $\Q$-polarised abelian variety $(X,\lambda)$ in $\calA^\pol$ admits the Remak-Schmidt property. \end{theorem} \begin{proof} This is nothing but a categorical formulation of the unique decomposition of $\Q$-polarised abelian varieties into indecomposable $\Q$-polarised abelian subvarieties. When the ground field~$K = \bar K$ is algebraically closed, this is proved by Debarre and by Serre \cite{Debarre}, using a result of Eichler \cite{Eichler}. Let $\bar K$ and $K_s$ be an algebraic and a separable closure of $K$, respectively. Let $(X,\lambda)_{\bar K}=\prod_{i\in I} (X_i,\lambda_i)$ be a decomposition into indecomposable $\Q$-polarised abelian subvarieties over $\bar K$, where $\lambda_i:=\lambda|_{X_i}$. Let $\varepsilon_i: X \to X_i \to X \in \End_{\bar K}(X)$ be the idempotent corresponding to the component $X_i$. Since $\varepsilon_i\in \End_{K_s}(X)$ by Chow's Theorem, each subvariety $X_i:=\ker (1-\varepsilon_i)$ is defined over $K_s$ and the theorem is proved for $K_s$. We now show it for an arbitrary ground field. So let $(X,\lambda)_{K_s}=\prod_{j\in J} (Y_j,\lambda_j)$ be a decomposition into indecomposable $\Q$-polarised abelian subvarieties over $K_s$. For each $\sigma\in \Gamma_K :=\Gal(K_s/K)$, we have \[ \prod_{j\in J}(Y_j, \lambda_j)=(X,\lambda)_{K_s}=\sigma(X,\lambda)_{K_s}=\prod_{j\in J} \sigma(Y_j,\lambda_j).\] By the uniqueness of the decomposition over $K_s$, we obtain that $\sigma(Y_j)=Y_{\sigma(j)}$ for a unique $\sigma(j)\in J$; this gives an action of $\Gamma_K$ on $J$. Let $I$ denote the set of $\Gamma_K$-orbits of $J$ and put $(X_i,\lambda_i):=\prod_{j\in i}(Y_j,\lambda_j)$ for each $i\in I$. Since now $(X_i,\lambda_i)$ is the smallest $\Q$-polarised abelian subvariety defined over $K$ containing $(Y_j,\lambda_j)$ for any $j \in i$, it is in particular indecomposable. Since the abelian subvarieties $Y_j$ are uniquely determined by $X$ up to permutation, so are the abelian subvarieties $X_i$. \end{proof} We remark that if polarisations are not taken into consideration, then the decomposition of an (unpolarised) abelian variety into indecomposable subvarieties is far from unique; see Shioda~\cite{shioda}. \section{The geometric theory: the Gauss problem for central leaves}\label{sec:proof} \subsection{First results and reductions}\label{ssec:4first}\ Let $x=[(X_0,\lambda_0)]\in \calA_g(k)$ be a point and let $\calC(x)$ be the central leaf passing through $x$. \begin{proposition}[Chai]\label{prop:chai} The central leaf $\calC(x)$ is finite if and only if $X_0$ is supersingular. In particular, a necessary condition for $|\calC(x)|=1$ is that $x\in \calS_{g}(k)$. \end{proposition} \begin{proof} It is proved in \cite[Proposition 1]{chai} that the prime-to-$p$ Hecke orbit $\calH^{(p)}(X_0,\lambda_0)$ (i.e., the points obtained from $(X_0,\lambda_0)$ by polarised prime-to-$p$ isogenies) is finite if and only if $X_0$ is supersingular. Since $\calH^{(p)}(X_0,\lambda_0)\subseteq \calC(x)$, the central leaf $\calC(x)$ is finite only if $X_0$ is supersingular. When $X_0$ is supersingular, we have $\calC(x)=\Lambda_x$ by definition, and hence $\calC(x)$ is finite, cf.~\eqref{eq:smf:1} . \end{proof} From now on we assume that $x\in \calS_g(k)$. In this case \[ \calC(x)=\Lambda_x\simeq G^1(\Q)\backslash G^1(\A_f)/U_x, \] where $U_x=G_x(\wh \Z)$ is an open compact subgroup. 
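Recall that, for a finite set $\Lambda$ of isomorphism classes of polarised abelian varieties as above, the mass is given by \[ \Mass(\Lambda)=\sum_{(X,\lambda)\in \Lambda} \frac{1}{\vert \Aut(X,\lambda)\vert}. \] In particular, if $\vert \Lambda \vert =1$, then $\Mass(\Lambda)=1/\vert \Aut(X,\lambda)\vert$ for the unique member $(X,\lambda)$ of $\Lambda$; hence, whenever $\Mass(\Lambda)$, written in lowest terms, has numerator larger than one, it follows that $\vert \Lambda \vert >1$. For instance, for $g=1$ the classical mass formula for supersingular elliptic curves gives $\Mass(\Lambda_{1,1})=(p-1)/24$, so already for $p=11$ the value $\Mass(\Lambda_{1,1})=5/12$ forces $\vert \Lambda_{1,1}\vert >1$. We will use this kind of counting argument repeatedly below.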
Similarly to the description of $\Lambda_x$ above, for $0\leq c\leq \lfloor g/2 \rfloor$ we have \[ \Lambda_{g,p^c}\simeq G^1(\Q)\backslash G^1(\A_f)/U_{g,p^c}, \] where $U_{g,p^c}=G_{x_c}(\wh \Z)$ for a base point $x_c\in \Lambda_{g,p^c}$. \begin{lemma}\label{lem:Lgpc} For every point $x\in \calS_g(k)$, there exists a (non-canonical) surjective morphism $\pi:\Lambda_x \twoheadrightarrow \Lambda_{g,p^c}$ for some integer $c$ with $0\le c\le \lfloor g/2 \rfloor$. Moreover, one can select a base point~$x_c'$ in $\Lambda_{g,p^c}$ so that $G_x(\Zp)$ is contained in $G_{x_c'}(\Zp)$ and $\pi$ is induced from the identity map on $G^1(\A_f)$ \begin{equation} \label{eq:Gxc} G^1(\Q)\backslash G^1(\A_f)/U_x \longrightarrow G^1(\Q)\backslash G^1(\A_f)/U_{x'_c}. \end{equation} \end{lemma} \begin{proof} We have \[ G_{x_c}(\Zp)\simeq \Aut_{G^1(\Qp)} \left ( (\Pi_p O_p)^{g-c}\oplus O_p^c, \bbJ_g\right )=:P_c. \] By \cite[Theorem~3.13, p.~150]{platonov-rapinchuk}, the subgroups $P_c$ for $c=0,\dots, \lfloor g/2 \rfloor$ form a complete set of representatives of the maximal parahoric subgroups of $G^1(\Q_p)$ up to conjugacy. So $G_x(\Zp)$ is contained in $g_p^{-1} G_{x_c}(\Z_p) g_p$ for some integer $c$ with $0\le c\leq \lfloor g/2 \rfloor$ and some element $g_p\in G^1(\Qp)$. Thus, we have a surjective map \begin{equation} \label{eq:Gx} G^1(\Q)\backslash G^1(\A_f)/U_x \twoheadrightarrow G^1(\Q)\backslash G^1(\A_f)/g_p^{-1}U_{g,p^c}g_p \xrightarrow{\cdot g_p} G^1(\Q)\backslash G^1(\A_f)/U_{g,p^c}\simeq \Lambda_{g,p^c}. \end{equation} This gives a surjective map $\Lambda_x\twoheadrightarrow \Lambda_{g,p^c}$. The base point $x_c'$ is chosen so that $U_{x_c'}=g_p^{-1}U_{g,p^c}g_p$. \end{proof} Let $\varphi: \wt x=(\wt X_0,\wt \lambda_0)\to x=(X_0,\lambda_0)$ be the minimal isogeny for $x$, as constructed in Lemma~\ref{lem:minisog} and Definition~\ref{def:minisog}. Then $U_x\subseteq U_{\wt x}$ and we have a surjective map $\Lambda_x\twoheadrightarrow \Lambda_{\wt x}$ which is induced from the natural map \begin{equation} \label{eq:minisog} G^1(\Q)\backslash G^1(\A_f)/U_x \longrightarrow G^1(\Q)\backslash G^1(\A_f)/U_{\wt x}. \end{equation} If the open compact subgroup $U_{\wt x}$ is maximal, then $U_{\wt x}$ is conjugate to $U_{g,p^c}$ for some $0\le c\le \lfloor g/2\rfloor$ and the map $\pi: \Lambda_x \twoheadrightarrow \Lambda_{g,p^c}$ in Lemma~\ref{lem:Lgpc} is realised by the minimal isogeny $\varphi$. \begin{lemma}\label{lem:g1} Let $x$ be a point in $\calS_{g}(k)$. If $g=1$, then $|\Lambda_x|=1$ if and only if $p\in \{2,3,5,7,13\}$. \end{lemma} \begin{proof} In this case, the set $\Lambda_x$ is the supersingular locus $\Lambda_{1,1}$. The assertion is well-known and also follows from Theorem~\ref{thm:mainarith}.(1). \end{proof} \begin{lemma}\label{lem:g2} Let $x$ be a point in $\calS_{g}(k)$. If $g=2$, then $|\Lambda_x|=1$ if and only if $p\in \{2,3\}$. \end{lemma} \begin{proof} For the superspecial case, by the first part of Theorem~\ref{thm:mainarith}.(2) we have $H_2(p,1)=1$ if and only if $p=2,3$. For the non-superspecial case, it follows from Theorem~\ref{thm:nsspg2}.(3) that $|\Lambda_x|=1$ for every non-superspecial point $x\in \calS_{2}(k)$ if and only if $p=2, 3$. \end{proof} \begin{lemma}\label{lem:g5+} Let $x$ be a point in $\calS_{g}(k)$. If $g\ge 5$, then $|\Lambda_x|>1$. \end{lemma} \begin{proof} We first show that $|\Lambda_{g,p^c}|>1$ for all primes $p$ and all integers $c$ with $0\le c \le \lfloor g/2 \rfloor$. From Theorem~\ref{thm:sspmass} we have $\Mass(\Lambda_{g,p^c})=v_g \cdot L_{g,p^c}$.
Using Lemma~\ref{lem:vn} and the proof of Corollary~\ref{cor:ge6}, we show that $|\Lambda_{g,p^c}|>1$ for all $g\ge 6$, all primes $p$ and all $c$. By Theorem~\ref{thm:mainarith}, we have $|\Lambda_{5,p^0}|=H_{5}(p,1)>1$ and $|\Lambda_{5,p^2}|=H_{5}(1,p)>1$ for all primes $p$. Using Theorem~\ref{thm:sspmass} and \eqref{eq:Lambda5p}, we have $\Mass(\Lambda_{5,p})=v_5\cdot L_{5,p^1}=\Mass(\Lambda_{5,p^0})(p^5+1)$ and $(p^3-1)$ divides $L_{5,p}$. From this the same proof of Theorem~\ref{thm:mainarith} shows that $|\Lambda_{5,p}|>1$ for all primes $p$. By Lemma~\ref{lem:Lgpc}, for every point $x\in \calS_{g}(k)$ we have $|\Lambda_x|\ge |\Lambda_{g,p^c}|$ for some $0\le c \le \lfloor g/2 \rfloor$. Therefore, $|\Lambda_x|>1$. \end{proof} For any matrix $A=(a_{ij})\in \Mat_g(\F_{p^2})$, write $A^*=\ol A^ T=(a_{ji}^p)$, where $\ol A=(a_{ij}^p)$ and $T$ denotes the transpose. Let \[ U_g(\Fp):=\{A\in \Mat_g(\F_{p^2}) : A\cdot A^*= {\bbI}_g \} \] denote the unitary group of rank $g$ associated to the quadratic extension $\F_{p^2}/\Fp$. Let ${\rm Sym}_g(\F_{p^2})$ $\subseteq \Mat_g(\F_{p^2})$ be the subspace consisting of all symmetric matrices and let $\Sym_g(\F_{p^2})^0\subseteq \Sym_g(\F_{p^2})$ be the subspace consisting of matrices $B=(b_{ij})$ with $b_{ii}=0$ for all $i$. \begin{definition}\label{def:EGH} Let $\calE\subseteq \Mat_g(\F_{p^2})$ be a maximal subfield of degree $g$ over $\F_{p^2}$ stable under the involution $*$. Let \begin{align} \label{eq:group_dc1} G&:=\left \{ \begin{pmatrix} \bbI_g & 0 \\ B & \bbI_g \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & \ol A \end{pmatrix}\in \GL_{2g}(\F_{p^2}): A\in U_g(\Fp), B\in {\rm Sym}_g(\F_{p^2})\, \right \}; \\ \calE^1&:=\{ A\in \calE^\times: A^* A=\bbI_g\}= \calE^\times \cap U_g(\Fp); \\ H&:=\left \{ \begin{pmatrix} \bbI_g & 0 \\ B & \bbI_g \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & \ol A \end{pmatrix}: A\in \calE^1, \quad B\in {\rm Sym}_g(\F_{p^2})^0 \right \}; \label{eq:group_dcH} \\ \Gamma &:= \left \{ \begin{pmatrix} \bbI_g & 0 \\ B & \bbI_g \end{pmatrix} \begin{pmatrix} A & 0 \\ 0 & \ol A \end{pmatrix}: A\in \diag(\F_{p^2}^1, \dots, \F_{p^2}^1) \cdot S_g,\ B\in \diag(\F_{p^2}, \dots, \F_{p^2}) \, \right \}, \end{align} where $\F_{p^2}^1\subseteq \F_{p^2}^\times$ denotes the subgroup of norm one elements and $S_g$ denotes the symmetric group of $\{1,\dots, g\}$. \end{definition} \begin{lemma}\label{lm:group_dcoset} Using the notation introduced in Definition~\ref{def:EGH}, the following statements hold. \begin{enumerate} \item Up to isomorphism, the double coset space $(\diag(\F_{p^2}^1, \dots, \F_{p^2}^1) \cdot S_g) \backslash U_g(\Fp)/\calE^1$ is independent of the choice of $\calE$. \item For $p=2$, up to isomorphism, the double coset space $\Gamma \backslash G/H$ is independent of the choice of $\calE$. \end{enumerate} \end{lemma} \begin{proof} \begin{enumerate} \item We know that $\calE^1$ is a cyclic group of order $p^g+1$ and choose a generator $\eta$ of~$\calE^1$. One has $\eta ^* \eta =\bbI_g$ and $\calE=\F_{p^2}[\eta]$. Suppose that $\calE_1$ is another maximal subfield stable under~$*$. We will first show that $\calE_1$ is conjugate to $\calE$ under $U_g(\Fp)$. By the Noether-Skolem theorem, there is an element $\gamma\in \GL_g(\F_{p^2})$ such that $\calE_1=\gamma \calE \gamma^{-1}$. Clearly, $\calE_1=\F_{p^2}[\eta_1]$ is generated by $\eta_1:=\gamma \eta \gamma^{-1}$ and $\eta_1$ has order $p^g+1$. 
We also have $\eta_1^* \eta_1=\bbI_g$; this follows from the fact that the norm-one subgroup $\calE^1_1\subseteq \calE_1^\times$ is the unique subgroup of order $p^g+1$ and that $\eta_1$ has order $p^g+1$. It follows from $\eta_1^* \eta_1 =\bbI_g$ that $(\gamma^{-1})^* \eta ^* \gamma^* \gamma \eta \gamma^{-1}=\bbI_g$. Putting $\alpha=\gamma^* \gamma$, we find that \[ \eta^* \alpha \eta = \alpha\quad \text{and} \quad \alpha \eta \alpha^{-1}=\eta. \] That is, $\alpha$ commutes with $\calE$, and $\alpha\in \calE^\times$ because $\calE$ is a maximal subfield. As $\alpha=\gamma^* \gamma$, $\alpha$ lies in the subfield $F\subseteq \calE$ fixed by the automorphism $*$ of order $2$. Since the norm map $N:\calE^\times \to F^\times, x \mapsto x^* x$ is surjective, we have $\alpha=\beta^* \beta$ for some $\beta\in \calE^\times$. Let $\gamma_1:=\gamma \beta^{-1}$. Then \[ \gamma_1^* \gamma_1 =(\gamma \beta^{-1})^* (\gamma \beta^{-1})=(\beta^{-1})^* \gamma^* \gamma \beta^{-1}=(\beta^{-1})^* \alpha \beta^{-1}=(\beta^{-1})^* \beta^* \beta \beta^{-1}=\bbI_g. \] Therefore, $\calE_1=\gamma_1 \calE \gamma_1^{-1}$ and $\gamma_1\in U_g(\Fp)$. Right translation by $\gamma_1^{-1}$ gives an isomorphism $(\diag(\F_{p^2}^1, \dots, \F_{p^2}^1) \cdot S_g) \backslash U_g(\Fp)/\calE^1\simeq (\diag(\F_{p^2}^1, \dots, \F_{p^2}^1) \cdot S_g) \backslash U_g(\Fp)/\calE^1_1.$ This proves (1). \item We may regard $U_g(\Fp)$ as a subgroup of $G$ via the map $A\mapsto \begin{pmatrix} A & 0 \\ 0 & \ol A \end{pmatrix}$, and $\Sym_g(\F_{p^2})$ as a normal subgroup of $G$ via the map $B\mapsto \begin{pmatrix} \bbI_g & 0 \\ B & \ol \bbI_g \end{pmatrix}$, so that $G$ is the semi-direct product $\Sym_g(\F_{p^2}) \rtimes U_g(\Fp)$; conjugation by $G$ on $\Sym_g(\F_{p^2})$ gives an action of $U_g(\F_p)$ on $\Sym_g(\F_{p^2})$ via $A\cdot B= \ol A B \ol A^T$, where $A\in U_g(\F_{p})$ and $B\in \Sym_g(\F_{p^2})$. Suppose that $\calE_1$ is another maximal subfield of $\Mat_g(\F_{p^2})$ stable under $*$ and that $H_1\subseteq G$ is the extension of $\Sym_g(\F_{p^2})^0$ by $\calE_1^1$ as in \eqref{eq:group_dcH}. As in part (1) of the lemma, it suffices to show that $H_1$ is conjugate to $H$ under~$G$. And to do so, it suffices to show they are conjugate under $U_g(\Fp)$, since this is a subgroup of $G$. In part~(1) we have shown that $\calE_1=\gamma_1 \calE \gamma_1^{-1}$ and $\gamma_1\in U_g(\Fp)$, so it follows that $\calE_1^1$ and $\calE^1$ are conjugate under $U_g(\Fp)$. Therefore, we are reduced to showing that ${\rm Sym}_g(\F_{p^2})^0$ is stable under the action of $U_g(\F_{p})$. Since $p=2$, one checks directly that the diagonal entries of the matrix $A (I_{ij}+I_{ji}) \ol A^T$ are all zero, where $I_{ij}$ is the matrix whose entries are $1$ at $(i,j)$ and zero elsewhere. Since the elements $I_{ij}+I_{ji}$ for $i\neq j$ generate ${\rm Sym}_g(\F_{p^2})^0$, we find that it is indeed stable under the action of $U_g(\F_{p})$. This proves (2). \end{enumerate} \end{proof} Let $\F_q$ be a finite field of characteristic $p$. Let $(V_0,\psi_0)$ be a non-degenerate symplectic space over $\F_q$ of dimension $2c$ and denote by $A\mapsto A^\dagger$ the symplectic involution on $\End(V_0)$ with respect to $\psi_0$. 
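Explicitly, the involution $\dagger$ is characterised by the adjunction formula \[ \psi_0(Av,w)=\psi_0(v,A^\dagger w) \quad \text{for all } A\in \End(V_0) \text{ and } v,w\in V_0. \]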
For any $k$-subspace $W$ of $V_0\otimes_{\Fq} k$, the endomorphism algebra of~$W$ over $\Fq$ is defined as \begin{equation}\label{eq:endW} \End(V_0,W):=\{ A\in \End(V_0): A(W)\subseteq W \}, \end{equation} and the automorphism group of $W$ in the symplectic group $\Sp(V_0)$ is defined as \begin{equation} \label{eq:SpW} \Sp(V_0,W):=\Sp(V_0)\cap \End(V_0,W). \end{equation} We denote by $\psi:V_0\otimes_{\Fq} k\times V_0\otimes_{\Fq} k \to k$ the extension of $\psi_0$ by $k$-linearity. Let $C_n$ denote a cyclic group of order $n$. \begin{proposition}\label{prop:Cq^2+1} If $W$ is a non-zero isotropic $k$-subspace of $V_0\otimes_{\Fq} k$ such that $\Sp(V_0,W)\supseteq C_{q^{c}+1}$, then $\End(V_0,W)\simeq \Mat_{{2c}/d}(\F_{q^d})$ for some positive integer $d|{2c}$ such that $\ord_2(d)=\ord_2({2c})$. Moreover, if ${2c}$ is a power of $2$, then $\End(V_0,W)\simeq \F_{q^{2c}}$ and $\Sp(V_0,W)=C_{q^{c}+1}$. \end{proposition} \begin{proof} Let $\eta$ be a generator of $C_{q^{c}+1}$ and let $\calE=\Fq[\eta]$ be the $\Fq$-subalgebra of $\End(V_0)$ generated by $\eta$. Since $\vert C_{q^{c}+1}\vert$ is prime to $q$, the group algebra $\Fq[C_{q^{c}+1}]$ is semi-simple and it maps onto $\calE$. On the other hand, the finite field $\F_{q^{2c}}$ is the smallest field extension of~$\Fq$ which contains an element of order $q^{c}+1$, so $\calE$ contains a copy $F$ of $\F_{q^{2c}}$ in $\End(V_0)$. Since $\dim V_0 = {2c} = [\F_{q^{2c}}:\F_q]$, we see that $F$ is a maximal subfield of $\End(V_0)$ and hence $\calE=F$. Since $C_{q^{c}+1}\subseteq \Sp(V_0)$ and $\ord(\eta)=q^{c}+1$, we have that $\eta^\dagger=\eta^{-1}\in C_{q^{c}+1}$ and $\eta^{\dagger}\neq \eta$. So $\calE$ is stable under $\dagger$, and $\dagger$ is an automorphism of $\calE$ of order $2$. Moreover, $C_{q^{c}+1}$ is equal to the subgroup $\calE^1=\{a\in \calE^\times: N_{\calE/\calE_0}(a)=1\}$ of norm one elements in $\calE^\times$, where $\calE_0$ is the subfield of $\calE$ fixed by $\dagger$. Let $\Sigma_\calE:=\Hom_{\Fq}(\calE, \ol \F_{p})$ denote the set of embeddings of $\calE$ into $\Fpbar$; it is equipped with a left action by $\Gal(\F_{q^{2c}}/\Fq)=\Gal(\calE/\Fq)=\< \sigma \>\simeq \Z/{2c}\Z$, which acts simply transitively. Arrange $\Sigma_{\calE}=\{\sigma_i: i\in \Z/{2c}\Z\}$ in such a way that $\sigma\cdot \sigma_{i}=\sigma_{i+1}$ for all $i\in \Z/{2c}\Z$ and denote by $V^i$ the $\sigma_i$-isotypic eigenspace of $V_0\otimes k$. Then $V_0\otimes_{\Fq} k=\oplus_{i\in \Z/{2c}\Z} V^i$ is a decomposition into simple $(\calE\otimes_{\Fq} k)$-submodules. Since $W\subseteq V_0\otimes_{\Fq} k$ is an $(\calE\otimes_{\Fq}k)$-submodule, there is a unique and non-empty subset $J\subseteq \Z/{2c}\Z$ such that $W=\oplus_{i\in J} V^i$. Note that the involution $\dagger$ acts on $\Sigma_{\calE}$ from the right and one has $\sigma_i^\dagger=\sigma_{i+c}$. We claim that $J\cap J^\dagger=\emptyset$. For $i,j\in \Z/{2c}\Z$, one computes that \[ \sigma_i(a)\psi(v_1,v_2)=\psi(a\cdot v_1, v_2)=\psi(v_1, a^\dagger \cdot v_2)=\sigma_{j+c}(a) \psi(v_1,v_2) \] for any $a\in \calE$, $v_1\in V^i$ and $v_2\in V^j$. It follows that $\psi(V^i, V^j)=0$ whenever $i-j\neq c$ in $\Z/{2c}\Z$; since $\psi$ is non-degenerate, we conversely have $\psi(V^i, V^{i+c})\neq 0$ for every $i$. Since $W$ is isotropic, $J$ therefore does not contain $\{i,i+c\}$ for any $i$, so $J\cap J^\dagger=\emptyset$, as claimed.
We represent the matrix algebra $\End(V_0)$ over $\Fq$ as a cyclic algebra, cf.~\cite[Theorem 30.4]{reiner:mo}: \[ \End(V_0)=\calE[z], \qquad z^{2c}=1, za z^{-1}=\sigma(a) \ \text{ for all } a\in \calE. \] Multiplication by $z$ maps $V^i$ onto $V^{i-1}$: \[ a\cdot zv= z(\sigma^{-1}(a)\cdot v)=z \sigma_{i-1}(a) v= \sigma_{i-1}(a) z v, \quad \forall \, a\in \calE, v\in V^i.\] Consider an element $x=\sum_{i\in \Z/{2c}\Z} a_i z^i\in \End(V_0,W)$; if $a_i\neq 0$, then $J$ is stable under the shift by $-i$. Let $d\ge 1$ be the smallest integer with $d|{2c}$ such that $J$ is stable under the shift by~$-d$. Then $\End(V_0,W)=\calE[z^d]\simeq \Mat_{{2c}/d}(\F_{q^d})$. Since $J\cap J^\dagger=\emptyset$, we have $d\nmid~c$. Therefore, $d$ is a positive divisor of ${2c}$ such that $\ord_2(d)=\ord_2({2c})$. This proves the first statement. When ${2c}$ is a power of $2$, the condition on $d$ implies $d={2c}$ and therefore $\End(V_0,W)=\calE$. This implies that $\Sp(V_0,W)=\calE^1=C_{q^{c}+1}$ and hence proves the second statement. \end{proof} \subsection{The case $\boldsymbol{g=3}$}\label{ssec:g3}\ \begin{lemma}\label{lem:p2coset} We use the notation for $G, \Gamma, H$ defined in Definition~\ref{def:EGH}. For $g=3$ and $p=2$, we have $|\Gamma \backslash G/H|=2$. \end{lemma} \begin{proof} Put $U:={\rm Sym}_g(\F_{p^2})$, embedded into $\mathrm{GL}_{2g}(\F_{p^2})$ via $B \mapsto \left( \begin{smallmatrix} \bbI_g & 0 \\ B & \bbI_g \end{smallmatrix}\right)$. Then $U_\Gamma:=U\cap \Gamma \simeq \diag(\F_{p^2}, \dots, \F_{p^2})$ and $U_H:=U\cap H \simeq {\rm Sym}_g(\F_{p^2})^0$. Consider the surjective map induced by the natural projection \[ \pr: \Gamma\backslash G/H \to (\diag(\F_{p^2}^1, \dots, \F_{p^2}^1) \cdot S_g) \backslash U_g(\Fp)/\calE^1. \] One shows directly that the fibre over the double coset $(\diag(\F_{p^2}^1, \dots, \F_{p^2}^1) \cdot S_g)\cdot A\cdot \calE^1$ for an element $A\in U_g(\Fp)$ is a quotient of $U/(U_\Gamma+ \ol A U_H \ol A^{T})$. Since $\ol A U_H \ol A^{T}=U_H$ for $p=2$ by Lemma~\ref{lm:group_dcoset}.(2), we have $U_\Gamma+ \ol A U_H \ol A^{T}=U_\Gamma+U_H=U$, so every fibre consists of a single element and hence $\pr$ is a bijection. Now let $g=3$ and $p=2$; we need to show that the target of $\pr$ has two double cosets. Put $\F_4=\F_2[\zeta]$ with $\zeta^2+\zeta+1=0$ and \begin{equation}\label{eq:etaA} \eta:= \begin{pmatrix} 0 & 0 & \zeta \\ 1 & 0 & 0 \\ 0 & 1 & 0 \\ \end{pmatrix}, \quad A: = \begin{pmatrix} 1 & \zeta & \zeta \\ \zeta & 1 & \zeta \\ \zeta & \zeta & 1 \\ \end{pmatrix}. \end{equation} We choose $\calE^1=\< \eta\>$ and verify directly that \[ U_3(\F_2)= \Big( \diag(\F_4^\times, \F_4^\times, \F_4^\times)S_3\cdot \bbI_3 \cdot \calE^1 \Big) \, \coprod\, \Big( \diag(\F_4^\times, \F_4^\times, \F_4^\times)S_3\cdot A \cdot \calE^1 \Big). \] This shows that $|\Gamma\backslash G/H| = 2$; recall from Lemma~\ref{lm:group_dcoset}.(2) that the double coset space is independent of the choices made. A cardinality count for the displayed decomposition of $U_3(\F_2)$ is given in Remark~\ref{rem:p2count} below. \end{proof} Recall that $\mathcal{P}_{\mu}$ (resp.~$\calP'_\mu$) denotes the moduli space over $\F_{p^2}$ of three-dimensional polarised flag type quotients (resp.~three-dimensional rigid polarised flag type quotients) with respect to a principal polarisation $\mu$ on $E^3$. The moduli space $\mathcal{P}_{\mu}$ is a $\bbP^1$-bundle $\pi: \mathcal{P}_{\mu}\to C$ over the Fermat curve $C\subseteq \bbP^2$. We express each point $y\in \calP_\mu(k)$ as $(t,u)$, where $t=\pi(y)$ and $u\in \pi^{-1}(t)=: \bbP^1_t(k)$; see Subsection 3.3.2 for more details.
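\begin{remark}\label{rem:p2count} As a numerical check on the decomposition in the proof of Lemma~\ref{lem:p2coset}, note that the standard order formula for the finite unitary group gives \[ \vert U_3(\F_2)\vert = 2^{3}\,(2+1)(2^{2}-1)(2^{3}+1)=648, \] while $\vert \diag(\F_4^\times, \F_4^\times, \F_4^\times)S_3\vert = 3^3\cdot 3! =162$ and $\vert \calE^1\vert =2^3+1=9$; here $\F_4^1=\F_4^\times$, since every element of $\F_4^\times$ has norm one over $\F_2$. Since $\eta=\diag(\zeta,1,1)\cdot P$, where $P$ is the permutation matrix of the $3$-cycle $(1\,2\,3)$, we have $\calE^1=\< \eta\>\subseteq \diag(\F_4^\times, \F_4^\times, \F_4^\times)S_3$, so the first double coset in the displayed decomposition is $\diag(\F_4^\times, \F_4^\times, \F_4^\times)S_3$ itself, of cardinality $162$; the second double coset therefore consists of the remaining $648-162=486$ elements. \end{remark}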
\begin{proposition} \label{lm:a1DCFp6} Let $p=2$, let $x=(X,\lambda)\in \calS_{3}(k)$ with $a(x)=1$ and let $y=(t,u)\in \calP'_\mu(k)$ be a point over $x$ for the unique element $\mu=\lambda_{\rm can}$ in $P(E^3)$. Assume that $y\in \calD$ and $t\in C(\F_{p^6})$. Then $|\Lambda_x|=2$. Moreover, the two members $(X',\lambda')$ and $(X'',\lambda'')$ of $\Lambda_x$ have automorphism groups \[ \Aut(X',\lambda')\simeq C_2^3\rtimes C_9, \quad \Aut(X'',\lambda'')\simeq C_2^3 \times C_3, \] where $C_9$ acts on $C_2^3$ by a cyclic shift. \end{proposition} \begin{proof} Let $x_2=(X_2,\lambda_2)\to x=(X,\lambda)$ be the minimal isogeny for $(X,\lambda)$. As $a(X)=1$ and the class number $H_3(2,1)=1$, we have $(X_2,\lambda_2)\simeq (E^3, p\lambda_{\rm can})$. Again using $H_3(2,1)=1$, one has $|G^1(\Q)\backslash G^1(\A_f)/U_{x_2}|=1$ so $G^1(\A_f)=G^1(\Q)U_{x_2}$; recall that $U_x = G_x(\wh \Z)$ for any $x$. Hence, \[ \Lambda_x\simeq G^1(\Q)\backslash G^1(\A_f)/U_x= G^1(\Q)\backslash G^1(\Q)U_{x_2} /U_{x}=G^1(\Z)\backslash G_{x_2}(\Zp) /G_{x}(\Zp), \] where $G^1(\Z)=G^1(\Q)\cap U_{x_2}=\Aut(X_2,\lambda_2)$, as $G_{x_2}(\Z_\ell)=G_x(\Z_\ell)$ for all primes $\ell\neq~p$. Let $\ul M_2=(M_2,\<\, ,\>_2)$ and $\ul M=(M,\<\, ,\>)$ be the (contravariant) polarised \dieu modules associated to $(X_2,\lambda_2)$ and $(X,\lambda)$, respectively. We have $M\subseteq M_2$ with $\dim_k (M_2/M)=3$. Furthermore, $\<\, ,\>$ is a perfect pairing for $M$ and it is the restriction of $\<\, ,\>_2$ on $M$. Regarding $G_{x_2}(\Zp)=\Aut_{\rm DM}(\ul M_2)$, we have a reduction-modulo-$p$ map: \[ m_p: G_{x_2}(\Zp)=\Aut_{\rm DM}(\ul M_2) \to \Aut(M_2/pM_2); \] we write $G_{\ul M_2}$ for its image. For $G_{x}(\Zp)=\Aut_{\rm DM}(\ul M)$, it then follows from the construction that \[ G_x(\Zp)=\{h\in G_{x_2}(\Zp): m_p(h)(M/pM_2)=M/pM_2\}. \] Therefore, $G_{x}(\Zp)$ contains the kernel $\ker( m_p) \subseteq G_{x_2}(\Zp)$ and we obtain \begin{equation}\label{eq:p2} \Lambda_x\simeq \Gamma \backslash G_{\ul M_2} /G_{\ul M}, \end{equation} where $G_{\ul M}$ is the image $m_p(G_{x}(\Zp))$ and $\Gamma:=m_p(G^1(\Z))$. It follows from \cite[Lemma 6.1]{karemaker-yobuko-yu} that reduction modulo $p$ gives an exact sequence \[ \begin{CD} 1 @>>> C_2^3 @>>> \Aut(X_2, \lambda_2) @>{m_p}>> \Gamma @>>> 1. \end{CD} \] Let $O=\End(E)$ be a maximal order of $\End^0(E)\simeq B_{p,\infty}$ and let $\Pi\in O$ be the Frobenius endomorphism. Clearly, $G_{\ul M_2} = m_p(\Aut_{\rm DM}(\ul M_2))$ is a subgroup of $\GL_3(O/pO)=\GL_3(\F_{p^2}[\Pi])$. In fact, the group $G_{\ul M_2}$ is isomorphic to the group $G$ of Definition~\ref{def:EGH}; cf.~\cite[Definition 5.3]{karemaker-yobuko-yu}. By further reduction modulo $\Pi$, we obtain an exact sequence \[ \begin{CD} 1 @>>> U:=\Sym_3(\F_4) @>>> G_{\ul M_2} @>{m_\Pi}>> U_3(\F_2) @>>> 1. \end{CD} \] Let $\calE$ be the image of $\End_{\rm DM}(\ul M)$ in $m_\Pi(\End_{\rm DM}(\ul M_2))$. Since $p=2$ and $t\in C(\F_{2^6})$, $\calE\simeq \F_{4^3}$ is a subalgebra of $\Mat_3(\F_4)$ of degree $3$ which is stable under the induced involution $*$, and $U \cap G_{\ul M}=\Sym_3(\F_4)^0$. Therefore, $G_{\ul M}$ is isomorphic to the group $H$ in Definition~\ref{def:EGH}. As \[ G^1(\Z) = \Aut(X_2, \lambda_2) \simeq \Aut(E^3,\lambda_{\rm can}) \simeq (O^\times)^3\cdot S_3, \] we further see that $\Gamma$ is the same as in Definition~\ref{def:EGH}. 
So by Lemma~\ref{lem:p2coset}, for $x = (X, \lambda)$, the set \[ \text{$\Lambda_x \simeq \Gamma \backslash G / H$ has two elements}, \] represented by \begin{equation}\label{eq:Xlreps} (X',\lambda') \leftrightarrow G^1(\Z) \cdot \bbI_3 \cdot G_x(\mathbb{Z}_p) \text{ and } (X'',\lambda'') \leftrightarrow G^1(\Z) \cdot \tilde{A} \cdot G_x(\mathbb{Z}_p), \end{equation} where $\tilde{A}$ is a lift of $A$ as in Equation~\eqref{eq:etaA}. That is, we may take \[ \tilde{A}: = \frac{1}{a} \begin{pmatrix} 1 & \zeta & \zeta \\ \zeta & 1 & \zeta \\ \zeta & \zeta & 1 \\ \end{pmatrix}, \text{ for } 1 \neq \zeta \in O^{\times} \text{ such that $\zeta^3 = 1$ and } a = 2+\zeta \in O. \] The coset representation in \eqref{eq:Xlreps} also immediately implies that \[ \Aut(X', \lambda') \simeq G^1(\Z) \cap G_x(\mathbb{Z}_p) \text{ and } \Aut(X'', \lambda'') \simeq G^1(\Z) \cap \tilde{A} G_x(\mathbb{Z}_p) \tilde{A}^{-1}. \] The group $G^1(\Z) \cap G_x(\mathbb{Z}_p)$ sits in the short exact sequence \[ \begin{tikzcd} 1 \arrow[r] & C_2^3 \arrow[r] & G^1(\Z) \cap G_x(\mathbb{Z}_p) \arrow[r,"m_\Pi"] & \calE^1 \arrow[r] & 1 \end{tikzcd}\] and one has $|\Aut(X',\lambda')|=8\cdot 9$. From the mass $\Mass(\Lambda_x)=1/(2\cdot 3^2)$ (see Theorem~\ref{introthm:a1}) and the fact that $\vert \Lambda_x \vert = 2$, we immediately see that $|\Aut(X'',\lambda'')|=8\cdot 3$. To determine the automorphism groups precisely, we argue as follows. We have that $x = (X, \lambda)$ either equals $(X',\lambda')$ or equals $(X'',\lambda'')$. In either case, the group $\Aut(X,\lambda)$ is the subgroup of $\Aut(X_2,\lambda_2)$ consisting of elements $h$ such that $m_p(h)\in~H$. Since $U_\Gamma\cap U_H$ is trivial, its image $m_p(\Aut(X,\lambda))$ is the same as its image $m_\Pi(\Aut(X,\lambda))\subseteq \calE^1\simeq C_9$. Moreover, we know that $G^1(\Z) = (O^\times)^3\cdot S_3$ and that \[ G_x(\Z_p) = m_p^{-1}(H) = m_p^{-1}( \Sym_3(\F_4)^0 \calE^1) = m_{\Pi}^{-1} (\calE^1), \] where \[ C_2^3 \simeq \diag(\pm 1, \pm 1, \pm 1) = \ker(m_p)\cap (O^\times)^3\cdot S_3 \subseteq \ker(m_{\Pi})\cap (O^\times)^3\cdot S_3 \] and $C_9 \simeq \calE^1 = \langle \eta \rangle \subseteq (O^\times)^3\cdot S_3$ by construction. For $(X', \lambda')$ we therefore must have \[ \Aut(X', \lambda') \simeq G^1(\Z) \cap G_x(\mathbb{Z}_p) = C_2^3 \rtimes C_9 \] of cardinality $8\cdot 9$, since the conjugation action by $\eta$ on $\diag(\pm 1, \pm 1, \pm 1)$ is non-trivial. For $(X'',\lambda'')$, we note that $\tilde{A} \in G_{x_2}(\Z_p)$ normalises $\ker(m_{\Pi}) = m_p^{-1}( \Sym_3(\F_4)^0)$ by construction and compute that \[ \tilde{A} \eta \tilde{A}^{-1} = \frac{1}{2+\overline{\zeta}} \begin{pmatrix} 1 & 1 & 1 \\ 1 & \zeta & \overline{\zeta} \\ \zeta & 1 & \overline{\zeta} \\ \end{pmatrix} =: B, \] where $\overline{\zeta} = \zeta^{-1}$. Hence, we get \[ \Aut(X'', \lambda'') \simeq G^1(\Z) \cap \tilde{A} G_x(\mathbb{Z}_p) \tilde{A}^{-1} = \diag(\pm 1, \pm 1, \pm 1) \cdot \{ B^3, B^6, B^9 = \bbI_3 \} \simeq C_2^3 \times C_3 \] of cardinality $8 \cdot 3$, since the conjugation action by $\{\bbI_3, B^3, B^6\}$ is trivial. \end{proof} \begin{proposition}\label{prop:autg3} Let $p=2$, choose $x=(X,\lambda)\in \calS_{3}(k)$ and let $y=(t,u)\in \calP'_\mu(k)$ be a point over $x$ for the unique element $\mu=\lambda_{\rm can}$ in $P(E^3)$. \begin{enumerate} \item Suppose that $t\in C(\F_{p^2})$, that is, $a(x)\ge 2$. 
Then $\vert \Lambda_x \vert=1$ and we have that \begin{equation} \label{eq:auta23} \vert \Aut(X,\lambda)\vert= \begin{cases} 24^3\cdot 6=2^{10}\cdot 3^4 & \text{if } u\in \mathbb{P}_t^1(\mathbb{F}_{p^2}); \\ 24\cdot 160=2^8\cdot 3\cdot 5 & \text{if } u\in\mathbb{P}_t^1(\mathbb{F}_{p^4})\setminus \mathbb{P}_t^1(\mathbb{F}_{p^2}); \\ 24\cdot 32=2^8\cdot 3 & \text{ if } u \not\in \mathbb{P}_t^1(\mathbb{F}_{p^4}). \end{cases} \end{equation} \item Suppose that $t\not\in C(\F_{p^2})$, that is, $a(x)=1$. Then \begin{equation} \label{eq:cna1} \vert \Lambda_x \vert= \begin{cases} 4 & \text{ if } y \notin \calD; \\ 4 & \text{ if } t \notin C(\mathbb{F}_{p^6}) \text{ and } y \in \calD; \\ 2 & \text{ if } t \in C(\mathbb{F}_{p^6}) \text{ and } y \in \calD. \end{cases} \end{equation} \end{enumerate} \end{proposition} \begin{proof} \begin{enumerate} \item If $u \in \bbP^1_t(\F_{p^2})$, then $a(x)=3$ and $\vert \Lambda_x \vert =H_3(2,1)=1$, and one computes that $\Mass(\Lambda_x)=1/(2^{10}\cdot 3^4)$. Therefore, $\vert \Aut(X,\lambda) \vert=24^3\cdot 6$. Alternatively, this also follows from \cite[Lemma 7.1]{karemaker-yobuko-yu}. Now we assume that $a(x)=2$. Using the mass formula (cf.~Theorem~\ref{introthm:a2}), we compute that \begin{equation} \label{eq:massa2} \Mass(\Lambda_x)= \begin{cases} 1/(2^8\cdot 3\cdot 5) & \text{if } u\in\mathbb{P}_t^1(\mathbb{F}_{p^4})\setminus \mathbb{P}_t^1(\mathbb{F}_{p^2}); \\ 1/(2^8\cdot 3) & \text{ if } u \not\in \mathbb{P}_t^1(\mathbb{F}_{p^4}). \end{cases} \end{equation} Let $(E_k^3,p\mu)\xrightarrow{\rho_2} (Y_1,\lambda_1)\xrightarrow{\rho_1} (Y_0,\lambda_0)\simeq (X,\lambda)$ be the PFTQ corresponding to the point $y=(t,u)$. Since $t\in C(\F_{p^2})$, $Y_1$ is superspecial and $(Y_1,\lambda_1)\simeq (E_k,\lambda_E)\times (E_k^2, \mu_1)$, where $\lambda_E$ is the canonical principal polarisation on $E$ and $\mu_1\in P_1(E^2)$. Since $p=2$, we have $\vert \Aut(E,\lambda_E)\vert=\vert \Aut(E)\vert=24$ and $\vert \Aut(E^2,\mu_1)\vert=1920$, cf.~\cite{ibukiyama:autgp1989}. By Corollary~\ref{cor:Autsp} and Equation~\eqref{eq:AutXl}, we have \[ |\Aut\left ( (E,\lambda_E)\times (E^2, \mu_1)\right )|=|\Aut(E,\lambda_E)|\times |\Aut(E^2,\mu_1)|=24\cdot 1920. \] Notice that $\ker(\rho_1)$ is contained in $\ker(\mu_1)$ since $\ker(\lambda_E)$ is trivial. Therefore, $(X,\lambda)$ is isomorphic to $(E,\lambda_E)\times (X', \lambda')$, where $X'=E_k^2/\ker(\rho_1)$. The computation of $\Aut (X,\lambda)$ is now reduced to computing $\Aut(X',\lambda')$. By Corollary~\ref{cor:p2g2aut}, we have \[ \vert \Aut(X',\lambda') \vert= \begin{cases} 160 & \text{if $u\in \bbP^1(\F_{p^4})\setminus \bbP^1(\F_{p^2})$};\\ 32 & \text{if $u\in \bbP^1(k)\setminus \bbP^1(\F_{p^4})$}. \end{cases} \] Therefore, \[ \vert \Aut(X,\lambda) \vert= \begin{cases} 24\cdot 160 = 2^8 \cdot 3 \cdot 5 & \text{if $u\in \bbP^1(\F_{p^4})\setminus \bbP^1(\F_{p^2})$};\\ 24\cdot 32 = 2^8 \cdot 3 & \text{if $u\in \bbP^1(k)\setminus \bbP^1(\F_{p^4})$}. \end{cases} \] Comparing this result with the values of $\Mass(\Lambda_x)$ in \eqref{eq:massa2}, we conclude that $\vert \Lambda_x \vert=1$ in both cases. \item If $y\notin \calD$, by \cite[Corollary 7.5.(1)]{karemaker-yobuko-yu} we have that $\vert \Lambda_x \vert=4$. Suppose then that $y\in \calD$ and $t\notin C(\F_{p^6})$. For every point $x'$ in $\Lambda_x$, consider the corresponding polarised abelian variety $(X',\lambda')$ satisfying $(X',\lambda')[p^\infty]\simeq (X,\lambda)[p^\infty]$. 
If $y'=(t',u')\in \calP_\mu'(k)$ is a point over~$x'$, then again $y'\in \calD$ and $t'\notin C(\F_{p^6})$. Thus, by \cite[Theorem 7.9.(1)]{karemaker-yobuko-yu}, we have that $\Aut(X',\lambda')\simeq C_2^3 \times C_3$. Using the mass formula (cf.~Theorem~\ref{introthm:a1}), noting that $d(t)=3$ when $p=2$, we compute that \[ \Mass(\Lambda_x)=\frac{1}{6}. \] Therefore, $\vert \Lambda_x \vert =\vert C_2^3\times C_3\vert \cdot \Mass(\Lambda_x)=4$. For the last case, where $y\in \calD$ and $t\in C(\F_{p^6})$, the assertion $\vert \Lambda_x \vert=2$ follows directly from Proposition~\ref{lm:a1DCFp6}. \end{enumerate} \end{proof} \subsection{The case $\boldsymbol{g=4}$}\label{ssec:g4}\ \begin{definition} \begin{enumerate} \item An \emph{elementary sequence} is a map $\varphi: \{ 1, \ldots, g \} \to \mathbb{Z}_{\geq 0}$ such that $\varphi(0) =~0$ and $\varphi(i) \leq \varphi(i+1) \leq \varphi(i) + 1$ for all $0 \leq i < g$, cf.~\cite[Definition 5.6]{OortEO}. With each elementary sequence we associate an \emph{Ekedahl-Oort stratum}~$\mathcal{S}_{\varphi}$, which is a locally closed subset of the moduli space $\mathcal{A}_{g,1,n} \otimes \overline{\mathbb{F}}_p$ of principally polarised abelian varieties with level-$n$ structure. Roughly speaking, it consists of those varieties whose $p$-torsion has a canonical filtration described by $\varphi$. On $\mathcal{S}_g$ we consider the stratification induced by $\mathcal{S}_{\varphi} \cap \mathcal{S}_g$. \item The $p$-divisible group of an abelian variety of dimension~$g$ is determined up to isogeny by its Newton polygon, which can be described as a set of slopes $(\lambda_1, \ldots, \lambda_{2g})$ with $0 \leq \lambda_i \leq 1$ for all $1 \leq i \leq 2g$ and $\sum_i \lambda_i = g$, cf.~\cite{manin}. These slopes moreover satisfy that $\lambda_i + \lambda_{2g+1-i} = 1$ for all $1 \leq i \leq 2g$ and that the denominator of each $\lambda_i$ divides its multiplicity. All abelian varieties with the same Newton polygon form a \emph{Newton stratum} of $\mathcal{A}_g$. \item For $1 \leq a \leq g$, we will denote the \emph{$a$-number locus} of $\mathcal{S}_g$ by $\mathcal{S}_g(a) := \{ x \in \mathcal{S}_g(k) : a(x) = a \}$. \end{enumerate} \end{definition} \begin{proposition}\label{prop:EO} \begin{enumerate} \item The Ekedahl-Oort strata in dimension $g=4$ of $p$-rank zero are precisely the $\calS_{\varphi}$ for those $\varphi$ appearing in Figure~1. \item The stratum $\calS_{\varphi}$ for $\varphi = (0,1,2,3)$ has $a$-number $1$, those for $\varphi = (0,1,2,2)$, $(0,1,1,2)$, and $(0,0,1,2)$ have $a$-number $2$, those for $\varphi = (0,1,1,1)$, $(0,0,1,1)$, and $(0,0,0,1)$ have $a$-number $3$ and that for $\varphi = (0,0,0,0)$ has $a$-number $4$. \item The strata fully contained in the supersingular locus $\mathcal{S}_4$ are precisely the $\calS_{\varphi}$ for\\ $\varphi~=~(0,0,0,0), (0,0,0,1), (0,0,1,1)$, and $(0,0,1,2)$. \item The Newton strata of $p$-rank zero are those corresponding to the slope sequences \[ \left(\frac{1}{2},\frac{1}{2},\frac{1}{2},\frac{1}{2},\frac{1}{2},\frac{1}{2},\frac{1}{2},\frac{1}{2}\right), \left(\frac{1}{3},\frac{1}{3},\frac{1}{3},\frac{1}{2},\frac{1}{2},\frac{2}{3},\frac{2}{3},\frac{2}{3}\right), \text{ and } \left(\frac{1}{4},\frac{1}{4},\frac{1}{4},\frac{1}{4},\frac{3}{4},\frac{3}{4},\frac{3}{4},\frac{3}{4}\right), \] which we denote respectively by $\mathcal{N}_{\frac{1}{2}}$, $\mathcal{N}_{\frac{1}{3}}$, and $\mathcal{N}_{\frac{1}{4}}$. 
\item We have \[ \begin{split} \mathcal{S}_4 = \mathcal{N}_{\frac{1}{2}} & = \left( \mathcal{S}_{(0,1,2,3)} \cap \mathcal{S}_4 \right) \sqcup \mathcal{S}_{(0,0,0,0)} \sqcup \mathcal{S}_{(0,0,0,1)} \\ & \sqcup \mathcal{S}_{(0,0,1,1)} \sqcup \mathcal{S}_{(0,0,1,2)} \sqcup \left( \mathcal{S}_{(0,1,1,2)} \cap \mathcal{S}_4 \right), \end{split} \] and $\mathcal{S}_{(0,1,2,3)} \cap \mathcal{S}_4$ is dense. In particular, we have \[ \begin{split} \mathcal{S}_4(4) & = \mathcal{S}_{(0,0,0,0)}, \\ \mathcal{S}_4(3) & = \mathcal{S}_{(0,0,0,1)} \sqcup \mathcal{S}_{(0,0,1,1)}, \\ \mathcal{S}_4(2) & = \mathcal{S}_{(0,0,1,2)} \sqcup \left( \mathcal{S}_{(0,1,1,2)} \cap \mathcal{S}_4 \right). \end{split} \] \item We have \[ \mathcal{N}_{\frac{1}{3}} = \left( \mathcal{S}_{(0,1,2,3)} \cap \mathcal{N}_{\frac{1}{3}} \right) \sqcup \mathcal{S}_{(0,1,1,1)} \sqcup \left( \mathcal{S}_{(0,1,1,2)} \cap \mathcal{N}_{\frac{1}{3}} \right),\] and $\mathcal{S}_{(0,1,2,3)} \cap \mathcal{N}_{\frac{1}{3}}$ is dense. \item We have \[ \mathcal{N}_{\frac{1}{4}} = \left( \mathcal{S}_{(0,1,2,3)} \cap \mathcal{N}_{\frac{1}{4}} \right) \sqcup \mathcal{S}_{(0,1,2,2)}, \] and $\mathcal{S}_{(0,1,2,3)} \cap \mathcal{N}_{\frac{1}{4}}$ is dense. \end{enumerate} All intersections appearing in (5)--(7) are non-empty. \end{proposition} \begin{proof} The $p$-rank of an Ekedahl-Oort stratum $\mathcal{S}_{\varphi}$ is $\max\{ i : \varphi(i) = i \}$ and its $a$-number is $g - \varphi(g)$, proving (1) and (2). By \cite[Step 2, p.\ 1379]{COirr} we have $\mathcal{S}_{\varphi} \subseteq \mathcal{S}_4$ if and only if $\varphi(2) = 0$, proving (3). The $p$-rank of a Newton stratum is the number of zero slopes, which implies~(4). We read off from Figure~1 that $\mathcal{S}_{(0,1,2,3)} \cap \mathcal{S}_4$, $\mathcal{S}_{(0,1,2,3)} \cap \mathcal{N}_{\frac{1}{3}}$, and $\mathcal{S}_{(0,1,2,3)} \cap \mathcal{N}_{\frac{1}{4}}$ are the respective $a$-number $1$ loci of $\mathcal{S}_4$, $\mathcal{N}_{\frac{1}{3}}$, and $\mathcal{N}_{\frac{1}{4}}$. Hence, density of these intersections follows from \cite[Theorem 4.9(iii)]{lioort} for $\mathcal{S}_4$, and from combining \cite[Remark 5.4]{OortNP} with \cite[Theorem~3.1]{COirr} for $\mathcal{N}_{\frac{1}{3}}$ and $\mathcal{N}_{\frac{1}{4}}$. By \cite[Corollary 4.2 and Lemma 5.12]{harafirst} we see that $\mathcal{S}_{(0,1,2,2)} \subseteq \mathcal{N}_{\frac{1}{4}}$ by minimality of the associated $p$-divisible group, concluding the proof of~(7). Similarly, from \cite[Corollary 4.2 and Proposition 7.1]{harafirst}, we obtain that $\mathcal{S}_{(0,1,1,1)} \subseteq \mathcal{N}_{\frac{1}{3}}$, again by minimality. Finally, we read off from Figure~1 that \[ \mathcal{S}_{(0,1,1,2)} = \left( \mathcal{S}_{(0,1,1,2)} \cap \mathcal{N}_{\frac{1}{3}} \right) \sqcup \left( \mathcal{S}_{(0,1,1,2)} \cap \mathcal{S}_{4} \right). \] Now \cite[Theorem 4.17]{haraanumber} implies that $\mathcal{S}_4(2)$ has $H_4(1,p)+H_4(p,1)$ irreducible components of two types, of which those of the type corresponding to $\mathcal{S}_{(0,0,1,2)}$ account for $H_4(1,p)$ of them; see also~\cite[\S 9.9]{lioort}. Hence, the intersection $\mathcal{S}_{(0,1,1,2)} \cap \mathcal{S}_{4}$ must yield the other $H_4(p,1)$ components and thus be non-empty. On the other hand, since $\mathcal{S}_{(0,1,1,2)} \not\subseteq \mathcal{S}_4$ by (3), the intersection $\mathcal{S}_{(0,1,1,2)} \cap \mathcal{N}_{\frac{1}{3}}$ is also non-empty. This finishes the proof of (5) and (6), and hence the proof of the proposition.
\end{proof} \begin{figure}[H]\label{fig:EO} \begin{center} \begin{tikzcd} & & & |[blue]| (0,1,2,3) \arrow[dl, dash] \\ & & |[orange]| (0, 1, 2, 2) \arrow[dl, dash] & \\ & |[orange]| (0, 1, 1, 2) \arrow[dl, dash] \arrow[dr, dash] & & \\ |[purple]| (0,1,1,1) \arrow[dr, dash] & & |[orange]| (0,0,1,2) \arrow[dl, dash] & \\ & |[purple]| (0,0,1,1) \arrow[d, dash] & & \\ & |[purple]| (0,0,0,1) \arrow[d, dash] & & \\ & |[teal]| (0,0,0,0) & & \\ \end{tikzcd} \end{center} \caption{Ekedahl-Oort strata of $p$-rank zero in dimension $g=4$. The blue stratum has $a$-number $1$, the orange strata have $a$-number $2$, the red strata have $a$-number $3$ and the green stratum has $a$-number $4$. Strata are connected by a line if the lower one is contained in the Zariski closure of the upper one.} \end{figure} By Lemma~\ref{lem:Lgpc}, for every point $x\in \mathcal{S}_4(k)$, there exists an integer $0\leq c\leq 2$ such that there exists a surjective morphism $\pi:\Lambda_x \twoheadrightarrow \Lambda_{4,p^c}$. For Ekedahl-Oort strata with $g=4$ we have the following result: \begin{lemma}\label{lem:c} We have $c = 0$ for $x \in \mathcal{S}_{(0,0,0,0)}$, and $c =1$ for $x \in \mathcal{S}_{(0,0,0,1)}$, and $c = 2$ for $x \in \mathcal{S}_{(0,0,1,1)} \cup \mathcal{S}_{(0,0,1,2)}$. \end{lemma} \begin{proof} This follows from \cite[Proposition 3.3.2]{harashita}; the Deligne-Lusztig varieties $X(w')$ in \emph{loc. cit.} are given by $w' = \mathrm{id}$ when $c=0$, by $w' = (12)$ when $c=1$ and by $w' = (1342)$ or $(13)(24)$ when $c=2$. \end{proof} \begin{remark} One might ask whether the surjection $\Lambda_x \twoheadrightarrow \Lambda_{g,p^c}$ is realised through the minimal isogeny $\tilde{x} \twoheadrightarrow x$ for $x$, i.e., whether there is a natural isomorphism $\Lambda_{g,p^c}\isoto \Lambda_{\tilde{x}}$, sending $[X',\lambda']\mapsto [X', p^r \lambda']$, for appropriate values of $c$ and $r$, or equivalently, whether the open compact subgroup $U_{\wt x}$ is maximal. This is true if $g\le 3$. However, it is false in general, and we now give a counterexample. Take $g=4$ and $x=(X,\lambda)=(E, \lambda_E)\times (X_1,\lambda_1)$, where $E$ is a supersingular elliptic curve with canonical principal polarisation $\lambda_E$, and where $(X_1,\lambda_1)$ is a principally polarised supersingular abelian threefold with $a(X_1)=1$. The minimal isogeny $\wt x=(\wt X,\wt \lambda)$ of $(X,\lambda)$ is equal to $(E, \lambda_E)\times (\wt X_1,\wt \lambda_1)$, where $(\wt X_1,\wt \lambda_1)$ is the minimal isogeny of $(X_1, \lambda_1)$. Note that $U_{\wt x,p}=\Aut((\wt X,\wt \lambda)[p^\infty])$ is a maximal open compact subgroup if and only if $\wt X[\sfF^s]\subseteq \ker \wt \lambda\subseteq \wt X[\sfF^{s+1}]$ for some $s\in \Z_{\ge 0}$. To see this: if $(L,h)$ is the Hermitian $O$-lattice corresponding to $(\wt X,\wt \lambda)$, where $O\subseteq B_{p,\infty}$ is a maximal order, then the latter condition is equivalent to $\Pi^{s+1}_p L_p \subseteq L_p^\vee \subseteq \Pi^s_p L_p$ for some integer $s\in \Z_{\ge 0}$, where $\Pi_p$ is a uniformiser of $O_p$. The stabilisers of these $O_p$-lattices then give all maximal open compact subgroups of $G^1(\Q_p)$, as described in the proof of Lemma~\ref{lem:Lgpc}. From \cite[Theorem~3.3]{oda-oort}, cf.~\cite[Proposition~3.16.(1)]{karemaker-yobuko-yu}, we have $\ker \wt \lambda_1=\wt X_1[\sfF^2]=E^3[\sfF^2]$. 
We see that $\ker \wt \lambda \subseteq \wt X[\sfF^2]$ but $\wt X[\sfF]=E^4[\sfF]\not \subseteq \ker \wt \lambda=E^3[\sfF^2]$, so $U_{\wt x}$ is not a maximal open compact subgroup. Furthermore, up to conjugacy, the group $U_{\wt x}$ is properly contained in $U_{4,1}$, so there is a surjective map $\Lambda_{\wt x}\onto \Lambda_{4,1}$. Note that the above point $x$ lies in the stratum $\mathcal{S}_{(0,1,1,2)} \cap \mathcal{S}_4$. One can show that if $x\in \mathcal{S}_4\setminus \mathcal{S}_{(0,1,1,2)}$, then the surjection $\Lambda_x \twoheadrightarrow \Lambda_{4,p^c}$ is realised by the minimal isogeny. Indeed, if $x$ is contained in a supersingular Ekedahl-Oort stratum (i.e., one of the strata in Proposition~\ref{prop:EO}.(3)) this follows directly, cf.~\cite{harashita}. Otherwise, we have $x \in \mathcal{S}_{(0,1,2,3)} \cap \mathcal{S}_4$. In this case, we have $a(x) = 1$, so $\Lambda_{\tilde{x}} \simeq \Lambda_{4,p^2}$, as we will see in the proof of Theorem~\ref{thm:maing4}. \end{remark} \begin{lemma}\label{lem:a4} Let $x \in \mathcal{S}_4(k)$. When $a(x) = 4$, we have $\vert \mathcal{C}(x) \vert > 1$. \end{lemma} \begin{proof} By Proposition~\ref{prop:EO}.(5), we have $x \in \mathcal{S}_{(0,0,0,0)}$, so by Lemma~\ref{lem:c}, there exists a surjection $\Lambda_x \twoheadrightarrow \Lambda_{4,p^0}$. As observed in Subsection~\ref{ssec:sspmass}, it holds that $\lvert \Lambda_{4,p^0} \rvert= H_4(p,1)$, so it follows from Theorem~\ref{thm:mainarith}.(4) that $H_4(p, 1) > 1$. This implies the result. \end{proof} \begin{lemma}\label{lem:a2S0001} Let $x \in \mathcal{S}_4(k)$. When $a(x) = 3$ and $x \in \mathcal{S}_{(0,0,0,1)}$, we have $\vert \mathcal{C}(x) \vert > 1$. \end{lemma} \begin{proof} By Lemma~\ref{lem:c}, there exists a surjection $\Lambda_x \twoheadrightarrow \Lambda_{4,p}$. By Theorem~\ref{thm:sspmass} we get that \[ \mathrm{Mass}(\Lambda_{4,p}) = \frac{(p-1)(p^2+1)(p^4+1)(p^6-1)^2}{2^{15}\cdot 3^5 \cdot 5^2 \cdot 7}. \] Since $(p^3-1)$ divides the numerator of $\mathrm{Mass}(\Lambda_{4,p})$, we may argue as in the proof of Theorem~\ref{thm:mainarith}.(3)+(4) to conclude that this numerator is always larger than $1$. This implies that $\vert \Lambda_{4,p} \vert > 1$, so the result follows. \end{proof} \begin{lemma}\label{lem:a2Sn0012} Let $x \in \mathcal{S}_4(k)$. When $a(x) = 2$ and $x \not\in \mathcal{S}_{(0,0,1,2)}$, we have $\vert \mathcal{C}(x) \vert > 1$. \end{lemma} \begin{proof} By Proposition~\ref{prop:EO}.(5), we know that $x \in \mathcal{S}_{(0,1,1,2)} \cap \mathcal{S}_4$. By \cite[Main results, p.~164]{haraanumber} every generic point of $\mathcal{S}_{(0,1,1,2)} \cap \mathcal{S}_4$ has a minimal isogeny from $(E^4,p\mu)$ with a principal polarisation $\mu$. It follows that the minimal isogeny $\wt x=(\wt X ,\wt \lambda)$ for $x=(X,\lambda)$ is either isomorphic to $(E^4, p\mu)$ or there exists an isogeny $(E^4, p\mu)\to (\wt X, \wt \lambda)$ for some principal polarisation~$\mu$ on $E^4$. We also have that $\ker \wt \lambda$ is not equal to $\wt X[\sfF]=E^4[\sfF]$, otherwise $x \in \mathcal{S}_{(0,0,1,2)}$ as $a(X)=2$. Therefore, we find a surjection, either $\Lambda_{\wt x}\onto \Lambda_{4,1}$ or $\Lambda_{\wt x}\onto \Lambda_{4,p}$. The two numbers $|\Lambda_{4,1}|$ and $|\Lambda_{4,p}|$ are both greater than one as shown in Theorem~\ref{thm:mainarith}.(4) and in Lemma~\ref{lem:a2S0001}. Thus, $\lvert \calC(x) \rvert \ge \lvert \Lambda_{\wt x} \rvert >1$. 
\end{proof} \begin{theorem}\label{thm:maing4} For every $x \in \mathcal{S}_4(k)$, we have $\vert \mathcal{C}(x) \vert > 1$. \end{theorem} \begin{proof} It follows from Proposition~\ref{prop:EO}.(5) and Lemmas~\ref{lem:a4}--\ref{lem:a2Sn0012} that it suffices to consider $x \in \mathcal{S}_4(k)$ such that one of the following holds: \begin{itemize} \item[(i)] $x \in \mathcal{S}_{(0,0,1,2)} \sqcup \mathcal{S}_{(0,0,1,1)}$, or \item[(ii)] $a(x)=1$. \end{itemize} In Case (i), by Lemma~\ref{lem:c}, there exists a surjection $\Lambda_x \twoheadrightarrow \Lambda_{4,p^2}$, i.e., $c=2$. In Case (ii), by \cite[Theorem 2.2]{oda-oort} (also see \cite[Lemma 4.4]{katsuraoort}), there exists a unique four-dimensional rigid PFTQ \[ (Y_\bullet, \rho_\bullet): (Y_3, \lambda_3) \to (Y_2,\lambda_2) \to (Y_1,\lambda_1)\to (X_0,\lambda_0)=x \] extending $(X_0,\lambda_0)$. The construction in \emph{loc.~cit.} also shows that the composition \[ y_3=(Y_3,\lambda_3)\to (X_0,\lambda_0) = x \] is the minimal isogeny for $x$, and hence so is $y_3=(Y_3,\lambda_3)\to y_2=(Y_2,\lambda_2)$ for $y_2$. By Definition~\ref{def:PFTQ}, the polarisation $\lambda_3$ is $p$ times a polarisation $\mu$ on $E^4$. Dividing the polarisation by $p$ therefore gives an isomorphism $\Lambda_{y_3}\simeq \Lambda_{4,p^2}$. Thus, the minimal isogeny gives rise to surjective maps $\Lambda_x\twoheadrightarrow \Lambda_{y_2} \twoheadrightarrow \Lambda_{4,p^2}$. Hence, to show that $|\calC(x)|>1$, it suffices to show that $|\Lambda_{y_2}|>1$. Replacing $x$ with $y_2$, we now also have a surjection $\Lambda_x \twoheadrightarrow \Lambda_{4,p^2}$ in Case (ii). Since we have $L_{4,p^c} = L_4(1,p)$ from Equation~\eqref{eq:npgc}, it follows immediately from Theorem~\ref{thm:mainarith}.(4) that $\vert \mathcal{C}(x) \vert > 1$ when $p>2$. So from now on, we assume that $p=2$. We use the same notation as in Subsection~\ref{ssec:4first}. Since $\Lambda_{4,4} \simeq G^1(\Q)\backslash G^1(\A_f)/U_{x_2}$ where the base point $x_2 \in \Lambda_{4,4}$ is taken from the minimal isogeny for $x$, and $\vert \Lambda_{4,4} \vert = 1$, we get that $G^1(\A_f) = G^1(\mathbb{Q}) U_{x_2}$. Hence, \[ \Lambda_x \simeq G^1(\Q) \backslash G^1(\mathbb{Q}) U_{x_2} / U_x \simeq G^1(\Z) \backslash G_{x_2}(\Z_p) / G_x(\Z_p), \] where $G_x(\Z_p)$ is the automorphism group of the polarised Dieudonn{\'e} module associated to $x$. Applying the reduction-modulo-$\Pi$ map $m_{\Pi}$ , we obtain $m_{\Pi}(G_{x_2}(\Z_p)) = \mathrm{Sp}_4(\F_4)$. Further, let $(X_2,\lambda_2)$ be the superspecial abelian variety corresponding to the unique element $x_2 \in \Lambda_{4,4}$. Then by Proposition~\ref{prop:np2}, and using the same notation, we know that $G^1(\Z) = \Aut(X_2, \lambda_2) \simeq \Aut((L,h)^{\oplus 2})\simeq \Aut(L,h)^2 \cdot C_2$ and $\Aut(L,h)$ is the group of cardinality $1920$ described in \cite[Section 5]{ibukiyama}. By \cite[Section 5, p.~1178]{ibukiyama} the reduction modulo $\Pi$ induces a surjective homomorphism $\phi_0:\Aut(L,h)\onto \SL_2(\F_4)$ whose kernel $\ker(\phi_0)$ has order 32 (also see \emph{loc. cit.} for the description of $\ker(\phi_0)$). Then it follows that $m_{\Pi}(G^1(\Z)) = m_{\Pi}(\Aut(X_2,\lambda_2)) \simeq \mathrm{SL}_2(\F_4)^2 \cdot C_2$. Writing $\overline{G} := m_\Pi(G_x(\Z_p))$, we obtain \begin{equation}\label{eq:Lambdag4} \Lambda_x \simeq (\mathrm{SL}_2(\F_4)^2 \cdot C_2)\backslash \mathrm{Sp}_4(\F_4) / \overline{G}, \end{equation} since $\ker(m_\Pi) \subseteq G_x(\Z_p)$ (cf.~the proof of Proposition~\ref{lm:a1DCFp6}). 
Thus, \begin{equation}\label{eq:Massg4p2} \mathrm{Mass}(\Lambda_x) = \mathrm{Mass}(\Lambda_{4,4}) \cdot [\mathrm{Sp}_4(\F_4) : \overline{G}]. \end{equation} We compute that \begin{equation}\label{eq:massL44} \mathrm{Mass}(\Lambda_{4,4}) = \frac{1}{2^{15}\cdot3^2\cdot5^2} \end{equation} from Theorem~\ref{thm:sspmass}, using Equation~\eqref{eq:valuevn}. Standard computations also show that \begin{equation}\label{eq:Sp4F4} \vert \mathrm{Sp}_4(\F_4) \vert = 2^8 \cdot 3^2 \cdot 5^2 \cdot 17 \end{equation} and that \begin{equation}\label{eq:SL2S2} \vert \mathrm{SL}_2(\F_4)^2 \cdot C_2 \vert = 2^5 \cdot 3^2 \cdot 5^2. \end{equation} By~\eqref{eq:massL44} and~\eqref{eq:Sp4F4}, Equation~\eqref{eq:Massg4p2} reduces to \begin{equation}\label{eq:mass17} \mathrm{Mass}(\Lambda_{x}) = \frac{17}{2^7 \cdot \vert \overline{G} \vert}. \end{equation} We deduce that $\vert \Lambda_x \vert > 1$ whenever $17 \nmid \vert \overline{G} \vert$. Suppose therefore that $17 \mid \vert \overline{G} \vert$, so that $\overline{G}$ contains a cyclic group $C_{17}$ of order $17$. We claim that then $\overline{G} = C_{17}$. This finishes the proof, since if $\vert \Lambda_x \vert = 1$, Equation~\eqref{eq:Lambdag4} would imply that $\mathrm{Sp}_4(\F_4) = (\mathrm{SL}_2(\F_4)^2 \cdot C_2)$ $\overline{G} = (\mathrm{SL}_2(\F_4)^2 \cdot C_2)$ $C_{17}$. Comparing the cardinalities from~\eqref{eq:Sp4F4} and~\eqref{eq:SL2S2} would then yield a contradiction. Finally, we prove the claim. Let $(M_2, \<\, , \, \>_2)$ be the (contravariant) polarised \dieu module attached to~$x_2$. From $\ker \lambda_2\simeq \alpha_p^4$ and the fact that $M_2$ is superspecial, we have that $\sfF M_2=\sfV M_2=M_2^\vee\subseteq M_2$, where $M_2^\vee$ is the dual lattice of $M_2$, and that $\<\,, \>_2$ has polarisation type $(1/p,1/p,1,1)$ on $M_2$. One can see that $M_2^\vee/pM_2$ is the null-subspace for the symplectic pairing $\psi$ on $M_2/pM_2$ induced by $p\<\, , \, \>_2$. It follows that $p\<\, , \, \>_2$ induces a non-degenerate symplectic form $\psi$ on $V:=M_2/\sfV M_2$. The four-dimensional symplectic space $(V, \psi)$ over~$k$ admits an $\F_{4}$-structure~$V_0$ induced by the skeleton of $M_2$. Inside $V$ we have an isotropic $k$-subspace $W=M/\sfV M_2$, where $M\subseteq M_2$ is the \dieu module associated to $x$ and the inclusion is induced from the minimal isogeny. Note that $\dim W=2$ in Case (i) and $\dim W=1$ in Case (ii), respectively. According to our definition, \[ \ol G:=\{A\in \Sp_4(\F_{4}): A(W)=W\, \}=\Sp(V_0,W). \] Thus, it follows from Proposition~\ref{prop:Cq^2+1} for $g=4$ that $\ol G=C_{17}$. This completes the proof of the claim and hence of the theorem. \end{proof} \subsection{Proof of the main result} \begin{theorem}\label{thm:main2} \begin{enumerate} \item The supersingular locus $\calS_g$ is geometrically irreducible if and only if one of the following three cases holds: \begin{itemize} \item [(i)] $g=1$ and $p\in \{2,3,5,7,13\}$; \item [(ii)] $g=2$ and $p\in \{ 2, 3, 5, 7, 11\}$; \item [(iii)] $(g, p)=(3,2)$ or $(g,p)=(4,2)$. \end{itemize} \item Let $\calC(x)$ be the central leaf of $\calA_{g}$ passing through a point $x=[X_0,\lambda_0]\in \calS_{g}(k)$. Then $\calC(x)$ consists of one element if and only if one of the following three cases holds: \begin{itemize} \item [(i)] $g=1$ and $p\in \{2,3,5,7,13\}$; \item [(ii)] $g=2$ and $p=2,3$; \item [(iii)] $g=3$, $p=2$ and $a(x)\ge 2$. 
\end{itemize} \end{enumerate} \end{theorem} \begin{proof} \begin{enumerate} \item By \cite[Theorem 4.9]{lioort}, the number of irreducible components of $\mathcal{S}_g$ is equal to the class number $H_g(p,1)$ of the principal genus if $g$ is odd, and is equal to the class number $H_g(1,p)$ of the non-principal genus if $g$ is even. Thus, Statement (1) follows from Theorem ~\ref{thm:mainarith}. \item The cases where $g=1,2, 4$ or $g \geq 5$ follow from Lemma~\ref{lem:g1}, Lemma~\ref{lem:g2}, Theorem~\ref{thm:maing4} and Lemma~\ref{lem:g5+}, respectively. Suppose then that $g=3$. By Lemma~\ref{lem:Lgpc}, either $\vert \Lambda_x\vert \geq \vert \Lambda_{3,1} \vert=H_3(p,1)$ or $\vert \Lambda_x \vert \geq \vert \Lambda_{3,p}\vert=H_3(1,p)$. Thus, by Theorem~\ref{thm:mainarith}, $\vert \Lambda_x \vert=1$ occurs only when $p=2$. Further assuming $p=2$, by Proposition~\ref{prop:autg3}, $\calC(x)$ has one element if and only if $a(x)\ge 2$. \end{enumerate} \end{proof} \providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace} \providecommand{\href}[2]{#2} \begin{thebibliography}{10} \bibitem{Amir} Zavosh Amir-Khosravi, \emph{Serre's tensor construction and moduli of abelian schemes}, Manuscripta Math. \textbf{156} (2018), no.~3-4, pp.~409--456. \bibitem{amitsur:55} Shimshon~A. Amitsur, \emph{Finite subgroups of division rings}, Trans. Amer. Math. Soc. \textbf{80} (1955), pp.~361--386. \bibitem{AIK14} Tsuneo Arakawa, Tomoyoshi Ibukiyama, and Masanobu Kaneko, \emph{Bernoulli numbers and zeta functions}, Springer Monographs in Mathematics, Springer, Tokyo, 2014, with an appendix by Don Zagier. \bibitem{birkenhake-lange} Christina Birkenhake and Herbert Lange, \emph{Complex abelian varieties}, second ed., Grundlehren der mathematischen Wissenschaften [Fundamental Principles of Mathematical Sciences], vol. 302, Springer-Verlag, Berlin, 2004. \bibitem{brzezinski:h=1} Juliusz Brzezinski, \emph{Definite quaternion orders of class number one}, J. Th\'{e}or. Nombres Bordeaux \textbf{7} (1995), no.~1, pp.~93--96, Les Dix-huiti\`emes Journ\'{e}es Arithm\'{e}tiques (Bordeaux, 1993). \bibitem{CS15} Tommaso~Giorgio Centeleghe and Jakob Stix, \emph{Categories of abelian varieties over finite fields, {I}: {A}belian varieties over {$\Bbb{F}_p$}}, Algebra Number Theory \textbf{9} (2015), no.~1, pp.~225--265. \bibitem{chai} Ching-Li Chai, \emph{Every ordinary symplectic isogeny class in positive characteristic is dense in the moduli}, Invent. Math. \textbf{121} (1995), no.~3, pp.~439--479. \bibitem{COirr} Ching-Li Chai and Frans Oort, \emph{Monodromy and irreducibility of leaves}, Ann. of Math. (2) \textbf{173} (2011), no.~3, pp.~1359--1396. \bibitem{Debarre} Olivier Debarre, \emph{Polarisations sur les vari\'{e}t\'{e}s ab\'{e}liennes produits}, C. R. Acad. Sci. Paris S\'{e}r. I Math. \textbf{323} (1996), no.~6, pp.~631--635. \bibitem{eichler-CNF-1938} Martin Eichler, \emph{\"{U}ber die {I}dealklassenzahl total definiter {Q}uaternionenalgebren}, Math. Z. \textbf{43} (1938), no.~1, pp.~102--109. \bibitem{Eichler} \bysame, \emph{Note zur {T}heorie der {K}ristallgitter}, Math. Ann. \textbf{125} (1952), pp.~51--55. \bibitem{ekedahl} Torsten Ekedahl, \emph{On supersingular curves and abelian varieties}, Math. Scand. \textbf{60} (1987), no.~2, pp.~151--178. \bibitem{exton} Harold Exton, \emph{{$q$}-hypergeometric functions and applications}, Ellis Horwood Series: Mathematics and its Applications, Ellis Horwood Ltd., Chichester; Halsted Press [John Wiley \& Sons, Inc.], New York, 1983, With a foreword by L. J. 
Slater. \bibitem{haraanumber} Shushi Harashita, \emph{The {$a$}-number stratification on the moduli space of supersingular abelian varieties}, J. Pure Appl. Algebra \textbf{193} (2004), no.~1-3, pp.~163--191. \bibitem{harafirst} \bysame, \emph{Ekedahl-{O}ort strata and the first {N}ewton slope strata}, J. Algebraic Geom. \textbf{16} (2007), no.~1, pp.~171--199. \bibitem{harashita} \bysame, \emph{Ekedahl-{O}ort strata contained in the supersingular locus and {D}eligne-{L}usztig varieties}, J. Algebraic Geom. \textbf{19} (2010), no.~3, pp.~419--438. \bibitem{hashimoto:g=3} Ki-ichiro Hashimoto, \emph{Class numbers of positive definite ternary quaternion {H}ermitian forms}, Proc. Japan Acad. Ser. A Math. Sci. \textbf{59} (1983), no.~10, pp.~490--493. \bibitem{hashimoto-ibukiyama:1} Ki-ichiro Hashimoto and Tomoyoshi Ibukiyama, \emph{On class numbers of positive definite binary quaternion {H}ermitian forms}, J. Fac. Sci. Univ. Tokyo Sect. IA Math. \textbf{27} (1980), no.~3, pp.~549--601. \bibitem{hashimoto-ibukiyama:2} \bysame, \emph{On class numbers of positive definite binary quaternion {H}ermitian forms. {II}}, J. Fac. Sci. Univ. Tokyo Sect. IA Math. \textbf{28} (1981), no.~3, pp.~695--699. \bibitem{hwang-im-kim:g3} WonTae Hwang, Bo-Hae Im, and Hansol Kim, \emph{A classification of the automorphism groups of polarized abelian threefolds over finite fields}, Finite Fields Appl. \textbf{83} (2022), Paper No. 102082, 37. \bibitem{ibukiyama:autgp1989} Tomoyoshi Ibukiyama, \emph{On automorphism groups of positive definite binary quaternion {H}ermitian lattices and new mass formula}, Automorphic forms and geometry of arithmetic varieties, Adv. Stud. Pure Math., vol.~15, Academic Press, Boston, MA, 1989, pp.~301--349. \bibitem{ibukiyama} \bysame, \emph{Principal polarizations of supersingular abelian surfaces}, J. Math. Soc. Japan \textbf{72} (2020), no.~4, pp.~1161--1180. \bibitem{JKPRST} Bruce~W. Jordan, Allan~G. Keeton, Bjorn Poonen, Eric~M. Rains, Nicholas Shepherd-Barron, and John~T. Tate, \emph{Abelian varieties isogenous to a power of an elliptic curve}, Compos. Math. \textbf{154} (2018), no.~5, pp.~934--959. \bibitem{karemaker-yobuko-yu} Valentijn Karemaker, Fuetaro Yobuko, and Chia-Fu Yu, \emph{Mass formula and {O}ort's conjecture for supersingular abelian threefolds}, Adv. Math. \textbf{386} (2021), Paper No. 107812, 52. \bibitem{katsuraoort:compos87} Toshiyuki Katsura and Frans Oort, \emph{Families of supersingular abelian surfaces}, Compositio Math. \textbf{62} (1987), no.~2, pp.~107--167. \bibitem{katsuraoort} \bysame, \emph{Supersingular abelian varieties of dimension two or three and class numbers}, Algebraic geometry, {S}endai, 1985, Adv. Stud. Pure Math., vol.~10, North-Holland, Amsterdam, 1987, pp.~253--281. \bibitem{KirschmerHab} Markus Kirschmer, \emph{Definite quadratic and hermitian forms with small class number}, Habilitationsschrift, RWTH Aachen University, unpublished (2016). \bibitem{KirschmerLorch} Markus Kirschmer and David Lorch, \emph{Ternary quadratic forms over number fields with small class number}, J. Number Theory \textbf{161} (2016), pp.~343--361. \bibitem{KNRR} Markus Kirschmer, Fabien Narbonne, Christophe Ritzenthaler, and Damien Robert, \emph{Spanning the isogeny class of a power of an elliptic curve}, Math. Comp. \textbf{91} (2021), no.~333, pp.~401--449. \bibitem{kitaoka} Yoshiyuki Kitaoka, \emph{Arithmetic of quadratic forms}, Cambridge tracts in Mathematics 106, Cambridge University Press, 1993. 
\bibitem{kneser} Martin Kneser, \emph{Quadratische formen}, Springer, Berlin Heidelberg, 2002. \bibitem{Lauter} Kristin Lauter, \emph{The maximum or minimum number of rational points on genus three curves over finite fields}, Compositio Math. \textbf{134} (2002), no.~1, pp.~87--111, With an appendix by Jean-Pierre Serre. \bibitem{lioort} Ke-Zheng Li and Frans Oort, \emph{Moduli of supersingular abelian varieties}, Lecture Notes in Mathematics, vol. 1680, Springer-Verlag, Berlin, 1998. \bibitem{manin} Yuri~I. Manin, \emph{Theory of commutative formal groups over fields of finite characteristic}, Uspehi Mat. Nauk \textbf{18} (1963), no.~6 (114), pp.~3--90. \bibitem{nebe:98} Gabriele Nebe, \emph{Finite quaternionic matrix groups}, Represent. Theory \textbf{2} (1998), pp.~106--223. \bibitem{oda-oort} Tadao Oda and Frans Oort, \emph{Supersingular abelian varieties}, Proceedings of the {I}nternational {S}ymposium on {A}lgebraic {G}eometry ({K}yoto {U}niv., {K}yoto, 1977), Kinokuniya Book Store, Tokyo, 1978, pp.~595--621. \bibitem{OortNP} Frans Oort, \emph{Newton polygons and formal groups: conjectures by {M}anin and {G}rothendieck}, Ann. of Math. (2) \textbf{152} (2000), no.~1, pp.~183--206. \bibitem{OortEO} \bysame, \emph{A stratification of a moduli space of abelian varieties}, Moduli of abelian varieties ({T}exel {I}sland, 1999), Progr. Math., vol. 195, Birkh\"{a}user, Basel, 2001, pp.~345--416. \bibitem{oort:foliation} \bysame, \emph{Foliations in moduli spaces of abelian varieties}, J. Amer. Math. Soc. \textbf{17} (2004), no.~2, pp.~267--296. \bibitem{pizer:arith} Arnold Pizer, \emph{On the arithmetic of quaternion algebras}, Acta Arith. \textbf{31} (1976), no.~1, pp.~61--89. \bibitem{platonov-rapinchuk} Vladimir Platonov and Andrei Rapinchuk, \emph{Algebraic groups and number theory}, Pure and Applied Mathematics, vol. 139, Academic Press, Inc., Boston, MA, 1994, Translated from the 1991 Russian original by Rachel Rowen. \bibitem{reiner:mo} Irving Reiner, \emph{Maximal orders}, London Mathematical Society Monographs. New Series, vol.~28, The Clarendon Press Oxford University Press, Oxford, 2003, Corrected reprint of the 1975 original, With a foreword by M. J. Taylor. \bibitem{serre:arith} Jean-Pierre Serre, \emph{A course in arithmetic}, Springer-Verlag, New York-Heidelberg, 1973, Translated from the French, Graduate Texts in Mathematics, No. 7. \bibitem{shioda} Tetsuji Shioda, Some remarks on abelian varietiees. J. Fac. Sci. Univ. Tokyo Sect. IA Math. \textbf{24} (1977), no. 1, pp.~11–-21. \bibitem{vigneras} Marie-France Vign{\'e}ras, \emph{Arithm\'etique des alg\`ebres de quaternions}, Lecture Notes in Mathematics, vol. 800, Springer, Berlin, 1980. \bibitem{yu:2005} Chia-Fu Yu, \emph{On the mass formula of supersingular abelian varieties with real multiplications}, J. Aust. Math. Soc. \textbf{78} (2005), no.~3, pp.~373--392. \bibitem{yu:mrl2010} \bysame, \emph{On finiteness of endomorphism rings of abelian varieties}, Math. Res. Lett. \textbf{17} (2010), no.~2, pp.~357--370. \bibitem{yu2} \bysame, \emph{The supersingular loci and mass formulas on {S}iegel modular varieties}, Doc. Math. \textbf{11} (2006), pp.~449--468. \bibitem{yu:jpaa2012} \bysame, \emph{Superspecial abelian varieties over finite prime fields}, J. Pure Appl. Algebra \textbf{216} (2012), no.~6, pp.~1418--1427. \bibitem{yu:iumj18} \bysame, \emph{On arithmetic of the superspecial locus}, Indiana Univ. Math. J. \textbf{67} (2018), no.~4, pp.~1341--1382. 
\bibitem{yuyu} Chia-Fu Yu and Jeng-Daw Yu, \emph{Mass formula for supersingular abelian surfaces}, J. Algebra \textbf{322} (2009), no.~10, pp.~3733--3743. \end{thebibliography} \end{document}
2205.13078v1
http://arxiv.org/abs/2205.13078v1
Birkhoff-James Orthogonality and Its Pointwise Symmetry in Some Function Spaces
\documentclass[12pt]{amsart} \usepackage{amsmath} \usepackage{mathtools} \usepackage{amssymb} \usepackage{amsthm} \usepackage{graphicx} \usepackage{enumerate} \usepackage[mathscr]{eucal} \usepackage[pagewise]{lineno} \usepackage{tikz} \usetikzlibrary{decorations.text,calc,arrows.meta} \theoremstyle{plain} \newtheorem*{acknowledgement}{\textnormal{\textbf{Acknowledgements}}} \newcommand{\loglike}[1]{\mathop{\rm #1}\nolimits} \setlength{\textwidth}{121.9mm} \setlength{\textheight}{176.2mm} \numberwithin{equation}{section} \setlength{\parindent}{4em} \usepackage[english]{babel} \DeclareMathOperator{\conv}{conv} \DeclareMathOperator{\sgn}{sgn} \DeclareMathOperator{\Int}{Int} \DeclareMathOperator{\dist}{dist} \DeclareMathOperator{\sech}{sech} \DeclareMathOperator{\csch}{csch} \DeclareMathOperator{\arcsec}{arcsec} \DeclareMathOperator{\arccot}{arcCot} \DeclareMathOperator{\arccsc}{arcCsc} \DeclareMathOperator{\arccosh}{arccosh} \DeclareMathOperator{\arcsinh}{arcsinh} \DeclareMathOperator{\arctanh}{arctanh} \DeclareMathOperator{\arcsech}{arcsech} \DeclareMathOperator{\arccsch}{arcCsch} \DeclareMathOperator{\arccoth}{arcCoth} \newtheorem{theorem}{Theorem}[subsection] \newtheorem{cor}[theorem]{Corollary} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{rem}[theorem]{Remark} \newtheorem{prop}[theorem]{Proposition} \theoremstyle{definition} \newtheorem{definition}[theorem]{Definition} \newtheorem{example}[theorem]{Example} \bibliographystyle{apa} \usepackage{hyperref} \hypersetup{ colorlinks=true, linkcolor=blue, filecolor=magenta, urlcolor=cyan, } \usepackage[pagewise]{lineno} \bibliographystyle{apa} \usepackage{hyperref} \begin{document} \title[Birkhoff-James Orthogonality in Function Spaces]{Birkhoff-James Orthogonality and Its Pointwise Symmetry in Some Function Spaces} \author[Bose]{Babhrubahan Bose} \newcommand{\acr}{\newline\indent} \subjclass[2020]{Primary 46B20, Secondary 46E30, 46L05} \keywords{Birkhoff-James orthogonality; Smooth points; Left-symmetric points; Right-symmetric points; $L_p$ spaces; Commutative $C^*$ algebras; Ultrafilters} \address[Bose]{Department of Mathematics\\ Indian Institute of Science\\ Bengaluru 560012\\ Karnataka \\INDIA\\ } \email{[email protected]} \thanks{The research of Babhrubahan Bose is funded by PMRF research fellowship under the supervision of Professor Apoorva Khare and Professor Gadadhar Misra.} \begin{abstract} We study Birkhoff-James orthogonality and its pointwise symmetry in commutative $C^*$ algebras, i.e., the space of all continuous functions defined on a locally compact Hausdorff space which vanish at infinity. We use this characterization to obtain the characterization of Birkhoff-James orthogonality on $L_\infty$ space defined on any arbitrary measure space. We also do the same for the $L_p$ spaces for $1\leq p<\infty$. \end{abstract} \maketitle \section*{Introduction} {In recent times, symmetry of Birkhoff-James orthogonality has been a topic of considerable interest \cite{annal}, \cite{dkp}, \cite{1}, \cite{3}, \cite{4}, \cite{5}, \cite{8}. It is now well known that the said symmetry plays an important role in the study of the geometry of Banach spaces. The present article aims to explore Birkhoff-James orthogonality and its pointwise symmetry in some function spaces. We have completed such a study for some well studied sequence spaces, namely $\ell_p$ for $1\leq p\leq\infty$, $c$, $c_0$ and $c_{00}$ in \cite{usseq}. 
Here we take the study one step further by doing the same for commutative $C^*$ algebras and $L_p(X)$ for $1\leq p\leq\infty$ and any measure space $X$.} Let us now establish the relevant notations and terminologies to be used throughout the article. Denote the scalar field $\mathbb{R}$ or $\mathbb{C}$ by $\mathbb{K}$ and recall the sign function $\sgn:\mathbb{K}\to\mathbb{K},$ given by \[\sgn(x)= \begin{cases} \frac{x}{|x|},\;x\neq0,\\ 0,\;x=0. \end{cases}\] Consider a normed linear space $\mathbb{X}$ over $\mathbb{K}$ and denote its continuous dual by $\mathbb{X}^*$. Let $J(x)$ denote the collection of all support functionals of a non-zero vector $x\in \mathbb{X}$, i.e., \begin{align}\label{support} J(x):=\{f\in \mathbb{X}^*:\|f\|=1,\;|f(x)|=\|x\|\}. \end{align} A non-zero {element} $x\in\mathbb{X}$ is said to be \textit{smooth} if $J(x)$ is singleton.\par Given $x,y\in \mathbb{X}$, $x$ is said to be \textit{Birkhoff-James orthogonal} to $y$ \cite{B}, denoted by $x\perp_By$, if \begin{align*} \|x+\lambda y\|\geq\|x\|,~~\textit{for~all~}\lambda\in\mathbb{K}. \end{align*} James proved in \cite{james} that $x\perp_By$ if and only if $x=0$ or there exists $f\in J(x)$ such that $f(y)=0$. In the same article he also proved that a non-zero $x\in \mathbb{X}$ is smooth if and only if Birkhoff-James orthogonality is right additive at $x$, i.e., \begin{align*} x\perp_By,~x\perp_Bz~~\Rightarrow~~x\perp_B(y+z),~~\textit{for every}~y,z\in\mathbb{X}. \end{align*} \par Birkhoff-James orthogonality is not symmetric in general, i.e., $x\perp_By$ does not necessarily imply that $y\perp_Bx$. In fact, James proved in \cite{james2} that Birkhoff-James orthogonality is symmetric in a normed linear space of dimension higher than 2 if and only if the space is an inner product space. However, the importance of studying the pointwise symmetry of Birkhoff-James orthogonality in describing the geometry of normed linear spaces has been illustrated in \cite[Theorem 2.11]{CSS}, \cite[Corollary 2.3.4.]{Sain}. Let us recall the following definition in this context from \cite{Sain2}, which will play an important part in our present study. \begin{definition} An element $x$ of a normed linear space $\mathbb{X}$ is said to be \textit{left-symmetric} (\textit{resp. right-symmetric}) if \begin{align*} x\perp_By\;\Rightarrow\; y\perp_Bx~~(\textit{resp.~}y\perp_Bx\;\Rightarrow\;x\perp_By), \end{align*} for every $y\in \mathbb{X}$. \end{definition} Note that by the term \textit{pointwise symmetry of Birkhoff-James orthogonality}, we refer to the left-symmetric and the right-symmetric points of a given normed linear space. The left-symmetric and the right-symmetric points of $\ell_p$ spaces where $1\leq p\leq \infty$, $p\neq2,$ were characterized in \cite{usseq}. {Here we generalize these results in $L_p(X)$ for any measure space $X$ and $p\in[1,\infty]\setminus\{2\}$.} For doing this generalization, we need to characterize Birkhoff-James orthogonality, smooth points, left symmetric points and right symmetric points in commutative $C^*$ algebras, i.e., $C_0(X)$, the space of all continuous functions vanishing at infinity defined on a locally compact Hausdorff space $X$. These characterizations in a given Banach space are important in understanding the geometry of the Banach space. We refer the readers to \cite{annal}, \cite{dkp}, \cite{1}, \cite{3}, \cite{4}, \cite{5}, \cite{8}, \cite{10}, \cite{SRBB}, \cite{12}, \cite{turnsek} for some prominent work in this direction. 
\par In the first section we completely characterize Birkhoff-James orthogonality in commutative $C^*$ algebras, i.e., the space of all $\mathbb{K}$-valued continuous functions vanishing at infinity that are defined on a locally compact Hausdorff space $X$ and then characterize the left-symmetric and the right-symmetric points of the space.\par In the second section, we use the results in the first section to completely characterize Birkhoff-James orthogonality, smoothness and pointwise symmetry of Birkhoff-James orthogonality in $L_\infty(X)$. It can be noted that we are establishing these results for an arbitrary measure space $X$ and in particular, we are not imposing any additional condition on $X$ such as finiteness or $\sigma$-finiteness of the measure. In the third and fourth sections we {obtain} the same characterizations for $L_1(X)$ and $L_p(X)$ spaces $(p\in(1,\infty)\setminus\{2\})$. Observe that the $p=2$ case is trivial since $L_2(X)$ is a Hilbert space. \section{Birkhoff-James orthogonality in commutative $C^*$ algebras} The aim of this section is to obtain a necessary and sufficient condition for two elements in a commutative $C^*$ algebra to be Birkhoff-James orthogonal. Using that characterization, we characterize the smooth points and also study the pointwise symmetry of Birkhoff-James orthogonality in these algebras. We use the famous result Gelfand and Naimark proved in \cite{gelfand}, that any commutative $C^*$ algebra is isometrically $*$-isomorphic to $C_0(X)$ for some locally compact Hausdorff space $X$. Recall that $C_0(X)$ denotes the space of $\mathbb{K}$-valued continuous maps $f$ on $X$ such that \[\lim\limits_{x\to\infty}f(x)=0,\] equipped with the supremum norm, where $X\cup\{\infty\}$ is the one-point compactification of $X$. Also note that the $C^*$ algebra is unital if and only if $X$ is compact.\par We also recall that by the Riesz representation theorem in measure theory, the continuous dual of $C_0(X)$ is isometrically isomorphic to the space of all regular complex finite Borel measures on $X$ equipped with total variation norm and the functional $\Psi_\mu$ corresponding to a measure $\mu$ acting by, \begin{equation*} \Psi_\mu(f):=\int\limits_Xfd\mu,~~f\in C_0(X). \end{equation*} \subsection{Birkhoff-James orthogonality in $C_0(X)$}\hfill \\ We begin with defining the \textit{norm attaining set} of an element $f\in C_0(X)$ by, \[M_f:=\{x\in X:|f(x)|=\|f\|\}.\] Clearly, $M_f$ is a compact subset of $X$. We state a characterization of the support functionals of an element $f\in C_0(X)$ using the norm attaining set. The proof of the result relies on elementary computations. \begin{theorem}\label{norm} Suppose $f\in C_0(X)$ and $f\neq0$. Let $\mu$ be a complex regular Borel measure. Then $\mu$ is of unit total variation corresponding to a support functional of $f$ if and only if $|\mu|\left(X\backslash M_f\right)=0$ and for almost every $x\in M_f$, with respect to the measure $\mu$, $d\mu(x)=\overline{\sgn(f(x))} d|\mu|(x)$. \end{theorem} We now come to the characterization of Birkhoff-James orthogonality in $C_0(X)$. \begin{theorem}\label{ortho} If $f,g\in C_0(X)$ and $f\neq0$, then $f\perp_Bg$ if and only if $0\in\conv\{\overline{f(x)}g(x):x\in M_f\}$. \end{theorem} \begin{proof} Let $0\in\conv\{\overline{f(x)}g(x):x\in M_f\}$. 
Then there exist $n\in\mathbb{N}$, $\lambda_1,\lambda_2,\dots,\lambda_n\geq0$ with $\sum\limits_{k=1}^n\lambda_k=1$ and\\ \begin{equation*} 0=\sum\limits_{k=1}^n\lambda_k\overline{f(x_k)}g(x_k),\\ \end{equation*} for some $x_1,x_2,\dots, x_n\in M_f$. Consider the functional\\ \begin{equation*} \Psi:h\mapsto\frac{1}{\|f\|}\sum\limits_{k=1}^n\lambda_k\overline{f(x_k)}h(x_k),~~h\in C_0(X).\\ \end{equation*} Then for $h\in C_0(X)$,\\ \begin{equation*} |\Psi(h)|=\left|\frac{1}{\|f\|}\sum\limits_{k=1}^n\lambda_k\overline{f(x_k)}h(x_k)\right|\leq\|h\|\left(\sum\limits_{k=1}^n\lambda_k\right)=\|h\|.\\ \end{equation*} Also,\\ \begin{equation*} \Psi(f)=\frac{1}{\|f\|}\sum\limits_{k=1}^n\lambda_k\overline{f(x_k)}f(x_k)=\|f\|\left(\sum\limits_{k=1}^n\lambda_k\right)=\|f\|,\\ \end{equation*} and\\ \begin{equation*} \Psi(g)=\frac{1}{\|f\|}\sum\limits_{k=1}^n\lambda_k\overline{f(x_k)}g(x_k)=0.\\ \end{equation*} Hence $\Psi$ is a support functional of $f$ such that $\Psi(g)=0$, giving $f\perp_Bg$ and proving the sufficiency. \par Conversely, suppose $f\perp_Bg$. Then there is a support functional of $f$ that annihilates $g$. Invoking Theorem \ref{norm} we obtain a complex regular Borel measure $\nu$ having $|\nu|(M_f)=1$ and \begin{equation*} \int\limits_Xhd\nu=\int\limits_{M_f}h(x)\overline{\sgn(f(x))}d|\nu|(x),~~\textit{for every}~h\in C_0(X), \end{equation*} such that \begin{equation*} 0=\int\limits_Xgd\nu=\int\limits_{M_f}g(x)\frac{\overline{f(x)}}{\|f\|}d|\nu|(x). \end{equation*} Suppose $\Lambda$ is the space of all positive semi-definite regular Borel probability measures on $M_f$ and $\Phi:\Lambda\to\mathbb{K}$ given by, \begin{equation*} \Phi(\mu):=\int\limits_{M_f}\overline{f(x)}g(x)d\mu(x),~~\mu\in\Lambda. \end{equation*} Observe that since $\Lambda$ is convex, so is $\Phi(\Lambda)$. Also, as $\Lambda$ is the collection of all support functionals of $|f|\in C_0(X)$, it is compact under the weak* topology by the Banach-Alaoglu theorem \cite[subsection 3.15, p.68]{BAT}. Now, the map $\Phi$ is evaluation at the element $\overline{f}g\in C_0(X)$ on $\Lambda$ and hence is continuous where $\Lambda$ is equipped with the weak* topology. Therefore, $\Phi(\Lambda)$ is compact and hence by the Krein-Milman theorem \cite{KMT}, \begin{equation*} \Phi(\Lambda)=\overline{\conv}\{\lambda:\lambda~\textit{is~an~extreme~point~of~}\Phi(\Lambda)\}. \end{equation*}\par We claim that any extreme point of $\Phi(\Lambda)$ is of the form $\overline{f(x)}g(x)$ for some $x\in M_f$. Suppose, on the contrary, $\Phi(\mu)$ is an extreme point of $\Phi(\Lambda)$ and $\mu$ is not a Dirac delta measure. If $\overline{f}g$ is constant on the support of $\mu$, clearly, $\Phi(\mu)=\overline{f(x)}g(x)$ for any $x$ in the support of $\mu$. Otherwise, there exist $x,y$ in the support of $\mu$ such that $\overline{f(x)}g(x)\neq \overline{f(y)}g(y)$. Consider $0<\delta<\frac{1}{2}|\overline{f(x)}g(x)-\overline{f(y)}g(y)|$ and $U_x\subset M_f$ open such that \begin{align*} z\in U_x~\Rightarrow~|\overline{f(x)}g(x)-\overline{f(z)}g(z)|<\delta. \end{align*} Then $U_x$ and $M_f\backslash U_x$ are two disjoint subsets of $M_f$ having non-zero measures since $M_f\backslash U_x$ contains an open subset of $M_f$ containing $y$. Clearly, since $\mu$ can be written as a convex combination of $\frac{1}{\mu\left(U_x\right)}\mu|_{U_x}$ and $\frac{1}{\mu\left(M_f\backslash U_x\right)}\mu|_{M_f\backslash U_x}$, we get \begin{align*} \Phi(\mu)=\frac{1}{\mu(U_x)}\int\limits_{U_x} \overline{f(z)}g(z)d\mu(z). 
\end{align*} Hence, we have \begin{align*} \left|\overline{f(x)}g(x)-\Phi(\mu)\right|&=\left|\overline{f(x)}g(x)-\frac{1}{\mu(U_x)}\int\limits_{U_x} \overline{f(z)}g(z)d\mu(z)\right|\\ &\leq\frac{1}{\mu\left(U_x\right)}\int\limits_{U_x}|\overline{f(x)}g(x)-\overline{f(z)}g(z)|d\mu(z)\leq\delta. \end{align*} Since $0<\delta<\frac{1}{2}|\overline{f(x)}g(x)-\overline{f(y)}g(y)|$ is arbitrary, we obtain that $\Phi(\mu)=\overline{f(x)}g(x)$ establishing our claim.\par Hence, \begin{equation}\label{convex} 0=\Phi(|\nu|)\in\Phi(\Lambda)=\overline{\conv}\{\overline{f(x)}g(x):x\in M_f\}. \end{equation}\par We now prove that if $K\subset\mathbb{K}$ is compact, $\conv(K)=\overline{\conv}(K)$. Suppose $z$ is a limit point of $\conv(K)$. Then there exists a sequence of elements $z_n$ in $\conv(K)$ converging to $z$. But by Caratheodory's theorem \cite{caratheodory}, for every $n\in\mathbb{N}$, there exist $\lambda_i^{(n)}\in[0,1]$ and $z_i^{(n)}\in K$ for $i=1,2,3$ such that \begin{equation*} \sum\limits_{i=1}^3\lambda_i^{(n)}=1,~~\sum\limits_{i=1}^3\lambda_i^{(n)}z_i^{(n)}=z_n. \end{equation*} Since $[0,1]$ and $K$ are both compact, we may consider an increasing sequence of natural numbers $\left(n_k\right)_{k\in\mathbb{N}}$ such that $\{\lambda_1^{(n_k)}\}_{k\in\mathbb{N}}$, $\{\lambda_2^{(n_k)}\}_{k\in\mathbb{N}}$, $\{\lambda_3^{(n_k)}\}_{n_k\in\mathbb{N}}$, $\{z_1^{(n_k)}\}_{k\in\mathbb{N}}$, $\{z_2^{(n_k)}\}_{k\in\mathbb{N}}$ and $\{z_3^{(n_k)}\}_{k\in\mathbb{N}}$ are all convergent and thereby obtain that $z\in\conv(K)$. \par As $M_f$ is compact, $\{\overline{f(x)}g(x):x\in M_f\}$ is a compact subset of $\mathbb{K}$ and hence by \eqref{convex}, \begin{equation*} 0\in\conv\{\overline{f(x)}g(x):x\in M_f\}, \end{equation*} establishing the necessity. \end{proof} We now furnish a generalization of \cite[Corollary 2.2]{function} characterizing the smoothness of an element of $C_0(X)$. \begin{theorem}\label{smooth} A point $f\in C_0(X)$ is smooth if and only if $M_f$ is a singleton set. \end{theorem} \begin{proof} First if $M_f$ is a singleton set, say $\{x_0\}$, then clearly by Theorem \ref{ortho}, $f\perp_Bg$ for $g\in C_0(X)$ if and only if $g(x_0)=0$. Hence clearly, for $g,h\in C_0(X)$, $f\perp_B g,h$ would imply \begin{equation*} g(x_0)=h(x_0)=0~\Rightarrow~ g(x_0)+h(x_0)=0~\Rightarrow ~f\perp_B(g+h). \end{equation*} Hence $f$ is smooth.\par Conversely, if $x_1,x_2\in M_f$, $x_1\neq x_2$, then $\Psi_1,\Psi_2:C_0(X)\to\mathbb{K}$ given by \begin{equation*} \Psi_i(g):=\overline{\sgn(f(x_i))}g(x_i),~~g\in C_0(X),~i=1,2, \end{equation*} are two support functionals of $f$. Now, since $X$ is Hausdorff, there exists $U\subset X$ open such that $x_1\in U$ and $x_2\notin U$. Hence, there exists a continuous map $h$ on $X$ having compact support, vanishing outside $U$ and $h(x_1)=1$. Thus $h\in C_0(X)$ and $\Psi_1(h)\neq\Psi_2(h)$. Therefore $f$ is not smooth. \end{proof} \subsection{Pointwise symmetry of Birkhoff-James orthogonality in $C_0(X)$}\hfill \\ In this subsection we characterize the pointwise symmetry of Birkhoff-James orthogonality in $C_0(X)$. We begin with our characterization of the left symmetric points of $C_0(X)$. \begin{theorem}\label{left} An element $f\in C_0(X)$ is a left symmetric point of $C_0(X)$ if and only if $f$ is identically zero or $M_f$ is singleton and $f$ vanishes outside $M_f$. \end{theorem} \begin{proof} We begin with the sufficiency. If $M_f=\{x_0\}$ for some $x_0\in X$, then by Theorem \ref{ortho}, $f\perp_Bg$ for any $g\in C_0(X)$ if and only if $g(x_0)=0$. 
Then clearly, $x_0\notin M_g$ and hence if $x_1\in M_g$, $f(x_1)=0$ giving $g\perp_Bf$ by Theorem \ref{ortho}.\par Conversely, suppose $f\in C_0(X)$ is left-symmetric and not identically zero. Suppose $x_1\in M_f$ and $x_2\in X$ such that $x_1\neq x_2$, $f(x_2)\neq0$. Consider $U,U'\subset X$ open containing $x_2$ such that $x_1\notin U$ and $f$ does not vanish on $U'$. Set $U''=U\cap U'$. Consider a continuous function $h:X\to[0,1]$ having compact support such that $h(x_2)=1$ and $h$ vanishes outside $U''$. Set $g(x):=\sgn(f(x))h(x)$, $x\in X$. Then clearly $g\in C_0(X)$ and $g(x_1)=0$ giving $f\perp_Bg$ by Theorem \ref{ortho}. But clearly $\sgn(g(x))=\sgn(f(x))$ for every $x\in M_g$. Hence $g\not\perp_Bf$ by Theorem \ref{ortho}, establishing the necessity. \end{proof} Note that this theorem clearly states that if $X$ has no singleton connected component, $C_0(X)$ has no non-zero left symmetric point. \par We next characterize the right-symmetric points. \begin{theorem}\label{right} An element $f\in C_0(X)$ is right-symmetric if and only if $M_f=X$. Hence, in particular if $X$ is not compact, $C_0(X)$ has no non-zero right symmetric point. \end{theorem} \begin{proof} We again begin with the sufficiency. If $M_f=X$, then by Theorem \ref{ortho}, $g\perp_B f$ only if \begin{align*} 0\in\conv\{\overline{g(x)}f(x):x\in M_g\}. \end{align*} Since $M_g\subset X=M_f$, we clearly obtain \begin{align*} 0\in\conv\{\overline{f(x)}g(x):x\in M_f\}, \end{align*} and hence $f\perp_Bg$ by Theorem \ref{ortho}.\par Conversely, suppose $f\in C_0(X)$ is right-symmetric and not identically zero. For the sake of contradiction, let us assume that $M_f\neq X$. We consider two cases. \\ \textbf{Case 1:} \textit{$f(x_0)=0$ for some $x_0\in X$.}\\ Since $x_0\notin M_f$ and $M_f$ is compact, we obtain $U,U'\subset X$ open such that $U\cap U'=\emptyset$, $x_0\in U$ and $M_f\subset U'$. Now, consider two continuous functions $h,h':X\to[0,1]$ having compact supports such that $h(x_0)=1$, $h$ vanishes outside $U$ and $h'$ is identically 1 on $M_f$ and vanishes outside $U'$. Set $g(x):=\|f\|h(x)+f(x)h'(x)$, $x\in X$. Then $\|g\|=\|f\|$ and $x_0\in M_g$. Hence by Theorem \ref{ortho}, $g\perp_Bf$. However, if $x\in M_f$, $g(x)=f(x)$ and hence by Theorem \ref{ortho}, $f\not\perp_Bg$.\\ \textbf{Case 2:} \textit{$f$ is non-zero everywhere on $X$ but there exists $x_0\in X\backslash M_f$.}\\ Let us again consider $U$, $U'$, $h$ and $h'$ as before and set $g(x):=-\|f\|\sgn(f(x))h(x)+f(x)h'(x)$. Then clearly, $M_f\subset M_g$ and $x_0\in M_g$. Also, $g(x)=f(x)$ for $x\in M_f$ and $g(x_0)=-\|f\|\sgn(f(x_0))$ giving $g\perp_Bf$ by Theorem \ref{ortho}. Also, by the same theorem $f\not\perp_Bg$, proving the necessity. \end{proof} \section{Birkhoff-James orthogonality and its pointwise symmetry in $L_\infty$ spaces} In this section, we study Birkhoff-James orthogonality and its pointwise symmetry in $L_\infty$ spaces. Note that since $L_\infty$ spaces are also commutative $C^*$ algebras, we are going to use the results from Section 2 for this study. We begin with representing $L_\infty(X)$ as $C_0(Y)$ for some suitable locally compact, Hausdorff $Y$. We then study this representation and use the results of Section 2 to characterize Birkhoff-James orthogonality and its pointwise symmetry in $L_\infty(X)$.\par We begin by considering a positive measure space $(X,\Sigma,\lambda)$ and $L_\infty^\mathbb{K}(X,\Sigma,\lambda)$, the space of all essentially bounded $\mathbb{K}$ valued functions on $X$ equipped with the essential supremum norm. 
Without any ambiguity, we refer to $L_\infty^\mathbb{K}(X,\Sigma,\lambda)$ as $L_\infty(X)$. \par We now represent $L_\infty(X)$ as the space of continuous functions on a compact topological space equipped with the supremum norm. We begin with a definition: \begin{definition} A \textit{0-1 measure with respect to $\lambda$} is a finitely additive set function $\mu$ on $(X,\Sigma)$ taking values in $\{0,1\}$ such that $\mu(X)=1$, $\mu(A)=0$ whenever $\lambda(A)=0$. \end{definition} Let us define $\mathfrak{G}$ as the collection of all 0-1 measures with respect to $\lambda$. We consider the $t$ topology on $\mathfrak{G}$ having a basis consisting of sets of the following form: \begin{equation*} t(A):=\{\mu\in\mathfrak{G}:\mu(A)=1\},~~A\in\Sigma,~\lambda(A)>0. \end{equation*} Yosida and Hewitt proved the following representation result in \cite{Y-H}: \begin{theorem}\label{representation} \strut\\ 1. The topological space $\left(\mathfrak{G},t\right)$ is compact and Hausdorff.\\ 2. The map $T:L_\infty(X)\to C^\mathbb{K}\left(\mathfrak{G},t\right)$ given by \begin{equation*} T(f)(\mu):=\int\limits_Xfd\mu,~~\mu\in\mathfrak{G},~f\in L_\infty(X), \end{equation*} is an isometric isomorphism. \end{theorem} In the first subsection we study the space of 0-1 measures with respect to $\lambda$ and integrals with respect to the measures. We characterize Birkhoff-James orthogonality between two elements of $L_\infty(X)$ in the second subsection along with characterization of smoothness of a point. The third subsection comprises of characterizations of pointwise symmetry in $L_\infty(X)$. \subsection{0-1 measures with respect to $\lambda$ and $\lambda$-ultrafilters}\hfill \\ In this subsection, we obtain a one to one correspondence between all the 0-1 measures with respect to $\lambda$ and all $\lambda$-ultrafilters and therefore use the $\lambda$-ultrafilters to study integrals with respect to the 0-1 measures with respect to $\lambda$. We begin with the definition of a $\lambda$-filter. \begin{definition} A non-empty subset $\mathcal{F}$ of $\Sigma$ is called a \textit{$\lambda$-filter on $X$} if\\ 1. $\lambda(A)>0$ for every $A\in \mathcal{F}$.\\ 2. For every $A,B\in\mathcal{F}$, $A\cap B\in\mathcal{F}$.\\ 3. $B\in\mathcal{F}$ for every $B\supset A$, $B\in\Sigma$ and $A\in\mathcal{F}$\\ A $\lambda$-filter $\mathcal{U}$ is called a \textit{$\lambda$-ultrafilter} if any $\lambda$-filter containing $\mathcal{U}$ is $\mathcal{U}$ itself. \end{definition} The existence of $\lambda$-ultrafilters is a direct consequence of Zorn's lemma. Before proceeding further, we derive a lemma that is going to be used throughout the section. \begin{lemma}\label{significant} Suppose $\mathcal{U}$ is a $\lambda$-ultrafilter.\\ 1. If $A\in\Sigma$ such that $A\notin\mathcal{U}$, then there exists $B\in\mathcal{U}$, such that $\lambda(A\cap B)=0$.\\ 2. If $\bigcup\limits_{k=1}^nA_k\in\mathcal{U}$ for $A_1, A_2,\dots,A_n\in\Sigma$, then $A_i\in\mathcal{U}$ for some $1\leq i\leq n$. \end{lemma} \begin{proof} 1.If no such $B$ exists, then $\mathcal{F}:=\{A\cap C:C\in\mathcal{U}\}\cup\mathcal{U}$ is a $\lambda$-filter properly containing $\mathcal{U}$.\\ 2. If $A_k\notin\mathcal{U}$ for every $1\leq k\leq n$, then by part 1, there exist $B_k\in\mathcal{U}$ for $1\leq k\leq n$ such that $\lambda(A_k\cap B_k)=0$. 
But then setting $B:=\bigcap\limits_{k=1}^n B_k$, we get \begin{equation*} \lambda\left(\left(\bigcup\limits_{k=1}^nA_k\right)\cap B\right)\leq\sum\limits_{k=1}^n\lambda(A_k\cap B)=0, \end{equation*} violating the closure of $\mathcal{U}$ under finite intersections. \end{proof} \par Let $\mathfrak{F}$ denote the collection of all $\lambda$-ultrafilters on $X$. We now define the $\lambda$-ultrafilter corresponding to a 0-1 measure with respect to $\lambda$ and vice versa. Suppose $\mu\in\mathfrak{G}$. We define $\mathcal{U}_\mu$ by: \begin{equation*} \mathcal{U}_\mu:=\{A\in\Sigma:\mu(A)=1\}. \end{equation*} Also for any $\lambda$-ultrafilter $\mathcal{U}$, let us define a set function $\mu^\mathcal{U}$ on $\Sigma$ by: \begin{equation*} \mu^\mathcal{U}(A):= \begin{cases} 1,~~A\in\mathcal{U},\\ 0,~~A\notin\mathcal{U}. \end{cases} \end{equation*} \begin{theorem}\label{representation2} \strut\\ 1.For any $\mu\in\mathfrak{G}$, $\mathcal{U}_\mu\in\mathfrak{F}$ and is called the $\lambda$-ultrafilter corresponding to $\mu$. \\ 2. For any $\mathcal{U}\in\mathfrak{F}$, $\mu^\mathcal{U}\in\mathfrak{G}$ and is called the 0-1 measure with respect to $\lambda$ corresponding to $\mathcal{U}$.\\ 3. The maps $\mu\mapsto\mathcal{U}_\mu$, $\mu\in\mathfrak{G}$ and $\mathcal{U}\mapsto\mu^\mathcal{U}$, $\mathcal{U}\in\mathfrak{F}$ are inverse to each other and thereby establish a one to one correspondence between $\mathfrak{F}$ and $\mathfrak{G}$. \end{theorem} \begin{proof} 1. It is easy to verify that for any $\mu\in\mathfrak{G}$, $\mathcal{U}_\mu$ does not contain any set having $\lambda$-measure zero. Further, since $\mu$ is a 0-1 measure, $\mu(A)=1$ and $B\supset A$, $B\in\Sigma$ forces $\mu(B)=1$. Now, if $\mu(A)=\mu(B)=1$, then $\mu(A\cup B)=1$ since $\mu$ is a 0-1 measure and hence as $\mu$ is additive, \begin{equation*} 1=\mu(A\cup B)=\mu(A)+\mu(B\backslash A)=1+\mu(B\backslash A). \end{equation*} Hence $\mu(B\backslash A)=0$ and so by additivity of $\mu$, \begin{equation*} \mu(A\cap B)=\mu(A\cap B)+\mu(B\backslash A)=\mu(B)=1, \end{equation*} giving $A\cap B\in\mathcal{U}_\mu$. Now if $\mathcal{F}$ is another $\lambda$-filter containing $\mathcal{U}_\mu$, then consider $C\in\mathcal{F}\backslash\mathcal{U}_\mu$. Clearly, $\mu(C)=0$. Then $\mu(X\backslash C)=1$ and hence $X\backslash C\in\mathcal{U}_\mu\subset\mathcal{F}$ violating, that $\mathcal{F}$ is a $\lambda$-filter.\\ 2. Clearly, $\mu^\mathcal{U}(A)=0$ for $\lambda(A)=0$ since $A\notin\mathcal{U}$. Now, if $A, B\in\Sigma$ and $A\cap B=\emptyset$, either exactly one of $A$ and $B$ is in $\mathcal{U}$ in which case $A\sqcup B\in\mathcal{U}$, or neither $A$ nor $B$ is in $\mathcal{U}$ in which case by Lemma \ref{significant}, $A\sqcup B\notin\mathcal{U}$. Clearly, in both cases, \begin{equation*} \mu^\mathcal{U}(A\sqcup B)=\mu^\mathcal{U}(A)+\mu^\mathcal{U}(B). \end{equation*} Hence $\mu^\mathcal{U}$ is a 0-1 measure with respect to $\lambda$.\\ 3. This part is an easy verification. \end{proof} We now study the integrals under 0-1 measures with respect to $\lambda$ in the light of this one to one correspondence. We introduce two new definitions. \begin{definition} A non-empty subset $\mathcal{B}$ of $\Sigma$ is said to be a \textit{$\lambda$-filter base} if\\ 1. $\lambda(A)>0$ for every $A\in\mathcal{B}$.\\ 2. For every $A,B\in\mathcal{B}$, there exists $C\in\mathcal{B}$ such that $C\subset A\cap B$. 
\end{definition} Any $\lambda$-filter base $\mathcal{B}$ is contained in a unique minimal $\lambda$-filter given by \begin{equation*} \{B\in\Sigma:B\supset A~for~some~A\in\mathcal{B}\}. \end{equation*} Since every $\lambda$-filter is contained in a $\lambda$-ultrafilter (a direct application of Zorn's lemma), every $\lambda$-filter base is contained in a $\lambda$-ultrafilter. We now define limit under a $\lambda$-filter. \begin{definition} Suppose $\mathcal{F}$ is a $\lambda$-filter on $X$ and $f:X\to\mathbb{K}$ is a measurable function. Then the \textit{limit of the map $f$ under the $\lambda$-filter $\mathcal{F}$} (written as $\lim\limits_\mathcal{F}f$) is defined as $z_0\in\mathbb{K}$ if \begin{equation*} \{x\in X:|f(x)-z_0|<\epsilon\}\in\mathcal{F}~~for~every~\epsilon>0. \end{equation*} \end{definition} We state a few elementary results pertaining to the limit of a measurable function under a $\lambda$-filter. We omit the proofs since the results follow directly from the definition of the limit. \begin{theorem} Suppose $f:X\to\mathbb{K}$ is a measurable function and $\mathcal{F}$ is a $\lambda$-filter on $X$.\\ 1. $\lim\limits_{\mathcal{F}}f$ if exists is unique.\\ 2. If $g:\mathbb{K}\to\mathbb{K}$ is continuous, $\lim\limits_{\mathcal{F}}g\circ f=g\left(\lim\limits_\mathcal{F}f\right)$.\\ 3. Limits under a $\lambda$-filter respect addition, multiplication, division and multiplication with a constant.\\ \end{theorem} We now come to our second key result of this subsection. \begin{theorem}\label{integral representation} Suppose $f\in L_\infty(X)$.\\ 1. If $\mathcal{U}$ is a $\lambda$-ultrafilter, $\lim\limits_\mathcal{U}f$ exists and is well defined.\\ 2. If $\mu\in\mathfrak{G}$, \begin{equation*} \lim\limits_{\mathcal{U}_\mu}f=\int\limits_Xfd\mu. \end{equation*} \end{theorem} \begin{proof} 1. Clearly $\lim\limits_\mathcal{U}f$, if exists, must lie in the set $\mathcal{D}:=\{z\in\mathbb{K}:|z|\leq\|f\|_\infty\}$. Now, suppose $\lim\limits_\mathcal{U}f$ does not exist. Then for every $z\in\mathcal{D}$, there exists $\epsilon_z>0$ such that \begin{equation*} \{x\in X:|f(x)-z|<\epsilon_z\}\notin\mathcal{U}. \end{equation*} Set $B_z:=\{w\in\mathbb{K}:|w-z|<\epsilon_z\}$. Then $\{B_z:z\in\mathcal{D}\}$ is an open cover of the compact set $\mathcal{D}$ and therefore must have a finite sub-cover, say $\{B_{z_1},B_{z_2},\dots,B_{z_n}\}$. We further set \begin{equation*} A_i:=\{x\in X: |f(x)-z_i|<\epsilon_{z_i}\},~~1\leq i\leq n. \end{equation*} Hence clearly, $A_i\notin\mathcal{U}$ and \begin{equation*} \bigcup\limits_{i=1}^nA_i=X\backslash B,~for~some~B\in\Sigma,~\lambda(B)=0. \end{equation*} Thus by Lemma \ref{significant}, $X\backslash B\notin\mathcal{U}$. But then as $B\notin\mathcal{U}$, we arrive at a contradiction.\par In order to prove that the limit is well defined, consider $f$ and $f'$ essentially bounded such that $f=f'$ almost everywhere on $X$ with respect to $\lambda$. Now, if $\lim\limits_\mathcal{U}f=z_0$, then for any $\epsilon>0$, \begin{equation*} \{x\in X:|f(x)-z_0|<\epsilon\}\subset\{x\in X:|f'(x)-z_0|<\epsilon\}\cup\{x\in X: f(x)\neq f'(x)\}. \end{equation*} Now by Lemma \ref{significant}, $\{x\in X:|f'(x)-z_0|<\epsilon\}\in\mathcal{U}$ since $\lambda\left(\{x\in X: f(x)\neq f'(x)\}\right)=0$.\\ 2. Suppose $\lim\limits_{\mathcal{U}_\mu}f=z_0$. Then for every $\epsilon>0$, \begin{equation*} \mu\left(\{x\in X:|f(x)-z_0|<\epsilon\}\right)=1, \end{equation*} and therefore \begin{equation*} \mu\left(\{x\in X:|f(x)-z_0|\geq\epsilon\}\right)=0. 
\end{equation*} Hence we obtain that \begin{align*} \left|\int\limits_Xfd\mu-z_0\right| \leq \int\limits_X|f-z_0|d\mu=\int\limits_{|f-z_0|<\epsilon}|f-z_0|d\mu \leq\epsilon. \end{align*} Since $\epsilon$ is arbitrary, \begin{equation*} \int\limits_Xfd\mu=z_0. \end{equation*} \end{proof} We now establish a result that gives a collection of possible values of limits of an essentially bounded function under a $\lambda$-ultrafilter. \begin{theorem}\label{integral and limit} Suppose $f,g\in L_\infty(X)$. \\ 1. For any $z_0\in\mathbb{K}$, there exists a $\lambda$-ultrafilter $\mathcal{U}$ such that $\lim\limits_\mathcal{U}f=c_0$ if and only if for every $\epsilon>0$, \begin{equation*} \lambda\left(\{x\in X:|f(x)-z_0|<\epsilon\}\right)>0. \end{equation*} 2. For any $z_0,w_0\in\mathbb{K}$, there exists a $\lambda$-ultrafilter $\mathcal{U}$ such that $\lim\limits_\mathcal{U}f=z_0$ and $\lim\limits_\mathcal{U}g=w_0$ if and only if for any $\epsilon>0$, \begin{equation*} \lambda\left(\{x\in X:|f(x)-z_0|<\epsilon,|g(x)-w_0|<\epsilon\}\right)>0. \end{equation*} \end{theorem} \begin{proof} The necessary part of both the statements are clear. We therefore prove the sufficiency in the two statements.\\ 1. Consider $\mathcal{B}\subset\Sigma$ given by \begin{equation*} \mathcal{B}:=\left\{\{x\in X:|f(x)-z_0|<\epsilon\},~~\epsilon>0\right\}. \end{equation*} Clearly, $\mathcal{B}$ is a $\lambda$-filter base and hence there exists a $\lambda$-ultrafilter $\mathcal{U}$ containing $\mathcal{B}$. Clearly, by construction, $\lim\limits_\mathcal{U}f=z_0$.\\ 2. Again consider $\mathcal{B}'\subset\Sigma$ given by \begin{equation*} \mathcal{B}':=\left\{\{x\in X:|f(x)-z_0|<\epsilon\}\cap\{x\in X:|g(x)-w_0|<\epsilon\},~~\epsilon>0\right\}. \end{equation*} Clearly, $\mathcal{B}'$ too is a $\lambda$-filter base and hence there exists a $\lambda$-ultrafilter $\mathcal{U}'$ containing $\mathcal{B}'$. Also, by construction, clearly, $\lim\limits_{\mathcal{U}'}f=z_0$ and $\lim\limits_{\mathcal{U}'}g=w_0$. \end{proof} \subsection{Birkhoff-James orthogonality in $L_\infty(X)$}\hfill \\ In this subsection we characterize Birkhoff-James orthogonality between two elements of $L_\infty(X)$ and use the characterization to study the smoothness of a point in $L_\infty(X)$. \par The following characterization of orthogonality follows from Theorems \ref{ortho}, \ref{representation}, \ref{integral representation} and \ref{integral and limit}. \begin{theorem} Suppose $f,g\in L_\infty(X)$ are non-zero. Then $f\perp_Bg$ if and only if \[0\in\conv\left\{z:\lambda\left(\{x\in X: |f(x)|>\|f\|_\infty-\epsilon,~|\overline{f(x)}g(x)-z|<\epsilon\} \right) >0~~\forall~\epsilon>0\right\}.\] \end{theorem} We now come to the characterization of smooth points in $L_\infty(X)$, but before this result, we prove a preliminary lemma. \begin{lemma}\label{partition} If $f\in L_\infty(X)$ and \begin{equation*} \lambda\left(\{x\in X:|f(x)|=\|f\|_\infty\}\right)=0, \end{equation*} there exist $A,B\subset X$ such that $A\cap B=\emptyset$ and \begin{equation*} \lambda\left(\{x\in A: \|f\|-|f(x)|<\epsilon\}\right),~\lambda\left(\{x\in B: \|f\|_\infty-|f(x)|<\epsilon\}\right)>0, \end{equation*} for every $\epsilon>0$. 
\end{lemma} \begin{proof} Since $\lambda\left(\{x\in X:|f(x)|=\|f\|_\infty\right\})=0$, clearly either, \[\lambda\left(\{x\in X:\|f\|_\infty-|f(x)|<\epsilon\}\right)<\infty,\] for some $\epsilon>0$ and \[\lambda\left(\{x\in X:\|f\|_\infty-|f(x)|<\delta\}\right)\to0,\] as $\delta\to0$ or, \[\lambda\left(\{x\in X:\|f\|_\infty-|f(x)|<\epsilon\}\right)=\infty\] for every $\epsilon>0$.\\ \textbf{Case 1:} \textit{$\lambda\left(\{x\in X:\|f\|_\infty-|f(x)|<\epsilon_0\}\right)<\infty$ for some $\epsilon_0>0$.}\\ For $n\in\mathbb{N}$, we set $C_n=\{x\in X:\|f\|_\infty-|f(x)|<\epsilon_{n-1}\}$ and $D_n\subset C_n$ such that \begin{equation*} 0<\lambda(D_n)\leq\frac{1}{2}\lambda(C_n). \end{equation*} We further consider $\epsilon_n>0$ such that \begin{equation*} \lambda\left(\{x\in X:\|f\|_\infty-|f(x)|<\epsilon_n\}\right)<\frac{1}{3}\lambda(D_n). \end{equation*} Finally, define \begin{equation*} A_n:=D_n\backslash C_{n+1},~~A:=\bigsqcup\limits_{n=1}^\infty A_n,~~B:=X\backslash A. \end{equation*} Then clearly, since $\lambda(A_n)>0$ for every $n\in\mathbb{N}$ and $\lim\limits_{n\to\infty}\epsilon_n=0$, we have \begin{equation*} \lambda\left(\{x\in A: \|f\|-|f(x)|<\epsilon\}\right)>0, \end{equation*} for every $\epsilon>0$. Again for any $\epsilon>0$, there exists $n\in\mathbb{N}$ such that $\epsilon>\epsilon_{n-1}$. Observe that \begin{align*} \lambda\left(\{x\in B: \|f\|-|f(x)|<\epsilon\}\right)&\geq\lambda\left(\{x\in B: \|f\|-|f(x)|<\epsilon_{n-1}\}\right)\\ &=\lambda\left(C_n\backslash\left(\bigsqcup\limits_{k\geq n}A_k\right)\right)\\ &=\lambda(C_n)-\sum\limits_{n\geq k}\lambda(A_k)\\ &\geq \lambda(C_n)-\sum\limits_{k\geq n}\lambda(D_k)\\ &\geq \lambda(C_n)-\sum\limits_{k=0}^\infty \lambda(D_n)\frac{1}{3^k}=\lambda(C_n)-\frac{3}{2}\lambda(D_n)>0. \end{align*} \textbf{Case 2:} \textit{$\lambda\left(\{x\in X:\|f\|_\infty-|f(x)|>\epsilon\}\right)=\infty$ for every $\epsilon>0$.}\\ Then for every $n\in\mathbb{N}$, there exists $\epsilon_n>0$ such that $\epsilon_n>\epsilon_{n+1}$ and $\lim\limits_{n\to\infty}\epsilon_n=0$ with \begin{equation*} \lambda\left(\{x\in X: \|f\|_\infty-|f(x)|\in(\epsilon_{n+1},\epsilon_n)\}\right)>0. \end{equation*} Hence setting \begin{equation*} A:=\left\{x\in X: \|f\|_\infty-|f(x)|\in\left(\epsilon_{2n},\epsilon_{2n-1}\right),~n\in\mathbb{N}\right\}, \end{equation*} and \begin{equation*} B:=\left\{x\in X: \|f\|_\infty-|f(x)|\in\left(\epsilon_{2n+1},\epsilon_{2n}\right),~n\in\mathbb{N}\right\}, \end{equation*} gives us the desired subsets of $X$. \end{proof} We now come to the characterization of smooth points in $L_\infty(X)$ but for that, we require the definition of a $\lambda$-atom. \begin{definition} A subset $A\in\Sigma$ is called a \textit{$\lambda$-atom} if $\lambda(A)>0$ and \begin{equation*} B\subset A,~\lambda(B)>0~\Rightarrow~B=A. \end{equation*} \end{definition} \begin{theorem}\label{smooth infinity 2} An element $f\in L_\infty(X)$ is smooth if and only if there exists a $\lambda$-atom $A$ such that $|f(x)|=\|f\|_\infty$ for almost every $x\in A$ and \begin{equation*} \lambda\left(\{x\in X\backslash A:|f(x)|>\|f\|_\infty-\epsilon\}\right)=0, \end{equation*} for some $\epsilon>0$. \end{theorem} \begin{proof} By Theorems \ref{smooth}, \ref{representation} and \ref{integral representation}, we have that $f\in L_\infty(X)$ is smooth if and only if there exists a unique $\lambda$-ultrafilter $\mathcal{U}$ such that \begin{equation*} \lim\limits_\mathcal{U}|f|=\|f\|_\infty. \end{equation*} We first prove the sufficiency. 
Set \begin{equation*} \mathcal{V}_A:=\{B\in \Sigma:B\supset A\}. \end{equation*} Clearly, $\mathcal{V}_A$ is a $\lambda$-ultrafilter since no proper measurable subset of $A$ has nonzero measure. Clearly, $\lim\limits_{\mathcal{V}_A}|f|=\|f\|_\infty$. Suppose $\mathcal{U}$ is a $\lambda$-ultrafilter such that $\lim\limits_\mathcal{U}|f|=\|f\|_\infty.$ Let us assume that $\lim\limits_\mathcal{U}f=e^{i\theta}\|f\|_\infty$, for some $\theta\in[0,2\pi)$. Then clearly, \begin{equation*} A\cup\{x\in X\backslash A: |f(x)|>\|f\|_\infty-\epsilon\}\supset\left\{x\in X:\left|f(x)-e^{i\theta}\|f\|_\infty\right|<\epsilon \right\}\in\mathcal{U}. \end{equation*} Since $\{x\in X\backslash A: |f(x)|>\|f\|_\infty-\epsilon\}\notin\mathcal{U}$, by Lemma \ref{significant}, $A\in\mathcal{U}$. Hence clearly $\mathcal{V}_A\subset\mathcal{U}$ and thus $\mathcal{V}_A=\mathcal{U}$ since $\mathcal{V}_A$ is a $\lambda$-ultrafilter.\par Conversely, suppose there is no $\lambda$-atom $A$ such that $|f(x)|=\|f\|_\infty$ for almost every $x\in A$. Then either \begin{equation*} \lambda\left(\{x\in X: |f(x)|=\|f\|\}\right)=0, \end{equation*} or there exist $A$ and $B$ disjoint subsets in $\Sigma$ such that \begin{equation*} A\sqcup B\subset\{x\in X: |f(x)|=\|f\|_\infty\}, \end{equation*} and $\lambda(A),\lambda(B)>0$. In the second case we consider \begin{align*} \mathcal{V}:=\{C\in\Sigma:C\supset A\},~~ \mathcal{W}:=\{C\in\Sigma:C\supset B\}. \end{align*} Clearly $\mathcal{V}$ and $\mathcal{W}$ are contained in two distinct ultrafilters, say $\mathcal{V}'$ and $\mathcal{W}'$ and \begin{equation*} \lim\limits_{\mathcal{V}'}|f|=\lim\limits_{\mathcal{W}'}|f| =\|f\|_\infty. \end{equation*} In the first case, by Lemma \ref{partition}, there exist $A_1, A_2\subset X$ disjoint such that \begin{equation*} \lambda\left(x\in A_i:\|f\|_\infty-|f(x)|<\epsilon\right)>0, \end{equation*} for every $\epsilon>0$ and $i=1,2$. Observe that \begin{equation*} \mathcal{B}_i:=\left\{\{x\in A_i:\|f\|_\infty-|f(x)|<\epsilon\}:\epsilon>0\right\}, \end{equation*} is a $\lambda$-filter base for $i=1,2$ and hence in contained in a $\lambda$-ultrafilter $\mathcal{U}_i$. Also, clearly, $\mathcal{U}_1\neq\mathcal{U}_2$ and \begin{equation*} \lim\limits_{\mathcal{U}_i}|f|=\|f\|_\infty, \end{equation*} for $i=1,2$.\\ Again if there exists a $\lambda$-atom $A$ with $|f(x)|=\|f\|_\infty$ for almost every $x\in A$ but \begin{equation*} \lambda\left(\{x\in X\backslash A:|f(x)|>\|f\|_\infty-\epsilon\}\right)>0~~\textit{for every}~\epsilon>0, \end{equation*} then set \begin{equation*} \mathcal{B}:=\left\{\{x\in X\backslash A:|f(x)|<\|f\|_\infty-\epsilon\}:\epsilon>0\right\}. \end{equation*} Clearly, $\mathcal{B}$ is a $\lambda$-filter base and is contained in some ultrafilter $\mathcal{U}$. Since $X\backslash A\in\mathcal{U}$, $\mathcal{U}$ and $\mathcal{V}_A$ are two distinct ultrafilters but \begin{equation*} \lim\limits_{\mathcal{U}}|f|=\lim\limits_{\mathcal{V}_A}|f|=\|f\|_\infty, \end{equation*} and hence the necessity. \end{proof} \subsection{Pointwise symmetry of Birkhoff-James orthogonality in $L_\infty(X)$}\hfill \\ In this subsection we characterize the left symmetric and the right symmetric points of $L_\infty(X)$. We begin with the characterization of the left symmetric points. \begin{theorem} A non-zero $f\in L_\infty(X)$ is left symmetric if and only if $|f(x)|=\|f\|$ for almost every $x$ in some $\lambda$-atom $A$ and $f(x)=0$ for almost every $x\in X\backslash A$. 
\end{theorem} \begin{proof} By Theorems \ref{smooth} and \ref{left}, if $f\in L_\infty(X)$ is left symmetric, $f$ is smooth and hence by Theorem \ref{smooth infinity 2}, there exists a $\lambda$-atom $A$, such that $|f(x)|=\|f\|_\infty$ for almost every $x\in A$. Further, by Theorems \ref{representation}, \ref{representation2} and \ref{integral representation}, $\lim\limits_\mathcal{U}f=0$ for every $\lambda$-ultrafilter $\mathcal{U}$ not containing $A$. Hence, for every $z\neq 0$, there exists $\epsilon_z>0$ such that \begin{equation*} \lambda\left(\{x\in X\backslash A:|f(x)-z|<\epsilon_z\}\right)=0. \end{equation*} Hence \begin{equation*} \left\{\{w\in\mathbb{K}:|w-z|<\epsilon_z\}:z\neq 0\right\}, \end{equation*} is an open cover of $\mathbb{K}\backslash \{0\}$. Choose and fix a countable sub-cover given by: \begin{equation*} \left\{\left\{w\in\mathbb{K}:\left|w-z_n\right|<\epsilon_{z_n}\right\}:z_n\neq 0,~n\in\mathbb{N}\right\}. \end{equation*} Hence \begin{equation*} \lambda\left(\{x\in X\backslash A:f(x)\neq 0\}\right)\leq\sum\limits_{n=1}^\infty\lambda\left(\{x\in X: \left|f(x)-z_n\right|\}\right)=0, \end{equation*} proving the necessity. The sufficiency follows easily from Theorems \ref{left}, \ref{representation}, \ref{integral representation} and \ref{integral and limit}. \end{proof} \begin{theorem} $f\in L_\infty(X)$ is right symmetric if and only if $|f(x)|=\|f\|_\infty$ for almost every $x\in X$. \end{theorem} \begin{proof} By Theorems \ref{right}, \ref{representation}, \ref{integral representation} and \ref{integral and limit}, $f\in L_\infty(X)$ is right symmetric if and only if for every $z\in\mathbb{K}$ with $|z|<\|f\|_\infty$, there exists $\epsilon_z>0$ such that \begin{equation*} \lambda\left(\{x\in X:|f(x)-z|<\epsilon_z\}\right)=0. \end{equation*} Hence the sufficiency is easy to verify. For the necessity, observe that, \begin{equation*} \left\{\{w\in\mathbb{K}:|w-z|<\epsilon_z\}:|z|<\|f\|_\infty\right\}, \end{equation*} is an open cover of $\{z\in\mathbb{K}:|z|<\|f\|_\infty\}$. Choose and fix a countable sub-cover of the aforesaid cover given by: \begin{equation*} \left\{\left\{w\in\mathbb{K}:\left|w-z_n\right|<\epsilon_{z_n}\right\}: \left|z_n\right|<\|f\|_\infty,~n\in\mathbb{N}\right\}. \end{equation*} Hence we obtain: \begin{align*} \lambda\left(\{x\in X:|f(x)|\neq\|f\|_\infty\right)&=\lambda\left(\{x\in X:|f(x)|<\|f\|_\infty\right\})\\ &\leq\sum\limits_{n=1}^\infty \lambda\left(\left\{x\in X:\left|f(x)-z_n\right|<\epsilon_{z_n}\right\}\right)=0. \end{align*} \end{proof} \section{Birkhoff-James orthogonality and its pointwise symmetry in $L_1$ spaces} In this section, we first characterize Birkhoff-James orthogonality in $L_1(X)$ and then characterize smoothness and pointwise symmetry. As before, we assume the measure space to be $(X,\Sigma,\lambda)$. Our approach would be to characterize $J(f)$ for any non-zero $f\in L_1(X)$ and therefrom use the James characterization to characterize Birkhoff-James orthogonality. The characterizations of smoothness and pointwise symmetry would follow therefrom.\par Since the dual of $L_1(X)$ is isometrically isomorphic to $L_\infty(X)$, we are going to assume that $L_\infty(X)$ is indeed the dual of $L_1(X)$ and any element $h\in L_\infty(X)$ acts on $L_1(X)$ as: \begin{align*} f\mapsto \int\limits_Xh(x)f(x)d\lambda(x),~f\in L_1(X). \end{align*} \begin{lemma}\label{supp} Suppose $f\in L_1(X)\setminus\{0\}$. 
Then for any $h\in L_\infty(X)=L_1(X)^*$, $h\in J(f)$ if and only if $h(x)=\overline{\sgn(f(x))}$ for almost every $x\in X$ such that $f(x)\neq0$, and $|h(x)|\leq1$ for almost every $x\in X$ such that $f(x)=0$. \end{lemma} \begin{proof} The sufficiency follows from direct computation. For the necessity, note that \begin{align*} \|f\|_1=\int\limits_Xh(x)f(x)d\lambda(x)\leq\int\limits_X\|h\|_\infty|f(x)|d\lambda(x)=\|f\|_1, \end{align*} whenever $h\in J(f)$. Hence from the condition of equality in the above inequality, we obtain $h(x)=\|h\|_\infty\overline{\sgn(f(x))}=\overline{\sgn(f(x))}$ for almost every $x\in X$, $f(x)\neq0$. \end{proof} From this lemma, we can now characterize Birkhoff-James orthogonality in $L_1(X)$. \begin{theorem}\label{orth1} Suppose $f,g\in L_1(X)$. Then $f\perp_Bg$ if and only if \begin{align}\label{ortho1} \left|\int\limits_X\overline{\sgn(f(x))}g(x)d\lambda(x)\right|\leq\int\limits_{f(x)=0}|g(x)|d\lambda(x). \end{align} \end{theorem} \begin{proof} We first prove the necessity. Since $f\perp_Bg$, there exists $h\in L_\infty(X)$, such that $h\in J(f)$ and $\int\limits_Xg(x)h(x)d\lambda(x)=0$. By Lemma \ref{supp}, we now conclude \begin{align*} \left|\int\limits_X\overline{\sgn(f(x))}g(x)d\lambda(x)\right|&=\left|\int\limits_{f(x)\neq0}\overline{\sgn(f(x))}g(x)d\lambda(x)\right|\\&=\left|\int\limits_{f(x)=0}g(x)h(x)d\lambda(x)\right|\leq\int\limits_{f(x)=0}|g(x)|d\lambda. \end{align*} Again, if \eqref{ortho1} holds, set: \begin{align*} c=\frac{-\left|\int\limits_X\overline{\sgn(f(x))}g(x)d\lambda(x)\right|}{\int\limits_{f(x)=0}|g(x)|d\lambda(x)}. \end{align*} Consider $h:X\to\mathbb{K}$ given by \begin{align*} h(x):= \begin{cases} \overline{\sgn(f(x))},~~&f(x)\neq0,\\ c\,\overline{\sgn(g(x))},~~&f(x)=0. \end{cases} \end{align*} Clearly, $h\in L_\infty(X)$ and $h\in J(f)$ by Lemma \ref{supp}. But clearly $\int\limits_Xg(x)h(x)d\lambda(x)=0$, establishing the sufficiency. \end{proof} We now characterize the smooth points of $L_1(X)$. \begin{theorem}\label{s} $f\in L_1(X)$ is a smooth point if and only if $f\neq0$ almost everywhere on $X$. \end{theorem} \begin{proof} If $f\neq0$ almost everywhere on $X$, $\int\limits_{f(x)=0}|g(x)|d\lambda(x)=0$ for every $g\in L_1(X)$ giving $f\perp_Bg$ if and only if \begin{align*} \int\limits_X\overline{\sgn(f(x))}g(x)d\lambda(x)=0. \end{align*} Hence $f\perp_Bg$ and $f\perp_Bh$ for $g,h\in L_1(X)$ forces $f\perp_B(g+h)$, proving the sufficiency. To prove the necessity, assume $\lambda(\{x\in X:f(x)\neq0\})>0$. Consider $h_0,h_1:X\to\mathbb{K}$ given by \begin{align*} h_i(x):= \begin{cases} \overline{\sgn(f(x))},~~&f(x)\neq0,\\ i,~~&f(x)=0, \end{cases} \end{align*} for $i=0,1$. Then by Lemma \ref{supp}, $h_0$ and $h_1$ are two distinct support functionals of $f$ and hence $f$ cannot be a smooth point of $L_1(X)$. \end{proof} We now characterize the pointwise symmetry of Birkhoff-James orthogonality in $L_1(X)$. We first address the left-symmetric case. \begin{theorem} $f\in L_1(X)$ is a left-symmetric point if and only if exactly one of the following conditions holds: \begin{enumerate} \item $f\equiv0$. \item $f\not\equiv0$ and $\Sigma=\{\emptyset, X\}$. \item There exist disjoint $\Sigma$-atoms $A$ and $B$ such that $A\sqcup B=X$ and $\lambda(A)|f(x)|=\lambda(B)|f(y)|$ for almost every $x\in A$ and $y\in B$. \end{enumerate} \end{theorem} \begin{proof} The sufficiency can be obtained from Theorem \ref{orth1} by direct computation. For, the necessity, let $f\in L_1(X)$, $f\not\equiv0$. 
We consider two cases:\\ \textbf{Case I:} $\lambda(\{x\in X:f(x)=0\})>0$.\\ Set $A\subseteq\{x\in X:f(x)=0\}$ such that $\infty>\lambda(A)>0$. Consider $g:X\to\mathbb{K}$ given by \begin{align*} g(x):= \begin{cases} f(x),~~&f(x)\neq0,\\ \frac{\|f\|_1}{\lambda(A)},~~&x\in A,\\ 0,~~&\text{otherwise}. \end{cases} \end{align*} Clearly, $g\in L_1(X)$ and by Theorem \ref{orth1}, $f\perp_Bg$ but $g\not\perp_Bf$.\\ \textbf{Case II:} $\lambda(\{x\in X:f(x)=0\})=0$.\\ Since $f\in L_1(X)$ is a smooth point, from the proof of Theorem \ref{s}, for any $g\in L_1(X)$, $f\perp_Bg$ if and only if \begin{align*} \int\limits_X\overline{\sgn(f(x))}g(x)d\lambda(x)=0. \end{align*} Suppose, there do not exist disjoint $\Sigma$-atoms $A$ and $B$ such that $A\sqcup B=X$ and $\lambda(A)|f(x)|=\lambda(B)|f(y)|$ for almost every $x\in X$ and $y\in Y$. Then there exists $A\in \Sigma$ such that \begin{align*} 0<\int\limits_A|f(x)|d\lambda(x)<\int\limits_{X\setminus A}|f(x)|d\lambda(x). \end{align*} Set $\alpha=\int\limits_A|f(x)|d\lambda(x)$ and $\beta=\int\limits_{X\setminus A}|f(x)|d\lambda(x)$. Now, $g:X\to\mathbb{K}$ given by \begin{align*} g(x):= \begin{cases} \beta f(x),~~&x\in A,\\ -\alpha f(x),~~&x\notin A, \end{cases} \end{align*} is a smooth point of $L_1(X)$ by Theorem \ref{s}. Hence for any $h\in L_1(X)$, $g\perp_Bh$ if and only if \begin{align*} \int\limits_X\overline{\sgn(g(x))}h(x)d\lambda(x)&=0\\ \Leftrightarrow \int\limits_A\overline{\sgn(f(x))}h(x)d\lambda(x)-\int\limits_{X\setminus A}&\overline{\sgn(f(x))}h(x)=0. \end{align*} Hence by our choice of $A\in \Sigma$, $f\perp_Bg$ but $g\not\perp_Bf$. \end{proof} We conclude this section with the characterization of the right-symmetric points of $L_1(X)$. \begin{theorem} A non-zero function $f\in L_1(X)$ is right-symmetric if and only if $\{x\in X:f(x)\neq0\}$ is a $\Sigma$-atom. \end{theorem} \begin{proof} Clearly, if $A=\{x\in X:f(x)\neq0\}$ is a $\Sigma$-atom, then by Theorem \ref{orth1}, for any $g\in L_1(X)$, $g\perp_Bf$ if and only if $g|_A\equiv0$, Hence $f\perp_Bg$ if $g\perp_Bf$.\par Conversely, if there exist disjoint measurable subsets $A$ and $B$ of finite positive measure such that $A\sqcup B\subseteq\{x\in X:f(x)\neq0\}$, then without loss of generality, we assume \begin{align*} 0<\int\limits_A|f|d\lambda\leq\int\limits_B|f|d\lambda. \end{align*} Setting $g: X\to\mathbb{K}$ given by \begin{equation*} g(x):= \begin{cases} \sgn(f(x)),~~&x\in A,\\ 0,~~&x\notin A, \end{cases} \end{equation*} we get $g\in L_1(X)$. Also, by Theorem \ref{orth1}, $g\perp_Bf$ and $f\not\perp_Bg$. \end{proof} \section{Birkhoff-James orthogonality and its pointwise symmetry in $L_p(X)$, $p\in(1,\infty)\setminus\{2\}$} In this section, we characterize Birkhoff-James orthogonality and its pointwise symmetry in $L_p(X)$ for $1<p<\infty$, $p\neq2$. It is well-known that $L_p(X)$ is smooth and hence the characterization of smoothness here is redundant. Our approach for $L_p(X)$, $p\in(1,\infty)\setminus\{2\}$ is similar to the $L_1(X)$ case. We first study the support functional (which is unique here as the space is smooth) of a non-zero element and therefrom obtain a characterization of Birkhoff-James orthogonality by James' characterization. The characterization of pointwise symmetry would then follow from the orthogonality characterization.\par Let us fix $p\in(1,\infty)\setminus\{2\}$. The following theorem characterizing the (unique) support functional of $f\in L_p(X)\setminus\{0\}$ follows directly from the condition of equality in Hölder's inequality. 
\begin{theorem}\label{suppp} Let $f\in L_p(X)\setminus\{0\}$ and let $\frac{1}{p}+\frac{1}{q}=1$. Suppose $g\in L_q(X)=L_p(X)^*$. Then $g\in J(f)$ if and only if \begin{equation*} g(x)=\frac{1}{\|f\|_p^{p-1}}\overline{\sgn(f(x))}|f(x)|^{p-1},~~x\in X. \end{equation*} \end{theorem} Using this result, we now characterize Birkhoff-James orthogonality in $L_p(X)$. \begin{theorem}\label{orthp} If $f,g\in L_p(X)$, then $f\perp_Bg$ if and only if \begin{align*} \int\limits_X\overline{\sgn(f(X))}|f(x)|^{p-1}g(x)d\lambda(x)=0. \end{align*} \end{theorem} We can now characterize pointwise symmetry of Birkhoff-James orthogonality in $L_p(X)$. \begin{theorem} Suppose $f\in L_p(X)$. Then $f$ is left-symmetric if and only if $f$ is right-symmetric if and only if exactly one of the following conditions holds: \begin{enumerate} \item $f\equiv0$. \item $\{x\in X:f(x)\neq0\}$ is a $\Sigma$-atom. \item There exist $\Sigma$-atoms $A$ and $B$ such that $\{x\in X:f(x)\neq0\}=A\sqcup B$ and $\lambda(A)|f(x)|^p=\lambda(B)|f(y)|^p$ for almost every $x\in A$ and $y\in B$. \end{enumerate} \end{theorem} \begin{proof} The sufficiency can be obtained from Theorem \ref{orthp} by an elementary computation. For the necessity, let us assume that $f\not\equiv 0$ and $\{x\in X:f(x)\neq0\}$ is not a $\Sigma$-atom. We consider the following two cases:\\ \textbf{Case I:} There exist $\Sigma$-atoms $A$ and $B$ such that $\{x\in X:f(x)\neq0\}=A\sqcup B$.\\ If $g\in L_p(X)$ with $\{x\in X:g(X)\neq0\}=A\sqcup B$ such that $g\perp_Bf$ and $f\perp_Bg$, then for almost every $x\in A$ and $y\in B$, \begin{equation}\label{condio1} g(x)=\frac{\lambda(B)|f(y)|^{p-1}\overline{\sgn(f(y))}}{\lambda(A)|f(x)|^{p-1}\overline{\sgn(f(x))}}g(y), \end{equation} and \begin{equation}\label{condio2} f(x)=\frac{\lambda(B)|g(y)|^{p-1}\overline{\sgn(g(y))}}{\lambda(A)|g(x)|^{p-1}\overline{\sgn(g(x))}}f(y). \end{equation} Hence \begin{align*} \left[\lambda(B)|f(y)|^p\right]^{p-2}=\left[\lambda(A)|f(x)|^p\right]^{p-2}. \end{align*} Since $p\neq2$, $f$ must satisfy condition 3. However, using \eqref{condio1} or \eqref{condio2}, we can always construct $g\in L_p(X)$ with $\{x\in X:g(x)\neq0\}=A\sqcup B$ such that $f\perp_Bg$ or $g\perp_Bf$ respectively.\\ \textbf{Case II:} There exist $A,B,C\in\Sigma$ disjoint such that all the sets are of finite positive measure and $A\sqcup B\sqcup C\subseteq\{x\in X:f(x)\neq0\}$. \\ Without loss of generality, let us assume that \begin{align*} 0<\int\limits_{A}|f(x)|^pd\lambda(x)<\int\limits_{B\sqcup C}|f(x)|^pd\lambda(x). \end{align*} Consider $g_{a,b}:X\to\mathbb{K}$ given by \begin{align*} g_{a,b}(x):= \begin{cases} a f(x),~~&x\in A,\\ b f(x),~~&x\in B\sqcup C,\\ 0,~~&\text{otherwise}, \end{cases} \end{align*} for some $a,b\in\mathbb{K}$. Then, \begin{align*} \int\limits_{X}\overline{\sgn(f(x))}|f(x)|^{p-1}g_{a,b}(x)d\lambda(x)=a\int\limits_A|f(x)|^pd\lambda(x)+b\int\limits_B|f(x)|^pd\lambda(x), \end{align*} and \begin{align*} \int\limits_{X}\overline{\sgn(g_{a,b}(x))}|g_{a,b}(x)|^{p-1}f(x)d\lambda(x)&=\overline{\sgn(a)}|a|^{p-1}\int\limits_A|f(x)|^pd\lambda(x)\\ &+\overline{\sgn(b)}|b|^{p-1}\int\limits_B|f(x)|^pd\lambda(x) \end{align*} Thus, $f\perp_Bg_{a,b}$ but $g_{a,b}\not\perp_Bf$ when \begin{align*} a=\int\limits_{B\cup C}|f(x)|^pd\lambda(x),~~b=-\int\limits_{A}|f(x)|^pd\lambda(x), \end{align*} and $g_{a,b}\perp_Bf$ but $f\not\perp_Bg_{a,b}$ when \begin{align*} a=\left[\int\limits_{B\sqcup C}|f(x)|^pd\lambda(x)\right]^\frac{1}{p-1},~~b=-\left[\int\limits_{A}|f(x)|^pd\lambda(x)\right]^\frac{1}{p-1}. 
\end{align*} \end{proof} \begin{thebibliography}{100} \bibitem{annal} L. Arambašić, R. Rajić, \textit{``On symmetry of the (strong) Birkhoff–James orthogonality in Hilbert $C^*$-modules"}, \texttt{Ann. Funct. Anal., Volume 7, Number 1 (2016), 17-23.} \bibitem{B} G. Birkhoff, \textit{``Orthogonality in linear metric spaces"}, \texttt{Duke Math. J., 1 (1935), 169-172.} \bibitem{usseq} B. Bose, S. Roy, D. Sain, \textit{``Birkhoff-James Orthogonality and Its Local Symmetry in Some Sequence Spaces"}, \texttt{arXiv:2205.11586 [math.FA], https://doi.org/10.48550/arXiv.2205.11586} \bibitem{caratheodory} C. Carathéodory, \textit{``Ueber den Variabilitätsbereich der Fourierschen Konstanten von positiven harmonischen Funktionen"}, \texttt{Rend. Circ. Mat. Palermo, 32 (1911), 193-217.} \bibitem{CSS} A. Chattopadhyay, D. Sain, T. Senapati, \textit{ ``Characterization of symmetric points in $ l_p^n $-spaces"}, \texttt{ Linear Multilinear Algebra, (2019), https://doi.org/10.1080/03081087.2019.1702916.} \bibitem{gelfand} I. M. Gelfand, M. A. Naimark, \textit{``On the imbedding of normed rings into the ring of operators on a Hilbert space"}, \texttt{Matematicheskii Sbornik 12 (2) (1943), 197-217.} \bibitem{dkp} P. Ghosh, D. Sain and K. Paul, \textit{``On symmetry of Birkhoff-James orthogonality of linear operators"}, \texttt{Adv. Oper. Theory, 2 (2017), 428-434.} \bibitem{1} P. Ghosh, K. Paul and D. Sain, \textit{``Symmetric properties of orthogonality of linear operators on $(\mathbb{R}^ n,\|.\|_1)$"}, \texttt{Novi Sad J. Math., 47 (2017), 41-46.} \bibitem{james2} R.C. James, \textit{``Inner product in normed linear spaces"}, \textit{Bull. Amer. Math. Soc., 53 (1947), 559-566.} \bibitem{james} R.C. James, \textit{``Orthogonality and linear functionals in normed linear spaces"}, \texttt{Trans. Amer. Math. Soc., 61 (1947), 265-292.} \bibitem{function} D. Kečkić, \textit{``Orthogonality and smooth points in $C(K)$ and $C_b(\Omega)$"}, \texttt{Eurasian Mathematical Journal., 3 (2012).} \bibitem{3} N. Komuro, K.-S. Saito and R. Tanaka, \textit{``Left symmetric points for Birkhoff orthogonality in the preduals of von Neumann algebras"}, \texttt{Bull. Aust. Math. Soc., 98 (2018), 494-501.} \bibitem{4} N. Komuro, K.-S. Saito and R. Tanaka, \textit{``Symmetric points for (strong) Birkhoff orthogonality in von Neumann algebras with applications to preserver problems"}, \texttt{J. Math. Anal. Appl., 463 (2018), 1109-1131.} \bibitem{5} N. Komuro, K.-S. Saito and R. Tanaka, \textit{``On symmetry of Birkhoff orthogonality in the positive cones of $C^*$-algebras with applications"}, \texttt{J. Math. Anal. Appl., 474 (2019), 1488–1497.} \bibitem{KMT} M. Krein, D. Milman, \textit{``On extreme points of regular convex sets"}, \texttt{Studia Mathematica, 9 (1940), 133-138.} \bibitem{BL} J. Lamperti, \textit{``On the isometries of certain function-spaces"}, \texttt{Pacific J. Math., 8 (1958), no. 3, 459-466.} \bibitem{KP} K. Paul, A. Mal and P. W\'{o}jcik, \textit{``Symmetry of Birkhoff-James orthogonality of operators defined between infinite dimensional Banach spaces"}, \texttt{Linear Algebra Appl., {\bf}563 (2019), 142-153.} \bibitem{BAT} W. Rudin, \textit{``Functional Analysis", Second Edition}, \texttt{Mathematics Series, McGraw-Hill. (1991).} \bibitem{Sain2} D. Sain, \textit{``Birkhoff-James orthogonality of linear operators on finite dimensional Banach spaces"}, \texttt{J. Math. Anal. Appl., 447, Issue 2, (2017), 860-866.} \bibitem{Sain} D. 
Sain, \textit{``On the norm attainment set of a bounded linear operator"}, \texttt{J. Math. Anal. Appl., 457, Issue 1, (2018), 67-76.} \bibitem{8} D. Sain, P. Ghosh and K. Paul, \textit{``On symmetry of Birkhoff-James orthogonality of linear operators on finite-dimensional real Banach spaces"}, \texttt{Oper. Matrices, 11 (2017), 1087-1095.} \bibitem{10} D. Sain, K. Paul, A. Mal, A. Ray, \textit{``A complete characterization of smoothness in the space of bounded linear operators"}, \texttt{Linear Multilinear Algebra, (2019), doi.org/10.1080/03081087.2019.1586824.} \bibitem{SRBB} D. Sain, S. Roy, S. Bagchi and V. Balestro, \textit{``A study of symmetric points in Banach spaces"}, \texttt{Linear Multilinear Algebra, (2020), https://doi.org/10.1080/03081087.2020.1749541.} \bibitem{12} A. Turn\^sek, \textit{``A remark on orthogonality and symmetry of operators in B(H)"}, \texttt{Linear Algebra Appl., 535 (2017), 141-150.} \bibitem{turnsek} A. Turnsek, \textit{``On operators preserving James’ orthogonality"}, \texttt{Linear Algebra and its Applications, 407 (2005), 189-195.} \bibitem{Y-H} K. Yosida, E. Hewitt, \textit{``Finitely additive measures"}, \texttt{Trans. Amer. Math. Soc. 72, (1952), 46-66.} \end{thebibliography} \end{document}
2205.13062v1
http://arxiv.org/abs/2205.13062v1
Prabhakar-type linear differential equations with variable coefficients
\documentclass[a4paper,12pt,reqno]{amsart} \usepackage{amsmath} \usepackage{amssymb} \usepackage{amsfonts} \usepackage{graphicx} \usepackage{mathtools} \usepackage[colorlinks]{hyperref} \renewcommand\eqref[1]{(\ref{#1})} \graphicspath{ {images/} } \setlength{\textwidth}{15.2cm} \setlength{\textheight}{22.7cm} \setlength{\topmargin}{0mm} \setlength{\oddsidemargin}{3mm} \setlength{\evensidemargin}{3mm} \setlength{\footskip}{1cm} \providecommand{\Real}{\mathop{\rm Re}\nolimits}\providecommand{\Imag}{\mathop{\rm Im}\nolimits}\providecommand{\Res}{\mathop{\rm Res}} \title[Variable-coefficient Prabhakar differential equations]{Prabhakar-type linear differential equations with variable coefficients} \author[A. Fernandez]{Arran Fernandez} \address{ Arran Fernandez: \endgraf Department of Mathematics \endgraf Eastern Mediterranean University \endgraf Northern Cyprus, via Mersin-10, Turkey \endgraf {\it E-mail address:} {\rm [email protected]}} \author[J. E. Restrepo]{Joel E. Restrepo} \address{ Joel E. Restrepo: \endgraf Department of Mathematics \endgraf Nazarbayev University \endgraf Kazakhstan \endgraf and \endgraf Department of Mathematics: Analysis, Logic and Discrete Mathematics \endgraf Ghent University, Krijgslaan 281, Building S8, B 9000 Ghent \endgraf Belgium \endgraf {\it E-mail address:} {\rm [email protected];\,[email protected]}} \author[D. Suragan]{Durvudkhan Suragan} \address{ Durvudkhan Suragan: \endgraf Department of Mathematics \endgraf Nazarbayev University \endgraf Kazakhstan \endgraf {\it E-mail address:} {\rm [email protected]}} \subjclass[2010]{26A33, 34A08, 33E12.} \keywords{Fractional differential equations, Prabhakar fractional calculus, Series solutions, Analytical solutions, Fixed point theory.} \newtheoremstyle{theorem}{10pt} {10pt} {\sl} {\parindent} {\bf} {. } { } {} \theoremstyle{theorem} \newtheorem{theorem}{Theorem} \newtheorem{corollary}[theorem]{Corollary} \numberwithin{equation}{section} \theoremstyle{plain} \newtheorem{thm}{Theorem}[section] \newtheorem{prop}[thm]{Proposition} \newtheorem{cor}[thm]{Corollary} \newtheorem{lem}[thm]{Lemma} \theoremstyle{definition} \newtheorem{defn}[thm]{Definition} \newtheorem{rem}[thm]{Remark} \newtheorem{ex}[thm]{Example} \newtheoremstyle{defi}{10pt} {10pt} {\rm} {\parindent} {\bf} {. } { } {} \theoremstyle{defi} \newtheorem{definition}[theorem]{Definition} \newtheorem{remark}[theorem]{Remark} \begin{document} \begin{abstract} Linear differential equations with variable coefficients and Prabhakar-type operators featuring Mittag-Leffler kernels are solved. In each case, the unique solution is constructed explicitly as a convergent infinite series involving compositions of Prabhakar fractional integrals. We also extend these results to Prabhakar operators with respect to functions. As an important illustrative example, we consider the case of constant coefficients, and give the solutions in a more closed form by using multivariate Mittag-Leffler functions. \end{abstract} \maketitle \tableofcontents \section{Introduction} Fractional differential equations (FDEs) are widely studied, both from the pure mathematical viewpoint \cite{kilbas,podlubny,samko} and due to their applications in assorted fields of science and engineering \cite{hilfer,sun-etal}. The simple case of linear ordinary FDEs with constant coefficients has been thoroughly studied in classical textbooks such as \cite{kilbas,miller}, but many other FDE problems are still providing challenges to mathematical researchers. 
Explicit solutions have been constructed for several classes of linear FDEs with variable coefficients. Different approaches have been considered to obtain representations of solutions for such equations, including Green's functions \cite{RL}, the Banach fixed point theorem \cite{first,analitical}, power series methods \cite{AML,kilbasalpha,vcapl}, and Volterra integral equations \cite{vcserbia1,vcserbia2}. The tools used in \cite{first,RL,analitical} yielded representations of the solutions by uniformly convergent infinite series involving nested compositions of Riemann--Liouville fractional integrals. This is relatively easy to handle compared with other representations where sometimes reproducing kernels are involved, and the nested fractional integrals can even be eliminated to obtain a formula more suitable for numerical calculation \cite{FRS}. The starting point of the method in these papers was to exchange the original fractional differential equation for an equivalent integral equation, a very useful technique which, to the best of our knowledge, was first used for FDEs by Pitcher and Sewel in \cite{AMS-1938}. Recently, the study of explicit solutions of FDEs with variable coefficients has been growing in attention and opening new directions of investigation and application. After the works \cite{RL,analitical} where the problem was solved in the classical settings of Riemann--Liouville and Caputo fractional derivatives, several other papers have extended the same methodology to other types of fractional derivatives, such as Caputo derivatives with respect to functions and derivatives with non-singular Mittag-Leffler kernels \cite{RRS,FRS:AB}. This method has also been applied to partial differential equations \cite{RSade}, and in the investigation of inverse fractional Cauchy problems of wave and heat type, it was also used to define a new class of time-fractional Dirac type operators with time-variable coefficients and with applications in fractional Clifford analysis \cite{BRS,RRSdirac}. Such operators of fractional Dirac type lead to the consideration of a wide range of fractional Cauchy problems, whose solutions were given explicitly. In this paper, we study the explicit solutions of variable-coefficient FDEs in the setting of Prabhakar fractional derivatives. The origins of Prabhakar fractional calculus lie in the fractional integral operator introduced in \cite{Prab1971}, which was more deeply studied in \cite{generalizedfc} and extended to fractional derivatives in \cite{prabcap}. Recently, Prabhakar fractional calculus has been intensively studied both for its pure mathematical properties \cite{fernandez-baleanu,giusti-etal} and for its assorted applications \cite{garrappa-maione,tomovski-dubbeldam-korbel}, so Prabhakar fractional differential equations have become a topic of interest \cite{RS:MMAS}. For this reason, we have conducted the current research into fractional differential equations with variable coefficients and Prabhakar derivatives, constructing explicit solutions using the methodology of \cite{analitical}. The structure of the paper is given as follows. In Section \ref{preliPrabFDE}, we collect all necessary definitions and preliminary results on Prabhakar fractional calculus, as well as Prabhakar operators with respect to functions. 
Section \ref{mainPrabFDE} is devoted to the main results: proving existence and uniqueness for the considered Prabhakar-type linear differential equation with variable coefficients, constructing explicitly a canonical set of solutions, and finally finding the explicit form of the unique solution, both for the Prabhakar-type differential equation and also for its generalisation using Prabhakar operators with respect to functions. In Section \ref{FDEPrabconstcoe}, as an illustrative example of our general results, we write explicit solutions for the general linear Prabhakar-type FDE with constant coefficients, by using the multivariate Mittag-Leffler function. \section{Preliminaries}\label{preliPrabFDE} Let us recall the main definitions and auxiliary results that will be used in this paper. \subsection{Prabhakar fractional calculus} Before introducing the operators of Prabhakar fractional calculus, we need to recall the three-parameter Mittag-Leffler function $E^{\theta}_{\alpha,\beta}$, which was introduced and studied by Prabhakar in \cite{Prab1971}: \[ E^{\theta}_{\alpha,\beta}(z)=\sum_{n=0}^{\infty}\frac{(\theta)_n}{\Gamma(\alpha n+\beta)}\cdot\frac{z^n}{n!},\quad z,\beta,\alpha,\theta\in\mathbb{C},\textrm{Re}\,\alpha>0, \] where $\Gamma(\cdot)$ is the Gamma function and $(\theta)_n$ is the Pochhammer symbol \cite[\S2.1.1]{pocha}, i.e. $(\theta)_n=\frac{\Gamma(\theta+n)}{\Gamma(\theta)}$ or \[ (\theta)_0=1,\quad (\theta)_n=\theta(\theta+1)\cdots(\theta+n-1)\quad (n=1,2,\ldots). \] For $\theta=1$, we obtain the two-parameter Mittag-Leffler function $E_{\alpha,\beta}$, namely \[ E_{\alpha,\beta}(z)=\sum_{n=0}^{\infty}\frac{z^n}{\Gamma(\alpha n+\beta)},\quad z,\beta,\alpha\in\mathbb{C},\textrm{Re}\,\alpha>0. \] For $\beta=\theta=1$, we obtain the classical Mittag-Leffler function $E_{\alpha}(z)=E_{\alpha,1}(z)$. For more details of various types of the Mittag-Leffler function, see e.g. the book \cite{mittag}. Briefly, we discuss the convergence of the above series. Applying the ratio test to $c_n=\frac{(\theta)_n}{\Gamma(\alpha k+\beta)}\frac{z^n}{n!}$ and using Stirling's approximation \cite[1.18(4)]{pocha}, we have \begin{align*} \left|\frac{c_{n+1}}{c_n}\right|&=\left|\frac{\frac{(\theta)_{n+1}}{\Gamma(\alpha(n+1)+\beta)}\frac{z^{n+1}}{(n+1)!}}{\frac{(\theta)_n}{\Gamma(\alpha n+\beta)}\frac{z^n}{n!}}\right|=|z|\frac{|\theta+n|}{n+1}\frac{|\Gamma(\alpha n+\beta)|}{|\Gamma(\alpha n+\beta+\alpha)|} \\ &\sim |z|\frac{|\theta+n|}{n+1}\frac1{|\alpha n+\beta|^{\Real \,\alpha}}\to 0,\quad n\to\infty, \end{align*} and we see why the assumption $\Real (\alpha)>0$ is necessary for the definition. We now recall the Prabhakar integral operator, which is defined by \begin{equation}\label{IPrab} \left(\prescript{}{a}{\mathbb{I}}_{\alpha,\beta,\omega}^{\theta}f\right)(t)=\int_a^t (t-s)^{\beta-1}E^{\theta}_{\alpha,\beta}(\omega(t-s)^{\alpha})f(s)\,\mathrm{d}s, \end{equation} where $\alpha,\beta,\theta,\omega\in\mathbb{C}$ with $\Real (\alpha)>0$ and $\Real (\beta)>0$. This operator is bounded for functions $f\in L^1(a,b)$ for any $b>a$; for more details, see \cite[Theorems 4,5]{generalizedfc}. Note that for $\theta=0$, $\prescript{}{a}{\mathbb{I}}_{\alpha,\beta,\omega}^{0}$ coincides with the Riemann--Liouville fractional integral of order $\beta$ \cite[Sections 2.3 and 2.4]{samko}: \begin{equation}\label{fraci} \prescript{RL}{a}I^{\beta}f(t)=\frac1{\Gamma(\beta)}\int_a^t (t-s)^{\beta-1}f(s)\,\mathrm{d}s,\quad \beta\in\mathbb{C},\quad\Real (\beta)>0. 
\end{equation} Two important properties of the Prabhakar operator are its semigroup property (in the parameters $\beta,\theta$) and its series formula, which were proved in \cite{generalizedfc} and \cite{fernandez-baleanu-srivastava} respectively. These are: \begin{align} \prescript{}{a}{\mathbb{I}}_{\alpha,\beta_1,\omega}^{\theta_1}\circ\prescript{}{a}{\mathbb{I}}_{\alpha,\beta_2,\omega}^{\theta_2}=\prescript{}{a}{\mathbb{I}}_{\alpha,\beta_1+\beta_2,\omega}^{\theta_1+\theta_2},\quad\Real (\alpha)>0,\Real (\beta_i)>0,i=1,2; \label{PI:semi} \\ \left(\prescript{}{a}{\mathbb{I}}_{\alpha,\beta,\omega}^{\theta}f\right)(t)=\sum_{n=0}^{\infty}\frac{(\theta)_n\omega^n}{n!}\prescript{RL}{a}I^{\alpha n+\beta}f(t),\quad\Real (\alpha)>0,\Real (\beta)>0. \label{PI:series} \end{align} Thanks to all of the above identities and relations, the Prabhakar integral operator \eqref{IPrab} is considered \cite{fernandez-baleanu,generalizedfc} as a generalised fractional integral operator, giving rise to a type of fractional calculus involving Mittag-Leffler kernels. It is a complete model of fractional calculus including fractional derivatives as well as integrals, as we shall see in the following statements. Firstly we recall the space $AC^n(a,b)$ ($n\in\mathbb{N}$), which is the set of real-valued functions $f$ whose derivatives exist up to order $n-1$ on $(a,b)$ and such that $f^{(n-1)}$ is an absolutely continuous function. The Prabhakar derivative of Riemann--Liouville type is defined \cite{prabcap} by \begin{align}\label{DPrabRL} \left(\prescript{RL}{a}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta}f\right)(t)&=\frac{\mathrm{d}^m}{\mathrm{d}t^m}\left(\prescript{}{a}{\mathbb{I}}_{\alpha,m-\beta,\omega}^{-\theta}f(t)\right) \nonumber\\ &=\frac{\mathrm{d}^m}{\mathrm{d}t^m}\int_a^t (t-s)^{m-\beta-1}E^{-\theta}_{\alpha,m-\beta}(\omega(t-s)^{\alpha})f(s)\,\mathrm{d}s, \end{align} where $\alpha,\beta,\theta,\omega\in\mathbb{C}$ with $\Real (\alpha)>0$, $\Real (\beta)\geqslant0$, and $m=\lfloor \Real \,\beta\rfloor+1$ (where $\lfloor\cdot\rfloor$ is the floor function) and $f\in AC^m(a,b)$. The following inversion result for Prabhakar integrals and derivatives follows immediately from the semigroup property \eqref{PI:semi} and the classical fundamental theorem of calculus: \begin{equation} \label{thm2.5PrabFDE} \prescript{RL}{a}{\mathbb{D}}_{\alpha,\beta_2,\omega}^{\theta_2}\circ\prescript{}{a}{\mathbb{I}}_{\alpha,\beta_1,\omega}^{\theta_1}= \begin{cases} \prescript{}{a}{\mathbb{I}}_{\alpha,\beta_1-\beta_2,\omega}^{\theta_1-\theta_2},&\quad\Real (\beta_1)>\Real (\beta_2)\geqslant0; \\\\ \prescript{RL}{a}{\mathbb{D}}_{\alpha,\beta_2-\beta_1,\omega}^{\theta_2-\theta_1},&\quad\Real (\beta_2)\geqslant\Real (\beta_1)>0, \end{cases} \end{equation} where $\alpha,\beta_i,\theta_i,\omega\in\mathbb{C}$ such that $\Real (\alpha)>0$ and $\Real (\beta_i)>0$ for $i=1,2$. In particular, for $\beta,\theta\in\mathbb{C}$ such that $\Real (\beta)>0$, we have \[ \prescript{RL}{a}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta}\prescript{}{a}{\mathbb{I}}_{\alpha,\beta,\omega}^{\theta}f(t)=f(t),\quad f\in C[a,b]. 
\] The Prabhakar derivative of Caputo type, sometimes also called the regularised Prabhakar derivative, is usually defined \cite{prabcap} by \begin{align} \left(\prescript{C}{a}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta}f\right)(t)&=\prescript{}{a}{\mathbb{I}}_{\alpha,m-\beta,\omega}^{-\theta}\left(\frac{\mathrm{d}^m}{\mathrm{d}t^m}f(t)\right) \nonumber\\ &=\int_a^t (t-s)^{m-\beta-1}E^{-\theta}_{\alpha,m-\beta}(\omega(t-s)^{\alpha})f^{(m)}(s)\,\mathrm{d}s, \label{DPrab} \end{align} where $\alpha,\beta,\theta,\omega\in\mathbb{C}$ with $\Real (\alpha)>0$, $\Real (\beta)\geqslant0$, and $m=\lfloor\Real \beta\rfloor+1$, and $f\in AC^m(a,b)$. Note that $f\in AC^m[a,b]$ is enough for \eqref{DPrab} to be well-defined, since this guarantees $f^{(m)}$ exists almost everywhere and is in $L^1[a,b]$, therefore the fractional integral of $f^{(m)}$ exists; we do not need stronger conditions such as $f\in C^m[a,b]$ for the existence of the Caputo-type derivative. Boundedness of the operator $\prescript{C}{a}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta}$ is discussed in \cite[Theorem 4]{polito}. For $\theta=0$, this operator coincides with the original Caputo fractional derivative. We also have the following alternative formula for the Caputo--Prabhakar derivative, which is equivalent to \eqref{DPrab} for any function $f\in AC^m(a,b)$: \begin{equation}\label{alternativePrabh} \left(\prescript{C}{a}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta}f\right)(t)=\prescript{RL}{a}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta}\left[f(t)-\sum_{j=0}^{m-1}\frac{f^{(j)}(a)}{j!}(t-a)^{j}\right], \end{equation} where $\alpha,\beta,\theta,\omega\in\mathbb{C}$ with $\Real (\alpha)>0$, $\Real (\beta)\geqslant0$, and $m=\lfloor\Real\beta\rfloor+1$. The equivalence of \eqref{DPrab} and \eqref{alternativePrabh} was proved in \cite[Proposition 4.1]{prabcap}. In this paper, we shall use them both interchangeably. The Prabhakar derivatives, of both Riemann--Liouville and Caputo type, have series formulae analogous to \eqref{PI:series}, namely: \begin{align} \left(\prescript{RL}{a}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta}f\right)(t)=\sum_{n=0}^{\infty}\frac{(-\theta)_n\omega^n}{n!}\prescript{RL}{a}I^{\alpha n-\beta}f(t),\quad\Real (\alpha)>0,\Real (\beta)\geqslant0, \label{PR:series}\\ \left(\prescript{C}{a}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta}f\right)(t)=\sum_{n=0}^{\infty}\frac{(-\theta)_n\omega^n}{n!}\prescript{RL}{a}I^{\alpha n+m-\beta}f^{(m)}(t),\quad\Real (\alpha)>0,\Real (\beta)\geqslant0, \label{PC:series} \end{align} where in \eqref{PR:series} we use the analytic continuation of the Riemann--Liouville integral (called the Riemann--Liouville derivative) for the finitely many cases where $\Real (\alpha n-\beta)<0$. Note that the first term of the series in \eqref{PC:series} is precisely the classical Caputo derivative to order $\beta$ of $f$, defined by \[ \prescript{C}{a}D^{\beta}f(t)=\prescript{RL}{a}I^{m-\beta}\left(\frac{\mathrm{d}^m}{\mathrm{d}t^m}f(t)\right)=\frac{1}{\Gamma(m-\beta)}\int_a^t (t-s)^{m-\beta-1}f^{(m)}(s)\,\mathrm{d}s, \] where $m:=\lfloor\Real \beta\rfloor+1$ as usual. \begin{lem}\label{importantproPrabFDE} If $\alpha,\beta,\theta,\omega\in\mathbb{C}$ with $\Real (\alpha)>0$, $\Real (\beta)>0$, and $f\in C[a,b]$, then the following statements hold: \begin{enumerate} \item $\left(\prescript{}{a}{\mathbb{I}}_{\alpha,\beta,\omega}^{\theta}f\right)(t)$ is a continuous function on $[a,b]$. \item $\displaystyle\lim_{t\to a+}\left(\prescript{}{a}{\mathbb{I}}_{\alpha,\beta,\omega}^{\theta}f\right)(t)=0$. 
\item If $\beta',\theta'\in\mathbb{C}$ with $\Real (\beta)>\Real (\beta')\geqslant0$, then \[ \prescript{C}{a}{\mathbb{D}}_{\alpha,\beta',\omega}^{\theta'}\circ\prescript{}{a}{\mathbb{I}}_{\alpha,\beta,\omega}^{\theta}f(t)=\prescript{}{a}{\mathbb{I}}_{\alpha,\beta-\beta',\omega}^{\theta-\theta'}f(t). \] In particular, letting $\beta'\to\beta$ and $\theta'=\theta$, we have \[ \prescript{C}{a}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta}\circ\prescript{}{a}{\mathbb{I}}_{\alpha,\beta,\omega}^{\theta}f(t)=f(t). \] \end{enumerate} \end{lem} \begin{proof} The first statement follows by \cite[Theorem 5]{generalizedfc}. The second statement is an application of the mean value theorem for integrals; note that the continuity of $f$ on the closed interval $[a,b]$ is vital for this. Let us now prove the third statement. Setting $m=\lfloor\Real\beta'\rfloor+1$, we have by the formula \eqref{alternativePrabh}: \begin{align*} \prescript{C}{a}{\mathbb{D}}_{\alpha,\beta',\omega}^{\theta'}\circ\prescript{}{a}{\mathbb{I}}_{\alpha,\beta,\omega}^{\theta}f(t)&=\prescript{RL}{a}{\mathbb{D}}_{\alpha,\beta',\omega}^{\theta'}\left[\prescript{}{a}{\mathbb{I}}_{\alpha,\beta,\omega}^{\theta}f(t)-\sum_{j=0}^{m-1}\frac{t^j}{j!}\Big(\prescript{}{a}{\mathbb{I}}_{\alpha,\beta,\omega}^{\theta}f\Big)^{(j)}(a)\right] \\ &=\prescript{}{a}{\mathbb{I}}_{\alpha,\beta-\beta',\omega}^{\theta-\theta'}f(t)-\sum_{j=0}^{m-1}\Big(\prescript{}{a}{\mathbb{I}}_{\alpha,\beta,\omega}^{\theta}f\Big)^{(j)}(a)\cdot\prescript{RL}{a}{\mathbb{D}}_{\alpha,\beta',\omega}^{\theta'}\left(\frac{t^j}{j!}\right), \end{align*} where in the last line we used \eqref{thm2.5PrabFDE}. For each value of $j=0,1,\cdots,m-1$, since $j\leqslant m-1=\lfloor\Real\beta'\rfloor\leqslant\Real\beta'<\Real\beta$ and therefore $\Real (\beta-j)>0$, by \cite[Theorem 7]{generalizedfc} and the first statement of this Lemma, it follows that: \[ \lim_{t\to a+}\Big(\prescript{}{a}{\mathbb{I}}_{\alpha,\beta,\omega}^{\theta}f\Big)^{(j)}(t)=\lim_{t\to a+}\left(\prescript{}{a}{\mathbb{I}}_{\alpha,\beta-j,\omega}^{\theta}f\right)(t)=0, \] which completes the proof. \end{proof} In the last part of Lemma \ref{importantproPrabFDE}, we have proved one composition relation for the Prabhakar operators, namely the Caputo-type derivative of the fractional integral. We will also need the converse, a formula for the fractional integral of the Caputo-type derivative, which will be stated in the following function space \cite{kilbas-marzan}: \[ C^{\beta,m-1}[a,b]:=\left\{v\in C^{m-1}[a,b]\;:\; \prescript{C}{a}D^{\beta}v\text{ exists in }C[a,b]\right\}. \] Kilbas and Marzan used this space in \cite[\S3]{kilbas-marzan} for solving some Caputo fractional differential equations. It is a suitable setting because it guarantees the existence of Caputo fractional derivatives up to a given order without any further assumptions required. Given our context of Prabhakar operators, we shall endow it with the following norm: \[ \|v\|_{C^{\beta,m-1}}=\sum_{k=0}^{m-1}\left\|v^{(k)}\right\|_{\infty}+\big\|\prescript{C}{a}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta}v\big\|_{\infty}, \] where $\alpha,\beta,\theta,\omega\in\mathbb{C}$ such that $\Real (\alpha)>0$, $\Real (\beta)\geqslant0$, and $m-1\leqslant\Real \,\beta<m$. This function space is the same as the one used in \cite{analitical}, defined according to continuity of the classical Caputo derivative, but the norm is different, adapted for the Prabhakar setting. 
Note that the assumptions for this function space are enough to guarantee existence and continuity of the Caputo-type Prabhakar derivative: \[ \prescript{C}{a}{\mathbb{D}}^{\theta}_{\alpha,\beta,\omega}v\in C[a,b]\quad\text{ for all }\;v\in C^{\beta}[a,b], \] because the series formula \eqref{PC:series} shows that $\prescript{C}{a}{\mathbb{D}}^{\theta}_{\alpha,\beta,\omega}v(t)$ is a uniformly convergent sum of the Caputo derivative $\prescript{C}{a}D^{\beta}v$ and various fractional integrals of it, which must all be continuous for $v\in C^{\beta}[a,b]$, since the fractional integral of a continuous function is continuous \cite{bonilla-trujillo-rivero}. \begin{lem}\label{inversepPrabFDE} If $\alpha,\beta,\theta,\omega\in\mathbb{C}$ with $\Real (\alpha)>0$ and $\Real (\beta)>0$ and $m=\lfloor\Real \beta\rfloor+1$, then for any $f\in C^{\beta,m-1}[a,b]$, we have \[ \left(\prescript{}{a}{\mathbb{I}}_{\alpha,\beta,\omega}^{\theta}\circ\prescript{C}{a}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta}f\right)(t)=f(t)-\sum_{j=0}^{m-1}\frac{f^{(j)}(a)}{j!}\big(t-a\big)^j. \] In particular, if $0<\beta<1$ so that $m=1$, we have \[ \left(\prescript{}{a}{\mathbb{I}}_{\alpha,\beta,\omega}^{\theta}\circ\prescript{C}{a}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta}f\right)(t)=f(t)-f(a). \] \end{lem} \begin{proof} This follows from the first definition \eqref{DPrab} of the Caputo-type derivative, together with the semigroup property \eqref{PI:semi} of Prabhakar integrals and the fundamental theorem of calculus. \end{proof} \subsection{Fractional calculus with respect to functions} In order to make an extension of Prabhakar fractional calculus, let us now introduce the concept of fractional integrals and derivatives of a function with respect to another function. In the classical Riemann--Liouville sense, the fractional integral of a function $f(t)$ with respect to a monotonically increasing $C^1$ function $\psi:[a,b]\to\mathbb{R}$ with $\psi'>0$ everywhere is defined \cite{osler} by \[ \prescript{RL}{a}I^{\beta}_{\psi(t)}f(t)=\frac1{\Gamma(\beta)}\int_a^t \big(\psi(t)-\psi(s)\big)^{\beta-1}f(s)\psi'(s)\,\mathrm{d}s,\quad\Real (\beta)>0. \] This operator was first introduced by Osler \cite{osler}, and more detailed studies of both this fractional integral and its associated fractional derivatives can be found in \cite[\S2.5]{kilbas} and \cite[\S18.2]{samko}. One of its most important properties is its conjugation relation with the original Riemann--Liouville integral \eqref{fraci}: \begin{equation} \label{conjugation} \prescript{RL}{a}I^{\beta}_{\psi(t)}=Q_\psi\circ\prescript{RL}{\psi(a)}I^{\beta}\circ Q_\psi^{-1},\quad\text{ where }Q_\psi:f\mapsto f\circ \psi. \end{equation} This enables many properties of the fractional integral with respect to $\psi$, such as composition relations, to be proved immediately from the corresponding properties of the Riemann--Liouville fractional integral. Conjugation relations like \eqref{conjugation} are also valid for the Riemann--Liouville and Caputo derivatives with respect to functions, and these relations can be used for efficient treatment of fractional differential equations with respect to functions \cite{fahad-rehman-fernandez,zaky-hendy-suragan}. 
The same idea of conjugation relations has also been applied to other types of fractional calculus \cite{agrawal,fahad-fernandez-rehman-siddiqi}, and more general fractional integral and derivative operators have also been taken with respect to functions \cite{oumarou-fahad-djida-fernandez}, illustrating the scope of this idea's applicability. The Prabhakar fractional integral and derivatives of a function with respect to another function were first defined in \cite{fb:ssrn} and studied in more detail in \cite{oliveira1,oliveira2}: \begin{align} \prescript{}{a}{\mathbb{I}}_{\alpha,\beta,\omega}^{\theta;\psi(t)}f(t)&=\int_a^t \big(\psi(t)-\psi(s))^{\beta-1}E^{\theta}_{\alpha,\beta}\left(\omega\big(\psi(t)-\psi(s)\big)^{\alpha}\right)f(s)\psi'(s)\,\mathrm{d}s, \label{Pwrtf:int} \\ \prescript{RL}{a}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta;\psi(t)}f(t)&=\left(\frac{1}{\psi'(t)}\cdot\frac{\mathrm{d}}{\mathrm{d}t}\right)^m\left(\prescript{}{a}{\mathbb{I}}_{\alpha,m-\beta,\omega}^{-\theta;\psi(t)}f(t)\right), \label{Pwrtf:Rder} \\ \prescript{C}{a}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta;\psi(t)}f(t)&=\prescript{}{a}{\mathbb{I}}_{\alpha,m-\beta,\omega}^{-\theta;\psi(t)}\left(\left(\frac{1}{\psi'(t)}\cdot\frac{\mathrm{d}}{\mathrm{d}t}\right)^mf(t)\right), \label{Pwrtf:Cder} \end{align} where $\Real\alpha>0$ in every case, $\Real\beta>0$ in \eqref{Pwrtf:int}, and $\Real\beta\geqslant0$ with $m=\lfloor\Real\beta\rfloor+1$ in \eqref{Pwrtf:Rder}--\eqref{Pwrtf:Cder}. Various properties of these operators were proved in \cite{oliveira1,oliveira2}, but those studies did not take account of the conjugation relation connecting these operators back to the original Prabhakar operators. We note that Prabhakar fractional calculus is a special case of fractional calculus with general analytic kernels \cite{fernandez-ozarslan-baleanu}, which has been extended to a version taken with respect to functions \cite{oumarou-fahad-djida-fernandez}, where a conjugation relation analogous to \eqref{conjugation} has been proved. Therefore, the corresponding relation holds for Prabhakar fractional integrals as a special case: \begin{align*} \prescript{}{a}{\mathbb{I}}_{\alpha,\beta,\omega}^{\theta;\psi(t)}&=Q_\psi\circ\prescript{}{\psi(a)}{\mathbb{I}}_{\alpha,\beta,\omega}^{\theta}\circ Q_\psi^{-1}, \\ \prescript{RL}{a}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta;\psi(t)}&=Q_\psi\circ\prescript{RL}{\psi(a)}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta}\circ Q_\psi^{-1}, \\ \prescript{C}{a}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta;\psi(t)}&=Q_\psi\circ\prescript{C}{\psi(a)}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta}\circ Q_\psi^{-1},\end{align*} where the functional operator $Q_\psi$ is defined in \eqref{conjugation}. From these conjugation relations, all properties proved above for Prabhakar operators immediately give rise to corresponding properties for Prabhakar operators with respect to functions. For example, \eqref{alternativePrabh} implies that \begin{equation*} \prescript{C}{a}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta;\psi(t)}f(t)=\prescript{RL}{a}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta;\psi(t)}\left[f(t)-\sum_{j=0}^{m-1}\frac{\big(\psi(t)-\psi(a)\big)^j}{j!}\lim_{t\to a+}\left(\frac{1}{\psi'(t)}\cdot\frac{\mathrm{d}}{\mathrm{d}t}\right)^jf(t)\right], \end{equation*} with $\alpha,\beta,m$ as before. 
Or again, Lemma \ref{importantproPrabFDE} implies that \[ \prescript{C}{a}{\mathbb{D}}_{\alpha,\beta',\omega}^{\theta',\psi(t)}\circ\prescript{}{a}{\mathbb{I}}_{\alpha,\beta,\omega}^{\theta,\psi(t)}f(t)=\prescript{}{a}{\mathbb{I}}_{\alpha,\beta-\beta',\omega}^{\theta-\theta',\psi(t)}f(t) \] where $\Real\alpha>0$ and $\Real\beta>\Real\beta'\geqslant0$ and $\theta,\theta'\in\mathbb{C}$, while Lemma \ref{inversepPrabFDE} implies that \[ \left(\prescript{}{a}{\mathbb{I}}_{\alpha,\beta,\omega}^{\theta;\psi(t)}\circ\prescript{C}{a}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta;\psi(t)}f\right)(t)=f(t)-\sum_{j=0}^{m-1}\frac{\big(\psi(t)-\psi(a)\big)^j}{j!}\lim_{t\to a+}\left(\frac{1}{\psi'(t)}\cdot\frac{\mathrm{d}}{\mathrm{d}t}\right)^jf(t), \] with $\alpha,\beta,m$ as before and $f$ in the function space \[ C^{\beta,m-1}_{\psi(t)}[a,b]:=\left\{v\in C^{m-1}[a,b]\;:\; \prescript{C}{a}D^{\beta}_{\psi(t)}v(t)\text{ exists in }C[a,b]\right\}, \] endowed with the norm \[ \|v\|_{C^{\beta,m-1}_\psi}=\sum_{k=0}^{m-1}\left\|\left(\frac{1}{\psi'(t)}\cdot\frac{\mathrm{d}}{\mathrm{d}t}\right)^kv(t)\right\|_{\infty}+\big\|\prescript{C}{a}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta;\psi(t)}v(t)\big\|_{\infty}. \] It can be proved that the functional operator $Q_\psi$ is a natural isometry from the normed space $C^{\beta,m-1}[a,b]$ to the normed space $C^{\beta,m-1}_{\psi(t)}[a,b]$. \section{Main results}\label{mainPrabFDE} We will study the following differential equation with continuous variable coefficients and Caputo--Prabhakar fractional derivatives: \begin{equation}\label{eq1PrabFDE} \prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_0,\omega}^{\theta_0}v(t)+\sum_{i=1}^{m}\sigma_i(t)\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}v(t)=g(t),\quad t\in[0,T], \end{equation} to be solved for the unknown function $v(t)$, under the initial conditions \begin{equation}\label{eq2PrabFDE} \frac{\mathrm{d}^k}{\mathrm{d}t^k} v(t)\Big|_{t=0+}=v^{(k)}(0)=e_k\in\mathbb{C},\quad k=0,1,\ldots,n_0-1, \end{equation} where $\alpha,\beta_i,\theta_i,\omega\in\mathbb{C}$ with $\Real (\alpha)>0$ and $\Real (\beta_0)>\Real (\beta_1)>\cdots>\Real (\beta_{m})\geqslant0$ and $n_i=\lfloor \Real \beta_i\rfloor+1\in\mathbb{N}$ and the functions $\sigma_i,g\in C[0,T]$ for $i=0,1,\ldots,m$. We will also study the homogeneous case \begin{equation}\label{eq3PrabFDE} \prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_0,\omega}^{\theta_0}v(t)+\sum_{i=1}^{m}\sigma_i(t)\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}v(t)=0,\quad t\in[0,T], \end{equation} and the homogeneous initial conditions \begin{equation}\label{eq4PrabFDE} v^{(k)}(0)=0,\quad k=0,1,\ldots,n_0-1, \end{equation} in order to obtain complementary functions which can then be used to construct the general solution. \begin{defn} A set of functions $v_j(t)$, $j=0,1,\ldots,n_0-1$, is called a canonical set of solutions of the homogeneous equation \eqref{eq3PrabFDE} if every function $v_j$ satisfies \eqref{eq3PrabFDE} and the following initial conditions hold for $j,k=0,1,\ldots,n_0-1$: \begin{equation} \label{initcond:canonical} v_j^{(k)}(0)= \begin{cases} 1,&\quad j=k,\\ 0,&\quad j\neq k. \end{cases} \end{equation} \end{defn} We now study the existence, uniqueness, and representation of solutions for the above initial value problem. \subsection{The general FDE with homogeneous initial conditions} We start by proving the existence and uniqueness of solutions for the general FDE \eqref{eq1PrabFDE} with homogeneous initial conditions \eqref{eq4PrabFDE}. 
\begin{thm}\label{lem3.1PrabFDE} Let $\alpha,\beta_i,\theta_i,\omega\in\mathbb{C}$ with $\Real (\alpha)>0$ and $\Real (\beta_0)>\Real (\beta_1)>\cdots>\Real (\beta_{m})\geqslant0$ and $\Real (\beta_0)\not\in\mathbb{Z}$, and let $n_i=\lfloor \Real \beta_i\rfloor+1\in\mathbb{N}$ and the functions $\sigma_i,g\in C[0,T]$ for $i=0,1,\ldots,m$. Then the FDE \eqref{eq1PrabFDE} under the conditions \eqref{eq4PrabFDE} has a unique solution $v\in C^{\beta_0,n_0-1}[0,T]$, and it is represented by the following uniformly convergent series: \begin{equation}\label{for27} v(t)=\sum_{k=0}^{\infty}(-1)^k \prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\left(\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}\right)^{k}g(t). \end{equation} \end{thm} \begin{proof} Our proof will be in four parts: first transforming the FDE \eqref{eq1PrabFDE} with the conditions \eqref{eq4PrabFDE} into an equivalent integral equation, much easier to handle and work with; then using the Banach fixed point theorem to show that this integral equation has a unique solution in an appropriate function space; then constructing an appropriately convergent sequence of functions to give the unique solution function as a limit; and finally constructing an explicit formula for the solution function as an infinite series. \medskip \textbf{Equivalent integral equation.} Let us take $v\in C^{\beta_0,n_0-1}[0,T]$ satisfying \eqref{eq1PrabFDE} and \eqref{eq4PrabFDE}. For $u(t)=\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_0,\omega}^{\theta_0}v(t)$, we know that $u\in C[0,T]$ by definition of the function space $C^{\beta_0,n_0-1}[0,T]$. By Lemma \ref{inversepPrabFDE} and conditions \eqref{eq4PrabFDE}, it follows that \[ \prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}u(t)=\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_0,\omega}^{\theta_0}v(t)=v(t).\] Due to $u\in C[0,T]$, $\Real (\beta_0)>\Real (\beta_{\it i})\geqslant0$, and Lemma \ref{importantproPrabFDE}, we have \[\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}v(t)=\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}u(t)=\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}u(t),\quad i=1,\ldots,m.\] Therefore, equation \eqref{eq1PrabFDE} becomes \begin{equation}\label{integraleqPrabFDE} u(t)+\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}u(t)=g(t). \end{equation} Thus, if $v\in C^{\beta_0,n_0-1}[0,T]$ is a solution of the initial value problem \eqref{eq1PrabFDE} and \eqref{eq4PrabFDE}, then $u=\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_0,\omega}^{\theta_0}v\in C[0,T]$ is a solution of the integral equation \eqref{integraleqPrabFDE}. We now focus on the converse statement. Let $u\in C[0,T]$ be a solution of \eqref{integraleqPrabFDE}. By the application of the operator $\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}$ to equation \eqref{integraleqPrabFDE}, we get \begin{equation} \label{equiv:step} \prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}u(t)+\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}u(t)=\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}g(t). 
\end{equation} Defining $v(t)=\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}u(t)$, from Lemma \ref{importantproPrabFDE} we obtain \[ \prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}v(t)=\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}u(t)\quad\text{and}\quad\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}u\in C[0,T], \] therefore \eqref{equiv:step} implies \[ v(t)+\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\sum_{i=1}^{m}\sigma_i(t)\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}v(t)=\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}g(t).\] Then, applying the Caputo--Prabhakar derivative: \begin{equation*} \prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_0,\omega}^{\theta_0}v(t)+\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_0,\omega}^{\theta_0}\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\sum_{i=1}^{m}\sigma_i(t)\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}v(t)=\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_0,\omega}^{\theta_0}\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}g(t). \end{equation*} By Lemma \ref{importantproPrabFDE}, we arrive at \[ \prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_0,\omega}^{\theta_0}v(t)+\sum_{i=1}^{m}\sigma_i(t)\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}v(t)=g(t), \] which is exactly \eqref{eq1PrabFDE}. Moreover, by \cite[Theorem 7]{generalizedfc}, Lemma \ref{importantproPrabFDE}, and $\Real(\beta_0)\not\in\mathbb{Z}$ so that $\Real (\beta_0)>n_0-1$, we have \[\frac{\mathrm{d}^k}{\mathrm{d}t^k} v(t)\Big|_{t=0+}=\frac{\mathrm{d}^k}{\mathrm{d}t^k} \prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}u(t)\Big|_{t=0+}=\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-k,\omega}^{\theta_0}u(t)|_{t=0+}=0,\] for any $k=0,1,\ldots,n_0-1$, giving the required initial conditions \eqref{eq4PrabFDE}, and we also have the required regularity (function space) since $\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_0,\omega}^{\theta_0}v=\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_0,\omega}^{\theta_0}\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}u=u\in C[0,T]$ so that $v\in C^{\beta_0,n_0-1}[0,T]$. Thus, a solution $u\in C[0,T]$ of equation \eqref{integraleqPrabFDE} provides a solution $v=\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}u\in C^{\beta_0,n_0-1}[0,T]$ for the equation \eqref{eq1PrabFDE} under the conditions \eqref{eq4PrabFDE}. Finally, we have proved the equivalence of \eqref{eq1PrabFDE} and \eqref{eq4PrabFDE} with \eqref{integraleqPrabFDE}, under suitable regularity (function space) conditions on both sides of the equivalence. \medskip \textbf{Existence and uniqueness.} Consider the operator $\mathfrak{T}$ defined by \[\mathfrak{T}u(t):=g(t)-\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}u(t).\] The integral equation \eqref{integraleqPrabFDE} is equivalent to $\mathfrak{T}u(t)=u(t)$, and it is clear that $\mathfrak{T}:C[0,T]\to C[0,T]$. Let us consider the norm on $C[0,T]$ defined by \[ \|z\|_{p}:=\max_{t\in[0,T]}\Big(e^{-pt}|z(t)|\Big), \] for some large $p\in\mathbb{R}_+$ (to be fixed later according to our needs). This norm is equivalent to the supremum norm on $C[0,T]$, therefore $C[0,T]$ is a complete metric space under this norm. 
For the next estimates, we need to recall the following inequality: \begin{equation}\label{util} \Big|\prescript{RL}{0}I^{\lambda}e^{pt}\Big|\leqslant \frac{\Gamma(\Real\lambda)}{\left|\Gamma(\lambda)\right|}\cdot\frac{e^{pt}}{p^{\Real\lambda}}, \quad t,p\in \mathbb{R}_+,\;\Real\lambda>0, \end{equation} which follows from a simple inequality of integrals: \[ \left|\Gamma(\lambda)\cdot\prescript{RL}{0}I^{\lambda}e^{pt}\right|\leqslant\Gamma(\Real\lambda)\cdot\prescript{RL}{-\infty}I^{\Real\lambda}e^{pt}=\Gamma(\Real\lambda)\cdot\frac{e^{pt}}{p^{\Real\lambda}}. \] Now, for any fixed $t\in [0,T]$ and $u_1,u_2\in C[0,T]$ and $p\in\mathbb{R}_+$, we get \begin{align*} |\mathfrak{T}&u_1(t)-\mathfrak{T}u_2(t)| \\ &\leqslant\sum_{i=1}^{m}\|\sigma_i\|_{\infty}\sum_{k=0}^{\infty}\frac{|(\theta_0-\theta_i)_k||\omega|^k}{k!}\Big|\prescript{RL}{0}I^{\alpha k+\beta_0-\beta_i}\big[u_1(t)-u_2(t)\big]\Big| \\ &\leqslant\|u_1-u_2\|_{p}\sum_{i=1}^{m}\|\sigma_i\|_{\infty}\sum_{k=0}^{\infty}\frac{|(\theta_0-\theta_i)_k||\omega|^k}{k!}\Big|\prescript{RL}{0}I^{\alpha k+\beta_0-\beta_i}\big[e^{pt}\big]\Big| \\ &\leqslant\|u_1-u_2\|_{p}\sum_{i=1}^{m}\|\sigma_i\|_{\infty}\sum_{k=0}^{\infty}\frac{|(\theta_0-\theta_i)_k||\omega|^k}{k!}\cdot\frac{\Gamma(\Real(\beta_0-\beta_i+\alpha k))}{\left|\Gamma(\beta_0-\beta_i+\alpha k)\right|}\cdot\frac{e^{pt}}{p^{\Real(\beta_0-\beta_i)+\Real(\alpha)k}} \\ &=e^{pt}\|u_1-u_2\|_{p}\sum_{i=1}^{m}\frac{\|\sigma_i\|_{\infty}}{p^{\Real(\beta_0-\beta_i)}}\sum_{k=0}^{\infty}\frac{|(\theta_0-\theta_i)_k|}{k!}\cdot\frac{\Gamma(\Real(\beta_0-\beta_i)+k\Real\alpha))}{\left|\Gamma(\beta_0-\beta_i+\alpha k)\right|}\left(\frac{|\omega|}{p^{\Real\alpha}}\right)^k \\ &\leqslant Ce^{pt}\|u_1-u_2\|_{p}, \end{align*} where $C>0$ is a constant, independent of $u_1,u_2$ and $t$, which can be taken to satisfy $0<C<1$ if we choose $p\in\mathbb{R}_+$ sufficiently large, since the $\beta_i$ and $\theta_i$ and $\sigma_i$ and $\alpha$ are fixed. Thus, dividing by $e^{pt}$ in this inequality and taking the supremum over $t\in[0,T]$, we find \[ \|\mathfrak{T}u_1-\mathfrak{T}u_2\|_{p}\leqslant C\|u_1-u_2\|_{p}, \] which means that $T$ is contractive with respect to the norm $\|\cdot\|_{p}$. Equivalently, it is contractive with respect to the supremum norm $\|\cdot\|_{\infty}$ on $C[0,T]$. By applying the Banach fixed point theorem, it follows that the equation \eqref{integraleqPrabFDE} has a unique solution $u\in C[0,T]$ and the sequence $\{u_n(t)\}_{n\geqslant0}$ defined by \begin{equation*} \begin{cases} u_0(t)&=g(t), \\ u_n(t)&=\displaystyle g(t)-\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}u_{n-1}(t), \quad n=1,2,\ldots, \end{cases} \end{equation*} converges (with respect to $\|\cdot\|_{\infty}$) to the limit $u$ in $C[0,T]$. Therefore, by the equivalence proved above, it follows that the initial value problem \eqref{eq1PrabFDE} and \eqref{eq4PrabFDE} has a unique solution $v\in C^{\beta_0,n_0-1}[0,T]$. \medskip \textbf{Solution as a limit.} We already know that the sequence $\{u_n(t)\}_{n\geqslant0}$ converges in $C[0,T]$ with respect to $\|\cdot\|_{\infty}$. 
Since the Prabhakar fractional integral preserves uniform convergence, we have the following sequence also convergent with respect to $\|\cdot\|_{\infty}$: \begin{equation*} \begin{cases} \prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}u_0(t)&=\displaystyle\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}g(t), \\ \prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}u_n(t)&=\displaystyle\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}g(t)-\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}u_{n-1}(t). \end{cases} \end{equation*} Let us denote $v_{n}(t)=\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}u_n(t)$ for all $n$. Therefore, by Lemma \ref{importantproPrabFDE} since $\Real\beta_0>\Real\beta_i\geqslant0$, \[ \prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}v_{n-1}(t)=\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}u_{n-1}(t)\quad\text{ for all }\,n, \] and so we have the following sequence of functions $v_n$: \begin{equation}\label{eq5eq6PrabFDE} \begin{cases} v_0(t)&=\displaystyle \prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}g(t), \\ v_n(t)&=\displaystyle v_0(t)-\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\sum_{i=1}^{m}\sigma_i(t)\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}v_{n-1}(t),\quad n=1,2,\ldots. \end{cases} \end{equation} Using Lemma \ref{importantproPrabFDE}, one can see that $v_n(t)\in C^{\beta_0,n_0-1}[0,T]$ for all $n$. Now we prove the convergence of the sequence $\{v_n(t)\}_{n\geqslant0}$ in $C^{\beta_0,n_0-1}[0,T]$. Since $v_n(t)=\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}u_n(t)$ and $\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_0,\omega}^{\theta_0}v_n(t)=u_n(t)$, and the same for $v$ and $u$, we get \[\frac{\mathrm{d}^k}{\mathrm{d}t^k}\Big( v_n(t)-v(t)\Big)=\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-k,\omega}^{\theta_0}\Big(u_n(t)-u(t)\Big),\quad k=0,1,\ldots,n_0-1,\] where this is a fractional integral in each case because $\Real\beta_0\not\in\mathbb{Z}$ so $\Real(\beta_0-k)>0$ for all $k$. So we have \[ \left\|\frac{\mathrm{d}^k}{\mathrm{d}t^k}\Big( v_n(t)-v(t)\Big)\right\|_{\infty}\leqslant \|u_n-u\|_{\infty}\int_0^T (t-s)^{\Real\beta_0-k-1}\big|E^{\theta_0}_{\alpha,\beta_0-k}(\omega(t-s)^{\alpha})\big|\,\mathrm{d}s, \] for $k=0,1,\ldots,n_0-1$, and of course $\left\|\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_0,\omega}^{\theta_0}(v_n-v)\right\|_{\infty}=\|u_n-u\|_{\infty}$. This gives \begin{align*} \|v_n-v\|_{C^{\beta_0,n_0-1}}&=\sum_{k=0}^{n_0-1}\left\|\frac{\mathrm{d}^k}{\mathrm{d}t^k}(v_n-v)\right\|_{\infty}+\left\|\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_0,\omega}^{\theta_0}(v_n-v)\right\|_{\infty} \\ &\hspace{-1cm}\leqslant \left(1+\sum_{k=0}^{n_0-1}\int_0^T (t-s)^{\Real\beta_0-k-1}\big|E^{\theta_0}_{\alpha,\beta_0-k}(\omega(t-s)^{\alpha})\big|\,\mathrm{d}s\right)\|u_n-u\|_{\infty} \\ &\hspace{-1cm}\leqslant B\|u_n-u\|_{\infty}, \end{align*} for some finite constant $B>0$. This implies that the sequence $\{v_n(t)\}_{n\geqslant0}$ converges in $C^{\beta_0,n_0-1}[0,T]$ with respect to $\|\cdot\|_{C^{\beta_0,n_0-1}}$, since we already know that the sequence $\{u_n(t)\}_{n\geqslant0}$ converges with respect to $\|\cdot\|_{\infty}$. 
\medskip \textbf{Explicit solution function.} From \eqref{eq5eq6PrabFDE} and Lemma \ref{importantproPrabFDE}, the first approximation is given by \begin{align*} v^1(t)&=\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}g(t)-\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\sum_{i=1}^{m}\sigma_i(t)\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}g(t) \\ &=\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}g(t)-\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}g(t) \\ &=\sum_{k=0}^{1}(-1)^k \prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\left(\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}\right)^k g(t), \end{align*} where $v^1(t)\in C^{n_0-1,\beta_0,\theta_0}[0,T]$. Let us now suppose that for $n\in\mathbb{N}$ the $n$th approximation is given by \begin{equation} \label{nthapprox} v^n(t)=\sum_{k=0}^{n}(-1)^k\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\left(\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}\right)^k g(t), \end{equation} Then, using \eqref{eq5eq6PrabFDE}, the $(n+1)$th approximation is \begin{align*} v^{n+1}(t)&=\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}g(t)-\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\sum_{i=1}^{m}\sigma_i(t)\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}v^{n}(t) \\ &=\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}g(t)-\sum_{k=0}^{n}(-1)^k\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\sum_{i=1}^{m}\sigma_i(t) \\ &\hspace{3cm}\times\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\left(\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}\right)^k g(t) \\ &=\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}g(t)+\sum_{k=0}^{n}(-1)^{k+1}\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\left(\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}\right)^{k+1} g(t) \\ &=\sum_{k=0}^{n+1}(-1)^k\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\left(\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}\right)^k g(t). \end{align*} This proves by induction that the formula \eqref{nthapprox} for $v_n$ is valid for all $n$. Therefore, \[ v(t)=\lim_{n\to\infty}v^n (t)=\sum_{k=0}^{\infty}(-1)^k\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\left(\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}\right)^k g(t), \] where the limit is taken in the norm $\|\cdot\|_{C^{\beta_0,n_0-1}}$ and therefore in particular the convergence is uniform. \end{proof} \subsection{Canonical set of solutions} We now give the explicit representation for a canonical set of solutions of the homogeneous equation \eqref{eq3PrabFDE}. We will consider different cases of the fractional orders. 
A special collection of sets will help us to consider the possible cases: \[\mathbb{W}_j:=\big\{i\in\{1,\dots,m\}\;:\;0\leqslant\Real (\beta_i)\leqslant j\big\},\quad j=0,1,\dots,n_0-1,\] and we define $\varrho_j=\min(\mathbb{W}_j)$ for any $j$ such that $\mathbb{W}_j\neq\emptyset$. Thus, $\mathbb{W}_j\subseteq\mathbb{W}_{j+1}$ for all $j$, and we have $\varrho_j\leqslant i\Leftrightarrow\Real\beta_i\leqslant j$ for each $i,j$. \begin{thm}\label{lem3.3PrabFDE} Let $\alpha,\beta_i,\theta_i,\omega\in\mathbb{C}$ with $\Real (\alpha)>0$ and $\Real (\beta_0)>\Real (\beta_1)>\cdots>\Real (\beta_{m})\geqslant0$ and $\Real (\beta_0)\not\in\mathbb{Z}$, and let $n_i=\lfloor \Real \beta_i\rfloor+1\in\mathbb{N}$ and the functions $\sigma_i,g\in C[0,T]$ for $i=0,1,\ldots,m$. Then there exists a unique canonical set of solutions in $C^{\beta_0,n_0-1}[0,T]$ for the equation \eqref{eq3PrabFDE}, namely $v_j\in C^{\beta_0,n_0-1}[0,T]$ for $j=0,1,\ldots,n_0-1$ given by \begin{equation}\label{form16} v_j(t)=\frac{t^j}{j!}+\sum_{k=0}^{\infty} (-1)^{k+1}\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\left(\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}\right)^{k}\Phi_j(t), \end{equation} where $\Phi_j$ denotes the function defined in general by \begin{equation} \label{form17} \Phi_j(t)=\sum_{i=\varrho_j}^{m}\sigma_i(t)\,t^{j-\beta_i}E_{\alpha,j-\beta_i+1}^{-\theta_i}(\omega t^\alpha), \end{equation} and it is worth noting the following special cases. \begin{enumerate} \item For the cases $j>\Real\beta_1$, we have $\varrho_j=1$: \begin{equation}\label{form17:norho} \Phi_j(t)=\sum_{i=1}^{m}\sigma_i(t)\,t^{j-\beta_i}E_{\alpha,j-\beta_i+1}^{-\theta_i}(\omega t^\alpha)\quad\text{ for }j=n_1,n_1+1,\ldots,n_0-1. \end{equation} \item For the cases $j<\Real\beta_m$, we have $\mathbb{W}_j=\emptyset$ and an empty sum $\Phi_j(t)=0$: \begin{align} \label{form17:zero} v_j(t)=\frac{t^j}{j!},\quad&\text{ for }j=0,1,\ldots,j_0,\text{ where } \\ \nonumber j_0&\in\{0,1,\ldots,n_0-2\}\text{ satisfies }j_0<\Real(\beta_m)\leqslant j_0+1. \end{align} \item If $n_0=n_1$ and $\beta_{m}=0$, then \eqref{form17:norho} defines $\Phi_j$ for all $j=0,1,\ldots,n_0-1$. \item If $\Real (\beta_i)\geqslant n_0-1$ for all $i=1,\ldots,m$, so that $n_0=n_1=\ldots=n_m$, then $\Phi_j(t)=0$ and \eqref{form17:zero} defines $v_j$ for all $j=0,1,\ldots,n_0-1$. \end{enumerate} \end{thm} \begin{proof} Following a proof similar to that of Theorem \ref{lem3.1PrabFDE}, we can show that finding the canonical set of solutions of \eqref{eq3PrabFDE}, i.e. solving \eqref{eq3PrabFDE} under the initial conditions \eqref{initcond:canonical}, is equivalent to the homogeneous version ($g(t)=0$) of the integral equation \eqref{integraleqPrabFDE}, under the correspondence $u_j(t)=\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_0,\omega}^{\theta_0}v_j(t)$ and $v_j(t)=\frac{t^j}{j!}+\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}u_j(t)$, noting that $\frac{t^j}{j!}$ is always in $C^{\beta_0,n_0-1}[0,T]$ and the other regularity conditions are obtained as in the proof of Theorem \ref{lem3.1PrabFDE}. 
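To make the index bookkeeping in the statement above concrete, the following small Python sketch (illustrative only; the parameter values are made up, real, and chosen so that every case of the theorem occurs, and the truncated Mittag--Leffler series is an assumption of the sketch rather than part of the paper) computes the sets $\mathbb{W}_j$, the indices $\varrho_j$ and the functions $\Phi_j$.
\begin{verbatim}
# Illustrative bookkeeping for W_j, rho_j and Phi_j from (form17); made-up real data.
import numpy as np
from math import gamma, floor

def E3(alpha, beta, theta, z, terms=60):
    """Truncated series of the three-parameter Mittag-Leffler function E^theta_{alpha,beta}(z)."""
    total, poch, fact = 0.0, 1.0, 1.0
    for n in range(terms):
        total += poch * z**n / (fact * gamma(alpha*n + beta))
        poch *= theta + n
        fact *= n + 1
    return total

alpha, omega = 0.7, -0.5
beta  = [2.6, 1.9, 1.2, 0.4]        # beta_0 > beta_1 > beta_2 > beta_3 >= 0, beta_0 not an integer
theta = [0.5, 0.3, 0.2, 0.1]
sigma = [None, lambda t: 1.0, lambda t: t, lambda t: np.sin(t)]   # sigma_1, ..., sigma_m
m, n0 = len(beta) - 1, floor(beta[0]) + 1

W   = {j: [i for i in range(1, m + 1) if beta[i] <= j] for j in range(n0)}
rho = {j: min(W[j]) for j in range(n0) if W[j]}

def Phi(j, t):
    """Phi_j(t) as in (form17); identically zero when W_j is empty."""
    if not W[j]:
        return 0.0 * t
    return sum(sigma[i](t) * t**(j - beta[i]) * E3(alpha, j - beta[i] + 1, -theta[i], omega*t**alpha)
               for i in range(rho[j], m + 1))

t = np.linspace(0.01, 1.0, 5)
for j in range(n0):
    print(j, W[j], Phi(j, t))
\end{verbatim}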
Since we already solved \eqref{integraleqPrabFDE} in the proof of Theorem \ref{lem3.1PrabFDE}, we can now immediately obtain that the canonical set of solutions of \eqref{eq3PrabFDE} is given by the limit as $n\to\infty$ of the following sequence derived from \eqref{eq5eq6PrabFDE}, for each $j=0,1,\ldots,n_0-1$: \begin{equation}\label{eq10eq11} \begin{cases} v^0_j (t)=\displaystyle\frac{t^j}{j!}, \\ v^n_j (t)=\displaystyle v^0_j(t)-\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\sum_{i=1}^{m}\sigma_i(t)\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}v^{n-1}_j(t),\quad n=1,2,\ldots. \end{cases} \end{equation} For $j,k\in\mathbb{N}_0$ we have \begin{equation*} \frac{\mathrm{d}^k}{\mathrm{d}t^k}\left(\frac{t^j}{j!}\right)\bigg|_{t=0+}= \begin{cases} 1,&\quad k=j, \\ 0,&\quad k\neq j. \end{cases} \end{equation*} By \eqref{alternativePrabh}, we know that \[ \prescript{C}{a}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta}\left(\frac{t^j}{j!}\right)=\prescript{RL}{a}{\mathbb{D}}_{\alpha,\beta,\omega}^{\theta}\left[\frac{t^j}{j!}-\sum_{k=0}^{n_i-1}\frac{t^{k}}{k!}\cdot\frac{\mathrm{d}^k}{\mathrm{d}t^k}\left(\frac{t^j}{j!}\right)\bigg|_{t=0+}\right]. \] Thus, for $j=0,1,\ldots,n_1-1$ (we choose this range of values since $n_1\geqslant n_i$ for all $i$), we get \begin{equation}\label{formula18} \prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}t^j=\begin{cases} \prescript{RL}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}t^j&\quad \text{ if }\varrho_j\leqslant i\leqslant m\quad (j\geqslant n_i), \\ 0&\quad\text{ if }1\leqslant i< \varrho_j\quad (j\leqslant n_i-1). \end{cases} \end{equation} For $j=n_1,\ldots,n_0-1$ (noting that this range of values exists only if $n_0>n_1$), we have $k\leqslant n_i-1<j$ for all $i=1,\ldots,m$, and hence \[ \prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}t^j=\prescript{RL}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}t^j,\quad i=1,\ldots,m. \] Now, from \eqref{eq10eq11}, the first approximation of $v_j(t)$ is given by \[ v^1_j(t)=\begin{cases} \displaystyle\frac{t^j}{j!}-\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\sum_{i=\varrho_j}^{m}\sigma_i(t)\prescript{RL}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}\left(\frac{t^j}{j!}\right),\quad j=0,1,\ldots,n_1-1, \\ \displaystyle\frac{t^j}{j!}-\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\sum_{i=1}^{m}\sigma_i(t)\prescript{RL}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}\left(\frac{t^j}{j!}\right),\quad j=n_1,n_1+1,\ldots,n_0-1. \end{cases} \] It is now clear that $v_j^1\in C^{\beta_0,n_0-1}[0,T]$ for any $j=0,1,\ldots,n_0-1$.
Let us now suppose that for $n\in\mathbb{N}$ the $n$th approximation is given by \begin{align*} v^n_j(t)=\frac{t^j}{j!}+\sum_{k=0}^{n-1}(-1)^{k+1}\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\left(\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}\right)^{k}\sum_{i=1}^{m}\sigma_i(t)\prescript{RL}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}\left(\frac{t^j}{j!}\right) \end{align*} for $j=n_1,n_1+1,\ldots,n_0-1$ and \begin{align*} v^n_j(t)=\frac{t^j}{j!}+\sum_{k=0}^{n-1}(-1)^{k+1}\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\left(\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}\right)^{k}\sum_{i=\varrho_j}^{m}\sigma_i(t)\prescript{RL}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}\left(\frac{t^j}{j!}\right) \end{align*} for $j=0,1,\ldots,n_1-1,$ and $v_j^n\in C^{\beta_0,n_0-1}[0,T]$ for $j=0,1,\ldots,n_0-1$. Inductively, we shall prove the analogous formula for the $(n+1)$th approximation. For $j=n_1,n_1+1,\ldots,n_0-1$, we obtain it by \begin{align*} v_j^{n+1}(t)&=\frac{t^j}{j!}-\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\sum_{i=1}^{m}\sigma_i(t)\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}v_j^{n}(t) \\ &=\frac{t^j}{j!}-\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\sum_{i=1}^{m}\sigma_i(t)\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}\left(\frac{t^j}{j!}\right) \\ &\hspace{1cm}+\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\sum_{i=1}^{m}\sigma_i(t)\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}\Bigg(\sum_{k=0}^{n-1}(-1)^{k+2} \\ &\hspace{3cm}\times\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\left(\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}\right)^{k}\sum_{i=1}^{m}\sigma_i(t)\prescript{RL}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}\left(\frac{t^j}{j!}\right)\Bigg) \end{align*} Using Lemma \ref{importantproPrabFDE} and \eqref{formula18}, this becomes \begin{align*} v_j^{n+1}(t)&=\frac{t^j}{j!}-\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\sum_{i=1}^{m}\sigma_i(t)\prescript{RL}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}\left(\frac{t^j}{j!}\right) \\ &\hspace{1cm}+\sum_{k=0}^{n-1}(-1)^{k+2}\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\sum_{i=1}^{m}\sigma_i(t) \\ &\hspace{3cm}\times\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}\left(\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}\right)^{k}\sum_{i=1}^{m}\sigma_i(t)\prescript{RL}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}\left(\frac{t^j}{j!}\right) \\ &=\frac{t^j}{j!}-\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\sum_{i=1}^{m}\sigma_i(t)\prescript{RL}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}\left(\frac{t^j}{j!}\right) \\ &\hspace{1cm}+\sum_{k=0}^{n-1}(-1)^{k+2}\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\left(\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}\right)^{k+1}\sum_{i=1}^{m}\sigma_i(t)\prescript{RL}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}\left(\frac{t^j}{j!}\right) \\ &=\frac{t^j}{j!}+\sum_{k=0}^{n} 
(-1)^{k+1}\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\left(\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}\right)^{k}\sum_{i=1}^{m}\sigma_i(t)\prescript{RL}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}\left(\frac{t^j}{j!}\right). \end{align*} In the same manner, for $j=0,1,\ldots,n_1-1$, one can obtain the $(n+1)$th approximation as \begin{align*} v_j^{n+1}(t)=\frac{t^j}{j!}+\sum_{k=0}^{n}(-1)^{k+1}\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\left(\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}\right)^{k}\sum_{i=\varrho_j}^{m}\sigma_i(t)\prescript{RL}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}\left(\frac{t^j}{j!}\right). \end{align*} In either case, $v_j^{n+1}\in C^{\beta_0,n_0-1}[0,T]$ for all $j=0,1,\ldots,n_0-1$, and the induction process is complete. By the same argument used at the end of the proof of Theorem \ref{lem3.1PrabFDE}, we have for each $j$ that $v_j=\displaystyle{\lim_{n\to\infty}v_j^n}\in C^{\beta_0,n_0-1}[0,T]$. We have now established the general formula \eqref{form16} for the solution function $v_j$, with the general expression \eqref{form17} for $\Phi_j$ and the special case \eqref{form17:norho} when $j=n_1,n_1+1,\ldots,n_0-1$, after taking into account the following fact: \[ \prescript{RL}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i}\left(\frac{t^j}{j!}\right)=t^{j-\beta_i}E_{\alpha,j-\beta_i+1}^{-\theta_i}(\omega t^\alpha), \] which is easily proved using the series formula \eqref{PR:series} and standard facts on Riemann--Liouville differintegrals of power functions. Note that $j-\beta_i+1$ has positive real part for every $i,j$ in the sum, since $i\geqslant\varrho_j$ and therefore $j\geqslant\Real\beta_i>\Real(\beta_i-1)$. The other special cases mentioned in the theorem follow by carefully analysing the expression \eqref{formula18} and the definition of the $\varrho_j$. We leave the details to the interested reader. \end{proof} \subsection{Explicit form for solutions in the general case} We now have explicit formulae, both for the canonical set of solutions given by the homogeneous FDE \eqref{eq3PrabFDE} with unit initial conditions \eqref{initcond:canonical} (as found in Theorem \ref{lem3.3PrabFDE}), and for the solution to the inhomogeneous FDE \eqref{eq1PrabFDE} with homogeneous initial conditions \eqref{eq4PrabFDE} (as found in Theorem \ref{lem3.1PrabFDE}). Combining these two results, we can obtain an explicit formula for the solution of the general initial value problem given by the inhomogeneous FDE \eqref{eq1PrabFDE} with the general initial conditions \eqref{eq2PrabFDE}. \begin{thm}\label{secondthmFDEprab} Let $\alpha,\beta_i,\theta_i,\omega\in\mathbb{C}$ with $\Real (\alpha)>0$ and $\Real (\beta_0)>\Real (\beta_1)>\cdots>\Real (\beta_{m})\geqslant0$ and $\Real (\beta_0)\not\in\mathbb{Z}$, and let $n_i=\lfloor \Real \beta_i\rfloor+1\in\mathbb{N}$ and the functions $\sigma_i,g\in C[0,T]$ for $i=0,1,\ldots,m$.
Then the general initial value problem \eqref{eq1PrabFDE} and \eqref{eq2PrabFDE} has a unique solution $v\in C^{\beta_0,n_0-1}[0,T]$ and it is represented by \[v(t)=\sum_{j=0}^{n_0-1}e_j v_j(t)+V_h(t),\] where the functions $v_j$ are the canonical set of solutions found in Theorem \ref{lem3.3PrabFDE} and the function $V_h$ is \[ V_h(t):=\sum_{k=0}^{\infty}(-1)^k\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0}\left(\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i}\right)^{k}g(t). \] \end{thm} \begin{proof} This follows from Theorem \ref{lem3.3PrabFDE}, Theorem \ref{lem3.1PrabFDE}, and the superposition principle, noting that $V_h$ is exactly the function \eqref{for27} found in Theorem \ref{lem3.1PrabFDE}. \end{proof} \begin{rem}\label{rem1FDEPrab} Setting $\theta=0$ reduces the Caputo--Prabhakar derivative to the classical Caputo derivative, and it is straightforward to check that our results in this section reduce to those of \cite{analitical} when $\theta=0$. \end{rem} \subsection{Extension to operators with respect to functions} The results proved above can be generalised by replacing the Prabhakar integrals and derivatives by the same operators taken with respect to a general monotonic $C^1$ function $\psi(t)$ satisfying $\psi(0)=0$ and $\psi'>0$ everywhere, instead of just with respect to $t$. In the setting of these generalised operators, we write the FDE as follows: \begin{equation}\label{WRTF:eq1PrabFDE} \prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_0,\omega}^{\theta_0;\psi(t)}v(t)+\sum_{i=1}^{m}\sigma_i(t)\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i;\psi(t)}v(t)=g(t),\quad t\in[0,T], \end{equation} to be solved for the unknown function $v(t)$, under the general initial conditions \begin{equation}\label{WRTF:eq2PrabFDE} \left(\frac{1}{\psi'(t)}\cdot\frac{\mathrm{d}}{\mathrm{d}t}\right)^k v(t)\bigg|_{t=0+}=e_k\in\mathbb{C},\quad k=0,1,\ldots,n_0-1, \end{equation} where $\alpha,\beta_i,\theta_i,\omega\in\mathbb{C}$ with $\Real (\alpha)>0$ and $\Real (\beta_0)>\Real (\beta_1)>\cdots>\Real (\beta_{m})\geqslant0$ and $n_i=\lfloor \Real \beta_i\rfloor+1\in\mathbb{N}$ and the functions $\sigma_i,g\in C[0,T]$ for $i=0,1,\ldots,m$. We also consider the corresponding homogeneous FDE: \begin{equation}\label{WRTF:eq3PrabFDE} \prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_0,\omega}^{\theta_0;\psi(t)}v(t)+\sum_{i=1}^{m}\sigma_i(t)\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta_i;\psi(t)}v(t)=0,\quad t\in[0,T], \end{equation} and the homogeneous initial conditions \begin{equation}\label{WRTF:eq4PrabFDE} \left(\frac{1}{\psi'(t)}\cdot\frac{\mathrm{d}}{\mathrm{d}t}\right)^k v(t)\bigg|_{t=0+}=0,\quad k=0,1,\ldots,n_0-1. \end{equation} The analogue of Theorem \ref{lem3.1PrabFDE} for this type of problem, with respect to a function $\psi$, is as follows. \begin{thm}\label{WRTF:lem3.1PrabFDE} Let $\psi\in C^1[0,\infty)$ be a monotonic function with $\psi(0)=0$ and $\psi'>0$ everywhere. Let $\alpha,\beta_i,\theta_i,\omega\in\mathbb{C}$ with $\Real (\alpha)>0$ and $\Real (\beta_0)>\Real (\beta_1)>\cdots>\Real (\beta_{m})\geqslant0$ and $\Real (\beta_0)\not\in\mathbb{Z}$, and let $n_i=\lfloor \Real \beta_i\rfloor+1\in\mathbb{N}$ and the functions $\sigma_i,g\in C[0,T]$ for $i=0,1,\ldots,m$.
Then the FDE \eqref{WRTF:eq1PrabFDE} under the conditions \eqref{WRTF:eq4PrabFDE} has a unique solution $v\in C^{\beta_0,n_0-1}_{\psi}[0,T]$, and it is represented by the following uniformly convergent series: \[ v(t)=\sum_{k=0}^{\infty}(-1)^k \prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0;\psi(t)}\left(\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i;\psi(t)}\right)^{k}g(t). \] \end{thm} \begin{proof} The proof is identical to that of Theorem \ref{lem3.1PrabFDE}, except with all integrals, derivatives, operators, function spaces, etc. taken with respect to the function $\psi$. The first part of the proof (equivalence of the integral equation) is similar enough to be omitted entirely. For the second part, we note that the norm $\|\cdot\|_p$ would here be defined by \[ \|z\|_{p}:=\max_{t\in[0,T]}\Big(e^{-p\psi(t)}|z(t)|\Big), \] and we would use the following estimate for the Riemann--Liouville integral with respect to $\psi$: \[ \Big|\prescript{RL}{0}I^{\lambda}_{\psi(t)}e^{p\psi(t)}\Big|\leqslant \frac{\Gamma(\Real\lambda)}{\left|\Gamma(\lambda)\right|}\cdot\frac{e^{p\psi(t)}}{p^{\Real\lambda}}, \quad t,p\in \mathbb{R}_+,\;\Real\lambda>0, \] which follows immediately from \eqref{util} using the conjugation relations for operators with respect to $\psi$. This enables boundedness of the relevant linear operator to be shown in the same way as in the proof of Theorem \ref{lem3.1PrabFDE}. Finally, for the bounding of the integral in the third part of the proof, we will need to bound the multiplier $\psi'(t)$ as well as everything else, but this is perfectly possible since $\psi$ is assumed to be a $C^1$ function. \end{proof} Next, the analogue of Theorem \ref{lem3.3PrabFDE} for a canonical set of solutions with respect to a function $\psi$ is as follows. \begin{thm}\label{WRTF:lem3.3PrabFDE} Let $\psi\in C^1[0,\infty)$ be a monotonic function with $\psi(0)=0$ and $\psi'>0$ everywhere. Let $\alpha,\beta_i,\theta_i,\omega\in\mathbb{C}$ with $\Real (\alpha)>0$ and $\Real (\beta_0)>\Real (\beta_1)>\cdots>\Real (\beta_{m})\geqslant0$ and $\Real (\beta_0)\not\in\mathbb{Z}$, and let $n_i=\lfloor \Real \beta_i\rfloor+1\in\mathbb{N}$ and the functions $\sigma_i,g\in C[0,T]$ for $i=0,1,\ldots,m$. Then there exists a unique canonical set of solutions of equation \eqref{WRTF:eq3PrabFDE}, namely $v_{j,\psi}\in C^{\beta_0,n_0-1}_{\psi}[0,T]$ for $j=0,1,\ldots,n_0-1$ given by \begin{equation}\label{WRTF:form16} v_{j,\psi}(t)=\frac{\psi(t)^j}{j!}+\sum_{k=0}^{\infty} (-1)^{k+1}\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0;\psi(t)}\left(\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i;\psi(t)}\right)^{k}\Phi_{j}\big(\psi(t)\big), \end{equation} where $\Phi_j$ is the same function defined by \eqref{form17} in Theorem \ref{lem3.3PrabFDE}. \end{thm} \begin{proof} Once again, the proof is identical to that of Theorem \ref{lem3.3PrabFDE} except that everything is taken with respect to the function $\psi$. We note in particular that \begin{equation*} \left(\frac{1}{\psi'(t)}\cdot\frac{\mathrm{d}}{\mathrm{d}t}\right)^k\left(\frac{\psi(t)^j}{j!}\right)\bigg|_{t=0+}= \begin{cases} 1,&\quad k=j, \\ 0,&\quad k\neq j, \end{cases} \end{equation*} which is the key fact needed in constructing the canonical set of solution functions.
\end{proof} Finally, the analogue of Theorem \ref{secondthmFDEprab} for the solution to a general initial value problem with respect to a function $\psi$ is as follows. \begin{thm}\label{WRTF:secondthmFDEprab} Let $\psi\in C^1[0,\infty)$ be a monotonic function with $\psi(0)=0$ and $\psi'>0$ everywhere. Let $\alpha,\beta_i,\theta_i,\omega\in\mathbb{C}$ with $\Real (\alpha)>0$ and $\Real (\beta_0)>\Real (\beta_1)>\cdots>\Real (\beta_{m})\geqslant0$ and $\Real (\beta_0)\not\in\mathbb{Z}$, and let $n_i=\lfloor \Real \beta_i\rfloor+1\in\mathbb{N}$ and the functions $\sigma_i,g\in C[0,T]$ for $i=0,1,\ldots,m$. Then the general initial value problem \eqref{WRTF:eq1PrabFDE} and \eqref{WRTF:eq2PrabFDE} has a unique solution $v\in C^{\beta_0,n_0-1}_{\psi}[0,T]$ and it is represented by \[v(t)=\sum_{j=0}^{n_0-1}e_j v_{j,\psi}(t)+V_{h,\psi}(t),\] where the functions $v_{j,\psi}$ are the canonical set of solutions found in Theorem \ref{WRTF:lem3.3PrabFDE} and the function $V_{h,\psi}$ is \[ V_{h,\psi}(t):=\sum_{k=0}^{\infty}(-1)^k\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta_0;\psi(t)}\left(\sum_{i=1}^{m}\sigma_i(t)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{\theta_0-\theta_i;\psi(t)}\right)^{k}g(t). \] \end{thm} \begin{proof} The proof follows exactly the same lines as that of Theorem \ref{secondthmFDEprab}, which is the case $\psi(t)=t$ (not with respect to a function). We omit the straightforward details. \end{proof} \section{Examples}\label{FDEPrabconstcoe} In this section, to illustrate the general results achieved above, we will study the same initial value problems under the assumption that the coefficient functions $\sigma_i(t)$ are actually constant functions. Thus, we consider the following Prabhakar-type linear differential equation with constant coefficients: \begin{equation}\label{eq1PrabFDEconst} \prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_0,\omega}^{\theta}v(t)+\sum_{i=1}^{m}\sigma_i\prescript{C}{0}{\mathbb{D}}_{\alpha,\beta_i,\omega}^{\theta}v(t)=g(t),\quad t\in[0,T], \end{equation} under the same initial conditions \eqref{eq2PrabFDE}, where $\sigma_i,\alpha,\beta_i,\theta,\omega\in\mathbb{C}$ with $\Real (\alpha)>0$ and $\Real (\beta_0)>\Real (\beta_1)>\cdots>\Real (\beta_{m})\geqslant0$ and $g\in C[0,T]$ and $n_i=\lfloor \Real \beta_i\rfloor+1\in\mathbb{N}$ for $i=0,1,\ldots,m$. Notice that we are not considering a whole set of different parameters $\theta_i$ in the equation \eqref{eq1PrabFDEconst}, as we did in the previous section. Instead, we have fixed $\theta_i=\theta$ for all $i=0,1,\ldots,m$. This allows us to obtain simpler representations of the solution functions, using multivariate Mittag-Leffler functions that already exist in the literature \cite{luchko}, which we recall as follows.
\begin{defn} The multivariate Mittag-Leffler function of $n$ complex variables $z_1,\ldots,z_n$, with $n+1$ complex parameters $\alpha_1,\ldots,\alpha_n,\beta$ satisfying $\Real\alpha_i,\Real\beta>0$ for $i=1,\ldots,n$, is defined by \begin{align*} E_{(\alpha_1,\ldots,\alpha_n),\beta}(z_1,\ldots,z_n)&=\sum_{k=0}^{\infty}\sum_{\substack{k_1+\cdots+k_n= k, \\ k_1,\ldots,k_n\geq0}}\frac{k!}{k_1!\times\cdots\times k_n!}\cdot\frac{\displaystyle{\prod_{i=1}^n z_i^{k_i}}}{\Gamma\left(\beta+\displaystyle{\sum_{i=1}^n\alpha_i k_i}\right)} \\ &=\sum_{k_1,\ldots,k_n\geq0}\frac{(k_1+\cdots+k_n)!}{k_1!\times\cdots\times k_n!}\cdot\frac{z_1^{k_1}\times\cdots\times z_n^{k_n}}{\Gamma\left(\beta+\alpha_1 k_1+\cdots+\alpha_n k_n\right)}, \end{align*} where the multiple series is locally uniformly convergent for all $(z_1,\ldots,z_n)\in\mathbb{C}^n$ under the given conditions on the parameters. \end{defn} We now establish the main results of this section. \begin{thm}\label{thm3.1FDEPrabconst} Let $\sigma_i,\alpha,\beta_i,\theta,\omega\in\mathbb{C}$ with $\Real (\alpha)>0$ and $\Real (\beta_0)>\Real (\beta_1)>\cdots>\Real (\beta_{m})\geqslant0$ and $\Real(\beta_0)\not\in\mathbb{Z}$, and let $n_i=\lfloor \Real \beta_i\rfloor+1\in\mathbb{N}$ for $i=0,1,\ldots,m$, and $g\in C[0,T]$. Then the FDE \eqref{eq1PrabFDEconst} with homogeneous initial conditions \eqref{eq4PrabFDE} has a unique solution $v\in C^{\beta_0,n_0-1}[0,T]$ and it is represented by \begin{align*} v(t)=\sum_{n=0}^{\infty}&\frac{(\theta)_n\omega^n}{n!}\int_0^t s^{\beta_0+\alpha n-1}\times \\ &\times E_{(\beta_0-\beta_1,\ldots,\beta_0-\beta_m),\alpha n+\beta_0}(-\sigma_1 s^{\beta_0-\beta_1},\ldots,-\sigma_m s^{\beta_0-\beta_m})g(t-s)\,\mathrm{d}s. \end{align*} \end{thm} \begin{proof} By Theorem \ref{lem3.1PrabFDE}, we know that the FDE \eqref{eq1PrabFDEconst} with the conditions \eqref{eq4PrabFDE} has a unique solution $v\in C^{\beta_0,n_0-1}[0,T]$ given by \[ v(t)=\sum_{k=0}^{\infty}(-1)^k \prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta}\left(\sum_{i=1}^{m}\sigma_i\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{0}\right)^{k}g(t). \] By the semigroup property of the Prabhakar fractional integral \eqref{PI:semi}, the process of taking a finite sum of Prabhakar integral terms and raising it to a finite power will look exactly like doing the same thing with just numbers, as the ``powers'' of Prabhakar integrals combine according to the semigroup property. (This can be formalised using Mikusi\'nski's operational calculus for an algebraic interpretation of Prabhakar operators \cite{rani-fernandez1}, but in this case it is clear from direct calculation.)
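As a brief aside (not part of the proof), the defining series recalled in the definition above can be truncated to evaluate the multivariate Mittag--Leffler kernel numerically. The following Python sketch assumes real arguments and made-up test values, and includes a sanity check against the two-parameter Mittag--Leffler series in the one-variable case.
\begin{verbatim}
# Illustrative truncated-series evaluator for E_{(alpha_1,...,alpha_n),beta}(z_1,...,z_n).
from math import gamma, factorial
from itertools import product

def multivariate_ml(alphas, beta, zs, max_order=40):
    """Multivariate Mittag-Leffler function, truncated at total degree max_order (real arguments)."""
    n = len(alphas)
    total = 0.0
    for ks in product(range(max_order + 1), repeat=n):
        k = sum(ks)
        if k > max_order:
            continue
        coeff = factorial(k)                       # multinomial coefficient k!/(k_1!...k_n!)
        term = 1.0
        for ki, zi in zip(ks, zs):
            coeff /= factorial(ki)
            term *= zi**ki
        total += coeff * term / gamma(beta + sum(a*ki for a, ki in zip(alphas, ks)))
    return total

# sanity check: with one variable it reduces to the two-parameter function E_{alpha,beta}(z)
approx = multivariate_ml([0.8], 1.2, [0.5])
series = sum(0.5**k / gamma(0.8*k + 1.2) for k in range(60))
print(abs(approx - series) < 1e-12)   # expect True
\end{verbatim}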
So, we get a multinomial expansion leading to a multivariate Mittag-Leffler function as follows: \begin{align} v(t)&=\sum_{k=0}^{\infty}(-1)^k \prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta}\left(\sum_{i=1}^{m}\sigma_i\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0-\beta_i,\omega}^{0}\right)^{k}g(t) \nonumber \\ &=\sum_{k=0}^{\infty}(-1)^k \prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0,\omega}^{\theta}\left(\sum_{k_1+\cdots+k_m=k}\frac{k!}{k_1!\times\cdots\times k_m!}\prod_{i=1}^{m}\sigma_i^{k_i}\prescript{}{0}{\mathbb{I}}_{\alpha,(\beta_0-\beta_i)k_i,\omega}^{0}\right)g(t) \nonumber \\ &=\sum_{k=0}^{\infty}(-1)^k \sum_{k_1+\cdots+k_m=k}\frac{k!}{k_1!\times\cdots\times k_m!}\left(\prod_{i=1}^{m}\sigma_i^{k_i}\right)\prescript{}{0}{\mathbb{I}}_{\alpha,\beta_0+\sum_{i=1}^{m}(\beta_0-\beta_i)k_{i},\omega}^{\theta}g(t) \nonumber \\ &=\sum_{k=0}^{\infty}\sum_{k_1+\cdots+k_m=k}\frac{k!\prod_{i=1}^{m}(-\sigma_i)^{k_i}}{k_1!\times\cdots\times k_m!}\sum_{n=0}^{\infty}\frac{(\theta)_n\omega^n}{n!}\prescript{RL}{0}I^{n\alpha+\beta_0+\sum_{i=1}^{m}(\beta_0-\beta_i)k_{i}}g(t). \label{multisum} \end{align} Writing the Riemann--Liouville integral explicitly using the integral \eqref{fraci}, and interchanging the integrals and sums, we arrive at: \begin{multline*} v(t)=\sum_{n=0}^{\infty}\frac{(\theta)_n\omega^n}{n!}\int_0^t s^{\beta_0+\alpha n-1} \times \\ \times E_{(\beta_0-\beta_1,\ldots,\beta_0-\beta_m),\alpha n+\beta_0}\left(-\sigma_1 s^{\beta_0-\beta_1},\ldots,-\sigma_m s^{\beta_0-\beta_m}\right)g(t-s)\,\mathrm{d}s, \end{multline*} which is the stated formula. \end{proof} \begin{thm} Let $\sigma_i,\alpha,\beta_i,\theta,\omega\in\mathbb{C}$ with $\Real (\alpha)>0$ and $\Real (\beta_0)>\Real (\beta_1)>\cdots>\Real (\beta_{m})\geqslant0$ and $\Real(\beta_0)\not\in\mathbb{Z}$, and let $n_i=\lfloor \Real \beta_i\rfloor+1\in\mathbb{N}$ for $i=0,1,\ldots,m$, and $g\in C[0,T]$. Then the FDE \eqref{eq1PrabFDEconst} with general initial conditions \eqref{eq2PrabFDE} has a unique solution $v\in C^{\beta_0,n_0-1}[0,T]$ and it is represented by: \[v(t)=\sum_{j=0}^{n_0-1}e_j v_j(t)+V_h^{c}(t),\] where $V_h^c$ is given by \begin{multline*} V_h^{c}(t):=\sum_{n=0}^{\infty}\frac{(\theta)_n\omega^n}{n!}\int_0^t s^{\beta_0+\alpha n-1} \times \\ \times E_{(\beta_0-\beta_1,\ldots,\beta_0-\beta_m),\alpha n+\beta_0}\left(-\sigma_1 s^{\beta_0-\beta_1},\ldots,-\sigma_m s^{\beta_0-\beta_m}\right)g(t-s)\,\mathrm{d}s, \end{multline*} and the canonical set of solutions $v_j$ for the constant-coefficient problem is given by \begin{multline*} v_j(t)=\frac{t^j}{j!}-\sum_{n=0}^{\infty}\frac{(\theta)_n\omega^n}{n!}\int_0^t s^{\beta_0+\alpha n-1} \times \\ \times E_{(\beta_0-\beta_1,\ldots,\beta_0-\beta_m),\alpha n+\beta_0}\left(-\sigma_1 s^{\beta_0-\beta_1},\ldots,-\sigma_m s^{\beta_0-\beta_m}\right)\Phi_j(t-s)\,\mathrm{d}s, \end{multline*} with the function $\Phi_j$ defined in the same way as in Theorem \ref{lem3.3PrabFDE}, by the formula \eqref{form17} with special cases given by \eqref{form17:norho}, \eqref{form17:zero}, etc. \end{thm} \begin{proof} This follows from the general result of Theorem \ref{secondthmFDEprab}. The function $V_h$ found in Theorem \ref{secondthmFDEprab} now becomes the function $V_h^c$ which we already simplified in Theorem \ref{thm3.1FDEPrabconst} above. By comparing the formulae \eqref{form16} and \eqref{for27}, we can observe that, in general, the function $v_j(t)$ is exactly $\frac{t^j}{j!}$ minus the function $V_h$ with $g$ replaced by $\Phi_j$.
This gives us the expressions stated here for the $v_j$ functions. \end{proof} \begin{rem} A general linear constant-coefficient Caputo--Prabhakar fractional differential equation of the form \eqref{eq1PrabFDEconst} was already studied and solved in \cite{rani-fernandez2}, using the method of Mikusi\'nski's operational calculus. The solution found there is consistent with the one we have found here, just expressed in a different form due to a different choice in how to manage the multiple sums. What emerges in \eqref{multisum} is a multiple sum that combines a sum over $n$ (corresponding to the Prabhakar function) with a sum over $k_1,\cdots,k_m$ (corresponding to a multivariate Mittag-Leffler function). In our results above, we have simplified this to a single sum over $n$ of an expression involving multivariate Mittag-Leffler functions whose parameters depend on $n$. In the previous work of \cite{rani-fernandez2}, the same expression was simplified to a sum over $k_1,\cdots,k_m$ of an expression involving a Prabhakar function whose parameters depend on $k_1,\cdots,k_m$. These are two different valid choices for how to simplify the complicated expression, and so our work here complements that of \cite{rani-fernandez2} by expressing the same solution function in a different form, a single sum of multivariate Mittag-Leffler functions rather than a multiple sum of univariate Mittag-Leffler functions. \end{rem} \section{Acknowledgements} \noindent The second and third authors were supported by the Nazarbayev University Program 091019CRP2120. The second author was also supported by the FWO Odysseus 1 grant G.0H94.18N: Analysis and Partial Differential Equations and the Methusalem programme of the Ghent University Special Research Fund (BOF) (Grant number 01M01021). \begin{thebibliography}{00} \bibitem{agrawal} O.P. Agrawal, Some generalized fractional calculus operators and their applications in integral equations, Fract. Calc. Appl. Anal. 15(4) (2012), 700--711. \bibitem{AML} C.N. Angstmann, B.I. Henry, Generalized fractional power series solutions for fractional differential equations, Appl. Math. Lett. 102 (2020). \bibitem{vcserbia1} T.M. Atanackovi\'c, B. Stankovi\'c. Linear fractional differential equation with variable coefficients I. Bull. de l Acad. Serbe Sci. Arts, Cl. Math. 38 (2013), 27--42. \bibitem{vcserbia2} T.M. Atanackovi\'c, B. Stankovi\'c. Linear fractional differential equation with variable coefficients II. Bull. de l Acad. Serbe Sci. Arts, Cl. Math. 39 (2014), 53--78. \bibitem{BRS} D. Baleanu, J.E. Restrepo, D. Suragan. A class of time-fractional Dirac type operators. Chaos Solitons Fractals, 143, \#510590, (2021). \bibitem{bonilla-trujillo-rivero} B. Bonilla, J.J. Trujillo, M. Rivero. Fractional Order Continuity and Some Properties about Integrability and Differentiability of Real Functions. J. Math. Anal. Appl. 231 (1999), 205--212. \bibitem{first} M.M. Dzhrbashyan, A.B. Nersessyan. Fractional derivatives and Cauchy problem for differential equations of fractional order, Izv. AN Arm. SSR. Mat. 3 (1968). \bibitem{pocha} A. Erd\'elyi, W.F. Oberhettinger, F.G. Tricomi. Higher Transcendental Functions, Vol. I. McGraw-Hill, New York, 1953. \bibitem{fahad-fernandez-rehman-siddiqi} H.M. Fahad, A. Fernandez, M. u. Rehman, M. Siddiqi, Tempered and Hadamard-type fractional calculus with respect to functions, Medit. J. Math. 18 (2021), 143. \bibitem{fahad-rehman-fernandez} H.M. Fahad, M. ur Rehman, A. 
Fernandez, On Laplace transforms with respect to functions and their applications to fractional differential equations, Math. Meth. Appl. Sci. (2021), 1--20. \bibitem{fb:ssrn} A. Fernandez, D. Baleanu. Differintegration with respect to functions in fractional models involving Mittag-Leffler functions. SSRN 3275746 (2018). \bibitem{fernandez-baleanu} A. Fernandez, D. Baleanu. Classes of Operators in Fractional Calculus: A Case Study. Math. Meth. Appl. Sci. 44(11) (2021), 9143--9162. \bibitem{fernandez-baleanu-srivastava} A. Fernandez, D. Baleanu, H.M. Srivastava. Series representations for fractional-calculus operators involving generalised Mittag-Leffler functions. Commun. Nonlin. Sci. Numer. Simul. 67 (2019), 517--527. \bibitem{fernandez-ozarslan-baleanu} A. Fernandez, M.A. \"Ozarslan, D. Baleanu. On fractional calculus with general analytic kernels. Appl. Math. Comput. 354 (2019), 248--265. \bibitem{FRS:AB} A. Fernandez, J.E. Restrepo, D. Suragan. Linear differential equations with variable coefficients and Mittag-Leffler kernels. Alex. Eng. J. 61 (2022), 4757--4763. \bibitem{FRS} A. Fernandez, J.E. Restrepo, D. Suragan. A new representation for the solutions of fractional differential equations with variable coefficients. Under review (2020). \bibitem{prabcap} R. Garra, R. Gorenflo, F. Polito, Z. Tomovski. Hilfer--Prabhakar derivatives and some applications. Appl. Math. Comput. 242, (2014), 576--589. \bibitem{garrappa-maione} R. Garrappa, G. Maione. Fractional Prabhakar Derivative and Applications in Anomalous Dielectrics: A Numerical Approach. In: A. Babiarz, A. Czornik, J. Klamka, M. Niezabitowski, eds., Theory and Applications of Non-integer Order Systems, Springer, Cham, 2017. \bibitem{giusti-etal} A. Giusti, I. Colombaro, R. Garra, R. Garrappa, F. Polito, M. Popolizio, F. Mainardi. A practical guide to Prabhakar fractional calculus. Fract. Calc. Appl. Anal. 23(1) (2020), 9--54. \bibitem{mittag} R. Gorenflo, A.A. Kilbas, F. Mainardi, S.V. Rogosin. Mittag-Leffler Functions, Related Topics and Applications, 2nd ed. Springer Monographs in Mathematics, Springer, New York, 2020. \bibitem{hilfer} R. Hilfer, ed. Applications of Fractional Calculus in Physics. World Scientific, Singapore, 2000. \bibitem{kilbas-marzan} A. Kilbas, S. Marzan, Cauchy problem for differential equation with Caputo derivative, Fract. Calc. Appl. Anal. 7(3) (2004), 297--321. \bibitem{kilbasalpha} A.A. Kilbas, M. Rivero, L. Rodrignez-Germa, J.J. Trujillo, $\alpha$-Analytic solutions of some linear fractional differential equations with variable coefficients, Appl. Math. Comput. 187 (2007), 239--249. \bibitem{generalizedfc} A.A. Kilbas, M. Saigo, R.K. Saxena. Generalized Mittag-Leffler function and generalized fractional calculus operators. Integr. Transf. Spec. F. 15(1) (2004), 31--49. \bibitem{kilbas} A.A. Kilbas, H.M. Srivastava, J.J. Trujillo. Theory and Applications of Fractional Differential Equations. North-Holland Mathematics Studies, vol. 204. Elsevier Science B.V., Amsterdam, 2006. \bibitem{RL} M. Kim, O. Hyong-Chol. Explicit representations of Green's function for linear fractional differential operator with variable coefficients. J. Fract. Calc. Appl. 5(1) (2014), 26--36. \bibitem{luchko} Y. Luchko, R. Gorenflo. An operational method for solving fractional differential equations with the Caputo derivatives. Acta Math Vietnam. 24(2) (1999), 207--233. \bibitem{miller} K.S. Miller, B. Ross. An Introduction to the Fractional Calculus and Fractional Differential Equations. John Wiley, New York, 1993. 
\bibitem{oldham} K.B. Oldham, J. Spanier. The Fractional Calculus. Academic Press, New York, 1974. \bibitem{oliveira1} D.S. Oliveira. Properties of $\psi$-Mittag-Leffler integrals. Rendiconti del Circolo Matematico di Palermo Series 2 (2021), 1--14. \bibitem{oliveira2} D.S. Oliveira. $\psi$-Mittag-Leffler pseudo-fractional operators. J. Pseudo-Differ. Oper. Appl. 12(3) (2021), 1--37. \bibitem{osler} T.J. Osler. Leibniz rule for fractional derivatives generalized and an application to infinite series. SIAM J. Appl. Math. 18(3) (1970), 658--674. \bibitem{oumarou-fahad-djida-fernandez} C.M.S. Oumarou, H.M. Fahad, J.D. Djida, A. Fernandez. On fractional calculus with analytic kernels with respect to functions. Comput. Appl. Math. 40 (2021), 244. \bibitem{analitical} S. Pak, H. Choi, K. Sin, K. Ri, Analytical solutions of linear inhomogeneous fractional differential equation with continuous variable coefficients. Adv Differ Equ 2019 (2019), 256. \bibitem{AMS-1938} E. Pitcher, W.E. Sewel. Existence theorems for solutions of differential equations of non-integer order. Bull. Amer. Math. Soc. 44(2) (1938), 100--107. \bibitem{podlubny} I. Podlubny. Fractional Differential Equations. Academic Press, San Diego, 1998. \bibitem{polito} F. Polito, \u{Z}. Tomovski. Some properties of Prabhakar-type fractional calculus operators. Fractional Differ. Calc., 6(1), (2016), 73--94. \bibitem{Prab1971} T.R. Prabhakar. A singular integral equation with a generalized Mittag-Leffler function in the kernel. Yokohama. Math. J., 19, (1971), 7--15. \bibitem{rani-fernandez1} N. Rani, A. Fernandez. Mikusinski's operational calculus for Prabhakar fractional calculus. Int. Transf. Spec. Func. (2022), 1--21. DOI: 10.1080/10652469.2022.2057970 \bibitem{rani-fernandez2} N. Rani, A. Fernandez. Solving Prabhakar differential equations using Mikusinski’s operational calculus. Comp. Appl. Math. 41 (2022), 107. \bibitem{RRS} J.E. Restrepo, M. Ruzhansky, D. Suragan. Explicit solutions for linear variable-coefficient fractional differential equations with respect to functions. Appl. Math. Comput. 403, (2021), 126177. \bibitem{RRSdirac} J.E. Restrepo, M. Ruzhansky, D. Suragan. Generalized time-fractional Dirac type operators and Cauchy type problems. Under review, (2020). \bibitem{RS:MMAS} J.E. Restrepo, D. Suragan. Oscillatory solutions of fractional integro-differential equations II. Math. Meth. Appl. Sci. 44(8) (2021), 7262--7274. \bibitem{RSade} J.E. Restrepo, D. Suragan. Direct and inverse Cauchy problems for generalized space-time fractional differential equations. Adv. Differential Equations 26(7--8) (2021), 305--339. \bibitem{vcapl} M. Rivero, L. Rodr\'igez--Cierm\'a, J. J. Trujillo. Linear fractional differential equations with variable coefficients, Appl. Math. Lett. 21 (2008), 892--897. \bibitem{samko} S.G. Samko, A.A. Kilbas, O.I. Marichev. Fractional integrals and derivatives, translated from the 1987 Russian original, Gordon and Breach, Yverdon, (1993). \bibitem{sun-etal} H.G. Sun, Y. Zhang, D. Baleanu, W. Chen, Y.Q. Chen. A new collection of real world applications of fractional calculus in science and engineering. Commun. Nonlin. Sci. Numer. Simul. 64 (2018), 213--231. \bibitem{tomovski-dubbeldam-korbel} \v{Z}. Tomovski, J.L.A. Dubbeldam, J. Korbel. Applications of Hilfer--Prabhakar operator to option pricing financial model. Fract. Calc. Appl. Anal. 23(4) (2020), 996--1012. \bibitem{zaky-hendy-suragan} M.A. Zaky, A.S. Hendy, D. Suragan. 
A note on a class of Caputo fractional differential equations with respect to another function. Math. Comput. Simul. 196 (2022), 289--295. \end{thebibliography} \end{document}
2205.13017v2
http://arxiv.org/abs/2205.13017v2
Two-Torsion Subgroups of some Modular Jacobians
\documentclass{amsart} \usepackage{url,amssymb,enumerate,colonequals} \usepackage{multirow} \usepackage{mathrsfs} \usepackage[section]{placeins} \usepackage{MnSymbol} \usepackage{extarrows} \usepackage{lscape} \usepackage[all,cmtip]{xy} \usepackage[OT2,T1]{fontenc} \usepackage{color} \usepackage[ colorlinks, citecolor=darkgreen, backref, pdfauthor={Elvira Lupoian}, ]{hyperref} \usepackage{comment} \usepackage{multirow} \usepackage[table]{xcolor} \usepackage{hyperref} \hypersetup{ colorlinks=true, linkcolor=darkgreen, filecolor=magenta, urlcolor=darkgreen, pdftitle={Overleaf Example}, pdfpagemode=FullScreen, } \newcommand{\defi}[1]{\textsf{#1}} \newcommand{\dashedarrow}{\dashrightarrow} \newcommand{\Aff}{\mathbb{A}} \newcommand{\F}{\mathbb{F}} \newcommand{\Fbar}{{\overline{\F}}} \newcommand{\G}{\mathbb{G}} \newcommand{\Gm}{\mathbb{G}_{\mathrm{m}}} \newcommand{\bbH}{\mathbb{H}} \newcommand{\PP}{\mathbb{P}} \newcommand{\Q}{\mathbb{Q}} \newcommand{\R}{\mathbb{R}} \newcommand{\Z}{\mathbb{Z}} \newcommand{\Qbar}{{\overline{\Q}}} \newcommand{\Zhat}{{\hat{\Z}}} \newcommand{\Ebar}{{\overline{E}}} \newcommand{\Zbar}{{\overline{\Z}}} \newcommand{\kbar}{{\overline{k}}} \newcommand{\Kbar}{{\overline{K}}} \newcommand{\rhobar}{{\overline{\rho}}} \newcommand{\ksep}{{k^{\operatorname{sep}}}} \newcommand{\ff}{\mathfrak{f}} \newcommand{\fq}{\mathfrak{q}} \newcommand{\fP}{\mathfrak{P}} \newcommand{\Adeles}{\mathbf{A}} \newcommand{\kk}{\mathbf{k}} \newcommand{\mm}{\mathfrak{m}} \newcommand{\eps}{\varepsilon} \newcommand{\uom}{\underline{\omega}} \newcommand{\boldf}{\mathbf{f}} \newcommand{\boldl}{\ensuremath{\boldsymbol\ell}} \newcommand{\boldL}{\mathbf{L}} \newcommand{\boldr}{\mathbf{r}} \newcommand{\boldw}{\mathbf{w}} \newcommand{\boldzero}{\mathbf{0}} \newcommand{\boldomega}{\ensuremath{\boldsymbol\omega}} \newcommand{\calA}{\mathcal{A}} \newcommand{\calB}{\mathcal{B}} \newcommand{\calC}{\mathcal{C}} \newcommand{\calD}{\mathcal{D}} \newcommand{\calE}{\mathcal{E}} \newcommand{\cF}{\mathcal{F}} \newcommand{\calG}{\mathcal{G}} \newcommand{\calH}{\mathcal{H}} \newcommand{\calI}{\mathcal{I}} \newcommand{\calJ}{\mathcal{J}} \newcommand{\calK}{\mathcal{K}} \newcommand{\calL}{\mathcal{L}} \newcommand{\calM}{\mathcal{M}} \newcommand{\calN}{\mathcal{N}} \newcommand{\calO}{\mathcal{O}} \newcommand{\calP}{\mathcal{P}} \newcommand{\calQ}{\mathcal{Q}} \newcommand{\calR}{\mathcal{R}} \newcommand{\calS}{\mathcal{S}} \newcommand{\calT}{\mathcal{T}} \newcommand{\calU}{\mathcal{U}} \newcommand{\calV}{\mathcal{V}} \newcommand{\calW}{\mathcal{W}} \newcommand{\calX}{\mathcal{X}} \newcommand{\calY}{\mathcal{Y}} \newcommand{\calZ}{\mathcal{Z}} \newcommand{\vv}{\upsilon} \newcommand{\fp}{\mathfrak{p}} \newcommand{\CC}{\mathscr{C}} \newcommand{\FF}{\mathscr{F}} \newcommand{\GG}{\mathscr{G}} \newcommand{\II}{\mathscr{I}} \newcommand{\JJ}{\mathscr{J}} \newcommand{\LL}{\mathscr{L}} \newcommand{\NN}{\mathscr{N}} \newcommand{\OO}{\mathcal{O}} \newcommand{\WW}{\mathscr{W}} \newcommand{\XX}{\mathscr{X}} \newcommand{\ZZ}{\mathscr{Z}} \DeclareMathOperator{\Ann}{Ann} \DeclareMathOperator{\Aut}{Aut} \DeclareMathOperator{\Br}{Br} \DeclareMathOperator{\cd}{cd} \DeclareMathOperator{\Char}{char} \DeclareMathOperator{\Cl}{Cl} \DeclareMathOperator{\codim}{codim} \DeclareMathOperator{\coker}{coker} \DeclareMathOperator{\Cor}{Cor} \DeclareMathOperator{\divv}{div} \DeclareMathOperator{\Div}{Div} \DeclareMathOperator{\Det}{Det} \DeclareMathOperator{\Dic}{Dic} \DeclareMathOperator{\End}{End} \newcommand{\END}{{\EE}\!nd} \DeclareMathOperator{\Eq}{Eq} 
\DeclareMathOperator{\Ext}{Ext} \newcommand{\EXT}{{\E}\!xt} \DeclareMathOperator{\Fix}{\tt Fix} \DeclareMathOperator{\Frac}{Frac} \DeclareMathOperator{\Frob}{Frob} \DeclareMathOperator{\Gal}{Gal} \DeclareMathOperator{\Gr}{Gr} \DeclareMathOperator{\Hom}{Hom} \newcommand{\HOM}{{\HH}\!om} \DeclareMathOperator{\im}{im} \DeclareMathOperator{\Ind}{Ind} \DeclareMathOperator{\inv}{inv} \DeclareMathOperator{\Jac}{Jac} \DeclareMathOperator{\lcm}{lcm} \DeclareMathOperator{\Lie}{Lie} \DeclareMathOperator{\Log}{Log} \DeclareMathOperator{\MakeDecentModel}{\tt MakeDecentModel} \DeclareMathOperator{\nil}{nil} \DeclareMathOperator{\Norm}{Norm} \DeclareMathOperator{\NP}{NP} \DeclareMathOperator{\Num}{Num} \DeclareMathOperator{\odd}{odd} \DeclareMathOperator{\ord}{ord} \DeclareMathOperator{\Pic}{Pic} \DeclareMathOperator{\PIC}{\bf Pic} \DeclareMathOperator{\Prob}{\bf P} \DeclareMathOperator{\Proj}{Proj} \DeclareMathOperator{\PROJ}{\bf Proj} \DeclareMathOperator{\rank}{rank} \DeclareMathOperator{\re}{Re} \DeclareMathOperator{\reg}{reg} \DeclareMathOperator{\res}{res} \DeclareMathOperator{\Res}{Res} \DeclareMathOperator{\rk}{rk} \DeclareMathOperator{\scd}{scd} \DeclareMathOperator{\Sel}{Sel} \DeclareMathOperator{\Sp}{Sp} \DeclareMathOperator{\Spec}{Spec} \DeclareMathOperator{\SPEC}{\bf Spec} \DeclareMathOperator{\Spf}{Spf} \DeclareMathOperator{\supp}{supp} \DeclareMathOperator{\Sym}{Sym} \DeclareMathOperator{\tr}{tr} \DeclareMathOperator{\Tr}{Tr} \DeclareMathOperator{\trdeg}{tr deg} \newcommand{\Ab}{\operatorname{\bf Ab}} \newcommand{\Groups}{\operatorname{\bf Groups}} \newcommand{\Schemes}{\operatorname{\bf Schemes}} \newcommand{\Sets}{\operatorname{\bf Sets}} \newcommand{\ab}{{\operatorname{ab}}} \newcommand{\an}{{\operatorname{an}}} \newcommand{\Az}{{\operatorname{Az}}} \newcommand{\CS}{\operatorname{\bf CS}} \newcommand{\et}{{\operatorname{et}}} \newcommand{\ET}{{\operatorname{\bf \acute{E}t}}} \newcommand{\fl}{{\operatorname{f\textcompwordmark l}}} \newcommand{\good}{{\operatorname{good}}} \newcommand{\op}{{\operatorname{op}}} \newcommand{\perf}{{\operatorname{perf}}} \newcommand{\red}{{\operatorname{red}}} \newcommand{\regular}{{\operatorname{regular}}} \newcommand{\sing}{{\operatorname{sing}}} \newcommand{\smooth}{{\operatorname{smooth}}} \newcommand{\tH}{{\operatorname{th}}} \newcommand{\tors}{{\operatorname{tors}}} \newcommand{\nontors}{{\operatorname{non-tors}}} \newcommand{\unr}{{\operatorname{unr}}} \newcommand{\Zar}{{\operatorname{Zar}}} \newcommand{\ns}{{\operatorname{ns}}} \renewcommand{\sp}{{\operatorname{sp}}} \newcommand{\Cech}{\v{C}ech} \newcommand{\E}{{\operatorname{\bf E}}} \newcommand{\GalQ}{{\Gal}(\Qbar/\Q)} \newcommand{\GL}{\operatorname{GL}} \newcommand{\HH}{{\operatorname{H}}} \newcommand{\HHcech}{{\check{\HH}}} \newcommand{\HHat}{{\hat{\HH}}} \newcommand{\M}{\operatorname{M}} \newcommand{\PGL}{\operatorname{PGL}} \newcommand{\PSL}{\operatorname{PSL}} \newcommand{\SL}{\operatorname{SL}} \newcommand{\del}{\partial} \newcommand{\directsum}{\oplus} \newcommand{\Directsum}{\bigoplus} \newcommand{\injects}{\hookrightarrow} \newcommand{\intersect}{\cap} \newcommand{\Intersection}{\bigcap} \newcommand{\isom}{\simeq} \newcommand{\notdiv}{\nmid} \newcommand{\surjects}{\twoheadrightarrow} \newcommand{\tensor}{\otimes} \newcommand{\Tensor}{\bigotimes} \newcommand{\union}{\cup} \newcommand{\Union}{\bigcup} \newcommand{\Algorithm}{\textbf{Algorithm}\ } \newcommand{\Subroutine}{\textbf{Subroutine}\ } \newcommand{\isomto}{\overset{\sim}{\rightarrow}} 
\newcommand{\isomfrom}{\overset{\sim}{\leftarrow}} \newcommand{\leftexp}[2]{{\vphantom{#2}}^{#1}{#2}} \newcommand{\rholog}{\rho \log} \newcommand{\sigmaiota}{{\leftexp{\sigma}{\iota}}} \newcommand{\sigmaphi}{{\leftexp{\sigma}{\phi}}} \newcommand{\sigmatauphi}{{\leftexp{\sigma\tau}{\phi}}} \newcommand{\tauphi}{{\leftexp{\tau}{\phi}}} \newcommand{\To}{\longrightarrow} \newcommand{\floor}[1]{\left\lfloor #1 \right\rfloor} \numberwithin{equation}{section} \newtheorem{thm}{Theorem} \newtheorem{lem}[thm]{Lemma} \newtheorem{prop}[thm]{Proposition} \newtheorem{cor}[thm]{Corollary} \newtheorem*{claim}{Claim} \newtheorem*{conj}{Conjecture} \theoremstyle{definition} \newtheorem*{defn}{Definition} \newtheorem*{eg}{Example} \newtheorem*{heu}{Heuristic} \newtheorem*{Ack}{Acknowledgements} \theoremstyle{remark} \newtheorem{rem}{Remark} \newtheorem*{case}{Case} \newtheorem*{c1}{Case 1} \newtheorem*{c2}{Case 2} \newtheorem*{s1}{Step 1} \newtheorem*{s2}{Step 2} \newtheorem*{note}{Note} \newtheorem*{fact}{Fact} \newtheorem*{N}{Notation} \newtheorem*{W}{Warning} \theoremstyle{definition} \newtheorem{definition}[equation]{Definition} \newtheorem{question}[equation]{Question} \newtheorem{example}[equation]{Example} \newtheorem{examples}[equation]{Examples} \theoremstyle{remark} \newtheorem{remark}[equation]{Remark} \newtheorem{remarks}[equation]{Remarks} \newenvironment{psmallmatrix} {\left(\begin{smallmatrix}} {\end{smallmatrix}\right)} \definecolor{darkgreen}{rgb}{0,0.5,0} \DeclareRobustCommand{\SkipTocEntry}[5]{} \renewcommand\thepart{\Roman{part}} \begin{document} \title{Two-Torsion Subgroups of Some Modular Jacobians} \begin{abstract} We give a practical method to compute the 2-torsion subgroup of the Jacobian of a non-hyperelliptic curve of genus $3$, $4$ or $5$. The method is based on the correspondence between the 2-torsion subgroup and the theta hyperplanes to the curve. The correspondence is used to explicitly write down a zero-dimensional scheme whose points correspond to elements of the $2$-torsion subgroup. Using $p$-adic or complex approximations (obtained via Hensel lifting or homotopy continuation and Newton-Raphson) and lattice reduction we are then able to determine the points of our zero-dimensional scheme and hence the $2$-torsion points. We demonstrate the practicality of our method by computing the $2$-torsion of the modular Jacobians $J_{0}\left( N \right)$ for $N = 42, 55, 63, 72, 75$. As a result of this we are able to verify the generalised Ogg conjecture for these values. \end{abstract} \author{Elvira Lupoian} \address{Mathematics Institute\\ University of Warwick\\ CV4 7AL \\ United Kingdom} \email{[email protected]} \date{\today} \thanks{The author is supported by the EPSRC studentship} \keywords{Two Torsion, Modular Jacobians, Generalised Ogg Conjecture} \subjclass[2020]{11G30} \maketitle \section{Introduction} Let $X$ be a smooth, projective genus $g \ge 1$ curve over $\Q$ and let $J$ be its Jacobian variety. The Mordell-Weil theorem states that the set of $K$-rational points of $J$ is a finitely generated group for any number field $K$; that is, $J \left( K \right) \isom J\left( K \right)_{\text{tors}} \oplus \Z^{r}$ for some integer $r \geq 0$ and a finite group $J\left( K \right)_{\text{tors}}$. In this paper we will be concerned with the finite torsion subgroup $J\left( K \right)_{\text{tors}}$, more specifically with the $2-$torsion part $J \left( K \right) \left[ 2 \right ] = \{ D \in J \left( K \right) \ \vert \ 2D = 0 \}$. 
If $X$ is hyperelliptic, then it is easy to compute the $2$-torsion of its Jacobian; see, for example, \cite{BS2tors} or \cite{scha2tors}. We describe a method for calculating the entire $2$-torsion subgroup $J\left( \Qbar \right) \left[ 2 \right] \isom \left( \Z / 2\Z \right)^{2g}$ for $X$ non-hyperelliptic and $g = 3, 4 \ \text{and} \ 5$. The theoretical basis of this method is the well-known description of $2$-torsion points on the Jacobian as the difference of two odd theta characteristics \cite{caporaso}. A theta characteristic to the curve is a linear equivalence class of a degree $g-1$ divisor on $X$ which when doubled is equal to the canonical class. The parity of a theta characteristic is simply the parity of the dimension of its Riemann-Roch space. As our curve is non-hyperelliptic, it is canonically embedded in projective space and therefore all odd theta characteristics are in bijective correspondence with the hyperplanes to $X$ which intersect the curve at $g-1$ (not necessarily distinct) points, each with multiplicity $2$; we will call such planes theta hyperplanes. These are important geometric invariants of curves and have been studied extensively. For instance, Caporaso and Sernesi showed in \cite{CapEdo2} that a plane quartic, $g=3$, is completely determined by its theta hyperplanes, a result which they then generalized to all general canonical curves of genus $g \geq 4$ in \cite{CapEdo}. Lehavi showed that the curve can be effectively reconstructed from its theta hyperplanes in the genus $3$, $4$ and $5$ cases (see \cite{Leh1}, \cite{Leh2} and \cite{Leh3} respectively). The method described in this paper can be used to compute the theta hyperplanes to $X$ and thus the $2$-torsion points of its Jacobian, which are simply linear equivalence classes of differences of the divisors formed by intersecting the theta hyperplanes with the curve and multiplying by $\frac{1}{2}$. In Section $3$, we will construct zero-dimensional schemes whose points correspond to the theta hyperplanes to $X$. The points of such a zero-dimensional scheme are usually defined over a number field of fairly large degree, especially in the genus $4$ and $5$ cases, and as a result are usually impractical to compute using Gr\"{o}bner bases. In Sections $4$ and $5$ we describe how such points can be computed, firstly by approximating and then by searching for short vectors in an appropriate lattice. The points are approximated either $p$-adically, first by searching for non-singular points over a finite field and then lifting these points using Hensel's lemma, or as complex points, using homotopy continuation to obtain initial complex approximations and then applying Newton-Raphson to obtain complex approximations accurate to many decimal places. In Section 5 we use the approximations to define lattices, in which we search for short vectors in order to find the points of our schemes. Using lattice reduction to find algebraic dependencies is a standard technique; it is described, for instance, in \cite[Section 2.7.2]{cohen}. In Section $6$ we compute the $2$-torsion subgroup in some explicit examples. The main motivation for this work was to verify the generalised Ogg conjecture for some values of $N$, previously known up to $2$-torsion. Let $N$ be a positive integer and denote by $J_{0}\left( N \right)$ the Jacobian variety of the modular curve $X_{0}\left( N \right)$.
Denote by $C_{0}\left( N \right)$ the cuspidal subgroup of $J_{0}\left( N \right) \left( \overline{\mathbb{Q}} \right)$; that is, the subgroup generated by classes of differences of cusps, and write $C_{0}\left( N \right)\left( \mathbb{Q} \right)$ for the subgroup of $C_{0}\left( N \right)$ stable under the action of $\Gal \left( \mathbb{\overline{Q}} / \mathbb{Q} \right)$. A consequence of the Manin-Drinfeld theorem \cite{dr} is that $C_{0}\left( N \right) \left( \mathbb{Q} \right) \subseteq J_{0}\left( N \right) \left( \mathbb{Q} \right)_{\text{tors}}$. The generalised Ogg conjecture \cite{oggc} states that this is in fact an equality. In \cite{OS} Ozman and Siksek proved this for $N = 34, 38, 44, 45, 51, 52, 54, 56, 64, 81$ and for $N = 42, 55, 63, 72, 75$ up to $2$-torsion. We compute the $2$-torsion subgroup of $J_{0}\left( N \right)$ and, using the previous calculations of \cite{OS}, we verify the generalised Ogg conjecture for $N = 42, 55, 63, 72, 75$, where $g=5$ for all these values of $N$. \begin{thm} The generalised Ogg conjecture holds for $N = 42, 55, 63, 72, 75$. \end{thm} The explicit generators of the $2$-torsion subgroups of the above Jacobians, as well as the \texttt{Magma} code used to compute them, can be found at \begin{center} \href{https://github.com/ElviraLupoian/TwoTorsionSubgroups}{https://github.com/ElviraLupoian/TwoTorsionSubgroups}. \end{center} Algorithms to compute theta hyperplanes to canonical curves have also been developed by Bruin, Poonen and Stoll \cite[Section 12]{BPS} in the case of $g = 3$, where the $2$-torsion subgroup is used for explicit computations of Selmer groups, and an algorithm to compute the tritangents in the genus $4$ case is described by Stoll in \cite[Section 4]{StollTriTan}. Methods to compute torsion points on Jacobians using Hensel lifting are also described by Dokchitser and Doris \cite{dokchitser} and by Mascot \cite{mascot}. We also use Hensel lifting in the genus $3$ and $4$ examples. For our genus 5 examples, we found it impractical to search for nonsingular points over finite fields that can be used as initial approximations in our Hensel lifting. Instead, we use homotopy continuation \cite{ver} to obtain initial complex approximations to the points of our zero-dimensional scheme. These are then lifted to very high precision, approximately $2000$ decimal places, using Newton-Raphson. These high-precision approximations were crucial for finding the algebraic expressions of the theta hyperplanes. \begin{Ack}I would like to thank my supervisors Samir Siksek and Damiano Testa for their continued support and the many valuable conversations throughout this project. I would also like to thank the anonymous referee for their detailed feedback; their suggestions and corrections have improved this paper greatly. \end{Ack} \section{Preliminaries} \label{Pre} Let $X$ be a smooth, projective complex curve of genus $g$. \begin{defn} A $\mathbf{theta \ characteristic}$ on $X$ is a degree $g-1$ divisor class $D \in \text{Pic}\left( X \right)$ such that $2D = K_{X}$, where $K_{X}$ is the canonical class on $X$. The $\mathbf{parity}$ of a theta characteristic $D$ is the parity of $h^{0}\left( X, D \right)$. \end{defn} Let $J$ be the Jacobian variety of $X$. For any two theta characteristics $D_{1}, D_{2}$, the equivalence class of the difference $D_{1} - D_{2}$ is a $2$-torsion point of $J$; fixing one theta characteristic $D_{0}$, the map $D \mapsto D - D_{0}$ identifies the set of theta characteristics with $J\left[ 2 \right]$, so there are $2^{2g}$ theta characteristics.
It is well known that there are precisely $2^{g-1}\left( 2^{g} - 1\right)$ odd theta characteristics and $2^{g-1}\left( 2^{g} + 1 \right)$ even theta characteristics (see \cite[Chapter 5]{dog}). The following result, stated in \cite{caporaso}, is a clear consequence of results of \cite[Chapter 5]{dog} and it is also proved in \cite[Cor 5.3]{BPS} for genus $g \geq 2$. \begin{thm} The $2$-torsion subgroup of $J$ is generated by differences of odd theta characteristics. \end{thm} As described in \cite{caporaso}, theta characteristics can also be interpreted geometrically, as we now explain. \begin{defn} A $\mathbf{theta \ hyperplane}$ to $X$ is a hyperplane $H \subset \mathbb{P}^{g-1}$ tangent to $X$ at $g-1$ points. \end{defn} Suppose further that $X$ is non-hyperelliptic and is embedded in $\mathbb{P}^{g-1}$ by its canonical embedding. Let $H$ be a theta hyperplane to $X$. The intersection of this hyperplane and the curve gives the following divisor on $X$: \begin{center} $H \cdot X = \displaystyle \sum_{i=1}^{g-1} 2P_{i}$ \end{center} for some $P_{i} \in X$. The equivalence class of the divisor $\frac{1}{2} H \cdot X$ is an effective theta characteristic. Conversely, given an effective theta characteristic $D$, there exists a hyperplane $H \subset \mathbb{P}^{g-1}$ such that $D = \frac{1}{2}H \cdot X$, so $H$ is tangent to $X$ at $g-1$ points. For $X$ as above, odd theta characteristics coincide with the effective theta characteristics, and thus we can state the following result (see \cite{CapEdo}). \begin{thm} There are $2^{g-1}\left( 2^{g} -1 \right)$ theta hyperplanes to $X$, in natural bijection with the odd theta characteristics of $X$. \end{thm} The $2$-torsion subgroup is computed as follows. Let $TH$ be the set of defining equations of the theta hyperplanes to $X$. We observe that the quotient of two elements of $TH$ is an element of the function field of $X$ and thus we can consider its divisor. The $2$-torsion subgroup of $J$ is generated by classes of divisors of the form \begin{center} $\frac{1}{2} \left( \text{div} \left( \frac{a}{b} \right) \right)= \displaystyle \sum_{i=1}^{g-1} P_{i} - \displaystyle \sum_{j=1}^{g-1} Q_{j}$ \end{center} for some $a,b \in TH$, where the $P_{i}$'s and $Q_{j}$'s are (not necessarily distinct) points on $X$. \section{Schemes of theta hyperplanes} \label{Sch} Let $X$ be a complete, nonsingular and non-hyperelliptic curve over an algebraically closed field $k$, of genus $g$. The image of the canonical embedding of $X$ into $\mathbb{P}^{g-1}$ is a curve of degree $2g - 2$ and this gives a model of the curve. \begin{itemize} \item $g = 3$ : the canonical model is a plane quartic in $\mathbb{P}^{2}$; \item $g = 4$ : the canonical model is the intersection of a quadric and a cubic surface in $\mathbb{P}^{3}$; \item $g = 5$ : the canonical model is the intersection of 3 quadrics in $\mathbb{P}^{4}$. \end{itemize} From now on, we will assume that $X$ has a model over $\mathbb{Q}$. In this section we construct a zero-dimensional scheme whose points correspond to theta hyperplanes to $X$, when $X$ has genus $3$, $4$ or $5$. \subsection{The Genus 3 Case: Scheme of Bitangents} Suppose $X$ has genus 3 and so the curve has a model: \begin{center} $X : f\left( x_{1}, x_{2}, x_{3} \right) = 0 $, \end{center} where $f \in \mathbb{Q} \left[ x_{1}, x_{2}, x_{3} \right]$ is a homogeneous polynomial of degree 4. A theta hyperplane to $X$ is a line tangent to the curve at $2$ (not necessarily distinct) points, and we call such hyperplanes bitangent lines.
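For orientation (an illustrative check only, using the counts recalled above), the numbers of odd theta characteristics, and hence the expected degrees of the schemes constructed below for $g = 3, 4, 5$, can be tabulated in one line of Python.
\begin{verbatim}
# Illustrative check of the counts of odd and even theta characteristics
for g in (3, 4, 5):
    odd, even = 2**(g-1)*(2**g - 1), 2**(g-1)*(2**g + 1)
    print(g, odd, even, odd + even == 2**(2*g))
# prints: 3 28 36 True / 4 120 136 True / 5 496 528 True
\end{verbatim}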
We work on an affine chart and de-homogenise with respect to the appropriate coordinates. For notation purposes, we work on the affine chart $\lbrace x_{3} = 1 \rbrace $. A bitangent to the affine curve is given by a polynomial \begin{center} $b_{1}x_{1} + b_{2}x_{2} + b_{3} = 0 $, \end{center} for some $b_{i} \in \mathbb{\overline{Q}}$ with $ \left( b_{1},b_{2} \right) \neq \left( 0 , 0 \right)$. Suppose $b_{2} \ne 0 $, so rescaling and rearranging gives \begin{center} $x_{2} = a_{1}x_{1} + a_{2} $, \end{center} for some $a_{1}, a_{2} \in \mathbb{\overline{Q}}$. The intersection of the affine curve with this line is described by \begin{center} $F(x_{1} ) = f\left( x_{1} , a_{1} x_{1} + a_{2}, 1 \right),$ \end{center} where $F \in \mathbb{Q} \left[ a_{1}, a_{2}\right] \left[ x_{1} \right] $ has degree 4, and $F$ is necessarily a square if the given line is a bitangent. Equivalently, there exist $a_{3}, a_{4} \in \mathbb{\overline{Q}} $ such that \begin{center} $F \left( x_{1} \right) = l \left( x_{1}^{2} + a_{3}x_{1} + a_{4} \right)^{2}$ \end{center} where $l$ is the coefficient of $x_{1}^{4}$ in $F$. Equating coefficients in the above expression gives $4$ equations $e_{1}, \ldots , e_{4}$ in $a_{1}, \ldots, a_{4}$ which define a zero-dimensional scheme $S$, whose points correspond to bitangents to the curve. \begin{rem} The total number of bitangents to a plane curve is $28$ and hence the degree of $S$ is at most 28. If the degree is strictly less than 28, we can repeat the above, working on a different affine chart or with lines of a different form to obtain all 28 bitangents to $X$. \end{rem} \subsection{The Genus 4 Case: Scheme of Tritangents} Suppose $X$ has genus 4, and so a canonical model of the curve is the intersection of a quadric and a cubic. \begin{center} $X : f\left( x_{1}, x_{2}, x_{3}, x_{4} \right) = g\left( x_{1}, x_{2}, x_{3}, x_{4} \right) =0 $ \end{center} where $f, g \in \mathbb{Q} \left[ x_{1}, x_{2}, x_{3}, x_{4} \right]$ are homogeneous of degree $2$, $3$ respectively. A theta hyperplane to $X$ is a plane intersecting the curve in $3$ (not necessarily distinct) points, each with multiplicity two and we will call such hyperplanes tritangents planes. We work on an affine chart, say $ \lbrace x_{4} = 1 \rbrace $ and similar to above, we can assume that some tritangent planes to the affine curve are cut out by equations of the form \begin{center} $x_{3} = a_{1}x_{1} + a_{2}x_{2} + a_{3} $, \end{center} for some $a_{1}, a_{2}, a_{3} \in \mathbb{\overline{Q}}$. The intersection of the affine curve with the plane is described by the two expressions $F$ and $G$, \begin{align*} F\left( x_{1}, x_{2} \right) & \ = \ f \left( x_{1}, x_{2}, a_{1}x_{1} + a_{2}x_{2} + a_{3} , 1 \right), \\ G \left( x_{1}, x_{2} \right) & \ = \ g \left( x_{1}, x_{2}, a_{1}x_{1} + a_{2}x_{2} + a_{3} , 1 \right), \end{align*} which can be viewed as polynomials in $x_{2}$ with coefficients in $\mathbb{Q} \left[ a_{1}, a_{2}, a_{3}, x_{1} \right]$. Taking the resultant of $F$ and $G$ gives a polynomial $R$ in $x_{1}$ with coefficients in $\mathbb{Q} \left [ a_{1}, a_{2}, a_{3} \right]$, \begin{center} $R \left( x_{1} \right) = \text{Res}\left( F, G, x_{2} \right) \in \mathbb{Q} \left[ a_{1}, a_{2}, a_{3} \right]\left[ x_{1} \right] $. \end{center} This is a degree 6 polynomial; and it is necessarily a square if the given plane is a tritangent. 
That is, there exist $a_{4}, a_{5}, a_{6} \in \mathbb{\overline{Q}}$ such that \begin{center} $R \left( x_{1} \right) = l \left( x_{1}^{3} + a_{4}x_{1}^{2} + a_{5}x_{1} + a_{6} \right)^{2},$ \end{center} where $l$ is the coefficient of $x_{1}^{6}$ in $R$. The coefficients of the above expression $e_{1}, \ldots, e_{6}$ are polynomials in $a_{1}, \ldots, a_{6}$ with rational coefficients. To define the scheme of tritangents we require the additional equation \begin{center} $e_{7} : a_{7} \cdot \Delta + 1 = 0$, \end{center} where $\Delta$ is the discriminant of $x_{1}^{3} + a_{4}x_{1}^{2} + a_{5}x_{1} + a_{6}$. This equation ensures that $\Delta \neq 0 $ and avoids singularities on the scheme. The seven equations derived above define a zero-dimensional scheme $S$, whose points correspond to tritangent planes to $X$. \begin{rem} The total number of tritangent planes to such a curve is $120$ and hence the degree of $S$ is at most $120$. As in the genus $3$ case, if the degree is strictly less than $120$, we can repeat the above, working on a different affine chart or with planes of a different form to obtain all $120$ tritangent planes to $X$. \end{rem} \subsection{The Genus 5 Case: Scheme of Quadritangents} \label{Sch3} Suppose $X$ has genus $5$ and so the curve has a canonical model of the following form: \begin{center} $ X : f_{1}\left( x_{1}, \ldots , x_{5} \right) = f_{2}\left( x_{1}, \ldots, x_{5} \right) = f_{3}\left( x_{1}, \ldots, x_{5} \right) = 0,$ \end{center} where $f_{i} \in \mathbb{Q} \left[ x_{1}, x_{2}, x_{3}, x_{4}, x_{5} \right]$ are homogeneous of degree $2$. A theta hyperplane to $X$ is a hyperplane intersecting the curve in $4$ (not necessarily distinct) points, each with multiplicity $2$; and such hyperplanes will be called quadritangent planes throughout this paper. As in the previous two cases, to construct the scheme of quadritangent planes we intersect the affine curve with a given plane and eliminate variables, one at a time, until we obtain an expression in one variable which is necessarily a square if the given plane is a quadritangent. This is dependent on the model of the curve. In this subsection we give a general idea of how such a scheme can be defined; for an explicit example see Section 6.3. With $X$ as above, we work on an affine chart, say $\lbrace x_{5} = 1 \rbrace$, and as in previous cases we can assume that some quadritangents to the curve are given by equations of the form: \begin{center} $x_{4} = a_{1}x_{1} + a_{2}x_{2} + a_{3}x_{3} + a_{4}$ \end{center} for some $a_{1}, \ldots, a_{4} \in \Qbar$. The intersection of the affine curve with the above plane is described by $3$ polynomials: \begin{center} $F_{i}\left( x_{1}, x_{2}, x_{3} \right) = f_{i}\left( x_{1}, x_{2}, x_{3}, a_{1}x_{1} + a_{2}x_{2} + a_{3}x_{3} + a_{4}, 1 \right)$, \ \ $i = 1, 2, 3$, \end{center} in $x_{1}, x_{2}, x_{3}$ with coefficients in $\mathbb{Q} \left[ a_{1}, a_{2}, a_{3}, a_{4} \right]$ and monomials of total degree at most $2$. We now eliminate one of the variables. \begin{c1} Suppose one of the $F_{i}$, say $F_{1}$, is linear in one of the variables, say $x_{3}$, so $F_{1}$ can be written as \begin{center} $F_{1}\left( x_{1}, x_{2}, x_{3} \right) \ = \ g_{1}\left(x_{1}, x_{2} \right) x_{3} + g_{2}\left( x_{1}, x_{2} \right),$ \end{center} where $g_{1}, g_{2} \in \mathbb{Q}\left[ a_{1}, a_{2}, a_{3}, a_{4} \right] \left[ x_{1}, x_{2} \right]$. 
If $g_{1}\left( x_{1}, x_{2} \right) \neq 0$, we obtain an expression for $x_{3}$, \begin{center} $x_{3} = - \frac{g_{2}\left( x_{1}, x_{2} \right)}{g_{1}\left( x_{1}, x_{2} \right)} = \varphi \left( x_{1}, x_{2} \right),$ \end{center} which can be substituted into $F_{2}$ and $F_{3}$. \end{c1} \begin{c2} If none of the $F_{i}$ are linear in one variable, then we can write $F_{1}$ and $F_{2}$ as quadratics in $x_{3}$: \begin{align*} F_{1} & = g_{1,2} x_{3}^{2} + g_{1,1} x_{3} + g_{1,0}, \\ F_{2} & = g_{2,2} x_{3}^{2} + g_{2,1}x_{3} + g_{2,0}, \end{align*} where $g_{i,j} \in \mathbb{Q} \left[ a_{1}, a_{2}, a_{3}, a_{4} \right] \left[ x_{1}, x_{2} \right]$. If $g_{1,2} \ne 0$ and $g_{2,2} \ne 0$, we cross multiply to obtain \begin{align*} g_{1,2}F_{2} - g_{2,2}F_{1} & = \left( g_{1,2}g_{2,1} - g_{2,2} g_{1,1} \right) x_{3} + g_{1,2} g_{2,0} - g_{2,2} g_{1,0} = 0 \end{align*} and we use this to define $\alpha = g_{1,2}g_{2,1} - g_{2,2} g_{1,1}$ and $\beta = g_{1,2} g_{2,0} - g_{2,2} g_{1,0} $. Additionally, if we assume that $\alpha \ne 0 $: \begin{center} $x_{3} = - \frac{\beta}{\alpha} = \varphi \left( x_{1}, x_{2} \right)$ \end{center} and we obtain an expression for $x_{3}$ which can be substituted into the $F_{i}$'s. \end{c2} In deriving an expression for $x_{3}$, we assumed additional conditions, such as $g_{i,j} \neq 0 $. These conditions can be converted into equations, which will be added to the list of equations when defining the scheme. For example, if we assumed $a_{1} \neq 1$, the corresponding equation will be $b_{1}\left( a_{1} - 1 \right) + 1 =0$ for a newly introduced variable $b_{1}$. Given such an expression $x_{3} = \varphi \left( x_{1}, x_{2} \right)$, we can substitute this into the $F_{i}$'s to obtain: \begin{center} $H_{i} \left( x_{1}, x_{2} \right) = F_{i} \left( x_{1}, x_{2}, \varphi \left( x_{1}, x_{2} \right) \right) $ for $ i = 1, 2, 3$. \end{center} We take two of these equations and clear denominators to obtain polynomials $h_{1}, h_{2} \in \mathbb{Q} \left[ a_{1}, a_{2}, a_{3}, a_{4} \right] \left[ x_{1}, x_{2} \right]$, whose monomials have total degree at most 4. We now eliminate one of the two remaining variables. One practical way in which this can be done is by considering whether or not $h_{1}$ and $h_{2}$ are quadratics in one variable. \begin{c1} If $h_{1}, h_{2}$ are both quadratic in one variable, say $x_{2}$, then they can be written as: \begin{align*} h_{1}\left( x_{1}, x_{2} \right) & = h_{1,2}x_{2}^{2} + h_{1,1}x_{2} + h_{1,0}, \\ h_{2}\left( x_{1}, x_{2} \right) & = h_{2,2}x_{2}^{2} + h_{2,1}x_{2} + h_{2,0} \end{align*} where $h_{i,j} \in \mathbb{Q} \left[ a_{1}, a_{2}, a_{3}, a_{4} \right] \left[ x_{1} \right]$. If $h_{1,2} \neq 0 $ and $h_{2,2} \neq 0 $, we cross multiply: \begin{align*} h_{2,2}h_{1} - h_{1,2}h_{2} = \left( h_{2,2} h_{1,1} - h_{1,2}h_{2,1} \right) x_{2} + h_{2,2}h_{1,0} - h_{1,2} h_{2,0} = 0. \end{align*} Define $\gamma = h_{2,2} h_{1,1} - h_{1,2}h_{2,1}$ and $\delta = h_{2,2}h_{1,0} - h_{1,2} h_{2,0} $. If $\gamma \neq 0 $ then: \begin{center} $x_{2} = - \frac{\delta}{\gamma} = \psi \left( x_{1} \right) $. \end{center} We substitute this into $h_{1}$, \begin{center} $R\left( x_{1} \right) = h_{1} \left( x_{1}, \psi \left(x_{1} \right) \right),$ \end{center} and, clearing denominators, we obtain an expression $r \left( x_{1} \right) \in \mathbb{Q} \left[ a_{1}, a_{2}, a_{3}, a_{4} \right] \left[ x_{1} \right]$.
One of the factors of $r$ is a degree 8 polynomial $h \left( x_{1} \right) \in \mathbb{Q} \left[ a_{1}, a_{2}, a_{3}, a_{4} \right] \left[ x_{1} \right]$. \end{c1} \begin{c2} If $h_{1}, h_{2}$ are not both quadratic in the same variable, we take the resultant with respect to $x_{2}$: \begin{center} $R\left(x_{1} \right) = \text{Res}\left( h_{1}, h_{2}, x_{2} \right) \in \mathbb{Q}\left[ a_{1}, a_{2}, a_{3},a_{4} \right] \left[ x_{1} \right]$ \end{center} and as in the previous case, $R$ has a degree 8 factor $h\left( x_{1} \right) \in \mathbb{Q} \left[ a_{1}, a_{2}, a_{3}, a_{4} \right] \left[ x_{1} \right] $. \end{c2} Taking the resultant can cut out a larger zero-dimensional scheme than the scheme of quadritangents. With notation as above, requiring $h$ to be a square will ensure that $x_{1}$ occurs with multiplicity 2, but the corresponding $x_{2}$ might not. In practice, we find that this is not a problem, as the additional points not corresponding to quadritangents can simply be discarded at the end. If there are multiple possible polynomials $h$ resulting from the above, we simply repeat the procedure that follows for all possibilities. In our computations this was not an issue: in all examples there was only one choice of $h$. If the given plane is a quadritangent, then $h$ is necessarily a square. That is, there exist $a_{5}, a_{6}, a_{7},a_{8} \in \mathbb{\overline{Q}}$ such that: \begin{center} $h\left(x_{1} \right) = l \left( x_{1}^{4} + a_{5}x_{1}^{3} + a_{6}x_{1}^{2} + a_{7}x_{1} + a_{8} \right)^{2} $ \end{center} where $l$ is the leading coefficient of $h$. Equating coefficients in the above expression gives 8 equations $e_{1}, \ldots, e_{8}$ with rational coefficients in $a_{1}, \ldots, a_{8}$. To define a scheme of quadritangents we also require the additional equation \begin{center} $e_{9} : a_{9} \cdot \Delta + 1 =0 $ \end{center} where $\Delta$ is the discriminant of $x_{1}^{4} + a_{5}x_{1}^{3} + a_{6}x_{1}^{2} + a_{7}x_{1} + a_{8}$. This ensures that the discriminant is non-zero and avoids singularities on our scheme. The above equations, along with any equations arising from the conditions necessary to derive $h$, define a zero-dimensional scheme whose points correspond to quadritangent planes to the curve. \section{Approximate Theta Hyperplanes} \label{apr} In the previous section we described a method of deriving equations which define zero-dimensional schemes whose points correspond to coefficients of the theta hyperplanes to the curve. Gr\"{o}bner basis techniques can in theory be used to compute the points of such a zero-dimensional scheme. For example, the following two \texttt{Magma} commands do precisely this: \texttt{PointsOverSplittingField} and \texttt{Points}. The input for the former is a set of equations defining a zero-dimensional scheme and its output is the solution set of the system of equations. The latter command is less ambitious. It is designed to give the set of $K$-rational points of a zero-dimensional scheme $S$, where $K$ is the field of definition of $S$. We found that \texttt{PointsOverSplittingField} is extremely slow in our examples, and in fact we were not able to use this even in the genus $3$ case. Given the field of definition of the theta hyperplanes, the command \texttt{Points} was sometimes successful in determining the points, most notably in the genus $4$ example presented in Section 6, where it took around one hour to compute the points given the degree $36$ number field over which all tritangents to the curve are defined.
The field of definition of the tritangent planes can be determined using the method described in the two sections which follow. However, this command is still inefficient in the genus $5$ case, and we needed to use the methods of Sections $4$ and $5$. In this section we describe two methods of approximating points on a scheme of theta hyperplanes $S$ to a curve $X$. \subsection{$p$-adic Approximations} We briefly describe a method of approximating the points of our scheme $p$-adically. This method is also used in \cite{dokchitser} and \cite{mascot}. Suppose $p$ is an odd prime of good reduction for the curve. We view the defining equations of $S$ over the finite field $\mathbb{F}_{p}$ and search for points of $S$ over an extension $\mathbb{F}_{p^{n}}$ for some $n \in \mathbb{N}$. Any smooth points in $S\left( \mathbb{F}_{p^{n}} \right)$ are lifted using the following multivariate version of Hensel's Lemma (see \cite{conradmulthensel}). \begin{thm}{(Multivariate Hensel Lemma)} Let $K$ be a non-archimedean local field, $\nu_{K}$ a valuation on $K$ and $\mathcal{O}_{K}$ its ring of integers. Suppose $F = \left( F_{1}, \ldots, F_{m} \right) \in \mathcal{O}_{K}\left[ x_{1}, \ldots, x_{m} \right]$ and $P = \left( P_{1}, \ldots, P_{m} \right) \in \mathcal{O}_{K}^{m}$; and let $v_{1} = \text{min} \lbrace \nu_{K}\left( F_{i}\left( P \right) \right) : i = 1, \ldots, m \rbrace $ and $v_{2} = \nu_{K} \left( D_{F}\left(P \right) \right)$, where $D_{F}$ denotes the determinant of the Jacobian matrix of $F$. If $v_{1} > 2v_{2}$, then there exists a unique $Q = \left( Q_{1}, \ldots, Q_{m} \right) \in \mathcal{O}_{K}^{m}$ such that $F \left( Q \right) = 0 $ and $\nu_{K}\left( P_{i} - Q_{i} \right) \geqslant v_{1} - v_{2} $ for all $i$. \end{thm} The proof of Hensel's lemma is constructive in the sense that for a point $x \in S $ which satisfies the hypothesis of the above theorem, we can find a prime ideal $\mathfrak{p}$ in $\mathcal{O}_{K}$, where $K$ is some number field and $\mathcal{O}_{K}$ is its ring of integers, and construct a sequence $ \lbrace a_{i} \rbrace_{i \geq 1 } \subset \mathcal{O}_{K}$ satisfying the following: \begin{itemize} \item $a_{k} \equiv x \ \text{mod} \ \mathfrak{p}^{k}$ for all $k \geq 1$, \item $a_{k} \equiv a_{k-1} \ \text{mod} \ \mathfrak{p}^{k-1}$ for all $k \geq 2$, \item $E_{S}\left( a_{k} \right) \equiv 0 \ \text{mod} \ \mathfrak{p}^{k}$ for all $k \geq 1$, \end{itemize} where $E_{S} = \left( e_{1}, \ldots, e_{n} \right)$ are the defining equations of $S$. Note that the congruences in the above expressions are understood to be coordinate-wise. \begin{rem} For large degree schemes, large $p$ and $n$ are required to find sufficiently many smooth points. In particular, this method was extremely inefficient in the computations required for the proof of Theorem 1; complex approximations were used for those calculations. \end{rem} \subsection{Complex Approximations} \label{apr2} The points of $S$ can also be approximated as complex points using the Newton-Raphson method. We give a brief overview of this; a detailed explanation can be found in \cite[Chapter 5]{NA}. Let $E = \left( e_{1}, \ldots, e_{n} \right)$ be the defining equations of $S$. We view $E$ as a function $ \mathbb{C}^{n} \longrightarrow \mathbb{C}^{n}$. Let $dE$ be the Jacobian matrix of $E$ and suppose $\mathbf{x}_{0}$ is an approximate solution to $E$ with $dE\left( \mathbf{x}_{0} \right)$ invertible.
For $k \geq 1$, define \begin{center} $\mathbf{x}_{k} = \mathbf{x}_{k-1} - dE\left( \mathbf{x}_{k-1} \right)^{-1}E\left( \mathbf{x}_{k-1} \right).$ \end{center} Provided the initial approximation $\mathbf{x}_{0}$ is sufficiently good, the resulting sequence $\lbrace \mathbf{x}_{k} \rbrace_{k \geq 0 }$ converges to a solution of $E$, with each iterate having increased precision. In fact, at each step the number of decimal places to which the approximation is accurate roughly doubles \cite[Section 5.8]{NA}. This was the method used to compute approximations of the theta hyperplanes of the curves stated in Theorem 1. This method requires good initial complex approximations to the solutions of $E$. These can be obtained using homotopy continuation and its implementation in \texttt{Julia}. \subsubsection{Homotopy Continuation} Homotopy continuation is a method for numerically approximating the solutions of a system of polynomial equations by deforming the solutions of a similar system whose solutions are known. We give a brief sketch of the idea, but a more detailed explanation of this theory can be found in \cite{ver} or \cite{NA}. The total degree of $E$ is $\text{deg} \left( E \right) = \displaystyle \prod_{i=1}^{n} \text{deg} \left( e_{i} \right)$, where $\text{deg} \left( e_{i} \right)$ is the maximum of the total degrees of the monomials of $e_{i}$. Let $F$ be a system of $n$ polynomials in $ \mathbf{a} = \left( a_{1}, \ldots, a_{n} \right)$ which has exactly $\text{deg}\left( E \right)$ solutions, all of which are known. The system $F$ will be known as a start system. The standard homotopy of $F$ and $E$ is the function \begin{align*} & H : \mathbb{C}^{n} \times \left[ 0, 1 \right] \longrightarrow \mathbb{C}^{n} \\ & H \left( \mathbf{a}, t \right) \ = \ \left( 1 - t \right) F\left( \mathbf{a} \right) + t E \left( \mathbf{a} \right). \end{align*} Fix $N \in \mathbb{N}$, and for any $s \in \left[ 0, N \right] \cap \mathbb{N}$ define $H_{s}\left( \mathbf{a} \right) = H \left( \mathbf{a}, s/N \right)$; this is a system of $n$ polynomials in $ \mathbf{a} = \left( a_{1}, \ldots, a_{n} \right)$. For $N$ large enough, the solutions of $H_{s} \left( \mathbf{a} \right)$ are good initial approximations of the solutions of $H_{s+1}\left( \mathbf{a} \right)$, and using the Newton-Raphson method we can greatly increase their precision. The solutions of $H_{0} \left( \mathbf{a} \right) = F\left( \mathbf{a} \right)$ are known, and they can be used to define solution paths to approximate solutions of $H_{N}\left( \mathbf{a} \right) = E \left( \mathbf{a} \right)$. There are two important things to highlight. \begin{itemize} \item[1.] Given any $E$, a start system (and its solutions) can always be computed. \item[2.] A start system can be modified to ensure that solution paths do not cross and that they converge to approximate solutions of $E$. \end{itemize} Homotopy continuation is implemented in \texttt{Julia} with the simple command \texttt{solve}, whose input is any complex system of equations which has a finite number of solutions (see \cite{JulHC}). \begin{rem} The implementation of homotopy continuation in \texttt{Julia} gives approximations to solutions of $E$ which are accurate to 16 decimal places. For the computations required for the proof of Theorem 1, we used the approximate solutions and 600 iterations of Newton-Raphson to obtain an accuracy of 2000 decimal places, which was sufficient for those calculations.
\end{rem} \section{Algebraic Theta Hyperplanes} \label{alg} Given a scheme of theta hyperplanes $S$, as in Section $3$, and an approximation $\mathbf{a} = \left( a_{1}, \ldots, a_{n} \right)$, as in Section $4$, to a point $\mathbf{x} = \left( x_{1}, \ldots, x_{n} \right)$ of $S$, we use lattice reduction to compute the minimal polynomials of the $x_{i}$ and hence determine $\mathbf{x}$ exactly. The use of lattices and lattice reduction is a standard method when looking for minimal polynomials, for instance see \cite[Section 2.7.2]{cohen} where Cohen explains how the LLL-algorithm can can be used to determine linear and algebraic dependence. Similarly, Smart \cite[Chapter 6.1]{smart} shows that the LLL-algorithm can be used to find a polynomial which has a root that is a good approximation of $\pi$. \subsection{Algebraic Hyperplanes and $p$-adic Approximations} Suppose that $\mathbf{x} = \left( x_{1}, \ldots, x_{n} \right)$ is a point of $S$ for which we have a $p$-adic approximation $\mathbf{a} = \left( a_{1}, \ldots, a_{n} \right)$, and we wish to compute the minimal polynomials of the $x_{i}$. As in Section 4.1 there exist a number field $K$, a prime ideal $\mathfrak{p}$ of $\mathcal{O}_{K}$ and a point $ \left( a_{1}, \ldots, a_{n} \right)$, with $a_{i} \in \mathcal{O}_{K}$ such that for some $k \in \mathbb{N}$, $x_{i} \equiv a_{i} \ \text{mod} \ \mathfrak{p}^{k}$ for all $i \ge 1$. We will call this $k$ the precision of this approximation; and as explained in Section 4.1, approximations with arbitrarily large precision $k$ can be computed. The coefficients of the equations defining the theta hyperplanes are algebraic numbers and we can calculate their minimal polynomials by searching for short vectors in an appropriately defined lattice. Short vectors can be computed using the Lenstra-Lenstra-Lov\'{a}sz lattice reduction algorithm, or LLL for short, which given a lattice, returns a reduced basis whose vectors have small norms, see \cite[Section 2.6]{cohen}. \subsubsection*{Minimal Polynomials} Fix $i$, $1 \leq i \leq n$ and let $\theta = x_{i}$. In this subsection we describe a method to compute the minimal polynomial of $\theta$ using the $p$-adic approximations. As $\theta$ is algebraic, there exist $d_{\theta} \in \mathbb{N}$ and $c_{0}, \ldots, c_{d_{\theta}} \in \mathbb{Z}$ such that \begin{center} $c_{d_{\theta}} \theta^{d_{\theta}} + \cdots + c_{1} \theta + c_{0} = 0 $ \end{center} where $c_{d_{\theta}} \neq 0 $. As $\theta \equiv a_{i} \ \text{mod} \ \mathfrak{p}^{k}$, \begin{center} $c_{d_{\theta}} a_{i}^{d_{\theta}} + \cdots + c_{1} a_{i} + c_{0} \equiv 0 \ \text{mod} \ \mathfrak{p}^{k} $. \end{center} Define the homomorphism \begin{center} $\phi : \mathbb{Z}^{d_{\theta}+1} \longrightarrow \mathcal{O}_{K} / \mathfrak{p }^{k}$ \\ \medskip $ \left( u_{0}, \ldots , u_{d_{\theta}} \right) \longmapsto u_{d_{\theta}}a_{i}^{d_{\theta}} + \cdots + u_{1}a_{i} + u_{0} \ \text{mod} \ \mathfrak{p}^{k} $, \end{center} and let $\mathcal{L} = \text{ker} \left( \phi \right) \leqslant \mathbb{Z}^{d_{\theta}+1}$. This is a discrete subgroup of $\mathbb{Z}^{d_{\theta}+1}$, which contains all elements of the form $P\mathbf{e}_{i}$, where $P$ is any integer which reduces to $0$ modulo $\mathfrak{p}^{k}$ and $\mathbf{e}_{i}$ is the $ith$ element in the standard orthonormal basis of $\mathbb{Z}^{d_{\theta}+1}$, and thus $\mathcal{L}$ is a full rank lattice in $\mathbb{Z}^{d_{\theta}+1}$. 
Observe that the above lattice $\mathcal{L}$ can be constructed for any precision $k$, and $\left( c_{0}, \ldots , c_{d_{\theta}} \right) \in \mathcal{L}$ for any $k$. As $k$ increases, in general the average length of vectors in $\mathcal{L}$ increases and so eventually $\left( c_{0}, \ldots , c_{d_{\theta}} \right) $ should be the shortest vector in $\mathcal{L}$, or one of the shortest vectors. The length of a vector in a lattice is defined as the square root of its norm. To search for short vectors in $\mathcal{L}$ we use the \texttt{Magma} command \texttt{ShortestVectors()} which given a lattice returns a sequence containing all vectors of the lattice which have the minimum non-zero norm, see \cite{magma}. The \texttt{Magma} algorithm for computing short vectors first computes a reduced basis of the lattice using its efficient implementation of the LLL-algorithm, which is based on both Nguyen and Stehl\`{e}'s floating-point LLL algorithm \cite{NSLLL} and de Weger's exact integral algorithm \cite{dWLLL}; and then uses a closest vector algorithm, such as the one described by Fincke and Pohst \cite{FP}, to determine the shortest non-zero vector in $\mathcal{L}$. To determine what precision makes the shortest vector in $\mathcal{L}$ a suitable candidate for the coefficients of the minimal polynomial we use Hermite's theorem. \begin{thm}{(Hermite)} There exist constants $\mu_{n} \in \mathbb{R}_{\ge 0}$ such that for any $n$-dimensional lattice $\mathcal{L}$ we have \begin{center} $M^{n} \leq \mu_{n} \Delta \left( \mathcal{L} \right)^{2} $ \end{center} where $M$ is the length of the shortest non-zero vector in the lattice and $\Delta \left( \mathcal{L} \right)$ is the determinant of the lattice, as defined in \cite[Page 66]{smart}. \end{thm} There are bounds on these $\mu_{n}$ given in \cite[Page 66]{smart}. \begin{proof} See \cite[ Page 66]{smart} \end{proof} In our case, we expect the determinant $\Delta \left( \mathcal{L} \right)$ to be around $I^{1/(d_{\theta} + 1)}$, where $I$ is the index $\left[ \mathbb{Z}^{d_{\theta} + 1 } : \mathcal{L} \right]$, and the homomorphism $\phi$ to be surjective. Thus, \begin{center} $I = \frac{\vert \left( \mathcal{O}_{K} / \mathfrak{p}^{k} \right)^{\times} \vert }{\vert \left( \Z / p^{k} \Z \right)^{\times} \vert } \approx \frac{\text{Norm}\left( \mathfrak{p} \right)^{k}}{p^{k}} $ \end{center} and given our data, the last quantity can be explicitly computed. Therefore, if we are searching for a minimal polynomial whose coefficients are of approximate order $10^{n}$, then we require $k$ such that \begin{center} $ \left( \frac{\text{Norm} \left( \mathfrak{p}\right) }{p} \right)^{k/( d_{\theta} + 1 )} \ge \sqrt{\left( d_{\theta} + 1 \right)} \cdot 10^{n}$. \end{center} For large $k$, the length of the shortest non-zero vector in $\mathcal{L}$ should be significantly smaller than $\left(\Delta \left( \mathcal{L} \right)\right)^{\frac{1}{d_{\theta} + 1 }}$, and this would make it a suitable candidate for the coefficients of the minimal polynomial. In the construction above, we can replace $d_{\theta}$ by any positive integer $d$ and we can search for short vectors in the lattice constructed with respect to this $d$. If our candidate $d$ is equal to or slightly bigger than $d_{\theta}$, then the following should hold. \begin{itemize} \item The length of $v_{\text{min}}$, the shortest non-zero vector in the lattice, should be significantly smaller than $ d \left(\mathcal{ L} \right)^{\frac{1}{d+1}}$ for a large enough $k$. 
\item The polynomial $f_{\text{min}}$ whose vector of coefficients is $v_{\text{min}}$ should be irreducible or its factorization should contain an irreducible polynomial of degree close to the degree of $f_{\text{min}}$. \item The minimal polynomials should respect the Galois action on the theta hyperplanes, so multiple points should have the same minimal polynomial. \end{itemize} To summarise, the strategy for finding the coefficients of the minimal polynomial of $\theta$ is as follows. \begin{itemize} \item[1.] We start with a candidate for the degree $d$. \item[2.] Define the homomorphism $\phi$ and the lattice $\mathcal{L}$, depending on the degree $d$ and an approximation of precision $k$. Note that $k$ also depends on $d$. \item[3.] In $\mathcal{L}$ search for vectors which are shorter than, say, $\frac{1}{1000} \Delta\left( \mathcal{L} \right)^{\frac{1}{d+1}}$. \item[4.] If such a vector exists, verify that it satisfies the conditions stated above, and if they are all satisfied, it is extremely likely that this vector represents the coefficients of the minimal polynomial. Otherwise, return to 1 and replace $d$ by $d +1$. \end{itemize} \subsubsection*{Coefficient Relations} Once we have candidates for the minimal polynomials of all $ x_{1}, \ldots, x_{n} $, we want to identify which roots of these minimal polynomials correspond to the point approximated. This is especially important when our computations use hyperplanes whose defining equations have coefficients whose minimal polynomials have large degrees, since in these cases it is impractical to simply run through all tuples of possible roots and test which define theta hyperplanes. When the degrees are small, the simplest way of choosing the corresponding roots is through factorisation. Suppose that we want to find the root of $f_{i}$ approximated by $a_{i}$. Let $\tilde{K}$ be a number field containing $K$ over which $f_{i}$ splits into linear factors, and let $\mathfrak{P}$ be a prime of $\tilde{K}$ over $\mathfrak{p}$. We factor $f_{i}$ over $\tilde{K}$, \begin{center} $f_{i} = s_{1}\ldots s_{d_{i}}$ \end{center} where the $s_{j}$ are linear factors. Then for some $j$, \begin{center} $s_{j} \left( a_{i} \right) \equiv 0 \ \text{mod} \ \mathfrak{P}^{k}$ \end{center} and thus $s_{j}$ corresponds to the root of $f_{i}$ approximated by $a_{i}$. When factorising is not efficient, for instance when the degrees of our polynomials are large, we may identify the required roots using lattice reduction. We search for relations amongst the coefficients to identify the required roots. Suppose that all our coefficients $x_{1},\ldots, x_{n}$ are contained in the number field $K = \Q \left( x_{1} \right)$ defined by $f_{1}$. Then there exist $b_{0}, \ldots, b_{d_{1}} \in \Z $ such that \begin{center} $b_{d_{1}} x_{2} + b_{d_{1} -1} x_{1}^{d_{1} - 1} + \ldots + b_{1} x_{1} + b_{0} =0 $ \end{center} where $d_{1}$ is the degree of $f_{1}$. Define the homomorphism \begin{center} $r_{k} : \mathbb{Z}^{d_{1} + 1 } \longrightarrow \mathcal{O}_{K}/ \mathfrak{p}^{k}$\\ \medskip $\left( u_{0}, \ldots, u_{d_{1} -1}, u_{d_{1}} \right) \longmapsto u_{0} + u_{1}a_{1} + \ldots + u_{d_{1} -1}a_{1}^{d_{1} -1} - u_{d_{1}}a_{2} \ \text{mod} \ \mathfrak{p}^{k}.$ \end{center} Let $R_{k} = \text{ker}\left( r_{k} \right) < \mathbb{Z}^{d_{1} + 1 }$. This is a full rank lattice in $ \mathbb{Z}^{d_{1} + 1 }$ and, arguing as before, for $k$ large enough $\left( b_{0}, \ldots, b_{d_{1} -1} , - b_{d_{1}} \right) $ is expected to be the shortest vector in $R_{k}$.
The integers $b_{0}, \ldots, b_{d_{1}}$ give us a relation $\varphi_{2}$, such that for any tuple of coefficients $\left( x_{1}, \ldots, x_{n} \right)$, if $x_{1} = \theta $ is any root of $f_{1}$, then $x_{2} = \varphi_{2}\left( \theta \right)$. We repeat this with all coefficients. In fact, in all of our examples we found that for a given theta hyperplane, all of the coefficients of its defining equation were defined over the number field generated by the minimal polynomial of one of the coefficients, so the method described above was sufficient to complete all of our computations. The correctness of the minimal polynomials and coefficient relations computed can be easily checked by verifying that the points they define are solutions of the set of equations which parameterise the coefficients of the theta hyperplanes. \subsection{Algebraic Hyperplanes and Complex Approximations} \label{CA} Suppose that $ \mathbf{a} = \left( a_{1}, \ldots, a_{n} \right)$ is a complex approximation, accurate to $k$ decimal places, of a point $\mathbf{x} = \left( x_{1}, \ldots , x_{n} \right)$ of $S$. In this subsection we give a method to compute the minimal polynomial of $x_{i}$. As in the $p$-adic case, we use the approximation to define a lattice and then search for short vectors in this lattice. Computing the minimal polynomial of a complex approximation is implemented in \texttt{MAGMA} by the command \texttt{MinimalPolynomial()}, whose input is a complex approximation and the supposed degree of its minimal polynomial, and its output is a polynomial with integer coefficients, of the given degree, which has a root that is well approximated by our first input. This uses techniques similar to our implementation, which is described below, but we found it to be slower in all cases, especially when the degree was large. For instance, our code was significantly faster when computing a degree $24$ minimal polynomial corresponding to the coefficient of one quadritangent to $X_{0}\left( 55 \right)$. We were able to run through all polynomials of degree $ 2 \le d \le 24$ in $12.5$ seconds, whilst using \texttt{MinimalPolynomial()} to complete the same task took $104.6$ seconds. This particular orbit of quadritangents was not used in our final calculation, but the file computing it is included in the online repository to demonstrate the efficiency of our implementation. In all genus $5$ examples, we found that our implementation was at least three times faster, even when the degree of the polynomials was small. \subsubsection*{Minimal Polynomials} Fix $i$, $1 \leq i \leq n$, and let $\theta = x_{i}$, $ a = a_{i}$. Recall that $a$ is a complex approximation of $\theta$, correct to $k$ decimal places. As $\theta$ is an algebraic number, there exist $d_{\theta} \in \mathbb{N}$ and $c_{0}, \ldots, c_{d_{\theta}} \in \mathbb{Z}$ such that \begin{center} $c_{d_{\theta}}\theta^{d_{\theta}} + \ldots + c_{1}\theta + c_{0} = 0$. \end{center} If the imaginary part of $a$ is very small, it is likely that $a$ is approximating a real algebraic number. In this case, we will take $a = \text{Real}\left( a \right) \in \mathbb{R}$ and let $C = 10^{k'}$, for some large natural number $k' < k$, such that \begin{center} $ \mid \left[ C\cdot a^{j} \right] - C \cdot \theta^{j} \mid \ \leq \ 1 $ for all $0 \leq j \leq d_{\theta} $ \end{center} where $\left[ x \right]$ denotes the integer part of $x \in \R$.
Let $\mathcal{L}$ be the lattice generated by the columns $v_{d_{\theta}}, \ldots, v_{0}$ of the matrix \begin{center} $A = \begin{pmatrix} 1 & \ldots & 0 & 0 \\ 0 & \ldots & 0 & 0 \\ \vdots & \ddots & \vdots & \vdots \\ 0 & \ldots & 1& 0 \\ \left[ C a^{d_{\theta}} \right] & \ldots & \left[ C a \right] & \left[ C \right] \end{pmatrix} = \left( v_{d_{\theta}} , \ldots, v_{1}, v_{0} \right). $ \end{center} Let $\gamma = c_{d_{\theta}}\left[ C a^{d_{\theta}} \right] + \ldots + c_{1} \left[ C a \right] + c_{0} \left[ C \right] $ and \begin{center} $ \mathbf{c} = \begin{pmatrix} c_{d_{\theta}} \\ \vdots \\ c_{1} \\ \gamma \end{pmatrix} = c_{d_{\theta}}v_{n} + \ldots + c_{0}v_{0} . $ \end{center} Observe that $\mathbf{c} \in \mathcal{L}$ since $c_{d_{\theta}}, \ldots, c_{0} \in \Z$, and we can recover the vector of coefficients of the minimal polynomial $\mathbf{c}_{\infty} = \left( c_{d_{\theta}}, \ldots, c_{0} \right)^{T}$ from $\mathbf{c}$, by setting \begin{center} $ c_{0} = \frac{1}{C} \left( \gamma - \left( c_{d_{\theta}}\left[ C a^{d_{\theta}} \right] + \ldots + c_{1} \left[ C a \right] \right) \right) $. \end{center} Then \begin{center} \begin{align*} \vert \vert \mathbf{c} \vert \vert & = \sqrt{ c_{d_{\theta}}^{2} + \ldots + c_{1}^{2} + \gamma^{2} } \\ & \leq \sqrt{ c_{d_{\theta}}^{2} + \ldots + c_{1}^{2} + \left( \gamma - Cc_{d_{\theta}}\theta^{d_{\theta}} - \ldots - Cc_{1}\theta - Cc_{0} \right)^{2} } \\ & \leq \sqrt{ c_{d_{\theta}}^{2} + \ldots + c_{1}^{2} + \left( c_{d_{\theta}}( \left[ Ca^{d_{\theta}} \right] - C\theta^{d_{\theta}}) + \ldots + c_{1} ( \left[ Ca \right] - C\theta ) + c_{0} ( \left[ C \right] - C) \right)^{2} } \\ & \leq \sqrt{ c_{d_{\theta}}^{2} + \ldots + c_{1}^{2} + \left( c_{d_{\theta}} + \ldots + c_{1} + c_{0} \right)^{2} } \\ & \leq \sqrt{ c_{d_{\theta}}^{2} + \ldots + c_{1}^{2} + \left( c_{d_{\theta}}^{2} + \ldots + c_{1}^{2} + c_{0}^{2} \right)^{2} } \\ & \leq \sqrt{ 2 \left( c_{d_{\theta}}^{2} + \ldots + c_{1}^{2} + c_{0}^{2} \right)^{2} } = \sqrt{2} \ \vert \vert \ \mathbf{c}_{\infty} \vert \vert^{2} \end{align*} \end{center} and this shows that although the length $\vert \vert \mathbf{c} \vert \vert$ depends on the precision of the approximation $k$, $\vert \vert \mathbf{c} \vert \vert$ is bounded by the fixed constant $ \sqrt{2} \vert \vert \ \mathbf{c}_{\infty} \vert \vert^{2} $. Hermite's theorem suggests that the length of the shortest vector in $\mathcal{L}$ is approximately $\Delta \left( \mathcal{L} \right)^{\frac{1}{d_{\theta}+1 }}$, where $\Delta \left( \mathcal{L} \right)$ is the determinant of the lattice $\mathcal{L}_{k}$, as defined in \cite[Page 68]{smart}. As in the $p$-adic case, if $k$ is large enough we expect the vector $\mathbf{c}$ to be the shortest vector in the lattice, and of length significantly smaller than $\Delta \left( \mathcal{L} \right)^{\frac{1}{d_{\theta}+1 }}$. In our case, $\Delta \left( \mathcal{L} \right) = \text{det}\left( A \right) = C = 10^{k'}$; and so if our minimal polynomial has coefficients of order $10^{n}$, $k$, $k'$ are such that: \begin{center} $\left( d_{\theta} + 1 \right)10^{2n} \leq 10^{k'/ \left( d_{\theta} + 1 \right)}$. \end{center} and if the shortest vector in $\mathcal{L}$ is shorter than $\Delta \left( \mathcal{L} \right)^{\frac{1}{d_{\theta}+1 }}$, then it is a suitable candidate for the vector we are looking for. As before, we search for the shortest vector in the lattice using the \texttt{Magma} command \texttt{ShortestVectors} (see \cite{magma}). 
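The same recovery step can be illustrated outside of \texttt{Magma}. The following \texttt{Python} sketch is not the code used for our computations; it replaces the short-vector search above by the PSLQ integer-relation algorithm (as implemented in \texttt{mpmath}), applied to the vector $(1, a, a^{2}, \ldots, a^{d})$, and it uses the hypothetical stand-in $\theta = \sqrt{2} + \sqrt{3}$ in place of a numerically approximated coefficient.
\begin{verbatim}
from mpmath import mp, sqrt, pslq

mp.dps = 60                 # working precision, in decimal places
a = sqrt(2) + sqrt(3)       # stand-in for a high-precision real approximation a_i

d = 4                       # candidate degree for the minimal polynomial
# An integer relation c_0 + c_1*a + ... + c_d*a^d = 0 is exactly a candidate
# coefficient vector (c_0, ..., c_d) for the minimal polynomial of theta.
rel = pslq([a**j for j in range(d + 1)], maxcoeff=10**6, maxsteps=10**4)
print(rel)                  # [1, 0, -10, 0, 1] up to sign: theta^4 - 10*theta^2 + 1 = 0
\end{verbatim}
In the setting of this section one would feed in the approximation $a_{i}$ itself, known only to $k$ decimal places, and increase the candidate degree $d$ until a relation with plausibly small coefficients appears, exactly as in the strategy described above.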
\begin{rem} When the imaginary part of $a$ is not small, the same method can be used but with $\mathcal{L}$ being generated by the columns of \begin{center} $A_{k} = \begin{pmatrix} 1 & \ldots & 0 & 0 \\ 0 & \ldots & 0 & 0 \\ \vdots & \ddots & \vdots & \vdots\\ 0 & \ldots & 1 & 0 \\ \left[ C \text{Re}\left(a^{d_{\theta}} \right) \right] & \ldots & \left[ C \text{Re} \left( a \right) \right] & \left[ C \right] \\ \left[ C \text{Im}\left( a^{d_{\theta}} \right) \right] & \ldots & \left[ C \text{Im} \left( a \right) \right] & 0 \end{pmatrix} $ \end{center} where $\text{Re} \left( x \right)$ and $\text{Im} \left( x \right)$ denote the real and imaginary parts of $x \in \mathbb{C}$, and $C = 10^{k'}$, where $k' < k $ is such that \begin{center} $ \mid \left[ C\cdot \text{Re}\left( a^{j}\right) \right] - C \cdot \text{Re} \left( \theta^{j} \right) \mid \ \leq \ 1 $ and $ \mid \left[ C\cdot \text{Im}\left( a^{j}\right) \right] - C \cdot \text{Im} \left( \theta^{j} \right) \mid \ \leq \ 1 $ \end{center} for all $0 \leq j \leq d_{\theta} $. Arguing as in the previous case, for suitable $k$ and $k'$, the shortest vector in $\mathcal{L}$ is a suitable candidate for the vector corresponding to the minimal polynomial. \end{rem} As in the $p$-adic case, we can replace $d_{\theta}$ in the above expression by any positive integer $d$ and search for short vectors in the lattice constructed with respect to this $d$. The strategy to find $\mathbf{c}$ is as before: we choose a candidate $d$ for $d_{\theta}$, starting with $d=1$ and running through the natural numbers. If our candidate $d$ is equal to $d_{\theta}$, then the shortest vector and the polynomial whose coefficients are derived from the shortest vector should satisfy the same conditions as before. \subsubsection*{Coefficient Relations} As in the previous subsection, once we have candidates $ \left( f_{1}, \ldots, f_{n} \right)$ for the minimal polynomials of $\left( x_{1}, \ldots, x_{n} \right)$, we want to find which permutations of roots correspond to coefficients of equations defining theta hyperplanes. Factorising in this case is simpler, as we now factor $f_{i}$ over $\mathbb{C}$, \begin{center} $f_{i} = s_{1} \ldots s_{d_{i}}$. \end{center} Then for some $j$, $s_{j}\left( a_{i} \right)$ is approximately zero. Thus $s_{j}$ corresponds to the root of $f_{i}$ approximated by $a_{i}$. When factorisation is not efficient, we search for relations amongst coefficients, say between $a_{1}$ and $a_{2}$, using lattice reduction as in the $p$-adic case. If $a_{1}, a_{2} \in \mathbb{R}$, that is, the imaginary parts of both approximations are very small, we search for $b_{0}, \ldots, b_{d_{1}} $ such that \begin{center} $-b_{d_{1}}x_{2} = b_{d_{1} -1}x_{1}^{d_{1} -1} + \ldots + b_{1}x_{1} + b_{0}$ \end{center} where $d_{1}$ is the degree of $f_{1}$. Similarly to the method used to search for minimal polynomials, such integers can be found by searching for short vectors in the lattice generated by the columns of \begin{center} $A = \begin{pmatrix} 1 & \ldots & 0 & 0 & 0 \\ 0 & \ldots & 0 & 0 & 0 \\ \vdots & \ddots & \vdots & \vdots& \vdots \\ 0 & \ldots & 0 & 1 & 0 \\ \left[ C a_{1}^{d_{1}-1} \right] & \ldots & \left[ C a_{1} \right] & \left[ C a_{2} \right] & \left[ C \right] \end{pmatrix} $ \end{center} where $C = 10^{k'}$ and $k' < k$ is chosen as before.
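The coefficient-relation search just described can be illustrated in the same hypothetical setting as the previous sketch: with stand-ins $x_{1} = \sqrt{2} + \sqrt{3}$ (so $d_{1} = 4$) and $x_{2} = \sqrt{3} \in \Q\left( x_{1} \right)$, a relation of the displayed shape can again be found with PSLQ in place of the short-vector search; this is an illustration only, not the implementation used for our computations.
\begin{verbatim}
from mpmath import mp, sqrt, pslq

mp.dps = 60
x1 = sqrt(2) + sqrt(3)      # stand-in for a_1, of degree d1 = 4
x2 = sqrt(3)                # stand-in for a_2, which lies in Q(x_1)

d1 = 4
# Search for integers b_0, ..., b_{d1-1}, b_{d1} with
#   b_0 + b_1*x1 + ... + b_{d1-1}*x1^(d1-1) + b_{d1}*x2 = 0.
rel = pslq([x1**j for j in range(d1)] + [x2], maxcoeff=10**6, maxsteps=10**4)
print(rel)                  # [0, -11, 0, 1, 2] up to sign, i.e. 2*x2 = 11*x1 - x1**3
\end{verbatim}
The resulting relation expresses $x_{2}$ as a polynomial in $x_{1}$ with rational coefficients, playing the role of $\varphi_{2}$ in the $p$-adic case.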
If the imaginary parts of $a_{1}, a_{2} $ are not both small, we instead search for short vectors in the lattice generated by the columns of \begin{center} $A = \begin{pmatrix} 1 & \ldots & 0 & 0 & 0 \\ 0 & \ldots & 0 & 0 & 0 \\ \vdots & \ddots & \vdots & \vdots & \vdots \\ 0 & \ldots & 0 & 1 & 0 \\ \left[ C \text{Re}\left( a_{1}^{d_{1}-1} \right) \right] & \ldots & \left[ C \text{Re} \left( a_{1} \right) \right] & \left[ C \text{Re} \left( a_{2} \right) \right] & \left[ C \right] \\ \left[ C \text{Im}\left( a_{1}^{d_{1}-1} \right) \right] & \ldots & \left[ C \text{Im} \left( a_{1} \right) \right] & \left[ C \text{Im} \left( a_{2} \right) \right] & 0 \end{pmatrix} $ \end{center} The correctness of the minimal polynomials and coefficient relations computed can be verified by computing the corresponding points and checking that they are solutions to the set of equations defining our zero-dimensional scheme. \section{Examples} \allowdisplaybreaks \subsection{The genus $3$ curve: $X_{0} \left( 75 \right) / w_{25}$} \label{g3eg} Consider the non-hyperelliptic genus $3$ curve $X$ over $\Q$ defined by: \begin{equation*} f( x,y,z) = 3x^3z - 3x^2y^2 + 5x^2z^2 -3xy^3 -19xy^2z -xyz^2 + 3xz^3+ 2y^4 + 7y^3z -7y^2z^2 -3yz^3. \end{equation*} This is the quotient of the modular curve $X_{0} ( 75 )$ by the action of the Atkin-Lehner operator $w_{25}$ computed in \cite[Pages 19-21]{x075}. Let $J$ be the Jacobian variety of $X$. In \cite{x075} the authors showed $J\left(\mathbb{Q} \right)_{\text{tors}} \cong \ \mathbb{Z}/2\mathbb{Z} \times \mathbb{Z} / 40\mathbb{Z}$. Using the methods described in Sections 3,4 and 5 we show $J\left( \mathbb{Q} \right) \left[ 2 \right] \cong \left( \mathbb{Z} / 2 \mathbb{Z} \right)^{2}$. First, we write down equations for the scheme of bitangents. We work on the affine chart $ \lbrace z =1 \rbrace$ and with bitangents of the form $x = a_{1}y + a_{2}$ for some $a_{1},a_{2} \in \overline{\mathbb{Q}}$. Using the method of Section 3.1, we obtain the equations: \begin{align*} e_{1} & = 3a_{1}^{2}a_{4}^{2} + 3a_{1}a_{4}^{2} + 3a_{2}^3 + 5a_{2}^{2} + 3a_{2} - 2a_{4}^{2}, \\ e_{2} &= 6a_{1}^{2}a_{3}a_{4} + 9a_{1}a_{2}^{2} + 10a_{1}a_{2} + 6a_{1}a_{3}a_{4} + 3a_{1} - a_{2} - 4a_{3}a_{4} - 3,\\ e_{3} & = 9a_{1}^{2}a_{2} + 3a_{1}^{2}a_{3}^{2} + 6a_{1}^{2}a_{4} + 5a_{1}^{2} + 3a_{1}a_{3}^{2} + 6a_{1}a_{4} - a_{1} - 3a_{2}^{2} - 19a_{2} - 2a_{3}^{2} - 4a_{4} - 7,\\ e_{4} &= 3a_{1}^{3} + 6a_{1}^{2}a_{3} - 6a_{1}a_{2} + 6a_{1}a_{3} - 19a_{1} - 3a_{2} - 4a_{3} + 7. \end{align*} Let $S$ be the scheme defined by $e_{1},e_{2},e_{3},e_{4}$. As $17$ is a prime of good reduction for $X$ and we can verify using \texttt{Magma} that $S / \mathbb{F}_{289}$ has degree 28; all bitangents to $X$ correspond to points on $S$. Let $L = \mathbb{Q} \left( \sqrt{7} \right)$ and $\mathcal{O}_{L} = \mathbb{Z} \left[ \sqrt{7} \right]$ its ring of integers. The prime ideal $\mathfrak{p} = \langle 17 \rangle $ of $\mathcal{O}_{L}$ has norm $17^{2} = 289$, so we can use the isomorphism $\mathbb{F}_{289} \cong \mathcal{O}_{L}/\mathfrak{p} = \mathbb{F}_{\mathfrak{p}}$ to calculate the points of $S \left( \mathbb{F}_{289}\right) = S\left( \mathbb{F}_{\mathfrak{p}} \right)$. Each point in $S\left( \mathbb{F}_{\mathfrak{p}} \right)$ has a unique lift modulo $\mathfrak{p}^{k}$ for each $k \geq 1$ and using the methods described in Section 5.1, we find the minimal polynomials of the coefficients of each bitangent, and define the bitangents. 
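Before listing the bitangents, we note that any candidate equation can be checked symbolically. The following \texttt{Python}/\texttt{sympy} sketch (an illustration, separate from the \texttt{Magma} code in the repository) verifies that the line $x + 3y + z = 0$, the first of the four rational bitangents listed in the next paragraph, restricts to $X$ as minus a perfect square, as required of a bitangent.
\begin{verbatim}
import sympy as sp

y, z = sp.symbols('y z')
x = -3*y - z        # the line x + 3*y + z = 0, solved for x

# quartic model of X_0(75)/w_25 used in this subsection
f = (3*x**3*z - 3*x**2*y**2 + 5*x**2*z**2 - 3*x*y**3 - 19*x*y**2*z
     - x*y*z**2 + 3*x*z**3 + 2*y**4 + 7*y**3*z - 7*y**2*z**2 - 3*y*z**3)

print(sp.factor(sp.expand(f)))   # -(2*y + z)**4, a square (in fact a fourth power)
\end{verbatim}
Here the two tangency points coincide at $\left( 1 : -1 : 2 \right)$, the point $P_{0}$ appearing at the end of this example.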
There are four bitangents defined over the rationals, given by the following equations \begin{align*} & x + 3y + z, \\ & x - y + z, \\ & x - 2y + z, \\ & x + 2y +z. \end{align*} There are six bitangents defined over quadratic fields, all defined over the field $\Q \left(\sqrt{5}\right)$: \begin{align*} & -2x + \left( -1 \pm \sqrt{5} \right) y + \left( 1 \pm 3 \sqrt{5} \right) z, \\ & -2x + \left( 7 \pm \sqrt{5} \right) y + \left( -3 \mp \sqrt{5} \right) z, \\ & x + \left( -4 \pm 2 \sqrt{5} \right) y + \left( -9 \pm 4 \sqrt{5} \right) z. \end{align*} The remaining eighteen bitangents are listed in Galois orbits. All bitangents are described by equations of the form $x - a_{1} y - a_{2}z$ and we give a minimal polynomial $f$ of $a_{1}$ and an expression for $a_{2}$ in terms of $a_{1}$. \allowdisplaybreaks \begin{center} \begin{table} \caption{Equations defining some bitangents of $X_{0}\left( 75 \right)/ w_{25}$.} \label{table: 1} \begin{align*} \hline & f = u^6 + 3u^5 - 5u^3 + 60u^2 + 63u - 41 \\ \hline & a_{1} = u \\ & a_{2} = \frac{1}{27}\left( u^5 + u^4 - 2u^3 - u^2 + 62u - 34 \right) \\ \hline \hline & f = 11u^6 + 49u^5 + 20u^4 - 55u^3 + 40u^2 - 11u + 1 \\ \hline & a_{1} = u \\ & a_{2} = \frac{1}{17}\left( 407u^5 + 1901u^4 + 1154u^3 - 1773u^2 + 1106u - 188 \right) \\ \hline \hline & f = u^{6} + 14u^{5} - 5u^{4} - 20u^{3} + 95u^{2} + 134u - 139 \\ \hline & a_{1} = u \\ & a_{2} = \frac{1}{9164}\left( 41u^5 + 467u^4 - 1312u^3 + 2604u^2 + 2687u - 3083 \right) \\ \hline \end{align*} \end{table} \end{center} The minimal polynomials show that the Galois orbits of the bitangents are as follows: \begin{itemize} \item 3 orbits with 6 bitangents each; \item 3 orbits with 2 bitangents each, all defined over $\Q \left( \sqrt{5} \right)$; \item 4 bitangents defined over $\mathbb{Q}$ and thus stable under Galois action. \end{itemize} Let $K$ be the splitting field of $u^{6} + 14u^{5} - 5u^{4} - 20u^{3} + 95u^{2} + 134u - 139$. This is a degree 12 Galois extension of $\mathbb{Q}$ and we verify using the \texttt{Magma} command \texttt{Roots}, see \cite{magma}, that all of the coefficients stated in Table \ref{table: 1} split over this number field and thus all bitangents are defined over $K$. Furthermore, the same command can be used to verify that this is the smallest degree number field over which all bitangents are defined. Let $\mathcal{O}_{K}$ be the ring of integers of $K$. We view $X$ as a projective curve over $K$ and scale the equations of the bitangents to ensure that they are defined over the maximal order of $K$. Denote the set equations, defined over $\mathcal{O}_{K}$, which cut out the bitangents to $X$ by BT. Fix the rational bitangent \begin{center} $b : x + 3y + z = 0 $ \end{center} and define $HBT = \lbrace \frac{1}{2} \text{div} \left( \frac{l}{b} \right) : l \in BT \rbrace $. Let $H$ be the subgroup of $J \left( K \right) \left[ 2 \right]$ generated by equivalence classes of elements of $HBT$. The ideal $\mathfrak{p} = \langle 17, 4 + 2\theta + 2 \theta^2 \rangle$ is a prime ideal in $ \mathcal{O}_{K}$ of norm $289$, where $\theta$ is a generator of $K$. As $17$ is a prime of good reduction for $X$, reduction modulo $\mathfrak{p}$ induces an injection \begin{center} $r_{\mathfrak{p}} : J \left( K \right) _{\text{tor}} \longrightarrow J \left( \mathbb{F_{\mathfrak{p}}} \right)$ \end{center} see \cite{katz}. 
It can be shown using \texttt{Magma} (see the examples of \cite[Section 12.9]{BPS} for similar computation or refer to the accompanying code for details) that $ J \left( \mathbb{F_{\mathfrak{p}}} \right) \cong \left( \mathbb{Z} / 2 \mathbb{Z} \right) \times \left( \mathbb{Z} /8 \mathbb{Z} \right)^{2} \times \left( \mathbb{Z} / 40 \mathbb{Z} \right)^{2} \times \left( \mathbb{Z} / 160 \mathbb{Z} \right)$ and $r_{\mathfrak{p}} \left( H \right) \cong \left( \mathbb{Z} / 2 \mathbb{Z} \right)^{6}$, and thus we conclude that $H = J \left( K \right)\left[ 2 \right]$. As the genus of $X$ is 3, $H$ is necessarily the entire $2$-torsion subgroup, \begin{center} $H = J \left( \overline{\mathbb{Q}} \right) \left[ 2 \right]$. \end{center} To determine the rational $2$-torsion subgroup, we take Galois invariants of $H$. Let $G = \Gal \left( K/ \mathbb{Q} \right)$. Elementary computations show $G \cong D_{12} $, where $D_{12}$ is the dihedral group of order 12, and we find that the subgroup of $H$ fixed by $G$ is \begin{center} $H^{G} = \left( \mathbb{Z} / 2 \mathbb{Z} \right) \left[ P_{1} + P_{2} -2P_{0} \right] \ + \ \left( \mathbb{Z} / 2 \mathbb{Z} \right) \left[ P_{3} + P_{4} - 2P_{0} \right] $ \end{center} where the points $P_{0},\ldots, P_{4}$ are the points of the intersection of the first $3$ rational bitangents stated earlier in this subsection. \begin{align*} & P_{0} = \left( 1 :-1 : 2 \right), \\ & P_{1} = \left( 1 - \sqrt{2} :1 : \sqrt{2}\right), \quad P_{2} =\left( 1 + \sqrt{2} :1 : - \sqrt{2}\right), \\ & P_{3} = \left( -6 + 2 \sqrt{-15} : 1 + \sqrt{-15} : 8 \right), \quad P_{4} = \left( -6 - 2 \sqrt{-15} : 1 - \sqrt{-15} : 8 \right). \end{align*} Thus $J \left( \mathbb{Q} \right) \left[ 2 \right] \cong \left( \mathbb{Z} / 2 \mathbb{Z} \right)^{2}$, verifying the result of \cite{x075}. \subsection{The genus $4$ curve: $X_{0}\left( 54 \right)$} In this subsection we compute the $2$-torsion subgroup of the Jacobian of the non-hyperelliptic genus 4 modular curve $X = X_{0}\left( 54 \right)$. This has the rational model: \begin{align*} f_{1} & = x^{2}z - xz^{2} - y^{3} + y^{2}w -3yw^{2} + z^{3} + 3w^{3}, \\ f_{2} &= xw - yz + zw \end{align*} as given in \cite{OS}). Let $J$ be the Jacobian of this curve. As stated in \cite{OS}, $J \left( \mathbb{Q }\right) \cong \left( \mathbb{Z} / 3 \mathbb{Z} \right) \times \left( \mathbb{Z} / 3 \mathbb{Z} \right) \times \left( \mathbb{Z} / 9 \mathbb{Z} \right) $, and so there is no rational 2-torsion. We verify this using our method. For the scheme of tritangents, we work on the affine chart $\lbrace w = 1 \rbrace$ and with tritangent planes of the form \begin{center} $x = a_{1}y + a_{2} z + a_{3}$ \end{center} for some $a_{1}, a_{2}, a_{3} \in \mathbb{\overline{Q}}$. Using the method described in Section 3.2 we define a zero-dimensional scheme of tritangent planes to $X$, which we call $S$. It can be verified using \texttt{Magma} that $S$ is non-singular over $\mathbb{F}_{289}$ and it has degree $120$, thus all tritangents to $X$ correspond to a point on $S$. Let $L = \mathbb{Q} \left( \sqrt{-3} \right)$ and $\mathcal{O}_{L}$ its ring of integers. The ideal $ \mathfrak{p} = \left\langle 17 \right\rangle $ is prime and its norm is 289, and we use the isomorphism $ \mathbb{F}_{289} \cong \mathbb{F}_{\mathfrak{p}} = \mathcal{O}_{L} / \mathfrak{p}$ to calculate the points of $ S\left ( \mathbb{F}_{289} \right) = S \left( \mathbb{F}_{\mathfrak{p} } \right)$. 
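The lifting carried out in the next step follows the multivariate Hensel Lemma of Section 4.1. As a minimal illustration of that update (on a hypothetical two-variable toy system, not the scheme of tritangents itself, and independent of our \texttt{Magma} code), the following \texttt{Python} sketch lifts a smooth solution modulo $p$ to a solution modulo $p^{6}$, performing one Newton step per extra power of $p$.
\begin{verbatim}
# Toy system: F1 = x^2 + y^2 - 1, F2 = x - y^2.  It has no rational solutions,
# but (x, y) = (3, 5) is a smooth solution modulo p = 11.
def F(x, y, m):
    return ((x * x + y * y - 1) % m, (x - y * y) % m)

def lift(x, y, p, K):
    """Lift a smooth solution mod p to a solution mod p^K (requires Python >= 3.8)."""
    m = p
    for _ in range(K - 1):
        m *= p
        f1, f2 = F(x, y, m)
        # Jacobian of (F1, F2) at (x, y) is [[2x, 2y], [1, -2y]]; it stays
        # invertible mod m because the starting point is smooth mod p.
        a, b, c, d = 2 * x % m, 2 * y % m, 1, (-2 * y) % m
        det_inv = pow((a * d - b * c) % m, -1, m)
        # Newton step (x, y) <- (x, y) - J^{-1} F(x, y), computed modulo m
        x = (x - det_inv * (d * f1 - b * f2)) % m
        y = (y - det_inv * (-c * f1 + a * f2)) % m
    return x, y, m

x, y, m = lift(3, 5, 11, 6)
print(x, y, F(x, y, m))      # F(x, y, m) == (0, 0): a solution modulo 11^6
\end{verbatim}
In practice the same update is applied, coordinate-wise, to the defining equations of the scheme of theta hyperplanes, with the Jacobian inverted modulo the relevant power of $\mathfrak{p}$.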
Using the techniques of Sections 4 and 5, we compute lifts of the points of $S\left( \mathbb{F}_{\mathfrak{p}} \right)$ modulo powers of $\mathfrak{p}$, and compute the minimal polynomials. In this example, computing the minimal polynomials of all the $a_{1}$'s was sufficient to determine the field of definition of the $2$-torsion subgroup and to complete our calculation. The minimal polynomials show that the Galois orbits of tritangents are as follows: \begin{itemize} \item there are 2 orbits with 3 tritangents each; \item there is one orbit with 6 tritangents; \item there are 2 orbits with 9 tritangents each; \item there are 3 orbits with 18 tritangents each; \item there is 1 orbit with 36 tritangents. \end{itemize} Using this, we are able to compute a candidate for the field $K = \Q \left( J \left[ 2 \right] \right)$. This has degree $36$. We were able to find all $120$ points of $S$ over $K$ using the \texttt{Magma} command \texttt{Points}. From this we deduce the $2$-torsion subgroup $J\left[ 2 \right]$ as before. Let $G = \Gal \left( K / \mathbb{Q} \right)$. Elementary computations show that $G \cong C_{3} \times A_{4}$, where $C_{3}$ denotes the cyclic group of order $3$ and $A_{4}$ denotes the alternating group acting on $4$ elements. Let $s \in G $ be any element of order $6$. We find that $s$ fixes no non-zero element of $J \left[ 2 \right]$, so $J \left[ 2 \right] ^{s} = 0$. Therefore, $J\left( \mathbb{Q} \right) \left[ 2 \right] = 0 $, verifying the result of \cite{OS}. \subsection{The genus $5$ curve: $X_{0}\left( 42 \right)$} \label{g5ex} In this subsection we compute the $2$-torsion subgroup of the Jacobian of the genus $5$, non-hyperelliptic modular curve $X = X_{0}\left( 42 \right)$. The canonical model that we use is (as in \cite{OS}) the intersection of the three quadrics: \begin{align*} f_{1} & = x_{1}x_{3} - x_{2}^{2} + x_{3}x_{4} \\ f_{2} & = x_{1}x_{5} - x_{2}x_{5} - x_{3}^{2} + x_{4}x_{5} - x_{5}^{2} \\ f_{3} & = x_{1}x_{4} - x_{2}x_{3} + x_{2}x_{4} - x_{3}^{2} + x_{3}x_{4} + x_{3}x_{5} - x_{4}^{2} \end{align*} For the scheme of quadritangents, with this model, it is convenient to work on the affine chart $ \lbrace x_{5} = 1 \rbrace$ and with planes of the form \begin{center} $x_{1} = a_{1}x_{2} + a_{2}x_{3} + a_{3}x_{4} + a_{4}$ \end{center} for some $a_{1},a_{2}, a_{3},a_{4} \in \overline{\mathbb{Q}}$.
The intersection of such a plane with the affine curve is given by \begin{center} $F_{i}\left( x_{2}, x_{3}, x_{4} \right) = f_{i} \left( a_{1}x_{2} + a_{2}x_{3} + a_{3}x_{4} + a_{4}, x_{2} , x_{3}, x_{4}, 1 \right) $ for $i = 1,2,3 $. \end{center} Explicitly, these expressions are \begin{center} $F_{1} \left(x_{2}, x_{3}, x_{4} \right) = -x_{2}^{2} + a_{1} x_{2}x_{3} + a_{2}x_{3}^{2} + \left( a_{3} + 1 \right) x_{3}x_{4} + a_{4}x_{3} $\\ $F_{2} \left( x_{2} , x_{3} , x_{4} \right) = \left( a_{1} - 1\right) x_{2} -x_{3}^{2} + a_{2}x_{3} + \left( a_{3} + 1 \right) x_{4} + a_{4} -1 $\\ $F_{3} \left( x_{2}, x_{3}, x_{4} \right) = -x_{2}x_{3} + \left( a_{1} + 1 \right) x_{2}x_{4} -x_{3}^{2} + \left( a_{2} + 1 \right) x_{3}x_{4} + x_{3} + \left( a_{3} - 1 \right) x_{4}^{2} + \left( a_{4} - 2 \right) x_{4} $ \end{center} For $a_{1} \neq 1 $, $F_{2} = 0 $ gives an expression for $x_{2}$ in terms of $x_{3}$ and $x_{4} $, \begin{align} x_{2} = \frac{-1}{a_{1} - 1 } \left( -x_{3}^{2} + a_{2} x_{3} + \left( a_{3} + 1 \right) x_{4} + a_{4} - 1 \right) = \phi \left( x_{3} , x_{4} \right). \end{align} Substituting for $x_{2} $ in $F_{1}$ and $F_{3}$, we get \begin{center} \begin{align*} \widetilde{G_{1}} & = F_{1} \left( \phi \left( x_{3}, x_{4} \right), x_{3}, x_{4} \right) = \frac{G_{1} \left( x_{3}, x_{4} \right) }{\left( a_{1} - 1 \right)^{2}} \\ \widetilde{G_{2}} & = F_{3} \left( \phi \left( x_{3}, x_{4} \right), x_{3}, x_{4} \right) = \frac{G_{2} \left( x_{3}, x_{4} \right) }{\left( a_{1} - 1 \right)} \end{align*} \end{center} Clearing denominators, the intersection is given by \begin{center} $ G_{1} \left( x_{3}, x_{4} \right) = G_{2} \left( x_{3} , x_{4} \right) =0 $ \end{center} where \begin{center} $ G_{1} = -x_{3}^{4} + \left(a_{1}^{2} - a_{1}+ 2a_{2}\right)x_{3}^{3} + \left(2a_{3} + 2\right)x_{3}^{2}x_{4} + \left(-a_{1}a_{2} - a_{2}^{2} + a_{2} + 2a_{4} - 2\right)x_{3}^{2} + \left(-a_{1}a_{3} - a_{1} - 2a_{2}a_{3} - 2a_{2} + a_{3} + 1\right)x_{3}x_{4} + \left(a_{1}^{2} - a_{1}a_{4} - a_{1} - 2a_{2}a_{4} + 2a_{2} + a_{4}\right)x_{3} + \left(-a_{3}^{2} - 2a_{3} - 1\right)x_{4}^{2} + \left(-2a_{3}a_{4} + 2a_{3} - 2a_{4} + 2\right)x_{4} - a_{4}^{2} + 2a_{4} - 1$ \\ $G_{2} = -x_{3}^{3} + \left(a_{1} + 1\right)x_{3}^{2}x_{4} + \left(-a_{1} + a_{2} + 1\right)x_{3}^{2} + \left(a_{1} - 2a_{2} + a_{3}\right)x_{3}x_{4} + \left(a_{1} + a_{4} - 2\right)x_{3} + \left(-2a_{1} - 2a_{3}\right)x_{4}^{2} + \left(-a_{1} - 2a_{4} + 3\right)x_{4}$ \end{center} These can be re-written as \begin{center} $G_{1} = g_{1} \left( x_{3} \right) + h_{1} \left( x_{3} \right) x_{4} + \alpha_{1} x_{4}^{2} $ \\ $G_{2} = g_{2} \left( x_{3} \right) + h_{2} \left( x_{3} \right) x_{4} + \alpha_{2} x_{4}^{2} $ \end{center} with $ g_{1}, g_{2}, h_{1}, h_{2} \in \mathbb{Q} \left[ a_{1}, a_{2}, a_{3} ,a_{4} \right] \left[ x_{3} \right] $ and $ \alpha_{1} = -\left( a_{3} + 1 \right)^{2} $, $ \alpha_{2} = -2\left( a_{1} + a_{3} \right) $. If $\alpha_{1} \neq 0 $ and $ \alpha_{2} \neq 0 $, then \begin{center} $ \alpha_{2} G_{1} - \alpha_{1} G_{2} = \left( \alpha_{2} h_{1} \left( x_{3} \right) - \alpha_{1} h_{2} \left( x_{3} \right) \right) x_{4} + \alpha_{2} g_{1} \left( x_{3} \right) - \alpha_{1} g_{2} \left( x_{3} \right) = 0 $ \end{center} and if $ T_{1} = \alpha_{2} h_{1} \left( x_{3} \right) - \alpha_{1} h_{2} \left( x_{3} \right) \neq 0 $ as a polynomial in $x_{3} $, the above gives an expression for $x_{4} = \frac{-T_{2} }{T_{1} }$ with $ T_{2} = \alpha_{2} g_{1} \left( x_{3} \right) - \alpha_{1} g_{2} \left( x_{3} \right) $.
Substituting for $x_{4} = -T_{2}/T_{1}$ in $G_{1}$, we get \begin{center} $G\left( x_{3} \right) = G_{1} \left( x_{3} , \frac{-T_{2}}{T_{1} } \right) = \frac{h\left( x_{3} \right) }{g \left( x_{3} \right) } $ \end{center} where $h \in \mathbb{Q}\left[ a_{1}, a_{2} , a_{3}, a_{4} \right]\left[ x_{3} \right]$ has degree 8 and $g \left( x_{3} \right) = T_{1}^{2}/ \left(a_{3} + 1 \right)^{2} $. Clearing denominators in the expression above, we remark that if the given plane $x_{1} = a_{1}x_{2} + a_{2}x_{3} + a_{3}x_{4} + a_{4} $ is a quadritangent, then $h$ is necessarily a square. Equivalently, there exist $a_{5} , a_{6}, a_{7}, a_{8} \in \overline{\mathbb{Q}}$ such that \begin{center} $h\left( x \right) = l \left( x^{4} + a_{5}x^{3} + a_{6}x^{2} + a_{7}x + a_{8} \right)^{2} $ \end{center} where $l$ is the leading coefficient of $h$. Equating coefficients in the above expression gives 8 equations $e_{1} , \ldots ,e_{8} $ in $a_{1}, \ldots , a_{8}$. We also add a 9th equation (and a 9th variable) \begin{center} $e_{9} : a_{9} \Delta \left( x^{4} + a_{5} x^{3} + a_{6}x^{2} + a_{7}x + a_{8} \right) + 1 = 0 $ \end{center} to ensure that $ x^{4} + a_{5} x^{3} + a_{6}x^{2} + a_{7}x + a_{8}$ has non-zero discriminant and to avoid singularities on our scheme. To derive $e_{1}, \ldots, e_{8}$, we assumed that $ a_{1} \neq 1$, $\alpha_{1} \neq 0 $ and $\alpha_{2} \neq 0 $, and equations are also required for these conditions: \begin{align*} e_{10} & : a_{10} \left( a_{1} - 1 \right) + 1 = 0 \\ e_{11} & : a_{11} \left( a_{3} + 1 \right) + 1 = 0 \\ e_{12} & : a_{12} \left( a_{1} + a_{3} \right) + 1 = 0 \end{align*} It can be checked that $e_{2}, \ldots, e_{12}$ are irreducible, and $e_{1} = s_{1}s_{2} $, where \begin{center} $s_{1} = a_{3}a_{4} -a_{3}a_{8} - a_{3} -2a_{4}^{2} + 5a_{4} + a_{8} -3 $ \\ $s_{2} = a_{3} a_{4} + a_{3} a_{8} - a_{3} - 2a_{4}^{2} + 5a_{4} - a_{8} - 3 $ \end{center} \texttt{Julia} suggests that the system \begin{center} $s_{1} = s_{2} = e_{2} = \ldots = e_{12} = 0 $ \end{center} has no solutions. We consider the cases $s_{1} =0 $ and $s_{2} =0 $ separately. The condition $T_{1} \neq 0$ will also require equations. As a polynomial, $T_{1} $ is non-zero if its coefficients $t_{1}$, $t_{2}$ and $t_{3} $ are not all zero. \begin{align*} t_{1} & = a_{1} a_{3} - 3a_{1} -3a_{3} + 1 \\ t_{2} &= k_{1} k_{2} = \left( a_{1} + 2a_{2} + a_{3} \right) \left( 2a_{1} + a_{3} - 1 \right) \\ t_{3} &= a_{1}a_{3} - 4a_{1} a_{4} + 5a_{1} -2a_{3}a_{4} + a_{3} + 2a_{4} - 3 \end{align*} \subsubsection*{Case 1} The first 12 equations are $s_{1}, e_{2}, \ldots e_{12} $ and we consider all possible combinations of zero and non-zero $t_{1}, k_{1}, k_{2} $ and $t_{3}$. \textbf{Case 1.1 : $t_{1} \neq 0, k_{1} \neq 0, k_{2} \neq 0, t_{3} \neq 0 $ } We add 4 equations and 4 variables \begin{align*} e_{13} & : a_{13}t_{1} + 1 = 0; \\ e_{14} & : a_{14}k_{1} + 1 = 0; \\ e_{15} & : a_{15}k_{2} + 1 = 0;\\ e_{16} & : a_{16}t_{3} + 1 =0. \end{align*} \texttt{Julia} finds 96 approximate solutions to the system formed by the 16 equations in $ a_{1} , \ldots , a_{16} $. \textbf{Case 1.2 : $t_{1} \neq 0, k_{1} \neq 0, k_{2} = 0, t_{3} \neq 0 $ } We add 4 equations and 3 variables \begin{align*} e_{13} & : a_{13}t_{1} + 1 = 0; \\ e_{14} & : a_{14}k_{1} + 1 = 0; \\ e_{15} & : a_{15}t_{3} + 1 = 0; \\ e_{16} & : k_{2} = 0. \end{align*} \texttt{Julia} finds 24 approximate solutions to the system formed by the 16 equations in $ a_{1} , \ldots , a_{15} $.
\textbf{Case 1.3 : $t_{1} \neq 0, k_{1} = 0, k_{2} \neq 0, t_{3} \neq 0 $ } We add 4 equations and 3 variables \begin{align*} e_{13} & : a_{13}t_{1} + 1 = 0; \\ e_{14} & : a_{14}k_{2} + 1 = 0; \\ e_{15} & : a_{15}t_{3} + 1 = 0; \\ e_{16} & : k_{1} = 0. \end{align*} \texttt{Julia} finds 14 approximate solutions to the system formed by the 16 equations in $ a_{1} , \ldots , a_{15} $. In all other cases, for all other possible combinations of zero and non-zero $t_{1}, k_{1}, k_{2}, t_{3} $, the corresponding systems have no approximate solutions. \subsubsection*{Case 2 } The first 12 equations are $s_{2}, e_{2} , \ldots,e_{12} $ . \textbf{Case 2.1 : $t_{1} \neq 0, k_{1} \neq 0, k_{2} \neq 0, t_{3} \neq 0 $ } We add 4 equations and 4 variables \begin{align*} e_{13} & : a_{13}t_{1} + 1 = 0; \\ e_{14} & : a_{14}k_{1} + 1 = 0; \\ e_{15} & : a_{15}k_{2} + 1 = 0; \\ e_{16} & : a_{16}t_{3} + 1 = 0. \end{align*} \texttt{Julia} finds 256 approximate solutions to the system formed by the 16 equations in $ a_{1} , \ldots , a_{16} $. In all other cases, the resulting systems have no approximate solutions. We can also derive equations and schemes in the extreme cases, $\alpha_{1} =0$, $\alpha_{2} = 0$, $a_{1} = 1$ etc. These cases combined had few solutions, and in fact these planes are not needed in our calculation of the 2-torsion subgroup. In each case, we approximate solutions as complex points and using the techniques described in Section 5.2, we compute the corresponding quadritangents to $X$. The 16 planes described in Table \ref{table: 3} and the cusps of $X_{0}\left( 42\right)$ were sufficient to fully describe the 2-torsion subgroup of $X_{0} \left( 42 \right)$. The planes computed occur in two Galois orbits. For both orbits, we give the minimal polynomial of $a_{1}$ and expression for $a_{2}, a_{3}, a_{4}$ in terms of $a_{1}$. \begin{center} \begin{table} \caption{Some Quadritangent Planes to $X_{0}\left( 42 \right)$} \label{table: 3} \begin{align*} \hline & u^8 + 14u^7 + 151u^6 - 396u^5 + 283u^4 - 1730u^3 + 3201u^2 - 1440u +2284 \\ \hline a_{1} & = u \\ a_{2} & = 1/301100972656( 39380331u^7 + 442881623u^6 + 4027975134u^5 - 37845583334u^4 \\ & - 12023416509u^3 - 80118862717u^2 - 49248922084u + 241405142628 ) \\ a_{3} & = -1 / 301100972656 ( 52272255u^7 + 716826167u^6 + 7618567014u^5 - 24269489250u^4 \\ & + 8461415807u^3 - 144514109641u^2 + 341498511372u - 30420302668 ) \\ a_{4} & = -1/301100972656 (16239354u^7 + 285678727u^6 + 3306671724u^5 + 3184311718u^4 \\ & - 7648219606u^3 - 17837849633u^2 + 78572314624u - 372802926204 ) \\ \hline \\ \hfill \\ \hline & 23u^8 + 78u^7 + 135u^6 + 146u^5 + 236u^4 + 322u^3 + 239u^2 + 94u + 23 \\ \hline a_{1} & = u \\ a_{2} & = 1/6324 ( 10258u^7 + 27083u^6 + 40152u^5 + 34112u^4 + 75578u^3 + 80920u^2 \\ & + 41542u + 5979) \\ a_{3} & = -1/4743 (4324u^7 + 9535u^6 + 12034u^5 + 6080u^4 + 22416u^3 + 14876u^2 \\ & + 3588u - 7468) \\ a_{4} & = -1/9486 ( 3404u^7 + 10601u^6 + 16874u^5 + 17098u^4 + 28680u^3 + 37276u^2 \\ & + 24084u - 7931) \\ \hline \end{align*} \end{table} \end{center} Let $K$ be the number field defined by $f$ \begin{center} $ f\left( u \right) = 2713u^{16} + 9264u^{15} + 24252u^{14} - 1352u^{13} - 270446u^{12} - 739224u^{11} - 599968u^{10} + 1502208u^{9} + 6136803u^{8} + 10670696u^{7} + 11231488u^{6} + 7603968u^{5} + 3052898u^{4} + 591416u^{3} + 141500u^{2} + 210760u + 154057 $. \end{center} This is a degree 16 Galois extension, and the above quadritangents are defined over $K$. Let $\mathcal{O}_{K}$ be the ring of integers of $K$. 
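The number field $K$ and the prime used in the reduction step below can be set up in \texttt{Magma} along the following lines (a minimal sketch; variable names are ours, and we assume that \texttt{NumberField} accepts the non-monic defining polynomial as given).
\begin{verbatim}
// Build K = Q[u]/(f) and locate a prime above 11 of norm 11^2 = 121,
// which is used for the reduction step below.
Qu<u> := PolynomialRing(Rationals());
f := 2713*u^16 + 9264*u^15 + 24252*u^14 - 1352*u^13 - 270446*u^12
     - 739224*u^11 - 599968*u^10 + 1502208*u^9 + 6136803*u^8
     + 10670696*u^7 + 11231488*u^6 + 7603968*u^5 + 3052898*u^4
     + 591416*u^3 + 141500*u^2 + 210760*u + 154057;
K<t> := NumberField(f);
OK := Integers(K);
[ Norm(pr[1]) : pr in Decomposition(OK, 11) ]; // expect 121 to appear
\end{verbatim}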
The equations of the planes described above can be homogenised and scaled appropriately to ensure that they are all defined over $\mathcal{O}_{K}$. Denote by $QT$ the set of equations of 16 quadritangents to $X$. The quotient of any $2$ elements of $QT$ is a function on the curve $X$. Fix an element $l_{0} \in QT$ and let \begin{center} $HQT = \lbrace \frac{1}{2} \text{div} \left( \frac{l}{l_{0}} \right) : l \in QT \rbrace $. \end{center} The cusps of $X_{0}\left( 42 \right) $ are: \begin{align*} c_{1} & = \left( 2 : -1 : 1 : -1 : 1 \right), \\ c_{2} & = \left( 2 : 2 : 1 : 2: 1 \right), \\ c_{3} & = \left( 7 : 4 : 2 : 1 : 2 \right), \\ c_{4} & = \left( 1 : -2 : 2 : 1 : 2 \right), \\ c_{5} & = \left( 3 : 0 : 0 : -1 : 2 \right), \\ c_{6} & = \left( 1 :0 : 0 : 0 : 1 \right), \\ c_{7} & = \left( 1 : 0 : 0 : 1 : 0 \right), \\ c_{8} & = \left( 1 :0 :0 : 0 : 0 \right). \end{align*} Let $ J = J_{0} \left( 42 \right) $ be the Jacobian of $X_{0} \left( 42 \right)$ and define \begin{align*} D_{1} & = 3 c_{1} - c_{2} + 2c_{3} - 2c_{5} - 2 c_{6} , \\ D_{2} & = -7 c_{1} +2 c_{2} + 3c_{3} + c_{5} + c_{6} , \\ D_{3} &= 4c_{1} -4c_{2} + 3c_{3} - c_{4} -2c_{5} - c_{6} + c_{8} , \\ D_{4} &= 6c_{1} - 4 c_{2} + 2c_{3}- 2 c_{5} -2c_{6} . \ \end{align*} The linear equivalence classes of the above divisors are distinct, rational and we can verify using \texttt{Magma} that they are 2-torsion points on $J \cong \text{Pic}^{0} \left( X_{0} \left( 42 \right) \right)$. Let $H$ be the subgroup of $J\left( K \right) \left[ 2 \right]$ generated by equivalence classes of elements of $HQT$ and the equivalence classes of $D_{1}, D_{2}, D_{3}, D_{4}$. By factoring $11\mathcal{O}_{K}$ we find a prime ideal $\mathfrak{p} $ of norm 121. As 11 is a prime of good reduction for $X$, reduction modulo $\mathfrak{p}$ induces an injection \begin{center} $r_{\mathfrak{p}} : J \left( K \right)_{\text{tors}} \longrightarrow J \left( \mathbb{F}_{\mathfrak{p}} \right)$. \end{center} Using \texttt{Magma} we verify that $J \left( \mathbb{F}_{\mathfrak{p}} \right) \cong \left( \mathbb{Z} / 2 \mathbb{Z} \right)^{4} \times \left( \mathbb{Z} / 8 \mathbb{Z} \right)^{2} \times \left( \mathbb{Z} / 24 \mathbb{Z} \right) \times \left( \mathbb{Z} / 48 \mathbb{Z} \right) \times \left( \mathbb{Z} / 192 \mathbb{Z} \right)^{2}$ and $r_{\mathfrak{p}} \left( H \right) \cong \left( \mathbb{Z} / 2 \mathbb{Z} \right)^{10}$, and thus the subgroup $H$ is necessarily the whole 2-torsion subgroup of $ J \left( K \right)_{\text{tors}} $. Additionally, as the genus of $X$ is $5$, $H$ is in fact the entire 2-torsion subgroup \begin{center} $H = J \left( \mathbb{\overline{Q}} \right)\left[ 2 \right]. $ \end{center} Let $G = \Gal \left( K / \mathbb{Q} \right) $. Elementary calculations show that $ G \cong D_{8} \times C_{2} $, where $D_{8}$ is the dihedral group of order 8 and $C_{2}$ is the cyclic group of order 2. Taking Galois invariants, we find \begin{center} $H^{G} = J \left( \mathbb{Q} \right) \left[ 2 \right] \ = \ \left( \mathbb{Z} / 2 \mathbb{Z} \right)\left[ D_{1} \right] + \left( \mathbb{Z} / 2 \mathbb{Z} \right) \left[ D_{2} \right] + \left( \mathbb{Z} / 2 \mathbb{Z} \right) \left[ D_{3} \right] + \left( \mathbb{Z} / 2 \mathbb{Z} \right) \left[ D_{4} \right] $ \end{center} Combining this with the result of \cite{OS}, where the rational torsion subgroup of $J$ is computed up to $2$-torsion, we deduce the following corollary. \begin{cor} The generalised Ogg conjecture holds for $N=42$. 
More precisely, $J_{0}(42)(\mathbb{Q} ) = C \cong \left( \mathbb{Z} / 2 \mathbb{Z} \right) \times \left( \mathbb{Z} / 2 \mathbb{Z} \right) \times \left( \mathbb{Z} / 12 \mathbb{Z} \right) \times \left( \mathbb{Z} / 48 \mathbb{Z} \right) $, where $C$ is the rational cuspidal subgroup. \end{cor} \section{Proof of Theorem $1$} The quadritangent planes required for the calculations in the proof of Theorem 1 are stated in the \texttt{GitHub} directory given below. They were computed using the methods described in Sections \ref{Sch}, \ref{apr} and \ref{alg}. The points of their respective schemes of quadritangents were approximated using complex approximations and precise expressions were computed using the lattice technique described in Section \ref{CA}. For $N$ = $63$, $72$ and $75$ we were able to compute the entire $2$-torsion subgroup of the modular Jacobian $J_{0}\left( N \right)$ using the computed quadritangents, and deduce the rational $2$-torsion subgroup as in the $N = 42$ case presented in detail in Section \ref{g5ex}. For details of these computations, the Galois orbits required, and the \texttt{Magma} code used to compute them, see \begin{center} \href{ https://github.com/ElviraLupoian/TwoTorsionSubgroups}{ https://github.com/ElviraLupoian/TwoTorsionSubgroups}. \end{center} In some cases we also used the cusps of the modular curves to simplify our computations. All the modular curves considered are non-hyperelliptic, of genus $5$ and their Jacobians have rank $0$ over $\Q$. The canonical models of the curves and cusps on these models used in our computations are as in \cite{OS}. In the case $N$ = $55$, we were unable to compute the entire $2$-torsion subgroup. We did compute sufficiently many quadritangents, defined over a degree $12$ number field $K$, such that $J_{0}\left( 55 \right) \left( K \right) \left[ 2 \right] \cong \left( \Z / 2 \right)^{8}$, and from this we deduced the rational $2$-torsion subgroup. The number field $K$ is not a Galois extension of $\Q$, and the computations are slightly different from those presented in Section \ref{g5ex}. An overview of this computation is given in Section \ref{X055}. In \cite{OS}, Ozman and Siksek prove $J_{0}\left(N \right) \left( \Q \right) = C_{0}\left( N \right) \left( \Q \right)$ up to $2$-torsion for $N = 42, 55, 63, 72,75$. Our computations show that the $2$-torsion parts are also equal, and hence this verifies the generalised Ogg conjecture for these values. \subsection{$X_{0}\left( 55 \right)$} \label{X055} The canonical model of the curve $X = X_{0}\left( 55 \right)$ used to derive a scheme of quadritangents is the intersection of the three quadrics: \begin{align*} f_{1} & = x_{1}x_{3} - x_{2}^{2} + x_{2}x_{4} - x_{2}x_{5} - x_{3}^{2} + 3x_{3}x_{4} - 2x_{4}^{2} - 4x_{5}^{2}, \\ f_{2} & = x_{1}x_{4} - x_{2}x_{3} + 2x_{2}x_{4} - 2x_{2}x_{5} - 2x_{3}^{2} + 4x_{3}x_{4} + 5x_{3}x_{5}-2x_{4}^{2} - 4x_{4}x_{5} - 3x_{5}^{2}, \\ f_{3} & = x_{1}x_{5} - 2x_{2}x_{5} - x_{3}^{2} + 2x_{3}x_{4} + x_{3}x_{5} - x_{4}^{2}.
\end{align*} and the cusps on this model are: \begin{align*} & c_{0} = \left( -2 : 2 : 7 : 6 : 1 \right),\\ & c_{1} = \left( 3 : 2 : 2 : 1 : 1 \right), \\ & c_{2} = \left( 1 : 0 :0 :0 :0 \right),\\ & c_{3} = \left( 0 : 0 : 1 : 1 : 0 \right), \end{align*} as computed in \cite{OS}, where the authors show \begin{center} $C_{0}\left( 55 \right) \left( \Q \right) \cong \left( \Z / 10 \Z \right) \times \left( \Z / 20 \Z \right)$, \\ $J_{0}\left( 55 \right) \left( \Q \right) / C_{0}\left( 55 \right) \left( \Q \right) \cong 0 $ or $ \left( \Z / 2 \Z \right)$ or $ \left( \Z / 2 \Z \right)^{2}$. \end{center} Therefore to prove $J_{0}\left( 55 \right) \left( \Q \right) = C_{0}\left( 55 \right) \left( \Q \right)$, it is sufficient to show \begin{center} $J_{0}\left( 55 \right) \left( \Q \right) \left[ 2 \right] = C_{0}\left( 55 \right) \left( \Q \right) \left[ 2 \right]$. \end{center} Let $K$ be the number field defined by \begin{center} $f = u^{12} - 5u^{11} + 13u^{10} - 25u^9 + 39u^8 - 50u^7 + 53u^6 - 48u^5 + 37u^4 - 23u^3 + 12u^2 -4u + 1.$ \end{center} Using the method described in Sections \ref{Sch3}, \ref{apr2} and \ref{CA} we find 18 quadritangent planes defined over $K$. The equations for these can be found in the \texttt{GitHub} directory stated at the beginning of this section. Let $H$ be the subgroup of $2$-torsion points obtained from the divisors of the ratios of the equations defining these $18$ quadritangents. The ideal $\mathfrak{p} = \langle 47, 36 + \theta \rangle $, where $\theta$ is a generator of $K$, is a prime of norm $47$, and the map induced by reduction modulo $\mathfrak{p}$ \begin{center} $r_{\mathfrak{p}} : J_{0}\left( 55 \right) \left( K \right) \longrightarrow J_{0}\left( 55 \right) \left( \mathbb{F}_{\mathfrak{p}} \right) $ \end{center} is an injection on the torsion subgroup of $ J_{0}\left( 55 \right) \left( K \right) $. Using \texttt{Magma}, we find \begin{center} $J_{0}\left( 55 \right) \left( \mathbb{F}_{\mathfrak{p}} \right) \cong \left( \Z / 2 \Z \right)^{5} \times \left( \Z / 20 \Z \right)^{2} \times \left( \Z / 17220 \Z \right)$, \end{center} that the image of $H$ satisfies $r_{\mathfrak{p}}\left( H \right) \cong \left( \Z / 2 \Z \right)^{8}$, and hence \begin{center} $ J_{0}\left( 55 \right) \left( K \right) \left[ 2 \right] = H \cong \left( \Z / 2 \Z \right)^{8}.$ \end{center} Notably, $K$ is not a Galois extension of $\mathbb{Q}$, as its automorphism group is a cyclic group of order $6$. The subfield of $K$ fixed by its automorphism group is $\Q \left( \sqrt{-11} \right)$, and thus $K / \Q \left( \sqrt{-11} \right)$ is a Galois extension of degree $6$. Let $G = \Gal \left( K / \Q \left( \sqrt{-11} \right) \right)$. The $2$-torsion points of $J_{0}\left( 55 \right) \left( \Q \left( \sqrt{-11} \right) \right)$ are simply the $2$-torsion points of $J_{0}\left( 55 \right) \left( K \right)$ fixed by $G$, and we find \begin{center} $ J_{0}\left( 55 \right) \left( \Q \left( \sqrt{-11} \right) \right) \left[ 2 \right] \cong \left( \Z / 2 \Z \right)^{3}$ \end{center} The action of $G$ on the 18 quadritangents has four orbits of size $6$, $6$, $3$ and $3$. We label these as $\{ P_{1}, \ldots, P_{6} \}$, $\{ Q_{1}, \ldots, Q_{6} \}$, $\{ R_{1}, R_{2}, R_{3} \}$, $\{ R_{4}, R_{5}, R_{6} \}$, where the elements of the above sets represent the defining equations of the quadritangents in each orbit. Let \begin{align*} F_{1} & = P_{1}\times \ldots \times P_{6}, \\ F_{2} & = Q_{1}\times \ldots \times Q_{6}, \\ F_{3} &= R_{1} \times R_{2} \times R_{3}, \\ F_{4} &= R_{4} \times R_{5} \times R_{6}.
\end{align*} These have coefficients belonging to $\Q \left( \sqrt{-11} \right)$. We denote by $\overline{F_{i}}$ the conjugate of $F_{i}$ by the non-trivial element of $ \tilde{G} = \Gal \left( \mathbb{Q}\left( \sqrt{-11} \right) / \mathbb{Q} \right)$, and let \begin{center} $D_{i} = \left[ \frac{1}{2} \text{div} \left( \frac{F_{i+1}}{F_{1}} \right) \right] $ for $ i = 1, 2,3$ \\ $\overline{D_{j}} = \left[ \frac{1}{2} \text{div}\left( \frac{\overline{F_{j}}}{F_{1}} \right) \right] $ for $j = 1,2, 3, 4$. \end{center} These yield divisor classes that are $2$-torsion, and the Galois action on them is clear. Using the cusps stated above and the map $r_{\mathfrak{p}}$ we can find generators of the $2$-torsion part of the cuspidal subgroup \begin{center} $C_{0}\left( 55 \right) \left( \Q \right) \left[ 2 \right] = \left( \mathbb{Z} /2 \mathbb{Z} \right) C_{1} + \left( \mathbb{Z} /2 \mathbb{Z} \right) C_{2} $, \end{center} where $C_{1} = \left[-5c_{2} + 5c_{3}\right] $ and $C_{2} = \left[ -10c_{1} + 10c_{2} \right] $. Let $\Tilde{H}$ be the subgroup of $J_{0}\left( 55 \right) \left( \Q \left( \sqrt{-11} \right) \right) \left[ 2 \right]$ generated by $C_{1}$, $C_{2}$, $D_{i}$ with $i = 1, 2, 3$ and $\overline{D_{j}}$ with $j= 1, 2, 3, 4$. The map $r_{\mathfrak{p}} $ is an injection when restricted to $\Tilde{H}$, and we use it to show $\Tilde{H} \cong \left( \Z / 2 \Z \right)^{3}$, and hence $\Tilde{H} = J_{0}\left( 55 \right) \left( \Q \left( \sqrt{-11} \right) \right) \left[ 2 \right]$. Taking Galois invariants we obtain $ J_{0}\left( 55 \right) \left( \Q \right) \left[ 2 \right] \cong \left( \Z / 2 \Z \right)^{2}$, and hence \begin{center} $J_{0}\left( 55 \right) \left( \mathbb{Q} \right) \left[ 2 \right] = C_{0}\left( 55 \right) \left( \Q \right) \left[ 2 \right] = \left( \mathbb{Z} /2 \mathbb{Z} \right)C_{1} + \left( \mathbb{Z} /2 \mathbb{Z} \right)C_{2} $. \end{center} This completes our calculation. \begin{rem} This is the only curve for which we were unable to calculate the whole $2$-torsion subgroup $J_{0}\left( 55 \right) \left( \overline{ \mathbb{Q}} \right) \left[ 2 \right] \cong \left( \mathbb{Z} /2 \mathbb{Z} \right)^{10} $. It is probable that the $2$-torsion subgroup is defined over the degree $24$ number field defined by \begin{center} $ f \left( u \right) = 888358082666609u^{24} - 686137237735072u^{23} + 965478109129036u^{22} - 108753611253152u^{21} + 1046333329183210u^{20} - 462274837855648u^{19} + 986194062109068u^{18} - 264174312478816u^{17} + 521023423262647u^{16} - 224217460467776u^{15} + 265604493047384u^{14} - 67790597560640u^{13} + 44563612667308u^{12} + 4777480913088u^{11} + 1939479463608u^{10} + 3337865504320u^{9} + 104055137263u^{8} + 362477031136u^{7} + 105446733532u^{6} + 3486289120u^5 + 14677281802u^4 + 40615136u^3 + 680431932u^2 - 24640480u + 1394761. $ \end{center} All the quadritangents found are defined over this number field. The field has few prime ideals of small norm, making our computations impractical. \end{rem} \bibliographystyle{abbrv} \bibliography{ref} \end{document}
2205.12930v2
http://arxiv.org/abs/2205.12930v2
Kinetic Schauder estimates with time-irregular coefficients and uniqueness for the Landau equation
\documentclass{amsart} \usepackage[utf8x]{inputenc} \usepackage{amsmath, amsthm, amssymb} \usepackage[usenames,dvipsnames,svgnames,table]{xcolor} \usepackage[margin=1.3in]{geometry} \usepackage{enumerate} \usepackage{dsfont} \usepackage{cleveref} \usepackage{cite} \usepackage{mathtools} \usepackage[normalem]{ulem} \usepackage{comment} \usepackage{autonum} \Crefname{Assumption}{Assumption}{Assumptions} \Crefname{Theorem}{Theorem}{Theorems} \Crefname{Lemma}{Lemma}{Lemmas} \Crefname{Corollary}{Corollary}{Corollaries} \Crefname{Proposition}{Proposition}{Propositions} \Crefname{Theorem}{Theorem}{Theorems} \Crefname{Conjecture}{Conjecture}{Conjectures} \Crefname{Remark}{Remark}{Remarks} \newtheorem{Theorem}{Theorem}[section] \newtheorem{Proposition}[Theorem]{Proposition} \newtheorem{Lemma}[Theorem]{Lemma} \newtheorem{definition}[Theorem]{Definition} \newtheorem*{Remark}{Remark} \newtheorem{corollary}[Theorem]{Corollary} \newtheorem{conjecture}[Theorem]{Conjecture} \newcommand{\E}{\mathbb E} \newcommand{\N}{\mathbb N} \newcommand{\PP}{\mathbb P} \newcommand{\R}{\mathbb R} \newcommand{\T}{\mathbb T} \newcommand{\Z}{\mathbb Z} \newcommand{\cW}{W} \newcommand{\eps}{\varepsilon} \newcommand{\e}{\eps} \newcommand{\sgn}{\mbox{sgn}} \newcommand{\He}{H_\eps} \newcommand{\vp}{\varphi} \newcommand{\sech}{\mbox{sech}} \newcommand{\dd}{\, \mathrm{d}} \newcommand{\dst}{\mbox{dist}} \newcommand{\osc}{\mbox{osc}} \newcommand{\spt}{\mbox{spt}} \DeclareMathOperator{\Tr}{tr} \newcommand{\tr}{\Tr} \DeclareMathOperator{\id}{Id} \newcommand{\cS}{\mathcal{S}} \newcommand{\1}{\mathds{1}} \newcommand{\vv}{\langle v\rangle} \newcommand{\vvp}{\langle v'\rangle} \newcommand{\vvO}{\langle v_0\rangle} \newcommand{\ww}{\langle w \rangle} \newcommand{\ul}{\rm ul} \newcommand{\vve}{\langle v_\e\rangle} \newcommand{\ec}{{\eps_{\rm cont}}} \newcommand{\dc}{{\delta_{\rm cont}}} \newcommand{\Ckin}{C_{\rm kin}} \newcommand{\les}{\lesssim} \newcommand{\kin}{\text{kin}} \newcommand{\loc}{\text{loc}} \newcommand{\calpha}{C_{x}^{\alpha/3}C_{v}^{\alpha}} \newcommand{\calphamu}{C_{x}^{\mu\alpha/3}C_{v}^{\mu\alpha}} \newcommand{\logalpha}{C_x^{\alpha/3} \log(1/C_v)^{-\theta}} \newcommand{\logalphamu}{C_x^{\mu\alpha/3} \log(1/C_v)^{-\mu\theta}} \newcommand{\logalphamup}{C_x^{\mu'\alpha/3} \log(1/C_v)^{-\mu'\theta}} \newcommand{\logalphamupp}{C_x^{\mu''\alpha/3} \log(1/C_v)^{-\mu''\theta}} \newcommand{\logvalpha}{\log(1/C_v)^{-\theta}} \newcommand{\newcalpha}{C_{v}^{2\alpha/3}C_{x}^{2\alpha/9}} \newcommand{\vvo}{\langle v_0 \rangle} \newcommand{\cP}{\mathcal P} \renewcommand{\epsilon}{\eps} \newcommand{\Ckfp}{C_{\rm kfp}} \newcommand{\cSro}{\mathcal{S}_{z_0,r_0}} \newcommand{\CH}{\textcolor{red}} \newcommand{\WW}{\textcolor{blue}} \DeclareMathOperator{\dist}{dist} \DeclareMathOperator{\Id}{Id} \DeclareMathOperator{\supp}{supp} \DeclareMathOperator{\argmin}{argmin} \DeclareMathOperator{\essinf}{essinf} \def\comma{ {\rm ,\qquad{}} } \newcommand{\be}{\begin{equation}} \newcommand{\ee}{\end{equation}} \newcommand{\ns}{{\rm ns}} \newcommand{\s}{{\rm s}} \numberwithin{equation}{section} \title[Time-irregular Schauder estimates and the Landau equation]{Kinetic Schauder estimates with time-irregular coefficients and uniqueness for the Landau equation} \author{Christopher Henderson} \address{Department of Mathematics, University of Arizona, Tucson, AZ 85721} \email{[email protected]} \author{Weinan Wang} \address{Department of Mathematics, University of Arizona, Tucson, AZ 85721} \email{[email protected]} \begin{document} \begin{abstract} We prove a 
Schauder estimate for kinetic Fokker-Planck equations that requires only H\"older regularity in space and velocity but not in time. As an application, we deduce a weak-strong uniqueness result of classical solutions to the spatially inhomogeneous Landau equation beginning from initial data having H\"older regularity in $x$ and only a logarithmic modulus of continuity in $v$. This replaces an earlier result requiring H\"older continuity in both variables. \end{abstract} \maketitle \section{Introduction} This paper is concerned with the regularity of kinetic Fokker-Planck type equations of the form \be\label{e.kfp_gen} (\partial_t + v\cdot\nabla_x) f = \tr(\bar a D^2_v f) + \bar b \cdot \nabla_v f + \bar c f + g \qquad\text{ in } (0,T)\times \R^d \times \R^d \ee and the applications of this regularity theory to the Landau equation, which, roughly, is a fundamental model from gas dynamics for the evolution of a density of colliding particles~\cite{villani2002review, mouhot2018review}. Interest in the regularity of equations of the form~\eqref{e.kfp_gen} dates back to Kolmogorov~\cite{Kolmogorov1934}, who studied it with the choices $\bar a = \Id$, $\bar b = v$, and $\bar c = d$. Kolmogorov explicitly computed the fundamental solution, which readily yields smoothing\footnote{It does not seem to be explicitly stated in~\cite{Kolmogorov1934} that Kolmogorov noticed the smoothing effect. As a result, it is not clear when this ``hypoelliptic'' behavior was first identified in the simple setting Kolmogorov considered.} of $f$ in all variables despite only being elliptic in the $v$-variable. We note two other computations of the fundamental solution in more general settings by Il'in~\cite{Ilin} and Weber~\cite{Weber}. Eventually the observation that, in the setting of~\eqref{e.kfp_gen}, regularity in $v$ transfers to regularity in $x$ due to the transport operator $\partial_t + v\cdot\nabla_x$ led to H\"ormander's development of the theory of hypoellipticity~\cite{Hormander}. Over the past few decades, a robust understanding of the role of the transport operator $\partial_t + v\cdot\nabla_x$ in regularity theory has been developed in the setting of Sobolev spaces. The literature is truly vast, so we only cite a few prominent examples~\cite{villani_hypo,GLPS,Bouchut}; although, we encourage the reader to explore the references therein and the work that developed as a result. More recently, there has been interest in precise quantitative estimates of regularity of solutions to kinetic equations in analogy with the regularity theory for parabolic equations. In particular, the interest has been in the development of estimates in {\em continuity spaces}, such as H\"older spaces. A suitable Harnack inequality has been proven~\cite{GuerandMouhot, GIMV, Wang-Zhang, Wang-Zhang-2, Zhu2021}, which yields the H\"older regularity of solutions to {\em divergence form} kinetic Fokker-Planck equations when the coefficients are merely bounded and elliptic-in-$v$ (note that~\eqref{e.kfp_gen} is in {\em non-divergence form}). A Harnack inequality for non-divergence form kinetic operators remains elusive~\cite{SilvestreOpen}. Additionally, Schauder estimates have been deduced and applied to various kinetic models~\cite{ImbertMouhot-toy, Manfredini, Polidoro, HS, bramanti2007schauder} (see also~\cite{Hao-2020, imbert2018schauder,ImbertSilvestre_survey} for estimates in the kinetic integro-differential setting). 
These estimates yield bounds on higher H\"older regularity of the solution as long as the coefficients $\bar a$, $\bar b$, and $\bar c$ are H\"older continuous in all variables. Our interest here is to investigate the minimal assumptions on the coefficients in~\eqref{e.kfp_gen} for proving the Schauder estimates. As we detail below, this is inspired by the connection between this question and the conditions needed to prove uniqueness of solutions to the Landau equation. Indeed, despite being nonlinear, the coefficients of the highest order terms in the Landau equation enjoy {\em better} regularity in $v$ than $f$ does. It is, thus, natural to hope that only regularity in $v$ is necessary to prove the Schauder estimates. We note that there are a number of related equations with similar structure to which the methods developed here may be applied: e.g., isotropic Landau~\cite{GualdaniZamponi-Isotropic-Landau,Isotropic-Landau, Gualdani-Guillen,Gualdani-review}, the Imbert-Mouhot toy model~\cite{Anceschi-Zhu,ImbertMouhot-toy}, and the Vlasov-Poisson-Landau equation~\cite{Guo-VPL}. For parabolic equations, minimal assumptions for the Schauder estimates similar to those considered here were first investigated by Brandt~\cite{Brandt}, who showed that H\"older regularity in $t$ is not necessary to establish partial Schauder estimates. More precisely, one needs only have boundedness in $t$ and H\"older continuity in the spatial variables $x$ in order to show that $D^2_x f$ is H\"older continuous in $x$. Knerr~\cite{Knerr} later strengthened this to deduce time regularity of $f$ under the same assumptions. These two papers are the main inspiration for the present manuscript. Their strategies are based on the comparison principle and are quite different from that used here, as we detail below. There has been a large body of literature on this over the ensuing decades, see, e.g.,~\cite{DongJinZhang, DongSeick, Lieberman, SinestrarivonWahl}. In this paper, we show that H\"older regularity in the time variable $t$ is not necessary to establish partial Schauder estimates for kinetic Fokker-Planck equations. As an application of this, we deduce a weak-strong uniqueness result for classical solutions of the Landau equation starting from initial data that is $C^\alpha$ in $x$ and has a logarithmic modulus of continuity in $v$. This improves upon an earlier uniqueness result in which H\"older regularity was required in both variables, and it indicates that the role of regularity in the uniqueness theory may be more technical than fundamental (although probably not nonexistent). Below, we expand on this in detail and formalize a conjecture on less restrictive assumptions for uniqueness to hold. We now make our main results more precise. \subsection{Schauder estimates} For simplicity, we consider the slightly less general equation \be\label{e.kfp} (\partial_t + v\cdot\nabla_x) f = \tr(\bar a(t,x,v) D^2_v f) + \bar c(t,x,v) f + g(t,x,v). \ee We note, however, that this essentially does not lose any generality. This is discussed after the statement of the main result of this section \Cref{t.Schauder}. We assume that $\bar a$ is uniformly elliptic and $\bar c$ and $g$ are bounded: there is $\Lambda>1$ such that \be\label{e.ellipticity} \frac{1}{\Lambda} \Id \leq \bar a(t,x,v) \leq \Lambda \Id \qquad\text{and}\qquad |\bar c(t,x,v)|, |g(t,x,v)| \leq \Lambda. \ee We also assume that $\bar a$, $\bar c$, and $g$ are H\"older continuous in $(x,v)$: $\bar a, \bar c, g \in \calpha$. 
The notation for this H\"older space is defined in \Cref{s.notation}. Our first result is a general Schauder estimate that does not require the $t$-regularity of the coefficients. Its proof is found in \Cref{s.Schauder}. \begin{Theorem}\label{t.Schauder} Fix $\alpha \in (0,1)$. Assume that~\eqref{e.ellipticity} holds and $f, D^2_v f, \bar a, \bar c, g \in \calpha(Q_1)$. Then \begin{equation}\label{e.w01151} \begin{split} [f]_{C_x^{(2+\alpha)/3}(Q_{1/2})} + [D^2_v f]_{C_x^{\alpha/3}C^\alpha_v(Q_{1/2})} \lesssim &\left(1+ [c]_{\calpha(Q_1)} + [a]_{\calpha(Q_1)}^{1+ \frac{2}{\alpha}}\right) \|f\|_{L^\infty(Q_1)} \\& + \left(1 + [a]_{\calpha(Q_1)}\right) [g]_{\calpha(Q_1)}. \end{split} \end{equation} The implied constant depends only on $d$, $\alpha$, and $\Lambda$. \end{Theorem} We note that a simple consequence of \Cref{t.Schauder} and~\eqref{e.kfp} is that \be\label{e.c52408} \begin{split} [(\partial_t + v\cdot\nabla_x) f]_{\calpha(Q_{1/2})} \lesssim &\left(1+ [c]_{\calpha(Q_1)} + [a]_{\calpha(Q_1)}^{1+ \frac{2}{\alpha}}\right) \|f\|_{L^\infty(Q_1)} \\& + \left(1 + [a]_{\calpha(Q_1)}\right) [g]_{\calpha(Q_1)}. \end{split} \ee Before commenting on the proof, we note that regularity in $t$ can easily be obtained at this point by two different methods. The first is the hypoelliptic approach of \cite[Lemma~2.8]{ImbertMouhot-toy}. The technique of the authors shows that shifts in $t$ decompose into a shifts in $v$ as well as shifts in transport (roughly, shifts according to the operator $\partial_t + v\cdot\nabla_x$). The $v$-regularity is provided by \Cref{t.Schauder} and the transport regularity is provided by~\eqref{e.c52408}. The second approach is to notice that time regularity can easily be obtained in the course of establishing \Cref{t.Schauder} with the same methods. We did not opt for this due to (i) the desire for simplicity, (ii) the fact that time regularity does not play a role in our application (\Cref{t.Landau}), and (iii) the fact that the hypoelliptic approach of \cite[Lemma~2.8]{ImbertMouhot-toy} yields it in a simple manner as a consequence of \Cref{t.Schauder}. Unfortunately, both approaches only provide H\"older continuity in $t$ of $f$ and do not yield regularity of $\partial_t f$. For greater regularity in $t$, it appears that one needs more regularity of the coefficients. We refer to~\cite{HS}. \subsubsection{Strategy of the proof} Our approach is along the lines of~\cite{HS}. The proof proceeds with the same main two steps of every proof of Schauder estimates -- direct estimates for a ``homogeneous'' equation and then perturbing off of this ``homogeneous'' equation using the regularity of the coefficients. The first step is slightly different from that of~\cite{HS}. For us, the relevant homogeneous equation is the one where $\bar a(t,x,v) = \bar a(t)$. Roughly, this allows us to perturb off of this case by using that, for $(x_0,v_0)$ fixed and $(x,v) \approx (x_0,v_0)$, \be |\bar a(t,x,v) - \bar a(t,x_0,v_0)| \leq (|x-x_0|^{\alpha/3} + |v-v_0|^\alpha) [\bar a]_{\calpha} \ll 1. \ee Notice that this depends only on the regularity of $\bar a$ in $(x,v)$ and not in $t$. We refer to this case, that is,~\eqref{e.kfp} when $\bar a$ does not depend on $(x,v)$ and $\bar c \equiv 0$, as the $(x,v)$-homogeneous equation. It is worth discussing the $(x,v)$-homogeneous equation further. 
Many proofs of the Schauder estimates for parabolic or kinetic equations (Brandt's \cite{Brandt} being a notable exception), hinge on the scaling in $t$ of moments of the fundamental solution $\Gamma_{\bar a}$, that is, integrals in $(x,v)$ of $\Gamma_{\bar a}$ with polynomial weights. To obtain the estimates here, we compute the fundamental solution $\Gamma_{\bar a}$ explicitly (see \Cref{p.gamma_a}). In~\cite{HS}, where the relevant homogeneous equation is $\bar a \equiv \Id$, it is essentially a basic calculus exercise to go from the explicit form of $\Gamma_{\Id}$ to the correct moment estimates. In our setting, however, it is more difficult and requires a somewhat involved proof based on the dynamics of some matrix valued terms (see \Cref{l.w09032}). Indeed, from \Cref{p.gamma_a} it is not even obvious that $\Gamma_{\bar a}$ is integrable in $(x,v)$. The second step, that is, the procedure of perturbing off of the homogeneous equation, proceeds as usual. \subsubsection{Estimates for~\eqref{e.kfp_gen} versus~\eqref{e.kfp}} As we mentioned above, there is essentially no loss in generality in considering~\eqref{e.kfp} in place of~\eqref{e.kfp_gen}. The reason for this is that, one can obtain~\eqref{e.kfp} from~\eqref{e.kfp_gen} by letting \be g_{\eqref{e.kfp}} = \bar b \cdot \nabla_v f + g_{\eqref{e.kfp_gen}}. \ee Here, to differentiate between the forcing term $g$ in~\eqref{e.kfp_gen} and the forcing term $g$ in~\eqref{e.kfp}, we use the equation number as the subscript. In this case, after applying \Cref{t.Schauder}, one has a $\calpha$-norm of $\nabla_v f$ on the right hand side of~\eqref{e.w01151}. By interpolating, one can ``absorb'' this lower order term into the left hand side of~\eqref{e.w01151}. While this is complicated by the different domains on which the norms are based, with $Q_{1/2}$ appearing on the left-hand side of~\eqref{e.w01151} and $Q_1$ appearing on the right-hand side of~\eqref{e.w01151}, it is generally possible to do, depending on the application. The reader will surely have noticed that the same procedure should apply to the $\bar c$ term as well, and so the simplest presentation would consider only the case of~\eqref{e.kfp} with $\bar c \equiv 0$. This is true; however, for the application we have in mind (\Cref{t.Landau}), it streamlines future computations to already have the explicit dependence on $[\bar c]_{\calpha}$. \subsubsection{Further comments on related time irregular Schauder estimates}\label{s.further_Schauder} As mentioned above, to our knowledge, the first result in this direction is due to Brandt~\cite{Brandt}, whose approach is entirely based on the comparison principle. Indeed, in a very simple short paper, Brandt establishes Schauder estimates with precise dependence on the coefficients via the construction of an upper barrier for an appropriate finite difference of the solution to the parabolic equation under consideration. Later, Knerr~\cite{Knerr} improved on the regularity obtained by Brandt by showing that, surprisingly, solutions had time regularity as well. Knerr's strategy was also based on the comparison principle. Unfortunately, despite~\eqref{e.kfp} enjoying a comparison principle, we were unable to adapt Brandt's strategy to the kinetic case. We give a heuristic description of the obstruction. One expects ${\partial_t + v\cdot\nabla_x}$ in the kinetic case to act analogously to $\partial_t$ in the parabolic case. 
A major difference, however, is that shifts in $t$ (the appropriate shifts related to $\partial_t$ regularity) have a directionality: time is one dimensional and there is a preferred direction, often called the ``arrow of time.'' Unfortunately, it is not clear what the analogue of this is in the kinetic setting with the operator $\partial_t + v\cdot\nabla_x$. Very roughly, this is the roadblock to adapting Brandt's argument. We note that this seems to be related to the impediment to proving a Harnack inequality for~\eqref{e.kfp_gen} using the methods of Krylov and Safonov; see \cite[Section~8.2]{SilvestreOpen} for further discussion. A few days prior to posting this manuscript to the arXiv, another very interesting paper was posted by Biagi and Bramanti~\cite{BiagiBramanti} that investigates a similar problem to \Cref{t.Schauder}. The authors consider ultraparabolic equations, a general class of equations that includes kinetic equations as a particular example, and they prove a Schauder estimate for time-irregular coefficients. Their proof proceeds along the same lines as ours; that is, they derive an explicit formula for the fundamental solution and use it to deduce the Schauder estimates. Their paper is focused entirely on the question of Schauder estimates of the form \Cref{t.Schauder} for a general family of ultraparabolic equations, and, as such, they do not consider applications of their theorem, as we do in \Cref{s.Landau_intro}. Their work builds upon an earlier work of Bramanti and Polidoro~\cite{BramantiPolidoro} in which the fundamental solution of a class of ultraparabolic operators was studied in depth. In particular, the authors construct it and establish that it has the appropriate regularity and Gaussian bounds. We also mention connections to the other very recent preprint by Lucertini, Pagliarani, and Pascucci~\cite{LPP} in which the authors deduce optimal bounds on the higher regularity of the fundamental solution. The estimates in~\cite{LPP} are strong enough to replace \Cref{l.w09031} in our proof of \Cref{t.Schauder}. It seems likely that one could establish \Cref{t.Schauder} via an alternative approach to the Schauder estimates using the results of~\cite{LPP} directly. \subsection{The Landau equation} \label{s.Landau_intro} The Landau equation has the form: \be\label{e.Landau} (\partial_t + v\cdot\nabla_x)f = \tr(\bar a^f D^2_v f) + \bar c^f f \qquad\text{ in } (0,T)\times \R^3\times\R^3, \ee where, for any function $h: \R^3 \to \R$, \be\label{e.coefficients} \begin{split} &\bar a^h(t,x,v) = a_\gamma \int_{\R^3} \left(\Id - \frac{w\otimes w}{|w|^2}\right) |w|^{2+\gamma} h(v-w) \, dw \\& \bar c^h(t,x,v) = \begin{cases} c_\gamma \int_{\R^3} |w|^\gamma h(v-w) \, dw \qquad&\text{ for } \gamma > -3,\\ c_\gamma h \qquad&\text{ for } \gamma = -3. \end{cases} \end{split} \ee Here, $a_\gamma$ and $c_\gamma$ are positive constants whose exact value plays no role in the analysis and $\gamma \in [-3,0)$. The physically relevant case is $\gamma=-3$. We note that the regime $\gamma < 0$ is often called the {\em soft potentials} case and that the case $\gamma \in [0,1]$ is considered in many works, but we do not address it here. We also note that~\eqref{e.Landau} is more often written in an equivalent divergence form, although that is not convenient for our work below. We refer the reader to~\cite{villani2002review, mouhot2018review} for a general discussion of the Landau equation, its physical relevance, and its mathematical history.
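For later use, we note the elementary but convenient convolution structure of these coefficients: for each fixed $(t,x)$, writing $K_\gamma(w) = a_\gamma\big(\Id - \tfrac{w\otimes w}{|w|^2}\big)|w|^{2+\gamma}$, we have \be \bar a^h(t,x,v) = \big(K_\gamma * h(t,x,\cdot)\big)(v) \qquad\text{and, for } \gamma>-3, \qquad \bar c^h(t,x,v) = c_\gamma \big(|\cdot|^\gamma * h(t,x,\cdot)\big)(v). \ee This is the structure behind the gain of $v$-regularity of $\bar a^f$ over $f$ discussed below; its absence for $\bar c^f$ when $\gamma = -3$, where $\bar c^f = c_\gamma f$, is one source of difficulty in our arguments.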
We mention only that~\eqref{e.Landau} is nonlocal (that is, its coefficients at a point $(t,x,v)$ depend on $f$ at other points $(t,x,v')$) and quasilinear (that is, the coefficient of the highest order term $\bar a^f$ depends on $f$). As a result, the unconditional global well-posedness of classical solutions to~\eqref{e.Landau} is an extremely difficult problem that appears to be out of reach for the time being. A new approach to this problem was initiated by Silvestre~\cite{Silvestre_Landau}, who proposed to study~\eqref{e.Landau} with methods coming from parabolic regularity theory under certain physically reasonable boundedness assumptions on the mass, energy, and entropy densities (see~\cite{ImbertSilvestre_survey} for a discussion of a similar program for the related Boltzmann equation). We do not discuss these assumptions further, and we refer to this program as the {\em conditional regularity} program in the sequel. These ideas have led to many new results, see, e.g.~\cite{GIMV, Cameron-Silvestre-Snelson, HS, ImbertMouhot-toy}. The most relevant work to the present setting coming out of this program is~\cite{HST2019rough}, which leveraged the ideas and theorems of the previous works~\cite{GIMV, Cameron-Silvestre-Snelson, HS} to obtain local well-posedness with fairly ``rough'' initial data. In particular, the existence result in~\cite{HST2019rough} is in a weighted $L^\infty$ space, while the uniqueness result supposes, additionally, that the initial data is $C^\alpha$ for some $\alpha>0$. One of the key insights used in the conditional regularity program is that, as previously mentioned, $\bar a^f$ enjoys better regularity in $v$ than $f$ does. Indeed, $\bar a^f\in C^\alpha_v$ for any $\alpha \in (0,1)$ as long as $f$ is bounded and decays sufficiently quickly in $v$. The gap between the existence and uniqueness results of~\cite{HST2019rough}, described above, partially reflects the fact that the authors were not able to leverage this insight. Our next result, a new uniqueness result for the Landau equation~\eqref{e.Landau}, provides a path in this direction. \subsubsection{Uniqueness for the Landau equation} We require the following non-degeneracy condition on $f_{\rm in}$: there exist $r$, $\delta$, and $R>0$ so that \be\label{e.nondegeneracy} \text{for every } x\in \R^3, \text{ there is } v_x \in \R^3 \text{ such that } f_{\rm in}(x, \cdot) \geq \delta \1_{B_r(x,v_x)}. \ee The reason for~\eqref{e.nondegeneracy} is that, from it, one can obtain a pointwise lower bound for $f$. This, in turn, yields the local-in-$v$ uniform ellipticity of $\bar a^f$. This was originally shown in \cite[Theorem~1.3]{HST2018landau}; see also \cite[Lemma~2.5]{HST2019rough} for the connection between the lower bound on $f$ and the ellipticity of $\bar a^f$. We are now ready to state the second main result. It is proved in \Cref{s.Landau}. \begin{Theorem}\label{t.Landau} Fix $k, \theta > 0$ and $\alpha \in (0,1)$. Assume that $f_{\text{in}} \in \logalpha \cap L^{\infty,k}$ and satisfies~\eqref{e.nondegeneracy}. Let $f \in L^{\infty,k}([0,T]\times \R^6)$ be any solution of~\eqref{e.Landau} constructed in \cite[Theorem 1.2]{HST2019rough} starting from initial data $f_{\rm in}$. Fix any uniformly continuous function $g \in L^{\infty, 5+\gamma+ \eta}([0,T]\times\R^6)$, where $\eta>0$, such that $g$ solves equation \eqref{e.Landau} weakly (in the sense of~\cite{HST2019rough}) and $g(t,x,v)\rightarrow f_{\text{in}}$ as $t\searrow 0$.
Then, if $k$ is sufficiently large, depending on $\theta$, $\alpha$, and $\gamma$, and \be \frac{\theta}{2} \frac{\alpha}{2+\alpha} > 1, \ee there is $T_1 \in (0,T]$, depending only on $f_{\rm in}$, $\alpha$, $\theta$, $k$ and $\gamma$, such that $f=g$ in $[0,T_1]\times \R^6$. If $k = \infty$ then $T_1 = T$. \end{Theorem} We note that the nonstandard continuity space $\log(1/C_v)^{-\theta}$ is defined in \Cref{s.notation} below, as are all of the notational conventions we use. We also note that the particular type of weak solution plays almost no explicit role in our analysis since we immediately deduce various regularity properties of $g$ from previous results. Hence, our choice of weak solution is made simply so that it is compatible with the previous results in~\cite{GIMV, HS}. Roughly, though, $g$ is in an appropriate kinetic $H^1$-space and solves~\eqref{e.Landau} in the sense of integration against other kinetic $H^1$ test functions with compact support. \subsubsection{The strategy of the proof} We give a rough outline of the uniqueness argument used to prove \Cref{t.Landau}. For simplicity, we ignore all complications due to ``weights'' in this discussion, although these are required in the proof due to the fact that $\bar a^f$ is only defined when $f$ decays sufficiently quickly as $|v|\to\infty$. The proof follows the standard outline -- find an equation for the difference $f-g$ and use a Gronwall-type argument. This, however, is complicated by the fact that~\eqref{e.Landau} is nonlocal and quasilinear, that is, the highest order coefficient is nonlinear in $f$. As a result, we require an $L^\infty$ bound on $D^2_v f$. We obtain such a $W^{2,\infty}_v$-bound by applying a scaled version of \Cref{t.Schauder}. Were we to only assume that $f \in L^\infty$, such an estimate would degenerate like $1/t$ as $t\to0$ (recall that $f_{\rm in}\notin W_v^{2,\infty}$). This can be seen easily by scaling arguments. However, by propagating forward bounds on the $\logalpha$-norm, we are able to, via interpolation, obtain a bound that degenerates like \be \|D^2_v f(t)\|_{L^\infty(\R^6)} \lesssim \frac{1}{t \left( \log\frac{1}{t}\right)^{\frac{\theta}{2}{\frac{\alpha}{2+\alpha}}}}. \ee Crucially, this is integrable in $t$ near $t=0$, which allows the Gronwall argument to close. One key step above, scaling the Schauder estimates, was developed in~\cite{HS}. The other key step above, in which we propagate the $\logalpha$-norm, relies on the general ideas of \cite[Proposition~4.4]{HST2019rough}, in which the $\calpha$-norm was propagated. It is, however, significantly more complicated in our case. The reason being that, while $\bar a^f$ is $v$-H\"older continuous, regardless of the regularity of $f$, $\bar c^f$ does not enjoy this property. In particular, when $\gamma = -3$, which is the physically relevant case, $\bar c^f = c_\gamma f$. As such, it is exactly as irregular as $f$. Roughly, we overcome this by obtaining a bound on $\|D^2_v f(t_0)\|_{L^\infty(\R^6)}$ that depends on the $\logalpha$-norm of $f$ as well as on $\|D^2_v f\|_{L^\infty([t_0/4,t_0]\times \R^6)}$. The appearance of this second term is exactly due to the (potential) irregularity of $\bar c^f$. In this bound, the coefficient of $\|D^2_v f\|_{L^\infty([t_0/4,t_0]\times \R^6)}$ is small. Hence, by a careful argument, we are able to absorb it back into the $W_v^{2,\infty}$-term at $t_0$, despite the difference in time domains. This step is contained in \Cref{p.w02091}. 
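For the reader's convenience, we record the elementary computation behind the integrability claim above: substituting $s = \log\tfrac{1}{t}$, for any $q > 1$, \be \int_0^{1/2} \frac{dt}{t\left(\log\frac{1}{t}\right)^{q}} = \int_{\log 2}^{\infty} \frac{ds}{s^{q}} < \infty, \ee and with $q = \frac{\theta}{2}\frac{\alpha}{2+\alpha}$ this is exactly the hypothesis appearing in \Cref{t.Landau}.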
The reason \Cref{t.Schauder} is useful in this application is the following. In \cite{HST2019rough}, $(x,v)$-H\"older regularity of $f$ is propagated from initial $(x,v)$-H\"older regularity. An additional argument shows that regularity is passed to $t$ as well. Stated imprecisely, if $f \in \calpha$ then $f \in C^{\alpha/2}_t\calpha$. The coefficients are then H\"older regular and the full Schauder estimates can be applied. As we only have $\logalpha$-regularity of $f$, the only regularity that could potentially be passed to $t$ is that with a $\log$ modulus. At best, then, $\bar a^f$ will be H\"older in $(x,v)$, due to the H\"older regularity of $f$ in $x$ and the fact that $\bar a^f$ is defined by convolution with a ``nice'' kernel in $v$, but with only a $\log$ modulus of continuity in $t$. Thus, the full Schauder estimates could not be applied. We point out a subtle additional benefit to the application of \Cref{t.Schauder} in place of the Schauder estimates of~\cite{HS}. When the estimates of~\cite{HS} are applied in~\cite{HST2019rough}, there is a loss of regularity between $\bar a^f$ and $f$ due to how time shifts interact with the appropriate notion of `kinetic distance\footnote{We have largely avoided discussing the kinetic distance since it plays no role in our analysis. Indeed, without shifts in time, which we need not consider due to our not considering time regularity, the kinetic distance collapses to $\calpha$. We point the interested reader to a clear discussion of the kinetic distance in~\cite[Section~2.1]{imbert2018schauder}.}'; see \cite[Lemma~2.7]{HST2019rough}. This is avoided here due to our not considering time shifts. As such, we achieve {\em sharper estimates} on the various quantities, such as $\|D^2_v f(t)\|_{L^\infty(\R^6)}$, as $t\searrow0$. \subsubsection{Related work} As mentioned above, \Cref{t.Landau} supersedes the earlier work~\cite[Theorem~1.4]{HST2019rough}, which required $(x,v)$-H\"older regularity of $f_{\rm in}$ for uniqueness to hold. We also mention the work of Anceschi and Zhu in~\cite{Anceschi-Zhu} on a similar model. To our knowledge, the local well-posedness theory of the Landau equation is relatively unstudied, with more interest directed toward a related kinetic integro-differential equation, the Boltzmann equation. There, the first local well-posedness results are due to the AMUXY group~\cite{amuxy2010regularizing,amuxy1,amuxy2}. In particular, a general uniqueness result in an appropriate Sobolev space (of order $2s$, that is, twice the order of the differential operator in the equation) was proven in~\cite{amuxy2011uniqueness} for a restricted class of parameters. While~\cite{amuxy2011uniqueness} is an extremely nice result, we describe its limitations in slightly more detail in order to highlight the difficulties in our setting. Their result requires $H^{2s}_v$-regularity of solutions. The Landau equation essentially corresponds to the $s=1$ case. Were their result to apply, it would require $H^2_v$-regularity of $f$, which corresponds to $C^{1/2}_v$-regularity and is significantly more than we require here. Indeed, for reasons related to this, we note that uniqueness is, in some ways, more difficult for Landau than Boltzmann as the differential operator is of higher order. Additionally, their uniqueness result requires regularity of both solutions in contrast to our result that has only mild conditions on the other potential solution $g$. On the other hand, their result only requires boundedness in $x$.
The close-to-equilibrium and homogeneous setting for~\eqref{e.Landau} have seen more focus. This is probably due to the fact that one is often able to establish strong results such as global well-posedness and convergence to equilibrium. The state-of-the-art, techniques, and types of questions asked in these settings are quite different from those raised in the current manuscript, so we do not go into much detail here. We simply mention a few landmark results in each case. The story in the homogeneous setting (that is, when $f$ is independent of $x$) is somewhat complicated by the functional setting one works in, but we mention the works of~\cite{FournierGuerin,Alexandre-Liao-Lin, Wu, Fournier-CMP, Desvillettes-Villani-1, Desvillettes-Villani-2}. In the close-to-equilibrium setting (that is, when $f_{\rm in}$ is ``close'' to a Maxwellian of the form $\alpha e^{-|v|^2/\beta}$ for some $\alpha,\beta>0$), we refer to~\cite{Guo-2002, Mouhot-Neumann, villani-96, DLSS}. Both settings are extremely well studied and, as a result, we are only able to reference a small selection of the work completed over the past several decades. Outside of these settings, little is known about the global well-posedness of classical solutions. To our knowledge, the conditional result of~\cite{HST2019rough}, which yields global well-posedness as long as the mass and energy densities remain bounded in $t$ and $x$ in the case $\gamma > -2$ (or, in the case of $\gamma \leq -2$, if certain $L^p$-norms remain bounded), is currently the sharpest condition ruling out ``blow-up.'' \subsection{Two conjectures} We now formulate two conjectures regarding ways in which the results above might be strengthened. First, if we trust the analogy discussed above, that $\partial_t + v\cdot\nabla_x$ in the kinetic setting is similar to $\partial_t$ in the parabolic one, we are led to the following conjecture: \begin{conjecture} Fix any $\alpha \in (0,1)$. Assume that~\eqref{e.ellipticity} holds and that $f$, $D^2_vf$, $\bar a$, $\bar c$, $g \in C_v^\alpha(Q_1)$. Then \be \begin{split} [D^2_v f]_{C^\alpha_v(Q_{1/2})} \lesssim &\left(1+ [c]_{C^\alpha_v(Q_1)} + [a]_{C^\alpha_v(Q_1)}^{1+ \frac{2}{\alpha}}\right) \|f\|_{L^\infty(Q_1)} + \left(1 + [a]_{C^\alpha_v(Q_1)}\right) [g]_{C^\alpha_v(Q_1)}. \end{split} \ee The implied constant depends only on $d$, $\alpha$, and $\Lambda$. \end{conjecture} Notice that the conjectured result above does not require any $x$-regularity. It seems that a uniqueness result for the Landau equation is an immediate consequence of this. We state this roughly here: \begin{conjecture} In the setting of \Cref{t.Landau}, although assuming only that $f_{\rm in} \in \log(1/C_v)^{-\theta} \cap L^{\infty,k}$ (that is, we drop the H\"older regularity in $x$), the same weak-strong uniqueness conclusion holds as long as \be \theta > 2. \ee \end{conjecture} It is not clear that the above conjecture, were it true, would be sharp. There is a strong connection between regularity and uniqueness results. Indeed, recent work has established the nonuniqueness of irregular (weak) solutions of fluid equations, see, e.g.,~\cite{Camillo,Buckmaster-Vicol}. We also note the work of Kiselev, Nazarov, and Shterenberg, who, in the critical case of the fractal Burgers equation studied in~\cite{KiselevNazarovShterenberg}, see a situation similar to that of the Landau equation: rough solutions immediately become smooth but uniqueness is unknown without further regularity assumptions. 
In fact, despite the intense interest in~\cite{KiselevNazarovShterenberg}, uniqueness of these rough solutions remains open as far as we know. On the other hand, in the homogeneous ($x$-independent) case, where the Landau equation has more structure, uniqueness has been established through a probabilistic approach that yields bounds on the Wasserstein distance between two solutions~\cite{FournierGuerin,Fournier-CMP}. This result requires essentially no regularity of $f$, although it is only applicable in the homogeneous case. We expect the conjectures above to be difficult to establish for reasons related to the fundamental difference between $\partial_t$ in the parabolic setting and $\partial_t + v\cdot\nabla_x$ in the kinetic setting that was discussed in \Cref{s.further_Schauder}. \subsection{Notation and continuity spaces}\label{s.notation} \subsubsection{Points and kinetic cylinders} For succinctness, we often write \be z = (t,x,v), \quad \tilde z = (\tilde t, \tilde x, \tilde v), \quad\text{and}\quad z' = (t',x',v'). \ee For any $r>0$, we let \be Q_r = (-r^2, 0] \times B_{r^3} \times B_r, \ee where we use the convention that if the base point of a ball is not stated then it is the origin; that is \be B_r = B_r(0). \ee The reason for the choice of $Q_r$ is the natural scaling $(t,x,v) \mapsto (r^2 t, r^3 x, rv)$ associated to~\eqref{e.kfp}. \subsubsection{Continuity spaces} Throughout we work with some inhomogeneous continuity spaces, i.e., those in which different `amounts' of regularity are required in each variable. In particular, for any set $Q \subset \R\times \R^d \times \R^d$ and parameters $\alpha_1,\alpha_2 \in (0,1]$, we let \be C^{\alpha_1}_x C^{\alpha_2}_v(Q) := \{f : Q \to \R: f\in L^\infty(Q), [f]_{C^{\alpha_1}_x C^{\alpha_2}_v(Q)} < \infty\}, \ee where \be [f]_{C^{\alpha_1}_x C^{\alpha_2}_v(Q)} := \sup_{\substack{(t,x,v) \neq (t,x',v') \in Q,\\ |x-x'|, |v-v'| < 1/2}} \frac{|f(t,x,v) - f(t,x',v')|}{|x- x'|^{\alpha_1} + |v- v'|^{\alpha_2}}. \ee Finally, for the uniqueness result for the Landau equation, we define a space of functions whose modulus of continuity is logarithmic. Indeed, for $Q \subset \R\times \R^d \times \R^d$ and parameters $\alpha \in (0,1)$ and $\theta > 0$, we let \be \logalpha(Q) := \{f : Q \to \R: f\in L^\infty(Q), [f]_{\logalpha(Q)} < \infty\}, \ee where \be\label{e.logalpha} [f]_{\logalpha(Q)} = \sup_{\substack{(t,x,v) \neq (t,x',v') \in Q,\\ |x-x'|, |v-v'| < 1/2}} \frac{|f(t,x,v) - f(t,x',v')|}{|x- x'|^{\alpha/3} + \log(1/|v- v'|)^{-\theta}}. \ee Abusing notation, we also use the $\logalpha$ notation for functions $f$ that are independent of $t$ but for which the supremum in~\eqref{e.logalpha}, without the $t$ terms, is finite. When $Q$ is not specified in the norms above, it is taken to be either $\R^6$ or $\R_+ \times \R^6$, depending on the setting. For example, if $f: \R^6 \to \R$, then we say $f\in \calpha$ to mean $f\in \calpha(\R^6)$. \subsubsection{Multi-indices} Given a multi-index $\alpha \in (\N\cup\{0\})^d$, we write \be \partial_v^\alpha = \partial_{v_1}^{\alpha_1}\cdots \partial_{v_d}^{\alpha_d}. \ee The object $\partial_x^\alpha$ is defined analogously. \subsubsection{Other notation}\label{sec:notation} Throughout the work, constants are assumed to change line-by-line and depend on various parameters such as the dimension $d$, the ellipticity constant $\Lambda$, and the regularity parameter $\alpha$.
In the statement of each result, we make clear the dependencies and in its proof, we simply write $A \lesssim B$ when $A \leq C B$, where $C$ is a constant depending on those parameters. We use $A\approx B$ when $A\lesssim B$ and $B\lesssim A$. In the uniqueness result for the Landau equation, we must work with weighted spaces. To this end, we recall the Japanese bracket: for any $v \in \R^d$, \be \vv = \sqrt{1 + |v|^2}. \ee Then we define the associated weighted $L^\infty$-spaces: for any $n$, \be L^{\infty,n} := \{f : \vv^n f \in L^\infty\} \qquad\text{with norm } \|f\|_{L^{\infty,n}} := \| \langle\cdot\rangle^n f\|_{L^\infty}. \ee \section{The Schauder estimates}\label{s.Schauder} In this section, we prove our first main result \Cref{t.Schauder}, which is the Schauder estimates for~\eqref{e.kfp}. As usual, the proof proceeds in two steps. The first step is an estimate for solutions of a relevant `homogenous' equation. For us, this `homogeneous' equation is the one where the coefficients depend only on time $t$. The second step (\Cref{s.w05171}) is to bootstrap to the general case by perturbing off of this `homogeneous' equation. \subsection{The first step: Schauder estimates for the $(x,v)$-homogeneous problem} Consider the basic kinetic Fokker-Planck equation involving only transport in $x$ and diffusion in $v$ which the diffusion has a coefficient depending only on $t$: \begin{equation}\label{e.homogeneous_kfp} \begin{split} (\partial_{t} + v\cdot \nabla_{x})f = \tr(\bar a(t)D^{2}_vf) + g. \end{split} \end{equation} Our assumption on $\bar a$ is the following: there is $\Lambda\geq 1$ such that \be\label{e.homogeneous_assumptions} \bar a: \R \to \R^3\times\R^3 \text{ is measurable and } \quad \frac{1}{\Lambda} \id \leq \bar a(t) \leq \Lambda \id \quad\text{ for all $t$}. \ee We stress that $\bar a$ does not satisfy any further regularity assumptions. We begin by studying the fundamental solution of this problem; that is, the function $\Gamma_{\bar a}$ for which the solution $f$ of~\eqref{e.homogeneous_kfp} is given by \be\label{e.kernel} \begin{split} f(z) = \int_\R \int_{\R^d} \int_{\R^d} \Gamma_{\bar a}(t, x- \tilde x - (t-\tilde t)\tilde v, v-\tilde v; \tilde t)g(\tilde t,\tilde x,\tilde v)\, d\tilde z. \end{split} \ee In the simple case $\bar a \equiv \id$, it is well-known~\cite{Hormander} that $\Gamma_{\id}$ is given by \be\label{e.gamma_id} \Gamma_{\id}(t,x,v) = \begin{cases} \left(\frac{\sqrt 3}{2 \pi t^2}\right)^d \exp\Big\{ - \frac{3|x-vt/2|^2}{t^3} - \frac{|v|^2}{4t} \Big\} \qquad&\text{ if } t>0,\\ 0 \qquad&\text{ if } t\leq 0. \end{cases} \ee We point out two features: (1) integrating $\Gamma_{\id}$ in $x$ recovers the standard heat kernel, and (2) the `kinetic convolution' involved in~\eqref{e.kernel} respects the Galilean invariance induced by the transport operator. By a somewhat complicated, but nonetheless straightforward, Fourier transform-based computation we can compute the fundamental solution associated to a general $(x,v)$-independent $\bar a$. 
Indeed, we find the following: \begin{Proposition}\label{p.gamma_a} Under the assumption~\eqref{e.gamma_id}, solutions of~\eqref{e.homogeneous_kfp} are given by~\eqref{e.kernel} with the fundamental solution \be\label{e.gamma_a} \Gamma_{\bar a}(t,x,v;s) = \begin{cases} \frac{\exp\Big\{-\frac{v\cdot A_0(t;s)^{-1}v}{4} - (x - M(t;s) v)\cdot P(t;s)^{-1} (x - M(t;s) v) \Big\}}{(4\pi)^d\sqrt{\det{A_0(t;s)P(t;s)}}} \qquad&\text{ if } t >s,\\ 0 \qquad&\text{ if } t\leq s, \end{cases} \ee where \begin{equation}\label{e.A_i} \begin{split} &A_i(t;s)=\int_{s}^{t} (r-s)^i \bar a(r)\,dr, \qquad \text{for}~i=0, 1, 2, \\& P(t;s) = A_{2}(t;s)-A_{1}(t;s)A_0(t;s)^{-1}A_{1}(t;s), \qquad\text{and} \\& M(t;s) = (t-s)\id - A_0(t;s)^{-1} A_1(t;s). \end{split} \end{equation} \end{Proposition} \noindent We postpone the proof of \Cref{p.gamma_a} to \Cref{appendix}. It is not obvious that~\eqref{e.kernel} is well-defined from~\eqref{e.gamma_a}. Indeed, while the positive-definiteness of $A_0$ and its having the same scaling in time as the analogous term in~\eqref{e.gamma_id} are immediately obvious, the same cannot be said for $P(t)$. In fact, even the positive definiteness of $P$ is not clear. However, we need a stronger estimate than merely positive definiteness of $P$ as the crucial step in most proofs of the Schauder estimates is in understanding the scaling in $t$ of $\Gamma_{\bar a}$ and its integrals in $x$ and $v$. We now state this scaling property. Notice that it is, up to constants, the same as one would obtain using $\Gamma_{\id}$ defined in~\eqref{e.gamma_id}. Its proof is contained in~\Cref{s.kernel_scaling}. \begin{Lemma}\label{l.w09031} Let $\Gamma_{\bar a}$ be as in~\eqref{e.gamma_a}, with $\bar a$ under the assumptions given by~\eqref{e.homogeneous_assumptions}. Fix any multi-indices $\alpha, \beta \in (\N\cup\{0\})^d$, any natural number $j\geq 0$, and any real numbers $r,s \geq 0$. For $t > \tilde t$, \begin{equation}\label{e.w09161} \begin{split} \int_{\R^d}\int_{\R^d} \max_{(0,\xi_2,\xi_3) \in Q_{(t-\tilde t)/2}} |\partial^{j}_{t}\partial_{x}^{\beta}\partial_{v}^{\alpha}\Gamma_{\bar a}(t,x+\xi_2,v+\xi_3; \tilde t)|x|^{r}|v|^{s}\,dxdv \lesssim (t-\tilde t)^{-\frac{2j+|\alpha|+3|\beta|}{2}+\frac{3r+s}{2}} . \end{split} \end{equation} \end{Lemma} Using this estimate, \Cref{l.w09031}, we are now able to establish the main result in the $(x,v)$-homogeneous setting that will be the basis of the main Schauder estimate. \begin{Proposition}\label{l.w12041} Fix $\alpha \in (0,1)$. Suppose that $f$, $(\partial_t + v\cdot\nabla_x) f$, $D^2_v f$, and $g \in C_x^{\alpha/3}C_v^{\alpha}(Q_1)$. Assume that $f$ and $g$ have compact support in $Q_1$ and satisfy~\eqref{e.homogeneous_kfp} with coefficient $\bar a$ that satisfies~\eqref{e.homogeneous_assumptions}. Then \be [f]_{C_x^{(2+\alpha)/3}(Q_1)} + [D_{v}^{2}f]_{\calpha(Q_1)} \lesssim [g]_{C^{\alpha/3}_xC^\alpha_v(Q_1)}, \ee where the implied constants depend only on $\alpha$, $\Lambda$, and the dimension $d$. \end{Proposition} \begin{proof} We begin by estimating $[D_{v}^{2}f]_{\calpha(Q_1)}$. Recalling~\eqref{e.kernel}, for any $z=(t,x,v) \in Q_{1}$ and any $1\leq i, j\leq d$, \be \begin{split} \partial_{v_{i}v_{j}}f(z) &= \int_{-1}^{t} \int_{\R^d}\int_{\R^d} \partial_{v_{i}v_{j}}\Gamma_{\bar a}(t, x-\tilde x - (t-\tilde t)\tilde v, v - \tilde v; \tilde t)g(\tilde z)\,d\tilde z . \end{split} \ee Fix another point $z'\in Q_1$ of the form \be \notag z' = (t, x', v'). \ee Notice that $z$ and $z'$ have the same $t$-coordinate. 
This is due to the fact that we do not prove any regularity of $D^2_v f$ in $t$. Let \be \begin{split}\notag &h = |x-x'|^{1/3} + |v-v'| \quad\text{ and } \\ &\delta g(\tilde z) = g(t - \tilde t, x - \tilde x - \tilde t(v-\tilde v), v - \tilde v) - g(t - \tilde t, x' - \tilde x - \tilde t(v' - \tilde v), v' - \tilde v). \end{split} \ee Then, after making the change of variables \be \tilde z \mapsto (t-\tilde t, x - \tilde x - (t-\tilde t)\tilde v, v - \tilde v), \ee we find \begin{equation}\notag \begin{split} \partial_{v_{i}v_{j}}f(z)-\partial_{v_{i}v_{j}}f(z') &= \left(\int_{0}^{2h^2}+\int_{2h^2}^{1+t}\right) \int_{\R^d}\int_{\R^d} \partial_{v_{i}v_{j}}\Gamma_{\bar a}(t, \tilde x, \tilde v; t-\tilde t)\delta g(\tilde z)\, d\tilde z \\&= I_1 +I_2. \end{split} \end{equation} We now estimate each of $I_1$ and $I_2$ in turn. \medskip {\bf Estimating $I_1$:} Here, integrating $\partial_{v_iv_j}\Gamma_{\bar a}$ over $(\tilde x, \tilde v)$ leaves us with an $O(1/\tilde t)$ term. This means that our approach needs to use the regularity of $g$ to obtain $\tilde t$-terms, either directly or via \Cref{l.w09031}. Using the regularity of $\partial_{v_iv_j}\Gamma_{\bar a}$ will only exacerbate this issue, so we do not use it, but obtain extra smallness instead by working on a small interval $[0,2h^2]$. To this end, we smuggle in a new term. Setting $\tilde z_{0}=(\tilde t, \tilde x, 0)$, we see that \be\notag \int_0^{2h^2} \int_{\R^d}\int_{\R^d} \partial_{v_iv_j} \Gamma_{\bar a}(t, \tilde x, \tilde v; t- \tilde t) \delta g(\tilde z_0)\, d\tilde z = 0. \ee Hence, we obtain \be\notag |I_1| = \Big| \int_{0}^{2h^2} \int_{\R^d}\int_{\R^d} \partial_{v_{i}v_{j}}\Gamma_{\bar a}(t, \tilde x, \tilde v; t- \tilde t)(\delta g(\tilde z)-\delta g(\tilde z_{0}))\,d\tilde z \Big|. \ee Next, we point out that \be\label{e.c032901} \begin{split} |\delta g(\tilde z) - \delta g(\tilde z_0)| &= \big| \left( g(t-\tilde t, x - \tilde x - \tilde t(v - \tilde v), v - \tilde v) - g(t-\tilde t, x' - \tilde x - \tilde t(v' - \tilde v), v' - \tilde v) \right) \\&\qquad - \left( g(t-\tilde t, x - \tilde x - \tilde t v , v) - g(t-\tilde t, x' - \tilde x - \tilde tv', v') \right)\big| \\& = \big| \left( g(t-\tilde t, x - \tilde x - \tilde t(v - \tilde v), v - \tilde v) - g(t-\tilde t, x - \tilde x - \tilde t v , v) \right) \\&\qquad - \left(g(t-\tilde t, x' - \tilde x - \tilde t(v' - \tilde v), v' - \tilde v) - g(t-\tilde t, x' - \tilde x - \tilde tv', v') \right)\big| \\& \leq 2[g]_{C^{\alpha/3}_xC^\alpha_v(Q_1)} \left( (\tilde t |\tilde v|)^{\alpha/3} + |\tilde v|^\alpha\right), \end{split} \ee where, to get the second inequality, we swapped the places of the second and third terms in the absolute values. This has the advantage of avoiding a $t-t'$ term that would require time regularity of $g$. Using~\eqref{e.c032901} and then \Cref{l.w09031}, we find \be \begin{split}\label{e.w05011} |I_1| &\lesssim [g]_{C^{\alpha/3}_x C^{\alpha}_v(Q_1)} \int_{0}^{2h^2} \int_{\R^d}\int_{\R^d} |\partial_{v_{i}v_{j}}\Gamma_{\bar a}(t, \tilde x, \tilde v; t- \tilde t)|((\tilde t |\tilde v|)^{\alpha/3}+|\tilde v|^{\alpha})\,d\tilde z \\&\lesssim [g]_{C^{\alpha/3}_x C^{\alpha}_v(Q_1)} \int_{0}^{2h^2} s^{\alpha/2-1}\,ds \lesssim [g]_{\calpha(Q_1)} h^{\alpha} . \end{split} \end{equation} \medskip {\bf Estimating $I_2$:} In this case, we are insulated from $\tilde t = 0$ so we may (and do) use the regularity of $\partial_{v_iv_j}\Gamma_{\bar a}$ here. 
The first step is to separate the two integrals in $I_2$ (recall that $\delta g$ is a difference of two terms and then change variables $\tilde z \mapsto z - \tilde z$ and $\tilde z \mapsto z' - \tilde z$, respectively. This yields: \begin{equation}\notag \begin{split} I_2 &= \int_{-1}^{t-2h^2} \int_{\R^d}\int_{\R^d} (\partial_{v_{i}v_{j}}\Gamma_{\bar a}(t, x - \tilde x, v -\tilde v; \tilde t)-\partial_{v_{i}v_{j}}\Gamma_{\bar a}(t, x' - \tilde x, v' -\tilde v; \tilde t)) g(\tilde t,\tilde x-(t-\tilde t)\tilde v,\tilde v)\,d\tilde z. \end{split} \end{equation} The key reason for doing this is so that the resulting terms, $\partial_{v_iv_j}\Gamma_{\bar a}$, are a full $\tilde v$-derivative. Hence, \begin{equation}\notag \int_{\R^d} \partial_{v_{i}v_{j}}\Gamma_{\bar a}(t,x-\tilde x,v-\tilde v; \tilde t) \,d\tilde v = \int_{\R^d}\partial_{v_{i}v_{j}}\Gamma_{\bar a}(t,x'-\tilde x,v'-\tilde v; \tilde t)\,d\tilde v = 0. \end{equation} Therefore, we rewrite $I_{2}$ as \begin{equation} \begin{split} I_2 &= \int_{-1}^{t-2h^2} \int_{\R^d}\int_{\R^d} (\partial_{v_{i}v_{j}}\Gamma_{\bar a}(t, x-\tilde x, v - \tilde v; \tilde t)-\partial_{v_{i}v_{j}}\Gamma_{\bar a}(t, x'-\tilde x, v' - \tilde v; \tilde t) \\&\phantom{MMMMMMMa}\times (g(\tilde t,\tilde x-(t-\tilde t)\tilde v,\tilde v)-g(\tilde t,\tilde x-(t-\tilde t)v,v))\,d\tilde z. \end{split} \end{equation} Notice that $z - z' \in Q_h$. By a Taylor approximation, we see that \begin{equation}\notag \begin{split} &|\partial_{v_{i}v_{j}}\Gamma_{\bar a}(t, x-\tilde x, v - \tilde v; \tilde t)-\partial_{v_{i}v_{j}}\Gamma_{\bar a}(t, x'-\tilde x, v' - \tilde v; \tilde t)| \\&\leq \max_{\xi \in Q_h,\xi_1=0}\left( h^3 |\nabla_x \partial_{v_{i}v_{j}}\Gamma_{\bar a}(t, x-\tilde x + \xi_2, v - \tilde v + \xi_3; \tilde t)| + h |\nabla_v \partial_{v_{i}v_{j}}\Gamma_{\bar a}(t, x-\tilde x + \xi_2, v - \tilde v + \xi_3; \tilde t)| \right). \end{split} \end{equation} Additionally, we have \be |g(\tilde t,\tilde x-(t-\tilde t)\tilde v,\tilde v)-g(\tilde t,\tilde x-(t-\tilde t)v,v)| \leq [g]_{\calpha(Q_1)} [(|t-\tilde t||\tilde v - v|)^{\alpha/3} + |\tilde v - v|^\alpha]. \ee Therefore, by a shifting back in all variables, $(\tilde x, \tilde v) \mapsto(x-\tilde x, v - \tilde v)$, we see \begin{equation}\notag \begin{split} &|I_2| \leq [g]_{\calpha(Q_1)} \int_{2h^2}^{t+1} \int_{\R^d}\int_{\R^d} \max_{\xi \in Q_h,\xi_1=0}\Big( h^3 |\nabla_x \partial_{v_{i}v_{j}}\Gamma_{\bar a}(t, \tilde x + \xi_2, \tilde v + \xi_3; \tilde t)| \\& \phantom{MMMMMMMMMMMM}+ h |\nabla_v \partial_{v_{i}v_{j}}\Gamma_{\bar a}(t, \tilde x + \xi_2, \tilde v + \xi_3; \tilde t)| \Big) (|\tilde t|^{\alpha/3}|\tilde v|^{\alpha/3} + |\tilde v|^{\alpha}) \,d\tilde z. \end{split} \end{equation} Using then \Cref{l.w09031}, which, effectively turns $\tilde x$ and $\tilde v$ into $\tilde t^{3/2}$ and $\tilde t^{1/2}$, respectively, and $\partial_t$, $\nabla_x$ and $\nabla_v$ into $\tilde t^{-1}$, $\tilde t^{-3/2}$, and $\tilde t^{-1/2}$, respectively, we find \be\label{e.c050401} \begin{split} |I_2| &\lesssim [g]_{\calpha(Q_1)} \int_{2h^2}^{t+1} \Big( \frac{h^2}{\tilde t^2} + \frac{h^3}{\tilde t^{5/2}} + \frac{h}{\tilde t^{3/2}}\Big) \tilde t^{\alpha/2} \, d\tilde t \lesssim [g]_{\calpha(Q_1)} h^\alpha. \end{split} \ee Combining this,~\eqref{e.w05011}, and~\eqref{e.c050401} completes the estimate of $[D_{v}^{2}f]_{\calpha(Q_1)}$ as claimed in the statement. 
The estimate of $[f]_{C^{\frac{2+\alpha}{3}}_x(Q_1)}$ essentially proceeds along the same lines, but is significantly simpler as there is no difference in the Galilean terms $-\tilde t(v - \tilde v)$ in $\delta g$. Additionally, the details are exactly the same as in \cite[Lemma~2.5]{HS}. As such we omit the proof. \end{proof} \subsubsection{Proof of \Cref{l.w09031}: integrals of $\Gamma_{\bar a}$ and their scaling in $t$}\label{s.kernel_scaling} Our first observation in service of establishing \Cref{l.w09031} is that the integral is well-defined due to the positivity of the exponential terms and that it satisfies the appropriate scaling laws. \begin{Lemma}\label{l.w09032} The matrix $P(t)$ is invertible. Additionally, we have the following bounds: for all $t>\tilde t$, any $i= 0,1,2$, and any vector $w\in\R^d$, \be\notag \begin{aligned} (i) & \quad w\cdot P(t;\tilde t)w \approx (t-\tilde t)^3 |w| &\qquad\qquad (ii) & \quad |A_i(t;\tilde t)w| \approx (t-\tilde t)^{i+1}|w| \\ (iii) & \quad |M(t;\tilde t)w| \lesssim (t-\tilde t)|w|. \end{aligned} \ee where the constants depends only on $\Lambda$. \end{Lemma} We note that the lower bound in $(ii)$ and all of the upper bounds are straightforward, but the lower bound in (i) is not obvious and nontrivial to prove. As the proof is somewhat long, we postpone it to \Cref{s.matrices}. Next, we observe that the partial derivative of $\Gamma$ appearing in \Cref{l.w09031} has a particular form. \begin{Lemma}\label{l.w09171} Fix any multi-indices $\alpha$ and $\beta$ as in \Cref{l.w09031}. Then there exist a homogeneous polynomial $\cP_{\alpha,\beta}$ of order $|\alpha| + 3|\beta|$ such that \be\label{e.c9164} \begin{split} \frac{\partial_x^\beta \partial_v^\alpha \Gamma_{\bar a}(t,x,v\tilde t)}{\Gamma_{\bar a}(t,x,v;\tilde t)} &= \cP_{\alpha,\beta}( (A_0^{-1/2})_{ij}, (P^{-1/2} M)_{ij}, (A_0^{-1}v)_j, (M^T P^{-1} M v)_j, \\&\qquad\qquad (M^T P^{-1} x)_j, P^{-1/6}, (M^T P^{-1})^{1/3} ), \end{split} \ee where the last two terms in the polynomial, $(M^T P^{-1})^{1/3}$ and $P^{-1/6}$, are understood to only appear in the polynomial in powers that are multiples of three. \end{Lemma} As its proof is somewhat short, we give it here. Before doing so, however, we make two observations. First, the above is essentially obvious when $\bar a(t) = \Id$. Second (setting $\tilde t=0$ for ease), using \Cref{l.w09171} and the kinetic scaling, in which we think of $v \sim \sqrt t$ and $x\sim t^{3/2}$, every input in the polynomial $\cP_{\alpha,\beta}$ is $\sim t^{-1/2}$, making the entire polynomial $\sim t^{-|\alpha|-3|\beta|}$. This is precisely the reason that \Cref{l.w09031} holds. \begin{proof} Our proof proceeds by induction, first on the magnitude of $\alpha$ and then on the magnitude of $\beta$. The case $|\alpha| = |\beta| = 0$ is obvious. We now consider the case where $\partial_v^\alpha = \partial_{v_i} \partial_v^{\tilde \alpha}$ for some $i\in\{1,\dots,d\}$ and $|\tilde \alpha|\geq 0$, and we assume that~\eqref{e.c9164} holds for $\partial_v^{\tilde \alpha} \Gamma$. The derivative $\partial_{v_i}$ can, by the product rule, either fall on $\cP_{\tilde \alpha}$ or $\Gamma$. We consider each case in turn. First, consider the former case; that is, consider the term that arises when the $\partial_{v_i}$ falls on $\cP_{\tilde \alpha}$. 
Observe that $\partial_{v_i} \cP_{\tilde \alpha}$ yields a linear combination of terms that are a $|\tilde\alpha|-1$ homogeneous polynomial multiplied by either \be\label{e.c032902} (A_0^{-1})_{ji} = (A_0^{-1/2})_{jk}(A_0^{-1/2})_{ki} \ee or \be\label{e.c032903} (M^T P^{-1} M)_{ji} = (M^TP^{-1/2})_{jk} (M^TP^{-1/2})_{ik}. \ee Each of~\eqref{e.c032902} and~\eqref{e.c032903} are 2-homogeneous in the variables of $\cP_{\alpha}$, making the resulting terms $|\tilde \alpha| - 1 + 2 = |\tilde \alpha| + 1$ homogeneous polynomials, as desired. We now consider the latter case; that is, when $\partial_{v_i}$ falls on $\Gamma$. The conclusion is then clear as \be\notag \cP_{\tilde\alpha} \partial_{v_i} \Gamma = \cP_{\tilde\alpha} \left( -\frac{1}{2} (A_0^{-1} v)_i + 2(M^TPx)_i - 2(M^TPM v)_i \right) \Gamma. \ee Hence, we are finished with the proof when $|\beta| = 0$. The proof of the induction on $\beta$ is essentially the same; hence, we omit it. \end{proof} We are now in a position to prove \Cref{l.w09031}. \begin{proof}[Proof of \Cref{l.w09031}] For ease, set $\tilde t = 0$. We discuss first the case when $(\xi_1, \xi_2, \xi_3) = 0$ and $j=0$. First, for notational ease, let \be\notag I =\int_{\R^d}\int_{\R^d} |\partial^{\alpha}_{v} \partial^\beta_x \Gamma_{\bar a}(t,x,v;0)||x|^{r}|v|^{s}\,dvdx. \ee By \Cref{l.w09171}, we have \begin{equation}\notag \begin{split} I &= \det({A_0 P})^{-1/2} \int_{\R^d}\int_{\R^d} |\cP_{\alpha,\beta} \Gamma_{\bar a}(t,x,v;0)||x|^r|v|^s\,dvdx . \end{split} \end{equation} Using \Cref{l.w09032}, we notice that \begin{equation}\notag \begin{aligned} |\cP_{\alpha,\beta}| \lesssim \tilde \cP_{\alpha,\beta}\left( \frac{1}{\sqrt t}, \frac{x}{t^2}, \frac{v}{t}\right) \end{aligned} \end{equation} for some positive $|\alpha| + 3|\beta|$-homogeneous polynomial $\tilde \cP_{\alpha,\beta}$. Therefore, we get \begin{equation}\notag \begin{split} I &\lesssim \frac{1}{\sqrt{\det({A_0 P})}} \int_{\R^d}\int_{\R^d} \widetilde {\mathcal P}_{\alpha,\beta}\left(\frac{1}{\sqrt t}, \frac{x}{t^2}, \frac{v}{t}\right) e^{-\frac{v^{T}A_0^{-1}v}{4}} e^{-(x-Mv)\cdot P^{-1}(x-Mv)}|x|^{r}|v|^{s}\,dvdx \\&= \frac{t^{\frac{3r}{2}+\frac{s}{2}}}{\sqrt{\det({A_0P})}} \int_{\R^d}\int_{\R^d} \widetilde {\mathcal P}_{\alpha,\beta}\left(\frac{1}{\sqrt t}, \frac{x}{t^2}, \frac{v}{t}\right) e^{-\frac{v\cdot A_0^{-1}v}{4}} e^{-(x-Mv)\cdot P^{-1}(x-Mv)} \frac{|x|^r}{t^\frac{3r}{2}} \frac{|v|^s}{t^\frac{s}{2}}\,dvdx. \end{split} \end{equation} Next, we change variables to find \be I\lesssim \frac{t^{\frac{3r}{2}+\frac{s}{2}-2d}}{\sqrt{\det({A_0P})}} \int_{\R^d}\int_{\R^d} \widetilde {\mathcal P}_{\alpha,\beta}\left(\frac{1}{\sqrt t}, \frac{\bar x}{\sqrt t}, \frac{\bar v}{\sqrt{t}}\right) e^{-t\frac{\bar v\cdot A_0^{-1}\bar v}{4}} e^{-t^3 (\bar x-t^{-1}M\bar v)\cdot P^{-1}(\bar x-t^{-1}M\bar v)} |\bar x|^r |\bar v|^s \, d\bar v d\bar x. \ee Notice that \be \widetilde {\mathcal P}_{\alpha,\beta}\left(\frac{1}{\sqrt t}, \frac{\bar x}{\sqrt t}, \frac{\bar v}{\sqrt{t}}\right) = t^{-\frac{|\alpha|+3|\beta|}{2}} \widetilde {\mathcal P}_{\alpha,\beta}\left(1, \bar x, \bar v\right) \ee due to the homogeneity of $\widetilde{\mathcal{P}}$. Hence, \be\notag I\lesssim \frac{t^{\frac{3r + s - |\alpha| - 3|\beta|}{2}-2d}}{\sqrt{\det({A_0 P})}} \int_{\R^d}\int_{\R^d} \widetilde {\mathcal P}_{\alpha,\beta}\left(1,\bar v, \bar x\right) e^{-t\frac{\bar v\cdot A_0^{-1}\bar v}{4}} e^{-t^3 (\bar x-t^{-1}M\bar v)\cdot P^{-1}(\bar x-t^{-1}M\bar v)} |\bar x|^r |\bar v|^s \, d\bar v d\bar x. 
\ee We change variables one final time with $\bar y = \bar x - t^{-1} M \bar v$ to find \be \begin{split} I &\lesssim \frac{t^{\frac{3r + s - |\alpha| - 3|\beta|}{2}-2d}}{\sqrt{\det({A_0 P})}} \int_{\R^d}\int_{\R^d} \widetilde {\mathcal P}_{\alpha,\beta}\left(1,\bar v, \bar x\right) e^{-t\frac{\bar v\cdot A_0^{-1}\bar v}{4}} e^{-t^3 \bar y\cdot P^{-1}\bar y} |\bar y + t^{-1} M \bar v|^r |\bar v|^s \, d\bar v d\bar x \\&\lesssim \frac{t^{\frac{3r + s - |\alpha| - 3|\beta|}{2}-2d}}{\sqrt{\det({A_0 P})}} \int_{\R^d}\int_{\R^d} \widetilde {\mathcal P}_{\alpha,\beta}\left(1,\bar v, \bar x\right) e^{-t\frac{\bar v\cdot A_0^{-1}\bar v}{4}} e^{-t^3 \bar y\cdot P^{-1}\bar y} (|\bar y|^r + |\bar v|^r) |\bar v|^s \, d\bar v d\bar x. \end{split} \ee In the last inequality we used \Cref{l.w09032} to bound $t^{-1} |M| \lesssim 1$. At this point, it follows from \Cref{l.w09032} that the quadratic terms in the exponential are bounded below as \be\notag t \bar v\cdot A_0^{-1} \bar v + t^3 \bar y\cdot P^{-1} \bar y \gtrsim |\bar v|^2 + |\bar y|^2. \ee The conclusion follows then from a simple calculation: \be\notag I \lesssim \frac{t^{\frac{3r + s - |\alpha|-3|\beta|}{2}-2d}}{\sqrt{\det({A_0P})}}. \ee The proof of this case is concluded after applying \Cref{l.w09032} again in order to bound the determinant. The case where $j>0$ reduces to the case above via the identity: \be\notag \partial_t \Gamma_{\bar a} = v \cdot \nabla_x \Gamma_{\bar a} + \tr(a(t) D^2_v \Gamma_{\bar a}). \ee This concludes the proof of all cases where $(\xi_1,\xi_2,\xi_3) = 0$. The general case can easily be handled as follows. First change variables: \be\notag \begin{split} \int_{\R^d}\int_{\R^d} &\max_{(0,\xi_1,\xi_2) \in Q_{t/2}}|\partial^{j}_{t}\partial_{x}^{\beta}\partial_{v}^{\alpha}\Gamma_{\bar a}(t,x+\xi_1,v+\xi_2;0)|x|^{r}|v|^{s}\,dxdv \\& = \int_{\R^d}\int_{\R^d} \max_{(0,\xi_1,\xi_2) \in Q_{t/2}}|\partial^{j}_{t}\partial_{x}^{\beta}\partial_{v}^{\alpha}\Gamma_{\bar a}(t,x,v;0)|x- \xi_1|^{r}|v-\xi_2|^{s}\,dxdv. \end{split} \ee Next, using the inequalities \be\notag |x- \xi_1|^{r} \lesssim |x|^r + |\xi_1|^r \quad\text{ and }\quad |v-\xi_2|^{s} \lesssim |v|^s + |\xi_2|^s. \ee At this point, the four resulting integrals may be estimated using the case above (keeping in mind the conditions $|\xi_1|\leq t^{3/2}$ and $|\xi_2| \leq \sqrt t$). This concludes the proof. \end{proof} \subsubsection{The proof of \Cref{l.w09032}: understanding the matrices $A_i$, $P$, and $M$}\label{s.matrices} \begin{proof} We note that the upper bounds in all cases (i), (ii), and (iii) are obvious from the assumptions on $\bar a$~\eqref{e.homogeneous_assumptions} and the definition of the matrices~\eqref{e.A_i}. The lower bounds of $A_i$ in (ii) are also obvious. Hence, we need only prove the lower bound for $P$ in (i). For ease, we set $\tilde t = 0$ and simply drop the ``$;0$'' notation from all quantities. To obtain this lower bound, notice that it suffices to establish a uniform bound of the form \be\label{e.c033009} w \cdot (P(t)w) \gtrsim t^3 \ee for any vector $w\in \R^d$ with $|w| = 1$. We proceed by analyzing the time derivative of $P$. First, \begin{equation}\notag \begin{split} P'(t) &= (A_2 -A_1 A_0^{-1} A_1)'(t) \\& = t^2 \bar a-t\bar aA_0^{-1}A_1 +A_1 (A_0^{-1} \bar aA_0^{-1} ) A_1 -A_1 A_0^{-1}t\bar a \\&= (t\sqrt{\bar a}-A_1 A^{-1}\sqrt{\bar a})(t\sqrt{\bar a}-\sqrt{\bar a}A^{-1}A_1) = M^T \bar a M \geq 0 . 
\end{split} \end{equation} Thus, \be\label{e.c033007} w\cdot P(t)w = \int_0^t (M(s) w) \cdot \bar a M(s) w \, ds \geq \frac{1}{\Lambda} \int_0^t |M(s) w|^2 \, ds. \ee In order to establish~\eqref{e.c033009}, it is enough to show \be\label{e.c033006} \int_0^t |M(s) w|^2\, ds \gtrsim t^3. \ee This is our focus for the remainder of the proof. To obtain this lower bound, we use the following intuition. Recall in equation \eqref{e.A_i} \begin{equation}\notag \begin{split} &A_i(s)=\int_{0}^{s} \tau^i \bar a(\tau)\,d\tau, \quad\text{ and }\quad M(s) = s\id - A_0^{-1}(s) A_1(s). \end{split} \end{equation} The time derivative of $M$ is \be\label{e.c033005} \begin{split} M'(s) &= \id - (A_0^{-1} A_1)'(s) = \id + A_0(s)^{-1} \bar a(s) A_0^{-1}(s) A_1(s) - s A_0(s)^{-1} \bar a(s) \\& = \id - A_0(s)^{-1} \bar a(s)M(s). \end{split} \ee From~\eqref{e.c033005}, we see that when $M(s) w$ is `small,' that is $o(t)$, $M'(s) w$ is approximately $w$. That is $M(s)w$ moves radially (in the direction of $w$) with a velocity $\approx 1$ away from the origin. This means that, eventually, $M(s) w$ will move radially across a distance $O(t)$ at a bounded velocity. This would yield the desired bound~\eqref{e.c033006}. In order to make this rigorous, we proceed in three steps. Fix $\eps >0$ sufficiently small in a way to be determined. The first step is to note that either there is an interval $[\eps t, 2\eps t]$ where $M(s) w$ always has magnitude $O(t)$ or not. If so, we are done. If not, we proceed to the second step. The second step takes a time $t_0$ in the interval $[\eps t, 2\eps t]$ in which $M(t_0) w$ is `small' and shows that it gets `big.' The third step is to show that $M(t_0) w$ remains `big'. The second and third steps are dependent on the time derivative of $M$. \smallskip {\bf Step one:} Notice that $|M(0) w| = 0$. If \be |M(s) w| \geq \epsilon^4 t \qquad \text{ for all } s \in [\eps t, 2\eps t] \ee then we are finished with the proof. Hence, assume that there is \be t_0 \in [\eps t, 2\eps t] \quad\text{ such that } |M(t_0) w| < \eps^3 t. \ee \smallskip {\bf Step two:} We claim that \be\label{e.c033001} t_1 := \inf \{ s > t_0 : |M(s) w| \geq \eps^3 t\} \leq 3 \eps t. \ee Roughly, $t_1$ is the first time after $t_0$ that $|M(s)w|$ becomes `big,' that is, has norm $\eps^3 t$. Using the time derivative of $M$~\eqref{e.c033005}, we obtain the identity \be\label{e.c033003} M(t_1) w = M(t_0) w + \int_{t_0}^{t_1}\left(w - A_0(s)^{-1} a(s) M(s) w\right) \, ds. \ee Combining~\eqref{e.c033003} with the definition of $t_1$~\eqref{e.c033001} and the bound \Cref{l.w09032}.(ii), we find \be\label{e.c033004} \begin{split} t_1 - t_0 &= (t_1 - t_0)|w| = \Big|\int_{t_0}^{t_1} w \,ds\Big| = \Big|M(t_1) w - M(t_0)w + \int_{t_0}^{t_1} A_0(s)^{-1} \bar a(s) M(s) w \,ds \Big| \\& \leq |M(t_1) w| + |M(t_0) w| + \int_{t_0}^{t_1} |A_0(s)^{-1} \bar a(s) M(s) w|\, ds \\& \leq 2 \eps^3 t + \int_{t_0}^{t_1} \frac{C |M(s) w|}{s^2} \, ds \leq 2 \eps^3 t + \int_{t_0}^{t_1} \frac{C \eps^3 t}{s} \, ds \leq 2 \eps^3 t + \frac{C \eps^3 t (t_1 - t_0)}{\eps t}. \end{split} \ee where $C$ is a universal constant depending only on $d$ and $\Lambda$. The last inequality uses that $s \geq t_0 \geq \eps t$. Before continuing, we note that the last integral in~\eqref{e.c033004} reveals the necessity of Step One, above. Indeed, the final integral above is not bounded for $t_0$ near $0$. Step One allows us to avoid this singularity. Returning to~\eqref{e.c033004}, notice that, if $\eps$ is sufficiently small then $C\eps^2 < 1/2$. 
Thus, after rearranging~\eqref{e.c033004}, we find \be\notag \frac{t_1-t_0}{2} \leq 2 \eps^3 t. \ee Rearranging this, recalling that $t_0 \leq 2 \eps t$, and further decreasing $\eps$, we obtain \be\notag t_1 \leq t_0 + 4\eps^2 t < 2 \eps t + \eps t. \ee Hence~\eqref{e.c033001} is established. \smallskip {\bf Step Three:} We claim that \be\label{e.w05181} t_2 := \sup \{ s \in (t_1, t] : |M(s) w| \geq \eps^4 t\} \geq t_1 + \eps^4 t. \ee Roughly, $t_2$ is the first time after $t_1$ (at which time $|M(s) w|$ is `big') that $|M(s) w|$ becomes `small,' that is $\eps^4 t$. Before showing this, we claim this allows us to conclude. Indeed, \be\notag |M(s) w| \geq \eps^4 t \quad \text{ for } s \in (t_1,t_2) \qquad\text{ and }\qquad t_2 - t_1 \geq \eps^4 t. \ee Hence, \be\notag \int_0^t |M(s) w|^2\, ds \geq \int_{t_1}^{t_2} |M(s) w|^2\, ds \geq (t_2 - t_1) (\eps^4 t)^2 \geq \epsilon^{12} t^3. \ee In view of~\eqref{e.c033007}, this establishes the claim~\eqref{e.c033006}, which concludes the proof. Thus, it is enough to prove~\eqref{e.w05181}, which is our focus now. If $t_2 = t$, we are finished. Hence, we assume that $t_2 < t$, which implies that \be\label{e.c033010} |M(t_2) w| = \eps^4 t. \ee Also, using~\eqref{e.c033005} once again, we find \be\notag M(t_1) w - M(t_2)w = -(t_2-t_1)w + \int_{t_1}^{t_2} A_0^{-1}(s) \bar a(s) M(s) w \, ds. \ee Combining the two identities above, and recalling from~\eqref{e.c033001} that $|M(t_1)w| = \eps^3 t$, we find \be\notag \eps^3 t(1 - \eps) \leq |M(t_1) w - M(t_2) w| \leq (t_2-t_1) + \int_{t_1}^{t_2} C \, ds = (C+1) (t_2- t_1). \ee Rearranging this and decreasing $\eps$ if necessary, we find~\eqref{e.w05181}. This concludes the proof. \end{proof} \subsection{The second step: full Schauder estimates by perturbing off the homogeneous problem}\label{s.w05171} By a careful procedure taking into account the natural scalings and available interpolations, we can perturb off the $(x,v)$-homogeneous problem in order to obtain the full Schauder estimates. In short, we finish the proof of \Cref{t.Schauder} by leveraging \Cref{l.w12041}. We begin by stating two important technical lemmas. The proof of the first is given in \cite{LH} and the second is standard (it can be seen easily by scaling, for example), but a proof can be found in~\cite[Lemma~2.10]{imbert2018schauder}. \begin{Lemma}[Lemma~4.3 in~\cite{LH}]\label{l.w12042} Let $\omega(r)>0$ be bounded in $[r_0,r_1]$ with $r_0 \geq 0$. Suppose that there is $\mu \in (0,1)$ and constants $A$, $B$, $p\geq 0$ so that, for all $r_0\leq r<R\leq r_1$, \begin{equation}\label{e.w12044} \begin{split} \omega(r)\leq \mu \omega(R)+\frac{A}{(R-r)^p}+B. \end{split} \end{equation} Then for any $r_0\leq r<R\leq r_1$, there holds \begin{equation}\label{e.w12045} \begin{split} \omega(r)\lesssim \frac{A}{(R-r)^p}+B , \end{split} \end{equation} where the implied constant depends only on $\mu$ and $p$. \end{Lemma} \begin{Lemma}[Interpolation inequalities]\label{l.w12022} Fix any $Q=Q_{r}$ for any $r \geq 1/2$ and any $\alpha \in (0,1)$. 
For any $\epsilon>0$, the following hold: \be \begin{split} [u]_{\calpha(Q)} &\lesssim \epsilon^{2}\left([u]_{C^{(2+\alpha)/3}_x(Q)} + [D^{2}_{v}u]_{C^{\alpha/3}_xC_v^\alpha(Q)}\right) + \epsilon^{-\alpha}\|u\|_{L^{\infty}(Q)}, \\ [D_{v}u]_{\calpha(Q)} &\lesssim \epsilon^{}\left([u]_{C^{(2+\alpha)/3}_x(Q)} + [D^{2}_{v}u]_{C^{\alpha/3}_xC_v^\alpha(Q)}\right) + \epsilon^{-\alpha-1}\|u\|_{L^{\infty}(Q)}, \\ \|D_v u\|_{L^{\infty}(Q)} &\lesssim \epsilon^{\alpha+1}[D^{2}_{v}u]_{C^{\alpha/3}_xC_v^\alpha(Q)} + \epsilon^{-1}\|u\|_{L^{\infty}(Q)}, \qquad\text{ and} \\ \|D_v^2 u\|_{L^{\infty}(Q)} &\lesssim \epsilon^{\alpha}[D^{2}_{v}u]_{C^{\alpha/3}_xC_v^\alpha(Q)} + \epsilon^{-2}\|u\|_{L^{\infty}(Q)} . \end{split} \end{equation} \end{Lemma} With these in hand, we now prove the full Schauder estimates. \begin{proof}[Proof of \Cref{t.Schauder}] We estimate $[D_{v}^{2}u]_{\calpha(Q_{1/2})}$ and omit the proof of the other terms as they are similar. For succinctness, in this proof, we use the following notation: \begin{equation}\label{e.w03312} [u]'_{2+\alpha,r} :=[D^{2}_{v}u]_{\calpha(Q_r)} +[u]_{C^{(\alpha+2)/3}_x(Q_r)}. \end{equation} The key estimate that we establish is the following. There is $\eps_0>0$ sufficiently small so that, with \be\label{e.theta_0} \theta_0 := \min\Big\{\frac{1}{8}, \eps_0 [\bar a]_{\calpha(Q_1)}^{-1/\alpha}\Big\} \ee then \be\label{e.c51201} \begin{split} [f]_{2+\alpha,r}' \leq &\frac{1}{2} [f]_{2+\alpha, r+2\theta}' + C([\bar c]_{\calpha(Q_1)} + \theta^{-2-\alpha}) \|f\|_{L^\infty(Q_1)} + C\theta^{-\alpha} \|g\|_{\calpha(Q_1)}, \end{split} \ee for some $C>0$ and all $\theta \in (0,\theta_0)$ and $r \in [1/4, 3/4]$. The proof of~\eqref{e.c51201} is complicated, so we postpone it until after show how \Cref{t.Schauder} follows from it. In order to prove \Cref{t.Schauder} from~\eqref{e.c51201}, we first rewrite~\eqref{e.c51201} in a manner more adapted to \Cref{l.w12042}. Indeed, applying \Cref{l.w12042} with, in its notation, the choices \be \begin{split} &r_1 = 1/4, \quad r_2 = 3/4, \quad \omega(r) = [u]_{2+\alpha,Q_r}', \quad R = r + 2\theta_0, \quad \mu = \frac{1}{2}, \\& A = \|f\|_{L^\infty(Q_1)} + \theta_0^2 [g]_{\calpha(Q_1)}, \quad B = C[\bar c]_{\alpha(Q_1)} \|f\|_{L^\infty(Q_1)}, \quad\text{ and }\quad p = 2 + \alpha, \end{split} \ee yields \begin{equation}\label{e.w12052} \begin{split} &[u]'_{2+\alpha,1/2} \lesssim \theta_0^{-2-\alpha} \left( \|f\|_{L^\infty(Q_1)} + \theta_0^2 [g]_{\calpha(Q_1)} \right) + [\bar c]_{\calpha(Q_1)}\|f\|_{L^\infty(Q_1)} \\&\quad \lesssim \left(1+ [\bar c]_{\calpha(Q_1)} + [\bar a]_{\calpha(Q_1)}^{1+ \frac{2}{\alpha}}\right) \|f\|_{L^\infty(Q_1)} + \left(1 + [\bar a]_{\calpha(Q_1)}\right) [g]_{\calpha(Q_1)}. \end{split} \end{equation} Thus, \Cref{t.Schauder} is proved, up to establishing~\eqref{e.c51201}. We now prove~\eqref{e.c51201}. We argue under the assumption that \be\notag [D^2_v f]_{\calpha(Q_r)} \geq [f]_{C^{(2+\alpha)/3}_x(Q_r)} \ee so that \be\notag [f]_{2+\alpha, r}' \leq 2 [D^2_v f]_{\calpha(Q_r)}, \ee although the proof is similar in the opposite case. Fix $z_0,z_1 \in Q_r$ with $t_0 = t_1$ so that \be\notag \frac{|D^2_v f(z_0) - D^2_v f(z_1)|}{|x_0-x_1|^{\alpha/3} + |v_0 - v_1|^\alpha} \geq \frac{1}{2} [D^2_v f]_{\calpha(Q_r)} \geq \frac{1}{4} [f]_{2+\alpha,Q_r}'. \ee Up to a change of variables, we may assume that $z_1 = 0$, which make the expressions in the sequel simpler. Fix $\theta \in (0,\theta_0)$. 
If \be\label{e.c51101} |x_0|^{\alpha/3} + |v_0|^\alpha > \theta, \ee then we have, using \Cref{l.w12022}, \be\notag \begin{split} [f]_{2+\alpha,Q_r}' &\lesssim \frac{|D^2_v f(z_0) - D^2_v f(0)|}{|x-x'|^{\alpha/3} + |v_0|^\alpha} \lesssim \theta^{-\alpha} \|D^2_v f\|_{L^\infty(Q_r)} \\& \leq \frac{1}{2} [ D^2_v f]_{\calpha(Q_r)} + C\theta^{-2-\alpha} \|f\|_{L^\infty(Q_r)}, \end{split} \ee and~\eqref{e.c51201} is proved. Next we consider the case when~\eqref{e.c51101} does not hold. We introduce a cut-off function $0 \leq \chi\leq 1$ such that \be\notag \chi(t,x, v) =\begin{cases} 1 \qquad&\text{ if } |t|^{1/2} + |x|^{1/3} + |v| \leq \theta,\\ 0 \qquad&\text{ if } |t|^{1/2} + |x|^{1/3} + |v| \geq 2\theta, \end{cases} \ee and \begin{equation}\label{e.w12031} [(\partial_t +v\cdot \nabla_x)\chi]_{\calpha(Q_1)}+[D_{v}^{2}\chi]_{\calpha(Q_1)} \lesssim \theta^{-2-\alpha} . \end{equation} We note that estimates on the other norms and semi-norms of $\chi$ can be obtained easily via \Cref{l.w12022}. Additionally, to make the notation simpler, we define \be\notag \tilde a(t) = \bar a(t,0,0) \qquad\text{and}\qquad L = \partial_t + v\cdot\nabla_x - \tr(\bar aD^2_v\cdot). \ee By \Cref{l.w12041}, we have \begin{equation}\label{e.w12041} \begin{split} [D_{v}^{2}f]_{\calpha(Q_r)} &\leq [D^2_v(\chi f)]_{\calpha(Q_1)} \lesssim [\partial_t(\chi f) + v\cdot\nabla_x(\chi f) - \tr(\tilde a D^2_v (\chi f))]_{\calpha(Q_1)} \\& \lesssim [L(\chi f)]_{\calpha(Q_1)} + [\tr((\bar a - \tilde a)D^2_v(\chi f))]_{\calpha(Q_1)}. \end{split} \end{equation} We consider the first term on the right hand side of~\eqref{e.w12041}. Using the equation for $u$, we see \begin{equation}\notag L(\chi f) = f L \chi - 2\bar a\nabla_{v}f\nabla_{v}\chi + \bar c \chi f +\chi g. \end{equation} Thus, by the triangle inequality \begin{equation}\notag \begin{split} &[L (\chi f) ]_{\calpha(Q_1)} \\&\leq [\chi g]_{\calpha(Q_1)} + [fL\chi]_{\calpha(Q_1)} +[\bar c\chi f]_{\calpha(Q_1)} + 2[\bar a\nabla_{v}f\nabla_{v}\chi]_{\calpha(Q_1)} \\&= I_1 +I_2 +I_3+I_4. \end{split} \end{equation} For $I_1$, we use the boundedness of the cut-off function and have \begin{equation}\notag \begin{split} I_1 &\lesssim \theta^{-\alpha}\|g\|_{\calpha(Q_1)}. \end{split} \end{equation} Next we consider $I_2$. Keeping in mind the support of $\chi$ and using the interpolation inequality \Cref{l.w12022} and \eqref{e.w12031} yields \be\notag \begin{split} &I_2 \lesssim [f]_{\calpha(Q_{r+2\theta})} \|L\chi\|_{L^{\infty}(Q_{r+2\theta})} + [L\chi]_{\calpha(Q_{r+2\theta})} \|f\|_{L^{\infty}(Q_{r+2\theta})} \\&\lesssim \theta^{-2} [f]_{\calpha(Q_{r+2\theta})} + (\theta^{-2-\alpha} + [\bar a D^2_v \chi]_{\calpha(Q_{r+2\theta})}) \|f\|_{L^{\infty}(Q_{r+2\theta})}. \end{split} \ee Using again \Cref{l.w12022} and~\eqref{e.w12031}, we have \be\notag \begin{split} [\bar a D^2_v \chi]_{\calpha(Q_{r+2\theta})} &\leq [\bar a]_{\calpha(Q_{r+2\theta})} \| D^2_v \chi\|_{L^\infty(Q_{r+2\theta})} + \|\bar a\|_{L^\infty(Q_{r+2\theta})} [ D^2_v \chi]_{\calpha(Q_{r+2\theta})} \\&\lesssim \theta^{-2} [\bar a]_{\calpha(Q_{r+2\theta})} + \theta^{-2-\alpha}. \end{split} \ee and, for $\eps>0$ to be chosen depending only on $d$, $\alpha$, and $\Lambda$ (recall~\eqref{e.ellipticity}), \be\notag \theta^{-2} [f]_{\calpha(Q_{r+2\theta})} \lesssim \eps [f]_{2+\alpha, Q_{r+2\theta}}' + \theta^{-2 - \alpha}\|f\|_{L^\infty(Q_1)}. \ee Note that, as $\eps$ will not be chosen to depend on $[\bar a]_{\calpha(Q_1)}$ or $\theta$, we omit all negative powers of $\eps$. 
Therefore, we conclude that \be\notag I_2 \lesssim \eps[f]_{2+\alpha,Q_{r+2\theta}}' + (\theta^{-2} [\bar a]_{\calpha(Q_{r+2\theta})} + \theta^{-2 - \alpha})\|f\|_{L^\infty(Q_1)}. \ee The terms $I_3$ and $I_4$ may be handled similarly to obtain \be\notag I_3 \lesssim ([\bar c]_{\calpha(Q_1)} + \theta^{-\alpha}) \|f\|_{L^\infty(Q_1)} + \eps [f]'_{2+\alpha,Q_{r+2\theta}} \ee and \be\notag \begin{split} I_4 &\lesssim ([a]_{\calpha(Q_1)} \theta^{-1} + \theta^{-1-\alpha} ) \|\nabla_v f\|_{L^\infty(Q_{r+2\theta})} + \theta^{-1} [\nabla_v f]_{\calpha(Q_{r+2\theta})} \\& \lesssim ([\bar a]_{\calpha(Q_1)} \theta^{-1} + \theta^{-1-\alpha} ) (\eps \theta^{1+\alpha}[D^2_vf]_{\calpha(Q_{r+2\theta})} + \theta^{-1}\|f\|_{L^\infty(Q_{r+2\theta})}) \\&\qquad + \theta^{-1} ( \eps \theta [D^2_v f]_{\calpha(Q_{r+2\theta})} + \theta^{-1-\alpha}\|f\|_{\calpha(Q_{r+2\theta})} ) \\&\lesssim (\eps + \theta^\alpha [\bar a]_{\calpha(Q_1)}) [D^2_v f]_{\calpha(Q_{r+2\theta})} + (\theta^{-2} [\bar a]_{\calpha(Q_1)} + \theta^{-2-\alpha}) \|f\|_{L^\infty(Q_1)}. \end{split} \ee Hence, we conclude that \be\label{e.c51102} \begin{split} &[L(\chi f)]_{\calpha(Q_1)} \lesssim (\eps + \theta^\alpha [\bar a]_{\calpha(Q_1)}) [f]_{2+\alpha,Q_{r+2\theta}}' \\&\qquad + ([c]_{\calpha(Q_1)} + \theta^{-2} [\bar a]_{\calpha(Q_1)}+ \theta^{-2-\alpha}) \|f\|_{L^\infty(Q_1)} + \theta^{-\alpha} \|g\|_{\calpha(Q_1)}. \end{split} \ee We now consider the second term on the right hand side of~\eqref{e.w12041}. We begin with the usual splitting: \be\label{e.c51204} \begin{split} [\tr(\bar a-\tilde a) D^2_v(\chi f)]_{\calpha(Q_1)} \lesssim &\|\bar a-\tilde a\|_{L^\infty(\supp(\chi))} [D^2_v(\chi f)]_{\calpha(Q_1)} \\& + [\bar a-\tilde a]_{\calpha(Q_1)} \|D^2_v(\chi f)\|_{L^\infty(Q_1)}. \end{split} \ee The second term in~\eqref{e.c51204} can be handled easily using the methods above (recall \Cref{l.w12022} and \eqref{e.w12031}): \be\notag [\tr(\bar a-\tilde a)]_{\calpha(Q_1)} \|D^2_v(\chi f)\|_{L^\infty(Q_1)} \lesssim [\bar a]_{\calpha(Q_1)} \left( \theta^\alpha [f]_{2+\alpha, Q_{r+2\theta}}' + \theta^{-2} \|f\|_{L^\infty(Q_1)} \right). \ee Estimating the first term in~\eqref{e.c51204} uses the fact that $\chi$ has support of size $\theta$: \be\notag \|\bar a-\tilde a\|_{L^\infty(\supp(\chi))} \lesssim \theta^\alpha [\bar a]_{\calpha(Q_1)}. \ee After applying \Cref{l.w12022} and \eqref{e.w12031}, we arrive at \be\notag \begin{split} \|\bar a-\tilde a\|_{L^\infty(\supp(\chi))} &[D^2_v(\chi f)]_{\calpha(Q_1)} \lesssim \theta^\alpha [\bar a]_{\calpha(Q_1)} \left( [f]_{2+\alpha,Q_{r+2\theta}}' + \theta^{-2} \|f\|_{L^\infty(Q_1)} \right). \end{split} \ee Therefore, we obtain the following bound on the second term on the right hand side of~\eqref{e.w12041}: \be\notag [\tr(\bar a-\tilde a) D^2_v(\chi f)]_{\calpha(Q_1)} \lesssim \theta^\alpha [\bar a]_{\calpha(Q_1)} \left( [f]_{2+\alpha,Q_{r+2\theta}}' + \theta^{-2} \|f\|_{L^\infty(Q_1)} \right). \ee Combining all above estimates, we have \be\label{e.c51205} \begin{split} &[f]_{2+\alpha, r}' \leq C(\eps + \theta^\alpha [\bar a]_{\calpha(Q_1)}) [f]_{2+\alpha,Q_{r+2\theta}}' \\&\qquad + C([\bar c]_{\calpha(Q_1)} + \theta^{-2} [\bar a]_{\calpha(Q_1)}+ \theta^{-2-\alpha}) \|f\|_{L^\infty(Q_1)} + C\theta^{-\alpha} \|g\|_{\calpha(Q_1)}, \end{split} \ee where $C$ is some universal constant. 
Choosing $\eps$ and $\eps_0$ sufficiently small and recalling the definition of $\theta_0$~\eqref{e.theta_0} and that $\theta < \theta_0$, we have \be\notag C(\eps + \theta^\alpha)[\bar a]_{\calpha(Q_1)} \leq \frac{1}{2} \quad\text{ and }\quad \theta^{-2} [\bar a]_{\calpha(Q_1)} \lesssim \theta^{-2-\alpha}. \ee Using this in~\eqref{e.c51205}, we obtain~\eqref{e.c51201}, which concludes the proof. \end{proof} \section{Uniqueness for the Landau equation: \Cref{t.Landau}} \label{s.Landau} Before beginning the proof, we review a few useful bounds that follow from our assumptions. For any $h \in L^{\infty,5+\gamma + \eta}$ for any $\eta>0$, we have \be\label{e.bar_a_above} e \cdot \bar a^h(t,x,v) e \lesssim \|h\|_{L^{\infty,5+\gamma + \eta}} \begin{cases} \vv^\gamma \qquad &\text{ if } e \parallel v,\\ \vv^{2+\gamma} \qquad &\text{ otherwise,} \end{cases} \ee for any $e \in \mathbb{S}^2$. The lower order coefficient $\bar c^h$ satisfies a similar bound \be\label{e.bar_c} 0 \leq \bar c^h(t,x,v) \lesssim \vv^\gamma\|h\|_{L^{\infty,3}}. \ee These bounds are not optimal in the weight; it is clear that~$3$ can be replaced by any $k > 3+\gamma$. The proofs of~\eqref{e.bar_a_above} and~\eqref{e.bar_c} are straightforward but can be seen in \cite[Lemma~2.1]{HST2019rough}. Finally, due to the assumption~\eqref{e.nondegeneracy}, the solution $f$ that is the subject of \Cref{t.Landau} satisfies a matching lower bound to~\eqref{e.bar_a_above}: \be\label{e.bar_a_below} e \cdot \bar a^f(t,x,v) e \gtrsim \begin{cases} \vv^{2+\gamma} \qquad &\text{ if } e \perp v,\\ \vv^\gamma \qquad &\text{ otherwise.} \end{cases} \ee Here we suppress the explicit dependence on $f$ as it depends in a complicated way on $\delta$, $r$, $R$, and $\|f\|_{L^{\infty,k}}$. This inequality~\eqref{e.bar_a_below} follows from \cite[Lemma~2.5]{HST2019rough}. We now state the main quantitative estimate that allows us to deduce uniqueness (\Cref{t.Landau}), which is postponed until \Cref{s.Hessian}. This lemma requires \Cref{t.Schauder} in a crucial way. \begin{Proposition}\label{l.Hessian} Under the assumptions of \Cref{t.Landau}, there are $\alpha' \in (0,\alpha)$ and $\theta'\in(0,\theta)$ so that \be \frac{\theta'}{2} \frac{\alpha'}{2+\alpha'} > 1, \ee and $T_0 < 1/2$ such that, for any $t\in [0,T_0]$, \be \|\vv^7 D^2_v f(t)\|_{L^\infty(\R^6)} \lesssim \frac{1}{t \log(\frac{1}{t})^{\frac{\theta'}{2} \frac{\alpha'}{2+\alpha'}}}. \ee The final time $T_0$ depends only on $\alpha$, $\theta$, $k$, $\|f_0\|_{L^{\infty,k}}$, and $\|f_0\|_{\logalpha}$. \end{Proposition} With this in hand, we are in position to prove the second main theorem, the uniqueness of solutions to the Landau equation with initial data having H\"older regularity in $x$ and $\log$-H\"older regularity in $v$. While the estimate of \Cref{l.Hessian} is different from its analogue in~\cite[Lemma 4.3 and Proposition 4.4]{HST2019rough}, its application in deducing uniqueness is quite similar to the proof of uniqueness in~\cite{HST2019rough}. However, we provide the proof as some technical details must be altered. \begin{proof}[Proof of \Cref{t.Landau}] For succinctness, we set \be \ell = 5 + \gamma + \eta, \ee and, without loss of generality, we may assume that \be\label{e.c52511} \ell \leq 5. \ee Let $r \in C(0,T_0] \cap L^1[0, T_0]$ be a positive function to be determined and define \begin{equation} w=e^{-\int_{0}^{t}r(s)\,ds}(g-f) \quad\text{ and }\quad W = \vv^{2\ell} w^2. \end{equation} Our goal is to show that $W \equiv 0$ as this immediately implies that $f\equiv g$. 
We proceed by contradiction, assuming that there is $\epsilon>0$ such that \begin{equation}\label{e.c033011} \sup_{[0,T_0]\times \R^6} W(t,x,v)> \eps. \end{equation} Following the work in \cite[Proposition 5.2]{HST2019rough}, we may find a point $z_\eps = (t_\eps, x_\eps, z_\eps)$ with $t_\eps>0$ such that \be\label{e.W_max} W(z_\eps) = \eps \quad\text{ and }\quad \sup_{[0,t_\eps]\times \R^6} W(t,x,v) \leq \eps. \ee Next, a direct computation from equation \eqref{e.Landau} yields \begin{equation}\label{e.w03012} \begin{split} \partial_t W+ v\cdot \nabla_x W = &\tr{(\bar a^g D_{v}^{2}W)} - \frac{1}{2} W^{-1} \nabla_v W \cdot (\bar a^g \nabla_v W) + 2\ell \vv^{-2} v \cdot (\bar a^g \nabla_v W) \\&+ \big[ 2\ell(\ell+2) \vv^{-4}v\cdot (\bar a^g v) -2\ell \vv^{-2}\tr{(\bar a^g)}+ 2\bar c^g \big]W \\& + 2 \vv^{2\ell} w\tr{(\bar a^w D_{v}^{2}f)} + 2\vv^{2\ell} w \bar c^w f -2rW . \end{split} \end{equation} It is in this step that we need the technical condition $W(z_\eps) > 0$; indeed, otherwise the second term on the right hand side would not be well-defined. We notice three things. First, as $z_\epsilon$ is a maximum point (recall~\eqref{e.W_max}), it follows that, at $z_\eps$, \be\label{e.c52409} \nabla_v W=0, \quad D_{v}^{2}W\leq 0, \quad\text{ and }\quad (\partial_t +v\cdot \nabla_x )W\geq 0. \ee Second, we have that, \be\label{e.c033012} \|w\|_{L^{\infty,\ell}([0,t_\epsilon]\times \R^6)} = W(z_\epsilon). \ee At this point, we drop the indication of the domain from the $L^\infty$-norms as it will always be $[0,t_\epsilon]\times \R^6$. Next, after using~\eqref{e.bar_a_above} and~\eqref{e.bar_c} to bound the $\bar a^g$ and $\bar c^g$ terms in~\eqref{e.w03012} and using~\eqref{e.c52409} to remove several other terms, we obtain, at $z_\eps$, \begin{equation}\label{e.w03015} \begin{split} 2rW &\lesssim W+\vv^{2\ell}|w||\bar a^w||D_{v}^{2}f| + \vv^{2\ell}|w||\bar c^w|f . \end{split} \end{equation} Recalling~\eqref{e.c033012} and~\eqref{e.bar_a_above}, we have, at $z_\epsilon$, \begin{equation}\label{e.w03013} \begin{split} |\bar a^w| \lesssim \vv^{(\gamma+2)_+} \|w\|_{L^{\infty,\ell}} = \vv^{(\gamma+2)_+} \sqrt W \end{split} \end{equation} and, by~\eqref{e.bar_c}, \begin{equation}\label{e.w03014} \begin{split} |\bar c^w| \lesssim \vv^{\gamma} \|w\|_{L^{\infty,\ell}} = \vv^\gamma \sqrt W . \end{split} \end{equation} Plugging the estimates~\eqref{e.w03013} and~\eqref{e.w03014} into~\eqref{e.w03015} yields \begin{equation}\label{e.w03016} \begin{split} rW &\lesssim W+ \vv^{\ell + (2+\gamma)_+} W |D_{v}^{2}f| + \vv^\ell W f \lesssim \left(1 + \vv^{\ell + (2+\gamma)_+}|D_v^2 f|\right) W. \end{split} \end{equation} Above we used that $\|f\|_{L^{\infty,\ell}}$ is bounded. Applying \Cref{l.Hessian} and using~\eqref{e.c52511}, this becomes \be\notag rW \leq C_0 \left(1+\frac{1}{t_\epsilon \log(\frac{1}{t_\epsilon})^{\frac{\theta'}{2}\frac{\alpha'}{2+\alpha'}}} \right) W \ee for some $C_0 >0$. We note that this is where the restriction to $[0,T_0]$ is inherited from \Cref{l.Hessian}. Choosing \begin{equation}\notag r(t) = 2C_0 \left(1+\frac{1}{t \log(\frac{1}{t})^{\frac{\theta'}{2}\frac{\alpha'}{2+\alpha'}}} \right) \end{equation} contradicts \eqref{e.w03016}. The condition that \be\notag \frac{\theta'}{2}\frac{\alpha'}{2+\alpha'} > 1 \ee ensures that $r \in L^1([0,T_0])$, as desired. Therefore, this rules out the existence of $z_\epsilon$. We conclude that \be\notag \sup W < \epsilon. \ee As $\epsilon$ is arbitrary in the above argument, we deduce that $W\equiv 0$. Thus, $g=f$. 
It remains to address the case when $f_{\rm in} \in L^{\infty,k}$ for all $k$. Here, however, the arguments of \cite[Theorem~1.4]{HST2019rough} directly apply. Indeed, these arguments are based on showing that $f(T_1) \in C^{\alpha/3}_x C^\alpha_v(\R^6)$ and lies in $L^{\infty,k}$ for all $k$, which do not require the stronger smoothness assumptions of \cite[Theorem~1.2]{HST2019rough}. The idea is to then re-apply the uniqueness argument on an interval starting at $T_1$. Hence, we deduce that uniqueness on the entire time interval $[0,T]$. \end{proof} \subsection{A $t$-integrable bound on $\|D^2_v f(t)\|_{L^\infty_{x,v}}$: proof of \Cref{l.Hessian}}\label{s.Hessian} We now state a more precise estimate that immediately yields \Cref{l.Hessian}. It establishes a bound on $D^2_v f$ at the same time as one on the $\logalpha$-norm of $f$. In the sequel we refer to these as a Hessian bound and as propagation of regularity, respectively. It is interesting to note that, although the latter is a `hyperbolic' estimate (that is, it does not involve a {\em gain of regularity}), it is dependent on the Schauder estimates in an essential way. \begin{Proposition} \label{p.w02091} Let $f\in L^{\infty, k }([0,T]\times \R^6)$ be the solution constructed in \cite[Theorem~1.2]{HST2019rough} with the nondegeneracy condition~\eqref{e.nondegeneracy}. Fix any $\theta > 0$, $\alpha \in (0,1)$, and $\mu < 1$ such that \be \frac{\mu\theta}{2} \frac{\mu\alpha}{2+\mu \alpha} > 1 \ee Then, for any $m> 5+\gamma$ and any $k$ sufficiently large depending on $m$, $\alpha$, $\theta$, and $\mu$, there exists a time $T_0 \leq \min\{1/2, T\}$ such that \begin{equation} \begin{split} \sup_{t\in[0,T_0]}&\left(t\left(\log\tfrac{1}{t}\right)^{\frac{\mu\theta}{2}\frac{\mu\alpha}{2+\mu\alpha}}\|D^2_v f\|_{L^{\infty,m+(2+\gamma)_+}([t/2,t]\times\R^6)}\right)^\frac{\mu\alpha}{2+\mu\alpha}, \\& \|\vv^{m}f\|_{\logalphamu ([0,T_0]\times \R^6)} \lesssim 1+ \|f_{\text{in}}\|_{\logalpha ( \R^6)}. \end{split} \end{equation} As in \Cref{l.w02102}, the implied constant depends on $\|f\|_{L^{\infty,k}([0,T_0]\times\R^6)}$. The final time $T_0$ depends only on $\alpha$, $\theta$, $k$, $\|f_0\|_{L^{\infty,k}}$, and $\|f_0\|_{\logalpha}$. \end{Proposition} We observe that the restriction $T_0 \leq 1/2$ is a technical one. Indeed, one can iterate \Cref{p.w02091} starting at time $t=0$, $T_0$, $T_1$, $\dots$ to obtain the bound at some (potentially) large time. As we see in its proof, and as is already hinted at by the exponent of the first term in the left hand side of the main inequality in \Cref{p.w02091}, it may be that the weighted H\"older norm blows up at a finite time. We do not address this further here, as it was already handled at the conclusion of the proof of \Cref{t.Landau}. In \cite{HST2019rough}, the analogue to \Cref{p.w02091} was broken up into two separate steps~\cite[Proposition~4.4 and Lemma~4.6]{HST2019rough}, one for each of the two inequalities. Here, however, we must deduce the Hessian bound and the propagation of regularity simultaneously. We discuss this in further detail after stating the next lemma, which plays a key role in the proof of \Cref{p.w02091} The next lemma is an estimate on $D^2_v f$ in terms of the $\logalpha$-norm of $f$. This is obtained by rescaling the equation, applying the Schauder estimates (\Cref{t.Schauder}), and then interpolating between the resulting $C^{\frac{2+\alpha}{3}}_xC^{2+\alpha}_v$-estimate and the $\logalpha$-seminorm of $f$. 
We note that, in order to do this, it is crucial that our Schauder estimates do not require $t$-H\"older regularity of the coefficients. Indeed, the coefficient $\bar a^f$ is a $v$-convolution of $f$ and a kernel, and, hence, will have no more $t$-regularity than that of $f$. {\em A priori} we do not have any bounds on the $t$-H\"older regularity of $f$. One might attempt to obtain apply known estimates (e.g. the De Giorgi estimates \cite[Theorem 12]{GIMV}) to obtain a $t$-H\"older bound; however, these estimates will scale poorly in $t$, leading to a non-integrable bound in $t$. This is overcome in~\cite[Proposition~A.1]{HST2019rough} by a lemma showing that $f$ obtains $t$ H\"older continuity from $(x,v)$ H\"older regularity. This is clearly not useful in our setting as we do not yet have ``nice'' $v$ H\"older regularity of $f$. \begin{Lemma} \label{l.w02102} Under the assumptions of \Cref{p.w02091}, \begin{equation}\begin{split} \|D_{v}^{2}f\|_{L^{\infty,m-2} ([t_0/2,t_0]\times \R^6)} \lesssim \ &\frac{1}{t_0 \log(\frac{1}{t_0})^{\frac{\theta}{2}\frac{\alpha}{2+\alpha}}} (1+ \|\vv^{m}f\|_{\logalpha([t_0/4, t_0]\times \R^6)})^{1+\frac{2}{\alpha}} \\& + \frac{t_0^{\alpha/2}}{\left(\log\frac{1}{t_0}\right)^{\frac{\alpha}{2(2+\alpha)}\frac{\theta}{2}}} \|D_v^2f\|_{L^{\infty,m-2}([t_0/4,t_0]\times \R^6)}^{\alpha/2} , \end{split} \end{equation} for any $t_0 \in (0,\min\{1/2,T\}]$. The implied constant in the above estimate depends additionally on $\|f\|_{L^{\infty, k }([0,T]\times \R^6)}$. \end{Lemma} Again, we note that the fact that $t_0$ is restricted to be less than $1/2$ is only so that the $\log$ in the denominator does not take the value zero. We now briefly comment that the necessity of proving both the Hessian bound and the propagation of regularity simultaneously is related to the fact that in \Cref{l.w02102}, one obtains both the $\logalpha$-norm and a $W^{2,\infty}_v$-norm on the right hand side. Hence, a dynamic argument is required in order to ``absorb'' the $W^{2,\infty}_v$-norm. The reason that both terms appear in our setting (in contrast to the work in~\cite{HST2019rough}) is that we cannot bound the $\calpha$-norm of $\bar c^f$, which is required for the Schauder estimates, by the $\logalpha$-norm of $f$. The proof of \Cref{l.w02102} is contained in \Cref{s.schauder_scaling}. \subsection{The Hessian bound and propagation of regularity: the proof of \Cref{p.w02091}} In this section, we prove the main estimate. Before that, we need to recast our notion of regularity. For any point $(t,x,v,\chi, \nu)\in \R_{+}\times \R^6 \times B_{1/2}(0)^2$ and any real number $m>0$, we define \begin{equation}\label{e.w02091} \begin{split} &\tau f(t,x,v,\chi, \nu) := f(t,x+\chi, v+\nu), \quad \delta f(t,x,v,\chi, \nu) := f(t,x+\chi, v+\nu)-f(t,x,v), \\& \text{and }\quad g(t,x,v,\chi, \nu) := \frac{|\delta f(t,x,v,\chi, \nu)|^2}{(|\chi|^2 + |\log|\nu||^{-2\theta/\alpha})^{\mu \alpha}}\vv^{2m}. \end{split} \end{equation} Then we have the following obvious equivalence between bounds on $g$ and the regularity of $f$. We omit the proof. \begin{Lemma} \label{l.w02101} We have \begin{equation}\notag \begin{split} \|g\|_{L_{x,v}^{\infty}} + \|\vv^{m}f\|_{L^{\infty}(\R^6)}^2 &\approx \|\vv^{m}f\|_{\logalphamu(\R^6)}^2 \\&\approx \sup_{(x_0,v_0)} \vvo^{m}\|f\|_{\logalphamu(B_{1/2}(x_0,v_0))}^2 , \end{split} \end{equation} where the implied constant depend only on $m$, $\theta$, and $\alpha$. 
\end{Lemma} With \Cref{l.w02101} in hand, we are now able to prove our main estimate \Cref{p.w02091} using the strategy of~\cite[Proposition~4.4]{HST2019rough}. When the details are the same as in \cite[Proposition~4.4]{HST2019rough} we note this and omit them. \begin{proof}[Proof of \Cref{p.w02091}] Before beginning we note two things. The first is that, since we are proving a statement regarding a solution constructed in \cite{HST2019rough}, we may assume without loss of generality that $f$ is smooth. Indeed, in~\cite{HST2019rough}, the solution $f$ is approximated by smooth solutions of~\eqref{e.Landau}. Were we to prove the claim for the approximating solution, it holds for $f$ in the limit. Next, we note that $f \in L^{\infty,k}$, by assumption. Hence, we ignore this norm throughout and absorb all instances of it into the $\lesssim$ notation. As the proof is somewhat complicated, we break it up into a number of steps. \smallskip \noindent {\bf Step 1: an equation for $g$ and straightforward estimates.} Using~\eqref{e.Landau}, we find \begin{equation}\label{e.c51501} \begin{split} &\partial_{t}g + v\cdot \nabla_{x}g +\nu\cdot \nabla_{\chi}g + \frac{2\alpha \mu \chi \cdot \nu}{|\chi|^2 + |\log|\nu||^{-2\theta/\alpha}} g \\&\qquad = 2\frac{\tr(\bar a^{\delta f}D_{v}^{2}\tau f+\bar a^{ f}D_{v}^{2}\delta f) +\bar c^{\delta f}\tau f +\bar c^{f}\delta f}{(|\chi|^2 + |\log|\nu||^{-2\theta/\alpha})^{\mu\alpha}}\delta f \vv^{2m}. \end{split} \end{equation} Three terms are estimated exactly\footnote{This corresponds to the estimates of $J_1$, $J_4$, and $J_5$ in \cite[Proposition~4.4]{HST2019rough}.} as in \cite[Proposition 4.4]{HST2019rough}: \be\label{e.c51502} - \frac{2\alpha \mu \chi \cdot \nu}{|\chi|^2 + |\log|\nu||^{-2\theta/\alpha}} g +\frac{\bar c^{\delta f}\tau f +\bar c^{f}\delta f}{(|\chi|^2 + |\log|\nu||^{-2\theta/\alpha})^{\mu\alpha}}\delta f \vv^{2m} \lesssim g + \sqrt{ g \|g(t)\|_{L^\infty(\R^6\times B_1^2)}}. \ee Here we used~\eqref{e.bar_c}, the condition that $m > 5+\gamma$, and that the $L^{\infty,k}$-norm of $f$ bounds $\vv^m \tau f$. Additionally, arguing as in \cite[Proposition 4.4]{HST2019rough}, one sees \be\notag \frac{|\bar a^{\delta f}|}{(|\chi|^2 + |\log|\nu||^{-2\theta/\alpha})^{\mu\alpha/2}} \lesssim \vv^{(2+\gamma)_+} \sqrt{\|g(t)\|_{L^\infty(\R^6\times B_1^2)}}. \ee The argument for this uses the definition of $g$ in terms of $\delta f$ and~\eqref{e.bar_a_above}. Hence, we have \be\label{e.c52405} \begin{split} \partial_t g + v\cdot\nabla_x g + &\nu\cdot \nabla_\chi g - 2\frac{\tr(\bar a^{ f}D_{v}^{2}\delta f)}{(|\chi|^2 + |\log|\nu||^{-2\theta/\alpha})^{\mu\alpha}}\delta f \vv^{2m} \\& \lesssim g + \left(1 + \|D_{v}^{2}\tau f(t)\|_{L^{\infty,m+(2+\gamma)_+}(\R^6)}\right) \sqrt{g\|g(t)\|_{L^\infty(\R^6 \times B_1^2)}}. \end{split} \ee This concludes the first step. To briefly comment on how we proceed from here, note that, roughly the terms on the left hand side should have a good sign at a maximum of $g$ (if we think of $\delta f$ as, approximately $\sqrt g$). On the other hand, the pure $g$ term on the right hand side lend itself to the construction of a barrier. The most complicated term is the Hessian term in $\tau f$. For this, we use \Cref{l.w02102} and the fact that the Hessian term on the left has a small parameter in front (when $t_0 \ll 1$), which, through a somewhat complicated process, allows us to to absorb this into Hessian in the left hand side of \Cref{l.w02102}. 
\smallskip \noindent {\bf Step 2: an upper barrier.} With $N > 1$ to be chosen later and fixing any $0< \mu' < \mu$ such that \be \frac{\mu' \theta}{2} \frac{\mu' \alpha}{2 + \mu'\alpha} > 1, \ee define $\bar G$ to be the unique solution to \begin{equation}\label{e.w02281} \begin{cases} \frac{d}{dt} \bar G(t) = \frac{N^2}{t \left(\log\frac{1}{t}\right)^{\frac{\mu\theta}{2}\frac{\mu\alpha}{2+\mu\alpha}}} (1 + \bar G)^{\frac{1}{2} + \frac{1}{\mu' \alpha}} \bar G, \\ \bar G(0)=\|g(0)\|_{L^\infty} + N\|f\|_{L^{\infty, m}}^2+1 . \end{cases} \end{equation} Let $T_1$ be the largest time in $[0,1/2]$ that $\bar G(T_1) \leq 2 \bar G(0)$. Let \be T_0 = \min\{1, T_1, T_2\} \ee for $T_2$ to be chosen in the sequel. Clearly $T_1$ depends on $N$, but $N$ will be chosen to depend only on $\mu$, $\alpha$, $\theta$, $m$, and $k$. We note that \be\label{e.c52601} \bar G(t) \geq 1 \qquad\text{ for all } t\in [0,T_0]. \ee We define the auxiliary function \be G_2(t) = t \left(\log \frac{1}{t}\right)^{\frac{\mu'\theta}{2}\frac{\mu'\alpha}{2 + \mu'\alpha}} \|D^2_v f\|_{L^{\infty,m+(2+\gamma)_+}([t/2,t]\times\R^6)}, \ee and then let \be G(t, x, v, \chi, \nu) = \max\left\{g(t,x,v,\chi,\nu), \Big(\frac{1}{N} G_2(t)\Big)^\frac{2\mu' \alpha}{2 + \mu'\alpha}\right\}. \ee Our goal is to show that, for $t\in [0,T_0]$, \begin{equation} G(t,x,v,\chi,\nu) < \bar G(t). \end{equation} This is true at $t=0$ by construction (recall that, without loss of generality, our $f$ is smooth, so that $G_2(0) = 0$). Hence, we may define \be\label{e.c52301} t_0 = \sup\{\bar t \in[0,T_0]: \|G(s)\|_{L^\infty(\R^6)} < \bar G(s) \quad\text{for all } s \in [0,\bar t]\}. \ee If $t_0 = T_0$, we are finished. Hence, we argue by contradiction, assuming that \be\label{e.c51505} t_0 < T_0. \ee \smallskip \noindent{\bf Step 3: The case where $g$ is not the dominant term in $G$.} Clearly $\|G(t_0)\|_{L^\infty(\R^6 \times B_1^2)} = \bar G(t_0)$. Consider the case where \be\label{e.c52402} \|g(t_0)\|_{L^\infty(\R^6 \times B_1^2)} < \Big(\frac{1}{N} G_2(t_0)\Big)^\frac{2\mu' \alpha}{2 + \mu'\alpha} \quad\text{ so that } \qquad \Big(\frac{1}{N} G_2(t_0)\Big)^\frac{2\mu' \alpha}{2 + \mu'\alpha} = \bar G(t_0). \ee Then, using \Cref{l.w02102} and that \be t_0 \left(\log \frac{1}{t_0}\right)^{\frac{\mu'\theta}{2}\frac{\mu'\alpha}{2 + \mu'\alpha}} \|D^2_v f\|_{L^\infty([t_0/4,t_0]\times \R^6)} \lesssim G_2(t_0/2) + G_2(t_0), \ee we find \be \begin{split} G_2(t_0) \lesssim\ &\left(1 + \|\vv^{m+2+(2+\gamma)_+} f\|_{\logalphamup([t_0/4,t_0]\times \R^6)}\right)^{1 + \frac{2}{\mu'\alpha}} \\& \quad + t_0^\frac{\mu' \alpha}{2} \left(G_2(t_0/2) + G_2(t_0)\right)^\frac{\mu'\alpha}{2}. \end{split} \ee Using the interpolation lemma (\Cref{l.weight_interpolation}), \Cref{l.w02101}, and increasing $k$ if necessary, we find \be\label{e.c52401} \begin{split} G_2(t_0) \lesssim\ &\left(1 + \|g\|^{1/2}_{L^\infty([t_0/4,t_0]\times \R^6 \times B_1^2)}\right)^{1 + \frac{2}{\mu'\alpha}} + t_0^\frac{\mu' \alpha}{2}\left(G_2(t_0/2)^\frac{\mu'\alpha}{2} + G_2(t_0)^\frac{\mu'\alpha}{2}\right). \end{split} \ee We recall that we are not tracking the $L^{\infty,k}$-norm of $f$ as it is bounded by assumption. We also note that it is in this step that we used that $\mu' < \mu$. By the choice of $t_0$ and the fact that $\bar G$ is increasing, we see that \be G_2(t_0/2) \leq N\bar G(t_0/2)^\frac{2+\mu'\alpha}{2\mu'\alpha} \leq N \bar G(t_0)^\frac{2+\mu'\alpha}{2\mu'\alpha} = G_2(t_0). 
\ee Also, by the definition of $t_0$ and~\eqref{e.c52402}, \be \|g\|_{L^\infty([t_0/4,t_0]\times \R^6 \times B_1^2)} \leq \sup_{t\in[t_0/4,t_0]} \bar G(t) = \bar G(t_0) = \Big(\frac{1}{N} G_2(t_0)\Big)^\frac{2\mu'\alpha}{2 + \mu'\alpha}. \ee Then~\eqref{e.c52401} becomes: \be \begin{split} G_2(t_0) &\lesssim \left(1 + \Big(\frac{1}{N} G_2(t_0)\Big)^\frac{\mu' \alpha}{2+\mu'\alpha}\right)^\frac{2 + \mu'\alpha}{\mu'\alpha} + t_0^\frac{\mu' \alpha}{2} G_2(t_0)^\frac{\mu'\alpha}{2} \lesssim 1 + \frac{1}{N} G_2(t_0) + t_0^\frac{\mu' \alpha}{2} G_2(t_0)^\frac{\mu'\alpha}{2}. \end{split} \ee Since $G_2(t_0) = N \bar G(t_0)^\frac{2+\mu'\alpha}{2\mu'\alpha} >1$, we have that \be G_2(t_0) \lesssim 1+ \frac{1}{N} G_2(t_0) + t_0^\frac{\mu' \alpha}{2}G_2(t_0). \ee After increasing $N$ and decreasing $T_2$, we may absorb the last two terms on the right into the left hand side. After this and recalling~\eqref{e.c52402}, we find \be N \bar G(t_0)^\frac{2+\mu'\alpha}{2\mu'\alpha} = G_2(t_0) \lesssim 1. \ee After further increasing $N$ and recalling~\eqref{e.c52601}, this is clearly a contradiction. It follows that~\eqref{e.c52402} cannot hold. We conclude that \be\label{e.c52403} \|g(t_0)\|_{L^\infty(\R^6\times B_1^2)} = \bar G(t_0). \ee An important consequence of this is that, for all $t \leq t_0$, \be\label{e.c52404} \|D^2_v f\|_{L^{\infty,m+(2+\gamma)_+}([t/2,t]\times\R^6)} \leq \frac{N}{t \left(\log \frac{1}{t}\right)^{\frac{\mu'\theta}{2}\frac{\mu'\alpha}{2+\mu'\alpha}}} \|g(t)\|_{L^\infty(\R^6 \times B_1^2)}^{\frac{1}{2} + \frac{1}{\mu' \alpha}}. \ee \smallskip \noindent{\bf Step 4: The bad Hessian term in~\eqref{e.c52405} and an interpolation.} We now use~\eqref{e.c52404} in~\eqref{e.c52405} to bound the norm of the Hessian that arises there. We require one additional fact. By the choice of $t_0$ and by~\eqref{e.c52403}, we have \be\label{e.c52406} \|g\|_{L^\infty([0,t_0]\times \R^6\times B_1^2)} = \|g(t_0)\|_{L^\infty(\R^6\times B_1^2)}. \ee Thus, at $(t_0,x_0,v_0,\chi_0,\nu_0)$, the combination of~\eqref{e.c52404} and~\eqref{e.c52406} in~\eqref{e.c52405} yields \be\label{e.c52407} \begin{split} \partial_t g + &v\cdot\nabla_x g + \nu\cdot \nabla_\chi g - 2\frac{\tr(\bar a^{ f}D_{v}^{2}\delta f)}{(|\chi|^2 + |\log|\nu||^{-2\theta/\alpha})^{\mu\alpha}}\delta f \vv^{2m} \\& \lesssim g + \frac{N}{t_0 \left(\log \frac{1}{t_0}\right)^{\frac{\mu'\theta}{2}\frac{\mu'\alpha}{2+\mu'\alpha}}} g^{\frac{3}{2} + \frac{1}{\mu'\alpha}} \lesssim \frac{N}{t_0 \left(\log \frac{1}{t_0}\right)^{\frac{\mu'\theta}{2}\frac{\mu'\alpha}{2+\mu'\alpha}}}(1 + g^{\frac{1}{2} + \frac{1}{\mu'\alpha}})g. \end{split} \ee \smallskip \noindent{\bf Step 5: finding a touching point.} Using~\eqref{e.c52403} and arguing exactly as in the proof of \cite[Proposition~4.4]{HST2019rough}, we may assume without loss of generality that there exists $(x_0,v_0,\chi_0,\nu_0) \in \R^6 \times \bar B_1 (0)^2$ such that \be\label{e.c51203} g(t_0, x_0,v_0,\chi_0,\nu_0) =\bar G(t_0). \ee We omit the argument. \smallskip \noindent{\bf Step 6: the touching point must be in $B_1(0)^2$.} If $\chi_0$ or $\nu_0$ were on the boundary, that is, either $\chi_0 \in \partial B_1 (0)$ or $\nu_0 \in \partial B_1(0)$, we deduce from the definition of $g$ that \begin{equation}\label{e.c51202} \begin{split} g(t_0, x_0, v_0,\chi_0,\nu_0)&\lesssim |\delta f(t_0, x_0, v_0,\chi_0,\nu_0)|^2 \langle v_0 \rangle^{2m} \\&\lesssim (f(t_0, x_0+\chi_0,v_0 +\nu_0)^2 + f(t_0, x_0, v_0)^2)\langle v_0 \rangle^{2m} \lesssim \|f\|^{2}_{L^{\infty, m}}.
\end{split} \end{equation} In particular, this implies that, after possibly enlarging $N$ depending only on the implied constant in~\eqref{e.c51202}, \begin{equation}\notag \begin{split} g(t_0, x_0, v_0,\chi_0,\nu_0)\leq N\|f\|^{2}_{L^{\infty, m}} . \end{split} \end{equation} We see from \eqref{e.w02281} that $\bar G$ increases with time $t$. Thus, \begin{equation}\notag \bar G(t_0) \geq \bar G(0) >N\|f\|^{2}_{L^{\infty, m}} , \end{equation} which contradicts~\eqref{e.c51203}. \smallskip \noindent {\bf Step 7: estimating the remaining term in~\eqref{e.c52407}.} We begin by expanding the last term on the left hand side of~\eqref{e.c52407} at the point $(t_0,x_0,v_0,\chi_0,\nu_0)$. This is a simple multivariable calculus computation that is exactly as in \cite[Proposition~4.4]{HST2019rough}, so we omit it and simply state that: \be\notag \frac{\tr(\bar a^f D^2_v \delta f)}{(|\chi|^2 + |\log|\nu||^{-2\theta/\alpha})^{\mu\alpha}} \delta f \vv^{2m} = \tr(\bar a^f D^2_v g) + \frac{2 m g}{\vvo^4} \left((m+2)v_0 \cdot \bar a^f v_0 - \vvo^2\tr \bar a^f\right). \ee This argument occurs at and below (4.7) in \cite{HST2019rough}. Since $g$ is at a maximum, we further obtain \be\notag \frac{\tr(\bar a^f D^2_v \delta f)}{(|\chi|^2 + |\log|\nu||^{-2\theta/\alpha})^{\mu\alpha}} \delta f \vv^{2m} \leq \frac{2 m g}{\vvo^4} \left((m+2)v_0 \cdot \bar a^f v_0 - \vvo^2\tr \bar a^f\right). \ee Hence, arguing as in \cite[Proposition~4.4]{HST2019rough} to bound the terms on the right hand side above, we find\footnote{This is the estimate of $J_3$ in \cite{HST2019rough}. It is somewhat obvious from~\eqref{e.bar_a_above}.} \be\notag \frac{\tr(\bar a^f D^2_v \delta f)}{(|\chi|^2 + |\log|\nu||^{-2\theta/\alpha})^{\mu\alpha}} \delta f \vv^{2m} \lesssim g. \ee Combining the above with~\eqref{e.c52407}, we have, at $(t_0, x_0,v_0, \chi_0,\nu_0)$, \be\label{e.c51504} \partial_t g + v\cdot\nabla_x g + \nu \cdot \nabla_\chi g \lesssim \frac{N}{t_0 \left(\log \frac{1}{t_0}\right)^{\frac{\mu'\theta}{2}\frac{\mu'\alpha}{2+\mu'\alpha}}} \left(1 + g\right)^{\frac{1}{2} + \frac{1}{\mu' \alpha}} g. \ee \smallskip \noindent {\bf Step 8: concluding the proof.} By the construction of $(t_0,x_0,v_0,\chi_0,\nu_0)$, it is a minimum of $\bar G - g$ on $[0,t_0]\times \R^6 \times B_1^2$. Hence, \be\notag \partial_t (\bar G - g) + v\cdot\nabla_x (\bar G - g) + \nu \cdot \nabla_\chi(\bar G - g) \leq 0. \ee Using~\eqref{e.w02281} and~\eqref{e.c51504} and recalling that $\bar G(t_0) = g(t_0,x_0,v_0,\chi_0,\nu_0)$, this implies that, at $(t_0,x_0,v_0,\chi_0,\nu_0)$, \be\notag \begin{split} \frac{N^2}{t_0 \left(\log \frac{1}{t_0}\right)^{\frac{\mu' \theta}{2} \frac{\mu' \alpha}{2 + \mu'\alpha}}} \left(1 + \bar G\right)^{\frac{1}{2} + \frac{1}{\mu'\alpha}} \bar G &\lesssim \frac{N}{t_0 \left(\log \frac{1}{t_0}\right)^{\frac{\mu'\theta}{2}\frac{\mu'\alpha}{2+\mu'\alpha}}} \left(1 + g\right)^{\frac{1}{2} + \frac{1}{\mu' \alpha}} g \\& = \frac{N}{t_0 \left(\log \frac{1}{t_0}\right)^{\frac{\mu'\theta}{2}\frac{\mu'\alpha}{2+\mu'\alpha}}} \left(1 + \bar G\right)^{\frac{1}{2} + \frac{1}{\mu' \alpha}} \bar G. \end{split} \ee This is a contradiction if $N$ is sufficiently large. Hence, it must be that~\eqref{e.c51505} does not hold, implying that \be\notag \sup_{(x,v,\chi,\nu)\in \R^6\times B_1^2} g(t,x,v,\chi,\nu) \leq \bar G(t) \qquad\text{ for all } t\in [0,T_0] \ee by definition of $t_0$. Recalling \Cref{l.w02101}, this concludes the proof of the bound on $\|\vv^m f\|_{\calphamu([0,T_0]\times \R^6)}$.
The proof of the bound on the Hessian term in \Cref{p.w02091} follows from~\eqref{e.c52404} and the arbitrariness of $\mu$ and $\mu'$. \end{proof} \subsection{Scaling the Schauder estimates: proof of \Cref{l.w02102}}\label{s.schauder_scaling} Due to the degeneracy of the ellipticity constants of $\bar a^f$ as $|v|\rightarrow \infty$ and the fact that $Q_1(t_0,x_0,v_0)$ may involve negative times, we must perform a change of variables. We begin by defining this change of variables. It is the one used in~\cite{Cameron-Silvestre-Snelson,HS,HST2019rough}. Fix $z_0 \in \R_{+}\times \R^{6}$. Let $S$ be the linear transformation such that \begin{equation}\label{e.w05201} Se = \begin{cases} \langle v_{0}\rangle^{1+\gamma/2}e,& e\cdot v_{0}=0\\ \langle v_{0}\rangle^{\gamma/2}e,& e\cdot v_{0}=|v_0|, \end{cases} \end{equation} and let \begin{equation}\label{e.w05202} r_0 =\langle v_{0}\rangle^{-(1+\gamma/2)_{+}}\min(1,\sqrt{t_0/2}) . \end{equation} Then we have the rescaled function \be\label{e.f_z_0} f_{z_0}(z) := f(r_0^2 t + t_0, r_0^3 Sx + x_0 + r_0^2 t v_0, r_0 S v + v_0), \ee which satisfies the rescaled equation \be\notag (\partial_t + v\cdot\nabla_x) f_{z_0} = \tr(\bar A D^2_v f_{z_0}) + \bar C f_{z_0} \ee with coefficients \be\notag \begin{split} &\bar A(z) = S^{-1} \bar a^f(r_0^2 t + t_0, r_0^3 S x + x_0+ r_0^2 t v_0, r_0 S v + v_0) S^{-1} \quad\text{ and} \\& \bar C(z) = r_0^2 \bar c^f(r_0^2 t + t_0, r_0^3 S x + x_0+ r_0^2 t v_0, r_0 S v + v_0). \end{split} \ee Roughly, the argument of $f$ in the definition of $f_{z_0}$ can be written as $z_0 \circ (Sz)_{r_0}$ where $z_r = (r^2 t, r^3 x, r v)$ is the kinetic scaling by a factor $r$ and \be z' \circ z = (t' + t, x' + x + t v', v' + v) \ee is related to the Galilean Lie group structure associated to $\partial_t + v\cdot\nabla_x$. For simplicity, we opt not to use this further, although it is common in the literature. It is immediate from~\eqref{e.bar_a_above},~\eqref{e.bar_a_below}, and \cite[Proposition~3.1]{HS} that \be\label{e.c51204bis} \bar A \approx \Id \qquad\text{ on } Q_1, \ee and, by an easy computation (see~\cite[eqn~(2.15)]{HST2019rough}), \be\label{e.c51204bisbis} \bar C(z) \lesssim \vvo^{-2} \min\{1, t_0\} \|f\|_{L^{\infty,m}} \ee for any $m > 3$. Additionally, one can observe that \be\label{e.c51413} \|f_{z_0}\|_{L^\infty(Q_1)} \lesssim \vvo^{-k} \|f\|_{L^{\infty,k}}. \ee We omit the proof of the above inequalities as they are straightforward and already contained in \cite{HS, HST2019rough}. We note that the coefficients have the following regularity: \begin{Lemma} \label{l.w01161} For $m, k> 5 + \gamma$ and $\alpha \in (0,1)$, we have \begin{equation}\label{e.w01241} [\bar A]_{\calpha(Q_{3/4})} \lesssim t_0^\frac{\alpha}{2} \left(\vvo^{2-\alpha} \|f\|_{L^{\infty,k}([t_0/4,t_0]\times \R^6)} + \vvo^2 \|\vv^m f\|_{C_x^{\alpha/3}([t_0/4,t_0]\times \R^6)}\right) \end{equation} and \be\notag [\bar C]_{\calpha(Q_{3/4})} \lesssim t_0^{1+\frac{\alpha}{2}} \vvo^\gamma \|\vv^m f\|_{\calpha([t_0/4,t_0]\times \R^6)}. \ee \end{Lemma} We note that \Cref{l.w01161} is stronger than its analogue \cite[Lemma~2.7]{HST2019rough} as we leverage the convolutional nature of $\bar a^f$ to obtain additional regularity in $v$ even when $f$ lacks regularity in $v$. Additionally, the fact that we do not require $t$-regularity allows us to avoid the slight loss of regularity seen in \cite[Lemma~2.7]{HST2019rough}. On the other hand, we note that we make no effort to optimize the $v_0$-weights in \Cref{l.w01161}. We prove \Cref{l.w01161} in \Cref{s.technical}.
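Before continuing, we record, for the reader's convenience, the elementary chain-rule computation behind the rescaled equation for $f_{z_0}$ above. Writing $\tilde z = (r_0^2 t + t_0,\, r_0^3 S x + x_0 + r_0^2 t v_0,\, r_0 S v + v_0)$ for the argument of $f$ in~\eqref{e.f_z_0} and using that $S$ is symmetric,
\be\notag
(\partial_t + v\cdot\nabla_x) f_{z_0}(z) = r_0^2\big[\partial_t f + (v_0 + r_0 S v)\cdot\nabla_x f\big](\tilde z) = r_0^2 \big[(\partial_t + v\cdot\nabla_x) f\big](\tilde z)
\ee
and
\be\notag
D^2_v f_{z_0}(z) = r_0^2\, S \big(D^2_v f\big)(\tilde z)\, S.
\ee
Hence, by the cyclicity of the trace,
\be\notag
\tr\big(\bar A(z) D^2_v f_{z_0}(z)\big) + \bar C(z) f_{z_0}(z) = r_0^2\Big[\tr\big(\bar a^f D^2_v f\big) + \bar c^f f\Big](\tilde z),
\ee
so $f_{z_0}$ satisfies the rescaled equation above precisely because $f$ solves $(\partial_t + v\cdot\nabla_x) f = \tr(\bar a^f D^2_v f) + \bar c^f f$ at $\tilde z$.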
Moreover, we immediately see that the regularities of $f_{z_0}$ and $f$ are related by: \be\label{e.c51417} \|f\|_{\calpha(Q_{r_0/2}(z_0))} \lesssim \min\{1, t_0\}^{-\alpha/2}\vvo^{\alpha( (1 + \gamma/2)_+ - \gamma/2)} \|f_{z_0}\|_{\calpha(Q_{1/2})}. \ee Analogous statements hold for higher regularity seminorms of $f$ as well. Here, we are introducing the additional notation that \be\label{e.c52410} Q_r(z_0) = \{(t,x,v) : t_0 - r^2< t\leq t_0, |x-x_0 - (t-t_0)v_0| < r^3, |v-v_0| < r\}. \ee Finally, before proving \Cref{l.w02102}, we state two final technical results related to scaling: \begin{Lemma}[$\log$-H\"older interpolation inequality]\label{l.w01081} Fix any $u : \R^d \to \R$ and $r > 0$. For $\alpha \in (0,1)$, $\theta>0$, and any $\eps\in(0,r)$, \begin{equation}\notag \begin{split} \|D_{v}^{2}u\|_{L^{\infty}(Q_r)} \lesssim \frac{\log(1/\eps)^{-\theta}}{\eps^2}[u]_{\log(\frac{1}{C})^{-\theta}(Q_r)} + \eps^\alpha [u]_{C^{2,\alpha}(Q_r)} . \end{split} \end{equation} The implied constant depends only on $\theta$ and $\alpha$. \end{Lemma} \begin{Lemma}\label{l.logH_scaling} We have, for $t_0 < 1/2$, \be\label{e.c022501} [f_{z_0}]_{\log(\frac{1}{C})^{-\theta/2}(Q_1)} \lesssim [f]_{\log(\frac{1}{C})^{-\theta}(Q_{t_0/2}(z_0))} \log\Big(\frac{1}{t_0}\Big)^{-\theta/2}. \ee \end{Lemma} The proofs of these two lemmas are also postponed to \Cref{s.technical}. We now prove the lemma on the scaling of the Schauder estimates. \begin{proof}[Proof of \Cref{l.w02102}] Throughout the proof we assume that \be\notag [\vv^m f]_{\logalpha([t_0/4,t_0]\times \R^6)} < \infty. \ee If this were not true, then the claim in \Cref{l.w02102} follows immediately. Fix $\eps\in(0,1/2)$ to be determined. Applying our $\log$-H\"older interpolation lemma (\Cref{l.w01081}), we see \begin{equation}\label{e.c51410} \|D_{v}^{2}f_{z_0}\|_{L^\infty (Q_{1/2})} \lesssim \frac{\log(1/\eps)^{-\theta/2}}{\eps^2}[f_{z_0}]_{\log(\frac{1}{C_v})^{-\theta/2}(Q_{1/2})} + \eps^{\alpha}[D^2_v f_{z_0}]_{\calpha(Q_{1/2})}. \end{equation} Clearly the first term in~\eqref{e.c51410} can be bounded by simply removing the scaling. Indeed, applying \Cref{l.logH_scaling}, we find \be\label{e.c51411} [f_{z_0}]_{\log(\frac{1}{C})^{-\theta/2}} \lesssim \left( \log \frac{1}{t_0}\right)^{-\theta/2} [f]_{\logalpha(Q_{t_0/2}(z_0))}. \ee For the second term in~\eqref{e.c51410}, we require our Schauder estimates \Cref{t.Schauder}. Applying this yields \be\notag [D^2_v f_{z_0}]_{\calpha(Q_{1/2})} \lesssim \left( 1 + [\bar C]_{\calpha(Q_{3/4})} + [\bar A]_{\calpha(Q_{3/4})}^{1 + \frac{2}{\alpha}} \right) \|f_{z_0}\|_{L^\infty(Q_{3/4})}. \ee We note that the statement of \Cref{t.Schauder} involves a cylinder $Q_1$ on the right hand side instead of $Q_{3/4}$; however, it is a simple scaling argument to obtain the above, so we omit the details. We use this cylinder in order to obtain an estimate below insulated from $t=0$ by $t_0/4$. Using~\eqref{e.c51413} and \Cref{l.w01161}, we obtain \be\notag \begin{split} &[D^2_v f_{z_0}]_{\calpha(Q_1)} \lesssim \vvo^{-k} \bigg( 1 + t_0^{1+\frac{\alpha}{2}} \vvo^{\gamma} \|\vv^m f\|_{C^\alpha_v([t_0/4,t_0])} + \\& \phantom{MMMMMMMMM} \left(t_0^\frac{\alpha}{2} \vvo^{2-\alpha} \|f\|_{L^{\infty,k}([t_0/4,t_0])} + t_0^\frac{\alpha}{2} \vvo^2 \|\vv^m f\|_{C^{\alpha/3}_x([t_0/4,t_0])} \right)^{1 + \frac2{\alpha}}\bigg) \|f\|_{L^{\infty,k}}. \end{split} \ee We recall that, by assumption, $\|f\|_{L^{\infty,k}}$ is finite. This is inherited from \cite[Theorem~1.2]{HST2019rough}.
Hence, \be\label{e.c51416} \begin{split} [D^2_v f_{z_0}]_{\calpha(Q_1)} \lesssim\ &\vvo^{-k + \gamma} t_0^{1 + \frac{\alpha}{2}} [\vv^m f]_{C^\alpha_v([t_0/4,t_0])} \\& + \vvo^{-k + 2 + \alpha} \left( 1 + \|\vv^m f\|_{C^{\alpha/3}_x([t_0/4,t_0])} \right)^{1 + \frac2{\alpha}}. \end{split} \ee Using~\eqref{e.c51411} and~\eqref{e.c51416} in~\eqref{e.c51410}, we find \be\notag \begin{split} \|D^2_v f_{z_0}\|_{L^\infty(Q_{1/2})} \lesssim &\frac{\log(1/\eps)^{-\theta/2}}{\eps^2} \left(\log \frac{1}{t_0}\right)^{-\frac{\theta}{2}} [f]_{\logalpha(Q_{t_0/4}(z_0))} \\& + \eps^{\alpha} \vvo^{-k + 2 + \alpha} \left( 1 + \|\vv^m f\|_{C^{\alpha/3}_x([t_0/4,t_0])} \right)^{1 + \frac2{\alpha}} \\& + \eps^\alpha \vvo^{-k + \gamma} t_0^{1 + \frac{\alpha}{2}} [\vv^m f]_{C^\alpha_v([t_0/4,t_0])}. \end{split} \ee Undoing the change of variables (similar to~\eqref{e.c51417}) and combining terms yields \be\label{e.c51418} \begin{split} \frac{t_0}{\vvo^2} \|D^2_v f\|_{L^\infty(Q_{t_0/2}(z_0))} \lesssim\ &\frac{\log(1/\eps)^{-\theta/2}}{\eps^2} \left(\log \frac{1}{t_0}\right)^{-\frac{\theta}{2}} [f]_{\logalpha(Q_{t_0/4}(z_0))} \\& + \eps^{\alpha} \vvo^{-k + 2 + \alpha} \left( 1 + \|\vv^m f\|_{C^{\alpha/3}_x([t_0/4,t_0]\times \R^6)} \right)^{1 + \frac2{\alpha}} \\& + \eps^\alpha \vvo^{-k + \gamma} t_0^{1 + \frac{\alpha}{2}} [\vv^m f]_{C^\alpha_v([t_0/4,t_0])}. \end{split} \ee Next, we take \be\notag \eps = \min\left\{1/4, \log(1/t_0)^{-\frac{\theta}{2(2+ \alpha)}}\right\} \ee so that~\eqref{e.c51418} becomes \be\notag \begin{split} \frac{t_0}{\vvo^2} &\|D^2_v f\|_{L^\infty(Q_{t_0/2}(z_0))} \lesssim \left( \log \frac{1}{t_0}\right)^{-\frac{\alpha}{2(2 + \alpha)}\frac{\theta}{2}} [f]_{\logalpha(Q_{t_0/4}(z_0))} \\& + \left( \log \frac{1}{t_0}\right)^{-\frac{\alpha}{2(2 + \alpha)}\frac{\theta}{2}}\vvo^{-k + 2 + \alpha} \left( 1 + \|\vv^m f\|_{C^{\alpha/3}_x([t_0/4,t_0]\times \R^6)} \right)^{1 + \frac2{\alpha}} \\& + \left( \log \frac{1}{t_0}\right)^{-\frac{\alpha}{2(2 + \alpha)}\frac{\theta}{2}} \vvo^{-k + \gamma} t_0^{1 + \frac{\alpha}{2}} [\vv^m f]_{C^\alpha_v([t_0/4,t_0])}. \end{split} \ee Dividing by $t_0$, multiplying by $\vvo^m$, increasing $k$ if necessary, and taking the supremum over all choices of $(x_0,v_0)$, we find \be\notag \begin{split} \|D^2_v f\|_{L^{\infty,m-2}([t_0/2,t_0])} \lesssim\ &\frac{1}{t_0 \left( \log \frac{1}{t_0}\right)^{\frac{\alpha}{2(2 + \alpha)}\frac{\theta}{2}}} \left( 1 + \|\vv^m f\|_{\logalpha([t_0/4,t_0]\times \R^6)} \right)^{1 + \frac2{\alpha}} \\& + \left( \log \frac{1}{t_0}\right)^{-\frac{\alpha}{2(2 + \alpha)}\frac{\theta}{2}} t_0^\frac{\alpha}{2} [\vv^m f]_{C^\alpha_v([t_0/4,t_0])}. \end{split} \ee In order to remove the last term above, it suffices to apply \cite[Lemma~B.2]{HST2019rough} (which is analogous to \Cref{l.weight_interpolation} but stated for standard H\"older spaces) to obtain \be \left( \log \frac{1}{t_0}\right)^{-\frac{\alpha}{2(2 + \alpha)}\frac{\theta}{2}} t_0^\frac{\alpha}{2} [\vv^m f]_{C^\alpha_v([t_0/4,t_0])} \lesssim \left( \log \frac{1}{t_0}\right)^{-\frac{\alpha}{2(2 + \alpha)}\frac{\theta}{2}} t_0^\frac{\alpha}{2} \left(\| D^2_vf\|_{L^{\infty,m-2}([t_0/4,t_0])}^\frac{\alpha}{2} + 1\right) \ee We remind the reader that $\|f\|_{L^{\infty,k}} \lesssim 1$. This concludes the proof. \end{proof} \subsection{Proof of technical lemmas}\label{s.technical} We begin by establishing the H\"older regularity of the transformed coefficients $\bar A$ and $\bar C$. 
In order to make the notation more compact, we define, for any $z$, \be\notag \tilde z := z_0 \circ (Sz)_{r_0} = (r_0^2 t + t_0, r_0^3 S x + x_0 + r_0^2 t v_0, r_0 S v + v_0). \ee As $r_0$ and $z_0$ remain fixed in the following proof, there is no risk of confusion. \begin{proof}[Proof of \Cref{l.w01161}] We note that the proofs for $\bar C$ and $\bar A$ are essentially the same. Hence, we show only the proofs of $(x,v)$-regularity of $\bar C$ and omit the proof of $x$-regularity of $\bar A$. We include the proof of $v$-regularity of $\bar A$ in order to show how to extract $v$-regularity of $\bar A$ without using the $v$-regularity of $f$. The proof of $x$-regularity is essentially the same as in \cite[Lemma~2.7]{HST2019rough}. However, since our statement is a bit different (here there is no loss of regularity and we have less strict requirements on $m$), we provide the proof for completeness. We begin by fixing any $z, z' \in Q_1$ with $t = t'$ and $v = v'$. Then \begin{equation}\notag \begin{split} |\bar C(z)-\bar C(z')| &\lesssim \int |w|^{\gamma}|f(\tilde t, \tilde x, \tilde v-w)-f(\tilde t, \tilde x', \tilde v-w)|\,dw \\&\lesssim \int |w|^{\gamma} |r_0^3(Sx - Sx')|^{\alpha/3} \langle \tilde v - w\rangle^{-m} \|\vv^m f\|_{C^{\alpha/3}_x([t_0/2,t_0]\times \R^6)}\,dw. \end{split} \ee Recalling the definitions of $r_0$ and $S$ in~\eqref{e.w05201}-\eqref{e.w05202} and that $|v| \leq 1$, we notice that \be\label{e.c51409} \langle \tilde v - w\rangle^{-m} = \langle r_0 S v + v_0 - w\rangle^{-m} \lesssim \langle v_0 - w\rangle^{-m}. \ee Additionally, \be\notag |\tilde x - \tilde x'|^{\alpha/3} = |r_0^3 (Sx - Sx')|^{\alpha/3} \lesssim t_0^{\alpha/2} |x-x'|^{\alpha/3}. \ee Hence, \be\notag \begin{split} |\bar C(z) - \bar C(z')| &\lesssim t_0^{\alpha/2} |x-x'|^{\alpha/3} \int |w|^{\gamma} \langle \tilde v - w\rangle^{-m} \|\vv^m f\|_{C^{\alpha/3}_x([t_0/2,t_0]\times \R^6)}\,dw \\&\lesssim t_0^{\alpha/2} |x-x'|^{\alpha/3}\|\vv^m f\|_{C^{\alpha/3}_x([t_0/2,t_0]\times \R^6)} \langle \tilde v \rangle^{\gamma} \\&\lesssim t_0^{\alpha/2} |x-x'|^{\alpha/3}\|\vv^m f\|_{C^{\alpha/3}_x([t_0/2,t_0]\times \R^6)} \vvo^{\gamma}. \end{split} \ee In the last line, we used that $\langle \tilde v\rangle \approx \vvo$. This concludes the proof of $x$-regularity for $\bar C$. The proof of $v$-regularity using the $v$-regularity of $f$ is similar. We now show how to establish $v$-regularity directly from the convolutional structure, without using the $v$-regularity of $f$; this is the argument required for $\bar A$ and, since the proofs for $\bar A$ and $\bar C$ are essentially the same, we display the computation for $\bar C$. Let $z,z'\in Q_1$ with $t=t'$ and $x=x'$. Changing variables, we have \begin{equation}\notag \begin{split} \bar C(z)-\bar C(z') = c_\gamma \int (|w|^\gamma - |w+\tilde v' - \tilde v|^\gamma) f(\tilde t, \tilde x, \tilde v - w) \, dw. \end{split} \end{equation} Let $R = 2|\tilde v - \tilde v'|$ and decompose the integral into two parts: \be\notag |\bar C(z) - \bar C(z')| \lesssim \left(\int_{B_R} + \int_{B_R^c}\right)||w|^\gamma - |w+\tilde v' - \tilde v|^\gamma| f(\tilde t, \tilde x, \tilde v - w) \, dw = I_1 + I_2. \ee For $I_1$, notice that (recall the definitions of $r_0$ and $S$ in~\eqref{e.w05201}-\eqref{e.w05202} and that $|v|, |v'| \leq 1$) \be\label{e.c51406} \langle \tilde v - w\rangle^{-m} = \langle r_0 S v + v_0 - w\rangle^{-m} \lesssim \langle v_0 - w\rangle^{-m}. \ee On the domain of $I_1$, clearly $\langle v_0 - w\rangle \approx \vvo$. Hence, \be\label{e.c51407} \begin{split} I_1 &\lesssim \int_{B_R} (|w|^\gamma+|w + \tilde v' - \tilde v|^\gamma) \langle \tilde v - w\rangle^{-k} \|f\|_{L^{\infty,k}} \, dw \lesssim \vvo^{-k} \|f\|_{L^{\infty,k}} R^{3+\gamma} \\&\lesssim \vvo^{-k} \|f\|_{L^{\infty,k}} |\tilde v- \tilde v'|^{3+\gamma}.
\end{split} \ee We now consider the final integral $I_2$. Using again~\eqref{e.c51406}, we find \be\label{e.c51408} \begin{split} I_2 &\lesssim \int_{B_R^c} |w|^{\gamma - \alpha} |\tilde v - \tilde v'|^\alpha \langle v_0 - w\rangle^{-k} \|f\|_{L^{\infty,k}} \, dw \\&\leq |\tilde v - \tilde v'|^\alpha \|f\|_{L^{\infty,k}} \int |w|^{\gamma - \alpha} \langle v_0 - w\rangle^{-k} \, dw \lesssim |\tilde v - \tilde v'|^\alpha \|f\|_{L^{\infty,k}} \vvo^{\gamma - \alpha}. \end{split} \ee In the last inequality we used that $\gamma - \alpha > -3$ so that the integral is finite. Combining~\eqref{e.c51407},~\eqref{e.c51408}, and recalling the definitions of $r_0$ and $S$ (see~\eqref{e.w05201}-\eqref{e.w05202}), we arrive at \be\notag |\bar C(z) - \bar C(z')| \lesssim t_0^{\alpha/2} \vvo^{\gamma - \alpha} \|f\|_{L^{\infty,k}} |v-v'|^{\alpha}, \ee which concludes the proof. \end{proof} We next prove the $\log$-H\"older interpolation lemma. \begin{proof}[Proof of \Cref{l.w01081}] We begin by obtaining a bound on $\|D u\|_{L^\infty(Q_r)}$. Let $v_0 \in Q_r$ be a point such that \be\label{e.c51401} \|D u\|_{L^\infty(Q_r)} \leq 2 |D u(v_0)|. \ee We claim that there is $\bar v$ so that \be\label{e.c51402} v_0 + \eps\bar v \in Q_r, \quad |\bar v| = 1, \quad\text{ and }\quad |\bar v \cdot Du(v_0)| \gtrsim |Du(v_0)|. \ee This is a basic (though somewhat complicated) plane geometry exercise that we postpone to the end of the proof. A Taylor expansion around $v_0$ yields, for some $s \in [0,1]$, \be\notag u(v_0 + \eps \bar v) - u(v_0) = \eps \bar v \cdot D u(v_0) + \frac{\eps^2}{2} \bar v \cdot D^2 u(v_0 + s \eps\bar v) \bar v. \ee Rearranging this, recalling~\eqref{e.c51401} and~\eqref{e.c51402}, and dividing by $\eps$, we arrive at \be\label{e.c51403} \begin{split} \|D u\|_{L^\infty(Q_r)} &\leq 2 |D u(v_0)| \lesssim \frac{|u(v_0 + \eps \bar v) - u(v_0)|}{\eps} + \eps |D^2 u(v_0+ s \eps \bar v)| \\& \leq \frac{\log(1/\eps)^{-\theta}}{\eps} [u]_{\log(\frac{1}{C})^{-\theta}(Q_r)} + \eps \|D^2 u\|_{L^\infty(Q_r)}. \end{split} \ee With~\eqref{e.c51403} in hand, we now use interpolation to obtain a bound on $D^2_v u$. Indeed, using standard interpolation estimates (see, e.g., \cite[Proposition 2.10]{imbert2018schauder}), we have \be\notag \|D^2u\|_{L^\infty(Q_r)} \lesssim \left(\frac{\eps}{\delta}\right)^\alpha [D^2 u]_{C^\alpha(Q_r)} + \frac{\delta}{\eps} \|D u\|_{L^\infty(Q_r)}, \ee where $\delta>0$ is a parameter to be chosen. Combining this with~\eqref{e.c51403}, we find \be\notag \begin{split} \|D^2u\|_{L^\infty(Q_r)} \lesssim \left(\frac{\eps}{\delta}\right)^\alpha [D^2 u]_{C^\alpha(Q_r)} + \frac{\delta}{\eps} \left( \frac{\log(1/\eps)^{-\theta}}{\eps} [u]_{\log(\frac{1}{C})^{-\theta}(Q_r)} + \eps \|D^2 u\|_{L^\infty (Q_r)}\right). \end{split} \ee After choosing $\delta$ sufficiently small, depending only on the implied constant, we may absorb the $\|D^2 u\|_{L^\infty}$ term from the right hand side into the left hand side. This yields \be\notag \begin{split} \|D^2u\|_{L^\infty(Q_r)} \lesssim \eps^\alpha [D^2 u]_{C^\alpha(Q_r)} + \frac{\log(1/\eps)^{-\theta}}{\eps^2} [u]_{\log(\frac{1}{C})^{-\theta}(Q_r)}, \end{split} \ee which concludes the proof up to establishing~\eqref{e.c51402}. We now prove~\eqref{e.c51402}. At the expense of a multiplicative constant, we may assume that $\eps < r/10$. Without loss of generality, we may assume that \be\label{e.c51414} \frac{Du(v_0)}{|Du(v_0)|} \cdot v_0 \leq 0. \ee Were this not the case, we work with $- Du(v_0)/|Du(v_0)|$ instead.
Then, we let \be\notag \bar v = \frac{1}{10} \frac{Du(v_0)}{|Du(v_0)|} - \mu v_0, \ee where $\mu$ is chosen so that $|\bar v| = 1$. Clearly, due to~\eqref{e.c51414}, \be\label{e.c51415} |v_0| \mu \in [9/10,1]. \ee Notice that \be\notag \bar v \cdot D u(v_0) = \frac{1}{10}|Du(v_0)| - \mu v_0 \cdot \frac{Du(v_0)}{|Du(v_0)|} \geq \frac{1}{10}|Du(v_0)|, \ee where the inequality holds due to~\eqref{e.c51414}. Next, using~\eqref{e.c51414} again as well as the fact that $\eps < r/10$, \be\notag |v_0 + \eps \bar v| = \Big|(1 - \eps \mu)v_0 + \frac{\eps}{10} \frac{Du(v_0)}{|Du(v_0)|}\Big| \leq |1 - \eps \mu| |v_0| + \frac{\eps}{10}. \ee First consider the case $\eps \mu \geq 1$. Then, using~\eqref{e.c51415}, \be\notag |v_0 + \eps \bar v| \leq \eps \mu |v_0| + \frac{\eps}{10} \leq \frac{11 \eps}{10} < r, \ee which implies that $v_0 + \eps \bar v \in Q_r$. Next consider the case when $\eps \mu < 1$. Then \be\notag |v_0 + \eps \bar v| \leq |v_0| - \frac{9 \eps}{10} + \frac{\eps}{10} < |v_0| < r, \ee which again implies that $v_0 + \eps \bar v \in Q_r$. Thus, we have established~\eqref{e.c51402}, which concludes the proof. \end{proof} We now prove the final technical lemma, \Cref{l.logH_scaling}, which involves the time scaling of the $\log$-H\"older norm of $f_{z_0}$, defined in~\eqref{e.f_z_0}. \begin{proof}[Proof of \Cref{l.logH_scaling}] Fix any $z\neq \tilde z \in Q_1$, with $t = \tilde t$, and notice that \be\notag (r_0^2 t + t_0, r_0^3 S x + x_0, r_0 S v + v_0), (r_0^2 \tilde t + t_0, r_0^3 S \tilde x + x_0, r_0 S \tilde v + v_0) \in Q_{t_0/2}(z_0). \ee Hence, \be\notag |f_{z_0}(z) - f_{z_0}(\tilde z)| \leq (r_0^\alpha |Sx - S\tilde x |^{\alpha/3} + \log(1/|r_0 (Sv-S\tilde v)|)^{-\theta}) [f]_{\logalpha(Q_{t_0/2}(z_0))}. \ee From the definition of $S$, it is clear that \be\notag r_0^3|S(x-\tilde x)| \lesssim t_0^{3/2} |x-\tilde x| \quad\text{ and }\quad r_0|S(v-\tilde v)| \leq \sqrt{t_0} |v-\tilde v|. \ee Hence, \be\label{e.c51405} \frac{|f_{z_0}(z) - f_{z_0}(\tilde z)|}{[f]_{\logalpha(Q_{t_0/2}(z_0))}} \lesssim t_0^{\alpha/2} |x - \tilde x|^{\alpha/3} + \left(\log\frac{1}{\sqrt{t_0}} + \log\frac{1}{|v-\tilde v|}\right)^{-\theta}. \ee Young's inequality yields \be\notag \left(\log\frac{1}{\sqrt{t_0}} + \log\frac{1}{|v-\tilde v|}\right)^{-\theta} \lesssim \left(\log\frac{1}{\sqrt{t_0}}\right)^{-\theta/2}\left(\log\frac{1}{|v-\tilde v|}\right)^{-\theta/2} \ee and it is straightforward to see that \be\notag t_0^{\alpha/2} \lesssim \left(\log\frac{1}{t_0}\right)^{-\theta/2}. \ee Returning to~\eqref{e.c51405}, we find \be\notag \frac{|f_{z_0}(z) - f_{z_0}(\tilde z)|}{[f]_{\logalpha(Q_{t_0/2}(z_0))}} \lesssim \left(\log\frac{1}{t_0}\right)^{-\theta/2} \left( |x-\tilde x|^{\alpha/3} + \left(\log\frac{1}{|v-\tilde v|}\right)^{-\theta/2}\right), \ee which concludes the proof. \end{proof} \begin{appendix} \section{Computation of the fundamental solution~\eqref{e.gamma_a}.}\label{appendix} In this section, we establish the form of the fundamental solution $\Gamma_{\bar a}$ for the $(x,v)$-homogeneous kinetic Fokker-Planck equation; that is, we prove \Cref{p.gamma_a}.
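Before giving the proof, we record, for the reader's convenience, the standard Gaussian integral identity that is used (twice) below: for a symmetric positive definite $d\times d$ matrix $A$ and any (possibly complex) vector $b$, completing the square gives
\be\notag
\int_{\R^d} e^{-\omega\cdot A \omega + b\cdot \omega}\, d\omega = e^{\frac{1}{4} b\cdot A^{-1} b} \int_{\R^d} e^{-(\omega - \frac{1}{2}A^{-1}b)\cdot A (\omega - \frac{1}{2}A^{-1}b)}\, d\omega = \frac{\pi^{d/2}}{\sqrt{\det A}}\, e^{\frac{1}{4} b\cdot A^{-1} b},
\ee
where $b\cdot A^{-1}b$ denotes the bilinear (not Hermitian) pairing.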
\begin{proof}[Proof of \Cref{p.gamma_a}] We first notice that it is enough to find $\Gamma_{\bar a}$ such that the solution to the initial value problem \be\label{e.c050101} (\partial_t + v\cdot\nabla_x) f = \tr(\bar a(t) D_v^2 f), \ee with suitably decaying initial data at $t=\tilde t$ is given by \be\label{e.c050102} f(t,x,v) = \int_{\R^d}\int_{\R^d} \Gamma_{\bar a}(t,x - \tilde x - (t-\tilde t) \tilde v, v - \tilde v;\tilde t) f(\tilde t,\tilde x, \tilde v)\, d\tilde x d \tilde v. \ee Indeed, it is simply an application of Duhamel's principle to go from~\eqref{e.c050102} to~\eqref{e.gamma_a}. As $\tilde t$ plays essentially no role in the computations below, we simply set $\tilde t = 0$ and drop the ``$;0$'' notation. Next, we notice that~\eqref{e.c050102} is equivalent to \be\label{e.c050201} \hat f(t,\xi,\omega) = (2\pi)^d \hat f(0,\xi, \omega + \xi t) \hat \Gamma_{\bar a}(t,\xi,\omega). \ee Indeed, taking the Fourier transform of~\eqref{e.c050102} and computing, we find \begin{equation}\label{e.w05021} \begin{split} &\hat f(t,\xi, \omega) = \frac{1}{(2\pi)^d} \int_{\R^d}\int_{\R^d} \left( \int_{\R^d}\int_{\R^d} \Gamma_{\bar a}(t,x - \tilde x - t \tilde v, v - \tilde v) f(0,\tilde x, \tilde v)\, d\tilde x d \tilde v \right)\exp{\{-ix\cdot \xi -iv\cdot \omega\} }\,dxdv \\&= \frac{1}{(2\pi)^d} \int_{\R^d}\int_{\R^d} \int_{\R^d}\int_{\R^d} \Gamma_{\bar a}(t,x - \tilde x - t \tilde v, v - \tilde v) f(0,\tilde x, \tilde v) \exp{\{-ix\cdot \xi -iv\cdot \omega\} }\, d\tilde x d \tilde vdxdv. \end{split} \end{equation} As shifts in `physical space' correspond to multiplication in `Fourier space', we have \be\notag \hat \Gamma_{\bar a}(t,\xi,\omega) = \frac{e^{i (\tilde x + t\tilde v)\cdot \xi + i\tilde v \cdot \omega}}{(2\pi)^d}\int_{\R^d}\int_{\R^d} \Gamma_{\bar a}(t,x-\tilde x - t\tilde v, v - \tilde v) \exp\{- ix \cdot \xi - i v \cdot \omega\} \, dx dv. \ee Thus, \be\notag \begin{split} \hat f(t,\xi,\omega) &= \hat \Gamma_{\bar a}(t,\xi,\omega) \int_{\R^d} \int_{\R^d} f(0,\tilde x, \tilde v) \exp\{- i(\tilde x+ t\tilde v)\cdot \xi - i \tilde v\cdot \omega\} \, d\tilde x d\tilde v \\& = \hat \Gamma_{\bar a}(t,\xi,\omega) \int_{\R^d} \int_{\R^d} f(0,\tilde x, \tilde v) \exp\{- i\tilde x\cdot \xi - i \tilde v\cdot (\omega+\xi t)\} \, d\tilde x d\tilde v \\& = (2\pi)^d \hat\Gamma_{\bar a}(t,\xi,\omega) \hat f(0, \xi, \omega + \xi t). \end{split} \ee We now find $\Gamma_{\bar a}$ through the identity~\eqref{e.c050201}. The first step is to take the Fourier transform of~\eqref{e.c050101} in $x$ and $v$ to obtain: \begin{equation}\notag \begin{split} \partial_{t} \hat f-\xi\cdot \nabla_{\omega}\hat f = -\omega\cdot \bar a(t)\omega\, \hat f . \end{split} \end{equation} Next, letting $\hat F(t,\xi,\omega)=\hat f(t,\xi, \omega-\xi t)$, we have \begin{equation}\notag \begin{split} \partial_{t} \hat F = -(\omega-\xi t)^{T} \bar a(t)(\omega-\xi t)\hat F. \end{split} \end{equation} Integrating this in time, we find \begin{equation}\notag \begin{split} \hat F(t,\xi, \omega) &= \exp \left\{-\int_{0}^{t}(\omega-\xi s)\cdot \bar a(s)(\omega-\xi s)\,ds\right\} \hat F(0,\xi, \omega) \\&= \exp\left\{-\int_{0}^{t}(\omega-\xi s)\cdot \bar a(s)(\omega-\xi s)\,ds\right\} \hat f(0,\xi, \omega). \end{split} \end{equation} Therefore, \begin{equation}\label{e.c42501} \begin{split} \hat f(t,\xi, \omega) = \hat f(0,\xi, \omega+\xi t) \exp\Big\{-\int_{0}^{t}(\omega-\xi (s-t))\cdot \bar a(s)(\omega-\xi (s-t))\,ds\Big\}.
\end{split} \end{equation} It follows from~\eqref{e.c050201} that \be\label{e.c050202} \hat \Gamma_{\bar a}(t,\xi,\omega) = \frac{1}{(2\pi)^d} \exp\Big\{ - \int_0^t (\omega - \xi(s-t))\cdot \bar a(s) (\omega - \xi(s-t)) \, ds \Big\}. \ee The remainder of the proof consists of computing the inverse Fourier transform of~\eqref{e.c050202}. We begin by computing that: \begin{equation}\label{e.c050203} \begin{split} \Gamma_{\bar a} (t,x, v) &= \frac{1}{(2\pi)^{2d}} \int \int e^{-\int_{0}^{t}(\omega-\xi (s-t))\cdot \bar a(s)(\omega-\xi (s-t))\,ds+i x\cdot \xi+ i v\cdot \omega} \,d\omega d\xi \\&= \frac{1}{(2\pi)^{2d}} \int e^{-\xi\cdot(A_{2}-2tA_{1}+t^{2}A_0)\xi+ i x\cdot \xi} \left(\int e^{-\omega\cdot A_0\omega +(2A_1 \xi -2 tA_0\xi +i v)\omega} \,d\omega\right)d\xi \\&= \frac{1}{(2\pi)^{2d}} \int e^{-\xi\cdot N_2(t)\xi+ i x\cdot \xi} \bar \Gamma_{\bar a}\,d\xi , \end{split} \end{equation} where we have introduced the notation \be\notag \begin{split} &N_1(t,\xi,v) = 2 A_1(t) \xi - 2 t A_0(t) \xi + i v, \qquad N_2(t) = A_2(t) - 2t A_1(t) + t^2 A_0(t), \\&\text{and }\quad \bar \Gamma_{\bar a}(t,\xi,v) = \int e^{-\omega \cdot A_0(t) \omega + N_1(t) \cdot \omega} \, d\omega. \end{split} \ee We simplify $\bar \Gamma_{\bar a}$ by completing the square: \begin{equation}\notag \begin{split} \bar \Gamma_{\bar a} (t,\xi, v) &= \int e^{ -(\omega-\frac{1}{2}A_0^{-1}N_1)\cdot A_0(\omega-\frac{1}{2}A_0^{-1}N_1) +\frac{1}{4}N_1 \cdot A_0^{-1}N_1} \,d\omega \\&= e^{\frac{1}{4}N_1 \cdot A_0^{-1}N_1} \int e^{-(\omega^{}-\frac{1}{2}A_0^{-1}N_1) \cdot A_0(\omega^{}-\frac{1}{2}A_0^{-1}N_1)} \,d\omega = e^{\frac{1}{4}N_1\cdot A_0^{-1}N_1}\frac{\pi^{d/2}}{\sqrt{\det{A_0}}}. \end{split} \end{equation} Plugging this into~\eqref{e.c050203} and then completing the square for the $\xi$-integral, we find \begin{equation}\notag \begin{split} \Gamma_{\bar a} (t,x, v) &= \frac{1}{2^{2d}\pi^\frac{3d}{2} \sqrt{\det{A_0}}} \int e^{ - \xi \cdot N_2 \xi + i x\cdot \xi+\frac{1}{4}N_1\cdot A_0^{-1}N_1} \,d\xi \\&= \frac{1}{2^{2d}\pi^\frac{3d}{2} \sqrt{\det{A_0}}} e^{-\frac{v\cdot A_0^{-1}v}{4} - \frac{1}{4} q \cdot P^{-1} q} \int e^{-(\xi^{}-\frac{i}{2}P^{-1}q)\cdot P(\xi^{}-\frac{i}{2}P^{-1}q)} \,d\xi, \end{split} \end{equation} where (recall $M$ from~\eqref{e.A_i}) \begin{equation}\notag \begin{split} &P = N_2-(tA_0-A_1)A_0^{-1}(tA_0-A_1) = A_{2}-A_{1}A_0^{-1}A_1 \\&\quad\text{and }\quad q = x - vt + A_1 A_0^{-1} v = x-Mv. \end{split} \end{equation} Computing the integral and simplifying, we find \be\notag \Gamma_{\bar a}(t,x,v) = \frac{1}{2^{2d} \pi^d \sqrt{\det(A_0) \det(P)}} e^{-\frac{v\cdot A_0^{-1}v}{4} - \frac{1}{4} q \cdot P^{-1} q}. \ee This concludes the proof. \end{proof} \section{Interpolation of weights between $L^{\infty,k}$ and $\logalpha$} \begin{Lemma}\label{l.weight_interpolation} Fix any $\alpha, \mu \in (0,1)$ and any $\theta, k > 0$. Suppose that \be\notag \varphi \in L^{\infty,k}(\R^3) \cap \logvalpha(\R^3). \ee Then $\vv^{(1-\mu)k} \varphi \in \log\left(1/C_v\right)^{-\theta\mu}$ and \be\notag [\vv^{(1-\mu) k} \varphi]_{ \log\left(1/C_v\right)^{-\theta\mu}} \lesssim \|\varphi\|_{L^{\infty,k}}^{1-\mu} [\varphi]_{\logvalpha}^{\mu} + \|\varphi\|_{L^{\infty, ((1-\mu)k -1)_+}}. \ee \end{Lemma} \begin{proof} First, for $(t,v)\neq(t,v') \in \R_+ \times \R^3$ with $|v-v'|<1/2$, we let \begin{equation}\notag R=\vv^{-k}\|\varphi\|_{L^{\infty, k}}[\varphi]^{-1}_{\logvalpha}.
\end{equation} Then, we obtain \begin{equation}\notag \begin{split} |\vv^{(1-\mu)k}&\varphi(t,v) -\vvp^{(1-\mu)k} \varphi(t,v')|\\ &\lesssim \vv^{(1-\mu)k}|\varphi(t,v) - \varphi(t,v')| + |\varphi(t,v')| |\vv^{(1-\mu)k} -\vvp^{(1-\mu)k} | \\& \lesssim \vv^{(1-\mu)k}|\varphi(t,v) - \varphi(t,v')| + |\varphi(t,v')| \vv^{((1-\mu)k-1)_+}|v-v'| \\&\lesssim \vv^{(1-\mu)k}|\varphi(t,v) - \varphi(t,v')| + \|\varphi\|_{L^{\infty,((1-\mu)k-1)_+}} |v-v' | . \end{split} \end{equation} Notice that \be\notag \frac{ |v-v' |}{\log(1/|v- v'|)^{-\mu\theta}} \lesssim 1. \ee Hence, we need only bound \be\notag H := \frac{\vv^{(1-\mu)k}|\varphi(t,v) - \varphi(t,v')|}{\log(1/|v- v'|)^{-\mu\theta}}. \ee If $\log(1/|v- v'|)^{-\theta} \geq R$, we have \begin{equation}\notag \begin{split} &H \leq 2\vv^{-\mu k}\frac{\|\varphi\|_{L^{\infty,k}}}{R^\mu} =2\|\varphi\|_{L^{\infty,k}}^{1-\mu}[\varphi]^{\mu}_{\logvalpha}, \end{split} \end{equation} which is the desired inequality. On the other hand, if $\log(1/|v- v'|)^{-\theta} \leq R$, we see \begin{equation}\notag \begin{split} H &= \vv^{(1-\mu)k}\frac{| \varphi(t,v) - \varphi(t,v')|}{\log(1/|v- v'|)^{-\theta}} (\log(1/|v- v'|)^{-\theta})^{1-\mu} \leq \vv^{(1-\mu)k}[\varphi]^{}_{\logvalpha} R^{1-\mu} \\&\lesssim [\varphi]^{}_{\logvalpha} \|\varphi\|^{1-\mu}_{L^{\infty, k}}[\varphi]^{\mu-1}_{\logvalpha} = [\varphi]^{\mu}_{\logvalpha} \|\varphi\|^{1-\mu}_{L^{\infty, k}}, \end{split} \end{equation} which is, again, the desired inequality. This concludes the proof. \end{proof} \end{appendix} \section*{Acknowledgments} The authors would like to sincerely thank Marco Bramanti and Andrea Pascucci for helpful discussions regarding their preprints~\cite{BiagiBramanti,LPP}. CH was partially supported by NSF grants DMS-2003110 and DMS-2204615. WW was partially supported by an AMS-Simons travel grant. CH thanks Otis Chodosh for first bringing the references~\cite{Brandt,Knerr} to his attention. \bibliographystyle{abbrv} \bibliography{schauder_landau_HW2} \end{document}
2205.12913v1
http://arxiv.org/abs/2205.12913v1
Formations of Finite Groups in Polynomial Time: $\mathfrak{F}$-residuals and $\mathfrak{F}$-subnormality
\documentclass[a4paper,12pt]{article} \usepackage{cmap} \usepackage[cp1251]{inputenc} \usepackage[english]{babel} \usepackage[left=2cm,right=2cm,top=2cm,bottom=2cm]{geometry} \usepackage[ruled,vlined]{algorithm2e} \usepackage{amssymb} \usepackage{amsmath, amsthm} \theoremstyle{plain} \newtheorem{thm}{Theorem} \newtheorem{lem}{Lemma} \newtheorem{prop}{Proposition} \newtheorem{cor}{Corollary} \newtheorem{conj}{Conjecture} \theoremstyle{definition} \newtheorem{example}{Example} \newtheorem{remark}{Remark} \newtheorem{defn}{Definition} \newtheorem{pr}{Problem} \begin{document} \begin{center}\Large \textbf{Formations of Finite Groups in Polynomial Time: $\mathfrak{F}$-residuals and $\mathfrak{F}$-subnormality}\normalsize \smallskip Viachaslau I. Murashka \{[email protected]\} Department of Mathematics and Technologies of Programming, Francisk Skorina Gomel State University, Gomel, Belarus\end{center} \begin{abstract} For a wide family of formations $\mathfrak{F}$ it is proved that the $\mathfrak{F}$-residual of a finite permutation group can be computed in polynomial time. Moreover, if in addition $\mathfrak{F}$ is hereditary, then the $\mathfrak{F}$-subnormality of a subgroup can be checked in polynomial time. \end{abstract} \textbf{Keywords.} Finite group; permutation group computation; formation; local formation; $\mathfrak{F}$-subnormality; polynomial time algorithm. \textbf{AMS}(2010). 20D10, 20B40. \section*{Introduction and the Main Results} All groups considered here are finite. One of the central directions in modern algebra is the study of different classes of algebraic systems (groups, semigroups, rings, Lie algebras and others). Its main problems are to construct classes of algebraic systems, to study the structure of a given system in such a class, and to decide whether a given system belongs to a given class or not. This direction is well illustrated in the rather developed theory of classes of finite groups (formations, Schunck and Fitting classes). The main results in this direction are presented in the monographs of Shemetkov \cite{Shemetkov1978}, Doerk and Hawkes \cite{Doerk1992}, Ballester-Bolinches and Ezquerro \cite{BallesterBollinches2006}, Wenbin Guo \cite{Guo2015} and others. According to zbMATH Open \cite{zb2021} (formerly known as Zentralblatt MATH) there are more than 5000 papers in this direction (20D10). The computational theory of classes of finite groups is not as well developed as the abstract theory. The main results of this theory are presented in the papers \cite{EICK2002} by Eick and Wright and \cite{HOFLING20014} by H\"ofling, and in the corresponding GAP packages ``FORMAT'' \cite{Eick2000} and ``CRISP'' \cite{Hoefling2000} respectively. These papers are dedicated to finding $\mathfrak{F}$-projectors, $\mathfrak{F}$-injectors, $\mathfrak{F}$-residuals and $\mathfrak{F}$-radicals of soluble groups. Algorithms for classes of groups in which permutability (or one of its generalizations) of subgroups is a transitive relation are studied in the paper \cite{BallesterBolinches2013} by Ballester-Bolinches, Cosme-Ll\'opez and Esteban-Romero and in the corresponding GAP package ``permut'' \cite{BallesterBolinches2014}. The principal novelty of this paper is our ability to deal with non-saturated (non-local) classes of not necessarily soluble groups. A finite group can be defined in different ways. The best known of them are by a presentation, by permutations, or by matrices.
Sylow's theorems are among the main results underlying the theory of formations of finite groups. In \cite{Kantor1985} Kantor proved that a Sylow subgroup of a permutation group of degree $n$ can be found in time polynomial in $n$ (mod CFSG). So it is natural to ask the following question: \begin{pr} For a given class of groups $\mathfrak{X}$ and a permutation group $G$ of degree $n$ is there a polynomial-time algorithm that checks whether $G$ belongs to $\mathfrak{X}$?\end{pr} That is why we introduce the following definition: \begin{defn} We shall call a class of groups $\mathfrak{X}$ $P$-recognizable if for every $K\trianglelefteq G\leq S_n$ there is a polynomial-time algorithm that tests whether $G/K$ belongs to $\mathfrak{X}$ or not. \end{defn} Recall that a formation is a class of groups closed under taking homomorphic images and subdirect products. One of the classical ways to study the structure of a group is to study the action of the group on its chief series. For example, the formations of nilpotent, supersoluble and quasinilpotent groups, as well as rank \cite[VII, Definitions 2.3]{Doerk1992}, local \cite[IV, Definitions 3.1]{Doerk1992}, Baer-local \cite[IV, Definitions 4.9]{Doerk1992} and graduated (see \cite[\S3]{Shemetkov1978} or \cite[\S 5.5]{Guo2015}) formations, are defined by the action of a group on its chief factors. All these formations are particular cases of the following construction. \begin{defn}\label{def1} Let $\mathrm{f}$ be a function which assigns 0 or 1 to every group $G$ and its chief factor $H/K$ such that (1) $\mathrm{f}(H/K, G)=\mathrm{f}(M/N, G)$ whenever $H/K$ and $M/N$ are $G$-isomorphic chief factors of $G$; (2) $\mathrm{f}(H/K, G)=\mathrm{f}((H/N)/(K/N), G/N)$ for every $N\trianglelefteq G$ with $N\leq K$.\\ Such functions $\mathrm{f}$ will be called chief factor functions. Denote by $\mathcal{C}(\mathrm{f})$ the class of groups \begin{center} $(G\mid \mathrm{f}(H/K, G)=1$ for every chief factor $H/K$ of a group $G)$. \end{center} \end{defn} For every non-empty formation $\mathfrak{F}$ and every group $G$ there exists the $\mathfrak{F}$-residual of $G$, that is, the smallest normal subgroup $G^\mathfrak{F}$ of $G$ with $G/G^\mathfrak{F}\in\mathfrak{F}$. It is clear that $\mathfrak{F}=(G\mid G^\mathfrak{F}=1)$. \begin{thm}\label{residual} Assume that $\mathrm{f}(H/K, G)$ can be computed in polynomial time for every group $G$ and its chief factor $H/K$. Then $\mathfrak{F}=\mathcal{C}(\mathrm{f})$ is a $P$-recognizable formation and $G^\mathfrak{F}$ can be computed in polynomial time for every $G\leq S_n$. \end{thm} The concept of subnormality plays an important role in group theory. The formational generalization of this concept was introduced in the universe of soluble groups by Hawkes \cite{Hawkes1969} and in the universe of all groups by Shemetkov (see \cite[Definition 8.1]{Shemetkov1978}). A subgroup $H$ of $G$ is called $\mathfrak{F}$-subnormal in $G$ if $H=G$ or there exists a maximal chain of subgroups $H=H_0\subset H_1\subset\dots\subset H_n=G$ such that $H_{i}/\mathrm{Core}_{H_{i}}(H_{i-1})\in\mathfrak{F}$ for $i=1,\dots,n$. Note that if $\mathfrak{F}$ is a hereditary formation, then the word ``maximal'' can be omitted in this definition. Kegel \cite{Kegel1978} introduced another such generalization of subnormality.
Recall \cite[Definition 6.1.4]{BallesterBollinches2006} that a subgroup $H$ of $G$ is called $K$-$\mathfrak{F}$-\emph{subnormal} in $G$ if there is a chain of subgroups $H=H_0\subseteq H_1\subseteq\dots\subseteq H_n=G$ with $H_{i-1}\trianglelefteq H_i$ or $H_{i}/\mathrm{Core}_{H_{i}}(H_{i-1})\in\mathfrak{F}$ for all $i=1,\dots,n$. If $\mathfrak{F}=\mathfrak{N}$, then the notions of $K$-$\mathfrak{F}$-subnormal and subnormal subgroups coincide. For more information about $\mathfrak{F}$-subnormal and $K$-$\mathfrak{F}$-subnormal subgroups see \cite[Chapter 6]{BallesterBollinches2006}. \begin{thm}\label{Fsubnorm} Let $\mathfrak{F}$ be a hereditary formation. Assume that $G^\mathfrak{F}$ can be computed in polynomial time for every $G\leq S_n$ and every natural number $n$. Then there are polynomial-time algorithms that test whether a given subgroup is $\mathfrak{F}$-subnormal or $K$-$\mathfrak{F}$-subnormal. \end{thm} \section{Preliminaries} \subsection{Groups and their classes} Recall that $M^G$ denotes the smallest normal subgroup of $G$ which contains $M$; $M'$ is the derived subgroup of $M$; $M^p$ is the subgroup generated by $p$-th powers of elements of $M$; $S_n$ denotes the symmetric group on $n$ elements; a formation $\mathfrak{F}$ is called hereditary if $H\leq G\in\mathfrak{F}$ implies $H\in\mathfrak{F}$. The material of this section can be found, for example, in \cite[pp. 5--8]{Doerk1992}. Let $\Omega$ be a set. A group $G$ is called an $\Omega$-\emph{group} if there is associated with each element $\omega\in\Omega$ an endomorphism of $G$ denoted for all $g\in G$ by $g\rightarrow g\omega$. A subgroup $U$ of $G$ is called $\Omega$-\emph{admissible} if $u\omega\in U$ for all $u\in U$ and $\omega\in\Omega$. Evidently the intersection and the join of $\Omega$-admissible subgroups are again $\Omega$-admissible. If $N$ is an $\Omega$-admissible normal subgroup of $G$, the quotient group $G/N$ may be regarded naturally as an $\Omega$-group via the action defined for all $g\in G$ and $\omega\in\Omega$ by $(Ng)\omega=N(g\omega)$. Finally, if $G$ and $H$ are $\Omega$-groups, an isomorphism $\alpha: G \rightarrow H$ is called an $\Omega$-\emph{isomorphism} if $\alpha(g\omega)=\alpha(g)\omega$ holds for all $g\in G$ and $\omega\in\Omega$. \begin{thm}[The Isomorphism Theorems]\label{th1} Let $\Omega$ be a set and let $G$ be an $\Omega$-group. $(1)$ If $U$ and $N$ are $\Omega$-admissible subgroups of $G$ and $U$ normalizes $N$, then $UN/N \simeq U/(U \cap N)$ as $\Omega$-groups. $(2)$ If $M$ and $N$ are $\Omega$-admissible normal subgroups of $G$ and $N < M$, then the $\Omega$-groups $(G/N)/(M/N)$ and $G/M$ are $\Omega$-isomorphic. \end{thm} An $\Omega$-group $G$ is called $\Omega$-\emph{simple} if $1$ and $G$ are the only $\Omega$-admissible normal subgroups of $G$. A subnormal chain $U=U_0, U_1, \dots, U_n=G$ from $U$ to $G$ is called an $\Omega$-\emph{series} if all of its terms are $\Omega$-admissible. An $\Omega$-series is called an $\Omega$-\emph{composition series} if each factor $U_i/U_{i-1}$ is $\Omega$-simple for $i=1,\dots, n$. \begin{thm}[The Jordan-H\"older Theorem]\label{th2} Let $G$ be an $\Omega$-group, and let \[1=N_0\vartriangleleft N_1\vartriangleleft\dots\vartriangleleft N_n=G \hspace{3mm} \text{and} \hspace{3mm} 1=M_0\vartriangleleft M_1\vartriangleleft\dots\vartriangleleft M_m=G\] be two $\Omega$-composition series of $G$. Then $m =n$ and there exists a permutation $\pi\in S_n$ such that for $i =1,\dots, n$ the factor $N_i/N_{i-1}$ is $\Omega$-isomorphic with $M_{\pi(i)}/M_{\pi(i)-1}$.
\end{thm} \subsection{Computational conventions} Here we use standard computational conventions for abstract finite groups equipped with poly\-nomial-time procedures to compute products and inverses of elements (for the related abstract notion of black-box groups, see \cite[Chapter 2]{Seress2003}). Unless stated otherwise, for both input and output, groups are specified by generators. We will consider only $G=\langle S\rangle\leq S_n$ with $|S|\leq n^2$. If necessary, Sims' algorithm \cite[Parts 4.1 and 4.2]{Seress2003} can be used to arrange that $|S|\leq n^2$. Quotient groups are specified by generators of a group and its normal subgroup. According to the following result of \cite{Babai1986}, all subgroup chains have bounded length: \begin{lem}[\cite{Babai1986}]\label{chain} Given $G \leq S_n$ every chain of subgroups of $G$ has at most $2n-3$ members for $n\geq 2$. \end{lem} We need the following well-known basic tools in our proofs (see, for example, \cite{Kantor1990a} or \cite{Seress2003}). \begin{thm}\label{Basic} Given $G = \langle S\rangle\leq S_n$, in polynomial time one can solve the following problems: \begin{enumerate} \item Find $|G|$. \item Given normal subgroups $A$ and $B$ of $G$, find a composition series for $G$ containing them. \item Given $T\subseteq G$ find $\langle T\rangle^G$. \item (mod CFSG) Given $N, K \leq S_n$ such that $N/K$ is normalized by $G/K$, find $C_{G/K}(N/K)$ \cite[P6(i)]{Kantor1990a}. \item (mod CFSG) Given a prime $p$ dividing $|G|$, find a Sylow $p$-subgroup $P$ of $G$ and $N_G(P)$ \cite{Kantor1990}. \item Given $H=\langle S_1\rangle, K=\langle S_2\rangle \leq G$ find $\langle H, K\rangle=\langle S_1, S_2\rangle$ and $[H, K]=\langle \{[s_1, s_2]\mid s_1\in S_1, s_2\in S_2\}\rangle^{\langle H, K\rangle}$. \end{enumerate} \end{thm} \begin{lem}[{\cite[p. 155]{Seress2003}}]\label{transform} Let $H$ and $K$ be normal subgroups of $G$ such that $H/K$ is an elementary abelian $p$-group for some prime $p$. Then $H/K$ can be considered as an $\mathbb{F}_pG$-module. Every generator of $G$ induces by conjugation a linear transformation of this module. Its matrix can be computed in polynomial time. \end{lem} \section{Proofs of the Main Results} \subsection{Proof of Theorem \ref{residual}} The first step is to prove the following lemma. \begin{lem} If $\mathrm{f}$ is a chief factor function, then $\mathcal{C}(\mathrm{f})$ is a formation. \end{lem} \begin{proof} Let $G\in\mathcal{C}(\mathrm{f})$ and $N\trianglelefteq G$. If $(H/N)/(K/N)$ is a chief factor of $G/N$, then $H/K$ is a chief factor of $G$ and $\mathrm{f}((H/N)/(K/N), G/N)=\mathrm{f}(H/K, G)=1$ by $(2)$ of Definition \ref{def1}. Hence $G/N\in \mathcal{C}(\mathrm{f})$. This means that $\mathcal{C}(\mathrm{f})$ is closed under taking homomorphic images. Assume now that $G/N, G/M\in\mathcal{C}(\mathrm{f})$ and $M\cap N=1$. Let $H/K$ be a chief factor of $G$ below $N$. Then $$HM/KM\simeq H/(H\cap KM)=H/K(H\cap M)=H/K,$$ i.e. $H/K$ is $G$-isomorphic to a chief factor of $G$ above $M$ by $(1)$ of the Isomorphism Theorems. From the Jordan-H\"older Theorem it follows that every chief factor of $G$ is $G$-isomorphic to a chief factor of $G$ above $M$ or $N$. WLOG let $H/K\simeq R/T$ where $R/T$ is a chief factor of $G$ above $N$; then $\mathrm{f}(H/K, G)=\mathrm{f}(R/T, G)=\mathrm{f}((R/N)/(T/N), G/N)=1$ by Definition \ref{def1}. This means that $G\in\mathcal{C}(\mathrm{f})$. Hence $\mathcal{C}(\mathrm{f})$ is closed under taking subdirect products. Thus $\mathcal{C}(\mathrm{f})$ is a formation.
\end{proof} Recall that the smallest normal subgroup $H$ of $G$ such that $G/H$ is the direct product of simple (resp. simple non-abelian) groups is called the (resp. non-abelian) residual of $G$ and is denoted by $\mathrm{Res}(G)$ (resp. $\mathrm{Res}_N(G)$). Here we are interested in the following subgroups. Let $M$ be a normal subgroup of a group $G$. Denote by $\mathrm{Res}_N(M, G)$ (resp. $\mathrm{Res}_p(M, G)$) the smallest normal subgroup $H$ of $G$ below $M$ such that $M/H$ is the direct product of minimal normal non-abelian subgroups (resp. minimal normal $p$-subgroups) of $G/H$. \begin{lem}\label{lem2} $\mathrm{Res}_N(N, G)$ is defined for every normal subgroup $N$ of $G$. Moreover $\mathrm{Res}_N(N, G)$ and a decomposition of $N/\mathrm{Res}_N(N, G)$ into the direct product of minimal normal subgroups of $G/\mathrm{Res}_N(N, G)$ can be computed in polynomial time. \end{lem} \begin{proof} Note that $\mathrm{Res}_N(N)\textrm{ char }N\trianglelefteq G$. Hence $\mathrm{Res}_N(N)\trianglelefteq G$. Recall that $\mathrm{Res}_N(N)$ is the smallest normal subgroup of $N$ such that $N/\mathrm{Res}_N(N)$ is a direct product of simple non-abelian groups, and that every minimal normal non-abelian subgroup is a direct product of simple non-abelian groups. Therefore, if $\mathrm{Res}_N(N, G)$ exists, then it contains $\mathrm{Res}_N(N)$. Let us prove that $N/\mathrm{Res}_N(N)$ is the direct product of minimal normal non-abelian subgroups of $G/\mathrm{Res}_N(N)$. Let $A=N$ and $M/\mathrm{Res}_N(N)$ be a simple subnormal subgroup of $A/\mathrm{Res}_N(N)$. Then $M/\mathrm{Res}_N(N)$ is a simple non-abelian subnormal subgroup of $G/\mathrm{Res}_N(N)$. Therefore \linebreak $(M/\mathrm{Res}_N(N))^G$ is a minimal normal subgroup of $G/\mathrm{Res}_N(N)$ below $A/\mathrm{Res}_N(N)\leq N/\mathrm{Res}_N(N)$. Note that $$ A/\mathrm{Res}_N(N)=(M/\mathrm{Res}_N(N))^G\times C_{A/\mathrm{Res}_N(N)}((M/\mathrm{Res}_N(N))^G),$$ where $C_{A/\mathrm{Res}_N(N)}((M/\mathrm{Res}_N(N))^G)\trianglelefteq G/\mathrm{Res}_N(N)$. So now we can let $$A/\mathrm{Res}_N(N)\leftarrow C_{A/\mathrm{Res}_N(N)}((M/\mathrm{Res}_N(N))^G).$$ This means that, iterating the previous step, we can decompose $N/\mathrm{Res}_N(N)$ into the direct product of minimal normal non-abelian subgroups of $G/\mathrm{Res}_N(N)$. Thus $\mathrm{Res}_N(N, G)=\mathrm{Res}_N(N)$. \begin{algorithm}[H] \caption{NonAbelianDecomposition($G, N$)} \SetAlgoLined \KwResult{The smallest normal subgroup $K$ of $G$ below $N$ such that $N/K\simeq \overline{N}_1\times\dots\times\overline{N}_k$ where $\overline{N}_i$ is a minimal normal non-abelian subgroup of $G$; subgroups $\overline{N}_i$. } \KwData{$N$ is a normal subgroup of a group $G$} $K\gets \mathrm{Res}_N(N)$\; $A\gets N$\; $L\gets []$\; \While{$|A|\neq |K|$}{ Find a minimal subnormal subgroup $B/K$ of $A/K$\; Find $(B/K)^G$ and add this subgroup to $L$\; $A/K\gets C_{A/K}((B/K)^G)$\; } \end{algorithm} According to \cite[Theorem 8.3]{Babai1987} $\mathrm{Res}_N(N)$ can be found in polynomial time. By part 2 of Theorem \ref{Basic}, a minimal subnormal subgroup $M/\mathrm{Res}_N(N)$ of $N/\mathrm{Res}_N(N)$ can be found in polynomial time. Then $(M/\mathrm{Res}_N(N))^G=M^G/\mathrm{Res}_N(N)$ can be computed in polynomial time by part 3 of Theorem \ref{Basic}. Now $C_{A/\mathrm{Res}_N(N)}((M/\mathrm{Res}_N(N))^G)$ can be computed in polynomial time by part 4 of Theorem \ref{Basic}. Thus Algorithm 1 runs in polynomial time by Lemma \ref{chain}. \end{proof} \begin{lem}\label{lem3} Let $p$ be a prime.
$\mathrm{Res}_p(N, G)$ is defined for every normal subgroup $N$ of $G$. Moreover, $\mathrm{Res}_p(N, G)$ and a decomposition of $N/\mathrm{Res}_p(N, G)$ into the direct product of minimal normal subgroups of $G/\mathrm{Res}_p(N, G)$ can be computed in polynomial time. \end{lem} \begin{proof} Let $N/K$ be the direct product of minimal normal $p$-subgroups of $G/K$ for a given $p$, where $N, K\trianglelefteq G$. Note that in this case $N/K$ is an elementary abelian $p$-group. Hence $N'N^p\subseteq K$. So if $N'N^p=N$, then we can let $\mathrm{Res}_p(N, G)=N'N^p$. Assume that $N\neq N'N^p$. Note that $N'N^p\textrm{ char }N\trianglelefteq G$. Hence $N'N^p\trianglelefteq G$. Then we can consider $V=N/N'N^p$ as an $\mathbb{F}_pG$-module. In this case normal subgroups of $G/N'N^p$ below $N/N'N^p$ are in one-to-one correspondence with submodules of $V$. Let $K/N'N^p$ be the radical $\mathrm{Rad}(N/N'N^p)$ of $N/N'N^p$. Now $N/K\simeq (N/N'N^p)/\mathrm{Rad}(N/N'N^p)$ is a semisimple $\mathbb{F}_pG$-module, i.e., $N/K$ is the direct product of minimal normal subgroups of $G/K$. Assume that $K_1$ is a normal subgroup of $G$ such that $N/K_1=N_1/K_1\times\dots\times N_k/K_1$ is a direct product of minimal normal $p$-subgroups $N_i/K_1$ of $G/K_1$. It is clear that $N'N^p\subseteq K_1$. Note that $N/(\prod_{i=1,i\neq j}^k N_i)$ is a chief factor of $G$. It means that $\prod_{i=1,i\neq j}^k N_i/N'N^p$ is a maximal submodule of $V$. Recall that the radical of a module is the intersection of all its maximal submodules. Now $$K/N'N^p=\mathrm{Rad}(N/N'N^p)\subseteq\bigcap_{j=1}^k(\prod_{i=1,i\neq j}^k N_i/N'N^p)=K_1/N'N^p. $$ Thus $K\subseteq K_1$. It means that $K$ is the smallest normal subgroup of $G$ below $N$ such that $N/K$ is the direct product of minimal normal $p$-subgroups of $G/K$. Hence $K=\mathrm{Res}_p(N, G)$ is well defined. Let us show that $K$ can be computed in polynomial time. If $N=\langle S\rangle$, then $N'N^p=\langle\{[x,y]\mid x,y\in S\}\cup\{x^p\mid x\in S\}\rangle$ can be computed in polynomial time. Every generator of $G$ induces by conjugation a linear transformation of $N/N'N^p$. The matrix of this transformation can be found in polynomial time by Lemma \ref{transform}. Denote the algebra generated by these matrices by $R$. Then a basis of its Jacobson radical $J(R)$ can be computed in polynomial time by \cite[Theorem 2.7]{Ronyai1990}. Now $\mathrm{Rad}(N/N'N^p)=(N/N'N^p)J(R)$ by \cite[B, Proposition 4.2]{Doerk1992}. Hence $\mathrm{Rad}(N/N'N^p)$ is generated (as a module and as a subgroup) by the products $nr$ where $n$ is a generator of $N/N'N^p$ and $r$ is a generator of $J(R)$. Thus $\mathrm{Rad}(N/N'N^p)$ can be computed in polynomial time, i.e., $\mathrm{Rad}(N/N'N^p)=K/N'N^p$ and we know generators of $K$ as a subgroup of $G$. Since every generator of $G$ induces by conjugation a linear transformation of $N/K$, the matrix of this transformation can be found in polynomial time by Lemma \ref{transform}. Denote the algebra generated by these matrices by $R$. Note that $N/K$ is a semisimple $\mathbb{F}_pG$-module. Hence it is a semisimple $R$-module. Now $nr=0$ for every $n\in N/K$ and $r\in J(R)$. Since $R$ acts on $N/K$ by matrix multiplications, we see that $J(R)=0$. Thus $R$ is semisimple. Then bases of the minimal ideals $R_i$ of $R$ can be found in polynomial time by \cite[Corollary 3.2]{Ronyai1990}. Then $(N/K)R_i$ is a minimal submodule of $N/K$ and the sum of these submodules is $N/K$ by \cite[VII, Theorem 12.1]{Huppert1982}.
It is clear that generating sets of these submodules (and hence of the corresponding quotient groups) can be found in polynomial time. \begin{algorithm}[H] \caption{PDecomposition($G, N, p$)} \SetAlgoLined \KwResult{The smallest normal subgroup $K$ of $G$ below $N$ such that $N/K\simeq \overline{N}_1\times\dots\times\overline{N}_k$ where $\overline{N}_i$ is a minimal normal $p$-subgroup of $G$; subgroups $\overline{N}_i$. } \KwData{$N$ is a normal subgroup of a group $G$ and $p$ is a prime} $M\gets[]$\; $L\gets[]$\; \If{$|N^pN'|=|N|$}{output $N^pN'$ and $L$\;} \Else{For each generator $g$ of $G$ find the linear transformation which this element induces on $N/N^pN'$\; For the algebra $R$ generated by the above-mentioned transformations, find a basis of $\mathrm{J}(R)$\; Find the generators of $K$ where $K/N'N^p=(N/N'N^p)J(R)$\; For each generator $g$ of $G$ find the linear transformation which this element induces on $N/K$\; Decompose the algebra $R$ generated by the above-mentioned transformations into the sum $\rho_1\oplus\dots\oplus\rho_k$ of minimal ideals\; For each ideal $\rho_i$ find $(N/K)\rho_i$ and add it to $M$\; For each element in $M$ find its generators in $G$ and add them as an element to $L$\;} \end{algorithm} \end{proof} \begin{lem}\label{lem4} Let $\mathfrak{F}=\mathcal{C}(\mathrm{f})$, $N$ and $K$ be normal subgroups of $G$ such that $N/K=N_1/K\times\dots\times N_t/K$ where $N_i/K$ is a minimal normal subgroup of $G/K$ and $G/N\in\mathfrak{F}$. Then $(G/K)^\mathfrak{F}$ can be found in polynomial time. \end{lem} \begin{proof} Let $$I^+=\{i\mid \mathrm{f}(N_i/K, G)=1\}, \quad I^-=\{i\mid \mathrm{f}(N_i/K, G)=0\} \textrm{ and } M/K=\prod_{i\in I^-} N_i/K.$$ We claim that $M/K=(G/K)^\mathfrak{F}$. Note that every chief factor $H/T$ of $G$ between $M$ and $N$ is $G$-isomorphic to $N_i/K$ for some $i\in I^+$. Hence $$\mathrm{f}((H/M)/(T/M), G/M)=\mathrm{f}(H/T, G)=\mathrm{f}(N_i/K, G)=1.$$ Since $G/N\simeq (G/M)/(N/M)\in\mathcal{C}(\mathrm{f})$, we see that $\mathrm{f}((H/M)/(T/M), G/M)=1$ for every chief factor $(H/M)/(T/M)$ of $G/M$ above $N/M$. From the Jordan-H\"older theorem it follows that $(G/K)/(M/K)\simeq G/M\in \mathcal{C}(\mathrm{f})=\mathfrak{F}$. Hence $(G/K)^\mathfrak{F}\leq M/K$. Assume that $F/K=(G/K)^\mathfrak{F}< M/K$. So $I^-\neq\emptyset$. Then $F/K< FN_i/K$ for some $i\in I^-$, i.e., $F\cap N_i= K$. Hence $FN_i/F$ and $N_i/K$ are $G$-isomorphic chief factors of $G$. Thus \begin{multline*} 1=\mathrm{f}(((FN_i/K)/(F/K))/((F/K)/(F/K)), (G/K)/(F/K))=\mathrm{f}((FN_i/K)/(F/K), G/K)=\\ \mathrm{f}(FN_i/F, G)=\mathrm{f}(N_i/K, G)=0, \end{multline*} a contradiction. Thus $(G/K)^\mathfrak{F}= M/K$. \begin{algorithm}[H] \caption{FResidualPart($G, N, K, L, \mathrm{f}$)} \SetAlgoLined \KwResult{$T/K=(G/K)^\mathfrak{F}$. } \KwData{$N\trianglelefteq G$ with $G/N\in\mathfrak{F}$, $K\trianglelefteq G$ with $N/K=N_1/K\times\dots\times N_t/K$; the list $L$ of minimal normal subgroups $N_i/K$ of $G/K$.} $T\gets K$\; \For{$i$ in $[1,..., t]$} {\If{$\mathrm{f}(N_i/K, G)=0$}{$T\gets\langle T, N_i\rangle$}} \end{algorithm} Since $\mathrm{f}(H/K, G)$ can be computed in polynomial time for every chief factor $H/K$ of $G$, we see that $I^-$ can be computed in polynomial time. Note that $t<2n$ by Lemma \ref{chain}. Hence the join of not more than $t$ subgroups can be computed in polynomial time. \end{proof} \begin{lem}\label{lem7} Let $\mathfrak{F}=\mathcal{C}(\mathrm{f})$ and $G$ be a group. Then $G^\mathfrak{F}$ can be computed in polynomial time.
\end{lem} \begin{proof} Note that $G/G\in\mathcal{C}(\mathrm{f})$. Assume that we have a normal subgroup $H$ with $G/H\in \mathcal{C}(\mathrm{f})$. Then $G^\mathfrak{F}\subseteq H$. If $G^\mathfrak{F}\neq H$, then there is a chief factor $H/K$ of $G$ with $\mathrm{f}(H/K, G)=1$. Note that $H/K$ is either non-abelian or an elementary abelian $p$-group for some prime $p$. Hence $H/K$ is $G$-isomorphic to a chief factor of $G$ between $\mathrm{Res}_N(H, G)$ and $H$ or between $\mathrm{Res}_p(H, G)$ and $H$ for some $p$. \begin{algorithm}[H] \caption{FResidual($G, \mathrm{f}$)} \SetAlgoLined \KwResult{$N=G^\mathfrak{F}$. } \KwData{$\mathfrak{F}=\mathcal{C}(\mathrm{f})$, $G$ is a group.} $K\gets G$\; \Repeat{$|N|=|K|$}{ $N\gets K$\; $K\gets$FResidualPart($G$, $K$, NonAbelianDecomposition($G, K$), $\mathrm{f}$)\; $\pi\gets\pi(K)$\; \For{$p$ in $\pi$} {$K\gets$FResidualPart($G$, $K$, PDecomposition($G, K, p$), $\mathrm{f}$)\;} } \end{algorithm} From Lemmas \ref{lem2}--\ref{lem7} it follows that this is a polynomial-time algorithm. \end{proof} \begin{lem} Let $\mathfrak{F}=\mathcal{C}(\mathrm{f})$. Then $\mathfrak{F}$ is $P$-recognizable. \end{lem} \begin{proof} Let $G$ be a group. Then $G^\mathfrak{F}$ can be computed in polynomial time. Recall that $(G/K)^\mathfrak{F}=G^\mathfrak{F}K/K$. Hence $G/K\in\mathfrak{F}$ iff $G^\mathfrak{F}\subseteq K$ iff $\langle G^\mathfrak{F}, K\rangle=K$ iff $|\langle G^\mathfrak{F}, K\rangle|=|K|$. The last condition can be checked in polynomial time by 1 and 6 of Theorem \ref{Basic}. \end{proof} \section{Proof of Theorem \ref{Fsubnorm}} Let $H$ be a $K$-$\mathfrak{F}$-subnormal subgroup of $G$. From the definition of a $K$-$\mathfrak{F}$-subnormal subgroup it follows that either $G=H$ or there is a proper subgroup $M$ of $G$ such that $H$ is a $K$-$\mathfrak{F}$-subnormal subgroup of $M$ and either $M\trianglelefteq G$ or $G^\mathfrak{F}\leq M$. \begin{algorithm}[H] \caption{ISKFSUBNORMAL$(G, H, \mathfrak{F})$} \SetAlgoLined \KwResult{True if $H$ is $K$-$\mathfrak{F}$-subnormal in $G$ and False otherwise.} \KwData{A subgroup $H$ of a group $G$.} \eIf{$|G|=|H|$}{{\bf return} True;} {\eIf{$|HG^\mathfrak{F}|\neq|G|$}{{\bf return} ISKFSUBNORMAL$(HG^\mathfrak{F}, H, \mathfrak{F})$;} {\eIf{$|H^G|\neq|G|$}{{\bf return} ISKFSUBNORMAL$(H^G, H, \mathfrak{F})$;}{{\bf return} False;}}} \end{algorithm} Since $G^\mathfrak{F}$ can be computed in polynomial time, and by 1 and 3 of Theorem \ref{Basic}, every above-mentioned check can be made in polynomial time. Now the statement of the theorem follows from the fact that every chain of subgroups of $G$ has at most $2n$ members by Lemma \ref{chain}. By analogy one can prove that the following algorithm tests $\mathfrak{F}$-subnormality in polynomial time.
\begin{algorithm}[H] \caption{ISFSUBNORMAL$(G, H, \mathfrak{F})$} \SetAlgoLined \KwResult{True if $H$ is $\mathfrak{F}$-subnormal in $G$ and False otherwise.} \KwData{A subgroup $H$ of a group $G$.} \eIf{$|G|=|H|$}{{\bf return} True;} {\eIf{$|HG^\mathfrak{F}|\neq|G|$}{{\bf return} ISFSUBNORMAL$(HG^\mathfrak{F}, H, \mathfrak{F})$;} {{\bf return} False;}} \end{algorithm} \section{Applications} \subsection{Local and Baer-local formations} Recall \cite[IV, Definitions 3.1]{Doerk1992} that a function $f$ which assigns a formation to each prime is called a \emph{formation function}; a chief factor $H/K$ of a group $G$ is called \emph{$f$-central} if $G/C_G(H/K)\in f(p)$ for all prime divisors of $|H/K|$; a formation $\mathfrak{F}$ is called \emph{local} if its coincides with the class of groups all whose chief factors are $f$-central for some formation function $f$. In this case $f$ is called a local definition of $\mathfrak{F}$. \begin{thm}\label{local} Let $f_\mathfrak{F}$ be a local definition of a local formation $\mathfrak{F}$. Assume that $G^{f_\mathfrak{F}(p)}$ can be computed in a polynomial time for every prime $p$ and every group $G$. Then $\mathfrak{F}$ is $P$-recognizable and $G^\mathfrak{F}$ can be computed in a polynomial time. \end{thm} \begin{proof} Note that \begin{align*} &G/C_G(H/K)\in f_\mathfrak{F}(p) \qquad\quad\,\,\forall p\in\pi(H/K)\\ &\Leftrightarrow G^{f_\mathfrak{F}(p)}\subseteq C_G(H/K) \quad\quad\,\,\,\,\forall p\in\pi(H/K)\\ &\Leftrightarrow [G^{f_\mathfrak{F}(p)}, H]\subseteq K \qquad\qquad\,\,\forall p\in\pi(H/K)\\ &\Leftrightarrow |\langle [G^{f_\mathfrak{F}(p)}, H], K\rangle|=|K| \,\,\,\,\,\forall p\in\pi(H/K). \end{align*} Let \begin{displaymath} \mathrm{f}_{\mathfrak{F}}(H/K, G)=\begin{cases} 1,& H/K \textrm{ is }f_\mathfrak{F}\textrm{-central};\\ 0,& \textrm{ otherwise}. \end{cases} =\begin{cases} 1,& |\langle [G^{f_\mathfrak{F}(p)}, H], K\rangle|=|K|\quad\forall p\in\pi(H/K);\\ 0,& \textrm{ otherwise}. \end{cases} \end{displaymath} From the definition of local formation it follows that $\mathfrak{F}=\mathcal{C}(\mathrm{f}_\mathfrak{F})$. Since $G^{f_\mathfrak{F}(p)}$, the commutator of two subgroups, the join of two subgroups and the order of subgroup can be computed in a polynomial time, we see that $\mathrm{f}_{\mathfrak{F}}(H/K, G)$ can be computed in a polynomial time. Lets prove that $\mathrm{f}_\mathfrak{F}$ is a chief factor function. If $H/K$ and $M/N$ are $G$-isomorphic chief factors, then $C_G(H/K)=C_G(M/N)$. Hence $G/C_G(H/K)=G/C_G(M/N)$. Therefore $\mathrm{f}_\mathfrak{F}$ satisfies (1) of Definition \ref{def1}. Note that if $[G^{f_\mathfrak{F}(p)}, H]\subseteq K$ for all $p\in\pi(H/K)$, then $$[(G/N)^{f_\mathfrak{F}(p)}, H/N]=[G^{f_\mathfrak{F}(p)}N/N, H/N]=[G^{f_\mathfrak{F}(p)}, H]N/N\subseteq K/N \quad\forall p\in\pi((H/N)/(K/N)).$$ Hence $\mathrm{f}_\mathfrak{F}$ satisfies (2) of Definition \ref{def1}. Therefore the statement of Theorem \ref{local} directly follows from Theorem \ref{residual}. \end{proof} The following classes of groups are local formations: \begin{enumerate} \item The class $\mathfrak{U}$ of all supersoluble groups. It is locally defined by $f_\mathfrak{U}(p)=\mathfrak{A}(p-1)$ (the class of all abelian groups of exponent dividing $p-1$). \item The class $w\mathfrak{U}$ of widely supersoluble groups \cite{Vasilev2010}. It is locally defined by $f_{w\mathfrak{U}}(p)=\mathcal{A}(p-1)$ (the class of all groups all whose Sylow subgroups are abelian of exponent dividing $p-1$). 
\item The class $\mathfrak{N}\mathcal{A}$ of groups $G$ such that all Sylow subgroups of $G/\mathrm{F}(G)$ are abelian \cite{Vasilev2010}. It is locally defined by $f_{\mathfrak{N}\mathcal{A}}(p)=\mathcal{A}$ (the class of groups all of whose Sylow subgroups are abelian). \item In \cite{Zimmermann1989} the class $sm\mathfrak{U}$ of groups with submodular Sylow subgroups was studied. It is locally defined \cite{Vasilyev2015} by $f_{sm\mathfrak{U}}(p)=\mathcal{A}(p-1)\cap \mathfrak{B}$ where $\mathfrak{B}$ is the class of groups of square-free exponent. \item The class of strongly supersoluble groups $s\mathfrak{U}$ \cite{Vasilyev2015}. Its local definition is $f_{s\mathfrak{U}}(p)=\mathfrak{A}(p-1)\cap\mathfrak{B}$. \item The class $sh\mathfrak{U}$ of groups all of whose Schmidt subgroups are supersoluble \cite{Monakhov1995, Monakhov2021}. Its local definition is $f_{sh\mathfrak{U}}(p)=\mathfrak{G}_{\pi(p-1)}$ (the class of all $\pi(p-1)$-groups). \end{enumerate} \begin{cor} Let $\mathfrak{F}\in\{\mathfrak{U}, \mathrm{w}\mathfrak{U}, s\mathfrak{U}, sm\mathfrak{U}, \mathfrak{N}\mathcal{A}, sh\mathfrak{U}\}$. Then $\mathfrak{F}$ is $P$-recognizable and $G^\mathfrak{F}$ can be computed in polynomial time. In particular, $\mathfrak{F}$-subnormality of a subgroup can be tested in polynomial time. \end{cor} \begin{proof} Let $G=\langle S\rangle$. Note that for every prime $q\in\pi(G)$ a generating set $S_q$ of a Sylow $q$-subgroup of $G$ can be computed in polynomial time by 5 of Theorem \ref{Basic}. It is straightforward to check that 1. $G^{f_\mathfrak{U}(p)}=\langle \{[x, y], x^{p-1}\mid x, y\in S\} \rangle$. 2. $G^{f_{w\mathfrak{U}}(p)}=\langle (\bigcup_{q\in\pi(G)}\{[x, y], x^{p-1}\mid x, y\in S_q\})^G \rangle$. 3. $G^{f_{\mathfrak{N}\mathcal{A}}(p)}=\langle (\bigcup_{q\in\pi(G)}\{[x, y]\mid x, y\in S_q\})^G \rangle$. 4. $G^{f_{sm\mathfrak{U}}(p)}=\langle (\bigcup_{q\in\pi(G)}\{[x, y], x^{\prod_{r\in\pi(p-1)}r}\mid x, y\in S_q\})^G \rangle$. 5. $G^{f_{s\mathfrak{U}}(p)}=\langle \{[x, y], x^{\prod_{q\in\pi(p-1)}q}\mid x, y\in S\} \rangle$. 6. $G^{f_{sh\mathfrak{U}}(p)}=\langle (\bigcup_{q\in\pi(G)\setminus\pi(p-1)}\{x \mid x\in S_q\})^G \rangle$. Hence each of the above-mentioned subgroups can be computed in polynomial time. Note that all these formations are hereditary. Thus the statement of the corollary directly follows from Theorems \ref{Fsubnorm} and \ref{local}. \end{proof} Let $f$ be a local definition of a local formation $\mathfrak{F}$. Recall that if $f(p)\subseteq\mathfrak{F}$ for all $p$, then every $f$-central chief factor is called $\mathfrak{F}$-\emph{central} and every non-$f$-central chief factor is called $\mathfrak{F}$-\emph{eccentric}. \begin{lem}\label{Fcentral} Assume that $f_\mathfrak{F}$ is a local definition of a local formation $\mathfrak{F}$ and $G^{f_\mathfrak{F}(p)}$ can be computed in polynomial time for every prime $p\in\pi(G)$. Then $G^{F_\mathfrak{F}(p)}$ can be computed in polynomial time for every prime $p\in\pi(G)$, where $F_\mathfrak{F}$ is the canonical local definition of $\mathfrak{F}$. In particular, $\mathfrak{F}$-centrality of a chief factor can be checked in polynomial time. \end{lem} \begin{proof} Recall that $F_\mathfrak{F}(p)=\mathfrak{N}_p(\mathfrak{F}\cap f_\mathfrak{F}(p))$. So $G^{F_\mathfrak{F}(p)}=(G^{f_\mathfrak{F}(p)}G^\mathfrak{F})^{\mathfrak{N}_p}$ can be computed in polynomial time by Theorem \ref{local} for any $p\in\pi(G)$.
Following the proof of this theorem, we can check a chief factor for $F_\mathfrak{F}$-centrality (which is the same as $\mathfrak{F}$-centrality) in polynomial time. \end{proof} One of the important families of formations is that of Baer-local, or composition, formations. There are many ways to define them (see \cite[IV, Definitions 4.9]{Doerk1992}, \cite[p. 4]{Guo2015} and \cite[Definition 3.11]{Shemetkov1978}). A function of the form $f: \{\textrm{simple groups}\}\rightarrow\{\textrm{formations}\}$ is called a Baer function. The value $f(Z_p)$ is denoted by $f(p)$, where $Z_p$ is a cyclic group of order $p$. A chief factor $H/K$ of a group $G$ is called \emph{$f$-central} if $G/C_G(H/K)\in f(S)$, where all composition factors of $H/K$ are isomorphic to $S$. A formation $\mathfrak{F}$ is called \emph{Baer-local} if it coincides with the class of groups all of whose chief factors are $f$-central for some Baer function $f$. It is known (see \cite[1, Theorem 1.6]{Guo2015}) that a Baer-local formation can be defined by a Baer function $f$ such that $f(0)=f(S)$ for every non-abelian simple group $S$. \begin{thm}\label{composition} Let $f$ be a Baer-local definition of a composition formation $\mathfrak{F}$. Assume that $G^{f(x)}$ can be computed in polynomial time for every $x\in\mathbb{P}\cup\{0\}$. Then $\mathfrak{F}$ is $P$-recognizable and $G^\mathfrak{F}$ can be computed in polynomial time. \end{thm} \begin{proof} Let \begin{displaymath} \mathrm{f}_{\mathfrak{F}}(H/K, G)=\begin{cases} 1,& H/K \textrm{ is non-abelian and } |\langle [G^{f(0)}, H], K\rangle|=|K|;\\ 1,& H/K\textrm{ is a $p$-group and } |\langle[G^{f(p)}, H], K\rangle|=|K|;\\ 0,& \textrm{ otherwise}. \end{cases} \end{displaymath} As in the proof of Theorem \ref{local} we can show that $\mathrm{f}_{\mathfrak{F}}$ is a chief factor function and $\mathfrak{F}=\mathcal{C}(\mathrm{f}_{\mathfrak{F}})$. \end{proof} \subsection{The lattice of chief factor functions} For chief factor functions $\mathrm{f}_1$ and $\mathrm{f}_2$ let \begin{enumerate} \item $(\mathrm{f}_1\vee\mathrm{f}_2)(H/K, G)=1$ iff $\mathrm{f}_1(H/K, G)=1$ or $\mathrm{f}_2(H/K, G)=1$. \item $(\mathrm{f}_1\wedge\mathrm{f}_2)(H/K, G)=1$ iff $\mathrm{f}_1(H/K, G)=1$ and $\mathrm{f}_2(H/K, G)=1$. \item $\overline{\mathrm{f}}_1(H/K, G)=1$ iff $\mathrm{f}_1(H/K, G)=0$. \end{enumerate} It is straightforward to check that these functions are chief factor functions. If $\mathrm{f}_1(H/K, G)$ and $\mathrm{f}_2(H/K, G)$ can be computed in polynomial time, then the functions from 1--3 can also be computed in polynomial time. Note that the first two items define the structure of a distributive lattice on the set of chief factor functions. \begin{thm} $P$-recognizable chief factor formations form a distributive lattice. \end{thm} The formation $\mathfrak{F}$ of groups whose $3$-chief factors are not central plays an important role as a counterexample in the study of mutually permutable products of groups (see \cite[Example 4.4.8]{PFG}). It is clear that this class of groups is defined by a chief factor function $\mathrm{f}$ such that $\mathrm{f}(H/K, G)=1$ if and only if $H/K$ is not a $3$-chief factor or is a non-central $3$-chief factor. Since the orders of a chief factor and of its centralizer can be computed in polynomial time by Theorem \ref{Basic}, we see \begin{prop} The formation of groups whose $3$-chief factors are not central is $P$-recognizable.
\end{prop} It is well known that any Baer-local formation $\mathfrak{F}$ can be defined by Baer-function $F_\mathfrak{F}$ such that $F_\mathfrak{F}(0)=\mathfrak{F}$, i.e. the general definition of Baer-local formation gives little information about the action of an $\mathfrak{F}$-group $G$ on its non-abelian chief factors. Therefore several families of Baer-local formations were introduced by giving additional information about the action of an $\mathfrak{F}$-group on its non-abelian chief factors. For example, in \cite{Guo2009a, Guo2009} Guo and Skiba introduced the class $\mathfrak{F}^*$ of quasi-$\mathfrak{F}$-groups for a saturated formation $\mathfrak{F}$: \begin{center} $\mathfrak{F}^*=(G\,|$ for every $\mathfrak{F}$-eccentric chief factor $H/K$ and every $x\in G$, $x$ induces an inner automorphism on $H/K$). \end{center} If $\mathfrak{N}\subseteq\mathfrak{F}$ is a normally hereditary local formation, then $\mathfrak{F}^*$ is a normally hereditary Baer-local formation by \cite[Theorem~2.6]{Guo2009a}. \begin{thm}\label{quasi} Let $f_\mathfrak{F}$ be a local definition of a local formation $\mathfrak{F}$. Assume that $G^{f_\mathfrak{F}(p)}$ can be computed in a polynomial time for every prime $p$. Then $\mathfrak{F}^*$ is $P$-recognizable and $G^{\mathfrak{F}^*}$ can be computed in a polynomial time. \end{thm} \begin{proof} Note that every element of a group $G$ induces an inner automorphism on a chief factor $H/K$ if and only if $HC_G(H/K)=G$. The last condition can be checked in a polynomial time by Theorem \ref{Basic}. Now we can check that either a chief factor is $\mathfrak{F}$-central or every element of a group $G$ induces an inner automorphism on it in a polynomial time by Lemma \ref{Fcentral}. Thus $\mathfrak{F}^*$ is $P$-recognizable and $G^{\mathfrak{F}^*}$ can be computed in a polynomial time by Theorem \ref{residual}. \end{proof} \begin{cor} Formation $\mathfrak{N}^*$ of all quasinilpotent groups is $P$-recognizable and $G^{\mathfrak{N}^*}$ can be computed in a polynomial time. \end{cor} \subsection{$\mathfrak{F}$-subnormal subgroups} In \cite{Monakhov2018, Murashka2018, Semenchuk2011, Vasilev2016} groups with $K$-$\mathfrak{F}$-subnormal or $\mathfrak{F}$-subnormal Sylow subgroups were studied. The class of all groups with $K$-$\mathfrak{F}$-subnormal (resp. $\mathfrak{F}$-subnormal) Sylow $\pi$-subgroups was denoted by $\overline{w}_\pi\mathfrak{F}$ (resp. $w_\pi\mathfrak{F}$, see \cite{Vasilev2016}). \begin{thm} Let $\mathfrak{F}$ be a hereditary formation such that $G^\mathfrak{F}$ can be computed in a polynomial time for every group $G$ and $\pi$ be a set of primes such that $ \pi(G)\cap\pi$ can be computed in polynomial time for every group $G$. Then $\mathrm{w}_\pi\mathfrak{F}$ and $\mathrm{\overline{w}}_\pi\mathfrak{F}$ are $P$-recognizable formations. \end{thm} \begin{proof} Note that if a Sylow $p$-subgroup of $G$ is $K$-$\mathfrak{F}$-subnormal, then every Sylow $p$-subgroup of every quotient group of $G$ is $K$-$\mathfrak{F}$-subnormal in it. Assume now that $P$ is not a $K$-$\mathfrak{F}$-subnormal Sylow subgroup of $G$. Then ISKFSUBNORMAL$(G, P, \mathfrak{F})$ finds a $K$-$\mathfrak{F}$-subnormal in $G$ subgroup $M=M(P)$ of $G$ with $P^M=M$ and $PM^\mathfrak{F}=M$. Assume that a Sylow $p$-subgroup $PN/N$ is $K$-$\mathfrak{F}$-subnormal in $G/N$. Since $\mathfrak{F}$ is a hereditary formation, we see that $PN/N$ is $K$-$\mathfrak{F}$-subnormal in $MN/N$. 
From $(PN/N)^{MN/N}=P^MN/N=MN/N$ and $(PN/N)(MN/N)^\mathfrak{F}=(PN/N)(M^\mathfrak{F}N/N)=MN/N$ it follows that $PN/N=MN/N$ is a $p$-group. Hence $\mathrm{O}^p(M)\leq N$. On the other hand, $M$ is $K$-$\mathfrak{F}$-subnormal in $G$. Hence if $\mathrm{O}^p(M)\leq N$, then $PN/N=MN/N$ is a $K$-$\mathfrak{F}$-subnormal Sylow $p$-subgroup of $G/N$. Thus $G^{\mathrm{\overline{w}}_\pi\mathfrak{F}}$ is the normal closure of the subgroup generated by the $\mathrm{O}^p(M(P))$, where $P$ ranges over the non-$K$-$\mathfrak{F}$-subnormal Sylow $p$-subgroups of $G$ with $p\in\pi$. From Theorem \ref{Basic} it follows that this subgroup can be computed in polynomial time. The algorithm for computing $G^{\mathrm{w}_\pi\mathfrak{F}}$ uses the same ideas. \end{proof} \subsection*{Acknowledgments} I am grateful to A.\,F. Vasil'ev for helpful discussions. {\small\bibliographystyle{siam} \bibliography{BibAlg}} \end{document}
2205.12906v2
http://arxiv.org/abs/2205.12906v2
A theory of quantum (statistical) measurement
\documentclass[12pt]{article} \usepackage{amsmath} \usepackage{amssymb,amsthm} \usepackage{color} \newtheorem{Thm}{Theorem}[section] \newtheorem{theorem}[Thm]{Theorem} \newtheorem{proposition}[Thm]{Proposition} \newtheorem{corollary}[Thm]{Corollary} \newtheorem{lemma}[Thm]{Lemma} \newtheorem{conjecture}[Thm]{Conjecture} \newtheorem{remark}{Remark}[section] \newtheorem{remarks}{Remarks}[section] \newtheorem{definition}[Thm]{Definition} \title{A theory of quantum (statistical) measurement} \author{Walter F. Wreszinski\footnote{[email protected], Instituto de Fisica, Universidade de S\~ao Paulo (USP), Brasil}} \begin{document} \maketitle \begin{abstract} We propose a theory of quantum (statistical) measurement which is close, in spirit, to Hepp's theory, which is centered on the concepts of decoherence and macroscopic (classical) observables, and apply it to a model of the Stern-Gerlach experiment. The number $N$ of degrees of freedom of the measuring apparatus is such that $N \to \infty$, justifying the adjective ``statistical'', but, in addition, and in contrast to Hepp's approach, we make a three-fold assumption: the measurement is not instantaneous, it lasts a finite amount of time and is, up to arbitrary accuracy, performed in a finite region of space, in agreement with the additional axioms proposed by Basdevant and Dalibard. It is then shown how von Neumann's ``collapse postulate'' may be avoided by a mathematically precise formulation of an argument of Gottfried, and, at the same time, Heisenberg's ``destruction of knowledge'' paradox is eliminated. The fact that no irreversibility is attached to the process of measurement is shown to follow from the author's theory of irreversibility, formulated in terms of the mean entropy, due to the latter's property of affinity. \end{abstract} \section{Introduction and Summary} In a recent very stimulating paper, S. Doplicher \cite{Dop} describes qualitatively a ``possible picture of the measurement process in quantum mechanics, which takes into account the finite and nonzero time duration $T$ of the interaction between the observed system and the microscopic part of the measurement apparatus''. In this paper we do not distinguish, as he does, two parts of the measurement apparatus, which, for us, will be a ``macroscopic pointer'', modelled by a quantum system with number of degrees of freedom $N= \infty$, as suggested by Hepp \cite{Hepp}, but the time-duration $T$ of the measurement will be assumed to satisfy the conditions: \begin{itemize} \item [$a.)$] $0 < T$; \item [$b.)$] the measurement takes place in a region of finite spatial extension \end{itemize} In their quantum mechanics textbook for the \'{E}cole Polytechnique, Basdevant and Dalibard \cite{BasDal} remark, in connection with their analysis of the Stern-Gerlach (SG) experiment \cite{StGer}, that a.) and b.) are ``two fundamental aspects which are absent from the classical formulation of the principles of quantum mechanics''. In his concluding remarks in section 3, Doplicher observes that ``the conventional picture of the measurement process in quantum mechanics'' requires that, as $N \to \infty$, the time duration of the measurement tends to zero and that the measurement apparatus occupies a volume $V$ such that $V \to \infty$, referring in this context to the important work of Araki and Yanase \cite{AY}. The latter authors also show, however, for a simple case, that an approximate measurement of an operator such as spin is possible to any desired accuracy. 
A similar result follows, in our approach, which relies in the framework introduced by Haag and Kastler \cite{HK}, by restriction to a class of observables which are ``arbitrarily close to their restriction to finite $N$''(corresponding to finite volume, assuming finite density, as required in the thermodynamic limit) - see Assumption A in section 2. In this sense, b.) above will follow, as in the case examined by Araki and Yanase, to arbitrary accuracy. Concerning, however, the requirement that the time duration tend to zero, the situation is completely different, at least in a nonrelativistic context (in the relativistic field context, the same should follow for entirely different reasons, see the conclusion). Our forthcoming Theorem 3.4 strongly requires assumption a.), i.e., that the measurement not be instantaneous, and, in the concrete SG model of section 4, it may be explicitly seen that if $T(N) \to 0$ at a certain rate (see (82) of Remark 4.1), the off-diagonal elements of the density matrix do not vanish as $N \to \infty$. We explain why we are not forced to require that the time of measurement be instantaneous in Remark 4.2: it has to do with the forthcoming notion of macroscopic or classical observables. In section 5 we shall also see that preparation of the system and measurement are dual, inseparable processes, and in the hypothesis of their both being instantaneous, a ``time-arrow'' may not exist a priori, which is an essential condition for a precise formulation of the author's condition of irreversibility \cite{Wre}. Doplicher's choice of conventional picture of the measurement process, the article \cite{DLP}, in his view ``quite satisfactory'', has, in our opinion (as well as Hepp's, see (\cite{Hepp}, p. 243)), one major disadvantage: it employs, in a crucial sense, the ``ergodic average'', which is not supported by any physical principle. We now briefly describe our framework, following, in part, \cite{Dop}. In von Neumann's general picture \cite{vN}, we have a system $S$, whose general observable $A=\sum_{j} \lambda_{j} E_{j}$ has finite spectrum $\lambda_{j}, j=1, \cdots, n$, and self-adjoint spectral projections $E_{j}$. The Hilbert space of the state vectors of the composite system, consisting of $S$ and the measurement apparatus $A_{N}$, which we assume to consist of a quantum system with $N$ degrees of freedom, is given by the tensor product ${\cal H}_{S} \otimes {\cal H}_{A_{N}}$ of the corresponding Hilbert spaces. The total Hamiltonian is \begin{equation} \label{(1.3)} H_{N} = H_{S} \otimes \mathbf{1} + \mathbf{1} \otimes H_{A_{N}} + V_{N} \end{equation} For simplicity, we restrict further the number of eigenvalues of the observable $A$ to two, $\lambda_{+}$ and $\lambda_{-}$, with $\lambda_{+} > \lambda_{-}$ (as will be the case in the SG experiment of section 4). There exists a quantity $t_{D}$, called \emph{decoherence time} (or relaxation time), which may be explicitly computed in the SG model, defined as the minimum time interval $t_{D}$ such as a measurement of $A_{N}$, i.e., such that $\lambda_{+}$ and $\lambda_{-}$ may be experimentally distinguished, is possible. 
We assume that \begin{equation} \label{(1.4)} 0 < t_{D} \mbox{ and } t_{D} \mbox{ is independent of } N \end{equation} Our requirement on $T$, compatible with assumption a.), may be stated as \begin{equation} \label{(1.5)} 0 < t_{D} \le T < \infty \mbox{ with } t_{D} \mbox{ and } T \mbox{ independent of } N \end{equation} In an important paper, Narnhofer and Thirring \cite{NTh1} examined the intriguing question why the only states found in Nature are such that they assume definite values on classical observables, but never mixtures of them. This problem has been lively discussed since Schr\"{o}dinger introduced his cat \cite{Schr}. As simple examples of classical (or macroscopic) observables, they propose the mean magnetization of a magnet \begin{equation} \label{(1.6)} \vec{m} = \lim_{N \to \infty} \frac{1}{2N+1} \sum_{i=-N}^{N} \vec{\sigma_{i}} \end{equation} or the center of mass velocity of a system of particles \begin{equation} \label{(1.7)} \vec{v} = \lim_{N \to \infty} \frac{\sum_{i=-N}^{N} m_{i} \vec{v}_{i}}{\sum_{i=-N}^{N} m_{i}} \end{equation} of a large object. We shall use both in this paper, but replace \eqref{(1.7)} by the center of mass coordinate of a particle system \begin{equation} \label{(1.8)} \vec{x}_{C.M.} = \lim_{N \to \infty} \frac{1}{2N+1} \sum_{i=-N}^{N} \vec{x}_{i} \end{equation} (of a group of equal atoms). We remark that \eqref{(1.6)} - \eqref{(1.8)} are precise definitions of macroscopic or classical observables when one specifies the appropriate representation, as we do in section 2. It is in this connection that the limit $N \to \infty$ plays a crucial role in the present framework and, in this respect, quite analogously to Hepp's \cite{Hepp}. In order to explain the problems, we adopt Bell's suggestion (\cite{Bell1}, p.36) of taking the apparatus $A_{N}$ out of the ``rest of the world'' R, and treat it together with $S$ as part of the enlarged quantum system $S_{N}^{'}$: $R=A_{N}+R^{'}$;$S+A_{N}=S_{N}^{'}$; $W=S_{N}^{'}+R^{'}$: ``the original axioms about 'measurement' are then applied not at the $S/A_{N}$ interface, but at the $A_{N}/R^{'}$ interface''. Neglecting the interaction of $A_{N}$ with $R^{'}$, the joint system $S_{N}^{'}$ is found to end, by the Schr\"{o}dinger equation associated to $H_{N}$ in \eqref{(1.3)}, after the ``measurement on $S$ by $A_{N}$'' (i.e., after a fixed time $T$ satisfying \eqref{(1.5)})in a state \begin{equation} \label{(1.9)} \Psi_{N}(T) = \sum_{n}c_{n} \Psi_{n,N}(T) \end{equation} where the states $\Psi_{\pm,N}(T)$ correspond to two definite (apparatus) pointer positions. The corresponding density matrix is \begin{equation} \label{(1.10.1)} \rho_{N}(T) = \sum_{n,m} c_{n}\bar{c_{m}} \Psi_{n,N}(T)\overline{\Psi_{m,N}(T)} \end{equation} where the bar denotes complex conjugation. Bell reports that in his textbook analysis of the measurement problem, Kurt Gottfried (\cite{KG}, pp. 186-188) insists that, being $A_{N}$ a macroscopic system (and thus also $S_{N}^{'}$), \begin{equation} \label{(1.10.2)} tr(A\hat{\rho})=tr(A\rho) \mbox{ ``for all observables $A$ known to occur in Nature'' } \end{equation} where \begin{equation} \label{(1.11)} \hat{\rho}_{N}(T) = \sum_{n} |c_{n}|^{2} \Psi_{n,N}(T) \overline{\Psi_{n,N}(T)} \end{equation} (in our notation) - ``dropping interference terms involving pairs of macroscopically different states''. We shall refer to the replacement of $\rho_{N}(T)$ by $\hat{\rho}_{N}(T)$ as the ``von Neumann collapse of the density matrix''. 
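To fix ideas, consider the two-outcome case $n\in\{+,-\}$ and assume, purely for the purpose of illustration, that the pointer states $\Psi_{+,N}(T)$ and $\Psi_{-,N}(T)$ are orthonormal at finite $N$. In the basis they span, \eqref{(1.10.1)} and \eqref{(1.11)} take the matrix form
\begin{equation*}
\rho_{N}(T)=\begin{pmatrix} |c_{+}|^{2} & c_{+}\bar{c}_{-}\\ c_{-}\bar{c}_{+} & |c_{-}|^{2} \end{pmatrix},
\qquad
\hat{\rho}_{N}(T)=\begin{pmatrix} |c_{+}|^{2} & 0\\ 0 & |c_{-}|^{2} \end{pmatrix},
\end{equation*}
so that the ``collapse'' amounts to deleting the off-diagonal (interference) terms, while the diagonal entries, i.e., the probabilities $|c_{\pm}|^{2}$ of the two pointer readings, are left unchanged.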
The associated ``loss of relative phases'' leads to what we shall refer to as \emph{Heisenberg paradox} \cite{Heis}: ``Every experiment destroys some of the knowledge of the system which was obtained by previous experiments''. We shall see that, while a reduction of type \eqref{(1.10.2)}, \eqref{(1.11)} does not occur for finite $N$, it may indeed occur in the limit $N \to \infty$: this is the content of Corollary 3.5, which makes the last sentence in \eqref{(1.10.2)} precise, i.e., specifies the (physically sensible) class of observables $A$. This enables elimination of one of Bell's objections in \cite{Bell2} to Hepp's conceptual framework: the observable which ``undoes the measurement'' proposed by him does not exist in the specified framework, see \cite{NarWre}. On the contrary, his second objection in \cite{Bell2}, that the infinite-time limit in the only example of automorphic evolution considered by Hepp, the Coleman model, is not physically sensible, is sound. Indeed, this model does \emph{not} satisfy \eqref{(1.5)}, because $$ t_{D} = t_{D}(N) = N+ \mbox{ constant } $$ where $N$ denotes the number of sites in the model's (spin) chain (\cite{Se1}, \cite{Se2}, \cite{NarWre}): thus $t_{D}(N) \to \infty$ as $N \to \infty$. It thus turns out that Bell's criticism applies to the model, rather than to the whole conceptual framework introduced by Hepp and, indeed, Narnhofer and Thirring provide a physically reasonable model example in which the infinite time limit can be controlled and agrees with some of Hepp's conclusions (\cite{NTh1}, see their Remark 1). This example is, however, not very illuminating from the point of view of measurement theory, having being designed to describe certain interactions with the environment which render a mixed state pure in the infinite time limit, while we are interested in the opposite effect, that a pure state becomes mixed under evolution. For this reason, we analyse in section 4 a model of the SG experiment, which well illustrates Theorem 3.4 and is a generalization to an infinite number of degrees of freedom of the model proposed in \cite{KG1}, together with the prescription of initial state and experimental setting in \cite{GG}, see also \cite{BasDal}. The states in the assumption of theorem 3.4 depend on the parameter $T$, which is only supposed to satisfy \eqref{(1.5)}. Concerning this point, the idea should be mentioned (\cite{Bell1}, p.37, bottom) that ``systems such as $S_{N}^{'}$ have \emph{intrinsic} properties - independently of and before observation''. For instance, the ``jump'' associated to the collapse is supposed to occur at some not well specified time (\cite{Bell2}, p. 98). However, both the Landau-Lifshitz-Bohr-Haag picture of measurement as an interaction between system $S_{N}^{'}$ and environment $R^{'}$ which occurs apart from and independently of any observer (\cite{LL}, \cite{Haag}), as well as the fact, emphasized by Peierls \cite{Pe1} that the observer does not have to be contemporaneous with the event, allowing, for example, from present evidence, to draw conclusions about the early Universe (the classical example being the cosmic microwave background), strongly suggest that the quantities to be measured do not depend on $T$. Ideally, we expect that the states in Theorem 3.4 satisfy the assumptions of the theorem \emph{for all} $T$ satisfying \eqref{(1.5)}, and, moreover, that the actually measured quantities independ of $T$. 
It is rewarding that the example treated in section 4 fulfills both of these expectations (see Remark 4.2). In section 5 we briefly review the definition of irreversibility in (\cite{Wre}, \cite{Wre1}) in terms of the mean entropy \cite{LanRo}, and prove that it is conserved on the average under ``collapse'', as a consequence of the property of affinity \cite{LanRo}. This result contrasts with Lemma 3 of \cite{NarWre}, where the quantum Boltzmann entropy of a finite system is shown to decrease under collapse, thus contradicting the second law (on the average), and requiring that the incidence of interactions with the environment be rare in order to assure the global validity of the second law (see the last remarks in \cite{NarWre}). As a consequence of theorem 5.1, van Kampen's conjecture (\cite{vK}, mentioned in \cite{Bell1}) that the entropy of the Universe remains zero throughout the process of measurement is confirmed \emph{in the sense of the mean entropy}, and thus the ``irreversibility paradox'' suggested by Landau and Lifschitz \cite{LL} and Gottfried \cite{KG} does not take place for infinite quantum spin systems, adopting the mean entropy as indicator. An illustration of Theorem 5.1 in the theory of measurement is provided by the effective quantum spin model of the SG experiment in section 4.2. Section 6 is reserved to a conclusion, with a brief discussion of open problems. The present paper owes very much to the theory of quantum statistical mechanics of infinite systems, as described in \cite{BRo2}, with a pedagogical textbook exposition in the classic book by Sewell \cite{Se}. The basic Theorem 3.4 amalgamates results in the papers of Roberts and Roepstorff \cite{RRoe} and Hepp \cite{Hepp}. The groundbreaking framework of the paper of Haag and Kastler \cite{HK}, nicely reviewed by Wightman \cite{Wight} plays a central role in the proposed framework. Concerning references, a good bibliography on several aspects of the quantum theory of measurement up to 2003 is to be found in \cite{KG1}, pp. 575 and 576. Several other recent references, including book references, may be found in \cite{Dop}. From the point of view of mathematical physics, a very recent reference is \cite{Fro}: there, it is argued that the Schr\"{o}dinger equation does not yield a correct description of the quantum mechanical time evolution of states of isolated physical systems featuring events; it also cites several recent references, to which we refer. In a different framework, that of thermal open systems, a recent reference is \cite{Pil}, see also references given there. In the introduction and elsewhere, we sometimes state ``we assume...'': in order to clarify what is really assumed, we have collected \emph{all} the assumptions in Assumption A in section 2. \section{General setting} \subsection{Generalities: states of infinite systems} We very briefly summarize here some concepts of crucial importance in this paper, but, for any detail, we refer to the references (\cite{Se}, \cite{BRo2}, \cite{Hug}). 
We shall use quantum spin systems as a prototype, such as the generalized Heisenberg Hamiltonian \begin{equation} \label{(2.1)} H_{\Lambda} = -2\sum_{x,y \in \Lambda}[J_{1}(x-y)(S_{x}^{1}S_{y}^{1}+S_{x}^{2}S_{y}^{2})+J_{2}(x-y)S_{x}^{3}S_{y}^{3}] \end{equation} where \begin{equation} \label{(2.2)} \sum_{x \in \mathbf{Z}^{\nu}}|J_{i}(x)|< \infty \mbox{ and } J_{i}(0)=0 \mbox{ for } i=1,2 \end{equation} Above, $\vec{S}_{x} \equiv (S_{x}^{1},S_{x}^{2},S_{x}^{3})$, where $S_{x}^{i}=\frac{1}{2}\sigma_{x}^{i}$, $i=1,2,3$, and $\sigma_{x}^{i}, i=1,2,3$, are the Pauli matrices at the site $x$. Here $H_{\Lambda}$ acts on the Hilbert space ${\cal H}_{\Lambda}=\otimes_{x \in \Lambda}\mathbf{C}_{x}^{2}$, and $\vec{S}_{x}$ is short for $\mathbf{1} \otimes \cdots \otimes \vec{S}_{x} \otimes \cdots \otimes \mathbf{1}$. The algebra associated to a finite region $\Lambda \subset \mathbf{Z}^{\nu}$ is \begin{equation} \label{(2.3)} {\cal A}(\Lambda) = B({\cal H}_{\Lambda}) \end{equation} and two of its properties are crucial: \begin{itemize} \item [$a.)$] (causality) $[{\cal A}(B),{\cal A}(C)]=0$ if $B \cap C = \emptyset$; \item [$b.)$] (isotony) $B \subset C \Rightarrow {\cal A}(B) \subset {\cal A}(C)$. \end{itemize} The algebra \begin{equation} \label{(2.6)} {\cal A}_{L} = \cup_{B} {\cal A}(B) \end{equation} where $B$ ranges over the finite parts of $\mathbf{Z}^{\nu}$, is called the \emph{local} algebra; its closure with respect to the norm \begin{equation} \label{(2.7)} {\cal A} \equiv \overline{{\cal A}_{L}} \end{equation} is the \emph{quasilocal} algebra: it consists of observables which are, to arbitrary accuracy, approximated by observables attached to a \emph{finite} region. The bar in \eqref{(2.7)} denotes the C*-inductive limit (\cite{KR}, Prop.11.4.1). The norm is defined by $A \in B({\cal H}_{\Lambda}) \to ||A|| = \sup_{||\Psi|| \le 1} ||A \Psi||$, $\Psi \in {\cal H}_{\Lambda}$. An \emph{automorphism} is a one-to-one mapping of ${\cal A}$ onto ${\cal A}$ which preserves the algebraic structure; $A \to \tau_{x}(A)$ denotes the space-translation automorphism. A \emph{state} $\omega_{\Lambda}$ on ${\cal A}(\Lambda)$ is a positive, normalized linear functional on ${\cal A}(\Lambda)$: $\omega_{\Lambda}(A) = Tr_{{\cal H}_{\Lambda}} (\rho_{\Lambda} A) \mbox{ for } A \in {\cal A}(\Lambda)$ (positive means $\omega_{\Lambda}(A^{\dag}A) \ge 0$, normalized means $\omega_{\Lambda}(\mathbf{1})=1$). For quantum spin systems, the index $N$ will be identified as \begin{equation} \label{(2.19d)} N = |\Lambda| = V \end{equation} with the understanding that $N \nearrow \infty$ means, for simplicity, the limit along a sequence of parallelepipeds of sides $a_{i}, i=1, \cdots, \nu$, with $a_{i} \to \infty$ for each $i \in [1,\nu]$; more general limits, such as the van Hove limit (\cite{BRo2}, p. 287), could be adopted. The notion of state generalizes to systems with an infinite number of degrees of freedom: $\omega(A)= \lim_{\Lambda \nearrow \infty} \omega_{\Lambda}(A)$, at first for $A \in {\cal A}_{L}$ and then, by continuity, for $A \in {\cal A}$. Each state $\omega$ defines a representation $\Pi_{\omega}$ of ${\cal A}$ as bounded operators on a Hilbert space ${\cal H}_{\omega}$ with cyclic vector $\Omega_{\omega}$ (i.e., $\Pi_{\omega}({\cal A}) \Omega_{\omega}$ is dense in ${\cal H}_{\omega}$), such that $\omega(A) = (\Omega_{\omega}, \Pi_{\omega}(A) \Omega_{\omega})$ (the GNS construction).
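As a simple standard illustration of the GNS construction (not specific to the present model), take ${\cal A}=M_{2}(\mathbf{C})$, the algebra of a single spin, and the state $\omega(A)=Tr(\rho A)$ with $\rho=\mathrm{diag}(p,1-p)$, $0<p<1$. A GNS triple may be realized on ${\cal H}_{\omega}=\mathbf{C}^{2}\otimes\mathbf{C}^{2}$ with $\Pi_{\omega}(A)=A\otimes\mathbf{1}$ and cyclic vector $\Omega_{\omega}=\sqrt{p}\, e_{1}\otimes e_{1}+\sqrt{1-p}\, e_{2}\otimes e_{2}$: indeed $(\Omega_{\omega},(A\otimes\mathbf{1})\Omega_{\omega})=p\,A_{11}+(1-p)A_{22}=Tr(\rho A)$, and $\Omega_{\omega}$ is cyclic because $\rho$ is faithful.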
The strong closure of $\Pi_{\omega}({\cal A})$ is a von Neumann algebra, with commutant $\Pi_{\omega}({\cal A})^{'}$, which is the set of bounded operators on ${\cal H}_{\omega}$ which commute with all elements of $\Pi_{\omega}({\cal A})$, and the center is defined by $Z_{\omega}= \Pi_{\omega}({\cal A}) \cap \Pi_{\omega}({\cal A})^{'}$. The set of states over the algebra ${\cal A}$ will be denoted by $E_{{\cal A}}$. For quantum spin systems on $\mathbf{Z}^{\nu}$, we shall consider only space-translation-invariant states, i.e., such that \begin{equation} \label{(2.8)} \omega \circ \tau_{x} = \omega \mbox{ for all } x \in \mathbf{Z}^{\nu} \end{equation} An extremal invariant or ergodic state is a state which cannot be written as a proper convex combination of two distinct invariant states $\omega_{1}$ and $\omega_{2}$, i.e., the following does \emph{not} hold: \begin{equation} \label{(2.9)} \omega = \alpha \omega_{1} + (1-\alpha) \omega_{2} \mbox{ with } 0<\alpha<1 \end{equation} If the above formula is true, it is natural to regard $\omega$ as a mixture of two pure ``phases'' $\omega_{1}$ and $\omega_{2}$, with proportions $\alpha$ and $1-\alpha$, respectively (\cite{BRo1}, Theorem 2.3.15). A \emph{factor} or \emph{primary} state is defined by the condition that the center is trivial, \begin{equation} \label{(2.10)} Z_{\omega}= \{\lambda \mathbf{1} \} \end{equation} with $\lambda \in \mathbf{C}$. For quantum spin systems the center $Z_{\omega}$ coincides (\cite{BRo1}, Example 4.2.11) with the so-called algebra at infinity $\zeta_{\omega}^{\perp}$, which corresponds to operations which can be made outside any bounded set. As a typical example of an observable in $\zeta_{\omega}^{\perp}$, let $\omega$ be any translation-invariant state. Then the space average of $A$, \begin{equation} \label{(2.11a)} \eta_{\omega}(A) \equiv \mbox{s-lim}_{\Lambda \nearrow \infty} \frac{1}{|\Lambda|} \sum_{x \in \Lambda} \Pi_{\omega}(\tau_{x}(A)) \end{equation} exists, and, if $\omega$ is ergodic, then \begin{equation} \label{(2.11b)} \eta_{\omega}(A) = \omega(A) \mathbf{1} \end{equation} (\cite{LvH}), which corresponds to ``freezing'' the observables at infinity to their expectation values. The following definition is abstracted from \cite{Hepp}, before his Lemma 1. \begin{definition} \label{Definition 2.1} Two states $\omega_{1}$ and $\omega_{2}$ are \emph{disjoint} if no subrepresentation of $\Pi_{\omega_{1}}$ is unitarily equivalent to any subrepresentation of $\Pi_{\omega_{2}}$. Two states which are not disjoint are called \emph{coherent}. \end{definition} For finite-dimensional matrix algebras (with trivial center) all representations are coherent, and all are factor representations as well. We have (\cite{Hepp}, Lemma 6): Let $\omega_{1}$ and $\omega_{2}$ be extremal invariant (ergodic) states with respect to space translations. If, for some $A \in {\cal A}$, \begin{equation} \label{(2.11c)} \eta_{\omega_{1}} (A) = a_{1}\mathbf{1} \mbox{ and } \eta_{\omega_{2}}(A) = a_{2}\mathbf{1} \mbox{ with } a_{1} \ne a_{2} \end{equation} then $\omega_{1}$ and $\omega_{2}$ are disjoint. The space averages $\eta$ defined above correspond to macroscopic ``pointer positions'', e.g., the mean magnetization in the Heisenberg model \eqref{(2.1)} in the $3$-direction $\sum_{x \in \Lambda} \frac{S_{x}^{3}}{|\Lambda|}$, with $A= S^{3}$.
If $\eta_{\omega_{+}}(S^{3}) = a_{+} = 1$, and $\eta_{\omega_{-}}(S^{3}) = -1$, the states $\omega_{\pm}$ are macroscopically different, i.e., differ from one another by flipping an infinite number of spins. For a comprehensive discussion, see \cite{Se}, section 2.3. Given a state $\omega_{1}$, the set of states $\omega_{2}$ ``not disjoint from'' $\omega_{1}$ forms a \emph{folium}: a norm-closed subset ${\cal F}$ of $E_{{\cal A}}$ such that (i) if $\omega_{1},\omega_{2} \in {\cal F}$, and $\lambda_{1}, \lambda_{2} \in \mathbf{R}_{+}$ with $\lambda_{1}+\lambda_{2}=1$, then $\lambda_{1} \omega_{1}+\lambda_{2} \omega_{2} \in {\cal F}$; ii.) if $\omega \in {\cal F}$ and $A \in {\cal A}$, the state $\omega_{A}$, defined by \begin{equation} \label{(2.12)} \omega_{A}(B) = \frac{\omega(A^{*}BA)}{\omega(A^{*}A)} \mbox{ with } \omega(A^{*}A) \ne 0 \end{equation} also belongs to ${\cal F}$ and is interpreted as a ``local perturbation of $\omega$''. We shall denote the folium associated to a state $\omega$ by $[\omega]$. If two states $\omega_{1}$ and $\omega_{2}$ are disjoint, their folia $[\omega_{1}]$ and $[\omega_{2}]$ are also disjoint. This follows from Hepp's Lemma 1 \cite{Hepp}: \begin{lemma} \label{lem:1} $\omega_{1} \in E_{{\cal A}}$ and $\omega_{2} \in E_{{\cal A}}$ are disjoint if and only if for every representation $\pi$ of ${\cal A}$ with $\omega_{i} = \omega(\Psi_{i}) \circ \pi$ for some $\Psi_{i} \in {\cal H}_{\pi}$, $i=1,2$, one has $$ (\Psi_{1}, \pi(A) \Psi_{2}) = 0 \forall A \in {\cal A} $$ \end{lemma} Above, $\omega_{i} = \omega(\Psi_{i}) \circ \pi$ means $$ \omega_{i}(A) = (\Psi_{i}, \pi(A) \Psi_{i}) \mbox{ with } \Psi_{i} \in {\cal H}_{\pi} $$ where ${\cal H}_{\pi}$ is the Hilbert space associated to the representation $\pi$. The lemma is easy to understand from the definition ~\ref{Definition 2.1} of disjointness: $\Psi_{2}$ and $\Psi_{1}$ lie in non-unitarily equivalent (``orthogonal'') Hilbert spaces, which generally differ by different values of a macroscopic observable of type, e.g., (4), (5) or (6), which means an operation affecting an \emph{infinite} number of points or sites, and therefore cannot be connected by a quasilocal observable, which is, by definition, arbitrarily close (in norm) to one localized in a finite region. The lemma also shows explicitly that when two states $\omega_{1}$ and $\omega_{2}$ are disjoint, so are their folia, by definition ~\eqref{(2.12)}. One important example, which will be our main concern in sections 4 and 5, is that of an infinite direct product space. For each vector $\vec{m}_{i}$, with $\vec{m}_{i}^{2}=1$, there exists a vector $|\vec{m}_{i})$ in the Hilbert space $\mathbf{C}^{2}_{i}$ such that $(\vec{\sigma}_{i} \cdot \vec{m}_{i})|\vec{m})_{i} = |\vec{m})_{i}$. Let ${\cal A}$ act on a reference vector \cite{NTh1} $|\Psi_{\vec{m}}) = \otimes_{i=-\infty}^{\infty} |\vec{m})_{i} \mbox{ with } \vec{\sigma}_{i} |\vec{m}_{i}) = \vec{m} |\vec{m})_{i}$ For $\vec{m} \ne \vec{n}$, this yields two representations $\pi_{\vec{m}}, \pi_{\vec{n}}$ of ${\cal A}$ on separable Hilbert spaces ${\cal H}_{\vec{m}}, {\cal H}_{\vec{n}}$. 
The following weak limits exist in these representations: \begin{equation} \label{(2.13a)} \vec{m} \mathbf{1} = \mbox{w-lim}_{N \to \infty} \frac{1}{2N+1} \sum_{i=-N}^{N} \pi_{\vec{m}}(\vec{\sigma}_{i}) \end{equation} \begin{equation} \label{(2.13b)} \vec{n} \mathbf{1} = \mbox{w-lim}_{N \to \infty} \frac{1}{2N+1} \sum_{i=-N}^{N} \pi_{\vec{n}}(\vec{\sigma}_{i}) \end{equation} These two representations cannot be unitarily equivalent because \begin{equation} \label{(2.14)} U^{-1} \pi_{\vec{m}}(\vec{\sigma}_{i}) U = \pi_{\vec{n}}(\vec{\sigma}_{i}) \end{equation} would imply $U^{-1} \vec{m}\mathbf{1} U = \vec{n}\mathbf{1}$, which is impossible because $U$ cannot change the unity $\mathbf{1}$. The same argument shows disjointness. The vectors $\Psi_{\vec{m}}$ define states $\omega_{\vec{m}}(\cdot) = (\Psi_{\vec{m}}, \cdot\, \Psi_{\vec{m}})$. The \emph{mixed} state is defined, as in \eqref{(2.9)} (with $\vec{m} \ne \vec{n}$), by \begin{equation} \label{(2.15)} \omega_{\alpha} \equiv \alpha \omega_{\vec{m}} + (1-\alpha) \omega_{\vec{n}} \mbox{ with } 0 < \alpha < 1 \end{equation} which is a convex combination of distinct pure states $\omega_{\vec{m}}$ and $\omega_{\vec{n}}$. Consider, now, the framework described in section 1, consisting of the system $S$, for simplicity a spin one-half system, whose general observable is \begin{equation} \label{(2.16)} A = \lambda_{+}P_{+}+\lambda_{-}P_{-} \end{equation} Consideration of a general, finite spectrum of $A$ poses, however, no problem. The Hilbert space of state vectors of the composite system, consisting of $S$ and the measurement apparatus $A_{N}$, is given by the tensor product \begin{equation} \label{(2.17)} {\cal H}_{S} \otimes {\cal H}_{A_{N}} \end{equation} of the corresponding Hilbert spaces. The total Hamiltonian is \begin{equation} \label{(2.18)} H_{N} = H_{S} \otimes \mathbf{1} + \mathbf{1} \otimes H_{A_{N}} + V_{N} \end{equation} Later on, the limit $N \to \infty$ will be taken in an appropriate sense. Take as initial state vector \begin{equation} \label{(2.19a)} \Psi_{N}(t=0) = (\alpha |+) + \beta |-)) \otimes \Psi_{0}^{N} \end{equation} We assume that \begin{equation} \label{(2.19b)} \exp(-iTH_{N}) \Psi_{N}(t=0) = \alpha |+) \otimes \Psi^{N,+,T} + \beta |-) \otimes \Psi^{N,-,T} \end{equation} with \begin{equation} \label{(2.19c)} |\alpha|^{2} + |\beta|^{2} = 1 \end{equation} \subsection{The framework: some specific assumptions} We shall assume that the case of particle systems \eqref{(1.8)} is also included, replacing $\mathbf{Z}^{\nu}$ by $\mathbf{Z}$ and finite regions $\Lambda$ by $\Lambda_{N} = [-N,N]$, $N \in \mathbf{N}_{+}$, with $|\Lambda|=|\Lambda_{N}|=2N+1$ (see \eqref{(2.19d)}). The isotony property b.) enables the algebra ${\cal A}$ associated to the apparatus to be defined as the inductive limit \eqref{(2.6)} (for the infinite product case, see \cite{Tak}). The algebra of the (system + apparatus) is thus assumed to be the C*-inductive limit of the ${\cal A}_{s} \otimes {\cal A}_{\Lambda}$, denoted by \begin{equation} \label{(2.20)} {\cal A}_{s} \otimes {\cal A} \end{equation} where ${\cal A}_{s}$ is the spin algebra, generated by the Pauli operators $\{\vec{\sigma}, \mathbf{1}\}$.
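As a purely numerical aside (a sketch under the product-state conventions above, assuming the \texttt{numpy} library; it is not part of the formal development), one may check at finite $N$ the mechanism behind \eqref{(2.13a)}--\eqref{(2.15)}: the overlap of the product vectors $\otimes_{i=-N}^{N}|\vec{m})_{i}$ and $\otimes_{i=-N}^{N}|\vec{n})_{i}$ equals $|(\vec{m}|\vec{n})|^{2N+1}=(\cos(\theta/2))^{2N+1}$, where $(\vec{m}|\vec{n})$ denotes the single-site spinor overlap and $\theta$ is the angle between $\vec{m}$ and $\vec{n}$, and therefore vanishes as $N \to \infty$, consistently with the disjointness expressed by Lemma \ref{lem:1}.
\begin{verbatim}
import numpy as np

# Pauli matrices
sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]], dtype=complex)
sz = np.array([[1, 0], [0, -1]], dtype=complex)

def spinor(m):
    """Normalized eigenvector of m.sigma with eigenvalue +1."""
    vals, vecs = np.linalg.eigh(m[0]*sx + m[1]*sy + m[2]*sz)
    return vecs[:, np.argmax(vals)]

theta = 0.3
m = np.array([0.0, 0.0, 1.0])                      # direction m
n = np.array([np.sin(theta), 0.0, np.cos(theta)])  # direction n

site_overlap = abs(np.vdot(spinor(m), spinor(n)))  # equals cos(theta/2)
for N in (5, 50, 500):
    print(N, site_overlap ** (2 * N + 1))          # tends to 0 as N grows
\end{verbatim}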
Under assumption \eqref{(2.19b)}, we may, for each $T$ satisfying \eqref{(1.5)}, consider the states on ${\cal A}$ \begin{equation} \label{(2.21)} \omega_{\Lambda}^{+,T} = (\Psi^{N,+,T}, A \Psi^{N,+,T}) \end{equation} and \begin{equation} \label{(2.22)} \omega_{\Lambda}^{-,T} = (\Psi^{N,-,T}, A \Psi^{N,-,T}) \end{equation} where $$ A \in {\cal A}_{\Lambda} $$ It is now convenient to distinguish explicitly the two cases we shall consider: 1.) Quantum spin systems The natural topology (from the point of view of physical applications) in the space of states is the \emph{weak* topology}. A sequence of states $\omega_{n}, n=1,2, \cdots$ on a C* algebra ${\cal A}$ is said to tend to a state $\omega$ in the weak* topology if \begin{equation} \label{(2.23)} \lim_{n \to \infty} \omega_{n}(A) = \omega(A) \mbox{ for all } A \in {\cal A} \end{equation} The above definition requires that we extend $\omega_{\Lambda}^{\pm,T}$ to ${\cal A}$ in one of the various possible ways, for instance, assigning to the extension $\tilde{\omega}_{\Lambda}^{\pm,T}$ the value $1$ in the complement ${\cal A}- {\cal A}_{\Lambda}$. Considering ${\cal A}$ as a Banach space, since the set of states on ${\cal A}$ is sequentially compact in the weak*-topology (see \cite{Roy}, Prop. 13, p.141 and Cor. 14, p. 142), because ${\cal A}$ is separable, there exists a subsequence $\{\Lambda_{n_{k}}\}_{k=1}^{\infty}$ of $\Lambda_{n} \nearrow \infty$ and states $\tilde{\omega}^{\pm,T}$ on ${\cal A}$ such that \begin{equation} \label{(2.24)} \tilde{\omega_{k}}^{\pm,T}(A) \equiv \tilde{\omega}_{\Lambda_{n_{k}}}^{\pm,T}(A) \to \tilde{\omega}^{\pm,T}(A) \mbox{ as } k \to \infty \end{equation} 2.) Particle systems. In this case, we confine our attention to infinite product states on the infinite tensor product of C* algebras $\otimes_{i \in \mathbf{Z}} {\cal A}_{i}$. Good references are \cite{Wehrl}, \cite{Gui}. In the sequel, take the index set $I = \mathbf{Z}$, and each ${\cal A}_{i}$, with $i \in I$ to be the von Neumann algebra generated by the Weyl operators (for simplicity in one dimension, which will be the case in the application in section 4) $$ W(\beta, \gamma) = \exp[i(\beta z_{i}+ \gamma p_{z_{i}})] $$ where $p_{z}=-i\frac{d}{dz}$, on ${\cal H}_{i}$ a copy of $L^{2}(\mathbf{R})$, with $\beta$ and $\gamma$ real numbers. \begin{definition} \label{Definition 2.2} Let $({\cal H}_{i})_{i \in I}$ be a family of Hilbert spaces. A family of vectors $(x_{i})_{i \in I}$, with $x_{i} \in {\cal H}_{i}$ is called a $C$ family if $\prod_{i \in I} ||x_{i}||$ converges. $(x_{i})_{i \in I}$ is called a $C_{0}$ family if $\sum_{i \in I} |||x_{i}||-1|$ converges. \end{definition} It may be proved (see, e.g., \cite{Wehrl}, lemma 2.2) that every $C_{0}$ family is a $C$ family, and that every $C$ family fulfilling $\prod_{i \in I} ||x_{i}|| \ne 0$ is a $C_{0}$ family. \begin{definition} \label{Definition 2.3} Two $C_{0}$ families $(x_{i})_{i \in I}$, $(y_{i})_{i \in I}$ are \emph{equivalent},$(x_{i})_{i \in I} \equiv (y_{i})_{i \in I}$, if \begin{equation} \label{(2.25)} \sum_{i \in I} |(x_{i}|y_{i}) - 1| < \infty \end{equation} \end{definition} It may be proved (see, e.g., \cite{Wehrl}, p. 60) that $\equiv$ is indeed an equivalence relation. The complete tensor product (CTP) of the ${\cal H}_{i}$, denoted by $\otimes_{i \in I} {\cal H}_{i}$, defined in \cite{Wehrl}, p. 
65, is a direct sum of \emph{incomplete tensor product spaces} (IDPS) $\otimes_{i \in I}^{\zeta} {\cal H}_{i}$: they are the closed linear subspaces of the CTP spanned by the nonzero $C_{0}$ vectors in the $C_{0}$ family $\zeta$. If $0 \ne \otimes_{i \in I} x_{i} \in \zeta$, we write $\otimes_{i \in I}^{(\otimes x_{i})_{i \in I}} {\cal H}_{i}$ for the IDPS. The important result for us in this connection will be \begin{proposition} \label{prop:2.1} Let $\otimes_{i \in I} x_{i}$ be a $C_{0}$ vector not equal to zero. The set of all $\otimes_{i \in I} y_{i}$ such that $x_{i}=y_{i}$ for all but at most finitely many indices is total in $\otimes_{i \in I}^{\otimes_{i \in I} x_{i}} {\cal H}_{i}$. \end{proposition} (For a proof, see \cite{Wehrl}, p. 67, Prop. II.4). In the application in section 4 we shall have states on an infinite tensor product of C* algebras ${\cal A}_{i}, i \in I$ (see \cite{Gui}, p. 17, 2.2), which may also be defined as an inductive limit (\cite{Gui}, p. 18; \cite{Tak}) and will be denoted by ${\cal A}$. For each $i \in I$, let $\omega_{i}$ be a state on ${\cal A}_{i}$, $\pi_{i}$ the associated GNS representation (\cite{BRo1}, 2.3.3), with cyclic vector $\xi_{i}$. \begin{definition} \label{Definition 2.4} The (infinite) product state $\otimes_{i \in I} \omega_{i}$ is the unique state on ${\cal A}$ verifying \begin{equation} \label{(2.26)} (\otimes \omega_{i}) (\otimes x_{i}) = \prod \omega_{i}(x_{i}) \mbox{ for } x_{i} \in {\cal A}_{i} \end{equation} and $x_{i} = e_{i}$ for almost all $i$, where $e_{i}$ is the identity on ${\cal A}_{i}$. \end{definition} The representation of ${\cal A}$ canonically associated to $\otimes_{i \in I} \omega_{i}$ is equivalent to the representation $\pi = \otimes_{i \in I} ^{\otimes_{i} \xi_{i}} \pi_{i}$ of ${\cal A}$ on $\otimes_{i \in I}^{\otimes \xi_{i}} {\cal H}_{i}$ such that $\pi(\otimes x_{i}) = \otimes \pi_{i}(x_{i})$ for $x_{i} \in {\cal A}_{i}$, and $x_{i} = e_{i}$ for almost all $i$, where $e_{i}$ is the identity on ${\cal A}_{i}$. (See Proposition 2.5, p. 20 and Proposition 2.9, p. 23, of \cite{Gui}). We are now in the position of formulating our assumption - Assumption A - which will be the hypothesis of our main theorem (Theorem 3.4): \emph{Assumption A} Assume the framework consisting of the system S, for simplicity a spin one-half system with general observable \eqref{(2.16)}, and Hamiltonian and initial state vector given by \eqref{(2.19b)}, under condition \eqref{(2.19c)}. In this connection, we also assume condition \eqref{(1.5)}. The states $\tilde{\omega}^{\pm,T}$ of quantum spin systems are defined by \eqref{(2.24)} with the algebra ${\cal A}$ (of the apparatus alone, appearing in \eqref{(2.20)}. For particle systems the initial state vector \eqref{(2.19a)} and those $\Psi^{M,\pm,T}$ at time $T$ in \eqref{(2.19b)} are vectors $\otimes_{i=-M}^{M} \xi_{i}^{\pm,T}$ with $M$ finite, and corresponding states $\omega_{M}^{\pm,T}$, while the states of the infinite system are the infinite product factor states $\tilde{\omega}^{\pm,T} \equiv \otimes_{i \in \mathbf{Z}} \omega_{i}^{\pm,T}$ of ~\ref{Definition 2.4}, with corresponding factorial representation $\otimes_{i \in \mathbf{Z}}^{\xi_{i}^{\pm,T}}\pi_{i}$. The algebra is ${\cal A}$, with ${\cal A}$ the infinite tensor product of C* algebras. 
In each case, for all $A \in {\cal A}$ and given $\epsilon > 0$, there exists a finite positive integer $k$ and a strictly local $A(\Lambda_{k}) = \pi_{k}(A)$, or an element $A_{k} = \pi_{k}(A)$ of $\otimes_{i=-k}^{k} {\cal A}_{i}$ such that \begin{equation} \label{(2.27a)} ||A - A(\Lambda_{k})|| < \epsilon \end{equation} or \begin{equation} \label{(2.27b)} ||A - A_{k}|| < \epsilon \end{equation} \begin{remark} \label{Remark 2.1} In Assumption A, $\pi_{k}(A)$, for $A \in {\cal A}$ denotes a representation of ${\cal A}$ on a Hilbert space ${\cal H}_{\Lambda_{k}}$ (or ${\cal H}_{k}$ associated to the restriction of $A$ either to a local region or to a system with a finite number of particles, viz. satisfying \eqref{(2.27a)}. This follows by construction, using the inductive limit structure of ${\cal A}$. \end{remark} As a last remark, Assumption A is not so special as it might look: the way states of infinite systems are naturally obtained is precisely as limits of finite systems, which actually describe the physical situation(s), in the natural weak* topology \eqref{(2.23)}. \section{General framework and main theorem} Roberts and Roepstorff \cite{RRoe} have described a natural general framework for quantum mechanics, which includes systems with an infinite number of degrees of freedom. Since their building blocks are, just as in the previous subsection, the algebra of observables ${\cal A}$ and the states $\omega$, we are able to adapt it to the present context in a very simple way, which we now describe. We assume that $k=1,2, \cdots$ is a finite natural number and come back to Assumption A. The states $\tilde{\omega}_{k}^{\pm,T}$ are (pure) states on the algebra ${\cal A}(\Lambda_{k})$ or ${\cal A}_{k}$, identified as algebras of bounded operators ${\cal B}({\cal H}_{k})$ (on ${\cal H}(\Lambda_{k})$ or ${\cal H}_{k}$) corresponding to the vectors $\Psi^{k,\pm,T}$. For simplicity of notation, let $x_{k}^{T} \equiv \Psi^{k,+,T}$, $y_{k}^{T} \equiv \Psi^{k,-,T}$, ${\cal A}_{k}$ stands for ${\cal A}(\Lambda_{k})$ or ${\cal A}_{k}$, ${\cal H}_{k}$ for both ${\cal H}(\Lambda_{k})$ or ${\cal H}_{k}$, $\tilde{\omega}_{k}^{+,T} = \omega_{x_{k}^{T}}$, $\tilde{\omega}_{k}^{-,T} = \omega_{y_{k}^{T}}$. As usual, \begin{equation} \label{(2.28)} ||\omega_{x_{k}^{T}}-\omega_{y_{k}^{T}}|| = \sup_{A \in {\cal A}_{k}, ||A||\le 1} |\omega_{x_{k}^{T}}(A)-\omega_{y_{k}^{T}}(A)| \end{equation} but \begin{equation} \label{(2.29)} \omega_{x_{k}^{T}}(A)-\omega_{y_{k}^{T}}(A) = (x_{k}^{T}, A x_{k}^{T}) - (y_{k}^{T},A y_{k}^{T})= tr_{{\cal H}_{k}}(T_{k} A) \end{equation} where \begin{equation} \label{(2.30)} T_{k} \equiv x_{k}^{T} \otimes \overline{x_{k}^{T}} - y_{k}^{T} \otimes \overline{y_{k}^{T}} \end{equation} with the definition \begin{equation} \label{(2.31)} (x_{k} \otimes \overline{x_{k}})f \equiv (x_{k},f) x_{k} \mbox{ for } f \in {\cal H}_{k} \end{equation} Clearly, $T_{k}$ is an operator of rank 2, and therefore in the trace class, denoted $\tau c$ as in \cite{Sch}, and we have (\cite{Sch}, Theorem 2, p.47) \begin{lemma} \label{lem:3.1} The expression \eqref{(2.29)} represents a bounded linear functional on $\tau c$ of norm $||A||$. Moreover, $(\tau c)^{*}$ and ${\cal B}({\cal H})$ are equivalent, in the sense of Banach identical. \end{lemma} By the second assertion of Lemma ~\ref{lem:3.1}, \begin{equation} \label{(2.32)} ||\omega_{x_{k}^{T}}-\omega_{y_{k}^{T}}|| = tr_{{\cal H}_{k}} (|T_{k}|) \end{equation} where $|T_{k}| \equiv (T_{k}^{\dag}T_{k})^{1/2}$. 
the eigenvalues of $|T_{k}|$ equal the absolute values of those of $T_{k}$; by \eqref{(2.30)}, \eqref{(2.31)} the latter may be obtained directly from the trace and determinant of the anti-Hermitian matrix \[ \left( \begin{array}{ccc} 1 & (x_{k},y_{k})\\ -(y_{k},x_{k}) & -1 \end{array} \right)\] and equal \begin{equation} \label{(2.33a)} \lambda_{1,k} = \sqrt(1-|(x_{k}^{T}, y_{k}^{T})|^{2}) \end{equation} \begin{equation} \label{(2.33b)} \lambda_{2,k} = -\sqrt(1-|(x_{k}^{T}, y_{k}^{T})|^{2}) \end{equation} Putting together \eqref{(2.32)} and \eqref{(2.33a)}, \eqref{(2.33b)}, we obtain the \begin{corollary} \label{cor:3.1} \begin{equation} \label{(2.34)} |(x_{k}^{T}, y_{k}^{T})|^{2} = 1 - \frac{1}{4} ||\omega_{x_{k}^{T}}-\omega_{y_{k}^{T}}||^{2} \end{equation} \end{corollary} Equation \eqref{(2.34)} suggests the natural definition, adapted from (\cite{RRoe}, Def. 4.7) to the present context: \begin{definition} \label{Definition 3.1} Let, in the weak* topology, \begin{equation} \label{(2.35a)} \omega_{x_{k}^{T}} \to \omega_{1}^{T} \end{equation} and \begin{equation} \label{(2.35b)} \omega_{y_{k}^{T}} \to \omega_{2}^{T} \end{equation} The \emph{transition probability} between the states $\omega_{1}^{T}$ and $\omega_{2}^{T}$ on the C*-algebra ${\cal A}$, denoted $\omega_{1}^{T}.\omega_{2}^{T}$, is defined as \begin{equation} \label{(2.35c)} \omega_{1}^{T}.\omega_{2}^{T} \equiv \lim_{k \to \infty} (1 - \frac{1}{4}||\omega_{x_{k}^{T}} - \omega_{y_{k}^{T}}||^{2}) \end{equation} whenever the limit on the r.h.s. of \eqref{(2.35c)} exists. \end{definition} We have the \begin{theorem} \label{th:3.1} If the states $\omega_{1}^{T}$ and $\omega_{2}^{T}$ in Definition ~\ref{Definition 3.1} are disjoint (Definition ~\ref{Definition 2.1}), the transition probability between them is zero. \begin{proof} Considering the C* algebra ${\cal A}$ as a Banach space relatively to the weak topology on the dual space of states (the weak* topolgy), the norm is lower semi-continuous (see, e.g., \cite{Cho}, Ex. 60, p.287), and thus \eqref{(2.35a)} and \eqref{(2.35b)} imply that \begin{equation} \label{(2.36)} \liminf_{k \to \infty} ||\omega_{x_{k}^{T}} -\omega_{y_{k}^{T}}|| \ge ||\omega_{1}^{T} - \omega_{2}^{T}|| \end{equation} Since $\omega_{1}^{T}$ and $\omega_{2}^{T}$ are disjoint, by the theorem of Glimm and Kadison \cite{GK} \begin{equation} \label{(2.37)} ||\omega_{1}^{T} - \omega_{2}^{T}|| = 2 \end{equation} We thus have \begin{eqnarray*} 0 \le \liminf_{k \to \infty} (1 - \frac{1}{4}||\omega_{x_{k}^{T}} -\omega_{y_{k}^{T}}||^{2}) \le \\ \limsup_{k \to \infty} (1 - \frac{1}{4}||\omega_{x_{k}^{T}} -\omega_{y_{k}^{T}}||^{2}) \le 0 \end{eqnarray*} The first inequality above follows from the uniform bound $||\omega_{x_{k}^{T}} - \omega_{y_{k}^{T}}|| \le 2$ and the third inequality above is a consequence of \eqref{(2.36)}. The assertion follows. \end{proof} \end{theorem} \begin{remark} \label{Remark 3.1} In (\cite{Hepp}, Lemma3, p.24) it was wrongly asserted that the norm is weakly continuous; the rest of his Lemma 3 contains, however, an important idea, which we now use. Let $A \in {\cal A}$. 
If $\omega_{y_{k}^{T}}(A^{\dag}A) = 0$, $$ |(\Psi^{k,-,T}, \pi_{k}(A) \Psi^{k,+,T})|^{2} \le \omega_{y_{k}^{T}}(A^{\dag}A) \to 0 \mbox{ as } k \to \infty $$ Otherwise, $\omega_{y_{k}^{T}} (A^{\dag}A) \ne 0$ for $k$ sufficiently large, and we may define the state \begin{eqnarray*} \omega_{y_{k}^{T}}^{A} \equiv \\ \frac {(\Psi^{k,-,T}, \pi_{k}(A)^{\dag} \cdot \pi_{k}(A) \Psi^{k,-,T})}{(\Psi^{k,-,T}, \pi_{k}(A)^{\dag} \pi_{k}(A) \Psi^{k,-,T})} \end{eqnarray*} By \eqref{(2.35b)}, $$ \omega_{y_{k}^{T}}^{A} \to \omega_{2}^{T,A} $$ in the weak* topology, where, by \eqref{(2.12)}, $\omega_{2}^{T,A} \in [\omega_{2}^{T}]$, the \emph{folium} of $\omega_{2}^{T}$ (defined by (23). \end{remark} From the above, and the remarks following Lemma ~\ref{lem:1}, $\omega_{1}^{T}$ and $\omega_{2}^{T,A}$ are likewise disjoint, by the assumption of Theorem ~\ref{th:3.1}, implying the following \begin{corollary} \label{cor:3.1} Under the same assumptions of Theorem ~\ref{th:3.1}, the transition probability between $\omega_{1}^{T}$ and $\omega_{2}^{T,A}$ is zero for any $A \in {\cal A}$. In particular, by \eqref{(2.34)}, \begin{equation} \label{(2.37)} \lim_{k \to \infty} (\Psi^{k,+,T}, \pi_{k}(A) \Psi^{k,-,T}) = 0 \end{equation} \end{corollary} \begin{remark} \label{Remark 3.2} Corollary ~\ref{cor:3.1} makes precise the replacement of \eqref{(1.10.1)} and \eqref{(1.10.2)} by \eqref{(1.11)} ``in the limit $N \to \infty$'', which corresponds to the fact that the transition probability between the states $\omega_{1}^{T}$ and $\omega_{2}^{T,A}$ of the infinite system is zero, for any $A \in {\cal A}$, according to Definition ~\ref{Definition 3.1}. \end{remark} In general, the disjointness of the two states in the assumption of Theorem ~\ref{th:3.1} is not easy to prove. In the next section, we describe a model of Stern-Gerlach type in which two different proofs of this property may be given, as long as the time-of-measurement parameter $T$ satisfies \eqref{(1.5)}. The second proof will relate disjointness to the values taken by the limiting states on classical or macroscopic observables of type \eqref{(1.8)}, i.e., the ``pointer positions'' in measurement theory. \section{Application to a model of the Stern-Gerlach experiment} \subsection{The model} We describe in this section a model of the Stern-Gerlach experiment \cite{StGer}. A jet of silver atoms cross a strongly inhomogeneous magnetic field directed along the z-axis. We use the setting of Gondran and Gondran \cite{GG}, in which silver atoms of spin one-half contained in an oven are heated to high temperature and escape through a narrow opening. A collimating fence $F$ selects those atoms whose velocities are parallel to the y axis: it is assumed to be much larger along Ox, in such a way that both variables x and y may be treated classically. The atomic jet arrives then at an electromagnet at the initial time $t=0$, each atom being then described by the wave function \begin{equation} \label{(4.1)} \Psi_{T}(z) = \Psi_{C}(z) (\alpha |+) + \beta |-)) \end{equation} with $|\alpha|^{2}+|\beta|^{2} = 1$, $\sigma_{z}|\pm) = \pm |\pm)$, and the configurational part $\Psi_{C}$ is given by \begin{equation} \label{(4.2)} \Psi_{C}(z) \equiv (2 \pi \sigma_{0}^{2})^{-1/2} \exp(\frac{-z^{2}}{4\sigma_{0}^{2}}) \end{equation} After leaving the magnetic field, there is free motion until the particle reaches a screen placed beyond the magnet, at a certain time $T$, when the measurement is performed. 
We shall assume that each spin eigenstate is attached not only to one atom, but to all those atoms in a tiny neighborhood of a point in space (e.g., of diameter of a micron), but still containing a macroscopic number $N$ of atoms. The Hamiltonian \eqref{(2.18)} is thus assumed to be \begin{equation} \label{(4.3)} H_{N} = H_{S} \otimes \mathbf{1} + \mathbf{1} \otimes H_{A_{k}} + V_{k} \end{equation} with \begin{equation} \label{(4.4)} H_{S} = \mu \sigma_{z} B \end{equation} \begin{equation} \label{(4.5)} H_{A_{k}} = \frac{(P_{z}^{(k)})^{2}}{2 M_{k}} \end{equation} \begin{equation} \label{(4.6)} V_{k} = \lambda P_{z}^{(k)} \sigma_{z} \end{equation} with $M_{k} = (2k+1)m$, $m$ being the mass of a single atom, and \begin{equation} \label{(4.7)} P_{z}^{(k)} = p_{z}^{-k}+ \cdots + p_{z}^{k} \end{equation} Note that we have replaced $N$ by $2k+1$, the integer variable runs from $-k$ to $k$, in order to have a model on $\mathbf{Z}$. The corresponding effective quantum spin model of the next subsection will be thereby a translation invariant model on the lattice $\mathbf{Z}$. The operator $p_{z}^{k}$ corresponding to each atom is the usual self-adjoint z-component of the momentum operator acting on the Hilbert space $L^{2}(\mathbf{R})$, and the algebra, the one-dimensional Weyl algebra corresponding to the sole variable z. Since, by \eqref{(4.6)}, each spin couples only to the z-component of the center of mass momentum, the corresponding macroscopic operator will be the z-component of the center of mass coordinate $\frac{z_{-k} + \cdots + z_{k}}{2k+1}$ or, as we shall see, the limit, for $\rho$ real \begin{equation} \label{(4.8)} \lim_{k \to \infty} \exp(i\rho \frac{z_{-k} + \cdots + z_{k}}{2k+1}) \end{equation} which will be seen to exist in the appropriate representation. The model \eqref{(4.3)}-\eqref{(4.7)} is an adaptation (to a version of infinite number of degrees of freedom) of the model in the book by Gottfried and Yan (\cite{KG1}, pp. 559 et seq.). Equation \eqref{(4.4)} represents the interaction with the constant part of the magnetic field, \eqref{(4.5)} the kinetic energy and \eqref{(4.6)} the interaction with the field gradient, assumed to be along the z-direction Since $H_{S}$ and $H_{A_{k}}$ commute with $V_{k}$, there is no problem in taking them into account, but that will only be an unnecessary burden, which only changes some constants in the forthcoming account; consequently, we ignore them both (alternatively, take $m \to \infty$ and $B=0$). Thus our Hamiltonian will be \begin{equation} \label{(4.9)} H_{k} = V_{k} = \lambda P_{z}^{(k)} \otimes \sigma_{z} \end{equation} Before going on, we should like to explain the relation of the present model to the standard SG model-experiment in greater detail. The Hamiltonian of the flying atoms should be $$ H_{S} = \frac{p^{2}}{2m} + \mu \sigma_{z} B_{z}(z) = \frac{p^{2}}{2m} + \mu \sigma_{z}( B_{z}(0) + z \frac{\partial B_{z}}{\partial z}) $$ However, from $\nabla \cdot \vec{B} = 0$, it follows that other components of the magnetic moment interact with the field, ``a fact that is often ignored in text-book descriptions'', as remarked by Gottfried and Yan (\cite{KG1}, p. 558, bottom). They also remark that, as this issue is irrelevant to their purpose, they avoid it completely by constructing a soluble model that produces the same results as a good SG experiment. This is the model we use in this chapter, but with the following additions and modifications. 
First, we do not need to ignore the condition $\nabla \cdot \vec{B} = 0$, and assume that the particle first enters an electromagnetic field $\vec{B}$ directed along the $z$ axis given by $$ B_{x} = B^{'}_{0}x \mbox{ with } B_{y}=0 \mbox{ and } B_{z} = B_{0} - B^{'}_{0} z $$ We employ the approximation $$ B^{'}_{0} = |\frac{\partial B_{z}}{\partial z}| = \mbox{ constant } $$ Such a vector $\vec{B}$ does satisfy the Maxwell equation $\nabla \cdot \vec{B} = 0$. Reference (\cite{GG} is one of the very few in which the \emph{spatial extension} of the spinor is taken into account. This is, however, precisely the crucial element allowing to take into account the initial position $(x_{0},z_{0})$ of the particle and render the evolution of the quantum system deterministic: if it is eliminated, one loses the possibility of individualizing the particle and, finally, to perform the measurement of the coordinate $z$ of the spots on the screen. Assuming that the initial state of the silver atom is a bound state, a corresponding natural simplified Ansatz for it is a Gaussian $$ \Psi_{0}(x,z) = (2\pi \sigma_{0}^{2})^{-1/2} \exp(-\frac{z^{2}+x^{2}}{4\sigma_{0}^{2}}) S $$ where \[ S= \left( \begin{array}{c} \cos(\frac{\theta_{0}}{2})\exp(i\phi_{0}/2)\\ i\sin(\frac{\theta_{0}}{2})\exp(-i\phi_{0}/2) \end{array} \right) \] The solution of the time-dependent Schr\"{o}dinger equation for the spinor $\Psi$ $$ i\hbar \frac{\partial \Psi}{\partial t} = -\frac{\hbar^{2}}{2m}\nabla^{2} \Psi + \mu_{B} \vec{B}\cdot \vec{\sigma} \Psi $$ with the above initial condition, the magnetic field $\vec{B}$ as given above, is the same as the solution obtained with the Hamiltonian (65), see (3) of \cite{GG} and Appendix A of \cite{GG}. This is not unexpected because the multiplication operator $z$ acting on a Gaussian is equivalent to a derivation. This shows that our model is indeed the SG model ``in disguise''. The silver atoms form a jet with a certain, nonzero finite density $\rho$. Their number $N$, in a macroscopic volume $V$, may be supposed to be well described by the thermodynamic limit $N \to \infty$, $V \to \infty$, $\frac{N}{V} = \rho$. Since the $z$ coordinates of the two spots on the screen, in the SG experiment, are macroscopic numbers, it is reasonable to assume, correspondingly, that they are obtained as mean values of (microscopic) averages of $z$ coordinates $z_{1}, \cdots, z_{N}$, i.e., $\lim_{N \to \infty} \frac{z_{1} + \cdots +z_{N}}{N}$. The external magnetic field gradient (supposed to be a constant equal to $\lambda$) is also macroscopic and, accordingly, it seems reasonable to assume that $$ \lambda (\sigma^{1}_{z} \otimes (z_{1} + \cdots + \sigma^{N}_{z}\otimes z_{N}) \approx \lambda \sigma_{z} \otimes (z_{1} + \cdots +z_{N}) $$ in a tiny (e.g. of the diameter of a micron) but still macroscopic vicinity of a point in configuration space. As explained, we may replace $z_{1} + \cdots + z_{N}$ by $p_{1} + \cdots + p_{N}$, where $p_{i}$ denote momentum operators of the i-th particle. 
Thus, the measurement, here ``performed'' by the coordinate wave-function, is ``arbitrarily close'' to one in a finite volume $V_{0}$, and the elements of the quasi-local algebra ${\cal A}$, which are arbitrarily close (in norm) to an element localized in a finite volume $V_{0}$, will not be able to distinguish between two disjoint states, because they are ``macroscopically different'', i.e., differ from one another by an infinite number of operations - e.g., by flipping an infinite number of spins in states of different mean magnetizations, or changing the coordinates of the particles in jets of different values of the mean (C.M.) coordinate. The fact that the coupling is assumed to occur only with the center of mass momentum explains why only the free motion is relevant in the final formulas (see Remark 4.1), and justifies restriction to product states, because the eventual (e.g. van der Waals) interactions between the silver atoms is entirely negligible. We now proceed with the treatment of the model (65). In correspondence to \eqref{(4.2)}, the initial ($t=0$) configurational state is \begin{equation} \label{(4.10)} \Psi_{C,k,0}(z_{-k}, \cdots, z_{k}) = (2\pi \sigma_{0}^{2})^{-1/2} \exp(\frac{-z_{-k}^{2}+ \cdots -z_{k}^{2}}{4\sigma_{0}^{2}}) \end{equation} and the full $t=0$ wave-vector associated to \eqref{(4.1)} becomes \begin{equation} \label{(4.11)} \Psi_{T,k,0} = (\alpha |+) + \beta|-)) \otimes \Psi_{C,k,0} \end{equation} in the Hilbert space ${\cal H}= \mathbf{C}^{2} \otimes \otimes_{i=-k}^{k}L_{i}^{2}(\mathbf{R})$, where $L_{i}^{2}(\mathbf{R}$ denotes the $i-th$ copy of $L^{2}(\mathbf{R})$ associated to the $k-th$ particle. Equation \eqref{(4.9)} then yields \begin{equation} \label{(4.13)} \exp(-itH_{k}) \Psi_{T,k,0} = \alpha |+) \otimes \Psi^{k,-,t} + \beta |-) \otimes \Psi^{k,+,t} \end{equation} with \begin{equation} \label{(4.14a)} \Psi^{k,+,t}(z_{-k}, \cdots, z_{k})= \Psi_{C,k,0}(z_{-k}-\lambda t, \cdots, z_{k} -\lambda t) \end{equation} together with \begin{equation} \label{(4.14b)} \Psi^{k,-,t}(z_{-k}, \cdots, z_{k})= \Psi_{C,k,0}(z_{-k}+\lambda t, \cdots, z_{k} +\lambda t) \end{equation} In correspondence with \eqref{(4.13)}, the states $\omega_{x_{k}^{T}}$, $\omega_{y_{k}^{T}}$ defined before \eqref{(2.28)} become \begin{equation} \label{(4.15a)} \omega_{x_{k}^{T}}(A) = (\Psi^{k,+,t}, \pi_{k}(A) \Psi^{k,+,t}) \end{equation} and \begin{equation} \label{(4.15b)} \omega_{y_{k}^{T}}(A) = (\Psi^{k,-,t}, \pi_{k}(A) \Psi^{k,-,t}) \end{equation} where $A \in {\cal A}$, the infinite product of Weyl algebras defined in Assumption A. For this model the $t_{D}$ in \eqref{(1.5)} may be explicitly computed: after t=0, the density splits into a sum of two Gaussians, which become separated as long as the distance between their centers is larger than the widths of the two Gaussians, viz. $3\sigma_{0}$: $t_{D} = \frac{3\sigma_{0}}{\lambda}$ where $\lambda$ stands for the average velocity in the $z$ direction: see (6) and (9) of \cite{GG} and the forthcoming \eqref{(4.27)}. \begin{proposition} \label{prop:4.1} Let $T$ satisfy \eqref{(1.5)}. Then, the weak* limits of the states \eqref{(4.15a)}, \eqref{(4.15b)}, denoted by $\omega_{1}^{T}$ and $\omega_{2}^{T}$ as in Definition ~\ref{Definition 3.1}, are disjoint. \begin{proof} We are in the setting of Proposition ~\ref{prop:2.1}, with $x_{i} = \Psi^{i,+,T}$, on the one hand, and $y_{i} = \Psi^{i,-,T}$ on the other. 
We have, by \eqref{(4.14a)}, \eqref{(4.14b)}, \begin{eqnarray*} (x_{i},y_{i}) = (y_{i},x_{i}) = \\ = (2\pi \sigma_{0}^{2})^{-1} \int_{-\infty}^{\infty} dz_{i} \exp(-\frac{(z_{i}-\lambda T)^{2}}{4\sigma_{0}^{2}}) \times \\ \times \exp(-\frac{(z_{i}+\lambda T)^{2}}{4\sigma_{0}^{2}}) = \\ = (2\pi \sigma_{0}^{2})^{-1} \int_{-\infty}^{\infty} dz_{i}\exp(-\frac{z_{i}^{2}}{2\sigma_{0}^{2}}) \exp(-\frac{\lambda^{2}T^{2}}{2\sigma_{0}^{2}})=\\ = \exp(-\frac{\lambda^{2} T^{2}}{2\sigma_{0}^{2}}) \end{eqnarray*} By \eqref{(1.5)} $$ \exp(-\frac{\lambda^{2} T^{2}}{2\sigma_{0}^{2}}) \ge \exp(-\frac{\lambda^{2} t_{D}^{2}}{2\sigma_{0}^{2}}) $$ and hence \begin{equation} \label{(4.17)} |(x_{i},y_{i}) -1| = 1-\exp(-\frac{\lambda^{2} t_{D}^{2}}{2\sigma_{0}^{2}}) \ge \frac{1}{4}\frac{\lambda^{2} t_{D}^{2}}{2\sigma_{0}^{2}} \end{equation} By Definition ~\ref{Definition 2.4}, the representations of ${\cal A}$ canonically associated to the infinite product states $\omega_{1}$ and $\omega_{2}$ are $\otimes_{i \in \mathbf{Z}}^{\otimes\xi_{i}} \pi_{i}$, with $\xi_{i} = x_{i} \mbox{ or } y_{i}$, and the corresponding $C_{0}$ - families are not equivalent by \eqref{(4.17)} and Definition ~\ref{Definition 2.3}, hence they are disjoint. \end{proof} \end{proposition} The fact used above that ``not not-equivalent'' means disjointness as defined by definition ~\ref{Definition 2.1} may not be immediately clear but it, too, follows from Lemma \ref{lem:1}. First, we dispose of $A$ because of quasi-locality, and, due to the product structure, we arrive as a necessary and sufficient condition for disjointness of states, that the scalar product $\prod_{i \in I; |i| sufficiently large} (x_{i},y_{i}) = 0$, or, taking the logarithm $$ |\log(\prod_{i \in I; |i| sufficiently large} (x_{i},y_{i}))| = \infty $$ In rigorous terms, this is replaced by the condition $$ \sum_{i \in I; |i| sufficiently large} |(x_{i},y_{i})-1| = \infty $$ This replacement is due to the necessity of avoiding the problems related to zero factors in the infinite product, or to ``infinite phases'', see \cite{Wehrl}. The above condition may be intuitively motivated by the fact that \emph{convergence} of the infinite product implies that each term must tend to one: considering the logarithm of the product, each $\log(x_{i},y_{i})$ is close to $1-(x_{i},y_{i})$ and, thus, convergence means $$ \sum_{i \in I; |i| sufficiently large} |(x_{i},y_{i})-1| < \infty $$ of which the previous formula is the negation. We come now to a second proof of disjointness, which both illuminates its physical content and defines precisely the results and parameter values associated to the measurement. The (z-component of) the center of mass of the atoms \eqref{(1.8)} is \begin{equation} \label{(4.18)} z_{C.M.} = \lim_{k \to \infty} \frac{1}{2k+1} \sum_{i=-k}^{k} z_{k} \end{equation} The above limit may be seen to exist in each IDPS $\otimes_{i \in \mathbf{Z}}^{\otimes \xi_{i}}$, with $\xi_{i}=x_{i} \mbox{ or } y_{i}$, assuming different values in each representation: \begin{proposition} \label{prop:4.2} $z_{C.M.}$ exists in the sense that, for any $\rho \in \mathbf{R}$, \begin{equation} \label{(4.19a)} \lim_{k \to \infty} \exp(i \rho \frac{\sum_{i=-k}^{k} z_{k}}{2k+1}) = \exp(i\rho \lambda T) \end{equation} in the IDPS $\otimes_{i \in \mathbf{Z}}^{\otimes x_{i}}$, and \begin{equation} \label{(4.19b)} \lim_{k \to \infty} \exp(i \rho \frac{\sum_{i=-k}^{k} z_{k}}{2k+1}) = \exp(-i\rho \lambda T) \end{equation} in the IDPS $\otimes_{i \in \mathbf{Z}}^{\otimes y_{i}}$. 
As a consequence, the two IDPS are disjoint. \begin{proof} We have \begin{eqnarray*} (\Psi^{k,+,T},\exp(i \rho \frac{\sum_{i=-k}^{k} z_{k}}{2k+1})\Psi^{k,+,T})= \\ = (2\pi \sigma_{0}^{2})^{-\frac{2k+1}{2}}\int_{-\infty}^{\infty} dz_{-k} \cdots \int_{-\infty}^{\infty} dz_{k} \\ \exp(-2\frac{(z_{-k}-\lambda T)^{2}}{4\sigma_{0}^{2}}) \cdots \exp(-2\frac{(z_{k}-\lambda T)^{2}}{4\sigma_{0}^{2}})\\ \exp(i \rho \frac{z_{-k}}{2k+1}) \cdots \exp(i \rho \frac{z_{k}}{2k+1}) = \\ = \exp(-\frac{\rho^{2}\sigma_{0}^{2}}{2(2k+1)}) \exp(i \rho \lambda T) \end{eqnarray*} from which \begin{equation} \label{(4.20a)} \lim_{k \to \infty} (\Psi^{k,+,T},\exp(i \rho \frac{\sum_{i=-k}^{k} z_{k}}{2k+1})\Psi^{k,+,T}) = \exp(i \rho \lambda T) \end{equation} and, analogously, \begin{equation} \label{(4.20b)} \lim_{k \to \infty} (\Psi^{k,-,T},\exp(i \rho \frac{\sum_{i=-k}^{k} z_{k}}{2k+1})\Psi^{k,-,T}) = \exp(-i \rho \lambda T) \end{equation} By Proposition ~\ref{prop:2.1} and the fact that the limits on the left hand sides of \eqref{(4.20a)}, \eqref{(4.20b)} are not altered by changing the variables $z_{i}$ with $i$ in a finite set we may replace $\Psi^{k,\pm,T}$ in equations \eqref{(4.20a)}(resp. \eqref{(4.20b)}) by vectors in a total set in $\otimes_{i \in \mathbf{Z}}^{\otimes x_{i}}$ (resp.$\otimes_{i \in \mathbf{Z}}^{\otimes y_{i}}$). This shows \eqref{(4.19a)} and \eqref{(4.19b)}. Disjointness of the IDPS is a consequence of an argument identical to the one used in connection with \eqref{(2.14)}. \end{proof} \end{proposition} \subsection{An effective quantum spin model} An effective quantum spin model for the previously studied Stern-Gerlach model is obtained by replacing $\otimes_{-k}^{k} L_{i}^{2}(\mathbf{R})$ by $$ {\cal H}_{k} = \otimes_{i=-k}^{k} \mathbf{C}_{k}^{2} $$ Given a fixed $T$ satisfying \eqref{(1.5)}, perform in the states $\omega_{x_{k}^{T}}$, $\omega_{y_{k}^{T}}$ in \eqref{(4.15a)} and \eqref{(4.15b)} the substitution \begin{equation} \label{(4.22)} \Psi^{k,\pm,T} \to \otimes_{i=-k}^{k} |\pm)_{k} \end{equation} where $|\pm)_{k}$ are, as before, the spin eigenstates of $\sigma^{z}_{k}:\sigma^{z}_{k}|\pm)_{k} = \pm |\pm)_{k}$, together with the substitution \begin{equation} \label{(4.23a)} \lim_{k \to \infty} \exp(i \rho \frac{\sum_{i=-k}^{k} z_{k}}{2k+1}) \to \lim_{k \to \infty} \exp(2i\rho T \frac{\sum_{i=-k}^k \sigma^{z}_{i}}{2k+1}) \end{equation} Then: the weak* limit of the sequence of states \begin{equation} \label{(4.23b)} \omega_{k} \equiv |\alpha|^{2} \omega_{k}^{+} + |\beta|^{2} \omega_{k}^{-} \end{equation} with $|\alpha|^{2} + |\beta|^{2}= 1$, on the quasi-local algebra ${\cal A}$ associated to the spin algebra on $\mathbf{Z}$ and $\omega_{k}^{\pm}$ denoting the product states which define the familiar disjoint representations $\pi_{\vec{m}}$, $\pi_{\vec{n}}$ (with $\vec{m} = \pm (0,0,1)$) described in section 2, after (23), is an \emph{effective} quantum spin model for the SG model described in the previous subsection, in the sense that it reproduces the ``quantities to be measured'' \eqref{(4.19a)}, \eqref{(4.19b)}, as long as the substitution \eqref{(4.23a)} is performed. The present model serves as illustration of the remarks on irreversibility in the next section 5. \begin{remark} \label{Remark 4.1} It is of course critical that $t_{D} \ne 0$ in \eqref{(1.5)}; the case of ``instantaneous measurement'' is excluded by the Basdevant-Dalibard assumption a). The same requirement is independently imposed by the theory of irreversibility, see the next section. 
As a further concrete illustration of this requirement in the present model, note that by the equations preceding \eqref{(4.17)}, \begin{equation} \label{(4.24)} (\Psi^{k,+,T}, \Psi^{k,-,T}) = \exp(-\frac{\lambda^{2} T^{2} k}{2\sigma_{0}^{2}}) \end{equation} so that, if \begin{equation} \label{(4.25)} T = T(k) = O(\frac{1}{\sqrt(k)}) \end{equation} the cross terms in \eqref{(2.19b)} do not tend to zero. The fact that the possibility $T(k) \to 0$ as $k \to \infty$ is to be \emph{excluded}, contrarily to the remarks in \cite{Dop}, has a simple explanation, to be given next. Finally, it should be remarked that equation \eqref{(4.24)} shows explicitly that the condition $k \to \infty$ is not always \emph{necessary} to achieve a very high degree of decoherence. Indeed, let $T=1 sec$ and $\frac{\sigma_{0}}{\lambda} = 10^{-4} sec$ (the latter reasonable experimental values, see (9) in \cite{GG}), and $k=1$ (i.e., just one particle), we obtain for the r.h.s. of equation \eqref{(4.24)} the value $\exp(-10^{8})$, a forbiddingly small value! \end{remark} \begin{remark} \label{Remark 4.2} If we differentiate equations \eqref{(4.19a)} and \eqref{(4.19b)} with respect to $\rho$, setting $\rho=0$ afterwards, we obtain, denoting $<z_{C.M}>_{T}$ the expectation of the C.M. variable (74) in the product state at time $T$: \begin{equation} \label{(4.27)} <z_{C.M.}>_{T} = \pm \lambda T = 2 s_{z} \lambda T \end{equation} where \begin{equation} \label{(4.28)} s_{z} = \pm \frac{1}{2} \end{equation} are the two values of the z component of the spin operator, which comprise, in this experiment, the ``measured values''. Equation \eqref{(4.27)} is essentially equation (65) of p. 559 of \cite{KG1} (with $P_{z}=0$, which we assumed) - not surprisingly the solution of the classical equation of motion, because the Gaussians are coherent states. By \eqref{(4.27)}, $T$ is proportional to the value of a ``macroscopic observable'' $<z_{C.M.}>_{T}$, independent of $k$. This explains why a behavior such as \eqref{(4.25)} is excluded, or, more generally, that the possibility $T(k) \to zero$ as $k \to \infty$ mentioned in \cite{Dop} is excluded. The two values \eqref{(4.28)} are obtained from \eqref{(4.27)} through the measured values of $<z_{C.M.}>_{T}$ and $T$ (with a known constant $\lambda$) and remain constant when the ``observer'' $(<z_{C.M.}>_{T},T)$ changes; the ``intrinsic property postulate'' of Bell and Gottfried is therefore verified in the present model. Finally, the mathematical limit $T \to \infty$ is unphysical in this model, since it corresponds to place the screen at infinite distance from the electromagnet. \end{remark} \section{Irreversibility, the time-arrow and the conservation of entropy under measurements} In his conclusion, Hepp \cite{Hepp} remarks: ``The solution of the problem of measurement is closely connected with the yet unknown correct description of irreversibility in quantum mechanics''. One such description of closed systems, without changing the Schr\"{o}dinger equation and the Copenhagen interpretation was proposed in \cite{Wre}, see also \cite{Wre1} for a comprehensive review, which includes the stability of the second law in the form proposed in \cite{Wre} under interactions with the environment. 
For a finite quantum spin system the Gibbs-von Neumann entropy is ($k_{B}=1$) \begin{equation} \label{(5.1)} S_{\Lambda} = -Tr (\rho_{\Lambda} \log \rho_{\Lambda}) \end{equation} As remarked in section 2, we may view $\rho_{\Lambda}$ as a state $\omega_{\Lambda}$ on ${\cal A}(\Lambda)$ which generalizes to systems with infinite number of degrees of freedom $\omega(A)= \lim_{\Lambda \nearrow \infty} \omega_{\Lambda}(A)$, at first for $A \in {\cal A}_{L}$ and then to ${\cal A}$. For a large system the \emph{mean entropy} is the natural quantity from the physical standpoint: \begin{equation} \label{(5.2)} s(\omega) \equiv \lim_{\Lambda \nearrow \infty} (\frac{S_{\Lambda}}{|\Lambda|})(\omega) \end{equation} The mean entropy has the property \cite{LanRo}: \begin{equation} \label{(5.3)} 0 \le s(\omega) \le \log D \mbox{ where } D=2S+1 \end{equation} where $S$ denotes the value of the spin, in the present paper and in the effective model of section 4.2, $S=\frac{1}{2}$. In his paper ``Against measurement'', John Bell, in a statement which is qualitatively similar to Hepp's, insisted on the necessity of physical precision regarding such words as reversible, irreversible, information (whose information? information about what?). The theory developed in \cite{Wre}, \cite{Wre1} starts defining an adiabatic transformation, in which there is a first step, a finite preparation time $t_{p}$, during which external forces act, at the end of which the Hamiltonian associated to the initial equilibrium state is restored, and remains so ``forever'' during the second step. In measurement theory, Lamb \cite{Lamb} also emphasizes the dual role of preparation and measurement. If the time of measurement $T$ is such that $T > t_{p}$, and the wave-vector describing the system is not identically zero in the whole interval $[0,T]$, the dynamics of the system in the time interval $[-t_{r},T]$, of preparation followed by measurement, is *not* time-reversal invariant, leading to a \emph{time arrow}. If $t_{p}=T=0$, i.e., both preparation and measurement are instantaneous, no guarantee of the existence of a time-arrow can be given. According to our theory, given a time arrow, the process $\omega_{1}(0) \to \omega_{2}(\infty)$ is defined to be reversible (irreversible) iff the inverse process $\omega_{2}(0) \to \omega_{1}(\infty)$ is possible (impossible). The first alternative takes place iff $s(\omega_{1}) = s(\omega_{2})$, the second one iff $s(\omega_{1}) < s(\omega_{2})$. Infinite time $t=\infty$ means, physically, that $T$ is much larger than a quantity $t_{D}$, the decoherence time, as explained in section 1. Of course, irreversibility is incompatible with time-reversal invariance, because the mean entropy cannot both strictly increase and strictly decrease with time. This is a precise wording in our framework of the Schr\"{o}dinger paradox \cite{Schr}, cited in Lebowitz's inspiring review of the issue of time-assymetry \cite{Leb}. 
We know that the space of states is convex and the entropy of a finite system satisfies the inequality ($0 \le \alpha \le 1$) \begin{equation} \label{(5.4)} S_{\Lambda}(\alpha \rho_{\Lambda}^{1}+(1-\alpha) \rho_{\Lambda}^{2}) > \alpha S_{\Lambda}(\rho_{\Lambda}^{1}) + (1-\alpha) S_{\Lambda}(\rho_{\Lambda}^{2}) \end{equation} i.e, $S_{\Lambda}$ is \emph{strictly concave}: entropy is gained by mixing, but the gain is *not* extensive and disappears upon division by $|\Lambda|$ and taking the infinite volume limit (inequalities of Lanford and Robinson \cite{LanRo}), so that the mean entropy becomes \emph{affine}: \begin{equation} \label{(5.5)} s(\alpha \omega_{1} + (1-\alpha) \omega_{2}) = \alpha s(\omega_{1}) + (1-\alpha) s(\omega_{2}) \end{equation} The state \eqref{(2.19b)} , \eqref{(2.19c)} tends, in the weak* topology, to a state $\omega_{T}$ (now on the algebra of system and apparatus \eqref{(2.20)}, whose Gibbs-von Neumann entropy is identical to that of the initial state $\omega_{0}$, and equals zero since the state is pure. The associated mean entropy therefore also satisfies \begin{equation} \label{(5.6)} s(\omega_{T}) = s(\omega_{0})=0 \end{equation} By Theorem ~\ref{th:3.1}, the state $\omega_{T}$ is equivalent, ``for all observables found in Nature'' to the ``collapsed state'' $\omega_{C}$ given by \begin{equation} \label{(5.7)} \omega_{C} \equiv |\alpha|^{2}\omega_{1}^{T} + |\beta|^{2} \omega_{2}^{T} \end{equation} where, by \eqref{(2.19b)}, \eqref{(2.19c)}, \eqref{(2.21)}, \eqref{(2.22)} \begin{equation} \label{(5.8a)} \omega_{1}^{T} = \omega_{T}(P_{+} \cdot)(|\alpha|^{2})^{-1} \end{equation} and \begin{equation} \label{(5.8b)} \omega_{2}^{T} = \omega_{T}(P_{-} \cdot)(1-|\alpha|^{2})^{-1} \end{equation} with the notation $P_{+}=|+)(+|$, $P_{-}=|-)(-|$, the familiar projectors on the two eigenstates of $\sigma_{z}$. \begin{theorem} \label{th:5.1} On the average the mean entropy is conserved by measurements, and remains equal to zero. \begin{proof} On the average, the mean entropy equals \begin{eqnarray*} |\alpha|^{2} s(\omega_{1}^{T}) + (1-|\alpha|^{2}) s(\omega_{2}^{T}) = \\ = s(|\alpha|^{2}\omega_{1}^{T} + (1-|\alpha|^{2})\omega_{2}^{T}) = \\ =s(\omega_{T}(P_{+} \cdot) + \omega_{T}(P_{-} \cdot)) =s(\omega_{T})=\\ =s(\omega_{0})= 0 \end{eqnarray*} The first equation above is due to the property of affinity \eqref{(5.5)}, the second one follows from \eqref{(5.8a)}, \eqref{(5.8b)}, the third one by the linearity of the states, and the fact that $P_{+}+P_{-} = \mathbf{1}$, the fourth from \eqref{(5.6)}. \end{proof} \end{theorem} The above theorem relies on the property of affinity of the mean entropy, which has only been proved for quantum lattice systems \cite{LanRo}. The sole example we are able to give is the effective quantum spin model of the Stern Gerlach experiment of section 4.1 which was given in section 4.2. In contrast to the behavior found in Theorem ~\ref{th:5.1}, the Boltzmann and Gibbs-von Neumann entropy of a finite system is \emph{reduced} under collapse, by Lemma 3 of \cite{NarWre}. This may be understood as follows. Entropy $S_{\Lambda} = |\Lambda|\log D -I_{\Lambda}$, with $I_{\Lambda}$ denoting the (quantum) information. For quantum spin systems $0 \le S_{\Lambda}/|\Lambda| \le \log D$, and therefore $0 \le I_{\Lambda}/|\Lambda| \le \log D$. It attains its maximum value for pure states, which are characterized by $S_{\Lambda} = 0$. 
Under ``collapse'', each collapsed state is pure and therefore information is gained: this explains that the (Boltzmann and von Neumann) entropies are reduced, on the average, violating the second law (on the average). If one chooses to define irreversibility in terms of the growth of the quantum Boltzmann entropy, we arrive at the necessity, commented in the last paragraph of \cite{NarWre}, that interactions with the environment (as well as measurements) must be rare phenomena on the thermodynamic scale in order to account for the validity of the version of the second law which was proved in \cite{NarWre}. Our approach through the mean entropy seems therefore particularly natural in this context, and has the following physical interpretation. Equivalently to the previously discussed informational content (for quantum spin systems), entropy is, in Boltzmann's sense, a measure of a macrostate's wealth of ``microstates'', and therefore grows by mixing, but it turns out that this growth is *not* extensive and disappears upon division by $|\Lambda|$ , i.e., taking the infinite volume limit (inequalities of Lanford and Robinson \cite{LanRo}), so that the affinity property \eqref{(5.5)} results and, with it, Theorem ~\ref{th:5.1}, confirming, \emph{in the sense of mean entropy}, Nicolas van Kampen's conjecture \cite{vK} that \emph{the entropy of the Universe is not affected by measurements}. It is not affected either by more general interactions with the environment \cite{NTh1}, resulting in the \emph{stability of the second law} proved in \cite{Wre}, see \cite{Wre1}. \section{Conclusion and open problems} One central and dominating feature of the analysis over finite vs infinite dimensional spaces is that in the infinite dimensional case the solution may depend \emph{discontinuously} on the parameters of the problem. Indeed, infinite systems may exhibit \emph{singularities}, not present in finite macroscopic systems, well-known in the theory of \emph{phase transitions}: they are parametrized by \emph{critical exponents}, which, moreover, display \emph{universal} properties, in excellent agreement with experiment! The crucial example of ``discontinuity'', as $N \to \infty$, in the context of measurement theory, is the basic structural change of the states: a sequence of pure states may tend to a mixed state, by Theorem ~\ref{th:3.1}, as a consequence of the property of disjointness, which has no analogue for finite system: in measurement theory, the mean entropy is conserved and equals zero by Theorem ~\ref{th:5.1}. It may also happen that a sequence of pure states of infinite systems, parametrized by the time variable, tends to a mixed state of strictly higher mean entropy (\cite{Wre}, \cite{Wre1}). In greater generality, the physical ``N large but finite'' differs qualitatively from ``N infinite'' because the latter exhibits \emph{universal} properties not found in finite systems. One example of these universal properties, crucial in our approach, is the affinity of the mean entropy, whose finite-volume counterpart strict concavity of $\frac{S_{\Lambda}}{|\Lambda|}$) is not universal because, not being uniform in $|\Lambda|$, it depends on the volume $|\Lambda|$ of the system. The fact that (only) ``N infinite'' is in good agreement with experiment is explained by the fact that, with $N \approx 10^{24}$, macroscopic systems are extremely close to infinite systems (the success of the thermodynamic limit!). 
This explains why we are able to complement, and sometimes improve on, Sewell's approach to the measurement problem in (\cite{Se1}, \cite{Se2}). The above-mentioned universality in the framework introduced here and in (\cite{Wre}, \cite{Wre1}) suggests that other physical theories besides quantum spin systems might exhibit similar properties, e.g., relativistic quantum field theory (rqft), and, from there, hopefully, nonrelativistic quantum continuous systems by the non-relativistic limit of rqft. Since, however, rqft deals with fields and thus continuous quantum systems, the structure of the space of states is quite different from that of quantum spin systems, and, in particular, the states must be required to be locally normal \cite{DDR} or locally finite (\cite{Se}, p.26) - of which the only existing proof in an interacting field theory is due to Glimm and Jaffe, for the vacuum state \cite{GliJa}. Moreover, Theorem ~\ref{th:3.1} and its corollary show that in measurement theory the relevant state is (equivalent to) a weak* limit of a sequence of convex linear combinations of product states, that is, a non-entangled state, according to the Bertlmann-Narnhofer-Thirring geometrical picture of entanglement (\cite{NTh2}, \cite{BNTh}). The latter \cite{NTh2}, however, also suggests that in rqft ``almost every state is entangled'' in a precise sense, and, indeed, Summers and Werner \cite{SW} and Landau \cite{La} show that the vacuum state in rqft maximally violates Bell's inequality (see also Wightman's review \cite{Wight1} of their work). It is thus expected that entanglement will play a role in a future theory of measurement in rqft. The formulation of a theory of measurement in rqft is a difficult, very fundamental open problem: it is formulated as Problem 4 in Wightman's list \cite{Wight1}: ``to examine the effects of relativistic invariance on measurement theory'', see also \cite{Lamb}. In particular, Doplicher suggests \cite{Dop} that the apparent ``nonlocalizability'' of the type observed in the EPR thought experiment, due to the superposition principle, would certainly disappear if truly \emph{local} measurements were performed - and spin or angular momentum measurements are not such. In fact, instead of \eqref{(4.6)}, we must have a true interaction between fields. Incidentally, for interacting fields, the singularity hypothesis of \cite{JaWre} implies that fields are not defined for sharp times, and ``instantaneous measurements'' are excluded. Since the measurement problem in quantum mechanics is a very complex and controversial problem, the complexity being partly due to the variety of the existent physical situations, it cannot be hoped that this paper contains a ``final solution'' to the measurement problem. In particular, almost perfect decoherence may occur even if the limit $N \to \infty$ is not performed at all, when the time $T$ of observation is sufficiently large: an explicit example of this situation is given in Remark 4.1. The above-mentioned example relates to the work of Machida and Namiki \cite{MN}, commented by Araki \cite{Ar}, who formulated the Machida-Namiki theory in terms of continuous superselection rules. The reduction of the wave-packet proceeds, then, as a consequence of the (mathematical) limit $T \to \infty$ (in our notation: see equations (3.4), (3.5) in \cite{Ar}). 
Although, as we have argued, this limit need not, in general, be of physical relevance (and, indeed, it is not in the SG model, see the last sentence in Remark 4.2), we have just seen that almost perfect decoherence may occur, nevertheless, if $T$ is sufficiently large with respect to the decoherence time $t_{D}$. These theories, therefore, do remain of considerable interest as a complement to ours. Another example is provided by the question of whether it is possible to devise any experiment (of the Bell-EPR type) which simultaneously measures precise values of incompatible observables (the SG experiment of section 4 being not of this type). This may indicate a different route to the previous discussion based on microcausality. See, in this connection, the specific analysis in \cite{Gri4}, as well as the more general \cite{Gri1}; both are based on Griffiths' (probabilistic) theory of consistent histories (\cite{Gri2}, see also \cite{Gri3}). The latter theory was used by Omn\`{e}s \cite{Omn}, who proposed to consider measurements specified by special kinds of history in which decoherence results in the classical behavior of the macroscopic variables of the apparatus, to a sufficient approximation. This (not necessarily perfect) decoherence is yet another alternative, complementary approach to ours. Concerning the classical, macroscopic observables, it is also of special interest that they are shown in \cite{Req} to be special cases of a subalgebra of the class of microscopic quantum observables of a generic many-body system (see also \cite{Lud}). In spite of the above-mentioned limitations, we believe that the universal properties of perfect decoherence, as described in the first two paragraphs, suggest that it is relevant, in the sense of an idealized limit, to a significant number of physical measurements, in which both the ``Heisenberg paradox'' and the ``irreversibility paradox'' have been eliminated, and, therefore, quantum mechanics, in the original Copenhagen interpretation, is totally free of internal inconsistencies. This occurs, however, as Hepp \cite{Hepp} predicted, only if one takes into account the extension of quantum mechanics to systems with an infinite number of degrees of freedom, as formulated in \cite{RRoe} and \cite{Hepp}, and developed in section 3. \emph{Acknowledgements} We should like to thank Pedro L. Ribeiro for discussions, the late Derek W. Robinson for a correspondence in which he stressed the importance of the property of affinity, and Professor R. B. Griffiths for an enlightening correspondence on related matters. This paper owes very much to the remarks of both referees, which resulted in truly substantial improvements regarding the previous version. \qquad \textbf{Statement concerning data availability} \textbf{The author confirms that all the data supporting the findings of this study are available within the article.} \qquad \begin{thebibliography}{99} \bibitem[Dop18]{Dop} S.~Doplicher. \newblock The measurement process in local quantum physics and the EPR paradox. \newblock {\em Comm. Math. Phys.}, 357:407, 2018. \bibitem[Hepp72]{Hepp} K.~Hepp. \newblock Quantum theory of measurement and macroscopic observables. \newblock {\em Helv. Phys. Acta}, 45:237, 1972. \bibitem[BasDal02]{BasDal} J.~L. Basdevant and J.~Dalibard. \newblock M\'{e}canique Quantique - Cours \`{a} l'\'{e}cole polytechnique. \newblock Les \'{E}ditions de l\'{E}cole Polytechnique, 2002. \bibitem[StGer21]{StGer} W.~Gerlach und O.~Stern. 
2205.12838v10
http://arxiv.org/abs/2205.12838v10
Acceleration of Frank-Wolfe Algorithms with Open-Loop Step-Sizes
\documentclass{article} \input{prologue.tex} \usepackage{graphicx,wrapfig,lipsum} \usepackage[ singlelinecheck=false ]{caption} \captionsetup[table]{ indention=0em } \usepackage{makecell} \begin{document} \title{Acceleration of Frank-Wolfe Algorithms with Open-Loop Step-Sizes} \author{\name Elias Wirth \email \texttt{\href{mailto:[email protected]}{[email protected]}}\\ \addr Institute of Mathematics \\ Berlin Institute of Technology \\ Strasse des 17. Juni 135, Berlin, Germany \AND \name Thomas Kerdreux \email \texttt{\href{[email protected]}{[email protected]}}\\ \addr Geolabe LLC \\ 1615 Central Avenue, Los Alamos, New Mexico, USA \AND \name Sebastian Pokutta \email \texttt{\href{mailto:[email protected]}{[email protected]}} \\ \addr Institute of Mathematics \& AI in Society, Science, and Technology\\ Berlin Institute of Technology \& Zuse Institute Berlin\\ Strasse des 17. Juni 135, Berlin, Germany} \maketitle
\begin{abstract} Frank-Wolfe algorithms (FW) are popular first-order methods for solving constrained convex optimization problems that rely on a linear minimization oracle instead of potentially expensive projection-like oracles. Many works have identified accelerated convergence rates under various structural assumptions on the optimization problem and for specific FW variants when using line-search or short-step, requiring feedback from the objective function. Little is known about accelerated convergence regimes when utilizing open-loop step-size rules, a.k.a. FW with pre-determined step-sizes, which are algorithmically extremely simple and stable. Not only is FW with open-loop step-size rules not always subject to the same convergence rate lower bounds as FW with line-search or short-step, but in some specific cases, such as kernel herding in infinite dimensions, it has been empirically observed that FW with open-loop step-size rules enjoys faster convergence rates than FW with line-search or short-step. We propose a partial answer to this unexplained phenomenon in kernel herding, characterize a general setting for which FW with open-loop step-size rules converges non-asymptotically faster than with line-search or short-step, and derive several accelerated convergence results for FW with open-loop step-size rules. Finally, we demonstrate that FW with open-loop step-sizes can compete with momentum-based open-loop FW variants. \end{abstract} \begin{keywords} Frank-Wolfe algorithm, open-loop step-sizes, acceleration, kernel herding, convex optimization \end{keywords}
\section{{Introduction}} In this paper, we address the constrained convex optimization problem \begin{equation}\label{eq:opt}\tag{OPT} \min_{x\in\cC}f(x), \end{equation} where $\cC\subseteq\R^d$ is a compact convex set and $f\colon \cC \to \R$ is a convex and $L$-smooth function. Let $x^* \in \argmin_{x\in \cC} f(x)$ be the constrained optimal solution. A classical approach to addressing \eqref{eq:opt} is to apply \emph{projected gradient descent}. When the geometry of $\cC$ is too complex, the projection step can become computationally too expensive. In these situations, the \emph{Frank-Wolfe algorithm} (FW) \citep{frank1956algorithm}, a.k.a. the conditional gradients algorithm \citep{levitin1966constrained}, described in Algorithm~\ref{algo:fw}, is an efficient alternative, as it only requires first-order access to the objective $f$ and access to a linear minimization oracle (LMO) for the feasible region, that is, given a vector $c \in \R^d$, the LMO outputs $\argmin_{x \in \cC} \langle c, x\rangle$.
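To make the oracle model concrete, the following Python sketch (our own illustration, not code from this paper) shows a closed-form LMO for an $\ell_2$-ball together with the resulting projection-free iteration under a pre-determined (open-loop) step-size $\eta_t = \frac{\ell}{t+\ell}$; the ball radius, the toy objective, and the iteration horizon are placeholder choices.
\begin{verbatim}
# Minimal sketch (illustration only): Frank-Wolfe with an open-loop step-size
# on an l2-ball; radius, objective, and horizon are placeholder assumptions.
import numpy as np

def lmo_l2_ball(c, r=1.0):
    # LMO for C = {x : ||x||_2 <= r}: argmin_{x in C} <c, x> = -r * c / ||c||_2
    norm = np.linalg.norm(c)
    return np.zeros_like(c) if norm == 0.0 else -r * c / norm

def frank_wolfe_open_loop(grad, x0, T, ell=4, r=1.0):
    # Vanilla FW: x_{t+1} = (1 - eta_t) x_t + eta_t p_t with eta_t = ell/(t + ell)
    x = x0.copy()
    for t in range(T):
        p = lmo_l2_ball(grad(x), r)   # call the LMO at the current gradient
        eta = ell / (t + ell)         # pre-determined (open-loop) step-size
        x = (1 - eta) * x + eta * p   # convex combination stays feasible
    return x

if __name__ == "__main__":
    b = np.array([2.0, 0.5])          # unconstrained minimizer lies outside the ball
    grad = lambda x: x - b            # gradient of f(x) = 0.5 * ||x - b||_2^2
    print(frank_wolfe_open_loop(grad, np.zeros(2), T=1000))
\end{verbatim}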
At each iteration, the algorithm calls the LMO, $p_t \in \argmin_{p\in \cC} \langle \nabla f (x_t), p-x_t\rangle$, and takes a step in the direction of the vertex $p_t$ to obtain the next iterate $x_{t+1}= (1-\eta_t) x_t + \eta_t p_t$. As a convex combination of elements of $\cC$, $x_t$ remains in the feasible region $\cC$ throughout the algorithm's execution. Various options exist for the choice of $\eta_t$, such as the \textit{open-loop step-size}\footnote{Open-loop is a term from control theory and here implies that there is no feedback from the objective function to the step-size.}, a.k.a. \emph{agnostic step-size}, rules $\eta_t = \frac{\ell}{t + \ell}$ for $\ell \in \N_{\geq 1}$ \citep{dunn1978conditional} or line-search $\eta_t \in \argmin_{\eta \in [0,1]} f((1-\eta) x_t + \eta p_t)$. Another classical approach, the \emph{short-step} step-size $\eta_t = \min\{ \frac{ \langle \nabla f(x_t), x_t - p_t\rangle}{L\|x_t - p_t\|_2^2},1\}$, henceforth referred to as short-step, is determined by minimizing a quadratic upper bound on the $L$-smooth objective function. There also exist variants that adaptively estimate local $L$-smoothness parameters \citep{pedregosa2018step}. \begin{algorithm}[t] \SetKwInput{Input}{Input} \SetKwInput{Output}{Output} \caption{Frank-Wolfe algorithm (FW) \citep{frank1956algorithm}}\label{algo:fw} \Input{$x_0\in \cC$, step-sizes $\eta_t\in [0, 1]$ for $t\in\{0,\ldots, T-1\}$.} \hrulealg \For{$t= 0, \ldots, T-1 $}{ $p_{t} \in \argmin_{p \in \cC} \langle\nabla f(x_{t}), p- x_{t}\rangle$\label{line:p_t_det}\\ $x_{t+1} \gets (1 - \eta_{t}) x_t + \eta_t p_{t}$} \end{algorithm} \subsection{{Related work}}\label{sec:related_work} Frank-Wolfe algorithms (FW) are first-order methods that enjoy various appealing properties \citep{jaggi2013revisiting}. They are easy to implement, projection-free, affine invariant \citep{lacoste2013affine,lan2013complexity,kerdreux2021affine,pena2021affine}, and iterates are sparse convex combinations of extreme points of the feasible region. These properties make FW an attractive algorithm for practitioners who work at scale, and FW appears in a variety of scenarios in machine learning, such as deep learning, optimal transport, structured prediction, and video co-localization \citep{ravi2018constrained,courty2016optimal,giesen2012optimizing,joulin2014efficient}. See \citet{braun2022conditional}, for a survey. For several settings, FW with line-search or short-step admits accelerated convergence rates in primal gap $h_t = f(x_t) - f(x^*)$, where $x^*\in\argmin_{x\in\cC}f(x)$ is the minimizer of $f$: Specifically, when the objective is strongly convex and the optimal solution lies in the relative interior of the feasible region, FW with line-search or short-step converges linearly \citep{guelat1986some}. Moreover, when the feasible region is strongly convex and the norm of the gradient of the objective is bounded from below by a nonnegative constant, FW with line-search or short-step converges linearly \citep{levitin1966constrained, demianov1970approximate, dunn1979rates}. Finally, when the feasible region and objective are strongly convex, FW with line-search or short-step converges at a rate of order $\cO(1/t^2)$, see also Table~\ref{table:references_to_results}. However, the drawback of FW is its slow convergence rate when the feasible region $\cC$ is a polytope and the optimal solution lies in the relative interior of an at least one-dimensional face $\cC^*$ of $\cC$. 
In this setting, for any $\epsilon > 0$, FW with line-search or short-step converges at a rate of order $\Omega (1/t^{1+\epsilon})$ \citep{wolfe1970convergence, canon1968tight}. To achieve linear convergence rates in this setting, algorithmic modifications of FW are necessary \citep{lacoste2015global, garber2016linear, braun2019blended, combettes2020boosting, garber2020revisiting}. FW with open-loop step-size rules, on the other hand, has a convergence rate that is not governed by the lower bound of \citet{wolfe1970convergence}. Indeed, \citet{bach2021effectiveness} proved an asymptotic convergence rate of order $\cO(1/t^2)$ for FW with open-loop step-sizes in the setting of \citet{wolfe1970convergence}. However, proving that the latter result holds non-asymptotically remains an open problem. Other disadvantages of line-search and short-step are that the former can be difficult to compute and the latter requires knowledge of the smoothness constant of the objective $f$. On the other hand, open-loop step-size rules are problem-agnostic and, thus, easy to compute. Nevertheless, little is known about the settings in which FW with open-loop step-size rules admits acceleration, except for two momentum-exploiting variants that achieve convergence rates of order up to $\cO(1/t^2)$: The \emph{primal-averaging Frank-Wolfe algorithm} (PAFW), presented in Algorithm~\ref{algo:pafw}, was first proposed by \citet{lan2013complexity} and later analyzed by \citet{kerdreux2021local}. PAFW employs the open-loop step-size $\eta_t = \frac{2}{t+2}$ and momentum to achieve convergence rates of order up to $\cO(1/t^2)$ when the feasible region is uniformly convex and the gradient norm of the objective is bounded from below by a nonnegative constant. For the same setting, the \emph{momentum-guided Frank-Wolfe algorithm} (MFW) \citep{li2021momentum}, presented in Algorithm~\ref{algo:mfw}, employs the open-loop step-size $\eta_t = \frac{2}{t+2}$, and also incorporates momentum to achieve similar convergence rates as PAFW. In addition, MFW converges at a rate of order $\cO(1/t^2)$ when the feasible region is a polytope, the objective is strongly convex, the optimal solution lies in the relative interior of an at least one-dimensional face of $\cC$, and strict complementarity holds. Finally, note that FW with open-loop step-size $\eta_t = \frac{1}{t+1}$ is equivalent to the kernel-herding algorithm \citep{bach2012equivalence}. For a specific infinite-dimensional kernel-herding setting, empirical observations in \citet[Figure 3, right]{bach2012equivalence} have shown that FW with open-loop step-size $\eta_t = \frac{1}{t+1}$ converges at the optimal rate of order $\cO(1/t^2)$, whereas FW with line-search or short-step converges at a rate of essentially $\Omega(1/t)$. Currently, both phenomena lack a theoretical explanation. \subsection{{Contributions}} \begin{table*}[t] \footnotesize \centering \begin{tabular}{|c|c|c|c|c|c|c|} \hline References & Region $\cC$ & Objective $f$ & Location of $x^*$ & Rate & Step-size rule \\ \hline \rowcolor{LightCyan} \citep{jaggi2013revisiting}&-& - & unrestricted & $\cO(1/t)$ & any\\ \hline \citep{guelat1986some}& -& str. con. & interior & $\cO(e^{-t})$ & line-search, short-step \\ \hline \bf{Theorem}~\ref{thm:interior} & -& str. con. & interior & $\cO(1/t^2)$ & open-loop $\eta_t = \frac{4}{t+4}$\\ \hline \rowcolor{LightCyan} \makecell{\citep{levitin1966constrained}\\ \citep{demianov1970approximate}\\ \citep{dunn1979rates}} & str. con. 
&\makecell{$\|\nabla f(x)\|_2 \geq\lambda > 0$\\ for all $x\in\cC$} & unrestricted & $\cO(e^{-t})$ & line-search, short-step\\ \hline \rowcolor{LightCyan} \bf{Theorem}~\ref{thm:exterior} & str. con. & \makecell{$\|\nabla f(x)\|_2 \geq\lambda > 0$\\ for all $x\in\cC$} & unrestricted & $\cO(1/t^2)$ & open-loop $\eta_t = \frac{4}{t+4}$\\ \hline \rowcolor{LightCyan} \bf{Remark}~\ref{rem:ol_linear} & str. con. & \makecell{$\|\nabla f(x)\|_2 \geq\lambda > 0$\\ for all $x\in\cC$} & unrestricted & $\cO(1/t^{\ell/2})$ & \Gape[0pt][2pt]{\makecell{open loop $\eta_t = \frac{\ell}{t+\ell}$\\ for $\ell\in\N_{\geq 4}$}}\\ \hline \rowcolor{LightCyan} \bf{Remark}~\ref{rem:ol_linear} & str. con. & \makecell{$\|\nabla f(x)\|_2 \geq\lambda > 0$\\ for all $x\in\cC$} & unrestricted & $\cO(e^{-t})$ & constant\\ \hline \citep{garber2015faster} & str. con. & str. con. & unrestricted & $\cO(1/t^2)$ & line-search, short-step\\ \hline \bf{Theorem}~\ref{thm:unrestricted} & str. con. & str. con. & unrestricted & $\cO(1/t^2)$ & open-loop $\eta_t = \frac{4}{t+4}$\\ \hline \rowcolor{LightCyan} \citep{wolfe1970convergence} & polytope & str. con. & interior of face & $\Omega(1/t^{1 +\eps})^*$ & line-search, short-step\\ \hline \rowcolor{LightCyan} \citep{bach2021effectiveness} & polytope & str. con. & interior of face & $\cO(1/t^2)^*$ & open-loop $\eta_t = \frac{2}{t+2}$\\ \hline \rowcolor{LightCyan} \bf{Theorem}~\ref{thm:polytope} & polytope & str. con. & interior of face & $\cO(1/t^2)$ & open-loop $\eta_t = \frac{4}{t+4}$\\ \hline \end{tabular} \normalsize \caption{Comparison of convergence rates of FW for various settings. We denote the optimal solution by $x^*\in\argmin_{x\in\cC}f(x)$. Convexity of $\cC$ and convexity and smoothness of $f$ are always assumed. The big-O notation $\cO(\cdot)^*$ indicates that a result only holds asymptotically, "str. con." is an abbreviation for strongly convex, and "any" refers to line-search, short-step, and open-loop step-size $\eta_t = \frac{2}{t+2}$. Shading is used to group related results and our results are denoted in bold.} \label{table:references_to_results} \end{table*} In this paper, we develop our understanding of settings for which FW with open-loop step-sizes admits acceleration. In particular, our contributions are five-fold: First, we prove accelerated convergence rates of FW with open-loop step-size rules in settings for which FW with line-search or short-step enjoys accelerated convergence rates. Details are presented in Table~\ref{table:references_to_results}. Most importantly, when the feasible region $\cC$ is strongly convex and the norm of the gradient of the objective $f$ is bounded from below by a nonnegative constant for all $x\in\cC$, the latter of which is, for example, implied by the assumption that the unconstrained optimal solution $\argmin_{x\in\R^d}f(x)$ lies in the exterior of $\cC$, we prove convergence rates of order $\cO(1/t^{\ell/2})$ for FW with open-loop step-sizes $\eta_t=\frac{\ell}{t+\ell}$, where $\ell\in\N_{\geq 1}$. Second, under the assumption of strict complementarity, we prove that FW with open-loop step-sizes admits a convergence rate of order $\cO(1/t^2)$ in the setting of the lower bound due to \citet{wolfe1970convergence}, that is, we prove the non-asymptotic version of the result due to \citet{bach2021effectiveness}. We thus characterize a setting for which FW with open-loop step-sizes is non-asymptotically faster than FW with line-search or short-step, see the last three rows of Table~\ref{table:references_to_results} for details. 
Third, we return again to the setting of the lower bound due to \citet{wolfe1970convergence}, for which both FW and MFW with open-loop step-sizes admit convergence rates of order $\cO(1/t^2)$, assuming strict complementarity. We demonstrate that the \emph{decomposition-invariant pairwise Frank-Wolfe algorithm} (DIFW) \citep{garber2016linear} and the \emph{away-step Frank-Wolfe algorithm} (AFW) \citep{guelat1986some, lacoste2015global} with open-loop step-sizes converge at rates of order $\cO(1/t^2)$ without the assumption of strict complementarity. Fourth, we compare FW with open-loop step-sizes to PAFW and MFW for the problems of logistic regression and collaborative filtering. The results indicate that FW with open-loop step-sizes converges at rates comparable to or better than those of PAFW and MFW. This implies that faster convergence rates can not only be achieved by studying algorithmic variants of FW but can also be obtained via a deeper understanding of vanilla FW and its various step-size rules. Finally, we provide a theoretical analysis of the accelerated convergence rate of FW with open-loop step-sizes in the kernel herding setting of \citet[Figure 3, right]{bach2012equivalence}.
\subsection{{Outline}} Preliminaries are introduced in Section~\ref{sec:preliminaries}. In Section~\ref{sec:accelerated}, we present a proof blueprint for obtaining accelerated convergence rates for FW with open-loop step-sizes. In Section~\ref{sec:ol_faster_than_ls_ss}, for the setting of the lower bound of \citet{wolfe1970convergence} and assuming strict complementarity, we prove that FW with open-loop step-sizes converges faster than FW with line-search or short-step. In Section~\ref{sec:fw_variants}, we introduce two algorithmic variants of FW with open-loop step-sizes that admit accelerated convergence rates in the problem setting of the lower bound of \citet{wolfe1970convergence} without relying on strict complementarity. In Section~\ref{sec:kernel_herding}, we prove accelerated convergence rates for FW with open-loop step-sizes in the infinite-dimensional kernel-herding setting of \citet[Figure 3, right]{bach2012equivalence}. Section~\ref{sec:numerical_experiments_main} contains the numerical experiments. Finally, we discuss our results in Section~\ref{sec:discussion}.
\section{{Preliminaries}}\label{sec:preliminaries} Throughout, let $d\in \N$. Let $\zeroterm\in\R^d$ denote the all-zeros vector, let $\oneterm \in \R^d$ denote the all-ones vector, and let $\bar{\oneterm}\in \R^d$ be a vector such that $\bar{\oneterm}_i=0$ for all $i \in \{1, \ldots, \lceil d/2\rceil\}$ and $\bar{\oneterm}_{i}=1$ for all $i \in \{\lceil d/2\rceil + 1, \ldots, d\}$. For $i\in\{1,\ldots, d\}$, let $e^{(i)}\in \R^d$ be the $i$th unit vector such that $e^{(i)}_i = 1$ and $e^{(i)}_j = 0$ for all $j \in \{1,\ldots, d\} \setminus \{i\}$. Given a vector $x\in\R^d$, define its support as $\supp(x) = \{i \in \{1,\ldots, d\} \mid x_i \neq 0\}$. Let $I\in \R^{d\times d}$ denote the identity matrix. Given a set $\cC \subseteq \R^d$, let $\aff(\cC)$, $\conv(\cC)$, $\mathspan(\cC)$, and $\vertices(\cC)$ denote the affine hull, the convex hull, the span, and the set of vertices of $\cC$, respectively. For $z\in \R^d$ and $\beta > 0$, the ball of radius $\beta$ around $z$ is defined as $ B_\beta(z):= \{x\in \R^d \mid \|x - z\|_2 \leq \beta\}. $ For the iterates of Algorithm~\ref{algo:fw}, we denote the \emph{primal gap} at iteration $t\in \{0, \ldots, T\}$ by $h_t := f(x_t) - f(x^*)$, where $x^*\in\argmin_{x\in\cC}f(x)$.
Finally, for $x\in\R$, let $[x]:= x - \lfloor x\rfloor$. We introduce several definitions. \begin{definition}[Uniformly convex set]\label{def:unif_cvx_C} Let $\cC \subseteq \R^d$ be a compact convex set, $\alpha_\cC >0$, and $q>0$. We say that $\cC$ is \emph{$(\alpha_\cC, q)$-uniformly convex} with respect to $\|\cdot\|_2$ if for all $x,y \in \cC$, $\gamma \in [0,1]$, and $z \in \R^d$ such that $\|z\|_2=1$, it holds that $ \gamma x + ( 1- \gamma) y + \gamma (1 - \gamma) \alpha_\cC \|x-y\|_2^q z \in \cC. $ We refer to $(\alpha_\cC, 2)$-uniformly convex sets as \emph{$\alpha_\cC$-strongly convex sets}. \end{definition} \begin{definition}[Smooth function]\label{def:smooth_f} Let $\cC \subseteq \R^d$ be a compact convex set, let $f\colon \cC \to \R$ be differentiable in an open set containing $\cC$, and let $L > 0$. We say that $f$ is \emph{$L$-smooth} over $\cC$ with respect to $\|\cdot\|_2$ if for all $x,y\in \cC$, it holds that $ f(y) \leq f(x) + \langle \nabla f(x), y - x\rangle + \frac{L}{2}\|x-y\|_2^2. $ \end{definition} \begin{definition}[Hölderian error bound]\label{def:heb} Let $\cC \subseteq \R^d$ be a compact convex set, let $f\colon \cC \to \R$ be convex, let $\mu> 0$, and let $\theta \in [0, 1/2]$. We say that $f$ satisfies a \emph{$(\mu, \theta)$-Hölderian error bound} if for all $x\in \cC$ and $x^*\in\argmin_{x\in\cC}f(x)$, it holds that \begin{align}\label{eq:heb_original} \mu(f(x)-f(x^*))^\theta \geq \min_{y\in\argmin_{z\in\cC}f(z)} \|x-y\|_2 . \end{align} \end{definition} Throughout, for ease of notation, we assume that $x^*\in\argmin_{x\in\cC}f(x)$ is unique. This follows, for example, from the assumption that $f$ is strictly convex. When $x^*\in \argmin_{x\in \cC}f(x)$ is unique, \eqref{eq:heb_original} becomes \begin{align}\tag{HEB}\label{eq:heb} \mu(f(x)-f(x^*))^\theta \geq \|x - x^*\|_2. \end{align} An important family of functions satisfying \eqref{eq:heb} is the family of uniformly convex functions, which interpolate between convex functions ($\theta = 0$) and strongly convex functions ($\theta = 1/2$). \begin{definition}[Uniformly convex function]\label{def:unif_cvx_f} Let $\cC \subseteq \R^d$ be a compact convex set, let $f\colon \cC \to \R$ be differentiable in an open set containing $\cC$, let $\alpha_f >0$, and let $r\geq 2$. We say that $f$ is \emph{$(\alpha_f, r)$-uniformly convex} over $\cC$ with respect to $\|\cdot\|_2$ if for all $x,y\in \cC$, it holds that $ f(y) \geq f(x) + \langle \nabla f(x), y-x\rangle + \frac{\alpha_f}{r}\|x-y\|_2^r. $ We refer to $(\alpha_f, 2)$-uniformly convex functions as \emph{$\alpha_f$-strongly convex}. \end{definition} Note that $(\alpha_f, r)$-uniformly convex functions satisfy a $((r/{\alpha_f})^{1/r},1/r)$-\eqref{eq:heb}: $ f(x) - f(x^*) \geq \langle \nabla f (x^*), x- x^*\rangle + \frac{\alpha_f}{r}\|x - x^*\|^r_2\geq \frac{\alpha_f}{r}\|x - x^*\|^r_2. $ \section{{Accelerated convergence rates for FW with open-loop step-sizes}}\label{sec:accelerated} FW with open-loop step-size rules was already studied by \citet{dunn1978conditional} and currently, two open-loop step-sizes are prevalent, $\eta_t = \frac{1}{t+1}$, for which the best known convergence rate is $\cO\left(\log (t)/t \right)$, and $\eta_t = \frac{2}{t+2}$, for which a faster convergence rate of order $\cO(1/t)$ holds, see, for example, \citet{dunn1978conditional} and \citet{jaggi2013revisiting}, respectively. In this section, we derive convergence rates for FW with open-loop step-size $\eta_t = \frac{4}{t+4}$. 
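As a purely illustrative aside (a toy experiment of our own, with an ad hoc problem instance and horizon, not one of the experiments reported later in this paper), the following sketch can be used to compare the open-loop rules $\eta_t = \frac{\ell}{t+\ell}$ for $\ell \in \{1, 2, 4\}$ on a small quadratic over the $\ell_2$-ball.
\begin{verbatim}
# Toy comparison of open-loop step-sizes eta_t = ell/(t + ell) (illustrative
# only; the quadratic objective and the l2-ball feasible region are ad hoc).
import numpy as np

def lmo_l2_ball(c, r=1.0):
    n = np.linalg.norm(c)
    return np.zeros_like(c) if n == 0.0 else -r * c / n

def final_primal_gap(ell, T=10000, r=1.0):
    b = np.array([2.0, 0.5])                 # unconstrained minimizer outside the ball
    x_star = r * b / np.linalg.norm(b)       # constrained minimizer of f over the ball
    f = lambda x: 0.5 * np.dot(x - b, x - b)
    x = np.zeros(2)
    for t in range(T):
        p = lmo_l2_ball(x - b, r)            # gradient of f at x is x - b
        eta = ell / (t + ell)
        x = (1 - eta) * x + eta * p
    return f(x) - f(x_star)

if __name__ == "__main__":
    for ell in (1, 2, 4):
        # Larger ell typically yields a smaller final gap in this setting.
        print(ell, final_primal_gap(ell))
\end{verbatim}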
Convergence results for FW with $\eta_t = \frac{\ell}{t+\ell}$ for $\ell\in \N_{\geq 1}$ presented throughout this paper, except for those in Section~\ref{sec:kernel_herding}, can always be generalized (up to a constant) to $\eta_{t} = \frac{j}{t+j}$ for $j\in\N_{\geq \ell}$. This section is structured as follows. First, we derive a baseline convergence rate of order $\cO(1/t)$ in Section~\ref{sec:baseline}. Then, in Section~\ref{sec:blueprint}, we present the proof blueprint used throughout most parts of the paper to derive accelerated convergence rates and directly apply our approach to the setting when the objective satisfies \eqref{eq:heb} and the optimal solution $x^*\in\argmin_{x\in\cC} f(x)$ lies in the relative interior of the feasible region. In Section~\ref{sec:exterior}, we prove accelerated rates when the feasible region is uniformly convex and the norm of the gradient of the objective is bounded from below by a nonnegative constant. Finally, in Section~\ref{sec:unconstrained}, we prove accelerated rates when the feasible region is uniformly convex and the objective satisfies \eqref{eq:heb}. \subsection{Convergence rate of order $\cO(1/t)$}\label{sec:baseline} We begin the analysis of FW with open-loop step-size rules by first recalling the, to the best of our knowledge, best general convergence rate of the algorithm. Consider the setting when $\cC \subseteq \R^d$ is a compact convex set and $f\colon \cC \to \R$ is a convex and $L$-smooth function with unique minimizer $x^*\in\argmin_{x\in\cC}f(x)$. Then, the iterates of Algorithm~\ref{algo:fw} with any step-size $\eta_t \in [0, 1]$ satisfy \begin{align}\tag{Progress-Bound}\label{eq:start_progress_bound} h_{t+1} & \leq h_t - \eta_t \langle\nabla f(x_t), x_t - p_t\rangle + \eta_t^2\frac{\ L \|x_t-p_t\|^2_2}{2}, \end{align} which follows from the smoothness of $f$. With \eqref{eq:start_progress_bound}, it is possible to derive a baseline convergence rate for FW with open-loop step-size $\eta_t = \frac{4}{t+4}$ similar to the one derived by \citet{jaggi2013revisiting} for FW with $\eta_t = \frac{2}{t+2}$. \begin{proposition}[Convergence rate of order $\cO(1/t)$]\label{prop:generalization_jaggi} Let $\cC \subseteq \R^d$ be a compact convex set of diameter $\delta > 0$, let $f\colon \cC \to \R$ be a convex and $L$-smooth function with unique minimizer $x^*\in\argmin_{x\in\cC}f(x)$. Let $T\in\N$ and $\eta_t = \frac{4}{t+4}$ for all $t\in\Z$. Then, for the iterates of Algorithm~\ref{algo:fw} with step-size $\eta_t$, it holds that $ h_t \leq \frac{ 8L \delta^2}{t+3} = \eta_{t-1}2L\delta^2 $ for all $t\in\{1,\ldots, T\}$. \end{proposition} \begin{proof} In the literature, the proof is usually done by induction \citep{jaggi2013revisiting}. Here, for convenience and as a brief introduction for things to come, we proceed with a direct approach. Since $\eta_0 = 1$, by $L$-smoothness, we have $h_1 \leq \frac{ L \delta^2}{2}.$ Let $t\in\{1, \ldots, T-1\}$. By optimality of $p_t$ and convexity of $f$, $\langle\nabla f(x_t), x_t - p_t\rangle \geq \langle\nabla f(x_t), x_t - x^*\rangle \geq h_t$. 
Plugging this bound into \eqref{eq:start_progress_bound} and with $\|x_t - p_t\|_2\leq \delta$, it holds that \begin{align} h_{t+1}&\leq (1-\eta_t)h_t + \eta_t^2\frac{ L \|x_t - p_t\|_2^2}{2} \label{eq:always_combine_with_this}\\ & \leq \prod_{i=1}^t(1-\eta_i)h_1 + \frac{L\delta^2}{2}\sum_{i=1}^t \eta_i^2\prod_{j = i +1}^t(1-\eta_j)\nonumber\\ & \leq \frac{ L \delta^2}{2}(\frac{4!}{(t+1) \cdots (t+4)} + \sum_{i=1}^t \frac{4^2}{(i+4)^2}\frac{(i+1) \cdots (i+4)}{(t+1) \cdots (t+4)}) \nonumber\\ & \leq 8L \delta^2 ( \frac{1}{(t+4-1)(t+4)} + \frac{t}{(t+4-1)(t+4 )}) \nonumber\\ & \leq \frac{8L \delta^2}{t+4},\nonumber \end{align} where we used that $\prod_{j = i+1}^t (1 - \eta_j) = \frac{(i+1) (i+2) \cdots t}{(i+5) (i+6) \cdots (t+4)} = \frac{(i+1)(i+2)(i+3)(i+4)}{(t+1)(t+2)(t+3)(t+4)}$. \end{proof} To prove accelerated convergence rates for FW with open-loop step-sizes, we require bounds on the \emph{Frank-Wolfe gap} (FW gap) $\max_{p\in\cC} \langle\nabla f(x_t), x_t - p \rangle$, which appears in the middle term in \eqref{eq:start_progress_bound}.
\subsection{{Optimal solution in the relative interior -- a blueprint for acceleration}}\label{sec:blueprint} Traditionally, to prove accelerated convergence rates for FW with line-search or short-step, the geometry of the feasible region, curvature assumptions on the objective function, and information on the location of the optimal solution are exploited \citep{levitin1966constrained, demianov1970approximate, guelat1986some, garber2015faster}. A similar approach leads to acceleration results for FW with open-loop step-sizes; however, it requires a different proof technique because FW with open-loop step-sizes is not monotone in primal gap. Here, we introduce the proof blueprint used to derive most of the accelerated rates in this paper via the setting when the objective $f$ satisfies \eqref{eq:heb} and the minimizer of $f$ is in the relative interior of the feasible region $\cC$. Our goal is to bound the FW gap to counteract the error accumulated from the right-hand term in \eqref{eq:start_progress_bound}. More formally, we prove that there exist $\phi > 0$ and an iteration $\fwt \in \N$ such that for all iterations $t\geq \fwt$ of FW, it holds that \begin{align}\tag{Scaling}\label{eq:scaling} \frac{\langle \nabla f(x_t) , x_t - p_t \rangle}{\|x_t - p_t\|_2} \geq \phi \frac{\langle \nabla f(x_t) , x_t - x^* \rangle}{\|x_t - x^*\|_2}. \end{align} Inequalities that bound \eqref{eq:scaling} from either side are referred to as \emph{scaling inequalities}. Intuitively speaking, scaling inequalities relate the \emph{FW direction} $\frac{p_t - x_t}{\|p_t-x_t\|_2}$ with the \emph{optimal descent direction} $\frac{x^*-x_t}{\|x^*-x_t\|_2}$. Scaling inequalities stem from the geometry of the feasible region, properties of the objective function, or information on the location of the optimal solution. The scaling inequality below exploits the latter property. \begin{lemma}[\citealp{guelat1986some}]\label{lemma:GM} Let $\cC \subseteq \R^d$ be a compact convex set of diameter $\delta > 0$, let $f\colon \cC \to \R$ be a convex and $L$-smooth function with unique minimizer $x^*\in\argmin_{x\in\cC}f(x)$, and suppose that there exists $\beta>0$ such that $\aff (\cC) \cap B_\beta(x^*)\subseteq \cC$.
Then, for all $x\in \cC\cap B_\beta(x^*)$, it holds that \begin{equation}\tag{Scaling-INT}\label{eq:scaling_int} \frac{\langle \nabla f(x), x - p\rangle}{\|x - p\|_2} \geq \frac{\beta}{\delta} \|\nabla f(x)\|_2, \end{equation} where $p \in \argmin_{v\in \cC} \langle \nabla f(x), v \rangle$. \end{lemma} Below, we prove that there exists $\fwt \in \N$ such that for all $t\geq \fwt$, $x_t \in B_\beta(x^*)$ and \eqref{eq:scaling_int} is satisfied. \begin{lemma}\label{lemma:dist_to_opt} Let $\cC \subseteq \R^d$ be a compact convex set of diameter $\delta > 0$, let $f\colon \cC \to \R$ be a convex and $L$-smooth function satisfying a $(\mu, \theta)$-\eqref{eq:heb} for some $\mu > 0 $ and $\theta \in ]0, 1/2]$ with unique minimizer $x^*\in\argmin_{x\in\cC}f(x)$, and let $\beta > 0$. Let $\fwt = \lceil 8L \delta^2\left(\mu / \beta\right)^{1/\theta} \rceil$, $T\in\N$, and $\eta_t = \frac{4}{t+4}$ for all $t\in\Z$. Then, for the iterates of Algorithm~\ref{algo:fw} with step-size $\eta_t$, it holds that $\|x_t -x^*\|_2 \leq \beta$ for all $t \in\{\fwt,\ldots, T\}$. \end{lemma} \begin{proof} By \eqref{eq:heb} and Proposition~\ref{prop:generalization_jaggi}, $\|x_t - x^*\|_2 \leq \mu h_t^\theta \leq \mu (\frac{8 L \delta^2 }{ 8L \delta^2(\mu/\beta)^{1/\theta} })^\theta \leq \beta$ for all $t\in\{\fwt,\ldots, T\}$. \end{proof} The second scaling inequality follows from the objective satisfying \eqref{eq:heb}. \begin{lemma}\label{lemma:heb_to_grad} Let $\cC\subseteq \R^d$ be a compact convex set and let $f\colon \cC \to \R$ be a convex function satisfying a $(\mu, \theta)$-\eqref{eq:heb} for some $\mu > 0 $ and $\theta \in [0, 1/2]$ with unique minimizer $x^*\in\argmin_{x\in\cC}f(x)$. Then, for all $x\in \cC$, it holds that \begin{align}\label{eq:scaling_heb} \|\nabla f (x)\|_2 & \geq \frac{\langle \nabla f(x), x - x^*\rangle}{\|x - x^*\|_2}\geq \frac{1}{\mu}(f(x) - f(x^*))^{{1-\theta}}.\tag{Scaling-HEB} \end{align} \end{lemma} \begin{proof} The statement holds for $x=x^*$. For $x\in\cC\setminus \{x^*\}$, by convexity and \eqref{eq:heb}, $f(x) - f(x^*) \leq \frac{\langle \nabla f (x), x-x^*\rangle}{\|x-x^*\|_2} \|x-x^*\|_2 \leq \frac{\langle \nabla f (x), x-x^*\rangle}{\|x-x^*\|_2} \mu (f(x) - f(x^*))^{\theta}$. Dividing by $\mu(f(x)-f(x^*))^\theta$ yields \eqref{eq:scaling_heb}. \end{proof} For $t\in\{\fwt,\ldots, T-1\}$, where $\fwt = \lceil 8L \delta^2\left(2\mu / \beta\right)^{1/\theta} \rceil$, we plug \eqref{eq:scaling_int} and \eqref{eq:scaling_heb} into \eqref{eq:start_progress_bound} to obtain $h_{t+1} \leq h_t - \eta_t \frac{\beta^2}{2\mu \delta} h_t^{1-\theta} + \eta_t^2\frac{ L \delta^2}{2}$. Combined with \eqref{eq:always_combine_with_this}, we have \begin{align}\label{eq:int_sequence} h_{t+1} & \leq (1 -\frac{\eta_t}{2}) h_t - \eta_t \frac{\beta^2}{4\mu \delta} h_t^{1-\theta} + \eta_t^2\frac{ L \delta^2}{2} \end{align} for all $t\in\{\fwt,\ldots, T-1\}$. If the primal gaps of FW with open-loop step-sizes satisfy an inequality of this type, the lemma below implies accelerated convergence rates. \begin{lemma}\label{lemma:sequences} Let $\psi \in [0, 1/2]$, $\fwt, T \in \N_{\geq 1}$, and $\eta_t = \frac{4}{t+4}$ for all $t\in\Z$. 
Suppose that there exist constants $A, B, C > 0$, a nonnegative sequence $\{C_t\}_{t=\fwt}^{T-1}$ such that $C \geq C_t \geq 0$ for all $t\in\{\fwt,\ldots,T-1\}$, and a nonnegative sequence $\{h_t\}_{t = \fwt}^{T}$ such that \begin{align}\label{eq:gotta_derive_this} h_{t+1} & \leq (1 - \frac{\eta_t}{2})h_t - \eta_t AC_t h_t^{1-\psi} + \eta_t^2 BC_t \end{align} for all $t \in\{\fwt, \ldots, T-1\}$. Then, \begin{align}\label{eq:cd_simple} h_t \leq \max \left\{ \left(\frac{\eta_{t-2}}{\eta_{\fwt-1}}\right)^{1/(1-\psi)}h_\fwt, \left(\frac{\eta_{t-2} B}{A}\right)^{1/(1-\psi)} + \eta_{t-2}^2 BC\right\} \end{align} for all $t \in\{\fwt, \ldots, T\}$. \end{lemma} \begin{proof} For all $t\in\{\fwt, \ldots, T\}$, we first prove that \begin{align}\label{eq:cd} h_t & \leq \max \left\{ \left(\frac{\eta_{t-2}\eta_{t-1}}{\eta_{\fwt-2}\eta_{\fwt-1}}\right)^{1/(2(1-\psi))}h_\fwt, \left(\frac{\eta_{t-2}\eta_{t-1} B^2}{A^2}\right)^{1/(2(1-\psi))} + \eta_{t-2 }\eta_{t-1} BC\right\} , \end{align} which then implies \eqref{eq:cd_simple}. The proof is a straightforward modification of Footnote $3$ in the proof of Proposition $2.2$ in \citet{bach2021effectiveness} and is by induction. The base case of \eqref{eq:cd} with $t = \fwt$ is immediate, even if $\fwt= 1$, as $\eta_{-1}\geq \eta_{0} = 1$. Suppose that \eqref{eq:cd} is correct for a specific iteration $t\in\{\fwt, \ldots, T-1\}$. We distinguish between two cases. First, suppose that $h_t \leq (\frac{\eta_t B}{A})^{1/(1-\psi)}$. Plugging this bound into \eqref{eq:gotta_derive_this}, we obtain $h_{t+1} \leq (1-\frac{\eta_t}{2}) h_t - 0 + \eta_t^2 BC_t\leq (\frac{\eta_t B}{A})^{1/(1-\psi)} + \eta_t^2 BC \leq (\frac{\eta_{t-1}\eta_t B^2}{A^2})^{1/(2(1-\psi))} + \eta_{t-1}\eta_t BC$. Next, suppose that $h_t \geq (\frac{\eta_t B}{A})^{1/(1-\psi)}$ instead. Plugging this bound on $h_t$ into \eqref{eq:gotta_derive_this} and using the induction assumption \eqref{eq:cd} at iteration $t$ yields \begin{align*} h_{t+1} &\leq \left(1 - \frac{\eta_t}{2}\right)h_t -\eta_t A C_t \frac{\eta_t B}{A} + \eta_t^2 B C_t\\ &= \frac{t+2}{t+4} h_t \\ & = \frac{\eta_{t}}{\eta_{t-2}} h_t\\ &\leq \frac{\eta_{t}}{\eta_{t-2}} \max \left\{ \left(\frac{\eta_{t-2}\eta_{t-1}}{\eta_{\fwt-2}\eta_{\fwt-1}}\right)^{1/(2(1-\psi))}h_\fwt, \left(\frac{\eta_{t-2}\eta_{t-1} B^2}{A^2}\right)^{1/(2(1-\psi))} + \eta_{t-2 }\eta_{t-1} BC\right\}\\ & \leq \max \left\{ \left(\frac{\eta_{t-1}\eta_{t}}{\eta_{\fwt-2}\eta_{\fwt-1}}\right)^{1/(2(1-\psi))}h_\fwt, \left(\frac{\eta_{t-1}\eta_{t} B^2}{A^2}\right)^{1/(2(1-\psi))} + \eta_{t-1 }\eta_{t} BC\right\}, \end{align*} where the last inequality holds due to $\frac{\eta_t}{\eta_{t-2}}(\eta_{t-2}\eta_{t-1})^{1/(2(1-\psi))} \leq (\eta_{t-1}\eta_{t})^{1/(2(1-\psi))}$ for $\frac{\eta_t}{\eta_{t-2}}\in [0,1]$ and $1/(2(1-\psi)) \in [1/2,1]$. In either case, \eqref{eq:cd} is satisfied for $t+1$. By induction, the lemma follows. \end{proof} We conclude the presentation of our proof blueprint by stating the first accelerated convergence rate for FW with open-loop step-size $\eta_t = \frac{4}{t+4}$ when the objective function $f$ satisfies \eqref{eq:heb} and the minimizer lies in the relative interior of the feasible region $\cC$. For this setting, FW with line-search or short-step converges linearly if the objective function is strongly convex \citep{guelat1986some,garber2015faster}.
Further, FW with open-loop step-size $\eta_t = \frac{1}{t+1}$ converges at a rate of order $\cO(1/t^2)$ when the objective is of the form $f(x) = \frac{1}{2}\|x-b\|_2^2$ for some $b\in \cC$ \citep{chen2012super}. \begin{theorem}[Optimal solution in the relative interior of $\cC$]\label{thm:interior} Let $\cC \subseteq \R^d$ be a compact convex set of diameter $\delta > 0$, let $f\colon \cC \to \R$ be a convex and $L$-smooth function satisfying a $(\mu, \theta)$-\eqref{eq:heb} for some $\mu > 0 $ and $\theta \in ]0, 1/2]$ with unique minimizer $x^*\in\argmin_{x\in\cC}f(x)$, and suppose that there exists $\beta>0$ such that $\aff (\cC) \cap B_\beta(x^*) \subseteq \cC$. Let $\fwt = \lceil 8L \delta^2\left(2\mu / \beta\right)^{1/\theta} \rceil$, $T\in\N$, and $\eta_t = \frac{4}{t+4}$ for all $t\in\Z$. Then, for the iterates of Algorithm~\ref{algo:fw} with step-size $\eta_t$, it holds that \begin{align}\label{eq:interior_sol} h_t \leq \max \Bigg\{ & \left(\frac{\eta_{t-2}}{\eta_{\fwt-1}}\right)^{1/(1-\theta)} h_\fwt, \left(\frac{\eta_{t-2}2 \mu L\delta^3}{\beta^2}\right)^{1/(1-\theta)} + \eta_{t-2}^2 \frac{L\delta^2}{2}\Bigg\} \end{align} for all $t\in\{\fwt, \ldots, T\}$. \end{theorem} \begin{proof} Let $t\in\{\fwt, \ldots, T-1\}$. By Lemma~\ref{lemma:dist_to_opt}, $\|x_t -x^*\|_2 \leq \beta / 2$ and, by triangle inequality, we have $\|x_t - p_t\|_2 \geq \beta / 2$. Thus, for all $t\in\{\fwt, \ldots, T\}$, it follows that \eqref{eq:int_sequence} holds. We apply Lemma~\ref{lemma:sequences} with $A = \frac{\beta^2}{4 \mu \delta}$, $B = \frac{L\delta^2}{2}$, $C= 1$, $C_t = 1$ for all $t\in\{\fwt, \ldots, T-1\}$, and $\psi = \theta$, resulting in \eqref{eq:interior_sol} holding for all $t\in\{\fwt, \ldots, T\}$. \end{proof} We complement Theorem~\ref{thm:interior} with a discussion on the lower bound of the convergence rate of FW when the optimal solution is in the relative interior of the probability simplex. \begin{lemma}[\citealp{jaggi2013revisiting}]\label{lemma:lb_jaggi} Let $\cC\subseteq \R^d$ be the probability simplex, $f(x) = \|x\|_2^2$, and $t\in\{1,\ldots, d\}$. It holds that $\min_{\substack{x \in \cC \\ |\supp (x)| \leq t}} f(x) = \frac{1}{t}$, where $|\supp(x)|$ denotes the number of non-zero entries of $x$. \end{lemma} \begin{remark}[{Compatibility with lower bound from \citet{jaggi2013revisiting}}]\label{rem:jaggi_interior} In Lemma~\ref{lemma:lb_jaggi}, the optimal solution $x^* = \frac{1}{d}\oneterm\in \R^d$ lies in the relative interior of $\cC$ and $\min_{x\in \cC}f(x) = 1/d$. When $\cC$ is the probability simplex, all of its vertices are of the form $e^{(i)} = (0, \ldots, 0 , 1, 0 , \ldots, 0)^\intercal \in \R^d$, $i\in\{1, \ldots, d\}$. Thus, any iteration of FW can modify at most one entry of iterate $x_t$ and the primal gap is at best $h_t = 1/t -1/d$ for $t\in\{1, \ldots, d\}$. Applying Theorem~\ref{thm:interior} to the setting of Lemma~\ref{lemma:lb_jaggi}, we observe that $\beta =1/d$ and acceleration starts only after $\fwt = \Omega(d^{1/\theta}) \geq \Omega(d)$ iterations. Thus, Theorem~\ref{thm:interior} does not contradict Lemma~\ref{lemma:lb_jaggi}. \end{remark} \subsection{Unconstrained minimizer in the exterior -- lower-bounded gradient norm}\label{sec:exterior} In this section, we apply the proof blueprint from the previous section to the setting when the feasible region $\cC$ is uniformly convex and the norm of the gradient of $f$ is bounded from below by a nonnegative constant. 
For this setting, FW with line-search or short-step converges linearly when the feasible region is also strongly convex \citep{levitin1966constrained, demianov1970approximate, garber2015faster}. When the feasible region is only uniformly convex, rates interpolating between $\cO(1/t)$ and linear convergence are known \citep{kerdreux2021projection}. Two FW variants employ open-loop step-sizes and enjoy accelerated convergence rates of order up to $\cO(1/t^2)$ when the feasible region $\cC$ is uniformly convex and the norm of the gradient of $f$ is bounded from below by a nonnegative constant: the primal-averaging Frank-Wolfe algorithm (PAFW) \citep{lan2013complexity, kerdreux2021local}, presented in Algorithm~\ref{algo:pafw}, and the momentum-guided FW algorithm (MFW) \citep{li2021momentum}, presented in Algorithm~\ref{algo:mfw}. Below, for the same setting, we prove that FW with open-loop step-size $\eta_t = \frac{4}{t+4}$ also admits accelerated convergence rates of order up to $\cO(1/t^2)$ depending on the uniform convexity of the feasible region. Furthermore, when the feasible region is strongly convex, we prove that FW with open-loop step-size $\eta_t=\frac{\ell}{t+\ell}$, where $\ell\in\N_{\geq 2}$, converges at a rate of order $\cO(1/t^{\ell/2})$, which is faster than the convergence rates known for PAFW and MFW. To prove these results, we require two new scaling inequalities, the first of which follows directly from the assumption that the norm of the gradient of $f$ is bounded from below by a nonnegative constant. More formally, let $\cC\subseteq \R^d$ be a compact convex set and let $f\colon \cC \to \R$ be a convex and $L$-smooth function such that there exists $\lambda > 0$ such that for all $x\in \cC$, \begin{equation}\tag{Scaling-EXT}\label{eq:scaling_ext} \|\nabla f(x)\|_2 \geq \lambda. \end{equation} In case $f$ is well-defined, convex, and differentiable on $\R^d$, \eqref{eq:scaling_ext} is, for example, implied by the convexity of $f$ and the assumption that the unconstrained minimizer of $f$, that is, $\argmin_{x\in\R^d} f(x)$, lies in the exterior of $\cC$. The second scaling inequality follows from the uniform convexity of the feasible region and is proved in the proof of \citet[Theorem 2.2]{kerdreux2021projection} in FW gap. The result stated below is then obtained by bounding the FW gap from below with the primal gap. \begin{figure*}[t!] 
\begin{minipage}[t]{0.46\textwidth} \begin{algorithm}[H] \SetKwInput{Input}{Input} \SetKwInput{Output}{Output} \caption{Primal-averaging Frank-Wolfe algorithm (PAFW) \citep{lan2013complexity}}\label{algo:pafw} \Input{$x_0\in \cC$, step-sizes $\eta_t = \frac{\ell}{t+\ell}$, where $\ell\in\N_{\geq 1}$, for $t\in\{0,\ldots, T-1\}$.} \hrulealg $v_0 \gets x_0$\\ \For{$t= 0, \ldots, T-1 $}{ $y_t \gets (1-\eta_t) x_t + \eta_t v_t$\\ $w_{t+1} \gets \nabla f(y_t)$\label{line:pafw_w}\\ $v_{t+1}\in\argmin_{v\in\cC} \langle w_{t+1}, v\rangle$\\ $x_{t+1}\gets(1-\eta_t) x_t + \eta_t v_{t+1}$} \end{algorithm} \end{minipage} \hfil \begin{minipage}[t]{0.46\textwidth} \begin{algorithm}[H] \SetKwInput{Input}{Input} \SetKwInput{Output}{Output} \caption{Momentum-guided Frank-Wolfe algorithm (MFW) \citep{li2021momentum}}\label{algo:mfw} \Input{$x_0\in \cC$, step-sizes $\eta_t = \frac{\ell}{t+\ell}$, where $\ell\in\N_{\geq 1}$, for $t\in\{0,\ldots, T-1\}$.} \hrulealg $v_0 \gets x_0$; $w_0 \gets \zeroterm$\\ \For{$t= 0, \ldots, T-1 $}{ $y_t \gets (1-\eta_t) x_t + \eta_t v_t$\\ $w_{t+1} \gets (1-\eta_t)w_t + \eta_t \nabla f(y_t)$\label{line:mfw_w}\\ $v_{t+1}\in\argmin_{v\in\cC} \langle w_{t+1}, v\rangle$\\ $x_{t+1}\gets(1-\eta_t) x_t + \eta_t v_{t+1}$} \end{algorithm} \end{minipage} \end{figure*} \begin{lemma}[\citealp{kerdreux2021projection}]\label{lemma:unif_convexity} For $\alpha >0$ and $q\geq 2$, let $\cC \subseteq \R^d$ be a compact $(\alpha,q)$-uniformly convex set and let $f\colon \cC \to \R$ be a convex function that is differentiable in an open set containing $\cC$ with unique minimizer $x^*\in\argmin_{x\in\cC}f(x)$. Then, for all $x\in \cC$, it holds that \begin{equation}\tag{Scaling-UNIF}\label{eq:scaling_unif} \frac{\langle \nabla f(x), x-p\rangle}{\|x-p\|_2^2} \geq \left(\frac{\alpha}{2}\|\nabla f(x)\|_2\right)^{2/q} (f(x)-f(x^*))^{1-2/q}, \end{equation} where $p \in \argmin_{v\in \cC} \langle \nabla f(x), v \rangle$. \end{lemma} Combining \eqref{eq:scaling_ext} and \eqref{eq:scaling_unif}, we derive the following accelerated convergence result. \begin{theorem}[Norm of the gradient of $f$ is bounded from below by a nonnegative constant]\label{thm:exterior} For $\alpha >0$ and $q \geq 2$, let $\cC \subseteq \R^d$ be a compact $(\alpha,q)$-uniformly convex set of diameter $\delta > 0$, let $f\colon \cC \to \R$ be a convex and $L$-smooth function with lower-bounded gradients, that is, $\|\nabla f(x)\|_2 \geq \lambda$ for all $x\in \cC$ for some $\lambda > 0$, with unique minimizer $x^*\in\argmin_{x\in\cC}f(x)$. Let $T\in\N$ and $\eta_t = \frac{4}{t+4}$ for all $t\in\Z$. Then, for the iterates of Algorithm~\ref{algo:fw} with step-size $\eta_t$, when $q \geq 4$, it holds that \begin{align}\label{eq:ext_q_greater_4} h_t & \leq \max \left\{ \eta_{t-2}^{1/(1-2/q)}\frac{L\delta^2}{2}, \left(\eta_{t-2} L \left( \frac{2}{\alpha \lambda}\right)^{2/q}\right)^{1/(1-2/q)} + \eta_{t-2}^2 \frac{L\delta^2}{2}\right\} \end{align} for all $t\in\{1, \ldots, T\}$, and letting ${\fwt} = \lceil 8 L \delta^2 \rceil$, when $q \in [2, 4[$, it holds that \begin{align}\label{eq:ext_sol} h_t & \leq \max \left\{ \left(\frac{\eta_{t-2}}{\eta_{{\fwt}-1}}\right)^{2}h_{\fwt}, \left(\eta_{t-2} L \left( \frac{2}{\alpha \lambda}\right)^{2/q}\right)^{2} + \eta_{t-2}^2 \frac{L\delta^2}{2}\right\} \end{align} for all $t\in\{\fwt,\ldots, T\}$. \end{theorem} \begin{proof} Let $t\in\{1,\ldots, T-1\}$. 
Combining \eqref{eq:scaling_unif} and \eqref{eq:scaling_ext}, it holds that $\langle \nabla f(x_t), x_t - p_t\rangle \geq \| x_t - p_t \|_2^2 \left(\frac{\alpha\lambda}{2}\right)^{2/q} h_t^{1-2/q}$. Then, using \eqref{eq:start_progress_bound}, we obtain $h_{t+1} \leq h_t - \eta_t\|x_t-p_t\|_2^2( \frac{\alpha \lambda}{2})^{2/q} h_t^{1-2/q} +\eta_t^2\frac{L\|x_t-p_t\|_2^2}{2}$. Combined with \eqref{eq:always_combine_with_this}, we obtain \begin{align}\label{eq:ext} h_{t+1} \leq & \left(1-\frac{\eta_t}{2}\right)h_t +\frac{\eta_t\|x_t-p_t\|_2^2}{2} \left( \eta_t L-\left( \frac{\alpha \lambda}{2}\right)^{2/q} h_t^{1-2/q} \right). \end{align} Suppose that $q \geq 4$. Then, \eqref{eq:ext} allows us to apply Lemma~\ref{lemma:sequences} with $A =( \frac{\alpha \lambda}{2})^{2/q}$, $B=L$, $C= \frac{\delta^2}{2}$, $C_t = \frac{\|x_t-p_t\|_2^2}{2}$ for all $t\in\{1,\ldots, T-1\}$, and $\psi = 2/q\in [0,1/2]$, resulting in \eqref{eq:ext_q_greater_4} holding for all $t\in\{1,\ldots, T\}$, since $h_1 \leq \frac{L\delta^2}{2}$, and $\eta_{-1}\geq \eta_{0} = 1$. Next, suppose that $q\in[2, 4[$ and note that $2/q > 1/2$. Thus, Lemma~\ref{lemma:sequences} can be applied after a burn-in phase of slower convergence. Let $t\in\{\fwt,\ldots,T-1\}$. By Proposition~\ref{prop:generalization_jaggi}, $ h_t \leq h_\fwt \leq 1 $. Since $1-2/q\leq 1/2$, we have $h_t^{1-2/q} \geq h_t^{1/2} = h_t^{1-1/2}$. Combined with \eqref{eq:ext}, it holds that $h_{t+1} \leq (1-\frac{\eta_t}{2})h_t +\frac{\eta_t\|x_t-p_t\|_2^2}{2} ( \eta_t L-( \frac{\alpha \lambda}{2})^{2/q} h_t^{1 - 1/2} )$. We then apply Lemma~\ref{lemma:sequences} with $A =( \frac{\alpha \lambda}{2})^{2/q}$, $B=L$, $C= \frac{\delta^2}{2}$, $C_t = \frac{\|x_t-p_t\|_2^2}{2}$ for all $t\in\{\fwt,\ldots,T-1\}$, and $\psi = 1/2$, resulting in \eqref{eq:ext_sol} holding for all $t\in\{\fwt,\ldots, T\}$. Note that the lemma holds even if $\fwt = 1$ since $\eta_{-1}\geq \eta_0 = 1$. \end{proof} As we discuss below, in the setting of Theorem~\ref{thm:exterior}, when $q=2$, FW with open-loop step-sizes $\eta_t= \frac{\ell}{t+\ell}$, where $\ell\in \N_{\geq 2}$, converges at a rate of order $\cO(1/t^{\ell/2})$. \begin{remark}[Acceleration beyond rates of order $\cO(1/t^2)$]\label{rem:ol_linear} Under the assumptions of Theorem~\ref{thm:exterior}, analogously to Proposition~\ref{prop:generalization_jaggi}, one can prove convergence rates of order $\cO(1/t)$ for FW with step-sizes $\eta_t = \frac{\ell}{t+\ell}$, where $\ell\in \N_{\geq 2}$, depending on $L, \delta$, and $\ell$. Thus, for $q=2$, there exists $\fwt \in \N$ depending only on $L, \alpha, \delta, \lambda,\ell$, such that for all $t\in\{\fwt,\ldots, T-1\}$, it holds that \begin{align*} \frac{\eta_t\|x_t-p_t\|_2^2}{2} ( \eta_t L-\frac{\alpha \lambda}{2} ) \leq 0. \end{align*} Thus, \eqref{eq:ext} becomes $h_{t+1}\leq (1-\frac{\eta_t}{2})h_t$ for all $t\in\{\fwt, \ldots, T-1\}$. Then, by induction, for even $\ell\in\N_{\geq 2}$, it holds that $h_t \leq \frac{h_{\fwt} ({\fwt}+\ell/2) ({\fwt}+\ell/2 +1) \cdots ({\fwt}+\ell-1)}{(t+\ell/2) (t+\ell/2 +1) \cdots (t+\ell-1)}$ for all $t\in\{\fwt,\ldots, T-1\}$, resulting in a convergence rate of order $\cO(1/t^{\ell/2})$. For $\ell\in\N_{\geq 6}$, this convergence rate is better than the convergence rates of order $\cO(1/t^2)$ known for PAFW and MFW. Using similar arguments, one can prove that FW with the constant open-loop step-size $\eta_t = \frac{\alpha\lambda}{2L}$ converges linearly, that is, $h_t \leq (1 - \frac{\alpha\lambda}{4L})^th_0$ for all $t\in\{0, \ldots, T\}$. 
\end{remark} \begin{figure}[t] \captionsetup[subfigure]{justification=centering} \begin{tabular}{c c c} \begin{subfigure}{.3\textwidth} \centering \includegraphics[width=1\textwidth]{lp_2_ball_location_exterior.png} \caption{$\ell_2$-ball.}\label{fig:exterior_2} \end{subfigure}& \begin{subfigure}{.3\textwidth} \centering \includegraphics[width=1\textwidth]{lp_3_ball_location_exterior.png} \caption{$\ell_3$-ball.}\label{fig:exterior_3} \end{subfigure} & \begin{subfigure}{.3\textwidth} \centering \includegraphics[width=1\textwidth]{lp_5_ball_location_exterior.png} \caption{$\ell_5$-ball.}\label{fig:exterior_5} \end{subfigure}\\ \end{tabular} \caption{ Comparison of FW with different step-sizes when the feasible region $\cC\subseteq\R^{100}$ is an $\ell_p$-ball, the objective $f$ is not strongly convex, and the unconstrained optimal solution $\argmin_{x\in\R^d}f(x)$ lies in the exterior of $\cC$, implying that $\|\nabla f(x)\|_2 \geq \lambda > 0$ for all $x\in\cC$ for some $\lambda > 0$. The $y$-axis represents the minimum primal gap. FW with open-loop step-sizes $\eta_t = \frac{\ell}{t+\ell}$, where $\ell\in\N_{\geq 1}$, converges at a rate of order $\cO(1/t^\ell)$ and FW with constant step-size converges linearly. }\label{fig:exterior} \end{figure} The results in Figure~\ref{fig:exterior}, see Section~\ref{sec:experiment_exterior} for details, show that in the setting of Theorem~\ref{thm:exterior} and Remark~\ref{rem:ol_linear}, FW with open-loop step-sizes $\eta_t=\frac{\ell}{t+\ell}$, where $\ell\in\N_{\geq 1}$, converges at a rate of order $\cO(1/t^\ell)$ and FW with constant step-size $\eta_t = \frac{\alpha\lambda}{2L}$ converges linearly in Figure~\ref{fig:exterior_2}. The convergence rates for FW with $\eta_t=\frac{\ell}{t+\ell}$ are better than predicted by Remark~\ref{rem:ol_linear} and indicate a gap between theory and practice. Note that we observe acceleration beyond $\cO(1/t^2)$ even when the feasible region is only uniformly convex, a behaviour which our current theory does not explain. \subsection{{No assumptions on the location of the optimal solution}}\label{sec:unconstrained} In this section, we address the setting when the feasible region $\cC$ is uniformly convex, the objective function $f$ satisfies \eqref{eq:heb}, and no assumptions are made on the location of the optimal solution $x^*\in\argmin_{x\in\cC}f(x)$. \citet{garber2015faster} showed that strong convexity of the feasible region and the objective function are enough to modify \eqref{eq:start_progress_bound} to prove a convergence rate of order $\cO(1/t^2)$ for FW with line-search or short-step. \citet{kerdreux2021projection} relaxed these assumptions and proved convergence rates for FW with line-search or short-step interpolating between $\cO(1/t)$ and $\cO(1/t^2)$. Below, for the same setting, we prove that FW with open-loop step-sizes also admits rates interpolating between $\cO(1/t)$ and $\cO(1/t^2)$. \begin{theorem}[No assumptions on the location of the optimal solution]\label{thm:unrestricted} For $\alpha>0$ and $q\geq 2$, let $\cC \subseteq \R^d$ be a compact $(\alpha,q)$-uniformly convex set of diameter $\delta > 0$, let $f\colon \cC \to \R$ be a convex and $L$-smooth function satisfying a $(\mu, \theta)$-\eqref{eq:heb} for some $\mu > 0 $ and $\theta \in [0, 1/2]$ with unique minimizer $x^*\in\argmin_{x\in\cC}f(x)$. Let $T\in\N$ and $\eta_t = \frac{4}{t+4}$ for all $t\in\Z$. 
Then, for the iterates of Algorithm~\ref{algo:fw} with step-size $\eta_t$, it holds that \begin{align}\label{eq:unrestricted_rate} h_t & \leq \max \left\{ \eta_{t-2}^{1/(1-2\theta/q)}\frac{L\delta^2}{2}, \left(\eta_{t-2} L \left( \frac{2\mu}{\alpha}\right)^{2/q}\right)^{1/(1-2\theta/q)} + \eta_{t-2}^2 \frac{L\delta^2}{2}\right\} \end{align} for all $t \in\{1, \ldots, T\}$. \end{theorem} \begin{proof} Let $t\in\{1,\ldots, T-1\}$. Combining \eqref{eq:scaling_unif} and \eqref{eq:scaling_heb}, we obtain $\langle \nabla f(x_t) ,x_t-p_t \rangle \geq \|x_t-p_t\|_2^2 (\frac{\alpha}{2\mu})^{2/q} h_t^{1-2\theta/q}$. Then, using \eqref{eq:start_progress_bound}, we obtain $h_{t+1} \leq h_t - \eta_t \|x_t-p_t\|_2^2 (\frac{\alpha}{2\mu})^{2/q} h_t^{1-2\theta/q} + \eta_t^2\frac{L\|x_t - p_t\|_2^2}{2}$. Combined with \eqref{eq:always_combine_with_this}, we have $h_{t+1} \leq (1-\frac{\eta_t}{2})h_t + \frac{\eta_t\|x_t-p_t\|_2^2}{2} (\eta_t L - (\frac{\alpha}{2\mu})^{2/q} h_t^{1-2\theta/q})$. We apply Lemma~\ref{lemma:sequences} with $A =( \frac{\alpha}{2\mu})^{2/q}$, $B=L$, $C= \frac{\delta^2}{2}$, $C_t = \frac{\|x_t-p_t\|_2^2}{2}$ for all $t\in\{1, \ldots, T-1\}$, and $\psi = 2\theta/q \leq 1/2$, resulting in \eqref{eq:unrestricted_rate} holding for all $t\in\{1,\ldots, T\}$, since $h_1 \leq \frac{L\delta^2}{2}$, and $\eta_{-1}\geq \eta_0 = 1$. \end{proof}
\section{Optimal solution in the relative interior of a face of $\cC$}\label{sec:ol_faster_than_ls_ss} In this section, we consider the setting when the feasible region is a polytope, the objective function is strongly convex, and the optimal solution lies in the relative interior of an at least one-dimensional face $\cC^*$ of $\cC$. Then, under mild assumptions, FW with line-search or short-step converges at a rate of order $\Omega(1/t^{1+\eps})$ for any $\eps > 0$ \citep{wolfe1970convergence}. Due to this lower bound, several FW variants with line-search or short-step were developed that converge linearly in the described setting, see Section~\ref{sec:related_work}. For this setting, following our earlier blueprint from Section~\ref{sec:blueprint}, we prove that FW with open-loop step-sizes converges at a rate of order $\cO(1/t^2)$, which is non-asymptotically faster than FW with line-search or short-step. Our result can be thought of as the non-asymptotic version of Proposition~2.2 in \citet{bach2021effectiveness}. Contrary to the result of \citet{bach2012equivalence}, our result is in primal gap, we do not require bounds on the third-order derivatives of the objective, and we do not invoke affine invariance of FW to obtain acceleration. To prove our result, we require two assumptions. The first assumption stems from \emph{active set identification}, that is, the concept of identifying the face $\cC^*\subseteq\cC$ containing the optimal solution $x^*\in\argmin_{x\in\cC}f(x)$ to then apply faster methods whose convergence rates often depend only on the dimension of the optimal face \citep{hager2006new, bomze2019first, bomze2020active}. Here, it is possible to determine the number of iterations necessary for FW with open-loop step-sizes to identify the optimal face when the following regularity assumption, already used in, for example, \citet{garber2020revisiting, li2021momentum}, is satisfied. \begin{assumption}[Strict complementarity]\label{ass:strict_comp} Let $\cC \subseteq \R^d$ be a polytope and let $f\colon \cC \to \R$ be differentiable in an open set containing $\cC$.
Suppose that $x^*\in\argmin_{x\in\cC}f(x)$ is unique and contained in an at least one-dimensional face $\cC^*$ of $\cC$ and that there exists $\kappa > 0$ such that if $p\in \vertices \left(\cC\right)\setminus \cC^*$, then $\langle \nabla f(x^*), p-x^*\rangle \geq \kappa$; otherwise, if $p\in \vertices \left(\cC^*\right)$, then $\langle \nabla f(x^*), p-x^*\rangle = 0$. \end{assumption} In the proof of Theorem~$5$ in \citet{garber2020revisiting}, the authors showed that there exists an iterate $\fwt \in \N$ such that for all $t\geq \fwt$, the FW vertices $p_t$ lie in the optimal face, assuming that the objective function is strongly convex. Below, we generalize their result to convex functions satisfying \eqref{eq:heb}. \begin{lemma}[Active set identification]\label{lemma:active_face_identification} Let $\cC \subseteq \R^d$ be a polytope of diameter $\delta > 0$, let $f\colon \cC \to \R$ be a convex and $L$-smooth function satisfying a $(\mu, \theta)$-\eqref{eq:heb} for some $\mu > 0 $ and $\theta \in ]0, 1/2]$ with unique minimizer $x^*\in\argmin_{x\in\cC}f(x)$, and suppose that there exists $\kappa > 0$ such that Assumption~\ref{ass:strict_comp} is satisfied. Let $\fwt =\lceil 8 L\delta^2 \left({2\mu L \delta}/{\kappa}\right)^{1/\theta}\rceil$, $T\in\N$, and $\eta_t = \frac{4}{t+4}$ for all $t\in\Z$. Then, for the iterates of Algorithm~\ref{algo:fw} with step-size $\eta_t$, it holds that $p_t\in \vertices \left(\cC^*\right)$ for all $t\in\{\fwt, \ldots, T-1\}$. \end{lemma} \begin{proof} Let $t\in\{\fwt, \ldots, T-1\}$. Note that in Line~\ref{line:p_t_det} of Algorithm~\ref{algo:fw}, $p_t \in \argmin_{p\in \cC} \langle \nabla f(x_t), p - x_t\rangle $ can always be chosen such that $p_t \in \argmin_{p \in \vertices(\cC)} \langle \nabla f(x_t), p - x_t\rangle$. For $p \in \vertices (\cC)$, it holds that \begin{align}\label{eq:any_vertex} \langle\nabla f(x_t), p - x_t\rangle & = \langle\nabla f(x_t) -\nabla f(x^*) + \nabla f(x^*), p - x^* + x^* - x_t\rangle \nonumber \\ & = \langle\nabla f(x_t) -\nabla f(x^*), p - x_t\rangle + \langle \nabla f(x^*), p - x^* \rangle + \langle \nabla f(x^*), x^* - x_t\rangle. \end{align} We distinguish between vertices $p\in \vertices \left(\cC\right) \setminus \cC^*$ and vertices $p\in \vertices \left(\cC^*\right)$. First, suppose that $p\in \vertices \left(\cC\right) \setminus \cC^*$. Using strict complementarity, Cauchy-Schwarz, $L$-smoothness, and \eqref{eq:heb} to bound \eqref{eq:any_vertex} yields \begin{align*} \langle\nabla f(x_t), p - x_t\rangle & \geq - \|\nabla f(x_t) - \nabla f(x^*)\|_2 \|p - x_t\|_2 + \kappa + \langle \nabla f(x^*), x^* - x_t\rangle\nonumber \\ &\geq \kappa -L\delta \|x_t-x^*\|_2 + \langle \nabla f(x^*), x^* - x_t\rangle \nonumber \\ & \geq \kappa - \mu L\delta h_t^\theta + \langle \nabla f(x^*), x^* - x_t\rangle. \end{align*} Next, suppose that $p\in \vertices \left(\cC^*\right)$. Using strict complementarity, Cauchy-Schwarz, $L$-smoothness, and \eqref{eq:heb} to bound \eqref{eq:any_vertex} yields \begin{align*} \langle\nabla f(x_t), p - x_t\rangle & \leq \|\nabla f(x_t) - \nabla f(x^*)\|_2 \|p - x_t\|_2 + \langle \nabla f(x^*), x^* - x_t\rangle \\ &\leq L \delta \|x_t-x^*\|_2 + \langle \nabla f(x^*), x^* - x_t\rangle \\ & \leq \mu L\delta h_t^\theta + \langle \nabla f(x^*), x^* - x_t\rangle. 
\end{align*} By Proposition~\ref{prop:generalization_jaggi}, $\mu L \delta h_t^\theta \leq \mu L \delta h_\fwt^\theta \leq \mu L \delta \left(\frac{8L\delta^2}{8 L \delta^2\left({2\mu L \delta}/{\kappa}\right)^{1/\theta} +3 }\right)^\theta < \frac{\kappa}{2}$. Hence, for $t\in\{\fwt, \ldots, T-1\}$, \begin{equation*} \langle \nabla f(x_t), p-x_t \rangle = \begin{cases} > \frac{\kappa}{2} + \langle \nabla f(x^*), x^* - x_t\rangle, & \text{if} \ p \in \vertices \left(\cC\right) \setminus \cC^* \\ < \frac{\kappa }{2} + \langle \nabla f(x^*), x^* - x_t\rangle, & \text{if} \ p \in \vertices \left(\cC^*\right). \end{cases} \end{equation*} Then, by optimality of $p_t$, for all iterations $t\in\{\fwt, \ldots, T-1\}$ of Algorithm~\ref{algo:fw}, it holds that $p_t \in \vertices \left(\cC^*\right)$. \end{proof} In addition, we assume the optimal solution $x^*\in\argmin_{x\in\cC}f(x)$ to be in the relative interior of an at least one-dimensional face $\cC^*$ of $\cC$. \begin{assumption}[Optimal solution in the relative interior of a face of $\cC$]\label{ass:opt_in_face} Let $\cC \subseteq \R^d$ be a polytope and let $f\colon \cC \to \R$. Suppose that $x^*\in\argmin_{x\in\cC}f(x)$ is unique and contained in the relative interior of an at least one-dimensional face $\cC^*$ of $\cC$, that is, there exists $\beta > 0 $ such that $\emptyset \neq B_\beta (x^*) \cap \aff(\cC^*) \subseteq \cC$. \end{assumption} Using Assumption~\ref{ass:opt_in_face}, \citet{bach2021effectiveness} derived the following scaling inequality, a variation of \eqref{eq:scaling_int}. \begin{lemma}[\citealp{bach2021effectiveness}]\label{lemma:scaling_bach} Let $\cC \subseteq \R^d$ be a polytope, let $f\colon \cC \to \R$ be a convex and $L$-smooth function with unique minimizer $x^*\in\argmin_{x\in\cC}f(x)$, and suppose that there exists $\beta > 0$ such that Assumption~\ref{ass:opt_in_face} is satisfied. Then, for all $x\in \cC$ such that $p \in \argmin_{v\in \cC} \langle \nabla f(x), v \rangle \subseteq \cC^* $, it holds that \begin{align}\tag{Scaling-BOR}\label{eq:scaling_bor} \langle \nabla f(x), x - p \rangle & \geq \beta \|\Pi \nabla f(x)\|_2, \end{align} where $\Pi x$ denotes the orthogonal projection of $x\in\R^d$ onto the span of $\{x^* - p \mid p \in \cC^* \}$. \end{lemma} \begin{proof} Suppose that $x \in \cC$ such that $p \in \argmin_{v\in \cC} \langle \nabla f(x), v \rangle \subseteq \cC^*$. Then, \begin{align*} \langle \nabla f(x), x - p \rangle & = \max_{v\in \cC^* } \langle \nabla f(x), x - v \rangle \\ &\geq \langle\nabla f(x), x - x^* \rangle + \langle \nabla f(x), \beta \frac{\Pi \nabla f(x) }{\|\Pi \nabla f(x) \|_2} \rangle \\ & = \langle\nabla f(x), x - x^* \rangle + \langle \Pi \nabla f(x) + (\Iota - \Pi) \nabla f(x), \beta \frac{\Pi \nabla f(x) }{\|\Pi \nabla f(x) \|_2}\rangle \\ & = \langle\nabla f(x), x - x^* \rangle + \beta \|\Pi \nabla f(x)\|_2\\ & \geq \beta \|\Pi \nabla f(x)\|_2, \end{align*} where the first equality follows from the construction of $p \in \argmin_{v\in \cC} \langle \nabla f(x), v \rangle$, the first inequality follows from the fact that the maximum is at least as large as the maximum attained on $B_\beta (x^*) \cap \cC^*$, the second equality follows from the definition of the orthogonal projection, the third equality follows from the fact that $\Pi x$ and $(\Iota - \Pi) x$ are orthogonal for any $x\in \R^d$, and the second inequality follows from the convexity of $f$. \end{proof} To derive the final scaling inequality, we next bound the distance between $x_t$ and the optimal face $\cC^*$. 
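Before doing so, we illustrate \eqref{eq:scaling_bor} numerically. The following Python sketch is only a sanity check and is not part of the formal development; the instance (the probability simplex in $\R^3$ with $f(x) = \frac{1}{2}\|x - b\|_2^2$ and $b = (0, 0.6, 0.4)^\intercal$, so that $x^* = b$ and $\cC^* = \conv(\{e^{(2)}, e^{(3)}\})$) and the choice $\beta = 0.5 \leq 0.4\sqrt{2}$ are our own illustrative assumptions.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(0)
b = np.array([0.0, 0.6, 0.4])       # x* = b lies in the relative interior of conv{e2, e3}
grad = lambda x: x - b              # gradient of f(x) = 0.5 * ||x - b||_2^2
V = np.eye(3)                       # vertices e1, e2, e3 of the probability simplex
u = np.array([0.0, 1.0, -1.0]) / np.sqrt(2)   # unit vector spanning {x* - p : p in C*}
Pi = np.outer(u, u)                 # orthogonal projection onto that span
beta = 0.5                          # any beta <= 0.4 * sqrt(2) works for this instance

for _ in range(10000):
    x = rng.dirichlet(np.ones(3))               # random feasible point
    i = int(np.argmin(V @ grad(x)))             # index of a FW vertex p
    if i == 0:                                  # skip if the FW vertex were not in C*
        continue
    lhs = grad(x) @ (x - V[i])                  # <grad f(x), x - p>
    rhs = beta * np.linalg.norm(Pi @ grad(x))   # beta * ||Pi grad f(x)||_2
    assert lhs >= rhs - 1e-12
print("(Scaling-BOR) held at every sampled point")
\end{verbatim}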
\begin{lemma}[Distance to optimal face]\label{lemma:distance_to_optimal_face}
Let $\cC \subseteq \R^d$ be a polytope of diameter $\delta > 0$, let $f\colon \cC \to \R$ be a convex and $L$-smooth function satisfying a $(\mu, \theta)$-\eqref{eq:heb} for some $\mu > 0 $ and $\theta \in ]0, 1/2]$ with unique minimizer $x^*\in\argmin_{x\in\cC}f(x)$, and suppose that there exist $\beta, \kappa > 0$ such that Assumptions~\ref{ass:strict_comp} and~\ref{ass:opt_in_face} are satisfied. Let $\fwt = \max\{ \lceil 8L \delta^2\left({\mu}/{\beta}\right)^{1/\theta} \rceil, \lceil 8 L\delta^2 \left({2\mu L \delta}/{\kappa}\right)^{1/\theta} \rceil \}$, $T\in\N$, and $\eta_t = \frac{4}{t+4}$ for all $t\in\Z$. Then, for the iterates of Algorithm~\ref{algo:fw} with step-size $\eta_t$, it holds that
\begin{align}\label{eq:statement_1}
\|(I-\Pi) (x_t - x^*)\|_2 & \leq \frac{\eta_t^4}{\eta_{\fwt}^4} \beta
\end{align}
for all $t\in\{\fwt, \ldots, T-1\}$, where $\Pi x$ denotes the orthogonal projection of $x\in\R^d$ onto the span of $\{x^* - p \mid p \in \cC^* \}$.
\end{lemma}
\begin{proof}
Let $t\in\{\fwt, \ldots, T-1\}$. By Lemma~\ref{lemma:active_face_identification}, $p_t \in \vertices (\cC^*)$. Thus, $(\Iota -\Pi) (p_t -x^*) = \zeroterm$,
\begin{align*}
(\Iota - \Pi) (x_{t+1} - x^*) & = (1- \eta_t) (\Iota - \Pi) (x_t - x^*) + \eta_t (\Iota - \Pi) (p_t -x^*) \\
& = (1- \eta_t) (\Iota - \Pi) (x_t - x^*)\\
& = \prod_{i = \fwt}^t (1-\eta_i) (\Iota - \Pi) (x_\fwt - x^*) \\
& = \frac{\fwt (\fwt+1)(\fwt+2) (\fwt+3)}{(t+1)(t+2)(t+3)(t+4)} (\Iota - \Pi) (x_\fwt - x^*),
\end{align*}
and $\|(I-\Pi) (x_{t+1} - x^*)\|_2 \leq \frac{\eta_{t+1}^4}{\eta_\fwt^4} \|(I-\Pi) (x_\fwt - x^*)\|_2 \leq \frac{\eta_{t+1}^4}{\eta_\fwt^4} \beta$, where the last inequality follows from Lemma~\ref{lemma:dist_to_opt}.
\end{proof}
We derive the second scaling inequality below.
\begin{lemma}\label{lemma:scaling_boundary}
Let $\cC \subseteq \R^d$ be a polytope of diameter $\delta > 0$, let $f\colon \cC \to \R$ be an $\alpha_f$-strongly convex and $L$-smooth function with unique minimizer $x^*\in\argmin_{x\in\cC}f(x)$, and suppose that there exist $\beta, \kappa > 0$ such that Assumptions~\ref{ass:strict_comp} and~\ref{ass:opt_in_face} are satisfied. Let $M = \max_{x\in \cC}\|\nabla f(x)\|_2$, $\fwt = \max\{ \lceil {16L \delta^2}/{\alpha_f\beta^2}\rceil, \lceil {64 L^3\delta^4}/{\alpha_f\kappa^2} \rceil \}$, $T\in\N$, and $\eta_t = \frac{4}{t+4}$ for all $t\in\Z$. Then, for the iterates of Algorithm~\ref{algo:fw} with step-size $\eta_t$ and $t\in\{\fwt, \ldots, T-1\}$, it holds that $h_t \leq \frac{\eta_t^4}{\eta_{\fwt}^4} \beta M$ or
\begin{align}\label{eq:scaling_cvx}\tag{Scaling-CVX}
\|\Pi \nabla f(x_t)\|_2 \geq \sqrt{\frac{\alpha_f}{2}} \sqrt{h_t} - \frac{\eta_t^2}{\eta_{\fwt}^2}\sqrt{\frac{\alpha_f\beta M}{2}}-\frac{\eta_t^4}{\eta_\fwt^4}L \beta,
\end{align}
where $\Pi x$ denotes the orthogonal projection of $x\in\R^d$ onto the span of $\{x^* - p \mid p \in \cC^* \}$.
\end{lemma}
\begin{proof}
Given a vector $x\in\R^d$, let $\Pi_{\aff(\cC^*)}x$ denote the projection of $x$ onto $\aff(\cC^*)$, that is, $\Pi_{\aff(\cC^*)}x\in \argmin_{y\in\aff(\cC^*)}\|y-x\|_2$. We first demonstrate how to express $\Pi_{\aff(\cC^*)}$ using $\Pi$. Since $\aff(\cC^*) = x^* + \mathspan(\{x^*-p \mid p\in\cC^*\})$, there exists some $y\in\R^d$ such that $\Pi_{\aff(\cC^*)}x = (I-\Pi)x^* + \Pi x + \Pi y$. By orthogonality of $\Pi$, we have $\|\Pi_{\aff(\cC^*)}x - x\|_2^2 = \|(I-\Pi)x^*- (I-\Pi) x+\Pi y \|_2^2 = \|(I-\Pi)x^*- (I-\Pi) x \|_2^2 +\|\Pi y\|_2^2$.
The right-hand side is minimized when $\Pi y = \zeroterm$. Thus, $\Pi_{\aff(\cC^*)}x = (I-\Pi)x^* + \Pi x \in \argmin_{y\in\aff(\cC^*)}\|y-x\|_2$.
Let $t\in\{\fwt, \ldots, T-1\}$. By Lemma~\ref{lemma:dist_to_opt}, $\|x_t -x^*\|_2 \leq \beta$ and, thus, by Assumption~\ref{ass:opt_in_face}, $\Pi_{\aff(\cC^*)}x_t\in \cC^*$. By $L$-smoothness of $f$, it holds that $\|\nabla f(x_t) - \nabla f(\Pi_{\aff(\cC^*)}x_t)\|_2 \leq L \|x_t - \Pi_{\aff(\cC^*)}x_t\|_2 = L\|(I-\Pi) (x_t-x^*)\|_2$. By Lemma~\ref{lemma:distance_to_optimal_face}, it then holds that
\begin{align}\label{eq:pre_proj_grad}
\|\nabla f(x_t) - \nabla f(\Pi_{\aff(\cC^*)}x_t)\|_2 \leq \frac{\eta_t^4}{\eta_{\fwt}^4}L\beta.
\end{align}
Since $\Pi$ is an orthogonal projection, for any $x\in \R^d$ we have $\|\Pi x\|_2^2 \leq \|\Pi x\|_2^2 + \|(I-\Pi) x\|_2^2 = \|x\|_2^2$, that is, $\|\Pi x\|_2 \leq \|x\|_2$, and Inequality \eqref{eq:pre_proj_grad} implies that $\|\Pi \nabla f(x_t) - \Pi \nabla f(\Pi_{\aff(\cC^*)}x_t)\|_2 \leq \frac{\eta_t^4}{\eta_{\fwt}^4}L\beta$. Combined with the triangle inequality, $\|\Pi \nabla f(\Pi_{\aff(\cC^*)}x_t)\|_2 \leq \|\Pi \nabla f(x_t)\|_2 + \|\Pi \nabla f(x_t) - \Pi \nabla f(\Pi_{\aff(\cC^*)}x_t)\|_2 \leq \|\Pi \nabla f(x_t)\|_2 + \frac{\eta_t^4}{\eta_{\fwt}^4}L\beta$, which we rearrange to
\begin{align}\label{eq:proj_grad_bound}
\|\Pi \nabla f(\Pi_{\aff(\cC^*)}x_t)\|_2 - \frac{\eta_t^4}{\eta_{\fwt}^4}L\beta\leq \|\Pi \nabla f(x_t)\|_2.
\end{align}
For the remainder of the proof, we bound $\|\Pi \nabla f(\Pi_{\aff(\cC^*)}x_t)\|_2$ from below. To do so, define the function $g\colon \cC\cap B_\beta(x^*) \to \R$ via $g(x) := f(\Pi_{\aff(\cC^*)}x) = f((I-\Pi)x^* + \Pi x)$. The gradient of $g$ at $x\in \cC\cap B_\beta(x^*)$ is $\nabla g(x) = \Pi \nabla f(\Pi_{\aff(\cC^*)}x)=\Pi \nabla f((I-\Pi)x^*+\Pi x)$. Since $f$ is $\alpha_f$-strongly convex in $\cC$ and $g(x) = f(x)$ for all $x\in\aff(\cC^*) \cap B_\beta(x^*)$, $g$ is $\alpha_f$-strongly convex in $\aff(\cC^*) \cap B_\beta(x^*)$. Since the projection onto $\aff(\cC^*)$ is idempotent, $\Pi_{\aff(\cC^*)}x_t \in \aff(\cC^*) \cap B_\beta(x^*)$, and $g$ is $\alpha_f$-strongly convex in $\aff(\cC^*) \cap B_\beta(x^*)$, it holds that $ \|\Pi \nabla f(\Pi_{\aff(\cC^*)}x_t)\|_2 = \|\Pi \nabla f(\Pi_{\aff(\cC^*)}^2x_t)\|_2 = \|\nabla g(\Pi_{\aff(\cC^*)}x_t)\|_2 \geq \sqrt{\frac{\alpha_f}{2}} \sqrt{g(\Pi_{\aff(\cC^*)}x_t) - g(x^*)} = \sqrt{\frac{\alpha_f}{2}} \sqrt{f(\Pi_{\aff(\cC^*)}x_t) - f(x^*)} $. Suppose that $h_t \geq \frac{\eta_t^4}{\eta_{\fwt}^4} \beta M$. Then, by Lemma~\ref{lemma:distance_to_optimal_face} and Cauchy-Schwarz, we obtain $h_t - \langle \nabla f(x_t), (I-\Pi)(x_t-x^*)\rangle \geq h_t -\frac{\eta_t^4}{\eta_{\fwt}^4} \beta M \geq 0$. Combined with convexity of $f$, it holds that
\begin{align*}
\|\Pi \nabla f(\Pi_{\aff(\cC^*)}x_t)\|_2 & \geq \sqrt{\frac{\alpha_f}{2}} \sqrt{f(x_t) + \langle \nabla f(x_t), \Pi_{\aff(\cC^*)}x_t - x_t\rangle - f(x^*)}\\
&= \sqrt{\frac{\alpha_f}{2}} \sqrt{h_t - \langle \nabla f(x_t), (I-\Pi)(x_t -x^*) \rangle }\\
&\geq \sqrt{\frac{\alpha_f}{2}} \sqrt{h_t -\frac{\eta_t^4}{\eta_{\fwt}^4} \beta M}.
\end{align*}
Since for $a, b \in \R$ with $a \geq b \geq 0$, we have $\sqrt{a -b} \geq \sqrt{a} - \sqrt{b}$, we obtain $\|\Pi \nabla f(\Pi_{\aff(\cC^*)}x_t)\|_2 \geq \sqrt{\frac{\alpha_f}{2}} (\sqrt{h_t} -\sqrt{\frac{\eta_t^4}{\eta_{\fwt}^4} \beta M }) = \sqrt{\frac{\alpha_f}{2}} (\sqrt{h_t} - \frac{\eta_t^2}{\eta_{\fwt}^2}\sqrt{\beta M})$. Combined with \eqref{eq:proj_grad_bound}, we obtain \eqref{eq:scaling_cvx}.
\end{proof}
Finally, we prove that when the feasible region $\cC$ is a polytope, the objective function $f$ is strongly convex, and the unique minimizer $x^* \in \argmin_{x\in \cC} f(x)$ lies in the relative interior of an at least one-dimensional face $\cC^*$ of $\cC$, FW with the open-loop step-size $\eta_t = \frac{4}{t+4}$ converges at a rate of order $\cO(1/t)$ for iterations $ t\leq \fwt$ and at a non-asymptotic rate of order $\cO(1/t^2)$ for iterations $t\geq \fwt$, where $\fwt$ is defined as in Lemma~\ref{lemma:scaling_boundary}.
\begin{theorem}[Optimal solution in the relative interior of a face of $\cC$]\label{thm:polytope}
Let $\cC \subseteq \R^d$ be a polytope of diameter $\delta > 0$, let $f\colon \cC \to \R$ be an $\alpha_f$-strongly convex and $L$-smooth function with unique minimizer $x^*\in\argmin_{x\in\cC}f(x)$, and suppose that there exist $\beta, \kappa > 0$ such that Assumptions~\ref{ass:strict_comp} and~\ref{ass:opt_in_face} are satisfied. Let $M = \max_{x\in \cC} \|\nabla f(x)\|_2$, $\fwt = \max\left\{ \left\lceil (16L \delta^2) / (\alpha_f\beta^2) \right\rceil, \left\lceil (64 L^3\delta^4)/(\alpha_f\kappa^2) \right\rceil \right\}$, $T\in\N$, and $\eta_t = \frac{4}{t+4}$ for all $t\in\Z$. Then, for the iterates of Algorithm~\ref{algo:fw} with step-size $\eta_t$, it holds that
\begin{align}\label{eq:sol_poly}
h_t \leq \eta_{t-2}^2 \max \left\{ \frac{h_\fwt}{\eta_{\fwt-1}^2}, \frac{ B^2}{A^2} + B, \frac{D}{\eta_\fwt^2} + E\right\}
\end{align}
for all $t\in\{\fwt, \ldots, T\}$, where
\begin{align}\label{eq:big_letters}
A & =\frac{\sqrt{\alpha_f}\beta}{2\sqrt{2}}, \qquad B = \frac{L \delta^2}{2} + \frac{\beta\sqrt{\alpha_f \beta M}}{\eta_\fwt 2\sqrt{2}} + \frac{L\beta^2}{\eta_\fwt 2}, \qquad D = \beta M , \qquad E =\frac{L \delta^2}{2}.
\end{align}
\end{theorem}
\begin{proof}
Let $t\in\{\fwt, \ldots, T-1\}$ and suppose that $h_t \geq \frac{\eta_t^4}{\eta_{\fwt}^4} \beta M$. Combine \eqref{eq:always_combine_with_this} and \eqref{eq:start_progress_bound} to obtain $h_{t+1} \leq (1 - \frac{\eta_t}{2}) h_t - \frac{\eta_t}{2} \langle\nabla f(x_t), x_t - p_t\rangle + \eta_t^2\frac{ L \|x_t-p_t\|^2_2}{2}$. Plugging \eqref{eq:scaling_bor} and \eqref{eq:scaling_cvx} into this inequality results in $h_{t+1} \leq (1 - \frac{\eta_t}{2})h_t - \frac{\eta_t \beta}{2} (\sqrt{\frac{\alpha_f}{2}} \sqrt{h_t} - \frac{\eta_t^2}{\eta_{\fwt}^2}\sqrt{\frac{\alpha_f\beta M}{2}}-\frac{\eta_t^4}{\eta_\fwt^4}L \beta) + \frac{\eta_t^2 L \delta^2}{2}$. Since $\eta_t / \eta_\fwt \leq 1$ for all $t\in\{\fwt, \ldots, T-1\}$, it holds that
\begin{align}\label{eq:ready_for_lemma_sequences_border}
h_{t+1} & \leq \left(1 - \frac{\eta_t}{2}\right)h_t - \eta_t \frac{\sqrt{\alpha_f}\beta}{2\sqrt{2}}\sqrt{h_t} + \eta_t^2 \left(\frac{L \delta^2}{2} + \frac{\beta\sqrt{\alpha_f \beta M}}{\eta_\fwt 2\sqrt{2}} + \frac{L\beta^2}{\eta_\fwt 2}\right).
\end{align}
Let $A$ and $B$ be as in \eqref{eq:big_letters}, $C = 1$, $C_t=1$ for all $t\in\{\fwt, \ldots, T-1\}$, and $\psi = 1/2$. Ideally, we could now apply Lemma~\ref{lemma:sequences}. However, Inequality~\eqref{eq:ready_for_lemma_sequences_border} is only guaranteed to hold when $h_t \geq \frac{\eta_t^4}{\eta_{\fwt}^4} \beta M$. Thus, we have to extend the proof of Lemma~\ref{lemma:sequences} to the case that $h_t \leq \frac{\eta_t^4}{\eta_{\fwt}^4} \beta M$.
In case $h_t \leq \frac{\eta_t^4}{\eta_{\fwt}^4} \beta M$, \eqref{eq:always_combine_with_this} implies that $h_{t+1}\leq (1-\eta_t)h_t + \eta_t^2 \frac{L\|x_t - p_t\|_2^2}{2} \leq h_t + \eta_t^2 \frac{L\delta^2}{2}\leq \eta_{t-1}\eta_t( \frac{\beta M}{\eta_{\fwt}^2} + \frac{L\delta^2}{2}) = \eta_{t-1}\eta_t( \frac{D}{\eta_{\fwt}^2} + E)$, where $D = \beta M $ and $E=\frac{L \delta^2}{2}$. Thus, in the proof of Lemma~\ref{lemma:sequences}, the induction assumption \eqref{eq:cd} has to be replaced by $h_t \leq \max \left\{ \frac{\eta_{t-2}\eta_{t-1}}{\eta_{\fwt-2}\eta_{\fwt-1}}h_\fwt, \frac{\eta_{t-2}\eta_{t-1} B^2}{A^2} + \eta_{t-2 }\eta_{t-1} BC, \eta_{t-2}\eta_{t-1}( \frac{D}{\eta_{\fwt}^2} + E)\right\}$. Then, using the same analysis as in Lemma~\ref{lemma:sequences}, extended by the case that $h_t \leq \frac{\eta_t^4}{\eta_{\fwt}^4} \beta M$, proves that \eqref{eq:sol_poly} holds for all $t\in\{\fwt, \ldots, T\}$. \end{proof} In the following remark to Theorem~\ref{thm:polytope}, we discuss how to relax strict complementarity. \begin{remark}[Relaxation of strict complementarity]\label{rem:relaxation_of_strict_complementarity} In the proof of Theorem~\ref{thm:polytope}, strict complementarity is only needed to guarantee that after a specific iteration $\fwt \in \{1,\ldots, T-1\}$, for all $t\in\{\fwt,\ldots, T-1\}$, it holds that $p_t\in \vertices(\cC^*)$, that is, only vertices that lie in the optimal face get returned by FW's LMO. However, strict complementarity is only a sufficient but not necessary criterion to guarantee that only vertices in the optimal face are obtained from the LMO for iterations $t\in\{\fwt,\ldots, T-1\}$: Consider, for example, the minimization of $f(x) = \frac{1}{2} \|x - b \|_2^2$ for $b = (0, 1/2, 1/2)^\intercal\in \R^3$ over the probability simplex $\cC=\conv\left(\{e^{(1)}, e^{(2)}, e^{(3)}\}\right)$. Note that $\cC^* = \conv\left(\{ e^{(2)}, e^{(3)}\}\right)$. It holds that $x^* = b $ and $\nabla f(x^*) = (0, 0 , 0)^\intercal \in \R^3$. Thus, strict complementarity is violated. However, for any $x_t = (u, v , w)^\intercal\in\R^3$ with $u + v + w = 1$ and $u,v,w \geq 0$, it holds, by case distinction, that either $\langle \nabla f(x_t), e^{(1)} -x_t\rangle > \min\{\langle \nabla f(x_t), e^{(2)}-x_t \rangle, \langle \nabla f(x_t), e^{(3)}-x_t \rangle\}$, or $x^* = x_t$. Thus, $p_t \in \cC^*$ for all $t\geq 0$ without strict complementarity being satisfied. \end{remark} \begin{figure}[t] \captionsetup[subfigure]{justification=centering} \centering \begin{tabular}{c c} \begin{subfigure}{.3\textwidth} \centering \includegraphics[width=1\textwidth]{probability_simplex_rho_0.25.png} \caption{$\rho=\frac{1}{4}$.} \label{fig:ls_sublinear} \end{subfigure}& \begin{subfigure}{.3\textwidth} \centering \includegraphics[width=1\textwidth]{probability_simplex_rho_2.png} \caption{$\rho=2$.} \label{fig:ls_linear} \end{subfigure} \end{tabular} \caption{ Comparison of FW with different step-sizes when the feasible region $\cC\subseteq\R^{100}$ is the probability simplex, the objective $f(x) = \frac{1}{2}\|x-\rho \bar{\oneterm}\|_2^2$, where $\rho \in\{ \frac{1}{4}, 2\}$, is strongly convex, and the optimal solution $x^*\in\argmin_{x\in\cC}f(x)$ lies in the relative interior of an at least one-dimensional face of $\cC$. The $y$-axis represents the minimum primal gap. For both settings, FW with open-loop step-sizes $\eta_t = \frac{\ell}{t+\ell}$ converges at a rate of order $\cO(1/t^2)$ when $\ell\in\N_{\geq 2}$ and at a rate of order $\cO(1/t)$ when $\ell=1$. 
FW with line-search converges at a rate of order $\cO(1/t)$ when $\rho = \frac{1}{4}$ and linearly when $\rho = 2$. In the latter setting, FW with line-search solves the problem exactly after $|\supp(x^*)|$ iterations. }\label{fig:experiments_polytope}
\end{figure}
The results in Figure~\ref{fig:experiments_polytope}, see Section~\ref{sec:experiment_polytope} for details, show that when the feasible region $\cC$ is a polytope, the objective $f=\frac{1}{2}\|x-\rho \bar{\oneterm}\|_2^2$, where $\rho\in\{\frac{1}{4},2\}$, is strongly convex, and the constrained optimal solution $x^*\in\argmin_{x\in\cC} f(x)$ lies in the relative interior of an at least one-dimensional face of $\cC$, FW with open-loop step-sizes $\eta_t=\frac{\ell}{t+\ell}$, where $\ell\in\N_{\geq 2}$, converges at a rate of order $\cO(1/t^2)$, whereas FW with open-loop step-size $\eta_t=\frac{1}{t+1}$ converges at a rate of order $\cO(1/t)$. For the same setting, FW with line-search either converges at a rate of order $\cO(1/t)$ when $\rho=\frac{1}{4}$ or linearly when $\rho=2$. We have thus demonstrated both theoretically and in practice that there exist settings for which FW with open-loop step-sizes converges non-asymptotically faster than FW with line-search or short-step.
\section{Algorithmic variants}\label{sec:fw_variants}
In Section~\ref{sec:ol_faster_than_ls_ss}, we established that when the feasible region $\cC$ is a polytope, the objective $f$ is strongly convex, and the unique minimizer $x^*\in\argmin_{x\in\cC}f(x)$ lies in the relative interior of an at least one-dimensional face $\cC^*$ of $\cC$, FW with open-loop step-size $\eta_t = \frac{4}{t+4}$ converges at a rate of order $\cO(1/t^2)$. Combined with the convergence-rate lower bound of $\Omega(1/t^{1+\epsilon})$ for any $\epsilon > 0$ for FW with line-search or short-step by \citet{wolfe1970convergence}, this characterizes a problem setting for which FW with open-loop step-sizes converges non-asymptotically faster than FW with line-search or short-step. However, our accelerated convergence rate only holds when strict complementarity or similar assumptions hold, see Remark~\ref{rem:relaxation_of_strict_complementarity}. Similarly, the accelerated convergence rate of MFW \citep{li2021momentum} in the described setting also relies on the assumption of strict complementarity. Here, we address this gap in the literature and present two FW variants employing open-loop step-sizes that admit convergence rates of order $\cO(1/t^2)$ in the setting of the lower bound due to \citet{wolfe1970convergence} without relying on the assumption of strict complementarity.
\subsection{Decomposition-invariant pairwise Frank-Wolfe algorithm}\label{sec:difw}
Using the proof blueprint from Section~\ref{sec:blueprint}, we derive accelerated convergence rates for the decomposition-invariant pairwise Frank-Wolfe algorithm (DIFW) \citep{garber2016linear} in the setting of the lower bound due to \citet{wolfe1970convergence}. DIFW with line-search or step-size as in Option 1 in \citet[Algorithm~3]{garber2016linear} converges linearly when the feasible region is a specific type of polytope and the objective function is strongly convex. Benefits of DIFW are that the convergence rate does not depend on the dimension of the problem but only on the sparsity of the optimal solution $x^* \in \argmin_{x\in \cC} f(x)$, that is, on $|\supp(x^*)| = |\{i \in \{1, \ldots, d\} \mid x^*_i \neq 0\}| \ll d$, and that it is not necessary to maintain a convex combination of the iterate $x_t$ throughout the algorithm's execution.
The latter property leads to reduced memory overhead compared to other variants of FW that admit linear convergence rates in the setting of \citet{wolfe1970convergence}. The main drawback of DIFW is that the method is not applicable to general polytopes, but only feasible regions that are similar to the simplex, that is, of the form described below. \begin{definition}[Simplex-like polytope (SLP)]\label{def:difw} Let $\cC\subseteq \R^d$ be a polytope such that $\cC$ can be described as $\cC = \{x\in \R^d \mid x\geq 0, Ax=b\}$ for $A\in\R^{m \times d}$ and $b\in \R^m$ for some $m\in \N$ and all vertices of $\cC$ lie on the Boolean hypercube $\{0, 1\}^d$. Then, we refer to $\cC$ as a \emph{simplex-like polytope} (SLP). \end{definition} Examples of SLPs are the probability simplex and the flow, perfect matchings, and marginal polytopes, see \citet{garber2016linear} and references therein for more details. In this section, we show that DIFW with open-loop step-size $\eta_t = \frac{8}{t+8}$ admits a convergence rate of order up to $\cO(1/t^2)$ when optimizing a function satisfying \eqref{eq:heb} over a SLP. \begin{algorithm}[th!] \SetKwInput{Input}{Input} \SetKwInput{Output}{Output} \SetKwComment{Comment}{$\triangleright$\ }{} \caption{Decomposition-invariant pairwise Frank-Wolfe algorithm (DIFW) \citep{garber2016linear}}\label{algo:difw} \Input{$x_0\in\cC$, step-sizes $\eta_t\in [0, 1]$ for $t\in\{0,\ldots, T-1\}$.} \hrulealg {$x_1 \in \argmin_{p\in \cC} \langle \nabla f(x_0), p - x_0 \rangle$}\\ \For{$t= 0, \ldots, T-1 $}{ {$p_t^+ \in \argmin_{p\in \cC} \langle \nabla f(x_t), p - x_t \rangle$\label{line:fw_vertex_difw}}\\ {Define the vector $\tilde{\nabla}f(x_t) \in \R^d$ entry-wise for all $i\in\{1, \ldots, d\}$: \begin{equation*} (\tilde{\nabla} f(x_t))_i = \begin{cases} (\nabla f(x_t))_i, & \text{if} \ (x_t)_i > 0 \\ -\infty , & \text{if} \ (x_t)_i = 0. \end{cases}\label{line:defining_gradient_difw} \end{equation*}}\\ {$p_t^- \in \argmin_{p\in \cC} \langle -\tilde{\nabla}f(x_t), p - x_t\rangle$}\\ {Let $\delta_t$ be the smallest natural number such that $2^{-\delta_t} \leq \eta_t$, and define the new step-size $\gamma_t \gets 2^{-\delta_t}$.}\\ {$x_{t+1} \gets x_t + \gamma_t (p_t^+ - p_t^-)$} } \end{algorithm} \subsubsection{Algorithm overview} We refer to $p_t^+$ and $p_t^-$ as the FW vertex and away vertex, respectively. At iteration $t\in\{0,\ldots, T\}$, consider the representation of $x_t$ as a convex combination of vertices of $\cC$, that is, $x_t = \sum_{i=0}^{t-1} \lambda_{p_i,t} p_i$, where $p_i \in \vertices (\cC)$ and $\lambda_{p_i, t} \geq 0$ for all $i\in\{0,\ldots, t-1\}$ and $\sum_{i=0}^{t-1}\lambda_{p_i, t} = 1$. DIFW takes a step in the direction $\frac{p_t^+-p_t^-}{\|{p_t^+-p_t^-}\|_2}$, which moves weight from the away vertex $p_t^-$ to the FW vertex $p_t^+$. Note that DIFW does not need to actively maintain a convex combination of $x_t$ because of the assumption that the feasible region is a SLP. \subsubsection{Convergence rate of order $\cO(1/t)$} We first derive a baseline convergence rate of order $\cO(1/t)$ for DIFW with open-loop step-size $\eta_t = \frac{8}{t+8}$. \begin{proposition}[Convergence rate of order $\cO(1/t)$]\label{prop:baseline_difw} Let $\cC\subseteq \R^d$ be a SLP of diameter $\delta > 0$ and let $f\colon \cC \to \R$ be a convex and $L$-smooth function with unique minimizer $x^*\in\argmin_{x\in\cC}f(x)$. Let $T\in\N$ and $\eta_t = \frac{8}{t+8}$ for all $t\in\Z$. 
Then, for the iterates of Algorithm~\ref{algo:difw} with open-loop step-size $\eta_t$, it holds that $h_t \leq \frac{32L \delta^2}{t+7} = \eta_{t-1} 4 L \delta^2 $ for all $t\in\{1,\ldots, T\}$. \end{proposition} \begin{proof} Let $t\in\{0, \ldots, T-1\}$. Feasibility of $x_t$ follows from Lemma~1 in \citet{garber2016linear}. Further, in the proof of Lemma $3$ in \citet{garber2016linear}, it is shown that \begin{align}\label{eq:difw_basic_bound} h_{t+1} & \leq h_t + \frac{\eta_t \langle \nabla f(x_t), p_t^+ - p_t^-\rangle}{2} + \frac{\eta_t^2 L \delta^2}{2}. \end{align} Consider an irreducible representation of $x_t$ as a convex sum of vertices of $\cC$, that is, $x_t = \sum_{i=0}^{k}\lambda_{p_i, t} p_i$ such that $p_i\in\vertices(\cC)$ and $\lambda_{p_i, t} > 0$ for all $i \in \{ 0, \ldots , k\}$, where $k\in \N$. By Observation 1 in \citet{garber2016linear}, it holds that $\langle \nabla f(x_t), p_i\rangle \leq \langle \nabla f(x_t), p_t^-\rangle$ for all $i\in\{0, \ldots, k\}$. Thus, $\langle \nabla f(x_t), x_t - p_t^- \rangle \leq \langle \nabla f(x_t), x_t - \sum_{i=0}^{k}\lambda_{p_i, t} p_i \rangle \leq \langle \nabla f(x_t), x_t - x_t \rangle = 0$. Plugging this inequality into \eqref{eq:difw_basic_bound}, using $\langle \nabla f(x_t), p_t^+ -x_t \rangle \leq -h_t$, and using $h_1 \leq \frac{L\delta^2}{2} $, which is derived in the proof of Theorem~1 in \citet{garber2016linear}, we obtain \begin{align} h_{t+1} & \leq h_t + \frac{\eta_t \langle \nabla f(x_t), p_t^+ -x_t \rangle}{2} + \frac{\eta_t \langle \nabla f(x_t), x_t - p_t^-\rangle}{2} + \eta_t^2\frac{ L \delta^2}{2} \nonumber\\ & \leq (1 - \frac{\eta_t}{2}) h_t + \eta_t^2\frac{ L \delta^2}{2} \label{eq:need_for_difw}\\ & \leq \prod_{i = 1}^t (1 - \frac{\eta_i}{2})h_1 + \frac{L\delta^2}{2} \sum_{i = 1}^t \eta_i^2 \prod_{j = i+1}^t (1 - \frac{\eta_j}{2}) \nonumber\\ & = \frac{5\cdot 6 \cdot 7 \cdot 8}{(t+5)(t+6)(t+7)(t+8)} h_1 + \frac{L \delta^2}{2} \sum_{i = 1}^t\frac{8^2}{(i+8)^2} \frac{(i+5)(i+6)(i+7)(i+8)}{(t+5) (t+6)(t+7)(t+8)}\nonumber\\ & \leq \frac{64L\delta^2}{2}(\frac{1}{(t+7) (t+8)} + \frac{t}{(t+7) (t+8)}) \nonumber\\ & \leq \frac{32L\delta^2}{t+8}.\nonumber \end{align} \end{proof} \subsubsection{{Convergence rate of order up to $\cO(1/t^2)$}} Then, acceleration follows almost immediately from the analysis performed in \citet{garber2016linear}. \begin{theorem}[Convergence rate of order up to $\cO(1/t^2)$]\label{thm:difw_slp} Let $\cC\subseteq\R^d$ be a SLP of diameter $\delta > 0$ and let $f\colon \cC \to \R$ be a convex and $L$-smooth function satisfying a $(\mu, \theta)$-\eqref{eq:heb} for some $\mu > 0$ and $\theta \in [0, 1/2]$. Let $T\in\N$ and $\eta_t = \frac{8}{t+8}$ for all $t\in\Z$. Then, for the iterates of Algorithm~\ref{algo:difw} with open-loop step-size $\eta_t$, it holds that \begin{align}\label{eq:difw_pg} h_t & \leq \max \left\{ \eta_{t-2}^{1/(1-\theta)}\frac{L\delta^2}{2}, \left(\eta_{t-2} 2\mu L \delta^2\sqrt{|\supp(x^*)|} \right)^{1/(1-\theta)} + \eta_{t-2}^2\frac{ L \delta^2}{2} \right\}. \end{align} for all $t\in\{1,\ldots, T\}$. \end{theorem} \begin{proof} Let $t\in\{1,\ldots, T-1\}$. We can extend Lemma~$3$ in \citet{garber2016linear} from $\alpha_f$-strongly convex functions to convex functions satisfying \eqref{eq:heb}. Strong convexity is only used to show that $\Delta_t := \sqrt{\frac{2 |\supp(x^*)| h_t}{\alpha_f}}$ satisfies $\Delta_t \geq \sqrt{|\supp(x^*)|} \|x_t - x^*\|_2$. 
Here, we instead define $\Delta_t:= \sqrt{|\supp(x^*)|}\mu h_t^\theta$ for a function $f$ satisfying a $(\mu, \theta)$-\eqref{eq:heb}. Then, $\Delta_t \geq \sqrt{|\supp(x^*)|} \|x_t - x^*\|_2$. By Lemma~$3$ in \citet{garber2016linear}, we have $h_{t+1} \leq h_t -\frac{\eta_th_t^{1-\theta}}{2\mu \sqrt{|\supp(x^*)|}} + \eta_t^2\frac{ L \delta^2}{2}$. Combined with \eqref{eq:need_for_difw}, \begin{align}\label{eq:reason_for_new_ss} h_{t+1} & \leq \left(1 - \frac{\eta_t}{4}\right)h_t -\frac{\eta_th_t^{1-\theta}}{4\mu \sqrt{|\supp(x^*)|}} + \eta_t^2\frac{ L \delta^2}{2}. \end{align} Using the same proof technique as in Lemma~\ref{lemma:sequences}, we prove that \begin{align}\label{eq:difw_cd} h_t & \leq \max \left\{ \left(\eta_{t-2}\eta_{t-1}\right)^{1/(2(1-\theta))}\frac{L\delta^2}{2}, \left(\eta_{t-2}\eta_{t-1} \left(2\mu L \delta^2\sqrt{|\supp(x^*)|}\right)^2 \right)^{1/(2(1-\theta))} + \eta_{t-2}\eta_{t-1}\frac{ L \delta^2}{2} \right\} \end{align} for all $t\in\{1,\ldots, T\}$, which then implies \eqref{eq:difw_pg}. For $t = 1$, $h_1 \leq \frac{L\delta^2}{2}$ and \eqref{eq:difw_cd} holds. Suppose that \eqref{eq:difw_cd} is satisfied for a specific iteration $t\in\{1,\ldots, T-1\}$. We distinguish between two cases. First, suppose that $h_t \leq (\eta_t 2\mu L \delta^2\sqrt{|\supp(x^*)|} )^{1/(1-\theta)} $. Plugging this bound on $h_t$ into \eqref{eq:reason_for_new_ss} yields $h_{t+1} \leq (\eta_t 2\mu L \delta^2\sqrt{|\supp(x^*)|} )^{1/(1-\theta)} + \frac{ \eta_t^2L \delta^2}{2} \leq (\eta_{t-1}\eta_t (2\mu L \delta^2\sqrt{|\supp(x^*)|})^2 )^{1/(2(1-\theta))} + \eta_{t-1}\eta_t\frac{ L \delta^2}{2}$. Next, suppose that $h_t \geq (\eta_t 2\mu L \delta^2\sqrt{|\supp(x^*)|} )^{1/(1-\theta)}$. Plugging this bound on $h_t$ into \eqref{eq:reason_for_new_ss} and using the induction assumption yields {\footnotesize \begin{align}\label{eq:need_small_steps} h_{t+1} &\leq (1 - \frac{\eta_t}{4})h_t + 0 \\ &= \frac{t+6}{t+8} h_t\nonumber\\ &\leq \frac{\eta_{t}}{\eta_{t-2}} h_t\nonumber\\ &\leq \frac{\eta_{t}}{\eta_{t-2}} \max \left\{ \left(\eta_{t-2}\eta_{t-1}\right)^{1/(2(1-\theta))}\frac{L\delta^2}{2}, \left(\eta_{t-2}\eta_{t-1} \left(2\mu L \delta^2\sqrt{|\supp(x^*)|}\right)^2 \right)^{1/(2(1-\theta))} + \eta_{t-2}\eta_{t-1}\frac{ L \delta^2}{2} \right\}\nonumber\\ & \leq \max \left\{ \left(\eta_{t-1}\eta_{t}\right)^{1/(2(1-\theta))}\frac{L\delta^2}{2}, \left(\eta_{t-1}\eta_{t} \left(2\mu L \delta^2\sqrt{|\supp(x^*)|}\right)^2 \right)^{1/(2(1-\theta))} + \eta_{t-1}\eta_{t}\frac{ L \delta^2}{2} \right\}, \nonumber \end{align} }where the last inequality holds due to $\frac{\eta_t}{\eta_{t-2}}(\eta_{t-2}\eta_{t-1})^{1/(2(1-\theta))} \leq (\eta_{t-1}\eta_{t})^{1/(2(1-\theta))}$ for $\frac{\eta_t}{\eta_{t-2}}\in [0,1]$ and $1/(2(1-\theta)) \in [1/2,1]$. In either case, \eqref{eq:difw_cd} is satisfied for $t+1$. By induction, the theorem follows. \end{proof} Below, we discuss the technical necessity for $\eta_t = \frac{8}{t+8}$ instead of $\eta_t = \frac{4}{t+4}$ in Theorem~\ref{thm:difw_slp}. \begin{remark}[Necessity of $\eta_t = \frac{8}{t+8}$]\label{rem:necessity_for_switch_difw} Note that Inequality~\eqref{eq:reason_for_new_ss} is responsible for making our usual proof with $\eta_t = \frac{4}{t+4}$, $t\in\Z$, impossible. Indeed, for $\eta_t = \frac{4}{t+4}$, $(1- \frac{\eta_t}{4}) = \frac{t+3}{t+4}$, which is not enough progress in, for example, \eqref{eq:need_small_steps} assuming that $\theta = \frac{1}{2}$, to obtain a convergence rate of order $\cO(1/t^2)$. 
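Concretely, the chain of inequalities in \eqref{eq:need_small_steps} requires $1 - \frac{\eta_t}{4} \leq \frac{\eta_t}{\eta_{t-2}}$: for $\eta_t = \frac{8}{t+8}$ both sides equal $\frac{t+6}{t+8}$, whereas for $\eta_t = \frac{4}{t+4}$ this would read $\frac{t+3}{t+4} \leq \frac{t+2}{t+4}$, which fails for every $t\in\N$.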
\end{remark} \subsection{Away-step Frank-Wolfe algorithm}\label{sec:afw} \begin{algorithm}[h] \SetKwInput{Input}{Input} \SetKwInput{Output}{Output} \SetKwComment{Comment}{$\triangleright$\ }{} \caption{Away-step Frank-Wolfe algorithm (AFW) with open-loop step-sizes}\label{algo:afw} \Input{$x_0\in \vertices (\cC)$, step-sizes $\eta_t\in [0, 1]$ for $t\in\{0, \ldots, T-1\}$.} \hrulealg {$\cS_0 \gets \{x_0\}$}\\ {$\lambda_{p,0} \gets \begin{cases} 1, & \text{if} \ p = x_0\\ 0, & \text{if} \ p \in\vertices(\cC) \setminus \{x_0\} \end{cases}$}\\ {$\ell_0 \gets 0$ \Comment*[f]{$\ell_t:$ number of progress steps performed before iteration $t$}\label{line:l}}\\ \For{$t= 0,\ldots, T-1 $}{ {$p_{t}^{FW} \in \argmin_{p \in \cC} \langle\nabla f(x_{t}), p- x_{t}\rangle$\label{line:p_fw}}\\ {$p_{t}^{A} \in \argmax_{p \in \cS_t} \langle\nabla f(x_{t}), p- x_{t}\rangle$\label{line:p_a}}\\ \uIf{$\langle\nabla f (x_t) ,p_t^{FW} - x_t\rangle \leq \langle \nabla f(x_t), x_t -p_t^A\rangle$\label{eq: which_direction}}{ {$d_t \gets p_t^{FW} - x_t$; $\eta_{t, \max} \gets 1$ \label{eq:AFW_line_p_FW}}} \Else{ {$d_t \gets x_t - p_t^A$; $\eta_{t, \max} \gets \frac{\lambda_{{p_t^A},t}}{1-\lambda_{{p_t^A},t}}$ \label{eq:AFW_line_p_A}} } {$\gamma_t \gets \min\left\{\eta_{\ell_t}, \eta_{t, \max}\right\}$\label{line:gamma_t}}\\ {$x_{t+1} \gets x_t + \gamma_t d_t$}\\ \uIf{$\langle\nabla f (x_t) ,p_t^{FW} - x_t\rangle \leq \langle \nabla f(x_t), x_t -p_t^A\rangle$}{ {$\lambda_{p,t+1} \gets \begin{cases} (1- \gamma_t) \lambda_{p, t} + \gamma_t, & \text{if} \ p=p_t^{FW}\\ (1-\gamma_t) \lambda_{p,t}, & \text{if} \ p\in\vertices(\cC) \setminus\{p_t^{FW}\} \end{cases}$} } \Else{ {$\lambda_{p,t+1} \gets \begin{cases} (1+ \gamma_t) \lambda_{p, t} - \gamma_t, & \text{if} \ p=p_t^{A}\\ (1+\gamma_t) \lambda_{p,t}, & \text{if} \ p\in\vertices(\cC) \setminus\{p_t^{A}\} \end{cases}$} } {$\cS_{t+1} \gets \{p\in\vertices(\cC) \mid \lambda_{p, t+1} >0\}$}\\ \uIf{$(\eta_{\ell_t} - \gamma_t) \langle \nabla f(x_t), p_t^A - p_t^{FW}\rangle \leq (\eta_{\ell_t}^2 - \gamma_t^2)L\delta^2 $\label{line:no_ds1}}{ {$\ell_{t+1} \gets \ell_t + 1$\label{line:no_ds4} \Comment*[f]{progress step}} } \Else{\label{line:no_ds3} {$\ell_{t+1} \gets \ell_t$\Comment*[f]{non-progress step}\label{line:no_ds2}} }\label{line:no_ds5} } \end{algorithm} \begin{algorithm}[h] \caption{Away-step Frank-Wolfe algorithm (AFW) with line-search \citep{guelat1986some}}\label{algo:afw_ls_ss} {Identical to Algorithm~\ref{algo:afw}, except that Lines~\ref{line:l}, \ref{line:no_ds1}, \ref{line:no_ds4}, \ref{line:no_ds3}, \ref{line:no_ds2}, and \ref{line:no_ds5} have to be deleted and Line~\ref{line:gamma_t} has to be replaced by $\gamma_t \in \argmin_{\gamma\in [0, \eta_{t, \max}]}f(x_t + \gamma d_t)$.} \end{algorithm} In this section, we derive a version of the away-step Frank-Wolfe algorithm (AFW) \citep{guelat1986some, lacoste2015global} with step-size $\eta_t = \frac{4}{t+4}$ that admits a convergence rate of order up to $\cO(1/t^2)$ when optimizing a function satisfying \eqref{eq:heb} over a polytope. \subsubsection{Algorithm overview} For better understanding, we first discuss AFW with line-search, which is presented in Algorithm~\ref{algo:afw_ls_ss}. At iteration $t\in\{0,\ldots, T\}$, we can write $x_t = \sum_{i=0}^{t-1} \lambda_{p_i,t} p_i$, where $p_i \in \vertices (\cC)$ and $\lambda_{p_i, t} \geq 0$ for all $i\in\{0,\ldots, t-1\}$ and $\sum_{i=0}^{t-1}\lambda_{p_i, t} = 1$. We refer to $\cS_t := \{p_i \mid \lambda_{p_i, t} > 0\}$ as the active set at iteration $t$. 
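As a minimal illustration of this bookkeeping (not part of the analysis), the following Python sketch stores the active set as a dictionary mapping vertices to weights and applies the two weight updates of Algorithm~\ref{algo:afw}; representing vertices as tuples and the toy unit-square instance are our own illustrative choices.
\begin{verbatim}
def fw_step_update(weights, p_fw, gamma):
    """FW step: lambda_{p,t+1} = (1 - gamma) lambda_{p,t}, plus gamma for p = p_fw."""
    new = {p: (1.0 - gamma) * w for p, w in weights.items()}
    new[p_fw] = new.get(p_fw, 0.0) + gamma
    return {p: w for p, w in new.items() if w > 0.0}       # active set S_{t+1}

def away_step_update(weights, p_a, gamma):
    """Away step: lambda_{p,t+1} = (1 + gamma) lambda_{p,t}, minus gamma for p = p_a.
    Requires gamma <= lambda_{p_a,t} / (1 - lambda_{p_a,t})."""
    new = {p: (1.0 + gamma) * w for p, w in weights.items()}
    new[p_a] -= gamma
    return {p: w for p, w in new.items() if w > 1e-12}     # weight reaches 0: drop step

# toy run on the unit square, starting from a single vertex
weights = {(0.0, 0.0): 1.0}
weights = fw_step_update(weights, (1.0, 0.0), gamma=0.5)    # {(0,0): 0.5, (1,0): 0.5}
weights = away_step_update(weights, (0.0, 0.0), gamma=1.0)  # drop step: {(1,0): 1.0}
\end{verbatim}
The iterate is recovered as $x_t = \sum_{p \in \cS_t} \lambda_{p,t}\, p$, so this dictionary is the only bookkeeping AFW needs beyond the gradient computations.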
Note that maintaining the active set can incur a significant memory overhead. However, with AFW, instead of being limited to taking a step in the direction of a vertex $p_t^{FW}\in \vertices (\cC)$ as in Line~\ref{line:p_t_det} of vanilla FW, we are also able to take an away step: Compute $p_t^{A} \in \argmax_{p\in \cS_t} \langle \nabla f (x_t), p - x_t \rangle$ and take a step away from vertex $p_t^{A}$, removing weight from vertex $p_t^{A}$ and adding it to all other vertices in the active set. Away steps make it possible to take drop steps. A drop step occurs when a vertex gets removed from the active set. In case $x^*$ lies in the relative interior of an at least one-dimensional face $\cC^*$ of $\cC$, drop steps allow AFW to get rid of bad vertices in the convex combination representing $x_t$, that is, vertices not in $\cC^*$. As soon as the optimal face is reached, that is, $x_t \in \cC^*$, the problem becomes that of having the optimal solution in the relative interior of $\cC^*$, for which FW with line-search admits linear convergence rates.
We next explain AFW with step-size $\eta_t = \frac{4}{t+4}$, presented in Algorithm~\ref{algo:afw}, which requires a slight modification of the version presented in \citet{lacoste2015global}. The main idea is to replace line-search with the open-loop step-size $\eta_t = \frac{4}{t+4}$. However, as we motivate in detail below, at iteration $t\in\{0,\ldots, T-1\}$, AFW's step-length is $\eta_{\ell_t}$, where $0 = \ell_0 \leq \ell_1 \leq \ldots \leq \ell_{T-1}\leq T-1$, that is, AFW may perform multiple steps of the same length.
Let $t\in\{0,\ldots, T-1\}$. Note that for $d_t$ obtained from either Line~\eqref{eq:AFW_line_p_FW} or Line~\eqref{eq:AFW_line_p_A} in Algorithm~\ref{algo:afw}, it holds that $\langle \nabla f (x_t), d_t \rangle \leq \langle \nabla f(x_t), p_t^{FW} -p_t^A \rangle /2$. By $L$-smoothness,
\begin{align}\label{eq:contract_afw_poly_without_scaling}
h_{t+1} & \leq h_t - \frac{\gamma_t \langle \nabla f(x_t), p_t^A - p_t^{FW}\rangle}{2} + \frac{\gamma_t^2 L\delta^2}{2}.
\end{align}
Working towards a convergence rate of order up to $\cO(1/t^2)$, we need to characterize a subsequence of steps for which an inequality of the form \eqref{eq:gotta_derive_this} holds. To do so, let
\begin{align*}
g_t(\gamma) := - \frac{\gamma \langle \nabla f(x_t), p_t^A - p_t^{FW}\rangle}{2} + \frac{\gamma^2 L\delta^2}{2} \qquad \text{for} \ \gamma \in [0,1].
\end{align*}
We refer to all iterations $t\in\{0,\ldots, T-1\}$ such that $g_t(\gamma_t) \leq g_t(\eta_{\ell_t})$ as \emph{progress steps} and denote the number of progress steps performed before iteration $t\in\{0,\ldots, T\}$ by $\ell_t$, see Lines~\ref{line:l}, \ref{line:gamma_t}, and~\ref{line:no_ds1}-\ref{line:no_ds5} of Algorithm~\ref{algo:afw}. Thus, a progress step occurs during iteration $t$ if and only if the inequality in Line~\ref{line:no_ds1} is satisfied, which necessitates the computation of the smoothness constant $L$ of $f$ prior to the execution of the algorithm. A non-drop step is always a progress step because then $\gamma_t = \eta_{\ell_t}$, and the following lemma shows that drop steps which are non-progress steps do not increase the primal gap.
\begin{lemma}[Drop-step characterization]\label{lemma:contraction}
Let $g\colon [0,1] \to \R$ be defined via $g(\eta) := - \eta A + \eta^2 B$, where $A,B > 0$. For $t\in\N$, let $\eta_t = \frac{4}{t+4}$ and $\gamma_t \in [0, \eta_t]$. Then, $g(\gamma_t) \leq g(0)$ or $g(\gamma_t) \leq g(\eta_t)$.
\end{lemma} \begin{proof} By case distinction. Let $t\in\N$. Case 1: $g(\eta_t) \leq g(0)$. By convexity, $g(\gamma_t) = g(\lambda \eta_t + (1-\lambda) 0) \leq \lambda g(\eta_t) + (1-\lambda) g(0) \leq g(0) = 0$ where $\lambda \in [0,1]$. Case 2: $g(\eta_t) > g(0)$. Then, $\eta_t > \eta^* \in \argmin_{\eta \in [0, \eta_t]} g(\eta)$, as $g$ is monotonously decreasing in the interval $[0, \eta^*]$. If $\eta^* \leq \gamma_t$, then $g(\gamma_t) \leq g(\eta_t)$ due to $g$ being monotonously increasing in $[\eta^*, \eta_t]$. If $\eta^* \geq \gamma_t$, then $g(\gamma_t) \leq g(0)$, as $g$ is monotonously decreasing in $[0, \eta^*]$. \end{proof} Thus, a drop step is either a progress step and $h_{t+1} \leq h_t + g_t(\eta_{\ell_t})$, or $h_{t+1} \leq h_t$. \begin{lemma}[Number of progress steps]\label{lemma:n_progress} Let $\cC \subseteq \R^d$ be a compact convex set of diameter $\delta > 0$, let $f\colon \cC \to \R$ be a convex and $L$-smooth function. Let $T\in\N$ and $\eta_t = \frac{4}{t+4}$ for all $t\in\Z$. Then, for all iterations $t\in\{0,\ldots, T\}$ of Algorithm~\ref{algo:afw} with step-size $\eta_t$, it holds that $\ell_t\geq \lceil t/2\rceil \geq t/2$. \end{lemma} \begin{proof} Since all non-drop steps are progress steps and $\cS_t$, where $t\in\{0, \ldots, T\}$, has to contain at least one vertex of $\cC$, there cannot occur more drop steps than non-drop steps. Thus, $\ell_t\geq \lceil t/2\rceil \geq t/2$. \end{proof} \subsubsection{Convergence rate of order $\cO(1/t)$} We first derive a baseline convergence rate of order $\cO(1/t)$ for AFW with step-size $\eta_t = \frac{4}{t+4}$. \begin{proposition}[Convergence rate of order $\cO(1/t)$]\label{prop:baseline_afw} Let $\cC \subseteq \R^d$ be a compact convex set of diameter $\delta > 0$, let $f\colon \cC \to \R$ be a convex and $L$-smooth function. Let $T\in\N$ and $\eta_t = \frac{4}{t+4}$ for all $t\in\Z$. Then, for the iterates of Algorithm~\ref{algo:afw} with step-size $\eta_t$, it holds that $h_t \leq \frac{ 16 L \delta^2}{t+6} = \eta_{t+2} 4 L \delta^2$ for all $t\in\{1,\ldots, T\}$. \end{proposition} \begin{proof} Let $t\in\{0,\ldots, T-1\}$ and suppose that during iteration $t$, we perform a progress step. Either $d_t = p_t^{FW}-x_t$, or $d_t = x_t - p_t^A$ and by Line \ref{eq: which_direction} of Algorithm~\ref{algo:afw}, $\langle \nabla f (x_t), x_t - p_t^A \rangle \leq \langle \nabla f(x_t), p_t^{FW} - x_t \rangle$. In either case, by $L$-smoothness, \begin{align}\label{eq:basic_bf_bound_afw} h_{t+1} & \leq h_t - \gamma_{t} \langle \nabla f (x_t), x_t - p_t^{FW} \rangle + \frac{\gamma_{t}^2 L \delta^2}{2} \leq (1 - \gamma_{t})h_t + \frac{\gamma_{t}^2 L \delta^2}{2}. \end{align} By Lemma~\ref{lemma:contraction}, since non-progress steps do not increase the primal gap, we can limit our analysis to the subsequence of iterations corresponding to progress steps, $\{t^{(k)}\}_{k\in\{0,\ldots, \ell_T\}}$, for which, by \eqref{eq:basic_bf_bound_afw}, it holds that \begin{align}\label{eq:necessary_for_acceleration} h_{t^{(k+1)}} & \leq (1 - \eta_{\ell_{t^{(k)}}}) h_{t^{(k)}} + \frac{\eta_{\ell_{t^{(k)}}}^2L\delta^2}{2} = (1 - \eta_k) h_{t^{(k)}} + \frac{\eta_k^2L\delta^2}{2} \end{align} for all $k\in\{0, \ldots, \ell_T-1\}$. Since the first step is a non-drop step and thus a progress step, $h_{t^{(1)}} \leq h_1 \leq \frac{L\delta^2}{2}$. 
By similar arguments as in the proof of Proposition~\ref{prop:generalization_jaggi} starting with \eqref{eq:always_combine_with_this}, we obtain the bound $h_{t^{(k)}} \leq \frac{8L \delta^2}{k + 3}$ for all $k\in\{1,\ldots, \ell_T\}$. Since non-progress steps do not increase the primal gap and by Lemma~\ref{lemma:n_progress}, $h_t\leq h_{t^{(\ell_t)}} \leq \frac{8L \delta^2}{\ell_t+3} \leq \frac{16L \delta^2}{t + 6} = \eta_{t+2} 4 L \delta^2$ for all $t\in\{1,\ldots, T\}$. \end{proof} \subsubsection{Convergence rate of order up to $\cO(1/t^2)$} The introduction of away steps introduces another type of scaling inequality based on the \emph{pyramidal width}, a constant depending on the feasible region, see \citet{lacoste2015global} for more details. \begin{lemma}[\citealp{lacoste2015global}]\label{lemma:away_step_scaling} Let $\cC\subseteq \R^d$ be a polytope with pyramidal width $\omega > 0$ and let $f\colon \cC \to \R$ be a convex function with unique minimizer $x^*\in\argmin_{x\in\cC}f(x)$. Let $p^{FW} \in \argmin_{p\in \cC} \langle \nabla f (x), p \rangle$ and $p^A \in \argmax_{p\in \cS} \langle \nabla f(x), p \rangle$ for some $\cS \subseteq \vertices (\cC)$ such that $x\in \conv(\cS)$. Then, it holds that \begin{align}\tag{Scaling-A}\label{eq:scaling_a} \frac{\langle \nabla f(x), p^A - p^{FW}\rangle}{\omega} \geq \frac{\langle \nabla f (x), x - x^*\rangle}{\|x-x^*\|_2}. \end{align} \end{lemma} For example, the pyramidal width of the unit cube in $\R^d$ satisfies $\omega\geq 2/\sqrt{d}$ \citep{lacoste2015global} and the pyramidal width of the $\ell_1$-ball in $\R^d$ satisfies $\omega \geq {1}/{\sqrt{d-1}}$ \citep{wirth2023approximate}. Combining \eqref{eq:scaling_a} and \eqref{eq:scaling_heb} leads to a subsequence of primal gaps of the form \eqref{eq:gotta_derive_this} and a convergence rate of order up to $\cO(1/t^2)$ for Algorithm~\ref{algo:afw}. \begin{theorem}[Convergence rate of order up to $\cO(1/t^2)$]\label{theorem:afw_polytope} Let $\cC\subseteq \R^d$ be a polytope of diameter $\delta >0$ and pyramidal width $\omega >0$ and let $f\colon \cC \to \R$ be a convex and $L$-smooth function satisfying a $(\mu, \theta)$-\eqref{eq:heb} for some $\mu > 0 $ and $\theta \in [0, 1/2]$ with unique minimizer $x^*\in\argmin_{x\in\cC} f(x)$. Let $T\in\N$ and $\eta_t=\frac{4}{t+4}$ for all $t\in\Z$. Then, for the iterates of Algorithm~\ref{algo:afw} with step-size $\eta_t$, it holds that \begin{align}\label{eq:to_derive_acc_afw} h_{t} & \leq \max \left\{ \eta_{\lceil t/2 -2 \rceil}^{1/(1-\theta)} \frac{L\delta^2}{2}, \left(\frac{\eta_{\lceil t/2 -2 \rceil} 2 \mu L \delta^2}{\omega}\right)^{1/(1-\theta)} + \eta_{\lceil t/2 -2 \rceil}^2 \frac{L\delta^2}{2}\right\} \end{align} for all $t\in\{1,\ldots, T\}$. \end{theorem} \begin{proof} Let $t\in\{0,\ldots, T-1\}$. By \eqref{eq:contract_afw_poly_without_scaling}, \eqref{eq:scaling_a}, convexity of $f$, and \eqref{eq:scaling_heb}, it holds that $h_{t+1} \leq h_t - \frac{\gamma_t \omega \langle \nabla f(x_t), x_t-x^*\rangle}{2\|x_t-x^*\|_2} + \frac{\gamma_t^2L\delta^2}{2} \leq h_t - \frac{\gamma_t \omega}{2 \mu} h_t^{1-\theta} + \frac{\gamma_t^2L\delta^2}{2}$. Thus, by Lemma~\ref{lemma:contraction}, non-progress steps satisfy $h_{t+1} \leq h_t$ and progress steps satisfy \begin{align}\label{eq:contract_afw_actual_step_size} h_{t+1}\leq h_t - \frac{\eta_{\ell_t} \omega}{2 \mu} h_t^{1-\theta} + \frac{\eta_{\ell_t}^2L\delta^2}{2}. 
\end{align}
Since non-progress steps do not increase the primal gap, we can limit our analysis to the subsequence of iterations corresponding to progress steps, $\{t^{(k)}\}_{k\in\{0,\ldots, \ell_T\}}$, for which, by \eqref{eq:contract_afw_actual_step_size}, it holds that
\begin{align*}
h_{t^{(k+1)}} \leq h_{t^{(k)}} - \frac{\eta_{\ell_{t^{(k)}}}\omega}{2\mu} h_{t^{(k)}}^{1-\theta} + \frac{\eta_{\ell_{t^{(k)}}}^2L\delta^2}{2} = h_{t^{(k)}} - \frac{\eta_k\omega}{2\mu} h_{t^{(k)}}^{1-\theta} + \frac{\eta_k^2L\delta^2}{2}.
\end{align*}
Combined with \eqref{eq:necessary_for_acceleration}, it thus holds that
\begin{align}\label{eq:afw_apply_sequence_lemma}
h_{t^{(k+1)}} \leq (1 - \frac{\eta_k}{2})h_{t^{(k)}} - \frac{\eta_k\omega}{4\mu} h_{t^{(k)}}^{1-\theta} + \frac{\eta_k^2L\delta^2}{2}
\end{align}
for all $k \in \{1,\ldots, \ell_T-1\}$. Since the first step is a non-drop step and thus a progress step, $h_{t^{(1)}} \leq h_1 \leq \frac{L\delta^2}{2}$. Inequality~\eqref{eq:afw_apply_sequence_lemma} allows us to apply Lemma~\ref{lemma:sequences} with $A = \frac{\omega}{4 \mu}$, $B = \frac{L\delta^2}{2}$, $C= 1$, $C_{t^{(k)}} = 1$ for all $k \in \{1,\ldots, \ell_T-1\}$, $\psi = \theta$, and $\fwt =1$, resulting in $h_{t^{(k)}} \leq \max \left\{ \eta_{k-2}^{1/(1-\theta)} \frac{L\delta^2}{2}, \left(\frac{\eta_{k-2} 2 \mu L \delta^2}{\omega}\right)^{1/(1-\theta)} + \eta_{k-2}^2 \frac{L\delta^2}{2}\right\} $ for all $k \in \{1,\ldots, \ell_T\}$, where we used that $\eta_{-1} \geq \eta_0 = 1$. Since non-progress steps do not increase the primal gap and by Lemma~\ref{lemma:n_progress}, \eqref{eq:to_derive_acc_afw} holds for all $t\in\{1,\ldots, T\}$.
\end{proof}
\section{{Kernel herding}}\label{sec:kernel_herding}
In this section, we explain why FW with open-loop step-sizes converges at a rate of order $\cO(1/t^2)$ in the kernel-herding setting of \citet[Section~5.1 and Figure~3, right]{bach2012equivalence}.
\subsection{{Kernel herding and the Frank-Wolfe algorithm}}
Kernel herding is equivalent to solving a quadratic optimization problem in a \emph{reproducing kernel Hilbert space} (RKHS) with FW. To describe this application of FW, we use the following notation: Let $\cY\subseteq \R$ be an observation space, $\cH$ an RKHS with inner product $\langle \cdot, \cdot\rangle_\cH$, and $\Phi\colon \cY \to \cH$ the feature map; by the reproducing property, every $x\in \cH$ defines a real-valued function on $\cY$ via $x(y) = \langle x, \Phi(y) \rangle_\cH$ for $y\in \cY$. The positive-definite kernel associated with $\Phi$ is denoted by $k\colon (y,z) \mapsto k(y,z) = \langle \Phi(y), \Phi(z)\rangle_\cH$ for $y, z \in \cY$. In kernel herding, the feasible region is usually the \emph{marginal polytope} $\cC$, the convex hull of all functions $\Phi(y)$ for $y\in \cY$, that is, $\cC = \conv \left( \left\{\Phi(y) \mid y \in \cY\right\} \right)\subseteq \cH$. We consider a fixed probability distribution $p$ over $\cY$ and denote the associated mean element by $\mu = \E_{p(y)}\Phi(y) \in \cC$, where $\mu \in \cC$ follows from the fact that the support of $p$ is contained in $\cY$. In \citet{bach2012equivalence}, kernel herding was shown to be equivalent to solving the following optimization problem with FW and step-size $\eta_t = \frac{1}{t+1}$:
\begin{equation}\tag{OPT-KH}\label{eq:kh}
\min_{x\in \cC} f(x),
\end{equation}
where $f(x):=\frac{1}{2}\|x - \mu\|_\cH^2$. This equivalence led to the study of FW (variants) with other step-sizes to solve \eqref{eq:kh} \citep{chen2012super,lacoste2015sequential,tsuji2022pairwise}.
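To make the correspondence concrete, the following Python sketch runs the herding recursion, that is, FW on \eqref{eq:kh} with $\eta_t = \frac{1}{t+1}$, on a toy instance; the finite candidate grid, the Gaussian kernel, and the uniform distribution $p$ are illustrative stand-ins of our own and not the setting analyzed below.
\begin{verbatim}
import numpy as np

Y = np.linspace(0.0, 1.0, 201)                         # finite stand-in for the observation space
k = lambda y, z: np.exp(-0.5 * ((y - z) / 0.1) ** 2)   # illustrative Gaussian kernel
K = k(Y[:, None], Y[None, :])                          # Gram matrix k(y_i, y_j)
mu_vals = K.mean(axis=0)                               # mu(y) = E_{p(z)} k(z, y), p uniform on Y

samples = [0]                                          # herded points y_1, y_2, ...; y_1 arbitrary
for t in range(1, 200):
    g = K[samples].mean(axis=0)                        # <x_t, Phi(y)> = (1/t) sum_i k(y_i, y)
    samples.append(int(np.argmin(g - mu_vals)))        # LMO; eta_t = 1/(t+1) keeps weights uniform

idx = np.array(samples)
h_t = 0.5 * (K[np.ix_(idx, idx)].mean() - 2.0 * mu_vals[idx].mean() + K.mean())
print(len(idx), h_t)                                   # primal gap f(x_t) = 0.5 ||x_t - mu||_H^2
\end{verbatim}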
Under the assumption that $\|\Phi(y)\|_\cH = R$ for some constant $R > 0$ and all $y\in \cY$, the herding procedure is well-defined and all extreme points of $\cC$ are of the form $\Phi(y)$ for $y\in \cY$ \citep{bach2012equivalence}. Thus, the linear minimization oracle (LMO) in FW always returns an element of the form $\Phi(y) \in \cC$ for $y\in \cY$. Furthermore, FW constructs iterates of the form $x_t = \sum_{i=1}^t v_i \Phi(y_i)$, where $v = (v_1, \ldots, v_t)^\intercal$ is a weight vector, that is, $\sum_{i=1}^tv_i = 1$ and $v_i \geq 0$ for all $i \in \{1, \ldots, t\}$, and $x_t$ corresponds to an empirical distribution $\tilde{p}_t$ over $\cY$ with empirical mean $\tilde{\mu}_t = \E_{\tilde{p}_t(y)}\Phi(y) = \sum_{i=1}^t v_i \Phi(y_i)= x_t \in \cC$. Then, according to \citet{bach2012equivalence}, $\sup_{x\in \cH, \|x\|_\cH = 1}|\E_{p(y)}x(y) - \E_{\tilde{p}_t(y)}x(y)| = \|\mu - \tilde{\mu}_t\|_\cH$. Thus, a bound on $\|\mu - \tilde{\mu}_t\|_\cH$ implies control on the error in computing the expectation for all $x\in \cH$ such that $\|x\|_\cH=1$. In kernel herding, since the objective function is a quadratic, line-search and short-step are identical. \subsection{{Explaining the phenomenon in} \citet{bach2012equivalence}}\label{sec:kernel_whaba} We briefly recall the infinite-dimensional kernel-herding setting of \citet[Section~5.1 and Figure~3, right]{bach2012equivalence}, see also \citet[Section~2.1]{wahba1990spline}. Let $\cY = [0,1]$ and \begin{align}\label{eq:hs} \cH = \{& x \colon {[0,1]} \to \R \mid x'(y) \in L^2({[0,1]}), x(y)= \sum_{j = 1}^{\infty}(a_j \cos(2\pi j y) + b_j \sin(2\pi j y)), a_j, b_j \in \R\}. \end{align} For $w, x\in \cH$, $\langle w, x\rangle_\cH:= \int_{[0,1]} w'(y)x'(y) dy$ defines an inner product and $(\cH, \langle \cdot, \cdot \rangle_\cH)$ is a Hilbert space. Moreover, $\cH$ is also a RKHS and for $y,z\in [0,1]$, $\cH$ has the reproducing kernel \begin{align}\label{eq:whaba_kernel} k(y,z) & = \sum_{j = 1}^\infty \frac{2}{(2\pi j)^{2}}\cos(2 \pi j (y-z)) = \frac{1}{2}B_{2}(y-z-\lfloor y - z\rfloor) = \frac{1}{2}B_{2}([y-z]), \tag{Bernoulli-kernel} \end{align} where for $y \in\R$, $[y] := y-\lfloor y \rfloor$, and $B_2(y) = y^2-y + \frac{1}{6}$ is a \emph{Bernoulli polynomial}. In the right plot of Figure~$3$ in \citet{bach2012equivalence}, kernel herding on $[0, 1]$ and Hilbert space $\cH$ is considered for the uniform density $p(y) := 1$ for all $y\in {[0,1]}$. Then, for all $z\in [0, 1]$, we have $\mu (z) = \int_{[0,1]} k(z,y)p(y) dy = \int_{[0,1]} \sum_{j = 1}^\infty \frac{2}{(2\pi j)^{2}}\cos(2 \pi j (z-y))\cdot 1 dy = \sum_{j = 1}^\infty 0 = 0$, where the integral and the sum can be interchanged due to the theorem of Fubini, see, for example, \citet{royden1988real}. For the remainder of this section, we assume that $p(y) = 1$ and, thus, $\mu(y) = 0$ for all $y\in{[0,1]}$. Thus, $f(x) = \frac{1}{2}\|x\|_\cH^2$. For this setting, \citet{bach2012equivalence} observed empirically that FW with open-loop step-size $\eta_t = \frac{1}{t+1}$ converges at a rate of order $\cO(1/t^2)$, whereas FW with line-search converges at a rate of order $\cO(1/t)$, see the reproduced plot in Figure~\ref{fig:kernel_herding_uniform}. The theorem below explains the accelerated convergence rate for FW with step-size $\eta_t = \frac{1}{t+1}$. 
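Before stating the theorem, we record a short numerical sanity check (not needed for the proofs): assuming the equispaced iterates $\{y_1, \ldots, y_t\} = \{\frac{i-1}{t} \mid i \in \{1, \ldots, t\}\}$ that Lemma~\ref{lemma:third} below establishes for $t = 2^m$, the following Python sketch evaluates $f(x_t) = \frac{1}{2t^2}\sum_{i,j} k(\frac{i-1}{t}, \frac{j-1}{t})$ with the kernel \eqref{eq:whaba_kernel} and recovers the value $\frac{1}{24t^2}$ claimed below.
\begin{verbatim}
import numpy as np

B2 = lambda y: y**2 - y + 1.0 / 6.0                    # Bernoulli polynomial B_2
k = lambda y, z: 0.5 * B2((y - z) - np.floor(y - z))   # k(y, z) = (1/2) B_2([y - z])

for m in range(1, 8):
    t = 2**m
    y = np.arange(t) / t                               # equispaced iterates (i - 1)/t
    f_xt = 0.5 * k(y[:, None], y[None, :]).mean()      # f(x_t) = (1/(2 t^2)) sum_{i,j} k(y_i, y_j)
    print(t, f_xt, 1.0 / (24.0 * t**2))                # the two values agree
\end{verbatim}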
\begin{theorem}[Kernel herding]\label{thm:answering_bach} Let $\cH$ be the Hilbert space defined in \eqref{eq:hs}, let $k \colon \R \times \R \to \cH$ be the kernel defined in \eqref{eq:whaba_kernel}, let $\Phi\colon[0,1] \to\cH$ be the feature map associated with $k$ restricted to $[0,1]\times [0,1]$, let $\cC=\conv(\{\Phi(y)\mid y\in[0,1]\})$ be the marginal polytope, and let $\mu = 0$ such that $f(x) = \frac{1}{2}\|x\|_\cH^2$. Let $T\in\N$ and $\eta_t = \frac{1}{t+1}$ for all $t\in\Z$. Then, for the iterates of Algorithm~\ref{algo:fw} with step-size $\eta_t$ and the LMO satisfying Assumption~\ref{ass:argmin} (a tie-breaking rule), it holds that $f(x_t) = 1/(24 t^2)$ for all $t \in\{1,\ldots, T\}$ such that $t=2^m$ for some $m\in\N$. \end{theorem} We first provide a proof sketch for Theorem~\ref{thm:answering_bach} and subsequently prove the theorem in detail. \begin{proof}[Sketch of proof for Theorem~\ref{thm:answering_bach}] The main idea behind the proof is that FW with $\eta_t = \frac{1}{t+1}$ leads to iterates $x_t = \frac{1}{t}\sum_{i = 1}^t \Phi(y_i)$ with $\{y_1, \ldots, y_t\} = \{\frac{i-1}{t} \mid i = 1, \ldots, t\}$ for all $t = 2^m$, where $m \in \N$. Then, the proof follows by a series of calculations. We make several introductory observations. Note that Line~\ref{line:p_t_det} of Algorithm~\ref{algo:fw} becomes $p_t \in \argmin_{p\in \cC} Df(x_t) (p - x_t) = \argmin_{p\in \cC} Df(x_t)(p)$, where, for $w, x\in \cH$, $D f(w)(x) = \langle w,x \rangle_\cH$ denotes the first derivative of $f$ at $w$. For $x\in \cC$ and $x_t\in \cC$ of the form $x_t = \frac{1}{t}\sum_{i=1}^{t} \Phi(y_i)$ for $y_1,\ldots, y_t\in {[0,1]}$, it holds that $Df(x_t)(x) = \langle \frac{1}{t}\sum_{i=1}^{t} \Phi(y_i), x\rangle_\cH$. Then, for $y\in [0,1]$, let \begin{align}\label{eq:def_g_t} g_t(y) := \langle \frac{1}{t}\sum_{i=1}^t \Phi(y_i), \Phi(y)\rangle_\cH =\frac{1}{t}\sum_{i=1}^t k(y_i, y). \end{align} Since the LMO of FW always returns a vertex of $\cC$ of the form $\Phi(y)$ for $y\in [0, 1]$ \citep{bach2012equivalence}, it holds that $\min_{p\in \cC} Df(x_t)(p) = \min_{y\in {[0,1]}} g_t(y)$ and the vertex returned by the LMO during iteration $t$ is contained in the set $\{\Phi(z) \mid z \in \argmin_{y\in {[0,1]}} g_t(y) \}$. Thus, instead of considering the LMO directly over $\cC$, we can perform the computations over $[0,1]$. To simplify the proof, we make the following assumption on the $\argmin$ operation in the LMO of FW, a tie-breaking rule in case $|\argmin_{p\in \cC}Df(x_t)(p)| \geq 2$. \begin{assumption}\label{ass:argmin} The LMO of FW always returns $p_t \in \argmin_{p\in \cC}Df(x_t)(p)$ such that $p_t = \Phi(z)$ for $ z = \min (\argmin_{y\in {[0,1]}} g_t(y))$. \end{assumption} Recall that FW starts at iterate $x_0$, but since $\eta_0 = 1$, it holds that $x_1 = \Phi(y_1)$. As we will prove in Lemma~\ref{lemma:second}, without loss of generality, we can assume that FW starts at iterate $x_1 = \Phi(y_1)$, where $y_1 = 0$. \end{proof} To rigorously prove Theorem~\ref{thm:answering_bach}, we require the following four technical lemmas. In the lemma below, we prove several technical properties of kernel $k$ as in \eqref{eq:whaba_kernel}. \begin{lemma}\label{lemma:cos_is_symmetric} Let $\cH$ be the Hilbert space defined in \eqref{eq:hs} and let $k \colon \R \times \R \to \cH$ be the kernel defined in \eqref{eq:whaba_kernel}. For $y, z \in [0, 1]$ and $n\in\Z$, it holds that $k(y,z) = k(z,y) = k(|y-z|,0) = \frac{1}{2}B_2(|y-z|)$ and $k(y,z) = k(y, z+n)$. 
\end{lemma} \begin{proof} We first prove that for $y, z \in [0, 1]$, it holds that $k(y,z) = k(z,y)$. Let $a\in[0, 1[$. Then, \begin{align}\label{eq:squarea} [a] &= a, & [-a] &= 1 - a, & B_2([a]) & = a^2 - a + \frac{1}{6} = (1-a)^2 - (1-a) + \frac{1}{6} = B_2[-a],\\ [1] &= 0, & [-1] &= 0, & B_2([1]) & = B_2([-1]).\label{eq:square1} \end{align} By \eqref{eq:squarea} and \eqref{eq:square1}, for any $y, z \in [0, 1]$, it holds that $k(y,z) = \frac{1}{2}B_2([y-z]) = \frac{1}{2}B_2([z-y]) = k(z,y)$. Next, we prove that for $y, z \in [0, 1]$, it holds that $k(y,z) = k(|y-z|,0) = \frac{1}{2}B_2(|y-z|)$. Let $y,z\in[0,1]$ such that $|y-z|=a\in[0,1[$. Then, by \eqref{eq:squarea}, $k(y,z) = \frac{1}{2}B_2([y-z]) = \frac{1}{2}B_2([|y-z|]) = \frac{1}{2}B_2(|y-z|)$. Furthermore, $k(y,z) = \frac{1}{2}B_2([y-z]) = \frac{1}{2}B_2([|y-z|]) = k(|y-z|,0)$. Next, let $y,z\in[0,1]$ such that $|y-z|=1$. Then, by \eqref{eq:square1}, $k(y,z) = \frac{1}{2}B_2([y-z]) = \frac{1}{2}B_2([|y-z|]) = \frac{1}{2}B_2([1]) = \frac{1}{12} = \frac{1}{2}\left(1^2 - 1 +\frac{1}{6}\right) = \frac{1}{2}B_2(1) = \frac{1}{2}B_2(|y-z|)$. Furthermore, $k(y,z) = \frac{1}{2}B_2([y-z]) = \frac{1}{2}B_2([|y-z|]) = \frac{1}{2}B_2([1]) = k(|y-z|, 0)$. Finally, we prove that for $y,z\in [0,1]$ and $n\in\Z$, it holds that $k(y,z)=k(y,z+n)$. Indeed, $k(y,z) = \frac{1}{2}B_2(y-z- \lfloor y-z\rfloor)= \frac{1}{2}B_2(y-z-n - \lfloor y-z-n\rfloor)= k(y,z+n)$. \end{proof} In the two lemmas below, we characterize $\argmin_{y\in [0,1]}g_t(y)$, where $g_t$ is defined as in \eqref{eq:def_g_t}. \begin{lemma}\label{lemma:first} Let $\cH$ be the Hilbert space defined in \eqref{eq:hs}, let $k \colon \R \times \R \to \cH$ be the kernel defined in \eqref{eq:whaba_kernel}, let $\Phi\colon[0,1] \to\cH$ be the feature map associated with $k$ restricted to $[0,1]\times [0,1]$, let $t\in \N$, let $\{y_1 , \ldots, y_t\} = \{\frac{i-1}{t} \mid i \in \{1, \ldots, t\}\}$, and let $g_t$ be defined as in \eqref{eq:def_g_t}, that is, $g_t (y) = \frac{1}{t}\sum_{i=1}^tk(y_i,y)$. Then, it holds that $\argmin_{y\in{[0,1]}} g_t(y) = \{ y_i + \frac{1}{2t} \mid i \in \{1, \ldots, t\} \}$. \end{lemma} \begin{proof} Let $t\in \N$ and $\{y_1, \ldots, y_t\} = \{\frac{i - 1}{t} \mid i \in \{1, \ldots, t\}\}$. We stress that this does not imply that for all $i\in\{1, \ldots, t\}$, $y_i = \frac{i-1}{t}$. By Lemma~\ref{lemma:cos_is_symmetric}, for all $y\in [0, 1]$, it holds that $g_t (y) = \langle \frac{1}{t} \sum_{i = 1}^t \Phi(y_i), \Phi(y) \rangle_\cH =\frac{1}{t}\sum_{i=1}^t k(y_i, y) = \frac{1}{2t}\sum_{i = 1}^t(|y_i -y |^2 - |y_i -y| + \frac{1}{6})$. Then, for $y \in [0, 1] \setminus \{y_1, \ldots, y_t\}$, it holds that $g_t'(y) = \frac{1}{2t}\sum_{i=1}^t (2 (y - y_i) - \frac{y - y_i}{|y - y_i|})$ and since $\sum_{i=1}^{t}{y_i}= (t-1)/2$, we have \begin{align*} g_t'(y) = \frac{1}{2}(2y - \frac{t-1}{t} - \frac{1}{t} \sabs{\{y_i < y \colon i \in \{1, \ldots, t\}\}} + \frac{1}{t}\sabs{\{y_i > y \colon i \in \{1, \ldots,t\}\}}). \end{align*} For $y\in \left]\frac{i-1}{t}, \frac{i}{t}\right[$, where $i \in \{1, \ldots, t\}$, it holds that $g'_t (y) = \frac{1}{2}(2y - \frac{t-1}{t} - \frac{i}{t} + \frac{t - i}{t}) = \frac{1}{2}(2y +\frac{1}{t} - \frac{2i}{t})$ and $g_t'(y) = 0$ if and only if $y = \frac{i - \frac{1}{2}}{t}$. Since $g_t$ is strongly convex on $]\frac{i-1}{t}, \frac{i}{t}[$ for $i \in \{1, \ldots, t \}$ and continuous on $[0, 1]$, it holds that $y_i = \frac{i-1}{t}$ cannot be a minimizer of $g_t$ on $[0,1]$ for any $i\in\{1, \ldots, t\}$. 
Since $g_t(0) =g_t(1)$ by Lemma~\ref{lemma:cos_is_symmetric}, $1$ cannot be a minimizer either. Thus, only elements in $\{ y_i + \frac{1}{2t} \mid i \in \{1, \ldots, t\} \}$ can be minimizers of $g_t$ on $[0,1]$. By Lemma \ref{lemma:cos_is_symmetric}, \begin{align*} \sum_{i = 1}^tk(\frac{i-1}{t}, \frac{j-1}{t}+ \frac{1}{2t}) - \sum_{i = 1}^t k(\frac{i-1}{t}, \frac{j}{t} + \frac{1}{2t}) & = \sum_{i = 1}^tk(\frac{i}{t}, \frac{j}{t}+ \frac{1}{2t}) - \sum_{i = 1}^t k(\frac{i-1}{t}, \frac{j}{t} + \frac{1}{2t}) \\ & = k(\frac{t}{t}, \frac{j}{t}+ \frac{1}{2t}) - k(\frac{0}{t}, \frac{j}{t}+ \frac{1}{2t})\\ & = 0 \end{align*} for all $j\in\{1, \ldots, t-1\}$. Thus, $g_t(\frac{j-1}{t}+\frac{1}{2t}) =g_t(\frac{j}{t}+\frac{1}{2t})$ for all $j \in \{1, \ldots, t-1\}$. Hence, $g_t(\frac{i-1}{t} + \frac{1}{2t}) = g_t(\frac{j-1}{t} + \frac{1}{2t})$ for all $i,j \in \{1, \ldots, t\}$, proving the lemma. \end{proof} \begin{lemma}\label{lemma:second} Let $\cH$ be the Hilbert space defined in \eqref{eq:hs}, let $k \colon \R \times \R \to \cH$ be the kernel defined in \eqref{eq:whaba_kernel}, let $\Phi\colon[0,1] \to\cH$ be the feature map associated with $k$ restricted to $[0,1]\times [0,1]$, let $t\in \N$, let $y_1, \ldots, y_t \in [0, 1]$, and let $g_t$ be defined as in \eqref{eq:def_g_t}, that is, $g_t (y) = \frac{1}{t}\sum_{i=1}^tk(y_i,y)$. Suppose that $\argmin_{y\in {[0,1]}} g_t(y) = \{z_1, \ldots, z_s\}\subseteq [0, 1]$ for some $s\in \N$. Let $c\in \R $, let $\tilde{y}_i = [y_i + c]$ for all $i \in \{1, \ldots, t\}$, and let $\tilde{g}_t (y) = \frac{1}{t}\sum_{i=1}^tk(\tilde{y}_i,y)$. Then, $\argmin_{z\in {[0,1]}} \tilde{g}_{t}(z) = \{[z_1 + c], \ldots, [z_s + c]\}$. \end{lemma} \begin{proof} It holds that \begin{align*} \argmin_{z\in[0,1]}\tilde{g}_t(z) & = \argmin_{z=[y+c], y\in\R} \tilde{g}_t (z)\\ & =\argmin_{z=[y+c], y\in\R}\frac{1}{2t}\sum_{i=1}^t B_2([[y_i+c]-[y+c]])\\ & =\argmin_{z=[y+c], y\in\R}\frac{1}{2t}\sum_{i=1}^t B_2([y_i+c - \lfloor y_i + c\rfloor - (y+c) - (-\lfloor y + c\rfloor)])\\ & =\argmin_{z=[y+c], y\in\R}\frac{1}{2t}\sum_{i=1}^t B_2([y_i - y - \lfloor y_i + c\rfloor +\lfloor y + c\rfloor])\\ & =\argmin_{z=[y+c], y\in\R}\frac{1}{2t}\sum_{i=1}^t B_2([y_i - y ])\\ & = \{[z_1 + c], \ldots, [z_s + c]\}, \end{align*} where the second-to-last equality is due to Lemma~\ref{lemma:cos_is_symmetric}. \end{proof} In the lemma below, we leverage the previous lemmas to prove that FW with step-size $\eta_t = \frac{1}{t+1}$ leads to iterates $x_t = \frac{1}{t}\sum_{i = 1}^t \Phi(y_i)$ with $\{y_1, \ldots, y_t\} = \{\frac{i-1}{t} \mid i = 1, \ldots, t\}$ for all $t = 2^m$, where $m \in \N$. \begin{lemma}\label{lemma:third} Let $\cH$ be the Hilbert space defined in \eqref{eq:hs}, let $k \colon \R \times \R \to \cH$ be the kernel defined in \eqref{eq:whaba_kernel}, let $\Phi\colon[0,1] \to\cH$ be the feature map associated with $k$ restricted to $[0,1]\times [0,1]$, let $\cC=\conv(\{\Phi(y)\mid y\in[0,1]\})$ be the marginal polytope, and let $\mu = 0$ such that $f(x) = \frac{1}{2}\|x\|_\cH^2$. Let $T\in\N$ and $\eta_t = \frac{1}{t+1}$ for all $t\in\Z$. Then, for the iterates of Algorithm~\ref{algo:fw} with step-size $\eta_t$ and the LMO satisfying Assumption~\ref{ass:argmin}, it holds that $x_t = \frac{1}{t}\sum_{i = 1}^t \Phi (y_i)$ with $\{y_1, \ldots, y_t\} = \{ \frac{i -1}{t} \mid i \in \{1,\ldots,t\}\}$ for all $t \in\{1,\ldots, T\}$ such that $t=2^m$ for some $m\in\N$. \end{lemma} \begin{proof} Since $\eta_0 = 1$, it holds that $x_1 = \Phi(y_1)$.
By Lemma~\ref{lemma:second}, without loss of generality, we can assume that FW starts with iterate $x_1 = \Phi(y_1)$, where $y_1 = 0$. Let $t\in\{1,\ldots, T\}$. Since we use the step-size $\eta_t = \frac{1}{t+1}$, we obtain uniform weights, that is, $x_t = \frac{1}{t}\sum_{i=1}^t\Phi(y_i)$, where $y_i \in [0,1]$ for all $i \in \{ 1,\ldots, t\}$. Suppose that $t=2^m$ for some $m\in \N$. The proof that it holds that $\{y_1, \ldots, y_t\} = \{ \frac{i -1}{t} \mid i \in \{1,\ldots,t\}\}$ is by induction on $m\in \N$. The base case, $m = 0$, follows from $x_1 = \Phi(y_1)$, where $y_1 = 0$. Suppose that for $t=2^m$ for some $m\in\N$, it holds that $\{y_1, \ldots, y_t\} = \{ \frac{i -1}{t} \mid i \in \{1,\ldots,t\}\}$. If we show that \begin{align}\label{eq:kh_to_prove} \{y_1, \ldots, y_{2t}\} = \{ \frac{i -1}{2t} \mid i \in \{1,\ldots,2t\}\}, \end{align} the statement of the lemma follows from induction. \eqref{eq:kh_to_prove} is subsumed by the stronger statement that $y_{t+j} = y_j + \frac{1}{2t}$ for all $j \in\{ 1, \ldots, t\}$, and we prove the latter for the remainder of this proof. By Lemma~\ref{lemma:first} and Assumption~\ref{ass:argmin}, it holds that $y_{t +1} = \frac{1}{2t}$. Suppose that for some $\ell \in\{1, \ldots, t-1\}$, it holds that $y_{t + j} = y_{j} + \frac{1}{2t}$ for all $j\in \{1, \ldots, \ell\}$. We decompose the function $g_{t+\ell}(y)$ into $g_t(y)$ and $\tilde{g}_{\ell}(y) = \langle \frac{1}{\ell} \sum_{i=1}^\ell \Phi(y_i + \frac{1}{2t}), \Phi(y) \rangle_\cH$, that is, we consider the decomposition $g_{t+\ell}(y) = \frac{t}{t+\ell} g_t(y) + \frac{\ell}{t+\ell}\tilde{g}_{\ell}(y)$. By Lemma~\ref{lemma:first}, $\argmin_{y\in[0,1]} g_{t}(y) = \left\{y_i + \frac{1}{2t} \mid i \in \{1, \ldots, t\}\right\}\subseteq [0,1]$ and by Assumption~\ref{ass:argmin}, $y_{\ell+1} = \min(\argmin_{y\in [0,1]}g_\ell(y))$. Thus, by Lemma~\ref{lemma:second}, it holds that $\min\argmin_{y\in [0,1]}\tilde{g}_\ell(y) = \min(\argmin_{y\in [0,1]}g_\ell(y) + \frac{1}{2t}) = y_{\ell+1} + \frac{1}{2t}\in \{y_i + \frac{1}{2t} \mid i \in \{1, \ldots, t\}\}$. Thus, $\min\argmin_{y\in [0,1]}\tilde{g}_\ell(y) \in \argmin_{y\in[0,1]} g_{t}(y)$ and \begin{align*} y_{t+\ell+1} = \min\argmin_{y\in [0,1]}g_{t+\ell}(y) = \min\argmin_{y\in [0,1]} \tilde{g}_\ell(y) = y_{\ell+1} + \frac{1}{2t}. \end{align*} By induction, $y_{t+j} = y_j + \frac{1}{2t}$ for all $j \in\{ 1, \ldots, t\}$, as required to conclude the proof. \end{proof} Finally, we prove Theorem~\ref{thm:answering_bach}. \begin{proof}[Proof of Theorem~\ref{thm:answering_bach}] By Lemma~\ref{lemma:third}, $x_t = \frac{1}{t}\sum_{i=1}^t \Phi(\frac{i - 1}{t})$ and, since $\mu = 0$, we have $f(x_t) = \frac{1}{2}\|x_t\|_\cH^2 = \frac{1}{2t^2} \sum_{j=1}^{t} \sum_{i = 1}^{t} k(\frac{i-1}{t}, \frac{j-1}{t}) = \frac{1}{2t} \sum_{i=1}^{t} k(\frac{i-1}{t}, 1)$, where the last equality follows from repeatedly applying \begin{align}\label{eq:proof_in_kh} \sum_{i=1}^tk(\frac{i-1}{t}, \frac{j-1}{t}) & = \sum_{i=1}^tk(\frac{i-1}{t}, \frac{j}{t}), \end{align} where $j\in\{1,\ldots,t\}$. To see that \eqref{eq:proof_in_kh} holds, recall that by Lemma \ref{lemma:cos_is_symmetric}, it holds that \begin{align*} \sum_{i=1}^tk(\frac{i-1}{t}, \frac{j-1}{t}) - \sum_{i=1}^tk(\frac{i-1}{t},\frac{j}{t})= \sum_{i=1}^tk(\frac{i}{t}, \frac{j}{t}) - \sum_{i=1}^tk(\frac{i-1}{t},\frac{j}{t}) = k(1, \frac{j}{t}) - k(0, \frac{j}{t})=0 \end{align*} for all $j\in\{1, \ldots, t\}$. 
Thus, $f(x_t) = \frac{1}{2t} \sum_{i = 1}^{t} k(\frac{i-1}{t}, 1) = \frac{1}{2t} \sum_{i = 1}^{t} k(\frac{i-1}{t}, 0) = \frac{1}{2t} \sum_{i = 1}^{t} k(\frac{i}{t},0) = \frac{1}{4t} \sum_{i = 1}^{t} ((\frac{i}{t})^2 - \frac{i}{t} + \frac{1}{6})$, where the second, third, and fourth equalities are due to Lemma~\ref{lemma:cos_is_symmetric}. Since $\sum_{i = 1}^t i= \frac{t (t +1)}{2}$ and $\sum_{i=1}^t i^2 = \frac{2t^3 + 3t^2+t}{6}$, it holds that $f(x_t) = \frac{1}{4t} (\frac{2t + 3+\frac{1}{t}}{6} - \frac{t+1}{2} + \frac{t}{6} ) = \frac{1}{24t^2}$. \end{proof} The proof of Theorem~\ref{thm:answering_bach} implies that the iterates of FW with open-loop step-size $\eta_t = \frac{1}{t+1}$ are identical to the Sobol sequence at any iteration $t=2^m$, where $m\in \N$. The Sobol sequence is known to converge at the optimal rate of order $\cO(1/t^2)$ \citep{bach2012equivalence} in this infinite-dimensional kernel-herding setting. Here, the equivalence of FW with kernel herding leads to the study and discovery of new convergence rates for FW. This is in contrast to other papers \citep{chen2012super, bach2012equivalence, tsuji2022pairwise} in which FW is exploited to improve kernel-herding methods. \begin{figure}[t] \captionsetup[subfigure]{justification=centering} \centering \begin{tabular}{c c} \begin{subfigure}{.3\textwidth} \centering \includegraphics[width=1\textwidth]{kernel_herding_uniform.png} \subcaption{Uniform density.} \label{fig:kernel_herding_uniform} \end{subfigure}& \begin{subfigure}{.3\textwidth} \centering \includegraphics[width=1\textwidth]{kernel_herding_non_uniform.png} \subcaption{Non-uniform density.} \label{fig:kernel_herding_non_uniform} \end{subfigure} \end{tabular} \caption{ Comparison of FW with different step-sizes for the kernel-herding problem \eqref{eq:kh} as specified in Section~\ref{sec:kernel_herding} for RKHS $\cH$ as in \eqref{eq:hs}, kernel $k$ as in \eqref{eq:whaba_kernel}, and both uniform and non-uniform densities. The $y$-axis represents the minimum primal gap. In both settings, FW with open-loop step-sizes converges at a rate of order $\cO(1/t^2)$ whereas FW with line-search converges at a rate of order $\cO(1/t)$. }\label{fig:kernel_herding} \end{figure} The results in Figure~\ref{fig:kernel_herding}, see Section~\ref{sec:experiment_kernel_herding} for details, show that in the kernel-herding setting of Section~\ref{sec:kernel_whaba}, for RKHS $\cH$ as in \eqref{eq:hs}, kernel $k$ as in \eqref{eq:whaba_kernel}, and both uniform and non-uniform densities over $ \cY = [0, 1]$, FW with open-loop step-sizes $\eta_t = \frac{\ell}{t+\ell}$, where $\ell\in\N_{\geq 1}$, converges at a rate of order $\cO(1/t^2)$ and FW with line-search converges at a rate of order $\cO(1/t)$. It remains an open problem to extend Theorem~\ref{thm:answering_bach} to non-uniform densities. \section{{Numerical experiments}}\label{sec:numerical_experiments_main} In this section, we present the numerical experiments. Numerical experiments corroborating our results in Sections~\ref{sec:blueprint}, \ref{sec:unconstrained}, and~\ref{sec:fw_variants} are omitted since the studies do not provide new insights or highlight unexplained convergence rates. All of our numerical experiments are implemented in \textsc{Python} and performed on an Nvidia GeForce RTX 3080 GPU with 10GB RAM and an Intel Core i7 11700K 8x CPU at 3.60GHz with 64 GB RAM. Our code is publicly available on \href{https://github.com/ZIB-IOL/open_loop_fw}{GitHub}. 
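As a point of reference for the setups described below, the following is a minimal sketch (illustrative only, not the code from the repository linked above) of the basic FW loop with open-loop step-size $\eta_t = \frac{\ell}{t+\ell}$, instantiated with the closed-form LMO of the Euclidean ball; the helper names \texttt{lmo\_l2\_ball} and \texttt{fw\_open\_loop} and the random data are our own illustration and do not reproduce the exact construction behind Figure~\ref{fig:exterior}.
\begin{verbatim}
import numpy as np

def lmo_l2_ball(grad, r=1.0):
    # Linear minimization over the l2-ball of radius r:
    # argmin_{||p||_2 <= r} <grad, p> = -r * grad / ||grad||_2.
    # (In the settings of interest the gradient norm is bounded away from zero.)
    return -r * grad / np.linalg.norm(grad)

def fw_open_loop(grad_f, lmo, x0, T, ell=2):
    # Vanilla FW with the open-loop step-size eta_t = ell / (t + ell);
    # note that eta_0 = 1, so x_1 is a vertex returned by the LMO.
    x = x0.copy()
    for t in range(T):
        p = lmo(grad_f(x))
        x = x + (ell / (t + ell)) * (p - x)
    return x

# Illustrative random data (assumed for this sketch only).
rng = np.random.default_rng(0)
d = 100
A = rng.standard_normal((d, d))
b = rng.standard_normal(d)
grad_f = lambda x: A.T @ (A @ x - b)   # gradient of f(x) = 0.5 * ||Ax - b||_2^2
x_final = fw_open_loop(grad_f, lmo_l2_ball, x0=np.eye(d)[0], T=1000, ell=2)
\end{verbatim}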
For all numerical experiments, to avoid the oscillating behavior of the primal gap, the $y$-axis represents $\min_{i\in\{1, \ldots, t\}} h_i$, where $t$ denotes the number of iterations and $h_i$ the primal gap. \subsection{Detailed setups for the numerical experiments in Figures~\ref{fig:exterior}, \ref{fig:experiments_polytope}, and~\ref{fig:kernel_herding}} Throughout the paper, we present several toy examples in Figures~\ref{fig:exterior}, \ref{fig:experiments_polytope}, and~\ref{fig:kernel_herding} to illustrate results and raise open questions. For completeness, we present the detailed setups for these experiments below. \subsubsection{Detailed setup for numerical experiments in Figure~\ref{fig:exterior}}\label{sec:experiment_exterior} For $d = 100$, we address \eqref{eq:opt} with FW for $\cC\subseteq \R^{d}$ the $\ell_p$-ball and $f(x) = \frac{1}{2}\|Ax-b\|_2^2$, where $A\in \R^{100\times 100}$ and $b\in \R^{100}$ are a random matrix and vector, respectively, such that $f$ is not strongly convex and the unconstrained optimal solution $\argmin_{x\in \R^d}f(x)$ lies in the exterior of the feasible region; thus, $\|\nabla f(x)\|_2 \geq \lambda > 0$ for all $x\in\cC$ and some $\lambda > 0$. For $p\in\{2,3,5\}$, we compare FW with open-loop step-sizes $\eta_t = \frac{\ell}{t+\ell}$, where $\ell\in\{1, 2, 4, 6\}$, and the constant step-size introduced in Remark~\ref{rem:ol_linear}, starting with $x_0 = e^{(1)}$. We plot the results of the experiments in log-log plots in Figure~\ref{fig:exterior}. \subsubsection{Detailed setup for numerical experiments in Figure~\ref{fig:experiments_polytope}}\label{sec:experiment_polytope} For $d = 100$, we address \eqref{eq:opt} with FW for $\cC\subseteq \R^{d}$ the probability simplex and $f(x) = \frac{1}{2}\|x-\rho \bar{\oneterm}\|_2^2$, where $\rho \geq \frac{2}{d}$ and $\bar{\oneterm}$ is the vector with zeros for the first $\lceil d/2\rceil$ entries and ones for the remaining entries. Then, $\frac{2}{d}\bar{\oneterm} = x^*\in\argmin_{x\in \cC} f(x)$ is the unique minimizer of $f$. For $\rho \in \{\frac{1}{4}, 2\}$, we compare FW with line-search and open-loop step-sizes $\eta_t = \frac{\ell}{t+\ell}$, where $\ell\in\{1, 2, 4\}$, starting with $x_0 = e^{(1)}$. Here, short-step is identical to line-search and, thus, omitted. We plot the results of the experiments in log-log plots in Figure~\ref{fig:experiments_polytope}. \subsubsection{Detailed setup for numerical experiments in Figure~\ref{fig:kernel_herding}}\label{sec:experiment_kernel_herding} We consider the kernel-herding setting of Section~\ref{sec:kernel_whaba} over $ \cY = [0, 1]$, that is, $\cH$ is the RKHS as in \eqref{eq:hs} and $k$ is the kernel as in \eqref{eq:whaba_kernel}. Given either the uniform density or a random non-uniform density of the form $ p(y) \propto \left(\sum_{i=1}^n(a_i \cos(2 \pi i y) + b_i \sin (2 \pi i y))\right)^2 $ with $n\leq 5$ and $a_i,b_i\in \R$ for all $i\in\{1,\ldots, n\}$ such that $\int_{[0,1]} p(y) dy = 1$, we address \eqref{eq:kh} with FW with line-search and open-loop step-sizes $\eta_t = \frac{\ell}{t+\ell}$, where $\ell \in \{1, 2\}$. The LMO is implemented as an exhaustive search over $[0,1]$, and FW is run for 1,000 iterations. We plot the results of the experiments in log-log plots in Figure~\ref{fig:kernel_herding}.
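The dynamics of Theorem~\ref{thm:answering_bach} can also be reproduced directly. The following sketch (illustrative only, not the repository code) runs FW with $\eta_t = \frac{1}{t+1}$ for the uniform density, represents $x_t$ by its atoms $y_1,\ldots,y_t$ (the step-size $\frac{1}{t+1}$ yields uniform weights), uses a grid-based LMO whose ties are broken towards the smallest minimizer as in Assumption~\ref{ass:argmin}, and evaluates the kernel via Lemma~\ref{lemma:cos_is_symmetric}; the printed values of $f(x_t)$ at $t=2^m$ should match $\frac{1}{24t^2}$ up to floating-point error.
\begin{verbatim}
import numpy as np

def kern(y, z):
    # Kernel on [0, 1] via Lemma cos_is_symmetric: k(y, z) = B_2(|y - z|) / 2,
    # where B_2(x) = x^2 - x + 1/6 is the second Bernoulli polynomial.
    d = np.abs(y - z)
    return 0.5 * (d ** 2 - d + 1.0 / 6.0)

T = 64                                       # largest iteration, a power of two
grid = np.arange(0.0, 1.0, 1.0 / (2 * T))    # candidates for the exhaustive LMO

ys = [0.0]                                   # x_1 = Phi(0)
for t in range(1, T):
    g = np.mean([kern(y, grid) for y in ys], axis=0)   # g_t on the grid
    ys.append(grid[np.argmin(g)])            # first minimizer = smallest one
    s = len(ys)
    if (s & (s - 1)) == 0:                   # s is a power of two
        Y = np.array(ys)
        f_val = 0.5 * np.mean(kern(Y[:, None], Y[None, :]))   # f(x_s)
        print(s, f_val, 1.0 / (24 * s ** 2))
\end{verbatim}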
\subsection{Logistic regression}\label{sec:logistic_regression} \begin{figure}[t] \captionsetup[subfigure]{justification=centering} \begin{tabular}{c c c} \begin{subfigure}{.3\textwidth} \centering \includegraphics[width=1\textwidth]{gisette_lp_1_ball_logistic_regression.png} \caption{$\ell_1$-ball.}\label{fig:logistic_regression_1} \end{subfigure}& \begin{subfigure}{.3\textwidth} \centering \includegraphics[width=1\textwidth]{gisette_lp_2_ball_logistic_regression.png} \caption{$\ell_2$-ball.}\label{fig:logistic_regression_2} \end{subfigure} & \begin{subfigure}{.3\textwidth} \centering \includegraphics[width=1\textwidth]{gisette_lp_5_ball_logistic_regression.png} \caption{$\ell_5$-ball.}\label{fig:logistic_regression_5} \end{subfigure}\\ \end{tabular} \caption{ Logistic regression for different $\ell_p$-balls. }\label{fig:logistic_regression} \end{figure} We consider the problem of logistic regression, which for feature vectors $a_1,\ldots, a_m \in\R^d$, label vector $b\in\{-1,+1\}^m$, $p\in\R_{\geq 1}$, and radius $r > 0$, leads to the problem formulation \begin{align*} \min_{x\in\R^d} & \frac{1}{m}\sum_{i=1}^m \log(1+\exp(-b_i a_i^\intercal x))\\ \text{subject to} \ & \|x\|_p\leq r. \end{align*} Note that the feasible region is an $\ell_p$-ball and when $p=1$, the problem formulation is that of sparsity-constrained logistic regression, which induces sparsity in the iterates of FW variants. For $p\in\{1,2,5\}$, we compare FW, PAFW, and MFW, with open-loop step-sizes $\eta_t=\frac{\ell}{t+\ell}$, where $\ell\in\{2,6\}$, on the Z-score normalized Gisette dataset\footnote{Available online at \href{https://archive.ics.uci.edu/ml/datasets/Gisette}{https://archive.ics.uci.edu/ml/datasets/Gisette}.} \citep{guyon2003introduction}. The number of features is $d=5,000$, we use $m=2,000$ samples of the dataset, and we set $r=1$. We plot the results of the experiments in log-log plots in Figure~\ref{fig:logistic_regression}. PAFW and MFW seem to enjoy the same accelerated convergence rates as FW with step-sizes $\eta_t = \frac{\ell}{t+\ell}$, where $\ell\in\N_{\geq 1}$. This includes the rates of order $\cO(1/t^\ell)$ when $p\in\{2,5\}$, see also Remark~\ref{rem:ol_linear}. This raises the question whether PAFW \citep{lan2013complexity, kerdreux2021local} and MFW \citep{li2021momentum} admit accelerated convergence rates due to the exploitation of momentum, as indicated in the respective works, or due to the specific choice of open-loop step-size. Furthermore, MFW seems to converge at an accelerated rate earlier than FW, which converges at an accelerated rate earlier than PAFW. However, for $p = 5$, MFW converges quickly during early iterations but then converges at a slower rate than FW and PAFW, especially for step-size $\eta_t = \frac{2}{t+2}$. For $p=1$, all methods converge at the same rate of order $\cO(1/t^2)$. \subsection{Collaborative filtering}\label{sec:collaborative_filtering} \begin{figure}[t] \centering \captionsetup[subfigure]{justification=centering} \begin{tabular}{c} \begin{subfigure}{.3\textwidth} \centering \includegraphics[width=1\textwidth]{movielens_nuclear_norm_ball_collaborative_filtering_2000.png} \end{subfigure} \end{tabular} \caption{ Collaborative filtering. }\label{fig:collaborative_filtering} \end{figure} We consider the problem of collaborative filtering. 
In particular, let $A\in\R^{m\times d}$ be a matrix with only partially observed entries, that is, there exists a subset of indices $\cI\subseteq \{1,\ldots,m\}\times\{1,\ldots,d\}$ such that only the entries $A_{i,j}$ with $(i,j)\in\cI$ are observed. The task is to predict the unobserved entries of $A$. Let $H_\rho$ be the Huber loss with parameter $\rho > 0$ \citep{huber1992robust}: \begin{align*} H_\rho\colon x\in \R \mapsto \begin{cases} \frac{x^2}{2}, & \text{if} \ |x| \leq \rho\\ \rho(|x| - \frac{\rho}{2}), & \text{if} \ |x| > \rho, \end{cases} \end{align*} $\|\cdot\|_{\nuc}\colon X\in\R^{m\times d} \mapsto \trace(\sqrt{X^\intercal X})$ the nuclear norm, and $r>0$ the radius of the nuclear norm ball. Since we assume the solution to be low rank, the approach of \citet{mehta2007robust} leads to the problem formulation \begin{align*} \min_{X\in\R^{m\times d}} & \frac{1}{|\cI|} \sum_{(i,j)\in\cI} H_\rho(A_{i,j} - X_{i,j})\\ \text{subject to} \ & \|X\|_{\nuc}\leq r. \end{align*} We compare FW, PAFW, and MFW, with open-loop step-sizes $\eta_t = \frac{\ell}{t+\ell}$, where $\ell\in\{2,6\}$, on the MovieLens 100k dataset\footnote{Available online at \href{https://grouplens.org/datasets/movielens/100k/}{https://grouplens.org/datasets/movielens/100k/}.} \citep{harper2015movielens} with $m=943$, $d=1682$, and $|\cI| = 10,000$, and we set $\rho = 1$ and $r = 2,000$. We plot the results of the experiments in a log-log plot in Figure~\ref{fig:collaborative_filtering}. All algorithms with any step-size ultimately converge at a rate of order $\cO(1/t^2)$, except for MFW with step-size $\eta_t = \frac{6}{t+6}$, which appears to converge at a rate of order $\cO(1/t^6)$. The latter phenomenon is not currently motivated by results in this paper or \citet{li2021momentum}. Among the different methods, MFW admits the fastest rate of convergence, followed by FW. \section{{Discussion and open questions}}\label{sec:discussion} We investigated settings in which FW with open-loop step-sizes achieves accelerated convergence rates. Specifically, we observed in Figures~\ref{fig:exterior} and~\ref{fig:logistic_regression} that FW with step-size $\eta_t=\frac{\ell}{t+\ell}$, where $\ell\in\N_{\geq 1}$, converges at a rate of order $\cO(1/t^\ell)$ when the feasible region $\cC$ is strongly convex and the norm of the gradient of $f$ is bounded from below by a nonnegative constant. These rates are better than the rates of order $\cO(1/t^{\ell/2})$ derived in Remark~\ref{rem:ol_linear}, which raises the question whether this gap between theory and practice can be closed. Furthermore, it remains to investigate the accelerated rates of order up to $\cO(1/t^\ell)$ when $\cC$ is only uniformly convex instead of strongly convex, see Figures~\ref{fig:exterior_3} and~\ref{fig:exterior_5}. Furthermore, these convergence guarantees of order $\cO(1/t^{\ell/2})$ are significantly better than the convergence guarantees of order up to $\cO(1/t^2)$ of FW variants PAFW \citep{lan2013complexity, kerdreux2021local} and MFW \citep{li2021momentum}, which are designed to perform well in this setting. We thus conducted numerical experiments to investigate whether PAFW and MFW also achieve accelerated rates depending on the choice of open-loop step-size. 
According to the logistic-regression experiments in Figure~\ref{fig:logistic_regression}, it appears that they do, which raises the question whether the accelerated convergence rates of PAFW and MFW stem from the exploitation of momentum, as suggested in the respective works, or are in fact due to the choice of the open-loop step-size. The latter explanation is further supported by the unexplained convergence rate of order $\cO(1/t^6)$ of MFW with step-size $\eta_t = \frac{6}{t+6}$ in the collaborative-filtering experiment in Figure~\ref{fig:collaborative_filtering}. Further, we proved that FW with open-loop step-sizes achieves faster convergence rates than FW with line-search or short-step in the setting of the lower bound due to \citet{wolfe1970convergence}, assuming strict complementarity is satisfied. In case strict complementarity or similar assumptions are not satisfied, we proved that DIFW and AFW with open-loop step-sizes always converge at accelerated rates. We also answered the open question in \citet{bach2012equivalence} by demonstrating that FW with open-loop step-size $\eta_t = \frac{1}{t+1}$ achieves accelerated convergence rates in the setting of Section~\ref{sec:kernel_whaba} for the uniform density in Theorem~\ref{thm:answering_bach}. Numerical experiments in Figure~\ref{fig:kernel_herding_non_uniform} indicate that acceleration also holds for non-uniform densities, an observation which is currently not backed by theoretical results. Finally, an important limitation of our study is that the proofs rely on norms, which are not affine invariant, whereas FW is known to be affine invariant. We plan to address this limitation in future work. \subsubsection*{{Acknowledgements}} This research was partially funded by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) under Germany's Excellence Strategy – The Berlin Mathematics Research Center MATH$^+$ (EXC-2046/1, project ID 390685689, BMS Stipend).
\bibliography{utils/biblio_infinite_dimensional_spaces.bib} \end{document} \usepackage[utf8]{inputenc} \usepackage[T1]{fontenc} \usepackage{url} \usepackage{booktabs} \usepackage{amsfonts} \usepackage{microtype} \usepackage{bbold} \usepackage{nicefrac} \usepackage{color, colortbl} \usepackage[first=0,last=9]{lcg} \definecolor{LightCyan}{rgb}{0.92,1,1} \usepackage{subcaption} \captionsetup[subfigure]{labelfont=rm} \usepackage{amsmath} \usepackage{amsthm} \usepackage{amssymb} \usepackage{thmtools,thm-restate} \usepackage{thm-restate} \usepackage{paralist} \usepackage{comment} \usepackage{dsfont} \usepackage{bbm} \usepackage{float} \usepackage{balance} \usepackage{stmaryrd} \usepackage{multicol} \usepackage[export]{adjustbox} \usepackage{multirow} \usepackage{tabularx} \usepackage{mathtools} \usepackage{mdframed} \usepackage{enumitem} \setitemize{noitemsep,topsep=0pt,parsep=0pt,partopsep=0pt} \usepackage{chemmacros} \usepackage[ruled, linesnumbered]{algorithm2e} \usepackage{setspace} \usepackage{xcolor} \usepackage{tikz} \usetikzlibrary{fit,calc} \colorlet{pink}{red!40} \colorlet{lightblue}{blue!30} \colorlet{lightgreen}{green!30} \setlength\arraycolsep{2pt} \DontPrintSemicolon \usepackage{jmlr2e} \usepackage[margin=1in]{geometry} \usepackage{natbib} \setcitestyle{authoryear,round,citesep={;},aysep={,},yysep={;}} \renewcommand{\cite}[1]{\citep{#1}} \def\fwt{{S}} \DeclareMathOperator{\Iota}{I} \DeclareMathOperator{\Id}{Id} \def\agd{{\textnormal{\texttt{AGD}}}} \def\agdavi{{\textnormal{\texttt{AGDAVI}}}} \def\cgavi{{\textnormal{\texttt{CGAVI}}}} \def\cg{{\textnormal{\texttt{CG}}}} \def\oavi{{\textnormal{\texttt{OAVI}}}} \def\avi{{\textnormal{\texttt{AVI}}}} \def\abm{{\textnormal{\texttt{ABM}}}} \def\vca{{\textnormal{\texttt{VCA}}}} \def\afw{{\textnormal{\texttt{AFW}}}} \def\pfw{{\textnormal{\texttt{PFW}}}} \def\svd{{\textnormal{\texttt{SVD}}}} \def\svm{{\textnormal{\texttt{SVM}}}} \def\cvxoracle{{\textnormal{\texttt{ORACLE}}}} \def\aa{{\mathbf{a}}} \def\bb{{\mathbf{b}}} \def\cc{{\mathbf{c}}} \def\dd{{\mathbf{d}}} \def\ee{{\mathbf{e}}} \def\ff{{\mathbf{f}}} \def\gg{{\mathbf{g}}} \def\hh{{\mathbf{h}}} \def\rr{{\mathbf{r}}} \def\ss{{\mathbf{s}}} \def\tt{{\mathbf{t}}} \def\uu{{\mathbf{u}}} \def\vv{{\mathbf{v}}} \def\ww{{\mathbf{w}}} \def\xx{{\mathbf{x}}} \def\yy{{\mathbf{y}}} \def\zz{{\mathbf{z}}} \def\aalpha{\boldsymbol{\alpha}} \def\bbeta{\boldsymbol{\beta}} \def\ssig{{\boldsymbol{\sigma}}} \def\AA{{\mathbf{A}}} \def\BB{{\mathbf{B}}} \def\CC{{\mathbf{C}}} \def\DD{{\mathbf{D}}} \def\EE{{\mathbf{E}}} \def\FF{{\mathbf{F}}} \def\GG{{\mathbf{G}}} \def\HH{{\mathbf{H}}} \def\MM{{\mathbf{M}}} \def\RR{{\mathbf{R}}} \def\SS{{\mathbf{S}}} \def\TT{{\mathbf{T}}} \def\UU{{\mathbf{U}}} \def\VV{{\mathbf{V}}} \def\XX{{\mathbf{X}}} \def\YY{{\mathbf{Y}}} \def\ZZ{{\mathbf{Z}}} \newcommand{\B}{\mathbb{B}} \newcommand{\C}{\mathbb{C}} \newcommand{\D}{\mathbb{D}} \newcommand{\E}{\mathbb{E}} \newcommand{\F}{\mathbb{F}} \renewcommand{\H}{\mathbb{H}} \newcommand{\N}{\mathbb{N}} \renewcommand{\P}{\mathbb{P}} \newcommand{\Q}{\mathbb{Q}} \newcommand{\R}{\mathbb{R}} \newcommand{\T}{\mathbb{T}} \newcommand{\TP}{\mathbb{TP}} \newcommand{\V}{\mathbb{V}} \newcommand{\X}{\mathbb{X}} \newcommand{\Z}{\mathbb{Z}} \newcommand\cA{{\ensuremath{\mathcal{A}}}\xspace} \newcommand\cB{{\ensuremath{\mathcal{B}}}\xspace} \newcommand\cC{{\ensuremath{\mathcal{C}}}\xspace} \newcommand\cD{{\ensuremath{\mathcal{D}}}\xspace} \newcommand\cE{{\ensuremath{\mathcal{E}}}\xspace} \newcommand\cF{{\ensuremath{\mathcal{F}}}\xspace} 
\newcommand\cG{{\ensuremath{\mathcal{G}}}\xspace} \newcommand\cH{{\ensuremath{\mathcal{H}}}\xspace} \newcommand\cI{{\ensuremath{\mathcal{I}}}\xspace} \newcommand\cJ{{\ensuremath{\mathcal{J}}}\xspace} \newcommand\cK{{\ensuremath{\mathcal{K}}}\xspace} \newcommand\cL{{\ensuremath{\mathcal{L}}}\xspace} \newcommand\cM{{\ensuremath{\mathcal{M}}}\xspace} \newcommand\cN{{\ensuremath{\mathcal{N}}}\xspace} \newcommand\cO{{\ensuremath{\mathcal{O}}}\xspace} \newcommand\cP{{\ensuremath{\mathcal{P}}}\xspace} \newcommand\cQ{{\ensuremath{\mathcal{Q}}}\xspace} \newcommand\cR{{\ensuremath{\mathcal{R}}}\xspace} \newcommand\cS{{\ensuremath{\mathcal{S}}}\xspace} \newcommand\cT{{\ensuremath{\mathcal{T}}}\xspace} \newcommand\cU{{\ensuremath{\mathcal{U}}}\xspace} \newcommand\cV{{\ensuremath{\mathcal{V}}}\xspace} \newcommand\cW{{\ensuremath{\mathcal{W}}}\xspace} \newcommand\cX{{\ensuremath{\mathcal{X}}}\xspace} \newcommand\cY{{\ensuremath{\mathcal{Y}}}\xspace} \newcommand\cZ{{\ensuremath{\mathcal{Z}}}\xspace} \newcommand{\eps}{\varepsilon} \newcommand{\sig}{\sigma} \newcommand{\Sig}{\Sigma} \DeclareMathOperator{\conv}{conv} \DeclareMathOperator{\vertices}{vert} \DeclareMathOperator{\aff}{aff} \DeclareMathOperator{\supp}{supp} \DeclareMathOperator{\mathspan}{span} \DeclareMathOperator{\rank}{rank} \DeclareMathOperator{\trace}{tr} \DeclareMathOperator{\nuc}{nuc} \DeclareMathOperator{\Ker}{Ker} \DeclareMathOperator{\apker}{apker} \DeclareMathOperator{\Mat}{Mat} \DeclareMathOperator{\diag}{diag} \DeclareMathOperator{\eval}{eval} \DeclareMathOperator{\mse}{MSE} \DeclareMathOperator{\rmse}{rmse} \DeclareMathOperator{\acc}{acc} \DeclareMathOperator{\aponb}{APONB} \DeclareMathOperator{\srref}{SRREF} \DeclareMathOperator{\lmo}{LMO} \DeclareMathOperator{\argmin}{argmin} \DeclareMathOperator{\argmax}{argmax} \DeclareMathOperator{\evaluation}{EV} \DeclareMathOperator{\spar}{SPAR} \DeclareMathOperator{\lt}{LT} \DeclareMathOperator{\ltc}{LTC} \newcommand{\zeros}{\ensuremath{\mathbf{0}}} \newcommand{\ones}{\mathbf{1}} \newcommand{\zeroterm}{\ensuremath{\mathbb{0}}} \newcommand{\oneterm}{\ensuremath{\mathbb{1}}} \newcommand{\true}{\operatorname{true}} \newcommand{\false}{\operatorname{false}} \newcommand{\cb}[1]{\{ #1 \}} \newcommand{\scb}[1]{\left\{ #1 \right\}} \newcommand{\rb}[1]{( #1 )} \newcommand{\srb}[1]{\left( #1 \right)} \renewcommand{\sb}[1]{[#1 ]} \newcommand{\ssb}[1]{\left[ #1 \right]} \newcommand{\innp}[1]{\langle #1 \rangle} \newcommand{\sinnp}[1]{\left\langle #1 \right\rangle} \newcommand{\bdot}[1]{\mathbf{\dot{ #1 }}} \newcommand{\norm}[1]{\| #1 \|} \newcommand{\snorm}[1]{\left\| #1 \right\|} \newcommand{\card}[1]{| #1 |} \newcommand{\scard}[1]{\left| #1 \right|} \newcommand{\abs}[1]{\lvert #1 \rvert} \newcommand{\sabs}[1]{\left\lvert #1 \right\rvert} \newcommand{\OPT}{\operatorname{OPT}} \newcommand{\etal}{\textit{et al}.} \newcommand{\defeq}{\stackrel{\mathrm{\scriptscriptstyle def}}{=}} \newcommand*{\sepfbox}[1]{ \begingroup \sbox0{\fbox{#1}} \setlength{\fboxrule}{0pt} \fbox{\unhbox0} \endgroup } \newcommand*{\vsepfbox}[1]{ \begingroup \sbox0{\fbox{#1}} \setlength{\fboxrule}{0pt} \mbox{\kern-\fboxsep\fbox{\unhbox0}\kern-\fboxsep} \endgroup } \newcommand*{\vertbar}{\rule[-1ex]{0.5pt}{2.5ex}} \newcommand*{\horzbar}{\rule[.5ex]{2.5ex}{0.5pt}} \newenvironment{nscenter} {\parskip=0pt\par\nopagebreak\centering} {\par\noindent\ignorespacesafterend} \theoremstyle{plain} \numberwithin{equation}{section} \newtheorem{theorem}{Theorem}[section] \numberwithin{theorem}{section} \newtheorem{lemma}[theorem]{Lemma} 
\newtheorem{corollary}[theorem]{Corollary} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{conjecture}[theorem]{Conjecture} \newtheorem{claim}[theorem]{Claim} \newtheorem{fact}[theorem]{Fact} \newtheorem{problem}[theorem]{Problem} \theoremstyle{definition} \newtheorem{definition}[theorem]{Definition} \newtheorem{finalremark}[theorem]{Final Remark} \newtheorem{remark}[theorem]{Remark} \newtheorem{example}[theorem]{Example} \newtheorem{observation}[theorem]{Observation} \newtheorem{maintheorem}[theorem]{Main Theorem} \theoremstyle{plain} \newtheorem{assumption}{Assumption} \let\origtheassumption\theassumption \graphicspath{{imgs/}} \makeatletter \def\mathcolor#1#{\@mathcolor{#1}} \def\@mathcolor#1#2#3{ \protect\leavevmode \begingroup \color#1{#2}#3 \endgroup } \makeatother \setlength{\parskip}{0.75em} \newcommand{\tk}{{\color{red}{\bf tk}}} \newenvironment{myfont}{\fontfamily{<familyname>}\selectfont}{\par} \newcommand{\hrulealg}[0]{\vspace{1mm} \hrule \vspace{1mm}} \endinput
2205.12786v3
http://arxiv.org/abs/2205.12786v3
Multi-sum Rogers-Ramanujan Type Identities
\documentclass[12pt,reqno]{amsart} \usepackage{amsmath,amssymb,extarrows} \usepackage{url} \usepackage{tikz,enumerate} \usepackage{diagbox} \usepackage{appendix} \usepackage{epic} \usepackage{float} \vfuzz2pt \usepackage{cite} \usepackage{hyperref} \usepackage{array} \usepackage{booktabs} \setlength{\topmargin}{-3mm} \setlength{\oddsidemargin}{0.2in} \setlength{\evensidemargin}{0.2in} \setlength{\textwidth}{5.9in} \setlength{\textheight}{8.9in} \allowdisplaybreaks[4] \newtheorem{theorem}{Theorem} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{conj}[theorem]{Conjecture} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{prop}[theorem]{Proposition} \theoremstyle{definition} \newtheorem{defn}{Definition} \theoremstyle{remark} \newtheorem{rem}{Remark} \numberwithin{equation}{section} \numberwithin{theorem}{section} \numberwithin{defn}{section} \DeclareMathOperator{\spt}{spt} \DeclareMathOperator{\RE}{Re} \DeclareMathOperator{\IM}{Im} \DeclareMathOperator{\sg}{sg} \newcommand{\eps}{\varepsilon} \newcommand{\To}{\longrightarrow} \newcommand{\h}{\mathcal{H}} \newcommand{\s}{\mathcal{S}} \newcommand{\A}{\mathcal{A}} \newcommand{\J}{\mathcal{J}} \newcommand{\M}{\mathcal{M}} \newcommand{\W}{\mathcal{W}} \newcommand{\X}{\mathcal{X}} \newcommand{\BOP}{\mathbf{B}} \newcommand{\BH}{\mathbf{B}(\mathcal{H})} \newcommand{\KH}{\mathcal{K}(\mathcal{H})} \newcommand{\Real}{\mathbb{R}} \newcommand{\Complex}{\mathbb{C}} \newcommand{\Field}{\mathbb{F}} \newcommand{\RPlus}{\Real^{+}} \newcommand{\Polar}{\mathcal{P}_{\s}} \newcommand{\Poly}{\mathcal{P}(E)} \newcommand{\EssD}{\mathcal{D}} \newcommand{\Lom}{\mathcal{L}} \newcommand{\States}{\mathcal{T}} \newcommand{\abs}[1]{\left\vert#1\right\vert} \newcommand{\set}[1]{\left\{#1\right\}} \newcommand{\seq}[1]{\left<#1\right>} \newcommand{\norm}[1]{\left\Vert#1\right\Vert} \newcommand{\essnorm}[1]{\norm{#1}_{\ess}} \newcommand{\sgn}{\mathrm{sgn}} \newcommand*\diff{\mathop{}\!\mathrm{d}} \newcommand*\Diff[1]{\mathop{}\!\mathrm{d^#1}} \begin{document} \title[Multi-sum Rogers-Ramanujan type identities] {Multi-sum Rogers-Ramanujan type identities} \author{Zhineng Cao and Liuquan Wang} \address{School of Mathematics and Statistics, Wuhan University, Wuhan 430072, Hubei, People's Republic of China} \email{[email protected]} \address{School of Mathematics and Statistics, Wuhan University, Wuhan 430072, Hubei, People's Republic of China} \email{[email protected];[email protected]} \subjclass[2010]{11P84, 33D15, 33D60} \keywords{Rogers-Ramanujan type identities; sum-product identities; Kanade-Russell identities; partitions; integral method} \begin{abstract} We use an integral method to establish a number of Rogers-Ramanujan type identities involving double and triple sums. The key step for proving such identities is to find some infinite products whose integrals over suitable contours are still infinite products. The method used here is motivated by Rosengren's proof of the Kanade-Russell identities. \end{abstract} \maketitle \section{Introduction}\label{sec-intro} The famous Rogers-Ramanujan identities assert that \begin{align}\label{RR} \sum_{n=0}^\infty \frac{q^{n^2}}{(q;q)_n}=\frac{1}{(q,q^4;q^5)_\infty}, \quad \sum_{n=0}^\infty \frac{q^{n(n+1)}}{(q;q)_n}=\frac{1}{(q^2,q^3;q^5)_\infty}. 
\end{align} Here and throughout this paper, we assume that $|q|<1$ for convergence and use the standard $q$-series notation \begin{align} (a;q)_0:=1, \quad (a;q)_n:=\prod\limits_{k=0}^{n-1}(1-aq^k), \quad (a;q)_\infty :=\prod\limits_{k=0}^\infty (1-aq^k), \\ (a_1,\cdots,a_m;q)_n:=(a_1;q)_n\cdots (a_m;q)_n, \quad n\in \mathbb{N}\cup \{\infty\}. \end{align} These two sum-product identities have fascinating combinatorial interpretations, and they have stimulated a large amount of research on finding similar identities. One famous work in this direction is Slater's list \cite{Slater}, which contains 130 identities of this kind, such as \begin{align} \sum_{n=0}^\infty \frac{q^{2n^2}}{(q;q)_{2n}}&=\frac{1}{(q^2,q^3,q^4,q^5,q^{11},q^{12},q^{13},q^{14};q^{16})_\infty}, \\ \sum_{n=0}^\infty \frac{q^{2n(n+1)}}{(q;q)_{2n+1}}&= \frac{1}{(q,q^4,q^6,q^7,q^9,q^{10},q^{12},q^{15};q^{16})_\infty}. \end{align} Identities similar to \eqref{RR} are called Rogers-Ramanujan type identities. It is natural to consider multi-sum Rogers-Ramanujan type identities. For example, the Andrews-Gordon identity (see \cite{Andrews1974,Gordon1961}), which is a generalization of \eqref{RR}, states that for any integer $k>1$ and $1\leq i \leq k$, \begin{align} &\sum_{n_{k-1}\geq n_{k-2}\geq \cdots \geq n_1\geq 0} \frac{q^{n_1^2+n_2^2+\cdots+n_{k-1}^2+n_i+n_{i+1}+\cdots +n_{k-1}}}{(q;q)_{n_{k-1}-n_{k-2}}(q;q)_{n_{k-2}-n_{k-3}}\cdots (q;q)_{n_2-n_1} (q;q)_{n_1}} \nonumber \\ &=\frac{(q^i,q^{2k+1-i},q^{2k+1};q^{2k+1})_\infty}{(q;q)_\infty}. \label{AG} \end{align} Bressoud \cite{Bressoud1980} provided an even-modulus analog of this identity. In a series of works (see e.g. \cite{Lepowsky-Wilson,Lepowsky-Wilson-1985}), Lepowsky and Wilson developed a Lie-theoretic approach to establishing Rogers-Ramanujan type identities. In particular, they showed that the Rogers-Ramanujan identities, the Andrews-Gordon identity and Bressoud's identity are closely related to the affine Kac-Moody Lie algebra $A_1^{(1)}$. This has motivated the search for similar identities arising from other Lie algebras. See the books \cite{Lost2,Sills-book} for more historical background. In recent years, Kanade and Russell \cite{KR-2019} searched for Rogers-Ramanujan type identities related to level 2 characters of the affine Lie algebra $A_9^{(2)}$, and they conjectured a number of such identities. Let \begin{align} F(u,v,w)&:=\sum_{i,j,k\geq 0} \frac{(-1)^kq^{3k(k-1)+(i+2j+3k)(i+2j+3k-1)}u^iv^jw^k}{(q;q)_i(q^4;q^4)_j(q^6;q^6)_k}, \\ G(u,v,w)&:=\sum_{i,j,k\geq 0}\frac{q^{(i+2j+3k)(i+2j+3k-1)/2+j^2}u^iv^jw^k}{(q;q)_i(q^2;q^2)_j(q^3;q^3)_k}. \end{align} Some of their conjectural identities are \begin{align} F(q,1,q^3)&=\frac{(q^3;q^{12})_\infty}{(q,q^2;q^4)_\infty}, \label{KR-conj-1} \\ F(q,q,q^6)&=\frac{1}{(q^3;q^4)_\infty (q,q^8;q^{12})_\infty}, \label{KR-conj-2} \\ G(q,q^2,q^4)&=\frac{1}{(q;q^3)_\infty (q^3,q^6,q^{11};q^{12})_\infty}, \label{KR-conj-3} \\ G(q^2,q^4,q^5)&=\frac{1}{(q^2;q^3)_\infty (q^3,q^6,q^7;q^{12})_\infty}. \label{KR-conj-4} \end{align} Five of their conjectural identities on $F(u,v,w)$ as well as the identities \eqref{KR-conj-3} and \eqref{KR-conj-4} on $G(u,v,w)$ were confirmed by Bringmann, Jennings-Shaffer and Mahlburg \cite{BSM}. Later, using an integral method, Rosengren \cite{Rosengren} gave proofs of all nine conjectural identities on $F(u,v,w)$. Since there are numerous Rogers-Ramanujan type identities in the literature and some of them have similar shapes, it is more convenient to group some of them together.
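We remark in passing that identities of this type can be checked to any fixed order in $q$ by comparing truncated power series with exact integer arithmetic. The following Python sketch (for illustration only; it plays no role in any proof) verifies the first identity in \eqref{RR} modulo $q^{50}$.
\begin{verbatim}
N = 50  # compare q-expansions modulo q^N, with exact integer coefficients

def mul(a, b):                       # product of two truncated series
    c = [0] * N
    for i, ai in enumerate(a):
        if ai:
            for j, bj in enumerate(b):
                if i + j < N:
                    c[i + j] += ai * bj
    return c

def inv(a):                          # series inverse, assuming a[0] == 1
    b = [1] + [0] * (N - 1)
    for n in range(1, N):
        b[n] = -sum(a[k] * b[n - k] for k in range(1, n + 1))
    return b

def one_minus_q(e):                  # the polynomial 1 - q^e, truncated
    f = [1] + [0] * (N - 1)
    if e < N:
        f[e] -= 1
    return f

# Left side: sum_{n >= 0} q^{n^2} / (q;q)_n.
lhs = [0] * N
qpoch = [1] + [0] * (N - 1)          # running (q;q)_n
n = 0
while n * n < N:
    if n > 0:
        qpoch = mul(qpoch, one_minus_q(n))
    term = inv(qpoch)
    for m in range(N - n * n):
        lhs[m + n * n] += term[m]
    n += 1

# Right side: 1 / (q, q^4; q^5)_infinity.
prod = [1] + [0] * (N - 1)
for a in (1, 4):
    e = a
    while e < N:
        prod = mul(prod, one_minus_q(e))
        e += 5
rhs = inv(prod)

assert lhs == rhs
\end{verbatim}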
Following the notion in \cite{Wang}, for a fixed $k$, we shall call an identity of the following shape (where the left side may be a finite sum of such multiple series) \begin{align}\label{type-defn} \sum_{(i_1,\cdots,i_k)\in S}\frac{(-1)^{t(i_1,\cdots,i_k)}q^{Q(i_1,\cdots,i_k)}}{(q^{n_1};q^{n_1})_{i_1}\cdots (q^{n_k};q^{n_k})_{i_k}}= \prod\limits_{ (a,n)\in P} (q^{a};q^n)_\infty^{r(a,n)} \end{align} a Rogers-Ramanujan type identity of {\it index} $(n_1,n_2,\cdots,n_k)$. Here $t(i_1,\cdots,i_k)$ is an integer-valued function, $Q(i_1,\cdots,i_k)$ is a rational polynomial in the variables $i_1,\cdots,i_k$, $n_1,\cdots, n_k$ are positive integers with $\gcd(n_1,n_2,\cdots,n_k)=1$, $S$ is a subset of $\mathbb{Z}^k$, $P$ is a finite subset of $\mathbb{Q}^2$ and $r(a,n)$ are integer-valued functions. With this notion, we see that the identities \eqref{KR-conj-1} and \eqref{KR-conj-2} are of index $(1,4,6)$ while \eqref{KR-conj-3} and \eqref{KR-conj-4} are of index $(1,2,3)$. There are some other identities similar to \eqref{KR-conj-1}--\eqref{KR-conj-4} in the literature. First, we can find some identities involving double sums of index $(1,2)$, $(1,3)$ and $(1,4)$. For instance, analytical forms of two conjectural partition identities of Capparelli \cite{Capparelli} were given in the work of Kanade and Russell \cite{KR-2019} as well as the work of Kur\c{s}ung\"{o}z \cite{Kursungoz}. These two identities are both of index $(1,3)$, and one of them is \begin{align}\label{Capparelli-eq} \sum_{i,j\geq 0}\frac{q^{2i^2+6ij+6j^2}}{(q;q)_i(q^3;q^3)_j}&=\frac{1}{(q^2,q^3,q^9,q^{10};q^{12})_\infty}. \end{align} Kur\c{s}ung\"{o}z \cite{Kursungoz} also found four identities of index $(1,4)$. Five conjectural identities of index $(1,3)$ were presented in \cite[Conjecture 6.1]{Kursungoz-AnnComb}, such as \begin{align} \sum_{i,j\geq 0}\frac{q^{i^2+3j^2+3ij}}{(q;q)_i(q^3;q^3)_j}=\frac{1}{(q,q^3,q^6,q^8;q^9)_\infty}. \label{K-conj-1} \end{align} They are based on the work of Kanade and Russell \cite{KR-2015} and so far remain open. Andrews \cite{Andrews2019} and Takigiku and Tsuchioka \cite{Takigiku-2019} provided some identities of index $(1,2)$, which can be proved by summing over one of the indices first and then over the other. Uncu and Zudilin \cite{Uncu-Zudilin} presented two identities of index $(1,2)$ and mentioned that they can be explained as instances of Bressoud's identities \cite{Bressoud1979}. Berkovich and Uncu \cite{Berkovich} proved an identity of index $(1,3)$. In 2021, Andrews and Uncu \cite{Andrews-Uncu} proved an identity of index $(1,3)$ and further conjectured that \cite[Conjecture 1.2]{Andrews-Uncu} \begin{align}\label{AU-conj} \sum_{i,j\geq 0}\frac{(-1)^jq^{3j(3j+1)/2+i^2+3ij+i+j}}{(q;q)_i(q^3;q^3)_j}=\frac{1}{(q^2,q^3;q^6)_\infty}. \end{align} This was first proved by Chern \cite{Chern} and then by Wang \cite{Wang}. Through the integral method, Wang \cite{Wang} also provided new proofs of some other double-sum Rogers-Ramanujan type identities of indexes $(1,2)$, $(1,3)$ and $(1,4)$. As for identities involving triple sums or quadruple sums, besides the Kanade-Russell identities of indexes $(1,2,3)$ and $(1,4,6)$ such as \eqref{KR-conj-1}--\eqref{KR-conj-4}, there are other known identities of indexes $(1,1,6)$, $(1,2,2)$, $(1,2,3)$, $(1,1,1,2)$, $(1,2,2,4)$ and $(1,2,3,4)$. For example, Rosengren \cite[Eq.\ (5.3a)]{Rosengren} proved an identity of index $(1,1,6)$. Kanade and Russell \cite{KR-2019} presented four conjectural identities of index $(1,2,3,4)$.
Takigiku and Tsuchioka \cite{Takigiku} proved some identities of indexes $(1,2,2)$ and $(1,2,2,4)$, which are related to the principal characters of the level 5 and level 7 standard modules of the affine Lie algebra $A_2^{(2)}$. For example, they proved that \cite[Theorem 1.3]{Takigiku} \begin{align} &\sum_{i,j,k\geq 0}\frac{q^{\binom{i}{2}+8\binom{j}{2}+10\binom{k}{2}+2ij+2ik+8jk+i+4j+5k}}{(q;q)_i(q^2;q^2)_j(q^2;q^2)_k} \nonumber \\ &=\frac{1}{(q,q^3,q^4,q^5,q^7,q^9,q^{11},q^{13},q^{15},q^{16},q^{17},q^{19};q^{20})_\infty}. \end{align} Recently, Mc Laughlin \cite{Laughlin} applied Rosengren's method in \cite{Rosengren} to derive some new Rogers-Ramanujan type identities, including the following one of index $(1,2,3)$: \begin{align}\label{Laughlin123} \sum_{i,j,k\geq 0} \frac{(-1)^j q^{(3k+2j-i)(3k+2j-i-1)/2+j(j-1)-i+6j+6k}}{(q;q)_i(q^2;q^2)_j(q^3;q^3)_k}=\frac{(-1;q)_\infty (q^{18};q^{18})_\infty}{(q^3;q^3)_\infty (q^9;q^{18})_\infty}. \end{align} Note that in \cite{Laughlin}, such identities are called identities of Kanade-Russell type. In the course of finding generalizations of Capparelli's first partition identity, Dousse and Lovejoy \cite[Eqs.\ (2.6),(2.7)]{Dousse-Lovejoy} proved the following identity of index $(1,1,1,2)$: \begin{align}\label{DL1112} \sum_{i,j,k,l\geq 0} \frac{a^{i+l}b^{j+l}q^{\binom{i+j+k+2l+1}{2}+\binom{i+1}{2}+\binom{j+1}{2}+l}}{(q;q)_i(q;q)_j(q;q)_k(q^2;q^2)_l}=(-q;q)_\infty (-aq^2,-bq^2;q^2)_\infty. \end{align} Motivated by the above works, in this paper, we will use the integral method to establish some Rogers-Ramanujan type identities of the following indexes: $$(1,1),(1,2), (1,1,1), (1,1,2), (1,1,3), (1,2,2), (1,2,3), (1,2,4).$$ Most of our results are new. Some of them contain additional parameters and thus yield infinite families of Rogers-Ramanujan type identities. For instance, we prove that (see Theorems \ref{thm-11-general} and \ref{thm-R-3}) \begin{align} \sum_{i,j\geq 0} \frac{u^{i-j}q^{\binom{i}{2}+\binom{j+1}{2}+a\binom{j-i}{2}}}{(q;q)_i(q;q)_j}&=\frac{(-uq^a,-q/u,q^{a+1};q^{a+1})_\infty}{(q;q)_\infty}, \label{intro-eq-J-3}\\ \sum_{i,j,k\geq0}\frac{(-1)^{i+j}b^{-i+j}c^{i-j+k}q^{(i^{2}+(i-j+2k)^{2}-2i+3j-2k)/2}}{(q;q)_{i}(q;q)_{j}(q^{2};q^{2})_{k}}&=\frac{(-q,bq^{2}/c;q)_{\infty}(bq,c/b;q^{2})_{\infty}} {(b^{2}q^{2}/c;q^{2})_{\infty}}. \end{align} Some of the identities we discovered are quite surprising. For example, we find that for any $u\in \mathbb{C}$ (see Theorems \ref{thm-4112-3} and \ref{thm-123}), \begin{align}\label{intro-eq-4112-3} \sum_{i,j,k\geq0}\frac{(-1)^{i+j}u^{i+3k}q^{(i^{2}-i)/2+(i-2j+3k)^{2}/4}}{(q;q)_{i}(q^{2};q^{2})_{j}(q^{3};q^{3})_{k}}&=\frac{(u^{2};q)_{\infty}(q,-u^{2};q^{2})_{\infty}}{(-u^{6};q^{6})_{\infty}}, \\ \sum_{i,j,k\geq 0}\frac{(-1)^{(i-2j+3k)/2}u^{i+k}q^{(i^{2}-i)/2+(i-2j+3k)^{2}/4}} {(q;q)_{i}(q^{2};q^{2})_{j}(q^{3};q^{3})_{k}} &=\frac{(q;q^{2})_{\infty}(-u^{2};q^{3})_{\infty}} {(u^{2};q^{6})_{\infty}}. \end{align} At first glance, one may well doubt the correctness of these identities: from their expressions, one would expect each left side to be a power series in $q^{1/4}$, but it turns out to be a power series in $q$, as the right side indicates. The rest of this paper is organized as follows. In Section \ref{sec-pre} we collect some useful $q$-series formulas which will be used to derive our identities. In Sections \ref{sec-double} and \ref{sec-triple} we present and prove identities involving double sums and triple sums, respectively.
Finally, we give some concluding remarks in Section \ref{sec-concluding} including a new proof of \eqref{DL1112} via the integral method. \section{Preliminaries}\label{sec-pre} Throughout this paper we will denote $\zeta_n=e^{2\pi i/n}$. First, we need Euler's $q$-exponential identities \begin{align}\label{Euler} \sum_{n=0}^\infty \frac{z^n}{(q;q)_n}=\frac{1}{(z;q)_\infty}, \quad \sum_{n=0}^\infty \frac{q^{\binom{n}{2}} z^n}{(q;q)_n}=(-z;q)_\infty, \quad |z|<1. \end{align} These two identities are corollaries of the $q$-binomial theorem \begin{align}\label{q-binomial} \sum_{n=0}^\infty \frac{(a;q)_n}{(q;q)_n}z^n=\frac{(az;q)_\infty}{(z;q)_\infty}, \quad |z|<1. \end{align} We also need the Jacobi triple product identity \begin{align}\label{Jacobi} (q,z,q/z;q)_\infty=\sum_{n=-\infty}^\infty (-1)^nq^{\binom{n}{2}}z^n. \end{align} We recall the basic hypergeometric series $${}_r\phi_s\bigg(\genfrac{}{}{0pt}{} {a_1,\dots,a_r}{b_1,\dots,b_s};q,z \bigg):=\sum_{n=0}^\infty \frac{(a_1,\dots,a_r;q)_n}{(q,b_1,\dots,b_s;q)_n}\Big((-1)^nq^{\binom{n}{2}} \Big)^{1+s-r}z^n.$$ For a series $f(z)=\sum_{n=-\infty}^\infty a(n)z^n$, we shall use $[z^n]f(z)$ to denote the coefficient of $z^n$. That is, $[z^n]f(z)=a(n)$. We recall the following simple fact \begin{align}\label{int-constant} \oint_K f(z) \frac{dz}{2\pi iz}=[z^0]f(z), \end{align} where $K$ is a positively oriented and simple closed contour around the origin. This fact will be used frequently but usually without mention. There are two steps in using the integral method to prove Rogers-Ramanujan type identities: \begin{itemize} \item \textbf{Step 1.} Express the sum side as a finite sum of integrals of some infinite products. \item \textbf{Step 2.} Evaluate each of these integrals. \end{itemize} The first step is quite straightforward. In the proofs of all the Rogers-Ramanujan type identities appeared in \cite{Rosengren}, \cite{Wang} and this paper, this step will be done by the use of \eqref{Euler} and \eqref{Jacobi}. The main difficulty lies in the second step. In the book \cite[Sections 4.9 and 4.10]{GR-book}, calculations of the integral $$\oint_K \frac{(a_1z,\cdots,a_Az,b_1/z,\cdots,b_B/z;q)_\infty}{(c_1z,\cdots,c_Cz,d_1/z,\cdots,d_D/z;q)_\infty}z^{m}\frac{dz}{2\pi iz} $$ are given. Here $m$ is an integer, $K$ is a deformation of the (positively oriented) unit circle so that the poles of $1/(c_1z,\cdots,c_Cz;q)_\infty$ lie outside the contour and the origin and poles of $1/(d_1/z,\cdots,d_D/z;q)_\infty$ lie inside the contour. Throughout this paper, all the integral paths will be chosen in this way and we will omit them from the integral symbol. We will not need these general calculations. Instead, we recall some known formulas which will suffice to establish our multi-sum Rogers-Ramanujan type identities. First, from \cite[Eq.\ (4.10.8)]{GR-book} we find that when $|a_1a_2a_3|<|c_1c_2c_3|$, \begin{align}\label{GR41010} &\oint \frac{(a_{1}z,a_{2}z,a_{3}z,b_{1}/z;q)_{\infty}} {(c_{1}z,c_{2}z,c_{3}z,d_{1}/z;q)_{\infty}}\frac{dz}{2\pi iz} \\ & = \frac{(a_{1}d_{1},a_{2}d_{1},a_{3}d_{1},b_{1}/d_{1};q)_{\infty}} {(q,c_{1}d_{1},c_{2}d_{1},c_{3}d_{1};q)_{\infty}} \times{}_4\phi _3\left( \begin{gathered} c_{1}d_{1},c_{2}d_{1},c_{3}d_{1},qd_{1}/b_{1}\\ a_{1}d_{1},a_{2}d_{1},a_{3}d_{1} \end{gathered} ;q,b_{1}/d_{1} \right). 
\nonumber \end{align} From \cite[Eq.\ (4.11.2), (4.11.3)]{GR-book} we find \begin{align} \oint \frac{(cz/\beta,qz/c\alpha,c\alpha/z,q\beta/cz;q)_{\infty}}{(az,bz,\alpha/z,\beta/z;q)_{\infty}}\frac{dz}{2\pi iz} =\frac{(ab\alpha\beta,c,q/c,c\alpha/\beta,q\beta/c\alpha;q)_{\infty}}{(a\alpha,a\beta,b\alpha,b\beta,q;q)_{\infty}}, \label{GR4112} \end{align} \begin{align} &\oint \frac{(\delta z,qz/\gamma,\gamma/z,\gamma z/\alpha\beta,q\alpha\beta/\gamma z;q)_{\infty}} {(az,bz,cz,\alpha/z,\beta/z;q)_{\infty}}\frac{dz}{2\pi iz} \nonumber \\ &= \frac{(\gamma /\alpha,q\alpha/\gamma ,\gamma/\beta,q\beta/\gamma,\delta/a,\delta/b,\delta/c;q)_{\infty}} {(a\alpha,a\beta,b\alpha,b\beta,c\alpha,c\beta,q;q)_{\infty}}, \label{GR4113} \end{align} where $\delta=abc\alpha\beta$, $abc\alpha\beta\gamma\neq 0$ and $$a\alpha,a\beta,b\alpha,b\beta,c\alpha,c\beta \neq q^{-n}, \quad n=0,1,2,\dots.$$ Clearly, \eqref{GR4112} follows from \eqref{GR4113} after letting $c\rightarrow 0$. Next, we recall some identities in Rosengren's work \cite{Rosengren}. From \cite[Eq.\ (3.2)]{Rosengren} we know that when $\alpha_1\alpha_2=\beta_1\beta_2\beta_3$, \begin{align}\label{R32} \oint \frac{(\alpha_1z,\alpha_2z,qz,1/z;q)_\infty}{(\beta_1z,\beta_2z,\beta_3z;q)_\infty}\frac{\diff z}{2\pi iz}=\frac{(\beta_1,\alpha_1/\beta_1;q)_\infty}{(q;q)_\infty}{}_2\phi_1\bigg(\genfrac{}{}{0pt}{}{\alpha_2/\beta_2,\alpha_2/\beta_3}{\beta_1};q,\frac{\alpha_1}{\beta_1}\bigg). \end{align} From the proof of \cite[Proposition\ 3.2]{Rosengren}, we conclude that \begin{align}\label{Prop32-proof} \oint \frac{(abz,cz,qz/t,t/z;q)_{\infty}}{(az,bz,cz/t,d/z;q)_{\infty}}\frac{dz}{2\pi iz}=\frac{(abd,dq/t,t,c;q)_{\infty}}{(q,ad,bd,cd/t;q)_{\infty}} {}_3\phi _2\left( \begin{gathered} a,b,cd/t\\ c,abd \end{gathered} ;q,t \right). \end{align} Using the above formulas in Step 2, we can convert the sum-side of our Rogers-Ramanujan type identities to a ${}_r\phi_s$ series. Then to complete Step 2, it remains to evaluate this ${}_r\phi_s$ series. Here we recall the $q$-Gauss summation formula \cite[(\uppercase\expandafter{\romannumeral2}. 8)]{GR-book} \begin{align}\label{q-Gauss} {}_2\phi_1\bigg(\genfrac{}{}{0pt}{}{a,b}{c};q,\frac{c}{ab} \bigg)=\frac{(c/a,c/b;q)_\infty}{(c,c/ab;q)_\infty}, \end{align} the Bailey-Daum summation formula \cite[(\uppercase\expandafter{\romannumeral2}. 9)]{GR-book} \begin{align}\label{BD} {}_2\phi_1\bigg(\genfrac{}{}{0pt}{} {a,b}{aq/b};q,-\frac{q}{b} \bigg)=\frac{(-q;q)_\infty (aq,aq^2/b^2;q^2)_\infty}{(aq/b,-q/b;q)_\infty} \end{align} and the $q$-Dixon summation formula \cite[(\uppercase\expandafter{\romannumeral2}.13)]{GR-book} \begin{align}\label{II13} {}_4\phi _3\left( \begin{gathered} a,-qa^{1/2},b,c\\ -a^{1/2},aq/b,aq/c \end{gathered} ;q, \frac{qa^{1/2}}{bc} \right) =\frac{(aq,qa^{1/2}/b,qa^{1/2}/c,aq/bc;q)_{\infty}} {(aq/b,aq/c,qa^{1/2},qa^{1/2}/bc;q)_{\infty}}. \end{align} \section{Identities involving double sums}\label{sec-double} In this section, we present some identities involving double sums of indexes $(1,1)$ and $(1,2)$. \subsection{Identities of index $(1,1)$} \begin{theorem}\label{thm-R-1} We have \begin{align} \sum_{i,j\geq0}\frac{(-1)^{i+j}u^{i}v^{j}q^{((i-j)^{2}-i-j)/2}}{(q;q)_{i}(q;q)_{j}}= \frac{(u,v;q)_{\infty}}{(uv/q;q)_{\infty}}. \label{eq-R-1} \end{align} \end{theorem} Note that the identity \eqref{eq-R-1} is symmetric in $u$ and $v$. 
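Before giving the proof, we note that \eqref{eq-R-1} can be checked numerically by comparing truncated $q$-expansions with exact integer arithmetic. The sketch below (for illustration only; it is not used in the proof) verifies the specialization $u=v=-q$, for which \eqref{eq-R-1} reads $\sum_{i,j\geq 0} q^{((i-j)^2+i+j)/2}/((q;q)_i(q;q)_j) = (-q;q)_\infty^2/(q;q)_\infty$, modulo $q^{60}$.
\begin{verbatim}
N = 60  # compare q-expansions modulo q^N, with exact integer coefficients

def mul(a, b):                       # product of two truncated series
    c = [0] * N
    for i, ai in enumerate(a):
        if ai:
            for j, bj in enumerate(b):
                if i + j < N:
                    c[i + j] += ai * bj
    return c

def inv(a):                          # series inverse, assuming a[0] == 1
    b = [1] + [0] * (N - 1)
    for n in range(1, N):
        b[n] = -sum(a[k] * b[n - k] for k in range(1, n + 1))
    return b

def one_plus_cq(c, e):               # the polynomial 1 + c*q^e, truncated
    f = [1] + [0] * (N - 1)
    if e < N:
        f[e] += c
    return f

M = 2 * N                            # terms with i or j >= M only affect q^N and beyond
inv_qpoch = []                       # inv_qpoch[n] = 1/(q;q)_n modulo q^N
p = [1] + [0] * (N - 1)
for n in range(M):
    if n > 0:
        p = mul(p, one_plus_cq(-1, n))
    inv_qpoch.append(inv(p))

# Left side of (eq-R-1) at u = v = -q:
# sum_{i,j >= 0} q^{((i-j)^2 + i + j)/2} / ((q;q)_i (q;q)_j).
lhs = [0] * N
for i in range(M):
    for j in range(M):
        e = ((i - j) ** 2 + i + j) // 2
        if e < N:
            term = mul(inv_qpoch[i], inv_qpoch[j])
            for m in range(N - e):
                lhs[m + e] += term[m]

# Right side: (u, v; q)_infinity / (uv/q; q)_infinity at u = v = -q,
# i.e. (-q; q)_infinity^2 / (q; q)_infinity.
minus_q, q_q = [1] + [0] * (N - 1), [1] + [0] * (N - 1)
for e in range(1, N):
    minus_q = mul(minus_q, one_plus_cq(1, e))
    q_q = mul(q_q, one_plus_cq(-1, e))
rhs = mul(mul(minus_q, minus_q), inv(q_q))

assert lhs == rhs
\end{verbatim}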
\begin{proof} Setting $a=c=0$ in \eqref{Prop32-proof}, we deduce that \begin{align} (q;q)_{\infty}\oint \frac{(qz/t,t/z;q)_{\infty}}{(bz,d/z;q)_{\infty}}\frac{dz}{2\pi iz} =\frac{(dq/t,t;q)_{\infty}}{(bd;q)_{\infty}} \sum_{n\geq0}\frac{(b;q)_{n}}{(q;q)_{n}}t^{n} =\frac{(dq/t,bt;q)_{\infty}} {(bd;q)_{\infty}}, \end{align} where for the last equality we used \eqref{q-binomial}. Now by \eqref{Euler} and \eqref{Jacobi}, \[ \begin{split} LHS&=\oint \sum_{i,j\geq0}\sum_{k= -\infty}^{\infty}\frac{(bz)^{i} (d/z)^{j} (-t/z)^{k} q^{(k^{2}-k)/2}}{(q;q)_{i}(q;q)_{j}} \frac{dz}{2\pi iz}\\ &=\sum_{i,j\geq0}\frac{(-1)^{i+j}b^{i}d^{j}t^{i-j}q^{((i-j)^{2}-i+j)/2}}{(q;q)_{i}(q;q)_{j}}. \end{split} \] Here we used \eqref{int-constant} for the second equality. This proves the desired identity after replacing $bt$ by $u$, and $dq/t$ by $v$. \end{proof} We can also prove Theorem \ref{thm-R-1} by the following way. \begin{proof}[Second proof of Theorem \ref{thm-R-1}] Summing over $i$ first using \eqref{Euler} and then applying \eqref{q-binomial}, we have \begin{align*} &\sum_{i,j\geq0}\frac{(-1)^{i+j}u^{i}v^{j}q^{((i-j)^{2}-i-j)/2}}{(q;q)_{i}(q;q)_{j}}=\sum_{j\geq 0} \frac{(-v)^{j}q^{(j^2-j)/2}}{(q;q)_j} \sum_{i\geq 0}\frac{(-uq^{-j})^{i}q^{(i^2-i)/2}}{(q;q)_i} \nonumber \\ &=\sum_{j\geq 0} \frac{(uq^{-j};q)_\infty (-v)^jq^{(j^2-j)/2}}{(q;q)_j} =(u;q)_\infty \sum_{j\geq 0}\frac{(uv/q)^{j}(q/u;q)_j}{(q;q)_j} \nonumber \\ &=\frac{(u,v;q)_\infty }{(uv/q;q)_\infty}. \qedhere \end{align*} \end{proof} Setting $u=-q$, $v=-q^{1/2}$ and $u=-q$, $v=-q$ in Theorem \ref{thm-R-1}, we obtain \begin{align} \sum_{i,j\geq 0}\frac{q^{((i-j)^{2}+i)/2}}{(q;q)_{i}(q;q)_{j}}&=\frac{1}{(q^{1/2};q)_{\infty}^{2}}, \label{eq-thm3.1-cor-1} \\ \sum_{i,j\geq 0}\frac{q^{((i-j)^{2}+i+j)/2}}{(q;q)_{i}(q;q)_{j}}&=\frac{(q^{2};q^{2})_{\infty}^{2}}{(q;q)_{\infty}^{3}}.\label{eq-thm3.1-cor-1.1} \end{align} \begin{theorem}\label{thm-4112-2} We have \begin{equation}\label{eq-4112-2} \sum_{i,j\geq0}\frac{(-1)^{i+j}u^{i}q^{(i-j)^{2}}}{(q^{2};q^{2})_{i}(q^{2};q^{2})_{j}} =\frac{(u;q)_{\infty}(q;q^{2})_{\infty}}{(u;q^{2})_{\infty}^{2}}. \end{equation} \end{theorem} \begin{proof} Setting $c=q^{1/2}$, $a=-b$ and $\alpha=-\beta$ in \eqref{GR4112}, then multiplying both sides by $(q^{2};q^{2})_{\infty}$, we obtain by \eqref{Euler} and \eqref{Jacobi} that the left side of \eqref{GR4112} becomes \begin{align*} LHS&=(q^{2};q^{2})_{\infty}\oint \frac{(qz^{2}/\alpha^{2},q\alpha^{2}/z^{2};q^{2})_{\infty}} {(a^{2}z^{2},\alpha^{2}/z^{2};q^{2})_{\infty}}\frac{dz}{2\pi iz}\\ &=\oint \sum_{i,j\geq0}\sum_{k= -\infty}^{\infty}\frac{(a^{2}z^{2})^{i} (\alpha^{2}/z^{2})^{j} (-q\alpha^{2}/z^{2})^{k}q^{k^{2}-k}}{(q^{2};q^{2})_{i}(q^{2};q^{2})_{j}} \frac{dz}{2\pi iz}\\ &= \sum_{i,j\geq0}\frac{(-1)^{i+j}a^{2i}\alpha^{2i}q^{(i-j)^{2}}}{(q^{2};q^{2})_{i}(q^{2};q^{2})_{j}}, \end{align*} and the right side of \eqref{GR4112} becomes \begin{align*} RHS=\frac{(a^{2}\alpha^{2};q)_{\infty}(q;q^{2})_{\infty}}{(a^{2}\alpha^{2};q^{2})_{\infty}^{2}}. \end{align*} This proves the theorem after replacing $\alpha^2 a^2$ by $u$. 
\end{proof} For example, if we set $u=-q$, $u=-q^{3/2}$ or $u=-q^2$ in the above theorem and replace $q$ by $q^2$ in the second assignment, we obtain \begin{align} \sum_{i,j\geq0}\frac{(-1)^{j}q^{(i-j)^{2}+i}}{(q^{2};q^{2})_{i}(q^{2};q^{2})_{j}}&=\frac{(q;q^{2})_{\infty}^{2}}{(q^{2};q^{4})_{\infty}^{2}}, \\ \sum_{i,j\geq0}\frac{(-1)^{j}q^{2(i-j)^{2}+3i}}{(q^{4};q^{4})_{i}(q^{4};q^{4})_{j}}&= \frac{(q^2,q^{10};q^{8})_{\infty}(q^{3};q^{4})_{\infty}}{(q^{5};q^{4})_{\infty}}, \\ \sum_{i,j\geq0}\frac{(-1)^{j}q^{(i-j)^{2}+2i}}{(q^{2};q^{2})_{i}(q^{2};q^{2})_{j}}&=\frac{(q,q^{2},q^{6};q^{4})_{\infty}}{(q^{5};q^{4})_{\infty}}. \end{align} \begin{theorem}\label{thm-T11} We have \begin{align} \sum_{i,j\geq0}\frac{(-1)^{i+j}q^{(i-j)^{2}/2}(q^{j}-q^{i+1/2})}{(q;q)_{i}(q;q)_{j}} &=\frac{(q^{1/2};q)_{\infty}^{2}} {(q;q)_{\infty}}, \label{T11-2}\\ \sum_{i,j\geq0}\frac{q^{(i-j)^{2}/2}(q^{j}+q^{i+1/2})}{(q;q)_{i}(q;q)_{j}} &=\frac{(q;q^{2})_{\infty}} {(q^{2};q^{2})_{\infty}(q^{1/2};q)_{\infty}^{2}}. \label{T11-3} \end{align} \end{theorem} \begin{proof} From \eqref{GR41010} and \eqref{II13} we have \begin{align}\label{Eq14} &\oint \frac{(-a^{1/2}z,a^{1/2}qz,abz,b/z;q)_{\infty}} {(az,-a^{1/2}qz,a^{1/2}z,1/z;q)_{\infty}}\frac{dz}{2\pi iz} \nonumber \\ & = \frac{(-a^{1/2},a^{1/2}q,ab,b;q)_{\infty}} {(q,a,-a^{1/2}q,a^{1/2};q)_{\infty}} {}_4\phi _3\left( \begin{gathered} a,-a^{1/2}q,a^{1/2},q/b\\ -a^{1/2},a^{1/2}q,ab \end{gathered} ;q,b \right) \nonumber \\ &=\frac{(-a^{1/2},aq,a^{1/2}b,a^{1/2}b;q)_{\infty}} {(a^{1/2},a,-a^{1/2}q,a^{1/2}q;q)_{\infty}}. \end{align} Let $a=q^{2}$ in \eqref{Eq14}. We obtain \begin{align}\label{Eq15} \oint \frac{(-qz,bq^{2}z,b/z;q)_{\infty}} {(-q^{2}z,qz,1/z;q)_{\infty}}\frac{dz}{2\pi iz} =\frac{(-q,q^{3},bq,bq;q)_{\infty}} {(q,q^{2},-q^{2},q^{2};q)_{\infty}}. \end{align} Setting $b=q^{-1/2}$ in \eqref{Eq15} and multiplying both sides by $(q;q)_\infty$, we see that its left side becomes \begin{align*} &(q;q)_{\infty} \oint \frac{(-qz,q^{3/2}z,1/q^{1/2}z;q)_{\infty}} {(-q^{2}z,qz,1/z;q)_{\infty}}\frac{dz}{2\pi iz} \\ &=\oint (1+qz)\sum_{i,j\geq0}\frac{(qz)^{i}(1/z)^{j}}{(q;q)_{i}(q;q)_{j}} \sum_{k= -\infty}^{\infty}(-q^{1/2}z)^{-k}q^{(k^{2}-k)/2}\frac{dz}{2\pi iz} \\ &=\sum_{i,j\geq0}\frac{(-1)^{i+j}q^{(i-j)^{2}/2}(q^{j}-q^{i+1/2})}{(q;q)_{i}(q;q)_{j}}, \end{align*} and its right side becomes \begin{align*} RHS=\frac{(-q,q^{3},q^{1/2},q^{1/2};q)_{\infty}} {(q^{2},-q^{2},q^{2};q)_{\infty}} =\frac{(q^{1/2};q)_{\infty}^{2}} {(q;q)_{\infty}}. \end{align*} This proves \eqref{T11-2}. Similarly, setting $b=-q^{-1/2}$ in \eqref{Eq15} and applying \eqref{Euler} and \eqref{Jacobi}, we obtain \eqref{T11-3}. \end{proof} Note that if we set $b=-1$ in \eqref{Eq15}, then we obtain \eqref{eq-thm3.1-cor-1.1}. \begin{rem}\label{rem-sec3} Similar to the second proof of Theorem \ref{thm-R-1}, Theorems \ref{thm-4112-2} and \ref{thm-T11} can also be proved by summing over one of the index first. We omit these proofs. \end{rem} Now we present another set of Rogers-Ramanujan type identities of index $(1,1)$. These identities are proved by repeated use of the Jacobi triple product identity, and we do not need to calculate any ${}_r\phi_s$ series. \begin{theorem}\label{thm-11-general} We have \begin{align} \sum_{i,j\geq 0} \frac{u^{i-j}q^{\binom{i}{2}+\binom{j+1}{2}+a\binom{j-i}{2}}}{(q;q)_i(q;q)_j}=\frac{(-uq^a,-q/u,q^{a+1};q^{a+1})_\infty}{(q;q)_\infty}. 
\end{align} \end{theorem} \begin{proof} By the Jacobi triple product identity, we have \begin{align*} &(q;q)_\infty (q^a;q^a)_\infty \oint (uz,q/uz;q)_\infty (z,q^a/z;q^a)_\infty \frac{dz}{2\pi iz} \nonumber \\ &=\oint \sum_{i,j=-\infty}^\infty (-uz)^i q^{\binom{i}{2}} (-z)^jq^{a\binom{j}{2}}\frac{dz}{2\pi iz} \nonumber \\ &=\sum_{i=-\infty}^\infty u^iq^{(a-1)i/2}q^{(a+1)i^2/2} \nonumber \\ &=(-uq^a,-q/u,q^{a+1};q^{a+1})_\infty. \end{align*} By \eqref{Euler} and \eqref{Jacobi}, the left side of this identity can also be written as \begin{align*} LHS&=(q;q)_\infty \oint \sum_{i,j\geq 0}\frac{(-uz)^iq^{\binom{i}{2}}}{(q;q)_i}\cdot \frac{(-q/uz)^jq^{\binom{j}{2}}}{(q;q)_j}\cdot \sum_{k=-\infty}^\infty (-z)^k q^{a\binom{k}{2}}\frac{dz}{2\pi iz} \nonumber \\ &=(q;q)_\infty\sum_{i,j\geq 0}\frac{u^{i-j}q^{\binom{i}{2}+\binom{j+1}{2}+a\binom{j-i}{2}}}{(q;q)_i(q;q)_j}. \end{align*} This proves the desired identity. \end{proof} Replacing $q$ by $q^{m_1}$ and setting $a=m_2/m_1$ and $u=\pm q^{n}$, where $m_1,m_2>0$ and $n\in \mathbb{R}$, we obtain the following corollary. \begin{corollary}\label{cor-Jacobi-add-1} We have \begin{align} &\sum_{i,j\geq 0}\frac{q^{((m_{1}+m_{2})(i^{2}+j^{2})-2m_{2}ij+(2n-m_{1}+m_{2})(i-j))/2}}{(q^{m_{1}};q^{m_{1}})_{i}(q^{m_{1}};q^{m_{1}})_{j}} \nonumber \\ &=\frac{(-q^{m_{1}-n},-q^{m_{2}+n},q^{m_{1}+m_{2}};q^{m_{1}+m_{2}})_{\infty}} {(q^{m_{1}};q^{m_{1}})_{\infty}}, \label{eq-J-1} \\ &\sum_{i,j\geq 0}\frac{(-1)^{i+j}q^{((m_{1}+m_{2})(i^{2}+j^{2})-2m_{2}ij+(2n-m_{1}+m_{2})(i-j))/2}}{(q^{m_{1}};q^{m_{1}})_{i}(q^{m_{1}};q^{m_{1}})_{j}} \nonumber \\ &=\frac{(q^{m_{1}-n},q^{m_{2}+n},q^{m_{1}+m_{2}};q^{m_{1}+m_{2}})_{\infty}} {(q^{m_{1}};q^{m_{1}})_{\infty}}. \label{eq-J-2} \end{align} \end{corollary} As examples, if we set $(m_1,m_2,n)=(1,3,-1)$ in \eqref{eq-J-1}, we obtain $$\sum_{i,j=0}^\infty \frac{q^{2(i^2+j^2)-3ij}}{(q;q)_i(q;q)_j}=\frac{(-q^2,-q^2,q^4;q^4)_\infty}{(q;q)_\infty}.$$ Setting $(m_1,m_2,n)$ as $(3,4,0)$, $(3,4,1)$ or $(3,4,2)$ in \eqref{eq-J-2}, we obtain \begin{align} \sum_{i,j\geq 0}\frac{(-1)^{i+j}q^{(7i^{2}+7j^{2}-8ij+i-j)/2}}{(q^{3};q^{3})_{i}(q^{3};q^{3})_{j}}&=\frac{(q^{3},q^{4},q^{7};q^{7})_{\infty}}{(q^{3};q^{3})_{\infty}}, \\ \sum_{i,j\geq 0}\frac{(-1)^{i+j}q^{(7i^{2}+7j^{2}-8ij+3i-3j)/2}}{(q^{3};q^{3})_{i}(q^{3};q^{3})_{j}}&= \frac{(q^{2},q^{5},q^{7};q^{7})_{\infty}}{(q^{3};q^{3})_{\infty}}, \\ \sum_{i,j\geq 0}\frac{(-1)^{i+j}q^{(7i^{2}+7j^{2}-8ij+5i-5j)/2}}{(q^{3};q^{3})_{i}(q^{3};q^{3})_{j}}&= \frac{(q,q^{6},q^{7};q^{7})_{\infty}}{(q^{3};q^{3})_{\infty}}. \end{align} \begin{theorem}\label{thm-J-3} We have \begin{align}\label{eq-thm-J-3} &\sum_{i,j\geq0}\frac{(-1)^{i+j}u^{i-j}q^{(i^{2}-i+j^{2}-j+4a(i-j)^{2})/2}}{(q;q)_{i}(q;q)_{j}} \\ &=\frac{(u^{-1}q^{2a},uq^{2a+1},q^{4a+1};q^{4a+1})_{\infty}+ (uq^{2a},u^{-1}q^{2a+1},q^{4a+1};q^{4a+1})_{\infty}}{(q;q)_{\infty}}. 
\nonumber \end{align} \end{theorem} \begin{proof} By the Jacobi triple product identity, we have \begin{align*} &(q;q)_{\infty}(q^{a};q^{a})_{\infty}\oint (uz^{2},1/uz^{2};q)_{\infty}(q^{a/2}z,q^{a/2}/z;q^{a})_{\infty} \frac{dz}{2\pi iz}\\ &= \oint (1-uz^{2}) \sum_{i,j=-\infty}^{\infty}(-1/uz^{2})^{i}q^{(i^{2}-i)/2}(-q^{a/2}z)^{j}q^{a(j^{2}-j)/2} \frac{dz}{2\pi iz} \\ &= \oint \Big(\sum_{i,j=-\infty}^{\infty}(-1/uz^{2})^{i}q^{(i^{2}-i)/2}(-q^{a/2}z)^{j}q^{a(j^{2}-j)/2} \\ &\quad -uz^{2}\sum_{i,j=-\infty}^{\infty}(-1/uz^{2})^{i}q^{(i^{2}-i)/2}(-q^{a/2}z)^{j}q^{a(j^{2}-j)/2} \Big)\frac{dz}{2\pi iz} \\ &=\sum_{i=-\infty}^{\infty} \big((-1)^{i}u^{-i}q^{((4a+1)i^{2}-i)/2}+(-1)^{i}u^{-i}q^{((4a+1)i^{2}+i)/2}\big) \qquad \\ &=(u^{-1}q^{2a},uq^{2a+1},q^{4a+1};q^{4a+1})_{\infty}+ (uq^{2a},u^{-1}q^{2a+1},q^{4a+1};q^{4a+1})_{\infty}. \end{align*} Here the third equality follows since, in the first sum, only the terms with $j=2i$ contribute to the integral, and in the second sum, only the terms with $j=2i-2$ contribute to the integral. We have also replaced $i$ by $i+1$ in the result of integrating the second sum. By \eqref{Euler} and \eqref{Jacobi}, we see that the left side of the above identity is \begin{align*} LHS&=(q;q)_{\infty}\oint \sum_{i,j\geq0}\sum_{k= -\infty}^{\infty}\frac{(-uz^{2})^{i}q^{(i^{2}-i)/2} (-1/uz^{2})^{j} q^{(j^{2}-j)/2} (-q^{a/2}/z)^{k}q^{a(k^{2}-k)/2}}{(q;q)_{i}(q;q)_{j}} \frac{dz}{2\pi iz}\\ &=(q;q)_{\infty} \sum_{i,j\geq 0}\frac{(-1)^{i+j}u^{i-j}q^{(i^{2}-i+j^{2}-j+4a(i-j)^{2})/2}}{(q;q)_{i}(q;q)_{j}}. \end{align*} This proves the theorem. \end{proof} If we set $u=\pm 1$, $u=q^{2a}$ or $u=q^{2a+1}$ in Theorem \ref{thm-J-3}, we obtain the following corollary. \begin{corollary}\label{cor-J-4} We have \begin{align}\label{eq-J-3} \sum_{i,j\geq0}\frac{(-1)^{i+j}q^{(i^{2}-i+j^{2}-j+4a(i-j)^{2})/2}}{(q;q)_{i}(q;q)_{j}}&=\frac{2(q^{2a},q^{2a+1},q^{4a+1};q^{4a+1})_{\infty}}{(q;q)_{\infty}}, \\ \sum_{i,j\geq0}\frac{q^{(i^{2}-i+j^{2}-j+4a(i-j)^{2})/2}}{(q;q)_{i}(q;q)_{j}}&=\frac{2(-q^{2a},-q^{2a+1},q^{4a+1};q^{4a+1})_{\infty}}{(q;q)_{\infty}}, \\ \sum_{i,j\geq0}\frac{(-1)^{i+j}q^{2a(i-j)}q^{(i^{2}-i+j^{2}-j+4a(i-j)^{2})/2}}{(q;q)_{i}(q;q)_{j}}&=\frac{(q,q^{4a},q^{4a+1};q^{4a+1})_\infty}{(q;q)_\infty}, \\ \sum_{i,j\geq0}\frac{(-1)^{i+j}q^{(2a+1)(i-j)}q^{(i^{2}-i+j^{2}-j+4a(i-j)^{2})/2}}{(q;q)_{i}(q;q)_{j}}&=\frac{(q^{-1},q^{4a+2},q^{4a+1};q^{4a+1})_\infty}{(q;q)_\infty}. \end{align} \end{corollary} Setting $a=2$ and $a=3$ in the first two identities in Corollary \ref{cor-J-4}, we obtain \begin{align} \sum_{i,j\geq 0}\frac{(-1)^{i+j}q^{(i^{2}-i+j^{2}-j+8(i-j)^{2})/2}}{(q;q)_{i}(q;q)_{j}}&= \frac{2(q^{4},q^{5},q^{9};q^{9})_{\infty}}{(q;q)_{\infty}}, \\ \sum_{i,j\geq 0}\frac{(-1)^{i+j}q^{(i^{2}-i+j^{2}-j+12(i-j)^{2})/2}}{(q;q)_{i}(q;q)_{j}}&=\frac{2(q^{6},q^{7},q^{13};q^{13})_{\infty}}{(q;q)_{\infty}}, \\ \sum_{i,j\geq 0}\frac{q^{(i^{2}-i+j^{2}-j+8(i-j)^{2})/2}}{(q;q)_{i}(q;q)_{j}}&= \frac{2(-q^{4},-q^{5},q^{9};q^{9})_{\infty}}{(q;q)_{\infty}}, \\ \sum_{i,j\geq 0}\frac{q^{(i^{2}-i+j^{2}-j+12(i-j)^{2})/2}}{(q;q)_{i}(q;q)_{j}}&=\frac{2(-q^{6},-q^{7},q^{13};q^{13})_{\infty}}{(q;q)_{\infty}}. \end{align} \subsection{Identities of index $(1,2)$} \begin{theorem}\label{thm-R-5} We have \begin{align} \sum_{i,j\geq0}\frac{(-1)^{i}u^{i+j}q^{i^2+2ij+2j^2-i-j}}{(q;q)_{i}(q^{2};q^{2})_{j}}=(u;q^{2})_{\infty}, \label{eq-R-5a} \\ \sum_{i,j\geq0}\frac{(-1)^{i} u^{i+2j}q^{i^2+2ij+2j^2-i-j}}{(q;q)_{i}(q^{2};q^{2})_{j}}=(u;q)_{\infty}.
\label{eq-R-5b} \end{align} \end{theorem} \begin{proof} Setting $\alpha_{1}=\beta_{2}$ in \eqref{R32} and using \eqref{q-binomial}, we deduce that \begin{align}\label{eq2.1} \oint \frac{(\beta_{1}\beta_{3}z,qz,1/z;q)_{\infty}}{(\beta_{1}z,\beta_{3}z;q)_{\infty}}\frac{dz}{2\pi iz}&=\frac{(\beta_1,\beta_2/\beta_1;q)_\infty}{(q;q)_\infty} \sum_{n=0}^\infty \frac{(\beta_1\beta_3/\beta_2;q)_n}{(q;q)_n}\left(\frac{\beta_2}{\beta_1}\right)^n \nonumber \\ &=\frac{(\beta_{1},\beta_{3};q)_{\infty}}{(q;q)_{\infty}}. \end{align} Setting $\beta_{1}=-\beta_{3}$ in \eqref{eq2.1}, we obtain \begin{align}\label{L-constant} (q;q)_{\infty}\oint \frac{(-\beta_{1}^{2}z,qz,1/z;q)_{\infty}}{(\beta_{1}^{2}z^{2};q^{2})_{\infty}}\frac{dz}{2\pi iz} = (\beta_{1}^{2};q^{2})_{\infty}. \end{align} By \eqref{Euler} and \eqref{Jacobi}, we see that its left side is \begin{align*} LHS&=\oint \sum_{i,j\geq0}\sum_{k= -\infty}^{\infty}\frac{(\beta_{1}^{2}z)^{i}q^{(i^{2}-i)/2} (\beta_{1}^{2}z^{2})^{j} (-1/z)^{k}q^{(k^{2}-k)/2} }{(q;q)_{i}(q^{2};q^{2})_{j}} \frac{dz}{2\pi iz}\\ &=\sum_{i,j\geq 0}\frac{(-1)^{i}\beta_{1}^{2i+2j}q^{(i^{2}+(i+2j)^{2}-2i-2j)/2}}{(q;q)_{i}(q^{2};q^{2})_{j}}. \end{align*} This proves \eqref{eq-R-5a} after replacing $\beta_1^2$ by $u$. Replacing $q$ by $q^{2}$ in \eqref{eq2.1} and setting $\beta_{3}=\beta_{1}q$, we obtain \begin{align*} (q^{2};q^{2})_{\infty}\oint \frac{(\beta_{1}^{2}qz,q^{2}z,1/z;q^{2})_{\infty}}{(\beta_{1}z;q)_{\infty}}\frac{dz}{2\pi iz} = (\beta_{1};q)_{\infty}. \end{align*} By \eqref{Euler} and \eqref{Jacobi}, we see that its left side is \begin{align*} LHS&=\oint \sum_{i,j\geq 0} \sum_{k= -\infty}^{\infty}\frac{(\beta_{1}z)^{i} (-\beta_{1}^{2}qz)^{j}q^{j^{2}-j} (-1/z)^{k}q^{k^{2}-k} }{(q;q)_{i}(q^{2};q^{2})_{j}} \frac{dz}{2\pi iz}\\ &=\sum_{i,j\geq 0}\frac{(-1)^{i}\beta_{1}^{i+2j}q^{j^{2}+(i+j)^{2}-i-j}}{(q;q)_{i}(q^{2};q^{2})_{j}}. \end{align*} This proves \eqref{eq-R-5b} after replacing $\beta_1$ by $u$. \end{proof} For example, if we set $u=q$ and $q^{2}$ in \eqref{eq-R-5a}, we obtain \begin{align} \sum_{i,j\geq 0}\frac{(-1)^{i}q^{i^{2}+2ij+2j^2}}{(q;q)_{i}(q^{2};q^{2})_{j}}&=(q;q^{2})_{\infty}, \label{add-12-1}\\ \sum_{i,j\geq 0}\frac{(-1)^{i}q^{i^{2}+2ij+2j^2+i+j}}{(q;q)_{i}(q^{2};q^{2})_{j}}&=(q^{2};q^{2})_{\infty}. \label{add-12-2} \end{align} If we set $u=q$ and $-q$ in \eqref{eq-R-5b}, we obtain \begin{align} \sum_{i,j\geq 0}\frac{(-1)^{i}q^{i^{2}+2ij+2j^{2}+j}}{(q;q)_{i}(q^{2};q^{2})_{j}}&= (q;q)_{\infty}, \label{add-12-3} \\ \sum_{i,j\geq 0}\frac{q^{i^{2}+2ij+2j^{2}+j}}{(q;q)_{i}(q^{2};q^{2})_{j}}&=\frac{1}{(q;q^{2})_{\infty}}. \label{add-12-4} \end{align} Note that \eqref{add-12-4} recovers \cite[Eq.\ (1.20)]{Wang}, and hence \eqref{eq-R-5b} can be viewed as a generalization of it. \begin{rem} The identity \eqref{eq-R-5a} can also be deduced from the following identity in Lovejoy's work \cite[Eq.\ (1.7)]{Lovejoy2006}: \begin{align}\label{Lovejoy-constant-eq} [z^0]\frac{(-azq,-zq,-1/z;q)_\infty}{(-aqz^2;q^2)_\infty}=(-aq;q^2)_\infty. \end{align} Indeed, after setting $aq=-\beta_1^2$ and replacing $z$ by $-z$, we see that this identity is equivalent to \eqref{L-constant}. Lovejoy \cite{Lovejoy2006} also provided a partition interpretation of \eqref{Lovejoy-constant-eq}, and hence the identity \eqref{eq-R-5a} can also be interpreted as a partition identity. \end{rem} \section{Identities involving triple sums}\label{sec-triple} In this section, we will establish Rogers-Ramanujan type identities involving triple sums.
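Identities such as \eqref{add-12-1}--\eqref{add-12-4}, as well as the triple-sum identities established below, can be tested numerically by truncating both sides as power series in $q$ and comparing coefficients. The following short Python sketch (included only as a sanity check, not as part of any proof; the helper functions are ours) performs this comparison for \eqref{add-12-1} up to the order $q^{40}$.
\begin{verbatim}
N = 40  # compare coefficients of q^0, ..., q^N

def mul(a, b):
    # product of two truncated power series in q
    c = [0] * (N + 1)
    for i, ai in enumerate(a):
        if ai:
            for j, bj in enumerate(b):
                if i + j > N:
                    break
                c[i + j] += ai * bj
    return c

def inv(a):
    # multiplicative inverse of a series with constant term 1
    b = [0] * (N + 1)
    b[0] = 1
    for n in range(1, N + 1):
        b[n] = -sum(a[k] * b[n - k] for k in range(1, n + 1))
    return b

def poch(c, step, m=None):
    # (q^c; q^step)_m as a truncated series; infinite product if m is None
    a = [0] * (N + 1)
    a[0] = 1
    e, r = c, 0
    while e <= N and (m is None or r < m):
        for n in range(N, e - 1, -1):
            a[n] -= a[n - e]
        e, r = e + step, r + 1
    return a

# left side of (add-12-1): sum of (-1)^i q^{i^2+2ij+2j^2} / ((q;q)_i (q^2;q^2)_j)
lhs = [0] * (N + 1)
i = 0
while i * i <= N:
    j = 0
    while i * i + 2 * i * j + 2 * j * j <= N:
        e = i * i + 2 * i * j + 2 * j * j
        term = inv(mul(poch(1, 1, i), poch(2, 2, j)))
        for n in range(e, N + 1):
            lhs[n] += (-1) ** i * term[n - e]
        j += 1
    i += 1

rhs = poch(1, 2)  # (q; q^2)_infinity
print("coefficients agree up to q^N:", lhs == rhs)
\end{verbatim}
The same routine, with the obvious changes to the exponents and to the $q$-Pochhammer factors, applies to any identity in this paper whose exponents are integers.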
\subsection{Identities of index $(1,1,1)$} \begin{theorem}\label{thm-R-4} We have \begin{align}\label{eq-111} \sum_{i,j,k\geq0}\frac{(-1)^{j+k}\beta_{1}^{i+j}\beta_{3}^{i+k}q^{(i^{2}+(i+j+k)^{2}-2i-j-k)/2}}{(q;q)_{i}(q;q)_{j}(q;q)_{k}}=(\beta_{1},\beta_{3};q)_{\infty}. \end{align} \end{theorem} \begin{proof} Recall the identity \eqref{eq2.1}. Multiplying both sides of \eqref{eq2.1} by $(q;q)_{\infty}$ and applying \eqref{Euler} and \eqref{Jacobi}, we see that its left side becomes \begin{align*} LHS&=\oint \sum_{i,j,k\geq0}\sum_{l= -\infty}^{\infty}\frac{(-\beta_{1}\beta_{3}z)^{i}q^{(i^{2}-i)/2} (\beta_{1}z)^{j} (\beta_{3}z)^{k} (-1/z)^{l}q^{(l^{2}-l)/2}}{(q;q)_{i}(q;q)_{j}(q;q)_{k}} \frac{dz}{2\pi iz}\\ &=\sum_{i,j,k\geq0}\frac{(-1)^{j+k}\beta_{1}^{i+j}\beta_{3}^{i+k}q^{(i^{2}+(i+j+k)^{2}-2i-j-k)/2}}{(q;q)_{i}(q;q)_{j}(q;q)_{k}}, \end{align*} while its right side becomes $(\beta_{1},\beta_{3};q)_{\infty}$. This proves the theorem. \end{proof} For example, if we set $\beta_{1}=-q^{1/4}$, $\beta_{3}=-q^{1/2}$ and replace $q$ by $q^4$, we obtain \begin{align} \sum_{i,j,k\geq0}\frac{q^{2i^{2}+2(i+j+k)^{2}-i-j}}{(q^4;q^4)_{i}(q^4;q^4)_{j}(q^4;q^4)_{k}}= \frac{(q^4;q^{8})_{\infty}}{(q;q^4)_{\infty}(q^{6};q^{8})_{\infty}}. \end{align} \begin{rem}\label{rem-111} The identity \eqref{eq-111} appeared in Lovejoy's work \cite{Lovejoy2017}, where it is viewed as a generalization of a partition theorem of Schur. See Section \ref{sec-concluding} for more discussion. \end{rem} \subsection{Identities of index $(1,1,2)$} \begin{theorem}\label{thm-R-3} We have \begin{align} \sum_{i,j,k\geq0}\frac{(-1)^{i+j}b^{-i+j}c^{i-j+k}q^{(i^{2}+(i-j+2k)^{2}-2i+3j-2k)/2}}{(q;q)_{i}(q;q)_{j}(q^{2};q^{2})_{k}}=\frac{(-q,bq^{2}/c;q)_{\infty}(bq,c/b;q^{2})_{\infty}} {(b^{2}q^{2}/c;q^{2})_{\infty}}. \end{align} \end{theorem} \begin{proof} Setting $a=0$, $t=-c/b$ and $d=-q/c$ in \eqref{Prop32-proof}, we have by \eqref{BD} that \begin{align} & (q;q)_{\infty}\oint \frac{(cz,-bqz/c,-c/bz;q)_{\infty}}{(b^{2}z^{2};q^{2})_{\infty}(-q/cz;q)_{\infty}}\frac{dz}{2\pi iz} \nonumber \\ & = \frac{(bq^{2}/c^{2},-c/b,c;q)_{\infty}}{(-bq/c,bq/c;q)_{\infty}} {}_2\phi _1\left( \begin{gathered} b,bq/c\\ c \end{gathered} ;q,-c/b \right) \nonumber \\ &=\frac{(-q,bq^{2}/c^{2};q)_{\infty}(bq,c^{2}/b;q^{2})_{\infty}} {(b^{2}q^{2}/c^{2};q^{2})_{\infty}}. \end{align} By \eqref{Euler} and \eqref{Jacobi}, its left side is \begin{align*} LHS&=\oint \sum_{i,j,k\geq0}\sum_{l= -\infty}^{\infty}\frac{(-cz)^{i}q^{(i^{2}-i)/2} (-q/cz)^{j} (b^{2}z^{2})^{k} (c/bz)^{l}q^{(l^{2}-l)/2}}{(q;q)_{i}(q;q)_{j}(q^{2};q^{2})_{k}} \frac{dz}{2\pi iz} \\ &=\sum_{i,j,k\geq0}\frac{(-1)^{i+j}c^{2i-2j+2k}b^{-i+j}q^{(i^{2}+(i-j+2k)^{2}-2i+3j-2k)/2}}{(q;q)_{i}(q;q)_{j}(q^{2};q^{2})_{k}}. \end{align*} Replacing $c^2$ by $c$, we prove the theorem. \end{proof} Setting $(b,c)=(q^{1/2},q^2)$, $(-q^{1/2},q^2)$ and $(q^{1/2},q)$ and replacing $q$ by $q^2$, we obtain \begin{align} \sum_{i,j,k\geq 0}\frac{(-1)^{i+j}q^{i^{2}+(i-j+2k)^{2}+i+2k}}{(q^2;q^2)_{i}(q^2;q^2)_{j}(q^4;q^4)_{k}}&= \frac{(q;q^2)_{\infty}(q^{3};q^{4})_{\infty}^{2}}{(q^2;q^{4})_{\infty}^{2}}, \\ \sum_{i,j,k\geq 0}\frac{q^{i^{2}+(i-j+2k)^{2}+i+2k}}{(q^2;q^2)_{i}(q^2;q^2)_{j}(q^4;q^4)_{k}}&= \frac{(q^{6};q^{8})_{\infty}^{2}}{(q;q^2)_{\infty}(q^2;q^{4})_{\infty}(q^{3};q^{4})_{\infty}^{2}}, \\ \sum_{i,j,k\geq 0}\frac{(-1)^{i+j}q^{i^{2}+(i-j+2k)^{2}-i+2j}}{(q^2;q^2)_{i}(q^2;q^2)_{j}(q^4;q^4)_{k}}&= \frac{(q,q^3;q^2)_{\infty}}{(q^2;q^2)_{\infty}}.
\end{align} \begin{theorem}\label{thm-4112-1} We have \begin{align}\label{eq-4112-1} \sum_{i,j,k\geq0}\frac{(-1)^{i}c^{2i-j+2k}d^{j}q^{(i^{2}+(i-j+2k)^{2}-2i+j-2k)/2}}{(q;q)_{i}(q;q)_{j}(q^{2};q^{2})_{k}}=\frac{(-d q/c;q)_{\infty}(c^{2};q^{2})_{\infty}}{(d^{2};q^{2})_{\infty}}. \end{align} \end{theorem} \begin{proof} Setting $\beta=-\alpha$ and $a=q/c\alpha$ in \eqref{GR4112}, we obtain \begin{align*} (q;q)_{\infty}\oint \frac{(-cz/\alpha,-q\alpha/cz,c\alpha/z;q)_{\infty}}{(bz;q)_{\infty}(\alpha^{2}/z^{2};q^{2})_{\infty}}\frac{dz}{2\pi iz} =\frac{(-b\alpha q/c;q)_{\infty}(c^{2};q^{2})_{\infty}}{(\alpha^{2}b^{2};q^{2})_{\infty}}. \end{align*} By \eqref{Euler} and \eqref{Jacobi} we see that its left side is \begin{align*} LHS&=\oint \sum_{i,j,k\geq 0}\sum_{l= -\infty}^{\infty}\frac{(-c\alpha/z)^{i}q^{(i^{2}-i)/2} (bz)^{j} (\alpha^{2}/z^{2})^{k} (cz/\alpha)^{l}q^{(l^{2}-l)/2}}{(q;q)_{i}(q;q)_{j}(q^{2};q^{2})_{k}} \frac{dz}{2\pi iz}\\ &= \sum_{i,j,k\geq0}\frac{(-1)^{i}c^{2i-j+2k}\alpha^{j}b^{j}q^{(i^{2}+(i-j+2k)^{2}-2i+j-2k)/2}}{(q;q)_{i}(q;q)_{j}(q^{2};q^{2})_{k}}. \end{align*} This proves the theorem after replacing $\alpha b$ by $d$. \end{proof} For example, if we replace $q$ by $q^4$ and set $(c,d)=(q^2,q)$ or $(q^2,q^3)$, we obtain \begin{align} \sum_{i,j,k\geq0}\frac{(-1)^{i}q^{2i^{2}+2(i-j+2k)^{2}+j}}{(q^{4};q^{4})_{i}(q^{4};q^{4})_{j}(q^{8};q^{8})_{k}}&= \frac{(q^{4},q^{6};q^{8})_{\infty}}{(q^{2},q^{3},q^{7};q^{8})_{\infty}}, \\ \sum_{i,j,k\geq0}\frac{(-1)^{i}q^{2i^{2}+2(i-j+2k)^{2}+3j}}{(q^{4};q^{4})_{i}(q^{4};q^{4})_{j}(q^{8};q^{8})_{k}}&= \frac{(q^{4},q^{10};q^{8})_{\infty}}{(q^{5},q^{6},q^{9};q^{8})_{\infty}}. \end{align} \subsection{Identities of index $(1,1,3)$} \begin{theorem}\label{thm-R-6} We have \begin{align}\label{eq-R-6} \sum_{i,j,k\geq0}\frac{(-1)^{k}u^{2i+j+3k}q^{(i^{2}+j^{2}+(i+j+3k)^{2}-2i-2j-3k)/2}}{(q;q)_{i}(q;q)_{j}(q^{3};q^{3})_{k}}=\frac{(u^{3};q^{3})_{\infty}}{(u;q)_{\infty}}. \end{align} \end{theorem} \begin{proof} Setting $\beta_{1}=\zeta_3 u$ and $\beta_{3}=\zeta_3^{2}u$ in \eqref{eq2.1}, we obtain \begin{align*} (q;q)_{\infty}\oint \frac{(u^{2}z,uz,qz,1/z;q)_{\infty}}{(u^{3}z^{3};q^{3})_{\infty}}\frac{dz}{2\pi iz} = \frac{(u^{3};q^{3})_{\infty}}{(u;q)_{\infty}}. \end{align*} By \eqref{Euler} and \eqref{Jacobi}, we see that its left side is \begin{align*} LHS&=\oint \sum_{i,j,k\geq0}\sum_{l= -\infty}^{\infty}\frac{(-u^{2}z)^{i}q^{(i^{2}-i)/2} (-uz)^{j}q^{(j^{2}-j)/2}(u^{3}z^{3})^{k} (-1/z)^{l}q^{(l^{2}-l)/2} }{(q;q)_{i}(q;q)_{j}(q^{3};q^{3})_{k}} \frac{dz}{2\pi iz}\\ &=\sum_{i,j,k\geq0}\frac{(-1)^{k}u^{2i+j+3k}q^{(i^{2}+j^{2}+(i+j+3k)^{2}-2i-2j-3k)/2}}{(q;q)_{i}(q;q)_{j}(q^{3};q^{3})_{k}}. \end{align*} This proves \eqref{eq-R-6}. \end{proof} Setting $u=q$, $q^{1/3}$, $q^{2/3}$ or $q^{1/2}$ in \eqref{eq-R-6} and replacing $q$ by $q^2$ or $q^3$ when necessary, we obtain \begin{align} \sum_{i,j,k\geq 0}\frac{(-1)^{k}q^{(i^{2}+j^{2}+(i+j+3k)^{2}+2i+3k)/2}}{(q;q)_{i}(q;q)_{j}(q^{3};q^{3})_{k}}&=\frac{1}{(q,q^{2};q^{3})_{\infty}}, \\ \sum_{i,j,k\geq 0}\frac{(-1)^{k}q^{3(i^{2}+j^{2}+(i+j+3k)^{2})/2-(2i+4j+3k)/2}}{(q^3;q^3)_{i}(q^3;q^3)_{j}(q^{9};q^{9})_{k}}&=\frac{(q^3;q^{9})_{\infty}}{(q;q^3)_{\infty}}, \\ \sum_{i,j,k\geq 0}\frac{(-1)^{k}q^{3(i^{2}+j^{2}+(i+j+3k)^{2})/2+(2i-2j+3k)/2}}{(q^3;q^3)_{i}(q^3;q^3)_{j}(q^{9};q^{9})_{k}}&= \frac{(q^{6};q^{9})_{\infty}}{(q^{2};q^3)_{\infty}}, \\ \sum_{i,j,k\geq0}\frac{(-1)^{k}q^{i^{2}+j^{2}+(i+j+3k)^{2}-j}}{(q^2;q^2)_{i}(q^2;q^2)_{j}(q^{6};q^{6})_{k}}&= \frac{1}{(q,q^5;q^{6})_{\infty}}.
\end{align} \subsection{Identities of index $(1,2,2)$} \begin{theorem}\label{thm-122} We have \begin{align} \sum_{i,j,k\geq0}\frac{(-1)^{j}q^{i+j^{2}+2j+(i+j-k)^{2}}}{(q;q)_{i}(q^{2};q^{2})_{j}(q^{2};q^{2})_{k}} &=\frac{(q^{2};q^{2})_{\infty}(q^4;q^4)_\infty^2} {(q;q)_{\infty}^{2}}, \\ \sum_{i,j,k\geq0}\frac{(-1)^{j}q^{j^{2}+j+k}(q^{(i+j-k)^{2}}+q^{(i+j-k+1)^{2}})}{(q;q)_{i}(q^{2};q^{2})_{j}(q^{2};q^{2})_{k}} &=\frac{(q^{2};q^{2})_{\infty}^7} {(q;q)_{\infty}^{4} (q^4;q^4)_\infty^2}. \end{align} \end{theorem} \begin{proof} Let $b=-q/a^{1/2}$ in \eqref{Eq14}. We obtain \begin{align} \oint \frac{(-a^{1/2}z,a^{1/2}qz,-q/a^{1/2}z;q)_{\infty}} {(az,a^{1/2}z,1/z;q)_{\infty}}\frac{dz}{2\pi iz} =\frac{(-a^{1/2},aq,-q,-q;q)_{\infty}} {(a^{1/2},a,-a^{1/2}q,a^{1/2}q;q)_{\infty}}. \end{align} When $a=q$, we have \begin{align*} (q;q)_{\infty} \oint \frac{(-q^{1/2}z,q^{3/2}z,-q^{1/2}/z;q)_{\infty}} {(qz,q^{1/2}z,1/z;q)_{\infty}}\frac{dz}{2\pi iz} =\frac{(-q^{1/2},q^{2},-q,-q;q)_{\infty}} {(q^{1/2},-q^{3/2},q^{3/2};q)_{\infty}}. \end{align*} Replacing $q$ by $q^2$, simplifying the denominator of the integrand using \begin{align}\label{eq-simplify} (q^2z,qz;q^2)_\infty=(qz;q)_\infty\end{align} and applying \eqref{Euler} and \eqref{Jacobi}, we obtain the first identity. Let $b=-q^{1/2}/a^{1/2}$ in \eqref{Eq14}. We obtain \begin{align} &\oint \frac{(-a^{1/2}z,a^{1/2}qz,-a^{1/2}q^{1/2}z,-q^{1/2}/a^{1/2}z;q)_{\infty}} {(az,-a^{1/2}qz,a^{1/2}z,1/z;q)_{\infty}}\frac{dz}{2\pi iz} \nonumber \\ &=\frac{(-a^{1/2},aq,-q^{1/2},-q^{1/2};q)_{\infty}} {(a^{1/2},a,-a^{1/2}q,a^{1/2}q;q)_{\infty}}. \end{align} When $a=q$, we have \begin{align*} (q;q)_{\infty} \oint (1+q^{1/2}z)\frac{(q^{3/2}z,-qz,-1/z;q)_{\infty}} {(q^{1/2}z,qz,1/z;q)_{\infty}}\frac{dz}{2\pi iz} =\frac{(q^{2};q)_{\infty}(-q^{1/2};q)_{\infty}^{3}} {(q^{1/2};q)_{\infty}(q^{3};q^{2})_{\infty}}. \end{align*} Replacing $q$ by $q^{2}$, simplifying the denominator of the integrand using \eqref{eq-simplify} and applying \eqref{Euler} and \eqref{Jacobi}, we obtain the second identity. \end{proof} \subsection{Identities of index $(1,2,3)$} \begin{theorem}\label{thm-4112-3} We have \begin{equation}\label{eq-4112-3} \sum_{i,j,k\geq0}\frac{(-1)^{i+j}u^{i+3k}q^{(i^{2}-i)/2+(i-2j+3k)^{2}/4}}{(q;q)_{i}(q^{2};q^{2})_{j}(q^{3};q^{3})_{k}}=\frac{(u^{2};q)_{\infty}(q,-u^{2};q^{2})_{\infty}}{(-u^{6};q^{6})_{\infty}}. 
\end{equation} \end{theorem} \begin{proof} Setting $c=q^{1/2}$, replacing $\alpha$ by $\zeta_2\alpha$, and setting $\beta=-\zeta_2 \alpha$, $a=d\zeta_3$, $b=d\zeta_3^{2}$ in \eqref{GR4112}, and then multiplying both sides by $(q^{2};q^{2})_{\infty}$, we see that the left side of \eqref{GR4112} becomes \begin{align} LHS&=(q^{2};q^{2})_{\infty}\oint \frac{(-qz^{2}/\alpha^{2},-q\alpha^{2}/z^{2};q^{2})_{\infty}(dz;q)_{\infty}} {(d^{3}z^{3};q^{3})_{\infty}(-\alpha^{2}/z^{2};q^{2})_{\infty}}\frac{dz}{2\pi iz} \nonumber \\ &=\oint \sum_{i,j,k\geq0}\sum_{l= -\infty}^{\infty}\frac{(-dz)^{i}q^{(i^{2}-i)/2} (-\alpha^{2}/z^{2})^{j} (d^{3}z^{3})^{k} (q\alpha^{2}/z^{2})^{l}q^{l^{2}-l}}{(q;q)_{i}(q^{2};q^{2})_{j}(q^{3};q^{3})_{k}} \frac{dz}{2\pi iz} \nonumber \\ &= \sum_{i,j,k\geq0}\frac{(-1)^{i+j}\alpha^{i+3k}d^{i+3k}q^{(i^{2}-i)/2+(i-2j+3k)^{2}/4}}{(q;q)_{i}(q^{2};q^{2})_{j}(q^{3};q^{3})_{k}}-S, \qquad \label{eq-S} \end{align} where \begin{align*} S=& \oint \sum_{i,j,k\geq 0}\sum_{m= -\infty}^{\infty}\frac{(-dz)^{i}q^{(i^{2}-i)/2} (-\alpha^{2}/z^{2})^{j} (d^{3}z^{3})^{k} }{(q;q)_{i}(q^{2};q^{2})_{j}(q^{3};q^{3})_{k}} \\ & \times (q\alpha^{2}/z^{2})^{(2m+1)/2}q^{(2m+1)^{2}/4-(2m+1)/2} \frac{dz}{2\pi iz} \end{align*} corresponds to the case when $l=(i-2j+3k)/2$ is not an integer, i.e., $l=(2m+1)/2$ with $m\in \mathbb{Z}$. Now we convert the integrand in the expression of $S$ back to infinite products. We have \begin{align*} S&=\alpha q^{1/4}(q^2;q^2)_\infty \oint \frac{(-q^2\alpha^2 /z^{2},-z^2/\alpha^{2};q^{2})_{\infty}(dz;q)_{\infty}} {(d^{3}z^{3};q^{3})_{\infty}(-\alpha^{2}/z^{2};q^{2})_{\infty}}z^{-1}\frac{dz}{2\pi iz} \\ &=\alpha q^{1/4}(q^2;q^2)_\infty \oint \frac{(-z^2/\alpha^{2};q^{2})_{\infty}(dz;q)_{\infty}} {(d^{3}z^{3};q^{3})_{\infty}(1+\alpha^2/z^2)}z^{-1}\frac{dz}{2\pi iz} \\ &=\alpha^{-1} q^{1/4}(q^2;q^2)_\infty \oint \frac{(-z^2q^2/\alpha^2;q^2)_\infty (dz;q)_\infty}{(d^3z^3;q^3)_\infty} z \frac{dz}{2\pi iz} \\ &=0. \end{align*} Here the last equality follows since $$[z^0] \frac{(-z^2q^2/\alpha^2;q^2)_\infty (dz;q)_\infty}{(d^3z^3;q^3)_\infty} z=0.$$ Note that the right side of \eqref{GR4112} (after multiplication by $(q^{2};q^{2})_{\infty}$) is \begin{align}\label{eq-S-2} RHS=\frac{(d^{2}\alpha^{2};q)_{\infty}(q,-d^{2}\alpha^{2};q^{2})_{\infty}}{(-d^{6}\alpha^{6};q^{6})_{\infty}}. \end{align} Combining \eqref{eq-S} and \eqref{eq-S-2} and replacing $d\alpha$ by $u$, we obtain the desired identity. \end{proof} If we set $u=q^{1/2}$ or $u=q$ in Theorem \ref{thm-4112-3}, we obtain \begin{align} \sum_{i,j,k\geq0}\frac{(-1)^{i+j}q^{(i^{2}+3k)/2+(i-2j+3k)^{2}/4}}{(q;q)_{i}(q^{2};q^{2})_{j}(q^{3};q^{3})_{k}}&=(q;q)_{\infty}(q^{3};q^{6})_{\infty}(q^{2},q^{10};q^{12})_{\infty}, \\ \sum_{i,j,k\geq0}\frac{(-1)^{i+j}q^{(i^{2}+i+6k)/2+(i-2j+3k)^{2}/4}}{(q;q)_{i}(q^{2};q^{2})_{j}(q^{3};q^{3})_{k}}&= \frac{(q^{2};q)_{\infty}(q;q^{2})_{\infty}}{(q^{2},q^{10};q^{12})_{\infty}}. \end{align} \begin{theorem}\label{thm-123} We have \begin{align} \sum_{i,j,k\geq 0}\frac{(-1)^{(i-2j+3k)/2}u^{i+k}q^{(i^{2}-i)/2+(i-2j+3k)^{2}/4}} {(q;q)_{i}(q^{2};q^{2})_{j}(q^{3};q^{3})_{k}} =\frac{(q;q^{2})_{\infty}(-u^{2};q^{3})_{\infty}} {(u^{2};q^{6})_{\infty}}.
\end{align} \end{theorem} \begin{proof} Setting $b=\zeta_3 a,c=\zeta_3^{2}a,\alpha=-\beta$, $\gamma=q^{1/2}\alpha$ and $\delta=-a^3\alpha^2$ in \eqref{GR4113}, after multiplying both sides by $(q^2;q^2)_\infty$, we see that its left side is \begin{align} LHS=&(q^{2};q^{2})_{\infty}\oint \frac{(-a^{3}\alpha^{2}z;q)_{\infty} (qz^{2}/\alpha^{2},q\alpha^{2}/z^{2};q^{2})_{\infty}} {(a^{3}z^{3};q^{3})_{\infty}(\alpha^{2}/z^{2};q^{2})_{\infty}}\frac{dz}{2\pi iz} \nonumber \\ &=\oint \sum_{i,j,k\geq 0}\frac{(a^{3}\alpha^{2}z)^{i}q^{(i^2-i)/2}(\alpha^{2}/z^{2})^{j}(a^{3}z^{3})^{k}}{(q;q)_{i}(q^{2};q^{2})_{j}(q^{3};q^{3})_{k}} \sum_{l= -\infty}^{\infty}(-q\alpha^{2}/z^{2})^{l}q^{l^{2}-l}\frac{dz}{2\pi iz} \nonumber \\ &=\sum_{i,j,k\geq 0}\frac{(-1)^{(i-2j+3k)/2}a^{3i+3k}\alpha^{3i+3k}q^{(i^{2}-i)/2+(i-2j+3k)^{2}/4}} {(q;q)_{i}(q^{2};q^{2})_{j}(q^{3};q^{3})_{k}}-S, \label{add-S} \end{align} where \begin{align*} S&=\oint \sum_{i,j,k\geq 0}\frac{(a^{3}\alpha^{2}z)^{i}q^{(i^2-i)/2}(\alpha^{2}/z^{2})^{j}(a^{3}z^{3})^{k}}{(q;q)_{i}(q^{2};q^{2})_{j}(q^{3};q^{3})_{k}} \nonumber \\ &\quad \times \sum_{m= -\infty}^{\infty}(-q\alpha^{2}/z^{2})^{(2m+1)/2}q^{(2m+1)^{2}/4-(2m+1)/2}\frac{dz}{2\pi iz} \\ &=\zeta_2\alpha q^{1/4}(q^2;q^2)_\infty \oint\frac{(-a^{3}\alpha^{2}z;q)_{\infty} (z^{2}/\alpha^{2},q^{2}\alpha^{2}/z^{2};q^{2})_{\infty}} {(a^{3}z^{3};q^{3})_{\infty}(\alpha^{2}/z^{2};q^{2})_{\infty}} z^{-1}\frac{dz}{2\pi iz} \\ &=-\zeta_2\alpha^{-1} q^{1/4} \oint \frac{(-a^{3}\alpha^{2}z;q)_{\infty} (q^{2}z^{2}/\alpha^{2};q^{2})_{\infty}} {(a^{3}z^{3};q^{3})_{\infty}}z\frac{dz}{2\pi iz} \\ &=0. \end{align*} The right side of \eqref{GR4113} (after multiplication by $(q^2;q^2)_\infty$) is \begin{align} RHS=\frac{(q;q^{2})_{\infty}(-a^{6}\alpha^{6};q^{3})_{\infty}} {(a^{6}\alpha^{6};q^{6})_{\infty}}. \label{add-S-2} \end{align} Combining \eqref{add-S} and \eqref{add-S-2}, and replacing $a^3\alpha^3$ by $u$, we obtain the desired identity. \end{proof} Setting $u$ as $-\zeta_2 q^{3/2}$ and $q^{3/2}$ in Theorem \ref{thm-123}, we obtain \begin{align} \sum_{i,j,k\geq0}\frac{(-1)^{i+j}q^{(i^{2}+2i+3k)/2+(i-2j+3k)^{2}/4}} {(q;q)_{i}(q^{2};q^{2})_{j}(q^{3};q^{3})_{k}} &=(q;q^{2})_{\infty}(q^{3};q^{6})_{\infty}^{2}(q^{12};q^{12})_{\infty}, \\ \sum_{i,j,k\geq0}\frac{(-1)^{(i-2j+3k)/2}q^{(i^{2}+2i+3k)/2+(i-2j+3k)^{2}/4}} {(q;q)_{i}(q^{2};q^{2})_{j}(q^{3};q^{3})_{k}} &=\frac{(q,q^{5};q^{6})_{\infty}}{(q^{3};q^{6})_{\infty}}. \end{align} \subsection{Identities of index $(1,2,4)$} \begin{theorem} We have \begin{align} \sum_{i,j,k\geq 0} \frac{(-1)^{k}q^{(i+j+2k)(i+j+2k-1)+j+2k^2}u^{i+2j+4k}}{(q;q)_{i}(q^2;q^2)_{j}(q^4;q^4)_{k}}&=(-u;q)_\infty, \\ \sum_{i,j,k\geq 0}\frac{(-1)^{j}q^{(i+j+2k)(i+j+2k-1)+2i+3j+2k^2+6k}}{(q;q)_{i}(q^2;q^2)_{j}(q^4;q^4)_{k}}&=\frac{(q^4,q^{12},q^{16};q^{16})_\infty}{(q^2;q^2)_\infty}, \\ \sum_{i,j,k\geq 0}\frac{(-1)^{j}q^{(i+j+2k)^2+2k^2}}{(q;q)_{i}(q^2;q^2)_{j}(q^4;q^4)_{k}}&=\frac{(q^8;q^8)_\infty^2}{(q^2;q^2)_\infty (q^{16};q^{16})_\infty}. \end{align} \end{theorem} \begin{proof} Let \begin{align} &H(u,v,w)=H(u,v,w;q) \nonumber \\ &:=\sum_{i,j,k\geq 0}\frac{u^{i}v^{j}(-w)^{k}q^{(i+j+2k)(i+j+2k-1)+2k(k-1)}}{(q;q)_{i}(q^2;q^2)_{j}(q^4;q^4)_{k}}. \end{align} We have by \eqref{int-constant} that \begin{align*} H(u,v,w)=\oint\sum_{i=0}^\infty \frac{(uz)^{i}}{(q;q)_{i}}\sum_{j=0}^\infty \frac{(vz)^{j}}{(q^2;q^2)_{j}}\sum_{k=0}^\infty \frac{(-wz^2)^{k}q^{2k(k-1)}}{(q^4;q^4)_{k}}\sum_{l=-\infty}^\infty (1/z)^{l}q^{l(l-1)} \frac{\diff z}{2\pi iz}. 
\end{align*} Hence by \eqref{Euler} and \eqref{Jacobi}, \begin{align*} H(u,v,w)&=(q^2;q^2)_\infty \oint \frac{(wz^2;q^4)_\infty (-1/z,-q^2z;q^2)_\infty}{(uz;q)_\infty (vz;q^2)_\infty} \frac{\diff z}{2\pi iz} \\ &=(q^2;q^2)_\infty \oint \frac{(w^{1/2}z,-w^{1/2}z,-1/z,-q^2z;q^2)_\infty}{(uz,uzq,vz;q^2)_\infty} \frac{\diff z}{2\pi iz} \\ &=(q^2;q^2)_\infty \oint \frac{(w^{1/2}z,-w^{1/2}z,1/z,q^2z;q^2)_\infty}{(-uz,-uzq,-vz;q^2)_\infty} \frac{\diff z}{2\pi iz}. \end{align*} Here for the last line we have replaced $z$ by $-z$. When $w=u^2vq$, we can apply \eqref{R32} with $(\alpha_1,\alpha_2,\beta_1,\beta_2,\beta_3)=(w^{1/2},-w^{1/2},-v,-u,-uq)$ to deduce that \begin{align}\label{124F-exp} H(u,v,w;q)=(-v,-w^{1/2}/v;q^2)_\infty \cdot {}_2\phi_1 \bigg(\genfrac{}{}{0pt}{}{w^{1/2}/u,w^{1/2}/uq}{-v};q^2,-w^{1/2}/v \bigg). \end{align} We now specialize the choices of $(u,v,w)$ so that the ${}_2\phi_1$ series becomes a nice infinite product. By \eqref{124F-exp} and the $q$-Gauss summation \eqref{q-Gauss}, we have \begin{align*} H(u,u^2q,u^4q^2)&=(-u^2q,-1;q^2)_\infty \cdot {}_2\phi_1 \bigg(\genfrac{}{}{0pt}{}{uq,u}{-u^2q};q^2,-1 \bigg) \\ &=(-u^2q,-1;q^2)_\infty \cdot \frac{(-u,-uq;q^2)_\infty}{(-u^2q,-1;q^2)_\infty} \\ &=(-u;q)_\infty. \end{align*} By \eqref{124F-exp} and the Bailey-Daum summation \eqref{BD}, we get \begin{align*} H(q^2,-q^3,-q^8)&=(q^3,\zeta_2 q;q^2)_\infty \cdot {}_2\phi_1 \bigg(\genfrac{}{}{0pt}{}{\zeta_2 q^2,\zeta_2q}{q^3};q^2,\zeta_2 q \bigg) \\ &=(q^3,\zeta_2 q;q^2)_\infty \cdot \frac{(-q^2;q^2)_\infty (\zeta_2q^4,-\zeta_2q^4;q^4)_\infty}{(q^3,\zeta_2q;q^2)_\infty} \\ &=\frac{(q^4,q^{12},q^{16};q^{16})_\infty}{(q^2;q^2)_\infty}. \end{align*} Similarly, by \eqref{124F-exp} and \eqref{BD} we get \begin{align*} H(q,-q,-q^4)&=(q,\zeta_2q;q^2)_\infty \cdot {}_2\phi_1\bigg(\genfrac{}{}{0pt}{}{\zeta_2,\zeta_2q}{q};q^2,\zeta_2q \bigg) \\ &=(q,\zeta_2q;q^2)_\infty \cdot \frac{(-q^2;q^2)_\infty (\zeta_2q^2,-\zeta_2q^2;q^4)_\infty}{(q,\zeta_2q;q^2)_\infty} \\ &=\frac{(q^8;q^8)_\infty^2}{(q^2;q^2)_\infty(q^{16};q^{16})_\infty}. \qedhere \end{align*} \end{proof} \section{Concluding Remarks}\label{sec-concluding} We give several remarks before closing this paper. First, though here we only discuss identities involving double sums and triple sums, the integral method can also be applied to deduce identities with more summation indices. For example, we can use the integral method to give a new proof of the identity \eqref{DL1112}, which is of index $(1,1,1,2)$. \begin{proof}[Proof of \eqref{DL1112}] Using \eqref{Euler}, \eqref{Jacobi} and \eqref{int-constant}, we have \begin{align} \label{Lovejoy1} & \sum_{i,j,k,l\geq 0} \frac{a^{i+l}b^{j+l}q^{\binom{i+j+k+2l+1}{2}+\binom{i+1}{2}+\binom{j+1}{2}+l}}{(q;q)_i(q;q)_j(q;q)_k(q^2;q^2)_l} \nonumber\\ &=(q;q)_\infty \oint \frac{(-aqz,-bqz,-z,-q/z;q)_{\infty}} {(z;q)_{\infty}(abqz^{2};q^{2})_{\infty}}\frac{dz}{2\pi iz} \nonumber\\ &=(q;q)_\infty \oint \frac{(-aqz,-bqz,-z,-q/z;q)_{\infty}} {(z,(abq)^{1/2}z,-(abq)^{1/2}z;q)_{\infty}}\frac{dz}{2\pi iz}. \end{align} Using \eqref{q-binomial}, we have \begin{align} \label{bi1} \frac{(-bdq^{n+1};q)_\infty }{(dq^{n};q)_\infty } =\sum_{m\geq 0} \frac{(-bq;q)_m}{(q;q)_m}(dq^{n})^{m}. \end{align} Using Heine's transformation of ${}_2\phi_1$ series \cite[(\uppercase\expandafter{\romannumeral3}.
3)]{GR-book} \begin{align} {}_2\phi_1\bigg(\genfrac{}{}{0pt}{}{a,b}{c};q,z \bigg)=\frac{(abz/c;q)_\infty}{(z;q)_\infty}{}_2\phi_1\bigg(\genfrac{}{}{0pt}{}{c/a,c/b}{c};q,abz/c \bigg), \end{align} we deduce that \begin{align} \label{he1} &\sum_{n\geq 0} \frac{((abq)^{1/2}d,-(abq)^{1/2}d;q)_n}{(q,-adq;q)_n}(-q^{m+1}/d)^{n} \nonumber \\ &=\frac{(-bq^{m+1};q)_\infty }{(-q^{m+1}/d;q)_\infty }\sum_{n\geq 0} \frac{(-(aq/b)^{1/2},(aq/b)^{1/2};q)_n}{(q,-adq;q)_n}(-bq^{m+1})^{n}. \end{align} By (\ref{GR41010}), we have \begin{align} \label{Lovejoy2} &(q;q)_\infty\oint \frac{(-aqz,-bqz,-z,-q/z;q)_{\infty}} {(z,(abq)^{1/2}z,-(abq)^{1/2}z,d/z;q)_{\infty}}\frac{dz}{2\pi iz} \nonumber \\ &=\frac{(-adq,-bdq,-d,-q/d;q)_{\infty}} {(d,(abq)^{1/2}d,-(abq)^{1/2}d;q)_{\infty}} \sum_{n\geq 0} \frac{(d,(abq)^{1/2}d,-(abq)^{1/2}d;q)_n}{(q,-adq,-bdq;q)_n}(-q/d)^{n} \nonumber \\ &=\frac{(-adq,-d,-q/d;q)_{\infty}} {((abq)^{1/2}d,-(abq)^{1/2}d;q)_{\infty}} \sum_{n\geq 0} \frac{((abq)^{1/2}d,-(abq)^{1/2}d;q)_n}{(q,-adq;q)_n}(-q/d)^{n} \frac{(-bdq^{n+1};q)_{\infty}} {(dq^{n};q)_{\infty}} \nonumber \\ &=\frac{(-adq,-d,-q/d;q)_{\infty}} {((abq)^{1/2}d,-(abq)^{1/2}d;q)_{\infty}} \sum_{m\geq 0} \frac{(-bq;q)_m}{(q;q)_m}d^{m} \nonumber \\ &\quad \quad \quad \quad \quad \times \sum_{n\geq 0} \frac{((abq)^{1/2}d,-(abq)^{1/2}d;q)_n}{(q,-adq;q)_n}(-q^{m+1}/d)^{n} \quad \text{(by \ref{bi1})} \nonumber \\ &= \frac{(-adq,-d,-q/d;q)_{\infty}} {((abq)^{1/2}d,-(abq)^{1/2}d;q)_{\infty}} \sum_{n\geq 0} \frac{(-(aq/b)^{1/2},(aq/b)^{1/2};q)_n}{(q,-adq;q)_n}(-bq)^{n} \nonumber \\ &\quad \quad \quad \times \sum_{m\geq 0} \frac{(-bq;q)_m}{(q;q)_m}(dq^{n})^{m} \frac{(-bq^{m+1};q)_\infty }{(-q^{m+1}/d;q)_\infty } \quad \text{(by \ref{he1})} \nonumber \\ &=\frac{(-adq,-d,-bq;q)_{\infty}} {((abq)^{1/2}d,-(abq)^{1/2}d;q)_{\infty}} \sum_{n\geq 0} \frac{(-(aq/b)^{1/2},(aq/b)^{1/2};q)_n}{(q,-adq;q)_n}(-bq)^{n} \nonumber \\ &\qquad \qquad \qquad \times \sum_{m\geq 0} \frac{(-q/d;q)_m}{(q;q)_m}(dq^{n})^{m} \nonumber \\ &=\frac{(-adq,-d,-bq;q)_{\infty}} {((abq)^{1/2}d,-(abq)^{1/2}d;q)_{\infty}} \sum_{n\geq 0} \frac{(aq/b;q^{2})_n}{(q,-adq;q)_n}(-bq)^{n} \frac{(-q^{n+1};q)_\infty }{(dq^{n};q)_\infty } \nonumber \\ &=\frac{(-adq,-d,-bq,-q;q)_{\infty}} {((abq)^{1/2}d,-(abq)^{1/2}d;q)_{\infty}} \sum_{n\geq 0} \frac{(aq/b;q^{2})_n}{(q^{2};q^{2})_n(-adq;q)_n}(-bq)^{n} \frac{1}{(dq^{n};q)_\infty } . \end{align} Letting $d\rightarrow 0$ on both sides of (\ref{Lovejoy2}), we have \begin{align}\label{Lovejoy3} &(q;q)_\infty \oint \frac{(-aqz,-bqz,-z,-q/z;q)_{\infty}} {(z,(abq)^{1/2}z,-(abq)^{1/2}z;q)_{\infty}}\frac{dz}{2\pi iz} \nonumber \\ &=(-bq,-q;q)_{\infty}\sum_{n\geq 0} \frac{(aq/b;q^{2})_n}{(q^{2};q^{2})_n}(-bq)^{n}\nonumber \\ &= \frac{(-bq,-q;q)_{\infty}(-aq^{2};q^{2})_{\infty}}{(-bq;q^{2})_{\infty}} \nonumber \\ &=(-q;q)_{\infty}(-aq^{2},-bq^{2};q^{2})_{\infty}. \end{align} Combining (\ref{Lovejoy1}) and (\ref{Lovejoy3}), we obtain (\ref{DL1112}). \end{proof} Second, there might exist partition or Lie theoretic interpretations for the identities we proved. This deserves further investigation. As promised in Remark \ref{rem-111}, we give below a brief discussion on partition interpretations of the identity \eqref{eq-111}. Here we follow closely the lines in \cite{Lovejoy2017}. 
Recall that a partition $\pi$ of $n$ is a nonincreasing sequence $\pi=(\lambda_1,\lambda_2,\dots,\lambda_s)$ of positive integers which sum up to $n$, i.e., $$n=\lambda_1+\lambda_2+\cdots+\lambda_s, \quad \lambda_1\geq \lambda_2\geq \cdots \geq \lambda_s\geq 1.$$ Let $T(u,v,n)$ be the number of bipartitions $(\pi_1,\pi_2)$ of $n$ such that the partition $\pi_1$ (resp.\ $\pi_2$) consists of $u$ (resp.\ $v$) distinct parts. Then clearly, we have \begin{align}\label{T-gen} \sum_{n=0}^\infty T(u,v,n)a^ub^vq^n=(-aq,-bq;q)_\infty. \end{align} To generalize and refine Schur's partition theorem, Alladi and Gordon \cite{Alladi-Gordon-1993,Alladi-Gordon-1995} introduced a new kind of three-colored partitions. As in \cite{Lovejoy2017}, we color the positive integers with three colors $a$, $b$ and $ab$, ordered so that $$ab<a<b.$$ Now the integers are ordered as $$1_{ab}<1_a<1_b<2_{ab}<2_a<2_b<\cdots.$$ Let $S(u,v,n)$ be the number of three-colored partitions of $n$ with no parts $1_{ab}$, $u$ parts colored $a$ or $ab$, $v$ parts colored $b$ or $ab$, and satisfying the difference conditions described in the matrix $$A=\bordermatrix{& a & b & ab \cr a & 1 & 2 &1 \cr b & 1 & 1 &1 \cr ab & 2 & 2 & 2 }.$$ Here the entry $(x,y)$ gives the minimal difference between a part $\lambda_i$ of color $x$ and the next part $\lambda_{i+1}$ of color $y$. Alladi and Gordon proved that \begin{align}\label{Alladi-Gordon-eq} \sum_{u,v,n\geq 0} S(u,v,n)a^ub^vq^n=(-aq,-bq;q)_\infty. \end{align} Through combinatorial arguments, Lovejoy \cite[Eq.\ (2.3)]{Lovejoy2017} proved that \begin{align}\label{eq-Lovejoy-gen} \sum_{u,v,n\geq 0} S(u,v,n)a^ub^vq^n=\sum_{i,j,k\geq 0}\frac{a^iq^i}{(q;q)_i} \frac{b^jq^j}{(q;q)_j}\frac{(ab)^kq^kq^{\binom{k+1}{2}}}{(q;q)_k}q^{\binom{i+j+k}{2}}. \end{align} After simplifying the sums using the $q$-Chu-Vandermonde summation, Euler's identities \eqref{Euler} and the $q$-binomial identity \eqref{q-binomial}, Lovejoy obtained \eqref{Alladi-Gordon-eq}. Clearly, combining \eqref{Alladi-Gordon-eq} and \eqref{eq-Lovejoy-gen}, we get \eqref{eq-111}. Together with \eqref{T-gen}, we see that \eqref{eq-111} is equivalent to the partition identity $$S(u,v,n)=T(u,v,n).$$ Once our identities are converted into partition identities like the one above, it would be quite interesting to find bijective proofs of them. Finally, we want to emphasize the advantages of the integral method. It allows us to prove the identities in this paper in a uniform manner. Of course, it is possible to give different proofs of our theorems. As discussed in Remarks \ref{rem-sec3}--\ref{rem-111}, one may prove some of the theorems using approaches such as summing over one of the indices first. Compared with other methods, the integral method has the advantage that it shows how the identities are constructed, and the calculations involved are streamlined. \subsection*{Acknowledgements} We thank Jeremy Lovejoy for some valuable comments, especially for bringing the works \cite{Dousse-Lovejoy,Lovejoy2006,Lovejoy2017} to our attention. We are also grateful to Chuanan Wei for helpful comments on the presentation of Corollaries \ref{cor-Jacobi-add-1} and \ref{cor-J-4}. This work was supported by the National Natural Science Foundation of China (12171375). \begin{thebibliography}{0} \bibitem{Alladi-Gordon-1993} K. Alladi and B. Gordon, Generalizations of Schur's partition theorem, Manuscripta Math. 79 (1993), 113--126. \bibitem{Alladi-Gordon-1995} K. Alladi and B.
Gordon, Schur's partition theorem, companions, refinements and generalizations, Trans. Am. Math. Soc. 347 (1995), 1591--1608. \bibitem{Andrews1974} G.E. Andrews, On the General Rogers-Ramanujan Theorem. Providence, RI: Amer. Math. Soc., 1974. \bibitem{Andrews2019} G.E. Andrews, Sequences in partitions, double $q$-series and the mock theta function $\rho_3(q)$, Algorithmic Combinatorics: Enumerative Combinatorics, Special Functions and Computer Algebra, Springer, Cham. Switzerland, (2020), pp. 25--46. \bibitem{Lost2} G.E. Andrews and B.C. Berndt, Ramanujan's Lost Notebook, Part II, Springer 2009. \bibitem{Andrews-Uncu} G.E. Andrews and A.K. Uncu, Sequences in overpartitions, arXiv:2111.15003v1. \bibitem{Berkovich} A. Berkovich and A.K. Uncu, Elementary polynomial identities involving $q$-trinomial coefficients, Ann. Comb. 23 (2019), no. 3--4, 549--560. \bibitem{Bressoud1979} D.M. Bressoud, A generalization of the Rogers-Ramanujan identities for all moduli, J. Combin. Theory Ser. A 27 (1979), 64--68. \bibitem{Bressoud1980} D.M. Bressoud, Analytic and combinatorial generalizations of the Rogers-Ramanujan identities, Mem. Amer. Math. Soc. 24 (1980). \bibitem{BSM} K. Bringmann, C. Jennings-Shaffer and K. Mahlburg, Proofs and reductions of various conjectured partition identities of Kanade and Russell, J. Reine Angew. Math. 766 (2020), 109--135. \bibitem{Capparelli} S. Capparelli, On some representations of twisted affine Lie algebras and combinatorial identities. J. Algebra 154 (1993), 335--355. \bibitem{Chern} S. Chern, Asymmetric Rogers-Ramanujan type identities. I, The Andrews-Uncu Conjecture, Proc. Amer. Math. Soc. (2022), https://doi.org/10.1090/proc/16332. \bibitem{Dousse-Lovejoy} J. Dousse and J. Lovejoy, Generalizations of Capparelli's identity, Bull. London Math. Soc. 51 (2019), 193--206. \bibitem{GR-book} G. Gasper and M. Rahman, Basic Hypergeometric Series, 2nd Edition, Encyclopedia of Mathematics and Its Applications, Vol.\ 96, Cambridge University Press, 2004. \bibitem{Gordon1961} B. Gordon, A combinatorial generalization of the Rogers-Ramanujan identities, Amer. J. Math. 83 (1961), 393--399. \bibitem{KR-2015} S. Kanade and M.C. Russell, IdentityFinder and some new identities of Rogers-Ramanujan type, Exp. Math. 24 (2015), no. 4, 419--423. \bibitem{KR-2019} S. Kanade and M.C. Russell, Staircases to analytic sum-sides for many new integer partition identities of Rogers-Ramanujan type. Electron. J. Combin. 26 (2019), 1--6. \bibitem{Kursungoz} K. Kur\c{s}ung\"{o}z, Andrews-Gordon type series for Capparelli's and G\"{o}llnitz-Gordon identities, J. Combin. Theory Ser. A 165 (2019) 117-138. \bibitem{Kursungoz-AnnComb} K. Kur\c{s}ung\"{o}z, Andrews-Gordon type series for Kanade-Russell conjectures, Ann. Comb. 23 (2019), 835--888. \bibitem{Laughlin} J. Mc Laughlin, Some more identities of Kanade-Russell type derived using Rosengren's method, Ann. Comb., https://doi.org/10.1007/s00026-022-00586-3. \bibitem{Lepowsky-Wilson} J. Lepowsky and R.L. Wilson, A Lie theoretic interpretation and proof of the Rogers-Ramanujan identities, Adv. Math. 45 (1982), 21--72. \bibitem{Lepowsky-Wilson-1985} J. Lepowsky and R.L. Wilson, The structure of standard modules II. the case $A_1^{(1)}$, principal gradation, Invent. Math. 79 (1985), 417--442. \bibitem{Lovejoy2006} J. Lovejoy, Constant terms, jagged partitions, and partitions with difference two at distance two, Aequationes Math. 72 (2006), 299--312. \bibitem{Lovejoy2017} J. 
Lovejoy, Asymmetric generalizations of Schur's theorem, Analytic Number Theory, Modular Forms and $q$-Hypergeometric Series, 463--476, Springer Proc. Math. Stat. 221, Springer, Cham, 2017. \bibitem{Rosengren} H. Rosengren, Proofs of some partition identities conjectured by Kanade and Russell, Ramanujan J. https://doi.org/10.1007/s11139-021-00389-9 \bibitem{Sills-book} A.V. Sills, An Invitation to the Rogers-Ramanujan Identities, CRC Press (2018). \bibitem{Slater} L.J. Slater, Further identities of the Rogers-Ramanujan type, Proc. Lond. Math. Soc. (2) 54 (1) (1952), 147--167. \bibitem{Takigiku-2019} M. Takigiku and S. Tsuchioka, A proof of conjectured partition identities of Nandi, arXiv:1910.12461. \bibitem{Takigiku} M. Takigiku and S. Tsuchioka, Andrews--Gordon type series for the level 5 and 7 standard modules of the affine Lie algebra $A_2^{(2)}$, Proc. Amer. Math. Soc. 149 (2021), no. 7, 2763--2776. \bibitem{Uncu-Zudilin} A. Uncu and W. Zudilin, Reflecting (on) the modulo 9 Kanade-Russell (conjectural) identities, arXiv: 2106.02959v3. \bibitem{Wang} L. Wang, New proofs of some double sum Rogers-Ramanujan type identities, Ramanujan J. (2022), https://doi.org/10.1007/s11139-022-00654-5. \end{thebibliography} \end{document}
2205.12774v2
http://arxiv.org/abs/2205.12774v2
$4$-manifolds with boundary and fundamental group $\mathbb{Z}$
\documentclass[10.9pt,a4paper]{amsart} \usepackage[english]{babel} \usepackage[percent]{overpic} \usepackage{xcolor} \definecolor{lblue}{RGB}{52,219,252} \newtheorem{theorem}{Theorem}[section] \newtheorem{conj}{Conjecture} \newtheorem{question}{Question} \newtheorem{problem}{Problem} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{proposition}[theorem]{Proposition} \newtheorem*{thmintro}{Theorem} \newtheorem{claim}{Claim} \newtheorem*{claim*}{Claim} \theoremstyle{definition} \newtheorem{definition}[theorem]{Definition} \newtheorem{construction}[theorem]{Construction} \newtheorem*{terminology}{Terminology} \newtheorem{example}[theorem]{Example} \newtheorem{conjecture}[theorem]{Conjecture} \newtheorem{remark}[theorem]{Remark} \numberwithin{equation}{section} \newcommand{\purple}[1]{\textcolor{purple}{#1} } \newcommand{\MP}[1]{\textcolor{green}{#1} } \usepackage[square,sort,comma,numbers]{natbib} \setlength{\parskip}{2pt} \usepackage[all]{xy} \usepackage{pstricks} \usepackage{enumerate} \usepackage{amsfonts,amssymb,amsmath,eucal,pinlabel,array,hhline} \usepackage{slashed} \usepackage{tabulary} \usepackage{fancyhdr} \usepackage{color} \usepackage{a4wide} \usepackage{calrsfs,bbm} \usepackage[position=b]{subcaption} \usepackage[colorlinks, linkcolor={blue!50!black}, citecolor={blue!50!black}, urlcolor={blue!80!black}]{hyperref} \newcommand{\N}{\mathbb{N}} \newcommand{\Z}{\mathbb{Z}} \newcommand{\Q}{\mathbb{Q}} \newcommand{\R}{\mathbb{R}} \newcommand{\C}{\mathbb{C}} \newcommand{\D}{\mathbb{D}} \newcommand{\F}{\mathbb{F}} \newcommand{\s}{\mathbb{S}} \newcommand{\bsm}{\left(\begin{smallmatrix}} \newcommand{\esm}{\end{smallmatrix}\right)} \newcommand{\id}{\operatorname{Id}} \newcommand{\Bl}{\operatorname{Bl}} \newcommand{\coker}{\operatorname{coker}} \newcommand{\Iso}{\operatorname{Iso}} \newcommand{\Homeo}{\operatorname{Homeo}} \newcommand{\Surf}{\operatorname{Surf}} \newcommand{\Aut}{\operatorname{Aut}} \newcommand{\Pres}{\operatorname{Pres}} \newcommand{\im}{\operatorname{im}} \newcommand{\fr}{\operatorname{fr}} \newcommand{\Hom}{\operatorname{Hom}} \newcommand{\TB}{\operatorname{TB}} \newcommand{\pd}{\operatorname{pd}} \newcommand{\sign}{\mathit{sign}} \newcommand{\eps}{\varepsilon} \newcommand{\HRule}{\rule{\linewidth}{0.5mm}} \newcommand{\smargin}[1]{\marginpar{\tiny{#1}}} \newcommand{\sH}{\mathcal{H}} \newcommand{\Hi}{\mathcal{H}_i} \newcommand{\Ho}{\mathcal{H}_0} \newcommand{\ks}{\operatorname{ks}} \newcommand{\sm}{\setminus} \newcommand{\ol}{\overline} \newcommand{\wt}{\widetilde} \newcommand{\lk}{\ell k} \newcommand{\tmfrac}[2]{\mbox{\large$\frac{#1}{#2}$}} \newtheorem{innercustomthm}{Theorem} \newenvironment{customthm}[1] {\renewcommand\theinnercustomthm{#1}\innercustomthm} {\endinnercustomthm} \newenvironment{romanlist} {\begin{enumerate} \renewcommand{\theenumi}{\it\roman{enumi}}} {\end{enumerate}} \newcommand{\unaryminus}{\scalebox{0.75}[1.0]{\( - \)}} \DeclareSymbolFont{EulerScript}{U}{eus}{m}{n} \DeclareSymbolFontAlphabet\mathscr{EulerScript} \begin{document} \title{$4$-manifolds with boundary and fundamental group $\Z$} \begin{abstract} We classify topological $4$-manifolds with boundary and fundamental group $\Z$, under some assumptions on the boundary. We apply this to classify surfaces in simply-connected $4$-manifolds with $S^3$ boundary, where the fundamental group of the surface complement is $\Z$. We then compare these homeomorphism classifications with the smooth setting.
For manifolds, we show that every Hermitian form over $\Z[t^{\pm 1}]$ arises as the equivariant intersection form of a pair of exotic smooth 4-manifolds with boundary and fundamental group $\Z$. For surfaces we have a similar result, and in particular we show that every $2$-handlebody with $S^3$ boundary contains a pair of exotic discs. \end{abstract} \author[A.~Conway]{Anthony Conway} \address{The University of Texas at Austin, Austin TX 78712} \email{[email protected]} \author[L.~Piccirillo]{Lisa Piccirillo} \address{The University of Texas at Austin, Austin TX 78712} \email{[email protected]} \author[M.~Powell]{Mark Powell} \address{School of Mathematics and Statistics, University of Glasgow, United Kingdom} \email{[email protected]} \maketitle In what follows a~$4$-manifold is understood to mean a compact, connected, oriented, topological~$4$-manifold. Freedman classified closed~ $4$-manifolds with trivial fundamental group up to orientation-preserving homeomorphism. Other groups~$\pi$ for which classifications of closed~$4$-manifolds with fundamental group~$\pi$ are known include~$\pi \cong \Z$,~\cite{FreedmanQuinn,WangThesis,StongWang}, ~$\pi$ a finite cyclic group~\cite{HambletonKreck}, and~$\pi$ a solvable Baumslag-Solitar group~\cite{HambletonKreckTeichner}. Complete classification results for manifolds with boundary essentially only include the simply-connected case~\cite{BoyerUniqueness, BoyerRealization}; see also~\cite{StongRealization}. This paper classifies~$4$-manifolds with boundary and fundamental group~$\Z$, under some extra assumptions on the boundary. We give an informal statement now. Fix a closed 3-manifold~$Y$, an epimorphism $\varphi \colon \pi_1(Y)\twoheadrightarrow\Z$, a nondegenerate Hermitian form $\lambda$ over $\Z[t^{\pm1}]$, and an additional piece of data specifying how the Alexander module of $Y$ interacts with $\lambda$. Then up to homeomorphism fixing $Y,$ there exists a unique $4$-manifold $M$ filling $Y$ inducing the specified data. Uniqueness is a consequence of~\cite[Theorem 1.10]{ConwayPowell}. Existence is the main contribution of this paper, Theorem~\ref{thm:MainTechnicalIntro}. We give a similar non-relative classification of such $M$ in Theorem~\ref{thm:Classification}. A feature of our classification, which we shall demonstrate in Section~\ref{sec:NonTrivialbAut}, is the existence of arbitrarily large sets of homeomorphism classes of such 4-manifolds, all of which have the same boundary $Y$ and the same form $\lambda$. Recently, this was extended~\cite{CCP,ConwayDaiMiller}, using the results of this paper, to produce infinite sets of homeomorphism classes with this property. Thus this paper leads to the first classification of infinite families of orientable $4$-manifolds, all with the same, nontrivial, equivariant intersection form. This can be compared with~\cite{Jahren-Kwasik,BDK-07} and \cite[Theorem~1.2]{Kwasik-Schultz}, which produced infinite families of manifolds homotopy equivalent to $\R P^4 \# \R P^4$ and $L(p,q) \times S^1$ respectively; note that in both cases $\pi_2=0$ and so there is no intersection form. We apply our results to study compact, oriented, locally flat, embedded surfaces in simply-connected~$4$-manifolds where the fundamental group of the exterior is infinite cyclic; we call these \emph{$\Z$-surfaces}. The classification of closed surfaces in~$4$-manifolds whose exterior is simply-connected was carried out by Boyer~\cite{BoyerRealization}; see also~\cite{Sunukjian}. 
Literature on the classification of discs in $D^4$ where the complement has fixed fundamental group includes~\cite{FriedlTeichner,ConwayPowellDiscs,Conway}. For surfaces in more general $4$-manifolds,~\cite{ConwayPowell} gave necessary and sufficient conditions for a pair of~$\Z$-surfaces to be equivalent. In this work, for a~$4$-manifold~$N$ with boundary~$S^3$ and a knot~$K \subset S^3$, we classify~$\Z$-surfaces in~$N$ with boundary~$K$ in terms of the equivariant intersection form of the surface exterior; see Theorem \ref{thm:SurfacesRelBoundaryIntro}. An application to $H$-sliceness can be found in Corollary~\ref{cor:HSliceIntro}, while Theorem~\ref{thm:SurfacesClosedIntro} classifies closed~$\Z$-surfaces. Finally, we compare these homeomorphism classifications with the smooth setting. We demonstrate that for every Hermitian form~$\lambda$ over $\Z[t^{\pm 1}]$ there are pairs of smooth 4-manifolds with boundary, ~$\pi_1 \cong \Z$, and equivariant intersection form~$\lambda$ which are homeomorphic rel.\ boundary but not diffeomorphic; see Theorem \ref{thm:exoticmanifolds}. We also show in Theorem~\ref{thm:exoticdiscs} that for every Hermitian form~$\lambda$ satisfying conditions which are conjecturally necessary, there is a smooth 4-manifold~$N$ with~$S^3$ boundary containing a pair of smoothly embedded~$\Z$-surfaces whose exteriors have equivariant intersection form $\lambda$ and which are topologically but not smoothly isotopic rel.\ boundary. \section{Statement of results} \label{sec:StatementIntro} Before stating our main result, we introduce some terminology. Our 3-manifolds~$Y$ will always be oriented and will generally come equipped with an epimorphism~$\varphi\colon \pi_1(Y) \twoheadrightarrow \Z$. \begin{definition} An oriented~$4$-manifold~$M$ together with an identification $\pi_1(M) \cong \Z$ is said to be a \emph{$\Z$-manifold} if the inclusion induced map~$\pi_1(\partial M) \to \pi_1(M)$ is surjective. \end{definition} When we say that a $\Z$-manifold~$M$ has boundary~$(Y,\varphi)$, we mean that $M$ comes equipped with a homeomorphism $\partial M \xrightarrow{\cong} Y$ such that the composition~$\pi_1(Y) \twoheadrightarrow \pi_1(M) \xrightarrow{\cong}~\Z$ agrees with~$\varphi$. We will always assume that the Alexander module~$H_1(Y;\Z[t^{\pm 1}])$ is~$\Z[t^{\pm 1}]$-torsion; recall that the Alexander module is the first homology group of the infinite cyclic cover~$Y^\infty \to Y$ corresponding to $\ker(\varphi)$. The action of the deck transformation group $\Z = \langle t \rangle$ makes the first homology into a~$\Z[t^{\pm 1}]$-module. \subsection{The classification result}\label{sub:MainThm} Our goal is to classify~$\Z$-manifolds~$M$ whose boundary~$\partial M \cong Y$ has $H_1(Y;\Z[t^{\pm 1}])$ torsion, up to orientation-preserving homeomorphism. The isometry class of the \textit{equivariant intersection form}~$\lambda_M$ on~$H_2(M;\Z[t^{\pm 1}])$ is an invariant of such~$M$ (this definition is recalled in Subsection~\ref{sub:HomologyIntersections}) and so, to classify such~$M$, it is natural to first fix a nondegenerate Hermitian form $\lambda$ over~$\Z[t^{\pm 1}]$, and then to classify~$\Z$-manifolds~$M$ with boundary~$\partial M \cong Y$, and equivariant intersection form~$\lambda$. The fact that $\lambda$ is nondegenerate implies that the Alexander module $H_1(Y;\Z[t^{\pm 1}])$ is torsion. 
For such a~$4$-manifold~$M,$ the equivariant intersection form~$\lambda_M$ on~$H_2(M;\Z[t^{\pm 1}])$ \emph{presents} the \emph{Blanchfield form} on $H_1(Y;\Z[t^{\pm 1}])$ (see Subsection~\ref{sub:EquivariantLinking}) $$\Bl_Y \colon H_1(Y;\Z[t^{\pm 1}]) \times H_1(Y;\Z[t^{\pm 1}]) \to \Q(t)/\Z[t^{\pm 1}].$$ We make this algebraic notion precise next. If~$\lambda \colon H \times H \to \Z[t^{\pm 1}]$ is a nondegenerate Hermitian form on a finitely generated free $\Z[t^{\pm 1}]$-module (for short, a \emph{form}), then we write~$\widehat{\lambda} \colon H \to H^*$ for the linear map~$z \mapsto \lambda(-,z)$, and there is a short exact sequence $$ 0 \to H \xrightarrow{\widehat{\lambda}} H^* \xrightarrow{} \coker(\widehat{\lambda}) \to 0.$$ Such a presentation induces a \emph{boundary linking form}~$\partial \lambda$ on~$\coker(\widehat{\lambda})$ in the following manner. For~$[x] \in \coker(\widehat{\lambda})$ with~$x \in H^*$, since $\coker(\widehat{\lambda})$ is $\Z[t^{\pm 1}]$-torsion there exist elements~$z\in H$ and~$p\in\Z[t^{\pm 1}] \sm \{0\}$ such that~$\lambda(-,z)=px\in H^*$. Then for~$[x],[y]\in \coker(\widehat{\lambda})$ with~$x,y\in H^*$, we define $$\partial\lambda([x],[y]):=\frac{y(z)}{p}\in\Q(t)/\Z[t^{\pm 1}].$$ One can check that~$\partial \lambda$ is independent of the choices of~$p$ and $z$. \begin{definition} \label{def:presentation} For~$T$ a torsion~$\Z[t^{\pm 1}]$-module with a linking form~$\ell \colon T \times T \to \Q(t)/\Z[t^{\pm 1}]$, a nondegenerate Hermitian form~$(H,\lambda)$ \textit{presents}~$(T,\ell)$ if there is an isomorphism~$h\colon\coker(\widehat{\lambda})\to T$ such that~$\ell(h(x),h(y))=\partial\lambda(x,y)$. Such an isomorphism~$h$ is called an \emph{isometry} of the forms, and the set of isometries is denoted~$\Iso(\partial\lambda,\ell)$. If~$(H,\lambda)$ presents~$(H_1(Y;\Z[t^{\pm 1}]), \unaryminus \Bl_Y)$ then we say~$(H,\lambda)$ \emph{presents}~$Y$. \end{definition} This notion of a presentation is well known (see e.g.~\cite{RanickiExact,CrowleySixt}), and appeared in the classification of simply-connected $4$-manifolds with boundary in~\cite{BoyerUniqueness,BoyerRealization} and in~\cite{ConwayPowell} for $4$-manifolds with $\pi_1 \cong \Z$. See also~\cite{BorodzikFriedlClassical1,FellerLewarkBalanced}. Presentations capture the geometric relationship between the linking form of a 3-manifold and the intersection form of a 4-manifold filling. To see why the form~$(H_2(M;\Z[t^{\pm 1}]),\lambda_M)$ presents~$\partial M$, one first observes that the long exact sequence of the pair~$(M, \partial M)$ with coefficients in~$\Z[t^{\pm 1}]$ reduces to the short exact sequence \[0 \to H_2(M;\Z[t^{\pm 1}]) \to H_2(M,\partial M;\Z[t^{\pm 1}]) \to H_1(\partial M;\Z[t^{\pm 1}]) \to 0,\] where $H_2(M;\Z[t^{\pm 1}])$ and $H_2(M,\partial M;\Z[t^{\pm 1}])$ are finitely generated free $\Z[t^{\pm 1}]$-modules~\cite[Lemma 3.2]{ConwayPowell}. The left term of the short exact sequence supports the equivariant intersection form~$\lambda_M$ and the right supports~$\Bl_{\partial M}$.
As explained in detail in~\cite[Remark 3.3]{ConwayPowell}, some algebraic topology gives the following commutative diagram of short exact sequences, where the isomorphism~$D_M$ is defined so that the right-most square commutes: \begin{equation} \label{eq:SES} \xymatrix{ 0 \ar[r]& H_2(M;\Z[t^{\pm 1}]) \ar[r]^{\widehat{\lambda}_M}\ar[d]^-{\id}_=& H_2(M;\Z[t^{\pm 1}])^* \ar[r]^-{}\ar[d]_\cong^-{\operatorname{ev}^{-1} \circ \operatorname{PD}}& \coker(\widehat{\lambda}_M) \ar[d]^{\operatorname{D_M}}_\cong\ar[r]& 0 \\ 0 \ar[r]& H_2(M;\Z[t^{\pm 1}]) \ar[r]& H_2(M,\partial M;\Z[t^{\pm 1}]) \ar[r]^-{}& H_1(\partial M;\Z[t^{\pm 1}]) \ar[r]& 0. } \end{equation} It then follows that~$(H_2(M;\Z[t^{\pm 1}]),\lambda_M)$ presents~$\partial M$, where the isometry~$\partial\lambda_M\cong \unaryminus \Bl_{\partial M}$ is given by~$D_M$. For details see~\cite[Proposition 3.5]{ConwayPowell}. Thus to classify the~$\Z$-manifolds~$M$ with boundary~$\partial M \cong Y$, it suffices to consider forms~$(H,\lambda)$ which present~$Y$. In Section~\ref{sec:MainTechnicalIntro} we use $D_M$ to define an additional \emph{automorphism invariant} \[b_M \in \Iso(\partial\lambda,\unaryminus\Bl_Y)/\Aut(\lambda).\] Here, as we define precisely in Equation~\eqref{eq:autaction} below, an isometry $F \in \Aut(\lambda)$ induces an isometry $\partial F$ of $\partial \lambda$, and the action on $h \in \Iso(\partial\lambda,\unaryminus\Bl_Y)$ is then by $F \cdot h=h \circ \partial F^{-1}.$ Additionally, recall that a Hermitian form $(H,\lambda)$ is \emph{even} if $\lambda(x,x)=q(x)+\overline{q(x)}$ for some $\Z[t^{\pm 1}]$-module homomorphism $q \colon H \to \Z[t^{\pm 1}]$ and is \emph{odd} otherwise. Our first classification now reads as follows. \begin{theorem} \label{thm:ClassificationRelBoundary} Fix the following data: \begin{enumerate} \item a closed 3-manifold $Y$, \item an epimorphism $\varphi \colon \pi_1(Y)\twoheadrightarrow\Z$ with respect to which the Alexander module of $Y$ is torsion, \item a nondegenerate Hermitian form $\lambda \colon H\times H\to\Z[t^{\pm 1}]$ which presents $Y$, \item if $\lambda$ is odd, a class $k \in \Z_2,$ \item a class $b \in \Iso(\partial \lambda,\unaryminus\Bl_Y)/\Aut(\lambda)$. \end{enumerate} Up to homeomorphism rel.\ boundary, there exists a unique $\Z$-manifold $M$ with boundary~$(Y,\varphi)$, equivariant intersection form $\lambda$, automorphism invariant $b$ and, in the odd case, Kirby-Siebenmann invariant $k$. \end{theorem} Here two 4-manifolds $M_0$ and $M_1$ with boundary $Y$ are \emph{homeomorphic rel.\ boundary} if there exists a homeomorphism $M_0 \xrightarrow{\cong} M_1$ such that the restriction composed with the given parametrisations of the boundary, $Y \cong \partial M_0 \xrightarrow{\cong} \partial M_1 \cong Y$, is the identity on $Y$. The uniqueness part of the theorem (which follows from~\cite{ConwayPowell}) can be thought of as answering whether or not a given pair of parametrisations $Y \cong \partial M_i$ extends to a homeomorphism $M_0 \cong M_1$. We refer to Remark~\ref{rem:UserGuide} for a guide to applying the uniqueness statement of Theorem~\ref{thm:ClassificationRelBoundary}. We give the proof of Theorem \ref{thm:ClassificationRelBoundary} (modulo our main technical theorem) in Section~\ref{sec:MainTechnicalIntro}. \begin{remark} \label{rem:MainTheorem} We collect a couple of further remarks about this result.
\begin{itemize} \item The \emph{automorphism invariant} that distinguishes $\Z$-manifolds with the same equivariant form is nontrivial to calculate in practice, as its definition typically involves choosing identifications of the boundary $3$-manifolds; see Section~\ref{sec:MainTechnicalIntro}. \item Theorem~\ref{thm:ClassificationRelBoundary} should be thought of as an extension of the work of Boyer~\cite{BoyerUniqueness,BoyerRealization} that classifies simply-connected $4$-manifolds with boundary and fixed intersection form, and an extension of the classification of closed $4$-manifolds with $\pi_1=\Z$~\cite{FreedmanQuinn,StongWang}. Boyer's main statements are formulated using presentations instead of isometries of linking forms, but both approaches can be shown to agree when the $3$-manifold is a rational homology sphere~\cite[Corollary E]{BoyerRealization}. By way of analogy, rational homology 3-spheres are to 1-connected 4-manifolds with boundary as pairs~$(Y,\varphi)$ with torsion Alexander module are to $\Z$-manifolds. \item For $(Y,\varphi)$ as above, it is implicit in Theorem~\ref{thm:ClassificationRelBoundary} and in~\cite{ConwayPowell} that if $M_0$ and $M_1$ are spin~$4$-manifolds with $\pi_1(M_i) \cong \Z$, boundary homeomorphic to~$(Y,\varphi)$, isometric equivariant intersection form, and the same automorphism invariant, then their Kirby-Siebenmann invariants agree. The argument is given in Remark~\ref{rem:KSProof} below, whereas Section~\ref{sub:Example} shows that the assumption on the automorphism invariants cannot be dropped. We refer to~\cite[Proposition~4.1~(vi)]{BoyerUniqueness} for the analogous fact in the simply-connected setting. \end{itemize} \end{remark} \begin{example} \label{ex:LargeStableClassIntro} We will show in Proposition~\ref{prop:LargeStableClass} that there are examples of pairs~$(Y,\varphi)$ for which the set of 4-manifolds with fixed boundary $Y$ and fixed (even) equivariant intersection form, up to homeomorphism rel.\ boundary, can have arbitrarily large cardinality (in the recent works~\cite{CCP,ConwayDaiMiller}, examples with infinite cardinality were obtained). Details are given in Section~\ref{sec:NonTrivialbAut}, but we note that the underlying algebra is similar to that used in~\cite{CCPS-short} and \cite{CCPS-long} to construct closed manifolds of dimension~$4k \geq 8$ with nontrivial homotopy stable classes. This arbitrarily large phenomenon also exists for simply-connected 4-manifolds bounding rational homology spheres, which can be deduced from Boyer's work \cite{BoyerUniqueness,BoyerRealization} with a similar proof. On the other hand, in the simply-connected setting such families can only ever be finite. \end{example} In Theorem~\ref{thm:ClassificationRelBoundary}, we fixed a parametrisation of the boundary. By changing the parametrisation by a homeomorphism of $Y$ that intertwines $\varphi$, we can change the invariant $b \in \Iso(\partial \lambda,\unaryminus\Bl_Y)/\Aut(\lambda)$ by post-composition with the induced automorphism of $-\Bl_Y$. This leads to an absolute (i.e.\ non-rel.\ boundary) classification analogous to Theorem~\ref{thm:ClassificationRelBoundary}, which we will formalise in Theorem~\ref{thm:Classification}. For now we highlight the following example, which contrasts with Example~\ref{ex:LargeStableClassIntro}.
\begin{example}\label{example:bdy-surface-x-S1} If $Y \cong \Sigma_g \times S^1$ and $\varphi \colon \pi_1(\Sigma_g \times S^1) \to \pi_1(S^1) \to \Z$ is induced by projection onto the second factor, then for a fixed non-degenerate Hermitian form~$\lambda$ that presents $Y$, if~$\lambda$ is even there is a unique homeomorphism class of 4-manifolds with~$\pi_1\cong \Z$, boundary $Y$, and equivariant intersection form~$\lambda$, and if $\lambda$ is odd there are exactly two such homeomorphism classes. Here we allow homeomorphisms to act nontrivially on the boundary. The key input is that every automorphism of $\Bl_Y$ can be realised by a homeomorphism of $Y$ that intertwines $\varphi$~\cite[Proposition~5.6]{ConwayPowell}. Therefore, given two 4-manifolds for which the rest of the data coincide, by re-parameterising $Y$ we can arrange for the automorphism invariants to agree. \end{example} In Section~\ref{sec:MainTechnicalIntro} we describe the automorphism invariant $b$ from Theorem \ref{thm:ClassificationRelBoundary}, give the statement of our main technical theorem on realisation of the invariants by $\Z$-manifolds, and explain how Theorem \ref{thm:ClassificationRelBoundary} implies a non rel.\ boundary version of the result. But first, in Subsections~\ref{sub:SurfaceIntro} and~\ref{sub:exoticaintro}, we discuss some applications. \subsection{Classification of~$\Z$-surfaces in simply-connected~$4$-manifolds with~$S^3$ boundary} \label{sub:SurfaceIntro} For a fixed simply-connected 4-manifold~$N$ with boundary $S^3$ and a fixed knot~$K \subset \partial N=S^3$, we call two locally flat embedded compact surfaces~$\Sigma,\Sigma' \subset N$ with boundary~$K \subset S^3$ \emph{equivalent rel.\ boundary} if there is an orientation-preserving homeomorphism~$(N,\Sigma) \cong (N,\Sigma')$ that is pointwise the identity on~$S^3 \cong \partial N$. We are interested in classifying the~$\Z$-surfaces in~$N$ with boundary~$K$ up to equivalence rel.\ boundary. As for manifolds, first we inventory some invariants of $\Z$-surfaces. The genus of $\Sigma$ and the equivariant intersection form~$\lambda_{N_\Sigma}$ on~$H_2(N_\Sigma;\Z[t^{\pm 1}])$ are invariants of such a surface~$\Sigma$, where~$N_\Sigma$ denotes the exterior~$N\smallsetminus \nu (\Sigma)$. Write~$E_K:=S^3 \setminus \nu(K)$ for the exterior of~$K$ and recall that the boundary of~$N_\Sigma$ has a natural identification $$\partial N_\Sigma\cong E_K \cup_\partial(\Sigma_{g,1} \times S^1)=:M_{K,g}.$$ As discussed in Subsection \ref{sub:MainThm}, there is a relationship between the equivariant intersection form~$\lambda_{N_\Sigma}$ on~$H_2(N_\Sigma;\Z[t^{\pm 1}])$ and the Blanchfield form~$\Bl_{M_{K,g}}$ on~$H_1(M_{K,g};\Z[t^{\pm 1}])$: the Hermitian form $(H_2(N_\Sigma;\Z[t^{\pm 1}]), \lambda_{N_\Sigma})$ presents~$M_{K,g}$. There is one additional necessary condition for a given form~$(H,\lambda)$ to be isometric to the intersection pairing~$(H_2(N_\Sigma;\Z[t^{\pm 1}]), \lambda_{N_\Sigma})$ for some surface~$\Sigma$. Observe that we can reglue the neighborhood of~$\Sigma$ to~$N_\Sigma$ to recover~$N$. This is reflected in the intersection form, as follows. We write~$\lambda(1):=\lambda \otimes_{\Z[t^{\pm 1}]} \Z_\varepsilon$, where $\Z_\varepsilon$ denotes~$\Z$ with the trivial $\Z[t^{\pm 1}]$-module structure. If~$W$ is a~$\Z$-manifold, then~$\lambda_W(1) \cong Q_W$, where~$Q_W$ denotes the standard intersection form of~$W$; see e.g.~\cite[Lemma 5.10]{ConwayPowell}. 
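Concretely, if~$\lambda$ is represented by a Hermitian matrix~$A(t)$ over~$\Z[t^{\pm 1}]$ with respect to some basis of~$H$, then~$\lambda(1)$ is the symmetric bilinear form over~$\Z$ represented by the integer matrix~$A(1)$; this matrix formulation of the condition reappears in Corollary~\ref{cor:HSliceIntro} below.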
Therefore, if~$\lambda \cong \lambda_{N_\Sigma}$, then we have the isometries $$\lambda(1) \cong \lambda_{N_\Sigma}(1)=Q_{N_\Sigma} \cong Q_N \oplus (0)^{\oplus 2g},$$ where the last isometry follows from a Mayer-Vietoris argument. The following theorem (which is stated slightly more generally in Theorem~\ref{thm:SurfacesRelBoundary} below) shows that these invariants, with these two necessary conditions, are in fact also sufficient once an automorphism invariant is fixed. \begin{theorem} \label{thm:SurfacesRelBoundaryIntro} Fix the following data: \begin{enumerate} \item a simply-connected~$4$-manifold $N$ with boundary~$S^3$, \item an oriented knot $K \subset S^3$, \item an integer~$g \in \mathbb{Z}_{\geq 0},$ \item a nondegenerate Hermitian form~$(H,\lambda)$ over~$\Z[t^{\pm 1}]$ which presents~$M_{K,g}$ and satisfies~$\lambda(1)\cong Q_N \oplus (0)^{\oplus 2g}$, \item a class $b \in \Aut(\Bl_K)/\Aut(\lambda)$. \end{enumerate} Up to equivalence rel.\ boundary, there exists a unique genus~$g$~$\Z$-surface $\Sigma \subset N$ with boundary~$K$ whose exterior $N_\Sigma$ has equivariant intersection form $\lambda$ and automorphism invariant $b$. \end{theorem} The action of the group~$\Aut(\lambda)$ on the set~$\Aut(\Bl_K)$ arises by restricting the action of $\Aut(\lambda)$ on~$\Aut(\partial \lambda) \cong \Aut(\Bl_{M_{K,g}}) \cong \Aut(\Bl_K) \oplus \operatorname{Sp}_{2g}(\Z)$ to the first summand. Here the (non-canonical) isomorphism~$\Aut(\partial \lambda) \cong \Aut(\Bl_{M_{K,g}})$ holds because the form~$\lambda$ presents $M_{K,g}$, while the isomorphism $\Aut(\Bl_{M_{K,g}}) \cong \Aut(\Bl_K) \oplus \operatorname{Sp}_{2g}(\Z)$ is a consequence of~\cite[Propositions 5.6 and 5.7]{ConwayPowell}. Again, the construction is explicit. The idea is that the set of topological surfaces (up to equivalence rel.\ boundary) is in bijection with the set of surface complements (up to homeomorphism rel.\ boundary). So this theorem can be recovered from Theorem \ref{thm:ClassificationRelBoundary} by taking $Y$ to be $M_{K,g}$. We detail this in Section~\ref{sec:Discs} where we state the outcome as a bijection between $ \Aut(\Bl_K)/\Aut(\lambda)$ and the set of rel.\ boundary isotopy classes of $\Z$-surfaces $\Sigma \subset N$ with boundary $K$ and equivariant intersection form $\lambda_{N_\Sigma} \cong \lambda$. Finally, we note that when~$N=D^4$, equivalence rel.\ boundary can be upgraded to isotopy rel.\ boundary via the Alexander trick. See also \cite[Theorem~F]{Orson-Powell-MCG} for more cases when equivalence can be upgraded to isotopy. \begin{remark} \label{rem:Discs} Previous classification results of locally flat discs in $4$-manifolds include $\Z$-discs in~$D^4$~\cite{FreedmanQuinn,ConwayPowellDiscs}, $BS(1,2)$-discs in~$D^4$~\cite{FriedlTeichner,ConwayPowellDiscs} and $G$-discs in $D^4$ (under some assumptions on the group~$G$)~\cite{FriedlTeichner,Conway}. In the latter case it is not known whether there are groups satisfying the assumptions other than $\Z$ and $BS(1,2)$. Our result is the first classification of discs with non simply-connected exteriors in 4-manifolds other than~$D^4$. \end{remark} Before continuing with $\Z$-surfaces, we mention an application of Theorem~\ref{thm:SurfacesRelBoundaryIntro} to $H$-sliceness. A knot~$K$ in~$\partial N$ is said to be (topologically) \emph{$H$-slice} if~$K$ bounds a locally flat, embedded disc~$D$ in~$N$ that represents the trivial class in~$H_2(N,\partial N)$. 
The study of $H$-slice knots has garnered some interest recently because of its potential applications towards producing small closed exotic 4-manifolds~\cite{ConwayNagel, ManolescuMarengonSarkarWillis, ManolescuMarengonPiccirillo, IidaMukherjeeTaniguchi,ManolescuPiccirillo,KjuchukovaMillerRaySakalli}. Since~$\Z$-slice knots are $H$-slice (see e.g.~\cite[Lemma~5.1]{ConwayPowell}), Theorem~\ref{thm:SurfacesRelBoundaryIntro} gives a new criterion for topological~$H$-sliceness. Our results also apply in higher genus. When~$N=D^4$, this is reminiscent of the combination of~\cite[Theorems 2 and 3]{FellerLewarkOnClassical} and~\cite[Theorem 1.1]{BorodzikFriedlLinking} (and for $g=0$ it is Freedman's theorem that Alexander polynomial one knots bound $\Z$-discs~\cite{Freedman:1984-1,FreedmanQuinn}). In connected sums of copies of~$\C P^2$, this is closely related to~\cite[Theorem~1.3]{KjuchukovaMillerRaySakalli}. Compare also~\cite[Theorem~1.10]{FellerLewarkBalanced}, which applies in connected sums of copies of $\C P^2 \# \overline{\C P}^2$ and $S^2 \times S^2$. \begin{corollary} \label{cor:HSliceIntro} Let~$N$ be a simply-connected~$4$-manifold with boundary~$S^3$ and let~$K \subset S^3$ be a knot. If~$\Bl_{M_{K,g}}$ is presented by a nondegenerate Hermitian matrix~$A(t)$ such that~$A(1)$ is congruent to~$Q_N \oplus (0)^{\oplus 2g}$, then~$K$ bounds a genus~$g$~$\Z$-surface in~$N$. In particular, when~$g=0$,~$K$ is~$H$-slice in~$N$. \end{corollary} We also study~$\Z$-surfaces up to equivalence (instead of equivalence rel.\ boundary). Here an additional technical requirement is needed on the knot exterior $E_K:=S^3 \setminus \nu(K)$. \begin{theorem} \label{thm:SurfacesWithBoundaryIntro} Let~$N$ be a simply-connected~$4$-manifold with boundary~$S^3$ and let~$K$ be a knot in~$S^3$ such that every isometry of~$\Bl_K$ is realised by an orientation-preserving homeomorphism~$E_K \to E_K$. If a nondegenerate Hermitian form~$(H,\lambda)$ over~$\Z[t^{\pm 1}]$ presents~$M_{K,g}$ and satisfies~$\lambda(1)\cong Q_N \oplus (0)^{\oplus 2g}$, then up to equivalence, there exists a unique genus~$g$ surface~$\Sigma \subset N$ with boundary~$K$ whose exterior has equivariant intersection form~$\lambda$. \end{theorem} The classification of closed~$\Z$-surfaces then follows from Theorem \ref{thm:SurfacesWithBoundaryIntro}. To state the result, given a closed simply-connected $4$-manifold $X$, we use $X_\Sigma$ to denote the exterior of a surface $\Sigma \subset X$ and~$N:=X \setminus \mathring{D}^4$ for the manifold obtained by puncturing $X$. The details are presented in Section~\ref{sub:Closed}. The idea behind the proof is that closed surfaces are in bijective correspondence with surfaces with boundary $U$, so we can apply Theorem~\ref{thm:SurfacesWithBoundaryIntro}. \begin{theorem} \label{thm:SurfacesClosedIntro} Let~$X$ be a closed simply-connected~$4$-manifold. If a nondegenerate Hermitian form~$(H,\lambda)$ over~$\Z[t^{\pm 1}]$ presents~$\Sigma_g \times S^1$ and satisfies~$\lambda(1)\cong Q_X \oplus (0)^{\oplus 2g}$, then there exists a unique $($up to equivalence$)$ genus~$g$ surface~$\Sigma \subset X$ whose exterior has equivariant intersection form~$\lambda$. \end{theorem} Note that the boundary 3-manifold in question here, $\Sigma_g \times S^1$, is the same one that appeared in Example~\ref{example:bdy-surface-x-S1}. We conclude with a couple of remarks on Theorems~\ref{thm:SurfacesRelBoundaryIntro},~\ref{thm:SurfacesWithBoundaryIntro}, and~\ref{thm:SurfacesClosedIntro}.
Firstly, we note that for each theorem, the uniqueness statements follow from~\cite{ConwayPowell}. Our contributions in this work are the existence statements. Secondly, we note that similar results were obtained for closed surfaces with simply-connected complements by Boyer~\cite{BoyerRealization}. Some open questions concerning $\Z$-surfaces are discussed in Subsection~\ref{sub:OpenQuestions}. \subsection{Exotica for all equivariant intersection forms} \label{sub:exoticaintro} So far, we have seen that the data in Theorems~\ref{thm:ClassificationRelBoundary} and~\ref{thm:SurfacesRelBoundaryIntro} determine the topological type of $\Z$-manifolds and $\Z$-surfaces respectively. In what follows, we investigate the smooth failure of these statements. One of the driving questions in smooth 4-manifold topology is whether every smoothable simply-connected closed 4-manifold admits multiple smooth structures. This question has natural generalisations to 4-manifolds with boundary and with other fundamental groups; we set up these generalisations with the following definition. \begin{definition} \label{def:ExoticallyRealisableRel} For a 3-manifold~$Y$, a (possibly degenerate) symmetric form~$Q$ over~$\Z$ (resp.\ Hermitian form~$\lambda$ over~$\Z[t^{\pm 1}]$) is \textit{exotically realisable rel.~$Y$} if there exists a pair of smooth simply-connected 4-manifolds~$M$ and~$M'$ with boundary $Y$ (resp.\ $\Z$-manifolds with boundary $Y$) and intersection form~$Q$ (resp.\ equivariant intersection form~$\lambda$) such that there is an orientation-preserving homeomorphism~$F \colon M\to M'$ (for $\pi_1 \cong \Z$, we additionally require that $F$ respects the identifications of $\pi_1(M)$ and $\pi_1(M')$ with $\Z$) but no diffeomorphism~$G \colon M\to M'$. \end{definition} In this language, the driving question above becomes (a subquestion of) the following: which symmetric bilinear forms over~$\Z$ are exotically realisable rel.~$S^3$? There is substantial literature demonstrating that some forms are exotically realisable rel.~$S^3$ (we refer to~\cite{AkhmedovPark,AkhmedovPark2} both for the state of the art and for a survey of results on the topic), but there are many forms, such as definite forms or forms with~$b_2<3$, for which determining exotic realisability rel.~$S^3$ remains out of reach. For more general 3-manifolds, the situation is worse; in fact it is an open question whether for every integer homology sphere $Y$ there exists \textit{some} symmetric form $Q$ that is exotically realisable rel.~$Y$ \cite{EMM3manifolds}. Presently there only seems to be traction on exotic realisability of intersection forms if one relinquishes control of the homeomorphism type of the boundary. \begin{definition}\label{def:relexotic} A symmetric form~$Q$ over~$\Z$ (resp.\ a Hermitian form~$\lambda$ over~$\Z[t^{\pm 1}]$) is \textit{exotically realisable} if there exists a pair of smooth simply-connected 4-manifolds~$M$ and~$M'$ with intersection form~$Q$ (resp.\ $\Z$-manifolds with equivariant intersection form~$\lambda$) such that there is an orientation-preserving homeomorphism~$F \colon M\to M'$ (for $\pi_1 \cong \Z$, we additionally require that $F$ respects the identifications of $\pi_1(M)$ and $\pi_1(M')$ with $\Z$) but no diffeomorphism~$G \colon M\to M'$. \end{definition} The following theorem, which appears in \cite{AR16} for $n=0$ and \cite{AkbulutYasui} for $n>1$, shows that, in contrast to the closed setting, \emph{every} symmetric bilinear form over $\Z$ is exotically realisable.
\begin{theorem}[{Akbulut-Yasui~\cite{AkbulutYasui} and Akbulut-Ruberman~\cite{AR16}}] \label{thm:exoticsimplyconn} Every symmetric bilinear form~$(\Z^n,Q)$ over~$\Z$ is exotically realisable. \end{theorem} Following our classification of $\Z$-manifolds with fixed boundary and fixed equivariant intersection form~$\lambda$, it is natural to ask which Hermitian forms~$\lambda$ are exotically realisable, with or without fixing a parametrisation of the boundary 3-manifold. We resolve the latter. \begin{theorem}\label{thm:exoticmanifolds} Every Hermitian form $(H,\lambda)$ over~$\Z[t^{\pm 1}]$ is exotically realisable. \end{theorem} 4-manifold topologists are also interested in finding smooth surfaces which are topologically but not smoothly isotopic. While the literature in the closed case includes~\cite{FinashinKreckViro,FintushelStern, KimModifying,KimRubermanSmooth, KimRubermanTopological,Mark,HoffmanSunukjian}, there has been a recent surge of interest in the relative setting, on which we now focus~\cite{JuhaszMillerZemke, Hayden, HaydenKjuchukovaKrishnaMillerPowellSunukjian, HaydenSundberg, DaiMallickStoffregen}; see also~\cite{AkbulutZeeman}. Most relevant to us are the exotic ribbon discs from~\cite{Hayden}. In order to prove that his discs in $D^4$ are topologically isotopic, Hayden showed that their exteriors have group $\Z$ and appealed to~\cite{ConwayPowellDiscs}. From the perspective of this paper and~\cite{ConwayPowell}, any two $\Z$-ribbon discs are isotopic rel.\ boundary because their exteriors are aspherical and therefore have trivial equivariant intersection form. To generalise Hayden's result to forms other than the trivial one, we introduce some terminology. \begin{definition} \label{def:realisedByExoticSurfaces} For a fixed smooth simply-connected 4-manifold~$N$ with boundary $S^3$, a form~$\lambda$ over~$\Z[t^{\pm 1}]$ is \emph{realised by exotic $\Z$-surfaces in~$N$} if there exists a pair of smooth properly embedded~$\Z$-surfaces~$\Sigma$ and~$\Sigma'$ in~$N$, with the same boundary, whose exteriors have equivariant intersection forms isometric to~$\lambda$, and which are topologically but not smoothly isotopic rel.\ boundary. \end{definition} Using this terminology, Hayden's result states that the trivial form is realised by exotic $\Z$-discs (in~$D^4$). The next result shows that in fact \emph{every} form is realised by exotic $\Z$-discs. \begin{theorem}\label{thm:exoticdiscs} Every Hermitian form~$(H,\lambda)$ over~$\Z[t^{\pm 1}]$, such that~$\lambda(1)$ is realised as the intersection form of a smooth simply-connected 4-dimensional 2-handlebody~$N$ with boundary $S^3$, is realised by exotic $\Z$-discs in $N$. \end{theorem} \begin{remark} \label{rem:Smooth} We make a couple of remarks on Theorems~\ref{thm:exoticmanifolds} and~\ref{thm:exoticdiscs}. \begin{itemize} \item The~$11/8$ conjecture predicts that every integer intersection form which is realisable by a smooth~$4$-manifold with~$S^3$ boundary is realisable by a smooth 4-dimensional 2-handlebody with~$S^3$ boundary; thus our hypothesis on the realisability of~$\lambda(1)$ by 2-handlebodies is likely not an additional restriction (a nice exposition on why this follows from the~$11/8$ conjecture is given in~\cite[page 24]{HLSX}). \item The handlebody $N$ is very explicit: it can be built from $D^4$ by attaching $2$-handles according to $\lambda(1)$. In particular, when $\lambda$ is the trivial form, then $N=D^4$ and so Theorem~\ref{thm:exoticdiscs} demonstrates that there are exotic discs in $D^4$.
This was originally proved in~\cite{Hayden}, and we note that our proof relies on techniques developed there. \item The proof of Theorem~\ref{thm:exoticdiscs} also shows that every smooth $2$-handlebody with $S^3$ boundary contains a pair of exotic $\Z$-discs. We expand on this above the statement of Theorem~\ref{thm:ExoticDiscsMain}. \end{itemize} \end{remark} We briefly mention the idea of the proof of Theorem~\ref{thm:exoticmanifolds}. For a given Hermitian form $(H,\lambda)$ over~$\Z[t^{\pm 1}]$, we construct a Stein $4$-manifold $M$ with $\pi_1(M) \cong \Z$ and $\lambda_M \cong \lambda$ that contains a cork. Twisting along this cork produces the $4$-manifold $M'$ and the homeomorphism $F \colon M \cong M'$. We show that if $F|_\partial$ extended to a diffeomorphism $M \cong M'$, two auxiliary $4$-manifolds $W$ and $W'$ (obtained from $M$ and $M'$ by adding a single $2$-handle) would be diffeomorphic. Using work of Lisca-Matic~\cite{LiscaMatic}, we show this is not the case by proving that $W$ is Stein whereas $W'$ is not. This proves that $M$ and $M'$ are non-diffeomorphic rel.\ $F|_\partial$. We then use a result of \cite{AR16} to show that there exists a pair of smooth manifolds $V$ and $V'$, which are homotopy equivalent to~$M$ and~$M'$ respectively, and which are homeomorphic but not diffeomorphic to each other. The proof of Theorem~\ref{thm:exoticdiscs} uses similar ideas. \subsection*{Organisation} In Section~\ref{sec:MainTechnicalIntro} we describe our main technical result and how it implies Theorem~\ref{thm:ClassificationRelBoundary}. In Section~\ref{sec:Prelim}, we recall and further develop the theory of equivariant linking numbers. In Section~\ref{sec:reidemeister-torsion} we review the facts we will need on Reidemeister torsion. In Section~\ref{sec:ProofMainTechnical}, we prove our main technical result, Theorem~\ref{thm:MainTechnicalIntro}. Section~\ref{sec:Discs} is concerned with our applications to surfaces; in particular, we prove Theorems~\ref{thm:SurfacesRelBoundaryIntro},~\ref{thm:SurfacesWithBoundaryIntro} and~\ref{thm:SurfacesClosedIntro}. Our results in the smooth category, namely Theorems~\ref{thm:exoticmanifolds} and~\ref{thm:exoticdiscs}, are proved in Section~\ref{sec:ubiq}. Finally, Section~\ref{sec:NonTrivialbAut} exhibits the arbitrarily large collections promised in Example~\ref{ex:LargeStableClassIntro}. \subsection*{Conventions} \label{sub:Conventions} In Sections~\ref{sec:MainTechnicalIntro}-\ref{sec:Discs} and~\ref{sec:NonTrivialbAut}, we work in the topological category with locally flat embeddings unless otherwise stated. In Section~\ref{sec:ubiq}, we work in the smooth category. From now on, all manifolds are assumed to be compact, connected, based and oriented; if a manifold has a nonempty boundary, then the basepoint is assumed to be in the boundary. If $P$ is a manifold and $Q \subseteq P$ is a submanifold with closed tubular neighborhood $\ol{\nu}(Q) \subseteq P$, then~$P_Q := P \setminus \nu(Q)$ will always denote the exterior of $Q$ in $P$, that is, the complement of the open tubular neighborhood. The only exception to this use of notation is that the exterior of a knot~$K$ in $S^3$ will be denoted $E_K$ instead of~$S^3_K$. We write~$p \mapsto \overline{p}$ for the involution on~$\Z[t^{\pm 1}]$ induced by~$t \mapsto t^{-1}$.
Given a~$\Z[t^{\pm 1}]$-module~$H$, we write~$\overline{H}$ for the~$\Z[t^{\pm 1}]$-module whose underlying abelian group is~$H$ but with module structure given by~$p \cdot h=\overline{p}h$ for~$h \in H$ and~$p \in \Z[t^{\pm 1}]$. We write $H^*:=\overline{\Hom_{\Z[t^{\pm 1}]}(H,\Z[t^{\pm 1}])}$. If a pullback map $F^*$ is invertible we shall abbreviate $(F^*)^{-1}$ to $F^{-*}$. Similarly, for an invertible square matrix $A$ we write $A^{-T} := (A^T)^{-1}$. \subsection*{Acknowledgments} We thank the referee of a previous draft of this paper for helpful comments on the exposition. L.P.\ was supported in part by a Sloan Research Fellowship and a Clay Research Fellowship. L.P.\ thanks the National Center for Competence in Research (NCCR) SwissMAP of the Swiss National Science Foundation for their hospitality during a portion of this project. M.P.\ was partially supported by EPSRC New Investigator grant EP/T028335/2 and EPSRC New Horizons grant EP/V04821X/2. \section{The main technical realisation statement} \label{sec:MainTechnicalIntro} The goal of this section is to formulate our main technical theorem, to explain how it implies Theorem~\ref{thm:ClassificationRelBoundary} from the introduction, and to formulate its non-relative analogue. Along the way we also define the automorphism invariant in more detail. We begin by defining a set of $\Z$-manifolds $\mathcal{V}_\lambda^0(Y)$ with boundary $Y$ and intersection form $\lambda$. Then we describe a map~$b\colon \mathcal{V}_\lambda^0(Y)\to\Iso(\partial \lambda,\unaryminus\Bl_Y)/\Aut(\lambda)$. Theorem~\ref{thm:ClassificationRelBoundary} (as formulated in Remark~\ref{rem:BijectionRelBoundary}) then reduces to the statement that~$b$ is a bijection. As we will explain, the injectivity of~$b$ follows from~\cite[Theorem~1.10]{ConwayPowell}. The main technical result of this paper is Theorem \ref{thm:MainTechnicalIntro}, which gives the surjectivity of~$b$ (and thus implies Theorem \ref{thm:ClassificationRelBoundary}). We also prove in this section that Theorem \ref{thm:Classification}, our absolute (i.e.\ non-rel.\ boundary) homeomorphism classification result, follows from Theorem \ref{thm:ClassificationRelBoundary}. We finish the section with an outline of the proof of Theorem~\ref{thm:MainTechnicalIntro}. We start by describing the set $\mathcal{V}_\lambda^0(Y)$ from Theorem~\ref{thm:ClassificationRelBoundary} more carefully. \begin{definition} \label{def:V0lambdaY} Let~$Y$ be a~$3$-manifold with an epimorphism~$\varphi \colon \pi_1(Y) \twoheadrightarrow \Z$ whose Alexander module is torsion, and let~$(H,\lambda)$ be a Hermitian form presenting~$Y$. Consider the set~$S_\lambda(Y)$ of pairs~$(M,g)$, where \begin{itemize} \item $M$ is a~$\Z$-manifold with a fixed identification $\pi_1(M) \xrightarrow{\cong} \Z$, equivariant intersection form isometric to~$\lambda$, and boundary homeomorphic to~$Y$; \item $g \colon \partial M \xrightarrow{\cong} Y$ is an orientation-preserving homeomorphism such that~$Y \xrightarrow{g^{-1},\cong} \partial M \to M$ induces~$\varphi$ on fundamental groups. \end{itemize} Define~$\mathcal{V}_\lambda^0(Y)$ as the quotient of~$S_\lambda(Y)$ in which two pairs~$(M_1,g_1), (M_2,g_2)$ are deemed equal if and only if there is a homeomorphism~$\Phi \colon M_1 \cong M_2$ such that~$\Phi|_{\partial M_1}=g_2^{-1} \circ g_1$. Note that such a homeomorphism is necessarily orientation-preserving because $g_1$ and $g_2$ are.
For conciseness, we will say that~$(M_1,g_1)$ and~$(M_2,g_2)$ are \emph{homeomorphic rel.\ boundary} to indicate the existence of such a homeomorphism~$\Phi$. \end{definition} \begin{remark} \label{rem:BijectionRelBoundary} Using Definition~\ref{def:V0lambdaY}, Theorem~\ref{thm:ClassificationRelBoundary} is equivalent to the following statement. \emph{If $\lambda$ presents $Y$, then~$\mathcal{V}_\lambda^0(Y)$ is nonempty and corresponds bijectively to} \begin{itemize} \item \emph{$\Iso(\partial \lambda,\unaryminus\Bl_Y)/\Aut(\lambda)$, if~$\lambda$ is an even form;} \item \emph{$\left( \Iso(\partial \lambda,\unaryminus\Bl_Y)/\Aut(\lambda)\right) \times \Z_2$, if~$\lambda$ is an odd form. The map to $\Z_2$ is given by the Kirby-Siebenmann invariant.} \end{itemize} The bijection is explicit and will be constructed in Construction~\ref{cons:EmbVBijection}. Additionally, note that since~$(H,\lambda)$ is assumed to present~$Y$, there is an isometry~$\partial \lambda \cong \unaryminus \Bl_Y$, and fixing a choice of one such isometry leads to a bijection $$\Iso(\partial \lambda,\unaryminus\Bl_Y)/\Aut(\lambda) \approx \Aut(\partial \lambda)/\Aut(\lambda),$$ where $\Aut(\partial \lambda)$ denotes the group of self-isometries of $\partial \lambda$. Note, however, that this bijection is not canonical as it depends on the choice of the isometry~$\partial \lambda \cong -\Bl_Y$. \end{remark} \begin{construction}[Constructing the map~$b \colon \mathcal{V}_\lambda^0(Y)\to\Iso(\partial \lambda,\unaryminus\Bl_Y)/\Aut(\lambda)$] \label{cons:Invariant} \label{cons:PresentationAssociatedToManifold} Let~$Y$ be a~$3$-manifold with an epimorphism~$\varphi \colon \pi_1(Y) \twoheadrightarrow \Z$ whose corresponding Alexander module is torsion, and let~$(H,\lambda)$ be a form presenting~$Y$. Let~$(M,g)$ be an element of~$\mathcal{V}^0_\lambda(Y)$, i.e.\ $M$ is a~$\Z$-manifold with equivariant intersection form isometric to~$\lambda$ and $g \colon \partial M \cong Y$ is a homeomorphism as in Definition~\ref{def:V0lambdaY}. In the text preceding Theorem \ref{thm:ClassificationRelBoundary}, we showed how~$M$ determines an isometry~$D_M \in \Iso(\partial \lambda_M, \unaryminus \Bl_{\partial M})$. Morally, one should think that this isometry~$D_M$ is the invariant we associate to~$M$. For this to be meaningful, however, we instead need an isometry that lives in a set defined in terms of just the 3-manifold~$Y$ and the form~$(H,\lambda)$, without referring to $M$ itself. We resolve this by composing~$D_M$ with other isometries, so that our invariant is ultimately an element of~$\Iso(\partial \lambda,\unaryminus\Bl_Y)$. Once we have built the invariant, we will show it is well defined up to an action by~$\Aut(\lambda)$. We first use $g$ to describe an isometry $\Bl_{\partial M} \cong \Bl_Y$. Since on the level of fundamental groups~$g$ intertwines the maps to $\Z$, \cite[Proposition 3.7]{ConwayPowell} implies that~$g$ induces an isometry $$g_* \colon \Bl_{\partial M} \cong \Bl_Y.$$ Next we describe an isometry $\partial \lambda \cong \partial \lambda_M$. The assumption that~$M$ has equivariant intersection form~$\lambda$ means by definition that there is an isometry $F \colon \lambda \cong \lambda_M$, i.e.\ an isomorphism~$F \colon H\to H_2(M;\Z[t^{\pm 1}])$ that intertwines the forms~$\lambda$ and~$\lambda_M$. Note that there is no preferred choice of~$F$.
Any such~$F$ induces an isometry~$\partial F \in \Iso(\partial \lambda,\partial \lambda_M)$ as follows: the isomorphism~$F \colon H\to H_2(M;\Z[t^{\pm 1}])$ gives an isomorphism~$(F^{*})^{-1} \colon H^*\to H_2(M;\Z[t^{\pm 1}])^*$ that descends to an isomorphism~$\coker(\widehat{\lambda})\cong \coker(\widehat{\lambda}_M)$ and is in fact an isometry; this is by definition $$\partial F := (F^{*})^{-1} \colon \partial \lambda \cong \partial \lambda_M.$$ This construction is described in greater generality in~\cite[Subsection 2.2]{ConwayPowell}. We shall henceforth abbreviate $(F^*)^{-1}$ to $F^{-*}$. We are now prepared to associate an isometry in~$\Iso(\partial \lambda,\unaryminus\Bl_Y)$ to~$(M,g)\in \mathcal{V}_\lambda^0(Y)$ as follows: choose an isometry~$F \colon \lambda \cong \lambda_M$ and consider the isometry $$b_{(M,g,F)}:=g_* \circ D_M \circ \partial F \in \Iso(\partial \lambda,\unaryminus\Bl_Y).$$ We are not quite done, because we need to ensure that our invariant is independent of the choice of~$F$ and that $b$ defines a map on $\mathcal{V}_\lambda^0(Y)$. First, we will make our invariant independent of the choice of~$F$. We require the following observation. Given a Hermitian form $(H,\lambda)$ and a linking form $(T,\ell)$, there is a natural left action~$\Aut(\lambda) \curvearrowright \Iso(\partial \lambda,\ell)$ defined via \begin{equation}\label{eq:autaction} G \cdot h :=h \circ \partial G^{-1} \text{ for } G\in\Aut(\lambda) \text{ and } h \in \Iso(\partial \lambda,\ell). \end{equation} In particular, we can consider $$b_{(M,g)}:=g_* \circ D_M \circ \partial F \in \Iso(\partial \lambda,\unaryminus\Bl_Y)/\Aut(\lambda).$$ It is now not difficult to check that~$b_{(M,g)}$ is independent of the choice of~$F$. Indeed, if~$F' \colon \lambda \cong \lambda_M$ is another such isometry, then~$G:=F'^{-1} \circ F \in \Aut(\lambda)$ satisfies~$\partial F'=\partial F \circ \partial G^{-1}$, and therefore~$b_{(M,g,F')}=b_{(M,g,F)} \circ \partial G^{-1}=G \cdot b_{(M,g,F)}$. It also follows fairly quickly that if~$(M_0,g_0)$ and~$(M_1,g_1)$ are homeomorphic rel.\ boundary (recall Definition~\ref{def:V0lambdaY}), then $b_{(M_0,g_0)}=b_{(M_1,g_1)}$. From now on we omit the boundary identification~$g \colon \partial M \cong Y$ from the notation, writing~$b_M$ instead of~$b_{(M,g)}$. This concludes the construction of our automorphism invariant. \end{construction} We are now ready to state our main technical theorem. \begin{theorem} \label{thm:MainTechnicalIntro} Let~$Y$ be a~$3$-manifold with an epimorphism~$\varphi \colon \pi_1(Y) \twoheadrightarrow \Z$ whose Alexander module is torsion, and let~$(H,\lambda)$ be a nondegenerate Hermitian form presenting~$Y$. For every class~$b \in \Iso(\partial \lambda,\unaryminus\Bl_Y)/\Aut(\lambda)$, there is a~$\Z$-manifold~$M$ with equivariant intersection form~$\lambda_M \cong \lambda$, boundary~$Y$, and~$b_M=b$. If the form is odd, then~$M$ can be chosen to have either~$\ks(M)=0$ or~$\ks(M)=1$. \end{theorem} We now describe how to obtain Theorem~\ref{thm:ClassificationRelBoundary} (as formulated in Remark~\ref{rem:BijectionRelBoundary}) by combining this result with~\cite{ConwayPowell}. \begin{proof}[Proof of Theorem~\ref{thm:ClassificationRelBoundary} assuming Theorem~\ref{thm:MainTechnicalIntro}] First, notice that Theorem~\ref{thm:MainTechnicalIntro} implies the surjectivity portion of the statement in Theorem~\ref{thm:ClassificationRelBoundary}.
It therefore suffices to prove that the assignment~$ \mathcal{V}_\lambda^0(Y) \to \Iso(\partial \lambda,\unaryminus\Bl_Y)/\Aut(\lambda)$ which sends~$M$ to~$b_M$ is injective for~$\lambda$ even, and that the assignment~$\mathcal{V}^0_\lambda(Y) \to\left( \Iso(\partial \lambda,\unaryminus\Bl_Y)/\Aut(\lambda)\right) \times~\Z_2$ which sends~$M$ to~$(b_M,\ks(M))$ is injective for~$\lambda$ odd. Let~$(M_0,g_0),(M_1,g_1)$ be two pairs representing elements in~$\mathcal{V}_\lambda^0(Y)$. Each~$4$-manifold~$M_i$ comes with an isometry~$F_i\colon (H, \lambda)\to (H_2(M_i;\Z[t^{\pm 1}]),\lambda_{M_i})$ and for $i=0,1$, the homeomorphisms $g_i \colon \partial M_i \to Y$ are as in Definition~\ref{def:V0lambdaY}. We then get epimorphisms $$(g_i)_* \circ D_{M_i}\circ \partial F_i\circ \pi\colon H^* \twoheadrightarrow H_1(Y; \Z[t^{\pm 1}]).$$ Here $\pi \colon H^* \to \coker(\widehat{\lambda})$ denotes the canonical projection. We assume that~$b_{M_0} = b_{M_1}$ and, if~$\lambda$ is odd, then we additionally assume that~$\ks(M_0)=\ks(M_1)$. The fact that~$b_{M_0} = b_{M_1}$ implies that there is an isometry~$F \colon (H,\lambda) \cong (H,\lambda)$ that makes the following diagram commute: $$ \xymatrix @C+1.5cm{ 0\ar[r] &H \ar[r]^{\widehat{\lambda}}\ar[d]_F & H^* \ar[r]^-{(g_0)_* \circ D_{M_0} \circ \partial F_0 \circ \pi}\ar[d]_-{F^{-*}}& H_1(Y;\Z[t^{\pm 1}])\ar[d]^= \ar[r]&0 \\ 0\ar[r] &H \ar[r]^{\widehat{\lambda}}& H^* \ar[r]^-{(g_1)_* \circ D_{M_1} \circ \partial F_1 \circ \pi}& H_1(Y;\Z[t^{\pm 1}]) \ar[r]&0. } $$ But now, by considering the isometry~$G \colon \lambda_{M_0} \cong \lambda_{M_1}$ defined by~$G:=F_1 \circ F \circ F_0^{-1}$, a quick verification shows that~$(G,\id_Y)$ is a compatible pair in the sense of~\cite{ConwayPowell}. Consequently~\cite[Theorem 1.10]{ConwayPowell} shows that there is a homeomorphism~$M_0 \cong M_1$ extending~$\id_Y$ and inducing~$G$; in particular~$M_0$ and~$M_1$ are homeomorphic rel.\ boundary. \end{proof} \begin{remark} \label{rem:KSProof} For $(Y,\varphi)$ as in Theorem~\ref{thm:MainTechnicalIntro}, we explain the fact (already mentioned in Remark~\ref{rem:MainTheorem}) that if $M_0$ and $M_1$ are spin $4$-manifolds with $\pi_1(M_i) \cong \Z$, boundary homeomorphic to~$(Y,\varphi)$, isometric equivariant intersection form, and the same automorphism invariant, then their Kirby-Siebenmann invariants agree. As explained during the proof of Theorem~\ref{thm:ClassificationRelBoundary}, these assumptions ensure the existence of a compatible pair~$(G,\id_Y)$. This in turn implies that $M:=M_0 \cup_{g_0 \circ g_1^{-1}} M_1$ is spin and has fundamental group~$\Z $~\cite[Theorem 3.12]{ConwayPowell}. The assertion now follows from additivity of $\ks$ and Novikov additivity of the signature: $$\ks(M_0)+\ks(M_1) = \ks(M) \equiv \frac{\sigma(M)}{8}=\frac{\sigma(M_0)-\sigma(M_1)}{8}=0 \quad \pmod 2.$$ We also use that the signatures of $M$, $M_0$, and $M_1$ can be obtained from the respective equivariant intersection forms by specialising to $t=1$ and taking the signature. In Section~\ref{sub:Example}, we exhibit examples of spin 4-manifolds with boundary homeomorphic to $\unaryminus L(8,1) \# (S^1 \times S^2)$ and isometric equivariant intersection form that have different Kirby-Siebenmann invariants, demonstrating that the automorphism invariant was needed in the argument of this remark. \end{remark} Next we outline the strategy of the proof of Theorem~\ref{thm:MainTechnicalIntro}. 
\begin{proof}[Outline of the proof of Theorem~\ref{thm:MainTechnicalIntro}] \label{pf:ProofStrategy} The idea is to perform surgeries on~$Y$ along a set of generators of~$H_1(Y;\Z[t^{\pm 1}])$ to obtain a~$3$-manifold~$Y'$ with~$H_1(Y';\Z[t^{\pm 1}])=0$. The verification that $H_1(Y';\Z[t^{\pm 1}])=0$ uses Reidemeister torsion. We then use surgery theory to show that this~$Y'$ bounds a~$4$-manifold~$B$ with~$B \simeq S^1$; this step relies on Freedman's work in the topological category~\cite{Freedman, FreedmanQuinn,DET}. The~$4$-manifold~$M$ is then obtained as the union of the trace of these surgeries with~$B$. To show that in the odd case both values of the Kirby-Siebenmann invariant are realised, we use the star construction~\cite{FreedmanQuinn,StongRealization}. The main difficulty of the proof is to describe the correct surgeries on~$Y$ to obtain~$Y'$; this is where the fact that~$\lambda$ presents~$\Bl_Y$ comes into play: we show that generators of~$H_1(Y;\Z[t^{\pm 1}])$ can be represented by a framed link~$\widetilde{L}$ with equivariant linking matrix equal to minus the transposed inverse of a matrix representing~$\lambda$. \end{proof} This is a strategy similar to the one employed in Boyer's classification of simply-connected~$4$-manifolds with a given boundary~\cite{BoyerRealization}. The argument is also reminiscent of~\cite[Theorem~2.9]{BorodzikFriedlClassical1}, where Borodzik and Friedl obtain bounds (in terms of a presentation matrix for~$\Bl_K$) on the number of crossing changes required to turn~$K$ into an Alexander polynomial one knot: they perform surgeries on the zero-framed surgery~$Y=M_K$ to obtain~$Y'=M_{K'}$, where~$K'$ is an Alexander polynomial one knot. \begin{remark} \label{rem:HomotopyEquivalence} As we mentioned in Construction~\ref{cons:Invariant}, if~$M_0$ and~$M_1$ are homeomorphic rel.\ boundary, then~$b_{M_0}=b_{M_1}$ in~$\Iso(\partial \lambda,\unaryminus\Bl_Y)/\Aut(\lambda)$. In fact the same proof shows more. If two $4$-manifolds~$M_0$ and~$M_1$ that represent elements of $\mathcal{V}^0_\lambda(Y)$ are \emph{homotopy equivalent} rel.\ boundary, then~$b_{M_0}=b_{M_1}$ in~$\Iso(\partial \lambda,\unaryminus\Bl_Y)/\Aut(\lambda)$. \end{remark} Next, we describe how the classification in the case where the homeomorphisms need not fix the boundary pointwise follows from Theorem \ref{thm:ClassificationRelBoundary}. To this effect, we use~$\Homeo^+_\varphi(Y)$ to denote the orientation-preserving homeomorphisms of~$Y$ such that the induced map on~$\pi_1$ commutes with~$\varphi \colon \pi_1(Y) \twoheadrightarrow \Z$ and we describe the set of homeomorphism classes of $\Z$-manifolds that we will be working with. \begin{definition} \label{def:VlambdaY} For $Y$ and $(H,\lambda)$ as in Definition~\ref{def:V0lambdaY}, define~$\mathcal{V}_\lambda(Y)$ as the quotient of~$S_\lambda(Y)$ in which two pairs~$(M_1,g_1), (M_2,g_2)$ are deemed equal if and only if there is a homeomorphism~$\Phi \colon M_1 \cong M_2$ such that~$ \Phi|_{\partial M_1}=g_2^{-1} \circ f \circ g_1$ for some~$f \in \Homeo^+_\varphi(Y)$; note that such a homeomorphism $\Phi$ is necessarily orientation-preserving. \end{definition} We continue to set up notation to describe how the non relative classification follows from Theorem \ref{thm:ClassificationRelBoundary}. Observe that the group~$\Homeo^+_\varphi(Y)$ acts on~$\mathcal{V}_\lambda^0(Y)$ by setting~$f \cdot (M,g):=(M,f \circ g)$ for~$f\in \Homeo^+_\varphi(Y)$. 
Further, observe that \begin{equation} \label{eq:NotRelBoundary} \mathcal{V}_\lambda(Y)=\mathcal{V}_\lambda^0(Y)/\Homeo^+_\varphi(Y). \end{equation} Recall that any~$f \in \Homeo^+_\varphi(Y)$ induces an isometry~$f_*$ of the Blanchfield form~$\Bl_Y$. Thus the group~$\Homeo^+_\varphi(Y)$ acts on~$\Iso(\partial \lambda,\unaryminus \Bl_Y)$ by~$f \cdot h:=f_* \circ h$. Finally, there is a natural left action of~$\Aut(\lambda) \times \Homeo^+_\varphi(Y)$ on~$\Iso(\partial \lambda,\unaryminus \Bl_Y)$ defined via \begin{equation}\label{eq:autaction2} (F,f) \cdot h:=f_* \circ h \circ \partial F^{-1}. \end{equation} The non-relative classification statement reads as follows. \begin{theorem} \label{thm:Classification} Let~$Y$ be a~$3$-manifold with an epimorphism~$\varphi \colon \pi_1(Y) \twoheadrightarrow \Z$ whose Alexander module is torsion, and let~$(H,\lambda)$ be a nondegenerate Hermitian form over $\Z[t^{\pm 1}]$. Consider the set~$\mathcal{V}_\lambda(Y)$ of~$\Z$-manifolds~$M$ with boundary~$\partial M\cong Y$ and~$\lambda_M \cong \lambda$, up to orientation-preserving homeomorphism. \noindent If the form $(H,\lambda)$ presents $Y$, then~$\mathcal{V}_\lambda(Y)$ is nonempty and corresponds bijectively to \begin{enumerate} \item~$\Iso(\partial \lambda,\unaryminus\Bl_Y)/(\Aut(\lambda) \times \Homeo^+_\varphi(Y))$, if~$\lambda$ is an even form; \item $\left( \Iso(\partial \lambda,\unaryminus\Bl_Y)/(\Aut(\lambda) \times \Homeo^+_\varphi(Y))\right) \times \Z_2$, if~$\lambda$ is an odd form. The map to $\Z_2$ is given by the Kirby-Siebenmann invariant. \end{enumerate} \end{theorem} \begin{proof} Thanks to Theorem~\ref{thm:ClassificationRelBoundary} (as formulated in Remark~\ref{rem:BijectionRelBoundary}) and~\eqref{eq:NotRelBoundary}, it suffices to prove that the map~$b$ respects the~$\Homeo^+_\varphi(Y)$ actions, i.e.\ that~$b_{f \cdot (M,g)}=f \cdot b_{(M,g)}$, where~$g \colon \partial M \cong Y$ is a homeomorphism as in Definition~\ref{def:V0lambdaY} and $f \in \Homeo^+_\varphi(Y)$. This now follows from the following formal calculation: $b_{f \cdot (M,g)}=b_{(M,f \circ g)}=f_* \circ g_* \circ D_M \circ \partial F=f \cdot b_{(M,g)},$ where~$F \colon \lambda \cong \lambda_M$ is an isometry and we used the definitions of the~$\Homeo^+_\varphi(Y)$ actions and of the map~$b$. \end{proof} \begin{remark} \label{rem:UserGuide} To make the results as user-friendly as possible, we spell out how to apply them in practice. Fix an oriented $3$-manifold $Y$ with torsion Alexander module. Two orientable $\Z$-manifolds $M_0$ and $M_1$ with boundary $Y$ are homeomorphic if and only if they have the same Kirby-Siebenmann invariants, and the following hold.
\begin{enumerate} \item There are identifications $\psi_i \colon \pi_1(M_i) \xrightarrow{\cong} \Z$, for $i=0,1$, and \item there are homeomorphisms $g_i \colon Y \xrightarrow{\cong} \partial M_i$, for $i=0,1$, and a surjection $\varphi \colon \pi_1(Y) \to \Z$, such that $\psi_i \circ \operatorname{incl}_i \circ g_i = \varphi$ for $i=0,1$, and such that \item using the coefficient systems induced by the $\psi_i$ and the orientations induced by the $g_i$ to define the intersection forms, there is an isometry \[F \colon (H_2(M_0;\Z[t^{\pm 1}]),\lambda_{M_0}) \cong (H_2(M_1;\Z[t^{\pm 1}]),\lambda_{M_1}), \text{ and }\] \item with respect to this isometry we have that $b_{M_0} = b_{M_1} \in \Iso(\partial \lambda_{M_0},\unaryminus\Bl_Y)/(\Aut(\lambda_{M_0}) \times \Homeo^+_\varphi(Y))$ or, equivalently, there exists an isometry $F \colon \lambda_{M_0} \cong \lambda_{M_1}$ whose algebraic boundary $\partial F \colon \partial \lambda_{M_0} \cong \partial \lambda_{M_1}$ is induced by some orientation-preserving homeomorphism $f \colon Y \to Y$ that intertwines $\varphi$. In~\cite{ConwayPowell} such a pair $(f,F)$ was called \emph{compatible}. \end{enumerate} \end{remark} The next few sections are devoted to proving Theorem~\ref{thm:MainTechnicalIntro}. \section{Equivariant linking and longitudes} \label{sec:Prelim} We collect some preliminary notions that we will need later on. In Subsection \ref{sub:HomologyIntersections} we fix our notation for twisted homology and equivariant intersections. In Subsection~\ref{sub:EquivariantLinking}, we collect some facts about linking numbers in infinite cyclic covers, while in Subsection~\ref{sub:Parallels}, we define an analogue of integer framings of a knot in~$S^3$ for knots in infinite cyclic covers. \subsection{Covering spaces and twisted homology} \label{sub:HomologyIntersections} We fix our conventions on twisted homology and recall some facts about equivariant intersection numbers. We refer the reader interested in the intricacies of transversality in the topological category to~\cite[Section 10]{FriedlNagelOrsonPowell}. \medbreak We first introduce some notation for infinite cyclic covers. Given a space~$X$ that has the homotopy type of a finite CW complex, together with an epimorphism~$\varphi \colon \pi_1(X) \twoheadrightarrow \Z$, we write~$p\colon X^\infty \to X$ for the infinite cyclic cover corresponding to~$\ker(\varphi)$. If~$A \subset X$ is a subspace, then we set~$A^\infty :=p^{-1}(A)$ and often write~$H_*(X,A;\Z[t^{\pm 1}])$ instead of~$H_*(X^\infty,A^\infty)$. Similarly, since~$\Q(t)$ is flat over~$\Z[t^{\pm 1}]$, we often write~$H_*(X,A;\Q(t))$ or~$H_*(X,A;\Z[t^{\pm 1}]) \otimes_{\Z[t^{\pm 1}]} \Q(t)$ instead of~$H_*(X^\infty,A^\infty) \otimes_{\Z[t^{\pm 1}]} \Q(t)$. \begin{remark} \label{rem:AlexanderPolynomial} The \emph{Alexander polynomial} of $X$, denoted $\Delta_X$, is the order of the \emph{Alexander module}~$H_1(X;\Z[t^{\pm 1}])$. While we refer to Remark~\ref{rem:AlexPoly} below for some recollections on orders of modules, here we simply note that $\Delta_X$ is a Laurent polynomial that is well defined up to multiplication by~$\pm t^k$ with~$k \in \Z$ and that if~$X=M_K$ is the $0$-framed surgery along a knot~$K$, then $\Delta_X$ is the Alexander polynomial of $K$. \end{remark} Next, we move on to equivariant intersections in covering spaces. \begin{definition} \label{def:EquivariantIntersection} Let~$M$ be an~$n$-manifold (with possibly nonempty boundary) with an epimorphism~$\pi_1(M)\twoheadrightarrow \Z$.
For a~$k$-dimensional closed submanifold~$A \subset M^\infty$ and an~$(n-k)$-dimensional closed submanifold~$A' \subset M^\infty$ such that $A$ and $t^jA'$ intersect transversely for all $j \in \Z$, we define the \emph{equivariant intersection}~$A\cdot_{\infty,M}A' \in \Z[t^{\pm 1}]$ as $$A\cdot_{\infty, M}A'=\sum_{j\in \Z} (A\cdot_{M^\infty} (t^jA'))t^{-j},$$ where~$\cdot_{M^\infty}$ denotes the usual (algebraic) signed count of points of intersection. If the boundary of~$M$ is nonempty and~$A' \subset M$ is properly embedded, then we can make the same definition and also write~$A\cdot_{\infty, M}A' \in \Z[t^{\pm 1}]$. \end{definition} \begin{remark} \label{rem:EquivariantIntersections} We collect a couple of observations about equivariant intersections. \begin{enumerate} \item Equivariant intersections are well defined on homology and in fact~$A\cdot_{\infty, M}A'=\lambda([A'],[A])$, where~$\lambda$ denotes the equivariant intersection form $$ \lambda \colon H_k(M;\Z[t^{\pm 1}]) \times H_{n-k}(M;\Z[t^{\pm 1}]) \to \Z[t^{\pm 1}].$$ The reason that $A\cdot_{\infty, M}A'$ equals $\lambda([A'],[A])=\overline{\lambda([A],[A'])}$ instead of $\lambda([A],[A'])$ is that we are following the conventions from~\cite[Section 2]{ConwayPowell}, in which the adjoint of a Hermitian form $\lambda \colon H \times H \to \Z[t^{\pm 1}]$ is defined by the equation $\widehat{\lambda}(y)(x)=\lambda(x,y)$. With these conventions, $\lambda$ is linear in the first variable and anti-linear in the second, whereas~$\cdot_{\infty,M}$ is linear in the second variable and anti-linear in the first. \item When~$\partial M \neq \emptyset$ and~$A \subset M$ is a properly embedded submanifold with boundary, then again~$A\cdot_{\infty, M}A'=\lambda^\partial([A'],[A])$, where this time~$\lambda^\partial$ denotes the pairing $$ \lambda^\partial \colon H_k(M;\Z[t^{\pm 1}]) \times H_{n-k}(M,\partial M;\Z[t^{\pm 1}]) \to \Z[t^{\pm 1}].$$ As previously, $\lambda^\partial$ is linear in the first variable and anti-linear in the second. \item The definition of the pairings~$\lambda$ and~$\lambda^\partial$ can be made with arbitrary twisted coefficients. In order to avoid extraneous generality, we simply mention that there are~$\Q(t)$-valued pairings~$\lambda_{\Q(t)}$ and~$\lambda_{\Q(t)}^\partial$ defined on homology with~$\Q(t)$-coefficients and that if~$A,B \subset M^\infty$ are closed submanifolds of complementary dimension, then~$\lambda_{\Q(t)}([A],[B])=\lambda([A],[B])$ and similarly for properly embedded submanifolds with boundary. \end{enumerate} \end{remark} \subsection{Equivariant linking} \label{sub:EquivariantLinking} We recall definitions and properties of equivariant linking numbers. Other papers that feature discussions of the topic include~\cite{PrzytyckiYasuhara, BorodzikFriedlLinking,KimRuberman}. \medbreak We assume for the rest of the section that~$Y$ is a~$3$-manifold and that~$\varphi \colon \pi_1(Y) \twoheadrightarrow \mathbb{Z}$ is an epimorphism such that the corresponding Alexander module~$H_1(Y;\Z[t^{\pm 1}])$ is torsion, i.e.\ $H_*(Y;\Q(t))=0$. We also write~$p \colon Y^\infty \to Y$ for the infinite cyclic cover corresponding to~$\ker(\varphi)$, so that~$H_1(Y;\Z[t^{\pm 1}])=H_1(Y^\infty)$. Given a simple closed curve~$\widetilde{a} \subset Y^\infty$, we write~$a^\infty:=\bigcup_{k \in \Z} t^k \widetilde{a}$ for the union of all the translates of~$\widetilde{a}$ and~$a:=p(\widetilde{a}) \subset Y$ for the projection of~$\widetilde{a}$ down to~$Y$.
This way, the covering map~$p \colon Y^\infty \to Y$ restricts to a covering map $$ Y^\infty \setminus \nu(a^\infty) \to Y\setminus \nu (a)=:Y_a.$$ Since the Alexander module of~$Y$ is torsion, a short Mayer-Vietoris argument shows that the vector space~$H_*(Y_a;\Q(t))=\Q(t)$ is generated by~$[\widetilde{\mu}_a]$, the class of a meridian of~$\widetilde{a} \subset Y^\infty$. \begin{definition} \label{def:EquivariantLinking} The \emph{equivariant linking number} of two disjoint simple closed curves~$\widetilde{a},\widetilde{b} \subset Y^\infty$ is the unique rational function~$\ell k_{\Q(t)}(\widetilde{a},\widetilde{b}) \in \Q(t)$ such that $$ [\widetilde{b}]=\ell k_{\Q(t)}(\widetilde{a},\widetilde{b})[\widetilde{\mu}_a] \in H_1(Y \setminus \nu(a);\Q(t)).$$ \end{definition} Observe that this linking number is only defined for \emph{disjoint} pairs of simple closed curves. We give a second, more geometric, description of the equivariant linking number. \begin{remark}\label{rem:torsionsurface} Since~$H_1(Y;\Z[t^{\pm 1}])$ is torsion, for any simple closed curve $\widetilde{a}$ in $Y^\infty$, there is some polynomial~$p(t)=\sum_i c_it^i$ such that~$p(t)[\widetilde{a}]=0.$ Thus there is a surface~$F\subset Y^\infty\smallsetminus \nu(a^\infty)$ with boundary consisting of the disjoint union of~$c_i$ parallel copies of~$t^i\cdot \widetilde{a}'$ and $d_j$ meridians of $t^j\cdot \widetilde{a}'$ where~$\widetilde{a}'$ is some pushoff of~$\widetilde{a}$ in~$\partial \overline{\nu}(\widetilde{a})$ and $j \neq i$; we abusively write $\partial F=p(t)\widetilde{a}$. \end{remark} \begin{proposition}\label{prop:EquivariantLinkingDefinitions} Let~$Y$ be a~$3$-manifold, let~$\varphi \colon \pi_1(Y) \twoheadrightarrow \Z$ be an epimorphism such that the Alexander module~$H_1(Y;\Z[t^{\pm 1}])$ is torsion, and let~$\widetilde{a},\widetilde{b} \subset Y^\infty$ be disjoint simple closed curves. Let $F$ and $p(t)$ be respectively a surface and a polynomial associated to $\widetilde{a}$ as in Remark \ref{rem:torsionsurface}. The equivariant linking of~$\widetilde{a}$ and~$\widetilde{b}$ can be written as \begin{equation} \label{eq:EquivariantLinkingGeometric} \ell k_{\Q(t)}(\widetilde{a} ,\widetilde{b} )=\frac{1}{p(t^{-1})}\sum_{k \in \Z} (F \cdot t^k \widetilde{b}) t^{-k}=\frac{1}{p(t^{-1})}(F\cdot_{\infty,Y_a} \widetilde{b}). \end{equation} In particular, this expression is independent of the choices of $F$ and $p(t)$. \end{proposition} \begin{proof} As in Subsection~\ref{sub:HomologyIntersections}, write~$\lambda^\partial$ for the (homological) intersection pairing~$H_1(Y_a;\Z[t^{\pm 1}]) \times H_2(Y_a,\partial Y_a;\Z[t^{\pm 1}]) \to \Z[t^{\pm 1}]$ and~$\lambda^\partial_{\Q(t)}$ for the pairing involving~$\Q(t)$-homology. Write~$\ell:=\ell k(\widetilde{a},\widetilde{b})$ so that~$[\widetilde{b}]=\ell [\widetilde{\mu}_a] \in H_1(Y_a;\Q(t))$. From this and Remark~\ref{rem:EquivariantIntersections}, for a surface~$F$ as in the statement, we obtain $$ F \cdot_{\infty,Y_a} \widetilde{b} =\lambda^\partial([\widetilde{b}],[F]) =\lambda_{\Q(t)}^\partial([\ell \widetilde{\mu}_a],[F]) =\ell \lambda_{\Q(t)}^\partial([\widetilde{\mu}_a],[F]) =\ell(F \cdot_{\infty,Y_a} \widetilde{\mu}_a) =\ell p(t^{-1}).$$ The last equality here follows from inspection; since $F\hookrightarrow Y^\infty\smallsetminus \nu (a^\infty)$ has boundary along $c_i$ copies of $t^i\cdot \widetilde{a'}$ and $d_j$ copies of $t^j\widetilde{\mu}_a$, each meridian $t^i\cdot \mu_{\widetilde{a}}$ intersects $F$ in $c_i$ points. 
The result now follows after dividing out by~$p(t^{-1})$. \end{proof} Just as for linking numbers in rational homology spheres, the equivariant linking number is not well defined on homology, unless the target is replaced by $\Q(t)/\Z[t^{\pm 1}]$. To describe the resulting statement, we briefly recall the definition of the Blanchfield form. \begin{remark} \label{rem:Needp(t)Symmetric} Using the same notation and assumptions as in Proposition~\ref{prop:EquivariantLinkingDefinitions}, the Blanchfield form is a nonsingular sesquilinear, Hermitian pairing that can be defined as \begin{align} \label{eq:BlanchfieldGeom} \Bl_Y \colon H_1(Y;\Z[t^{\pm 1}]) \times H_1(Y;\Z[t^{\pm 1}]) &\to \Q(t)/\Z[t^{\pm 1}] \nonumber \\ ([\widetilde{b}],[\widetilde{a}]) &\mapsto \left[\frac{1}{p(t)}(F\cdot_{\infty,Y_a} \widetilde{b})\right]. \end{align} We refer to~\cite{PowellBlanchfield,FriedlPowell} for further background and homological definitions of this pairing. \end{remark} We summarise this discussion and collect another property of equivariant linking in the next proposition. \begin{proposition}\label{prop:Linkingprop} Let~$Y$ be a~$3$-manifold and let~$\varphi \colon \pi_1(Y) \twoheadrightarrow \Z$ be an epimorphism such that the Alexander module~$H_1(Y;\Z[t^{\pm 1}])$ is torsion. For disjoint simple closed curves~$\widetilde{a},\widetilde{b} \subset Y^\infty$, the equivariant linking number satisfies the following properties: \begin{enumerate} \item sesquilinearity:~$\ell k_{\Q(t)}(p \widetilde{a} ,q \widetilde{b} )=\overline{p}q\ell k_{\Q(t)}(\widetilde{a} ,\widetilde{b} )$ for all~$p,q \in \Z[t^{\pm 1}]$; \item symmetry:~$\ell k_{\Q(t)}(\widetilde{a} ,\widetilde{b} )=\overline{\ell k_{\Q(t)}(\widetilde{b} ,\widetilde{a} )}$; \item relation to the Blanchfield form:~$[\ell k_{\Q(t)}(\widetilde{a} ,\widetilde{b} )]=\Bl_Y([\widetilde{b}],[\widetilde{a}]) \in \Q(t)/\Z[t^{\pm 1}]$. \end{enumerate} \end{proposition} \begin{proof} The first property follows from~\eqref{eq:EquivariantLinkingGeometric}. Before proving the second and third properties, we note that in~\eqref{eq:EquivariantLinkingGeometric} and~\eqref{eq:BlanchfieldGeom}, we can assume that $p(t)=p(t^{-1})$. Indeed, both formulae are independent of the choice of $p(t)$, and if $q(t)$ satisfies $q(t)[\widetilde{a}]=0$, then so does $p(t):=q(t)q(t^{-1})$. The proof of the second assertion now follows as in~\cite[Lemma 3.3]{BorodzikFriedlLinking}, whereas the third follows by inspecting~\eqref{eq:EquivariantLinkingGeometric} and~\eqref{eq:BlanchfieldGeom}. \end{proof} The reader will have observed that the formulas in Propositions~\ref{prop:EquivariantLinkingDefinitions} and~\ref{prop:Linkingprop} depend heavily on conventions chosen for adjoints, module structures, equivariant intersections and twisted homology. It is for this reason that the formulas presented here might differ (typically up to switching variables) from others in the literature. \subsection{Parallels, framings, and longitudes} \label{sub:Parallels} Continuing with the notation and assumptions from the previous section, we fix some terminology regarding parallels and framings in infinite cyclic covers. The goal is to be able to describe a notion of integer surgery for appropriately nullhomologous knots in the setting of infinite cyclic covers. Our approach is inspired by~\cite{BoyerLines,BoyerRealization}.
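To fix ideas, it may help to keep in mind the classical picture for a knot~$K \subset S^3$: parallels of~$K$ on~$\partial \overline{\nu}(K)$ are determined up to isotopy by their linking number with~$K$, every integer arises as such a linking number, and integral~$r$-surgery is the Dehn filling of~$E_K$ along the parallel with linking number~$r$. The definitions below adapt this picture to knots in the infinite cyclic cover~$Y^\infty$, with the equivariant linking number in~$\Q(t)$ playing the role of the integral framing coefficient; as we note below, not every element of~$\Q(t)$ need arise in this way.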
\begin{definition} \label{def:ParallelLongitude} Let~$\widetilde{K} \subset Y^\infty$ be a knot, let~$p \colon Y^\infty \to Y$ be the covering map, and denote~$K:=p(\widetilde{K})\subset Y$ the projection of~$\widetilde{K}$. \begin{enumerate} \item A \emph{parallel} to~$\widetilde{K}$ is a simple closed curve~$\pi \subset \partial \overline{\nu}(\widetilde{K})$ that is isotopic to~$\widetilde{K}$ in~$\overline{\nu}(\widetilde{K})$. \item Given any parallel~$\pi$ of~$\widetilde{K}$, we use~$\overline{\nu}_\pi(\widetilde{K})$ to denote the parametrisation~$S^1\times D^2\xrightarrow{\cong} \overline{\nu}(\widetilde{K})$ which sends~$S^1\times\{x\}$ to~$\pi$ for some~$x\in\partial D^2$. \item A \emph{framed link} is a link~$\widetilde{L} \subset Y^\infty$ together with a choice of a parallel for each of its components. \item We say that the knot~$\widetilde{K}$ \emph{admits framing coefficient}~$r(t) \in \Q(t)$ if there is a parallel~$\pi$ with~$\ell k_{\Q(t)}(\widetilde{K},\pi)=r(t)$. We remark that, unlike in the setting of homology with integer coefficients where every knot~$K$ admits any integer~$r$ as a framing coefficient, when we work with~$\Z[t^{\pm 1}]$-homology, a fixed knot~$\widetilde{K}$ will have many~$r(t) \in \Q(t)$ (in fact even in~$\Z[t^{\pm 1}]$) which it does not admit as a framing coefficient. We will refer to~$\pi$ as a \emph{framing curve} of~$\widetilde{K}$ with framing~$r(t)$. \item A framed $n$-component link~$\widetilde{L}$ which admits framing coefficients~$\mathbf{r}(t):=(r_i(t))_{i=1}^n$, together with a choice of parallels realising those framing coefficients, is called an $\mathbf{r}(t)$-framed link. \item The \emph{equivariant linking matrix} of an~$\mathbf{r}(t)$-framed link~$\widetilde{L}$ is the matrix~$A_{\widetilde{L}}$ with diagonal term~$(A_{\widetilde{L}})_{ii}=r_i(t)$ and off-diagonal terms~$(A_{\widetilde{L}})_{ij}=\ell k_{\Q(t)}(\widetilde{K}_i,\widetilde{K}_j)$ for~$i \neq j$. \item For a link~$\widetilde{L}$ in~$Y^\infty$, we define~$L^\infty$ to be the set of all the translates of~$\widetilde{L}$. We also set $$L:=p(\widetilde{L}).$$ We say that $\wt{L}$ is in \emph{covering general position} if the map $p \colon L^{\infty} \to L$ is a trivial $\Z$-covering isomorphic to the pullback cover \[\xymatrix @R0.5cm @C0.5cm{L^{\infty} \ar[r] \ar[d] & \R \ar[d] \\ L \ar[r]^{c} & S^1}\] where $c$ is a constant map. In particular each component of $L^{\infty}$ is mapped by $p$, via a homeomorphism, to some component of $L$. From now on we will always assume that our links $\wt{L}$ are in covering general position. This assumption is to avoid pathologies, and holds generically. 
\item For an $n$-component link~$\widetilde{L}$ which admits framing coefficients~$\mathbf{r}(t):=(r_i(t))_{i=1}^n$, the \emph{$\mathbf{r}(t)$-surgery} along~$\widetilde{L}$ is the covering space~${Y}^\infty_{\mathbf{r}(t)}(\widetilde{L}) \to Y_{\mathbf{r}}(L)$ defined by Dehn filling~$Y^\infty\setminus \nu(L^\infty)$ along all the translates of all the parallels~$\pi_1^\infty,\ldots,\pi_n^\infty$ as follows: $$ {Y}^\infty_{\mathbf{r}(t)}(\widetilde{L})=Y^\infty \setminus \Big( \bigcup_{k \in \Z} \bigcup_{i=1}^n t^k\overline{\nu}_{\pi_i}(\widetilde{K}_i) \Big) \cup \Big( \bigcup_{k \in \Z} \bigcup_{i=1}^n \left( D^2 \times S^1 \right)\Big).$$ \noindent Since $\widetilde{L}$ is in covering general position, for all~$\widetilde{K}_i$ the covering map~$p|_{\widetilde{K}_i} \colon \widetilde{K}_i\to K_i$ is a homeomorphism, so $p|_{\overline{\nu}(\widetilde{K}_i)} \colon \overline{\nu}(\widetilde{K}_i)\to \nu(K_i)$ is a homeomorphism. Thus any parallel~$\pi_i$ of~$\widetilde{K}_i$ projects to a parallel of~$K_i$, so we may also define~$\mathbf{r}$-surgery along~$L$ downstairs: $${Y}_{\mathbf{r}}(L)=Y \setminus \Big( \bigcup_{i=1}^n \overline{\nu}_{p(\pi_i)}(p(\widetilde{K}_i))\Big) \cup \Big( \bigcup_{i=1}^n (D^2 \times S^1) \Big).$$ \noindent Observe that there is a naturally induced cover ~${Y}^\infty_{\mathbf{r}(t)}(\widetilde{L}) \to {Y}_{\mathbf{r}}(L)$ obtained by restricting~$p \colon Y^\infty \to Y$ to the link exterior and then extending it to the trivial disconnected $\Z$-cover over each of the surgery solid tori. \item The \emph{dual framed link}~$\widetilde{L'}\subset {Y}^\infty_{\mathbf{r}(t)}(\widetilde{L})$ associated to a framed link~$\widetilde{L} \subset Y^\infty$ is defined as follows: \begin{itemize} \item the~$i$-th component~$\widetilde{K}_i'$ of the underlying link~$\widetilde{L}' \subset {Y}^\infty_{\mathbf{r}(t)}(\widetilde{L})$ is obtained by considering the core of the~$i$-th surgery solid torus~$D^2 \times S^1$. \item The framing of~$\widetilde{K}_i'$ is given by the~$S^1$-factor~$\lbrace \operatorname{pt} \rbrace \times S^1$ of the parametrised solid torus used to define~$\widetilde{K}_i'$. \end{itemize} \item We also define analogues of these notions (except $(6)$ and $(7)$) for a link $L$ in the $3$-manifold~$Y$, without reference to the cover. \end{enumerate} \end{definition} The next lemma provides a sort of analogue for the Seifert longitude of a knot in~$S^3$; it is inspired by~\cite[Lemma 1.2]{BoyerLines}. The key difference with the Seifert longitude is that in our setting this class, which we denote by~$\lambda_{\widetilde{K}}$, is just a homology class in $H_1(\partial \overline{\nu}(\widetilde{K});\Q(t))$; it will frequently not be represented by a simple closed curve. \begin{lemma} \label{lem:SimpleClosedCurve} For every knot~$\widetilde{K} \subset Y^\infty$, there is a unique homology class~$\lambda_{\widetilde{K}} \in H_1(\partial \overline{\nu}(\widetilde{K});\Q(t))$ called the \emph{longitude} of~$\widetilde{K}$ such that the following two conditions hold. \begin{enumerate} \item The algebraic equivariant intersection number of~$[\mu_{\widetilde{K}}]$ and~$\lambda_{\widetilde{K}}$ is one: $$\lambda_{\partial \overline{\nu}(\widetilde{K}),\Q(t)}([\mu_{\widetilde{K}}],\lambda_{\widetilde{K}})=1.$$ \item The class~$\lambda_{\widetilde{K}}$ maps to zero in~$H_1(Y_K;\Q(t))$.
\end{enumerate} For any parallel~$\pi$ of~$\widetilde{K}$, this class satisfies $$ \lambda_{\widetilde{K}} =[\pi]-\ell k_{\Q(t)}(\widetilde{K},\pi)[\mu_{\widetilde{K}}].$$ \end{lemma} \begin{proof} We first prove existence and then uniqueness. For existence, pick any parallel~$\pi$ to~$\widetilde{K}$, i.e.\ any curve in~$\partial \overline{\nu}(\widetilde{K})$ that is isotopic to~$\widetilde{K}$ in~$\overline{\nu}(\widetilde{K})$ and define $$ \lambda_{\widetilde{K}}:=[\pi]-\ell k_{\Q(t)}(\widetilde{K},\pi)[\mu_{\widetilde{K}}].$$ Here recall that the equivariant linking~$r:=\ell k_{\Q(t)}(\widetilde{K},\pi)$ is the unique element of~$\Q(t)$ such that~$[\pi]=r[\mu_{\widetilde{K}}]$ in~$H_1(Y_K;\Q(t))$. The two conditions now follow readily. For uniqueness, we suppose that~$\lambda_{\widetilde{K}}$ and~$\lambda_{\widetilde{K}}'$ are two homology classes as in the statement of the lemma. Choose a parallel~$\pi$ of~$\widetilde{K}$ and base~$H_1(\partial \overline{\nu}(\widetilde{K});\Q(t))$ by the pair~$(\mu_{\widetilde{K}},\pi)$. This way, we can write~$\lambda_{\widetilde{K}}=r_1[\mu_{\widetilde{K}}]+r_2[\pi]$ and~$\lambda_{\widetilde{K}}'=r_1'[\mu_{\widetilde{K}}]+r_2'[\pi]$. The first condition on~$\lambda_{\widetilde{K}}$ now promptly implies that~$r_2=r_2'=1$; formally $$1=\lambda_{\partial \overline{\nu}(\widetilde{K}),\Q(t)}([\mu_{\widetilde{K}}],\lambda_{\widetilde{K}})=r_2\lambda_{\partial \overline{\nu}(\widetilde{K}),\Q(t)}([\mu_{\widetilde{K}}],[\pi])=r_2$$ and similarly for~$r_2'$. To see that~$r_1=r_1'$, observe that since~$r_2=r_2'$, we have that~$\lambda_{\widetilde{K}}=\lambda_{\widetilde{K}}'+(r_1-r_1')[\mu_{\widetilde{K}}]$. Recall that~$[\mu_{\widetilde{K}}]$ is a generator of the vector space~$H_1(Y_K;\Q(t))=\Q(t)$ and that~$\lambda_{\widetilde{K}}, \lambda_{\widetilde{K}}'$ are zero in~$H_1(Y_K;\Q(t))$. We conclude that~$(r_1-r_1')=0$, as required. \end{proof} As motivation, observe that for a link $L=K_1 \cup \cdots \cup K_n \subset S^3$, the group $H_1(E_L;\Z)$ is freely generated by the meridians $\mu_{K_i}$ and, if $L$ is framed with integral linking matrix $A$, then the framing curves $\pi_i$ can be written in this basis as $[\pi_i]=\sum_{j=1}^n A_{ij}[\mu_{K_j}] \in H_1(E_L;\Z)$. The situation is similar in our setting. \begin{proposition}\label{prop:relating-pi_i_and_meridians} Let $\widetilde{L} \subset Y^\infty$ be an $n$-component framed link in covering general position whose components have framing curves~$\pi_1,\ldots,\pi_n$. Recall that~$H_1(Y_L;\Q(t))=\Q(t)^n$ is generated by the homology classes of the meridians~$\mu_{\widetilde{K}_1},\ldots,\mu_{\widetilde{K}_n}$. The homology classes of the~$\pi_i$ in~$H_1(Y_L;\Q(t)) \cong \Q(t)^n$ are related to the meridians by the formula \[[\pi_i]=\sum_{j=1}^n (A_{\widetilde{L}})_{ij} [\mu_{\widetilde{K}_j}] \in H_1(Y_L;\Q(t)).\] \end{proposition} \begin{proof} By definition of the equivariant linking matrix $A_{\wt{L}}$, we must prove that \begin{equation} \label{eq:ForClaim} [\pi_i] =\ell k_{\Q(t)}(\widetilde{K}_i,\pi_i)[\mu_{\widetilde{K}_i}]+\sum_{j \neq i} \ell k_{\Q(t)}(\widetilde{K}_i,\widetilde{K}_j)[\mu_{\widetilde{K}_j}] \in H_1(Y_L;\Q(t)) \end{equation} for each $i$. Since the sum of the inclusion induced maps gives rise to an isomorphism \[H_1(Y_L;\Q(t)) \cong \bigoplus_{j=1}^n H_1(Y_{K_j};\Q(t))\] it suffices to prove the equality after applying the inclusion map $H_1(Y_L;\Q(t)) \to H_1(Y_{K_j};\Q(t))$, for each~$j$.
Since~$\pi_i$ is a parallel of~$\widetilde{K}_i$, applying Lemma~\ref{lem:SimpleClosedCurve}, we have $$ [\pi_i]=\ell k_{\Q(t)}(\widetilde{K}_i,\pi_i)[\mu_{\widetilde{K}_i}]+\lambda_{\widetilde{K}_i} \in H_1(\partial Y_{K_i};\Q(t)).$$ We consider the image of this homology class in~$H_1(Y_{K_j};\Q(t))$ for~$j=1,\dots,n$. In the vector space~$H_1(Y_{K_i};\Q(t))=\Q(t)[\mu_{\widetilde{K}_i}]$, the longitude class~$\lambda_{\widetilde{K}_i}$ vanishes (again by Lemma~\ref{lem:SimpleClosedCurve}). For~$j \neq i$, the class~$[\mu_{\widetilde{K}_i}]$ vanishes in~$H_1(Y_{K_j};\Q(t))$; thus the image of~$[\pi_i]$ in~$H_1(Y_{K_j};\Q(t))$ is~$\ell k_{\Q(t)} (\pi_i,\widetilde{K}_j)[\mu_{\widetilde{K}_j}] =\ell k_{\Q(t)} (\widetilde{K}_i,\widetilde{K}_j)[\mu_{\widetilde{K}_j}]$. This concludes the proof of~\eqref{eq:ForClaim}. \end{proof} From now on, we will be working with $\Z[t^{\pm 1}]$-coefficient homology both for $Y$ and for the result $Y':=Y_{\mathbf{r}}(L)$ of surgery on a framed link $L \subset Y$. Let $W$ denote the trace of the surgery from $Y$ to $Y'$. We therefore record a fact about the underlying coefficient systems for later reference. \begin{lemma}\label{lem:coeff-system} The epimorphism $\varphi \colon \pi_1(Y) \twoheadrightarrow \Z$ extends to an epimorphism $\pi_1(W) \twoheadrightarrow \Z$, which by precomposition with the inclusion map induces an epimorphism $\varphi' \colon \pi_1(Y') \twoheadrightarrow \Z$. \end{lemma} \begin{proof} Note that~$\pi_1(W)$ is obtained from~$\pi_1(Y)$ by adding relators that kill each of the~$[K_i] \in \pi_1(Y)$ (indeed $W$ is obtained by adding~$2$-handles to $Y \times [0,1]$ along the $K_i$). Since~$\varphi$ is trivial on the~$K_i \subset Y$ (because they lift to $Y^\infty$), we deduce that $\varphi$ descends to an epimorphism on $\pi_1(W)$. The composition $\pi_1(Y') \to \pi_1(W) \twoheadrightarrow \Z$ is also surjective because $\pi_1(W)$ is obtained from $\pi_1(Y')$ by adding relators that kill each of the~$[K_i'] \in \pi_1(Y')$; indeed $W$ is obtained by adding~$2$-handles to $Y' \times [0,1]$ along the dual knots $K_i'$. \end{proof} \begin{remark}\label{rem:CoefficientSystemY'} In particular note from the proof of Lemma~\ref{lem:coeff-system} that the homomorphism $\varphi' \colon \pi_1(Y') \twoheadrightarrow \Z$ vanishes on the knots $K_i' \subset Y'$ dual to the original $K_i \subset Y$. \end{remark} The next lemma proves an infinite cyclic cover analogue of the following familiar statement: performing surgery on a framed link $L \subset S^3$ whose linking matrix is invertible over $\Q$ results in a rational homology sphere. \begin{lemma} \label{lem:surgQsphere} Let $Y$ be a 3-manifold and let $\varphi \colon \pi_1(Y) \twoheadrightarrow \Z$ be an epimorphism such that the Alexander module $H_1(Y;\Z[t^{\pm 1}])$ is torsion. If $\widetilde{L} \subset Y^\infty$ is an $n$-component framed link in covering general position, whose equivariant linking matrix $A_{\widetilde{L}}$ is invertible over $\Q(t)$, then the result $Y'$ of surgery on $L$ satisfies $H_1(Y';\Q(t))=0$. \end{lemma} \begin{proof} The result will follow by studying the portion $$ \cdots \to H_2(Y',Y_L;\Q(t)) \xrightarrow{\partial} H_1(Y_L;\Q(t)) \to H_1(Y';\Q(t)) \to H_1(Y',Y_L;\Q(t))$$ of the long exact sequence of the pair $(Y',Y_L)$ with $\Q(t)$-coefficients, and arguing that $H_1(Y',Y_L;\Q(t))=0$ and that $\partial$ is an isomorphism.
The fact that $H_1(Y',Y_L;\Q(t)) =0$ can be deduced from excision, replacing $(Y',Y_L)$ with the pair $(\sqcup^n S^1 \times D^2, \sqcup^n S^1 \times S^1)$. For the same reason, the vector space $H_2(Y',Y_L;\Q(t))=\Q(t)^n$ is based by the classes of the discs $(D^2 \times \lbrace \operatorname{pt} \rbrace)_i \subset (D^2 \times S^1)_i$ whose boundaries are the framing curves~$\pi_i$. To conclude that $\partial$ is indeed an isomorphism, note that $H_1(Y_L;\Q(t))=\Q(t)^n$ is generated by the $[\mu_{\widetilde{K}_i}]$ (because the Alexander module of~$Y$ is torsion) and use Proposition~\ref{prop:relating-pi_i_and_meridians} to deduce that with respect to these bases, $\partial$ is represented by the equivariant linking matrix~$A_{\widetilde{L}}$. Since this matrix is by assumption invertible over $\Q(t)$, we deduce that $\partial$ is an isomorphism. It follows that~$H_1(Y';\Q(t))=0$, as desired. \end{proof} The next lemma describes the framing on the dual of a framed link. The statement resembles~\cite[Lemma 1.5]{BoyerLines} and~\cite[Theorem 1.1]{PrzytyckiYasuhara}. \begin{lemma} \label{lem:InverseMatrix} Let $Y$ be a 3-manifold and let $\varphi \colon \pi_1(Y) \twoheadrightarrow \Z$ be an epimorphism such that the Alexander module $H_1(Y;\Z[t^{\pm 1}])$ is torsion. If $\widetilde{L} \subset Y^\infty$ is a framed link in covering general position whose equivariant linking matrix $A_{\widetilde{L}}$ is invertible over $\Q(t)$, then the equivariant linking matrix of the dual framed link~$\widetilde{L}'$ is $$A_{\widetilde{L}'}=-A_{\widetilde{L}}^{-1}.$$ \end{lemma} \begin{proof} Consider the exterior~$Y_L=Y'_{L'}$ and recall that~$H_1(Y_L;\Q(t))=\Q(t)^n$ is generated by the meridians~$\mu_{\widetilde{K}_1},\ldots,\mu_{\widetilde{K}_n}$ of the link~$\widetilde{L}$ because $H_1(Y;\Q(t))=0$ (the Alexander module of~$Y$ being torsion). Since $H_1(Y;\Q(t))=0$ and $\det(A_{\widetilde{L}})\neq 0$, we can apply Lemma~\ref{lem:surgQsphere} to deduce that~$H_1(Y';\Q(t))=0$ and hence~$H_1(Y_L;\Q(t))=H_1(Y'_{L'};\Q(t))$ is also generated by the meridians~$\mu_{\widetilde{K}_1'},\ldots,\mu_{\widetilde{K}_n'}$ of the link~$\widetilde{L}'$. Thus the vector space~$H_1(Y_L;\Q(t))=\Q(t)^n$ has bases both~$\boldsymbol{\mu}=([\mu_{\widetilde{K}_1}],\ldots,[\mu_{\widetilde{K}_n}])$ and~$\boldsymbol{\mu}'=([\mu_{\widetilde{K}'_1}],\ldots,[\mu_{\widetilde{K}'_n}])$, and we let~$B$ be the change of basis matrix between these two bases so that~$B\boldsymbol{\mu}=\boldsymbol{\mu}'$. Here and in the remainder of this proof, we adopt the following convention: if~$C$ is an~$n \times n$ matrix over~$\Q(t)$ and if~$\boldsymbol{x}=(x_1,\ldots,x_n)$ is a collection of~$n$ vectors in~$\Q(t)^n$, then we write~$C\boldsymbol{x}$ for the collection of~$n$ vectors~$Cx_1,\ldots,Cx_n$. Recall that for~$i=1,\ldots,n$, the framing curves of the~$\widetilde{K}_i$ and~$\widetilde{K}_i'$ are respectively denoted by~$\pi_i \subset Y^\infty$ and~$\pi_i' \subset {Y'}^\infty$. Slightly abusing notation, we also write~$[\pi_i]$ for the class of~$\pi_i$ in~$H_1(Y_{K_i};\Q(t))$. We set~$\boldsymbol{\pi}=([\pi_1],\ldots,[\pi_n])$ and~$\boldsymbol{\pi}'=([\pi_1'],\ldots,[\pi_n'])$ and use Proposition~\ref{prop:relating-pi_i_and_meridians} to deduce that \begin{align*} \boldsymbol{\pi}= A_{\widetilde{L}}\boldsymbol{\mu}, \ \ \ \ \ \ \ \ \boldsymbol{\pi'}= A_{\widetilde{L}'}\boldsymbol{\mu'}. \end{align*} Inspecting the surgery instructions, we also have the relations \begin{align*} \boldsymbol{\mu'}=-\boldsymbol{\pi}, \ \ \ \ \ \ \ \ \boldsymbol{\mu}=\boldsymbol{\pi'}.
\end{align*} We address the sign in Remark \ref{rem:sign} below. Combining these equalities, we obtain \begin{align*} \boldsymbol{\mu}&=\boldsymbol{\pi'}= A_{\widetilde{L}'}\boldsymbol{\mu'}=A_{\widetilde{L}'}B\boldsymbol{\mu}, \\ \boldsymbol{\mu'}&=-\boldsymbol{\pi}= -A_{\widetilde{L}}\boldsymbol{\mu}=-A_{\widetilde{L}}B^{-1}\boldsymbol{\mu'}. \end{align*} Unpacking the equality~$A_{\widetilde{L}'}B\boldsymbol{\mu}=\boldsymbol{\mu}$, we deduce that~$A_{\widetilde{L}'}B[\mu_{\widetilde{K}_i}]=[\mu_{\widetilde{K}_i}]$ for~$i=1,\ldots,n$. But since the~$[\mu_{\widetilde{K}_1}],\ldots,[\mu_{\widetilde{K}_n}]$ form a basis for~$\Q(t)^n$, this implies that~$A_{\widetilde{L}'}B=I_n$. The same argument shows that~$-A_{\widetilde{L}}B^{-1}=I_n$ and therefore both matrices~$A_{\widetilde{L}}$ and~$A_{\widetilde{L}'}$ are invertible, with~$-A_{\widetilde{L}}=B=A_{\widetilde{L}'}^{-1}$. \end{proof} \begin{remark}\label{rem:sign} In the above lemma, we were concerned with the relationship between the curves~$(\boldsymbol{\mu},\boldsymbol{\pi})$ and $(\boldsymbol{\mu'},\boldsymbol{\pi'})$, all of which represent classes in $H_1(\partial Y_L;\Q(t))$. We know from the surgery instructions that~$g(\boldsymbol{\mu})=\boldsymbol{\pi'}$. We are free to choose the collection of curves $g(\boldsymbol{\pi})$ so long as we choose each~$g(\pi_i)$ to intersect $\pi_i'$ geometrically once (as unoriented curves). We choose the unoriented curves $\boldsymbol{\pm \mu'}$. Since we know that the surgery was done to produce an oriented manifold, it must be the case that the gluing transformation $g\colon \partial Y_L\to \partial Y_L$ is orientation-preserving. In particular $g$ preserves intersection numbers, and we deduce that $ \delta_{ij}=\mu_i \cdot \pi_j=g(\mu_i) \cdot g(\pi_j)=\pi_j' \cdot (\pm \mu_i').$ This forces $g(\boldsymbol{\pi})=-\boldsymbol{\mu'}$. \end{remark} \section{Reidemeister torsion} \label{sec:reidemeister-torsion} We recall the definition of the Reidemeister torsion of a based chain complex as well as the corresponding definition for CW complexes. This will be primarily used in Subsection~\ref{sub:Step2}. References on Reidemeister torsion include~\cite{TuraevIntroductionTo, TuraevReidemeisterTorsionInKnotTheory, ChaFriedl}. \medbreak Let~$\mathbb{F}$ be a field. Given two bases~$u,v$ of an~$r$-dimensional~$\F$-vector space, we write~$\det(u/v)$ for the determinant of the matrix taking~$v$ to~$u$, i.e.\ the determinant of the matrix~$A=(A_{ij})$ that satisfies~$v^i=\sum_{j=1}^r A_{ij}u^j$. A \emph{based chain complex} is a finite chain complex $$C=\left( 0 \to C_m \xrightarrow{\partial_{m-1}} C_{m-1} \xrightarrow{\partial_{m-2}} \cdots \xrightarrow{\partial_1} C_1 \xrightarrow{\partial_0} C_0 \to 0\right)$$ of~$\F$-vector spaces together with a basis~$c_i$ of~$C_i$ for each~$i$. Given a based chain complex, fix a basis~$b_i$ for~$B_i=\im(\partial_{i}) \subset C_i$ and pick a lift~$\widetilde{b}_i$ of~$b_i$ to~$C_{i+1}$. Additionally, fix a basis~$h_i$ for each homology group~$H_i(C)$ and let~$\widetilde{h}_i$ be a lift of~$h_i$ to~$C_i$. One checks that~$(b_i,\widetilde{h}_i,\widetilde{b}_{i-1})$ forms a basis of~$C_i$. \begin{definition} \label{def:ReidemeisterTorsion} Let~$C$ be a based chain complex over~$\F$ and let~$\mathcal{B}=\lbrace h_i \rbrace$ be a basis for~$H_*(C)$.
The \emph{Reidemeister torsion} of~$(C,\mathcal{B})$ is defined as $$ \tau(C,\mathcal{B})=\frac{\prod_i \det((b_{2i+1},\widetilde{h}_{2i+1},\widetilde{b}_{2i})/c_{2i+1})}{\prod_i \det((b_{2i},\widetilde{h}_{2i},\widetilde{b}_{2i-1})/c_{2i})} \in \F\setminus \lbrace 0\rbrace.$$ Implicit in this definition is the fact that~$\tau(C,\mathcal{B})$ depends neither on the choice of the basis~$b_i$, nor on the choice of the lifts~$\widetilde{b}_i$, nor on the choice of the lifts~$\widetilde{h}_i$ of the~$h_i$. It does depend on $\mathcal{B}= \{h_i\}$. When~$C$ is acyclic, we drop~$\mathcal{B}$ from the notation and simply write~$\tau(C)$. \end{definition} Note that we are following Turaev's sign convention~\cite{TuraevIntroductionTo,TuraevReidemeisterTorsionInKnotTheory}; Milnor's convention~\cite{MilnorDualityTheorem} yields the multiplicative inverse of~$\tau(C,\mathcal{B})$~\cite[Remark 1.4 item 5]{TuraevIntroductionTo}. The next result collects two properties of the torsion that will be used later on. \begin{proposition} \label{thm:ReidemeisterTorsion} ~ \begin{enumerate} \item Suppose that~$0 \to C' \to C \to C'' \to 0$ is a short exact sequence of based chain complexes and that~$\mathcal{B}',\mathcal{B}$, and $\mathcal{B}''$ are bases for~$H_*(C'),H_*(C)$ and~$H_*(C'')$ respectively. If we view the associated homology long exact sequence as an acyclic complex~$\mathcal{H}$, based by~$\mathcal{B}',\mathcal{B}$, and~$\mathcal{B}''$, then $$\tau(C,\mathcal{B})=\tau(C',\mathcal{B}')\tau(C'',\mathcal{B}'')\tau(\mathcal{H}).$$ \item If~$C=(0 \to C_1 \xrightarrow{\partial_{0}} C_{0} \to 0)$ is a based chain complex whose differential~$\partial_0$ is an isomorphism between~$n$-dimensional vector spaces, so that~$C$ is acyclic, then $$\tau(C)=\det(A)^{-1}$$ where~$A$ denotes the~$n \times n$-matrix which represents~$\partial_0$ with respect to the given bases. \end{enumerate} \end{proposition} \begin{proof} The multiplicativity statement is proved in~\cite{MilnorDualityTheorem}. The second statement follows from Definition~\ref{def:ReidemeisterTorsion}; details are in~\cite[Remark 1.4, item 3]{TuraevIntroductionTo}. \end{proof} We now recall the definition of the torsion of a pair of CW complexes. We focus on the case where the spaces come with a map of their fundamental group to~$\Z$. This is a special case of a more general theory for an arbitrary group~\cite{TuraevIntroductionTo}, and for more general twisted coefficients~\cite{FriedlVidussiSurvey}. Let~$(X,A)$ be a finite CW pair, let~$\varphi \colon \pi_1(X) \to \Z$ be a homomorphism, and let~$\mathcal{B}$ be a basis for the~$\Q(t)$-vector space~$H_*(X,A;\Q(t))$. Write~$p \colon X^\infty \to X$ for the cover corresponding to~$\ker(\varphi)$ and set~$A^\infty:=p^{-1}(A)$. The chain complex~$C_*(X^\infty,A^\infty)$ can be based over~$\Z[t^{\pm 1}]$ by choosing a lift of each cell of~$(X,A)$ and orienting it; this also gives a basis of~$C_*(X,A;\Q(t))= C_*(X^\infty,A^\infty) \otimes_{\Z[t^{\pm 1}]} \Q(t)$. Let $\mathcal{E}$ denote the resulting choice of basis for $C_*(X,A;\Q(t))$. We then define the torsion of~$(X,A,\varphi)$ as $$ \tau(X,A,\mathcal{B},\mathcal{E}):=\tau(C_*(X,A;\Q(t)),\mathcal{B},\mathcal{E})\in \Q(t)\setminus \lbrace 0\rbrace.$$ Given~$p(t),q(t) \in \Q(t)$, we write~$p(t)\doteq q(t)$ to indicate that~$p(t)$ and~$q(t)$ agree up to multiplication by~$\pm t^k$, for some $k \in \Z$. This will enable us to obtain an invariant that does not depend on the choice of $\mathcal{E}$.
We write \[\tau(X,A,\mathcal{B}) := [\tau(X,A,\mathcal{B},\mathcal{E})] \in (\Q(t)\sm \{0\})/\doteq, \] for some choice of $\mathcal{E}$. It is known that~$\tau(X,A,\mathcal{B})$ is well defined and is invariant under simple homotopy equivalence preserving~$\mathcal{B}$~\cite[Theorem 9.1]{TuraevIntroductionTo}. We drop the~$\mathcal{B}$ from the notation if~$H_*(X,A;\Q(t))=0$. Additionally, Chapman proved that~$\tau(X,A,\mathcal{B})$ only depends on the underlying homeomorphism type of~$(X,A)$~\cite{Chapman}, and not on the particular CW structure. In particular, when~$(M,N)$ is a manifold pair, we can define~$\tau(M,N,\mathcal{B})$ for any finite CW-structure on~$(M,N)$, We will only consider the Reidemeister torsion of 3-manifolds, and so every pair $(M,N)$ we consider will admit a CW structure. It will not be relevant in this paper, but we note that it is possible to define Reidemeister torsion for topological $4$-manifolds not known to admit a CW structure; see~\cite[Section 14]{FriedlNagelOrsonPowell} for a discussion. \begin{remark} \label{rem:AlexPoly} The reason we consider Reidemeister torsion is its relation with Alexander polynomials; see Subsection~\ref{sub:Step2} below. To this effect, we recall some relevant algebra. Let $P$ be a~$\Z[t^{\pm 1}]$-module with presentation \[\Z[t^{\pm 1}]^m \xrightarrow{f} \Z[t^{\pm 1}]^n \to P \to 0.\] Consider elements of the free modules $\Z[t^{\pm 1}]^m$ and $\Z[t^{\pm 1}]^n$ as row vectors and represent $f$ by an~$m \times n$ matrix $A$, acting on the right of the row vectors. By adding rows of zeros, corresponding to trivial relations, we may assume that $m \geq n$. The \emph{$0$-th elementary ideal}~$E_0(P)$ of a finitely presented~$\Z[t^{\pm 1}]$-module~$P$ is the ideal of~$\Z[t^{\pm 1}]$ generated by all~$n \times n$ minors of~$A$. This definition is independent of the choice of the presentation matrix~$A$. The \emph{order} of~$P$, denoted~$\Delta_P$, is then by definition a generator of the smallest principal ideal containing~$E_0(P)$, i.e.\ the greatest common divisor of the minors. The order of~$P$ is well defined up to multiplication by units of~$\Z[t^{\pm 1}]$ and if~$P$ admits a square presentation matrix, then~$\Delta_P\doteq\det(A)$, where~$A$ is some square presentation matrix for~$P$. It follows that for a~$\Z[t^{\pm 1}]$-module~$P$ which admits a square presentation matrix, one has~$P=0$ if and only if~$\Delta_P \doteq 1$. For more background on these topics, we refer the reader to~\cite[Section~1.4]{TuraevIntroductionTo}. \end{remark} \section{Proof of Theorem~\ref{thm:MainTechnicalIntro}.} \label{sec:ProofMainTechnical} Now we prove Theorem~\ref{thm:MainTechnicalIntro} from the introduction. For the reader's convenience, we recall the statement of this result. \begin{theorem} \label{thm:MainTechnical} Let~$Y$ be a~$3$-manifold with an epimorphism~$\varphi \colon \pi_1(Y) \twoheadrightarrow \Z$ whose Alexander module is torsion, and let~$(H,\lambda)$ be a nondegenerate Hermitian form over $\Z[t^{\pm 1}]$ presenting~$Y$. If~$b \in \Iso(\partial \lambda,\unaryminus\Bl_Y)/\Aut(\lambda)$ is an isometry, then there is a~$\Z$-manifold~$M$ with equivariant intersection form~$\lambda_M \cong \lambda$, boundary~$Y$ and with~$b_M=b$. If the form is odd, then~$M$ can be chosen to have either~$\ks(M)=0$ or~$\ks(M)=1$. 
\end{theorem} For the remainder of the section, we let~$Y$ be a~$3$-manifold, let~$\varphi \colon \pi_1(Y) \twoheadrightarrow \Z$ be an epimorphism, and let~$p \colon Y^\infty \to Y$ be the infinite cyclic cover associated to~$(Y,\varphi)$. We assume that~$H_1(Y;\Z[t^{\pm 1}]):=H_1(Y^\infty)$ is~$\Z[t^{\pm 1}]$-torsion. We first describe the strategy of the proof and then carry out each of the steps successively. \subsection{Plan} \label{sub:Plan} Let~$b \colon (\coker(\widehat{\lambda}),\partial \lambda) \to (H_1(Y;\Z[t^{\pm 1}]),\unaryminus \Bl_Y)$ be an isometry. Precompose $b$ with the projection~$H^* \twoheadrightarrow \coker(\widehat{\lambda})$ to get an epimorphism~$\pi \colon H^* \twoheadrightarrow H_1(Y;\Z[t^{\pm 1}])$. In particular,~$0 \to H \xrightarrow{\widehat{\lambda}} H^* \xrightarrow{\varpi} H_1(Y;\Z[t^{\pm 1}]) \to 0$ is a presentation of~$Y$. Pick generators~$x_1,\ldots,x_n$ for~$H$ and endow~$H^*$ with the dual basis~$x_1^*,\ldots,x_n^*$. Write~$Q$ for the matrix of~$\lambda$ in this basis. Note that $Q = \ol{Q}^T$ since $\lambda$ is Hermitian. The strategy to prove Theorem~\ref{thm:MainTechnical} is as follows. \begin{itemize} \item Step 1: Prove that one can represent the classes~$\pi(x_1^*),\cdots, \pi(x_n^*)$ by an~$n$-component framed link~$\widetilde{L} = \widetilde{K}_1 \cup \cdots \cup \widetilde{K}_n$ with equivariant linking matrix~$A_{\widetilde{L}}=-Q^{-T}$. \item Step 2: Argue that the result~$Y'$ of surgery on~$L=p(\widetilde{L})$ satisfies~$H_1(Y';\Z[t^{\pm 1}]) = 0$. \item Step 3: There is a topological~$4$-manifold~$B \simeq S^1$ with boundary~$Y'$ following~\cite[Section~11.6]{FreedmanQuinn}. \item Step 4: Argue that the equivariant intersection form of the~$4$-manifold~$M$ defined below with boundary~$Y$ is represented by~$Q$ and prove that~$b_M = b$. Here, the~$4$-manifold~$M$ and its infinite cyclic cover~$M^\infty$ are defined via \begin{align*} -M^\infty&:=\Big( (Y^\infty \times [0,1]) \cup \bigcup_{i=1}^n \bigcup_{j_i \in\Z} t^{j_i} h_i^{(2)} \Big) \cup_{{Y'}^\infty} -B^\infty \\ -M&:=\Big( (Y \times [0,1]) \cup \bigcup_{i=1}^n h_i^{(2)} \Big) \cup_{Y'} -B, \end{align*} where upstairs the~$2$-handles~$h_i^{(2)}$ are attached along the link ~$L^\infty$; downstairs, one attaches the 2-handles along the projection~$L=p(L^\infty)$ of this link. \item Step 5: If~$\lambda$ is odd, then we use the star construction~\cite{FreedmanQuinn,StongUniqueness} to show that both values of the Kirby-Siebenmann invariant can occur. \end{itemize} \subsection{Step 1: constructing a link with the appropriate equivariant linking matrix} \label{sub:Step1} We continue with the notation from the previous section. In particular, we have a presentation $0 \to H \xrightarrow{\widehat{\lambda}} H^* \xrightarrow{\varpi} H_1(Y;\Z[t^{\pm 1}]) \to 0$ and a basis $x_1,\ldots,x_n$ for $H$ with dual basis $x_1^*,\ldots,x_n^*$ for $H^*$. The aim of this section is to prove that it is possible to represent the generators~$\pi(x_1^*),\ldots,\pi(x_n^*)$ of~$H_1(Y;\Z[t^{\pm 1}])$ by a framed link~$\widetilde{L}=\widetilde{K}_1 \cup \cdots \cup \widetilde{K}_n \subset Y^\infty$ whose transposed equivariant linking matrix agrees with~$-Q^{-1}$; see Proposition~\ref{prop:Step1}. In other words, we must have $$ \ell k_{\Q(t)} (\widetilde{K}_j,\widetilde{K}_i)=-(Q^{-1})_{ij} \ \ \ \text{ and } \ \ \ \ell k_{\Q(t)} (\widetilde{K}_i,\pi_i)=-(Q^{-1})_{ii},$$ where~$\pi_i$ is the framing curve of~$\widetilde{K}_i$. 
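For orientation, we spell out the simplest instance of this requirement; this is only an illustration, phrased in the notation just introduced, and is not needed for the proof. When $n=1$, the Hermitian form $\lambda$ is given by a single symmetric Laurent polynomial, say $Q=(q)$ with $q=\overline{q}$, so that $-Q^{-1}=(-1/q)$. Step 1 then asks for a knot $\widetilde{K}_1 \subset Y^\infty$ representing the generator $\pi(x_1^*)$ of $H_1(Y;\Z[t^{\pm 1}])$ together with a parallel $\pi_1$ such that $$\ell k_{\Q(t)}(\widetilde{K}_1,\pi_1)=-\frac{1}{q(t)}.$$ In particular, whenever $q$ is not a unit of $\Z[t^{\pm 1}]$, the required framing coefficient is a genuine rational function rather than a Laurent polynomial, illustrating the remark made in Definition~\ref{def:ParallelLongitude}~(4).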
Since the Blanchfield form~$\Bl_Y$ is represented by the~$\Q(t)$-coefficient matrix~$-Q^{-1}$~\cite[Section 3]{ConwayPowell}, we know from Proposition~\ref{prop:Linkingprop} that any link representing the~$\pi(x_i^*)$ must satisfy these relations up to adding a polynomial in $\Z[t^{\pm 1}]$. Most of this section therefore concentrates on showing that the equivariant linking (resp.\ framing) of an arbitrary framed link in~$Y^\infty$ can be changed by any polynomial (resp.\ symmetric polynomial) in~$\Z[t^{\pm 1}]$, without changing the homology classes defined by the components of this link. \medbreak We start by showing how to modify the equivariant linking between distinct components of a link, without changing the homology class of the link. \begin{lemma} \label{lem:Step1} Let~$\widetilde{L}=\widetilde{K}_1 \cup \cdots \cup \widetilde{K}_n \subset Y^\infty$ be an~$n$-component framed link in covering general position, with parallels $\pi_1,\dots,\pi_n$. For every distinct~$i,j$ and every polynomial~$p(t) \in \Z[t^{\pm 1}]$, there is a framed link~$\widetilde{L}':=\widetilde{K}_1 \cup \cdots \cup \widetilde{K}_{i-1} \cup \widetilde{K}_i' \cup \widetilde{K}_{i+1} \cup \cdots \cup \widetilde{K}_n$, also in covering general position, such that: \begin{enumerate} \item the knot~$\widetilde{K}_i'$ is isotopic to~$\widetilde{K}_i$ in~$Y^\infty$. In particular,~$[\widetilde{K}_i']=[\widetilde{K}_i]$ in~$H_1(Y;\Z[t^{\pm 1}])$; \item the equivariant linking between~$\widetilde{K}_i$ and~$\widetilde{K}_j$ is changed by~$p(t)$, i.e.\ $$\ell k_{\Q(t)}(\widetilde{K}_i',\widetilde{K}_j)=\ell k_{\Q(t)}(\widetilde{K}_i,\widetilde{K}_j)+p(t);$$ \item the equivariant linking between~$\widetilde{K}_i$ and~$\widetilde{K}_\ell$ is unchanged for~$\ell \neq i,j$; \item the framing coefficients are unchanged; that is, there is a parallel $\gamma_i$ for $\wt{K}'_i$ such that \[\ell k_{\Q(t)}(\widetilde{K}_i',\gamma_i)=\ell k_{\Q(t)}(\widetilde{K}_i,\pi_i).\] \end{enumerate} \end{lemma} \begin{proof} Without loss of generality we can assume that~$p(t)=mt^k$ for~$m,k \in \Z$. The new knot~$\widetilde{K}_i'$ is then obtained by band summing~$\widetilde{K}_i$ with~$m$ meridians of~$t^{-k} \widetilde{K}_j$, framed using the bounding framing induced by meridional discs. The first, third, and fourth properties of~$\widetilde{K}_i'$ are immediate: clearly the linking of~$\widetilde{K}_i$ with~$\widetilde{K}_\ell$ is unchanged for~$\ell \neq i,j$ and since the aforementioned meridians bound discs in~$Y^\infty$ over which the framing extends, we see that~$\widetilde{K}_i'$ is framed isotopic (and in particular homologous) to~$\widetilde{K}_i$ in~$Y^\infty$. It follows that the framing coefficient is unchanged. The second property is obtained from a direct calculation using the sesquilinearity of equivariant linking numbers: \[ \ell k_{\Q(t)}(\widetilde{K}_i',\widetilde{K}_j)=\ell k_{\Q(t)}(\widetilde{K}_i,\widetilde{K}_j)+m \ \ell k_{\Q(t)}(t^{-k}\mu_{\widetilde{K}_j},\widetilde{K}_j)=\ell k_{\Q(t)}(\widetilde{K}_i,\widetilde{K}_j)+ mt^k. \qedhere \] \end{proof} Next, we show how to modify the framing of a framed link component by a symmetric polynomial $p=\ol{p}$, without changing the homology class of the link. \begin{lemma} \label{lem:ModifyFraming} Let~$\widetilde{L}=\widetilde{K}_1 \cup \cdots \cup \widetilde{K}_n \subset Y^\infty$ be an~$n$-component framed link in covering general position. Fix a parallel~$\pi_i$ for~$\widetilde{K}_i$. 
For each~$i=1,\ldots,n$ and every symmetric polynomial~$p(t) = p(t^{-1})$, there exists a knot~$\widetilde{K}_i' \subset Y^\infty$ and a parallel~$\gamma_i$ of~$\widetilde{K}_i'$ such that \begin{enumerate} \item the knot~$\widetilde{K}_i'$ is isotopic to~$\widetilde{K}_i$ in~$Y^\infty \sm \cup_{j \neq i} \wt{K}_j$, and in particular,~$[\widetilde{K}_i']=[\widetilde{K}_i]$ in~$H_1(Y;\Z[t^{\pm 1}])$; \item the framing coefficient of~$\widetilde{K}_i$ is changed by~$p(t)$, i.e.\ $$\ell k_{\Q(t)}(\widetilde{K}_i' ,\gamma_i)=\ell k_{\Q(t)}(\widetilde{K}_i ,\pi_i)+p(t);$$ \item the other linking numbers are unchanged:~$\ell k_{\Q(t)}(\widetilde{K}_i',\widetilde{K}_j)=\ell k_{\Q(t)}(\widetilde{K}_i,\widetilde{K}_j)$ for all~$j\neq i$. \end{enumerate} \end{lemma} \begin{proof} We first prove the lemma when~$p(t)$ has no constant term. In this case, it suffices to show how to change the self-linking number by~$m(t^k+t^{-k})$ for~$k \neq 0$. To achieve this, band sum~$\widetilde{K}_i$ with~$m$ meridians of~$t^k\widetilde{K}_i$. As in the proof of Lemma~\ref{lem:Step1}, the first and third properties of~$\widetilde{K}_i$ are clear. To define~$\gamma_i$ and prove the second property, define ~$\mu_{\widetilde{K}_i}'$ to be a parallel of~$\mu_{\widetilde{K}_i}$ with~$\ell k_{\Q(t)}(\mu_{\widetilde{K}_i},\mu_{\widetilde{K}_i}')=0$ in~$Y^\infty$. Define~$\gamma_i$ to be the parallel of~$\widetilde{K}_i'$ obtained by banding~$\pi_i$ to~$m$ copies of~$t^k\mu_{\widetilde{K}_i}'$, using bands which are push-offs of the bands used to define~$\widetilde{K}_i'$, and parallel copies of the meridian chosen with the zero-framing with respect to the framing induced by the associated meridional disc. Using the sesquilinearity of equivariant linking numbers, we obtain \begin{align*} \ell k_{\Q(t)}(\widetilde{K}_i',\gamma_i) &=\ell k_{\Q(t)}(\widetilde{K}_i,\pi_i)+m \ \ell k_{\Q(t)}(t^k\mu_{\widetilde{K}_i},\pi_i)+m\ \ell k_{\Q(t)}(\widetilde{K}_i,t^k\mu_{\widetilde{K}_i}')+\ell k_{\Q(t)}(\mu_{\widetilde{K}_i},\mu_{\widetilde{K}_i}') \\ &=\ell k_{\Q(t)}(\widetilde{K}_i,\pi)+m(t^k+t^{-k}). \end{align*} We have therefore shown how to modify the self-linking within a fixed homology class by a symmetric polynomial with no constant term. The general case follows: thanks to the previous paragraph, it suffices to describe how to change the self-linking by a constant, and this can be arranged by varying the choice of the parallel~$\gamma_i$ i.e.\ by additionally winding an initial choice of~$\gamma_i$ around the appropriate number of meridians of~$\widetilde{K}_i'$. \end{proof} By combining the previous two lemmas, we can now prove the main result of this section. \begin{proposition} \label{prop:Step1} Let~$0 \to H \xrightarrow{\widehat{\lambda}} H^* \xrightarrow{\varpi} H_1(Y;\Z[t^{\pm 1}]) \to 0$ be a presentation of~$Y$. Pick generators~$x_1,\ldots,x_n$ for~$H$ and endow~$H^*$ with the dual basis~$x_1^*,\ldots,x_n^*$. Let~$Q$ be the matrix of~$\lambda$ with respect to these bases. 
The classes~$\pi(x_1^*),\ldots,\pi(x_n^*)$ can be represented by simple closed curves~$\widetilde{K}_1,\ldots,\widetilde{K}_n \subset Y^\infty$ such that $\widetilde{L}=\widetilde{K}_1 \cup \cdots \cup\widetilde{K}_n$ is in covering general position and satisfies the following properties: \begin{enumerate} \item the equivariant linking numbers of the~$\widetilde{K}_i$ satisfy $\ell k_{\Q(t)}(\widetilde{K}_j,\widetilde{K}_i)=\unaryminus(Q^{-1})_{ij}$ for~$i \neq j$; \item there exist parallels~$\gamma_1,\ldots,\gamma_n$ of~$\widetilde{K}_1,\ldots,\widetilde{K}_n$ such that~$\ell k_{\Q(t)}(\widetilde{K}_i,\gamma_i)=\unaryminus(Q^{-1})_{ii}$. \end{enumerate} In particular the parallel~$\gamma_i$ represents the homology class~$\unaryminus(Q^{-1})_{ii}[\mu_{\widetilde{K}_i}]+\lambda_{\widetilde{K}_i} \in H_1(\partial \overline{\nu} (\widetilde{K}_i);\Q(t))$ and the transpose of the equivariant linking matrix of~$\widetilde{L}$ equals~$-Q^{-1}$. \end{proposition} \begin{proof} Represent the classes~$\pi(x_1^*),\ldots,\pi(x_n^*)$ by an~$n$-component link in~$Y^\infty$ that can be assumed to be in covering general position. Use~$\widetilde{J}_1,\ldots,\widetilde{J}_n$ to denote the components of this link. Thanks to Lemma~\ref{lem:Step1}, we can assume that the equivariant linking numbers of these knots coincide with the off-diagonal terms of~$-Q^{-1}$; we can apply this lemma because for $i \neq j$ the rational functions~$\ell k_{\Q(t)} (\widetilde{J}_j,\widetilde{J}_i)$ and the corresponding~$-(Q^{-1})_{ij}$ both reduce mod~$\Z[t^{\pm 1}]$ to~$\Bl_Y(\pi(x_i^*),\pi(x_j^*))$ and thus differ by a Laurent polynomial~$p(t) \in \Z[t^{\pm 1}]$. We arrange the framings and last assertion simultaneously. For brevity, from now on we write $$r_i:=-(Q^{-1})_{ii}.$$ By Lemma~\ref{lem:SimpleClosedCurve}, for each $i$, the class~$r_i[\mu_{\widetilde{J}_i}]+\lambda_{\widetilde{J}_i}$ can be rewritten as~$(r_i-\ell k_{\Q(t)}(\widetilde{J}_i,\pi_i))[\mu_{\widetilde{J}_i}]+[\pi_i]$ for any choice of parallel~$\pi_i$ for~$\widetilde{J}_i$. Note that~$r_i-\ell k_{\Q(t)}(\widetilde{J}_i,\pi_i)$ is a Laurent polynomial: indeed both~$r_i$ and~$\ell k_{\Q(t)}(\widetilde{J}_i,\pi_i)$ reduce mod $\Z[t^{\pm 1}]$ to~$\Bl_Y(\pi(x_i^*),\pi(x_i^*))$. \begin{claim*} The polynomial~$r_i-\ell k_{\Q(t)}(\widetilde{J}_i,\pi_i)$ is symmetric. \end{claim*} \begin{proof} We first assert that if~$\sigma$ is a parallel of~$\widetilde{J}_i$, then~$\ell k_{\Q(t)}(\sigma,\widetilde{J}_i)$ is symmetric. The rational function~$\ell k_{\Q(t)}(\sigma,\widetilde{J}_i)$ is symmetric if and only if~$\ell k_{\Q(t)}(\sigma,\widetilde{J}_i)=\overline{\ell k_{\Q(t)}(\sigma,\widetilde{J}_i)}$. By the symmetry property of the equivariant linking form mentioned in Proposition~\ref{prop:Linkingprop}, this is equivalent to the equality~$\ell k_{\Q(t)}(\sigma,\widetilde{J}_i)=\ell k_{\Q(t)}(\widetilde{J}_i,\sigma)$ and in turn this equality holds because~the ordered link~$(\sigma,\widetilde{J}_i)$ is isotopic to the ordered link~$(\widetilde{J}_i,\sigma)$ in~$Y^\infty$. This concludes the proof of the assertion that~$\ell k_{\Q(t)}(\sigma,\widetilde{J}_i)$ is symmetric. We now conclude the proof of the claim. Thanks to the assertion, it now suffices to prove that~$r_i$ is symmetric. To see this, note that since the matrix~$Q^{-1}$ is Hermitian (because~$Q$ is) we have~$r_i(t^{-1})=-(\overline{Q^{-1}})_{ii}=-(\overline{Q^{-T}})_{ii}=-(Q^{-1})_{ii}=r_i(t)$, as required.
\end{proof} We can now apply Lemma~\ref{lem:ModifyFraming} to~$p(t):=r_i-\ell k_{\Q(t)}(\widetilde{J}_i,\pi_i)$ (which is symmetric by the claim) to isotope the~$\widetilde{J}_i$ to knots~$\widetilde{K}_i$ (without changing the equivariant linking) and to find parallels~$\gamma_1,\ldots,\gamma_n$ of~$\widetilde{K}_1,\ldots, \widetilde{K}_n$ that satisfy the equalities~$\unaryminus (Q^{-1})_{ii}=r_i=\ell k_{\Q(t)}(\widetilde{K}_i,\gamma_i)$. This proves the second item of the proposition, and the assertions in the last sentence follow because~$r_i[\mu_{\widetilde{K}_i}]+\lambda_{\widetilde{K}_i}=[\gamma_i]$ (by Lemma~\ref{lem:SimpleClosedCurve}) and from the definition of the equivariant linking matrix. \end{proof} \subsection{Step 2: the result of surgery is a~$\Z[t^{\pm 1}]$-homology~$S^1 \times S^2$} \label{sub:Step2} Let $\widetilde{L} \subset Y^\infty$ be a framed link in covering general position with equivariant linking matrix $A_{\widetilde{L}}$ over $\Q(t)$, and let~$Y'$ be the effect of surgery on the framed link~$L=p(\widetilde{L})$. We assume throughout this subsection that $\det(A_{\widetilde{L}})\neq 0$. Our goal is to calculate the Alexander polynomial~$\Delta_{Y'}$ in terms of~$\Delta_Y$ and of the equivariant linking matrix of~$\widetilde{L} \subset Y^\infty$. In Theorem~\ref{thm:OrderOfEffectOfSurgery} we will show that \begin{equation} \label{eq:AlexGoal} \Delta_{Y'} \doteq \Delta_Y\det(A_{\widetilde{L}}). \end{equation} We then apply this to the framed link~$\widetilde{L} \subset Y^\infty$ that we built in Proposition~\ref{prop:Step1}; this framed link satisfies $\det(A_{\widetilde{L}})=\det(-Q^{-T})\neq 0$. Continuing with the notation from that proposition, we have~$\det(A_{\widetilde{L}})=\det(-Q^{-T}) \doteq \frac{1}{\Delta_Y}$ (because~$Q$ presents~$H_1(Y;\Z[t^{\pm 1}])$) so in this case~\eqref{eq:AlexGoal} implies that~$\Delta_{Y'} \doteq 1$, which in turn implies that~$Y'$ is a~$\Z[t^{\pm 1}]$-homology~$S^1 \times S^2$; see Remark~\ref{rem:AlexPoly} and Proposition~\ref{prop:Step2}. \medbreak We start by outlining the proof of~\eqref{eq:AlexGoal}, which will later be recorded as Theorem~\ref{thm:OrderOfEffectOfSurgery}. \begin{proof}[Outline of proof of Theorem~\ref{thm:OrderOfEffectOfSurgery}] \label{rem:Step3IdeaOfProof} Our plan is to compute the Reidemeister torsion~$\tau(Y')$ in terms of the Reidemeister torsion~$\tau(Y)$, and then, for $Z=Y,Y'$, to use the relation \begin{equation}\label{eq:Alextotorsion} \Delta_{Z}=\tau(Z)(t-1)^2 \end{equation} from~\cite[Theorem 1.1.2]{TuraevReidemeisterTorsionInKnotTheory} to derive~\eqref{eq:AlexGoal}. We note that in our setting we are allowed to write~$\tau(Y)$ and~$\tau(Y')$ for the Reidemeister torsions without having to choose bases~$\mathcal{B}$; this is because both~$H_*(Y;\Q(t))=0$ and~$H_*(Y';\Q(t))=0$, recall Lemma \ref{lem:surgQsphere} and Section~\ref{sec:reidemeister-torsion}; here note that we can apply Lemma \ref{lem:surgQsphere} because we are assuming that $\det(A_{\widetilde{L}})\neq 0$. We will calculate~$\tau(Y')$ from~$\tau(Y)$ by studying the long exact sequence of the pairs~$(Y,Y_L)$ and~$(Y',Y_L)$ with~$\Q(t)$ coefficients. More concretely, in Construction~\ref{cons:Bases}, we endow the $\Q(t)$-vector spaces~$H_*(Y,Y_L;\Q(t))$,~$H_*(Y',Y_L;\Q(t))$, and~$H_*(Y_L;\Q(t))$ with bases that we denote by~$\mathcal{B}_{Y,Y_L},\mathcal{B}_{Y',Y_L}$, and~$\mathcal{B}_{Y_L}$ respectively.
In Lemma~\ref{lem:MultiplicativityTorsion}, we then show that $$ \tau(Y)\tau(\mathcal{H}_L)^{-1}\doteq\tau(Y_L,\mathcal{B}_{Y_L})\doteq \tau(Y')\tau(\mathcal{H}_{L'})^{-1}, $$ where~$\mathcal{H}_L$ and~$\mathcal{H}_{L'}$ respectively denote the long exact sequences in~$\Q(t)$-homology of the pairs~$(Y,Y_L)$ and~$(Y',Y_L)$. Finally, we prove that~$\tau(\mathcal{H}_L) \doteq 1$ and~$\tau(\mathcal{H}_{L'}) \doteq \det(A_{\widetilde{L}})$. From~\eqref{eq:Alextotorsion} and the previous equation we then deduce \[\frac{\Delta_Y}{(t-1)^2 \cdot 1} \doteq \tau(Y)\tau(\mathcal{H}_L)^{-1}\doteq \tau(Y')\tau(\mathcal{H}_{L'})^{-1} \doteq \frac{\Delta_{Y'}}{(t-1)^2 \cdot \det(A_{\widetilde{L}})}. \] The equality~$\Delta_{Y'} \doteq \Delta_Y\det(A_{\widetilde{L}})$ follows promptly. \end{proof} We start filling in the details with our choice of bases for the previously mentioned~$\Q(t)$-homology vector spaces. \begin{construction} \label{cons:Bases} We fix bases for $H_*(Y,Y_L;\Q(t))$, $H_*(Y',Y_L;\Q(t))$, and $H_*(Y_L;\Q(t))$, that we will respectively denote by~$\mathcal{B}_{Y,Y_L},\mathcal{B}_{Y',Y_L}$ and~$\mathcal{B}_{Y_L}$. \begin{itemize} \item We base the~$\Q(t)$-vector spaces~$H_*(Y,Y_L;\Q(t))$ and~$H_*(Y',Y_L;\Q(t))$. Excising~$\mathring{Y}_L$, we obtain~$H_i(Y,Y_L;\Q(t))=\bigoplus_{j=1}^n H_i(D^2 \times S^1,S^1 \times S^1;\Q(t))$ where~$n$ is the number of components of~$L$. Similarly, by excising~$\mathring{Y}_L \cong \mathring{Y}_{L'}$, we have~$H_i(Y',Y_L;\Q(t))=\bigoplus_{j=1}^n H_i(S^1 \times D^2,S^1 \times S^1;\Q(t))$. Since the map $\pi_1(S^1) \to \Z$ determining the coefficients is trivial, \[\bigoplus_{j=1}^n H_i(S^1 \times D^2,S^1 \times S^1;\Q(t)) \cong \bigoplus_{j=1}^n H^{3-i}(S^1;\Q(t)) \cong \bigoplus_{j=1}^n H^{3-i}(S^1;\Z) \otimes \Q(t).\] These homology vector spaces are only non-zero when~$i=2,3$, in which case they are isomorphic to~$\Q(t)^n$. We now pick explicit generators for these vector spaces. Endow~$S^1 \times S^1$ with its usual cell structure, with one~$0$-cell, two~$1$-cells and one~$2$-cell~$e^2_{S^1 \times S^1}$. Note that~$D^2 \times S^1$ is obtained from~$S^1 \times S^1\times I$ by additionally attaching a~$2$-cell~$e^2_{D^2 \times S^1}$ and a~$3$-cell~$e^3_{D^2 \times S^1}$, where on the chain level~$\partial e^3_{D^2 \times S^1}=e^2_{D^2 \times S^1}+e^2_{S^1\times S^1}-e^2_{D^2 \times S^1}=e^2_{S^1\times S^1}$. We now fix once and for all lifts of these cells to the covers. It follows that for~$k=2,3$: \begin{align*} H_k(Y,Y_L;\Q(t))&=C_k(Y,Y_L;\Q(t))=\bigoplus_{i=1}^n C_k(D^2 \times S^1,S^1 \times S^1;\Q(t))=\bigoplus_{i=1}^n \Q(t) (\widetilde{e}_{D^2 \times S^1}^k)_i \\ H_k(Y',Y_L;\Q(t))&=C_k(Y',Y_L;\Q(t))=\bigoplus_{i=1}^n C_k(S^1 \times D^2,S^1 \times S^1;\Q(t))=\bigoplus_{i=1}^n \Q(t) (\widetilde{e}_{S^1 \times D^2}^k)_i. \end{align*} \item We now base~$H_*(Y_L;\Q(t))$. Since~$H_*(Y;\Q(t))=0$, a Mayer-Vietoris argument shows that~$H_1(Y_L;\Q(t)) \cong \Q(t)^n$, generated by the meridians~$\mu_{\widetilde{K}_i}$ of~$\widetilde{L}$. Mayer-Vietoris also shows that the inclusion of the boundary induces an isomorphism~$\Q(t)^n=H_2(\partial Y_L;\Q(t)) \cong H_2(Y_L;\Q(t))$. We can then base~$H_2(Y_L;\Q(t))$ using fixed lifts of the aforementioned~$2$-cells~$(e^2_{S^1 \times S^1})_i$ generating each of the torus factors of~$\partial Y_L$. Summarising, we have \begin{align*} H_1(Y_L;\Q(t))&=\bigoplus_{i=1}^n \Q(t)\mu_{\widetilde{K}_i},\\ H_2(Y_L;\Q(t))&=\bigoplus_{i=1}^n \Q(t)(\widetilde{e}^2_{S^1 \times S^1})_i.
\end{align*} \end{itemize} \end{construction} The next lemma reduces the calculation of~$\Delta_{Y'}$ to the calculation of~$\tau(\mathcal{H}_L)$ and~$\tau(\mathcal{H}_{L'})$. Here, recall that~$\tau(\mathcal{H}_L)$ and~$\tau(\mathcal{H}_{L'})$ denote the torsion of the long exact sequences~$\mathcal{H}_L$ and~$\mathcal{H}_{L'}$ of the pairs $(Y,Y_L)$ and $(Y',Y_L)$, viewed as based acyclic complexes with bases~$\mathcal{B}_{Y_L},\mathcal{B}_{Y,Y_L}$, and~$\mathcal{B}_{Y',Y_L}$. \begin{lemma} \label{lem:MultiplicativityTorsion} If~$H_1(Y;\Q(t))=0$ and $\det(A_{\widetilde{L}})\neq 0$, then we have \begin{align*} \tau(Y)&\doteq \tau(Y_L,\mathcal{B}_{Y_L})\cdot \tau(\mathcal{H}_L), \\ \tau(Y')&\doteq\tau(Y_L,\mathcal{B}_{Y_L})\cdot \tau(\mathcal{H}_{L'}). \end{align*} In particular, we have $$\Delta_{Y'}\cdot \tau(\mathcal{H}_{L})\doteq\Delta_Y\cdot \tau(\mathcal{H}_{L'}).$$ \end{lemma} \begin{proof} We start by proving that the last statement follows from the first. First note that since the vector spaces~$H_1(Y;\Q(t))$ and~$H_1(Y';\Q(t))$ vanish (for the latter we use Lemma~\ref{lem:surgQsphere} which applies since $\det(A_{\widetilde{L}})\neq 0$), the Alexander polynomials of~$Y$ and~$Y'$ are nonzero. Next,~\cite[Theorem 1.1.2]{TuraevReidemeisterTorsionInKnotTheory} implies that~$\tau(Y)(t-1)^2=\Delta_Y$ and similarly for~$Y'$. Therefore $\Delta_{Y'}/\Delta_Y=\tau(Y')/\tau(Y).$ The first part of the lemma implies that $\tau(Y')/\tau(Y)=\tau(\mathcal{H}_{L'})/\tau(\mathcal{H}_{L})$. Combining these equalities, $$\frac{\Delta_{Y'}}{\Delta_Y}=\frac{\tau(Y')}{\tau(Y)}=\frac{\tau(\mathcal{H}_{L'})}{\tau(\mathcal{H}_{L})}, $$ from which the required statement follows immediately. To prove the first statement of the lemma, it suffices to prove that~$\tau(Y,Y_L,\mathcal{B}_{Y,Y_L})=1$ as well as~$\tau(Y',Y_L,\mathcal{B}_{Y',Y_L})=1$: indeed, the required equalities then follow by applying the multiplicativity of Reidemeister torsion (the first item of Proposition~\ref{thm:ReidemeisterTorsion}) to the short exact sequence \[0 \to C_*(Y_L;\Q(t)) \to C_*(Y;\Q(t)) \to C_*(Y,Y_L;\Q(t)) \to 0,\] leading to $\tau(Y) = \tau(Y_L,\mathcal{B}_{Y_L}) \cdot \tau(Y,Y_L,\mathcal{B}_{Y,Y_L}) \cdot \tau(\mathcal{H}_{L}) = \tau(Y_L,\mathcal{B}_{Y_L}) \cdot 1 \cdot \tau(\mathcal{H}_{L})$ as desired. And similarly for the pair~$(Y',Y_L)$. We use Definition~\ref{def:ReidemeisterTorsion} to prove that~$\tau(Y,Y_L,\mathcal{B}_{Y,Y_L})=1$; again the proof for~$L'$ is analogous. We endow~$Y$ and~$Y_L$ with cell structures for which~$Y_L$ and $\partial Y_L$ are subcomplexes of~$Y$, and~$Y$ is obtained from~$Y_L$ by attaching~$n$ solid tori to $\partial Y_L$. By definition of the relative chain complex, we have~$C_*(Y,Y_L;\Q(t))=C_*(Y;\Q(t))/C_*(Y_L;\Q(t))$. Since we are working with cellular chain complexes, we deduce that $$C_*(Y,Y_L;\Q(t))=C_*(Y;\Q(t))/C_*(Y_L;\Q(t))=\bigoplus_{i=1}^n C_*(D^2 \times S^1;\Q(t))/C_*(S^1 \times S^1;\Q(t)).$$ Using the cell structures described in Construction~\ref{cons:Bases},~$D^2 \times S^1$ is obtained from~$S^1 \times S^1$ by attaching a~$2$-cell and a~$3$-cell. By the above sequence of isomorphisms, this shows that~$C_i(Y,Y_L;\Q(t))=0$ for~$i\neq 2,3$ and gives a basis for~$C_2(Y,Y_L;\Q(t))$ and~$C_3(Y,Y_L;\Q(t))$. In fact, this also implies that~$C_i(Y,Y_L;\Q(t))=H_i(Y,Y_L;\Q(t))$ and that the differentials in the chain complex are zero, as was mentioned in Construction~\ref{cons:Bases}. Thus, the basis of~$C_*(Y,Y_L;\Q(t))$ corresponds exactly to the way we based~$H_*(Y,Y_L;\Q(t))$ in Construction~\ref{cons:Bases}.
Therefore the change of basis matrix is the identity and so the torsion is equal to~$1$. This concludes the proof of the lemma. \end{proof} Our goal is now to show that~$\tau(\mathcal{H}_L) \doteq 1$ and~$\tau(\mathcal{H}_{L'}) \doteq \det(A_{\widetilde{L}})$. We start by describing the long exact sequences~$\mathcal{H}_L$ and~$\mathcal{H}_{L'}$. \begin{lemma} \label{lem:LESSimple} Assume that~$H_1(Y;\Q(t))=0$ and $\det(A_{\widetilde{L}})\neq 0$. The only nontrivial portions of the long exact sequences of the pairs~$(Y,Y_L)$ and~$(Y',Y_L)$ with~$\Q(t)$-coefficients are of the following form: \begin{align*} \mathcal{H}_L=&\, \Big( 0 \to H_3(Y,Y_L;\Q(t)) \xrightarrow{\partial_3^L } H_2(Y_L;\Q(t)) \to 0 \to H_2(Y,Y_{L};\Q(t)) \xrightarrow{\partial_2^L } H_1(Y_{L};\Q(t)) \to 0 \Big), \\ \mathcal{H}_{L'}=&\, \Big( 0 \to H_3(Y',Y_L;\Q(t)) \xrightarrow{\partial_3^{L'}} H_2(Y_L;\Q(t)) \to 0 \to H_2(Y',Y_{L};\Q(t)) \xrightarrow{\partial_2^{L'} } H_1(Y_L;\Q(t)) \to 0 \Big). \end{align*} Additionally, with respect to the bases of Construction~\ref{cons:Bases}, \begin{itemize} \item the homomorphism~$\partial_2^{L'}$ is represented by~$-A_{\widetilde{L}}$, i.e.~minus the equivariant linking matrix of~$\widetilde{L}$; \item the homomorphisms $\partial_2^L$, $\partial_3^L$, and~$\partial_3^{L'}$ are represented by identity matrices. \end{itemize} \end{lemma} \begin{proof} Since~$Y^\infty$ and~${Y'}^\infty$ are connected, we have~$H_0(Y;\Z[t^{\pm 1}])=\Z$ and~$H_0(Y';\Z[t^{\pm 1}])=\Z$, so~$H_0(Y;\Q(t))=0$ and~$H_0(Y';\Q(t))=0$. Since we are working with field coefficients, Poincar\'e duality and the universal coefficient theorem imply that~$H_3(Y;\Q(t))=0$ and~$H_3(Y';\Q(t))=0$. As observed in Construction~\ref{cons:Bases} above, by excision, the only non-zero relative homology groups of~$(Y,Y_L)$ and~$(Y',Y_L)$ are \begin{align*} H_i(Y,Y_L;\Q(t))=\Q(t)^n \ \ \ \ &\text{ and } \ \ \ \ H_i(Y',Y_L;\Q(t))=\Q(t)^n \end{align*} for~$i=2,3$. Next, since by assumption~$H_1(Y;\Q(t))=0$, duality and the universal coefficient theorem imply that~$H_2(Y;\Q(t))=0$. Since we proved in Lemma~\ref{lem:surgQsphere} that~$H_1(Y';\Q(t))=0$ (here we used $\det(A_{\widetilde{L}}) \neq 0$), the same argument shows that~$H_2(Y';\Q(t))=0$. This establishes the first part of the lemma. We now prove the statement concerning~$\partial_2^L$ and~$\partial_2^{L'}$. Recall from Construction~\ref{cons:Bases} that we based the vector spaces~$H_2(Y,Y_L;\Q(t))$ and~$H_2(Y',Y_L;\Q(t))$ by meridional discs of the~$\widetilde{K}_i$ and~$\widetilde{K}_i'$ respectively. The map~$\partial_2^L$ takes each disc to its boundary, the meridian~$\mu_{\widetilde{K}_i}$; since these meridians form our chosen basis for~$H_1(Y_L;\Q(t))$, we deduce that~$\partial_2^L$ is represented by the identity matrix. The map~$\partial_2^{L'}$ also takes each meridional disc to its boundary, the meridian~$\mu_{\widetilde{K}_i'}$ of the dual knot. It follows that~$\partial_2^{L'}$ is represented by the change of basis matrix~$B$ such that~$\boldsymbol{\mu}'=B\boldsymbol{\mu}$. But during the proof of Lemma~\ref{lem:InverseMatrix} we saw that~$B=-A_{\widetilde{L}}$. Finally, we prove that~$\partial_3^L$ and~$\partial_3^{L'}$ are represented by identity matrices. In Construction~\ref{cons:Bases}, we based~$H_3(Y,Y_L;\Q(t))$ and~$H_3(Y',Y_L;\Q(t))$ using respectively (lifts of) the~$3$-cells of the~$(D^2 \times S^1)_i$ and~$(S^1 \times D^2)_i$. Now both~$\partial_3^L$ and~$\partial_3^{L'}$ take these~$3$-cells to their boundaries.
But as we noted in Construction~\ref{cons:Bases}, these boundaries are (algebraically) the~$2$-cells~$(e^2_{S^1 \times S^1})_i$. In other words both~$\partial_3^L$ and~$\partial_3^{L'}$ map our choice of ordered bases to our other choice of ordered bases, and are therefore represented in these bases by identity matrices, as required. This concludes the proof of Lemma~\ref{lem:LESSimple}. \end{proof} As we now understand the exact sequences~$\mathcal{H}_L$ and~$\mathcal{H}_{L'}$, we can calculate their torsions, leading to the proof of the main result of this subsection. \begin{theorem} \label{thm:OrderOfEffectOfSurgery} If~$H_1(Y;\Q(t))=0$ and $\det(A_{\widetilde{L}})\neq 0$, then we have $$\Delta_{Y'}\doteq \det(A_{\widetilde{L}})\Delta_Y.$$ \end{theorem} \begin{proof} Use the bases from Construction~\ref{cons:Bases}. Combine the second item of Proposition~\ref{thm:ReidemeisterTorsion} with Lemma~\ref{lem:LESSimple} to obtain: \begin{align*} \tau(\mathcal{H}_L) \doteq \frac{\det(\partial_2^L)}{\det(\partial_3^L)} \doteq 1\text{ and } \tau(\mathcal{H}_{L'})\doteq \frac{\det(\partial_2^{L'})}{\det(\partial_3^{L'})} \doteq \det(A_{\widetilde{L}}). \end{align*} We deduce that $\tau(\mathcal{H}_{L'})/\tau(\mathcal{H}_{L}) \doteq \det(A_{\widetilde{L}}).$ Apply Lemma~\ref{lem:MultiplicativityTorsion} to obtain $$\frac{\Delta_{Y'}}{\Delta_Y} \doteq \frac{\tau(\mathcal{H}_{L'})}{\tau(\mathcal{H}_{L})}\doteq \det(A_{\widetilde{L}}).$$ Rearranging yields the desired equality. \end{proof} As a consequence, we complete the second step of the plan from Subsection~\ref{sub:Plan}. \begin{proposition} \label{prop:Step2} Let~$0 \to H \xrightarrow{\widehat{\lambda}} H^* \xrightarrow{\varpi} H_1(Y;\Z[t^{\pm 1}]) \to 0$ be a presentation of~$Y$. Pick generators~$x_1,\ldots,x_n$ for~$H$ and endow~$H^*$ with the dual basis~$x_1^*,\ldots,x_n^*$. Let~$Q$ be the matrix of~$\lambda$ with respect to these bases. The classes~$\pi(x_1^*),\ldots,\pi(x_n^*)$ can be represented by a framed link~$\widetilde{L}$ in covering general position with equivariant linking matrix~$A_{\widetilde{L}}=-Q^{-T}$. In addition, the~$3$-manifold~$Y'$ obtained by surgery on~$Y$ along $L$ satisfies~$H_1(Y';\Z[t^{\pm 1}])=0$. \end{proposition} \begin{proof} The existence of~$\widetilde{L}$ representing the given generators and with equivariant linking matrix~$A_{\widetilde{L}}=-Q^{-T}$ is proved in Proposition \ref{prop:Step1}. Since~$Q^{T}$ presents~$H_1(Y;\Z[t^{\pm 1}])$, we have~$\det(Q) \doteq \Delta_Y$ and therefore~$\det(A_{\widetilde{L}}) \doteq \frac{1}{\Delta_Y}$. Theorem~\ref{thm:OrderOfEffectOfSurgery} now implies that~$\Delta_{Y'} \doteq 1$. A short argument is now needed to use Remark~\ref{rem:AlexPoly} in order to conclude~$H_1(Y';\Z[t^{\pm 1}])=0$: we require that this torsion module admits a square presentation matrix, i.e.\ has projective dimension at most~$1$, denoted~$\pd (H_1(Y';\Z[t^{\pm 1}])) \leq 1$. Here recall that a $\Z[t^{\pm 1}]$-module~$P$ \emph{has projective dimension at most~$k$} if~$\operatorname{Ext}^i_{\Z[t^{\pm 1}]}(P;V)=0$ for every~$\Z[t^{\pm 1}]$-module~$V$ and every~$i\geq k+1$, and that for a short exact sequence $0 \to A \to B \to C \to 0$ of $\Z[t^{\pm 1}]$-modules, the associated long exact sequence in $\operatorname{Ext}(-;V)$ groups implies that: \begin{enumerate}[(a)] \item if $\pd (C) \leq 1$ and $A$ is free, then $\pd (B) \leq 1$; \item if $\pd (B) \leq 1$ and $A$ is free, then $\pd (C) \leq 1$.
\end{enumerate} The following paragraph proves that~$\pd (H_1(Y';\Z[t^{\pm 1}])) \leq 1$. As~$H_1(Y;\Z[t^{\pm 1}])$ and~$H_1(Y';\Z[t^{\pm 1}])$ are torsion (for the latter recall Lemma~\ref{lem:surgQsphere}), a duality argument implies that~$H_2(Y;\Z[t^{\pm 1}])=\Z$ and~$H_2(Y';\Z[t^{\pm 1}])=\Z$ (see e.g. the first item of~\cite[Lemma 3.2]{ConwayPowell}). Since these modules are torsion and since excision implies that \begin{align*} H_2(Y,Y_L;\Z[t^{\pm 1}])=\Z[t^{\pm 1}]^n \ \ \ \ &\text{ and } \ \ \ \ H_2(Y',Y_L;\Z[t^{\pm 1}])=\Z[t^{\pm 1}]^n \\ H_1(Y,Y_L;\Z[t^{\pm 1}])=0\ \ \ \ &\text{ and } \ \ \ \ H_1(Y',Y_L;\Z[t^{\pm 1}])=0, \end{align*} we deduce that the maps~$H_2(Y;\Z[t^{\pm 1}]) \to H_2(Y,Y_L;\Z[t^{\pm 1}])$ and~$H_2(Y';\Z[t^{\pm 1}]) \to H_2(Y',Y_L;\Z[t^{\pm 1}])$ are both trivial leading to the short exact sequences \begin{align*} & 0 \to H_2(Y,Y_L;\Z[t^{\pm 1}]) \to H_1(Y_L;\Z[t^{\pm 1}]) \to H_1(Y;\Z[t^{\pm 1}]) \to 0, \\ & 0 \to H_2(Y',Y_L;\Z[t^{\pm 1}]) \to H_1(Y_L;\Z[t^{\pm 1}]) \to H_1(Y';\Z[t^{\pm 1}]) \to 0. \end{align*} Next we apply the facts (a) and (b) on projective dimension given above. Since the torsion module~$H_1(Y;\Z[t^{\pm 1}])$ is presented by~$(H,\lambda)$, it has projective dimension at most~$1$ and since $H_2(Y,Y_L;\Z[t^{\pm 1}])$ is free, the first short exact sequence implies that $H_1(Y_L;\Z[t^{\pm 1}])$ has projective dimension at most~$1$. Since~$ H_2(Y',Y_L;\Z[t^{\pm 1}])$ is free, the second short exact sequence now implies that~$\pd (H_1(Y';\Z[t^{\pm 1}])) \leq 1$ as required. As explained above, since~$\pd (H_1(Y';\Z[t^{\pm 1}])) \leq 1$ and~$\Delta_{Y'} \doteq 1$, Remark~\ref{rem:AlexPoly} now allow us to conclude that~$H_1(Y';\Z[t^{\pm 1}])=0$, as required. \end{proof} \subsection{Step 3: every~$\Z[t^{\pm 1}]$-homology~$S^1 \times S^2$ bounds a homotopy circle.} \label{sub:Step3} The goal of this subsection is to prove the following theorem, which is a generalisation of a key step in the proof that Alexander polynomial one knots are topologically slice. \begin{theorem} \label{thm:Step3} Let~$Y$ be a~$3$-manifold with an epimorphism~$\pi_1(Y) \twoheadrightarrow \Z$ whose Alexander module vanishes, i.e.~$H_1(Y;\Z[t^{\pm 1}])=0$. Then there exists a~$4$-manifold~$B$ with a homotopy equivalence~$g \colon B \xrightarrow{\simeq} S^1$ so that~$\partial B \cong Y$ and~$\pi_1(Y) \twoheadrightarrow \pi_1(B) \xrightarrow{g_*}\pi_1(S^1) \cong \Z$ agrees with~$\varphi.$ \end{theorem} \begin{proof} This proof can be deduced by combining various arguments from~\cite[Section~11.6]{FreedmanQuinn}, so we only outline the main steps. First we use framed bordism to find some 4-manifold whose boundary is~$Y$, with a map to~$S^1$ realising~$\varphi$, as in~\cite[Lemma 11.6B]{FreedmanQuinn}. This map might not be a homotopy equivalence, but we then we will use surgery theory to show that~$W$ is bordant rel.\ boundary to a homotopy circle. To start the first step, recall that every oriented 3-manifold admits a framing of its tangent bundle. Using the axioms of a generalised homology theory, we have \[\Omega_3^{\fr}(B\Z) \cong \Omega_3^{\fr} \oplus \Omega_2^{\fr} \cong \Z/24 \oplus \Z/2.\] We consider the image of~$(Y,\varphi)$ in~$\Omega_3^{\fr}(B\Z)$. The first summand can be killed by changing the choice of framing of the tangent bundle of~$Y$; see~\cite[proof of Lemma 11.6B]{FreedmanQuinn} for details. 
The second summand is detected by an Arf invariant, which vanishes thanks to the assumption that~$H_1(Y;\Z[t^{\pm 1}])=0$; details are again in~\cite[proof of Lemma 11.6B]{FreedmanQuinn}. Therefore there exists a framed 4-manifold~$W$ with framed boundary~$Y$, such that the map~$Y \to S^1$ associated with~$\varphi$ extends over~$W$. Now we use surgery theory to show that $W$ is bordant rel.\ boundary to a homotopy circle. Consider the mapping cylinder \begin{equation} \label{eq:MappingCylinder} X := \mathcal{M}(Y \xrightarrow{\varphi} S^1). \end{equation} We claim that~$(X,Y)$ is a Poincar\'{e} pair. The argument is similar to~\cite[Proposition~11.C]{FreedmanQuinn}. As~$X \simeq S^1$, the connecting homomorphism from the exact sequence of the pair $(X,Y)$ gives an isomorphism~$\partial \colon H_4(X,Y) \cong H_3(Y) \cong \Z$. We then define the required fundamental class as~$[X,Y]:=\partial^{-1}([Y]) \in H_4(X,Y)$. Using~$H_1(Y;\Z[t^{\pm 1}])=0$, one can now use the same argument as in~\cite[Lemma 3.2]{FriedlTeichner} to show that the following cap product is an isomorphism: $$- \cap [X,Y] \colon H^i(X,Y;\Z[t^{\pm 1}]) \to H_{4-i}(X;\Z[t^{\pm 1}]).$$ This concludes the proof of the fact that~$(X,Y)$ is a Poincar\'{e} pair. The end of the argument follows from the exactness of the surgery sequence for $(X,Y)$ as in~\cite[Proposition 11.6A]{FreedmanQuinn} but we outline some details for the reader unfamiliar with surgery theory. Since~$(X,Y)$ is a Poincar\'e pair, we can consider its set~$\mathcal{N}(X,Y)$ of normal invariants. The set~$\mathcal{N}(X,Y)$ consists of normal bordism classes of degree one normal maps to~$X$ that restrict to a homeomorphism on the boundary, where a bordism restricts to a product cobordism homeomorphic to~$Y \times I$ between the boundaries. The next paragraph uses the map~$W \to S^1$ to define an element of~$\mathcal{N}(X,Y)$. Via the homotopy equivalence~$X \simeq S^1$, the map~$Y \to S^1 \simeq X$ extends to~$F \colon W \to S^1 \simeq X$. It then follows from the naturality of the long exact sequence of the pairs~$(W,Y)$ and~$(X,Y)$ that~$F$ has degree one. We therefore obtain a degree one map $(F,\id_Y) \colon (W,Y) \to (X,Y)$. To upgrade~$(F,\id_Y)$ to a degree one normal map, take a trivial (stable) bundle $\xi \to X$ over the codomain. Normal data is determined by a (stable) trivialisation of $TW \oplus F^*\xi$. The framing of~$W$ provides a trivialisation for the first summand, while any choice of trivialisation for $F^*\xi$ can be used for the second summand. We therefore have a degree one normal map \[\big( (F,\id_Y) \colon (W,Y) \to (X,Y)\big) \in \mathcal{N}(X,Y).\] Our goal is to change $W$ to $W\#^{\ell} Z$, where $Z = E_8$, and then to do surgery on the interior of the domain~$(W \#^\ell Z,Y)$ to convert $F$ into a homotopy equivalence $(F',\id_Y) \colon (B,Y) \to (X,Y)$. Since the fundamental group~$\Z$ is a good group, surgery theory says that this is possible if and only if $\ker(\sigma)$ is nonempty~\cite[Section 11.3]{FreedmanQuinn}. Here \[\sigma \colon \mathcal{N}(X,Y) \to L_4(\Z[t^{\pm 1}])\] is the surgery obstruction map. 
Essentially, it takes the intersection pairing on~$H_2(W;\Z[t^{\pm 1}])$ and considers it in the Witt group of nonsingular, Hermitian, even forms over~$\Z[t^{\pm 1}]$ up to stable isometry, where stabilisation is by hyperbolic forms \[\left(\Z[t^{\pm 1}] \oplus \Z[t^{\pm 1}],\begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix}\right).\] Shaneson splitting~\cite{ShanesonSplitting} implies that $L_4(\Z[t^{\pm 1}]) \cong L_4(\Z) \oplus L_3(\Z) \cong L_4(\Z) \cong 8\Z.$ The last isomorphism is given by taking the signature. We take the connected sum of~$W \to X$ with copies of~$(E_8 \to S^4)$ or~$(-E_8 \to S^4)$, to arrange that the signature becomes zero. Then the resulting normal map~$W \#^{\ell} Z \to X$ has trivial surgery obstruction in~$L_4(\Z[t^{\pm 1}])$ (i.e.\ lies in $\ker(\sigma)$) and therefore is normally bordant to a homotopy equivalence $(F',\id_Y) \colon (B,Y) \to (X,Y)$, as desired. Since the mapping cylinder $X$ from~\eqref{eq:MappingCylinder} is a homotopy circle, so is $B$. This concludes the proof of the theorem. \end{proof} \subsection{Step 4: constructing a~$4$-manifold that induces the given boundary isomorphism} \label{sub:Step4} We begin by recalling the notation and outcome of Proposition~\ref{prop:Step2}. Let~$b \in \Iso(\partial \lambda,\unaryminus\Bl_Y)$ be an isometry of linking forms. Pulling this back to~$H$, we obtain a presentation $$0 \to H \xrightarrow{\widehat{\lambda}} H^* \xrightarrow{\varpi} H_1(Y;\Z[t^{\pm 1}]) \to 0$$ of~$Y$. Pick generators~$x_1,\ldots,x_n$ for~$H$ and endow~$H^*$ with the dual basis~$x_1^*,\ldots,x_n^*$. Let~$Q$ be the matrix of~$\lambda$ with respect to these bases. By Propositions~\ref{prop:Step1} and~\ref{prop:Step2}, the classes~$\pi(x_1^*),\ldots,\pi(x_n^*)$ can be represented by a framed link~$\widetilde{L} \subset Y^\infty$ in covering general position with transposed equivariant linking matrix~$-Q^{-1}$ and the~$3$-manifold~$Y'$ obtained by surgery on~$L=p(\widetilde{Y})$ satisfies~$H_1(Y';\Z[t^{\pm 1}])=0$. Applying Theorem~\ref{thm:Step3}, there is a topological~$4$-manifold~$B$ with boundary~$Y'$ and such that~$B \simeq S^1$. We now define a~$4$-manifold~$M$ with boundary~$Y$ as follows: begin with~$Y\times I$ and attach 2-handles to~$Y\times \{1\}$ along the framed link~$L:=p(\widetilde{L})$~(here recall that $p \colon Y^\infty \to Y$ denotes the covering map), so that the resulting boundary is~$Y'$. Call this 2-handle cobordism~$W$, and observe that $\partial^-W=-Y$. We can now cap $\partial^+W\cong Y'$ with~$-B$. Since $W\cup -B$ has boundary $-Y$, we define $M$ to be $-W\cup B$. We can then consider the corresponding~$\Z$-cover: \begin{align*} -M^\infty&:=\Big( (Y^\infty \times [0,1]) \cup \bigcup_{i=1}^n \bigcup_{j_i \in\Z} t^{j_i} h_i^{(2)} \Big) \cup_{{Y'}^\infty} -B^\infty =W^\infty \cup_{{Y'}^\infty} -B^\infty\\ -M&:=\Big( (Y \times [0,1]) \cup \bigcup_{i=1}^n h_i^{(2)} \Big) \cup_{{Y'}} -B=:W \cup_{{Y'}} -B, \end{align*} in which the~$2$-handles are attached along the framed link~$\widetilde{L}$ upstairs and its framed projection~$L$ downstairs. We begin by verifying some properties of $M$. \begin{lemma} \label{lem:Pi1Z} The $\Z$-manifold $M$ has boundary $Y$. \end{lemma} \begin{proof} We first prove that $\pi_1(M)\cong \Z$. 
A van Kampen argument shows that~$\pi_1(M)$ is obtained from~$\pi_1(B)$ by modding out the~$[\iota(\widetilde{K}'_i)]$ where~$\widetilde{K}_1',\ldots,\widetilde{K}_n'$ denote the components of the framed link dual to~$\widetilde{L}$ and where~$\iota \colon \pi_1(Y') \to \pi_1(B)$ is the inclusion induced map. Recall from Lemma~\ref{lem:coeff-system} and Remark~\ref{rem:CoefficientSystemY'} that the epimorphism~$\varphi \colon \pi_1(Y) \twoheadrightarrow \Z$ induces an epimorphism~$ \varphi' \colon \pi_1(Y') \twoheadrightarrow \Z$ and that~$\varphi'([K_i'])=0$ for~$i=1,\ldots,n$. Since Theorem~\ref{thm:Step3} ensures that~$\iota$ agrees with $\varphi'$, we deduce that the classes~$[\iota(\widetilde{K}'_i)]$ are trivial and therefore~$\pi_1(M)\cong \pi_1(B) \cong \Z$. Next we argue that as a $\Z$-manifold $M$ has boundary $Y$. Since the inclusion induced map $\pi_1(Y) \to \pi_1(W)$ is surjective, it suffices to prove that the inclusion induced map $\pi_1(W) \to \pi_1(M)$ is surjective. This follows from van Kampen's theorem: as $\pi_1(Y') \to \pi_1(B)$ is surjective, so is $\pi_1(W) \to \pi_1(M)$. \end{proof} It is not too hard to compute, as we will do in Proposition~\ref{prop:BasisH2} below, that $H_2(M;\Z[t^{\pm 1}])$ is f.g. free of rank $n$. To complete step 4, we must prove the following two claims. \begin{enumerate} \item The equivariant intersection form~$\lambda_M$ of~$M$ is represented by~$Q$; i.e.~$\lambda_M$ is isometric to~$\lambda$. \item The~$4$-manifold~$M$ satisfies~$b_M =b \in \Iso(\partial \lambda,\unaryminus\Bl_Y)/\Aut(\lambda)$. \end{enumerate} The proof of the first claim follows a standard outline; for the hasty reader we will give the outline here, and for the record we provide a detailed proof at the end of the subsection. \begin{proof}[Proof outline of claim (1)] Since by setup the transposed equivariant linking matrix of the framed link~$\widetilde{L}$ is~$-Q^{-1},$ Proposition~\ref{lem:InverseMatrix} shows that the transposed equivariant linking matrix of the dual link~$\widetilde{L}'$ is~$Q$. Thus, it suffices to show that~$\lambda_M$ is presented by the transposed equivariant linking matrix of~$\widetilde{L}'$. While it was natural initially to build~$W^\infty$ by attaching 2-handles to~$Y^\infty\times I$, in what follows it will be more helpful to view~$-W^\infty$ as being obtained from~$Y'\times I$ by attaching~$2$-handles to the framed link~$\widetilde{L}'$ dual to~$\widetilde{L}$. In particular, the components of~$\widetilde{L}'$ bound the cores of the~$2$-handles. Recall that~$H_1(Y';\Z[t^{\pm 1}])=0$ by Proposition~\ref{prop:Step2} and that~$H_2(B;\Z[t^{\pm 1}])=0$ by Proposition~\ref{thm:Step3}. Let~$\Sigma_i$ denote a surface in~${Y'}^\infty$ with boundary~$\widetilde{K}'_i$, and let~$F_i$ be the surface in~$M$ formed by~$\Sigma_i$ capped with the core of the 2-handle attached along~$\widetilde{K}'_i$. The proof that~$H_2(M;\Z[t^{\pm 1}])$ is freely generated by the~$[F_i]$ and that the equivariant intersection form~$\lambda_M$ is represented by the transposed equivariant linking matrix of~$\widetilde{L'}$ (which we showed above is~$Q$), is now routine; the details are expanded in Propositions~\ref{prop:BasisH2} and~\ref{thm:IntersectionForm} below. \end{proof} As promised, the section now concludes with a detailed proof of the claims. Firstly in Construction~\ref{cons:BasisH2}, we give the detailed construction of the surfaces~$F_i$ that were mentioned in the proof outline. 
Secondly, in Proposition~\ref{prop:BasisH2} we show that these surfaces lead to a basis of~$H_2(M;\Z[t^{\pm 1}])$. Thirdly, in Proposition~\ref{thm:IntersectionForm} we conclude the proof of the first claim by showing that with respect to this basis,~$\lambda_M$ is represented by the transposed equivariant linking matrix of~$\widetilde{L'}$. Finally, in Proposition~\ref{prop:step4}, we prove the second claim. \begin{construction} \label{cons:BasisH2} For~$i=1,\ldots,n$, we define the closed surfaces~$F_i \subset -W^\infty \subset M^\infty$ that were mentioned in the outline. As~$H_1(Y';\Z[t^{\pm 1}])=0$ (by Step 2), each component~$\widetilde{K}_i'$ of~$\widetilde{L}'$ bounds a surface~$\Sigma_i \subset {Y'}^\infty$. Additionally, each~$\widetilde{K}'_i$ (considered in $Y' \times \lbrace 1 \rbrace$) bounds the core of one of the (lifted) 2-handles in the dual handle decomposition of~$-W$. Define the surface~$F_i \subset -W^\infty \subset M^\infty$ by taking the union of~$\Sigma_i$ with this core. \end{construction} The next proposition shows that the surfaces~$F_i'$ give a basis for~$H_2(M;\Z[t^{\pm 1}])$. It is with respect to this basis that we will calculate~$\lambda_M$ in Proposition~\ref{thm:IntersectionForm} below. \begin{proposition} \label{prop:BasisH2} The following isomorphisms hold: \begin{align*} H_2(-W;\Z[t^{\pm 1}])=\Z \oplus \bigoplus_{i=1}^n \Z[t^{\pm 1}] [F_i], \ \ \ \text{ and } \ \ \ H_2(M;\Z[t^{\pm 1}])=\bigoplus_{i=1}^n \Z[t^{\pm 1}] [F_i]. \end{align*} \end{proposition} \begin{proof} These follow by standard arguments using Mayer-Vietoris, which we outline now. The first equality follows from the observation that~$-W^\infty$ is obtained from~${Y'}^\infty\times [0,1]$ by attaching the dual 2-handles to the $h^{(2)}_i$. Morally, since~$H_1(Y';\Z[t^{\pm 1}])=0$ (Step 2), each dual 2-handle contributes a free generator. The additional~$\Z$ summand comes from~$H_2(Y' \times [0,1];\Z[t^{\pm 1}]) \cong\mathbb{Z}$. More formally, one applies Mayer-Vietoris with~$\Z[t^{\pm 1}]$-coefficients to the decomposition of $W$ as the union of $Y' \times [0,1]$ with the dual 2-handles, which since the dual 2-handles are contractible and $H_1(Y';\Z[t^{\pm 1}])=0$ yields the short exact sequence: $$ 0 \to H_2(Y' \times [0,1];\Z[t^{\pm 1}]) \to H_2(-W;\Z[t^{\pm 1}]) \xrightarrow{\partial} H_1(\overline{\nu}(L');\Z[t^{\pm 1}]) \to 0.$$ Since $\varphi'([L']) =0$, $H_1(\overline{\nu}(L');\Z[t^{\pm 1}]) \cong \bigoplus_{i=1}^n \Z[t^{\pm 1}]$, generated by the $[K_i']$. Mapping each~$[K_i']$ to~$[F_i]$ determines a splitting. For the second equality, note that since~$B$ is a homotopy circle and $g_* \colon \pi_1(B) \to \Z$ is an isomorphism, $B$ has no (reduced)~$\Z[t^{\pm 1}]$-homology. The Mayer-Vietoris exact sequence associated to the decomposition~$M=-W \cup_{Y' \times \{1\}} B$ therefore yields the short exact sequence $$ 0 \to H_2(Y';\Z[t^{\pm 1}]) \to H_2(-W;\Z[t^{\pm 1}]) \to H_2(M;\Z[t^{\pm 1}]) \to 0.$$ Appealing to our computation of~$H_2(-W;\Z[t^{\pm 1}])$, we deduce that~$H_2(M;\Z[t^{\pm 1}])$ is freely generated by the~$[F_i]$. \end{proof} Now we prove the first claim of the previously mentioned outline. \begin{proposition} \label{thm:IntersectionForm} With respect to the basis of~$H_2(M;\Z[t^{\pm 1}])$ given by the~$[F_1],\ldots,[F_n]$, the equivariant intersection form~$\lambda_M$ of~$M$ is given by the transposed equivariant linking matrix of the framed link~$\widetilde{L}'$ dual to~$\widetilde{L}$. 
\end{proposition} \begin{proof} Recall from Construction~\ref{cons:BasisH2} that for $i=1,\ldots,n$, the surface $F_i \subset -W^\infty \subset M^\infty$ was obtained as the union of a surface $\Sigma_i \subset {Y'}^\infty$ whose boundary is~$\widetilde{K}_i'$ with the core of a (lifted) $2$-handle in the dual handle decomposition of $W$. For $i=1,\ldots,n$, define $F_i'$ to be a surface isotopic to $F_i$ obtained by pushing the interior of $\Sigma_i$ into $B^\infty$. Let $\Sigma_i'$ be such a push-in. Since $F_i$ and $F_i'$ are isotopic for every $i=1,\dots,n$, we can use the $F_i'$ to calculate~$\lambda_M$. Fix real numbers $0<s_1 < \cdots < s_n <1$. We model $\Sigma_i'$ in the coordinates of a collar neighborhood $\partial B \times [0,1]$ as \[\Sigma_i' := (\partial \Sigma_i \times [0,s_i]) \cup (\Sigma_i \times \{s_i\}).\] We start by calculating the equivariant intersection form~$ \lambda_M([F_i'],[F_j'])$ for~$i \neq j$. Since the aforementioned cores of the dual 2-handles are pairwise disjoint, we obtain $$\overline{\lambda_M([F_i'],[F_j'])}=F_i'\cdot_{\infty,M} F_j'=\Sigma_i'\cdot_{\infty, B} \Sigma_j'.$$ Recall that we use~$A_{\widetilde{L}'}$ to be the linking matrix of the framed link~$L'$. It therefore remains to show that~$\Sigma_i'\cdot_{\infty, B} \Sigma_j'=(A_{\widetilde{L}'})_{ij}$. Assume without loss of generality that $i>j$, and so $s_i > s_j$. Also note that $\partial \Sigma_i \cap \partial \Sigma_j = \emptyset$. By inspecting the locations of the intersections, it follows that \[\Sigma_i'\cdot_{\infty,B}\Sigma_j' = (\partial\Sigma_i \times [0,s_i]) \cdot_{\infty,B} (\Sigma_j \times \{s_j\}) = \partial \Sigma_i\cdot_{\infty,\partial B}\Sigma_j=\ell k_{\Q(t)}(\widetilde{K}_i',\widetilde{K}_j'),\] where the last equality uses the definition of the equivariant linking number in~$\partial B=Y'$. For~$i \neq j$, we have therefore proved that $$\lambda_M([F_j'],[F_i'])=\Sigma_i'\cdot_{\infty, B} \Sigma_j'=\ell k_{\Q(t)}(\widetilde{K}_i',\widetilde{K}'_j).$$ It remains to prove that~$\lambda_M([F_i'],[F_i'])=(A_{\widetilde{L}'})_{ii}$. By definition of the dual framed knot~$\widetilde{K}_i'$, we have~$(A_{\widetilde{L}'})_{ii}=\ell k_{\Q(t)}(\widetilde{K}_i',\pi_i')$, where~$\pi_i'$ denotes the framing curve of $\widetilde{K}_i'$. Perform a small push-off of the surface~$\Sigma_i' \subset B^\infty$ to obtain a surface~$\Sigma_i'' \subset B^\infty$ isotopic to~$\Sigma_i' \subset B^\infty$ with boundary~$\partial \Sigma_i''=\pi_i'$. Cap off $\Sigma_i''$ with a parallel disjoint copy of the cocore of the 2-handle, yielding a closed surface $F_i''$ that is isotopic to $F_i'$, and such that all the intersections between the two occur between $\Sigma_i'$ and $\Sigma_i''$. As in the~$i\neq j$ case, we then have $$\lambda_M([F_i'],[F_i'])=\Sigma_i'\cdot_{\infty,B}\Sigma_i''=\ell k_{\Q(t)}(\widetilde{K}_i',\pi_i').$$ We have therefore shown that the equivariant intersection form of~$M$ is represented by the transposed linking matrix~$A_{\widetilde{L}'}^T$ and this concludes the proof of the proposition. \end{proof} Finally, we prove the second claim of our outline, thus completing step 4. \begin{proposition}\label{prop:step4} Let~$Y$ be a~$3$-manifold with an epimorphism~$\varphi \colon \pi_1(Y) \twoheadrightarrow \Z$ whose Alexander module is torsion, and let~$(H,\lambda)$ be a nondegenerate Hermitian form presenting~$Y$. 
If~$b \in \Iso(\partial \lambda,\unaryminus\Bl_Y)/\Aut(\lambda)$ is an isometry, then there is a~$\Z$-manifold~$M$ with equivariant intersection form~$\lambda_M\cong \lambda$, boundary~$Y$ and with~$b_M=b$. \end{proposition} \begin{proof} Let~$M$ be the 4-manifold with boundary~$Y$ constructed as described above. The manifold~$M=-W \cup_{Y'} B$ comes with a homeomorphism~$g \colon \partial M \cong Y$, because~$-W$ is obtained from~$Y \times [0,1]$ by adding~$2$-handles. We already explained why~$M$ has intersection form isometric to~$\lambda$ but we now make the isometry more explicit. Define an isomorphism~$F \colon H \to H_2(M;\Z[t^{\pm 1}])$ by mapping~$x_i$ to~$[F_i]$, where the~$F_i \subset M^\infty$ are the surfaces built in Construction~\ref{cons:BasisH2}. This is an isometry because, by combining Proposition~\ref{thm:IntersectionForm} with Lemma~\ref{lem:InverseMatrix}, we get $$\lambda_M([F_i],[F_j])=(A_{\widetilde{L}'})_{ji}=-(A_{\widetilde{L}}^{-1})_{ji} =Q_{ij}=\lambda(x_i,x_j).$$ We now check that~$b_M = b$ by proving that~$b=g_* \circ D_M \circ \partial F$. This amounts to proving that the bottom square of the following diagram commutes (we refer to Construction~\ref{cons:PresentationAssociatedToManifold} if a refresher on the notation is needed): $$ \[email protected]@C1.7cm{ H^* \ar[r]^-{F^{-*},\cong} \ar@{->>}[d]^-{\operatorname{proj}}&H_2(M;\Z[t^{\pm 1}])^* \ar[r]^-{\operatorname{PD} \circ \operatorname{ev}^{-1},\cong}\ar@{->>}[d]^-{\operatorname{proj}}&H_2(M,\partial M;\Z[t^{\pm 1}])\ar@{->>}[d]^-{\delta_M} \\ \coker(\widehat{\lambda}) \ar[r]^-{\partial F,\cong}\ar[d]^-{=}& \coker(\widehat{\lambda}_M) \ar[r]^-{D_M,\cong}& H_1(\partial M;\Z[t^{\pm 1}])\ar[d]^-{g_*,\cong} \\ \coker(\widehat{\lambda}) \ar[rr]^-{b,\cong}&&H_1(Y;\Z[t^{\pm 1}]). } $$ The top squares of this diagram commute by definition of~$\partial F$ and~$D_M$. Since the top vertical maps are surjective, the commutativity of the bottom square is now equivalent to the commutativity of the outer square. It therefore remains to prove that~$g_* \circ \delta_M \circ (\operatorname{PD} \circ \operatorname{ev}^{-1}) \circ F^{-*}=\pi$; (recall that by definition~$\pi=b \circ \operatorname{proj}$). In fact, it suffices to prove this on the~$x_i^*$ as they form a basis of~$H^*$. Writing~$c_i$ for the core of the 2-handles attached to~$Y \times [0,1]$, union a product of their attaching circles with $[0,1]$ in $Y \times [0,1]$, note that the $c_i$ intersects $F_j$ in $\delta_{ij}$ points, since $F_j$ is built from a surface in ${Y'}^{\infty}$ union the cocore of the $j$th 2-handle. We have $$g_* \circ \delta_M \circ (\operatorname{PD} \circ \operatorname{ev}^{-1}) \circ F^{-*}(x_i^*) =g_* \circ \delta_M \circ (\operatorname{PD} \circ \operatorname{ev}^{-1})([F_i]^*) =g_* \circ \delta_M ([\widetilde{c}_i]) =[\widetilde{K}_i]=\pi(x_i^*). $$ Here we use successively the definition of~$F$, the geometric interpretation of~$\operatorname{PD} \circ \operatorname{ev}^{-1}$, the fact that~$\widetilde{g}(\partial \widetilde{c}_i)=\widetilde{K}_i$ and the definition of the~$\widetilde{K}_i$. Therefore the outer square commutes as asserted. This concludes the proof that~$b=g_* \circ D_M \circ \partial F$ and therefore~$b_M = b$, as required. 
\end{proof} \subsection{Step 5: fixing the Kirby-Siebenmann invariant and concluding} \label{sub:Step5} The conclusion of Theorem~\ref{thm:MainTechnical} will follow promptly from Proposition~\ref{prop:step4} once we recall how, in the odd case, it is possible to modify the Kirby-Siebenmann invariant of a given $4$-manifold with fundamental group~$\Z$. This is achieved using the star construction, a construction which we now recall following~\cite{FreedmanQuinn} and~\cite{StongRealization}. In what follows, $*\C P^2$ denotes the Chern manifold, i.e.\ the unique simply-connected topological $4$-manifold homotopy equivalent to~$\C P^2$ but with~$\ks(*\C P^2)=1$. \medbreak Let~$M$ be a topological~$4$-manifold with (potentially empty) boundary, good fundamental group~$\pi$ and such that the second Stiefel-Whitney class of the universal cover $w_2(\wt{M})$ is nontrivial. There is a~$4$-manifold~$*M$, called the \emph{star partner of~$M$} that is rel.\ boundary homotopy equivalent to~$M$ but has the opposite Kirby-Siebenmann invariant from that of~$M$~\cite[Theorem 10.3~(1)]{FreedmanQuinn}. See~\cite{teichner-star} or~\cite[Propostion~5.8]{KPR-counterexamples} for a more general condition under which a star partner exists. \begin{remark} For fundamental group $\Z$, every non-spin 4-manifold has $w_2(\wt{M}) \neq 0$. To see this, we use the exact sequence \[0 \to H^2(B\pi;\Z/2) \to H^2(M;\Z/2) \xrightarrow{p^*} H^2(\wt{M};\Z/2)^\pi,\] where $\pi := \pi_1(M)$. This can be deduced from the Leray-Serre spectral sequence for the fibration $\wt{M} \to M \to B\pi$; see e.g.\ \cite[Lemma~3.17]{KLPT}. For $\pi =\Z$ the first term vanishes, so $p^*$ is injective. By naturality, $p^*(w_2(M)) = w_2(\wt{M})$, so $w_2(M) \neq 0$ implies $w_2(\wt{M}) \neq 0$ as desired. It follows that for a non-spin 4-manifold $M$ with fundamental group $\Z$, \cite[Theorem 10.3]{FreedmanQuinn} applies and there is a star partner. \end{remark} To describe $*M$, consider the~$4$-manifold~$W:=M \# (*\C P^2)$ and note that the inclusions $M \hookrightarrow W$ and~$* \C P^2 \hookrightarrow W$ induce a splitting \begin{equation} \label{eq:StarSplitting} \pi_2(M) \oplus (\pi_2(*\C P^2) \otimes_\Z \Z[\pi]) \xrightarrow{\cong} \pi_2(W). \end{equation} By~\cite[Theorem 10.3~(1)]{FreedmanQuinn} (cf.\ \cite[Proposition~5.8]{KPR-counterexamples}) there exists a $4$-manifold~$*M$ and an orientation-preserving homeomorphism $$ h \colon W \xrightarrow{\cong} *M \# \C P^2$$ that respects the splitting on $\pi_2$ displayed in~\eqref{eq:StarSplitting}. The star partner $*M$ is also unique up to homeomorphism, by \cite[Corollary~1.2]{StongUniqueness}. To be more precise about the condition on $h$, let $\iota \colon \pi_2(*\C P^2) \otimes_\Z \Z[\pi] \to \pi_2(M \# (*\C P^2))=\pi_2(W)$ denotes the split isometric injection induced by the zigzag $*\C P^2 \leftarrow *\C P^2 \sm \mathring{D}^4 \rightarrow W$, and let $\operatorname{incl}_* \colon \pi_2(\C P^2) \to \pi_2(*M \# \C P^2)$ be defined similarly. Then we say that $h$ \emph{respects the splitting on $\pi_2$} if for some isomorphism $f \colon \pi_2(*\C P^2) \xrightarrow{\cong} \pi_2(\C P^2)$, the following diagram commutes \begin{equation*} \xymatrix{ \pi_2(*\C P^2) \otimes_\Z \Z[\pi] \ar@{^{(}->}[r]^-\iota \ar[d]_{f \otimes \id}^{\cong} & \pi_2(W) \ar[d]^{h_*}_\cong \\ \pi_2(\C P^2) \otimes_\Z \Z[\pi] \ar@{^{(}->}[r]^-{\operatorname{incl}_*} & \pi_2(*M \# \C P^2). 
} \end{equation*} Since both horizontal maps in this diagram are split, this implies that $h_*$ induces an isomorphism~$g \colon \pi_2(M) \xrightarrow{\cong} \pi_2(*M)$, and so $h_*$ splits as follows: \[h_* = (g_*, f_* \otimes \id) \colon \pi_2(M) \oplus (\pi_2(*\C P^2) \otimes_\Z \Z[\pi]) \xrightarrow{\cong} \pi_2(*M) \oplus (\pi_2(\C P^2) \otimes_\Z \Z[\pi]).\] We recall that~$M$ and~$*M$ are orientation-preserving homotopy equivalent rel.\ boundary. This will ensure that their automorphism invariants agree. The argument is due to Stong~\cite[Section~2]{StongUniqueness}, and a proof can also be found in \cite[Lemma~5.7]{KPR-counterexamples}. \begin{proposition} \label{prop:StarHomotopyEquivalence} If~$M$ is a topological~$4$-manifold with boundary, good fundamental group~$\pi$ and whose universal cover has nontrivial second Stiefel-Whitney class, then~$M$ is orientation-preserving homotopy equivalent rel.\ boundary to its star partner~$*M$. \end{proposition} We are ready to prove Theorem~\ref{thm:MainTechnical}, whose statement we now recall for the reader's convenience. Let~$Y$ be a~$3$-manifold with an epimorphism~$\pi_1(Y) \twoheadrightarrow \Z$ whose Alexander module is torsion, and let~$(H,\lambda)$ be a form presenting~$Y$. If~$b \in \Iso(\partial \lambda,\unaryminus\Bl_Y)/\Aut(\lambda)$ is an isometry, then there is a~$\Z$-manifold~$M$ with equivariant intersection form~$\lambda_M$, boundary~$Y$ and with~$b_M=b$. If the form is odd, then~$M$ can be chosen to have either~$\ks(M)=0$ or~$\ks(M)=1$. We now conclude the proof of this theorem. \begin{proof}[Proof of Theorem~\ref{thm:MainTechnical}] In Proposition~\ref{prop:step4}, we proved the existence of a $\Z$~manifold~$M$ with equivariant intersection form~$\lambda_M$, boundary~$Y$ and with~$b_M = b$. It remains to show that if~$\lambda$ is odd, then~$M$ can be chosen to have either~$\ks(M)=0$ or~$\ks(M)=1$. This is possible by using the star partner $*M$ of $M$. Indeed Proposition~\ref{prop:StarHomotopyEquivalence} implies that $M$ and $*M$ are homotopy equivalent rel.\ boundary and therefore Remark~\ref{rem:HomotopyEquivalence} ensures that~$b_{*M} =b_{M}$ is unchanged. \end{proof} \subsection{An example} \label{sub:Example} Remark~\ref{rem:KSProof} shows that if $M_0$ and $M_1$ are spin~$4$-manifolds with $\pi_1(M_i) \cong \Z$, boundary homeomorphic to~$(Y,\varphi)$, isometric equivariant intersection form, and the same automorphism invariant, then their Kirby-Siebenmann invariants agree. The next proposition shows that the condition on the automorphism invariant is necessary. After the proof, we offer an extended example to illustrate the proof of Theorem \ref{thm:MainTechnicalIntro} and to show that it is possible to work with the automorphism invariants and the $\Q(t)$-valued linking numbers explicitly. \begin{proposition} \label{prop:KSSpin} There are two spin~$4$-manifolds~$M_0$ and~$M_1$ with $\pi_1 \cong \Z$, equivariant intersection form isometric to~$\lambda:= (-8)$ and boundary homeomorphic to~$Y := \unaryminus L(8,1) \# (S^1 \times S^2)$ that are distinguished both by their Kirby-Siebenmann invariants and their automorphism invariants. \end{proposition} \begin{proof} The manifolds~$M_0$ and~$M_1$ are obtained by boundary connect summing~$S^1 \times D^3$ to simply-connected~$4$-manifolds~$V_0$ and~$V_1$ that we now describe. 
Up to homeomorphism, there are two simply-connected~$4$-manifolds~$V_0$ and~$V_1$ with intersection form~$\lambda' = (-8) \colon \Z \times \Z \to \Z$, and boundary homeomorphic to the lens space~$Y' := \unaryminus L(8,1)$. They are distinguished by Boyer's simply-connected version of the automorphism invariant~\cite[Corollary E]{BoyerRealization}. We construct them explicitly and show that~$\ks(V_0) \neq \ks(V_1)$. The~$(-8)$-trace on the unknot,~$V_0:=X_{-8}(U)$, gives the first of these~$4$-manifolds. Towards describing~$V_1$, first note that from ~$\unaryminus L(8,1)$ one can obtain the integer homology sphere~$S_{+1}^3(T_{2,3})$ by a Dehn surgery along the framed knot~$K_1$ illustrated in Figure~\ref{fig:surgeryinlens}. Note also that~$S_{+1}^3(T_{2,3})$ bounds a contractible topological 4-manifold~$C$. We can now build~$\unaryminus V_1$ by beginning with~$\unaryminus L(8,1)\times I$, attaching a~$+1$ framed 2-handle along~$K_1$, and capping off with~$\unaryminus C$. The resulting manifold~$\unaryminus V_1$ has~$\partial (\unaryminus V_1)=L(8,1)$, so~$\partial V_1=\unaryminus L(8,1)$ as desired. \begin{figure}[!htbp] \center \begin{overpic}[width=0.5\textwidth,tics=10]{Surgeryinlenscropped} \put (22,5) {\textcolor{teal}{$-1$}} \put (19,21) {$-8$} \put (65,5) {$1$} \put (95,5) {$1$} \end{overpic} \caption{Peforming $-1$ surgery on the blue knot $K_1$ in the lens space $L(-8,1)$ yields the 3-manifold obtained by $+1$ surgery on the right handed trefoil in $S^3$. Each frame of the figure should be imagined to be vertically braid closed. The first homeomorphism indicated is a Rolfsen twist, the second is an isotopy in $S^3$. } \label{fig:surgeryinlens} \end{figure} The manifolds~$V_0$ and~$V_1$ are simply-connected, spin, have boundary homeomorphic to~$\unaryminus L(8,1)$, and intersection form isometric to~$(-8)$. We have that~$\ks(V_0)=0$ (because~$V_0$ is smooth), whereas~$\ks(V_1)=\ks(C)=\mu(S_{+1}^3(T_{2,3}))=\operatorname{Arf}(T_{2,3})=1$. Here~$\mu$ denotes the Rochlin invariant and the relation between~$\ks$ and~$\mu$ is due to Gonz\'ales-Acu\~{n}a~\cite{GonzalezAcuna}. The manifolds~$M_0$ and~$M_1$ are now obtained by setting $$M_0:=V_0 \natural (S^1 \times D^3) \quad \text{ and} \quad M_1:=V_1 \natural (S^1 \times D^3).$$ The manifolds~$M_0$ and~$M_1$ have~$\pi_1(M_i) \cong \Z$, boundary homeomorphic to~$Y = \unaryminus L(8,1) \# (S^1 \times S^2)$, and equivariant intersection form isometric to~$(-8) \colon \Z[t^{\pm 1}] \times \Z[t^{\pm 1}] \to \Z[t^{\pm 1}]$. The additivity of the Kirby-Siebenmann invariant implies that~$\ks(M_0)=\ks(V_0)=0$ whereas~$\ks(M_1)=\ks(V_1)=1.$ The manifolds must have distinct automorphism invariants, since otherwise by the classification (Theorem~\ref{thm:Classification}) they would be homeomorphic and hence would have the same Kirby-Siebenmann invariants. \end{proof} \begin{example} \label{ex:ExampleKS} To provide an explicit example of our realisation procedure from the proof of Theorem~\ref{thm:MainTechnicalIntro}, we describe how the manifolds~$M_0$ and~$M_1$ realise two distinct, explicit automorphism invariants. Fix a model of~$Y := \unaryminus L(8,1) \# (S^1 \times S^2)$ as surgery on a $2$-component unlink $L_1 \cup L_2$ with framings~$(-8,0)$. Consider the epimorphism~$\varphi \colon \pi_1(Y) \cong \Z_8 * \Z \to \Z$ given by sending the meridian~$\mu_{L_1}$ of~$L_1$ to~$0$ and the meridian~$\mu_{L_2}$ to~$1$. 
Fix a lift~$\wt{\mu}_{L_1}$ of~$\mu_{L_1}$ to the infinite cyclic cover and note that it generates~$H_1(Y;\Z[t^{\pm 1}]) \cong \Z[t^{\pm 1}]/(8)$ and satisfies~$\Bl_Y(\wt{\mu}_{L_1},\wt{\mu}_{L_1})=1/8.$ One way to see this latter equality is to use the calculation of the linking form of lens spaces. A verification shows that~$Y$ is presented by the Hermitian form \begin{align*} \lambda \colon \Z[t^{ \pm 1}] \times \Z[t^{ \pm 1}] &\to \Q(t)/\Z[t^{\pm 1}] \\ &(x,y) \mapsto 8x\overline{y}. \end{align*} Note also that multiplication by~$3$ induces an isometry of~$\Bl_Y \cong -\partial \lambda$. Using the notation from the proof of Step 1 in Section~\ref{sub:Plan}, we let~$x_1$ be a generator of~$\Z[t^{\pm 1}]$, and we let~$x_1^* \in \Z[t^{\pm 1}]^*$ be the dual generator. In these bases, the matrix of~$\lambda$ is~$Q = (-8)$. We therefore obtain two elements of~$\Iso(\partial \lambda,\unaryminus\Bl_{Y})$ by considering \begin{align*} &b_0 \colon \Z[t^{\pm 1}]/(8) \xrightarrow{\cong} H_1(Y;\Z[t^{\pm 1}]),\, [x_1^*] \mapsto [\wt{\mu}_{L_1}], \\ &b_1 \colon \Z[t^{\pm 1}]/(8) \xrightarrow{\cong} H_1(Y;\Z[t^{\pm 1}]),\, [x_1^*] \mapsto 3[\wt{\mu}_{L_1}]. \end{align*} Since~$\Aut(\lambda)=\{ \pm t^k\}_{k \in \Z}$, it follows that~$b_0$ and~$b_1$ remain distinct in~$\Iso(\partial \lambda,\unaryminus\Bl_{Y})/\Aut(\lambda).$ That they remain in distinct orbits of the action of~$\Homeo^+_{\varphi}(Y)$ requires the following claim. \begin{claim*} The group~$\Homeo^+_{\varphi}(Y)$ acts on~$H_1(Y;\Z[t^{\pm 1}])$ as follows: for each $\psi \in \Homeo^+_{\varphi}(Y)$, we have that $\psi \cdot x=\pm t^k x$ for some~$k \in \Z$. \end{claim*} \begin{proof} In~\cite[Theorem~3]{deSa-Rourke} we find the statement that every automorphism of a connected sum of 3-manifolds is a composition of slides, permutations, and automorphisms of the factors. That article was an announcement, and the theorem is actually due to Hendriks-Laudenbach~\cite[~\S5,~Th\'eor\`eme]{Hendriks-Laudenbach}. For our purposes the statement in~\cite{deSa-Rourke} is easier to apply, which is why we mention it. Permutations are irrelevant here since there is a unique irreducible factor. Sliding the~$\unaryminus L(8,1)$ factor around the generator of~$S^1 \times S^2$ exactly corresponds to an action by~$t^n$. Sliding the handle sends a generator~$t \in \pi_1(S^1 \times S^2)$ to~$g\cdot t$ where~$g \in \Z_8$. However it acts trivially on a generator of~$\pi_1(L(8,1))$ and hence acts trivially on~$H_1(Y;\Z[t^{\pm 1}])$. It remains to consider automorphisms of the irreducible factor, i.e.\ of~$L(8,1)$. Bonahon \cite{Bonahon} proved that every element of~$\Homeo^+(L(8,1))$ acts by~$\pm 1$ on~$H_1(L(8,1))$, and hence such an element acts by~$\pm 1$ on~$H_1(Y;\Z[t^{\pm 1}])$. Combining these conclusions, we see that every homeomorphism~$f \in \Homeo^+_{\varphi}(Y)$ acts by~$\pm t^n$ on~$H_1(Y;\Z[t^{\pm 1}])$, for some sign and some~$n \in \Z$, as asserted. \end{proof} The claim implies that the isometries~$b_0$ and~$b_1$ determine distinct elements in the orbit set~$\Iso(\partial \lambda,\unaryminus\Bl_{Y})/\Aut(\lambda) \times \Homeo^+_{\varphi}(Y)$. We will show that applying the realisation process of Theorem~\ref{thm:MainTechnical} to these elements results in~$M_0$ and~$M_1$ respectively. 
Following the notation of Section~\ref{sub:Plan}, for~$i=0,1$, precompose~$b_i$ with the canonical projection~$\Z[t^{\pm 1}]^* \to \Z[t^{\pm 1}]/(8)$ to get the epimorphism $$\varpi_i \colon \Z[t^{\pm 1}]^* \xrightarrow{}\Z[t^{\pm 1}]/(8) \xrightarrow{b_i} H_1(Y;\Z[t^{\pm 1}]$$ For~$i=0,1$, let~$\widetilde{K}_i \subset Y^\infty$ be a framed knot representing~$\varpi_i(x_1^*)$ and let~$K_i \subset Y$ be its projection down to~$Y$. We can assume that~$\widetilde{K}_i \subset \unaryminus L(8,1) \subset (S^2 \times \R) \#_{k \in \Z} t^k(\unaryminus L(8,1))=Y^\infty$. Thinking of~$Y$ as the~$(-8,0)$-framed surgery on the unlink~$L_1 \cup L_2$, one can arrange also for~$K_i$ to be disjoint from~$L_1 \cup L_2$. Consider the~$3$-component link~$K_i \cup L_1 \cup L_2 \subset S^3$. Note that~$K_i \cup L_2$ is split from~$L_1$, ~$\ell k (K_0,L_1)=1$ and~$\ell k(K_1,L_1)=3.$ When we refer to a framing of~$K_i$, it will be as a knot in~$S^3$. Let~$\pi_{K_1}$ (resp.~$\pi_{K_0}$) be the~$(\unaryminus 1)$-parallel of~$K_1$ (resp.~$0$-parallel of~$K_0$), and let~$\wt{\pi}_{\wt{K}_i}$ be a lift of~$\pi_{K_i}$ to~$Y^\infty$, which is a parallel of~$\wt{K}_i$ for~$i=0,1.$ The next claim carries out by hand the first step of the plan described in Section~\ref{sub:Plan}. \begin{claim*} For~$i=0,1$, the knot~$\widetilde{K}_i \subset Y^\infty$ represents the homology class~$\varpi_i(x_1^*)$, and the parallel~$\widetilde{\pi}_{\widetilde{K}_i}$ satisfies $$ \ell k_{\Q(t)}(\widetilde{K}_i,\widetilde{\pi}_{\widetilde{K}_i})=\tmfrac{1}{8}.$$ In particular,~$\widetilde{K}_i$ has equivariant linking matrix~$A_{\widetilde{K}_i}=\left(\tmfrac{1}{8}\right)=\unaryminus \left(\tmfrac{1}{\unaryminus 8}\right)=-Q^{-T}$ for~$i=0,1.$ \end{claim*} \begin{proof} The assertion concerning the homology class holds by construction and so we focus on the equivariant linking number calculation. The proofs are similar for~$M_0$ and~$M_1$, so we give the most details for~$M_1$, since that is the more complicated case, and then we sketch the easier case of~$M_0$. We will use the equation \begin{equation}\label{eq-lk-no-eqn} [\wt{\pi}_{\wt{K}_1}] = \lk_{\Q(t)}(\wt{K}_1,\wt{\pi}_{\wt{K}_1}) [\mu_{\wt{K}_1}] \in H_1(Y \sm \nu(K_1);\Q(t)) \end{equation} from Definition~\ref{def:EquivariantLinking}. The~$\Z$-cover~$Y^\infty$ of~$Y$ is~$(S^2 \times \R) \#_{k \in \Z} t^k (\unaryminus L(8,1))$, and there is no linking between curves in different~$L(8,1)$ summands. Thus it suffices to investigate the~$\Q$-valued linking number of~$K_1$ and~$\pi_{K_1}$ in~$Y' := \unaryminus L(8,1)$, and consider the result as an element of~$\Q(t)$. Formally speaking, we use an isomorphism \[H_1(Y^\infty\sm \cup_{i\in \Z} t^i \cdot \nu(\wt{K}_1)) \cong H_1(Y' \sm \nu(K_1)) \otimes_{\Z} \Z[t^{\pm 1}],\] and then tensor both sides further by~$-\otimes_{\Z[t^{\pm 1}]} \Q(t)$. We compute in the right hand side and translate to a conclusion about the left hand side. Since~$Y':=\unaryminus L(8,1)=S^3_{(-8)}(L_1)$, the manifold~$Y'\sm \nu(K_1)$ is obtained from the exterior of the~$2$-component link~$L_1 \cup K_1 \subset S^3$ by Dehn filling~$L_1$ with surgery coefficient~$-8$. Since~$\ell k(L_1,K_1)=3$, the homology is therefore $$ H_1(Y' \sm \nu(K_1)) \cong \frac{\Z\langle\mu_{L_1} \rangle \oplus \Z \langle\mu_{K_1} \rangle} {\langle -8\mu_{L_1} + 3 \mu_{K_1} \rangle} \cong \Z.$$ We now express~$[\pi_{K_1}]$ as a multiple of~$[\mu_{K_1}]$, as required to calculate the framing of~$K_1$. 
Since~$\pi_{K_1}$ is a~$(-1)$-parallel of~$K_1$ we have~$[\pi_{K_1}] = 3[\mu_{L_1}] - [\mu_{K_1}]$. One checks that~$\bsm 1 & 3 \\ -3 & -8 \esm \bsm-8 \\ 3 \esm = \bsm 1 \\ 0 \esm,$ so one can use the invertible matrix~$\bsm 1 & 3 \\ -3 & -8 \esm$ to change coordinates to the presentation $$\Z \xrightarrow{\bsm 1\\ 0 \esm} \Z \oplus \Z \to H_1(Y' \sm \nu(K_1)) \to 0.$$ In this presentation, we compute that \begin{align*} [\mu_{K_1}]&=\operatorname{proj}_2 \circ \bsm 1 & 3 \\ -3 & -8 \esm \bsm 0 \\ 1 \esm = -8 \in \Z \cong H_1(Y' \sm \nu(K_1)), \\ [\pi_{K_1}]&=\operatorname{proj}_2 \circ \bsm 1 & 3 \\ -3 & -8 \esm \bsm 3 \\ -1 \esm = -1 \in \Z \cong H_1(Y' \sm \nu(K_1)). \end{align*} Hence passing to the~$\Z$-cover, tensoring up to~$\Q(t)$ coefficients, and applying \eqref{eq-lk-no-eqn}, we see that~~$-1 = \lk_{\Q(t)}(\wt{K}_1,\wt{\pi}_{\wt{K}_1}) \cdot (-8)$ so, as asserted $$\lk_{\Q(t)}(\wt{K}_1,\wt{\pi}_{\wt{K}_1}) = \tmfrac{1}{8} \in \Q(t).$$ As indicated above, a similar computation shows the same result for~$M_0$. Here are some details. The space~$Y' \sm \nu(K_0)$ is obtained from the exterior of the link~$K_0 \cup L_1 \subset S^3$ by Dehn filling $L_1$ with framing~$-8$. Since~$\ell k(L_1,K_0)=1$, it follows that $$H_1(Y' \sm \nu(K_0)) \cong \frac{\Z\langle\mu_{L_1} \rangle \oplus \Z \langle\mu_{K_0} \rangle }{\langle-8\mu_{L_1} + \mu_{K_0}\rangle} \cong \Z.$$ We now express~$[\pi_{K_0}]$ as a multiple of~$[\mu_{K_0}]$, as required to calculate the framing of~$K_0$. Since~$\pi_{K_0}$ is a~$0$-parallel of~$K_0$ we have~$[\pi_{K_0}] = [\mu_{L_1}]$. Use the invertible matrix~$\bsm 1 & 8 \\ 0 & 1\esm$ to change coordinates to the presentation $$\Z \xrightarrow{\bsm 1\\ 0 \esm} \Z \oplus \Z \to H_1(Y' \sm \nu(K_1)) \to 0.$$ In this presentation, we compute that \begin{align*} [\mu_{K_0}]&=\operatorname{proj}_2 \circ \bsm 1 & 8 \\ 0 & 1\esm \bsm 0 \\ 1 \esm = 8 \in \Z \cong H_1(Y' \sm \nu(K_0)), \\ [\pi_{K_0}]&=\operatorname{proj}_2 \circ \bsm 1 & 8 \\ 0 & 1\esm \bsm 1 \\ 0 \esm = 1 \in \Z \cong H_1(Y' \sm \nu(K_0)). \end{align*} Hence passing to the~$\Z$ cover, tensoring up to~$\Q(t)$ coefficients, one obtains $$\lk_{\Q(t)}(\wt{K}_0,\wt{\pi}_{\wt{K}_0}) =\tmfrac{1}{8} \in \Q(t).$$ This concludes the proof of the claim. \end{proof} The combination of the claim with Step~$2$ of the plan from Section~\ref{sub:Plan} implies that surgery along~$K_i$ yields a~$\Z[t^{\pm 1}]$-homology~$3$-sphere for~$i=0,1.$ In order to recover the construction described during the proof of Proposition~\ref{prop:KSSpin} however, we take~$\widetilde{K}_i$ (and therefore~$K_i \subset \unaryminus L(8,1)$) to be the unknot for~$i=0,1$: as described in the proposition, surgery on~$Y$ along~$K_0$ and~$K_1$ then yields~$S^1 \times S^2$ and~$(S^1 \times S^2) \# S^3_{+1}(T_{2,3})$ respectively. The infinite cyclic covers of these manifolds have vanishing Alexander modules yielding a ``by hand" version of Step~$2$. Step~$3$ is carried out by capping off with~$S^1 \times D^3$ and~$(S^1 \times D^3) \natural C$ respectively; both of these are homotopy~$S^1 \times D^3$s. Thus~$M_0$ and~$M_1$ are obtained by the realisation process of our main theorem. It follows that~$b_{M_0} =b_0 \neq b_1=b_{M_1}$, as asserted. \color{black} \end{example} In summary, the Kirby-Siebenmann invariant of spin 4-manifolds is not always controlled by the boundary and the intersection form. Rather, the automorphism invariant must be taken into account as well. 
An explanation for this is that the automorphism invariant can act nontrivially on the spin structures. Using $b_0$ to fix an isometry $\partial \lambda \cong -\Bl_Y$, $b_1$ determines an automorphism of $\Bl_Y$. If this automorphism preserved the quadratic enhancement of $\Bl_Y$ determined by a spin structure (or by the presentation of $\partial \lambda \cong \Bl_Y$ as the boundary of an even Hermitian form~\cite[p.243]{RanickiExact}, \cite[Definition~2.5]{CCP}) then the induced spin structures on $Y$ would agree. Then $M_0$ and $M_1$ would be stably homeomorphic and hence their Kirby-Siebenmann invariants would be the same; see~\cite[Proposition~4.2]{CCP}. But when we consider an automorphism of the linking form that does not preserve the quadratic enhancement, as is the case for $b_1$ above, then the Kirby-Siebenmann invariants can be different, as with the example just given. Finally, we note that the example just given, without adding the copies of $S^1 \times D^3$, is also compelling in the simply-connected case. We gave it for infinite cyclic fundamental group since that is the topic of the present paper. \color{black} \section{Application to~$\Z$-surfaces in~$4$-manifolds} \label{sec:Discs} Recall that a \emph{$\Z$-surface} refers to a locally flat, embedded surface in a $4$-manifold whose complement has infinite cyclic fundamental group. In this section we apply our classification of $4$-manifolds with fundamental group $\Z$ to the study of $\Z$-surfaces in simply-connected $4$-manifolds and prove Theorems~\ref{thm:SurfacesRelBoundaryIntro},~\ref{thm:SurfacesWithBoundaryIntro}, and~\ref{thm:SurfacesClosedIntro} from the introduction. In Subsection~\ref{sub:Boundary}, we focus on~$\Z$-surfaces with boundary up to equivalence rel.\ boundary. In the shorter Subsections~\ref{sub:SurfacesBoundaryEq} and~\ref{sub:Closed}, we respectively study surfaces with boundary up to equivalence (not necessarily rel.\ boundary) and closed surfaces. Subsection~\ref{sub:OpenQuestions} lists some open problems. \subsection{Surfaces with boundary up to equivalence rel.\ boundary} \label{sub:Boundary} Let~$N$ be a simply-connected~$4$-manifold with boundary homeomorphic to $S^3$. We fix once and for all a particular homeomorphism $h \colon \partial N \cong S^3$. Let~$K \subset S^3$ be a knot. Thus $K$ and $h$ determine a knot in $\partial N$, which we also denote by $K$. The goal of this subsection is to give an algebraic description of the set of~$\Z$-surfaces in~$N$ with boundary $K$ up to equivalence rel.\ boundary. We begin with some conventions. Given a properly embedded~$\Z$-surface~$\Sigma \subset N$ in a simply-connected~$4$-manifold, denote its exterior by~$N_\Sigma:=N\smallsetminus \nu(\Sigma)$. Throughout this section, we will refer to embedded surfaces simply as $\Sigma$, and abstract surfaces as~$\Sigma_{g,b}$, where $g$ is the genus and $b$ is the number of boundary components; we may sometimes write $\Sigma_g$ when $b=0$. Recall that throughout,~$\Sigma_{g,b}$ and $N$ will be oriented. This data determines orientations on $S^3$, $K$, and every meridian of an embedding of~$\Sigma_{g,b}$. Observe that the~$\pi_1(N_\Sigma) \cong \Z$ hypothesis implies that~$[\Sigma,\partial \Sigma]=0\in H_2(N,\partial N)$ by~\cite[Lemma 5.1]{ConwayPowell}, so the relative Euler number of the normal bundle of $\Sigma$, with respect to the zero-framing of $\nu (\partial N)$, vanishes~\cite[Lemma~5.2]{ConwayPowell}. 
From now on, we choose a framing~$\nu(\Sigma) \cong \Sigma \times \mathring{D}^2 \cong \Sigma \times \R^2$ compatible with the orientation and with the property that for each simple closed curve~$\gamma_k \subset \Sigma$, we have~$\gamma_k \times \lbrace e_1 \rbrace \subset N \setminus \Sigma$ is nullhomologous in~$N \setminus \Sigma$. We will refer to such a framing as a \emph{good framing}. As such, when ~$\partial\Sigma=K\subset\partial N$ we can identify the boundary of~$N_\Sigma$ as $$\partial N_\Sigma \cong E_K \cup_\partial (\Sigma_{g,1} \times S^1)=:M_{K,g},$$ where the gluing~$\partial$ takes~$\lambda_K$ to~$\partial\Sigma\times\{ \operatorname{pt}\}$. We call two locally flat surfaces~$\Sigma,\Sigma' \subset N$ with boundary~$K \subset \partial N \cong S^3$ \emph{equivalent rel.\ boundary} if there is an orientation-preserving homeomorphism of pairs~$(N,\Sigma) \cong (N,\Sigma')$ that is pointwise the identity on~$\partial N \cong S^3$. Note that if~$\Sigma \subset N$ is a~$\Z$-surface with boundary~$K$, then~$N_\Sigma$ is a $\Z$-manifold with boundary~$\partial N_\Sigma \cong M_{K,g}$~\cite[Lemma 5.4]{ConwayPowell} and~$H_1(M_{K,g};\Z[t^{\pm 1}])\cong H_1(E_K;\Z[t^{\pm 1}]) \oplus \Z^{2g}$ is torsion because the Alexander module~$H_1(E_K;\Z[t^{\pm 1}])$ of~$K$ is torsion~\cite[Lemma~5.5]{ConwayPowell}. Additionally, note that the equivariant intersection form~$\lambda_{N_\Sigma}$ of a surface exterior~$N_\Sigma$ must present~$M_{K,g}$. Consequently, as we did for manifolds, it is natural to fix a form~$(H,\lambda)$ that presents~$M_{K,g}$ and to consider the set~$\operatorname{Surf(g)}^0_\lambda(N,K)$ of genus~$g$~$\Z$-surfaces in~$N$ with boundary~$K$ and~$\lambda_{N_\Sigma}\cong \lambda$. \begin{definition} \label{def:Surface(g)RelBoundary} For a nondegenerate Hermitian form~$(H,\lambda)$ over $\Z[t^{\pm 1}]$ that presents~$M_{K,g}$, set $$\operatorname{Surf(g)}^0_\lambda(N,K):=\lbrace \Z\text{-surfaces~$\Sigma \subset N$ for~$K$ with } \lambda_{N_\Sigma}\cong \lambda \rbrace/\text{ equivalence rel.~$\partial$}.$$ \end{definition} There is an additional necessary condition for this set to be nonempty. For conciseness, we write~$\lambda(1):=\lambda \otimes_{\Z[t^{\pm 1}]} \Z_\varepsilon$, where $\Z_\varepsilon$ denotes $\Z$ with the trivial $\Z[t^{\pm 1}]$-module structure. This way, if~$A(t)$ is a matrix that represents~$\lambda$, then~$A(1)$ represents~$\lambda(1)$. Additionally, recall that if~$W$ is a~$\Z$-manifold, then~$\lambda_W(1) \cong Q_W$, where~$Q_W$ denotes the standard intersection form of~$W$; see e.g.~\cite[Lemma 5.10]{ConwayPowell}. Thus, if we take~$W=N_\Sigma$ and assume that~$\lambda \cong \lambda_{N_\Sigma}$, then $$\lambda(1) \cong \lambda_{N_\Sigma}(1) \cong Q_{N_\Sigma} \cong Q_N \oplus (0)^{\oplus 2g},$$ where the last isometry follows from a Mayer-Vietoris argument. Thus, for the set~$\operatorname{Surf(g)}^0_\lambda(N,K)$ to be nonempty, it is also necessary that~$\lambda(1)\cong Q_N \oplus (0)^{\oplus 2g}$. For the final piece of setup for the statement of the main result of the section, we describe an action of~$\operatorname{Homeo}^+(\Sigma_{g,1},\partial)$ on the set~$\Iso(\partial \lambda,\unaryminus \Bl_{M_{K,g}})$ as follows. First, a rel.\ boundary homeomorphism $x \colon \Sigma_{g,1} \to \Sigma_{g,1}$ induces an isometry $x''_* \colon \Bl_{M_{K,g}} \cong \Bl_{M_{K,g}} $ as follows. Extend~$x$ to a self homeomorphism $x'$ of $\Sigma_{g,1}\times S^1$ by defining $x'(s,\theta)=(x(s),\theta)$. 
Then extend $x'$ by the identity over $E_K$; in total one obtains a self homeomorphism $x''$ of $M_{K,g}$. Now lift this homeomorphism to the covers and take the induced map on $H_1$ to get $x''_* \colon \Bl_{M_{K,g}} \cong \Bl_{M_{K,g}}$. The required action is now by postcomposition; for $f \in \Iso(\partial \lambda,\unaryminus \Bl_{M_{K,g}})$, define $x \cdot f := x''_* \circ f$. The main result of this section proves Theorem \ref{thm:SurfacesRelBoundaryIntro} from the introduction. The formulation of the result is different than in the introduction, but clearly equivalent. \begin{theorem} \label{thm:SurfacesRelBoundary} Let~$N$ be a simply-connected~$4$-manifold with boundary~$\partial N \cong S^3$ and let~$K \subset S^3$ be a knot. Given a nondegenerate Hermitian form~$(H,\lambda)$ over~$\Z[t^{\pm 1}]$, the following assertions are equivalent: \begin{enumerate} \item the Hermitian form~$(H,\lambda)$ presents~$M_{K,g}$ and satisfies~$\lambda(1)\cong Q_N \oplus (0)^{\oplus 2g}$; \item the set~$\operatorname{Surf(g)}^0_\lambda(N,K)$ is nonempty and there is a bijection $$\operatorname{Surf(g)}^0_\lambda(N,K) \approx \Iso(\partial \lambda,\unaryminus\Bl_{M_{K,g}})/(\Aut(\lambda)\times \operatorname{Homeo}^+(\Sigma_{g,1},\partial)).$$ \end{enumerate} \end{theorem} \begin{remark} \label{rem:SurfacesRelBoundaryTheorem} We collect some remarks concerning Theorem~\ref{thm:SurfacesRelBoundary}. \begin{itemize} \item If $(H,\lambda)$ presents $M_{K,g}$, then there is a non-canonical bijection $$\frac{\Iso(\partial \lambda,\unaryminus\Bl_{M_{K,g}})}{(\Aut(\lambda)\times \operatorname{Homeo}^+(\Sigma_{g,1},\partial))} \approx \frac{ \Aut(\partial \lambda)}{(\Aut(\lambda) \times \operatorname{Homeo}^+(\Sigma_{g,1},\partial))}.$$ In addition, we have the isomorphism~$\Aut(\partial \lambda) \cong \Aut(\Bl_{M_{K,g}})\cong \Aut (\Bl_K) \oplus \operatorname{Sp}_{2g}(\Z)$ where the latter is the group of automorphisms of the symplectic intersection pairing of~$\Sigma_{g,1}$~\cite[Propositions 5.6 and 5.7]{ConwayPowell}. The group $\operatorname{Homeo}^+(\Sigma_{g,1},\partial)$ acts trivially on the first summand and transitively on the second. Therefore one can express the quotients above as \[\Aut (\Bl_K)/ \Aut (\lambda),\] where the action of $\Aut(\lambda)$ on~$\Aut(\Bl_K)$ arises by restricting the action of $\Aut(\lambda)$ on $\Aut(\partial \lambda)\cong \Aut(\Bl_{M_{K,g}}) \cong \Aut(\Bl_K) \oplus \operatorname{Sp}_{2g}(\Z)$ to the first summand. We stress again that the isomorphism $\Aut(\partial \lambda) \cong \Aut(\Bl_{M_{K,g}})$ is not canonical. The set $\Aut (\Bl_K)/ \Aut (\lambda)$ was mentioned in Theorem~\ref{thm:SurfacesRelBoundaryIntro} from the introduction. \item The action of~$\operatorname{Homeo}^+(\Sigma_{g,1},\partial)$ on~$\Iso(\partial \lambda,\unaryminus \Bl_{M_{K,g}})$ factors through the corresponding mapping class group $\operatorname{Mod}^+(\Sigma_{g,1},\partial) := \pi_0(\operatorname{Homeo}^+(\Sigma_{g,1},\partial))$. In particular, Theorem~\ref{thm:SurfacesRelBoundary} could have equally well been stated using $\operatorname{Mod}^+(\Sigma_{g,1},\partial)$ instead of $\operatorname{Homeo}^+(\Sigma_{g,1},\partial)$. \item Our surface set~$\operatorname{Surf(g)}^0_\lambda(N,K)$ is defined up to equivalence, hence Theorem~\ref{thm:SurfacesRelBoundary} only gives a classification of surfaces up to equivalence (instead of ambient isotopy). 
This is because we prove Theorem \ref{thm:SurfacesRelBoundary} as a consequence of Theorem \ref{thm:ClassificationRelBoundary} and the equivalence on~$\mathcal{V}^0_\lambda(M_{K,g})$ is up to \textit{any }homeomorphism rel.\ boundary, not just homeomorphisms in a prescribed isotopy class. As a consequence, when $N$ admits homeomorphisms which are not isotopic to the identity rel.\ boundary, there can be~$\Z$-surfaces that are equivalent rel.\ boundary but not ambient isotopic. Here is an example. \black Let~$K \subset S^3$ be a knot with nontrivial Alexander polynomial~$\Delta_K$, that bounds a~$\Z$-disc in a punctured~$\C P^2$ with intersection form represented by the~$1 \times 1$ matrix~$(\Delta_K)$. Let~$N$ be given by the boundary connected sum with another punctured~$\C P^2$ (so that~$N$ is a punctured~$\C P^2\# \C P^2$), and denote the same~$\Z$-disc considered in~$N$ by~$D$. There is a self-homeomorphism~$\tau \colon N \to N$ that induces~$\bsm 0 & 1 \\ 1 & 0 \esm$ on~$H_2(N) \cong \Z^2$. Isotope~$\tau$ to be the identity on~$\partial N \cong S^3$. The discs~$D$ and~$\tau (D)$ are equivalent rel.\ boundary. But a short computation shows that the equivariant intersection forms of the exteriors are $\bsm\Delta_K & 0 \\ 0 & 1 \esm \text{ and } \bsm 1 & 0 \\ 0 & \Delta_K \esm$ respectively. A straightforward computation shows that every~$\Z[t^{\pm 1}]$-isometry between these two forms augments over~$\Z$ to~$\bsm 0 & 1 \\ 1 & 0 \esm$. It follows that there is no ambient isotopy between~$D$ and~$\tau(D)$. \end{itemize} \end{remark} Theorem~\ref{thm:SurfacesRelBoundary} will be proved in three steps. \begin{enumerate} \item We define a map $\Theta$ from a set of equivalence classes of embeddings~$\Sigma_{g,1} \hookrightarrow N$, which we denote~$\operatorname{Emb}_\lambda^0(\Sigma_{g,1},N;K)$ and which we will define momentarily, to the set of manifolds~$\mathcal{V}_\lambda^0(M_{K,g})$ from Definition~\ref{def:V0lambdaY}. By Theorem~\ref{thm:ClassificationRelBoundary},~$\mathcal{V}_\lambda^0(M_{K,g})$ corresponds bijectively to the set of isometries~$\Iso(\partial \lambda,\unaryminus \Bl_{M_{K,g}})/\Aut(\lambda)$. \item We prove that the map $\Theta$ is a bijection, by defining a map $\Psi$ in the other direction, from the set of manifolds to the set of embeddings, and showing that both $\Theta \circ \Psi$ and $\Psi \circ \Theta$ are the identity maps. \item We describe the set of surfaces~$\operatorname{Surf(g)}^0_\lambda(N,K)$ as a quotient of~$\operatorname{Emb}_\lambda^0(\Sigma_{g,1},N;K)$ by $\operatorname{Homeo}^+(\Sigma_{g,1},\partial)$. We show that this action and the actions of~$\operatorname{Homeo}^+(\Sigma_{g,1},\partial)$ on~$\mathcal{V}_\lambda^0(M_{K,g})$ and $\Iso(\partial \lambda,\unaryminus \Bl_{M_{K,g}})/\Aut(\lambda)$ are all compatible. Passing to orbits leads to the bijection in Theorem~\ref{thm:SurfacesRelBoundary}. This step is largely formal. \end{enumerate} \subsubsection*{Step $(1)$: From embeddings to manifolds} \black For the first step, we give some definitions and construct the map which will be the bijection in Theorem~\ref{thm:SurfacesRelBoundary}. 
Consider the following set: $$\operatorname{Emb}_\lambda^0(\Sigma_{g,1},N;K)=\frac{\lbrace e \colon \Sigma_{g,1} \hookrightarrow N \mid e(\Sigma_{g,1}) \text{ is a } \Z\text{-surface for~$K$ with } \lambda_{N_{e(\Sigma_{g,1})}}\cong \lambda \rbrace }{\text{ equivalence rel.~$\partial$}}.$$ Two embeddings~$e_1,e_2$ are \emph{equivalent rel.\ boundary} if there exists a homeomorphism~$\Phi \colon N \to N$ that is the identity on~$\partial N \cong S^3$ and satisfies~$\Phi \circ e_1=e_2$. In what follows, we let~$\varphi \colon \pi_1(M_{K,g}) \twoheadrightarrow \Z$ be the epimorphism such that the induced map $\varphi' \colon H_1(M_{K,g}) \twoheadrightarrow \Z$ is the unique epimorphism that maps the meridian of~$K$ to~$1$ and the other generators to zero. When we write~$\mathcal{V}^0_\lambda(M_{K,g})$, it is with respect to this epimorphism~$\varphi$. Recall also that we have a fixed homeomorphism $h\colon\partial N\to S^3$; whenever we say $\partial N\cong S^3$, it is with this fixed $h$. In addition to our homeomorphism $h \colon \partial N \to S^3$, we fix once and for all the following data. \begin{itemize} \item A closed tubular neighborhood $\overline{\nu}(K) \subset \partial N$. Since we have already fixed $h$, and since we are abusively using $K$ for both the knot $K$ in $\partial N$ and for the image $h(K)$ in $S^3$, this choice of $\overline{\nu}(K) \subset \partial N$ also determines a particular neighborhood $\overline{\nu}(K) \subset S^3$. We will use~$E_K$ exclusively to denote the complement of ${\nu}(K)$ in $S^3$. \item A homeomorphism $D \colon \partial \Sigma_{g,1} \times S^1 \to \partial \overline{\nu}(K)$ that takes $\partial \Sigma_{g,1}\times \{1\}$ to the $0$-framed longitude of $K$ and $\lbrace \operatorname{pt} \rbrace \times S^1$ to the meridian of $K$ such that $$ M_{K,g}=E_{K} \cup_D \Sigma_{g,1} \times S^1.$$ \end{itemize} These choices can change the bijection; however, we are interested only in the existence of a bijection, so this is not an issue. Next we define the map which will be the bijection in Theorem~\ref{thm:SurfacesRelBoundary}. \begin{construction} \label{cons:EmbVBijection} We construct a map~$\Theta \colon \operatorname{Emb}_\lambda^0(\Sigma_{g,1},N;K) \to \mathcal{V}^0_\lambda(M_{K,g})$. Let~$e \colon \Sigma_{g,1} \hookrightarrow N$ be an embedding that belongs to~$\operatorname{Emb}_\lambda^0(\Sigma_{g,1},N;K)$. We will assign to~$e$ a pair~$(N_{e(\Sigma_{g,1})},f)$, where~$f \colon \partial N_{e(\Sigma_{g,1})} \to M_{K,g}$ is a homeomorphism. The pair we construct will depend on several choices, but we will show that the outcome is independent of these choices up to equivalence in $\mathcal{V}_\lambda^0(M_{K,g})$. To cut down on notation we set $\Sigma:=e(\Sigma_{g,1})$ and describe the choices on which our pair $(N_{\Sigma},f)$ will a priori depend. \begin{enumerate} \item An embedding $\iota \colon \overline{\nu}(\Sigma) \hookrightarrow N$ of the normal bundle of $\Sigma$ such that $\iota(\overline{\nu}(\Sigma)) \cap \partial N$ agrees with our fixed tubular neighborhood of $K$. \item A good framing $ \gamma \colon \overline{\nu}(\Sigma) \cong \Sigma_{g,1} \times D^2$ such that $h|\circ \iota\circ\gamma^{-1}=D$: \begin{equation} \label{eq:Compatible} \xymatrix{ \partial \Sigma_{g,1} \times S^1 \ar[r]^-D \ar[d]^{\gamma^{-1}}&\partial \overline{\nu}(K) \subset E_{K}\\ \gamma^{-1}(\partial \Sigma_{g,1} \times S^1) \ar[r]^-{\iota|}& \iota(\gamma^{-1}(\partial \Sigma_{g,1} \times S^1)) \subset \partial N \setminus \nu(K) \ar[u]^{h|}.
} \end{equation} In this diagram, $h|$ denotes the restriction of our fixed identification $h \colon \partial N \cong S^3$ and $D \colon \partial \Sigma_{g,1} \times S^1 \to \partial \overline{\nu}(K)$ is the homeomorphism that we fixed above. \end{enumerate} We also record some of the notation that stems from these choices. \begin{itemize} \item The boundary of the surface exterior $N_\Sigma$ decomposes as \begin{equation} \label{eq:DecompositionBoundarySurfaceExterior} \partial N_{\Sigma} \cong \big(\partial N \setminus \nu(K)\big) \cup \Big{(}\partial \iota(\overline{\nu}(\Sigma))\smallsetminus \left( \iota(\nu(\Sigma)) \cap \partial N \right)\Big{)}. \end{equation} Here the first part of this union is homeomorphic to a knot exterior, while the second is homeomorphic to $\Sigma_{g,1} \times S^1$. \item Restricting our fixed homeomorphism $h \colon \partial N \cong S^3$ to the knot exterior part in~\eqref{eq:DecompositionBoundarySurfaceExterior}, we obtain the homeomorphism $$h| \colon \partial N \smallsetminus \nu(K) \to E_{K} \subset M_{K,g}.$$ \item On the circle bundle part of~\eqref{eq:DecompositionBoundarySurfaceExterior}, we consider the homeomorphism \[\gamma| \circ \iota^{-1} \colon \Big{(}\partial \iota(\overline{\nu}(\Sigma))\smallsetminus \left( \iota(\nu(\Sigma)) \cap \partial N \right) \Big{)} \to \Sigma_{g,1} \times S^1 \subset M_{K,g}.\] Here by the slightly abusive notation $\iota^{-1}$, we mean that since $\iota \colon \overline{\nu}(\Sigma) \hookrightarrow N$ is an embedding, it is a homeomorphism onto its image, whence the inverse. \end{itemize} The diagram in~\eqref{eq:Compatible} ensures that $h|$ and $\gamma| \circ \iota^{-1}$ can be glued together to give rise to the homeomorphism we have been building towards: \begin{equation} \label{eq:BoundaryHomeoSurface} f_\gamma \colon \partial N_{\Sigma} \to M_{K,g}, \ \ \ \ f_\gamma:=(h|) \cup (\gamma| \circ \iota^{-1}). \end{equation} \end{construction} Set $\Theta(e):=(N_{\Sigma},f_\gamma)$. We need to verify that $\Theta$ gives rise to a map $ \operatorname{Emb}_\lambda^0(\Sigma_{g,1},N;K) \to \mathcal{V}^0_\lambda(M_{K,g})$. In other words, we need to check that modulo homeomorphisms rel.\ boundary,~$\Theta(e)$ does not depend on the embedding $\iota \colon \overline{\nu}(\Sigma) \hookrightarrow N$ nor on the particular choice of the good framing~$\gamma$ subject to the condition in~\eqref{eq:Compatible}. We also have to verify that equivalent embeddings produce equivalent manifolds. \black \begin{itemize} \item First we show that the construction is independent of $\gamma$ and $\iota$. Pick another embedding $\iota' \colon \overline{\nu}(e(\Sigma_{g,1})) \hookrightarrow N$ of the normal bundle and another good framing $\gamma' \colon \overline{\nu}(e(\Sigma_{g,1})) \cong \Sigma_{g,1} \times D^2$ with the same hypothesis about compatibility with $D$. This leads to boundary homeomorphisms $f_\gamma:=(h|) \cup (\gamma| \circ \iota^{-1})$ and $f_{\gamma'}:=(h|) \cup (\gamma'| \circ {\iota'}^{-1})$ and we must show that the following pairs are equivalent rel.\ boundary: \begin{equation} \label{eq:WantEquivalent} (N_{e_\iota(\Sigma_{g,1})},f_\gamma) \text{ and } (N_{e_{\iota'}(\Sigma_{g,1})},f_{\gamma'}) . \end{equation} For a moment we are keeping track of the embeddings $\iota$ and $\iota'$ in our notation for exteriors. More explicitly, we set $N_{e_\iota(\Sigma_{g,1})}:=N \setminus \iota(\nu(e(\Sigma_{g,1})))$ and similarly for $\iota'$. 
By uniqueness of tubular neighborhoods~\cite[Theorem~9.3D]{FreedmanQuinn}, there is an isotopy of embeddings $\Gamma_t \colon \Sigma_{g,1}\times D^2 \hookrightarrow N$ such that $\Gamma_0=\iota \circ \gamma^{-1}$ and $\Gamma_1=\iota' \circ \gamma'^{-1}$ that fixes a neighborhood of $\partial \Sigma_{g,1} \times D^2$. Then by the Edwards-Kirby isotopy extension theorem~\cite{KirbyEdwards1971}, there is an isotopy of homeomorphisms $F_t\colon N \to N$ with $F_1\circ \iota \circ \gamma^{-1}=\iota' \circ \gamma'^{-1}$ and $F_0=\id_N$ and such that $F_t$ is the identity on a neighborhood of the boundary $\partial N$ for every $t \in [0,1]$. We will argue that the restriction of $F_1$ to the surface exteriors $N_{e_{\iota}(\Sigma_{g,1})}$ and $N_{e_{\iota'}(\Sigma_{g,1})}$ gives a rel.\ boundary homeomorphism between the pairs in~\eqref{eq:WantEquivalent}, i.e.\ identifies $(N_{e_{\iota}(\Sigma_{g,1})},f_\gamma)$ with $(N_{e_{\iota'}(\Sigma_{g,1})},f_{\gamma'})$ as elements of $\mathcal{V}^0_\lambda(M_{K,g})$. Consider the following diagram: $$ \xymatrix @C+1.5cm{ M_{K,g} \ar[d]^=& \partial N_{e_\iota(\Sigma_{g,1})} \ar[l]_{f_\gamma=(h|) \cup(\gamma| \circ \iota^{-1})} \ar[r]^{\subset} \ar[d]^{F_1}& N_{e_\iota(\Sigma_{g,1})} \ar[r]^{\subset} \ar[d]^{F_1}& N \ar[d]^{F_1} \\ M_{K,g} & \partial N_{e_{\iota'}(\Sigma_{g,1})} \ar[r]^{\subset} \ar[l]_{f_{\gamma'}=(h|) \cup (\gamma'| \circ {\iota'}^{-1})}& N_{e_{\iota'}(\Sigma_{g,1})} \ar[r]^{\subset}& N. } $$ The right two squares certainly commute, while the left square commutes because the homeomorphism $F_1 \colon N \to N$ is rel.\ boundary and because, by construction, $\gamma| \circ \iota^{-1}=\gamma'| \circ {\iota'}^{-1}\circ F_1$. In total, we have: \begin{equation} \label{eq:VerificationTheta} f_{\gamma'} \circ F_1 = \left( (h|) \cup (\gamma'| \circ {\iota'}^{-1}) \right) \circ F_1 =(h| \circ F_1 ) \cup (\gamma'| \circ {\iota'}^{-1}\circ F_1) =h| \cup (\gamma| \circ \iota^{-1})=f_{\gamma}. \end{equation} \item We now show that the map $\Theta$ from Construction~\ref{cons:EmbVBijection} is well defined up to rel.\ boundary homeomorphisms of $N$; recall that this is the equivalence relation on the domain $\operatorname{Emb}_\lambda^0(\Sigma_{g,1},N;K)$. Assume that~$e,e' \colon \Sigma_{g,1} \hookrightarrow N$ are embeddings that are equivalent rel.\ boundary via a homeomorphism~$F \colon N \to N$. Pick good framings $\gamma,\gamma'$ for $\overline{\nu}(e(\Sigma_{g,1}))$ and $\overline{\nu}(e'(\Sigma_{g,1}))$ as well as an embedding $\iota' \colon \overline{\nu}(e'(\Sigma_{g,1})) \hookrightarrow N$. We now consider the embedding $ \iota:= F^{-1} \circ \iota' \circ (\gamma')^{-1} \circ \gamma$. The following diagram commutes: \begin{equation} \label{eq:DiagramVerificationThetaRelBoundary} \xymatrix{ \Sigma_{g,1} \times D^2 \ar[r]^-{\gamma^{-1},\cong} \ar[d]^=& \overline{\nu}(e(\Sigma_{g,1})) \ar[r]^-{\iota,\cong}& \iota(\overline{\nu}(e(\Sigma_{g,1}))) \ar[r]^-{\subset } \ar[d]^{F|}& N \ar[d]^F\\ \Sigma_{g,1} \times D^2 \ar[r]^-{{\gamma'}^{-1},\cong}& \overline{\nu}(e'(\Sigma_{g,1})) \ar[r]^-{\iota',\cong}& \iota'(\overline{\nu}(e'(\Sigma_{g,1}))) \ar[r]^-{\subset } & N.
} \end{equation} As in Construction~\ref{cons:EmbVBijection}, the choice of framings leads to boundary homeomorphisms \begin{align*} & f=(h|)\cup(\gamma| \circ \iota^{-1}) \colon \partial N_{e_{\iota}(\Sigma_{g,1})} \xrightarrow{\cong} M_{K,g}, \\ & f'=(h|)\cup(\gamma'| \circ {\iota'}^{-1} ) \colon \partial N_{e'_{\iota'}(\Sigma_{g,1})} \xrightarrow{\cong} M_{K,g}. \end{align*} As in~\eqref{eq:VerificationTheta}, using the diagram from~\eqref{eq:DiagramVerificationThetaRelBoundary} and the fact that $F$ is a rel.\ boundary homeomorphism, we deduce that $F|=f'^{-1}\circ f$ and that $F$ restricts to a rel.\ boundary homeomorphism $$F|\colon N_{e_{\iota}(\Sigma_{g,1})}\to N_{e'_{\iota'}(\Sigma_{g,1})}.$$ We conclude that $(N_{e(\Sigma_{g,1})},f)$ is equivalent to $(N_{e'(\Sigma_{g,1})},f')$ in $\mathcal{V}^0_\lambda(M_{K,g})$. \end{itemize} This concludes the verification that the map $\Theta$ from Construction~\ref{cons:EmbVBijection} is well defined. \begin{remark} \label{rem:OmitEmbedding} From now on, we continue to use the notation $\Sigma:=e(\Sigma_{g,1})$ and we omit the choice of an embedding $\iota \colon \overline{\nu}(\Sigma) \hookrightarrow N$ from the notation since we have shown that $\Theta(e)$ is independent of the choice of embedding $\iota$ up to equivalence in $\mathcal{V}^0_\lambda(M_{K,g})$. In practice this means that we will simply write $\overline{\nu}(\Sigma) \subset N$. Since we omit~$\iota$ from the notation, we also allow ourselves to think of (the inverse of) a good framing $\gamma$ as giving an embedding $$ \gamma^{-1} \colon \Sigma_{g,1} \times D^2 \hookrightarrow \overline{\nu}(\Sigma) \subset N.$$ Similarly, given a choice of such a good framing, we now write the homeomorphism from~\eqref{eq:BoundaryHomeoSurface} as \begin{equation} \label{eq:BoundaryHomeoSurfaceNoEmbedding} f_\gamma \colon \partial N_{\Sigma} \to M_{K,g}, \ \ \ \ f_\gamma:=(h|) \cup (\gamma|), \end{equation} once again omitting $\iota$ from the notation. We sometimes also omit the choice of the framing $\gamma$ from the notation, writing instead $\Theta(e)=(N_{\Sigma},f)$. \end{remark} \subsubsection*{Step $(2)$: From manifolds to embeddings} We set up some notation aimed towards proving that~$\Theta$ is a bijection when the form $\lambda$ is even, and that $\Theta$ is a bijection when $\lambda$ is odd and the Kirby-Siebenmann invariant is fixed. Set~$\varepsilon:=\ks(N)$ and write~$\mathcal{V}^{0,\varepsilon}_\lambda(M_{K,g})$ for the subset of those manifolds in~$\mathcal{V}^{0}_\lambda(M_{K,g})$ whose Kirby-Siebenmann invariant equals~$\varepsilon$. Observe that by additivity of the Kirby-Siebenmann invariant (see e.g.~\cite[Theorem 8.2]{FriedlNagelOrsonPowell}), if $\lambda$ is odd and~$\Sigma\subset N$ is a $\Z$-surface, then~$\ks(N_\Sigma)=\ks(N)=\varepsilon$, so the image of $\Theta$ lies in $\mathcal{V}^{0,\varepsilon}_\lambda(M_{K,g})$. The following proposition is the next step in the proof of Theorem~\ref{thm:SurfacesRelBoundary}.
\begin{proposition} \label{prop:EmbVBijections} Let~$N$ be a simply-connected~$4$-manifold with boundary~$\partial N \cong S^3$, let~$K \subset S^3$ be a knot and let~$(H,\lambda)$ be a nondegenerate Hermitian form with $\lambda(1) \cong Q_N \oplus (0)^{2g}.$ \begin{enumerate} \item If~$\lambda$ is even, then the map~$\Theta$ from Construction~\ref{cons:EmbVBijection} determines a bijection $$\operatorname{Emb}_\lambda^0(\Sigma_{g,1},N;K) \to \mathcal{V}^0_\lambda(M_{K,g}).$$ \item If~$\lambda$ is odd, then the map~$\Theta$ from Construction~\ref{cons:EmbVBijection} determines a bijection $$\operatorname{Emb}_\lambda^0(\Sigma_{g,1},N;K) \to \mathcal{V}^{0,\varepsilon}_\lambda(M_{K,g}),$$ where $\varepsilon=\ks(N)$. \end{enumerate} \end{proposition} \begin{proof} We construct an inverse~$\Psi$ to the assignment~$\Theta \colon e \mapsto (N_{e(\Sigma_{g,1})},f)$ from Construction~\ref{cons:EmbVBijection}; this will in fact take up most of the proof. Let~$(W,f)$ be a pair, where~$W$ is a~$4$-manifold with fundamental group~$\pi_1(W)\cong\Z$, equivariant intersection form~$\lambda_W\cong \lambda$ and, in the odd case, Kirby-Siebenmann invariant~$\ks(W)=\varepsilon$, and~$f \colon \partial W \cong M_{K,g}$ is a homeomorphism. The inverse~$\Psi(W,f)$ is an embedding $\Sigma_{g,1} \hookrightarrow N$ defined as follows. Glue~$\Sigma_{g,1} \times D^2$ to~$W$ via the homeomorphism~$f^{-1}|_{\Sigma_{g,1}\times S^1}$. This produces a~$4$-manifold~$\widehat{W}$ with boundary~$\partial \widehat{W}=(\partial W \setminus f^{-1}(\Sigma_{g,1}\times S^1)) \cup (\partial \Sigma_{g,1} \times D^2)$, together with an embedding $$ \times \lbrace 0 \rbrace \colon \Sigma_{g,1}\hookrightarrow \widehat{W} \ \ \ \ x \mapsto (x,0) \in \Sigma_{g,1}\times \{0\} \subset \Sigma_{g,1} \times D^2.$$ Note for now that~$\partial \Sigma_{g,1}\times \{0\}\subset\partial\widehat{W}$ bounds a genus~$g$~$\Z$-surface in $\widehat{W}$ (with exterior~$W$). We will use the homeomorphism~$f \colon \partial W \to M_{K,g}$ to define a homeomorphism~$f' \colon \partial \widehat{W} \to \partial N~$ and then use Freedman's classification of compact simply-connected 4-manifolds with~$S^3$ boundary, to deduce that this homeomorphism extends to a homeomorphism~$F\colon \widehat{W} \to N$. We will then take our embedding to be $$\Psi(W,f) :=F\circ (\times \lbrace 0 \rbrace) \colon \Sigma_{g,1}\hookrightarrow N.$$ The next paragraphs flesh out the details of this construction. Namely, firstly we build $f' \colon \partial \widehat{W} \to \partial N$ and secondly we argue it extends to a homeomorphism $F \colon \widehat{W} \to N$. \begin{itemize} \item Towards building this $f'$, first observe that we get a natural homeomorphism $\partial\widehat{W}\to S^3$ as follows. Restricting~$f$ gives a homeomorphism~$f| \colon \partial W \setminus f^{-1}(\Sigma_{g,1}\times S^1) \cong S^3\setminus \nu(K)$. Recall that the homeomorphism $D \colon \partial \Sigma_{g,1} \times S^1 \to \partial \overline{\nu}(K)$ sends $\partial\Sigma_{g,1}\times\{\operatorname{pt}\}$ to $\lambda_K$ and~$\{\cdot\}\times\partial D^2$ to $\mu_K$, where $\lambda_K$ and $\mu_K$ respectively denote the Seifert longitude and meridian of $K\subset S^3$. Since~$\mu_K$ bounds a disc in $\overline{\nu}(K)$, this homeomorphism extends to a homeomorphism \color{black} \begin{equation} \label{eq:varphi} \vartheta\colon \partial\Sigma_{g,1}\times D^2\to \overline{\nu}(K). \end{equation} Note that $\vartheta$ is well defined up to isotopy. 
Consider the following diagram: $$ \xymatrix{ \partial W \setminus f^{-1}(\Sigma_{g,1}^\circ \times S^1) \ar[r]^-{f|,\cong}& S^3\setminus \nu(K) \\ \partial \Sigma_{g,1} \times S^1 \ar[u]_{f^{-1}|_{\partial \Sigma_{g,1} \times S^1}} \ar[d]^{\subset} \ar[r]^{D,\cong}& \partial \overline{\nu} (K) \ar[u]_{\subset} \ar[d]^{\subset} \\ \partial \Sigma_{g,1} \times D^2 \ar[r]^{\vartheta,\cong}& \overline{\nu}(K). } $$ The bottom square commutes by definition of $\vartheta$, whereas the top square commutes because~$f|$ is obtained by restricting $f \colon \partial W \to M_{K,g}=(S^3\setminus \nu(K)) \cup_D \Sigma_{g,1} \times S^1$. The commutativity of this diagram implies that~$f$ and~$\vartheta$ combine to a homeomorphism $$ f|\cup\vartheta \colon \partial\widehat{W} \to S^3.$$ \color{black} Then~$h^{-1} \circ (f|\cup\vartheta)$ gives the required homeomorphism $$f':= h|^{-1} \circ ( f| \cup \vartheta) \colon \partial \widehat{W} \to \partial N.$$ Further, we observe that $f'(\partial\Sigma_{g,1})=K$. \item To prove that this homeomorphism extends to a homeomorphism~$\widehat{W} \cong N$, we will appeal to Freedman's theorem that for every pair of simply-connected topological~$4$-manifolds with boundary homeomorphic to~$S^3$, the same intersection form, and the same Kirby-Siebenmann invariant, every homeomorphism between the boundaries extends to a homeomorphism between the 4-manifolds~\cite{Freedman}. We check now that the hypotheses are satisfied. First, we argue that~$\widehat{W}$ is simply-connected. The hypothesis that~$W$ lies in~$\mathcal{V}^0_\lambda(M_{K,g})$ implies that there is an isomorphism~$\widehat{\varphi} \colon \pi_1(W) \xrightarrow{\cong} \Z$ such that~$\varphi=\widehat{\varphi}\circ\kappa$, where~$\kappa$ is the inclusion induced map~$\pi_1(M_{K,g})\to\pi_1(W)$ (see Definition \ref{def:V0lambdaY}). Since we required that~$\varphi(\mu_K)$ generates~$\Z$, we must have that~$\kappa(\mu_K)$ generates~$\pi_1(W)\cong\Z$. Since gluing~$\Sigma_{g,1} \times D^2$ along~$\Sigma_{g,1} \times S^1$ has the effect of killing~$\kappa(\mu_K)$, we conclude that~$\widehat{W}$ is simply-connected as claimed. Next we must show that~$Q_{\widehat{W}}$ is isometric to~$Q_N$. A Mayer-Vietoris argument establishes the isometry~$Q_{\widehat{W}}\oplus (0)^{\oplus 2g}\cong Q_W$. It then follows from our assumption on the Hermitian form~$(H,\lambda)$ that we have the isometries $$ Q_{\widehat{W}} \oplus (0)^{\oplus 2g} \cong Q_W \cong \lambda_W(1) \cong \lambda(1) \cong Q_N \oplus (0)^{\oplus 2g}.$$ This implies that~$Q_{\widehat{W}} \cong Q_N$ because both forms are nonsingular (indeed~$\partial \widehat{W}\cong \partial N \cong S^3$). In the even case, we deduce that both~$\widehat{W}$ and~$N$ are spin. In the odd case, using the additivity of the Kirby-Siebenmann invariant (see e.g.~\cite[Theorem 8.2]{FriedlNagelOrsonPowell}), we have~$\ks(\widehat{W})=\ks(W)=\varepsilon=\ks(N)$. Therefore~$\widehat{W}$ and~$N$ are simply-connected topological~$4$-manifolds with boundary~$S^3$, with the same intersection form and the same Kirby-Siebenmann invariant.
Freedman's classification of simply-connected~$4$-manifolds with boundary~$S^3$ now ensures that the homeomorphism~$f' \colon \partial \widehat{W} \to \partial N$ extends to a homeomorphism $F\colon \widehat{W} \to N$ that induces the isometry $Q_{\widehat{W}} \cong Q_N$ and fits into the following commutative diagram \begin{equation} \label{eq:DiagramForf'} \xymatrix{ (\partial W \setminus f^{-1}(\Sigma_{g,1}\times S^1)) \cup (\partial \Sigma_{g,1} \times D^2) \ar[r]^-{=}\ar[d]^-{h|^{-1} \circ (f| \cup \vartheta|)}& \partial \widehat{W} \ar[r]^\subset \ar[d]^{f'}& \widehat{W}\ar[d]^-{F} \\ (\partial N\setminus \nu(K)) \cup \overline{\nu}(K)\ar[r]^-{=}& \partial N \ar[r]^\subset &N. } \end{equation} \end{itemize} As mentioned above, we obtain an embedding as \begin{equation} \label{eq:DefOfPsi} \Psi(W,f):=\Big{(}e\colon \Sigma_{g,1} \xrightarrow{\times \lbrace 0\rbrace} \widehat{W} \xrightarrow{F,\cong}N\Big{)}. \end{equation} This concludes the construction of our embedding~$\Psi(W,f)$. We must check that this construction gives rise to a map~$\Psi \colon \mathcal{V}^0_\lambda(M_{K,g})\to \operatorname{Emb}_\lambda^0(\Sigma_{g,1},N;K).$ In other words, we verify that, up to homeomorphisms of $N$ rel.\ boundary, the embedding $e$ from~\eqref{eq:DefOfPsi} depends neither on the choice of isometry~$Q_{\widehat{W}} \cong Q_N$, nor on the choice of $\vartheta$ from \eqref{eq:varphi}, nor on the homeomorphism~$\widehat{W} \cong N$ extending our boundary homeomorphism, nor on the rel.\ boundary homeomorphism type of~$(W,f)$. \begin{itemize} \item The precise embedding~$e$ depends on the homeomorphism~$\widehat{W} \cong N$ chosen to extend a given $f'$. This homeomorphism in turn depends on the choice of isometry~$Q_{\widehat{W}} \cong Q_N$. However for any two choices $F_1$ and $F_2$ of homeomorphisms~$\widehat{W} \cong N$ extending $f'$, the resulting embeddings are equivalent rel.\ boundary, as can be seen by composing one choice of homeomorphism with the inverse of the other: $$ \xymatrix{ \Sigma_{g,1} \ar[r]^-{\times \lbrace 0 \rbrace} \ar[d]^=&\widehat{W} \ar[r]^{F_1}\ar[d]^=& N \ar[d]^{F_2 \circ F_1^{-1}}\\ \Sigma_{g,1} \ar[r]^-{\times \lbrace 0 \rbrace} &\widehat{W} \ar[r]^{F_2}& N. } $$ So the equivalence class of the surface~$\Psi(W,f)$ does not depend on the choice of isometry~$Q_{\widehat{W}} \cong Q_N$ nor on the choice of homeomorphism~$\widehat{W} \cong N$ realizing this isometry and extending $f'$. \item Next, we show that the definition is independent of the choice of $\vartheta \colon \partial \Sigma_{g,1} \times D^2 \to \overline{\nu}(K)$ within its isotopy class. If $\vartheta_0,\vartheta_1 \colon \partial \Sigma_{g,1} \times D^2 \to \overline{\nu}(K)$ are isotopic, then so are the resulting homeomorphisms $f_0':=(f| \cup \vartheta_0|),f_1':=(f| \cup \vartheta_1|) \colon \partial \widehat{W} \to \partial N$ via an isotopy $f_s'$. \begin{claim*} There is an isotopy $F_s \colon \widehat{W} \to N$ extending $f_s'$. \end{claim*} \begin{proof} Pick a homeomorphism $F_0 \colon \widehat{W} \to N$ extending $f_0'$; when we constructed~$\Psi(W,f)$, we argued that such an $F_0$ exists. There are collars $\partial \widehat{W} \times [0,1]$ and $\partial N \times [0,1]$ such that $F_0|_{\partial \widehat{W} \times [0,1]}=f_0' \times \id_{[0,1]}$. Here it is understood that the boundaries of $\widehat{W} $ and $N$ are respectively given by $\partial \widehat{W} \times \lbrace 0 \rbrace$ and $\partial N \times \lbrace 0 \rbrace$.
The idea is to implant the isotopy $f_s'$ between $f_0',f_1'$ in these collars in order to obtain an isotopy between $F_0$ and a homeomorphism $F_1$ that restricts to $f_1'$ on the boundary. To carry out this idea, consider the restriction $$ F_0| \colon \widehat{W} \setminus (\partial \widehat{W} \times [0,1]) \to N \setminus (\partial N \times [0,1]).$$ Define an isotopy of homeomorphisms between the collars via the formula \begin{align*} G_s \colon \partial \widehat{W} \times [0,1] &\to \partial N \times [0,1] \\ (x,t) &\mapsto (f_{(1-t)s}'(x),t). \end{align*} Since $G_s(x,1)=(f_0'(x),1)$ for every $s$, we obtain the required isotopy as $F_s:=G_s \cup F_0$. By construction $F_i$ restricts to $f_i'$ on the boundary for $i=0,1$, thus concluding the proof of the claim. \end{proof} Thanks to the claim, we can use $F_0$ and $F_1$ to define the embeddings $e_0:=F_0 \circ (\times \lbrace 0 \rbrace)$ and $e_1:=F_1 \circ (\times \lbrace 0 \rbrace)$. This way, $F_1 \circ F_0^{-1} \colon N \to N$ is an equivalence rel.\ boundary between $e_0$ and $e_1$, so that the definition of $\Psi$ is independent of the choice of $\vartheta$ within its isotopy class. \item Next we check the independence of the rel.\ boundary homeomorphism type of~$(W,f)$. If we have~$(W_1,f_1)$ and~$(W_2,f_2)$ that are equivalent rel.\ boundary, then there is a homeomorphism~$\Phi \colon W_1 \to W_2$ that satisfies~$f_2 \circ \Phi| =f_1$. This homeomorphism extends to~$\widehat{\Phi}:=\Phi \cup \id_{\Sigma_{g,1} \times D^2} \colon \widehat{W}_1 \to \widehat{W}_2$ and therefore to a homeomorphism~$N \to N$ that is, by construction, rel.\ boundary. A formal verification using this latter homeomorphism then shows that the embeddings~$\Psi(W_1,f_1)$ and~$\Psi(W_2,f_2)$ are equivalent rel.\ boundary. \end{itemize} Now we prove that the maps~$\Theta$ and~$\Psi$ are mutually inverse. \begin{itemize} \item First we prove that~$\Psi \circ \Theta=\id$. Start with an embedding~$e \colon \Sigma_{g,1} \hookrightarrow N$ and write~$\Theta(e)=(N_{e(\Sigma_{g,1})},f)$ with~$f=(h|) \cup (\gamma|) \colon \partial N_{e(\Sigma_{g,1})} \to M_{K,g}$ the homeomorphism described in Construction~\ref{cons:EmbVBijection}. Then~$\Psi(\Theta(e))$ is an embedding $$ \Sigma_{g,1} \xrightarrow{\times \lbrace 0 \rbrace} N_{e(\Sigma_{g,1})} \cup_f (\Sigma_{g,1} \times D^2) \xrightarrow{F,\cong} N.$$ We showed that the equivalence class of this embedding is independent of the homeomorphism~$F$ that extends~$f'$. It suffices to show that we can make choices so that $\Psi(\Theta(e))$ recovers $e$. This can be done explicitly as follows. Choose $\vartheta:=h\circ\gamma^{-1} \colon \partial \Sigma_{g,1} \times D^2 \to \overline{\nu}(K)$. Then we have $f'=\id_{\partial N \setminus \nu(K)} \cup (h^{-1} \circ (h \circ \gamma^{-1}))=\id_{\partial N \setminus \nu(K)} \cup \gamma|^{-1}$ where the notation is as in~\eqref{eq:DiagramForf'} (with $W=N_{e(\Sigma_{g,1})}$). We already know an extension of $f'$, namely $\id_{N_{e(\Sigma_{g,1})}} \cup \gamma^{-1}$, which we take to be $F$. Thus $\Psi(\Theta(e))=\gamma^{-1}|_{\Sigma_{g,1} \times \lbrace 0 \rbrace} \colon \Sigma_{g,1} \hookrightarrow N$ which, by definition of a normal bundle, agrees with the initial embedding $e$. \item Next we prove that~$\Theta \circ \Psi=\id$. This time we start with a pair~$(W,f)$ consisting of a 4-manifold~$W$ and a homeomorphism~$f \colon \partial W \to M_{K,g}$.
Then~$\Psi(W,f)$ is represented by an embedding ~$e \colon \Sigma_{g,1} \xrightarrow{ \times \lbrace 0 \rbrace} \widehat{W} \xrightarrow{F,\cong} N$. Recall that we write~$h \colon \partial N \to S^3$ for our preferred homeomorphism and that by construction, on the boundaries,~$F$ restricts to $$h|^{-1} \circ (f| \cup \vartheta) \colon \partial \widehat{W} \to \partial N$$ where (the isotopy class of)~$\vartheta \colon \partial \Sigma_{g,1} \times D^2 \to \overline{\nu}(K)$ satisfies the properties listed below equation~\eqref{eq:varphi}. We frame $\Sigma_{g,1} \times \lbrace 0 \rbrace \subset \widehat{W}$ via the unique homeomorphism $\operatorname{fr}\colon\overline{\nu}(\Sigma_{g,1}\times\{0\})\to\Sigma_{g,1}\times D^2$ that makes the following diagram commute: $$ \xymatrix{ \overline{\nu}(\Sigma_{g,1}\times \lbrace 0 \rbrace) \ar[rr]^{\operatorname{fr}}\ar[dr]^{\operatorname{incl} }&& \Sigma_{g,1} \times D^2 \ar[dl]_{\operatorname{incl} } \\ &\widehat{W}=W\cup(\Sigma_{g,1} \times D^2 ).& } $$ We then frame $e(\Sigma_{g,1}) \subset N$ via $$\gamma:= \operatorname{fr} \circ F^{-1}| \colon \overline{\nu}(e(\Sigma_{g,1})) \cong \Sigma_{g,1} \times D^2.$$ This framing is good thanks to the definition of $\varphi \colon \pi_1(M_{K,g}) \to \Z$ as the unique epimorphism that maps the meridian of $K$ to $1$ and the other generators to zero: indeed this implies that the curves on $\Sigma_{g,1} \times \lbrace 0\rbrace$ are nullhomologous in $W$ and therefore the same thing holds for $e(\Sigma_{g,1})\subset N$. It can be verified that this framing satisfies the condition from~\eqref{eq:Compatible}. We then obtain~$\Theta(\Psi(W,f))=(N_{\Sigma} := N \setminus \nu(e(\Sigma_{g,1})),h| \cup \gamma|)$, where, as dictated by Construction~\ref{cons:EmbVBijection}, the boundary homeomorphism is~$h| \cup \gamma| \colon \partial N_\Sigma \to M_{K,g}$. Here we are making use of the fact that up to equivalence, we can choose any framing in the definition of~$\Theta$. We have to prove that~$(N_{\Sigma},h| \cup \gamma|)$ is homeomorphic rel.\ boundary to~$(W,f)$. We claim that the restriction of~$F \colon \widehat{W} \to N$ gives the required homeomorphism. To see this, consider the following diagram $$ \xymatrix @C+0.3cm{ M_{K,g} \ar[d]^=& (\partial W \setminus f^{-1}(\Sigma_{g,1} \times S^1)) \cup (f^{-1}(\Sigma_{g,1} \times S^1))\ar[l]_-{f,\cong} \ar[r]^-{=} \ar[d]^{f':=(h|^{-1} \circ f|) \cup F| }& \partial W \ar[r]^{\subset} \ar[d]^{F|}& W \ar[r]^{\subset} \ar[d]^{F|}& \widehat{W} \ar[d]^F \\ M_{K,g} & (\partial N \setminus \nu(K)) \cup (\partial \overline{\nu}(\Sigma) \setminus (\nu(\Sigma) \cap \partial N))) \ar[r]^-{=} \ar[l]_-{h| \cup \gamma|}& \partial N_\Sigma \ar[r]^{\subset}& N_\Sigma \ar[r]^{\subset} &N. } $$ The right two squares certainly commute. In the second-from-left square, we have just expanded out $\partial W$ and $\partial N_\Sigma$, as well as written $F|$ explicitly on the regions where we have an explicit description from the construction of $\Psi$. So this square commutes. It remains to argue that the left square commutes. By construction $F|_{\partial \widehat{W}}=f'= h^{-1} \circ (f| \cup \vartheta)$. Thus on the knot exteriors, we have that $F|=h^{-1} \circ f|$ and so the left portion of the square commutes on the knot exteriors. Now it remains to prove that $\gamma| \circ F|=f$. By definition of $\gamma=\operatorname{fr} \circ F^{-1}$, we must show that $\operatorname{fr}|=f|$ on $f^{-1}(\Sigma_{g,1} \times S^1)$. 
First note that $\operatorname{fr}$ has domain $\overline{\nu}(\Sigma_{g,1} \times \lbrace 0 \rbrace) \subset \widehat{W}=W\cup (\Sigma_{g,1} \times D^2)$, so it appears we are attempting to compare maps which have different domains. However, the definition of $\widehat{W}$ identifies the portion of the boundary of $\overline{\nu}(\Sigma_{g,1}\times \lbrace 0 \rbrace)$ that we are interested in with $f^{-1}(\Sigma_{g,1}\times S^1)\subset \partial W$ via $f^{-1}|\circ \operatorname{fr}|$, so it makes sense to compare $f$ on $f^{-1}(\Sigma_{g,1} \times S^1)$ with $\operatorname{fr}|$ on the corresponding portion of $\partial\overline{\nu}(\Sigma_{g,1}\times \lbrace 0 \rbrace)$. These maps are tautologically equal. Therefore the left square of the diagram commutes and this concludes the proof that~$\Theta \circ \Psi=\id$. \end{itemize} We have shown that~$\Theta$ and~$\Psi$ are mutually inverse, and so both are bijections. This completes the proof of Proposition~\ref{prop:EmbVBijections}. \end{proof} \subsubsection*{Step $(3)$: From embeddings to submanifolds} Now we deduce a description of~$\operatorname{Surf(g)}^0_\lambda(N,K)$ from Proposition~\ref{prop:EmbVBijections}. Note that $ \operatorname{Surf(g)}^0_\lambda(N,K)$ arises as the orbit set $$ \operatorname{Surf(g)}^0_\lambda(N,K)= \operatorname{Emb}_\lambda^0(\Sigma_{g,1},N;K)/\operatorname{Homeo}^+(\Sigma_{g,1},\partial),$$ where the left action of~$x \in \operatorname{Homeo}^+(\Sigma_{g,1},\partial)$ on~$e\in \operatorname{Emb}_\lambda^0(\Sigma_{g,1},N;K)$ is defined by $x \cdot e=e \circ x^{-1}$. There is a surjective map~$\operatorname{Emb}_\lambda^0(\Sigma_{g,1},N;K) \to \operatorname{Surf(g)}^0_\lambda(N,K)$ that maps an embedding~$e \colon \Sigma_{g,1} \hookrightarrow N$ onto its image. One then verifies that this map descends to a bijection on the orbit set. Next, we note that~$\operatorname{Homeo}^+(\Sigma_{g,1},\partial)$ acts on the sets~$\mathcal{V}^0_\lambda(M_{K,g})$ and~$\mathcal{V}^{0,\varepsilon}_\lambda(M_{K,g})$ as follows. A rel.\ boundary homeomorphism~$x \colon \Sigma_{g,1} \to \Sigma_{g,1}$ extends to a self homeomorphism $x'$ of $\Sigma_{g,1}\times S^1$ by defining $x'(s,\theta)=(x(s),\theta)$. Then extend $x'$ by the identity over $E_K$; in total one obtains a self homeomorphism $x''$ of $M_{K,g}$. The required action is now by postcomposition: for $(W,f)$ representing an element of $\mathcal{V}^0_\lambda(M_{K,g})$ or~$\mathcal{V}^{0,\varepsilon}_\lambda(M_{K,g})$, define $x \cdot (W,f):=(W,x'' \circ f )$. The following proposition is now a relatively straightforward consequence of Proposition~\ref{prop:EmbVBijections}. \begin{proposition} \label{prop:SurfBijectionCorrected} Let~$N$ be a simply-connected~$4$-manifold with boundary~$\partial N \cong S^3$, let~$K \subset S^3$ be a knot and let~$(H,\lambda)$ be a nondegenerate Hermitian form with $\lambda(1) \cong Q_N \oplus (0)^{2g}.$ \begin{enumerate} \item If~$\lambda$ is even, then the map~$\Theta$ from Construction~\ref{cons:EmbVBijection} descends to a bijection $$\operatorname{Surf(g)}^0_\lambda(N,K) \to \mathcal{V}^0_\lambda(M_{K,g})/\operatorname{Homeo}^+(\Sigma_{g,1},\partial).$$ \item If~$\lambda$ is odd, then the map~$\Theta$ from Construction~\ref{cons:EmbVBijection} descends to a bijection $$\operatorname{Surf(g)}^0_\lambda(N,K) \to \mathcal{V}^{0,\varepsilon}_\lambda(M_{K,g})/\operatorname{Homeo}^+(\Sigma_{g,1},\partial),$$ where $\varepsilon=\ks(N)$.
\end{enumerate} \end{proposition} \begin{proof} Thanks to Proposition~\ref{prop:EmbVBijections}, it is enough to check that~$\Theta(x \cdot e)=x \cdot \Theta(e)$ for~$x \in \Homeo^+(\Sigma_{g,1},\partial)$ and~$e \colon \Sigma_{g,1} \hookrightarrow N$ an embedding representing an element of~$\operatorname{Emb}_\lambda^0(\Sigma_{g,1},N;K)$. By definition of~$\Theta$, we know that~$\Theta(x \cdot e)$ is~$(N_{e(x^{-1}(\Sigma_{g,1}))},f_{e\circ x^{-1}})$ and~$x \cdot \Theta(e)=(N_{e(\Sigma_{g,1})},x'' \circ f_{e})$ where the~$f_e,f_{e \circ x^{-1}}$ are homeomorphisms from the boundaries of these surface exteriors to~$M_{K,g}$ that can be constructed, up to equivalence rel.\ boundary, using any choice of good framing; recall Construction~\ref{cons:EmbVBijection}. In what follows, we will make choices of framings so that the pairs~$\Theta(x \cdot e)=(N_{e(x^{-1}(\Sigma_{g,1}))},f_{e \circ x^{-1}})$ and~$x \cdot \Theta(e)=(N_{e(\Sigma_{g,1})},x'' \circ f_e)$ are equivalent rel.\ boundary. Pick a good framing $\gamma \colon \overline{\nu}(e(\Sigma_{g,1})) \cong \Sigma_{g,1} \times D^2$ so that $\Theta(e)=(N_{e(\Sigma_{g,1})},f_e)=(N_{e(\Sigma_{g,1})},h| \cup \gamma|)$. Since $\gamma^{-1} \colon \Sigma_{g,1} \times D^2 \hookrightarrow N$ satisfies $\gamma^{-1}|_{\Sigma_{g,1} \times \lbrace 0 \rbrace}=e$, we deduce that $\gamma^{-1} \circ (x^{-1} \times \id_{D^2})$ gives an embedding of the normal bundle of $e \circ x^{-1}$. We can therefore choose its inverse $\gamma_{e \circ x^{-1}}:=(x \times \id_{D^2}) \circ \gamma$ as a good framing for the embedding $e \circ x^{-1}$. Using this choice of good framing to construct $f_{e \circ x^{-1}}$, we have~$\Theta(e \circ x^{-1})=(N_{e \circ x^{-1}(\Sigma_{g,1})},h| \cup ((x \times \id_{D^2}) \circ \gamma|))$. Using these observations, the fact that $x$ is rel.\ boundary, and the equality of images $e \circ x^{-1}(\Sigma_{g,1})=e(\Sigma_{g,1})$, we obtain \begin{align*} \Theta(x \cdot e) &=\Theta(e \circ x^{-1}) =(N_{e \circ x^{-1}(\Sigma_{g,1})},h| \cup ((x \times \id_{D^2}) \circ \gamma|))\\ &=(N_{e \circ x^{-1}(\Sigma_{g,1})},x'' \circ (h| \cup \gamma|)) =x \cdot (N_{e(\Sigma_{g,1})},f_e) =x \cdot \Theta(e). \end{align*} This proves that the pairs~$\Theta(x \cdot e)=(N_{e(x^{-1}(\Sigma_{g,1}))},f_{e \circ x^{-1}})$ and~$x \cdot \Theta(e)=(N_{e(\Sigma_{g,1})},x'' \circ f_e)$ are equivalent rel.\ boundary, which concludes the proof of the proposition. \end{proof} We now deduce our description of the surface set, thus proving the main result of this section. \begin{proof}[Proof of Theorem \ref{thm:SurfacesRelBoundary}] We have already argued the $(2) \Rightarrow (1)$ direction below Definition~\ref{def:Surface(g)RelBoundary}, and so we focus on the converse.
Since we assumed that $\lambda(1)\cong Q_N \oplus (0)^{\oplus 2g}$, we can apply Proposition~\ref{prop:SurfBijectionCorrected} to deduce that if~$\lambda$ is even then the map~$\Theta$ from Construction~\ref{cons:EmbVBijection} induces a bijection $$\operatorname{Surf(g)}^0_\lambda(N,K) \to \mathcal{V}^0_\lambda(M_{K,g})/\operatorname{Homeo}^+(\Sigma_{g,1},\partial)$$ while if~$\lambda$ is odd, for~$\varepsilon:=\ks(N)$, the map~$\Theta$ induces a bijection $$\operatorname{Surf(g)}^0_\lambda(N,K) \to \mathcal{V}^{0,\varepsilon}_\lambda(M_{K,g})/\operatorname{Homeo}^+(\Sigma_{g,1},\partial).$$ Since we assumed that $(H,\lambda)$ presents $M_{K,g}$, the theorem will follow from Theorem~\ref{thm:ClassificationRelBoundary} once we show that the map $b \colon \mathcal{V}_\lambda^0(M_{K,g}) \to \Iso(\partial \lambda,\unaryminus \Bl_{M_{K,g}})/\Aut(\lambda)$ from Construction~\ref{cons:Invariant} intertwines the~$\Homeo^+(\Sigma_{g,1},\partial)$-actions, i.e.\ satisfies~$b_{x\cdot (W,f)}=x \cdot b_{(W,f)}$ for every~$x \in \Homeo^+(\Sigma_{g,1},\partial)$ and for every pair~$(W,f)$ representing an element of~$\mathcal{V}_\lambda^0(M_{K,g})$. This follows formally from the definitions of the actions: on the one hand, for some isometry $F \colon \lambda \cong \lambda_W$, we have~$b_{x\cdot (W,f)}=b_{(W,x''\circ f)}=x''_*\circ f_* \circ D_W \circ \partial F$; on the other hand, $x \cdot b_{(W,f)}=x \cdot (f_* \circ D_W \circ \partial F)=x''_* \circ f_* \circ D_W \circ \partial F$, which gives the same result. This concludes the proof of Theorem~\ref{thm:SurfacesRelBoundary}. \end{proof} \subsection{Surfaces with boundary up to equivalence} \label{sub:SurfacesBoundaryEq} The study of surfaces up to equivalence (instead of equivalence rel.\ boundary) presents additional challenges: while there is still a map $\Theta \colon \operatorname{Emb}_\lambda(\Sigma_{g,1},N;K) \to \mathcal{V}_\lambda(M_{K,g})$, the proof of Proposition~\ref{prop:EmbVBijections} (in which we constructed an inverse $\Psi$ of $\Theta$) breaks down because if~$W$ and~$W'$ are homeomorphic~$\Z$-fillings of~$M_{K,g}$, it is unclear whether we can always find a homeomorphism~$W \cup (\Sigma_{g,1} \times D^2) \cong W' \cup (\Sigma_{g,1} \times D^2)$. We nevertheless obtain the following result. \begin{theorem} \label{thm:SurfacesWithBoundary} Let~$N$ be a simply-connected~$4$-manifold with boundary~$\partial N \cong S^3$, let~$K$ be a knot such that every isometry of~$\Bl_K$ is realised by an orientation-preserving homeomorphism~$E_K \to E_K$, and let~$(H,\lambda)$ be a nondegenerate Hermitian form over~$\Z[t^{\pm 1}]$. The following assertions are equivalent: \begin{enumerate} \item the Hermitian form~$\lambda$ presents~$M_{K,g}$ and~$\lambda(1)\cong Q_N \oplus (0)^{\oplus 2g}$; \item up to equivalence, there exists a unique genus~$g$ surface~$\Sigma \subset N$ with boundary~$K$ and whose exterior has equivariant intersection form~$\lambda$, i.e.~$|\operatorname{Surf(g)}_\lambda(N,K)|=1$. \end{enumerate} \end{theorem} \begin{proof} We have already proved that the second statement implies the first, so we focus on the converse. We can apply Theorem~\ref{thm:SurfacesRelBoundary} to deduce that~$\operatorname{Surf(g)}^0_\lambda(N,K)$ is nonempty; this implies in particular that~$\operatorname{Surf(g)}_\lambda(N,K)$ is nonempty. Since this set is nonempty, the hypothesis on~$K$ ensures that we can apply~\cite[Theorem 1.3]{ConwayPowell} to deduce that~$|\operatorname{Surf(g)}_\lambda(N,K)|=1$.
In contrast to Theorem~\ref{thm:SurfacesWithBoundary}, the statement of~\cite[Theorem 1.3]{ConwayPowell} contains the additional condition that the orientation-preserving homeomorphism~$f \colon E_K \to E_K$ be the identity on~$\partial E_K$. We show that this assumption is superfluous, so that we can apply~\cite[Theorem~1.3]{ConwayPowell} without assuming that $f|_{\partial E_K}=\id_{\partial E_K}$. First, note that since~$f$ realises an isometry of~$\Bl_K$, it is understood that $f$ preserves a basepoint~$x_0$ and satisfies~$f([\mu_K])=[\mu_K]$, where~$[\mu_K] \in \pi_1(E_K,x_0)$ is the based homotopy class of a meridian of~$K$. An application of the Gordon-Luecke theorem~\cite{GordonLuecke} now implies that~$f|_{\partial E_K}$ is isotopic to~$\id_{\partial E_K}$; this isotopy can be assumed to be basepoint preserving by~\cite[page~57]{FarbMargalit}. Implanting this basepoint preserving isotopy in a collar neighborhood of~$\partial E_K$ implies that~$f$ itself is basepoint preserving isotopic to a homeomorphism $E_K \to E_K$ that restricts to the identity on~$\partial E_K$. This completes the proof that the extra assumption in the statement of \cite[Theorem~1.3]{ConwayPowell} can be assumed to hold without loss of generality. \end{proof} \subsection{Closed surfaces} \label{sub:Closed} We now turn our attention to closed~$\Z$-surfaces. Let~$X$ be a closed simply-connected~$4$-manifold and let~$\Sigma \subset X$ be a closed~$\Z$-surface with genus~$g$, whose normal bundle we frame as in the case with boundary. With this framing, we can now identify the boundary of~$X_\Sigma:=X \setminus \nu(\Sigma)$ as $$\partial X_\Sigma \cong \Sigma_g \times S^1.$$ Two such surfaces~$\Sigma$ and~$\Sigma'$ are \emph{equivalent} if there exists an orientation-preserving homeomorphism~$(X,\Sigma) \cong (X,\Sigma')$. Again as in the case of surfaces with boundary,~$X_\Sigma$ is a $\Z$ manifold and~$H_1( \Sigma_g \times S^1;\Z[t^{\pm 1}]) \cong \Z^{2g}$ is torsion. Additionally, note that the equivariant intersection form~$\lambda_{X_\Sigma}$ of a surface exterior~$X_\Sigma$ must present~$ \Sigma_g \times S^1$. \begin{definition} \label{def:Surface(g)Closed} For a nondegenerate Hermitian form~$(H,\lambda)$ over~$\Z[t^{\pm 1}]$ presenting~$\Sigma_g \times S^1$, set $$\operatorname{Surf(g)}_\lambda(X):=\lbrace \Z\text{-surface~$\Sigma \subset X$ with } \lambda_{X_\Sigma}\cong \lambda \rbrace /\text{ equivalence}.$$ \end{definition} As for~$\Z$-surfaces with nonempty boundary, in order for~$\operatorname{Surf(g)}_\lambda(X)$ to be nonempty it is additionally necessary that~$\lambda(1)\cong Q_X \oplus (0)^{\oplus 2g}$. It was proved in~\cite[Theorem 1.4]{ConwayPowell} that whenever~$\operatorname{Surf(g)}_\lambda(X)$ is nonempty, it contains a single element. We improve this statement to include an existence clause. \begin{theorem} \label{thm:SurfacesClosed} Let~$X$ be a closed simply-connected~$4$-manifold. Given a nondegenerate Hermitian form~$(H,\lambda)$ over~$\Z[t^{\pm 1}]$, the following assertions are equivalent: \begin{enumerate} \item the Hermitian form~$\lambda$ presents~$\Sigma_g \times S^1$ and~$\lambda(1)\cong Q_X \oplus (0)^{\oplus 2g}$; \item there exists a unique~$($up to equivalence$)$ genus~$g$~$\Z$-surface~$\Sigma \subset X$ whose exterior has equivariant intersection form~$\lambda$; i.e.~$|\operatorname{Surf(g)}_\lambda(X)|=1$. \end{enumerate} \end{theorem} \begin{proof} We have already argued that $(2) \Rightarrow (1)$ and so we focus on the converse. 
Use~$U \subset S^3$ to denote the unknot and use~$N$ to denote the simply-connected~$4$-manifold with boundary~$S^3$ obtained from~$X$ by removing a small open $4$-ball. Note that~$M_{U,g}=\Sigma_g \times S^1$ and that~$Q_N=Q_X$. Since the Blanchfield form of~$U$ is trivial, Theorem~\ref{thm:SurfacesWithBoundary} applies; this shows us that item~$(1)$ in Theorem \ref{thm:SurfacesClosed} is equivalent to the existence of a unique (up to equivalence) genus~$g$ surface~$\Sigma \subset N$ with boundary~$U$ and equivariant intersection form~$\lambda$, in other words: $$|\operatorname{Surf(g)}_\lambda(N,U)|=1.$$ Pick an embedded $4$-ball $B \subset X$ so that $N=X\setminus \mathring{B}$ and~$U \subset \partial N \cong S^3.$ Capping off a representative of the unique element in~$\operatorname{Surf(g)}_\lambda(N,U)$ is now readily seen to give an element of $\operatorname{Surf(g)}_\lambda(X)$. Since~\cite[Theorem 1.4]{ConwayPowell} shows that~$|\operatorname{Surf(g)}_\lambda(X)| \in \{0,1\}$, we deduce that~$|\operatorname{Surf(g)}_\lambda(X)|=1$, as required. \end{proof} \subsection{Problems and open questions} \label{sub:OpenQuestions} We conclude with some problems in the theory of~$\Z$-surfaces, both in the closed case and in the case with boundary. In what follows, we set $$\mathcal{H}_2:=\begin{pmatrix} 0&t-1 \\ t^{-1}-1&0 \end{pmatrix}.$$ We start with closed surfaces in closed manifolds, where the statements are a little cleaner. \begin{problem} \label{prob:Closed} Fix a closed, simply-connected 4-manifold~$X$. Characterise the nondegenerate Hermitian forms~$(H,\lambda)$ over~$\Z[t^{\pm 1}]$ that arise as~$\lambda_{X_\Sigma}$ where~$\Sigma \subset X$ is a closed~$\Z$-surface of genus~$g$. \end{problem} It is known that if~$\lambda$ is as in Problem~\ref{prob:Closed}, then it must present~$\Sigma_g \times S^1$, that~$\lambda(1) \cong Q_X \oplus (0)^{\oplus 2g}$ and that~$\lambda \oplus \mathcal{H}_2^{\oplus n} \cong Q_X \oplus \mathcal{H}_2^{\oplus (g+n)}$ for some~$n\geq 0$. The necessity of the first two conditions was mentioned in Subsection~\ref{sub:Closed} while the necessity of the third was proved in~\cite[Proposition 1.6]{ConwayPowell}. Here is what is known about Problem~\ref{prob:Closed}: \begin{itemize} \item if~$X=S^4$ and~$g\neq 1,2$, then~$\lambda \cong \mathcal{H}_2^{\oplus g}$~\cite[Section 7]{ConwayPowell}; \item for $X=\C P^2$ and $g=0$, the equivariant intersection form is necessarily the form $(x,y)\mapsto x\overline{y}$ and it follows that $\Z$-spheres in $X$ are unique up to isotopy~\cite[Proposition A.1]{ConwayOrson}; \item if~$b_2(X) \geq |\sigma(X)| +6$, then~\cite[Theorem 7.2]{Sunukjian} implies that~$\lambda \cong Q_X \oplus \mathcal{H}_2^{\oplus g}$. \end{itemize} This leads to the following question, a positive answer to which would solve Problem~\ref{prob:Closed}. \begin{question} \label{question:Closed} Let~$X$ be a closed simply-connected~$4$-manifold and let~$(H,\lambda)$ be a nondegenerate Hermitian form over~$\Z[t^{\pm 1}]$. Is it the case that if~$\lambda$ presents~$\Sigma_g \times S^1$,~$\lambda(1) \cong Q_X \oplus (0)^{\oplus 2g}$ and~$\lambda \oplus \mathcal{H}_2^{\oplus n} \cong Q_X \oplus \mathcal{H}_2^{\oplus (g+n)}$ for some~$n\geq 0$, then~$\lambda \cong Q_X \oplus \mathcal{H}_2^{\oplus g}$? \end{question}
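As an elementary consistency check, recorded purely for illustration, note that $\mathcal{H}_2(1)$ is the zero matrix, so the form $\lambda=Q_X \oplus \mathcal{H}_2^{\oplus g}$ appearing in the conclusion of Question~\ref{question:Closed} automatically satisfies $\lambda(1)\cong Q_X \oplus (0)^{\oplus 2g}$, and it tautologically satisfies $\lambda \oplus \mathcal{H}_2^{\oplus n}\cong Q_X \oplus \mathcal{H}_2^{\oplus (g+n)}$ for every $n\geq 0$; a positive answer to the question is therefore at least consistent with the necessary conditions recalled above.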
If the answer to Question~\ref{question:Closed} were positive, then using Theorem~\ref{thm:SurfacesClosed} one could completely classify closed~$\Z$-surfaces in closed simply-connected~$4$-manifolds: for every~$g \geq 0$, in a closed simply-connected~$4$-manifold~$X$, there would exist a unique~$\Z$-surface of genus~$g$ in~$X$ up to equivalence. Next, we discuss the analogous (but more challenging) problem for surfaces with boundary. \begin{problem} \label{prob:Boundary} Fix a simply-connected 4-manifold~$N$ with boundary $S^3$. Characterise the nondegenerate Hermitian forms~$(H,\lambda)$ over~$\Z[t^{\pm 1}]$ that arise as~$\lambda_{N_\Sigma}$ where~$\Sigma \subset N$ is a~$\Z$-surface of genus~$g$ with boundary a fixed knot~$K$. For brevity, we call such forms~$(N,K,g)$-\emph{realisable}. \end{problem} It is known that if~$\lambda$ is~$(N,K,g)$-realisable, then it must present~$M_{K,g}$, satisfy~$\lambda(1) \cong Q_N \oplus (0)^{\oplus 2g}$ as well as~$\lambda \oplus \mathcal{H}_2^{\oplus n} \cong Q_N \oplus \mathcal{H}_2^{\oplus (g+n)}$ for some~$n\geq 0$. The necessity of the first two conditions was mentioned in Subsection~\ref{sub:Boundary} while the necessity of the third was proved in~\cite[Proposition~1.6]{ConwayPowell}. Here is what is known about Problem~\ref{prob:Boundary}: \begin{itemize} \item if~$N=D^4$, $g\neq 1,2$, and~$K$ has Alexander polynomial one, then~$\lambda \cong \mathcal{H}_2^{\oplus g}$~\cite[Section~7]{ConwayPowell}; \item for~$N=\C P^2 \setminus \mathring{D}^4$ and~$g=0$, the equivariant intersection form $\lambda$ is necessarily the form~$(x,y) \mapsto x\Delta_K\overline{y}$. After this article appeared, the classification of~$\Z$-discs in~$\C P^2 \setminus \mathring{D}^4$ was studied in~\cite{ConwayDaiMiller}. \end{itemize} We conclude by listing consequences of further solutions to Problem~\ref{prob:Boundary}. \begin{enumerate} \item Using Theorem~\ref{thm:SurfacesRelBoundary}, a solution to Problem~\ref{prob:Boundary} would make it possible to fully determine the classification of properly embedded~$\Z$-surfaces in a simply-connected~$4$-manifold~$N$ with boundary~$S^3$ up to equivalence rel.\ boundary: for every~$g \geq 0$, there would be precisely one~$\Z$-surface of genus~$g$ in~$N$ with boundary~$K$ for every element of~$\Aut(\Bl_K)/\Aut (\lambda)$, where~$\lambda$ ranges across all~$(N,K,g)$-realisable forms. \item If one dropped the rel.\ boundary condition, then one might conjecture that for every~$g \geq 0$, in a simply-connected~$4$-manifold~$N$ with boundary~$S^3$, there is precisely one~$\Z$-surface of genus~$g$ with boundary~$K$ for every element of~$\Aut(\partial \lambda)/\left( \Aut (\lambda) \times \Homeo^+(E_K,\partial)\right)$, where~$\lambda$ ranges across~$(N,K,g)$-realisable forms. If the conjecture were true, then a solution to Problem~\ref{prob:Boundary} would provide a complete description of the set of properly embedded~$\Z$-surfaces in a simply-connected~$4$-manifold~$N$ with boundary~$S^3$, up to equivalence. \end{enumerate} \section{Ubiquitous exotica}\label{sec:ubiq} In this section we demonstrate the failure of our topological classification to hold in the smooth setting. In Subsection \ref{sub:exoticbackground} we set up some preliminaries we will require about Stein 4-manifolds and corks. In Subsection \ref{sub:exoticproofs} we give the proofs of Theorems \ref{thm:exoticmanifolds} and \ref{thm:exoticdiscs} from the introduction. In this section, all manifolds and embeddings are understood to be smooth.
\subsection{Background on Stein structures and corks}\label{sub:exoticbackground} We will be concerned with arranging that certain compact 4-manifolds with boundary admit a Stein structure. The unfamiliar reader can think of this as a particularly nice symplectic structure. Abusively, we will say that any smooth 4-manifold which admits a Stein structure is Stein. The reason for this sudden foray into geometry is to take advantage of restrictions on the genera of smoothly embedded surfaces representing certain homology classes in Stein manifolds. These restrictions will aid us in demonstrating that two 4-manifolds are not diffeomorphic. In this section, we will recall both a combinatorial condition for ensuring that a 4-manifold is Stein and the restrictions on smooth representatives of certain homology classes in Stein manifolds. We use the conventions and setup of \cite{GompfStein} throughout. \begin{figure}\center \def\svgwidth{.4\linewidth}\input{Gompfform.pdf_tex} \caption{The left hand side shows a handle diagram for a boundary connected sum of copies of~$S^1 \times D^3$. On the right hand side, the tangle diagram~$T$ satisfies the conventions of a front diagram. } \label{fig:gompfform} \end{figure} We begin by recalling a criterion to ensure that a handle diagram with a unique 0-handle and no 3- or 4-handles describes a Stein 4-manifold. Recall that we can describe~$\natural_{i=1}^r S^1\times B^3$ using the dotted circle notation for 1-handles as in the left frame of Figure~\ref{fig:gompfform}. It is not hard to show that any link in~$\#_{i=1}^r S^1\times S^2$ can be isotoped into the position shown in the right frame of Figure \ref{fig:gompfform}, where inside the tangle marked~$T$ we require that the diagram meet the conventions of a front diagram for the standard contact structure on~$S^3$. For details on front diagrams, see \cite{EtnyreLec}; stated briefly, this amounts to isotoping the diagram so that all vertical tangencies are replaced by cusps and so that at each crossing the more negatively sloped strand goes over. We note that front diagrams require oriented links; we can choose orientations on our 2-handle attaching spheres arbitrarily, since orienting the link does not affect the 4-manifold. Thus any handle diagram with a unique 0-handle and no 3- or 4-handles can be isotoped into the form of the right frame of Figure~\ref{fig:gompfform}; we say that such a diagram is in \emph{Gompf standard form}. For a diagram in Gompf standard form, let~$L_i^T$ denote the tangle diagram obtained by restricting the~$i^{th}$ component~$L_i$ of the diagram of~$L$ to~$T$. The \textit{Thurston-Bennequin number}~$TB(L_i)$ of~$L_i$ is then defined as $$TB(L_i)=w(L^T_i)-c(L_i^T)$$ where~$w(L_i^T)$ denotes the writhe of the tangle and~$c(L_i^T)$ denotes the number of left cusps. In this setup, the following criterion is helpful to prove that handlebodies are Stein. \begin{theorem}[\cite{Eliash,GompfStein}, see also Theorem 11.2.2 of \cite{GSbook}]\label{thm:Stein} A smooth 4-manifold~$X$ with boundary is Stein if and only if it admits a handle diagram in Gompf standard form such that the framing~$f_i$ on each 2-handle attaching curve~$L_i$ has~$f_i=TB(L_i)-1$. \end{theorem}
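To illustrate the criterion in the simplest situation (this example plays no role in what follows), suppose that there are no 1-handles, so that a diagram in Gompf standard form is simply a front diagram for the standard contact structure on $S^3$. The standard Legendrian unknot, whose front has no crossings and a single left cusp, has $TB=0-1=-1$; a single 2-handle attached along it with framing $-2=TB-1$ therefore yields a Stein 4-manifold, namely the $D^2$-bundle over $S^2$ with Euler number $-2$.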
\begin{figure}[!htbp] \center \def\svgwidth{.25\linewidth}\input{Stab.pdf_tex} \caption{Stabilising a front diagram.} \label{fig:stab} \end{figure} \begin{remark}\label{rem:largeTB} The `if' direction of Theorem \ref{thm:Stein} holds under the weaker hypothesis that each 2-handle attaching curve~$L_i$ has~$f_i\le TB(L_i)-1$. To see this, observe that any 2-handle attaching curve~$L_i$ can be locally isotoped via the \textit{stabilisations} demonstrated in Figure~\ref{fig:stab}, and that stabilisation preserves the condition on~$T$ and lowers the Thurston-Bennequin number of~$L_i$ by one. The claim now follows since we can stabilise any 2-handle in a diagram in Gompf standard form to lower its Thurston-Bennequin number without changing the smooth 4-manifold described. \end{remark} We will also make use of the following special case of the adjunction inequality for Stein manifolds. \begin{theorem}[\cite{LiscaMatic}]\label{thm:adjunct} In a Stein manifold~$X$, any homology class~$\alpha\in H_2(X)$ with~$\alpha\cdot\alpha =-1$ cannot be represented by a smoothly embedded sphere. \end{theorem} \begin{proof} The proof can be deduced by combining~\cite[Theorem 3.2]{LiscaMatic} with~\cite{Brussee,FriedmanMorgan}; further exposition can be found in~\cite[Theorems 1.2 and 1.3]{AkbulutMatveyev}. \end{proof} \begin{figure}[!htbp] \center \def\svgwidth{.75\linewidth}\input{Cork.pdf_tex} \caption{Two fillings of the boundary of the Akbulut cork, with boundary homeomorphism~$\delta'^{-1}\circ \delta$. Here and throughout the rest of the paper, all handle diagrams drawn in this horizontal format should be braid closed. } \label{fig:cork} \end{figure} In order to handily construct pairs of homeomorphic 4-manifolds, we will make use of \textit{cork twisting}. Define~$C$ to be the contractible 4-manifold in the left frame of Figure~\ref{fig:cork}, which is commonly referred to as the \textit{Akbulut cork}. Observe that~$\partial C$ admits another contractible filling~$C'$ given by the right frame of Figure~\ref{fig:cork}, and that there is a natural homeomorphism~$\tau:=(\delta')^{-1} \circ \delta \colon \partial C \to \partial C'$ demonstrated in the figure. By work of Freedman \cite{Freedman}, the homeomorphism~$\tau$ extends to a homeomorphism~$T \colon C\to C'$. As a result, for any 4-manifold~$W$ with an embedding~$\iota \colon C\hookrightarrow W$, one can construct a new~$4$-manifold~$W':= (W\smallsetminus \iota(C))\cup _{(\iota|_\partial) \circ \tau^{-1}} C'$ and, combining the identity homeomorphism~$\id_{W\smallsetminus \iota(C)}$ with~$T$, one sees that~$W$ and~$W'$ are homeomorphic. Historically, the literature has been concerned with two types of exotic phenomena. If smooth 4-manifolds~$X,X'$ with boundary admit a homeomorphism~$F \colon X\to X'$ but no diffeomorphism $G \colon X\to X'$ such that~$G|_\partial$ is isotopic to~$F|_\partial$, we call~$X$ and~$X'$ \textit{relatively exotic}. If smooth 4-manifolds~$X,X'$ admit a homeomorphism $F \colon X\to X'$ but no diffeomorphism $G \colon X\to X'$, we call~$X$ and~$X'$ \textit{absolutely exotic}. It is easier to build relatively exotic pairs in practice. Fortunately, work of Akbulut and Ruberman shows that relative exotica always yield absolute exotica. \begin{theorem}[Theorem A of \cite{AR16}]\label{thm:absolute} Let $M$ and $M'$ be smooth 4-manifolds and let $F \colon M\to M'$ be a homeomorphism whose restriction to the boundary is a diffeomorphism that does not extend to a diffeomorphism $M\to M'$.
Then $M$ $($resp.\ $M')$ contains a smooth codimension $0$ submanifold~$V$ $($resp.\ $V')$ which is orientation-preserving homotopy equivalent to $M$ $($resp.\ $M')$ such that $V$ is homeomorphic but not diffeomorphic to $V'$. \end{theorem} If $\partial M$ and $\partial M'$ are nonempty, then $V$ and $V'$ necessarily also have nonempty boundaries since they are codimension zero submanifolds of manifolds with boundary. We remark that Akbulut-Ruberman's theorem is only stated when $M$ is diffeomorphic to $M'$ (hence by applying a reference identification, they can in fact just call both manifolds $M$). However their proof works verbatim when $M$ and $M'$ are just homeomorphic smooth manifolds, which is the hypothesis we take above. \subsection{Proof of Theorems \ref{thm:exoticmanifolds} and \ref{thm:exoticdiscs}}\label{sub:exoticproofs} \black We prove Theorem~\ref{thm:exoticmanifolds} from the introduction, which for convenience we state again here in more detail: \begin{theorem} \label{thm:ExoticmanifoldsNotIntro} For every Hermitian form~$(H,\lambda)$ over~$\Z[t^{\pm 1}]$ there exists a pair of smooth $\Z$-manifolds~$M$ and~$M'$ with boundary and fundamental group~$\Z$, such that: \begin{enumerate} \item there is a homeomorphism~$F \colon M\to M'$; \item $F$ induces an isometry $\lambda_M \cong \lambda_{M'}$, and both forms are isometric to~$\lambda$; \item there is no diffeomorphism from~$M$ to~$M'$. \end{enumerate} In other words, every Hermitian form~$(H,\lambda)$ over~$\Z[t^{\pm 1}]$ is exotically realisable. \end{theorem} \begin{proof} Let~$A(t)$ be a matrix representing the given form~$\lambda$, so that~$A(1)$ is an integer valued matrix. Choose any framed link~$L=\cup L_i\subset S^3$ with linking matrix~$A(1)$ and let~$M_1$ be the 4-manifold obtained from~$D^4$ by attaching~$A(1)_{ii}$-framed 2-handles to~$D^4$ along~$L_i$. Let~$M_2$ be the 4-manifold obtained from~$M_1$ by attaching a 1-handle (which we will think of as removing the tubular neighborhood of a trivial disc for an unknot split from~$L$). Thus~$\pi_1(M_2)\cong \Z$ and both the integer valued intersection form~$Q_{M_2}$ and the equivariant intersection form~$\lambda_{M_2}$ are represented by a matrix for~$\lambda(1)$. \begin{figure}[!htbp] \center \def\svgwidth{.5\linewidth}\input{Fuckjprime.pdf_tex} \caption{Arbitrary Hermitian forms can be realised as equivariant intersection forms by repeatedly performing the following local move, which we illustrate for~$k=2$.} \label{fig:J'} \end{figure} Now we will modify the handle diagram of~$M_2$ in a way which will preserve the fundamental group and integer valued intersection form, but will result in an~$M_3$ with equivariant intersection form~$\lambda_{M_3}\cong\lambda$. For pairs~$i,j$ with~$i < j$, for each monomial~$\ell t^k$ in the polynomial~$A(t)_{ij}$, perform the local modification exhibited in Figure \ref{fig:J'}. Observe (for later use) that this move does not change the framed link type of the link of attaching spheres of 2-handles. Furthermore, the modification does not change the fundamental group or the integer valued intersection form of~$M_2$. We exhibit in Figure \ref{fig:coverex} what the cover looks like locally after the modification. \begin{figure}[!htbp] \center \def\svgwidth{.99\linewidth}\input{Modcoverlinking.pdf_tex} \caption{A local picture of the cover after our local modification with $k=2$. 
When $k>0$ the twist parameter $\epsilon$ is $1-k$, when $k<0$ it is $-k-1$.}\label{fig:coverex} \end{figure} Recall from Remark \ref{rem:EquivariantIntersections} that for elements~$[\widetilde{a}],[\widetilde{b}]\in H_2(M_2,\Z[t^{\pm1}])$ the equivariant intersection form satisfies $$ \lambda_{M_2}([\widetilde{b}],[\widetilde{a}])=\sum_k (\widetilde{a} \cdot_{M_3^\infty} t^k \widetilde{b} ) t^{-k}.$$ Thus we see that after each iteration of the local move we have that~$\lambda_{M_2'}(t)_{ij}=\lambda_{M_2}(t)_{ij}-\ell +\ell t^k$ and~$\lambda_{M_2'}(t)_{ji}=\lambda_{M_2}(t)_{ji}-\ell+\ell t^{-k}$. For pairs~$i=j$, for each monomial~$\ell t^k$ with~$k>0$ in the polynomial~$A(t)_{ii}$, again perform the local modification in Figure \ref{fig:J'}. In this case, one finds that \begin{equation}\label{eq:diagonalterms} \lambda_{M_2'}(t)_{ii}=\lambda_{M_2}(t)_{ii}-2\ell +\ell t^k+\ell t^{-k}. \end{equation} The non-constant terms of~\eqref{eq:diagonalterms} are straightforward to deduce. The constant term is computed by considering a parallel of $\Hi$ downstairs which is 0-framed in the modification region, lifting the framing curve into the cover, and then computing the linking of the lift of the framing with $\widetilde{\Hi}$. Once these modifications are complete, we obtain a 4-manifold~$M_3$ with~$\lambda_{M_3}$ agreeing with~$\lambda$ everywhere except \emph{a priori} on the constant terms of each~$A(t)_{ij}$. Observe however that since these local modifications do not change the integer valued intersection form~$\lambda(1)$, we have that~$\lambda_{M_3}$ must also agree with~$\lambda$ on the constant terms of each~$A(t)_{ij}$. Thus, when we are finished, we have a smooth 4-manifold~$M_3$ with no $3$-handles,~$\pi_1(M_3) \cong \Z$ and~$\lambda_{M_3}\cong \lambda$. \begin{figure}[!htbp] \center \def\svgwidth{.35\linewidth}\input{Equivdouble.pdf_tex} \caption{The knot $K$ in $S^1 \times S^2$. A handle diagram for the $4$-manifold $X$ is obtained from this diagram by dotting the black unknot and attaching a $0$-framed $2$-handle to $K$. } \label{fig:equivdouble} \end{figure} Next we will modify the 2-handles of our handle diagram ~$\sH$ of~$M_3$ to get a Stein 4-manifold~$M_4$ with the same fundamental group and equivariant intersection form as~$M_3$. We will do this by getting the handle diagram into a form where we can apply Eliashberg's Theorem~\ref{thm:Stein}, which requires arranging that each 2-handle has a suitably large Thurston-Bennequin number. To begin, isotope~$\sH$ into Gompf standard form, so that we think of the~$2$-handles of~$\sH$ as a Legendrian link in the standard tight contact structure on~$S^1\times S^2$. If any of the 2-handle attaching curves do not have any cusps, stabilise once so that they do. Let~$A_3(t)$ be the equivariant linking matrix of~$\sH$; note that $A_3(t)=A(t)$ is a matrix representing the equivariant intersection form $\lambda$. Let~$K$ be the knot in~$S^1\times S^2$ exhibited in Figure \ref{fig:equivdouble}. Observe that if we use~$K$ to describe a 4-manifold~$X$ via attaching a~$0$-framed 2-handle to~$S^1\times B^3$ along~$K$, then~$\pi_1(X)\cong \Z$ and the equivariant intersection form~$\lambda_X$ is represented by the size one matrix~$(0)$. Observe further that~$K$ has a Legendrian representative~$\mathcal{K}$ (illustrated in Figure~\ref{fig:equivdouble}) in the standard tight contact structure on~$S^1\times S^2$ with~$\TB(\mathcal{K})=1$. 
In our handle diagram~$\sH$ of~$M_3$, let~$\mathring{K}$ be a copy of~$K$ in~$S^1\times S^2$ which is split from all of the 2-handles of~$\sH$, as depicted in the left frame of Figure \ref{fig:connectsum}. \begin{figure}[!htbp] \center \def\svgwidth{.85\linewidth} \input{cts.pdf_tex} \caption{The connect sum band can be taken with a sufficiently positive slope that choosing it to pass under any strands in the tangle~$T$ causes the diagram to remain in Gompf standard form.}\label{fig:connectsum} \end{figure} Now for any handle~$\Hi$ of~$\sH$ with $A_3(1)_{ii}>\TB(\Hi)-2$ form~$\Hi'$ by taking the connected sum of~$\Hi$ with~a split copy of~$\mathring{K}$ in the manner depicted in Figure \ref{fig:connectsum}. Frame~$\Hi'$ using the same diagrammatic framing instruction that was used to frame~$\sH_i$. One computes readily from the right frame of Figure \ref{fig:connectsum} that~$\TB(\Hi')=\TB(\Hi)+1$. Repeat this process until~$A_3(1)_{ii}\le \TB(\Hi)-2$ for all 2-handles. Let~$M_4$ be the resulting 4-manifold. Then~$M_4$ is Stein by Theorem~\ref{thm:Stein} and Remark~\ref{rem:largeTB}. Further, since~$X$ contributes neither to the equivariant intersection form nor to~$\pi_1$, we have that~$M_4$ has the same equivariant intersection form and~$\pi_1$ as~$M_3$. We record (for later use) the observation that the link in~$S^3$ consisting of the attaching spheres of the 2-handles is unchanged by these modifications; one can see this by ignoring the 1-handle in Figure~\ref{fig:connectsum} and doing a bit of isotopy. \begin{figure}[!htbp] \centering \def\svgwidth{.6\linewidth}\input{Insertcork.pdf_tex} \caption{The local modification performed on the handle $\mathcal{H}_1$ of the manifold $M_3$. } \label{fig:insertcork} \end{figure} Now we will make a final modification to~$M_4$ to get a 4-manifold~$M_5 =: M$ which we can cork twist to get~$M'$. Choose any 2-handle, without loss of generality we choose $\sH_1$, and perform the local modification described in Figure \ref{fig:insertcork}; the resulting 4-manifold is our~$M$. One can readily check that this local modification does not impact~$\pi_1$ or the equivariant intersection form. Further, this local diagram can be readily converted to Gompf standard form, (see the blue and green handles of Figure \ref{fig:stillstein}) where we have~$A_3(1)_{ii}\le \TB(\Hi) -1$ for all 2-handles, hence~$M$ is Stein. By construction,~$M$ contains a copy of the Akbulut cork~$C$. Because $M$ has no 3-handles, $\pi_1(\partial M)$ surjects~$\pi_1(M)$. \begin{figure}[!htbp] \center \def\svgwidth{.25\linewidth}\input{Stillstein.pdf_tex} \caption{A handle diagram for the manifold $W$ in Gompf standard form.}\label{fig:stillstein} \end{figure} Now define~$M'$ to be the 4-manifold obtained from~$M$ by twisting~$C$. Since there is a homeomorphism~$T\colon C\to C$ extending the twist homeomorphism~$\tau \colon \partial C\to\partial C$, there is a natural homeomorphism~$F \colon M\to M'$; let~$f$ denote the restriction~$f\colon \partial M\to \partial M'$. It remains to show that $M$ and $M'$ are not diffeomorphic. We will begin by showing the relative statement, i.e. there is no diffeomorphism~$G\colon M\to M'$ such that~$G|_\partial = f$. It would be convenient if at this point we could distinguish $M$ and $M'$ directly by showing that one is Stein and one is not. Unfortunately, both are Stein. So instead we will consider auxiliary manifolds $W$ and $W'$ constructed as follows. Suppose for a contradiction that there were such a diffeomorphism~$G$. 
Construct a 4-manifold~$W$ by attaching a~$(-1)$-framed 2-handle to~$M$ along~$\gamma$ (where~$\gamma$ is the curve in~$\partial M$ marked in Figure \ref{fig:insertcork}) and a second 4-manifold~$W'$ from~$M'$ by attaching a 2-handle to~$M'$ with attaching sphere and framing given by~$(f(\gamma),-1)$. \footnote{The $(-1)$-framing instruction for $f(\gamma)$ requires a diagram of $f(\gamma)$ in $\partial M'$. Because $f$ is a dot-zero homeomorphism, we can use the exact same diagram as we used for $\gamma$ in $\partial M$.} Notice that the image under $f$ of a~$(-1)$-framing curve for $\gamma$ is in fact a~$(-1)$-framing curve for $f(\gamma)$. The diffeomorphism $G$ extends to give a diffeomorphism~$\widehat{G}\colon W\to W'$. In Figure~$\ref{fig:stillstein}$, we have exhibited the natural handle diagram for~$W$ in Gompf standard form, from which Theorem \ref{thm:Stein} implies that~$W$ admits a Stein structure. We will finish showing that $f$ does not extend by demonstrating that~$W'$ does not admit any Stein structure, thus~$W$ cannot be diffeomorphic to~$W'$. Since~$W'$ is obtained from~$W$ by reversing the dot and the zero on the handles of~$C$,~$f(\gamma)$ is just a meridian of a 2-handle of~$M'$. Thus the final 2-handle of~$W'$ is attached along a curve which bounds a disc in~$M'$, implying that there is a~$(-1)$-framed sphere embedded in~$W'$. But the adjunction inequality for Stein manifolds (recall Theorem~\ref{thm:adjunct}) indicates that no 4-manifold which admits a Stein structure can contain an embedded sphere with self-intersection~$-1$. Hence,~$W$ is not diffeomorphic to~$W'$, thus there cannot be a diffeomorphism~$ G \colon M\to M'$ extending~$f$. Now we would like to extend this to a statement about absolute exotica. To do so, we apply Theorem \ref{thm:absolute} to our $M, M'$, and $f$ to produce a pair of smooth 4-manifolds $V$ and $V'$ (both of which have nonempty boundary) which are homeomorphic but not diffeomorphic. Since $V$ and~$V'$ are orientation-preserving homotopy equivalent to $M$ and $M'$ respectively, the equivariant intersection forms $\lambda_V$ and $\lambda_{V'}$ are also isometric to $\lambda$, and both $V$ and $V'$ have fundamental group $\Z$. Since~$V$ and $V'$ are homeomorphic, so are $\partial V$ and $\partial V'$. \end{proof} Next, we prove Theorem~\ref{thm:exoticdiscs} from the introduction, again stated here in more detail. If one wants to show that any $2$-handlebody~$N$ with boundary~$S^3$ contains a pair of exotic $\Z$-discs one can run the same proof, where in the first line~$\mathcal{H}'$ is chosen to be a handle diagram for~$N$; this was mentioned in Remark~\ref{rem:Smooth}. \begin{theorem} \label{thm:ExoticDiscsMain} For every Hermitian form~$(H,\lambda)$ over~$\Z[t^{\pm 1}]$ such that~$\lambda(1)$ is realised as the intersection form of a smooth simply-connected 4-dimensional 2-handlebody~$N$ with~$\partial N\cong S^3$, there exists a pair of smooth~$\Z$-discs~$D$ and~$D'$ in~$N$ with the same boundary and the following properties: \begin{enumerate} \item the equivariant intersection forms~$\lambda_{N_D}$ and~$\lambda_{N_{D'}}$ are isometric to~$\lambda$; \item $D$ is topologically isotopic to~$D'$ rel.\ boundary; \item $D$ is not smoothly equivalent to~$D'$ rel.\ boundary. \end{enumerate} \end{theorem} \begin{proof} Let~$\sH'$ be a handle diagram for a 2-handlebody with~$S^3$ boundary and such that~$Q_N$ isometric to~$\lambda(1)$. 
Let~$D$ be the standard disc for a local unknot in~$\partial N$, and as usual let~$N_D$ be its exterior, which has handle diagram~$\sH:=\sH'\cup 1-$handle. Akin to the proof of Theorem \ref{thm:exoticmanifolds}, we will now modify the linking of the handles of~$\sH$ to get a Stein manifold with equivariant intersection form $\lambda$. However, we also want to do so in such a way that the manifold presented by~$\sH$ is still~$N_{D'}$ for some smooth disc~$D'$ properly embedded in~$N$. We claim that if we modify only the linking of the 2-handles with the 1-handle, and not the linking of the 2-handles with each other nor the knot type or framing of the 2-handles, we will have that~$\sH$ presents such an~$N_{D'}$. To prove the claim, first observe that $X$ is the exterior of a disc in~$N$ if and only if~$N$ can be obtained from~$X$ by adding on a single 2-handle. Observe that adding a 0-framed 2-handle to the meridian of a 1-handle in dotted circle notation allows us to erase both the new 2-handle and the 1-handle. Thus, if our modifications only change the way the 2-handles of $N$ link the new one-handle, we will still have the property that after a single 2-handle addition we obtain $N$, thus our manifold is the exterior of a disc embedded in $N$. This concludes the proof of the claim. \begin{figure}[!htbp] \center \def\svgwidth{.6\linewidth}\input{Kylesdisks.pdf_tex} \caption{In both frames the red and blue handles give a nonstandard handle diagram for~$D^4$, and in both frames the green knot~$K\subset S^3$ bounds a disc disjoint from the 1-handle; these are our two discs~$\Sigma$ and~$\Sigma'$ for~$K$ in~$D^4$. The handle diagrams here present~$D^4_\Sigma$ and~$D^4_{\Sigma'}$.}\label{fig:kylesdiscs} \end{figure} Now observe that all of the modifications we performed in the proof of Theorem~\ref{thm:exoticmanifolds} to get from~$M_2$ to~$M_4$ modified only the linking of the 2-handles with the 1-handle, and not the linking of the 2-handles with each other nor the knot type or framing of the 2-handles. Thus we can again perform those same modifications to our~$\sH$ to obtain a smooth~$\Z$-disc~$D'$ properly embedded in~$N$ such that the resulting~$\sH$ is a handle diagram for~$N_{D'}$ in Gompf standard form satisfying Eliashberg's criteria and such that the equivariant intersection form of the exterior is~$\lambda_{N_{D'}}\cong\lambda$. Notice in particular that~$N_{D'}$ is Stein. Now let~$\Sigma$ and~$\Sigma'$ be the pair of slice discs for~$K$ in~$D^4$ exhibited in Figure \ref{fig:kylesdiscs}. These discs were constructed following the techniques of \cite{Hayden}. It is elementary to check from the exhibited handle diagrams that both discs have~$\pi_1(D^4_\Sigma)=\pi_1(D^4_{\Sigma'})=\Z$ and are ribbon. It is then a consequence of \cite[Theorem 1.2]{ConwayPowellDiscs} that~$\Sigma$ is topologically isotopic to~$\Sigma'$ rel.\ boundary. \begin{figure}[!htbp] \center \def\svgwidth{.8\linewidth}\input{RandRprime.pdf_tex} \caption{The left frame gives a handle diagram for~$N_R$, and the right for~$N_{R'}$. The top black 2-handles and tangle~$T$ represent the handle diagram of~$N_{D'}$ in Gompf standard form which we already constructed.}\label{fig:RandRprime} \end{figure} We will construct discs~$R$ and~$R'$ in~$N$ by taking the boundary connect sum of pairs~$(N,R):=(N,D')\natural(D^4,\Sigma)$ and~$(N,R'):=(N,D')\natural(D^4,\Sigma')$. We demonstrate natural handle decompositions for~$N_R$ and~$N_{R'}$ in Figure \ref{fig:RandRprime}. 
It is straightforward to confirm that~$\pi_1(N_R)\cong\pi_1(N_{R'})\cong\Z$. Further, since~$\Sigma$ is topologically isotopic to~$\Sigma'$ in~$D^4$ rel.\ boundary,~$R$ is topologically isotopic in~$N$ to~$R'$ rel.\ boundary. Since~$\Sigma$ and~$\Sigma'$ are~$\Z$-discs in~$D^4$, their exteriors are aspherical~\cite[Lemma~2.1]{ConwayPowell} and so both~$\lambda_{N_\Sigma}$ and~$\lambda_{N_{\Sigma'}}$ are trivial. It is then not hard to show that band summing~$D'$ with~$\Sigma$ or~$\Sigma'$ does not change the equivariant intersection form, so~$\lambda_{N_R}\cong\lambda_{N_{R'}}\cong\lambda_{N_{D'}}$. It remains to show that~$R$ is not smoothly equivalent to~$R'$ rel.\ boundary. If~$R$ were equivalent to~$R'$ rel.\ boundary then there would be a diffeomorphism~$F \colon N_R \to N_{R'}$ which is the identity on the boundary. Let~$\gamma$ and~$\delta$ be the curves in~$\partial N_R = \partial N_{R'}$ demonstrated in Figure~\ref{fig:RandRprime}, and let~$W$ (similarly~$W'$) be formed from~$N_R$ by attaching~$(-1)$-framed 2-handles along~$\gamma$ and~$\delta$. \begin{figure}[!htbp] \center \def\svgwidth{.4\linewidth}\input{Steindisk.pdf_tex} \caption{The black 2-handles here have both framing and~$\TB$ one less than they had in Figure \ref{fig:RandRprime}; since we had already arranged that the tangle~$T$ in Figure \ref{fig:RandRprime} satisfied the framing criteria of Theorem \ref{thm:Stein}, this handle diagram also satisfies the criteria.}\label{fig:NRstein} \end{figure} If a diffeomorphism~$F \colon N_R \to N_{R'}$ extending the identity exists, then~$W$ is diffeomorphic to~$W'$. Observe that~$W'$ does not admit a Stein structure, because the 2-handle along~$\delta$ naturally introduces a~$(-1)$-framed 2-sphere embedded in~$W'$, which violates the Stein adjunction inequality in Theorem~\ref{thm:adjunct}. However,~$W$ admits the handle decomposition given in Figure \ref{fig:NRstein}, which is in Gompf standard form, so Theorem~\ref{thm:Stein} ensures that~$W$ admits a Stein structure. Therefore~$W$ is not diffeomorphic to~$W'$, so there can be no such~$F$, so~$R$ is not smoothly equivalent to~$R'$ rel.\ boundary. \end{proof} \begin{remark} In the above proof,~$R$ is smoothly isotopic to~$R'$ \emph{not} rel.\ boundary, because~$\Sigma$ is smoothly isotopic to~$\Sigma'$ not rel.\ boundary. If we wanted to produce~$R$ and~$R'$ which are not smoothly isotopic (without a boundary condition), we could have instead used a~$\Sigma$ and~$\Sigma'$ which are not isotopic rel.\ boundary and run a similar argument. Such~$\Sigma$ and~$\Sigma'$ are produced in \cite{Hayden}; we have not pursued this here because the diagrams are somewhat more complicated. \end{remark} \section{Nontrivial boundary automorphism set} \label{sec:NonTrivialbAut} We prove that there are examples of pairs $(Y,\varphi)$ for which the set of 4-manifolds with fixed boundary $Y$ and equivariant intersection form, up to homeomorphism, can have arbitrarily large cardinality. This was alluded to in Example~\ref{ex:LargeStableClassIntro}. The main step in this process is to find a sequence of Hermitian forms~$(H_i,\lambda_i)$ for which~$\big\{\big|\Aut(\partial \lambda_i)/\Aut(\lambda_i)\big|\big\}$ is unbounded. The most direct way to achieve this is when~$H$ has rank~$1$. Indeed, in this case,~$\Aut(\partial \lambda)/\Aut(\lambda)$ can be described in terms of certain units of~$\Z[t^{\pm 1}]/\lambda$, as we now make precise. 
\medbreak Given a ring~$R$ with involution~$x \mapsto \overline{x}$, the group of \emph{unitary units}~$U(R)$ refers to those~$u \in R$ such that~$u \overline{u}=1$. For example, when~$R=\Z[t^{\pm 1}]$, all units are unitary and are of the form~$\pm t^{k}$ with~$k \in \Z$. In what follows, we make no distinction between rank one Hermitian forms and symmetric Laurent polynomials. The next lemma follows by unwinding the definition of~$\Aut(\partial \lambda)$; see also~\cite[Remark 1.16]{ConwayPowell}. \begin{lemma} \label{lem:UnitaryUnits} If~$\lambda \in \Z[t^{\pm 1}]$ is a symmetric Laurent polynomial, then $$\Aut(\partial \lambda)/\Aut(\lambda)=U(\Z[t^{\pm 1}]/\lambda)/U(\Z[t^{\pm 1}]).$$ \end{lemma} Given a symmetric Laurent polynomial~$P \in \Z[t^{\pm 1}]$, use~$n_P$ to denote the number of ways~$P$ can be written as an unordered product~$ab$ of symmetric polynomials~$a,b\in \Z[t^{\pm 1}]$ such that there exist~$x,y\in \Z[t^{\pm 1}]$ with~$ax+by=1$, where the factorisations~$ab$ and~$(-a)(-b)$ are deemed equal. \begin{lemma} \label{lem:NonTrivialbAut} If~$P \in \Z[t^{\pm 1}]$ is a symmetric Laurent polynomial, then~$U(\Z[t^{\pm 1}]/2P)/U(\Z[t^{\pm 1}])$ contains at least~$n_P$ elements. \end{lemma} \begin{proof} A first verification shows that if~$P$ factorises as~$P=ab$ where~$a,b\in \Z[t^{\pm 1}]$ are symmetric polynomials and satisfy~$ax+by=1$, then~\[\Phi(a,b):=-ax+by\] is a unitary unit in~$\Z[t^{\pm 1}]/2P$, i.e.\ belongs to~$U(\Z[t^{\pm 1}]/2P)$: \begin{align*} (-ax+by)\overline{(-ax+by)} &=a\overline{a}x \overline{x}+b\overline{b}y\overline{y}-ax\overline{b}\overline{y}-\overline{a}\overline{x}by=a\overline{a}x \overline{x}+b\overline{b}y\overline{y}-ab(x\overline{y}+\overline{x}y)\\ &\equiv a\overline{a}x \overline{x}+b\overline{b}y\overline{y}+ab(x\overline{y}+\overline{x}y) =(ax+by)\overline{(ax+by)} =1. \end{align*} It can also be verified that~$\Phi(a,b)$ depends neither on the ordering of~$a,b$ nor on the choice of~$x,y$. The former check is immediate from the definition of~$\Phi$ because~$-1 \in U(\Z[t^{\pm 1}])$. We verify that the assignment does not depend on the choice of~$x,y$. Assume that~$ax+by=1=ax'+by'$ for~$x,x',y,y'\in \Z[t^{\pm 1}]$. We deduce that~$ax'=1=ax$ mod~$b$ and~$by'=1=by$ mod~$a$. But now~$x' \equiv (ax)x'=x(ax')=x$ mod~$b$ and similarly~$y'=y$ mod~$a$ so that~$x'=x+k b$ and~$y'=y+\ell a$ for~$k,\ell \in \Z[t^{\pm 1}]$. Expanding~$ax'+by'=1$, it follows that~$k=-\ell$. Therefore $$ -ax'+by'=-a(x+kb)+b(y-k a) \equiv -ax+by.$$ We will prove that if~$\Phi(a,b)=v \cdot \Phi(a',b')$ for some unit~$v \in U(\Z[t^{\pm 1}])$, then~$(a,b)=\pm (a',b')$ or~$(a,b)=\pm (b',a')$. It then follows that for any two ways~$(a,b)$ and~$(a',b')$ of factorising~$P$, distinct up to sign and up to reordering, the resulting elements~$\Phi(a,b)$ and~$\Phi(a',b')$ are distinct in~$U(\Z[t^{\pm 1}]/2P)/U(\Z[t^{\pm 1}])$, from which the lemma follows. Assume that~$x,x',y,y' \in \Z[t^{\pm 1}]$ are such that~$ax+by=1=a'x'+b'y'$ and~$-ax+by=v(-a'x'+b'y')$ mod~$2P$. Add~$2ax+2a'x'v$ to both sides of the congruence~$-ax+by=v(-a'x'+b'y')$ mod~$2P$. Using that~$ax+by=1$ and~$a'x'+b'y'=1$, we obtain the congruence \begin{equation}\label{eq:aaaaaaah} 2ax+v=2a'x'v+1 \text{ mod } 2P. \end{equation} Similarly, add~$-2by+2a'x'v$ to both sides of~$-ax+by=v(-a'x'+b'y')$ mod~$2P$. Using that~$ax+by=1$ and~$a'x'+b'y'=1$, we obtain the equation \begin{equation} \label{eq:bbbbbbh} -2by+v=2a'x'v-1 \text{ mod } 2P.
\end{equation} We deduce from the previous two equations that~$v+1$ and~$v-1$ are divisible by~$2$. Since~$v=\pm t^k$, we deduce that~$\pm t^k \pm 1$ is divisible by~$2$ and so~$v=\pm 1$. First, we treat the case where the unit is~$v=1$. \begin{claim} We have~$(i)$~$a$ divides~$a'$, and~$(ii)$~$a'$ divides~$a$. \end{claim} \begin{proof} As~$v=1$, \eqref{eq:aaaaaaah} implies that~$2ax=2a'x'$ mod~$2P$. Writing~$2P=2ab$, and simplifying the~$2$s, we deduce that~$a$ divides~$a'x'$. Similarly, writing~$2P=2a'b'$, and simplifying the~$2$s, we deduce that~$a'$ divides~$ax$. Next, multiply the equations~$1=ax+by$ (resp.\ $1=a'x'+b'y'$) by~$a$ (resp.\ $a'$) to obtain \begin{align*} a&=a^2x+aby \\ a'&={a'}^2x'+a'b'y'. \end{align*} Since~$a'$ divides~$ax$ and~$ab=P=a'b'$, it follows that~$a'$ divides~$a$. The same reasoning with the second equation shows that~$a$ divides~$a'$. This concludes the proof of the claim. \end{proof} Using the claim, we have~$a=ua'$ for some unit~$u$; this unit is necessarily symmetric since both~$a$ and~$a'$ are symmetric. It follows that~$a'b'=ab=ua'b$ with $u=\pm 1$. We deduce~$b'=ub$ and therefore~$b=b'/u$. Thus~$(a,b)=u \cdot (a',b')$ as required, in the case~$v=1$. Next, we treat the case where the unit is~$v=-1$. \begin{claim} We have~$(i)$~$b$ divides~$a'$, and~$(ii)$~$a'$ divides~$b$. \end{claim} \begin{proof} As~$v=-1$, \eqref{eq:bbbbbbh} implies that~$-2by=2a'x'$ mod~$2P$. Writing~$2P=2ab$, and simplifying the~$2$s, we deduce that~$b$ divides~$a'x'$. Similarly, writing~$2P=2a'b'$, and simplifying the~$2$s, we deduce that~$a'$ divides~$by$. Next, multiply the equations~$1=ax+by$ (resp.\ $1=a'x'+b'y'$) by~$b$ (resp.\ $a'$) to obtain \begin{align*} b&=abx+b^2y \\ a'&={a'}^2x'+a'b'y'. \end{align*} Since~$a'$ divides~$by$ and~$ab=P=a'b'$, it follows that~$a'$ divides~$b$. The same reasoning with the second equation shows that~$b$ divides~$a'$. This concludes the proof of the claim. \end{proof} Using the claim, we have~$b=ua'$ for some unit~$u$; this unit is necessarily symmetric since both~$b$ and~$a'$ are symmetric. It follows that~$a'b'=ab=uaa'$ with $u=\pm 1$. We deduce~$b'=ua$ and therefore~$a=b'/u$. Thus~$(a,b)=u \cdot (b',a')$ as required, in the case that~$v=-1$. This completes the proof that~$\Phi(a,b)=v \cdot \Phi(a',b')$ implies~$(a,b)=\pm (a',b')$ or~$(a,b)=\pm (b',a')$, which completes the proof of the lemma. \end{proof} Over~$\Z$, it is not difficult to show that if~$N$ is an integer that can be factored as a product of~$n$ distinct odd primes, then~$U(\Z/N)/U(\Z)$ contains precisely~$2^{n-1}$ elements (for instance, for~$N=15$ these classes are represented by~$1$ and~$4$). Using Lemma~\ref{lem:NonTrivialbAut}, the next example shows that a similar lower bound (which is not in general sharp) holds over~$\Z[t^{\pm 1}]$. \begin{example} \label{ex:Integer} The reader can check that if~$P$ is an integer that can be factored as a product~$p_1\cdots p_n$ of~$n$ distinct primes, then~$n_P=2^{n-1}$. Lemma~\ref{lem:NonTrivialbAut} implies that~$U(\Z[t^{\pm 1}]/2P)/U(\Z[t^{\pm 1}])$ contains at least~$2^{n-1}$ elements. \end{example} \begin{remark} In order to produce examples, there is no need to restrict~$P$ to be an integer. Take~$P = q_1 \cdots q_n$, where the~$q_i$ are symmetric Laurent polynomials such that for every~$i\neq j$, there exist~$x,y\in \Z[t^{\pm 1}]$ with~$q_ix+q_jy=1$. The latter condition implies, via a straightforward induction on $n$, that there exist such~$x,y$ for any pair of polynomials~$q_{i_1} \cdots q_{i_k}$ and~$q_{i_{k+1}} \cdots q_{i_n}$ with~$\{i_1,\dots,i_n\} = \{1,\dots,n\}$ obtained from factoring~$P$.
Then by applying~$\Phi$ we can obtain examples of~$P$ such that~$U(\Z[t^{\pm 1}]/2P)/U(\Z[t^{\pm 1}])$ has cardinality at least~$2^{n-1}$. However, this level of generality is not strictly necessary, as Example~\ref{ex:Integer}, in which~$P$ is an integer, suffices to prove Proposition~\ref{prop:LargeStableClass} below. \end{remark} We now prove the main result of this section that was mentioned in Example~\ref{ex:LargeStableClassIntro} from the introduction: there are examples of pairs~$(Y,\varphi)$ for which the set of 4-manifolds with fixed boundary~$Y$ and equivariant intersection form, up to homeomorphism, can have arbitrarily large cardinality. Recall that $\mathcal{V}_\lambda^0(Y)$ and~$\mathcal{V}_\lambda(Y)$ were defined in Definitions~\ref{def:V0lambdaY} and~\ref{def:VlambdaY} respectively. \begin{proposition} \label{prop:LargeStableClass} For every~$m\ge 0$, there is a pair~$(Y,\varphi)$ and a Hermitian form~$(H,\lambda)$ so that~$\mathcal{V}_\lambda^0(Y)$ and~$\mathcal{V}_\lambda(Y)$ have at least~$m$ elements. \end{proposition} \begin{figure}[!htbp] \center \def\svgwidth{.6\linewidth}\input{Trivialsym.pdf_tex} \caption{Left frame: the complement of~$\gamma$ is a hyperbolic 3-manifold~$Z$ with trivial mapping class group. Right frame: This $\Z$-manifold~$W_n$ has equivariant intersection form~$(n)$ and, for~$n$ sufficiently large, boundary~$\partial W_n$ with trivial mapping class group.}\label{fig:trivialsym} \end{figure} \begin{proof} Since the cardinality of~$\mathcal{V}_\lambda^0(Y)$ is at least that of~$\mathcal{V}_\lambda(Y)$, it suffices to prove that the latter set can be made arbitrarily large. However, since the proof involving~$\mathcal{V}_\lambda^0(Y)$ is substantially less demanding, we include it as a quick warm-up. Set~$\lambda:=2P$ where~$P$ is an integer that can be factored as a product~$p_1\cdots p_k$ of~$k$ distinct primes with~$2^{k-1} \geq m$. Example~\ref{ex:Integer} and Lemma~\ref{lem:NonTrivialbAut} imply that~$U(\Z[t^{\pm 1}]/\lambda)/U(\Z[t^{\pm 1}])$ has at least~$2^{k-1}$ elements. By Lemma~\ref{lem:UnitaryUnits}, this means that~$\Aut(\partial \lambda)/\Aut(\lambda)$ has at least~$2^{k-1}$ elements. As in the proof of Theorem~\ref{thm:ExoticmanifoldsNotIntro}, construct a smooth~$\Z$-manifold~$W$ with equivariant intersection form~$\lambda$. In our setting, where~$\lambda:=2P$, the manifold produced will be~$X_{\lambda}(U)\natural (S^1\times D^3)$, where~$X_{\lambda}(U)$ is the manifold obtained by attaching a~$\lambda$-framed 2-handle to~$D^4$ along the unknot~$U$. Let~$Y'$ be the boundary of this~$4$-manifold and let~$\varphi \colon \pi_1(Y') \to \pi_1(W) \cong \Z$ be the inclusion-induced map. Since~$\lambda$ presents~$Y'$, Theorem~\ref{thm:ClassificationRelBoundary} implies that~$\mathcal{V}_\lambda^0(Y')$ has at least~$2^{k-1}\geq m$ elements, as required. We now turn to the statement involving~$\mathcal{V}_\lambda(Y)$. \begin{claim*} There is an integer~$N>0$ so that for any~$n >N$, there exists a smooth~$\Z$-manifold~$W_n$ with equivariant intersection form~$(n)$ and such that~$\partial W_n$ has trivial mapping class group. \end{claim*} \begin{proof} Let~$L$ be the~$3$-component link in the left frame of Figure \ref{fig:trivialsym} and let~$Z$ be the~$3$-manifold obtained from~$L$ by~$0$-surgering both the red and blue components, and removing a tubular neighborhood of the green component~$\gamma$.
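A verification of this kind can be scripted; the following is only an illustrative sketch (the name \texttt{L13n1234} is a placeholder standing in for the link of Figure~\ref{fig:trivialsym}, whose actual data and verified transcripts are those of \cite{data}).
\begin{verbatim}
import snappy

# Illustrative sketch only: 'L13n1234' is a placeholder name; see the
# cited data files for the actual input and the verified transcripts.
Z = snappy.Manifold('L13n1234')   # exterior of a 3-component link
Z.dehn_fill((0, 1), 0)            # 0-surgery on the red component
Z.dehn_fill((0, 1), 1)            # 0-surgery on the blue component
                                  # the third cusp (gamma) is left unfilled

print(Z.verify_hyperbolicity())   # verified hyperbolic structure
print(Z.symmetry_group())         # expect the trivial group
\end{verbatim}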
Using verified computations in SnapPy inside of Sage, we find that~$Z$ is hyperbolic and has trivial mapping class group.\footnote{Transcripts of the computation are available at \cite{data}.} By Thurston's hyperbolic Dehn surgery theorem \cite[Theorem 5.8.2]{Thurston}, there exists~$N > 0$ such that for~$n > N$, the manifold~$Z_n$ obtained by~$-1/n$ filling~$\gamma$ is hyperbolic and has trivial symmetry group; for the mapping class group part of this statement, see for example~\cite[Lemma~2.2]{DHL}. Let~$W_n$ be the 4-manifold described in the right frame of Figure \ref{fig:trivialsym} and observe that~$\partial W_n\cong Z_n$. It is not difficult to verify that~$W_n$ is a $\Z$-manifold with equivariant intersection form~$(n)$. This concludes the proof of the claim. \end{proof} We conclude the proof of the proposition. Fix~$m \geq 0$ and choose an integer~$P$ such that \begin{itemize} \item~$P$ can be factored as a product~$p_1\cdots p_k$ of~$k$ distinct primes with~$2^{k-1} \geq m$; \item~$2P>N$, where~$N$ is as in the claim. \end{itemize} Since~$2P>N$, the claim implies that~$Y:=\partial W_{2P}$ has trivial mapping class group. The proof is now concluded as in the warm-up, but we spell out the details. As we already mentioned,~$W_{2P}$ has equivariant intersection form~$\lambda:=2P$. Example~\ref{ex:Integer} and Lemma~\ref{lem:NonTrivialbAut} imply that~$U(\Z[t^{\pm 1}]/\lambda)/U(\Z[t^{\pm 1}])$ has at least~$2^{k-1}$ elements. By Lemma~\ref{lem:UnitaryUnits}, this means that~$\Aut(\partial \lambda)/\Aut(\lambda)$ has at least~$2^{k-1}$ elements. Since~$Y$ has trivial mapping class group, either of Theorem~\ref{thm:ClassificationRelBoundary} or Theorem~\ref{thm:Classification} implies that~$\mathcal{V}_\lambda(Y)=\mathcal{V}_\lambda^0(Y)$ has at least~$2^{k-1} \geq m$ elements. \end{proof} \bibliography{BiblioRealisation} \bibliographystyle{alpha} \end{document}
2205.12668v2
http://arxiv.org/abs/2205.12668v2
Measurement incompatibility vs. Bell non-locality: an approach via tensor norms
\documentclass[11pt, reqno, a4paper]{amsart} \usepackage[table,xcdraw]{xcolor} \usepackage{braket} \usepackage{graphicx} \usepackage{tikz} \usepackage{amssymb,amsthm} \usepackage[margin=1in]{geometry} \usepackage[colorlinks = true, linkcolor = blue, urlcolor = blue, citecolor = red]{hyperref} \usepackage{subfig} \captionsetup{belowskip=12pt,aboveskip=4pt} \renewcommand{\epsilon}{\varepsilon} \renewcommand{\phi}{\varphi} \newcommand{\C}{\mathbb{C}} \newcommand{\R}{\mathbb{R}} \renewcommand{\S}{\mathcal{S}} \newcommand{\M}{\mathcal{M}} \DeclareMathOperator{\Tr}{Tr} \DeclareMathOperator{\id}{id} \newcommand{\U}{\mathcal{U}} \newcommand{\E}{\mathbb{E}} \DeclareMathOperator{\Cat}{Cat} \newcommand{\scalar}[2]{\left\langle\,#1\,|\,#2\,\right\rangle} \newcommand{\ketbra}[2]{|#1\rangle\langle#2|} \newtheorem{theorem}{Theorem}[section] \newtheorem{definition}[theorem]{Definition} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{example}[theorem]{Example} \newtheorem{problem}[theorem]{Problem} \newtheorem{question}[theorem]{Question} \newtheorem{conjecture}[theorem]{Conjecture} \newtheorem{remark}[theorem]{Remark} \begin{document} \title[Measurement incompatibility vs. Bell non-locality]{Measurement incompatibility vs. Bell non-locality:\\an approach via tensor norms} \author{Faedi Loulidi} \author{Ion Nechita} \email{$\{$loulidi,nechita$\}[email protected]} \address{Laboratoire de Physique Th\'eorique, Universit\'e de Toulouse, CNRS, UPS, France} \begin{abstract} Measurement incompatibility and quantum non-locality are two key features of quantum theory. Violations of Bell inequalities require quantum entanglement and incompatibility of the measurements used by the two parties involved in the protocol. We analyze the converse question: for which Bell inequalities is the incompatibility of measurements enough to ensure a quantum violation? We relate the two questions by comparing two tensor norms on the space of dichotomic quantum measurements: one characterizing measurement compatibility and the second one characterizing violations of a given Bell inequality. We provide sufficient conditions for the equivalence of the two notions in terms of the matrix describing the correlation Bell inequality. We show that the CHSH inequality and its variants are the only ones satisfying it. \end{abstract} \date{\today} \maketitle \tableofcontents \section{Introduction} Since its discovery, quantum mechanics was formalized as a theory with many foundational aspects which differ significantly from classical mechanics. Some of these deep questions, and the relation among them are still subject to investigation nowadays. Understanding these notions and their interplay is crucial for the development of the second quantum revolution. Two of the most important conceptual revolutions put forward by quantum mechanics are the notions of \emph{non-locality of correlations} and the \emph{incompatibility of quantum measurements}. The latter notion, that of measurement incompatibility is one of the most unintuitive aspects of the quantum world, when examined from a classical perspective: there exist (quantum) measurements which cannot be performed simultaneously on a given quantum system. It is well-known that quantum non-locality is one of the fundamental aspects of quantum theory that gives rise to a lot of questions about quantum reality. 
John Bell \cite{Bell} gave a complete answer to the debate about the non-locality and elucidated the intrinsic probabilistic aspect of quantum theory. The answer he provides is that any local theory must obey some inequality, while if one applies the predictions of quantum mechanics, the aforementioned inequality can be violated. This means that the quantum world is completely non-local, which, in turn, means that there are phenomena that we could not understand with our classical macroscopic point of view. Such conclusion provides a complete answer about the intrinsic reality of the quantum world. Such violations of correlation inequalities were completely confirmed experimentally in Alain Aspect’s experiment \cite{Aspect}, and in a loophole-free manner in \cite{hensen2015loophole}. In the modern language of quantum information theory, such correlation inequalities can be understood as \emph{non-local games} \cite{palazuelos2016survey}. In such a game, two players, called traditionally Alice and Bob, play cooperatively against a Referee. Alice and Bob are space-like separated, hence once the game starts they can no longer communicate. However, they both know the rules of the game, and they can meet before the game starts and make a strategy. Technically, the games we consider are defined by a matrix $M$, which encodes the pay-off the players receive; in particular, we shall consider in this work exclusively XOR games with $N$ questions and two answers. Two scenarios are of particular importance for us: Alice and Bob are either allowed to use \emph{classical strategies} (where they share classical randomness) or \emph{quantum strategies} (where they share a bipartite entangled quantum state). It turns out that the optimal probabilities to win the game with classical or, respectively, quantum strategies, can be formulated as two different \emph{tensor norms} of the matrix that encodes the rules of the game (seen as a $2-$tensor). This is one of the instances where tensor norms (and Banach space theory in general) has found applications in the theory of non-local games. The main goal of the current work is to relate, in a \emph{quantitative manner} the notion of \emph{measurement incompatibility} to that of \emph{Bell inequality violations} in a very general setting. Our original motivation was the seminal work \cite{wolf2009measurements}, where the authors connected, in a qualitative manner, the incompatibility of Alice's measurements in the CHSH game, with possible violations of the CHSH inequality. Our results can be seen to build on this example, generalizing it in two different directions: \begin{itemize} \item we go beyond the CHSH game, allowing all (reasonable) correlation XOR games \item we relate, in a quantitative manner, the largest possible violation of a Bell inequality to the incompatibility robustness of Alice's measurements. \end{itemize} In order to achieve these goals, our framework is different than the usual setting of non-local games, in the respect that \begin{center} \boxed{ \emph{Alice's dichotomic measurements are fixed.}} \end{center} Optimizing over Bob's choice of $N$ dichotomic measurements and over the players' shared entangled state, we can express the quantum bias of the given non-local game $M$ as a tensor norm of Alice's $N$-tuple of measurements, which we denote by $\|A\|_M$. 
In particular, with Alice's choice of measurements fixed to be $A$, the players will violate the Bell inequality corresponding to $M$ if and only if $\|A\|_M > \beta(M)$, where $\beta(M)$ is the classical bias of the game. On the quantum measurement (in-)compatibility side, we revisit the construction from \cite{bluhm2022incompatibility}, where the compatibility of an $N$-tuple of dichotomic quantum measurements has been described with the help of a tensor norm, dubbed $\|A\|_c$. We give a direct proof of the result showing that $N$ dichotomic measurements $A = (A_1, \ldots, A_N)$ are compatible iff $\|A\|_{c}\leq 1$. The value of the compatibility norm $\|\cdot\|_c$ is related to the notion of \emph{compatibility robustness}: the value of the norm encodes precisely the quantity of (white) noise one needs to add to the tuple of dichotomic measurements in order to render them compatible. Having formulated the key physical principles of this work (quantum incompatibility and Bell non-locality), we now get to our main point: the relation between them. This question has already received a lot of attention in the literature. The starting point is the equivalence first observed in \cite{wolf2009measurements}: for the CHSH game \cite{clauser1969proposed} with two questions, Alice's pair of measurements are incompatible if and only if there exist an entangled state and a choice for Bob's pair of measurements such that they can obtain a violation of the CHSH Bell inequality. It is equally well-known that the two notions are not equivalent in more general situations; see \cite{quintino2014joint}. In this work, we provide a definitive answer to this question, using the framework of tensor norms. More precisely, we express the following quantities as tensor norms: \begin{itemize} \item \emph{incompatibility}: how much (white) noise one needs to add to a tuple of dichotomic POVMs to render them compatible \item \emph{quantum bias of a correlation game}: what is the maximal value of the game (normalized to have classical bias 1), when Alice's tuple of dichotomic measurements is fixed. \end{itemize} We then discuss how these norms compare, and when they are equal. We provide sufficient conditions for equality, and then show that only the CHSH game (and its permutations) satisfy them, emphasizing the special role of the CHSH inequality. \begin{figure} \centering \includegraphics[width=\textwidth]{non-locality.pdf} \caption{In this work, we relate measurement incompatibility with Bell inequalities using the formalism of tensor norms. Pairwise connections having already been established in the literature, we bring the three concepts together for the first time.} \label{fig:non-locality} \end{figure} Our paper is organised as follows. In Section \ref{mainresult} we (informally) state the main results of our paper and their interpretation. In Section \ref{sec:compatibility} we recall the notion of compatibility for quantum measurements. We present in Section \ref{sec:Tensor Norms} the basic definitions of tensor norms from Banach space theory, focusing on the examples needed in this work. In Section \ref{sec:non-locality} we introduce the framework of Bell non-locality as non-local games and relate the values of these games to tensor norms. In Section \ref{sec:non-locality-norm} we introduce the main definition of the non-locality norm $\|A\|_M$ that will characterise the violation of the Bell inequality.
In Section \ref{sec: compatibility-norm} we introduce the compatibility norm $\|A\|_{c}$ that will characterise the compatibility of Alice's measurements. We present in Section \ref{sec: non-locality and incompatibility} our main theorems, discussing also under which conditions the violation of a Bell inequality implies measurement incompatibility. In our framework, we provide a conceptual explanation of the main result in \cite{wolf2009measurements}, and we also analyze new Bell inequalities, such as different deformations of the CHSH inequality and the pure correlation part of the $I_{3322}$ tight Bell inequality; for the latter, the two notions are not equivalent, as noticed in \cite{quintino2014joint}. \section{Main results}\label{mainresult} In this section we introduce the main definitions and the main results of our work. Our goal is to unify two fundamental notions of quantum theory, \emph{measurement incompatibility} and \emph{Bell inequality violations}. To do so, we shall work in the framework of \emph{non-local games}, where the rules of a correlation game are encoded in a real $N \times N$ matrix $M$, and \emph{Alice's dichotomic measurements are fixed}. Note that in this work we shall be considering general (not necessarily projective) measurements, mathematically encoded by POVMs. The maximum value of the game $M$, when Alice's measurements are fixed, is given by the following quantity. \begin{definition}[The $M$-Bell-locality tensor norm] Let $M$ be an invertible \emph{Bell functional} and let $A = (A_1, \ldots, A_N)$ be Alice's $N$-tuple of dichotomic measurements. We define the following tensor norm: $$\|A\|_{M}:=\sup_{\|\psi\| = 1} \sup_{\|B_y\| \leq 1} \Big \langle \psi \Big | \sum_{x,y = 1}^N\,M_{xy} \,A_x \otimes B_y \Big | \psi \Big \rangle=\lambda_{\max}\bigg[\sum_{y=1}^N \bigg| \sum_{x=1}^N M_{xy}\,A_x\bigg|\bigg].$$ \end{definition} The quantity $\|A\|_M$ is the maximum value of the game $M$, when optimizing over quantum strategies, with Alice's measurements being fixed. The measurements $A = (A_1, \ldots, A_N)$ are called \emph{$M$-Bell-local} if there is no violation of the Bell inequality corresponding to $M$: $\|A\|_M\leq \beta(M)$, with $\beta(M)$ being \emph{the classical bias} of the game (which, importantly, can also be expressed as a tensor norm). If this is not the case, we call Alice's measurements \emph{$M$-Bell-non-local}. Regarding compatibility, we are concerned with the same question as before: are Alice's dichotomic measurements compatible or not? The following quantity was introduced, in the more abstract setting of generalized probabilistic theories, in \cite{bluhm2022incompatibility}; see also \cite{bluhm2022tensor}. \begin{definition}[The compatibility tensor norm] For a tensor $A \in \mathbb R^N \otimes \mathcal M_d^{sa}(\mathbb C)$, we define the following quantity: \begin{equation*} \|A\|_c := \inf \Bigg\{ \Big\|\sum_{j=1}^K H_j\Big\|_\infty \, : \, A = \sum_{j=1}^K z_j \otimes H_j, \, \text{ s.t. } \, \forall j \in [K], \, \|z_j\|_\infty \leq 1 \text{ and } H_j \geq 0\Bigg\}. \end{equation*} \end{definition} The compatibility norm, together with the injective tensor product of $\ell_\infty$ and $S_\infty$ norms, completely characterizes compatibility of tuples of dichotomic quantum measurements \cite{bluhm2022incompatibility,bluhm2022tensor}. \begin{proposition} Let $A = (A_1, \ldots, A_N)$ be an $N$-tuple of self-adjoint $d \times d$ complex matrices.
Then: \begin{enumerate} \item $A$ is a collection of dichotomic quantum observables (i.e.~$\|A_i\|_\infty \leq 1$ $\forall i$) if and only if $\|A\|_\epsilon \leq 1$. \item $A$ is a collection of \emph{compatible} dichotomic quantum observables if and only if $\|A\|_c \leq 1$. \end{enumerate} \end{proposition} The compatibility norm allows Alice to know whether her measurements are compatible ($\|A\|_c\leq 1$) or not ($\|A\|_c > 1$); in the latter case, the minimal quantity of white noise that needs to be mixed into the measurements in order to render them compatible is $1/\|A\|_c$, providing an operational interpretation of the compatibility norm. To sum up, in the setting of tensor norms, \begin{itemize} \item Alice's measurements are \emph{$M$-Bell-local} if and only if $\|A\|_M\leq\beta (M)=\|M\|_{\ell_1^N \otimes_{\epsilon} \ell_1^N}.$ \item Alice's measurements are compatible if and only if $\|A\|_c\leq 1$. \end{itemize} To understand the relation between non-locality and compatibility, we now have to compare the two norms $\|\cdot\|_c$ and $\|\cdot\|_M$. \begin{theorem} Consider an $N$-input 2-output non-local game $M$, corresponding to a matrix $M \in \mathcal M_N(\mathbb R)$. Then, for any $N$-tuple of self-adjoint matrices $A = (A_1, \ldots, A_N)$, we have $$\|A\|_M\leq\|A\|_{c} \, \beta(M)=\|A\|_{c}\,\|M\|_{\ell_1^N \otimes_{\epsilon} \ell_1^N}.$$ In particular, if Alice's measurements $A$ are $M$-Bell-non-local, then they must be incompatible. \end{theorem} In the theorem above, we have upper bounded the $M$-Bell-locality norm by the compatibility norm, which depends only on Alice's measurements, times the classical bias of the game. This inequality is a \emph{quantitative} version of the well-known, \emph{qualitative} fact that if Alice's measurements are compatible, she will never observe any Bell inequality violation (i.e.~her measurements are $M$-Bell-local). One of our main contributions is to raise and answer the converse question: we want to upper bound the compatibility norm by the $M$-Bell-locality norm. In physical terms, we are asking whether, given a Bell inequality $M$ and a tuple of measurements, Alice can observe violations of $M$ using her measurements. We have the following theorem, providing a (partial) answer to this question. \begin{theorem} Let $M \in \mathcal M_N(\mathbb R)$ be an invertible matrix. Then, for any $N$-tuple of self-adjoint matrices $A = (A_1, A_2, \ldots, A_N)$, we have $$\|A\|_c \leq \|A\|_M\|M^{-1}\|_{\ell_\infty^N \otimes_\epsilon \ell_\infty^N}.$$ \end{theorem} In the main theorems succinctly stated above, we have compared the compatibility tensor norm and the $M$-Bell-locality norm. It was shown in \cite{wolf2009measurements} that for the CHSH game, the incompatibility of one party's quantum measurements and the violation of a Bell inequality are equivalent. In our setting, this equivalence can be understood as an \emph{equality} of the compatibility norm and the $M$-Bell-locality norm for $M_{\text{CHSH}}$: we have $$\|\cdot\|_c=\|\cdot\|_{M_{\text{CHSH}}}.$$ Having restated this classical result in terms of an equality of tensor norms, it is natural to ask whether this equality goes beyond the case of the CHSH inequality. Incompatibility and Bell non-locality are not, in general, equivalent, as was shown in \cite{bene2018measurement, hirsch2018quantum}. From the main theorems above, any game $M$ must satisfy $\|M\|_{\ell_1^N \otimes_{\epsilon} \ell_1^N}\cdot\|M^{-1}\|_{\ell_\infty^N \otimes_\epsilon \ell_\infty^N} \geq 1$.
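Both quantities appearing in the theorems above are easy to evaluate numerically from their defining formulas: $\|A\|_M$ via the $\lambda_{\max}$ expression in its definition, and $\beta(M)=\|M\|_{\ell_1^N \otimes_{\epsilon} \ell_1^N}$ by maximising over sign vectors (the extreme points of the relevant unit balls). The following is a minimal sketch, not part of the original argument, assuming \texttt{numpy} is available and taking the CHSH matrix together with the Pauli matrices $X$ and $Z$ as an illustrative choice of Alice's observables.
\begin{verbatim}
import numpy as np
from itertools import product

M = np.array([[1., 1.], [1., -1.]])               # the CHSH matrix
X = np.array([[0, 1], [1, 0]], dtype=complex)     # Pauli X
Z = np.array([[1, 0], [0, -1]], dtype=complex)    # Pauli Z
A = [X, Z]                                        # Alice's dichotomic observables

def abs_herm(H):
    # matrix absolute value |H| of a Hermitian matrix
    vals, vecs = np.linalg.eigh(H)
    return vecs @ np.diag(np.abs(vals)) @ vecs.conj().T

def bell_norm(A, M):
    # ||A||_M = lambda_max[ sum_y | sum_x M_{xy} A_x | ]
    N = M.shape[0]
    S = sum(abs_herm(sum(M[x, y] * A[x] for x in range(N))) for y in range(N))
    return np.linalg.eigvalsh(S)[-1]

def classical_bias(M):
    # beta(M): maximise eps^T M delta over sign vectors eps and delta
    N = M.shape[0]
    signs = list(product([-1, 1], repeat=N))
    return max(np.array(e) @ M @ np.array(d) for e in signs for d in signs)

print(bell_norm(A, M), classical_bias(M))   # about 2.828 > 2: a violation
\end{verbatim}
With this choice one recovers the Tsirelson value $2\sqrt{2}$ for $\|A\|_{M_{\text{CHSH}}}$, to be compared with the classical bias $\beta(M_{\text{CHSH}})=2$.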
If one wants to conclude $\|\cdot\|_c = \|\cdot\|_M$ from these results, one needs to investigate the equality case in the aforementioned inequality. We show that for any real and invertible matrix $M$, the following holds. \begin{proposition} For any real and invertible matrix $M$, we have: $$\|M^{-1}\|_{\ell^N_{\infty}\otimes_{\epsilon}\ell^N_{\infty}}\|M\|_{\ell^N_1\otimes_{\epsilon}\ell^N_1}\geq \frac{N}{\rho(\ell_{\infty}^N,\ell_{\infty}^N)}\geq \sqrt{\frac N 2}\geq 1.$$ For $N \geq 3$, the last inequality above is strict. \end{proposition} The case $N=2$ needs to be treated separately. We show that for $N=2$ questions, the only games achieving equality are the CHSH game and variants thereof. We summarize this in the following theorem. \begin{theorem} The only invertible non-local games $M \in \mathcal M_N(\mathbb R)$ satisfying $$\|M^{-1}\|_{\ell^N_{\infty}\otimes_{\epsilon}\ell^N_{\infty}}\|M\|_{\ell^N_1\otimes_{\epsilon}\ell^N_1} = 1$$ have two questions ($N=2$) and are variants of the CHSH game: $M = a M_{\text{CHSH}}$ for some $a \neq 0$. \end{theorem} \section{Compatibility of quantum measurements}\label{sec:compatibility} This section contains the main definitions and results from the theory of quantum measurements, with a focus on (in-)compatibility and noisy measurements. In Quantum Mechanics, a system is described by a Hilbert space $\mathcal H$. Here, we shall consider only finite-dimensional Hilbert spaces: $\mathcal H \cong \C^d$, for a positive integer $d$, which corresponds to the number of degrees of freedom. For example, quantum bits (qubits) are described by the space $\mathbb C^2$. Quantum states are formalized mathematically by \emph{density matrices}: $$\M^{1,+}_d := \{ \rho \in \M_d \, : \, \rho \geq 0 \text{ and } \Tr \rho = 1\},$$ where $\mathcal M_d$ is the vector space of $d \times d$ complex matrices. Density matrices are positive semidefinite, a relation denoted by $\rho \geq 0$. Let us now discuss measurements in Quantum Mechanics. Historically, quantum measurements were modelled by \emph{observables}: Hermitian operators acting on the system Hilbert space. The possible outcomes of the measurement are the eigenvalues of the observable, while the probabilities of occurrence are given by the celebrated \emph{Born rule}. This formalism allows one to obtain not only the probabilities of the different outcomes (via the Born rule), but also the post-measurement state of the quantum system (the \emph{wave function collapse}). In the current research, we are only concerned with the former, and thus we shall use the more general framework of Positive Operator Valued Measures (POVMs) \cite{nielsen00}. We shall write $[n]:= \{1, 2, \ldots, n\}$ for the set of the first $n$ positive integers. \begin{definition} A \emph{positive operator valued measure} (POVM) on $\M_d$ with $k$ outcomes is a $k$-tuple $A=(A_1, \ldots, A_k)$ of self-adjoint operators from $\M_d$ which are positive semidefinite and sum up to the identity: $$\forall i \in [k], \quad A_i \geq 0 \qquad \text{ and } \qquad \sum_{i=1}^k A_i = I_d.$$ When measuring a quantum state $\rho$ with the apparatus described by $A$, we obtain a random outcome from the set $[k]$: $$\forall i \in [k], \qquad \mathbb P(\text{outcome} = i) = \Tr[\rho A_i].$$ \end{definition} The vector of outcome probabilities $\left(\Tr[\rho A_i]\right)_{i=1}^k$ is indeed a probability vector; note that the properties of the operators $A_i$, called \emph{quantum effects}, are tailor-made for this.
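To make the Born rule above concrete, here is a small numerical sketch; the state and the effects are illustrative choices, not taken from the text, and \texttt{numpy} is assumed to be available.
\begin{verbatim}
import numpy as np

rho = np.array([[0.75, 0.25],
                [0.25, 0.25]], dtype=complex)   # a qubit density matrix
P0 = np.array([[1, 0], [0, 0]], dtype=complex)  # projector |0><0|
# a 2-outcome POVM: equal mixture of the projective measurement
# (P0, I - P0) with the trivial measurement (I/2, I/2)
A = [0.5 * P0 + 0.25 * np.eye(2),
     0.5 * (np.eye(2) - P0) + 0.25 * np.eye(2)]

assert np.allclose(sum(A), np.eye(2))           # effects sum to the identity
probs = [np.trace(rho @ E).real for E in A]     # Born rule: Tr[rho A_i]
print(probs, sum(probs))                        # [0.625, 0.375], summing to 1
\end{verbatim}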
This mathematical formalism used to describe quantum measurements (or POVMs, or \emph{meters}) does not account for what happens with the quantum particle after the measurement. One can think that the particle is destroyed in the process of measurement (see Figure \ref{fig:measurement}) and thus only the outcome probabilities are relevant. \begin{figure}[htb!] \centering \includegraphics{measurement-before.pdf}\qquad\qquad\qquad\qquad \includegraphics{measurement-after.pdf} \caption{Diagrammatic representation of a quantum measurement apparatus. The device has an input channel and a set of $k$ LEDs which will turn on when the corresponding outcome is achieved. After the measurement is performed, the particle is destroyed, and the apparatus displays the classical outcome (here, $2$).} \label{fig:measurement} \end{figure} Several important classes of POVMs will be discussed in this paper: \begin{itemize} \item \emph{von Neumann measurements}, where $A_i = \ketbra{a_i}{a_i}$, $i \in [d]$, for an orthonormal basis $\{\ket{a_i}\}_{i=1}^d$ of $\C^d$; \item \emph{trivial measurements}, where the matrices $A_i$ are scalar multiples of the identity: $A_i = p_i I_d$, for some probability vector $p = (p_1, p_2, \ldots, p_k)$. \end{itemize} \bigskip Let us now define the notion of \emph{compatibility} for quantum measurements, which is central to this paper. Historically, in the physics literature, the notion of compatibility was closely related to that of commutativity of the quantum observables \cite{Heisenberg1927, Bohr1928}; indeed, sharp POVMs are compatible if and only if the corresponding observables commute. In the modern setting, suppose we want to measure two different physical quantities (modelled by two POVMs $A$ and $B$) on a given quantum particle in a state $\rho$. Having at our disposal just one copy of the particle, we cannot, in general, measure $A$ and $B$ simultaneously. However, one can \emph{simulate} measuring $A$ and $B$ on $\rho$ with the help of a third POVM $C$, by \emph{classically} post-processing the output of $C$ to a pair of outcomes $(i,j)$ for $A$, respectively $B$; see Figure \ref{fig:compatibility}. Importantly, there are many pairs of POVMs $A$ and $B$ for which there is no such $C$, like the position and momentum operators of a particle in one dimension: it is impossible to attribute simultaneously an exact value to both position and momentum observables. \begin{figure}[htb!] \centering \includegraphics[width=0.9\textwidth]{compatibility.pdf} \caption{The joint measurement of $A$ and $B$ is simulated by a third measurement $C$, followed by classical post-processing.} \label{fig:compatibility} \end{figure} Mathematically, we can either consider general post-processings or marginalization. We refer the reader to \cite{heinosaari2016invitation,heinosaari2022order} for more details. \begin{definition} Two POVMs $A=(A_1, \ldots, A_k)$, $B=(B_1, \ldots, B_l)$ on $\M_d$ are called \emph{compatible} if there exists a \emph{joint POVM} $C=(C_{11}, \ldots, C_{kl})$ on $\M_d$ such that $A$ and $B$ are its respective \emph{marginals}: \begin{align*} \forall i \in [k], \qquad A_i &= \sum_{j=1}^l C_{ij}.\\ \forall j \in [l], \qquad B_j &= \sum_{i=1}^k C_{ij}.
\end{align*} More generally, a $g$-tuple of POVMs $\mathbf A = (A^{(1)}, \ldots, A^{(g)})$ is called compatible if there exists a POVM $C$ with outcome set $[k_1] \times \cdots \times [k_g]$ such that, for all $x \in [g]$, the POVM $A^{(x)}$ is the $x$-th marginal of $C$: \begin{align*} \forall i_x \in [k_x], \qquad A^{(x)}_{i_x} &= \sum_{i_1 = 1}^{k_1} \cdots \sum_{i_{x-1} = 1}^{k_{x-1}}\sum_{i_{x+1} = 1}^{k_{x+1}} \cdots \sum_{i_g = 1}^{k_g} C_{i_1i_2 \cdots i_g}\\ &= \sum_{\substack{\mathbf j \in [k_1] \times \cdots \times [k_g]\\ j_x = i_x}} C_{\mathbf j}. \end{align*} \end{definition} Note that the definition of compatibility given above can be formulated as a (feasibility) semi-definite program (SDP) \cite{boyd2004convex}. One can equivalently formulate the notion of compatibility with more general post-processings. \begin{proposition}\label{prop:compatibility-postprocessing} An $N$-tuple of POVMs $\mathbf A = (A^{(1)}, \ldots, A^{(N)})$ is compatible if and only if there exists a joint POVM $(C_k)_{k \in [K]}$ and a family of conditional probabilities $\big(p_x(\cdot | \cdot)\big)_{x \in [N]}$ such that $$\forall x \in [N], \, \forall i \in [k_x], \qquad A^{(x)}_i = \sum_{k \in [K]} p_x(i | k) C_k.$$ \end{proposition} We now consider the simplest possible setting, that of two 2-outcome POVMs $\{Q,I-Q\}$ and $\{P,I-P\}$, where $P,Q$ are $d \times d$ self-adjoint matrices satisfying $0 \leq P, Q \leq I_d$. The pair of POVMs is compatible if and only if $\epsilon_0 \leq 0$ \cite{wolf2009measurements}, where \begin{align}\label{equ:SDP} \epsilon_0:=\inf\big\{\epsilon\, : \, \exists \delta\geq 0 \quad \text{s.t.} \quad \delta + I-Q-P\geq 0,\,Q+\epsilon I - \delta \geq 0,\,P +\epsilon I - \delta \geq 0\big\}, \end{align} where $\delta$ is a positive semidefinite matrix. The above formula corresponds to the value of a semidefinite program encoding the existence of a joint measurement for the POVMs $\{P, I-P\}$ and $\{Q, I-Q\}$. Generally, every SDP comes with a dual formulation. In our case the dual SDP is given below \cite{wolf2009measurements}: \begin{lemma}\label{lem:dual-SDP} Given the above optimization problem for deciding compatibility, its dual formulation is given by: $$\epsilon^*=\underset{X,Y,Z\geq0}{\sup}\bigg\{\Tr[X(Q+P-I)]-\Tr[YQ]-\Tr[PZ]\, \text{with}\, X\leq Y+Z,\, \Tr[Y+Z]=1\bigg\}.$$ \end{lemma} \begin{proof} Let us consider the following Lagrangian, corresponding to the primal SDP~\eqref{equ:SDP}: $$\mathcal{L}:=\epsilon-\langle X, \delta +I-Q-P\rangle-\langle Y, \epsilon I+Q-\delta\rangle-\langle Z, \epsilon I +P-\delta\rangle -\langle C,\delta\rangle.$$ Above $X,Y,Z,C$ are positive semidefinite matrices which represent the constraints of the primal optimisation problem. Due to the strict feasibility of the SDP, its dual optimal value is the same as the optimal value of the primal (Slater's condition, see \cite{boyd2004convex}). Thus, we have the following equality: $$\inf_{\epsilon,\delta} \sup_{X,Y,Z,C} \mathcal{L}=\sup_{X,Y,Z,C}\inf_{\epsilon,\delta}\mathcal{L}.$$ A simple calculation shows that $$\underset{\epsilon,\delta}{\text{inf}}\,\mathcal{L}=\langle X,Q+P-I\rangle-\langle Y,Q\rangle-\langle P,Z\rangle$$ with $\Tr[Y+Z]=1$ and $Z+Y-X-C=0\iff X\leq Y+Z$, which is precisely the dual formulation from the statement.
\end{proof} In the following, we shall use the SDP value to describe the compatibility threshold of the POVMs, that is, the minimal quantity of noise that one needs to mix in, in order to render the POVMs compatible; such quantities go in the literature under the name of \emph{robustness of incompatibility} \cite{designolle2019incompatibility}. \begin{definition} For a given parameter $\eta\in[0,1]$ and two POVMs $A=(A_1, \ldots, A_k)$, $B=(B_1, \ldots, B_l)$ on $\M_d$, one defines their \emph{noisy} versions as $A^{\eta}:=(A^{\eta}_1, \ldots, A^{\eta}_k)$, $B^{\eta}:=(B^{\eta}_1, \ldots, B^{\eta}_l)$ with \begin{align*} \forall i \in [k], \qquad A^{\eta}_i := \eta A_i + (1-\eta)\frac{I}{k},\\ \forall j \in [l], \qquad B^{\eta}_j := \eta B_j + (1-\eta)\frac{I}{l}. \end{align*} \end{definition} \begin{remark} In our simplified setting, we shall only consider POVMs with two outcomes $\mathcal{P}=\{P,\,I-P\}$, $\mathcal{Q}=\{Q,\,I-Q\}$ and their noisy versions. The definition given above can be rewritten as follows: $$\mathcal{P}^{\eta}=\{P^{\eta},I-P^{\eta}\},$$ and $$\mathcal{Q}^{\eta}=\{Q^{\eta},I-Q^{\eta}\}.$$ The measurements $\mathcal{P}^{\eta}$ and $\mathcal{Q}^{\eta}$ can also be seen as convex mixtures of the measurements $\mathcal{P}$ and $\mathcal{Q}$ with the trivial POVM $\mathcal{I}=(\frac{I}{2},\frac{I}{2})$. One has $\mathcal{Q}^{\eta}=\eta \mathcal{Q}+(1-\eta)\mathcal{I}$ and $\mathcal{P}^{\eta}=\eta \mathcal{P}+(1-\eta)\mathcal{I}$. \end{remark} Let us now formalize the incompatibility robustness, in the symmetric case, where the same amount of white noise $(I/2, I/2)$ is mixed into the two POVMs; for the asymmetric version, see the incompatibility regions defined in \cite[Section III]{bluhm2018joint}. \begin{definition}\label{def:Gamma} For two (binary) measurements $\mathcal{P}$, $\mathcal Q$, we define their \emph{noise compatibility threshold} as: $$ \Gamma(P,Q) :=\sup\big\{ \eta \in [0,1]\, : \, \mathcal{P}^{\eta} , \mathcal{Q}^{\eta} \text{ are compatible }\big\}.$$ \end{definition} \begin{proposition}\label{prop: noise compatibility threshold} The noise compatibility threshold for two (binary) measurements $\mathcal{P}$ and $\mathcal{Q}$ is given by: $$\Gamma(P,Q)=\frac{1}{1+2\epsilon^*},$$ where $\epsilon^*$ is the optimal value of the SDP from Lemma \ref{lem:dual-SDP}. \end{proposition} \begin{proof} Recall that the compatibility of $\mathcal{P}^{\eta}$ and $\mathcal{Q}^{\eta}$ is equivalent to the existence of $\delta\geq 0$ satisfying the following conditions: \begin{align*} \eta Q +(1-\eta)\frac{I}{2}-\delta&\geq0\\ \eta P +(1-\eta)\frac{I}{2}-\delta&\geq0\\ \delta -\eta(P+Q-I)&\geq0 \end{align*} Dividing by $\eta$, this is easily seen to be equivalent to \begin{align*} Q +\epsilon I-\delta'&\geq0\\ P +\epsilon I-\delta'&\geq0\\ \delta' -(P+Q-I)&\geq0 \end{align*} with $\delta':=\frac{\delta}{\eta}$ and $\epsilon=\frac{1}{2}(\frac{1}{\eta}-1)\iff\eta=\frac{1}{2\epsilon + 1}$. Taking the supremum over $\eta$, the noise compatibility threshold $\Gamma(P,Q)$ is therefore given by \begin{align*} \Gamma(P,Q)&=\sup\Big\{\,\frac{1}{2\epsilon + 1}\Big|\,\exists \delta'\geq0\, ,\,Q +\epsilon I-\delta'\geq0 ,\,P +\epsilon I-\delta'\geq0,\, \delta' -(P+Q-I)\geq0\Big\}\\ &=\frac{1}{2\epsilon_0+1}=\frac{1}{2\epsilon^*+1}, \end{align*} which ends the proof of the proposition.
\end{proof}
\section{Tensor product of Banach spaces}\label{sec:Tensor Norms} In this section we will give a brief overview of tensor norms, with the aim of presenting Bell inequalities in the tensor norm framework. Tensor norms provide the natural mathematical framework for Bell inequalities; see the survey \cite{palazuelos2016survey} and the references therein. Let us start by recalling the projective and injective tensor norms for (finite-dimensional) Banach spaces. \begin{definition} Given two finite-dimensional Banach spaces $X$ and $Y$ with their respective norms $\|\cdot\|_X$ and $\|\cdot\|_Y$, and $z\in X\otimes Y$, we define the \emph{projective tensor norm} of $z$ as: $$\|z\|_{X\otimes_{\pi} Y}:= \inf \left\{\sum_{i=1}^N\|x_i\|_X\|y_i\|_Y : z=\sum_{i=1}^N x_i\otimes y_i\right\},$$ where the infimum is taken over all the decompositions $z=\sum_{i=1}^N x_i\otimes y_i$, where $N$ is a finite but arbitrary integer. We write $X\otimes_{\pi}Y=(X\otimes Y,\|\cdot\|_{X\otimes_{\pi}Y})$, the Banach space induced by the projective tensor norm on $X\otimes Y$. \end{definition} Every Banach space comes with a dual: \begin{definition} Let $X$ be a finite-dimensional Banach space. The space of all bounded linear functionals on $X$ is called its dual space and denoted by $X^*$. It comes equipped with a norm: $$\forall \phi \in X^*, \qquad \|\phi\|_{X^*}:= \sup_{\|x\|_X\leq 1} |\phi(x)|.$$ \end{definition} We now introduce the other tensor norm of importance to us. \begin{definition}\label{def: injective norm} Given two finite-dimensional Banach spaces $X$ and $Y$ with their respective norms $\|\cdot\|_X$ and $\|\cdot\|_Y$, and $z\in X\otimes Y$, we define the \emph{injective tensor norm} of $z$ as: $$\|z\|_{X\otimes_{\epsilon}Y}:= \underset{\alpha \in \mathbb{B}(X^*), \beta\in \mathbb {B}(Y^*)}{\sup}|\langle z,\alpha\otimes \beta\rangle|,$$ where $\mathbb{B}(X^*)$ and $\mathbb{B}(Y^*)$ are the unit balls of $X^*$ and $Y^*$. We write $X\otimes_{\epsilon}Y=(X\otimes Y,\|\cdot\|_{X\otimes_{\epsilon}Y})$, the Banach space induced by the injective norm on $X\otimes Y$. \end{definition}
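As a quick illustration of the two definitions above (using only the identification of the dual of $\ell_1^N(\mathbb R)$ with $\ell_\infty^N(\mathbb R)$, and the standard fact that the projective tensor product of $\ell_1$ spaces is again an $\ell_1$ space, see \cite{ryan2002introduction}), we record a small example; the particular matrix appearing below is the CHSH Bell functional that will be introduced in Section \ref{sec:non-locality}.
\begin{example} Take $X=Y=\ell_1^2(\mathbb R)$, so that $X^*=Y^*=\ell_\infty^2(\mathbb R)$, whose unit ball has the sign vectors $(\pm1,\pm1)$ as extreme points. Identifying an element $z\in \ell_1^2(\mathbb R)\otimes \ell_1^2(\mathbb R)$ with the matrix of its coefficients $(z_{xy})$, one has $$\|z\|_{\ell^2_1\otimes_{\pi}\ell^2_1}=\sum_{x,y=1}^2|z_{xy}| \qquad\text{and}\qquad \|z\|_{\ell^2_1\otimes_{\epsilon}\ell^2_1}=\max_{a,b\in\{\pm1\}^2}\Big|\sum_{x,y=1}^2 a_x\,z_{xy}\,b_y\Big|,$$ the second equality holding because the supremum of a bilinear form over convex sets is attained at extreme points. For the matrix $$z=\frac 1 2 \begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix}$$ one finds $\|z\|_{\ell^2_1\otimes_{\pi}\ell^2_1}=2$, while $\|z\|_{\ell^2_1\otimes_{\epsilon}\ell^2_1}=1$; the two norms are thus already different on $2\times 2$ matrices. \end{example}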
It is known that the projective and the injective tensor norms are, respectively, the largest and the smallest norms that one can naturally put on the algebraic tensor product; to make this precise, we give the following definition of a reasonable crossnorm. \begin{definition}\label{def: reasonnable norm} Let $z\in X\otimes Y$. We say that a norm $\alpha$ on $X\otimes Y$, written $\|z\|_{X\otimes_{\alpha}Y}$, is a \emph{reasonable crossnorm} (or a \emph{tensor norm}) if for $z=x\otimes y$ we have: $$\|z\|_{X\otimes_{\alpha}Y}\leq \|x\|_X\|y\|_Y$$ and every elementary tensor $\phi=\phi_1\otimes\phi_2\in X^*\otimes Y^*$ defines a bounded functional on $X\otimes_\alpha Y$ with $$ \|\phi\|_{(X\otimes_{\alpha}Y)^*}\leq \|\phi_1\|_{X^*}\|\phi_2\|_{Y^*}.$$ We write $X\otimes_{\alpha}Y=(X\otimes Y,\|\cdot\|_{X\otimes_{\alpha}Y})$, the Banach space induced by $\alpha$ on $X\otimes Y$. \end{definition} The definition above can be found in \cite[page 127]{ryan2002introduction}, with the following equivalent statement. \begin{proposition}\cite[Proposition 6.1]{ryan2002introduction}\label{Prop: reasonnable norm} Consider two finite-dimensional Banach spaces $X$ and $Y$ with their respective norms $\|\cdot\|_X$ and $\|\cdot\|_Y$. A norm $\alpha$ on $X \otimes Y$ is a reasonable crossnorm if and only if for all $z\in X\otimes Y$, we have $$\|z\|_{X\otimes_{\epsilon} Y}\leq \|z\|_{X\otimes_\alpha Y}\leq \|z\|_{X\otimes_\pi Y}.$$ \end{proposition} \begin{remark} The injective and the projective tensor norms are dual to each other, in the following sense \cite{ryan2002introduction}: \begin{align*} \|z\|_{X\otimes_{\pi}Y}=\underset{\|\alpha\|_{X^*\otimes_{\epsilon}Y^*}\leq 1}{\sup}\langle\alpha,z\rangle,\\ \|z\|_{X\otimes_{\epsilon}Y}=\underset{\|\alpha\|_{X^*\otimes_{\pi}Y^*}\leq 1}{\sup}\langle\alpha,z\rangle. \end{align*} \end{remark} In general, for each tensor norm $\|\cdot\|_{X\otimes_{\alpha}Y}$ we can define its \emph{dual tensor norm}, denoted by $\alpha^*$, as follows. \begin{definition}\label{def: dual tensor norm} Consider two finite-dimensional Banach spaces $X$ and $Y$ with their respective norms $\|\cdot\|_X$ and $\|\cdot\|_Y$. For $z\in X\otimes Y$ and $v\in X^*\otimes Y^*$, the dual tensor norm $\alpha^*$ of a given tensor norm $\alpha$ is defined by $$\|z\|_{X\otimes_{\alpha^*}Y}:=\sup\{|\langle v,z\rangle|;\,\|v\|_{X^*\otimes_{\alpha}Y^*}\leq 1\}.$$ We write $X\otimes_{\alpha^*}Y=(X\otimes Y,\|\cdot\|_{X\otimes_{\alpha^*}Y})$, the Banach space induced by the norm $\alpha^*$ on $X\otimes Y$. \end{definition} \begin{remark} With the definition above, we have the following identification between the dual of a tensor product endowed with a tensor norm $\alpha$ and the tensor product of the dual spaces endowed with the dual norm $\alpha^*$: $$(X\otimes_{\alpha}Y)^*=X^*\otimes_{\alpha^*}Y^*.$$ \end{remark} One last definition we want to recall, which will play a fundamental role for Bell inequalities, is the reasonable crossnorm known as the $\gamma_2$ norm. \begin{definition} Given two finite-dimensional Banach spaces $X$ and $Y$ with their respective norms $\|\cdot\|_X$ and $\|\cdot\|_Y$, define the tensor norm $\gamma_2$ of $z\in X\otimes Y$ by: $$\|z\|_{X\otimes_{\gamma_2}Y}:= \inf\left\{\underset{\alpha^*\in \mathbb{B}(X^*)}{\sup}\left(\sum_{i=1}^N|\alpha^*(x_i)|^2\right)^{\frac{1}{2}}\underset{\beta^*\in \mathbb{B}(Y^*)}{\sup}\left(\sum_{j=1}^N|\beta^*(y_j)|^2\right)^{\frac{1}{2}} :z=\sum_{i=1}^N x_i\otimes y_i\right\},$$ where the infimum is taken over all decompositions $z=\sum_{i=1}^N x_i\otimes y_i$ with $x_i\in X$ and $y_i\in Y$.
\end{definition} \section{Bell inequalities and non-local games}\label{sec:non-locality} In this section we introduce the notion of \emph{non-local games} and show how it incorporates the non-local properties of the quantum world via the well-known Bell inequalities. We then recall how the natural framework for these games is the metric theory of tensor products of finite-dimensional Banach spaces. The non-local aspect of quantum mechanics can be incorporated in the non-local game framework. In this paper, we only consider non-local games with two players, \emph{Alice} and \emph{Bob}, who collaborate to win the game. During the game, a third party, known as the \emph{Referee}, asks a certain number of questions to the players, who are not allowed to communicate. The two protagonists reply cooperatively to the referee with some answers. The referee will decide to accept or reject the answers, declaring a win or a loss. Note that in this paper we are going to consider games with an arbitrary number of questions $N$, but only with two answers ($+1$ or $-1$); in the general case, Alice and Bob can give answers from a fixed set of given cardinality. During the game, players have access to a predetermined set of resources: this determines the type of strategy they are permitted to use. In this work, we shall consider \emph{classical strategies} and \emph{quantum strategies}. In classical strategies, the players share samples from a classical random variable that they can use to produce their answers locally. When using quantum strategies, the players share a bipartite quantum state, on which they can act locally with transformations and measurements. We shall focus on \emph{correlation games}, where the payoff of the game depends on the correlation of the $\pm 1$ answers $ab$, weighted by real numbers $M_{xy}$ depending on the questions: $$\text{payoff} = \sum_{x,y \in [N]} \sum_{a,b \in \{\pm 1\}} M_{xy}\,ab \cdot \mathbb P(a,b|x,y),$$ where $\mathbb P(a,b|x,y)$ is the (strategy-dependent) probability that Alice and Bob answer respectively $a$ and $b$, when presented with the questions $x,y \in [N]$. The matrix $M \in \mathcal M_N(\mathbb R)$ encodes the rules of the game, and it is called the \emph{Bell functional} \cite{palazuelos2016survey}. In the following, we discuss the optimal classical and quantum strategies for a given non-local game $M$. \begin{definition} The \emph{classical bias of the game} $M$ is defined as the optimisation problem $$\beta(M):=\sup \Big|\sum_{x,y=1}^N\,\sum_{a,b\in\{\pm1\}}\, M_{xy}\, ab\, \mathbb P_c(a,b|x,y)\Big| $$ where the supremum is taken over all \emph{classical strategies} $$\mathbb P_c(a,b|x,y)=\int_{\Lambda} \mathbb P_A(a|x,\lambda)\,\mathbb P_B(b|y,\lambda)\, \mathrm d\mu(\lambda).$$ Above, $\mathbb P_A$, resp.~$\mathbb P_B$ correspond to Alice's, resp.~Bob's strategies, which can depend on the shared random variable $\lambda$ having distribution $\mu$.
\end{definition} Introducing the expectation values with respect to the outputs $a,b$ $$A_x(\lambda):=\sum_{a\in\{\pm 1\}} a\, \mathbb P_A(a|x,\lambda) \qquad \text{ and } \qquad B_y(\lambda):=\sum_{b\in\{\pm1\}} b\, \mathbb P_B(b|y,\lambda),$$ we have \begin{align*} \beta(M)&=\sup_{\mathbb P_A, \mathbb P_B, \mu}\Big|\sum_{x,y=1}^N\,\sum_{a,b\in\{\pm1\}}\, M_{xy}\,a\,b\, \int_{\Lambda} \mathbb P_A(a|x,\lambda)\, \mathbb P_B(b|y,\lambda)\,\mathrm d\mu(\lambda)\Big|\\ &=\sup_{A_x, B_y, \mu}\Big|\sum_{x,y=1}^N\, M_{xy}\, \int_{\Lambda} A_x(\lambda)\,B_y(\lambda)\,d\mu(\lambda)\Big|\\ &=\sup_{\gamma}\Big|\sum_{x,y=1}^N\, M_{xy}\,\gamma_{x,y}\Big|, \end{align*} where the matrix $\gamma= (\gamma_{x,y})$ is a classical correlation matrix, containing the relevant information from the set of classical strategies. \begin{definition} We define the set of \emph{classical correlations} as $$\mathbb L:=\left\{\gamma_{x,y}\,\Big|\,\gamma_{x,y}=\int_{\Lambda} A_x(\lambda) B_y(\lambda) \, \mathrm d\mu(\lambda); \, |A_x(\lambda)|,|B_y(\lambda)|\leq 1 \right\} \subseteq \mathcal{M}_N(\mathbb{R})$$ where $\lambda$ is a random variable shared by Alice and Bob, following a probability distribution $\mu$. \end{definition} Using the definition above, the maximum payoff of a game $M$, using classical strategies, can be understood as the maximum overlap of the Bell functional $M$ defining the game with the set of classical correlations. \begin{proposition} The classical bias of the game defined by a Bell functional $M$ is: $$\beta(M)=\sup_{\gamma \in \mathbb L}\Bigg \{\Big| \sum_{x,y=1}^N \,M_{xy}\,\gamma_{x,y}\Big| \Bigg\}.$$ \end{proposition} \medskip We now move on to the quantum setting, where the players are allowed to use quantum strategies, that is they are allowed to perform local operations on a shared entangled state. \begin{definition} The \emph{quantum bias of the game} $M$ is defined as the optimisation problem $$\beta^*(M):=\sup \Big|\sum_{x,y=1}^N\,\sum_{a,b\in\{\pm1\}}\, M_{xy}\, a\,b\, \mathbb P_q(a,b|x,y)\Big| $$ where the supremum is taken over all \emph{quantum strategies} $$\mathbb P_q(a,b|x,y)=\Tr\Big[\rho\, (A_{a|x}\otimes B_{b|y})\Big],$$ where $\rho$ is a bipartite shared quantum state (of arbitrary dimension), and, for all questions $x,y$, $(A_{\pm|x})$, resp.~$(B_{\pm|y})$ are POVMs on Alice's, resp.~Bob's quantum system. \end{definition} Introducing the operators $$A_x := \sum_{a \in \{\pm 1\}} a\,A_{a|x} \qquad \text{ and } \qquad B_y := \sum_{b \in \{\pm 1\}} b\,B_{b|y},$$ and performing a similar computation as in the case of classical strategies, we are led to following definition and expression for the quantum bias of a non-local correlation game $M$. \begin{definition} We define the set of \emph{quantum correlations} as $$\mathbb Q:=\left\{\gamma_{x,y}\,\Big|\,\gamma_{x,y}=\Tr\bigg[\rho \cdot (A_x\otimes B_y) \bigg];\, \|A_x\|_{\infty},\|B_y\|_{\infty}\leq 1\right\}\subseteq \mathcal{M}_N(\mathbb{R}).$$ Above, $\rho$ is a bipartite quantum state of arbitrary dimension, and $A_x, B_y$ are observables of norm less than one. \end{definition} \begin{proposition} The quantum bias of the game defined by a Bell functional $M$ is: $$\beta^*(M)=\sup_{\gamma \in \mathbb Q}\Bigg \{\Big| \sum_{x,y=1}^N \,M_{xy}\,\gamma_{x,y}\Big| \Bigg\}.$$ \end{proposition} \begin{remark} The correlation games discussed above are also known in the literature as \emph{XOR games}, when the set of outputs is $\{0,1\}$ (instead of $\{\pm 1\}$) \cite{regev2015quantum}. 
\end{remark} Since classical correlations are a subset of the quantum correlations (corresponding to diagonal operators $A_x, B_y$), the quantum bias of the game is always larger than or equal to the classical bias. In some cases, the quantum bias $\beta^*(M)$ is strictly larger than the classical one, which can be understood physically as the existence of quantum correlations that cannot be reproduced within a classical local hidden variable model. This motivates the following definition. \begin{definition} For a given non-local game described by a \emph{Bell functional} $M$, we say that we have a \emph{Bell violation} if $\beta^*(M)>\beta(M)$. \end{definition} We now recall the results on the profound link between the classical bias of a game and its quantum bias, within their respective tensor norm descriptions. \begin{theorem}\cite{palazuelos2016survey} Consider a non-local correlation game characterized by the matrix $M \in \mathcal M_N(\mathbb R)$. \begin{itemize} \item The classical bias of the game is equal to the injective tensor norm of $M$: $$\beta(M)=\|M\|_{\ell^N_1(\mathbb R)\otimes_{\epsilon}\ell^N_1(\mathbb R)}.$$ \item The quantum bias of the game is equal to the $\gamma^*_2$ tensor norm of $M$: $$\beta^*(M)=\|M\|_{\ell^N_{1}(\mathbb{R})\otimes_{\gamma_2^*} \ell^N_{1}(\mathbb{R})}.$$ \end{itemize} Above, we recall from Definition \ref{def: dual tensor norm} that $$\|M\|_{\ell^N_{1}(\mathbb{R})\otimes_{\gamma_2^*} \ell^N_{1}(\mathbb{R})}:=\sup\Big\{|\langle v, M\rangle|\,;\, \|v\|_{\ell^N_{\infty}(\mathbb{R})\otimes_{\gamma_2} \ell^N_{\infty}(\mathbb{R})}\leq 1\Big\}.$$ \end{theorem} Tsirelson showed in \cite{tsirel1987quantum} the following theorem, which links the classical and the quantum bias of an XOR game via the famous Grothendieck constant $K_{G}^{\R}$, a quantity that plays a fundamental role in the theory of tensor products of Banach spaces; see also \cite[Corollary 3.3]{palazuelos2016survey}. \begin{theorem} Consider a non-local correlation game characterized by the matrix $M \in \mathcal M_N(\mathbb R)$. Then $$\beta^*(M)\leq K_{G}^{\R}\,\beta(M).$$ \end{theorem} From the result above, one can see that Bell inequality violations ($\beta^*(M) > \beta(M)$) can be understood as tensor norm ratios; this intrinsic link between Bell inequality violations and tensor norms motivates our framework. \bigskip Let us now discuss the CHSH non-local game \cite{clauser1969proposed}. \begin{definition} The CHSH game is given by the particular \emph{Bell functional} $$M_{\text{CHSH}} = \frac 1 2 \begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix}.$$ \end{definition} We now recall the result of \cite{wolf2009measurements}, which links the maximal violation of the CHSH Bell inequality to the compatibility of quantum measurements. More precisely, the maximal violation of the CHSH inequality is related to the dual formulation of the compatibility problem as an SDP \cite{wolf2009measurements}. \begin{theorem} Two dichotomic measurements $A=(A_0,A_1)$ on Alice's side are incompatible if and only if they enable a violation of the CHSH inequality, for a suitable choice of Bob's measurements $B=(B_0,B_1)$ and of the shared state.
More precisely, the optimal violation of the CHSH inequality is given by $$\underset{\psi,B_0,B_1}{\sup}\bra{\psi}\mathbb{B}\ket{\psi}=\frac{1}{\Gamma(A)},$$ where $$\mathbb{B}:=\sum_{x,y=0}^1 M_{\textrm{CHSH}}(x,y)\,A_x\otimes B_y=\frac{1}{2}(A_0\otimes B_0+A_0\otimes B_1+A_1\otimes B_0-A_1\otimes B_1)$$ and $\Gamma(A)$ is the noise compatibility threshold. \end{theorem} \begin{proof} The theorem follows essentially from the following observation: it was shown in \cite{wolf2009measurements} that $$\underset{\psi,B_0,B_1}{\sup}\bra{\psi}\mathbb{B}\ket{\psi}=1+2\epsilon^*.$$ Combining this result with Proposition \ref{prop: noise compatibility threshold}, where Alice's measurement apparatus is described by the POVMs $\{Q,I-Q\}$ and $\{P,I-P\}$, ends the proof. \end{proof} \section{The tensor norm associated to a game}\label{sec:non-locality-norm} In this section we introduce the notion of non-locality using our framework of tensor norms. As in the previous section, we consider a fixed \emph{quantum game}, and for \emph{fixed measurements on Alice's side} we introduce the notion of \emph{$M$-Bell-(non)locality}. This notion characterizes all the non-local effects that can be observed on Alice's side: to a fixed measurement apparatus $A$ we associate the tensor norm $\|A\|_M$, obtained by optimizing over all shared quantum states and all measurement apparatuses on Bob's side. We say that Alice's measurements are \emph{$M$-Bell-local} if $\|A\|_M$ is \emph{less than or equal to the classical bias of the game}, and \emph{$M$-Bell-non-local} otherwise.\\ The physical interpretation is the following: if Alice's measurements are $M$-Bell-local, then, no matter how one optimizes over the shared quantum state and over Bob's measurements, one cannot do better than the classical bias of the game; in other words, quantum strategies built on such measurements cannot outperform the classical setting for the game $M$.\\ We now give the precise definition of $\|A\|_M$ and of the \emph{$M$-Bell-(non)locality} notion. The main theorem of this section states that $\|A\|_M$ is a \emph{tensor norm} on $(\mathbb{R}^N,\|\cdot\|_M)\otimes (\mathcal{M}^{sa}_d,\|\cdot\|_{\infty})$, for a fixed invertible quantum game $M$.\\ As a starting point, we give the two main definitions of this section. \begin{definition}\label{def:A-M} Consider a fixed $N$-input, 2-outcome non-local game $M \in \mathcal M_N(\mathbb R)$. Fix also Alice's measurements, an $N$-tuple of binary observables $A = (A_1, \ldots, A_N) \in \mathcal M^{sa}_d(\mathbb C)^N$. The largest quantum bias of the game $M$, with Alice using the observable $A_x$ to answer question $x \in [N]$, is given by $$\sup_{\|\psi\| = 1} \sup_{\|B_y\| \leq 1} \Big \langle \psi \Big | \sum_{x,y = 1}^N\,M_{xy} \,A_x \otimes B_y \Big | \psi \Big \rangle = \underset{\|B_y\|\leq 1}{\sup}\lambda_{\max}\left[\sum_{x,y=1}^N\,M_{xy}\,A_x\otimes B_y\right]=: \|A\|_M,$$ where the suprema are taken over bipartite pure states $\psi \in \mathbb C^d \otimes \mathbb C^D$ and over Bob's observables $B = (B_1, \ldots, B_N) \in \mathcal M^{sa}_D(\mathbb C)^N$, where $D$ is a free dimension parameter. We shall later show in Theorem \ref{thm:M-tensor-norm} that this quantity defines a (tensor) norm. \end{definition} \begin{remark} In the definition above, the dimension of Alice's measurements is fixed ($d$), while the dimension of Bob's Hilbert space ($D$) is free.
In the following we will show that one can assume, without loss of generality, that Alice and Bob have Hilbert spaces of the same dimension ($D=d$ suffices in the optimization problem). Let us consider $D\geq d$, a quantum state $\ket \psi \in \mathbb C^d \otimes \C^D$, and $N$ binary measurement operators $B_1, \ldots, B_N \in \mathcal M^{sa}_D(\mathbb C)$. The idea is that the Schmidt decomposition of the bipartite pure quantum state $\ket \psi$ will induce a reduction of the effective dimension of Bob's Hilbert space from $D$ to $d$. We start from the Schmidt decomposition of $\ket \psi$ $$\ket \psi=\sum_{i=1}^d \sqrt{\lambda_i}\ket{a_i}\otimes\ket{b_i}.$$ Note that in the equation above, the number of terms is bounded by the smallest of the two dimensions, that is $d$. The orthonormal family $\{\ket{b_i}\}_{i \in [d]}$ spans a subspace of dimension $d$ inside $\mathbb C^D$. Consider an arbitrary orthonormal \emph{basis} $\{\ket{\tilde b_i}\}_{i \in [d]}$ of $\mathbb C^d$ and the isometry $$V : \mathbb C^d \to \mathbb C^D \quad \text{ such that } \quad \forall i \in [d], \quad V\ket{\tilde b_i} = \ket{b_i}.$$ Let us now introduce the quantum state $$\mathbb C^d \otimes \mathbb C^d \ni \ket{\tilde \psi}:= \sum_{i=1}^d \sqrt{\lambda_i}\ket{a_i}\otimes\ket{\tilde b_i}$$ and the measurement operators $$ \mathcal M^{sa}_d(\mathbb C) \ni \tilde B_y := V^* B_y V, \quad \forall y \in [N].$$ The normalization of the state and the fact that the $\tilde B_y$ are contractions follow from the isometry property of the operator $V$. We now have \begin{align*} \Big \langle \psi \Big | \sum_{x,y = 1}^N M_{xy} A_x \otimes B_y \Big | \psi \Big \rangle &= \sum_{x,y = 1}^N M_{xy}\sum_{i,j=1}^d\sqrt{\lambda_i\lambda_j} \langle a_i | A_x | a_j \rangle \underbrace{\langle b_i | B_y | b_j \rangle}_{=\langle \tilde b_i | V^* B_y V | \tilde b_j \rangle}\\ &= \sum_{x,y = 1}^N M_{xy}\sum_{i,j=1}^d\sqrt{\lambda_i\lambda_j} \langle a_i | A_x | a_j \rangle \langle \tilde b_i | \tilde B_y | \tilde b_j \rangle\\ &= \Big \langle \tilde\psi \Big | \sum_{x,y = 1}^N M_{xy} A_x \otimes \tilde B_y \Big | \tilde\psi \Big \rangle. \end{align*} The above computation shows that any correlation that can be obtained with Bob's Hilbert space of dimension $D$ can also be obtain with a Hilbert space of dimension $d$, equal to that of Alice. \end{remark} \begin{definition}\label{def:Bell-local} Given a non-local game $M$, we say that Alice's measurements $A = (A_1, \ldots, A_N)$ are \emph{$M$-Bell-local} if for any choice of Bob's observables $B$ and for any shared state $\psi$, one cannot violate the Bell inequality corresponding to $M$: $$\|A\|_M\leq \beta(M).$$ If this is not the case, we call Alice's measurements \emph{$M$-Bell-non-local}. \end{definition} Instead of using definition \ref{def:A-M} we will use another simple equivalent formulation of $\|A\|_M$. To do so, we will consider $\|A\|_M$ as an optimization problem using an SDP, and we will give its equivalent formulation as a dual of the primal SDP. 
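Before carrying out this derivation, the following minimal numerical sketch (in Python, assuming only NumPy is available; the helper names \texttt{abs\_op} and \texttt{bell\_locality\_norm} are ours, introduced purely for illustration) evaluates the closed-form expression $\lambda_{\max}\big[\sum_y\big|\sum_x M_{xy}A_x\big|\big]$ obtained in the lemma below, for the CHSH functional with Alice's observables $(\sigma_X,\sigma_Z)$; it returns $\sqrt2\approx1.414$, the well-known maximal quantum bias (Tsirelson value) of the CHSH game in this normalization.

\begin{verbatim}
import numpy as np

# Alice's observables and the (normalized) CHSH Bell functional
sX = np.array([[0, 1], [1, 0]], dtype=complex)
sZ = np.array([[1, 0], [0, -1]], dtype=complex)
M_chsh = 0.5 * np.array([[1.0, 1.0], [1.0, -1.0]])

def abs_op(C):
    """Matrix absolute value |C| of a self-adjoint matrix C."""
    w, V = np.linalg.eigh(C)
    return V @ np.diag(np.abs(w)) @ V.conj().T

def bell_locality_norm(M, A):
    """||A||_M = lambda_max[ sum_y | sum_x M[x,y] A[x] | ] (formula of the lemma below)."""
    S = sum(abs_op(sum(M[x, y] * A[x] for x in range(M.shape[0])))
            for y in range(M.shape[1]))
    return np.linalg.eigvalsh(S)[-1]

print(bell_locality_norm(M_chsh, [sX, sZ]))   # approx 1.4142 = sqrt(2)
\end{verbatim}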
\begin{lemma} Given a quantum game $(M_{xy})_{x,y=1}^N$, the quantity $\|A\|_M$ admits the following equivalent formulation: $$\|A\|_{M}=\lambda_{\max}\left[\sum_{y=1}^N \bigg| \sum_{x=1}^N M_{xy}\,A_x\bigg|\right].$$ \end{lemma} \begin{proof} Remark that the definition above is equivalent to $$\|A\|_M=\sup_{\|\psi\| = 1}\quad\sup_{\|B_y\| \leq 1} \Big \langle \psi \Big | \sum_{x,y = 1}^N\,M_{xy}\,A_x \otimes B_y \Big | \psi \Big \rangle$$ with $\ket{\psi}=(\sqrt{\rho}\otimes I) \sum_{i=1}^d\ket{ii}$, where $\rho$ is a density matrix (this is the classical purification trick in quantum information theory, expressing any bipartite pure state as a local perturbation of the maximally entangled state $\sum_i \ket{ii}$). Then, one has $$\|A\|_M=\sup_{\rho\geq 0\,;\,\Tr\rho=1}\quad \sup_{\|B_y\| \leq 1}\bigg\{\sum_{x,y=1}^N\,M_{xy}\Tr\Big[\sqrt{\rho} \, A_x\, \sqrt{\rho}\, B_y^{\top}\Big]\bigg\}$$ Using the change of variables $B_y^{\top}=2S_y-I$ for all $y$, we have $$\|A\|_M=\underset{S_y\,,\,\rho}{\sup}\bigg\{\sum_{y=1}^N\Tr\Big[\sqrt{\rho}\, A'_y\, \sqrt{\rho}\, (2S_y-I)\Big]\bigg\}$$ where $A'_y=\sum_{x=1}^N M_{xy}\,A_x$ and the optimisation is over $S_y$, for all $y$, and $\rho$, subject to the constraints $0\leq S_y\leq I$, $\rho\geq 0$ and $\Tr\rho=1$. Then $$\|A\|_M=\underset{S_y\,,\,\rho}{\sup}\bigg\{\sum_{y=1}^N\Tr\Big[A'_y(2\,\sqrt{\rho}\,S_y\,\sqrt{\rho}-\rho)\Big]:\, 0\leq S_y\leq I\, ,\, \rho\geq0\,, \,\Tr\rho=1\bigg\}$$ We now make one last change of variables, $S'_y=\sqrt{\rho}\,S_y\,\sqrt{\rho}$: $$\|A\|_M=\underset{S'_y\,,\,\rho}{\sup}\bigg\{\sum_{y=1}^N\Tr\Big[A'_y(2S'_y-\rho)\Big]:\,0\leq S'_y\leq \rho\,,\,\rho\geq 0\,,\,\Tr\rho=1\bigg\}$$ where the optimisation is now over $S'_y$, for all $y$, and $\rho$, with the constraints above.\\ We now view $\|A\|_M$ as an SDP and compute its dual. To this end, we consider the following Lagrangian: $$\mathcal{L}=\sum_{y=1}^N\, \Tr\Big[\,A'_y\,(2S'_y-\rho)\Big]\,+\,\langle X,\rho\rangle\, +\,\sum_{y=1}^N\, \langle X_y,S'_y\rangle\, +\, \epsilon(1-\Tr \rho)+\sum_{y=1}^N\, \langle \rho-S'_y,Z_y\rangle.$$ Here $X$, $X_y$, $\epsilon$, $Z_y$ are the dual variables associated respectively with the constraints $\rho\geq0$, $S'_y\geq0$, $\Tr\rho=1$ and $S'_y\leq \rho$.\\ Then, by SDP duality, one has $$\|A\|_M=\sup_{S'_y\,,\,\rho}\quad \inf_{X\,,\,X_y\,,\epsilon\,,Z_y} \quad \mathcal L=\inf_{X\,,\,X_y\,,\epsilon\,,Z_y} \quad \sup_{S'_y\,,\,\rho} \quad \mathcal L$$ where: \begin{itemize} \item $X\geq 0$ and, for all $y$, $X_y, Z_y\geq 0$ are positive semidefinite matrices; \item $\epsilon \in \mathbb R$ is unconstrained. \end{itemize} Taking the suprema first over $S'_y$ and $\rho$, we have: $$\sup_{S'_y\,,\rho}\,\mathcal L=\begin{cases}\,\epsilon, &\text{if }\;\forall y\,,\, 2A'_y+X_y-Z_y=0\quad\text{and}\quad X-\sum_{y=1}^N A'_y-\epsilon I +\sum_{y=1}^N Z_y=0,\\ \,+\infty, &\text{otherwise.} \end{cases}$$ Now, taking the infimum subject to these constraints, \begin{align*} \|A\|_M&=\underset{X\,,\,X_y\,,\,\epsilon\,,\,Z_y}{\inf}\bigg\{\epsilon\Big|\quad \forall y\,,\, 2A'_y+X_y-Z_y=0\,;\, X-\sum_{y=1}^N A'_y-\epsilon I +\sum_{y=1}^N Z_y=0\,\bigg\}\\ &=\underset{Z_y\,,\,\epsilon}{\inf}\bigg\{\epsilon\Big| \quad\forall y \,,\,2A'_y\leq Z_y\quad\text{and}\quad \sum_{y=1}^N A'_y+\epsilon I \geq \sum_{y=1}^N Z_y\,\bigg\} \end{align*} where in the last equality we have used that $X\geq 0$ and $Z_y\geq 0$.
Given the constraints $Z_y\geq 0$ and $Z_y\geq 2A'_y$, we can choose $Z_y:=2(A')_y^+$, where $(A')_y^+$ is the positive part in the decomposition $A'_y=(A')_y^+ - (A')_y^-$; this is the smallest (with respect to the positive semidefinite order) choice for $Z_y$. Using this choice, we obtain $$\|A\|_M=\underset{\epsilon}{\inf}\bigg\{\epsilon\Big|\,\epsilon I\geq \sum_{y=1}^N \big[(A')_y^+ + (A')_y^-\big]\bigg\},$$ and therefore $$\|A\|_{M}=\lambda_{\max}\left[\sum_{y=1}^N \bigg| \sum_{x=1}^N\,M_{xy}\,A_x\bigg|\right].$$ \end{proof} In the following, we shall exploit the new formulation of $\|A\|_M$ and we will show in the lemma below that $\|\cdot\|_M$ is a norm for any \emph{invertible} game $M$. \begin{lemma}\label{lem:norm-M} Given an \emph{invertible} game $M$, the $M$-Bell-locality quantity $\|\cdot\|_M$ is absolutely homogeneous, vanishes only at $A=0$, and satisfies the triangle inequality $$\|A+A'\|_M\leq\|A\|_M+\|A'\|_M.$$ In particular, $\|\cdot\|_M$ is a norm. \end{lemma} \begin{proof} We prove the three properties in turn. For the homogeneity, obviously we have $$\|\alpha A\|_M=\lambda_{\max}\left[\sum_{y=1}^N \bigg| \sum_{x=1}^N \alpha M_{xy}\,A_x\bigg|\right]=|\alpha|\lambda_{\max}\left[\sum_{y=1}^N \bigg| \sum_{x=1}^N M_{xy}\,A_x\bigg|\right]=|\alpha|\|A\|_M$$ for all $\alpha\in\mathbb R$. For the definiteness, we have $$\|A\|_M=\lambda_{\max}\left[\sum_{y=1}^N \bigg| \sum_{x=1}^N \,M_{xy}\,A_x\bigg|\right]=0 \implies \sum_{x=1}^N \, M_{xy}\, A_x=0 \;\;\forall y\implies A=0,$$ where the first implication holds because each $\big|\sum_{x} M_{xy}A_x\big|$ is positive semidefinite, and the second follows from the invertibility of $M$ (and thus of $M^{\top}$).\\ For the triangle inequality we have \begin{align*} \|A+A'\|_M&=\sup_{\|\psi\| = 1}\quad\sup_{\|B_y\| \leq 1} \Big \langle \psi \Big | \sum_{x,y = 1}^N\,M_{xy}\,(A_x+A'_x) \otimes B_y \Big | \psi \Big \rangle\\ &\leq\sup_{\|\psi\| = 1}\quad\sup_{\|B_y\| \leq 1} \Big \langle \psi \Big | \sum_{x,y = 1}^N\,M_{xy}\,A_x \otimes B_y \Big | \psi \Big \rangle+ \sup_{\|\psi\| = 1}\quad\sup_{\|B_y\| \leq 1} \Big \langle \psi \Big | \sum_{x,y = 1}^N\,M_{xy}\,A'_x \otimes B_y \Big | \psi \Big \rangle \end{align*} Hence we have $$\|A+A'\|_M\leq \|A\|_M+\|A'\|_M.$$ \end{proof} In the last lemma we have shown that $\|A\|_M$ is a norm (for an \emph{invertible Bell functional} $M$). We shall call this norm the \emph{$M$-Bell-locality norm}. The (real) vector spaces $\mathbb R^N$, resp.~$\mathcal{M}_d^{sa}(\mathbb C)$ shall be endowed with the $\|\cdot\|_M$, resp.~the operator norm (or the Schatten-$\infty$ norm, $\mathcal{S}_{\infty}$). Note that there is an abuse of notation here: we shall use $\|\cdot\|_M$ to denote norms on $\mathbb R^N$ and on $\mathbb R^N \otimes \mathcal{M}_d^{sa}(\mathbb C)$; the situation will be clear from the context. We shall now investigate the properties of the $\|\cdot\|_M$ norm with respect to this tensor product structure. To a given $N$-tuple of observables $(A_1, A_2, \ldots, A_N)$, we associate the tensor \begin{equation*} A := \sum_{x=1}^N e_x \otimes A_x \in \mathbb R^N \otimes \mathcal M_d^{sa}(\mathbb C). \end{equation*} \begin{definition} Given $p\in \mathbb{R}^N$, we define the following quantity: $$\|p\|_M:=\sum_{y=1}^N\bigg|\sum_{x=1}^N\,M_{xy}\,p_x\bigg| = \|M^\top p\|_1.$$ \end{definition} In the lemma below we will show that $\|\cdot\|_M$ is a norm.
\begin{lemma}\label{M-norm} Given an invertible matrix $M$, the function $\mathbb{R}^N \ni p \mapsto \|p\|_M$ is a norm. \end{lemma} \begin{proof} Obviously we have $\|\alpha\, p\|_M=|\alpha|\,\|p\|_M$ for all $\alpha\in \mathbb{R}$.\\ Next, we show that $\|p\|_M=0\implies p=0$: $$\|p\|_M=\sum_{y=1}^N\bigg|\sum_{x=1}^NM_{xy}\,p_x\bigg|=0\implies\sum_{x=1}^NM_{xy}\, p_x=0\;\;\forall y\iff M^{\top}p=0.$$ Using the assumption that $M$ is invertible, we necessarily have $p=0$, which proves definiteness.\\ Finally, we prove the triangle inequality $\|p+p'\|_M\leq\|p\|_M+\|p'\|_M$. We have $$\|p+p'\|_M=\sum_{y=1}^N\bigg|\sum_{x=1}^NM_{xy}\,(p_x+p'_x)\bigg| \leq\sum_{y=1}^N\bigg|\sum_{x=1}^NM_{xy}\,p_x\bigg|+\sum_{y=1}^N\bigg|\sum_{x=1}^N\,M_{xy}\,p'_x\bigg|=\|p\|_M+\|p'\|_M.$$ Thus we have shown that $\|\cdot\|_M$ is a norm. \end{proof} By Lemma \ref{M-norm}, we endow $\R^N$ with the norm $\|\cdot\|_M$, obtaining a Banach space $(\R^N,\|\cdot\|_M)$. In the following, we shall investigate the dual space of $(\R^N,\|\cdot\|_M)$. For that we shall compute the dual norm of $\|\cdot\|_M$, denoted by $\|\cdot\|^*_M$. \begin{proposition} The dual norm $\|\cdot\|^*_M$ is given by: $$\forall p \in \mathbb R^N, \qquad \|p\|_M^*=\max_y\Big|\sum_{z=1}^N\,(M^{-1})_{yz}\,p_z\Big| = \|M^{-1}p\|_\infty.$$ \end{proposition} \begin{proof} Let $q,p\in\R^N$. We have \begin{align*} |\langle p,q\rangle|=\Big|\sum_{x=1}^N\,p_x\,q_x\Big|&=\Big|\sum_{x,y,z=1}^N q_x\,M_{xy}\,M^{-1}_{yz}p_z\Big|=\Big|\sum_{y=1}^N\Big(\sum_{x=1}^N\,M_{xy}\,q_x\Big)\Big(\sum_{z=1}^N\,M^{-1}_{yz}p_z\Big)\Big|\\ &\leq\sum_{y=1}^N\Big|\sum_{x=1}^N\,M_{xy}\,q_x\Big|\Big|\sum_{z=1}^N\,M^{-1}_{yz}\,p_z\Big|\leq \Big(\max_y\Big|\sum_{z=1}^N\,M^{-1}_{yz}\,p_z\Big|\Big)\,\sum_{y=1}^N\Big|\sum_{x=1}^N\,M_{xy}q_x\Big|\\ &=\max_y\Big|\sum_{z=1}^N\,M^{-1}_{yz}\,p_z\Big|\,\|q\|_M, \end{align*} where we have used in the second equality that $M\cdot M^{-1}=I$. By taking the supremum over $\|q\|_M \leq 1$, we have shown that $\|p\|_M^* \leq \|M^{-1}p\|_\infty$. To show the converse inequality, note that $$\max_y\Big|\sum_{z=1}^N\,M^{-1}_{yz}\,p_z\Big| = \langle p, q\rangle, \qquad \text{for} \qquad q_z = \epsilon (M^{-1})_{y_0z},$$ for some $y_0 \in [N]$ achieving the maximum, and $\epsilon = \pm 1$. In order to conclude, we have to establish that $\|q\|_M \leq 1$. Indeed, we have $$\|q\|_M = \sum_y \Big| \sum_x M_{xy} \epsilon (M^{-1})_{y_0x}\Big| = \sum_y \big| (M^{-1}M)_{y_0y} \big| = 1.$$ \end{proof} The Banach space $(\R^N,\|\cdot\|^*_M)$ is the dual of $(\R^N,\|\cdot\|_M)$: $(\R^N,\|\cdot\|_M)^*=(\R^N,\|\cdot\|^*_M)$. Now, we are ready to show the main theorem of this section, namely that the map $\R^N\otimes\M_d^{sa}(\C)\ni A\mapsto\|A\|_M$ is a tensor norm (or a reasonable crossnorm) in the sense of Definition \ref{def: reasonnable norm}. To this end, using Proposition \ref{Prop: reasonnable norm}, it suffices to show that: $$\|A\|_{\R^N\otimes_{\epsilon}\M^{sa}_d(\C)}\leq\|A\|_M\leq\|A\|_{\R^N\otimes_{\pi}\M^{sa}_d(\C)},$$ where $\R^N$ is endowed with the norm $\|\cdot\|_M$ and $\M_d^{sa}$ with the norm $\|\cdot\|_{\infty}$. Before we show that $\|A\|_M$ is a tensor norm, we shall show the following proposition for tensors of rank one $A=p\otimes B\in\R^N\otimes\M_d^{sa}$. \begin{proposition}\label{prop:tensor-norm-M} Let $A\in\mathbb R^N \otimes \mathcal M_d^{sa}(\mathbb C)$, where $\mathbb{R}^N$ and $\mathcal M_d^{sa}(\mathbb C)$ are endowed with $\|\cdot\|_{M}$ and the natural operator norm, respectively.
Given the particular decomposition $A=p\otimes B$ with $p\in(\mathbb{R}^N,\|\cdot\|_M)$ and $B\in(\mathcal M_d^{sa}(\mathbb C),\|\cdot\|_{\infty})$, one has $$\|p\otimes B\|_M=\|p\|_M\|B\|_{\infty}.$$ \end{proposition} \begin{proof} Given $A=p\otimes B$ one has $$\|p\otimes B\|_M=\lambda_{\max}\bigg[\sum_{y=1}^N\bigg|\sum_{x=1}^NM_{xy}\,p_x B\bigg|\bigg]=\lambda_{\max}[|B|]\sum_{y=1}^N\bigg|\sum_{x=1}^NM_{xy}\,p_x\bigg|=\|B\|_{\infty}\|p\|_M.$$ Above, we have used the following fact: for selfadjoint matrices $B$, $$\|B\|_\infty = \max_{\lambda \text{ eig.~of $B$}} |\lambda| = \max_{\lambda \text{ eig.~of $|B|$}} \lambda = \lambda_{\max}[|B|].$$ \end{proof} We now state and prove the following important result, establishing that the norm $\|\cdot\|_M$ is indeed a tensor norm. \begin{theorem}\label{thm:M-tensor-norm} For a fixed $N$-input, 2-output invertible non-local game $M$, the quantity $\|\cdot\|_M$ introduced in Definition \ref{def:A-M}, which characterizes the largest quantum bias of the game $M$ when one fixes Alice's dichotomic measurements, is a reasonable crossnorm on $\mathcal M^{sa}_d(\mathbb C)^N \cong \mathbb R^N \otimes \mathcal M_d^{sa}(\mathbb C)$: $$\|A\|_{\R^N\otimes_{\epsilon}\M^{sa}_d(\C)}\leq\|A\|_M\leq\|A\|_{\R^N\otimes_{\pi}\M^{sa}_d(\C)},$$ where $\R^N$ is endowed with $\|\cdot\|_M$ and $\M_d^{sa}(\C)$ with $\|\cdot\|_{\infty}$. \end{theorem} Before we give the proof of the theorem we recall the definitions of the projective and the injective norms in our setting: \begin{align*} \|A\|_{\R^N\otimes_{\pi}\M^{sa}_d(\C)}&:=\inf\Big\{\sum_{i=1}^k\|p_i\|_M\,\|X_i\|_{\infty},\,A=\sum_{i=1}^k\,p_i\otimes X_i\Big\},\\ \|A\|_{\R^N\otimes_{\epsilon}\M^{sa}_d(\C)}&:=\sup\Big\{\langle\pi\otimes\alpha,A\rangle;\,\|\pi\|^*_M\leq 1,\,\|\alpha\|_1\leq1\Big\}, \end{align*} where $\M_d^{sa}(\C)\ni\alpha\mapsto\|\alpha\|_1=\Tr|\alpha|$ is the Schatten 1-norm (or the nuclear norm). \begin{proof} We shall prove first the easy direction: $\|A\|_M\leq\|A\|_{\R^N\otimes_{\pi}\M^{sa}_d(\C)}$. Let us consider a decomposition $A=\sum_{i=1}^k\,p_i\otimes X_i$. We have $$\|A\|_M=\Big\|\sum_{i=1}^k\,p_i\otimes X_i\Big\|_M\leq\sum_{i=1}^k\|p_i\otimes X_i\|_M=\sum_{i=1}^k\|p_i\|_M\|X_i\|_{\infty},$$ where the factorization property follows by Proposition \ref{prop:tensor-norm-M}. Hence we have $$\|A\|_M\leq\|A\|_{\R^N\otimes_{\pi}\M^{sa}_d(\C)}.$$ We shall now prove that $\|A\|_{\R^N\otimes_{\epsilon}\M^{sa}_d(\C)}\leq\|A\|_M$. Let $\alpha=\pm\ketbra{\phi}{\phi} \in\M_d^{sa}(\C)$ be an extremal point of the unit ball of the $\mathcal S_1$ space and $\pi\in(\R^N,\|\cdot\|^*_M)$.
We have \begin{align*} |\langle \pi\otimes\alpha,A\rangle|&=\Big|\Big\langle\alpha,\sum_{x=1}^N\,\pi_x\,A_x\Big\rangle\Big|=\Big|\Big\langle\alpha,\sum_{x,y,z=1}^N\, A_z\,M_{zy}\,M^{-1}_{yx}\,\pi_x\Big\rangle\Big|\\ &\leq\sum_{y=1}^N\Big|\sum_{z=1}^N\,M_{zy}\,\langle \alpha,A_z\rangle\Big|\Big|\sum_{x=1}^N M^{-1}_{yx}\pi_{x}\Big|\leq\sum_{y=1}^N\Big|\sum_{z=1}^N\,M_{zy}\,\langle \alpha,A_z\rangle\Big|\max_y\Big|\sum_{x=1}^N M^{-1}_{yx}\pi_{x}\Big|\\ &=\|\pi\|^*_M\,\sum_{y=1}^N\Big|\sum_{z=1}^N\,M_{zy}\,\langle \alpha,A_z\rangle\Big|=\|\pi\|^*_M\,\sum_{y=1}^N\Big|\sum_{z=1}^N\,\langle \alpha,M_{zy}\,A_z\rangle\Big|\\ &=\|\pi\|^*_M\,\sum_{y=1}^N\Big|\sum_{z=1}^N\,\Tr\Big[ \alpha\,M_{zy}\,A_z\Big]\Big|=\|\pi\|^*_M\,\sum_{y=1}^N\Big|\sum_{z=1}^N\,\bra{\phi} \,M_{zy}\,A_z\ket{\phi}\Big|\\ &=\|\pi\|^*_M\,\sum_{y=1}^N\Big|\bra{\phi} \,\Big(\sum_{z=1}^N\,M_{zy}\,A_z\Big)^+\ket{\phi}-\bra{\phi} \,\Big(\sum_{z=1}^N\,M_{zy}\,A_z\Big)^-\ket{\phi}\Big|\\ &\leq\|\pi\|^*_M\,\sum_{y=1}^N\Big[\Big|\bra{\phi} \,\Big(\sum_{z=1}^N\,M_{zy}\,A_z\Big)^+\ket{\phi}\Big|+\Big|\bra{\phi} \,\Big(\sum_{z=1}^N\,M_{zy}\,A_z\Big)^-\ket{\phi}\Big|\Big]\\ &=\|\pi\|^*_M\sum_{y=1}^N\bra{\phi}\Big|\sum_{z=1}^N\,M_{zy}\,A_z\Big|\ket{\phi}, \end{align*} where the first inequality is the triangle inequality. By taking the supremum over $\|\pi\|^*_M\leq1$ and $\|\alpha\|_{\mathcal{S}_1}\leq1$ in the last expression, we have: $$\sup\{|\langle\pi\otimes\alpha,A\rangle|;\,\|\pi\|^*_M\leq1\,,\,\|\alpha\|_{\mathcal{S}_1}\leq1\}\leq\sup_{\|\phi\|=1}\sum_{y=1}^N\bra{\phi}\Big|\sum_{z=1}^N\,M_{zy}\,A_z\Big|\ket{\phi}=\lambda_{\max}\Big[\sum_{y=1}^N\Big|\sum_{z=1}^N\,M_{zy}\,A_z\Big|\Big].$$ Hence we have $$\|A\|_{\R^N\otimes_{\epsilon}\M^{sa}_d(\C)}\leq\|A\|_M$$ which ends the proof of the theorem. \end{proof} \section{Dichotomic measurement compatibility via tensor norms}\label{sec: compatibility-norm} Having addressed in the previous sections the maximum value of a non-local game $M$ with fixed dichotomic observables $A$ on Alice's side, we now turn to the second object of our study, quantum measurement (in-)compatibility. We characterize the compatibility of dichotomic quantum measurements using tensor norms, following \cite{bluhm2022incompatibility}. Recall that to a dichotomic POVM $(E, I-E)$ we associate the corresponding observable $A = E - (I-E) = 2E - I$. In other words, the effect $E$ corresponds to the ``$+1$'' outcome, while the effect $I-E$ corresponds to the other outcome, ``$-1$''. This way, the set of dichotomic POVMs is mapped to the set of selfadjoint operators $-I \leq A \leq I$. To an $N$-tuple of observables $(A_1, A_2, \ldots, A_N)$, we associate the tensor \begin{equation} A := \sum_{i=1}^N e_i \otimes A_i \in \mathbb R^N \otimes \mathcal M_d^{sa}(\mathbb C). \end{equation} The (real) vector spaces $\mathbb R^N$, resp.~$\mathcal M_d^{sa}(\mathbb C)$ shall be endowed with the $\ell_\infty$, resp.~the operator norm (or the Schatten-$\infty$ norm, $S_{\infty}$). On the tensor product space $$\mathbb R^N \otimes \mathcal M_d^{sa}(\mathbb C) \cong \left[ \mathcal M_d^{sa}(\mathbb C) \right]^N$$ we shall consider two tensor norms: the \emph{injective norm} \begin{equation}\label{eq:epsilon-norm} \|X = (X_1, X_2, \ldots, X_N)\|_\epsilon = \max_{i=1}^N \|X_i\|_\infty \end{equation} and the compatibility norm, which was introduced in \cite[Proposition 9.4]{bluhm2022incompatibility}. We review next its definition and its basic properties, in order to make the presentation self-contained.
We note however that the situation considered in \cite[Section 9]{bluhm2022incompatibility} is more general, going beyond the case of quantum mechanics. \begin{definition}\label{def:compatibility-norm} For a tensor $X \in \mathbb R^N \otimes \mathcal M_d^{sa}(\mathbb C)$, we define the following quantity, which we call the \emph{compatibility norm} of $X$: \begin{equation}\label{eq:def-norm-c} \|X\|_c := \inf \left\{ \Big\|\sum_{j=1}^K H_j\Big\|_\infty \, : \, X = \sum_{j=1}^K z_j \otimes H_j, \, \text{ s.t. } \, \forall j \in [K], \, \|z_j\|_\infty \leq 1 \text{ and } H_j \geq 0\right\}. \end{equation} \end{definition} Note that in the case of a single matrix ($N=1$) we have $\|(X_1)\|_c = \|X_1\|_\infty$, and that, in general, we have $$\|X\|_c = \inf \left\{ t \, : \, X = \sum_{j=1}^K z_j \otimes H_j, \, \text{ s.t. } \, \sum_{j=1}^K H_j = t I_d \text{ and } \forall j \in [K], \, \|z_j\|_\infty = 1, H_j \geq 0\right\}.$$ Indeed, the condition $\|z_j\|_\infty=1$ can be imposed by replacing a non-zero term $z_j \otimes H_j$ by $z_j/\|z_j\|_\infty \otimes \|z_j\|_\infty H_j$, while the condition $\sum_j H_j = tI_d$ can be imposed by adding the term $0 \otimes (t I_d - \sum_j H_j)$ to the decomposition. \begin{proposition}\label{prop:c-norm-tensor} The $\|\cdot\|_c$ quantity is a tensor norm on $(\mathbb R^N, \|\cdot\|_\infty) \otimes (\mathcal M_d^{sa}(\mathbb C), \|\cdot\|_\infty)$. \end{proposition} \begin{proof} Let us start with the triangle inequality, $\|A+B\|_c \leq \|A\|_c + \|B\|_c$. Consider optimal decompositions \begin{align*} A &= \sum_j z_j \otimes H_j\\ B &= \sum_k w_k \otimes T_k \end{align*} such that $$\|A\|_c = \Big\|\sum_j H_j \Big\| \qquad \text{and} \qquad \|B\|_c = \Big\|\sum_k T_k \Big\|.$$ Then, $$A+B = \sum_j z_j \otimes H_j + \sum_k w_k \otimes T_k$$ is a valid decomposition for $A+B$, hence $$\|A+B\|_c \leq \Big\|\sum_j H_j + \sum_k T_k\Big\| \leq \Big\|\sum_j H_j \Big\| + \Big\| \sum_k T_k\Big\| = \|A\|_c + \|B\|_c.$$ The scaling equality $\|\lambda A \|_c = |\lambda| \|A\|_c$ is straightforward, and left to the reader. Consider now $A$ such that $\|A\|_c = 0$. Then, for all $\epsilon > 0$, there is a finite decomposition $A = \sum_j z_j \otimes H_j$ such that $\| \sum_j H_j\| \leq \epsilon$. We have then, for all $x \in [N]$, $$\|A_x\| = \Big\|\sum_j z_j(x) H_j\Big\| \leq \Big\|\sum_j |z_j(x)| H_j\Big\| \leq \Big\|\sum_j H_j\Big\| \leq \epsilon.$$ Taking $\epsilon \to 0$ shows that $A_x = 0$ for all $x$, and thus $A = 0$. The fact that the compatibility norm is bounded by the injective and projective norms is established in \cite[Proposition 3.3]{bluhm2022tensor}. Finally, let us show that $\|\cdot\|_c$ factorizes on simple tensors. To this end, consider a (non-zero) product tensor $A = w \otimes T$ with $\|w\|_\infty = 1$ (this can always be enforced by absorbing the norm of $w$ into $T$). On the one hand, we have $$\|A\|_c \leq \|T\| = \|w\|_\infty \|T\|,$$ establishing one inequality. Consider now an optimal decomposition $$w \otimes T = \sum_j z_j \otimes H_j$$ with $\|z_j\|_\infty \leq 1$, $H_j \geq 0$, and $\|w \otimes T\|_c = \|\sum_j H_j\|$. Consider an index $k \in [N]$ such that $\|w\|_\infty = |w(k)|$. We have then $w(k) T = \sum_j z_j(k) H_j$ and thus $$\|w\|_\infty \|T\| = \Big\| \sum_j z_j(k) H_j \Big \| \leq \Big\| \sum_j |z_j(k)| H_j \Big \| \leq \Big\| \sum_j H_j \Big \| = \|w \otimes T\|_c,$$ finishing the proof.
\end{proof} We specialize now \cite[Theorem 9.2]{bluhm2022incompatibility} to the case of quantum mechanics, showing that the compatibility norm from Definition \ref{def:compatibility-norm} characterizes the compatibility of dichotomic quantum measurements. \begin{theorem}\label{thm:c-norm-compatibility} Let $A = (A_1, \ldots, A_N)$ be an $N$-tuple of self-adjoint $d \times d$ complex matrices. Then: \begin{enumerate} \item $A$ is a collection of dichotomic quantum observables (i.e.~$\|A_i\|_\infty \leq 1$ $\forall i$) if and only if $\|A\|_\epsilon \leq 1$, where $\|\cdot\|_\epsilon$ is the $\ell_\infty^N \otimes_\epsilon S_\infty^d$ tensor norm. \item $A$ is a collection of \emph{compatible} dichotomic quantum observables if and only if $\|A\|_c \leq 1$. \end{enumerate} \end{theorem} \begin{proof} The first statement is a direct consequence of~\eqref{eq:epsilon-norm}. For the second statement, we shall prove the two implications separately. First, consider compatible dichotomic observables $A_1, \ldots, A_N$, and their joint POVM $X$, with effects $X_\epsilon \geq 0$ indexed by sign vectors $\epsilon \in \{\pm 1\}^N$, such that $$\forall i \in [N], \, \forall s \in \{\pm 1\}, \qquad E^s_i = \frac{I_d + s A_i}{2} = \sum_{\epsilon \in \{\pm 1\}^N \, : \, \epsilon_i = s} X_\epsilon.$$ In particular, we have, for all $i \in [N]$, $$A_i = -I_d + 2 \sum_{\epsilon \in \{\pm 1\}^N \, : \, \epsilon_i = +1} X_\epsilon$$ and thus \begin{align*} A &= \sum_{i=1}^N e_i \otimes A_i = \sum_{i=1}^N (-e_i) \otimes I_d + 2 \sum_{\epsilon \in \{\pm 1\}^N} \left(\sum_{i \, : \, \epsilon_i = +1} e_i \right) \otimes X_\epsilon \\ &= \sum_{\epsilon \in \{\pm 1\}^N} \left( 2\sum_{i \, : \, \epsilon_i = +1} e_i - \sum_i e_i \right) \otimes X_\epsilon \\ &= \sum_{\epsilon \in \{\pm 1\}^N} \Big(\underbrace{ \sum_i \epsilon_i e_i }_{=:z_\epsilon}\Big) \otimes X_\epsilon. \end{align*} We have thus obtained above a decomposition of the tensor $A$ with $2^N$ terms, $\|z_\epsilon\|_\infty=1$ and $\sum_\epsilon X_\epsilon = I_d$, proving that $\|A\|_c \leq 1$. For the reverse implication, start with a decomposition $A = \sum_j z_j \otimes H_j$ with $\|z_j\|_\infty \leq 1$, $H_j \geq 0$ and $\sum_j H_j = I_d$. One can recover the observables and the corresponding effects from this decomposition: $$A_i = \sum_j z_j(i) H_j \quad \text{ and } \quad E_i^\pm = \sum_j \frac{1 \pm z_j(i)}{2} H_j.$$ One recognizes in the expression above the description of the compatibility of the POVMs $(E^+_i, E^-_i)_{i \in [N]}$ as post-processing from Proposition \ref{prop:compatibility-postprocessing}: $$E^\pm_i = \sum_j p_i(\pm | j) H_j,$$ where the conditional probabilities $p_i$ are given by $$p_i(\pm | j) = \frac{1 \pm z_j(i)}{2} \in [0,1].$$ \end{proof} The compatibility norm of a tensor $A$ is related to the noise parameter $\Gamma$ from Definition \ref{def:Gamma}. The following proposition provides an \emph{operational interpretation} of the compatibility norm $\|A\|_c$ as the inverse of the noise compatibility threshold of the measurements $A$: the larger the norm, the more white noise needs to be mixed into the measurements in order to render them compatible.
\begin{proposition}\label{prop: compatibility norm and the noise threshold} For any $N$-tuple of observables $A = (A_1, A_2, \ldots, A_N) \neq 0$, $$\Gamma(A) = \frac{1}{\|A\|_c}.$$ \end{proposition} \begin{proof} Note first that, on the level of observables, adding noise to a dichotomic measurement corresponds to scaling: $$A^\eta = \left[\eta E + (1-\eta)\frac I 2\right] - \left[I - \eta E - (1-\eta)\frac I 2\right] = 2\eta E - \eta I = \eta A.$$ Hence, \begin{align*} \Gamma(A) &= \max\{\eta \, : \, (A_1^\eta, \ldots, A_N^\eta) \text{ compatible}\}\\ &= \max\{\eta \, : \, \|(A_1^\eta, \ldots, A_N^\eta)\|_c \leq 1\}\\ &= \max\{\eta \, : \, \eta \|(A_1, \ldots, A_N)\|_c \leq 1\}\\ &= \frac{1}{\|A\|_c}. \end{align*} \end{proof} \begin{example} Let us consider the example of the unbiased Pauli measurements, $$\frac 1 2 (I_2 \pm x \sigma_X), \quad \frac 1 2 (I_2 \pm y \sigma_Y), \quad \frac 1 2 (I_2 \pm z \sigma_Z),$$ where $(x,y,z) \in [0,1]^3$ are real parameters describing the noise in the measurements. These three POVMs correspond to the observables $$A_X = x \sigma_X, \quad A_Y = y \sigma_Y, \quad A_Z = z \sigma_Z.$$ It is known \cite{busch86,brougham2007estimating} that these observables are compatible if and only if $x^2 +y^2+z^2 \leq 1$, hence $$\|(A_X, A_Y, A_Z)\|_c = \sqrt{x^2+y^2+z^2} = \|(x,y,z)\|_2.$$ \end{example} \section{The relation between non-locality and incompatibility}\label{sec: non-locality and incompatibility} Having introduced in the previous sections the two main conceptual definitions of this paper, the \emph{compatibility norm} and the \emph{$M$-Bell-locality norm}, which formalize, for fixed measurements on Alice's side and invertible non-local games $M$, the physical notions of compatibility and non-locality, we now bring together and compare the two norms. In this section we introduce \emph{the main theorems} of the paper. It was shown in \cite{wolf2009measurements} that the two notions are equivalent in the case of the CHSH game. Using the framework of tensor norms, we shall give a quantitative and precise answer to the following question: \medskip \emph{When is measurement incompatibility equivalent to non-locality for general games?} \medskip \noindent It turns out that the answer to this question is given by a comparison between the \emph{compatibility norm} and the \emph{$M$-Bell-locality norm}. For the reader's convenience, we recall the definitions of the two tensor norms that we introduced in Sections \ref{sec:non-locality-norm} and \ref{sec: compatibility-norm}, in relation to, respectively, Bell inequality violations and measurement incompatibility. \begin{itemize} \item The \emph{$M$-Bell-locality norm} (see Definition \ref{def:A-M} and Theorem \ref{thm:M-tensor-norm}) $$\|A\|_{M}:=\sup_{\|\psi\| = 1} \sup_{\|B_y\| \leq 1} \Big \langle \psi \Big | \sum_{x,y = 1}^N\, M_{xy} \,A_x \otimes B_y \Big | \psi \Big \rangle=\lambda_{\max}\bigg[\sum_{y=1}^N \bigg| \sum_{x=1}^N \, M_{xy}\,A_x\bigg|\bigg].$$ \item The \emph{compatibility norm} (see Definition \ref{def:compatibility-norm} and Theorem \ref{thm:c-norm-compatibility}) $$\|A\|_c := \inf \left\{ \Big\|\sum_{j=1}^K H_j\Big\|_\infty \, : \, A = \sum_{j=1}^K z_j \otimes H_j, \, \text{ s.t. } \, \forall j \in [K], \, \|z_j\|_\infty \leq 1 \text{ and } H_j \geq 0\right\}.$$ \end{itemize} In what follows, we shall compare these two norms, in order to relate, in a quantitative manner, the two fundamental physical phenomena of Bell non-locality and measurement incompatibility. Before stating the general comparison results, let us illustrate, on a simple qubit example, how the two quantities can be evaluated in practice; see the sketch below.
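The following minimal numerical sketch (in Python, assuming only NumPy; the helper name \texttt{mat\_abs} is ours, for illustration only) evaluates both quantities for the noisy Pauli observables $A=(x\,\sigma_X,\,y\,\sigma_Y)$: the $M$-Bell-locality norm for the CHSH functional is computed from the closed formula recalled above, while for $\|A\|_c$ we use the value $\sqrt{x^2+y^2}$ given by the Pauli compatibility criterion recalled in the example above. The two values agree, in line with the equality of the two norms for the CHSH game established below.

\begin{verbatim}
import numpy as np

sX = np.array([[0, 1], [1, 0]], dtype=complex)
sY = np.array([[0, -1j], [1j, 0]], dtype=complex)

def mat_abs(C):
    # matrix absolute value |C| of a self-adjoint matrix C
    w, V = np.linalg.eigh(C)
    return V @ np.diag(np.abs(w)) @ V.conj().T

for (x, y) in [(1.0, 1.0), (1.0, 0.5), (0.6, 0.3)]:
    A1, A2 = x * sX, y * sY
    # ||A||_{M_CHSH} = (1/2) lambda_max[ |A1 + A2| + |A1 - A2| ]
    norm_M = 0.5 * np.linalg.eigvalsh(mat_abs(A1 + A2) + mat_abs(A1 - A2))[-1]
    # ||A||_c = sqrt(x^2 + y^2) for noisy Pauli observables (known criterion)
    norm_c = np.hypot(x, y)
    print(round(norm_M, 6), round(norm_c, 6))
\end{verbatim}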
We start with a reformulation, using the language of tensor norms, of the following well-established fact: an observed \emph{violation of the Bell inequality} $M$ necessarily implies the \emph{incompatibility} of Alice's measurements. Mathematically, this corresponds to upper bounding the $M$-Bell-locality norm of Alice's measurements by their compatibility norm. \begin{theorem}\label{thm:M-leq-rho} Consider an $N$-input, 2-output invertible non-local game $M$, corresponding to a matrix $M \in \mathcal M_N(\mathbb R)$. Then, for any $N$-tuple of self-adjoint matrices $A = (A_1, \ldots, A_N)$, we have \begin{equation}\label{eq:M-leq-rho} \|A\|_M\leq\|A\|_{c}\|M\|_{\ell_1^N \otimes_{\epsilon} \ell_1^N} = \|A\|_{c} \, \beta(M). \end{equation} In particular, if Alice's measurements $A$ are $M$-Bell-non-local (in the sense of Definition \ref{def:Bell-local}), then they must be incompatible. \end{theorem} \begin{proof} Let us consider an optimal decomposition $\|A\|_{c}=\|\sum_{j=1}^K C_j\|_{\infty}$ with $A=\sum_{j=1}^K z_j\otimes C_j$, $\|z_j\|_{\infty}\leq 1$ and $C_j\geq0$ for all $j$. Thus we have $A_x=\sum_{j=1}^K z_j(x)C_j$.\\ We now upper bound the $M$-Bell-locality norm $\|A\|_M$: \begin{align*} \|A\|_M&=\lambda_{\max}\left[\sum_{y=1}^N\bigg|\sum_{x=1}^N \,M_{xy}\,A_x\bigg|\right]=\lambda_{\max}\left[\sum_{y=1}^N\bigg|\sum_{j=1}^K\sum_{x=1}^N \,M_{xy}\,z_j(x)\,C_j\bigg|\right]\\ &\leq \lambda_{\max}\left[\sum_{j=1}^K\sum_{y=1}^N\bigg|\sum_{x=1}^N\, M_{xy}\,z_j(x)\bigg|\,C_j\right]=\lambda_{\max}\left[\sum_{j=1}^K\sum_{y=1}^N\sum_{x=1}^N \,\epsilon^{(j)}_y \,M_{xy}\,z_j(x)\,C_j\right], \end{align*} where, for each $j$, we have used a sign vector $\epsilon^{(j)}\in\{\pm 1\}^N$ such that $$ \sum_{y=1}^N\bigg|\sum_{x=1}^N\, M_{xy}\,z_j(x)\bigg|=\sum_{y=1}^N\sum_{x=1}^N \,\epsilon^{(j)}_y \,M_{xy}\,z_j(x).$$ Then we have \begin{align*} \|A\|_M&\leq\lambda_{\max}\left[\sum_{j=1}^K\sum_{y=1}^N\sum_{x=1}^N\, \epsilon^{(j)}_y\,M_{xy}\,z_j(x)\,C_j\right]\leq\lambda_{\max}\left[\sum_{j=1}^K \,C_j\,\|M\|_{\ell_1 \otimes_\epsilon \ell_1}\right]\\ &=\lambda_{\max}\left[\sum_{j=1}^K C_j\right] \|M\|_{\ell_1 \otimes_\epsilon \ell_1}=\|A\|_{c} \|M\|_{\ell_1 \otimes_\epsilon \ell_1}, \end{align*} where $\|M\|_{\ell_1 \otimes_\epsilon \ell_1}=\underset{\|\epsilon\|_{\infty}\leq 1,\,\|z\|_{\infty}\leq 1}{\sup} \langle M,z\otimes \epsilon \rangle$. \end{proof} In the following we will show, for \emph{invertible Bell functionals}, that the compatibility norm is upper bounded by the $M$-Bell-locality norm. \begin{theorem}\label{thm:rho-leq-M} Consider an $N$-input, 2-output non-local game $M$, corresponding to an \emph{invertible} matrix $M \in \mathcal M_N(\mathbb R)$. Then, for any $N$-tuple of self-adjoint matrices $A = (A_1, \ldots, A_N)$, we have \begin{equation}\label{eq:rho-leq-M} \|A\|_c \leq \|A\|_M \|M^{-1}\|_{\ell^N_\infty \otimes_\epsilon \ell^N_\infty}. \end{equation} \end{theorem} \begin{proof} Let us consider $$C_y=\sum_{x=1}^N M_{xy} A_x=(M^\top A)_y\implies A_x=((M^\top)^{-1}C)_x=\sum_{y=1}^N(M^{-1})_{y,x}C_y.$$ Let us also consider the following decomposition of $A=\sum_{x=1}^Ne_x\otimes A_x$ with $e_x$ the canonical basis vectors.
We have $$A=\sum_{x=1}^Ne_x\otimes A_x=\sum_{y=1}^N\sum_{x=1}^N(M^{-1})_{y,x}e_x\otimes C_y$$ $$=\sum_{y=1}^N\left[\sum_{x=1}^N(M^{-1})_{y,x}e_x\right]\otimes C^+_y+\sum_{y=1}^N\left[-\sum_{x=1}^N(M^{-1})_{y,x}e_x\right]\otimes C^-_y$$ $$=\sum_{y=1}^N e'_y\otimes C^+_y+\sum_{y=1}^N-e'_y\otimes C^-_y$$ where we have decomposed $C_y=C^+_y-C^-_y$ into positive and negative parts $C_y^\pm \geq 0$ for all $y \in [N]$ and $e'_y:=\sum_{x=1}^N(M^{-1})_{y,x}e_x$. Observe that $$\|e'_y\|_{\infty}=\max_{x\in[N]}\big|(M^{-1})_{y,x}\big|\leq\max_{i,j}\big|(M^{-1})_{i,j}\big|=\|M^{-1}\|_{\ell^N_\infty \otimes_\epsilon \ell^N_\infty},$$ where we recall from Definition \ref{def: injective norm} that $$\|M^{-1}\|_{\ell^N_\infty \otimes_\epsilon \ell^N_\infty}:=\sup_{a,b\in\mathbb B(\ell_1^N(\R))}|\langle M^{-1},a\otimes b\rangle|=\max_{i,j}|(M^{-1})_{i,j}|;$$ in other words, the injective norm of $M^{-1}$, seen as a tensor in $\ell^N_\infty\otimes_\epsilon\ell^N_\infty$, is simply the largest absolute value of its entries. We consider now the normalised vectors $$a_y:=\frac{e'_y}{\|M^{-1}\|_{\ell^N_\infty \otimes_\epsilon \ell^N_\infty}}\in\mathbb{B}(\ell_{\infty}(\mathbb{R}^N)).$$ With this normalisation, one has $$A=\sum_{y=1}^N\, \|M^{-1}\|_{\ell^N_\infty \otimes_\epsilon \ell^N_\infty}\, a_y\otimes C^+_y\,-\,\sum_{y=1}^N\,\|M^{-1}\|_{\ell^N_\infty \otimes_\epsilon \ell^N_\infty}\, a_y\otimes C^-_y.$$ We recognize above a valid decomposition of the tensor $A$ as in Eq.~\eqref{eq:def-norm-c}. Hence \begin{align*} \|A\|_c&\leq\Big\|\sum_{y=1}^N\,\|M^{-1}\|_{\ell^N_\infty\otimes_\epsilon \ell^N_\infty}\,(C^+_y\,+\,C^-_y)\Big\|_{\infty}\\ &=\|M^{-1}\|_{\ell^N_\infty \otimes_\epsilon \ell^N_\infty}\lambda_{\max}\Big(\sum_{y=1}^N\Big|C_y\Big|\Big)=\|M^{-1}\|_{\ell^N_\infty \otimes_\epsilon \ell^N_\infty}\|A\|_M. \end{align*} \end{proof} Putting together Theorems \ref{thm:M-leq-rho} and \ref{thm:rho-leq-M}, we recover the main result from \cite{wolf2009measurements}: for $N=2$ and the CHSH matrix $$M_{\textrm{CHSH}} = \frac 1 2 \begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix},$$ we have $$\beta(M_{\textrm{CHSH}}) = 1 \qquad \text{ and } \qquad (M_{\textrm{CHSH}})^{-1} = \begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix},$$ so that $\|(M_{\textrm{CHSH}})^{-1}\|_{\ell^2_\infty \otimes_\epsilon \ell^2_\infty}=1$. It follows thus, from Eqs.~\eqref{eq:M-leq-rho} and \eqref{eq:rho-leq-M}, that \begin{equation}\label{eq:c-CHSH-equal} \|\cdot\|_c = \|\cdot\|_{M_{\textrm{CHSH}}}. \end{equation} \begin{remark} We have seen in Section \ref{sec:non-locality} that for the CHSH game we have $$\|A\|_{M_{\text{CHSH}}}=\frac{1}{\Gamma(A)}.$$ One can also see, from Proposition \ref{prop: compatibility norm and the noise threshold}, that, with respect to the compatibility norm, $$\|A\|_c=\frac{1}{\Gamma(A)}.$$ \end{remark} In what follows, we compare the compatibility norm and the Bell-locality norm in two special settings, a modified CHSH game, and the pure correlation $I_{3322}$ game. As we shall see, in these situations, the two norms are different. \bigskip Let us first consider the modified CHSH game defined by the matrix $$M_t :=\begin{bmatrix} 1 & 1 \\ 1 & -t \end{bmatrix},$$ where $t$ is a real parameter taking values in $\mathbb{R}\setminus\{-1\}$, such that the matrix $M_t$ is invertible. We start by normalizing the matrix $M_t$ such that its classical bias $\beta$ is equal to 1.
A simple calculation shows that $$\beta(M_t)=\begin{cases}3-t\qquad \text{for} \quad t\leq1\\ 1+t \qquad \text{for} \quad t>1 \end{cases} = 2 + |t-1|.$$ We are thus going to work with the normalized version $$M'_t =\frac{1}{\beta(M_t)}\begin{bmatrix} 1 & 1 \\ 1 & -t \end{bmatrix},$$ for which $\beta(M'_t) = 1$. We consider the following pair of spin observables $$A:=(\sigma_X,y\sigma_Y),$$ where $y \in [-1,1]$ is a parameter we shall vary. These two observables correspond to, respectively, a sharp measurement in the eigenbasis of $\sigma_X$ and a noisy measurement in the eigenbasis of $\sigma_Y$. In the following, we calculate $\|A\|_{c}$ and $\|A\|_{M'_t}$, for different values of the parameters $t$ and $y$. Since the $t=1$ value corresponds to the CHSH game (for which $\|\cdot\|_c = \|\cdot\|_{M_{\textrm{CHSH}}}$), the compatibility norm reads $$\|A\|_{c}=\|A\|_{M'_{t=1}}=\|A\|_{M_{\textrm{CHSH}}}=\lambda_{\max}\bigg[\sum_{j=1}^2\bigg|\sum_{x=1}^2(M_{\textrm{CHSH}})_{xj}A_x\bigg|\bigg]=\frac{1}{2}\lambda_{\max}\big[|\sigma_X+y\sigma_Y|+|\sigma_X-y\sigma_Y|\big].$$ A simple calculation shows that $$\|A\|_{c}=\sqrt{1+y^2}=:r.$$ We now compute $\|A\|_{M'_t}$ for the normalized modified CHSH game: $$\|A\|_{M'_t}=\frac{1}{2+|t-1|}\lambda_{\max}\big[|\sigma_X+y\sigma_Y|+|\sigma_X-ty\sigma_Y|\big]=\frac{r_t+r}{2+|t-1|},$$ with $r_t:=\sqrt{1+(yt)^2}$; above, we have used the following fact: $$\forall x,y \in \mathbb R, \qquad |x \sigma_X + y \sigma_Y| = \left| \begin{bmatrix} 0 & x - \mathrm{i} y \\ x + \mathrm{i} y & 0 \end{bmatrix} \right| =\sqrt{x^2+y^2} \, I_2.$$ We plot the norm $\|A\|_{M'_t}$ in Figure \ref{fig:norme A_Mt}, the region of parameters $(y,t)$ where Alice observes a Bell inequality violation $\|A\|_{M'_t}> \beta(M'_t) = 1$ in Figure \ref{fig: Alice observe violation}, and the ratio of the two norms in Figure \ref{fig:modified-CHSH}. Note that the plot for $t=1$ corresponds to the CHSH game: the two norms are equal (see Eq.~\eqref{eq:c-CHSH-equal}). At $y=1$, Alice's measurements are sharp: $A = (\sigma_X, \sigma_Y)$. One observes violations of the game $M'_t$ for the parameter values $$\|A_{y=1}\|_{M'_t} > 1 \iff t > \frac{9-4\sqrt 2}{7}=:t_*$$ (the threshold $t_*$ is obtained by solving $\sqrt 2 + \sqrt{1+t^2} = 3-t$ in the regime $t\leq 1$; for $t>1$, the inequality $\sqrt 2 + \sqrt{1+t^2}>1+t$ always holds). The values at $y=0$ also have a special meaning, since, in this case, $$A = (\sigma_X, 0) = (1,0) \otimes \sigma_X.$$ By the tensor norm property of the compatibility norm (see Proposition \ref{prop:c-norm-tensor}), we have $$\|A\|_c = \|(1,0)\|_{\ell_\infty} \cdot \|\sigma_X\|_{S_\infty} = 1.$$ Similarly, the tensor norm property of the Bell-locality norm yields $$\|A\|_{M_t'} = \|(1,0)\|_{M_t'} \cdot \|\sigma_X\|_{S_\infty} = \frac{2}{\beta(M_t)} \cdot 1 = \frac{2}{2+|t-1|} \leq 1.$$ In Figure \ref{fig:modified-CHSH}, the dashed curve corresponds to the limit $|t| \to \infty$, in which case $$\lim_{|t| \to \infty} \frac{\|A\|_{M_t'}}{\|A\|_c} = \frac{|y|}{\sqrt{1+y^2}}.$$ Finally, the dotted line corresponds to the game $M_t'$ for $t=-1$. This game is not invertible, so the quantity $\|\cdot\|_{M_{-1}'}$ is not a norm. \begin{figure}[htb!] \centering \includegraphics[width=\textwidth]{norme_de_A_Mt_.pdf}\qquad\qquad\qquad\qquad \caption{The norm $\|A\|_{M'_t}$ for $y\in[-1,1]$ and different values of $t$. The measurements $A$ are $\sigma_X$ and $y\sigma_Y$, a noisy version of $\sigma_Y$. For $t=1$ (the CHSH game), one observes violations (i.e.~$\|A\|_{M'_t} > \beta(M'_t)=1$) for every non-zero value of $y \in [-1, 1]$.} \label{fig:norme A_Mt} \end{figure} \begin{figure}[htb!]
\centering \includegraphics[width=0.7\textwidth]{y_t_amt.pdf}\qquad\qquad\qquad\qquad \caption{The filled region corresponds to the set of parameters $(y,t)$ for which Alice's measurements ($\sigma_X$ and $y\sigma_Y$) are Bell non-local (for the game $M'_t$): $\|A\|_{M'_t}>1 = \beta(M'_t)$. Note that for $t \leq t_* = (9-4\sqrt 2)/7$, the game $M'_t$ does not allow violations when Alice's measurements are of the form $A= (\sigma_X, y\sigma_Y)$.} \label{fig: Alice observe violation} \end{figure} \begin{figure}[htb!] \centering \includegraphics[width=\textwidth]{CHSH-modified.pdf}\qquad\qquad\qquad\qquad \caption{The ratio $\|A\|_{M'_t} \, / \, \|A\|_{c}$ for $y\in[-1,1]$ and different values of $t$. We notice that the ratio is always smaller than 1, except for $t=1$, which corresponds to the CHSH game.} \label{fig:modified-CHSH} \end{figure} \bigskip In the same spirit as the example above, we now analyze another deformation of the CHSH game, which was considered in \cite{lawson2010biased}, using the tools introduced in this paper. The game is given by the matrix $$G(p,q) = \begin{bmatrix} p\,q & p\,(1-q)\\ q\,(1-p)& -(1-q)\,(1-p) \end{bmatrix},$$ where $(p,q)\in[0,1]^2$. Note that this matrix is invertible for all $(p,q) \in (0,1)^2$. In the following we give the classical bias $\beta(G(p,q))$ in the different regions of the square $[0,1]^2$. \begin{itemize} \item For $p$ and $q$ satisfying $p,q\geq \frac{1}{2}$, the classical bias of the game is given by $$\beta(G(p,q))=1-2\,(1-p)\cdot(1-q)$$ \item For $p$ and $q$ satisfying $p\leq\frac{1}{2},q\geq \frac{1}{2}$, the classical bias of the game is given by $$\beta(G(p,q))=1-2\,p\cdot(1-q)$$ \item For $p$ and $q$ satisfying $q\leq\frac{1}{2},p\geq \frac{1}{2}$, the classical bias of the game is given by $$\beta(G(p,q))=1-2\,q\cdot(1-p)$$ \item For $p$ and $q$ satisfying $p,q\leq \frac{1}{2}$, the classical bias of the game is given by $$\beta(G(p,q))=1-2p\cdot q$$ \end{itemize} \begin{remark} The classical bias of the game $\beta(G(p,q))$ for $p,q\geq \frac{1}{2}$ was already computed in \cite{lawson2010biased}. \end{remark} To express the classical bias $\beta(G(p,q))$ in a unified way over the whole square $[0,1]^2$, we use the identity $$\min(x,y)=\frac{1}{2}(x+y-|x-y|), \qquad x,y\in\mathbb{R}.$$ One can easily check that for $p\in[0,1]$ we have $$\min(p,1-p)=\frac{1}{2}(1-|2p-1|)=\begin{cases} 1-p\qquad \text{for}\quad p\geq\frac{1}{2}&\\ p\qquad \text{for}\quad p\leq \frac{1}{2}& \end{cases}$$ and similarly for $\min(q,1-q)$ with $q\in[0,1]$. It follows that the classical bias of the game, for all $(p,q)\in[0,1]^2$, is given by $$\beta(G(p,q))=1-2\cdot\min(p,1-p)\min(q,1-q).$$ In our setting, we shall consider, for all $(p,q)\in[0,1]^2$, the normalised game $$G'(p,q)=\frac{1}{\beta(G(p,q))} \begin{bmatrix} p\,q & p\,(1-q)\\ q\,(1-p)& -(1-q)\,(1-p) \end{bmatrix}, $$ for which $\beta(G'(p,q))=1$. As in the example above, we consider the pair of spin observables $$A:=(\sigma_X,y\sigma_Y),$$ where $y \in [-1,1]$ is a parameter we shall vary. In the following we compute $\|A\|_{G'(p,q)}$.
\begin{align*} \|A\|_{G'(p,q)}&=\lambda_{\max}\bigg[\sum_{j=1}^2\bigg|\sum_{x=1}^2 G'(p,q)_{x,j}A_x\bigg|\bigg]\\ &=\frac{1}{|\beta(G(p,q))|}\lambda_{\max}\Big(\Big|p\,q\,\sigma_X+y\,q\,(1-p)\sigma_Y\Big|+\Big|p\,(1-q)\sigma_X-y\,(1-p)\,(1-q)\,\sigma_Y\Big|\Big).\end{align*} A simple calculation shows that $$\|A\|_{G'(p,q)}=\frac{1}{|1-2\min(p,1-p)\min(q,1-q)|}\Big(\Big[p^2\,q^2+y^2\,q^2\,(1-p)^2\Big]^{\frac{1}{2}}+\Big[p^2\,(1-q)^2+y^2\,(1-p)^2\,(1-q)^2\Big]^{\frac{1}{2}}\Big).$$ Note that for $p=1/2$, we have a simplification: $$\|A\|_{G'(1/2,q)}=\frac{\sqrt{1+y^2}}{2\max(q,1-q)} = \frac{\|A\|_c}{2\max(q,1-q)}.$$ We plot in Figure \ref{fig: Alice violation for G'(p,q)} the set of pairs $(p,q)$ such that $\|A\|_{G'(p,q)}>1$, that is, the region of game parameters where Alice observes a Bell violation, for different values of $y\in[0,1]$. In Figure \ref{fig: norme A_G'(p,q) } we plot the norm $\|A\|_{G'(p,q)}$, while in Figure \ref{fig: Rapport norme A_G(p,q) et A_c} we plot the ratio $\|A\|_{G'(p,q)}/\|A\|_c$, for fixed values of $p$ and $q$. \begin{figure}[htb!] \centering \includegraphics[width=0.8\textwidth]{p_q_CHSH_pq_violation.pdf}\qquad\qquad\qquad\qquad \caption{The $(p,q)$ region where Alice observes a violation $\|A\|_{G'(p,q)}>1$, for different values of $y$.} \label{fig: Alice violation for G'(p,q)} \end{figure} \begin{figure}[htb!] \centering \includegraphics[width=\textwidth]{norme_A_G_pq_.pdf}\qquad\qquad\qquad\qquad \caption{The norm $\|A\|_{G'(p,q)}$ for $y\in[-1,1]$ and different values of $p$ and $q$.} \label{fig: norme A_G'(p,q) } \end{figure} \begin{figure}[htb!] \centering \includegraphics[width=\textwidth]{rapport_norme_A_G_pq_A_c.pdf}\qquad\qquad\qquad\qquad \caption{The ratio $\|A\|_{G'(p,q)}/\|A\|_c$ for $y\in[-1,1]$ and different values of $p$ and $q$.} \label{fig: Rapport norme A_G(p,q) et A_c} \end{figure} \bigskip We now move on to the last example, the pure correlation part of the $I_{3322}$ tight Bell inequality (here, $N=3$): $$M_{\textrm{3322}} = \frac 1 4 \begin{bmatrix} 1 & 1 & 1 \\ 1 & 1 & -1 \\ 1 & -1 & 0 \end{bmatrix}.$$ The inverse of the matrix above has entries of absolute value as large as 2 (so that $\|M_{3322}^{-1}\|_{\ell^3_\infty \otimes_\epsilon \ell^3_\infty}=2$), and hence our main result does not apply. Indeed, one can see that $$\|s(\sigma_X,\sigma_Y,\sigma_Z)\|_c \leq 1 \iff s \leq \frac{1}{\sqrt 3},$$ while $$\|s(\sigma_X,\sigma_Y,\sigma_Z)\|_{M_{3322}} \leq 1 \iff s \leq \frac{4}{\sqrt 2 + 2\sqrt 3} > \frac{1}{\sqrt 3}.$$ This shows that, for tuples proportional to $(\sigma_X,\sigma_Y,\sigma_Z)$, with parameter values $$s \in \left( \frac{1}{\sqrt 3}, \frac{4}{\sqrt 2 + 2 \sqrt 3} \right],$$ we have $$\|s(\sigma_X,\sigma_Y,\sigma_Z)\|_{M_{3322}} \leq 1 < \|s(\sigma_X,\sigma_Y,\sigma_Z)\|_c,$$ so there exist incompatible dichotomic Pauli measurements which do not violate the pure correlation $I_{3322}$ Bell inequality \cite{quintino2014joint}. \section{Non-local games which characterize incompatibility}\label{sec:norm-equality} Up to this point, we have seen the following two inequalities relating the $M$-Bell-locality norm $\|\cdot \|_M$ and the compatibility norm $\|\cdot\|_c$ of a tuple of dichotomic quantum measurements: $$\|A\|_M\leq\|A\|_{c}\|M\|_{\ell_1^N \otimes_{\epsilon} \ell_1^N} \qquad \text{ and } \qquad \|A\|_c \leq \|A\|_M \|M^{-1}\|_{\ell^N_\infty \otimes_\epsilon \ell^N_\infty}.$$ In this section, we ask for which (invertible) non-local games $M$ these two inequalities, used together, allow us to conclude that $\|\cdot \|_M = \|\cdot\|_c$.
Such an equality would prove a strong equivalence of Bell inequality violations and incompatibility for the game $M$, in the spirit of \cite{wolf2009measurements}. First, note that, for an invertible game $M$ and a non-zero tuple of measurements $A$, we have $$\|A\|_M\leq\|A\|_{c}\|M\|_{\ell_1^N \otimes_\epsilon \ell_1^N}\leq \|A\|_M\|M^{-1}\|_{\ell_\infty^N \otimes_\epsilon \ell_\infty^N}\|M\|_{\ell_1^N \otimes_\epsilon \ell_1^N},$$ hence \begin{equation}\label{eq:uncertainty} \|M^{-1}\|_{\ell_\infty^N \otimes_\epsilon \ell_\infty^N}\|M\|_{\ell_1^N \otimes_\epsilon \ell_1^N}\geq 1. \end{equation} In order to deduce that $\|\cdot \|_M = \|\cdot\|_c$, one requires $$\beta(M) = \|M\|_{\ell_1^N \otimes_\epsilon \ell_1^N} = 1 \qquad \text{ and } \qquad \|M^{-1}\|_{\ell_\infty^N \otimes_\epsilon \ell_\infty^N} = 1.$$ Up to rescaling, this is equivalent to requiring that the inequality \eqref{eq:uncertainty} be saturated. We now study the equality case in \eqref{eq:uncertainty}, which can be seen as an ``uncertainty relation'' for the non-local game $M$. Let us first show that \eqref{eq:uncertainty} cannot be saturated for $N \geq 3$. We recall and use a definition from \cite{aubrun2020universal} which captures the ratio between the norms $\|\cdot\|_{X\otimes_{\pi}Y}$ and $\|\cdot\|_{X\otimes_{\epsilon}Y}$, for two given Banach spaces $X$ and $Y$. \begin{definition}\cite{aubrun2020universal}\label{def 8.7} Given two finite-dimensional Banach spaces $X$ and $Y$, there always exists a constant $1\leq C<\infty$ such that $$\|\cdot\|_{X\otimes_{\epsilon}Y}\leq\|\cdot\|_{X\otimes_{\pi}Y}\leq C \|\cdot\|_{X\otimes_{\epsilon}Y}. $$ One denotes by $\rho(X,Y)$ the smallest constant $C$ satisfying this inequality. Equivalently, one has $$\rho(X,Y)=\sup_{0\neq z\in X\otimes Y}\frac{\|z\|_{X\otimes_{\pi}Y}}{\|z\|_{X\otimes_{\epsilon}Y}}.$$ \end{definition} We recall one of the important properties of $\rho(X,Y)$ in the case of $\ell_1$ and $\ell_\infty$ spaces. \begin{proposition}\cite[Proposition 13]{aubrun2020universal} For all $N \geq 2$, we have $$\rho(\ell_1^N,\ell_1^N) = \rho(\ell_\infty^N,\ell_\infty^N) \leq \sqrt{2N}.$$ \end{proposition} With the help of the definition of $\rho(X,Y)$ and of the proposition above, we can\footnote{We thank Carlos Palazuelos for the proof of the proposition.} improve the inequality \eqref{eq:uncertainty}. \begin{proposition}\label{prop:norm-inequality-M-inverse} Let $M \in \mathcal M_N(\mathbb R)$ be an invertible matrix, with $N\geq 2$. Then one has $$\|M^{-1}\|_{\ell^N_{\infty}\otimes_{\epsilon}\ell^N_{\infty}}\|M\|_{\ell^N_1\otimes_{\epsilon}\ell^N_1}\geq \frac{N}{\rho(\ell_{\infty}^N,\ell_{\infty}^N)}\geq \sqrt{\frac N 2}\geq 1.$$ In particular, for $N \geq 3$, the last inequality above is strict. \end{proposition} \begin{proof} Set $\tilde{M}:=(M^{-1})^T$ and recall that $\langle A,B\rangle_{H.S}:=\Tr[A^*B]$. Using the duality between the injective norm on $\ell^N_1\otimes\ell^N_1$ and the projective norm on $\ell^N_{\infty}\otimes\ell^N_{\infty}$, we have $$N=\Tr[M^{-1}M]=\langle \tilde{M},M\rangle_{H.S}\leq \|M\|_{\ell^N_1\otimes_{\epsilon}\ell^N_1}\|\tilde{M}\|_{\ell^N_{\infty}\otimes_{\pi}\ell^N_{\infty}}.$$ Thus, by Definition \ref{def 8.7}, $$N\leq\|M\|_{\ell^N_1\otimes_{\epsilon}\ell^N_1}\|\tilde{M}\|_{\ell^N_{\infty}\otimes_{\epsilon}\ell^N_{\infty}}\,\rho(\ell^N_{\infty},\ell^N_{\infty}).$$ Since transposition does not change the largest entry in absolute value, $\|\tilde{M}\|_{\ell^N_{\infty}\otimes_{\epsilon}\ell^N_{\infty}}=\|M^{-1}\|_{\ell^N_{\infty}\otimes_{\epsilon}\ell^N_{\infty}}$, and we conclude that $$1\leq\frac{N}{\rho(\ell^N_{\infty},\ell^N_{\infty})}\leq\|M\|_{\ell^N_1\otimes_{\epsilon}\ell^N_1}\|M^{-1}\|_{\ell^N_{\infty}\otimes_{\epsilon}\ell^N_{\infty}}.$$ \end{proof} Having shown that inequality \eqref{eq:uncertainty} cannot be saturated for $N \geq 3$, we now focus on the $N=2$ case.
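Before doing so, let us record a quick numerical illustration of Proposition \ref{prop:norm-inequality-M-inverse} in the case $N=3$, based on the $I_{3322}$ matrix from the previous section; the values below follow directly from the definitions of the two norms and are not needed in the rest of the argument. One computes $\|M_{3322}\|_{\ell^3_1 \otimes_\epsilon \ell^3_1}=\beta(M_{3322})=1$ and $\|M_{3322}^{-1}\|_{\ell^3_\infty \otimes_\epsilon \ell^3_\infty}=2$, hence $$\|M_{3322}^{-1}\|_{\ell^3_\infty \otimes_\epsilon \ell^3_\infty}\,\|M_{3322}\|_{\ell^3_1 \otimes_\epsilon \ell^3_1}=2\geq\sqrt{\frac{3}{2}},$$ in agreement with (and strictly above) the lower bound of Proposition \ref{prop:norm-inequality-M-inverse}.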
We need the following lemma\footnote{We thank Zbigniew Pucha{\l}a for this result.}. \begin{lemma}\label{uncertaincty prop} For any matrix $X\in\mathcal{M}_N(\mathbb{C})$ and for any unitary operators $U,V \in \mathcal{U}_N$, we have $$\|UXV^*\|_{\ell_\infty^N \otimes_\epsilon \ell_\infty^N}\|X\|_{\ell_\infty^N \otimes_\epsilon \ell_\infty^N}\geq \frac 1 N|\det X|^{2/N}.$$ Equality holds if and only if both $X$ and $UXV^*$ are scalar multiples of Hadamard matrices. \end{lemma} \begin{proof} Let $x \in \mathbb C^{N^2}$ be the vectorization of $X$; we have $$\|X\|_{\ell_\infty^N \otimes_\epsilon \ell_\infty^N} = \max_{i,j \in [N]} |X_{ij}| = \|x\|_{\ell_\infty^{N^2}}.$$ Moreover, the vectorization of $UXV^*$ is given by $$y:= (U \otimes \bar V) x.$$ Using the unitarity of $U,V$ and the fact that for all vectors $z \in \mathbb C^{N^2}$, $\|z\|_{\ell_\infty^{N^2}} \geq \|z\|_2/N$, we have \begin{align*} \|UXV^*\|_{\ell_\infty^N \otimes_\epsilon \ell_\infty^N}\|X\|_{\ell_\infty^N \otimes_\epsilon \ell_\infty^N} &= \|y\|_{\ell_\infty^{N^2}}\|x\|_{\ell_\infty^{N^2}} \geq \frac{1}{N^2}\|y\|_2\|x\|_2 \\ &= \frac{1}{N^2}\|x\|_2^2 = \frac{1}{N^2}\|X\|_2^2=\frac{1}{N^2}\sum_{i=1}^N\sigma_i(X)^2. \end{align*} Above, $\sigma_i(X)$ denote the singular values of $X$. Using now the arithmetic mean-geometric mean (AM-GM) inequality, we have $$\frac{1}{N}\sum_{i=1}^N\sigma_i(X)^2\geq\left(\prod_{i=1}^N\sigma_i(X)^2\right)^{\frac{1}{N}}=|\det X|^{\frac{2}{N}}.$$ Hence $$\|UXV^*\|_{\ell_\infty^N \otimes_\epsilon \ell_\infty^N}\|X\|_{\ell_\infty^N \otimes_\epsilon \ell_\infty^N}\geq \frac 1 N|\det X|^{\frac{2}{N}},$$ proving the inequality. In the derivation above, we have used three inequalities: the lower bound on the $\ell_\infty$ norm of the vectors $x,y$ by their $\ell_2$ norms, and the arithmetic and geometric inequality. If the former, equality holds iff the entries of, respectively, $x$ and $y$ are flat; this corresponds to the matrices $X$ and $UXV^*$ having, respectively, entries of identical absolute values. The latter corresponds to the singular values of $X$ being identical, which corresponds to $X$ being a scalar multiple of a unitary matrix. The announced equality condition follows from these considerations. \end{proof} Recall that the Fourier matrix, also known as the discrete Fourier transform (DFT), is given by $$F = N^{-1/2} \Big[\omega^{ij}\Big]_{i,j=0}^{N-1},$$ where $\omega = \exp(2 \pi \mathrm{i}/N)$. \begin{proposition}\label{prop:equality-N-2} For $N=2$, all the invertible Bell functionals $M\in\mathcal{M}_2(\mathbb{R})$ satisfying $$\|M^{-1}\|_{\ell^2_{\infty}\otimes_{\epsilon}\ell^2_{\infty}}\|M\|_{\ell^2_1\otimes_{\epsilon}\ell^2_1} = 1$$ are of the form $$M =a\begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix}$$ with $a\in\mathbb R$, $a \neq 0$. 
\end{proposition} \begin{proof} Since $M\in\mathcal{M}_2(\mathbb R)$, one notes that $$\|M^{-1}\|_{\ell^2_{\infty}\otimes_{\epsilon}\ell^2_{\infty}}=\frac{1}{|\det(M)|}\|M\|_{\ell^2_{\infty}\otimes_{\epsilon}\ell^2_{\infty}}$$ (the adjugate of a $2\times 2$ matrix has the same entries as the matrix itself, up to signs and a permutation) and $$\|M\|_{\ell^2_1\otimes_{\epsilon}\ell^2_1}=\|(T\otimes T) M\|_{\ell^2_{\infty}\otimes_{\epsilon}\ell^2_{\infty}}=2\,\|(F\otimes F) M\|_{\ell^2_{\infty}\otimes_{\epsilon}\ell^2_{\infty}},$$ where $T =\begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix} = \sqrt 2F$ is an isometry $T:\ell^2_1\to\ell^2_{\infty}$; geometrically, this corresponds to the fact that the unit ball of $\ell^2_1$ (a diamond) is a scaled rotation of the unit ball of $\ell^2_{\infty}$ (a square).\\ Now, using the lemma above, one has $$\|M^{-1}\|_{\ell^2_{\infty}\otimes_{\epsilon}\ell^2_{\infty}}\|M\|_{\ell^2_1\otimes_{\epsilon}\ell^2_1}=\frac{2}{|\det(M)|}\|M\|_{\ell^2_{\infty}\otimes_{\epsilon}\ell^2_{\infty}}\|(F\otimes F) M\|_{\ell^2_{\infty}\otimes_{\epsilon}\ell^2_{\infty}}\geq \frac{2}{|\det (M)|}\cdot\frac{|\det(M)|}{2}=1.$$ Equality holds, as in the lemma above, if and only if $M$ is a scalar multiple of a unitary matrix and the entries of $(F\otimes F)M$ and of $M$ are flat (i.e.~have the same absolute value). Then $M$ is a multiple of a \emph{Hadamard matrix}: $$M =a\begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix} \qquad a\in \mathbb{R},\, a\neq 0.$$ \end{proof} Gathering Propositions \ref{prop:norm-inequality-M-inverse} and \ref{prop:equality-N-2}, we obtain the following important characterization of non-local games $M$ achieving equality in \eqref{eq:uncertainty}. \begin{theorem} The only invertible non-local games $M \in \mathcal M_N(\mathbb R)$ satisfying $$\|M^{-1}\|_{\ell^N_{\infty}\otimes_{\epsilon}\ell^N_{\infty}}\|M\|_{\ell^N_1\otimes_{\epsilon}\ell^N_1} = 1$$ have two questions ($N=2$) and are variants of the CHSH game: $M = a M_{\textrm{CHSH}}$ for some $a \neq 0$. \end{theorem} Note that saturating inequality \eqref{eq:uncertainty} is just a \emph{sufficient} condition for having $\|\cdot \|_M = \|\cdot \|_c$. We leave the general case open: for which non-local games $M$ does one have $\|\cdot \|_M = \|\cdot \|_c$? \section{Conclusion} Two of the most fundamental features of quantum mechanics are measurement incompatibility and the non-locality of correlations. In this work, we address the relation between the two concepts within the natural framework of tensor norms. It is well known that, in order to observe correlation non-locality in a Bell-type experiment, one has to use incompatible measurements. Moreover, it was shown that in some particular cases, such as the CHSH game, incompatibility and the violation of the Bell inequality are equivalent. In the current paper, we introduced a natural framework in which one can directly compare the two notions and, by comparing the corresponding tensor norms, we have shown that incompatibility is not, in general, equivalent to Bell non-locality. Finally, let us address some questions we have left open. First and foremost, our setting is only adapted to dichotomic (2-outcome) POVMs; it would be interesting to extend the results in this paper to measurements with an arbitrary number of outcomes. The main obstacle here is encoding the outcomes of the $g$ POVMs, when they have more than 2 outcomes, into a relevant tensor. In Section \ref{sec:norm-equality}, we have shown that the two tensor norms, the one associated to a non-local game and the one associated to compatibility, cannot be shown to be equal using a simple chain of inequalities (except in the case of the CHSH game and its variants).
The question of whether the two norms can be shown to be equal by other methods remains open; one would need a more general argument, instead of the particular chain of inequalities used here. Finally, our methods only cover XOR games with pure correlation terms; associating a tensor norm to more general games (such as the full $I_{3322}$ game) is an interesting open problem. \bigskip \noindent\textit{Acknowledgements.} The authors would like to thank Carlos Palazuelos and Zbigniew Pucha{\l}a for help with the proofs of some results in Section \ref{sec:norm-equality}. The two anonymous referees provided us with an extensive list of suggestions and corrections; we are very thankful for their interest in our work and for helping us greatly improve the presentation of our results. The authors were supported by the ANR project \href{https://esquisses.math.cnrs.fr/}{ESQuisses}, grant number ANR-20-CE47-0014-01, and by the PHC programs \emph{Sakura} (Random Matrices and Tensors for Quantum Information and Machine Learning), \emph{Star} (Applications of random matrix theory and abstract harmonic analysis to quantum information theory), and \emph{Procope} (Entanglement Preservation in Quantum Information Theory). I.N.~was also supported by the ANR project \href{https://www.math.univ-toulouse.fr/~gcebron/STARS.php}{STARS}, grant number ANR-20-CE40-0008. \bibliographystyle{alpha} \bibliography{refs} \end{document}
2205.12582v5
http://arxiv.org/abs/2205.12582v5
Locally constrained flows and sharp Michael-Simon inequalities in hyperbolic space
\documentclass[11pt,oneside, final]{amsart} \copyrightinfo{0}{Iranian Mathematical Society} \pagespan{1}{\pageref*{LastPage}} \usepackage{etoolbox,lastpage} \commby{} \usepackage{amsmath,amsthm,amscd,amsfonts,amssymb,enumerate} \usepackage{graphicx} \usepackage{color} \usepackage[colorlinks]{hyperref} \makeatletter \@addtoreset{equation}{section} \def\theequation{\thesection.\arabic{equation}} \textwidth 155mm \oddsidemargin 0pt \evensidemargin 0pt \textheight 220mm \topmargin -3mm \newtheorem{theorem}{Theorem}[section] \newtheorem{proposition}[theorem]{Proposition} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{conjecture}[theorem]{Conjecture} \newtheorem{question}[theorem]{Question} \theoremstyle{definition} \newtheorem{definition}[theorem]{Definition} \newtheorem{example}[theorem]{Example} \newtheorem{counterexample}[theorem]{Counterexample} \newtheorem{remark}[theorem]{Remark} \newtheorem{assumption}[theorem]{Assumption} \theoremstyle{remark} \numberwithin{equation}{section} \DeclareMathOperator{\dist}{dist} \DeclareMathOperator{\trace}{trace} \begin{document} \title[Locally constrained flow \& Michael-Simon type inequalities in hyperbolic space]{Locally constrained flows and Michael-Simon type inequalities in hyperbolic space $\mathbb{H}^{n+1}$} \author[J. Cui]{Jingshi Cui} \author[P. Zhao]{Peibiao Zhao$^*$} \thanks{$^*$Corresponding author} \begin{abstract} Brendle \cite{B21} successfully establishes the sharp Michael-Simon inequality for mean curvature on Riemannian manifolds with nonnegative sectional curvature ($\mathcal{K} \geq 0$), and the proof relies on the Alexandrov-Bakelman-Pucci method. Nevertheless, this result cannot be extended to hyperbolic space $\mathbb{H}^{n+1}$ ($\mathcal{K} = -1$), as demonstrated by Counterexample \ref{ex1.7}. In the present paper, we propose Conjectures \ref{con1.8} and {\ref{con1.9}} concerning the hyperbolic version of the sharp Michael-Simon type inequality for $k$-th mean curvatures. However, the proof method in \cite{B21} failed to verify the validity of these conjectures. Recently, the authors \cite{CZ24} proved Conjectures \ref{con1.8} and {\ref{con1.9}} only for $h$-convex hypersurfaces by means of the Brendle-Guan-Li's flow. This paper aims to utilize other types of curvature flows to prove Conjectures \ref{con1.8} and {\ref{con1.9}} for hypersurfaces with weaker convexity conditions. For $k = 1$, we first investigate a new locally constrained mean curvature flow (\ref{1.9}) in $\mathbb{H}^{n+1}$ and prove its longtime existence and exponential convergence. Then, the sharp Michael-Simon type inequality for mean curvature of starshaped hypersurfaces in $\mathbb{H}^{n+1}$ is confirmed through the flow (\ref{1.9}). For $k \geq 2$, the sharp Michael-Simon inequality for $k$-th mean curvatures of starshaped, strictly $k$-convex hypersurfaces in $\mathbb{H}^{n+1}$ is proven using the locally constrained inverse curvature flow (\ref{1.12}) introduced by Scheuer and Xia \cite{SX19}.\\ \textbf{Keywords:} Locally constrained curvature flow; Michael-Simon type inequality; $k$-th mean curvatures. \\ \textbf{MSC(2010):} Primary: 53E99; Secondary: 52A20; 35K96 \end{abstract} \maketitle \section{ Introduction} \noindent The Michael-Simon inequality on the generalized submanifolds immersed into Euclidean space $\mathbb{R}^{N}$ was first formulated by Michael and Simon \cite{MS1973}. 
The specific form of the inequality is as follows \begin{theorem}{(\cite{MS1973})}\label{th1.1} Let $i: M^{n} \rightarrow \mathbb{R}^{N}$ be an isometric immersion $(N>n)$. Let $U$ be an open subset of $M^n$. For a function $\phi \in C_{c}^{\infty}(U)$, there exists a constant $C$, such that \begin{align}\label{1.1} \int_{M^{n}}(|H| \cdot|\phi|+|\nabla \phi|) d \mu_{M^{n}} \geq C\left(\int_{M^{n}}|\phi|^{\frac{n}{n-1}} d \mu_{M^{n}}\right)^{\frac{n-1}{n}}, \end{align} where $H$ is the mean curvature of $M^n$. \end{theorem} There are many important applications of the Michael-Simon type inequality. First, this type inequality is an effective tool for obtaining a priori estimates of solutions to elliptic partial differential equations and for investigating the regularity of these solutions. For example, X. Cabr$\acute{\rm e} $ \cite{Ca10} used this type inequality to prove the regularity theory for semilinear elliptic equations. Second, since the Michael-Simon type inequality involves integrals of mean curvature, it allows for solving problems related to hypersurfaces with prescribed mean curvature. For instance, this type inequality can be applied to study the regularity of surfaces with prescribed mean curvature \cite{BG73, DHT10}. Finally, the Michael-Simon type inequality is also valuable in the study of geometric flows. For instance, Evans and Spruck \cite{ES92} used this type inequality to establish an analog of Brakke’s ``clearing out'' lemma for generalized mean curvature flow. In summary, the Michael-Simon type inequality plays a crucial role not only in the study of elliptic equations but also in geometric analysis and the investigation of geometric flows. Therefore, it is necessary to explore the Michael-Simon type inequality for the $k$-th mean curvature, as well as the sharp form of this type inequality. Moreover, it is also important to investigate the Michael-Simon type inequality in Riemannian manifolds. It is widely known that Sun-Yung Alice Chang and Yi Wang \cite{CW2013}, utilized the optimal transport mapping method to successfully establish the Michael-Simon type inequality for $k$-th mean curvatures. Subsequently, in 2021, Brendle \cite{B19} established a sharp form of the Michael-Simon type inequality, where the best constant $C$ corresponds to the isometric constant in $\mathbb{R}^{n+1}$. \begin{theorem}{(\cite{B19})}\label{th1.2} Let $M$ be a compact hypersurface in $\mathbb{R}^{n+1}$ (possibly with boundary $\partial M$), and let $f$ be a positive smooth function on $M$. Then \begin{align}\label{1.2} \int_{M} \sqrt{\left|\nabla^{M} f\right|^{2}+f^{2} H^{2}}+\int_{\partial M} f \geq n\omega_{n}^{\frac{1}{n}}\left(\int_{M} f^{\frac{n}{n-1}}\right)^{\frac{n-1}{n}}, \end{align} where $H$ is the mean curvature of $M$ and $\omega_{n}$ is the area of the unit sphere $\mathbb{S}^{n}$ in $\mathbb{R}^{n+1}$. Moreover, if the equality holds, then $f$ is constant and $M$ is a flat disk. \end{theorem} \begin{remark} (1) If $M$ is a compact minimal hypersurface in $\mathbb{R}^{n+1}$, then by the Michael-Simon type inequality (\ref{1.2}), $M$ satisfies the sharp isoperimetric inequality \begin{align*} |\partial M| \geq n\omega_{n}^{\frac{1}{n}}|M|^{\frac{n-1}{n}}. \end{align*} (2) If $M$ is a closed hypersurface (without boundary) and $f$ be of constant, then the inequality (\ref{1.2}) gives the famous Minkowski inequality in $\mathbb{R}^{n+1}$, i.e., \begin{align*} \int_{M} |H| d\mu \geq n \omega^{\frac{1}{n}}_{n}|M|^{\frac{n-1}{n}}. 
\end{align*} \end{remark} Moreover, in 2022, Brendle \cite{B21} established the Michael-Simon type inequality for mean curvature in Riemannian manifolds with nonnegative sectional curvature. \begin{theorem}{(\cite{B21})\label{th1.5}} Let $N$ be a complete noncompact manifold of dimension $n+m$ with nonnegative sectional curvatures. Let $M$ be a compact submanifold of $N$ of dimension $n$ (possibly with boundary $\partial M$), and let $f$ be a positive smooth function on $M$. If $m\geq 2 $, then \begin{align}\label{1.3} \int_{M} \sqrt{\left|\nabla^{\Sigma} f\right|^{2}+f^{2} |H|^{2}}+\int_{\partial M} f \geq n\left(\frac{(n+m)\omega_{n+m}}{m\omega_{m}}\right)^{\frac{1}{n}}\theta^{\frac{1}{n}}\left(\int_{M} f^{\frac{n}{n-1}}\right)^{\frac{n-1}{n}}, \end{align} where $\theta$ denotes the asymptotic volume ratio of $N$ and $H$ denotes the mean curvature vector of $M$. \end{theorem} \begin{remark}\label{rem1.6} (1) When $m=2$, we have $(n+2) \omega_{n+2}=2 \omega_{2} \cdot \omega_{n}$. Then, the inequality (\ref{1.3}) is as follows \begin{align}\label{1.4} \int_{M} \sqrt{\left|\nabla^{\Sigma} f\right|^{2}+f^{2} |H|^{2}}+\int_{\partial M} f \geq n\omega_{n}^{\frac{1}{n}}\theta^{\frac{1}{n}}\left(\int_{M} f^{\frac{n}{n-1}}\right)^{\frac{n-1}{n}}. \end{align} (2) The inequality (\ref{1.4}) also holds for the setting of codimension one. Since $M$ can be regarded as a submanifold of the $(n+2)$-dimensional manifold $N\times\mathbb{R}$, the asymptotic volume ratios of $N\times\mathbb{R}$ and $N$ are equal. (3) Note that the inequalities (\ref{1.3}) and (\ref{1.4}) depend on the asymptotic volume ratio $\theta$ of the ambient manifold $N$. From the Bishop-Gromov volume comparison theorem \cite{BC64,MG81}, it follows that the following two conclusions hold: (i) $\theta \leq 1$ when $(N,g_{N})$ is a complete Riemannian manifold with Ricci curvature bounded below; (ii) $\theta > 1$ when the sectional curvature of $(N,g_{N})$ is negative. \end{remark} Given the significant importance of the Michael-Simon type inequality in the relevant mathematical fields, it is both necessary and urgent to conduct an study of the Michael-Simon type inequality on Riemannian manifolds with negative sectional curvature. As everyone knows, the hyperbolic space $\mathbb{H}^{n+1}$ is a simply connected, complete Riemannian ($n+1$)-manifold with $\mathcal{K}=-1<0$. Therefore, we first attempt to establish the Michael-Simon type inequality in $\mathbb{H}^{n+1}$. A natural question arises \begin{question}\label{qu1.4} Does the Michael-Simon type inequality in \cite{B21} also hold in $\mathbb{H}^{n+1}$ ? \end{question} We find that when $m=1$ and $f=const$, there exist counterexamples to the inequality (\ref{1.4}) in hyperbolic space $\mathbb{H}^{n+1}$. \begin{counterexample}\label{ex1.7} Let $M$ be a geodesic sphere $B_{r}$ in $\mathbb{H}^{n+1}$ where $r\in (0,+\infty)$. Assume that the hyperbolic space $\mathbb{H}^{n+1}$ is equipped with the measure $d\mu_{\mathbb{H}^{n+1}}=\lambda^{n}drd\mu_{\mathbb{S}^{n}}$, then $H\big|_{B_{r}} = \frac{n\lambda^{'}}{\lambda}$ and the inequality (\ref{1.4}) becomes \begin{align*} \int_{B_{r}} \frac{\lambda^{'}}{\lambda} d\mu_{\mathbb{H}^{n+1}} \geq \theta^{\frac{1}{n}} \omega_{n}^{\frac{1}{n}}|B_{r}|^{\frac{n-1}{n}}. \end{align*} Simplifying the above inequality, we have \begin{align*} \theta \leq \int^{r}_{0} (\lambda^{'})^{n} ds = \int^{r}_{0} \left(\frac{e^{s}+e^{-s}}{2}\right)^{n} ds < \int^{r}_{0} e^{ns} ds =\frac{e^{nr}-1}{n}. 
\end{align*} According to the Bishop-Gromov volume comparison theorem, in the hyperbolic space $\mathbb{H}^{n+1}$, $\theta >1$. However, if $r\in (0,\frac{ln(n+1)}{n} ]$, then $\theta < 1$, which is clearly in contradiction with the Bishop-Gromov volume comparison theorem. \end{counterexample} Counterexample \ref{ex1.7} provides an answer to Question \ref{qu1.4}, namely, the conclusion in \cite{B21} does not hold in hyperbolic space $\mathbb{H}^{n+1}$. Therefore, we make conjectures for the Michael-Simon type inequality in $\mathbb{H}^{n+1}$. \begin{conjecture}\label{con1.8} Let $M$ be a compact hypersurface in $\mathbb{H}^{n+1}$ (possibly with boundary $\partial M$) and let $f$ be a positive smooth function on $M$. Then, \begin{align}\label{1.5} \int_{M} \lambda^{'} \sqrt{f^{2} E_{1}^{2}+|\nabla^{M} f|^{2}} -\int_{M}\left\langle \bar{\nabla}\left(f\lambda^{'}\right),\nu \right\rangle +\int_{\partial M} f \geq C\left(\int_{M}f^{\frac{n}{n-1}}\right)^{\frac{n-1}{n}} . \end{align} In particular, when $M$ is closed and $f=const$, there holds \begin{align}\label{1.6} \int_{M}(\lambda^{'}|E_{1}|-u) \geq C|M|^{\frac{n-1}{n}}, \end{align} where $E_{1}:=E_{1}(\kappa)$ and $\nu$ are the (normalized) $1$-th mean curvature and the unit outward normal of $M$ respectively, $u$ and $|M|$ are the support function and the area of $M$ respectively, and $\bar{\nabla}$ is the Levi-Civita connection with respect to the metric $\bar{g}$ on $\mathbb{H}^{n+1}$. \end{conjecture} Note that the inequality (\ref{1.6}) is precisely the Minkowski type inequality established by Brendle, Hung, and Wang in \cite{BHW16} for mean convex and starshaped hypersurfaces. Similarly, we propose a conjecture for the Michael-Simon type inequality for $k$-th mean curvatures in hyperbolic space $\mathbb{H}^{n+1}$. \begin{conjecture}\label{con1.9} Let $M$ be a compact hypersurface in $\mathbb{H}^{n+1}$ (possibly with boundary $\partial M$) and let $f$ be a positive smooth function on $M$. For $1\leq k\leq n$, there holds \begin{align}\label{1.7} \int_{M} \lambda^{'} \sqrt{f^{2}E_{k}^{2} +|\nabla^{M} f|^{2} E_{k-1}^{2}} -\int_{M}\left\langle \bar{\nabla}\left(f\lambda^{'}\right),\nu \right\rangle \cdot |E_{k-1}| &+\int_{\partial M} f \cdot|E_{k-1}| \notag \\ &\geq C\left(\int_{M} f^{\frac{n-k+1}{n-k}}\cdot |E_{k-1}|\right)^{\frac{n-k}{n-k+1}}. \end{align} In particular, when $M$ is closed and $f=const$, there holds \begin{align}\label{1.8} \int_{M}(\lambda^{'}|E_{k}|-u|E_{k-1}|) \geq C \left(\int_{M} |E_{k-1}|\right)^{\frac{n-k+1}{n-k}}, \end{align} where $E_{k}:=E_{k}(\kappa)$ is the (normalized) $k$-th mean curvatures of $M$. \end{conjecture} When $M$ is a closed, $k$-convex hypersurface with $k\geq 2$, applying the Minkowski equality (\ref{2.9}) in $\mathbb{H}^{n+1}$, the inequality (\ref{1.8}) implies \begin{align*} \int_{M}(\lambda^{'}E_{k}-\lambda^{'}E_{k-2}) \geq C \left(\int_{M} E_{k-1}\right)^{\frac{n-k+1}{n-k}}, \end{align*} which represents a geometric inequality for weighted curvature integrals $W^{\lambda^{'}}_{k+1} = \int_{M}\lambda^{'}E_{k}$. The geometric inequality related to $W^{\lambda^{'}}_{k+1}$ can be found in \cite{HLW20}. Nowadays, the study of curvature flows is in full swing, and has achieved extremely fruitful research results. Notably, the curvature flow approach is a powerful tool for proving new geometric inequalities, especially constrained curvature flows. 
As we know that the research on constrained curvature flows can be divided into two categories: globally constrained flows and locally constrained flows. For globally constrained flows, Wang and Xia \cite{WangXia14} utilized the volume-preserving mean curvature flow, which was initially proposed by Cabezas-Rivas and Miquel \cite{CM07}, developed the quermassintegral inequalities for $h$-convex hypersurfaces and solved the isoperimetric problem in $\mathbb{H}^{n+1}$. See also \cite{BP17,GLW17,Mak12} for studies on the globally constrained flow in $\mathbb{H}^{n+1}$. Regarding locally constrained flows, Hu, Li, and Wei \cite{HLW20} not only provided new proofs of the quermassintegral inequalities but also derived new geometric inequalities in $\mathbb{H}^{n+1}$. Moreover, Hu and Li \cite{HL21} proved the weighted geometric inequality in hyperbolic space $\mathbb{H}^{n+1}$ by virtue of the locally constrained weighted volume preserving flows. Other results on locally constrained curvature flows, see \cite{JX19, LS20, SWX18, WX19, WX, GL2015, GL19}. Although the author's recent work \cite{CZ24} uses the Brendle-Guan-Li's flow to prove Conjectures \ref{con1.8} and \ref{con1.9}, the result requires the hypersurfaces to be $h$-convex, i.e., $\kappa_{i} \geq 1$. If the convexity condition of the hypersurfaces can be weakened, the scope of application of the inequality can be broadened. This paper aims to prove Conjectures \ref{con1.8} and \ref{con1.9} using other types of curvature flows, thereby enabling hypersurfaces with weaker convexity conditions to satisfy the inequality. In the first part of the paper, we introduce a new locally constrained mean curvature flow for starshaped hypersurfaces in $\mathbb{H}^{n+1}$, and apply it to establish the Michael-Simon type inequality for mean curvature, i.e., to prove Conjecture \ref{con1.8}. Let $X:\Sigma \times [0,T) \to \mathbb{H}^{n+1}$ be a smooth family of embedding such that $\Sigma_{t}=X(\Sigma,t)$ are smooth, closed starshaped hypersurfaces in $\mathbb{H}^{n+1}$. The new locally constrained mean curvature flow is \begin{equation}\label{1.9} \begin{cases} \frac{\partial }{\partial t}X(x,t)=-\frac{f}{v}H \nu(x,t) - \frac{n}{n-1}\frac{1}{v}\frac{\partial f}{\partial X},\\ X(\cdot,0)=X_{0}(\cdot), \end{cases} \end{equation} where $f \in C^{\infty}(\Sigma_{t})$ is a positive function, $\nu(x,t)$ and $H$ are the unit outer normal and the mean curvature of $\Sigma_{t}$ respectively, and $v=\sqrt{1+\lambda^{-2}|Dr|^{2}}$, with $D$ denotes Levi-Civita connection on $\mathbb{S}^{n}$. It is known that starshaped hypersurfaces $\Sigma_{t}$ can be parametrized by the positive radial function $r(\cdot,t): \mathbb{S}^{n}\to \mathbb{R}^{+}$ on $\mathbb{S}^{n}$, i.e., \begin{align*} \Sigma_{t}=X(\cdot,t)=\left\{\left(r(\xi,t),\xi\right)\in \mathbb{R}^{+}\times\mathbb{S}^{n}|\xi \in\mathbb{S}^{n}\right\}, \end{align*} and $\Sigma_{0}=X(\cdot,0)=\left\{\left(r_{0}(\xi),\xi\right)\in \mathbb{R}^{+}\times\mathbb{S}^{n}|\xi \in\mathbb{S}^{n}\right\}$, with $r_{0}(\xi)=r(\xi,0)$. Since $f \in C^{\infty}(\Sigma_{t})$, it follows that there exists a function $\phi: \mathbb{S}^{n} \to \mathbb{R}^{+} $ such that $f(X) = f(r(\xi,t),\xi)= \phi (\xi)$. Next we prove that the longtime existence and convergence of the locally constrained mean curvature flow (\ref{1.9}), provided that $f$ satisfies the following assumption. 
\begin{assumption}\label{as1.10} Let $f(X) = \Phi_{1} \circ r(\xi,t) $ and $\widehat{\Phi}_{1}(r):=n\Phi_{1}\frac{\lambda^{'}}{\lambda^{2}}+\frac{n}{n-1} \frac{\partial \Phi_{1}}{\partial r}\frac{1}{\lambda}$. The function $\widehat{\Phi}_{1}(r)$ is monotonically increasing with respect to $r$, where $r>0$, and there exists a zero point for $\widehat{\Phi}_{1}(r)$. \end{assumption} \begin{remark} It is not difficult to verify that there exists $\Phi_{1}(r)$ and $\widehat{\Phi}_{1}(r)$ satisfies Assumption \ref{as1.10}. For example, let \begin{align*} \Phi_{1}(r) = \lambda^{-n+1}\left[\int\frac{n-1}{n}(e^{-1} - e^{-r})\lambda^{n} dr +C\right] \end{align*} where the constant $C$ is chosen as follows: for $r >1$, let $C=0$; for $0< \epsilon \leq r \leq1 $, let $C \geq (1-\epsilon) \frac{n-1}{n}\frac{e-1}{e}\lambda^{n}(1)$. With this choice of $C$, $\Phi_{1}(r)$ is positive for all $r>0$. Moreover, we have $\widehat{\Phi}_{1}(r) = e^{-1} - e^{-r}$, $\frac{\partial \widehat{\Phi}_{1}}{\partial r} = e^{-r} >0$ and $\widehat{\Phi}_{1}(1) = 0$. \end{remark} \begin{theorem}\label{th1.11} Let $X_{0}:\Sigma \to \mathbb{H}^{n+1}(n\geq 2)$ be a smooth embedding of closed hypersurface $\Sigma$ in $\mathbb{H}^{n+1}$ such that $\Sigma_{0}=X_{0}(\Sigma)$ is starshaped with respect to the origin, and assume that $f$ satisfies Assumption \ref{as1.10}. Then the flow (\ref{1.9}) has a unique smooth solution $\Sigma_{t}=X(\Sigma,t)$ for all time $t\in [0,+\infty)$. Moreover, $\Sigma_{t}$ converges exponentially to a geodesic sphere $B_{r_{\infty}}$ centered at the origin as $t\to +\infty $ in the $C^{\infty}$-topology. \end{theorem} Let $M_{0}= \Sigma_{0}$ ($M_{0}=\Sigma_{0} \cup \partial \Sigma_{0}$ ) be a smooth starshaped and compact hypersurface (possibly with boundary). For any $ X^{*} \in M_{0}$, we have $X^{*}=(r^{*}(\xi), \xi)$, $r^{*}$ is the distance function of $M_{0}$ and \begin{equation*} r^{*}(\xi) = \begin{cases} r_{0}(\xi),\qquad x \in \Sigma_{0}, \\ \bar{r}(\xi), \qquad x \in \partial \Sigma_{0}. \end{cases} \end{equation*} If $f\in C^{\infty}(M_{0})$, then $f(X^{*})= f(\left(r^{*}(\xi),\xi\right))$ and $f(X^{*})\big|_{\Sigma_{0}} = f(X(\cdot,0)) = f (r_{0}(\xi),\xi)$. By using the locally constrained mean curvature flow (\ref{1.9}), the sharp Michael-Simon type inequality for mean curvature in $\mathbb{H}^{n+1}$ is established, and the best constant in (\ref{1.5}) is determined. \begin{theorem}\label{th1.12} Let $M_{0}$ be a smooth compact hypersurface in $\mathbb{H}^{n+1}$ (possibly with boundary $\partial M_{0}$) that is starshaped with respect to the origin, and let $f \in C^{\infty}(M_{0})$ be a positive function of the form $f=\Phi_{1} \circ r$ satisfying Assumption \ref{as1.10}. Then, there holds \begin{align}\label{1.10} \int_{M_{0}} \lambda^{'} \sqrt{f^{2} E_{1}^{2}+|\nabla^{M} f|^{2}} -\int_{M_{0}}\left\langle \bar{\nabla}\left(f\lambda^{'}\right),\nu \right\rangle +\int_{\partial M_{0}} f \geq \omega_{n}^{\frac{1}{n}}\left(\int_{M_{0}}f^{\frac{n}{n-1}}\right)^{\frac{n-1}{n}}. \end{align} In particular, if $M_{0}$ is closed, we have \begin{align}\label{1.11} \int_{M_{0}} \lambda^{'} \sqrt{f^{2} E_{1}^{2}+|\nabla^{M} f|^{2}} -\int_{M_{0}}\left\langle \bar{\nabla}\left(f\lambda^{'}\right),\nu \right\rangle \geq \omega_{n}^{\frac{1}{n}}\left(\int_{M_{0}}f^{\frac{n}{n-1}}\right)^{\frac{n-1}{n}}. \tag{1.10 ${'}$} \end{align} Equality holds in (\ref{1.11}) if and only if $M_{0}$ is a geodesic sphere centered at the origin. 
\end{theorem} In the second part of this paper, we use the locally constrained inverse curvature type flow to establish the Michael-Simon type inequality for $k$-th mean curvatures, i.e., to prove Conjecture \ref{con1.9}. Let $X_{0}:\Sigma \to \mathbb{H}^{n+1}$ be a smooth embedding such that $\Sigma_{0}$ is a closed, starshaped, and strictly $k$-convex hypersurface in hyperbolic space $\mathbb{H}^{n+1}$. In \cite{SX19}, Scheuer and Xia introduced a family of smooth embeddings $X:\Sigma \times [0,T) \to \mathbb{H}^{n+1}$ that satisfy \begin{equation}\label{1.12} \begin{cases} \frac{\partial}{\partial t}X(x,t)=\left(\frac{E_{k-2}(\kappa)}{E_{k-1}(\kappa)}-\frac{u}{\lambda^{'}}\right)\nu(x,t),\quad k=2,\cdots ,n,\\ X(\cdot,0)=X_{0}(\cdot), \end{cases} \end{equation} where $\nu(x,t)$ and $\kappa=(\kappa_{1},\cdots,\kappa_{n})$ are the unit outer normal and the principal curvatures of $\Sigma_{t}=X(\Sigma,t)$ respectively. They proved the following convergence result of the flow (\ref{1.12}). \begin{theorem}\label{th1.13}(\cite{SX19}) Let $X_{0}(\Sigma)$ be the smooth embedding of a closed n-dimensional manifold $\Sigma$ in $\mathbb{H}^{n+1}$, such that $\Sigma_{0}=X_{0}(\Sigma)$ is starshaped and strictly $k$-convex along $X_{0}(\Sigma)$. Then any solution $\Sigma_{t}=X_{t}(\Sigma)$ of the flow (\ref{1.12}) exists for $t\in[0,+\infty)$. Moreover, $\Sigma_{t}$ is starshaped and strictly $k$-convex for each $t> 0$ and it converges to a geodesic sphere $B_{R}$ centered at the origin in the $C^{\infty}$-topology as $t \to +\infty$. \end{theorem} Moreover, as an application of the flow (\ref{1.12}), we obtain the sharp Michael-Simon type inequalities for $k$-th mean curvatures, where $k\geq 2$. Before presenting the detailed results, we make the following assumptions about $f$. \begin{assumption}\label{as1.14} Let $f(X) = \Phi_{2} \circ r(\xi,t)$ and $\widehat{\Phi}_{2}(r):= \Phi_{2}^{\frac{n-k+1}{n-k}}(r) $. \\ (1) $\widehat{\Phi}_{2}$ is the solution of the following second order non-homogeneous differential equation \begin{align}\label{1.13} \frac{1}{n}\Delta_{\Sigma_{t}} \widehat{\Phi}_{2} - \widehat{\Phi}_{2} - \frac{1}{k-1}\frac{\lambda^{'}}{\lambda}\frac{\partial \widehat{\Phi}_{2}}{\partial r}\left(1+\lambda^{-2}|Dr|^{2}\right) =- \frac{m}{k-1}\frac{E_{k-1}}{E_{k-2}}\lambda (\lambda^{'})^{m-k}, \end{align} where $k+1 \leq m \leq n$. \\ (2) $(\lambda^{'})^{-1}\widehat{\Phi}_{2}$ is monotonically increasing with respect to $\lambda^{'}$. \end{assumption} \begin{remark} By the standard theory of ordinary differential equations, the solution to the equation (\ref{1.13}) exists. It can be shown that the set of elements satisfying Assumption \ref{as1.14} is non-empty. In particular, when $\Sigma_{t}$ is a geodesic sphere $B_{R}$, $\widehat{\Phi}_{2} = (\lambda^{'})^{m-k+1}$ satisfies Assumption \ref{as1.14}. \end{remark} \begin{theorem}\label{th1.15} Let $M_{0}$ be a smooth, compact, starshaped and strictly $k$-convex hypersurface in $\mathbb{H}^{n+1}$ (possibly with boundary $\partial M_{0}$), and $\Omega_{0}$ be the domain enclosed by $M_{0}$. Assume that $f$ satisfies Assumption \ref{as1.14}. 
Then, for any $2\leq k \leq n$, there holds \begin{align}\label{1.14} \int_{M_{0}} \lambda^{'} \sqrt{f^{2}E_{k}^{2} +|\nabla^{M_{0}} f|^{2} E_{k-1}^{2}} &-\int_{M_{0}}\left\langle \bar{\nabla}\left(f\lambda^{'}\right),\nu \right\rangle \cdot E_{k-1} +\int_{\partial M_{0}} f \cdot E_{k-1} \notag \\ &\geq \left(p_{k}\circ h_{0}^{-1}\left(W^{\lambda^{'}}_{0}(\Omega_{0})\right) \right)^{\frac{1}{n-k+1}}\left(\int_{M_{0}}f^{\frac{n-k+1}{n-k}}\cdot E_{k-1}\right)^{\frac{n-k}{n-k+1}}. \end{align} In particular, if $M_{0}$ is closed, we have \begin{align}\label{1.15} \int_{M_{0}} \lambda^{'} \sqrt{f^{2}E_{k}^{2} +|\nabla^{M_{0}} f|^{2} E_{k-1}^{2}} &-\int_{M_{0}}\left\langle \bar{\nabla}\left(f\lambda^{'}\right),\nu \right\rangle \cdot E_{k-1} \notag \\ &\geq \left(p_{k}\circ h_{0}^{-1}\left(W^{\lambda^{'}}_{0}(\Omega_{0})\right) \right)^{\frac{1}{n-k+1}}\left(\int_{M_{0}}f^{\frac{n-k+1}{n-k}}\cdot E_{k-1}\right)^{\frac{n-k}{n-k+1}}, \tag{1.13 ${'}$} \end{align} where $p_{k}(R)=\omega_{n}f^{\frac{n+1-k}{n-k}}(\lambda^{'})^{k-1}\lambda^{n-k+1}(R)$, $W^{\lambda^{'}}_{0}(\Omega_{0})= (n+1)\int_{\Omega_{0}} \lambda^{'} dvol$, $h_{0}(R)=W^{\lambda^{'}}_{0}(B^{n+1}_{R})$ and $h_{0}^{-1}$ is the inverse function of $h_{0}$. Equality holds in (\ref{1.15}) if and only if $M_{0}$ is a geodesic sphere centered at the origin. \end{theorem} The paper is organized as follows. In Section 2, we review the geometry of starshaped hypersurfaces in hyperbolic space $\mathbb{H}^{n+1}$, present some basic properties of normalized elementary symmetric functions, and derive the corresponding evolution equations. In Section 3, we establish a priori estimates for the locally constrained mean curvature flow (\ref{1.9}), prove its longtime existence and convergence. The sharp Michael-Simon type inequalities in Theorem \ref{th1.12} and Theorem \ref{th1.15} are proved in Section 4. \section{ Preliminaries} \noindent In this section, we first review starshaped hypersurfaces in hyperbolic space $\mathbb{H}^{n+1}$, then state some properties of normalized elementary symmetry functions, and finally derive the evolution equations along the general flow (\ref{2.11}). \subsection{ Starshaped hypersurfaces in a hyperbolic space.} \ \vglue-10pt \indent In this paper, the hyperbolic space $\mathbb{H}^{n+1}$ is regarded as a warped product manifold $\mathbb{R}^{+} \times \mathbb{S}^{n}$ equipped with the metric \begin{align*} \bar{g} =dr^{2}+\lambda(r)^{2}\sigma, \end{align*} where $\sigma$ is the standard metric on the unit sphere $\mathbb{S}^{n}\subset \mathbb{R}^{n+1}$. Let $\Sigma$ be a closed smooth hypersurface in $\mathbb{H}^{n+1}$. Define $g_{ij}$, $h_{ij}$ and $\nu$ as the induced metric, the second fundamental form and the unit outward normal vector of $\Sigma$, respectively. The principal curvatures $\kappa=(\kappa_{1},\cdots,\kappa_{n})$ of $\Sigma$ are given by the eigenvalues of the Weingarten matrix $\mathcal{W}=(h^{i}_{j})=(g^{ik}h_{kj})$, where $g^{ik}$ is the inverse matrix of $g_{ik}$. \begin{lemma}(\cite{GL2015})\label{lem2.1} Let $(\Sigma,g)$ be a smooth hypersurface in $\mathbb{H}^{n+1}$. Denote \begin{align*} \Gamma (r)=\int^{r}_{0} \lambda(y)dy=\lambda^{'}(r)-1. 
\end{align*} Then, $\Gamma|_{\Sigma}$ satisfies \begin{align} \nabla_{i}\Gamma &=\nabla_{i} \lambda^{'} =\left\langle \lambda \partial r, e_{i}\right\rangle, \label{2.1} \\ \nabla_{i}\nabla_{j}\Gamma &= \nabla_{i}\nabla_{j}\lambda^{'}=\lambda^{'}g_{ij}-uh_{ij}, \label{2.2} \end{align} where $\left\{e_{1},\cdots,e_{n}\right\}$ is a basis of the tangent space of $\Sigma$. \end{lemma} As $\Sigma \subset \mathbb{H}^{n+1}$ is a smooth, closed starshaped hypersurface with respect to the origin, then the support function $u>0$ everywhere on $\Sigma$. Moreover, $\Sigma$ can be represented as a radial graph, i.e., $\Sigma=\{ (r(\xi),\xi), \xi \in \mathbb{S}^{n}\}$. Let $\xi=(\xi^{1},\cdots,\xi^{n})$ be a local coordinate of $\mathbb{S}^{n}$, $\partial_{i}=\partial_{{\xi}^{i}}$, $r_{i}=D_{i}r$, with $D$ is the Levi-Civita connection on $(\mathbb{S}^{n},\sigma)$. We introduce a new function $\varphi:\mathbb{S}^{n} \to \mathbb{R}$ by \begin{align*} \varphi(\xi)=\chi(r(\xi)), \end{align*} where $\chi$ is a positive smooth function that satisfies $\frac{\partial }{\partial r}\chi=\frac{1}{\lambda(r)}$. Thus \begin{align*} \varphi_{i}:=D_{i}\varphi=\frac{r_{i}}{\lambda(r)}. \end{align*} The induced metric $g_{ij}$ and its inverse matrix $g^{ij}$ have the following forms \begin{align*} &g_{i j}=\lambda^{2}\sigma_{i j}+r_{i} r_{j}= \lambda^{2}\left(\sigma_{i j}+ \varphi_{i} \varphi_{j}\right) ,\\ &g^{i j}=\frac{1}{\lambda^{2}}\left(\sigma^{ij}-\frac{r^{i} r^{j}}{\lambda^{2}+|Dr|^{2}}\right)=\frac{1}{\lambda^{2}}\left(\sigma^{ij}-\frac{\varphi^{i} \varphi^{j}}{v^{2}}\right), \end{align*} where $r^{i}=\sigma^{ik}r_{k}$, $\varphi^{i}=\sigma^{ik}\varphi_{k}$ and $v=\sqrt{1+\lambda^{-2}|Dr|^{2}}=\sqrt{1+|D\varphi|^{2}}$. The unit outer normal $\nu$ and the support function $u$ can be expressed by \begin{align*} \nu= \frac{1}{v}\left(\partial_{r} - \frac{r_{i}}{\lambda^{2}}\partial_{i}\right) =\frac{1}{v}\left(\partial_{r} - \frac{\varphi_{i}}{\lambda}\partial_{i}\right), \qquad u=\left\langle \lambda\partial_{r}, \nu \right\rangle =\frac{\lambda}{v}. \end{align*} The second fundamental form $h_{ij}$, the Weingarten matrix $h^{i}_{j}$ and the mean curvature of $\Sigma$ are as follows (see e.g., \cite{G11}) \begin{align} &h_{ij}=\frac{\lambda^{'}}{\lambda v}g_{ij}-\frac{\lambda}{v}\varphi_{ij} ,\label{2.3}\\ &h^{i}_{j}=g^{ik}h_{kj}= \frac{\lambda^{'}}{\lambda v}\delta^{i}_{j}-\frac{1}{\lambda v}\left(\sigma^{ik}-\frac{\varphi^{i}\varphi^{k}}{v^{2}}\right)\varphi_{kj}, \label{2.4}\\ &H=\frac{n\lambda^{'}}{\lambda v}-\frac{1}{\lambda v}\left(\sigma^{ik}-\frac{\varphi^{i}\varphi^{k}}{v^{2}}\right)\varphi_{ki}.\label{2.5} \end{align} \subsection{ Normalized elementary symmetric functions } \ \vglue-10pt \indent For each $l=1, \cdots, n$, the normalized $l$-th elementary symmetric functions for $\kappa=\left(\kappa_{1}, \cdots, \kappa_{n}\right)$ are \begin{align*} E_{l}(\kappa)=\binom{n}{l}^{-1} \sigma_{l}(\kappa)=\binom{n}{l}^{-1} \sum_{1 \leq i_{1}<\ldots<i_{l} \leq n} \kappa_{i_{1}} \cdots \kappa_{i_{l}}, \end{align*} and we can set $E_{0}(\kappa)=1$ and $E_{l}(\kappa)=0$ for $l>n$. 
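As a simple illustration, consistent with the computation of $H\big|_{B_{r}}$ in Counterexample \ref{ex1.7}, consider a geodesic sphere $B_{r}$ in $\mathbb{H}^{n+1}$: all of its principal curvatures are equal to $\lambda^{'}(r)/\lambda(r)=\coth r$, and therefore \begin{align*} E_{l}(\kappa)\big|_{B_{r}}=\left(\frac{\lambda^{'}(r)}{\lambda(r)}\right)^{l}=\coth^{l} r, \qquad 0\leq l\leq n. \end{align*} Since geodesic spheres are the limiting hypersurfaces of the flows (\ref{1.9}) and (\ref{1.12}), this special case is a convenient consistency check for the formulas that follow.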
If $A=[A_{ij}] $ is an $n \times n$ symmetric matrix and $\kappa=\kappa(A)=(\kappa_{1},\cdots,\kappa_{n})$ are the eigenvalues of $A$, then $E_{l}(A)=E_{l}(\kappa(A))$ can be expressed as \begin{align*} E_{l}(A)=\frac{(n-l)!}{n!} \delta_{i_{1} \ldots i_{l}}^{j_{1} \ldots j_{l}} A_{i_{1} j_{1}} \cdots A_{i_{l} j_{l}}, \quad l=1, \ldots, n, \end{align*} where $\delta_{i_{1} \ldots i_{l}}^{j_{1} \ldots j_{l}}$ is a generalized Kronecker delta. See \cite{G2013} for details. We now recollect some properties of normalized $l$-th elementary symmetric functions. \begin{lemma}(\cite{G2013})\label{lem2.2} Let $\dot{E}_{l}^{i j}=\frac{\partial E_{l}}{\partial A_{i j}}$, then we have \begin{align} \sum_{i, j} \dot{E}_{l}^{i j} g_{ij} &=l E_{l-1} ,\label{2.6}\\ \sum_{i, j} \dot{E}_{l}^{i j} A_{i j} &=l E_{l} ,\label{2.7}\\ \sum_{i, j} \dot{E}_{l}^{i j}\left(A^{2}\right)_{i j} &=n E_{1} E_{l}-(n-l) E_{l+1}, \label{2.8} \end{align} where $\left(A^{2}\right)_{i j}=\sum_{l=1}^{n} A_{i l} A_{l j}$. \end{lemma} Before reviewing the famous Minkowski formula and the Newton-Maclaurin inequality in $\mathbb{H}^{n+1}$, we introduce the definition of strictly $k$-convex hypersurfaces. \begin{definition}\label{def1.14}(\cite{GL20}) For a bounded domain $\Omega \subset \mathbb{H}^{n+1}$, it is called strictly $k$-convex if the principal curvature $\kappa=(\kappa_{1},\cdots,\kappa_{n})\in \Gamma^{+}_{k}$, where $\Gamma^{+}_{k}$ is the Garding cone \begin{align*} \Gamma^{+}_{k}=\{\kappa \in \mathbb{R}^{n} |E_{i}(\kappa)>0,i=1,\cdots,k \}. \end{align*} \end{definition} \begin{lemma}(\cite{G2013})\label{lem2.4} If $\kappa \in \Gamma_{m}^{+}$, the following inequality is called the Newton-MacLaurin inequality \begin{align}\label{2.10} E_{m+1}(\kappa) E_{l-1}(\kappa) \leq E_{l}(\kappa) E_{m}(\kappa), \quad 1 \leq l \leq m. \end{align} Equality holds if and only if $\kappa_{1}=\cdots=\kappa_{n}$. \end{lemma} \begin{lemma} (\cite{{GL2015}})\label{lem2.3} Let $\Sigma$ be a smooth closed hypersurface in $\mathbb{H}^{n+1}$. Then \begin{align}\label{2.9} \int_{\Sigma} \lambda^{'}E_{l-1}(\kappa) d\mu= \int_{\Sigma} uE_{l}(\kappa) d\mu. \end{align} \end{lemma} \subsection{ Evolution equations} \ \vglue-10pt \indent A family of starshaped hypersurfaces $\Sigma_{t}=X(\Sigma,t)$ satisfies the general flow as below \begin{align}\label{2.11} \begin{cases} \frac{\partial}{\partial t}X(x,t)=F(x,t) \nu(x,t), \\ X(\cdot,0)=X_{0}, \end{cases} \end{align} where $F(x,t)$ and $\nu(x,t)$ are the velocity function and the unit outer normal vector of $\Sigma_{t}$, respectively. Then, the radial function $r(\xi,t)$ satisfies the following equation (see e.g.,\cite{HLW20}) \begin{align}\label{2.12} \begin{cases} \frac{\partial}{\partial t}r = F(x,t)v, \qquad \text{on}\quad\mathbb{S}^{n} \times \mathbb{R}^{+}, \\ r(\cdot,0)=r_{0}. \end{cases} \end{align} Moreover, $\varphi(\xi)=\chi(r(\xi))$ satisfies the initial value problem \begin{align}\label{2.13} \begin{cases} \frac{\partial}{\partial t}\varphi = F(x,t) \frac{v}{\lambda}, \qquad \text{on}\quad\mathbb{S}^{n} \times \mathbb{R}^{+}, \\ \varphi(\cdot,0)=\varphi_{0}. \end{cases} \end{align} We also have the following evolution equations for geometric quantities. 
\begin{lemma}(\cite{HLX14})\label{lem2.5} \begin{align} \frac{\partial}{\partial t}g_{ij}&=2F h_{ij},\label{2.14}\\ \frac{\partial}{\partial t}d\mu_{t}&=nE_{1}F d\mu_{t},\label{2.15}\\ \frac{\partial}{\partial t}h_{ij}&=-\nabla_{j}\nabla_{i}F+F\left((h^{2})_{ij}+g_{ij}\right),\label{2.16}\\ \frac{\partial}{\partial t}h^{j}_{i}&=-\nabla^{j}\nabla_{i}F-F\left((h^{2})^{j}_{i}+\delta^{j}_{i}\right),\label{2.17}\\ \frac{\partial}{\partial t}E_{l-1}&=\frac{\partial E_{l-1}}{\partial h^{j}_{i}}\frac{\partial h^{j}_{i}}{\partial t}=\dot{E}^{ij}_{l-1}\left(-\nabla_{j}\nabla_{i}F - F(h^{2})_{ij}+Fg_{ij} \right) ,\label{2.28}\\ \frac{\partial}{\partial t}\lambda^{'}&=\left\langle \bar{\nabla} \lambda^{'},\partial_{t}\right\rangle =uF ,\label{2.19} \\ \frac{\partial}{\partial t}W^{\lambda^{'}}_{0}(\Omega_{t})&=(n+1)\int_{M_{t}}(n+1)\lambda^{'} F d\mu_{t} , \label{2.20} \end{align} where $\nabla$ denotes the Levi-Civita connection on $(\Sigma_{t},g)$ . \end{lemma} \section{ Existence and smooth convergence of the flow (\ref{1.9})} \noindent It is known that the locally constrained mean curvature flow (\ref{1.9}) can be represented as the PDE of $\varphi (\xi,t)$, i.e., the following scalar equation (\ref{3.3}). In order to obtain the longtime existence of the flow (\ref{1.9}), we first derive a priori estimate for the equation (\ref{3.3}) and then use the standard theory of parabolic partial differential equations. The convergence of the flow (\ref{1.9}) is obtained by a detailed estimation of the gradient of $\varphi$. \subsection{ Longtime exists of the flow (\ref{1.9})} \ \vglue-10pt \indent From (\ref{2.12}), the flow (\ref{1.9}) can be parameterized into a scalar PDE of $r$ as following \begin{align}\label{3.1} \begin{cases} \frac{\partial}{\partial t} r =-\Phi_{1}H-\frac{n}{n-1} \frac{\partial \Phi_{1}}{\partial r}\sqrt{1+\lambda^{-2}|D r|^{2}}, \qquad \text{on}\quad\mathbb{S}^{n} \times \mathbb{R}^{+}, \\ r(\cdot,0)=r_{0}. \end{cases} \end{align} There are different representations of $f$ on starshaped hypersurfaces $\Sigma_{t}$, such as \begin{align*} &f:=\Phi_{1}(r(\xi)):\Sigma_{t} \to \mathbb{S}^{n} \to \mathbb{R}; \quad r: \mathbb{S}^{n} \to \mathbb{R},\\ &f:=\Phi_{1}(\varphi(\xi)):\Sigma_{t} \to \mathbb{S}^{n} \to \mathbb{R}; \quad \varphi :\mathbb{S}^{n} \to \mathbb{R}, \end{align*} where $\varphi(\xi) = \chi(r(\xi))$ and $\frac{\partial }{\partial r}\chi=\frac{1}{\lambda(r)}$. Thus, we have \begin{align}\label{3.2} \frac{\partial \Phi_{1}}{\partial r}=\frac{\partial \Phi_{1}}{\partial \varphi} \frac{1}{\lambda}. \end{align} Furthermore, by (\ref{2.13}) and (\ref{3.2}), $\varphi$ satisfies the initial value problem \begin{align}\label{3.3} \begin{cases} \frac{\partial}{\partial t}\varphi=-\Phi_{1}\frac{H}{\lambda}-\frac{n}{n-1} \frac{\partial \Phi_{1}}{\partial \varphi}\frac{\sqrt{1+|D \varphi|^{2}}}{\lambda^{2}}, \qquad \text{on}\quad\mathbb{S}^{n} \times \mathbb{R}^{+}, \\ \varphi(\cdot,0)=\varphi_{0}. \end{cases} \end{align} Firstly, we perform $C^{0}$ estimate of $\varphi$. \begin{proposition}\label{prop3.1} Let $\varphi \in C^{\infty}\left(\mathbb{S}^{n} \times [0,T) \right)$ be a solution to the initial value problem (\ref{3.3}), then there are positive constants $C_{1}=C_{1}( \varepsilon, \varphi_{min}(0))$ and $C_{2}=C_{2}( \delta, \varphi_{max}(0))$, such that \begin{align}\label{3.4} C_{1} \leq \varphi(\cdot,t) \leq C_{2}. 
\end{align} \end{proposition} \noindent{\it \bf Proof.}~~ Let $\varphi_{min}(t):=\underset{\xi \in \mathbb{S}^{n}}{\min} \varphi(\cdot,t)$; then $D \varphi_{min} =0$ and $D^{2} \varphi_{min} \ge 0$ at the spatial minimum point. In view of (\ref{2.5}), at the spatial minimum point of $\varphi$, we have \begin{align*} H(\varphi_{min})&=n\frac{\lambda^{'}}{\lambda}-\frac{1}{\lambda}D^{2}\varphi_{min}. \end{align*} From (\ref{3.3}) and the above equality, we get \begin{align*} \frac{\partial }{\partial t}\varphi_{min}&=-\lambda^{-2}\left(n\Phi_{1}\lambda^{'}+\frac{n}{n-1}\frac{\partial \Phi_{1}}{\partial \varphi}\right)+\lambda^{-2} \Phi_{1} D^{2}\varphi_{min}\\ &\geq -\lambda^{-2}\left(n\Phi_{1}\lambda^{'}+\frac{n}{n-1}\frac{\partial \Phi_{1}}{\partial \varphi}\right). \end{align*} Using (\ref{3.2}), $\widehat{\Phi}_{1}(r)=n\Phi_{1}\frac{\lambda^{'}}{\lambda^{2}}+\frac{n}{n-1} \frac{\partial \Phi_{1}}{\partial r}\frac{1}{\lambda}$ can be expressed as $\widehat{\Phi}_{1}(\varphi)=n\Phi_{1}\frac{\lambda^{'}}{\lambda^{2}}+\frac{n}{n-1}\frac{\partial \Phi_{1}}{\partial \varphi}\frac{1}{\lambda^{2}}$. Thus \begin{align*} \frac{\partial }{\partial t}\varphi_{min} \geq - \widehat{\Phi}_{1}(\varphi_{min}). \end{align*} If $\widehat{\Phi}_{1}(\varphi_{min}) \leq 0$, we have $\frac{\partial }{\partial t}\varphi_{min} \geq 0$. Then \begin{align*} \varphi_{min}(t) \geq \varphi_{min}(0). \end{align*} If $\widehat{\Phi}_{1}(\varphi_{min}) > 0$, then, according to Assumption \ref{as1.10}, there exists a zero of $\widehat{\Phi}_{1}(\varphi)$. Consequently, there exist constants $\varepsilon$ and $ \delta$ such that $0<\varepsilon<\delta$ and \begin{align*} \widehat{\Phi}_{1}(\varepsilon)<0,\qquad \widehat{\Phi}_{1}(\delta)>0. \end{align*} Thus $\widehat{\Phi}_{1}(\varphi_{min}) > \widehat{\Phi}_{1}(\varepsilon)$. Moreover, since $\widehat{\Phi}_{1}(r)$ is monotonically increasing with respect to $r$, it follows from (\ref{3.2}) that $\widehat{\Phi}_{1}(\varphi)$ is monotonically increasing with respect to $\varphi$. Then, we can deduce that \begin{align*} \varphi_{min}(t) > \varepsilon. \end{align*} Combining the above two cases, we get \begin{align*} \varphi_{min}(t) \geq C_{1}:= \min \{ \varepsilon,\varphi_{min}(0) \}. \end{align*} In the same way, letting $\varphi_{max}(t):=\underset{\xi \in \mathbb{S}^{n}}{\max} \varphi(\cdot,t)$, we have $D \varphi_{max} =0$, $D^{2} \varphi_{max} \leq 0$ and \begin{align*} \frac{\partial }{\partial t} \varphi_{max} \leq -\lambda^{-2}\left(n\Phi_{1}\lambda^{'}+\frac{n}{n-1}\frac{\partial \Phi_{1}}{\partial \varphi}\right) = -\widehat{\Phi}_{1}(\varphi_{max}). \end{align*} If $\widehat{\Phi}_{1}(\varphi_{max})\geq 0$, we obtain $\frac{\partial }{\partial t} \varphi_{max} \leq 0$, i.e., $\varphi_{max}(t)\leq \varphi_{max}(0)$. If $\widehat{\Phi}_{1}(\varphi_{max})< 0$, then $\widehat{\Phi}_{1}(\varphi_{max}) < \widehat{\Phi}_{1}(\delta)$, i.e., $\varphi_{max}(t) < \delta$. Combining the above two situations, we obtain \begin{align*} \varphi_{max}(t) \leq C_{2}:= \max \{\delta, \varphi_{max}(0)\}. \end{align*} This completes the proof of Proposition \ref{prop3.1}.\hfill${\square}$ In the following, we prove the $C^{1}$ estimate of $\varphi$. \begin{proposition}\label{prop3.2} Let $\varphi \in {C^{\infty}(\mathbb{S}^{n} \times [0,T))}$ be a solution to the initial value problem (\ref{3.3}). For any time $t \in [0,T)$, there is a positive constant $C$ depending on $\Sigma_{0}$, such that \begin{align}\label{3.5} \underset{\xi \in \mathbb{S}^{n}}{\max} |D \varphi(\cdot,t)| \leq C.
\end{align} \end{proposition} \noindent{\it \bf Proof.}~~Let $\psi =\frac{1}{2}|D \varphi|^{2}$, we have $\frac{\partial}{\partial t}\psi=D^{k}\varphi \cdot D_{k}\left(\frac{\partial }{\partial t}\varphi\right)=D^{k}\varphi\cdot \frac{\partial}{\partial t}\left(D_{k}\varphi\right)$. According to (\ref{3.3}), the evolution equation of $\psi$ is \begin{align}\label{3.6} \frac{\partial }{\partial t}\psi= -D^{k}\varphi \left[D_{k}(\Phi_{1})\frac{H}{\lambda}+\Phi_{1} D_{k}\left(\frac{H}{\lambda}\right)+\frac{n}{n-1}D_{k}\left(\frac{\partial \Phi_{1}}{\partial \varphi}\right)\frac{v}{\lambda^{2}}+\frac{n}{n-1}\frac{\partial \Phi_{1}}{\partial \varphi}D_{k}\left(\frac{v}{\lambda^{2}}\right)\right], \end{align} where \begin{align*} D_{k}\Phi_{1}=\frac{\partial \Phi_{1}}{\partial \varphi}D_{k}\varphi, \qquad D_{k}\left(\frac{\partial \Phi_{1}}{\partial \varphi}\right)=\frac{\partial^{2} \Phi_{1}}{\partial \varphi^{2}}D_{k}\varphi. \end{align*} Suppose $\psi$ attains the spatial maximum at point $\left(\xi_{t},t\right) \in \left(\mathbb{S}^{n} \times [0,T)\right)$, $\psi_{max} := \psi (\xi_{t},t)=\underset{\xi \in \mathbb{S}^{n}}{\max}\psi(\xi,t) $, then \begin{align} D \psi_{max}&=D^{m}\varphi D_{km}\varphi =0, \label{3.7}\\ D^{2}\psi_{max}&=D^{km}\varphi D_{km}\varphi + D^{k}\varphi D_{klm}\varphi \leq 0, \qquad k=1,\dots ,n, \label{3.8} \end{align} and at the spatial maximum point $\left(\xi_{t},t\right)$ of $\psi$, there also holds \begin{align*} D_{k}v=D_{k}\left(\sqrt{1+|D \varphi|^{2}}\right)=&-v^{-1}D^{m}\varphi D_{km}\varphi =0 ,\\ D^{k}\varphi \cdot H =& n (v\lambda)^{-1}\lambda^{'} D^{k}\varphi,\\ D_{k}H=& n(v\lambda)^{-1} D_{k}\varphi \left(\lambda^{2} -(\lambda^{'})^{2}\right) -(v \lambda)^{-1}\left(\sigma^{ki}-\frac{\varphi^{k}\varphi^{i}}{v^{2}}\right) D_{k}\varphi_{ki},\\ D^{k}\varphi \cdot {D_{k}H} \geq & n(v\lambda)^{-1}|D \varphi|^{2} \left(\lambda^{2}-(\lambda^{'})^{2}\right). \end{align*} Substituting (\ref{3.7}), (\ref{3.8}) and the above equations into (\ref{3.6}), we obtain \begin{align}\label{3.9} \frac{\partial }{\partial t}\psi_{max} \leq & 2v^{-1}\psi_{max} \left[\frac{2n}{n-1}\frac{\partial \Phi_{1}}{\partial \varphi}\frac{\lambda^{'}}{\lambda^{2}}v^{2}-\frac{n}{n-1}\frac{\partial^{2}\Phi_{1}}{\partial \varphi^{2}}\frac{1}{\lambda^{2}}v^{2}-n\frac{\partial \Phi_{1}}{\partial \varphi}\frac{\lambda^{'}}{\lambda^{2}}-n\Phi_{1}+2n\Phi_{1} \frac{(\lambda^{'})^{2}}{\lambda^{2}}\right] \notag \\ =& 2v^{-1}\psi_{max} \left[\frac{2n}{n-1}\frac{\partial \Phi_{1}}{\partial \varphi}\frac{\lambda^{'}}{\lambda^{2}}-\frac{n}{n-1}\frac{\partial^{2}\Phi_{1}}{\partial \varphi^{2}}\frac{1}{\lambda^{2}}-n\frac{\partial \Phi_{1}}{\partial \varphi}\frac{\lambda^{'}}{\lambda^{2}}-n\Phi_{1}+2n\Phi_{1}\frac{(\lambda^{'})^{2}}{\lambda^{2}}\right]\\ &+ 4v^{-1}\psi^{2}_{max} \left[\frac{2n}{n-1}\frac{\partial \Phi_{1}}{\partial \varphi}\frac{\lambda^{'}}{\lambda^{2}}-\frac{n}{n-1}\frac{\partial^{2} \Phi_{1}}{\partial \varphi^{2}}\frac{1}{\lambda^{2}}\right]. 
\notag \end{align} Denote by \begin{align*} C_{3}(\varphi(t))&=2v^{-1}\left[\frac{2n}{n-1}\frac{\partial \Phi_{1}}{\partial \varphi}\frac{\lambda^{'}}{\lambda^{2}}-\frac{n}{n-1}\frac{\partial^{2}\Phi_{1}}{\partial \varphi^{2}}\frac{1}{\lambda^{2}}-n\frac{\partial \Phi_{1}}{\partial \varphi}\frac{\lambda^{'}}{\lambda^{2}}-n\Phi_{1}+2n\Phi_{1}\frac{(\lambda^{'})^{2}}{\lambda^{2}}\right], \\ C_{4}(\varphi(t))&=4v^{-1} \left[\frac{2n}{n-1}\frac{\partial \Phi_{1}}{\partial \varphi}\frac{\lambda^{'}}{\lambda^{2}}-\frac{n}{n-1}\frac{\partial^{2} \Phi_{1}}{\partial \varphi^{2}}\frac{1}{\lambda^{2}}\right] . \end{align*} Thus, (\ref{3.9}) can be expressed as \begin{align*} \frac{\partial }{\partial t}\psi_{max} \leq C_{3}(\varphi(t))\psi_{max} + C_{4}(\varphi(t))\psi^{2}_{max}. \end{align*} We now prove that $C_{3}(\varphi(t)) \leq 0$ and $C_{4}(\varphi(t)) < 0$. From Assumption \ref{as1.10}, $\widehat{\Phi}_{1}(r)$ is monotonically increasing with respect to $r$ and \begin{align*} \frac{\partial \widehat{\Phi}_{1}}{ \partial r} = \left(\frac{n}{\lambda} - \frac{2n(\lambda^{'})^{2}}{\lambda^{3}}\right)\Phi_{1} +\left(\frac{n\lambda^{'}}{\lambda^{2}} - \frac{n}{n-1}\frac{\lambda^{'}}{\lambda^{2}}\right)\frac{\partial \Phi_{1}}{\partial r} + \frac{n}{n-1}\frac{1}{\lambda}\frac{\partial^{2} \Phi_{1}}{\partial r^{2}}. \end{align*} Then \begin{align}\label{3.10} \frac{n}{n-1}\frac{\partial \Phi_{1}}{\partial r}\frac{\lambda^{'}}{\lambda^{2}}-\frac{n}{n-1}\frac{\partial^{2} \Phi_{1}}{\partial r^{2}}\frac{1}{\lambda}\leq n\frac{\partial \Phi_{1}}{\partial r}\frac{\lambda^{'}}{\lambda^{2}}+n\Phi_{1}\frac{1}{\lambda}-2n\Phi_{1}\frac{(\lambda^{'})^{2}}{\lambda^{3}}. \end{align} Combining $\frac{\partial \Phi_{1}}{\partial r} = \frac{\partial \Phi_{1}}{\partial \varphi}\frac{1}{\lambda}$ and $\frac{\partial \varphi}{\partial r} = \frac{1}{\lambda}$, we get \begin{align*} \frac{\partial^{2} \Phi_{1}}{\partial r^{2}} = \frac{\partial^{2} \Phi_{1}}{\partial \varphi^{2}}\frac{1}{\lambda^{2}} - \frac{\partial \Phi_{1}}{\partial \varphi}\frac{\lambda^{'}}{\lambda^{2}}. \end{align*} Thus, the inequality (\ref{3.10}) is equivalent to \begin{align}\label{3.11} \frac{2n}{n-1}\frac{\partial \Phi_{1}}{\partial \varphi}\frac{\lambda^{'}}{\lambda^{2}}-\frac{n}{n-1}\frac{\partial^{2} \Phi_{1}}{\partial \varphi^{2}}\frac{1}{\lambda^{2}}\leq n\frac{\partial \Phi_{1}}{\partial \varphi}\frac{\lambda^{'}}{\lambda^{2}}+n\Phi_{1}-2n\Phi_{1}\frac{(\lambda^{'})^{2}}{\lambda^{2}}. \end{align} By (\ref{3.11}), it is not difficult to find that $C_{3}(\varphi(t)) \leq 0$. Next, we prove $C_{4}(\varphi(t)) < 0$. Suppose that \begin{align*} C_{4}(\varphi(t)) =4v^{-1} \left[\frac{2n}{n-1}\frac{\partial \Phi_{1}}{\partial \varphi}\frac{\lambda^{'}}{\lambda^{2}}-\frac{n}{n-1}\frac{\partial^{2} \Phi_{1}}{\partial \varphi^{2}}\frac{1}{\lambda^{2}}\right] \geq 0, \end{align*} then $\frac{2n}{n-1}\frac{\partial \Phi_{1}}{\partial \varphi}\frac{\lambda^{'}}{\lambda^{2}}-\frac{n}{n-1}\frac{\partial^{2} \Phi_{1}}{\partial \varphi^{2}}\frac{1}{\lambda^{2}} \geq 0$. From Proposition \ref{prop3.1}, we have $C_{1} \leq \varphi(\xi ,t) \leq C_{2}$ holds for all $(\xi,t) \in \mathbb{S}^{n} \times [0,T)$, where $C_{1}= min\{\varepsilon, \varphi_{min}(0)\}$ and $C_{1},C_{2}, \varepsilon \in \mathbb{R}^{+}$. 
Integrating both sides of $\frac{2n}{n-1}\frac{\partial \Phi_{1}}{\partial \varphi}\frac{\lambda^{'}}{\lambda^{2}}-\frac{n}{n-1}\frac{\partial^{2} \Phi_{1}}{\partial \varphi^{2}}\frac{1}{\lambda^{2}} \geq 0$ yields \begin{align*} 0 &\leq \frac{n}{n-1} \int_{ C_{1}}^{\varphi} \left(2\frac{\partial \Phi_{1}}{\partial z}\frac{\lambda^{'}}{\lambda^{2}} - \frac{\partial^{2}\Phi_{1}}{\partial z^{2}}\frac{1}{\lambda^{2}} \right) dz = -\frac{n}{n-1}\int_{C_{1}}^{\varphi} \frac{\partial }{\partial z} \left(\frac{1}{\lambda^{2}}\frac{\partial \Phi_{1}}{\partial z}\right) d z \\ &= \frac{n}{n-1} \left[\left(\frac{1}{\lambda^{2}}\frac{\partial \Phi_{1}}{\partial \varphi}(C_{1})\right) - \left(\frac{1}{\lambda^{2}}\frac{\partial \Phi_{1}}{\partial \varphi}\right) \right], \end{align*} where we used $\frac{\partial \lambda}{\partial \varphi} = \lambda^{'}\lambda$. Thus, \begin{align*} \frac{1}{\lambda^{2}}\frac{\partial \Phi_{1}}{\partial \varphi} \leq \frac{1}{\lambda^{2}}\frac{\partial \Phi_{1}}{\partial \varphi}(C_{1}). \end{align*} In fact, $\frac{1}{\lambda^{2}}\frac{\partial \Phi_{1}}{\partial \varphi}(C_{1}) < 0$. To prove this assertion, we need to consider two cases: (1) when $C_{1} = \varepsilon $, it follows from the proof of Proposition \ref{prop3.1} that $\widehat{\Phi}_{1}(\varepsilon) < 0$; (2) when $C_{1} =\varphi_{min}(0) $, we have $\varphi_{min}(0) \leq \varepsilon$. From (\ref{3.11}), we know that $\widehat{\Phi}_{1}(\varphi)$ is monotonically increasing with respect to $\varphi$, then $\widehat{\Phi}_{1}(\varphi_{min}(0)) \leq \widehat{\Phi}_{1}(\varepsilon) < 0$. Thus, $\widehat{\Phi}_{1}(C_{1}) <0$. Since $\widehat{\Phi}_{1}(\varphi)= \frac{n}{n-1} \frac{\partial \Phi_{1}}{\partial \varphi}\frac{1}{\lambda^{2}}+n\Phi_{1}\frac{\lambda^{'}}{\lambda^{2}}$, it follows that \begin{align*} \widehat{\Phi}_{1}(C_{1}) = \frac{n}{n-1} \frac{\partial \Phi_{1}}{\partial \varphi}(C_{1})\frac{1}{\lambda^{2}}+n\Phi_{1}(C_{1})\frac{\lambda^{'}}{\lambda^{2}}< 0, \end{align*} and \begin{align*} \frac{1}{\lambda^{2}}\frac{\partial \Phi_{1}}{\partial \varphi}(C_{1}) \leq -(n-1)\frac{\lambda^{'}}{\lambda^{2}}\Phi_{1}(C_{1})< 0 . \end{align*} Thus, \begin{align*} \frac{1}{\lambda^{2}}\frac{\partial \Phi_{1}}{\partial \varphi} \leq \frac{1}{\lambda^{2}}\frac{\partial \Phi_{1}}{\partial \varphi} (C_{1})< 0 \end{align*} holds for any $\varphi \in [C_{1},C_{2}]$, and \begin{align*} n\frac{\partial \Phi_{1}}{\partial \varphi}\frac{\lambda^{'}}{\lambda^{2}}+n\Phi_{1}-2n\Phi_{1}\frac{(\lambda^{'})^{2}}{\lambda^{2}} <0. \end{align*} Combining (\ref{3.11}) and the above inequality, we get \begin{align*} \frac{2n}{n-1}\frac{\partial \Phi_{1}}{\partial \varphi}\frac{\lambda^{'}}{\lambda^{2}}-\frac{n}{n-1}\frac{\partial^{2} \Phi_{1}}{\partial \varphi^{2}}\frac{1}{\lambda^{2}}\leq n\frac{\partial \Phi_{1}}{\partial \varphi}\frac{\lambda^{'}}{\lambda^{2}}+n\Phi_{1}-2n\Phi_{1}\frac{(\lambda^{'})^{2}}{\lambda^{2}} < 0, \end{align*} which contradicts $\frac{2n}{n-1}\frac{\partial \Phi_{1}}{\partial \varphi}\frac{\lambda^{'}}{\lambda^{2}}-\frac{n}{n-1}\frac{\partial^{2} \Phi_{1}}{\partial \varphi^{2}}\frac{1}{\lambda^{2}} \geq 0$. Thus, we have $\frac{2n}{n-1}\frac{\partial \Phi_{1}}{\partial \varphi}\frac{\lambda^{'}}{\lambda^{2}}-\frac{n}{n-1}\frac{\partial^{2} \Phi_{1}}{\partial \varphi^{2}}\frac{1}{\lambda^{2}} < 0$, i.e., $C_{4}(\varphi(t)) < 0$. From $C_{3}(\varphi(t)) \leq 0 $ and $C_{4}(\varphi(t)) < 0 $, the inequality (\ref{3.9}) can be scaled as $\frac{\partial }{\partial t} \psi_{max}\leq 0$. 
Hence, $\psi_{max} (t) \leq \psi_{max}(0) $, which implies (\ref{3.5}). This completes the proof of Proposition \ref{prop3.2}. \hfill${\square}$ Equation (\ref{3.3}) can be rewritten as a scalar parabolic PDE in divergence form as follows \begin{align*} \frac{\partial \varphi}{\partial t}&=\frac{\Phi_{1}}{\lambda^{2}v}\left(\sigma^{ki}-\frac{\varphi^{k}\varphi^{i}}{v^{2}}\right)\varphi_{ik}-n\lambda^{'}\frac{\Phi_{1}}{\lambda^{2}v}-\frac{n}{n-1}\frac{\partial \Phi_{1}}{\partial \varphi}\frac{v}{\lambda^{2}} \\ &=\operatorname{div}\left(\frac{\Phi_{1}}{\lambda^{2}v}\cdot D \varphi\right)+\frac{|D \varphi|^{2}}{\lambda^{2}v}\left(2\Phi_{1}\lambda^{'}-\frac{\partial \Phi_{1}}{\partial \varphi}\right)-n\Phi_{1}\frac{\lambda^{'}}{\lambda^{2}v}-\frac{n}{n-1}\frac{\partial \Phi_{1}}{\partial \varphi}\frac{v}{\lambda^{2}}, \end{align*} where we used $v_{i}=\frac{\varphi^{j}\varphi_{ji}}{v}$, $D_{i}\lambda=\lambda^{'}D_{i}r=\lambda \lambda^{'} D_{i}\varphi$, and $D_{i}\lambda^{'}=\lambda D_{i}r=\lambda^{2}D_{i}\varphi$. According to the classical theory of parabolic PDEs in divergence form \cite{LSU}, the higher regularity estimate for the solution $\varphi$ can be derived from the uniform $C^{1}$ estimate in Proposition \ref{prop3.2}. Hence, the solution $\Sigma_{t}$ of the flow (\ref{1.9}) has longtime existence. \subsection{ Exponential convergence of the flow (\ref{1.9})} \ \vglue-10pt \indent Finally, we prove that the flow (\ref{1.9}) converges exponentially to a geodesic sphere by a precise estimate of $|D\varphi|$. \begin{proposition}\label{prop3.3} Let $\varphi \in {C^{\infty}(\mathbb{S}^{n} \times [0,\infty))}$ be a solution to the initial value problem (\ref{3.3}). For any time $t \in [0,\infty)$, there exist two positive constants $\overline{C}$ and $\gamma$ that depend only on $\Sigma_{0}$, such that \begin{align}\label{3.12} \underset{\xi\in \mathbb{S}^{n}}{\max} |D \varphi(\cdot, t)|^{2}\leq \overline{C}e^{-\gamma t}. \end{align} \end{proposition} \noindent{\it \bf Proof.}~~From the proof of Proposition \ref{prop3.2}, we have, at the spatial maximum point of $\psi$, \begin{align*} \frac{\partial }{\partial t}\psi_{max} \leq C_{3}(\varphi(t))\psi_{max} + C_{4}(\varphi(t))\psi^{2}_{max}, \end{align*} and $C_{3}(\varphi(t)) \leq 0$, $C_{4}(\varphi(t)) < 0$. Since $\Phi_{1}(r)$ is a smooth function, we have that $\frac{\partial \Phi_{1}}{\partial r} = \lambda^{-1} \frac{\partial \Phi_{1}}{\partial \varphi}$ and $\frac{\partial^{2} \Phi_{1}}{\partial r^{2}}=\lambda^{-2}\left(\frac{\partial^{2} \Phi_{1}}{\partial \varphi^{2}}-\lambda^{'}\frac{\partial \Phi_{1}}{\partial \varphi} \right)$ are continuous functions. Thus, given that $C_{1} \leq \varphi(t) \leq C_{2}$, it follows that $ C_{3}(\varphi(t)) $ and $ C_{4}(\varphi(t)) $ are uniformly bounded continuous functions. It is easy to show that there exist positive constants $\gamma$ and $C$ such that $\psi_{max} (t) \leq Ce^{-2\gamma t}$. In fact, if $C_{3}(\varphi(t)) < 0$, we set $-2\gamma:= \max_{\varphi (t) \in [C_{1},C_{2}]} C_{3}(\varphi (t)) $; then $\gamma$ is a positive constant and \begin{align*} \frac{\partial }{\partial t}\psi_{max} \leq C_{3}(\varphi(t)) \psi_{max} \leq -2\gamma \psi_{max}. \end{align*} Integrating the above inequality yields \begin{align*} \psi_{max}(t) \leq Ce^{-2\gamma t}, \end{align*} where $C$ is a positive constant that depends only on $\Sigma_{0}$.
If $C_{3}(\varphi(t))=2v^{-1}\left[\frac{2n}{n-1}\frac{\partial \Phi_{1}}{\partial \varphi}\frac{\lambda^{'}}{\lambda^{2}}-\frac{n}{n-1}\frac{\partial^{2}\Phi_{1}}{\partial \varphi^{2}}\frac{1}{\lambda^{2}}-n\frac{\partial \Phi_{1}}{\partial \varphi}\frac{\lambda^{'}}{\lambda^{2}}-n\Phi_{1}+2n\Phi_{1}\frac{(\lambda^{'})^{2}}{\lambda^{2}}\right] = 0$, then we have \begin{align*} \frac{3n-n^{2}}{n-1}\frac{\partial \Phi_{1}}{\partial \varphi}\frac{\lambda^{'}}{\lambda^{2}}-\frac{n}{n-1}\frac{\partial^{2} \Phi_{1}}{\partial \varphi^{2}}\frac{1}{\lambda^{2}}+2n\Phi_{1}\frac{(\lambda^{'})^{2}}{\lambda^{2}}-n\Phi_{1}=0, \end{align*} which is equivalent to \begin{align*} \frac{2n-n^{2}}{n-1}\frac{\partial \Phi_{1}}{\partial r} \frac{\lambda^{'}}{\lambda}-\frac{n}{n-1}\frac{\partial^{2} \Phi_{1}}{\partial r^{2}} + 2n\Phi_{1}\left(\frac{\lambda^{'}}{\lambda}\right)^{2} -n\Phi_{1} = 0. \end{align*} Solving the above ODE, we obtain the solution \begin{align*} \Phi_{1}(r) =C_{5}\lambda^{1-n}(r) = C_{5}(\sinh r)^{1-n}, \end{align*} where $C_{5}$ is a positive constant. Since $\widehat{\Phi}_{1}(r)=n\Phi_{1}\frac{\lambda^{'}}{\lambda^{2}}+ \frac{n}{n-1} \frac{\partial \Phi_{1}}{\partial r}\frac{1}{\lambda}$, it follows that \begin{align*} 0 \equiv \widehat{\Phi}_{1}(r) = \widehat{\Phi}_{1}(\varphi), \qquad \text{ for any } \varphi(t) \in [C_{1},C_{2}], \end{align*} which does not satisfy Assumption \ref{as1.10}. This completes the proof of Proposition \ref{prop3.3}. \hfill${\square}$ \noindent {\bf Proof of Theorem \ref{th1.11}}: It can be inferred from Proposition \ref{prop3.3} that there exists a sequence of times $\{ t_{i} \}$ such that $\lim_{t_{i} \to \infty} r_{t_{i}}=r_{\infty}$. By the interpolation inequality and the Sobolev embedding theorem on $\mathbb{S}^{n}$, one gets the convergence of $r_{t_{i}}$ to $r_{\infty}$ in the $C^\infty$-topology. Since the velocity of the flow (\ref{1.9}) does not contain global terms, by the comparison principle, the radius $r_{\infty}$ is unique. Moreover, $B_{r_{\infty}}$ must be centered at the origin; otherwise, this would contradict the proof of Proposition \ref{prop3.1}. \hfill${\square}$ \section{ Michael-Simon type inequalities for $k$-th mean curvatures} \noindent In this section, we will use the smooth convergence results of the flows (\ref{1.9}) and (\ref{1.12}) to give the proofs of Theorem \ref{th1.12} and Theorem \ref{th1.15}, respectively. \subsection{ Sharp Michael-Simon type inequality for mean curvature} \ \vglue-10pt \indent In this subsection we will prove the desired geometric inequalities (\ref{1.10}) and (\ref{1.11}) for starshaped hypersurfaces. \noindent {\bf Proof of Theorem \ref{th1.12}}: First, we reduce the inequality (\ref{1.10}) by scaling. We may assume that \begin{align*} \int_{M_{0}} \lambda^{'} \sqrt{f^{2} E_{1}^{2}+|\nabla^{M_{0}} f|^{2}}-\int_{M_{0}}\left\langle \bar{\nabla}\left(f\lambda^{'}\right),\nu \right\rangle +\int_{\partial M_{0}} f = \int_{M_{0}}f^{\frac{n}{n-1}}. \end{align*} This normalization guarantees that we can find a solution $\vartheta: M_{0} \rightarrow \mathbb{R}$ of the following equation \begin{align*} \operatorname{div}_{M_{0}}\left(f \nabla^{M_{0}} \vartheta \right)= f^{\frac{n}{n-1}}-\lambda^{'} \sqrt{f^{2} E_{1}^{2}+|\nabla^{M_{0}} f|^{2}} + \left\langle \bar{\nabla}\left(f\lambda^{'}\right),\nu \right\rangle \end{align*} on $M_{0}$, and $\left\langle\nabla^{M_{0}} \vartheta , \vec{n}\right\rangle=1$ on $\partial M_{0}$. Here, $\vec{n}$ denotes the co-normal to $M_{0}$.
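Let us briefly indicate why the normalization above entails no loss of generality; this is only a sketch, assuming (as the reduction suggests) that the inequality (\ref{1.10}) compares the left-hand side of the normalization with a constant multiple of $\big(\int_{M_{0}}f^{\frac{n}{n-1}}\big)^{\frac{n-1}{n}}$. The left-hand side of the normalization is positively homogeneous of degree $1$ in $f$, while $\int_{M_{0}}f^{\frac{n}{n-1}}$ is homogeneous of degree $\frac{n}{n-1}$, so replacing $f$ by $\theta f$ with \begin{align*} \theta=\left(\frac{\int_{M_{0}} \lambda^{'} \sqrt{f^{2} E_{1}^{2}+|\nabla^{M_{0}} f|^{2}}-\int_{M_{0}}\left\langle \bar{\nabla}\left(f\lambda^{'}\right),\nu \right\rangle +\int_{\partial M_{0}} f}{\int_{M_{0}}f^{\frac{n}{n-1}}}\right)^{n-1} \end{align*} achieves the normalization, while both sides of (\ref{1.10}) are rescaled by the same factor $\theta$.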
According to the standard elliptic regularity theory, $\vartheta$ belongs to $C^{2, \beta}$ for each $0<\beta<1$. Thus, we only need to prove $\int_{M_{0}}f^{\frac{n}{n-1}} d\mu \geq \omega_{n}$. Note that $M_{t} = \Sigma_{t}$ or $M_{t} = \Sigma_{t} \cup \partial \Sigma_{t}$ for $t\in[0,\infty)$, so that \begin{align}\label{4.1} \int_{M_{t}}f^{\frac{n}{n-1}} d\mu \geq \int_{\Sigma_{t}}f^{\frac{n}{n-1}} d\mu \geq \omega_{n}, \end{align} and it is only necessary to prove the second inequality. Second, we prove the monotonicity of $\int_{\Sigma_{t}}f^{\frac{n}{n-1}}d\mu_{t}$, which is the key point. Since $f\big|_{\Sigma_{t}} = \Phi_{1} \circ r(\xi,t)$, it follows that $\int_{\Sigma_{t}}f^{\frac{n}{n-1}}d\mu_{t}=\int_{\Sigma_{t}}\Phi_{1}^{\frac{n}{n-1}}(r)d\mu_{t}$. Along the flow (\ref{1.9}), we have \begin{align*} \frac{\partial}{\partial t}\int_{\Sigma_{t}}\Phi_{1}^{\frac{n}{n-1}}d\mu_{t} &=\int_{\Sigma_{t}}\left(\frac{n}{n-1}\Phi_{1}^{\frac{1}{n-1}}\frac{\partial \Phi_{1}}{\partial t} + \Phi_{1}^{\frac{n}{n-1}}F H \right)d\mu_{t}\\ &=\int_{\Sigma_{t}}\Phi_{1}^{\frac{1}{n-1}}\left(\frac{n}{n-1}\frac{\partial \Phi_{1}}{\partial r}\sqrt{1+\lambda^{-2}|D r|^{2}}+\Phi_{1} H\right)F d\mu_{t}, \end{align*} where $F=-\left(\frac{n}{n-1}\frac{\partial \Phi_{1}}{\partial r} +\Phi_{1} \frac{H}{\sqrt{1+\lambda^{-2}|D r|^{2}}} \right)$. Thus \begin{align}\label{4.2} \frac{\partial}{\partial t}&\int_{\Sigma_{t}}\Phi_{1}^{\frac{n}{n-1}}d\mu_{t} =-\int_{\Sigma_{t}}\frac{\Phi_{1}^{\frac{1}{n-1}}}{\sqrt{1+\lambda^{-2}|D r|^{2}}}\left(\frac{n}{n-1}\frac{\partial \Phi_{1}}{\partial r}\sqrt{1+\lambda^{-2}|D r|^{2}} +\Phi_{1} H\right)^{2} d\mu_{t} \leq 0. \end{align} The monotone decrease of $\int_{\Sigma_{t}}\Phi_{1}^{\frac{n}{n-1}}d\mu_{t}$ yields \begin{align*} \int_{\Sigma_{0}}\Phi_{1}^{\frac{n}{n-1}}d\mu \geq \int_{\Sigma_{t}}\Phi_{1}^{\frac{n}{n-1}}d\mu_{t} \geq \int_{\Sigma_{\infty}}\Phi_{1}^{\frac{n}{n-1}}d\mu_{\infty}. \end{align*} From the convergence result for the flow (\ref{1.9}), namely $\Sigma_{\infty}=B_{r_{\infty}}$, we have \begin{align*} \nabla r_{\infty} =0, \qquad H |_{\Sigma_{\infty}} = \frac{n\lambda^{'}(r_{\infty})}{\lambda(r_{\infty})}, \end{align*} and \begin{align*} \nabla \Phi_{1}(r_{\infty}) =\frac{\partial \Phi_{1}}{\partial r}(r_{\infty}) \nabla r_{\infty}= 0. \end{align*} Therefore, $f\big|_{\Sigma_{\infty}}=\Phi_{1}(r_{\infty})$ is constant and \begin{align}\label{4.3} \int_{\Sigma_{\infty}}f^{\frac{n}{n-1}}d\mu_{\infty} =\int_{B_{r_{\infty}}}\Phi_{1}(r_{\infty})^{\frac{n}{n-1}}d\mu_{\mathbb{H}^{n+1}}= \Phi^{\frac{n}{n-1}}_{1}(r_{\infty})\lambda^{n}(r_{\infty})\omega_{n}. \end{align} Similarly, $\frac{\partial }{\partial t}\int_{\Sigma_{t}}\Phi_{1}^{\frac{n}{n-1}}d\mu_{t} \to 0$ as $t \to \infty$, and by (\ref{4.2}) we have \begin{align}\label{4.4} \frac{n}{n-1}\frac{\partial \Phi_{1}}{\partial r}\sqrt{1+\lambda^{-2}|D r|^{2}}+\Phi_{1} H = 0, \end{align} i.e., \begin{align*} n\Phi_{1}(r_{\infty})\frac{\lambda^{'}(r_{\infty})}{\lambda(r_{\infty})}+\frac{n}{n-1}\frac{\partial \Phi_{1}}{\partial r}(r_{\infty}) = 0. \end{align*} It is not difficult to verify that one of the solutions to the above equation is \begin{align}\label{4.5} \Phi_{1}(r_{\infty})= \lambda^{-(n-1)}(r_{\infty}). \end{align} Substituting (\ref{4.5}) into (\ref{4.3}), we get \begin{align*} \int_{\Sigma_{\infty}}f^{\frac{n}{n-1}}d\mu_{\infty}= \omega_{n}.
\end{align*} Hence, \begin{align*} \int_{M_{0}} f^{\frac{n}{n-1}} d\mu \geq \int_{\Sigma_{0}} f^{\frac{n}{n-1}} d\mu \geq \int_{\Sigma_{\infty}}f^{\frac{n}{n-1}}d\mu_{\infty} \geq \omega_{n}, \end{align*} which implies that the starshaped hypersurface satisfies the inequalities (\ref{1.10}) and (\ref{1.11}). It is obvious that equality holds in the inequality (\ref{1.11}) for geodesic spheres, so we only need to prove the converse. Suppose that the smooth starshaped hypersurface $\Sigma_{t}$ attains the equality, i.e., \begin{align}\label{4.6} \int_{\Sigma_{t}}f^{\frac{n}{n-1}}d\mu_{t}= \Phi_{1}^{\frac{n}{n-1}}(r_{\infty})\lambda^{n}(r_{\infty})\omega_{n}=\omega_{n}. \end{align} Then, along the flow (\ref{1.9}), we have $\frac{\partial}{\partial t}\int_{\Sigma_{t}}f^{\frac{n}{n-1}}d\mu_{t} =0$. From (\ref{4.2}), we get \begin{align*} \frac{n}{n-1}\frac{\partial \Phi_{1}}{\partial r}\sqrt{1+\lambda^{-2}|D r|^{2}} +\Phi_{1} H = 0. \end{align*} Therefore, \begin{align*} \frac{\partial r}{\partial t} = -\frac{n}{n-1}\frac{\partial \Phi_{1}}{\partial r}\sqrt{1+\lambda^{-2}|D r|^{2}}-\Phi_{1} H =0, \end{align*} and \begin{align*} \frac{\partial}{\partial t}\left(\Phi_{1}^{\frac{n}{n-1}}\lambda^{n}\right) = \Phi_{1}^{\frac{1}{n-1}}\lambda^{n-1}\left(\frac{n}{n-1}\frac{\partial \Phi_{1}}{\partial r}\lambda+n\Phi_{1}\lambda^{'}\right)\frac{\partial r}{\partial t} =0. \end{align*} Thus, \begin{align*} \lambda^{n}\Phi_{1}^{\frac{n}{n-1}}(r) =\lambda^{n}(r_{\infty})\Phi_{1}^{\frac{n}{n-1}}(r_{\infty}) \end{align*} and the equation (\ref{4.6}) is equivalent to \begin{align}\label{4.7} \int_{\Sigma_{t}}\Phi_{1}^{\frac{n}{n-1}}d\mu_{t}= \Phi_{1}^{\frac{n}{n-1}}(r_{\infty})\lambda^{n}(r_{\infty})\omega_{n}=\Phi_{1}^{\frac{n}{n-1}}(r)\lambda^{n}(r)\omega_{n}. \end{align} By (\ref{4.7}), it follows that \begin{align}\label{4.8} \lambda^{n}(r) = \frac{\int_{\Sigma_{t}}\Phi_{1}^{\frac{n}{n-1}}d\mu_{t}}{\Phi_{1}^{\frac{n}{n-1}}(r)\omega_{n}}. \end{align} Differentiating (\ref{4.8}) and combining with (\ref{4.7}), we get \begin{align*} \nabla (\lambda^{n}) = \nabla \left( \frac{\int_{\Sigma_{t}}\Phi_{1}^{\frac{n}{n-1}}d\mu_{t}}{\Phi_{1}^{\frac{n}{n-1}}(r)\omega_{n}} \right) = - \frac{n}{n-1}\frac{\nabla \Phi_{1}}{\Phi_{1}}\lambda^{n}, \end{align*} i.e., \begin{align*} \nabla(\lambda^{n})+\frac{n}{n-1}\frac{\nabla \Phi_{1}}{\Phi_{1}}\lambda^{n} = \nabla r \left(\frac{n}{n-1}\frac{\partial \Phi_{1}}{\partial r}+n\Phi_{1}\frac{\lambda^{'}}{\lambda}\right)\lambda^{n}\Phi_{1}^{-1} = 0. \end{align*} It can be inferred that either $\nabla r = 0$ or $\frac{n}{n-1}\frac{\partial \Phi_{1}}{\partial r}+n\Phi_{1}\frac{\lambda^{'}}{\lambda}=0$. Case 1: If $\nabla r = 0 $, then $r$ is constant and $\Sigma_{t}$ is a geodesic sphere. Case 2: If $\frac{n}{n-1}\frac{\partial \Phi_{1}}{\partial r}+n\Phi_{1}\frac{\lambda^{'}}{\lambda}=0$, then \begin{align}\label{4.9} \Phi_{1}(r)=\lambda^{-(n-1)}(r). \end{align} Substituting (\ref{4.9}) into (\ref{4.4}), we have \begin{align*} \lambda^{-n}\left(\lambda H -n\lambda^{'}v \right)= 0. \end{align*} Then \begin{align}\label{4.10} H= \frac{n\lambda^{'}v}{\lambda}. \end{align} Let $\alpha^{ki}=\frac{1}{\lambda v}\left(\sigma^{ki}-\frac{\varphi^{k}\varphi^{i}}{v^{2}}\right)$. By (\ref{2.5}) and (\ref{4.10}), we deduce that $\alpha^{ki}\varphi_{ik}=\frac{n\lambda^{'}}{\lambda v}-\frac{n\lambda^{'}v}{\lambda}$.
Also, from (\ref{2.4}), it follows that \begin{align*} h^{i}_{j} = \frac{\lambda^{'}}{\lambda v}\delta^{i}_{j}-\frac{1}{\lambda v}\left(\sigma^{ki}-\frac{\varphi^{k}\varphi^{i}}{v^{2}}\right)\varphi_{kj} =\frac{\lambda^{'}}{\lambda v}\delta^{i}_{j}-\alpha^{ki}\varphi_{kj} =\frac{\lambda^{'}}{\lambda v}\delta^{i}_{j}-\left(\frac{\lambda^{'}}{\lambda v}-\frac{\lambda^{'} v}{\lambda }\right)\delta^{i}_{j}. \end{align*} Thus, \begin{align*} |A|^{2}=h^{i}_{j}h^{j}_{i} = n\left(\frac{\lambda^{'} v}{\lambda}\right)^{2}. \end{align*} Combining (\ref{4.10}) and the above equality, we have \begin{align*} \frac{|A|^{2}}{H^{2}} = \frac{1}{n}. \end{align*} Therefore, $\Sigma_{t}$ is a geodesic sphere centered at the origin for $t>0$. Since $\Sigma_{0}$ can be approximated smoothly by $\Sigma_{t}$, i.e., $\Sigma_{0}=\lim_{t \to 0} \Sigma_{t} $, $\Sigma_{0}$ is also a geodesic sphere centered at the origin. This completes the proof of Theorem \ref{th1.12}. \hfill${\square}$ \subsection{ Sharp Michael-Simon type inequality for $k$-th mean curvatures } \ \vglue-10pt \indent In this section, we will give the proof of the new geometric inequalities (\ref{1.14}) and (\ref{1.15}) by applying the convergence result of the flow (\ref{1.12}). \noindent {\bf Proof of Theorem \ref{th1.15}}: First simplify the inequality (\ref{1.14}). We may assume that \begin{align*} \int_{M_{0}} \lambda^{'} \sqrt{f^{2}E_{k}^{2} +|\nabla^{M_{0}} f|^{2} E_{k-1}^{2}} -\int_{M_{0}}\left\langle \bar{\nabla}\left(f\lambda^{'}\right),\nu \right\rangle E_{k-1} &+\int_{\partial M_{0}} f E_{k-1} \\ &=\int_{M_{0}}f^{\frac{n-k+1}{n-k}} E_{k-1}. \end{align*} This normalisation ensures the existence of solution $\eta: M_{0} \rightarrow \mathbb{R}$ for the following PDE \begin{align*} \operatorname{div}_{M_{0}}\left(E_{k-1}f \nabla^{M} \eta \right)=f^{\frac{n-k+1}{n-k}} E_{k-1}- \lambda^{'} \sqrt{f^{2}E_{k}^{2} +|\nabla^{M_{0}} f|^{2} E_{k-1}^{2}} + \left\langle \bar{\nabla}\left(f\lambda^{'}\right),\nu \right\rangle E_{k-1} \end{align*} on $M_{0}$, and $\left\langle \nabla^{M_{0}} \eta , \vec{n} \right\rangle=1$ on $\partial M_{0}$. Here, $\vec{n}$ denotes the co-normal to $M_{0}$. According to the standard elliptic regularity theory, $\eta \in C^{2, \beta}$ for each $0<\beta<1$. Hence, (\ref{1.14}) can be reduced into the following inequality \begin{align}\label{4.11} \int_{M_{0}}E_{k-1}f^{\frac{n-k+1}{n-k}} d\mu \geq \int_{\Sigma_{0}}E_{k-1}f^{\frac{n-k+1}{n-k}} d\mu \geq p_{k}\circ h_{0}^{-1}\left(W^{\lambda^{'}}_{0}(\Omega_{0})\right). \end{align} Secondly, we prove that $W^{\lambda^{'}}_{0}(\Omega_{t})$ monotonically increases along the flow (\ref{1.12}), where $\Omega_{t}$ is the domain enclosed by $\Sigma_{t}$. Using (\ref{2.10}) and (\ref{2.20}), we have \begin{align*} \frac{\partial}{\partial t}W^{\lambda^{'}}_{0}(\Omega_{t})=(n+1)\int_{\Sigma_{t}}\left(\lambda^{'}\frac{E_{k-2}}{E_{k-1}}-u\right)d\mu_{t} \geq (n+1)\int_{\Sigma_{t}}\left(\frac{\lambda^{'}}{E_{1}}-u\right)d\mu_{t} \geq 0, \end{align*} where the Heintze-Karcher inequality (see Theorem 3.5 in \cite{B13}) is used in the last inequality. Finally we prove the monotonicity of $\int_{\Sigma_{t}}E_{k-1}f^{\frac{n-k+1}{n-k}} d\mu_{t}$ along the flow (\ref{1.12}). For the convenience of computation, we use $\widehat{\Phi}_{2}(r)=\Phi_{2}^{\frac{n-k+1}{n-k}}(r(\xi,t))$. 
Along the general flow (\ref{2.11}), we have \begin{align*} \frac{\partial}{\partial t} \int_{\Sigma_{t}} E_{k-1} \widehat{\Phi}_{2} d\mu_{t}= &\int_{\Sigma_{t}} \widehat{\Phi}_{2} \dot{E}^{ij}_{k-1}\left(-\nabla_{j}\nabla_{i}F - F(h^{2})_{ij}+Fg_{ij}\right)d\mu_{t} \\ &+ \int_{\Sigma_{t}} \frac{\partial \widehat{\Phi}_{2}}{\partial r}\frac{\partial r}{\partial t} E_{k-1} d\mu_{t} + \int_{\Sigma_{t}} nE_{k-1}E_{1}\widehat{\Phi}_{2} F d\mu_{t}. \end{align*} Since $\dot{E}^{ij}_{k-1}$ is divergence-free, we have \begin{align*} \frac{\partial}{\partial t} \int_{\Sigma_{t}} E_{k-1} \widehat{\Phi}_{2} d\mu_{t}= &\int_{\Sigma_{t}}-\frac{k-1}{n}\Delta_{\Sigma_{t}}\widehat{\Phi}_{2} E_{k-2} F d\mu_{t} + \int_{\Sigma_{t}} (k-1)\widehat{\Phi}_{2}E_{k-2}F d\mu_{t}\\ &+ \int_{\Sigma_{t}} \frac{\partial \widehat{\Phi}_{2}}{\partial r} v E_{k-1}F d\mu_{t}+ \int_{\Sigma_{t}} (n-k+1)\widehat{\Phi}_{2}E_{k}F d\mu_{t}\\ =& \int_{\Sigma_{t}} \left(-\frac{k-1}{n}\Delta_{\Sigma_{t}} \widehat{\Phi}_{2}+(k-1)\widehat{\Phi}_{2}\right) E_{k-2} F d\mu_{t} \\ &+ \int_{\Sigma_{t}} \frac{\partial \widehat{\Phi}_{2}}{\partial r} v E_{k-1}F d\mu_{t}+ \int_{\Sigma_{t}} (n-k+1)\widehat{\Phi}_{2}E_{k}F d\mu_{t}, \end{align*} where we use integration by parts, (\ref{2.6}) and (\ref{2.8}). From Assumption \ref{as1.14} (1), it follows that \begin{align*} -\frac{k-1}{n}\Delta_{\Sigma_{t}} \widehat{\Phi}_{2}+(k-1)\widehat{\Phi}_{2} =-\frac{\partial \widehat{\Phi}_{2}}{\partial r} \frac{\lambda^{'}}{\lambda}v^{2} + m\frac{E_{k-1}}{E_{k-2}} \lambda (\lambda^{'})^{m-k}. \end{align*} Thus \begin{align*} \frac{\partial}{\partial t} &\int_{\Sigma_{t}} E_{k-1} \widehat{\Phi}_{2} d\mu_{t} \\ =&\int_{\Sigma_{t}} m\lambda (\lambda^{'})^{m-k} E_{k-1} F d\mu_{t} - \int_{\Sigma_{t}} \left(\frac{\partial \widehat{\Phi}_{2}}{\partial r}\frac{\lambda^{'}}{\lambda}v^{2}E_{k-2} - \frac{\partial \widehat{\Phi}_{2}}{\partial r} v E_{k-1}\right)F d\mu_{t} \\ &+ \int_{\Sigma_{t}} (n-k+1)\widehat{\Phi}_{2}E_{k}F d\mu_{t} . \end{align*} Substituting $F =\frac{E_{k-2}}{E_{k-1}}-\frac{u}{\lambda^{'}}$ into the above equation and applying the Newton-MacLaurin inequality (\ref{2.10}), we can obtain the following along the flow (\ref{1.12}) \begin{align}\label{4.12} \frac{\partial}{\partial t} \int_{\Sigma_{t}} E_{k-1} \widehat{\Phi}_{2} d\mu_{t} =&\int_{\Sigma_{t}} m\lambda (\lambda^{'})^{m-k-1} \left(\lambda^{'}E_{k-2} - uE_{k-1} \right) - \frac{\partial \widehat{\Phi}_{2}}{\partial r}\frac{\lambda^{'}}{\lambda}v^{2}E_{k-1}\left(\frac{E_{k-2}}{E_{k-1}}-\frac{u}{\lambda^{'}}\right)^{2} d\mu_{t} \notag \\ &+ \int_{\Sigma_{t}} (n-k+1)\widehat{\Phi}_{2}E_{k}\left(\frac{E_{k-2}}{E_{k-1}}-\frac{u}{\lambda^{'}}\right) d\mu_{t} \notag \\ \leq& \int_{\Sigma_{t}} m\lambda (\lambda^{'})^{m-k-1} \left(\lambda^{'}E_{k-2} - uE_{k-1} \right) - \frac{\partial \widehat{\Phi}_{2}}{\partial r}\frac{\lambda^{'}}{\lambda}v^{2}E_{k-1}\left(\frac{E_{k-2}}{E_{k-1}}-\frac{u}{\lambda^{'}}\right)^{2} d\mu_{t} \\ &+\int_{\Sigma_{t}} (n-k+1)\frac{\widehat{\Phi}_{2}}{\lambda^{'}}\left(\lambda^{'}E_{k-1} - uE_{k}\right) d\mu_{t}. 
\notag \end{align} From Assumption \ref{as1.14}(2), we have \begin{align*} \frac{\partial \widehat{\Phi}_{2}}{\partial \lambda^{'}} - \frac{\widehat{\Phi}_{2}}{\lambda^{'} }\geq 0,\qquad \frac{\partial \widehat{\Phi}_{2}}{\partial r} = \frac{\partial \widehat{\Phi}_{2}}{\partial \lambda^{'}} \lambda \geq \widehat{\Phi}_{2}\frac{\lambda}{\lambda^{'} } >0, \end{align*} and combining (\ref{2.2}), (\ref{2.6}) and (\ref{2.7}), it follows that \begin{align*} \lambda^{'}E_{k-1}-uE_{k}=\frac{1}{k}\dot{E}^{ij}_{k}\left(\lambda^{'}g_{ij}-uh_{ij}\right)=\frac{1}{k}\dot{E}^{ij}_{k}\nabla_{i}\nabla_{j}\lambda^{'}, \end{align*} Thus \begin{align*} \frac{\partial}{\partial t} &\int_{\Sigma_{t}} E_{k-1} \widehat{\Phi}_{2} d\mu_{t} \\ \leq& \int_{\Sigma_{t}} -\frac{m}{k-1} \dot{E}^{ij}_{k-1}\nabla_{i}(\lambda (\lambda^{'})^{m-k-1}) \nabla_{j}\lambda^{'} - \frac{n-k+1}{k} \dot{E}^{ij}_{k}\nabla_{i}\frac{\widehat{\Phi}_{2}}{\lambda^{'}}\nabla_{j}\lambda^{'} d\mu_{t} \\ =& \int_{\Sigma_{t}} -\frac{m}{k-1} (\lambda^{'})^{m-k-2}\lambda\left[\frac{(\lambda^{'})^{2}}{\lambda^{2}}+ (m-k-1)\right]\dot{E}^{ij}_{k-1}\nabla_{i}\lambda^{'} \nabla_{j}\lambda^{'} \\ &- \frac{n-k+1}{k}\left(\frac{\partial \widehat{\Phi}_{2}}{\partial \lambda^{'}} \frac{1}{\lambda^{'}}-\frac{\widehat{\Phi}_{2}}{(\lambda^{'})^{2}}\right)\dot{E}^{ij}_{k}\nabla_{i}\lambda^{'}\nabla_{j}\lambda^{'} d\mu_{t} \\ \leq& \int_{\Sigma_{t}} -\frac{m(m-k)}{k-1} (\lambda^{'})^{m-k-2}\lambda \dot{E}^{ij}_{k-1}\nabla_{i}\lambda^{'} \nabla_{j}\lambda^{'} \\ &- \frac{n-k+1}{k}\left(\frac{\partial \widehat{\Phi}_{2}}{\partial \lambda^{'}} \frac{1}{\lambda^{'}}-\frac{\widehat{\Phi}_{2}}{(\lambda^{'})^{2}}\right)\dot{E}^{ij}_{k}\nabla_{i}\lambda^{'}\nabla_{j}\lambda^{'} d\mu_{t}\\ \leq& 0 . \end{align*} where the last inequality follows from the positivity of $\dot{E}^{ij}_{k}$ and $\dot{E}^{ij}_{k-1}$. Therefore \begin{align*} \int_{M_{0}} E_{k-1}\widehat{\Phi}_{2} d\mu \geq \int_{\Sigma_{0}} E_{k-1}\widehat{\Phi}_{2} d\mu_{t} \geq \int_{\Sigma_{\infty}}E_{k-1}\widehat{\Phi}_{2} d\mu_{\infty}=\int_{B_{R}}E_{k-1}\widehat{\Phi}_{2} d\mu_{\mathbb{H}^{n+1}}, \end{align*} where the last equality is obtained from the convergence result of the flow (\ref{1.12}) and $B_{R}=\partial B^{n+1}_{R}$. Also $\nabla \Phi_{2}=\frac{\partial \Phi_{2}}{\partial r} \nabla r=0$ on $B_{R}$, i.e., $f = \Phi_{2}(r)$ is constant on $B_{R}$. Hence \begin{align*} \int_{B_{R}} E_{k-1} \widehat{\Phi}_{2} d\mu_{\mathbb{H}^{n+1}} &=\omega_{n} \Phi_{2}^{\frac{n-k+1}{n-k}}\int_{B_{R}} E_{k-1}(\kappa) d\mu_{\mathbb{H}^{n+1}}= \omega_{n} \Phi_{2}^{\frac{n-k+1}{n-k}}(\lambda^{'})^{k-1}\lambda^{n-k+1} (R)\\ &=p_{k}\circ h^{-1}_{0}\left(W^{\lambda^{'}}_{0}(B^{n+1}_{R})\right). \notag \end{align*} Furthermore, we already know that $W^{\lambda^{'}}_{0}(\Omega_{t})$ is monotonically increasing along the flow (\ref{1.12}) and $p_{k}(r)$ is monotonically increasing with respect to $r$, thus \begin{align*} \int_{M_{0}} E_{k-1} \widehat{\Phi}_{2} d\mu_{t} &\geq \int_{\Sigma_{0}} E_{k-1} \widehat{\Phi}_{2} d\mu_{t} \\ &\geq\int_{B_{R}} E_{k-1} \widehat{\Phi}_{2} d\mu_{\mathbb{H}^{n+1}} = p_{k}\circ h^{-1}_{0}\left(W^{\lambda^{'}}_{0}(B^{n+1}_{R})\right) \geq p_{k}\circ h^{-1}_{0}\left(W^{\lambda^{'}}_{0}(\Omega_{0})\right), \end{align*} which means that the starshaped, strictly $k$-convex hypersurface satisfies the inequalities (\ref{1.14}) and (\ref{1.15}). We now prove that $\Sigma_{0}$ is a geodesic sphere when the equality holds in (\ref{1.15}). 
If a smooth starshaped, strictly $k$-convex hypersurface $\Sigma_{t}$ attains the equality \begin{align*} \int_{\Sigma_{t}}E_{k-1}f^{\frac{n-k+1}{n-k}} d\mu_{t} = p_{k}\circ h^{-1}_{0}\left(W^{\lambda^{'}}_{0}(\Omega_{t})\right). \end{align*} Then, $\frac{\partial}{\partial t}\int_{\Sigma_{t}}E_{k-1}f^{\frac{n-k+1}{n-k}} d\mu_{t}=0$. According to Lemma \ref{lem2.4} and (\ref{4.12}), $\Sigma_{t}$ is a geodesic sphere centered at the origin for $t>0$. Since $\Sigma_{0}$ can be smoothly approximated by a family of geodesic spheres, $\Sigma_{0}$ is also a geodesic sphere. This completes the proof of Theorem \ref{th1.15}. \hfill${\square}$ \begin{thebibliography}{20} \bibitem{BC64}{\sc R.\ L.\ Bishop {\rm and}\ R.\ J.\ Crittenden}: \textit{Geometry of manifolds}, (Academic Press, New York, 1964). \bibitem{BG73}{\sc E.\ Bombieri {\rm and}\ E.\ Giusti}: \textit{Local estimates for the gradient of non-parametric surfaces of prescribed mean curvature}, Comm. Pure Appl. Math. {\bf 26}(1973), 381-394. \bibitem{BP17}{\sc M.\ C.\ Bertini {\rm and}\ G.\ Pipoli}: \textit{Volume preserving non homogeneous mean curvature flow in hyperbolic space}, Diff. Geom. Appl. {\bf 54} (2017), 448-463. \bibitem{B13} {\sc S.\ Brendle}: \textit{Constant mean curvature surfaces in warped product manifolds}, Publ. Math. de l'IH$\grave{\rm E} $S. {\bf 117}(2013), 247-269. \bibitem{B19} {\sc S.\ Brendle}: \textit{The isoperimetric inequality for a minimal submanifold in Euclidean space}, J. Amer. Math. Soc. {\bf 32}(2), (2021), 595-603. \bibitem{B21}{\sc S.\ Brendle}: \textit{Sobolev inequalities in manifolds with nonnegative curvature}, Comm.Pure Appl. Math. (2022). \bibitem{BHW16}{\sc S.\ Brendle, \ P.\ -K.\ Hung {\rm and}\ M.\ -T.\ Wang}: \textit{A Minkowski inequality for hypersurfaces in the anti-de Sitter-Schwarzschild manifold}, Commun. Pure Appl. Math. {\bf 69}(1), (2016), 124-144. \bibitem{CM07}{\sc E.\ Cabezas-Rivas {\rm and}\ V.\ Miquel}: \textit{Volume preserving mean curvature flow in the hyperbolic space}, Indiana Univ. Math. J. {\bf56}(2007), no.5, 2061-2086. \bibitem{Ca10}{\sc X.\ Cabr$\acute{\rm e}$}: \textit{Regularity of minimizers of semilinear elliptic problems up to dimension 4}, Comm. Pure Appl. Math. {\bf 63}(2010) 1362-1380. \bibitem{CW2013}{\sc S-Y.\ A.\ Chang {\rm and}\ Y.\ Wang}: \textit{Inequalities for quermassintegrals on k-convex domains}, Adv. Math. {\bf 248}(2013) 335-377. \bibitem{CZ21}{\sc J.\ Cui {\rm and}\ P.\ Zhao}: \textit{Mean curvature type flow and Michael-Simon inequalities}, J.Funct. Anal. {\bf 286}(7), (2024), 110334. \bibitem{CZ24}{\sc J.\ Cui {\rm and}\ P.\ Zhao}: \textit{Michael-Simon type inequalities in hyperbolic space $\mathbb{H}^{n+1}$ via Brendle-Guan-Li’s flows}, Advanced Nonlinear Studies. {\bf 24}(3), (2024), 720-733. \bibitem{DHT10}{\sc U.\ Dierkes, \ S.\ Hildebrandt {\rm and}\ A.\ Tromba}: \textit{Global analysis of minimal surfaces },Revised and enlarged second edition, Grundlehrender Mathematischen Wissenschaften [Fundamental Principles of Mathematical Sciences], 341, Springer, Heidelberg, 2010. \bibitem{ES92}{\sc L.\ C.\ Evans {\rm and}\ J.\ Spruck}: \textit{Motion of level sets by mean curvature \uppercase\expandafter{\romannumeral 3}}, J. Geom. Anal. {\bf 2}(1992), no. 2, 121-150. \bibitem{G11} {\sc C.\ Gerhardt}: \textit{Inverse curvature flows in hyperbolic space}, J. Diff. Geom. {\bf 89}(2011), 47-94. 
\bibitem{MG81}{\sc M.\ Gromov}: \textit{Structures M$\acute{\rm e}$triques pour les Vari$\acute{\rm e}$t$\acute{\rm e}$s riemanniennes}, (Cedic Nathan, Paris, 1981). \bibitem{G2013}{\sc P.\ Guan}: \textit{Curvature measures, isoperimetric type inequalities and fully nonlinear PDES}, Fully Nonlinear PDEs in Real and Complex Geometry and Optics. Lecture Notes in Mathematics, 2013, pp.47-94. \bibitem{GL20}{\sc P.\ Guan {\rm and}\ J.\ Li}: \textit{Isoperimetric type inequalities and hypersurface flows}, J. Math. Study. {\bf 54}(1), (2021), 56-80. \bibitem{GL2015}{\sc P.\ Guan {\rm and}\ J.\ Li}: \textit{A mean curvature type flow in space forms}, Int. Math. Res. Not. {\bf 2015}(2015), 4716-4740. \bibitem{GL19}{\sc P.\ Guan, \ J. \ Li {\rm and}\ M.\ -T.\ Wang}: \textit{A volume preserving flow and the isoperimetric problem in warped product spaces}, Trans. Am. Math. Soc. {\bf 372}(2019), 2777-2798. \bibitem{GLW17}{\sc S.\ Guo, \ G. \ Li {\rm and}\ C.\ Wu}: \textit{Volume preserving flow by powers of the $m$th mean curvature in the hyperbolic space}, Comm. Anal. Geom. {\bf 25}(2), (2017), 321-372. \bibitem{HL21}{\sc Y.\ Hu {\rm and}\ H.\ Li}: \textit{Geometric inequalities for static convex domains in hyperbolic space}, Trans. Am. Math. {\bf 375}(8), (2022), 5587-5615. \bibitem{HLW20}{\sc Y.\ Hu, \ H.\ Li {\rm and}\ Y.\ Wei}: \textit{Locally constrained curvature flows and geometric inequalities in hyperbolic space}, Math. Ann. {\bf 382}(3), (2022), 1425-1474. \bibitem{JX19}{\sc S.\ Julian {\rm and}\ C.\ Xia}: \textit{Locally constrained inverse curvature flows}, Trans. Amer. Math. Soc. {\bf 372} (2019), no.10, 6771-6803. \bibitem{LSU}{\sc O.\ A.\ Ladyzenskaya, \ V.\ A.\ Solonnikov {\rm and}\ N.\ N.\ Ural'ceva}: \textit{Linear and Quasi-Linear Equations of Parabolic Type}, Translations of Mathematical Monographs, 23., Providence, RI: American Mathematical Society, 1968. \bibitem{LS20}{\sc B.\ Lambert {\rm and}\ J.\ Scheuer}: \textit{Isoperimetric problems for spacelike domains in generalized Robertson-Walker spaces}, J. Evol. Equ. {\bf 21}(2021), 377-389. \bibitem{HLX14}{\sc H.\ Li, \ Y.\ Wei {\rm and}\ C.\ Xiong}: \textit{A geometric inequality on hypersurface in hyperbolic space}, Adv. Math. {\bf 253}(2014), no.1, 152-162. \bibitem{Mak12}{\sc M.\ Makowski}: \textit{Mixed volume preserving curvature flows in hyperbolic space}, (2012), arXiv:1208.1898. \bibitem{MS1973}{\sc J.\ H.\ Michael {\rm and}\ L.\ M.\ Simon}: \textit{Sobolev and mean-value inequalities on generalized submanifolds of $\mathbb{R}^{n}$}, Comm. Pure Appl. Math. {\bf 26}(1973), 361-379 \bibitem{S19}{\sc J.\ Scheuer}: \textit{The Minkowski inequality in de Sitter space}, Pacific Journal of Mathematics. {\bf 314}(2), (2021), 425-449. \bibitem{SX19}{\sc J.\ Scheuer {\rm and} \ C. \ Xia}: \textit{Locally constrained inverse curvature flows}, Trans. Amer. Math. Soc. {\bf 372}(10), (2019), 6771-6803. \bibitem{SWX18}{\sc J.\ Scheuer, \ G. \ Wang {\rm and}\ C.\ Xia}: \textit{Alexandrov-Fenchel inequalities for convex hypersurfaces with fre boundary in a ball}, J. Differ. Geom. {\bf 120}(2), (2022), 345-373. \bibitem{WangXia14}{\sc G.\ Wang {\rm and}\ C.\ Xia}: \textit{Isoperimetric type problems and Alexandrov-Fenchel type inequalities in the hyperbolic space}, Adv. Math. {\bf 259}(13), (2014), 532-556. \bibitem{WX19}{\sc G.\ Wang {\rm and}\ C.\ Xia}: \textit{Guan-Li type mean curvature flow for free boundary hypersurfaces in a ball}, (2019), arXiv:1910.07253. 
\bibitem{WX}{\sc Y.\ Wei {\rm and}\ C.\ Xiong}: \textit{A volume-preserving anisotropic mean curvature type flow}, Indiana Univ. Math.J. {\bf 70}(2021), 881-906. \end{thebibliography} \end{document}
2205.15024v1
http://arxiv.org/abs/2205.15024v1
Counterexample to a conjecture about dihedral quandle
\newif\ifdraft \drafttrue \ifdraft \documentclass[11pt, reqno]{amsart} \usepackage{lmodern} \renewcommand{\familydefault}{\sfdefault} \usepackage[a4paper, margin=1in]{geometry} \usepackage[inline]{showlabels} \else \documentclass[reqno]{amsart} \usepackage{lmodern} \usepackage[a4paper, margin=.75in]{geometry} \usepackage{amsmath, amsthm, thmtools, amsfonts, amssymb, mathtools} \usepackage{pdflscape, blkarray, multirow, booktabs} \usepackage{amstext} \usepackage{array} \newcolumntype{L}{>{$}l<{$}} \usepackage[dvipsnames]{xcolor} \usepackage{hyperref} \hypersetup{ colorlinks = true, linkcolor = {Blue}, citecolor = {BrickRed}, } \usepackage{makecell} \input{macros} \begin{document} \allowdisplaybreaks \title[Counterexample to conjecture]{Counterexample to a conjecture about dihedral quandle} \author[S. Panja]{Saikat Panja} \address{Department of Mathematics, IISER Pune \\ Maharashtra, India} \email{[email protected]} \author[S. Prasad]{Sachchidanand Prasad} \address{Department of Mathematics and Statistics, IISER Kolkata \\ West Bengal, India} \email{[email protected]} \subjclass[2010]{} \keywords{} \begin{abstract} \input{abstract} \end{abstract} \subjclass[2020]{Primary: 20N02; Secondary: 20B25, 16S34, 17D99} \keywords{Quandle rings, Augmentation ideal} \date{\today} \maketitle \setcounter{tocdepth}{3} \frenchspacing \input{sec_intro} \input{sec_counterexample} \noindent\textbf{Acknowledgements:} The first author (Panja) acknowledges the support of NBHM PhD fellowship. The second author (Prasad) was supported by UGC (NET)-JRF fellowship. \bibliographystyle{alphaurl} \begin{thebibliography}{EFT19} \bibitem[BPS19]{BaPaSi19} Valeriy~G. Bardakov, Inder Bir~S. Passi, and Mahender Singh. \newblock Quandle rings. \newblock {\em J. Algebra Appl.}, 18(8):1950157, 23, 2019. \newblock \href {https://doi.org/10.1142/S0219498819501573} {\path{doi:10.1142/S0219498819501573}}. \bibitem[EFT19]{ElFeTs19} Mohamed Elhamdadi, Neranga Fernando, and Boris Tsvelikhovskiy. \newblock Ring theoretic aspects of quandles. \newblock {\em J. Algebra}, 526:166--187, 2019. \newblock \href {https://doi.org/10.1016/j.jalgebra.2019.02.011} {\path{doi:10.1016/j.jalgebra.2019.02.011}}. \end{thebibliography} \end{document} \newcommand{\R}{\textup{R}} \newcommand{\delr}[1]{\Delta^{#1}\left(\textup{R}_8\right)} \newcommand{\delrn}[2]{\Delta^{#1}\left(\textup{R}_{#2}\right)} \newcommand{\Z}{\mathbb{Z}} \newcommand{\e}[1]{e_{#1}} \newcommand{\dsum}{\oplus} \newcommand{\defeq}{\vcentcolon=} \newcommand{\eqdef}{=\vcentcolon} \theoremstyle{definition} \newtheorem{thm}{Theorem}[section] \newtheorem{lemma}[thm]{Lemma} \newtheorem*{conj}{Conjecture} \newtheorem{propositionX}{Proposition} \renewcommand{\thepropositionX}{\Alph{propositionX}} It was conjectured that the augmentation ideal of a dihedral quandle of even order $n>2$ satisfies $|\Delta^k(\R_n)/\Delta^{k+1}(\R_{n})|=n$ for all $k\ge 2$. In this article we provide a counterexample against this conjecture. \section{Introduction} \label{sec:introduction} A \textit{quandle} is a pair $(A,\cdot)$ such that `$\cdot$' is a binary operation satisfying \begin{enumerate} \item the map $S_a:A\longrightarrow A$, defined as $S_a(b)=b\cdot a$ is an automorphism for all $a\in A$, \item for all $a\in A$, we have $S_a(a)=a$. \end{enumerate} \noindent To have a better understanding of the structure, a theory parallel to group rings was introduced by Bardakov, Passi and Singh in \cite{BaPaSi19}. Let $\Z_n$ denote the cyclic group of order $n$. 
Then the operation $a\cdot b=2b-a$ defines a quandle structure on $A=\Z_n$. This is known as the \textit{dihedral quandle}. For other examples see \cite{BaPaSi19}. The quandle ring of a quandle $A$ is defined as follows. Let $R$ be a commutative ring. Consider \begin{displaymath} R[A] \defeq \left\{\sum_{i}r_ia_i: r_i\in R,a_i\in A \right\}. \end{displaymath} Then this is an additive group in the usual way. Define multiplication as \begin{displaymath} \left(\sum_{i}r_ia_i\right)\cdot \left(\sum_{j}s_ja_j\right) \defeq \sum_{i,j}r_is_j(a_i\cdot a_j). \end{displaymath} The \textit{augmentation ideal} $\Delta_R(A)$ of $R[A]$ is defined as the kernel of the augmentation map \begin{displaymath} \varepsilon :R[A]\to R,~\sum_{i}r_ia_i \mapsto \sum_{i} r_i. \end{displaymath} The powers $\Delta^k_R(A)$ are defined as $\left(\Delta_R(A)\right)^k$. When $R=\Z$, we will omit the subscript $R$. The following proposition gives a basis for $\Delta_R(A)$. \begin{propositionX}\cite[Proposition 3.2, Page 6]{BaPaSi19} \label{prop:basis} A basis of $\Delta_R(A)$ as an $R$-module is given by $\{a-a_0:a\in A\setminus\{a_0\}\}$, where $a_0\in A$ is a fixed element. \end{propositionX} The following has been conjectured in \cite[Conjecture 6.5, Page 20]{BaPaSi19}. \begin{conj} Let $\R_n=\{a_0,a_1,\cdots,a_{n-1}\}$ denote the dihedral quandle of order $n$. Then we have the following statements. \begin{enumerate} \item For an odd integer $n>1$, $\delrn{k}{n}/\delrn{k+1}{n}\cong \Z_n$ for all $k\ge 1$. \item For an even integer $n> 2$, $\left|\delrn{k}{n}/\delrn{k+1}{n}\right|=n$ for $k\ge 2$. \end{enumerate} The first statement has been confirmed by Elhamdadi, Fernando and Tsvelikhovskiy in \cite[Theorem 6.2, Page 182]{ElFeTs19}. The second statement holds true for $n=4$, see \cite{BaPaSi19}. Here we give a counterexample in \autoref{thm:mainTheorem} to show that the second statement is not true in general. \end{conj} \section{Counterexample}\label{sec:counterexample} \begin{thm} \label{thm:mainTheorem} Let $\R_8$ be the dihedral quandle of order $8$. Then \begin{displaymath} \left|\Delta^2\left(\R_8\right)/\Delta^3\left(\R_8\right)\right|= 16. \end{displaymath} \end{thm} \noindent From \autoref{prop:basis}, we get that $\{e_i=a_i-a_0:i=1,2,\cdots, n-1\}$ is a basis for $\delrn{}{n}$. We will be using this notation in the subsequent computation. \begin{lemma}\label{lemma:multiplictionWith_e4} Let $\R_{2k}$ denote the dihedral quandle of order $2k~(k\ge 2)$. Then $e_i \cdot e_k=0$ for all $i=1,2,\cdots, 2k-1$. \end{lemma} \begin{proof} Observe that \begin{align*} e_i \cdot e_k & = \left(a_i -a_0\right) \cdot \left(a_k-a_0\right) \\ & = a_{2k-i}-a_{2k-i}-a_0+a_0=0. \end{align*} \end{proof} \begin{lemma}\label{lemma:multiplictionSymmetry} Let $\R_{2k}$ denote the dihedral quandle of order $2k~(k\ge 2)$. Then $e_i\cdot e_j = e_i \cdot e_{k+j}$ for all $j=1,2,\cdots,k-1$ and for all $i=1,2,\cdots,2k-1$. \end{lemma} \begin{proof} Note that \begin{align*} e_i \cdot e_{k+j} & = a_ia_{k+j}-a_ia_0-a_0a_{k+j}+a_0 \\ & = a_i a_j - a_i a_0 -a_0a_j+a_0 \\ & = e_i \cdot e_j. \end{align*} \end{proof} \noindent We will use \autoref{lemma:multiplictionWith_e4} and \autoref{lemma:multiplictionSymmetry} to simplify the multiplication tables. \begin{proof}[Proof of \autoref{thm:mainTheorem}] Recall that a basis of $\delr{}$ is given by $\mathcal{B}_1=\{e_1,e_2,\cdots,e_7\}$.
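Each entry of the multiplication table below is computed directly from $a_i\cdot a_j=a_{2j-i}$ (indices modulo $8$); for instance, \begin{align*} e_1\cdot e_1 &= (a_1-a_0)\cdot (a_1-a_0) = a_1\cdot a_1-a_1\cdot a_0-a_0\cdot a_1+a_0\cdot a_0\\ &= a_1-a_7-a_2+a_0 = e_1-e_2-e_7, \end{align*} and, in general, $e_i\cdot e_j=e_{2j-i}-e_{-i}-e_{2j}$, where the indices are read modulo $8$ and $e_0\defeq 0$.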
The multiplication table for the products $e_i\cdot e_j$ is given as follows: \begin{center} \begin{displaymath} \begin{array}{|c|c|c|c|} \hline & e_1 & e_2 & e_3 \\ \hline e_1 & e_1-e_2-e_7 & e_3-e_4-e_7 & e_5-e_6-e_7 \\ \hline e_2 & -e_2-e_6 & e_2-e_4-e_6 & e_4-2e_6 \\ \hline e_3 & -e_2-e_5+e_7 & e_1-e_4-e_5& e_3-e_5-e_6 \\ \hline e_4 & -e_2-e_4+e_6 & -2e_4 & e_2 - e_4- e_6 \\ \hline e_5 & -e_2-e_3+e_5 & -e_3-e_4+e_7 & e_1-e_3-e_6 \\ \hline e_6 & -2e_2 + e_4 & -e_2 - e_4 + e_6 & -e_2-e_6 \\ \hline e_7 & -e_1-e_2 + e_3 & -e_1-e_4+e_5 & -e_1-e_6+e_7 \\ \hline \end{array} \end{displaymath} \end{center} Since $\delr{2}$ is generated by $e_i\cdot e_j$ as a $\Z$-module, using row reduction over $\Z$ one can show that a $\Z$-basis is given by \begin{align*} \mathcal{B}_2 = & \left\{u_1 = \e{1}-\e{2}-\e{7}, u_2 = \e{2}+\e{6}, u_3= \e{3}-\e{4}-\e{7},\right. \\ & \kern .5cm \left.u_4 = \e{4}+2\e{6}, u_5 = \e{5}-\e{6}-\e{7}, u_6 = 4\e{6} \right\}. \end{align*} We now want to express a $\Z$-basis of $\delr{3}$ in terms of $\mathcal{B}_2$. First we calculate the products $u_i\cdot e_j$. This is presented in the following table. \begin{center} \begin{displaymath} \begin{array}{|c|c|c|c|} \hline & e_1 & e_2 & e_3 \\ \hline u_1 & \makecell{2e_1 + e_2 -e_3 \\ +e_6 -e_7} & \makecell{e_1 -e_2 +e_3 \\+e_4 -e_5 +e_6 -e_7 }& \makecell{e_1 -e_4 +e_5 \\ +2e_6 -2e_7} \\ \hline u_2 & -3e_2+e_4 -e_6 & -2e_4 & -e_2 +e_4 -3e_6 \\ \hline u_3 & \makecell{e_1+e_2-e_3\\+e_4-e_5-e_6+e_7} & 2e_1+2e_4-2e_5& \makecell{e_1-e_2+e_3+e_4 \\-e_5 +e_6 -e_7} \\ \hline u_4 & -5e_2+e_4+e_6 & -2e_2-4e_4+2e_6 & -e_2-e_4 -3e_6 \\ \hline u_5 & \makecell{e_1+2e_2-2e_3\\-e_4+e_5} & \makecell{e_1+e_2-e_3+e_4\\-e_5-e_6+e_7} & 2e_1+e_2-e_3+e_6-e_7 \\ \hline u_6 & -8e_2+4e_4 & -4e_2-4e_4+4e_6 & -4e_2-4e_6 \\ \hline \end{array} \end{displaymath} \end{center} \noindent Hence, a $\Z$-basis for $\delr{3}$ is given by \begin{align*} \mathcal{B}_3 & = \left\{v_1 = e_1-e_2+e_3+e_4-e_5+e_6-e_7, v_2 = e_2 - e_3 -2e_4+2e_5+e_6-e_7, \right. \\ & \kern 0.5cm \left. v_3 = -e_3-e_4+2e_5-2e_6-e_7, v_4 = -2e_4, v_5 = -4e_5-4e_6 + 4e_7, v_6 = 8e_6 \right\}. \end{align*} Now we will present the elements of $\mathcal{B}_3$ in terms of $\mathcal{B}_2$. We have the following presentation. \begin{displaymath} \begin{array}{c c c c c c c c} v_1 & = & u_1 & & & + 2u_4 & -u_5 & -u_6 \\ v_2 & = & & u_2 & -u_3 & - u_4 & + 2u_5 & + u_6 \\ v_3 & = & & & -u_3 & -2u_4 & +2u_5 & +u_6 \\ v_4 & = & & & & 2u_4 & & -u_6\\ v_5 & = & & & & & -4u_5 \\ v_6 & = & & & & & & 2u_6. \end{array} \end{displaymath} Note that we can alter the basis $\mathcal{B}_2$ of $\delr{2}$ as follows: \begin{align*} & \left\{u_1+2u_4-u_5-u_6, u_2-u_3-u_4+2u_5+u_6, u_3+2u_4-2u_5-u_6, u_4, u_5, u_6 \right\}. \end{align*} Hence, \begin{align*} \dfrac{\delr{2}}{\delr{3}} & \cong \dfrac{\Z v_1\dsum \Z v_2 \dsum \Z v_3 \dsum \Z u_4\dsum \Z u_5 \dsum \Z u_6}{\Z v_1\dsum \Z v_2 \dsum \Z v_3 \dsum \Z (2u_4-u_6)\dsum \Z (-4u_5) \dsum \Z (2u_6)} \\ & \cong \Z_4\dsum \dfrac{\Z u_4 \dsum \Z u_6}{\Z (2u_4-u_6) \dsum \Z (2u_6)} \\ & \cong \Z_4 \dsum \dfrac{\Z u_4 \dsum \Z u_6}{\Z u_4 \dsum \Z (4u_6)} \\ & \cong \Z_4 \dsum \Z_4. \end{align*} \end{proof}
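\medskip
\noindent\textbf{Remark.} The computation above can also be checked by machine. The following sketch, written in Python and assuming SymPy's \texttt{smith\_normal\_form}, spans $\delr{2}$ by the products $e_i\cdot e_j$, spans $\delr{3}$ by the products $(e_i\cdot e_j)\cdot e_k$ as in the proof above, and computes $\left|\delr{2}/\delr{3}\right|$ as the ratio of the products of the nonzero invariant factors of the two generating matrices (the two sublattices of $\Z^{7}$ have the same rank and hence the same saturation).
\begin{verbatim}
from itertools import product
from math import prod
from sympy import Matrix, ZZ
from sympy.matrices.normalforms import smith_normal_form

n = 8  # order of the dihedral quandle R_8

def basis_product(i, j):
    # e_i * e_j = e_{2j-i} - e_{-i} - e_{2j}, indices mod n, with e_0 = 0
    v = [0] * (n - 1)
    for idx, sgn in ((2 * j - i, 1), (-i, -1), (2 * j, -1)):
        if idx % n != 0:
            v[idx % n - 1] += sgn
    return v

def mult(u, w):
    # bilinear extension of basis_product to integer coefficient vectors
    out = [0] * (n - 1)
    for i, j in product(range(1, n), repeat=2):
        c = u[i - 1] * w[j - 1]
        if c:
            for t, x in enumerate(basis_product(i, j)):
                out[t] += c * x
    return out

def invariants(rows):
    # rank and product of the nonzero invariant factors of the row span
    snf = smith_normal_form(Matrix(rows), domain=ZZ)
    d = [abs(snf[k, k]) for k in range(min(snf.shape)) if snf[k, k] != 0]
    return len(d), prod(d)

e = [[1 if t == i else 0 for t in range(n - 1)] for i in range(n - 1)]
delta2 = [mult(a, b) for a in e for b in e]       # spans Delta^2(R_8)
delta3 = [mult(u, c) for u in delta2 for c in e]  # spans Delta^3(R_8), as in the proof

r2, p2 = invariants(delta2)
r3, p3 = invariants(delta3)
assert r2 == r3        # equal ranks, so the quotient is finite
print(p3 // p2)        # expected output: 16
\end{verbatim}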
2205.12457v1
http://arxiv.org/abs/2205.12457v1
Eigenvalues of the laplacian matrices of the cycles with one weighted edge
\documentclass[11pt]{article} \usepackage[spanish,english]{babel} \usepackage[utf8]{inputenc} \usepackage[width=15.5cm,height=21cm]{geometry} \usepackage{titling} \usepackage{tikz} \usepackage{amsmath,amsfonts,amssymb,mathtools} \usepackage{comment} \usepackage{enumitem} \usepackage{amsthm} \newtheorem{thm}{Theorem} \newtheorem{prop}[thm]{Proposition} \newtheorem{lem}[thm]{Lemma} \newtheorem{cor}[thm]{Corollary} \newtheorem{definition}{Definition} \newtheorem{remark}[thm]{Remark} \usepackage[colorlinks=true,allcolors=blue]{hyperref} \newcommand{\doi}[1]{\href{https://doi.org/#1}{doi:#1}} \newcommand{\myurl}[1]{\href{#1}{#1}} \newenvironment{egornote}{\color{blue}\medskip\noindent}{} \newenvironment{sotonote}{\color{violet}\medskip\noindent}{} \newcommand{\pheq}{\phantom{=}\ } \newcommand{\eqdef}{\coloneqq} \newcommand{\al}{\alpha} \newcommand{\om}{\omega} \newcommand{\ga}{\gamma} \newcommand{\be}{\beta} \newcommand{\de}{\delta} \newcommand{\eps}{\varepsilon} \newcommand{\si}{\sigma} \newcommand{\ka}{\varkappa} \newcommand{\La}{\Lambda} \newcommand{\la}{\lambda} \renewcommand{\phi}{\varphi} \newcommand{\Tht}{\Theta} \newcommand{\tht}{\vartheta} \newcommand{\bC}{\mathbb{C}} \newcommand{\bN}{\mathbb{N}} \newcommand{\bR}{\mathbb{R}} \newcommand{\bT}{\mathbb{T}} \newcommand{\bZ}{\mathbb{Z}} \newcommand{\cZ}{\mathcal{Z}} \newcommand{\cK}{\mathcal{K}} \newcommand{\cU}{\mathcal{U}} \newcommand{\cV}{\mathcal{V}} \newcommand{\fU}{\mathfrak{U}} \newcommand{\arccosh}{\operatorname{arccosh}} \newcommand{\arcsinh}{\operatorname{arcsinh}} \newcommand{\arctanh}{\operatorname{arctanh}} \newcommand{\sech}{\operatorname{sech}} \newcommand{\csch}{\operatorname{csch}} \newcommand{\spc}{\operatorname{sp}} \newcommand{\clos}{\operatorname{cl}} \renewcommand{\Re}{\operatorname{Re}} \renewcommand{\Im}{\operatorname{Im}} \newcommand{\laasympt}{\lambda^{\operatorname{asympt}}} \newcommand{\laasymptN}{\lambda^{\operatorname{asympt-NR}}} \newcommand{\laaN}{\lambda^{\operatorname{NR}}} \newcommand{\medstrut}{\vphantom{\int_0^1}} \newcommand{\hstrut}{\mbox{}\ \mbox{}} \newcommand{\bigstrut}{\vphantom{\int_{0_0}^{1^1}}} \newcommand{\rmA}{\mathrm{a}} \newcommand{\rmB}{\mathrm{b}} \newcommand{\rmC}{\mathrm{c}} \newcommand{\rmD}{\mathrm{d}} \definecolor{mygreen}{rgb}{0.0,0.75,0.0} \title{Eigenvalues of the laplacian matrices \\ of the cycles with one weighted edge} \author{Sergei M. Grudsky, Egor A. Maximenko and Alejandro Soto-Gonz\'alez} \date{\today\vspace{-6em}} \begin{document} \maketitle \let\thefootnote\relax \footnote{Sergei M. Grudsky, CINVESTAV del IPN, Departamento de Matem\'aticas, Apartado Postal 07360, Ciudad de M\'exico, Mexico. \href{mailto:[email protected]}{[email protected]}, \myurl{https://orcid.org/0000-0002-3748-5449},\\ \myurl{https://publons.com/researcher/2095797/sergei-m-grudsky}. \vspace*{0.5em} } \footnote{Egor A. Maximenko, Instituto Polit\'ecnico Nacional, Escuela Superior de F\'isica y Matem\'aticas, Apartado Postal 07730, Ciudad de M\'exico, Mexico. \href{mailto:[email protected]}{[email protected]}, \myurl{https://orcid.org/0000-0002-1497-4338}. \vspace*{0.5em} } \footnote{Alejandro Soto-Gonz\'{a}lez, CINVESTAV del IPN, Departamento de Matem\'aticas, Apartado Postal 07360, Ciudad de M\'exico, Mexico. \href{mailto:[email protected]}{[email protected]}, \myurl{https://orcid.org/0000-0003-2419-4754}. 
} \footnote{ \medskip \textbf{Funding.} \medskip The research of the first author has been supported by CONACYT (Mexico) project ``Ciencia de Frontera'' FORDECYT-PRONACES/61517/2020 and by Regional Mathematical Center of the Southern Federal University with the support of the Ministry of Science and Higher Education of Russia, Agreement 075-02-2021-1386. \medskip The research of the second author has been supported by CONACYT (Mexico) project ``Ciencia de Frontera'' FORDECYT-PRONACES/61517/2020 and IPN-SIP projects (Instituto Polit\'{e}cnico Nacional, Mexico). \medskip The research of the third author has been supported by CONACYT (Mexico) PhD scholarship. } \begin{abstract} In this paper we study the eigenvalues of the laplacian matrices of the cyclic graphs with one edge of weight $\alpha$ and the others of weight $1$. We denote by $n$ the order of the graph and suppose that $n$ tends to infinity. We notice that the characteristic polynomial and the eigenvalues depend only on $\operatorname{Re}(\alpha)$. After that, through the rest of the paper we suppose that $0<\alpha<1$. It is easy to see that the eigenvalues belong to $[0,4]$ and are asymptotically distributed as the function $g(x)=4\sin^2(x/2)$ on $[0,\pi]$. We obtain a series of results about the individual behavior of the eigenvalues. First, we describe more precisely their localization in subintervals of $[0,4]$. Second, we transform the characteristic equation to a form convenient to solve by numerical methods. In particular, we prove that Newton's method converges for every $n\ge3$. Third, we derive asymptotic formulas for all eigenvalues, where the errors are uniformly bounded with respect to the number of the eigenvalue. \medskip \textbf{Keywords}: eigenvalue, laplacian matrix, weighted cycle, periodic Jacobi matrix, Toeplitz matrix, tridiagonal matrix, perturbation, asymptotic expansion. \medskip \textbf{Mathematics Subject Classification (2020)}: 05C50, 15B05, 47B36, 15A18, 41A60, 65F15, 82B20. \end{abstract} \clearpage \tableofcontents \section{Introduction} For every natural $n\ge 3$ and every real $\al$, we denote by $G_{\al,n}$ the cyclic graph of order $n$, where the edge between the vertices $1$ and $n$ has weight $\al$, and all other edges have weights $1$. See Figure~\ref{fig:graph} for $n=7$. \begin{figure}[th] \centering \begin{tikzpicture} \foreach \j/\k in {0/1,1/2,2/3,3/4,4/5,5/6,6/7} { \node (N\j) at (\j*360/7:2cm) [draw, circle] {$\k$}; } \foreach \j/\k in {0/1,1/2,2/3,3/4,4/5,5/6,6/0} { \draw (N\j) -- (N\k); } \foreach \j in {0,1,2,3,4,5} { \node at (\j*360/7 + 360/14:1.95cm) {$\scriptstyle 1$}; } \node at (-360/14:1.95cm) {$\scriptstyle\al$}; \end{tikzpicture} \caption{Graph $G_{\al,7}$\label{fig:graph}} \end{figure} \noindent Let $L_{\al,n}$ be the laplacian matrix of $G_{\al,n}$. For example, \begin{equation}\label{eq:lapl_matrix} L_{\al,7} = \begin{bmatrix*}[r] 1+\al & -1 & 0 & 0 & 0 & 0 & -\al\phantom{\al} \\ -1\phantom{\al} & 2 & -1 & 0 & 0 & 0 & 0\phantom{\al} \\ 0\phantom{\al} & -1 & 2 & -1 & 0 & 0 & 0\phantom{\al} \\ 0\phantom{\al} & 0 & -1 & 2 & -1 & 0 & 0\phantom{\al} \\ 0\phantom{\al} & 0 & 0 & -1 & 2 & -1 & 0\phantom{\al} \\ 0\phantom{\al} & 0 & 0 & 0 & -1 & 2 & -1\phantom{\al} \\ -\al\phantom{\al} & 0 & 0 & 0 & 0 & -1 & 1+\al \end{bmatrix*}. 
\end{equation} The spectral decomposition of $L_{\al,n}$ is crucial to solve the heat and wave equations on the graph $G_{\al,n}$, i.e., the linear systems of differential equations of the form $f'(t)=-c L_{\al,n} f(t)$ and $f''(t)=c L_{\al,n} f(t)$, where $f(t)=[f_j(t)]_{j=1}^n$ and $c$ is some coefficient. Moreover, laplacian matrices appear in the study of random walks on graphs, electrical flows, network dynamics, and many other physical phenomena; see, e.g.~\cite{M2012}. The matrices $L_{\al,n}$ can also be viewed as periodic Jacobi matrices and as real symmetric Toeplitz matrices with perturbations on the corners $(1,1)$, $(1,n)$, $(n,1)$, and $(n,n)$. The eigenvalues are explicitly known only for some very special matrix families from these classes; mainly when the eigenvectors are the columns of the DCT or DST matrices~\cite{BYR2006}. Over the past decade, there has been an increasing interest in Toeplitz matrices with certain perturbations, see~\cite{BPZ2020,BFGM2014,BYR2006,FK2020,DV2009,GT2009,OA2014,R2017,TS2017,VHB2018,ZJJ2019}, or~\cite{KST1999,NR2019,SM2014,W2008} for more general research. In~\cite{FK2020,DV2009} the authors find the characteristic polynomial for some cases of Toeplitz matrices with corner perturbations. The methods used in the present paper are similar to the ones from~\cite{GMS2021}, where we studied the hermitian tridiagonal Toeplitz matrices with perturbations in the positions $(1,n)$ and $(n,1)$. The asymptotic distribution of the eigenvalues of hermitian Toeplitz matrices with small-rank perturbations is described by analogs of the Szeg\H{o} theorem~\cite{GS2017,Tilli1998,Tyrtyshnikov1996}. The individual behavior of the eigenvalues is known only for some particular cases, including hermitian Toeplitz matrices with simple-loop symbols~\cite{BBGM2018,BGM2017,BBGM2015,BBGM2017}. In~\cite{GMS2021} we studied the eigenvalues of the hermitian tridiagonal Toeplitz matrices with diagonals $-1,2,-1$ and values $-\al$ and $-\overline{\al}$ on the corners $(n,1)$ and $(1,n)$, respectively. In the present paper, we put $1+\al$ instead of $2$ in the entries $(1,1)$ and $(n,n)$. The matrices $L_{\al,n}$ are real and symmetric, thus their eigenvalues are real. We enumerate them in ascending order: \begin{equation}\label{eq:eigvals_order} \la_{\al,n,1} \leq\la_{\al,n,2} \leq\cdots \leq\la_{\al,n,n}. \end{equation} It is well known that every laplacian matrix has eigenvalue $0$ associated to the eigenvector $[1,\ldots,1]^\top$. For $\al=0$, the eigenvalues of $L_{0,n}$ are $\la_{0,n,j} = g((j-1)\pi/n)$, where $g$ is defined by \begin{equation}\label{eq:g_main} g(x) \eqdef 2-2\cos(x) = 4\sin^2\frac{x}{2}, \quad x\in[0,\pi]. \end{equation} The normalized eigenvectors of $L_{0,n}$ are the columns of the matrix DCT-II, see~\cite[formulas (2.53) and (2.54)]{BYR2006}. For $\al=1$, the matrices $L_{1,n}$ are circulant, and their eigenvalues and eigenvectors are well known, see, e.g.~\cite{GMS2021}. It is also well known that the eigenvalues of tridiagonal real symmetric Toeplitz matrices $T_n(g)$ generated by $g$ are $g(j\pi/(n+1))$. Except for the cases $\al=0$, $\al=1$, and $\al=1/2$ (see Remark~\ref{rem:case_al=1/2}), we do not know explicit formulas for all eigenvalues of $L_{\al,n}$. For $\al<0$ (resp., $\al>1$), it can be shown that the first (resp., last) eigenvalue goes out of the interval $[0,4]$ and tends exponentially to $4\al^2/(2\al-1)$. We are going to present the corresponding results in another paper. In this paper we suppose that $0<\al<1$.
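This setting is easy to reproduce numerically. The following minimal sketch (assuming NumPy; the helper \texttt{laplacian} is purely illustrative) builds $L_{\al,n}$ as in~\eqref{eq:lapl_matrix} and illustrates that the spectrum lies in $[0,4]$ and follows the distribution of $g$ on $[0,\pi]$.
\begin{verbatim}
import numpy as np

def laplacian(alpha, n):
    """Laplacian of the cycle of order n with the edge {1, n} of weight alpha."""
    L = 2.0 * np.eye(n)
    for k in range(n - 1):                  # edges {k+1, k+2} of weight 1
        L[k, k + 1] = L[k + 1, k] = -1.0
    L[0, -1] = L[-1, 0] = -alpha            # edge {1, n} of weight alpha
    L[0, 0] = L[-1, -1] = 1.0 + alpha
    return L

g = lambda x: 4.0 * np.sin(x / 2.0) ** 2

alpha, n = 0.3, 200
lam = np.linalg.eigvalsh(laplacian(alpha, n))      # eigenvalues in ascending order
print(lam.min(), lam.max())                        # the spectrum lies in [0, 4]
# empirical eigenvalue counting function vs. the distribution of g on [0, pi]
y = 1.0
print(np.mean(lam <= y),
      np.mean(g(np.linspace(0.0, np.pi, 10 ** 5)) <= y))
\end{verbatim}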
Our matrices $L_{\al,n}$ can be obtained by small-rank perturbations from $T_n(g)$, $L_{0,n}$ or $L_{1,n}$. The Cauchy interlacing theorem or the theory of locally Toeplitz sequences \cite{GS2017,Tilli1998,Tyrtyshnikov1996} easily implies that the eigenvalues of $L_{\al,n}$ are asymptotically distributed as the values of $g$ on $[0,\pi]$, as $n$ tends to infinity. We obtain much more precise results about the eigenvalues of $L_{\al,n}$. Namely, we find exact eigenvalues of the form $g((j-1) \pi /n)$, with $j$ odd, and localize the other eigenvalues in the intervals of the form $(g((j-1)\pi/n), g(j\pi/n))$ with $j$ even. We transform the characteristic equation to the form $x = f_{\al,n,j}(x)$, where $f_{\al,n,j}$ is ``slow'', i.e., the derivative of $f_{\al,n,j}$ is small when $n$ is large. After that, this equation is convenient to solve by the fixed point method and by Newton's method (also known as the Newton--Raphson method). On this basis, we derive asymptotic formulas for all eigenvalues $\la_{\al,n,j}$, where the errors are uniformly bounded with respect to $j$. For $\al$ in $\bC$, we consider the $n\times n$ complex laplacian matrix $L_{\al,n}$, for example, \begin{equation} \label{eq:lapl_matrix_complex} L_{\al,7} = \begin{bmatrix*}[r] 1+\overline{\al}& -1 & 0 & 0 & 0 & 0 & -\overline{\al}\phantom{\al} \\ -1\phantom{\al} & 2 & -1 & 0 & 0 & 0 & 0\phantom{\al} \\ 0\phantom{\al} & -1 & 2 & -1 & 0 & 0 & 0\phantom{\al} \\ 0\phantom{\al} & 0 & -1 & 2 & -1 & 0 & 0\phantom{\al} \\ 0\phantom{\al} & 0 & 0 & -1 & 2 & -1 & 0\phantom{\al} \\ 0\phantom{\al} & 0 & 0 & 0 & -1 & 2 & -1\phantom{\al} \\ -\al\phantom{\al} & 0 & 0 & 0 & 0 & -1 & 1+\al \end{bmatrix*}. \end{equation} These matrices appear in the study of problems related to networked multi-agent systems, see~\cite{LWHF2014} for investigations in this area. In Proposition~\ref{prop:L_char_pol_via_Cheb} we prove that the characteristic polynomial of $L_{\al,n}$ only depends on $\Re(\al)$, i.e., $\det(\la I_n - L_{\al,n}) = \det(\la I_n - L_{\Re(\al),n})$. We present the main results of this paper in Section~\ref{sec:Main_results}; the corresponding proofs are given in Section~\ref{sec:localization_eigenvalues} (localization), Section~\ref{sec:main_equation} (main equation), Section~\ref{sec:fixed_point} (fixed point method), Sections~\ref{sec:Newton_convex} and~\ref{sec:solve_by_Newton} (Newton's method), Section~\ref{sec:asymptotic_formulas} (asymptotic formulas), and Section~\ref{sec:eigvec_norm} (norms of the eigenvectors). In Section~\ref{sec:trid_toep_corner_per} we give formulas for the characteristic polynomial and eigenvectors of general tridiagonal symmetric Toeplitz matrices with perturbations in the corners $(1,1)$, $(1,n)$, $(n,1)$ and $(n,n)$; our formulas are equivalent to those of Yueh and Cheng~\cite{YuehCheng2008}. In Section~\ref{sec:num_exp} we show the results of some numerical tests. \section{Main results}\label{sec:Main_results} We treat $\al$ as a fixed parameter, supposing that $0<\al<1$. It is well known that $0$ is the least eigenvalue of $L_{\al,n}$. A direct application of the Gershgorin disks theorem~\cite[Theorem 6.1.1]{HornJohnson2013} shows that all eigenvalues of $L_{\al,n}$ belong to $[0,4]$. However, we give a more precise localization.
\begin{thm}[eigenvalues' localization]\label{thm:Localization_weak_eigenvals} For every $n\ge3$, \begin{align}\label{eq:localization_eigvals_weak_odd} \la_{\al,n,j}& = g\left(\frac{(j-1)\pi}{n}\right) \quad (j\ \text{odd},\ 1\le j\le n), \\\label{eq:localization_eigvals_weak_even} g\left(\frac{(j-1)\pi}{n}\right) &< \la_{\al,n,j}<g\left(\frac{j\pi}{n}\right)\quad (j\ \text{even},\ 1\le j\le n). \end{align} \end{thm} In particular, Theorem~\ref{thm:Localization_weak_eigenvals} implies that $\la_{\al,n,j}$ with odd $j$ does not depend on $\al$. Motivated by Theorem~\ref{thm:Localization_weak_eigenvals}, we use $g$ as a change of variable in the characteristic equation and put \[ d_{n,j}\eqdef\frac{(j-1)\pi}{n},\qquad \tht_{\al,n,j}\eqdef \widetilde{g}^{-1}(\la_{\al,n,j}), \] where $\widetilde{g}\colon[0,\pi]\to[0,4]$ is a restriction of $g$. In other words, the numbers $\tht_{\al,n,j}$ belong to $[0,\pi]$ and satisfy $g(\tht_{\al,n,j})=\la_{\al,n,j}$. Then~\eqref{eq:localization_eigvals_weak_odd} and~\eqref{eq:localization_eigvals_weak_even} are equivalent to \begin{equation*} \begin{aligned} \,&\tht_{\al,n,j} = d_{n,j}\qquad & & (j\ \text{odd},\ 1\le j\le n),\\[1ex] d_{n,j}< \,&\tht_{\al,n,j}<d_{n,j+1}\qquad & &(j\ \text{even},\ 1\le j\le n). \end{aligned} \end{equation*} We define $\eta_\al\colon[0,\pi]\to\bR$ by \begin{equation}\label{eq:eta} \eta_\al(x)\eqdef 2\arctan\left(\ka_\al\cot\frac{x}{2}\right), \end{equation} where \begin{equation}\label{eq:ka} \ka_\al\eqdef\frac{\al}{1-\al}. \end{equation} Obviously, $\eta_\al$ strictly decreases taking values from $\pi$ to $0$. Furthermore, $\eta_\al$ is strictly convex when $0<\al<1/2$ and strictly concave if $1/2<\al<1$. Other equivalent formulas for $\eta_\al$ are given in~\eqref{eq:eta1}, \eqref{eq:eta2}, and \eqref{eq:eta3}. A direct computation shows that $\eta_\al$ is an involution of the segment $[0,\pi]$, i.e., $\eta_\al(\eta_\al(x))=x$ for every $x$ in $[0,\pi]$. This property is not used in the paper. See~\cite{Z2014} for the general description of the continuous involutions of real intervals. \begin{thm}[main equation]\label{thm:weak_characteristic_equation_L} Let $n\ge3$ and $j$ be even, $1\le j\le n$. Then the number $\tht_{\al,n,j}$ is the unique solution of the following equation on $[0,\pi]$: \begin{equation}\label{eq:main_eq} x=d_{n,j}+\frac{\eta_\al(x)}{n}. \end{equation} \end{thm} Figure~\ref{fig:theta_vs_f} shows the left-hand side and the right-hand side of~\eqref{eq:main_eq}. \begin{figure}[htb] \centering \includegraphics{theta_vs_f_01.pdf}\quad\qquad \includegraphics{theta_vs_f_02.pdf} \caption{The left picture shows the left-hand side (green) and the right-hand side (blue) of~\eqref{eq:main_eq} for $\al=1/3$, $n=5$, $j=2,4$. The right picture corresponds to $\al=4/5$, $n=6$, $j=2,4,6$. \label{fig:theta_vs_f}} \end{figure} The main equation can be rewritten in the form $nx-(j-1)\pi = \eta_\al(x)$. Figure~\ref{fig:nx_plus_jpi_eq_eta} shows both sides of this equation for some values of $\al$ and $n$, $j$. For every $j$ with $1\le j\le n$, we define $I_{n,j} \eqdef \left(\frac{(j-1)\pi}{n}, \frac{j\pi}{n}\right)$. \begin{figure}[hbt] \centering \includegraphics{eta_vs_sline_1div3_n5.pdf}\quad\qquad\includegraphics{eta_vs_sline_4div5_n6.pdf} \caption{Plots of $x\mapsto nx - (j-1)\pi$ (green) and $\eta_\al$ (blue), for $\al=1/3$, $n=5$ (left) and $\al=4/5$, $n=6$ (right). 
\label{fig:nx_plus_jpi_eq_eta} } \end{figure} For every $n\ge3$ and every $j$ even with $1\le j\le n$, we define $h_{\al,n,j}\colon \clos(I_{n,j})\to \bR$ by \begin{equation}\label{eq:h_char} h_{\al,n,j}(x)\eqdef nx-(j-1)\pi-\eta_\al(x). \end{equation} In Proposition~\ref{prop:h_change_sign} we show that $h_{\al,n,j}$ changes its sign in $I_{n,j}$. Hence, it is feasible to solve~\eqref{eq:main_eq} by the bisection method or the false position (\emph{regula falsi}) method. In Proposition~\ref{prop:dependence_on_the_parameter} we study the dependence of $\la_{\al,n,j}$ on the parameter $\al$ (with $n$ and $j$ fixed). Proposition~\ref{prop:Z_contractive_weak} states that if $n$ is large enough, then the functions $x\mapsto d_{n,j}+\eta_\al(x)/n$ are contractive and the fixed-point method yields the solution of~\eqref{eq:main_eq}. Moreover, surprisingly for us, Newton's method applied to the equation $h_{\al,n,j}(x) = 0$ converges for \emph{all} $n\ge3$. \begin{thm}[convergence of Newton's method] \label{thm:Newton} Let $n\ge3$, $j$ be even, $1\le j\le n$ and $y_{\al,n,j}^{(0)} \in\clos(I_{n,j})$. Define the sequence $(y_{\al,n,j}^{(m)})_{m=0}^\infty$ by the recursive formula \begin{equation}\label{eq:Newton_sequence_eigval} y_{\al,n,j}^{(m)} \eqdef y_{\al,n,j}^{(m-1)} - \frac{h_{\al,n,j}\left(y_{\al,n,j}^{(m-1)}\right)}{h_{\al,n,j}'\left(y_{\al,n,j}^{(m-1)}\right)}\quad (m\ge1). \end{equation} Then $(y_{\al,n,j}^{(m)})_{m=0}^\infty$ converges to $\tht_{\al,n,j}$. If $n>\sqrt{\pi\cK_2(\al)/2}$, then for every $m$ \begin{equation} \label{eq:sol_newton_convergence_eigval} \left|y_{\al,n,j}^{(m)} - \tht_{\al,n,j}\right| \le \frac{\pi}{n}\left(\frac{\pi \cK_2(\al)}{2n^2}\right)^{2^m-1}. \end{equation} \end{thm} We define $\Lambda_{\al,n}\colon [0,\pi]\to\bR$ by \begin{equation}\label{eq:La} \Lambda_{\al,n}(x) \eqdef g(x) +\frac{g'(x) \eta_\al(x)}{n} +\frac{g'(x) \eta_\al(x)\eta_\al'(x) +\frac{1}{2}g''(x) \eta_\al(x)^2}{n^2}. \end{equation} For $j$ even, $1\le j\le n$, we define $\laasympt_{\al,n,j}$ by \begin{equation}\label{eq:laasympt_w} \laasympt_{\al,n,j} \eqdef \Lambda_{\al,n}\left(d_{n,j}\right). \end{equation} \begin{thm}[asymptotic expansion of the eigenvalues] \label{thm:weak_asympt_weak} There exists $C_1(\al)>0$ such that for $n$ large enough and $j$ even, $1\le j\le n$, \begin{equation}\label{eq:weak_lambda_asympt_K} \left|\la_{\al,n,j}-\laasympt_{\al,n,j}\right| \le\frac{C_1(\al)}{n^3}. \end{equation} \end{thm} The asymptotic expansion~\eqref{eq:weak_lambda_asympt_K} can be written as $\la_{\al,n,j}=\Lambda_{\al,n}(d_{n,j})+O_\al(1/n^3)$, where the constant $C_1(\al)$ in the upper bound of $O_\al(1/n^3)$ depends on $\al$, but does not depend on $j$ or $n$. Proposition~\ref{prop:weak_asympt_2} gives an alternative asymptotic expansion for $\la_{\al,n,j}$, with the points $j\pi/(n+1)$ instead of $d_{n,j}$. Proposition~\ref{prop:first_eigenvalues} contains an asymptotic expansion of $\la_{\al,n,j}$ for small values of $j$, as $j/n$ tends to $0$. Notice that $\la_{\al,n,2}$ is the first non-zero eigenvalue of $L_{\al,n}$ and is known as the ``spectral gap'' of this matrix. In the upcoming theorem we show an explicit formula~\eqref{eq:eivec_w} for the eigenvectors of $L_{\al,n}$ and asymptotic formulas for their norms; in these results we extend the domain of $\al$ to the strip $0<\Re(\al) <1$ of the complex plane, see~\eqref{eq:lapl_matrix_complex}. In the complex case we define $\ka_\al$ as $\Re(\al)/(1-\Re(\al))$. Formula~\eqref{eq:eivec_w} is a particular case of~\cite[Theorem 3.1]{YuehCheng2008}.
For every $x$ in $[0,\pi]$, we define \begin{equation}\label{eq:nu_al} \nu_{\al}(x) \eqdef \frac{1-\Re(\al)}{2}g(x) - \frac{\Re(\al)}{2} g(\eta_\al(x)) + \frac{\Re(\al)-|\al|^2}{2}g(x-\eta_\al(x)) + 2|\al|^2. \end{equation} \begin{thm}[eigenvectors and their norms]\label{thm:norm_eigvec} Let $\al\in\bC$, $0<\Re(\al)<1$. Then the vector $[1,\ldots,1]^\top$ is an eigenvector of the matrix $L_{\al,n}$ associated to the eigenvalue $\la_{\al,n,1}=0$. For every $j$, $2\le j\le n$, and every $k$, $1\le k\le n$, we define \begin{equation}\label{eq:eivec_w} v_{\al,n,j,k}\eqdef \sin(k \tht_{\al,n,j}) -(1-\overline{\al}) \sin((k-1)\tht_{\al,n,j}) + \overline{\al} \sin((n-k)\tht_{\al,n,j}). \end{equation} Then the vector $v_{\al,n,j}=[v_{\al,n,j,k}]_{k=1}^n$ with components~\eqref{eq:eivec_w} is an eigenvector of $L_{\al,n}$ associated to $\la_{\al,n,j}$. Moreover, if $j$ is odd, then \begin{equation}\label{eq:norm_eigvec_j_odd} \|v_{\al,n,j}\|_2 = |1-\al|\sqrt{\frac{n}{2} \la_{\al,n,j}}. \end{equation} If $j$ is even, then \begin{equation}\label{eq:norm_eigvec_j_even} \|v_{\al,n,j}\|_2 = \sqrt{n\nu_\al\left(\tht_{\al,n,j}\right)} + O_\al\left(\frac{1}{\sqrt{n}}\right) , \end{equation} with $O_\al\left(\frac{1}{\sqrt{n}}\right)$ uniformly on $j$. \end{thm} \section{Tridiagonal Toeplitz matrices with corner perturbations}\label{sec:trid_toep_corner_per} Let $\de$, $\eps$, $\si$, $\tau$ be arbitrary complex parameters and $n\ge3$. In this section, we consider the $n\times n$ matrix $A_n$, obtained from the tridiagonal Toeplitz matrix with diagonals $-1$, $2$, $-1$, substituting the components $(1,1)$, $(1,n)$, $(n,1)$, and $(n,n)$ by $2-\de$, $-\eps$, $-\si$, and $2-\tau$, respectively. For example, \begin{equation}\label{eq:Toeplitz_corner_pert} A_6\eqdef\begin{bmatrix*}[r] 2-\de & -1 & 0 & 0 & 0 & -\eps\phantom{\de} \\ -1\phantom{\de} & 2 & -1 & 0 & 0 & 0\phantom{\de} \\ 0\phantom{\de} & -1 & 2 & -1 & 0 & 0\phantom{\de} \\ 0\phantom{\de} & 0 & -1 & 2 & -1 & 0\phantom{\de} \\ 0\phantom{\de} & 0 & 0 & -1 & 2 & -1\phantom{\de} \\ -\si\phantom{\de} & 0 & 0 & 0 & -1 & 2-\tau \end{bmatrix*}. \end{equation} The study of more general tridiagonal symmetric Toeplitz matrices (with diagonals $a_1$, $a_0$, $a_1$ instead of $-1$,$2$,$-1$) with corner perturbations can be easily reduced to this case. We are going to give formulas for the characteristic polynomial and eigenvectors of $A_n$. The results are not essentially new (see~\cite{Ferguson1980,FF2009,YuehCheng2008}), but we present them in a different form (with Chebyshev polynomials) and with other proofs. We put $D_n(\la)\eqdef\det(\la I_n-A)$ and denote by $T_n$ and $U_n$ the Chebyshev polynomials of degree $n$ of the first and second kind, respectively. The next proposition is a particular case of~\cite[Corollary 2.4]{FF2009}; it is also easy to prove directly expanding by cofactors. \begin{prop}[the characteristic polynomial of $A_n$] \label{prop:char_pol_trid_toep_pert} \begin{equation}\label{eq:char_pol_trid_toep_pert} \begin{aligned} D_n(\la) & = U_n\left(\frac{\la-2}{2}\right) + (\de+ \tau) U_{n-1}\left(\frac{\la-2}{2}\right) \\&\pheq +(\de\tau - \eps \si) U_{n-2} \left( \frac{\la-2}{2}\right) + (-1)^{n+1}(\eps + \si). \end{aligned} \end{equation} \end{prop} \begin{cor} If $\eps = \de$ and $\si = \tau = -\de$, then $ D_n(\la) = U_n\left((\la-2)/2\right)$. Therefore, the eigenvalues of $A_n$ are $g(j\pi/(n+1))$ with $j$ in $\{1,\ldots,n\}$. The same situation holds for $\si = \de$ and $\eps = \tau = -\de$. 
\end{cor} If $\la$ is an eigenvalue of $A_n$, we will search for an associated eigenvector $v = [v_k]_{k=1}^n$ as a linear combination of two geometric progressions: \begin{equation}\label{eq:v} v_k = G_1 z^k + G_2 z^{-k} \qquad (1\le k\le n), \end{equation} where $z$ is a solution of the quadratic equation $z^2+(\la-2)z +1 =0$. Equivalently, $\la$ and $z$ are related by \begin{equation}\label{eq:la_z} -z^{-1} + (2-\la) - z = 0. \end{equation} Let $w\eqdef(\la I_n - A_n)v$. Formulas~\eqref{eq:v} and~\eqref{eq:la_z} easily imply that $w_k = 0$ for $2\le k\le n-1$, and our goal is to find coefficients $G_1$ and $G_2$ such that $w_1=0$ and $w_n=0$. To take advantage of the symmetry between $z$ and $z^{-1}$, we rewrite~\eqref{eq:v} in terms of Chebyshev polynomials: \begin{equation*} \begin{aligned} v_k &= \left(\frac{G_1+G_2}{2}\right) (z^k + z^{-k}) + \left(\frac{G_1-G_2}{2}\right) (z^k - z^{-k})\\ &= (G_1+G_2) T_k\left(\frac{z+z^{-1}}{2}\right) + \frac{(G_1-G_2)(z-z^{-1})}{2} U_{k-1}\left(\frac{z+z^{-1}}{2}\right). \end{aligned} \end{equation*} The system $w_1=0$ and $w_n=0$ is equivalent to \begin{equation}\label{eq:system_x_y} \begin{aligned} \rmA_{\de,\eps,n} x + \rmB_{\de,\eps,n} y & = 0\\ \rmC_{\si,\tau,n} x + \rmD_{\si,\tau,n} y & = 0, \end{aligned} \end{equation} where $x \eqdef (G_1+G_2)/2$, $y\eqdef (G_1-G_2)/2$, and \begin{equation}\label{eq:ABCD} \begin{aligned} \rmA_{\de,\eps,n} & \eqdef 2\left(-1 + \de T_1\left(\frac{z+z^{-1}}{2}\right) + \eps T_n\left(\frac{z+z^{-1}}{2}\right) \right), \\ \rmB_{\de,\eps,n} & \eqdef (z-z^{-1}) \left( \de + \eps U_{n-1}\left(\frac{z+z^{-1}}{2}\right) \right), \\ \rmC_{\si,\tau,n} & \eqdef 2\left(\si T_1\left(\frac{z+z^{-1}}{2}\right)+ \tau T_n\left(\frac{z+z^{-1}}{2}\right) - T_{n+1}\left(\frac{z+z^{-1}}{2}\right) \right), \\ \rmD_{\si,\tau,n} & \eqdef (z- z^{-1})\left( \si + \tau U_{n-1}\left(\frac{z+z^{-1}}{2}\right) - U_n\left(\frac{z+z^{-1}}{2}\right) \right). \end{aligned} \end{equation} In the next proposition we use the convention that $U_{-1}(t) \eqdef 0$. \begin{prop}[eigenvectors of $A_n$]\label{prop:eigvec_tri_Toep_corner_per} Let $\la\in\bC\setminus\{0,4\}$ be an eigenvalue of $A_n$. If $\rmA_{\de,\eps,n}\neq0$ or $\rmB_{\de,\eps,n}\neq0$, then the vector $v = [v_k]_{k=1}^n$ with components \begin{equation}\label{eq:eigvec_AB_neq_0} v_k \eqdef (-1)^{k-1}\left(U_{k-1}\left(\frac{\la-2}{2}\right) + \de U_{k-2}\left(\frac{\la-2}{2}\right) + (-1)^n \eps U_{n-k-1}\left(\frac{\la-2}{2}\right)\right) \end{equation} is an eigenvector of $A_n$ associated to $\la$. If $\rmC_{\si,\tau,n}\neq0$ or $\rmD_{\si,\tau,n}\neq0$, then the vector $v = [v_k]_{k=1}^n$ with components \begin{equation}\label{eq:eigvec_CD_neq_0} v_k \eqdef (-1)^{k-1}\left(\si U_{k-2}\left(\frac{\la-2}{2}\right) + (-1)^n \tau U_{n-k-1}\left(\frac{\la-2}{2}\right) + (-1)^n U_{n-k}\left(\frac{\la-2}{2}\right)\right) \end{equation} is an eigenvector of $A_n$ associated to $\la$. \end{prop} \begin{proof} The assumptions $\la\notin\{0,4\}$ and $z+z^{-1} = 2-\la$ imply that $z\notin\{-1,1\}$ and \begin{equation}\label{eq:Cheby_pol_z} T_n\left(\frac{\la-2}{2}\right) = (-1)^n \frac{z^n+z^{-n}}{2}, \qquad U_n\left(\frac{\la-2}{2}\right) = (-1)^{n}\frac{z^{n+1} - z^{-(n+1)}}{z-z^{-1}}. \end{equation} A direct computation shows that \begin{equation} \rmA_{\de,\eps,n} \rmD_{\si,\tau,n} - \rmB_{\de,\eps,n} \rmC_{\si,\tau,n} = 2 (-1)^n (z-z^{-1}) D_n(\la). 
\end{equation} Since $\la$ is an eigenvalue of $A_n$, we get $D_n(\la) = 0$, and the linear homogeneous system~\eqref{eq:system_x_y} has non-trivial solutions $(x,y)$. Namely, if $\rmA_{\de,\eps,n}\neq0$ or $\rmB_{\de,\eps,n}\neq0$, we put \[ x = \frac{\rmB_{\de,\eps,n}}{2(z-z^{-1})}, \qquad y = -\frac{\rmA_{\de,\eps,n}}{2(z-z^{-1})} . \] Using~\eqref{eq:Cheby_pol_z} we simplify $G_1$ and $G_2$ to \begin{align*} G_1 &=x+y = \frac{\rmB_{\de,\eps,n}-\rmA_{\de,\eps,n}}{2(z-z^{-1})} = \frac{1 - \de z^{-1} - \eps z^{-n}}{z-z^{-1}},\\ G_2 &=x-y = \frac{\rmB_{\de,\eps,n}+\rmA_{\de,\eps,n}}{2(z-z^{-1})} = \frac{-1 + \de z + \eps z^n}{z-z^{-1}}. \end{align*} Hence, for every $k$, formula~\eqref{eq:v} turns into \begin{equation}\label{eq:v_comp_temp} v_k = \frac{z^k - z^{-k}}{z-z^{-1}} - \de\, \frac{z^{k-1} - z^{-(k-1)}}{z-z^{-1}} + \eps\, \frac{z^{n-k} - z^{-(n-k)}}{z-z^{-1}}, \end{equation} which by~\eqref{eq:Cheby_pol_z} simplifies to~\eqref{eq:eigvec_AB_neq_0}. The linear independence of the geometric progressions $[z^{k}]_{k=1}^n$ and $[z^{-k}]_{k=1}^{n}$ ensures that $v$ is a non-zero vector. The proof of~\eqref{eq:eigvec_CD_neq_0} is similar. \end{proof} Proposition~\ref{prop:eigvec_tri_Toep_corner_per} does not cover the situation when \begin{equation}\label{eq:abcd=0} \rmA_{\de,\eps,n} = \rmB_{\de,\eps,n} = \rmC_{\si,\tau,n} = \rmD_{\si,\tau,n}=0. \end{equation} We analyze this situation in the following remarks. \begin{remark} If $\la=0$, i.e., $z=1$, then~\eqref{eq:abcd=0} is equivalent to $\de+\eps = 1$ and $\si+\tau = 1$. The last two equalities imply that $A_n$ is a laplacian complex matrix and $v=[1]_{k=1}^n$ is an eigenvector associated to $\la$. \end{remark} \begin{remark} If $\la = 4$, i.e., $z=-1$, then~\eqref{eq:abcd=0} is equivalent to $\de + (-1)^n\eps = 1$ and $(-1)^n\si - \tau = 1$. If these conditions are fulfilled, $v=[(-1)^k]_{k=1}^n$ is an eigenvector associated to $\la$. \end{remark} \begin{remark} If $\la\notin\{0,4\}$, then~\eqref{eq:abcd=0} is equivalent to \[ \de = \tau = \frac{U_{n-1}\left(\frac{z+z^{-1}}{2}\right)}{U_{n-2}\left(\frac{z+z^{-1}}{2}\right)}, \qquad \eps = \si = -\frac{1}{U_{n-2}\left(\frac{z+z^{-1}}{2}\right)}. \] In this case, every vector with components of the form~\eqref{eq:v} belongs to $\ker(\la I_n - A_n)$, and $\la$ is an eigenvalue of multiplicity at least $2$. \end{remark} \begin{remark} We have tested most formulas of this section in Sagemath using symbolic computations with polynomials in the variables $\de,\eps,\si,\tau,\la$, for every $n$ with $3\le n\le 20$. In particular, we have verified that if $v$ is given by~\eqref{eq:eigvec_AB_neq_0} and $w = (\la I_n -A_n)v$, then $w_n = (-1)^{n+1} D_n(\la)$. Analogously, if $v$ is given by~\eqref{eq:eigvec_CD_neq_0}, then $ w_1 = (-1)^{n} D_n(\la)$. \end{remark} \section{Eigenvalues' localization} \label{sec:localization_eigenvalues} In the following proposition, unlike in the main part of the paper, we suppose that $\al$ is a complex parameter. We define $D_{\al,n}(\la)$ as the characteristic polynomial $\det (\la I_n - L_{\al,n})$, where $L_{\al,n}$ is the $n\times n$ complex laplacian matrix of the form~\eqref{eq:lapl_matrix_complex}. \begin{prop}[characteristic polynomial of complex laplacian matrices] \label{prop:L_char_pol_via_Cheb} For $n\ge 3$, \begin{equation}\label{eq:L_char_pol_via_Cheb} D_{\al,n}(\la) = (\la-2\Re(\al)) U_{n-1}\left(\frac{\la-2}{2}\right) -2\Re(\al) U_{n-2}\left(\frac{\la-2}{2}\right)+2(-1)^{n+1}\Re(\al).
\end{equation} \end{prop} \begin{proof} This is a corollary of Proposition~\ref{prop:char_pol_trid_toep_pert}. \end{proof} Formula~\eqref{eq:L_char_pol_via_Cheb} implies a little miracle: $ D_{\al,n} = D_{\Re(\al),n}$ for every complex $\al$. Therefore, the eigenvalues of $L_{\al,n}$ are the same as the ones of the matrix $L_{\Re(\al),n}$. Since the latter matrix is hermitian, the eigenvalues are real. Hence, from now on we will suppose $\al$ to be a real number. It turns out that $D_{\al,n}(\la)$ factorizes into a product of two polynomials of nearly the same degree. To join the cases when $n$ is even and $n$ is odd, we use the change of variables $\la =4-t^2$. \begin{prop}\label{prop:pol_char_factorized} For $n\ge 3$, \begin{equation}\label{eq:char_pol_fact} D_{\al,n}(4-t^2) = 2(-1)^n \frac{p_{n}(t) q_{\al,n}(t)}{t}, \end{equation} where \[ p_{n}(t) = (t^2-4) U_{n-1}\left(\frac{t}{2}\right), \qquad q_{\al,n}(t) = (1-\al)T_n\left(\frac{t}{2}\right) +\al \frac{t}{2} U_{n-1}\left(\frac{t}{2}\right). \] \end{prop} \begin{proof} We will give a proof only for the case $n=2m$. The case $n=2m+1$ is similar. First, put $\la=2\om+2$, hence $t^2 = 2 - 2\om$. We apply the following elementary relations for Chebyshev polynomials: \begin{align*} U_{2m-2}(\om) &= -U_{2m}(\om) + 2\om U_{2m-1}(\om), \\ U_{2m-1}(\om) &=2U_{m-1}(\om) T_{m}(\om) , \\ U_{2m}(\om) &= 2\om U_{m-1}(\om)T_{m}(\om) +2T_m^2(\om) -1, \\ T_m^2(\om) - 1 &= (\om^2-1) U_{m-1}^2(\om), \\ T_{2m}\left(\frac{t}{2}\right) & = T_m\left(\frac{t^2-2}{2}\right),\quad U_{2m+1}\left(\frac{t}{2}\right) = t U_m\left(\frac{t^2-2}{2}\right). \end{align*} Thereby we obtain the next chain of equalities: \begin{align*} D_{\al,2m}(2\om+2) & = 2\Bigl(\al U_{2m}(\om) + (\om+1-\al-2\al \om)U_{2m-1}(\om) - \al\Bigr) \\ & = 4\Bigl( (\om+1)(1 - \al) U_{m-1}(\om) T_m(\om) + \al (T_m^2(\om)-1)\Bigr) \\ & = 4 (\om +1) U_{m-1}\left(\om\right) \Bigl((1-\al)T_{m}\left(\om\right) - \al (1-\om) U_{m-1 }\left(\om\right)\Bigr), \end{align*} and we arrive at~\eqref{eq:char_pol_fact}. \end{proof} The factorization~\eqref{eq:char_pol_fact} after the change of variable $t = 2\cos(x/2)$ reads as \begin{equation}\label{eq:char_pol_fact_trig} D_{\al,n}(g(x)) = D_{\al,n}(4-(2\cos(x/2))^2) = (-1)^n \frac{p_{n}(2\cos(x/2))q_{\al,n}(2\cos(x/2))}{\cos(x/2)}, \end{equation} where \[ p_n(2\cos(x/2)) = -4\sin\frac{x}{2} \sin\frac{nx}{2}, \qquad q_{\al,n}(2\cos(x/2)) = (1-\al) \cos\frac{nx}{2} + \al\cos\frac{x}{2} \frac{\sin\frac{nx}{2}}{\sin\frac{x}{2}}, \] or \begin{equation} \label{eq:charpol_factorization_trig} D_{\al,n}(g(x)) = (-1)^{n+1} \frac{4\sin\frac{x}{2}\sin\frac{nx}{2}}{\cos\frac{x}{2}} \left( (1-\al)\cos\frac{nx}{2} + \al \cos\frac{x}{2} \frac{\sin\frac{nx}{2}}{\sin\frac{x}{2}}\right). \end{equation} The polynomial $p_{n}$ does not depend on $\al$, and its zeros are easy to find. \begin{prop}[trivial eigenvalues of $L_{\al,n}$]\label{prop:trivial_eigvals} For every $n\ge3$ and every even $k$ with $0\le k\le n-1$, the number $g(k\pi/n)$ is an eigenvalue of $L_{\al,n}$. \end{prop} \begin{proof} The number $t = 2\cos(k\pi/(2n))$, with $k$ as in the hypothesis, is a zero of $p_n$. It corresponds to the eigenvalue $\la = 4-t^2 = g(k\pi/n)$, since $g(x) = 4-(2\cos(x/2))^2$. \end{proof} We already have an explicit formula for $\lfloor (n+1)/2 \rfloor$ eigenvalues of $L_{\al,n}$. The remaining ones correspond to the zeros of the polynomial $q_{\al,n}$. 
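Proposition~\ref{prop:trivial_eigvals} is easy to verify numerically: for even $k$, the values $g(k\pi/n)$ reappear, up to rounding errors, among the eigenvalues computed by a general-purpose solver. A minimal sketch, assuming NumPy; the helper repeats the matrix construction from the introduction:
\begin{verbatim}
import numpy as np

def laplacian(alpha, n):
    L = 2.0 * np.eye(n)
    for k in range(n - 1):
        L[k, k + 1] = L[k + 1, k] = -1.0
    L[0, -1] = L[-1, 0] = -alpha
    L[0, 0] = L[-1, -1] = 1.0 + alpha
    return L

alpha, n = 0.7, 11
lam = np.linalg.eigvalsh(laplacian(alpha, n))
for k in range(0, n, 2):              # even k: the trivial eigenvalues g(k*pi/n)
    t = 4.0 * np.sin(k * np.pi / (2.0 * n)) ** 2
    assert np.min(np.abs(lam - t)) < 1e-10
\end{verbatim}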
To analyze their localization, we first compute the values of $q_{\al,n}$ at the points $2\cos(j\pi/(2n))$ which correspond to the uniform mesh $j\pi/n$, $j=0,\ldots, n$. The next lemma is easily proven by direct computations. \begin{lem}\label{lem:q_n_eval_jpi/n} For every $j$ with $1\le j\le n-1$, \begin{equation*} q_{\al,n}\left(2\cos\frac{j\pi}{2n}\right) = \begin{cases} (1-\al) (-1)^{\frac{j}{2}}, & \quad \text{if } j \text{ is even}, \\ \al\cot\frac{j\pi}{2n} (-1)^{\frac{j-1}{2}}, & \quad \text{if } j \text{ is odd}. \end{cases} \end{equation*} Moreover, \begin{equation*} q_{\al,n}(0) = \begin{cases} 0, & \quad \text{if}\ n\ \text{is odd}, \\ (-1)^{\frac{n}{2}} (1-\al), &\quad \text{if}\ n\ \text{is even}, \end{cases} \qquad q_{\al,n}(2) = (1-\al) + \al n. \end{equation*} \end{lem} We observe that if $n$ is even, then $p_n(0)=0$, and if $n$ is odd, then $q_{\al,n}(0) = 0$. However, $t=0$ may not be a zero of $D_{\al,n}(4-t^2)$ because of the factor $1/t$ in~\eqref{eq:char_pol_fact}. This leads us to the next elementary lemma. \begin{lem}\label{lem:limits_pq} If $n$ is odd, then \begin{equation*} \lim_{t\to 0^+} \frac{2q_{\al,n}(t)}{t} = (-1)^{\frac{n-1}{2}} \Bigl( \al + (1-\al)n \Bigr) , \end{equation*} and if $n$ is even, then \begin{equation*} \lim_{t\to 0^+} \frac{2p_{n}(t)}{t} = 4(-1)^{\frac{n}{2}} n. \end{equation*} \end{lem} \begin{proof}[Proof of Theorem~\ref{thm:Localization_weak_eigenvals}] Let $1\le j\le n$. If $j$ is odd, then~\eqref{eq:localization_eigvals_weak_odd} follows by Proposition~\ref{prop:trivial_eigvals}. We consider the quotient $q_{\al,n}(2\cos(x/2))/(2\cos(x/2))$ from factorization~\eqref{eq:char_pol_fact_trig}. Lemmas~\ref{lem:q_n_eval_jpi/n} and~\ref{lem:limits_pq} imply that this expression changes its sign in the intervals $I_{n,j}$, where $j$ is even. By the intermediate value theorem, we have~\eqref{eq:localization_eigvals_weak_even}. \end{proof} Theorem~\ref{thm:Localization_weak_eigenvals} implies immediately that for every $0<\al<1$ and for every $y$ in $\bR$, \begin{equation*} \lim_{n\to\infty} \frac{\#\{j\in\{1,\ldots,n\}\colon \ \la_{\al,n,j}\le y \}}{n} = \frac{\mu\left(\{x\in[0,\pi]\colon g(x)\le y \}\right)}{\pi}, \end{equation*} i.e., the eigenvalues of $L_{\al,n}$ are asymptotically distributed as the function $g$ on $[0,\pi]$. \section{Main equation} \label{sec:main_equation} In this section we reduce the computation of the non-trivial eigenvalues to the solution of the ``main equation''~\eqref{eq:main_eq}. We recall it here: \[ x = d_{n,j}+\frac{\eta_\al(x)}{n}. \] \begin{proof}[Proof of Theorem~\ref{thm:weak_characteristic_equation_L}] Recall that $j$ is even. In the proof of Theorem~\ref{thm:Localization_weak_eigenvals} we have seen that $\tht_{\al,n,j}$ belongs to $I_{n,j}$ and is the unique solution of the equation $q_{\al,n}(2\cos(x/2)) = 0$. This is equivalent to the following one (see also~\eqref{eq:char_pol_fact_trig}): \begin{equation}\label{eq:tangent_equality_L} \tan\frac{nx}{2} = -\frac{1-\al}{\al}\tan\frac{x}{2}. \end{equation} Applying $\arctan$ to both sides of~\eqref{eq:tangent_equality_L} we transform it to \[ nx = j\pi - 2\arctan\left(\frac{1-\al}{\al}\tan\frac{x}{2}\right). \] Finally, since $\pi/2-\arctan(u) = \arctan(1/u)$, we obtain~\eqref{eq:main_eq}. \end{proof} Figure~\ref{fig:tangents_weak_L} shows the plots of both sides of~\eqref{eq:tangent_equality_L} for some $\al$ in $(0,1)$. We see that the intersections really take place in the intervals given in Theorem~\ref{thm:Localization_weak_eigenvals}. 
\begin{figure}[htb] \centering \includegraphics{tangents.pdf} \caption{The left-hand side (green) and right-hand side (blue) of~\eqref{eq:tangent_equality_L} for $\al=0.7$ and $n=8$; the scales of the axes are different. \label{fig:tangents_weak_L} } \end{figure} Recall that $h_{\al,n,j}$ is defined by~\eqref{eq:h_char}. Obviously,~\eqref{eq:main_eq} is equivalent to $h_{\al,n,j}(x)=0$. \begin{prop}\label{prop:h_change_sign} Let $n\ge3$ and $j$ be even with $1\le j\le n$. Then $h_{\al,n,j}$ changes its sign exactly once in $I_{n,j}$. \end{prop} \begin{proof} Indeed, \[ h_{\al,n,j}((j-1)\pi/n) = - \eta_\al((j-1)\pi/n)<0, \] \[ h_{\al,n,j}(j\pi/n) = \pi-\eta_\al(j\pi/n)>0, \] and $h_{\al,n,j}$ is strictly increasing. \end{proof} \begin{remark}\label{rem:case_al=1/2} If $\al = 1/2$, then $\ka_{\frac{1}{2}} = 1$ and $\eta_{\al}(x) = \pi -x$. In this case equation~\eqref{eq:main_eq} yields explicit formulas for the eigenvalues $\la_{\al,n,j}$ with even values of $j$: \[ \tht_{\al,n,j} = \frac{j\pi}{n+1},\qquad \la_{\al,n,j} = g\left(\frac{j\pi}{n+1}\right). \] \end{remark} In the following proposition, unlike in the other parts of this paper, we fix $n$ and $j$ and treat $\al$ as a variable running through the closed interval $[0,1]$. Formally, we define $\Psi_{n,j}\colon[0,1]\to[0,4]$ by \[ \Psi_{n,j}(\al)\eqdef\la_{\al,n,j}. \] \begin{prop}[dependence of the eigenvalues on the parameter $\al$] \label{prop:dependence_on_the_parameter} Let $n\ge 3$ and $j$ be even, with $1\le j\le n$. Then $\Psi_{n,j}$ is continuous and strictly increasing on $[0,1]$. In particular, \begin{align} \label{eq:lim_lambda_at_alpha_zero} \lim_{\al\to0^+} \la_{\al,n,j} &=\la_{0,n,j} =g\left(\frac{(j-1)\pi}{n}\right), \\[1ex] \label{eq:lim_lambda_at_alpha_one} \lim_{\al\to1^-} \la_{\al,n,j} &=\la_{1,n,j} =g\left(\frac{j\pi}{n}\right). \end{align} \end{prop} \begin{proof} It is well known that the functions $A\mapsto \la_j(A)$ are Lipschitz continuous on the space of the hermitian matrices equipped with the operator norm, see~\cite[Weyl's Theorem~4.3.1 and Problem~4.3.P1]{HornJohnson2013}. As a consequence, $\Psi_{n,j}$ is continuous on $[0,1]$. To analyze the monotonicity, we will apply to the main equation some ideas from the implicit function theorem. Define $\Tht_{n,j}\colon(0,1)\to\bR$ and $H_{n,j}\colon (0,1)\times (0,\pi)\to\bR$ by \[ \Tht_{n,j}(\al)\eqdef\tht_{\al,n,j},\qquad H_{n,j}(\al,x) \eqdef h_{\al,n,j}(x) = nx - (j-1)\pi - \eta_\al(x). \] We compute the partial derivatives of $H_{n,j}$ with respect to the first and second argument: \[ (D_1 H_{n,j})(\al,x) = -\frac{2\tan\frac{x}{2}}{\al^2+(1-\al)^2\tan^2\frac{x}{2}} <0,\qquad (D_2 H_{n,j})(\al,x) =n-\eta_\al'(x)>n.\] Since $H_{n,j}(\al,\Tht_{n,j}(\al))=0$, we conclude that $\Tht_{n,j}$ is differentiable on $(0,1)$, and \[ \Tht_{n,j}'(\al) =-\frac{(D_1 H_{n,j})(\al,\Tht_{n,j}(\al))}{(D_2 H_{n,j})(\al,\Tht_{n,j}(\al))}>0. \] Hence, the functions $\Tht_{n,j}$ and $\Psi_{n,j}=g\circ\Tht_{n,j}$ are strictly increasing on $(0,1)$. Now the continuity of $\Psi_{n,j}$ implies that this function is strictly increasing on $[0,1]$. \end{proof} Figure~\ref{fig:theta_vs_lambda} shows the eigenvalues $\la_{\al,n,j} = g(\tht_{\al,n,j})$ for $\al=1/3$ and $\al=4/5$, with $n=10$. One can observe the localization of $\tht_{\al,n,j}$ in $I_{n,j}$ for even values of $j$ and the monotone dependence on $\al$.
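Proposition~\ref{prop:h_change_sign} already gives a simple certified way to compute the non-trivial numbers $\tht_{\al,n,j}$, and hence the eigenvalues $g(\tht_{\al,n,j})$: bisection on $\clos(I_{n,j})$. A minimal sketch, assuming NumPy; the tolerance is illustrative:
\begin{verbatim}
import numpy as np

def eta(alpha, x):
    # eta_alpha(x) = 2 arctan(kappa_alpha * cot(x/2))
    return 2.0 * np.arctan((alpha / (1.0 - alpha)) / np.tan(x / 2.0))

def theta_bisection(alpha, n, j, tol=1e-14):
    """Solve h(x) = n x - (j-1) pi - eta_alpha(x) = 0 on I_{n,j}, j even."""
    a, b = (j - 1) * np.pi / n, j * np.pi / n      # h(a) < 0 < h(b)
    h = lambda x: n * x - (j - 1) * np.pi - eta(alpha, x)
    while b - a > tol:
        c = 0.5 * (a + b)
        a, b = (c, b) if h(c) < 0 else (a, c)
    return 0.5 * (a + b)

alpha, n = 0.3, 12
g = lambda x: 4.0 * np.sin(x / 2.0) ** 2
lam_even = [g(theta_bisection(alpha, n, j)) for j in range(2, n + 1, 2)]
\end{verbatim}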
\begin{figure}[htb] \centering \includegraphics{theta_vs_lambda_red_lines.pdf}\qquad\quad\includegraphics{theta_vs_lambda_red_lines02.pdf} \caption{The values $\tht_{\al,n,j}$ and $\la_{\al,n,j}$ for $\al=1/3$, $n=10$ (left) and $\al=4/5$, $n=6$ (right); the red marks on the horizontal axis correspond to $k\pi/10$, $1\le k\le 9$. \label{fig:theta_vs_lambda} } \end{figure} \section{Solving the main equation by the fixed-point method} \label{sec:fixed_point} We recall that $\eta_{\al}$ and $\ka_\al$ are defined by~\eqref{eq:eta} and~\eqref{eq:ka}, respectively, and that $\eta_\al$ does not depend on $n$. Here are other equivalent formulas for $\eta_\al$: \begin{align} \label{eq:eta1} \eta_\al(x) &=\pi-2\arctan\left(\frac{1-\al}{\al}\tan\frac{x}{2}\right), \\ \label{eq:eta2} \eta_\al(x) &=2\arcsin\frac{\ka_\al \cos\frac{x}{2}}{\sqrt{\sin^2\frac{x}{2}+\ka_\al^2\cos^2\frac{x}{2}}}, \\ \label{eq:eta3} \eta_\al(x) &=2\arcsin\frac{\sqrt{2}\,\al\cos\frac{x}{2}}{\sqrt{(2\al^2-2\al+1)+(2\al-1)\cos(x)}}. \end{align} We notice that~\eqref{eq:eta} is more convenient to use if $x$ is close to $\pi$, while~\eqref{eq:eta1} is better for $x$ close to $0$. The first two derivatives of $\eta_{\al}$ are \begin{align} \label{eq:etader} \eta_\al'(x) &= - \frac{\ka_\al(1+\tan^2\frac{x}{2})}{\ka_\al^2+\tan^2\frac{x}{2}}, \\ \label{eq:eta_der_v2} \eta_\al'(x) &= - \frac{\ka_\al(1+\cot^2\frac{x}{2})}{1+\ka_\al^2\cot^2\frac{x}{2}}, \\ \label{eq:eta_der_2} \eta_\al''(x) & = \frac{(\ka_\al^2-1)\tan\frac{x}{2}}{\ka_\al^2+\tan^2\frac{x}{2}}\,\eta_\al'(x). \end{align} The next proposition gives some upper bounds for $\eta_\al'$ and $\eta_\al''$ for every $\al$ in $(0,1)$, involving the following numbers: \begin{equation}\label{eq:K} \cK_1(\al) \eqdef \max\left\{\ka_\al,\frac{1}{\ka_\al}\right\},\qquad \cK_2(\al) \eqdef \frac{|\ka_\al^2-1|}{2\ka_\al} \cK_1(\al)=\frac{\cK_1^2(\al)-1}{2}. \end{equation} \begin{prop}\label{prop:eta_bound} Each derivative of $\eta_\al$ is a bounded function on $(0,\pi)$. In particular, \begin{align}\label{eq:eta_der_bounded} \sup_{0<x<\pi}|\eta_\al'(x)| &= \cK_1(\al), \\ \label{eq:eta_der2_bounded} \sup_{0<x<\pi}|\eta_\al''(x)| & \le \cK_2(\al). \end{align} \end{prop} \begin{proof} In order to prove~\eqref{eq:eta_der_bounded}, we rewrite~\eqref{eq:etader} as follows: \begin{equation}\label{eq:eta_der_version_2} \eta_\al'(x)=- \ka_\al\left( 1 + \frac{1-\ka_\al^2}{\ka_\al^2+\tan^2\frac{x}{2}} \right) \qquad (x\in (0,\pi)). \end{equation} We notice that $\tan^2(x/2)$ increases from $0$ to $\infty$ as $x$ goes from $0$ to $\pi$. If $0< \al \le1/2$, then $\ka_\al\le 1$, and $\eta_\al'$ increases taking values from $\eta_\al'(0) = -\ka_\al^{-1}$ to $\eta_\al'(\pi) = -\ka_\al$. If $1/2<\al<1$, then $\eta'_\al$ decreases. In both cases, the maximal value of $|\eta_\al'|$ is reached at one of the points $0$ or $\pi$. This proves~\eqref{eq:eta_der_bounded}. For the second derivative $\eta_\al''$, from~\eqref{eq:eta_der_2} we get \[ |\eta_\al''(x)| = \frac{\tan\frac{x}{2}}{\ka_\al^2+\tan^2\frac{x}{2}} |\ka_\al^2-1| |\eta_\al'(x)| \le \frac{|\ka_\al^2-1|}{2|\ka_\al|}\cK_1(\al) \qquad (x\in (0,\pi)). \] This is exactly~\eqref{eq:eta_der2_bounded}. For the higher derivatives of $\eta_{\al}$, the explicit estimates are too tedious, and we propose the following argument. By~\eqref{eq:etader}, $\eta_{\al}'$ is analytic in a neighborhood of $x$, for any $x$ in $(0,\pi)$. Even more, $\eta_{\al}'$ has an analytic extension to some neighborhoods of the points $0$ and $\pi$.
Hence, $\eta_{\al}'$ has an analytic extension to a certain open set in the complex plane containing the segment $[0,\pi]$. Therefore, each derivative of this function is bounded on $(0,\pi)$. \end{proof} For every $j$, $1\le j\le n$, we define the function $f_{\al,n,j}\colon[0,\pi]\to\bR$ by \begin{equation}\label{eq:L_f} f_{\al,n,j}(x)\eqdef d_{n,j}+\frac{\eta_\al(x)}{n}, \end{equation} i.e., $f_{\al,n,j}(x)=((j-1)\pi+\eta_\al(x))/n$. Hence~\eqref{eq:main_eq} can be written as $\tht_{\al,n,j} = f_{\al,n,j}(\tht_{\al,n,j})$. \begin{prop}\label{prop:Z_contractive_weak} Let $n> \cK_1(\al)$, and let $j$ be even, $1\le j\le n$. Then $f_{\al,n,j}$ is contractive in $\operatorname{clos}( I_{n,j})$. Its fixed point belongs to $I_{n,j}$ and coincides with $\tht_{\al,n,j}$. \end{prop} \begin{proof} Since $\eta_\al$ takes values in $[0,\pi]$, for every $x$ in $\operatorname{clos}(I_{n,j})$ we get \[ \frac{(j-1)\pi}{n}\le\frac{(j-1)\pi+\eta_{\al}(x)}{n}\le \frac{j\pi}{n}, \] i.e., $f_{\al,n,j}(x)\in\operatorname{clos}(I_{n,j})$. By Proposition~\ref{prop:eta_bound}, $\eta_\al'$ is bounded by $\cK_1(\al)$, hence \[ \left|f_{\al,n,j}'(x)\right|\le\frac{\cK_1(\al)}{n}<1. \] This implies that $f_{\al,n,j}$ is a contractive function on $\operatorname{clos}(I_{n,j})$. Then, by the Banach fixed point theorem, $f_{\al,n,j}$ has a unique fixed point, and by Theorem~\ref{thm:weak_characteristic_equation_L} it coincides with $\tht_{\al,n,j}$ and belongs to $I_{n,j}$. \end{proof} \begin{cor} Let $n> \cK_1(\al)$, $j$ be even, $1\le j\le n$, and $x_{\al,n,j}^{(0)}$ be an arbitrary point in $\operatorname{clos}(I_{n,j})$. Define the sequence $\left(x_{\al,n,j}^{(m)}\right)_{m=0}^\infty$ by \begin{equation*} x_{\al,n,j}^{(m)} \eqdef f_{\al,n,j}\left(x_{\al,n,j}^{(m-1)}\right) \qquad (m \ge 1). \end{equation*} Then \begin{equation*} \left|x_{\al,n,j}^{(m)} - \tht_{\al,n,j}\right| \le \frac{\pi}{n}\left(\frac{\cK_1(\al)}{n}\right)^m \qquad (m \ge 0). \end{equation*} \end{cor} \begin{proof} This follows from Proposition~\ref{prop:Z_contractive_weak} and the Banach fixed point theorem. \end{proof} \section{Newton's method for convex functions}\label{sec:Newton_convex} In this section we recall some sufficient conditions for the convergence of Newton's method. Assume that $a,b\in\bR$ with $a<b$; $f$ is differentiable and $f'>0$ on $[a,b]$; there exists $c$ in $[a,b]$ such that $f(c)=0$; $y^{(0)}$ is a point in $[a,b]$ and the sequence $(y^{(m)})_{m=0}^\infty$ is defined (when possible) by the recurrence relation \begin{equation}\label{eq:recursive_newton} y^{(m+1)} = y^{(m)} - \frac{f\left(y^{(m)}\right)}{f'\left(y^{(m)}\right)}. \end{equation} Obviously, if $y^{(m)}=c$ for some $m$, then the sequence is constant from that index on. In general, the sufficient conditions for Newton's method are quite complicated (see, for example, the Kantorovich theorem). Nevertheless, it is well known that Newton's method converges for convex functions, when the initial point is chosen from the ``correct'' side of the root (\cite[Section 22, Problem 14]{Spivak1994} and \cite[Theorem 2.2]{A1989}). In the following proposition we show an upper bound for the linear convergence in this case.
\begin{prop}[linear convergence of Newton's method for convex functions]\label{prop:convergence_f_convex_y0>c} If $f$ is convex on $[a,b]$, $c\le y^{(0)}\le b$, then $y^{(m)}$ belongs to $[c,b]$ for every $m\ge0$, the sequence $(y^{(m)})_{m=0}^\infty$ decreases and converges to $c$, with \begin{equation}\label{eq:linear_newton} y^{(m)}-c \le (b-a)\left(1 - \frac{f'(a)}{f'(b)}\right)^m. \end{equation} \end{prop} \begin{proof} Reasoning by induction, suppose that $m\ge1$ and $b\ge y^{(m)}\ge c$. By the mean value theorem, there exists $\xi_m\in[c,y^{(m)}]$ such that $f(y^{(m)})-f(c) = f'(\xi_m)(y^{(m)}-c)$, hence \begin{equation}\label{eq:mvt_recurrence_newton} f(y^{(m)}) = (y^{(m)}-c)f'(\xi_m). \end{equation} Combining~\eqref{eq:recursive_newton} with~\eqref{eq:mvt_recurrence_newton} we obtain that \begin{equation} y^{(m+1)}-c = y^{(m)}-\frac{f(y^{(m)})}{f'(y^{(m)})}-c = \left(y^{(m)}-c\right) \left(1 - \frac{f'(\xi_m)}{f'(y^{(m)})}\right). \end{equation} Since $f'$ is positive and increasing on $[a,b]$, \begin{equation} 0\le y^{(m+1)}-c \le \left(y^{(m)}-c\right) \left(1 - \frac{f'(a)}{f'(b)}\right). \end{equation} This yields~\eqref{eq:linear_newton} and the convergence of the sequence. \end{proof} The next proposition provides a sufficient convergence condition, when starting from the ``bad'' side of the root. Then $y^{(1)}$ is on the ``good'' side of the root and Proposition~\ref{prop:convergence_f_convex_y0>c} can be applied to the sequence $(y^{(m)})_{m=1}^\infty$. \begin{prop}\label{prop:first_eval_convex_y0<c} Suppose that $f$ is convex on $[a,b]$, $a\le y^{(0)} < c$, and \begin{equation}\label{eq:wrong_side_condition} a - \frac{f(a)}{f'(a)} \le b. \end{equation} Then $y^{(1)}$ belongs to $[c,b]$. \end{prop} \begin{proof} Since $f$ is convex, its graph is above the tangent lines at the points $(a,f(a))$ and $(y^{(0)},f(y^{(0)}))$. In particular, \[ f(y^{(0)})\ge f(a)+f'(a)(y^{(0)}-a),\qquad 0 = f(c) \ge f(y^{(0)})+f'(y^{(0)})(c-y^{(0)}). \] Moreover, $f(y^{(0)})<0$ and $f'(y^{(0)})\ge f'(a)>0$. Hence, \[ c\le y^{(1)} =y^{(0)}-\frac{f(y^{(0)})}{f'(y^{(0)})} \le y^{(0)}-\frac{f(a)+f'(a)(y^{(0)}-a)}{f'(a)} = a-\frac{f(a)}{f'(a)} \le b.\qedhere \] \end{proof} The following fact is well known~\cite[Theorem 2.1]{A1989}. \begin{prop} \label{prop:Newton_convex} Let $f\in C^2([a,b])$. Suppose that $(b-a)M<1$, where \[ M \eqdef \frac{\max_{t\in [a,b]} |f''(t)|}{2\min_{t\in[a,b]}|f'(t)|}.\] Assume that $y^{(m)}$ is well defined and belongs to $[a,b]$ for every $m$. Then $y^{(m)}$ converges to $c$ as $m$ tends to $\infty$, and for every $m$ \begin{equation}\label{eq:quadratic_convergence} |y^{(m)} - c| \le \left((b-a)M\right)^{2^{m}-1} (b-a). \end{equation} \end{prop} \begin{proof}[Idea of the proof] Let $m\ge0$. By Taylor's formula, there exists $\nu\in[c, y^{(m)}]$ such that \[0=f(c)= f(y^{(m)}) + f'(y^{(m)})\left(c-y^{(m)}\right) + \frac{1}{2}f''(\nu) \left(c-y^{(m)}\right)^2.\] It follows easily that $ |y^{(m+1)} - c |\le M (c-y^{(m)})^2$. Now~\eqref{eq:quadratic_convergence} is obtained by induction. \end{proof} \begin{remark}[Newton's method for concave functions] \label{rem:Newton_convex_2} Analogs of Propositions~\ref{prop:convergence_f_convex_y0>c} and~\ref{prop:first_eval_convex_y0<c} hold if $f$ is a concave function. In this case, each of the following two conditions is sufficient for the convergence: \begin{itemize} \item $a\le y^{(0)}\le c$, \item $c< y^{(0)}\le b$ and $ b-f(b)/f'(b)\ge a$. 
\end{itemize} Instead of repeating the corresponding proofs with obvious modifications, one can pass to the function $x\mapsto -f(-x)$. \end{remark} \section{Solving the main equation by Newton's method}\label{sec:solve_by_Newton} Recall that $h_{\al,n,j}$ is defined by~\eqref{eq:h_char}. In this section we prove that the equation $h_{\al,n,j}(x) =0$, which is equivalent to the main equation, can be solved by Newton's method for every $n\ge3$. Remark~\ref{rem:case_al=1/2} shows that the eigenvalues can be exactly computed if $\al=1/2$, hence this case could be omitted in the next propositions. \begin{prop}[linear convergence of Newton's method applied to the main equation]\label{prop:linear_conv_eigval} For every $n\ge3$, every even $j$ with $1\le j\le n$, and every $y_{\al,n,j}^{(0)} \in I_{n,j}$, the sequence $(y_{\al,n,j}^{(m)})_{m=0}^{\infty}$, defined by~\eqref{eq:Newton_sequence_eigval}, converges to $\tht_{\al,n,j}$. The convergence is at least linear: \begin{equation}\label{eq:sol_linear_newton} \left|y_{\al,n,j}^{(m)}-\tht_{\al,n,j}\right|\le \frac{\pi}{n}\ga_{\al,n}^{m-1}, \end{equation} where \begin{equation} \ga_{\al,n} \eqdef \frac{|2\al-1|}{\al(1-\al)n+|2\al-1|}. \end{equation} \end{prop} \begin{proof} We start with the case $1/2\le \al < 1$. By the proof of Proposition~\ref{prop:eta_bound}, $\eta_\al$ is analytic in $\clos(I_{n,j})$, and $\eta_\al'$ decreases on $[0,\pi]$ taking values from $\eta_\al'(0) = -\ka_\al^{-1}$ to $\eta_\al'(\pi) = -\ka_\al$. Therefore, $h_{\al,n,j}$ is analytic and convex on $[0,\pi]$, and \[ 1- \frac{n-\eta_\al'\left(\frac{(j-1)\pi}{n}\right)}{n-\eta_\al'\left(\frac{j\pi}{n}\right)} \le 1 - \frac{n-\eta_\al'(0)}{n-\eta_\al'(\pi)} = \frac{2\al-1}{\al(1-\al)n + \al^2}\le \frac{2\al-1}{\al(1-\al)n + 2\al-1}. \] If $y_{\al,n,j}^{(0)}\ge\tht_{\al,n,j}$, then Proposition~\ref{prop:convergence_f_convex_y0>c} yields the convergence and~\eqref{eq:sol_linear_newton}. For $y_{\al,n,j}^{(0)}<\tht_{\al,n,j}$, we have to verify the condition~\eqref{eq:wrong_side_condition} from Proposition~\ref{prop:first_eval_convex_y0<c}. Indeed, \begin{equation*}\label{eq:badside_al_1} \frac{(j-1)\pi}{n} - \frac{h_{\al,n,j}\left(\frac{(j-1)\pi}{n}\right)}{h_{\al,n,j}'\left(\frac{(j-1)\pi}{n}\right)} = \frac{(j-1)\pi}{n} + \frac{\eta_{\al}\left(\frac{(j-1)\pi}{n} \right)}{n-\eta_{\al}'\left(\frac{(j-1)\pi}{n} \right)} \le \frac{j\pi}{n}. \end{equation*} Since $y_{\al,n,j}^{(1)}\ge\tht_{\al,n,j}$, after applying $m-1$ steps of the algorithm we get~\eqref{eq:sol_linear_newton}. For $0<\al\le1/2$, $h_{\al,n,j}$ is concave, and the proof of the linear convergence is similar (see Remark~\ref{rem:Newton_convex_2}). In particular, if $y_{\al,n,j}^{(0)}>\tht_{\al,n,j}$, then \begin{equation*}\label{eq:badside_al_2} \frac{j\pi}{n} - \frac{h_{\al,n,j}\left(\frac{j\pi}{n}\right)}{h_{\al,n,j}'\left(\frac{j\pi}{n}\right)} = \frac{j\pi}{n} - \frac{\pi-\eta_\al\left(\frac{j\pi}{n}\right)}{n-\eta_\al'\left(\frac{j\pi}{n}\right)} \ge \frac{(j-1)\pi}{n}. \end{equation*} \end{proof} \begin{proof}[Proof of Theorem~\ref{thm:Newton}] The first part of the theorem follows from Proposition~\ref{prop:linear_conv_eigval}. Now we suppose that $0<\al<1$ and $n>\sqrt{\pi\cK_2(\al)/2}$. Since $\eta_\al'<0$ and $|\eta_\al''|$ is bounded by $\cK_2(\al)$, \[ M_{\al,n,j}\eqdef \frac{1}{2}\sup_{0<x,y<\pi} \left|\frac{h_{\al,n,j}''(x)}{h_{\al,n,j}'(y)}\right| =\frac{1}{2n} \sup_{0<x,y<\pi}\left| \frac{\eta_{\al}''(x)}{1-\frac{\eta_\al'(y)}{n}} \right| \le \frac{\cK_2(\al)}{2n}.
\] Therefore, $\frac{\pi}{n} M_{\al,n,j} \le\frac{\pi \cK_2(\al)}{2n^2}<1$, the conditions in Proposition~\ref{prop:Newton_convex} are fulfilled, and we obtain~\eqref{eq:sol_newton_convergence_eigval}. \end{proof} The upper bound~\eqref{eq:sol_linear_newton} allows us to compute ``a priori'' the number of steps that will be sufficient to achieve a desired precision. Namely, if \begin{equation}\label{eq:bounded_of_steps_for_newton_method} m >\frac{p + \log_2\frac{\pi}{2n}}{\log_2\frac{1}{\ga_{\al,n}}} + 1, \end{equation} then $|y_{\al,n,j}^{(m)}-\tht_{\al,n,j}|<2^{-p}$. In fact, after a few iterations the linear convergence turns into quadratic convergence, which further reduces the number of iterations. \section{Asymptotic formulas for the eigenvalues}\label{sec:asymptotic_formulas} \begin{prop} \label{prop:theta_approximation_1} Let $n\ge 3$ and $j$ be even with $1\le j\le n$. Then \begin{equation} \label{eq:theta_approximation_1} \left|\tht_{\al,n,j}-\left(d_{n,j}+\frac{\eta_\al(d_{n,j})}{n}\right)\right| \le \frac{\pi \cK_1(\al)}{n^2}. \end{equation} \end{prop} \begin{proof} Theorem~\ref{thm:weak_characteristic_equation_L} ensures that $|\tht_{\al,n,j}-d_{n,j}|\le\frac{\pi}{n}$. Hence, by the mean value theorem and formula~\eqref{eq:eta_der_bounded}, \[ \bigl|\eta_\al(\tht_{\al,n,j})-\eta_\al(d_{n,j})\bigr| \le \|\eta_\al'\|_\infty\, \left|\tht_{\al,n,j}-d_{n,j}\right| \le \frac{\pi\cK_1(\al)}{n}. \] Using~\eqref{eq:main_eq} we obtain~\eqref{eq:theta_approximation_1}. \end{proof} \begin{prop} \label{prop:weak_theta_asympt} There exists $C_1(\al)>0$ such that for every $n\ge3$ and every $j$ even with $1\le j\le n$, \begin{equation}\label{eq:weak_theta_asympt} \tht_{\al,n,j} =d_{n,j} +\frac{\eta_\al(d_{n,j})}{n} +\frac{\eta_\al(d_{n,j})\eta_\al'(d_{n,j})}{n^2} +r_{\al,n,j}, \end{equation} where $|r_{\al,n,j}|\le\frac{C_1(\al)}{n^3}$. \end{prop} \begin{proof} Proposition~\ref{prop:theta_approximation_1} implies that \[ \tht_{\al,n,j} =d_{n,j}+\frac{\eta_\al(d_{n,j})}{n} +O_\al\left(\frac{1}{n^2}\right). \] Substitute this expression into the right-hand side of~\eqref{eq:main_eq}: \[ \tht_{\al,n,j} =d_{n,j}+\frac{\eta_\al\left(d_{n,j}+\frac{\eta_\al(d_{n,j})}{n} +O_\al\left(\frac{1}{n^2}\right)\right)}{n}. \] Expanding $\eta_\al$ by Taylor's formula around $d_{n,j}$ with two exact terms and estimating the remainder with Proposition~\ref{prop:eta_bound}, we obtain the desired result. \end{proof} \begin{proof}[Proof of Theorem~\ref{thm:weak_asympt_weak}] This theorem follows from Proposition~\ref{prop:weak_theta_asympt}: we just evaluate $g$ at the expression~\eqref{eq:weak_theta_asympt} and expand it by Taylor's formula around $d_{n,j}$. \end{proof} In a similar manner, iterating in the main equation~\eqref{eq:main_eq}, we could obtain asymptotic expansions with more terms; see~\cite[(3.9)]{BBGM2018} for the asymptotic expansions up to $n^{-5}$. There are other forms of the asymptotic expansions for $\la_{\al,n,j}$. Adding $x$ to both sides of the equation $nx = (j-1)\pi + \eta_\al(x)$ and dividing by $n+1$, we arrive at the following equivalent form of the main equation: \begin{equation}\label{eq:main_eq_2} x = \frac{j\pi + \widetilde{\eta}_\al(x)}{n+1},\quad\text{where}\quad \widetilde{\eta}_\al(x) \eqdef \eta_\al(x)+x-\pi =2\arctan\frac{(\ka_\al-1)\cot\frac{x}{2}}{1+\ka_\al \cot^2\frac{x}{2}}. \end{equation} After that, similarly to Proposition~\ref{prop:weak_theta_asympt} and Theorem~\ref{thm:weak_asympt_weak}, we obtain the next result.
\begin{prop} \label{prop:weak_asympt_2} There exist $C_2(\al)>0$ and $C_3(\al)>0$ such that for every $n\ge3$ and every $j$ even with $1\le j\le n$, \begin{equation}\label{eq:tht_al_den} \tht_{\al,n,j} = \frac{j\pi}{n+1} +\frac{\widetilde{\eta}_\al\left(\frac{j\pi}{n+1}\right)}{n+1} +\frac{\widetilde{\eta}_\al\left(\frac{j\pi}{n+1}\right)\widetilde{\eta}_\al'\left(\frac{j\pi}{n+1}\right)}{(n+1)^2} + \widetilde{r}_{\al,n,j}, \end{equation} \begin{equation} \label{eq:la_asympt_exp_new} \begin{aligned} \la_{\al,n,j} &=g\left(\frac{j\pi}{n+1}\right) +\frac{g'\left(\frac{j\pi}{n+1}\right) \widetilde{\eta}_\al\left(\frac{j\pi}{n+1}\right)}{n+1} \\ &\qquad+\frac{g'\left(\frac{j\pi}{n+1}\right) \widetilde{\eta}_\al\left(\frac{j\pi}{n+1}\right)\widetilde{\eta}_\al'\left(\frac{j\pi}{n+1}\right) +\frac{1}{2}g''\left(\frac{j\pi}{n+1}\right) \widetilde{\eta}_\al\left(\frac{j\pi}{n+1}\right)^2}{(n+1)^2} +\widetilde{R}_{\al,n,j}, \end{aligned} \end{equation} where $|\widetilde{r}_{\al,n,j}|\le\frac{C_2(\al)}{n^3}$ and $|\widetilde{R}_{\al,n,j}|\le\frac{C_3(\al)}{n^3}$. \end{prop} Numerical experiments show that~\eqref{eq:la_asympt_exp_new} is more precise than~\eqref{eq:laasympt_w}, especially for $\al$ close to $1/2$, but the errors are almost the same for $\al$ close to $1$. Moreover, $\widetilde{\eta}_\al$ is more complicated than $\eta_\al$ ($\widetilde{\eta}_\al$ has two intervals of monotonicity), and the denominator $n$ naturally appears in the formula~\eqref{eq:localization_eigvals_weak_odd} for $\la_{\al,n,j}$ with odd $j$. In the next proposition we obtain a simplified asymptotic formula for the eigenvalues $\la_{\al,n,j}$ as $j/n$ tends to zero. \begin{prop}\label{prop:first_eigenvalues} Let $\al$ be a fixed number in $(0,1)$. Then $\la_{\al,n,j}$ has the following asymptotic expansion as $j/n$ tends to $0$: \begin{equation} \label{eq:lambda_first_asympt} \la_{\al,n,j} = \frac{j^2\pi^2}{n^2} - \frac{2j^2(1-\al)\pi^2}{\al n^3} + O_\al\left(\frac{j^4}{n^4}\right). \end{equation} \end{prop} \begin{proof} First, we use the following Maclaurin expansions of $\eta_\al$ and $\eta_\al'$: \[ \eta_\al(x)=\pi-\frac{1-\al}{\al}x+O_\al(x^3),\qquad \eta_\al'(x)=-\frac{1-\al}{\al}+O_\al(x^2). \] Hence, by Proposition~\ref{prop:weak_theta_asympt}, \[ \tht_{\al,n,j} =\frac{j\pi}{n}-\frac{(1-\al)j\pi}{\al n^2} +O_\al\left(\frac{j}{n^3}\right) +O_\al\left(\frac{j^3}{n^4}\right). \] We substitute this expansion into $g(x)=x^2+O(x^4)$ and obtain~\eqref{eq:lambda_first_asympt}. \end{proof} In particular,~\eqref{eq:lambda_first_asympt} can be applied when $j$ is fixed and $n$ tends to $\infty$. In this situation,~\eqref{eq:lambda_first_asympt} provides a better error estimate than the asymptotic formula in Theorem~\ref{thm:weak_asympt_weak}. \clearpage \section{Norms of the eigenvectors} \label{sec:eigvec_norm} In this section we prove Theorem~\ref{thm:norm_eigvec} about the eigenvectors of $L_{\al,n}$. We suppose that $\al\in\bC$, $0<\Re(\al)<1$. Formula~\eqref{eq:eivec_w} follows from Proposition~\ref{prop:eigvec_tri_Toep_corner_per}. We divide the rest of the proof into three lemmas. Lemmas~\ref{lem:exact_norm_eigenvector_odd} and~\ref{lem:exact_norm_eigenvector} provide exact formulas~\eqref{eq:exact_norm_eigenvector_odd} and~\eqref{eq:exact_norm_eigenvector_even} for the norm $\|v_{\al,n,j}\|_2$, where $j$ is odd ($j\ge3$) and even, respectively.
In Lemma~\ref{lem:second_term_norm_eigenvector} we prove that for every fixed $\al$ and $j$ even, the second term of~\eqref{eq:exact_norm_eigenvector_even} (which does not contain the factor $n$) is uniformly bounded with respect to $n$ and $j$. In this section we use the following elementary trigonometric identity: \begin{equation}\label{eq:sum_cosines} \sum_{k=1}^n \cos(2kx+y) = \frac{\sin(nx)\cos((n+1)x+y)}{\sin x}. \end{equation} Recall that $v_{\al,n,j}$ is the vector with components~\eqref{eq:eivec_w}. \begin{lem} \label{lem:exact_norm_eigenvector_odd} Let $n\ge 3$ and $j$ be odd, $3\le j\le n$. Then \begin{equation} \label{eq:exact_norm_eigenvector_odd} \|v_{\al,n,j}\|_2 = |1-\al|\sqrt{\frac{n}{2} \la_{\al,n,j}}. \end{equation} \end{lem} \begin{proof} By Theorem~\ref{thm:Localization_weak_eigenvals}, it follows that $\tht_{\al,n,j} = (j-1)\pi/n$ and \begin{equation*} \sin(n\tht_{\al,n,j}) = 0,\qquad \cos(n\tht_{\al,n,j}) =1,\qquad \sin((n-k)\tht_{\al,n,j}) = -\sin(k\tht_{\al,n,j}). \end{equation*} Hence \begin{align*} v_{\al,n,j,k} &= (1-\overline{\al}) \left(\sin(k\tht_{\al,n,j}) - \sin((k-1)\tht_{\al,n,j})\right) \\&= 2(1-\overline{\al}) \sin\frac{\tht_{\al,n,j}}{2} \cos\frac{(2k-1)\tht_{\al,n,j}}{2}. \end{align*} Therefore \begin{equation} \begin{aligned} |v_{\al,n,j,k}|^2 &= 4|1-\al|^2\sin^2\frac{\tht_{\al,n,j}}{2} \cos^2\frac{(2k-1)\tht_{\al,n,j}}{2} \\ & = g(\tht_{\al,n,j})|1-\al|^2 \left(\frac{1+\cos((2k-1)\tht_{\al,n,j})}{2}\right). \end{aligned} \end{equation} Now we sum over $k$ and apply~\eqref{eq:sum_cosines}: \[ \|v_{\al,n,j}\|_2^2 = \frac{1}{2}g(\tht_{\al,n,j})|1-\al|^2 \left(n + \frac{\sin(2n \tht_{\al,n,j})}{2\sin \tht_{\al,n,j}} \right). \] This implies~\eqref{eq:exact_norm_eigenvector_odd} since $\sin(2n \tht_{\al,n,j}) = \sin(2(j-1)\pi) = 0$. \end{proof} For every $x\in[0,\pi]$, we define \begin{equation}\label{eq:xi_al} \begin{aligned} \xi_\al(x) & \eqdef \frac{|1-\al|^2}{2} g(x) \cos(\eta_\al(x)) + \frac{|\al|^2}{2}g(\eta_\al(x))\cos(x)\\ &\pheq+\frac{\Re(\al)-|\al|^2}{2}\left(g(x) + g(x+\eta_\al(x)) -g(\eta_\al(x))\right) - 2|\al|^2\cos(x). \end{aligned} \end{equation} \begin{lem} \label{lem:exact_norm_eigenvector} Let $n\ge 3$ and $j$ be even, $2\le j\le n$. Then \begin{equation} \label{eq:exact_norm_eigenvector_even} \|v_{\al,n,j}\|_2^2 = n\nu_\al(\tht_{\al,n,j}) + \frac{\sin(\eta_\al(\tht_{\al,n,j}))}{\sin(\tht_{\al,n,j})}\xi_\al(\tht_{\al,n,j}). \end{equation} \end{lem} \begin{proof} By Theorem~\ref{thm:weak_characteristic_equation_L}, $\tht_{\al,n,j} = (j-1)\pi/n + \eta_\al(\tht_{\al,n,j})/n$. Then \[ \sin(n\tht_{\al,n,j}) = -\sin(\eta_{\al}(\tht_{\al,n,j})),\qquad \cos(n\tht_{\al,n,j}) =-\cos(\eta_{\al}(\tht_{\al,n,j})), \] \[ \sin((n-k)\tht_{\al,n,j}) = \sin(k\tht_{\al,n,j} - \eta_\al(\tht_{\al,n,j})). \] So, \eqref{eq:eivec_w} transforms into \begin{align*} v_{\al,n,j,k} &= 2(1-\overline{\al}) \sin\frac{\tht_{\al,n,j}}{2} \cos\frac{(2k-1)\tht_{\al,n,j}}{2} \\& \pheq+ 2\overline{\al} \cos\frac{\eta_\al(\tht_{\al,n,j})}{2} \sin\frac{2k\tht_{\al,n,j} - \eta_\al(\tht_{\al,n,j})}{2}. 
\end{align*} Then $|v_{\al,n,j,k}|^2$ can be written as a sum of three terms: \begin{equation}\label{eq:v_odd_abs_three_summands} \begin{aligned} |v_{\al,n,j,k}|^2 & = \frac{|1-\al|^2}{2}g(\tht_{\al,n,j}) (1+\cos((2k-1)\tht_{\al,n,j})) \\& \pheq+ \frac{|\al|^2}{2} \cdot 4\cos^2\frac{\eta_\al(\tht_{\al,n,j})}{2} (1-\cos(2k\tht_{\al,n,j} - \eta_\al(\tht_{\al,n,j}))) \\&\pheq + 4\left(|\al|^2-\Re(\al)\right) \sin\frac{\tht_{\al,n,j}}{2} \cos\frac{\eta_\al(\tht_{\al,n,j})}{2}\times\\&\pheq \times \left(\sin\frac{\eta_\al(\tht_{\al,n,j})-\tht_{\al,n,j}}{2} - \sin\left(2k\tht_{\al,n,j} - \frac{\eta_\al(\tht_{\al,n,j})+\tht_{\al,n,j}}{2}\right)\right). \end{aligned} \end{equation} Now we compute $\sum_{k=1}^n |v_{\al,n,j,k}|^2$ working separately with each of the three terms from~\eqref{eq:v_odd_abs_three_summands}. The sums involving $k\tht_{\al,n,j}$ are transformed by~\eqref{eq:sum_cosines}: \begin{equation} \sum_{k=1}^{n}\cos((2k-1)\tht_{\al,n,j})= \frac{\sin(\eta_\al(\tht_{\al,n,j}))\cos(\eta_\al(\tht_{\al,n,j}))}{\sin(\tht_{\al,n,j})}, \end{equation} \begin{equation} \sum_{k=1}^n\cos(2k\tht_{\al,n,j} - \eta_\al(\tht_{\al,n,j}))= \frac{\sin(\eta_\al(\tht_{\al,n,j})) \cos(\tht_{\al,n,j}) }{\sin(\tht_{\al,n,j})}, \end{equation} \begin{equation} \label{eq:sum3} \sum_{k=1}^n \sin\left(2k\tht_{\al,n,j} - \frac{\eta_\al(\tht_{\al,n,j})+\tht_{\al,n,j}}{2}\right) = \frac{\sin(\eta_\al(\tht_{\al,n,j})) \sin\frac{\eta_\al(\tht_{\al,n,j}) + \tht_{\al,n,j}}{2}}{\sin(\tht_{\al,n,j})}. \end{equation} After some elementary simplifications we obtain~\eqref{eq:exact_norm_eigenvector_even}. \end{proof} In the next lemma we prove that the second term in~\eqref{eq:exact_norm_eigenvector_even} is uniformly bounded with respect to $n$ and $j$. \begin{lem} \label{lem:second_term_norm_eigenvector} There exists $C_{\al}>0$, depending only on $\al$, such that for every $n\ge3$ and every $j$ even, $2\le j\le n$, \begin{equation}\label{eq:bound_second_eigvec_term} \left|\frac{\sin(\eta_\al(\tht_{\al,n,j}))}{\sin(\tht_{\al,n,j})}\xi_\al(\tht_{\al,n,j})\right|\le C_{\al}. \end{equation} \end{lem} \begin{proof} Obviously, $\xi_\al$ is a bounded function on $[0,\pi]$. By a simple application of l'H\^opital's rule, the quotient $\sin(\eta_{\al}(x))/\sin(x)$ has finite limits at $0$ and $\pi$, hence it is bounded on $[0,\pi]$. This implies~\eqref{eq:bound_second_eigvec_term}. \end{proof} \begin{proof}[Proof of Theorem~\ref{thm:norm_eigvec}] It is a well-known fact in the theory of Laplacian matrices that the vector $[1,\ldots,1]^\top$ is an eigenvector associated to the eigenvalue $\la=0$. From Proposition~\ref{prop:eigvec_tri_Toep_corner_per} we obtain~\eqref{eq:eivec_w}. Formula~\eqref{eq:norm_eigvec_j_odd} was proved in Lemma~\ref{lem:exact_norm_eigenvector_odd}. From Lemmas~\ref{lem:exact_norm_eigenvector} and~\ref{lem:second_term_norm_eigenvector} we obtain~\eqref{eq:norm_eigvec_j_even}. \end{proof} \section{Numerical experiments}\label{sec:num_exp} With the help of Sagemath, we have verified numerically (for many values of the parameters) the representations~\eqref{eq:L_char_pol_via_Cheb},~\eqref{eq:char_pol_fact},~\eqref{eq:charpol_factorization_trig} for the characteristic polynomial, the equivalence of the formulas~\eqref{eq:eta}, \eqref{eq:eta1}, \eqref{eq:eta2}, \eqref{eq:eta3} for $\eta_\al$, expressions~\eqref{eq:exact_norm_eigenvector_odd}, \eqref{eq:exact_norm_eigenvector_even} for the norms of the eigenvectors, and some other exact formulas of this paper.
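For readers who wish to reproduce such checks, the following minimal Python sketch (an illustration only, not the Sagemath scripts used for this paper) shows how the even-indexed numbers $\tht_{\al,n,j}$ can be approximated by Newton's method applied to the rearranged main equation $nx-(j-1)\pi-\eta_\al(x)=0$ and compared with the expansion~\eqref{eq:weak_theta_asympt}. The callables \texttt{eta} and \texttt{eta\_prime} are placeholders for $\eta_\al$ and $\eta_\al'$, the initial guess $(j-1)\pi/n$ is assumed to play the role of $d_{n,j}$, and the corresponding eigenvalue approximations are obtained by evaluating $g$ at the returned values.
\begin{verbatim}
import math

def solve_main_equation(n, j, eta, eta_prime, iters=2):
    # Newton iterations for h(x) = n*x - (j-1)*pi - eta(x) = 0,
    # started from the initial approximation (j-1)*pi/n.
    x = (j - 1) * math.pi / n
    for _ in range(iters):
        h = n * x - (j - 1) * math.pi - eta(x)
        x -= h / (n - eta_prime(x))
    return x

def theta_expansion(n, j, eta, eta_prime):
    # Two-term expansion d + eta(d)/n + eta(d)*eta'(d)/n**2,
    # with d = (j-1)*pi/n playing the role of d_{n,j}.
    d = (j - 1) * math.pi / n
    return d + eta(d) / n + eta(d) * eta_prime(d) / n ** 2
\end{verbatim}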
The following web page (written in JavaScript and SVG) contains interactive analogs of Figures~\ref{fig:nx_plus_jpi_eq_eta} and \ref{fig:theta_vs_lambda}, where the user can choose the values of $\al$ and $n$. \begin{center} \myurl{https://www.egormaximenko.com/plots/laplacian\_of\_cycle\_eig.html} \end{center} We introduce the following notation for different approximations of the eigenvalues. \begin{itemize} \item $\la_{\al,n,j}^{\text{gen}}$ are the eigenvalues computed in Sagemath by general algorithms, with double-precision arithmetic. \item $\la_{\al,n,j}^{\text{N}} \eqdef g(\tht_{\al,n,j}^{\text{N}})$, where $\tht_{\al,n,j}^{\text{N}}$ is the numerical solution of the equation $h_{\al,n,j}(x) = 0$ by Newton's method, see Theorem~\ref{thm:Newton}. We use $d_{n,j}$ as the initial approximation. These computations are performed in the high-precision arithmetic with $3322$ binary digits ($\approx 1000$ decimal digits). \item Using $\tht_{\al,n,j}^{\text{N}}$ we compute $v_{\al,n,j}$ by~\eqref{eq:eivec_w}. \item $\la_{\al,n,j}^{\text{bisec}}$ is similar to $\la_{\al,n,j}^{\text{N}}$, but now we solve the equation $h_{\al,n,j}(x) = 0$ by the bisection method, see Proposition~\ref{prop:h_change_sign}. \item $\la_{\al,n,j}^{\text{fp}}$ is computed similarly to $\la_{\al,n,j}^{\text{N}}$, but solving the main equation by the fixed point iteration, see Proposition~\ref{prop:Z_contractive_weak}. \item $\la_{\al,n,j}^{\text{N},2}$ is computed similarly to $\la_{\al,n,j}^{\text{N}}$, but using only two iterations of Newton's method. \item $\la_{\al,n,j}^{\text{asympt}}$ is the approximation given by~\eqref{eq:laasympt_w}. \end{itemize} We have constructed a large series of examples including all rational values $\al$ in $(0,1)$ with denominators $\le10$ and all $n$ with $3\le n\le 256$. In all these examples, we have obtained \[ \max_{1\le j\le n} \|L_{\al,n}v_{\al,n,j}-\la_{\al,n,j}^{\text{N}}v_{\al,n,j}\|_2 <10^{-996},\qquad \max_{1\le j\le n}|\la_{\al,n,j}^{\text{gen}}-\la_{\al,n,j}^{\text{N}}| <10^{-13}. \] Moreover, in all examples \[ \max_{1\le j\le n} |\la_{\al,n,j}^{\text{N}}-\la_{\al,n,j}^{\text{bisec}}| <10^{-998}, \] and for $n>\cK_1(\al)$, \[ \max_{1\le j\le n} |\la_{\al,n,j}^{\text{fp}}-\la_{\al,n,j}^{\text{N}}| <10^{-998}. \] For Theorem~\ref{thm:weak_asympt_weak}, we have computed the errors \[ R_{\al,n,j}^{\text{asympt}}\eqdef \la_{\al,n,j}^{\text{asympt}}-\la_{\al,n,j}^{\mathrm{N}} \] and their maximums $\|R_{\al,n}^{\text{asympt}}\|_\infty=\max_{1\le j\le n}|R_{\al,n,j}^{\text{asympt}}|$. Table~\ref{table:errors_weak_asympt} shows that these errors indeed can be bounded by $O_\al(1/n^3)$. \begin{table}[htb] \caption{Values of $\|R_{\al,n}^{\text{asympt}}\|_\infty$ and $n^3 \|R_{\al,n}^{\text{asympt}}\|_\infty$ for some $\al$ and $n$. 
\label{table:errors_weak_asympt}} \[ \begin{array}{|c|c|c|} \hline \multicolumn{3}{ |c| }{\bigstrut\al=1/3} \\\hline \bigstrut n & \|R_{\al,n}^{\text{asympt}}\|_\infty &\hstrut{}n^3 \|R_{\al,n}^{\text{asympt}}\|_\infty\hstrut{} \\\hline \medstrut 256 & 2.28\times10^{-6} & 38.24 \\ \medstrut 512 & 2.90\times10^{-7} & 38.86 \\ \medstrut 1024 & 3.65\times10^{-8} & 39.17 \\ \medstrut 2048 & 4.58\times10^{-9} & 39.32 \\ \medstrut\hstrut{}4096\hstrut{}&\hstrut{}5.73\times10^{-10}\hstrut{}&\hstrut{}39.40\hstrut{} \\ \medstrut\hstrut{}8192\hstrut{}&\hstrut{}7.17\times10^{-11}\hstrut{}&\hstrut{}39.44\hstrut{} \\ \hline \end{array} \qquad \begin{array}{|c|c|c|} \hline \multicolumn{3}{ |c| }{\bigstrut\al=4/5} \\\hline \bigstrut n & \|R_{\al,n}^{\text{asympt}}\|_\infty &\hstrut{}n^3 \|R_{\al,n}^{\text{asympt}}\|_\infty\hstrut{}\\\hline \medstrut 256 & 6.90\times10^{-7} & 11.58 \\ \medstrut 512 & 8.66\times10^{-8} & 11.62 \\ \medstrut 1024 & 1.08\times10^{-8} & 11.63 \\ \medstrut 2048 & 1.36\times10^{-9} & 11.64 \\ \medstrut\hstrut{}4096\hstrut{}&\hstrut{}1.69\times10^{-10}\hstrut{}&\hstrut{}11.64\hstrut{}\\ \medstrut\hstrut{}8192\hstrut{}&\hstrut{}2.12\times10^{-11}\hstrut{}&\hstrut{}11.64\hstrut{}\\ \hline \end{array} \] \end{table} Let $R_{\al,n,j}^{\text{N},2}\eqdef \la_{\al,n,j}^{\text{N},2}-\la_{\al,n,j}^{\mathrm{N}}$ and $\|R_{\al,n}^{\text{N},2}\|_\infty=\max_{1\le j\le n}|R_{\al,n,j}^{\text{N},2}|$. Table~\ref{table:errors_newton_2_iter} shows that these errors behave indeed as $O_\al(1/n^7)$. \begin{table}[htb] \caption{Values of $\|R_{\al,n}^{\text{N},2}\|_\infty$ and $n^7 \|R_{\al,n}^{\text{N},2}\|_\infty$ for some $\al$ and $n$. \label{table:errors_newton_2_iter}} \[ \begin{array}{|c|c|c|} \hline \multicolumn{3}{ |c| }{\bigstrut\al=1/3} \\\hline \bigstrut n & \|R_{\al,n}^{\text{N},2}\|_\infty &\hstrut{}n^7 \|R_{\al,n}^{\text{N},2}\|_\infty\hstrut{}\\\hline \medstrut 256 & 4.13\times10^{-17} & 2.97 \\ \medstrut 512 & 3.26\times10^{-19} & 3.01 \\ \medstrut 1024 & 2.57\times10^{-21} & 3.03 \\ \medstrut 2048 & 2.01\times10^{-23} & 3.04 \\ \medstrut\hstrut{}4096\hstrut{}&\hstrut{}1.57\times10^{-25}\hstrut{}&\hstrut{}3.04\hstrut{}\\ \medstrut\hstrut{}8192\hstrut{}&\hstrut{}1.23\times10^{-27}\hstrut{}&\hstrut{}3.05\hstrut{}\\ \hline \end{array} \qquad \begin{array}{|c|c|c|} \hline \multicolumn{3}{ |c| }{\bigstrut\al=4/5} \\\hline \bigstrut n & \|R_{\al,n}^{\text{N},2}\|_\infty &\hstrut{}n^7 \|R_{\al,n}^{\text{N},2}\|_\infty\hstrut{}\\\hline \medstrut 256 & 6.30\times10^{-16} & 45.41\\ \medstrut 512 & 5.02\times10^{-18} & 46.33\\ \medstrut 1024 & 3.96\times10^{-20} & 46.80\\ \medstrut 2048 & 3.11\times10^{-22} & 47.04\\ \medstrut\hstrut{}4096\hstrut{}&\hstrut{}2.44\times10^{-24}\hstrut{}&\hstrut{}47.16\hstrut{}\\ \medstrut\hstrut{}8192\hstrut{}&\hstrut{}1.91\times10^{-26}\hstrut{}&\hstrut{}47.22\hstrut{}\\ \hline \end{array} \] \end{table} We have done similar tests for many other values of $\al$ and $n$. Numerical experiments show that $n^3\|R^{\text{asympt}}_{\al,n}\|_\infty$ and $n^7\|R^{\text{N}}_{\al,n}\|_\infty$ are bounded by some numbers depending on $\al$, and that numbers grow as $\al$ tends to $0$ or $1$. Let $R_{\al,n,j}^{\text{asympt},2}\eqdef \la_{\al,n,j}^{\text{N}}- (\frac{j^2\pi^2}{n^2} - \frac{2(1-\al)j^2\pi^2}{\al n^3})$. Table~\ref{table:errors_asympt_first_eigenvalues} shows that these errors behave indeed as $O_\al(j^4/n^4)$. \begin{table}[htb] \caption{Values of $(n^4/j^4)|R_{\al,n,j}^{\text{asympt},2}|$ for $\al=1/3$, and some $n$ and even $j$. 
\label{table:errors_asympt_first_eigenvalues}} \[ \begin{array}{|c|c|c|c|} \hline \multicolumn{4}{ |c| }{\bigstrut\al=1/3} \\\hline \bigstrut n & (n^4/2^4) |R_{\al,n,2}^{\text{asympt},2}| &\hstrut{} (n^4/4^4) |R_{\al,n,4}^{\text{asympt},2}|\hstrut{} & (n^4/6^4) |R_{\al,n,6}^{\text{asympt},2}| \\\hline \medstrut 256 & 21.80 & 0.18 & 4.25 \\ \medstrut 512 & 21.65 & 0.44 & 4.53 \\ \medstrut 1024 & 21.57 & 0.58 & 4.67 \\ \medstrut 2048 & 21.53 & 0.65 & 4.75 \\ \medstrut\hstrut{}4096\hstrut{}& \hstrut{}21.51 \hstrut{}&\hstrut{}0.68 \hstrut{}& 4.79\\ \medstrut\hstrut{}8192\hstrut{}& \hstrut{}21.50 \hstrut{}&\hstrut{}0.70 \hstrut{}& 4.81\\ \hline \end{array} \] \end{table} \begin{thebibliography}{20} \bibitem{A1989} Atkinson, K. E.: An Introduction to Numerical Analysis. 2nd ed. Wiley, New York (1989). \bibitem{BBGM2018} Barrera, M.; B\"{o}ttcher, A., Grudsky, S. M.; Maximenko, E. A.: Eigenvalues of even very nice Toeplitz matrices can be unexpectedly erratic. In: B\"{o}ttcher, A., Potts, D., Stollmann, P., Wenzel, D. (eds.) The Diversity and Beauty of Applied Operator Theory, 51--77. Operator Theory: Advances and Applications, vol. 268. Birkh\"{a}user, Cham (2018), \doi{10.1007/978-3-319-75996-8\_2}. \bibitem{BPZ2020} Basak, A.; Paquette, E.; Zeitouni, O.: Spectrum of random perturbations of Toeplitz matrices with finite symbols. Trans. Amer. Math. Soc. 373, 4999--5023 (2020), \doi{10.1090/tran/8040}. \bibitem{BGM2017} Bogoya, J. M.; Grudsky, S. M.; Maximenko, E. A.: Eigenvalues of Hermitian Toeplitz matrices generated by simple-loop symbols with relaxed smoothness. In: Bini, D.; Ehrhardt, T.; Karlovich, A.; Spitkovsky, I. (eds.) Large Truncated Toeplitz Matrices, Toeplitz Operators, and Related Topics, 179--212. Operator Theory: Advances and Applications, vol. 259. Birkh\"{a}user, Cham (2017), \doi{10.1007/978-3-319-49182-0\_11}. \bibitem{BBGM2015} Bogoya, J. M.; B\"{o}ttcher, A.; Grudsky, S. M.; Maximenko, E. A.: Eigenvalues of Hermitian Toeplitz matrices with smooth simple-loop symbols. J. Math. Anal. Appl. 422, 1308--1334 (2015), \doi{10.1016/j.jmaa.2014.09.057}. \bibitem{BBGM2017} B\"{o}ttcher, A.; Bogoya, J. M.; Grudsky, S. M.; Maximenko, E. A.: Asymptotic formulas for the eigenvalues and eigenvectors of Toeplitz matrices. Sb. Math. 208, 1578--1601 (2017), \doi{10.1070/SM8865}. \bibitem{BFGM2014} Böttcher, A.; Fukshansky, L.; Garcia, S. R.; Maharak, H.: Toeplitz determinants with perturbations in the corners. J. Funct. Anal. 268, 171--193 (2014), \doi{10.1016/j.jfa.2014.10.023}. \bibitem{BYR2006} Britanak, V.; Yip, P. C.; Rao, K. R.: Discrete Cosine and Sine Transforms: General Properties, Fast Algorithms and Integer Approximations. Academic Press, San Diego (2006). \bibitem{Ferguson1980} Ferguson, W. E.: The construction of Jacobi and periodic Jacobi matrices with prescribed spectra. Math. Comput. 35:152, 1203--1220 (1980), \doi{10.2307/2006386}. \bibitem{FF2009} Fernandes, R.; da Fonseca, C. M.: The inverse eigenvalue problem for Hermitian matrices whose graphs are cycles. Linear Multilinear Alg., 57, 673--682 (2009), \doi{10.1080/03081080802187870}. \bibitem{FK2020} Da Fonseca, C. M.; Kowalenko, V.: Eigenpairs of a family of tridiagonal matrices: three decades later. Acta Math. Hung. 160, 376--389 (2020), \doi{10.1007/s10474-019-00970-1}. \bibitem{DV2009} Da Fonseca, C. M.; Veerman, J. J. P.: On the spectra of certain directed paths. Appl. Math. Lett. 22, 1351--1355 (2009), \doi{10.1016/j.aml.2009.03.006}. 
\bibitem{GS2017} Garoni, C.; Sierra-Capizzano, S.: Generalized Locally Toeplitz Sequences: Theory and Applications. Volume I. Springer, Cham (2017). \bibitem{GT2009} Grassmann, W. K.; Tavakoli, J.: Spectrum of certain tridiagonal matrices when their dimension goes to infinity. Linear Algebra Appl. 431, 1208–1217 (2009), \doi{10.1016/j.laa.2009.04.013}. \bibitem{GMS2021} Grudsky, S. M.; Maximenko, E. A.; Soto-Gonz\'alez, A.: Eigenvalues of tridiagonal Hermitian Toeplitz matrices with perturbations in the off-diagonal corners. In: Karapetyants, A. N.; Kravchenko, V. V.; Liflyand, E.; Malonek, H. R. (eds.) Operator Theory and Harmonic Analysis. OTHA 2020. Springer Proceedings in Mathematics \& Statistics, vol 357. Springer, Cham (2021), \doi{10.1007/978-3-030-77493-6\_11}. \bibitem{HornJohnson2013} Horn, R. A.; Johnson, C. R.: Matrix Analysis. 2nd ed. Cambridge University Press, New York (2013). \bibitem{KST1999} Kulkarni, D.; Schmidt, D.; Tsui, S.: Eigenvalues of tridiagonal pseudo-Toeplitz matrices. Linear Algebra Appl. 297, 63--80 (1999), \doi{10.1016/S0024-3795(99)00114-7}. \bibitem{LWHF2014} Lin, Z.; Wang, L.; Han, Z.; Fu, M.: Distributed formation control of multi-agent systems using complex Laplacian. IEEE Transactions on automatic control, 59, 1765--1777 (2014), \doi{10.1109/TAC.2014.2309031}. \bibitem{M2012} Molitierno, J. J.: Applications of Combinatorial Matrix Theory to Laplacian Matrices of Graphs. CRC Press, Florida (2012). \bibitem{NR2019} Noschese, S.; Reichel, L.: Eigenvector sensitivity under general and structured perturbations of tridiagonal Toeplitz-type matrices. Numer. Linear Algebra Appl. 26, e2232 (2019), \doi{10.1002/nla.2232}. \bibitem{OA2014} \"Otele\c{s}, A.; Akbulak, M.: Positive integer powers of one type of complex tridiagonal matrix. Bull. Malays. Math. Sci. Soc. (2) 37, 971--981 (2014), \myurl{http://math.usm.my/bulletin/pdf/v37n3/v37n4p6.pdf}. \bibitem{R2017} Reyes-Lega, A. F.: Some aspects of operator algebras in quantum physics. In: Cano, L.; Arboleda, S.; Cardona, A.; Ocampo, H.; Reyes-Lega, A. F. (eds.) Geometric, Algebraic and Topological Methods for Quantum Field Theory. World Scientific, 1--74 (2016), \doi{10.1142/9789814730884\_0001}. \bibitem{SM2014} Strang, G.; MacNamara, S.: Functions of difference matrices are Toeplitz plus Hankel. SIAM Rev. 56, 525--546 (2014), \doi{10.1137/120897572}. \bibitem{Spivak1994} Spivak, M.: Calculus, 3rd ed. Publish or Perish, Houston (1994). \bibitem{Tilli1998} Tilli, P.: Locally Toeplitz sequences: spectral properties and applications. Linear Algebra Appl. 278, 91--120 (1998), \doi{10.1016/S0024-3795(97)10079-9}. \bibitem{Tyrtyshnikov1996} Tyrtyshnikov, E. E.: A unifying approach to some old and new theorems on distribution and clustering. Linear Algebra Appl. 232, 1--43 (1996), \doi{10.1016/0024-3795(94)00025-5}. \bibitem{TS2017} Tavakolipour, H.; Shakeri, F.: On tropical eigenvalues of tridiagonal Toeplitz matrices. Linear Algebra Appl. 539, 198--218 (2017), \doi{10.1016/j.laa.2017.11.009}. \bibitem{VHB2018} Veerman, J. J. P.; Hammond, D. K.; Baldivieso, P. E.: Spectra of certain large tridiagonal matrices. Linear Algebra Appl. 548, 123–147 (2018), \doi{10.1016/j.laa.2018.03.005}. \bibitem{W2008} Willms, A. R.: Analytic results for the eigenvalues of certain tridiagonal matrices. Siam J. Matrix Anal. Appl. 30, 639--656 (2008), \doi{10.1137/070695411}. \bibitem{YuehCheng2008} Yueh, W. C.; Cheng, S. S.: Explicit eigenvalues and inverses of tridiagonal Toeplitz matrices with four perturbed corners. ANZIAM J. 
49, 361--387 (2008), \doi{10.1017/S1446181108000102}. \bibitem{Z2014} Zampieri, G.: Involutions of real intervals. Annales Polonici Mathematici 112, 25--35 (2014), \doi{10.4064/ap112-1-2}. \bibitem{ZJJ2019} Zhang, M.; Jiang, X.; Jiang, Z.: Explicit determinants, inverses andeigenvalues of four band Toeplitz matrices with perturbed rows. Special Matrices 7, 52--66 (2019), \doi{10.1515/spma-2019-0004}. \end{thebibliography} \end{document}
2205.12409v1
http://arxiv.org/abs/2205.12409v1
On the number of tilting modules over a class of Auslander algebras
\documentclass[a4paper,reqno]{amsart} \textheight 220mm \textwidth 150mm \hoffset -16mm \usepackage{amssymb} \usepackage{amstext} \usepackage{amsmath} \usepackage{amscd} \usepackage{amsthm} \usepackage{amsfonts} \usepackage{enumerate} \usepackage{graphicx} \usepackage{latexsym} \usepackage{mathrsfs} \usepackage{mathtools} \usepackage[all]{xy} \xyoption{all} \usepackage{pstricks} \usepackage{lscape} \usepackage{comment} \newtheorem{theorem}{Theorem}[section] \newtheorem{acknowledgement}[theorem]{Acknowledgement} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{definition-proposition}[theorem]{Definition-Proposition} \newtheorem{problem}[theorem]{Problem} \newtheorem{question}[theorem]{Question} \newtheorem{conjecture}[theorem]{Conjecture} \theoremstyle{definition} \newtheorem{definition}[theorem]{Definition} \newtheorem{assumption}[theorem]{Assumption} \newtheorem{remark}[theorem]{Remark} \newtheorem{example}[theorem]{Example} \newtheorem{observation}[theorem]{Observation} \newtheorem{construction}[theorem]{Construction} \newcommand{\Ext}{\operatorname{Ext}\nolimits} \newcommand{\Tor}{\operatorname{Tor}\nolimits} \newcommand{\Hom}{\operatorname{Hom}\nolimits} \newcommand{\End}{\operatorname{End}\nolimits} \newcommand{\Tr}{\operatorname{Tr}\nolimits} \renewcommand{\mod}{\mathsf{mod}\hspace{.01in}} \newcommand{\Mod}{\mathsf{Mod}\hspace{.01in}} \newcommand{\add}{\mathsf{add}\hspace{.01in}} \newcommand{\Fac}{\mathsf{Fac}\hspace{.01in}} \newcommand{\tilt}{\mbox{\rm tilt}\hspace{.01in}} \newcommand{\sttilt}{\mbox{\rm s$\tau$-tilt}\hspace{.01in}} \newcommand{\op}{\operatorname{op}\nolimits} \newcommand{\bo}{\operatorname{b}\nolimits} \newcommand{\DD}{\mathsf{D}} \newcommand{\TT}{\mathcal{T}} \newcommand{\grade}{\operatorname{grade}\nolimits} \newcommand{\rad}{\operatorname{rad}\nolimits} \newcommand{\soc}{\operatorname{soc}\nolimits} \newcommand{\projdim}{\mathop{{\rm proj.dim}}\hspace{.01in}} \newcommand{\injdim}{\mathop{{\rm inj.dim}}\hspace{.01in}} \newcommand{\gldim}{\mathop{{\rm gl.dim}}\hspace{.01in}} \newcommand{\RHom}{\mathbf{R}\strut\kern-.2em\operatorname{Hom}\nolimits} \newcommand{\LotimesL}{\mathop{\otimes^{\mathbf{L}}_\Lambda}\nolimits} \newcommand{\new}[1]{{\blue #1}} \newcommand{\old}[1]{{\red #1}} \renewcommand{\comment}[1]{{\green #1}} \numberwithin{equation}{section} \usepackage{paralist} \let\itemize\compactitem \let\enditemize\endcompactitem \let\enumerate\compactenum \let\endenumerate\endcompactenum \let\description\compactdesc \let\enddescription\endcompactdesc \hoffset-9mm \def\Im{\mathop{\rm Im}\nolimits} \def\Ker{\mathop{\rm Ker}\nolimits} \def\Coker{\mathop{\rm Coker}\nolimits} \def\Tr{\mathop{\rm Tr}\nolimits} \def\rad{\mathop{\rm rad}\nolimits} \def\rgrade{\mathop{\rm r.grade}\nolimits} \def\fd{\mathop{\rm fd}\nolimits} \def\id{\mathop{\rm id}\nolimits} \def\pd{\mathop{\rm pd}\nolimits} \def\add{\mathop{\rm add}\nolimits} \def\Proj{\mathop{\rm Proj}\nolimits} \def\cx{\mathop{\rm cx}\nolimits} \def\inf{\mathop{\rm inf}\nolimits} \def\sup{\mathop{\rm sup}\nolimits} \def\gldim{\mathop{\rm gldim}\nolimits} \begin{document} \title{On the number of tilting modules over a class of Auslander algebras} \thanks{2000 Mathematics Subject Classification: 16G10} \thanks{Keywords: Auslander algebra; tilting module; support $\tau$-tilting module; Dynkin quiver}. \thanks{$*$ is the corresponding author. Both of the authors are supported by NSFC (Nos. 12171207). X. 
Zhang is supported by the Project Funded by the Priority Academic Program Development of Jiangsu Higher Education Institutions and the Starting project of Jiangsu Normal University} \author{Dan Chen} \address{D. Chen: School of Mathematics and Statistics, Jiangsu Normal University, Xuzhou, 221116, P. R. China.} \email{[email protected]} \author{Xiaojin Zhang$^*$} \address{X. Zhang: School of Mathematics and Statistics, Jiangsu Normal University, Xuzhou, 221116, P. R. China.} \email{[email protected], [email protected]} \maketitle \begin{abstract} Let $\Lambda$ be a radical square zero algebra of a Dynkin quiver and let $\Gamma$ be the Auslander algebra of $\Lambda$. Then the number of tilting right $\Gamma$-modules is $2^{m-1}$ if $\Lambda$ is of $A_{m}$ type for $m\geq 1$. Otherwise, the number of tilting right $\Gamma$-modules is $2^{m-3}\times14$ if $\Lambda$ is either of $D_{m}$ type for $m\geq 4$ or of $E_{m}$ type for $m=6,7,8$. \end{abstract} \section{\bf{Introduction}} Tilting theory has been essential in the representation theory of finite dimensional algebras since the 1970s (see \cite{BGP,BB,HR}). Tilting modules play an important role in tilting theory, so it is interesting, but often difficult, to classify the tilting modules over a given algebra. Many algebraists have worked on this topic. Br\"{u}stle, Hille, Ringel and R\"{o}hrle \cite{BHRR} classified the tilting modules over the Auslander algebra of $K[x]/(x^{n})$. Iyama and Zhang \cite{IZ1} studied tilting modules over Auslander-Gorenstein algebras. Geuenich \cite{G} studied tilting modules of finite projective dimension for the Auslander algebra of $K[x]/(x^{n})$. Zhang \cite{Z2} determined the number of tilting modules over the Auslander algebras of radical square zero Nakayama algebras. Xie, Gao and Huang \cite{XGH} studied the number of tilting modules over the Auslander algebras of radical cube zero Nakayama algebras. For more recent developments on tilting modules, we refer to \cite{AT,K,PS}. In 2014, Adachi, Iyama and Reiten \cite{AIR} introduced the notion of $\tau$-tilting modules as generalizations of tilting modules in terms of mutation. This makes it possible to obtain the tilting modules in terms of support $\tau$-tilting modules. Therefore, it is important to classify support $\tau$-tilting modules for a given algebra. Adachi \cite{A1} classified support $\tau$-tilting modules over Nakayama algebras; Adachi \cite{A2} and Zhang \cite{Z1} studied $\tau$-rigid modules over algebras with radical square zero; Mizuno \cite{M} classified $\tau$-tilting modules over preprojective algebras of Dynkin type; Iyama and Zhang \cite{IZ2} classified $\tau$-tilting modules over the Auslander algebra of $K[x]/(x^{n})$. For more recent developments on $\tau$-tilting modules, we refer to \cite{AiH,DIJ, KK, W, Zi,Z3}. In this paper, we study tilting modules over the Auslander algebras of radical square zero algebras of Dynkin quivers in terms of $\tau$-tilting theory. By using a bijection for Auslander-Gorenstein algebras established by Iyama and the second author \cite{IZ1}, we obtain the number of tilting modules over the Auslander algebras of radical square zero algebras of Dynkin quivers, which extends the results in \cite{Z2}. More precisely, we prove the following main result. \begin{theorem}\label{1.1} Let $\Lambda$ be a radical square zero algebra of a Dynkin quiver and let $\Gamma$ be the Auslander algebra of $\Lambda$.
Then the number of tilting right $\Gamma$-modules is $2^{m-1}$ if $\Lambda$ is of $A_{m}$ type for $m\geq 1$. Otherwise, the number of tilting right $\Gamma$-modules is $2^{m-3}\times14$ if $\Lambda$ is either of $D_{m}$ type for $m\geq 4$ or of $E_{m}$ type for $m=6,7,8$. \end{theorem} The paper is organized as follows. In Section 2, we recall some basic preliminaries on tilting modules, $\tau$-tilting modules and Auslander algebras. In Section 3, we prove the main results and give examples illustrating them. Throughout this paper, all the algebras are finite dimensional basic algebras over an algebraically closed field $K$ and all modules are finitely generated right modules. By a tilting module we always mean a classical tilting module. We use $\tau$ to denote the Auslander-Reiten translation. For an algebra $\Lambda$, we use $\mod\Lambda$ to denote the category of finitely generated right $\Lambda$-modules. \section{\bf{Preliminaries}} In this section, we recall definitions and basic facts on tilting modules, $\tau$-tilting modules, and Auslander algebras. For a module $M$, we use pd$_{\Lambda}M$ and $|M|$ to denote the projective dimension of $M$ and the number of indecomposable direct summands of $M$, respectively. Now we recall the definition of a tilting module \cite{HR}. \begin{definition}\label{2.1} Let $\Lambda$ be an algebra and $T\in \mod \Lambda$. Then $T$ is called a tilting module if the following conditions are satisfied: \begin{enumerate}[\rm(1)] \item pd$_{\Lambda}{T} \leq1$. \item Ext$^{i}_{\Lambda}(T,T)=0$, for $i\geq1$. \item $|T|=|\Lambda|$. \end{enumerate} \end{definition} Now we recall the definition of $\tau$-tilting modules introduced in \cite{AIR}. \begin{definition}\label{2.2} Let $\Lambda$ be an algebra and $M\in\mod \Lambda$. \begin{enumerate}[\rm(1)] \item We call $M$ {\it $\tau$-rigid} if Hom$_{\Lambda}(M,\tau M)=0$. \item $M$ is called a {\it $\tau$-tilting} module if $M$ is $\tau$-rigid and $|M|=|\Lambda|$. \item We call $M$ in $\mod\Lambda$ {\it support $\tau$-tilting} if there exists an idempotent $e$ of $\Lambda$ such that $M$ is a $\tau$-tilting ($\Lambda/(e)$)-module. \end{enumerate} \end{definition} The following lemma \cite[Proposition 2.4]{AIR} on $\tau$-rigid modules is important. \begin{lemma}\label{2.3} Let $X$ be in $\mod\Lambda$ with a minimal projective presentation $P_{1}\stackrel{d_{1}}{\longrightarrow} P_{0}\stackrel{d_{0}}{\longrightarrow} X\longrightarrow 0$. Then $X$ is $\tau$-rigid if and only if the map $ \Hom_{\Lambda}(P_{0},X)\stackrel{d_{1}^*}{\longrightarrow} \Hom_{\Lambda}(P_{1},X)$ is surjective, where ${d_1}^*=\Hom_\Lambda(d_{1},X)$. \end{lemma} We also need the following definition of support $\tau$-tilting pairs. \begin{definition}\label{2.4} Let $(M,P)$ be a pair with $M\in\mod\Lambda$ and $P$ a projective $\Lambda$-module. \begin{enumerate}[\rm(1)] \item We call $(M,P)$ a $\tau$-rigid pair if $M$ is $\tau$-rigid and Hom$_{\Lambda}(P,M)$=0. \item We call $(M,P)$ a support $\tau$-tilting (respectively, almost complete support $\tau$-tilting) pair if $(M,P)$ is a $\tau$-rigid pair and $|M|+|P|=|\Lambda|$ (respectively, $|M|+|P|=|\Lambda|-1$). \end{enumerate} \end{definition} For an algebra $\Lambda$, we use $s\tau$-tilt $\Lambda$ to denote the set of isomorphism classes of support $\tau$-tilting modules over $\Lambda$. Denote by $Q$(s$\tau$-tilt$\Lambda$) the support $\tau$-tilting quiver of $\Lambda$. The following lemma in \cite[Corollary 2.38]{AIR} is useful in this paper.
\begin{lemma}\label{2.5} If $Q$(s$\tau$-tilt$\Lambda$) has a finite connected component $C$, then $Q$(s$\tau$-tilt$\Lambda$)=$C$. \end{lemma} In the following we recall the definition of Auslander algebras from \cite{ARS}. \begin{definition}\label{2.6} An algebra $\Lambda$ is called an Auslander algebra if gl.dim$\Lambda\leq 2$ and $E_{i}(\Lambda)$ is projective for $i=0,1$, where $E_{i}(\Lambda)$ is the $(i+1)$-th term in a minimal injective resolution of $\Lambda$. \end{definition} It is shown in \cite{ARS} that there is a one-to-one correspondence between Auslander algebras and algebras of finite representation type. Let $\Lambda$ be an algebra of finite representation type, and $M$ an additive generator of $\mod\Lambda$. Then we call $\Gamma=\End_{\Lambda}M$ {\it the Auslander algebra} of $\Lambda$. For an algebra $\Lambda$, we use tilt$\Lambda$ to denote the set of isomorphism classes of tilting modules in $\mod\Lambda$. The following theorem on tilting modules over Auslander algebras in \cite{Z2} is essential in this paper. For more details on this bijection we refer to \cite{IZ1, J}. \begin{theorem}\label{2.7} Let $\Lambda$ be an Auslander algebra and let $e$ be an idempotent such that $e\Lambda$ is the additive generator of the projective-injective $\Lambda$-modules. Then there is a bijection between the set tilt$\Lambda$ of tilting modules over $\Lambda$ and the set $s\tau$-tilt $\Lambda/(e)$ of support $\tau$-tilting modules over $\Lambda/(e)$. \end{theorem} Now we recall the definition of Dynkin algebras as follows. \begin{definition}\label{2.8} We call an algebra $\Lambda$ of Dynkin type if the quiver of $\Lambda$ is one of the following quivers: $A_{m}$ ($ m\geq 1$): $$1\stackrel{a_{1}}{\longrightarrow} 2\stackrel{a_{2}}{\longrightarrow} \cdots \stackrel{a_{m-2}}{\longrightarrow} m-1 \stackrel{a_{m-1}}{\longrightarrow}m $$ $D_{m}$ ($ m\geq 4$): $$\xymatrix{& & 1 \ar[d]^{a_{1}} & & & &\\ & 2 \ar[r]^{a_{2}} & 3\ar[r]^{a_{3}} & 4 \ar[r]^{a_{4}} & \ar[r] \cdots\ar[r]^{a_{m-2}} &m-1\ar[r]^{a_{m-1}}&m}$$ $E_{6}$: $$\xymatrix{& & & 3\ar[d]^{a_{3}} & &\\ &1 \ar[r]^{a_{1}} & 2 \ar[r]^{a_{2}} & 4\ar[r]^{a_{4}} & 5 \ar[r]^{a_{5}} & 6}$$ $E_{7}$: $$\xymatrix{& & & 3\ar[d]^{a_{3}} & & &\\ &1 \ar[r]^{a_{1}} & 2 \ar[r]^{a_{2}} & 4\ar[r]^{a_{4}} & 5 \ar[r]^{a_{5}} & 6\ar[r]^{a_{6}} &7}$$ $E_{8}$: $$\xymatrix{& & & 3\ar[d]^{a_{3}} & & & &\\ &1 \ar[r]^{a_{1}} & 2 \ar[r]^{a_{2}} & 4\ar[r]^{a_{4}} & 5 \ar[r]^{a_{5}} & 6\ar[r]^{a_{6}} &7\ar[r]^{a_{7}}&8}$$ \end{definition} In the following we recall properties of the block decomposition of an algebra from \cite[pp. 92--93, Theorem 1, Proposition 2]{Al}. \begin{proposition}\label{2.9} Let $A$ be an algebra and $M\in \mod A$. Then \begin{enumerate}[\rm(1)] \item $A$ has a unique decomposition into a direct sum of indecomposable subalgebras, that is, $A=A_1\oplus A_2\oplus\dots\oplus A_r$. \item $M$ has a unique decomposition as $M=M_1\oplus M_2\oplus\dots \oplus M_r$ with $M_i\in \mod A_i$ and $M_iA_j=0$ for $j\not=i$. \item $\Hom_{A}(M_i,M_j)=0$ for any $i\not=j$. \end{enumerate} \end{proposition} \section{\bf{Main results}} In this section, we show the number of tilting modules over Auslander algebras of radical square zero Dynkin algebras. By a direct calculation, one gets the Auslander algebras of radical square zero Dynkin algebras as follows. \begin{proposition}\label{3.1} \begin{enumerate}[\rm(1)] \item Let $\Gamma$ be the Auslander algebra of a radical square zero algebra of type $A_{m}$.
Then $\Gamma$ is given by the quiver $Q_{1}$: $$ 1\stackrel{a_{1}}{\longleftarrow} 2\stackrel{a_{2}}{\longleftarrow} \cdots \stackrel{a_{2m-3}}{\longleftarrow}2m-2 \stackrel{a_{2m-2}}{\longleftarrow}2m-1$$ $\mathrm{with}$ $\mathrm{the}$ $\mathrm{relations}$:$$a_{2k-1} a_{2k}=0 (1\leq k \leq m-1 ).$$ \item Let $\Gamma$ be the Auslander algebra of a radical square zero algebra of type $D_m$. Then $\Gamma$ is given by the quiver $Q_{2}$: $$\xymatrix{& & & & & &2m-4\ar[ld]_{a_{2m-5}} & & 2m-1 \ar[ld]_{a_{2m-1}} \\ & 1& 2 \ar[l]_{a_{1}} &\cdots\ar[l]_{a_{2}} & 2m-6 \ar[l]_{a_{2m-7}} &2m-5\ar[l]_{a_{2m-6}} & & 2m-2 \ar[lu]_{a_{2m-3}}\ar[ld]_{a_{2m-2}} & \\ & & & & & & 2m-3\ar[lu]_{a_{2m-4}}& & 2m \ar[lu]_{a_{2m}} }$$ $\mathrm{with}$ $\mathrm{the}$ $\mathrm{relations}:$ $a_{1} a_{2}=0, a_{3} a_{4}=0, \cdots, a_{2m-7}a_{2m-6}=0, a_{2m-5}a_{2m-3}= a_{2m-4} a_{2m-2}, $ \\ $ a_{2m-3} a_{2m-1}=0, a_{2m-2} a_{2m}=0 $. \item Let $\Gamma_m$ be the Auslander algebra of a radical square zero algebra of type $E_m$ with $m=6,7,8$. Then $\Gamma_{m}$ is given by the quiver $Q_{6}$: $$\xymatrix{& & & & & & 6\ar[ld]_{a_{5}} & &9\ar[ld]_{a_{9}} & & \\ &1 & 2 \ar[l]_{a_{1}} & 3\ar[l]_{a_{2}} & 4 \ar[l]_{a_{3}} &5 \ar[l]_{a_{4}} & & 8\ar[lu]_{a_{7}}\ar[ld]_{a_{8}}& & & \\ & & & & & &7\ar[lu]_{a_{6}} & &10\ar[lu]_{a_{10}} & 11\ar[l]_{a_{11}} & 12\ar[l]_{a_{12}}}$$ $\mathrm{with}$ $\mathrm{the}$ $\mathrm{relations}:$ $a_{1} a_{2}=0, a_{3} a_{4}=0, a_{5} a_{7}=a_{6} a_{8}, a_{7} a_{9}=0, a_{8} a_{10}=0, a_{11} a_{12}=0 $. $Q_{7}$: $$\xymatrix{& & & & & & & & 8\ar[ld]_{a_{7}} & &11\ar[ld]_{a_{11}} & & \\ &1 & 2 \ar[l]_{a_{1}} & 3\ar[l]_{a_{2}} & 4 \ar[l]_{a_{3}} &5 \ar[l]_{a_{4}} & 6\ar[l]_{a_{5}}& 7 \ar[l]_{a_{6}} & & 10\ar[lu]_{a_{9}}\ar[ld]_{a_{10}}& & & \\ & & & & & & & &9\ar[lu]_{a_{8}} & &12\ar[lu]_{a_{12}} & 13\ar[l]_{a_{13}} & 14\ar[l]_{a_{14}}}$$ $\mathrm{with}$ $\mathrm{the}$ $\mathrm{relations}:$ $a_{1} a_{2}=0, a_{3} a_{4}=0, a_{5} a_{6}=0, a_{7} a_{9}=a_{8} a_{10}, a_{9} a_{11}=0, a_{10} a_{12}=0, a_{13}a_{14}=0 $. $Q_{8}$: $$\xymatrix{& & & & & & & 10\ar[ld]_{a_{9}} & &13\ar[ld]_{a_{13}} & & \\ &1 & 2 \ar[l]_{a_{1}} & 3\ar[l]_{a_{2}} & \cdots \ar[l]_{a_{3}} & 8 \ar[l]_{a_{7}}& 9 \ar[l]_{a_{8}} & & 12\ar[lu]_{a_{11}}\ar[ld]_{a_{12}}& & & \\ & & & & & & &11\ar[lu]_{a_{10}} & &14\ar[lu]_{a_{14}} & 15\ar[l]_{a_{15}} & 16\ar[l]_{a_{16}}}$$ $\mathrm{with}$ $\mathrm{the}$ $\mathrm{relations}:$ $a_{1} a_{2}=0, a_{3} a_{4}=0, a_{5} a_{6}=0, a_{7} a_{8}=0 , a_{9} a_{11}=a_{10} a_{12}, a_{11} a_{13}=0, a_{12} a_{14}=0, a_{15}a_{16}=0 $. \end{enumerate} \end{proposition} To prove the main results we need the following proposition in \cite {Z2}. \begin{proposition}\label{3.2} Let $\Lambda$ be a semi-simple algebra with $n$ simple modules. Then the number of support $\tau$-tilting $\Lambda$-modules is $2^{n}$. \end{proposition} The following proposition on the support $\tau$-tilting modules over direct sums of algebras is essential in this paper. \begin{proposition}\label{3.3} Let $\Lambda$ be an algebra which can be decomposed as a direct sum of two subalgebras, that is, $\Lambda=\Lambda_{1}\oplus\Lambda_{2}$. \begin{enumerate}[\rm(1)] \item For any $M\in\mod\Lambda$, $M$ can be decomposed as $M_1\oplus M_2$ with $M_i\in \mod\Lambda_i$ for $i=1,2$. \item For any $M\in\mod\Lambda$ with the decomposition $M=M_1\oplus M_2$, $M$ is a support $\tau$-tilting module if both $M_1\in \mod\Lambda_{1}$ and $M_2\in\mod\Lambda_2$ are support $\tau$-tilting modules. 
\item For any $M\in\mod\Lambda$ with the decomposition $M=M_1\oplus M_2$, $M$ is a $\tau$-tilting module if both $M_1\in \mod\Lambda_{1}$ and $M_2\in\mod\Lambda_2$ are $\tau$-tilting modules. \item If $|s\tau$-tilt$\Lambda_{1}|=m$ and $|s\tau$-tilt$\Lambda_{2}|=n$, then $|s\tau$-tilt$\Lambda|=mn$. \end{enumerate} \end{proposition} \begin{proof} (1) This follows directly from Proposition \ref{2.9}. (2) We divide the proof into three steps. (a) We show that $M_1\oplus M_2$ is $\tau$-rigid in $\mod \Lambda$. Let $P_{1}\stackrel{d_{1}}{\longrightarrow} P_{0}\stackrel{d_{0}}{\longrightarrow} M_1\longrightarrow 0$ be a minimal projective presentation of $M_1\in\mod\Lambda_1$ and let $Q_{1}\stackrel{d_{1}^{'}}{\longrightarrow} Q_{0}\stackrel{d_{0}^{'}}{\longrightarrow} M_2 \longrightarrow 0$ be a minimal projective presentation of $M_2 \in\mod\Lambda_2$. By Proposition \ref{2.9}, one gets a minimal projective presentation of $M=M_1\oplus M_2$ as follows: $P_{1}\oplus Q_{1} \stackrel{d_{1}\oplus d_{1}^{'}}{\longrightarrow} P_{0}\oplus Q_{0}\stackrel{d_{0}\oplus d_{0}^{'}}{\longrightarrow} M_1\oplus M_2 \longrightarrow 0\ \ \ \ \ \ \ (1)$ Since $M_1$ is a support $\tau$-tilting module, the map $\Hom_{\Lambda_{1}}(P_{0},M_1)\stackrel{(d_{1},M_1)}{\longrightarrow} \Hom_{\Lambda_{1}}(P_{1},M_1)$ is surjective by Lemma \ref{2.3}. Similarly, the map $\Hom_{\Lambda_{2}}(Q_{0},M_2)\stackrel{(d_{1}^{'},M_2)}{\longrightarrow}\Hom_{\Lambda_{2}}(Q_{1},M_2)$ is surjective since $M_2$ is a support $\tau$-tilting module. Applying $\Hom_{\Lambda}(-,M_1\oplus M_2)$ to $(1)$, by Proposition \ref{2.9} we have the following exact sequence $ 0\longrightarrow \Hom_{\Lambda}(M_1\oplus M_2, M_1\oplus M_2)\stackrel{F_{0}}\longrightarrow \Hom_{\Lambda}(P_{0}\oplus Q_{0},M_1\oplus M_2)\stackrel{F_{1}}{\longrightarrow} \Hom_{\Lambda}(P_{1}\oplus Q_{1},M_1\oplus M_2),$ \noindent where $F_{i}=(d_{i},M_1)\oplus(d_{i}^{'}, M_2)$ for $i=0,1$. Since both the map $(d_{1},M_1)$ and the map $(d_{1}^{'},M_2)$ are surjective, $(d_{1},M_1)\oplus (d_{1}^{'},M_2)$ is surjective. By Lemma \ref{2.3}, $M_1\oplus M_2$ is $\tau$-rigid. (b) Denote by $(M_1,P)$ and $(M_2,Q)$ the support $\tau$-tilting pairs in $\mod\Lambda_{1}$ and $\mod\Lambda_{2}$, respectively. We show that $(M_1\oplus M_2,P\oplus Q)$ is a $\tau$-rigid pair in $\mod\Lambda$. Since $(M_1,P)$ is a support $\tau$-tilting pair in $\mod \Lambda_{1}$, we have $\Hom_{\Lambda_{1}}(P,M_1)=0$. Similarly, one gets $\Hom_{\Lambda_{2}}(Q,M_2)=0$. Then by Proposition \ref{2.9}, $\Hom_{\Lambda}(P\oplus Q,M_1\oplus M_2)\cong \Hom_{\Lambda_{1}}(P,M_1)\oplus \Hom_{\Lambda_{2}}(Q,M_2)\cong 0$, so $(M_1\oplus M_2,P\oplus Q)$ is a $\tau$-rigid pair in $\mod\Lambda$ by (a). (c) We show that $(M_1\oplus M_2,P\oplus Q)$ is a support $\tau$-tilting pair. Since $(M_1,P)$ is a support $\tau$-tilting pair, one gets $|M_1|+|P|=|\Lambda_{1}|$. Similarly, the fact that $(M_2,Q)$ is a support $\tau$-tilting pair in $\mod \Lambda_{2}$ implies $|M_2|+|Q|=|\Lambda_{2}|$. So one gets $|M_1|+|M_2|+|P|+|Q|=|\Lambda_{1}|+|\Lambda_{2}|=|\Lambda|$. Then by (b), $(M_1\oplus M_2,P\oplus Q)$ is a support $\tau$-tilting pair in $\mod \Lambda$, and hence $M_1\oplus M_2$ is a support $\tau$-tilting module. (3) This follows directly from (2). (4) Suppose that $|s\tau$-tilt$\Lambda_{1}|=m$ and $|s\tau$-tilt$\Lambda_{2}|=n$. By (2), the direct sums of support $\tau$-tilting modules over $\Lambda_{1}$ and $\Lambda_{2}$ give $mn$ support $\tau$-tilting $\Lambda$-modules, which form a finite connected component $\mathcal{C}$ of the quiver $Q(s\tau$-tilt$\Lambda)$. Then by Lemma \ref{2.5}, we get that $|s\tau$-tilt$\Lambda|=mn$.
\end{proof} The following lemma is also useful. \begin{lemma}\label{3.4}Let $\Lambda$ be a algebra with $Q$: $$\xymatrix{ & 4\ar[d] & \\ & 3 &\ar[l]5}$$ Then the number of support $\tau$-tilting $\Lambda$-modules is $14$. \end{lemma} \textbf{Proof}. In what follows, we denote a module by its composition factors. Now we can draw the quiver $Q$($s\tau$-tilt$\Lambda$) as follows. $$\begin{xy} 0;<3.4pt,0pt>:<0pt,2.5pt>:: (0,0) *+{ \left[\begin{smallmatrix} 3 \end{smallmatrix}\middle| \begin{smallmatrix} 4\\ &3 \end{smallmatrix}\middle| \begin{smallmatrix} &5\\ 3 \end{smallmatrix}\right]}="1", (-30,-16) *+{\left[\begin{smallmatrix} 4&&5\\ &3 \end{smallmatrix}\middle| \begin{smallmatrix} 4\\ &3 \end{smallmatrix}\middle| \begin{smallmatrix} &5\\ 3 \end{smallmatrix}\right]}="2", (0,-16) *+{\left[\begin{smallmatrix} 3 \end{smallmatrix}\middle| \begin{smallmatrix} \end{smallmatrix}\middle| \begin{smallmatrix} &5\\ 3 \end{smallmatrix}\right]}="3", (30,-16) *+{\left[\begin{smallmatrix} 3 \end{smallmatrix}\middle| \begin{smallmatrix} 4\\ &3 \end{smallmatrix}\middle| \begin{smallmatrix} \end{smallmatrix}\right]}="4", (-40,-32) *+{\left[\begin{smallmatrix} 4&&5\\ &3 \end{smallmatrix}\middle| \begin{smallmatrix} 5 \end{smallmatrix}\middle| \begin{smallmatrix} &5\\ 3 \end{smallmatrix}\right]}="5", (-20,-32) *+{\left[\begin{smallmatrix} 4&&5\\&3 \end{smallmatrix}\middle| \begin{smallmatrix} 4\\ &3 \end{smallmatrix}\middle| \begin{smallmatrix} 4 \end{smallmatrix}\right]}="6", (-40,-48) *+{\left[\begin{smallmatrix} 4&&5\\ &3 \end{smallmatrix}\middle| \begin{smallmatrix} 5 \end{smallmatrix}\middle| \begin{smallmatrix} 4 \end{smallmatrix}\right]}="7", (-25,-48) *+{\left[\begin{smallmatrix} \end{smallmatrix}\middle| \begin{smallmatrix} 5 \end{smallmatrix}\middle|\begin{smallmatrix} 4 \end{smallmatrix} \right]}="8", (-7,-48) *+{\left[\begin{smallmatrix} \end{smallmatrix}\middle| \begin{smallmatrix} 5 \end{smallmatrix}\middle| \begin{smallmatrix} &5\\3 \end{smallmatrix}\right]}="9", (30,-48) *+{\left[\begin{smallmatrix} \end{smallmatrix}\middle| \begin{smallmatrix} 4\\&3 \end{smallmatrix}\middle| \begin{smallmatrix} 4 \end{smallmatrix}\right]}="10", (-30,-64) *+{\left[\begin{smallmatrix} \end{smallmatrix}\middle| \begin{smallmatrix} \end{smallmatrix}\middle| \begin{smallmatrix} 5\end{smallmatrix}\right]}="11", (0,-64) *+{\left[\begin{smallmatrix} 3\end{smallmatrix}\middle| \begin{smallmatrix} \end{smallmatrix}\middle| \begin{smallmatrix}\end{smallmatrix}\right]}="12", (30,-64) *+{\left[\begin{smallmatrix} \end{smallmatrix}\middle| \begin{smallmatrix} 4\end{smallmatrix}\middle| \begin{smallmatrix} \end{smallmatrix}\right]}="13", (0,-80) *+{\left[\begin{smallmatrix} \end{smallmatrix}\middle| \begin{smallmatrix} \end{smallmatrix}\middle| \begin{smallmatrix} \end{smallmatrix}\right]}="14", \ar"1";"2", \ar"1";"3", \ar"1";"4", \ar"2";"5", \ar"2";"6", \ar"5";"7", \ar"5";"9", \ar"6";"7", \ar"6";"10", \ar"3";"9", \ar"3";"12", \ar"4";"10", \ar"4";"12", \ar"7";"8", \ar"9";"11", \ar"10";"13", \ar"8";"11", \ar"8";"13", \ar"11";"14",\ar"12";"14", \ar"13";"14", \end{xy}$$ By Lemma \ref{2.5}, then we can get the number of support $\tau$-tilting $\Lambda$-modules is $14$. The following result on the number of tilting modules over the Auslander algebras of radical square zero algebras of type $A_n$ has been shown in \cite {Z2}. \begin{theorem}\label{3.5} Let $\Gamma$ be the Auslander algebra of a radical square zero of type $A_{m}$ with $m\geq 1$. Then the number of tilting $\Gamma$-modules is $2^{m-1}$. 
\end{theorem} For a vertex $i$ in a quiver $Q$, we denote by $P(i)$, $I(i)$ and $S(i)$ the indecomposable projective, injective and simple modules corresponding to $i$, respectively. Now we are in a position to prove the following part of our main results. \begin{theorem}\label{3.6} Let $\Lambda$ be a radical square zero algebra of type $D_m$ with $m\geq 4$ and let $\Gamma$ be the Auslander algebra of $\Lambda$. Then the number of tilting $\Gamma$-modules is $2^{m-3}\times 14$. \end{theorem} \textbf{Proof}. By Proposition \ref{3.1}, we know the quiver of the algebra $\Gamma$. Moreover, the indecomposable projective-injective modules are as follows: $P(2)=I(1), P(4)=I(2), P(6)=I(4),\cdots,P(2m-2)=I(2m-6), P(2m-1)=I(2m-3), P(2m)=I(2m-4)$. Take the idempotent $e=e_{2}+e_{4}+e_{6}+\cdots +e_{2m-6} +e_{2m-2}+e_{2m-1}+e_{2m}$. Then $\Gamma/(e)$ is a direct sum of a semi-simple algebra with $m-3$ vertices and an algebra in Lemma \ref{3.4}. Then by Theorem \ref{2.7}, Proposition \ref{3.2}, Proposition \ref{3.3} and Lemma \ref{3.4}, one gets that the number of tilting modules in $\mod\Gamma$ is $2^{m-3}\times 14$. Now we show the number of tilting modules over the Auslander algebras of radical square zero algebras of type $E_m$ for $m=6,7,8$. \begin{theorem}\label{3.7} Let $\Lambda$ be a radical square zero algebra of type $E_m$ with $m=6,7,8$. Let $\Gamma$ be the Auslander algebra of $\Lambda$. Then the number of tilting $\Gamma$-modules is $2^{m-3}\times 14$. \end{theorem} \textbf{Proof}. If $\Lambda$ is of type $E_6$, then by Proposition \ref{3.1}, we get the quiver of $\Gamma$. Moreover, the indecomposable projective-injective modules are as follows: $P(2)=I(1), P(4)=I(2), P(8)=I(4), P(9)=I(7), P(11)=I(6), P(12)=I(11)$. Take the idempotent $e=e_{2}+e_{4}+e_{8} +e_{9} +e_{11}+e_{12}$. Then the quotient algebra $\Gamma/(e)$ is a direct sum of a semi-simple algebra with $3$ vertices and an algebra in Lemma \ref{3.4}. Then by Theorem \ref{2.7}, Proposition \ref{3.2}, Proposition \ref{3.3} and Lemma \ref{3.4}, we get that the number of tilting modules in $\mod\Gamma$ is $2^{3}\times14=112$. If $\Lambda$ is of type $E_7$, then by Proposition \ref{3.1}, we get the quiver of $\Gamma$. Moreover, the indecomposable projective-injective modules are as follows: $P(2)=I(1), P(4)=I(2), P(6)=I(4), P(10)=I(6), P(11)=I(9), P(13)=I(8), P(14)=I(13)$. Take the idempotent $e=e_{2}+e_{4}+e_{6} +e_{10} +e_{11}+e_{13}+e_{14}$. Hence the quotient algebra $\Gamma/(e)$ is a direct sum of a semi-simple algebra with $4$ vertices and an algebra in Lemma \ref{3.4}. Then by Theorem \ref{2.7}, Proposition \ref{3.2}, Proposition \ref{3.3} and Lemma \ref{3.4}, we get that the number of tilting modules in $\mod\Gamma$ is $2^{4}\times14=224$. If $\Lambda$ is of type $E_8$, then by Proposition \ref{3.1}, we get the quiver of $\Gamma$. Moreover, the indecomposable projective-injective modules are as follows: $P(2)=I(1), P(4)=I(2), P(6)=I(4), P(8)=I(6), P(12)=I(8), P(13)=I(11), P(15)=I(10), P(16)=I(15)$. Take the idempotent $e=e_{2}+e_{4}+e_{6} +e_{8}+e_{12} +e_{13}+e_{15}+e_{16}$. So the quotient algebra $\Gamma/(e)$ is a direct sum of a semi-simple algebra with $5$ vertices and an algebra in Lemma \ref{3.4}. Then by Theorem \ref{2.7}, Proposition \ref{3.2}, Proposition \ref{3.3} and Lemma \ref{3.4}, we get that the number of tilting modules in $\mod\Gamma$ is $2^{5}\times14=448$. At the end of this paper, we give some examples to illustrate our main results.
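The counts appearing in these proofs and in the examples below can be tabulated by the following short Python sketch (a plain illustration of the counting formulas above; the function name is ours).
\begin{verbatim}
# Number of tilting modules over the Auslander algebra of a radical
# square zero algebra of Dynkin type, following the theorems above.
def tilting_count(dynkin_type, m):
    if dynkin_type == "A":          # type A_m, m >= 1
        return 2 ** (m - 1)
    if dynkin_type in ("D", "E"):   # type D_m (m >= 4) or E_m (m = 6, 7, 8)
        return 2 ** (m - 3) * 14
    raise ValueError("unknown Dynkin type")

# The cases treated in the examples below:
print(tilting_count("A", 3))   # 4   (type A_3)
print(tilting_count("D", 4))   # 28  (type D_4)
print(tilting_count("E", 6))   # 112 (type E_6)
\end{verbatim}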
\begin{example}\label{3.8} Let $\Lambda$ be a radical square zero algebra of type $A_3$. Then Auslander algebra $\Gamma$ of $\Lambda$ is given by the quiver $Q: 1\stackrel{u_{1}}{\longleftarrow} 2\stackrel{u_{2}}{\longleftarrow} 3\stackrel{u_{3}}{\longleftarrow} 4\stackrel{u_{4}}{\longleftarrow} 5$. with the relations: $u_{1} u_{2}=0$, $u_{3} u_{4}=0$. Then the tilting $\Gamma$-modules are follows: $$T_{1}=\Gamma , T_{2}=P(5)\oplus P(4)\oplus S(4)\oplus P(2)\oplus P(1)$$ $$T_{3}=P(5)\oplus P(4)\oplus P(3)\oplus P(2)\oplus S(2)$$ $$T_{4}=P(5)\oplus P(4)\oplus S(4)\oplus P(2)\oplus S(2)$$ $$ 2^{m-1}=2^{3-1}=4$$ \end{example} \begin{example}\label{3.9} Let $\Lambda$ be a radical square zero algebra of type $D_4$. Then Auslander algebra $\Gamma$ is given by the quiver $Q$:$$ \xymatrix{& & & &4\ar[ld]_{u_{3}} & & 7\ar[ld]_{u_{7}} \\ & 1& 2 \ar[l]_{u_{1}} & 3\ar[l]_{u_{2}} & & 6 \ar[lu]_{u_{5}}\ar[ld]_{u_{6}} & \\ & & & & 5\ar[lu]_{u_{4}}& & 8\ar[lu]_{u_{8}} }$$ with the relations: $u_{1} u_{2}=0, u_{3}u_{5}= u_{4} u_{6}, u_{5} u_{7}=0, u_{6} u_{8}=0 $. The number of support $\tau$ tilting $\Gamma/(e)$-modules are as follows: $$\mathrm{T_{1}}=1\oplus5, \mathrm{T_{2}}=0\oplus5$$ $$\mathrm{T_{3}}=1\oplus3, \mathrm{T_{4}}=0\oplus3$$ $$\mathrm{T_{5}}=1\oplus4, \mathrm{T_{6}}= 0\oplus4$$ $$\mathrm{T_{7}}=1\oplus0, \mathrm{T_{8}}=0\oplus0$$ $$\mathrm{T_{9}}=1\oplus5\oplus4,\mathrm{T_{10}}= 0\oplus5\oplus4$$ $$\mathrm{T_{11}}=1\oplus5\oplus{ \left[\begin{smallmatrix} 5\\ &3 \end{smallmatrix}\right]}, \mathrm{T_{12}}=0\oplus5\oplus{ \left[\begin{smallmatrix} 5\\ &3 \end{smallmatrix}\right]}$$ $$\mathrm{T_{13}}=1\oplus4\oplus{ \left[\begin{smallmatrix} 4\\ &3 \end{smallmatrix}\right]},\mathrm{T_{14}}= 0\oplus4\oplus{ \left[\begin{smallmatrix} 4\\ &3 \end{smallmatrix}\right]}$$ $$\mathrm{T_{15}}=1\oplus3\oplus{ \left[\begin{smallmatrix} &5\\ 3 \end{smallmatrix}\right]}, \mathrm{T_{16}}=0\oplus3\oplus{ \left[\begin{smallmatrix} &5\\ 3 \end{smallmatrix}\right]}$$ $$\mathrm{T_{17}}=1\oplus3\oplus{ \left[\begin{smallmatrix} 4\\ &3 \end{smallmatrix}\right]},\mathrm{T_{18}}= 0\oplus3\oplus{ \left[\begin{smallmatrix} 4\\ &3 \end{smallmatrix}\right]}$$ $$\mathrm{T_{19}}=1\oplus{ \left[\begin{smallmatrix} 4&&5\\ &3 \end{smallmatrix}\right]}\oplus5\oplus4, \mathrm{T_{20}}=0\oplus{ \left[\begin{smallmatrix} 4&&5\\ &3 \end{smallmatrix}\right]}\oplus5\oplus4$$ $$\mathrm{T_{21}}=1\oplus3\oplus{ \left[\begin{smallmatrix} 4\\ &3 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} &5\\ 3 \end{smallmatrix}\right]}, \mathrm{T_{22}}=0\oplus3\oplus{ \left[\begin{smallmatrix} 4\\ &3 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} &5\\ 3 \end{smallmatrix}\right]}$$ $$\mathrm{T_{23}}=1\oplus{ \left[\begin{smallmatrix} 4&&5\\ &3 \end{smallmatrix}\right]}\oplus5 \oplus{ \left[\begin{smallmatrix} &5\\ 3 \end{smallmatrix}\right]}, \mathrm{T_{24}}=0\oplus{ \left[\begin{smallmatrix} 4&&5\\ &3 \end{smallmatrix}\right]}\oplus5 \oplus{ \left[\begin{smallmatrix} &5\\ 3 \end{smallmatrix}\right]}$$ $$\mathrm{T_{25}}=1\oplus{ \left[\begin{smallmatrix} 4&&5\\ &3 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} 4\\ &3 \end{smallmatrix}\right]}\oplus4,\mathrm{T_{26}}= 0\oplus{ \left[\begin{smallmatrix} 4&&5\\ &3 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} 4\\ &3 \end{smallmatrix}\right]}\oplus4$$ $$\mathrm{T_{27}}=1\oplus{ \left[\begin{smallmatrix} 4&&5\\ &3 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} 4\\ &3 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} &5\\ 3 
\end{smallmatrix}\right]}, \mathrm{T_{28}}=0\oplus{ \left[\begin{smallmatrix} 4&&5\\ &3 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} 4\\ &3 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} &5\\ 3 \end{smallmatrix}\right]}$$ $$ 2^{m-3}\times14=2^{4-3}\times14=28$$ By Theorem \ref{2.7}, then the number of tilting $\Gamma$-modules is 28 ($=2^{4-3}\times 14$). \end{example} \begin{example}\label{3.10} Let $\Lambda$ be a radical square zero algebra of type $E_{6}$. Then Auslander algebra $\Gamma$ is given by the quiver $Q$: $$\xymatrix{& & & & & & 6\ar[ld]_{u_{5}} & &9\ar[ld]_{u_{9}} & & \\ &1 & 2 \ar[l]_{u_{1}} & 3\ar[l]_{u_{2}} & 4 \ar[l]_{u_{3}} &5 \ar[l]_{u_{4}} & & 8\ar[lu]_{u_{7}}\ar[ld]_{u_{8}}& & & \\ & & & & & &7\ar[lu]_{u_{6}} & &10\ar[lu]_{u_{10}} & 11\ar[l]_{u_{11}} & 12\ar[l]_{u_{12}}}$$ with the relations: $u_{1} u_{2}=0, u_{3} u_{4}=0, u_{5} u_{7}=u_{6} u_{8}, u_{7} u_{9}=0, u_{8} u_{10}=0, u_{11} u_{12}=0 $. The number of support $\tau$ tilting $\Gamma/(e)$-modules are as follows: $$\mathrm{T_{1}}=1\oplus3\oplus7\oplus0, \mathrm{T_{2}}=0\oplus3\oplus7\oplus0$$ $$\mathrm{T_{3}}=1\oplus3\oplus5\oplus10, \mathrm{T_{4}}=0\oplus3\oplus5\oplus10$$ $$\mathrm{T_{5}}=1\oplus3\oplus6\oplus10, \mathrm{T_{6}}= 0\oplus3\oplus6\oplus10$$ $$\mathrm{T_{7}}=1\oplus3\oplus0\oplus10, \mathrm{T_{8}}=0\oplus3\oplus0\oplus10$$ $$\mathrm{T_{9}}=1\oplus3\oplus7\oplus10, \mathrm{T_{10}}=0\oplus3\oplus7\oplus10$$ $$\mathrm{T_{11}}=1\oplus3\oplus5\oplus0, \mathrm{T_{12}}=0\oplus3\oplus5\oplus0$$ $$\mathrm{T_{13}}=1\oplus3\oplus6\oplus0, \mathrm{T_{14}}= 0\oplus3\oplus6\oplus0$$ $$\mathrm{T_{15}}=1\oplus3\oplus0\oplus0, \mathrm{T_{16}}=0\oplus3\oplus0\oplus0$$ $$\mathrm{T_{17}}=1\oplus0\oplus7\oplus0, \mathrm{T_{18}}=0\oplus0\oplus7\oplus0$$ $$\mathrm{T_{19}}=1\oplus0\oplus5\oplus0, \mathrm{T_{20}}=0\oplus0\oplus5\oplus0$$ $$\mathrm{T_{21}}=1\oplus0\oplus6\oplus0, \mathrm{T_{22}}= 0\oplus0\oplus6\oplus0$$ $$\mathrm{T_{23}}=1\oplus0\oplus0\oplus0, \mathrm{T_{24}}=0\oplus0\oplus0\oplus0$$ $$\mathrm{T_{25}}=1\oplus0\oplus7\oplus10, \mathrm{T_{26}}=0\oplus0\oplus7\oplus10$$ $$\mathrm{T_{27}}=1\oplus0\oplus5\oplus10, \mathrm{T_{28}}=0\oplus0\oplus5\oplus10$$ $$\mathrm{T_{29}}=1\oplus0\oplus6\oplus10, \mathrm{T_{30}}= 0\oplus0\oplus6\oplus10$$ $$\mathrm{T_{31}}=1\oplus0\oplus0\oplus10, \mathrm{T_{32}}=0\oplus0\oplus0\oplus10$$ $$\mathrm{T_{33}}=1\oplus3\oplus7\oplus6\oplus0, \mathrm{T_{34}}= 0\oplus3\oplus7\oplus6\oplus0$$ $$\mathrm{T_{35}}=1\oplus0\oplus7\oplus6\oplus0, \mathrm{T_{36}}= 0\oplus0\oplus7\oplus6\oplus0$$ $$\mathrm{T_{37}}=1\oplus3\oplus7\oplus6\oplus10, \mathrm{T_{38}}= 0\oplus3\oplus7\oplus6\oplus10$$ $$\mathrm{T_{39}}=1\oplus0\oplus7\oplus6\oplus10, \mathrm{T_{40}}= 0\oplus0\oplus7\oplus6\oplus10$$ $$\mathrm{T_{41}}=1\oplus3\oplus5\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus0, \mathrm{T_{42}}=0\oplus3\oplus5\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus0$$ $$\mathrm{T_{43}}=1\oplus3\oplus5\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus0, \mathrm{T_{44}}= 0\oplus3\oplus5\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus0$$ $$\mathrm{T_{45}}=1\oplus3\oplus7\oplus{ \left[\begin{smallmatrix} 7\\ &5 \end{smallmatrix}\right]}\oplus0, \mathrm{T_{46}}=0\oplus3\oplus7\oplus{ \left[\begin{smallmatrix} 7\\ &5 \end{smallmatrix}\right]}\oplus0$$ $$\mathrm{T_{47}}=1\oplus3\oplus6\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus0, \mathrm{T_{48}}= 
0\oplus3\oplus6\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus0$$ $$\mathrm{T_{49}}=1\oplus0\oplus5\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus0, \mathrm{T_{50}}=0\oplus0\oplus5\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus0$$ $$\mathrm{T_{51}}=1\oplus0\oplus5\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus0, \mathrm{T_{52}}= 0\oplus0\oplus5\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus0$$ $$\mathrm{T_{53}}=1\oplus0\oplus7\oplus{ \left[\begin{smallmatrix} 7\\ &5 \end{smallmatrix}\right]}\oplus0, \mathrm{T_{54}}=0\oplus0\oplus7\oplus{ \left[\begin{smallmatrix} 7\\ &5 \end{smallmatrix}\right]}\oplus0$$ $$\mathrm{T_{55}}=1\oplus0\oplus6\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus0, \mathrm{T_{56}}= 0\oplus0\oplus6\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus0$$ $$\mathrm{T_{57}}=1\oplus3\oplus5\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus10, \mathrm{T_{58}}=0\oplus3\oplus5\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus10$$ $$\mathrm{T_{59}}=1\oplus3\oplus5\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus10, \mathrm{T_{60}}= 0\oplus3\oplus5\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus10$$ $$\mathrm{T_{61}}=1\oplus3\oplus7\oplus{ \left[\begin{smallmatrix} 7\\ &5 \end{smallmatrix}\right]}\oplus10, \mathrm{T_{62}}=0\oplus3\oplus7\oplus{ \left[\begin{smallmatrix} 7\\ &5 \end{smallmatrix}\right]}\oplus10$$ $$\mathrm{T_{63}}=1\oplus3\oplus6\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus10, \mathrm{T_{64}}= 0\oplus3\oplus6\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus10$$ $$\mathrm{T_{65}}=1\oplus0\oplus5\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus10, \mathrm{T_{66}}=0\oplus0\oplus5\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus10$$ $$\mathrm{T_{67}}=1\oplus0\oplus5\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus10, \mathrm{T_{68}}= 0\oplus0\oplus5\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus10$$ $$\mathrm{T_{69}}=1\oplus0\oplus7\oplus{ \left[\begin{smallmatrix} 7\\ &5 \end{smallmatrix}\right]}\oplus10, \mathrm{T_{70}}=0\oplus0\oplus7\oplus{ \left[\begin{smallmatrix} 7\\ &5 \end{smallmatrix}\right]}\oplus10$$ $$\mathrm{T_{71}}=1\oplus0\oplus6\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus10, \mathrm{T_{72}}= 0\oplus0\oplus6\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus10$$ $$\mathrm{T_{73}}=1\oplus3\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus7\oplus6\oplus0, \mathrm{T_{74}}=0\oplus3\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus7\oplus6\oplus0$$ $$\mathrm{T_{75}}=1\oplus0\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus7\oplus6\oplus0, \mathrm{T_{76}}=0\oplus0\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus7\oplus6\oplus0$$ $$\mathrm{T_{77}}=1\oplus3\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus7\oplus6\oplus10, \mathrm{T_{78}}=0\oplus3\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus7\oplus6\oplus10$$ $$\mathrm{T_{79}}=1\oplus0\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus7\oplus6\oplus10, \mathrm{T_{80}}=0\oplus0\oplus{ 
\left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus7\oplus6\oplus10$$ $$\mathrm{T_{81}}=1\oplus0\oplus5\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus0, \mathrm{T_{82}}=0\oplus0\oplus5\oplus{ \left[\begin{smallmatrix} 6\\ &5\end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus0$$ $$\mathrm{T_{83}}=1\oplus3\oplus5\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus0, \mathrm{T_{84}}=0\oplus3\oplus5\oplus{ \left[\begin{smallmatrix} 6\\ &5\end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus0$$ $$\mathrm{T_{85}}=1\oplus0\oplus5\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus10, \mathrm{T_{86}}=0\oplus0\oplus5\oplus{ \left[\begin{smallmatrix} 6\\ &5\end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus10$$ $$\mathrm{T_{87}}=1\oplus3\oplus5\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus10, \mathrm{T_{88}}=0\oplus3\oplus5\oplus{ \left[\begin{smallmatrix} 6\\ &5\end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus10$$ $$\mathrm{T_{89}}=1\oplus3\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus7 \oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus0, \mathrm{T_{90}}=0\oplus3\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5\end{smallmatrix}\right]}\oplus7 \oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus0$$ $$\mathrm{T_{91}}=1\oplus3\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus6\oplus0, \mathrm{T_{92}}= 0\oplus3\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus6\oplus0$$ $$\mathrm{T_{93}}=1\oplus0\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus7 \oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus0, \mathrm{T_{94}}=0\oplus0\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5\end{smallmatrix}\right]}\oplus7 \oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus0$$ $$\mathrm{T_{95}}=1\oplus0\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus6\oplus0, \mathrm{T_{96}}= 0\oplus0\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus6\oplus0$$ $$\mathrm{T_{97}}=1\oplus3\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus7 \oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus10, \mathrm{T_{98}}=0\oplus3\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5\end{smallmatrix}\right]}\oplus7 \oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus10$$ $$\mathrm{T_{99}}=1\oplus3\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus6\oplus10, \mathrm{T_{100}}= 0\oplus3\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} 
6\\ &5 \end{smallmatrix}\right]}\oplus6\oplus10$$ $$\mathrm{T_{101}}=1\oplus0\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus7 \oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus10, \mathrm{T_{102}}=0\oplus0\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5\end{smallmatrix}\right]}\oplus7 \oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus10$$ $$\mathrm{T_{103}}=1\oplus0\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus6\oplus10, \mathrm{T_{104}}= 0\oplus0\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus6\oplus10$$ $$\mathrm{T_{105}}=1\oplus0\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus0, \mathrm{T_{106}}=0\oplus0\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus0$$ $$\mathrm{T_{107}}=1\oplus3\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus0, \mathrm{T_{108}}=0\oplus3\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus0$$ $$\mathrm{T_{109}}=1\oplus3\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus10, \mathrm{T_{110}}=0\oplus3\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus10$$ $$\mathrm{T_{111}}=1\oplus0\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus10, \mathrm{T_{112}}=0\oplus0\oplus{ \left[\begin{smallmatrix} 6&&7\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} 6\\ &5 \end{smallmatrix}\right]}\oplus{ \left[\begin{smallmatrix} &7\\ 5 \end{smallmatrix}\right]}\oplus10$$ $$ 2^{m-3}\times14=2^{6-3}\times14=112$$ By Theorem \ref{2.7}, then the number of tilting $\Gamma$-modules is 112 ($=2^{6-3}\times 14$). \end{example} \begin{thebibliography}{101} \bibitem[A1]{A1} T. Adachi, The classification of $\tau$-tilting modules over Nakayama algebras, J. Algebra., 452 (2016), 227-262. \bibitem[A2]{A2} T. Adachi, Characterizing $\tau$-tilting algebras with radical square zero, Proc. Amer. Math. Soc., 144 (2016), 4673-4685. \bibitem[Al]{Al} J.L. Alperin, Local representation theory, Cambridge Univ. Press, Cambridge, 1986. \bibitem[AiH]{AiH} T. Aihara and T. Honma, $\tau$-tilting finite triangular matrix algebras, J. Pure Appl. Algebra, 225(2)(2021), 106785. \bibitem[AIR]{AIR} T. Adachi, O. Iyama and I. Reiten, $\tau$-tilting theory, Compos. Math., 150(3) (2014), 415-452. \bibitem[ARS]{ARS} M. Auslander, I. Reiten, S. O. 
Smal\o{}, Representation theory of Artin algebras. Corrected reprint of the 1995 original. Cambridge Studies in Advanced Mathematics, 36. Cambridge University Press, Cambridge, 1997. \bibitem[AT]{AT} T. Adachi and M. Tsukamoto, Tilting modules and dominant dimension with respect to injective modules, Q. J. Math., 72(3) (2021), 855-884. \bibitem[BB]{BB} S. Brenner and M. C. R. Butler, Generalization of the Bernstein-Gelfand-Ponomarev reflection functors, Lecture Notes in Mathematics, Vol. 839 (Springer-Verlag, 1980), pp. 103-169. \bibitem[BGP]{BGP} I. N. Bernstein, I. M. Gelfand and V. A. Ponomarev, Coxeter functors and Gabriel's theorem, Russ. Math. Surv., 28(1973), 17-32. \bibitem[BHRR]{BHRR} T. Br\"{u}stle, L. Hille, C. M. Ringel and G. R\"{o}hrle, The $\Delta$-filtered modules without self-extensions for the Auslander algebra of $k[T]/\langle T^{n}\rangle $, Algebr. Represent. Theory, 2 (1999), 295-312. \bibitem[DIJ]{DIJ} L. Demonet, O. Iyama and G. Jasso, $\tau$-tilting finite algebras, bricks and g-vectors, Int. Math. Res. Not., 3 (2019), 852-892. \bibitem[G]{G} J. Geuenich, Tilting modules for the Auslander algebra of $K[x]/(x^{n})$. Comm. Algebra, 50(1) (2022), 82-95. \bibitem[HR]{HR} D. Happel and C. M. Ringel, Tilted algebras, Trans. Amer. Math. Soc., 274 (1982), 399-443. \bibitem[IZ1]{IZ1} O. Iyama and X. Zhang, Tilting modules over Auslander-Gorenstein algebra, Pacific J. Math., 298(2) (2019), 399-416. \bibitem[IZ2]{IZ2} O. Iyama and X. Zhang, Classifying $\tau$-tilting modules over the Auslander algebra of $K[x]/(x^{n})$. J. Math. Soc. Japan, 72(3) (2020), 731-764. \bibitem[J]{J} G. Jasso, Reduction of $\tau$-Tilting Modules and Torsion Pairs, Int. Math. Res. Not. IMRN, 16(2015), 7190-7237. \bibitem[K]{K} N. Kajita, The number of tilting modules over hereditary algebras and tilting modules over Auslander algebras, thesis, Graduate School of Mathematics, Nagoya University (2008) (in Japanese). \bibitem[KK]{KK} R. Koshio and Y. Kozakai, On support $\tau$-tilting modules over blocks covering cyclic blocks, J. Algebra, 580(2021), 84-103. \bibitem[M]{M} Y. Mizuno, Classifying $\tau$-tilting modules over preprojective algebras of Dynkin type, Math. Zeit., 277(3)(2014), 665-690. \bibitem[PS]{PS} M. Pressland and J. Sauter, Special tilting modules for algebras with positive dominant dimension. Glasg. Math. J., 64(1)(2022), 79-105. \bibitem[W]{W} Q. Wang, On $\tau$-tilting finiteness of the Schur algebra, J. Pure Appl. Algebra, 226(1) (2022), 106818. \bibitem[XGH]{XGH} Z. Xie, H. Gao and Z. Huang, Tilting modules over Auslander algebras of Nakayama algebras with radical cube zero, Internat. J. Algebra Comput., 31(2) (2021) 303-324. \bibitem[Z1]{Z1} X. Zhang, $\tau$-rigid modules for algebras with radical square zero, Algebra Colloq., 28(1) (2021), 91-104. \bibitem[Z2]{Z2} X. Zhang, Classifying tilting modules over the Auslander algebras of radical square zero Nakayama algebras, J. Algebra Appl., 21(2) (2022), Paper No. 2250041. \bibitem[Z3]{Z3} X. Zhang, Self-orthogonal $\tau$-tilting modules and tilting modules, J. Pure Appl. Algebra, 226(3)(2022), 10860. \bibitem[Zi]{Zi} S. Zito, $\tau$-tilting finite cluster-tilted algebras, Proc. Edinb. Math. Soc., 63(4)(2020), 950-955. \end{thebibliography} \end{document}
2205.12367v1
http://arxiv.org/abs/2205.12367v1
Contour Integration for Eigenvector Nonlinearities
\documentclass[12pt]{extarticle} \usepackage[utf8]{inputenc} \usepackage[T1]{fontenc} \usepackage{caption} \tolerance 10000 \headheight 0in \headsep 0in \evensidemargin 0in \oddsidemargin \evensidemargin \textwidth 6.5in \topmargin .25in \textheight 8.8in \synctex=1 \usepackage{amssymb} \usepackage{amsmath} \usepackage{comment} \usepackage{amsthm} \usepackage{amssymb} \usepackage{algorithmicx} \usepackage{algorithm} \usepackage{algpseudocode} \usepackage{mathtools} \usepackage[margin=1cm]{caption} \usepackage{subcaption} \usepackage{mathrsfs} \usepackage{xcolor} \usepackage{tikz,tikz-3dplot,tikz-cd} \usepackage{pgfplots} \pgfplotsset{compat=1.16} \usepackage{url} \usepackage[finalizecache=false,frozencache=true]{minted} \usemintedstyle{tango} \setminted[julia]{frame=lines, rulecolor=\color{white!80!black}, fontsize=\small, numbers=right, numbersep=-5pt, obeytabs=true, tabsize=4} \definecolor{mycolor1}{rgb}{0.00000,0.44700,0.74100} \definecolor{mycolor2}{rgb}{0.8500, 0.3250, 0.0980} \definecolor{mycolor3}{rgb}{0.9290, 0.6940, 0.1250} \definecolor{mycolor4}{rgb}{0.4940, 0.1840, 0.5560} \definecolor{mycolor5}{rgb}{0.4660, 0.6740, 0.1880} \newtheorem{assumption}{Assumption} \newtheorem{definition}{Definition}[section] \newtheorem{proposition}{Proposition}[section] \newtheorem{corollary}{Corollary}[section] \newtheorem{theorem}{Theorem}[section] \newtheorem{exercise}{Exercise}[section] \newtheorem{lemma}{Lemma}[section] \theoremstyle{remark} \newtheorem{remark}{Remark}[section] \newtheorem{examplex}{Example}[section] \newenvironment{example} {\pushQED{\qed}\renewcommand{\qedsymbol}{$\diamond$}\examplex} {\popQED\endexamplex} \usepackage{mathtools} \mathtoolsset{showonlyrefs,showmanualtags} \numberwithin{equation}{section} \newcommand{\R}{\mathbb{R}} \newcommand{\Z}{\mathbb{Z}} \newcommand{\C}{\mathbb{C}} \newcommand{\N}{\mathbb{N}} \newcommand{\Q}{\mathbb{Q}} \newcommand{\PP}{\mathbb{P}} \newcommand{\A}{\mathcal{A}} \newcommand{\B}{\mathcal{B}} \newcommand{\CC}{\mathcal{C}} \newcommand{\im}{\operatorname{im}} \newcommand{\Specm}{\operatorname{Specm}} \newcommand{\Hom}{\operatorname{Hom}} \newcommand{\blue}[1]{\textcolor{blue}{#1}} \newcommand{\red}[1]{\textcolor{red}{#1}} \newcommand{\todo}{\textbf{TODO}} \newcommand{\HF}{\text{HF}} \title{Contour Integration for Eigenvector Nonlinearities} \author{Rob Claes, Karl Meerbergen and Simon Telen} \date{} \begin{document} \maketitle \begin{abstract} Solving polynomial eigenvalue problems with eigenvector nonlinearities (PEPv) is an interesting computational challenge, outside the reach of the well-developed methods for nonlinear eigenvalue problems. We present a natural generalization of these methods which leads to a contour integration approach for computing all eigenvalues of a PEPv in a compact region of the complex plane. Our methods can be used to solve any suitably generic system of polynomial or rational function equations. \end{abstract} \section{Introduction} We consider a matrix valued function $T : \C^n \times \C \rightarrow \C^{n \times n}, (x,z) \mapsto T(x,z)$ such that, for any fixed $z \in \C$, $T$ is given by homogeneous polynomials in $x$, and for any fixed $x$, $T$ is given by polynomials in $z$. We assume moreover that all polynomials in the $i$-th row of $T$ are of the same degree $d_i$. If any of these degrees is positive, the function $T$ defines a polynomial eigenvalue problem with eigenvector nonlinearities (PEPv), given by the equations \begin{equation} \label{eq:PEPv} T(x,z) \cdot x = 0. 
\end{equation} By homogeneity, these equations are well-defined on $\PP^{n-1} \times \C$, where $\PP^{n-1}$ is the $(n-1)$-dimensional complex projective space. Points $(x^*, z^*) \in \PP^{n-1} \times \C$ such that $T(x^*,z^*)\cdot x^* =0$ are called eigenpairs. For such an eigenpair, $z^*$ is the eigenvalue, with corresponding eigenvector $x^*$. This paper is concerned with computing all eigenpairs $(x^*, z^*)$ for which $z^*$ lies in a compact domain $\Omega \subset \C$, whose Euclidean boundary is denoted by $\partial \Omega$. \begin{example}[$n = 3, d_1 = d_2 = d_3 = 1$] \label{ex:intro} Consider the PEPv given by \[ T(x,z) \cdot x = \begin{pmatrix} x_1 + z x_2 & z x_2+x_3 & x_1-x_3 \\ x_1+(1+z)x_2 & (1-z^2)x_2-zx_3 & x_1 +x_3\\ (1+z)x_1 +x_2 & x_2-x_3 & zx_1+(1-z)x_3 \end{pmatrix} \cdot \begin{pmatrix} x_1 \\ x_2 \\ x_3 \end{pmatrix} = \begin{pmatrix} 0 \\ 0 \\ 0 \end{pmatrix}.\] For fixed $z \in \C$, the rows define three conics in the projective plane $\PP^2$. Usually, these three conics have no common intersection points. The eigenvalues $z = z^*$ are precisely those choices of $z$ for which the three conics intersect. The 12 eigenvalues are the roots of \[{\cal R}(z) = 4z^{12}+12z^{11}-z^{10}-53z^9-100z^8-108z^7-78z^6-23z^5+14z^4+22z^3+8z^2-4z+3,\] depicted in Figure~\ref{fig:intro_sols}. For instance, $z^* \approx 0.5919$ is an eigenvalue, with eigenvector $x^* \approx (1 : -1.9218 : -1.9646) \in \PP^2$. A possible choice for the target domain $\Omega$ to select this eigenvalue is shown in Figure~\ref{fig:intro_sols} by its boundary $\partial\Omega$. The three conics corresponding to $z^* = 0.5919$ are shown in Figure \ref{fig:intro}. \end{example} \begin{figure}[tb] \begin{subfigure}[b]{.48\textwidth} \centering \input{example1-1-domain} \caption{Eigenvalues (\ref{marker:intro_eigenvalues}) and contour $\partial\Omega$ (\ref{marker:intro_contour}).} \label{fig:intro_sols} \end{subfigure} \begin{subfigure}[b]{.48\textwidth} \centering \input{conics_ex1_1} \caption{Three conics corresponding to $z^*\approx0.5919$.} \label{fig:intro} \end{subfigure} \caption{Example \ref{ex:intro}.} \end{figure} Any system of polynomial equations $f_1(x,z) = \cdots = f_n(x,z) = 0$ on $\PP^{n-1} \times \C$ can be formulated as a PEPv. Rewriting this as in \eqref{eq:PEPv} and calling solutions `eigenpairs' seemingly does not change much. Our motivation is that the algorithm we propose for finding eigenpairs with $z^* \in \Omega$ is a natural generalization of standard algorithms used for eigenvalue problems with more structure. More precisely, PEPv's generalize polynomial eigenvalue problems (PEP), for which $d_i = 0$. These in turn contain generalized eigenvalue problems (GEP), for which $d_i = 0$ and $T(z) = A - z \cdot B$ is an affine-linear function. Polynomial eigenvalue problems often arise from an intermediate step in solving general nonlinear eigenvalue problems (NEP), in which the entries of $T(z)$ are allowed to be transcendental functions of $z$. One typically approximates these functions by polynomials in a certain region of the complex plane, obtaining a PEP. One way of solving PEPs is linearization \cite{effenberger2012chebyshev,van2015linearization}. The linearization step results in a GEP of larger dimension. This dimension grows with the degree of the approximating polynomials, and is typically very large. In order to solve it, special structure exploiting methods are used \cite{jarlebring2012linear,van2015compact}. 
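Returning briefly to Example \ref{ex:intro}: for such a small instance, where ${\cal R}(z)$ is available explicitly, the eigenvalues can be computed directly as the roots of ${\cal R}(z)$. The following Julia sketch does this via the companion matrix of ${\cal R}(z)$ and then selects the roots inside a disk-shaped target domain; the centre and radius of the disk are illustrative choices, and the methods developed in this paper are aimed precisely at situations where ${\cal R}(z)$ cannot be computed explicitly.
\begin{minted}{julia}
using LinearAlgebra

# Coefficients of R(z) from the example above, ordered by increasing degree.
c = [3.0, -4.0, 8.0, 22.0, 14.0, -23.0, -78.0, -108.0, -100.0, -53.0, -1.0, 12.0, 4.0]

# The roots of R(z) are the eigenvalues of the companion matrix of the monic polynomial R(z)/4.
a = c[1:end-1] ./ c[end]                       # a_0, ..., a_11 of z^12 + a_11 z^11 + ... + a_0
d = length(a)
C = zeros(ComplexF64, d, d)
C[2:end, 1:end-1] .= Matrix(I, d - 1, d - 1)   # ones on the subdiagonal
C[:, end] .= -a                                # last column carries -a_0, ..., -a_11
eigenvalues = eigvals(C)                       # the 12 eigenvalues of the PEPv

# Select the eigenvalues inside a small disk around z = 0.5919 (illustrative choice of the domain).
selected = filter(z -> abs(z - 0.6) < 0.2, eigenvalues)
\end{minted}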
Another common approach for solving NEPs is based on contour integration. The goal of methods like Beyn \cite{beyn2012integral}, SS \cite{asakura2009numerical} or NLFEAST \cite{gavin2018feast} is to locate all eigenvalues on a compact domain $\Omega$ in the complex plane. This is done by calculating a contour integral over the boundary $\partial \Omega$ with an integrand that contains the matrix inverse of the eigenvalue problem. Using the residue theorem, the poles of the integrand -- which coincide with the desired eigenvalues in the compact domain -- can be extracted. In the present paper, we develop a new contour-integration-based method for finding all eigenpairs of a PEPv with $z^* \in \Omega$. It generalizes known approaches for PEPs, in the sense that when $d_i = 0$, Beyn's algorithm is recovered. We reiterate that, under suitable genericity assumptions, this can be used to find all solutions to a polynomial system $f_1(x,z) = \cdots = f_n(x,z) = 0$ with $z$-coordinate inside $\Omega$. The situation of interest is where the number of solutions with this property is much smaller than the total number of solutions, i.e., the total number of eigenvalues of $T(x,z)$. Our strategy is to integrate trace functions along the boundary $\partial \Omega$, and extract the eigenvalues from moments. These traces are evaluated using numerical homotopy continuation \cite{sommese2005numerical}. Such methods can also be used to naively compute all eigenpairs of $T(x,z)$ and then filter out relevant solutions by checking whether $z \in \Omega$. However, an important feature of our method is that evaluating the trace usually requires significantly less homotopy paths than the total number of eigenvalues of $T(x,z)$, which makes it more efficient than the naive approach. It is important to note that the traces are not available in an explicit form as is usually expected for PEPs solved by Krylov methods. Therefore, we only consider contour integration methods in this paper: these only require evaluation of the trace, not its explicit expression. This paper is structured as follows. An overview of the standard Beyn's algorithm is presented in Section~\ref{sec:2}. The basis of our approach is laid in Section~\ref{sec:3} by introducing the concepts of resultants and traces. Section~\ref{sec:4} describes the resulting contour integration method and comments on the numerical implementation. We discuss the complexity of our method in Section~\ref{sec:5} and present an analysis for two families of systems of equations. Our numerical experiments in Section~\ref{sec:6} confirm the presented theory. \section{Beyn's algorithm} \label{sec:2} The method of Beyn \cite{beyn2012integral} considers the nonlinear eigenvalue problem defined by the holomorphic matrix valued function $A:\C\rightarrow\C^{n\times n}$ as \[ A(z) \cdot x = 0.\] The goal is to find eigenpairs $(x^*,z^*)\in \PP^{n-1}\times\C$ for which the eigenvalue $z^*$ lies in the compact domain $\Omega$ of the complex plane. The function $A$ is typically assumed to be holomorphic in a neighborhood of $\Omega$. Beyn's method is especially useful for targeting a specific subset of the, possibly infinite, complete set of eigenvalues. In this section, we recapitulate the idea and theory behind contour integration for eigenvalue problems. For reasons of clarity, we focus the derivations on simple eigenvalues only. 
An eigenvalue is called simple if the algebraic multiplicity and the geometric multiplicity are equal to one, where the multiplicity of an eigenvalue is defined by the following definitions. \begin{definition} The algebraic multiplicity of an eigenvalue $z^*$ is the smallest positive integer $m_a$ such that \begin{equation} \left.\frac{d^{m_a}}{dz^{m_a}}\det ( A(z) ) \right\vert_{z=z^*}\neq 0. \end{equation} \end{definition} \begin{definition} The geometric multiplicity of an eigenvalue $z^*$ is the dimension of the null space of $A(z^*)$. \end{definition} Let $z^*$ be a simple eigenvalue of $A$ with corresponding right and left eigenvectors $x^*$ and $y^*$ such that $A(z^*)\cdot x^*=0$ and $A(z^*)^H\cdot y^*=0$. There exists a region $\mathcal{N}\subset\C$ around $z^*$ and a holomorphic function $R: \C \rightarrow \C^{n\times n}$ such that \[ A(z)^{-1} = \frac{1}{z-z^*}x^*y^{*H} + R(z) , \quad z\in \mathcal{N}\setminus\{z^*\}. \] This property can be easily generalized to the case where multiple simple eigenvalues are considered in a compact subset of $\C$ \cite[Thm.~2.4]{beyn2012integral}. \begin{theorem}\label{thm:keldysh_simple} Let $\Omega \subset\C$ be a compact subset that contains only the simple eigenvalues $z^*_i, i=1,\ldots,l$ with corresponding right and left eigenvectors $x^*_i$ and $y^*_i$. Then there exists a neighborhood $\mathcal{N}$ of $\Omega$ and a holomorphic function $R: \C \rightarrow\C^{n \times n}$ such that \[ A(z)^{-1} = \sum_{i=1}^l \frac{1}{z-z^*_i} \, x^*_iy^{*H}_i + R(z), \quad z\in\mathcal{N}\setminus\{z^*_1,\ldots,z^*_l\}.\] \end{theorem} Theorem~\ref{thm:keldysh_simple} provides us with a way of expressing the value of a contour integral over the boundary of the compact subset $\Omega\subset\mathcal{N}$. \begin{theorem} \label{thm:res_simple} In the situation of Theorem \ref{thm:keldysh_simple}, we have that \[ \frac{1}{2\pi\sqrt{-1}}\oint_{\partial\Omega} f(z)A(z)^{-1}dz = \sum_{i=1}^l f(z_i^*) \,x_i y_i^H.\] \end{theorem} Under the assumption that only a few eigenvalues lie within $\Omega$, i.e., $l<n$, and all eigenvectors are linearly independent, we can extract the eigenvalues and corresponding eigenvectors from the following two contour integrals \[ A_0 = \frac{1}{2\pi\sqrt{-1}}\oint_{\partial\Omega} A(z)^{-1}\hat{V}dz , \qquad A_1 = \frac{1}{2\pi\sqrt{-1}}\oint_{\partial\Omega} zA(z)^{-1}\hat{V}dz,\] with $\hat{V}\in\C^{n\times q}$, $q\geq l$ a random matrix of full rank $q$. Using Theorem~\ref{thm:res_simple}, we see that \[A_0 = \sum_{i=1}^l x_i^*y_i^{*H}\hat{V} = XY^H\hat{V}, \quad A_1 = \sum_{i=1}^lz_i^*x_i^*y_i^{*H}\hat{V} = XZY^H\hat{V},\] where $X$ and $Y$ have the right and left eigenvectors for their columns and $Z$ is a diagonal matrix containing the corresponding eigenvalues. The matrix $A_0$ has rank at most $l$ for random choices of $\hat{V}$, so that a reduced singular value decomposition can be expressed as \[ A_0 = V_0\Sigma_0W_0^H\] with rectangular $V_0\in\C^{n\times l}$ and $W_0\in\C^{q\times l}$ and diagonal matrix $\Sigma_0 = \text{diag}(\sigma_1,\ldots,\sigma_l)$. In \cite{beyn2012integral} it is shown, via some linear algebra manipulations, that \[ V_0^HA_1W_0\Sigma_0^{-1} = SZS^{-1}.\] This decomposition reveals the diagonal matrix $Z$ containing the eigenvalues, while the corresponding eigenvectors can be extracted from $V=V_0S$. Since nonlinear eigenvalue problems can have more eigenvalues than the size of the matrix, it is necessary to extend this approach to the case where $l>n$. 
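As an illustration, a minimal Julia sketch of this procedure for a toy $2\times 2$ problem with a single eigenvalue inside the unit circle could look as follows. The matrix function, the number of quadrature nodes and the rank tolerance are illustrative choices; the contour integrals are discretized with the trapezoidal rule discussed at the end of this section.
\begin{minted}{julia}
using LinearAlgebra

# Toy NEP: det A(z) = (z - 0.5)(z - 2), so the unit circle contains the single eigenvalue z = 0.5.
A(z) = [z - 0.5  1.0;
        0.0      z - 2.0]

n, q, N = 2, 2, 64
Vhat = randn(ComplexF64, n, q)                 # random probe matrix, full rank with probability one

# Moments A_0 and A_1 over the unit circle, approximated by the trapezoidal rule.
A0 = zeros(ComplexF64, n, q); A1 = zeros(ComplexF64, n, q)
for ell in 0:N-1
    t = 2pi * ell / N
    phi, dphi = exp(im * t), im * exp(im * t)  # phi(t_ell) and phi'(t_ell)
    W = A(phi) \ Vhat                          # A(phi(t_ell))^{-1} * Vhat
    A0 .+= W .* dphi
    A1 .+= phi .* W .* dphi
end
A0 ./= (im * N); A1 ./= (im * N)

# Extract eigenvalues and eigenvectors from a rank-revealing SVD of A0.
F = svd(A0)
l = count(s -> s > 1e-8 * F.S[1], F.S)         # numerical rank = number of eigenvalues inside the contour
V0, W0, S0 = F.U[:, 1:l], F.V[:, 1:l], Diagonal(F.S[1:l])
E = eigen(V0' * A1 * W0 * inv(S0))
eigenvalues = E.values                         # approximately [0.5]
eigenvectors = V0 * E.vectors                  # approximately multiples of (1, 0)
\end{minted}
We now return to the general case, where the number $l$ of eigenvalues inside $\Omega$ may exceed $n$.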
Luckily, Beyn's algorithm generalizes easily to this case. First, the matrix $\hat{V}\in\C^{n\times n}$ is now a square matrix of full rank, which is used to calculate so-called higher-order moments of the contour integrals: \[ A_k = \frac{1}{2\pi\sqrt{-1}}\oint_{\partial\Omega} z^kA(z)^{-1}\hat{V}dz.\] It should be clear that $A_k$ can be decomposed as $A_k = XZ^kY^H\hat{V}$. From these higher-order moments, we can calculate two block Hankel matrices \begin{equation} \label{eq:Bi} B_0=\begin{pmatrix} A_0 &\cdots& A_{M-1} \\ \vdots & & \vdots \\ A_{M-1} & \cdots &A_{2M-2} \end{pmatrix}, \text{ and } B_1=\begin{pmatrix} A_1 & \cdots & A_{M} \\ \vdots & & \vdots \\ A_M & \cdots & A_{2M-1} \end{pmatrix}. \end{equation} As in the case of few eigenvalues, it can be shown that the rank of $B_0$ equals the number of eigenvalues in $\Omega$, and that the diagonalizable matrix \[ V_0^HB_1W_0\Sigma_0^{-1} = SZS^{-1}\] is obtained from the reduced singular value decomposition $B_0 = V_0\Sigma_0W_0^H$. The eigenvalues are again the elements of the diagonal matrix $Z$, while the corresponding eigenvectors can be extracted from the first $n$ rows of $V_0S$. Some additional technicalities need to be considered in the case of semi-simple and defective eigenvalues \cite{beyn2012integral}, but this falls outside the scope of this discussion. We conclude the section with a discussion on how the moment matrices $A_k$ are computed in practice. We assume that $\partial\Omega$ is parameterized by a continuous function $\varphi: [0,2\pi) \rightarrow \C$. The moment matrix $A_k$ is then expressed as \[A_k = \frac{1}{2\pi\sqrt{-1}} \int_0^{2\pi} \varphi^k(t)A(\varphi(t))^{-1}\hat{V}\varphi^\prime(t) dt.\] This integral can be approximated numerically by the trapezoidal rule with $N$ equidistant points $t_\ell=\frac{2 \ell \pi}{N}, \ell =0,\ldots,N-1$ as \[ A_k \approx A_{k,N} = \frac{1}{N\sqrt{-1}}\sum_{\ell=0}^{N-1} \varphi^k(t_\ell)A(\varphi(t_\ell))^{-1}\hat{V}\varphi^\prime(t_\ell).\] The choice of the trapezoidal rule integration scheme with equidistant points might feel somewhat arbitrary, but it often leads to satisfactory results with a limited number of points \cite{beyn2012integral}. The impact of the integration scheme on the accuracy of the results is discussed in \cite{van2016nonlinear}. The largest part of the computational cost of Beyn's method originates from the calculation of the moment matrices. Note that most of the computational work can be reused across the moment matrices, since the factor $A(\varphi(t_\ell))^{-1}\hat{V}$ is independent of the moment index $k$. The linear systems defining $A(\varphi(t_\ell))^{-1}\hat{V}$ can be solved independently for every value of $t_\ell$, which leads to an efficient parallel implementation. In what follows, our aim is to generalize Beyn's method to the case with eigenvector nonlinearities. \section{Resultants and traces} \label{sec:3} In this section, we turn back to the PEPv from the Introduction. We discuss resultants and traces related to our equations $T(x,z) \cdot x = 0$. These algebraic objects fit into our strategy for solving a PEPv as follows. \begin{enumerate} \item There is a polynomial ${\cal R}(z)$, obtained by evaluating a \emph{resultant}, whose roots are the eigenvalues of $T(x,z)$. \item \emph{Traces} are rational functions in $z$ whose denominator is (roughly) ${\cal R}(z)$. \item Traces can be evaluated using tools from numerical nonlinear algebra.
This allows us to perform numerical \emph{contour integration} along $\partial \Omega$ to compute eigenvalues. \end{enumerate} This section addresses points 1 and 2. Point 3 is the subject of the next section. We work in the ring $K[x] = K[x_1, \ldots, x_n]$ of polynomials in the variables $x_i$ with coefficients in the rational function field $K = \C(z)$. The polynomials $f_1, \ldots, f_n \in K[x]$ are the entries of the vector $T(x,z) \cdot x$. We assume that $f_i$ is homogeneous of degree $d_i + 1$ and write $f_i \in K[x]_{d_i+1}$. \subsection{Resultants} For fixed values $z = z^*$, the system of polynomial equations $f_1 = \cdots = f_n = 0$ encoded by the PEPv $T(x,z^*) \cdot x = 0$ consists of $n$ homogeneous equations on $\PP^{n-1}$. Generically, one expects such equations to have no solution with nonzero coordinates. The eigenvalues are those special values of $z^*$ for which they \emph{do} have solutions; see Example \ref{ex:intro}. This is captured by a polynomial ${\cal R}(z)$ obtained via \emph{resultants}. We summarize the basics, and refer the reader to \cite[Chapters 3 and 7]{cox2006using} for more details. Let $\A_i \subset \N^n, i = 1, \ldots, n$ denote the \emph{supports} of the polynomials $f_i \in K[x]$: if $f_i = \sum_{\alpha \in \N^n} c_{i,\alpha}(z) \, x^\alpha$, where $x^\alpha$ is short for $x_1^{\alpha_1} \cdots x_n^{\alpha_n}$, then \[ \A_i = \{ \alpha \in \N^n ~|~ c_{i,\alpha} \neq 0 \}. \] We write $K[x]_{d_i+1} \supset K[x]_{\A_i} \simeq K^{|\A_i|}$ for the affine space over $K$ of polynomials with support contained in $\A_i$. A natural set of coordinates for $K[x]_{\A_i}$ is given by the coefficients $\{b_{i,\alpha} ~|~ \alpha \in \A_i\}$ of a generic polynomial with support $\A_i$: $h_i = \sum_{\alpha \in \A_i} b_{i,\alpha} x^\alpha \in K[x]_{\A_i}$. Let $Z_0 \subset K[x]_{\A_1} \times \cdots \times K[x]_{\A_n}$ be the set of tuples $(h_1, \ldots, h_n)$ for which $h_1 = \cdots = h_n = 0$ has a solution in $(K \setminus \{ 0 \} )^n$. Its Zariski closure is $Z = \overline{Z_0} \subset K[x]_{\A_1} \times \cdots \times K[x]_{\A_n}$. Under mild assumptions on the ${\cal A}_i$, $Z$ has codimension one, so that it is defined by one polynomial equation in the coefficients of $h_1, \ldots, h_n$ \cite[Cor.~1.1]{sturmfels1994newton}. It turns out that, in this case, $Z$ is an irreducible variety defined over $\Q$ \cite[Lem.~1.1]{sturmfels1994newton}. The \emph{sparse resultant} $R_{\A_1, \ldots, \A_n}$ is the unique (up to sign) irreducible polynomial in $\Z[\, b_{i,\alpha}~|~ i =1, \ldots, n, \alpha \in \A_i \,]$ such that \[ (h_1, \ldots, h_n) \in Z \quad \Longleftrightarrow \quad R_{\A_1, \ldots, \A_n}(h_1, \ldots, h_n) = 0. \] Evaluating the sparse resultant $R_{\A_1, \ldots, \A_n}$ at our tuple $(f_1, \ldots, f_n)$ means plugging in the coefficients $c_{i,\alpha}(z) \in K$ for the $b_{i,\alpha}$. Since we assume the coefficients of the $f_i$ to be polynomials in $z$, we obtain a polynomial \begin{equation} \label{eq:calR} {\cal R}(z) = R_{\A_1, \ldots, \A_n} (f_1, \ldots, f_n) \quad \in \, \C[z]. \end{equation} \begin{example} Let $\A = \A_1 = \A_2 = \A_3 \subset \Z^3$ consist of all monomials of degree 2 in 3 variables.
Consider 3 general ternary quadrics \[ h_i \, = \, b_{i,1} \, x_1^2 + b_{i,2} \, x_2^2 + b_{i,3} \, x_3^2 + b_{i,4} \, x_1x_2 + b_{i,5} \, x_1x_3 + b_{i,6} \, x_2x_3, \quad i = 1, 2, 3.\] The resultant $R_{\A,\A,\A}$ is a polynomial of degree 12 in the 18 variables $b_{i,j}, i = 1, \ldots, 3, j = 1, \ldots, 6$, which characterizes when the three conics $\{h_i = 0 \} \subset \PP^2$ intersect. It has 21894 terms and can be computed as a $ 6 \times 6$ determinant, see \cite[Chapter 3, \S2]{cox2006using}. Plugging in the coefficients, i.e.~$b_{1,1} = 1, b_{1,2} = z, b_{1,3} = -1, b_{1,4} = z, \ldots$, we obtain the polynomial ${\cal R}(z) = R_{\A,\A,\A}(f_1,f_2,f_3)$ shown in Example \ref{ex:intro}. \end{example} \begin{example} In the case of a polynomial eigenvalue problem (PEP) given by $T(z) \cdot x = 0$, we have ${\cal R}(z) = \det T(z)$. \end{example} \begin{definition} The PEPv given by $T(x,z) \cdot x = 0$ is called \emph{regular} if ${\cal R}(z) \neq 0$. \end{definition} Unlike in the case of PEPs, regularity of a PEPv does not mean that there are finitely many eigenvalues. Here is an example. \begin{example} \label{ex:regular} We consider the PEPv $T(x,z) \cdot x = 0$ where \[ T(x,z) = \begin{pmatrix} x_1 & (1+z)x_1 & x_2 \\ 2x_1 & 3x_1 & (3+z)x_2 \\ 2z x_1 & x_1 & x_2 \end{pmatrix} \quad \text{and} \quad \begin{matrix} f_1 = x_1^2+(1+z)x_1x_2 + x_2x_3, \\ f_2 = 2x_1^2 + 3x_1x_2 + (3+z)x_2x_3,\\ f_3 = 2zx_1^2 + x_1x_2 + x_2x_3. \end{matrix}\] We calculate ${\cal R}(z) = 2z^3+8z^2-3z \neq 0$. However, for any $z^* \in \C$, $T(x^*,z^*) \cdot x^* = 0$, with $x^* = (0,0,1)^\top$ or $x^* = (0,1,0)^\top$. \end{example} To avoid such artefacts, we will limit ourselves to computing eigenpairs $(z^*, x^*)$ for which $x^*$ has no zero coordinates. That is, we look for eigenvectors in the \emph{algebraic torus} $\{x \in \PP^{n-1} ~|~ x_i \neq 0, i = 1, \ldots, n \}$. For such an eigenpair, we say that $z^*$ is an \emph{eigenvalue with toric eigenvector}. By construction, if $z^* \in \C$ is an eigenvalue of $T(x,z)$ with toric eigenvector, then ${\cal R}(z^*) = 0$. This implies the following statement. \begin{theorem} A regular PEPv has finitely many eigenvalues with toric eigenvector. \end{theorem} It is \emph{not} true in general that each $z^*$ such that ${\cal R}(z^*) =0$, is an eigenvalue with toric eigenvector. We continue Example \ref{ex:regular}. \begin{example} There are no toric solutions to $T(x,z^*) \cdot x = 0$, with $z^* = 0$ and $T$ as in Example \ref{ex:regular}. This eigenvalue is picked up by our polynomial ${\cal R}(z)$ because it corresponds to a solution of $T(x,z^*) \cdot x = 0$ in a \emph{toric compactification} of $(\C \setminus \{0\})^n$. Note that for this eigenvalue, there is an `extra' non-toric eigenvector $(0,1,-1)^\top$. \end{example} \begin{definition} An eigenvalue of the PEPv $T(x,z) \cdot x = 0$ with toric eigenvector is called \emph{simple} if it is a simple zero of ${\cal R}(z)$. \end{definition} \begin{example} In Example \ref{ex:intro}, $z^* \approx 0.5919$ is a simple eigenvalue with toric eigenvector. \end{example} \subsection{Traces} The roots of the polynomial ${\cal R}(z)$ are eigenvalues of the PEPv given by $T(x,z)\cdot x$. It is usually hard to compute ${\cal R}(z)$. In this section we discuss rational functions in $z$, called \emph{traces}, whose denominator is ${\cal R}(z)$. The upshot is that these traces can be evaluated using tools from numerical nonlinear algebra, so that residue techniques can be used to approximate its poles. 
We fix $n$ random homogeneous polynomials $a_1, \ldots, a_n \in \C[x]$ such that $\deg(a_i) = d_i = \deg(f_i)-1$. We write $a_i \in \C[x]_{d_i}$ and collect them in a vector $a = (a_1, \ldots, a_n)^\top \in \C[x]^n$. Consider the ideal $I_a$ generated by the entries of $T(x,z) \cdot x - a$: \begin{equation} \label{eq:Ia} I_a = \langle f_1 - a_1, \ldots, f_n - a_n \rangle \subset K[x, x^{-1}]. \end{equation} Here $K[x, x^{-1}] = K[x_1^{\pm 1}, \ldots, x_n^{\pm 1}]$ is the Laurent polynomial ring in $n$ variables with coefficients in $K$. Note that the ideal $I_a$ is \emph{not} homogeneous. We will assume throughout that the equations $f_i - a_i = 0$ have finitely many solutions in $(\overline{K} \setminus \{0\})^n$, where $\overline{K}$ is the algebraic closure of $K$. This is the field of Puiseux series $\overline{K} = \C \{ \! \{ z \} \! \} $. By \cite[Ch.~5, \S 3, Thm.~6]{cox2013ideals}, our assumption can equivalently be phrased as follows. \begin{assumption} \label{assum:finitedim} The dimension $\delta = \dim_K K[x, x^{-1}]/I_a$ is finite. \end{assumption} The set of solutions to $f_1 - a_1 = \cdots = f_n - a_n = 0$ is denoted by \[ V(I_a) = \{ \xi \in (\overline{K} \setminus \{0\})^n ~|~ f_i(\xi) - a_i = 0, i = 1, \ldots, n \}. \] A point $\xi \in V(I_a)$ has multiplicity $\mu(\xi)$. By Assumption \ref{assum:finitedim}, $\sum_{\xi \in V(I_a)} \mu(\xi) = \delta$. \begin{definition} \label{def:trace} For a polynomial $p \in K[x,x^{-1}]$, the \emph{trace} ${\rm Tr}_p(I_a)$ is $ \sum_{\xi \in V(I_a)} \mu(\xi) \, p(\xi)$. \end{definition} \begin{proposition} \label{prop:traceisrat} For any Laurent polynomial $p \in K[x,x^{-1}]$, the trace ${\rm Tr}_p(I_a)$ is a rational function in $z$. That is, ${\rm Tr}_p(I_a) \in K$. \end{proposition} \begin{proof} This is a standard result from Galois theory; see for instance \cite[Ch.~6, Thm.~1.2]{lang02}. Another way to see this explicitly is by considering the $K$-linear map \[ M_p : K[x,x^{-1}]/I_a \longrightarrow K[x,x^{-1}]/I_a \quad \text{given by} \quad [f] \longmapsto [pf],\] where $[f]$ denotes the residue class of $f \in K[x,x^{-1}]$ in $K[x,x^{-1}]/I_a$. This is called a \emph{multiplication map}. A matrix representation of such a map can be computed using linear algebra over $K$. A standard algorithm uses Gr\"obner bases \cite[Ch.~2, \S 4]{cox2006using}. Since $M_p$ can be represented by a $\delta \times \delta$ matrix with entries in $K$, its trace ${\rm tr}(M_p)$ lies manifestly in $K$. Moreover, since the trace is the sum of the eigenvalues, \cite[Ch.~4, \S 2, Prop.~2.7]{cox2006using} gives ${\rm tr}(M_p) = {\rm Tr}_p(I_a)$. \end{proof} \begin{example} \label{ex:trace} Let $T$ be as in Example \ref{ex:intro}. The number $\delta$ is the number of Puiseux series solutions $x(z) = (x_1(z), x_2(z), x_3(z))$ to $f_1-a_1 = f_2 -a_2 = f_3-a_3 = 0$, with \begin{align*} f_1 - a_1 &\,=\, x_1^2 +zx_1x_2 + zx_2^2+x_2x_3 + x_1x_3-x_3^2 - (b_{11} x_1 + b_{12} x_2 + b_{13} x_3), \\ f_2 - a_2 &\,=\, x_1^2+(1+z)x_1x_2 + (1-z^2)x_2^2-zx_2x_3 + x_1x_3+x_3^2 - (b_{21} x_1 + b_{22} x_2 + b_{23} x_3), \\ f_3 - a_3 &\, = \, (1+z)x_1^2+x_1x_2 + x_2^2-x_2x_3 + zx_1x_3 + (1-z)x_3^2 - (b_{31} x_1 + b_{32} x_2 + b_{33} x_3). \end{align*} Here $a_i = b_{i1}x_1 + b_{i2} x_2 + b_{i3}x_3$ are generic linear forms.
Using \texttt{Maple}, we find $\delta = 8$ and \small \[ {\rm Tr}_{x_2}(I_a) = \frac{(16b_{11} + 8b_{13})\, z^{11}+(-4 b_{33}+ \cdots -16 b_{31}) \, z^{10} + \cdots + (-8b_{33}+\cdots+2b_{32})}{{\cal R}(z)}, \] \normalsize where ${\cal R}(z)$ is the polynomial from Example \ref{ex:intro}. \end{example} The fact that ${\cal R}(z)$ shows up as the denominator of ${\rm Tr}_{x_2}(I_a)$ in Example \ref{ex:trace} is no coincidence. To state our main result, we introduce some more notation. Let ${\cal C}_i \subset \Z^n, i = 1, \ldots, s$ be finite sets of lattice points. The sublattice of $\Z^n$ affinely generated by ${\cal C}_1, \ldots, {\cal C}_s$ is \[ L({\cal C}_1, \ldots, {\cal C}_s) = \left \{\, \sum_{\alpha \in {\cal C}_1} \ell_{1,\alpha} \, \alpha + \cdots + \sum_{\alpha \in {\cal C}_s} \ell_{s,\alpha} \, \alpha ~\big |~ \sum_{\alpha \in {\cal C}_i} \ell_{i, \alpha} = 0, \, \ell_{i,\alpha} \in \Z \right \}. \] Let $\A_i$ be the support of $f_i$ and $\B_i$ that of $a_i$. We will make the following assumption. \begin{assumption} \label{assum:lattice} The lattice $L(\A_1, \ldots, \A_n)$ is equal to $\{ \alpha \in \Z^n ~|~ \alpha_1 + \cdots + \alpha_n = 0 \}$. This can always be realized by a change of coordinates as long as $L(\A_1, \ldots, \A_n)$ has rank $n-1$. \end{assumption} We set $\A_0 = \{ e_1, \ldots, e_n \}$ with $e_i$ the $i$-th standard basis vector of $\Z^n$, $\B_0 = \{0\}$ and $\CC_i = \A_i \cup \B_i$ for $i = 0, \ldots, n$. The set $\CC_0 = \{0\} \cup \A_0$ contains all lattice points of the standard simplex in $\Z^n$. Note that, by Assumption \ref{assum:lattice}, $L(\CC_1, \ldots, \CC_n)$ has rank $n$. For any point $\omega$ in the dual lattice $(\Z^n)^\vee = \Z^n$ and any finite subset $\CC \subset \Z^n$, we set \[ \CC^\omega = \{ \gamma \in \CC ~|~ \langle \omega, \gamma \rangle = \min_{\gamma' \in \CC} \, \langle \omega, \gamma' \rangle \}. \] Here $\langle \cdot, \cdot \rangle$ is the pairing between $\Z^n$ and its dual, i.e.~the usual dot product. For a Laurent polynomial $f = \sum_{\gamma \in \CC} c_\gamma \, x^\gamma$ supported in $\CC$, we write $f^\omega$ for the \emph{leading form} of $f$ w.r.t.~$\omega$: \[f^\omega = \sum_{\gamma \in \CC^\omega} c_\gamma \, x^\gamma.\] Below we use the resultant $R_{\CC_0, \CC_1, \ldots, \CC_n}$, which is a polynomial in $b_{i,\gamma}, i = 0, \ldots, n, \gamma \in \CC_i$, characterizing when $h_0 = \cdots = h_n = 0$ has a solution in $(\overline{K} \setminus \{0\})^n$, with $h_i = \sum_{\gamma \in \CC_i} b_{i,\gamma} \, x^\gamma$. To give an explicit formula for the trace in terms of ${\cal R}(z)$, we will make the additional assumption that our ideal $I_a$ behaves like a \emph{generic intersection} in $(\overline{K} \setminus \{0\})^n$. To make this precise, we denote by $P_i = {\rm Conv}(\CC_i) \subset \R^n$ the \emph{Newton polytope} of $f_i - a_i$. This is the convex hull of the lattice points in $\CC_i$. The \emph{mixed volume} of $P_1, \ldots, P_n$, denoted ${\rm MV}(P_1, \ldots, P_n)$, is the generic number of solutions to a system of equations with supports $\CC_1, \ldots, \CC_n$. For definitions and examples, see for instance \cite[Sec.~5.1]{telen2020thesis}. \begin{assumption} \label{assum:mv} The dimension $\delta = \dim_K K[x,x^{-1}]/I_a$ equals ${\rm MV}(P_1, \ldots, P_n)$. \end{assumption} Assumption \ref{assum:mv} implies Assumption \ref{assum:finitedim}, so it suffices to work with Assumptions \ref{assum:lattice} and \ref{assum:mv}. 
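Although the closed form of such a trace is expensive to obtain, its numerical value at any fixed $z$ can be computed by simply solving the system $f_i - a_i = 0$ and summing over the solutions, as in Definition \ref{def:trace}. The sketch below does this for Example \ref{ex:trace} at a sample point $z_0$, using the Julia package \texttt{HomotopyContinuation.jl}; the sample point and the random coefficients $b_{ij}$ are illustrative, and the calls shown reflect the basic interface of that package rather than any specific implementation used for this paper.
\begin{minted}{julia}
using HomotopyContinuation

@var x1 x2 x3
z0 = 0.3 + 0.1im                      # a sample value of z (not an eigenvalue)
b  = randn(3, 3)                      # random coefficients of the generic linear forms a_i

# The polynomials f_i and a_i from the example above, with z fixed to z0.
f1 = x1^2 + z0*x1*x2 + z0*x2^2 + x2*x3 + x1*x3 - x3^2
f2 = x1^2 + (1 + z0)*x1*x2 + (1 - z0^2)*x2^2 - z0*x2*x3 + x1*x3 + x3^2
f3 = (1 + z0)*x1^2 + x1*x2 + x2^2 - x2*x3 + z0*x1*x3 + (1 - z0)*x3^2
a  = [b[i, 1]*x1 + b[i, 2]*x2 + b[i, 3]*x3 for i in 1:3]

F = System([f1, f2, f3] .- a; variables = [x1, x2, x3])
sols = solutions(solve(F))            # generically delta = 8 solutions
trace_vector = [sum(s[i] for s in sols) for i in 1:3]   # (Tr_{x_1}, Tr_{x_2}, Tr_{x_3}) at z = z0
\end{minted}
The next theorem makes the relationship between such traces and ${\cal R}(z)$ precise.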
\begin{theorem} \label{thm:traceformula} Let $T(x,z) \cdot x = (f_1, \ldots, f_n)^\top = 0$ be a PEPv satisfying Assumption \ref{assum:lattice} and let $a_i \in \C[x]_{d_i}$ be such that $I_a$ satisfies Assumption \ref{assum:mv}. Let $\CC_i$ be the support of $f_i - a_i$ and $\CC_0 = \{ 0, e_1, \ldots, e_n \}$. The PEPv given by $T(x,z)$ is regular and for $ p = \sum_{\gamma \in \CC_0} c_{0,\gamma} x^\gamma$ we have \[ {\rm Tr}_{p} (I_a) = \frac{{\cal Q}_{p,a}(z)}{{\cal R}(z) \cdot {\cal S}_a(z)}, \quad \text{where } {\cal Q}_{p,a}(z) = \sum_{\gamma \in \CC_0} c_{0,\gamma} \frac{\partial R_{\CC_0, \CC_1, \ldots, \CC_n}}{\partial b_{0,\gamma}} (1,f_1-a_1, \ldots, f_n-a_n),\] ${\cal R}(z)$ is as in \eqref{eq:calR} and ${\cal S}_a(z)$ is a nonzero polynomial. \end{theorem} \begin{proof} Our starting point is Theorem 2.3 in \cite{d2008rational}, which expresses the trace as \[ {\rm Tr}_{p} (I_a) = C \cdot \frac{{\cal Q}_{p,a}(z)}{R_{\CC_0, \ldots, \CC_n}(1, f_1-a_1, \ldots, f_n-a_n)} \] for a nonzero constant $C$. Proposition 2.6 in the same paper writes the denominator $R_{\CC_0, \ldots, \CC_n}(1, f_1-a_1, \ldots, f_n-a_n)$ as a product of \emph{face resultants}. More precisely, we have \[R_{\CC_0, \ldots, \CC_n}(1, f_1-a_1, \ldots, f_n-a_n) = \prod_{\omega} R_{\CC_1^\omega, \ldots, \CC_n^\omega}((f_1-a_1)^\omega, \ldots, (f_n - a_n)^\omega)^{\delta_{\omega}}, \] where the product ranges over the primitive inward-pointing facet normals $\omega$ of the Minkowski sum $P_1 + \cdots + P_n$. The exponents $\delta_{\omega}$ are defined combinatorially from the ${\cal C}_i$ in the discussion preceding \cite[Prop.~2.6]{d2008rational}. By Assumption \ref{assum:mv}, none of the face resultants vanishes identically. Let $\omega^* = (-1,\ldots,-1) \in (\Z^n)^\vee$. We have $\CC_i^{\omega^*} = \A_i$ and $(f_i - a_i)^{\omega^*} = f_i$, which shows that $T(x,z)$ is regular and that ${\cal R}(z)^{\delta_{\omega^*}}$ is a factor in the denominator of ${\rm Tr}_{p}(I_a)$. Assumption \ref{assum:lattice} and the fact that ${\rm Conv}(\CC_0)$ is a standard simplex imply $\delta_{\omega^*} = 1$. The theorem follows by setting ${\cal S}_a(z) = C^{-1} \cdot \prod_{\omega \neq \omega^*} R_{\CC_1^\omega, \ldots, \CC_n^\omega}((f_1-a_1)^\omega, \ldots, (f_n - a_n)^\omega)^{\delta_{\omega}}$. \end{proof} \begin{example} \label{ex:extraneous1} Consider the PEPv $T(x,z) \cdot x = 0$ given by \[ T(x,z) = \begin{pmatrix} 1 & z & 1 \\ 2 & 1 & z \\ x_2 & (z+1)x_3 + x_2 & 0 \end{pmatrix}. \] This satisfies Assumptions \ref{assum:lattice} and \ref{assum:mv}. We have $\CC_0 = \{ (0,0,0), (1,0,0), (0,1,0), (0,0,1) \}$, $a_1, a_2 \in \C$ and $a_3(x) = b_{31}x_1 + b_{32} x_2 + b_{33}x_3$. The trace for $p = x_1$ is \begin{equation} \label{eq:extrandenom} {\rm Tr}_{x_1}(I_a) = \frac{b_{31} \, z^4 + (a_1+a_2-b_{32}-2b_{33})\, z^3 + \cdots + (2 a_1 + 4 a_2 + b_{31}-2 b_{32} - b_{33})}{(z^2+2z-2)(z-2)}. \end{equation} Here ${\cal R}(z) = z^2 + 2z-2$ and ${\cal S}_a(z) = z-2$ is independent of $a$. We will explain the extraneous factor ${\cal S}_a(z)$ in Example \ref{ex:extraneous2} below. \end{example} \begin{corollary} If the PEPv $T(x,z) \cdot x = (f_1, \ldots, f_n)^\top = 0$ and the ideal $I_a$ satisfy Assumptions \ref{assum:lattice} and \ref{assum:mv}, then an eigenvalue $z^*$ of $T(x,z)$ with toric eigenvector is a pole of ${\rm Tr}_p(I_a)$ if ${\cal Q}_{p,a}(z^*) \neq 0$. Moreover, simple such eigenvalues correspond to simple poles of the trace. \end{corollary} We keep the above notation in what follows.
It would be desirable to have ${\cal S}_a(z)$ equal to a nonzero constant, and ${\cal Q}_{p,a}(z^*) \neq 0$ for all simple eigenvalues of $T(x,z)$. We now discuss when this happens. Let $P = P_1 + \cdots + P_n$ be the Minkowski sum of the Newton polytopes $P_i = {\rm Conv}(\CC_i)$. In the proof of Theorem \ref{thm:traceformula} we derived \[ {\cal S}_a(z) = C^{-1} \cdot \prod_{\omega \neq \omega^*} R_{\CC_1^\omega, \ldots, \CC_n^\omega}((f_1-a_1)^\omega, \ldots, (f_n - a_n)^\omega)^{\delta_{\omega}}, \] where $\omega$ ranges over the inner facet normals to $P$. It follows from the definition of $\delta_\omega$ in \cite[Section 2]{d2008rational} that the only facet normals $\omega$ for which $\delta_{\omega} \neq 0$ are those for which $0 \notin \CC_0^\omega$. This gives a sufficient condition for ${\cal S}_a(z) \in \C \setminus \{0\}$. Let $P_0 = {\rm Conv}(\CC_0)$ be the standard simplex in $\R^n$. If the monomials $x_j^{d_i + 1}, j = 1, \ldots, n$ appear in $f_i$, and $x_j^{d_i}$ appear in $a_i$, then \begin{equation} \label{eq:dense} P_i = {\rm Conv}(\CC_i) = {\rm cl}((d_i + 1) \cdot P_0 \setminus (d_i \cdot P_0)), \end{equation} where ${\rm cl}(\cdot)$ denotes the Euclidean closure in $\R^n$. \begin{theorem} \label{thm:unmixed} Let $T(x,z) \cdot x = (f_1, \ldots, f_n)^\top = 0$ be a PEPv satisfying Assumption \ref{assum:lattice}, with $\deg(f_i) = d_i + 1$. Let $a_i \in \C[x]_{d_i}$ be such that $I_a$ satisfies Assumption \ref{assum:mv} and $P_i = {\rm Conv}(\CC_i) = {\rm cl}((d_i + 1) \cdot P_0 \setminus (d_i \cdot P_0))$. Then ${\cal S}_a(z)$ in Theorem \ref{thm:traceformula} is a nonzero complex constant. \end{theorem} \begin{proof} The theorem follows from the fact that, under the assumption \eqref{eq:dense}, the facet normals of $P = P_1 + \cdots + P_n$ are \[ \omega^* = (-1,\ldots, -1), ~\omega_0 = (1,\ldots,1), ~ \omega_1 = (1,0,\ldots, 0), ~\omega_2 = (0, 1, \ldots, 0), ~ \omega_n = (0,0,\ldots, 1). \] Out of these, only for $\omega = \omega^*$ we have $0 \notin \CC_0^{\omega}$. \end{proof} We present one more example of a family of PEPv's for which $S_a(z) \in \C \setminus \{0\}$. We assume that all $f_i$ are of the same degree $d+1$ and such that $x_j^{d+1}$ appears in $f_j$ for all $j$. We let $a_i = c_i \, x^\beta$ consist of one term of degree $d$, with $c_i \neq 0$. The resulting polytopes $P_i$ are all equal to a pyramid of height one over the simplex $(d+1) \cdot {\rm Conv}(e_1, \ldots, e_n)$. \begin{theorem} \label{thm:pyramid} Let $T(x,z) \cdot x = (f_1, \ldots, f_n)^\top = 0$ be a PEPv satisfying Assumption \ref{assum:lattice}, with $\deg(f_i) = d + 1$. Let $a_i(x) = c_i\, x^\beta \in \C[x]_{d}$ be such that $I_a$ satisfies Assumption \ref{assum:mv}. Then ${\cal S}_a(z)$ in Theorem \ref{thm:traceformula} is a nonzero complex constant. \end{theorem} \begin{proof} The polytope $P = P_1 + \cdots + P_n = n \cdot P_1$ has $n+1$ normal vectors. All of these are nonnegative, except $\omega^* = (-1, \ldots, -1)$. Therefore, only $\omega^*$ satisfies $0 \notin \CC_0^{\omega}$. \end{proof} If ${\rm Conv}(\A_1) = \cdots = {\rm Conv}(\A_n)$, the argument in the proof of Theorem \ref{thm:pyramid} can be used to construct more general situations in which $P_1 = \cdots = P_n$ is a pyramid over ${\rm Conv}(A_i)$ and ${\cal S}_a(z) \in \C \setminus \{0\}$. We do not work this out explicitly. Here is an example where $S_a(z) \notin \C \setminus \{0\}$. 
\begin{example} \label{ex:extraneous2} The polytope $P = P_1 + P_2 + P_3$ from the PEPv in Example \ref{ex:extraneous1} is shown in Figure \ref{fig:polytope}. There are six facets. Their normal vectors $\omega_i$ in the dual lattice $(\Z^3)^\vee \simeq \Z^3$ are \[ \omega_1 = (0,0,1), ~ \omega_2 = -(1,0,1), ~ \omega_3 = -(1, 1, 1), ~ \omega_4 = (1,0,0), ~ \omega_5 = (1,1,1), ~ \omega_6 = (0,1,0). \] Here $\omega^* = \omega_3$. The only other facet normal for which $0 \notin \CC_0^{\omega_i}$ is $\omega_2$. We calculate \[ \CC_1^{\omega_2} = \CC_2^{\omega_2} = \{ (1,0,0), (0,0,1) \}, \quad \CC_3^{\omega_2} = \{(1,0,0), (0,0,1), (0,1,1), (1,1,0) \}. \] The corresponding face equations are $f_1^{\omega_2}= f_2^{\omega_2} = f_3^{\omega_2} = 0$, with \[ f_1^{\omega_2} = x_1 + x_3, \quad f_2^{\omega_2} = 2 x_1 + z x_3, \quad f_3^{\omega_2} = (z+1)x_2x_3 + x_1x_2 - b_{31}x_1 - b_{33}x_3. \] These have a nontrivial solution if and only if the determinant of the linear system $f_1^{\omega_2} = f_2^{\omega_2} = 0$ vanishes. This explains $R_{\CC_1^{\omega_2}, \CC_2^{\omega_2}, \CC_3^{\omega_2}} = z-2$, which gives the extraneous factor in the denominator of \eqref{eq:extrandenom}. \begin{figure} \centering \includegraphics[scale=0.3]{Polytope_Ex37.png} \caption{The polytope $P$ from Example \ref{ex:extraneous2}. The facets corresponding to $\omega_2$ and $\omega_3$ are the quadrilateral and triangle coloured in blue and orange respectively. } \label{fig:polytope} \end{figure} \end{example} We conclude by briefly discussing the condition ${\cal Q}_{p,a}(z^*) \neq 0$. First of all, note that Assumption \ref{assum:mv} implies ${\rm Tr}_1(I_a) = \delta$, so by Theorem \ref{thm:traceformula} we have \[ {\cal Q}_{1,a}(z) = \frac{\partial R_{\CC_0, \ldots, \CC_n}}{\partial b_{0,0}}(1,f_1-a_1, \ldots, f_n-a_n) = \delta \, {\cal R}(z) \, {\cal S}_a(z). \] In particular, ${\cal Q}_{1,a}(z^*) = 0$ for every eigenvalue $z^*$ with toric eigenvector. Therefore, we will use the traces ${\rm Tr}_{x_i}(I_a)$, corresponding to the remaining exponents $\A_0 = \CC_0 \setminus \{0\}$. \begin{definition} We say that an eigenvalue $z^*$ of $T(x,z)$ has a \emph{simple toric eigenvector} if ${\cal R}(z^*) = 0$ and, for generic choices of $a_i$, there is some $i \in \{1, \ldots, n\}$ for which ${\cal Q}_{x_i,a}(z^*) \neq 0$. \end{definition} We point out that if $z^*$ has a simple toric eigenvector, then for generic $a_i$ the tuple $(1,f_1(x,z^*)-a_1(x), \ldots, f_n(x,z^*)-a_n(x))$ is a smooth point on the resultant hypersurface given by $\{R_{\CC_0, \ldots, \CC_n} = 0\}$. This implies that the corresponding eigenvector is unique. We summarize the above discussion in the following theorem. \begin{theorem} \label{thm:main} Under Assumptions \ref{assum:lattice} and \ref{assum:mv}, each simple eigenvalue $z^*$ of $T(x,z)$ with simple toric eigenvector is a pole of order one of the trace vector $({\rm Tr}_{x_1}(I_a), \ldots, {\rm Tr}_{x_n}(I_a)) \in \C(z)^n$. In the situations of Theorems \ref{thm:unmixed} and \ref{thm:pyramid}, all simple poles correspond to such eigenvalues. \end{theorem} We leave the problem of determining the precise conditions under which a simple eigenvalue has a simple toric eigenvector for future research. In our examples and experiments from Section \ref{sec:6}, we observe that this is satisfied for all simple eigenvalues. \section{Contour integration and homotopy continuation} \label{sec:4} Let $T(x,z)$ be a PEPv satisfying Assumptions \ref{assum:lattice} and \ref{assum:mv}. 
We write the trace vector from Theorem \ref{thm:main} as ${\rm Tr}_{\A_0}(I_a) = ({\rm Tr}_{x_1}(I_a), \ldots, {\rm Tr}_{x_n}(I_a))$. Using Definition \ref{def:trace} and Assumption \ref{assum:mv}, we see that the entries of ${\rm Tr}_{\A_0}(I_a)$ are computed as a sum of $\delta$ terms: \begin{equation} \label{eq:tracereminder} {\rm Tr}_{x_i}(I_a) = \sum_{\xi \in V(I_a)} \, \xi_i. \end{equation} The simple eigenvalues of $T(x,z)$ with simple toric eigenvector are among the poles of ${\rm Tr}_{\A_0}(I_a)$. We remind the reader that $a \in \C[x]^n$ has homogeneous entries of degree $d_i$, where $d_i$ is the degree in $x$ of the entries in the $i$-th row of $T(x,z)$. In analogy with Beyn's method, we evaluate the trace for several vectors $a$. We collect ${\rm Tr}_{\A_0}(I_{a^{(j)}})$ for $n$ random choices $a^{(1)}, \ldots, a^{(n)} \in \C[x]^n$ in the columns of \begin{equation} \label{eq:U} U(z) = \begin{pmatrix} \vrule & & \vrule \\ \\ {\rm Tr}_{\A_0}(I_{a^{(1)}}) & \cdots & {\rm Tr}_{\A_0}(I_{a^{(n)}}) \\ \\ \vrule & & \vrule \end{pmatrix} \quad \in \C(z)^{n \times n}. \end{equation} Our next result uses notation from Theorem \ref{thm:traceformula} and explains our interest in the matrix $U(z)$. \begin{theorem} \label{thm:REP} Let $U(z)$ be as above and let $Q(z) =( {\cal Q}_{x_i,a^{(j)}}(z))_{i,j}$. Suppose that $\det Q(z) \neq 0$ and $z^*$ is a simple eigenvalue of $T(x,z)$ with simple toric eigenvector $x^* \in \PP^{n-1}$. If ${\cal S}_{a^{(j)}}(z^*) \neq 0$ for $j = 1, \ldots, n$, we have $ U(z^*)^{-1} \cdot x^* = 0$ and $z^*$ is a simple zero of $\det U(z)^{-1}$. \end{theorem} \begin{proof} If the matrix $Q(z) = ( {\cal Q}_{x_i,a^{(j)}}(z))_{i,j}$ is invertible, then so is $U(z) \in \C(z)^{n \times n }$. Indeed, Theorem \ref{thm:traceformula} implies $\det(U(z)) = \det(Q(z)) \cdot ({\cal R}(z)^n \cdot \prod_{j=1}^n {\cal S}_{a^{(j)}}(z))^{-1}$. For any $j$, we have \[U(z) \cdot \begin{pmatrix} 0 \\ \vdots \\{\cal R}(z) \cdot {\cal S}_{a^{(j)}}(z) \\ \vdots \\ 0 \end{pmatrix} = \begin{pmatrix} {\cal Q}_{x_1,a^{(j)}}(z) \\ \vdots \\ {\cal Q}_{x_j,a^{(j)}}(z) \\ \vdots \\ {\cal Q}_{x_n,a^{(j)}}(z) \end{pmatrix} \] by Theorem \ref{thm:traceformula}. This is an equality of vectors of rational functions. We denote the right-hand side by $Q_j(z)$. Left-multiplying by $U(z)^{-1}$ and plugging in $z = z^*$ shows that $(z^*,Q_j(z^*))$ is an eigenpair of $U(z)^{-1}$. Here we use that $x^*$ is a simple toric eigenvector, so that $Q_j(z^*) \neq 0$. It remains to show that, as points in projective space $\PP^{n-1}$, we have $Q_j(z^*) = x^*$. For this, one adapts the proof of \cite[Lemma 3.9]{d2015poisson}. The important step requires \cite[Proposition 1.37]{d2013heights}. For brevity, we omit technicalities and leave the details to the reader. To see that $z^*$ is a simple zero of $\det U(z)^{-1}$, we start from the identity \begin{equation} \label{eq:detform} \det U(z)^{-1} \cdot \det Q(z) = {\cal R}^n(z) \cdot \prod_{j=1}^n {\cal S}_{a^{(j)}}(z). \end{equation} We have established that $\det U(z)^{-1} = c_1 (z-z^*)^\kappa + O((z-z^*)^{\kappa+1})$ near $z = z^*$ for some $c_1 \in \C \setminus \{0\}$ and $\kappa > 0$. Moreover, since ${\cal S}_{a^{(j)}}(z^*) \neq 0$ and $z^*$ is a simple zero of ${\cal R}(z)$, the right-hand side equals $c_3 (z-z^*)^n + O((z-z^*)^{n+1})$ for some $c_3 \in \C \setminus \{0\}$. Since $Q_j(z^*) = x^* \in \PP^{n-1}$ for all $j = 1, \ldots, n$, we know that ${\rm rank}(Q(z^*)) = 1$.
Therefore, $(z-z^*)$ divides all but one of the invariant factors of $Q(z)$, viewed as a matrix over $\C[z]$. It follows that $\det Q(z) = c_2 (z-z^*)^\lambda + O((z-z^*)^{\lambda + 1})$ for $\lambda \geq n-1$. Since $\kappa + \lambda = n$ by \eqref{eq:detform}, we must have $\kappa = 1, \lambda = n-1$, which concludes the proof. \end{proof} Theorem \ref{thm:REP} shows that the matrix $U(z)$ reduces our problem to a rational eigenvalue problem of the form $U(z)^{-1} \cdot x = 0$, which can be solved using contour integration techniques from Section \ref{sec:2}. We proceed by discussing how to do this in practice. The $k$-th \emph{moment matrix} $A_k$ is given by \[ A_k \, \, = \, \, \frac{1}{2 \pi \sqrt{-1}} \, \oint_{\partial \Omega} z^k \, U(z) \, {\rm d} z, \qquad k = 0, 1, 2, \ldots . \] To find the poles of $U(z)$, these matrices are arranged into two block Hankel matrices $B_0, B_1$, on which we perform a sequence of standard numerical linear algebra operations. This was explained in Section \ref{sec:2}. The rank of $B_0$ equals the number of eigenvalues inside $\partial \Omega$. We emphasize that when $T(x,z) = T(z)$ represents a PEP, the matrix $U(z)$ is given by $T(z)^{-1} \cdot \begin{pmatrix}a^{(1)} & \cdots & a^{(\ell)} \end{pmatrix}^\top$ and our moment matrices $A_k$ coincide with those used in Beyn's algorithm. In practice, we approximate the moment matrices $A_k$ using numerical integration techniques. We assume that $\partial \Omega$ is parameterized by a differentiable map $\varphi: [0,2\pi) \rightarrow \C$, so that the $k$-th moment matrix can be written as \[ A_k \, \, = \, \, \frac{1}{2\pi \sqrt{-1}} \, \int_0^{2\pi}U(\varphi(t)) \, \varphi^{\prime}(t)\, \varphi^k(t) \, {\rm d} t .\] A standard approach to evaluate this integral numerically is to use the \emph{trapezoidal rule} with $N+1$ equidistant nodes $t_\ell = \frac{2\pi \ell}{N}$, $\ell=0,\ldots,N$. This gives the approximation $A_{k,N} \approx A_k$: \begin{equation} \label{eq:int_approx} A_{k,N} \, = \, \frac{1}{\sqrt{-1} \, N} \sum_{\ell=0}^{N-1} U(\varphi(t_\ell)) \, \varphi^{\prime}(t_\ell) \, \varphi^k(t_\ell) . \end{equation} Hence, we need to evaluate $U(z)$ for $z = \varphi(t_\ell), \ell = 0, \ldots, N-1$. We do this efficiently, without explicitly constructing $U(z)$, using \emph{homotopy continuation methods}. Here, we briefly review the basics. For a complete introduction, the reader is referred to the textbook \cite{sommese2005numerical}. For fixed $t \in [0, 2\pi)$, the trace vectors ${\rm Tr}_{{\cal A}_0}(I_{a^{(j)}})_{|z = \varphi(t)}$ are obtained by summing over the solutions to the system of polynomial equations given by $F(x,t) = 0$, where \[ F(x,t) \, = \, T(x,\varphi(t)) \cdot x - a^{(j)}(x) \, = \, \begin{pmatrix} f_1(x,\varphi(t)) - a^{(j)}_1(x) \\ \cdots \\ f_n(x,\varphi(t)) - a^{(j)}_n(x) \end{pmatrix}. \] By Assumption \ref{assum:mv}, there are $\delta$ solutions. We think of these solutions as paths $x^{(m)}: [0,2 \pi) \rightarrow \C^n$ satisfying $F(x^{(m)}(t),t) = 0$, $m = 1, \ldots, \delta$. These paths are described by a system of ordinary differential equations called the \emph{Davidenko equation}: \begin{equation} \label{eq:davidenko} \frac{{\rm d} F(x(t),t)}{{\rm d} t} \, = \, J_F(x(t),t) \cdot \frac{{\rm d} x}{{\rm d} t} + \frac{\partial F(x(t),t)}{\partial t} \, = \, 0 , \end{equation} where $J_F$ is the Jacobian matrix whose $(j,k)$ entry is $\frac{\partial f_j}{\partial x_k}$. 
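In practice, a single tracking step from $t_{\ell-1}$ to $t_\ell$ based on the Davidenko equation can be sketched as follows; this is a simplified first-order version of the predictor-corrector scheme formalized in Algorithm \ref{alg:homotopy} below. The routines \texttt{F}, \texttt{JF} and \texttt{Ft} are assumed to evaluate $F(x,t)$, the Jacobian $J_F(x,t)$ and the partial derivative $\partial F / \partial t$, respectively.
\begin{minted}{julia}
using LinearAlgebra

# One predictor-corrector step for a solution path of F(x(t), t) = 0.
function continuation_step(F, JF, Ft, x, tprev, tnext; newton_steps = 3)
    h = tnext - tprev
    # Euler predictor: dx/dt = -J_F(x,t)^{-1} * dF/dt(x,t), from the Davidenko equation.
    xnew = x - h * (JF(x, tprev) \ Ft(x, tprev))
    # Newton corrector: refine the prediction on F(., tnext) = 0.
    for _ in 1:newton_steps
        xnew = xnew - JF(xnew, tnext) \ F(xnew, tnext)
    end
    return xnew
end
\end{minted}
For the PEPv, $F(x,t) = T(x,\varphi(t)) \cdot x - a^{(j)}(x)$ as above, and repeating this step for $\ell = 1, \ldots, N-1$ and $m = 1, \ldots, \delta$ produces the path values $x^{(m)}(t_\ell)$ needed to evaluate $U(\varphi(t_\ell))$.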
Each of these paths is uniquely determined by its initial value $x^{(m)}(t_0) = x^{(m)}(0)$ at $t_0 = 0$. For computing the trace, we need to evaluate the paths at the discrete points $t_\ell = \frac{2 \pi \ell}{N}$. The situation is illustrated in Figure \ref{fig:homotopy}, where $\partial \Omega$ is the unit circle in the complex plane, parameterized by $\varphi(t) = \cos(t) + \sqrt{-1} \cdot \sin(t)$. This is drawn in orange. At each of the points $\varphi(t_\ell)$, represented as black dots on $\partial \Omega$, there are $\delta = 3$ solutions $x^{(m)}(t_\ell), m = 1, \ldots, 3$ to $F(x,t_\ell) = 0$. This is illustrated with a dashed line for one choice of $\ell$. \begin{figure} \centering \input{paths_contourintegration} \caption{An illustration of the paths $x^{(m)}(t), m = 1, \ldots, \delta$ and the discretized paths $x^{(m)}(t_\ell), \ell = 0, \ldots, N$ for $\delta = 3$ and $N = 9$.} \label{fig:homotopy} \end{figure} Approximating $x^{(m)}(t_\ell)$ can be done using numerical techniques for solving the Davidenko equation \eqref{eq:davidenko}. An example is the Euler method, which approximates $x^{(m)}(t_\ell)$ from $x^{(m)}(t_{\ell-1})$ using finite differences. An important remark is that, in our scenario, we have an implicit equation $F(x(t),t) = 0$ satisfied by the solution paths. This allows us, in every step, to refine an approximation $\widetilde{x^{(m)}(t_\ell)}$ for $x^{(m)}(t_\ell)$ using \emph{Newton iteration} on $F(x,t_\ell) = 0$. With a slight abuse of notation, we also write $x^{(m)}(t_\ell)$ for the numerical approximation of $x^{(m)}(t_\ell)$ obtained \emph{after} this refinement. The path values $x^{(m)}(t_\ell)$ are used to evaluate the $j$-th column ${\rm Tr}_{{\cal A}_0}(I_{a^{(j)}})$ of $U(\varphi(t_\ell))$, by taking $a = a^{(j)}$ in \eqref{eq:tracereminder} and summing over $\xi = x^{(m)}(t_\ell)$, $m = 1, \ldots, \delta$. We summarize this discussion in Algorithm \ref{alg:homotopy} and provide some clarifying remarks. We start by pointing out that Assumption \ref{assum:mv} guarantees that for all but finitely many values $z \in \C$, the system of equations $T(x,z) \cdot x - a^{(j)}(x) = 0$ has $\delta$ isolated solutions $x \in \C^n$, each with multiplicity one. We assume that the contour $\partial \Omega$ misses these finitely many $z$-values, which ensures that the solution paths $x^{(m)}(t)$ do not \emph{cross}, i.e.~$x^{(m)}(t) \neq x^{(m')}(t)$ for $m \neq m'$. This can be realized, if necessary, by slightly enlarging $\Omega$. In line \ref{line:start} of Algorithm \ref{alg:homotopy}, the starting points $x^{(m)}(t_0)$ are computed. This can be done using any numerical method for solving polynomial systems. Recent eigenvalue methods are described in \cite{bender2021yet}. In the case of many variables, it is favorable to use the \emph{polyhedral homotopies} introduced in \cite{huber1995polyhedral}. Line \ref{line:predict} is often called the \emph{predictor} step. Our presentation assumes a first order predictor, which uses only $x^{(m)}(t_{\ell-1})$ to compute an approximation for $x^{(m)}(t_\ell)$. In practice, one sometimes uses the path values at $t_{\ell-2}, t_{\ell-3}, \ldots$ for more accurate results. It is important to remark that when $N$ is too small, the step size $2\pi/N$ may be too large to track the paths reliably. A bad approximation in line \ref{line:predict} may cause the Newton iteration in line \ref{line:correct} to converge to a \emph{different} path. This phenomenon is called \emph{path jumping}.
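To make the predictor and corrector steps of Algorithm \ref{alg:homotopy} concrete, the sketch below advances a single path from $t_{\ell-1}$ to $t_\ell$ by one Euler step on the Davidenko equation \eqref{eq:davidenko}, followed by a few Newton iterations on $F(\cdot,t_\ell) = 0$. It is written in plain Julia purely for illustration; the function names are ours, and our actual implementation relies on the adaptive path tracking of \texttt{HomotopyContinuation.jl}.
\begin{verbatim}
using LinearAlgebra

# One predictor-corrector step for a single solution path of F(x, t) = 0.
#   F  : (x, t) -> value of F, a vector in C^n
#   JF : (x, t) -> Jacobian of F with respect to x (n x n matrix)
#   Ft : (x, t) -> partial derivative of F with respect to t
function predict_correct(F, JF, Ft, x, told, tnew; newton_steps = 3)
    dt = tnew - told
    # Predictor: Euler step on the Davidenko equation, dx/dt = -JF \ Ft.
    xp = x - JF(x, told) \ (Ft(x, told) * dt)
    # Corrector: Newton iteration on F(., tnew) = 0.
    for _ in 1:newton_steps
        xp = xp - JF(xp, tnew) \ F(xp, tnew)
    end
    return xp
end

# Toy usage: follow a path of x^2 - (1 + t) = 0 from t = 0 to t = 0.1.
F(x, t)  = [x[1]^2 - (1 + t)]
JF(x, t) = reshape([2 * x[1]], 1, 1)
Ft(x, t) = [-1.0]
predict_correct(F, JF, Ft, [1.0 + 0im], 0.0, 0.1)   # approx [sqrt(1.1)]
\end{verbatim}
If the step $t_\ell - t_{\ell-1}$ is too large, the Euler prediction may land in the basin of attraction of a different solution and the Newton corrector then converges to that solution; this is exactly the path jumping phenomenon described above.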
To remedy this, one could take some `extra' steps between $t_{\ell-1}$ and $t_\ell$. Recent studies in the direction of \emph{adaptive stepsize} algorithms are \cite{telen2020robust,timme2021mixed}. Details are beyond the scope of this paper. In our implementation, the algorithm in \cite{timme2021mixed} decides how many steps to take between $t_{\ell-1}$ and $t_\ell$. Line \ref{line:correct} is called the \emph{corrector} step, and Algorithm \ref{alg:homotopy} is a blueprint for a \emph{predictor-corrector} scheme, see e.g.~\cite[Alg.~2.1]{telen2020robust}.
\begin{algorithm}[h!] \caption{Evaluating the $j$-th column of $U(z)$ at $z = \varphi(t_\ell), \ell = 0, \ldots, N-1$} \label{alg:homotopy} \begin{algorithmic}[1} \State Compute $\delta$ \emph{start solutions} $x^{(m)}(t_0), m = 1, \ldots, \delta$ satisfying $F(x^{(m)}(t_0), t_0) = 0$ \label{line:start} \State ${\rm Tr}_{{\cal A}_0}(I_{a^{(j)}})_{|z = \varphi(t_0)} = \left (\sum_{m = 1}^\delta (x^{(m)}(t_0))_i \right )_{i = 1, \ldots, n}$ \State {$\ell \gets 1$} \While {$\ell \leq N-1$} \For {$m = 1, \ldots, \delta$} \State $\widetilde{x^{(m)}(t_\ell)} \gets $ an approximation for $x^{(m)}(t_\ell)$ obtained from $x^{(m)}(t_{\ell-1})$ \label{line:predict} \State $x^{(m)}(t_\ell) \gets $ refine $\widetilde{x^{(m)}(t_\ell)}$ using Newton iteration \label{line:correct} \EndFor \State ${\rm Tr}_{{\cal A}_0}(I_{a^{(j)}})_{|z = \varphi(t_\ell)} = \left (\sum_{m = 1}^\delta (x^{(m)}(t_\ell))_i \right )_{i = 1, \ldots, n}$ \State $\ell \gets \ell + 1$ \EndWhile \end{algorithmic} \end{algorithm}
\section{Complexity} \label{sec:5} In this section, we discuss the complexity of the contour integration algorithm presented in Section \ref{sec:4}. We split the algorithm into two major steps: \begin{enumerate} \item Evaluate the moment matrices $A_0, \ldots, A_{2M-1}$. \item Extract the eigenvalues from these moment matrices. \end{enumerate} In Step 2, one constructs the matrices $B_0, B_1$ from \eqref{eq:Bi}. These are of size $M \cdot n \times M \cdot n$, and $M$ is chosen such that $M \cdot n \geq \delta(\Omega)$, where $\delta(\Omega)$ is the number of eigenvalues inside $\Omega$. The eigenvalues are then extracted from $B_0, B_1$ by computing an SVD, see Section \ref{sec:2}. The cost is $O(M^3 \cdot n^3)$. The most favorable situation for our method is when $\delta(\Omega) \approx M \cdot n \ll \hat{\delta}$. Step 1 uses numerical homotopy continuation. Under Assumption \ref{assum:mv}, it requires tracking $n \cdot \delta = n \cdot {\rm MV}(P_1, \ldots, P_n)$ solution paths. The homotopy is used to evaluate $U(\varphi(t_\ell))$ as discussed in Section \ref{sec:4}. The moment matrices $A_k$ are then approximated via \eqref{eq:int_approx}. In our analysis, we assume that the number of nodes $N$ is fixed. Moreover, we ignore the complexity of computing $A_{k,N}$ from $U(\varphi(t_\ell))$, as it is negligible compared to the cost of tracking our $n \cdot \delta$ paths. The number $n \cdot \delta$ should be compared to the total number of eigenvalues of $T(x,z)$, denoted $\hat{\delta}$. This is the number of paths tracked in the naive approach of computing all eigenpairs and discarding those for which $z \notin \Omega$. However, we warn the reader that one cannot straightforwardly draw conclusions about the computation time by simply comparing $n \cdot \delta$ and $\hat{\delta}$.
For instance, it might be favorable to solve $n$ problems with $\delta < \hat{\delta}$ solutions rather than one problem with $\hat{\delta}$ solutions, even if $n \cdot \delta > \hat{\delta}$. Below, we compute the number of paths $n \cdot \delta$ for two families of PEPv's. The first one is inspired by Theorems \ref{thm:unmixed} and \ref{thm:pyramid}, where $d_1 = \cdots = d_n$. The second one is a family of systems of rational function equations from \cite{claes2022linearizable}, which can be solved using a slight modification of our method. \subsection{Unmixed, dense equations} We consider the case where $T(x,z) \cdot x = (f_1(x,z), \ldots, f_n(x,z))^\top$ comes from the polynomial system $f_1 = \cdots = f_n = 0$, where each $f_i$ is homogeneous of degree $d+1$ in $x$, and of degree $e$ in $z$. We assume that $x_j^{d+1}, j = 1, \ldots, n$ appear in each of the $f_i$. First, we also choose the polynomials $a_i(x) \in \C[x]_d$ such that $x_j^d, j = 1, \ldots, n$ appear in each of them. This is the situation of Theorem \ref{thm:unmixed}. We compute the numbers $n \cdot \delta$ and $\hat{\delta}$ for this setup. \begin{proposition} \label{prop:rat1} Let $f_1, \ldots, f_n, a_1, \ldots, a_n$ be as in Theorem \ref{thm:unmixed}. We have \[ n \cdot \delta = n \cdot ((d+1)^n - d^n), \quad \hat{\delta} = e \cdot n \cdot (d+1)^{n-1}.\] \end{proposition} \begin{proof} By the multihomogeneous version of B\'ezout's theorem, the total number of eigenvalues, i.e., solutions to $f_1 = \cdots = f_n = 0$, is $\hat{\delta} = e \cdot n \cdot (d+1)^{n-1}$. To compute $\delta$, consider the polytope $P = P_1 = \cdots = P_n \subset \R^n$, given by \eqref{eq:dense}, with $d_i = d$. By Kushnirenko's theorem, the number $\delta$ is the lattice volume of $P$. This is given by $\delta = (d+1)^n - d^n$. \end{proof} It follows that, for large $d$, the ratio $(n \cdot \delta)/\hat{\delta}$ tends to $n/e$. Hence, our method tracks significantly fewer solution paths when $e \gg n$. We note that, for small $d$, this conclusion is pessimistic. For instance, if $d = 2$, we find that $(n \cdot \delta)/\hat{\delta} \approx 2/e$. A smaller number of paths $n \cdot \delta$ is obtained when the $a_i(x)$ are chosen as in Theorem \ref{thm:pyramid}. The computation is similar to the proof of Proposition \ref{prop:rat1}, noting that the lattice volume of a pyramid of lattice height 1 equals the $(n-1)$-dimensional lattice volume of its base. \begin{proposition} \label{prop:rat2} Let $f_1, \ldots, f_n, a_1, \ldots, a_n$ be as in Theorem \ref{thm:pyramid}. We have \[ n \cdot \delta = n \cdot (d+1)^{n-1}.\] \end{proposition} Propositions \ref{prop:rat1} and \ref{prop:rat2} lead us to conclude that the methods presented in this paper are effective only when the degree in the eigenvalue variable is large. This situation arises, for instance, when the PEPv comes from a polynomial approximation of a set of equations that depends transcendentally on $z$. We will show an example in Section \ref{exp:3}. \subsection{Rational functions} We now discuss an example where the entries of the matrix $T(x,z)$ are homogeneous \emph{rational} functions in $x$. More precisely, consider a rational map $T: \PP^{n-1} \times \C \dashrightarrow \C^{n \times n}$ of the form \begin{equation} \label{eq:rationalT} T(x,z) \, = \, T_0(z) + \frac{r_1(x)}{s_1(x)} \, T_1 \, + \, \cdots \, + \, \frac{r_m(x)}{s_m(x)} \,T_m, \end{equation} where $T_0(z) = A + z \cdot B$ with $A, B \in \C^{n \times n}$, and $r_i(x), s_i(x)$ are linear forms in $x$. 
The associated \emph{rational eigenvalue problem with eigenvector nonlinearities} (REPv) is \begin{equation} \label{eq:ratprob} \text{find $(x^*,z^*) \in (\PP^{n-1} \setminus V_{\PP^{n-1}}(s_1 \cdots s_m)) \times \C$ such that $T(x^*,z^*) \cdot x^* = 0$.} \end{equation} Here we use the standard notation $V_X(f) = \{ x \in X ~|~ f(x) = 0 \}$. The problem \eqref{eq:ratprob} was studied in \cite{claes2022linearizable}. We now discuss how our methods can be used to solve this REPv. We point out that, in this case, the problem cannot be turned into a PEPv by clearing denominators, as this typically introduces infinitely many spurious eigenvectors. The rows of $T$ are homogeneous of degree $d = 0$ in $x$. Consistent with our approach for PEPv's, we consider the equations $T(x,z) \cdot x - a = (f_1 - a_1, \ldots, f_n - a_n)^\top = 0$, where $a = (a_1, \ldots, a_n)^\top \in \C^n$ is a generic vector of complex constants. The matrix $U(z)$ from \eqref{eq:U} is constructed by summing over the $\delta$ solutions. The following theorem predicts $\delta$. \begin{theorem} \label{thm:nosolsrat} For $T$ as in \eqref{eq:rationalT} and generic $z \in \C, a \in \C^n$, the system of equations $T(x,z) \cdot x - a = 0$ has at most $\delta$ isolated solutions $x \in \C^n \setminus V_{\C^n}(s_1 \cdots s_m)$, with \[ \delta = \sum_{k=0}^{\min(n-1,m)} \begin{pmatrix} n-1 \\ k \end{pmatrix} \cdot \begin{pmatrix} m \\ k \end{pmatrix}. \] \end{theorem} \begin{proof}[Sketch of proof] The system of rational function equations $T(x,z) \cdot x - a = 0$ is equivalent to the system of $n + m$ polynomial equations \begin{equation} \label{eq:rattopol} (T_0 + \lambda_1 T_1 + \cdots + \lambda_m T_m) \cdot x - a = 0, \quad s_i(x) \lambda_i - r_i(x) = 0, \quad i = 1, \ldots, m, \end{equation} where $\lambda_1, \ldots, \lambda_m$ are new variables and $T_0 = T_0(z)$. The entries of $(T_0 + \lambda_1 T_1 + \cdots + \lambda_m T_m) \cdot x - a$ all have the same Newton polytope, denoted $P \subset \R^{m+n}$. The polynomial $s_i(x)\lambda_i - r_i(x)$ has Newton polytope $\Delta_n \times L_i$, where $\Delta_n = {\rm Conv}(e_1, \ldots, e_n) \subset \R^n$ and $L_i = {\rm Conv}(0,e_i) \subset \R^m$. By the BKK theorem, the number of isolated solutions to \eqref{eq:rattopol} is bounded by the mixed volume $\delta = {\rm MV}(P, \ldots, P, \Delta_n \times L_1, \ldots, \Delta_n \times L_m)$. Here $P$ is listed $n$ times. Multilinearity and symmetry of the mixed volume give the equality \[ \delta = \sum_{k=0}^m \begin{pmatrix} m \\ k \end{pmatrix}{\rm MV}(P,\ldots, P, \Delta_n, \ldots, \Delta_n, L_{k+1}, \ldots, L_m), \] where $\Delta_n$ is listed $k$ times in the $k$-th term. Since $\Delta_n$ has dimension $n-1$, all terms with $k > n-1$ are zero. It remains to show that for $k \leq \min(n-1,m)$, we have \[{\rm MV}(P,\ldots, P, \Delta_n, \ldots, \Delta_n, L_{k+1}, \ldots, L_m) = \begin{pmatrix} n-1 \\ k \end{pmatrix}.\] This number counts solutions to $(T_0 + \lambda_1 T_1 + \cdots + \lambda_m T_m) \cdot x - a = 0$ after plugging in random values for $\lambda_{k+1}, \ldots, \lambda_m$ and replacing $x_{n-k+1}, \ldots, x_n$ by generic linear forms in $x_1, \ldots, x_{n-k}$. What is left is a system of $n$ equations in the variables $(x_1, \ldots, x_{n-k}, \lambda_1, \ldots, \lambda_k)$. It has at most $\begin{pmatrix} n-1 \\ k \end{pmatrix}$ solutions by the multihomogeneous version of B\'ezout's theorem. \end{proof} By \cite[Theorem 3.1]{claes2022linearizable}, the total number of eigenvalues of \eqref{eq:rationalT} is $\hat{\delta} = \begin{pmatrix} n + m \\ m+1 \end{pmatrix}$.
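For instance, for $n = 10$ and $m = 2$, which is the setting of Experiment 4 in Section \ref{sec:6}, the sum in Theorem \ref{thm:nosolsrat} consists of the terms $1$, $9 \cdot 2 = 18$ and $36 \cdot 1 = 36$ for $k = 0, 1, 2$, so that $\delta = 55$ and $n \cdot \delta = 550$, whereas $\hat{\delta} = \begin{pmatrix} 12 \\ 3 \end{pmatrix} = 220$.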
Although $\delta < \hat{\delta}$, we have $n \cdot \delta > \hat{\delta}$. We will illustrate this further with an example in Section \ref{sec:6}. We leave the question of whether and when our method is advantageous for solving this type of REPv as a topic for future research.
\section{Numerical experiments} \label{sec:6} In this section we present several numerical examples illustrating the results obtained above. Our algorithm has two important parameters that impact the numerical performance: the number $N+1$ of discretization points on the contour used to evaluate the integral, and the number of moment matrices $2M$. In the experiments below, we will investigate the influence of these parameters on the accuracy. We assess the quality of an approximate eigenpair $(x^*,z^*)$ by its residual $r^* = \lVert T(x^*,z^*)\cdot x^* \rVert/\lVert x^*\rVert$. The presented results are generated by an implementation in Julia (v1.6) using \texttt{HomotopyContinuation.jl} (v2.6.4) \cite{breiding2018homotopy}. The source code is available online to reproduce all results\footnote{\texttt{github.com/robclaes/contour-integration}}. \subsection{Experiment 1} Consider the PEPv $T(x,z)\cdot x=0$ where $T(x,z)$ has size $3\times 3$ and each row is of degree $d = 2$ in $x$ and $e = 4$ in $z$. The coefficients are randomly generated in order to obtain a generic system. The contour enclosing the target domain $\Omega$ is shown in Figure~\ref{fig:exp1} together with the exact eigenvalues in the neighborhood of $\Omega$. The impact of the number of discretization points $N+1$ is the most intuitive: the more points, the higher the accuracy of the detected eigenvalues in $\Omega$. There is a less intuitive impact that has been observed in contour integration for nonlinear eigenvalue problems \cite{van2016nonlinear}. When the contour integral is approximated with a low number of points, it is possible that eigenvalues outside the contour are detected. Evaluating the contour integral with $1000$ points detects only the four eigenvalues in $\Omega$, with an average residual on the order of machine precision. However, evaluating the contour integral with $100$ points detects 14 eigenvalues, depicted in Figure~\ref{fig:exp1}: four eigenvalues in $\Omega$ with an average residual of $\approx 10^{-11}$, and eight eigenvalues outside the target domain with residuals varying from $10^{-9}$ to $10^{-5}$, depending on the distance from the contour. This phenomenon is best explained via the relation between numerical integration and filter functions on $\C$; see \cite{van2016nonlinear} for details. An obvious impact of the number of moment matrices can be seen in \eqref{eq:Bi}: the maximum number of eigenvalues that can be detected is $Mn$. Therefore, $M$ should be chosen large enough so that $Mn$ is at least the expected number of eigenvalues in $\Omega$. However, when a low number of discretization points is chosen, extra care must be taken when choosing the number of moment matrices: the algorithm will detect additional eigenvalues outside $\Omega$, so that the total number of detected eigenvalues may exceed this maximum for a given $M$. For the specific instance here, we selected $M=9$, which leads to a maximum of $Mn=27$ detectable eigenvalues. In the case with $100$ discretization points, this upper bound is large enough to detect the 14 eigenvalues.
When we set $M=2$, which should suffice for the expected $4$ eigenvalues in $\Omega$, and use $100$ discretization points, the eigenvalues outside $\Omega$ perturb the result, leading to an average residual of $10^{-3}$ for the $4$ eigenvalues in $\Omega$. Since the degree of the polynomials is the same for each row, we select the polynomials $a_i$ in accordance with Theorem~\ref{thm:pyramid}, i.e., $a_i$ is a monomial in $x$ of degree $d=2$. By Proposition \ref{prop:rat2}, this leads to $n\cdot\delta = n\cdot(d+1)^{n-1}=27$ tracked paths, which is smaller than the expected number of tracked paths when using random polynomials: $n\cdot\delta=n\cdot\left((d+1)^n-d^n\right)=57$.
\begin{figure} \centering \input{experiment1} \caption{Eigenvalues (\ref{marker:eigs}) inside the target domain defined by the contour (\ref{marker:contour}) and the extracted values by contour integration (\ref{marker:CI}) for experiment 1.} \label{fig:exp1} \end{figure}
\subsection{Experiment 2} Consider the PEPv $T(x,z)\cdot x=0$ where $T(x,z)$ has size $10\times 10$ and each row is of degree $d = 1$ in $x$ and $e = 5$ in $z$. The coefficients are randomly generated. The contour enclosing the target domain $\Omega$ is shown in Figure~\ref{fig:exp2} together with the exact eigenvalues in the neighborhood of $\Omega$. This is a challenging problem, since the total number of solutions of the PEPv equals $\hat{\delta}=25600$ and almost all of them are clustered around the origin of the complex plane. The selected contour is a circle with center at the origin and a radius of $0.1$, which encircles $44$ eigenvalues of the problem. Since the neighborhood of the target region $\Omega$ is densely scattered with eigenvalues, we select a relatively high number of integration points, $N+1=400$, to increase the sharpness of the integration filter, as discussed in the previous example. Given the high number of integration points, $2M=10$ moment matrices should suffice to capture the $44$ expected eigenvalues in $\Omega$, since $Mn = 50 \geq 44$. The result is shown in Figure~\ref{fig:exp2}: a total of $46$ eigenvalues are detected, $44$ inside $\Omega$ and $2$ just outside the target region. The residuals of the extracted eigenpairs vary from $10^{-4}$ to $10^{-8}$. In accordance with Theorem~\ref{thm:pyramid}, we selected each $a_i$ as a monomial of degree $d=1$, which leads to $n\cdot\delta=n\cdot(d+1)^{n-1}=5120$ tracked paths. For comparison, finding all solutions with standard homotopy continuation takes roughly 2390 seconds, while our approach with $400$ integration points takes 1120 seconds. (Both timings result from a single-threaded implementation in Julia.)
\begin{figure} \centering \begin{subfigure}{0.48\textwidth} \centering \input{experiment2} \caption{Experiment 2.} \label{fig:exp2} \end{subfigure} \hfill \begin{subfigure}{0.48\textwidth} \centering \input{experiment4} \caption{Experiment 4.} \label{fig:exp4} \end{subfigure} \caption{Eigenvalues (\ref{marker:eigs}) inside the target domain defined by the contour (\ref{marker:contour}) and the extracted values by contour integration (\ref{marker:CI}).} \label{fig:my_label} \end{figure}
\subsection{Experiment 3} \label{exp:3} Consider the system of equations $T(x,z)\cdot x=0$ given by \begin{equation} T(x,z) = \begin{pmatrix} x_1^2x_2 & -2\sqrt{-1}x_1^2x_2\cos(z)\\ -x_2^2\cos(z^2) & 2x_2^2\sin(3z) \end{pmatrix}.
\end{equation} Note that this system is not polynomial in $z$, but in practice it is solved by implicitly substituting high-order Maclaurin series for the sine and cosine functions. This approach leads to a PEPv that is of high degree in $z$. We expect infinitely many solutions, since the trigonometric functions can be expressed by their Maclaurin series in $z$. We use $100$ discretization points for the contour, and $2M = 16$ moment matrices. The $a_i$ are selected as random monomials in $x$ that have the same degree as the polynomials in the corresponding row of $T(x,z)$, similarly to Theorem~\ref{thm:pyramid}. This leads to 4 tracked paths, instead of 10 for random polynomials. Figure~\ref{fig:exp3_conv} shows the impact of the number of discretization points on the residuals of the 11 extracted solutions. As observed in Experiment 1, increasing the number of discretization points leads to a decrease in the residuals.
\begin{figure} \centering \input{experiment3_conv} \caption{Impact of number of discretization points on residuals for experiment~3.} \label{fig:exp3_conv} \end{figure}
\subsection{Experiment 4} Consider the REPv defined by \eqref{eq:rationalT} of dimension $n=10$ with $m=2$ rational terms, where all coefficients are randomly generated. A problem with these dimensions is expected to have $\hat{\delta} = \begin{pmatrix} n + m \\ m+1 \end{pmatrix}=220$ eigenvalues. According to Theorem~\ref{thm:nosolsrat}, we need to track $n\cdot\delta=550$ paths. As depicted in Figure~\ref{fig:exp4}, all $33$ eigenvalues inside the contour are detected, with residuals ranging from $10^{-8}$ to $10^{-12}$, together with one eigenvalue outside the contour with a residual of $10^{-6}$. This result is obtained using $N+1=400$ nodes and $2M=10$ moment matrices.
\section{Conclusions} We presented a new contour integration method for solving polynomial eigenvalue problems with eigenvector nonlinearities and developed its first theoretical foundations. The eigenvalues are the roots of a resultant polynomial. We showed that, under suitable assumptions, this polynomial equals the denominator of the trace obtained by summing over the solutions to a modified system of equations. This trace can be evaluated along a contour using numerical homotopy continuation techniques. In this way, we can extract the eigenvalues in a compact domain, and their corresponding eigenvectors, by numerical contour integration. We derived the number of homotopy continuation paths that need to be tracked for two classes of problems. This governs, to a certain extent, the complexity of our method. However, a direct comparison with the total number of eigenvalues is not very meaningful, since the difficulty and computational cost of tracking a single path may differ greatly. A comparative study on the total computational cost is an interesting topic for future research, together with a study of the applicability of other NEP methods to the compound trace matrix $U(z)$.
\section*{Acknowledgements} The work by Rob Claes and Karl Meerbergen is supported by the Research Foundation Flanders (FWO) Grant G0B7818N and the KU Leuven Research Council. We would like to thank Paul Breiding for his help with \texttt{HomotopyContinuation.jl}, and Carlos D'Andrea for insightful discussions.
\bibliographystyle{abbrv}
\bibliography{references.bib}

\noindent \footnotesize {\bf Authors' addresses:}

\smallskip

\noindent Rob Claes, KU Leuven \hfill {\tt [email protected]}

\noindent Karl Meerbergen, KU Leuven \hfill {\tt [email protected]}

\noindent Simon Telen, MPI-MiS Leipzig \hfill {\tt [email protected]}

\end{document}

% TikZ source data for the figures (an eigenvalue/contour plot over $\Re(z)$, $\Im(z)$ and a curve plot over $x_2$, $x_3$) omitted.
-0.48342852019836907 \\ -2.4111498095954613 -0.4824561403508772 \\ -2.406015037593985 -0.478067665661426 \\ -2.4038108171751786 -0.47619047619047616 \\ -2.399749373433584 -0.47275230936122314 \\ -2.3963974003692883 -0.4699248120300752 \\ -2.393483709273183 -0.46748163938455123 \\ -2.3889087607176798 -0.46365914786967416 \\ -2.387218045112782 -0.46225486302202506 \\ -2.381344088297574 -0.4573934837092732 \\ -2.380952380952381 -0.45707120620364683 \\ -2.37468671679198 -0.45193662667138956 \\ -2.373696006685052 -0.45112781954887216 \\ -2.3684210526315788 -0.44684662174783735 \\ -2.365967051813109 -0.4448621553884712 \\ -2.362155388471178 -0.441797746466931 \\ -2.358158965508983 -0.43859649122807015 \\ -2.355889724310777 -0.4367892863362303 \\ -2.3502708775360563 -0.4323308270676692 \\ -2.3496240601503757 -0.43182054331729436 \\ -2.343358395989975 -0.4268976036604988 \\ -2.3422947160922507 -0.42606516290726815 \\ -2.337092731829574 -0.42201742653303204 \\ -2.334231744305474 -0.4197994987468672 \\ -2.330827067669173 -0.41717514477805656 \\ -2.326085424294144 -0.41353383458646614 \\ -2.324561403508772 -0.4123701134562675 \\ -2.318295739348371 -0.4076043961427431 \\ -2.3178517644878376 -0.40726817042606517 \\ -2.31203007518797 -0.4028842876466949 \\ -2.309521653967272 -0.40100250626566414 \\ -2.305764411027569 -0.3981997248804913 \\ -2.301104632489421 -0.39473684210526316 \\ -2.299498746867168 -0.3935501116185016 \\ -2.293233082706767 -0.3889385559860648 \\ -2.2925952354303383 -0.38847117794486213 \\ -2.2869674185463658 -0.38437041730202814 \\ -2.2839848814814188 -0.38220551378446116 \\ -2.280701754385965 -0.3798356316605737 \\ -2.275283818927262 -0.37593984962406013 \\ -2.274436090225564 -0.3753336475692694 \\ -2.2681704260651627 -0.37087326135833487 \\ -2.2664789134249173 -0.36967418546365916 \\ -2.261904761904762 -0.36644948537553323 \\ -2.2575742823792533 -0.3634085213032581 \\ -2.255639097744361 -0.3620570146929709 \\ -2.2493734335839597 -0.3576996366798955 \\ -2.248569059570786 -0.35714285714285715 \\ -2.243107769423559 -0.353383197895947 \\ -2.2394530150386838 -0.3508771929824561 \\ -2.236842105263158 -0.3490966335599607 \\ -2.230576441102757 -0.3448412186393066 \\ -2.2302365567281295 -0.34461152882205515 \\ -2.224310776942356 -0.34062858353454073 \\ -2.2209008117507385 -0.3383458646616541 \\ -2.218045112781955 -0.33644445426585734 \\ -2.211779448621554 -0.33228996577646064 \\ -2.2114614611911967 -0.33208020050125314 \\ -2.2055137844611528 -0.3281777335542357 \\ -2.2018972540650537 -0.3258145363408521 \\ -2.199248120300752 -0.32409269781185124 \\ -2.192982456140351 -0.32003810785994014 \\ -2.1922225544133362 -0.31954887218045114 \\ -2.1867167919799497 -0.31602300455365695 \\ -2.1824206128849553 -0.3132832080200501 \\ -2.180451127819549 -0.3120338448037125 \\ -2.174185463659148 -0.30807813454736366 \\ -2.1724972394898225 -0.30701754385964913 \\ -2.1679197994987467 -0.3041570074322721 \\ -2.1624477415905132 -0.3007518796992481 \\ -2.161654135338346 -0.3002606247118297 \\ -2.155388471177945 -0.29640278460993197 \\ -2.1522614273874416 -0.29448621553884713 \\ -2.1491228070175437 -0.29257259690169507 \\ -2.142857142857143 -0.28877006047564485 \\ -2.141946732333716 -0.2882205513784461 \\ -2.136591478696742 -0.28500503568868624 \\ -2.131489411716122 -0.2819548872180451 \\ -2.130325814536341 -0.2812628615118713 \\ -2.1240601503759398 -0.2775568537484443 \\ -2.1208873092788267 -0.2756892230576441 \\ -2.117794486215539 -0.2738780945521194 \\ -2.111528822055138 -0.27022697680286195 \\ 
-2.11014239946216 -0.2694235588972431 \\ -2.1052631578947367 -0.2666106450048591 \\ -2.0992470765640587 -0.2631578947368421 \\ -2.098997493734336 -0.26301538782674605 \\ -2.092731829573935 -0.259459465706753 \\ -2.088187226520134 -0.2568922305764411 \\ -2.0864661654135337 -0.2559249725316284 \\ -2.080200501253133 -0.25242352191999157 \\ -2.076968151139003 -0.2506265664160401 \\ -2.073934837092732 -0.24894884730148792 \\ -2.0676691729323307 -0.24550179114854054 \\ -2.0655835004093515 -0.24436090225563908 \\ -2.06140350877193 -0.2420860031779059 \\ -2.055137844611529 -0.2386932629576345 \\ -2.054026582113889 -0.23809523809523808 \\ -2.0488721804511276 -0.23533544304379916 \\ -2.0426065162907268 -0.23199693879646024 \\ -2.04229033846242 -0.23182957393483708 \\ -2.036340852130326 -0.22869618145021323 \\ -2.0303647250514505 -0.22556390977443608 \\ -2.030075187969925 -0.22541291660155707 \\ -2.0238095238095237 -0.22216724444614502 \\ -2.018243303453752 -0.21929824561403508 \\ -2.017543859649123 -0.21893953627380616 \\ -2.011278195488722 -0.21574766941133877 \\ -2.005920673797321 -0.21303258145363407 \\ -2.0050125313283207 -0.21257465033458023 \\ -1.9987468671679198 -0.2094365048919884 \\ -1.9933881970745582 -0.20676691729323307 \\ -1.9924812030075187 -0.2063173195772341 \\ -1.9862155388471179 -0.20323281043929162 \\ -1.980636732534586 -0.20050125313283207 \\ -1.9799498746867168 -0.20016661560454171 \\ -1.9736842105263157 -0.19713565645079614 \\ -1.967656600720747 -0.19423558897243107 \\ -1.9674185463659148 -0.19412162067363584 \\ -1.9611528822055138 -0.19114412401448452 \\ -1.9548872180451127 -0.18818292077288806 \\ -1.9544331465588631 -0.18796992481203006 \\ -1.9486215538847118 -0.18525730475553862 \\ -1.9423558897243107 -0.18234965099159 \\ -1.940954907363539 -0.18170426065162906 \\ -1.9360902255639099 -0.17947430068573375 \\ -1.9298245614035088 -0.17662010529758027 \\ -1.9272123818463902 -0.17543859649122806 \\ -1.9235588972431077 -0.17379422405540745 \\ -1.9172932330827068 -0.17099339488274978 \\ -1.913193029619054 -0.16917293233082706 \\ -1.9110275689223057 -0.16821619720795475 \\ -1.9047619047619047 -0.16546864106495443 \\ -1.8988835041540109 -0.16290726817042606 \\ -1.8984962406015038 -0.16273935243679444 \\ -1.8922305764411027 -0.1600449751442214 \\ -1.8859649122807018 -0.15736786183461904 \\ -1.884251465805678 -0.15664160401002505 \\ -1.8796992481203008 -0.15472153826139837 \\ -1.8734335839598997 -0.15209767818844494 \\ -1.8692915826083216 -0.15037593984962405 \\ -1.8671679197994988 -0.14949748125919973 \\ -1.8609022556390977 -0.14692677699651885 \\ -1.8546365914786966 -0.14437378301993886 \\ -1.853984132660779 -0.14411027568922305 \\ -1.8483709273182958 -0.14185431777323596 \\ -1.8421052631578947 -0.13935454883383 \\ -1.838290114090764 -0.13784461152882205 \\ -1.8358395989974938 -0.13687946949325028 \\ -1.8295739348370927 -0.13443282505626086 \\ -1.8233082706766917 -0.13200433557891933 \\ -1.8222005534303105 -0.13157894736842105 \\ -1.8170426065162908 -0.1296077891617534 \\ -1.8107769423558897 -0.12723248837626117 \\ -1.8056728545260974 -0.12531328320802004 \\ -1.8045112781954886 -0.12487862784984376 \\ -1.7982456140350878 -0.12255641224748194 \\ -1.7919799498746867 -0.1202527945936377 \\ -1.7886719276098062 -0.11904761904761904 \\ -1.7857142857142858 -0.1179753022240038 \\ -1.7794486215538847 -0.11572482902417934 \\ -1.7731829573934836 -0.11349323215505594 \\ -1.7711659668354833 -0.11278195488721804 \\ -1.7669172932330828 -0.11129092723492864 \\ -1.7606516290726817 
-0.10911253133688151 \\ -1.7543859649122806 -0.10695329108731962 \\ -1.7531043812289004 -0.10651629072681704 \\ -1.7481203007518797 -0.10482499809282636 \\ -1.7418546365914787 -0.10271901259586474 \\ -1.7355889724310778 -0.10063246299127464 \\ -1.7344295892302386 -0.10025062656641603 \\ -1.7293233082706767 -0.0985770115584352 \\ -1.7230576441102756 -0.09654376777062523 \\ -1.7167919799498748 -0.09453024103573957 \\ -1.7150757680553412 -0.09398496240601503 \\ -1.7105263157894737 -0.09254646593707376 \\ -1.7042606516290726 -0.09058629338388369 \\ -1.6979949874686717 -0.08864611995146776 \\ -1.6949673246015757 -0.08771929824561403 \\ -1.6917293233082706 -0.08673286107271976 \\ -1.6854636591478698 -0.0848460875056198 \\ -1.6791979949874687 -0.08297959602513351 \\ -1.6740170127795078 -0.08145363408521303 \\ -1.6729323308270676 -0.08113569834211609 \\ -1.6666666666666667 -0.07932264974713216 \\ -1.6604010025062657 -0.07753016709334766 \\ -1.6541353383458646 -0.07575837593811857 \\ -1.652091398821509 -0.07518796992481203 \\ -1.6478696741854637 -0.07401548125512693 \\ -1.6416040100250626 -0.07229733253670155 \\ -1.6353383458646618 -0.07060016169127123 \\ -1.6290726817042607 -0.06892409695319728 \\ -1.6290658859114908 -0.06892230576441102 \\ -1.6228070175438596 -0.06728059327383659 \\ -1.6165413533834587 -0.06565836696250416 \\ -1.6102756892230576 -0.06405753566184759 \\ -1.6047165672248398 -0.06265664160401002 \\ -1.6040100250626566 -0.06247945175554265 \\ -1.5977443609022557 -0.06093249244438566 \\ -1.5914786967418546 -0.05940721791405886 \\ -1.5852130325814537 -0.05790376078565464 \\ -1.5789473684210527 -0.05642225476393093 \\ -1.578812817910927 -0.05639097744360902 \\ -1.5726817042606516 -0.05497264417833609 \\ -1.5664160401002507 -0.05354535769594969 \\ -1.5601503759398496 -0.0521403163084241 \\ -1.5538847117794485 -0.05075765651479208 \\ -1.5509685489637888 -0.05012531328320802 \\ -1.5476190476190477 -0.049402521299238306 \\ -1.5413533834586466 -0.04807426607480203 \\ -1.5350877192982457 -0.04676868898843163 \\ -1.5288220551378446 -0.04548592937341622 \\ -1.5225563909774436 -0.044226127706795855 \\ -1.5206969678997118 -0.043859649122807015 \\ -1.5162907268170427 -0.04299543032661875 \\ -1.5100250626566416 -0.04179037314424984 \\ -1.5037593984962405 -0.040608574750427444 \\ -1.4974937343358397 -0.039450178521972815 \\ -1.4912280701754386 -0.038315329016579046 \\ -1.4871575683452847 -0.03759398496240601 \\ -1.4849624060150375 -0.037206870702865304 \\ -1.4786967418546366 -0.036127101716671474 \\ -1.4724310776942355 -0.035071184643789874 \\ -1.4661654135338347 -0.034039267008758055 \\ -1.4598997493734336 -0.033031497555215096 \\ -1.4536340852130325 -0.032048026258522204 \\ -1.4489295384360206 -0.03132832080200501 \\ -1.4473684210526316 -0.03109067009920431 \\ -1.4411027568922306 -0.030162770922953955 \\ -1.4348370927318295 -0.029259481302500803 \\ -1.4285714285714286 -0.028380954275446665 \\ -1.4223057644110275 -0.02752734415094009 \\ -1.4160401002506267 -0.02669880652291138 \\ -1.4097744360902256 -0.02589549828346978 \\ -1.4035087719298245 -0.025117577636472058 \\ -1.4030505094290424 -0.02506265664160401 \\ -1.3972431077694236 -0.02437010538522618 \\ -1.3909774436090225 -0.023648567274139217 \\ -1.3847117794486214 -0.022952738233997485 \\ -1.3784461152882206 -0.02228277968616657 \\ -1.3721804511278195 -0.021638854406216084 \\ -1.3659147869674186 -0.02102112653815212 \\ -1.3596491228070176 -0.020429761608826008 \\ -1.3533834586466165 -0.019864926542528274 \\ -1.3471177944862156 
-0.019326789675766815 \\ -1.3408521303258145 -0.018815520772234143 \\ -1.3406118948120387 -0.018796992481203006 \\ -1.3345864661654134 -0.018334609403700627 \\ -1.3283208020050126 -0.01788086188844959 \\ -1.3220551378446115 -0.017454318229580826 \\ -1.3157894736842106 -0.017055151634129625 \\ -1.3095238095238095 -0.016683536782671433 \\ -1.3032581453634084 -0.016339649845025 \\ -1.2969924812030076 -0.0160236684961573 \\ -1.2907268170426065 -0.015735771932293526 \\ -1.2844611528822054 -0.015476140887231288 \\ -1.2781954887218046 -0.015244957648868532 \\ -1.2719298245614035 -0.015042406075943416 \\ -1.2656641604010026 -0.014868671614992501 \\ -1.2593984962406015 -0.01472394131752634 \\ -1.2531328320802004 -0.014608403857431193 \\ -1.2468671679197996 -0.01452224954859677 \\ -1.2406015037593985 -0.01446567036277403 \\ -1.2343358395989974 -0.014438859947667075 \\ -1.2280701754385965 -0.014442013645261837 \\ -1.2218045112781954 -0.014475328510397737 \\ -1.2155388471177946 -0.014539003329581354 \\ -1.2092731829573935 -0.014633238640051538 \\ -1.2030075187969924 -0.014758236749093961 \\ -1.1967418546365916 -0.014914201753614931 \\ -1.1904761904761905 -0.015101339559973342 \\ -1.1842105263157894 -0.015319857904078103 \\ -1.1779448621553885 -0.015569966371752716 \\ -1.1716791979949874 -0.01585187641937487 \\ -1.1654135338345866 -0.016165801394790034 \\ -1.1591478696741855 -0.01651195655850895 \\ -1.1528822055137844 -0.016890559105188257 \\ -1.1466165413533835 -0.017301828185403986 \\ -1.1403508771929824 -0.017745984927717375 \\ -1.1340852130325814 -0.01822325246104236 \\ -1.1278195488721805 -0.018733855937314418 \\ -1.1270911201264597 -0.018796992481203006 \\ -1.1215538847117794 -0.01928174345735616 \\ -1.1152882205513786 -0.019864191954841112 \\ -1.1090225563909775 -0.020480949265316697 \\ -1.1027568922305764 -0.021132252655530044 \\ -1.0964912280701755 -0.021818341585079132 \\ -1.0902255639097744 -0.022539457731804536 \\ -1.0839598997493733 -0.023295845017535562 \\ -1.0776942355889725 -0.0240877496341943 \\ -1.0714285714285714 -0.024915420070267206 \\ -1.0703583654829891 -0.02506265664160401 \\ -1.0651629072681705 -0.02578481072127676 \\ -1.0588972431077694 -0.026691962430705544 \\ -1.0526315789473684 -0.02763596569033249 \\ -1.0463659147869675 -0.02861708283747438 \\ -1.0401002506265664 -0.029635578705391987 \\ -1.0338345864661653 -0.030691720653045537 \\ -1.0301851425159645 -0.03132832080200501 \\ -1.0275689223057645 -0.03178950266000543 \\ -1.0213032581453634 -0.032930997727225436 \\ -1.0150375939849625 -0.03411131583226462 \\ -1.0087719298245614 -0.03533073963766324 \\ -1.0025062656641603 -0.03658955455664745 \\ -0.9976568849431824 -0.03759398496240601 \\ -0.9962406015037594 -0.03789049201984734 \\ -0.9899749373433584 -0.039240110663347545 \\ -0.9837092731829574 -0.04063038481872696 \\ -0.9774436090225563 -0.04206161663449444 \\ -0.9711779448621554 -0.043534111260652555 \\ -0.969828138717617 -0.043859649122807015 \\ -0.9649122807017544 -0.04505825952405899 \\ -0.9586466165413534 -0.04662746559088517 \\ -0.9523809523809523 -0.04823929498920213 \\ -0.9461152882205514 -0.04989407120676553 \\ -0.9452601552467798 -0.05012531328320802 \\ -0.9398496240601504 -0.05160479926536162 \\ -0.9335839598997494 -0.05336157774124555 \\ -0.9273182957393483 -0.05516275286051412 \\ -0.9231456932652172 -0.05639097744360902 \\ -0.9210526315789473 -0.05701409551058913 \\ -0.9147869674185464 -0.058921770891552205 \\ -0.9085213032581454 -0.0608753721900883 \\ -0.9029389784453759 -0.06265664160401002 \\ 
-0.9022556390977443 -0.06287721389467207 \\ -0.8959899749373433 -0.06494209375753182 \\ -0.8897243107769424 -0.06705451421697005 \\ -0.8843051810943737 -0.06892230576441102 \\ -0.8834586466165414 -0.06921751423740077 \\ -0.8771929824561403 -0.07144629471126149 \\ -0.8709273182957393 -0.07372432238697482 \\ -0.8669834956896901 -0.07518796992481203 \\ -0.8646616541353384 -0.07605998756248285 \\ -0.8583959899749374 -0.07845978501211151 \\ -0.8521303258145363 -0.08091063534885698 \\ -0.8507677557121429 -0.08145363408521303 \\ -0.8458646616541353 -0.08343140100997154 \\ -0.8395989974937343 -0.08600978694648184 \\ -0.8355243934342726 -0.08771929824561403 \\ -0.8333333333333334 -0.08864994606244823 \\ -0.8270676691729323 -0.09136045847656551 \\ -0.8211196058574233 -0.09398496240601503 \\ -0.8208020050125313 -0.09412686156971245 \\ -0.8145363408521303 -0.096974337617202 \\ -0.8082706766917294 -0.0998779790119743 \\ -0.807479946742187 -0.10025062656641603 \\ -0.8020050125313283 -0.10286384757991278 \\ -0.7957393483709273 -0.10591127341351447 \\ -0.7945159005597623 -0.10651629072681704 \\ -0.7894736842105263 -0.10904220589562325 \\ -0.7832080200501254 -0.1122388874924337 \\ -0.7821606619871074 -0.11278195488721804 \\ -0.7769423558897243 -0.11552348893023558 \\ -0.7706766917293233 -0.11887520769513076 \\ -0.770359343134239 -0.11904761904761904 \\ -0.7644110275689223 -0.12232270279207579 \\ -0.7590745008279274 -0.12531328320802004 \\ -0.7581453634085213 -0.12584106494883765 \\ -0.7518796992481203 -0.12945586138243015 \\ -0.7482605852508073 -0.13157894736842105 \\ -0.7456140350877193 -0.13315301153273132 \\ -0.7393483709273183 -0.13694007244302914 \\ -0.737874605359546 -0.13784461152882205 \\ -0.7330827067669173 -0.1408271517914131 \\ -0.7278936312824615 -0.14411027568922305 \\ -0.7268170426065163 -0.14480117248278723 \\ -0.7205513784461153 -0.14888263380982386 \\ -0.7182939492569423 -0.15037593984962405 \\ -0.7142857142857143 -0.15306600868734302 \\ -0.7090410238825722 -0.15664160401002505 \\ -0.7080200501253133 -0.15734793885406442 \\ -0.7017543859649122 -0.16174628652063291 \\ -0.7001247272302686 -0.16290726817042606 \\ -0.6954887218045113 -0.16625967684956144 \\ -0.6915202503497982 -0.16917293233082706 \\ -0.6892230576441103 -0.17088505689923034 \\ -0.6832034804377234 -0.17543859649122806 \\ -0.6829573934837093 -0.17562763733562708 \\ -0.6766917293233082 -0.18050750487558476 \\ -0.6751769943782966 -0.18170426065162906 \\ -0.6704260651629073 -0.18551720243865638 \\ -0.6674132759869777 -0.18796992481203006 \\ -0.6641604010025063 -0.19066064369391775 \\ -0.6598989488009169 -0.19423558897243107 \\ -0.6578947368421053 -0.19594437947823698 \\ -0.6526237032647383 -0.20050125313283207 \\ -0.6516290726817042 -0.2013753955472795 \\ -0.6455777903466793 -0.20676691729323307 \\ -0.6453634085213033 -0.20696114927823986 \\ -0.6390977443609023 -0.21271383263738564 \\ -0.6387551402789577 -0.21303258145363407 \\ -0.6328320802005013 -0.21863820086731633 \\ -0.6321438033516535 -0.21929824561403508 \\ -0.6265664160401002 -0.224740658340804 \\ -0.6257336677141606 -0.22556390977443608 \\ -0.6203007518796992 -0.23103074206981533 \\ -0.6195170203570289 -0.23182957393483708 \\ -0.6140350877192983 -0.2375186824093526 \\ -0.6134865375322004 -0.23809523809523808 \\ -0.6077694235588973 -0.24421546723157517 \\ -0.6076352605023475 -0.24436090225563908 \\ -0.6019604340799675 -0.2506265664160401 \\ -0.6015037593984962 -0.251140391238567 \\ -0.5964543386471509 -0.2568922305764411 \\ -0.5952380952380952 -0.25830469754782104 \\ 
-0.5911097275420195 -0.2631578947368421 \\ -0.5889724310776943 -0.2657204446599601 \\ -0.5859208038539582 -0.2694235588972431 \\ -0.5827067669172933 -0.27340294913070246 \\ -0.580882044852301 -0.2756892230576441 \\ -0.5764411027568922 -0.2813687973129038 \\ -0.5759881859656986 -0.2819548872180451 \\ -0.5712426751644253 -0.2882205513784461 \\ -0.5701754385964912 -0.28965947900997113 \\ -0.566636705660539 -0.29448621553884713 \\ -0.5639097744360902 -0.29828746374390414 \\ -0.5621619735557849 -0.3007518796992481 \\ -0.55781537306534 -0.30701754385964913 \\ -0.5576441102756893 -0.30726970951966387 \\ -0.553605724200357 -0.3132832080200501 \\ -0.5513784461152882 -0.3166774756270315 \\ -0.5495151593985586 -0.31954887218045114 \\ -0.5455430022136956 -0.3258145363408521 \\ -0.5451127819548872 -0.32650861666138353 \\ -0.5416970184051734 -0.33208020050125314 \\ -0.5388471177944862 -0.33684519023698145 \\ -0.537959279047696 -0.3383458646616541 \\ -0.5343392254998046 -0.34461152882205515 \\ -0.5325814536340853 -0.3477310594661525 \\ -0.5308275865345431 -0.3508771929824561 \\ -0.5274225180199384 -0.35714285714285715 \\ -0.5263157894736842 -0.35923173198263475 \\ -0.5241261021213189 -0.3634085213032581 \\ -0.5209286932140283 -0.36967418546365916 \\ -0.5200501253132832 -0.37144159516939546 \\ -0.5178372560182888 -0.37593984962406013 \\ -0.5148405363298834 -0.38220551378446116 \\ -0.5137844611528822 -0.38447539952626647 \\ -0.5119444223247439 -0.38847117794486213 \\ -0.5091417544794462 -0.39473684210526316 \\ -0.5075187969924813 -0.39847406315341477 \\ -0.5064318564841852 -0.40100250626566414 \\ -0.5038169157457824 -0.40726817042606517 \\ -0.5012848547360013 -0.41353383458646614 \\ -0.5012531328320802 -0.4136147113557332 \\ -0.4988513930538256 -0.4197994987468672 \\ -0.4964988249092736 -0.42606516290726815 \\ -0.4949874686716792 -0.43022764998204877 \\ -0.4942313123756841 -0.4323308270676692 \\ -0.4920525741663907 -0.43859649122807015 \\ -0.48995168212955437 -0.4448621553884712 \\ -0.48872180451127817 -0.44866537739464163 \\ -0.4879331886798256 -0.45112781954887216 \\ -0.4859987211728585 -0.4573934837092732 \\ -0.4841394192894439 -0.46365914786967416 \\ -0.4824561403508772 -0.46956736942124644 \\ -0.4823552707680453 -0.4699248120300752 \\ -0.48065541494110103 -0.47619047619047616 \\ -0.4790281836104676 -0.4824561403508772 \\ -0.47747293467874813 -0.48872180451127817 \\ -0.47619047619047616 -0.49413472641628353 \\ -0.47599032450340906 -0.4949874686716792 \\ -0.4745861701397491 -0.5012531328320802 \\ -0.47325161770501123 -0.5075187969924813 \\ -0.4719860628588274 -0.5137844611528822 \\ -0.47078890823713543 -0.5200501253132832 \\ -0.4699248120300752 -0.5248409287373553 \\ -0.46966122817709804 -0.5263157894736842 \\ -0.4686057519186115 -0.5325814536340853 \\ -0.46761647093983816 -0.5388471177944862 \\ -0.46669282220438374 -0.5451127819548872 \\ -0.46583424904315024 -0.5513784461152882 \\ -0.4650402010645814 -0.5576441102756893 \\ -0.4643101340664218 -0.5639097744360902 \\ -0.46365914786967416 -0.5700280527825036 \\ -0.46364360558529805 -0.5701754385964912 \\ -0.46304357385209993 -0.5764411027568922 \\ -0.4625055269562489 -0.5827067669172933 \\ -0.4620289510694049 -0.5889724310776943 \\ -0.46161333802680693 -0.5952380952380952 \\ -0.46125818524945594 -0.6015037593984962 \\ -0.46096299566757604 -0.6077694235588973 \\ -0.4607272776453317 -0.6140350877192983 \\ -0.4605505449067755 -0.6203007518796992 \\ -0.4604323164630038 -0.6265664160401002 \\ -0.46037211654049737 -0.6328320802005013 \\ -0.4603694745106239 
-0.6390977443609023 \\ -0.46042392482028127 -0.6453634085213033 \\ -0.46053500692366 -0.6516290726817042 \\ -0.46070226521510316 -0.6578947368421053 \\ -0.4609252489630437 -0.6641604010025063 \\ -0.4612035122449998 -0.6704260651629073 \\ -0.46153661388360656 -0.6766917293233082 \\ -0.4619241173836676 -0.6829573934837093 \\ -0.46236559087020435 -0.6892230576441103 \\ -0.4628606070274879 -0.6954887218045113 \\ -0.46340874303903257 -0.7017543859649122 \\ -0.46365914786967416 -0.7043616592515248 \\ -0.46401161157953463 -0.7080200501253133 \\ -0.46466850664107456 -0.7142857142857143 \\ -0.4653775588069267 -0.7205513784461153 \\ -0.4661383581499245 -0.7268170426065163 \\ -0.4669504990274233 -0.7330827067669173 \\ -0.4678135800254698 -0.7393483709273183 \\ -0.4687272039038414 -0.7456140350877193 \\ -0.46969097754194145 -0.7518796992481203 \\ -0.4699248120300752 -0.7533223854309319 \\ -0.47070896267461276 -0.7581453634085213 \\ -0.47177791284735293 -0.7644110275689223 \\ -0.4728961045827562 -0.7706766917293233 \\ -0.474063156665897 -0.7769423558897243 \\ -0.4752786918067392 -0.7832080200501254 \\ -0.47619047619047616 -0.7877257794965851 \\ -0.47654433058068535 -0.7894736842105263 \\ -0.477863122752767 -0.7957393483709273 \\ -0.4792295181990092 -0.8020050125313283 \\ -0.4806431510362091 -0.8082706766917294 \\ -0.4821036591211977 -0.8145363408521303 \\ -0.4824561403508772 -0.8159989667696246 \\ -0.4836171797509535 -0.8208020050125313 \\ -0.4851790662869966 -0.8270676691729323 \\ -0.48678697566427204 -0.8333333333333334 \\ -0.48844055667099157 -0.8395989974937343 \\ -0.48872180451127817 -0.840634059442589 \\ -0.49014740101381077 -0.8458646616541353 \\ -0.49190100713387713 -0.8521303258145363 \\ -0.4936994518246415 -0.8583959899749374 \\ -0.4949874686716792 -0.8627716643597634 \\ -0.4955454958749239 -0.8646616541353384 \\ -0.49744311139870695 -0.8709273182957393 \\ -0.4993847443819487 -0.8771929824561403 \\ -0.5012531328320802 -0.8830887409489179 \\ -0.5013707132902875 -0.8834586466165414 \\ -0.5034106678934995 -0.8897243107769424 \\ -0.5054938306334483 -0.8959899749373433 \\ -0.5075187969924813 -0.901957036002247 \\ -0.5076204372193893 -0.9022556390977443 \\ -0.5098010773994472 -0.9085213032581454 \\ -0.5120241280253146 -0.9147869674185464 \\ -0.5137844611528822 -0.9196535335363593 \\ -0.5142920729418254 -0.9210526315789473 \\ -0.5166117618272853 -0.9273182957393483 \\ -0.5189730749449292 -0.9335839598997494 \\ -0.5200501253132832 -0.9363888410225691 \\ -0.52138304684055 -0.9398496240601504 \\ -0.5238401640030199 -0.9461152882205514 \\ -0.5263157894736842 -0.9523247743240727 \\ -0.5263382544153111 -0.9523809523809523 \\ -0.5288908061877631 -0.9586466165413534 \\ -0.5314837474562577 -0.9649122807017544 \\ -0.5325814536340853 -0.9675206058355658 \\ -0.5341252721538456 -0.9711779448621554 \\ -0.5368128189334007 -0.9774436090225563 \\ -0.5388471177944862 -0.9821144066266142 \\ -0.5395438330883344 -0.9837092731829574 \\ -0.5423256149017184 -0.9899749373433584 \\ -0.5451127819548872 -0.9961653601032794 \\ -0.5451467607119856 -0.9962406015037594 \\ -0.5480224058523863 -1.0025062656641603 \\ -0.5509367754796757 -1.0087719298245614 \\ -0.5513784461152882 -1.0097071299739249 \\ -0.5539034628078201 -1.0150375939849625 \\ -0.5569108719589213 -1.0213032581453634 \\ -0.5576441102756893 -1.0228089666731135 \\ -0.559969057322981 -1.0275689223057645 \\ -0.5630691332600929 -1.0338345864661653 \\ -0.5639097744360902 -1.035510221751751 \\ -0.5662194614866837 -1.0401002506265664 \\ -0.5694118302394587 -1.0463659147869675 
\\ -0.5701754385964912 -1.0478445945789754 \\ -0.5726549479229096 -1.0526315789473684 \\ -0.5759392342846881 -1.0588972431077694 \\ -0.5764411027568922 -1.0598421076360436 \\ -0.579275789792123 -1.0651629072681705 \\ -0.5826516173161558 -1.0714285714285714 \\ -0.5827067669172933 -1.0715295946754397 \\ -0.586082260792591 -1.0776942355889725 \\ -0.5889724310776943 -1.0829152561793314 \\ -0.5895524586010322 -1.0839598997493733 \\ -0.5930746351617079 -1.0902255639097744 \\ -0.5952380952380952 -1.0940319045553617 \\ -0.5966401661631949 -1.0964912280701755 \\ -0.6002531876773239 -1.1027568922305764 \\ -0.6015037593984962 -1.1049011579615393 \\ -0.6039147101353658 -1.1090225563909775 \\ -0.6076181936590764 -1.1152882205513786 \\ -0.6077694235588973 -1.115541085895262 \\ -0.6113763676480832 -1.1215538847117794 \\ -0.6140350877192983 -1.125941850025275 \\ -0.6151762666445423 -1.1278195488721805 \\ -0.6190254163802392 -1.1340852130325814 \\ -0.6203007518796992 -1.1361394768737363 \\ -0.6229232415153308 -1.1403508771929824 \\ -0.6265664160401002 -1.1461450295179998 \\ -0.6268637919013135 -1.1466165413533835 \\ -0.6308585541941629 -1.1528822055137844 \\ -0.6328320802005013 -1.155947908448991 \\ -0.6348983222906439 -1.1591478696741855 \\ -0.6389824853054048 -1.1654135338345866 \\ -0.6390977443609023 -1.1655885043432235 \\ -0.6431221460317886 -1.1716791979949874 \\ -0.6453634085213033 -1.1750402883936908 \\ -0.6473061873082817 -1.1779448621553885 \\ -0.6515355461752973 -1.1842105263157894 \\ -0.6516290726817042 -1.184347685770763 \\ -0.6558207955485047 -1.1904761904761905 \\ -0.6578947368421053 -1.1934815836666983 \\ -0.6601514793752492 -1.1967418546365916 \\ -0.6641604010025063 -1.2024827030716467 \\ -0.6645280180019987 -1.2030075187969924 \\ -0.6689591722819741 -1.2092731829573935 \\ -0.6704260651629073 -1.2113291327165105 \\ -0.6734389079461255 -1.2155388471177946 \\ -0.6766917293233082 -1.2200451975714772 \\ -0.6779655861137367 -1.2218045112781954 \\ -0.6825420128349067 -1.2280701754385965 \\ -0.6829573934837093 -1.228633733237623 \\ -0.6871732503232795 -1.2343358395989974 \\ -0.6892230576441103 -1.23708535709887 \\ -0.6918525410982601 -1.2406015037593985 \\ -0.6954887218045113 -1.2454239309708783 \\ -0.696580334342633 -1.2468671679197996 \\ -0.7013593528831171 -1.2531328320802004 \\ -0.7017543859649122 -1.253646281313767 \\ -0.7061937713420882 -1.2593984962406015 \\ -0.7080200501253133 -1.2617459713021235 \\ -0.7110778389191442 -1.2656641604010026 \\ -0.7142857142857143 -1.2697421366055546 \\ -0.7160120228635155 -1.2719298245614035 \\ -0.7205513784461153 -1.2776372158274656 \\ -0.7209967963059433 -1.2781954887218046 \\ -0.7260371606468393 -1.2844611528822054 \\ -0.7268170426065163 -1.285422928700093 \\ -0.7311313851204301 -1.2907268170426065 \\ -0.7330827067669173 -1.2931073543558735 \\ -0.7362773958972686 -1.2969924812030076 \\ -0.7393483709273183 -1.3006988160813144 \\ -0.7414756852241282 -1.3032581453634084 \\ -0.7456140350877193 -1.3081994143174493 \\ -0.7467267516062477 -1.3095238095238095 \\ -0.7518796992481203 -1.3156111867014053 \\ -0.7520310999071103 -1.3157894736842106 \\ -0.7573936584044698 -1.3220551378446115 \\ -0.7581453634085213 -1.322926922899574 \\ -0.7628111251805805 -1.3283208020050126 \\ -0.7644110275689223 -1.3301568967467199 \\ -0.7682831373596624 -1.3345864661654134 \\ -0.7706766917293233 -1.337304802382363 \\ -0.773810220345092 -1.3408521303258145 \\ -0.7769423558897243 -1.3443724085419253 \\ -0.7793929063098445 -1.3471177944862156 \\ -0.7832080200501254 -1.3513614334969424 
\\ -0.7850317343058765 -1.3533834586466165 \\ -0.7894736842105263 -1.3582735468420521 \\ -0.7907272503756351 -1.3596491228070176 \\ -0.7957393483709273 -1.3651103712065824 \\ -0.7964800076657431 -1.3659147869674186 \\ -0.8020050125313283 -1.3718734838944222 \\ -0.8022905665429104 -1.3721804511278195 \\ -0.8081601593316279 -1.3784461152882206 \\ -0.8082706766917294 -1.3785632645089905 \\ -0.8140900599569441 -1.3847117794486214 \\ -0.8145363408521303 -1.3851800850456486 \\ -0.8200791428987952 -1.3909774436090225 \\ -0.8208020050125313 -1.3917283818841923 \\ -0.8261279916450276 -1.3972431077694236 \\ -0.8270676691729323 -1.398209538237995 \\ -0.8322371973790432 -1.4035087719298245 \\ -0.8333333333333334 -1.4046249003406885 \\ -0.8384073591070907 -1.4097744360902256 \\ -0.8395989974937343 -1.4109757786737585 \\ -0.8446390837880877 -1.4160401002506267 \\ -0.8458646616541353 -1.4172634491455613 \\ -0.8509329864660409 -1.4223057644110275 \\ -0.8521303258145363 -1.4234891542239891 \\ -0.857289690405119 -1.4285714285714286 \\ -0.8583959899749374 -1.4296541040248958 \\ -0.8637098272274419 -1.4348370927318295 \\ -0.8646616541353384 -1.4357594773582856 \\ -0.8701940370536532 -1.4411027568922306 \\ -0.8709273182957393 -1.4418064227341658 \\ -0.8767429686463362 -1.4473684210526316 \\ -0.8771929824561403 -1.4477960593298655 \\ -0.8833572795563474 -1.4536340852130325 \\ -0.8834586466165414 -1.4537294779205319 \\ -0.8897243107769424 -1.459605103300931 \\ -0.8900396042803111 -1.4598997493734336 \\ -0.8959899749373433 -1.4654253015386973 \\ -0.8967897232852252 -1.4661654135338347 \\ -0.9022556390977443 -1.4711919682325942 \\ -0.9036076957335903 -1.4724310776942355 \\ -0.9085213032581454 -1.476906105857794 \\ -0.910494224894471 -1.4786967418546366 \\ -0.9147869674185464 -1.482568692013862 \\ -0.9174500237893614 -1.4849624060150375 \\ -0.9210526315789473 -1.4881806801915873 \\ -0.9244758153618262 -1.4912280701754386 \\ -0.9273182957393483 -1.4937430005116163 \\ -0.9315723326506883 -1.4974937343358397 \\ -0.9335839598997494 -1.499256560436097 \\ -0.9387403189668626 -1.5037593984962405 \\ -0.9398496240601504 -1.5047222454544784 \\ -0.9459805280739171 -1.5100250626566416 \\ -0.9461152882205514 -1.5101409197445477 \\ -0.9523809523809523 -1.5155067668991662 \\ -0.9532996781456325 -1.5162907268170427 \\ -0.9586466165413534 -1.520825977249909 \\ -0.9606939980542374 -1.5225563909774436 \\ -0.9649122807017544 -1.5261003655311902 \\ -0.968163409016349 -1.5288220551378446 \\ -0.9711779448621554 -1.5313307284488842 \\ -0.9757087195875576 -1.5350877192982457 \\ -0.9774436090225563 -1.5365178439383027 \\ -0.983330749973614 -1.5413533834586466 \\ -0.9837092731829574 -1.5416624717137561 \\ -0.9899749373433584 -1.546758235975851 \\ -0.9910373875936935 -1.5476190476190477 \\ -0.9962406015037594 -1.5518101604070818 \\ -0.9988255384572747 -1.5538847117794485 \\ -1.0025062656641603 -1.5568215552443785 \\ -1.0066935500587542 -1.5601503759398496 \\ -1.0087719298245614 -1.5617931218643224 \\ -1.014642303944446 -1.5664160401002507 \\ -1.0150375939849625 -1.56672554555294 \\ -1.0213032581453634 -1.57161082187845 \\ -1.0226820481046672 -1.5726817042606516 \\ -1.0275689223057645 -1.5764555584934972 \\ -1.0308076826197252 -1.5789473684210527 \\ -1.0338345864661653 -1.5812629453808267 \\ -1.0390174526651252 -1.5852130325814537 \\ -1.0401002506265664 -1.586033615812215 \\ -1.0463659147869675 -1.5907624715451596 \\ -1.0473188897219492 -1.5914786967418546 \\ -1.0526315789473684 -1.5954490496517202 \\ -1.0557145853852066 -1.5977443609022557 \\ 
-1.0588972431077694 -1.6001005896643867 \\ -1.0641980299762412 -1.6040100250626566 \\ -1.0651629072681705 -1.604717677123971 \\ -1.0714285714285714 -1.609293153570313 \\ -1.0727797373380825 -1.6102756892230576 \\ -1.0776942355889725 -1.6138295519264132 \\ -1.0814588003352281 -1.6165413533834587 \\ -1.0839598997493733 -1.618333069850297 \\ -1.0902255639097744 -1.6228042272761984 \\ -1.0902294930278718 -1.6228070175438596 \\ -1.0964912280701755 -1.627229319320259 \\ -1.0991115861027174 -1.6290726817042607 \\ -1.1027568922305764 -1.6316230338736812 \\ -1.1080882699376933 -1.6353383458646618 \\ -1.1090225563909775 -1.6359858834289571 \\ -1.1152882205513786 -1.640308404576207 \\ -1.1171743941374437 -1.6416040100250626 \\ -1.1215538847117794 -1.6445959388620794 \\ -1.126365074090507 -1.6478696741854637 \\ -1.1278195488721805 -1.648854017638187 \\ -1.1340852130325814 -1.6530750294225498 \\ -1.1356663512841136 -1.6541353383458646 \\ -1.1403508771929824 -1.6572598699115453 \\ -1.1450794557302977 -1.6604010025062657 \\ -1.1466165413533835 -1.6614166032244564 \\ -1.1528822055137844 -1.6655371369148573 \\ -1.1546077928245209 -1.6666666666666667 \\ -1.1591478696741855 -1.6696226395387346 \\ -1.1642522352283007 -1.6729323308270676 \\ -1.1654135338345866 -1.6736813250892755 \\ -1.1716791979949874 -1.6777024031436683 \\ -1.1740203594457193 -1.6791979949874687 \\ -1.1779448621553885 -1.68169179907201 \\ -1.1839055751147487 -1.6854636591478698 \\ -1.1842105263157894 -1.6856556125645537 \\ -1.1904761904761905 -1.6895782483568584 \\ -1.1939271036976706 -1.6917293233082706 \\ -1.1967418546365916 -1.6934746494760364 \\ -1.2030075187969924 -1.6973418070797957 \\ -1.204071432973344 -1.6979949874686717 \\ -1.2092731829573935 -1.701171847530544 \\ -1.214352607041755 -1.7042606516290726 \\ -1.2155388471177946 -1.7049782516424696 \\ -1.2218045112781954 -1.7087483116967337 \\ -1.224773638431049 -1.7105263157894737 \\ -1.2280701754385965 -1.7124901404201593 \\ -1.2343358395989974 -1.7162051336221051 \\ -1.235331142568755 -1.7167919799498748 \\ -1.2406015037593985 -1.7198833930989252 \\ -1.246037170289732 -1.7230576441102756 \\ -1.2468671679197996 -1.7235398411212868 \\ -1.2531328320802004 -1.7271590736195666 \\ -1.2568975570297896 -1.7293233082706767 \\ -1.2593984962406015 -1.730753650996425 \\ -1.2656641604010026 -1.734318233238758 \\ -1.2679099538364846 -1.7355889724310778 \\ -1.2719298245614035 -1.7378519029894333 \\ -1.2781954887218046 -1.7413619107175107 \\ -1.2790803439337022 -1.7418546365914787 \\ -1.2844611528822054 -1.7448356220215773 \\ -1.2904176897845316 -1.7481203007518797 \\ -1.2907268170426065 -1.7482899011191908 \\ -1.2969924812030076 -1.7517058209293626 \\ -1.3019322170051986 -1.7543859649122806 \\ -1.3032581453634084 -1.7551017314403239 \\ -1.3095238095238095 -1.7584635006421203 \\ -1.3136233651219762 -1.7606516290726817 \\ -1.3157894736842106 -1.761801951608048 \\ -1.3220551378446115 -1.765109650356478 \\ -1.325498405911828 -1.7669172932330828 \\ -1.3283208020050126 -1.7683915378914015 \\ -1.3345864661654134 -1.771645247707765 \\ -1.3375650158946126 -1.7731829573934836 \\ -1.3408521303258145 -1.774871455206451 \\ -1.3471177944862156 -1.7780712589384275 \\ -1.349831304890155 -1.7794486215538847 \\ -1.3533834586466165 -1.7812426572808386 \\ -1.3596491228070176 -1.7843886390635015 \\ -1.362305847027755 -1.7857142857142858 \\ -1.3659147869674186 -1.7875060868154815 \\ -1.3721804511278195 -1.790598332033218 \\ -1.3749977144574512 -1.7919799498746867 \\ -1.3784461152882206 -1.7936626756434684 \\ 
-1.3847117794486214 -1.7967012708927825 \\ -1.3879165140416898 -1.7982456140350878 \\ -1.3909774436090225 -1.7997133448862253 \\ -1.3972431077694236 -1.802698377939401 \\ -1.4010724273394157 -1.8045112781954886 \\ -1.4035087719298245 -1.8056590051069863 \\ -1.4097744360902256 -1.8085905648765936 \\ -1.414476254232501 -1.8107769423558897 \\ -1.4160401002506267 -1.8115005564616422 \\ -1.4223057644110275 -1.8143787329658636 \\ -1.4281394605875664 -1.8170426065162908 \\ -1.4285714285714286 -1.8172388888470021 \\ -1.4348370927318295 -1.820063773175762 \\ -1.4411027568922306 -1.8228718434519884 \\ -1.4420841805333497 -1.8233082706766917 \\ -1.4473684210526316 -1.8256465663284118 \\ -1.4536340852130325 -1.828401264367394 \\ -1.4563211994813567 -1.8295739348370927 \\ -1.4598997493734336 -1.8311279832435305 \\ -1.4661654135338347 -1.8338294035441962 \\ -1.4708602726720446 -1.8358395989974938 \\ -1.4724310776942355 -1.8365088848800128 \\ -1.4786967418546366 -1.839157122908445 \\ -1.4849624060150375 -1.8417879272330222 \\ -1.4857246441310004 -1.8421052631578947 \\ -1.4912280701754386 -1.8443852746336589 \\ -1.4974937343358397 -1.8469628242341012 \\ -1.5009435106132862 -1.8483709273182958 \\ -1.5037593984962405 -1.8495147012783706 \\ -1.5100250626566416 -1.8520390948166268 \\ -1.5162907268170427 -1.8545456087849592 \\ -1.5165202594536733 -1.8546365914786966 \\ -1.5225563909774436 -1.857017572928132 \\ -1.5288220551378446 -1.8594708634981056 \\ -1.5325081788127837 -1.8609022556390977 \\ -1.5350877192982457 -1.8618990831405169 \\ -1.5413533834586466 -1.8642992520327422 \\ -1.5476190476190477 -1.8666810971406715 \\ -1.548911770656935 -1.8671679197994988 \\ -1.5538847117794485 -1.869031590520246 \\ -1.5601503759398496 -1.871360251609159 \\ -1.5657751335625993 -1.8734335839598997 \\ -1.5664160401002507 -1.8736686859494152 \\ -1.5726817042606516 -1.8759442676656746 \\ -1.5789473684210527 -1.8782010830890972 \\ -1.583144691206474 -1.8796992481203008 \\ -1.5852130325814537 -1.880433944025174 \\ -1.5914786967418546 -1.8826376226116546 \\ -1.5977443609022557 -1.884822256325146 \\ -1.60105375307303 -1.8859649122807018 \\ -1.6040100250626566 -1.8869807196798747 \\ -1.6102756892230576 -1.8891121618287952 \\ -1.6165413533834587 -1.8912242795968583 \\ -1.619557488344845 -1.8922305764411027 \\ -1.6228070175438596 -1.893309516035136 \\ -1.6290726817042607 -1.8953683902294796 \\ -1.6353383458646618 -1.8974076596168152 \\ -1.6387188537156463 -1.8984962406015038 \\ -1.6416040100250626 -1.8994208346689776 \\ -1.6478696741854637 -1.9014068111738538 \\ -1.6541353383458646 -1.903372901536657 \\ -1.6586100177304022 -1.9047619047619047 \\ -1.6604010025062657 -1.9053151756217315 \\ -1.6666666666666667 -1.9072279264757843 \\ -1.6729323308270676 -1.90912050895309 \\ -1.6791979949874687 -1.9109927998781382 \\ -1.6793158685879936 -1.9110275689223057 \\ -1.6854636591478698 -1.912832236408795 \\ -1.6917293233082706 -1.9146509839138661 \\ -1.6979949874686717 -1.9164491551087321 \\ -1.7009735848223353 -1.9172932330827068 \\ -1.7042606516290726 -1.9182202397119694 \\ -1.7105263157894737 -1.9199648269237324 \\ -1.7167919799498748 -1.9216885521832936 \\ -1.7230576441102756 -1.92339128801631 \\ -1.7236832990079656 -1.9235588972431077 \\ -1.7293233082706767 -1.9250625369503545 \\ -1.7355889724310778 -1.9267114918358084 \\ -1.7418546365914787 -1.928339169126744 \\ -1.747649675779715 -1.9298245614035088 \\ -1.7481203007518797 -1.9299446114302123 \\ -1.7543859649122806 -1.9315184732605388 \\ -1.7606516290726817 -1.9330707684598796 \\ 
-1.7669172932330828 -1.934601365182933 \\ -1.7731004615364672 -1.9360902255639099 \\ -1.7731829573934836 -1.936109994117936 \\ -1.7794486215538847 -1.9375865854345706 \\ -1.7857142857142858 -1.9390411867098802 \\ -1.7919799498746867 -1.9404736633396902 \\ -1.7982456140350878 -1.9418838796190039 \\ -1.8003795033231278 -1.9423558897243107 \\ -1.8045112781954886 -1.9432654028666347 \\ -1.8107769423558897 -1.9446212690519535 \\ -1.8170426065162908 -1.9459545790866768 \\ -1.8233082706766917 -1.9472651944431558 \\ -1.8295739348370927 -1.9485529754570665 \\ -1.8299143227833734 -1.9486215538847118 \\ -1.8358395989974938 -1.949809530457227 \\ -1.8421052631578947 -1.9510426203677456 \\ -1.8483709273182958 -1.9522525758471967 \\ -1.8546365914786966 -1.953439254343331 \\ -1.8609022556390977 -1.9546025121302957 \\ -1.862469898488657 -1.9548872180451127 \\ -1.8671679197994988 -1.9557362875329334 \\ -1.8734335839598997 -1.956844517052869 \\ -1.8796992481203008 -1.9579290214326996 \\ -1.8859649122807018 -1.9589896539918414 \\ -1.8922305764411027 -1.9600262668380783 \\ -1.8984962406015038 -1.961038710855024 \\ -1.8992215245012056 -1.9611528822055138 \\ -1.9047619047619047 -1.9620207550214819 \\ -1.9110275689223057 -1.962977677914316 \\ -1.9172932330827068 -1.9639101213508 \\ -1.9235588972431077 -1.9648179331648055 \\ -1.9298245614035088 -1.9657009599264192 \\ -1.9360902255639099 -1.9665590469287921 \\ -1.9423558897243107 -1.9673920381748307 \\ -1.942561930537203 -1.9674185463659148 \\ -1.9486215538847118 -1.9681942999971085 \\ -1.9548872180451127 -1.9689711196707353 \\ -1.9611528822055138 -1.9697225247765575 \\ -1.9674185463659148 -1.9704483561414063 \\ -1.9736842105263157 -1.971148453260115 \\ -1.9799498746867168 -1.9718226542815573 \\ -1.9862155388471179 -1.9724707959945094 \\ -1.9924812030075187 -1.9730927138133323 \\ -1.9987045424345744 -1.9736842105263157 \\ -1.9987468671679198 -1.9736882132312332 \\ -2.0050125313283207 -1.9742531483671542 \\ -2.011278195488722 -1.9747915304383652 \\ -2.017543859649123 -1.9753031915211379 \\ -2.0238095238095237 -1.9757879622728027 \\ -2.030075187969925 -1.9762456719167287 \\ -2.036340852130326 -1.9766761482271131 \\ -2.0426065162907268 -1.9770792175135772 \\ -2.0488721804511276 -1.9774547046055644 \\ -2.055137844611529 -1.9778024328365411 \\ -2.06140350877193 -1.9781222240279903 \\ -2.0676691729323307 -1.978413898473203 \\ -2.073934837092732 -1.978677274920859 \\ -2.080200501253133 -1.9789121705583967 \\ -2.0864661654135337 -1.979118400995165 \\ -2.092731829573935 -1.9792957802453603 \\ -2.098997493734336 -1.979444120710739 \\ -2.1052631578947367 -1.9795632331631037 \\ -2.111528822055138 -1.9796529267265655 \\ -2.117794486215539 -1.9797130088595674 \\ -2.1240601503759398 -1.979743285336679 \\ -2.130325814536341 -1.9797435602301445 \\ -2.136591478696742 -1.9797136358911942 \\ -2.142857142857143 -1.9796533129311058 \\ -2.1491228070175437 -1.9795623902020134 \\ -2.155388471177945 -1.9794406647774676 \\ -2.161654135338346 -1.9792879319327312 \\ -2.1679197994987467 -1.9791039851248158 \\ -2.174185463659148 -1.9788886159722516 \\ -2.180451127819549 -1.9786416142345848 \\ -2.1867167919799497 -1.9783627677916016 \\ -2.192982456140351 -1.9780518626222725 \\ -2.199248120300752 -1.9777086827834136 \\ -2.2055137844611528 -1.9773330103880578 \\ -2.211779448621554 -1.976924625583535 \\ -2.218045112781955 -1.9764833065292557 \\ -2.224310776942356 -1.97600882937419 \\ -2.230576441102757 -1.9755009682340414 \\ -2.236842105263158 -1.9749594951681104 \\ -2.243107769423559 
-1.9743841801558382 \\ -2.2493734335839597 -1.9737747910730297 \\ -2.2502568680846737 -1.9736842105263157 \\ -2.255639097744361 -1.9731267775846628 \\ -2.261904761904762 -1.972443220749741 \\ -2.2681704260651627 -1.971724583264197 \\ -2.274436090225564 -1.9709706203784305 \\ -2.280701754385965 -1.9701810850607862 \\ -2.2869674185463658 -1.9693557279708924 \\ -2.293233082706767 -1.9684942974326285 \\ -2.299498746867168 -1.96759653940671 \\ -2.3006946344009527 -1.9674185463659148 \\ -2.305764411027569 -1.9666561362358086 \\ -2.31203007518797 -1.965677135733296 \\ -2.318295739348371 -1.9646606951528305 \\ -2.324561403508772 -1.9636065461834915 \\ -2.330827067669173 -1.9625144179444518 \\ -2.337092731829574 -1.9613840369541347 \\ -2.338334172591087 -1.9611528822055138 \\ -2.343358395989975 -1.9602074411480923 \\ -2.3496240601503757 -1.9589897822966345 \\ -2.355889724310777 -1.957732663922714 \\ -2.362155388471178 -1.9564357967625658 \\ -2.3684210526315788 -1.9550988887181786 \\ -2.3693860323848024 -1.9548872180451127 \\ -2.37468671679198 -1.953711893451564 \\ -2.380952380952381 -1.9522820999935315 \\ -2.387218045112782 -1.9508109685288593 \\ -2.393483709273183 -1.9492981896892934 \\ -2.3962144282742184 -1.9486215538847118 \\ -2.399749373433584 -1.9477359679439479 \\ -2.406015037593985 -1.9461252909710445 \\ -2.412280701754386 -1.9444715857025414 \\ -2.418546365914787 -1.9427745240816547 \\ -2.4200561151806013 -1.9423558897243107 \\ -2.424812030075188 -1.9410222941728255 \\ -2.431077694235589 -1.939221954748055 \\ -2.43734335839599 -1.9373767869430811 \\ -2.441611267429358 -1.9360902255639099 \\ -2.443609022556391 -1.9354811121351865 \\ -2.4498746867167918 -1.9335280900300535 \\ -2.456140350877193 -1.93152868641781 \\ -2.461360963674216 -1.9298245614035088 \\ -2.462406015037594 -1.9294794642125597 \\ -2.468671679197995 -1.92736734317084 \\ -2.474937343358396 -1.925207200285634 \\ -2.479616754589847 -1.9235588972431077 \\ -2.481203007518797 -1.922993533160314 \\ -2.487468671679198 -1.9207154995994524 \\ -2.493734335839599 -1.9183877099543312 \\ -2.4966224078894634 -1.9172932330827068 \\ -2.5 -1.9159978303451097 \\ } ; \addlegendentry {$f_2$} \addplot[color=mycolor5, draw opacity={1.0}, line width={1}, solid] table[row sep={\\}] { \\ -1.1175820282763838 -2.5 \\ -1.1159455478675744 -2.493734335839599 \\ -1.1152882205513786 -2.4911957923752968 \\ -1.1143279116942972 -2.487468671679198 \\ -1.1127261929427634 -2.481203007518797 \\ -1.1111340444067233 -2.474937343358396 \\ -1.1095516106513668 -2.468671679197995 \\ -1.1090225563909775 -2.4665566788042796 \\ -1.1079894952526983 -2.462406015037594 \\ -1.1064427249206927 -2.456140350877193 \\ -1.104906177826803 -2.4498746867167918 \\ -1.103380009971611 -2.443609022556391 \\ -1.1027568922305764 -2.4410252933979875 \\ -1.1018734140649702 -2.43734335839599 \\ -1.1003838410280367 -2.431077694235589 \\ -1.0989051806007244 -2.424812030075188 \\ -1.0974376010144256 -2.418546365914787 \\ -1.0964912280701755 -2.414467375427724 \\ -1.095986488230924 -2.412280701754386 \\ -1.0945564657497724 -2.406015037593985 \\ -1.0931380838035043 -2.399749373433584 \\ -1.0917315237088623 -2.393483709273183 \\ -1.0903369705679076 -2.387218045112782 \\ -1.0902255639097744 -2.3867106491651118 \\ -1.0889678111922667 -2.380952380952381 \\ -1.087612212083227 -2.37468671679198 \\ -1.0862692176662 -2.3684210526315788 \\ -1.0849390273825155 -2.362155388471178 \\ -1.0839598997493733 -2.357491493609626 \\ -1.083625392173592 -2.355889724310777 \\ -1.0823351991448595 -2.3496240601503757 \\ 
-1.0810584380697763 -2.343358395989975 \\ -1.0797953237450428 -2.337092731829574 \\ -1.07854607557179 -2.330827067669173 \\ -1.0776942355889725 -2.3264986860563295 \\ -1.077315004224497 -2.324561403508772 \\ -1.076107272586673 -2.318295739348371 \\ -1.074914078606392 -2.31203007518797 \\ -1.073735658540278 -2.305764411027569 \\ -1.0725722537921571 -2.299498746867168 \\ -1.0714285714285714 -2.2932572918737892 \\ -1.0714241593787521 -2.293233082706767 \\ -1.0703038690416877 -2.2869674185463658 \\ -1.069199312942529 -2.280701754385965 \\ -1.0681107510081143 -2.274436090225564 \\ -1.0670384489222426 -2.2681704260651627 \\ -1.0659826782859478 -2.261904761904762 \\ -1.0651629072681705 -2.2569551554421072 \\ -1.0649461441417916 -2.255639097744361 \\ -1.0639356686071173 -2.2493734335839597 \\ -1.0629425094299256 -2.243107769423559 \\ -1.0619669591162555 -2.236842105263158 \\ -1.0610093167974233 -2.230576441102757 \\ -1.0600698884186712 -2.224310776942356 \\ -1.0591489869342965 -2.218045112781955 \\ -1.0588972431077694 -2.2162892518697372 \\ -1.0582543392361556 -2.211779448621554 \\ -1.0573816150475939 -2.2055137844611528 \\ -1.056528292956605 -2.199248120300752 \\ -1.0556947101954757 -2.192982456140351 \\ -1.0548812118573854 -2.1867167919799497 \\ -1.0540881511267957 -2.180451127819549 \\ -1.0533158895179897 -2.174185463659148 \\ -1.0526315789473684 -2.168473903765059 \\ -1.05256558002744 -2.1679197994987467 \\ -1.051844643500781 -2.161654135338346 \\ -1.051145485025501 -2.155388471177945 \\ -1.0504684944235216 -2.1491228070175437 \\ -1.0498140708762345 -2.142857142857143 \\ -1.0491826232070949 -2.136591478696742 \\ -1.0485745701745133 -2.130325814536341 \\ -1.047990340775493 -2.1240601503759398 \\ -1.0474303745604683 -2.117794486215539 \\ -1.046895121959835 -2.111528822055138 \\ -1.04638504462268 -2.1052631578947367 \\ -1.0463659147869675 -2.1050142414665145 \\ -1.045906374419462 -2.098997493734336 \\ -1.0454538223468837 -2.092731829573935 \\ -1.0450276396249985 -2.0864661654135337 \\ -1.0446283250435422 -2.080200501253133 \\ -1.0442563900518878 -2.073934837092732 \\ -1.0439123591632502 -2.0676691729323307 \\ -1.0435967703744744 -2.06140350877193 \\ -1.0433101756021166 -2.055137844611529 \\ -1.0430531411355597 -2.0488721804511276 \\ -1.0428262481079453 -2.0426065162907268 \\ -1.0426300929857435 -2.036340852130326 \\ -1.0424652880778267 -2.030075187969925 \\ -1.0423324620649628 -2.0238095238095237 \\ -1.0422322605506862 -2.017543859649123 \\ -1.0421653466345608 -2.011278195488722 \\ -1.04213240150891 -2.0050125313283207 \\ -1.0421341250801304 -1.9987468671679198 \\ -1.0421712366157914 -1.9924812030075187 \\ -1.0422444754187725 -1.9862155388471179 \\ -1.0423546015297596 -1.9799498746867168 \\ -1.042502396459516 -1.9736842105263157 \\ -1.0426886639523967 -1.9674185463659148 \\ -1.0429142307826809 -1.9611528822055138 \\ -1.043179947585377 -1.9548872180451127 \\ -1.0434866897232558 -1.9486215538847118 \\ -1.0438353581919655 -1.9423558897243107 \\ -1.0442268805651973 -1.9360902255639099 \\ -1.044662211981982 -1.9298245614035088 \\ -1.0451423361783272 -1.9235588972431077 \\ -1.0456682665655357 -1.9172932330827068 \\ -1.0462410473576846 -1.9110275689223057 \\ -1.0463659147869675 -1.9097594101579642 \\ -1.0468694661033775 -1.9047619047619047 \\ -1.0475497674890841 -1.8984962406015038 \\ -1.048281208332035 -1.8922305764411027 \\ -1.049065010116082 -1.8859649122807018 \\ -1.0499024335380411 -1.8796992481203008 \\ -1.0507947800939779 -1.8734335839598997 \\ -1.0517433937431255 -1.8671679197994988 \\ 
0.19400614072942693 \\ -0.03246522508876949 -0.011118494979545583 \\ -0.07257442046565175 0.25856222766141446 \\ -0.11491801375981994 0.1488483192364193 \\ -0.05960227562109655 0.22138000152593026 \\ -0.16832965217504559 -0.24142767552385966 \\ 0.04901345110777355 0.2719667469538059 \\ -0.11715972940021793 -0.20288372116837552 \\ 0.17145320193025607 0.0031082347643203393 \\ 0.09420147177184993 0.17994799920663937 \\ 0.2708492408908947 0.09563515683946604 \\ 0.15600013450357822 0.20183694684613823 \\ 0.21166237212190023 0.16025492505668054 \\ 0.26469669557353626 0.10536412814963708 \\ -0.08799144233754147 0.11667326232427995 \\ 0.19823371231004197 -0.005605744575298342 \\ -0.24085610780415762 -0.1416471058694763 \\ 0.2040220038131202 2.644862289350147e-38 \\ 0.26180616772718146 -0.028217343749931074 \\ -0.25708218958310025 -0.11684652932373116 \\ -0.008844920209791827 -0.09045380818873551 \\ 0.03652286987659418 0.16179420460875957 \\ 0.07064987501299572 -0.06768603769036746 \\ 0.26283042317764904 0.09883550476626715 \\ 0.11057167831843516 -0.2762454980740552 \\ 0.046412439306801063 0.11671569944073712 \\ -0.14270950108031907 -0.23255186170796996 \\ -0.19014824761763158 -0.03167018847342322 \\ 0.12637899044620218 -0.1802829739823531 \\ 0.12136417711366762 0.07890166366732691 \\ -0.1755822870983239 -0.08311719098484772 \\ 0.21015618310799666 0.010602974394001555 \\ -0.21421847125148435 -0.0446320641692822 \\ 0.005141606196536705 -3.4438311059246704e-41 \\ -0.14420058329594723 -4.663521289272991e-42 \\ -0.04719765043125423 0.08401322861285147 \\ -0.11419399689502421 -0.029718673962149736 \\ -0.25307444064371615 0.15834222274516996 \\ 0.1619211502748402 0.10935476688166909 \\ -0.25708218958309986 0.11684652932373134 \\ 0.15587011723520233 -0.1039074141943868 \\ 0.23013000662528504 -0.08240018309482344 \\ 0.05924543964578846 -0.14542999497100767 \\ 0.19335189954125181 0.20887449436332747 \\ 0.07442836272131362 0.11731072492718532 \\ -0.2130173992025291 0.10704507503212071 \\ -0.044534917529664805 0.043455859612585364 \\ -0.26324169516628193 0.090729820881365 \\ 0.13592049594272756 1.4637067577342992e-31 \\ -0.08144695010570478 0.06531832348258257 \\ -0.056100039881191124 -2.9340338996524296e-35 \\ 0.016557076180748382 -0.1074372074285909 \\ 0.011812843415060857 0.003822468705094818 \\ 0.04535954087230735 -0.21803225323908643 \\ 0.12637899044620216 0.18028297398235313 \\ 0.17942226930247018 -0.23163821078722294 \\ 0.011068172262442012 0.18314162026126254 \\ 0.1909751652301978 0.18728323273180594 \\ 0.20948777858965525 -0.02217737895970026 \\ 0.0661704403459037 -2.465190328815662e-32 \\ -0.263241695166282 -0.09072982088136493 \\ -0.11565870311646437 0.26517498057356936 \\ -0.08900699976896755 -0.09536851383847389 \\ 0.10457021716741394 0.023633890454012388 \\ -0.08119356416063798 0.008511570796426823 \\ 0.07715312313298461 -0.20420979965467104 \\ -0.22137197842589484 0.12352824833385596 \\ -0.14700965192265283 -0.09181351658235135 \\ -0.1451758767767803 0.024154168203922535 \\ 0.2094877785896556 0.022177378959700258 \\ 0.052862079741731335 0.27001009509283513 \\ 0.184322089340404 -0.03260987565595252 \\ -0.16100440626521131 0.19618199017987256 \\ 0.2222229590197997 0.006040499961715521 \\ 0.1256408973157171 0.2663345434807604 \\ -0.2330530742294672 0.09144641277966704 \\ -0.12688757576224732 -0.050655888807985494 \\ 0.1860272320137397 -0.11557102320250898 \\ -0.14517587677678037 -0.024154168203922476 \\ 0.2508210071955051 -0.06763214164489502 \\ -0.1610044062652114 -0.19618199017987265 \\ 
0.20676954392838348 0.0585090779379288 \\ 0.14524311976937718 -0.25682634194980913 \\ 0.24255564867477886 -0.12858365536093336 \\ 0.10555941366938411 0.20155020793908604 \\ 0.28776263112184275 0.0 \\ -0.26456241039496486 -0.05793263610667331 \\ 0.1468104498363472 -0.2163864904709906 \\ 0.20674413190032456 -0.14465006210148382 \\ -0.27561474425962357 0.00309906442300363 \\ 0.010528226716110724 -0.13526029941887596 \\ -0.061970637802885684 0.2606185617064076 \\ 0.05495890107662033 -0.07206406070829696 \\ 0.1619869541844624 -0.12205376664362691 \\ 0.18602723201373975 0.11557102320250894 \\ 0.11783003750885696 -0.1349239951235166 \\ -0.27535139849386453 -0.07662625179551463 \\ 0.16192115027484022 -0.10935476688166901 \\ 0.011269093378426683 0.2831937815889813 \\ -0.23305307422946742 -0.09144641277966732 \\ 0.002074684512656073 -0.0399600013653667 \\ -0.264562410394965 0.05793263610667327 \\ 0.23708527512569957 0.08805674615664227 \\ 0.143782580198595 0.05397883070352113 \\ -0.16555093154859643 0.042911107574378264 \\ -0.004557413118950365 0.050419676977290315 \\ 0.050930149014882416 0.12758080375541164 \\ -0.24085610780415764 0.14164710586947632 \\ 0.21784048470004205 -0.08473684801338237 \\ 0.2722363886733347 -0.0387442950597777 \\ 0.26551319574533905 0.06407413972359335 \\ -0.00742160122868988 0.13042057463796244 \\ -0.2978171359001981 -0.00249266803184399 \\ 0.09420147177184991 -0.17994799920663934 \\ 0.00801678470936165 0.1638893551003271 \\ -0.2130173992025292 -0.1070450750321209 \\ 0.26180616772718146 0.02821734374993116 \\ 0.012430630512379281 1.232595164407831e-32 \\ -0.056948560470392394 -0.2039482794099357 \\ 0.011812843415061081 -0.003822468705094784 \\ -0.2323903005622008 0.09888619834905736 \\ -0.10469732763449949 -0.06865255667905719 \\ 0.1714532019302561 -0.003108234764320334 \\ -0.1869509781648369 0.19643450005575766 \\ -0.035821098048795935 -0.16171130016722893 \\ -0.04453491752966476 -0.04345585961258528 \\ -0.21035429932620933 0.133118374300183 \\ -0.11565870311646433 -0.26517498057356953 \\ 0.14506925373128132 0.20155208324560267 \\ 0.16198695418446243 0.12205376664362681 \\ 0.15882967689166905 -0.23201934855513345 \\ -0.08519999555875721 -3.5032461608120427e-45 \\ -0.10866837239665328 4.484155085839415e-44 \\ 0.01126909337842639 -0.2831937815889813 \\ -0.0766374921142266 0.11571365753326283 \\ 0.21251928203723486 -0.14556886417198178 \\ -0.12341604698421979 -0.2306780146612685 \\ -0.16545512684222796 0.1020401006260161 \\ 0.14506925373128138 -0.20155208324560278 \\ 0.022278079020926555 -0.04790731113389861 \\ 0.057090337638957094 -0.23495033373582225 \\ 0.2628304231776491 -0.09883550476626711 \\ 0.1192874156569297 -0.02619900441900056 \\ 0.2297105762438641 -0.18813543273960906 \\ 0.1026569929380824 0.25709412517666436 \\ -0.08119356416063789 -0.008511570796426936 \\ 0.052862079741731294 -0.2700100950928352 \\ 0.1452431197693771 0.2568263419498092 \\ 0.26469669557353664 -0.1053641281496373 \\ -0.18695097816483683 -0.19643450005575763 \\ -0.027251954770775064 -0.0718340574143137 \\ -0.04719765043125421 -0.0840132286128516 \\ 0.0893709958949184 0.025282671048795377 \\ -0.1901069756050342 -0.02455323838924531 \\ 0.13326249539391127 -0.05247339201837468 \\ 0.031729138462193336 -0.1843730963405715 \\ -0.2594155677634869 -0.0755148081494022 \\ -0.10507456283505164 0.26489283066858305 \\ 0.11783003750885691 0.13492399512351658 \\ -0.2620540156424635 -1.7632415262334313e-38 \\ 0.15737158940341772 0.18698514883738543 \\ -0.14700965192265303 0.09181351658235107 \\ 0.13531164390590006 
0.09092643298513912 \\ 0.25678737982301836 0.025688221075519643 \\ 0.20579531172521945 0.04247854779632411 \\ 0.24764024348873626 -0.07745277325614013 \\ -0.18519081619221733 -0.12748287391920077 \\ 0.0706498750129957 0.06768603769036753 \\ -0.11491801375981994 -0.14884831923641922 \\ 0.16127353213194365 0.1834646621139613 \\ 0.2258116333076933 -0.10660795345008407 \\ 0.2297105762438641 0.18813543273960912 \\ 0.08937099589491845 -0.02528267104879538 \\ -0.10507456283505173 -0.26489283066858293 \\ -0.09384722548750404 -0.17197999400266772 \\ -0.014952983976751813 0.21780552203467612 \\ 0.10725347368055171 0.21044188923984383 \\ -0.23342661001320422 -6.05360936588321e-43 \\ -0.14332595792027022 0.18550934440549124 \\ 0.23013000662528518 0.08240018309482358 \\ -0.23846751449485817 0.09483818968637012 \\ 0.15737158940341778 -0.1869851488373854 \\ 0.011068172262441982 -0.18314162026126252 \\ -0.10615173149374996 -0.08001808705225995 \\ -0.18157053018076333 -0.18493334118391805 \\ -0.2251680101588245 -0.10987716504339534 \\ -0.195179346798279 0.013951414960933496 \\ 0.13039943599151674 3.7874427983494104e-35 \\ } ; \addplot[color=mycolor2, draw opacity={1.0}, line width={1}, solid] table[row sep={\\}] { \\ 0.1 0.0 \\ 0.09979866764718845 0.006342391965656451 \\ 0.09919548128307953 0.012659245357374928 \\ 0.09819286972627067 0.01892512443604102 \\ 0.09679487013963563 0.025114798718107924 \\ 0.09500711177409454 0.031203344569848707 \\ 0.09283679330160727 0.037166245566032756 \\ 0.09029265382866214 0.04297949120891716 \\ 0.0873849377069785 0.048619673610046876 \\ 0.08412535328311813 0.05406408174555976 \\ 0.08052702575310587 0.059290792905464046 \\ 0.0766044443118978 0.06427876096865393 \\ 0.07237340381050703 0.06900790114821119 \\ 0.06785094115571322 0.07345917086575333 \\ 0.06305526670845225 0.07761464642917569 \\ 0.05800569095711982 0.08145759520503358 \\ 0.05272254676105024 0.08497254299495144 \\ 0.04722710747726827 0.08814533634475821 \\ 0.041541501300188646 0.09096319953545184 \\ 0.03568862215918719 0.09341478602651067 \\ 0.029692037532827495 0.09549022414440739 \\ 0.02357589355094273 0.09718115683235418 \\ 0.017364817766693044 0.0984807753012208 \\ 0.01108381999010111 0.09938384644612541 \\ 0.004758191582374241 0.0998867339183008 \\ -0.001586596383480803 0.09998741276738751 \\ -0.007924995685678855 0.09968547759519425 \\ -0.0142314838273285 0.09898214418809329 \\ -0.020480666806519054 0.09788024462147787 \\ -0.026647381369003482 0.09638421585599422 \\ -0.032706796331742165 0.09450008187146686 \\ -0.03863451256931287 0.09223542941045815 \\ -0.044406661260577414 0.0895993774291336 \\ -0.04999999999999998 0.08660254037844388 \\ -0.05539200638661103 0.08325698546347715 \\ -0.06056096871376665 0.07957618405308324 \\ -0.0654860733945285 0.07557495743542583 \\ -0.07014748877063214 0.07126941713788627 \\ -0.07452644496757548 0.06667690005162917 \\ -0.07860530947427874 0.06181589862206055 \\ -0.08236765814298327 0.05670598638627709 \\ -0.08579834132349771 0.051367739157340636 \\ -0.08888354486549234 0.045822652172741056 \\ -0.09161084574320696 0.040093053540661366 \\ -0.09396926207859084 0.03420201433256689 \\ -0.09594929736144973 0.028173255684143007 \\ -0.09754297868854071 0.02203105327865408 \\ -0.09874388886763943 0.01580013959733499 \\ -0.09954719225730846 0.009505604330418289 \\ -0.09994965423831852 0.0031727933498067657 \\ -0.09994965423831852 -0.003172793349806786 \\ -0.09954719225730846 -0.009505604330418263 \\ -0.09874388886763943 -0.01580013959733501 \\ -0.09754297868854071 
-0.022031053278654057 \\ -0.09594929736144975 -0.02817325568414294 \\ -0.09396926207859085 -0.034202014332566866 \\ -0.09161084574320698 -0.04009305354066134 \\ -0.08888354486549234 -0.04582265217274107 \\ -0.08579834132349773 -0.05136773915734061 \\ -0.08236765814298332 -0.05670598638627703 \\ -0.07860530947427874 -0.06181589862206053 \\ -0.07452644496757548 -0.06667690005162916 \\ -0.07014748877063214 -0.0712694171378863 \\ -0.06548607339452853 -0.07557495743542582 \\ -0.060560968713766664 -0.07957618405308321 \\ -0.05539200638661105 -0.08325698546347714 \\ -0.050000000000000044 -0.08660254037844385 \\ -0.0444066612605774 -0.0895993774291336 \\ -0.03863451256931287 -0.09223542941045815 \\ -0.03270679633174219 -0.09450008187146684 \\ -0.026647381369003548 -0.0963842158559942 \\ -0.02048066680651914 -0.09788024462147786 \\ -0.014231483827328523 -0.09898214418809327 \\ -0.007924995685678879 -0.09968547759519424 \\ -0.001586596383480761 -0.09998741276738751 \\ 0.004758191582374238 -0.0998867339183008 \\ 0.011083819990101086 -0.09938384644612541 \\ 0.017364817766693 -0.09848077530122082 \\ 0.023575893550942664 -0.09718115683235419 \\ 0.029692037532827495 -0.09549022414440739 \\ 0.03568862215918717 -0.09341478602651068 \\ 0.04154150130018869 -0.09096319953545183 \\ 0.04722710747726829 -0.0881453363447582 \\ 0.05272254676105024 -0.08497254299495144 \\ 0.0580056909571198 -0.0814575952050336 \\ 0.06305526670845221 -0.07761464642917572 \\ 0.06785094115571323 -0.0734591708657533 \\ 0.07237340381050701 -0.0690079011482112 \\ 0.07660444431189778 -0.06427876096865397 \\ 0.08052702575310583 -0.05929079290546409 \\ 0.08412535328311808 -0.05406408174555982 \\ 0.08738493770697849 -0.04861967361004688 \\ 0.09029265382866211 -0.0429794912089172 \\ 0.09283679330160727 -0.03716624556603273 \\ 0.09500711177409454 -0.031203344569848707 \\ 0.09679487013963563 -0.025114798718107942 \\ 0.09819286972627067 -0.018925124436041066 \\ 0.09919548128307953 -0.012659245357374994 \\ 0.09979866764718845 -0.0063423919656564525 \\ 0.1 -2.4492935982947065e-17 \\ } ; \addplot[color=mycolor5, only marks, draw opacity={1.0}, line width={0}, solid, mark={o}, mark size={3.0 pt}, mark repeat={1}, mark options={color=mycolor5, draw opacity={1.0}, fill=mycolor5, fill opacity={1.0}, line width={0.75}, rotate={0}, solid}] table[row sep={\\}] { \\ -0.0851999955984461 1.8703185348251382e-16 \\ -0.0811935636342816 0.008511570992394793 \\ -0.08119356363427488 -0.008511570992378306 \\ -0.05610004053842608 3.2693001094868804e-14 \\ -0.047197657371648605 -0.08401322997132865 \\ -0.047197657371626504 0.08401322997128614 \\ -0.04453503647572701 -0.04345629063900655 \\ -0.0445350364743566 0.04345629064443852 \\ -0.04264054128262811 -2.2582067639868575e-13 \\ -0.04254446545169638 0.05525397748598218 \\ -0.04254446545166633 -0.05525397748600593 \\ -0.04044400056057647 1.8234232327725316e-13 \\ -0.03946144328731091 0.08368594448652737 \\ -0.0394614432873054 -0.08368594448652164 \\ -0.037485760833529086 0.05082423966589468 \\ -0.037485760832879085 -0.05082423966485775 \\ -0.03246514522448475 -0.011118549664034387 \\ -0.03246514522439744 0.011118549664372781 \\ -0.02725195791948489 -0.07183405889413698 \\ -0.027251957919480027 0.07183405889413441 \\ -0.008844920887853071 -0.09045380678222553 \\ -0.008844920887710614 0.09045380678216824 \\ -0.004557425873925012 -0.05041968098976297 \\ -0.004557425873842082 0.050419680989913555 \\ 0.002074685445342613 -0.039960003369133926 \\ 0.0020746854453860927 0.03996000336904287 \\ 0.0033419888510643325 
-1.4446340327627867e-13 \\ 0.005141717112193901 3.842843559646147e-13 \\ 0.008969354861016956 -0.03015574142103819 \\ 0.008969354861400531 0.030155741421527386 \\ 0.011812863362079205 -0.003822459006207282 \\ 0.01181286336208037 0.0038224590060398737 \\ 0.012431812166047773 1.7770380410625277e-12 \\ 0.014581452449036065 0.04576496999458158 \\ 0.014581452450950484 -0.045764969992364755 \\ 0.01717958832164278 0.10075760461731194 \\ 0.017179588327890297 -0.10075760457798465 \\ 0.02227807402052865 0.04790731542336791 \\ 0.022278074020539638 -0.04790731542337733 \\ 0.05495889986118713 -0.07206406216804644 \\ 0.05495889986118937 0.07206406216803816 \\ 0.0661704434421038 -9.820349185887221e-15 \\ 0.07064987482823579 0.06768603819302894 \\ 0.07064987482823662 -0.06768603819302979 \\ 0.0893709952494624 0.025282670408583494 \\ 0.0893709952494627 -0.02528267040858117 \\ } ; \end{axis} \end{tikzpicture} \begin{tikzpicture}[/tikz/background rectangle/.style={fill={rgb,1:red,1.0;green,1.0;blue,1.0}, draw opacity={1.0}}] \begin{axis}[point meta max={nan}, point meta min={nan}, legend cell align={left}, legend columns={1}, title={}, title style={at={{(0.5,1)}}, anchor={south}, font={{\fontsize{14 pt}{18.2 pt}\selectfont}}, color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, rotate={0.0}}, legend style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, line width={1}, solid, fill={rgb,1:red,1.0;green,1.0;blue,1.0}, fill opacity={1.0}, text opacity={1.0}, font={{\fontsize{8 pt}{10.4 pt}\selectfont}}, text={rgb,1:red,0.0;green,0.0;blue,0.0}, cells={anchor={center}}, at={(1.02, 1)}, anchor={north west}}, axis background/.style={fill={rgb,1:red,1.0;green,1.0;blue,1.0}, opacity={1.0}}, anchor={north west}, xshift={1.0mm}, yshift={-1.0mm}, width={70mm}, height={70mm}, scaled x ticks={false}, xlabel={$\Re(z)$}, x tick style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, opacity={1.0}}, x tick label style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, opacity={1.0}, rotate={0}}, xlabel style={at={(ticklabel cs:0.5)}, anchor=near ticklabel, at={{(ticklabel cs:0.5)}}, anchor={near ticklabel}, font={{\fontsize{11 pt}{14.3 pt}\selectfont}}, color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, rotate={0.0}}, xmin={-0.5}, xmax={2.5}, xtick={{0.0,1.0,2.0}}, xticklabels={{$0.0$,$1.0$,$2.0$}}, xtick align={inside}, xticklabel style={font={{\fontsize{8 pt}{10.4 pt}\selectfont}}, color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, rotate={0.0}}, x grid style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={0.1}, line width={0.5}, solid}, axis x line*={left}, x axis line style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, line width={1}, solid}, scaled y ticks={false}, ylabel={$\Im(z)$}, y tick style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, opacity={1.0}}, y tick label style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, opacity={1.0}, rotate={0}}, ylabel style={at={(ticklabel cs:0.5)}, anchor=near ticklabel, at={{(ticklabel cs:0.5)}}, anchor={near ticklabel}, font={{\fontsize{11 pt}{14.3 pt}\selectfont}}, color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, rotate={0.0}}, ymin={-2.5}, ymax={0.5}, ytick={{-2.0,-1.0,0}}, yticklabels={{$-2.0$,$-1.0$,$0.0$}}, ytick align={inside}, yticklabel style={font={{\fontsize{8 pt}{10.4 pt}\selectfont}}, color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, rotate={0.0}}, y grid style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={0.1}, line width={0.5}, solid}, axis y line*={left}, y axis line 
style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, line width={1}, solid}, colorbar={false}] \addplot[color={rgb,1:red,0.0;green,0.6056;blue,0.9787}, only marks, draw opacity={1.0}, line width={0}, solid, mark={*}, mark size={1.50 pt}, mark repeat={1}, mark options={color=mycolor1, draw opacity={1.0}, fill=mycolor1, fill opacity={1.0}, line width={0.75}, rotate={0}, solid}] table[row sep={\\}] { \\ -0.09001591111568559 0.6672089058359312 \\ 0.1666342997006614 -3.907096910665796 \\ -4.161418221513816 -0.1369200839739335 \\ 3.526265176929987 0.13647785066733445 \\ 0.5309716585327479 0.4032912967785163 \\ 0.3495078984455063 -0.7747735275636949 \\ 0.5818881367425712 -0.6813052918462653 \\ 0.38675447194444773 -1.3002042416931605 \\ -0.1620140472383091 -0.7301426882351304 \\ 0.8196940410244593 3.053530082194601 \\ -0.12292635708833559 -1.3430515655647037 \\ -0.4565522892978873 0.10348908035255193 \\ -0.6431860058345981 1.1493866860053916 \\ -1.0512921815809801 -0.15763329486895952 \\ -0.28445540970997735 -0.2366498917931143 \\ -0.22116850447036096 -0.07730682376892638 \\ -0.7595433258900903 -0.24552681730000048 \\ 0.13780702953168417 -0.6127122971382961 \\ -1.5055621088501063 0.579271107842074 \\ 4.71955698063018 -0.7249058158570859 \\ -3.2194238146000433 -0.44071792812882443 \\ -1.2741923083608226 -0.568577231895492 \\ -0.29151450177109217 -0.3746913875723674 \\ -0.1152251306949719 0.2949321213118135 \\ -1.164982443324732 -1.9630556088492566 \\ -0.3410260350958497 -0.157172750332326 \\ 0.4530361192135348 -0.09209029539377794 \\ 0.00830846990557618 -0.01472369382926132 \\ 0.12092901329809219 0.6159949663581775 \\ -0.6876329785806518 0.6051607406227395 \\ 2.2737899491677465 1.0698929010569267 \\ 0.7839460825951073 -0.22286367715756278 \\ 1.1258148428776684 -0.6161451071969797 \\ -1.163979129552359 -0.4091527279956395 \\ -1.7496506332004058 -0.7961439077323179 \\ 1.77978220991071 0.9763315778555017 \\ -0.16565307083797656 -0.9212682604557495 \\ -1.3778946980722702 -0.18845591333147205 \\ -10.807753856111738 -8.889815962217106 \\ -0.5020402022630727 0.28794142822250623 \\ -1.2617240501380496 -1.4402148361844471 \\ -1.5762059381691973 -0.1169113880244232 \\ 1.0931081445041144 0.3479283639349315 \\ -0.5051830544842746 -0.2945970908743621 \\ -2.0183498300846168 0.5902473211155429 \\ 0.08114416322296233 0.13981785065212296 \\ -0.33115528192989263 -0.7650370252314569 \\ 4.122132675793074 -4.698632375916618 \\ -0.4663628687684027 -0.8020066914008561 \\ 21.504185143254947 2.450445994560922 \\ -1.0174952095435523 4.195447426426759 \\ -2.3403160285487057 0.531466429023083 \\ -0.8186983540706165 0.4533670813526604 \\ -0.3417099866348293 -0.3880458251046717 \\ 0.37234557156666315 -0.8314626088535458 \\ 2.43168488491019 -0.36687300435934783 \\ -4.782408952632965 -4.551630646243888 \\ 1.7458596377796718 0.31720452869777854 \\ -0.3919631342138382 -1.8474616621866413 \\ 0.9704525214516116 -0.23727366879970266 \\ -16.019930163993433 19.85019038198617 \\ -1.011253505399407 -0.7173939856886462 \\ -2.7402695457480735 4.355654864029392 \\ 2.4157022975241462 2.7221346892300478 \\ 0.7127685024242236 -0.4698492589343822 \\ -0.9134478543648885 -0.1996664835076648 \\ 0.6318465769372339 -0.34062151552812614 \\ -0.03096528887334333 0.03782121626817006 \\ 0.5536352990699686 -0.3749020571513565 \\ -0.7936426642576413 -0.093261021664415 \\ 0.48855659667050916 1.3220117556594755 \\ 0.5650615026806085 -0.985184613686498 \\ -0.20400580201894963 -0.6970331607200216 \\ 1.2198004872515298 1.0879187267570747 \\ 
-2.9541214541164678 -5.270747179574006 \\ -0.31555129924306125 -0.257223113690924 \\ -0.49471052413000266 0.1783222976547319 \\ -0.027707934709465483 0.014729161437902814 \\ -0.8737993675647272 -2.9842188587413054 \\ -0.9583978902030488 -0.22188970001420993 \\ -1.0799552668899126 -0.26140198763637995 \\ 0.04160137124560876 0.050512337484480536 \\ -0.901279007849784 -2.8273240387750325 \\ -0.5828088483924433 0.12442521244768671 \\ -2.3882546418473014 2.8052813077210383 \\ 0.07268722500077113 -0.2595760925297963 \\ -0.025389995400396496 -1.0557789774190969 \\ -0.9752876740252484 -2.785493544419106 \\ 0.8041519800518505 0.3931485014341232 \\ 2.897043781222463 -5.150449761094574 \\ 0.24360592547160873 0.1542428346425277 \\ 0.3063862444896809 -0.13405314631018916 \\ -1.1361890472143132 -0.3144558214933595 \\ 0.8063410454054868 5.277583968798922 \\ 0.8354933378367275 -0.23835977526152016 \\ 1.682138861194629 0.2625787013971504 \\ -6.597191098726825 3.8645570868636447 \\ -0.11729817904925878 -0.14315901916262497 \\ -3.286206347313742 -0.9356724531220625 \\ -1.3426343730659718 1.0303491098979336 \\ -0.297913863227741 1.0436790723169873 \\ 4.039279816681409 -13.996342877942954 \\ -1.7956577633880677 -2.5710979454574803 \\ -1.2713992031159937 -0.14646579065063325 \\ -6.010786813454519 0.5680112670871248 \\ -0.4368887410623232 -1.0124137353190903 \\ 1.4269419629980113 -1.8688916254123586 \\ 2.8637875294888 -1.966182655481756 \\ 6.94533038923015 -4.465004110578257 \\ -0.3229527128272929 -0.42334452094528224 \\ 0.8538125264725899 0.33715261050769607 \\ -0.06749110778263494 1.3424637131510821 \\ 0.6282241325109852 0.8799320696939452 \\ -4.534806839908544 4.444685128321454 \\ 2.3705531271620734 1.6197866303936395 \\ 0.5276695090286875 -0.8648198925896222 \\ -2.1708802463809334 1.0109350682686582 \\ 0.4459803189025213 -0.29002093924922884 \\ -0.6995936307574694 1.2245926871421322 \\ 1.2231153888992417 1.5650065919295506 \\ -0.7943449257373781 -3.1706286973168205 \\ -0.4265456644110327 -0.49327365241117105 \\ 0.8124858968741218 0.822163091583767 \\ -0.3581125066861673 0.601855506685886 \\ 0.29016275192183627 -0.42047575403510007 \\ 0.39436256605588066 1.1221849807074176 \\ -0.04539676362564188 -0.17022819188281116 \\ -0.8541152444478476 -1.2296593585674156 \\ -0.5737859156436931 -0.17806772596611586 \\ 0.5720420266497984 0.9138899739708461 \\ 1.7053090649345217 0.9079026831662474 \\ 0.07607531775654194 0.15447864524796043 \\ -7.013970335237569 1.3292942902765605 \\ 1.050704959435795 -1.257586254766296 \\ -0.7100610835355649 0.014786614168262458 \\ -2.0115149400084356 2.1920947388629823 \\ -0.1746970246953375 -0.3357514560455263 \\ 0.025758453955502993 0.17509917018268772 \\ -0.7168895475732573 0.009739921261030605 \\ 0.21177355293209518 -0.07939282303817129 \\ 0.03305161299598855 -0.25034631733769525 \\ -0.7213138055554129 0.5200945355489653 \\ -0.5496521941220118 0.4069414326348671 \\ 0.02627371047161661 0.3285201264805016 \\ 0.23939617716504502 -0.05419800848724293 \\ 1.156388400727035 -0.3723329284694505 \\ -0.6302995462141905 0.10842636892667293 \\ 0.3266276541288669 -0.5658335315447116 \\ 1.576674835021894 -4.317975348359148 \\ 0.2646979005222734 1.554376192100565 \\ 1.8836627393465402 -1.9067765346340984 \\ 0.004398505771848225 0.9284656910638563 \\ -1.5113036185168953 -0.06796179876134538 \\ -0.24283610387509286 -0.03137190775954187 \\ 0.2488562682918671 0.09258307618170941 \\ 1.5502201632835628 0.29546673338670115 \\ 0.12252782720304142 -0.214010353888386 \\ -0.923949932100518 0.007339664825921521 
\\ 0.5671177721080701 1.8732428380600317 \\ 0.5320828239479186 -0.3920273934657649 \\ 0.1490073524086437 -0.9456044721032981 \\ -0.5015860063393708 -0.4495407734076789 \\ 1.9587847881555775 1.4685280967287646 \\ 0.9404378151959093 -1.721740584807866 \\ -1.5927670712448478 1.5967872149384446 \\ -0.1462686933371423 1.0697146889747016 \\ -1.6783853776870707 1.174734277448681 \\ -1.0353447943643403 0.2727777702325694 \\ 0.22677277740174662 -7.032488517730581 \\ -0.8334039560343114 0.18952382821984828 \\ 1.4859845067930901 -5.232455480806997 \\ -0.635899604466308 -3.3981343933131147 \\ -0.22235032918373915 -0.9948385724876156 \\ -0.5135659672747984 -1.159483428730109 \\ -0.10933007980292442 -0.15107363136805063 \\ 0.3767261321605986 -0.8521424218600147 \\ 1.422445772982088 0.21398799955551184 \\ 4.828180317998143 7.365348902600172 \\ -0.331505624316812 -1.2090760421804927 \\ 0.3952796129866738 -0.9084271184102013 \\ -1.183901457817789 0.31422069450092893 \\ -0.3471195512638368 0.2318222938959436 \\ -1.0893347443491437 -0.08564662847591775 \\ 0.16840529863211695 0.19521854010876963 \\ -1.556367951433645 -0.7408933332338248 \\ 0.194391097535205 3.7650597218546014 \\ 0.9057141604312139 0.9173369769695712 \\ -0.6172391806480747 0.11782105826851831 \\ -0.4966909141118885 -0.7421757235058642 \\ -0.2364344553070186 -0.6256498438918756 \\ 0.6395429800689594 0.9183619001596078 \\ 0.17144128198701822 0.494340111107092 \\ -0.5919419212473204 2.0619225232894807 \\ 0.12159321172872957 -0.700472788834571 \\ 0.11518476790330705 -0.08831315896266984 \\ 1.500746426600094 1.2356482301807514 \\ 0.6224931915193664 -0.11629776383916875 \\ -0.8328286896137505 -0.09903828398010221 \\ 0.7016682775504361 -1.1872218431951196 \\ 0.5954599532444269 -0.8095806738569656 \\ -3.8421776368499474 -4.687991827662449 \\ 0.643152759126893 -0.5822987398415168 \\ 2.193622140401718 0.6761147423631702 \\ -0.20595619620298417 0.0898788540797489 \\ -0.24076447438945406 1.6721260490985148 \\ 0.9105289405922127 0.32558778111632863 \\ 2.5087633552066904 0.4185164784806665 \\ 1.4460735917462046 -0.7928076360368691 \\ -3.656484315928987 0.6310694789384728 \\ -0.6376596605789986 -0.5692911475902014 \\ 3.2220059497404616 1.7892791056369317 \\ 1.196426162278664 -1.0503031044427151 \\ 0.05298145384890167 1.519667074426388 \\ -0.07010460050230118 0.1374610696295674 \\ -1.3383817935488964 -1.546825312321426 \\ -0.5909855465445687 0.22545308862744234 \\ 1.9242744075894402 0.6886301705628424 \\ 1.2616032586762804 0.9454749525647851 \\ 1.3715557190051202 -0.9236628333313107 \\ -0.007644747611564009 -3.608222481175245 \\ } ;\label{marker:eigs} \addplot[color=mycolor2, draw opacity={1.0}, line width={1}, solid] table[row sep={\\}] { \\ 2.0 -1.0 \\ 1.9979866764718843 -0.9365760803434355 \\ 1.9919548128307953 -0.8734075464262507 \\ 1.9819286972627066 -0.8107487556395898 \\ 1.9679487013963564 -0.7488520128189208 \\ 1.9500711177409453 -0.687966554301513 \\ 1.9283679330160726 -0.6283375443396725 \\ 1.9029265382866214 -0.5702050879108285 \\ 1.873849377069785 -0.5138032638995313 \\ 1.8412535328311812 -0.45935918254440244 \\ 1.8052702575310586 -0.4070920709453596 \\ 1.766044443118978 -0.35721239031346075 \\ 1.72373403810507 -0.3099209885178881 \\ 1.6785094115571322 -0.2654082913424667 \\ 1.6305526670845225 -0.22385353570824318 \\ 1.5800569095711983 -0.18542404794966427 \\ 1.5272254676105024 -0.1502745700504856 \\ 1.4722710747726828 -0.11854663655241793 \\ 1.4154150130018865 -0.09036800464548167 \\ 1.3568862215918718 -0.06585213973489334 \\ 1.296920375328275 
-0.045097758555926126 \\ 1.2357589355094274 -0.028188431676458348 \\ 1.1736481776669305 -0.01519224698779198 \\ 1.110838199901011 -0.006161535538745877 \\ 1.0475819158237425 -0.0011326608169920371 \\ 0.984134036165192 -0.0001258723261249317 \\ 0.9207500431432114 -0.0031452240480576155 \\ 0.8576851617267149 -0.010178558119067205 \\ 0.7951933319348095 -0.021197553785221257 \\ 0.7335261863099651 -0.03615784144005785 \\ 0.6729320366825784 -0.054999181285331544 \\ 0.6136548743068713 -0.07764570589541864 \\ 0.5559333873942258 -0.10400622570866414 \\ 0.5000000000000002 -0.1339745962155613 \\ 0.44607993613388974 -0.1674301453652286 \\ 0.3943903128623335 -0.20423815946916768 \\ 0.345139266054715 -0.24425042564574173 \\ 0.2985251122936786 -0.2873058286211373 \\ 0.25473555032424533 -0.3332309994837084 \\ 0.21394690525721272 -0.3818410137793945 \\ 0.1763234185701673 -0.43294013613722915 \\ 0.14201658676502293 -0.4863226084265937 \\ 0.11116455134507663 -0.5417734782725895 \\ 0.08389154256793041 -0.5990694645933864 \\ 0.06030737921409168 -0.6579798566743311 \\ 0.04050702638550274 -0.7182674431585699 \\ 0.024570213114592954 -0.7796894672134592 \\ 0.012561111323605711 -0.84199860402665 \\ 0.004528077426915411 -0.9049439566958171 \\ 0.0005034576168149174 -0.9682720665019323 \\ 0.0005034576168149174 -1.0317279334980678 \\ 0.004528077426915411 -1.0950560433041827 \\ 0.012561111323605711 -1.1580013959733502 \\ 0.024570213114592954 -1.2203105327865407 \\ 0.04050702638550252 -1.2817325568414293 \\ 0.06030737921409157 -1.3420201433256687 \\ 0.0838915425679303 -1.4009305354066135 \\ 0.11116455134507663 -1.4582265217274106 \\ 0.14201658676502282 -1.513677391573406 \\ 0.17632341857016687 -1.5670598638627702 \\ 0.2139469052572126 -1.6181589862206054 \\ 0.2547355503242452 -1.6667690005162914 \\ 0.29852511229367873 -1.712694171378863 \\ 0.3451392660547148 -1.7557495743542582 \\ 0.3943903128623334 -1.7957618405308322 \\ 0.4460799361338895 -1.8325698546347713 \\ 0.49999999999999956 -1.8660254037844384 \\ 0.555933387394226 -1.895993774291336 \\ 0.6136548743068713 -1.9223542941045815 \\ 0.6729320366825782 -1.9450008187146683 \\ 0.7335261863099645 -1.963842158559942 \\ 0.7951933319348086 -1.9788024462147784 \\ 0.8576851617267147 -1.9898214418809328 \\ 0.9207500431432112 -1.9968547759519422 \\ 0.9841340361651923 -1.9998741276738752 \\ 1.0475819158237423 -1.998867339183008 \\ 1.1108381999010108 -1.9938384644612541 \\ 1.17364817766693 -1.9848077530122081 \\ 1.2357589355094265 -1.9718115683235418 \\ 1.296920375328275 -1.9549022414440738 \\ 1.3568862215918718 -1.9341478602651068 \\ 1.4154150130018868 -1.9096319953545182 \\ 1.4722710747726828 -1.8814533634475819 \\ 1.5272254676105024 -1.8497254299495145 \\ 1.5800569095711978 -1.814575952050336 \\ 1.630552667084522 -1.7761464642917573 \\ 1.6785094115571324 -1.734591708657533 \\ 1.72373403810507 -1.690079011482112 \\ 1.766044443118978 -1.6427876096865397 \\ 1.8052702575310584 -1.5929079290546408 \\ 1.8412535328311808 -1.540640817455598 \\ 1.8738493770697848 -1.4861967361004689 \\ 1.902926538286621 -1.429794912089172 \\ 1.9283679330160726 -1.3716624556603272 \\ 1.9500711177409453 -1.312033445698487 \\ 1.9679487013963564 -1.2511479871810793 \\ 1.9819286972627066 -1.1892512443604106 \\ 1.9919548128307953 -1.12659245357375 \\ 1.9979866764718843 -1.0634239196565645 \\ 2.0 -1.0000000000000002 \\ } ; \label{marker:contour} \addplot[color=mycolor5, only marks, draw opacity={1.0}, line width={0}, solid, mark={o}, mark size={3.0 pt}, mark repeat={1}, mark options={color=mycolor5, draw 
opacity={1.0}, fill=mycolor5, fill opacity={1.0}, line width={0.75}, rotate={0}, solid}] table[row sep={\\}] { \\ -0.025390020083171247 -1.0557789638987876 \\ 0.12159321171870856 -0.7004727888345896 \\ 0.13780702952629775 -0.6127122971407035 \\ 0.14900735241503132 -0.9456044721080824 \\ 0.29016275192720387 -0.4204757540587775 \\ 0.3266276540962509 -0.5658335315478045 \\ 0.3495078982227598 -0.7747735275384487 \\ 0.372345571668286 -0.8314626087689914 \\ 0.3767261321201932 -0.8521424218001943 \\ 0.38675447194554113 -1.300204241694438 \\ 0.39527961288671154 -0.9084271185520928 \\ 0.445980318900451 -0.2900209392544003 \\ 0.5276695090689193 -0.8648198925401038 \\ 0.5320828239405424 -0.39202739346585197 \\ 0.5536352990720979 -0.37490205715369257 \\ 0.5650615026430659 -0.9851846136953439 \\ 0.5818881367808509 -0.6813052916699781 \\ 0.595459953178549 -0.8095806738642263 \\ 0.6224931915188231 -0.11629776384047347 \\ 0.6318465769835493 -0.34062151552919095 \\ 0.6431527592298872 -0.5822987398244864 \\ 0.701668277553053 -1.187221843195899 \\ 0.7127685024219131 -0.4698492589306824 \\ 0.783946082569995 -0.22286367714262326 \\ 0.8354933378038056 -0.2383597752703286 \\ 0.9404378151958483 -1.721740584807884 \\ 0.9704525214483626 -0.23727366879818879 \\ 1.0507049594357394 -1.257586254765891 \\ 1.125814842877215 -0.6161451071961443 \\ 1.1563884007259637 -0.37233292847011457 \\ 1.1964261622789407 -1.050303104443826 \\ 1.3715557190051604 -0.9236628333310262 \\ 1.426941962997982 -1.8688916254123416 \\ 1.4460735917461232 -0.792807636036977 \\ } ; \label{marker:CI} \end{axis} \end{tikzpicture} \begin{tikzpicture}[/tikz/background rectangle/.style={fill={rgb,1:red,1.0;green,1.0;blue,1.0}, draw opacity={1.0}}] \begin{axis}[point meta max={nan}, point meta min={nan}, legend cell align={left}, legend columns={1}, title={}, title style={at={{(0.5,1)}}, anchor={south}, font={{\fontsize{14 pt}{18.2 pt}\selectfont}}, color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, rotate={0.0}}, legend style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, line width={1}, solid, fill={rgb,1:red,1.0;green,1.0;blue,1.0}, fill opacity={1.0}, text opacity={1.0}, font={{\fontsize{8 pt}{10.4 pt}\selectfont}}, text={rgb,1:red,0.0;green,0.0;blue,0.0}, cells={anchor={center}}, at={(1.02, 1)}, anchor={north west}}, axis background/.style={fill={rgb,1:red,1.0;green,1.0;blue,1.0}, opacity={1.0}}, anchor={north west}, xshift={1.0mm}, yshift={-1.0mm}, width={100mm}, height={70mm}, scaled x ticks={false}, xlabel={Discretization points}, x tick style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, opacity={1.0}}, x tick label style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, opacity={1.0}, rotate={0}}, xlabel style={at={(ticklabel cs:0.5)}, anchor=near ticklabel, at={{(ticklabel cs:0.5)}}, anchor={near ticklabel}, font={{\fontsize{11 pt}{14.3 pt}\selectfont}}, color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, rotate={0.0}}, xmin={44.0}, xmax={256.0}, xtick={{50.0,100.0,150.0,200.0,250.0}}, xticklabels={{$50$,$100$,$150$,$200$,$250$}}, xtick align={inside}, xticklabel style={font={{\fontsize{8 pt}{10.4 pt}\selectfont}}, color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, rotate={0.0}}, x grid style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={0.1}, line width={0.5}, solid}, axis x line*={left}, x axis line style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, line width={1}, solid}, scaled y ticks={false}, ylabel={Residual}, y tick style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, 
opacity={1.0}}, y tick label style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, opacity={1.0}, rotate={0}}, ylabel style={at={(ticklabel cs:0.5)}, anchor=near ticklabel, at={{(ticklabel cs:0.5)}}, anchor={near ticklabel}, font={{\fontsize{11 pt}{14.3 pt}\selectfont}}, color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, rotate={0.0}}, ymode={log}, log basis y={10}, ymin={4.3581874493397274e-14}, ymax={0.19669137868571174}, ytick={{1.0e-5,1.0e-10,1.0e-11,1.0e-12,1.0e-13}}, yticklabels={{$10^{-5}$,$10^{-10}$,$10^{-11}$,$10^{-12}$,$10^{-13}$}}, ytick align={inside}, yticklabel style={font={{\fontsize{8 pt}{10.4 pt}\selectfont}}, color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, rotate={0.0}}, y grid style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={0.1}, line width={0.5}, solid}, axis y line*={left}, y axis line style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, line width={1}, solid}, colorbar={false}] \addplot[color=mycolor1, draw opacity={1.0}, line width={1}, solid] table[row sep={\\}] { \\ 50.0 0.00032297556867893134 \\ 60.0 3.165767621366915e-5 \\ 70.0 3.5799428139726276e-6 \\ 80.0 3.6561079633110145e-7 \\ 90.0 2.8724664485481023e-8 \\ 100.0 1.1627380884341356e-8 \\ 110.0 1.1424158897092236e-9 \\ 120.0 1.4253430229282738e-10 \\ 130.0 5.90786517022531e-11 \\ 140.0 1.0174503599032456e-11 \\ 150.0 1.184195974893719e-12 \\ 160.0 4.74784027858908e-13 \\ 170.0 3.1907982724276905e-13 \\ 180.0 2.77159094300411e-13 \\ 190.0 5.285471971650926e-13 \\ 200.0 1.1144981208101681e-13 \\ 210.0 2.6527106649798715e-13 \\ 220.0 1.6207025163484862e-13 \\ 230.0 2.170571980076027e-13 \\ 240.0 9.941491593564362e-14 \\ 250.0 4.666300013500311e-13 \\ } ; \addlegendentry {Minimum} \addplot[color=mycolor2,draw opacity={1.0}, line width={1}, solid] table[row sep={\\}] { \\ 50.0 0.010356128768253068 \\ 60.0 0.000938409050477471 \\ 70.0 0.00014512139700793267 \\ 80.0 1.455329848457706e-5 \\ 90.0 1.385793605553641e-6 \\ 100.0 1.3094984125726255e-6 \\ 110.0 1.0675802955264307e-7 \\ 120.0 1.3145436385831498e-8 \\ 130.0 5.7141204654576766e-9 \\ 140.0 1.0384263609228616e-9 \\ 150.0 1.304610385622612e-10 \\ 160.0 1.380591839215439e-11 \\ 170.0 5.4320789078795914e-12 \\ 180.0 4.5724007301143065e-12 \\ 190.0 3.490898810968369e-12 \\ 200.0 2.585484738003879e-12 \\ 210.0 4.756769373057153e-12 \\ 220.0 4.331855437300488e-12 \\ 230.0 4.4294070049551e-12 \\ 240.0 3.2872505921898884e-12 \\ 250.0 3.3715501519623125e-12 \\ } ; \addlegendentry {Median} \addplot[color=mycolor3, draw opacity={1.0}, line width={1}, solid] table[row sep={\\}] { \\ 50.0 0.08622628605714612 \\ 60.0 0.021917403372299683 \\ 70.0 0.0041724977108685835 \\ 80.0 0.00032352652909829115 \\ 90.0 1.9612073043114425e-5 \\ 100.0 5.069184925829584e-5 \\ 110.0 3.586445287826807e-6 \\ 120.0 3.8820371462841895e-7 \\ 130.0 1.655688903194703e-7 \\ 140.0 2.9440433679053535e-8 \\ 150.0 3.935847861486385e-9 \\ 160.0 5.982899533133101e-10 \\ 170.0 1.5058813163731429e-10 \\ 180.0 1.0166747056029275e-10 \\ 190.0 8.526763291507809e-11 \\ 200.0 1.0293699726750942e-10 \\ 210.0 9.288689513868011e-11 \\ 220.0 9.1510279521014e-11 \\ 230.0 2.373574299500802e-10 \\ 240.0 1.63135765754764e-10 \\ 250.0 1.0082795853451223e-10 \\ } ; \addlegendentry {Maximum} \end{axis} \end{tikzpicture} \begin{tikzpicture}[/tikz/background rectangle/.style={fill={rgb,1:red,1.0;green,1.0;blue,1.0}, draw opacity={1.0}}] \begin{axis}[xlabel={$\Re(z)$}, ylabel={$\Im(z)$},point meta max={nan}, point meta min={nan}, legend cell align={left}, legend columns={1}, 
title={}, title style={at={{(0.5,1)}}, anchor={south}, font={{\fontsize{14 pt}{18.2 pt}\selectfont}}, color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, rotate={0.0}}, legend style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, line width={1}, solid, fill={rgb,1:red,1.0;green,1.0;blue,1.0}, fill opacity={1.0}, text opacity={1.0}, font={{\fontsize{8 pt}{10.4 pt}\selectfont}}, text={rgb,1:red,0.0;green,0.0;blue,0.0}, cells={anchor={center}}, at={(1.02, 1)}, anchor={north west}}, axis background/.style={fill={rgb,1:red,1.0;green,1.0;blue,1.0}, opacity={1.0}}, anchor={north west}, xshift={1.0mm}, yshift={-1.0mm}, width={130mm}, height={70mm}, scaled x ticks={false}, x tick style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, opacity={1.0}}, x tick label style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, opacity={1.0}, rotate={0}}, xlabel style={at={(ticklabel cs:0.5)}, anchor=near ticklabel, at={{(ticklabel cs:0.5)}}, anchor={near ticklabel}, font={{\fontsize{11 pt}{14.3 pt}\selectfont}}, color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, rotate={0.0}},xmin={-3.6712903920343956}, xmax={3.673034961636064}, xtick={{-3.14159,0,3.14159}}, xticklabels={{$-\pi$,$0$, $\pi$}}, xtick align={inside}, xticklabel style={font={{\fontsize{8 pt}{10.4 pt}\selectfont}}, color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, rotate={0.0}}, x grid style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={0.1}, line width={0.5}, solid}, axis x line*={left}, x axis line style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, line width={1}, solid}, scaled y ticks={false}, y tick style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, opacity={1.0}}, y tick label style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, opacity={1.0}, rotate={0}}, ylabel style={at={(ticklabel cs:0.5)}, anchor=near ticklabel, at={{(ticklabel cs:0.5)}}, anchor={near ticklabel}, font={{\fontsize{11 pt}{14.3 pt}\selectfont}}, color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, rotate={0.0}}, ymin={-1.0598665753343075}, ymax={1.0598665753343075}, ytick={{-1.0,0.0,1.0}}, yticklabels={{$-1.0$,$0.0$,$1.0$}}, ytick align={inside}, yticklabel style={font={{\fontsize{8 pt}{10.4 pt}\selectfont}}, color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, rotate={0.0}}, y grid style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={0.1}, line width={0.5}, solid}, axis y line*={left}, y axis line style={color={rgb,1:red,0.0;green,0.0;blue,0.0}, draw opacity={1.0}, line width={1}, solid}, colorbar={false}] \addplot[color=mycolor2, draw opacity={1.0}, line width={1}, solid] table[row sep={\\}] { \\ 3.4651766969095417 0.0 \\ 3.458200175136576 0.0634239196565645 \\ 3.437298701808538 0.12659245357374926 \\ 3.4025564397814754 0.1892512443604102 \\ 3.354113283882506 0.2511479871810792 \\ 3.2921642976027257 0.31203344569848707 \\ 3.216958927645373 0.3716624556603275 \\ 3.128799999492001 0.4297949120891716 \\ 3.028042498031138 0.4861967361004687 \\ 2.915092138159435 0.5406408174555976 \\ 2.7904037311109695 0.5929079290546404 \\ 2.6544793530929294 0.6427876096865393 \\ 2.5078663236019314 0.6900790114821119 \\ 2.35115500156158 0.7345917086575333 \\ 2.1849764081554475 0.7761464642917568 \\ 2.009999685927481 0.8145759520503357 \\ 1.8269294043811493 0.8497254299495144 \\ 1.636502722926724 0.8814533634475821 \\ 1.439486422600511 0.9096319953545183 \\ 1.2366738185082493 0.9341478602651067 \\ 1.028881565425173 0.9549022414440739 \\ 0.8169463694154668 0.9718115683235417 \\ 0.6017216187122552 
0.984807753012208 \\ 0.3840739474243851 0.9938384644612541 \\ 0.16487974590674356 0.998867339183008 \\ -0.05497836815438633 0.9998741276738751 \\ -0.2746151037312302 0.9968547759519424 \\ -0.49314606120903737 0.9898214418809328 \\ -0.7096912935511858 0.9788024462147787 \\ -0.9233788495353235 0.9638421585599422 \\ -1.1333482847931944 0.9450008187146685 \\ -1.3387541265164173 0.9223542941045814 \\ -1.5387692778770854 0.8959937742913359 \\ -1.7325883484547702 0.8660254037844387 \\ -1.9194308972594902 0.8325698546347714 \\ -2.09854457529212 0.7957618405308323 \\ -2.2692081549882808 0.7557495743542583 \\ -2.4307344343471824 0.7126941713788627 \\ -2.582473004051539 0.6667690005162916 \\ -2.723812866436335 0.6181589862206055 \\ -2.854184895760771 0.5670598638627709 \\ -2.973064129876752 0.5136773915734063 \\ -3.079971884066178 0.4582265217274105 \\ -3.1744776785353546 0.4009305354066136 \\ -3.256200971805184 0.3420201433256689 \\ -3.324812693017398 0.28173255684143006 \\ -3.380036566986753 0.2203105327865408 \\ -3.421650226663697 0.1580013959733499 \\ -3.449486108527992 0.09505604330418288 \\ -3.463432127307873 0.031727933498067656 \\ -3.463432127307873 -0.03172793349806786 \\ -3.449486108527992 -0.09505604330418263 \\ -3.421650226663697 -0.15800139597335008 \\ -3.380036566986753 -0.22031053278654056 \\ -3.3248126930173982 -0.2817325568414294 \\ -3.2562009718051845 -0.34202014332566866 \\ -3.1744776785353546 -0.4009305354066134 \\ -3.079971884066178 -0.4582265217274107 \\ -2.9730641298767524 -0.5136773915734061 \\ -2.8541848957607723 -0.5670598638627703 \\ -2.7238128664363352 -0.6181589862206053 \\ -2.5824730040515393 -0.6667690005162915 \\ -2.430734434347182 -0.7126941713788629 \\ -2.2692081549882817 -0.7557495743542582 \\ -2.0985445752921206 -0.7957618405308321 \\ -1.9194308972594911 -0.8325698546347713 \\ -1.7325883484547724 -0.8660254037844385 \\ -1.538769277877085 -0.895993774291336 \\ -1.3387541265164173 -0.9223542941045814 \\ -1.133348284793195 -0.9450008187146683 \\ -0.9233788495353258 -0.9638421585599419 \\ -0.7096912935511889 -0.9788024462147785 \\ -0.49314606120903814 -0.9898214418809327 \\ -0.27461510373123105 -0.9968547759519423 \\ -0.05497836815438487 -0.9998741276738751 \\ 0.16487974590674348 -0.998867339183008 \\ 0.38407394742438433 -0.9938384644612541 \\ 0.6017216187122536 -0.9848077530122081 \\ 0.8169463694154646 -0.9718115683235419 \\ 1.028881565425173 -0.9549022414440739 \\ 1.2366738185082486 -0.9341478602651068 \\ 1.4394864226005124 -0.9096319953545182 \\ 1.6365027229267244 -0.881453363447582 \\ 1.8269294043811493 -0.8497254299495144 \\ 2.0099996859274802 -0.8145759520503358 \\ 2.184976408155446 -0.7761464642917572 \\ 2.3511550015615805 -0.7345917086575331 \\ 2.507866323601931 -0.690079011482112 \\ 2.654479353092929 -0.6427876096865396 \\ 2.7904037311109686 -0.5929079290546408 \\ 2.9150921381594337 -0.5406408174555982 \\ 3.028042498031138 -0.4861967361004688 \\ 3.128799999492 -0.4297949120891719 \\ 3.2169589276453734 -0.37166245566032724 \\ 3.2921642976027257 -0.31203344569848707 \\ 3.354113283882506 -0.2511479871810794 \\ 3.402556439781475 -0.18925124436041063 \\ 3.4372987018085377 -0.12659245357374993 \\ 3.458200175136576 -0.06342391965656452 \\ 3.4651766969095417 -2.4492935982947064e-16 \\ } ; \addlegendentry {Contour} \addplot[color=mycolor5, only marks, draw opacity={1.0}, line width={0}, solid, mark={o}, mark size={3.5 pt}, mark repeat={1}, mark options={color=mycolor5, draw opacity={1.0}, fill=mycolor5, fill opacity={1.0}, line width={0.75}, rotate={0}, solid}] 
table[row sep={\\}] { \\ -3.267154029739428 -0.08720697284126788 \\ -2.887265546795732 -0.18892822968386488 \\ -2.1073284515769433 0.03161897954261098 \\ -2.0318009439265485 0.5962614007898032 \\ -1.060622260612746 -0.046388775515406114 \\ -1.0728999698353504e-5 0.2123634140001349 \\ 1.060650276766277 -0.04636228896148163 \\ 2.031801272985895 0.5962617303592488 \\ 2.107329407077874 0.03161887310667814 \\ 2.8872654982336448 -0.1889281536633988 \\ 3.267154031036407 -0.08720698338417839 \\ } ; \addlegendentry {CI values} \end{axis} \end{tikzpicture}
2205.12106v4
http://arxiv.org/abs/2205.12106v4
Loop group methods for the non-abelian Hodge correspondence on a 4-punctured sphere
\documentclass[10pt]{amsart} \setlength{\textheight}{22cm} \setlength{\textwidth}{15.5cm} \setlength{\topmargin}{-0.5cm} \setlength{\parskip}{0.3\baselineskip} \hoffset=-1.5cm \setlength\parindent{0pt} \usepackage[latin1]{inputenc} \usepackage[T1]{fontenc} \usepackage{amsmath} \usepackage{hyperref} \usepackage{amsfonts} \usepackage{amssymb} \usepackage{amsthm} \usepackage[arrow, matrix, curve]{xy} \usepackage{hyperref} \usepackage{comment} \usepackage{amssymb,amscd,url,tikz,stmaryrd,mathtools,fixltx2e} \usepackage{amssymb,amscd,url} \usepackage{tikz, pgfplots} \usepackage{tikz-cd} \usepackage{color} \newcommand{\A}{{\mathbb A}} \newcommand{\Z}{{\mathbb Z}} \renewcommand{\S}{{\mathbb S}} \newcommand{\R}{{\mathbb R}} \newcommand{\C}{{\mathbb C}} \newcommand{\D}{{\mathbb D}} \newcommand{\N}{{\mathbb N}} \renewcommand{\H}{{\mathbb H}} \newcommand{\ArcTanh}{\rm ArcTanh} \newcommand{\ArcTan}{\rm ArcTan} \newcommand{\uc}{\overline{u}} \newcommand{\vc}{\overline{v}} \def\cqfd{\hfill$\Box$} \def\Res{{\,\rm Res}} \def\Re{{\rm Re}} \def\Im{{\rm Im}} \def\Id{{\rm Id}} \def\ii{{\rm i}} \def\xx{{\bf x}} \def\sl{\mathfrak{sl}} \def\su{\mathfrak{su}} \def\Uni{{\rm Uni}} \def\Pos{{\rm Pos}} \def\Sym{{\rm Sym}} \def\tr{{\rm trace}} \def\Area{{\rm Area}} \def\SL{{\rm SL}} \def\SU{{\rm SU}} \def\z{\overline{z}} \def\cvx{\cv{x}} \renewcommand{\matrix}[1]{\left(\begin{array}{cc} #1\end{array}\right)} \newcommand{\minimatrix}[1]{\left(\begin{smallmatrix}#1\end{smallmatrix}\right)} \newcommand{\wt}[1]{\widetilde{#1}} \newcommand{\wh}[1]{\widehat{#1}} \newcommand{\cal}[1]{{\mathcal #1}} \newcommand{\low}[1]{{#1}_{\mbox{\scriptsize lower}}} \newcommand{\lowind}[2]{{#1}_{\mbox{\scriptsize lower},#2}} \newcommand{\indlow}[2]{{#1}_{#2,\mbox{\scriptsize lower}}} \theoremstyle{plain} \newtheorem{theorem}{Theorem} \newtheorem*{theorem*}{Theorem} \newtheorem{lemma}{Lemma} \newtheorem{proposition}[lemma]{Proposition} \newtheorem{remark}[lemma]{Remark} \newtheorem{corollary}[lemma]{Corollary} \newtheorem{claim}[lemma]{Claim} \newtheorem{assertion}[lemma]{Assertion} \newtheorem{ansatz}[lemma]{Ansatz} \newtheorem{definition}[lemma]{Definition} \newtheorem{hypothesis}[lemma]{Hypothesis} \newtheorem{example}[lemma]{Example} \newtheorem{question}[lemma]{Question} \newtheorem{convention}[lemma]{Convention} \DeclareFontFamily{U}{mathx}{\hyphenchar\font45} \DeclareFontShape{U}{mathx}{m}{n}{ <5> <6> <7> <8> <9> <10> <10.95> <12> <14.4> <17.28> <20.74> <24.88> mathx10 }{} \DeclareSymbolFont{mathx}{U}{mathx}{m}{n} \DeclareFontSubstitution{U}{mathx}{m}{n} \DeclareMathAccent{\widecheck}{0}{mathx}{"71} \DeclareMathAccent{\widetilde}{0}{mathx}{"72} \DeclareMathAccent{\widebar}{0}{mathx}{"73} \DeclareMathAccent{\widevec}{0}{mathx}{"74} \DeclareMathAccent{\widehat}{0}{mathx}{"70} \DeclareMathAccent{\widefrown}{0}{mathx}{"75} \DeclareMathAccent{\chinesehat}{0}{mathx}{"69} \newenvironment{smatrix}{\bigl(\begin{smallmatrix}}{\end{smallmatrix}\bigr)} \def\sign{\varepsilon} \def\cqfd{\hfill$\Box$} \def\Res{{\,\rm Res}} \def\Re{{\rm Re}} \def\Im{{\rm Im}} \def\Id{{\rm Id}} \def\ii{{\rm i}} \def\xx{{\bf x}} \def\sl{\mathfrak{sl}} \def\su{\mathfrak{su}} \def\Uni{{\rm Uni}} \def\Pos{{\rm Pos}} \def\Sym{{\rm Sym}} \def\tr{{\rm tr}} \def\Area{{\rm Area}} \def\End{{\rm End}} \def\SL{{\rm SL}} \def\GL{{\rm GL}} \def\SU{{\rm SU}} \def\z{\overline{z}} \newcommand{\Li}{\operatorname{Li}} \renewcommand{\and}{\quad\text{and}\quad} \newcommand{\with}{\quad\text{with}\quad} \newcommand{\dbar}{{\bar\partial}} \newcommand{\del}{{\partial}} 
\renewcommand{\matrix}[1]{\left(\begin{array}{cc} #1\end{array}\right)} \newcommand{\red}[1]{{\color{red}#1}} \newcommand{\blue}[1]{{\color{blue}#1}} \newcommand{\note}[1]{\marginpar{\Tiny \color{red}#1}} \setlength{\marginparwidth}{2.5cm} \newcommand{\wc}[1]{\widecheck{#1}} \newcommand{\wf}[1]{\widefrown{#1}} \newcommand{\cv}[1]{\underline{#1}} \newcommand{\wtcv}[1]{\widetilde{\underline{#1}}} \title[Loop group methods for the non-abelian Hodge correspondence]{Loop group methods for the non-abelian Hodge correspondence on a 4-punctured sphere} \author{Lynn Heller} \author{Sebastian Heller} \author{Martin Traizet} \address{Beijing Institute of Mathematical Sciences and Applications\\ Beijing, China} \email{[email protected]} \address{Beijing Institute of Mathematical Sciences and Applications\\ Beijing, China} \email{[email protected]} \address{Institut Denis Poisson, CNRS UMR 7350 \\ Facult\'e des Sciences et Techniques \\ Universit\'e de Tours\\France } \email{[email protected] } \begin{document} \begin{abstract} The non-abelian Hodge correspondence is a real analytic map between the moduli space of stable Higgs bundles and the deRham moduli space of irreducible flat connections mediated by solutions to the self-duality equations. In this paper we construct self-duality solutions for strongly parabolic $\mathfrak{sl}(2,\C)$ Higgs fields on a $4$-punctured sphere with parabolic weights $t \sim 0$ using complex analytic methods. We identify the rescaled limit hyper-K\"ahler moduli space $\mathcal M_t$ at $t=0$ to be the completion of the nilpotent orbit in $\mathfrak{sl}(2, \C)$ modulo a $\Z_2\times\Z_2$ action, equipped with the Eguchi-Hanson metric. Our methods and computations are based on the twistor approach to the self-duality equations using Deligne and Simpson's $\lambda$-connections interpretation. By construction we can compute the Taylor expansions of the holomorphic symplectic form $\varpi_t$ on $\mathcal M_t$ at $t=0$ which turn out to have closed form expressions in terms of multiple polylogarithms (MPLs). The geometric properties of $\mathcal M_t$ lead to some identities of certain MPLs which we believe deserve further investigations. \end{abstract} \thanks{\\We thank Philip Boalch, Andy Neitzke and Hartmut Wei{\ss} for helpful comments. 
Moreover, we would like to thank the anonymous referee for a very thorough report which greatly helped to improve the presentation.\\ LH and SH have been supported by the {\em Deutsche Forschungsgemeinschaft} within the priority program {\em Geometry at Infinity} and by the Beijing Natural Science Foundation IS23002 (LH) and IS23003 (SH).\\MT is supported by the French ANR project Min-Max (ANR-19-CE40-0014).\\ On behalf of all authors, the corresponding author states that there is no conflict of interest and a supplementary Mathematica file can be found under the web address \href{https://www.idpoisson.fr/traizet/}{https://www.idpoisson.fr/traizet/}} \setcounter{tocdepth}{1} \maketitle \tableofcontents \section{Introduction} Hitchin's self-duality equations \cite{Hi1} on a degree zero and rank two hermitian vector bundle $V \rightarrow \Sigma$ over a compact Riemann surface $\Sigma$ are equations on a pair $(\nabla, \Psi)$ consisting of a special unitary connection $\nabla$ and an (trace free) endomorphism valued $(1,0)$-form $\Psi$ satisfying \begin{equation}\label{SD}\dbar^\nabla \Psi = 0 \quad \text{ and }\quad F^\nabla + [\Psi, \Psi^*] = 0.\end{equation} These equations are equivalent to the flatness of the whole associated family of connections \begin{equation}\label{nabla-lambda}\nabla^\lambda = \nabla + \lambda^{-1} \Psi + \lambda \Psi^*\end{equation} parametrized by $\lambda \in \C^*$ -- the spectral parameter. Solutions to \eqref{SD} give rise to equivariant harmonic maps from the Riemann surface into the hyperbolic 3-space SL$(2, \C)/ \SU(2)$ by considering the Higgs field $\Psi$ as the $(1,0)$-part of the differential of the harmonic map \cite{Do}. An application of a classical result by Eells-Sampson \cite{ES} then shows the existence of a unique solution in each homotopy class of equivariant maps, i.e., when prescribing the (totally reducible) monodromy of the harmonic map \cite{Do}. As first recognized by Hitchin \cite{Hi1}, the moduli space of solutions to the self-duality equations (modulo gauge transformations) $\mathcal M_{SD}$ has two very different incarnations as complex analytic spaces. Firstly as the moduli space of Higgs bundles $\mathcal M_{Higgs},$ i.e., as the moduli space of polystable pairs $(\dbar_V, \Psi)$ satisfying $\dbar_V\Psi=0$, and secondly as the moduli space of totally reducible flat connections $\mathcal M_{dR}$. The non-abelian Hodge correspondence is the map between $\mathcal M_{Higgs}$ and $\mathcal M_{dR}$ obtained through solutions to Hitchin's equations. Both complex analytic spaces $\mathcal M_{Higgs}$ and $\mathcal M_{dR}$ induce anti-commuting complex structures on $\mathcal M_{SD}$ turning it into a hyper-K\"ahler manifold when equipped with its natural $L^2$-metric. Since the mapping is mediated by the harmonic map this correspondence is not explicit and it is not possible to see every facet of the geometry within one framework only. Every hyper-K\"ahler manifold can be described using complex analytic data (subject to additional reality conditions) via twistor theory \cite{HKLR}. For $\mathcal M_{SD}$, the twistor space has been identified with the so-called Deligne-Hitchin moduli space $\mathcal M_{DH}$ introduced by Deligne and Simpson \cite{Si}. 
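Let us also record, for later reference, the elementary computation behind the equivalence of \eqref{SD} with the flatness of the family \eqref{nabla-lambda}: since $\Psi$ is a $(1,0)$-form and $\Psi^*$ a $(0,1)$-form, the terms $[\Psi\wedge\Psi]$ and $[\Psi^*\wedge\Psi^*]$ vanish on a Riemann surface, and the curvature of $\nabla^\lambda$ expands as
\[F^{\nabla^\lambda}=\lambda^{-1}\,\dbar^\nabla\Psi+\bigl(F^\nabla+[\Psi,\Psi^*]\bigr)+\lambda\,\del^\nabla\Psi^*.\]
Hence $\nabla^\lambda$ is flat for all $\lambda\in\C^*$ if and only if all three coefficients vanish; the vanishing of the coefficient of $\lambda$ is equivalent to that of the coefficient of $\lambda^{-1}$ by taking adjoints, so the flatness of the whole family is equivalent to \eqref{SD}.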
The space $\mathcal M_{DH}$ is obtained by gluing the moduli space of $\lambda$-connections on the Riemann surface with the moduli space of $\lambda$-connections on the complex conjugate Riemann surface, and the associated family of flat connections \eqref{nabla-lambda} naturally extends to a special real holomorphic section -- a twistor line. The main subject of the paper is to construct twistor lines and the hyper-K\"ahler structure of $\mathcal M_{SD}$ {\em `entirely complex analytically, so we bypass the nonlinear elliptic theory necessary to define the harmonic metrics' \footnote{C. Simpson in \cite[Section 4]{Si}}.} The self-duality equations \eqref{SD} generalize to punctured Riemann surfaces by imposing first order poles of the Higgs fields and a growth condition of the harmonic metric determined by their parabolic weights, called tameness, see Simpson \cite{Si1}. In the following, we restrict to {\em strongly} parabolic case, where the Higgs fields have nilpotent residues adapted to the parabolic filtration. Then the associated family of flat connections has (up to conjugation) the same local monodromy at each puncture \cite[table on page 720]{Si1} for all spectral parameters $\lambda\in\C^*$. The moduli space of solutions has again a hyper-K\"ahler structure which was first studied by Konno \cite{Konno}. The complex structure $I$ denotes hereby the one of the moduli space of (polystable) strongly parabolic Higgs fields, and the complex structure $J$ denotes that of the moduli space of logarithmic connections with prescribed conjugacy class of their local monodromy. In this paper, we study the simplest non-trivial case, where the underlying Riemann surface is a 4-punctured sphere and restrict to Fuchsian systems, i.e., logarithmic connections on the trivial holomorphic (rank 2) bundle. On the Higgs side, we assume that the parabolic Higgs bundles are strongly parabolic with the same parabolic weights $\pm t$ with $t\in(0,\tfrac{1}{4})$ at each of the four singular points. The moduli space of self-duality equations is then denoted by $\mathcal M_t.$ For $t\rightarrow0$ the space of polystable parabolic Higgs pairs bounded by $tC$ (for some fixed $C>0$) degenerates to the point given by the trivial parabolic Higgs pair. When rescaling by $t$ its blow-up limit $\tfrac{1}{t}\mathcal M_t$ at $t=0$ is given by the moduli space of parabolic Higgs fields $\Phi$ on the trivial holomorphic bundle. Using an implicit theorem argument we construct for $t \sim 0$ twistor lines in the (parabolic) Deligne-Hitchin moduli space near this singular limit using complex analytic methods, i.e., we construct solutions for which the Higgs field $\Psi \sim t\Phi$ is small enough. The length of $\Phi$ is measured using its $L^2$-norm, or equivalently by the energy $\mathcal E(\nabla, \Phi)$ of the equivariant harmonic map corresponding to the Higgs pair. \begin{theorem}\label{construction} For $t\sim 0$ fixed let $C>0$ and consider Higgs fields with $\mathcal E(d, \Phi) <C.$ Then, there exists $\varepsilon >0,$ depending only on the constant $C$, such that twistor lines $s_t$ can be constructed with $s_t(\lambda = 0) = t \Phi$ using complex analytic methods only. In particular, the method allows to explicitly compute the Taylor expansions of the twistor lines in $t$. 
\end{theorem} In fact, we find two different families of real holomorphic sections $s^\pm_t$ of the Deligne-Hitchin moduli space for a given initial Higgs field $\Phi.$ Only one of them, $s^+_t$, turns out to consist of twistor lines, while the other family $s^-_t$ corresponds to higher solutions as introduced in \cite{HH}. By construction, Theorem \ref{construction} also allows us to study the Taylor expansions of the geometric structures on $\mathcal M_t$, such as its hyper-K\"ahler structure, explicitly. In particular, the (rescaled) limit metric for $t\rightarrow 0$ can be identified with the Eguchi-Hanson metric $g_{EH}$ modulo a $\mathbb Z_2\times\mathbb Z_2$-action. For rational $t = \tfrac{l}{2k}$, we can take a suitable $k$-fold cover $\Sigma_k$ of the four-punctured sphere (depending on the value of $t$), such that the (pull-backs of) logarithmic connections on the sphere are gauge equivalent to symmetric (smooth) connections on the compact Riemann surface $\Sigma_k$ of genus $k-1 = O(\tfrac{1}{t})$. The moduli space of equivariant solutions to the self-duality equations $\mathcal M^l_{SD}$ on $\Sigma_k$ then converges (on every compact subset) to a corresponding compact subset of the Eguchi-Hanson space modulo the $\mathbb Z_2\times\mathbb Z_2$-action as $k\rightarrow \infty$. More precisely, \begin{theorem}\label{thm:compactcon} Let $C>0$ and $l\in\N^{>0}$ be fixed. Consider the compact subspaces \[\mathcal C^l_k:=\{[\nabla,\Phi]\in \mathcal M^l_{SD}(\Sigma_k)\mid \mathcal E(\nabla,\Phi)\leq C\}\] of $\mathcal M^l_{SD}$ with the induced hyper-K\"ahler metric $g_k$. Then for $k \rightarrow \infty$, we obtain smooth convergence \[(\mathcal C^l_k,g_k)\longrightarrow (\{v\in T^*\C P^1\mid \mathcal E_{EH}(v)\leq C\}, 32\pi\,l\, g_{EH})/(\Z_2\times\Z_2).\] \end{theorem} The main advantage of the approach is that the higher order terms of the metric expansion are also accessible. We have \begin{theorem}\label{mainT} The hyper-K\"ahler metric of the moduli space $\mathcal M_t$ of strongly parabolic Higgs bundles on the 4-punctured sphere is real analytic in the weight $t$, and there exists an explicit algorithm for computing its Taylor expansion at the trivial parabolic Higgs pair at $t=0$. More precisely, when computing the Taylor expansion of the twistor lines in $t$ at the trivial connection, the $n$-th order coefficients are polynomials in $\lambda$ of degree $n+1$ and can be expressed explicitly in terms of multiple polylogarithms of depth and weight at most $n+1$. \end{theorem} The second family of solutions also yields a hyper-K\"ahler metric; the $n$-th order coefficients of the two metric expansions differ only by a factor of $(-1)^n$. It would be interesting to compare our results to those of \cite{FMSW}, where the moduli space is studied for fixed weights and large Higgs fields in the regular locus, as well as in the context of current developments on hyper-K\"ahler 4-manifolds (see e.g. \cite{CC1,CC2,CC3}), and in view of the different ways of obtaining hyper-K\"ahler structures on moduli spaces (see e.g. \cite{Boa} and the references therein).\\ The paper is organized as follows. In Section \ref{pre} we introduce Higgs bundles, the associated moduli spaces as well as their hyper-K\"ahler structures for compact Riemann surfaces. Their complex geometric properties are encoded in the Deligne-Hitchin moduli space, see \cite{Si}, and we discuss twistor lines as special real holomorphic sections induced by self-duality solutions.
Furthermore, we identify the twisted holomorphic symplectic structure on the Deligne-Hitchin moduli space with (a version of) Goldman's symplectic structure. We extend this discussion to the case of punctured surfaces and strongly parabolic Higgs pairs in Section \ref{sec:paradh}, and explain the relationship between the compact and punctured cases for rational weights. Thereafter, we give the ansatz in terms of Fuchsian systems for constructing real holomorphic sections through an implicit function theorem argument and loop group methods in Section \ref{Ansatz} and prove Theorem \ref{construction} in Section \ref{IFT}. To find appropriate coordinates, we first consider the 4-fold covering of the Higgs bundle moduli space $\mathcal M_{Higgs}$ and rescale it by the factor $\frac{1}{t}$. The limit space at $t=0$ is then given by the blow-up of $\C^2/\Z_2$ (with coordinates $(u,v)$) at the origin. To perform the implicit function theorem argument, we thus first consider the regular case in Theorem \ref{thm:IFT} and then the case $(u, v) \rightarrow (0,0)$ in Theorem \ref{blowuplimit}. It turns out that in the appropriate $(u, v) \rightarrow (0,0)$ limit the Higgs field vanishes, and the corresponding sections $s^+_0$ are given by flat unitary connections, which are twistor lines. Since twistor lines form a connected component of the space of real holomorphic sections, all constructed sections must be twistor lines as well. Interestingly, we find a Lax pair type equation describing the deformation given by the implicit function theorem. In Section \ref{NAHCt=0}, we compute the limit non-abelian Hodge correspondence at $t=0$, and identify the rescaled limit hyper-K\"ahler metric with the Eguchi-Hanson metric. One advantage of our construction is that we can compute the Taylor expansions of all geometric quantities. As an example, we compute the first order derivatives of the parameters in Section \ref{firstderivative} and put them together to obtain the first order expansion of the non-abelian Hodge correspondence, the twisted holomorphic symplectic structure and the hyper-K\"ahler metric. In the final Section \ref{hod} we analyse the structure of the higher order derivatives of the twistor sections with respect to $t$. As in \cite{HHT3} for minimal surfaces in the 3-sphere, the $n$-th order derivatives of the parameters are polynomials in $\lambda$ of degree $n+1$. Since the twisted holomorphic symplectic form is a Laurent polynomial of degree one in $\lambda$, evaluating it on meromorphic 1-forms which represent tangent vectors to $\mathcal M(t)$ yields infinitely many cancellations for higher order terms in $\lambda$, leading to identities for some iterated integrals, called {\em $\Omega$-values}, which can be expressed in terms of multiple polylogarithms. In Section \ref{nOid} we give some identities of depth three $\Omega$-values obtained from this idea which are non-trivial in the sense that they cannot be derived from the shuffle and stuffle relations alone. \section{Preliminaries}\label{pre} In this preliminary section, we recall basic results about the moduli spaces of solutions to Hitchin's self-duality equations with a focus on the twistorial description of their hyper-K\"ahler structure. Unless stated otherwise, the underlying Riemann surface $\Sigma$ is assumed to be compact throughout the section.
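Before proceeding, and as an aside to the $\Omega$-value identities mentioned at the end of the introduction, the following minimal Python sketch illustrates the nested sums defining multiple polylogarithms and the stuffle product referred to in Section \ref{nOid}; the truncation parameter and the function names \texttt{mpl1}, \texttt{mpl2} are ad hoc choices of ours and not part of the construction.
\begin{verbatim}
# Minimal numerical sketch of multiple polylogarithms (MPLs) and the stuffle
# relation, with the convention used here; names and truncation are ad hoc.
def mpl1(s, z, N=4000):
    # depth-one polylogarithm Li_s(z) = sum_{n>=1} z^n / n^s
    return sum(z**n / n**s for n in range(1, N + 1))

def mpl2(s1, s2, z1, z2, N=4000):
    # depth-two MPL Li_{s1,s2}(z1,z2) = sum_{n1>n2>=1} z1^n1 z2^n2/(n1^s1 n2^s2)
    inner = 0.0   # accumulates sum_{m < n} z2^m / m^s2
    total = 0.0
    for n in range(1, N + 1):
        total += (z1**n / n**s1) * inner
        inner += z2**n / n**s2
    return total

if __name__ == "__main__":
    a, b = 2, 3
    x, y = 0.3, -0.5
    lhs = mpl1(a, x) * mpl1(b, y)
    rhs = mpl2(a, b, x, y) + mpl2(b, a, y, x) + mpl1(a + b, x * y)
    print(lhs, rhs)   # the two values agree up to truncation error
\end{verbatim}
For $|x|,|y|<1$ the truncated sums converge rapidly, and the two printed values agree to the displayed precision, illustrating the stuffle relation $\Li_a(x)\Li_b(y)=\Li_{a,b}(x,y)+\Li_{b,a}(y,x)+\Li_{a+b}(xy)$.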
\subsection{The hyper-K\"ahler structures and their twistor spaces} The moduli space of irreducible solutions of Hitchin's self-duality equations $\mathcal M_{SD}$ has through the non-abelian Hodge correspondence three complex structures $I,J,K= I J$ which are K\"ahler with respect to the same Riemannian metric $g$. In fact there exist a whole $\C P^1$ worth of complex structures defined by $$I_\lambda = \frac{1-|\lambda|^2}{1+|\lambda|^2}I+\frac{\lambda + \bar \lambda}{1+|\lambda|^2}J + \frac{i(\lambda - \bar \lambda)}{1+|\lambda|^2} K$$ for $\lambda \in \C \subset \C P^1.$ Within this family we find the complex structure $I$ at $\lambda = 0$, and the complex structure $J$ at $\lambda = 1$. Let $$\omega_I = - g(., I .) \quad \omega_J = - g(., J.) \and \omega_K = - g(., K .)$$ be the associated K\"ahler forms so that $h= g+ i\omega_L$ is the corresponding hermitian metric for $L\in\{I,J,K\}$. The twistor space of a hyper-K\"ahler manifold $\mathcal M$, introduced in \cite{HKLR}, is the smooth manifold$$\mathcal P := \mathcal M \times \C P^1$$ equipped with the (integrable) complex structure $$\mathbb I = \left (I_\lambda, \ii \right),$$ at the point $x \in \mathcal M $ and $\lambda \in \C \subset \C P^1$, where $\ii$ is the standard complex structure of $\C P^1.$ Furthermore, the twistor space has a natural anti-holomorphic involution $\mathcal T$ given by $(x,\lambda)\mapsto(x,-\bar\lambda^{-1})$. By construction the twistor space has a holomorphic projection $\pi$ to $\C P^1$ and a twisted relative holomorphic symplectic form given by \[\wh\varpi\in H^0(P,\Lambda^2V^*\otimes\mathcal O(2))\] where $V=\ker d\pi$ (is the complex tangent bundle to the fibers) and $\mathcal O(2)$ denotes the dual of the pull-back of the canonical bundle over $\C P^1.$ In terms of the K\"ahler forms the twisted relative holomorphic symplectic form has the following explicit expression \begin{equation}\label{deftwistedOmega}\wh\varpi=\varpi\otimes\lambda\frac{\partial}{\partial\lambda},\end{equation} where \begin{equation}\label{deftwistedOmega2}\varpi=\lambda^{-1}(\omega_J+i\omega_K)-2\omega_I-\lambda(\omega_J-i\omega_K),\end{equation} see \cite[Equation (3.87)]{HKLR} or \eqref{eq:omlam} below. Sections of $\mathcal P$ are holomorphic maps $s \colon \C P^1 \rightarrow \mathcal P$ with $\pi \circ s = \Id.$ When restricting to $\mathcal M = \mathcal M_{SD},$ the simplest sections are {\em constant} sections \begin{equation}\label{twistorPline}\lambda \mapsto ([\nabla, \Psi], \lambda)\end{equation} for a solution of the self-duality equations $(\nabla, \Psi)$. By dimension count, these twistor lines give rise to an open subspace of the space of real holomorphic sections \cite{HKLR}. The following well known theorem then follows from the completeness of the moduli space of self-duality solutions \cite{Hi1}. \begin{theorem} \label{connectedcomponent} Twistor lines form an open and closed subset of the space of real holomorphic sections. \end{theorem} A convenient set-up for studying associated families of flat connections obtained from solutions to self-duality equations is to consider them as real sections of the Deligne-Hitchin moduli space $\mathcal M_{DH}$. \subsection{The Deligne-Hitchin moduli space $\mathcal M_{DH}$}\label{DH} The Deligne-Hitchin moduli space was first introduced by Deligne (see \cite{Si, Si2}) as a complex analytic way of viewing the associated twistor space of the moduli space of solutions to the self-duality equations. 
As such it interpolates between the moduli space of Higgs bundles $\mathcal M_{Higgs}$ and the moduli space of flat connections $\mathcal M_{dR}$. \begin{definition} Let $\Sigma$ be a compact Riemann surface and $\lambda\in\C$ fixed. A (integrable) $\lambda$-connection on a $\mathcal C^\infty$-complex vector bundle $V\to \Sigma$ is a pair $(\dbar, D)$ consisting of a holomorphic structure $\dbar$ on $V$ and a linear first order differential operator \[D\colon\Gamma(\Sigma,V)\to\Omega^{(1,0)}(\Sigma,V )\] satisfying the $\lambda$-Leibniz rule \[D(fs)=\lambda\partial f\otimes s+f Ds,\] where $\partial$ is the trivial $\partial$-operator for functions $f$ and $s$ is a section of $V$, and the integrability condition \begin{equation}\label{intcond} D\dbar+\dbar D=0.\end{equation} \end{definition} \begin{remark} The operators $D$ and $\dbar$ also act on $(0,1)$-forms and $(1,0)$-forms respectively. For $\lambda = 0$ the integrability condition \eqref{intcond} is equivalent to \[D=\Psi\in H^0(M, K_\Sigma \End(V))\] being a holomorphic endomorphism-valued 1-form, and for $\lambda \neq 0$ we have that \[\nabla=\tfrac{1}{\lambda}D+\dbar\] is a flat connection. \end{remark} \begin{example} Consider on the hermitian bundle $V \to\Sigma $ a solution $(\nabla=\del^\nabla+\dbar^\nabla, \Psi)$ of the self-duality equations. Then, the pair \[(\dbar^\nabla + \lambda \Psi^*, \lambda \del^\nabla + \Psi) \] defines a $\lambda$-connection on $V$ for every $\lambda \in \C$ which coincides with the Higgs pair $(\dbar^\nabla, \Psi)$ at $\lambda = 0$ and with the flat connection $ \nabla^1 = \nabla + \Psi + \Psi^*$ at $\lambda = 1.$ \end{example} \begin{definition}\label{def:sllambda} A $\SL (2, \C)$ $\lambda$-connection is a $\lambda$-connection on a rank 2 vector bundle $V\rightarrow \Sigma$, such that the induced $\lambda$-connection on the determinant bundle $\Lambda^2 V$ is trivial.\end{definition} \begin{definition} A $\SL(2, \C)$ $\lambda$-connection $(\dbar, D)$ is called stable, if every $\dbar$-holomorphic subbundle $L\subset V$ with \[D(\Gamma(\Sigma,L))\subset\Omega^{(1,0)}(\Sigma,L)\] is of negative degree and semi-stable if its degree is non-positive. All other $\lambda$-connections are called unstable. A $\SL(2,\C)$ $\lambda$-connection is called polystable if it is either stable or the direct sum of dual $\lambda$-connections on degree zero line bundles. \end{definition} For $\lambda\neq0$, every $\dbar$-holomorphic and $D$-invariant line subbundle $L\subset V$ must be parallel with respect to the flat connection $\nabla=\tfrac{1}{\lambda}D+\dbar$. Therefore, the degree of $L$ is $0$ and the $\lambda$-connection $(\dbar, D)$ is semi-stable. Moreover, $(\dbar, D)$ is stable if and only if the flat connection $\nabla=\tfrac{1}{\lambda}D+\dbar$ is irreducible. The situation is different at $\lambda = 0$ and we need to restrict to polystable $\lambda$-connections to obtain a well-behaved moduli space. \begin{definition} The Hodge moduli space $\mathcal M_{Hod}=\mathcal M_{Hod}(\Sigma)$ is the space of all polystable $\SL(2,\C)$ $\lambda$-connections on $V=\Sigma\times\C^2\to \Sigma$ modulo gauge transformations, i.e., $\mathcal M_{Hod}$ consists of gauge classes of triples $(\lambda,\dbar,D)$ for $\lambda \in \C$ and $(\dbar,D)$ a polystable $\lambda$-connection. The gauge-equivalence class of $(\lambda,\dbar,D)$ is denoted by $$[\lambda,\dbar,D]\in\mathcal M_{Hod}$$ or by \[[\lambda,\dbar,D]_\Sigma \in\mathcal M_{Hod}( \Sigma)\] to emphasis its dependence on the Riemann surface. 
\end{definition} The Hodge moduli space admits a natural holomorphic map \[\pi_\Sigma\colon\mathcal M_{Hod}\longrightarrow \C; \quad [\lambda,\dbar,D] \longmapsto\lambda\] whose fiber at $\lambda=0$ is the (polystable) Higgs moduli space $\mathcal M_{Higgs}$, and at $\lambda=1$ it is the deRham moduli space of flat (and totally reducible) $\SL(2,\C)$-connections $\mathcal M_{dR}$. We consider both spaces as complex analytic spaces endowed with their natural complex structures $I$ and $J,$ respectively. The next step is then to compactify the $\lambda$-plane $\C$ to $\C P^1.$ For a given Riemann surface $\Sigma$ let $\overline \Sigma$ be the Riemann surface with conjugate complex structure. As differentiable manifolds we have $\Sigma\cong\overline \Sigma$ and thus their deRham moduli spaces of flat $\SL(2,\C)$-connections are naturally isomorphic. Then the two Hodge moduli spaces $\mathcal M_{Hod}(\Sigma)$ and $\mathcal M_{Hod}(\overline \Sigma)$ can be the glued together via Deligne gluing \cite{Si2} \[\mathcal G \colon\mathcal M_{Hod}(\Sigma)\setminus \pi_\Sigma^{-1}(0)\longrightarrow \mathcal M_{Hod}(\overline \Sigma)\setminus \pi_{\overline \Sigma}^{-1}(0);\quad [\lambda,\dbar,D]_\Sigma\longmapsto[\tfrac{1}{\lambda},\tfrac{1}{\lambda}D,\tfrac{1}{\lambda}\dbar]_{\overline \Sigma}\] along $\lambda \in \C^*$ to give Deligne-Hitchin moduli space \[\mathcal M_{DH}=\mathcal M_{Hod}(\Sigma)\cup_{\mathcal G}\mathcal M_{Hod}(\overline \Sigma).\] The natural fibration $\pi_\Sigma$ on $\mathcal M_{Hod} $ extends holomorphically to the whole Deligne-Hitchin moduli space to give $\pi\colon\mathcal M_{DH}\to\C P^1$ whose restriction to $\mathcal M_{Hod}(\overline \Sigma)$ is $1/\pi_{\overline \Sigma}.$ \begin{remark}\label{smoothMdh} Note that the Deligne gluing map $\mathcal G$ maps stable $\lambda$-connections over $\Sigma$ to stable $\tfrac{1}{\lambda}$-connections on $\overline \Sigma.$ Hence, it maps the smooth locus of $\mathcal M_{Hod}(M)$ (consisting of stable $\lambda$-connections) to the smooth locus of $\mathcal M_{Hod}(\overline M)$, and thus $\mathcal M_{DH}$ is equipped with a complex manifold structure at all stable points. Moreover, since $\lambda$-connections for $\lambda \in \C^*$ can be reinterpreted as flat connections, we can identify $$\C^*\times \mathcal M_{dR}(\Sigma) = \mathcal M_{Hod}(\Sigma)\setminus \pi_\Sigma^{-1}(0).$$ \end{remark} \begin{definition} A section of $\mathcal M_{DH}$ is a holomorphic map $$s: \C P^1 \longrightarrow \mathcal M_{DH}$$ such that $\pi \circ s = $Id. \end{definition} \begin{example} The associated family of flat connections $\nabla^\lambda$ \eqref{nabla-lambda} to a solution of the self-duality equations \eqref{SD} gives rise to a section of $\mathcal M_{DH} \rightarrow \C P^1$ via \begin{equation}\label{twistorsection} s(\lambda) = [\lambda,\dbar^\nabla+\lambda\Psi^*,\lambda \partial^\nabla+\Psi]_\Sigma\in\mathcal M_{Hod}(\Sigma)\subset\mathcal M_{DH}.\end{equation} When identifying the Deligne-Hitchin moduli space with the twistor space $\mathcal P\to\C P^1$ of the hyper-K\"ahler space $\mathcal M_{SD}$ (at the smooth points), the section given by \eqref{twistorsection} is identified with the `constant' twistor line \eqref{twistorPline}, see \cite{Si}. \end{example} \begin{definition} A section $s$ of $\mathcal M_{DH}$ is called stable, if the $\lambda$-connection $s(\lambda)$ is stable for all $\lambda\in \C^*$ and if the Higgs pairs $s(0)$ on $\Sigma$ and $s(\infty)$ on $\overline{\Sigma}$ are stable. 
\end{definition} It follows from Hitchin \cite{Hi1} and Donaldson \cite{Do} that every stable point in $\mathcal M_{DH}$ uniquely determines a twistor line. Therefore, a twistor line $s$ is already stable if $s(\lambda_0)$ is stable for some $\lambda_0\in\C$. Moreover, twistor lines are in one-to-one correspondence with self-duality solutions \eqref{SD}. The following characterization of twistor lines as particular {\em negative real holomorphic sections} of $\mathcal M_{DH}$ is useful to decide when certain real sections give rise to global solutions to the self-duality equations. \subsection{Automorphisms of the Deligne-Hitchin moduli space} To define a real structure on $\mathcal M_{DH}$ we need to look at some natural automorphisms of Deligne-Hitchin moduli space first. For every $\mu\in\mathbb C^*$ the (multiplicative) action of $\mu$ on $\C P^1$ has a natural lift to $\mathcal M_{DH}$ by \[\mu([\lambda,\dbar,D])=[\mu\lambda,\dbar,\mu D].\] \begin{definition} We denote by $N : \mathcal M_{DH} \rightarrow \mathcal M_{DH}$ the map given by multiplication with $\mu=-1$, namely \[[\lambda,\dbar,D] \longmapsto [-\lambda,\dbar, -D].\] \end{definition} Furthermore, we have a natural anti-holomorphic automorphism denoted by $C.$ \begin{definition}\label{defC2} Let $C\colon \mathcal M_{DH}\longrightarrow \mathcal M_{DH}$ be (the continuation of) the map \[\widetilde C: \mathcal M_{Hod}(\Sigma) \longrightarrow \mathcal M_{Hod}(\overline \Sigma)\] given by \begin{equation}\label{defcomcon}[\lambda,\dbar,D] \longmapsto [\bar\lambda,\bar\dbar,\bar D]_{\overline \Sigma}.\end{equation} To be more concrete, for \[\dbar = \dbar_0+\eta \quad \text{and} \quad D = \lambda(\partial_0)+\omega\] where $d=\partial_0+\dbar_0$ is the trivial connection, $\eta\in\Omega^{0,1}(\Sigma,\mathfrak{sl}(2,\C)),$ and $\omega\in\Omega^{1,0}(\Sigma,\mathfrak{sl}(2,\C)),$ we define the complex conjugate on the trivial $\C^2$-bundle over $\overline \Sigma$ to be \[\bar \dbar = \partial_0+\bar\eta \quad \text{and} \quad \bar D = \bar\lambda(\dbar_0)+\bar\omega.\] \end{definition} The map $C$ covers the map \[\lambda\in\C P^1\longmapsto \bar\lambda^{-1}\in\C P^1.\] Since $C$ and $N$ commute and both maps are involutive, their composition \[\mathcal T=CN\] is an involution as well, which covers the fixed-point free involution $\lambda\mapsto - \bar\lambda^{-1}$ on $\C P^1.$ \subsection{Real sections}\label{subSrealS} Consider the anti-holomorphic involution of the associated Deligne-Hitchin moduli space \[\mathcal T=CN\colon\mathcal M_{DH}\longrightarrow\mathcal M_{DH} \] covering the antipodal involution \[\lambda\longmapsto-\bar\lambda^{-1}\] of $\mathbb CP^1.$ We call a holomorphic section $s$ of $\mathcal M_{DH}$ real (with respect to $\mathcal T$) if \begin{equation}\label{taurealdefin}\mathcal T(s(\lambda))=s(-\bar\lambda^{-1})\end{equation} holds for all $\lambda\in\mathbb CP^1$. \begin{example} Twistor lines \eqref{twistorsection} are real holomorphic sections with respect to $\mathcal T$. 
Let $(\nabla,\Psi)$ be a solution of the self-duality equations on $\Sigma$ with respect to the standard hermitian metric on $\underline \C^2\to \Sigma.$ Because we are dealing with $\mathfrak{sl}(2,\C)$-matrices, the unitary connection $\nabla$ satisfies \[\nabla=\nabla^*=\bar\nabla.\begin{pmatrix}0&1\\-1&0\end{pmatrix}\] which is equivalent to \[\bar\partial^\nabla=\overline{\partial^\nabla}.\begin{pmatrix}0&1\\-1&0\end{pmatrix}\quad\text{and}\quad \partial^\nabla =\overline{\bar\partial^\nabla}.\begin{pmatrix}0&1\\-1&0\end{pmatrix},\] and analogously we have \[\Psi=-\begin{pmatrix}0&-1\\1&0\end{pmatrix}\overline{\Psi^*} \begin{pmatrix}0&1\\-1&0\end{pmatrix}\quad\text{and}\quad \Psi^*=-\begin{pmatrix}0&-1\\1&0\end{pmatrix}\bar\Psi \begin{pmatrix}0&1\\-1&0\end{pmatrix}.\] Therefore, the twistor line \eqref{twistorsection} satisfies \begin{equation} \begin{split} \mathcal T(s(\lambda))&=\mathcal T([\lambda, \bar\partial^\nabla+\lambda\Psi^*,\Psi+\lambda\partial^\nabla]_\Sigma)\\ &=[-\bar\lambda^{-1}, \bar\lambda^{-1}(\overline{\Psi +\lambda\partial^\nabla}),-\bar\lambda^{-1}(\overline{\bar\partial^\nabla+\lambda\Psi^*})]_\Sigma\\ &=[-\bar\lambda^{-1}, \overline{\partial^\nabla}+\bar\lambda^{-1}\overline{\Psi},-\overline{\Psi^*} -\bar\lambda^{-1}\overline{\bar\partial^\nabla}]_\Sigma\\ &=[(-\bar\lambda^{-1}, \overline{\partial^\nabla}+\bar\lambda^{-1}\overline{\Psi},-\overline{\Psi^*} -\bar\lambda^{-1}\overline{\bar\partial^\nabla} ).\begin{pmatrix}0&1\\-1&0\end{pmatrix} ]_\Sigma\\ &=[(-\bar\lambda^{-1}, \bar\partial^\nabla-\bar\lambda^{-1}\Psi^*,\Psi -\bar\lambda^{-1}\partial^\nabla )]_\Sigma= s(-\bar\lambda^{-1}). \end{split} \end{equation} \end{example} Let $s(\lambda)$ be a real holomorphic section with $\nabla^\lambda\sim \lambda^{-1}\Psi+\nabla+\dots$ being a lift to the space of flat connections such that $(\bar\partial^\nabla,\Psi)$ is stable. For the existence of such lifts see \cite[Lemma 2.2 ] {BHR}. Then the reality condition \eqref{taurealdefin} gives rise to a holomorphic family of gauge transformations $g(\lambda)$ satisfying $$\overline{\nabla^{-\bar\lambda^{-1}}}= \nabla^\lambda .g(\lambda).$$ Applying this equation twice we obtain $$\nabla^\lambda.g(\lambda) \overline{g(-\bar\lambda^{-1})} = \nabla^\lambda.$$ Because the section $s$ is stable, the connections $\nabla^\lambda$ are irreducible for all $\lambda \in \mathbb C^*$. Therefore $g(\lambda) \overline{g(-\bar\lambda^{-1})}$ is a constant multiple of the identity for every $\lambda \in \C^*$. By \cite[Lemma 1.18]{HH} we can choose $g$ to be SL$(2, \C)$-valued and thus \begin{equation}\label{realeqsecsign} g(\lambda)\overline{g(-\bar\lambda^{-1})}=\pm\text{Id}.\end{equation} The sign on the right hand side is independent of the lift $\nabla^\lambda$ of $s$ and is preserved in a connected component of real sections motivating the following definition. \begin{definition}\cite[Definition 2.16]{BHR} A stable real holomorphic section $s$ of $\mathcal M_{DH}$ is called positive or negative depending on the sign of \eqref{realeqsecsign}. \end{definition} \begin{example}\label{exa:negative} Twistor lines are always negative sections. 
In fact, the associated family of flat connections is a canonical lift of the twistor line to the space of flat connections, and with respect to the standard hermitian structure on $\mathbb C^2$ the gauge \begin{equation}\label{g=delta} g(\lambda)=\begin{pmatrix}0&1\\-1&0\end{pmatrix}, \end{equation} as in \eqref{realeqsecsign}, is constant in $\lambda$ and squares to $-\text{Id}.$ \end{example} The space of real sections of $\mathcal M_{DH}$ has multiple connected components. A negative real holomorphic section lying in the connected component of the twistor lines must be a twistor line itself by Theorem \ref{connectedcomponent}. It corresponds therefore to a global solution of the self-duality equation. \subsection{Loop groups}\label{sec:loops} We briefly introduce basic notions and statements about loop groups which are relevant with regard to the paper. For details we refer to \cite{PS} or \cite{DPW}. Define \begin{itemize} \item $\Lambda \mathrm{SL}(2,\C):= \{$ real analytic maps (loops) $\Phi\colon \S^1\longrightarrow \mathrm{SL}(2,\C),\quad \lambda \longmapsto \Phi^\lambda\};$ \item $\Lambda \mathfrak{sl}(2,\C):=\{$ real analytic maps (loops) $\eta\colon \S^1\longrightarrow\mathfrak{sl}(2,\C),\quad \lambda \longmapsto \eta^\lambda\}.$ \end{itemize} Then, $\Lambda \mathrm{SL}(2,\C)$ is an infinite dimensional Frechet Lie group via pointwise multiplication with $\Lambda \mathfrak{sl}(2,\C)$ as its Lie algebra. We consider $\S^1=\{\lambda\in\C P^1\mid \lambda\bar\lambda=1\},$ and denote \begin{equation} \begin{split} \Lambda_+\mathrm{SL}(2,\C)&=\{\Phi\in\Lambda \mathrm{SL}(2,\C)\mid \Phi \text{ extends holomorphically to } \lambda=0\}\\ \Lambda_-\mathrm{SL}(2,\C)&=\{\Phi\in\Lambda \mathrm{SL}(2,\C)\mid \Phi \text{ extends holomorphically to } \lambda=\infty\} \end{split}\end{equation} and \[\Lambda_+\mathfrak{sl}(2,\C)=\{\eta\in\Lambda \mathfrak{sl}(2,\C)\mid \eta \text{ extends holomorphically to } \lambda=0\}.\] We also denote \[\Lambda_-^*\mathrm{SL}(2,\C)=\{B\in\Lambda_-\mathrm{SL}(2,\C)\mid B(\infty)=\Id\}.\] The following classical theorem, due to Birkhoff, Grothendieck and others, is essential for our method, see \cite{PS} for details and \cite{DPW} for the loop group method for harmonic maps from simply connected 2-dimensional domains into positively curved symmetric spaces. \begin{theorem}\label{thm:birk} There is an open and dense subset $\mathcal U\subset\Lambda \mathrm{SL}(2,\C)$ called the {\em big cell} such that every $g\in\mathcal U$ admits a factorization $$g=g^+ g^-$$ with $g^+\in\Lambda_+\mathrm{SL}(2,\C)$ and $g^- \in \Lambda_-^*\mathrm{SL}(2,\C).$ This splitting is unique and depends holomorphically on $g\in\mathcal U.$ The pair $(g^+,g^-)$ is called the Birkhoff factorization of $g$.\\ \end{theorem} \subsection{Reconstruction of self-duality solutions from admissible negative real sections}\label{sec:reconstruction} A section $s$ of the Deligne-Hitchin moduli space has lifts to families of flat connections in both Hodge moduli spaces. Let $$\wt\nabla^\lambda = \lambda^{-1} \Psi_1 + \nabla + \text{higher order terms in } \lambda$$ be a lift around $\lambda = 0.$ Due to $s$ being real, there exists a family of $\mathrm{SL}(2,\C)$ gauges $g(\lambda)$ satisfying $$\overline{\wt\nabla^{-\bar \lambda^{-1}}} = \wt \nabla^\lambda . g(\lambda)\,.$$ Assume that the Birkhoff factorization $$g(\lambda) = g^+(\lambda) g^-(\lambda)$$ exist for every $z \in \Sigma$. 
Assume that the section $s$ is negative, i.e, \begin{equation}\label{eq:pmgpm1}\overline{g(-\bar\lambda^{-1})}^{-1} = \overline{g^- (-\bar \lambda^{-1})}^{-1} \overline{g^+ (-\bar \lambda^{-1})}^{-1} = -g(\lambda).\end{equation} Since $\lambda \rightarrow -\bar \lambda^{-1}$ interchanges the $()^+$ and $()^-$ parts of the Birkhoff factorization, the uniqueness assertion gives \begin{equation}\label{eq:pmgpm2}g^+(\lambda) = -\overline{g^- (-\bar \lambda^{-1})}^{-1} B^{-1} \quad \text{ and } \quad g^-(\lambda) = B \overline{g^+ (-\bar \lambda^{-1})}^{-1}\end{equation} for $B=(g^+(\lambda=0))^{-1}\colon\Sigma\rightarrow \SL(2, \C)$. Combining \eqref{eq:pmgpm1} and \eqref{eq:pmgpm2} we get $\bar B B = -\Id$. This implies that $B$ lies in the same conjugacy class as $\delta = \begin{pmatrix}0 & 1\\-1 & 0 \end{pmatrix},$ i.e., there exist $G\colon\Sigma\to\mathrm{SL}(2,\C)$ satisfying $\delta=G^{-1} B \overline{G}.$ Then $$\nabla^\lambda =\wt \nabla^\lambda . (g^+(\lambda) G) $$ satisfies $ \overline{\nabla^{-\bar \lambda^{-1}}}= \nabla^\lambda .\delta$ and is therefore the associated family of a self-duality solution with respect to the standard hermitian metric. \begin{remark} In this paper we construct real holomorphic sections via loop group methods, i.e, we write down a particular lift of a real section $s$ of the Deligne-Hitchin moduli space around $\lambda =0$ in terms of a Fuchsian potential $\eta$ (i.e., a $\lambda$-dependent Fuchsian connection 1-form). To obtain the actual harmonic map into the hyperbolic 3-space $\H^3$ (or the associated self-duality solution), we need thus to first perform a global Birkhoff factorization and then show that the obtained section is negative. This needs further conditions, as examples for which the harmonic map into $\H^3$ becomes singular and intersects the boundary at infinity of $\H^3$ exists \cite{HH} as well as positive real sections with global Birkhoff factorization that give rise to harmonic maps into the de-Sitter 3-space \cite[Theorem 3.4]{BHR}. \end{remark} \subsection{Goldman's symplectic form on the moduli space of $\lambda$-connections}\label{sec:goldman} Fix $\lambda\in\C$ and consider the space of $\lambda$-connections modulo gauge transformations. Let $(\bar\partial,D)$ be a $\lambda$-connection. A tangent vector to the (infinite dimensional) space of $\lambda$-connection is given by \[(A,B)\in\Omega^{(0,1)}(\mathfrak{sl}(2,\C))\oplus\Omega^{(1,0)}(\mathfrak{sl}(2,\C))\] satisfying the linearized compatibility (flatness) condition \begin{equation} \begin{split} 0&=\bar\partial B+ DA. \end{split} \end{equation} Tangent vectors at $(\dbar, D)$ which are generated by the infinitesimal gauge transformation $\xi\in\Gamma(\Sigma,\mathfrak{sl}(2,\C))$ are given by \[(A,B)=(\bar\partial\xi,D\xi).\] The Goldman symplectic structure $\Omega^\lambda$ on the moduli space of $\lambda$-connections \cite[Section 1]{Gold} is defined to be \begin{equation}\label{GoldmanO} \Omega^\lambda( (A_1,B_1),(A_2,B_2))=4\int_\Sigma\tr(A_1\wedge B_2-A_2\wedge B_1)\end{equation} for $(A_1,B_1), (A_2,B_2)\in\Omega^{(0,1)}(\mathfrak{sl}(2,\C))\oplus\Omega^{(1,0)}(\mathfrak{sl}(2,\C))$ representing tangent vectors. On a compact Riemann surface, the symplectic structure $\Omega^\lambda$ is gauge invariant, and thus can be computed using arbitrary representatives of the tangent vectors, which makes it well-defined on the moduli space of $\lambda$-connections. 
Since we are not aware of any explicit reference (except for $\lambda=0$ and $\lambda=1$), we give the simple proof (which is of course an instance of an infinite-dimensional symplectic reduction for the gauge group action with the curvature as moment map, see \cite{AB}). \begin{lemma} Let $\lambda\in\C$ be fixed. The holomorphic symplectic form $\Omega^\lambda$ is well-defined on the moduli space of $\lambda$-connections. \end{lemma} \begin{proof} By construction, $\Omega^\lambda$ is complex bilinear. Proposition \ref{proId} below identifies $\Omega^\lambda$ with the twisted holomorphic symplectic form \eqref{deftwistedOmega2} restricted to the fiber over $\lambda,$ up to a constant factor. Thus closedness and non-degeneracy follow from \cite[Section 3(F)]{HKLR}. It remains to show that $\Omega^\lambda$ descends to the moduli space. A gauge transformation acts on tangent vectors to the space of connections by conjugation, and since the trace is conjugation invariant we obtain \[\Omega^\lambda((A_1,B_1).g,(A_2,B_2).g)=\Omega^\lambda((A_1,B_1),(A_2,B_2)).\] Let $(A_1,B_1)$ satisfy $0=\bar\partial B_1+ DA_1$ and let $(A_2,B_2)=(\bar\partial\xi,D\xi)$ be a tangent vector that is also tangent to the gauge orbit. Then, \begin{equation}\label{def:goldi} \begin{split} \Omega^\lambda((A_1,B_1),(A_2,B_2))&=4 \int_\Sigma\tr(A_1\wedge D\xi-\bar\partial\xi\wedge B_1)\\ &=-4\int_\Sigma d\,\tr(A_1\xi+\xi B_1)-4\int_\Sigma \tr(DA_1\xi+\xi \bar\partial B_1) =0. \end{split} \end{equation} Therefore, $\Omega^\lambda$ is well-defined on the quotient space (the moduli space of $\lambda$-connections). \end{proof} The Goldman symplectic form in fact coincides (up to scaling) with the twisted holomorphic symplectic form \eqref{deftwistedOmega2} on the twistor space $\mathcal P = \mathcal M_{SD} \times \C P^1$ of the moduli space of self-duality solutions, also compare with \cite[Equation (4.10)]{GMN}. \begin{proposition}\label{proId} Let $\lambda\in\C\subset \C P^1$ be fixed. Then, the fiber $\mathcal P_\lambda=\pi^{-1}(\lambda)$ is the moduli space of $\lambda$-connections, and \[\lambda\varpi_{\mid \mathcal P_\lambda} = \Omega^\lambda\] is the Goldman symplectic form on the moduli space of $\lambda$-connections. \end{proposition} \begin{proof} For $\lambda \in \C$ fixed, it is shown in \cite[Theorem 4.2]{Si2} that $\mathcal P_\lambda=\pi^{-1}(\{\lambda\})$ is the moduli space of $\lambda$-connections. In order to evaluate the corresponding symplectic forms, we first need to find appropriate representatives for the tangent vectors. To do so, let $h$ be the standard hermitian metric and let $(\nabla,\Psi)$ be a solution of the self-duality equations. The tangent space of $\mathcal M_{SD}$ at $[\nabla,\Psi]$ is given by \[(\xi,\phi) \in \Omega^{(0,1)}(\mathfrak{sl}(2,\C))\oplus\Omega^{(1,0)}(\mathfrak{sl}(2,\C))\] satisfying \begin{equation}\label{goeq1} \begin{split} 0&=d^\nabla (\xi-\xi^*)+\tfrac{1}{2}[\phi\wedge\Psi^*]+\tfrac{1}{2}[\Psi\wedge \phi^*]\\ 0&=\bar\partial^\nabla \phi+[\xi,\Psi]\\ \end{split} \end{equation} modulo infinitesimal gauge deformations (see \cite[(6.1)]{Hi1}).
These equations mean that we have an infinitesimal deformation \[t\mapsto (\nabla+t (\xi- \xi^*),\Psi+t\phi)\] of a solution to the self-duality equation (for the fixed hermitian metric $h$), and we can choose harmonic representatives which are orthogonal to the (unitary) gauge orbit, which is equivalent to \begin{equation}\label{goeq2} \begin{split}\mathcal D^*(\xi,\phi)=0 \end{split} \end{equation} where \[\mathcal D \colon\Gamma(\Sigma,\mathfrak{su}(2))\to\Omega^{(0,1)}(\Sigma,\mathfrak{sl}(2))\oplus\Omega^{(1,0)}(\Sigma,\mathfrak{sl}(2)); \quad \psi\mapsto \left((d^\nabla\psi)'',[\Psi,\psi]\right)\] see \cite{Hi1, Fred}. Provided \eqref{goeq1}, \eqref{goeq2} hold, the complex structures $I,J,K=IJ$ are given by \begin{equation} \begin{split} I(\xi,\phi)=(i\xi,i\phi)\quad J(\xi,\phi)=(i\phi^*,-i\xi^*) \and K(\xi,\phi)=(-\phi^*,\xi^*), \end{split} \end{equation} see \cite[page 109]{Hi1}. Provided \eqref{goeq1}, \eqref{goeq2} hold for $(\xi_k, \phi_k)$, $k=1,2$, the Hitchin metric on $\mathcal M_{SD}$ (see \cite[(6.2)]{Hi1}) is defined to be \[g\left((\xi_1,\phi_1),(\xi_2,\phi_2)\right):=2i\int_\Sigma\tr\left(\xi_1^*\wedge \xi_2+\xi_2^*\wedge \xi_1+\phi_1\wedge\phi_2^*+\phi_2\wedge\phi_1^*\right).\] Thus \begin{equation*} \begin{split} \omega_I((\xi_1,\phi_1),(\xi_2,\phi_2))&=-g((\xi_1,\phi_1),(i\xi_2,i\phi_2))\\ &=2\int_\Sigma\tr(\xi_1^*\wedge \xi_2-\xi_2^*\wedge \xi_1-\phi_1\wedge\phi_2^*+ \phi_2\wedge\phi_1^*)\\ \omega_J((\xi_1,\phi_1),(\xi_2,\phi_2))&=2\int_\Sigma\tr(\xi_1^*\wedge \phi_2^*-\phi_2\wedge \xi_1+\phi_1\wedge\xi_2-\xi_2^*\wedge\phi_1^*)\\ \omega_K((\xi_1,\phi_1),(\xi_2,\phi_2))&=-2i\int_\Sigma\tr(-\xi_1^*\wedge \phi_2^*-\phi_2\wedge \xi_1+\phi_1\wedge\xi_2+\xi_2^*\wedge\phi_1^*) \end{split} \end{equation*} which gives \begin{equation*} \begin{split} (\omega_J+i\omega_K)(\xi_1,\phi_1),(\xi_2,\phi_2)&=-4\int_\Sigma\tr(\phi_2\wedge\xi_1-\phi_1\wedge\xi_2). \end{split} \end{equation*} Recall that twistor lines corresponding to the self duality solution $(\nabla,\Psi)$ are given by \[\lambda\longmapsto[\lambda,\bar\partial^\nabla+\lambda\Psi^*,\Psi+\lambda\partial^\nabla]_\Sigma \in \mathcal M_{Hod} \subset \mathcal M_{DH}\] or equivalently, by the associated family of flat connections \begin{equation}\label{associated_family}\lambda\in\C^*\longmapsto \nabla+\lambda^{-1}\Psi+\lambda\Psi^*.\end{equation} Thus, the tangent vectors $X=(\xi_1,\phi_1)$ and $Y=(\xi_2,\phi_2)$ correspond to sections of the normal bundle to the twistor line in $\mathcal M_{DH}$ given by \[X: \lambda \longmapsto [0,\xi_1+\lambda \phi_1^*,\phi_1-\lambda\xi_1^*]\] and \[Y: \lambda \longmapsto [0,\xi_2+\lambda \phi_2^*,\phi_2-\lambda\xi_2^*].\] Therefore, when fixing $\lambda \in \C$, we obtain from \eqref{GoldmanO} \begin{equation}\label{eq:omlam} \begin{split} \Omega^\lambda(X,Y)=&\Omega^\lambda((\xi_1+\lambda \phi_1^*,\phi_1-\lambda\xi_1^*),(\xi_2+\lambda \phi_2^*,\phi_2-\lambda\xi_2^*))\\ =&(\omega_J+i\omega_K)(X,Y)-2\lambda\omega_I(X,Y)-\lambda^2(\omega_J-i\omega_K)(X,Y). \end{split} \end{equation} \end{proof} \section{Strongly parabolic Higgs bundles}\label{sec:paradh} In this section we generalise the setup in section \ref{pre} to punctured Riemann surfaces. We introduce strongly parabolic Higgs fields, and explain their relationship to Higgs bundles on compact surfaces when their parabolic weights are rational. We will restrict to the $\mathfrak{sl}(2,\C)$-case, which we refer to as {\em trace-free}. 
In particular, the holomorphic bundles are of rank 2, have trivial determinant bundle, and the corresponding Higgs fields are trace-free. \subsection{Parabolic structures and logarithmic connections} Let $\Sigma$ be a compact Riemann surface, $p_1,\dots,p_n\in \Sigma$ be pairwise distinct and $\Sigma^0=\Sigma\setminus\{p_1,\dots,p_n\}.$ Let $V=\C^2\times\Sigma\to\Sigma$ be the trivial $ C^\infty$-complex vector bundle over $\Sigma$ of rank two and $d=\partial^0+\bar\partial^0$ be the decomposition of the deRham differential $d$ on $\Sigma$ into its $(1,0)$ and $(0,1)$ parts. Furthermore, let $\bar\partial=\bar\partial^0+\gamma$, $\gamma\in\Gamma(\Sigma,\bar K_\Sigma\mathfrak{sl}(2,\C))$ be a holomorphic structure on $V$, and consider the corresponding holomorphic rank 2 vector bundle $\mathcal V:=(V,\bar\partial).$ Since $\gamma$ is trace free, the determinant bundle $\Lambda^2V$ of $\mathcal V$ is holomorphically trivial. A holomorphic $\mathrm{SL}(2,\C)$ frame $F=(s,t)$ on an open set $U\subset \Sigma$ is given by two holomorphic sections $s,t\in H^0(U,\mathcal V)$ such that $s\wedge t=1.$ \begin{definition} A (trace-free) parabolic structure $\mathcal P$ on the holomorphic bundle $\mathcal V$ with parabolic divisor $D=p_1+\dots+p_n$ is given by a collection of quasiparabolic lines, i.e., 1-dimensional vector subspaces $\ell_j\subset V_{p_j} $, together with parabolic weights $\alpha_j\in[0,\tfrac{1}{2}[$ for $j=1,\dots,n$. \end{definition} For a general definition of parabolic structures see for example \cite{MS,Si1}, and for the trace-free convention see \cite{Pir}. The {\em parabolic degree} of a holomorphic subbundle $L$ of $\mathcal V$ on $\Sigma$ is defined by \[\text{pdeg}(L)=\deg(L)+\sum_j (-1)^{\sigma_j} \alpha_j,\quad\text{with}\quad \sigma_j=\begin{cases} 0 &\quad\text{if}\; L_{p_j}=\ell_j\\ 1 &\quad\text{if}\; L_{p_j}\neq\ell_j \end{cases}.\] Two different parabolic structures $\mathcal P$ and $\wt{\mathcal P}$ on two holomorphic bundles $\mathcal V$ respectively $\wt{\mathcal V}$ over $\Sigma$ are called {\em isomorphic} if the singular divisors $D=\wt D$ coincide, the parabolic weights are the same $\alpha_j=\wt \alpha_j$ for all $j, $ and if there is a holomorphic bundle isomorphism $\Phi\colon \mathcal V\to\wt{\mathcal V}$ mapping the parabolic lines onto each other, i.e., $\Phi(\ell_k)=\wt \ell_j$ for all $j$. \begin{definition} A parabolic structure $\mathcal P$ on the holomorphic bundle $\mathcal V$ is called (semi-)stable if and only if all holomorphic subbundles have negative (non-positive) parabolic degree. A parabolic structure is called {\em polystable} if it is either stable or the underlying holomorphic bundle $\mathcal V$ is the direct sum of two holomorphic line subbundles of parabolic degree $0.$ \end{definition} The notion of a parabolic structure was introduced to generalize the Narasimhan-Seshadri theorem \cite{NS} to the case of punctured Riemann surfaces \cite{MS}. 
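To make the parabolic degree concrete in the situation studied below (the trivial rank two bundle over the 4-punctured sphere, where every holomorphic line subbundle of degree zero is spanned by a constant section), here is a small illustrative Python sketch; the encoding of lines and the sample weights are ad hoc choices of ours.
\begin{verbatim}
# Hedged illustration of the parabolic degree of a *constant* (degree-zero)
# line subbundle L of the trivial rank-2 bundle over the 4-punctured sphere.
# Lines are encoded as points of CP^1 (a slope, or the string "inf"); the data
# below is ad hoc test data, not taken from the paper.
def pdeg_constant_line(L, parabolic_lines, t):
    # pdeg(L) = deg(L) + sum_j (+t if L = ell_j at p_j, else -t), with deg(L)=0
    return sum(t if L == ell else -t for ell in parabolic_lines)

t = 0.125                        # common parabolic weight t < 1/4 at the punctures
lines = [0.0, 1.0, "inf", 2.5]   # pairwise distinct quasiparabolic lines
print([pdeg_constant_line(L, lines, t) for L in lines + [7.0]])
# -> [-0.25, -0.25, -0.25, -0.25, -0.5]: every constant line subbundle has
#    negative parabolic degree when the four lines are pairwise distinct.
\end{verbatim}
With pairwise distinct quasiparabolic lines every constant line subbundle has parabolic degree at most $-2t<0$, which is the mechanism behind the stability statement of Lemma \ref{lem:stableparalemma} below.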
\begin{definition} A (trace-free) logarithmic connection on the holomorphic bundle $\mathcal V \rightarrow \Sigma$ with singular divisor $D=p_1+\dots+p_n$ is a connection $\nabla$ on $\mathcal V{\mid_ {\Sigma^0}}$ with $\bar\partial^\nabla=\bar\partial_{V}{\mid_ {\Sigma^0}},$ such that with respect to any holomorphic $\mathrm{SL}(2,\C)$ frame $F$ of $\mathcal V$ on an open subset $U\subset\Sigma$, the connection 1-form $\alpha=\alpha_F$ of $\nabla$ with respect to $F$ is a meromorphic $\mathfrak{sl}(2,\C)$-valued 1-form with at most first order poles at $U\cap\{p_1,\dots,p_n\}$ and holomorphic elsewhere. \end{definition} Note that a logarithmic connection on a Riemann surface is automatically flat, as its connection 1-form is meromorphic. A holomorphic frame $F$ of $\mathcal V$ restricted to $U\subset\Sigma$ can be regarded as a holomorphic isomorphism from the trivial holomorphic rank 2 bundle to $\mathcal V$ restricted to $U.$ Thus, the {\em residue} \[\text{res}_{p_j}\nabla:=F \circ \text{res}_{p_j}\alpha_F \circ F^{-1}\in \mathfrak{sl}(V_p)\] of a logarithmic connection $\nabla$ on a holomorphic bundle $\mathcal V$ at a singular point $p_j$ is well-defined, i.e., independent of the choice of the holomorphic frame. Assume that the two eigenvalues $\pm\alpha_j$ of $\text{res}_{p_j}\nabla$ do not differ by an integer. Then, the (local) monodromy of $\nabla$ along a simple closed curve surrounding $p_j$ lies in the conjugacy class of \begin{equation}\label{eq:locmon}\exp(2\pi i\, \text{res}_{p_j}\nabla)\cong\begin{pmatrix}e^{2\pi i\alpha_j}&0\\0&e^{-2\pi i\alpha_j}\end{pmatrix}.\end{equation} In particular, if the eigenvalues $\pm \alpha_j$ are real with $\alpha_j\in (0,\tfrac{1}{2})$, then the local monodromy lies in the conjugacy class of unitary matrices. In the following, we always assume that this is the case, i.e., we impose the condition \[\det(\text{res}_{p_j}\nabla)\in(-\tfrac{1}{4},0)\] on the trace-free residues. Then a logarithmic connection determines a parabolic structure with underlying holomorphic bundle $\mathcal V$ and singular divisor $D=p_1+\dots p_n$ such that the parabolic weight $0<\alpha_j< \tfrac{1}{2}$ at $p_j$ is the positive eigenvalue of $\text{res}_{p_j}\nabla$ and the quasiparabolic line $\ell_j$ is the corresponding eigenline. We will refer to the positive eigenvalues of the residues $\text{res}_{p_j}\nabla$ as the parabolic weights of $\nabla.$ If the monodromy representation is irreducible and unitary (up to overall conjugation), then the parabolic structure is stable. The converse statement also holds by the Mehta-Seshadri theorem \cite{MS}: for every stable parabolic structure $\mathcal P$ there exists a (unique) logarithmic connection $\nabla$ with unitary monodromy (up to conjugation) inducing the parabolic structure $\mathcal P.$ This correspondence extends to reducible unitary logarithmic connections with strictly polystable parabolic structures: the induced parabolic structure of a unitary connection with abelian monodromy is the direct sum of two holomorphic line subbundles of parabolic degree 0. And by abelian Hodge theory and the residue theorem, the converse is true as well. 
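The conjugacy class statement \eqref{eq:locmon} can be checked numerically in a model case. The following hedged Python sketch integrates the Fuchsian model $y'=(A/z+B)\,y$, with $A$ a trace-free residue with eigenvalues $\pm t$ and $B$ a constant trace-free matrix of our choosing, around the unit circle and compares the eigenvalues of the resulting monodromy with $e^{\pm 2\pi i t}$; the matrices and the hand-rolled integrator are illustrative only.
\begin{verbatim}
import numpy as np

# Numerical illustration of (eq:locmon): for dy/dz = (A/z + B) y with residue A
# having eigenvalues +-t (2t not a nonzero integer), the monodromy around z = 0
# has eigenvalues exp(+-2 pi i t).  A and B are arbitrary trace-free test data.
t = 0.17
A = np.array([[t, 1.0], [0.0, -t]], dtype=complex)      # residue, eigenvalues +-t
B = np.array([[0.3, -0.2], [0.5, -0.3]], dtype=complex)  # holomorphic part

def rhs(theta, Y):
    # parametrize the loop z = exp(i theta); dz = i z dtheta, hence
    # dY/dtheta = i z (A/z + B) Y = (i A + i z B) Y
    z = np.exp(1j * theta)
    return (1j * A + 1j * z * B) @ Y

def rk4(f, Y, a, b, n=4000):
    # classical fourth-order Runge-Kutta for the matrix-valued ODE dY/dtheta = f
    h = (b - a) / n
    for k in range(n):
        th = a + k * h
        k1 = f(th, Y); k2 = f(th + h/2, Y + h/2 * k1)
        k3 = f(th + h/2, Y + h/2 * k2); k4 = f(th + h, Y + h * k3)
        Y = Y + h/6 * (k1 + 2*k2 + 2*k3 + k4)
    return Y

M = rk4(rhs, np.eye(2, dtype=complex), 0.0, 2*np.pi)      # monodromy matrix
print(np.sort_complex(np.linalg.eigvals(M)))
print(np.sort_complex(np.array([np.exp(2j*np.pi*t), np.exp(-2j*np.pi*t)])))
\end{verbatim}
Up to the discretization error of the integrator, the two printed spectra agree, in accordance with \eqref{eq:locmon} in the non-resonant case.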
\subsubsection{Parabolic structures on the 4-punctured sphere} In this paper, we mainly study the case of rank $2$ trace-free parabolic structures over $\Sigma=\C P^1$ with 4 singular points $p_1,\dots, p_4\in\mathbb CP^1$ such that the parabolic weights $\alpha_j$ satisfy \begin{equation}\label{para-weights}\alpha_1=\alpha_2=\alpha_3=\alpha_4=t\in (0,\tfrac{1}{2}).\end{equation} By the Birkhoff-Grothendieck theorem, every holomorphic rank 2 vector bundle over $\C P^1$ with trivial determinant is of the form $\mathcal V=\mathcal O(k)\oplus\mathcal O(-k)$ for some unique $k\in \N^{\geq0}.$ \begin{lemma}\label{lemmaOk} For $k\geq 2$, the holomorphic bundle $\mathcal V= \mathcal O(k)\oplus\mathcal O(-k) \rightarrow \C P^1$ does not admit a logarithmic connection on the 4-punctured sphere with parabolic weights \eqref{para-weights}. In particular, there is no stable or semi-stable parabolic structure with these weights. \end{lemma} \begin{proof} Assume there is such a connection $\nabla$ on $\mathcal V= \mathcal O(k)\oplus\mathcal O(-k).$ Writing $\nabla$ with respect to this splitting gives \[\nabla=\begin{pmatrix} D &\beta_+\\ \beta_-&D^*\end{pmatrix}.\] Then, the upper-left entry $D$ is a logarithmic connection on $\mathcal O(k)$ (the definition is analogous to the $\mathfrak{sl}(2,\C)$ case) with dual connection $D^*,$ and $\beta_+\in H^0(\C P^1, K\mathcal O(2k+4))$ and $\beta_-\in H^0(\C P^1, K\mathcal O(-2k+4))$, where the $\mathcal O(4)$ factor comes from the 4 singularities. Since $K=K_{\C P^1}\cong\mathcal O(-2)$ we have by degree count that $\beta_-\equiv0$. This is a contradiction, since in this case $D$ would be a meromorphic connection on $L= O(k)$ with residues $\pm t$ at the 4 singular points, thus the residue formula gives $0=\text{pdeg}(L)\geq k-4 t>0.$ The same computation gives that the holomorphic bundle $\mathcal V$ does not admit a stable or semi-stable parabolic structure, since the parabolic degree of $\mathcal O(k)$ is always positive. \end{proof} The two remaining cases $k=1$ and $k=0$ behave quite differently. \begin{lemma}\label{lemOK1} Let $t \in (0, \tfrac{1}{4}) \cup (\tfrac{1}{4}, \tfrac{1}{2}).$ Then there is, up to isomorphism, exactly one parabolic structure $\mathcal P$ on $\mathcal V=\mathcal O(1)\oplus\mathcal O(-1)$ over $\C P ^1$ with 4 singular points and parabolic weights \eqref{para-weights} which admits a logarithmic connection. The parabolic structure $\mathcal P$ is stable if and only if $t>\tfrac{1}{4}.$ Furthermore, up to isomorphism, the space of logarithmic connections on $\mathcal O(1)\oplus\mathcal O(-1)$ is a complex affine line. \end{lemma} \begin{proof}We refer to \cite[Proposition 18]{HH3} for the proof. \end{proof} If the bundle $\mathcal V\cong\mathcal O\oplus\mathcal O$ is trivial, we can compare the different quasiparabolic lines over the different singular points with respect to a fixed background $\mathbb C^2$. Since we will be focusing on $t\sim 0$ later, we only discuss stability for $t<\tfrac{1}{4}$ here. \begin{lemma}\label{lem:stableparalemma} Let $t\in (0,\tfrac{1}{4}).$ A parabolic structure on $\mathcal O\oplus\mathcal O$ over $\C P^1$ with 4 singular points and parabolic weights \eqref{para-weights} admits a logarithmic connection if and only if every parabolic line $\ell_j,$ $j = 1, .., 4,$ coincides with at most one other line $\ell_k$ for $k= 1, ..., 4$ and $k\neq j$. 
The parabolic structure $\mathcal P$ is stable if and only if the parabolic lines $\ell_j,$ $j= 1, ...,4,$ are pairwise distinct, and $\mathcal P$ is the direct sum of two holomorphic line subbundles of parabolic degree 0 if and only if we have two pairs of (distinct) parabolic lines. \end{lemma} \begin{proof} Let $\nabla$ be a logarithmic connection on $\mathcal V=\mathcal O\oplus\mathcal O$. Let $L$ be some holomorphic line subbundle of degree 0, i.e., it is isomorphic to $\mathcal O$. Take a complementary subbundle $L^*$, which is isomorphic to $\mathcal O$ as well, and decompose \[\nabla=\begin{pmatrix} D&\beta_+\\\beta_-&D^*\end{pmatrix}\] with respect to $\mathcal V=L\oplus L^*,$ where $D$ is a logarithmic connection on $L$, and \[\beta_-\in H^0(\C P^1,K\mathcal O(4-n)),\] $n\in\{0,\dots,4\}$ being the number of points $p_j$ for which $L_{p_j} = \ell_j$. For $n>2$ this implies $\beta_-=0$ which then gives a contradiction to the residue theorem for the line bundle connection $D$. Hence, we obtain $n<3.$ The converse direction can be shown easily by giving explicit formulas using Fuchsian systems, or it follows directly by the Mehta-Seshadri theorem once we know that they are stable. To show stability, note first that there is no holomorphic sub line bundle of $\mathcal O \oplus \mathcal O$ with positive degree. Moreover, the parabolic degree of a line subbundle $\mathcal O(k)\subset \mathcal O \oplus \mathcal O$ with $k<0$ is always negative since $t<\tfrac{1}{4}$. Thus it remains to consider line subbundles $L$ that are isomorphic to $\mathcal O,$ which means $L$ is constant. If all parabolic lines $\ell_j$ are different, then $n\leq 1$ and we have pdeg$(L) \leq -2t <0$, from which stability follows. The case $n=2$ follows from similar arguments together with the residue theorem. \end{proof} \begin{proposition}\label{pro:paramodul} The moduli space $\mathcal M_{Bun}$ of polystable parabolic structures $\mathcal P$ over $\C P^1$ with 4 singular points $p_1,\dots,p_4$ and with parabolic weights given by $t\in(0,\tfrac{1}{4})$ can be identified with $\C P^1$ with three $\Z_2$-orbifold points $0,1,\infty$. \end{proposition} \begin{proof} Since every polystable parabolic bundle admits a (unitary) logarithmic connection by the Mehta-Seshadri theorem the underlying holomorphic bundle is $\mathcal O\oplus\mathcal O$ by Lemma \ref{lemmaOk} and Lemma \ref{lemOK1}. Hence, holomorphic gauge transformations of the underlying holomorphic bundle $\mathcal V$ are given by conjugation with constant $\mathrm{SL}(2,\C)$ matrices. If $\mathcal P$ is stable, all parabolic lines are pairwise distinct. Fixing the gauge freedom is then equivalent to normalizing the parabolic lines, i.e., \[\ell_1=0,\quad \ell_2=1,\quad\text{and}\quad \ell_3=\infty.\] Then, \[\ell_4=w\in \C P^1\setminus\{0,1,\infty\}\] uniquely determines a stable parabolic structure (and for each $w\in \C P^1\setminus\{0,1,\infty\}$ there is a stable parabolic structure by Lemma \ref{lem:stableparalemma}). We call $w$ the parabolic modulus of the parabolic structure $\mathcal P$. By identifying \begin{equation}\label{eq:semisstableparaline} \begin{split} w=0\quad &\Longleftrightarrow \quad\ell_1=\ell_4 \text{ and } \ell_2=\ell_3\\ w=1\quad &\Longleftrightarrow \quad \ell_2=\ell_4 \text{ and } \ell_1=\ell_3\\ w=\infty\quad &\Longleftrightarrow \quad\ell_3=\ell_4 \text{ and } \ell_1=\ell_2\\ \end{split} \end{equation} we can fill in the missing three points of $\C P^1$. 
We first claim that it is not possible to find a holomorphic family $w\mapsto \mathcal P(w)$ of polystable parabolic structures which represents the modulus $w$ in any open neighbourhood of $w=0,1,$ or $w=\infty.$ Consider the case $w=0$ and parametrize (in terms of a centered local holomorphic coordinate $\zeta$)
\begin{equation}
\begin{split}
\ell_1(\zeta)&=\C(f_1(\zeta)\zeta,1)^T\quad \quad \ell_2(\zeta)=\C(1+f_2(\zeta)\zeta,1)^T\\
\ell_3(\zeta)&=\C(1+f_3(\zeta)\zeta,1)^T\quad \quad \ell_4(\zeta)=\C(f_4(\zeta)\zeta,1)^T.
\end{split}
\end{equation}
Assume that the lines only meet to first order in $\zeta$, which translates to the condition $f_1(0)\neq f_3(0)$ and $f_2(0)\neq f_4(0)$. Consider (for small $\zeta\neq0$) the Moebius transformations
\[x\in\C P^1\longmapsto \frac{\zeta(x-\zeta f_1(\zeta))(f_2(\zeta)-f_3(\zeta))}{(1-x+\zeta f_3(\zeta))(-1+\zeta(f_1(\zeta)-f_2(\zeta)))}\in\C P^1\]
which satisfy
\begin{equation}
\begin{split}
&\ell_1(\zeta)\longmapsto 0, \quad \ell_2(\zeta)\longmapsto 1,\quad \ell_3(\zeta)\longmapsto \infty\\
&w(\zeta)=\ell_4(\zeta)\mapsto -\zeta^2\frac{(f_2(\zeta)-f_3(\zeta))(f_1(\zeta)-f_4(\zeta))}{ (-1-\zeta f_1(\zeta)-\zeta f_2(\zeta))(1+\zeta f_3(\zeta)-\zeta f_4(\zeta))}.
\end{split}
\end{equation}
Hence, $w(\zeta)$ has a simple branch point at $\zeta=0$, as claimed. The other two cases $w=1$ and $w=\infty$ follow analogously.

It remains to exhibit the $\Z_2$-orbifold structure on the moduli space of polystable parabolic structures with parabolic weights $t$ at all 4 singular points. As we have seen above, the moduli space $\mathcal M_{Bun}$ is given by the set of quadruples of parabolic lines $\ell_j\subset\C^2$, $j=1,\dots,4$, subject to the condition that the 4 lines are pairwise distinct or that they coincide pairwise as in \eqref{eq:semisstableparaline}, up to the action of $\mathrm{SL}(2,\C)$. Consider the map
\begin{equation}\label{eq:p1Mpbun}\C P^1\to \mathcal M_{Bun}\cong \C P^1;\; [u,v]\mapsto (\ell_1=[u,v],\,\ell_2=[v,-u],\,\ell_3=[-u,v],\,\ell_4=[v,u])/\sim\end{equation}
where $\sim$ denotes the equivalence relation induced by the $\mathrm{SL}(2,\C)$-action. This map is well-defined, as the 4 lines are either pairwise distinct ($[u,v]\notin\{[\pm 1,1],[\pm i,1],[0,1],[1,0]\}$), or $\ell_1=\ell_2$ and $\ell_3=\ell_4$ (if $[u,v]=[\pm i,1]$), or $\ell_1=\ell_3$ and $\ell_2=\ell_4$ (if $[u,v]\in\{[0,1],[1,0]\}$), or $\ell_1=\ell_4$ and $\ell_2=\ell_3$ (if $[u,v]=[\pm 1,1]$). Consider the $\Z_2\times \Z_2$ action on $\C P^1$ generated by $[u,v]\mapsto [-u,v]$ and $[u,v]\mapsto [v,u].$ Acting with $\left(\begin{smallmatrix} i&0\\0&-i\end{smallmatrix}\right)$ and $\left(\begin{smallmatrix} 0&i\\ i&0\end{smallmatrix}\right)$, respectively, we see that the map \eqref{eq:p1Mpbun} is invariant under the $\Z_2\times \Z_2$ action. Furthermore, this map is rational of degree 4, and the only fixed points (of the 3 non-trivial elements of order 2) are $[\pm 1,1],[\pm i,1],$ or $[0,1]$ and $[1,0],$ respectively. This proves the proposition.
\end{proof}
As we have seen in Proposition \ref{pro:paramodul}, the $\C P^1$ with three orbifold singularities $0,1,\infty$ is given by the quotient of $\C P^1$ by the $\Z_2\times\Z_2$ action generated by $\zeta\mapsto -\zeta$ and $\zeta\mapsto \tfrac{1}{\zeta}.$ We will encounter this $\Z_2\times\Z_2$ action again in our symmetric setup, see Proposition \ref{pro:classparahiggs} below.
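For instance, evaluating the map \eqref{eq:p1Mpbun} at the special points $[\pm 1,1]$, $[\pm i,1]$, $[0,1]$ and $[1,0]$ recovers the semi-stable configurations \eqref{eq:semisstableparaline}: at $[u,v]=[1,1]$ we obtain
\[\ell_1=[1,1]=\ell_4,\qquad \ell_2=[1,-1]=[-1,1]=\ell_3,\]
which is the configuration $w=0$; at $[u,v]=[i,1]$ we obtain $\ell_1=[i,1]=[1,-i]=\ell_2$ and $\ell_3=[-i,1]=[1,i]=\ell_4$, which is the configuration $w=\infty$; and at $[u,v]=[0,1]$ we obtain $\ell_1=[0,1]=\ell_3$ and $\ell_2=[1,0]=\ell_4$, which is the configuration $w=1$.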
\subsubsection{Strongly parabolic Higgs fields} Let $\nabla$ and $\widetilde\nabla$ be two logarithmic connections with the same induced parabolic structure $\mathcal P$ on the holomorphic bundle $\mathcal V$. The difference $\widetilde\nabla-\nabla$ is a $\mathfrak{sl}(\mathcal V)$-valued meromorphic $1$-form. Since the eigenvalues of the residues of $\widetilde\nabla$ and $\nabla$ are the same, and the eigenlines $\ell_j$ for the positive eigenvalues coincide, the residues \[R_j:=\text{res}_{p_j}(\widetilde\nabla-\nabla)=\text{res}_{p_j}(\widetilde\nabla)-\text{res}_{p_j}(\nabla)\] must be nilpotent with $\ell_j\subset \ker R_j.$ This motivates the following definition: \begin{definition} Let $\mathcal P$ be a parabolic structure on the holomorphic bundle $\mathcal V$ with singular divisor $D=p_1+\dots+ p_n.$ A strongly parabolic Higgs field is a meromorphic 1-form \[\Psi\in H^0(\Sigma, K_\Sigma\mathfrak{sl}(\mathcal V,\C)\otimes \mathcal O_\Sigma(D))\] with $(\text{res}_{p_j}\Psi)\ell_j=0$ for all $j.$ A strongly parabolic Higgs pair $(\mathcal P,\Psi)$ is called stable if every $\Psi$-invariant holomorphic line subbundle of $\mathcal V$ has negative degree, and strictly polystable if $\mathcal V$ is the direct sum of two $\Psi$-invariant line bundles of parabolic degree 0. \end{definition} We restrict ourselves in the following to the case of the $4$-punctured sphere with parabolic weight $t \in (0, \tfrac{1}{4})$ at each puncture. Note that, similarly to the proof of Lemma \ref{lemmaOk}, it can be shown that there is no stable parabolic Higgs pair on $\mathcal O(k)\oplus\mathcal O(-k)$ with $k\geq2$ over $\C P^1$ and only four singular points (together with our restrictions for the range of the weights). As before we treat the cases of $\mathcal V= \mathcal O(-1)\oplus\mathcal O(1)$ and $\mathcal V= \mathcal O\oplus\mathcal O$ separately. \begin{lemma}\label{lem:unstablepara} Let $(\mathcal P,\Psi)$ be a stable strongly parabolic Higgs pair with underlying holomorphic bundle $\mathcal V=\mathcal O(-1)\oplus\mathcal O(1).$ Then, up to isomorphisms of $\mathcal P$, all four parabolic lines are contained in $\mathcal O(-1),$ and the Higgs field $\Psi$ takes the form \[\Psi=\begin{pmatrix}0&\alpha\\ \gamma &0\end{pmatrix}\] where $\gamma\in H^0(\C P^1,\text{Hom}(\mathcal O(-1),K_{\C P^1}\mathcal O(1)))\cong \C$ is a constant and $\alpha$ is a non-zero meromorphic section of $\text{Hom}(\mathcal O(1),K_{\C P^1}\mathcal O(-1))$ with 4 simple poles at $p_1,\dots,p_4$ which is unique up to a scale that can be fixed by an isomorphism of $\mathcal P$. \end{lemma} \begin{proof} We can assume without loss of generality that $\ell_1,\ell_2$ and $\ell_3$ are contained in $\mathcal O(-1)\subset\mathcal V,$ i.e., $\mathcal O(-1)|_{p_j} = \ell_j,$ for $j = 1, 2, 3.$ Write \[\Psi=\begin{pmatrix}\beta&\alpha\\\gamma&-\beta\end{pmatrix}\] with respect to the decomposition $\mathcal V=\mathcal O(-1)\oplus\mathcal O(1).$ Then, $\beta$ is a meromorphic 1-form on $\C P^1$ with at most a simple pole at $p_4,$ thus $\beta$ must vanish. If $\alpha=0$ then $\mathcal O(1)$ would be an invariant subbundle of positive parabolic degree. Hence, $\alpha\neq0$ by stability. Since $\text{Hom}(\mathcal O(1),K_{\C P^1}\mathcal O(-1)) \cong \mathcal O(-4)$ is of degree $-4$ and $\alpha\neq0$ can have at most simple poles, $\alpha$ must have a simple pole at each $p_j.$ Moreover, this implies $\alpha$ is unique up to scale, which can be fixed by a constant diagonal gauge. 
Furthermore, we obtain that $\gamma$ cannot have a pole at $p_1,\dots,p_4$, as the residues of $\Psi$ must be nilpotent, thus $\gamma$ must be a constant which also implies that $\mathcal O(-1)|_{p_4} = \ell_4.$ \end{proof} \begin{lemma}\label{lem:stabhiggspar} Let $(\mathcal P,\Psi)$ be a stable Higgs pair with strongly parabolic Higgs field $\Psi$ and underlying holomorphic structure $\mathcal O\oplus\mathcal O.$ Then, $\mathcal P$ is polystable. Conversely, for every polystable $\mathcal P$ on $\mathcal O\oplus\mathcal O$ there is a 1-dimensional vector space (unique up to parabolic isomorphisms) of strongly parabolic Higgs fields $\Psi$ such that the Higgs pair $(\mathcal P, \Psi)$ is stable if $\Psi \neq 0$. \end{lemma} \begin{proof} We first show that the underlying parabolic structure $\mathcal P$ for a stable strongly parabolic Higgs pair on $\mathcal O\oplus\mathcal O$, is polystable. Assume $\mathcal P$ is not polystable, then there is a unique degree zero line bundle $L$ with non-negative parabolic degree. Thus, $L$ must contain at least two parabolic lines, i.e., $L|_{p_j} = \ell_j$ for two different $ j \in \{1, ..., 4\}$. A strongly parabolic Higgs field $\Psi\in H^0(K_{\C P^1} \mathfrak{sl}(2, \C))$ is given by $$\Psi = \sum_{j=1}^4R_j \tfrac{dz}{z-p_j},$$ with $R_j \in \mathfrak{sl}(2, \C)$ and $\sum_{j=1}^4R_j=0.$ Thus if $L$ contains three or four parabolic lines, in which case $L$ has positive parabolic degree, it must be invariant under $\Psi$ in contradiction to the Higgs pair being stable. If $L$ contains exactly two parabolic lines, then it is of parabolic degree zero, and we can find a complementary constant line bundle $\widetilde L$, which contains either one or two of the remaining parabolic lines. Since the quasiparabolic lines lie in the kernel of $R_j, $ the matrices $R_j$ with respect to the decomposition $\mathcal O\oplus\mathcal O=L\oplus \widetilde L$ are either upper or lower triangular with vanishing diagonals whenever $L$ or $\wt L$ contains the quasiparabolic line $\ell_j$. Thus by residue theorem, as the sum of all residues $R_j$ must be zero, we obtain that $\widetilde L$ must contain two parabolic lines and thus has parabolic degree 0. But then the parabolic bundle is the direct sum of two line bundles of parabolic degree zero and hence $\mathcal P$ must be polystable, which gives a contradiction. Next, we show that dimension of the space of (stable) strongly parabolic Higgs fields for a given stable parabolic structure is at most 1-dimensional (up to isomorphism). If $\mathcal P$ is stable, i.e., the four quasiparabolic eigenlines are distinct, a short computation shows that there exists no non-zero strongly parabolic Higgs field of $\mathcal P$ with vanishing residue at one of the singular points by the residue theorem. This shows that the space of strongly parabolic Higgs fields is at most 1-dimensional, as the linear combination of two (non-zero) strongly parabolic Higgs fields such that the residue $R_j$ at one singular point vanishes must be identically zero. The space of stable strongly parabolic Higgs fields is at least 1-dimensional, up to conjugation, since every stable parabolic structure $\mathcal P$ with underlying holomorphic bundle $\mathcal O\oplus\mathcal O$ and parabolic weights $t\in (0,\tfrac{1}{4})$ is determined by the quasiparabolic lines \begin{equation}\label{eq:quasbef}\ell_1=(u,v)^T\C,\quad \ell_2=(v,-u)^T\C,\quad \ell_3=(u,-v)^T\C,\quad \ell_4=(v,u)^T\C \end{equation} for suitable $u,v\in\C^2\setminus\{0\}$. 
Then, it can be easily verified that \begin{equation}\label{eq:psier} \Psi= \begin{pmatrix} uv& -u^2\\v^2&-uv\end{pmatrix}\frac{dz}{z-p_1} +\begin{pmatrix} -uv& -v^2\\u^2&uv\end{pmatrix}\frac{dz}{z-p_2} +\begin{pmatrix} uv& u^2\\-v^2&-uv\end{pmatrix}\frac{dz}{z-p_3} +\begin{pmatrix} -uv& v^2\\-u^2&uv\end{pmatrix}\frac{dz}{z-p_4} \end{equation} is a non-zero strongly parabolic Higgs field for $\mathcal P$. Since $\mathcal P$ is already stable, the Higgs pair $(\mathcal P,\Psi)$ is stable as well. If $\mathcal P$ is the direct sum of two line bundles of parabolic degree 0, then a stable strongly parabolic Higgs field must be off-diagonal (with respect to the decomposition of the rank two bundle into the two line bundles), with the off-diagonal entries being two non-zero meromorphic 1-forms with simple poles at the two of the four singular points, where the quasiparabolic lines coincide with the respective holomorphic line bundle. This gives a two-dimensional space (with two complex lines removed) of possible strongly parabolic Higgs fields. Because for non-vanishing off-diagonals the determinant of the Higgs field is non-vanishing with simple poles, these Higgs fields are stable. By a diagonal gauge, we can fix the ratio of the meromorphic 1-forms, and we obtain a complex line (without the origin, where the Higgs field vanishes) of stable parabolic Higgs fields. \end{proof} \begin{lemma}\label{lem:next} Let $(u,v)\in\C^2\setminus\{0\}$ and consider the strongly parabolic Higgs field $\Psi$ as in \eqref{eq:psier}. Then the underlying parabolic structures are always polystable and moreover, they are stable except for $$uv=0, \quad u^2=v^2 \quad \text{ or } \quad u^2=-v^2.$$ \end{lemma} \begin{proof} Since $4t<1$ every holomorphic subbundle of the holomorphically trivial rank 2 bundle with negative degree also has negative parabolic degree. Let $L$ be a holomorphic line subbundle of degree $0$, i.e., a constant line in $\C^2.$ Thus the parabolic degree of $L$ can be maximized by choosing $L = \ell_j$ to be one of the quasiparabolic lines. These lines $\ell_1, ..., \ell_4$ are given by \eqref{eq:quasbef}. If all four lines are distinct, then any such $L$ has negative parabolic degree $-2t$ and the parabolic structure is stable. From the explicit formulas in \eqref{eq:quasbef} it follows that at most two of the lines $\ell_1, ..., \ell_4$ can coincide. And two lines coincide if and only if $uv=0$, $u^2=v^2$ or $u^2=-v^2.$ In this case we have that there are two complementary lines $L$ and $\wt L$ with parabolic degree zero and $\mathcal P$ is polystable but not stable. \end{proof} The parabolic Higgs pair, consisting of the parabolic structure $\mathcal P$ with trivial underlying holomorphic structure and the strongly parabolic Higgs field $\Psi$ as in \eqref{eq:psier}, is uniquely determined by the nilpotent $\mathfrak{sl}(2,\C)$-matrix \begin{equation}\label{eq:res1higgs}R_1=\text{res}_{p_1}\Psi=\begin{pmatrix} uv& -u^2\\v^2&-uv\end{pmatrix}.\end{equation} In fact, we have \begin{equation}\label{eq:psiAs}\Psi=R_1\frac{dz}{z-p_1}+R_2\frac{dz}{z-p_2}+R_3\frac{dz}{z-p_3}+R_4\frac{dz}{z-p_4}\end{equation} with \begin{equation}\label{eq:psiAs2} R_2=(CD)^{-1}R_1(CD),\quad R_3=D^{-1}R_1 D,\quad R_4=C^{-1}R_1 C\end{equation} for anti-commuting \begin{equation}\label{eq:CD} C=\matrix{0&i\\i&0}\quad \text{and}\quad D=\matrix{i&0\\0&-i}\,. 
\end{equation}
Note that the conjugation of $R_1$ by $C$, $D$ or $CD$ leads to a conjugation of the corresponding Higgs field $\Psi$, which is not true in general for arbitrary $g\in\mathrm{SL}(2,\C)$, as $g$ neither commutes nor anti-commutes with $C$ and $D$. In fact, a direct computation gives:
\begin{lemma}\label{41cov}
Let $0\neq R_1\in\mathfrak{sl}(2,\C)$ be nilpotent and $g\in\mathrm{SL}(2,\C).$ Then the parabolic Higgs fields $\Psi$ and $\widetilde \Psi$ corresponding to $R_1$ and $\widetilde R_1=g^{-1}R_1g$ via \eqref{eq:psiAs} and \eqref{eq:psiAs2} are gauge equivalent if and only if
\[g\in\langle C,D\rangle,\]
where $\langle C,D\rangle$ denotes the group of gauge transformations generated by conjugation with $C$ and $D.$ Therefore, the group of (non-trivial) transformations preserving the gauge orbit is isomorphic to $\Z_2\times\Z_2.$
\end{lemma}
\begin{remark}
Note that a non-zero stable strongly parabolic Higgs field on $\mathcal O\oplus\mathcal O$ uniquely determines the quasiparabolic lines. Hence, if the parabolic weights are given, the Higgs field induces a parabolic structure. We call a parabolic Higgs field $\Psi$ of the form \eqref{eq:psier} {\em symmetric}.
\end{remark}
As a corollary of the above observations, we have the following classification.
\begin{proposition}\label{pro:classparahiggs}
For $t \in (0, \tfrac{1}{4})$ fixed, the moduli space of polystable strongly parabolic Higgs fields over $\C P^1$ with 4 singular points contains an open dense subset $\mathcal U$ which is isomorphic to the holomorphic cotangent bundle $T^*\C P^1$ modulo the $\Z_2\times\Z_2$ action induced by conjugation by $C$ and $D.$
\end{proposition}
\begin{proof}
We define $\mathcal U$ to be the (open) subset of the moduli space of polystable strongly parabolic Higgs pairs over $\C P^1$ with 4 singular points on $\mathcal V= \mathcal O \oplus \mathcal O.$ Whenever the Higgs field is non-zero, the parabolic structure is determined by the symmetric Higgs field $\Psi.$ This Higgs field in turn is determined by the non-zero nilpotent residue $R_1\in\mathfrak{sl}(2,\C).$ The matrix $R_1$ is determined uniquely up to sign by
$$(u,v)\in\C^2\setminus\{0\}\mapsto\begin{pmatrix} uv &-u^2\\v^2 &-uv\end{pmatrix}.$$
Recall that the kernel of $R_1$ determines the induced quasiparabolic lines via \eqref{eq:psiAs2} or \eqref{eq:quasbef}. Moreover, by Lemma \ref{41cov}, the gauge class of a strongly parabolic Higgs field (or the induced parabolic structure) determines $R_1$ uniquely up to conjugation by the gauge group $\Z_2\times\Z_2$ generated by $C$ and $D.$ When $(u, v) \rightarrow 0$ the induced parabolic structure is determined by the ratio $[u:v]$ in the limit. Thus the moduli space $\mathcal U$ of strongly parabolic Higgs pairs on $\mathcal O \oplus \mathcal O$ is given by the blow-up of $\C^2/\Z_2$ at $(u,v) = 0$, followed by taking the quotient by the $\Z_2\times\Z_2$ action. The blow-up of $\C^2/\Z_2$ can then be naturally identified with the cotangent bundle of $\C P^1.$
\end{proof}
The complement of $\mathcal U$ is the complex line of parabolic Higgs fields with underlying holomorphic bundle $\mathcal V=\mathcal O(1)\oplus\mathcal O(-1).$ It is possible to explicitly glue this line to $\mathcal U$ and turn the moduli space into a complex orbifold, see for example \cite{Men}. We do not give more details of this, since our loop group methods only work on $\mathcal U$ as of now.
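For the reader's convenience, we record the elementary checks behind \eqref{eq:psier}, \eqref{eq:res1higgs} and \eqref{eq:psiAs2}. Each residue in \eqref{eq:psier} is trace-free with
\[\det R_1=-u^2v^2+u^2v^2=0,\]
hence nilpotent, and the four residues sum to zero, as required by the residue theorem. Moreover, $R_1(u,v)^T=0$, $R_2(v,-u)^T=0$, $R_3(u,-v)^T=0$ and $R_4(v,u)^T=0$, so the kernels reproduce the quasiparabolic lines \eqref{eq:quasbef}. Finally,
\[C^{-1}R_1C=\begin{pmatrix}0&-i\\-i&0\end{pmatrix}\begin{pmatrix}uv&-u^2\\v^2&-uv\end{pmatrix}\begin{pmatrix}0&i\\i&0\end{pmatrix}=\begin{pmatrix}-uv&v^2\\-u^2&uv\end{pmatrix}=R_4,\]
and similarly $D^{-1}R_1D=R_3$ and $(CD)^{-1}R_1(CD)=R_2$, in accordance with \eqref{eq:psiAs2}.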
\subsection{Non-abelian Hodge correspondence in the strongly parabolic case} In this section we recall Simpson's non-abelian Hodge correspondence \cite{Si1} in the case of rank two strongly parabolic Higgs bundles and non-trivial weights, see also \cite{FMSW} or \cite{KiWi}. The space of hermitian metrics with determinant one is naturally diffeomorphic to the hyperbolic 3-space. Thus we can measure the distance between two metrics as the distance of the two corresponding points in the hyperbolic space using the metric $d$. Let $\mathcal P$ be a parabolic structure on a holomorphic rank two bundle $\mathcal V$ with trivial determinant bundle, $z$ be a local holomorphic coordinate centered at a singular point $p_j\in\Sigma$, and let $\ell_j\subset\mathcal V_{p_j}$ be the parabolic line and $\alpha_j \in (0, \tfrac{1}{2})$ be the corresponding parabolic weight. Choose a local holomorphic frame $(s_1,s_2)$ of $\mathcal V$ on $U_j\ni p_j$ such that \[s_1\wedge s_2=1\in H^0(U_j,\Lambda^2\mathcal V)=H^0(U_j,\mathcal O)\] and \[s_1(p_j)\in\ell_j.\] Consider the hermitian metric $h_j$ given by \[h_j=\begin{pmatrix} (z\bar z)^{\alpha_j}&0\\0&(z\bar z)^{-\alpha_j}\end{pmatrix}\] with respect to the holomorphic frame $(s_1,s_2)$. We call $h_j$ a model metric with respect to the parabolic structure at $p_j$. A hermitian metric $h$ on $\mathcal V$ over ${\Sigma\setminus\{p_1,\dots,p_n\}}$ is called {\em tame} at $p_j$ with respect to the parabolic structure if and only if \[d(h,h_j) \quad \text{is bounded on } U_j\setminus\{p_j\}.\] Since any two model metrics have finite distance to each other the notion of tameness is independent of the choice of holomorphic frame with the above properties. \begin{definition} Let $\mathcal P$ be a parabolic structure on a holomorphic vector bundle $\mathcal V$ over $\Sigma$ with singular divisor $p_1+\dots+p_n.$ A hermitian metric on a holomorphic bundle $\mathcal V$ over ${\Sigma\setminus\{p_1,\dots,p_n\}}$ is called {\em tame} with respect to $\mathcal P$ if it is tame at all of its singular points $p_j.$ \end{definition} Given a tame hermitian metric $h$ and a holomorphic structure $\bar\partial$ we denote its Chern connection on $\Sigma\setminus\{p_1,\dots,p_n\}$ by $D^h=D^{\bar\partial,h}.$ Similarly, for a strongly parabolic Higgs field $\Psi$ we denote its hermitian conjugate by $\Psi^*=\Psi^{*,h}\in\Gamma(\Sigma\setminus\{p_1,\dots,p_n\},\overline{K}\mathfrak{sl}(\mathcal V)).$ \begin{definition} Let $(\mathcal P,\Psi)$ be a polystable strongly parabolic Higgs pair. A metric $h$ is called tame harmonic with respect to the Higgs pair if the metric is tame with respect to $\mathcal P$ and if the connection \[\nabla:=D^h+\Psi+\Psi^{*,h}\] is flat. \end{definition} A tame harmonic metric gives a solution of Hitchin's self-duality equations over $\Sigma\setminus\{p_1,\dots,p_n\}$ with appropriate growth conditions at the punctures. 
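For later use, we recall how flatness of $\nabla$ encodes the self-duality equations in this setting. Since $\Psi$ is a holomorphic section of $K_\Sigma\mathfrak{sl}(\mathcal V)\otimes\mathcal O_\Sigma(D)$ and $D^h$ is the Chern connection of $(\bar\partial,h)$, we have $\bar\partial^{D^h}\Psi=0$ on $\Sigma\setminus\{p_1,\dots,p_n\}$ and hence also $\partial^{D^h}\Psi^{*,h}=(\bar\partial^{D^h}\Psi)^{*,h}=0$, while $\Psi\wedge\Psi=0=\Psi^{*,h}\wedge\Psi^{*,h}$ for type reasons on a Riemann surface. The curvature of $\nabla=D^h+\Psi+\Psi^{*,h}$ therefore reduces to
\[F^{\nabla}=F^{D^h}+\Psi\wedge\Psi^{*,h}+\Psi^{*,h}\wedge\Psi,\]
so that flatness of $\nabla$ is equivalent to the self-duality equation $F^{D^h}+[\Psi\wedge\Psi^{*,h}]=0$ away from the punctures.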
The notion of a (tame) harmonic metric is motivated by the fact that there is a direct link to harmonic maps into hyperbolic 3-space: let $U\subset \Sigma\setminus\{p_1,\dots,p_n\}$ be open and $(s_1,s_2)\colon U\to\mathrm{SL}(2,\C)$ be a parallel frame with respect to the flat connection $\nabla.$ Then, the map
\[\begin{pmatrix}h(s_i,s_j)\end{pmatrix}_{i,j}\colon U\to\{A\in\mathrm{SL}(2,\C)\mid \bar A^T=A \text{ and } A>0\}\cong \mathbb H^3\]
is a harmonic map to hyperbolic 3-space $\mathbb H^3.$ Globally, this gives an equivariant harmonic map (with respect to the monodromy representation of the flat connection $\nabla$) from the universal covering of $\Sigma\setminus\{p_1,\dots,p_n\}$ into the hyperbolic 3-space. We can now state the non-abelian Hodge correspondence for the parabolic case (pNAH).
\begin{theorem}\cite{Si2}\label{NAHSi}
For every polystable strongly parabolic Higgs pair with singular divisor $D= p_1+\dots+p_n$ and parabolic weights $\alpha=(\alpha_1,\dots,\alpha_n)$, $\alpha_i\in(0,\tfrac{1}{2})$, there is a tame harmonic metric $h$. The tame harmonic metric is unique if the Higgs pair is stable. This induces a bijection between the space of polystable strongly parabolic Higgs pairs with singular divisor $D$ and parabolic weights $\alpha$ modulo isomorphisms and the space of totally reducible flat logarithmic connections with singularities at $p_j$ of conjugacy class \eqref{eq:locmon} modulo gauge transformations by associating
$$(\mathcal V,\mathcal P,\Psi)\mapsto \nabla:=D^h+\Psi+\Psi^{*,h}.$$
\end{theorem}
The parabolic structure induced by the logarithmic connection $\nabla=D^h+\Psi+\Psi^{*,h}$ is in general not isomorphic to the parabolic structure underlying the stable strongly parabolic Higgs field, but the parabolic weights are the same. Just as in the compact case, we obtain from a tame harmonic metric $h$ for a strongly parabolic Higgs pair $(\mathcal V,\mathcal P,\Psi)$ an associated $\C^*$-family of flat connections
\begin{equation}\label{eq:asslambdafami}
\lambda\in\C^*\mapsto \nabla^\lambda:=D^h+\lambda^{-1}\Psi+\lambda\Psi^{*,h}.\end{equation}
It follows from Simpson's construction \cite{Si2} (see also \cite{Si21}) that for all $\lambda\in\C^*$, the connections $\nabla^\lambda$ extend naturally to logarithmic connections on $\Sigma$ with the same parabolic weights as the initial parabolic structure $\mathcal P.$ In particular, the parabolic weights of $\nabla^\lambda$ are independent of $\lambda\in\C^*$.
\subsubsection{Rational weights}\label{ssec:rat}
In the case of rational parabolic weights
\[\alpha_1=\tfrac{l_1}{k_1},\dots,\alpha_n=\tfrac{l_n}{k_n}\in\mathbb Q\cap(0,\tfrac{1}{2}),\]
Theorem \ref{NAHSi} is directly linked to the non-abelian Hodge correspondence on compact Riemann surfaces through coverings of $\Sigma$, see \cite[Section 3 and Section 5]{NaSt} for details. The underlying geometric idea is that the equivariant harmonic map to hyperbolic 3-space obtained from the tame harmonic metric associated to a polystable strongly parabolic Higgs pair is actually the equivariant harmonic map associated to a polystable Higgs pair on some compact surface, see Figure \ref{fig:1}. We will explain the details here only in the case of $\Sigma$ being the $4$-punctured sphere and parabolic weights $t=\alpha_j=\tfrac{l}{k}$, $l,k\in\N,$ $2l<k$, but these constructions also work in the general case of rational weights, see \cite{NaSt}.
\begin{figure} \centering \includegraphics[width=1\textwidth]{im} \caption{ \footnotesize{Two views on an equivariant minimal surface in $\mathbb H^3$ obtained from a strongly parabolic nilpotent Higgs field on $\C P^1$ with four singular points and parabolic weight $t=\alpha_j=\tfrac{1}{6}$. Image by Nick Schmitt. }} \label{fig:1} \end{figure} Consider the $k$-fold covering of $\C P^1$ defined by the equation \begin{equation}\label{eq:sigma_k}\Sigma_k \colon y^k = \frac{(z-p_1)(z-p_3)}{(z- p_2)(z-p_4)},\end{equation} which is totally branched over $p_1,\dots,p_4$, and let $\pi=z\colon\Sigma_k\to\C P^1$ denote the covering map. Note that $\Sigma_k$ is a compact Riemann surface of genus $k-1$ and there is an action of the finite abelian group $\Z_k$ on $\Sigma_k$ such that $\Sigma_k/\Z_k=\C P^1.$ Let $\mathcal V\to\C P^1$ be the underlying holomorphic bundle of the parabolic structure $\mathcal P.$ By abuse of notation we also denote its pull-back bundle by $\mathcal V=\pi^*\mathcal V\to\Sigma_k$. We construct a new holomorphic bundle $\wh{\mathcal V}\to\Sigma_k$ as follows. Let $x\colon U_j\subset\Sigma_k\to\C$ be a local holomorphic coordinate centered at $\pi^{-1}(p_j)$ satisfying $\sigma^*x=e^{\tfrac{2\pi i}{k}}x$ for a generator $\sigma$ of the $\Z_k$-action. Let $g_j\colon\mathcal V_{\mid U_j}\to \C^2$ be a holomorphic $\mathrm{SL}(2,\C)$ trivialisation of $\mathcal V$ over $U$ such that $g_j(\ell_j)=\C(1,0)$, where $\ell_j$ is the parabolic line at $\pi^{-1}(p_j)$. Then, $\wh{\mathcal V}$ is the holomorphic bundle which is $\mathcal V$ over $\Sigma_k\setminus\pi^{-1}\{p_1,\dots,p_4\}$ and over $U_j$ the $\mathrm{SL}(2,\C)$ trivialisation $\wh{g}_j\colon \wh {\mathcal V} |_{U_j} \rightarrow \C^2$ is defined by \begin{equation}\label{def:twistbundle}(g_j\circ \wh{g}_j^{-1})|_{U_j\setminus \pi^{-1}(p_j)}=\begin{pmatrix} x^{-l}&0\\0&x^{l}\end{pmatrix}.\end{equation} Note that $\wh{\mathcal V}$ has trivial determinant bundle. Moreover, the pull-back of the Higgs field $\pi^*\Psi$ is given by \[g_j\circ\pi^*\Psi\circ g_j^{-1}=\begin{pmatrix} 0& a\\0&0\end{pmatrix} \frac{dx}{x}+A_1x^{k-1}dx+\text{ higher order terms in $x$}\] for some $a\in\C$ and $A_1,\dots\in\mathfrak{sl}(2,\C).$ Thus by conjugating with $g_j\circ \wh{g}_j^{-1}$ on $U_j\setminus \pi^{-1}(p_j)$ the Higgs field extends smoothly to $p_j$ and gives rise to $\wh\Psi\in H^0(\Sigma_k,K\mathfrak{sl}(\wh{\mathcal V})).$ By construction $\wh{\mathcal V}$ has a natural non-trivial $\Z_k$-action extending \[\wh\sigma\colon U_j \times \C^2\to U_j \times \C^2;\; (p,\begin{pmatrix} a\\b\end{pmatrix})\longmapsto (\sigma(p), \begin{pmatrix} e^{\tfrac{2\pi i l}{k}}a\\e^{\tfrac{-2\pi i l}{k}}b\end{pmatrix})\] in the trivialisation $\wh g_j,$ and $\wh\Psi$ is equivariant: \[\sigma^*\wh\Psi=\wh\sigma^{-1}\wh\Psi\wh\sigma.\] Furthermore, let $\nabla$ be a logarithmic connection with induced parabolic structure $\mathcal P$. Recall that this implies that \[\text{res}_{p_j}\nabla_{\mid \ell_j}=\tfrac{l}{k}\Id_{\ell_j}.\] A short computation similar to the case for a strongly parabolic Higgs field then shows that $\pi^*\nabla$ gives rise to an equivariant global holomorphic connection $\wh\nabla$ on $\wh{\mathcal V}\longrightarrow \Sigma_k$ \[\sigma^*\wh\nabla=\wh \nabla.\wh\sigma.\] We will refer to both operations $(\mathcal V,\mathcal P,\Psi)\longmapsto(\wh{\mathcal V},\wh\Psi)$ and $\nabla\longmapsto\wh\nabla$ as {\em twisted lifts}. 
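For the reader's convenience, we record two elementary checks. First, by Riemann--Hurwitz, the covering $\pi\colon\Sigma_k\to\C P^1$ is of degree $k$ and totally branched over the four points $p_1,\dots,p_4$, so
\[\chi(\Sigma_k)=k\,\chi(\C P^1)-4(k-1)=4-2k,\]
confirming that $\Sigma_k$ has genus $k-1$. Second, in the trivialisation $\wh g_j$ the leading term of the pulled-back Higgs field becomes
\[\begin{pmatrix} x^{l}&0\\0&x^{-l}\end{pmatrix}\begin{pmatrix} 0&a\\0&0\end{pmatrix}\begin{pmatrix} x^{-l}&0\\0&x^{l}\end{pmatrix}\frac{dx}{x}=\begin{pmatrix} 0&a\,x^{2l-1}\\0&0\end{pmatrix}dx,\]
which is holomorphic at $x=0$ since $l\geq1$; the remaining terms acquire at worst a factor $x^{-2l}$ and are of order at least $x^{k-1-2l}\,dx$, hence holomorphic as well because $2l<k$. This is the computation behind the smooth extension of $\pi^*\Psi$ to $\wh\Psi$ described above.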
Note that gauge equivalent strongly parabolic Higgs pairs (respectively, logarithmic connections) have gauge equivalent twisted lifts. In other words, if $\mathcal M(\Sigma_k)$ and $\mathcal M(\C P^1)$ denote the moduli spaces of (parabolic) Higgs bundles or flat (logarithmic) connections on $\Sigma_k$ and the 4-punctured $\C P^1$, respectively, then
\begin{equation}\label{eq:pistartwistedliftmap}\wh{ }\; \colon\mathcal M(\C P^1)\longrightarrow\mathcal M(\Sigma_k)\end{equation}
is a well-defined and smooth map between these moduli spaces. Every polystable strongly parabolic Higgs pair $(\mathcal P,\Psi)$ gives rise to a tame harmonic metric $h$, see Theorem \ref{NAHSi}. For rational weights this can be deduced directly from the fact that a stable and $\Z_k$-symmetric Higgs pair $(\wh{\mathcal V}, \wh \Psi)$ on $\Sigma_k$ gives rise to a harmonic metric $\wh h$ which is, by uniqueness of the solution, equivariant with respect to the $\Z_k$-action. Then twisting leads to an invariant harmonic metric $h$ on $\pi^*\mathcal V$ descending to the quotient $4$-punctured sphere. Using \eqref{def:twistbundle}, it can be shown that the metric $h$ on the quotient is tame. In particular, we obtain a commutative non-abelian Hodge diagram
\begin{equation}\label{commNAH}
\begin{tikzcd}
\{(\mathcal P,\Psi)\mid \mathrm{polystable}\}/\sim \arrow[rr, "\mathrm{twisted\,lift}"] \arrow[dd, "\mathrm{pNAH}"] && \{(\wh{\mathcal V} ,\wh\Psi)\mid \mathrm{polystable}\} /\sim\arrow[dd, "\mathrm{NAH}"] \\ \\
\{\nabla\mid \mathrm{logarithmic\, and\, totally\, reducible}\}/\sim\arrow[rr, "\mathrm{twisted\,lift}"] & &\{\wh\nabla\mid \mathrm{totally\, reducible}\}/\sim
\end{tikzcd}
\end{equation}
where $\sim$ denotes taking the quotient modulo isomorphism/gauge equivalence. This leads to the following theorem.
\begin{theorem}\label{thm:twistedlifttwistorlines}
Let $(\mathcal V,\mathcal P,\Psi)$ be a polystable strongly parabolic Higgs pair with associated family of flat logarithmic connections $\nabla^\lambda = D^h + \lambda^{-1}\Psi + \lambda\Psi^*$. Then, the associated family $D^{\wh h} + \lambda^{-1}\wh\Psi + \lambda\wh \Psi^*$ of the twisted lift $(\wh{\mathcal V},\wh\Psi)$ is gauge equivalent to the twisted lift $\wh{\nabla^\lambda}$ of the family $\nabla^\lambda$.
\end{theorem}
\subsection{The parabolic Deligne-Hitchin moduli space}
Analogously to the case of compact Riemann surfaces, Deligne-Hitchin moduli spaces can be defined in the strongly parabolic setup, see \cite{AlGo} and \cite{Si21} for more details. We restrict ourselves again to the $\mathrm{SL}(2,\C)$ case with fixed local conjugacy classes.
\begin{definition}
Let $\Sigma$ be a compact Riemann surface and $p_1,\dots,p_n\in\Sigma$ be pairwise distinct points.
A parabolic $\mathrm{SL}(2,\C)$ $\lambda$-connection with singular divisor $D=p_1+\dots+p_n$ is a quadruple $(\lambda,\mathcal V,\mathcal P,\mathcal D)$ consisting of a holomorphic $\mathrm{SL}(2,\C)$ bundle $\mathcal V$ over $\Sigma$ with a trace-free parabolic structure $\mathcal P$ and holomorphic trace-free $\lambda$-connection $\mathcal D$ on $\mathcal V\mid_{\Sigma\setminus\{p_1,\dots,p_n\}}$ such that with respect to any local holomorphic frame of $\mathcal V$ around $p_j,$ the $\lambda$-connection 1-form of $\mathcal D$ is meromorphic with first order pole at $p_j$ only, and such that the quasiparabolic line $\ell_j$ at $p_j$ is an eigenline of the residue $\text{res}_{p_j}\mathcal D$ with eigenvalue $(\lambda\alpha_j)$, where $\alpha_j$ is the parabolic weight at $p_j.$ \end{definition} For $\lambda\neq0$, $\tfrac{1}{\lambda}\mathcal D+\bar\partial^{\mathcal V}$ is a logarithmic connection with parabolic structure $\mathcal P$. For $\lambda=0,$ $\mathcal D$ is a strongly parabolic Higgs field for the parabolic structure $\mathcal P.$ A parabolic $\lambda$-connection is stable (semi-stable) if every $\mathcal D$-invariant holomorphic line subbundle has negative (non-positive) parabolic degree, and unstable otherwise. As before, we also have the notion of polystability and for $\lambda\neq0,$ there are no unstable parabolic $\lambda$-connections. An isomorphism between two parabolic $\lambda$-connections $(\lambda,\mathcal V_1,\mathcal P_1,\mathcal D_1)$ and $(\lambda,\mathcal V_2,\mathcal P_2,\mathcal D_2)$ is given by a $\mathrm{SL}(2,\C)$ gauge transformation $g$ between the two parabolic structures $(\mathcal V_1,\mathcal P_1)$ and $(\mathcal V_2,\mathcal P_2)$ such that $\mathcal D_2\circ g=g\circ \mathcal D_1.$ The only automorphisms of a stable parabolic $\lambda$-connection are $\pm\Id.$ In order to construct a moduli space, we fix the Riemann surface $\Sigma$, the divisor $D$ and the parabolic weights, but neither the holomorphic bundle nor the quasiparabolic lines. Then, the parabolic Hodge moduli space is the space of all polystable $\lambda$-connections with $\lambda \in \C$ modulo isomorphism. It is denoted by $\mathcal M_{Hod}^{D,\alpha_1,\dots,\alpha_n}(\Sigma)$, or by $\mathcal M_{Hod}^{par}(\Sigma)$ for short. It is a smooth complex manifold at its stable locus, and it fibers over $\C.$ Let $\nabla$ be a logarithmic $\mathrm{SL}(2,\C)$ connection on $\Sigma$ with singular divisor $D$ and induced parabolic structure $\mathcal P.$ Let $\bar\Sigma$ be the complex conjugate Riemann surface, with divisor $\bar D=p_1+\dots+p_n.$ Then $\nabla$ is a flat connection on $\bar\Sigma\setminus\{p_1,\dots,p_n\}$. 
By Deligne extension, there is a logarithmic $\mathrm{SL}(2,\C)$ connection $\widetilde\nabla$ on $\bar\Sigma$ with the same parabolic weights as $\nabla,$ which is gauge equivalent to $\nabla$ on the smooth surface $\Sigma\setminus\{p_1,\dots,p_n\}=\bar\Sigma\setminus\{p_1,\dots,p_n\}.$ More explicitly, choose a local holomorphic coordinate around $p_j \in \Sigma$ and a trivialization of $\mathcal V$ such that the connection $\nabla$ is given by
$$\nabla= d+ \begin{pmatrix} \alpha_j&0\\0&-\alpha_j\end{pmatrix}\tfrac{dz}{z}.$$
Applying the gauge $g_j=\text{diag}((z\bar z)^{-\alpha_j},(z\bar z)^{\alpha_j})$ in this trivialization, the logarithmic connection $\wt \nabla$ on $\bar\Sigma$ is given by
\[\wt \nabla = \nabla.g_j = d+ \begin{pmatrix} -\alpha_j&0\\0&\alpha_j\end{pmatrix}\tfrac{d\bar z}{\bar z}\]
around $\bar p_j \in \bar\Sigma$ in the same trivialization (indeed, conjugation by the diagonal matrix $g_j$ leaves the connection 1-form unchanged, and $g_j^{-1}dg_j=\text{diag}(-\alpha_j,\alpha_j)\left(\tfrac{dz}{z}+\tfrac{d\bar z}{\bar z}\right)$). It is easy to check that this gives a well-defined map from the moduli space of logarithmic connections $\mathcal M_{dR}^{D,\alpha_1,\dots,\alpha_n}(\Sigma)$ on $\Sigma$ to the moduli space of logarithmic connections $\mathcal M_{dR}^{\bar D,\alpha_1,\dots,\alpha_n}(\bar\Sigma) $ on $\bar\Sigma.$ Furthermore, this map can be extended to $\lambda \in \C^*$ to identify the Hodge moduli spaces of $\Sigma$ and $\bar \Sigma$ restricted to $\C^*$
\[\mathcal G\colon\mathcal M_{Hod}^{par}(\Sigma)\mid_{\C^*}=\C^*\times \mathcal M_{dR}^{D,\alpha_1,\dots,\alpha_n}(\Sigma) \, \longrightarrow \,\C^*\times \mathcal M_{dR}^{\bar D,\alpha_1,\dots,\alpha_n}(\bar\Sigma)=\mathcal M_{Hod}^{par}(\bar\Sigma)\mid_{\C^*}.\]
As in the smooth case, $\mathcal G$ maps the stable (and smooth) locus to the stable (and smooth) locus. In this way, we obtain the parabolic Deligne-Hitchin moduli space
\[\mathcal M_{DH}^{par}(\Sigma,D,\alpha_1,\dots,\alpha_n)=\mathcal M_{Hod}^{par}(\Sigma)\cup_\mathcal G \mathcal M_{Hod}^{par}(\bar\Sigma)\longrightarrow\C P^1.\]
If $\alpha_1=\dots=\alpha_n=t$ we will denote the space by $\mathcal M_{DH}^{par}(\Sigma,D,\alpha_1,\dots,\alpha_n)=\mathcal M_{DH}^{par}(\Sigma,D,t).$ The parabolic Deligne-Hitchin moduli space is naturally equipped with a holomorphic $\C^*$-action covering
\[\mu\in\C^*\mapsto (\lambda\in\C P^1\mapsto \mu\lambda\in\C P^1).\]
As before, we denote the automorphism obtained by multiplication with $\mu=-1$ by $N.$ Furthermore, there exists a natural real structure $C$. In order to define $C$ we recall that the underlying complex rank 2 vector bundle has a fixed $C^\infty$ trivialization $V=\Sigma\times\C^2$ with complex conjugation $c\colon V\to \bar V\cong V.$ For a linear differential operator $A$ let $\bar A:=c^{-1}\circ A\circ c.$ Thus if $A$ is complex linear, $\bar A$ is also complex linear but in general not gauge equivalent to $A$.
If $\bar\partial$ is a holomorphic structure on $V\to\Sigma$, then $\overline{\bar\partial}$ is a holomorphic structure on $V\to\bar\Sigma.$
\begin{lemma}\label{lem:cmap}
If $(\lambda,\bar\partial, \mathcal P,\mathcal D)$ is a parabolic $\lambda$-connection on $\Sigma$ with singular divisor $D=p_1+\dots+p_n$, then
\[(\bar\lambda,\overline{\bar\partial}, \bar{\mathcal P}, \overline{\mathcal D})\]
is a parabolic $\lambda$-connection on $\bar\Sigma$, where $\bar{\mathcal P}$ is given by the quasiparabolic lines $\bar\ell_j=c(\ell_j)$ and the parabolic weights $\bar\alpha_j=\alpha_j.$ If two parabolic $\lambda$-connections are isomorphic via a gauge $g$ then their complex conjugate $\lambda$-connections on $\bar\Sigma$ are isomorphic via the gauge $c^{-1}\circ g\circ c.$ In particular, there exists an anti-holomorphic involution $C$ of $\mathcal M_{DH}^{par}$ covering $\lambda\mapsto\bar\lambda^{-1}$ and commuting with $N.$
\end{lemma}
\begin{example}
Let $(\lambda,\bar\partial, \mathcal P,\mathcal D)$ be a parabolic $\lambda$-connection on $\Sigma$ with $\lambda\in S^1$ such that $\bar\partial+\tfrac{1}{\lambda}\mathcal D$ is unitary. Then, $[\lambda,\bar\partial, \mathcal P,\mathcal D]\in \mathcal M_{DH}^{par}$ is a fixed point of $C.$
\end{example}
For a given polystable strongly parabolic Higgs pair $p=(\mathcal P,\Psi)$ together with its tame harmonic metric $h$, the associated family of flat connections \eqref{eq:asslambdafami} gives rise to a holomorphic section $s_p$ of $\mathcal M_{DH}^{par}$ called a twistor line, see \cite[Theorem 2.7]{Si21} for details. With respect to the real structure $\mathcal T=CN=NC$ this section is real, i.e., $\mathcal T(s_p(-\bar\lambda^{-1}))=s_p(\lambda)$ for all $\lambda\in\C^*$. We now discuss necessary and sufficient conditions for a real holomorphic section $s$ of $\mathcal M_{DH}^{par}$ to be a twistor line. This is motivated by (and analogous to) the compact case described in Section \ref{pre}.
\begin{lemma}\label{lem:sufsec}
Let $\epsilon>0$. Let $\lambda\in \D_{1+\epsilon}^*\mapsto \nabla^\lambda$ be a holomorphic family of irreducible logarithmic $\mathrm{SL}(2,\C)$ connections with first order pole at $\lambda = 0$ and $\Psi=\text{res}_{\lambda = 0} \nabla^\lambda \in \Gamma(K_\Sigma \mathfrak{sl} (\mathcal V(\lambda)))$ on a family of holomorphic bundles $\mathcal V(\lambda)\to\Sigma$ with $\lambda$-independent parabolic divisor $p_1+\dots+p_n$ and parabolic weights $\alpha_j\in (0,\tfrac{1}{2})$. Assume that there exists $g=g(\lambda)\colon\Sigma\setminus\{p_1,\dots,p_n\}\to \Lambda\mathrm{SL}(2,\C)$ such that
\begin{equation}\label{eq:realityconditionpara}\overline{\nabla^{-\bar\lambda^{-1}}}=\nabla^\lambda.g(\lambda)\,.\end{equation}
Then $\nabla^\lambda$ gives rise to a real holomorphic section $s$ of the parabolic Deligne-Hitchin moduli space.
\end{lemma}
\begin{proof}
Note that $\lambda\mapsto\nabla^\lambda$ gives rise to a local section of $\mathcal M_{DH}^{par}$ over $\D_{1+\epsilon}^*$ via
\[\D_{1+\epsilon}^*\times \mathcal M_{dR}^{D,\alpha_1,\dots,\alpha_n}\subset\mathcal M_{Hod}^{par}(\Sigma)\mid_{\C^*}\subset \mathcal M_{Hod}^{par}(\Sigma)\subset \mathcal M_{DH}^{par}(\Sigma).\]
We first extend this section to $\lambda=0.$ Consider the holomorphic structure $\bar\partial_{\mathcal V}$. Then, $\Psi$ is meromorphic with at most first order poles at $p_1,\dots,p_n$. By assumption the eigenvalues $\pm\alpha_j$ of $\text{res}_{p_j}\nabla^\lambda$ are independent of $\lambda.$ This implies that $\text{res}_{p_j}\Psi$ is nilpotent for all $j= 1,\dots, n$.
We define the quasiparabolic line at $p_j$ as follows: if $\text{res}_{p_j}(\Psi)\neq0$, then $\ell_j:=\text{ker}(\text{res}_{p_j}(\Psi)),$ else we define the line to be $\ell_j:=\text{Eig}((\text{res}_{p_j}\nabla)\mid_{\lambda=0},\alpha_j).$ This gives rise to a parabolic structure $\mathcal P$ on $\mathcal V$ with $\Psi$ being a compatible strongly parabolic Higgs field. To extend the section to $\lambda= 0,$ we need to ensure that the strongly parabolic Higgs pair $(\mathcal P, \Psi)$ is polystable.
\begin{remark}
In this paper, all $\D^*_{1+\varepsilon}$-families of logarithmic connections considered will induce stable strongly parabolic Higgs pairs at $\lambda= 0$. Thus there is no further gauge necessary to obtain a proper section of the Deligne-Hitchin moduli space.
\end{remark}
Assume that the parabolic Higgs pair $(\mathcal P, \Psi)$ is unstable. Then there exists an invariant holomorphic line subbundle $L$ with respect to $\bar\partial_{\mathcal V}$ (on $\Sigma$) of positive parabolic degree. Consider a complementary complex line bundle $\widetilde L$, which in general is not holomorphic with respect to $\bar\partial_{\mathcal V}$. Define the holomorphic family of gauge transformations by $\lambda\in\C^*\mapsto \wt g(\lambda)=\text{diag}(1,\lambda)$ with respect to the $C^\infty$ decomposition $V=L\oplus \widetilde L.$ Then, it can be shown analogously to \cite[Section 1.4]{HH} that the family of logarithmic connections
\begin{equation}\label{eq:twist}\widetilde\nabla^\lambda:=\nabla^\lambda.\wt g(\lambda)=\lambda^{-1}\widetilde\Psi+\widetilde\nabla+ \text{higher order terms in } \lambda\end{equation}
is gauge equivalent to $\nabla^\lambda$ for $\lambda\neq 0$ and gives rise to a stable parabolic structure at $\lambda = 0$ with strongly parabolic Higgs field $\wt \Psi.$ If the parabolic Higgs pair $(\mathcal P , \Psi)$ is semi-stable but not polystable, we can replace the Higgs pair at $\lambda = 0$ by a polystable pair in the same gauge orbit. (This case does not lead to twistor lines, but it also does not occur for the real holomorphic sections constructed in this paper. In fact, either the parabolic Higgs field is non-vanishing and induces a stable parabolic Higgs pair by Lemma \ref{lem:stabhiggspar}, or the Higgs field vanishes and the section of the parabolic Deligne-Hitchin moduli space is induced by a unitary logarithmic connection by Theorem \ref{blowuplimit} below.) To extend the section to $\C P^1$, recall that the underlying $C^\infty$ trivialization of the holomorphic bundles $\mathcal V(\lambda)$ is of fixed type, so that the complex conjugation $c$ is well-defined. Then, by Lemma \ref{lem:cmap} and \eqref{eq:realityconditionpara}, $s$ extends holomorphically to $\{\lambda \in \C \cup \{\infty\} \mid |\lambda| >\tfrac{1}{1+\varepsilon}\}$, i.e., $s$ gives rise to a global holomorphic section of $\mathcal M_{DH}^{par}(\Sigma)\to\C P^1$, which is real by \eqref{eq:realityconditionpara}.
\end{proof}
We call the transformation $\nabla^\lambda\mapsto\widetilde\nabla^\lambda$ in \eqref{eq:twist} the {\em twist} of the family $\nabla^\lambda.$ The twist transformation can also be applied if the Higgs pair is stable. In fact, assume that $\nabla^\lambda$ is the associated family of a tame harmonic metric for some nilpotent strongly parabolic Higgs field, which can be interpreted as an equivariant conformal harmonic map $f$, i.e., a minimal surface, into the hyperbolic 3-space.
Then, the twisted family $\wt\nabla^\lambda$ corresponds to the associated family of the equivariant harmonic conformal Gauss map of $f$ into the de Sitter 3-space, see \cite{HH}. In the following, we will always assume, without loss of generality, that we start with a family of logarithmic connections $\nabla^\lambda$ inducing a stable (or polystable) strongly parabolic Higgs pair, without applying the twist transformation first. In particular, if $\nabla^\lambda=\lambda^{-1}\Psi+\nabla+\lambda\Psi_1+\dots$ such that the determinant of the strongly parabolic Higgs field $\Psi$ has only simple zeros or poles, the corresponding Higgs pair must be stable, as it cannot have a holomorphic $\Psi$-invariant line subbundle.
\begin{proposition}\label{pro:nectwist}
Let $s$ be the real holomorphic section of $\mathcal M_{DH}^{par}$ given by a family of irreducible logarithmic connections $\nabla^\lambda$ as in Lemma \ref{lem:sufsec} together with the gauge $g$ satisfying \eqref{eq:realityconditionpara}. Then, necessary conditions for $s$ to be a twistor line are
\begin{itemize}
\item $g_x$ lies in the big cell for all $x\in\Sigma\setminus\{p_1,\dots,p_n\},$ i.e., $g_x=g_x^+g_x^-$ for some $g_x^+\in \Lambda_+\mathrm{SL}(2,\C)$ and $g_x^-\in \Lambda_-\mathrm{SL}(2,\C),$
\item $g(\lambda)\overline{g(-\bar\lambda^{-1})}=-\mathrm{Id}.$\end{itemize}
\end{proposition}
\begin{proof}
If the family of logarithmic connections $\nabla^\lambda$ induces a twistor line, then $\nabla^\lambda$ is gauge equivalent to the associated family of flat connections of a self-duality solution \eqref{eq:asslambdafami} by a positive gauge $\wt g$, i.e., $\wt g_x \in \Lambda_+\mathrm{SL}(2,\C)$ for all $x\in\Sigma\setminus\{p_1,\dots,p_n\}$. Since by Example \ref{exa:negative} associated families are negative sections and the gauge is given by \eqref{g=delta}, we obtain by \eqref{eq:realityconditionpara} and by irreducibility of $\nabla^\lambda$ that $\wt g^{-1}g\in\Lambda_-\mathrm{SL}(2,\C).$ The second condition can be derived as in the compact case; for details see \cite[Section 1.3]{HH}.
\end{proof}
The next proposition shows that being a twistor line is an open condition.
\begin{proposition}\label{pro:sufftwist}
Let $s$ be a twistor line of $\mathcal M_{DH}^{par}(\Sigma,D,\alpha_1,\dots,\alpha_n)$ with rational parabolic weights $\alpha_j\in(0,\tfrac{1}{4})\cap \mathbb Q$. Then there exists an open neighborhood $U$ around $s$ in the space of real holomorphic sections of $\mathcal M_{DH}^{par}(\Sigma,D,\alpha_1,\dots,\alpha_n)$ such that every $\widetilde s\in U$ is a twistor line.
\end{proposition}
\begin{proof}
Consider the covering surface $\wh\Sigma\to\Sigma$ which branches over $p_1,\dots,p_n$ with the appropriate orders. Over $\wh\Sigma$ we obtain, after applying the twisted lift construction, a holomorphic family of smooth connections
$$\wh\nabla^\lambda=\lambda^{-1}\wh\Psi+\wh\nabla+ \text{ higher order terms in }\lambda,$$
see Theorem \ref{thm:twistedlifttwistorlines}, such that the induced Higgs pair at $\lambda = 0$ is stable.
Since $s$ is a twistor line of $\mathcal M_{DH}^{par}(\Sigma,D,\alpha_1,\dots,\alpha_n)$, the family $\wh\nabla^\lambda$ on $\wh\Sigma$ gives rise to a twistor line of $\mathcal M_{DH}(\wh\Sigma)$ by Theorem \ref{thm:twistedlifttwistorlines}, and the gauge $g$ with
$$\wh\nabla^\lambda.g=\overline{\wh\nabla^{-\bar\lambda^{-1}}}$$
possesses a corresponding global loop group factorization $g=g^+g^-$ on $\wh\Sigma.$ Let $\widetilde s$ be another real holomorphic section of $\mathcal M_{DH}^{par}(\Sigma,D,\alpha_1,\dots,\alpha_n)$ lying in an appropriate open neighborhood of $s$. Then its twisted lift $\wh{\widetilde s}$ is a holomorphic section of $\mathcal M_{DH}(\wh\Sigma)$ which can be represented by a family $\wh {\wt\nabla}^\lambda$ of flat and smooth connections on $\wh\Sigma.$ The family $\wh {\wt\nabla}^\lambda$ can be chosen in such a way that for all $\lambda \in K$, where $K$ is a compact subset of $\C^*$ containing the unit circle, $\wh {\wt\nabla}^\lambda$ lies in an appropriate open neighborhood of $\wh\nabla^\lambda$ in the space of flat connections. Consequently, the family of $\mathrm{SL}(2,\C)$ gauge transformations $\wt g$ satisfying
$$\wh {\wt\nabla}^\lambda.\wt g(\lambda)=\overline{\wh {\wt\nabla}^{-\bar\lambda^{-1}}}$$
lies in an open neighborhood of $g$ for all $\lambda\in \{\lambda\in\C\mid \lambda\in K,-\bar\lambda^{-1}\in K\}.$ By Theorem \ref{thm:birk}, we obtain that $\wt g$ also lies in the big cell, and using the construction in Section \ref{sec:reconstruction} we obtain that $\wh {\wt s}$ is a twistor line of $\mathcal M_{DH}(\wh\Sigma)$. Then Theorem \ref{thm:twistedlifttwistorlines} gives that $\wt s$ is also a twistor line of $\mathcal M_{DH}^{par}(\Sigma,D,\alpha_1,\dots,\alpha_n)$ as claimed.
\end{proof}
\begin{remark}\label{rem:sufftwist}
Proposition \ref{pro:sufftwist} also holds for non-rational parabolic weights. More generally, the same conclusion holds if we allow the (positive) parabolic weights as well as the conformal structure of $\Sigma$ and the singular points $p_1,\dots,p_n$ to vary. This follows from the results of \cite{Si} combined with \cite{KiWi}. In fact, as a generalization of Theorem \ref{connectedcomponent}, the space of twistor lines is open and closed in the space of real holomorphic sections.
\end{remark}
\subsection{The symplectic form on the space of logarithmic connections}
The symplectic form on the moduli space of flat connections over compact surfaces \cite{AB,Gold} has been generalized to moduli spaces of flat connections with prescribed local conjugacy classes over punctured Riemann surfaces, see for example \cite{AlMa} or \cite{Audin}. We provide a short self-contained account of the construction of this symplectic form, and describe its relationship to the symplectic form on an appropriate compact covering surface in the case of rational parabolic weights. Let $\Sigma$ be a compact Riemann surface and $p_1,\dots,p_n\in\Sigma$ be pairwise distinct points. Fix $\mathrm{SL}(2,\C)$ conjugacy classes
\[\mathcal C_1,\dots,\mathcal C_n\]
of diagonal matrices $C_1,\dots,C_n\in\mathfrak{sl}(2,\C)$ at the punctures such that the positive eigenvalue of each $C_j$ is contained in $(0,\tfrac{1}{2}).$ Let $z_j$ be a centered holomorphic coordinate at $p_j$ for $j=1,\dots,n$.
For $\mathcal C_1,\dots,\mathcal C_n$ fixed let $\mathcal A$ denote the infinite dimensional space of all flat $\mathrm{SL}(2,\C)$ connections $\nabla$ on $\Sigma^0:=\Sigma\setminus\{p_1,\dots,p_n\}$ which are of the form \begin{equation} \label{nfconn} \nabla=A_j\frac{dz_j}{z_j}+\text{smooth connection},\end{equation} where $z_j$ is a centered coordinate around $p_j$ and $A_j\in\mathcal C_j$. \begin{lemma}\label{lem:tnablaxia} Let $X\in\Omega^1(\Sigma^0,\mathfrak{sl}(2,\C))$ be a tangent vector to $\nabla\in\mathcal A.$ Then, there exists smooth $\xi\in\Gamma(\Sigma,\mathfrak{sl}(2,\C))$ and $\wh X\in\Omega^1(\Sigma,\mathfrak{sl}(2,\C))$ such that \[X=d^\nabla \xi+\wh X \and d^\nabla\wh X=0\] on the punctured surface $\Sigma^0.$ \end{lemma} \begin{proof} Since $X$ is a tangent vector at $\nabla$ to the infinite dimensional space of flat connections, we have in particular $d^\nabla X = 0.$ Moreover, $X$ preserves the form \eqref{nfconn}, and therefore we can write \[X=[A_j,\xi_j]\frac{dz_j}{z_j}+\widetilde X\] around $p_j$ for appropriate $\xi_j\in\mathfrak{sl}(2,\C)$ and a smooth $\widetilde X$ on $\Sigma.$ Let $\xi\in\Gamma(\Sigma,\mathfrak{sl}(2,\C))$ be a section with $\xi(p_j)=\xi_j.$ Then the splitting \[X=d^\nabla\xi+(X-d^\nabla\xi)\] is of the required form. \end{proof} \begin{lemma}\label{lem:compsympbound} Let $\nabla\in \mathcal A$ with $A_j$ as in \eqref{nfconn} and consider two tangent vectors $$X=d^\nabla \xi+\wh X, \quad Y=d^\nabla \mu+\wh Y\in T_{\nabla}\mathcal A$$ for $\xi,\mu\in\Gamma(\Sigma,\mathfrak{sl}(2,\C))$ and $\wh X,\wh Y\in\Omega^1(\Sigma,\mathfrak{sl}(2,\C))$ as in Lemma \ref{lem:tnablaxia}. Then \[\tfrac{1}{8\pi }\int_{\Sigma^0}\tr\left (X\wedge Y\right)=\tfrac{1}{8\pi }\int_\Sigma\tr\left(\wh X\wedge\wh Y\right) - i\sum_{j=1}^n \frac{1}{8\tr(A_j^2)} \tr(A_j[\text{Res}_{p_j}(X),\text{Res}_{p_j}(Y)]\,] ).\] \end{lemma} \begin{proof} Consider the punctured Riemann surface $\Sigma^0$ and centered holomorphic coordinates $z_j$ at the punctures $p_j.$ For $t>0$ small let $$\gamma^j_t\colon S^1\longrightarrow \Sigma^0; \quad e^{2\pi i \varphi}\longmapsto (z_j)^{-1}(te^{2\pi i \varphi}).$$ When splitting $X, Y$ according to $$X=d^\nabla \xi+\wh X, \quad Y=d^\nabla \mu+\wh Y$$ we obtain \[\int_{\Sigma^0}\tr\left (X\wedge Y\right) = \int_{\Sigma^0}\tr\left (\wh X\wedge \wh Y\right) + \tr\left (d^\nabla \xi \wedge \wh Y\right) + \tr\left (\wh X\wedge d^\nabla \mu \right) +\tr\left (d^\nabla \xi \wedge d^\nabla \mu\right).\] For the second term in the above expression we have \begin{equation} \begin{split} \int_{\Sigma^0}\tr\left (d^\nabla \xi \wedge \wh Y\right) &=\int_{\Sigma^0}d\,\tr(\xi \wh Y)-\int_{\Sigma^0}\tr(\xi (d^\nabla \wh Y ))\\ &=-\lim_{t\to 0} \sum_j\int_{\gamma^j_t}\tr(\xi \wh Y)=0 \end{split} \end{equation} since $d^\nabla \wh Y = 0$ and $\wh Y, \xi$ are both smooth on $\Sigma.$ Analogously, we have \[\int_{\Sigma^0}\tr(\wh X\wedge d^\nabla \mu) =0.\] For the last term we compute \begin{equation} \begin{split} \int_{\Sigma^0}\tr(d^\nabla\xi\wedge d^\nabla \mu)&=\int_{\Sigma^0}d\,\tr(\xi d^\nabla \mu) =-\lim_{t\to 0} \sum_j\int_{\gamma^j_t}\tr(\xi d^\nabla\mu)\\ &=-\lim_{t\to 0} \sum_j\int_{\gamma^j_t}\tr(\xi [A_j,\mu] \frac{dz_j}{z_j})-\lim_{t\to 0} \sum_j\int_{\gamma^j_t}\tr(\text{something smooth})\\ &=-2\pi i\sum_j \tr(\xi(p_j) [A_j,\mu(p_j)] )=2\pi i\sum_j \tr(A_j[\xi(p_j) ,\mu(p_j)] )\\ &=-2\pi i\sum_j \frac{1}{2\,\tr(A_j^2)}\tr(A_j[\,[A_j,\xi(p_j)] ,[A_j,\mu(p_j)]\,] )\\ &=-2\pi i\sum_j 
\frac{1}{2\,\tr(A_j^2)}\tr(A_j[\text{Res}_{p_j}(X),\text{Res}_{p_j}(Y)]\, )
\end{split}
\end{equation}
as claimed.\end{proof}
Define a holomorphic complex bilinear and skew-symmetric form on the (infinite dimensional) tangent space $T_{\nabla}\mathcal A$ by
\begin{equation}\label{def:symf}
\begin{split}
\mathcal O \colon &T_{\nabla}\mathcal A\times T_{\nabla}\mathcal A\to \C\\
&(X,Y)\mapsto \tfrac{1}{8\pi }\int_{\Sigma^0}\tr(X\wedge Y)+ i\sum_j \frac{1}{8\,\tr((\text{Res}_{p_j}(\nabla))^2)} \tr(\, \text{Res}_{p_j}(\nabla)[ \text{Res}_{p_j}(X),\text{Res}_{p_j}(Y)]\, ).
\end{split}
\end{equation}
From Lemma \ref{lem:compsympbound} we then obtain:
\begin{corollary}\label{cor:vanishing}
For $d^\nabla\xi,Y\in T_{\nabla}\mathcal A$ we have
\[\mathcal O(d^\nabla \xi,Y) = 0.\]
Therefore, the bilinear form $\mathcal O$ descends to a well-defined holomorphic 2-form on the quotient of $\mathcal A$ by gauge transformations.
\end{corollary}
It can be shown that $\mathcal O$ is indeed a symplectic form.
\begin{remark}
If the infinitesimal deformation of the flat connection preserves the holomorphic structure, i.e., the tangent vector fields $X,$ $Y$ are both meromorphic, then the first summand of $\mathcal O$ in \eqref{def:symf} vanishes and only the residue terms remain. For $A\in \mathcal C_j$, the complex skew bilinear form
\[X=[A,\xi],Y=[A,\mu]\in T_{A} \mathcal C_j\longmapsto \tr(A[\xi,\mu])=-\frac{1}{2\tr(A^2)}\tr(A[X,Y])\]
is well-defined and known as the Kirillov symplectic form on the adjoint orbit $\mathcal C_j$.
\end{remark}
\subsubsection{Rational weights}
Assume that the parabolic weights are all rational. Then, the symplectic form $\mathcal O$ and the Goldman symplectic form coincide up to scaling and taking the twisted lift. We give the proof in the case of the 4-punctured sphere, with rational weights $\alpha_1=\dots=\alpha_4=\tfrac{l}{k}.$ As before, consider the $k$-fold covering $\Sigma_k\to\C P^1$ given by \eqref{eq:sigma_k} which totally branches over the singular points.
\begin{proposition}\label{pro:sympupdown}
Let $\mathcal M(\C P^1)$ be the space of logarithmic connections on $\C P^1$ with singular points $p_1,\dots,p_4$ and rational weights $\alpha_1=\dots=\alpha_4=\tfrac{l}{k}$ equipped with its symplectic form $\mathcal O$. Let $\pi\colon\Sigma_k\to\C P^1$ be the $k$-fold covering given by \eqref{eq:sigma_k} which totally branches over the singular points, and let $\mathcal M(\Sigma_k)$ be the moduli space of flat connections on $\Sigma_k$ with Goldman symplectic form $\Omega.$ Then
\[32\pi \,k\,\mathcal O=\Omega.\]
\end{proposition}
\begin{proof}
By Corollary \ref{cor:vanishing}, $\mathcal O$ is well-defined on the moduli space $\mathcal M(\C P^1)$, and for tangent vectors $X,Y,$ we can choose representatives that vanish in an open set $U \subset \C P^1$ containing all the singular points. Then, only the first summand in \eqref{def:symf} contributes to $\mathcal O(X,Y).$ On the other hand, the pull-backs $\pi^*X$ and $\pi^*Y$ vanish on $\pi^{-1}(U)$, and the tangent vectors $\wh X$ and $\wh Y$ of $\mathcal M(\Sigma_k)$ are represented by $\pi^*X$ and $\pi^*Y$ in the trivialization of the holomorphic bundle $\wh{\mathcal V}\cong\mathcal V$ over $\Sigma_k\setminus\pi^{-1}\{p_1,\dots,p_4\}$. Since $\pi\colon\Sigma_k\to\C P^1$ is of degree $k,$ and due to the different scalings in \eqref{GoldmanO} and \eqref{def:symf}, the result follows.
\end{proof}
\begin{remark}\label{rem:sympupdown}
Proposition \ref{pro:sympupdown} directly generalizes to $\lambda$-connections. As a consequence, we are able to compute the twisted holomorphic symplectic form \eqref{deftwistedOmega2} on the open dense subset of $\mathcal M_{DH}(\C P^1,p_1+\dots+p_4,t)$ consisting of Fuchsian $\lambda$-connections.
\end{remark}
\section{Initial conditions at $t=0$}\label{Ansatz}
In this section we write down $\lambda$-dependent connection 1-forms on the 4-punctured sphere $\C P^1\setminus\{p_1,\dots,p_4\}$ depending on a parabolic weight $\alpha=t \in [0, \tfrac{1}{4})$ and a strongly parabolic Higgs field. We then explicitly compute the initial conditions at $t=0$ which we will deform via an implicit function theorem argument in Section \ref{IFT} to obtain equivariant tame harmonic maps from $\C P^1\setminus\{p_1,\dots,p_4\}$ into $\H^3$ for $t>0$.
\subsection{The potential}
To construct real holomorphic sections explicitly, we restrict to the case where the underlying holomorphic structure is trivial for all $\lambda \in \mathbb D_a=\{\lambda\mid |\lambda|^2< a^2\}$ for some $a >1.$ This restriction is motivated by the fact that an open and dense subset of the moduli space of strongly parabolic Higgs fields (respectively, logarithmic connections) has trivial underlying holomorphic bundle, see Proposition \ref{pro:classparahiggs} for strongly parabolic Higgs fields (and Lemma \ref{lemOK1} for logarithmic connections). In general (e.g., for a large Higgs field with trivial underlying holomorphic structure), the associated family of flat connections does not have trivial underlying holomorphic bundle for all $\lambda$ in the punctured unit disc. On the other hand, for fixed weight $t$ and fixed compact subset $K$ of the $\lambda$-spectral plane, we will show that there is a constant $C$ such that for any strongly parabolic Higgs field with trivial underlying holomorphic structure and norm less than $C$, the associated logarithmic connection $\nabla^\lambda$ is Fuchsian for all $\lambda\in K\setminus\{0\}$. This will follow from the continuous dependence of the solution on the parabolic Higgs data (with trivial underlying holomorphic bundle), together with the fact that for vanishing Higgs field the associated family is constant (and Fuchsian). A {\em potential} is given by a holomorphic and complex linear loop algebra-valued 1-form
\[\eta\in\Omega^{1,0}(\C P^1\setminus\{p_1,\dots,p_4\},\Lambda \mathfrak{sl}(2,\C))\]
with
\[(\lambda \eta)\in \Omega^{1,0}(\C P^1\setminus\{p_1,\dots,p_4\},\Lambda_+\mathfrak{sl}(2,\C)),\]
where $\Lambda \mathfrak{sl}(2,\C)$ and $\Lambda_+\mathfrak{sl}(2,\C)$ are as in Section \ref{sec:loops} and $(\lambda \eta)$ denotes pointwise multiplication. Motivated by Lemma \ref{lem:sufsec} and its proof we call the residue at $\lambda=0$
\[\Psi=\eta_{-1}:=\text{Res}_{\lambda=0} (\eta)\]
the parabolic Higgs field of the potential $\eta$. On the $4$-punctured sphere we further specialize to $\eta$ being a Fuchsian system for every $\lambda \in \D_{a}\setminus\{0\}$, i.e., we consider potentials of the form
$$\eta = \sum_{j = 1}^4 A_j \frac{dz}{z-p_j} \quad \text{ satisfying }\quad \sum_{j= 1}^4 A_j = 0$$
with $z$ being the affine coordinate on $\C P^1$ and $(\lambda A_j) \in \Lambda_+\mathfrak {sl}(2, \C)$.
To be more explicit, the coefficients of $A_j$, as functions of $\lambda,$ are in the functional space $\cal{W}^{\geq -1}_{a}$ where for $l\in\Z$ \[\cal{W}^{\geq l}_{a}:=\left\{h=\sum_{k=l}^\infty h_k \lambda^k \;\text{ such that } \; \sum_{k=l}^\infty |h_k| a^{|k|} < \infty \right\}.\] For $l\geq0$ we obtain the space of absolutely convergent power series in the disk of radius $a>1$. We denote the subspace of convergent power series with vanishing constant term $h_0$ by $\cal{W}^{+}_{a}.$ Whenever the dependence on a particular $a$ is not important, we will omit this index. More generally, $\cal{W}_{a}$ denotes the space of convergent Laurent series on the annulus $\mathbb A_a := \{\tfrac{1}{a^2}<|\lambda|^2< a^2\}.$ Every element $h \in \mathcal W =\mathcal W_a$ can be decomposed into its positive part $h^+\in \mathcal W^+$, its constant part $h^0 := h _0$ and its negative part $h^-= h- h^+ -h^0.$ \begin{remark} Even though the choice of $a>1$ is irrelevant for the application of the implicit function theorem for $t\sim0,$ we expect that for larger $t$ and general strongly parabolic Higgs field (with trivial underlying holomorphic structure), the associated family of flat connection is not Fuchsian for all $\lambda$ satisfying $|\lambda|^2\leq a^2.$ Instead, we expect unstable logarithmic connections (where the power series expansion in $\lambda$ will necessarily have poles) moving into the unit disc within the $t$-deformation. This is in contrast to the case of the equations describing (equivariant) harmonic maps into the 3-sphere, which differ from the self-duality equation only by a sign, where unstable points cannot cross the unit circle, see \cite{HH3}. \end{remark} To fix further notations let $\C^+$ denote the upper-right quadrant of the plane $$\C^+=\{z\in\C\,\mid\, \Re(z)>0,\Im(z)>0\}.$$ Let $p\in\C^+$ and consider the 4-punctured sphere $$\Sigma=\Sigma_p=\C P^1\setminus\{p_1,p_2,p_3,p_4\}$$ with \begin{equation}\label{pointsonp}p_1=p,\quad p_2=-1/p,\quad p_3=-p\and p_4=1/p.\end{equation} Up to M\"obius transformations, every 4-punctured sphere is of this form. By definition $\Sigma$ is invariant under the holomorphic involutions $\delta(z)=-z$ and $\tau(z)=1/z$. Consider the Pauli matrices $$\mathfrak m_1=\matrix{1&0\\0&-1} \qquad \mathfrak m_2=\matrix{0&1\\1&0} \qquad \mathfrak m_3=\matrix{0&i\\-i&0}$$ and the holomorphic 1-forms on $\Sigma$ \begin{equation}\label{omega} \begin{split} \omega_1&=\frac{dz}{z-p_1}-\frac{dz}{z-p_2}+\frac{dz}{z-p_3}-\frac{dz}{z-p_4}\\ \omega_2&=\frac{dz}{z-p_1}-\frac{dz}{z-p_2}-\frac{dz}{z-p_3}+\frac{dz}{z-p_4}\\ \omega_3&=\frac{dz}{z-p_1}+\frac{dz}{z-p_2}-\frac{dz}{z-p_3}-\frac{dz}{z-p_4}. \end{split} \end{equation} Then the $\omega_i$ have the symmetries \begin{equation} \label{eq:symmetry-omega} \begin{cases} \delta^*\omega_1=\omega_1,\quad \delta^*\omega_2=-\omega_2,\quad\delta^*\omega_3=-\omega_3\\ \tau^*\omega_1=-\omega_1,\quad\tau^*\omega_2=\omega_2,\quad\tau^*\omega_3=-\omega_3\,.\end{cases} \end{equation} \begin{ansatz}\label{etatttinfront} In the following we will consider potentials of the form \begin{equation} \eta_t=t\sum_{j=1}^3 x_j(t)\mathfrak m_j\omega_j\end{equation} where $t\sim 0$ is a real parameter and $x_1(t)$, $ x_2(t)$, $x_3(t) \in \mathcal W^{\geq -1}$ are parameters depending on $t$. \end{ansatz} We aim to determine $\eta_t$ in dependence of $t$ through the implicit function theorem by imposing the reality condition \eqref{eq:realityconditionpara}. 
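For later use we record how the matrices $\mathfrak m_j$ interact with the constant matrices $D=\matrix{i&0\\0&-i}$ and $C=\matrix{0&i\\i&0}$ that implement the symmetries $\delta$ and $\tau$ in \eqref{symmetry-fix4} below; a direct computation gives \[D^{-1}\mathfrak m_1 D=\mathfrak m_1,\quad D^{-1}\mathfrak m_2 D=-\mathfrak m_2,\quad D^{-1}\mathfrak m_3 D=-\mathfrak m_3,\] \[C^{-1}\mathfrak m_1 C=-\mathfrak m_1,\quad C^{-1}\mathfrak m_2 C=\mathfrak m_2,\quad C^{-1}\mathfrak m_3 C=-\mathfrak m_3,\] so the signs match those of $\delta^*\omega_j$ and $\tau^*\omega_j$ in \eqref{eq:symmetry-omega} term by term.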
The maps $t\mapsto x_j(t)$, $j=1,2,3$, are always assumed to be smooth in some neighbourhood $(-\epsilon,\epsilon)$ of $t=0.$ We denote the parameter vector by \[ x(t)=(x_1(t),x_2(t),x_3(t)) \in (\mathcal W^{\geq -1})^3.\] \begin{remark}\label{rem:inidata} For $t\sim0$, the parameter $x(t)$ is not uniquely determined by the reality condition \eqref{eq:realityconditionpara} as we have a non-trivial moduli space of solutions, which is parametrized by Higgs data. Hence, we have to incorporate the dependency on the parabolic Higgs pair when applying the implicit function theorem. We omit this dependency for now, and refer to Section \ref{sec:t=0para} below and to Theorem \ref{thm:IFT} below for precise statements. \end{remark} With this ansatz $\eta_t$ is a Fuchsian system for every $\lambda \in \mathbb D_a\setminus\{0\}$ and the residue at each puncture is given by a $\Lambda \mathfrak{sl}(2,\C)$ element $$\Res_{z=p_j} \eta_t = t A_j$$ with \begin{equation}\label{symmetry-fix1-4} \begin{split} A_1&=\matrix{x_1(t)&x_2(t)+ix_3(t)\\x_2(t)-ix_3(t)&-x_1(t)} \quad\quad A_2=\matrix{-x_1(t) &-x_2(t)+ix_3(t)\\-x_2(t)-ix_3(t)&x_1(t)}\\ A_3&=\matrix{x_1(t)&-x_2(t)-ix_3(t)\\-x_2(t)+ix_3(t)&-x_1(t)}\quad A_4=\matrix{-x_1(t)&x_2(t)-ix_3(t)\\x_2(t)+ix_3(t)&x_1(t)} \end{split} \end{equation} satisfying \begin{equation}\label{parabolicweightstxx}\det(A_j)=-x_1(t)^2-x_2(t)^2-x_3(t)^2\end{equation} for all $j=1,\dots,4$. Moreover, $\Psi=\text{Res}_{\lambda=0}\eta_t$ takes the form \eqref{eq:psiAs} with $R_j=\text{Res}_{\lambda=0}A_j.$ This ansatz is chosen such that the potential has the following symmetries \begin{equation}\label{symmetry-fix4} \begin{split}\delta^*\eta_t&=D^{-1}\eta_t D\with D=\matrix{i&0\\0&-i}\\ \tau^*\eta_t&=C^{-1}\eta_t C\with C=\matrix{0&i\\i&0}. \end{split} \end{equation} We will show that these symmetries can be imposed without loss of generality if we start with a symmetric Higgs field and $t\sim0$ is small. \begin{remark} With this ansatz we only study harmonic maps for which the monodromy becomes trivial for $t\rightarrow 0$ and consequently the corresponding Higgs fields $\Psi(t) \rightarrow 0$. In fact, evaluating Ansatz \eqref{etatttinfront} at $t=0$ gives the trivial connection with trivial monodromy due to the factor $t$ in Ansatz \eqref{etatttinfront}. \end{remark} \subsection{The choice of initial data at $t=0$, and parabolic Higgs fields}\label{sec:t=0para} In this section, we discuss the initial data at $t=0$ for the implicit function theorem. These initial data are naturally parametrized by the coordinates $(u,v)$ from \eqref{eq:psier}, which determine the Higgs pair. We denote the initial value of the parameters with an underscore $\cvx_j:=x_j(0)$, and accordingly $\cv{A}_j$. These are chosen such that $\cvx_1$, $\cvx_2$, $\cvx_3$ at $t=0$ satisfy \begin{equation}\label{initialproblem} \begin{cases}\det(\cv{A}_1)=-1\\ \cv{A}_1=\cv{A}_1^*,\end{cases} \end{equation} where the $()^*$-operator for matrices is defined as $$M^*(\lambda)=\overline{M(-1/\overline{\lambda})}^T.$$ The first equation in \eqref{initialproblem} implies that the local monodromies of the associated Fuchsian systems $d+\eta_t$ lie in the conjugacy class of diag$\left(\exp(2\pi it),\exp(-2\pi i t)\right)$ for every $\lambda \in \mathbb D_a.$ The second equation in \eqref{initialproblem} is an infinitesimal version of $\mathcal T$-reality at $t=0$.
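Concerning the first equation of \eqref{initialproblem}: if $\det(A_j)=-1$, then by \eqref{parabolicweightstxx} the residue $tA_j$ of $d+\eta_t$ at $p_j$ is trace-free with $\det(tA_j)=-t^2$ and hence has eigenvalues $\pm t$; since for $t\in(0,\tfrac{1}{4})$ these eigenvalues do not differ by a non-zero integer, the local monodromy is conjugate to the exponential of $2\pi i$ times the residue, that is, to diag$\left(\exp(2\pi it),\exp(-2\pi i t)\right)$.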
The equations \eqref{initialproblem} on $\cv{A}_1$ are equivalent to $$\begin{cases} \cvx_1^2+\cvx_2^2+\cvx_3^2=1\\ \cvx_j=\cvx_j^*, \quad \forall j, \end{cases}$$ where the induced $()^*$-operator for functions is defined to be \begin{equation}\label{fstar}f^*(\lambda)=\overline{f(-1/\overline{\lambda})}.\end{equation} Since potentials have at most a first order pole at $\lambda = 0$, the eligible $\cvx_j$ must be a degree 1 Laurent polynomial \begin{equation}\label{eq:defcvxj}\cvx_j=\cvx_{j,-1}\lambda^{-1}+\cvx_{j,0}+\cvx_{j,1}\lambda \end{equation} with \begin{equation}\label{eq:detcvxj} \begin{cases} \cvx_{j,0}\in\R\\ \cvx_{j,1}=-\overline{\cvx_{j,-1}}. \end{cases} \end{equation} Then $\cvx_1^2+\cvx_2^2+\cvx_3^2=1$ is equivalent to \begin{equation} \label{eq:det1} \begin{cases} {\displaystyle \sum_{j=1}^3}\cvx_{j,-1}^2=0\\ {\displaystyle\sum_{j=1}^3}\cvx_{j,-1}\cvx_{j,0}=0\\ {\displaystyle\sum_{j=1}^3}\left(\cvx_{j,0}^2- 2|\cvx_{j,-1}|^2\right)=1. \end{cases} \end{equation} To solve the first equation of \eqref{eq:det1}, consider the standard parametrization of the quadric $\{x^2+y^2+z^2=0\}$ in $\C^3$ given by \begin{equation} \label{eq:ambmcm} \cvx_{1,-1}=u\,v,\quad \cvx_{2,-1}=\tfrac{1}{2}(v^2-u^2)\and \cvx_{3,-1}=\tfrac{i}{2}(u^2+v^2) \end{equation} with $(u,v)\in\C^2\setminus\{ (0,0)\}$. Then the second equation of \eqref{eq:det1} gives \begin{equation} \label{eq:det2} u\,v\, \cvx_{1,0}+\tfrac{1}{2}(v^2-u^2)\, \cvx_{2,0}+\tfrac{i}{2}(u^2+v^2)\,\cvx_{3,0}=0 \end{equation} and its (real) solutions are \begin{equation} \label{eq:a0b0c0} \cvx_{1,0}=\rho(|u|^2-|v|^2),\quad \cvx_{2,0}=2\rho\,\Re(u\overline{v})\and \cvx_{3,0}=2\rho\,\Im(u\overline{v}),\quad \text{ for some } \rho\in\R.\end{equation} Finally, the third equation of \eqref{eq:det1} becomes \begin{equation}\label{eq:rho0}\rho^2(|u|^2+|v|^2)^2-(|u|^2+|v|^2)^2=1\end{equation} which determines $\rho$ up to sign. The geometric meaning of these equations and their relationship to the classical Weierstrass representation of minimal surfaces in Euclidean 3-space are discussed in Section \ref{sectionNAHt0}. We will show in Section \ref{sec:twist} below that the correct sign of $\rho$ to obtain twistor lines is \begin{equation} \label{eq:rho} \rho=\sqrt{1+(|u|^2+|v|^2)^{-2}}>0. \end{equation} But we obtain $\mathcal T$-real holomorphic sections of $\mathcal M_{DH}(\C P^1,p_1+\dots+p_4,t)$ for $t\sim0$ for both choices of sign in \eqref{eq:rho0}. We have thus fixed the initial conditions of $\eta_t$ depending on a pair $(u, v) \in \C^2\setminus\{ (0,0)\}$ which parametrizes the space of all eligible non-vanishing $\lambda$-residues of the $\eta_t$. This gives an 8-fold covering of the open subset $\mathcal U$ of the parabolic Higgs bundle moduli space specified by a trivial underlying holomorphic bundle. In fact, $(u,v)$ uniquely determines the nilpotent residue $R_1$ of the Higgs field at $p_1$ up to sign, and by Lemma \ref{41cov}, $R_1$ determines the gauge class of the Higgs pair uniquely up to the $\Z_2\times\Z_2$ action generated by conjugation with $C$ and $D.$ \begin{convention} In the following, we slightly abuse notation and neglect the $\Z_2\times \Z_2$ action when referring to the completion of the nilpotent orbit $\{R_1\in\mathfrak{sl}(2,\C)\mid \det(R_1)=0\}$ as the moduli space of parabolic Higgs fields.
This 4-fold covering can actually be identified with an open dense subset of the space of parabolic Higgs fields on the 1-punctured torus with parabolic weight $\tfrac{1}{2}-2t.$ We will also ignore the complex line of stable parabolic Higgs fields with underlying holomorphic structure $\mathcal O(-1)\oplus\mathcal O(1)$ at $t=0$, since the $t\to0$ limit of the corresponding representations is not the trivial representation. \end{convention} \begin{remark} When considering Ansatz \eqref{etatttinfront} for $t>0$ we have to rephrase the second condition in \eqref{initialproblem} which encodes the reality condition at $t=0$. Instead to ensure \eqref{eq:realityconditionpara} we have to require that $\eta_t^*$ and $\eta_t$ lie in the same gauge class. By Lemma \ref{lem:sufsec} this corresponds to constructing families of flat connections that descend to real holomorphic sections of $\mathcal M_{DH}(\C P^1,p_1+\dots+p_4,t)$ for prescribed strongly parabolic Higgs field at $\lambda= 0$ by varying parabolic weight $t$. \end{remark} Using the above $(u,v)$-parametrization the parabolic Higgs field $\Psi=\Res_{\lambda=0}\,\eta_t$ is given by \eqref{eq:psier} and has determinant \begin{equation} \label{eq:detPsi}\det(\Psi)=\frac{-4t^2(u^4-(p^2+p^{-2})u^2v^2+v^4)}{z^4-(p^2+p^{-2})z^2+1},\end{equation} where the singular points $p_1,\dots,p_4$ are determined by $p\in \C^+$ via \eqref{pointsonp}. The 0-eigenlines of the nilpotent residues of $\Psi$ viewed as points in $\C P^1$ have homogenous coordinates $u/v$, $-v/u$, $-u/v$ and $v/u$, respectively. The cross-ratio of these four eigenlines, which together with the parabolic weight $t$ determines the parabolic structure on the trivial holomorphic bundle, is computed to be \begin{equation}\label{comp:crossratio}\left(\frac{u}{v},\frac{-v}{u};\frac{-u}{v},\frac{v}{u}\right)=\frac{-4 u^2v^2}{(u^2-v^2)^2},\end{equation} where the cross-ratio is defined to be $$(z_1,z_2;z_3,z_4)=\frac{(z_3-z_1)(z_4-z_2)}{(z_3-z_2)(z_4-z_1)}.$$ The cross-ratio \eqref{comp:crossratio} is already uniquely determined by the ratio $u/v$, up to a $\Z_2\times\Z_2$ symmetry generated by $u/v\mapsto -u/v,\,u/v\mapsto v/u$ in accordance with Proposition \ref{pro:classparahiggs} (since scaling $u$ and $v$ simultaneously just scales the Higgs field). \section{Constructing real holomorphic sections}\label{IFT} In order for the potential $d+\eta_t$ to be the lift of a real holomorphic section $s$ we impose the condition that \begin{equation}\label{DPWreality} d+ \eta_t(\lambda) \quad\text{is gauge equivalent to}\quad d+\overline{\eta_t(-\bar\lambda^{-1})} \end{equation} for all $\lambda \in \mathbb A_a=\{\lambda\mid\, a^{-2}<|\lambda|^2< a^2\}$ as in \eqref{eq:realityconditionpara}. To make this condition more explicit, we identify the space of gauge equivalence classes of flat connections with the space of representations of $\pi_1(\Sigma,z_0)$ modulo conjugation via the monodromy representation. Let $p\in \C^+$ and $\Sigma = \Sigma_p$ be the 4-punctured sphere and fix the base point $z_0=0.$ Choose the generators $\gamma_1$, $\gamma_2$, $\gamma_3$ and $\gamma_4$ of the fundamental group $\pi_1(\Sigma,0)$ as in \cite{HHT2}, i.e, let $\gamma_1$ be the composition of the real half-line from $0$ to $+\infty$ with the imaginary half-line from $+i\infty$ to $0$, and more generally $\gamma_k$ is the product of the half-line from $0$ to $i^{k-1}\infty$ with the half line from $i^k\infty$ to $0$, so $\gamma_k$ encloses $p_k$ and $\gamma_1\gamma_2\gamma_3\gamma_4=1$. 
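Indeed, since $p\in\C^+$, the four punctures \eqref{pointsonp} lie in the four open quadrants of the plane, \[p_1=p\in\C^+,\qquad p_2=-1/p\in i\,\C^+,\qquad p_3=-p\in-\C^+,\qquad p_4=1/p\in-i\,\C^+,\] so the loop $\gamma_k$, which bounds the $k$-th quadrant, winds exactly once around $p_k$ and around no other puncture.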
Let $\Phi_t$ be the fundamental solution of the Cauchy Problem \begin{equation} \label{eq:cauchy} d_{\Sigma}\Phi_t=\Phi_t\eta_t\quad\text{with initial condition}\quad \Phi_t(z=0)=\Id\end{equation} and let $M_k(t)=\mathcal M(\Phi_t,\gamma_k)$ be the monodromy of $\Phi_t$ along $\gamma_k$. This gives the {\em left} monodromy representation of the connection $d+\eta_t,$ i.e., \[M_1(t)M_2(t)M_3(t)M_4(t)=\mathrm{Id}.\] The identification of the left and the right monodromy representation is done by taking the inverse, which is in accordance with the fact that for a solution $\Phi$ of $d\Phi=\Phi\eta$ its inverse solves $d\Phi^{-1}=-\eta\Phi^{-1}.$ In particular, two connections are gauge equivalent if and only if their left monodromies are conjugate to each other. In order to directly use results and explicit computations from our previous work (e.g. \cite{HHT1,HHT2}), we will work with the left monodromy in the following. \subsubsection*{Fricke coordinates} The moduli space of representations can be parametrized using the so-called Fricke coordinates. Define \[s_k:=\tr (M_k); \quad \quad s_{kl}=\tr (M_kM_l).\] Since $M_k \in \SL(2, \C), $ the trace $s_k$ determines the eigenvalues of $M_k, $ i.e., the conjugacy class of the local monodromy $M_k.$ For the symmetric case considered in this paper we restrict to $$s_1= s_2=s_3 = s_4 = 2\cos(2 \pi t)$$ for $t\in (0,\tfrac{1}{4})$ and the following classical result by Fricke-Voigt holds. \begin{proposition}\label{Pro:Friecke} Consider a $\SL(2,\C)$-representation on the $4$-punctured sphere $\Sigma$. Let $$s=s_1=\dots =s_4\in(0,2)$$ and let $U=s_{12}$, $V=s_{23}$, $W=s_{13}.$ Then the following algebraic equation holds \begin{equation}\label{eq:quadratic} U^2+V^2+W^2+U\,V\,W-2 s^2(U+V+W)+4(s^2-1)+s^4=0.\end{equation} When satisfying \eqref{eq:quadratic} the parameters $s$ and $U,V,W$ together determine a monodromy representation $\rho \colon \pi_1(\Sigma) \rightarrow$ SL$(2, \C)$ from the first fundamental group of $\Sigma$ into SL$(2, \C)$. By imposing the symmetries \eqref{symmetry-fix4}, this representation is unique up to conjugation. \end{proposition} \begin{proof} For the first (classical) part of the proposition see for example \cite{gold2}. Whenever the representations are irreducible, \cite{gold2} moreover shows that they are uniquely determined by their global traces $U,V, W $ up to conjugation, even without symmetry assumptions. By \cite[Lemma 5]{gold2} a representation is reducible if and only if $$s_{ij}\in\{2,-2+s^2\}=\{2,2 \cos (4 \pi t)\}$$ for all $i,j$. For $t\in (0,\tfrac{1}{4})$ \eqref{eq:quadratic} then implies that $$(U,V,W)\in\{(2,2,2 \cos (4 \pi t)),(2,2 \cos (4 \pi t),2),(2 \cos (4 \pi t),2,2)\}.$$ It can be easily checked that for each of the 3 possibilities there is a Fuchsian potential, unique up to conjugation with $C$, $D$, and $CD,$ satisfying the symmetry assumptions \eqref{symmetry-fix4} and inducing the corresponding totally reducible representation. \end{proof} Using the above proposition the reality condition \eqref{DPWreality} on the potentials $\eta_t$ is equivalent to $$s_{jk}=s_{jk}^*\quad \text{ for } (j,k)\in\{(1,2),(1,3),(2,3)\},$$ with $()^*$ as defined in \eqref{fstar}. 
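For later computations it is convenient to spell out this condition coefficientwise: writing $h=\sum_k h_k \lambda^k\in\mathcal W_a$, one finds $h^*(\lambda)=\sum_k(-1)^k\overline{h_{-k}}\,\lambda^{k}$, so that $h=h^*$ is equivalent to \[h_k=(-1)^k\,\overline{h_{-k}}\quad\text{for all } k;\] in particular $h_0\in\R$ and $h_1=-\overline{h_{-1}}$, in accordance with \eqref{eq:detcvxj}.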
The goal of this section is thus to solve the following Monodromy Problem \begin{equation} \label{eq:monodromy-problem1} \begin{cases} s_{jk}=s_{jk}^*\quad \text{ for } (j,k)\in\{(1,2),(1,3),(2,3)\}\\ {\displaystyle \sum_{j=1}^3} x_j(t)^2=1 \end{cases} \end{equation} using a similar implicit function theorem argument as in \cite{HHT1, HHT2}. By Lemma \ref{lem:sufsec} and Proposition \ref{Pro:Friecke} we then obtain real holomorphic sections of $\mathcal M_{DH}(\C P^1,p_1+\dots+p_4,t).$ It then remains to show that the so-constructed real holomorphic sections are negative and in the connected component of twistor lines. This is done in Theorem \ref{computingthecomponent} below. \subsection{Setup} As in \cite{HHT2}, consider for a fixed potential $\eta$ the extended frame $\Phi$ satisfying $d\Phi = \Phi \eta$ and $\Phi(z=0)= \Id$. Let $\mathcal P=\Phi(z=1)$ and $\mathcal Q=\Phi(z=i),$ where we omitted the index $t$. Then the traces $s_{jk}$ are given by squares of holomorphic functions in terms of the entries of $\mathcal P= (\mathcal P_{ij})$ and $\mathcal Q = (\mathcal Q_{ij})$ as follows: \begin{proposition} \label{prop:traces} With the notation above we have \begin{equation}\label{eq:s12}s_{12}=2-4\mathfrak p^2\end{equation} \begin{equation}\label{eq:s23}s_{23}=2-4\mathfrak q^2\end{equation} \begin{equation}\label{eq:s13}s_{13}=2-4\mathfrak r^2\end{equation} with \begin{equation*} \mathfrak p=\mathcal P_{11}\mathcal P_{21}-\mathcal P_{12}\mathcal P_{22}, \quad \mathfrak q= i(\mathcal Q_{11}\mathcal Q_{21}+\mathcal Q_{12}\mathcal Q_{22}) \end{equation*} and \begin{eqnarray*} \mathfrak r&=& \frac{i}{2}(\mathcal P_{22} \mathcal Q_{11}+\mathcal P_{12}\mathcal Q_{21})^2 +\frac{i}{2}(\mathcal P_{22} \mathcal Q_{12}+\mathcal P_{12}\mathcal Q_{22})^2 \\&& -\frac{i}{2}(P_{21} Q_{11}+P_{11}Q_{21})^2 -\frac{i}{2}(P_{21} Q_{12}+P_{11}Q_{22})^2 .\end{eqnarray*} \end{proposition} \begin{proof} The equations \eqref{eq:s12} and \eqref{eq:s23} have been proven in \cite[Proposition 16]{HHT2}. It remains to show \eqref{eq:s13}. Using the symmetries $\delta$ and $\tau\circ\delta$ which fix $z=1$ and $z=i$ respectively we have as in \cite[Proposition 16]{HHT2} \begin{equation} \begin{split} \Phi(+\infty)&=\mathcal P C \mathcal P^{-1}C^{-1}, \quad \quad\quad\quad\;\; \Phi(+i\infty)=\mathcal Q DC \mathcal Q^{-1} C^{-1}D^{-1},\\ M_1&=\Phi(+\infty)\Phi(+i\infty)^{-1}, \quad \quad\quad\quad M_3=DM_1D^{-1}. \end{split} \end{equation} This gives with $C^2=D^2=-\Id$ and $CD=-DC$ that $$M_1M_3=-(\mathcal P C\mathcal P^{-1}C^{-1}DC\mathcal Q C^{-1}D^{-1}Q^{-1}D)^2 =-\left(\mathcal P C \mathcal P^{-1} D\mathcal Q CD \mathcal Q^{-1} D\right)^2.$$ For $A\in SL(2,\C)$ we have $$\tr(-A^2)=2-\tr(A)^2,$$ hence \eqref{eq:s13} holds with $$\mathfrak r=\frac{1}{2}\tr\left(\mathcal P C \mathcal P^{-1} D\mathcal Q DC \mathcal Q^{-1} D\right)$$ which coincides with the formula given in Proposition \ref{prop:traces} after a tedious computation. \end{proof} \begin{proposition} \label{prop:derivative-pqr} At $t=0$, we have $$\mathfrak p(0)=\mathfrak q(0)=\mathfrak r(0)=0$$ with derivatives $$\mathfrak p'(0)=2\pi x_3(0),\quad \mathfrak q'(0)=2\pi x_2(0)\and \mathfrak r'(0)=2\pi x_1(0).$$ \end{proposition} \begin{proof} At $t=0$ we have $\eta_t=0$ thus all monodromies are trivial and $\mathcal P=\mathcal Q=\Id$ from which the first point follows. 
For the assertion on the derivatives define as in \cite{HHT2} \begin{equation}\label{Omega} \Omega_j(z)=\int_0^z\omega_j \end{equation} for $j=1,2,3,$ where the integral is computed on the segment from $0$ to $z$. Then $$\mathcal P'(0)=\sum_{j=1}^3 x_j(0)\Omega_j(1)\mathfrak m_j, \and \mathcal Q'(0)=\sum_{j=1}^3 x_j(0)\Omega_j(i)\mathfrak m_j,$$ from which we can compute \begin{equation*} \begin{split} \mathfrak p'(0)&=\mathcal P_{21}'(0)-\mathcal P_{12}'(0)=-2 i x_3(0)\Omega_3(1)\\ \mathfrak q'(0)&=i(\mathcal Q_{21}'(0)+\mathcal Q_{12}'(0))=2i x_2(0)\Omega_2(i)\\ \mathfrak r'(0)&=i(\mathcal P_{22}'(0)+\mathcal Q_{11}'(0)-\mathcal P_{11}'(0)-\mathcal Q_{22}'(0)) =-2i x_1(0)(\Omega_1(1)-\Omega_1(i)). \end{split} \end{equation*} By the Residue Theorem, we have for $j=1,2,3$ $$2\pi i=\int_{\gamma_1}\omega_j=\int_0^1\omega_j-\int_0^1\tau^*\omega_j -\int_0^i\omega_j+\int_0^i(\tau\delta)^*\omega_j.$$ Using the symmetries \eqref{eq:symmetry-omega}, this gives \begin{equation} \label{eq:easy-integrals} \Omega_1(1)-\Omega_1(i)=\pi i,\quad \Omega_2(i)=-\pi i\and \Omega_3(1)=\pi i \end{equation} proving Proposition \ref{prop:derivative-pqr}. \end{proof} In view of Proposition \ref{prop:traces}, the monodromy problem \eqref{eq:monodromy-problem1} can be reformulated as \begin{equation} \label{eq:monodromy-problem2} \begin{cases} \mathfrak p=\mathfrak p^*\\ \mathfrak q=\mathfrak q^*\\ \mathfrak r=\mathfrak r^*\\ {\displaystyle\sum_{j=1}^3} x_j(t)^2=1 \end{cases} \end{equation} and the following proposition implies that it suffices to solve for two of the three traces only. \begin{proposition} \label{prop:two-traces} Assume that $x_1(t),x_2(t),x_3(t)\colon (-\varepsilon, \varepsilon) \rightarrow \mathcal W^{\geq -1}$ are analytic in $t$, with $x_j(0)=\cvx_j,$ satisfying the following equations for all $t$ \begin{equation} \label{eq:monodromy-problem3} \begin{cases} \mathfrak p=\mathfrak p^*\\ \mathfrak q=\mathfrak q^*\\ {\displaystyle\sum_{j=1}^3} x_j(t)^2=1. \end{cases} \end{equation} Then also $\mathfrak r=\mathfrak r^*$ for all $t$. Analogous statements hold by cyclically permuting $\mathfrak{p,q,r}$. \end{proposition} \begin{proof} Let $$U=s_{12},\quad V=s_{23},\quad W=s_{13}\quad \text{and} \quad s=\tr({M_k})$$ as in Proposition \ref{Pro:Friecke} satisfying the quadratic equation \eqref{eq:quadratic} $$Q : = U^2+V^2+W^2+U\,V\,W-2 s^2(U+V+W)+4(s^2-1)+s^4 =0.$$ By substitution of $U=2-4\mathfrak p^2$, $V=2-4\mathfrak q^2$ and $W=2-4\mathfrak r^2$, $Q $ factors as $$Q=Q_1 Q_2\with Q_j=s^2+4(\mathfrak p^2+\mathfrak q^2+\mathfrak r^2-1)+8(-1)^j \mathfrak{pqr}.$$ Since $Q=0$ for all $t$, and $Q_1$, $Q_2$ are analytic functions of $t$, one of them must be identically zero. (We will see in Remark \ref{remark:r''} below that $j=2$.) The discriminant of $Q_j,$ considered as a polynomial in the variable $\mathfrak r$, is given by $$\Delta=64(1-\mathfrak p^2)(1-\mathfrak q^2)-16 s^2$$ and is independent of $j=1,2$. Since $\mathcal P$, $\mathcal Q$ are well-defined analytic functions in $t$ (with values in $\mathcal W_a$), $\mathfrak r$ is a well-defined analytic function in $t$ as well by Proposition \ref{prop:traces}. Therefore, $\Delta$ admits a well-defined square root $\delta$ such that $$\mathfrak r=(-1)^{j+1}\mathfrak p\mathfrak q+\frac{\delta}{8}$$ for all $t.$ From the hypotheses of the proposition, we have $\Delta=\Delta^*$, hence $\delta^*=\varepsilon \delta,$ where the sign $\sign=\pm 1$ does not depend on $t$ because $\mathfrak r^*$ is a well-defined analytic function in $t$.
Hence $$\mathfrak r^*=(-1)^{j+1}\mathfrak p\mathfrak q+ \sign\frac{\delta}{8}.$$ The sign $\sign$ can be determined using the first order derivatives at $t=0$. We have $$\mathfrak r'=2\pi\cvx_1=\frac{\delta'}{8} \and \mathfrak r'^*=2\pi\cvx_1^*=2\pi\cvx_1=\sign\frac{\delta'}{8}.$$ Since $\cvx_1\not\equiv 0$, we obtain $\sign=1$. Hence $\mathfrak r=\mathfrak r^*$ for all $t$. \end{proof} \subsection{Solving the Monodromy Problem} \label{section:monodromy:IFT} As remarked before, see Remark \ref{rem:inidata}, the potentials $d+\eta_t$ are not uniquely determined by the reality conditions \eqref{eq:monodromy-problem1} or \eqref{eq:monodromy-problem2}, but also depend on the Higgs data. By Proposition \ref{pro:classparahiggs}, we identify the moduli space of strongly parabolic Higgs fields on $V= \mathcal O \oplus \mathcal O$, up to taking the quotient by the $\Z_2\times\Z_2$ action, with the completion of the nilpotent orbit in $\mathfrak{sl}(2, \C),$ which is in turn given by the blow-up of $\C^2/\Z_2$ at the origin. First, consider the regular case $(u, v) \in\C^2\setminus\{(0,0)\}$ and define for $j=1,2,3$ the quadratic polynomials $$P_j(\lambda)=\lambda\cvx_j(\lambda)=\cvx_{j,1}\lambda^2+\cvx_{j,0}\lambda-\overline{\cvx_{j,1}},$$ where the $\cvx_j$ are defined in \eqref{eq:defcvxj} satisfying \eqref{eq:detcvxj}, \eqref{eq:ambmcm}, \eqref{eq:a0b0c0} and \eqref{eq:rho0}, and denote the discriminant of $P_j$ by $$\Delta_j=\cvx_{j,0}^2+4 |\cvx_{j,1}|^2\in\R.$$ The following arguments work for both signs of $\rho$ in \eqref{eq:rho0}, compare with Lemma \ref{lem:signmean} below. When statements do depend on the choice of sign, e.g., in Theorem \ref{blowuplimit}, we state them for the correct choice \eqref{eq:rho} only. \begin{proposition}\label{prop:rootPj} $\;$\\ \vspace{-0.6cm} \begin{enumerate} \item The polynomial $P_j$ has a complex root $\mu_j$ with $|\mu_j|<1$ if and only if $\cvx_{j,0}\neq 0$. In this case $\mu_j \in \mathbb D_1$ depends real-analytically on $(u,v) \neq (0,0)$. \item With the same notation, $P_k(\mu_j)$ and $P_{\ell}(\mu_j)$ are $\R$-independent complex numbers, if $\{j,k,\ell\}=\{1,2,3\}$. \end{enumerate} \end{proposition} \begin{proof} If $\cvx_{j,1}=0,$ the first point is trivial, because in this case $P_j=\cvx_{j,0} \lambda$ and $\cvx_{j,0}\neq 0$ since $(u,v)\neq(0,0)$. Hence assume in the following $\cvx_{j,1}\neq 0$. If $\mu$ is a root of $P_j$, then $\mu\neq 0$. Moreover, $\cv{x}_j=\cv{x}_j^*$ gives that $-\tfrac{1}{\overline{\mu}}\neq \mu$ is the other root of $P_j$. Thus $$|\mu|=1\Leftrightarrow \mu-\frac{1}{\overline{\mu}}=0\Leftrightarrow \cvx_{j,0}=0.$$ For $\cvx_{j,0} \neq 0$ the root $\mu_j$ with $|\mu_j|<1$ is given by $$\mu_j=\frac{-\cvx_{j,0}+\operatorname{sign}(\cvx_{j,0})\sqrt{\Delta_j}}{2 \cvx_{j,1}},$$ which we can rewrite as $$\mu_j=\frac{2\,\overline{\cvx_{j,1}}}{\cvx_{j,0}}f\left(\frac{4|\cvx_{j,1}|^2}{\cvx_{j,0}^2}\right) \with f(z)=\frac{\sqrt{1+ z}-1}{z}.$$ The function $f$ extends holomorphically to $z=0$, therefore $\mu_j$ depends analytically on $(u,v)$. To prove the second point, assume for simplicity of notation that $j=1$. Suppose by contradiction that $P_2(\mu_1)$ and $P_3(\mu_1)$ are linearly dependent over $\R$. First assume that $\mu_1\neq 0$. Then the complex numbers $\cvx_2(\mu_1)$, $\cvx_3(\mu_1)$ are linearly dependent over $\R$. Moreover, $\cvx_1(\mu_1)=0$ so $$\cvx_2(\mu_1)^2+\cvx_3(\mu_1)^2=1$$ and this implies that $\cvx_2(\mu_1)$ and $\cvx_3(\mu_1)$ are real.
Since all $\cvx_{k,0}$ are real, we obtain $$\cvx_{k,1}\mu_1-\overline{\cvx_{k,1}}\mu_1^{-1}\in\R \quad\text{for $k=1,2,3$}.$$ Then $$\sum_{k=1}^3\left(\cvx_{k,1}\mu_1-\overline{\cvx_{k,1}}\mu_1^{-1}\right)^2\geq 0.$$ Expanding the squares and using $\sum_{k=1}^3 \cvx_{k,1}^2=0$ we obtain $$\sum_{k=1}^3|\cvx_{k,1}|^2\leq 0$$ which implies $u=v=0$, a contradiction. \noindent If $\mu_1=0$, we have $P_2(0)=\cvx_{2,-1}$ and $P_3(0)=\cvx_{3,-1}$. From $P_1(0)=\cvx_{1,-1}=0$ we obtain $$\cvx_{2,-1}^2+\cvx_{3,-1}^2=0$$ and since $\cvx_{2,-1}$ and $\cvx_{3,-1}$ are linearly dependent over $\R$, $\cvx_{2,-1}=\cvx_{3,-1}=0$ which again results in $u=v=0$, leading to a contradiction. \end{proof} \begin{theorem} \label{thm:IFT} Let $(u,v)\neq (0,0)$ be fixed. This determines $\cvx_{j,-1}$ for $j=1, 2, 3$. Then, there are $\epsilon_0>0$ and $a >1$ such that there exist unique values of the parameters $x(t)=(x_1(t),x_2(t),x_3(t)) \in (\mathcal W_a^{\geq -1})^3$ in a neighborhood of $\cvx$ for all $t\in (-\epsilon_0,\epsilon_0)$, depending real analytically on $(t,p,u,v)$, solving \eqref{eq:monodromy-problem1} with $x(0)=\cvx$ and prescribed $x_{j,-1}(t)=\cvx_{j,-1}$. Moreover, $\epsilon_0$ and $a>1$ are uniform with respect to $(p,u,v)$ on compact subsets of $\C^+\times(\C^2\setminus\{(0,0)\})$. \end{theorem} \begin{proof} Fix $(u,v) \neq (0,0)$. By Proposition \ref{prop:rootPj} and \eqref{eq:a0b0c0}, at least one of the polynomials $P_j$ has a root $\mu_j$ inside the unit $\lambda$-disc. By symmetry of the roles played by the parameters $\mathfrak p$, $\mathfrak q$, $\mathfrak r$, we may assume without loss of generality that $j=1$. By Proposition \ref{prop:two-traces}, it suffices to solve Problem \eqref{eq:monodromy-problem3}. We fix $a>1$ such that $a|\mu_1|<1$ and consider the corresponding function space $\mathcal W^{\geq 0}_a$. We introduce a parameter $y=(y_1,y_2,y_3)$ in a neighborhood of $0$ in $(\mathcal W^{\geq 0}_a)^3$ and set \begin{equation}\label{eq:defxkyk}x_k(t)=\cvx_k+y_k,\quad k=1,2,3.\end{equation} Note that the negative part of the potential is fixed to its initial value $x_{k,-1}=\cvx_{k,-1}.$ Since $\mathfrak p$ and $\mathfrak q$ are analytic functions of $(t,y)$ which vanish at $t=0$, the functions $$\wh{\mathfrak p}(t,y):=\frac{1}{t}\mathfrak p(t,y) \and \wh{\mathfrak q}(t,y):=\frac{1}{t}\mathfrak q(t,y)$$ extend analytically at $t=0$ and by Proposition \ref{prop:derivative-pqr} we have at $t=0$ \begin{equation}\label{eq:whmathfrakpqt0}\wh{\mathfrak p}(0,y)=2\pi(\cvx_3+ y_3) \and \wh{\mathfrak q}(0,y)=2\pi(\cvx_2+y_2).\end{equation} We define \begin{equation} \begin{split} \mathcal F(t,y)&:=\wh{\mathfrak p}(t,y)-\wh{\mathfrak p}(t,y)^*\\ \mathcal G(t,y)&:=\wh{\mathfrak q}(t,y)-\wh{\mathfrak q}(t,y)^*\\ \mathcal K(y)&:=\sum_{k=1}^3 x_k(t)^2=\sum_{k=1}^3(\cvx_k+y_k)^2. \end{split} \end{equation} Then solving Problem \eqref{eq:monodromy-problem3} is equivalent to solving the equations $\mathcal F=\mathcal G=0$ and $\mathcal K=1$. By our choice of the central value \eqref{eq:defcvxj} and due to \eqref{eq:whmathfrakpqt0}, these equations hold at $(t,y)=(0,0)$. By definition we have $\mathcal F^*=-\mathcal F$ so $\mathcal F=0$ is equivalent to $\mathcal F^+=0$ and $\Im(\mathcal F^0)=0$, and the analogous statement holds for $\mathcal G$ as well.
By Proposition \ref{prop:derivative-pqr} and substituting \eqref{eq:defxkyk}, the partial derivatives with respect to $y=(y_1,y_2,y_3)$ are \begin{equation}\label{eq:partialderFGplus} \begin{split} d\mathcal F(0,0)^+&=2\pi dy_3^+\\ \Im(d\mathcal F(0,0)^0)&=2\pi \Im(d y_{3,0})\\ d\mathcal G(0,0)^+&=2\pi d y_2^+\\ \Im(d\mathcal G(0,0)^0)&=2\pi \Im(d y_{2,0}). \end{split} \end{equation} Hence, the partial derivative of $$\left(\mathcal F^+,\mathcal G^+,\Im(\mathcal F^0),\Im(\mathcal G^0)\right)$$ with respect to $$\left(y_2^+,y_3^+, \Im(y_{2,0}),\Im(y_{3,0})\right)$$ is an automorphism of $(\mathcal W^+_a)^2\times\R^2$. The implicit function theorem therefore uniquely determines $(y_2^+,y_3^+, \Im(y_{2,0}),\Im(y_{3,0}))\in(\mathcal W^+_a)^2\times\R^2$ as analytic functions of $t$ and the remaining parameters $y_1$, $\Re(y_{2,0})$ and $\Re(y_{3,0})$. Furthermore, the partial derivative of $(y_2^+,y_3^+, \Im(y_{2,0}),\Im(y_{3,0}))$ at $(t,y)=(0,0)$ with respect to these remaining parameters is zero by \eqref{eq:partialderFGplus}. It remains to solve the equation $\mathcal K=1$. We write the Euclidean division of the polynomial $P_k$ by $(\lambda-\mu_1)$ as $$P_k(\lambda)=(\lambda-\mu_1)Q_k+P_k(\mu_1)$$ with $Q_k\in\C[\lambda]$. Note that the $Q_k$ are real analytic in $(u,v)$ by Proposition \ref{prop:rootPj}. Observe that since $$\sum_{k=1}^3\cvx_{k,-1}^2=0,$$ $\mathcal K$ has no $\lambda^{-2}$ term so $\lambda\mathcal K\in\mathcal W^{\geq 0}$. We write the division of $\lambda\mathcal K$ by $(\lambda-\mu_1)$ as $$\lambda\mathcal K=(\lambda-\mu_1)\mathcal S+\mathcal R$$ where $\mathcal R\in\C$ and $\mathcal S\in\mathcal W^{\geq 0}$. Note that since $|\mu_1|<1$, $\mathcal R$ and $\mathcal S$ are analytic functions of all parameters by \cite[Proposition 5]{HHT2}. We have, since $P_1(\mu_1)=0$ and $dy_k=\Re(dy_{k,0})$ for $k=2,3$ \begin{eqnarray*} d(\lambda\mathcal K)(0,0)&=&\sum_{k=1}^3 2P_k(\lambda)\,dy_k=2(\lambda-\mu_1)Q_1d y_1+\sum_{k=2}^3 2\big((\lambda-\mu_1)Q_k+P_k(\mu_1)\big)\Re(dy_{k,0})\end{eqnarray*} so by uniqueness of the division \begin{equation*} \begin{split} d\mathcal R(0,0)&=2P_2(\mu_1)\Re(d y_{2,0})+2P_3(\mu_1)\Re(d y_{3,0})\\ d\mathcal S(0,0)&=2 Q_1d y_1 +2 Q_2\Re(d y_{2,0})+2 Q_3\Re(d y_{3,0}). \end{split} \end{equation*} If $\cvx_{1,1}\neq 0$, we have $\mu_1\neq 0$ and the other root of $P_1$ is $-1/\overline{\mu_1}$ so $$Q_1=\cvx_{1,1}\left(\lambda+\frac{1}{\overline{\mu_1}}\right)$$ is invertible in $\mathcal W^{\geq 0}_a$ because $\frac{1}{|\mu_1|}>a$. (Note that $\lambda-c$ is invertible in $\mathcal W^{\geq 0}_a$ if and only if $|c|>a$.) If $\cvx_{1,1}=0$, we have $$P_1=\cvx_{1,0}\lambda$$ so $Q_1=\cvx_{1,0}\in\C^*$ is invertible in $\mathcal W^{\geq 0}_a$ as well. By Proposition \ref{prop:rootPj}, $P_2(\mu_1)$ and $P_3(\mu_1)$ are $\R$-independent complex numbers, and the partial derivative of $(\mathcal S,\mathcal R)$ with respect to $\left(y_1,\Re(y_{2,0}),\Re(y_{3,0})\right)$ at $(t,y)=(0,0)$ is an isomorphism from $\mathcal W^{\geq 0}_a\times\R^2$ to $\mathcal W^{\geq 0}_a\times\C$. Thus, the implicit function theorem uniquely determines $y_1$, $\Re(y_{2,0})$ and $\Re(y_{3,0})$ as analytic functions of $t$ in a neighborhood of $t=0$. \end{proof} \subsection{The limit $(u,v)\to (0,0)$} In this section, we highlight the dependency of the solutions $x(t)=x(t,p,u,v)$ provided by Theorem \ref{thm:IFT} on the parameters $p,u,v$ when necessary, and we drop the dependency on all parameters when convenient.
By Proposition \ref{pro:classparahiggs} the Higgs bundle moduli space is given by the blow up of $\C^2/\Z_2$ at the origin. Rather than expecting the solution $x(t,p,u,v)$ to extend continuously to $(u,v)=(0,0)$, the limit $(u,v)\to (0,0)$ should therefore depend on the direction in the blow-up. We write $$u=r\wt{u}\and v=r\wt{v}\with |\wt{u}|^2+|\wt{v}|^2=1.$$ Let $0<|r|\leq \frac{1}{2}$ so that $(u,v)\neq (0,0)$. With a slight abuse of notation, we write $x=x(t,p,r,\wt{u},\wt{v})$. Recall that $(u,v)\to (-u,-v)$ does not change the initial value $\cvx$, so the map $x(t,p,r,\wt{u},\wt{v})$ is even with respect to $r$. Our goal is to prove \begin{theorem}\label{blowuplimit}$\;$\\\vspace{-0.5cm} \begin{enumerate} \item There exists $\epsilon_2>0$ such that for $|t|<\epsilon_2$, the function $x(t,p,r,\wt{u},\wt{v})$ extends analytically to $r=0$. Moreover, $\epsilon_2$ is uniform with respect to $p$ in compact subsets of $\C^+$ and $(\wt{u},\wt{v})$ in $\S^3$. \item At $r=0$, $x(t,p,0,\wt{u},\wt{v})$ does not depend on $\lambda$ and solves the following problem: \begin{equation} \label{monodromy-problem-r0} \begin{cases} \exists U\in SL(2,\C),\quad\forall k,\;U M_k U^{-1}\in SU(2)\\ x_1^2+x_2^2+x_3^2=1. \end{cases} \end{equation} \item At $(t,r)=(0,0)$, we have for $\rho>0$ \begin{equation}\label{eq:blowuplimit}x_1=(|\wt{u}|^2-|\wt{v}|^2),\quad x_2=2\,\Re\left(\wt{u}\,\overline{\wt{v}}\right)\and x_3=2\,\Im\left(\wt{u}\,\overline{\wt{v}}\right).\end{equation} \end{enumerate} \end{theorem} \begin{remark} For $\rho<0$, we have to replace in $(3)$ the limit $(x_1, x_2, x_3)$ by $-(x_1, x_2, x_3)$. \end{remark} \begin{proof}[Proof of Theorem \ref{blowuplimit}] Rewrite the central value $\cvx$ in terms of $(r,\wt{u},\wt{v})$ as $$\cvx_j=r^2\wtcv{x}_{j,-1}\lambda^{-1}+\wt{\rho}(r)\wtcv{x}_{j,0}+r^2\wtcv{x}_{j,1}\lambda$$ with $$\wtcv{x}_{1,-1}=\wt{u}\,\wt{v},\quad \wtcv{x}_{2,-1}=\tfrac{1}{2}(\wt{v}^2-\wt{u}^2),\quad \wtcv{x}_{3,-1}=\tfrac{i}{2}(\wt{u}^2+\wt{v}^2),$$ $$\wtcv{x}_{1,0}=|\wt{u}|^2-|\wt{v}|^2,\quad \wtcv{x}_{2,0}=2\,\Re\left(\wt{u}\,\overline{\wt{v}}\right),\quad \wtcv{x}_{3,0}=2\,\Im\left(\wt{u}\,\overline{\wt{v}}\right),$$ $$\wtcv{x}_{j,1}= - \overline{\wtcv{x}_{j,-1}}$$ and $$\wt{\rho}(r):=r^2\rho(r)=\sqrt{1+r^4} = 1+ O(r^4).$$ Observe that \begin{equation} \label{eq:wtcvx} \sum_{k=1}^3\wtcv{x}_{k,-1}^2=0,\quad \sum_{k=1}^3\wtcv{x}_{k,0}^2=1,\quad \sum_{k=1}^3\wtcv{x}_{k,-1}\wtcv{x}_{k,0}=0 \end{equation} and \begin{equation} \label{eq:wtcvxmodule} \sum_{k=1}^3|\wtcv{x}_{k,-1}|^2=\frac{1}{2}. \end{equation} For given $(\wt{u},\wt{v})\in\S^3$, fix $j\in\{1,2,3\}$ such that $\wtcv{x}_{j,0}\neq 0$. (This is possible by \eqref{eq:wtcvx}). We take as ansatz that the parameter $y_j$ is of the following form \begin{equation} \label{eq:ansatz-wtyj} y_j=y_{j,0}+r^2\wt{y}_j^+\with \wt{y}_j^+\in\mathcal W^+_a. \end{equation} We solve the equations $\mathcal F=\mathcal G=0$ using the implicit function theorem as in Section \ref{section:monodromy:IFT}. This uniquely determines the parameters $y_k^+$ and $\Im(y_{k,0})$ with $k\neq j$ as functions of $(t,r)$ and the remaining parameters $y_{j,0}$, $\wt{y}_j^+$ and $\Re(y_{k,0})$ with $k\neq j$. Moreover, at $r=0$, $y_j^+=0$ and $y_k^+=0$, $k\neq j$, solve the equation $\mathcal F^+=\mathcal G^+=0$. 
Since $y_k^+$ is an even function of $r$ (due to uniqueness), this means that we can also write for $k\neq j$ $$y_k=y_{k,0}+r^2\wt{y}_k^+\with \wt{y}_k^+\in\mathcal W^+.$$ We decompose $\mathcal K=x_1^2+x_2^2+x_3^2$ as $$\mathcal K=\mathcal K_{-1}\lambda^{-1}+\mathcal K_0+\mathcal K^+ \with \mathcal K^+\in\mathcal W^+.$$ When $r=0$, $\mathcal K$ does not depend on $\lambda$, i.e., $\mathcal K_{-1}=0$ and $\mathcal K^+=0$. Thus since $\mathcal K_{-1}$ and $\mathcal K^+$ are even functions of $r$, this means that $\wt{\mathcal K}_{-1}=r^{-2}\mathcal K_{-1}$ and $\wt{\mathcal K}^+=r^{-2}\mathcal K^+$ extend analytically at $r=0$. More explicitly, \begin{eqnarray*} \mathcal K&=&\sum_{k=1}^3\left(r^2\wtcv{x}_{k,-1}\lambda^{-1}+\wt{\rho}\,\wtcv{x}_{k,0}+r^2\wtcv{x}_{k,1}\lambda+y_{k,0}+r^2\wt{y}_k^+\right)^2\\ &=&\sum_{k=1}^3\left(\wtcv{x}_{k,0}+y_{k,0}\right)^2+2 r^2\left(\wtcv{x}_{k,0}+y_{k,0}\right)\left(\wtcv{x}_{k,-1}\lambda^{-1}+\wtcv{x}_{k,1}\lambda+\wt{y}_k^+\right) +O(r^4) \end{eqnarray*} from which we obtain at $r=0$ \begin{equation*} \begin{split} \wt{\mathcal K}_{-1}\mid_{r=0}&=2\sum_{k=1}^3\left(\wtcv{x}_{k,0}+y_{k,0}\right)\wtcv{x}_{k,-1}\\ \mathcal K_0\mid_{r=0}&=\sum_{k=1}^3\left(\wtcv{x}_{k,0}+y_{k,0}\right)^2\\ \wt{\mathcal K}^+\mid_{r=0}&=2\sum_{k=1}^3\left(\wtcv{x}_{k,0}+y_{k,0}\right)\left(\wtcv{x}_{k,1}\lambda+\wt{y}_k^+\right). \end{split} \end{equation*} In particular, at the central value $y=0$, we have by Equation \eqref{eq:wtcvx} that $\mathcal K_0=1$, $\wt{\mathcal K}_{-1}=0$ and $\wt{\mathcal K}^+=0$ and the differentials with respect to $y$ at $(t,r,y)=(0,0,0)$ are \begin{equation*} \begin{split} d\wt{\mathcal K}_{-1}&=2\sum_{k=1}^3 \wtcv{x}_{k,-1} dy_{k,0}\\ d\mathcal K_0&=2\sum_{k=1}^3\wtcv{x}_{k,0}dy_{k,0}\\ d\wt{\mathcal K}^+&=2\sum_{k=1}^3\left(\wtcv{x}_{k,0}d\wt y_k^+ +\lambda\wtcv{x}_{k,1}dy_{k,0}\right). \end{split} \end{equation*} Keep in mind that $\wt y_k^+$ and $\Im(y_{k,0})$ for $k\neq j$ have already been determined, and that their differentials with respect to the remaining parameters $y_{j,0}$, $\wt{y}_j^+$ and $\Re(y_{k,0})$ with $k\neq j$ vanish at $(t,r)=(0,0)$. Consider the polynomials $\wt P_k(\lambda)=\wtcv{x}_{k,0}\lambda+\wtcv{x}_{k,-1}.$ Recall that $\wtcv{x}_{j,0}\neq0$, and let $\wt\mu_j$ be the root of $\wt P_j$. Then $$d\wt{\mathcal K}_{-1}+\wt\mu_j d\mathcal K_0=2\sum_{k\neq j}\wt P_k(\wt\mu_j)\Re(d y_{k,0}).$$ By Claim \ref{claim:wtPk} below, this is an isomorphism from $\R^2$ to $\C$. Hence the partial derivative of $(\wt{\mathcal K}_{-1},\mathcal K_0,\wt{\mathcal K}^+)$ with respect to $\big((\Re(y_{k,0}))_{k\neq j},y_{j,0},\wt y_j^+\big)$ is an isomorphism from $\R^2\times\C\times\mathcal W^+$ to $\C^2\times\mathcal W^+$. The implicit function theorem then gives that there exists $\epsilon_1>0$ such that for $|t|<\epsilon_1$ and $|r|<\epsilon_1$, there exist unique values of the parameters which solve Problem \eqref{eq:monodromy-problem1}. When $\epsilon_1/2\leq |r|\leq 1/2$, Theorem \ref{thm:IFT} gives us a uniform $\epsilon_0>0$ such that for $|t|<\epsilon_0$, there exist unique values of the parameters which solve Problem \eqref{eq:monodromy-problem1}. They certainly satisfy the ansatz \eqref{eq:ansatz-wtyj} since $|r|\geq \epsilon_1/2$. Take $\epsilon_2=\min(\epsilon_1,\epsilon_0)$. In the overlap $\epsilon_1/2\leq |r|<\epsilon_1$, the solutions agree for $|t|<\epsilon_2$ by uniqueness of the implicit function theorem. So far, we have proven point (1), and point (3) follows by construction.
Since for $r=0,$ the connections are independent of $\lambda$, the reality condition \eqref{eq:monodromy-problem1} implies that the traces $s_{jk}$ are real. Thus, the monodromy representation is either conjugate to a real representation or to a unitary representation. Using (3), it immediately follows that $s_{jk}\in[-2,2]$ for all $(j,k)\in\{(1,2),(1,3),(2,3)\}$ and small $t$, which implies unitarity. \end{proof} \begin{claim} \label{claim:wtPk} For $k\neq j$ the two complex numbers $\wt P_k(\wt\mu_j)$ are linearly independent over $\R$. \end{claim} \begin{proof} Assume for simplicity of notation that $j=1$ and that $\wt P_2(\wt\mu_1)$ and $\wt P_3(\wt\mu_1)$ are linearly dependent over $\R$. If $\wt\mu_1\neq 0$, let $$\alpha_k=\wt\mu_1^{-1}\wt P_k(\wt\mu_1)=\wtcv{x}_{k,0}+\wtcv{x}_{k,-1}\wt\mu_1^{-1}.$$ Using Equation \eqref{eq:wtcvx} we have $$\sum_{k=1}^3 \alpha_k^2=1.$$ Since $\alpha_1=0$ and $\alpha_2$, $\alpha_3$ are linearly dependent, all $\alpha_k$ must be real numbers. Since $\wtcv{x}_{k,0}$ are real, each $\wtcv{x}_{k,-1}\wt\mu_1^{-1}$ must be real. Then using Equation \eqref{eq:wtcvx} again, we have $$\sum_{k=1}^3(\wtcv{x}_{k,-1}\wt\mu_1^{-1})^2=0$$ implying that $\wtcv{x}_{k,-1}=0$ for all $k$ contradicting Equation \eqref{eq:wtcvxmodule}. Thus, $\wt P_2(\wt\mu_1)$ and $\wt P_3(\wt\mu_1)$ must be linearly independent, if $\wt\mu_1 \neq0.$ If $\wt\mu_1=0$, then $\wtcv{x}_{1,-1}=0$. Then, if $\wtcv{x}_{2,-1}$, $\wtcv{x}_{3,-1}$ are linearly dependent, we would obtain $$\sum_{k=1}^3(\wtcv{x}_{k,-1})^2=0,$$ which again gives $\wtcv{x}_{k,-1}=0$ for all $k$ contradicting Equation \eqref{eq:wtcvxmodule}. \end{proof} \begin{remark} The constructed real holomorphic sections are uniquely determined by the residue $A_1(t, \lambda)$ at $z= p$ of the potential $\eta_t$ (when fixing the sign in \eqref{eq:rho0}). The deformation of $A= A_1$ in the parameter $t$ can in fact be expressed by a Lax pair type equation $$A' = [A, X],$$ for some $X\in \Lambda^+ \mathfrak{sl}(2, \C)$. Moreover, $X$ is unique up to adding $g \cdot A$, where $g \colon \lambda \mapsto g(\lambda) \in\C$ is holomorphic around $\lambda = 0.$ To see this recall $$A= \begin{pmatrix} x_1 & x_2 + i x_3 \\ x_2- i x_3 & - x_1 \end{pmatrix}$$ satisfies $\det A= -1$ and thus $A^2= \Id.$ Therefore, we obtain $A' A + A A' = 0$ which gives $$x_1' x_1 + x_2'x_2 + x_3'x_3 = 0.$$ Let $$ A'= \begin{pmatrix} x_1' & x_2'+ i x_3' \\ x_2' - ix_3' & -x_1' \end{pmatrix} \and X= \begin{pmatrix} \alpha & \beta \\ \gamma & - \alpha \end{pmatrix}. $$ Then we can choose $X$ to be given by \begin{equation} \begin{split} \alpha &= - \tfrac{1}{2}({x_2 - i x_3})(x_2' + ix_3') + x_1 B \\ \beta &= \tfrac{1}{2}x_1 (x_2' + ix_3') + (x_2+ i x_3) B \\ \gamma &= - \tfrac{1}{2}x_1(x_2' - ix_3') + (x_2- i x_3)(B + x_1'). \end{split} \end{equation} with $B= -\tfrac{1}{2}(x_{2,0}' + ix_{3,0}') \frac{x_{1,-1}}{x_{2,-1}+i x_{3,-1}}$ chosen to remove the negative powers of $\lambda.$ \end{remark} \subsection{Twistor lines}\label{sec:twist} It remains to determine whether the real holomorphic sections constructed in Theorem \ref{thm:IFT} and Theorem \ref{blowuplimit} are actually twistor lines. This finally determines the sign in \eqref{eq:rho}. We start with the following observation: \begin{lemma}\label{lem:signmean} Denote by $x^{\pm}(t)\in(\mathcal W_a^{\geq -1})^3$ the solutions of the monodromy problem provided by Theorem \ref{thm:IFT} for the two possible choices of the sign of $\rho$ in \eqref{eq:a0b0c0} in their initial data \eqref{eq:defcvxj}.
We then have \[x^-(-t)(-\lambda)=x^+(t)(\lambda)\] for all $\lambda\in \mathbb D_a^*.$ \end{lemma} \begin{proof} This directly follows from the uniqueness part in the implicit function theorem, compare with the proof of \cite[Proposition 24]{HHT2}. \end{proof} \begin{remark} This lemma shows how to relate the two different choices of the sign for $\rho$ in the initial data by changing the sign for the deformation parameter. Since we will show that only one of the sign choices leads to actual twistor lines, we will restrict to solutions for $t>0$ in the following. \end{remark} \begin{lemma}\label{lem:wrongsignlim} For small $t>0$, let $x^-_{u,v}(t)\in(\mathcal W_a^{\geq -1})^3$ be the solution of the monodromy problem provided by Theorem \ref{thm:IFT} with initial data \eqref{eq:defcvxj} and $\rho <0$ in \eqref{eq:a0b0c0}, where $(u,v)\in \C^2\setminus\{0\}$ parametrizes the Higgs field via \eqref{eq:ambmcm}. Let $s^-_{u,v}(t)$ be the corresponding real holomorphic section of the parabolic Deligne-Hitchin moduli space. Then the limit \[\lim_{r\to 0}s^-_{ru,rv} =s_{[u,v]}\] is not a real holomorphic section, and $s^-_{ru,rv}$ are not twistor lines. \end{lemma} \begin{proof} By Theorem \ref{blowuplimit}, the families of flat connections extend to $r=0$. This is true for both possible signs of $\rho$, and it can be directly computed that the corresponding limit for negative $\rho$ differs from \eqref{eq:blowuplimit} by an overall sign. Therefore, in the limit $(t,r)\to0$, the quasiparabolic lines (the eigenlines with respect to the {\em positive} eigenvalue of the residue $A_j$ determined by $x_1, x_2, x_3$) at any fixed $\lambda_0\in\C^*$ are given by \begin{equation}\label{eq:wrongparastr}\ell_1=(-\bar v,\bar u)^T\C,\quad\ell_2=(\bar u,\bar v)^T\C,\quad \ell_3=(\bar v,\bar u)^T\C,\quad\ell_4=(-\bar u,\bar v)^T\C\,.\end{equation} On the other hand, the quasiparabolic lines of $s^-_{ru,rv}$ for $r\neq0$ at $\lambda=0$ are given by \eqref{eq:quasbef}. Hence, the parabolic structure at $\lambda=0$ differs from \eqref{eq:wrongparastr} for $(u,v)\neq0$. Therefore, for $(u,v)\in\C^2\setminus\{0\}$, the $r \rightarrow 0$ limit cannot exist. But this contradicts the fact that the space of twistor lines is open and closed: if $s^-_{ru,rv}$ were twistor lines for all $r\neq0,$ the limit would exist and would be a twistor line as well. Thus $s^-_{ru,rv}$ cannot be a twistor line for all $(u,v)$. \end{proof} Lemma \ref{lem:wrongsignlim} shows that taking the limit $r\to0$ is delicate in general. But using additional symmetries, we have \begin{lemma}\label{lem:specialextension} Consider the 4-punctured sphere given by $p=\exp(\tfrac{\pi i}{4}),$ and let $(\wt u,\wt v)=\tfrac{1}{\sqrt{2}}(1,\exp(\tfrac{\pi i}{4})).$ Consider the solution provided by Theorem \ref{blowuplimit} for $\rho >0$. Then, we have at $r=0$ that \eqref{eq:blowuplimit} holds for all $t>0$ small enough. In particular, the corresponding real holomorphic sections parametrised by $r$ and $t$ have a limit for $r\to0$ which is a twistor line. \end{lemma} \begin{proof} The aim is to show that, for this special choice of parameters in Theorem \ref{blowuplimit}, \eqref{eq:blowuplimit} holds for all $t\in(0,\tfrac{1}{4})$ where the implicit function theorem applies, rather than only at $(r,t) = (0,0)$. First, we show that the logarithmic connection given by \eqref{eq:blowuplimit} with singular divisor determined by $p$ and quasiparabolic lines given by $(\wt u,\wt v)$ has unitary monodromy for all $t\in(0,\tfrac{1}{4})$.
Due to the choice of $p$, there is an additional symmetry $z\mapsto iz$ which maps the set of singular points into itself. Moreover, the special choice of $(\wt u, \wt v)$ gives that the logarithmic connection is equivariant with respect to this order four symmetry, and therefore descends to a logarithmic $\mathrm{SL}(2,\C)$ connection $\wt\nabla$ on the 3-punctured sphere. On the $3$-punctured sphere, any monodromy representation is uniquely determined (up to conjugation) by the parabolic weights, and $\wt \nabla$ must be unitary for $t \in (0, \tfrac{1}{4})$, see \cite[Lemma 2.2]{Fuchs} for details. Moreover, the parabolic structure for the pull-back of $\wt\nabla$ to the 4-punctured sphere is uniquely determined and thus given by $(\wt u, \wt v)$. Next, consider for $(\wt u,\wt v)=\tfrac{1}{\sqrt{2}}(1,\exp(\tfrac{\pi i}{4}))$ and $r\neq0$ the solution $(x_1,x_2,x_3)=x(t,p,r,\wt{u},\wt{v})$ provided by Theorem \ref{blowuplimit}. Then we have \[(x_1,x_2,x_3)(-\lambda)=-(x_1,x_3,x_2)(\lambda)\] for all $\lambda\in\C^*$ by the uniqueness part of the implicit function theorem, together with the symmetry $z\mapsto iz$. As a consequence, the $r\to0$ limit has the corresponding symmetry as well. But by Theorem \ref{blowuplimit}, point (2), the limit is given by a unitary logarithmic connection $\nabla$ independent of $\lambda$, symmetric with respect to $z\mapsto iz$. This gives that $\nabla$ descends to the 3-punctured sphere and thus has unitary monodromy for $t\in (0, \tfrac{1}{4})$. Therefore $\nabla$ is the pull-back of $\wt \nabla$, and thus \eqref{eq:blowuplimit} holds for all small $t\in (0, \tfrac{1}{4})$. \end{proof} \begin{theorem}\label{computingthecomponent} The real sections constructed in Theorem \ref{thm:IFT} and Theorem \ref{blowuplimit} (for $t>0$ and $\rho>0$) are twistor lines. \end{theorem} \begin{proof} For $p=\exp(\tfrac{\pi i}{4})$ and small rational $t$, this follows from Lemma \ref{lem:specialextension} and Proposition \ref{pro:sufftwist}. The general case then follows by continuous dependency of real holomorphic sections on the parameters $(p,t,r,\wt u,\wt v)$ (Theorem \ref{blowuplimit}) and $(p,t,u,v)$ (Theorem \ref{thm:IFT}), respectively, together with Remark \ref{rem:sufftwist} for non-rational weights. \end{proof} \section{The hyper-K\"ahler structure and the non-abelian Hodge correspondence}\label{NAHCt=0} In this section we explicitly describe the (rescaled) metric and the non-abelian Hodge correspondence at the limit $t=0$. We then compute the first order derivatives with respect to $t$ at $t=0.$ \subsection{The non-abelian Hodge correspondence at $t=0$}\label{sectionNAHt0} The parabolic non-abelian Hodge correspondence on the rank $2$ hermitian bundle $\mathcal V$ is a diffeomorphism that associates to each stable strongly parabolic Higgs pair $(\dbar_{\mathcal V}, \Phi)$ the logarithmic connection $\nabla^{\lambda=1}$ of the associated family of flat connections. In the case of Fuchsian potentials on a $4$-punctured sphere with parabolic weight $t \in (0, \tfrac{1}{4})$, the underlying holomorphic structure is trivial and the Higgs pair is given by the strongly parabolic Higgs field $\Psi$ only. Due to the symmetry assumptions, $\Psi$ is fully determined by its residue at $p \in \C^+$, and since $\sum_{j=1}^3 x_{j,-1}^2 = 0,$ this residue is nilpotent. On the other hand, the connection $1$-form for the flat SL$(2, \C)$-connection $\nabla^{\lambda=1}$ is determined by $A_1(\lambda = 1)$ satisfying $\det A_1 = -1$ and $\tr A_1 = 0$.
Hence $A_1$ lies in the SL$(2, \C)$ adjoint orbit of $\begin{smatrix} 1 &0 \\ 0 &-1\end{smatrix}.$ Consider the complex 3-dimensional vector space $\mathfrak{sl}(2,\C)$ with its complex bilinear inner product \[<\xi,\eta>=-\frac{1}{2}\text{tr}(\xi \eta).\] Then its associated quadratic form is the determinant $\text{det}.$ Decompose $\mathfrak {sl}(2, \C)$ into real subspaces \[\mathfrak{sl}(2,\C)=\mathfrak{su}(2)\oplus i\mathfrak{su}(2)\] consisting of the subspace of skew-hermitian ($A = -\bar A^T$) and the subspace of hermitian symmetric $(A = \bar A^T)$ trace-free matrices. Note that $<.,.>$ is positive-definite on the 3-dimensional real subspace $\mathfrak{su}(2)$ and negative definite on $i\mathfrak{su}(2).$ \begin{lemma}\label{classical-weierstrass} There is a diffeomorphism between the $\mathrm{SL}(2,\C)$ orbit through $\begin{smatrix}1&0\\0&-1\end{smatrix}$ without the hermitian symmetric matrices and the nilpotent $\mathrm{SL}(2,\C)$ orbit in $\mathfrak{sl}(2,\C)$. \end{lemma} \begin{proof} Let $\Psi$ be an element in the nilpotent orbit. Then, $\Phi=\Psi-\bar\Psi^T$ is skew-hermitian and there is a unique skew-hermitian $N\in\mathfrak{su}(2)$ of length 1 such that \[<N,\Psi-\bar\Psi^T>=0 , \quad <N,i\Psi+i\bar\Psi^T> = 0\] and \[N\times (\Psi-\bar\Psi^T):= \tfrac{1}{2}[N, \Psi-\bar\Psi^T] = i\Psi+i\bar \Psi^T.\] Moreover, \begin{equation}\label{eq:apsia}A_\Psi:=-\sqrt{1+<\Phi,\Phi>}\, iN+\Phi \in\mathfrak{sl}(2,\C)\end{equation} has determinant $-1$ and the map \[\Psi\longmapsto A_\Psi\] is smooth. Note that \[A_1(\lambda)=\lambda^{-1}\Psi-\sqrt{1+<\Phi,\Phi>}\, iN- \lambda\bar\Psi^T\] is of the form \eqref{initialproblem}. Plugging in the formulas for the Higgs field $\Psi$ in terms of the $(u,v)$ coordinates, $-\sqrt{1+<\Phi,\Phi> } \;iN$ is the constant term of the initial values \eqref{eq:rho0} with $\rho$ being positive. \\ For the converse direction, let $A\in\mathfrak{sl}(2,\C)$ be of determinant $-1$, such that $A$ is not hermitian symmetric, i.e., \[\Phi:=\frac{1}{2}(A-\bar A^T) \neq 0.\] Since $\det(A)=-1$ and $<.,.>$ is positive definite on $\mathfrak{su}(2)$, also \[\xi:=\tfrac{1}{2}(A+\bar A^T)\] does not vanish. Define \[N:=\frac{i}{\sqrt{-<\xi,\xi>}}\xi\in\mathfrak{su}(2).\] Note that $<N,\Phi>=0$ since $\det(A)$ is real. Then, \[\Psi_A:=\tfrac{1}{2}(\Phi-i N\times \Phi)\] is nilpotent, and using \eqref{eq:apsia} \[A_{\Psi_A}=A.\] Moreover, the map $A\longmapsto\Psi_A$ is also smooth. \end{proof} Recall that we identify the nilpotent orbit with $\C^2 \setminus{(0, 0)}/\Z_2$ via the $2:1$ map \[ \C^2\setminus\{(0,0)\} \longrightarrow \{\Psi\in\mathfrak{sl}(2,\C)\mid \Psi\neq0;\, \det(\Psi)=0\}, \quad (u,v)\longmapsto \Psi:=\begin{pmatrix} u\,v&-u^2\\v^2&-u\,v\end{pmatrix}.\] Note that \[\Psi \begin{pmatrix}u \\ v \end{pmatrix}=0.\] In the following, we consider the {\em rescaled} space of Higgs bundles, i.e., we study Higgs fields of the form $t\Psi$ on parabolic structures with trivial underlying holomorphic bundle and parabolic weight $t>0$ small. On the deRham moduli space side, we consider logarithmic connections of the form $d+t\eta$ which converge to the trivial connection for $t\to0.$ We obtain \begin{theorem}\label{the:nahdiffeot0} The diffeomorphism in Lemma \ref{classical-weierstrass} extends to a diffeomorphism of the blow-up of $\C^2/\Z_2$ at $(0,0)$ which is $T^*\C P^1$, to the full adjoint $\mathrm{SL}(2,\C)$ orbit through $\begin{smatrix}1&0\\0&-1\end{smatrix}$ (including hermitian symmetric matrices). 
This map is the limit of the non-abelian Hodge correspondence for $t \rightarrow 0$ for rescaled strongly parabolic Higgs fields with trivial underlying holomorphic bundle. \end{theorem} \begin{proof} For $(u, v) \rightarrow 0$ take $u= r \wt u$ and $v = r \wt v$ with $|\wt u|^2 + |\wt v|^2 = 1$ and $r\in \R_{>0}$, and consider $r \rightarrow 0.$ Let $ \Psi(u, v) = r^2 \Psi(\wt u, \wt v)$ be the associated nilpotent matrix. Then the map $\Psi \mapsto A_{\Psi}$ extends to $r=0$ with $$\lim_{r \rightarrow 0} A_{\Psi(u,v)} =- i \wt N$$ with $\wt N \in \mathfrak{su}(2)$ of length $1$ satisfying \[<\wt N,\wt\Psi-\overline{\wt \Psi}^T>=0 \and <\wt N,i\wt \Psi+i\overline{\wt\Psi}^T> = 0.\] Conversely, all hermitian symmetric matrices of determinant $-1$ can be realized as a limit. It follows from Theorem \ref{thm:IFT} and Theorem \ref{blowuplimit} that this map is the non-abelian Hodge correspondence for $t \rightarrow 0$. \end{proof} \subsection{The rescaled metric at $t=0$}\label{sec:res-metric} By Theorem \ref{the:nahdiffeot0} the rescaled Higgs bundle moduli space at $t=0$ is the completion of the nilpotent $\mathrm{SL}(2,\C)$ orbit, identified with the blow-up of $\C^2/\Z_2$ at the origin, which is mapped by the limit of the non-abelian Hodge correspondence at $t=0$ to the $\mathrm{SL}(2,\C)$ orbit through $\begin{smatrix} 1&0\\0&-1\end{smatrix}.$ The latter can be interpreted as the rescaled limit of the Betti moduli space at the identity, compare with the vanishing of $Q_j$ in the proof of Proposition \ref{prop:two-traces}. Next, we show that the rescaled limit hyper-K\"ahler metric on the rescaled Higgs bundle moduli space at $t=0$ is the Eguchi-Hanson metric. The scaling factor $\tfrac{1}{t}$ is chosen so that the central sphere $\C P^1$ in $\mathcal M_{Higgs}$, which is the moduli space of semi-stable parabolic bundles (i.e., Higgs pairs with vanishing Higgs fields), has constant volume independent of the weight $t$. This is in accordance with the scaling factor $k$ in Proposition \ref{pro:sympupdown} when $t=\tfrac{l}{k}$ is rational. \begin{lemma}\label{rescaledform} Let $t\sim 0$ and consider the space of solutions $\mathcal M(t)$ provided by Theorem \ref{thm:IFT} and Theorem \ref{blowuplimit}, parametrized by $(u,v) \in \C^2 \setminus \{(0,0)\}$. In terms of the parameters $x_1(t), x_2(t), x_3(t)$ in \eqref{symmetry-fix1-4}, the rescaled twisted holomorphic symplectic form is given by \[\varpi= \frac{d x_2(t)\wedge d x_3(t)}{x_1(t)}.\] \end{lemma} \begin{proof} By Proposition \ref{proId}, the twisted holomorphic symplectic form is given by the Goldman symplectic form, which can be computed by \eqref{def:symf}. Moreover, since we consider Fuchsian systems, the integral on $\Sigma^0$ in \eqref{def:symf} vanishes and we only need to compute the boundary terms. By construction, all 4 residues $B_j=tA_j$ give the same contribution. A direct computation shows that \begin{equation}\label{kirilov123}\begin{split} \frac{1}{8\tr(B_1^2)}\tr\left(B_1[d B_1\wedge d B_1]\right)= -\tfrac{t}{4} i(x_1 dx_2\wedge dx_3-x_2dx_1\wedge dx_3+x_3 dx_1\wedge dx_2). \end{split} \end{equation} On the other hand, \[0=x_1 dx_1+x_2 dx_2+x_3 dx_3\] which combines with \eqref{kirilov123} (together with the rescaling by $\tfrac{1}{t}$) to \[\varpi=\tfrac{4 i}{t}\frac{1}{8\tr(B_1^2)}\tr\left(B_1[d B_1\wedge d B_1]\right)= \frac{dx_2\wedge dx_3}{x_1}\] proving the lemma.
\end{proof} For an explicit formula of the rescaled twisted symplectic form in Lemma \ref{rescaledform} at $t=0$ let $A=A_1(\lambda)$ be the residue of the rescaled potential $\tfrac{1}{t}\eta_{t=0}$ (see \eqref{etatttinfront}) at $t=0.$ Recall that $$A=\lambda^{-1}\matrix{uv&-u^2\\v^2&-uv} +\rho\matrix{|u|^2-|v|^2& 2u\vc\\2\uc v&|v|^2-|u|^2} +\lambda\matrix{-\uc\vc&-\vc^2\\\uc^2&\uc\vc},$$ i.e., we compute the symplectic form using the Higgs field coordinates $(u,v)$, where $r^2=|u|^2+|v|^2$ and $\rho = \sqrt{1+r^{-4}}$. A direct computation then gives at $t=0$ \begin{align} \label{mathematica-omega0} \varpi=2i&\left(-\frac{r^6+|v|^2}{\rho r^6}du\wedge d\uc -\lambda^{-1}du\wedge dv +\frac{\uc v}{\rho r^6} du\wedge d\vc\right.\\ &\left.-\frac{u\vc}{\rho r^6}d\uc\wedge dv -\lambda d\uc\wedge d\vc -\frac{r^6+|u|^2}{\rho r^6} dv\wedge d\vc \right).\nonumber\end{align} On the other hand, $$\varpi=\lambda^{-1}(\omega_J+i\omega_K)-2\omega_I-\lambda(\omega_J-i\omega_K)$$ from which we obtain \begin{equation*} \begin{split} \omega_I&=\frac{i}{\rho r^6}\left( (r^6+|v|^2)du\wedge d\uc-\uc v \,du\wedge d\vc+u\vc \,d\uc\wedge dv+(r^6+|u|^2) dv\wedge d\vc\right)\\ \omega_J&=i\left(-du\wedge dv+d\uc\wedge d\vc\right)\\ \omega_K&=-\left(du\wedge dv+d\uc\wedge d\vc\right). \end{split} \end{equation*} \begin{proposition}\label{limitEH} The rescaled hyper-K\"ahler metric of strongly parabolic Higgs bundles on the 4-punctured sphere at $t=0$ is the Eguchi-Hanson space modulo a $\Z_2\times\Z_2$ action. \end{proposition} \begin{proof} Since $(u,v)$ corresponds to Higgs bundle coordinates, we use the complex structure $I$ to compute $g= \omega_I(., I.)$. Consider the tangent space basis $$\mathcal B = \left( \tfrac{\del }{\del u}, \tfrac{\del }{\del \bar u}, \tfrac{\del }{\del v}, \tfrac{\del }{\del \bar v}\right)$$ Then the complex structure $I$ can be represented by the diagonal matrix $\mathrm{diag}(i,-i,i,-i)$. and we obtain \begin{eqnarray*} g&=&\sqrt{1+r^{-4}}\Big[ du\otimes d\uc+d\uc\otimes du +dv\otimes d\vc+d\vc\otimes dv -\frac{1}{r^2(1+r^4)}\Big( u\uc(du\otimes d\uc+d\uc\otimes du)\\ && +\uc v(du\otimes d\vc+d\vc\otimes du) +u\vc(dv\otimes d\uc+d\uc\otimes dv) +v\vc(dv\otimes d\vc+d\vc\otimes dv)\Big)\Big] \end{eqnarray*} which by \cite[Equation 2]{Lye} identifies with the Eguchi-Hanson metric with $n=2$ and $a=1$. \end{proof} Obviously, the Eguchi-Hanson metric is independent of the conformal type of the 4-punctured sphere. On the other hand, it is known that the conformal type of the underlying 4-punctured sphere can be recovered from the Hitchin metric of the parabolic Higgs bundle moduli space, see \cite{CVZ} and \cite{FMSW}. We will see the non-trivial dependence of the metric on the conformal type in the first order approximation of the metric in $t$. \subsection{First order approximations}\label{firstderivative} The advantage of our setup is that we obtain, analogously to \cite{HHT2}, an iterative way of computing the power series expansion of the twistor lines leading to an approach towards explicitly computing the non-abelian Hodge correspondence and all involved geometric quantities for the case at hand. In this section, we compute the first order derivatives of the parameters $(x_1, x_2, x_3)$ and derive in particular first order derivatives of the relative twisted holomorphic symplectic form. This yields derivatives of the non-abelian Hodge correspondence as well as derivatives of the hyper-K\"ahler metric. 
\subsubsection{First order derivatives of the parameters} Define for $1\leq j,k\leq 3$ and $\omega_k$, $\Omega_j$ given in \eqref{omega} and \eqref{Omega}, respectively, $$\Omega_{jk}(z)=\int_0^z\Omega_j\omega_k.$$ The shuffle relation (see e.g. \cite[Appendix]{HHT2}) then gives \begin{equation} \label{eq:shuffle-depth2} \Omega_j\Omega_k=\Omega_{jk}+\Omega_{kj}. \end{equation} Let $()'$ and $()''$ denote the first and second order derivatives of a quantity with respect to $t$ at $t=0$. Then the following proposition holds. \begin{proposition} \label{prop:derivatives} The first order derivatives $x'_j$ for $j=1,2,3$ are polynomials of degree at most 2 in $\lambda$: $$x'_j=x'_{j,0}+x'_{j,1}\lambda+x'_{j,2}\lambda^2=x'_{j,0}+(x'_j)^+$$ with the positive parts given by \begin{equation} \label{eq:x'pos} \begin{cases} (x'_1)^+=\displaystyle\frac{4i}{\pi}\,\Im(\Omega_{21}(1)+\Omega_{31}(i))\,(\cvx_2\cvx_3)^+\\ (x'_2)^+=\displaystyle\frac{-4i}{\pi}\,\Im(\Omega_{31}(i))\,(\cvx_1\cvx_3)^+\\ (x'_3)^+=\displaystyle\frac{-4i}{\pi}\,\Im(\Omega_{21}(1))\,(\cvx_1\cvx_2)^+, \end{cases} \end{equation} and the constant terms given by \begin{equation} \label{eq:x'0} \begin{cases} x'_{1,0}=\displaystyle\frac{-1}{\rho r^4}\left((|u|^2-|v|^2)X-2\rho u v Y\right)\\ x'_{2,0}=\displaystyle\frac{-1}{\rho r^4}\left(2\,\Re(u\,\overline{v})X-\rho(v^2-u^2)Y\right)\\ x'_{3,0}=\displaystyle\frac{-1}{\rho r^4}\left(2\,\Im(u\,\overline{v})X-\rho i(u^2+v^2)Y\right), \end{cases} \end{equation} where $r^2=|u|^2+|v|^2$, $\rho$ satisfying \eqref{eq:rho} and \begin{equation} \begin{split} X&=\sum_{j=1}^3\cvx_{j,-1}x'_{j,1}\\ Y&=\sum_{j=1}^3\left(\cvx_{j,-1}x'_{j,2}+\cvx_{j,0}x'_{j,1}\right), \end{split} \end{equation} with $x'_{j,1}$ and $x'_{j,2}$ determined by \eqref{eq:x'pos}. \end{proposition} \begin{remark} The required $\Omega$ integrals are computed in Proposition \ref{prop:integrals2} to be $$\Im(\Omega_{21}(1))=2\pi\log\left|\frac{p^2-1}{2p}\right|\and \Im(\Omega_{31}(i))=-2\pi\log\left|\frac{p^2+1}{2p}\right|.$$ \end{remark} \begin{proof} Let $\Phi$ be the fundamental solution of the equation $d\Phi_t=\Phi_t\eta_t$ with $\Phi_t(0) = \Id.$ Recall that $\eta_{t=0} =0$ and therefore $\Phi_{t=0} = \Id.$ Differentiating the equation $d\Phi_t=\Phi_t\eta_t$ twice at $t=0$ we thus obtain $$d\Phi''=\eta''+2\Phi'\eta',$$ hence $$\Phi''(z)=\int_0^z(\eta''+2\Phi'\eta').$$ Then we have using the proof of Proposition \ref{prop:derivative-pqr} $$\eta'=\sum_{j=1}^3 \cvx_j\omega_j\mathfrak m_j, \quad \Phi'=2\sum_{j=1}^3 \cvx_j\Omega_j\mathfrak m_j, \quad \eta''=2\sum_{j=1}^3 x'_j\omega_j\mathfrak m_j.$$ This gives $$\int_0^z\eta''=2\matrix{x'_1\Omega_1&x'_2\Omega_2+ix'_3\Omega_3\\ x'_2\Omega_2-ix'_3\Omega_3&-x'_1\Omega_1}$$ and $$\int_0^z \Phi'\eta'=\matrix{\sum_{j=1}^3\cvx_j^2\Omega_{jj}+i\cvx_2\cvx_3(\Omega_{32}-\Omega_{23})& \cvx_1\cvx_2(\Omega_{12}-\Omega_{21})+i\cvx_1\cvx_3(\Omega_{13}-\Omega_{31})\\ \cvx_1\cvx_2(\Omega_{21}-\Omega_{12})+i\cvx_1\cvx_3(\Omega_{13}-\Omega_{31}) &\sum_{j=1}^3\cvx_j^2\Omega_{jj}+i\cvx_2\cvx_3(\Omega_{23}-\Omega_{32})}.$$ Using Leibniz rule, the shuffle relation \eqref{eq:shuffle-depth2} and that we solved Equation \ref{eq:easy-integrals} we obtain \begin{eqnarray}\label{eq:p''} \mathfrak p''&=&\mathcal P''_{12}-\mathcal P''_{21}+2(\mathcal P'_{11}\mathcal P'_{21}-\mathcal P'_{12}\mathcal P'_{22})\nonumber\\ &=&-4i x'_3\Omega_3(1)+4\cvx_1\cvx_2\left(\Omega_{21}(1)-\Omega_{12}(1)\right)+4\cvx_1\cvx_2\Omega_1(1)\Omega_2(1)\nonumber\\ &=&4\pi x'_3+8\cvx_1\cvx_2\Omega_{21}(1) \end{eqnarray} 
\begin{eqnarray}\label{eq:q''} \mathfrak q''&=&i(\mathcal Q''_{21}+\mathcal Q''_{12})+2i(\mathcal Q'_{11}\mathcal Q'_{21}+\mathcal Q'_{12}\mathcal Q'_{22})\nonumber\\ &=&4i x'_2\Omega_2(i)-4 \cvx_1\cvx_3(\Omega_{13}(i)-\Omega_{31}(i))+4\cvx_1\cvx_3\Omega_1(i)\Omega_3(i)\nonumber\\ &=&4\pi x'_2+8\cvx_1\cvx_3\Omega_{31}(i). \end{eqnarray} Since we have solved $\mathfrak p(t)=\mathfrak p(t)^*$ for all $t$, we have $\mathfrak p''=(\mathfrak p'')^*$. Moreover, $\cvx_j=\cvx_j^*,$ hence \begin{equation} \label{eq:p''star} 4\pi x'_3-4\pi(x'_3)^*=-16 i\,\cvx_1\cvx_2\Im(\Omega_{21}(1)). \end{equation} Projecting onto $\mathcal W^+$ and remembering that $x'\in\mathcal W^{\geq 0},$ we obtain the formula for $(x'_3)^+$ stated in Equation \eqref{eq:x'pos}, and $(\cvx_1\cvx_2)^+$ is a degree-2 polynomial. In the same vein $\mathfrak q''=(\mathfrak q'')^*$ gives \begin{equation} \label{eq:q''star} 4\pi x'_2-4\pi(x'_2)^*=-16 i\,\cvx_1\cvx_3\Im(\Omega_{31}(i)) \end{equation} which determines $(x'_2)^+$. For $(x'_1)^+$ consider the equation $\mathcal K=1$ which holds for all $t$. Therefore, \begin{equation} \label{eq:K'} \mathcal K'=0=2\sum_{j=1}^3 \cvx_j x'_j. \end{equation} Then $\mathcal K'=\mathcal K'^*$ and Equations \eqref{eq:p''star}, \eqref{eq:q''star} give the equation \begin{equation} \label{eq:r''star} 4\pi \cv{x}_1 x'_1-4\pi\cv{x}_1(x'_1)^*=16 i\,\cvx_1\cvx_2\cvx_3\Im(\Omega_{21}(1)+\Omega_{31}(i)). \end{equation} Dividing by $\cv{x}_1\not\equiv 0$ and taking the positive part determines $(x'_1)^+$. To compute the constant terms, we consider the coefficients of $\lambda^{-1}$, $\lambda^0$ and $\lambda$ in $\mathcal K$: \begin{equation*} \begin{split} \mathcal K'_{-1}&=2\sum_{j=1}^3\cvx_{j,1}x'_{j,0}=0\\ \mathcal K'_0&=2\sum_{j=1}^3(\cvx_{j,0}x'_{j,0}+\cvx_{j,-1}x'_{j,1})=0\\ \mathcal K'_1&=2\sum_{j=1}^3(\cvx_{j,1}x'_{j,0}+\cvx_{j,0}x'_{j,1}+\cvx_{j,-1}x'_{j,2}) \end{split} \end{equation*} which yield the system of equations $$\begin{cases} {\displaystyle \sum_{j=1}^3}\cvx_{j,-1} x'_{j,0}=0\\ {\displaystyle \sum_{j=1}^3}\cvx_{j,0} x'_{j,0}=-X\\ {\displaystyle \sum_{j=1}^3}\cvx_{j,1} x'_{j,0}=-Y \end{cases}$$ with $X$, $Y$ as in Proposition \ref{prop:derivatives}. Its determinant simplifies to $$\det(\cvx_{j,k})_{1\leq j\leq 3,-1\leq k\leq 1}=\frac{i}{2}\rho r^6.$$ Using the Cramer rule, we obtain \eqref{eq:x'0} after simplification. \end{proof} \begin{remark}\label{remark:r''} The second derivative of $\mathfrak r$ is computed in the following. Through the character variety equation and the formulas for $\mathfrak p''$ and $\mathfrak q''$ it will give rise to an identity between $\Omega$-integrals. Using Proposition \ref{prop:traces} we have \begin{eqnarray*} \mathfrak r''&=&i\big[\mathcal P''_{22}+2\mathcal P'_{22}\mathcal Q'_{11}+\mathcal Q''_{11}+2\mathcal P'_{12}\mathcal Q'_{21}-2\mathcal P'_{21}\mathcal Q'_{12}-\mathcal P''_{11}-2\mathcal P'_{11}\mathcal Q'_{22}-\mathcal Q''_{22}\\ &&+(\mathcal P'_{22}+\mathcal Q'_{11})^2 +(\mathcal P'_{12}+\mathcal Q'_{12})^2 -(\mathcal P'_{21}+\mathcal Q'_{21})^2 -(\mathcal P'_{11}+\mathcal Q'_{22})^2\big] \\ &=&i\big[\mathcal P''_{22}-\mathcal P''_{11}+\mathcal Q''_{11}-\mathcal Q''_{22} +2(\mathcal P'_{12}-\mathcal P'_{21})(\mathcal Q'_{12}+\mathcal Q'_{21}) +(\mathcal P'_{12})^2-(\mathcal P'_{21})^2+(\mathcal Q'_{12})^2-(\mathcal Q'_{21})^2\big]\end{eqnarray*} where we used that $\mathcal P'_{11}+\mathcal P'_{22}=\mathcal Q'_{11}+\mathcal Q'_{22}=0$. 
This gives \begin{eqnarray} \mathfrak r''&=& i\big[-4x'_1\Omega_1(1)+4i \cvx_2\cvx_3(\Omega_{23}(1)-\Omega_{32}(1)) +4x'_1\Omega_1(i)-4i\cvx_2\cvx_3(\Omega_{23}(i)-\Omega_{32}(i))\nonumber\\ &&+8i\cvx_2\cvx_3\Omega_3(1)\Omega_2(i) +4i\cvx_2\cvx_3\Omega_3(1)\Omega_2(1) +4i\cvx_2\cvx_3\Omega_2(i)\Omega_3(i)\big]\nonumber\\ &=&4\pi x'_1-8\cvx_2\cvx_3\left (\Omega_{23}(1)+\Omega_{32}(i)+\pi^2 \right) \label{eq:r''}\end{eqnarray} using that we solved Equation \ref{eq:easy-integrals} and the shuffle relation \eqref{eq:shuffle-depth2}. Observe the similarity with \eqref{eq:p''} and \eqref{eq:q''}. Remember from the proof of Proposition \ref{prop:two-traces} that $\mathfrak p$, $\mathfrak q$ and $\mathfrak r$ satisfy the character variety equation for all $t$ \begin{equation} \label{mathematica-fricke} 4\cos(2\pi t)^2 + 4(\mathfrak p^2+\mathfrak q^2+\mathfrak r^2-1)+8(-1)^j \mathfrak{pqr}=0 \end{equation} with either $j=1$ or $j=2$. Taking the third order derivative of the above equation yields (since $\mathfrak p (0) = \mathfrak q (0) = \mathfrak r (0) = 0$) $$\mathfrak{p'p''+q'q''+r'r''}+2(-1)^j\mathfrak{p'q'r'}=0$$ Using Proposition \ref{prop:derivative-pqr} and Equations \eqref{eq:p''}, \eqref{eq:q''}, \eqref{eq:r''} we then obtain $$8\pi^2(\cvx_1 x'_1+\cvx_2 x'_2+\cvx_3 x'_3) +16\pi\cvx_1\cvx_2\cvx_3\big(\Omega_{21}(1)+\Omega_{31}(i)-\Omega_{23}(1)-\Omega_{32}(i)-\pi^2+(-1)^j\pi^2\big)=0.$$ The first summand vanishes, as $x_1^2+x_2^2+x_3^2=1$ for all $t$, thus $$\Omega_{21}(1)+\Omega_{31}(i)-\Omega_{23}(1)-\Omega_{32}(i)-\pi^2+(-1)^j\pi^2=0.$$ In the most symmetric case of the 4-punctured sphere where $p=e^{i\pi/4}$ we have by symmetry $$\Omega_{31}(i)=-\Omega_{21}(1)\and \Omega_{32}(i)=-\Omega_{23}(i).$$ Hence $j=2$ and we have proved the following identity that holds for all $p\in\C^+$: \begin{equation} \label{eq:Omega-identity} \Omega_{23}(1)+\Omega_{32}(i)=\Omega_{21}(1)+\Omega_{31}(i). \end{equation} \end{remark} \begin{proposition} \label{prop:integrals2} For $p\in\C^+$ we have $$\Omega_{21}(1)=2\pi i\log\left(\frac{p^2-1}{2ip}\right)\quad\text{and}\quad\Omega_{31}(i)=-2\pi i\log\left(\frac{p^2+1}{2p}\right),$$ where $\log$ denote the principal valuation of the logarithm on $\C\setminus\R^-$. \end{proposition} \begin{proof} We first prove the Proposition for $p=e^{i\varphi}$ with $0<\varphi<\pi/2$. In that case, we have by \cite[Proposition 35]{HHT2} \begin{equation} \begin{split} \Omega_{21}(1)-\Omega_{12}(1)&=4\pi i\log(\sin\varphi))-i(\pi-2\varphi)\log\left(\frac{1-\cos(\varphi)}{1+\cos(\varphi)}\right)\\ \Omega_{31}(i)-\Omega_{13}(i)&=-4\pi i\log(\cos(\varphi))+2i\varphi\log\left(\frac{1-\sin(\varphi)}{1+\sin(\varphi)}\right). \end{split} \end{equation} On the other hand, by the shuffle product formula and using \eqref{eq:easy-integrals} $$\Omega_{21}(1)+\Omega_{12}(1)=\Omega_1(1)\Omega_2(1)=i(\pi-2\varphi)\log\left(\frac{1-\cos(\varphi)}{1+\cos(\varphi)}\right)$$ $$\Omega_{31}(i)+\Omega_{13}(i)=\Omega_1(i)\Omega_3(i)=-2i\varphi\log\left(\frac{1-\sin(\varphi)}{1+\sin(\varphi)}\right).$$ Hence $$\Omega_{21}(1)=2\pi i \log(\sin\varphi)$$ $$\Omega_{31}(i)=-2\pi i\log(\cos(\varphi)$$ proving the result for $p=e^{i\varphi}$.\\ For $p\in\C^+$, both $\frac{p^2-1}{2ip}$ and $\frac{p^2+1}{2p}$ are in $\C\setminus\R^-$ and both sides of the formulas of Proposition \ref{prop:integrals2} are well-defined holomorphic functions in $p\in\C^+$ which coincide when $p\in\C^+\cap\S^1$, therefore they are equal. 
\end{proof} \subsubsection{First order derivative of the metric} By Lemma \ref{rescaledform} we have $\varpi=\frac{dx_2\wedge dx_3}{x_1}$ for all $t.$ Recall from Proposition \ref{pro:classparahiggs} that the open dense subset of the Higgs bundle moduli space specified by trivial holomorphic bundles is independent of the weight $t$. Thus the complex structure $I$ is independent of $t$ and also the holomorphic symplectic form $\omega_J + i\omega_K$ are independent of $t$. The same holds true for the holomorphic symplectic form $\omega_J - i\omega_K$ for the complex structure $-I.$ This gives that the first order derivatives of the twisted holomorphic symplectic form is given by the derivative of its constant term \begin{equation}\label{varphiconst}\varpi_0= - 2 \omega_I = \frac{1}{\cv{x}_{1,-1}}\left( \frac{-x_{1,0}}{\cv{x}_{1,-1}}d\cv{x}_{2,-1}\wedge d\cv{x}_{3,-1} +d x_{2,0}\wedge d\cv{x}_{3,-1} +d\cv{x}_{2,-1}\wedge x_{3,0} \right).\end{equation} Using the formulas for $x'_{1,0}$, $x'_{2,0}$ and $x'_{3,0}$, we obtain \begin{eqnarray} \varpi_0'&=& \frac{4\,i\,\Im(\Omega_{21}(1))}{\pi r^8}\left[\left(-r^4 +3(\uc v+u \vc)^2\right)|v|^2+r^8\left(|u|^2-|v|^2\right)\right]du\wedge d\uc \label{mathematica-omega1}\\ &+&\frac{4\,i\,\Im(\Omega_{31}(i))}{\pi r^8}\left[\left(r^4 +3(\uc v-u \vc)^2\right)|v|^2+r^8\left(|v|^2-|u|^2\right)\right]du\wedge d\uc \nonumber\\ &+&\frac{4\,i\,\Im(\Omega_{21}(1))}{\pi r^8}\left[\left(r^4 -3(\uc v+u \vc)^2\right)\uc v+r^8\left(-\uc v-3u\vc\right)\right]du\wedge d\vc \nonumber\\ &+&\frac{4\,i\,\Im(\Omega_{31}(i))}{\pi r^8}\left[\left(-r^4 -3(\uc v-u \vc)^2\right)\uc v+r^8\left(\uc v-3u\vc\right)\right]du\wedge d\vc \nonumber\\ &+&\frac{4\,i\,\Im(\Omega_{21}(1))}{\pi r^8}\left[\left(r^4 -3(\uc v+u \vc)^2\right)u \vc+r^8\left(-u \vc-3\uc v\right)\right]dv\wedge d\uc \nonumber\\ &+&\frac{4\,i\,\Im(\Omega_{31}(i))}{\pi r^8}\left[\left(-r^4 -3(\uc v-u \vc)^2\right)u \vc+r^8\left(u \vc-3\uc v\right)\right]dv\wedge d\uc \nonumber\\ &+&\frac{4\,i\,\Im(\Omega_{21}(1))}{\pi r^8}\left[\left(-r^4 +3(\uc v+u \vc)^2\right)|u|^2+r^8\left(|v|^2-|u|^2\right)\right]dv\wedge d\vc \nonumber\\ &+&\frac{4\,i\,\Im(\Omega_{31}(i))}{\pi r^8}\left[\left(r^4 +3(\uc v-u \vc)^2\right)|u|^2+r^8\left(|u|^2-|v|^2\right)\right]dv\wedge d\vc. \nonumber \end{eqnarray} where the $\Omega$-integrals are given by Proposition \ref{prop:integrals2}. Note that the $(2,0)$ and $(0,2)$ terms $du\wedge dv$ and $d\uc\wedge d\vc$ vanish in accordance with the twistorial description of hyper-K\"ahler metrics \cite{HKLR}. \subsection{Energy}\label{sec:energy} The (Dirichlet) energy of a map $f\colon \Sigma\to (M,g)$ from a Riemann surface $\Sigma$ to a Riemannian manifold $(M,g)$ is given by \[\mathcal E(f)=-\tfrac{1}{2}\int_\Sigma g(df\wedge* df)\] (where $*$ denotes the Riemann surface $*$-operator). If $f$ is equivariant (with respect to a discrete group acting holomorphically on $\Sigma$ and isometrically on $M$), the energy remains well-defined. If $(\nabla,\Phi,h)$ is a solution to the self-duality equations, the equivariant harmonic map is given by the harmonic metric $f=h$, see for example \cite{Do, Li}. Furthermore, the energy is then \begin{equation}\label{def:ener}\mathcal E(h)=2i\int_\Sigma \tr(\Phi\wedge\Phi^*),\end{equation} which is $L^2$-norm of the Higgs field $\Phi$. 
We refer to $\mathcal E(h)$ in \eqref{def:ener} as the energy of the self-duality solution $(\nabla,\Phi,h)$ and note that it is a K\"ahler potential for the hyperk\"ahler metric on the moduli space of solutions with respect to the complex structure $J,$ see \cite{Hi1}. Let $\eta=\eta_t$ be given by the parameters $(x_1,x_2,x_3)$ provided by Theorem \ref{thm:IFT}. Assume that $t=\frac{l}{k}$ and let $\pi:\Sigma_k\to\C P^1$ be the $k$-fold covering as in \eqref{eq:sigma_k} totally branched over $p_1,\dots,p_4$. A twistor line provided by Theorem \ref{computingthecomponent} then gives rise to a solution $(\nabla,\Phi,h)$ of the self-duality equations on $\Sigma_k$. Analogously to \cite[Corollary 4.3]{HHT1} and similar to the proof of Lemma \ref{rescaledform}, the energy of the self-duality solution $(\nabla,\Phi,h)$ corresponding to the real section provided by $\eta=\eta_{x_1,x_2,x_3}$ on the covering surface $\Sigma_k$ is given by \begin{equation}\label{eq:energy}{\mathcal E}(h)=-4\pi\sum_j \Res_{q_j}\tr\left(\eta_{-1}G_{j,1}G_{j,0}^{-1}\right).\end{equation} In this equation $G_j=G_{j,0}+G_{j,1}\lambda+\dots$ is a desingularizing gauge, i.e., a holomorphic family of gauge transformations $G_j$ that removes the singularity of the pull-back potential $\pi^*\eta$ at $\wh p_j = \pi^{-1}(p_j)$ on $\Sigma_k$ and extends holomorphically to $\lambda=0$ (compare with \eqref{def:twistbundle} for fixed $\lambda$). The reason for the different sign in \eqref{eq:energy} compared to \cite[Corollary 4.3]{HHT1} is that we are considering (equivariant) harmonic maps into hyperbolic 3-space here, with associated family of the form $\nabla+\lambda^{-1}\Phi+\lambda\Phi^*$, while in \cite{HHT1} we considered harmonic maps into the 3-sphere with associated family of the form $\nabla+\lambda^{-1}\Phi-\lambda\Phi^*$, see also \cite[Theorem 2.4]{BeHRo}. A desingularizing gauge in a neighborhood of $\wt p_1$ is given by $$G_1=\matrix{1&0\\ \frac{1-x_1}{x_2+ix_3}&1}\matrix{w^{-l}&0\\0&w^l}$$ with $w$ the local coordinate with $w^{k}=z-p_1$. Then $$\frac{1}{l}\Res_{\wt p_1}\tr\left(\wt\eta_{-1}G_{1,1}G_{1,0}^{-1}\right) =1-x_{1,0}\,+\,\frac{x_{1,-1}(x_{2,0}+i x_{3,0})}{x_{2,-1}+i x_{3,-1}} =1-x_{1,0}\,-\,\frac{(x_{2,-1}-i x_{3,-1})(x_{2,0}+i x_{3,0})}{x_{1,-1}}.$$ For the residues at the other punctures, we substitute $(x_1,x_2,x_3)\to(-x_1,-x_2,x_3)$ to obtain $$\frac{1}{l}\Res_{\wt p_2}\tr\left(\wt\eta_{-1}G_{2,1}G_{2,0}^{-1}\right) =1+x_{1,0}\,+\,\frac{(x_{2,-1}+i x_{3,-1})(x_{2,0}-i x_{3,0})}{x_{1,-1}}$$ and substitute $(x_1,x_2,x_3)\to (x_1,-x_2,-x_3)$ to obtain for $k=1,2$ $$\Res_{\wt p_{k+2}}\tr\left(\wt\eta_{-1}G_{k+2,1}G_{k+2,0}^{-1}\right) =\Res_{\wt p_k}\tr\left(\wt\eta_{-1}G_{k,1}G_{k,0}^{-1}\right).$$ This gives the following formula for the energy of the equivariant harmonic map on $\Sigma_k$ $${\mathcal E}=-8\pi\,l\left(1+\frac{i}{x_{1,-1}}(-x_{2,-1}x_{3,0}+x_{2,0}x_{3,-1})\right).$$ Therefore, we define for $t>0$ the {\em renormalized energy} by \begin{equation}\label{def:renen} \underline{\mathcal E}_t:=-8\pi\left(1+\frac{i}{x_{1,-1}}(-x_{2,-1}x_{3,0}+x_{2,0}x_{3,-1})\right). 
\end{equation} Using the central value of the parameters we find that $\mathcal E$ extends to $t=0$ with \begin{equation} \label{mathematica-energy0} \underline{\mathcal E}=-8\pi(1-\rho r^2)=8\pi (\sqrt{1+r^{4}}-1), \end{equation} where $r^2=|u|^2+|v|^2.$ The first order derivatives of the parameters then yield \begin{eqnarray} \mathcal E'&=&-8\,\Im(\Omega_{21}(1))\left(|u|^4+|v|^4-3\uc^2v^2-3u^2\vc^2-4|u|^2|v|^2\right)\label{mathematica-energy1}\\ &&+8\,\Im(\Omega_{31}(i))\left(|u|^4+|v|^4+3\uc^2v^2+3u^2\vc^2-4|u|^2|v|^2\right). \nonumber\end{eqnarray} For the most symmetric case of $p=e^{i\pi/4}$, using $\Omega_{21}(1)=-\pi i\log(2)$ from Appendix A of \cite{HHT2} and $\Omega_{31}(i)=-\Omega_{21}(1)$, this simplifies to \begin{equation} \label{mathematica-energy1sym} \mathcal E'=16\pi\log(2)\left(|u|^4+|v|^4-4|u|^2|v|^2\right). \end{equation} \begin{remark} The energy $\mathcal E$ can also be used to give a different proof of Lemma \ref{lem:wrongsignlim}, because for $\rho <0$, the (rescaled) energy in the limit \[\mathcal E=-8\pi (1+\sqrt{1+r^{4}})<0\] is negative (instead of \eqref{mathematica-energy0}), but twistor lines must have non-negative energy. In the case of nilpotent Higgs fields, the (negative) energy can be interpreted as (the negative of) the Willmore energy of a Willmore surface, see \cite[Section 4]{BeHRo}. \end{remark} \subsection{Another hyper-K\"ahler metric} Recall that the implicit function theorem (Theorem \ref{thm:IFT}) also applies for the choice of negative $\rho$ in the initial condition \eqref{eq:rho0}. The corresponding real holomorphic sections are not twistor lines by Lemma \ref{lem:wrongsignlim}. On the other hand, we can still apply the general twistor space theory \cite{HKLR} to obtain a hyper-K\"ahler metric locally. In fact, the rescaled metric at $t=0$ is still the Eguchi-Hanson metric $g_{EH}$, and therefore, for small $t$ and small Higgs fields, the metric (and the smooth structure) remains non-degenerate. Using Lemma \ref{lem:signmean} we can compare the $t$-families of hyper-K\"ahler metrics $g^{\pm}$ for both choices of sign in the initial condition \eqref{eq:rho0} and obtain \[g^\pm=g_{EH}+\sum_{k\geq1} (\pm1)^k g_k t^k.\] \subsection{Convergence of Hitchin metrics on symmetric components} The aim of this section is to show Theorem \ref{thm:compactcon}, which we restate here for the convenience of the reader. \begin{theorem*}Let $C>0$ and $l\in\N^{>0}$ be fixed. Consider the compact subspaces \[\mathcal C^l_k:=\{[\nabla,\Phi]\in \mathcal M^l_{SD}(\Sigma_k)\mid \mathcal E(\nabla,\Phi)\leq C\}\] of $\mathcal M^l_{SD}$ with the induced hyper-K\"ahler metric $g_k$. Then for $k \rightarrow \infty$, we obtain smooth convergence \[(\mathcal C^l_k,g_k)\longrightarrow (\{v\in T^*\C P^1\mid \mathcal E_{EH}(v)\leq C\}, 32\pi\,l\, g_{EH})/(\Z_2\times\Z_2).\]\end{theorem*} For the setup of the proof, let $\pi\colon\Sigma_k\to\C P^1$ be the totally branched covering defined in \eqref{eq:sigma_k}. Consider the moduli space of polystable Higgs bundles, and the moduli space of solutions to the self-duality equations $\mathcal M_{SD}(\Sigma_k),$ respectively, equipped with the hyper-K\"ahler metric on its smooth locus $\mathcal M^{irr}_{SD}(\Sigma_k)$. Consider the natural $\Z_k$-action (generated by $\sigma$) on $\mathcal M_{SD}(\Sigma_k)$. The fixed point set of $\sigma$ in $\mathcal M^{irr}_{SD}(\Sigma_k)$ has multiple connected components, which can be distinguished as follows. 
A fixed point $(\nabla,\Phi,h)$ of the $\Z_k$ action can be represented by a $\Z_k$-equivariant solution of the self-duality equations, i.e., there exists a gauge transformation $g_\sigma\colon\Sigma_k\to\mathrm{SU}(2)$ such that \[\sigma^*(\nabla,\Phi,h)=(\nabla,\Phi,h).g_{\sigma}.\] By assumption, $(\nabla,\Phi,h)$ lies in the smooth locus and in particular $\nabla$ is irreducible. Hence $g_\sigma$ is unique up to sign. Moreover, at every fixed point $q\in\Sigma_k$ of $\sigma$, we have $g_{\sigma}^k(q)=\pm\text{Id}.$ Hence, the eigenvalues of $g_\sigma(q)$ are of the form $\exp(\pm 2\pi i\tfrac{ l_q}{2k})$ for some integer $l_q\in\{0,\dots,k\}.$ The integers $l_{p_1},\dots,l_{p_4}$ are then the invariants of a connected component $\mathcal C$ of the fixed point set. In fact, for $l_{p_1},\dots,l_{p_4}\in\{1,\dots,k-1\}$, this component is diffeomorphic to $\mathcal M_{SD}(\C P^1,p_1,\dots,p_4,\tfrac{l_{p_1}}{2k},\dots\tfrac{l_{p_4}}{2k}).$ For even $l_{p_1},\dots,l_{p_4}$, this directly follows from the discussion in Section \ref{ssec:rat}. For odd $l_{p_j}$, the gauge \eqref{def:twistbundle} has to be modified to involve square roots, see for example \cite{NaSt}, or \cite{HeHeSch} for the specific $\Sigma_k$. Likewise, one can first go to the double covering $\Sigma_{2k}\to\Sigma_k$ and then proof that the solutions on $\Sigma_{2k}$ are actually obtained as pull-backs of solutions on $\Sigma_k.$ Note that the component $\mathcal C$ is a hyper-K\"ahler submanifold of $\mathcal M^{irr}_{SD}(\Sigma_k)$ (see for example \cite[Theorem 8]{HSch}). In fact, this easily follows from the uniqueness of solutions to the self-duality equations. These components can be `completed' by adding orbifold points consisting of gauge classes of reducible connections. For fixed $l\in\N^{>0}$ and every $k\in\N^{>2l}$ we denote by $\mathcal M^l_{SD}(\Sigma_k)$ the component of equivariant solutions corresponding to the integers \[l_{p_1}=l_{p_2}=l_{p_3}=l_{p_4}=l.\] It then follows that this space is isomorphic to the hyper-K\"ahler space $\mathcal M_{SD}(\C P^1,p_1,\dots,p_4,\tfrac{l}{2k})$ up to the scaling constant $32\pi k,$ see Proposition \ref{pro:sympupdown}. We consider the energy (of the solution of the self-duality equations) $\mathcal E$ on $\mathcal M^l_{SD}(\Sigma_k)$, which corresponds up to a constant with the energy $\underline{\mathcal E}_{\tfrac{l}{2k}}$ on $\mathcal M_{SD}(\C P^1,p_1,\dots,p_4,\tfrac{l}{2k})$ by Section \ref{sec:energy}. We consider the Eguchi-Hanson metric $g_{EH}$ on $T^*\C P^1.$ By identifying $T^*\C P^1$ with the blow up of $(\C^2\setminus\{(0,0)\})/\Z_2$ at the origin, we have the energy \[\mathcal E_{EH}(u,v)=8\pi(\sqrt{1+(u\bar u+v\bar v)^2}-1)\] defined in terms of the coordinates $(u,v)\in\C^2.$ Consider also the $\Z_2\times\Z_2$-action on $T^*\C P^1$ generated by \[\zeta\in\C P^1\mapsto -\zeta\in\C P^1\quad\text{and}\quad \zeta\in\C P^1\mapsto \zeta^{-1}\in\C P^1.\] This defines an isometric action on $(T^*\C P^1,g_{EH})$, and has 6 fixed points contained in the zero section $\C P^1\subset T^*\C P^1.$ Their images in $T^*\C P^1/(\mathbb Z_2\times\mathbb Z_2)$ represent the 3 strictly semi-stable parabolic bundles (with vanishing Higgs field), see Proposition \ref{pro:paramodul}. 
Note that the energy $\mathcal E_{EH}$ is invariant under this action, and furthermore $\mathcal E_{EH}$ is a K\"ahler potential for $32\pi g_{EH}$ on $T^*\C P^1$ with respect to the complex structure $J$ (where $I$ is the natural complex structure induced from the cotangent bundle of $\C P^1$). \begin{proof}[Proof of Theorem \ref{thm:compactcon}] We first show that for $k$ large enough, any $[\nabla,\Phi]\in \mathcal C_k^l$ has (semi-)stable underlying holomorphic structure $\bar\partial^\nabla$. Recall that every equivariant stable Higgs pair $(\bar\partial^\nabla,\Phi)\in \mathcal M^l_{SD}$ gives rise to stable strongly parabolic Higgs pair on $\C P^1$ with 4 singular points and parabolic weight $t=\tfrac{l}{2k}.$ If the underlying holomorphic structure $\bar\partial^\nabla$ is unstable, then also the parabolic structure is unstable. By Lemma \ref{lem:unstablepara}, there is only a complex affine line of corresponding stable Higgs pairs with unstable underlying holomorphic structure. The Higgs pair with least energy in this affine line has vanishing determinant, and it remains to estimate its energy. This energy is given by $2\pi d$, where $d$ is the degree of the destabilizing holomorphic line bundle $L$, see for example the proof of \cite[Proposition 7.1]{Hi1}. It therefore remains to estimate the degree $d$, which depends on $k.$ Note that the equivariant nilpotent Higgs field is determined by a holomorphic section \[\phi\in H^0(\Sigma_k,KL^{-2}).\] Since $\phi$ is equivariant with respect to $\sigma$ as well, it must have a zero of order $(l-1)$ at all 4 singular points $p_1,\dots,p_4,$ compare with \cite[Theorem 3.3]{HeHeSch}. Hence, the degree of $KL^{-2}$ is $4(l-1)$ and since the genus of $\Sigma_k$ is $(k-1)$ we obtain \[d=k-2l>\frac{C}{2\pi}\] for all $k$ large enough. By Theorem \ref{thm:IFT} there exists for every $\widetilde C>0$ an $\epsilon>0$ such that for all $(u,v)\in\C^2$ with $0<|(u,v)|^2<\widetilde C$ and all $\epsilon>t>0$, we have constructed a real holomorphic section in $\mathcal M^{par}_{DH}(\C P^1,p_1+\dots+p_4,t)$ with parabolic Higgs field $t\Psi$ for $\Psi$ as in \eqref{eq:psiAs} with residues \eqref{eq:res1higgs} and \eqref{eq:psiAs2}. By Theorem \ref{blowuplimit} and Theorem \ref{computingthecomponent}, these real holomorphic sections are twistor lines and our construction extends to polystable parabolic structures with vanishing Higgs fields. Denote this subset of strongly parabolic Higgs fields (with trivial underlying holomorphic bundle) by $\wt{\mathcal C}_t,$ and (by a slight abuse of notation) use $(u,v)$ as coordinates on it. From the results of Section \ref{sec:energy} and particularly \eqref{mathematica-energy0}, we can chose for all $C>0$ a constant $\widetilde C>0$ and $\epsilon>0$ such that \[\underline{\mathcal E}_t(u,v)<\tfrac{C}{l}\quad \Longrightarrow \quad (u,v)\in \wt{\mathcal C}_t.\] Let $t=\tfrac{l}{2k}<\epsilon.$ By the definition of $\underline{\mathcal E}_t$ and by the results of Section \ref{sec:energy} we have \[\mathcal E(\nabla,\Phi,h)=l\underline{\mathcal E}_{\tfrac{l}{2k}}(u,v)\] for the twisted lift $(\nabla,\Phi,h)$ corresponding to the strongly parabolic Higgs field given by $(u,v).$ Recall that unless $(\nabla,\phi)$ is a fixed point of the $\C^*$-action, the energy along rays $r\in\R^{\geq0}\mapsto \mathcal E(\nabla,r\phi)$ is strictly increasing, see for example the proof of \cite[Proposition 9.1]{Hi1}. 
For $k$ large enough, we therefore obtain that $\mathcal C_k$ is completely contained in $\wt{\mathcal C}_{\tfrac{l}{2k}}$ (after taking the twisted lift), and we are therefore in the domain covered by our implicit function theorem. Thus, the theorem then directly follows from Proposition \ref{limitEH} and the results from Section \ref{sec:res-metric}. \end{proof} \section{Higher order derivatives}\label{hod} \subsection{The algorithm} Just as in \cite[Section 5]{HHT2} we give an iterative algorithm for computing higher order derivatives of the parameters in terms of the multiple polylogarithm (MPL) function. The difference to the minimal surface case \cite{HHT2} is that we have no extrinsic closing conditions but complex parameters here. The computation of the higher order derivatives of the parameters involves the iterated integral $\Omega_{i_1,\cdots,i_n}$ defined recursively by \begin{equation}\label{eqn:def:omegaintegral} \Omega_{i_1,\cdots,i_n}(z)=\int_0^z\Omega_{i_1,\cdots,i_{n-1}}\omega_{i_n}, \end{equation} where $\omega_i$ is as in \eqref{omega} for $i=1,2,3.$ It is shown in \cite[A.3]{HHT2} that (and how) these iterated integrals can be expressed in terms of multiple polylogarithm $ \Li_{n_1,\ldots,n_d} $. For positive integers $n_1,\ldots,n_d \in \mathbb{Z}_{>0} $, and $z_i \in \mathbb{C}^d $ in the region given by $ |z_i \ldots z_d| < 1 $, the multiple polylogarithm \( \Li_{n_1,\ldots,n_d} \) is defined by \begin{equation*} \Li_{n_1,\ldots,n_d}(z_1,\ldots,z_d) =\sum_{0<k_1<k_2<\cdots<k_d}\frac{z_1^{k_1}\cdots z_d^{k_d}}{k_1^{n_1}\cdots k_d^{n_d}} \,. \end{equation*} This function is extended to a multivalued function by analytic continuation. Here the \emph{depth} $d $ counts the number of indices $n_1,\ldots,n_d $, and the \emph{weight} is given by the sum $ n_1 + \cdots + n_d $ of the indices. The functions $\Omega_{i_1,\cdots,i_n}(z)$ can be expressed in terms of multiple polylogarithms of depth $n$ and weight $n.$ In the following we denote by $x_i^{(n)}$, $\mathcal P^{(n)}$, $\mathcal Q^{(n)}$ the $n$-th order derivatives of $x_i$, $\mathcal P$, $\mathcal Q$ with respect to $t$ at $t=0$ and we suppress the dependence of $(u,v)$ and $p.$} \begin{proposition}\label{lastPro} For $n\geq 1$ and $1\leq i\leq 3$, $x_i^{(n)}$ are polynomials (with respect to $\lambda$) of degree at most $n+1$ and the coefficients of $\mathcal P^{(n+1)}$, $\mathcal Q^{(n+1)}$ are Laurent polynomials of degree at most $n+1$, which can be expressed explicitly in terms of multiple-polylogarithms of depth $n+1$ and weight $n+1$. \end{proposition} \begin{proof} The proof is by induction on $n$. Let $H_n$ be the statement of the proposition. We have already proved $H_1$ in Section \ref{firstderivative}. Fix $n\geq 1$ and assume that $H_k$ is true for all $1\leq k\leq n-1$ and let the index `lower' denote all terms of a quantity that depends only on derivatives of lower order. 
As in \cite[Proposition 37]{HHT2} we have: $$\mathcal P^{(n+1)}=\sum_{\ell=1}^{n+1}\frac{(n+1)!}{(n+1-\ell)!} \sum_{i_1,\cdots,i_{\ell}}x_{i_1,\cdots,i_{\ell}}^{(n+1-\ell)}\mathfrak m_{i_1,\cdots,i_{\ell}}\Omega_{i_1,\cdots,i_{\ell}}(1)$$ with $$x_{i_1,\cdots,i_{\ell}}=\prod_{j=1}^{\ell} x_{i_j} \and\mathfrak m_{i_1,\cdots,i_{\ell}}=\prod_{j=1}^{\ell}\mathfrak m_{i_j}.$$ We rewrite this as $$\mathcal P^{(n+1)}=(n+1)\sum_{i=1}^3 x_i^{(n)}\mathfrak m_i\Omega_i(1) +\low{\mathcal P}^{(n+1)}$$ with $$\low{\mathcal P}^{(n+1)}=\sum_{\ell=2}^{n+1}\frac{(n+1)!}{(n+1-\ell)!} \sum_{i_1,\cdots,i_{\ell}}x_{i_1,\cdots,i_{\ell}}^{(n+1-\ell)}\mathfrak m_{i_1,\cdots,i_{\ell}}\Omega_{i_1,\cdots,i_{\ell}}(1).$$ Similar formula holds for $\mathcal Q^{(n+1)}$ with $\Omega_{i_1,\cdots,i_{\ell}}(1)$ replaced by $\Omega_{i_1,\cdots,i_{\ell}}(i)$. Using Leibniz rule we have $$\mathfrak p^{(n+1)}=2\pi (n+1) x_3^{(n)}+\low{\mathfrak p}^{(n+1)}$$ with $$\low{\mathfrak p}^{(n+1)}=\sum_{k=1}^n {n+1\choose k}\left( \mathcal P_{11}^{(k)}\mathcal P_{21}^{(n+1-k)}-\mathcal P_{12}^{(k)}\mathcal P_{22}^{(n+1-k)}\right)+\lowind{\mathcal P}{21}^{(n+1)}-\lowind{\mathcal P}{12}^{(n+1)}$$ and from $\mathfrak p^{(n+1)}=(\mathfrak p^{(n+1)})^*$ we obtain by taking the positive part: $$(x_3^{(n)})^+=\frac{1}{2\pi(n+1)}\left((\low{\mathfrak p}^{(n+1)})^{-*}-(\low{\mathfrak p}^{(n+1)})^+\right).$$ In the same way $$\mathfrak q^{(n+1)}=2\pi (n+1) x_2^{(n)}+\low{\mathfrak q}^{(n+1)}$$ with $$\low{\mathfrak q}^{(n+1)}=i\sum_{k=1}^n{n+1\choose k}\left( \mathcal Q_{11}^{(k)}\mathcal Q_{21}^{(n+1-k)}+\mathcal Q_{12}^{(k)}\mathcal Q_{22}^{(n+1-k)}\right)+i\lowind{\mathcal Q}{21}^{(n+1)}+i\lowind{\mathcal Q}{12}^{(n+1)}$$ and we obtain $$(x_2^{(n)})^+=\frac{1}{2\pi(n+1)}\left((\low{\mathfrak q}^{(n+1)})^{-*}-(\low{\mathfrak q}^{(n+1)})^+\right).$$ By inspection and the induction hypothesis, $(x_2^{(n)})^+$ and $(x_3^{(n)})^+$ are polynomials of degree at most $n+1$. \begin{remark} We could also determine $\Im(x_{3,0}^{(n)})$ and $\Im(x_{2,0}^{(n)})$ from the zero part of $\mathfrak p^{(n)}=(\mathfrak p^{(n)})^*$ and $\mathfrak q^{(n)}=(\mathfrak q^{(n)})^*$, but it is simpler to determine the three complex parameters $x_{i,0}^{(n)}$ by solving a complex linear system, see below. \end{remark} Using Leibniz rule: $$0=\mathcal K^{(n)}=2\sum_{i=1}^3\cv{x}_ix_i^{(n)}+\low{\mathcal K}^{(n)}$$ with $$\low{\mathcal K}^{(n)}=\sum_{i=1}^3\sum_{k=1}^{n-1} {n\choose k}x_i^{(k)}x_i^{(n-k)}.$$ We multiply by $\lambda$ and obtain (recall that $P_j=\lambda \cv{x}_j$): \begin{equation} \label{eq:higher-order1} P_1x_1^{(n)}+P_2 x_{2,0}^{(n)}+P_3 x_{3,0}^{(n)}= -\frac{\lambda}{2}\low{\mathcal K}^{(n)}-P_2(x_2^{(n)})^+-P_3(x_3^{(n)})^+. \end{equation} The right hand side of \eqref{eq:higher-order1} is already known and is a polynomial in $\lambda$ of degree at most $n+3$. Hence $P_1 x_1^{(n)}$ is a polynomial of degree at most $n+3$. When $u\sim v$, then both roots of $P_1$ are in $\D_{a}$, so since $x_1^{(n)}$ cannot have poles in $\D_{a}$, it must be a polynomial of degree at most $n+1$. This remains true for all $(u,v)$ by analyticity. Let $Q,R$ be the quotient and remainder of the division of the right side of \eqref{eq:higher-order1} with respect to $P_1$. 
Then $$(x_1^{(n)})^+=Q^+$$ and looking at the coefficients of $\lambda^0$, $\lambda^1$ and $\lambda^2$ in $\eqref{eq:higher-order1}$, we obtain a system of three complex equations with unknowns $x_{1,0}^{(n)}$, $x_{2,0}^{(n)}$ and $x_{3,0}^{(n)}$, whose determinant is $$\det\left(\cv{x}_{i,j}\right)_{-1\leq i\leq 1\atop 1\leq j\leq 3}=\tfrac{1}{2}i\rho r^6\neq 0$$. \end{proof} As a direct corollary we can express the hyper-K\"ahler metric of the moduli space $\mathcal M (t)$ in terms of multiple polylogarithms. \begin{proof}[Proof of Theorem \ref{mainT}] By twistor theory, we can compute the hyper-K\"ahler metric explicitly in terms of the relative holomorphic symplectic form $\varpi=32\pi\frac{dx_2\wedge dx_3}{x_1}.$ Since $x_1,x_2,x_3$ depend real analytic on $t$ the theorem follows from Proposition \ref{lastPro}. \end{proof} The algorithm has been implemented and using Mathematica we obtain, for example, for $p = e^{i\pi/4}$ \begin{equation} \label{mathematica-energy2} \mathcal E''=\frac{32\,\pi}{\rho r^6}(|u|^2-|v|^2)^2\big(3 r^8 +2 r^4+4 |u|^2|v|^2\big)\log(2)^2. \end{equation} \subsection{Results for the nilpotent cone of the most symmetric case} All following computations are conducted and simplified using Mathematica. The fully documented Mathematica notebook can be found with the arxiv version of the paper or on the webpage \begin{verbatim} https://www.idpoisson.fr/traizet/ \end{verbatim} We restrict to the case $p=e^{i\pi/4}$ and assume $v=p u$, i.e., by \eqref{eq:detPsi} we are in the nilpotent cone. The computations of higher order derivatives then simplify to: $$\mathcal E''=0$$ \begin{equation} \label{mathematica-energy3} \mathcal E'''=-192\, \pi | u| ^4 \left(127 | u| ^4+20\right)\zeta (3) \end{equation} where $\zeta$ is the Riemann $\zeta$-function. If $\wt\varpi$ denotes the restriction of $\varpi$ to the nilpotent cone, we obtain $$\wt\varpi'=8\, i |u|^2\log(2)\, du\wedge d\overline{u}$$ $$\wt\varpi''=0$$ \begin{equation} \label{mathematica-omega3} \wt\varpi'''=96 i\,\zeta(3)(127|u|^6+10 |u|^2)\,du\wedge d\uc. \end{equation} \subsection{New $\Omega$-identities}\label{nOid} Write $$\varpi=\sum_{k=0}^{\infty}\varpi_k\lambda^k.$$ We can compute $\varpi''_k$ for $k=0,1,2...$ from the derivatives of the parameters. On the other hand, we know from section \ref{sec:goldman} and Lemma \ref{rescaledform} that $\varpi''_k=0$ for $k\geq 1$, and also $$\varpi''_{0,u,v}=\varpi''_{0,\uc,\vc}=0.$$ Note that $\varpi''_{0,\uc,\vc}=0$ is trivial because of \eqref{varphiconst} and because $d\cv{x}_{2,-1}$ and $d\cv{x}_{3,-1}$ are holomorphic 1-forms so vanish on $(\frac{\partial}{\partial \uc},\frac{\partial}{\partial\vc})$. On the other hand, $\varpi''_{0,u,v}=0$ is not trivial and gives identities involving $\Omega$-integrals. 
In this section, we use the notation $$\Omega_{i_1,\cdots,i_n}=\Omega_{i_1,\cdots,i_n}(1) \quad\text{and}\quad \Theta_{i_1,\cdots,i_n}=\Omega_{i_1,\cdots,i_n}(i).$$ From $$\varpi''_0=\frac{1}{\cv{x}_{1,-1}}\left( \frac{-x_{1,0}''}{\cv{x}_{1,-1}}d\cv{x}_{2,-1}\wedge d\cv{x}_{3,-1} +d x_{2,0}''\wedge d\cv{x}_{3,-1} +d\cv{x}_{2,-1}\wedge dx_{3,0}'' \right)$$ we obtain \begin{align} \label{mathematica-omega''uv} \varpi''_{0,u,v}&=\frac{ \rho}{\pi^2}\big[ 6(u\uc^2\vc-\uc v\vc^2)(I_1+\overline{I_1}) +8(\uc^3 v - u\vc^3)(I_2+\overline{I_2})\\ &+\left(\frac{u\uc^6}{\vc^3}-\frac{v\vc^6}{\uc^3}+\frac{3\uc^5 v}{\vc^2}-\frac{3u\vc^5}{\uc^2}\right)(I_3+\overline{I_3})\big] \nonumber\end{align} with $$\begin{cases} I_1=6\pi(\Omega_{333}-\Theta_{222})+i((\Omega_{21})^2+(\Theta_{31})^2) +2\pi(\Omega_{223}-\Theta_{332})-8\pi(\Omega_{311}-\Theta_{211}) +10 i \Omega_{21}\Theta_{31}\\ I_2=i((\Omega_{21})^2-(\Theta_{31})^2)+2\pi(\Omega_{223}+\Theta_{332}+\Omega_{311}+\Theta_{211})-4\pi(\Omega_{333}+\Theta_{222})\\ $$I_3=-i((\Omega_{21})^2+(\Theta_{31})^2)+2\pi(\Omega_{333}-\Theta_{222}-\Omega_{223}+\Theta_{332})-2i\Omega_{21}\Theta_{31}.\end{cases}$$ Since $\varpi''_{0,u,v}=0$, $I_1$, $I_2$ and $I_3$ are pure imaginary. Since they are holomorphic functions of $p\in\C^+$, they must be constant. We find the constants by evaluating at $p=e^{i\pi/4},$ where all integrals are known from \cite[Appendix]{HHT2}, and find $$\begin{cases} I_1=-i\frac{\pi^4}{3}\\ I_2=0\\ I_3=-i\pi^4.\end{cases}$$ Using the elementary values $$\Omega_{333}=\frac{1}{6}(\Omega_3)^3=\frac{-i\pi^3}{6} \quad\text{and}\quad \Theta_{222}=\frac{1}{6}(\Theta_2)^2=\frac{i\pi^3}{6}$$ we obtain the following identities for all $p\in\C^+$: \begin{equation} \label{eq:new-identities} \begin{cases} \Omega_{223}+\Omega_{311}=-\frac{i}{2\pi}(\Omega_{21})^2\\ \Theta_{211}+\Theta_{332}=\frac{i}{2\pi}(\Theta_{31})^2\\ \Omega_{223}-\Theta_{332}=-\frac{i}{2\pi}(\Omega_{21}+\Theta_{31})^2+\frac{i\pi^3}{6}. \end{cases}\end{equation} From higher order derivatives in $t$ and higher order terms $\varpi_k$ we expect a hierarchy of identities for $\Omega$-values. Another source of identities is the character variety of the 4-punctured sphere. 
Analogously to Remark \eqref{remark:r''}, taking the 4th-order derivative of \eqref{mathematica-fricke} gives rise to the following three identities, which express linear combinations of $\Omega$-integrals of depth 3 as a function of $\Omega$-integrals of depth at most 2, and are non-trivial in the sense that they do not follow from shuffle product relations alone: \begin{align} \label{mathematica-ID1} &\Omega_{2,1,2} -\Theta_{1,2,1} -\Theta_{2,1,2}= \\& -\frac{1}{2} \Omega_{2} \Omega_{1,2} +\frac{1}{2}\Omega_{2} \Omega_{2,1} -\frac{i \Omega_{1,2}^2}{4 \pi } +\frac{3 i\Omega_{2,1}^2}{4 \pi } -\frac{i \Omega_{1,2}\Omega_{2,1}}{2 \pi} +\frac{1}{2} \Theta_{1}\Theta_{1,2} +\frac{1}{2} i \pi \Theta_{1,2} -\frac{1}{2} \Theta_{1}\Theta_{2,1} \nonumber\\& -\frac{1}{2} i \pi \Theta_{2,1} +\frac{i \Omega_{1}^2 \Omega_{2}^2}{4 \pi} +\frac{1}{2} \Omega_{1} \Omega_{2}^2 -\pi ^2\Omega_{1} +\frac{1}{2} i \pi \Theta_{1}^2 -\frac{1}{4} \Theta_{1}\Theta_{2}^2 +\frac{1}{4} \pi ^2\Theta_{1} +\frac{2 i \pi ^3}{3} \nonumber\end{align} \begin{align} \label{mathematica-ID2} & \Omega_{1,3,1} +\Omega_{3,1,3} -\Theta_{3,1,3}= \\& -\frac{1}{2} \Omega_{1} \Omega_{1,3} +\frac{1}{2}\Omega_{1} \Omega_{3,1} +\frac{1}{2} i \pi \Omega_{1,3} -\frac{1}{2} i \pi \Omega_{3,1} -\frac{i \Theta_{1,3}^2}{4 \pi } +\frac{3 i\Theta_{3,1}^2}{4 \pi } +\frac{1}{2} \Theta_{3}\Theta_{1,3} -\frac{1}{2} \Theta_{3}\Theta_{3,1} -\frac{i \Theta_{1,3}\Theta_{3,1}}{2 \pi} \nonumber \\& +\Omega_{1}^2\Theta_{1} -\Omega_{1}\Theta_{1}^2 -\frac{1}{3}\Omega_{1}^3 +\frac{1}{2} i \pi \Omega_{1}^2 +\frac{1}{4} \Omega_{3}^2\Omega_{1} -\frac{1}{4} \pi ^2\Omega_{1} +\frac{1}{3} \Theta_{1}^3 +\frac{i\Theta_{1}^2 \Theta_{3}^2}{4 \pi } -\frac{1}{2}\Theta_{1} \Theta_{3}^2 +\pi ^2 \Theta_{1} +\frac{i \pi ^3}{3} \nonumber \end{align} \begin{align} \label{mathematica-ID3} &\Omega_{2 3 2}-\Theta_{3 2 3}=\\ &\frac{1}{2} \Omega_{2}\Omega_{2 3} -\frac{1}{2} \Omega_{2}\Omega_{3 2} +\frac{1}{2} \Theta_{3}\Omega_{3 2} -\frac{1}{2} \Theta_{3}\Omega_{2 1} +\Theta_{3}\Omega_{2 3} +\frac{1}{2} \Theta_{3}\Theta_{2 3} -\frac{1}{2}\Theta_{3} \Theta_{3 1} \nonumber\\& -\frac{i \Omega_{2 1}\Theta_{2 3}}{2 \pi } -\frac{i \Omega_{3 2}\Theta_{2 3}}{2 \pi } +\frac{3 i \Omega_{2 1}\Theta_{3 1}}{2 \pi } -\frac{i \Omega_{3 2}\Theta_{3 1}}{2 \pi } -\frac{i \Omega_{2 1}\Omega_{3 2}}{2 \pi } -\frac{i\Theta_{2 3} \Theta_{3 1}}{2 \pi } \nonumber\\& +\frac{3 i \Omega_{2 1}^2}{4 \pi} -\frac{i \Omega_{3 2}^2}{4 \pi } -\frac{i\Theta_{2 3}^2}{4 \pi } +\frac{3 i\Theta_{3 1}^2}{4 \pi } +\frac{i \pi \Omega_{2}^2}{4} +\frac{i\pi \Theta_{3}^2}{4} -i \pi \Omega_{2 1} -i \pi \Omega_{3 2} \nonumber\\& -i \pi \Theta_{2 3} -i \pi \Theta_{3 1} -\pi ^2 \Omega_{2} +\pi ^2 \Theta_{3} -\frac{i\pi ^3}{3}\nonumber. \end{align} \begin{thebibliography}{100} \bibitem{AB}{\sc M. F. Atiyah, R. Bott}, {\em The Yang-Mills Equations over Riemann Surfaces}, Phil. Trans. R. Soc. Lond. A (1983) 308, 523--615 \bibitem{AlMa}{\sc A. Alekseev, A. Malkin}, {\em Symplectic Structure of the Moduli Space of Flat Connections on a Riemann surface}, { Comm. Math. Phys.}, Volume 169, 99--119 (1995). \bibitem{AlGo} {\sc D. Alfaya, T. L. Gomez}, {\em Torelli theorem for the parabolic Deligne-Hitchin moduli space}, J. Geom. Phys. 123, 448--462. \bibitem{Audin}{\sc Audin}, {\em Lectures on gauge theory and integrable systems}, In: Hurtubise, J., Lalonde, F., Sabidussi, G. (eds) Gauge Theory and Symplectic Geometry. NATO ASI Series, vol 488. Springer, Dordrecht. 
\href{https://doi.org/10.1007/978-94-017-1667-3_1}{DOI:10.1007/978-94-017-1667-3-1} \bibitem{Boa}{\sc Ph. Boalch}, {\em Wild character varieties, meromorphic Hitchin systems and Dynkin diagrams,} Geometry and Physics: A Festschrift in honour of Nigel Hitchin, Oxford University Press (2018) 433--454. \bibitem{BeHRo} {\sc F. Beck, S. Heller, M. R\"oser}, {\em Energy of sections of Deligne-Hitchin moduli spaces}, Math. Annalen 380 (2021), no. 3-4, 1169--1214. \bibitem{BHR} {\sc I. Biswas, S. Heller, M. R\"oser}, {\em Real sections of the Deligne-Hitchin moduli space}, Comm. Math. Phys., Volume 366, Issue 3 (2019), pages 1099--1133. \bibitem{Fuchs} {\sc I. Biswas, S. Dumitrescu, S. Heller, L. Heller}, {\em Holomorphic sl(2,C)-systems with Fuchsian monodromy (with an appendix by Takuro Mochizuki) }, \href{https://arxiv.org/abs/2104.04818}{arXiv:2104.04818}. \bibitem{CC1} {\sc G. Chen, X. Chen}, {\em Gravitational instantons with faster than quadratic curvature decay (I)}, Acta Mathematica, Volume 227 (2021), Pages 263--307 \bibitem{CC2} {\sc G. Chen, X. Chen}, {\em Gravitational instantons with faster than quadratic curvature decay (II)}, Journal für die reine und angewandte Mathematik (Crelles Journal), Volume 2019, Issue 756, Pages 259--284. \bibitem{CC3} {\sc G. Chen, X. Chen}, {\em Gravitational instantons with faster than quadratic curvature decay (III)}, Mathematische Annalen, Volume 380, Pages 687--717 (2021). \bibitem{CVZ} Gao Chen, Jeff Viaclovsky, and Ruobing Zhang, Torelli-type theorems for gravitational instantons with quadratic volume growth, Duke Mathematical Journal, 173 (2) 227-275, (2024). \bibitem{Do}{\sc S. Donaldson}, {\em Twisted harmonic maps and the self-duality equations}, Proc. London Math. Soc. (3) 55 (1987), no. 1, 127--131. \bibitem{DPW} {\sc J. Dorfmeister, F. Pedit, Wu} \emph{Weierstrass type representation of harmonic maps into symmetric spaces}, Comm. Anal. Geom. \textbf{6} (1998), no.~4, 633--668. \bibitem{ES} {\sc J. Eells and J. H. Sampson}, {\em Harmonic mappings of Riemannian manifolds}, Amer. J. Math. 86 (1964) 109--160. \bibitem{Gold} William M.~Goldman, {\em The symplectic nature of fundamental groups of surfaces.} Advances in Mathematics, 52(2):200-225, 1984 \bibitem{gold2} William M.~Goldman, {\em An exposition of results of Fricke}. Available at: \href{https://arxiv.org/abs/math/0402103}{https://arxiv.org/abs/math/0402103}. \bibitem{FMSW} {\sc L.~ Fredrickson, R.~Mazzeo, J.~Swoboda, H.~Weiss}, {\em Asymptotic Geometry of the Moduli Space of Parabolic SL(2,$\C$)-Higgs Bundles,} J. Lond. Math. Soc. (2022), 1-72 \bibitem{Fred} {\sc L. Fredrickson}, {\em Exponential decay for the asymptotic geometry of the Hitchin metric,} Comm. Math. Phys. 375 no. 2, (2020), pp 1393--1426. \bibitem{GMN} {\sc D.~ Gaiotto, G.~W.~Moore, A.~Neitzke}, {\em Wall-crossing, Hitchin systems, and the WKB approximation.} Adv. Math., {\bf 234} (2013), pp 239-403. \bibitem{HeHe} {\sc L. Heller, S. Heller}, {\em Abelianization of Fuchsian systems on the 4-punctured sphere and applications} Journal of Symplectic Geometry, Vol. 14, No. 4 (2016), pp. 1059--1088. \bibitem{HH} {\sc L. Heller, S. Heller}, \emph{Higher solutions of Hitchin's self-duality equations}, J. Integrable Systems 5 (2020). \bibitem{HH3} {\sc L. Heller, S. Heller}, {\em Fuchsian DPW potentials for Lawson surfaces,} Geom. Dedicata, 217, (2023) \bibitem{HeHeSch} {\sc L. Heller, S. Heller, N.Schmitt}, {\em Navigating the space of symmetric CMC surfaces}, J. Differential Geom., Volume 110, Number 3 (2018), 413--455. 
\bibitem{HHT1} {\sc L. Heller, S. Heller, M. Traizet}, {\em Area estimates for High genus Lawson surfaces via DPW}, J. Differential Geom., Volume 124 (2023), 1--35. \bibitem{HHT2} {\sc L. Heller, S. Heller, M. Traizet}, {\em Complete families of embedded high genus CMC surfaces in the 3-sphere (with an appendix by Steven Charlton) }, \href{https://arxiv.org/abs/2108.10214v3}{arXiv:2108.10214v3}. \bibitem{HHT3} {\sc S. Charlton, L. Heller, S. Heller, M. Traizet}, {\em Minimal surfaces and alternating multiple zetas) }, \href{https://arxiv.org/abs/2407.07130}{arXiv:2407.07130}. \bibitem{HSch}{\sc S. Heller, L. Schaposnik}, {\em Branes through finite group actions}, Journal of Geometry and Physics 129 (2018) 279--293. \bibitem{Hi1} {\sc N. J. Hitchin}, {\em The self-duality equations on a Riemann surface}. Proc. London Math. Soc. (3) 55 (1987), no. 1, 59-126. \bibitem{HKLR} {\sc N. J. Hitchin, A. Karlhede, U. Lindstr\"om, and M. Rocek}, {\em Hyperk\"ahler Metrics and Supersymmetry}, Commun. Math. Phys. {\bf 108} (1987), 535--589. \bibitem{KiWi} {\sc S. Kim, G. Wilkin}, {\em Analytic convergence of harmonic metrics for parabolic Higgs bundles}, Journal of Geometry and Physics, vol. 127, pp. 55--67. \bibitem{Konno} {\sc H. Konno}, {\em Construction of the moduli space of stable parabolic Higgs bundles on a Riemann surface}, J. Math. Soc. Japan, vol. 45, No. 1993. \bibitem{Li} Q. Li, {\em An Introduction to Higgs Bundles via Harmonic Maps}, SIGMA, 15 (2019), 035, 30 pages. \bibitem{Lye} {\sc J. Lye}, {\em A detailed look at the Calabi-Eguchi-Hanson spaces}, preprint: \href{https://arxiv.org/abs/math/2201.0729}{arXiv:2201.0729}. \bibitem{MS} V. B. Mehta and C. S. Seshadri, Moduli of vector bundles on curves with parabolic structures, {\em Math. Ann.} {\bf 248} (1980), 205--239. \bibitem{Men} {\sc C. Meneses}, {\em Geometric Models and Variation of Weights on Moduli of Parabolic Higgs Bundles over the Riemann Sphere: a Case Study}, SIGMA 18 (2022), 062, 41 pages. \bibitem{NS} M. S. Narasimhan and C. S. Seshadri, {\em Stable and unitary bundles on a compact Riemann surface}, Ann. of Math. 82 (1965), 540--564. \bibitem{NaSt} B. Nasatyr, B. Steer, {\em Orbifold Riemann surfaces and the Yang-Mills-Higgs equations} Annali della Scuola Normale Superiore di Pisa, Classe di Scienze 4e série, tome 22, no 4 (1995), p. 595--643 \bibitem{Pir} G. Pirola, {\em Monodromy of constant mean curvature surface in hyperbolic space}, Asian Jour. Math. 11 (2007), 651--669. \bibitem{PS} {\sc A. Pressley, G. Segal}, {\em Loop groups. Oxford Mathematical Monographs}. Oxford University Press, New York, 1986. \bibitem{Si0} {\sc C. Simpson} \newblock {\em Constructing variations of {H}odge structure using {Y}ang--{M}ills theory and applications to uniformization}, Journal of the AMS \textbf{1} (1988), 867--918. \bibitem{Si1} {\sc C. Simpson}, {\em Harmonic bundles on non-compact curves}, Journal of the AMS, Vol. 3, No. 3 (Jul., 1990), pp. 713--770. \bibitem{Si} {\sc C. Simpson}, {\em The Hodge filtration on nonabelian cohomology}. Algebraic geometry--Santa Cruz 1995, 217--281, Proc. Sympos. Pure Math., 62, Part 2, Amer. Math. Soc., Providence, RI, 1997 \bibitem{Si2} {\sc C. Simpson}, {\em A weight two phenomenon for the moduli of rank one local systems on open varieties}. From Hodge theory to integrability and TQFT tt*-geometry, 175--214, Proc. Sympos. Pure Math., 78, Amer. Math. Soc., Providence, RI, 2008. \bibitem{Si21} {\sc C. Simpson}, {\em The twistor geometry of parabolic structures in rank two}, Proc. Indian Acad. 
Sci., Math. Sci. 132, No. 2, Paper No. 54, 26 p. (2022). \end{thebibliography} \end{document}
2205.12103v1
http://arxiv.org/abs/2205.12103v1
A free boundary inviscid model of flow-structure interaction
\documentclass[10pt,reqno,oneside]{amsproc} \title[A free boundary inviscid model of flow-structure interaction]{A free boundary inviscid model of flow-structure interaction} \author[I.~Kukavica]{Igor Kukavica} \address{Department of Mathematics, University of Southern California, Los Angeles, CA 90089} \email{[email protected]} \author[A.~Tuffaha]{Amjad Tuffaha} \address{Department of Mathematics and Statistics, American University of Sharjah, Sharjah, UAE} \email{atufaha\char'100aus.edu} \chardef\forshowkeys=0 \chardef\refcheck=0 \chardef\showllabel=0 \chardef\sketches=0 \usepackage{enumitem} \usepackage{datetime} \usepackage{fancyhdr} \usepackage[margin=1in]{geometry} \usepackage{amsmath, amsthm, amssymb} \usepackage{times} \usepackage{graphicx} \usepackage[usenames,dvipsnames,svgnames,table]{xcolor} \usepackage{marginnote} \usepackage[unicode,breaklinks=true,colorlinks=true,linkcolor=blue,urlcolor=blue,citecolor=blue]{hyperref} \usepackage[most]{tcolorbox} \begin{document} \def\XX{X} \def\YY{Y} \def\ZZZ{Z} \def\intint{\int\!\!\!\!\int} \def\OO{\mathcal O} \def\SS{\mathbb S} \def\CC{\mathbb C} \def\RR{\mathbb R} \def\TT{\mathbb T} \def\ZZ{\mathbb Z} \def\HH{\mathbb H} \def\RSZ{\mathcal R} \def\LL{\mathcal L} \def\SL{\LL^1} \def\ZL{\LL^\infty} \def\GG{\mathcal G} \def\tt{\langle t\rangle} \def\erf{\mathrm{Erf}} \def\mgt#1{\textcolor{magenta}{#1}} \def\ff{\rho} \def\gg{G} \def\sqrtnu{\sqrt{\nu}} \def\ww{w} \def\ft#1{#1_\xi} \def\les{\lesssim} \def\ges{\gtrsim} \renewcommand*{\Re}{\ensuremath{\mathrm{{\mathbb R}e\,}}} \renewcommand*{\Im}{\ensuremath{\mathrm{{\mathbb I}m\,}}} \ifnum\showllabel=1 \def\llabel#1{\marginnote{\color{lightgray}\rm\small(#1)}[-0.0cm]\notag} \newcommand{\norm}[1]{\left\|#1\right\|} \newcommand{\nnorm}[1]{\lVert #1\rVert} \newcommand{\abs}[1]{\left|#1\right|} \newcommand{\NORM}[1]{|\!|\!| #1|\!|\!|} \newtheorem{theorem}{Theorem}[section] \newtheorem{Theorem}{Theorem}[section] \newtheorem{corollary}[theorem]{Corollary} \newtheorem{Corollary}[theorem]{Corollary} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{Proposition}[theorem]{Proposition} \newtheorem{Lemma}[theorem]{Lemma} \newtheorem{lemma}[theorem]{Lemma} \theoremstyle{definition} \newtheorem{definition}{Definition}[section] \newtheorem{Remark}[theorem]{Remark} \def\theequation{\thesection.\arabic{equation}} \numberwithin{equation}{section} \definecolor{mygray}{rgb}{.6,.6,.6} \definecolor{myblue}{rgb}{9, 0, 1} \definecolor{colorforkeys}{rgb}{1.0,0.0,0.0} \newlength\mytemplen \newsavebox\mytempbox \def\weaks{\text{\,\,\,\,\,\,weakly-* in }} \def\weak{\text{\,\,\,\,\,\,weakly in }} \def\inn{\text{\,\,\,\,\,\,in }} \def\cof{\mathop{\rm cof\,}\nolimits} \def\Dn{\frac{\partial}{\partial N}} \def\Dnn#1{\frac{\partial #1}{\partial N}} \def\tdb{\tilde{b}} \def\tda{b} \def\qqq{u} \def\cite#1{[#1]} \def\lat{\Delta_2} \def\biglinem{\vskip0.5truecm\par==========================\par\vskip0.5truecm} \def\inon#1{\hbox{\ \ \ \ \ \ \ }\hbox{#1}} \def\onon#1{\inon{on~$#1$}} \def\inin#1{\inon{in~$#1$}} \def\FF{F} \def\andand{\text{\indeq and\indeq}} \def\ww{w(y)} \def\ll{{\color{red}\ell}} \def\ee{\epsilon_0} \def\startnewsection#1#2{ \section{#1}\label{#2}\setcounter{equation}{0}} \def\nnewpage{ } \def\sgn{\mathop{\rm sgn\,}\nolimits} \def\Tr{\mathop{\rm Tr}\nolimits} \def\div{\mathop{\rm div}\nolimits} \def\curl{\mathop{\rm curl}\nolimits} \def\dist{\mathop{\rm dist}\nolimits} \def\supp{\mathop{\rm supp}\nolimits} \def\indeq{\quad{}} \def\period{.} \def\semicolon{\,;} \def\nts#1{{\cor #1\cob}} \def\colr{\color{red}} 
\def\colrr{\color{black}} \def\colb{\color{black}} \def\coly{\color{lightgray}} \definecolor{colorgggg}{rgb}{0.1,0.5,0.3} \definecolor{colorllll}{rgb}{0.0,0.7,0.0} \definecolor{colorhhhh}{rgb}{0.3,0.75,0.4} \definecolor{colorpppp}{rgb}{0.7,0.0,0.2} \definecolor{coloroooo}{rgb}{0.45,0.0,0.0} \definecolor{colorqqqq}{rgb}{0.1,0.7,0} \def\colg{\color{colorgggg}} \def\collg{\color{colorllll}} \def\cole{\color{coloroooo}} \def\coleo{\color{colorpppp}} \def\cole{\color{black}} \def\colu{\color{blue}} \def\colc{\color{colorhhhh}} \def\colW{\colb} \definecolor{coloraaaa}{rgb}{0.6,0.6,0.6} \def\colw{\color{coloraaaa}} \def\comma{ {\rm ,\qquad{}} } \def\commaone{ {\rm ,\quad{}} } \def\les{\lesssim} \def\thelt#1{}\def\dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls{\lesssim} \def\nts#1{{\color{blue}\hbox{\bf ~#1~}}} \def\ntsf#1{\footnote{\color{colorgggg}\hbox{#1}}} \def\blackdot{{\color{red}{\hskip-.0truecm\rule[-1mm]{4mm}{4mm}\hskip.2truecm}}\hskip-.3truecm} \def\bluedot{{\color{blue}{\hskip-.0truecm\rule[-1mm]{4mm}{4mm}\hskip.2truecm}}\hskip-.3truecm} \def\purpledot{{\color{colorpppp}{\hskip-.0truecm\rule[-1mm]{4mm}{4mm}\hskip.2truecm}}\hskip-.3truecm} \def\greendot{{\color{colorgggg}{\hskip-.0truecm\rule[-1mm]{4mm}{4mm}\hskip.2truecm}}\hskip-.3truecm} \def\cyandot{{\color{cyan}{\hskip-.0truecm\rule[-1mm]{4mm}{4mm}\hskip.2truecm}}\hskip-.3truecm} \def\reddot{{\color{red}{\hskip-.0truecm\rule[-1mm]{4mm}{4mm}\hskip.2truecm}}\hskip-.3truecm} \def\tdot{{\color{green}{\hskip-.0truecm\rule[-.5mm]{3mm}{3mm}\hskip.2truecm}}\hskip-.1truecm} \def\gdot{\greendot} \def\bdot{\bluedot} \def\ydot{\cyandot} \def\rdot{\cyandot} \def\fractext#1#2{{#1}/{#2}} \def\ii{\hat\imath} \def\fei#1{\textcolor{blue}{#1}} \def\vlad#1{\textcolor{cyan}{#1}} \def\igor#1{\text{{\textcolor{colorqqqq}{#1}}}} \def\igorf#1{\footnote{\text{{\textcolor{colorqqqq}{#1}}}}} \def\AA{Y} \newcommand{\p}{\partial} \newcommand{\UE}{U^{\rm E}} \newcommand{\PE}{P^{\rm E}} \newcommand{\KP}{K_{\rm P}} \newcommand{\uNS}{u^{\rm NS}} \newcommand{\vNS}{v^{\rm NS}} \newcommand{\pNS}{p^{\rm NS}} \newcommand{\omegaNS}{\omega^{\rm NS}} \newcommand{\uE}{u^{\rm E}} \newcommand{\vE}{v^{\rm E}} \newcommand{\pE}{p^{\rm E}} \newcommand{\omegaE}{\omega^{\rm E}} \newcommand{\ua}{u_{\rm a}} \newcommand{\va}{v_{\rm a}} \newcommand{\omegaa}{\omega_{\rm a}} \newcommand{\ue}{u_{\rm e}} \newcommand{\ve}{v_{\rm e}} \newcommand{\omegae}{\omega_{\rm e}} \newcommand{\omegaeic}{\omega_{{\rm e}0}} \newcommand{\ueic}{u_{{\rm e}0}} \newcommand{\veic}{v_{{\rm e}0}} \newcommand{\up}{u^{\rm P}} \newcommand{\vp}{v^{\rm P}} \newcommand{\tup}{{\tilde u}^{\rm P}} \newcommand{\bvp}{{\bar v}^{\rm P}} \newcommand{\omegap}{\omega^{\rm P}} \newcommand{\tomegap}{\tilde \omega^{\rm P}} \renewcommand{\up}{u^{\rm P}} \renewcommand{\vp}{v^{\rm P}} \renewcommand{\omegap}{\Omega^{\rm P}} \renewcommand{\tomegap}{\omega^{\rm P}} \begin{abstract} We obtain the local existence and uniqueness for a system describing interaction of an incompressible inviscid fluid, modeled by the Euler equations, and an elastic plate, represented by the fourth-order hyperbolic PDE. We provide a~priori estimates for the existence with the optimal regularity $H^{r}$, for $r>2.5$, on the fluid initial data and construct a unique solution of the system for initial data $u_0\in H^{r}$ for $r\geq3$. An important feature of the existence theorem is that the Taylor-Rayleigh instability does not occur. 
\end{abstract} \keywords{Euler equations, free-boundary problems, Euler-plate system} \maketitle \setcounter{tocdepth}{2} \tableofcontents \colb \startnewsection{Introduction}{sec01} In this paper, we prove the existence and uniqueness of local-in-time solutions to a system describing the interaction between an inviscid incompressible fluid and an elastic plate. The model couples the 3D incompressible Euler equations with a hyperbolic fourth-order equation that describes the motion of the free-moving interface. We consider a domain that is a channel with a rigid bottom boundary and a top moving boundary which is only allowed to move in the vertical direction according to a displacement function $w$. The function $w$ satisfies a fourth-order hyperbolic equation, with a forcing imposed by the fluid normal stress. The boundary conditions on the Euler equations match the normal component of the fluid velocity with the normal velocity of the plate, while periodic boundary conditions are imposed in the horizontal directions. We also prove that if solutions with the proposed regularity exist, they are unique. As far as we know, this is the first treatment of the moving boundary fluid-elastic structure system where the fluid is inviscid. \par The viscous model, involving the Navier-Stokes equations, has been treated in the literature by several authors. The earliest known work on the free-moving domain model is by Beir\~ao da~Veiga~\cite{B}, who considered the coupled 2D Navier-Stokes-plate model and established the existence of a strong solution. In \cite{DEGL, CDEG}, Desjardins et~al considered the existence of weak solutions to the 3D Navier-Stokes system coupled with a strongly damped plate. Weak solutions to the 2D model without damping were obtained in~\cite{G}. In all the treatments mentioned above, the plate equations were considered under clamped boundary conditions at the ends of the interface. \par More recent works have considered an infinite plate model with periodic boundary conditions. In \cite{GH}, Grandmont and Hillairet obtained global solutions to the 2D model, with lower order damping on the plate. Local-in-time strong solutions for the 2D model were also constructed in \cite{GHL} under different scenarios involving either a plate with rotational inertia (no damping) or a rod instead of a beam (wave equation). Models, where the plate equation on the lower dimensional interface is replaced by the damped wave equation, have also been treated earlier by Lequeurre in both 2D and 3D~\cite{L1,L2}. In another recent work, Badra and Takahashi \cite{BT} proved the well-posedness and Gevrey regularity of the viscous 2D model without imposing any damping, rotational inertia, or any other approximation on the plate equations. \par Models of viscous Koiter shell interactions which involve coupling the Navier-Stokes equations with fourth-order hyperbolic equations on cylindrical domains were also studied in numerous works~\cite{CS2, CCS, GGCC, GGCCL, CGH, GM, L, LR, MC1, MC2, MC3}. The considered shell equations are nonlinear and model blood flow inside the arteries. The same 3D model on a cylindrical domain was also studied by Maity, Roy, and Raymond \cite{MRR}, who obtained the local-in-time solutions under less regularity on the initial data. 
For other related works on plate models, see~\cite{Bo, BKS, BS1, BS2, C, CK, DEGLT, MS}, and for other results on fluids interacting with elastic objects, see~\cite{AL,Bo,BST,CS1,IKLT,KOT,KT,RV,TT}. \par For mathematical treatments of the non-moving boundary viscous models of flow-structure interaction, one can find plenty of works on well-posedness and stabilization; cf.~for example~\cite{AB, AGW, Ch, CR}. On the other hand, inviscid models have been treated mainly through linearized potential flow-structure interaction models on a non-moving boundary~\cite{CLW, LW, W}. These models are mathematically valuable and physically meaningful when the structure velocity is of a much higher order of magnitude than the displacement, in which case a non-moving domain provides a fairly good approximation. \par To the best of our knowledge, there have been no works in the literature on the well-posedness of the inviscid free boundary model where the Euler equations are considered in place of the Navier-Stokes equations. To address the existence of solutions, we use an ALE (Arbitrary Lagrangian Eulerian) formulation, which fixes the domain and provides the necessary additional regularity for the variables. In particular, we use a change of variable via the harmonic extension of the boundary transversal displacement. The a~priori estimates are then obtained using a div-curl type bound on the fluid velocity. Tangential bounds provide control of the structure displacement and velocity, while the pressure term is determined by solving an elliptic problem with Robin boundary conditions on the plate. \par The construction of solutions turns out to be a challenging problem. Naturally, we need to first solve the variable coefficient Euler equations with non-homogeneous boundary conditions (prescribed normal component), but the low regularity of the pressure on the boundary does not allow for the usual fixed point scheme to be carried through. In our construction scheme, we solve the variable coefficient Euler equations with non-homogeneous boundary conditions (normal component given) in five stages. In the first step, we solve a linear transport equation under more regular boundary data, where we rely on two new tools: an extension operator allowing us to solve the problem on the whole space with no boundary conditions, and a specially designed boundary value problem for the pressure that exploits the regularizing effect coming through the boundary data at the interface and is based on certain cancellations that appear when formulating the Neumann/Robin type boundary conditions for the pressure (cf.~Remark~\ref{R01}). The approximate problem is then solved in the whole space employing a new technique involving Sobolev extensions, without imposing any boundary conditions and without imposing the variable divergence-free condition. In the next stage, the nonlinear problem, still with more regular boundary data, is solved by a fixed point technique using the extension operator and the solution of the linear problem. In the third stage, we prove that the unique fixed point solutions to the Euler equations with given variable coefficients satisfy the boundary conditions and the divergence conditions. In the fourth stage, we employ the vorticity formulation (pressure free), whereby we solve a div-curl type system and derive estimates for the full regularity of the velocity in terms of less regular boundary data.
In the final step, we derive solutions to the variable coefficient Euler equations under less regular data using a standard density argument and the uniform estimates from the previous step, thus concluding the proof of existence for the variable coefficient Euler equations. \par For the construction of solutions to the coupled Euler-plate system, the low regularity of the pressure does not allow for a fixed point scheme to be used. Instead, we use the fixed point scheme to obtain solutions to a regularized system that includes a damping term in the plate. Once solutions to the regularized system are obtained, the coupled a~priori estimates, which involve the cancellation of the pressure boundary terms, give rise to estimates uniform in the damping parameter $\nu$ and thus allow us to pass to the limit in the damping parameter to obtain solutions to the original system without damping. \par The paper is structured as follows. In Section~\ref{sec02}, we introduce the model and restate it in the ALE variables. The first main result, contained in Theorem~\ref{T01}, provides a~priori estimates for the existence of a local-in-time solution for the initial velocity in $H^{2.5+\delta}$ (the minimal regularity for the classical Euler equations) and the initial plate velocity in $H^{2+\delta}$, where $\delta>0$ is arbitrary and not necessarily small. Next, Theorem~\ref{T03} provides the existence of a local solution, i.e., gives a construction of a solution, when $\delta\geq0.5$. In Section~\ref{secap}, we prove the statement on the a~priori estimates. The first part of the proof, stated in Lemma~\ref{L01}, contains bounds on the cofactor matrix and the Jacobian. The estimates controlling the tangential components on the boundary are obtained in Lemma~\ref{L02}. There, energy estimates performed on the plate equation are derived by exploiting the coupling with the Euler equations to eliminate the pressure term. A characteristic feature of these estimates is that the fluid velocity in the interior appears in them as a lower order term. \par The estimates controlling the pressure term are derived in Lemma~\ref{L03} via solving an elliptic problem for the pressure with Robin type boundary conditions on the moving interface. Control of the interior fluid velocity is accomplished using the ALE vorticity formulation and div-curl type estimates (Lemma~\ref{L04}), with the estimates performed on the whole space using Sobolev extensions. The proof of Theorem~\ref{T01} is then provided in Section~\ref{sec06}. Next, a short Section~\ref{sec07} provides a discussion of the compatibility conditions imposed on the data at the boundary. Section~\ref{sec08} contains the proof of uniqueness of solutions in the regularity class $H^{2.5+ \delta}$ for the fluid velocity $v$ and $H^{4+\delta} \times H^{2+\delta}$ for the plate displacement $w$ and the velocity $w_{t}$, with the additional constraint $\delta \geq 0.5$. This additional constraint on the regularity exponent turns out to be necessary in the uniqueness argument when performing the pressure estimate (cf.~the comment below \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3155}) and when bounding the commutator terms on the difference of the two solutions. \par Finally, in Section~\ref{secle}, we provide the construction of solutions.
We start with the construction of solutions for the variable coefficient Euler equations, where the difficulties are the inflow condition \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3194} on the top and the low regularity of the pressure boundary condition \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3212} due to the first term, $w_{tt}$. In the second step, Sections~\ref{sec20}--\ref{sec11}, we construct a local solution for a regularized Euler-plate system. Finally, in the last step of the proof, we pass to the limit in the plate damping parameter $\nu\to0$, concluding the construction. \par \startnewsection{The model and the main results}{sec02} We consider a flow-structure interaction system, defined on an open bounded domain $\Omega(t)\subseteq {\mathbb R}^{3}$, which evolves in time $t$ over $[0,T]$, where $T>0$. The dynamics of the flow are modeled by the incompressible Euler equations \begin{align}\thelt{8Th sw ELzX U3X7 Ebd1Kd Z7 v 1rN 3Gi irR XG KWK0 99ov BM0FDJ Cv k opY NQ2 aN9 4Z 7k0U nUKa mE3OjU 8D F YFF okb SI2 J9 V9gV lM8A LWThDP nP u 3EL 7HP D2V Da ZTgg zcCC mbvc70 qq P cC9 mt6 0og cr TiA3 HEjw TK8ymK eu J Mc4 q6d Vz2 00 XnYU tLR9 GYjPXv FO V r6W 1zU K1W bP ToaW JJuK nxBLnd 0f t DEb Mmj 4lo HY yhZy MjM9 1zQS4p 7z 8 eKa 9h0 Jrb ac ekci rexG 0z4n3x z0 Q OWS vFj 3jL hW XUIU 21iI AwJtI3 Rb W a90 I7r zAI qI 3UEl UJG7 tLtUXz w4 K QNE TvX zqW au} \begin{split} & u_t + (u\cdot \nabla) u + \nabla p = 0 \\& \nabla \cdot u=0 \end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n301} \end{align} in $\Omega(t) \times[0,T]$. For simplicity of presentation, we assume that $\Omega(0)=\Omega={\mathbb T}^2\times [0,1]$, i.e., the initial domain is ${\mathbb R}^2\times[0,1]$, with the 1-periodic boundary conditions on the sides. Denote \begin{equation} \Gamma_1={\mathbb T}^2 \times \{1\} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n302} \end{equation} and \begin{equation} \Gamma_0={\mathbb T}^2 \times \{0\} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n303} \end{equation} the initial position of the upper and the lower portions of the boundary. We impose the slip boundary condition on the bottom \begin{equation} u\cdot N = 0 \onon{\Gamma_0} . 
\label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n304} \end{equation} A function $w\colon \Gamma_1\times[0,T)\to {\mathbb R}$ satisfies the fourth-order damped plate equation \begin{equation} w_{tt} + \Delta_2^2 w - \nu \Delta_{2} w_{t} = p \onon{\Gamma_1\times[0,T]} , \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n305} \end{equation} where $\nu\geq 0$ is fixed; the pressure $p$ is evaluated at $(x_1,x_2,w(x_1,x_2,t))$, with the initial condition \begin{equation} (w,w_t)|_{t=0}=(0,w_1) . \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n306} \end{equation} The general initial data, i.e., $w(0)$ nonzero, can be considered using the same approach. We emphasize that the case $\nu=0$ is included and is our primary model. However, in order to construct a solution when $\nu=0$, we first obtain solutions with $\nu>0$ satisfying a uniform in $\nu$ bound in an appropriate solution space, and pass to the limit as $\nu\to0$. The reason why the parameter $\nu>0$ is needed is the low regularity of the pressure term forcing the plate equation, while a~priori estimates rely on cancellation of the lower regularity term involving the pressure. Since we are mainly interested in the limiting case $\nu=0$, we always assume $\nu\in[0,1]$. The variable $w$ represents the height of the interface at $t\in[0,T]$. We assume that the plate evolves with the fluid velocity, and $w$ thus satisfies the kinematic condition \def\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH{\partial} \begin{equation} w_t + u_1 \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{1} w + u_2 \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{2} w = u_3 . 
\label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n307}
\end{equation}
Note that \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n307} may be rewritten as
\begin{equation}
w_t - |(\partial_{1}w,\partial_{2}w,-1)|\, u(x_1,x_2,w(x_1,x_2,t))\cdot n = 0
,
\label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n308}
\end{equation}
where $n$ is the outward unit normal to the moving interface (the dynamic normal), asserting the matching of the normal velocity components of the fluid and the interface. Denote by $\psi\colon \Omega\times[0,T]\to {\mathbb R}$ the harmonic extension of $1+w$ to the domain $\Omega=\Omega(0)$, i.e., assume that $\psi$ solves
\begin{align}
\begin{split}
&\Delta \psi = 0
\inon{in $\Omega\times[0,T]$}
\\&
\psi(x_1,x_2,1,t)=1+w(x_1,x_2,t)
\inon{on $\Gamma_1\times [0,T]$}
\\&
\psi(x_1,x_2,0,t)=0
\inon{on $\Gamma_0\times [0,T]$}
.
\end{split}
\label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n309}
\end{align}
Next, we define $\eta\colon \Omega\times[0,T]\to \Omega(t) $ as
\begin{equation}
\eta(x_1,x_2,x_3,t)=(x_1,x_2,\psi(x_1,x_2,x_3,t))
\comma (x_1,x_2,x_3)\in \Omega
,
\label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n310}
\end{equation}
which represents the ALE change of variable. Note that
\begin{equation}
\nabla \eta =
\begin{pmatrix}
1 & 0 & 0 \\
0 & 1 & 0 \\
\partial_{1}\psi & \partial_{2}\psi & \partial_{3}\psi
\end{pmatrix}
.
\label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n311} \end{equation} Denote $a=(\nabla \eta)^{-1}$, or in the matrix notation \begin{equation} a = \frac{1}{J} \tda = \begin{pmatrix} 1 & 0 & 0 \\ 0 & 1 & 0 \\ -\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{1}\psi/\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3}\psi & -\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{2}\psi/\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3}\psi & 1/\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3}\psi \end{pmatrix} , \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n312} \end{equation} where \begin{equation} J=\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3}\psi \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n313} \end{equation} is the Jacobian and \begin{equation} \tda = \begin{pmatrix} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3}\psi & 0 & 0 \\ 0 & \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3} \psi & 0 \\ -\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{1}\psi & -\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{2}\psi & 1 \end{pmatrix} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n314} \end{equation} stands for the cofactor matrix. Since $b$ is the cofactor matrix, it satisfies the Piola identity \begin{equation} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{i}\tda_{ij}=0 \comma j=1,2,3 , \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n315} \end{equation} which can also be verified directly from~\eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n314}. 
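For instance, for the first column of the cofactor matrix the identity in question reads
\[
\partial_{1}\tda_{11}+\partial_{2}\tda_{21}+\partial_{3}\tda_{31}
= \partial_{1}(\partial_{3}\psi)+0-\partial_{3}(\partial_{1}\psi)
= 0 ;
\]
the second column is checked in the same way, while for the third column the assertion is immediate since its only nonzero entry is the constant~$1$.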
We use the summation convention on repeated indices; thus, unless indicated otherwise, the repeated indices are summed over 1,~2,~3. Next, denote by \begin{align}\thelt{Q OWS vFj 3jL hW XUIU 21iI AwJtI3 Rb W a90 I7r zAI qI 3UEl UJG7 tLtUXz w4 K QNE TvX zqW au jEMe nYlN IzLGxg B3 A uJ8 6VS 6Rc PJ 8OXW w8im tcKZEz Ho p 84G 1gS As0 PC owMI 2fLK TdD60y nH g 7lk NFj JLq Oo Qvfk fZBN G3o1Dg Cn 9 hyU h5V SP5 z6 1qvQ wceU dVJJsB vX D G4E LHQ HIa PT bMTr sLsm tXGyOB 7p 2 Os4 3US bq5 ik 4Lin 769O TkUxmp I8 u GYn fBK bYI 9A QzCF w3h0 geJftZ ZK U 74r Yle ajm km ZJdi TGHO OaSt1N nl B 7Y7 h0y oWJ ry rVrT zHO8 2S7oub QA W x9d } \begin{split} & v(x,t) = u(\eta(x,t),t) \\& q(x,t) = p(\eta(x,t),t) \end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n316} \end{align} the ALE velocity and the pressure. With this change of variable, the system \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n301} becomes \begin{align}\thelt{y nH g 7lk NFj JLq Oo Qvfk fZBN G3o1Dg Cn 9 hyU h5V SP5 z6 1qvQ wceU dVJJsB vX D G4E LHQ HIa PT bMTr sLsm tXGyOB 7p 2 Os4 3US bq5 ik 4Lin 769O TkUxmp I8 u GYn fBK bYI 9A QzCF w3h0 geJftZ ZK U 74r Yle ajm km ZJdi TGHO OaSt1N nl B 7Y7 h0y oWJ ry rVrT zHO8 2S7oub QA W x9d z2X YWB e5 Kf3A LsUF vqgtM2 O2 I dim rjZ 7RN 28 4KGY trVa WW4nTZ XV b RVo Q77 hVL X6 K2kq FWFm aZnsF9 Ch p 8Kx rsc SGP iS tVXB J3xZ cD5IP4 Fu 9 Lcd TR2 Vwb cL DlGK 1ro3 EEyqEA zw 6} \begin{split} & \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{t} v_i + v_1 a_{j1} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j}v_i + v_2 a_{j2} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j}v_i + \frac{1}{\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3}\psi}(v_3-\psi_t) \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3} v_i + a_{ki}\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k}q =0 , \\& a_{ki} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k}v_i=0 \end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n317} \end{align} in $\Omega\times[0,T]$, where we used $a_{j3}\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j} v_i=(1/\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3}\psi )\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3}v_i$. The initial condition reads \begin{equation} v|_{t=0} = v_0 . 
\label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n318} \end{equation} The boundary condition on the bottom boundary is \begin{equation} v_3=0 \inon{on $\Gamma_0$} , \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n320} \end{equation} while, using \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n314} and the second equation in \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n309}, we may rewrite \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n307} as \begin{equation} \tda_{3i}v_i = w_t \inon{on $\Gamma_1$} . \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n321} \end{equation} On the other hand, the plate equation \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n305} simply reads \begin{align}\thelt{geJftZ ZK U 74r Yle ajm km ZJdi TGHO OaSt1N nl B 7Y7 h0y oWJ ry rVrT zHO8 2S7oub QA W x9d z2X YWB e5 Kf3A LsUF vqgtM2 O2 I dim rjZ 7RN 28 4KGY trVa WW4nTZ XV b RVo Q77 hVL X6 K2kq FWFm aZnsF9 Ch p 8Kx rsc SGP iS tVXB J3xZ cD5IP4 Fu 9 Lcd TR2 Vwb cL DlGK 1ro3 EEyqEA zw 6 sKe Eg2 sFf jz MtrZ 9kbd xNw66c xf t lzD GZh xQA WQ KkSX jqmm rEpNuG 6P y loq 8hH lSf Ma LXm5 RzEX W4Y1Bq ib 3 UOh Yw9 5h6 f6 o8kw 6frZ wg6fIy XP n ae1 TQJ Mt2 TT fWWf jJrX ilpYGr} w_{tt} +\Delta_2^2 w - \nu \Delta_{2} w_{t} = q , \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n322} \end{align} \def\OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH{\int} where the pressure is normalized by the condition \begin{equation} \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH_{\Gamma_1} q = 0 , 
\label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n326} \end{equation} for all $t\in[0,T]$. \par The next theorem, asserting the a~priori estimates for the local existence for the flow-structure problem \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n317}--\eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n322}, is the main result of the paper. \par \cole \begin{Theorem} \label{T01} (A~priori~estimates~for~existence) Let $0\leq \nu \leq 1$. Assume that $(v,w)$ is a $C^{\infty}$ solution on an interval $[0,T]$ with \begin{align}\thelt{FWFm aZnsF9 Ch p 8Kx rsc SGP iS tVXB J3xZ cD5IP4 Fu 9 Lcd TR2 Vwb cL DlGK 1ro3 EEyqEA zw 6 sKe Eg2 sFf jz MtrZ 9kbd xNw66c xf t lzD GZh xQA WQ KkSX jqmm rEpNuG 6P y loq 8hH lSf Ma LXm5 RzEX W4Y1Bq ib 3 UOh Yw9 5h6 f6 o8kw 6frZ wg6fIy XP n ae1 TQJ Mt2 TT fWWf jJrX ilpYGr Ul Q 4uM 7Ds p0r Vg 3gIE mQOz TFh9LA KO 8 csQ u6m h25 r8 WqRI DZWg SYkWDu lL 8 Gpt ZW1 0Gd SY FUXL zyQZ hVZMn9 am P 9aE Wzk au0 6d ZghM ym3R jfdePG ln 8 s7x HYC IV9 Hw Ka6v EjH5 J} \begin{split} \Vert v_0\Vert_{H^{2.5+\delta}}, \Vert w_1\Vert_{H^{2+\delta}(\Gamma_1)} \leq M , \end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n323} \end{align} where $M\geq1$ and $\delta>0$. 
Then $v$, $w$, $\psi$, and $a$ satisfy \begin{align}\thelt{LXm5 RzEX W4Y1Bq ib 3 UOh Yw9 5h6 f6 o8kw 6frZ wg6fIy XP n ae1 TQJ Mt2 TT fWWf jJrX ilpYGr Ul Q 4uM 7Ds p0r Vg 3gIE mQOz TFh9LA KO 8 csQ u6m h25 r8 WqRI DZWg SYkWDu lL 8 Gpt ZW1 0Gd SY FUXL zyQZ hVZMn9 am P 9aE Wzk au0 6d ZghM ym3R jfdePG ln 8 s7x HYC IV9 Hw Ka6v EjH5 J8Ipr7 Nk C xWR 84T Wnq s0 fsiP qGgs Id1fs5 3A T 71q RIc zPX 77 Si23 GirL 9MQZ4F pi g dru NYt h1K 4M Zilv rRk6 B4W5B8 Id 3 Xq9 nhx EN4 P6 ipZl a2UQ Qx8mda g7 r VD3 zdD rhB vk LDJo t} \begin{split} &\Vert v\Vert_{H^{2.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_t\Vert_{H^{2+\delta}(\Gamma_1)}, \Vert \psi\Vert_{H^{4.5+\delta}}, \Vert \psi_t\Vert_{H^{2.5+\delta}}, \Vert a\Vert_{H^{3.5+\delta}} \leq C_0 M \comma t\in[0,T_0] , \end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n324} \end{align} with \begin{equation} \nu^{1/2} \Vert w_t\Vert_{L^2H^{3+\delta}(\Gamma_1\times[0,T_0])} \leq C_0 M \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n339} \end{equation} and \begin{align}\thelt{d SY FUXL zyQZ hVZMn9 am P 9aE Wzk au0 6d ZghM ym3R jfdePG ln 8 s7x HYC IV9 Hw Ka6v EjH5 J8Ipr7 Nk C xWR 84T Wnq s0 fsiP qGgs Id1fs5 3A T 71q RIc zPX 77 Si23 GirL 9MQZ4F pi g dru NYt h1K 4M Zilv rRk6 B4W5B8 Id 3 Xq9 nhx EN4 P6 ipZl a2UQ Qx8mda g7 r VD3 zdD rhB vk LDJo tKyV 5IrmyJ R5 e txS 1cv EsY xG zj2T rfSR myZo4L m5 D mqN iZd acg GQ 0KRw QKGX g9o8v8 wm B fUu tCO cKc zz kx4U fhuA a8pYzW Vq 9 Sp6 CmA cZL Mx ceBX Dwug sjWuii Gl v JDb 08h BOV C1 p} \begin{split} & \Vert v_t\Vert_{H^{1.5+\delta}}, \Vert w_{tt}\Vert_{H^{\delta}(\Gamma_1)}, \Vert q\Vert_{H^{1.5+\delta}} \leq K \comma t\in[0,T_0] , \end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n325} \end{align} where $C_0>0$ is a constant, $K$ and $T_0$ are constants depending on~$M$. In particular, $C_0$, $K$, and $T_0$ do not depend on~$\nu$. \end{Theorem} \colb \par The parameter $\delta>0$, which does not have to be small, is fixed throughout; in particular, we allow all the constants to depend on $\delta$ without mention. All the results in this paper also apply when $\nu\geq 1$ with the constants depending on~$\nu$. The proof of Theorem~\ref{T01} is provided in Section~\ref{secap}. \par Next, we assert the uniqueness of solutions in Theorem~\ref{T01}. For this, we need slightly more regular solutions; namely, we need to assume $\delta\geq0.5$. \par \cole \begin{Theorem} \label{T02} (Uniqueness) Let $0\leq \nu \leq 1$ and $\delta\geq0.5$. 
Assume that two solutions $(v,w)$ and $(\tilde v, \tilde w)$ satisfy the regularity \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n324}--\eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n325} for some $T>0$ and
\begin{equation}
(v(0),w(0))=(\tilde v(0),\tilde w(0))
.
\label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3344}
\end{equation}
Then $(v,w)$ and $(\tilde v, \tilde w)$ agree on~$[0,T]$.
\par
\end{Theorem}
\colb
\par
The theorem is proven in Section~\ref{sec08}.
\par
Next, we assert the local existence with initial data $(v_0,w_1)$ in $H^{m}(\Omega)\times H^{m-0.5}(\Gamma_1)$, where $m\geq 3$ is not necessarily an integer.
\par
\cole
\begin{Theorem}
\label{T03}
(Local existence)
Let $0\leq \nu \leq 1$. Assume that the initial data
\begin{equation}
(v_{0}, w_{1})\in H^{2.5 +\delta} \times H^{2+\delta}(\Gamma_{1})
,
\llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3346}
\end{equation}
where $\delta \geq 0.5$, satisfy the compatibility conditions
\begin{equation}
v_{0}\cdot N |_{\Gamma_{1}} =w_1
\label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n327}
\end{equation}
and
\begin{equation}
v_{0}\cdot N |_{\Gamma_{0}}=0
\label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n328}
\end{equation}
with
\begin{equation}
\div v_{0}=0
\inon{in~$\Omega$}
,
\label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n329}
\end{equation}
and
\begin{equation}
\int_{\Gamma_1} w_1 = 0
.
\label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3321} \end{equation} Then there exists a unique local-in-time solution $(v,q,w,w_{t} )$ to the Euler-plate system \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n317}--\eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n322} with the initial data $(v_0,w_1)$ such that \begin{align}\thelt{Yt h1K 4M Zilv rRk6 B4W5B8 Id 3 Xq9 nhx EN4 P6 ipZl a2UQ Qx8mda g7 r VD3 zdD rhB vk LDJo tKyV 5IrmyJ R5 e txS 1cv EsY xG zj2T rfSR myZo4L m5 D mqN iZd acg GQ 0KRw QKGX g9o8v8 wm B fUu tCO cKc zz kx4U fhuA a8pYzW Vq 9 Sp6 CmA cZL Mx ceBX Dwug sjWuii Gl v JDb 08h BOV C1 pni6 4TTq Opzezq ZB J y5o KS8 BhH sd nKkH gnZl UCm7j0 Iv Y jQE 7JN 9fd ED ddys 3y1x 52pbiG Lc a 71j G3e uli Ce uzv2 R40Q 50JZUB uK d U3m May 0uo S7 ulWD h7qG 2FKw2T JX z BES 2Jk Q4U} \begin{split} &v \in L^{\infty}([0,T];H^{2.5+ \delta}(\Omega)) \cap C([0,T];H^{0.5 +\delta}(\Omega)) , \\& v_{t} \in L^{\infty}([0,T];H^{0.5 +\delta}(\Omega)) , \\& q \in L^{\infty}([0,T];H^{1.5 + \delta}(\Omega)) , \\& w \in L^{\infty}([0,T];H^{4+ \delta}(\Gamma_{1})) , \\& w_{t} \in L^{\infty}([0,T];H^{2+ \delta}(\Gamma_{1})) , \end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n330} \end{align} for some time $T>0$ depending on the size of the initial data. \end{Theorem} \colb \par The theorem is proven in Section~\ref{secle} below. Note that \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n330}$_1$ self-improves to $v\in C([0,T];H^{2.5+\delta_0}(\Omega))$ for any $\delta_0<\delta$. \par \startnewsection{A~priori bounds}{secap} This section is devoted to establishing the a~priori bounds for the Euler-plate system. 
\par \subsection{Basic properties of the coefficient matrix $a$, the cofactor matrix $b$, and the Jacobian~$J$} \label{sec02a} Note that, by multiplying the equation \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n317}$_2$ with $J$ and integrating it over $\Omega$, while using \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n320} and the Piola identity \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n315}, we get $\OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH_{\Gamma_1} \tda_{3i}v_i =0$, which in turn, by \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n321}, implies \begin{equation} \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH_{\Gamma_1} w_t=0 . \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n331} \end{equation} Also, since $a=(\nabla \eta)^{-1}$, we have \begin{equation} a_t = - a\nabla \eta_t a , \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n332} \end{equation} where the right-hand side is understood as a product of three matrices. In the proof of the a~priori estimates, we work on an interval of time $[0,T]$ such that \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n324} holds, where $C_0$ is a fixed constant determined in the Gronwall argument below. \par \cole \begin{Lemma} \label{L01} Let $\epsilon\in(0,1/2]$. 
Assume that \begin{align}\thelt{fUu tCO cKc zz kx4U fhuA a8pYzW Vq 9 Sp6 CmA cZL Mx ceBX Dwug sjWuii Gl v JDb 08h BOV C1 pni6 4TTq Opzezq ZB J y5o KS8 BhH sd nKkH gnZl UCm7j0 Iv Y jQE 7JN 9fd ED ddys 3y1x 52pbiG Lc a 71j G3e uli Ce uzv2 R40Q 50JZUB uK d U3m May 0uo S7 ulWD h7qG 2FKw2T JX z BES 2Jk Q4U Dy 4aJ2 IXs4 RNH41s py T GNh hk0 w5Z C8 B3nU Bp9p 8eLKh8 UO 4 fMq Y6w lcA GM xCHt vlOx MqAJoQ QU 1 e8a 2aX 9Y6 2r lIS6 dejK Y3KCUm 25 7 oCl VeE e8p 1z UJSv bmLd Fy7ObQ FN l J6F Rd} \begin{split} &\Vert v\Vert_{H^{2.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_t\Vert_{H^{2+\delta}(\Gamma_1)} \leq C_0 M \comma t\in[0,T_0] , \end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n333} \end{align} where $M\geq1$ is as in the statement of Theorem~\ref{T01}. Then we have \begin{equation} \Vert a-I\Vert_{H^{1.5+\delta}}, \Vert \tda-I\Vert_{H^{1.5+\delta}}, \Vert J-1\Vert_{H^{1.5+\delta}} \leq \epsilon \comma t\in [0,T_0] \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n334} \end{equation} and \begin{equation} \Vert J-1\Vert_{L^{\infty}} \leq \epsilon \comma t\in [0,T_0] , \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n335} \end{equation} where $T_0$ satisfies \begin{equation} 0<T_0\leq \frac{\epsilon}{C M^{3}} , \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n336} \end{equation} and $C$ depends on $C_0$. 
\end{Lemma} \colb \par Note that, by \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n334}, we also have \begin{equation} \Vert a-I\Vert_{L^{\infty}}, \Vert \tda-I\Vert_{L^{\infty}} \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \epsilon \comma t\in [0,T_0] , \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n337} \end{equation} while \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n335} gives \begin{equation} \frac12 \leq J \leq \frac32 \comma t\in [0,T_0] ; \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n338} \end{equation} in particular, $J=\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3}\psi$ is positive and stays away from~$0$. The pressure estimates require $\epsilon\leq 1/C$, where $C$ is a constant, while we need $\epsilon\leq 1/C M$ when concluding the a~priori estimates in Section~\ref{sec06} below. Therefore, we fix \begin{equation} \epsilon=\frac{1}{CM} , \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n340} \end{equation} where we assumed for convenience $M\geq1$ and work with \begin{equation} T_0 = \frac{1}{C M^{4}} , \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n341} \end{equation} where $C$ is a sufficiently large constant. The symbol $C\geq1$ denotes a sufficiently large constant, which may change from inequality to inequality. Also, we write $A\dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls B$ when $A\leq C B$ for a constant~$C$. 
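The pointwise bounds above rely on the Sobolev embedding $H^{1.5+\delta}(\Omega)\subseteq L^{\infty}(\Omega)$, which is applicable since $\Omega\subseteq{\mathbb R}^{3}$ and $1.5+\delta>3/2$; namely,
\[
\Vert f\Vert_{L^{\infty}(\Omega)}
\les
\Vert f\Vert_{H^{1.5+\delta}(\Omega)}
\comma f\in H^{1.5+\delta}(\Omega)
.
\]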
\par Before the proof, note that by the definitions of $\psi$ and $\eta$ in \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n309} and \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n310} we have \begin{align}\thelt{Lc a 71j G3e uli Ce uzv2 R40Q 50JZUB uK d U3m May 0uo S7 ulWD h7qG 2FKw2T JX z BES 2Jk Q4U Dy 4aJ2 IXs4 RNH41s py T GNh hk0 w5Z C8 B3nU Bp9p 8eLKh8 UO 4 fMq Y6w lcA GM xCHt vlOx MqAJoQ QU 1 e8a 2aX 9Y6 2r lIS6 dejK Y3KCUm 25 7 oCl VeE e8p 1z UJSv bmLd Fy7ObQ FN l J6F RdF kEm qM N0Fd NZJ0 8DYuq2 pL X JNz 4rO ZkZ X2 IjTD 1fVt z4BmFI Pi 0 GKD R2W PhO zH zTLP lbAE OT9XW0 gb T Lb3 XRQ qGG 8o 4TPE 6WRc uMqMXh s6 x Ofv 8st jDi u8 rtJt TKSK jlGkGw t8 n F} \begin{split} \Vert \eta\Vert_{H^{4.5+\delta}} \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \Vert \psi\Vert_{H^{4.5+\delta}} \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \Vert w\Vert_{H^{4+\delta}(\Gamma_1)} \end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n342} \end{align} and \begin{align}\thelt{AJoQ QU 1 e8a 2aX 9Y6 2r lIS6 dejK Y3KCUm 25 7 oCl VeE e8p 1z UJSv bmLd Fy7ObQ FN l J6F RdF kEm qM N0Fd NZJ0 8DYuq2 pL X JNz 4rO ZkZ X2 IjTD 1fVt z4BmFI Pi 0 GKD R2W PhO zH zTLP lbAE OT9XW0 gb T Lb3 XRQ qGG 8o 4TPE 6WRc uMqMXh s6 x Ofv 8st jDi u8 rtJt TKSK jlGkGw t8 n FDx jA9 fCm iu FqMW jeox 5Akw3w Sd 8 1vK 8c4 C0O dj CHIs eHUO hyqGx3 Kw O lDq l1Y 4NY 4I vI7X DE4c FeXdFV bC F HaJ sb4 OC0 hu Mj65 J4fa vgGo7q Y5 X tLy izY DvH TR zd9x SRVg 0Pl6Z8 9} \begin{split} \Vert \eta_t\Vert_{H^{2.5+\delta}} \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \Vert \psi_t\Vert_{H^{2.5+\delta}} \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \Vert w_t\Vert_{H^{2+\delta}(\Gamma_1)} , \end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n343} \end{align} and both far right sides are bounded by constant multiples of $M$. 
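Here, the first inequality in each of the two chains comes directly from the definition of $\eta$, while the passage from $\psi$ to $w$ reflects the standard half-derivative gain for the harmonic extension. In the present flat geometry this gain can be seen explicitly: writing $g=1+w$ and denoting by $\widehat g_{k}(t)$, $k\in{\mathbb Z}^{2}$, the Fourier coefficients of $g$ in the horizontal variables, the solution of the Dirichlet problem for $\psi$ is
\[
\psi(x_1,x_2,x_3,t)
= \widehat g_{0}(t)\, x_3
+ \sum_{k\in {\mathbb Z}^{2}\setminus\{0\}}
\widehat g_{k}(t)\,
\frac{\sinh (2\pi |k| x_3)}{\sinh (2\pi |k|)}\,
e^{2\pi i k\cdot (x_1,x_2)}
,
\]
from which the estimate $\Vert \psi\Vert_{H^{s+0.5}} \les 1+\Vert w\Vert_{H^{s}(\Gamma_1)}$ follows for the values of $s$ used above. Since differentiating the same boundary value problem in time shows that $\psi_t$ is the harmonic extension of $w_t$ on $\Gamma_1$ and of $0$ on $\Gamma_0$, the analogous estimate holds for the pair $(\psi_t,w_t)$.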
\par Also, we have \begin{align}\thelt{AE OT9XW0 gb T Lb3 XRQ qGG 8o 4TPE 6WRc uMqMXh s6 x Ofv 8st jDi u8 rtJt TKSK jlGkGw t8 n FDx jA9 fCm iu FqMW jeox 5Akw3w Sd 8 1vK 8c4 C0O dj CHIs eHUO hyqGx3 Kw O lDq l1Y 4NY 4I vI7X DE4c FeXdFV bC F HaJ sb4 OC0 hu Mj65 J4fa vgGo7q Y5 X tLy izY DvH TR zd9x SRVg 0Pl6Z8 9X z fLh GlH IYB x9 OELo 5loZ x4wag4 cn F aCE KfA 0uz fw HMUV M9Qy eARFe3 Py 6 kQG GFx rPf 6T ZBQR la1a 6Aeker Xg k blz nSm mhY jc z3io WYjz h33sxR JM k Dos EAA hUO Oz aQfK Z0cn 5kq} \begin{split} \Vert J\Vert_{H^{3.5+\delta}} = \Vert \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3}\psi\Vert_{H^{3.5+\delta}} \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \Vert \psi\Vert_{H^{4.5+\delta}} \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \Vert w\Vert_{H^{4+\delta}(\Gamma_1)} \end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n344} \end{align} and \begin{equation} \Vert J_t\Vert_{H^{1.5+\delta}} \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \Vert \psi_t\Vert_{H^{2.5+\delta}} \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \Vert \eta_t\Vert_{H^{2.5+\delta}} \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \Vert w_t\Vert_{H^{2+\delta}(\Gamma_1)} , \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n345} \end{equation} with both right sides bounded by a constant multiple of $M$. 
\par \begin{proof}[Proof of Lemma~\ref{L01}] By \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n332}, we have \begin{align}\thelt{7X DE4c FeXdFV bC F HaJ sb4 OC0 hu Mj65 J4fa vgGo7q Y5 X tLy izY DvH TR zd9x SRVg 0Pl6Z8 9X z fLh GlH IYB x9 OELo 5loZ x4wag4 cn F aCE KfA 0uz fw HMUV M9Qy eARFe3 Py 6 kQG GFx rPf 6T ZBQR la1a 6Aeker Xg k blz nSm mhY jc z3io WYjz h33sxR JM k Dos EAA hUO Oz aQfK Z0cn 5kqYPn W7 1 vCT 69a EC9 LD EQ5S BK4J fVFLAo Qp N dzZ HAl JaL Mn vRqH 7pBB qOr7fv oa e BSA 8TE btx y3 jwK3 v244 dlfwRL Dc g X14 vTp Wd8 zy YWjw eQmF yD5y5l DN l ZbA Jac cld kx Yn3V QYI} \begin{split} \Vert a-I\Vert_{H^{1.5+\delta}} \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \left\Vert \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH_{0}^{t} a \nabla \eta_t a \,ds \right\Vert_{H^{1.5+\delta}} \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls T_0 M^{3} , \end{split} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n346} \end{align} where we used \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n342} and~\eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n345}. Now we only need to choose $T_0 \leq \epsilon/C M^{3}$, where $C$ is a sufficiently large constant, and the bound on the first term in \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n334} is established. Similarly, we have \begin{align}\thelt{6T ZBQR la1a 6Aeker Xg k blz nSm mhY jc z3io WYjz h33sxR JM k Dos EAA hUO Oz aQfK Z0cn 5kqYPn W7 1 vCT 69a EC9 LD EQ5S BK4J fVFLAo Qp N dzZ HAl JaL Mn vRqH 7pBB qOr7fv oa e BSA 8TE btx y3 jwK3 v244 dlfwRL Dc g X14 vTp Wd8 zy YWjw eQmF yD5y5l DN l ZbA Jac cld kx Yn3V QYIV v6fwmH z1 9 w3y D4Y ezR M9 BduE L7D9 2wTHHc Do g ZxZ WRW Jxi pv fz48 ZVB7 FZtgK0 Y1 w oCo hLA i70 NO Ta06 u2sY GlmspV l2 x y0X B37 x43 k5 kaoZ deyE sDglRF Xi 9 6b6 w9B dId Ko gSU} \begin{split} \Vert J-1\Vert_{H^{1.5+\delta}} \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \left\Vert \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH_{0}^{t} J_t\,ds\right\Vert_{H^{1.5+\delta}} \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls M T_0 . 
\end{split} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n347} \end{align} The bound for $\Vert \tda-I\Vert_{H^{1.5+\delta}}$ follows immediately from those on $\Vert a-I\Vert_{H^{1.5+\delta}}$ and $ \Vert J-1\Vert_{H^{1.5+\delta}}$ by using $\tda = J a $. \end{proof} \par As pointed out above, the value of $\epsilon$ in Lemma~\ref{L01} is fixed in the pressure estimates and then further restricted in the conclusion of a~priori bounds in Section~\ref{sec06}. \par Note that by the definitions of $a$ and $b$ in the beginning of Section~\ref{sec02}, we have \begin{equation} \Vert a\Vert_{H^{3.5+\delta}}, \Vert \tda\Vert_{H^{3.5+\delta}} \leq P(\Vert w\Vert_{H^{4+\delta}(\Gamma_1)}) \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n348} \end{equation} and \begin{equation} \Vert a_t\Vert_{H^{1.5+\delta}}, \Vert \tda_t\Vert_{H^{1.5+\delta}} \leq P(\Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_t\Vert_{H^{2+\delta}(\Gamma_1)} ) . \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n349} \end{equation} \colb Above and in the sequel, the symbol $P$ denotes a generic polynomial of its arguments. It is assumed to be nonnegative and is allowed to change from inequality to inequality. \par \subsection{The tangential estimate} \label{sec03} Denote \def\UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI{\Lambda} \begin{equation} \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI=(I-\Delta_2)^{1/2} , \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n350} \end{equation} where $\Delta_2$ denotes the Laplacian in $x_1$ and $x_2$ variables. The purpose of this section is to obtain the following a~priori estimate. 
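Note that, on $\Gamma_1={\mathbb T}^2$, the operator $\Lambda^{s}$ acts as the Fourier multiplier with symbol $(1+4\pi^{2}|k|^{2})^{s/2}$, so that, by Parseval's identity,
\[
\Vert \Lambda^{s} f\Vert_{L^{2}(\Gamma_1)}^{2}
= \sum_{k\in{\mathbb Z}^{2}} (1+4\pi^{2}|k|^{2})^{s}\, |\widehat f_{k}|^{2}
,
\]
where $\widehat f_{k}$ denote the Fourier coefficients of $f$; this quantity is comparable to $\Vert f\Vert_{H^{s}(\Gamma_1)}^{2}$, and in particular the left-hand side of the inequality in Lemma~\ref{L02} below controls $\Vert w\Vert_{H^{4+\delta}(\Gamma_1)}^{2}+\Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)}^{2}$.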
\par
\begin{Lemma}
\label{L02}
Under the assumptions of Theorem~\ref{T01}, we have
\begin{align}
\begin{split}
&
\Vert \Lambda^{4+\delta} w\Vert_{L^2(\Gamma_1)}^2
+ \Vert \Lambda^{2+\delta} w_{t}\Vert_{L^2(\Gamma_1)}^2
+ \nu \int_{0}^{t} \Vert \nabla_2 \Lambda^{2+\delta} w_{t} \Vert_{L^2(\Gamma_1)}^2 \, ds
\\&\quad
\lesssim
\Vert w_{t}(0)\Vert_{H^{2+\delta}(\Gamma_1)}^2
+ \Vert v(0) \Vert_{H^{2.5+\delta}}^2
+ \Vert v\Vert_{L^2}^{1/(2.5+\delta)} \Vert v\Vert_{H^{2.5+\delta}}^{(4+2\delta)/(2.5+\delta)}
\\&\quad\quad
+\int_{0}^{t}
P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert q\Vert_{H^{1.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)} )\,ds
,
\end{split}
\label{EQ351}
\end{align}
for $t\in [0,T_0]$.
\end{Lemma}
\par
\begin{proof}[Proof of Lemma~\ref{L02}]
Assume that \eqref{EQ323} holds.
We test the plate equation \eqref{EQ322} with $\Lambda^{2(2+\delta)}w_{t}$, obtaining
\begin{equation}
\frac12 \frac{d}{dt}
\Bigl(
\Vert \Delta_2 \Lambda^{2+\delta}w\Vert_{L^2(\Gamma_1)}^2
+ \Vert \Lambda^{2+\delta}w_{t}\Vert_{L^2(\Gamma_1)}^2
\Bigr)
+ \nu \Vert \nabla_2 \Lambda^{2+\delta} w_{t} \Vert_{L^2(\Gamma_1)}^2
=
\int_{\Gamma_1} q \Lambda^{2(2+\delta)} w_{t}
.
\label{EQ352}
\end{equation}
Integrating in time leads to
\begin{align}
\begin{split}
&
\frac12 \Vert \Delta_2 \Lambda^{2+\delta} w\Vert_{L^2(\Gamma_1)}^2
+ \frac12 \Vert \Lambda^{2+\delta} w_{t}\Vert_{L^2(\Gamma_1)}^2
+ \nu \int_{0}^{t} \Vert \nabla_2 \Lambda^{2+\delta} w_{t} \Vert_{L^2(\Gamma_1)}^2 \, ds
\\&\quad
=
\frac12 \Vert \Lambda^{2+\delta} w_{t}(0)\Vert_{L^2(\Gamma_1)}^2
+ \int_{0}^{t} \int_{\Gamma_1} q \Lambda^{2(2+\delta)} w_{t}
,
\end{split}
\label{EQ353}
\end{align}
where we also used $w(0)=0$.
Note that, by \eqref{EQ304}, the boundary condition on $\Gamma_0$ reads
\begin{equation}
v\cdot N=0
\quad\text{on~}\Gamma_0
.
\label{EQ355}
\end{equation}
To obtain \eqref{EQ351}, we claim that
\begin{align}
\begin{split}
&
\frac12 \int J \Lambda^{1.5+\delta} v_i \Lambda^{2.5+\delta} v_i \Big|_{t}
- \frac12 \int J \Lambda^{1.5+\delta} v_i \Lambda^{2.5+\delta} v_i \Big|_{0}
\\&\quad
\leq
- \int_{0}^{t} \int_{\Gamma_1} q\Lambda^{2(2+\delta)}w_{t}\,d\sigma \,ds
+ \int_{0}^{t}
P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert q\Vert_{H^{1.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)} )\,ds
.
\end{split}
\label{EQ3326}
\end{align}
Once \eqref{EQ3326} is established, we simply add \eqref{EQ353}, that is, \eqref{EQ352} integrated in time, and \eqref{EQ3326}; the high-order terms containing the pressure~$q$ cancel, and \eqref{EQ351} follows.
Note that the negative of the first term on the left-hand side of \eqref{EQ3326} is estimated by the third term on the right-hand side of \eqref{EQ351} (see \eqref{EQ376} below).
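\par
Regarding the last observation, we sketch how that term is controlled (the complete argument is the one leading to \eqref{EQ376} below): by H\"older's inequality,
\begin{equation*}
\left| \int J \Lambda^{1.5+\delta} v_i \Lambda^{2.5+\delta} v_i \right|
\lesssim
\Vert J\Vert_{L^\infty}
\Vert v\Vert_{H^{1.5+\delta}} \Vert v\Vert_{H^{2.5+\delta}}
\lesssim
\Vert v\Vert_{L^{2}}^{1/(2.5+\delta)} \Vert v\Vert_{H^{2.5+\delta}}^{(4+2\delta)/(2.5+\delta)}
,
\end{equation*}
where we used that $J$ remains bounded, by Lemma~\ref{L01}, and the interpolation inequality
$\Vert v\Vert_{H^{1.5+\delta}} \lesssim \Vert v\Vert_{L^{2}}^{1/(2.5+\delta)} \Vert v\Vert_{H^{2.5+\delta}}^{(1.5+\delta)/(2.5+\delta)}$;
the resulting expression is precisely the third term on the right-hand side of \eqref{EQ351}.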
To prove \eqref{EQ3326}, we first claim that
\begin{align}
\begin{split}
&
\frac12 \frac{d}{dt}
\int J \Lambda^{1.5+\delta} v_i \Lambda^{2.5+\delta} v_i
=
\frac12 \int J_t \Lambda^{1.5+\delta} v_i \Lambda^{2.5+\delta} v_i
+ \int J \Lambda^{1.5+\delta} \partial_{t}v_i \Lambda^{2.5+\delta} v_i
+ \bar I
,
\end{split}
\label{EQ3328}
\end{align}
where
\begin{equation}
\bar I
\leq
P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert q\Vert_{H^{1.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)} )
.
\label{EQ3327}
\end{equation}
To show \eqref{EQ3328}--\eqref{EQ3327}, first observe that, by the product rule, \eqref{EQ3328} holds with
\begin{equation}
\bar I
=
\frac12\int J \Lambda^{1.5+\delta} v_i \Lambda^{2.5+\delta} \partial_{t}v_i
- \frac12\int J \Lambda^{1.5+\delta} \partial_{t}v_i \Lambda^{2.5+\delta} v_i
.
\label{EQ3329}
\end{equation}
In order to show that \eqref{EQ3329} has a commutator form, we rewrite
\begin{align}
\begin{split}
\bar I
&=
\frac12 \int
\Bigl(
\Lambda^2 (J\Lambda^{1.5+\delta}v_i)
- \Lambda (J \Lambda^{2.5+\delta}v_i)
\Bigr)
\Lambda^{0.5+\delta} \partial_{t}v_i
\\&
=
\frac12 \int
\Bigl(
\Lambda^2 (J\Lambda^{1.5+\delta}v_i)
- J \Lambda^{3.5+\delta}v_i
\Bigr)
\Lambda^{0.5+\delta} \partial_{t}v_i
+
\frac12 \int
\Bigl(
J \Lambda^{3.5+\delta}v_i
- \Lambda (J \Lambda^{2.5+\delta}v_i)
\Bigr)
\Lambda^{0.5+\delta} \partial_{t}v_i
\\&
\lesssim
\Vert J\Vert_{H^{3}} \Vert v\Vert_{H^{2.5+\delta}} \Vert v_t\Vert_{H^{0.5+\delta}}
\leq
P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert q\Vert_{H^{1.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)} )
,
\end{split}
\label{EQ3330}
\end{align}
where in the last inequality, we bounded $v_t$ in terms of $v$ and $q$ directly from \eqref{EQ317}$_1$ as
\begin{align}
\begin{split}
\Vert v_t\Vert_{H^{0.5+\delta}}
&\lesssim
\Vert v\Vert_{H^{1.5+\delta}} \Vert a\Vert_{H^{1.5+\delta}} \Vert \nabla v\Vert_{H^{0.5+\delta}}
+ (\Vert v\Vert_{H^{1.5+\delta}} + \Vert \psi_t\Vert_{H^{1.5+\delta}} ) \Vert \nabla v\Vert_{H^{0.5+\delta}}
\\&\quad
+ \Vert a\Vert_{H^{1.5+\delta}} \Vert q\Vert_{H^{1.5+\delta}}
\\&
\leq
P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert q\Vert_{H^{1.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)} )
.
\end{split}
\label{EQ358}
\end{align}
Thus \eqref{EQ3328}, with the estimate \eqref{EQ3327}, is established.
\par
Note that the equations \eqref{EQ317} may be rewritten as
\begin{align}
\begin{split}
&
J\partial_{t} v_i
+ v_1 b_{j1} \partial_{j}v_i
+ v_2 b_{j2} \partial_{j}v_i
+ (v_3-\psi_t) \partial_{3} v_i
+ b_{ki}\partial_{k}q
=0
,
\\&
b_{ki} \partial_{k}v_i=0
.
\end{split}
\label{EQ354}
\end{align}
Using \eqref{EQ354}$_1$ in the second term of \eqref{EQ3328}, we obtain
\begin{align}
\begin{split}
&
\frac12 \frac{d}{dt}
\int J \Lambda^{1.5+\delta} v_i \Lambda^{2.5+\delta} v_i
\\&\quad
=
\frac12 \int J_t \Lambda^{1.5+\delta} v_i \Lambda^{2.5+\delta}v_i
+
\int
\Bigl(
J \Lambda^{1.5+\delta} (\partial_{t}v_i)
- \Lambda^{1.5+\delta}(J\partial_t v_i)
\Bigr)
\Lambda^{2.5+\delta}v_i
\\&\quad\quad
- \sum_{m=1}^{2}\int \Lambda^{1.5+\delta}(v_m\tda_{jm}\partial_{j}v_i) \Lambda^{2.5+\delta} v_i
- \int \Lambda^{1.5+\delta} \bigl( (v_3-\psi_t)\partial_{3}v_i \bigr) \Lambda^{2.5+\delta} v_i
\\&\quad\quad
-\int \Lambda^{2+\delta}(\tda_{ki}\partial_{k}q)\Lambda^{2+\delta} v_i
+ \bar I
\\&\quad
= I_1 + I_2 + I_3 + I_4 + I_5 + \bar I
.
\end{split}
\label{EQ356}
\end{align}
Above and in the sequel, unless indicated otherwise, all integrals and norms are assumed to be over $\Omega$.
For the first two terms, we have
\begin{align}
\begin{split}
I_1 + I_2
&\lesssim
\Vert J_t\Vert_{L^\infty} \Vert v\Vert_{H^{1.5+\delta}} \Vert v\Vert_{H^{2.5+\delta}}
+ \Vert \Lambda^{1.5+\delta} J\Vert_{L^6} \Vert v_{t}\Vert_{L^3} \Vert \Lambda^{2.5+\delta}v\Vert_{L^2}
+ \Vert \Lambda J\Vert_{L^\infty} \Vert \Lambda^{0.5+\delta} v_{t}\Vert_{L^2} \Vert \Lambda^{2.5+\delta} v\Vert_{L^2}
\\&
\lesssim
\Vert J_t\Vert_{H^{1.5+\delta}} \Vert v\Vert_{H^{1.5+\delta}} \Vert v\Vert_{H^{2.5+\delta}}
+ \Vert J\Vert_{H^{2.5+\delta}} \Vert v_{t}\Vert_{H^{0.5}} \Vert v\Vert_{H^{2.5+\delta}}
+ \Vert J\Vert_{H^{2.5+\delta}} \Vert v_{t}\Vert_{H^{0.5+\delta}} \Vert v\Vert_{H^{2.5+\delta}}
\\&
\leq
P(\Vert v\Vert_{H^{2.5+\delta}}, \Vert v_{t}\Vert_{H^{0.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)} )
\\&
\leq
P(\Vert v\Vert_{H^{2.5+\delta}}, \Vert q\Vert_{H^{1.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)} )
,
\end{split}
\label{EQ357}
\end{align}
where we used \eqref{EQ344} and \eqref{EQ345} in the third inequality and \eqref{EQ358} in the fourth.
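For the reader's convenience, the passage from the first to the second bound may be justified by the three-dimensional Sobolev embeddings (a sketch, under the standing assumption that $\Omega$ supports the usual embeddings):
\begin{equation*}
\Vert \Lambda^{1.5+\delta} J\Vert_{L^{6}}
\lesssim
\Vert \Lambda^{1.5+\delta} J\Vert_{H^{1}}
\lesssim
\Vert J\Vert_{H^{2.5+\delta}}
,
\qquad
\Vert v_{t}\Vert_{L^{3}}
\lesssim
\Vert v_{t}\Vert_{H^{0.5}}
,
\qquad
\Vert \Lambda J\Vert_{L^{\infty}}
\lesssim
\Vert J\Vert_{H^{2.5+\delta}}
,
\end{equation*}
the last one by $H^{1.5+\delta}\hookrightarrow L^{\infty}$, together with $\Vert J_t\Vert_{L^\infty}\lesssim \Vert J_t\Vert_{H^{1.5+\delta}}$.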
For $I_3$, we write
\begin{align}
\begin{split}
I_3
&\lesssim
\bigl\Vert v_m \tda_{jm}\partial_{j} v_i \bigr\Vert_{H^{1.5+\delta}}
\Vert v \Vert_{H^{2.5+\delta}}
\lesssim
\Vert v\Vert_{H^{2.5+\delta}}^3 \Vert b\Vert_{H^{3.5+\delta}}
\leq
P(\Vert v\Vert_{H^{2.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)} )
,
\end{split}
\label{EQ359}
\end{align}
by~\eqref{EQ348}.
In the last step, we used the multiplicative Sobolev inequality
\begin{equation}
\Vert a b c\Vert_{H^{k}}
\lesssim
\Vert a\Vert_{H^{l}} \Vert b\Vert_{H^{m}} \Vert c\Vert_{H^{n}}
,
\label{EQ360}
\end{equation}
where $l,m,n\geq k\geq 0$, which holds when $l+m+n> 3+k$, or when $l+m+n= 3+k$ and at least two of the parameters $l,m,n$ are strictly greater than~$k$.
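For instance, one admissible choice of exponents in \eqref{EQ360} for the product in \eqref{EQ359} (a sketch; other splittings are possible) is $k=1.5+\delta$, with $l=2.5+\delta$ for $v_m$, $m=3.5+\delta$ for the coefficient $\tda_{jm}$, and $n=1.5+\delta$ for $\partial_{j}v_i$; then $l,m,n\geq k$ and
\begin{equation*}
l+m+n
=
7.5+3\delta
>
4.5+\delta
=
3+k
,
\end{equation*}
so $\Vert v_m \tda_{jm}\partial_{j} v_i\Vert_{H^{1.5+\delta}} \lesssim \Vert v\Vert_{H^{2.5+\delta}}^{2} \Vert b\Vert_{H^{3.5+\delta}}$, which, combined with the remaining factor $\Vert v\Vert_{H^{2.5+\delta}}$, gives the middle bound in \eqref{EQ359}.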
Next, we treat $I_4$ similarly to $I_3$ and write
\begin{align}
\begin{split}
I_4
&\lesssim
\bigl\Vert (v_3-\psi_t)\partial_{3}v \bigr\Vert_{H^{1.5+\delta}}
\Vert v \Vert_{H^{2.5+\delta}}
\\&
\lesssim
\bigl\Vert v_3\partial_{3}v \bigr\Vert_{H^{1.5+\delta}}
\Vert v \Vert_{H^{2.5+\delta}}
+
\bigl\Vert \partial_{t}\eta_3 \partial_{3}v \bigr\Vert_{H^{1.5+\delta}}
\Vert v \Vert_{H^{2.5+\delta}}
\\&
\lesssim
\Vert v\Vert_{H^{2.5+\delta}}^3
+ \Vert \eta_{t}\Vert_{H^{2.5+\delta}} \Vert v\Vert_{H^{2.5+\delta}}^2
\leq
P(\Vert v\Vert_{H^{2.5+\delta}}, \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)} )
,
\end{split}
\label{EQ361}
\end{align}
where we used \eqref{EQ360}, and a similar multiplicative Sobolev inequality for two factors,
\begin{equation}
\Vert a b \Vert_{H^{k}}
\lesssim
\Vert a\Vert_{H^{l}} \Vert b\Vert_{H^{m}}
,
\label{EQ362}
\end{equation}
where either $l,m\geq k$ and $l+m> k+1.5$, or $l,m> k\geq0$ and $l+m= k+1.5$.
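In the application above, one may take (again only as a sketch) $k=1.5+\delta$, $l=2.5+\delta$, and $m=1.5+\delta$ in \eqref{EQ362}, applied to the pairs $(v_3,\partial_{3}v)$ and $(\partial_{t}\eta_3,\partial_{3}v)$; then $l,m\geq k$ and
\begin{equation*}
l+m
=
4+2\delta
>
3+\delta
=
k+1.5
,
\end{equation*}
which yields $\Vert v_3\partial_{3}v\Vert_{H^{1.5+\delta}} \lesssim \Vert v\Vert_{H^{2.5+\delta}}^{2}$ and $\Vert \partial_{t}\eta_3\partial_{3}v\Vert_{H^{1.5+\delta}} \lesssim \Vert \eta_{t}\Vert_{H^{2.5+\delta}} \Vert v\Vert_{H^{2.5+\delta}}$, as used in the third inequality of \eqref{EQ361}.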
Finally, we treat the pressure term~$I_5$, for which we use integration by parts in $x_k$ to rewrite it as
\begin{align}
\begin{split}
I_5
&=
-\int \Lambda^{2+\delta}(\tda_{ki}\partial_{k}q)\Lambda^{2+\delta} v_i
=
\int \Lambda^{2+\delta}(\tda_{ki}q)\Lambda^{2+\delta} \partial_{k} v_i
- \int_{\Gamma_1} \Lambda^{2+\delta}(\tda_{3i}q)\Lambda^{2+\delta} v_i
\\&
=
\int \Lambda^{1.5+\delta}(\tda_{ki}q)\Lambda^{2.5+\delta} \partial_{k} v_i
- \int_{\Gamma_1} \Lambda^{2+\delta}(\tda_{3i}q)\Lambda^{2+\delta} v_i
\\&
= I_{51} + I_{52}
,
\end{split}
\label{EQ363}
\end{align}
where we used the Piola identity \eqref{EQ315} and $N=(0,0,1)$ on $\Gamma_1$.
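We recall that, in the present notation, the Piola identity \eqref{EQ315} asserts that the columns of the cofactor matrix $\tda=Ja$ are divergence free,
\begin{equation*}
\partial_{k}\tda_{ki}=0
,
\qquad i=1,2,3
,
\end{equation*}
so that $\tda_{ki}\partial_{k}q=\partial_{k}(\tda_{ki}q)$; this is what allows the integration by parts above to produce no interior terms other than the one displayed.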
Note that the boundary integral over $\Gamma_0$ vanishes since
\begin{align}
\begin{split}
\int_{\Gamma_0} \Lambda^{2+\delta}(\tda_{ki}q)\Lambda^{2+\delta} v_i N_k
=
\int_{\Gamma_0} \Lambda^{2+\delta}(\tda_{3i}q)\Lambda^{2+\delta} v_i
=
\int_{\Gamma_0} \Lambda^{2+\delta}(\tda_{33}q)\Lambda^{2+\delta} v_3
=
0
,
\end{split}
\label{EQ364}
\end{align}
where we used \eqref{EQ314} and \eqref{EQ309}$_3$ in the second step, and \eqref{EQ355} in the third.
For the first term in \eqref{EQ363}, we have
\begin{align}
\begin{split}
I_{51}
&=
\int \tda_{ki}\Lambda^{1.5+\delta}q\Lambda^{2.5+\delta} \partial_{k}v_i
+\int
\Bigl(\Lambda^{1.5+\delta}(\tda_{ki}q) - \tda_{ki}\Lambda^{1.5+\delta}q \Bigr)
\Lambda^{2.5+\delta} \partial_{k}v_i
\\&
=
- \int
\Bigl(
\Lambda^{2.5+\delta} \partial_{k}( \tda_{ki} v_i )
- \tda_{ki} \Lambda^{2.5+\delta} \partial_{k}v_i
\Bigr)
\Lambda^{1.5+\delta}q
+\int
\Bigl(\Lambda^{1.5+\delta}(\tda_{ki}q) - \tda_{ki}\Lambda^{1.5+\delta}q \Bigr)
\Lambda^{2.5+\delta} \partial_{k}v_i
\\&
= I_{511} + I_{512}
,
\end{split}
\label{EQ365}
\end{align}
where we used \eqref{EQ354}$_2$ and the Piola identity
\eqref{EQ315} in the second equality.
For the first term, we use the Kato--Ponce commutator inequality to write
\begin{align}
\begin{split}
I_{511}
&\lesssim
\Vert b\Vert_{H^{3.5+\delta}} \Vert v \Vert_{L^{\infty}} \Vert q\Vert_{H^{1.5+\delta}}
+ \Vert b\Vert_{W^{1,\infty}} \Vert v \Vert_{H^{2.5+\delta}} \Vert q\Vert_{H^{1.5+\delta}}
\leq
P(\Vert v \Vert_{H^{2.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert q\Vert_{H^{1.5+\delta}})
.
\end{split}
\label{EQ366}
\end{align}
The term $I_{512}$ cannot be treated with the Kato--Ponce inequality directly since $v$ is not bounded in $H^{3.5+\delta}$.
Instead, we use $\Lambda^2= I -\sum_{m=1}^{2}\partial_{m}^2$, by \eqref{EQ350}, and write
\begin{align}
\begin{split}
I_{512}
&=
- \sum_{m=1}^{2} \int
\partial_{m}\Bigl(\Lambda^{1.5+\delta}(\tda_{ki}q) - \tda_{ki}\Lambda^{1.5+\delta}q \Bigr)
\Lambda^{0.5+\delta} \partial_{m}\partial_{k}v_i
+ \int
\Bigl(\Lambda^{1.5+\delta}(\tda_{ki}q) - \tda_{ki}\Lambda^{1.5+\delta}q \Bigr)
\Lambda^{0.5+\delta} \partial_{k}v_i
\\&
=
- \sum_{m=1}^{2} \int
\Bigl(\partial_{m}\Lambda^{1.5+\delta}(\tda_{ki}q) - \tda_{ki}\Lambda^{1.5+\delta}\partial_{m}q \Bigr)
\partial_{m}\Lambda^{0.5+\delta} \partial_{k}v_i
\\&\quad
+ \sum_{m=1}^{2} \int
\partial_{m}\tda_{ki}\Lambda^{1.5+\delta}q
\,\Lambda^{0.5+\delta} \partial_{m}\partial_{k}v_i
+ \int
\Bigl(\Lambda^{1.5+\delta}(\tda_{ki}q) - \tda_{ki}\Lambda^{1.5+\delta}q \Bigr)
\Lambda^{0.5+\delta} \partial_{k}v_i
.
\end{split}
\label{EQ367}
\end{align}
The first term is bounded using the Kato--Ponce commutator estimate, while the second and the third terms are estimated directly.
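For orientation, a commonly used $L^{2}$ form of the Kato--Ponce commutator estimate reads: for $s>0$ and sufficiently regular $f$ and $g$,
\begin{equation*}
\Vert \Lambda^{s}(fg) - f\,\Lambda^{s}g\Vert_{L^{2}}
\lesssim
\Vert \nabla f\Vert_{L^{\infty}} \Vert \Lambda^{s-1}g\Vert_{L^{2}}
+ \Vert \Lambda^{s}f\Vert_{L^{2}} \Vert g\Vert_{L^{\infty}}
.
\end{equation*}
We record this only as a sketch; the precise tangential variant needed for $I_{511}$ above and for the first term in \eqref{EQ367} is assumed to be the one from the preliminaries, but it is of exactly this type, with $f$ a component of $\tda$ and $g$ built from $v$ or~$q$.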
Thus,
\begin{align}
\begin{split}
I_{512}
&\lesssim
\Vert b\Vert_{H^{3+\delta}} \Vert q \Vert_{H^{1}} \Vert v\Vert_{H^{2.5+\delta}}
+ \Vert b\Vert_{H^{2.5+\delta}} \Vert q \Vert_{H^{1.5+\delta}} \Vert v\Vert_{H^{2.5+\delta}}
\leq
P(\Vert v \Vert_{H^{2.5+\delta}}, \Vert q \Vert_{H^{1.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)})
.
\end{split}
\label{EQ368}
\end{align}
The boundary term $I_{52}$ may be rewritten as
\begin{align}
\begin{split}
I_{52}
&=
- \int_{\Gamma_1} \Lambda^{1+\delta} q \Lambda( \tda_{3i}\Lambda^{2+\delta} v_i)
- \int_{\Gamma_1}
\Bigl( \Lambda^{2+\delta}(\tda_{3i}q) - \tda_{3i} \Lambda^{2+\delta}q \Bigr)
\Lambda^{2+\delta} v_i
,
\end{split}
\label{EQ369}
\end{align}
which we further rewrite as
\begin{align}
\begin{split}
I_{52}
&=
- \int_{\Gamma_1} \Lambda^{1+\delta} q \tda_{3i}\Lambda^{3+\delta} v_i
- \int_{\Gamma_1} \Lambda^{1+\delta} q
\Bigl( \Lambda(\tda_{3i} \Lambda^{2+\delta}v_i) - \tda_{3i} \Lambda^{3+\delta}v_i \Bigr)
- \int_{\Gamma_1}
\Bigl( \Lambda^{2+\delta}(\tda_{3i}q) - \tda_{3i} \Lambda^{2+\delta}q \Bigr)
\Lambda^{2+\delta} v_i
\\&
=
- \int_{\Gamma_1} \Lambda^{1+\delta} q \Lambda^{3+\delta}(\tda_{3i} v_i)
+ \int_{\Gamma_1} \Lambda^{1+\delta} q
\Bigl(\Lambda^{3+\delta}(\tda_{3i} v_i) - \tda_{3i} \Lambda^{3+\delta}v_i \Bigr)
\\&\quad
- \int_{\Gamma_1} \Lambda^{1+\delta} q
\Bigl( \Lambda(\tda_{3i} \Lambda^{2+\delta}v_i) - \tda_{3i} \Lambda^{3+\delta}v_i \Bigr)
- \int_{\Gamma_1}
\Bigl( \Lambda^{2+\delta}(\tda_{3i}q) - \tda_{3i} \Lambda^{2+\delta}q \Bigr)
\Lambda^{2+\delta} v_i
\\&
= I_{521} + I_{522} + I_{523} + I_{524}
.
\end{split}
\label{EQ370}
\end{align}
The first term is the leading one and, using \eqref{EQ321}, it may be rewritten as
\begin{align}
\begin{split}
I_{521}
&=
- \int_{\Gamma_1} \Lambda^{1+\delta} q \Lambda^{3+\delta} w_{t}
=
- \int_{\Gamma_1} q\Lambda^{2(2+\delta)}w_{t}
,
\end{split}
\label{EQ371}
\end{align}
which cancels with the second term on the right-hand side of \eqref{EQ353} upon adding \eqref{EQ356}, integrated in time, to~\eqref{EQ353}.
The next three terms are commutators.
For the first one, we have
\begin{align}
\begin{split}
I_{522}
&\lesssim
\Vert \Lambda^{1+\delta}q\Vert_{L^2(\Gamma_1)}
\Vert \Lambda^{3+\delta}\tda\Vert_{L^{2}(\Gamma_1)}
\Vert v\Vert_{L^{\infty}(\Gamma_1)}
+
\Vert \Lambda^{1+\delta}q\Vert_{L^2(\Gamma_1)}
\Vert \Lambda \tda\Vert_{L^{\infty}(\Gamma_1)}
\Vert \Lambda^{2+\delta}v\Vert_{L^{2}(\Gamma_1)}
\\&
\lesssim
\Vert q\Vert_{H^{1+\delta}(\Gamma_1)}
\Vert \tda\Vert_{H^{3+\delta}(\Gamma_1)}
\Vert v\Vert_{H^{2}(\Gamma_1)}
+
\Vert q\Vert_{H^{1+\delta}(\Gamma_1)}
\Vert \tda\Vert_{H^{2+\delta}(\Gamma_1)}
\Vert v\Vert_{H^{2+\delta}(\Gamma_1)}
\\&
\leq
P( \Vert v\Vert_{H^{2.5+\delta}},
\Vert q\Vert_{H^{1.5+\delta}},
\Vert w\Vert_{H^{4+\delta}(\Gamma_1)} )
,
\end{split}
\llabel{EQ372}
\end{align}
using the trace inequalities.
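The trace inequalities referred to here are the standard bounds
\begin{equation*}
\Vert \phi\Vert_{H^{s-1/2}(\Gamma_1)}
\lesssim
\Vert \phi\Vert_{H^{s}}
\comma s>\frac12
,
\end{equation*}
so that, in particular, $\Vert v\Vert_{H^{2+\delta}(\Gamma_1)}\lesssim \Vert v\Vert_{H^{2.5+\delta}}$ and $\Vert q\Vert_{H^{1+\delta}(\Gamma_1)}\lesssim \Vert q\Vert_{H^{1.5+\delta}}$, which is how the boundary norms of $v$ and $q$ are absorbed into the polynomial~$P$.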
The second commutator term in \eqref{EQ370} is estimated similarly as
\begin{align}
\begin{split}
I_{523}
&\lesssim
\Vert \Lambda^{1+\delta}q\Vert_{L^2(\Gamma_1)}
\Vert \Lambda \tda\Vert_{L^{\infty}(\Gamma_1)}
\Vert \Lambda^{2+\delta} v\Vert_{L^{2}(\Gamma_1)}
\lesssim
\Vert q\Vert_{H^{1+\delta}(\Gamma_1)}
\Vert \tda\Vert_{H^{2+\delta}(\Gamma_1)}
\Vert v\Vert_{H^{2+\delta}(\Gamma_1)}
\\&
\leq
P( \Vert v\Vert_{H^{2.5+\delta}},
\Vert q\Vert_{H^{1.5+\delta}},
\Vert w\Vert_{H^{4+\delta}(\Gamma_1)} )
,
\end{split}
\llabel{EQ373}
\end{align}
while for the last commutator term $I_{524}$, we have
\begin{align}
\begin{split}
I_{524}
&\lesssim
\Vert \Lambda^{2+\delta}\tda\Vert_{L^2(\Gamma_1)}
\Vert q\Vert_{L^{\infty}(\Gamma_1)}
\Vert \Lambda^{2+\delta} v\Vert_{L^{2}(\Gamma_1)}
+
\Vert \Lambda \tda\Vert_{L^\infty(\Gamma_1)}
\Vert \Lambda^{1+\delta}q\Vert_{L^{2}(\Gamma_1)}
\Vert \Lambda^{2+\delta} v\Vert_{L^{2}(\Gamma_1)}
\\&
\lesssim
\Vert \tda\Vert_{H^{2+\delta}(\Gamma_1)}
\Vert q\Vert_{H^{1+\delta}(\Gamma_1)}
\Vert v\Vert_{H^{2+\delta}(\Gamma_1)}
+
\Vert \tda\Vert_{H^{2+\delta}(\Gamma_1)}
\Vert q\Vert_{H^{1+\delta}(\Gamma_1)}
\Vert v\Vert_{H^{2+\delta}(\Gamma_1)}
\\&
\leq
P( \Vert v\Vert_{H^{2.5+\delta}},
\Vert q\Vert_{H^{1.5+\delta}},
\Vert w\Vert_{H^{4+\delta}(\Gamma_1)} )
.
\end{split}
\llabel{EQ374}
\end{align}
Now, we add \eqref{EQ353} and \eqref{EQ356}, integrated in time, and combine them with all the estimates above on the terms
$I_1$, $I_2$, $I_3$, $I_4$, $I_5$, and $\bar I$, obtaining
\begin{align}
\begin{split}
&
\Vert \Delta_2\Lambda^{2+\delta} w\Vert_{L^2(\Gamma_1)}^2
+ \Vert \Lambda^{2+\delta} w_{t}\Vert_{L^2(\Gamma_1)}^2
+ \nu \int_{0}^{t} \Vert \nabla_2 \Lambda^{2+\delta} w_{t} \Vert_{L^2(\Gamma_1)}^2 \, ds
\\&\indeq
\lesssim
\Vert \Lambda^{2+\delta} w_{t}(0)\Vert_{L^2(\Gamma_1)}^2
- \int J \Lambda^{1.5+\delta} v \Lambda^{2.5+\delta} v
+ \int J \Lambda^{1.5+\delta} v_0 \Lambda^{2.5+\delta} v_0
\\&\indeq\indeq
+ \int_{0}^{t}
P( \Vert v\Vert_{H^{2.5+\delta}},
\Vert q\Vert_{H^{1.5+\delta}},
\Vert w\Vert_{H^{4+\delta}(\Gamma_1)},
\Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)} )\,ds
.
\end{split}
\llabel{EQ375}
\end{align}
Next, we estimate the second term on the right-hand side as
\begin{align}
\begin{split}
- \int J \Lambda^{1.5+\delta} v_i \Lambda^{2.5+\delta} v_i
&
\lesssim
\Vert J\Vert_{L^\infty}
\Vert \Lambda^{1.5+\delta}v\Vert_{L^2}
\Vert \Lambda^{2.5+\delta}v\Vert_{L^2}
\lesssim
\Vert \Lambda^{1.5+\delta}v\Vert_{L^2}
\Vert \Lambda^{2.5+\delta}v\Vert_{L^2}
\\&
\lesssim
\Vert v\Vert_{L^2}^{1/(2.5+\delta)}
\Vert v\Vert_{H^{2.5+\delta}}^{(4+2\delta)/(2.5+\delta)}
,
\end{split}
\label{EQ376}
\end{align}
by interpolation.
Using the equality
\begin{equation}
\Delta_2\Lambda^{2+\delta} w
=
\Lambda^{2+\delta} w(0)
- \Lambda^{4+\delta} w(t)
+ \int_{0}^{t} \Lambda^{2+\delta}w_{t} \,ds
\llabel{EQ377}
\end{equation}
on the first term on the left-hand side of \eqref{EQ353},
we conclude the proof of~\eqref{EQ3326}.
\end{proof}
\par
\subsection{Pressure estimates}
\label{sec04}
In this section, we prove the following pressure estimate.
\par
\cole
\begin{Lemma}
\label{L03}
Under the conditions of Theorem~\ref{T01}, we have
\begin{align}
\begin{split}
\Vert q\Vert_{H^{1.5+\delta}}
\leq
P( \Vert v\Vert_{H^{2.5+\delta}},
\Vert w\Vert_{H^{4+\delta}(\Gamma_1)},
\Vert w_{t} \Vert_{H^{2+\delta}(\Gamma_1)} )
.
\end{split}
\label{EQ378}
\end{align}
\end{Lemma}
\colb
\par
Applying $\tda_{ji}\partial_{j}$ to the Euler equations \eqref{EQ317}$_{1}$ and using the Piola identity \eqref{EQ315}, we get
\begin{align}
\begin{split}
&\partial_{j}(\tda_{ji} a_{ki}\partial_{k}q)
=
- \partial_{j}(\tda_{ji}\partial_{t}v_i)
- \partial_{j}
\biggl(
\sum_{m=1}^{2}
\tda_{ji} v_m a_{km} \partial_{k} v_i
\biggr)
- \partial_{j}(a_{ji} (v_3-\psi_t)\partial_{3}v_i)
,
\end{split}
\label{EQ379}
\end{align}
where we used $b_{ji} / \partial_{3} \psi = a_{ji}$ in the last term.
Recall that we use the summation convention over repeated indices unless indicated otherwise (as, for example, in \eqref{EQ379}).
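The Piola identity \eqref{EQ315} enters in the form $\partial_{j}\tda_{ji}=0$, which allows each term to be written in divergence form, since then
\begin{equation*}
\tda_{ji}\partial_{j}F_{i} = \partial_{j}(\tda_{ji}F_{i})
\end{equation*}
for any sufficiently regular field $F$; this is how the right-hand side of \eqref{EQ379} was obtained from \eqref{EQ317}$_{1}$.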
Using $\partial_{j}(\tda_{ji}\partial_{t}v_i)=-\partial_{j}(\partial_{t}\tda_{ji} v_i)$, which follows from \eqref{EQ354}$_2$, we get
\begin{align}
\begin{split}
&\partial_{j}(\tda_{ji} a_{ki}\partial_{k}q)
=
\partial_{j}(\partial_{t}\tda_{ji} v_i)
- \partial_{j}
\biggl(
\sum_{m=1}^{2}
\tda_{ji} v_m a_{km} \partial_{k} v_i
\biggr)
- \partial_{j}(a_{ji} (v_3-\psi_t)\partial_{3}v_i)
= \partial_{j} f_j
\inon{in $\Omega$}
.
\end{split}
\label{EQ380}
\end{align}
To obtain the boundary condition for the pressure on $\Gamma_0\cup\Gamma_1$, we test \eqref{EQ317} with $\tda_{3i}$, obtaining
\begin{align}
\begin{split}
&
\tda_{3i}a_{ki}\partial_{k}q
=
- \tda_{3i}\partial_{t}v_i
- \tda_{3i} v_1 a_{j1} \partial_{j}v_i
- \tda_{3i} v_2 a_{j2} \partial_{j}v_i
- a_{3i} (v_3-\psi_t) \partial_{3} v_i
\inon{on $\Gamma_0\cup\Gamma_1$}
,
\end{split}
\label{EQ381}
\end{align}
where we again employed $b_{ji} / \partial_{3} \psi = a_{ji}$ in the last term.
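For later use, note that the left-hand sides of \eqref{EQ380} and \eqref{EQ381} already have the structure required by the elliptic Lemma~\ref{L08} below, with coefficient matrix
\begin{equation*}
d_{jk} = \tda_{ji} a_{ki}
\end{equation*}
(up to the orientation of the normal on each boundary component), so that the smallness condition \eqref{EQ386} amounts to $\Vert \tda a^{T} - I\Vert_{L^{\infty}}\leq \epsilon_0$; this is the form in which the elliptic estimate is invoked in the proof of Lemma~\ref{L03}.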
On $\Gamma_1$, we use \eqref{EQ321} and \eqref{EQ322} to rewrite the first term on the right-hand side of \eqref{EQ381} as
\begin{align}
\begin{split}
-\tda_{3i}\partial_{t} v_i
= - \partial_{t}(\tda_{3i} v_i)
+ \partial_{t}\tda_{3i} v_i
= - w_{tt}
+ \partial_{t}\tda_{3i} v_i
= \Delta_2^2 w - \nu \Delta_2 w_{t} - q
+ \partial_{t}\tda_{3i} v_i
.
\end{split}
\llabel{EQ382}
\end{align}
Thus, on $\Gamma_1$, the boundary condition \eqref{EQ381} becomes a Robin boundary condition
\begin{align}
\begin{split}
\tda_{3i}a_{ki}\partial_{k}q + q
=
\Delta_2^2 w - \nu \Delta_2 w_{t}
+ \partial_{t}\tda_{3i}v_i
- \tda_{3i} v_1 a_{j1} \partial_{j}v_i
- \tda_{3i} v_2 a_{j2} \partial_{j}v_i
- a_{3i} (v_3-\psi_t) \partial_{3} v_i
= g_1
\inon{on $\Gamma_1$}
.
\end{split}
\label{EQ383}
\end{align}
On $\Gamma_0$, we have $a=I$, and then the first term on the right-hand side of \eqref{EQ381} vanishes, and we get
\begin{align}
\begin{split}
\tda_{3i}a_{ki}\partial_{k}q
=
- \tda_{3i} v_1 a_{j1} \partial_{j}v_i
- \tda_{3i} v_2 a_{j2} \partial_{j}v_i
- a_{3i} (v_3-\psi_t) \partial_{3} v_i
= g_0
\inon{on $\Gamma_0$}
.
\end{split}
\label{EQ384}
\end{align}
The boundary value problem for the pressure can be simplified, as we show in Remark~\ref{R01} below.
The form of the equations above suffices for the purpose of obtaining the a~priori control, but it is not adequate for the construction.
\par
To estimate the pressure, we need the following statement on the elliptic regularity for the Robin/Neumann problem.
\par
\cole
\begin{Lemma}
\label{L08}
Assume that $d\in W^{1,\infty}(\Omega)$.
Let $1\leq l\leq 2$, and suppose that $u$ is an $H^{l}$ solution of
\begin{align}
\begin{split}
&\partial_{i}(d_{ij}\partial_{j}u) = \div f
\inon{in $\Omega$}
,
\\&
d_{mk}\partial_{k}u N_{m} + u = g_1
\inon{on $\Gamma_1$}
,
\\&
d_{mk}\partial_{k}u N_{m} = g_0
\inon{on $\Gamma_0$}
.
\end{split}
\label{EQ385}
\end{align}
If
\begin{equation}
\Vert d-I\Vert_{L^\infty}
\leq \epsilon_0
,
\label{EQ386}
\end{equation}
where $\epsilon_0>0$ is sufficiently small, then
\begin{equation}
\Vert u\Vert_{H^{l}}
\lesssim
\Vert f\Vert_{H^{l-1}}
+ \Vert g_1\Vert_{H^{l-3/2}(\Gamma_1)}
+ \Vert g_0\Vert_{H^{l-3/2}(\Gamma_0)}
.
\label{EQ387}
\end{equation}
\end{Lemma}
\colb
\par
\begin{proof}[Proof of Lemma~\ref{L08}]
By interpolation, it is sufficient to establish the inequality \eqref{EQ387} for $l=1$ and $l=2$.
First let $l=1$.
Testing \eqref{EQ385}$_1$ with $-u$ and integrating by parts, we obtain
\begin{align}
\begin{split}
\int d_{ij}\partial_{i}u \, \partial_{j} u
+ \int_{\Gamma_1} u^2
=
- \int u \div f
+ \int_{\Gamma_0} g_0 u
+ \int_{\Gamma_1} g_1 u
.
\end{split}
\llabel{EQ388}
\end{align}
Applying the $H^{-1/2}$-$H^{1/2}$ duality on the boundary, we obtain the result for $l=1$.
The inequality \eqref{EQ387} for $l=2$ is classical for $d_{mk}=\delta_{mk}$, and then we simply use the perturbation argument and~\eqref{EQ386}.
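For instance, one way to carry out this perturbation step (sketched here for completeness) is to rewrite \eqref{EQ385}$_1$ as
\begin{equation*}
\Delta u = \div f - \partial_{i}\bigl( (d_{ij}-\delta_{ij})\partial_{j}u \bigr)
\inon{in $\Omega$}
,
\end{equation*}
with the corresponding modification of the boundary data, apply the constant-coefficient $H^{2}$ estimate, and use
$\Vert (d-I)\nabla u\Vert_{H^{1}} \lesssim \epsilon_0 \Vert u\Vert_{H^{2}} + \Vert d\Vert_{W^{1,\infty}} \Vert u\Vert_{H^{1}}$,
together with \eqref{EQ386} and the case $l=1$, to absorb the highest-order contribution for $\epsilon_0$ sufficiently small.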
\end{proof}
\par
\begin{proof}[Proof of Lemma~\ref{L03}]
We apply the elliptic estimate \eqref{EQ387} in $H^{1.5+\delta}$ to the equation \eqref{EQ380} with the boundary conditions \eqref{EQ383}--\eqref{EQ384}, leading to
\begin{align}
\begin{split}
\Vert q\Vert_{H^{1.5+\delta}}
\lesssim
\Vert f\Vert_{H^{0.5+\delta}}
+ \Vert g_0\Vert_{H^{\delta}(\Gamma_0)}
+ \Vert g_1\Vert_{H^{\delta}(\Gamma_1)}
.
\end{split}
\label{EQ389}
\end{align}
For the interior term, we have
\begin{align}
\begin{split}
\Vert f\Vert_{H^{0.5+\delta}}
&\lesssim
\sum_{j=1}^{3}\Vert \partial_{t}\tda_{ji} v_i\Vert_{H^{0.5+\delta}}
+ \sum_{j=1}^{3}\sum_{m=1}^{2} \Vert \tda_{ji} v_m a_{km}\partial_{k}v_i\Vert_{H^{0.5+\delta}}
+ \sum_{j=1}^{3}\Vert a_{ji} (v_3-\psi_t)\partial_{3}v_i\Vert_{H^{0.5+\delta}}
\\&
\leq
P( \Vert v\Vert_{H^{2.5+\delta}},
\Vert a\Vert_{H^{3.5+\delta}},
\Vert \tda\Vert_{H^{3.5+\delta}},
\Vert \tda_t\Vert_{H^{1.5+\delta}},
\Vert \psi_t\Vert_{H^{2.5+\delta}} )
.
\end{split}
\label{EQ390}
\end{align}
On the other hand, for the boundary terms, we have
\begin{align}
\begin{split}
&\Vert g_0\Vert_{H^{\delta}(\Gamma_0)}
+ \Vert g_1\Vert_{H^{\delta}(\Gamma_1)}
\\&\indeq
\lesssim
\Vert w\Vert_{H^{4+\delta}(\Gamma_1)}
+ \Vert w_{t} \Vert_{H^{2+\delta}(\Gamma_1)}
+ \Vert \tda_t\Vert_{H^{0.5+\delta}(\partial\Omega)}
\Vert v\Vert_{H^{0.5+\delta}(\partial\Omega)}
\\&\indeq\indeq
+ \Vert \tda\Vert_{H^{1+\delta}(\partial\Omega)}
\Vert v\Vert_{H^{1+\delta}(\partial\Omega)}
\Vert a\Vert_{H^{1+\delta}(\partial\Omega)}
\Vert \nabla v\Vert_{H^{\delta}(\partial\Omega)}
\\&\indeq\indeq
+ \Vert a\Vert_{H^{1+\delta}(\partial\Omega)}
( \Vert v\Vert_{H^{1+\delta}(\partial\Omega)}
+ \Vert \psi_t\Vert_{H^{1+\delta}(\partial\Omega)} )
\Vert \nabla v\Vert_{H^{\delta}(\partial\Omega)}
\\&\indeq
\leq
P( \Vert w\Vert_{H^{4+\delta}(\Gamma_1)},
\Vert w_{t} \Vert_{H^{2+\delta}(\Gamma_1)},
\Vert \tda\Vert_{H^{3.5+\delta}},
\Vert \tda_t\Vert_{H^{1.5+\delta}},
\Vert \psi_t\Vert_{H^{2.5+\delta}},
\Vert v\Vert_{H^{2.5+\delta}} )
,
\end{split}
\label{EQ391}
\end{align}
where $\partial\Omega=\Gamma_0\cup \Gamma_1$.
By combining \eqref{EQ389}--\eqref{EQ391} and using \eqref{EQ348}--\eqref{EQ349}, we obtain~\eqref{EQ378}.
\end{proof}
\par
\begin{Remark}
\label{R01}
{\rm
Note that in the boundary value problem for the pressure, the situation is very different from the pressure estimates for the classical Euler equations.
It is crucial in the construction of solutions below that the equations \eqref{EQ380}, \eqref{EQ383}, and \eqref{EQ384} may be simplified so that the highest-order terms in $v$ are more regular by one degree.
\par
First, the right-hand side of the PDE for the pressure, \eqref{EQ380}, may be rewritten as
\begin{align}
\begin{split}
&
\partial_{j}(\partial_{t}\tda_{ji} v_i)
- \partial_{j}
\left(
\sum_{m=1}^{2}
\tda_{ji} v_m a_{km} \partial_{k} v_i
\right)
- \partial_{j}(J^{-1} (v_3-\psi_t)b_{ji}\partial_{3}v_i)
\\&\indeq
=
\partial_{j}(\partial_{t}\tda_{ji} v_i)
- \sum_{m=1}^{2}
\tda_{ji} \partial_{j}(v_m a_{km}) \partial_{k} v_i
- b_{ji} \partial_{j}(J^{-1}(v_3-\psi_t))\partial_{3}v_i
\\&\indeq\indeq
- \sum_{m=1}^{2}
v_m a_{km} \tda_{ji} \partial_{jk}v_i
- J^{-1}(v_3-\psi_t)b_{ji}\partial_{3}\partial_{j}v_i
,
\end{split}
\label{EQ3270}
\end{align}
which holds in $\Omega$.
Therefore, using also the divergence-free condition, we obtain
\begin{align}
\begin{split}
\partial_{j}(\tda_{ji} a_{ki}\partial_{k}q)
& =
\partial_{j}(\partial_{t}\tda_{ji} v_i)
- \sum_{m=1}^{2}
\tda_{ji} \partial_{j}(v_m a_{km}) \partial_{k} v_i
- b_{ji} \partial_{j}(J^{-1}(v_3-\psi_t))\partial_{3}v_i
\\&\indeq\indeq
+ \sum_{m=1}^{2}
v_m a_{km} \partial_{k}\tda_{ji} \partial_{j}v_i
+ J^{-1}(v_3-\psi_t)\partial_{3}b_{ji}\partial_{j}v_i
\inon{in $\Omega$}
.
\end{split}
\label{EQ3341}
\end{align}
Next, on $\Gamma_0$, we have $\psi=0$, whence $b_{3i}=\delta_{3i}$, and by \eqref{EQ320}, the boundary condition \eqref{EQ384} reduces to
\begin{align}
\begin{split}
\tda_{3i}a_{ki}\partial_{k}q = 0
\inon{on $\Gamma_0$}
.
\end{split}
\label{EQ3334}
\end{align}
Finally, we simplify the condition \eqref{EQ381} on~$\Gamma_1$.
First, we have
\begin{align}
\begin{split}
&
a_{3i} v_3 \partial_{3}v_i
= b_{3i} v_3 \frac{1}{\partial_{3}\psi}\partial_{3}v_i
= b_{3i}v_3 a_{33} \partial_{3}v_i
= b_{3i}v_3 a_{j3}\partial_{j}v_i
,
\end{split}
\label{EQ3338}
\end{align}
and thus, on $\Gamma_1$, using \eqref{EQ3338},
\begin{align}
\begin{split}
&
- \tda_{3i} v_1 a_{j1} \partial_{j}v_i
- \tda_{3i} v_2 a_{j2} \partial_{j}v_i
- a_{3i} (v_3-\psi_t) \partial_{3} v_i
\\&\indeq
=
- \tda_{3i} v_k a_{jk} \partial_{j}v_i
+ a_{3i} \psi_t \partial_{3} v_i
=
\frac{1}{\partial_{3}\psi}
( - \tda_{3i} v_k b_{jk} \partial_{j}v_i
+ b_{3i} \psi_t \partial_{3} v_i )
.
\end{split}
\label{EQ3337}
\end{align}
The negative of the first term inside the parentheses equals
\begin{align}
\begin{split}
b_{3i} v_k b_{jk} \partial_{j}v_i
&= b_{3i} \partial_{j}(v_k b_{jk} v_i)
= \partial_{j}( b_{3i} v_k b_{jk} v_i)
- \partial_{j} b_{3i} v_k b_{jk} v_i
\\&
= \sum_{j=1}^{2} \partial_{j}( b_{3i} v_k b_{jk} v_i )
+ \partial_{3}( b_{3i} v_i b_{3k} v_k )
- \partial_{j} b_{3i} v_k b_{jk} v_i
\\&
= \sum_{j=1}^{2} \partial_{j}( v_k b_{jk} w_t)
+ 2 v_k b_{3k} \partial_{3}( v_i b_{3i} )
- \partial_{j} b_{3i} v_k b_{jk} v_i
,
\end{split}
\llabel{EQ3342}
\end{align}
where we used \eqref{EQ321} in the third equality, and thus
\begin{align}
\begin{split}
b_{3i} v_k b_{jk} \partial_{j}v_i
&= \sum_{j=1}^{2} \partial_{j}( v_k b_{jk} w_t)
+ 2 w_t \partial_{3}( v_i b_{3i} )
- \partial_{j} b_{3i} v_k b_{jk} v_i
\\&
= \sum_{j=1}^{3} \partial_{j}( v_k b_{jk} w_t)
- \partial_{3}( v_k b_{3k} w_t)
+ 2 w_t \partial_{3}( v_i b_{3i} )
- \partial_{j} b_{3i} v_k b_{jk} v_i
\\&
= \sum_{j=1}^{3} v_k b_{jk} \partial_{j}w_t
- \partial_{3}v_k b_{3k} w_t
- v_k \partial_{3}(b_{3k} w_t)
+ 2 w_t b_{3i} \partial_{3} v_i
+ 2 w_t \partial_{3}b_{3i} v_i
- \partial_{j} b_{3i} v_k b_{jk} v_i
\\&
= w_t b_{3i} \partial_{3} v_i
+ \sum_{j=1}^{3} v_k b_{jk} \partial_{j}w_t
- v_k \partial_{3}(b_{3k} w_t)
+ 2 w_t \partial_{3}b_{3i} v_i
- \partial_{j} b_{3i} v_k b_{jk} v_i
\\&
= w_t b_{3i} \partial_{3} v_i
+ \sum_{j=1}^{2} v_k b_{jk} \partial_{j}w_t
+ w_t \partial_{3}b_{3i} v_i
- \partial_{j} b_{3i} v_k b_{jk} v_i
,
\end{split}
\label{EQ3339}
\end{align}
where we used $2w_t b_{3i} \partial_{3}v_i-\partial_{3}v_k b_{3k}w_t=w_t b_{3i}\partial_{3}v_i$.
We conclude that \eqref{EQ381} may be written as
\begin{align}
\begin{split}
& \tda_{3i}a_{ki}\partial_{k}q
= - b_{3i}\partial_{t}v_i + \partial_{t}\tda_{3i}v_i
- \frac{1}{\partial_{3}\psi}
\biggl(
\sum_{j=1}^{2} v_k b_{jk} \partial_{j}w_t
+ w_t \partial_{3}b_{3i} v_i
- \partial_{j} b_{3i} v_k b_{jk} v_i
\biggr)
\inon{on $\Gamma_1$}
,
\end{split}
\llabel{EQ3356}
\end{align}
while \eqref{EQ383} may be rewritten as
\begin{align}
\begin{split}
& \tda_{3i}a_{ki}\partial_{k}q + q
=\Delta_2^2 w - \nu \Delta_2 w_{t}
+ \partial_{t}\tda_{3i}v_i
- \frac{1}{\partial_{3}\psi}
\biggl(
\sum_{j=1}^{2} v_k b_{jk} \partial_{j}w_t
+ w_t \partial_{3}b_{3i} v_i
- \partial_{j} b_{3i} v_k b_{jk} v_i
\biggr)
\inon{on $\Gamma_1$}
.
\end{split}
\llabel{EQ3340}
\end{align}
}
\end{Remark}
\par
\subsection{The vorticity estimate}
\label{sec05}
Recall that the Eulerian vorticity $\omega_{i}=\epsilon_{ijk}\partial_{j}u_k$, for $i=1,2,3$, solves
\begin{equation}
\partial_{t}\omega_i + u_j\partial_{j} \omega_i = \omega_j\partial_{j}u_i
\comma i=1,2,3
.
\llabel{EQ392}
\end{equation}
Therefore, the ALE vorticity
\begin{equation}
\zeta(x,t)=\omega(\eta(x,t),t)
\llabel{EQ393}
\end{equation}
satisfies the equation
\begin{align}
\begin{split}
\partial_{t}\zeta_i + v_1 a_{j1}\partial_{j} \zeta_i + v_2 a_{j2}\partial_{j} \zeta_i + (v_3-\psi_t) a_{j3} \partial_{j} \zeta_i
= \zeta_k a_{mk}\partial_{m}v_i
\comma i=1,2,3
.
\end{split}
\label{EQ394}
\end{align}
Note that in the ALE variables, the vorticity reads
\begin{equation}
\zeta_i = \epsilon_{ijk} \partial_{m} v_k a_{mj}
.
\label{EQ395}
\end{equation}
Since we do not use the Eulerian variables in estimates, we denote the ALE variable, for simplicity of notation, by~$x$.
By multiplying \eqref{EQ394} with $J$, we obtain
\begin{align}
\begin{split}
J\partial_{t}\zeta_i + v_1 \tda_{j1}\partial_{j} \zeta_i + v_2 \tda_{j2}\partial_{j} \zeta_i + (v_3-\psi_t) \tda_{j3} \partial_{j} \zeta_i
= \zeta_k \tda_{mk}\partial_{m}v_i
\comma i=1,2,3
.
\end{split}
\label{EQ396}
\end{align}
In order to perform non-tangential estimates, we need to extend functions to ${\mathbb R}^{3}$ using the classical Sobolev extension operator $f\mapsto \tilde f$, which is a continuous operator $H^{k}(\Omega)\to H^{k}(\Omega_0)$ for all $k\in[0,5]$, where
\begin{equation}
\Omega_0={\mathbb T}^2\times[0,2]
.
\llabel{EQ397}
\end{equation}
The extension is designed so that $\tilde f$ vanishes in a neighborhood of $\{x_3\geq 3/2\}$.
For the Jacobian $J$, we need to modify the extension operator to $\bar{~}\colon H^{k}(\Omega)\to H^{k}(\Omega_0)$ so that we have
\begin{equation}
\frac14 \leq \bar J(x) \leq 2
\comma x_3\leq \frac43
\label{EQ3220}
\end{equation}
and $\bar J \equiv 0$ for $x_3\geq 2$.
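We also record, for later use, the Piola identity for the matrix $\tda$; here we assume the usual ALE convention, suggested by the passage from \eqref{EQ394} to \eqref{EQ396}, that $\tda_{jm}=Ja_{jm}$ is the cofactor matrix of the flow map, in which case
\begin{equation}
\partial_{j}\tda_{jm} = \partial_{j}\bigl(Ja_{jm}\bigr) = 0
\comma m=1,2,3
.
\end{equation}
This is the identity invoked when integrating by parts in the estimates below.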
\par
Now, consider the solution $\theta=(\theta_1,\theta_2,\theta_3)$ of the problem
\begin{align}
\begin{split}
\bar J\partial_{t}\theta_i
+ \tilde v_1 \tilde \tda_{j1}\partial_{j} \theta_i
+ \tilde v_2 \tilde \tda_{j2}\partial_{j} \theta_i
+ (\tilde v_3-\tilde \psi_t) \tilde \tda_{j3} \partial_{j} \theta_i
= \theta_k \tilde \tda_{mk}\partial_{m}\tilde v_i
\comma i=1,2,3
,
\end{split}
\label{EQ398}
\end{align}
with the initial condition
\begin{equation}
\theta(0)=\tilde \zeta(0)
.
\llabel{EQ3236}
\end{equation}
\par
First, we prove the following uniqueness result.
\par
\cole
\begin{Lemma}
\label{L08a}
We have $\zeta=\theta$ on $\Omega\times [0,T]$, provided
\begin{equation}
\int_{0}^{T} P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)}) \,ds <\infty
,
\llabel{EQ399}
\end{equation}
where $P$ is a polynomial.
\end{Lemma}
\colb
\par
\begin{proof}[Proof of Lemma~\ref{L08a}]
The main point of this approach is that in the equation \eqref{EQ396} we have
\begin{equation}
v_1 \tda_{31} + v_2 \tda_{32} + (v_3-\psi_t) \tda_{33} = 0
\inon{on $\partial\Omega$}
.
\label{EQ3100}
\end{equation}
First, we verify \eqref{EQ3100} on $\Gamma_0$. By \eqref{EQ309}$_3$ and \eqref{EQ314}, we have $\tda_{31}=\tda_{32}=0$ and $\psi_t=0$, so the left side of \eqref{EQ3100} reduces to $v_3$, which vanishes by the boundary condition~\eqref{EQ320}. On $\Gamma_1$, the left side of \eqref{EQ3100} vanishes by \eqref{EQ321}. Thus \eqref{EQ3100} indeed holds.
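In other words, introducing the shorthand $\beta$ (used only in this paragraph),
\begin{equation}
\beta_j = v_1 \tda_{j1} + v_2 \tda_{j2} + (v_3-\psi_t) \tda_{j3}
\comma j=1,2,3
,
\end{equation}
the equation \eqref{EQ396} is a transport equation with drift $\beta$, and \eqref{EQ3100} states that $\beta_3=\beta\cdot N=0$ on $\partial\Omega$ (recall $N=(0,0,\pm 1)$), i.e., the drift is tangent to the boundary. This is why no boundary condition is needed for the difference $\sigma$ below and why the boundary terms produced by the integrations by parts vanish.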
Now, the difference $\sigma=\zeta-\theta$ satisfies
\begin{align}
J\partial_{t}\sigma_i + v_1 \tda_{j1}\partial_{j} \sigma_i + v_2 \tda_{j2}\partial_{j} \sigma_i + (v_3-\psi_t) \tda_{j3} \partial_{j} \sigma_i
= \sigma_k \tda_{mk}\partial_{m} v_i
\inon{on $\Omega$}
\comma i=1,2,3
,
\label{EQ3102}
\end{align}
using that the extension operators $\tilde{~}$ and $\bar{~}$ act as the identity in $\Omega$.
\par
We now test \eqref{EQ3102} with $\sigma_i$, on $\Omega$, which leads to
\begin{align}
\begin{split}
\frac12 \frac{d}{dt} \int J|\sigma|^2
&=
- \sum_{m=1}^{2} \int v_m \tda_{jm}\sigma_i \partial_{j} \sigma_i
- \int (v_3-\psi_t) \tda_{j3} \sigma_i \partial_{j} \sigma_i
+ \int \sigma_k \tda_{mk}\partial_{m}v_i \sigma_i
+ \frac12 \int J_t | \sigma|^2
\\&
= I_1+I_2+I_3+I_4
.
\end{split}
\llabel{EQ3103}
\end{align}
For the first two terms, we write $\sigma_i\partial_{j}\sigma_i=(1/2)\partial_{j}(|\sigma|^2)$ and integrate by parts, obtaining
\begin{align}
\begin{split}
I_1+I_2 &=
\frac12 \sum_{m=1}^{2} \int \partial_{j} v_m \tda_{jm} \sigma_i \sigma_i
+ \frac12\int \partial_{j}(v_3-\psi_t) \tda_{j3} \sigma_i \sigma_i
\\&
- \frac12 \sum_{m=1}^{2} \int_{\partial\Omega} v_m \tda_{jm} \sigma_i \sigma_i N_j
- \frac12\int_{\partial\Omega} (v_3-\psi_t) \tda_{j3} \sigma_i \sigma_i N_j
,
\end{split}
\llabel{EQ3104}
\end{align}
where we used the Piola identity. The boundary terms vanish by \eqref{EQ3100} and $N=(0,0,\pm 1)$. Therefore,
\begin{align}
\begin{split}
I_1+I_2
\leq
P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert \tda\Vert_{H^{3.5+\delta}}, \Vert \psi_t\Vert_{H^{2.5+\delta}} )
\Vert \sigma\Vert_{L^2}^2
.
\end{split}
\label{EQ3105}
\end{align}
Note that also $I_3$ and $I_4$ are bounded by the right side of~\eqref{EQ3105}.
Using \eqref{EQ338}, we get
\begin{align}
\begin{split}
\frac12 \frac{d}{dt} \int J |\sigma|^2
&\leq
P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert \tda\Vert_{H^{3.5+\delta}}, \Vert \psi_t\Vert_{H^{2.5+\delta}} )
\int J |\sigma|^2
\\&
\leq
P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)} )
\int J |\sigma|^2
,
\end{split}
\llabel{EQ3106}
\end{align}
where we used \eqref{EQ345} and \eqref{EQ348} in the last step. The lemma then follows from a standard Gronwall argument.
\end{proof}
\par
By the properties of the extension operator and since the equation for $\theta$ is of transport type (note that \eqref{EQ3220} holds), we have
\begin{equation}
\theta(x,t) = 0
\comma x_3\in(3/2,2) ,\ t\in [0,T]
.
\label{EQ3221}
\end{equation}
\par
The main result of this section is the following estimate on the Sobolev norm of the vorticity.
\par
\cole
\begin{Lemma}
\label{L04}
Under the assumption \eqref{EQ333}, the quantity
\begin{equation}
Y = \int_{\Omega_0} \bar J|\Lambda_3^{1.5+\delta}\theta|^2
\llabel{EQ3122}
\end{equation}
satisfies
\begin{equation}
\Vert \zeta\Vert_{H^{1.5+\delta}} \lesssim Y
\llabel{EQ3123}
\end{equation}
with
\begin{equation}
\Vert \zeta(0)\Vert_{H^{1.5+\delta}} \lesssim Y(0) \lesssim \Vert \zeta(0)\Vert_{H^{1.5+\delta}}
\label{EQ3124}
\end{equation}
and
\begin{align}
\begin{split}
\frac{d}{dt}Y
\lesssim
M P(\Vert v\Vert_{H^{2.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)} )
Y
,
\end{split}
\label{EQ3107}
\end{align}
for all $t\in[0,T]$, where $M$ is as in \eqref{EQ323}.
\end{Lemma}
\colb
\par
\begin{proof}[Proof of Lemma~\ref{L04}]
Denote
\begin{equation}
\Lambda_3 =(I-\Delta)^{1/2}
\llabel{EQ3108}
\end{equation}
on the domain ${\mathbb T}^2\times{\mathbb R}$.
We apply $\Lambda_3^{1.5+\delta}$ to the equation \eqref{EQ398} and test it with $\Lambda_3^{1.5+\delta}\theta$, obtaining
\begin{align}
\begin{split}
\frac{d Y}{dt}
&=
- \sum_{m=1}^{2} \int_{\Omega_0} \tilde v_m \tilde\tda_{jm}\partial_{j} \Lambda_3^{1.5+\delta}\theta_i \Lambda_3^{1.5+\delta}\theta_i
- \int_{\Omega_0} (\tilde v_3-\tilde\psi_t) \tilde\tda_{j3} \partial_{j} \Lambda_3^{1.5+\delta}\theta_i \Lambda_3^{1.5+\delta}\theta_i
\\&\indeq
+ \int_{\Omega_0} \theta_k \tilde\tda_{mk}\partial_{m}\Lambda_3^{1.5+\delta}\tilde v_i \Lambda_3^{1.5+\delta}\theta_i
\\&\indeq
- \sum_{m=1}^2 \int_{\Omega_0} \Bigl(\Lambda_3^{1.5+\delta}(\tilde v_m \tilde\tda_{jm}\partial_{j} \theta_i ) - \tilde v_m \tilde\tda_{jm}\partial_{j} \Lambda_3^{1.5+\delta}\theta_i \Bigr)\Lambda_3^{1.5+\delta}\theta_i
\\&\indeq
- \int_{\Omega_0} \Bigl( \Lambda_3^{1.5+\delta}( (\tilde v_3-\tilde\psi_t) \tilde\tda_{j3} \partial_{j}\theta_i ) - (\tilde v_3-\tilde\psi_t) \tilde\tda_{j3} \partial_{j} \Lambda_3^{1.5+\delta}\theta_i \Bigr) \Lambda_3^{1.5+\delta}\theta_i
\\&\indeq
+ \int_{\Omega_0} \Bigl( \Lambda_3^{1.5+\delta}(\theta_k \tilde\tda_{mk}\partial_{m}\tilde v_i ) - \theta_k \tilde\tda_{mk}\partial_{m}\Lambda_3^{1.5+\delta}\tilde v_i \Bigr)\Lambda_3^{1.5+\delta}\theta_i
\\&\indeq
+ \frac12 \int_{\Omega_0} \bar J_t |\Lambda_3^{1.5+\delta} \theta|^2
+ \int_{\Omega_0} \Bigl( \Lambda_3^{1.5+\delta}(\bar J\partial_t \theta_i) - \bar J \Lambda_3^{1.5+\delta} (\partial_{t}\theta_i) \Bigr) \Lambda_3^{1.5+\delta}\theta_i
\\&
= I_1+\cdots+I_8
.
\end{split}
\label{EQ3109}
\end{align}
For the first two terms on the right-hand side of \eqref{EQ3109}, we integrate by parts in $x_j$, obtaining
\begin{align}
\begin{split}
I_1+I_2 &=
\sum_{m=1}^{2} \int_{\Omega_0} \partial_{j}(\tilde v_m \tilde\tda_{jm}) \Lambda_3^{1.5+\delta}\theta_i \Lambda_3^{1.5+\delta}\theta_i
+ \int_{\Omega_0} \partial_{j}( (\tilde v_3-\tilde\psi_t) \tilde\tda_{j3}) \Lambda_3^{1.5+\delta}\theta_i \Lambda_3^{1.5+\delta}\theta_i
\\&\indeq
- \sum_{m=1}^{2} \int_{\partial\Omega} \tilde v_m \tilde\tda_{3m}N_{3} \Lambda_3^{1.5+\delta}\theta_i \Lambda_3^{1.5+\delta}\theta_i
- \int_{\partial\Omega} (\tilde v_3-\tilde\psi_t) \tilde\tda_{33} N_{3} \Lambda_3^{1.5+\delta}\theta_i \Lambda_3^{1.5+\delta}\theta_i
.
\end{split}
\label{EQ3110}
\end{align}
Since the extension operators are the identity on $\Omega$, the last two terms in \eqref{EQ3110} equal
\begin{align}
\begin{split}
- \sum_{m=1}^{2} \int_{\partial\Omega} v_m \tda_{3m}N_{3} \Lambda_3^{1.5+\delta}\theta_i \Lambda_3^{1.5+\delta}\theta_i
- \int_{\partial\Omega} ( v_3-\psi_t) \tda_{33} N_{3} \Lambda_3^{1.5+\delta}\theta_i \Lambda_3^{1.5+\delta}\theta_i
=0
,
\end{split}
\llabel{EQ3143}
\end{align}
where the last equality follows by \eqref{EQ3100}.
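For the reader's convenience, the integration by parts used for $I_1+I_2$ here, as well as in \eqref{EQ3104} above, is an instance of the elementary identity (stated as a sketch, for a sufficiently smooth vector field $\beta$, a scalar $f$, and a domain $U$ with outward unit normal $N$):
\begin{equation}
\int_{U} \beta_j \partial_{j} f\, f
= \frac12 \int_{U} \beta_j \partial_{j} (f^2)
= -\frac12 \int_{U} \partial_{j}\beta_j\, f^2
+ \frac12 \int_{\partial U} \beta_j N_j\, f^2
.
\end{equation}
The interior term is harmless once $\partial_{j}\beta_j$ is controlled, while the boundary term vanishes whenever $\beta\cdot N=0$ on $\partial U$, which is exactly what \eqref{EQ3100} provides.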
Using that the sum of the last two terms in \eqref{EQ3110} vanishes, we get
\begin{align}
\begin{split}
I_1+I_2 &\lesssim
\Vert \tilde v\Vert_{H^{2.5+\delta}(\Omega_0)} \Vert \tilde\tda\Vert_{H^{2.5+\delta}(\Omega_0)} \Vert \theta\Vert_{H^{1.5+\delta}(\Omega_0)}^2
\\&\indeq
+ ( \Vert \tilde v\Vert_{H^{2.5+\delta}(\Omega_0)} + \Vert \tilde\psi_t\Vert_{H^{2.5+\delta}(\Omega_0)} ) \Vert \tilde\tda\Vert_{H^{2.5+\delta}(\Omega_0)} \Vert \theta\Vert_{H^{1.5+\delta}(\Omega_0)}^2
\\&
\lesssim
\Vert v\Vert_{H^{2.5+\delta}} \Vert \tda\Vert_{H^{2.5+\delta}} \Vert \theta\Vert_{H^{1.5+\delta}}^2
+ ( \Vert v\Vert_{H^{2.5+\delta}} + \Vert \psi_t\Vert_{H^{2.5+\delta}} ) \Vert \tda\Vert_{H^{2.5+\delta}} \Vert \theta\Vert_{H^{1.5+\delta}}^2
\\&
\leq P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert b\Vert_{H^{3.5+\delta}}, \Vert \psi_t\Vert_{H^{2.5+\delta}} ) \Vert \theta\Vert_{H^{1.5+\delta}}^2
,
\end{split}
\label{EQ3111}
\end{align}
where we used multiplicative Sobolev inequalities in the first step, the continuity properties of the Sobolev extension operator in the second, and \eqref{EQ395} in the last. Therefore,
\begin{align}
\begin{split}
I_1+I_2 &\leq P(\Vert v\Vert_{H^{2.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)} ) \Vert \theta\Vert_{H^{1.5+\delta}}^2
.
\end{split}
\label{EQ3112}
\end{align}
For the third term in \eqref{EQ3109}, we have
\begin{align}
\begin{split}
I_3 &\lesssim \Vert \tilde \tda\Vert_{H^{1.5+\delta}} \Vert \tilde v\Vert_{H^{2.5+\delta}} \Vert \theta\Vert_{H^{1.5+\delta}}^2
\lesssim \Vert \tda\Vert_{H^{1.5+\delta}} \Vert v\Vert_{H^{2.5+\delta}} \Vert \theta\Vert_{H^{1.5+\delta}}^2
\\&
\leq P(\Vert v\Vert_{H^{2.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)} ) \Vert \theta\Vert_{H^{1.5+\delta}}^2
,
\end{split}
\label{EQ3113}
\end{align}
which is bounded by the right-hand side of~\eqref{EQ3112}.
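The remaining terms are commutators, and the bounds for $I_4$, $I_5$, $I_6$, and $I_8$ below all rest on estimates of Kato-Ponce type. The schematic form we read off from the first inequality in \eqref{EQ3114} (recorded here for convenience, with $s>0$ and the H\"older exponents $1/2=1/6+1/3$) is
\begin{equation}
\Vert \Lambda_3^{s}(fg) - f \Lambda_3^{s} g\Vert_{L^2}
\lesssim
\Vert \Lambda_3^{s} f\Vert_{L^{6}} \Vert g\Vert_{L^{3}}
+ \Vert \Lambda_3 f\Vert_{L^{\infty}} \Vert \Lambda_3^{s-1} g\Vert_{L^{2}}
,
\end{equation}
applied with $s=1.5+\delta$, $f=\tilde v_m \tilde\tda_{jm}$, and $g=\partial_{j}\theta_i$; the resulting factors are then controlled by the Sobolev embeddings $H^{1}\subset L^{6}$ and $H^{0.5}\subset L^{3}$.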
For the next term, we use a Kato-Ponce type estimate to write
\begin{align}
\begin{split}
I_4 &\lesssim
\Bigl( \Vert \Lambda_3^{1.5+\delta}(\tilde v_m\tilde\tda_{jm})\Vert_{L^{6}} \Vert \Lambda_3 \theta_i\Vert_{L^{3}}
+ \Vert \Lambda_3(\tilde v_m\tilde \tda_{jm})\Vert_{L^{\infty}} \Vert \Lambda_3^{1.5+\delta} \theta_i\Vert_{L^{2}} \Bigr)
\Vert \Lambda_3^{1.5+\delta}\theta_i\Vert_{L^2}
\\&
\lesssim
\Bigl( \Vert \tilde v_m\tilde\tda_{jm}\Vert_{H^{2.5+\delta}} \Vert \theta_i\Vert_{H^{1.5}}
+ \Vert \tilde v_m\tilde\tda_{jm}\Vert_{H^{2.5+\delta}} \Vert \Lambda_3^{1.5+\delta} \theta_i\Vert_{L^{2}} \Bigr)
\Vert \Lambda_3^{1.5+\delta}\theta\Vert_{L^2}
\\&
\leq P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert \tda\Vert_{H^{3.5+\delta}})
\Vert \Lambda_3^{1.5+\delta}\theta\Vert_{L^2}^2
,
\end{split}
\label{EQ3114}
\end{align}
which is also bounded by the right-hand side of~\eqref{EQ3112}. The terms $I_5$ and $I_6$ are treated similarly, and following the Kato-Ponce and Sobolev inequalities, we get
\begin{align}
\begin{split}
I_5 + I_6 &\leq P(\Vert v\Vert_{H^{2.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)} ) \Vert \theta\Vert_{H^{1.5+\delta}}^2
.
\end{split}
\label{EQ3115}
\end{align}
For the seventh term in \eqref{EQ3109}, we also have
\begin{align}
\begin{split}
I_7 \leq P(\Vert v\Vert_{H^{2.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)} ) \Vert \theta\Vert_{H^{1.5+\delta}}^2
.
\end{split}
\label{EQ3116}
\end{align}
Finally, for the eighth term, we write
\begin{align}
\begin{split}
I_8 &\lesssim
\Bigl( \Vert \Lambda_3^{1.5+\delta} \bar J \Vert_{L^6} \Vert \theta_t\Vert_{L^3}
+ \Vert \Lambda_3 \bar J\Vert_{L^{\infty}} \Vert \Lambda_3^{0.5+\delta} \theta_t\Vert_{L^{2}} \Bigr)
\Vert \Lambda_3^{1.5+\delta}\theta\Vert_{L^2}
\\&
\lesssim
\Vert J\Vert_{H^{3.5+\delta}}
\Vert \Lambda_3^{1.5+\delta}\theta\Vert_{L^2}
\Vert \Lambda_3^{0.5+\delta}\theta_t\Vert_{L^2}
.
\end{split}
\label{EQ3117}
\end{align}
In order to treat the last factor $\Vert \Lambda_3^{0.5+\delta}\theta_t\Vert_{L^2}$, we divide \eqref{EQ398} by $\bar J$ and use the fractional Leibniz rule to estimate
\begin{align}
\begin{split}
\Vert \Lambda_3^{0.5+\delta}\theta_t\Vert_{L^2}
&\leq
P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert J\Vert_{H^{3.5+\delta}}, \Vert \tda\Vert_{H^{3.5+\delta}}, \Vert \psi_t\Vert_{H^{2.5+\delta}} )
\Vert \Lambda_3^{1.5+\delta}\theta\Vert_{L^2}
,
\end{split}
\label{EQ3118}
\end{align}
where we also used \eqref{EQ3220} and \eqref{EQ3221}.
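To indicate where \eqref{EQ3220} and \eqref{EQ3221} enter: together they guarantee that $\bar J$ stays away from zero on the region relevant for $\theta$, so that \eqref{EQ398} may be solved for the time derivative (a sketch of the step, under our reading of the equation):
\begin{equation}
\partial_{t}\theta_i
= -\frac{1}{\bar J}
\Bigl( \tilde v_1 \tilde\tda_{j1} + \tilde v_2 \tilde\tda_{j2} + (\tilde v_3-\tilde\psi_t)\tilde\tda_{j3} \Bigr) \partial_{j}\theta_i
+ \frac{1}{\bar J}\, \theta_k \tilde\tda_{mk}\partial_{m}\tilde v_i
\comma i=1,2,3
,
\end{equation}
after which $\Lambda_3^{0.5+\delta}$ is applied to the right-hand side and each product is estimated by the fractional Leibniz rule, leading to \eqref{EQ3118}.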
Employing \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3118} in \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3117}, we then obtain \begin{align}\thelt{ZR wHA fp7 9o imtC c5CV 8cEuwU w7 k 8Q7 nCq WkM gY rtVR IySM tZUGCH XV 9 mr9 GHZ ol0 VE eIjQ vwgw 17pDhX JS F UcY bqU gnG V8 IFWb S1GX az0ZTt 81 w 7En IhF F72 v2 PkWO Xlkr w6IPu5 67 9 vcW 1f6 z99 lM 2LI1 Y6Na axfl18 gT 0 gDp tVl CN4 jf GSbC ro5D v78Cxa uk Y iUI WWy YDR w8 z7Kj Px7C hC7zJv b1 b 0rF d7n Mxk 09 1wHv y4u5 vLLsJ8 Nm A kWt xuf 4P5 Nw P23b 06sF NQ6xgD hu R GbK 7j2 O4g y4 p4BL top3 h2kfyI 9w O 4Aa EWb 36Y yH YiI1 S3CO J7aN1r 0s Q OrC AC4} \begin{split} I_8 &\leq P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert J\Vert_{H^{3.5+\delta}}, \Vert \tda\Vert_{H^{3.5+\delta}} \Vert \psi_t\Vert_{H^{2.5+\delta}} ) \Vert \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI_3^{1.5+\delta}\theta\Vert_{L^2} . \end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3119} \end{align} Combining \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3109} and the upper bounds \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3111}, \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3112}, \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3113}, \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3114}, \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3115}, 
\eqref{EQ3116}, and \eqref{EQ3119}, we get
\begin{align}
\begin{split}
\frac{d \AA}{dt}
\leq
P(\Vert v\Vert_{H^{2.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)} )
\Vert \Lambda_3^{1.5+\delta}\theta\Vert_{L^2}
,
\end{split}
\llabel{EQ3121}
\end{align}
and then, using \eqref{EQ3220} and \eqref{EQ3221}, we obtain \eqref{EQ3107}.
\end{proof}
\par
\subsection{The conclusion of a~priori bounds}
\label{sec06}
\par
Now, we are ready to conclude the proof of the main statement on a~priori estimates for the system.
\par
\begin{proof}[Proof of Theorem~\ref{T01}]
Using the pressure estimate \eqref{EQ378} in the tangential bound \eqref{EQ351}, we get
\begin{align}
\begin{split}
& \Vert \Lambda^{4+\delta} w\Vert_{L^2(\Gamma_1)}^2
+ \Vert \Lambda^{2+\delta} w_{t}\Vert_{L^2(\Gamma_1)}^2
+ \nu \int_{0}^{t} \Vert \nabla_2 \Lambda^{2+\delta} w_{t} \Vert_{L^2(\Gamma_1)}^2 \, ds
\\&\indeq
\lesssim
\Vert w_{t}(0)\Vert_{H^{4+\delta}(\Gamma_1)}^2
+ \Vert v(0) \Vert_{H^{2.5+\delta}}^2
+ \Vert v\Vert_{L^2}^{(1+\delta)/(2.5+\delta)} \Vert v\Vert_{H^{2.5+\delta}}^{(4+\delta)/(2.5+\delta)}
\\&\indeq\indeq
+\int_{0}^{t} P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)} )\,ds
.
\end{split}
\label{EQ3127}
\end{align}
By the div-curl elliptic estimate (see \cite{BB}), we have
\begin{equation}
\Vert v\Vert_{H^{2.5+\delta}}
\lesssim
\Vert \curl v\Vert_{H^{1.5+\delta}}
+ \Vert \div v\Vert_{H^{1.5+\delta}}
+ \Vert v\cdot N\Vert_{H^{2+\delta}(\Gamma_0\cup\Gamma_1)}
+ \Vert v\Vert_{L^2}
.
\label{EQ3128}
\end{equation}
We bound the terms on the right-hand side in order.
Using the formula \eqref{EQ395} for the ALE vorticity $\zeta$, we may estimate
\begin{align}
\begin{split}
\Vert (\curl v)_i\Vert_{H^{1.5+\delta}}
&\lesssim
\Vert \epsilon_{ijk} \partial_{m} v_k (a_{mj}-\delta_{mj}) \Vert_{H^{1.5+\delta}}
+ \Vert \zeta_i \Vert_{H^{1.5+\delta}}
\lesssim
\epsilon \Vert v\Vert_{H^{2.5+\delta}}
+ \Vert \zeta \Vert_{H^{1.5+\delta}}
,
\end{split}
\label{EQ3129}
\end{align}
for $i=1,2,3$, where we used \eqref{EQ334} in the last step.
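The first inequality in \eqref{EQ3129} rests on a pointwise decomposition of the curl; assuming, as suggested by the formula referenced above, that the ALE vorticity is given by $\zeta_i = \epsilon_{ijk} a_{mj} \partial_{m} v_k$, it reads
\begin{equation*}
(\curl v)_i
= \epsilon_{ijk} \delta_{mj} \partial_{m} v_k
= \zeta_i - \epsilon_{ijk} (a_{mj}-\delta_{mj}) \partial_{m} v_k
,
\end{equation*}
after which the triangle inequality applies.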
Therefore, applying the vorticity bound \eqref{EQ3107}, integrated in time, along with \eqref{EQ3124}, in \eqref{EQ3129}, we get
\begin{align}
\begin{split}
\Vert \curl v\Vert_{H^{1.5+\delta}}
\lesssim
\Vert \zeta(0)\Vert_{H^{1.5+\delta}}
+ \epsilon \Vert v\Vert_{H^{1.5+\delta}}
+ \int_{0}^{t} P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)} ) \AA \,ds
.
\end{split}
\label{EQ3343}
\end{align}
For the divergence term in \eqref{EQ3128}, we use \eqref{EQ317}$_2$ to estimate
\begin{align}
\begin{split}
\Vert \div v\Vert_{H^{1.5+\delta}}
&= \Vert (a_{ki}-\delta_{ki})\partial_{k} v_i \Vert_{H^{1.5+\delta}}
\lesssim
\epsilon \Vert v\Vert_{H^{2.5+\delta}}
.
\end{split}
\label{EQ3131}
\end{align}
The part on $\Gamma_0$ of the final term in \eqref{EQ3128} vanishes, while on $\Gamma_1$, we use~\eqref{EQ321}. We get
\begin{align}
\begin{split}
\Vert v\cdot N\Vert_{H^{2+\delta}(\Gamma_1)}
&\lesssim
\Vert (b_{3i} - \delta_{3i})v_i\Vert_{H^{2+\delta}(\Gamma_1)}
+ \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)}
\\&
\lesssim
\Vert b-I\Vert_{H^{2+\delta}(\Gamma_1)} \Vert v\Vert_{H^{2+\delta}(\Gamma_1)}
+ \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)}
.
\end{split}
\label{EQ3132}
\end{align}
Note that
\begin{align}
\begin{split}
\Vert b-I\Vert_{H^{2+\delta}(\Gamma_1)}
&\lesssim
\Vert b-I\Vert_{H^{2.5+\delta}}
\lesssim
\Vert b-I\Vert_{H^{1.5+\delta}}^{1-\alpha} \Vert b-I\Vert_{H^{3.5+\delta}}^{\alpha}
\\&
\lesssim
\epsilon ( 1+ \Vert b\Vert_{H^{3.5+\delta}}^{\alpha} )
\lesssim
\epsilon ( 1+ \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}^{\alpha} )
,
\end{split}
\label{EQ3133}
\end{align}
where $\alpha\in(0,1)$ is the exponent determined by $2.5+\delta=(1-\alpha)(1.5+\delta)+\alpha(3.5+\delta)$.
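Explicitly, the defining relation for $\alpha$ gives
\begin{equation*}
(1-\alpha)(1.5+\delta)+\alpha(3.5+\delta) = 1.5+\delta+2\alpha = 2.5+\delta
\quad\Longleftrightarrow\quad
\alpha = \frac12 ,
\end{equation*}
so the interpolation inequality in \eqref{EQ3133} may also be written as $\Vert b-I\Vert_{H^{2.5+\delta}} \lesssim \Vert b-I\Vert_{H^{1.5+\delta}}^{1/2} \Vert b-I\Vert_{H^{3.5+\delta}}^{1/2}$.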
Using \eqref{EQ3133} in \eqref{EQ3132}, we get
\begin{align}
\begin{split}
\Vert v\cdot N\Vert_{H^{2+\delta}(\Gamma_1)}
&\lesssim
\epsilon ( 1 + \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}^{\alpha} ) \Vert v\Vert_{H^{2.5+\delta}}
+ \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)}
,
\end{split}
\llabel{EQ3134}
\end{align}
and thus
\begin{align}
\begin{split}
\Vert v\cdot N\Vert_{H^{2+\delta}(\Gamma_1)}
&\lesssim
\epsilon \Vert w\Vert_{H^{4+\delta}(\Gamma_1)} \Vert v\Vert_{H^{2.5+\delta}}
+ \epsilon \Vert v\Vert_{H^{2.5+\delta}}
+ \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)}
.
\end{split}
\label{EQ3135}
\end{align}
Now, we use \eqref{EQ3343}, \eqref{EQ3131}, and \eqref{EQ3135} in \eqref{EQ3128}, while also absorbing the term $\epsilon\Vert v\Vert_{H^{2.5+\delta}}$, obtaining
\begin{align}
\begin{split}
\Vert v\Vert_{H^{2.5+\delta}}^2
&\lesssim
\Vert \zeta(0)\Vert_{H^{1.5+\delta}}^2
+ \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)}^2
+ \epsilon^2 \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}^{2} \Vert v\Vert_{H^{2.5+\delta}}^2
\\&\indeq
+ \int_{0}^{t} P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)} )\AA \,ds
.
\end{split}
\label{EQ3136}
\end{align}
Next, we combine \eqref{EQ3136} with the tangential estimate \eqref{EQ3127}.
Multiplying \eqref{EQ3136} with a small constant $\epsilon_0\in(0,1]$ and adding the resulting inequality to \eqref{EQ3127}, we obtain
\begin{align}
\begin{split}
& \epsilon_0 \Vert v\Vert_{H^{2.5+\delta}}^2
+ \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}^2
+ \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)}^2
\\&\indeq
\lesssim
\Vert w_{t}(0)\Vert_{H^{2+\delta}(\Gamma_1)}^2
+ \Vert v(0) \Vert_{H^{2.5+\delta}}^2
+ \Vert v\Vert_{L^2}^{(1+\delta)/(2.5+\delta)} \Vert v\Vert_{H^{2.5+\delta}}^{(4+\delta)/(2.5+\delta)}
\\&\indeq\indeq
+ \epsilon_0 \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)}^2
+ \epsilon_0\epsilon^2 \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}^2 \Vert v\Vert_{H^{2.5+\delta}}^2
\\&\indeq\indeq
+ \int_{0}^{t} P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)} ) \AA\,ds
,
\end{split}
\label{EQ3138}
\end{align}
with the implicit constant independent of $\nu$. Now, first choose and fix $\epsilon_0$ so small that the fourth term on the right-hand side is absorbed in the third term on the left. Then choose $\epsilon$ as in \eqref{EQ340} with a sufficiently large constant $C$ so that the fifth term in \eqref{EQ3138} is absorbed in the second term on the left. This choice requires \eqref{EQ341} for $T_0$.
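To make the absorption step explicit (a generic sketch, not tied to the specific constants in \eqref{EQ3138}): if an inequality has the form $X + Y \leq C\epsilon_0 Y + R$ with $X,Y,R\geq0$, then choosing $\epsilon_0 \leq 1/(2C)$ gives
\begin{equation*}
X + Y \leq \frac12 Y + R ,
\qquad\text{and hence}\qquad
X + \frac12 Y \leq R ,
\end{equation*}
i.e., the small multiple of $Y$ on the right-hand side may be moved to the left; the fifth term in \eqref{EQ3138} is handled in the same way once $\epsilon$ is fixed.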
For the third term on the right-hand side of \eqref{EQ3127}, we use
\begin{align}
\begin{split}
& \Vert v\Vert_{L^2}^{(1+\delta)/(2.5+\delta)} \Vert v\Vert_{H^{2.5+\delta}}^{(4+\delta)/(2.5+\delta)}
\leq \epsilon_1 \Vert v\Vert_{H^{2.5+\delta}}^2 + C_{\epsilon_1} \Vert v\Vert_{L^2}^2
\\&\indeq
\leq \epsilon_1 \Vert v\Vert_{H^{2.5+\delta}}^2 + C_{\epsilon_1} \Vert v_0\Vert_{L^2}^2 + C_{\epsilon_1} \Vert v_t\Vert_{L^2}^2
\\&\indeq
\leq \epsilon_1 \Vert v\Vert_{H^{2.5+\delta}}^2 + C_{\epsilon_1} \Vert v_0\Vert_{L^2}^2 + C_{\epsilon_1} \int_{0}^{t}\Vert v_t\Vert_{L^2}^2\,ds
\\&\indeq
\leq \epsilon_1 \Vert v\Vert_{H^{2.5+\delta}}^2 + C_{\epsilon_1} \Vert v_0\Vert_{L^2}^2 + C_{\epsilon_1} \int_{0}^{t} P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)} ) \AA \,ds
,
\end{split}
\label{EQ3139}
\end{align}
by \eqref{EQ358} and \eqref{EQ378}, where $\epsilon_1\in(0,1]$ is a small constant to be determined.
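The first inequality in \eqref{EQ3139} is an instance of Young's inequality: for $a,b\geq0$ and exponents $r,s>0$ with $r+s=2$, one has
\begin{equation*}
a^{r} b^{s} \leq \epsilon_1 b^{2} + C_{\epsilon_1} a^{2} ,
\end{equation*}
as follows from $xy\leq x^{p}/p + y^{p'}/p'$ with $p=2/r$ and $p'=2/s$; here $a=\Vert v\Vert_{L^2}$, $b=\Vert v\Vert_{H^{2.5+\delta}}$, $r=(1+\delta)/(2.5+\delta)$, and $s=(4+\delta)/(2.5+\delta)$, which indeed satisfy $r+s=2$.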
Using \eqref{EQ3139} in \eqref{EQ3138}, and choosing $\epsilon_1$ sufficiently small, we obtain
\begin{align}
\begin{split}
& \Vert v\Vert_{H^{2.5+\delta}}^2
+ \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}^2
+ \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)}^2
+ \nu \int_{0}^{t} \Vert \nabla_2 \Lambda^{2+\delta} w_{t} \Vert_{L^2(\Gamma_1)}^2 \, ds
\\&\indeq
\lesssim
\Vert w_{t}(0)\Vert_{H^{2+\delta}(\Gamma_1)}^2
+ \Vert v(0) \Vert_{H^{2.5+\delta}}^2
+ \int_{0}^{t} P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}, \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)} ) \AA \,ds
.
\end{split}
\label{EQ3140}
\end{align}
A standard Gronwall argument on \eqref{EQ3140} and \eqref{EQ3107} then implies a uniform-in-$\nu$ estimate
\begin{equation}
\Vert v\Vert_{H^{2.5+\delta}}
+ \Vert w\Vert_{H^{4+\delta}(\Gamma_1)}
+ \Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)}
+ \AA
\lesssim M
\llabel{EQ3141}
\end{equation}
on $[0,T_0]$, where $T_{0}$ is independent of $ 0\leq \nu \leq 1$, and the proof of Theorem~\ref{T01} is concluded.
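For completeness, we indicate the schematic form of the Gronwall step just used; the abbreviations $Y$, $Q$, and $C_0$ are introduced only for this remark. Writing $Y(t)$ for the sum of the squared norms on the left-hand side of \eqref{EQ3140}, the estimates \eqref{EQ3140} and \eqref{EQ3107} combine into an inequality of the type
\begin{equation*}
Y(t) \leq C_0 + \int_0^t Q(Y(s))\,ds ,
\end{equation*}
with $Q$ a nondecreasing polynomial and $C_0$ depending on the initial data. As long as $Y\leq 2C_0$, this gives $Y(t)\leq C_0 + t\,Q(2C_0)$; choosing $T_0 \leq C_0/Q(2C_0)$ and using the continuity of $Y$, we conclude $Y\leq 2C_0$ on $[0,T_0]$, which yields the asserted bound with $M$ depending only on the initial data.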
\end{proof}
\par
\startnewsection{Compatibility conditions}{sec07}
From \eqref{EQ321}, we obtain $ b_{3i}(0) v_i(0) = w_1(0) $, and since $ b_{3i}(0) = (0,0,1) $, we get the compatibility condition
\begin{equation}
v_3(0) = w_1(0) \inon{on $\Gamma_1$}
.
\label{EQ3144}
\end{equation}
Next, the divergence-free condition gives the compatibility condition
\begin{equation}
\int_{\Gamma_1} v_3(0) = 0
,
\label{EQ3145}
\end{equation}
which results from integrating the divergence-free condition $\tda_{ij}\partial_{i}v_j=0$ and evaluating it at $t=0$. By the condition \eqref{EQ321}, we also obtain
\begin{equation}
\int_{\Gamma_1} w_1(0) = 0
,
\llabel{EQ3146}
\end{equation}
but this also follows from \eqref{EQ3144} and~\eqref{EQ3145}.
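For clarity, we indicate the computation behind \eqref{EQ3145}, under the assumptions (standard for this construction, and assumed here) that $\tda(0)=I$, that $v\cdot N=0$ on $\Gamma_0$, and that the outward unit normal on $\Gamma_1$ is $N=(0,0,1)$: evaluating $\tda_{ij}\partial_{i}v_j=0$ at $t=0$ gives $\div v(0)=0$ in $\Omega$, and therefore
\begin{equation*}
0 = \int_{\Omega} \div v(0)
= \int_{\Gamma_0\cup\Gamma_1} v(0)\cdot N
= \int_{\Gamma_1} v_3(0)
,
\end{equation*}
which is \eqref{EQ3145}.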
\par
Assume that $u=q$ solves
\begin{align}
\begin{split}
&\partial_{i}(d_{ij}\partial_{j}\qqq) = \div f \inon{in $\Omega$}
,
\\& d_{mk}\partial_{k}\qqq N_{m} + u=g_1 \inon{on $\Gamma_1$}
,
\\& d_{mk}\partial_{k}\qqq N_{m}=g_0 \inon{on $\Gamma_0$}
.
\end{split}
\label{EQ3147}
\end{align}
Integrating \eqref{EQ3147}$_1$ over $\Omega$, we get
\begin{equation}
\int_{\Gamma_0} d_{ij}\partial_{j}\qqq N_i
+\int_{\Gamma_1} d_{ij}\partial_{j}\qqq N_i
= \int_{\Gamma_0\cup \Gamma_1} f_i N_i
,
\llabel{EQ3148}
\end{equation}
from where, using \eqref{EQ3147}$_2$ and \eqref{EQ3147}$_3$,
\begin{equation}
\int_{\Gamma_0} g_0
+\int_{\Gamma_1} (g_1-u)
=
\int_{\Gamma_0\cup \Gamma_1} f_i N_i
,
\llabel{EQ3149}
\end{equation}
and thus
\begin{equation}
\int_{\Gamma_1} u
=
\int_{\Gamma_0} g_0
+ \int_{\Gamma_1} g_1
- \int_{\Gamma_0\cup \Gamma_1} f_i N_i
.
\label{EQ3150}
\end{equation}
Therefore, every solution of \eqref{EQ3147} satisfies~\eqref{EQ3150}. We apply this to the equation \eqref{EQ380} with the boundary conditions \eqref{EQ383} and~\eqref{EQ384}.
We have
\begin{equation}
d_{ij} = b_{ik}a_{jk}
\llabel{EQ3151}
\end{equation}
and
\begin{align}
\begin{split}
f_j &= \partial_{j}(\partial_{t}\tda_{ji} v_i)
- \partial_{j}(\tda_{ji} (v_3-\psi_t)\partial_{3}v_i)
- \partial_{j}\left(\sum_{m=1}^{2} \tda_{jm} v_m \right)
\inon{in $\Omega$}
\\
g_0 &= - \tda_{3i} v_1 a_{j1} \partial_{j}v_i
- \tda_{3i} v_2 a_{j2} \partial_{j}v_i
- \tda_{3i} (v_3-\psi_t) \partial_{3} v_i
\inon{on $\Gamma_0$}
\\
g_1 &=\Delta_2^2 w -\nu\Delta_2 w_{t}
+ \partial_{t}\tda_{3i}v_i
- \tda_{3i} v_1 a_{j1} \partial_{j}v_i
- \tda_{3i} v_2 a_{j2} \partial_{j}v_i
- \tda_{3i} (v_3-\psi_t) \partial_{3} v_i
\inon{on $\Gamma_1$}
.
\end{split}
\llabel{EQ3152}
\end{align}
Thus the equation \eqref{EQ3150} reads
\begin{align}
\begin{split}
\int_{\Gamma_1} q
&= - \int_{\Gamma_0}\tda_{3i} v_1 a_{j1} \partial_{j}v_i
- \int_{\Gamma_0}\tda_{3i} v_2 a_{j2} \partial_{j}v_i
- \int_{\Gamma_0}\tda_{3i} (v_3-\psi_t) \partial_{3} v_i
\\&\indeq
+ \int_{\Gamma_1}\Delta_2^2 w
- \nu\int_{\Gamma_1} \Delta_2 w_{t}
+ \int_{\Gamma_1}\partial_{t}\tda_{3i}v_i
- \int_{\Gamma_1}\tda_{3i} v_1 a_{j1} \partial_{j}v_i
- \int_{\Gamma_1}\tda_{3i} v_2 a_{j2} \partial_{j}v_i
- \int_{\Gamma_1}\tda_{3i} (v_3-\psi_t) \partial_{3} v_i
\\&\indeq
-\int_{\Gamma_0\cup\Gamma_1}\partial_{t}\tda_{ji} v_i N_j
+
\int_{\Gamma_0\cup\Gamma_1}(\tda_{ji} v_3-\psi_t)\partial_{3}v_i N_j
+ \int_{\Gamma_0\cup\Gamma_1}\sum_{m=1}^{2} \tda_{ji} v_m a_{km}\partial_{k}v_i N_j
,
\end{split}
\llabel{EQ3153}
\end{align}
from where
\begin{align}
\begin{split}
\int_{\Gamma_1} q
= \int_{\Gamma_1} \Delta_2^2 w
-\nu\int_{\Gamma_1} \Delta_2 w_{t}
= 0
,
\end{split}
\llabel{EQ3154}
\end{align}
where the last equality follows from the periodic boundary conditions imposed on $w$ and $w_{t}$ in $x_{1}$ and $x_{2}$.
\par
\startnewsection{Uniqueness}{sec08}
For simplicity, we only consider the case $\nu=0$; the uniqueness result is the same for other values of $\nu$. To obtain uniqueness, we need to assume
\begin{equation}
\delta\geq \frac12
.
\label{EQ3155}
\end{equation}
The main reason for this restriction arises when we apply the elliptic estimate \eqref{EQ387} to \eqref{EQ3175}--\eqref{EQ3177} below: since we use it with $k=0.5+\delta$ and Lemma~\ref{L08} requires $k\ge1$, this imposes the condition~\eqref{EQ3155}.
\par
\begin{proof}[Proof of Theorem~\ref{T02}]
Assume that $(v,q,w,a,\eta)$ and $(\tilde v,\tilde q,\tilde w,\tilde \eta,\tilde a)$ are solutions of the system on an interval $[0,T_0]$, both satisfying the bounds in Theorem~\ref{T01}. Denote by
\begin{equation}
(W,V,Q,E,A,\Psi) = (w,v,q,\eta,a,\psi) - (\tilde w, \tilde v,\tilde q,\tilde\eta,\tilde a,\tilde\psi)
\llabel{EQ3156}
\end{equation}
the difference, and assume that
\begin{equation}
(W,V,Q,E,A,\Psi)(0)=0
.
\label{EQ3120}
\end{equation}
We start with tangential estimates by claiming that
\begin{align}
\begin{split}
&\Vert \Lambda^{3+\delta} W\Vert_{L^2(\Gamma_1)}^2 + \Vert \Lambda^{1+\delta} W_{t}\Vert_{L^2(\Gamma_1)}^2
\\&\indeq
\lesssim
\Vert V\Vert_{L^2}^{1/(1.5+\delta)} \Vert V\Vert_{H^{1.5+\delta}}^{(2+2\delta)/(1.5+\delta)}
+ \int_{0}^{t} (\Vert V\Vert_{H^{1.5+\delta}} + \Vert Q\Vert_{H^{0.5+\delta}} + \Vert W\Vert_{H^{3+\delta}(\Gamma_1)} + \Vert W_t\Vert_{H^{1+\delta}(\Gamma_1)} )^2\,ds
,
\end{split}
\label{EQ3157}
\end{align}
where, in this section, we allow all the implicit constants to depend on the norms of $(v,q,w)$ and $(\tilde v, \tilde q, \tilde w)$. To prove this, we start by subtracting the equation \eqref{EQ322} and the analogous equation for $\tilde w$ and get
\begin{equation}
W_{tt} +\Delta_2^2 W = Q
.
\llabel{EQ3158}
\end{equation}
We test this equation with $\Lambda^{2(1+\delta)}W_{t}$, obtaining
\begin{equation}
\frac12 \frac{d}{dt}
\Bigl(\Vert \Delta_2 \Lambda^{1+\delta} W\Vert_{L^2(\Gamma_1)}^2
+ \Vert \Lambda^{1+\delta} W_{t}\Vert_{L^2(\Gamma_1)}^2 \Bigr)
= \int_{\Gamma_1} Q \Lambda^{2(1+\delta)} W_{t}
,
\label{EQ3159}
\end{equation}
where we used~\eqref{EQ3120}. Subtracting the velocity equation \eqref{EQ354}$_1$ and its analog for $\tilde v$, we get
\begin{align}
\begin{split}
& J\partial_{t} V_i + (J-\tilde J)\partial_{t} v_i
+ V_1 \tda_{j1} \partial_{j}v_i
+ \tilde v_1 B_{j1} \partial_{j}v_i
+ \tilde v_1 \tilde \tda_{j1} \partial_{j}V_i
+ V_2 \tda_{j2} \partial_{j}v_i
+ \tilde v_2 B_{j2} \partial_{j}v_i
+ \tilde v_2 \tilde\tda_{j2} \partial_{j}V_i
\\&\indeq
+ (V_3-\Psi_t)\tda_{j3}\partial_{j} v_i
+ (\tilde
v_3-\tilde\psi_t)B_{j3}\partial_{j} v_i
+ (\tilde v_3-\tilde\psi_t)\tilde\tda_{j3}\partial_{j} V_i
+ B_{ki}\partial_{k}\tilde q
+ \tda_{ki}\partial_{k}Q=0
,
\end{split}
\label{EQ3160}
\end{align}
while the difference of divergence-free conditions gives
\begin{align}
\begin{split}
\tda_{ki} \partial_{k}V_i= - B_{ki} \partial_{k} \tilde v_i
.
\end{split}
\label{EQ3161}
\end{align}
As in \eqref{EQ3328}--\eqref{EQ3330}, we have
\begin{align}
\begin{split}
& \frac12 \frac{d}{dt} \int J \Lambda^{0.5+\delta} V_i \Lambda^{1.5+\delta} V_i
= \frac12 \int J_t \Lambda^{0.5+\delta} V_i
\UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta} V_i + \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH J \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{t}V_i \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta} V_i + \bar I , \end{split} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3331} \end{align} where \begin{equation} { \bar I \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \Vert V\Vert_{H^{1.5+\delta}} \Vert V_t\Vert_{H^{-0.5+\delta}} } ; \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3332} \end{equation} recall that $\delta\geq0.5$ and that the constants depend on the norms of $(v,q,w)$ and $(\tilde v, \tilde q, \tilde w)$. Note that \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3332} is obtained analogously to \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3330} by writing \begin{align}\thelt{X 8gj Gci bR arme Si8l w03Win NX w 1gv vcD eDP Sa bsVw Zu4h aO1V2D qw k JoR Shj MBg ry glA9 3DBd S0mYAc El 5 aEd pII DT5 mb SVuX o8Nl Y24WCA 6d f CVF 6Al a6i Ns 7GCh OvFA hbxw9Q 71 Z RC8 yRi 1zZ dM rpt7 3dou ogkAkG GE 4 87V ii4 Ofw Je sXUR dzVL HU0zms 8W 2 Ztz iY5 mw9 aB ZIwk 5WNm vNM2Hd jn e wMR 8qp 2Vv up cV4P cjOG eu35u5 cQ X NTy kfT ZXA JH UnSs 4zxf Hwf10r it J Yox Rto 5OM FP hakR gzDY Pm02mG 18 v mfV 11N n87 zS X59D E0cN 99uEUz 2r T h1F P8x } \begin{split} \bar I &= \frac12 \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH \Bigl( \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^2 (J\UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta}V_i) - \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI (J \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta}V_i) \Bigr) \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{-0.5+\delta} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{t}V_i \\& = \frac12 \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH \Bigl( \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^2 (J\UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta}V_i) - J \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{2.5+\delta}V_i \Bigr) 
\UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{-0.5+\delta} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{t}V_i + \frac12 \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH \Bigl( J \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{2.5+\delta}V_i - \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI (J \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta}V_i) \Bigr) \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{-0.5+\delta} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{t}V_i , \end{split} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3347} \end{align} and estimating the commutators by employing Kato-Ponce inequalities. \par Next, we apply $\UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta}$ to \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3160} and test with $\UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta}V$ obtaining \begin{align}\thelt{ Z RC8 yRi 1zZ dM rpt7 3dou ogkAkG GE 4 87V ii4 Ofw Je sXUR dzVL HU0zms 8W 2 Ztz iY5 mw9 aB ZIwk 5WNm vNM2Hd jn e wMR 8qp 2Vv up cV4P cjOG eu35u5 cQ X NTy kfT ZXA JH UnSs 4zxf Hwf10r it J Yox Rto 5OM FP hakR gzDY Pm02mG 18 v mfV 11N n87 zS X59D E0cN 99uEUz 2r T h1F P8x jrm q2 Z7ut pdRJ 2DdYkj y9 J Yko c38 Kdu Z9 vydO wkO0 djhXSx Sv H wJo XE7 9f8 qh iBr8 KYTx OfcYYF sM y j0H vK3 ayU wt 4nA5 H76b wUqyJQ od O u8U Gjb t6v lc xYZt 6AUx wpYr18 uO v 62v} \begin{split} & \frac12 \frac{d}{dt} \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH J \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta} V_i \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta} V_i \\&\indeq = \frac12 \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH J_t \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta} V_i \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta} V_i - \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH \Bigl( \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta}(J\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_t V_i) - J \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta} (\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{t}V_i) \Bigr) \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta}V_i \\&\indeq\indeq - \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH 
\UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta}((J-\tilde J) \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{t} v_i) \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta} V_i - \sum_{m=1}^{2}\OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta}(V_m\tda_{jm}\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j}v_i) \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta} V_i \\&\indeq\indeq - \sum_{m=1}^{2}\OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta}(\tilde v_m B_{jm}\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j}v_i) \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta} V_i - \sum_{m=1}^{2}\OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta}(\tilde v_m\tilde \tda_{jm}\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j}V_i) \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta} V_i \\&\indeq\indeq - \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta} \bigl( (V_3-\Psi_t)\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3}v_i \bigr) \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta} V_i - \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta} \bigl( (\tilde v_3-\tilde \psi_t)\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3}V_i \bigr) \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta} V_i \\&\indeq\indeq -\OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta}(B_{ki}\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k} \tilde q)\UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta} V_i -\OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta}(b_{ki}\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k}Q)\UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta} V_i + \bar I \\& = I_1 + \cdots + I_{10} + \bar I . 
\end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3162} \end{align} All the terms are treated similarly as those in~\eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n356}. We show a detailed treatment of the tenth (and the most essential) term $I_{10}$. We first rewrite it as \begin{align}\thelt{0r it J Yox Rto 5OM FP hakR gzDY Pm02mG 18 v mfV 11N n87 zS X59D E0cN 99uEUz 2r T h1F P8x jrm q2 Z7ut pdRJ 2DdYkj y9 J Yko c38 Kdu Z9 vydO wkO0 djhXSx Sv H wJo XE7 9f8 qh iBr8 KYTx OfcYYF sM y j0H vK3 ayU wt 4nA5 H76b wUqyJQ od O u8U Gjb t6v lc xYZt 6AUx wpYr18 uO v 62v jnw FrC rf Z4nl vJuh 2SpVLO vp O lZn PTG 07V Re ixBm XBxO BzpFW5 iB I O7R Vmo GnJ u8 Axol YAxl JUrYKV Kk p aIk VCu PiD O8 IHPU ndze LPTILB P5 B qYy DLZ DZa db jcJA T644 Vp6byb 1g } \begin{split} I_{10} &= \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta}(\tda_{ki}Q)\UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k} V_i - \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH_{\Gamma_1} \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1+\delta}(\tda_{3i}Q)\UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1+\delta} V_i = J_1+J_2 . 
\end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3163} \end{align} For the first term in \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3163}, we proceed as in \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n365} and write \begin{align}\thelt{ OfcYYF sM y j0H vK3 ayU wt 4nA5 H76b wUqyJQ od O u8U Gjb t6v lc xYZt 6AUx wpYr18 uO v 62v jnw FrC rf Z4nl vJuh 2SpVLO vp O lZn PTG 07V Re ixBm XBxO BzpFW5 iB I O7R Vmo GnJ u8 Axol YAxl JUrYKV Kk p aIk VCu PiD O8 IHPU ndze LPTILB P5 B qYy DLZ DZa db jcJA T644 Vp6byb 1g 4 dE7 Ydz keO YL hCRe Ommx F9zsu0 rp 8 Ajz d2v Heo 7L 5zVn L8IQ WnYATK KV 1 f14 s2J geC b3 v9UJ djNN VBINix 1q 5 oyr SBM 2Xt gr v8RQ MaXk a4AN9i Ni n zfH xGp A57 uA E4jM fg6S 6eNGK} \begin{split} J_1 &= \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta}Q\UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta} (\tda_{ki} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k}V_i ) - \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH \Bigl( \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k}( \tda_{ki} V_i ) - \tda_{ki} \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k}V_i \Bigr) \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta}Q \\&\indeq +\OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH \Bigl(\UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta}( \tda_{ki}Q) - \tda_{ki}\UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta}Q \Bigr)\UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k}V_i \\& =J_{11}+J_{12}+J_{13} . 
\end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3164} \end{align} Note that $J_{11}= -\OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta}Q\UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta} (B_{ki} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k} \tilde v_i )$, due to~\eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3161}. Since $0.5+\delta\geq 1$ by \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3155}, we have \begin{align}\thelt{ YAxl JUrYKV Kk p aIk VCu PiD O8 IHPU ndze LPTILB P5 B qYy DLZ DZa db jcJA T644 Vp6byb 1g 4 dE7 Ydz keO YL hCRe Ommx F9zsu0 rp 8 Ajz d2v Heo 7L 5zVn L8IQ WnYATK KV 1 f14 s2J geC b3 v9UJ djNN VBINix 1q 5 oyr SBM 2Xt gr v8RQ MaXk a4AN9i Ni n zfH xGp A57 uA E4jM fg6S 6eNGKv JL 3 tyH 3qw dPr x2 jFXW 2Wih pSSxDr aA 7 PXg jK6 GGl Og 5PkR d2n5 3eEx4N yG h d8Z RkO NMQ qL q4sE RG0C ssQkdZ Ua O vWr pla BOW rS wSG1 SM8I z9qkpd v0 C RMs GcZ LAz 4G k70e O7k6 } \begin{split} J_{11} + J_{12} &\dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \Vert Q\Vert_{H^{0.5+\delta}} \Vert B\Vert_{H^{1.5+\delta}} \Vert \tilde v\Vert_{H^{2.5+\delta}} + \Vert b\Vert_{H^{3.5+\delta}} \Vert V\Vert_{H^{1.5+\delta}} \Vert Q\Vert_{H^{0.5+\delta}} \\& \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \Vert Q\Vert_{H^{0.5+\delta}} \Vert W\Vert_{H^{3.5+\delta}(\Gamma_1)} + \Vert V\Vert_{H^{1.5+\delta}} \Vert Q\Vert_{H^{0.5+\delta}} , \end{split} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3165} \end{align} recalling the agreement on constants. 
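The commutator terms above and below ($J_{12}$, $J_{13}$, and several of the $I_j$ in \eqref{EQ3162}) are handled by commutator estimates of Kato-Ponce type. For orientation only, one standard form for a fractional derivative $\Lambda^{s}$, which we have in mind here (with the analogous tangential statements on $\Gamma_1$), is
\begin{equation*}
   \Vert \Lambda^{s}(fg) - f\,\Lambda^{s}g\Vert_{L^{2}}
   \lesssim
   \Vert \nabla f\Vert_{L^{\infty}} \Vert \Lambda^{s-1} g\Vert_{L^{2}}
   + \Vert \Lambda^{s} f\Vert_{L^{2}} \Vert g\Vert_{L^{\infty}}
   \comma s>0
   ,
\end{equation*}
combined, when needed, with Sobolev embeddings that convert the $L^{\infty}$ norms into the Sobolev norms appearing in the constants above.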
For the third term in \eqref{EQ3164}, we write
\begin{equation}
   \Lambda^{1.5+\delta}
   = \Lambda^{\delta-0.5}
   -\partial_{1} T_1 - \partial_{2} T_2
   ,
   \label{EQ3166}
\end{equation}
where
\begin{equation}
   T_j = \partial_{j} (I-\Delta_2)^{\delta/2-0.25}
   \comma j=1,2
   \llabel{EQ3167}
\end{equation}
are tangential operators of order $0.5+\delta$.
Using \eqref{EQ3166} and integrating by parts, we have
\begin{align}
\begin{split}
  J_{13}
  &=
  \sum_{j=1}^{2}
  \int
  \Bigl(\partial_{j}\Lambda^{0.5+\delta}(\tda_{ki}Q) - \tda_{ki}\partial_{j}\Lambda^{0.5+\delta}Q \Bigr)T_j\partial_{k}V_i
  + \sum_{j=1}^{2}
  \int
  \partial_{j} \tda_{ki}\Lambda^{0.5+\delta}Q \, T_j\partial_{k}V_i
  \\&\indeq
  + \sum_{j=1}^{2}
  \int
  \Bigl(\Lambda^{0.5+\delta}( \tda_{ki}Q) - \tda_{ki}\Lambda^{0.5+\delta}Q \Bigr)\Lambda^{\delta-0.5}\partial_{k}V_i
  \\&
  \lesssim
  \Vert b\Vert_{H^{3.5+\delta}} \Vert Q\Vert_{H^{0.5+\delta}} \Vert V\Vert_{H^{1.5+\delta}}
  \lesssim
  \Vert Q\Vert_{H^{0.5+\delta}} \Vert V\Vert_{H^{1.5+\delta}}
  .
\end{split}
\llabel{EQ3168}
\end{align}
The boundary term
$J_{2}=-\int_{\Gamma_1} \Lambda^{1+\delta} ( \tda_{3i} Q) \Lambda^{1+\delta} V_i$
in \eqref{EQ3163} is rewritten as
\begin{align}
\begin{split}
  J_{2}
  &=
  - \int_{\Gamma_1}\tda_{3i} \Lambda^{1+\delta} Q \Lambda^{1+\delta} V_i
  - \int_{\Gamma_1}
  \Bigl(
    \Lambda^{1+\delta}( \tda_{3i}Q)
    - \tda_{3i} \Lambda^{1+\delta}Q
  \Bigr)
  \Lambda^{1+\delta} V_i
  \\&
  =
  - \int_{\Gamma_1} \Lambda^{\delta} Q\Lambda( \tda_{3i}\Lambda^{1+\delta} V_i)
  - \int_{\Gamma_1}
  \Bigl(
    \Lambda^{1+\delta}(\tda_{3i}Q)
    - \tda_{3i} \Lambda^{1+\delta}Q
  \Bigr)
  \Lambda^{1+\delta} V_i
  ,
\end{split}
\llabel{EQ3169}
\end{align}
and thus
\begin{align}
\begin{split}
  J_{2}
  &=
  - \int_{\Gamma_1} \Lambda^{\delta} Q \tda_{3i}\Lambda^{2+\delta} V_i
  - \int_{\Gamma_1} \Lambda^{\delta} Q
  \Bigl(
    \Lambda( \tda_{3i} \Lambda^{1+\delta}V_i)
    - \tda_{3i} \Lambda^{2+\delta}V_i
  \Bigr)
  \\&\indeq
  - \int_{\Gamma_1}
  \Bigl(
    \Lambda^{1+\delta}(\tda_{3i}Q)
    - \tda_{3i} \Lambda^{1+\delta}Q
  \Bigr)
  \Lambda^{1+\delta} V_i
  \\&
  =
  - \int_{\Gamma_1} \Lambda^{\delta} Q \Lambda^{2+\delta}(\tda_{3i} V_i)
  + \int_{\Gamma_1} \Lambda^{\delta} Q
  \Bigl(
    \Lambda^{2+\delta}(\tda_{3i} V_i)
    - \tda_{3i} \Lambda^{2+\delta}V_i
  \Bigr)
  \\&\indeq
  - \int_{\Gamma_1} \Lambda^{\delta} Q
  \Bigl(
    \Lambda( \tda_{3i} \Lambda^{1+\delta}V_i)
    - \tda_{3i} \Lambda^{2+\delta}V_i
  \Bigr)
  - \int_{\Gamma_1}
  \Bigl(
    \Lambda^{1+\delta}( \tda_{3i}Q)
    - \tda_{3i} \Lambda^{1+\delta}Q
  \Bigr)
  \Lambda^{1+\delta} V_i
  \\&
  = J_{21} + J_{22} + J_{23} + J_{24}
  .
\end{split}
\label{EQ3170}
\end{align}
For the first term, we use \eqref{EQ321}, which for the differences of solutions reads as
\begin{equation}
   \tda_{3i} V_i = W_{t} - B_{3i} \tilde v_i
   .
   \llabel{EQ3171}
\end{equation}
We obtain
\begin{align}
\begin{split}
  J_{21}
  &=
  - \int_{\Gamma_1} \Lambda^{\delta} Q \Lambda^{2+\delta}W_{t}
  + \int_{\Gamma_1} \Lambda^{\delta} Q \Lambda^{2+\delta}(B_{3i} \tilde v_{i})
  =
  - \int_{\Gamma_1} Q\Lambda^{2(1+\delta)} W_{t}
  + \int_{\Gamma_1} \Lambda^{\delta} Q \Lambda^{2+\delta}(B_{3i} \tilde v_{i})
  \\&
  = J_{211}+J_{212}
  .
\end{split}
\llabel{EQ3172}
\end{align}
The first term $J_{211}$ cancels with the right side of \eqref{EQ3159} after adding \eqref{EQ3159} and \eqref{EQ3162},
while the second term $J_{212}$ may be bounded as
\begin{align}
\begin{split}
  J_{212}
  &\lesssim
  \Vert Q\Vert_{H^{\delta}(\Gamma_1)} \Vert B\Vert_{H^{2+\delta}(\Gamma_1)} \Vert \tilde v\Vert_{H^{2+\delta}(\Gamma_1)}
  \lesssim
  \Vert Q\Vert_{H^{0.5+\delta}} \Vert B\Vert_{H^{2.5+\delta}} \Vert \tilde v\Vert_{H^{2.5+\delta}}
  \\&
  \lesssim
  \Vert Q\Vert_{H^{0.5+\delta}} \Vert W\Vert_{H^{3.5+\delta}(\Gamma_1)}
  ,
\end{split}
\llabel{EQ3173}
\end{align}
using the agreement on constants.
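The passage from the $\Gamma_1$ norms to the interior norms in the second inequality of \eqref{EQ3173} (and in similar steps above) rests on standard trace inequalities; in the form needed here they read, schematically,
\begin{equation*}
   \Vert Q\Vert_{H^{\delta}(\Gamma_1)} \lesssim \Vert Q\Vert_{H^{0.5+\delta}}
   \comma
   \Vert B\Vert_{H^{2+\delta}(\Gamma_1)} \lesssim \Vert B\Vert_{H^{2.5+\delta}}
   \comma
   \Vert \tilde v\Vert_{H^{2+\delta}(\Gamma_1)} \lesssim \Vert \tilde v\Vert_{H^{2.5+\delta}}
   ,
\end{equation*}
which are available since $\delta\geq 0.5$.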
The last three terms in \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3170} are commutators and the sum is estimated easily as \begin{align}\thelt{3ohWj Hx M R86 KJO NKT c3 uyRN nSKH lhb11Q 9C w rf8 iiX qyY L4 zh9s 8NTE ve539G zL g vhD N7F eXo 5k AWAT 6Vrw htDQwy tu H Oa5 UIO Exb Mp V2AH puuC HWItfO ru x YfF qsa P8u fH F16C EBXK tj6ohs uv T 8BB PDN gGf KQ g6MB K2x9 jqRbHm jI U EKB Im0 bbK ac wqIX ijrF uq9906 Vy m 3Ve 1gB dMy 9i hnbA 3gBo 5aBKK5 gf J SmN eCW wOM t9 xutz wDkX IY7nNh Wd D ppZ UOq 2Ae 0a W7A6 XoIc TSLNDZ yf 2 XjB cUw eQT Zt cuXI DYsD hdAu3V MB B BKW IcF NWQ dO u3Fb c6F8 VN77Da } \begin{split} J_{22}+J_{23}+J_{24} &\dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \Vert Q\Vert_{H^{0.5+\delta}} \Vert V\Vert_{H^{1.5+\delta}} , \end{split} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3174} \end{align} employing the Kato-Ponce and trace inequalities. Finally, we add \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3159} and \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3162}, observing that $J_{211}$ and the right-hand side of \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3159} cancel, we obtain~\eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3157}. \par With the tangential estimates completed, we now estimate the difference of the pressures, $Q=q-\tilde q$. 
\par
With the tangential estimates completed, we now estimate the difference of the pressures, $Q=q-\tilde q$.
Subtracting the pressure equation \eqref{EQ380} and its analog for $\tilde q$, we have
\begin{align}
\begin{split}
  \partial_{j}(\tda_{ji} a_{ki}\partial_{k}Q)
  &=
  - \partial_{j}(B_{ji} a_{ki}\partial_{k}\tilde q)
  - \partial_{j}(\tilde b_{ji} A_{ki}\partial_{k}\tilde q)
  + \partial_{j}(\partial_{t}\tda_{ji} V_i)
  + \partial_{j}(\partial_{t}B_{ji} \tilde v_i)
  \\&\indeq
  - \partial_{j} \sum_{m=1}^{2} B_{ji} v_m a_{km} \partial_{k} v_i
  - \partial_{j} \sum_{m=1}^{2} \tilde \tda_{ji} V_m a_{km} \partial_{k} v_i
  \\&\indeq
  - \partial_{j} \sum_{m=1}^{2} \tilde\tda_{ji} \tilde v_m A_{km} \partial_{k} v_i
  - \partial_{j} \sum_{m=1}^{2} \tilde\tda_{ji} \tilde v_m \tilde a_{km} \partial_{k} V_i
  \\&\indeq
  - \partial_{j}(A_{ji} (v_3-\partial_{t}\psi)\partial_{3}v_i)
  - \partial_{j}(\tilde a_{ji} (V_3-\Psi_t)\partial_{3}v_i)
  - \partial_{j}(\tilde a_{ji} (\tilde v_3-\tilde\psi_t)\partial_{3}V_i)
  \\&
  =\partial_{j} f_j
  \inon{in $\Omega$}
  .
\end{split}
\label{EQ3175}
\end{align}
Subtracting \eqref{EQ383} and the same equation for $\tilde q$ gives
\begin{align}
\begin{split}
  &
  \tda_{3i}a_{ki}\partial_{k}Q + Q
  \\&\indeq
  = - B_{3i}a_{ki}\partial_{k}\tilde q
  - \tilde \tda_{3i}A_{ki}\partial_{k}\tilde q
  +\Delta_2^2 W
  + \partial_{t}B_{3i}v_i
  + \partial_{t}\tilde\tda_{3i}V_i
  \\&\indeq\indeq
  - B_{3i} v_1 a_{j1} \partial_{j}v_i
  - \tilde \tda_{3i} V_1 a_{j1} \partial_{j}v_i
  - \tilde \tda_{3i} \tilde v_1 A_{j1} \partial_{j}v_i
  - \tilde\tda_{3i} \tilde v_1 \tilde a_{j1} \partial_{j}V_i
  \\&\indeq\indeq
  - B_{3i} v_2 a_{j2} \partial_{j}v_i
  - \tilde\tda_{3i} V_2 a_{j2} \partial_{j}v_i
  - \tilde\tda_{3i} \tilde v_2 A_{j2} \partial_{j}v_i
  - \tilde \tda_{3i} \tilde v_2 \tilde a_{j2} \partial_{j}V_i
  \\&\indeq\indeq
  - A_{3i} (v_3-\psi_t) \partial_{3} v_i
  - \tilde a_{3i} (V_3-\Psi_t) \partial_{3} v_i
  - \tilde a_{3i} (\tilde v_3-\tilde \psi_t) \partial_{3} V_i
  = g_1
  \inon{on $\Gamma_1$}
  ,
\end{split}
\label{EQ3176}
\end{align}
while from \eqref{EQ384}, we get
\begin{align}
\begin{split}
  \tda_{3i}a_{ki}\partial_{k}Q
  &= - B_{3i}a_{ki}\partial_{k}\tilde q
  - \tilde \tda_{3i}A_{ki}\partial_{k}\tilde q
  \\&\indeq
  - B_{3i} v_1 a_{j1} \partial_{j}v_i
  - \tilde \tda_{3i} V_1 a_{j1} \partial_{j}v_i
  - \tilde \tda_{3i} \tilde v_1 A_{j1} \partial_{j}v_i
  - \tilde\tda_{3i} \tilde v_1 \tilde a_{j1} \partial_{j}V_i
  \\&\indeq
  - B_{3i} v_2 a_{j2} \partial_{j}v_i
  - \tilde\tda_{3i} V_2 a_{j2} \partial_{j}v_i
  - \tilde\tda_{3i} \tilde v_2 A_{j2} \partial_{j}v_i
  - \tilde\tda_{3i} \tilde v_2 \tilde a_{j2} \partial_{j}V_i
  \\&\indeq
  - A_{3i} (v_3-\psi_t) \partial_{3} v_i
  - \tilde a_{3i} ( V_3-\Psi_t) \partial_{3} v_i
  - \tilde a_{3i} (\tilde v_3-\tilde \psi_t) \partial_{3} V_i
  = g_0
  \inon{on $\Gamma_0$}
  .
\end{split}
\label{EQ3177}
\end{align}
Applying the elliptic estimate \eqref{EQ387} with $l=0.5+\delta$, we get
\begin{align}
\begin{split}
  \Vert Q\Vert_{H^{0.5+\delta}}
  \lesssim
  \Vert V\Vert_{H^{1.5+\delta}}
  + \Vert W\Vert_{H^{3+\delta}(\Gamma_1)}
  + \Vert W_{t}\Vert_{H^{1+\delta}(\Gamma_1)}
  .
\end{split}
\label{EQ3178}
\end{align}
This concludes the pressure estimates.
\par
Next, we obtain the vorticity bound for the difference $Z=\zeta-\tilde \zeta$.
We use the approach from Section~\ref{sec05} by extending $\zeta$ and $\tilde\zeta$ to $\theta$ and $\tilde\theta$, respectively, with the extensions defined on ${\mathbb T}^2\times{\mathbb R}$.
For simplicity of notation, we do not distinguish between functions defined in $\Omega$ and their extensions, i.e., we assume that the quantities $b$, $\tilde b$, $J$, $\tilde J$, $v$, and $\tilde v$ are already extended to ${\mathbb T}^2\times{\mathbb R}$ and that the Jacobian~$J$ is bounded as in~\eqref{EQ3220}.
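In the computations below we only use that this extension is bounded on the relevant Sobolev scale; assuming the extension employed in Section~\ref{sec05} is a Sobolev extension operator, this amounts to, schematically,
\begin{equation*}
   \Vert \theta\Vert_{H^{s}({\mathbb T}^2\times{\mathbb R})} \lesssim \Vert \zeta\Vert_{H^{s}(\Omega)}
   \comma
   \Vert \tilde\theta\Vert_{H^{s}({\mathbb T}^2\times{\mathbb R})} \lesssim \Vert \tilde\zeta\Vert_{H^{s}(\Omega)}
\end{equation*}
for the values of $s$ appearing below; if the extension is linear, the same bound holds for the difference $\Theta=\theta-\tilde\theta$.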
\par
The equation for $\Theta=\theta-\tilde\theta$ then reads
\begin{align}
\begin{split}
  &
  J\partial_{t}\Theta_i
  + v_1 \tda_{j1}\partial_{j} \Theta_i
  + v_2 \tda_{j2}\partial_{j} \Theta_i
  + ( v_3- \psi_t) \tda_{j3} \partial_{j} \Theta_i
  = F_i
  ,
\end{split}
\label{EQ3179}
\end{align}
where
\begin{align}
\begin{split}
  F_i
  &=
  - (J-\tilde J)\partial_{t}\tilde \theta_i
  - V_1 \tda_{j1}\partial_{j} \tilde\theta_i
  - \tilde v_1 B_{j1}\partial_{j}\tilde \theta_i
  - V_2 \tda_{j2}\partial_{j} \tilde\theta_i
  - \tilde v_2 B_{j2}\partial_{j}\tilde \theta_i
  \\&\indeq\indeq
  - ( V_3- \Psi_t) \tda_{j3} \partial_{j} \tilde \theta_i
  - ( \tilde v_3- \tilde \psi_t) B_{j3} \partial_{j} \tilde\theta_i
  + \theta_k \tda_{mk}\partial_{m} V_i
  + \Theta_k \tda_{mk}\partial_{m} \tilde v_i
  + \tilde \theta_k B_{mk}\partial_{m} \tilde v_i
  \comma i=1,2,3
  .
\end{split}
\llabel{EQ3180}
\end{align}
We proceed as in \eqref{EQ3109}, except that we use $\Lambda_3^{0.5+\delta}$ instead of $\Lambda_3^{1.5+\delta}$. We get
\begin{align}
\begin{split}
  \frac12 \frac{d}{dt} \int_{\Omega_0} \tilde J|\Lambda_3^{0.5+\delta}\Theta|^2
  &=
  - \sum_{m=1}^{2} \int_{\Omega_0} \tilde v_m \tilde\tda_{jm}\partial_{j} \Lambda_3^{0.5+\delta}\Theta_i \Lambda_3^{0.5+\delta}\Theta_i
  - \int_{\Omega_0} (\tilde v_3-\tilde\psi_t) \tilde\tda_{j3} \partial_{j} \Lambda_3^{0.5+\delta}\Theta_i \Lambda_3^{1.5+\delta}\Theta_i
  \\&\indeq
  - \sum_{m=1}^{2} \int_{\Omega_0}
  \Bigl(\Lambda_3^{0.5+\delta}(\tilde v_m \tilde\tda_{jm}\partial_{j} \Theta_i )
  - \tilde v_m \tilde\tda_{jm}\partial_{j} \Lambda_3^{0.5+\delta}\Theta_i \Bigr)
  \Lambda_3^{0.5+\delta}\Theta_i
  \\&\indeq
  - \int_{\Omega_0}
  \Bigl( \Lambda_3^{0.5+\delta}( (\tilde v_3-\tilde\psi_t) \tilde\tda_{j3} \partial_{j}\Theta_i )
  - (\tilde v_3-\tilde\psi_t) \tilde\tda_{j3} \partial_{j} \Lambda_3^{0.5+\delta}\Theta_i \Bigr)
  \Lambda_3^{0.5+\delta}\Theta_i
  \\&\indeq
  + \frac12 \int_{\Omega_0} \tilde J_t |\Lambda_3^{1+\delta} \Theta|^2
  + \int_{\Omega_0}
  \Bigl( \Lambda_3^{0.5+\delta}(\tilde J\partial_{t} \Theta_i)
  - \tilde J \Lambda_3^{0.5+\delta} (\partial_{t}\Theta_i) \Bigr)
  \Lambda_3^{0.5+\delta}\Theta_i
  \\&\indeq
  + \int_{\Omega_0} \Lambda_3^{0.5+\delta} F_i \Lambda_3^{0.5+\delta}\Theta_i
  \\&
  = I_1+\cdots +I_7
  .
\end{split}
\label{EQ3181}
\end{align}
For the first five terms in \eqref{EQ3181}, we have
\begin{equation}
   I_1 + \cdots + I_5
   \lesssim
   \Vert \Theta\Vert_{H^{0.5+\delta}}^2
   ,
   \label{EQ3182}
\end{equation}
where, as above, the constant depends on
$\Vert v\Vert_{H^{2.5+\delta}}$,
$\Vert \tilde v\Vert_{H^{2.5+\delta}}$,
$\Vert w\Vert_{H^{4+\delta}(\Gamma_1)}$,
$\Vert \tilde w\Vert_{H^{4+\delta}(\Gamma_1)}$,
$\Vert w_{t}\Vert_{H^{2+\delta}(\Gamma_1)}$, and
$\Vert \tilde w_{t}\Vert_{H^{2+\delta}(\Gamma_1)}$.
For the sixth term, which involves the time derivative of the vorticity, we have
\begin{align}
\begin{split}
I_6
\lesssim
\Vert \tilde J \Vert_{H^{3.5+\delta}}
\Vert \Theta_t \Vert_{H^{-0.5+\delta}}
\Vert \Theta \Vert_{H^{0.5+\delta}}
,
\end{split}
\llabel{EQ3183}
\end{align}
since we assumed \eqref{EQ3155}. To estimate the right-hand side, we use \eqref{EQ3179}, obtaining
\begin{align}
\begin{split}
I_6
&\lesssim
\Vert \tilde J \Vert_{H^{3.5+\delta}}
\Vert \Theta \Vert_{H^{0.5+\delta}}
\Vert \Theta \Vert_{H^{0.5+\delta}}
+
\Vert \tilde J \Vert_{H^{3.5+\delta}}
\Vert F \Vert_{H^{-0.5+\delta}}
\Vert \Theta \Vert_{H^{0.5+\delta}}
\\&
\lesssim
( \Vert V\Vert_{H^{1.5+\delta}} + \Vert \Theta\Vert_{H^{0.5+\delta}} + \Vert W\Vert_{H^{3+\delta}(\Gamma_1)} + \Vert W_{t}\Vert_{H^{1+\delta}(\Gamma_1)} ) \Vert \Theta\Vert_{H^{0.5+\delta}}
,
\end{split}
\label{EQ3184}
\end{align}
where we also used \eqref{EQ338}.
The last term in \eqref{EQ3181} is estimated similarly to the first five, using the fractional product rule, leading to
\begin{equation}
I_7 \lesssim
( \Vert V\Vert_{H^{1.5+\delta}} + \Vert \Theta\Vert_{H^{0.5+\delta}} + \Vert W\Vert_{H^{3+\delta}(\Gamma_1)} + \Vert W_{t}\Vert_{H^{1+\delta}(\Gamma_1)} ) \Vert \Theta\Vert_{H^{0.5+\delta}}
.
\label{EQ3185}
\end{equation}
Using the estimates \eqref{EQ3182}, \eqref{EQ3184}, and \eqref{EQ3185} in \eqref{EQ3181}, we get
\begin{align}
\begin{split}
\frac{d}{dt} \int_{\Omega_0} \tilde J|\Lambda_3^{0.5+\delta}\Theta|^2
\lesssim
( \Vert V\Vert_{H^{1.5+\delta}} + \Vert \Theta\Vert_{H^{0.5+\delta}} + \Vert W\Vert_{H^{3+\delta}(\Gamma_1)} + \Vert W_{t}\Vert_{H^{1+\delta}(\Gamma_1)} ) \Vert \Theta\Vert_{H^{0.5+\delta}}
,
\end{split}
\llabel{EQ3186}
\end{align}
and then, using $1/4\leq \tilde J\leq 2$, we obtain
\begin{align}
\begin{split}
& \frac{d}{dt} \int_{\Omega_0} \tilde J|\Lambda_3^{0.5+\delta}\Theta|^2
\\&\indeq
\lesssim
( \Vert V\Vert_{H^{1.5+\delta}} + \Vert \Theta\Vert_{H^{0.5+\delta}} + \Vert W\Vert_{H^{3+\delta}(\Gamma_1)} + \Vert W_{t}\Vert_{H^{1+\delta}(\Gamma_1)} )
\left(\int_{\Omega_0} \tilde J|\Lambda_3^{0.5+\delta}\Theta|^2\right)^{1/2}
,
\end{split}
\label{EQ3187}
\end{align}
concluding the vorticity estimates.
\par
Finally, we apply a standard barrier argument to \eqref{EQ3157}, \eqref{EQ3178}, and \eqref{EQ3187}.
\end{proof}
\par
\startnewsection{The local existence}{secle}
In this section, we construct a solution to the Euler-plate model, thus proving Theorem~\ref{T03}.
\par
\subsection{Euler equations with given variable coefficients}
\label{sec09}
\par
We start by assuming that the function $w$ on the top boundary $\Gamma_{1}$ is given, and consider the Euler equations with given variable coefficients
\begin{align}
\begin{split}
& \partial_{t} v_i + v_1 \tilde{a}_{j1} \partial_{j} v_i + v_2 \tilde{a}_{j2} \partial_{j} v_i + (v_3-\psi_t)\tilde{a}_{33} \partial_{3} v_i + \tilde{a}_{ki}\partial_{k}q=0 ,
\\&
\tilde{a}_{ki} \partial_{k}v_i=0 .
\end{split}
\label{EQ3188}
\end{align}
Here $\tilde{a}$ is defined as the inverse of the matrix $\nabla \tilde{\eta}$, where $\tilde{\eta}= (x_{1}, x_{2}, \psi)$ and $\psi$ is a harmonic function satisfying the boundary value problem
\begin{align}
\begin{split}
&\Delta \psi = 0 \inon{in $\Omega\times [0,T]$}
\\&
\psi(x_1,x_2,1,t)=1+w(x_1,x_2,t) \inon{on $\Gamma_1\times [0,T]$}
\\&
\psi(x_1,x_2,0,t)=0 \inon{on $\Gamma_0\times [0,T]$}
.
\end{split}
\label{EQ3189}
\end{align}
More explicitly, we have
\begin{align}
\begin{split}
\tilde{a}=
\left(
\begin{matrix}
1 & 0 & 0 \\
0 & 1 & 0\\
-\fractext{\partial_{1} \psi}{\partial_{3} \psi} & -\fractext{\partial_{2} \psi}{\partial_{3} \psi} & \fractext{1}{\partial_{3} \psi}
\end{matrix}
\right)
,
\end{split}
\label{EQ3190}
\end{align}
and $\tilde{b}$ is the cofactor matrix
\begin{align}
\begin{split}
\tilde{b}
= (\partial_{3} \psi)\tilde{a}
=
\begin{pmatrix}
\partial_{3}\psi & 0 & 0 \\
0 & \partial_{3} \psi & 0 \\
-\partial_{1}\psi & -\partial_{2}\psi & 1
\end{pmatrix}
.
\end{split}
\label{EQ3191}
\end{align}
Note that, since $\tilde b$ is a cofactor matrix (or by a direct verification), it satisfies the Piola identity
\begin{equation}
\partial_i \tilde b_{ij} =0
\comma j=1,2,3
.
\llabel{EQ3192}
\end{equation}
We impose the boundary condition
\begin{equation}
v_3=0
\inon{on $\Gamma_0$}
\label{EQ3193}
\end{equation}
on the bottom boundary $\Gamma_0$ and
\begin{equation}
\tilde{b}_{3i}v_i = w_{t}
\inon{on $\Gamma_1$}
\label{EQ3194}
\end{equation}
on~$\Gamma_{1}$. Assume that we have
\begin{equation}
(w,w_{t},w_{tt})
\in
L^{\infty}([0,T]; H^{4+\delta}(\Gamma_{1}) \times H^{2+\delta}(\Gamma_{1}) \times H^{\delta}(\Gamma_{1}))
\label{EQ3195}
\end{equation}
with
\begin{align}
\int_{\Gamma_{1}}w_{t} =0
\label{EQ3196}
\end{align}
and $w_{0} =0$, so that $\psi(0,x) =x_{3}$ and $\tilde{a}(0)=I$.
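For illustration, the Piola identity \eqref{EQ3192} may also be checked directly from the explicit form \eqref{EQ3191}: by the equality of mixed partial derivatives,
\begin{equation*}
\partial_i \tilde b_{i1} = \partial_1\partial_{3}\psi + 0 - \partial_3\partial_{1}\psi = 0
, \qquad
\partial_i \tilde b_{i2} = 0 + \partial_2\partial_{3}\psi - \partial_3\partial_{2}\psi = 0
, \qquad
\partial_i \tilde b_{i3} = \partial_1 0 + \partial_2 0 + \partial_3 1 = 0
.
\end{equation*}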
We further assume that the matrix $\nabla \tilde{\eta}$ is non-singular on $[0,T]$ with a well-defined inverse $\tilde{a}$ (i.e., $\partial_{3} \psi \neq 0$) and is such that
\begin{align}
\Vert \tilde{a} -I \Vert_{L^{\infty}([0,T];H^{1.5+\delta}(\Omega))}
\leq \epsilon
\label{EQ3197}
\end{align}
and
\begin{align}
\Vert \tilde{b} -I \Vert_{L^{\infty}([0,T];H^{1.5+\delta}(\Omega))}
\leq \epsilon
,
\label{EQ3198}
\end{align}
for some $\epsilon >0$ sufficiently small. Note that we have the estimate
\begin{align}
\Vert \tilde{a} \Vert_{L^{\infty}([0,T];H^{s-1/2}(\Omega))}
\lesssim
1+ \Vert w \Vert_{L^{\infty}([0,T];H^{s}(\Gamma_{1}))}
,
\label{EQ3199}
\end{align}
for $t\in [0,T]$ and $s >1/2$.
\par
We prove the following theorem pertaining to the above Euler system with given coefficients.
\par
\cole
\begin{Theorem}
\label{T04}
Assume that $v_{0}\in H^{2.5 +\delta}$, where $\delta \geq 0.5$, satisfies \eqref{EQ3193}--\eqref{EQ3194}, and suppose
\begin{equation}
(w, w_{t},w_{tt} )
\in L^{\infty}([0,T]; H^{4+\delta}(\Gamma_{1}) \times H^{2+\delta} (\Gamma_{1}) \times H^{\delta} (\Gamma_{1}))
\llabel{EQ3200}
\end{equation}
with $w_0=0$ and the compatibility condition \eqref{EQ3196}, as well as \eqref{EQ327}--\eqref{EQ329}.
Then, there exists a local-in-time solution $(v,q)$ to the system \eqref{EQ3188} with the boundary conditions \eqref{EQ3193} and \eqref{EQ3194} such that
\begin{align}
\begin{split}
&v \in L^{\infty}([0,T];H^{2.5+ \delta}(\Omega))
\\&
v_{t} \in L^{\infty}([0,T];H^{0.5 +\delta}(\Omega))
\\&
q \in L^{\infty}([0,T];H^{1.5 + \delta}(\Omega)),
\end{split}
\llabel{EQ3201}
\end{align}
for some time $T>0$ depending on the initial data and $(w, w_{t})$. The solution is unique, up to an additive function of time in the pressure $q$. Moreover, the solution $(v,q)$ satisfies the estimate
\begin{align}
\begin{split}
\Vert v(t) \Vert_{H^{2.5+\delta}}
+ \Vert \nabla q(t) \Vert_{H^{0.5+\delta}}
\leq
\Vert v_{0} \Vert_{H^{2.5+\delta}}
+ \int_{0}^{t} P( \Vert w(s) \Vert_{H^{4+\delta}(\Gamma_{1})}, \Vert w_{t}(s) \Vert_{H^{2+\delta}(\Gamma_{1})} )\, ds
,
\end{split}
\label{EQ3202}
\end{align}
for $t\in[0,T)$.
\end{Theorem}
\colb
\par
In the proof of the theorem, we shall employ the generalized vorticity corresponding to a given velocity (see \eqref{EQ3227} below). In order to estimate the velocity from the vorticity, we use the following div-curl theorem.
\par
\cole
\begin{lemma}
\label{L06}
For a fixed time $t\in[0,T]$, consider the system
\begin{align}
\begin{split}
& \epsilon_{ijk} \tilde{b}_{mj}\partial_{m} v_{k} =\zeta_{i} \inon{in $\Omega$} \comma i=1,2,3
\\&
\tilde{b}_{mj}\partial_{m} v_{j} =0 \inon{in $\Omega$}
\\&
\tilde{b}_{3j} v_{j} = \psi_t \inon{on $\Gamma_0 \cup \Gamma_1$}
,
\end{split}
\llabel{EQ3203}
\end{align}
where $\zeta\in H^{1.5+\delta}$ with $\tilde{b} \in H^{2.5+\delta}$ and $\Vert \tilde{b}-I\Vert_{L^{\infty}}\leq \epsilon_0$. If $\epsilon_0>0$ is sufficiently small, then $v$ satisfies the estimate
\begin{equation}
\Vert v\Vert_{H^{2.5+\delta}}
\lesssim
\Vert \zeta\Vert_{H^{1.5+\delta}}
+ \Vert w_t\Vert_{H^{2+\delta}(\Gamma_1)}
+ \Vert v\Vert_{L^2}
,
\label{EQ3204}
\end{equation}
where the implicit constant depends on the bound on~$\tilde{b}$.
\end{lemma}
\colb
\par
\begin{proof}[Proof of Lemma~\ref{L06}]
(sketch) The proof is standard and is obtained by rewriting the system as
\begin{align}
\begin{split}
& \epsilon_{ijk} \delta_{mj}\partial_{m} v_{k} = \zeta_{i} + \epsilon_{ijk} (\delta_{mj}-\tilde{b}_{mj}) \partial_{m} v_{k} \inon{in $\Omega\times[0,T]$}
\\&
\delta_{mj}\partial_{m} v_{j} = (\delta_{mj}-\tilde{b}_{mj})\partial_{m} v_{j} \inon{in $\Omega\times [0,T]$}
\\&
v_{3}= \psi_t + ({\delta}_{3j} - \tilde{b}_{3j}) v_{j} \inon{on $(\Gamma_0 \cup \Gamma_1) \times [0,T]$}
.
\end{split}
\llabel{EQ3205}
\end{align}
The rest depends on the classical div-curl estimates as in \cite{BB} and the smallness assumption $\Vert \tilde{b}-I\Vert_{L^{\infty}}\leq \epsilon_0$.
\end{proof}
\colb
\par
\colb
\begin{proof}[Proof of Theorem~\ref{T04}]
We prove the theorem in three steps.
\par
\emph{Step 1: Linear Problem.}
Assume that $w,w_{t}, w_{tt}$ satisfy the assumptions in the theorem, but with the additional regularity
\begin{equation}
(w, w_{t},w_{tt} )
\in L^{\infty}([0,T]; H^{6+\delta}(\Gamma_{1}) \times H^{4+\delta} (\Gamma_{1}) \times H^{2+\delta} (\Gamma_{1})),
\label{EQ3206}
\end{equation}
and $\tilde{a}$ as defined above. Denote by $E$ the Sobolev extension $H^{k}(\Omega)\to H^{k}(\Omega_0)$ for all $k\in[0,5]$, where $\Omega_0=\mathbb{T}^2\times [-1,2]$ (which is different from the domain used in Section~\ref{sec05}).
We consider the linear transport equation
\begin{align}
\begin{split}
& \partial_{t} v_i
+ E(\tilde{v}_1) E(\tilde{a}_{j1}) \partial_{j} v_i
+ E(\tilde{v}_2) E(\tilde{a}_{j2}) \partial_{j} v_i
+ E(\tilde{v}_3-\psi_t)E(\tilde{a}_{33}) \partial_{3} v_i
+ E(\tilde{a}_{ki})E(\partial_{k}\tilde{q})=0
\inon{in $\mathbb{T}^2\times\mathbb{R}$}
\end{split}
\label{EQ3207}
\end{align}
with $\tilde{v} \in L^{\infty}([0,T];H^{2.5+ \delta}(\Omega))$ a given periodic function in the $x_{1}, x_{2}$ directions. In \eqref{EQ3207}, the pressure function $\tilde{q}$ is given as the solution to the elliptic problem
\begin{align}
\begin{split}
\partial_{j}(\tdb_{ji} \tilde{a}_{ki}\partial_{k}\tilde{q})
&=
\partial_{j}(\partial_{t}\tdb_{ji} \tilde{v}_i)
- \sum_{m=1}^{2} \tdb_{ji} \partial_{j}(\tilde{v}_m \tilde{a}_{km}) \partial_{k} \tilde{v}_i
- \tdb_{ji} \partial_{j}(J^{-1}(\tilde{v}_3-\psi_t))\partial_{3}\tilde{v}_i
\\&\indeq\indeq
+ \sum_{m=1}^{2} \tilde{v}_m \tilde{a}_{km} \partial_{k}\tdb_{ji} \partial_{j}\tilde{v}_i
+ J^{-1}(\tilde{v}_3-\psi_t)\partial_{3}\tdb_{ji}\partial_{j}\tilde{v}_i
+ \mathcal{E}
= \tilde{f}
\inon{in $\Omega$}
,
\end{split}
\label{EQ3210}
\end{align}
with the Neumann boundary conditions
\begin{align}
\begin{split}
& \tdb_{3i}\tilde{a}_{ki}\partial_{k}\tilde{q} = 0 = \tilde{g}_0
\inon{on $\Gamma_0$}
,
\end{split}
\label{EQ3211}
\end{align}
and
\begin{align}
\begin{split}
& \tdb_{3i}\tilde{a}_{ki}\partial_{k}\tilde{q}
= -w_{tt} +\partial_{t}\tdb_{3i}\tilde{v}_i
- \frac{1}{\partial_{3} \psi}
\biggl(\sum_{j=1}^{2} \tilde{v}_k \tdb_{jk} \partial_{j} ( w_{t})
+w_{t} \partial_{3} ( \tdb_{3i})\tilde{v}_{i}
- \tilde{v}_k \tdb_{jk} \partial_{j} ( \tdb_{3i}) \tilde{v}_i\biggr)
= \tilde{g}_1
\inon{on $\Gamma_1$}
.
\end{split}
\label{EQ3212}
\end{align}
Note that \eqref{EQ3210} and \eqref{EQ3212} are suggested by Remark~\ref{R01}. The function of time
\begin{align}
\begin{split}
\mathcal{E}
&=
\frac{1}{|\Omega|} \int \partial_{k} (\tilde{a}_{km} \tilde{v}_{m}) \tdb_{ji} \partial_{j} \tilde{v}_{i}
- \frac{1}{|\Omega|} \int_{\Gamma} \tilde{v}_{m} \tilde{a}_{3m} \tdb_{ji}\partial_{j} \tilde{v}_{i}
-\frac{1}{|\Omega|} \int \partial_{3} (\tilde{a}_{33} \psi_{t}) \tdb_{ji} \partial_{j} \tilde{v}_{i}
\\&\indeq
+ \frac{1}{|\Omega|} \int_{\Gamma} \psi_{t} \tilde{a}_{33}\tdb_{ji} \partial_{j} \tilde{v}_{i}
+ \frac{1}{|\Omega|} \int_{\Gamma} \frac{1}{\partial_{3} \psi} ( \tilde{b}_{3k} \tilde{v}_{k}- \psi_{t}) \tdb_{3i} \partial_{3} \tilde{v}_i
+ \frac{1}{|\Omega|} \sum_{k=1}^{2} \int_{\Gamma} \tilde{a}_{km} \tilde{v}_{m} \partial_{k} (\tdb_{3i} \tilde{v}_{i} -\psi_{t})
\\ & \indeq
+ \frac{1}{|\Omega|} \int_{\Gamma} \frac{1}{\partial_{3} \psi} (\tdb_{3i} \tilde{v}_{i} - \psi_{t} ) \partial_{3} \tdb_{3i}\tilde{v}_{i}
-\frac{1}{|\Omega|} \int_{\Gamma_{0}} \tilde{v}_k \tilde{a}_{jk} \partial_{j} \tdb_{3i} \tilde{v}_i
,
\end{split}
\label{EQ3348}
\end{align}
where
\begin{equation}
\Gamma=\Gamma_0 \cup \Gamma_1
,
\llabel{EQ3351}
\end{equation}
is introduced to ensure the validity of the compatibility condition
\begin{equation}
\int_{\Omega} \tilde{f} = \int_{\Gamma_{1}} \tilde{g}_{1}
;
\label{EQ3130}
\end{equation}
see Appendix for the verification of~\eqref{EQ3130}.
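For orientation, we recall where a condition of the form \eqref{EQ3130} comes from: if $\tilde q$ solves \eqref{EQ3210}--\eqref{EQ3212}, then integrating \eqref{EQ3210} over $\Omega$ and using the divergence theorem, the horizontal periodicity, and the fact that the outward unit normal equals $e_{3}$ on $\Gamma_{1}$ and $-e_{3}$ on $\Gamma_{0}$, we get
\begin{equation*}
\int_{\Omega} \tilde{f}
= \int_{\Omega} \partial_{j}(\tdb_{ji}\tilde{a}_{ki}\partial_{k}\tilde{q})
= \int_{\Gamma_{1}} \tdb_{3i}\tilde{a}_{ki}\partial_{k}\tilde{q}
- \int_{\Gamma_{0}} \tdb_{3i}\tilde{a}_{ki}\partial_{k}\tilde{q}
= \int_{\Gamma_{1}} \tilde{g}_{1}
,
\end{equation*}
by \eqref{EQ3211} and \eqref{EQ3212}.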
The condition~\eqref{EQ3130} is necessary and sufficient for the existence of the solution $\tilde q$ to the Neumann boundary value problem \eqref{EQ3210}--\eqref{EQ3212}, which satisfies the estimate
\begin{align}
\begin{split}
\Vert \nabla \tilde{q}\Vert_{H^{2.5+\delta}}
\lesssim
\Vert \tilde{f} \Vert_{H^{1.5+\delta}}
+ \Vert \tilde{g}_0 \Vert_{H^{2+\delta}(\Gamma_0)}
+ \Vert \tilde{g}_1\Vert_{H^{2+\delta}(\Gamma_1)}
\end{split}
\llabel{EQ3213}
\end{align}
and is determined up to a constant.
\par
Estimating $\tilde{f}$ defined in \eqref{EQ3210} in $H^{1.5 +\delta}$, we have
\begin{align}
\Vert \tilde{f} \Vert_{H^{1.5+ \delta}}
&\lesssim
\Vert \partial_{t}\tdb\Vert_{H^{2.5+ \delta}} \Vert \tilde{v}\Vert_{H^{2.5+ \delta}}
+ \Vert \tdb \Vert_{H^{2.5+\delta}} \Vert \tilde{v}\Vert^{2}_{H^{2.5+\delta}}\Vert \tilde{a} \Vert_{H^{2.5+ \delta}}
+ \Vert \tilde{v}\Vert_{H^{2.5+\delta}} \Vert \tilde{a} \Vert_{H^{2.5+ \delta}} \Vert \psi_{t}\Vert_{H^{2.5+\delta}}
,
\llabel{EQ3350}
\end{align}
while $\tilde{g}_{1}$ from \eqref{EQ3212} may be bounded as
\begin{align}
\begin{split}
\Vert \tilde{g}_{1} \Vert_{H^{2+ \delta}(\Gamma_{1})}
&\lesssim
\Vert w_{tt} \Vert_{H^{2+ \delta}(\Gamma_{1})}
+ \Vert \partial_{t}\tdb \Vert_{H^{2+ \delta}(\Gamma_{1})} \Vert\tilde{v}\Vert_{H^{2+ \delta}(\Gamma_{1})}
\\&\indeq
+ \Vert\tilde{v}\Vert_{H^{2+ \delta}(\Gamma_{1})} \Vert\tdb \Vert_{H^{2+ \delta}(\Gamma_{1})} \Vert w_{t}\Vert_{H^{3+ \delta}(\Gamma_{1})}
+ \Vert \tilde{v} \Vert^{2}_{H^{2+ \delta}(\Gamma_{1})} \Vert\tdb \Vert^{2}_{H^{3+ \delta}(\Gamma_{1})}
.
\end{split}
\llabel{EQ3349}
\end{align}
Therefore,
\begin{align}
\begin{split}
\Vert \nabla \tilde{q}\Vert_{H^{2.5+\delta}}
\le
P( \Vert w_{tt}\Vert_{H^{2+\delta}(\Gamma_{1})}, \Vert \tilde{a}\Vert_{H^{3.5+\delta}}, \Vert \tdb\Vert_{H^{3.5+\delta}}, \Vert \tdb_t\Vert_{H^{2.5+\delta}}, \Vert \psi_t\Vert_{H^{2.5+\delta}}, \Vert \tilde{v}\Vert_{H^{2.5+\delta}} )
.
\end{split}
\label{EQ3214}
\end{align}
Since $\tilde q$ is given, the linear equation \eqref{EQ3207} has the structure of a transport system
\begin{align}
\begin{split}
& \partial_{t} v_{i} + E(K)\cdot \nabla v_{i} = F_{i}
\inon{in $\mathbb{T}^2\times\mathbb{R}$}
\comma i=1,2,3
,
\end{split}
\llabel{EQ3215}
\end{align}
where $K \in L^{\infty}([0,T];H^{2.5+ \delta})$ and $F \in L^{\infty}([0,T];H^{2.5+ \delta})$.
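The existence part of the next assertion can be seen, for instance, through the characteristics of \eqref{EQ3215}: writing $X(t;x)$ for the flow of the extended drift, i.e.,
\begin{equation*}
\partial_{t} X(t;x) = E(K)(X(t;x),t)
, \qquad
X(0;x)=x
,
\end{equation*}
the solution is represented, with the initial datum extended accordingly, as
\begin{equation*}
v_{i}(X(t;x),t) = v_{0i}(x) + \int_{0}^{t} F_{i}(X(s;x),s)\,ds
\comma i=1,2,3
,
\end{equation*}
since $\frac{d}{dt} v_{i}(X(t;x),t) = \partial_{t}v_{i} + E(K)\cdot\nabla v_{i} = F_{i}$ along the flow; the quantitative bound recorded next is then obtained by standard energy estimates for transport equations.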
The existence of a solution $v \in L^{\infty}([0,T];H^{2.5+ \delta})$ is standard, and in addition we have the estimate
\begin{align}
\begin{split}
&\Vert v \Vert_{L^{\infty}([0,T];H^{2.5+\delta})}
\\&\indeq
\lesssim
\Vert v_{0} \Vert_{H^{2.5+\delta}}
+
\int_{0}^{T} P( \Vert w_{tt} \Vert_{H^{2+\delta}(\Gamma_{1})}, \Vert \tilde{a}\Vert_{H^{3.5+\delta}}, \Vert \tdb\Vert_{H^{3.5+\delta}}, \Vert \tdb_t\Vert_{H^{1.5+\delta}}, \Vert \psi_t\Vert_{H^{2.5+\delta}}, \Vert \tilde{v}\Vert_{H^{2.5+\delta}} ) \,ds
.
\end{split}
\llabel{EQ3217}
\end{align}
\par
\emph{Step 2: Local-in-time solution of the nonlinear problem with more regular boundary data.}
In the second step we still assume~\eqref{EQ3206} and aim to solve the nonlinear problem
\begin{align}
\begin{split}
& \partial_{t} v_i + v_1 \tilde{a}_{j1} \partial_{j} v_i +v_2 \tilde{a}_{j2} \partial_{j} v_i + (v_3-\psi_t)\tilde{a}_{33} \partial_{3} v_i + \tilde{a}_{ki}\partial_{k}q=0
\inon{in $\Omega$}
\comma i=1,2,3,
\\&
\tilde b_{ji}\partial_{j}v_i=0
\inon{in $\Omega$}
\\&
v_3=0
\inon{on $\Gamma_0$},
\\&
\tilde{b}_{3i}v_i = w_{t}
\inon{on $\Gamma_1$}
,
\end{split}
\label{EQ3218}
\end{align}
using the iteration
\begin{align}
\begin{split}
& \partial_{t} v^{(n+1)}_i
+ E(v^{(n)}_1) E(\tilde{a}_{j1}) \partial_{j} v^{(n+1)}_i
+E(v^{(n)}_2) E(\tilde{a}_{j2}) \partial_{j} v^{(n+1)}_i
\\&\indeq
+ E(v^{(n)}_3-\psi_t)E(\tilde{a}_{33}) \partial_{3} v^{(n+1)}_i
+ E(\tilde{a}_{ki})E(\partial_{k}q^{(n+1)})=0
\inon{in $\Omega_{0}$}
\comma i=1,2,3
,
\end{split}
\label{EQ3219}
\end{align}
where $q^{(n+1)}$ is obtained by solving the system \eqref{EQ3210}--\eqref{EQ3212} with $\tilde v$ replaced by $v^{(n)}$.
\par
Note that, given $\tilde{v}=v^{(n)}$, we solve for $\tilde{q}=q^{(n+1)}$ and then obtain $v^{(n+1)}$ as in Step~1. We now proceed by using a fixed point argument. We first choose $M>0$ sufficiently large and a time $T$ sufficiently small so that $ \Vert v^{(n)} \Vert_{L^{\infty}([0,T];H^{2.5+\delta})} \leq M$ for all $n$, and we establish that the mapping $v^{(n)} \mapsto v^{(n+1)}$ is a contraction in the norm of $L^{\infty} ([0,T];L^2)$. For $n\in{\mathbb N}_0$, denote $V^{(n)}=v^{(n)} - v^{(n-1)}$ and $Q^{(n)}=q^{(n)} - q^{(n-1)}$.
Note that the function $V^{(n+1)}$ satisfies \begin{align}\thelt{T9WWo r7 A 3Wf XxA 2f2 Vl XZS1 Ttsa b4n6R3 BK X 0XJ Tml kVt cW TMCs iFVy jfcrze Jk 5 MBx wR7 zzV On jlLz Uz5u LeqWjD ul 7 OnY ICG G9i Ry bTsY JXfr Rnub3p 16 J BQd 0zQ OkK ZK 6DeV gpXR ceOExL Y3 W KrX YyI e7d qM qanC CTjF W71LQ8 9m Q w1g Asw nYS Me WlHz 7ud7 xBwxF3 m8 u sa6 6yr 0nS ds Ywuq wXdD 0fRjFp eL O e0r csI uMG rS OqRE W5pl ybq3rF rk 7 YmL URU SSV YG ruD6 ksnL XBkvVS 2q 0 ljM PpI L27 Qd ZMUP baOo Lqt3bh n6 R X9h PAd QRp 9P I4fB kJ8u ILIArp } \begin{split} & \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{t} V^{(n+1)}_i + E(v^{(n)}_k) E(\tilde{a}_{jk}) \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j} V^{(n+1)}_i + E(V^{(n)}_k) E(\tilde{a}_{jk}) \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j} v^{(n)}_i \\&\indeq - E(\psi_t) E(\tilde{a}_{33}) \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3} V^{(n+1)}_i + E(\tilde{a}_{ki}) E(\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k}Q^{(n+1)}) =0 \inon{in $\Omega_{0}$} . \end{split} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3222} \end{align} Applying the differential operator $(I-\Delta)^{1/2}$, multiplying with $(I-\Delta)^{1/2}V^{(n+1)}$, and integrating in time and space, we may then estimate the norm of $V$ in $H^{1}$ as \begin{align}\thelt{pXR ceOExL Y3 W KrX YyI e7d qM qanC CTjF W71LQ8 9m Q w1g Asw nYS Me WlHz 7ud7 xBwxF3 m8 u sa6 6yr 0nS ds Ywuq wXdD 0fRjFp eL O e0r csI uMG rS OqRE W5pl ybq3rF rk 7 YmL URU SSV YG ruD6 ksnL XBkvVS 2q 0 ljM PpI L27 Qd ZMUP baOo Lqt3bh n6 R X9h PAd QRp 9P I4fB kJ8u ILIArp Tl 4 E6j rUY wuF Xi FYaD VvrD b2zVpv Gg 6 zFY ojS bMB hr 4pW8 OwDN Uao2mh DT S cei 90K rsm wa BnNU sHe6 RpIq1h XF N Pm0 iVs nGk bC Jr8V megl 416tU2 nn o llO tcF UM7 c4 GC8C lasl J0} \begin{split} \Vert V^{(n+1)}(t) \Vert^{2}_{H^{1}} & \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH_{0}^{t} \Vert V^{(n+1)} \Vert^{2}_{H^{1}} \Vert \tilde{a}\Vert_{H^{2.5+\delta}} ( \Vert v^{(n)}\Vert_{H^{2.5+\delta}}+\Vert \psi_{t} \Vert_{H^{2.5+\delta}}) \,ds \\ & \indeq \indeq + \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH_{0}^{t} \Vert V^{(n)} \Vert_{H^{1}} \Vert V^{(n+1)} \Vert_{H^{1}} \Vert \tilde{a}\Vert_{H^{2.5+\delta}} \Vert v^{(n)}\Vert_{H^{2.5+\delta}} \,ds \\ & \indeq \indeq + \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH_{0}^{t} \Vert \nabla Q^{(n+1)} \Vert_{H^{1}} \Vert V^{(n+1)} \Vert_{H^{1}} \Vert \tilde{a}\Vert_{H^{2.5+\delta}} \,ds . 
\end{split} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3224} \end{align} We now use a similar elliptic estimate to~\eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3214} to bound the difference of two solutions $Q$ to the pressure equation. Namely, \begin{align}\thelt{uD6 ksnL XBkvVS 2q 0 ljM PpI L27 Qd ZMUP baOo Lqt3bh n6 R X9h PAd QRp 9P I4fB kJ8u ILIArp Tl 4 E6j rUY wuF Xi FYaD VvrD b2zVpv Gg 6 zFY ojS bMB hr 4pW8 OwDN Uao2mh DT S cei 90K rsm wa BnNU sHe6 RpIq1h XF N Pm0 iVs nGk bC Jr8V megl 416tU2 nn o llO tcF UM7 c4 GC8C lasl J0N8Xf Cu R aR2 sYe fjV ri JNj1 f2ty vqJyQN X1 F YmT l5N 17t kb BTPu F471 AH0Fo7 1R E ILJ p4V sqi WT TtkA d5Rk kJH3Ri RN K ePe sR0 xqF qn QjGU IniV gLGCl2 He 7 kmq hEV 4PF dC dGpE P9} \Vert \nabla Q^{(n+1)} \Vert_{H^{1}} \le P( \Vert \tilde{a}\Vert_{H^{3.5+\delta}}, \Vert \tdb\Vert_{H^{3.5+\delta}}, \Vert \tdb_t\Vert_{H^{2.5+\delta}}, \Vert \psi_t\Vert_{H^{2.5+\delta}},M) \Vert V^{(n)} \Vert_{H^{1}} . \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3354} \end{align} Note that we used a bound on the error term $\mathcal{E}^{(n)}-\mathcal{E}^{(n-1)}$ by $\Vert V^{n} \Vert_{H^{1}}$ with constants depending on $w$, $w_{t}$, and $M$. Hence, we have \begin{align}\thelt{ wa BnNU sHe6 RpIq1h XF N Pm0 iVs nGk bC Jr8V megl 416tU2 nn o llO tcF UM7 c4 GC8C lasl J0N8Xf Cu R aR2 sYe fjV ri JNj1 f2ty vqJyQN X1 F YmT l5N 17t kb BTPu F471 AH0Fo7 1R E ILJ p4V sqi WT TtkA d5Rk kJH3Ri RN K ePe sR0 xqF qn QjGU IniV gLGCl2 He 7 kmq hEV 4PF dC dGpE P9nB mcvZ0p LY G idf n65 qEu Df Mz2v cq4D MzN6mB FR t QP0 yDD Fxj uZ iZPE 3Jj4 hVc2zr rc R OnF PeO P1p Zg nsHA MRK4 ETNF23 Kt f Gem 2kr 5gf 5u 8Ncu wfJC av6SvQ 2n 1 8P8 RcI kmM SD 0w} \Vert V^{(n+1)}(t) \Vert^{2}_{H^{1}} & \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH_{0}^{t} \Vert V^{(n+1)} \Vert^{2}_{H^{1}} \,ds + \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH_{0}^{t} \Vert V^{(n)} \Vert^{2}_{H^{1}} \,ds , \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3352} \end{align} where all constants are allowed to depend on the norms of $w$. 
Using Gronwall's inequality and taking $T$ sufficiently small, we obtain the desired contraction estimate \begin{align}\thelt{V sqi WT TtkA d5Rk kJH3Ri RN K ePe sR0 xqF qn QjGU IniV gLGCl2 He 7 kmq hEV 4PF dC dGpE P9nB mcvZ0p LY G idf n65 qEu Df Mz2v cq4D MzN6mB FR t QP0 yDD Fxj uZ iZPE 3Jj4 hVc2zr rc R OnF PeO P1p Zg nsHA MRK4 ETNF23 Kt f Gem 2kr 5gf 5u 8Ncu wfJC av6SvQ 2n 1 8P8 RcI kmM SD 0wrV R1PY x7kEkZ Js J 7Wb 6XI WDE 0U nqtZ PAqE ETS3Eq NN f 38D Ek6 NhX V9 c3se vM32 WACSj3 eN X uq9 GhP OPC hd 7v1T 6gqR inehWk 8w L oaa wHV vbU 49 02yO bCT6 zm2aNf 8x U wPO ilr R3v } \Vert V^{(n+1)} \Vert^{2}_{L^{\infty}([0,T];H^{1})} \leq \frac12 \Vert V^{(n)} \Vert^{2}_{L^{\infty}([0,T];H^{1})} . \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3353} \end{align} Thus there exist $v \in L^{\infty}([0,T];H^{2.5+\delta})$ and a pressure function $q \in L^{\infty}([0,T];H^{2.5+\delta})$, which is defined up to a constant, which are the fixed point for the iteration scheme. The couple $(v,q)$ then satisfies the first equation of the nonlinear system~\eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3218}. We next show that the divergence condition and the boundary conditions in \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3218} are satisfied by the pair $(v,q)$. \\ \par \colb \emph{Step 3: Reconstruction of Divergence and Boundary Conditions of the nonlinear problem \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3218}}. 
\par It follows that the fixed point $v$ of the iteration scheme defined in \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3219} solves the problem \begin{align}\thelt{nF PeO P1p Zg nsHA MRK4 ETNF23 Kt f Gem 2kr 5gf 5u 8Ncu wfJC av6SvQ 2n 1 8P8 RcI kmM SD 0wrV R1PY x7kEkZ Js J 7Wb 6XI WDE 0U nqtZ PAqE ETS3Eq NN f 38D Ek6 NhX V9 c3se vM32 WACSj3 eN X uq9 GhP OPC hd 7v1T 6gqR inehWk 8w L oaa wHV vbU 49 02yO bCT6 zm2aNf 8x U wPO ilr R3v 8R cNWE k7Ev IAI8ok PA Y xPi UlZ 4mw zs Jo6r uPmY N6tylD Ee e oTm lBK mnV uB B7Hn U7qK n353Sn dt o L82 gDi fcm jL hHx3 gi0a kymhua FT z RnM ibF GU5 W5 x651 0NKi 85u8JT LY c bfO Mn0} \begin{split} & \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{t} v_i + {v}_m\tilde{a}_{km} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k} v_i -\psi_t\tilde{a}_{33} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3} v_i + \tilde{a}_{ki}\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k}{q}=0 \inon{in $\Omega$} . \end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3126} \end{align} Using the equations~\eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3210}--\eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3212}, the corresponding pressure $q$ satisfies the elliptic boundary value problem \begin{align}\thelt{N X uq9 GhP OPC hd 7v1T 6gqR inehWk 8w L oaa wHV vbU 49 02yO bCT6 zm2aNf 8x U wPO ilr R3v 8R cNWE k7Ev IAI8ok PA Y xPi UlZ 4mw zs Jo6r uPmY N6tylD Ee e oTm lBK mnV uB B7Hn U7qK n353Sn dt o L82 gDi fcm jL hHx3 gi0a kymhua FT z RnM ibF GU5 W5 x651 0NKi 85u8JT LY c bfO Mn0 auD 0t vNHw SAWz E3HWcY TI d 2Hh XML iGi yk AjHC nRX4 uJJlct Q3 y Loq i9j u7K j8 4EFU 49ud eA93xZ fZ C BW4 bSK pyc f6 nncm vnhK b0HjuK Wp 6 b88 pGC 3U7 km CO1e Y8jv Ebu59z mG Z sZ} \begin{split} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j}(\tdb_{ji} \tilde{a}_{ki}\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k}{q}) &= \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j}(\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{t}\tdb_{ji} {v}_i) - \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j}({v}_m \tilde{a}_{km}) \tdb_{ji}\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k} {v}_i 
+ \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j}(\tilde{a}_{33}\psi_t)\tdb_{ji}\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3} v_i \\&\indeq + {v}_m \tilde{a}_{km} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k} \tdb_{ji}\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j}{v}_i - \tilde{a}_{33}\psi_t\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3} \tdb_{ji} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j} {v}_i +\mathcal{E} \inon{in $\Omega$} , \end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3269} \end{align} with the Neumann type boundary conditions \begin{align}\thelt{3Sn dt o L82 gDi fcm jL hHx3 gi0a kymhua FT z RnM ibF GU5 W5 x651 0NKi 85u8JT LY c bfO Mn0 auD 0t vNHw SAWz E3HWcY TI d 2Hh XML iGi yk AjHC nRX4 uJJlct Q3 y Loq i9j u7K j8 4EFU 49ud eA93xZ fZ C BW4 bSK pyc f6 nncm vnhK b0HjuK Wp 6 b88 pGC 3U7 km CO1e Y8jv Ebu59z mG Z sZh 93N wvJ Yb kEgD pJBj gQeQUH 9k C az6 ZGp cpg rH r79I eQvT Idp35m wW m afR gjD vXS 7a FgmN IWmj vopqUu xF r BYm oa4 5jq kR gTBP PKLg oMLjiw IZ 2 I4F 91C 6x9 ae W7Tq 9CeM 62kef7 MU} \begin{split} & \tdb_{3i}\tilde{a}_{ki}\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k}{q} = 0 \inon{on $\Gamma_0$} \end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3240} \end{align} and \begin{align}\thelt{d eA93xZ fZ C BW4 bSK pyc f6 nncm vnhK b0HjuK Wp 6 b88 pGC 3U7 km CO1e Y8jv Ebu59z mG Z sZh 93N wvJ Yb kEgD pJBj gQeQUH 9k C az6 ZGp cpg rH r79I eQvT Idp35m wW m afR gjD vXS 7a FgmN IWmj vopqUu xF r BYm oa4 5jq kR gTBP PKLg oMLjiw IZ 2 I4F 91C 6x9 ae W7Tq 9CeM 62kef7 MU b ovx Wyx gID cL 8Xsz u2pZ TcbjaK 0f K zEy znV 0WF Yx bFOZ JYzB CXtQ4u xU 9 6Tn N0C GBh WE FZr6 0rIg w2f9x0 fW 3 kUB 4AO fct vL 5I0A NOLd w7h8zK 12 S TKy 2Zd ewo XY PZLV Vvtr aCxA} \begin{split} & \tdb_{3i}\tilde{a}_{ki}\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k}{q} = - w_{tt} + \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{t}\tdb_{3i} v_i - \frac{1}{\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3} \psi} \biggl(\sum_{j=1}^{2} v_k \tdb_{jk} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j} w_{t} + w_{t} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3} \tdb_{3i} v_{i} - v_k \tdb_{jk} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j} \tdb_{3i} v_i \biggr) \inon{on $\Gamma_1$} . 
\end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3239} \end{align} Applying the variable divergence $\tilde{b}_{ji} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j}$ to \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3126} and using the expression for $q$ from \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3269}, we obtain \begin{align}\thelt{N IWmj vopqUu xF r BYm oa4 5jq kR gTBP PKLg oMLjiw IZ 2 I4F 91C 6x9 ae W7Tq 9CeM 62kef7 MU b ovx Wyx gID cL 8Xsz u2pZ TcbjaK 0f K zEy znV 0WF Yx bFOZ JYzB CXtQ4u xU 9 6Tn N0C GBh WE FZr6 0rIg w2f9x0 fW 3 kUB 4AO fct vL 5I0A NOLd w7h8zK 12 S TKy 2Zd ewo XY PZLV Vvtr aCxAJm N7 M rmI arJ tfT dd DWE9 At6m hMPCVN UO O SZY tGk Pvx ps GeRg uDvt WTHMHf 3V y r6W 3xv cpi 0z 2wfw Q1DL 1wHedT qX l yoj GIQ AdE EK v7Ta k7cA ilRfvr lm 8 2Nj Ng9 KDS vN oQiN hng2} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{t} (\tilde{b}_{ji} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j} v_{i}) + v_m \tilde{a}_{km} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k} (\tilde{b}_{ji} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j} v_i) - \psi_{t} \tilde{a}_{33} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3} (\tilde{b}_{ji} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j} v_i) = -\mathcal{E} . 
\label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3225} \end{align} Also, multiplying \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3126} by $\tdb_{3i}$, restricting to $\Gamma_{1}$, then substituting the expression for $\tdb_{3i}\tilde{a}_{ki}\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k}{q}$ from \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3239} into the equation we obtain \begin{align}\thelt{E FZr6 0rIg w2f9x0 fW 3 kUB 4AO fct vL 5I0A NOLd w7h8zK 12 S TKy 2Zd ewo XY PZLV Vvtr aCxAJm N7 M rmI arJ tfT dd DWE9 At6m hMPCVN UO O SZY tGk Pvx ps GeRg uDvt WTHMHf 3V y r6W 3xv cpi 0z 2wfw Q1DL 1wHedT qX l yoj GIQ AdE EK v7Ta k7cA ilRfvr lm 8 2Nj Ng9 KDS vN oQiN hng2 tnBSVw d8 P 4o3 oLq rzP NH ZmkQ Itfj 61TcOQ PJ b lsB Yq3 Nul Nf rCon Z6kZ 2VbZ0p sQ A aUC iMa oRp FW fviT xmey zmc5Qs El 1 PNO Z4x otc iI nwc6 IFbp wsMeXx y8 l J4A 6OV 0qR zr St3P} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{t} (\tilde{b}_{3i} v_{i}-w_{t}) + \sum_{j=1}^{2} v_k \tilde{a}_{jk} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j} ( \tilde{b}_{3i} v_{i} - w_{t}) =-\frac{1}{\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3} \psi}\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3} ( \tdb_{3k}v_{k}) ( \tilde{b}_{3i} v_{i}-w_{t}) \inon{on $\Gamma_1$} . \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3363} \end{align} Note this equation on $\Gamma_{1}$ is a transport equation on $\mathbb{R}^{2}$ with periodic boundary conditions, satisfied by $\tdb_{3i} v_{i}-w_{t}$. Since $\tdb_{3i} v_{i}-w_{t}=0$ at time $0$, this implies $\tdb_{3i} v_{i}-w_{t}=0$ for all $t$. Indeed, testing the equation $\tdb_{3i} v_{i}-w_{t}$ on $\Gamma_1$ leads to this conclusion. 
\par Similarly, multiplying \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3126} by $\tdb_{3i}$, restricting to $\Gamma_{0}$, then using the fact that $\tdb=I$ on $\Gamma_{0}$ while $\psi_{t}=0$ and the boundary condition \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3240} for $q$ on $\Gamma_{0}$, we obtain the transport equation \begin{align}\thelt{cpi 0z 2wfw Q1DL 1wHedT qX l yoj GIQ AdE EK v7Ta k7cA ilRfvr lm 8 2Nj Ng9 KDS vN oQiN hng2 tnBSVw d8 P 4o3 oLq rzP NH ZmkQ Itfj 61TcOQ PJ b lsB Yq3 Nul Nf rCon Z6kZ 2VbZ0p sQ A aUC iMa oRp FW fviT xmey zmc5Qs El 1 PNO Z4x otc iI nwc6 IFbp wsMeXx y8 l J4A 6OV 0qR zr St3P MbvR gOS5ob ka F U9p OdM Pdj Fz 1KRX RKDV UjveW3 d9 s hi3 jzK BTq Zk eSXq bzbo WTc5yR RM o BYQ PCa eZ2 3H Wk9x fdxJ YxHYuN MN G Y4X LVZ oPU Qx JAli DHOK ycMAcT pG H Ikt jlI V25 YY} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{t} v_{3} + v_k \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k} v_{3} =0 \inon{on $\Gamma_0$} , \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3357} \end{align} which may be rewritten as \begin{align}\thelt{ iMa oRp FW fviT xmey zmc5Qs El 1 PNO Z4x otc iI nwc6 IFbp wsMeXx y8 l J4A 6OV 0qR zr St3P MbvR gOS5ob ka F U9p OdM Pdj Fz 1KRX RKDV UjveW3 d9 s hi3 jzK BTq Zk eSXq bzbo WTc5yR RM o BYQ PCa eZ2 3H Wk9x fdxJ YxHYuN MN G Y4X LVZ oPU Qx JAli DHOK ycMAcT pG H Ikt jlI V25 YY oRC7 4thS sJClD7 6y x M6B Rhg fS0 UH 4wXV F0x1 M6Ibem sT K SWl sG9 pk9 5k ZSdH U31c 5BpQeF x5 z a7h WPl LjD Yd KH1p OkMo 1Tvhxx z5 F LLu 71D UNe UX tDFC 7CZ2 473sjE Re b aYt 2sE p} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{t} v_{3} + \sum_{k=1}^{2} v_k \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k} v_3 = - (\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3} v_3) v_3 \inon{on $\Gamma_0$} . \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3357} \end{align} Since $v_{3}=0$ at time $0$, we conclude that \begin{equation} v_{3} =0 \inon{on $\Gamma_1$} , \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3358} \end{equation} and the boundary conditions satisfied by $v$ are recovered. \par We next recover the divergence condition. 
Now, use that in \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3348} all the integrals over $\Gamma$ vanish and integrating by parts in the remaining two, we get \begin{equation} \mathcal{E} = \frac{1}{|\Omega|}\OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH \sum_{m=1}^{2} v_m a_{km} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k}(\tilde\tda_{ji} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j}v_i) + \frac{1}{|\Omega|}\OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH \frac{1}{\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_3\psi}(v_3-\psi_t)\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3}(\tilde b_{ji}\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j}v_i) . \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3359} \end{equation} By \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3225} and \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3359}, the ALE divergence \begin{equation} \mathcal{D} = \tilde b_{ji}\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j} v_i \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3360} \end{equation} satisfies the PDE \begin{equation} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_t \mathcal{D} + A \cdot \nabla \mathcal{D} = \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH B \cdot \nabla \mathcal{D} , \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3361} \end{equation} where $A\cdot N=0$ on $\Gamma$ and $A,B \in L^{\infty}([0,T];H^{2.5+\delta})$. 
Using an $H^{1}$ estimate on $\mathcal{D}$ and employing $\mathcal{D}(0)=0$, we get $\mathcal{D}=0$ recovering the divergence condition \begin{equation} \tilde{b}_{ji} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j} v_{i} =0 , \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3362} \end{equation} for all $t \in[0,T]$.\\ \par \emph{Step~4: Regularity of the vorticity with more regular boundary data.} Still under the assumption \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3206}, we apply the variable curl, $\epsilon_{ijk} \tilde b_{mj}\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{m} (.)_k $, to \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3218}, with $k$ replaced by $m$ and $i$ replaced by $k$, and obtain the system \begin{align}\thelt{o BYQ PCa eZ2 3H Wk9x fdxJ YxHYuN MN G Y4X LVZ oPU Qx JAli DHOK ycMAcT pG H Ikt jlI V25 YY oRC7 4thS sJClD7 6y x M6B Rhg fS0 UH 4wXV F0x1 M6Ibem sT K SWl sG9 pk9 5k ZSdH U31c 5BpQeF x5 z a7h WPl LjD Yd KH1p OkMo 1Tvhxx z5 F LLu 71D UNe UX tDFC 7CZ2 473sjE Re b aYt 2sE pV9 wD J8RG UqQm boXwJn HK F Mps XBv AsX 8N YRZM wmZQ ctltsq of i 8wx n6I W8j c6 8ANB wz8f 4gWowk mZ P Wlw fKp M1f pd o0yT RIKH MDgTl3 BU B Wr6 vHU zFZ bq xnwK kdmJ 3lXzIw kw 7 Jku } \begin{split} & \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{t} \zeta_i + v_1 \tilde{a}_{j1} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j} \zeta_i + v_2 \tilde{a}_{j2} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j} \zeta_i + (v_3-\psi_t) \tilde{a}_{33} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3} \zeta_i - \zeta_1 \tilde{a}_{j1} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j} v_i - \zeta_2 \tilde{a}_{j2} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j} v_i - \zeta_3 \tilde{a}_{33} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3} v_i =0 , \end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3226} \end{align} for $i=1,2,3$, in $\Omega$, for $i=1,2,3$, where the ALE vorticity $\zeta$ is given by \begin{equation} \zeta_{i} = \epsilon_{ijk} \tilde a_{mj}\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{m} v_{k} \comma i=1,2,3 
\label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3227} \end{equation} and where $\tilde{a}$ is defined as before in \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3189}--\eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3190}, depending on given functions $(w,w_{t})$, and such that \begin{align}\thelt{F x5 z a7h WPl LjD Yd KH1p OkMo 1Tvhxx z5 F LLu 71D UNe UX tDFC 7CZ2 473sjE Re b aYt 2sE pV9 wD J8RG UqQm boXwJn HK F Mps XBv AsX 8N YRZM wmZQ ctltsq of i 8wx n6I W8j c6 8ANB wz8f 4gWowk mZ P Wlw fKp M1f pd o0yT RIKH MDgTl3 BU B Wr6 vHU zFZ bq xnwK kdmJ 3lXzIw kw 7 Jku JcC kgv FZ 3lSo 0ljV Ku9Syb y4 6 zDj M6R XZI DP pHqE fkHt 9SVnVt Wd y YNw dmM m7S Pw mqhO 6FX8 tzwYaM vj z pBS NJ1 z36 89 00v2 i4y2 wQjZhw wF U jq0 UNm k8J 8d OOG3 QlDz p8AWpr uu 4} \begin{split} & \tilde{b}_{3i} v_{i} -w_{t} =0 \inon{on $\Gamma_0 \cup \Gamma_1$} . \end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3228} \end{align} Based on~\eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3226}, we claim that \begin{align}\thelt{4gWowk mZ P Wlw fKp M1f pd o0yT RIKH MDgTl3 BU B Wr6 vHU zFZ bq xnwK kdmJ 3lXzIw kw 7 Jku JcC kgv FZ 3lSo 0ljV Ku9Syb y4 6 zDj M6R XZI DP pHqE fkHt 9SVnVt Wd y YNw dmM m7S Pw mqhO 6FX8 tzwYaM vj z pBS NJ1 z36 89 00v2 i4y2 wQjZhw wF U jq0 UNm k8J 8d OOG3 QlDz p8AWpr uu 4 D9V Rlp VVz QQ g1ca Eqev P0sFPH cw t KI3 Z6n Y79 iQ abga 0i9m RVGbvl TA g V6P UV8 Eup PQ 6xvG bcn7 dQjV7C kw 5 7NP WUy 9Xn wF 9ele bZ8U YJDx3x CB Y CId PCE 2D8 eP 90u4 9NY9 Jxx9RI} \Vert \zeta(t) \Vert_{H^{1.5+ \delta}} \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \Vert \zeta_{0} \Vert_{H^{1.5+ \delta}} + \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH_{0}^{t} P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert w \Vert_{H^{4+\delta}(\Gamma_{1})}, \Vert w_{t} \Vert_{H^{2+\delta}(\Gamma_{1})} ) \,ds , \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3229} \end{align} where $P$ always denotes a generic polynomial. 
Note that \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3226} is a transport equation of the form $\zeta_{t} + A \nabla \zeta + B \zeta =0$ such that $A\in L^{\infty}([0,T];H^{2.5 +\delta}(\Omega))$ and $B\in L^{\infty}([0,T];H^{1.5 +\delta}(\Omega))$ with $ A\cdot N|_{\Gamma_0\cup \Gamma_1}=0$. The regularity assumptions hold since $\tilde{a} \in L^{\infty}([0,T];H^{3.5 +\delta}(\Omega))$, $\psi_{t} \in L^{\infty}([0,T];H^{2.5 +\delta}(\Omega))$, and $v \in L^{\infty}([0,T];H^{2.5 +\delta}(\Omega))$. On the other hand, the boundary condition $A\cdot N|_{\Gamma_0\cup \Gamma_1}=0$ is satisfied by~\eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3228}. The norms of $\tilde{a}$ and $\tilde{b}$ are estimated using \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3199} in terms of $\psi$, which in turn depends on the boundary data $w$. This concludes the proof of~\eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3229}. \par Now, we use \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3204} and \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3229}, as well as $\Vert v\Vert_{L^2}\dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \Vert v_0\Vert_{L^2}+\OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH_{0}^{t}\Vert v_t\Vert_{L^2}\,ds$, to estimate \begin{equation} \Vert v\Vert_{H^{2.5+\delta}} \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \Vert v_{0}\Vert_{H^{2.5+\delta}} + \Vert w_t\Vert_{H^{2+\delta}(\Gamma_1)} +\OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH_{0}^{t} P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert w \Vert_{H^{4+\delta}(\Gamma_{1})}, \Vert w_{t} \Vert_{H^{2+\delta}(\Gamma_{1})} ) \,ds . 
\label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3230} \end{equation} Applying the Gronwall inequality on \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3230}, we consequently obtain \begin{equation} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3231} \Vert v\Vert_{H^{2.5+\delta}} \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \Vert v_{0}\Vert_{H^{2.5+\delta}} + \Vert w_t\Vert_{H^{2+\delta}(\Gamma_1)} +\OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH_{0}^{t} P( \Vert w \Vert_{H^{4+\delta}(\Gamma_{1})}, \Vert w_{t} \Vert_{H^{2+\delta}(\Gamma_{1})} ) \,ds , \end{equation} for small times $t\in (0,T]$. This inequality provides a bound on $v$ in terms of lower norms (see~\eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3195}) of the boundary data, under the assumption of higher regularity~\eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3206}. In the statement above and in the rest of the paper, we continue to use the convention that the domain in norms is $\Omega$ unless otherwise indicated.\\ \par \emph{Step~5: Solution to the nonlinear problem with less regular boundary data.} Now, assume only~\eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3195}. We approximate $(w,w_{t}, w_{tt}) \in L^{\infty}([0,T];H^{4+\delta} \times H^{2+\delta} \times H^{\delta})$ by a sequence of more regular data $(w^{(m)},w^{(m)}_{t}, w^{(m)}_{tt}) \in L^{\infty}([0,T];H^{6+\delta} \times H^{4+\delta} \times H^{2+\delta})$. From Step~2, we can find a sequence of solutions $v^{(m)} \in L^{\infty}([0,T];H^{2.5+\delta}(\Omega))$ and $q^{(m)} \in L^{\infty}([0,T];H^{3.5+\delta}(\Omega))$ with $q^{(m)}$ determined up to a constant employing the given boundary data $(w^{(m)},w^{(m)}_{t}, w^{(m)}_{tt})$. Using the estimate \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3231}, we have a uniform bound on the sequence $v^{(m)}$. 
Therefore, we may extract a subsequence $v^{(m_{j})}$ which converges weak-* to some $v \in L^{\infty}([0,T];H^{2.5+\delta}(\Omega))$. Moreover, we may also obtain a uniform bound on $\nabla q^{(m)}$ in $L^{\infty}([0,T];H^{0.5+\delta}(\Omega))$ in terms of the data $(w, w_{t}, w_{tt})$ by considering the elliptic problem \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3210} with the Neumann boundary conditions \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3211}--\eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3212}, from which one can derive the estimate \begin{align}\thelt{6FX8 tzwYaM vj z pBS NJ1 z36 89 00v2 i4y2 wQjZhw wF U jq0 UNm k8J 8d OOG3 QlDz p8AWpr uu 4 D9V Rlp VVz QQ g1ca Eqev P0sFPH cw t KI3 Z6n Y79 iQ abga 0i9m RVGbvl TA g V6P UV8 Eup PQ 6xvG bcn7 dQjV7C kw 5 7NP WUy 9Xn wF 9ele bZ8U YJDx3x CB Y CId PCE 2D8 eP 90u4 9NY9 Jxx9RI 4F e a0Q Cjs 5TL od JFph ykcz Bwoe97 Po h Tql 1LM s37 cK hsHO 5jZx qpkHtL bF D nvf Txj iyk LV hpwM qobq DM9A0f 1n 4 i5S Bc6 trq VX wgQB EgH8 lISLPL O5 2 EUv i1m yxk nL 0RBe bO2Y W} \begin{split} \Vert \nabla q^{(m)} \Vert_{H^{0.5+\delta}} \leq P( \Vert w_{tt} \Vert_{H^{\delta}(\Gamma_{1})}, \Vert \tilde{a}\Vert_{H^{3.5+\delta}}, \Vert \tdb\Vert_{H^{3.5+\delta}}, \Vert \tdb_t\Vert_{H^{1.5+\delta}}, \Vert \psi_t\Vert_{H^{2.5+\delta}}, \Vert \tilde{v}\Vert_{H^{2.5+\delta}} ) . \end{split} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3232} \end{align} In addition, we may adjust the pressure $q^{(m)}$ by an appropriate constant so that $\OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH_{\Gamma_{1}} q^{(m)}=0$, which then in turn implies \begin{align}\thelt{6xvG bcn7 dQjV7C kw 5 7NP WUy 9Xn wF 9ele bZ8U YJDx3x CB Y CId PCE 2D8 eP 90u4 9NY9 Jxx9RI 4F e a0Q Cjs 5TL od JFph ykcz Bwoe97 Po h Tql 1LM s37 cK hsHO 5jZx qpkHtL bF D nvf Txj iyk LV hpwM qobq DM9A0f 1n 4 i5S Bc6 trq VX wgQB EgH8 lISLPL O5 2 EUv i1m yxk nL 0RBe bO2Y Ww8Jhf o1 l HlU Mie sst dW w4aS WrYv Osn5Wn 3w f wzH RHx Fg0 hK FuNV hjzX bg56HJ 9V t Uwa lOX fT8 oi FY1C sUCg CETCIv LR 0 AgT hCs 9Ta Zl 6ver 8hRt edkAUr kI n Sbc I8n yEj Zs VOSz t} \begin{split} \Vert q^{(m)} \Vert_{H^{1.5+\delta}} \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \Vert \nabla q^{(m)} \Vert_{H^{0.5+\delta}} . 
\end{split} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3233} \end{align} It then follows that we have a uniform bound on $q^{(m)}$ in $L^{\infty}([0,T];H^{1.5+\delta}(\Omega))$, and we can thus extract a further weak-* convergent subsequence with a limit $q \in L^{\infty}([0,T];H^{1.5+\delta}(\Omega))$. Consequently, the corresponding sequence of time derivatives $v_{t}^{(m)}$ is uniformly bounded in $L^{\infty}([0,T];H^{0.5+\delta}(\Omega))$, which can be directly deduced from the equation. Using a standard Aubin-Lions compactness argument, we may pass to the limit in \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3188} and boundary conditions \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3193} and \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3194} satisfied by $v^{(m)}$ and $q^{(m)}$ as $m \to \infty$ to obtain a solution $v \in L^{\infty}([0,T];H^{2.5+\delta}(\Omega))$ and $q \in L^{\infty}([0,T];H^{0.5+\delta}(\Omega))$ satisfying the equations \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3188} and the boundary conditions \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3193}--\eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3194}, given $(w,w_{t}, w_{tt}) \in L^{\infty}([0,T];H^{4+\delta} \times H^{2+\delta} \times H^{\delta})$. Note that the resulting pressure $q$ is periodic in $x_{1}, x_{2}$ and has zero average on~$\Gamma_{1}$. \end{proof} \par \subsection{The plate equation} \label{sec20} \par Next, we provide the existence theorem for the plate equation. 
\par \cole \begin{Lemma} \label{L05} Consider the damped plate equation \begin{align}\thelt{k LV hpwM qobq DM9A0f 1n 4 i5S Bc6 trq VX wgQB EgH8 lISLPL O5 2 EUv i1m yxk nL 0RBe bO2Y Ww8Jhf o1 l HlU Mie sst dW w4aS WrYv Osn5Wn 3w f wzH RHx Fg0 hK FuNV hjzX bg56HJ 9V t Uwa lOX fT8 oi FY1C sUCg CETCIv LR 0 AgT hCs 9Ta Zl 6ver 8hRt edkAUr kI n Sbc I8n yEj Zs VOSz tBbh 7WjBgf aA F t4J 6CT UCU 54 3rba vpOM yelWYW hV B RGo w5J Rh2 nM fUco BkBX UQ7UlO 5r Y fHD Mce Wou 3R oFWt baKh 70oHBZ n7 u nRp Rh3 SIp p0 Btqk 5vhX CU9BHJ Fx 7 qPx B55 a7R kO y} \begin{split} w_{tt} +\Delta_2^{2}w - \nu \Delta_{2} w_{t} &= d \inon{on $\Gamma_{1} \times[0,T]$} , \end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3234} \end{align} where $\nu >0$, defined on the domain $\Gamma_{1}= [0,1] \times [0,1] \subseteq \mathbb{R}^{2}$ with periodic boundary conditions. Given the initial data $w(0, \cdot)= w_{0} \in H^{4+\delta}(\Gamma_{1})$ and $w_{t}(0,\cdot)= w_{1} \in H^{2+\delta}(\Gamma_{1})$ such that \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3321} holds and the forcing term $d \in L^{2}([0,T]; H^{1+\delta}(\Omega))$ with $\delta >0$, there exists a unique solution $w \in L^{\infty}([0,T]; H^{4+\delta}(\Gamma_{1}))$ such that $w_{t} \in L^{\infty}([0,T]; H^{2+\delta}(\Gamma_{1})) \cap L^{2}([0,T]; H^{3+\delta}(\Gamma_{1}))$ and $w_{tt} \in L^{2}([0,T]; H^{\delta}(\Gamma_{1}))$ with \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n331} for all $t\in [0,T]$. Moreover, we have the estimate \begin{align}\thelt{OX fT8 oi FY1C sUCg CETCIv LR 0 AgT hCs 9Ta Zl 6ver 8hRt edkAUr kI n Sbc I8n yEj Zs VOSz tBbh 7WjBgf aA F t4J 6CT UCU 54 3rba vpOM yelWYW hV B RGo w5J Rh2 nM fUco BkBX UQ7UlO 5r Y fHD Mce Wou 3R oFWt baKh 70oHBZ n7 u nRp Rh3 SIp p0 Btqk 5vhX CU9BHJ Fx 7 qPx B55 a7R kO yHmS h5vw rDqt0n F7 t oPJ UGq HfY 5u At5k QLP6 ppnRjM Hk 3 HGq Z0O Bug FF xSnA SHBI 7agVfq wf g aAl eH9 DMn XQ QTAA QM8q z9trz8 6V R 2gO MMV uMg f6 tGLZ WEKq vkMEOg Uz M xgN 4Cb Q8f} \begin{split} &\Vert w \Vert_{ L^{\infty}([0,T]; H^{4+\delta}(\Gamma_{1}))} + \Vert w_{t} \Vert_{ L^{\infty}([0,T]; H^{2+\delta}(\Gamma_{1}))} +\Vert w_{tt} \Vert_{ L^{\infty}([0,T]; H^{\delta}(\Gamma_{1}))} + \nu\Vert w \Vert_{ L^{2}([0,T]; H^{3+\delta}(\Gamma_{1}))} \\&\indeq \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \Vert w_{0} \Vert_{H^{4+\delta}(\Gamma_1)} +\Vert w_{1} \Vert_{H^{2+\delta}(\Gamma_1)} + \Vert d \Vert_{L^{2}([0,T]; H^{1+\delta}( \Gamma_{1}))} , \end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3235} \end{align} where the constant depends on $\nu$. 
\end{Lemma} \colb \par \begin{proof}[Proof of Lemma~\ref{L05}] We provide a necessary a~priori estimate for \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3235}. Since the equation is linear, it is straight-forward to justify it using a truncation in the Fourier variables. With $\UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI$ as in \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n350}, we test \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3234} with $\Gamma^{4+2\delta}w_t$ obtaining \begin{align}\thelt{fHD Mce Wou 3R oFWt baKh 70oHBZ n7 u nRp Rh3 SIp p0 Btqk 5vhX CU9BHJ Fx 7 qPx B55 a7R kO yHmS h5vw rDqt0n F7 t oPJ UGq HfY 5u At5k QLP6 ppnRjM Hk 3 HGq Z0O Bug FF xSnA SHBI 7agVfq wf g aAl eH9 DMn XQ QTAA QM8q z9trz8 6V R 2gO MMV uMg f6 tGLZ WEKq vkMEOg Uz M xgN 4Cb Q8f WY 9Tk7 3Gg9 0jy9dJ bO v ddV Zmq Jjb 5q Q5BS Ffl2 tNPRC8 6t I 0PI dLD UqX KO 1ulg XjPV lfDFkF h4 2 W0j wkk H8d xI kjy6 GDge M9mbTY tU S 4lt yAV uor 6w 7Inw Ch6G G9Km3Y oz b uVq ts} \begin{split} & \frac12 \frac{d}{dt} \Vert \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{4+\delta}w\Vert_{L^2(\Gamma_1)}^2 + \frac12 \frac{d}{dt} \Vert \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{2+\delta}w_t\Vert_{L^2(\Gamma_1)}^2 + \nu \Vert \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{3+\delta}w_t\Vert_{L^2(\Gamma_1)}^2 \\&\indeq = \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH_{\Gamma_1} d \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{4+2\delta}w_t \leq \Vert \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1+\delta} d\Vert_{L^2(\Gamma_1)} \Vert \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{3+\delta}w_t\Vert_{L^2(\Gamma_1)} \leq \frac{\nu}2 \Vert \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{3+\delta}w_t\Vert_{L^2(\Gamma_1)}^2 + \frac{1}{2\nu} \Vert d\Vert_{\UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1+\delta}}^2 , \end{split} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3364} \end{align} and the estimate \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3235} follows upon absorbing the first term on the far-right side into the third term on the far-left side. 
\end{proof} \par \subsection{Regularized Euler-plate system} \label{sec10} \par We now consider the regularized Euler-plate system consisting of the Euler equations \begin{align}\thelt{wf g aAl eH9 DMn XQ QTAA QM8q z9trz8 6V R 2gO MMV uMg f6 tGLZ WEKq vkMEOg Uz M xgN 4Cb Q8f WY 9Tk7 3Gg9 0jy9dJ bO v ddV Zmq Jjb 5q Q5BS Ffl2 tNPRC8 6t I 0PI dLD UqX KO 1ulg XjPV lfDFkF h4 2 W0j wkk H8d xI kjy6 GDge M9mbTY tU S 4lt yAV uor 6w 7Inw Ch6G G9Km3Y oz b uVq tsX TNZ aq mwkz oKxE 9O0QBQ Xh x N5L qr6 x7S xm vRwT SBGJ Y5uo5w SN G p3h Ccf QNa fX Wjxe AFyC xUfM8c 0k K kwg psv wVe 4t FsGU IzoW FYfnQA UT 9 xcl Tfi mLC JR XFAm He7V bYOaFB Pj j e} \begin{split} & \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{t} v_i + v_1 a_{j1} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j}v_i + v_2 a_{j2} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j}v_i + (v_3-\psi_t) \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3} v_i + a_{ki}\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k}q=0 , \\& a_{ki} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k}v_i=0 \end{split} \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3241} \end{align} in $\Omega\times[0,T]$, with the boundary condition \begin{equation} v_3=0 \inon{on $\Gamma_0$} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3242} \end{equation} on the bottom, and \begin{equation} \tda_{3i}v_i = w_{t} \inon{on $\Gamma_1$} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3243} \end{equation} on the top. 
The coefficient matrices $a$ and $b$ are defined as in \eqref{eq3189}--\eqref{eq3191} in terms of $(w,w_{t})$ solving the regularized damped plate equation
\begin{equation}
w_{tt} - \nu \Delta_2 w_{t} +\Delta_2^2 w = q
\inon{on $\Gamma_{1} \times [0,T]$}
\label{eq3244}
\end{equation}
defined on the domain $\Gamma_{1}= [0,1] \times [0,1] \subseteq \mathbb{R}^{2}$ with periodic boundary conditions. The following theorem, which we prove next, establishes the local existence of a solution to the above system.
\par
\begin{Theorem}
\label{T05}
Let $\nu>0$. Assume that the initial data
\begin{equation}
(v_{0}, w_{0}, w_{1})\in H^{2.5 +\delta}(\Omega) \times H^{4+\delta}(\Gamma_{1}) \times H^{2+\delta}(\Gamma_{1}) ,
\llabel{eq3245}
\end{equation}
where $\delta \geq 0.5$, satisfy the compatibility conditions \eqref{eq327}--\eqref{eq3321}.
Then there exists a unique local-in-time solution $(v,q,w,w_{t})$ to the system \eqref{eq3241}--\eqref{eq3244} such that
\begin{align}
\begin{split}
&v \in L^{\infty}([0,T];H^{2.5+ \delta}(\Omega)) \cap C([0,T];H^{0.5 +\delta}(\Omega)) ,
\\& v_{t} \in L^{\infty}([0,T];H^{0.5 +\delta}(\Omega)) ,
\\& q \in L^{\infty}([0,T];H^{1.5 + \delta}(\Omega)) ,
\\& w \in L^{\infty}([0,T];H^{4+ \delta}(\Gamma_{1})) ,
\\& w_{t} \in L^{\infty}([0,T];H^{2+ \delta}(\Gamma_{1})) ,
\end{split}
\llabel{eq3248}
\end{align}
for some time $T>0$ depending on the initial data as well as on $\nu$ and $\epsilon$.
\end{Theorem}
\par
\begin{proof}[Proof of Theorem~\ref{T05}]
Given $\nu >0$ and a regularization parameter $\epsilon >0$, we construct a solution to the above system using the iteration scheme
\begin{align}
\begin{split}
& \partial_{t} v^{(n+1)}_i + v^{(n+1)}_1 a^{(n)}_{j1} \partial_{j} v^{(n+1)}_i + v^{(n+1)}_2 a^{(n)}_{j2} \partial_{j} v^{(n+1)}_i + (v^{(n+1)}_3-\psi^{(n)}_t) a^{(n)}_{33} \partial_{3} v^{(n+1)}_i + a^{(n)}_{ki}\partial_{k}q^{(n+1)} =0 ,
\\& a^{(n)}_{ki} \partial_{k}v^{(n+1)}_i=0 ,
\end{split}
\llabel{eq3250}
\end{align}
where $a^{(n)}$ is determined from
\begin{align}
\begin{split}
&\Delta \psi^{(n)}= 0 \inon{in $\Omega$}
\\ & \psi^{(n)}(x_1,x_2,1,t)=1+w^{(n)} (x_1,x_2,t) \inon{on $\Gamma_1$}
\\ & \psi^{(n)}(x_1,x_2,0,t)=0 \inon{on $\Gamma_0$}
\end{split}
\llabel{eq3251}
\end{align}
by
\begin{align}
a^{(n)}=
\begin{pmatrix}
1 & 0 & 0 \\
0 & 1 & 0 \\
-\fractext{\partial_{1} \psi^{(n)}}{\partial_{3} \psi^{(n)}} & -\fractext{\partial_{2} \psi^{(n)}}{\partial_{3} \psi^{(n)}} & \fractext{1}{\partial_{3} \psi^{(n)}}
\end{pmatrix}
\label{eq3252}
\end{align}
and where $w^{(n+1)}$ satisfies
\begin{align}
& w^{(n+1)}_{tt} +\Delta_{2}^{2}w^{(n+1)} - \nu \Delta_{2} w^{(n+1)}_{t} =q^{(n+1)}
\inon{on $\Gamma_{1} \times[0,T]$}
.
\label{eq3253}
\end{align}
With the initial data as in Theorem~\ref{T05}, let
\begin{equation}
(w^{(n)}, w_{t}^{(n)}, w^{(n)}_{tt})
\in L^{\infty}([0,T]; H^{4+\delta}(\Gamma_{1}) \times H^{2+\delta}(\Gamma_{1}) \times H^{\delta}(\Gamma_{1}) ) ,
\llabel{eq3254}
\end{equation}
with $w^{(n)}_{t} \in L^{2}([0,T]; H^{3+\delta}(\Gamma_{1}))$, be such that \eqref{eq3196} and the bound
\begin{align}
\Vert w^{(n)} \Vert_{ L^{\infty}([0,T]; H^{4+\delta}(\Gamma_{1}))}
+ \Vert w^{(n)}_{t} \Vert_{ L^{\infty}([0,T]; H^{2+\delta}(\Gamma_{1}))}
+ \Vert w^{(n)}_{tt} \Vert_{ L^{\infty}([0,T]; H^{\delta}(\Gamma_{1}))}
\leq M
\llabel{eq3255}
\end{align}
hold, where $M=C(\Vert v_{0} \Vert_{H^{2.5+\delta}} + \Vert w_{0} \Vert_{H^{4+\delta}(\Gamma_1)} +\Vert w_{1} \Vert_{H^{2+\delta}(\Gamma_1)})$ with a sufficiently large constant $C\geq1$. We now invoke Theorem~\ref{T04} to obtain $(v^{(n+1)},q^{(n+1)})$. The function $\psi^{(n)}$ is obtained from the harmonic extension problem above, the matrix $a^{(n)}$ as in \eqref{eq3252}, while $b^{(n)}=\partial_{3} \psi^{(n)} a^{(n)}$. The coefficients $a^{(n)}$ and $b^{(n)}$ satisfy \eqref{eq3197} and \eqref{eq3198} for some time $T>0$ depending only on $M$ and the initial data. Hence, Theorem~\ref{T04} guarantees the existence of a solution $(v^{(n+1)}, q^{(n+1)}) \in L^{\infty}([0,T];H^{2.5+ \delta}(\Omega) \times H^{1.5+ \delta}(\Omega))$ for a time $T>0$ depending on $M$ and $v_{0}$. Moreover, from \eqref{eq3202} we have the estimate
\begin{align}
\Vert v^{(n+1)}(t) \Vert_{H^{2.5+\delta}}
+ \Vert \nabla q^{(n+1)}(t) \Vert_{H^{0.5+\delta}}
\lesssim
\Vert v_{0} \Vert_{H^{2.5+\delta}}
+ \int_{0}^{t} P( \Vert w^{(n)} \Vert_{H^{4+\delta}(\Gamma_{1})}, \Vert w^{(n)}_{t} \Vert_{H^{2+\delta}(\Gamma_{1})} )\, ds .
\label{eq3256}
\end{align}
Invoking Lemma~\ref{L05}, we then solve the plate equation and obtain $(w^{(n+1)}, w_{t}^{(n+1)})$ given $q^{(n+1)}$.
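In passing, we sketch how \eqref{eq3256} feeds into the self-map property of the iteration for the velocity component; this is only a rough version, with the implicit constant absorbed into the choice of $C$ in the definition of $M$ and into the smallness of $T$. Bounding the integrand by $P(M)$ using the assumed bound on $(w^{(n)},w^{(n)}_{t})$, we get
\begin{equation*}
\Vert v^{(n+1)}\Vert_{L^{\infty}([0,T];H^{2.5+\delta}(\Omega))}
+\Vert \nabla q^{(n+1)}\Vert_{L^{\infty}([0,T];H^{0.5+\delta}(\Omega))}
\lesssim \Vert v_{0}\Vert_{H^{2.5+\delta}} + T\,P(M)
\leq M ,
\end{equation*}
provided $T$ is sufficiently small with respect to $P(M)$.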
We need to adjust the pressure by an appropriate function of time to ensure that $w_{t}^{(n+1)}$ satisfies the compatibility condition~\eqref{eq3196}. To achieve this, we adjust $q^{(n+1)}$ by an additive function of time such that $\int_{\Gamma_{1}} q^{(n+1)} =0$. Since $w^{(n+1)}$ and $w^{(n+1)}_{t}$ are periodic, we have $\int_{\Gamma_{1}} \Delta_2^{2} w^{(n+1)}=\int_{\Gamma_{1}} \Delta_{2} w^{(n+1)}_{t} =0$ and hence, by \eqref{eq3253}, we obtain $\int_{\Gamma_{1}} w^{(n+1)}_{tt} =0$. From $\int_{\Gamma_{1}} w^{(n+1)}_{tt} =0$ and $\int_{\Gamma_{1}} w_{1} =0$, we obtain
\begin{equation}
\int_{\Gamma_{1}} w^{(n+1)}_{t} =0 .
\llabel{eq3257}
\end{equation}
Also, from \eqref{eq3235} we have the estimate
\begin{align}
\begin{split}
&\Vert w^{(n+1)} \Vert_{ L^{\infty}([0,T]; H^{4+\delta}(\Gamma_{1}))}
+ \Vert w^{(n+1)}_{t} \Vert_{ L^{\infty}([0,T]; H^{2+\delta}(\Gamma_{1}))}
\\&\indeq
\lesssim
\Vert w_{0} \Vert_{H^{4+\delta}(\Gamma_1)} +\Vert w_{1} \Vert_{H^{2+\delta}(\Gamma_1)}
+ \Vert q^{(n+1)} \Vert_{L^{2}([0,T]; H^{1+\delta}( \Gamma_{1}))} .
\end{split}
\label{eq3258}
\end{align}
Since $\int_{\Gamma_1}q^{(n+1)}=0$, we have
\begin{align}
\begin{split}
& \Vert q^{(n+1)} \Vert_{L^{2}([0,T]; H^{1+\delta}( \Gamma_{1}))}
\lesssim
\Vert \nabla q^{(n+1)} \Vert_{L^{2}([0,T]; H^{0.5+\delta}( \Omega))} ,
\end{split}
\llabel{eq3259}
\end{align}
where we used the standard trace inequality. We now estimate the pressure term using \eqref{eq3256}, so that \eqref{eq3258} becomes
\begin{align}
\begin{split}
& \Vert w^{(n+1)} \Vert_{ L^{\infty}([0,T]; H^{4+\delta}(\Gamma_{1}))}
+ \Vert w^{(n+1)}_{t} \Vert_{ L^{\infty}([0,T]; H^{2+\delta}(\Gamma_{1}))}
\\&\indeq
\lesssim
\Vert w_{0} \Vert_{H^{4+\delta}(\Gamma_1)} +\Vert w_{1} \Vert_{H^{2+\delta}(\Gamma_1)}
+ T^{1/2}\Vert v_{0} \Vert_{H^{2.5+\delta}}
+ T^{1/2} \int_{0}^{t} P( \Vert w^{(n)} \Vert_{H^{4+\delta}(\Gamma_{1})}, \Vert w^{(n)}_{t} \Vert_{H^{2+\delta}(\Gamma_{1})} )\, ds .
\end{split}
\label{eq3260}
\end{align}
From \eqref{eq3260}, it is standard to obtain
\begin{align}
\Vert w^{(n+1)} \Vert_{ L^{\infty}([0,T]; H^{4+\delta}(\Gamma_{1}))}
+ \Vert w^{(n+1)}_{t} \Vert_{ L^{\infty}([0,T]; H^{2+\delta}(\Gamma_{1}))}
\leq M = C M_0 ,
\llabel{eq3261}
\end{align}
where
\begin{equation}
M_0= \Vert w_{0} \Vert_{H^{4+\delta}(\Gamma_1)} +\Vert w_{1} \Vert_{H^{2+\delta}(\Gamma_1)} ,
\llabel{eq3246}
\end{equation}
provided $T$ is chosen so that $T\leq 1/P_{0}(M_0)$, where $P_{0}$ is a certain polynomial depending on $P$ in \eqref{eq3260}. This estimate shows that the iteration map takes a ball of appropriate size $M$ into itself.
\par
Now we proceed by obtaining a contraction estimate for the sequence. We denote the differences between two consecutive iterates by $W^{(n+1)}= w^{(n+1)} -w^{(n)}$, $A^{(n+1)}= a^{(n+1)} -a^{(n)}$, $B^{(n+1)}= b^{(n+1)} -b^{(n)}$, $V^{(n+1)}= v^{(n+1)} -v^{(n)}$, and $Q^{(n+1)}= q^{(n+1)} -q^{(n)}$.
Consider the vorticity formulation of the $(n+1)$-th iterate, which reads
\begin{align}
&\partial_{t} \zeta^{(n+1)}_i + v^{(n+1)}_k a^{(n)}_{jk} \partial_{j} \zeta^{(n+1)}_i -\psi^{(n)}_t a^{(n)}_{33} \partial_{3} \zeta^{(n+1)}_i - \zeta^{(n+1)}_k a^{(n)}_{jk} \partial_{j} v^{(n+1)}_i =0 \inon{in $\Omega$}
\llabel{eq3262}
\\& \epsilon_{ijk} a^{(n)}_{mj}\partial_{m} v^{(n+1)}_{k} =\zeta^{(n+1)}_{i} \inon{in $\Omega$}
\llabel{eq3263}
\\& a^{(n)}_{mj}\partial_{m} v^{(n+1)}_{j} =0 \inon{in $\Omega$}
\llabel{eq3264}
\\ & b^{(n)}_{3j} v^{(n+1)}_{j} -\psi^{(n)}_{t}=0 \inon{on $\Gamma_0 \cup \Gamma_1$} .
\llabel{eq3265}
\end{align}
We now consider the equation satisfied by the difference $Z^{(n+1)}= \zeta^{(n+1)} - \zeta^{(n)}$. Denote the corresponding extension, defined on ${\mathbb T}^2\times{\mathbb R}$, by $\Theta^{(n+1)}= \theta^{(n+1)} - \theta^{(n)}$.
Then we have
\begin{align}
\begin{split}
& \partial_{t} \Theta^{(n+1)}_i
+ V^{(n+1)}_k a^{(n)}_{jk} \partial_{j} \theta^{(n+1)}_i
+ v^{(n)}_k A^{(n)}_{jk} \partial_{j} \theta^{(n+1)}_i
+ v^{(n)}_k a^{(n-1)}_{jk} \partial_{j} \Theta^{(n+1)}_i
\\&\indeq\indeq
-\Psi^{(n)}_t a^{(n)}_{33} \partial_{3} \theta^{(n+1)}_i
-\psi^{(n-1)}_t A^{(n)}_{33} \partial_{3} \theta^{(n+1)}_i
-\psi^{(n-1)}_t a^{(n-1)}_{33} \partial_{3} \Theta^{(n+1)}_i
\\&\indeq\indeq
- \Theta^{(n+1)}_k a^{(n)}_{jk} \partial_{j} v^{(n+1)}_i
- \theta^{(n)}_k A^{(n)}_{jk} \partial_{j} v^{(n+1)}_i
- \theta^{(n)}_k a^{(n-1)}_{jk} \partial_{j} V^{(n+1)}_i
=0
\inon{in $\Omega$} .
\end{split}
\llabel{eq3266}
\end{align}
Using the same estimates as in Section~\ref{sec08}, we obtain the analog of the inequality \eqref{eq3187}, with the constants depending on $M$, i.e.,
\begin{align}
\begin{split}
\Vert \Theta^{(n+1)}(t)\Vert_{H^{0.5+\delta}}
&\leq
P(M) \int_{0}^{t}
\bigl(
\Vert V^{(n+1)} \Vert_{H^{1.5+\delta}}
+ \Vert \Theta^{(n+1)} \Vert_{H^{0.5+\delta}}
\\&\indeq\indeq\indeq\indeq\indeq\indeq\indeq\indeq\indeq\indeq
+ \Vert W^{(n)} \Vert_{H^{3+\delta}(\Gamma_1)}
+ \Vert W^{(n)}_t\Vert_{H^{1+\delta}(\Gamma_1)}
\bigr)\,ds .
\end{split}
\llabel{eq3267}
\end{align}
Choosing $T$ sufficiently small compared to $P(M)$, we may absorb the term containing $\Theta^{(n+1)}$ and obtain
\begin{align}
\begin{split}
\Vert \Theta^{(n+1)}\Vert_{L^\infty H^{0.5+\delta}}
&\leq
T P(M)
\bigl(
\Vert V^{(n+1)} \Vert_{L^\infty H^{1.5+\delta}}
+ \Vert W^{(n)} \Vert_{L^\infty H^{3+\delta}}
+ \Vert W^{(n)}_t\Vert_{L^{\infty}H^{1+\delta}}
\bigr) ,
\end{split}
\label{eq3268}
\end{align}
where the norms of the terms involving $W^{(n)}$ are taken over $\Gamma_1\times (0,T)$, while the others are over $\Omega\times (0,T)$.
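For clarity, we record the absorption step behind \eqref{eq3268}; this is only a sketch, under the assumption $T P(M)\leq 1/2$. Bounding the time integral in the previous inequality by $T$ times the corresponding $L^{\infty}$-in-time norms gives
\begin{equation*}
\Vert \Theta^{(n+1)}\Vert_{L^\infty H^{0.5+\delta}}
\leq
T P(M) \Vert \Theta^{(n+1)}\Vert_{L^\infty H^{0.5+\delta}}
+ T P(M)
\bigl(
\Vert V^{(n+1)} \Vert_{L^\infty H^{1.5+\delta}}
+ \Vert W^{(n)} \Vert_{L^\infty H^{3+\delta}}
+ \Vert W^{(n)}_t\Vert_{L^{\infty}H^{1+\delta}}
\bigr) ,
\end{equation*}
and the first term on the right-hand side may be absorbed into the left-hand side, at the cost of a factor of~$2$, which we suppress.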
We next invoke the div-curl estimates on the system
\begin{align}
\begin{split}
& \epsilon_{ijk} a^{(n)}_{mj}\partial_{m} V^{(n+1)}_{k} =Z^{(n+1)}_{i} - \epsilon_{ijk} A^{(n)}_{mj}\partial_{m} v^{(n)}_{k} \inon{in $\Omega\times[0,T]$}
\\& a^{(n)}_{mj}\partial_{m} V^{(n+1)}_{j} = - A^{(n)}_{mj}\partial_{m} v^{(n)}_{j} \inon{in $\Omega\times [0,T]$}
\\ & b^{(n)}_{3j} V^{(n+1)}_{j} -\Psi^{(n)}_{t} = -B^{(n)}_{3j} v^{(n)}_{j} \inon{on $(\Gamma_0 \cup \Gamma_1) \times [0,T]$}
\end{split}
\llabel{eq3271}
\end{align}
to obtain, for any $t\in[0,T]$,
\begin{align}
\begin{split}
\Vert V^{(n+1)} \Vert_{H^{1.5+\delta}}
&\lesssim
\Vert \Theta^{(n+1)} \Vert_{H^{0.5+\delta}}
+ \Vert A^{(n)} \Vert_{H^{0.5+\delta}} \Vert \nabla v^{(n)} \Vert_{H^{1.5+\delta}}
\\&\indeq
+ \Vert \Psi_{t}^{(n)} \Vert_{H^{1+\delta}(\Gamma_{1})}
+ \Vert B^{(n)} \Vert_{H^{1.5+\delta}} \Vert v^{(n)} \Vert_{H^{1.5+\delta}} ,
\end{split}
\label{eq3272}
\end{align}
where we used $\Vert Z^{(n+1)} \Vert_{H^{0.5+\delta}}\lesssim\Vert \Theta^{(n+1)} \Vert_{H^{0.5+\delta}}$.
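For orientation, we recall the model form of the div-curl estimate invoked above; this is a flat-space sketch, while the version actually applied is the variable-coefficient one associated with the matrix $a^{(n)}$. For $s\geq 0$ and a vector field $u$ on $\Omega$, periodic in the horizontal directions,
\begin{equation*}
\Vert u\Vert_{H^{s+1}(\Omega)}
\lesssim
\Vert \operatorname{curl} u\Vert_{H^{s}(\Omega)}
+ \Vert \operatorname{div} u\Vert_{H^{s}(\Omega)}
+ \Vert u_{3}\Vert_{H^{s+0.5}(\Gamma_{0}\cup\Gamma_{1})}
+ \Vert u\Vert_{L^{2}(\Omega)} .
\end{equation*}
Applied with $u=V^{(n+1)}$, $s=0.5+\delta$, and the right-hand sides of the system above, it leads to an estimate of the form \eqref{eq3272}.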
From \eqref{eq3272}, we obtain
\begin{align}
\Vert V^{(n+1)} \Vert_{L^\infty H^{1.5+\delta}}
\lesssim
\Vert \Theta^{(n+1)} \Vert_{L^{\infty}H^{0.5+\delta}}
+ M\Vert W^{(n)} \Vert_{L^{\infty}H^{1+\delta}(\Gamma_1\times(0,T))}
+ \Vert W_{t}^{(n)} \Vert_{L^{\infty}H^{1+\delta}(\Gamma_{1}\times(0,T))} .
\label{eq3273}
\end{align}
Using \eqref{eq3268} in \eqref{eq3273} and again reducing $T$, if necessary, so that the term containing $V^{(n+1)}$ can be absorbed, we get
\begin{align}
\Vert V^{(n+1)} \Vert_{H^{1.5+\delta}}
\leq
P(M)
\bigl(
\Vert W^{(n)} \Vert_{H^{1+\delta}(\Gamma_1)}
+ \Vert W_{t}^{(n)} \Vert_{H^{1+\delta}(\Gamma_{1})}
\bigr) .
\label{eq3274}
\end{align}
We next use the fact that the pressure term $Q^{(n+1)}$ satisfies an elliptic boundary value problem with Neumann-type boundary conditions.
In particular, $Q^{(n+1)}$ satisfies an equation with the structure of \eqref{eq3175}, with a Neumann boundary condition similar to \eqref{eq3177} on both $\Gamma_{0}$ and $\Gamma_{1}$ (rather than the Robin boundary condition in \eqref{eq3176}), and with the usual extra terms on $\Gamma_1$. Omitting the details, as they are similar to those in \eqref{eq3175}--\eqref{eq3178}, we obtain the elliptic estimate
\begin{align}
\begin{split}
\Vert \nabla Q^{(n+1)} \Vert_{H^{\delta -0.5}}
\lesssim
\Vert V^{(n+1)} \Vert_{H^{1.5+\delta}}
+ \Vert W_{tt}^{(n)}\Vert_{H^{\delta}(\Gamma_1)}
+ \Vert W^{(n)}_t\Vert_{H^{1+\delta }(\Gamma_1)}
+ \Vert W^{(n)}\Vert_{H^{3+\delta }(\Gamma_1)} ,
\end{split}
\label{eq3275}
\end{align}
where the constant depends on~$M$.
Therefore, since $\int_{\Gamma_1}Q^{(n+1)}=0$, using the fundamental theorem of calculus we have
\begin{align}
\begin{split}
\Vert Q^{(n+1)}\Vert_{L^2}
&=
\left\Vert Q^{(n+1)} - \frac{1}{|\Gamma_1|}\int_{\Gamma_1} Q^{(n+1)}\right\Vert_{L^2}
\lesssim
\Vert \partial_{3}Q^{(n+1)}\Vert_{L^2} .
\end{split}
\label{eq3276}
\end{align}
Combining \eqref{eq3275} and \eqref{eq3276}, we get
\begin{align}
\begin{split}
\Vert Q^{(n+1)} \Vert_{H^{\delta +0.5}}
\lesssim
\Vert V^{(n+1)} \Vert_{H^{1.5+\delta}}
+ \Vert W_{tt}^{(n)}\Vert_{H^{\delta}(\Gamma_1)}
+ \Vert W^{(n)}_t\Vert_{H^{1+\delta }(\Gamma_1)}
+ \Vert W^{(n)}\Vert_{H^{3+\delta }(\Gamma_1)} .
\end{split}
\label{eq3277}
\end{align}
The energy estimate for the plate equation yields
\begin{align}
\begin{split}
& \Vert W^{(n+1)}(t) \Vert^{2}_{H^{3+\delta}(\Gamma_1)}
+ \Vert W_{t}^{(n+1)}(t) \Vert^{2}_{H^{1+\delta}(\Gamma_1)}
+ \Vert W_{tt}^{(n+1)}(t) \Vert^{2}_{H^{\delta}(\Gamma_1)}
+ \nu \int_{0}^{t} \Vert W_{t}^{(n+1)}(s) \Vert^{2}_{H^{2.5+\delta}(\Gamma_1)}\,ds
\\&\indeq
\lesssim
\int_{0}^{t} \Vert Q^{(n+1)}(s) \Vert^{2}_{H^{\delta}} \, ds
\lesssim
\int_{0}^{t} \Vert Q^{(n+1)}(s) \Vert^{2}_{H^{\delta+0.5}} \, ds
\lesssim
T \Vert Q^{(n+1)} \Vert^{2}_{L^{\infty}([0,T];H^{\delta+0.5}(\Omega)) } ,
\end{split}
\label{eq3278}
\end{align}
for $t\in[0,T]$. Using \eqref{eq3277} in \eqref{eq3278}, we get
\begin{align}
\begin{split}
& \Vert W^{(n+1)} \Vert^{2}_{L^{\infty}H^{3+\delta}(\Gamma_1\times(0,T))}
+ \Vert W_{t}^{(n+1)} \Vert^{2}_{L^{\infty}H^{1+\delta}(\Gamma_1\times(0,T))}
\\&\indeq\indeq
+ \Vert W_{tt}^{(n+1)} \Vert^{2}_{L^{\infty}H^{\delta}(\Gamma_1\times(0,T))}
+ \nu \Vert W_{t}^{(n+1)} \Vert^{2}_{L^{2}H^{2.5+\delta}(\Gamma_1\times(0,T))}
\\&\indeq
\lesssim
T \Vert V^{(n+1)} \Vert^{2}_{L^{\infty}H^{1.5+\delta}(\Omega\times(0,T))}
+ T \Vert W^{(n)} \Vert^{2}_{L^{\infty}H^{3+\delta}(\Gamma_1\times(0,T))}
+ T\Vert W_{t}^{(n)} \Vert^{2}_{L^{\infty}H^{1+\delta}(\Gamma_1\times(0,T))}
\\&\indeq\indeq
+ T\Vert W_{tt}^{(n)} \Vert^{2}_{L^{\infty}H^{\delta}(\Gamma_1\times(0,T))} ,
\end{split}
\label{eq3280}
\end{align}
which, upon choosing $T$ sufficiently small, shows the contractivity property for the plate component. On the other hand, using \eqref{eq3280}, with $n+1$ replaced by $n$, in \eqref{eq3274} and choosing $T$ sufficiently small compared to $P(M)$, we get that the velocity component is contractive as well.
\par
To make the contractive property precise, denote
\begin{align}
\begin{split}
\alpha_n =
\Vert W^{(n)} \Vert^{2}_{L^{\infty}H^{3+\delta}(\Gamma_1\times(0,T))}
+ \Vert W_{t}^{(n)} \Vert^{2}_{L^{\infty}H^{1+\delta}(\Gamma_1\times(0,T))}
+ \Vert W_{tt}^{(n)} \Vert^{2}_{L^{\infty}H^{\delta}(\Gamma_1\times(0,T))}
\end{split}
\llabel{eq3247}
\end{align}
and
\begin{align}
\begin{split}
\beta_n =
\Vert V^{(n)} \Vert^{2}_{L^{\infty}H^{1.5+\delta}(\Omega\times(0,T))} .
\end{split}
\llabel{eq3322}
\end{align}
The inequality \eqref{eq3280} may then be written as
\begin{align}
\begin{split}
\alpha_{n+1} \leq C_0 T (\alpha_n + \beta_{n+1}) ,
\end{split}
\llabel{eq3323}
\end{align}
while, by \eqref{eq3274},
\begin{align}
\begin{split}
\beta_{n+1} \leq C_0 \alpha_{n} ,
\end{split}
\llabel{eq3324}
\end{align}
where $C_0\geq1$ is a fixed constant. With $\epsilon_0>0$ to be determined, we have
\begin{align}
\begin{split}
\alpha_{n+1} + \epsilon_0 \beta_{n+1}
\leq
C_0 T (\beta_{n+1}+\alpha_{n}) + C_0 \epsilon_0 \alpha_n .
\end{split}
\llabel{eq3325}
\end{align}
To obtain a contractive property, it is sufficient to require $C_0 T \leq \epsilon_0$ and $C_0 T + C_0 \epsilon_0 \leq 1/2$. Note that it is possible to achieve these two inequalities if $T=\epsilon_0/C_0$ and $(1+C_0)\epsilon_0\leq 1/2$. With these two choices, we obtain $\alpha_{n+1}+\epsilon_0 \beta_{n+1}\leq (1/2)(\alpha_n+\epsilon_0\beta_n)$, so $\alpha_n+\epsilon_0\beta_n$ converges to zero geometrically and the iterates form a Cauchy sequence in the corresponding norms. Hence, there exists a unique solution $(w, w_{t}) \in L^{\infty}([0,T];H^{4+ \delta}(\Gamma_{1})\times H^{2+ \delta}(\Gamma_{1}) )$ satisfying \eqref{eq3241}--\eqref{eq3244}. The regularity and uniqueness of the corresponding pair $(v,q)$ can be deduced from Theorem~\ref{T04}. This establishes Theorem~\ref{T05}.
\end{proof}
\par
\subsection{Applying the a~priori estimates to the constructed solutions with $\nu>0$}
\label{sec11}
Now that we have constructed solutions for a given $\nu>0$, the uniform bounds from the a~priori estimates in Section~\ref{sec06} are used to pass to the limit as $\nu \to 0$. However, the constructed solutions are not sufficiently regular to justify a direct application of the a~priori estimates in Section~\ref{sec02}. Instead, we perform the a~priori estimates on partial difference quotients of solutions.
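Before turning to the proof, we recall, only as a sketch, the two standard properties of horizontal difference quotients that are used repeatedly below: for a function $f$ which is periodic in the horizontal variables, $h\in\mathbb{R}\backslash\{0\}$, and $l\in\{1,2\}$, we have
\begin{equation*}
\Bigl\Vert \frac{1}{h}\bigl(f(\cdot+h e_{l})-f\bigr)\Bigr\Vert_{H^{s}}
\leq \Vert \partial_{l} f\Vert_{H^{s}} ,
\end{equation*}
uniformly in $h$, and, conversely, a bound on these difference quotients in $H^{s}$ which is uniform in $h$ implies that $\partial_{l} f$ belongs to $H^{s}$ with the same bound.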
\par
\begin{proof}[Proof of Theorem~\ref{T03}]
Denote by
\begin{equation}
D_{h,l}f(x)= \frac{1}{h}(f(x+h e_{l}) - f(x))
\comma x\in\Omega
\commaone l=1,2
\commaone h\in\mathbb{R}\backslash\{0\}
\llabel{eq3283}
\end{equation}
the difference quotient of a function $f$ by $h\in\mathbb{R}\backslash \{0\}$ in the direction~$e_l$. We start with the analog of the plate estimate \eqref{eq352}, which reads
\begin{align}
\begin{split}
& \frac12 \frac{d}{dt}
\Bigl(
\Vert \Delta_2 \Lambda^{1+\delta}D_{h,l} w\Vert_{L^2(\Gamma_1)}^2
+ \Vert \Lambda^{1+\delta} D_{h,l} w_{t}\Vert_{L^2(\Gamma_1)}^2
\Bigr)
+ \nu \Vert \nabla_2 \Lambda^{1+\delta} D_{h,l} w_{t} \Vert_{L^2(\Gamma_1)}^2
\\&\indeq
= \int_{\Gamma_1} \Lambda^{1+\delta} D_{h,l}q \, \Lambda^{1+\delta} D_{h,l}w_{t} ,
\end{split}
\label{eq3284}
\end{align}
where we assume $h\in\mathbb{R}\backslash \{0\}$ and $l\in\{1,2\}$ throughout. Since $h$ and $l$ are fixed for most of the proof, we denote
\begin{equation}
D = D_{h,l} .
\llabel{eq3335}
\end{equation}
The identity \eqref{eq3284} is obtained by applying $\Lambda^{1+\delta} D_{h,l}$ to the plate equation \eqref{eq322} and testing the resulting equation with $\Lambda^{1+\delta} D_{h,l}w_{t}$, for $l=1,2$. Integrating \eqref{eq3284} in time, we obtain
\begin{align}
\begin{split}
& \frac12 \Vert \Delta_2 \Lambda^{1+\delta} D w\Vert_{L^2(\Gamma_1)}^2
+ \frac12 \Vert \Lambda^{1+\delta} D w_{t}\Vert_{L^2(\Gamma_1)}^2
+ \nu \int_{0}^{t} \Vert \nabla_2 \Lambda^{1+\delta} D w_{t} \Vert_{L^2(\Gamma_1)}^2 \, ds
\\&\indeq
= \frac12 \Vert \Delta_2 \Lambda^{1+\delta} D w(0)\Vert_{L^2(\Gamma_1)}^2
+ \frac12 \Vert \Lambda^{1+\delta} D w_{t}(0)\Vert_{L^2(\Gamma_1)}^2
+ \int_{0}^{t}\int_{\Gamma_1} \Lambda^{1+\delta} D q \, \Lambda^{1+\delta} D w_{t}\,d\sigma\,ds .
\end{split}
\llabel{eq3285}
\end{align}
For the tangential estimate for the Euler equations, we start, in analogy with \eqref{eq3328}, with
\begin{align}
\begin{split}
& \frac12 \frac{d}{dt} \int J \Lambda^{0.5+\delta} D v_i \Lambda^{1.5+\delta} D v_i
= \frac12 \int J_t \Lambda^{0.5+\delta} D v_i \Lambda^{1.5+\delta} D v_i
+ \int J \Lambda^{0.5+\delta} \partial_{t} D v_i \Lambda^{1.5+\delta} D v_i
+ \bar I ,
\end{split}
\label{eq3333}
\end{align}
where
\begin{align}
\begin{split}
\bar I &= \frac12\int J \Lambda^{0.5+\delta} D v_i \Lambda^{1.5+\delta} \partial_{t} D v_i
- \frac12\int J \Lambda^{0.5+\delta} \partial_{t} D v_i \Lambda^{1.5+\delta} D v_i
\\& = \frac12 \int \Bigl( \Lambda^2 (J\Lambda^{0.5+\delta}D v_i) - J \Lambda^{2.5+\delta}D v_i \Bigr) \Lambda^{-0.5+\delta} \partial_{t}D v_i
\\&\indeq
+ \frac12 \int \Bigl( J \Lambda^{2.5+\delta}D v_i - \Lambda (J \Lambda^{1.5+\delta}D v_i) \Bigr) \Lambda^{-0.5+\delta} \partial_{t}D v_i
\\&
\lesssim
\Vert J\Vert_{H^{3}} \Vert v\Vert_{H^{2.5+\delta}} \Vert v_t\Vert_{H^{0.5+\delta}}
\leq
P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)} ) ,
\end{split}
\llabel{eq3336}
\end{align}
recalling that $\delta\geq 0.5$.
\par For the second term in \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3333}, we use the Euler equations \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n354}$_1$, which leads to \begin{align}\thelt{e rX yGsd vcKr YzZGQe AM 1 u1T Nky bHc U7 1Kmp yaht wKEj7O u0 A 7ep b7v 4Fd qS AD7c 02cG vsiW44 4p F eh8 Odj wM7 ol sSQo eyZX ota8wX r6 N SG2 sFo GBe l3 PvMo Ggam q3Ykaa tL i dTQ 84L YKF fA F15v lZae TTxvru 2x l M2g FBb V80 UJ Qvke bsTq FRfmCS Ve 3 4YV HOu Kok FX YI2M TZj8 BZX0Eu D1 d Imo cM9 3Nj ZP lPHq Ell4 z66IvF 3T O Mb7 xuV RYj lV EBGe PNUg LqSd4O YN e Xud aDQ 6Bj KU rIpc r5n8 QTNztB ho 3 LC3 rc3 0it 5C N2Tm N88X YeTdqT LP l S97 uLM w0N As M} \begin{split} & \frac12 \frac{d}{dt} \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH J \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta} D v_i \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta} D v_i \\&\indeq = \frac12 \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH J_t \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta} D v_i \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta} D v_i - \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH \Bigl( \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta} D(J\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_t v_i) - J \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta}D (\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{t}v_i) \Bigr) \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta} Dv_i \\&\indeq\indeq - \sum_{m=1}^{2}\OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta}D(v_m\tda_{jm}\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{j}v_i) \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta}D v_i - \OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta} D \bigl( (v_3-\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{t}\eta_3)\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{3}v_i \bigr) \UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta}D v_i \\&\indeq\indeq -\OIUYJHUGFAJKLDHFKJLSDHFLKSDJFHLKSDJHFLKSDJHFLKDJFHLLDKHFLKSDHJFALKJHLJLHGLKHHLKJHLKGKHGJKHGKJHLKHJLKJH 
\UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{0.5+\delta}D(\tda_{ki}\UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k}q)\UIPOIUPOIUPOOYIUIUYOIUYOIUHOIUOIUHIOPUHPOIJPOIJPOUHOIUHOILJHLIUHYOIUYOUI^{1.5+\delta} D v_i \\&\indeq = I_1 + I_2 +I_3 + I_4 + I_5 + \bar I . \end{split} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3286} \end{align} The first term satisfies \begin{align}\thelt{4L YKF fA F15v lZae TTxvru 2x l M2g FBb V80 UJ Qvke bsTq FRfmCS Ve 3 4YV HOu Kok FX YI2M TZj8 BZX0Eu D1 d Imo cM9 3Nj ZP lPHq Ell4 z66IvF 3T O Mb7 xuV RYj lV EBGe PNUg LqSd4O YN e Xud aDQ 6Bj KU rIpc r5n8 QTNztB ho 3 LC3 rc3 0it 5C N2Tm N88X YeTdqT LP l S97 uLM w0N As MphO uPNi sXNIlW fX B Gc2 hxy kg5 0Q TN75 t5JN wZR3NH 1M n VRZ j2P rUY ve HPEl jGaT Ix4sCF zK B 0qp 3Pl eK6 8p 85w4 4l5z Zl07br v6 1 Kki AuT SA5 dk wYS3 F3YF 3e1xKE JW o AvV OZV bwN} \begin{split} I_1 \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \Vert J_t\Vert_{H^{1.5+\delta}} \Vert v\Vert_{H^{1.5+\delta}} \Vert v\Vert_{H^{2.5+\delta}} . \end{split} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3287} \end{align} For the next commutator term $I_{2}$, we use the product rule \begin{equation} D_{h,l}(fg)= D_{h,l}f g + \tau_{h,l}f D_{h,l} g , \label{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3288} \end{equation} where we denote by \begin{equation} \tau_{h,l}g = g(x+ he_{l}) \comma x\in\Omega \comma l=1,2 \commaone h\in\mathbb{R}\backslash\{0\} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3289} \end{equation} the translation operator and abbreviate $\tau=\tau_{h,l}$. 
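As a quick check, added here for the reader's convenience, the product rule \eqref{eq:3288} follows directly from the definitions of $D_{h,l}$ and $\tau_{h,l}$ by adding and subtracting $f(x+he_{l})g(x)$:
\begin{align*}
D_{h,l}(fg)(x)
&= \frac{1}{h}\bigl( f(x+he_{l})g(x+he_{l}) - f(x)g(x) \bigr)
\\&
= \frac{1}{h}\bigl( f(x+he_{l}) - f(x) \bigr) g(x)
+ f(x+he_{l}) \, \frac{1}{h}\bigl( g(x+he_{l}) - g(x) \bigr)
\\&
= D_{h,l}f(x)\, g(x) + \tau_{h,l}f(x)\, D_{h,l}g(x)
.
\end{align*}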
We get
\begin{align}
\begin{split}
I_{2}
=\int
\Bigl(
\Lambda^{0.5+\delta} (J D(\partial_t v_i))
- J \Lambda^{0.5+\delta}D (\partial_{t}v_i)
\Bigr)
\Lambda^{1.5+\delta} Dv_i
+ \int \Lambda^{0.5+\delta} \bigl(DJ \tau(\partial_t v_i)\bigr) \Lambda^{1.5+\delta} Dv_i
.
\end{split}
\llabel{eq:3290}
\end{align}
Using commutator estimates, we get
\begin{align}
\begin{split}
I_2
&\lesssim
\Vert \Lambda^{0.5+\delta} J\Vert_{L^\infty} \Vert D v_{t}\Vert_{L^2} \Vert D v\Vert_{H^{1.5+\delta}}
+ \Vert \Lambda J\Vert_{L^\infty} \Vert \Lambda^{-0.5+\delta} D v_{t}\Vert_{L^2} \Vert D v\Vert_{H^{1.5+\delta}}
\\&\indeq
+ \Vert \Lambda^{0.5+\delta} D J\Vert_{L^6} \Vert \tau v_{t}\Vert_{L^3} \Vert D v\Vert_{H^{1.5+\delta}}
+ \Vert D J\Vert_{L^\infty} \Vert \Lambda^{0.5+\delta}\tau v_{t}\Vert_{L^2} \Vert D v\Vert_{H^{1.5+\delta}}
\\&
\lesssim
\Vert J\Vert_{H^{3.5+\delta}} \Vert v_{t}\Vert_{H^{0.5+\delta}} \Vert v\Vert_{H^{2.5+\delta}}
\leq
P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert v_t\Vert_{H^{0.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)} )
,
\end{split}
\llabel{eq:3291}
\end{align}
where we used $\delta\geq0.5$.
The third term $I_3$ may be estimated using the product rule as
\begin{align}
\begin{split}
I_3
&
\lesssim
\bigl\Vert D(v_m \tda_{jm}\partial_{j} v_i) \bigr\Vert_{H^{0.5+\delta}} \Vert D v \Vert_{H^{1.5+\delta}}
\lesssim
\Vert v\Vert_{H^{2.5+\delta}}^3 \Vert b\Vert_{H^{3.5+\delta}}
\leq
P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert w\Vert_{H^{4+\delta}(\Gamma_1)} )
.
\end{split}
\llabel{eq:3292}
\end{align}
Similarly,
\begin{align}
\begin{split}
I_4
&
\lesssim
\bigl\Vert D (v_3-\psi_t)\partial_{3}v \bigr\Vert_{H^{0.5+\delta}} \Vert D v \Vert_{H^{1.5+\delta}}
\lesssim
\Vert v\Vert_{H^{2.5+\delta}}^3
+ \Vert \eta_{t}\Vert_{H^{2.5+\delta}} \Vert v\Vert_{H^{2.5+\delta}}^2
\\&
\leq
P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert w_t\Vert_{H^{2+\delta}(\Gamma_1)} )
.
\end{split}
\llabel{eq:3293}
\end{align}
Next, $I_{5}$ can be expressed as
\begin{align}
\begin{split}
I_5
& =
\int \Lambda^{0.5+\delta}D(\tda_{ki}q)\Lambda^{1.5+\delta} \partial_{k} D v_i
- \int_{\Gamma_1} \Lambda^{1+\delta}D(\tda_{3i}q)\Lambda^{1+\delta} D v_i
\\& = I_{51} + I_{52}
.
\end{split}
\llabel{eq:3294}
\end{align}
Using the product rule \eqref{eq:3288}, we rewrite $I_{51}$ as
\begin{align}
\begin{split}
I_{51}
&=
\int \tda_{ki}\Lambda^{0.5+\delta}D q\,\Lambda^{1.5+\delta} \partial_{k}D v_i
+\int
\Bigl(\Lambda^{0.5+\delta}(\tda_{ki}D q) - \tda_{ki}\Lambda^{0.5+\delta}D q \Bigr)\Lambda^{1.5+\delta}
\partial_{k}Dv_i
\\ & \indeq
+ \int \Lambda^{0.5+\delta} ( D \tda_{ki}) \tau q\,\Lambda^{1.5+\delta} \partial_{k}D v_i
\\ & =
\int
\Bigl(
\tda_{ki} \Lambda^{1.5+\delta} \partial_{k}D v_i
- \Lambda^{1.5+\delta}D \partial_{k}( \tda_{ki} v_i )
\Bigr)
\Lambda^{0.5+\delta}D q
\\&\indeq
+\int
\Bigl(\Lambda^{0.5+\delta}(\tda_{ki}D q) - \tda_{ki}\Lambda^{0.5+\delta}D q \Bigr)\Lambda^{1.5+\delta}
\partial_{k}Dv_i
+ \int \Lambda^{0.5+\delta} ( D \tda_{ki}) \tau q\,\Lambda^{1.5+\delta} \partial_{k}D v_i
,
\end{split}
\llabel{eq:3295}
\end{align}
and thus
\begin{align}
\begin{split}
I_{51}
&=
\int
\Bigl(
\tda_{ki} \Lambda^{1.5+\delta} \partial_{k}D v_i
- \Lambda^{1.5+\delta} \partial_{k}( \tda_{ki} Dv_i )
\Bigr)
\Lambda^{0.5+\delta}D q
\\&\indeq
+\int
\Bigl(\Lambda^{0.5+\delta}(\tda_{ki}D q) - \tda_{ki}\Lambda^{0.5+\delta}D q \Bigr)\Lambda^{1.5+\delta}
\partial_{k}Dv_i
\\ & \indeq
- \int \Lambda^{1.5+\delta} \partial_{k}( D \tda_{ki} \tau v_i ) \Lambda^{0.5+\delta}D q
+ \int \Lambda^{0.5+\delta} ( D \tda_{ki}) \tau q\,\Lambda^{1.5+\delta} \partial_{k}D v_i
.
\end{split}
\llabel{eq:3249}
\end{align}
We treat these four terms using Kato-Ponce commutator estimates, and proceeding as for $I_2$, we obtain
$
I_{51}
\leq
P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert q\Vert_{H^{1.5+\delta}}, \Vert \tda\Vert_{H^{3.5+\delta}} )$.
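For the reader's convenience, we recall the type of commutator bound being invoked; in one standard formulation (the precise variant used for this step may differ slightly), the Kato-Ponce commutator estimate asserts that, for $s>0$,
\begin{align*}
\Vert \Lambda^{s}(fg) - f\,\Lambda^{s} g\Vert_{L^{2}}
\lesssim
\Vert \nabla f\Vert_{L^{\infty}} \Vert \Lambda^{s-1} g\Vert_{L^{2}}
+ \Vert \Lambda^{s} f\Vert_{L^{2}} \Vert g\Vert_{L^{\infty}}
.
\end{align*}
Estimates of this type, with $f$ a coefficient ($J$, $\tda_{ki}$, or $\tda_{3i}$) and $g$ a difference quotient of $v$, $v_{t}$, or $q$, control the commutator factors appearing in the bounds for $I_{2}$ and $I_{51}$.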
As for
$I_{52}= - \int_{\Gamma_1} \Lambda^{1+\delta}D(\tda_{3i}q)\Lambda^{1+\delta} D v_i$,
we write
\begin{align}
\begin{split}
I_{52}
&=
- \int_{\Gamma_1} (\Lambda^{\delta} D q )\tda_{3i}\Lambda^{2+\delta} D v_i
- \int_{\Gamma_1} \Lambda^{\delta} D q
\Bigl(
\Lambda(\tda_{3i} \Lambda^{1+\delta}Dv_i)
- \tda_{3i} \Lambda^{2+\delta}D v_i
\Bigr)
\\ & \indeq
- \int_{\Gamma_1}
\Bigl(
\Lambda^{1+\delta}(\tda_{3i}Dq)
- \tda_{3i} \Lambda^{1+\delta}D q
\Bigr)
\Lambda^{1+\delta} D v_i
- \int_{\Gamma_1} \Lambda^{1+\delta}(D\tda_{3i}\tau q) \Lambda^{1+\delta} D v_i
\\&
=
- \int_{\Gamma_1} (\Lambda^{\delta} D q ) \Lambda^{2+\delta}D(\tda_{3i} v_i)
+ \int_{\Gamma_1} \Lambda^{\delta} Dq
\Bigl(\Lambda^{2+\delta}(\tda_{3i} Dv_i)
- \tda_{3i} \Lambda^{2+\delta}Dv_i
\Bigr)
\\&\indeq
- \int_{\Gamma_1}
\Bigl(
\Lambda^{1+\delta}(\tda_{3i}Dq)
- \tda_{3i} \Lambda^{1+\delta}D q
\Bigr)
\Lambda^{1+\delta} D v_i
- \int_{\Gamma_1} \Lambda^{1+\delta}(D\tda_{3i}\tau q) \Lambda^{1+\delta} D v_i
\\& \indeq
+ \int_{\Gamma_1} \Lambda^{\delta} Dq \Lambda^{2+\delta}(D\tda_{3i} \tau v_i)
.
\end{split}
\llabel{eq:3297}
\end{align}
After integration in time, the first boundary term cancels with the boundary integral on the right-hand side of~\eqref{eq:3284}.
The remaining terms are estimated as above by
$
P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert q\Vert_{H^{1.5+\delta}}, \Vert \tda\Vert_{H^{3.5+\delta}} )
$.
\par
We also need to justify applying the vorticity estimate \eqref{eq:3107} to the constructed solutions.
We apply $\Lambda_3^{0.5+\delta}D$ to the equation \eqref{eq:398} and test with $\Lambda_3^{0.5+\delta}D\theta$, obtaining
\begin{align}
\begin{split}
&
\frac12 \frac{d}{dt} \int_{\Omega_0} \bar J|\Lambda_3^{0.5+\delta}D \theta|^2
\\&\indeq
=
- \sum_{m=1}^{2} \int_{\Omega_0} \tilde v_m \tilde\tda_{jm}\partial_{j} \Lambda_3^{0.5+\delta}D\theta_i \Lambda_3^{0.5+\delta}D \theta_i
- \int_{\Omega_0} (\tilde v_3-\tilde\psi_t) \tilde\tda_{j3} \partial_{j} \Lambda_3^{0.5+\delta}D \theta_i \Lambda_3^{0.5+\delta}D\theta_i
\\&\indeq\indeq
+ \int_{\Omega_0} \theta_k \tilde\tda_{mk}\partial_{m}\Lambda_3^{0.5+\delta}D\tilde v_i \Lambda_3^{0.5+\delta}D\theta_i
\\&\indeq\indeq
- \sum_{m=1}^2 \int_{\Omega_0}
\Bigl(\Lambda_3^{0.5+\delta}(\tilde v_m \tilde\tda_{jm}\partial_{j} D\theta_i )
- \tilde v_m \tilde\tda_{jm}\partial_{j} \Lambda_3^{0.5+\delta}D\theta_i
\Bigr)\Lambda_3^{0.5+\delta}D\theta_i
\\&\indeq\indeq
- \sum_{m=1}^2 \int_{\Omega_0}
\Lambda_3^{0.5+\delta}
\Bigl(
D(\tilde v_m \tilde\tda_{jm})
\partial_{j} \tau\theta_i
\Bigr)
\Lambda_3^{0.5+\delta}D\theta_i
\\&\indeq\indeq
- \int_{\Omega_0}
\Bigl(
\Lambda_3^{0.5+\delta}( (\tilde v_3-\tilde\psi_t) \tilde\tda_{j3} \partial_{j}D\theta_i )
- (\tilde v_3-\tilde\psi_t) \tilde\tda_{j3} \partial_{j} \Lambda_3^{0.5+\delta}D\theta_i
\Bigr)
\Lambda_3^{0.5+\delta}D \theta_i
\\&\indeq\indeq
- \int_{\Omega_0}
\Lambda_3^{0.5+\delta}
\Bigl(
D( (\tilde v_3-\tilde\psi_t) \tilde\tda_{j3})
\partial_{j}\tau\theta_i
\Bigr)
\Lambda_3^{0.5+\delta}D \theta_i
\\&\indeq\indeq
+ \int_{\Omega_0}
\Bigl(
\Lambda_3^{0.5+\delta}(\theta_k \tilde\tda_{mk}\partial_{m}D \tilde v_i )
- \theta_k \tilde\tda_{mk}\partial_{m}\Lambda_3^{0.5+\delta}D\tilde v_i
\Bigr)\Lambda_3^{0.5+\delta}D\theta_i
\\&\indeq\indeq
+ \int_{\Omega_0}
\Lambda_3^{0.5+\delta}
\Bigl(D(\theta_k \tilde\tda_{mk})
\partial_{m}\tau \tilde v_i
\Bigr)\Lambda_3^{0.5+\delta}D\theta_i
\\&\indeq\indeq
+ \frac12 \int_{\Omega_0} \bar J_t |\Lambda_3^{0.5+\delta} D\theta|^2
- \int_{\Omega_0}
\Bigl(
\Lambda_3^{0.5+\delta}(\bar J D\partial_t \theta_i)
- \bar J \Lambda_3^{0.5+\delta}
D(\partial_{t}\theta_i)
\Bigr)
\Lambda_3^{0.5+\delta}D\theta_i
\\&\indeq\indeq
- \int_{\Omega_0}
\Lambda_3^{0.5+\delta}
\Bigl(D\bar J \tau \partial_t \theta_i
\Bigr)
\Lambda_3^{0.5+\delta}D\theta_i
.
\end{split}
\llabel{eq:3298}
\end{align}
The first two terms are treated as above by integrating by parts in $x_{j}$ and noting that the boundary term vanishes.
The remaining terms are also estimated as above, using commutator estimates and Sobolev inequalities, to conclude
\begin{align}
\begin{split}
\frac12 \frac{d}{dt} \int_{\Omega_0} \bar J|\Lambda_3^{0.5+\delta}D_{h,l} \theta|^2
&\leq
P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert b\Vert_{H^{3.5+\delta}}, \Vert \psi_t\Vert_{H^{2.5+\delta}}, \Vert J\Vert_{H^{3.5+\delta}}, \Vert J_t\Vert_{H^{1.5+\delta}} )
\Vert \zeta\Vert_{H^{1.5+\delta}(\Omega)}^2
\\&\indeq
+ P( \Vert v\Vert_{H^{2.5+\delta}}, \Vert b\Vert_{H^{3.5+\delta}}, \Vert \psi_t\Vert_{H^{2.5+\delta}}, \Vert J\Vert_{H^{3.5+\delta}}, \Vert J_t\Vert_{H^{1.5+\delta}} )
\int_{\Omega_0} \bar J|\Lambda_3^{0.5+\delta}D_{h,l} \theta|^2
,
\end{split}
\llabel{eq:3299}
\end{align}
for all $h\in\mathbb{R}\backslash \{0\}$ and $l\in\{1,2\}$.
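Let us also record, for the reader's convenience, the standard difference quotient lemma that converts the bound just obtained, which is uniform in $h$ and $l$, into a statement about tangential derivatives; we state one convenient form of it, which may differ slightly from the version used in the references. If $u\in L^{2}(\Omega)$ and
\begin{align*}
\sup_{h\neq 0} \Vert D_{h,l} u \Vert_{L^{2}(\omega)} \leq C
\qquad\text{for every open } \omega \text{ with } \overline{\omega}\subset\Omega ,
\end{align*}
then the weak derivative $\partial_{l} u$ belongs to $L^{2}(\Omega)$ and $\Vert \partial_{l} u\Vert_{L^{2}(\Omega)}\leq C$. This is the mechanism by which the uniform-in-$h$ bound above, combined with the Gronwall inequality, provides control of the tangential derivatives of $\Lambda_3^{0.5+\delta}\theta$.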
The a~priori estimates \eqref{eq:324} and \eqref{eq:339} can then be applied directly to the constructed $\nu>0$ solutions.
\par
We now pass to the limit as $\nu \to 0$ with solutions for which we have uniform bounds.
For a sequence $\nu_1,\nu_2,\ldots \to0$, denote the corresponding solution to the damped system by $(v^{(n)},q^{(n)}, w^{(n)},w_{t}^{(n)})$ and the corresponding matrix coefficient by $a^{(n)}$.
We then have the uniform bound
\begin{align}
\begin{split}
&\Vert v^{(n)} \Vert_{L^{\infty}([0,T];H^{2.5+\delta})}
+ \Vert q^{(n)} \Vert_{L^{\infty}([0,T];H^{1.5+\delta})}
+ \Vert w^{(n)} \Vert_{L^{\infty}([0,T];H^{4+\delta}(\Gamma_{1}))}
+ \Vert w_{t}^{(n)} \Vert_{L^{\infty}([0,T];H^{2+\delta}(\Gamma_{1}))}
\\&\indeq
\lesssim
\Vert v_{0} \Vert_{H^{2.5 +\delta}}
+ \Vert w_{0} \Vert_{H^{4+\delta}(\Gamma_{1})}
+ \Vert w_{1} \Vert_{H^{2+\delta}(\Gamma_{1})}
,
\end{split}
\label{eq:3300}
\end{align}
for all $n \in \mathbb{N}$, for a uniform time $T>0$ depending on the initial data and independent of $\nu$.
Consequently, $a^{(n)}$ is also uniformly bounded in $L^{\infty}([0,T];H^{3+\delta})$.
We may now pass to a subsequence for which
\begin{align}
\begin{split}
&v^{(n)} \to v \weaks L^{\infty}([0,T];H^{2.5+\delta})
\\&
q^{(n)}\to q \weaks L^{\infty}([0,T];H^{1.5+\delta})
\\&
w^{(n)}\to w \weaks L^{\infty}([0,T];H^{4+\delta}(\Gamma_{1}))
\\&
w_{t}^{(n)}\to w_{t} \weaks L^{\infty}([0,T];H^{2+\delta}(\Gamma_{1}))
\\&
\nu_n w_{t}^{(n)}\to \chi \weak L^{2}([0,T];H^{3+\delta}(\Gamma_{1}))
\\&
w_{tt}^{(n)} \to w_{tt} \weaks L^{\infty}([0,T];H^{\delta}(\Gamma_{1}))
\\&
\eta^{(n)} \to \eta \weaks L^{\infty}([0,T];H^{4.5+\delta})
\\&
a^{(n)} \to a \weaks L^{\infty}([0,T];H^{3.5+\delta})
\\&
a_{t}^{(n)} \to a_{t} \weaks L^{\infty}([0,T];H^{1.5+\delta})
.
\end{split}
\llabel{eq:3301}
\end{align}
To pass to the limit in the nonlinear terms, we need strong convergence.
Given that we also have
\begin{align}
\begin{split}
v_{t}^{(n)}\to v_t \weaks L^{\infty}([0,T];H^{0.5+\delta})
\end{split}
\llabel{eq:3302}
\end{align}
by the a~priori estimates, the Aubin-Lions lemma yields
\begin{align}
\begin{split}
v^{(n)}\to v \inn C([0,T];H^{s})
,
\end{split}
\llabel{eq:3303}
\end{align}
for any $s < 2.5+\delta$.
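For the reader's convenience we recall the compactness statement invoked here, in one common formulation (the Aubin-Lions-Simon lemma); the bounds available above are in fact stronger than what the lemma requires. If $X\subset B\subset Y$ are Banach spaces with the embedding of $X$ into $B$ compact, then
\begin{align*}
\{u_n\}\ \text{bounded in}\ L^{\infty}([0,T];X)
\quad\text{and}\quad
\{\partial_t u_n\}\ \text{bounded in}\ L^{\infty}([0,T];Y)
\end{align*}
imply that $\{u_n\}$ has a subsequence converging strongly in $C([0,T];B)$. Here it is applied with $X=H^{2.5+\delta}$, $Y=H^{0.5+\delta}$, and $B=H^{s}$ for $0.5+\delta\leq s<2.5+\delta$, using the compactness of the embedding $H^{2.5+\delta}\hookrightarrow H^{s}$; the remaining values of $s$ then follow by inclusion.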
Similarly, we can conclude that \begin{align}\thelt{1 K YKL pPz Vhh uX VnWa UVqL xeS9ef sA i 7Lm HXC ARg 4Y JnvB e46D UuQYkd jd z 5Mf PLH oWI TM jUYM 7Qry u7W8Er 0O g j2f KqX Scl Gm IgqX Tam7 J8UHFq zv b Vvx Niu j6I h7 lxbJ gMQY j5qtga xb M Hwb JT2 tlB si b8i7 zj6F MTLbwJ qH V IiQ 3O0 LNn Ly pZCT VUM1 bcuVYT ej G 3bf hcX 0BV Ql 6Dc1 xiWV K4S4RW 5P y ZEV W8A Yt9 dN VSXa OkkG KiLHhz FY Y K1q NGG EEU 4F xdja S2NR REnhHm B8 V y44 6a3 VCe Ck wjCM e3DG fMiFop vl z Lp5 r0z dXr rB DZQv 9HQ7 XJMJog kJ n sD} \begin{split} a^{(n)}\to a \inn C([0,T];H^{r}) , \end{split} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3304} \end{align} for any $r < 3.5+\delta$ since \begin{align}\thelt{tga xb M Hwb JT2 tlB si b8i7 zj6F MTLbwJ qH V IiQ 3O0 LNn Ly pZCT VUM1 bcuVYT ej G 3bf hcX 0BV Ql 6Dc1 xiWV K4S4RW 5P y ZEV W8A Yt9 dN VSXa OkkG KiLHhz FY Y K1q NGG EEU 4F xdja S2NR REnhHm B8 V y44 6a3 VCe Ck wjCM e3DG fMiFop vl z Lp5 r0z dXr rB DZQv 9HQ7 XJMJog kJ n sDx WzI N7F Uf veeL 0ljk 83TxrJ FD T vEX LZY pEq 5e mBaw Z8VA zvvzOv CK m K2Q ngM MBA Wc UH8F jSJt hocw4l 9q J TVG sq8 yRw 5z qVSp d9Ar UfVDcD l8 B 1o5 iyU R4K Nq b84i OkIQ GIczg2 nc} \begin{split} a_{t}^{(n)}\to a_t \weaks L^{\infty}([0,T];H^{1.5+\delta}) . \end{split} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3305} \end{align} We are now ready to pass to the limit in both equations. Starting with the Euler equations, and denoting the duality pairing by $\langle \cdot , \cdot \rangle$, we pass to the limit as $n \to \infty$ by \begin{align}\thelt{R REnhHm B8 V y44 6a3 VCe Ck wjCM e3DG fMiFop vl z Lp5 r0z dXr rB DZQv 9HQ7 XJMJog kJ n sDx WzI N7F Uf veeL 0ljk 83TxrJ FD T vEX LZY pEq 5e mBaw Z8VA zvvzOv CK m K2Q ngM MBA Wc UH8F jSJt hocw4l 9q J TVG sq8 yRw 5z qVSp d9Ar UfVDcD l8 B 1o5 iyU R4K Nq b84i OkIQ GIczg2 nc t txd WfL QlN ns g3BB jX2E TiPrpq ig M OSw 4Cg dGP fi G2HN ZhLe aQwyws ii A WrD jo4 LDb jB ZFDr LMuY dt6k6H n9 w p4V k7t ddF rz CKid QPfC RKUedz V8 z ISv ntB qpu 3c p5q7 J4Fg Bq59} \begin{split} \langle v_{t}^{(n)} -v_{t}, \phi \rangle \to 0 \comma \phi \in C_{0}^{\infty}(\Omega\times(0,T)) . 
\end{split} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3306} \end{align} We next pass through the limit in the nonlinear terms using \begin{align}\thelt{F jSJt hocw4l 9q J TVG sq8 yRw 5z qVSp d9Ar UfVDcD l8 B 1o5 iyU R4K Nq b84i OkIQ GIczg2 nc t txd WfL QlN ns g3BB jX2E TiPrpq ig M OSw 4Cg dGP fi G2HN ZhLe aQwyws ii A WrD jo4 LDb jB ZFDr LMuY dt6k6H n9 w p4V k7t ddF rz CKid QPfC RKUedz V8 z ISv ntB qpu 3c p5q7 J4Fg Bq59pS Md E onG 7PQ CzM cW lVR0 iNJh WHVugW PY d IMg tXB 2ZS ax azHe Wp7r fhk4qr Ab J FFG 0li i9M WI l44j s9gN lu46Cf P3 H vS8 vQx Yw9 cE yGYX i3wi 41aIuU eQ X EjG 3XZ IUl 8V SPJV gCJ3} \begin{split} & |\langle (v_{j})^{(n)} (a_{kj})^{(n)} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k} (v_{i})^{(n)} - v_{j} a_{kj} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k} v_{i} , \phi \rangle | \\&\indeq \dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \Vert v^{(n)} - v \Vert_{L^{\infty}([0,T];H^{2})} \Vert a^{(n)} \Vert_{L^{\infty}([0,T];H^{2+\delta})} \Vert \nabla v^{(n)} \Vert_{L^{\infty}([0,T];H^{1.5+\delta})} \Vert \phi \Vert_{L^{1}([0,T];H^{-0.5-\delta})} \\&\indeq\indeq + \Vert v \Vert_{L^{\infty}([0,T];H^{0.5+\delta})} \Vert a^{(n)} - a \Vert_{L^{\infty}([0,T];H^{1.5+\delta})} \Vert \nabla v^{(n)} \Vert_{L^{\infty}([0,T];H^{1.5+\delta})} \Vert \phi \Vert_{L^{1}([0,T];H^{-0.5-\delta})} \\&\indeq\indeq + \Vert v \Vert_{L^{\infty}([0,T];H^{1.5+\delta})} \Vert a \Vert_{L^{\infty}([0,T];H^{1.5+\delta})} \Vert \nabla v^{(n)} - \nabla v \Vert_{L^{\infty}([0,T];H^{0.5+\delta})} \Vert \phi \Vert_{L^{1}([0,T];H^{-0.5-\delta})} , \end{split} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3307} \end{align} for $\phi \in C_{0}^{\infty}(\Omega\times(0,T))$. By the strong convergence result above, the right-hand side goes to zero as $n \to \infty$. For the other nonlinear term, the argument is similar. 
It remains to pass through the limit in the pressure term, for which we have \begin{align}\thelt{B ZFDr LMuY dt6k6H n9 w p4V k7t ddF rz CKid QPfC RKUedz V8 z ISv ntB qpu 3c p5q7 J4Fg Bq59pS Md E onG 7PQ CzM cW lVR0 iNJh WHVugW PY d IMg tXB 2ZS ax azHe Wp7r fhk4qr Ab J FFG 0li i9M WI l44j s9gN lu46Cf P3 H vS8 vQx Yw9 cE yGYX i3wi 41aIuU eQ X EjG 3XZ IUl 8V SPJV gCJ3 ZOliZQ LO R zOF VKq lyz 8D 4NB6 M5TQ onmBvi kY 8 8TJ ONa DfE 2u zbcv fL67 bnJUz8 Sd 7 yx5 jWr oXd Jp 0lSy mIK8 bkKzql jN n 4Kx luF hYL g0 FrO6 yRzt wFTK7Q RN 0 1O2 1Zc HNK gR M7GZ} \begin{split} |\langle a^{(n)}_{ki} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k} q^{(n)} - a_{ki} \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k} q , \phi \rangle | &\dlkjfhlaskdhjflkasdjhflkasjhdflkasjhdflkasjhdfls \Vert a^{(n)} - a \Vert_{L^{\infty}([0,T];H^{1.5+\delta})} \Vert \nabla q^{(n)} \Vert_{L^{\infty}([0,T];H^{0.5+\delta})} \Vert \phi \Vert_{L^{1}([0,T];H^{-0.5-\delta})} \\&\indeq + |\langle \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k} q^{(n)} - \UIOIUYOIUyHJGKHJLOIUYOIUOIUYOIYIOUYTIUYIOOOIUYOIUYPOIUPOIUPOIUYOIUYOIUYOIUHOUHOHIOUHOIHOIUHOIUHIOUH_{k} q , a_{ki} \phi \rangle| . \end{split} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3308} \end{align} The first term on the right hand side again converges to zero by strong convergence of $a^{(n)}$ to $a$ for all test functions $ \phi \in L^{1}([0,T];H^{-0.5+\delta})$. The second term goes to zero as well for all $ \phi \in L^{1}([0,T];H^{-0.5+\delta})$ by the weak-* convergence of $q^{(n)} \to q$ since the element $a \phi \in L^{1}([0,T];H^{-0.5-\delta})$ which easily follows from $ a \in L^{\infty}([0,T];L^{\infty})$. Passing to the limit in the plate equation, we have \begin{align}\thelt{i9M WI l44j s9gN lu46Cf P3 H vS8 vQx Yw9 cE yGYX i3wi 41aIuU eQ X EjG 3XZ IUl 8V SPJV gCJ3 ZOliZQ LO R zOF VKq lyz 8D 4NB6 M5TQ onmBvi kY 8 8TJ ONa DfE 2u zbcv fL67 bnJUz8 Sd 7 yx5 jWr oXd Jp 0lSy mIK8 bkKzql jN n 4Kx luF hYL g0 FrO6 yRzt wFTK7Q RN 0 1O2 1Zc HNK gR M7GZ 9nB1 Etq8sq lA s fxo tsl 927 c6 Y8IY 8T4x 0DRhoh 07 1 8MZ Joo 1oe hV Lr8A EaLK hyw6Sn Dt h g2H Mt9 D1j UF 5b4w cjll AvvOSh tK 8 06u jYa 0TY O4 pcVX hkOO JVtHN9 8Q q q0J 1Hk Ncm LS} \begin{split} & \langle w_{tt}^{(n)} -w_{tt}, \psi \rangle_{\Gamma_{1}} \to 0 \comma \psi \in L^{1}([0,T];H^{-\delta}(\Gamma_{1}) ) \\& \langle \Delta_2^{2} w^{(n)} -\Delta_2^{2} w, \psi \rangle \to 0 \comma \psi \in L^{1}([0,T];H^{-\delta}(\Gamma_{1}) ) . 
\end{split} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3309} \end{align} Moreover, since $(1/n) w_{tt}^{(n)}$ converges weakly in $L^{2}([0,T];H^{\delta}(\Gamma_{1}) )$, by the Aubin-Lions Lemma, we have additionally \begin{align}\thelt{ jWr oXd Jp 0lSy mIK8 bkKzql jN n 4Kx luF hYL g0 FrO6 yRzt wFTK7Q RN 0 1O2 1Zc HNK gR M7GZ 9nB1 Etq8sq lA s fxo tsl 927 c6 Y8IY 8T4x 0DRhoh 07 1 8MZ Joo 1oe hV Lr8A EaLK hyw6Sn Dt h g2H Mt9 D1j UF 5b4w cjll AvvOSh tK 8 06u jYa 0TY O4 pcVX hkOO JVtHN9 8Q q q0J 1Hk Ncm LS 3MAp Q75A lAkdnM yJ M qAC erD l5y Py s44a 7cY7 sEp6Lq mG 3 V53 pBs 2uP NU M7pX 6sy9 5vSv7i IS 8 VGJ 08Q KhA S3 jIDN TJsf bhIiUN fe H 9Xf 8We Cxm BL gzJT IN5N LhvdBO zP m opx YqM 4} \begin{split} \nu_n w_{t}^{(n)} \to \chi \inn L^{2}([0,T];H^{s}(\Gamma_{1}) ) \end{split} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3310} \end{align} (strongly) for all $s <3+\delta$. Since we also have \begin{align}\thelt{h g2H Mt9 D1j UF 5b4w cjll AvvOSh tK 8 06u jYa 0TY O4 pcVX hkOO JVtHN9 8Q q q0J 1Hk Ncm LS 3MAp Q75A lAkdnM yJ M qAC erD l5y Py s44a 7cY7 sEp6Lq mG 3 V53 pBs 2uP NU M7pX 6sy9 5vSv7i IS 8 VGJ 08Q KhA S3 jIDN TJsf bhIiUN fe H 9Xf 8We Cxm BL gzJT IN5N LhvdBO zP m opx YqM 4Vh ky btYg a3XV TTqLyA Hy q Yqo fKP 58n 8q R9AY rRRe tBFxHG g7 p duM 8gm 1Td pl RKIW 9gi5 ZxEEAH De A sfP 5hb xAx bW CvpW k9ca qNibi5 A5 N Y5I lVA S3a hA aB8z zUTu yK55gl DL 5 XO9 } \begin{split} w_{t}^{(n)} \to w_{t} \inn L^{\infty}([0,T];H^{r}(\Gamma_{1}) ) \end{split} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3311} \end{align} for all $r < 2+\delta$, we get \begin{align}\thelt{i IS 8 VGJ 08Q KhA S3 jIDN TJsf bhIiUN fe H 9Xf 8We Cxm BL gzJT IN5N LhvdBO zP m opx YqM 4Vh ky btYg a3XV TTqLyA Hy q Yqo fKP 58n 8q R9AY rRRe tBFxHG g7 p duM 8gm 1Td pl RKIW 9gi5 ZxEEAH De A sfP 5hb xAx bW CvpW k9ca qNibi5 A5 N Y5I lVA S3a hA aB8z zUTu yK55gl DL 5 XO9 CpO RXw rE V1IJ G7wE gpOag9 zb J iGe T6H Emc Ma QpDf yDxh eTNjwf wM x 2Ci pkQ eUj RU VhCf NMo5 DZ4h2a dE j ZTk Ox9 46E eU IZv7 rFL6 dj2dwg Rx g bOb qJs Yms Dq QAss n9g2 kCb1Ms gK f} \begin{split} \nu_n w_{t}^{(n)} \to 0 \inn L^{2}([0,T];H^{r}(\Gamma_{1}) ) \end{split} \llabel{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3312} \end{align} for all $r < 2+\delta$. 
By uniqueness of the limit, this implies $\chi=0$ and
\begin{align}
\begin{split}
| \nu_n \langle \Delta_2 w_{t}^{(n)}, \psi \rangle_{\Gamma_{1}} |
\leq
\Vert \nu_n w_{t}^{(n)} \Vert_{L^{\infty}([0,T];H^{3+\delta}(\Gamma_{1}))}
\Vert \psi \Vert_{L^{1}([0,T];H^{-\delta}(\Gamma_{1}))}
\to 0
\comma \psi \in L^{1}([0,T];H^{-\delta}(\Gamma_{1}) ) .
\end{split}
\end{align}
Finally, by the weak-* convergence of the pressure terms on the boundary $\Gamma_{1}$ in $L^{\infty}([0,T];H^{1}(\Gamma_{1}) )$, we obtain
\begin{align}
\begin{split}
\langle q^{(n)}-q, \psi \rangle_{\Gamma_{1}} \to 0
\comma \psi \in L^{1}([0,T];H^{-\delta}(\Gamma_{1}) ) .
\end{split}
\end{align}
For the divergence term we have
\begin{align}
\begin{split}
| \langle a^{(n)}_{ki}\partial_{k} v^{(n)}_{i} - a_{ki} \partial_{k} v_{i}, \rho \rangle |
&\lesssim
\Vert a^{(n)} - a \Vert_{L^{\infty}([0,T];H^{2.5+\delta})}
\Vert v^{(n)} \Vert_{L^{\infty}([0,T];H^{2.5+\delta})}
\Vert \rho \Vert_{ L^{1}([0,T];H^{-1.5-\delta})}
\\&\indeq
+ | \langle \partial_{k} (v^{(n)}_{i} -v_{i}), a_{ki} \rho \rangle| ,
\end{split}
\end{align}
for all $\rho \in L^{1}([0,T];H^{-1.5-\delta}) $. The first term converges to zero as $ n \to \infty$ by the strong convergence of $a^{(n)}$ to $a$, while the second term goes to zero as $ n \to \infty$ by the weak-* convergence $\nabla v^{(n)}\to \nabla v$ in $L^{\infty}([0,T];H^{1.5+ \delta})$, since $a \in L^{\infty}([0,T];L^{\infty})$ and thus $\rho a \in L^{1}([0,T];H^{-1.5- \delta})$.
\par
We finally pass to the limit in the boundary condition on $\Gamma_{1}$, obtaining
\begin{align}
\begin{split}
| \langle a^{(n)}_{3i} v^{(n)}_{i} - a_{3i} v_{i}, \xi \rangle_{\Gamma_{1}}|
&\lesssim
\Vert a^{(n)} - a \Vert_{L^{\infty}([0,T];H^{2.5+\delta})}
\Vert v^{(n)} \Vert_{L^{\infty}([0,T];H^{2.5+\delta})}
\Vert \xi \Vert_{ L^{1}([0,T];H^{-2-\delta}(\Gamma_{1}) ) }
\\&\indeq
+ | \langle v^{(n)}_{i} -v_{i}, a_{3i} \xi \rangle_{\Gamma_{1}}| .
\end{split}
\end{align}
The first term on the right converges to zero as $n \to \infty$ for all $\xi \in L^{1}([0,T];H^{-2-\delta}(\Gamma_{1}) ) $, by the strong convergence of $a^{(n)}$ to $a$. The second term also converges to $0$ by the weak-* convergence of $v^{(n)}|_{\Gamma_{1}}$ in $L^{\infty}([0,T];H^{2+\delta}(\Gamma_{1}) )$, since $a \xi \in L^{1}([0,T];H^{-2-\delta}(\Gamma_{1}) )$, which is a consequence of $a\in L^{\infty}([0,T];L^{\infty}(\Gamma_{1}) )$.
\end{proof}
\par
\section*{Appendix}
Here we provide the proof of the compatibility condition \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3130}.
Computing the integral of $\tilde{f}$ over $\Omega$, we have
\begin{align*}
\int_{\Omega} \tilde{f}
&=
\int_{\Omega} \partial_{j}(\partial_{t}\tdb_{ji} \tilde{v}_i)
- \int_{\Omega} \tdb_{ji} \partial_{j}(\tilde{v}_m \tilde{a}_{km}) \partial_{k} \tilde{v}_i
+ \int_{\Omega} \tdb_{ji} \partial_{j}(J^{-1} \psi_t) \partial_{3}\tilde{v}_i
\\&\indeq\indeq
+ \int_{\Omega} \tilde{v}_m \tilde{a}_{km} \partial_{k}\tdb_{ji} \partial_{j}\tilde{v}_i
- \int_{\Omega} J^{-1} \psi_t \partial_{3}\tdb_{ji}\partial_{j}\tilde{v}_i
+ \int_{\Omega} \mathcal{E} .
\end{align*}
This can be rewritten using the divergence theorem and the product rule as
\begin{align*}
\int_{\Omega} \tilde{f}
&=
\int_{\Gamma} \partial_{t}\tdb_{3i} \tilde{v}_i
- \int_{\Omega} \partial_{j}( \tdb_{ji} \tilde{v}_m \tilde{a}_{km} \partial_{k} \tilde{v}_i)
+ \int_{\Omega} \partial_{j}( \tdb_{ji} J^{-1} \psi_t \partial_{3}\tilde{v}_i)
\\&\indeq\indeq
+ \int_{\Omega} \tilde{v}_m \tilde{a}_{km} \partial_{k}(\tdb_{ji} \partial_{j}\tilde{v}_i)
- \int_{\Omega} J^{-1} \psi_t \partial_{3}(\tdb_{ji}\partial_{j}\tilde{v}_i)
+ \int_{\Omega} \mathcal{E} .
\end{align*}
Noting that $b=I$ on $\Gamma_{0}$ and using the divergence theorem again, this can be expressed as
\begin{align*}
\int_{\Omega} \tilde{f}
&=
\int_{\Gamma_{1}} \partial_{t}\tdb_{3i} \tilde{v}_i
- \int_{\Gamma} \tdb_{3i} \tilde{v}_m \tilde{a}_{km} \partial_{k} \tilde{v}_i
+ \int_{\Gamma} \tdb_{3i} J^{-1} \psi_t \partial_{3}\tilde{v}_i
\\&\indeq\indeq
- \int_{\Omega} \partial_{k}(\tilde{v}_m \tilde{a}_{km}) \tdb_{ji} \partial_{j}\tilde{v}_i
+ \int_{\Gamma} \tilde{v}_m \tilde{a}_{3m} \tdb_{ji} \partial_{j}\tilde{v}_i
+ \int_{\Omega} \partial_{3}(J^{-1} \psi_t) \tdb_{ji}\partial_{j}\tilde{v}_i
- \int_{\Gamma} J^{-1} \psi_t \tdb_{ji}\partial_{j}\tilde{v}_i
+ \int_{\Omega} \mathcal{E} .
\end{align*}
The last five integrals cancel with terms in $\mathcal{E}$.
The boundary integral $\int_{\Gamma} \tdb_{3i} \tilde{v}_m \tilde{a}_{km} \partial_{k} \tilde{v}_i$ can be expressed, using a calculation similar to \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3339}, as
\begin{align*}
\int_{\Gamma} \tdb_{3i} \tilde{v}_m \tilde{a}_{km} \partial_{k} \tilde{v}_i
=
\int_{\Gamma}
\biggl(
\tilde{a}_{3k} \tilde{v}_{k} \tdb_{3i} \partial_{3} \tilde{v}_i
+ \sum_{j=1}^{2} \tilde{v}_k \tilde{a}_{jk} \partial_{j}(\tdb_{3i} \tilde{v}_{i})
+ \frac{1}{\partial_{3} \psi} \tdb_{3k} \tilde{v}_{k} \partial_{3}\tilde{b}_{3i} \tilde{v}_i
- \frac{1}{\partial_{3} \psi} \partial_{j} \tdb_{3i} \tilde{v}_k \tdb_{jk} \tilde{v}_i
\biggr) .
\end{align*}
In particular,
\begin{align}
\begin{split}
\tilde{b}_{3i} \tilde{v}_k \tilde{b}_{jk} \partial_{j}\tilde v_i
&=
\tilde v_k \tilde{b}_{jk} \partial_{j} ( \tilde{b}_{3i} \tilde{v}_i)
- \tilde{v}_k \tdb_{jk} \partial_{j} \tdb_{3i} \tilde{v}_i
\\ &=
\sum_{j=1}^{2} \tilde{v}_k \tilde{b}_{jk} \partial_{j} ( \tilde{b}_{3i} \tilde{v}_i)
+ \tilde{v}_k \tilde{b}_{3k} \partial_{3} ( \tilde{b}_{3i} \tilde{v}_i)
- \tilde{v}_k \tilde{b}_{jk} \partial_{j} \tilde{b}_{3i} \tilde{v}_i .
\end{split}
\end{align}
From the definition of $\mathcal{E}$, \eqref{8ThswELzXU3X7Ebd1KdZ7v1rN3GiirRXGKWK099ovBM0FDJCvkopYNQ2aN94Z7k0UnUKamE3OjU8DFYFFokbSI2J9V9gVlM8ALWThDPnPu3EL7HPD2VDaZTggzcCCmbvc70qqPcC9mt60ogcrTiA3HEjwTK8ymKeuJMc4q6dVz200XnYUtLR9GYjPXvFOVr6W1zUK1WbPToaWJJuKnxBLnd0ftDEbMmj4loHYyhZyMjM91zQS4p7z8eKa9h0JrbacekcirexG0z4n3348}, we get cancellation of the first three terms, while only the integral over $\Gamma_{1}$ of the last term remains. Hence, we get
\begin{align*}
\int_{\Omega} \tilde{f}
&=
\int_{\Gamma_{1}} \partial_{t}\tdb_{3i} \tilde{v}_i
- \int_{\Gamma} \sum_{j=1}^{2} \tilde{v}_k \tilde{a}_{jk} \partial_{j}(\psi_{t})
- \int_{\Gamma} \frac{1}{\partial_{3} \psi} \psi_{t} \partial_{3}\tilde{b}_{3i} \tilde{v}_i
+ \int_{\Gamma_{1}} \frac{1}{\partial_{3} \psi} \partial_{j} \tdb_{3i} \tilde{v}_k \tdb_{jk} \tilde{v}_i .
\end{align*}
Noting that $\psi_{t} =0$ on $\Gamma_{0}$ and $\psi_{t}= w_{t}$ on $\Gamma_{1}$, while $\int_{\Gamma_{1}} w_{tt}=0$, this is precisely the integral of $\tilde{g}_{1}$ over $\Gamma_{1}$.
\par
\section*{Acknowledgments}
IK was supported in part by the NSF grant DMS-1907992.
\par
\end{document}
2205.11974v1
http://arxiv.org/abs/2205.11974v1
A mathematical model of Breast cancer (ER+) with excess estrogen: Mixed treatments using Ketogenic diet, endocrine therapy and Immunotherapy
\documentclass[conference]{IEEEtran} \IEEEoverridecommandlockouts \usepackage{mathtools, cuted} \usepackage{lipsum, color} \usepackage{cite} \usepackage{amsmath,amssymb,amsfonts} \usepackage{algorithmic} \usepackage{graphicx} \usepackage{textcomp} \usepackage{xcolor} \setlength{\parskip}{0em} \usepackage{float} \usepackage{mathrsfs} \newcommand{\R}{\mathbb{R}} \usepackage{tabularx} \usepackage{wrapfig} \usepackage[colorinlistoftodos]{todonotes} \usepackage{url} \usepackage{epigraph} \usepackage{amsthm} \usepackage{multicol,lipsum} \usepackage{etoolbox} \newtheorem{theorem}{Theorem} \newtheorem{rem}{Remark} \newtheorem{exmp}{Example} \newtheorem{definition}{Definition} \newtheorem{lemma}{Lemma} \newtheorem{proposision}{Proposition} \usepackage{enumitem} \def\BibTeX{{\rm B\kern-.05em{\sc i\kern-.025em b}\kern-.08em T\kern-.1667em\lower.7ex\hbox{E}\kern-.125emX}} \begin{document} \title{A mathematical model of Breast cancer (ER+) with excess estrogen: Mixed treatments using Ketogenic diet, endocrine therapy and Immunotherapy\\ } \author{\IEEEauthorblockN{ Hassnaa AKIL} \IEEEauthorblockA{\textit{Laboratory LIPIM} \\ \textit{ENSA Khouribga}\\ \textit{University of sultan moulay slimane }\\ Khouribga, Morocco\\ [email protected]} \and \IEEEauthorblockN{ Nadia IDRISSI FATMI } \IEEEauthorblockA{\textit{Laboratory LIPIM} \\ \textit{ENSA Khouribga}\\ \textit{University of sultan moulay slimane }\\ Khouribga, Morocco\\ [email protected]} } \maketitle \begin{abstract} Breast cancer is a major public health problem and the most commonly diagnosed malignancy in women. There have been significant clinical, theoretical, and experimental developments in understanding how the dynamics of cancer cells interact with the immune system, as well as analytical and computational models that help relate these mechanisms to clinical observations; more work is still needed, however, especially at the genetic and molecular levels. Treatments such as immunotherapy, chemotherapy, hormone therapy, radiotherapy, and gene therapy are the main strategies in the fight against breast cancer. The present study investigates the effects of estrogen, building on recent models but this time combining them with immunotherapy as a way to treat or inhibit tumor growth. The model describes breast cancer in situ through a simplified system of nonlinear coupled ordinary differential equations that captures the important interactions between normal cells, tumor cells, immune cells, and a ketogenic diet in the presence of an anticancer drug. A further contribution is the introduction of the inhibition (saturation) effect $\epsilon$, which leads to new results and conclusions. A qualitative study is performed, and biological interpretations are included to understand the stability conditions in a realistic way. \end{abstract} \begin{IEEEkeywords} Mathematical modelling, ODEs, Dynamical systems, Breast cancer, ER-positive, Ketogenic diet, Estrogen, Immunotherapy. \end{IEEEkeywords} \section{Introduction} Female breast cancer was the most commonly diagnosed cancer in women worldwide in 2020, according to the Global Cancer Statistics. In 2020, as reported in CA: A Cancer Journal for Clinicians, an estimated 19.3 million new cancer cases were diagnosed, and about 10 million people died from cancer-related causes.
Overall, the data revealed that one out of every five men and women in the world will develop cancer over their lifetime. Looking ahead, the analysis estimates that 28.4 million cancer cases will be diagnosed globally in 2040, up 47\% from 2020 \cite{CA}. Breast cancer is the most commonly diagnosed cancer in women, with an estimated 1.8 million new cases. Rates are highest in North America, Australia, New Zealand, and northern and western Europe, and lowest in much of Africa and Asia (Torre et al.\cite{torre}). Breast cancer also occurs in North Africa, although its incidence is lower than in Western countries (about 1.5 to 2 times lower in women under 50 and 3 to 4 times lower in women over 50). According to the World Health Organization (WHO), in 2020, 2.3 million women worldwide were diagnosed with breast cancer and 685,000 died from it. By the end of 2020, 7.8 million women had been diagnosed with breast cancer within the past five years, making it the most prevalent cancer in the world, ahead of lung cancer. Globally, women lose more disability-adjusted life years to breast cancer than to any other cancer \cite{0}. In Morocco there are approximately 50,000 new cancer cases each year, and cancer accounts for 13.4\% of deaths in the country. Breast cancer alone accounts for about 20\% of cases: it was the most common cancer among Moroccan women in 2016, according to a report issued by the Greater Casablanca Cancer Registry of Morocco, accounting for 35.8\% of all new malignancies among women \cite{MR}. Breast cancer is a disease of breast cells in which mitotic division proceeds unchecked and may become malignant in breast tissue; it occurs when the cells in the lobules or the ducts become abnormal and divide uncontrollably. These abnormal cells begin to invade the surrounding breast tissue and may eventually spread (metastasize) via blood vessels and lymphatic channels to the lymph nodes, lungs, bones, brain, and liver \cite{meta}. There are many different types of breast cancer, and about 70\% of them are sensitive to the female sex hormone estrogen. Cells from these cancers carry receptor sites that bind estrogen, thereby promoting their growth and spread \cite{horm}. These cancers are called estrogen receptor-positive (ER-positive) cancers, and they are the type discussed in this paper. Tumor cells are tested to determine whether they carry these receptors, in which case hormonal (or endocrine) therapy, such as Tamoxifen, can be used as a treatment \cite{tam}. However, information on the etiology of this disease remains scarce. For many years now, researchers have been looking for new ways to understand and fight breast cancer; its causes are still not fully understood, so it is hard to say why one woman develops the disease while another does not. Either way, some risk factors are known to affect the likelihood of developing it. Some of them cannot be changed, while others can; broadly, the three main risk factors for breast cancer are hormonal imbalances, genetic factors, and environmental factors.
Some of the therapies used to suppress tumor growth or inhibit the dynamics of cancer cells are surgery, chemotherapy, radiation therapy, endocrine therapy, targeted therapy, and immunotherapy. Although breast cancer was formerly thought to be difficult to treat with the latter therapy, owing to its immunological ``coldness,'' clinical research and novel medications have shown that immunotherapy can improve breast cancer patient outcomes \cite{imm}. Yet every treatment has side effects, such as hair loss, mood swings, nausea, vomiting, and fatigue. As with most cancers, the earlier breast cancer is detected and diagnosed, the better the chances of successful treatment. Over the years, breast cancer modeling has become an invaluable tool for understanding the dynamic behavior of tumor growth during treatment. Several studies show that mathematical modeling helps address such epidemiological problems. Oke et al.\cite{oke} and Bozkurt Yousef et al.\cite{yousef} improved Mufudza's model \cite{1}; they incorporated control parameters (ketogenic diet, immune booster, and anticancer drugs) into the hypothesized interactions between normal and malignant cells. However, these studies do not include the combination of a nutritional diet with Tamoxifen and immunotherapy in their mathematical models, and this combination is also absent from the standard treatments currently used by oncologists. According to the breast oncologist Dr.\ Hung Khong, MD \cite{hung}, the majority of research on these types of combinations has focused on another type of cancer, triple-negative breast cancer; however, there is no indication that ER-positive disease is immune to immunotherapy. In fact, there is evidence that ER-positive breast cancer responds to immunotherapy, and exploring this with mathematical tools is our main goal. This paper is inspired by the mathematical models of \cite{1,2,de2009}, but here we introduce the saturation effect of cancer cells and add the interaction of an immunotherapy drug (of immune checkpoint blockade (ICB) type \cite{ICB}) with the immune cells in order to derive new results. This work is organized as follows: in the first section, we construct the mathematical model of breast cancer dynamics in the presence of cancer treatment, excess estrogen, and a ketogenic diet used as a therapeutic tool to help shrink the tumor \cite{med} (much research is still needed to assess the effect of this diet on cancer patients). The five compartments are modeled by a system of differential equations. A qualitative study then establishes the existence of the equilibria, analyzes their local stability, and discusses the basic reproduction numbers. \section{Model description} The models of Oke et al.\cite{oke} and de Pillis et al.\ \cite{de2009}, as well as other recently published mathematical models, never used the combination of endocrine therapy with a diet and immunotherapy. We therefore propose a mathematical model to study the dynamics of breast cancer with excess estrogen, taking the saturation effect into account in the presence of these treatments.
We present a system of differential equations describing the interactions between normal cells $N$, tumor cells $T$, immune cells $I$, estrogen $E$, and immunotherapy $M$, and we then study the dynamic behavior described by the system below:
\begin{equation} \left\{ \begin{array}{l} \begin{aligned} \dfrac{dN}{dt}={} &N(t)(a_1-b_1N(t))-\\ &\dfrac{d_1T(t)N(t)}{1+\epsilon T(t)}-l_1N(t)E(t)(1-k), \end{aligned}\\\\ \begin{aligned} \dfrac{dT}{dt}={} &T(t)(a_2d-b_2T(t))-g_1I(t)T(t)-m_dT(t)+\\ &l_1N(t)E(t)(1-k), \end{aligned}\\\\ \begin{aligned} \dfrac{dI}{dt}={} & s+\dfrac{r I(t)T(t)}{o+T(t)}-g_2I(t)T(t)-m I(t)-\\ &\dfrac{l_3 I(t)E(t)}{g+E(t)}(1-k)+\dfrac{p_M I(t)M(t)}{j_M+M(t)} , \end{aligned}\\\\ \dfrac{dE}{dt}=p(1-k)-\theta E(t),\\\\ \dfrac{dM}{dt}=v_M(t)-n_MM(t)+\dfrac{\chi M(t)I(t)}{\xi+I(t)}. \end{array} \right. \label{equ} \end{equation}
We now explain the model parameters and describe the terms biologically for a better understanding of the interactions appearing in each equation.
\subsection{Modeling normal cells} In system (\ref{equ}), normal cells are represented by the first equation: $a_1$ is the logistic growth rate of the normal cells and $b_1$ their natural death rate. It is therefore reasonable to model the infection rate by a saturated incidence of the form $\dfrac{d_1TN}{1+\epsilon T}$, where $d_1$ and $\epsilon$ are positive constants describing, respectively, the inhibition rate of normal cells due to DNA damage and the saturation effect. Excess estrogen leads to DNA mutation, and thus part of the normal cell population is lost and transformed into tumor cells through the $l_1NE(1-k)$ term.
\subsection{Modeling tumor cells} In the second equation, the first term is the limited growth term for tumor cells, which depends on the ketogenic diet dose $d$; $m_d$ is the death rate of tumor cells resulting from starvation of nutrients (glucose). Note that the ketogenic diet \cite{4} puts the body in ketosis, a metabolic state that turns fat into ketones, which is thought to help protect against some cancers. Tumor cells are removed by the immune response at rate $g_1$.
\subsection{Modeling Lymphocytes (Immune cells)} Lymphocytes, including T-cells, T-regulatory cells, and natural killer (NK) cells, and their cytokine release patterns are implicated in both primary prevention and recurrence of breast cancer \cite{5}. In the third equation, $s$ is the constant source rate of the immune response. The presence of tumor cells stimulates and activates the immune response, resulting in growth of the immune cell population; this is represented by a positive nonlinear growth term, also called the Michaelis-Menten interaction term for immune cells, $\dfrac{r IT}{o+T}$, where $r$ is the immune response rate and $o$ is the immune threshold rate. The interaction coefficient $g_2$ represents the inactivation of immune cells due to tumor cells, and $m$ is the natural death rate of immune cells. The next term is the limited-rate suppression of $I$ due to excess estrogen, where $l_3$ is the suppression rate and $g$ is the estrogen threshold rate. The last term is again a Michaelis-Menten interaction; it models the activation of immune cells by immunotherapy.
\subsection{Modeling estrogen} The fourth equation represents the estrogen dynamics. This hormone plays various roles in the female reproductive system, and many studies show that higher blood levels of the estrogen estradiol increase the risk of breast cancer in postmenopausal women \cite{3}. Here $p$ is the source rate of estrogen and $\theta$ is its decay rate as it is washed out from the body.
\subsection{Immunotherapy} Our work differs from previous models such as Oke et al.\ by adding this last equation, which governs the amount of drug injected per day per litre of body volume, $v_M$, its turnover, represented by $n_MM$, and finally a Michaelis-Menten term in the drug, $\frac{\chi M(t)I(t)}{\xi+I(t)}$, representing the production of immunotherapy from activated immune cells ($CD8^{+}T$).
\section{Model analysis} \subsection{Positiveness and existence of equilibria}
\begin{theorem} System~\eqref{equ} has a unique solution $(N,T,I,E,M)$ in $\R_{+}^5$. \end{theorem}
\begin{proof} See \cite{murray}. \end{proof}
\begin{rem} The aim of our model is to investigate cell populations, so all variables and parameters are non-negative. Based on the biological setting, system~\eqref{equ} will be studied on the set $\Omega$ introduced in the next theorem, which guarantees that the system is well posed in the sense that solutions with non-negative initial conditions remain non-negative for all $t>0$, making the variables biologically meaningful. As a result, we obtain the theorem below. \end{rem}
\begin{theorem} The set $$\Omega=\left\{(N, T, I, E, M) \in \R_{+}^5\right\}$$ is a positively invariant set of model~\eqref{equ}. \end{theorem}
\begin{proof} Let $P(t)=(N(t),T(t),I(t),E(t),M(t))$ be any positive solution of the model, with initial condition $P(0)=(N(0),T(0),I(0),E(0),M(0)) \in \R_{+}^{5}$. From model~\eqref{equ} we get:
$$ \begin{aligned} &N(t)= N(0) e^{\int_{0}^{t}a_1-b_1N(\tau)-\frac{d_1T(\tau)}{1+\epsilon T(\tau)}-l_1E(\tau)(1-k) \mathrm{d} \tau},\\ &T(t)=T(0) e^{\int_{0}^{t}a_2d-b_2T(\tau)-g_1I(\tau)-m_d+\frac{l_1N(\tau)E(\tau)(1-k)}{T(\tau)} \mathrm{d} \tau}, \\ &I(t)=I(0) e^{\int_{0}^{t}\frac{s}{I(\tau)}+\frac{rT(\tau)}{o+T(\tau)}-g_2T(\tau)-m - \frac{l_3E(\tau)}{g+E(\tau)}(1-k)+\frac{p_M M(\tau)}{j_M+M(\tau)} \mathrm{d} \tau},\\ &E(t)=E(0) e^{\int_{0}^{t} \frac{p(1-k)}{E(\tau)}-\theta \mathrm{d} \tau}, \\ &M(t)=M(0) e^{\int_{0}^{t} \frac{v_M}{M(\tau)}-n_M+\frac{\chi I(\tau)}{\xi+I(\tau)}\mathrm{d} \tau}. \end{aligned} $$
Starting from a positive initial condition, each solution of the model remains positive for all $t \geq 0$; thus $\Omega$ is a positively invariant set of our model.\par
From the equations for $N(t)$ and $T(t)$, and by using the comparison theorem, we obtain:
$$ \frac{\mathrm{d} N(t)}{\mathrm{d} t} \leq N(t)(a_1-b_1N(t)),$$
thus
$$\limsup _{t \rightarrow+\infty} N(t) \leq \frac{a_1}{b_1} , $$
and similarly
$$\limsup _{t \rightarrow+\infty} T(t) \leq \frac{a_2}{b_2} . $$
Consequently, it can be shown that $I, E, M$ are also bounded, which completes the proof. \end{proof}
\subsection{Equilibria} In this section we study the existence and stability of the equilibria, which are the critical points of system~\eqref{equ}. The model admits seven steady states: one tumor-free equilibrium point, five dead equilibria, and finally the coexisting equilibria, discussed briefly. Let $\bar{P}=(\bar{N},\bar{T},\bar{I},\bar{E},\bar{M})$ be an equilibrium point. The components $\bar{N}$, $\bar{T}$, $\bar{I}$, $\bar{E}$ and $\bar{M}$ are non-negative real quantities, since they represent cell populations; likewise, all parameters considered in this model are positive.
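Before examining these equilibria individually, it can be helpful to integrate system~\eqref{equ} numerically and observe whether trajectories settle near a steady state. The sketch below is not part of the original analysis: it uses the standard SciPy solver, and all parameter values and initial data are hypothetical placeholders chosen purely for illustration.
\begin{verbatim}
# Minimal numerical sketch of system (1); all parameter values below are
# hypothetical placeholders, not calibrated values from the paper.
import numpy as np
from scipy.integrate import solve_ivp

par = dict(a1=1.0, b1=1.0, d1=0.3, eps=0.2, l1=0.2, k=0.6,
           a2=1.2, b2=1.0, g1=0.5, md=0.2, d=0.8,
           s=0.1, r=0.3, o=0.5, g2=0.1, m=0.2, l3=0.2, g=0.3,
           pM=0.4, jM=0.5, p=0.5, theta=0.9,
           vM=0.1, nM=0.3, chi=0.2, xi=0.4)

def rhs(t, y, q):
    # Right-hand side of system (1) for the state y = (N, T, I, E, M).
    N, T, I, E, M = y
    dN = N*(q['a1'] - q['b1']*N) - q['d1']*T*N/(1 + q['eps']*T) \
         - q['l1']*N*E*(1 - q['k'])
    dT = T*(q['a2']*q['d'] - q['b2']*T) - q['g1']*I*T - q['md']*T \
         + q['l1']*N*E*(1 - q['k'])
    dI = q['s'] + q['r']*I*T/(q['o'] + T) - q['g2']*I*T - q['m']*I \
         - q['l3']*I*E/(q['g'] + E)*(1 - q['k']) + q['pM']*I*M/(q['jM'] + M)
    dE = q['p']*(1 - q['k']) - q['theta']*E
    dM = q['vM'] - q['nM']*M + q['chi']*M*I/(q['xi'] + I)
    return [dN, dT, dI, dE, dM]

y0 = [0.9, 0.1, 0.2, 0.1, 0.0]   # hypothetical initial state (N, T, I, E, M)
sol = solve_ivp(rhs, (0.0, 200.0), y0, args=(par,), max_step=0.1)
print("state at final time:", sol.y[:, -1])
\end{verbatim}
If the chosen parameters satisfy the stability conditions derived below (for instance $\mathscr{R}_{0}<1$ and $\mathscr{R}_{1}<1$ for the tumor-free state), the printed final state should lie close to the corresponding equilibrium, providing a quick numerical cross-check of the analytical conditions.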
\subsubsection{Tumor free equilibrium point}
$$P_{0}=\left(\bar{N}_{0}, 0, \bar{I}_{0}, \bar{E}_{0}, \bar{M}_{0}\right),$$
where,
\begin{itemize}
\item $\bar{N}_{0}=\dfrac{a_{1}-l_{1} \bar{E}_{0}(1-k)}{b_{1}},$
\item $\bar{I}_{0}=\dfrac{s}{m+ \dfrac{l_{3} \bar{E}_{0}}{g+\bar{E}_{0}}(1-k)-\dfrac{p_{M} \bar{M}_{0}}{j_{M}+\bar{M}_{0}}},$
\item $\bar{E}_{0}=\dfrac{(1-k) p}{\theta},$
\item $\bar{M}_{0}=\dfrac{v_{M}}{n_{M}-\dfrac{\chi \bar{I}_{0}}{\xi+ \bar{I}_{0}}}.$
\end{itemize}
\begin{rem}
$CD8^{+}T$ cells, also called cytotoxic T lymphocytes, are a type of immune cell with the capacity to react to pathogens such as infections, viruses, and cancer \cite{CD8}.\\
If we consider $CD8^{+}T$ cells as the only type of immune cells in our model, then during the tumor-free state $I(t)$ will be zero for all $t$, since the activation of these effector cells depends on the tumor.
\end{rem}
\begin{lemma}
$P_{0}$ exists if and only if:
\begin{enumerate}[label=\roman*)]
\item $\bar{E}_{0}\leq\dfrac{a_1}{l_1(1-k)}$, with $k<1$, \\
\item $\bar{M}_{0}\leq\dfrac{m}{p_{M}},$ \\
\item $\bar{I}_{0}\leq\dfrac{n_{M} \xi}{\chi-n_{M}},$\\
\item $l_1\leq\dfrac{\theta a_{1}}{p(1-k)^{2}}.$
\end{enumerate}
\end{lemma}
Biologically, this means that the growth rate of normal cells must exceed the estrogen level. We also notice that $\bar{I}_0$ in this case depends on the suppression by estrogen and on the amount of the immunotherapy dose, unlike in the estrogen-free model studied by T. Sundaresan et al. \cite{2}, where it depends only on the intrinsic dynamics.
\subsubsection{Death equilibrium points type 1}~\par \vskip1mm
Dead equilibria are states in which both normal cells and tumor cells have died off, for instance after a mastectomy \cite{surg} or at death; they are given by:
$$ P_{di}=\left(0,0, \bar{I}_{i}, \frac{(1-k) p}{\theta}, \bar{M}_{i}\right) , $$
such that,
$$\bar{M}_{i}=\dfrac{v_{M}}{n_{M}-\dfrac{\chi \bar{I}_{i}}{\xi+ \bar{I}_{i}}} \text { for } i\in\{1,2\},$$
and $\bar{I}_{i}$ satisfies the equality:
\begin{equation}
\bar{I}^{2}\left(A \chi+p_{M} v_{M}\right)-\bar{I}\left(A n_{M}-p_{M} v_{M} \xi+s \chi \right)+s n_{M}=0,
\label{equI}
\end{equation}
such that,
$$A=m+\frac{l_{3} \bar{E}}{g+\bar{E}}(1-k);$$
therefore, the discriminant of \eqref{equI} is:
\begin{equation}
\Delta=\left(A n_{M}-p_{M} v_{M} \xi+s \chi \right)^{2}-4 s n_{M}\left(A \chi+p_{M} v_{M}\right).
\label{delta}
\end{equation}
We set $B=A\chi+p_{M} v_{M}$;\\
$B\geq0 \Rightarrow \Delta\geq0$, which means that equation (\ref{equI}) has two distinct real roots:
\begin{equation}
\bar{I}_{1,2}=\dfrac{A n_{M}-p_{M} v_{M} \xi+s \chi \mp \sqrt{\Delta}}{2\left(A \chi+p_{M} v_{M}\right)}.
\label{rootsI}
\end{equation}
We then summarize the existence of the two dead equilibrium points in the following lemma.
\begin{lemma}
$P_{di}$, for $i=1,2$, exists if and only if:
\begin{enumerate}[label=\roman*)]
\item $\dfrac{p_{M} v_{M} \xi}{s \chi A n_{M}}<1,$
\item $k<1,$
\item $ n_M\geq \dfrac{\chi\bar{I}_{i}}{\xi+\bar{I}_{i}}.$
\end{enumerate}
\end{lemma}
Biologically, this means that the existence of these equilibria depends on the natural elimination or excretion of IL-2, which must exceed its production by the activated $CD8^{+}T$ cells.
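A small numerical check of this lemma can be made directly from the quadratic \eqref{equI}: the sketch below (again with purely hypothetical parameter values, not taken from the paper) computes the candidate values of $\bar{I}_{1,2}$ and tests condition iii).
\begin{verbatim}
# Sketch: locate the immune component of the type-1 dead equilibria by solving
# the quadratic (2); all numerical values are hypothetical placeholders.
import numpy as np

m, l3, g, k = 0.2, 0.2, 0.3, 0.6          # immune death / estrogen suppression
p_src, theta = 0.5, 0.9                   # estrogen source and decay
pM, vM, nM, chi, xi, s = 0.4, 0.1, 0.3, 0.2, 0.4, 0.1

E_bar = (1 - k) * p_src / theta
A = m + l3 * E_bar * (1 - k) / (g + E_bar)

# quadratic (2): (A*chi + pM*vM) I^2 - (A*nM - pM*vM*xi + s*chi) I + s*nM = 0
coeffs = [A * chi + pM * vM, -(A * nM - pM * vM * xi + s * chi), s * nM]
roots = np.roots(coeffs)
real_pos = [r.real for r in roots if abs(r.imag) < 1e-12 and r.real > 0]
print("candidate equilibrium values of I:", real_pos)

# existence check from Lemma 2(iii): n_M >= chi*I/(xi+I)
for I in real_pos:
    print(I, "admissible:", nM >= chi * I / (xi + I))
\end{verbatim}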
\subsubsection{Death equilibrium points type 2}
$$ P_{di}=\left(0, \bar{T}_{i}, \frac{a_2 d-b_{2} \bar{T}_{i}-m_{d}}{g_{1}}, \frac{(1-k)p}{\theta}, \frac{v_{M}}{n_{M}-\frac{\chi \bar{I}_{i}}{\xi+\bar{I}_i}}\right), $$
for $i=3,4,5$. To find the expressions of $\bar{T}_i$, we solve the following cubic equation:
\begin{equation}
\begin{aligned}
\nu(\bar{T})=&\bar{T}^3\left(b_{2} g_{2}\right)+\bar{T}^{2}(b_{2} m+b_{2} g_{2}o-b_{2} r+b_{2}C\\
&-a_{2} d g_{2}-a_{2} d C +m_d g_{2})+\bar{T}(b_{2} m o+b_{2} C o\\
&-a_{2} d m-a_{2} d g_{2} o+r a_2d-a_{2}do C+g_{1} s\\
&+m_{d} m+m_{d} g_{2} \theta-r m_{d}+m_{d} C) \\
&+m_{d} C o+m_{d} m o+g_{1} s o-a_2d m o,
\end{aligned}
\label{Tequation}
\end{equation}
where,
$$ C=\frac{l_{3} \bar{E}}{g+\bar{E}}(1-k)-\frac{p_{M} \bar{M}}{j_{M}+\bar{M}}; $$
thus,
$$\bar{T}_i=\mathrm{Roots} (\nu(\bar{T}))\quad \text{for}\quad i=3,4,5.$$
We can finally summarize the expressions of the death equilibrium points of type 2 as follows:
$$ P_{d3, d4, d5}=\left(0, \bar{T}_{3,4,5}, \bar{I}_{3,4,5} ,\frac{(1-k) p}{\theta}, \bar{M}_{3,4,5}\right). $$
\begin{lemma}
The dead equilibria of type 2 exist if:
\begin{enumerate}[label=\roman*)]
\item $k<1,$
\item $ \dfrac{a_{2} d-m_{d}}{b_{2}}-\dfrac{g_{1}^{2} n_{M}}{b_{2}\left(\chi-g_{1}n_M\right)} \leqslant \bar{T}_{3,4,5} \leqslant \dfrac{a_{2} d-m_{d}}{b_{2}}.$
\end{enumerate}
\end{lemma}
\vspace{1em}
\subsubsection{Co-existing point}~\par
In this case all cell populations survive the competition and coexist; the equilibrium is given by:
$$ P_e=\left(N_{e}, T_{e}, I_{e}, E_{e},M_{e}\right), $$
such that,
\begin{itemize}
\item $N_e=\dfrac{1}{b_1}\left(a_1-\dfrac{d_1T_e}{1+\epsilon T_e}-l_1E_e(1-k)\right)=\psi_{1}(I_e),$
\item $T_e= \mathrm{Roots}(\mu(T_e))=\psi_{2}(I_e),$
\item $I_e=\dfrac{s}{g_2T_e-\dfrac{r T_e}{o+T_e}+m+\dfrac{l_3E_e}{g+E_e}(1-k)- \dfrac{p_{M} M_e}{j_M+M_e}},$
\item $E_e=\dfrac{(1-k) p}{\theta},$
\item $M_e=\dfrac{v_{M}}{n_{M}-\dfrac{\chi I_e}{\xi+ I_e}}=\psi_{3}(I_e),$
\end{itemize}
where,
\begin{equation}
\mu(T_e)=b_2T_e^{2}+(g_1I_e+m_d-a_2d)T_e-l_1N_eE_e(1-k).
\label{6}
\end{equation}
We let,
\begin{align*}
a&=b_2,\\
b&=g_1I_e+m_d-a_2d,\\
c&=l_1N_e E_e(1-k).
\end{align*}
The existence of the coexisting point depends on the sign of the roots of (\ref{6}). We discuss three cases using its coefficients: when $b$ and $c$ are positive we get two negative roots, which means there is no realistic equilibrium in this case; for $c<0$ we get one equilibrium point, and for $b$ either positive or negative we obtain two roots with opposite signs, which means there exists at least one coexisting point; finally, the case where $b<0$ and $c>0$ allows us to get two coexisting points besides the other equilibria found in the previous parts. We summarize the existence of the coexisting point in the following lemma.
\begin{lemma}
$P_e$ exists if and only if:
$$I_{e} \in\left]0,\dfrac{a_2d-m_d}{g_1}\right[ \quad\text{and}\quad k<1.$$
\end{lemma}
\section{Stability analysis}
In this section we analyze the equilibria in terms of their local stability by means of eigenvalues, in order to identify conditions that can help eliminate tumor cells. We apply the Hartman-Grobman theorem \cite{perko}. Let $\bar{P}=(\bar{N},\bar{T},\bar{I},\bar{E},\bar{M})$ be an arbitrary equilibrium of model (\ref{equ}).
Hence, the associated characteristic equation of the Jacobian matrix of our system at $\bar{P}$ is given by
\begin{strip}
\begin{equation}
\left|\begin{array}{ccccc}{a_1-2 b_{1} \bar{N}-\dfrac{d_{1} \bar{T}}{1+\epsilon \bar{T}}-l_{1} \bar{E}(1-k)-\lambda } & {\dfrac{d_1 \bar{N}}{(1+\epsilon\bar{T})^2}} & {0} & {-l_1 (1-k)\bar{N}}& {0} \\\\ {l_1 (1-k)\bar{E}} & {a_{2} d-2 b_{2} \bar{T}-g_{1} \bar{I}-m_{d}-\lambda } & {-g_1 \bar{T}} &{l_1 \bar{N} (1-k)} & {0} \\\\ {0} & {{\dfrac{r o \bar{I}}{(o+\bar{T})^{2}}-g_{2} \bar{I}}} & {J_{33}-\lambda } & { -\dfrac{l_3\bar{I}g}{(g+\bar{E})^2}(1-k) } & {\dfrac{p_{M} \bar{I} j_{M}}{(j_{M}+\bar{M})^2}} \\\\ {0} & {0} & {0} & {-\theta-\lambda }& {0}\\\\ {0} & {0} & {\dfrac{\chi \bar{M} \xi}{(\xi+\bar{I})^{2}}} & {0}& {-n_{M}+\dfrac{\chi \bar{I}}{\xi+\bar{I}}-\lambda } \end{array}\right|=0,
\label{CE}
\end{equation}
\end{strip}
where
\[J_{33}=\dfrac{r \bar{T}}{o+\bar{T}}-g_{2} \bar{T}-m-\dfrac{l_{3} \bar{E}}{g+\bar{E}}(1-k)+\dfrac{p_{M}\bar{M}}{j_{M}+\bar{M}}. \]
Now, we introduce the reproduction numbers and the conditions for the stability of the tumor-free state. Denote:
$$\mathscr{R}_{0}=\dfrac{A_{6} A_{9}}{A_{10} A_{5}},\quad \text{and}\quad \mathscr{R}_{1}=\dfrac{A_{1} A_{2}}{A_{0} A_{3}},$$
where,
\begin{align*}
A_0 &=a_1-2 b_{1}N-l_{1} E(1-k),\\
A_1 &=l_1 E(1-k),\\
A_2 &=d_1 N,\\
A_3 &=a_{2} d-g_{1} I-m_{d},\\
A_4 &=\dfrac{r I}{o}-g_{2} I,\\
A_5 &=-m-\dfrac{l_{3} E(1-k)}{g+ E}+\dfrac{p_{M} M}{j_{M}+ M},\\
A_6 &=\dfrac{\chi M \xi}{(\xi+ I)^{2}},\\
A_7 &=l_1 N(1-k),\\
A_8 &=\dfrac{l_{3} I g}{g+ E}(1-k),\\
A_9 &=\dfrac{p_{M} I j_{M}}{(j_{M}+M)^{2}},\\
A_{10}&=-n_M+\dfrac{\chi I}{\xi+I}.
\end{align*}
\begin{theorem}
The tumor-free equilibrium point $P_{0}$ of system (\ref{equ}) is locally asymptotically stable if $\mathscr{R}_{0}<1$ and $\mathscr{R}_{1}<1$; otherwise it is unstable.
\end{theorem}
\begin{proof}
Let $J_{p_0}$ be the Jacobian matrix at $P_0$:
$$J_{p_0}=\left(\begin{array}{ccccc} A_{0} & A_{2} & 0 & -A_{7} & 0 \\ A_{1} & A_{3} & 0 & A_{7} & 0 \\ 0 & A_{4} & -A_{5} & -A_{8} & A_{9} \\ 0 & 0 & 0 & -\theta & 0 \\ 0 & 0 & A_{6} & 0 & A_{10} \end{array}\right),$$
and the characteristic equation at $P_0$ is given by
\begin{multline*}
\mathbb{P}(\lambda)=(\lambda+\theta)\left(\lambda^{2}-A_{10} \lambda+A_{5} \lambda-A_{10} A_{5}-A_{6} A_{9}\right)\\\left(\lambda^{2}-A_{0} \lambda- A_{3} \lambda-A_{1}A_{2}+A_{0}A_{3}\right).
\end{multline*}
We can clearly see that this equation yields five eigenvalues:
\begin{align}
\lambda_1=-\theta,\\
\lambda^2-\left(A_{10}-A_{5}\right) \lambda-A_{10} A_{5}\left(1- \dfrac{A_{6} A_{9}}{A_{10} A_{5}}\right)=0,\label{9}\\
\lambda^2-\left(A_{0}+A_{3}\right) \lambda+A_{0} A_{3}\left(1- \dfrac{A_{1} A_{2}}{A_{0} A_{3}}\right)=0.\label{10}
\end{align}
Equation (\ref{9}) admits two eigenvalues, $\lambda_2$ and $\lambda_3$; to study their nature we apply the Routh-Hurwitz criterion. We let:
$$ H_1=\left(\begin{array}{ll} a_{1} & a_{0} \\ a_{3} & a_{2} \end{array}\right), $$
where,
\begin{align*}
a_0&=1,\\
a_1&=-(A_{10}-A_{5}),\\
a_2&=-A_{10} A_{5}(1-\mathscr{R}_{0}),\\
a_3&=0.
\end{align*}
The roots of (\ref{9}) have negative real parts only if all the principal diagonal minors of the Hurwitz matrix are positive, that is, provided that
$$a_0>0,\quad \Delta_1=a_1>0,\quad \Delta_2=\left|\begin{array}{ll}a_{1} & a_{0} \\ a_{3} & a_{2}\end{array}\right|>0. $$
Therefore, the eigenvalues of (\ref{9}) are negative only if $\mathscr{R}_{0}<1$.
We repeat the same process for (\ref{10}) and obtain two eigenvalues, $\lambda_4$ and $\lambda_5$, together with the other reproduction number $\mathscr{R}_{1}$. We conclude that our system is stable at the tumor-free equilibrium point if and only if:
$$\mathscr{R}_{0}<1\quad \text{and} \quad \mathscr{R}_{1}<1. $$
\end{proof}
\begin{rem}
$\lambda_3$ and $\lambda_5$ are negative if and only if:
$$ \dfrac{sM}{v_{M}}<{I}< \dfrac{a_{2}(1+d)-2 b_{1} N-l_{1} E(1-k)-m_{d}}{g_{1}}. $$
Biologically, this means that the immune response must be greater than the immunotherapy dose taken during the treatment.
\end{rem}
Now we study the stability of the death equilibrium points of type 1, $P_{d1,d2}$, and we let $\mathscr{R}_{IM}=\dfrac{B_{5} B_{7}}{B_{4} B_{8}}$ be the reproduction number for the immune system response, such that,
\begin{align*}
B_0 &=a_1-l_{1} E(1-k),\\
B_1 &=l_1 E(1-k),\\
B_2 &=a_{2} d-g_{1} I-m_{d},\\
B_3 &=\dfrac{r I}{o}-g_{2} I,\\
B_4 &=-m-\dfrac{l_{3} E(1-k)}{g+ E}+\dfrac{p_{M} M}{j_{M}+ M},\\
B_5 &=\dfrac{\chi M \xi}{(\xi+ I)^{2}},\\
B_6 &=-\dfrac{l_{3} I g}{(g+ E)^2}(1-k),\\
B_7 &=\dfrac{p_{M} I j_{M}}{(j_{M}+M)^{2}},\\
B_8 &=-n_M+\dfrac{\chi I}{\xi+I}.
\end{align*}
Therefore equation (\ref{CE}) becomes:
\begin{equation}
\label{jac2}
(\lambda+\theta)(B_{4} B_{8}-B_{4} \lambda-B_{5} B_{7}-B_{8} \lambda+\lambda^{2})(B_{2}-\lambda)(B_{0}-\lambda)=0 .
\end{equation}
\begin{theorem}
Death equilibria of type 1 are stable if and only if:
\begin{itemize}
\item $\mathscr{R}_{IM}<1,$
\item $B_0, B_2, B_4, B_8 <0.$
\end{itemize}
\end{theorem}
\begin{proof}
The characteristic equation (\ref{jac2}) has five eigenvalues:
$$
\begin{aligned}
&\lambda_{1}=-\theta,\\
&\lambda_{2,3}=\mathrm{Roots}(F(\lambda)=B_{4} B_{8}-B_{4} \lambda-B_{5} B_{7}-B_{8} \lambda+\lambda^{2}), \\
&\lambda_{4}=B_{2}, \\
&\lambda_{5}=B_{0}.
\end{aligned}
$$
For the system to be stable, all the eigenvalues must have negative real parts. First, let us examine the sign of $\lambda_{2,3}$:
$$
\begin{aligned}
F(\lambda) &=B_{4} B_{8}-B_{4} \lambda-B_{5} B_{7}-B_{8} \lambda+ \lambda^{2} \\
&=\lambda^{2}-\lambda(B_{4}+B_{8})+B_{4} B_{8}\left(1-\dfrac{B_{5} B_{7}}{B_{4} B_{8}}\right).
\end{aligned}
$$
Using the Routh-Hurwitz criterion, we conclude that the death equilibria of type 1 are stable only if:
\begin{equation}
\left\{\begin{aligned}
& B_{4}<0 \quad \text{and}\quad B_{8}<0, \\
& B_{4}+B_{8}<0, \\
&\mathscr{R}_{IM}<1.
\end{aligned}\right.
\end{equation}
In more detail, we determine necessary conditions and their biological interpretations for each part as follows:
$$ B_{4}<0 \iff \dfrac{p_{M}M}{j_{M}+M}<m+\dfrac{l_{3} E(1-k)}{g+E}; $$
biologically, this means that the activation of the $CD8^+T$ cells by the immunotherapy must be smaller than the deactivation of the immune response by estrogen (together with natural death).
$$ B_{8}<0 \iff \dfrac{\chi I}{\xi+I}<n_{M}; $$
here, the production of IL-2 from activated immune cells must be less than the excretion of IL-2.
$$ B_{0}<0 \iff a_{1}<l_{1} E(1-k); $$
this condition shows that the damage to normal cells due to excess estrogen must exceed their natural growth rate.
Finally,
$$ B_{2}<0 \iff I>\dfrac{a_{2} d-m_{d}}{g_{1}}; $$
in this case the immune response needs to be strong enough to block the growth of the tumor cells.
\end{proof}
Next we study the behavior of the death equilibria of type 2. In this case (\ref{CE}) becomes
\begin{equation}
\begin{multlined}
\mathbb{P}(\lambda)=(\lambda+\theta)(C_{0}-\lambda)[\lambda^{2}\left(C_{2}+C_{5}+C_{9}\right)-\lambda^{3}+\\
\lambda\left(C_{6} C_{8}-C_{5} C_{9}-C_{3} C_{4}-C_{2} C_{9}-C_{2} C_{5}\right)+\\
C_{2} C_{5} C_{9}-C_{2} C_{6} C_{8}+C_{3} C_{4} C_{9}]=0,
\end{multlined}
\label{charaC}
\end{equation}
where
$$
\begin{aligned}
&C_{0}=a_{1}-\frac{d_{1} T_{1}}{1+e T}-l_{1} E, \\
&C_{1}=l_{1} E(1-k), \\
&C_{2}=a_{2} d-2 b_{2} T-g_{1} I-m_{d}, \\
&C_{3}=\frac{r I o}{(o+T)^{2}}-g_{2} I, \\
&C_{4}=g_{1} T,\\
&C_{5}=\frac{r T}{o+T}-g_{2} T-m-\dfrac{l_{3} E(1-k)}{g+ E}+\dfrac{P_{M} M}{j_{M}+ M},\\
&C_{6}=\dfrac{\chi M \xi}{(\xi+ I)^{2}},\\
&C_{7}=-\dfrac{l_{3} I g}{(g+ E)^2}(1-k),\\
&C_{8}=\dfrac{P_{M} I j_{M}}{(j_{M}+M)^{2}},\\
&C_{9}=-n_M+\dfrac{\chi I}{\xi+I}.
\end{aligned}
$$
\begin{theorem}
The death equilibria of type 2 are stable only if:
\begin{enumerate}[label=\roman*)]
\item $a_1<\dfrac{d_{1} T_{1}}{1+e T}-l_{1} E,$
\item $C_{6} C_{8}-C_{5} C_{9}-C_{3} C_{4}-C_{2} C_{9}-C_{2} C_{5}>0,$
\item $ C_{2} C_{5} C_{9}-C_{2} C_{6} C_{8}+C_{3} C_{4} C_{9}>0.$
\end{enumerate}
\end{theorem}
\begin{proof}
The characteristic equation (\ref{charaC}) has five eigenvalues:
$$
\begin{aligned}
&\lambda_{1}=-\theta,\\
&\lambda_{2}=C_{0},\\
&\lambda_{3,4,5}=Roots(G(\lambda)),
\end{aligned}
$$
where
$$
\begin{multlined}
G(\lambda)=-\lambda^{3}+\lambda^{2}(C_{2}+C_{5}+C_{9})+\\
\lambda(C_{6} C_{8}-C_{5} C_{9}-C_{3} C_{4}-C_{2} C_{9}-C_{2} C_{5})\\
+C_{2} C_{5} C_{9}-C_{2} C_{6} C_{8}+C_{3} C_{4} C_{9}.
\end{multlined}
$$
In short form we write
$$ f(\lambda)=-a_0\lambda^{3}+a_1\lambda^{2}+a_2\lambda+a_3. $$
Then $\mathfrak{Re}(\lambda_i)<0$ for $i=3,4,5$ if and only if $a_i>0$ for each $i$ and the principal minors of the Hurwitz matrix of $f$ are positive. Moreover $\lambda_1<0$, and $\lambda_{2}=C_0$ is negative if
$$a_1<\frac{d_{1} T_{1}}{1+e T}-l_{1} E.$$
Biologically, this means that the growth rate $a_1$ of normal cells must be smaller than their rate of damage by excess estrogen and by the tumor cells, which yields one of the conditions for stability, namely $C_0<0$.
\end{proof}
Finally, we study the stability of our system at $P_e$. Let
$$ P_{e}=\left(\phi_{1}\left(I_{e}\right), \phi_{2}\left(I_{e}\right), I_{e}, \frac{p(1-k)}{\theta}, \phi_{3}\left(I_{e}\right)\right), $$
where the $\phi_{i}$, $i=1,2,3$, are functions of $I_e$.
Repeating the same process as before, we compute the Jacobian matrix at $P_e$ and obtain the following characteristic equation:
\begin{equation}
\begin{aligned}
&\mathbb{P}(\lambda)=(D_{10}-\lambda)[(D_{0}-\lambda)(D_{3}-\lambda)(D_{6}-\lambda)(D_{12}-\lambda)\\
&-D_{11}D_{7}-D_{5}D_{4}(D_{12}-\lambda)(D_{0}-\lambda)\\
&-D_{2} D_{1}(D_{6}-\lambda)(D_{12}-\lambda)+D_{2} D_{11} D_{7}]=0,
\end{aligned}
\label{pec}
\end{equation}
where the $D_i$ are the entries of the Jacobian matrix at $P_e$. Equation (\ref{pec}) admits five eigenvalues, which can be computed explicitly with a computer algebra system such as Maple; the stability of the system at this point is then summarized in the theorem below.
\begin{theorem}
The system is stable at $P_e$ if and only if $\mathfrak{Re}(\lambda_i)<0$ for $i=2,3,4,5$; note that $\lambda_1=-\theta<0$ always holds.
\end{theorem}
\begin{rem}
To study the stability of the system at $P_e$, one can also expand the characteristic equation (\ref{pec}) and study the signs of its coefficients using the Routh-Hurwitz criterion, as in the previous computations.
\end{rem}
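As a complement to the preceding remark, the eigenvalue conditions can also be checked numerically once explicit parameter values are available. The following minimal Python sketch is purely illustrative and is not part of the analysis above: the matrix entries are placeholders chosen only to mimic the zero pattern of the Jacobian, not fitted values of the model parameters.
\begin{verbatim}
import numpy as np

def locally_stable(jacobian):
    """True if every eigenvalue of the Jacobian has negative real part."""
    return bool(np.all(np.linalg.eigvals(jacobian).real < 0))

# Placeholder 5x5 Jacobian evaluated at an equilibrium point; only the
# zero pattern mimics the Jacobian of the system, the numbers do not.
J = np.array([
    [-0.5,  0.1,  0.0, -0.2,  0.0],
    [ 0.3, -0.8, -0.1,  0.2,  0.0],
    [ 0.0,  0.2, -0.6, -0.1,  0.3],
    [ 0.0,  0.0,  0.0, -0.4,  0.0],
    [ 0.0,  0.0,  0.2,  0.0, -0.7],
])

print(np.linalg.eigvals(J).real)  # real parts of the five eigenvalues
print(locally_stable(J))          # True for these placeholder values
\end{verbatim}
In practice, one would substitute the parameter values and the coordinates of $P_e$ into the Jacobian before applying such a test, or expand (\ref{pec}) symbolically and check the Routh-Hurwitz conditions on its coefficients.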
\end{document}
2205.11924v2
http://arxiv.org/abs/2205.11924v2
Growth of actions of solvable groups
\documentclass[11pt]{amsart} \usepackage{amssymb,amsfonts,amsthm,amsmath,mathrsfs,xspace,hyperref} \usepackage{graphicx} \usepackage[francais,english]{babel} \usepackage{inputenc} \usepackage[T1]{fontenc} \usepackage[centering]{geometry} \usepackage{csquotes} \usepackage{color} \usepackage{enumerate} \usepackage{mathabx} \usepackage{stmaryrd} \usepackage{enumitem} \newcommand{\red}[1]{{\color{red}#1}} \newcommand{\R}{\ensuremath{\mathbb{R}}} \newcommand{\Z}{\ensuremath{\mathbb{Z}}} \newcommand{\Q}{\ensuremath{\mathbb{Q}}} \newcommand{\N}{\ensuremath{\mathbb{N}}} \newcommand{\F}{\ensuremath{\mathcal{F}}} \newcommand{\A}{\ensuremath{\mathcal{A}}} \newcommand{\Rcal}{\ensuremath{\mathcal{R}}} \renewcommand{\O}{\ensuremath{\mathcal{O}}}\renewcommand{\fg}{\ensuremath{\operatorname{FG}}} \newcommand{\M}{\ensuremath{\mathcal{M}}} \newcommand{\X}{\ensuremath{\mathcal{X}}} \renewcommand{\P}{\ensuremath{\mathcal{P}}} \newcommand{\Pbb}{\ensuremath{\mathbb{P}}} \newcommand{\Sbb}{\ensuremath{\mathbb{S}}} \newcommand{\Ccal}{\ensuremath{\mathcal{C}}} \newcommand{\Ncal}{\ensuremath{\mathcal{N}}} \newcommand{\mk}{\ensuremath{\mathfrak{m}}} \newcommand{\nk}{\ensuremath{\mathfrak{n}}} \newcommand{\zk}{\ensuremath{\mathfrak{z}}} \newcommand{\hk}{\ensuremath{\mathfrak{h}}} \newcommand{\m}{\ensuremath{\overline{m}}} \renewcommand{\H}{\ensuremath{\mathcal{H}}} \newcommand{\K}{\ensuremath{\mathcal{K}}} \newcommand{\Sym}{\ensuremath{\operatorname{Sym}}} \newcommand{\sym}{\ensuremath{\operatorname{Sym}}} \newcommand{\Alt}{\ensuremath{\operatorname{Alt}}} \newcommand{\alt}{\ensuremath{\operatorname{Alt}}} \newcommand{\GL}{\ensuremath{\operatorname{GL}}} \newcommand{\vol}{\ensuremath{\operatorname{vol}}} \newcommand{\relvol}{\ensuremath{f}} \newcommand{\asdim}{\ensuremath{\operatorname{asdim}}} \newcommand{\corps}{\ensuremath{\mathbb{K}}} \newcommand{\Lie}{\ensuremath{\operatorname{Lie}}} \newcommand{\supp}{\ensuremath{\textrm{Supp}}}x}{\ensuremath{\textrm{Fix}}} \newcommand{\gp}{\ensuremath{\textrm{germ}_+}} \newcommand{\gm}{\ensuremath{\textrm{germ}_-}} \newcommand{\acts}{\ensuremath{\curvearrowright}} \newcommand{\sub}{\ensuremath{\operatorname{Sub}}} \newcommand{\stab}{\ensuremath{\operatorname{Stab}}} \newcommand{\homeo}{\ensuremath{\operatorname{Homeo}}} \newcommand{\aut}{\ensuremath{\operatorname{Aut}}} \newcommand{\st}{\ensuremath{\operatorname{St}}} \newcommand{\rist}{\ensuremath{\operatorname{RiSt}}} \newcommand{\cay}{\ensuremath{\operatorname{Cay}}} \newcommand{\PSL}{\ensuremath{\operatorname{PSL}}} \newcommand{\psl}{\ensuremath{\operatorname{PSL}}} \renewcommand{\sl}{\ensuremath{\operatorname{SL}}} \newcommand{\fsl}{\ensuremath{\operatorname{FSL}}} \newcommand{\Pw}{\mathrm{P_W}} \newcommand{\PwG}{\mathrm{P_W}(G)} \newcommand{\autT}{\mathrm{Aut}(T)} \newcommand{\Cred}{C_{red}^\ast} \newcommand{\conj}{\mathcal{C}} \newcommand{\urs}{\ensuremath{\operatorname{URS}}} \newcommand{\irs}{\ensuremath{\operatorname{IRS}}} \newcommand{\isf}{\ensuremath{\operatorname{ISF}}} \newcommand{\prob}{\ensuremath{\mathcal{P}}} \newcommand{\cl}{\ensuremath{\mathcal{F}}} \newcommand{\Comm}{\mathrm{Comm}} \newcommand{\Sch}{\mathrm{Sch}} \newcommand{\rk}{\mathrm{rk}} \newcommand{\Fit}{\ensuremath{\operatorname{Fit}}} \newcommand{\Res}{\ensuremath{\operatorname{Res}}} \newcommand{\FC}{\ensuremath{\operatorname{FC}}} \newcommand{\ind}{\ensuremath{\operatorname{Ind}}} \newcommand{\bigslant}[2]{{\raisebox{-.2em}{$#1$}\backslash\raisebox{.2em}{$#2$}}} \newcommand{\ass}{\ensuremath{\mathrm{ass}}} 
\newcommand{\ann}{\ensuremath{\mathrm{ann}}} \newcommand{\rad}{\ensuremath{\mathrm{rad}}} \newcommand{\RadVN}{\ensuremath{\mathrm{Rad}_{\mathrm{VN}}}} \newcommand{\p}{\ensuremath{\mathfrak{p}}} \theoremstyle{definition} \newtheorem{defin}{Definition}[section] \newtheorem*{claim}{Claim} \newtheorem{condition}{Condition} \newtheorem*{setting}{Setting} \newtheorem{notation}[defin]{Notation} \newtheorem{main-question}{Question} \newtheorem{main-problem}[main-question]{Problem} \newtheorem{question}[defin]{Question} \newtheorem*{questions}{Questions} \newtheorem{problems}[defin]{Problems} \newtheorem{problem}[defin]{Problem} \theoremstyle{plain} \newtheorem{thm}[defin]{Theorem} \newtheorem{thmmain}{Theorem} \renewcommand{\thethmmain}{\Alph{thmmain}} \newtheorem*{main-thm}{Theorem} \newtheorem*{main-prop}{Proposition} \newtheorem{prop}[defin]{Proposition} \newtheorem{prop-def}[defin]{Proposition-Definition} \newtheorem{conjecture}{Conjecture} \newtheorem{conjecturebis}{Conjecture} \newtheorem{main-defin}{Definition} \newtheorem{fact}[defin]{Fact} \newtheorem{cor}[defin]{Corollary} \newtheorem*{main-cor}{Corollary} \newtheorem{lem}[defin]{Lemma} \theoremstyle{remark} \newtheorem{remark}[defin]{Remark} \newtheorem{remarks}[defin]{Remarks} \newtheorem{example}[defin]{Example} \begin{document} \date{July 13, 2022} \title{Growth of actions of solvable groups} \author{Adrien Le Boudec} \address{CNRS, UMPA - ENS Lyon, 46 all\'ee d'Italie, 69364 Lyon, France} \email{[email protected]} \author{Nicol\'as Matte Bon} \address{ CNRS, Institut Camille Jordan (ICJ, UMR CNRS 5208), Universit\'e de Lyon, 43 blvd.\ du 11 novembre 1918, 69622 Villeurbanne, France} \email{[email protected]} \thanks{Supported by the LABEX MILYON (ANR-10-LABX-0070) of Universite de Lyon, within the program "Investissements d'Avenir" (ANR-11-IDEX-0007) operated by the French National Research Agency.} \maketitle \begin{abstract} Given a finitely generated group $G$, we are interested in common geometric properties of all graphs of faithful actions of $G$. In this article we focus on their growth. We say that a group $G$ has a Schreier growth gap $f(n)$ if every faithful $G$-set $X$ satisfies $\vol_{G, X}(n)\succcurlyeq f(n)$, where $\vol_{G, X}(n)$ is the growth of the action of $G$ on $X$. Here we study Schreier growth gaps for finitely generated solvable groups. We prove that if a metabelian group $G$ is either finitely presented or torsion-free, then $G$ has a Schreier growth gap $n^2$, provided $G$ is not virtually abelian. We also prove that if $G$ is a metabelian group of Krull dimension $k$, then $G$ has a Schreier growth gap $n^k$. For instance the wreath product $C_p \wr \Z^d$ has a Schreier growth gap $n^d$, and $\Z \wr \Z^d$ has a Schreier growth gap $n^{d+1}$. These lower bounds are sharp. For solvable groups of finite Pr\"ufer rank, we establish a Schreier growth gap $\exp(n)$, provided $G$ is not virtually nilpotent. This covers all solvable groups that are linear over $\Q$. Finally for a vast class of torsion-free solvable groups, which includes solvable groups that are linear, we establish a Schreier growth gap $n^2$. \end{abstract} \section*{Introduction} Let $G = \langle S \rangle$ be a finitely generated group. We are interested in the growth of faithful actions of $G$. A set $X$ endowed with a $G$-action will be called a $G$-set. The Schreier graph of the action of $G$ on $X$ is the graph $\Gamma(G, X)$ with vertex set $X$, and edges $(x, sx)$ for all $x \in X$ and $s \in S$. 
The terminology Schreier graph is usually used for transitive actions, but in the present setting we do \textit{not} require that actions are transitive (see e.g.\ Remark \ref{rmq-finite-orbits}). The \textbf{growth of the action} of $G$ on $X$ is defined by \[\vol_{G, X}(n)=\max_{x\in X} |S^n \cdot x|.\] In other words, $\vol_{G, X}(n)$ is the maximal cardinality of a ball of radius $n$ in $\Gamma(G, X)$. Given functions $f, g\colon \N \to \N$, we write $f(n)\preccurlyeq g(n)$ if there is a constant $C>0$ such that $f(n)\leq Cg(Cn)$, and $f(n)\simeq g(n)$ if $f(n)\preccurlyeq g(n)$ and $g(n) \preccurlyeq f(n)$. The function $\vol_{G, X}(n)$ does not depend on the choice of $S$ up to $\simeq$. For the action of $G$ on itself by left translations, the function $\vol_{G,X}(n)$ is the classical growth of the group $G$, which we denote by $\vol_G(n)=|S^n|$. The latter is one of the most widely studied asymptotic invariants of groups, after the results of Milnor \cite{Mil-sol}, Wolf \cite{Wolf}, Gromov \cite{Gromov-poly}, Grigorchuk \cite{Gri-growth} and many other developments. By contrast, few general results are available on growth of actions. It is clear that for every $G$-set $X$ we have $\vol_{G, X}(n) \preccurlyeq \vol_G(n)$, but various groups admit faithful actions for which the function $\vol_{G, X}(n)$ is much smaller. Classical instances of groups having natural actions with $\vol_{G, X}(n)\simeq n$ are B.H. Neumann's examples of continuously many non-isomorphic finitely generated groups \cite{Neumann-manygroups}, the Grigorchuk group \cite{Bar-Gri-Hecke}, or the topological full group of a $\Z$-action on the Cantor space. Moreover actions of linear growth, and more generally the analysis of graphs of actions, played a crucial role in the recent developments on topological full groups and other related groups, see \cite{Ju-Mo, Nek-simple-dyn, Nek-frag} and the recent preprint \cite{BNZ}. Other examples of groups admitting faithful actions of linear growth are non-abelian free groups (an observation that can be attributed to Schreier \cite{Schreier}), and all right-angled Artin groups (and hence all their infinite subgroups) \cite{Salo}. Conversely, it is natural to ask whether there are obstructions to the existence of actions of small growth. We introduce the following definition: \begin{main-defin} Let $f\colon \N\to \R_+$. A finitely generated group $G$ has a {\textbf{Schreier growth gap} $f(n)$} if every faithful $G$-set $X$ satisfies $\vol_{G, X}(n)\succcurlyeq f(n)$. \end{main-defin} For every infinite group $G$ we always have $\vol_{G, X}(n)\succcurlyeq n$ for every faithful $G$-set $X$. Hence we are interested in Schreier growth gaps $f(n)$ where the function $f$ is super-linear, meaning that $f(n)/n$ is unbounded. The word \enquote{gap} implicitly refers to that situation. Every infinite group $G$ with Kazhdan property (T) has a Schreier growth gap $\exp(n)$. This is a standard consequence of the fact that for every $G$-set $X$ the unitary representation of $G$ on $\ell^2(X)$ has a spectral gap. This remark can be traced back to Kazhdan, and is pointed out by Gromov in \cite[Remark 0.5.F]{Gro-asdim} (see also \cite[Th. B]{Stuck-growth}, and \cite[\S 4]{Ju-dlS} for a statement that does not assume transitivity of actions, and also \cite[\S 7]{Cor-MathZ}). 
In a different direction, Schreier growth gaps were established in \cite{MB-graph-germs} and \cite{LB-MB-comm-lemm}, respectively in the setting of topological full groups of \'etale groupoids and of branch groups acting on rooted trees. Given the above manifestations of this phenomenon, we want to initiate a systematic study of Schreier growth gaps for finitely generated groups. The purpose of this article is to establish various Schreier growth gaps among finitely generated solvable groups. Beyond the fact that the role played by solvable groups in the classical theory of growth of groups suggests to consider this setting, it is natural here to consider \enquote{small} groups $G$, in view of the fact that if a group $G$ has a Schreier growth gap $f(n)$, then the same is true for any group having $G$ as a subgroup. \subsection*{The method: non-foldable subsets and confined subgroups} Before discussing our main results, we outline our approach to study Schreier growth gaps. It is based on the following notion of independent interest. \begin{main-defin} A subset $\mathcal{L}$ of a group $G$ is \textbf{non-foldable} if for every faithful $G$-set $X$, for every finite subset $\Sigma \subset \mathcal{L}$ there exists $x\in X$ such that the orbital map $g\mapsto gx$ is injective on $\Sigma$. \end{main-defin} Roughly speaking, a subset $\mathcal{L}$ is non-foldable if for every faithful $G$-set $X$, the graph $\Gamma(G, X)$ contains Lipschitz embedded copies of arbitrarily large finite subsets of $\mathcal{L}$. In terms of growth, this implies that $\vol_{G, X}(n)$ must be at least equal to the maximal cardinality of a ball of radius $n$ in $\mathcal{L}$, where $\mathcal{L}$ is equipped with the induced metric from $G$. Our method to establish Schreier growth gaps consists in exhibiting non-foldable subsets that are as large as possible. Non-foldable subsets provide information on the geometry of the graphs $\Gamma(G, X)$ beyond the notion of growth. For example they also provide lower bounds for their {asymptotic dimension}. As the growth, the asymptotic dimension of the graph $\Gamma(G, X)$ is an invariant of the action of $G$ on $X$, that is monotone when passing to a finitely generated subgroup. The non-foldable subsets that we exhibit are natural and explicit, so that our results also provide computable lower bounds for the asymptotic dimension of $\Gamma(G, X)$. The study of non-foldable subsets of a group $G$ is crucially related to the study of closed $G$-invariant subsets for the conjugation action of $G$ on the space $\sub(G)$ of subgroups of $G$. The latter is a compact space with the topology induced from the set $2^G$ of all subsets of $G$. Recall that a subgroup of $H$ of a group $G$ is \textbf{confined} if the closure of the $G$-orbit of $H$ in $\sub(G)$ does not contain the trivial subgroup $\{1\}$. Equivalently, $H$ is confined if there is a finite set $P$ of non-trivial elements of $G$ which intersects all conjugates of $H$. Confined subgroups are natural generalisations of uniformly recurrent subgroups (URSs) of Glasner and Weiss \cite{GW-urs}. Confined subgroups and URSs were studied and found applications recently in \cite{Kenn-urs,LBMB-sub-dyn,Elek-simple-alg,MB-Tsan, LB-lattices,MB-graph-germs,Fraczyk-urs,LB-MB-comm-lemm,LB-MB-confined-ht,CLB-commens,Bou-Houd}. 
A common point in \cite{LBMB-sub-dyn, MB-graph-germs, LB-MB-comm-lemm} is a complete classification, or a strong structural result, of confined subgroups and URSs for certain families of groups defined by an action by homeomorphisms. This global rigidity behaviour for confined subgroups and URSs leads to Schreier growth gaps \cite{MB-graph-germs, LB-MB-comm-lemm}. A major difference in the present article is that solvable groups may admit a large pool of confined subgroups and URSs. This was already illustrated by Glasner--Weiss \cite{GW-urs}. In our present setting it is essential to first guess what will be the relevant non-foldable subsets $\mathcal{L}$, which has the consequence of restricting the confined subgroups that need to be studied. We shall now state our results. For simplicity here we state the conclusions that we draw about Schreier growth gaps, and refer the reader to the core of the article for statements about non-foldable subsets. \subsection*{Polycyclic groups} At this point it is worth mentioning that if $G$ is an infinite virtually abelian group, then $G$ always admits a faithful $G$-set with linear growth (Proposition \ref{prop-virtually-abelian}), and thus does not satisfy any Schreier growth gap. Thus virtually abelian groups should be considered as trivial for the problem considered in this paper, and shall systematically be excluded. The following proposition treats the case of polycyclic groups. \begin{main-prop}[Proposition \ref{prop-n4-growth} and Corollary \ref{cor-poly-growth}] Let $G$ be a polycyclic group. \begin{enumerate}[label=\roman*)] \item \label{item-prop-noeth-exp} If $G$ is not virtually nilpotent, then $G$ has a Schreier growth gap $\exp(n)$. \item If $G$ is virtually nilpotent and not virtually abelian, then $G$ has a Schreier growth gap $n^4$. \end{enumerate} \end{main-prop} The proof of this result is elementary. Recall that polycyclic groups are the solvable groups that have all their subgroups finitely generated. This property notably implies that the space $\sub(G)$ of subgroups of $G$ is a countable compact space. Simple compactness considerations then allow to reduce the understanding of the growth of actions of nilpotent and polycyclic groups to the classical results on the growth of these groups. We refer to Section \ref{s-noetherian} for details. Here we shall point out that this approach has limited scope, and does not generalize to other solvable groups. \subsection*{Metabelian groups} The next step to consider is the case of metabelian groups. Even for these groups, the situation becomes much more diversified compared to the polycyclic case. One major difference is that there are groups of exponential growth which admit faithful actions of linear growth. The archetype of such an example is the lamplighter group $C_p\wr \Z$, where $C_p$ is the cyclic group of order $p$. (In fact the lamplighter group admits action of almost arbitrarily prescribed growth, see \S \ref{subsec-behavior}). Hence there is no Schreier growth gap that is uniform for all (non-virtually abelian) metabelian groups, unlike in the polycyclic case. Nevertheless we prove that such uniform gaps do hold in the following two situations: \begin{thmmain} \label{thm-intro-metab-quad} Let $G$ be a finitely generated metabelian group that is not virtually abelian. Suppose that $G$ satisfies at least one of the following: \begin{enumerate}[label=\roman*)] \item $G$ is finitely presented; \item $G$ is torsion-free. \end{enumerate} Then $G$ has a Schreier growth gap $n^2$. 
\end{thmmain} We note that the quadratic bound is optimal both in the finitely presented case and in the torsion-free case, as it is realized respectively by Baumslag's finitely presented metabelian groups (\S \ref{subsec-Baumslag}) and by the wreath product $\Z \wr \Z$ (see below). Beyond the classical lamplighter group, every wreath product $G=A\wr B$ of two finitely generated abelian groups admits faithful actions of polynomial growth. A natural example is the action of $G$ on the Cartesian product $X=B\times A$ called the \textbf{standard wreath product action} (whose definition is recalled in \S \ref{s-wreath-actions}). With natural choices of generators, the graph of this action is obtained by taking a copy of the Cayley graph of $A$ and attaching to each vertex a copy of the Cayley graph of $B$. When $A = B = \Z$ this graph is a comb (Figure \ref{fig-comb}). The growth of the standard action of $A\wr B$ is equivalent to the growth of the abelian group $B\times A$. So for the lamplighter group $G=C_p \wr \Z^d$, the standard wreath product action satisfies $\vol_{G, X}(n)\simeq n^d$, while for $G=\Z \wr \Z^d$ we have $\vol_{G, X}(n)\simeq n^{d+1}$. The next result states that for these groups the growth of the standard action of $G$ is the minimal possible growth of all faithful $G$-actions: \begin{main-thm}[Theorem \ref{thm-wreath}] For every $d \geq 1$, the following hold: \begin{enumerate}[label=\roman*)] \item The group $C_p\wr \Z^d$ has a Schreier growth gap $n^d$. \item The group $\Z \wr \Z^d$ has a Schreier growth gap $n^{d+1}$. \end{enumerate} \end{main-thm} This result is a simple illustration of a more general theorem about metabelian groups, in which we obtain a Schreier growth gap in terms of an algebraic invariant called the Krull dimension. Let $G$ be a finitely generated metabelian group. Recall that whenever $1\to M\to G\to Q\to 1$ is a short exact sequence such that $M$ and $Q$ are abelian, $M$ can be seen as a finitely generated module over the group ring $\Z Q$. This point of view plays a crucial role in the study of metabelian groups since the seminal work of Hall \cite{Hall}. The Krull dimension of the $\Z Q$-module $M$ does not depend on $(M,Q)$ provided that $G$ is not virtually abelian (see \S \ref{s-Krull}). This positive integer is called the \textbf{Krull dimension of $G$}, and was introduced by Cornulier. It implicitly appears in \cite{Cornulier-CBrank}. This notion was further studied by Jacoboni, who established estimates for the return probability for the random walk on a metabelian group $G$ in terms of the Krull dimension of $G$ \cite{Lison}. The following result relates the Krull dimension of a metabelian group to the possible growth of all its faithful actions: \begin{thmmain}\label{thm-metabelian-krull} Let $G$ be a finitely generated metabelian group which is not virtually abelian, and let $k = \dim_{\mathrm{Krull}}(G)$. Then $G$ has a Schreier growth gap $n^k$. \end{thmmain} We have $\dim_{\mathrm{Krull}}(C_p \wr \Z^d)=d$ and $\dim_{\mathrm{Krull}}(\Z \wr \Z^d)=d+1$, so the above statement for wreath products follows from Theorem \ref{thm-metabelian-krull}. As another example, for the free metabelian group $\mathbb{FM}_d$ on $d$ generators, we obtain a Schreier growth gap $n^{d+1}$. This estimate is sharp, in the sense that there is an action realizing the lower bound. 
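Before moving on, let us illustrate the growth of the standard wreath product action concretely. The following short Python sketch is only an illustration and is not used anywhere in the proofs; the explicit encoding of the two generators (base and lamp) acting on $\Z\times\Z$ is ours, following the description of the comb given above. A breadth-first search from the origin exhibits the quadratic growth of balls, in line with $\vol_{G, X}(n)\simeq n^{2}$ for $G=\Z\wr\Z$.
\begin{verbatim}
from collections import deque

def ball_sizes(generators, basepoint, radius):
    """Sizes of the balls |S^n . basepoint| for n = 0..radius, computed
    by breadth-first search in the Schreier graph of the action."""
    dist = {basepoint: 0}
    queue = deque([basepoint])
    while queue:
        point = queue.popleft()
        if dist[point] == radius:
            continue
        for g in generators:
            image = g(point)
            if image not in dist:
                dist[image] = dist[point] + 1
                queue.append(image)
    return [sum(1 for d in dist.values() if d <= n) for n in range(radius + 1)]

# Standard action of Z wr Z on Z x Z (the comb): the base generator and its
# inverse move along the teeth, while the lamp generator and its inverse move
# along the spine, i.e. they only act above the origin of the base.
def base(p):      return (p[0] + 1, p[1])
def base_inv(p):  return (p[0] - 1, p[1])
def lamp(p):      return (p[0], p[1] + 1) if p[0] == 0 else p
def lamp_inv(p):  return (p[0], p[1] - 1) if p[0] == 0 else p

sizes = ball_sizes([base, base_inv, lamp, lamp_inv], (0, 0), 30)
print([sizes[n] for n in (1, 5, 10, 20, 30)])
# 5, 61, 221, 841, 1861, i.e. 2n^2 + 2n + 1 around the origin
\end{verbatim}
Balls centered deep inside a tooth are smaller, but the maximal ball size over all basepoints, which is what the growth of the action records, is likewise quadratic in the radius.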
The case of $\mathbb{FM}_d$ turns out to be very particular among free solvable groups, as for the free solvable group $\mathbb{FS}_{d, \ell}$ of rank $d$ and solvability length $\ell \ge 3$, we prove a Schreier growth gap $\exp(n)$ (Theorem \ref{thm-free-solvable}). The main tool in the proof of the above results is Theorem \ref{thm-explicit-metab} below, which can be described as our main result on metabelian groups. Given a finitely generated group in a short exact sequence $1\to M\to G\to Q\to 1$ where $M,Q$ are abelian, Theorem \ref{thm-explicit-metab} provides a criterion which guarantees that $G$ has a Schreier growth gap $n^d$, where the exponent $d$ depends on the $\Z Q$-module $M$. Beyond wreath products and free metabelian groups, this estimate is sharp for other interesting families of metabelian groups. We illustrate this in \S \ref{s-metabelian-first-examples} with Baumslag's finitely presented metabelian groups \cite{Baumslag-group}. In this case the lower bound provided by Theorem \ref{thm-explicit-metab} is quadratic. Here we mention that these groups have Krull dimension one, so that there are examples for which Theorem \ref{thm-explicit-metab} provides a better bound than Theorem \ref{thm-metabelian-krull}. \subsection*{Solvable groups} We now discuss solvable groups of higher length. A prominent class of finitely generated solvable groups is the class of groups of finite Pr\"ufer rank. Recall that a group $G$ has finite \textbf{Pr\"ufer rank} if there exists $k\in \N$ such that every finitely generated subgroup of $G$ is generated by at most $k$ elements (the rank of $G$ is then the least $k$ with this property). In the sequel we abbreviate finite Pr\"ufer rank by \textbf{finite rank}. The class of solvable groups of finite rank contains polycyclic groups, but it is much richer. Basic examples of groups of finite rank that are not polycyclic are the Baumslag-Solitar groups $\mathrm{BS}(1,n)$. Every finitely generated solvable group that is linear over $\Q$ has finite rank. Examples of groups that are not of finite rank are wreath products $A \wr B$ where $A$ is non-trivial and $B$ is infinite. The class of finitely generated solvable groups of finite rank admits several algebraic characterisations: it coincides with the minimax groups \cite[Ch.\ 5]{Lennox-Rob}, and by a theorem of P.\ Kropholler it also coincides with those solvable groups that do not admit a lamplighter group $C_p \wr \Z$ as a subquotient \cite{Kropholler84}. On a more geometric perspective, finite rank solvable groups have been studied by Pittet--Saloff-Coste in \cite{Pittet-Saloff-Coste} and Cornulier--Tessera in \cite{CT-Banach}. The following result asserts that these groups satisfy the strongest possible Schreier growth gap. \begin{thmmain}\label{thm-intro-prufer} Let $G$ be a finitely generated solvable group of finite rank, and assume that $G$ is not virtually nilpotent. Then $G$ has a Schreier growth gap $\exp(n)$. \end{thmmain} So this result extends the previously mentioned result for polycyclic groups to finite rank solvable groups. However we point out that the proof here is substantially more involved than in the polycyclic case. First we reduce the general case to the case of torsion-free finite rank solvable groups. Such a group is linear over $\Q$, and its nilpotent radical is subject to Malcev theory on torsion-free divisible nilpotent groups. 
We deduce Theorem \ref{thm-intro-prufer} from a general result on groups that are extensions $1\to N\to G\to Q\to 1$ where $N$ is a nilpotent group of finite rank and the action of $Q$ on $N^{ab}\otimes \Q$ satisfies a certain irreducibility condition (Theorem \ref{t-strongly-irreducible}), together with a result of independent interest which asserts that finite rank solvable groups always contain subgroups of a particular form, to which the previous result applies (Proposition \ref{prop-non-virt-nilp-subgroup}). \bigskip The last problem that we discuss is whether the quadratic Schreier growth gap for torsion-free metabelian groups from Theorem \ref{thm-intro-metab-quad} extends to torsion-free solvable groups. We conjecture that this is the case: \begin{conjecture} \label{conj-intro-torsion-free} Let $G$ be a finitely generated solvable group which is virtually torsion-free, and not virtually abelian. Then $G$ has a Schreier growth gap $n^2$. \end{conjecture} We prove that the conjecture is true in many situations, which cover all familiar classes of torsion-free solvable groups. The first observation to make is that Theorem \ref{thm-intro-metab-quad} immediately implies that if a torsion-free group $G$ contains a finitely generated metabelian subgroup which is not virtually abelian, then $G$ has a Schreier growth gap $n^2$. Classical arguments imply that this situation covers all groups which are virtually nilpotent-by-abelian. By a theorem of Malcev, this includes all solvable linear groups (i.e.\ isomorphic to a subgroup of $\GL(n, \corps)$, where $\corps$ is a field). Hence the conjecture is true for these groups: \begin{main-cor}[Corollary \ref{cor-linear-case-quadratic}] Let $G$ be a finitely generated solvable linear group which is virtually torsion-free. If $G$ is not virtually abelian, then $G$ has a Schreier growth gap $n^2$. \end{main-cor} The next result proves the conjecture under a reinforcement of the torsion-free assumption on $G$. Note that if a group $G$ admits a series $\{1\}=N_0\unlhd N_1\unlhd \cdots \unlhd N_k=G$ such that the successive quotients $N_{i+1}/N_i$ are torsion-free, then $G$ is torsion-free. We refer to \S \ref{subsec-fittingseries} for the definition of the Fitting series. \begin{main-thm}[Theorem \ref{thm-fitting-torsionfree}] Suppose that $G$ is a finitely generated solvable group such that the successive quotients in the Fitting series of $G$ are torsion-free, and $G$ is not virtually abelian. Then $G$ has a Schreier growth gap $n^2$. \end{main-thm} The proof of that result does not only rely on the case of metabelian groups. It involves a rather technical mechanism that allows in some cases to lift the desired conclusion from a quotient to the ambient group (see \S \ref{subsec-lift-exp} and \S \ref{subsec-fittingseries}). While the above results establish the conjecture under additional assumptions on $G$, it is also natural to add assumptions on the actions. The following result asserts that Conjecture \ref{conj-intro-torsion-free} is true if we restrict to transitive actions, or more generally to actions with finitely many orbits. \begin{main-thm}[Theorem \ref{t-torsion-free-quasitransitive}] Let $G$ be a finitely generated solvable group which is virtually torsion-free and not virtually abelian. Let $X$ be a faithful $G$-set such that the action of $G$ on $X$ has finitely many orbits. Then $\vol_{G, X}(n) \succcurlyeq n^2$. 
\end{main-thm} \subsection*{Applications: subgroups of topological full groups} We end this introduction by mentioning that Schreier growth gaps provide a natural quantitative obstruction to the existence of embeddings between groups. Explicitly, if a group $L$ is known to admit a faithful action of growth $g(n)$ and if a group $G$ has a Schreier growth gap $f(n)$ with $f(n) \npreccurlyeq g(n)$, then $G$ cannot embed into $L$ (see Proposition \ref{prop-monoton}). Interesting examples of groups $L$ that naturally come with an action of possibly small growth are topological full groups of group actions on the Cantor set. The subgroup structure of such groups is in general quite mysterious. Our results can be immediately applied to obtain restrictions on the solvable subgroups of various topological full groups. As an illustration, we state here one example of such an application. Recall that Matui showed that for every minimal $\Z$-action on the Cantor set which is not an odometer, the topological full group contains the lamplighter group $C_2\wr \Z$ \cite{Mat-exp}. By contrast, our result on Schreier growth gaps of wreath products has the following consequence: \begin{main-cor}[see \S \ref{subsubsection-full-gp}] Let $d \geq 1$ and let $A$ be a non-trivial finitely generated abelian group. If the wreath product $A \wr \Z^d$ embeds into the topological full group of a $\Z$-action on the Cantor set, then $A$ is finite and $d = 1$. \end{main-cor} The fact that the group $C_p \wr \Z^2$ does not embed into the topological full group of the full shift over $\Z$ was conjectured in \cite{Salo}. We refer to \S \ref{s-non-embeddings} for additional applications of our results in this setting (notably Corollary \ref{cor-nV-poly}). \subsection*{Guidelines} Sections \ref{s-finite-rank}--\ref{sec-metab}--\ref{sec-torsionfree} are the core of the article, and our main results stated in this introduction are proven there. \subsection*{Acknowledgements} We are grateful to A. Erschler, J. Frisch and T. Zheng for interesting conversations on Schreier graphs of the lamplighter group which inspired Proposition \ref{p-lamplighter-given-growth}. We also thank Y. Cornulier and P. de la Harpe for useful comments on a preliminary version of this work. \setcounter{tocdepth}{1} \tableofcontents \section{Preliminaries} \label{s-preliminaries} \subsection{Notation} \label{subsec-notation} Let $G$ be a group. We denote by $G^{(i)}$ the derived series of $G$, defined inductively by $G^{(1)} = G' = [G,G]$ and $G^{(i+1)} = [G^{(i)},G^{(i)}] $. The abelianization $G/G'$ is denoted $G^{ab}$. The lower central series of $G$ is denoted by $\gamma_i(G)$. So $\gamma_1(G)=G$ and $\gamma_{i+1}(G)=[\gamma_{i}(G),G]$ for all $i \geq 1$. The upper central series is denoted $Z_i(G)$, with the convention $Z_1(G)=Z(G)$ and $Z_{i+1}(G)/Z_i(G)=Z(G/Z_i(G))$ for all $i$. The \textbf{Fitting subgroup} is denoted $\Fit(G)$. It is the subgroup generated by all nilpotent normal subgroups of $G$. The \textbf{FC-center} $\FC(G)$ is the subgroup of $G$ consisting of elements with a finite conjugacy class. The set of elements of finite order in a group $G$ will be denoted $T(G)$. Recall that when $G$ is nilpotent, $T(G)$ is a subgroup of $G$, which is finite if $G$ is finitely generated. Recall that a set $X$ on which $G$ acts is called a $G$-set. All $G$-sets are supposed to be non-empty. We say that $X$ is faithful, or transitive, whenever the action of $G$ on $X$ has this property. 
For $f,g, \colon \N \to \N$, we write $f\preccurlyeq g$ if there exists a constant $C>0$ such that $f(n)\leq Cg(Cn)$ for sufficiently large $n$. If $f\preccurlyeq g$ and $g\preccurlyeq f$, we write $f\simeq g$. \begin{notation} Let $G$ be a group with an abelian normal subgroup $M$. Since $M$ acts trivially by conjugation on itself, the conjugation action of $G$ on $M$ factors through $Q = G/M$, so that $M$ naturally has the structure of a $\mathbb{Z} Q$-module. When adopting this point of view, we will use additive notation for $M$, and the notation $qm$ for $q \in Q$ and $m \in M$ will be used for the module operation of $q$ on $m$. \end{notation} \subsection{Graphs of actions} Let $G$ be a finitely generated group, and fix a finite symmetric generating subset $S$. If $X$ is a $G$-set, we denote by $\Gamma(G, X)$ the graph whose vertex set is $X$, and for every $x\in X$ and $s\in S$ there is an edge connecting $x$ to $sx$. The graph $\Gamma(G, X)$ is called the \textbf{Schreier graph of the action} of $G$ on $X$. Note that $\Gamma(G, X)$ is not connected in general: its connected components are the $G$-orbits in $X$. We intentionally omit $S$ in the notation, as we will only be interested in properties of these graphs that do not depend on the choice of $S$. We consider the simplicial distance $d$ on $\Gamma(G, X)$, where $d(x, y)$ is defined as the length of the shortest path from $x$ to $y$ (ignoring orientation of edges), with the convention that $d(x, y)=+\infty$ if $x, y$ lie in different connected components. Distinct generating subsets yield bi-Lipschitz equivalent metrics. \begin{remark} \label{rmq-finite-orbits} An extreme case that is covered by our setting is the case where all $G$-orbits in $X$ are finite, so that $\Gamma(G, X)$ is the disjoint union of finite graphs. This corresponds to $G$-actions that are given by a family of finite index subgroups $(G_i)$ of $G$, where $G$ acts on the union of coset spaces $G/G_i$. Note that this action is faithful if and only if there is no non-trivial normal subgroup of $G$ contained in $\bigcap_i G_i$. \end{remark} \subsection{Growth of actions} \label{subsec-growth} Let $(X, d)$ be a metric space (where we allow the distance $d$ to take the value $+\infty$). We denote by $B_x(n)$ the ball of radius $n$ around a point $x\in X$. We say that $(X, d)$ is \textbf{uniformly locally finite} if for every $n$ we have $\sup_{x\in X} |B_x(n)|<\infty$. For example, any graph of bounded degree is uniformly locally finite. If $(X, d)$ is uniformly locally finite, we denote $\vol_X(n)=\sup_{x\in X} |B_x(n)|$. This function is invariant under the equivalence relation $\simeq$ if one replaces $d$ by a quasi-isometric metric. In the sequel $G$ is a finitely generated group, and $S$ a finite symmetric generating set. Let $d_S$ be the associated right-invariant word metric on $G$. Recall that the growth of $G$ is denoted $\vol_G(n) = |S^n|$. \begin{notation} For a subset $\mathcal{L}$ of $G$, we write $\relvol_{(G, \mathcal{L})}:=\vol_{(\mathcal{L}, d_S|_{\mathcal{L}})}$. \end{notation} \begin{remark} If $\mathcal{L} = H$ is a subgroup of $G$, then for every $n$ all balls $B_h(n)$ are isometric since $H$ acts transitively on itself by right translations. Hence in this case $\relvol_{(G,H)}(n) = |H \cap B_e(n)|$ is the classical relative growth of $H$ in $G$. \end{remark} \begin{defin} The \textbf{growth of the action} of $G$ on a $G$-set $X$ is $\vol_{G, X}(n)= \vol_{(\Gamma(G, X), d)}(n)$. 
\end{defin} Equivalently, \[\vol_{G, X}(n)=\max_{x\in X} |S^n \cdot x|.\] Since distinct generating subsets of $G$ yield bi-Lipschitz equivalent metrics on $\Gamma(G, X)$, $\vol_{G, X}(n)$ does not depend on $S$ up to the equivalence relation $\simeq$. The following proposition establishes basic properties of growth of actions. The proof is straightforward, and we omit it. \begin{prop}[Monotonicity] \label{prop-monoton} Let $G$ be a finitely generated group, and $X, Y$ $G$-sets. \begin{enumerate}[label=\roman*)] \item If $H$ is a finitely generated subgroup of $G$, then $\vol_{H, X}(n)\preccurlyeq\vol_{G, X}(n)$. \item If there is a surjective $G$-equivariant map $X \to Y$, then $\vol_{G, X}(n) \succcurlyeq \vol_{G, Y}(n)$. \item If there is a $G$-equivariant map $X\to Y$ whose fibers have uniformly bounded cardinality, then $\vol_{G, X}(n)\preccurlyeq\vol_{G, Y}(n)$. In particular if there is an injective $G$-equivariant map $X\to Y$, then $\vol_{G, X}(n)\preccurlyeq\vol_{G, Y}(n)$. \end{enumerate} \end{prop} The next proposition relates the growth of actions of a group and of its finite index subgroups. Recall that when $H$ is a subgroup of $G$, then to every $H$-set $X$ one can associate a $G$-set, called the \textbf{induced $G$-set} and denoted $\operatorname{Ind}_H^G(X)$. It is defined by $\operatorname{Ind}_H^G(X):=(G\times X)/H$, where the quotient is taken with respect to the diagonal action $h\cdot (g, x)=(gh^{-1}, hx)$. The action of $G$ on $G\times X$ given by $g_1\cdot (g_2, x)=(g_1g_2, x)$ descends to an action on $\operatorname{Ind}_H^G(X)$. \begin{prop} \label{prop-finite-index} Let $G$ be a group and $H$ be a subgroup of finite index in $G$. Then for every $G$-set $X$ we have $\vol_{G, X}(n) \simeq \vol_{H, X}(n)$. Conversely for every $H$-set $X$, the induced $G$-set $Y:=\operatorname{Ind}_H^G(X)$ satisfies $\vol_{G, Y}(n)\simeq \vol_{H, X}(n)$. \end{prop} \begin{proof} The first claim is straightforward, and we only justify the second claim. Let $N$ be the intersection of all conjugates of $H$, which is normal and of finite index in $G$. Set $Y:=\operatorname{Ind}_H^G(X)$ and consider the natural $G$-equivariant projection $p\colon Y\to G/H$. Since $N$ acts trivially on $G/H$, it preserves each fiber of the map $p$. The fiber $p^{-1}(H)$ is naturally a copy of $X$, and the action of $N$ on it coincides with the action obtained by restricting the $H$-action, so that by the first claim we have $\vol_{N, p^{-1}(H)}(n)\simeq \vol_{H, X}(n)$. If $gH$ is another $H$-coset, the map $x\mapsto gx$ induces an identification of $p^{-1}(H)$ with $p^{-1}(gH)$ which is $N$-equivariant up to the automorphism of $N$ induced by conjugation by $g$, so that we also have $\vol_{N, p^{-1}(gH)}(n) \simeq \vol_{H, X}(n)$. Since $Y$ is the disjoint union of the finitely many fibers of the map $p$, we obtain (using the first claim again) that $\vol_{G, Y}(n)\simeq \vol_{N, Y}(n)\simeq \vol_{H, X}(n)$. \end{proof} \subsection{Confined subgroups} We denote by $\sub(G)$ the space of subgroups of $G$, endowed with the topology inherited from $\left\lbrace 0,1\right\rbrace ^G$. The following simple lemma shows that the growth of actions is well-behaved with respect to the topology on $\sub(G)$. \begin{lem} \label{lem-growth-closure} Let $X$ be a $G$-set, and let \[ \mathcal{S}(X) = \overline{ \left\lbrace G_x \, : \, x \in X \right\rbrace } \subseteq \sub(G). \] Then for every $H \in \mathcal{S}(X)$, we have $\vol_{G, G/H}(n) \preccurlyeq \vol_{G, X}(n)$.
\end{lem} \begin{proof} Let $H\in \mathcal{S}(X)$ and let $(x_k)$ a sequence of points such that $(G_{x_k})$ converges to $H$ in $\sub(G)$. For every coset $gH\in G/H$ and every $n$, there exists $k_0$ such that for every $k\geq k_0$ the ball of radius $n$ centered at $gH$ in the graph $\Gamma(G, G/H)$ is isomorphic to the ball of radius $n$ centered at $x_k$ in $\Gamma(G, X)$. Thus its cardinality does not exceed $\vol_{G, X}(n)$, showing that $\vol_{G, G/H}(n)\preccurlyeq\vol_{G, X}(n)$. \qedhere \end{proof} As a consequence of Lemma \ref{lem-growth-closure}, if the trivial subgroup $\{1\}$ belongs to the closure of the stabilisers $\{G_x, x\in X\}$, then $\vol_{G}(n)\preccurlyeq\vol_{G, X}(n)$ and thus $\vol_{G, X}(n)\simeq \vol_{G}(n)$. Recall that a subgroup $H$ of $G$ is \textbf{confined} if the closure of the $G$-conjugacy class of $H$ in $\sub(G)$ does not contain the trivial subgroup $\left\lbrace 1\right\rbrace$. Explicitly, this means that there exists a finite subset $P$ of $G$ consisting of non-trivial elements such that $g Hg^{-1} \cap P \neq \emptyset$ for all $g \in G$. We will consider more generally the case where we only take into account the conjugation action of a given subgroup $L$ of $G$: \begin{defin} \label{def-k-conf-subset} Let $G$ be a group, $L$ a subgroup of $G$. A subgroup $H$ of $G$ is \textbf{confined by $L$} if the closure of the $L$-conjugacy class of $H$ in $\sub(G)$ does not contain the trivial subgroup. Equivalently, if there exists a finite subset $P \subset G \setminus \left\lbrace 1\right\rbrace $ such that $g Hg^{-1} \cap P \neq \emptyset$ for all $g \in L$. We say that such a subset $P$ \textbf{is confining for $(H,L)$}. \end{defin} \begin{notation} We denote by $S_G(P,L) \subset \sub(G)$ the set of subgroups $H$ of $G$ such that $P$ is confining for $(H,L)$. \end{notation} \subsection{Non-foldable subsets} \label{subsec-non-foldable} We now introduce the notion of non-foldable subset of a group $G$. The motivation is that non-foldable subsets provide geometric information on the graphs of actions of $G$. In particular they provide lower bounds for certain asymptotic invariants of these graphs: see Lemma \ref{lem-exp-subset-growth} and Proposition \ref{p-asdim-non-foldable}. \begin{defin} \label{d-non-foldable} Let $G$ be a group and $\mathcal{L}$ a subset of $G$. \begin{enumerate}[label=\roman*)] \item Let $X$ be a $G$-set. We say that $\mathcal{L}$ is \textbf{non-folded in $X$} if for every finite subset $\Sigma$ of $\mathcal{L} $, there exists $x \in X$ such that the map $\Sigma \to X$, $g \mapsto g x$, is injective. \item We say that $\mathcal{L}$ is \textbf{non-foldable} if $\mathcal{L} $ is non-folded in $X$ for every faithful $G$-set $X$. \end{enumerate} \end{defin} The following lemma reformulates the condition that a given subset of $G$ is non-foldable in terms of confined of subgroups. \begin{lem}[Non-foldable subsets and confined subgroups] \label{l-non-foldable-confined} Let $G$ be a group, and $\mathcal{L}$ be a subset of $G$. The following are equivalent: \begin{enumerate}[label=(\roman*)] \item \label{i-non-foldable} $\mathcal{L}$ is a non-foldable subset of $G$. \item \label{i-conf} for every finite subset $\Sigma \subset \mathcal{L} $, we have \[\bigcap_{H\in S_G(P, G)} \! \! H\neq \{1\}, \] where $P=\{g^{-1}h\colon g, h\in \Sigma, g\neq h\}$. \end{enumerate} \end{lem} \begin{proof} Assume that \ref{i-conf} holds, and let $X$ be a faithful $G$-set. Assume by contradiction that \ref{i-non-foldable} does not hold. 
Let $\Sigma\subset \mathcal{L}$ be a subset such that for every $x\in X$, the map $g\mapsto gx$ is not injective on $\Sigma$, and let $P$ be the corresponding set of differences as in the statement. Then $G_x\cap P\neq \varnothing$ for every $x\in X$, and thus $G_x\in S_G(P, G)$ for every $x$. Hence $\cap_x G_x$ contains $\bigcap_{S_G(P, G)} \! \! H$, and is therefore non-trivial. This contradicts the fact that $X$ is faithful. Assume now that \ref{i-non-foldable} holds. Fix a finite subset $\Sigma\subset \mathcal{L}$ and consider the $G$-set $X:=\sqcup_{H\in S_G(P, G)} G/H$, for the subset $P$ associated to $\Sigma$. By construction every point of $X$ is fixed by an element of $P$, so that no orbital map can be injective in restriction to $\Sigma$. If the action of $G$ on $X$ was faithful then by the assumption \ref{i-non-foldable} we would have a contradiction. So $X$ cannot be faithful, and since the kernel is precisely $\bigcap_{S_G(P, G)} H$, condition \ref{i-conf} holds. \qedhere \end{proof} \begin{lem} \label{lem-exp-subset-growth} Let $G$ be a group, and $X$ a $G$-set. If $\mathcal{L} $ is non-folded in $X$, then $\vol_{G,X} (n) \succcurlyeq\relvol_{(G, \mathcal{L})}(n)$. \end{lem} \begin{proof} Let $S$ be a symmetric generating subset of $G$. For $n \geq 1$, let $g_n$ be an element of $G$ such that $|B_{g_n}(n) \cap \mathcal{L}| = \relvol_{(G, \mathcal{L}) }(n)$. We apply the defining condition of $\mathcal{L} $ being non-folded in $X$ to the finite subset $\Sigma =B_{g_n}(n) \cap \mathcal{L}$. We obtain $x_n \in X$ such that $\Sigma \to X$, $g \mapsto g x_n$, is injective. If we write $y_n = g_n x_n$, this implies that $|S^n y_n | \geq |\Sigma|$. But by definition we have $\vol_{G,X}(n) \geq |S^n y_n|$, so $\vol_{G,X}(n) \geq |\Sigma| = \relvol_{(G, \mathcal{L}) }(n)$. \end{proof} \begin{example} An infinite cyclic subgroup $C$ is always a non-foldable subset of $G$. In particular for a finitely generated group $G$, the existence of a distorted cyclic subgroup $C$ provides a non-trivial Schreier growth gap $\relvol_{(G,C)}(n)$. \end{example} \subsection{Non-foldable $k$-tuples} In Section \ref{sec-metab} we will exhibit non-foldable subsets of a particular form in metabelian groups. To this end, we introduce the following convenient terminology. \begin{defin} Let $G$ be a group, $k \geq 1$ and $(g_1,\ldots,g_k) \in G^k$. \begin{enumerate}[label=\roman*)] \item Let $X$ be a $G$-set. We say that $(g_1,\ldots,g_k)$ is \textbf{non-folded in $X$} if for all $n \geq 1$, there exists $x_n \in X$ such that \[ \left[-n,n \right]^k \to X, \, \, (n_1, \ldots, n_k ) \mapsto g_k^{n_k} \ldots g_1^{n_1} (x_n),\] is injective. \item We say that $(g_1,\ldots,g_k)$ is \textbf{non-foldable} if $(g_1,\ldots,g_k)$ is non-folded in $X$ for every faithful $G$-set $X$. \end{enumerate} \end{defin} The following is a reformulation of the definition: \begin{lem} \label{lem-mapZk-inj} Let $X$ be a $G$-set. A $k$-tuple $(g_1,\ldots,g_k)$ is non-folded in $X$ if and only if the following two conditions hold: \begin{itemize} \item the map $\varphi : \mathbb{Z}^k \to G, \, \, (n_1, \ldots, n_k ) \mapsto g_k^{n_k} \ldots g_1^{n_1}$, is injective; \item the image of $\varphi$ is non-folded in $X$. \end{itemize} In particular if $(g_1,\ldots,g_k)$ is non-folded in $X$ then $g_1,\ldots,g_k$ all have infinite order. \end{lem} In particular in this setting Lemma \ref{lem-exp-subset-growth} reads as follows: \begin{lem} \label{lem-exp-tuple-growth} Let $G$ be a group, and $X$ a $G$-set. 
If a $k$-tuple $(g_1,\ldots,g_k)$ is non-folded in $X$ then $\vol_{G,X}(n) \succcurlyeq n^k$. \end{lem} \begin{proof} Consider the map $\varphi : \mathbb{Z}^k \to G, \, \, (n_1, \ldots, n_k ) \mapsto g_k^{n_k} \ldots g_1^{n_1}$, and denote by $\mathcal{L}$ its image. Since $\varphi$ is injective, we have $\relvol_{(G, \mathcal{L}) }(n) \succcurlyeq n^k$. And since $\mathcal{L}$ is non-folded in $X$ the statement follows from Lemma \ref{lem-exp-subset-growth}. \end{proof} \subsection{Asymptotic dimension of graphs of actions} Another geometric invariant associated to graphs of actions of a finitely generated group is the asymptotic dimension, introduced by Gromov \cite{Gro-asdim}. Let us recall its definition and some basic properties, in analogy with the previous discussion on growth from \S \ref{subsec-growth} and \S \ref{subsec-non-foldable}. For more information we refer to the survey \cite{BD-asdim}. \begin{defin} Let $(X, d)$ be a metric space, and $n \geq 0$. We say that space $(X, d)$ has asymptotic dimension at most $n$ if for every $R>0$ there exists a cover $\mathcal{U}$ of $X$ consisting of subsets of uniformly bounded diameter such that every $R$-ball in $X$ intersects at most $n+1$ sets in $\mathcal{U}$. The asymptotic dimension $\asdim(X, d)$ of $X$ is the smallest $n\in \N\cup \{ \infty \}$ such that $(X, d)$ has asymptotic dimension at most $n$. \end{defin} The following lemma follows easily from the definition of asymptotic dimension, see the argument in \cite[\S 6]{BTS-asdim} (or the proof of \cite[Proposition 2.5]{MB-graph-germs} for more details). \begin{lem} \label{l-asdim-monotone} Let $(\Gamma, d_\Gamma)$ and $(\Delta, d_\Delta)$ be uniformly locally finite metric spaces. Assume that there exist $C, D>0$ such that for every $R>0$ and $x\in \Gamma$ there exists a $C$-Lipschitz map $f\colon B(x, R)\to \Delta$ such that $|f^{-1}(y)|\le D$ for every $y\in \Delta$. Then $\asdim(\Gamma)\le \asdim(\Delta)$. \end{lem} \begin{defin} Let $G$ be a finitely generated group, and $X$ a $G$-set. The asymptotic dimension of the $G$-set $X$ is $\asdim(G, X)=\asdim(\Gamma(G, X))$. \end{defin} Note that $\asdim(G, X)$ does not depend on the generating subset $S$ used to define the graph $\Gamma(G, X)$, since asymptotic dimension is a quasi-isometry invariant \cite[Prop. 22]{BD-asdim}. The following lemma is a straightforward consequence of Lemma \ref{l-asdim-monotone}. \begin{lem} Let $G$ be a finitely generated group and $X$ be a $G$-set. Then for every finitely generated subgroup $H$ of $G$ we have $\asdim(H, X)\le \asdim(G, X)$. \end{lem} The following lemma is proven analogously to Proposition \ref{prop-finite-index}, using Lemma \ref{l-asdim-monotone} and the invariance of asymptotic dimension up to quasi-isometry. \begin{lem} Let $G$ be a finitely generated group and $H$ be a subgroup of finite index of $G$. Then for every faithful $G$-set $X$ we have $\asdim(G, X)=\asdim(H, X)$. Conversely for every $H$-set $X$, the induced $G$-set $Y:=\operatorname{Ind}^G_H(X)$ satisfies $\asdim(G, Y)=\asdim(H, X)$. \end{lem} Lemma \ref{l-asdim-monotone} has the following immediate consequence concerning non-foldable subsets. \begin{prop}\label{p-asdim-non-foldable} Let $G$ be a finitely generated group, and $\mathcal{L}$ a non-foldable subset of $G$. Then for every faithful $G$-set $X$ we have $\asdim(G, X)\ge \asdim(\mathcal{L})$, where $\mathcal{L}$ is seen as a metric space endowed with the restriction to $\mathcal{L}$ of a word metric on $G$. 
\end{prop} \section{Motivating and limiting examples} In this section we give some examples of group actions which add context to the main results of the article. \subsection{Some actions of small growth} \label{s-small-growth} The point of this paragraph is to recall some examples of finitely generated solvable groups of exponential growth that admit natural actions with small growth, such as the wreath products $C_p \wr \Z^d$ or $\Z\wr \Z^d$, and Baumslag metabelian groups. \subsubsection{Wreath product actions} \label{s-wreath-actions} Consider a wreath product $G = A \wr B$ of two groups $A,B$. Recall that $G$ is the semi-direct product $\oplus_B A \rtimes B$, where $\oplus_B A$ is the set of finitely supported functions $B \to A$, and $B$ acts on $\oplus_B A$ by $b \cdot \varphi : b' \mapsto \varphi(b^{-1}b')$. The group $G$ naturally comes with an action on the set $B \times A$, called the \textbf{standard wreath product action}, defined by $(\varphi,b) \cdot (b_0, a_0) = (bb_0, \varphi(bb_0)a_0)$. In the sequel we denote by $X_\mathrm{st}$ the $G$-set $X_\mathrm{st} = B \times A$. If $S_1,S_2$ are finite generating subsets of $A,B$ respectively, then $S = S_1 \cup S_2$ is a generating subset of $G$. Here and throughout the paper, we implicitly view $S_1$ and $S_2$ a subsets of $G$ by identifying $A$ and $B$ with their natural copies inside $G$, where the copy of $A$ consists of elements in $\oplus_B A$ that are trivial everywhere except at the identity position. The edges in the graph $\Gamma(G,X_\mathrm{st})$ are defined by saying that there is an edge between $(1_B,a)$ and $(1_B,s_1a)$ for every $a \in A$ and $s_1 \in S_1$, and between $(b,a)$ and $(s_2b,a)$ for every $b \in B,a \in A$ and $s_2 \in S_2$. Equivalently, the graph $\Gamma(G,X_\mathrm{st})$ is obtained by taking a copy of the Cayley graph of $A$ and attaching to each vertex a copy of the Cayley graph of $B$. For instance when $A$ is finite, $\Gamma(G,X_\mathrm{st})$ is just the union of $k=|A|$ copies of the Cayley graph of $B$ that are joined at the identity position, and $\vol_{G, X_\mathrm{st}}(n) \simeq \vol_{B}(n)$. So for $B = \Z$, we obtain a union of $k$ bi-infinite lines. When $A = B = \Z$ with standard generating subsets, the graph $\Gamma(\Z \wr \Z,X_\mathrm{st})$ is a comb, see Figure \ref{fig-comb}. In that case we have $\vol_{G, X_\mathrm{st}}(n) \simeq n^2$. In general the growth of the $G$-action on $X_\mathrm{st}$ is given by the following: \begin{figure}[ht] \includegraphics[scale=.5]{peigneZwrZlabelled} \caption{\small The graph of the standard action of $\Z\wr \Z$ on $\Z\times \Z$. Red arrows correspond to the generator $a$ of the lamp group, while blue arrows correspond to the generator $b$ of the base group. }\label{fig-comb} \end{figure} \begin{lem} We have $\vol_{G, X_\mathrm{st}}(n) \simeq \vol_{B}(n) \times \vol_{A}(n)$. \end{lem} \begin{proof} By the above description of $\Gamma(G,X_\mathrm{st})$, the identity map $\Gamma(G,X_\mathrm{st}) \to B \times A$ is a Lipschitz map, where the target space $B \times A$ is endowed with the $\ell^1$-metric. This implies $\vol_{G, X_\mathrm{st}}(n) \preccurlyeq \vol_{B}(n) \times \vol_{A}(n)$. Conversely, every element $(b,a)$ such that $|a|_{S_1}, |b|_{S_2} \leq n$ is at distance at most $2n$ from $(1_B,1_A)$ in $\Gamma(G,X_\mathrm{st})$, so the ball of radius $2n$ around $(1_B,1_A)$ in $\Gamma(G,X_\mathrm{st})$ contains at least $\vol_{B}(n) \times \vol_{A}(n)$ elements. The statement follows. 
\end{proof} \subsubsection{Baumslag's finitely presented examples} \label{subsec-Baumslag} Consider the group given by the finite presentation \[\Lambda_p = \left\langle u,t,s \, | \, u^p = 1, \, [s,t] = 1, \, [u^{t},u] = 1, \, u^{s} = u^{t} u \right\rangle, \] where $p$ is a prime number. These groups (actually their torsion-free counterparts) were introduced in \cite{Baumslag-group}. Baumslag showed that $\Lambda_p$ is isomorphic to the semi-direct product $\mathbb{F}_p[T, T^{-1},(T+1)^{-1}] \rtimes \Z^2$, where the generators $t,s$ of $\Z^2$ act on the ring $R:=\mathbb{F}_p[T, T^{-1},(T+1)^{-1}]$ by multiplication by $T$ and $T+1$. We claim that $\Lambda_p$ admits a faithful transitive action on a set $X$ such that $\vol_{\Lambda_p, X}(n)\simeq n^2$. To see this, observe that every element of $R$ can be uniquely written as \[ \sum_{n \in \Z} a_n T^n + \sum_{\ell \geq 0, q > 0} b_{\ell,q} T^{-\ell} (1+T)^{-q},\] where $a_n, b_{\ell,q} \in \mathbb{F}_p$, and only finitely many of them are non-zero. Consider the subgroup $H$ of $R$ defined by the condition $a_0 = 0$. An easy computation shows that the intersection of all conjugates of $H$ in $\Lambda_p$ is trivial. Equivalently, the action of $\Lambda_p$ on $X = \Lambda_p/H$ is faithful. The subgroup $H$ has finite index equal to $p$ in $R$. We have a $\Lambda_p$-equivariant surjective map $\Lambda_p/H\to \Lambda_p/R$, whose fibers have cardinality $p$. Thus Proposition \ref{prop-monoton} implies that $\vol_{\Lambda_p, X}(n) \simeq \vol_{\Lambda_p, \Lambda_p/R}(n)\simeq n^2$. The Baumslag groups admit the following generalisation. For $d\ge 1$ let \[ \Lambda_{p, d}= \mathbb{F}_p[T_1,\ldots, T_d, T_1^{-1}, \ldots, T_d^{-1}, (1+T_1)^{-1},\ldots, (1+T_d)^{-1}] \rtimes \Z^{2d}.\] The action is defined by saying that if $t_1,\ldots, t_d, s_1,\ldots, s_d$ are generators of $\Z^{2d}$, then $t_i$ and $s_i$ act respectively by multiplication by $T_i$ and $T_i+1$. The torsion-free analogues of these groups were introduced by Erschler \cite{Ersch-Liouville} as generalisations of Baumslag's groups, to give examples of finitely presented groups which are amenable but not Liouville (for $d\ge 3$). They admit an analogous finite presentation \cite[Lemma 5.1]{Ersch-Liouville}. The same argument as above shows that each group $\Lambda_{p, d}$ admits a faithful action with $\vol_{\Lambda_{p, d}, X}(n)\simeq n^{2d}$. \subsubsection{A general criterion} The following result, which is based on a result of Olshanskii \cite{Olshanskii-KK}, shows that the existence of actions of polynomial growth for the wreath products $C_p \wr \Z^d$ and the above groups $\Lambda_{p, d}$ is not an isolated phenomenon. \begin{prop} \label{prop-growth-ol} Let $G$ be a finitely generated metabelian group such that $G'$ is a torsion group and $G/G'$ has torsion-free rank $d$. Then $G$ admits a faithful transitive action on a set $X$ with $\vol_{G, X}(n) \simeq n^d$. \end{prop} \begin{proof} By \cite[\S 3]{Olshanskii-KK}, if $G$ is a metabelian group such that $G'$ is a torsion group, there always exists a finite index subgroup $H$ of $G'$ such that the intersection of all $G$-conjugates of $H$ is trivial, so that the action of $G$ on $G/H$ is faithful. Since $H\le G'$ we have a $G$-equivariant surjective map $ G/H\to G/G'$, whose fibers have cardinality equal to the index $[G': H]$. Proposition \ref{prop-monoton} then implies $\vol_{G, G/H}(n)\simeq \vol_{G, G/G'}(n)\simeq n^d$.
\qedhere \end{proof} \subsection{Actions of lamplighter groups with prescribed growth} \label{subsec-behavior} It is natural to wonder if, as in the case of the growth of the group, the growth of an action of a solvable group is always polynomial of integer degree or exponential. It turns out that this is far from being the case. In fact, even the growth of an action of the lamplighter group $G=C_2 \wr \Z$ can be arranged to be equivalent to an arbitrary function satisfying mild conditions: \begin{prop}\label{p-lamplighter-given-growth} Let $G=C_2 \wr \Z$. Let $f\colon \N \to \R_+$ be a non-decreasing function such that $f(n+1)/f(n)$ is non-increasing. Then there exists a faithful and transitive $G$-set $X$ such that $\vol_{G, X}(n)\simeq n f(n)$. In particular: \begin{itemize} \item for every real number $\alpha \ge 1$, there exists a faithful $G$-set $X$ with $\vol_{G, X}(n)\simeq n^\alpha$; \item for every $0<\beta \le 1$, there exists a faithful $G$-set $X$ with $\vol_{G, X}(n) \simeq \exp(n^\beta)$. \end{itemize} \end{prop} \begin{proof} Let us write $f(n)= 4^{g(n)}$, where $g\colon \N\to \R$ is non-decreasing and $g(n+1)-g(n)$ is non-increasing. If $g(n+1)-g(n)$ is bounded away from $0$, then $f(n)\simeq \exp(n)$, so that we can choose $X=G$ acting on itself. Thus in the following we suppose that $g(n+1)-g(n)$ tends to $0$. Upon replacing $g(n)$ by $g(n_0+n)$ for some $n_0>0$ we can suppose that $g(n+1)-g(n)\le 1$ for every $n$ (this does not affect $f(n)$ up to $\simeq$ since $f(n)\le 4^{g(n+n_0)} \le 4^{n_0(g(1)-g(0))} f(n)$). Similarly if $g(n)$ is bounded then we can choose $X=C_2\times \Z$ (the standard wreath product action; see \S \ref{s-wreath-actions}). So we assume that $g(n)$ tends to $+\infty$. For $k\in \N$ let $x_k=\min\{n\colon g(n)\ge k\}$ and let $\Omega=\{x_k, k\in \N\} \cup \{-x_k, k\in \N\}$. Let $H=\{r\in \oplus_\Z C_2 \colon r|_\Omega=0\}$ and set $X=G/H$. Note that $X$ is a faithful $G$-set. Indeed it is easy to see that every non-trivial element $r\in \oplus_\Z C_2$ has a conjugate outside of $H$. In what follows we write elements of $G$ as pairs $(r,m)$ with $r\in \oplus_\Z C_2$ and $m\in \Z$, and let $S=\{s, t^{\pm 1}\}$ be the standard generating set given by $s=(\delta_0, 0)$ and $t=(0, 1)$. The set $X$ can be naturally identified with $\Z \times (\oplus_{\Omega} C_2)$, by mapping each coset $(r, m)H$ to $(m, \sigma_mr|_{\Omega})$, where $\sigma_mr$ denotes the shifted configuration $\sigma_mr(x):=r(x-m)$. Under this identification, the generator $t$ acts by moving $(m, r)$ to $(m+1, r)$, and the generator $s$ acts on $(m, r)$ by flipping the value of $r(-m)$ if $-m\in \Omega$, while $s$ fixes $(m, r)$ if $-m\notin \Omega$. From this description it follows that any product of at most $n$ generators must move $(m, r)$ to some point $(m', r')$, where $m'\in [m-n, m+n]$ and $r$ and $r'$ coincide outside $[m-n, m+n]\cap \Omega$. On the other hand, by applying a product of at most $2n$ generators to $(m, r)$ it is possible to reach any $(m', r')$ satisfying the same constraints. This implies that the ball $B_n$ of radius $n$ around $(m, r)$ in the Schreier graph of the action satisfies \begin{equation}\label{e-lamplighter-actions} |B_n|\le (2n+1)2^{|[m-n, m+n]\cap \Omega|}\le |B_{2n}|.\end{equation} Let us analyse the cardinality $|[m-n, m+n]\cap \Omega|$. First we note that the assumption that $g(n+1)-g(n)$ is non-increasing implies that $x_{k+1}-x_k$ is non-decreasing, so that when $n$ is fixed, $|[m-n, m+n]\cap \Omega|$ is maximized for $m=0$. 
On the other hand the assumption that $g(n+1)-g(n)\le 1$ implies that the map $k\mapsto x_k$ is injective. As a consequence, we have $|[0, n]\cap \Omega|=\max\{k \colon x_k\le n\}$, which is equal to $g(n)$ up to an additive error bounded by 1. We deduce that $\vol_{G, X}(n)\simeq (2n+1)2^{|[-n, n]\cap \Omega|}\simeq n2^{2g(n)}= n f(n)$. \qedhere \end{proof} \begin{remark} For the $d$-dimensional lamplighter $G=C_2\wr \Z^d$, the same argument (with minor modifications) can be applied to show that for every non-decreasing function $f\colon \N\to \R_{+}$ such that $f(n+1)/f(n)$ is non-increasing, there exists a faithful $G$-set $X$ such that $\vol_{G, X}(n)\simeq n^d f(n)$. Note that the smallest growth obtained through this construction is $n^d$, which is equal to the growth of the standard wreath product action of $C_2 \wr \Z^d$. \end{remark} \section{Polycyclic groups} \label{s-noetherian} The goal of this section is to explain how the understanding of the growth of actions of nilpotent and polycyclic groups reduces to the classical results on growth of these groups due to Wolf \cite{Wolf}, Bass \cite{Bass-nilp} and Guivarc'h \cite{Guivarch-nilp}. The approach is elementary. It is based on compactness arguments in the space $\sub(G)$, and holds more generally for Noetherian groups. Recall that a group is \textbf{Noetherian} if all its subgroups are finitely generated. A solvable group is Noetherian if and only if it is polycyclic. First we isolate the following remark for future reference: \begin{lem} \label{l-product-actions} Let $G$ be a finitely generated group which is a direct product $G=Q_1\times \cdots \times Q_k$. For each $i$, let $X_i$ be a faithful $Q_i$-set. Then $X=\sqcup_i X_i$ is a faithful $G$-set, where $G$ acts on each $X_i$ through $Q_i$, and we have \[\vol_{G, X}(n)=\max_{i=1}^k \vol_{Q_i, X_i}(n).\] In particular if all $Q_i$s have polynomial growth with $\vol_{Q_i}(n)\simeq n^{d_i}$, then there is a faithful $G$-set $X$ such that $\vol_{G, X}(n)\simeq n^d$, where $d=\max_i d_i$. \end{lem} In particular we have the following for virtually abelian groups. \begin{prop}\label{prop-virtually-abelian} If $G$ is an infinite finitely generated virtually abelian group, then there exists a faithful $G$-set $X$ such that $\vol_{G, X}(n)\simeq n$. \end{prop} \begin{proof} The group $G$ contains a finite index subgroup isomorphic to $\Z^d$ for some $d\ge 1$. Thus the conclusion follows from Proposition \ref{prop-finite-index}, by considering the action of $\Z^d$ on $\sqcup_{i=1}^d \Z$ as in Lemma \ref{l-product-actions}. \end{proof} The following proposition is a partial converse to Lemma \ref{l-product-actions} for Noetherian groups. \begin{prop} \label{prop-noetherian} Let $G$ be a finitely generated Noetherian group, and $X$ a faithful $G$-set. Then there exist a finite index subgroup $G^0\le G$ and normal subgroups $K_1,\dots,K_d\unlhd G^0$ such that, if we denote $Q_i=G^0/K_i$, then the following hold: \begin{enumerate}[label=\roman*)] \item \label{item-neoth-subdirect} The intersection $\bigcap_i K_i$ is trivial. Equivalently, $G^0$ embeds in $Q_1\times \cdots \times Q_d$. \item \label{item-neoth-fixed} If we let $X_i$ denote the set of points of $X$ fixed by $K_i$, then we have $X=\cup_i X_i$, and each $X_i$ is a $G^0$-invariant subset such that the action of $G^0$ on $X_i$ factors through an action of $Q_i$. \item \label{item-neoth-locally-embeds} $Q_i$ is non-folded in $X_i$ for all $i$. \item \label{item-neoth-growth} We have $\vol_{G, X}(n)\simeq \max_{i=1}^d \vol_{Q_i}(n)$.
\end{enumerate} \end{prop} \begin{proof} Let $\X$ be a closed $G$-invariant subset of $\sub(G)$. Fix $H\in \X$. Let $\H$ be the closure of the $G$-orbit of $H$ in $\X$, and $\mathcal{L} \subseteq \H$ be a non-empty minimal closed $G$-invariant subset. Since $G$ is Noetherian, $\sub(G)$ is a countable compact space, and hence every non-empty closed subset of $\sub(G)$ admits isolated points. So $\mathcal{L}$ admits isolated points, and hence by minimality it follows that $\mathcal{L}$ is finite. So $\mathcal{L}$ is just the finite $G$-orbit of a subgroup $L \in \X$. Now since $L$ is finitely generated, the set $S_G(\geq,L)$ of subgroups $H$ of $G$ such that $H$ contains $L$ is an open neighbourhood of $L$ in $\sub(G)$. Since by definition $L$ belongs to the closure of the $G$-orbit of $H$, it follows that there exists a conjugate of $H$ that contains $L$, and as a consequence $H$ itself contains some $L' \in \mathcal{L}$. Now using again that $S_G(\geq,L)$ is open for every subgroup $L$ and compactness of $\X$, it follows that we can find a finite $G$-invariant subset $\{L_1, \ldots, L_d\}\subset \X$, such that for every $H\in \X$ there is $i$ such that $L_i\le H$. We now apply this discussion to the closed $G$-invariant subset $\X$ of $\sub(G)$ given by the closure of the set of stabilisers $\{G_x\colon x\in X\}$, and we let $\{L_1,\ldots, L_d\}$ be the finite $G$-invariant subset obtained above. Let $G^0$ be the kernel of the action of $G$ by conjugation on $\{L_1,\ldots, L_d\}$, which is a finite index subgroup of $G$ normalising each $L_i$, set $K_i:=G^0\cap L_i$, and let $X_i$ be the set of points of $X$ fixed by $K_i$, and note that all conclusions in part \ref{item-neoth-subdirect} and \ref{item-neoth-fixed} hold true by construction (indeed every stabiliser $G_x$ belongs to $\X$ and hence contains some $L_i\supseteq K_i$, so that $X=\cup_i X_i$; moreover $\bigcap_i K_i$ fixes every $X_i$ pointwise, hence fixes $X$ pointwise, and is therefore trivial since $X$ is faithful). Let us show \ref{item-neoth-locally-embeds}. Since $L_i$ belongs to $\X$, we can find a sequence of points $(x_n)\subset X$ such that $G_{x_n}$ converges to $L_i$. As a consequence the subgroups $G^0_{x_n}=G^0\cap G_{x_n}$ converge to $G^0\cap L_i=K_i$. It follows in particular that $G^0_{x_n}$ contains $K_i$ for $n$ large enough, which means that $(x_n)\subset X_i$ for $n$ large enough. Thus the stabiliser of $x_n$ in $Q_i$ converges to the trivial subgroup in $\sub(Q_i)$, so that $Q_i$ is non-folded in $X_i$. In order to justify \ref{item-neoth-growth}, first note that $\vol_{G, X}(n)\simeq \vol_{G^0, X}(n)$ by Proposition \ref{prop-finite-index}. Second, note that since every point of $X$ lies in some $X_i$ and the action of $G^0$ on $X_i$ factors through an action of $Q_i$, we must have $\vol_{G^0, X}(n)\preccurlyeq\max_i \vol_{Q_i}(n)$. Finally the converse inequality $\max_i \vol_{Q_i}(n)\preccurlyeq\vol_{G^0, X}(n)$ follows from part \ref{item-neoth-locally-embeds} together with Lemma \ref{lem-exp-subset-growth} applied to the $Q_i$-set $X_i$.\qedhere \end{proof} Recall that a group $G$ is \textbf{subdirectly decomposable} if it admits non-trivial normal subgroups $M,N$ such that $M \cap N = 1$. We will also say that $G$ is \textbf{virtually subdirectly decomposable} if some finite index subgroup of $G$ is subdirectly decomposable. \begin{cor} \label{cor-noeth-group-loc-embed} Suppose $G$ is Noetherian and not virtually subdirectly decomposable. Then the whole group $G$ is non-foldable. In particular $\vol_{G, X}(n)\simeq \vol_G(n)$ for every faithful $G$-set $X$. \end{cor} \begin{proof} We apply Proposition \ref{prop-noetherian}. It cannot be that all the $K_i$ are non-trivial, since otherwise the intersection would be non-trivial by assumption. Hence there is $i$ such that $K_i = 1$, and hence $Q_i=G$ is non-folded in $X_i=X$.
\end{proof} \subsection{Nilpotent groups} If $G$ is a finitely generated nilpotent group, then classical results of Bass and Guivarc'h \cite{Bass-nilp, Guivarch-nilp} assert that the growth of $G$ is $\vol_G(n)\simeq n^{\alpha_G}$, where $\alpha_G$ is a positive integer given by the formula \begin{equation}\label{e-Bass-Guivarch}\alpha_G:=\sum_{i\ge 0} i\dim_\Q\left((\gamma_i(G)/\gamma_{i+1}(G))\otimes \Q\right).\end{equation} The following is a direct consequence of Proposition \ref{prop-noetherian} combined with the Bass-Guivarc'h formula. It implies in particular that the growth of every action of a finitely generated nilpotent group is polynomial. \begin{cor} \label{cor-polynomial-growth} Let $G$ be a finitely generated nilpotent group, and let $X$ be a faithful $G$-set. Then there exist a finite index subgroup $G^0\le G$ and normal subgroups $K_1,\dots,K_d\unlhd G^0$ with $\bigcap K_i=\{1\}$ such that if we write $Q_i=G^0/K_i$ and $\alpha_*=\max_i \alpha_{Q_i}$, then $\vol_{G, X}(n) \simeq n^{\alpha_*}$. \end{cor} In the sequel we denote by $H_3(\Z)$ the Heisenberg group of $3 \times 3$-matrices over the integers. Recall that it has infinite cyclic center, and the associated quotient is free abelian of rank $2$. It has the presentation $H_3(\Z) = \langle a, b, c\colon [a,b]=c, [a,c]=[b, c]=1\rangle$. For later use we record the following well-known fact. \begin{lem} \label{l-subgroup-H3} Let $G$ be a finitely generated nilpotent group which is not virtually abelian. Then $G$ contains a subgroup $H$ isomorphic to $H_3(\Z)$. \end{lem} \begin{proof} Upon passing to a finite index subgroup we can suppose that $G$ is torsion-free. Let $\gamma_{k} (G)$ be the last non-trivial term in the lower central series. Since $G$ is not abelian, we have $k\ge 2$ and we can find $g\in \gamma_{k-1} (G)$ and $h\in G$ such that $z:=[g, h]$ is not trivial. Note that $z$ is central in $G$, so that we have $[g, z]=[h,z]=1$. Hence $H=\langle g, h, z\rangle$ is isomorphic to a quotient of $H_3(\Z)$. Since every proper quotient of $H_3(\Z)$ either has torsion or is abelian, it follows that $H$ is isomorphic to $H_3(\Z)$. \end{proof} \begin{lem} \label{l-H3-indecomp} The group $H_3(\Z)$ is not virtually subdirectly decomposable. \end{lem} \begin{proof} If $L$ is a finite index subgroup of $H = H_3(\Z)$, the center $Z(L)$ is infinite cyclic. Since any normal subgroup of $L$ intersects $Z(L)$ non-trivially \cite[1.2.8 i)]{Lennox-Rob}, any finite family of non-trivial normal subgroups of $L$ contains a common non-trivial element of $Z(L)$. \qedhere \end{proof} \begin{prop}\label{prop-n4-growth} Let $G$ be a finitely generated virtually nilpotent group which is not virtually abelian. Then any subgroup $H$ isomorphic to $H_3(\Z)$ is a non-foldable subset of $G$. In particular $G$ has a Schreier growth gap $n^4$. \end{prop} \begin{proof} The first statement is a direct consequence of Corollary \ref{cor-noeth-group-loc-embed} and the previous lemmas, and the last statement follows since $H_3(\Z)$ has growth $\simeq n^4$. \end{proof} \subsection{Polycyclic groups} Recall that Wolf's theorem asserts that a polycyclic group that is not virtually nilpotent has exponential growth \cite{Wolf}. Combined with Proposition \ref{prop-noetherian} this has the following consequence. \begin{cor} \label{cor-poly-growth} Suppose $G$ is a polycyclic group that is not virtually nilpotent. Then $G$ has a Schreier growth gap $\exp(n)$. \end{cor} \begin{proof} The group is polycyclic, and hence Noetherian.
If $X$ is a faithful $G$-set, we apply Proposition \ref{prop-noetherian}. Let $G^0$ and $K_1,\ldots, K_d$ be as in the conclusion. Being a finite index subgroup of $G$, the group $G^0$ has exponential growth. It follows that at least one of the groups $Q_i=G^0/K_i$ must have exponential growth, since $G^0$ embeds in their product. Therefore $\max_i{\vol}_{Q_i}(n)\simeq \exp(n)$, and the conclusion follows from Proposition \ref{prop-noetherian}. \qedhere \end{proof} The route taken above to prove Corollary \ref{cor-poly-growth} does not produce explicit non-foldable subsets. However this will be achieved in Section \ref{s-finite-rank} in the more general setting of solvable groups of finite rank. Below we provide a simpler construction of non-foldable subsets in polycyclic groups, which already yields information on the asymptotic dimension of graphs of actions of polycyclic groups (Corollary \ref{cor-poly-asdimX}). To this end, we introduce the following terminology (which is consistent with the terminology that we will use in \S \ref{subsec-strong-irr}). \begin{defin} We say that a semi-direct product $G = \Z^k \rtimes \Z$ is \textbf{strongly irreducible} if every non-trivial subgroup of $\Z^k$ that is invariant under a finite index subgroup of $\Z$ has rank $k$. \end{defin} \begin{lem} \label{lem-irr-SDP-notdecomp} Let $G = \Z^k \rtimes \Z$ be a strongly irreducible semi-direct product with $k \geq 2$. Then $G$ is not virtually subdirectly decomposable. \end{lem} \begin{proof} Observe that every finite index subgroup of $G$ is also a strongly irreducible semi-direct product. Hence it is enough to check that the intersection of two non-trivial normal subgroups of $G$ remains non-trivial. Observe also that the condition $k \geq 2$ ensures that $G$ is not virtually abelian. If $N$ is a non-trivial normal subgroup of $G$, then $N \cap \Z^k$ is necessarily non-trivial, because otherwise $N$ would commute with $\Z^k$ and $G$ would have a finite index abelian subgroup. So by the strongly irreducible assumption it follows that $N \cap \Z^k$ has finite index in $\Z^k$. In particular it follows immediately that if $M,N$ are two non-trivial normal subgroups of $G$, then $M \cap N$ is non-trivial. \end{proof} \begin{lem} \label{lem-poly-metab-norm-subgroup} Every polycyclic group $G$ that is not virtually abelian contains a (normal) subgroup that is metabelian and not virtually abelian. \end{lem} \begin{proof} Since $G$ is infinite, $G$ admits an infinite free abelian normal subgroup \cite[1.3.9]{Lennox-Rob}. Let $A$ be an infinite free abelian normal subgroup of maximal rank $n$. Again $G/A$ is infinite, so we may find an infinite free abelian normal subgroup $B$ in $G/A$ of rank $m$. Consider the preimage $N$ of $B$ in $G$, which is a metabelian normal subgroup of $G$. If $N$ were virtually abelian, it would admit a characteristic free abelian subgroup $C$ of rank $m+n$. In particular $C$ would be normal in $G$, contradicting the maximality of $n$. So this subgroup $N$ satisfies the conclusion. \end{proof} The following is an easy consequence of basic facts from linear algebra. \begin{lem} \label{lem-poly-metab-nilpORirr} Let $G$ be a metabelian polycyclic group. Then either $G$ is virtually nilpotent, or $G$ contains a subgroup $H = \Z^k \rtimes \Z$ with $k \geq 2$ that is a strongly irreducible semi-direct product. \end{lem} \begin{proof} Upon passing to a finite index subgroup, $G$ admits a normal subgroup $N$ isomorphic to $\Z^d$ such that $Q = G/N$ is isomorphic to $\Z^r$.
Consider the associated representation $Q \to \GL(d, \Z)$. If every element of $Q$ has the property that all its eigenvalues are roots of unity, then the image of $Q$ has a finite index subgroup consisting of unipotent elements, and $G$ is virtually nilpotent, a contradiction. So we can find an element of $Q$ whose eigenvalues are not all roots of unity, and the conclusion follows by applying Lemma \ref{l-strongly-irreducible-power} below. \end{proof} \begin{lem}\label{lem-poly-special subgroup} Let $G$ be a polycyclic group that is not virtually abelian. Then $G$ contains a subgroup isomorphic to the Heisenberg group $H_3(\Z)$, or a subgroup $H = \Z^k \rtimes \Z$ with $k \geq 2$ that is a strongly irreducible semi-direct product. \end{lem} \begin{proof} By Lemma \ref{lem-poly-metab-norm-subgroup} it is enough to treat the case where $G$ is metabelian. In the virtually nilpotent case we use Lemma \ref{l-subgroup-H3}, and in the other case we invoke Lemma \ref{lem-poly-metab-nilpORirr}. \end{proof} \begin{prop} \label{prop-expand-subgroup-poly} Let $G$ be a polycyclic group that is not virtually abelian. Then any subgroup of $G$ as in Lemma \ref{lem-poly-special subgroup} is a non-foldable subset of $G$. \end{prop} \begin{proof} Let $H$ be a subgroup of $G$ that is isomorphic either to $H_3(\Z)$ or to a strongly irreducible semi-direct product $\Z^k \rtimes \Z$ with $k \geq 2$. In both cases $H$ is not virtually subdirectly decomposable (Lemma \ref{l-H3-indecomp} and Lemma \ref{lem-irr-SDP-notdecomp}), and hence $H$ is a non-foldable subset of $G$ by Corollary \ref{cor-noeth-group-loc-embed}. \end{proof} Recall that the \textbf{Hirsch length} $h(G)$ of a polycyclic group $G$ is the number of infinite cyclic factors appearing in a finite series of $G$ with cyclic factors. The following result was proven in \cite{Dranishnikov-Smith}. \begin{thm} \label{thm-poly-asdim-hirsch} If $G$ is a polycyclic group, then $\asdim(G) = h(G)$. \end{thm} \begin{cor} \label{cor-poly-asdimX} Let $G$ be a polycyclic group that is not virtually abelian. Then $\asdim(G,X) \geq 3$ for every faithful $G$-set $X$. \end{cor} \begin{proof} Let $H$ be a subgroup of $G$ that is isomorphic either to $H_3(\Z)$ or to a strongly irreducible semi-direct product $\Z^k \rtimes \Z$ with $k \geq 2$. According to Theorem \ref{thm-poly-asdim-hirsch} we have $\asdim(H) = 3$ if $H$ is $H_3(\Z)$ and $\asdim(H) = k+1$ if $H$ is $\Z^k \rtimes \Z$. In both cases $\asdim(H) \geq 3$. Let $X$ be a faithful $G$-set. The subgroup $H$ being non-foldable by Proposition \ref{prop-expand-subgroup-poly}, Proposition \ref{p-asdim-non-foldable} then implies $\asdim(G,X) \geq \asdim(H,d_G) = \asdim(H) \geq 3$. In the middle equality we have used that the restriction of the word metric $d_G$ to $H$ is coarsely equivalent to the word metric on $H$, and $\asdim$ is an invariant of coarse equivalence. \end{proof} \section{Strongly irreducible extensions and groups of finite rank} \label{s-finite-rank} The goal of this section is to prove Theorem \ref{t-strongly-irreducible}, which gives a criterion to find non-foldable subsets in a group $G$ that can be written as an extension $1\to N\to G\to Q\to 1$, where $N$ is a nilpotent group of finite rank and the action of $Q$ satisfies a certain irreducibility condition. As an application, we will prove Theorem \ref{thm-intro-prufer} from the introduction. \subsection{Preliminaries on nilpotent groups} Before stating the main results of the section we need to recall some preliminaries on nilpotent groups, based on Malcev theory.
A group $G$ is \textbf{divisible} if for every $g\in G$ and every integer $n>0$ there is $h\in G$ such that $h^n=g$. Assume that $\mk$ is a nilpotent Lie algebra over $\Q$. Then $\mk$ can be turned into a group with group law given by the Baker-Campbell-Hausdorff formula. The resulting group is a divisible torsion-free nilpotent group. The following theorem of Malcev \cite{Malcev49} is a converse to this construction (see also \cite{Stewart} for a more algebraic treatment). \begin{thm}[Malcev]\label{t-Malcev-divisible} Let $\mk$ be a torsion-free divisible nilpotent group. Then $\mk$ can be endowed with a unique structure of Lie algebra over $\Q$ such that the group law on $\mk$ coincides with the group law determined by the Baker-Campbell-Hausdorff formula. Moreover, every homomorphism between divisible nilpotent groups is also a homomorphism of Lie algebras, and vice versa. \end{thm} In the sequel, whenever $\mk$ is a torsion-free divisible nilpotent group, we will consider it also as a Lie algebra $\mk$ without mention. By the above theorem the Lie subalgebras of $\mk$ are precisely the divisible subgroups of $\mk$. The Lie bracket will be denoted $\llbracket\cdot, \cdot \rrbracket$, the notation $[\cdot, \cdot]$ being reserved for commutators in groups. Given a subset $S\subset \mk$, we will denote by $\Lie(S)$ the Lie subalgebra generated by $S$, so $\Lie(S)$ is the smallest divisible subgroup of $\mk$ containing $S$. We keep the notation $\langle S \rangle$ for the subgroup generated by $S$. The lower central series of $\mk$ as a Lie algebra is denoted $\gamma_i\mk$; this does not lead to confusion as it coincides with the lower central series of $\mk$ as a group. The abelianization of $\mk$ is denoted $\mk^{ab}$. \begin{lem} \label{l-nilpotent-abelianisation} Let $\mk$ be a nilpotent Lie algebra and $S\subset \mk$ be a set whose projection to $\mk^{ab}:=\mk/\llbracket\mk, \mk\rrbracket$ contains a linear basis of $\mk^{ab}$. Then $\Lie(S)=\mk$. \end{lem} \begin{proof} Denote by $\pi_i\colon \gamma_{i} \mk\to \gamma_{i}\mk/\gamma_{i+1}\mk$ the quotient projection. Set $\hk=\Lie(S)$. It is enough to show that $\pi_i(\hk\cap \gamma_i\mk)=\gamma_i\mk/\gamma_{i+1}\mk$ for every $i$. By assumption, this holds for $i=1$. Assume that it holds for all $j<i$. Fix $X\in \gamma_{i-1}\mk$ and $Y\in \mk$. By the induction hypothesis we can find $\tilde{X}\in \hk\cap \gamma_{i-1}\mk$ and $\tilde{Y}\in\hk$ such that $\pi_{i-1}(\tilde{X})=\pi_{i-1}(X)$ and $\pi_1(\tilde{Y})=\pi_1(Y)$. Then we have $\llbracket\tilde{X}, \tilde{Y}\rrbracket\in \hk\cap \gamma_i\mk$ and $\pi_i(\llbracket\tilde{X}, \tilde{Y}\rrbracket)=\pi_i(\llbracket X, Y\rrbracket)$. Since $\gamma_i\mk$ is generated by the brackets $\llbracket X, Y\rrbracket$ with $X\in \gamma_{i-1}\mk$ and $Y\in \mk$, the claim follows. \qedhere \end{proof} For a proof of the following result, we refer to \cite[2.1.1]{Lennox-Rob}. \begin{thm}[Rational Malcev completion] \label{t-Malcev-completion} Let $N$ be a torsion-free nilpotent group. Then there exists a divisible torsion-free nilpotent group $\mk_N$ and an injective group homomorphism $\iota \colon N\to \mk_N$ such that every element of $\mk_N$ has a power in $\iota(N)$. Moreover, if $(\mk', \iota')$ is another pair with this property, then there exists an isomorphism $\varphi\colon \mk'\to \mk_N$ such that $\varphi\circ \iota'=\iota$. \end{thm} The group $\mk_N$ from Theorem \ref{t-Malcev-completion} is called the \textbf{rational Malcev completion} of $N$.
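To fix ideas, we record a standard instance of the two previous theorems; it is meant only as an illustration and is not used in the sequel. \begin{example} Let $N=H_3(\Z)$ be the Heisenberg group, viewed as the group of upper unitriangular $3\times 3$ matrices with integer entries, and let $H_3(\Q)$ denote the corresponding group with rational entries. A direct computation shows that every element of $H_3(\Q)$ has a power lying in $H_3(\Z)$, so the inclusion $H_3(\Z)\hookrightarrow H_3(\Q)$ is a rational Malcev completion of $H_3(\Z)$. The Lie algebra structure on $H_3(\Q)$ provided by Theorem \ref{t-Malcev-divisible} is the three-dimensional Heisenberg Lie algebra over $\Q$, spanned by elements $X, Y, Z$ with $\llbracket X, Y\rrbracket=Z$ and $Z$ central. \end{example}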
In the sequel we will drop the embedding $\iota$ from the notation and consider $N$ as a subgroup of $\mk_N$. When $N$ is abelian, $\mk_N$ is simply $N\otimes \Q$. Note that it follows from Theorem \ref{t-Malcev-completion} that every automorphism of $N$ extends to an automorphism of $\mk_N$. If $H$ is a subgroup of a group $G$, we denote by $I_G(H)$ the set of elements $g \in G$ such that $g^n \in H$ for some $n \geq 1$. The set $I_G(H)$ is called the \textbf{isolator} of $H$ in $G$. When working with the rational Malcev completion it is useful to keep in mind the following facts, see \cite[\S 2.3]{Lennox-Rob}. \begin{lem}\label{lem-isolator-elementary} Let $N$ be a nilpotent group and $H$ a subgroup of $N$. Then: \begin{enumerate}[label=\roman*)] \item $I_N(H)$ is a subgroup of $N$. \item If $N$ is divisible, then so is $I_N(H)$. \item If $N$ is finitely generated, then $H$ has finite index in $I_N(H)$. \end{enumerate} \end{lem} \begin{prop}\label{p-isolator} Let $\mk$ be a divisible torsion-free nilpotent group. \begin{enumerate}[label=\roman*)] \item \label{i-lie-span} If $H$ is a subgroup of $\mk$, then $\Lie(H) = \operatorname{Vect}(H) = I_\mk(H)$. \item If $H_1, H_2$ are finitely generated subgroups of $\mk$ such that $\Lie(H_1)=\Lie(H_2)$, then $H_1$ and $H_2$ are commensurable. \end{enumerate} \end{prop} \begin{proof} $I_\mk(H)$ is a subgroup of $\mk$ by Lemma \ref{lem-isolator-elementary}, and we have the inclusions $H\le I_\mk(H) \le \operatorname{Vect}(H) \le \Lie(H)$. But since $I_\mk(H)$ is divisible, Theorem \ref{t-Malcev-divisible} implies that $I_\mk(H)$ is a Lie subalgebra of $\mk$. Since $I_\mk(H)$ contains $H$, we must have $I_\mk(H) =\Lie(H)$. For the second statement, since each $H_i$ is finitely generated, $H_1 \cap H_2$ has finite index in $I_{H_i}(H_1 \cap H_2)$ by Lemma \ref{lem-isolator-elementary}. Now since $\Lie(H_1)=\Lie(H_2)$ we have $I_{H_i}(H_1 \cap H_2) = H_i$, so $H_1 \cap H_2$ is indeed of finite index in both $H_1$ and $H_2$. \end{proof} Assume that $N$ is a torsion-free nilpotent group. Then the derived subgroup $[N, N]$ is always contained in $\llbracket \mk_N, \mk_N\rrbracket$, but in general it might be strictly smaller than $N\cap \llbracket \mk_N, \mk_N\rrbracket$: indeed $N^{ab}$ may have non-trivial torsion, while $\mk_N/\llbracket \mk_N, \mk_N\rrbracket$ is a torsion-free group. For later use we record the following lemma, which clarifies the difference. \begin{lem}\label{l-malcev-ab} Let $N$ be a torsion-free nilpotent group, with abelianization map $\pi_{ab}\colon N\to N^{ab}$. Then $\pi_{ab}^{-1}(T(N^{ab})) =N\cap \llbracket \mk_N, \mk_N\rrbracket$. In particular we have $N^{ab}/T(N^{ab}) \simeq N/ (N\cap \llbracket \mk_N, \mk_N\rrbracket)$ and $N^{ab}\otimes \Q\simeq \mk_N/\llbracket \mk_N, \mk_N\rrbracket$.\end{lem} \begin{proof} Write $T=T(N^{ab})$ and $A = N^{ab}/T$. The canonical projection $p\colon N\to A$ extends to an epimorphism $ \mk_N \to \mk_A$ \cite[Cor.\ 2.41]{Baumslag-lecturenotes}, which must factor through $\mk_N/\llbracket\mk_N, \mk_N\rrbracket$. In particular $p$ descends to a map $p'\colon N/(N\cap \llbracket\mk_N, \mk_N\rrbracket)\to A$. But since $N/(N\cap \llbracket\mk_N, \mk_N\rrbracket)$ is a torsion-free abelian quotient of $N$ and $A$ is the largest such quotient, the map $p'$ is an isomorphism. Moreover since $\mk_N/\llbracket \mk_N, \mk_N\rrbracket$ is the rational Malcev completion of $N/(N\cap \llbracket\mk_N, \mk_N\rrbracket)$, $p'$ extends to an isomorphism $\mk_N/\llbracket \mk_N, \mk_N\rrbracket \to \mk_A$.
Since $\mk_A \simeq A \otimes \Q \simeq N^{ab}\otimes \Q$, the statement follows. \qedhere \end{proof} \subsection{Non-foldable subsets via strong irreducibility} \label{subsec-strong-irr} Let $\corps$ be a field. Recall that a subgroup $G \subset \GL(n, \corps)$ is \textbf{irreducible} if $G$ does not preserve any proper non-trivial subspace of $\corps^n$, and \textbf{strongly irreducible} if $G$ does not preserve any finite union of non-trivial proper subspaces. Equivalently $G$ is strongly irreducible if every finite index subgroup of $G$ is irreducible. By extension we will say that a linear representation $\rho\colon G\to \GL(n,\corps)$ is (strongly) irreducible if its image is so. If $G$ is a subgroup of $\GL(n, \corps)$, we denote by $\overline{G}$ its Zariski closure, and by $\overline{G}\,^0$ the connected component of the identity in $\overline{G}$ with respect to the Zariski topology. Recall that $\overline{G}\,^0$ can be equivalently defined as the unique maximal Zariski-closed subset of $\overline{G}$ which contains the identity and which cannot be written as a finite union of proper closed subsets (not necessarily disjoint). The group $\overline{G}\,^0$ is a closed finite index subgroup of $\overline{G}$ and is contained in every closed finite index subgroup of $\overline{G}$. We will need the following lemma, which may be compared with \cite[Lemma 3.13]{Glas-IRS}. \begin{lem}\label{l-strongly-irreducible} Suppose that $G\le \GL(n, \corps)$ is strongly irreducible. Then for every finite subset $\Sigma\subset \corps^n\setminus\{0\}$ of non-zero vectors, and every finite collection $V_1, \cdots, V_m$ of proper subspaces of $\corps^n$, there is $g\in G$ such that $g(\Sigma)\cap V_i=\varnothing$ for every $i=1,\cdots, m$. \end{lem} \begin{proof} Note that $\overline{G}$ is also strongly irreducible as it contains $G$, and hence $\overline{G}\,^0$ is irreducible. We argue by contradiction and assume that the conclusion fails for $\Sigma=\{w_1,\cdots, w_\ell\}$ and subspaces $V_1,\cdots, V_m$. Write $Y_{i, j}=\{g\in \GL(n, \corps)\colon g(w_i)\in V_j\}$ for $i=1,\cdots, \ell$ and $j=1,\cdots, m$, so that $G\subset \cup_{i, j} Y_{i, j}$. Since every set $Y_{i, j}$ is Zariski-closed, we have $\overline{G}\subset \cup_{i, j} Y_{i, j}$, and thus there exists $i, j$ such that $\overline{G}\,^0\subset Y_{i, j}$. In particular the linear span of $\overline{G}\,^0w_i$ is contained in $V_j$. So we have found a proper non-trivial $\overline{G}\,^0$-invariant subspace, contradicting that $\overline{G}\,^0$ is irreducible. \qedhere \end{proof} \begin{defin} Assume that $G$ is a group which can be written as an extension \begin{equation}\label{e-extension} 1\to N\to G\to Q\to 1.\end{equation} The conjugation action of $Q$ on $N^{ab}$ gives rise to a linear representation of $Q$ on the vector space $H_1(N, \Q)=N^{ab}\otimes \Q$ over $\Q$. We say that the extension \eqref{e-extension} is \textbf{strongly irreducible} if $\dim_\Q(N^{ab}\otimes \Q)<\infty$ and the associated linear representation $\rho\colon Q\to \GL(N^{ab}\otimes \Q)$ is strongly irreducible. \end{defin} We are now ready to state the main result of this section. \begin{thm}\label{t-strongly-irreducible} Let $G$ be a group that can be written as a strongly irreducible extension \[1\to N\to G\to Q\to 1,\] where $N$ is a nilpotent group. Let $\mathcal{L}\subset N$ be a subset whose projection to $N^{ab}/T(N^{ab})$ is injective. Then $\mathcal{L}$ is a non-foldable subset of $G$.
\end{thm} \begin{proof} In the sequel we fix a finite subset $P$ of $N$ such that the image of every element of $P$ in $N^{ab}/T(N^{ab})$ is non-trivial, and we aim to show $\bigcap_{H\in S_G(P, G)} H\neq \{1\}$. By Lemma \ref{l-non-foldable-confined} this is equivalent to the conclusion of the theorem. Set $M:=N/T(N)$, and let $\mk:=\mk_{M}$ be the rational Malcev completion of $M$. By Lemma \ref{l-malcev-ab} we have an isomorphism $\left(N^{ab}/T(N^{ab})\right)\otimes \Q \simeq \mk^{ab}$, so that the present assumption tells us that the representation $\rho\colon Q\to \GL(\mk^{ab})$ is strongly irreducible. Denote by $\pi\colon N\to \mk$ the composition of the quotient map from $N$ to $M$ and of the inclusion $M\hookrightarrow \mk$, and by $\pi^{ab}$ the post-composition of $\pi$ with $\mk \to \mk^{ab}$. Write $d=\dim_\Q (\mk^{ab})$. We construct inductively a family of subsets $P_1, P_2, \cdots, P_k$ with $k \leq d$ with the following properties: \begin{itemize} \item for each $i \leq k$ there exists $g_i \in G$ such that $P_i = g_i P g_i^{-1}$; \item for every $\sigma = (h_1, \cdots, h_k) \in P_1 \times \cdots \times P_k$, the subspace $W_\sigma = \operatorname{Vect} (\pi^{ab}(h_1), \cdots, \pi^{ab}(h_k))$ is a $k$-dimensional subspace of $\mk^{ab}$. \end{itemize} For $k = 1$ we set $P_1=P$. Since $\pi^{ab}(h)$ is non-zero for every $h \in P$, the second condition above holds. Assume that for $k < d$ we have constructed $P_1, \cdots, P_k$ with the desired properties. When $\sigma$ ranges over $P_1 \times \cdots \times P_k$, the subspaces $W_\sigma$ form a finite collection of proper subspaces of $\mk^{ab}$. Since $\rho\colon Q\to \GL(\mk^{ab})$ is strongly irreducible and since the subset $P$ is finite, according to Lemma \ref{l-strongly-irreducible} we can find an element $q\in Q$ such that $\rho(q)(\pi^{ab}(P)) \cap W_\sigma=\varnothing$ for all $\sigma \in P_1\times \cdots \times P_k$. Let $g_{k+1}\in G$ be a preimage of $q$ in $G$, and set $P_{k+1}= g_{k+1} P g_{k+1}^{-1}$, so that $\pi^{ab}(P_{k+1})=\rho(q)(\pi^{ab}(P))$. Then by construction the $\pi^{ab}$-image of every $(k+1)$-tuple $(h_1,\cdots, h_{k+1})\in P_1\times \cdots \times P_{k+1}$ generates a $(k+1)$-dimensional subspace of $\mk^{ab}$. Continuing in this way up to $d$, we arrive at finite subsets $P_1, \cdots, P_d$ such that for any choice of $h_1\in P_1,\cdots, h_d\in P_d$, the elements $\pi^{ab}(h_1), \ldots, \pi^{ab}(h_d)$ form a basis of $\mk^{ab}$. Hence by Lemma \ref{l-nilpotent-abelianisation} the elements $\pi(h_1), \ldots, \pi(h_d)$ generate $\mk$ as a Lie algebra. We write $\Sigma = P_1\times \cdots \times P_d$, and for $\sigma=(h_1,\ldots, h_d)\in \Sigma$, we denote by $H_\sigma$ the subgroup of $N$ generated by $h_1,\cdots, h_d$. Let $H\in S_G(P, G)$. Since $H$ intersects all conjugates of $P$, in particular $H$ intersects all the sets $P_i$. Hence for every $H\in S_G(P, G)$ there exists $\sigma \in \Sigma$ such that $H$ contains $H_\sigma$. Hence in order to terminate the proof it is enough to justify that $\bigcap_\sigma H_\sigma$ is non-trivial. Since all the subgroups $H_\sigma$ are finitely generated and satisfy $\Lie(\pi(H_\sigma))=\mk$, all the subgroups $\pi(H_\sigma)$ are commensurable by Proposition \ref{p-isolator}. So $\bigcap_\sigma \pi(H_\sigma)$ has finite index in each of them, and hence in particular is infinite. Let $\Delta=\langle P_1, \cdots, P_d\rangle$. Being a finitely generated nilpotent group, $\Delta$ is virtually torsion-free.
Let $\Delta_0$ be a finite index subgroup of $\Delta$ such that $\Delta_0\cap T(N)=\{1\}$. So the restriction of $\pi$ to $\Delta_0$ is injective. For $\sigma\in \Sigma$ set $K_\sigma:=H_\sigma \cap \Delta_0$. Since $K_\sigma$ has finite index in $H_\sigma$, we still have that $\Lie(\pi(K_\sigma))=\Lie(\pi(H_\sigma))=\mk$ by Proposition \ref{p-isolator}. Thus the same argument as before tells us that $\bigcap_\sigma \pi(K_\sigma)$ is non-trivial. Since $\pi$ is injective on $\Delta_0$, this shows that $\bigcap_\sigma K_\sigma$ is non-trivial, and the proof is complete. \qedhere \end{proof} \subsection{Solvable groups of finite rank} The goal of this part is to prove Theorem \ref{t-finite-rank-exp}. For this we need some preliminaries on solvable groups of finite rank, which will allow to reduce the proof of this theorem to the case of a group $G$ falling into the setting of Theorem \ref{t-strongly-irreducible}. Given a group $G$, we denote $\Res(G)$ the \textbf{finite residual} of $G$, which is defined as the intersection of all subgroups of finite index of $G$. Note that $\Res(G)$ is a normal subgroup of $G$, and $G/\Res(G)$ is the largest residually finite quotient of $G$. We call a group \textbf{quasi-cyclic} if it is isomorphic to $\Z[\frac{1}{p}]/\Z$ for some prime $p$. The following proposition summarizes the main structural properties of solvable groups of finite rank. Proofs can be found in \cite[\S 5]{Lennox-Rob}. \begin{thm}\label{thm-preliminaries-prufer} Let $G$ be a finitely generated solvable group of finite rank. Set $R:=\Res(G)$, $N:=\Fit(G)$. Then the following hold: \begin{enumerate}[label=(\roman*)] \item $N$ is nilpotent and $Q = G/N$ is virtually abelian. \item $R$ is a direct product of finitely many quasi-cyclic groups. \item $G/R$ is virtually torsion-free. Moreover it is linear over $\Q$. \end{enumerate} \end{thm} The following is a classical consequence of a criterion of Hall. We include a proof for completeness. \begin{prop} \label{p-finite-rank-non-virt-nilp} Let $G$ be a finitely generated solvable group of finite rank, and assume that $N$ is a nilpotent normal subgroup of $G$ such that $Q=G/N$ is virtually abelian. Set $V=N^{ab}\otimes \Q$ and let $\rho\colon Q\to \GL(V)$ be the associated linear representation. Then the following are equivalent. \begin{enumerate}[label=\roman*)] \item \label{i-virt-nilp} The group $G$ is virtually nilpotent. \item \label{i-roots-unity} There exists $n\ge 0$ such that the complex eigenvalues of $\rho(q)$ are $n$th roots of unity for every $q\in Q$. \end{enumerate} \end{prop} \begin{proof} \ref{i-virt-nilp} $\Rightarrow$ \ref{i-roots-unity}. Let $G_0$ be a finite index subgroup of $G$ containing $N$ that is nilpotent, and let $Q_0$ be its image in $Q$. Write $T=T(N^{ab})$ and $A=N^{ab}/T$, so that $V\simeq A \otimes \Q$. Let $\tilde{T}$ be the preimage of $T$ in $N$. Then $\tilde{T}$ is normal in $G_0$ and $H=G_0/\tilde{T}$ satisfies $A\unlhd H$ and $H/A=Q_0$. Then there exists $m$ such that for every $h\in H$ and $a\in A$ the $m$-fold iterated commutator $[h, a]_m:=[h, [h,\cdots, [h,a]\cdots]]$ is trivial. But in $V= A\otimes \Q$ we have the equality $[h, a]_m \otimes 1=(\rho(q)-1)^m v$ where $ v=a\otimes 1$ and $q$ is the projection of $h$ to $Q$. It follows that $\rho(Q_0)$ consists of unipotent elements, and since every element of $Q$ has a power in $Q_0$, the claim follows. For the converse, suppose first that \ref{i-roots-unity} $\Rightarrow$ \ref{i-virt-nilp} holds true in the case where $N$ is abelian. 
Since assumption \ref{i-roots-unity} depends on $N^{ab}$ rather than $N$, the group $G/N'$ still satisfies \ref{i-roots-unity}. Hence by the current assumption we infer that $G/N'$ is virtually nilpotent. Since $N$ is nilpotent, we deduce from a result of Hall \cite[1.2.17]{Lennox-Rob} that $G$ is also virtually nilpotent. Hence it is enough to see that \ref{i-roots-unity} $\Rightarrow$ \ref{i-virt-nilp} is true when $N$ is abelian. In that case $G$ is virtually metabelian, and hence residually finite. So by Theorem \ref{thm-preliminaries-prufer} $G$ is virtually torsion-free. Upon modding out by the torsion subgroup of $N$, we can assume that $N$ is torsion-free. Also upon passing to a finite index subgroup, \ref{i-roots-unity} tells us that every element of $Q$ is unipotent. By a standard argument the group $Q$ acts unipotently on $V = N\otimes \Q$, and it follows that $G$ is nilpotent. \qedhere \end{proof} In the sequel we say that a linear automorphisms $f\in \GL(V)$ is strongly irreducible if $\langle f\rangle$ is a strongly irreducible subgroup of $\GL(V)$, i.e. if $f^n$ does not preserve any non-zero proper subspace of $V$ for all $n\neq 0$. The following is a basic lemma from linear algebra. \begin{lem}\label{l-strongly-irreducible-power} Consider a finite dimensional vector space $V$ over $\Q$, and $f \in \GL(V)$ such that the complex eigenvalues of $f$ are not all roots of unity. Then there exists $m>0$ and a non-zero subspace $W\subset V$ such that $W$ is invariant by $f^m$ and the restriction of $f^m$ to $W$ is strongly irreducible and has no root of unity as a complex eigenvalue. \end{lem} \begin{proof} Let $p\in \Q[T]$ be the minimal polynomial of $f$, and $p=p_1^{r_1}\cdots p_k^{r_k}$ the factorisation of $p$ in $\Q[T]$, where the $p_i$s are irreducible and coprime. Then one of the irreducible factors of $p$, say $p_1$, does not admit any root of unity among its complex roots. The subspace $V'=\ker p_1(f)$ is $f$-invariant and non-zero, and the complex eigenvalues of $f|_{V'}$ are precisely the roots of $p_1$, thus none of them is a root of unity. Among pairs $(W, m)$, where $m\ge 1$ and $W\subset V'$ is a non-zero $f^m$-invariant subspace, choose one where $W$ has minimal dimension. Then by construction $f^m|_W$ is strongly irreducible, and since $f|_{V'}$ does not admit any root of unity as a complex eigenvalue, the same holds for $f^m|_W$. \qedhere \end{proof} The following result, which might be of independent interest, will reduce the proof of Theorem \ref{t-finite-rank-exp} to finitely generated solvable groups of finite rank of a particular form. \begin{prop}\label{prop-non-virt-nilp-subgroup} Let $G$ be a finitely generated solvable group of finite rank which is not virtually nilpotent. Then $G$ admits a finitely generated subgroup $H$ such that: \begin{enumerate}[label=\roman*)] \item $H$ is not virtually nilpotent; \item $H$ splits as a semi-direct product $H=M\rtimes \langle t \rangle$, where $M$ is nilpotent; \item the action of $t$ on $M$ induces a strongly irreducible automorphism of $M^{ab}\otimes \Q$. \end{enumerate} \end{prop} \begin{proof} Set $N=\Fit (G)$, $Q=G/N$ and $\rho\colon G\to \GL(N^{ab}\otimes \Q)$ be the associated representation. Recall from Theorem \ref{thm-preliminaries-prufer} that $N$ is nilpotent and $Q$ is virtually abelian. Suppose for a moment that $N$ is torsion-free. Let $\nk$ be the rational Malcev completion of $N$, so that we can identify $N^{ab}\otimes \Q$ with $\nk^{ab}$ as $\Q Q$-modules (see Lemma \ref{l-malcev-ab}). 
Since $G$ is not virtually nilpotent, there exists $q\in Q$ such that $\rho(q)$ has an eigenvalue which is not a root of unity (Proposition \ref{p-finite-rank-non-virt-nilp}). Let $g\in G$ be an element that projects to $q$. The automorphism induced by the conjugation action of $g$ on $N$ extends to a Lie algebra automorphism, denoted $g_*\in \aut(\nk)$. Let $\mathcal{E}$ be the set of pairs $(m, \hk)$, where $m>0$ is an integer and $\hk\subset \nk$ is a $g_*^m$-invariant Lie subalgebra with the property that the linear automorphism of $\hk/\llbracket \hk, \hk\rrbracket$ induced by $g_*^m$ has an eigenvalue which is not a root of unity. Note that $\mathcal{E}\neq \varnothing$ as it contains $(1, \nk)$. Choose a pair $(m, \hk)\in \mathcal{E}$ such that $\dim_\Q \hk$ is minimal. We claim that $g_*^m$ induces a strongly irreducible element of $\GL(\hk/\llbracket \hk, \hk\rrbracket)$. By Lemma \ref{l-strongly-irreducible-power} applied to $g_*^m$, there exist $\ell>0$ and a $g_*^{m\ell}$-invariant subspace $V\subset \hk/\llbracket \hk, \hk\rrbracket$ such that the restriction of $g_*^{m\ell}$ to $V$ is strongly irreducible and has no root of unity as an eigenvalue. Then the preimage $\hk'\subset \hk$ of $V$ is a Lie subalgebra of $\hk$ such that $(m\ell, \hk')\in \mathcal{E}$, so the minimality of $\hk$ implies $\hk'=\hk$. Therefore $V$ is equal to $\hk/\llbracket\hk, \hk\rrbracket$, and $g_*^m$ is strongly irreducible on $\hk/\llbracket \hk, \hk\rrbracket$. We have $\hk = \operatorname{Vect}(\hk \cap N)$ by Proposition \ref{p-isolator}, so there exist $n_1,\ldots,n_k \in N\cap \hk$ that form a basis of the $\Q$-vector space $\hk$. Let $t=g^m$ and $H = \langle n_1,\ldots,n_k, t\rangle = M \rtimes \langle t\rangle$, where $M = \langle t^i n_j t^{-i} : i \in \Z, j = 1,\ldots,k\rangle $ is contained in $N$. Then $\hk$ is isomorphic to the rational Malcev completion of $M$, so by Lemma \ref{l-malcev-ab} we have $M^{ab}\otimes \Q\simeq \hk/\llbracket \hk, \hk\rrbracket$. By Proposition \ref{p-finite-rank-non-virt-nilp} the subgroup $H$ is not virtually nilpotent, and hence $H$ satisfies all the conclusions. Now for the general case, we can apply the previous argument to $G / T(N)$, and deduce the existence of a subgroup $H = M \rtimes \langle t \rangle$ of $G / T(N)$ with $M \leq \Fit(G / T(N)) = \Fit (G)/T(N)$, that satisfies the conclusion. If $\tilde{H}$ is a finitely generated subgroup of $G$ of the form $\tilde{H} = \tilde{M} \rtimes \langle \tilde{t} \rangle$ with $\tilde{M}/T(N) = M$, then we have $\tilde{M}^{ab} \otimes \Q \simeq M^{ab} \otimes \Q$ since $T(N)$ is a torsion group, and it follows that $\tilde{H}$ satisfies the desired properties. \end{proof} \begin{thm} \label{t-finite-rank-exp} Let $G$ be a finitely generated solvable group of finite rank, and suppose that $G$ is not virtually nilpotent. Then $G$ has a Schreier growth gap $\exp(n)$. \end{thm} \begin{proof} Since the group $G$ is not virtually nilpotent, we can apply Proposition \ref{prop-non-virt-nilp-subgroup}. Let $H=M\rtimes \langle t\rangle$ be a subgroup of $G$ as in the conclusion of Proposition \ref{prop-non-virt-nilp-subgroup}. Theorem \ref{t-strongly-irreducible} applies to $H$, and implies that if $\mathcal{L}$ is a subset of $M$ that projects injectively to $A := M^{ab}/T(M^{ab})$, then $\mathcal{L}$ is a non-foldable subset of $H$. A fortiori $\mathcal{L}$ is a non-foldable subset of $G$. Set $\overline{H}:=A\rtimes \langle t\rangle$.
Since $A\otimes \Q=M^{ab}\otimes \Q$ as $\Q[\langle t\rangle]$-modules, Proposition \ref{p-finite-rank-non-virt-nilp} implies that the group $\overline{H}$ remains not virtually nilpotent. Thus $\overline{H}$ has exponential growth. Since $\overline{H}/A\simeq \Z$, it follows that the relative growth of $A$ in $\overline{H}$ is exponential. Choose a finite generating subset $S$ of $H$ and let $\overline{S}$ be its projection to $\overline{H}$. Let $\mathcal{L}\subset M$ be a set-theoretic section of $A$ such that for every $a \in A$, the unique lift of $a$ in $\mathcal{L}$ has minimal length with respect to the word metric defined by $S$ among the elements of $M$ that project to $a$. Since the relative growth of $A$ in $\overline{H}$ is exponential, we have $\relvol_{(G,\mathcal{L})}(n) \simeq \exp(n)$. Since $\mathcal{L}$ is non-foldable according to the first paragraph, Lemma \ref{lem-exp-subset-growth} yields the conclusion. \qedhere \end{proof} \section{Confined subgroups and abelian normal subgroups} The goal of this section is to prove Proposition \ref{p-Neumann-abelien}, a key result which provides a tool to study confined subgroups of a group $G$ admitting an abelian normal subgroup $M$. It will be used later to show that under suitable conditions, a confined subgroup of $G$ must contain a large subgroup of $M$. \begin{notation} Throughout this section we fix once and for all the following notation:\begin{itemize} \item $G$ is a group that can be written as an extension $1\rightarrow M \rightarrow G\rightarrow Q\rightarrow 1$, where $M$ is abelian. We denote by $\pi_Q$ the projection from $G$ to $Q$. \item If $H$ is a subgroup of $G$ and $q\in Q$, we set \[M_{q, H}:=\{m\in M \colon (1-q)m\in H\}.\] \end{itemize} \end{notation} Recall that since $M$ is abelian, $M$ is naturally a $\mathbb{Z}Q$-module. As explained in \S \ref{subsec-notation}, in this setting we use additive notation for $M$. \begin{lem} \label{lem-MqH} Let $M,G,Q$ be as above. For every subgroup $H$ of $G$ and $q\in Q$, the following hold: \begin{enumerate}[label=\roman*)] \item \label{i-Neumann-contained} $M_{q, H}$ is a subgroup of $M$, and $(1-q)M_{q, H}\le H$. \item \label{i-Neumann-submodule} $M_{q, H}$ is a $\Z K$-submodule of $M$, where $K:=C_{\pi_Q(H)}(q)$. In particular if $q\in \pi_{Q}(H)$, then $M_{q, H}$ is a $\Z \langle q\rangle$-submodule of $M$. \end{enumerate} \end{lem} \begin{proof} \ref{i-Neumann-contained} is clear from the definitions. To prove \ref{i-Neumann-submodule}, set $K=C_{\pi_Q(H)}(q)$. Note that $M \cap H$ is normalized by $H$ and thus it is a $\Z \pi_Q(H)$-submodule of $M$. In particular it is a $\Z K$-submodule of $M$. Thus for $f\in \Z K$ and $m\in M_{q, H}$, since $f$ commutes with $1-q$ and $(1-q)m\in M \cap H$ we have $(1-q) fm=f(1-q)m\in M\cap H$, showing that $fm\in M_{q, H}$. Thus $M_{q, H}$ is a $\Z K$-submodule. If $q\in \pi_Q(H)$, then clearly $q\in K$, so that $M_{q, H}$ is a $\Z \langle q \rangle $-submodule. \end{proof} The proof of the following proposition uses a well-known lemma of B.H.\ Neumann \cite{Neum54}, stating that if a group $G=\cup_{i=1}^r g_iH_i$ is the union of $r$ cosets of subgroups, then at least one of the subgroups $H_i$ has finite index at most $r$. An argument of similar spirit appears in the proof of Proposition 3.8 in \cite{LBMB-sub-dyn}. \begin{prop} \label{p-Neumann-abelien} Let $M,G,Q$ be as above. Let $P$ be a finite subset of $G\setminus \{1\}$, and set $r:=|P|$.
Then for every $H\in S_G(P, M)$, there exists $q\in \pi_Q(P) \cap \pi_Q(H)$ such that $M_{q, H}$ is a finite index subgroup of $M$, and $(M : M_{q, H}) \leq r$. \end{prop} \begin{proof} Fix $g\in P$. Since all conjugates of $g$ by elements of $M$ have the same projection to $Q$, if $g$ has at least one such conjugate inside $H$, then $\pi_Q(g)\in \pi_Q(H)$. This observation ensures that since $P$ is confining for $(H, M)$, the subset $P_H:=\{g\in P\colon \pi _Q(g)\in \pi_Q(H)\}$ is already confining for $(H, M)$. Now for $g\in P_H$, we write $Y_g=\{m\in M \colon mgm^{-1}\in H\}$. That $P_H$ is confining for $(H, M)$ means that $M=\bigcup_{g\in P_H} Y_g$. Let $L_g$ be the subgroup of $M$ generated by the differences of elements of $Y_g$. Then $Y_g$ is contained in a coset of $L_g$ and thus, by Neumann's Lemma, there exists $g_0\in P_H$ such that $L_{g_0}$ has index at most $|P_H| \le r$ in $M$. So if we denote $q=\pi_Q(g_0)$, then in order to terminate the proof it is enough to check that $L_{g_0}\le M_{q, H}$. Let $m, n\in Y_{g_0}$. Since $g_0^{-1}m^{-1}ng_0$ belongs to $M$ and $M$ is abelian, we have the equality \begin{equation} \label{e-a-delta-gamma} m(g_0^{-1}m^{-1}ng_0)n^{-1}=mn^{-1}(g_0^{-1}m^{-1}ng_0)=(1-q)(m-n),\end{equation} where in the first two terms we use multiplicative notation within the group $G$, while in the right-most term we see $M$ as a $\Z Q$-module and use additive notation. Since the first term in \eqref{e-a-delta-gamma} is the product of $mg_0^{-1}m^{-1}$ and $ng_0n^{-1}$, which both belong to $H$, we see that $m-n\in M_{q, H}$ for $m, n\in Y_{g_0}$. Thus $L_{g_0}\le M_{q, H}$, and the proof is complete. \qedhere \end{proof} \section{Metabelian groups} \label{sec-metab} \subsection{Non-foldable subsets in metabelian groups} \label{subsec-metab-non-foldable-tuples} The goal of this part is to prove Theorem \ref{thm-explicit-metab}, which will be our main tool to study growth of actions of metabelian groups. We need some preliminaries. If $Q$ is a finitely generated abelian group and $N$ is a $\Z Q$-module, we denote by $C_Q(N)$ the centralizer of $N$ in $Q$, which is the set of $q \in Q$ such that $qn = n$ for all $n \in N$. By definition we have $C_Q(N) = Q \cap (1 + \ann(N))$ (the intersection is taken in $\Z Q$), where $\ann(N)$ is the annihilator of $N$ in $\Z Q$. In particular $\ann(N) = \left\lbrace 0 \right\rbrace$ implies $C_Q(N) = \left\lbrace 1 \right\rbrace$. \begin{lem} \label{lem-virt-ab-FC} Let $Q = \langle q \rangle$ be a cyclic group, and $M$ a finitely generated $\Z Q$-module. Assume that $q$ centralizes a subgroup $N$ of finite index in $M$. Then there is $n \geq 1$ such that $q^n$ centralizes $M$. \end{lem} \begin{proof} $N$ is finitely generated as a $\Z Q$-module, and $Q$ centralizes $N$, so $N$ is finitely generated as an abelian group. Since $N$ has finite index in $M$, $M$ is also a finitely generated abelian group. The automorphism induced by $q$ on $M/T(M)$ centralizes the finite index subgroup $NT(M)/T(M)$ of $M/T(M)$, and hence is trivial since $M/T(M)$ is a finitely generated free abelian group. So the conclusion holds with $n$ equal to the order of the automorphism induced by $q$ on the finite group $T(M)$. \qedhere \end{proof} \begin{lem} \label{lem-finite-number-submodules} Let $Q$ be a finitely generated group, and $M$ a finitely generated $\Z Q$-module. Fix $r \geq 1$. Let $S_{r}$ be the set of $\Z Q$-submodules $L$ of $M$ such that $L$ has index at most $r$ in $M$. Then $S_{r}$ is finite.
\end{lem} \begin{proof} The semi-direct product $G = M \rtimes Q $ is a finitely generated group, and for every $L \in S_{r}$ we have that $L \rtimes Q $ is a subgroup of $G$ of finite index at most $r$. Since $G$ is finitely generated it has only finitely many subgroups of index at most $r$. It follows that $S_{r}$ is finite. \end{proof} \begin{remark} When $N$ is a $\Z Q$-module, the terminology \enquote{$N$ is torsion-free} might be ambiguous, as this might refer to the structure of abelian group or the module structure. Throughout the article, when we use this terminology we always mean that $N$ is torsion-free as an abelian group. \end{remark} \begin{lem} \label{lem-trick-non-zero-module} Let $Q$ be an abelian group, and $N$ a finitely generated $\Z Q$-module. Let $q_0 \in Q$ be such that at least one of the following conditions holds: \begin{enumerate}[label=\roman*)] \item the abelian group $N$ is torsion-free and $q_0\notin C_Q(N)$; \item $q_0^n\notin C_Q(N)$ for all $n \geq 1$. \end{enumerate} For $r\ge 1$, let $S_{r,q_0}$ be the set of $\Z \langle q_0 \rangle $-submodules $L$ of $N$ such that $L$ has index at most $r$ in $N$, and let $N_{r,q_0} := \bigcap_{L \in S_{r,q_0}} L$. Then $(q_0-1) N_{r, q_0}\neq \left\lbrace 0 \right\rbrace$. \end{lem} \begin{proof} We first consider the case where $N$ is torsion-free. Since $k N \subset L$ for every $L \in S_{r,q_0}$, where $k = r!$, we have $k N \subset N_{r,q_0}$. Therefore $(q_0-1) N_{r,q_0} $ contains $(q_0-1) kN = k (q_0-1) N$, which is non-zero since $(q_0-1) N$ is non-zero and $N$ is torsion-free. So in that case the conclusion holds. We shall now assume that $q_0^n\notin C_Q(N)$ for all $n \geq 1$. We denote by $Q_0$ the subgroup of $Q$ generated by $q_0$. Since $N$ is a finitely generated $\Z Q$-module, there exist $x_1,\ldots,x_k \in N$ such that $N = \Z Q x_1 + \ldots + \Z Q x_k$. For $1 \leq p \leq k$, let $J_p$ be the $\Z Q_0$-submodule generated by $x_p$. We shall first prove that there exists $p$ such that $(q_0^n-1) J_p \neq \left\lbrace 0 \right\rbrace$ for all $n \geq 1$. Suppose that this is not the case, i.e.\ for all $p$ there is $n_p \geq 1$ with $(q_0^{n_p}-1) J_p = \left\lbrace 0 \right\rbrace$. Then we have $(q_0^{n}-1) J_p = \left\lbrace 0 \right\rbrace$ for all $p$, where $n$ is the least common multiple of $(n_1,\ldots,n_k)$. Hence, since $Q$ is abelian, we deduce that we have $(q_0^{n}-1) \Z Q x_p = \Z Q (q_0^{n}-1) x_p = \left\lbrace 0 \right\rbrace$ for all $p$, and hence $(q_0^{n}-1) N = \left\lbrace 0 \right\rbrace$. This contradicts the assumption. So in the sequel we fix $p$ such that $(q_0^n-1) J_p \neq \left\lbrace 0 \right\rbrace$ for all $n \geq 1$. Let $S_{r,q_0}(J_p)$ be the set of $\Z Q_0$-submodules $L$ of $J_p$ such that $L$ has index at most $r$ in $J_p$, and let $N_{r,q_0}(J_p) := \bigcap_{L \in S_{r,q_0}(J_p)} L$. Since any $\Z Q_0$-submodule of $N$ of index at most $r$ in $N$ intersects $J_p$ along a $\Z Q_0$-submodule of $J_p$ of index at most $r$ in $J_p$, it follows that $N_{r,q_0}(J_p) \leq N_{r,q_0}$. Hence to prove the desired result it is enough to prove that $(q_0-1) N_{r,q_0}(J_p) \neq \left\lbrace 0 \right\rbrace$. Now Lemma \ref{lem-finite-number-submodules} ensures that the set $S_{r,q_0}(J_p)$ is finite, and hence $N_{r,q_0}(J_p)$ has finite index in $J_p$. Therefore Lemma \ref{lem-virt-ab-FC} implies that if $(q_0-1) N_{r,q_0}(J_p) = \left\lbrace 0 \right\rbrace$, then there must exist $n \geq 1$ such that $(q_0^n-1)J_p = \left\lbrace 0 \right\rbrace$.
By the definition of $p$ this cannot happen. So $(q_0-1) N_{r,q_0}(J_p) \neq \left\lbrace 0 \right\rbrace$, and the conclusion also holds in that case. \end{proof} We recall the following terminology from module theory. \begin{defin} A $\Z Q$-module $N$ is \textbf{uniform} if for all non-zero submodules $N_1,N_2$ of $N$, the submodule $N_1 \cap N_2$ is non-zero. \end{defin} \begin{prop} \label{prop-construct-expand-metab} Let $G$ be a group that lies in a short exact sequence $1\to M \to G \to Q\to 1$, where $M,Q$ are abelian. Let $N$ be a finitely generated uniform $\Z Q$-submodule of $M$. Let $P=\{g_1,\cdots, g_r\}\subset G$ be a finite subset of $G$, and set $q_i=\pi_Q(g_i)$. Suppose that for every $i=1,\cdots, r$, at least one of the following conditions holds: \begin{enumerate}[label=\roman*)] \item \label{item-mod-ss-tor} the abelian group $N$ is torsion-free and $q_i\notin C_Q(N)$; \item \label{item-mod-inf-cc} $q_i^n\notin C_Q(N)$ for all $n \geq 1$. \end{enumerate} Let $S_{r, i}$ be the set of $\Z \langle q_i\rangle $-submodules $L$ of $N$ of index at most $r$ in $N$, and set $N_{r, i}= \bigcap _{L \in S_{r,i}} L$. Then $J := \bigcap_{i=1}^r (q_i-1)N_{r,i}$ is a non-zero $\Z Q$-submodule of $M$, and $J$ is contained in $H$ for every $H \in S_G(P,N)$. \end{prop} \begin{proof} We observe that when $i$ is fixed, the set $S_{r,i}$ is globally invariant under $Q$ since $Q$ is abelian. Therefore $N_{r,i}$ is a $\Z Q$-submodule of $N$, and hence so is $(q_i-1)N_{r,i}$. By the assumptions we may apply Lemma \ref{lem-trick-non-zero-module}, which ensures that $(q_i-1)N_{r,i}$ is non-zero for each $i$. Since the module $N$ is uniform by assumption, it follows that $J$ is non-zero. Now given $H \in S_G(P,N)$, Proposition \ref{p-Neumann-abelien} ensures that there exist $i$ and $L \in S_{r, i}$ such that $(q_i-1)L \le H$. So in particular we have $(q_i-1)N_{r,i} \leq H$, and hence $J \leq H$ for every $H \in S_G(P,N)$. \end{proof} \begin{remark} If $M$ is a non-zero finitely generated $\Z Q$-module, $M$ always admits a (non-zero) uniform submodule. This follows from the existence of associated primes (see \S \ref{s-metabelian-torsion-free}). \end{remark} \begin{defin} Let $\pi : G \to Q$ be a group homomorphism. We say that a subset $\mathcal{L}$ of $G$ is a \textbf{lift} of a subset $\mathcal{L}'$ of $Q$ if for every $q \in \mathcal{L}'$ there is a unique $g \in \mathcal{L}$ such that $\pi(g)=q$. \end{defin} \begin{notation} If $A,B$ are subsets of a group $G$, we write $A \cdot B = \left\lbrace a b \, : \, a \in A, b \in B \right\rbrace$. \end{notation} \begin{thm} \label{thm-explicit-metab} Let $G$ be a finitely generated metabelian group that is an extension $1 \to M \to G \to Q \to 1$, where $M$ is abelian and $Q$ is free abelian, and suppose that there exists a submodule $N$ of $M$ such that $N$ is uniform and $C_Q(N) = \left\lbrace 1 \right\rbrace$. Let $\mathcal{L}$ be a lift of $Q$. Then: \begin{enumerate}[label=\roman*)] \item \label{item-non-foldable-gene} $\mathcal{L}$ is a non-foldable subset of $G$. \item \label{item-non-foldable-tf} If moreover $N$ is torsion-free, then for every non-zero element $m$ of $N$, the subset $\mathcal{L} \cdot \mathcal{Z}$ is a non-foldable subset of $G$, where $\mathcal{Z}$ is the cyclic subgroup generated by $m$. \end{enumerate} \end{thm} \begin{proof} In the case where the group $N$ is torsion-free, we fix a non-zero element $m$ of $N$, and denote by $\mathcal{Z}$ the subgroup generated by $m$.
In that case we write $\mathcal{J} = \mathcal{L} \cdot \mathcal{Z}$. In case $N$ admits torsion, we set $\mathcal{J} = \mathcal{L}$. We shall prove that $\mathcal{J}$ is always a non-foldable subset of $G$. Fix a finite subset $\Sigma\subset \mathcal{J}$, and let $P=\{g^{-1}h\colon g, h\in \Sigma, g\neq h\}$. We will prove that there is a non-zero $\Z Q$-submodule $L$ of $N$ such that $L$ is contained in $H$ for every $H\in S_G(P, G)$. This is enough to conclude according to Lemma \ref{l-non-foldable-confined}. Note that since $N$ is a subgroup of $G$, we have the inclusion $S_G(P,G) \subset S_G(P,N)$, so it is enough to find a submodule $L$ contained in $H$ for every $H\in S_G(P, N)$. Suppose first that we make no torsion-free assumption for $N$, so that $\mathcal{J} = \mathcal{L}$. Then all the elements of $P$ have a non-trivial projection to $Q$ since $\mathcal{L}$ is a lift of $Q$. Since in addition $N$ is uniform, we are in position to apply Proposition \ref{prop-construct-expand-metab} to the subset $P$ and the $\Z Q$-submodule $N$. Note that $G$ being finitely generated, $M$ is a finitely generated $\Z Q$-module \cite[11.1.1]{Lennox-Rob}. Since $\Z Q$ is a Noetherian ring \cite[Th.\ 1]{Hall}, it follows that $N$ is a finitely generated $\Z Q$-submodule of $M$. Condition \ref{item-mod-inf-cc} of the proposition is satisfied here because by assumption $C_Q(N) = \left\lbrace 1 \right\rbrace$ and $Q$ is free abelian. The conclusion of Proposition \ref{prop-construct-expand-metab} therefore provides a non-zero $\Z Q$-submodule $L$ of $N$ that is contained in $H$ for every $H \in S_G(P,N)$, as desired. We now deal with the case where $N$ is torsion-free. So here $\mathcal{J} = \mathcal{L} \cdot \mathcal{Z}$. Let $P'$ denote the elements of $P$ that have non-trivial projection to $Q$, and $P''$ the complement of $P'$ in $P$. The elements of $P''$ are precisely the elements of $P$ that belong to $\mathcal{Z}$. Since $P''$ is finite, we may find an integer $s$ such that, if we set $m' = m^s$, then every subgroup of $G$ that intersects $P''$ contains $m'$. Since $H \cap P \neq \emptyset$ for all $H \in S_G(P, N)$, in particular for all such $H$ we have the following alternative: $m' \in H$, or $H \cap P' \neq \emptyset$. Let $Y$ be the set of $H\in S_G(P, N)$ such that $m' \notin H$. Since the group $N$ is abelian and $m' \in N$, the subset $Y$ is invariant under conjugation by $N$. It follows that $H \in S_G(P',N)$ for every $H\in Y$: for every $n \in N$ the conjugate $nHn^{-1}$ again belongs to $Y$, so it intersects $P$ but does not contain $m'$; by the choice of $m'$ it therefore cannot intersect $P''$, and hence it intersects $P'$. As in the previous paragraph we can apply Proposition \ref{prop-construct-expand-metab}, and find a non-zero $\Z Q$-submodule $N_1$ of $N$ which is contained in all $H\in Y$. Let now $Z$ be the set of subgroups $H\in S_G(P, N)$ such that $N_1\not\le H$. By definition we have $Y \cap Z = \emptyset$. Then for every $H\in Z$, we have $m' \in H$ according to the above alternative. Since $N_1$ is a $\Z Q$-submodule, hence a normal subgroup of $G$, the set $Z$ is $G$-invariant and $m'\in H$ for all $H\in Z$, and thus the $\Z Q$-submodule $N_2$ generated by $m'$ satisfies $N_2\le H$ for every $H\in Z$. So every subgroup in $S_G(P, N)$ contains $N_1$ or $N_2$, and thus in all cases contains $N_1 \cap N_2$. Since $N_2$ is non-zero because $m' \neq 0$, we have that $N_1 \cap N_2$ is non-zero since $N$ is uniform. Therefore every subgroup in $S_G(P, N)$ contains the non-zero $\Z Q$-submodule $N_1 \cap N_2$, as desired.
\end{proof} \begin{cor} \label{cor-explicit-metab} Let $G$ be a finitely generated metabelian group that is an extension $1 \to M \to G \to Q \to 1$, where $M$ is abelian and $Q$ is free abelian of rank $d \geq 1$. Suppose that there exists a submodule $N$ of $M$ such that $N$ is uniform and $C_Q(N) = \left\lbrace 1 \right\rbrace$. Then whenever $\gamma_1,\ldots,\gamma_d \in G$ are lifts of generators of $Q$, $(\gamma_1,\ldots,\gamma_d)$ is non-foldable. If moreover $N$ is torsion-free, then $(m,\gamma_1,\ldots,\gamma_d)$ is non-foldable for every non-zero element $m$ of $N$. \end{cor} \begin{proof} This is a direct consequence of Lemma \ref{lem-mapZk-inj} and Theorem \ref{thm-explicit-metab}. \end{proof} In the case of split extensions Theorem \ref{thm-explicit-metab} also implies the following: \begin{cor}\label{c-metab-asdim} Let $G=M\rtimes Q$ be a finitely generated group, where $M$ is abelian and $Q$ is free abelian of rank $d\ge 1$. Suppose that there exists a $\Z Q$-submodule $N$ of $M$ such that $N$ is uniform and $C_Q(N)=1$. Then for every faithful $G$-set $X$ we have $\asdim(G, X)\ge d$. \end{cor} \begin{proof} Theorem \ref{thm-explicit-metab} implies that $Q$ is a non-foldable subset of $G$. Thus the conclusion follows from Proposition \ref{p-asdim-non-foldable}, since $\asdim(Q)=d$. \end{proof} \subsection{First applications} \label{s-metabelian-first-examples} We now proceed to apply Theorem \ref{thm-explicit-metab} to some explicit families of finitely generated metabelian groups. We will appeal to the following trivial lemma. \begin{lem} \label{lem-prime-SI} Let $Q$ be a finitely generated abelian group, and let $\p$ be a prime ideal of $\Z Q$. Consider the $\Z Q$-module $N = \Z Q / \p$. Then the following hold:\begin{enumerate}[label=\roman*)] \item $C_Q(N) = Q \cap (1 + \p)$. \item $N$ is uniform. \item the abelian group $N$ is torsion-free if and only if $\p \cap \Z = \left\lbrace 0\right\rbrace $. \end{enumerate} \end{lem} \begin{proof} The annihilator of $N$ is $\p$, so the first statement is clear. The submodules of $N$ are the ideals of the ring $\Z Q / \p$. If $I,J$ are non-zero ideals, then $I J \leq I \cap J$ and $IJ$ is non-zero since $\Z Q / \p$ is a domain. So $N$ is uniform. The last statement is also clear. \end{proof} \subsubsection{Wreath products} \begin{thm} \label{thm-wreath} Consider the wreath product $G=A\wr \Z^d$, where $A$ is a non-trivial finitely generated abelian group. Then $\Z^d$ is a non-foldable subset of $G$, and if $t\in A$ is an element of infinite order then $\Z^d \cdot \langle t\rangle$ is a non-foldable subset of $G$. In particular: \begin{enumerate}[label=\roman*)] \item \label{i-wreath-i} If $A$ is finite, then $G$ has a Schreier growth gap $n^d$. \item \label{i-wreath-ii} If $A$ is infinite, then $G$ has a Schreier growth gap $n^{d+1}$. \item \label{i-wreath-asdim} Every faithful $G$-set $X$ satisfies $\asdim(G, X)\ge d$. \end{enumerate} Moreover these bounds are sharp. \end{thm} \begin{proof} Set $M=\oplus_{\Z^d} A$ and $Q=\Z^d$. Assume first that $A$ is finite, and choose an element $s\in A$ of prime order $p$. The $\Z Q$-submodule $N$ generated by $s$ is $N = \oplus_{\Z^d} C_p \simeq \Z Q / (p)$. The module $N$ is indeed uniform by Lemma \ref{lem-prime-SI}, and clearly $C_Q(N)=\{1\}$. Thus we may apply Theorem \ref{thm-explicit-metab}, and the conclusion of the theorem says that $\mathcal{L} = \Z^d$ is a non-foldable subset of $G$.
The bound $\vol_{G, X}(n) \succcurlyeq n^d$ follows from Lemma \ref{lem-exp-subset-growth} since $\relvol_{(G, \mathcal{L})}(n) \simeq n^{d}$. This proves part \eqref{i-wreath-i}. Now assume that $A$ is infinite and choose an element $s\in A$ of infinite order. Then the module generated by $s$ is $N=\oplus_Q\Z$, a free $\Z Q$-module of rank 1. It also follows from Theorem \ref{thm-explicit-metab} that the subset $\mathcal{L}=\Z^d \cdot \langle s \rangle$ is non-foldable (in particular, so is the subset $\Z^d$), and the claim on the growth follows again from Lemma \ref{lem-exp-subset-growth} since $\relvol_{(G, \mathcal{L})}(n)\simeq n^{d+1}$. The claim on asymptotic dimension follows similarly from Proposition \ref{p-asdim-non-foldable}, since $\asdim(\Z^d)=d$. Finally we justify that these bounds are sharp. If the group $A$ is finite, then the standard wreath product action of $G$ on $X=\Z^d\times A$ satisfies $\vol_{G, X}(n)\simeq n^d$ (\S \ref{s-wreath-actions}). If $A$ is infinite, then we may choose a faithful $A$-set $Y$ with $\vol_{A, Y}(n)\simeq n$ (see Proposition \ref{prop-virtually-abelian}). Then the action of $G$ on $X = \Z^d \times Y$, given by $((a_b),b_1) \cdot (b_2,y) = (b_1+b_2, a_{b_1+b_2}y)$, is faithful, and $\vol_{G, X}(n)\simeq n^{d+1}$. Moreover in both cases the asymptotic dimension of the resulting action is $\asdim(G, X)=d$. \qedhere \end{proof} \begin{remark} Theorem \ref{thm-wreath} implies an analogous result when $A$ is an arbitrary finitely generated group, because any non-trivial wreath product $G=A\wr \Z^d$ contains a subgroup isomorphic to either $C_p \wr \Z^d$ or $\Z \wr \Z^d$. However in this case the lower bounds obtained on the growth might not be sharp. \end{remark} \subsubsection{Baumslag finitely presented groups} Here we consider the extended family of Baumslag groups $\Lambda_{p, d}$ from \S \ref{subsec-Baumslag}, where $p$ is a prime and $d\ge 1$. Recall that $\Lambda_{p, d}=R_{p, d}\rtimes Q$, where $Q=\Z^{2d}$ and $R_{p, d}:=\mathbb{F}_p[T_1,\ldots, T_d, T_1^{-1}, \ldots, T_d^{-1}, (1+T_1)^{-1},\ldots, (1+T_d)^{-1}]$. If we denote by $t_1,\ldots, t_d, s_1,\ldots, s_d$ a basis of $\Z^{2d}$, then $t_i$ and $s_i$ act on $R_{p, d}$ respectively by multiplication by $T_i$ and $T_i+1$. The ring $R_{p, d}$ is a quotient of the ring $\Z Q\simeq \Z[T_1^{\pm 1},\cdots, T_{2d}^{\pm 1}]$ by the ideal generated by $p$ and by the polynomials $T_i+1 - T_{d+i}$ for $i=1,\ldots, d$, which is a prime ideal. Hence $R_{p, d}$ is a uniform $\Z Q$-module. Since moreover $C_Q(R_{p, d})=\{1\}$, all the assumptions of Theorem \ref{thm-explicit-metab} are satisfied. We obtain: \begin{thm} For every prime $p$ and every $d\ge 1$, $\Z^{2d} $ is a non-foldable subset of $G = \Lambda_{p, d}$. In particular $G$ has a Schreier growth gap $n^{2d}$. \end{thm} We note that this lower bound is sharp because $\Lambda_{p, d}$ admits a faithful transitive $G$-set $X$ with $\vol_{G, X}(n) \simeq n^{2d}$ (Proposition \ref{prop-growth-ol}). \subsubsection{Free metabelian groups} Let $d \geq 2$, and let $\mathbb{FM}_d$ be the free metabelian group of rank $d$, i.e.\ the quotient of the free group $F_d$ by its second derived subgroup. If $x_1,\ldots,x_d$ are free generators of $F_d$, then for simplicity we still denote by $x_1,\ldots,x_d$ their images in $\mathbb{FM}_d$. The group $Q = \mathbb{FM}_d / \mathbb{FM}_d'$ is free abelian of rank $d$. \begin{thm} \label{thm-freemetab-growth} Let $G = \mathbb{FM}_d$. For every non-trivial $m \in G'$, the $(d+1)$-tuple $(m,x_1,\ldots,x_d)$ is non-foldable.
In particular $G$ has a Schreier growth gap $n^{d+1}$. Moreover this bound is sharp. \end{thm} \begin{proof} Recall that the Magnus embedding is an injective homomorphism from $\mathbb{FM}_d$ to the wreath product $\mathbb{Z}^d \wr Q $ \cite{Magnus-embed}. From this it is easy to see that every non-trivial $m \in \mathbb{FM}_d'$ has trivial annihilator in $\Z Q$, so that the $\Z Q$-module $N$ generated by $m$ is free of rank one. In particular $C_Q(N) = \left\lbrace 1 \right\rbrace$ and $N$ is uniform. Therefore Corollary \ref{cor-explicit-metab} applies and yields the conclusion. The only thing that remains to be justified is the last claim. Since $\Z^d \wr \Z^d$ is isomorphic to a (finite index) subgroup of $ \Z \wr \Z^d$, the Magnus embedding implies in particular that $G = \mathbb{FM}_d$ embeds in $\Z \wr \Z^d$. Since the standard wreath product action of $ \Z \wr \Z^d$ has growth $n^{d+1}$, by restricting to $G$ we obtain a faithful $G$-set $X$ with $\vol_{G, X}(n) \simeq n^{d+1}$. So the lower bound $n^{d+1}$ from the statement is sharp. \end{proof} \subsection{Growth of actions and Krull dimension} \label{s-Krull} We recall the following definitions. \begin{defin} Let $A$ be a commutative ring with unit. The Krull dimension of $A$, written $\dim(A)$, is the supremum of the lengths of all chains of prime ideals of $A$, where the length of the chain $\p_0 \subsetneq \cdots \subsetneq \p_{n}$ is $n$. The Krull dimension of a non-zero module $M$ over $A$ is defined as $\dim(M) = \dim(A/\ann(M))$. \end{defin} We will need the following proposition proven by Jacoboni \cite[Prop.\ 4.2]{Lison}. \begin{prop} \label{prop-lison} Let $Q$ be a finitely generated free abelian group, and $M$ a finitely generated $\Z Q$-module of Krull dimension $k$. Then one can find a subgroup $Q_0 \leq Q$ and $m \in M$ such that at least one of the following holds: \begin{enumerate}[label=\roman*)] \item $Q_0$ has rank $k-1$ and the $\Z Q_0$-module generated by $m$ is a free module; \item $Q_0$ has rank $k$ and the $\Z Q_0$-module generated by $m$ is isomorphic to $\mathbb{F}_p Q_0$ for some prime number $p$. \end{enumerate} \end{prop} For a proof of the following, see \cite[\S 2.2.3]{Lison}. \begin{prop-def} Let $G$ be a finitely generated metabelian group, and suppose $G$ is not virtually abelian. If $1 \to M \to G \to Q \to 1$ is a short exact sequence of groups with $M,Q$ abelian, then the Krull dimension of $M$ as a $\Z Q$-module is a positive integer that does not depend on the choice of $M,Q$. This integer is the \textbf{Krull dimension} of $G$. \end{prop-def} \begin{thm} \label{t-krull} Let $G$ be a finitely generated metabelian group, and suppose that $G$ is not virtually abelian. If $G$ has Krull dimension $k$, then there exists a non-foldable $k$-tuple $(g_1,\ldots,g_k)$ in $G$. In particular $G$ has a Schreier growth gap $n^{k}$. \end{thm} \begin{proof} Note that if $k=1$ then there is nothing to prove because every element of $G$ of infinite order satisfies the conclusion. So we assume $k \geq 2$. Write $M=G'$ and $Q=G/G'$, and apply Proposition \ref{prop-lison}. Let $Q_0$ and $m$ as in the conclusion of the proposition, and let $N$ be the $\Z Q_0$-submodule generated by $m$. We choose lifts $\gamma_1,\ldots,\gamma_d \in G$ of generators of $Q_0$, where $d \geq 1$ is the rank of $Q_0$, and we denote by $G_0$ the subgroup of $G$ generated by $m$ and $\gamma_1,\ldots,\gamma_d$. Then we have a short exact sequence $1 \to N \to G_0 \to Q_0 \to 1$. 
Since $N$ is either a free module or isomorphic to $\mathbb{F}_p Q_0$ for some prime number $p$, the module $N$ is uniform and satisfies $C_{Q_0}(N) = \left\lbrace 1 \right\rbrace$, so we can apply Corollary \ref{cor-explicit-metab}. In case $N \simeq \mathbb{F}_p Q_0$ we have $d=k$ and by the corollary $(\gamma_1,\ldots,\gamma_k)$ is non-foldable; and in case $N$ is free we have $d=k-1$ and again by the corollary $(m,\gamma_1,\ldots,\gamma_{k-1})$ is non-foldable. So in both cases we have found a non-foldable $k$-tuple, and hence the proof is complete. \end{proof} \subsection{Torsion-free metabelian groups} \label{s-metabelian-torsion-free} In the sequel $A$ is a commutative ring with unit. The radical of an ideal $I$ of $A$ is denoted $\rad(I)$. Let $M$ be a module over $A$. Given $x \in M$, the annihilator of $x$ is denoted $\ann(x)$. The annihilator of $M$ is denoted $\ann(M)$. A prime ideal $\p$ of $A$ is \textbf{associated} with $M$ if there exists $x \in M$ such that $\p = \ann(x)$. We denote by $\ass(M)$ the set of associated prime ideals of $M$. We will use the following basic facts, a proof of which can be found for instance in \cite{Bourbaki-alg-comm-3-4}: \begin{enumerate} \item If $A$ is Noetherian and $M$ is non-zero, then $\ass(M)$ is not empty. \item \label{item-ass-ext} If $N$ is a submodule of a module $M$, every associated prime of $N$ is associated with $M$, and every associated prime of $M$ is associated with $N$ or $M/N$. \item \label{item-radical} If $A$ is Noetherian and $M$ is a Noetherian $A$-module, then $\rad(\ann(M)) = \bigcap_{\p \in \ass(M)} \p$. \item \label{item-series} If $A$ is Noetherian and $M$ is a Noetherian $A$-module, then there exists a series of submodules $0 = M_0 \subsetneq M_1 \subsetneq \ldots \subsetneq M_n = M$ such that each $M_{i+1}/M_i$ is isomorphic to $A / \p_{i+1}$ for some prime ideal $\p_{i+1}$, and $\ass(M) \subseteq \left\lbrace \p_{1}, \ldots, \p_{n} \right\rbrace $. \end{enumerate} \begin{lem} \label{lem-associatedprime} Let $Q$ be a finitely generated abelian group. Let $\P$ be a property of $\Z Q$-modules such that the zero module has $\P$, and $\P$ is stable under taking finite direct products of modules, submodules, quotient modules, and extensions. Let $M$ be a finitely generated $\Z Q$-module that does not have $\P$. Then $M$ admits a submodule $N$ that is isomorphic to $\Z Q / \p$ for some prime ideal $\p$ of $\Z Q$ such that $N$ does not have $\P$ either. \end{lem} \begin{proof} Consider a series $0 = M_0 \subsetneq M_1 \subsetneq \ldots \subsetneq M_n = M$ as in (\ref{item-series}) above. So $M_{i+1}/M_i$ is isomorphic to $\Z Q / \p_{i+1}$, and $\ass(M) \subseteq \left\lbrace \p_{1}, \ldots, \p_{n} \right\rbrace $. Let $i$ be the least integer such that $M_i$ does not have $\P$. By the assumption on $M$ such an integer exists, and we have $ 1 \leq i \leq n$. Since $M_i$ admits $\Z Q / \p_{i}$ as a quotient module, we have $\ann(M_i) \subset \p_i$, and hence $\rad(\ann(M_i))$ is contained in $\p_i$. We claim that $\p_i \in \ass(M_i)$. Suppose for a contradiction that this is not the case. Then $M_{i-1}$ and $M_i$ have the same associated primes (because every prime associated with $M_i$ but not with $M_{i-1}$ would be associated with $M_{i} / M_{i-1}$ by (\ref{item-ass-ext}), and $M_{i} / M_{i-1}$ has $\p_{i}$ as its only associated prime since $M_{i} / M_{i-1}$ is isomorphic to $\Z Q / \p_{i}$).
So by property (\ref{item-radical}) we deduce that $\rad(\ann(M_{i-1})) \subset \p_i$, and in particular $I := \bigcap_{j=1}^{i-1} \p_j \subset \p_i$. Now the module $\Z Q/I$ embeds in $\prod_{j=1}^{i-1} \Z Q/\p_j$. By the definition of the integer $i$, each module $\Z Q/\p_j$ has $\P$, and since $\P$ is stable under taking products and submodules, $\Z Q/I$ also has $\P$. Therefore, being a quotient of $\Z Q/I$, the module $\Z Q/\p_i$ also has $\P$, and finally so does $M_i$ as an extension of two modules $M_{i-1}$ and $\Z Q/\p_i$ that have $\P$. This is a contradiction. So we indeed have $\p_i \in \ass(M_i)$, and the statement holds with $N = \Z Q x$, where $x \in M_i$ is such that $\ann(x) = \p_i$. \end{proof} \begin{lem} \label{prop-metab-fp} Let $G$ be a finitely generated metabelian group, and assume that $G$ is not polycyclic. Choose abelian groups $M,Q$ such that $G$ is an extension $1 \to M \to G \to Q \to 1$. Then there exists a submodule $N$ of $M$ whose underlying abelian group is infinitely generated, and such that $N$ is isomorphic to $\Z Q / \p$ for some prime ideal $\p$ of $\Z Q$. \end{lem} \begin{proof} $M$ is a finitely generated $\Z Q$-module, and the assumption that $G$ is not polycyclic is equivalent to saying that $M$ is infinitely generated as an abelian group. The statement follows by applying Lemma \ref{lem-associatedprime} with $\P$ the property \enquote{being finitely generated as an abelian group}. \end{proof} \begin{thm} \label{thm-metab-notorsion} Let $G$ be a finitely generated torsion-free metabelian group, and suppose that $G$ is not virtually abelian. Then there exists a pair $(g_1,g_2)$ that is non-foldable. \end{thm} \begin{proof} In the case where $G$ is polycyclic, Proposition \ref{prop-expand-subgroup-poly} provides a subgroup $H$ of $G$ isomorphic either to $H_3(\Z)$ or $\Z^k \rtimes \Z$ with $k \geq 2$, such that $H$ is a non-foldable subset of $G$. It immediately follows that if $g_1,g_2$ are two non-trivial elements of $H$ such that the subgroups generated by $g_1$ and $g_2$ intersect trivially, then the pair $(g_1,g_2)$ is non-foldable. Now suppose that $G$ is not polycyclic. Choose abelian groups $M,Q$ such that $G$ lies in an extension $1 \to M \to G \to Q \to 1$, and choose $N$ as in Lemma \ref{prop-metab-fp}. So $N$ is isomorphic to $\Z Q / \p$ for some prime ideal $\p$ of $\Z Q$, and $N$ is infinitely generated as an abelian group. This last property ensures that the rank $d$ of $Q/ C_Q(N)$ is at least one. Let $Q_1$ be a free abelian subgroup of rank $d$ of $Q$ such that $Q_1$ intersects $C_Q(N)$ trivially. If $m \in N$ is a generator of $N$ as a $\Z Q$-module, the fact that $Q_1$ intersects $C_Q(N)$ trivially is equivalent to saying that no non-trivial element of $Q_1$ centralizes $m$. Let $g_1,\ldots,g_d \in G$ be lifts of generators of $Q_1$, and let $G_1$ be the subgroup of $G$ generated by $m$ and $g_1,\ldots,g_d $. The group $G_1$ lies in a short exact sequence $1 \to L \to G_1 \to Q_1 \to 1$, and we denote by $N_1 \subseteq L$ the $\Z Q_1$-module generated by $m$. The annihilator $\p_1$ of $m$ in $\Z Q_1$ equals $\p \cap \Z Q_1$, and hence is a prime ideal of $\Z Q_1$. Moreover $Q_1$ acts faithfully on $N_1$ because no non-trivial element of $Q_1$ centralizes $m$. So the $\Z Q_1$-module $N_1$ satisfies $C_{Q_1}(N_1) = \left\lbrace 1 \right\rbrace$ and $N_1$ is uniform by Lemma \ref{lem-prime-SI}. Therefore we may apply Corollary \ref{cor-explicit-metab} to the group $G_1$.
Since $N$ is torsion-free, the conclusion says that $(m,g_1,\ldots,g_d)$ is non-foldable in $G_1$. A fortiori $(m,g_1,\ldots,g_d)$ is also non-foldable in $G$. In particular the pair $(m,g_i)$ is non-foldable for all $i$, and we have proved the statement. \end{proof} \subsection{Finitely presented metabelian groups} In this section we show how our results from \S \ref{subsec-metab-non-foldable-tuples}, combined with results of Bieri and Strebel, provide the existence of non-foldable pairs in finitely presented metabelian groups. Let $Q$ be a finitely generated abelian group, and $M$ a finitely generated $\Z Q$-module. Following Bieri--Strebel \cite{BS78,BS80}, for $v \in \mathrm{Hom}(Q,\R)$ we denote by $Q_v$ the set of $q \in Q$ such that $v(q) \geq 0$, and by $\Sigma_M$ the set of $v$ such that $M$ is a finitely generated $\Z Q_v$-module. The module $M$ is called \textbf{tame} if $\mathrm{Hom}(Q,\R) = \Sigma_M \cup - \Sigma_M$. The main result of \cite{BS80} states that if $G$ is a metabelian group and $1\to M\to G\to Q\to 1$ is a short exact sequence where $M, Q$ are abelian, then $G$ is finitely presented if and only if $M$ is a tame $\Z Q$-module. We record the following properties, proven in \cite[Proposition 2.5]{BS80}. \begin{enumerate} \item \label{BS1} A submodule of a tame $\Z Q$-module is tame. \item \label{BS2} If $M$ is a tame $\Z Q$-module and $R\le Q$ is a finite index subgroup of $Q$, then $M$ is a tame $\Z R$-module. \end{enumerate} \begin{thm} \label{thm-metab-presfin} Let $G$ be a finitely presented metabelian group that is not virtually abelian. Then there exists a pair $(g_1,g_2)$ that is non-foldable. \end{thm} \begin{proof} The polycyclic case has already been treated in Theorem \ref{thm-metab-notorsion}, so in the sequel we assume that $G$ is not polycyclic. Choose abelian groups $M,Q$ such that $G$ is an extension $1 \to M \to G \to Q \to 1$, and choose $N$ as in Lemma \ref{prop-metab-fp}. Since the group $G$ is finitely presented, $M$ is a tame $\Z Q$-module according to \cite{BS80}. Therefore $N$ is also tame by \eqref{BS1} above. In the sequel we write $\overline{Q} = Q/ C_Q(N)$, which is an infinite abelian group. Note that $N$ is naturally a tame $\Z \overline{Q}$-module. We first consider the case when $\overline{Q}$ is virtually cyclic. We choose an infinite cyclic subgroup $Q_1 \leq Q$ whose image in $\overline{Q}$ has finite index and such that no non-trivial element of $Q_1$ centralizes $N$. By \eqref{BS2} above, $N$ is a finitely generated tame $\Z Q_1 $-module. Again by the main result of \cite{BS80} the subgroup $G_1:=N\rtimes Q_1$ is finitely presented. According to Theorem A in \cite{BS78} this implies that $G_1$ splits as an HNN-extension over some finitely generated subgroup of $N$. Since $G_1$ contains no non-abelian free subgroups, this HNN-extension is necessarily ascending. In this situation by Proposition 3.3 in \cite{BS78} the torsion subgroup of $N$ is finite, and hence trivial here because $N$ is isomorphic to $\Z Q / \p$ for some prime ideal $\p$. Therefore in this situation the group $G_1 $ is torsion-free. Since $G_1$ is not virtually abelian, $G_1$ falls under the scope of Theorem \ref{thm-metab-notorsion}, and the existence of a non-foldable pair in $G_1$, and hence in $G$, is thus guaranteed. Now we consider the case where the torsion-free rank $d$ of $\overline{Q}$ is at least $2$. As in the proof of Theorem \ref{thm-metab-notorsion}, we choose a torsion-free subgroup $Q_1$ inside $Q$ of rank $d$ such that no non-trivial element of $Q_1$ centralizes $N$.
We choose lifts $g_1,\ldots,g_d \in G$ of generators of $Q_1$, and consider the subgroup $G_1$ of $G$ generated by $m$ and $g_1,\ldots,g_d $, where $m$ is a generator of $N$ as a $\Z Q$-module. Repeating the argument from the end of the proof of Theorem \ref{thm-metab-notorsion}, we apply Corollary \ref{cor-explicit-metab} to $G_1$, and deduce that $(g_1,\ldots,g_d)$ is non-foldable. This completes the proof. \end{proof} \begin{cor} \label{cor-metab-presfinie-growth} Let $G$ be a finitely presented metabelian group that is not virtually abelian. Then $G$ has a Schreier growth gap $n^2$. If moreover $G$ is torsion-free, then $G$ has a Schreier growth gap $n^3$. \end{cor} \begin{proof} The first statement directly follows from Theorem \ref{thm-metab-presfin}. In order to prove the second statement, we assume that $G$ is torsion-free and follow the proof of Theorem \ref{thm-metab-presfin}. In the polycyclic case, any faithful $G$-set $X$ satisfies $\vol_{G, X}(n) \succcurlyeq n^4$ according to Proposition \ref{prop-n4-growth} and Corollary \ref{cor-poly-growth}. Suppose now $G$ is not polycyclic. We retain the notation $N,Q,m, \ldots$ as above. In case $\overline{Q}$ is virtually cyclic, as in the previous proof we find a cyclic subgroup $Q_1$ of $Q$ such that $G_1 = N \rtimes Q_1 $ is a finitely presented subgroup of $G$ which splits as an HNN-extension over a torsion-free finitely generated subgroup of $N$. It follows that $N$ is an ascending union of finitely generated torsion-free abelian groups whose rank is bounded, and that $G_1$ is a group of finite rank. Hence in that case the conclusion is provided by Theorem \ref{t-finite-rank-exp}. In case $\overline{Q}$ has torsion-free rank $d \geq 2$, we choose $g_1,\ldots,g_d \in G$ whose projections generate a free abelian subgroup $Q_1$ of $Q$ in which $N$ has trivial centralizer, and we obtain from Corollary \ref{cor-explicit-metab} that $(m,g_1,\ldots,g_d)$ is non-foldable. Since $d \geq 2$, this yields a Schreier growth gap $n^{d+1}$, and in particular $n^3$. \end{proof} \begin{remark} When $G$ is finitely presented and torsion-free (and not virtually abelian), although faithful $G$-actions have growth $\succcurlyeq n^3$ by Corollary \ref{cor-metab-presfinie-growth}, $G$ does not always admit a non-foldable triple $(g_1,g_2,g_3)$. For example, for the Baumslag--Solitar groups $\mathrm{BS}(1,n) \simeq \Z [1/n] \rtimes \Z$, $n\geq 2$, it is not hard to see that for every triple $(g_1,g_2,g_3)$, the map $\Z^3 \to \mathrm{BS}(1,n), \, \, (n_1, n_2, n_3 ) \mapsto g_3^{n_3} g_2^{n_2} g_1^{n_1}$, is non-injective. In particular no triple can be non-foldable (Lemma \ref{lem-mapZk-inj}). \end{remark} \section{Torsion-free solvable groups} \label{sec-torsionfree} Based on the results from the previous sections, we make the following conjecture: \begin{conjecturebis} \label{conj-torsion-free} Let $G$ be a finitely generated solvable group which is virtually torsion-free, and not virtually abelian. Then $G$ has a Schreier growth gap $n^2$. \end{conjecturebis} The purpose of this section is to establish the following results: \begin{itemize} \item Conjecture \ref{conj-torsion-free} is true if $G$ admits a nilpotent normal subgroup $N$ such that $G/N$ is virtually abelian. This includes in particular solvable linear groups. \item Conjecture \ref{conj-torsion-free} is true under a strengthening of the torsion-free assumption on $G$, see Theorem \ref{thm-fitting-torsionfree}. \item Conjecture \ref{conj-torsion-free} is true if we restrict to actions with finitely many orbits, see Theorem \ref{t-torsion-free-quasitransitive}.
\end{itemize} \subsection{Nilpotent-by-abelian groups} First recall that we have proven in Theorem \ref{thm-metab-notorsion} that the above conjecture is true when $G$ is a metabelian group. This has the following straightforward consequence: \begin{cor} \label{c-metabelian-subgroup} Let $G$ be a torsion-free solvable group, and assume that $G$ admits a finitely generated metabelian subgroup which is not virtually abelian. Then $G$ contains a non-foldable pair. In particular Conjecture \ref{conj-torsion-free} is true for $G$. \end{cor} We recall the following: \begin{lem} \label{lem-nilp-virtab-tf} Let $N$ be a torsion-free nilpotent group. If $N$ is virtually abelian, then $N$ is abelian. \end{lem} \begin{proof} Let $g,h \in N$. Since $N$ is virtually abelian, there are $m,n \geq 1$ such that $g^m$ and $h^n$ commute. Then $g^m h g^{-m}$ and $h$ have the same $n$-th power, and hence $g^m h g^{-m} = h$ since $N$ is torsion-free \cite[2.1.2]{Lennox-Rob}. So $h^{-1} g h$ and $g$ have the same $m$-th power, and again $h^{-1} g h= g$. \end{proof} Recall that a group $G$ is nilpotent-by-abelian if it admits a nilpotent normal subgroup $N$ such that $G/N$ is abelian. Corollary \ref{c-metabelian-subgroup} readily implies the following. \begin{thm} \label{t-torsion-free-nilp-by-abelian} Let $G$ be a finitely generated torsion-free group that is nilpotent-by-abelian, and not virtually abelian. Then $G$ contains a non-foldable pair. In particular Conjecture \ref{conj-torsion-free} is true for $G$. \end{thm} \begin{proof} By Corollary \ref{c-metabelian-subgroup} it is enough to show that $G$ contains a finitely generated subgroup $H$ which is metabelian and not virtually abelian. Let $N$ be a nilpotent normal subgroup of $G$ such that $G/N$ is abelian. If $N$ is not abelian, then $N$ is not virtually abelian by Lemma \ref{lem-nilp-virtab-tf}, and we can choose $H$ to be a copy of the Heisenberg group in $N$ (Lemma \ref{l-subgroup-H3}). If $N$ is abelian, then $G$ is metabelian, and we take $H=G$. \qedhere \end{proof} Since solvable linear groups are virtually nilpotent-by-abelian by a theorem of Malcev \cite[3.1.8]{Lennox-Rob}, Theorem \ref{t-torsion-free-nilp-by-abelian} implies: \begin{cor} \label{cor-linear-case-quadratic} Let $G$ be a finitely generated solvable linear group which is virtually torsion-free. If $G$ is not virtually abelian, then $G$ contains a non-foldable pair. In particular Conjecture \ref{conj-torsion-free} is true for $G$. \end{cor} \begin{remark} The above results are entirely based on the case of metabelian groups, proven in the previous section. However in general the reduction to metabelian subgroups is not enough to prove Conjecture \ref{conj-torsion-free}. As we show in a separate work in preparation, there exist finitely generated torsion-free solvable groups that are not virtually abelian and have the property that all their finitely generated metabelian subgroups are virtually abelian. \end{remark} \subsection{Lifting non-foldable subsets} \label{subsec-lift-exp} The goal of this paragraph is to prove Proposition \ref{prop-lift-nice-subsets}. Under suitable conditions, this result makes it possible to lift a non-foldable subset of a quotient to a non-foldable subset of the ambient group. For solvable groups, this mechanism can be applied inductively, and will be used in \S \ref{subsec-fittingseries}. We fix the following notation: $G$ is a group that is an extension $1 \to A \to G \to Q \to 1$, where $A$ is abelian and torsion-free.
For $r \geq 1$, we denote by $A_r$ the intersection of all subgroups of $A$ of index at most $r$. The subgroup $A_r$ contains the $(r!)$-th powers of all elements of $A$, and hence is non-trivial. Note also that $A_r$ is a characteristic subgroup of $A$. In particular $A_r$ is $Q$-invariant. \begin{lem} \label{lem-QHr} Retain the above notation. \begin{enumerate}[label=\roman*)] \item If $H$ is a subgroup of $G$, then \[Q_{H,r} = \left\lbrace q \in Q : (q-1) A_r \leq H \right\rbrace \] is a subgroup of $Q$. \item The map $\sub(G) \to \sub(Q)$, $H \mapsto Q_{H,r}$, is equivariant, where $G$ acts on $\sub(Q)$ by conjugation via the projection $G \to Q$. \end{enumerate} \end{lem} \begin{proof} The fact that $Q_{H,r}$ is a subgroup follows from the fact that $A_r$ is $Q$-invariant and the equality \[ (q_1 q_2^{-1} - 1) a = (q_1 - 1) q_2^{-1} a - (q_2 - 1) q_2^{-1} a, \] which holds for all $q_1, q_2 \in Q$ and $a \in A$. To see that $H \mapsto Q_{H,r}$ is equivariant, fix $g \in G$ and take $q \in Q_{gHg^{-1},r}$. Using module notation, the condition that $(q-1) A_r$ lies in $gHg^{-1}$ can be rewritten as $\pi(g)^{-1} (q-1) A_r \leq H$. Now we have \[ \pi(g)^{-1} (q-1) A_r = (\pi(g)^{-1} q \pi(g)-1) A_r \] since $A_r$ is $Q$-invariant, so we deduce $\pi(g)^{-1} q \pi(g) \in Q_{H,r}$, or equivalently $q \in \pi(g) Q_{H,r} \pi(g)^{-1}$. This shows $Q_{gHg^{-1},r} \leq \pi(g) Q_{H,r} \pi(g)^{-1}$, and the same argument also shows that equality holds. \end{proof} \begin{prop} \label{prop-lift-confine} Retain the notation from Lemma \ref{lem-QHr}. Suppose that the group $G$ is an extension $1 \to A \to G \to Q \to 1$, where $A$ is abelian, torsion-free, and such that $A = C_G(A)$. Suppose $P$ is a finite subset of $Q \setminus \left\lbrace 1\right\rbrace $, and $P_G$ is a finite subset of $G$ of cardinality $r$ such that $\pi(P_G) = P$, where $\pi$ is the projection $G \to Q$. Then: \begin{enumerate}[label=\roman*)] \item $Q_{H,r} \in S_Q(P,Q)$ for all $H \in S_G(P_G,G)$. \item If \[ \bigcap_{L \in S_Q(P,Q)} \! \! \! \! \! L \neq \left\lbrace 1 \right\rbrace, \] then \[ \bigcap_{H \in S_G(P_G,G)} \! \! \! \! \! H \neq \left\lbrace 1 \right\rbrace . \] \end{enumerate} \end{prop} \begin{proof} By Proposition \ref{p-Neumann-abelien}, for every $H$ in $S_G(P_G,G)$ there exists $g \in P_G$ such that $(\pi(g)-1)A_r \leq H$. This means that $\pi(g) \in Q_{H,r}$. So we have $Q_{H,r} \cap P \neq \emptyset$ for all $H \in S_G(P_G,G)$. Since $H \mapsto Q_{H,r}$ is equivariant, this implies that $Q_{H,r} \in S_Q(P,Q)$ for all $H \in S_G(P_G,G)$. This proves the first statement. For the second, the assumption implies that there exists a non-trivial element $q$ such that $q \in Q_{H,r}$ for all $H \in S_G(P_G,G)$. This means that $(q-1)A_r \leq H$ for all $H \in S_G(P_G,G)$. Since $q$ is non-trivial, $(q-1)A$ is non-zero according to the assumption that $A = C_G(A)$. Since $A$ has no torsion by assumption, $(q-1)A_r$ is also non-trivial. Since $(q-1)A_r$ is contained in $\bigcap_{H \in S_G(P_G,G)} H$, we obtain the conclusion. \end{proof} \begin{prop} \label{prop-lift-nice-subsets} Suppose that the group $G$ is an extension $1 \to A \to G \to Q \to 1$, where $A$ is abelian, torsion-free, and such that $A = C_G(A)$. Suppose that $\mathcal{L} \subset Q$ is a non-foldable subset of $Q$. Let $\mathcal{L}_G \subset G$ be a lift of $\mathcal{L}$. Then $\mathcal{L}_G $ is a non-foldable subset of $G$. \end{prop} \begin{proof} This follows by combining Proposition \ref{prop-lift-confine} with Lemma \ref{l-non-foldable-confined}.
\qedhere \end{proof} \subsection{Torsion-free Fitting series} \label{subsec-fittingseries} Observe that if the successive quotients $N_{i+1}/N_i$ in a series $\{1\}=N_0\unlhd N_1\unlhd \cdots \unlhd N_k=G$ are all torsion-free, then the group $G$ is evidently torsion-free. In general the converse does not hold, in the sense that some torsion-free solvable groups do not admit a series with torsion-free abelian factors. The goal of this paragraph is to prove Conjecture \ref{conj-torsion-free} under the assumption that the factors in the Fitting series of $G$ are torsion-free. Recall that the Fitting series of a solvable group $G$ is defined inductively by $F_0(G) = \left\lbrace 1\right\rbrace $ and $F_{i+1}(G) / F_i(G) = \Fit(G/F_i(G))$. So $F_1(G) = \Fit(G)$, and the series eventually reaches $G$ since $G$ is solvable. The Fitting length of $G$ is the least $\ell$ such that $F_\ell(G) = G$. \begin{thm} \label{thm-fitting-torsionfree} Let $G$ be a finitely generated solvable group, and assume that all successive quotients $F_{i+1}(G) / F_i(G)$ in the Fitting series of $G$ are torsion-free. If $G$ is not virtually abelian, then $G$ contains a non-foldable pair. In particular Conjecture \ref{conj-torsion-free} is true for $G$. \end{thm} To prove Theorem \ref{thm-fitting-torsionfree} we need some further preliminaries. We will use the following result of Lennox (see \cite[2.3.14]{Lennox-Rob}). \begin{prop} \label{prop-Lennox-isolator} Let $G$ be a finitely generated solvable group, and $H$ a subgroup of $G$ such that for every $g \in G$ there is $n \geq 1$ such that $g^n \in H$. Then $H$ has finite index in $G$. \end{prop} Recall that by Corollary \ref{c-metabelian-subgroup}, in order to prove Conjecture \ref{conj-torsion-free}, there is no loss of generality in restricting to solvable groups whose finitely generated metabelian subgroups are all virtually abelian. It is convenient to introduce the following ad-hoc terminology. \begin{defin} We say that a solvable group $G$ is \textbf{restricted} if every finitely generated metabelian subgroup of $G$ is virtually abelian. \end{defin} \begin{prop} \label{prop-no-metab-sbgp-restriction} Let $G$ be a finitely generated restricted solvable group. Suppose $\Fit(G)$ is torsion-free. Then the following hold: \begin{enumerate}[label=\roman*)] \item $\Fit(G)$ is abelian and contained in the FC-center of $G$. \item If $\Fit(G)$ is finitely generated, then $G$ is virtually abelian. \end{enumerate} \end{prop} \begin{proof} By Lemma \ref{l-subgroup-H3}, every nilpotent subgroup of $\Fit(G)$ is virtually abelian, and hence abelian by Lemma \ref{lem-nilp-virtab-tf}. Now if $N_1,N_2$ are two abelian normal subgroups of $G$, then $N_1N_2$ is nilpotent by Fitting's theorem, and hence abelian. So all abelian normal subgroups of $G$ commute, and hence $\Fit(G)$ is abelian. Now let $a \in \Fit(G)$, and let $H$ be the centralizer of $a$ in $G$. If there is $g \in G$ such that $g^n \notin H$ for all $n \geq 1$, then $a$ and $g$ generate a non-virtually abelian subgroup of $G$, that is metabelian because $\Fit(G)$ is abelian. This is impossible by our assumption. So for all $g \in G$ there is $n \geq 1$ such that $g^n \in H$, and hence by Proposition \ref{prop-Lennox-isolator} the subgroup $H$ has finite index in $G$. So $a$ has a finite conjugacy class, and $\Fit(G)$ is indeed contained in the FC-center of $G$. Finally if $\Fit(G)$ is finitely generated, then it follows from the previous paragraph that $C_G(\Fit(G))$ is a finite index subgroup of $G$. 
But since $G$ is solvable, $C_G(\Fit(G))$ is always contained in $\Fit(G)$ \cite[1.2.10]{Lennox-Rob}. So $\Fit(G)$ has finite index in $G$, and $G$ is virtually abelian. \end{proof} \begin{prop} \label{prop-lift-fitting} Let $G$ be a finitely generated solvable group such that $\Fit(G)$ is torsion-free. If $G / \Fit(G)$ admits a non-foldable pair, then so does $G$. \end{prop} \begin{proof} Note that the assumption that $G / \Fit(G)$ admits a non-foldable pair implies that $G$ is not virtually abelian. If $G$ is not restricted, then we can invoke Theorem \ref{thm-metab-notorsion}. If $G$ is restricted, then $A = \Fit(G)$ is abelian by Proposition \ref{prop-no-metab-sbgp-restriction}, and we have $A = C_G(A)$ \cite[1.2.10]{Lennox-Rob}. Hence we may apply Proposition \ref{prop-lift-nice-subsets}, which implies that any pair $(g_1,g_2)$ that is a lift of a non-foldable pair $(q_1,q_2)$ of $Q = G/\Fit(G)$, is a non-foldable pair of $G$. \end{proof} \begin{proof}[Proof of Theorem \ref{thm-fitting-torsionfree}] Recall that when $G$ is not restricted, the conclusion follows from Theorem \ref{thm-metab-notorsion}. We argue by induction on the Fitting length $\ell$. Since $G$ is finitely generated, the case $\ell=1$ corresponds to the case where $G$ is nilpotent. Since $G$ is not virtually abelian, Proposition \ref{prop-no-metab-sbgp-restriction} says that in that case $G$ cannot be restricted, and hence the statement is true. Suppose now that $G$ has Fitting length $\ell+1$, $\ell \geq 1$. According to Proposition \ref{prop-no-metab-sbgp-restriction}, we may assume that $\Fit(G)$ is abelian. If $Q = G/\Fit(G)$ is virtually abelian, then $G$ is virtually metabelian, and again the conclusion holds. Now if $Q$ is not virtually abelian, then $Q$ satisfies all the assumptions of the theorem. Hence in that case we can apply the induction hypothesis to $Q$, and the conclusion then follows from Proposition \ref{prop-lift-fitting}. \end{proof} \subsection{Quasi-transitive actions} Here we prove Conjecture \ref{conj-torsion-free} in the special case of transitive actions. Actually the same argument extends without much effort to quasi-transitive actions. In the sequel we call a $G$-set $X$ \textbf{quasi-transitive} if the action of $G$ on $X$ has finitely many orbits. \begin{thm} \label{t-torsion-free-quasitransitive} Let $G$ be a finitely generated torsion-free solvable group, not virtually abelian. Let $X$ be a faithful quasi-transitive $G$-set. Then $\vol_{G, X}(n) \succcurlyeq n^2$. \end{thm} The proof requires some additional ingredients. \begin{defin} A subgroup $H$ of a group $G$ is \textbf{absorbing} if for every $g \in G$, there is $n \geq 1$ such that $g^n \in H$. \end{defin} In the sequel, given $f, g\colon \N\to \N$ we write $f(n)\nsucccurlyeq g(n)$ for the negation of $f(n) \succcurlyeq g(n)$. Thus $f(n)\nsucccurlyeq g(n)$ if and only if $\liminf_{n\to \infty} \frac{f(n)}{g(n)} =0$. \begin{prop} \label{prop-subquadratic-robinson} Let $G$ be a finitely generated group, and $N$ a normal subgroup of $G$ such that $Q = G/N$ is solvable, and denote by $p_Q$ the projection of $G$ to $Q$. Let $X$ be a $G$-set such that $\vol_{G, X}(n)\nsucccurlyeq n^2$. Then for every $x \in X$, at least one of the following holds: \begin{enumerate}[label=\roman*)] \item $N \cap G_x$ is absorbing in $N$; \item $p_Q(G_x)$ has finite index in $Q$. \end{enumerate} \end{prop} \begin{proof} Fix $x \in X$, and suppose that $N \cap G_x$ is not absorbing in $N$.
By definition this means that there exists $g_1 \in N$ such that $g_1^n \notin G_x$ for every non-zero integer $n$. We want to show that $p_Q(G_x)$ has finite index in $Q$. According to Proposition \ref{prop-Lennox-isolator}, it suffices to prove that $p_Q(G_x)$ is absorbing in $Q$ since $Q$ is a finitely generated solvable group. So we shall prove that for every $q \in Q$, there are $n \geq 1$ and $g \in G_x$ such that $p_Q(g) = q^n$. Clearly it suffices to treat the case where $q$ has infinite order. Let $g_2 \in G$ be such that $p_Q(g_2)=q$. The subset $\mathcal{L} = \left\lbrace g_2^i g_1^j \, : \, i,j \in \Z \right\rbrace$ satisfies $\relvol_{(G, \mathcal{L})}(n) \succcurlyeq n^2$ since $g_1 \in N$ and $p_Q(g_2)=q$ has infinite order in $Q$. Since $X$ satisfies $\vol_{G, X}(n)\nsucccurlyeq n^2$ by assumption, by Lemma \ref{lem-exp-subset-growth} it cannot be the case that $\mathcal{L}$ is non-folded in $X$. Hence there exist $(i,j) \neq (k,\ell)$ such that $G_x$ contains $g = (g_2^i g_1^j)^{-1} g_2^k g_1^\ell = g_1^{-j} g_2^{-i} g_2^k g_1^\ell$. If $i = k$, then necessarily $j \neq \ell$, and hence $G_x$ contains a non-trivial power of $g_1$, which is impossible. So $n = k - i$ is non-zero and $p_Q(g) = q^n$. Since $g \in G_x$, up to replacing $g$ by $g^{-1}$ and $n$ by $-n$, we have obtained the desired conclusion. \end{proof} \begin{lem} \label{lem-chab-closure-FC-central} Let $G$ be a group, and $N$ be a normal subgroup of $G$ contained in the FC-center of $G$. Let $H,K \in \sub(G)$ be such that there is a sequence of conjugates of $K$ that converges to $H$ in $\sub(G)$. If $N \leq H$, then $N \leq K$. \end{lem} \begin{proof} Let $g \in N$, and let $g^G$ be the conjugacy class of $g$. Since $N$ is normal and $N \leq H$, $g^G$ is contained in $H$. Since $g^G$ is finite, it follows that the set of subgroups of $G$ containing $g^G$ forms an open neighbourhood of $H$ in $\sub(G)$. Hence it follows from the assumption that there is a conjugate of $K$ that contains $g^G$, which is equivalent to saying that $K$ contains $g^G$. Since $g$ was arbitrary, we have $N \leq K$. \end{proof} \begin{prop} \label{prop-restricted-transitive} Suppose $G$ is a finitely generated solvable group, and $A$ is an abelian normal subgroup of $G$ contained in the FC-center of $G$. Let $Q = G/A$. Let $X$ be a transitive $G$-set such that $\vol_{G, X}(n)\nsucccurlyeq n^2$. Then at least one of the following holds:\begin{enumerate}[label=\roman*)] \item there exists a normal subgroup $N$ of $G$ such that $N \leq A$, $A/N$ is a torsion group, and $N$ acts trivially on $X$; \item there exists a finite index subgroup $L$ of $Q$ such that for all $q \in L$, there is $r \geq 1$ such that $r (q-1) A \leq G_x$ for all $x \in X$. \end{enumerate} \end{prop} \begin{proof} Note that since $A$ is abelian, a subgroup $B$ of $A$ is absorbing in $A$ if and only if $A/B$ is a torsion group. Let \[ \mathcal{S}(X) = \overline{ \left\lbrace G_x \, : \, x \in X \right\rbrace } \subseteq \sub(G). \] By Lemma \ref{lem-growth-closure}, we have that $\vol_{G, G/H}(n)\nsucccurlyeq n^2$ for every $H \in \mathcal{S}(X) $. Hence we are in position to apply Proposition \ref{prop-subquadratic-robinson} to the action of $G$ on $G/H$, with $N = A$. Suppose that there exists $H \in \mathcal{S}(X) $ such that the first alternative of Proposition \ref{prop-subquadratic-robinson} holds, that is $A \cap H$ is absorbing in $A$. Then for every finitely generated subgroup $M$ of $A$ that is normal in $G$, there exists a finite index subgroup $M'$ of $M$ that is normal in $G$ and such that $M' \leq H$.
Now since $A$ is contained in the FC-center of $G$, the finitely generated subgroups $M$ of $A$ that are normal in $G$ exhaust $A$. Hence it follows that there is a normal subgroup $N$ of $G$ such that $N$ is absorbing in $A$ and $N \leq H$. Fix $x \in X$. Since the $G$-action on $X$ is transitive, $\mathcal{S}(X)$ is equal to the closure of the $G$-orbit of $G_x$ in $\sub(G)$. Since $N$ is contained in the FC-center of $G$, $H \in \mathcal{S}(X)$ and $N \leq H$, Lemma \ref{lem-chab-closure-FC-central} says that $N$ must be contained in $G_x$. It follows that $N$ acts trivially on $X$, and hence the first conclusion holds in this case. Hence in the sequel we assume that for every $H \in \mathcal{S}(X) $, the second alternative of Proposition \ref{prop-subquadratic-robinson} holds, that is $p_Q(H)$ has finite index in $Q$, where $Q = G/A$. Observe that for $H \in \mathcal{S}(X) $, since $p_Q(H)$ is finitely generated, there exists an open neighbourhood $\mathcal{U}$ of $H$ in $\mathcal{S}(X) $ such that $p_Q(K) \geq p_Q(H)$ for every $K \in \mathcal{U}$. Since $\mathcal{S}(X) $ is a compact space, $\mathcal{S}(X) $ can be covered by finitely many of these open sets, and hence there is a finite index subgroup $L$ of $Q$ such that $p_Q(H)$ contains $L$ for every $H \in \mathcal{S}(X) $. Fix a non-trivial element $q \in L$. We have that for every $H\in \mathcal{S}(X)$ there exists $h \in H$ such that $p_Q(h) = q$. Since the condition of containing $h$ is an open condition, using compactness again we obtain a finite subset $P = \left\lbrace h_1,\ldots,h_r\right\rbrace $ such that $p_Q(h_i) = q$ for all $i$ and every $H \in \mathcal{S}(X) $ intersects $P$. So $P$ is confining for $(H,G)$ for all $H \in \mathcal{S}(X) $, and the conclusion follows from Proposition \ref{p-Neumann-abelien}. \end{proof} \begin{proof}[Proof of Theorem \ref{t-torsion-free-quasitransitive}] The case where the group is metabelian has already been treated in Theorem \ref{thm-metab-notorsion}, regardless of the number of orbits. This implies that if $G$ is not restricted, then the statement holds true. So in the sequel we assume that $G$ is restricted. By Proposition \ref{prop-no-metab-sbgp-restriction} the subgroup $A = \Fit(G)$ is abelian, and $A$ is contained in the FC-center of $G$. Let $Q = G/A$. Suppose for a contradiction that $\vol_{G, X}(n)\nsucccurlyeq n^2$. Let $X_1,\ldots,X_m$ be the $G$-orbits in $X$. We have $\vol_{G, X_i}(n)\nsucccurlyeq n^2$ for all $i$ (Proposition \ref{prop-monoton}), so we can apply Proposition \ref{prop-restricted-transitive} to each $X_i$. Let $X_1,\ldots,X_\ell$ be the components that satisfy the first conclusion of the proposition. So for all $i \in \left\lbrace 1,\ldots,\ell \right\rbrace $ there exists a normal subgroup $N_i$ of $G$ that is an absorbing subgroup of $A$ and such that $N_i$ acts trivially on $X_i$. For $j \in \left\lbrace \ell + 1,\ldots, m \right\rbrace $, there is a finite index subgroup $L_j$ of $Q$ such that for every $q \in L_j$, there is $r_j \geq 1$ such that $r_j(q-1) A$ acts trivially on $X_j$. Note that $M_1 := \bigcap N_i$ is also an absorbing subgroup of $A$, and $M_1$ acts trivially on $\cup_{i \leq \ell} X_i$. Moreover since $G$ is not virtually abelian, the group $Q$ is infinite, and hence $L := \bigcap L_j$ is non-trivial. Fix a non-trivial element $q$ of $L$. Then we can find $r \geq 1$ such that $r (q-1) A$ acts trivially on $\cup_{j > \ell} X_j$.
Since $A$ is equal to its own centralizer in $G$ \cite[1.2.10]{Lennox-Rob} and $q$ is non-trivial, we have that $(q-1) A$ is non-trivial. Since $A$ is torsion-free, it follows that $r (q-1) A$ is also non-trivial. Hence if $M_2$ is the normal subgroup of $G$ generated by $r (q-1) A$, then $M_2$ is non-trivial and $M_2$ acts trivially on $\cup_{j > \ell} X_j$. The subgroup $M_1 \cap M_2$ is an absorbing subgroup of $M_2$, and hence is non-trivial. Since $M_1 \cap M_2$ acts trivially on $X$, we have reached the desired contradiction. \end{proof} \subsection{Higman-type extensions} In this paragraph we focus on the following specific class of groups: \begin{defin} We call a group $G$ a \textbf{Higman type extension} if there is a finitely generated free group $F_d$ and a normal subgroup $N$ of $F_d$ such that $G = F_d/N'$. \end{defin} So $G$ is an extension of the group $Q=F_d/N$ by the abelian normal subgroup $N/N'$. Our choice of terminology comes from the article \cite{Higman-torsionfree}, where Higman showed that the group $G$ is always torsion-free. \begin{prop} \label{prop-F/N'} Let $G = F_d/N'$ be a Higman type extension. Let $L$ be a torsion-free abelian subgroup of the group $Q=F_d/N$, and let $\mathcal{L}$ be a lift of $L$ to $G$. Then the following hold: \begin{enumerate} [label=\roman*)] \item $\mathcal{L}$ is a non-foldable subset of $G$; \item $\vol_{G, X}(n) \succcurlyeq f_{Q,L}(n)$ for every faithful $G$-set $X$. \end{enumerate} \end{prop} \begin{proof} Let $\Sigma$ be a finite subset of $\mathcal{L}$, and let $P =\{g^{-1}h\colon g, h\in \Sigma, g\neq h\}$. Note that all elements of $P$ have a non-trivial projection to $Q$. We shall prove that there is a non-trivial normal subgroup of $G$ that is contained in $H$ for every $H \in S_G(P,N/N')$. By Lemma \ref{l-non-foldable-confined}, this will prove that $\mathcal{L}$ is a non-foldable subset of $G$. The subgroup $M = N/N'$ is a torsion-free abelian normal subgroup of $G$. Let $r$ be the cardinality of $P$, and let $M_r = (r!) M$. According to Proposition \ref{p-Neumann-abelien}, for every $H \in S_G(P,M)$ there exists $b \in P$ such that $(\pi_Q(b)-1) M_r \leq H$. Since $\pi_Q(b)$ belongs to $L$ and $L$ is abelian, it follows that if we set $q = \prod_{b\in P} (\pi_Q(b)-1)$, then $q M_r$ is contained in $H$ for every $H \in S_G(P,M)$. Now since $L$ is torsion-free abelian, the group ring $\Z L$ has no zero divisors. Since $\pi_Q(b)$ is non-trivial for every $b \in P$, it follows that $q$ is non-zero in $\Z L \subseteq \Z Q$. Moreover according to \cite{Passi-annihilators}, $M = N/N'$ is a faithful $\Z Q$-module. Hence $qM \neq \left\lbrace 0\right\rbrace $, and since $M$ is torsion-free we also have $qM_r \neq \left\lbrace 0\right\rbrace $. Since $q M_r \leq H$ for every $H \in S_G(P,M)$, this completes the proof of the first statement. By considering for every element of $L$ a preimage in $G$ of minimal word length, we see that one can choose a lift $\mathcal{L}$ of $L$ such that $f_{Q,L}(n) \simeq \relvol_{(G, \mathcal{L})}(n)$. Hence the second statement follows from the first together with Lemma \ref{lem-exp-subset-growth}. \end{proof} \begin{remark} The result from \cite{Passi-annihilators} used in the above proof shows that if $Q$ is not a torsion group, then $G$ always contains a copy of $\Z \wr \Z$. In particular solvable Higman-type extensions satisfy Conjecture \ref{conj-torsion-free}. \end{remark} The previous result is most useful when the group $Q$ is not too small.
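For instance, taking $N = F_d'$ for concreteness, so that $G = F_d/F_d'' = \mathbb{FM}_d$ is the free metabelian group and $Q = F_d/N \simeq \Z^d$ is abelian, the largest available choice is $L = Q$ itself, for which $f_{Q,L}(n) \simeq n^d$; in that case Proposition \ref{prop-F/N'} only yields a Schreier growth gap $n^d$, which is weaker than the gap $n^{d+1}$ provided by Theorem \ref{thm-freemetab-growth}.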
We illustrate this for free solvable groups (compare with Theorem \ref{thm-freemetab-growth}): \begin{thm}\label{thm-free-solvable} Let $G = \mathbb{FS}_{d,\ell}$ be the free solvable group of rank $d \geq 2$ and length $\ell \geq 3$. Then $G$ has a Schreier growth gap $\exp(n)$. \end{thm} In the proof we will use the following easy lemma. \begin{lem} \label{lem-Free-sol-exp-growth} Let $Q = \mathbb{FS}_{d,\ell}$ be the free solvable group of rank $d \geq 2$ and length $\ell \geq 2$, and let $L = Q^{(\ell-1)}$. Then $\relvol_{(Q,L)}(n) \simeq \exp(n)$. \end{lem} \begin{proof} Let $x_1,\ldots, x_d$ be the images in $Q$ of free generators of $F_d$, and set $c = w_{\ell-1} \in L$, where $w_{i}$ is defined inductively by $w_{1}=[x_1,x_2]$ and $w_{i+1} = [x_1,w_i]$. For $i\geq 0$ we also set $c_i = x_1^i c x_1^{-i}$. The map from $\left\lbrace 0,1\right\rbrace ^n$ to $L$ that maps $(\varepsilon_1,\ldots,\varepsilon_n)$ to $w(\varepsilon_1,\ldots,\varepsilon_n) = c_1^{\varepsilon_1} \cdots c_n^{\varepsilon_n}$ is injective, and $w(\varepsilon_1,\ldots,\varepsilon_n)$ has word length at most $(\left|c \right| +2)n$ with respect to $x_1,\ldots, x_d$. Hence the ball of radius $(\left|c \right| +2)n$ contains at least $2^n$ elements of $L$, and it follows that $L$ indeed has relative exponential growth in $Q$. \end{proof} \begin{proof}[Proof of Theorem \ref{thm-free-solvable}] We have $G = F_d / N'$, where $N = F_d^{(\ell-1)}$, and $Q = F_d / N \simeq \mathbb{FS}_{d,\ell-1}$. The subgroup $L = Q^{(\ell-2)}$ is a torsion-free abelian subgroup of $Q$, and $f_{Q,L}(n) \simeq \exp(n)$ by Lemma \ref{lem-Free-sol-exp-growth}. Therefore the conclusion follows from Proposition \ref{prop-F/N'}. \end{proof} \section{Additional comments and examples} \subsection{Pointed growth of transitive actions} When $X$ is a transitive $G$-set, there is another natural notion of growth, called the pointed growth of the action. It is defined by fixing a point $x \in X$, and considering $\vol_{G, X,x}(n)= |S^n \cdot x|$. By transitivity of the action, $\vol_{G, X,x}(n)$ is easily seen to be independent of the choice of $x$ up to $\simeq$. Clearly $\vol_{G, X}(n) \succcurlyeq \vol_{G, X,x}(n)$, and in general $\vol_{G, X}(n)$ is strictly larger than $\vol_{G, X,x}(n)$ (see the examples below). Here we would like to emphasize that in our setting the growth function $\vol_{G, X}(n)$ is more natural and better suited for our purposes than the pointed growth. The first advantage of considering $\vol_{G, X}(n)$ rather than the pointed growth is that the latter only makes sense for transitive actions. Restricting ourselves to transitive actions is not desirable here, for instance because it is not stable when passing to a subgroup. On the contrary $\vol_{G, X}(n)$ remains defined when passing to a subgroup and is monotone under this operation (Propositions \ref{prop-monoton} and \ref{prop-finite-index}). Another advantage of $\vol_{G, X}(n)$ is the fact that it behaves nicely with respect to the topology on $\sub(G)$. One illustration of this is that our notion of non-foldable subsets, which by definition provides lower bounds for $\vol_{G, X}(n)$ (Lemma \ref{lem-exp-subset-growth}), has a simple reinterpretation in terms of confined subgroups in $\sub(G)$ (Lemma \ref{l-non-foldable-confined}). Another illustration is given by Lemma \ref{lem-growth-closure}, which says that $\vol_{G, X}(n)$ decreases when taking a limit point in $\sub(G)$. 
Here we also note that this lemma fails if one replaces $\vol_{G, X}(n)$ by the pointed growth (see Example \ref{ex-point-heis}). The following simple examples also illustrate that the lower bounds obtained for $\vol_{G, X}(n)$ fail in general for the pointed growth. \begin{example} Let $G = \Z \wr \Z $. Recall that $\vol_{G, X}(n) \succcurlyeq n^2$ for every faithful $G$-set $X$ by Theorem \ref{thm-wreath}. We claim that this bound fails for the pointed growth. Consider a pair of generators $s,t$, where $t$ is a generator of the base group $\Z$, and $s$ is a generator of lamp group at position $0$, and for $i\in \Z$ set $s_i=t^ist^{-i}$. Fix an increasing sequence of integers $d_k \geq 1$, and let $H$ be the subgroup of $G$ contained in $\oplus \Z$ consisting of all configurations $f \in \oplus \Z$ such that $f(d_k)\in 2^k\Z$. The action of $G$ on $X:=G/H$ is easily seen to be faithful. In particular $\vol_{G, X}(n) \succcurlyeq n^2$. By contrast, we claim that if $(d_k)$ grows sufficiently fast, then the pointed growth of the action of $G$ on $X$, henceforth denoted $v(n)$, can be made strictly subquadratic. To see this, let $k(n):=\max\{k \colon d_k\le n\}$. Let $g\in G$ be an element of word length $|g|\le n$. Then $g$ can be expressed as $g=t^\ell \prod_{i=-n}^ns_i^{m_i}$, with $\ell \leq n$ and $m_i\leq n$. Note that for each $i$, we have $s_i^{m_i}H=H$ if $i\neq d_k, k\ge 1$, and for every $i$ of the form $i=d_k$ for some $k$ we have $s_{d_k}^{m_{d_k}}H=s_{d_k}^{r_k}H$, where $r_i$ is the remainder of the euclidean division of $m_{d_k}$ by $2^k$. Since moreover the $s_i$ commute, we have $gH = t^\ell s_{d_1}^{r_1}\cdots s_{d_{k(n)}}^{r_{k(n)}}H$. Counting possibilities for $\ell$ and $r_i$ with $ i\le k(n)$, we obtain the upper bound \[v(n)\preccurlyeq n\prod_{i=1}^{k(n)} 2^i \preccurlyeq n2^{k(n)^2}.\] For a fixed $\alpha >1$, we choose $d_k=2^{\alpha k^2}$ for all $k$. The above computation yields $v(n) \preccurlyeq n^{1+\frac{1}{\alpha}}$, and hence $v(n) \nsucccurlyeq n^2$. \end{example} The following example was suggested by Yves Cornulier. \begin{example} \label{ex-point-heis} Consider the Heisenberg group $G=H_3(\Z) = \langle a, b, c\colon [a,b]=c, [a,c]=[b, c]=1\rangle$. Recall that every $g\in G$ admits a unique decomposition $g=c^zb^ya^x$, with $x, y, z\in \Z$. The word metric is given up to constants by $|g|\simeq \max (|x|, |y|, \sqrt{|z|})$, and as a consequence the volume growth of $G$ is $\vol_G(n)\simeq n^4$ (see e.g. \cite[\S 14.1.1]{Drutu-Kapovich}). Recall from Proposition \ref{prop-n4-growth} that the group $G$ is non-foldable, and in particular every faithful $G$-set $X$ satisfies $\vol_{G, X}(n)\simeq \vol_G(n)\simeq n^4$. Now let $H$ be the cyclic subgroup generated by $a$. For every $g=c^zb^ya^x$, the coset $gH$ coincides with $c^zb^yH$. If $|g|\leq n$ we have $|y|\leq Cn, |z|\leq Cn^2$ for some $C>0$, so that there are $\simeq n^3$ possibilities for the pair $(y, z)$. Hence the pointed growth of the $G$-action on $G/H$ is $\simeq n^3$. Note that since $H$ is not confined, this example also shows that Lemma \ref{lem-growth-closure} fails if one replaces $\vol_{G, X}$ by the pointed growth. \end{example} \subsection{Connection with groups of dynamical origin} \label{s-non-embeddings} As mentioned in the introduction, Schreier growth gaps provide an obstruction to the existence of embeddings between groups, which has applications to the study of topological full groups. 
Recall that given an action $G \acts \mathfrak{C}$ by homeomorphisms on the Cantor set, the topological full group of the action is the group $F(G, \mathfrak{C})$ of all homeomorphisms of $\mathfrak{C}$ that locally coincide with elements of $G$. This notion was introduced (for $G=\Z$) by Giordano, Putnam and Skau, and further developed by Matui, and has been studied extensively in recent years \cite{Ju-Mo,Nek-simple-dyn, Nek-frag}. This construction also encompasses many more specific groups studied in the literature, such as Thompson's groups $V$ and some generalisations, and subgroups of the group $\operatorname{IET}$ of interval exchanges. Despite various advances, it remains quite mysterious how the properties of the $G$-action on $\mathfrak{C}$ constrain the group $F(G, \mathfrak{C})$ and its possible subgroups. Non-foldable subsets and graphs of actions naturally fit in this setting due to the following basic and well-known remark. \begin{prop} Let $G$ be a finitely generated group and $G\acts \mathfrak{C}$ be an action on the Cantor set. Then for every finitely generated subgroup $H\le F(G, \mathfrak{C})$, the identity map $\mathfrak{C} \to \mathfrak{C}$ defines a Lipschitz embedding of the graph $\Gamma(H, \mathfrak{C})$ inside $\Gamma(G, \mathfrak{C})$. In particular, we have $\vol_{H, \mathfrak{C}}(n)\preceq \vol_{G, \mathfrak{C}}(n)$ and $\asdim(H, \mathfrak{C})\le \asdim (G, \mathfrak{C})$. \end{prop} Hence our results immediately provide restrictions on the nature of the solvable subgroups of the topological full groups of various actions. Without being exhaustive, we point out some simple illustrations. \subsubsection{Topological full groups of $\Z^d$-actions} \label{subsubsection-full-gp} Matui showed that for every minimal action of $\Z$ on $\mathfrak{C}$ that is not an odometer action, the topological full group $F(\Z, \mathfrak{C})$ contains a copy of the lamplighter group $C_2 \wr \Z$ \cite{Mat-exp}. By contrast, Theorem \ref{thm-wreath} shows that neither $\Z \wr \Z$ nor $C_p \wr \Z^{2}$ embeds in $F(\Z, \mathfrak{C})$. The case of $C_p \wr \Z^{2}$ solves Conjecture 2 from \cite{Salo}. More generally for every action of $\Z^d$ on $\mathfrak{C}$, the groups $\Z \wr \Z^d$ and $C_p \wr \Z^{d+1}$ do not embed in $F(\Z^d, \mathfrak{C})$. \subsubsection{Brin--Thompson groups} Consider the generalizations $nV$ of Thompson's group $V$ from \cite{Brin-nV}. It is not difficult to see that for the natural action of $nV$ on the Cantor set $\mathfrak{C}$, the graph of the action on each orbit is quasi-isometric to a product of $n$ trees (see e.g. \cite[Lemma 11.10]{MB-graph-germs}). Thus $\Gamma(nV,\mathfrak{C})$ has asymptotic dimension $n$. Hence Theorem \ref{thm-wreath} implies: \begin{cor} The group $nV$ does not contain a wreath product $A \wr \Z^{n+1}$ with $A\neq \{1\}$. \end{cor} By contrast it is not difficult to see that $C_2 \wr \Z^n$ embeds in $nV$. Hence this gives another proof of the result \cite[Corollary 11.20]{MB-graph-germs} that $nV$ embeds in $mV$ only if $n\le m$. In the case $n=2$, Corollary \ref{cor-poly-asdimX} also immediately implies the following (this problem was notably raised in \cite{MO-zarem}): \begin{cor} \label{cor-nV-poly} Every polycyclic subgroup of $2V$ is virtually abelian. \end{cor} \subsubsection{Interval exchanges} Dahmani, Fujiwara and Guirardel studied solvable subgroups of the group $\operatorname{IET}$ of interval exchanges in \cite{DFG-sol}.
It is well known that the natural action of a finitely generated subgroup of $\operatorname{IET}$ has polynomial growth. One result of \cite{DFG-sol} is that every finitely generated torsion-free solvable subgroup of $\operatorname{IET}$ is virtually abelian. They deduce in particular that this holds true for polycyclic subgroups \cite[Cor.\ 3.2]{DFG-sol}. The arguments in \S \ref{s-noetherian} provide a soft proof of this last result, since Corollary \ref{cor-poly-growth} implies that every polycyclic subgroup of $\operatorname{IET}$ is virtually nilpotent, and hence virtually abelian by \cite{Novak-disc-IET}. However we stress that the absence of torsion-free solvable subgroups in $\operatorname{IET}$ \cite{DFG-sol} cannot be proven relying only on considerations on growth (as torsion-free solvable groups can admit actions of polynomial growth). \bibliographystyle{amsalpha} \bibliography{bib-growth} \end{document}
2205.11897v1
http://arxiv.org/abs/2205.11897v1
Complexity of Cut-and-Project Sets of Polytopal Type in Special Homogeneous Lie Groups
\documentclass[12pt, a4paper, english]{amsart} \oddsidemargin0.3cm \evensidemargin0.3cm \textwidth15.7cm \textheight 22.5 cm \topmargin=1cm \usepackage[utf8]{inputenc} \usepackage[T1]{fontenc} \usepackage[english]{babel} \usepackage{csquotes} \usepackage{enumitem} \usepackage[pdftex]{graphicx} \usepackage{booktabs} \usepackage{latexsym} \usepackage{amsmath,amssymb,amsthm,amsfonts,amscd} \usepackage{mathrsfs} \usepackage[mathscr]{euscript} \usepackage{color} \usepackage{dsfont} \usepackage[backend=bibtex, style=numeric, url=false, doi=false, hyperref=false,backref=false]{biblatex} \addbibresource{literatur.bib} \usepackage{stmaryrd} \SetSymbolFont{stmry}{bold}{U}{stmry}{m}{n} \usepackage{mleftright} \usepackage{tikz} \usetikzlibrary{arrows, angles, quotes, calc,through,backgrounds,matrix,decorations.pathmorphing} \usepackage{multirow} \usepackage[font=small, format=plain, labelfont=bf, up, justification=justified, singlelinecheck=false]{caption} \usepackage{float} \usepackage{faktor} \usepackage{xfrac} \newcommand{\rslant}[2]{{\raisebox{.1em}{$#1$}\left/\raisebox{-.1em}{$#2$}\right.}} \newcommand{\lslant}[2]{{\raisebox{-.1em}{$#1$}\left\backslash\raisebox{.1em}{$#2$}\right.}} \usepackage{colonequals} \newenvironment{rcases}{\left.\begin{aligned}}{\end{aligned}\right\rbrace} \usepackage{xcolor} \usepackage{hyperref} \usepackage[nameinlink,noabbrev, capitalise]{cleveref} \usepackage[title]{appendix} \setlength{\topmargin}{-15mm} \setlength{\parindent}{0em} \tolerance=500 \patchcmd{\subsection}{-.5em}{.5em}{}{} \newtheoremstyle{style1} {1cm} {1cm} {\normalfont} {} {\normalfont\bfseries} {} { } {\textbf{\thmname{#1}\thmnumber{ #2} \thmnote{(#3)}}} \newtheoremstyle{style2} {1cm} {1cm} {\itshape } {} {\normalfont\bfseries} {} { } {\textbf{\thmname{#1}\thmnumber{ #2} \thmnote{(#3)}}} \theoremstyle{style2} \newtheorem{Theorem}{Theorem}[section] \newtheorem{Lemma}[Theorem]{Lemma} \newtheorem{Corollary}[Theorem]{Corollary} \newtheorem{Proposition}[Theorem]{Proposition} \theoremstyle{style1} \newtheorem{Definition}[Theorem]{Definition} \newtheorem{Remark}[Theorem]{Remark} \newtheorem{Example}[Theorem]{Example} \newtheorem{Convention}[Theorem]{Convention} \numberwithin{equation}{section} \newcommand{\begriff}[1]{\emph{#1}} \addtolength\textheight{70pt} \setlist[enumerate]{topsep=0.65ex, itemsep=0.5ex} \newcommand{\HH}{\mathbb{H}} \newcommand{\KK}{\mathbb{K}} \newcommand{\NN}{\mathbb{N}} \newcommand{\RR}{\mathbb{R}} \newcommand{\ZZ}{\mathbb{Z}} \newcommand{\rk}{\mathrm{rk}} \newcommand{\modulo}{\mathrm{mod}} \DeclareMathOperator{\Ad}{\mathrm{Ad}} \DeclareMathOperator{\ad}{\mathrm{ad}} \DeclareMathOperator{\homdim}{\mathrm{homdim}} \DeclareMathOperator{\codim}{\mathrm{codim}} \DeclareMathOperator{\tr}{\mathrm{tr}} \DeclareMathOperator{\cA}{\mathcal{A}} \DeclareMathOperator{\cB}{\mathcal{B}} \DeclareMathOperator{\cF}{\mathcal{F}} \DeclareMathOperator{\cH}{\mathcal{H}} \DeclareMathOperator{\cP}{\mathcal{P}} \DeclareMathOperator{\cS}{\mathcal{S}} \DeclareMathOperator{\fg}{\mathfrak{g}} \renewcommand{\labelenumi}{(\alph{enumi})} \renewcommand{\epsilon}{\varepsilon} \setcounter{tocdepth}{2} \title[Complexity of Cut-and-Project Sets]{\textbf{Complexity of Cut-and-Project Sets of Polytopal Type in Special Homogeneous Lie Groups}} \author{Peter Kaiser} \address{Institut für Algebra und Geometrie, KIT, Karlsruhe, Germany} \email{[email protected]} \urladdr{https://www.math.kit.edu/didaktik/~kaiserp/de} \begin{document} \setlength{\parindent}{0mm} \renewcommand{\thesection}{\arabic{section}} 
\renewcommand{\thesubsection}{\thesection.\arabic{subsection}} \begin{abstract} The aim of this paper is to determine the asymptotic growth rate of the complexity function of cut-and-project sets in the non-abelian case. In the case of model sets of polytopal type in homogeneous two-step nilpotent Lie groups we can establish that the complexity function asymptotically behaves like $r^{\homdim(G) \dim(H)}$. Further we generalize the concept of acceptance domains to locally compact second countable groups. \end{abstract} \maketitle \section{Introduction} This article is concerned with the complexity of discrete subsets of locally compact groups which obey some form of aperiodic order.\par For discrete subsets of locally compact abelian groups, notably for discrete subsets of $\RR^n$, there is an established notion of complexity based on the study of the so-called patch counting function \cite{ArnouxMauditShiokawa, Baryshnikov, Julien, KoivusaloWalton1, Lagarias2, Lagarias3, Moody1, Moody2, Vuillon}. More recently, there has been an approach to extend results about discrete subsets of locally compact abelian groups to general locally compact groups \cite{Beckus, BjorklundHartnick1, BjorklundHartnick2, BjorklundHartnick3, LenzStrungaru, Schlottmann1, Schlottmann2}.\par In the present article we contribute to this program by extending the notion of complexity to discrete subsets of non-abelian locally compact groups. More specifically, we are going to generalize an approach of Julien, \cite{Julien}, and Koivusalo and Walton, \cite{KoivusaloWalton1}. While the theory works in full generality, we will obtain our strongest results in the case of two-step nilpotent Lie groups. \subsection{Aperiodic order in the Euclidean case} Consider the abelian group $(\RR^n, +)$ as a metric group with respect to the standard Euclidean metric. A set $\Lambda \subset \RR^n$ is called \textit{locally finite} if for all bounded sets $B \subset \RR^n$ the intersection $\Lambda \cap B$ is finite. For these sets one can define the patch counting function $p(r)$ (see \cref{Def:p}) as a measure of their complexity. Examples of locally finite sets are lattices. Their complexity functions are constantly equal to $1$, meaning that lattices are highly structured. In the case of aperiodic ordered sets the patch counting function grows at least linearly \cite{Lagarias2, Lagarias3, Moody1, Moody2, Vuillon}. A locally finite set with $p(r)< \infty$ for all $r>0$ is called a set with \textit{finite local complexity} or an \textit{FLC set}.\par\medskip There are two important methods to construct FLC sets, either by \textit{substitution} or by \textit{cut-and-project}. We are interested in the cut-and-project approach, which is due to Yves Meyer, who pioneered the field of aperiodic order and laid its foundations in the 1960s \cite{Meyer1, Meyer2, Meyer3}. The idea in the cut-and-project approach is to consider a lattice $\Gamma$ in the product $\RR^n \times \RR^d$. Then one chooses a subset $W \subset \RR^d$, which is called the \textit{window}. The projection of $(\RR^n \times W) \cap \Gamma$ to $\RR^n$ results in a point set, which is called a \textit{cut-and-project set}. Under some extra conditions this cut-and-project set is an FLC set; in this case it is called the \textit{model set} defined by the data $\Lambda(\RR^n, \RR^d, \Gamma, W)$. Such sets have been studied from different perspectives, \cite{BaakeHuckStrungaru, KoivusaloWalton1, Hof3, Lagarias1, Moody3}.
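\par\medskip As a concrete illustration, consider the standard data behind the Fibonacci point set: take $n=d=1$ and let \begin{equation*} \Gamma := \left\{ \left(a+b\varphi,\, a+b\varphi'\right) \,\middle\vert\, a,b \in \ZZ \right\} \subset \RR \times \RR, \qquad \varphi = \tfrac{1+\sqrt{5}}{2},\quad \varphi' = \tfrac{1-\sqrt{5}}{2}, \end{equation*} which is a lattice in $\RR \times \RR$. The projection to the first factor is injective on $\Gamma$, while the projection to the second factor has dense image $\ZZ + \ZZ\varphi'$. Choosing as window a non-degenerate bounded interval $W \subset \RR$ whose endpoints do not lie in $\ZZ + \ZZ\varphi'$, the projection of $(\RR \times W) \cap \Gamma$ to the first factor is an aperiodic Delone subset of $\RR$, and for such one-dimensional polytopal windows the complexity function grows linearly in $r$, in accordance with the theorem of Koivusalo and Walton recalled below.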
\par\medskip In the 1980s the popularity of this field was boosted by the discovery of \textit{quasi-crystals}, \cite{Shechtman}. After this discovery physicists, crystallographers and mathematicians worked on models to describe these newly discovered aperiodic structures. Physicists are primarily interested in quasi-crystals in $\RR^n$, for $n \leq 3$, but mathematically the restriction on the dimension is unnatural and therefore was rapidly dropped. A history of the developments in this time can be found in the book by Senechal, \cite{Senechal}. The characterising property of a quasi-crystal is \textit{pure-point diffraction}, which is a global property and was studied in \cite{BaakeLenz, Dworkin, Hof1, Hof2, Lagarias2b}. Another line of research is to characterise the structure of an aperiodic ordered set by some local data, namely its \textit{repetitivity} or its \textit{complexity}, \cite{Lagarias2, Lagarias3, Moody1, Moody2, Vuillon}. For a comprehensive overview of the field see \cite{BaakeGrimm}.\par \medskip This paper will focus on understanding the complexity of model sets. We want to determine how the \textit{complexity function $p(r)$} behaves asymptotically. \begin{Definition}[Patch] Let $X$ be a metric space, $\Lambda \subset X$ a locally finite subset, $\lambda \in \Lambda$ and $r\in \RR^+$. Then the \textit{$r$-patch} $P_r(\lambda)$ is the constellation of points from $\Lambda$ around $\lambda$ which have distance at most $r$ to $\lambda$, i.e. $ P_r(\lambda):= B_r(\lambda) \cap \Lambda$.\par\medskip If $G$ is an lcsc group, the patches of radius $r$ induce an equivalence relation on the elements of $\Lambda \subset G$ by \begin{equation} \label{Def:requiv} \lambda \sim_r \mu :\Leftrightarrow P_r(\lambda)\lambda^{-1}=P_r(\mu)\mu^{-1}. \end{equation} We will denote the \textit{$r$-equivalence class of $\lambda$} by \begin{equation*} A_r^G(\lambda):= \{\mu \in \Lambda \mid \lambda \sim_r \mu\} \subset G \end{equation*} and the set of all equivalence classes by \begin{equation*} A_r^G:=\{A_r^G(\lambda) \mid \lambda \in \Lambda\}. \end{equation*} \end{Definition} \begin{Definition}[Complexity function]\label{Def:p} Let $G$ be an lcsc group and $\Lambda \subset G$ a locally finite subset. Then the \textit{complexity function} $p(r)$ is given by \begin{equation*} p(r) := \left\vert\left\{B_r(e) \cap \Lambda \lambda^{-1} \,\middle\vert\, \lambda \in \Lambda \right\} \right\vert = \left\vert\left\{P_r(\lambda) \lambda^{-1} \,\middle\vert\, \lambda \in \Lambda \right\}\right\vert = \left\vert A_r^G \right\vert . \end{equation*} \end{Definition} The function $p$ is also called the \textit{patch-counting function} and first appears in the work by Lagarias and Pleasants, \cite{Lagarias3}. Note that model sets carry more information than the underlying point set itself; this can be used to determine the complexity function. Some early work in this context was done in \cite{ArnouxMauditShiokawa} and \cite{Baryshnikov} for some special cases and low dimensions. A general approach first appeared in the paper by Julien, \cite{Julien}; the main idea is that each class of patches corresponds to a certain region inside the window, the so-called \textit{acceptance domain}. Optimal results can be obtained in the case of \textit{polytopal windows}, that is when $W$ is a convex polytope. The ideas of Julien were picked up by Koivusalo and Walton, \cite{KoivusaloWalton2}, who proved the following theorem.
We will assume that the stabilizers of the hyperplanes which bound the window are trivial; in the original theorem the role of these stabilizers is addressed. \begin{Theorem}[Koivusalo, Walton, {\cite[Theorem 7.1]{KoivusaloWalton2}}] Consider a model set $\Lambda(\RR^n, \RR^d, \Gamma, W)$ with a polytopal window $W$. Assume that the stabilizers of the hyperplanes which bound the window are trivial. Then the complexity grows asymptotically as $p(r) \asymp r^{n \cdot d}$. \end{Theorem} \subsection{Aperiodic order beyond the Euclidean case} A natural generalisation of FLC sets in $\RR^n$ is to consider FLC sets in arbitrary \textit{locally compact groups} equipped with some metric. We will be interested in studying their complexity functions. We emphasise that in doing so the choice of metric is important. By the restriction to \textit{metric locally compact second countable (lcsc)} groups a theorem of Struble, \cite{Struble}, guarantees the existence of a `nice' metric. `Nice' means in this context that the metric is right-invariant, proper and compatible. This is the setup in which the Euclidean ideas are generalized \cite{Beckus, BjorklundHartnick1, BjorklundHartnick2, BjorklundHartnick3, LenzStrungaru}.\par \medskip The cut-and-project approach also applies in this more general setup, \cite{BjorklundHartnick3,Schlottmann1, Schlottmann2}. The question we want to answer is how the approach of Julien, and of Koivusalo and Walton, can be translated to this more general set-up. \subsection{Results on two-step homogeneous Lie groups} Ideally one would like to describe the complexity of FLC sets for all lcsc groups. However, this turns out to be quite challenging, so we will have to introduce some more restrictions. In particular, since we want to follow the approach of Julien, \cite{Julien}, and Koivusalo and Walton, \cite{KoivusaloWalton2}, we need a notion of hyperplanes. So a first question is: in which groups can we define hyperplanes?\par We will consider homogeneous Lie groups. These groups are nilpotent, real, finite-dimensional, connected, simply connected and admit a family of dilations which replaces the scalar multiplication. For a detailed discussion of such groups we refer to the book by Fischer and Ruzhansky, \cite{FischerRuzhansky}. For this class of groups it is possible to identify the underlying set of the Lie group $G$ with the corresponding Lie algebra $\fg$. Since $\fg$ is a vector space we can define hyperplanes in the usual sense.\par Moreover these groups admit a canonical quasi-isometry class of homogeneous norms, which all lead to the same complexity, resolving the aforementioned issue of the dependence on the choice of metric. It turns out that balls with respect to such norms have exact polynomial growth, i.e. the volume of a ball $B_r(e)$ grows as $r^{\alpha}$. The exponent of this growth is called the \textit{homogeneous dimension} of the homogeneous Lie group.\par\medskip A second restriction has to be made since we also need that the group acts on the space of hyperplanes in the vector space underlying $\fg$. We can show that this is the case exactly if the Lie group has nilpotency degree one or two, i.e. if it is abelian or two-step nilpotent. For higher nilpotency degree the action of the group bends the hyperplanes into algebraic hypersurfaces.\par\medskip Naively one would expect that the complexity function of a model set $\Lambda(G,H, \Gamma, W)$ would depend on the dimension of the Lie groups $G$ and $H$, i.e.
$p(r)\asymp r^{\dim(G)\dim(H)}$, or else on their homogeneous dimensions, i.e. $p(r) \asymp r^{\homdim(G)\homdim(H)}$, but surprisingly both guesses turn out to be false. In fact the two factors behave differently: on the $G$-side the homogeneous dimension replaces the dimension, while on the $H$-side it does not. More precisely, we prove the following theorem, which is the main theorem of this paper. \begin{Theorem}[Informal version of the main theorem] Consider a model set $\Lambda(G, H, \Gamma, W)$ with a convex polytopal window $W$, where $G$ and $H$ are two-step nilpotent homogeneous Lie groups. Assume that the stabilizers of the hyperplanes which bound the window are trivial. Then the complexity grows asymptotically as $p(r) \asymp r^{\homdim(G) \cdot \dim(H)}$. \end{Theorem} \subsection{Method of proof} The proof of the main theorem consists of four steps. The first three are similar to the Euclidean case, while the fourth one uses different techniques.\par\medskip First we will establish the connection between the equivalence classes of patches and the acceptance domains in \cref{Sec:Complexity}. This is a translation from the Euclidean case considered in \cite{KoivusaloWalton2}. The only difference is that we have to be a bit more careful since our groups are in general non-abelian. The established result is the same as in the Euclidean case. \begin{Theorem}[Acceptance domains vs. Equivalence classes] Let $\Lambda(G,H, \Gamma, W)$ be a model set, with $G, H$ lcsc groups, $\Gamma \subset G \times H$ a uniform lattice and $W\subset H$ a non-empty, pre-compact, $\Gamma$-regular window. Then \begin{equation*} A_r^H(\lambda) \subset \left(\bigcap_{\mu \in \cS_r(\lambda)}\mu \mathring{W}\right)\cap \left(\bigcap_{\mu \in \cS_r(\lambda)^\mathrm{C}} \mu W^\mathrm{C} \right)=:W_r(\lambda). \end{equation*} The $W_r(\lambda)$ are called \textit{$r$-acceptance domains of $\lambda$}. Further for $\lambda \not\sim_r \lambda'$ we have \begin{equation*} W_r(\lambda) \cap W_r(\lambda') = \emptyset. \end{equation*} Finally we have \begin{equation*} \overline{W} = \bigcup_{\lambda \in A_r^G}\overline{W_r(\lambda)}. \end{equation*} \end{Theorem} We will give the precise definition of $\cS_r(\lambda)$ in \cref{Sec:Complexity} and of $A_r^H(\lambda)$ in \cref{Def:Pre}. For now think of $A_r^H(\lambda)$ as the projection of all the points in the equivalence class of $\lambda$ to $H$. Further $\cS_r(\lambda)$ and $\cS_r^\mathrm{C}(\lambda)$ are roughly speaking a decomposition of the possible neighbours of $\lambda$ projected to $H$.\par\medskip As a second step we establish a lattice point counting argument in \cref{Sec:Lattice}. \begin{Proposition}[Growth Lemma] Let $G$ and $H$ be lcsc groups, let $\Lambda(G,H, \Gamma, W)$ be a model set with a uniform lattice $\Gamma \subset G \times H$, and let ${\emptyset \neq A \subset H}$ be a bounded open set. Then the asymptotic growth of the number of lattice points inside $B^G_r(e) \times A$ is bounded by \begin{equation*} \mu_G\left(B^G_{r-k_1}(e)\right) \ll \left\vert(B^G_r(e)\times A)\cap \Gamma \right\vert \ll \mu_G\left(B^G_{r+k_2}(e)\right), \end{equation*} for some constants $k_1,k_2>0$ as $r \to \infty$. \end{Proposition} \begin{Remark} For the asymptotic behaviour we use the common notation $g(t) \ll f(t)$ which means $\limsup\limits_{t \to \infty}\left\vert \frac{g(t)}{f(t)} \right\vert < \infty$. If both $g(t) \ll f(t)$ and $g(t) \gg f(t)$ hold, we write $g(t) \asymp f(t)$.
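For instance, $r^{\kappa} + r^{\kappa-1} \asymp r^{\kappa}$ for every $\kappa \geq 1$, while $r \ll r^{2}$ but $r \not\asymp r^{2}$.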
\end{Remark} This result connects the number of lattice points in sets of a certain form to the measure of these sets. The standard proof is via ergodic theory, but we will give a more elementary proof.\par\medskip In the third step we show in \cref{Sec:Growth} that we can estimate the number of acceptance domains by extending the boundary of the window. This is also done in \cite{KoivusaloWalton2}; the new regions inside the window are called \textit{cut regions} in this paper. Again the result we obtain is the same as in the Euclidean case, but we have to overcome a major difference in its proof: in the Euclidean case the group acts on a hyperplane by translations, which preserve the directions of the hyperplane; formulated differently, a translation does not rotate a hyperplane. In our general setup the group action can rotate hyperplanes, leading to a new phenomenon.\par \begin{Theorem}[Cut regions vs. Acceptance domains] For a polytopal model set $\Lambda(G,H,\Gamma,W)$, where the window is bounded by $P_1,...,P_N$, and $G$ and $H$ are at most two-step nilpotent homogeneous Lie groups, we have \begin{equation*} \left\vert A_r^H\right\vert \leq \# \pi_0 \left( H \setminus \bigcup_{\mu\in\cS_r} \bigcup_{i=1}^N \mu P_i\right). \end{equation*} Moreover, for a certain ball $B_h(c_W)\subset W$ we also have \begin{equation*} \# \pi_0 \left( B_h(c_W) \setminus \bigcup_{i=1}^N \bigcup_{\mu \in U_i(r)} \mu P_i \right) \leq \left\vert A_r^H \right\vert. \end{equation*} \end{Theorem} The last step is devoted to solving the problem with the rotating hyperplanes, see \cref{Sec:Combinatorics}. To do so we use the theory of hyperplane arrangements; this tool was not needed in the abelian case. The study of such arrangements has a long history and goes back as far as \cite{Schlafli}; more modern approaches are due to Grünbaum \cite{Grunbaum1, Grunbaum2, Grunbaum3} and Zaslavsky \cite{Zaslavsky}. Two sources for a survey of the field are the book by Dimca, \cite{Dimca}, and the lecture notes by Stanley, \cite{Stanley}. An important tool for our combinatorial argument is a theorem of Beck, \cite{Beck}; we need a special version of it, and to prove this version we need some combinatorial inputs from \cite{Szekely} and \cite{SzemerediTrotter}. This lets us extend the standard counting formulas to our specific context and we can prove the following theorem, which will then finish the proof of the main theorem. \begin{Theorem}[Higher dimensional local dual of Beck's Theorem] Let $\cH$ be a hyperplane arrangement in $\RR^d$, let $B \subset \RR^d$ be convex and let $c_d$ be a constant depending only on the dimension. Further let $\cH$ consist of $d$ families $F_1,...,F_d$ with $\vert F_i \vert = \frac{n}{d}$ and such that for all $(f_1,...,f_d) \in F_1 \times ... \times F_d$ we have $B \cap \bigcap_{i=1}^d f_i = \{p\}$ for some point $p \in B$. Moreover assume that there is $c< \frac{1}{100}$ such that at most $c \cdot \vert F_i \vert$ hyperplanes from $F_i$ can intersect in one point. Then the number of intersection points in $B$ exceeds $c_d \cdot n^d$, i.e. $\vert F_{0,B} \vert \geq c_d \cdot n^d$. \end{Theorem} \subsection{General results for lcsc groups} As discussed above our restriction to homogeneous Lie groups is necessary for the proof of the main theorem, but the individual steps work in greater generality. Acceptance domains and cut regions can be defined for all connected lcsc groups; the polytopal condition on the window is not needed for this approach.
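\par\medskip To illustrate the higher dimensional local dual of Beck's theorem stated above in the simplest non-trivial case, let $d=2$ and consider two families $F_1, F_2$ of $\tfrac{n}{2}$ lines each such that every line of $F_1$ meets every line of $F_2$ inside a convex set $B$. If the lines are in general position, i.e. no three of them pass through a common point, then these $\tfrac{n}{2}\cdot\tfrac{n}{2}$ crossings are pairwise distinct and one directly obtains $\asymp n^2$ intersection points inside $B$. The content of the theorem is that the much weaker assumption that no point lies on more than a proportion $c<\tfrac{1}{100}$ of the lines of either family still forces the same order of magnitude.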
\subsection{Notation} Throughout this text, $G$ and $H$ will always be locally compact second countable groups, $\Gamma \subset G \times H$ a uniform lattice and $W \subset H$ a precompact $\Gamma$-regular, i.e. $\partial W \cap \pi_H(\Gamma) = \emptyset$, subset with nonempty interior, which is called the \textit{window}. Why these restrictions on $W$ are required will be explained in \cref{Prop:CPSFLCrelativelydense} below. Further we denote the projection on $G$ by $\pi_G$ and on $H$ by $\pi_H$. \begin{Definition} A triple $(G,H, \Gamma)$ is called a \textit{cut-and-project scheme (CPS)} if $\pi_G\vert_\Gamma$ is injective and $\pi_H(\Gamma)$ is dense in $H$. The set \begin{equation*} \Lambda = \Lambda(G, H, \Gamma, W) := \pi_G((G \times W) \cap \Gamma) = \tau^{-1}(\Gamma_H \cap W) \end{equation*} is called a \textit{model set} if $(G,H, \Gamma)$ is a cut-and-project scheme. Here we use the notation $\Gamma_H := \pi_H(\Gamma)$ and $\tau:= \pi_H \circ (\pi_G \vert_\Gamma)^{-1}$. \end{Definition} \begin{Remark} Observe that we do not consider non-uniform model sets, so in our terminology of a model set the lattice is always uniform. \end{Remark} We will always put a $G$, resp. $H$ in the index if we consider the projection of an object to the factor $G$, resp. $H$. The diagram in \cref{Fig:CPS} visualizes the relation between the different groups in this setup.\par\medskip \begin{figure}[h] \centering \captionsetup{width=0.45\linewidth} \begin{tikzpicture}[descr/.style={fill=white,inner sep=2.5pt}] \matrix (m) [matrix of math nodes, row sep=3em, column sep=5em, text height=2ex, text depth=0.5ex] { G & G \times H & H \\ \Gamma_G & \Gamma & \Gamma_H\\ }; \path[thick, ->,font=\scriptsize] (m-1-2) edge node[above] {$\pi_G$} (m-1-1) edge node[above] {$\pi_H$}(m-1-3) (m-2-2) edge node[below] {${\pi_H}\vert_\Gamma$} (m-2-3); \path[thick,right hook->,font=\scriptsize] (m-2-1) edge (m-1-1) (m-2-2) edge (m-1-2) (m-2-3) edge (m-1-3); \path[thick,left hook->,font=\scriptsize] (m-2-2) edge node[below] {${\pi_G}\vert_\Gamma$} node[above] {$\cong$} (m-2-1); \draw[->] (m-2-1) -- ++ (0,-1) -- node[descr] {$\tau$} ++ (7,0) -- ++ (0,3) -- (m-1-3); \node at (-1.5,2) {$G$-side}; \node at (1.5,2) {$H$-side}; \draw[-, dashed] (0, 2.5) -- (0, 1.2); \draw[-, dashed] (0, -1.2) -- (0, -1.8); \draw[-, dashed] (0, -2.2) -- (0, -2.5); \end{tikzpicture} \caption{Visualisation of a CPS.} \label{Fig:CPS} \end{figure} To study complexity we need a metric, which will always be associated to a norm by $d(x,y):= \vert x y^{-1}\vert$. In the case of homogeneous Lie groups, we have a homogeneous norm on $G$ and $H$, which we will denote by $\vert \cdot \vert_G$, $\vert \cdot \vert _H$, if it is clear which norm we consider we drop the index. \begin{Definition}\textbf{(Delone)} Let $X$ be a metric space, a subset $\Lambda \subset X$ is called \textit{$(R,r)$-Delone} if: \begin{enumerate} \item It is \textit{$R$-relatively dense}, i.e. \begin{equation*} \exists R>0\, \forall x \in X : B_R(x) \cap \Lambda \neq \emptyset. \end{equation*} \item It is \textit{$r$-uniformly discrete}, i.e. \begin{equation*} \exists r>0\, \forall \lambda, \mu \in \Lambda: d(\lambda,\mu) \geq r. \end{equation*} \end{enumerate} If one is not interested in the parameters $R$ and $r$ one simply speaks of a \textit{Delone set}. \end{Definition} \begin{Remark} By \cref{Prop:CPSFLCrelativelydense} model sets are Delone sets. \end{Remark} \begin{Definition}[Pre-Acceptance domains]\label{Def:Pre} Let $\Lambda(G,H, \Gamma, W)$ be a model set. 
The image of $A_r^G(\lambda)$ under the map $\tau$ is called the \textit{$r$-pre-acceptance domain of $\lambda$} \begin{equation*} A_r^H(\lambda):=\tau\left(A_r^G(\lambda)\right) \subset H. \end{equation*} We denote the set of all pre-acceptance domains by $A_r^H$. \end{Definition} \section{How to measure complexity?}\label{Sec:Complexity} In this section we will explain how one can determine the complexity function of a given FLC set, e.g. a model set $\Lambda(G,H, \Gamma,W)$, where $G=(G,d_G)$ and $H=(H,d_H)$ are metric locally compact second countable groups. The statements in this section are translations from the Euclidean case; we refer to the paper of Koivusalo and Walton, \cite{KoivusaloWalton2}, for this set-up. We will take a closer look at how the complexity function $p$ is connected to local constellations, which are called \textit{patches}, in the FLC set. We will then establish a connection between these patches and certain regions in the window $W$.\par\medskip \begin{Definition}[Finite local complexity]\label{Rem:FLC} Let $\Lambda \subset G$ be a locally finite subset. If the complexity function of $\Lambda$ is finite for all $r$ we say that $\Lambda$ has \textit{finite local complexity}. There are several ways of viewing this condition, compare \cref{FLC_Lemma}. \end{Definition} \begin{Theorem}[Acceptance domains]\label{Thm:AcceptanceDomains} Let $\Lambda(G,H, \Gamma, W)$ be a model set, with $G, H$ lcsc groups, $\Gamma \subset G \times H$ a uniform lattice and $W\subset H$ a non-empty, pre-compact, $\Gamma$-regular window. Then \begin{equation}\label{Thm:AcceptanceDomainsEq1} A_r^H(\lambda) \subset \left(\bigcap_{\mu \in \cS_r(\lambda)}\mu \mathring{W}\right)\cap \left(\bigcap_{\mu \in \cS_r(\lambda)^\mathrm{C}} \mu W^\mathrm{C} \right)=:W_r(\lambda), \end{equation} where $\cS_r(\lambda)$ will be defined in \cref{Def:Slab}. The $W_r(\lambda)$ are called \textit{$r$-acceptance domains of $\lambda$}. Further for $\lambda \not\sim_r \lambda'$ we have \begin{equation}\label{Thm:AcceptanceDomainsEq2} W_r(\lambda) \cap W_r(\lambda') = \emptyset. \end{equation} Finally we have \begin{equation}\label{Thm:AcceptanceDomainsEq3} \overline{W} = \bigcup_{\lambda \in A_r^G}\overline{W_r(\lambda)}. \end{equation} \end{Theorem} \begin{Remark} The terminology is due to Koivusalo and Walton, \cite{KoivusaloWalton2}. In their paper they treat the case of model sets $\Lambda(\RR^d,\RR^n,\Gamma, W)$ and extend Julien's paper, \cite{Julien}, which first introduced the idea of considering a decomposition of the window. \end{Remark} \begin{Corollary} $p(r)=\vert \{W_r(\lambda) \mid \lambda \in \Lambda\}\vert=\left\vert A_r^H\right\vert$. \end{Corollary} The rest of the section is devoted to the proof of the theorem and we begin by working towards the definition of $\cS_r(\lambda)$. \begin{Definition}[Displacements] Let $\Lambda$ be a model set. We define the displacements of $\lambda \in \Lambda$ as \begin{equation*} \mathrm{Disp}(\lambda):=\{\mu \in \Gamma_G \mid \mu \lambda \in \Lambda\}. \end{equation*} \end{Definition} \begin{Lemma}[{\cite[Lemma 2.1]{KoivusaloWalton2}}]\label{Lem:WindowShift} Let $\lambda \in \Lambda$ and $\mu \in G$. If $\mu \lambda \in \Lambda$ then $\mu \in \Gamma_G$. On the other hand, if $\mu \in \Gamma_G$: \begin{equation*} \mu \lambda \in \Lambda \;\Leftrightarrow\; \tau(\lambda) \in \tau(\mu)^{-1}\mathring{W} \;\Leftrightarrow\; \tau(\mu) \in \mathring{W}\tau(\lambda)^{-1}. \end{equation*} In particular $\tau(\mathrm{Disp}(\lambda)) \subset \mathring{W}\mathring{W}^{-1}$.
\begin{proof} Since $\lambda, \mu \lambda \in \Lambda$ we find elements $\gamma, \delta \in \Gamma$ such that $\pi_G(\gamma)=\lambda$, $\pi_G(\delta)=(\mu \lambda)^{-1}$. Then $\gamma \delta \in \Gamma$ and $\pi_G(\gamma \delta)= \lambda (\mu \lambda)^{-1} = \mu^{-1} \in \Gamma_G$ and therefore $\mu \in \Gamma_G$.\par Now let $\mu \in \Gamma_G$. By definition $\mu \lambda \in \Lambda$ if and only if $\tau(\mu \lambda) \in \mathring{W}$ and since $\tau$ is a homomorphism this is equivalent to $\tau(\mu) \in \mathring{W} \tau(\lambda)^{-1}$ and $\tau(\lambda) \in \tau(\mu)^{-1}\mathring{W}$. \end{proof} \end{Lemma} To understand patches on the $H$-side of the model set we transport the information of the displacements to this side. Since we always consider patches in dependence of $r$ we only need displacements of magnitude at most $r$. \begin{Definition}[$r$-slab]\label{Def:Slab} Let $\Lambda(G,H,\Gamma,W)$ be a model set. We define the \textit{$r$-slab} as \begin{equation*} \cS_r := \pi_H\left(\left\{(\gamma,\mu) \in \Gamma \,\middle\vert\, \vert \gamma \vert <r \text{ and } \mu \in WW^{-1} \right\}\right). \end{equation*} Further in the case that we only are interested in the displacements of a certain equivalence class we define the \textit{$r$-slab of $\lambda$} as \small\begin{equation*} \cS_r(\lambda) := \pi_H\left(\left\{(\gamma,\mu) \in \Gamma \,\middle\vert\, \vert \gamma \vert <r \text{ and } \mu \in WW^{-1} \text{ and } \gamma^{-1} \in \mathrm{Disp}(\lambda) \right\}\right) \end{equation*}\normalsize and \small\begin{equation*} \cS_r^\mathrm{C}(\lambda) := \pi_H\left(\left\{(\gamma,\mu) \in \Gamma \,\middle\vert\, \vert \gamma \vert <r \text{ and } \mu \in WW^{-1} \text{ and } \gamma^{-1} \notin \mathrm{Disp}(\lambda) \right\}\right). \end{equation*}\normalsize \end{Definition} \begin{Remark} In the paper of Koivusalo and Walton the sets $S_r(\lambda)$ and $S_r^\mathrm{C}(\lambda)$ are called $P_{in}$ and $P_{out}$, we think that our notation highlights the connection to the slab in a better way. Whereas their notation highlights the connection to the patch $P$. 
\end{Remark} \begin{figure} \centering \captionsetup{width=0.75\linewidth} \begin{tikzpicture} \draw[->] (-1.5,0) -- (10,0); \node at (10,-0.3) {$G$}; \draw[->] (0,-1.5) -- (0,5); \node at (-0.3,5) {$H$}; lldraw [gray] (-1.8284+1/2, 3.8284+1/2) circle (2pt) (-0.4142-1.4142/2, 2.4142+1.4142/2) circle (2pt) (-0.4142-0.4142/2, 2.4142+2.4142/2) circle (2pt) (-0.4142-0.4142/2+1/2, 2.4142+3.4142/2) circle (2pt) (-1.4142, 1.4142) circle (2pt) (-1.4142+1/2, 1.4142+1/2) circle (2pt) (-0.4142, 2.4142) circle (2pt) (-0.4142+1/2, 2.4142+1/2) circle (2pt) ( 0.5858, 3.4142) circle (2pt) ( 0.5858+1/2, 3.4142+1/2) circle (2pt) ( 1.5858, 4.4142) circle (2pt) (-2.4142/2, 0.4142/2) circle (2pt) (-1.4142/2, 1.4142/2) circle (2pt) (-0.4142/2, 2.4142/2) circle (2pt) ( 0.5858/2, 3.4142/2) circle (2pt) ( 1.5858/2, 4.4142/2) circle (2pt) ( 2.5858/2, 5.4142/2) circle (2pt) ( 3.5858/2, 6.4142/2) circle (2pt) ( 4.5858/2, 7.4142/2) circle (2pt) ( 5.5858/2, 8.4142/2) circle (2pt) (-1,-1) circle (2pt) (-1/2,-1/2) circle (2pt) (0,0) circle (2pt) (1/2,1/2) circle (2pt) (1,1) circle (2pt) (3/2,3/2) circle (2pt) (2,2) circle (2pt) (5/2,5/2) circle (2pt) (3,3) circle (2pt) (7/2,7/2) circle (2pt) (4,4) circle (2pt) (1.4142,-1.4142) circle (2pt) (1.4142+1/2,-1.4142+1/2) circle (2pt) (2.4142,-0.4142) circle (2pt) (2.4142+1/2,-0.4142+1/2) circle (2pt) (3.4142, 0.5858) circle (2pt) (3.4142+1/2, 0.5858+1/2) circle (2pt) (4.4142, 1.5858) circle (2pt) (4.4142+1/2, 1.5858+1/2) circle (2pt) (5.4142, 2.5858) circle (2pt) (5.4142+1/2, 2.5858+1/2) circle (2pt) (6.4142, 3.5858) circle (2pt) (6.4142+1/2, 3.5858+1/2) circle (2pt) (0.4142/2,-2.4142/2) circle (2pt) (1.4142/2,-1.4142/2) circle (2pt) (2.4142/2,-0.4142/2) circle (2pt) (3.4142/2, 0.5858/2) circle (2pt) (4.4142/2, 1.5858/2) circle (2pt) (5.4142/2, 2.5858/2) circle (2pt) (6.4142/2, 3.5858/2) circle (2pt) (7.4142/2, 4.5858/2) circle (2pt) (8.4142/2, 5.5858/2) circle (2pt) (9.4142/2, 6.5858/2) circle (2pt) (10.4142/2, 7.5858/2) circle (2pt) (11.4142/2, 8.5858/2) circle (2pt) (4.8284-1.4142/2-1,-0.8284+1.4142/2-1) circle (2pt) (4.8284-1.4142/2-1/2,-0.8284+1.4142/2-1/2) circle (2pt) (4.8284-1.4142/2,-0.8284+1.4142/2) circle (2pt) (4.8284-1.4142/2+1/2,-0.8284+1.4142/2+1/2) circle (2pt) (4.8284-1.4142/2+2/2,-0.8284+1.4142/2+2/2) circle (2pt) (4.8284-1.4142/2+3/2,-0.8284+1.4142/2+3/2) circle (2pt) (4.8284-1.4142/2+4/2,-0.8284+1.4142/2+4/2) circle (2pt) (4.8284-1.4142/2+5/2,-0.8284+1.4142/2+5/2) circle (2pt) (4.8284-1.4142/2+6/2,-0.8284+1.4142/2+6/2) circle (2pt) (4.8284-1.4142/2+7/2,-0.8284+1.4142/2+7/2) circle (2pt) (4.8284-1.4142/2+8/2,-0.8284+1.4142/2+8/2) circle (2pt) (4.8284-1/2,-0.8284-1/2) circle (2pt) (4.8284,-0.8284) circle (2pt) (4.8284+1/2,-0.8284+1/2) circle (2pt) (5.8284, 0.1716) circle (2pt) (5.8284+1/2, 0.1716+1/2) circle (2pt) (6.8284, 1.1716) circle (2pt) (6.8284+1/2, 1.1716+1/2) circle (2pt) (7.8284, 2.1716) circle (2pt) (7.8284+1/2, 2.1716+1/2) circle (2pt) (8.8284, 3.1716) circle (2pt) (8.8284+1/2, 3.1716+1/2) circle (2pt) (9.8284, 4.1716) circle (2pt) (7.2426-1.4142/2-1/2, -1.2426+1.4142/2-1/2) circle (2pt) (7.2426-1.4142/2, -1.2426+1.4142/2) circle (2pt) (7.2426-1.4142/2+1/2, -1.2426+1.4142/2+1/2) circle (2pt) (7.2426-1.4142/2+2/2, -1.2426+1.4142/2+2/2) circle (2pt) (7.2426-1.4142/2+3/2, -1.2426+1.4142/2+3/2) circle (2pt) (7.2426-1.4142/2+4/2, -1.2426+1.4142/2+4/2) circle (2pt) (7.2426-1.4142/2+5/2, -1.2426+1.4142/2+5/2) circle (2pt) (7.2426, -1.2426) circle (2pt) (7.2426+1/2, -1.2426+1/2) circle (2pt) (8.2426, -0.2426) circle (2pt) (8.2426+1/2, -0.2426+1/2) circle 
(2pt) (9.2426, 0.7574) circle (2pt) ; \node at (9.6,4.3) {$\textcolor{gray}{\Gamma}$}; \draw[color=green] (-1.5,3.4) -- (10,3.4); \draw[color=green] (-1.5,2.4) -- (10,2.4); \draw[color=green, line width=2] (0,2.4) -- (0,3.4); ll[fill=green!10, opacity=0.2] (-1.5,3.4) -- (10,3.4) -- (10,2.4) -- (-1.5,2.4) -- cycle; lldraw [green] (-0.4142-1.4142/2, 2.4142+1.4142/2) circle (1pt) (-0.4142, 2.4142) circle (1pt) (-0.4142+1/2, 2.4142+1/2) circle (1pt) ( 2.5858/2, 5.4142/2) circle (1pt) ( 3.5858/2, 6.4142/2) circle (1pt) (5/2,5/2) circle (1pt) (3,3) circle (1pt) (8.4142/2, 5.5858/2) circle (1pt) (9.4142/2, 6.5858/2) circle (1pt) (5.4142, 2.5858) circle (1pt) (5.4142+1/2, 2.5858+1/2) circle (1pt) (4.8284-1.4142/2+6/2,-0.8284+1.4142/2+6/2) circle (1pt) (4.8284-1.4142/2+7/2,-0.8284+1.4142/2+7/2) circle (1pt) (7.8284+1/2, 2.1716+1/2) circle (1pt) (8.8284, 3.1716) circle (1pt) ; \node at (0.3,2.6) {$\textcolor{green}{W}$}; lldraw[blue] (-0.4142-1.4142/2, 0) circle (1pt) (-0.4142, 0) circle (1pt) (-0.4142+1/2, 0) circle (1pt) ( 2.5858/2,0) circle (1pt) ( 3.5858/2, 0) circle (1pt) (5/2,0) circle (1pt) (3,0) circle (1pt) (8.4142/2, 0) circle (1pt) (9.4142/2, 0) circle (1pt) (5.4142, 0) circle (1pt) (5.4142+1/2, 0) circle (1pt) (4.8284-1.4142/2+6/2,0) circle (1pt) (4.8284-1.4142/2+7/2,0) circle (1pt) (7.8284+1/2, 0) circle (1pt) (8.8284,0) circle (1pt) ; \node at (4.2,-0.3) {$\textcolor{blue}{\Lambda}$}; \draw[color=red] (-1.5,1) -- (1.5,1); \draw[color=red] (-1.5,1) -- (-1.5,-1); \draw[color=red] (1.5,-1) -- (1.5,1); \draw[color=red] (1.5,-1) -- (-1.5,-1); ll[fill=red!10, opacity=0.2] (-1.5,-1) -- (-1.5,1) -- (1.5,1) -- (1.5,-1) -- cycle; lldraw[red] (-1,-1) circle (1pt) (-1/2,-1/2) circle (1pt) (0,0) circle (1pt) (1/2,1/2) circle (1pt) (1,1) circle (1pt) (-2.4142/2, 0.4142/2) circle (1pt) (-1.4142/2, 1.4142/2) circle (1pt) (1.4142/2,-1.4142/2) circle (1pt) (2.4142/2,-0.4142/2) circle (1pt) ; \node at (0.3,-1.6) {\textcolor{red}{$r$-slab}}; \end{tikzpicture} \caption{This figure shows the preimage of the slab for a fixed $r$ in the setting of a $\RR \times \RR$ model set.} \label{Fig:Slab} \end{figure}\noindent \vspace{-1cm} \begin{Lemma}\label{Lem:Equiv} For $\lambda, \mu \in \Lambda$ we have that: $\lambda \sim_r \mu \Leftrightarrow \cS_r(\lambda)=\cS_r(\mu)$. \begin{proof} Assume $\lambda \sim_r \mu$, then $(B_r(\lambda) \cap \Lambda)\lambda^{-1}=(B_r(\mu) \cap \Lambda)\mu^{-1}$. Let $x \in S_r(\lambda)$ then there exists a $(\gamma, x)\in \Gamma$ such that \begin{align*} \gamma^{-1}\lambda &\in B_r(\lambda) \cap \Lambda\\ &\Leftrightarrow \gamma^{-1} \in (B_r(\lambda) \cap \Lambda)\lambda^{-1} = (B_r(\mu) \cap \Lambda)\mu^{-1}\\ &\Leftrightarrow \gamma^{-1}\mu \in B_r(\mu) \cap \Lambda. \end{align*} Therefore $x \in S_r(\mu)$.\par Now assume $\cS_r(\lambda)=\cS_r(\mu)$ and let $x \in (B_r(\lambda)\cap \Lambda)\lambda^{-1}$ then $\tau(x^{-1}) \in \cS_r(\lambda)=\cS_r(\mu)$ and this implies $x \in (B_r(\mu)\cap \Lambda)\mu^{-1}$. \end{proof} \end{Lemma} \begin{proof}[Proof of \cref{Thm:AcceptanceDomains}] \Cref{Lem:Equiv} tells us that for all $\lambda \in A_r^G(\lambda)$ the set \begin{equation*} W_r(\lambda):=\left(\bigcap_{\mu \in \cS_r(\lambda)}\mu \mathring{W}\right)\cap \left(\bigcap_{\mu \in \cS_r(\lambda)^\mathrm{C}} \mu W^\mathrm{C} \right) \end{equation*} is the same. So to prove \cref{Thm:AcceptanceDomainsEq1} it is enough to show $\tau(\lambda)\in W_r(\lambda)$. 
By the definition of the $r$-slab of $\lambda$ we have for all $\mu \in \cS_r(\lambda)$ that there is a $\mu_G \in \Gamma_G$ with $\tau(\mu_G)=\mu$ and $\mu_G^{-1}\lambda \in \Lambda$. Further \cref{Lem:WindowShift} tells us that $\tau(\lambda) \in \mu \mathring{W}$. For $\mu \in \cS_r^\mathrm{C}(\lambda)$, \cref{Lem:WindowShift} tells us that $\tau(\lambda) \not\in \mu W$, but this means that $\tau(\lambda) \in \mu W^\mathrm{C}$. So it follows that $\tau(\lambda)\in W_r(\lambda)$.\par \medskip Now let $\lambda \not\sim_r \lambda'$, so by \cref{Lem:Equiv} $\cS_r(\lambda) \neq \cS_r(\lambda')$, and the disjointness of $W_r(\lambda)$ and $W_r(\lambda')$ follows by the same argument.\par\medskip Finally we show that the $\overline{W_r(\lambda)}$ tile the closure of the window $\overline{W}$. The inclusion ${\overline{W_r(\lambda)} \subseteq \overline{W}}$ is clear since $e_H \in \cS_r(\lambda)$ for all $\lambda \in \Lambda$ and all $r>0$. Since $\Gamma_H$ is dense in $H$ and $W_r(\lambda)$ is open, we know that $\Gamma_H$ is dense in $W_r(\lambda)$. Since $A_r^H(\lambda)=\Gamma_H \cap W_r(\lambda)$ we know that $A_r^H(\lambda)$ is dense in $W_r(\lambda)$. Therefore the completion by sequences $\overline{A_r^H(\lambda)}^{seq}$ is the topological closure $\overline{W_r(\lambda)}^{top}$. Further since every $\gamma \in \Gamma_H \cap W$ has to belong to some $W_r(\lambda)$ we get that \begin{equation*} W \cap \Gamma_H= \bigcup_{\lambda \in A_r^G} A_r^H(\lambda). \end{equation*} Completion by sequences on both sides yields \begin{equation*} \overline{W} = \bigcup_{\lambda \in A_r^G} \overline{A_r^H(\lambda)}^{seq} = \bigcup_{\lambda \in A_r^G} \overline{W_r(\lambda)}^{top}. \end{equation*} \end{proof} \begin{Remark} Taking the closure of the window in the theorem does not make a big difference, since by $\Gamma$-regularity there are no projected lattice points on the boundary. This also holds for the shifted window, since if $\gamma_1, \gamma_2 \in \Gamma_H$ with $\gamma_1 \in \gamma_2\, \partial W$, then $\gamma_2^{-1}\gamma_1 \in \partial W$, in contradiction to the $\Gamma$-regularity of $W$. So for all acceptance domains $\partial W_{r}(\lambda) \cap \Gamma_H = \emptyset$. \end{Remark} \section{Lattice point counting}\label{Sec:Lattice} Before we begin with the actual proof of our main theorem we will establish the growth lemma, \cref{Prop:Growth}. The growth lemma tells us how to count points in sets of the form $(B_r(e) \times A) \cap \Gamma$. Notice that in particular the slab $\cS_r=\pi_H\left(\left(B_r(e)\times WW^{-1}\right)\cap \Gamma\right)$ is the projection to $H$ of a set of this form. \begin{Proposition}[Growth Lemma]\label{Prop:Growth} Let $G$ and $H$ be lcsc groups, let $\Lambda(G,H,\Gamma,W)$ be a model set with a uniform lattice $\Gamma \subset G \times H$, and let ${\emptyset \neq A \subset H}$ be a bounded open set. Then the asymptotic growth of the number of lattice points inside $B^G_r(e) \times A$ is bounded by \begin{equation*} \mu_G\left(B^G_{r-k_1}(e)\right) \ll \left\vert\left(B^G_r(e)\times A\right)\cap \Gamma \right\vert \ll \mu_G\left(B^G_{r+k_2}(e)\right), \end{equation*} for some constants $k_1,k_2>0$ as $r \to \infty$. \end{Proposition} The proof consists of the following well-known proposition, see for example \cite[Lemma 7.4]{BaakeGrimm}, and the two subsequent lemmas. \begin{Proposition}\label{Lem:ProduktOffenKompakt} Let $G$ and $H$ be lcsc groups and $\Gamma \subset G \times H$ a uniform lattice, such that $\pi_H(\Gamma)$ is dense in $H$. Further let $U \subset H$ be an open non-empty set.
Then there exists a compact set $K \subset G$ such that \begin{equation*} G \times H = (K \times U)\Gamma. \end{equation*} \begin{proof} Since $\Gamma$ is a uniform lattice in $G \times H$ there exists a compact set $C$ such that $G \times H= C\Gamma$. We can cover $C$ by the bigger compact set $\pi_G(C) \times \pi_H(C)$, which implies $ G \times H = (C_G \times C_H)\Gamma$. By density of $\pi_H(\Gamma)$ in $H$ we get a covering \begin{equation*} \bigcup_{ \gamma \in \Gamma} U\pi_H(\gamma) = H \supset C_H. \end{equation*} Since $C_H$ is compact we can choose a finite subcovering with finite $F\subset \Gamma$ such that \begin{equation*} \bigcup_{ \gamma \in F} U\pi_H(\gamma) \supset C_H. \end{equation*} Now let $z\in G \times H$ be arbitrary. By the choice of $C$ we find a $\gamma \in \Gamma$ such that ${z \gamma^{-1} \in C \subset C_G \times C_H}$. By our covering argument we find a $f \in F$ such that ${\pi_H(z \gamma^{-1}) \in U \pi_H(f)}$ and therefore $\pi_H(z \gamma^{-1} f^{-1}) \in U$. If we project the same element to $G$ we get \begin{equation*} \pi_G(z \gamma^{-1} f^{-1}) \in C_G \pi_G(F^{-1}) =:K. \end{equation*} Now $K$ is compact since $C_G$ is compact and $\pi_G(F^{-1})$ is finite. Putting things together we realise \begin{equation*} z = (z \gamma^{-1} f^{-1}) (f \gamma) \in (K \times U) \Gamma. \end{equation*} \end{proof} \end{Proposition} \begin{Lemma}\label{Lem:GrowthSrUpperGeneral} Let $G$ and $H$ be lcsc groups. For a model set with a uniform lattice $\Gamma \subset G \times H$ and a bounded open set ${\emptyset \neq A \subset H}$ the growth of the lattice points inside $B^G_r(e) \times A$ is asymptotically bounded from above by \begin{equation*} \left\vert(B^G_r(e)\times A)\cap \Gamma \right\vert \ll \mu_G(B^G_{r+k_2}(e)), \end{equation*} where $k_2$ is some constant as $r \to \infty$. \begin{proof} Since $\Gamma$ is a lattice it is uniformly discrete, therefore we find a constant $c_1$ such that $d(\gamma_1,\gamma_2)> c_1$ for all $\gamma_1 \neq \gamma_2 \in \Gamma$. If we halve the constant we get disjoint balls around the lattice points, i.e. $B^{G\times H}_{\frac{c_1}{2}}(\gamma_1) \cap B^{G\times H}_{\frac{c_1}{2}}(\gamma_2)=\emptyset$. \par\medskip Since $A$ is bounded we find a second constant $c_2$ such that $A \subset B_{c_2}^H(e)$ and that $B_{c_1}^{G\times H}(x)\subset G \times B^H_{c_2}(e)$ for every $x \in G\times A$. The norm in the product is given by the maximum of the norms of the components.\par The idea is that we build a set which contains not only the points of $(B^G_r(e)\times A)\cap \Gamma$, but also the balls around them. Then we can obtain an upper bound for the number of points in $(B^G_r(e)\times A) \cap \Gamma$ by estimating how often the thickened set of points could fit in this set via a volume estimate. Since by our choice of $\frac{c_1}{2}$ the balls do not overlap, we obtain that \begin{equation*} \sum_{\gamma \in (B^G_r(e)\times A) \cap \Gamma} \mu_{G \times H}\left(B^{G \times H}_{\frac{c_1}{2}}(\gamma)\right) \leq \mu_{G\times H}\left(B^G_{r+c_1}(e) \times B^H_{c_2}(e)\right). \end{equation*} We need the ''$+c_1$`` in the index so the set can also contain all the balls whose center lie close to the border of $B^G_r(e)$. The volume of a ball is independent of its center point, since the metric and the Haar-measure are right-invariant. 
Therefore we can write the inequality as \begin{equation*} \left\vert\left(B^G_r(e)\times A\right) \cap \Gamma\right\vert \cdot \mu_{G \times H}\left(B^{G \times H}_{\frac{c_1}{2}}(e)\right) \leq \mu_{G \times H}\left(B^G_{r+c_1}(e)\times B^H_{c_2}(e)\right). \end{equation*} We will now divide this equation by $\mu_{G \times H}\left(B^{G\times H}_{\frac{c_1}{2}}(e)\right)$, which is just a constant dependent on $c_1$ which we will denote by $c_1'$, and the constant $\mu_{H}\left(B^H_{c_2}(e)\right)$ will be denoted by $c_2'$. \begin{align*} \left\vert\left(B^G_r(e)\times A\right) \cap \Gamma\right\vert &\leq \frac{\mu_{G \times H}\left(B^G_{r+c_1}(e) \times B^H_{c_2}(e)\right)}{c_1'}\\ &=\frac{\mu_{G}\left(B^G_{r+c_1}(e)\right) \cdot \mu_{H}\left(B^H_{c_2}(e)\right)}{c_1'} = \frac{c_2'}{c_1'} \mu_G\left(B^G_{r+c_1}(e)\right). \end{align*} \end{proof} \end{Lemma} \begin{Lemma}\label{Lem:GrowthSrLowerGeneral} Let $G$ and $H$ be lcsc groups. For a model set with a uniform lattice $\Gamma \subset G \times H$ and a bounded open set ${\emptyset \neq A \subset H}$ the growth of the number of lattice points inside $B^G_r(e) \times A$ is asymptotically bounded from below by \begin{equation*} \left\vert\left(B^G_r(e)\times A\right)\cap \Gamma \right\vert \gg \mu_G\left(B^G_{r-k_1}(e)\right), \end{equation*} where $k_1$ is some constant as $r \to \infty$. \begin{proof} Let $\epsilon>0$ be fixed. We choose an open ball $B^H_\epsilon(\gamma_H) \subset A$ with $\gamma_H \in \Gamma_H$, this can be done since $\Gamma_H$ is dense in $H$ and $A$ is open and therefore $\Gamma_H \cap A \subset A$ is dense.\par First we assume that $\gamma_H = e$. By \cref{Lem:ProduktOffenKompakt} we find a compact set $K \subset G$ such that $ G \times H = \left(K \times B^H_\epsilon(e)\right)\Gamma$. Since $K$ is compact it is bounded, we can consider $\overline{B^G_{c_1}}(e)$, with $c_1$ large enough, instead. Then for all $z \in G \times H$ we see $\left(B^G_{c_1}(e) \times B^H_\epsilon(e)\right) z \cap \Gamma \neq \emptyset$. This holds true since we can write $z=(k_z,u_z)(\gamma_{zG},\gamma_{zH})$ with $(\gamma_{zG},\gamma_{zH}) \in \Gamma$, $k_z \in B^G_{c_1}(e)$ and $u_z \in B^H_\epsilon(e)$. But then \begin{equation*} (\gamma_{zG},\gamma_{zH}) = \left(k_z^{-1},u_z^{-1}\right) z \in \left(B^G_{c_1}(e) \times B^H_\epsilon(e)\right) z \cap \Gamma, \end{equation*} since $k_z^{-1} \in B^G_{c_1}(e)$ and $u_z^{-1} \in B^H_\epsilon(e)$.\par We can find a lower bound of the growth if we can fit enough of the sets of type ${\left(B^G_{c_1}(e)\times B^H_\epsilon(e)\right) z}$ into $B^G_r(e) \times A$ in a disjoint way. One should think of this as stacking these sets onto another with base $B^H_\epsilon(e)$. This comes down to \begin{align*} &\left\vert\left(B^G_r(e)\times A\right)\cap \Gamma \right\vert > \left\vert\left(B^G_r(e) \times B^H_\epsilon(e)\right)\cap \Gamma \right\vert\\ &\quad\geq \max \left\{ \vert X \vert \,\middle\vert\, X \subset G,\text{ such that } \forall x \in X: B^G_{c_1}(x) \subset B^G_r(e) \right.\\ &\quad\quad \quad \quad \left.\text{ and } B^G_{c_1}(x)\cap B^G_{c_1}(y) = \emptyset\,\, \forall x \neq y \in X \right\}\\ &\quad\geq \max \left\{ \vert X \vert \,\middle\vert\, X \subset B^G_{r-c_1}(e) \text{ and } X \text{ is } 2c_1\text{-uniformly discrete}\right\}. \end{align*} We can extend every $c_1$-uniformly discrete set to a $(c_2,c_1)$-Delone set for some constant $c_2$, \cite[Proposition 3.C.3]{Harpe}. 
Thus \small\begin{equation*} \left\vert\left(B^G_r(e)\times A\right)\cap \Gamma \right\vert > \max \left\{ \vert X \vert \,\middle\vert\, X \subset B^G_{r-c_1}(e) \text{ and } X \text{ is a } (c_2,2c_1)\text{-Delone subset of } B_r^G(e)\right\}. \end{equation*}\normalsize For every such Delone set we can cover $B^G_{r-c_1}(e)$ with balls $B^G_{c_2}(x)$ for $x\in X$, so that \begin{align*} \bigcup_{x \in X} B^G_{c_2}(x) \supset B^G_{r-c_1}(e) \Rightarrow \sum_{x\in X} \mu_{G}\left(B^G_{c_2}(x)\right) \geq \mu_G\left(B^G_{r-c_1}(e)\right). \end{align*} Since the metric and the Haar-measure are right-invariant all of these balls have the same measure and we get \begin{align*} \vert X \vert \cdot \mu_G\left(B^G_{c_2}(e)\right) \geq \mu_G\left(B^G_{r-c_1}(e)\right) \Leftrightarrow \vert X \vert \geq \frac{\mu_G\left(B^G_{r-c_1}(e)\right)}{\mu_G\left(B^G_{c_2}(e)\right)}. \end{align*} Summing up we have \begin{equation*} \left\vert\left(B^G_r(e)\times A\right)\cap \Gamma \right\vert > \frac{\mu_G\left(B^G_{r-c_1}(e)\right)}{\mu_G\left(B^G_{c_2}(e)\right)}. \end{equation*} \end{proof} \end{Lemma} \begin{Definition} Let $G$ be a locally compact group and let $d$ be a right-invariant metric on $G$ compatible with the topology on $G$. Then $G$ is a group with \textit{exact polynomial growth of degree $\kappa$ with respect to $d$} if there exists a constant $c>0$ such that \begin{equation*} \lim\limits_{r \to \infty} \frac{\mu_{G}(B_r(e))}{c r^\kappa} =1. \end{equation*} \end{Definition} \begin{Corollary} If $G$ is a group with exact polynomial growth of degree $\kappa$ with respect to $d$, then \begin{equation*} \left\vert\left(B^G_r(e)\times A\right)\cap \Gamma \right\vert \asymp r^\kappa. \end{equation*} \end{Corollary} \section{Homogeneous Lie groups}\label{Sec:HomLie} In this section we review the basic concepts of homogeneous Lie groups, following the book by Fischer and Ruzhansky \cite{FischerRuzhansky}. In this context we also state an ergodic theorem due to Gorodnik and Nevo, \cite{GorodnikNevo1, GorodnikNevo2, NevoErgodic}, which will be needed later. \begin{Definition}\textbf{\cite[Definition 3.1.7]{FischerRuzhansky}}\label{Def:HomLie} \begin{enumerate} \item A family of \textit{dilations} of a Lie algebra $\fg$ is a family of linear mappings $\{D_r, r>0\}$ from $\fg$ to itself which satisfies: \begin{enumerate}[label=\roman*)] \item The mappings are of the form \begin{equation*} D_r=\exp(\ln(r) A) = \sum_{l=0}^\infty \frac{1}{l !}(\ln(r)A)^l, \end{equation*} where $A$ is a diagonalisable linear operator on $\fg$ with positive eigenvalues, $\exp$ denotes the exponential of matrices and $\ln(r)$ the natural logarithm of $r>0$. \item Each $D_r$ is a morphism of the Lie algebra $\fg$, that is, a linear mapping from $\fg$ to itself which respects the Lie bracket, i.e. \begin{equation*} \forall X,Y \in \fg, r>0: [D_r X, D_r Y] = D_r[X,Y]. \end{equation*} \end{enumerate} \item A \textit{homogeneous} Lie group is a connected simply connected Lie group whose Lie algebra is equipped with a fixed family of dilations. \item We call the eigenvalues of $A$ the \textit{dilations' weights}; the sum of these weights is the \textit{homogeneous dimension of $\fg$}, denoted by $\homdim(\fg)$. \end{enumerate} \end{Definition} \begin{Convention} From now on we will always assume $G$ and $H$ to be homogeneous Lie groups.
\end{Convention} \begin{Remark} Since every Lie algebra equipped with a family of dilations is nilpotent, a homogeneous Lie group is nilpotent, \cite[Proposition 3.1.10.]{FischerRuzhansky}. Together with connectedness and simple connectedness this implies that the exponential map is a global diffeomorphism, see \cite[Proposition 1.6.6.]{FischerRuzhansky}, which we use to identify the underlying sets of $G$ and $\fg$. On $\fg$ the group multiplication takes the form \begin{equation*} X \ast Y := \log(\exp(X)\exp(Y)). \end{equation*} The operation $\ast$ is called the Baker-Campbell-Hausdorff (BCH) multiplication, since it can be computed by means of the BCH formula. The explicit formula for the multiplication is \begin{equation*} X \ast Y = X + \sum_{\substack{k,m \geq 0 \\ p_i+q_i > 0\\ i \in \{1,...,k\}}} (-1)^k \frac{{\ad_X}^{p_1} \circ {\ad_Y}^{q_1} \circ ... \circ {\ad_X}^{p_k} \circ {\ad_Y}^{q_k} \circ {\ad_X}^m }{(k+1)(q_1+...+q_k+1) \cdot p_1!\cdot q_1! \cdot ... \cdot p_k! \cdot q_k! \cdot m!}(Y). \end{equation*} In the case of an $n$-step nilpotent Lie group this sum is finite, since all terms with $m+\sum_i (p_i + q_i) \geq n$ vanish. The first few terms of the sum look like \begin{equation*} X \ast Y = X + Y +\frac{1}{2}[X,Y] + \frac{1}{12} \big([X,[X,Y]]-[Y,[X,Y]]\big)- \frac{1}{24} [Y,[X,[X,Y]]] + ... \end{equation*} Observe that the inverse of $X$ with respect to the multiplication $\ast$ is given by the additive inverse of $X$ in the Lie algebra, i.e. $X^{-1}= -X$.\par In particular the group law is polynomial, see \cite[Proposition 1.6.6.]{FischerRuzhansky}; this means that for $x=(x_1,...,x_n)$ and $y=(y_1,...,y_n)$ we have \begin{equation*} x \ast y = (P_1(x,y),..., P_n(x,y)), \end{equation*} with $P_1,..., P_n$ polynomials in $2n$ variables. We will observe some restrictions on these polynomials in \cref{Appendix:Poly2}.\par \medskip Observe that we can transport the dilations of the Lie algebra to the Lie group via the exponential map, so that we also have a dilation structure on the Lie group itself. This results in a family of dilations of the form $D_r=\exp(A \ln(r))$ with a diagonalisable linear operator $A$. The eigenvalues of $A$ are the weights mentioned above; in accordance with \cite{FischerRuzhansky} we denote these weights by $\nu_1,...,\nu_n$. The trace of $A$ gives us the \textit{homogeneous dimension of $G$}; the properties listed below explain this terminology.\par \medskip \end{Remark} \begin{Definition}[{\cite[Definition 3.1.33.]{FischerRuzhansky}}] A \textit{homogeneous quasi-norm} is a continuous non-negative function $G \to [0,\infty), x \mapsto \vert x \vert$ satisfying \begin{enumerate}[label=\roman*)] \item $\vert x^{-1}\vert = \vert x \vert$, \item $\vert D_r( x )\vert = r \cdot \vert x \vert$ $ \forall r>0$, \item $\vert x \vert =0$ if and only if $x=e$. \end{enumerate} It is called a \textit{homogeneous norm} if additionally \begin{enumerate}[label=\roman*)] \setcounter{enumi}{3} \item for all $x,y \in G$ we have $\vert xy\vert \leq \vert x \vert + \vert y\vert$. \end{enumerate} \end{Definition} \begin{Lemma}[{\cite[Theorem 3.1.39.]{FischerRuzhansky}}]\label{Lem:HomNorm} If $G$ is a homogeneous Lie group, then there exists a homogeneous norm $\vert \cdot \vert$ on $G$.
\end{Lemma} \begin{Lemma}[{\cite[Proposition 3.1.35.]{FischerRuzhansky}}] Any two homogeneous quasi-norms $\vert \cdot \vert$ and $\vert \cdot \vert'$ on $G$ are mutually equivalent, in the sense that there exist $a,b >0$ such that for all $x \in G$ we have $a \vert x \vert' \leq \vert x\vert \leq b \vert x\vert'$. \end{Lemma} \begin{Remark} To such a homogeneous norm we can associate the right-invariant metric $d$ given by $d(x,y):= \vert x y^{-1}\vert$. \end{Remark} \begin{Lemma}[{\cite[Proposition 3.1.37.]{FischerRuzhansky}}]\label{Lem:HomLieTop} If $\vert \cdot \vert$ is a homogeneous quasi-norm on a homogeneous Lie group $G$ of dimension $n$, then the topology induced by the quasi-norm coincides with the Euclidean topology on the underlying set $\RR^n$. \end{Lemma} \begin{Proposition}[{\cite[Section 3.1.3 and 3.1.6]{FischerRuzhansky}}]\label{Prop:BallMeas} Let $G$ be a homogeneous Lie group and $\vert \cdot \vert$ a homogeneous norm on $G$ with associated right-invariant metric $d$, then for $x,y \in G$ and $r,s >0$: \begin{enumerate}[label=\roman*)] \item $B_r(x) = \{y \in G \mid \vert y x^{-1}\vert < r\} = B_r(e) \cdot x$, \item $D_r(x y) = D_r(x) D_r(y)$, \item $D_r(B_s(e)) = B_{r\cdot s}(e)$, \item $D_r(B_s(x))=B_{r \cdot s}(D_r(x))$, \item $\mu_{G}(B_r(x))= r^{\homdim(G)}\cdot \mu_{G}(B_1(e))$. \end{enumerate} \end{Proposition} \begin{Remark} The fifth point of \cref{Prop:BallMeas} tells us that a homogeneous Lie group has exact polynomial growth of degree $\homdim(G)$. Since all homogeneous quasi-norms on $G$ are mutually equivalent, this behaviour is independent of the choice of metric. \end{Remark} \begin{Definition} A \textit{hyperplane} in a homogeneous Lie group $G$ is the image of a hyperplane in the Lie algebra $\fg$ under the exponential map. The set of all hyperplanes in $G$ is denoted by $\cH(G)$.\par A \textit{half-space} in a homogeneous Lie group $G$ is the image of a half-space in the Lie algebra $\fg$ under the exponential map. \end{Definition} \begin{Definition} A group $G$ is \textit{non-crooked} if $\cH(G) \subset \cP(G)$ is $G$-invariant, i.e. if $gP \in \cH(G)$ for all $g \in G$ and all $P \in \cH(G)$. \end{Definition} \begin{Definition} A Lie group $G$ is called \textit{locally $k$-step nilpotent} if for all $X,Y \in \fg$ we have $\ad_X^k(Y) = 0$. \end{Definition} \begin{Theorem}\label{Thm:NonCrooked} Let $G$ be a homogeneous Lie group. Then the following are equivalent: \begin{enumerate} \item $G$ is non-crooked, \item $G$ is $2$-step nilpotent or abelian, \item $G$ is locally $2$-step nilpotent. \end{enumerate} \end{Theorem} For a proof of the theorem see \cref{Appendix:Poly}. \begin{Remark} The notions of locally $k$-step nilpotent and $k$-step nilpotent are equivalent only for $k=1$ and $k=2$; for larger $k$ the two notions differ. \end{Remark} \begin{Definition} We call a window \textit{polytopal} or of \textit{polytopal type} if it is the intersection of finitely many half-spaces. \end{Definition} \begin{Convention} If $W$ is polytopal, we can express $W$ as $\bigcap_{i=1}^N P_i^+$, where each $P_i^+$ is a half-space with opposite half-space $P_i^-$ and bounding hyperplane $P_i$. Further we denote the faces of $W$ by $\partial_i W=W \cap P_i$. We will use this notation for the half-spaces and hyperplanes associated to a window of polytopal type throughout the rest of the paper. \end{Convention} We now give a small example which the reader can keep in mind; we fix its basic data here so that the reader can follow the subsequent steps of the paper in this example. \begin{Example} We set $G=H=\HH$, where $\HH$ denotes the Heisenberg group.
We view the underlying set of $\HH$ as $\RR^3$. Further we set \begin{equation*} \Gamma= \left\{(a,b,c, a^\ast,b^\ast,c^\ast) \in G \times H \,\middle\vert\, a,b,c \in \ZZ\left[\sqrt{2}\right]\right\} \end{equation*} and $W=[-\frac{1}{2},\frac{1}{2}]\times[-\frac{1}{2},\frac{1}{2}]\times[-\frac{1}{2},\frac{1}{2}]$, so it is clear that $W$ is non-empty, pre-compact and $\Gamma$-regular. Moreover $W$ is a polytope, in fact a cube.\par A dilation structure on $\HH$ is given by $D_r((x,y,z)) = \left(r \cdot x, r \cdot y , r^2 \cdot z\right)$; a homogeneous norm is given by the Kor{\'a}nyi-Cygan norm $\vert (x,y,z)\vert _{\HH} = \left( (x^2+y^2)^2 + z^2 \right)^{\frac{1}{4}}$. \end{Example} \subsection{Ergodic theorems for homogeneous Lie groups}\label{SubSec:Ergodic} \begin{Definition}[{\cite[Definition 1.1.]{GorodnikNevo2}}] Let $O_\epsilon$, $\epsilon >0$, be a family of symmetric neighbourhoods of the identity in a lcsc group $G$ which are decreasing in $\epsilon$. Then a family of bounded Borel subsets of finite Haar-measure $(B_t)_{t>0}$ is \textit{well-rounded w.r.t. $O_\epsilon$} if for every $\delta >0$ there exist $\epsilon, t_1 >0$ such that for all $t \geq t_1$ \begin{equation*} \mu_G(O_\epsilon B_t O_\epsilon) \leq (1+\delta) \mu_G\left(\bigcap_{u,v \in O_\epsilon}u B_t v\right). \end{equation*} \end{Definition} In our setup we will always fix $O_\epsilon$ to be $B_\epsilon(e)$; this does not make a difference by \cite[Remark 2.3.]{Yakov}. \begin{Definition}[{\cite[Definition 1.4 and 1.5]{GorodnikNevo2}}] Let $G$ be a lcsc group and $B_t$ a family of bounded Borel subsets of finite Haar-measure, and let $\beta_{G,B_t}$ be the operator \begin{equation*} \beta_{G,B_t}f(x) := \frac{1}{\mu_{G}(B_t)} \int_{B_t} f(g^{-1}x) d\mu_G(g) \end{equation*} for $f \in L^2(G)$. We say that the \textit{mean ergodic theorem in $L^2(G)$} holds if \begin{equation*} \left\vert\left\vert \beta_{G,B_t}f - \int_G f d\mu \right\vert\right\vert_{L^2(G)} \to 0 \text{ as } t \to \infty \end{equation*} for all $f \in L^2(G)$. We say that the \textit{stable mean ergodic theorem in $L^2(G)$} holds if the \textit{mean ergodic theorem in $L^2(G)$} holds for the sets \begin{equation*} B_t^+(\epsilon) = O_\epsilon B_t O_\epsilon \text{ and } B_t^-(\epsilon)=\bigcap_{u,v \in O_\epsilon}u B_t v, \end{equation*} for all $\epsilon \in (0, \epsilon_1)$ with $\epsilon_1 >0$. \end{Definition} \begin{Remark} From now on we fix a Haar-measure $\mu_{G \times H}$, which we assume to be normalized by $\mu_{G \times H /\Gamma}(G \times H / \Gamma) = 1$. \end{Remark} \begin{Theorem}[{\cite[Theorem 1.7]{GorodnikNevo2}}]\label{Thm:Nevo} Let $G$ be a lcsc group, $\Gamma \subset G$ a discrete lattice subgroup and $(B_t)_{t>0}$ a well-rounded family of subsets of $G$. Assume that the averages $\beta_{G/\Gamma, B_t}$ supported on $B_t$ satisfy the stable mean ergodic theorem in $L^2(G/\Gamma)$. Then \begin{equation*} \lim\limits_{t \to \infty} \frac{\vert \Gamma \cap B_t\vert}{\mu_{G}(B_t)} = 1. \end{equation*} \end{Theorem} To apply this theorem, we have to show that the sets we consider are well-rounded and that they satisfy the stable mean ergodic theorem. We will give criteria which ensure this. \begin{Lemma}\label{Lem:WellRoundedBalls} Let $G$ be a homogeneous Lie group and $(B_t(x))_{t>0}$ a family of balls in $G$. Then this family is well-rounded.
\begin{proof} We have to show that for every $\delta>0$ there exist $\epsilon, t_1 >0$ such that for all $t \geq t_1$ \begin{equation*} \mu_G(B_\epsilon(e) B_t(x) B_\epsilon(e)) \leq (1+\delta) \mu_G\left(\bigcap_{u,v \in B_\epsilon(e)}u B_t(x) v\right). \end{equation*} We first show that we can choose $\epsilon$ such that for a constant $k \in (0,t)$ we have $B_{t-k}(x) \subset \bigcap_{u,v \in B_\epsilon(e)}u B_t(x) v$. So let $g \in B_{t-k}(x)$; then we can write $g$ as $u u^{-1} g v^{-1} v$ with $u,v \in B_\epsilon(e)$, and we have to show that $u^{-1} g v^{-1} \in B_t(x)$. \begin{align*} d(x, u^{-1}g v^{-1}) &= \vert x v g^{-1} u\vert_G = \vert x v x^{-1} x g^{-1} u\vert_G\\ &\leq \vert x v x^{-1}\vert_G + \vert xg^{-1} \vert_G + \vert u\vert_G \leq c_x(\epsilon) + t-k + \epsilon. \end{align*} Here the last inequality holds by \cref{Cor:ConBall}. We have to choose $\epsilon$ so small that $k > \epsilon + c_x(\epsilon)$, which is possible since $c_x(\epsilon) \to 0$ as $\epsilon \to 0$.\par On the other hand $B_\epsilon(e)B_t(x)B_\epsilon(e) \subset B_{\epsilon + t}(x)B_\epsilon(e)$. Let $y \in B_{t+\epsilon}(x)$ and $u \in B_\epsilon(e)$; then \begin{align*} d(x, yu) &= \vert x u^{-1} y^{-1}\vert_G = \vert x u^{-1} x^{-1} x y^{-1} \vert_G\\ &\leq \vert x u ^{-1} x^{-1}\vert_G + \vert x y^{-1} \vert_G \leq c_x(\epsilon) + \epsilon + t. \end{align*} Therefore $B_\epsilon(e)B_t(x)B_\epsilon(e) \subset B_{\epsilon + t + c_x(\epsilon)}(x)$. Hence we can choose $\epsilon > 0$ and $k \in (0, t)$ such that \begin{equation*} B_\epsilon(e) B_t(x) B_\epsilon(e) \subset B_{\epsilon + t + c_x(\epsilon)}(x) \text{ and } B_{t-k}(x) \subset \bigcap_{u,v \in B_\epsilon(e)}u B_t(x) v \end{equation*} hold simultaneously. Now we can use that we can calculate the measure of balls in homogeneous Lie groups by \cref{Prop:BallMeas}: \begin{equation*} \mu_G(B_\epsilon(e) B_t(x) B_\epsilon(e)) \leq \mu_G(B_{\epsilon + t + c_x(\epsilon)}(x)) = (t+\epsilon+c_x(\epsilon))^{\homdim(G)} \mu_G(B_1(e)) \end{equation*} and \begin{equation*} (t-k)^{\homdim(G)} \mu_G(B_1(e)) \leq \mu_G(B_{t-k}(x)) \leq \mu_G \left(\bigcap_{u,v \in B_\epsilon(e)}u B_t(x) v\right). \end{equation*} Combining the arguments we see that we have to choose $\epsilon$, $k$ and $t_1$ such that for all $t > t_1$ \begin{equation*} \left(\frac{(t+\epsilon+c_x(\epsilon))}{(t-k)}\right)^{\homdim(G)} \leq (1+\delta). \end{equation*} \end{proof} \end{Lemma} \begin{Lemma} Let $G$ be a homogeneous Lie group and $(B_r(x))_{t>0}$ a constant family of balls in $G$. Then this family is well-rounded. \begin{proof} We have already seen in the proof of \cref{Lem:WellRoundedBalls} that $\bigcap_{u,v \in B_\epsilon(e)} u B_r(x) v$ contains a ball of the form $B_{r-k}(x)$ for any $k\in (0,r)$ if we choose $\epsilon$ accordingly. On the other hand $B_\epsilon(e)B_r(x)B_\epsilon(e)$ is contained in a ball $B_{r+\epsilon+c_x(\epsilon)}(x)$ and therefore has finite measure. Choosing $\epsilon$ accordingly we are done. \end{proof} \end{Lemma} What is left to show is that the stable mean ergodic theorem holds for our families of sets. To do so we use \cite[Theorem 3.33.]{Glasner}, which tells us that we have to check that our families are F\o lner sequences. Alternatively see the work by Nevo, \cite{NevoErgodic}; especially step I in the proof of Theorem 5.1 there is exactly what we need. \begin{Definition}\textbf{(F\o lner sequences)} Let $G$ be a lcsc group acting on a measure space $(X,\mu)$.
A sequence $F_1, F_2, ...$ of subsets of finite, non-zero measure in $X$ is called a \textit{(right) F\o lner sequence} if for all $g \in G$ \begin{equation*} \lim\limits_{i \to \infty}\frac{\mu(F_i g \triangle F_i)}{\mu(F_i)} = 0. \end{equation*} \end{Definition} \begin{Lemma} Let $G \times H$ be a product of homogeneous Lie groups, $\Gamma \subset G \times H$ a lattice, $(B_t^G(e))_{t>0}$ a family of balls in $G$ and $(B_r^H(x))_{t>0}$ a constant family of balls in $H$. The family $(B_t^G(e) \times B_r^H(x))_{t>0}/\Gamma$ is a F\o lner sequence in $(G\times H)/ \Gamma$. \begin{proof} We use \cref{Lem:ProduktOffenKompakt}, which tells us that for every $r>0$ and every $x \in H$ we find a compact $K \subset G$ such that $(K \times B_r^H(x))\Gamma = G \times H$. Since every compact set $K$ is contained in some $B_t^G(e)$ for $t$ large enough, we get that $(B_t^G(e) \times B_r^H(x)) \Gamma = G \times H$. This also holds if we consider balls $B_t^G(g)$ instead of $B_t^G(e)$. Therefore for every $(g,h) \in G \times H$ there exists a $t_{g,h} >0$ such that $(B_t^G(g) \times B_r^H(xh)) \Gamma = G \times H$, and so \begin{equation*} \lim\limits_{t \to \infty} \mu_{(G \times H)/\Gamma}((B_t^G(g) \times B_r^H(xh))_{(G\times H)/\Gamma} \triangle (B_t^G(e) \times B_r^H(x))_{(G\times H)/ \Gamma}) = 0 \end{equation*} for all $(g,h) \in G \times H$. \end{proof} \end{Lemma} \section{Growth of the complexity function}\label{Sec:Growth} In this section we prove our main theorem, modulo \cref{Thm:BeckInduction}, which we will prove in \cref{Sec:Combinatorics}. \begin{Theorem}\label{MainTHM} Let $\Lambda(G,H, \Gamma, W)$ be a model set, where $G$ is a homogeneous Lie group and $H$ is a non-crooked homogeneous Lie group. Further let $W \subset H$ be a non-empty, precompact window of polytopal type with bounding hyperplanes $P_1,..., P_N$ such that each $P_i$ has trivial stabilizer and $P_i \cap \Gamma_H = \emptyset$ for all $i \in \{1,...,N\}$. Then for the complexity function $p$ of $\Lambda$ we have \begin{equation*} p(r) \asymp r^{\homdim(G)\dim(H)}. \end{equation*} \end{Theorem} Notice that the condition of $\Gamma$-regularity is hidden in the stronger condition that none of the hyperplanes $P_i$ intersects $\Gamma_H$. The condition that the $P_i$ have trivial stabilizer simplifies the problem, since otherwise we would have to address the effects of the stabilizer. The effect of a non-trivial stabilizer is similar to the Euclidean case, for which we refer to \cite{KoivusaloWalton2}.\par \medskip The proof of \cref{MainTHM} is divided into establishing an upper bound and a lower bound; the lower bound requires considerably more effort. \subsection{Upper bound of the growth}\label{SubSec:UpperBound} For an upper bound we consider the decomposition of the window given by $W \setminus \bigcup_{x\in\cS(r)} x\partial W$. We will show that counting the connected components gives an upper bound for the number of acceptance domains. Then we will use the theory of real hyperplane arrangements, which gives us an upper bound for this count. \begin{Lemma} For a model set $\Lambda(G,H,\Gamma,W)$ we have \begin{equation*} \left\vert A_r^H\right\vert \leq \#\pi_0 \left( W \setminus \bigcup_{\mu\in\cS_r} \mu\partial W\right). \end{equation*} \begin{proof} By \cref{Thm:AcceptanceDomains} we know that the acceptance domains $W_r(\lambda)$ tile the window $W$ and that they are disjoint. Further for every $A_r(\lambda)$ we know that \begin{equation*} A_r(\lambda) \subset W_r(\lambda).
\end{equation*} So \begin{align*} \partial W_r(\lambda) = &\partial \left( \left(\bigcap_{\mu \in \cS_r(\lambda)}\mu \mathring{W}\right)\cap \left(\bigcap_{\mu \in \cS_r(\lambda)^\mathrm{C}} \mu W^\mathrm{C} \right) \right)\\ &\subset \left(\bigcup_{\mu \in \cS_r(\lambda)}\mu \partial \mathring{W}\right) \cup \left(\bigcup_{\mu \in \cS_r(\lambda)^\mathrm{C}} \mu \partial W^\mathrm{C} \right) = \bigcup_{\mu \in \cS_r} \mu \partial W. \end{align*} Therefore every connected component of $ W \setminus \bigcup_{\mu\in\cS_r} \mu\partial W$ is contained in some $W_r(\lambda)$, so \begin{equation*} \left\vert A^H_r\right\vert = \vert W_r \vert \leq \#\pi_0 \left( W \setminus \bigcup_{\mu\in\cS_r} \mu\partial W\right). \end{equation*} \end{proof} \end{Lemma} \begin{Lemma} For a model set $\Lambda(G,H,\Gamma, W)$ with polytopal window $W \subset H$ we have \begin{equation*} \#\pi_0 \left( W \setminus \bigcup_{\mu\in\cS_r} \mu\partial W\right) \leq \# \pi_0 \left( H \setminus \bigcup_{\mu\in\cS_r} \bigcup_{i=1}^N \mu P_i\right). \end{equation*} \begin{proof} Since $\bigcup_{\mu\in\cS_r} \mu\partial W \subset \bigcup_{\mu\in\cS_r} \bigcup_{i=1}^N \mu P_i$ we have \begin{equation*} \#\pi_0 \left( W \setminus \bigcup_{\mu\in\cS_r} \mu\partial W\right) \leq \# \pi_0 \left( W \setminus \bigcup_{\mu\in\cS_r} \bigcup_{i=1}^N \mu P_i\right). \end{equation*} Since $e \in \cS_r$ for all $r$ we have $\partial W \subset \bigcup_{\mu \in \cS_r}\bigcup_{i=1}^N \mu P_i$, so that all regions inside $W$ stay the same if we enlarge $W$ to $H$. Adding the regions outside of $W$ we therefore get \begin{equation*} \#\pi_0 \left( W \setminus \bigcup_{\mu\in\cS_r} \bigcup_{i=1}^N \mu P_i\right) \leq \# \pi_0 \left( H \setminus \bigcup_{\mu\in\cS_r} \bigcup_{i=1}^N \mu P_i\right). \end{equation*} \end{proof} \end{Lemma} By \cref{Lem:HomLieTop} the topology on $H$ is the same as on $\RR^{\dim(H)}$, and we additionally assumed $H$ to be non-crooked. So the problem of counting the connected components is a well-known problem from the theory of real hyperplane arrangements. A general upper bound for the number of connected components has been known for a long time and first appears in \cite{Schlafli} by L. Schläfli; see also \cite[Theorem 1.2]{Dimca}. For an arrangement $\cH \subset \RR^n$ consisting of $k$ different hyperplanes we get the general upper bound \begin{equation*} \sum_{i=0}^n \begin{pmatrix} k \\ i\end{pmatrix} \asymp k^n. \end{equation*} In our case $n= \dim(H)$ and $k= \vert \cS(r)\vert$. We know from \cref{Prop:Growth} that ${\vert \cS(r)\vert \asymp r^{\homdim(G)}}$. Combining these results yields \small\begin{equation*} p(r) = \left\vert A_r^H\right\vert \leq \#\pi_0 \left( H \setminus \bigcup_{\mu\in\cS(r)} \bigcup_{i=1}^N \mu P_i\right) \ll \vert \cS(r)\vert^{\dim(H)} \asymp r^{\homdim(G) \cdot \dim (H)}. \end{equation*}\normalsize \subsection{Lower bound of the growth}\label{SubSec:LowerBound} We fix some parameters of the window which will help us in proving \cref{MainTHM}.
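Before fixing these parameters, let us record, purely for orientation, what the exponent in \cref{MainTHM} amounts to in the running example; this adds nothing beyond the statements already made and is not used later. \begin{Example} For $G=H=\HH$ with the dilations $D_r((x,y,z))=(r\cdot x, r\cdot y, r^2\cdot z)$ from the running example, the weights are $\nu_1=\nu_2=1$ and $\nu_3=2$, so $\homdim(\HH)=4$ while $\dim(\HH)=3$. Hence the $r$-slab contains $\vert\cS_r\vert \asymp r^{4}$ elements, the Schläfli bound allows at most $\asymp \vert\cS_r\vert^{3}$ regions, and for any window satisfying the hypotheses of \cref{MainTHM} the complexity function grows like \begin{equation*} p(r) \asymp r^{\homdim(\HH)\cdot \dim(\HH)} = r^{12}. \end{equation*} \end{Example}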
\begin{Definition} For a given polytopal window $W$ we fix the following parameters: \begin{enumerate}[label=\roman*)] \item A \textit{center of the window} $c_W \in W$ such that $\sup\{r \in \RR \mid B_r(c_W) \subset W \}$ is maximal, \item the \textit{inner radius} of the window $I_W:=\sup\{r \in \RR \mid B_r(c_W) \subset W\}$, \item the \textit{outer radius} of the window $O_W:=\inf\{r \in \RR\mid W \subset B_r(c_W)\}$, \item the \textit{size of $\partial_i W$} \begin{equation*} F_i:=\sup\{r \in \RR \mid \exists p \in P_i: B_r(p)\cap \partial_i W = B_r(p) \cap P_i\} \end{equation*} and the minimum of all the sizes of the faces \begin{equation*} F_W:=\min\{F_i \mid i \in \{1,...,N\}\}, \end{equation*} \item for each face $\partial_i W$ a \textit{face center} $p_i \in \partial_i W$ such that $B_{F_W}(p_i)\cap \partial_i W=B_{F_W}(p_i)\cap P_i$. \end{enumerate} \end{Definition} We will use these parameters in our proofs later. The centers may not be unique, but we fix a choice for the rest of the argument. Further we need to widen the definition of parallel a bit, since we are only interested in intersections inside a bounded region. \begin{Definition} Let $B \subset H$ be a bounded region and $P_1, P_2$ two hyperplanes in $H$. We call $P_1$ and $P_2$ \textit{almost parallel with respect to $B$} if $P_1\cap P_2 \cap B = \emptyset$. \end{Definition} The aim is now to find a region inside $W$ for which it makes no difference whether it is divided by a face $\partial_i W$ or by the whole hyperplane $P_i$. Further we wish to get a one-to-one correspondence between the connected components and the acceptance domains in this small region. \begin{Definition} Let $B \subset H$ be a bounded region. For $s \in H$ we say $s \partial_i W$ \textit{cuts $B$ fully} if \begin{equation*} (s \partial_i W) \cap B = (s P_i) \cap B \neq \emptyset. \end{equation*} If additionally \begin{equation*} (s P_i^+) \cap B = (s W) \cap B \neq \emptyset, \end{equation*} we say $s \partial_i W$ \textit{cuts $B$ all-round}. \end{Definition} \begin{Remark} An all-round cut is always a full cut, but the converse is false; see \cref{Fig:Cuts}. \end{Remark} \begin{figure} \centering \captionsetup{width=0.75\linewidth} \begin{tikzpicture}[descr/.style={fill=white,inner sep=2.5pt}] \draw (0,0) circle (2); \draw[-] (-2,-1) -- (2,1); \draw[-, dotted] (-3,-1.5) -- (-2,-1); \draw[-, dotted] (2,1) -- (3,1.5); \draw[-] (-2,-1) -- (-2,-2); \draw[-] (2,1) -- (3,0); \fill[fill=gray!50, fill opacity=0.2] (-2,-2.5) -- (-2,-1) -- (2,1) -- (3,0) -- (3,-2.5) -- (-2,-2.5); \node at (-1.2,1.3) {$B$}; \node at (2.5, -2) {$W$}; \node at (2.8, 1.7) {$P_i$}; \end{tikzpicture} \hspace{0.5cm} \begin{tikzpicture}[descr/.style={fill=white,inner sep=2.5pt}] \draw (0,0) circle (2); \draw[-] (-2,-1) -- (2,1); \draw[-, dotted] (-3,-1.5) -- (-2,-1); \draw[-, dotted] (2,1) -- (3,1.5); \draw[-] (-2,-1) -- (-2.5,-2); \draw[-] (2,1) -- (-1,-2); \fill[fill=gray!50, fill opacity=0.2] (-2.75,-2.5) -- (-2,-1) -- (2,1) -- (-1.5,-2.5) -- (-2.75,-2.5); \node at (-1.2,1.3) {$B$}; \node at (-1.7, -2) {$W$}; \node at (2.5, 1.6) {$P_i$}; \end{tikzpicture} \caption{On the left $\partial_i W$ cuts $B$ fully and all-round; on the right $\partial_i W$ cuts $B$ fully but not all-round.} \label{Fig:Cuts} \end{figure} In addition to shrinking the region of interest we will also shrink the set of elements by which we act on the window: instead of considering all elements from $\cS_r$ we define for each face a subset $U_i(r) \subset \cS_r$.
The $U_i(r)$ will be defined in such a way that we only obtain all-round cuts and get a one-to-one correspondence between the connected components and the acceptance domains. \begin{Definition}\label{Def:SmallBall} Let $(k,h) \in \RR^2$. The region we will consider is $B_h(c_W)$ and the set by which we act is ${U_i:= B_k(c_W p_i^{-1})}$ for all $i\in \{1,...,N\}$. If the following conditions are fulfilled we call $(k,h)$ a \textit{good pair}: \begin{enumerate}[label=\roman*)] \item $0 < k <h$, \item $h < I_W$, therefore $B_h(c_W) \subset W$, \item $\forall a\in B_{O_W}(e)$, $x \in B_{2h}(e)$: $\vert a x a^{-1}\vert_H \leq F_W$, \item $\forall i \in \{1,...,N\}$: $\forall s \in U_i$: $(s P_i^+) \cap B_h(c_W) = (s W) \cap B_h(c_W)$. \end{enumerate} \end{Definition} \begin{Remark} Observe that if $(k,h)$ is a good pair, then $(k',h)$ is a good pair for all $0 < k' < k$. \end{Remark} \begin{Proposition}\label{Prop:GoodPair} A good pair exists. \end{Proposition} For the proof we need some preparation; the proof itself will be given after \cref{Cor:Angle}. \begin{Lemma}\label{Cor:ConBall} Let $G$ be a homogeneous Lie group and $x \in G$ fixed. Then for all $\epsilon >0$ there exists $\delta(x) >0$ such that for $u \in B_{\delta(x)}(e)$: \begin{equation*} x u x^{-1} \in B_{\epsilon}(e). \end{equation*} Further if $\vert x \vert \leq k$ then there exists a $\delta'(k)>0$ such that for $u \in B_{\delta'(k)}(e)$: \begin{equation*} x u x^{-1} \in B_{\epsilon}(e). \end{equation*} \begin{proof} Using the Baker-Campbell-Hausdorff formula, we get \small\begin{align*} x u &x^{-1} = \left(x + u + \frac{1}{2}[x,u]+\frac{1}{12}([x,[x,u]]-[u,[x,u]])-...\right) x^{-1}\\ &= \left(x + u + \frac{1}{2}[x,u]+\frac{1}{12}([x,[x,u]]-[u,[x,u]])-...\right)\\ &\quad\quad - x + \frac{1}{2}\left[\left(x + u + \frac{1}{2}[x,u]+\frac{1}{12}([x,[x,u]]-[u,[x,u]])-...\right) ,x^{-1}\right]+...\\ &= u +B(x,u), \end{align*}\normalsize where $B(x,u)$ only contains terms which include $[u,x]$. The continuity of the Lie bracket implies the claim. Be aware that we work in exponential coordinates here, as explained in \cref{Sec:HomLie}, so formally we should write $\exp(x)$ and $\exp(u)$ instead of $x$ and $u$. \end{proof} \end{Lemma} \begin{Lemma}\label{Lem:CutsFully} Let $(k,h)$ fulfil conditions i) and iii) of \cref{Def:SmallBall}. Then for any $i\in \{1,...,N\}$ and for every $s \in U_i$ it holds that $s P_i $ cuts $B_h(c_W)$ fully. \begin{proof} First we show that for all $s \in U_i$ we get $s P_i \cap B_h(c_W) \neq \emptyset$. We can write $s=a \cdot c_W \cdot p_i^{-1}$ with $a \in B_k(e)$. Then \begin{equation*} d(s \cdot p_i, c_W)= \left\vert a \cdot c_W \cdot p_i^{-1} \cdot p_i \cdot c_W^{-1}\right\vert = \vert a \vert < k < h. \end{equation*} Now we need to show that $s \partial_i W \cap B_h(c_W) = s P_i \cap B_h(c_W)$. This is equivalent to \begin{equation*} \partial_i W \cap s^{-1} B_h(c_W) = P_i\cap s^{-1} B_h(c_W). \end{equation*} The inclusion $\subseteq$ is obvious since $\partial_i W \subset P_i$. We show that $s^{-1} B_h(c_W) \subseteq B_{F_W}(p_i)$; then the claim follows from the definition of $p_i$ and $F_W$. Let $x \cdot c_W \in B_h(c_W)$ be an arbitrary element and $s=a \cdot c_W \cdot p_i^{-1}$ as above. \begin{equation*} d(s^{-1} x c_W, p_i)= \big\vert \hspace{-0.45cm}\underbrace{p_i c_W^{-1}}_{=: y \in B_{O_W}(e) } \cdot \underbrace{a^{-1} x}_{\in B_{h + k}(e)} \cdot \underbrace{c_W p_i^{-1}}_{=y^{-1}}\big\vert \leq F_W. \end{equation*} The inequality follows by iii) of \cref{Def:SmallBall}.
\end{proof} \end{Lemma} From the proof we can extract the following corollary. \begin{Corollary}\label{Cor:IntersectB_k} Let $i\in \{1,...,N\}$. For every $s \in U_i$ the hyperplane $s P_i$ intersects $B_k(c_W)$ non-trivially. \end{Corollary} \begin{Corollary} Let $(k,h)$ fulfil conditions i), iii) and iv) of \cref{Def:SmallBall}. Then for any $i\in \{1,...,N\}$ and for every $s \in U_i$ it holds that $s P_i$ cuts $B_h(c_W)$ all-round. \end{Corollary} We will also need the notion of an intersection angle between two hyperplanes, since we will show that by acting with a small element we can only rotate a plane a little. \begin{Definition}\label{Def:Angle} The angle between two hyperplanes $P$ and $Q$ in $\RR^d$ with normals $n_P$ and $n_Q$, both normalized, is given by \begin{equation*} \sphericalangle(P,Q) := \cos^{-1} \left(\vert \langle n_P , n_Q \rangle \vert\right). \end{equation*} For $i,j \in \{1,...,N\}$ with $i \neq j$ we denote by $\alpha_{ij}$ the angle between $c_Wp_i^{-1} P_i$ and $c_W p_j^{-1} P_j$, i.e. \begin{equation*} \alpha_{ij}:=\sphericalangle(c_W p_i^{-1} P_i, c_W p_j^{-1} P_j). \end{equation*} \end{Definition} \begin{Remark}\label{Rem:FixFamily} In the definition we use $c_Wp_i^{-1} P_i$ instead of $P_i$ since this plane serves as the prototype for the family $U_i P_i$: all other planes from this family result from it by the action of a small element, because every $u \in U_i$ is of the form $a c_Wp_i^{-1}$ with $a\in B_k(e)$. \end{Remark} \begin{Convention} We choose $i_1,..., i_{\dim(H)}$ such that \begin{equation*} \bigcap_{l=1}^{\dim(H)} c_W p_{i_l}^{-1} P_{i_l}= \{c_W\}. \end{equation*} So this is a set of hyperplanes in which each intersection of $k$ hyperplanes has dimension $\dim(H)-k$. From now on we fix such a family and denote it by $\cF$. Without loss of generality $\cF = \{P_1,...,P_{\dim(H)}\}$. \end{Convention} \begin{Lemma}\label{Lem:Angle} For all $r>0$ there exists $\beta(r)$, with $\beta(r) \to 0$ for $r \to 0$, such that for all $x \in B_r(e) \subset H$ and any hyperplane $P$ we have $\sphericalangle(x P, P) \leq \beta(r)$. \begin{proof} Since $H$ is a non-crooked homogeneous Lie group we know that $xP$ is again a hyperplane. So let \begin{equation*} P= \left\{a + \sum_{i=1}^{n-1} t_i v_i \mid t_i \in \RR \right\} \end{equation*} where $a, v_i \in \RR^n$. By the form of the group action, which we discuss in \hyperref[Appendix:Poly2]{Appendix B.1}, we know that $xP$ is of the form \begin{equation*} xP = \big(f_1(x,P),...,f_n(x,P)\big)^\mathrm{T} \end{equation*} with $f_i$ polynomials of a special form, namely \begin{equation*} f_i(x,P) = x_i + P_i +\sum_{k=1}^n\sum_{\substack{\alpha_1,...,\alpha_n \in \NN \\ \sum \alpha_i \neq 0}} c_{k,\alpha_1,...,\alpha_n} P_k x_1^{\alpha_1}...x_n^{\alpha_n}. \end{equation*} The direction vectors of $xP$ are therefore the ones of $P$ plus a deviation which depends on $x$; as $x$ gets smaller the two planes become closer to being parallel. \end{proof} \end{Lemma} \begin{Corollary}\label{Cor:Angle} For all $r>0$ there exists $\beta(r)$, with $\beta(r) \to 0$ for $r \to 0$, such that for all $x, y \in B_r(e) \subset H$ and any hyperplane $P$ we have $\sphericalangle(x P, y P) \leq 2\beta(r)$. \end{Corollary} \begin{proof}[Proof of \cref{Prop:GoodPair}] By \cref{Cor:ConBall} there exists a bound $b_1>0$ such that whenever $h \leq b_1$ we have $\vert a x a^{-1}\vert_H \leq F_W$ for all $a\in B_{O_W}(e)$ and $x \in B_{2h}(e)$.
Set $h':=\min\{b_1, \frac{I_W}{2}\}$ and $k':=\frac{h'}{2}$; then conditions i), ii) and iii) from \cref{Def:SmallBall} are fulfilled.\par By \cref{Lem:CutsFully}, for any $i \in \{1,...,N\}$ and all $s \in B_{k'}(c_Wp_i^{-1})$ we have that $sP_i$ cuts $B_{h'}(c_W)$ fully; this also holds for all $h \leq h'$.\par Now assume that there is a cut which is full but not all-round, i.e. \begin{equation*} s P_i^+ \cap B_{h'}(c_W) \neq sW \cap B_{h'}(c_W). \end{equation*} To be more precise we have $sW \cap B_{h'}(c_W) \subsetneq s P_i^+ \cap B_{h'}(c_W)$ since $sW \subset sP_i^+$. Let \begin{equation*} b_{s}^i:= \inf\{r \in \RR \mid \exists x \in B_r(c_W) : x \in s P_i^+, x \notin sW\}. \end{equation*} We see that $b_s^i \neq 0$ since $sW$ is a polytope with non-empty interior. Now set \begin{equation*} h:=\min\big\{h', \inf_{s\in B_{k'}(c_Wp_i^{-1})}\{b_s^i\}\big\} \end{equation*} and $k=\frac{h}{2}$. The last thing to observe is that the infimum over the $b_s^i$ is not zero. If it were zero, the polytope $sW$ could become arbitrarily thin, so that only an even smaller ball would fit inside. But this cannot be the case, since we have seen that the bounding hyperplanes can only be rotated by a small amount. \end{proof} \begin{Convention} From now on let $(k,h)$ be a good pair. \end{Convention} Observe that $U_i \subset WW^{-1}$ for all $i\in \{1,...,N\}$. Further notice that we act differently on the different hyperplanes which bound $W$: the $U_i$ may overlap, but they are not equal. Additionally we have chosen $B_h(c_W)$ so that for each of the hyperplanes it does not make a difference whether we act on the face $\partial_i W$ or on the hyperplane $P_i$. \par \medskip We now bring back the dependence on the growth parameter $r$ and on the lattice $\Gamma$. \begin{Definition} Set $U_i(r):= \pi_H((B_r^G(e) \times U_i)\cap \Gamma)$, which is a finite subset of $U_i$. \end{Definition} \begin{Remark} Observe that $U_i(r)$ is a subset of the $r$-slab $\cS_r$, since $U_i \subset WW^{-1}$. \end{Remark} \begin{Proposition}\label{Lem:CutsAllround} The number of connected components of $ B_h(c_W) \setminus \bigcup_{i=1}^N \bigcup_{s \in U_i(r)} s \partial_i W$ is a lower bound for the number of acceptance domains $\vert A_r^H \vert$, i.e. \begin{equation*} \# \pi_0 \left( B_h(c_W) \setminus \bigcup_{i=1}^N \bigcup_{s \in U_i(r)} s \partial_i W \right) \leq \vert A_r^H \vert. \end{equation*} \begin{proof} Recall that a pre-acceptance domain $A_r^H(\lambda)$ is contained in an acceptance domain $W_r(\lambda)$. Let $C$ be a connected component of $B_h(c_W) \setminus \bigcup_{i=1}^N \bigcup_{s \in U_i(r)} s \partial_i W$. By \cref{Lem:CutsFully} we can replace the faces by the hyperplanes without changing the connected components in $B_h(c_W)$, so we consider ${B_h(c_W) \setminus \bigcup_{i=1}^N \bigcup_{s \in U_i(r)} s P_i}$.\par We show that if an acceptance domain intersects a connected component of $B_h(c_W) \setminus \bigcup_{i=1}^N \bigcup_{s \in U_i(r)} s P_i$ then it is fully contained in it. Let $C'$ be another connected component of $B_h(c_W) \setminus \bigcup_{i=1}^N \bigcup_{s \in U_i(r)} s P_i$ and assume that $C \cap W_r(\lambda) \neq \emptyset \neq C' \cap W_r(\lambda)$. Between $C$ and $C'$ there is a hyperplane $s P_i$ for some $i\in \{1,...,N\}$ and $s \in U_i(r)$. Therefore $C \subset s \mathring{P_i^+}$ and $C'\subset s \mathring{P_i^-}$ or the other way around.
Since the cut $s\partial_i W$ is all-round we get that $C \subset s \mathring{W}$ and $C' \subset s W^\mathrm{C}$ or the other way around. But either $W_r(\lambda) \subset s\mathring{W}$ or $W_r(\lambda) \subset sW^\mathrm{C}$, a contradiction. \end{proof} \end{Proposition} It remains to find a combinatorial argument for counting the connected components of \begin{equation*} B_h(c_W) \setminus \bigcup_{i=1}^N \bigcup_{s \in U_i(r)} s \partial_i W=B_h(c_W) \setminus \bigcup_{i=1}^N \bigcup_{s \in U_i(r)} s P_i, \end{equation*} which yields a lower bound by \cref{Lem:CutsAllround}. This will be done in the next section.\par \medskip In the rest of the section we will prove the following proposition, which gives us the tools for the combinatorics in the next section. \begin{Proposition}\label{Prop:ToolComb} There exists a good pair $(k,h)$ such that: \begin{enumerate} \item For all $I =\{i_1,...,i_{\dim(H)}\} \subset \{1,...,N\}$ with $\vert I \vert = \dim(H)$ and all $u_{1} \in U_{i_1},...,u_{{\dim(H)}} \in U_{i_{\dim(H)}}$ we get \begin{equation*} u_{1} P_{i_1} \cap ... \cap u_{{\dim(H)}} P_{i_{\dim(H)}} = \{s\}, \text{where } s \in B_h(c_W). \end{equation*} \item For every constant $c>0$ and all $s \in H$, there is an $r_0$ such that for all $r > r_0$ we get that \begin{equation*} \big\vert \{u \in U_i(r) \mid s \in u P_i\}\big\vert \leq c \vert U_i(r) \vert. \end{equation*} \end{enumerate} \end{Proposition} \begin{Lemma}\label{Lem:IntersectionPoint} For $i,j \in \{1,...,\dim(H)\}$, $i \neq j$, there exists a good pair $(k,h)$ such that for all $u \in U_i=B_k(c_Wp_i^{-1}), v \in U_j$ we have that $u P_i$ and $vP_j$ are not almost parallel with respect to $B_h(c_W)$. \begin{proof} Fix $i \neq j$ as in the statement. By \cref{Cor:IntersectB_k} all the $u P_i$, $v P_j$ with $u \in U_i$, $v \in U_j$ intersect $B_k(c_W)$.\par Further we can control the angle between the two hyperplanes by \cref{Lem:Angle}, so that for all $ u \in U_i, v \in U_j $: \begin{equation*} \sphericalangle(u P_i, v P_j) \geq \sphericalangle(c_W p_i^{-1}P_i, c_W p_j^{-1}P_j) - \sphericalangle(u P_i, c_W p_i^{-1}P_i) - \sphericalangle(c_W p_j^{-1}P_j, v P_j)\geq \alpha_{ij} - 2 \beta(k), \end{equation*} where $\beta(k)$ is from \cref{Lem:Angle}, applied to $uP_i = a\,(c_Wp_i^{-1}P_i)$ with $a \in B_k(e)$ and similarly for $vP_j$. We can choose $k$ so small that $0 < \alpha_{ij} - 2 \beta(k) < \frac{\pi}{2}$; this means that the hyperplanes cannot be parallel, so they intersect somewhere. For two hyperplanes which intersect the same ball of radius $k$ and which intersect each other at an angle of at least $\alpha_{ij}-2\beta(k)$, the distance from their intersection to the center point of the ball is bounded by \begin{equation*} c(k):= k \left( 1+\frac{1}{\tan\left(\frac{\alpha_{ij}-2 \beta(k)}{2}\right)}\right). \end{equation*} The idea behind this bound is to consider the plane which is orthogonal to the intersection of $u P_i$ and $v P_j$ and contains $c_W$; one can then argue in this two-dimensional plane.\par The bound $c(k)$ goes to zero if $k$ goes to zero, so we can choose $k$ so small that $c(k) < h$. Therefore the two planes intersect inside $B_h(c_W)$. \end{proof} \end{Lemma} \begin{Corollary}\label{Cor:IntersectionPoint} There exists a good pair $(k,h)$ such that for all $u_i \in U_{i}$ and $i \in \{1,...,\dim(H)\}$ we can find some $x \in B_h(c_W)$ such that: \begin{equation*} \bigcap_{i=1}^{\dim(H)} u_i P_i = \{x\}.
\end{equation*} \begin{proof} By the choice of the family $\cF$ we know that $\bigcap_{i=1}^{\dim(H)} c_W p_{i}^{-1} P_{i}= \{c_W\}$. We will first show that there is a $k_0$ such that for all $0< k \leq k_0$ we also get a zero-dimensional intersection inside $B_h(c_W)$ if we replace $c_W p_i^{-1}$ by $u_i \in U_i=B_k(c_Wp_i^{-1})$.\par This intersection behaviour means that if we choose some vector $v \parallel c_Wp_i^{-1}P_i$, then $v \parallel c_Wp_j^{-1}P_j$ cannot hold for all of the remaining $j$, since otherwise the intersection of all the hyperplanes would contain a line instead of being a single point. We have to choose $k_0$ such that for all $i\in \{1,...,\dim(H)\}$ and all $v \parallel u_i P_i$, $u_i \in U_i$, there exists a $j\in \{1,...,\dim(H)\}\setminus\{i\}$ and a $u_j \in U_j$ such that $v \nparallel u_jP_j$. Since acting by an element from $U_i$ only rotates the hyperplane a little, it is possible to find such a $k_0$, and then the property also holds for all $k$ smaller than $k_0$.\par Now we have to check that the intersection point also lies inside of $B_h(c_W)$. We do this stepwise. It is clear that $\bigcap_{i=1}^{\dim(H)} c_W p_{i}^{-1} P_{i}= \{c_W\}$ and $c_W \in B_h(c_W)$. Now we change $c_W p_1^{-1}$ to some $u_1 \in U_1$ and consider $u_1 P_1 \cap \bigcap_{i=2}^{\dim(H)} c_W p_{i}^{-1} P_{i} =\{x_1\}$. We already know that $\bigcap_{i=2}^{\dim(H)} c_W p_{i}^{-1} P_{i}$ is a subspace of dimension $1$ and that $u_1 P_1$ intersects this subspace. Since $u_1= a_1 c_W p_1^{-1}$ with $a_1\in B_k(e)$, the plane $u_1 P_1$ differs from $c_W p_1^{-1} P_1$ only by a small shift (this follows from the form of the group action explained in \cref{Sec:HomLie}) and a small rotation (this follows from \cref{Lem:Angle}). Therefore $d(x_1, c_W) < \epsilon_1(k)$, where $\epsilon_1$ depends on $k$ and goes to zero if $k$ goes to zero. We can iterate this process, obtaining a new intersection point at each step, until we end at $x_d$, $d=\dim(H)$, where we have \small\begin{align*} d(x_d,c_W) &< d(x_d, x_{d-1})+ d(x_{d-1},x_{d-2})+... + d(x_2,x_1)+d(x_1,c_W) < \sum_{i=1}^d \epsilon_i(k) =:\epsilon(k). \end{align*}\normalsize So by choosing $k$ such that $\epsilon(k)<h$ we get the claim. This is possible since $\epsilon(k) \to 0$ for $k \to 0$. \end{proof} \end{Corollary} The corollary tells us that all intersections result in a single point in $B_h(c_W)$, but it is not clear that different choices of $u_j$ result in different intersection points. This is a major difference from the Euclidean case: there the action is just by translations, so by acting on a hyperplane we get a parallel hyperplane, which is either still the same hyperplane or does not intersect the original hyperplane at all.\par \medskip To prove part (2) of \cref{Prop:ToolComb} we need \cref{Thm:Nevo}, which we may apply by the discussion in \cref{SubSec:Ergodic}. \begin{Lemma}\label{Lem:BoundInsidenz} Consider a family $U_i(r) \cdot P_i$. For any constant $c >0$ and all $s \in H$ there is an $r_0$ such that for all $r \geq r_0$ we get that \begin{equation*} \big\vert\{u \in U_i(r) \mid s \in u P_i \}\big\vert \leq c \cdot \vert U_i(r) \vert. \end{equation*} \begin{proof} Let $u \in U_i(r)$ such that $s\in u P_i$. This implies that $u^{-1} \in P_i s^{-1}$, so $u \in U_i(r) \cap (P_i s^{-1})^{-1}$. So the question is how many elements are in $U_i(r) \cap (P_i s^{-1})^{-1}$ compared to the number of elements in $U_i(r)$. To get an estimate via the Haar-measure we have to thicken $(P_i s^{-1})^{-1}$, since it is a subset of lower dimension.
We consider an $\epsilon$-strip around this set: we choose a finite set $A(\epsilon) \subset (P_is^{-1})^{-1}\cap U_i$ such that \begin{equation*} U_i \cap (P_i s^{-1})^{-1} \subset \bigcup_{p \in A(\epsilon)}B_\epsilon(p) \end{equation*} and further let $S_\epsilon:= U_i(r) \cap \bigcup_{p \in A(\epsilon)} B_\epsilon(p)$. We have seen that we may apply \cref{Thm:Nevo}, so for every $\delta >0$ and $r$ large enough \begin{align*} \delta &\geq \big\vert \vert U_i(r)\vert - \mu_{G \times H}(B_r(e) \times U_i) \big\vert = \big\vert \vert U_i(r)\vert - \mu_{G}(B_r(e)) \mu_{H}(U_i) \big\vert\\ &= \big\vert \vert U_i(r)\vert - r^{\homdim(G)}\mu_{G}(B_1(e)) \mu_{H}(U_i)\big\vert. \end{align*} Since $S_\epsilon$ is a finite union of balls we can use the same argument for all the balls simultaneously and get for $\delta >0$ that \small\begin{align*} \lim\limits_{r \to \infty} \frac{\vert U_i(r) \cap (P_i s^{-1})^{-1}\vert }{\vert U_i(r)\vert} &< \lim\limits_{r \to \infty}\frac{\vert S_\epsilon \vert}{\vert U_i(r)\vert} = \frac{\sum_{p \in A(\epsilon)} r^{\homdim(G)}\mu_{G}(B_1(e))\mu_{H}(B_\epsilon(p))}{r^{\homdim(G)}\mu_{G}(B_1(e))\mu_{H}(U_i)}\\ &= \frac{\vert A(\epsilon) \vert \cdot \mu_{H}(B_\epsilon(e))}{\mu_{H}(U_i)} \xrightarrow{\epsilon \to 0} 0. \end{align*}\normalsize \end{proof} \end{Lemma} \section{Combinatorics}\label{Sec:Combinatorics} The aim of this section is to give a lower bound for the number of connected components of \begin{equation*} B_h(c_W) \setminus \bigcup_{i=1}^N \bigcup_{s \in U_i(r)} s P_i, \end{equation*} under suitable conditions on the families of hyperplanes $s P_i$. This will fill the gap we left in the last section. We first give a short introduction to the theory of hyperplane arrangements and fix the common notation for this setup, following Dimca, \cite{Dimca}, and Stanley, \cite{Stanley}. Furthermore we will consider Beck's theorem, which was first proved in \cite{Beck} but also follows from the Szemer\'edi-Trotter theorem, \cite{SzemerediTrotter}. An easier proof of the Szemer\'edi-Trotter theorem can be found in the paper of Sz\'ekely, \cite{Szekely}; we leave it to the reader to check that the two versions are equivalent.\par\medskip \begin{Theorem}[Higher dimensional local dual of Beck's Theorem]\label{Thm:BeckInduction} There exists a constant $c_d>0$ with the following property. Let $\cH$ be a hyperplane arrangement in $\RR^d$ and let $B \subset \RR^d$ be convex. Suppose that $\cH$ consists of $d$ families $F_1,...,F_d$ with $\vert F_i \vert = \frac{n}{d}$ such that for all $(f_1,...,f_d) \in F_1 \times ... \times F_d$ we have $B \cap \bigcap_{i=1}^d f_i = \{p\}$ for some point $p \in B$, and that at most $c \cdot \vert F_i \vert$ hyperplanes from $F_i$ intersect in any one point, where $0 < c < \frac{1}{100}$. Then the number of intersection points in $B$ is at least $c_d \cdot n^d$, i.e. $\vert F_{0,B}(\cH) \vert \geq c_d \cdot n^d$. \end{Theorem} \begin{Corollary} In the situation of \cref{MainTHM} and \cref{Prop:ToolComb}: \begin{equation*} \# \pi_0 \left( B_h(c_W) \setminus \bigcup_{i=1}^N \bigcup_{s \in U_i(r)} s P_i \right) \gg r^{\homdim(G) \cdot \dim(H)}. \end{equation*} \begin{proof} Notice that $B_h(c_W)$ is convex and that the families constructed in \cref{SubSec:LowerBound} fulfil the requirements of \cref{Thm:BeckInduction} by \cref{Prop:ToolComb}. Since moreover $\vert U_i(r)\vert \asymp r^{\homdim(G)}$ by \cref{Thm:Nevo}, the claim follows from \cref{Thm:BeckInduction} together with \cref{Prop:StanleyBound}. \end{proof} \end{Corollary} This finishes the proof of the main theorem, \cref{MainTHM}. The rest of the section is devoted to the proof of \cref{Thm:BeckInduction}.
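Before setting up the general machinery we record an elementary two-dimensional example, included only for orientation and not needed in the sequel, which shows that the order $n^d$ in \cref{Thm:BeckInduction} is the correct one. \begin{Example} Let $d=2$ and let $B \subset \RR^2$ be a disc which is large enough to contain all intersection points below. Let $F_1$ consist of $\frac{n}{2}$ horizontal lines and $F_2$ of $\frac{n}{2}$ vertical lines. Then for every pair $(f_1,f_2) \in F_1 \times F_2$ the intersection $B \cap f_1 \cap f_2$ is a single point, every intersection point lies on exactly one line from each family, so for $n>200$ no point lies on more than $\frac{n}{100}$ lines, and \begin{equation*} \vert F_{0,B}(\cH)\vert = \left(\frac{n}{2}\right)^2 = \frac{n^2}{4}. \end{equation*} Since an arrangement of $n$ lines can never have more than $\begin{pmatrix} n \\ 2 \end{pmatrix}$ intersection points, the lower bound $c_2 \cdot n^2$ of \cref{Thm:DualBeck} below is of the optimal order; the analogous configuration with $d$ families of parallel hyperplanes in general position shows the same for the bound $c_d \cdot n^d$ of \cref{Thm:BeckInduction}. \end{Example}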
\begin{Definition} A finite set of affine hyperplanes $\cH=\{P_1,...,P_n\}$ in $\RR^d$ is called a \textit{hyperplane arrangement}. \end{Definition} \begin{Definition} Let $\cH$ be a hyperplane arrangement in $\RR^d$. \begin{enumerate}[label=\roman*)] \item A non-empty intersection of hyperplanes from $\cH$ is called a \textit{flat} of $\cH$. The set of all flats is denoted by $F(\cH)$. If we are only interested in flats of a certain dimension $k$ we denote this set by $F_k(\cH)$. Notice that the whole space is also a flat, as a result of the intersection over the empty set. \item The connected components of \begin{equation*} \RR^d \setminus \bigcup_{H \in \cH} H \end{equation*} are called \textit{regions of the arrangement}. The set of all regions is denoted by $f_d(\cH)$ and the number of these regions is denoted by $r(\cH)$. \item For $B\subset \RR^d$ the connected components of \begin{equation*} B \setminus \bigcup_{H \in \cH} H \end{equation*} are called \textit{regions of the arrangement with respect to $B$} and the number of these regions is denoted by $r_B(\cH)$. \item For $B\subset \RR^d$ we define the \textit{arrangement with respect to $B$} as \begin{equation*} \cH_B := \{H \in \cH \mid H \cap B \neq \emptyset\}. \end{equation*} \item For $B\subset \RR^d$, a \textit{flat with respect to $B$} is a flat of $\cH$ which intersects $B$. The set of these flats is denoted by $F_B(\cH)$, and again if we only consider the flats of dimension $k$, we write $F_{k,B}(\cH)$. \end{enumerate} \end{Definition} \begin{Remark} Obviously $r_B(\cH)$ only depends on the hyperplanes which intersect $B$, so $r_B(\cH)=r_B(\cH_B)$. Further notice that in general $r_B(\cH) \neq \vert \{ R \in f_d(\cH) \mid R \cap B \neq \emptyset\}\vert$, but the following proposition shows that equality holds for convex $B$. \end{Remark} \begin{Proposition}\label{Prop:RegionsWithRespectB} Let $\cH$ be a hyperplane arrangement in $\RR^d$ and $B \subset \RR^d$ convex, then \begin{equation*} r_B(\cH) = \vert\{R \in f_d(\cH) \mid R \cap B \neq \emptyset\}\vert. \end{equation*} \begin{proof} The regions of a hyperplane arrangement are convex. Since $B$ is also convex, for all $R \in f_d(\cH)$ the intersection $R \cap B$ is convex and in particular connected. So each region of the arrangement either intersects $B$, and then contributes exactly one region to $r_B(\cH)$, or it does not intersect $B$ at all. \end{proof} \end{Proposition} \begin{Definition} An arrangement $\cH$ in $\RR^d$ is called: \begin{enumerate}[label=\roman*)] \item \textit{Central} if $\bigcap_{H \in \cH} H \neq \emptyset$ \footnote{Observe that the empty arrangement is central, since the empty intersection is the whole space.}, \item \textit{central with respect to $B$}, for some $B \subset \RR^d$, if $B \cap \bigcap_{H \in \cH} H \neq \emptyset$. \end{enumerate} \end{Definition} We now define a characteristic polynomial for the arrangement $\cH$ which depends on $B\subset \RR^d$; this is analogous to the standard construction of the characteristic polynomial of an arrangement. \begin{Definition} Let $\cH$ be a hyperplane arrangement in $\RR^d$. The \textit{characteristic polynomial with respect to $B$} is defined by \begin{equation*} \chi_{\cH,B}(t):= \hspace{-0.5cm }\sum_{\substack{\cA \subset \cH\\ \cA \text{ central with respect to } B}} \hspace{-0.5cm } (-1)^{\vert \cA \vert} t^{\dim(\cap_{H \in \cA} H)}.
\end{equation*} \end{Definition} Following the argumentation in \cite{Dimca}, with the characteristic polynomial with respect to $B$ in place of the usual characteristic polynomial, we can establish the following theorem. Since the argument is exactly the same we do not prove the statement here. \begin{Theorem}[{\cite[Theorem 2.8]{Dimca}}]\label{Thm:RegionsCharPoly} Let $\cH$ be a hyperplane arrangement in $\RR^d$ and $B \subset \RR^d$ convex, then \begin{equation*} r_{B}(\cH) = (-1)^d \chi_{\cH,B}(-1). \end{equation*} \end{Theorem} We can use this formula with the help of the following lemma, which we import from Stanley. \begin{Lemma}[{\cite[Theorem 3.10.]{Stanley}}]\label{Lem:Stanley} Let $\chi_{\cH,B}(t)$ be the characteristic polynomial with respect to $B$ of a hyperplane arrangement $\cH$ in $\RR^d$. Then \begin{equation*} \chi_{\cH,B}(t) = \sum_{f \in F_B(\cH)} a_f t^{\dim(f)} \end{equation*} with $(-1)^{d-\dim(f)} a_f >0$ for $f \in F_B(\cH)$. \end{Lemma} \begin{Corollary}\label{Prop:StanleyBound} Let $\cH$ be a hyperplane arrangement in $\RR^d$ and $B \subset \RR^d$ convex, then \begin{equation*} r_B(\cH) \geq \vert F_B(\cH) \vert \geq \vert F_{0,B}(\cH) \vert. \end{equation*} \begin{proof} By \cref{Thm:RegionsCharPoly} and \cref{Lem:Stanley} we have \begin{equation*} r_{B}(\cH) = (-1)^d \chi_{\cH,B}(-1) = \sum_{f \in F_B(\cH)} (-1)^d a_f (-1)^{\dim(f)} \geq \sum_{f \in F_B(\cH)} 1 = \vert F_B(\cH) \vert. \end{equation*} \end{proof} \end{Corollary} This corollary means that in order to establish a lower bound on the number of regions it is enough to count the intersection points. We will do this by following the idea of the proof of Beck's theorem, \cite{Beck}, but instead of considering the set of lines or hyperplanes spanned by a point set we turn all the arguments around and consider the intersection points of a given arrangement. We will first handle the case of dimension two and then use induction to generalize the statement.\par\medskip We first state the Szemer\'edi-Trotter Theorem in two equivalent ways. \begin{Theorem}[Szemer\'edi-Trotter Theorem, {\cite{Szekely}}, {\cite{SzemerediTrotter}}]\label{Thm:SzemerediTrotter}~\par Let $n,m \in \NN$ and set \begin{equation*} I(n,m)= \max_{\vert P \vert =n, \vert L \vert =m} \vert\{(p,l) \in P \times L \mid p \in l\} \vert, \end{equation*} where $P$ denotes a set of points and $L$ a set of lines in $\RR^2$. \begin{enumerate}[label=\roman*)] \item There exists a constant $c>0$ such that $I(n,m) < c \cdot (n^{2/3}m^{2/3} + n + m)$. \item Let $\sqrt{n} \leq m \leq \begin{pmatrix} n \\ 2 \end{pmatrix}$, then there exists a constant $c>0$ such that ${I(n,m) < c \cdot (n^{2/3}m^{2/3})}$. \end{enumerate} \end{Theorem} \begin{Remark} The constant for the growth in the Szemer\'edi-Trotter Theorem is known to be less than $2.5$ but more than $0.4$. \end{Remark} \begin{Definition} Let $\cH$ be a hyperplane arrangement; then for a flat $f \in F(\cH)$ we define $S(f):=\{H \in \cH \mid f \subset H \}$, and further $a(f):=\vert S(f) \vert$. \end{Definition} \begin{Definition} For a hyperplane arrangement $\cH$ let \begin{align*} t(\cH,k) &:= \vert \{p \in F_0(\cH) \mid a(p) \geq k\}\vert, \\ t^\ast(\cH,k) &:= \vert \{p \in F_0(\cH) \mid k \leq a(p) < 2k\}\vert. \end{align*} Further we consider the maximal values of these two quantities \begin{align*} t(n,k) &:= \max_{\vert \cH \vert = n } t(\cH,k), \\ t^\ast(n,k) &:= \max_{\vert \cH \vert = n } t^\ast(\cH,k) . \end{align*} \end{Definition} For our proof we need some bounds on these terms.
The first two are from the paper of Beck, \cite{Beck}, and are easy to prove. The third is a corollary of the Szemer\'edi-Trotter Theorem. Beck proves a similar inequality, \cite[Lemma 2.2]{Beck}, which is the main part of his argument. It would also be possible to translate Beck's proof into our setup, but this would require considerably more effort; the interested reader is invited to carry out this translation, which is certainly illuminating. \begin{Lemma}[{\cite[Lemma 2.1]{Beck}}]\label{Lem:Beck2.1} For a hyperplane arrangement $\cH$ in $\RR^2$, with $\vert \cH \vert = n$, we have \begin{align*} t(n,k) \leq \frac{n(n-1)}{k(k-1)},\quad &\forall 2 \leq k \leq n,\\ t(n,k) < \frac{2n}{k},\quad &\forall \sqrt{2n} < k \leq n. \end{align*} \begin{proof} For the first formula we consider the number of pairs of lines. On the one hand we consider all possible pairs, and on the other hand the pairs through points in which at least $k$ lines intersect: \begin{equation*} t(n,k) \cdot \begin{pmatrix} k \\ 2 \end{pmatrix} \leq \begin{pmatrix} n \\ 2 \end{pmatrix}. \end{equation*} For the second inequality the points in which at least $k$ lines intersect are denoted by $p_1,...,p_t$. Assume for a contradiction that $t=\frac{2n +l }{k} \in \NN$, where $l \in \{0,...,k-1\}$. Then $t < \sqrt{2n} + \frac{l}{k}$, since $\sqrt{2n} < k$. Notice that $\vert S(p_i) \vert \geq k$ and $\vert S(p_i) \cap S(p_j) \vert \leq 1$ for $i \neq j$, since two points are connected by exactly one line. Then \begin{align*} n &= \vert \cH \vert \geq \left\vert \bigcup_{i=1}^t S(p_i) \right\vert \geq \sum_{i=1}^t \vert S(p_i)\vert - \sum_{1 \leq i < j \leq t} \vert S(p_i) \cap S(p_j)\vert\\ &\geq \sum_{i=1}^t k - \sum_{1 \leq i < j \leq t} 1 = tk - \frac{1}{2}t (t-1) > 2n+l - \frac{1}{2} \left(\sqrt{2n}+\frac{l}{k}\right) \left(\sqrt{2n}+\underbrace{\frac{l}{k}-1}_{<0}\right)\\ &> 2n+l - n - \sqrt{\frac{n}{2}} \frac{l}{k} = n+ l \left( 1 - \frac{\sqrt{n}}{\sqrt{2}k}\right) > n+l\left(1- \frac{1}{2}\right) \geq n. \end{align*} This is a contradiction, so $t=\lceil \frac{2n}{k}\rceil$ cannot hold, and $t > \frac{2n}{k}$ is also not possible, since we can simply ignore some points to get the same contradiction. \end{proof} \end{Lemma} \begin{Corollary}[{Corollary of {\cref{Thm:SzemerediTrotter}}, {\cite[Theorem 2]{SzemerediTrotter}}}]\label{Cor:SzemTrot} For a hyperplane arrangement $\cH$ in $\RR^2$ there is some constant $\beta >0$ such that \begin{align*} t(n,k) < \beta \frac{n^2}{k^3},\quad \forall 3 \leq k \leq \sqrt{n} . \end{align*} \begin{proof} Assume for a contradiction that there are $t=\frac{c^3n^2+l}{k^3} \in \NN$ points with $a(p)\geq k$, where $l\in \{0,..., k^3-1\}$ and $c=2.5$.\par Then \begin{equation*} \sqrt{t} = n \sqrt{\frac{c^3}{k^3} + \frac{l}{n^2 k^3}} \leq n \sqrt{\frac{c^3}{k^3} + \frac{k^3-1}{n^2k^3}} < n \sqrt{\frac{2.5^3}{3^3}+\frac{1}{n^2}} < n, \end{equation*} since $n > 3$. Further \begin{align*} \begin{pmatrix} t \\ 2 \end{pmatrix} &= \frac{1}{2} \frac{c^3 n^2+l}{k^3}\left(\frac{c^3 n^2+l}{k^3}-1 \right) \geq \frac{1}{2} \left(c^3 \sqrt{n}+ \frac{l}{n^{\frac{3}{2}}}\right) \left(c^3 \sqrt{n}+ \frac{l}{n^{\frac{3}{2}}}-1 \right)\\ &\geq \frac{1}{2} c^3 \sqrt{n}(c^3 \sqrt{n}-1) >n, \end{align*} since $c=2.5$ and $n >3$. Therefore we can use version ii) of the Szemer\'edi-Trotter Theorem: there is a constant $c$ such that $I(n,m) < c n^{2/3}m^{2/3}$, and we know that $c=2.5$ works.
The $t$ point induce $t \cdot k$ incidences, but \begin{equation*} t \cdot k = c^3 \frac{n^2 + l}{k^2} \not< c n^{2/3}t^{2/3}, \end{equation*} a contradiction to the theorem. Therefore $t=\lceil\frac{c^3n^2}{k^3} \rceil$ is not possible and also $t> \frac{c^3n^2}{k^3}$ is not possible by the same argument if we ignore some points. We see that $\beta =2.5^3$ is a possible choice. \end{proof} \end{Corollary} \begin{Theorem}[Local dual of Becks Theorem]\label{Thm:DualBeck} There exists a constant $c_2$ such that for a hyperplane arrangement $\cH$ in $\RR^2$ and $B \subset \RR^2$ convex. Where $\cH$ consist of two disjoint families $F_1$ and $F_2$ of hyperplanes with $\vert F_1 \vert = \vert F_2 \vert = \frac{n}{2}$ and such that for all $(f,g)\in F_1 \times F_2$ we have $B \cap f \cap g =\{p\}$, for some point $p \in B$ depending on $f$ and $g$. Then one of the following two cases holds: \begin{enumerate} \item There is a point $p \in B$ such that $a(p) \geq \frac{n}{100}$. \item The number of intersection points in $B$ exceeds $c_2 \cdot n^2$, i.e. $\vert F_{0,B}(\cH) \vert \geq c\cdot n^2$. \end{enumerate} \begin{proof} We count the number of pairs of lines, we get \begin{equation*} \begin{pmatrix} n \\ 2 \end{pmatrix} \geq \sum_{p \in F_{0,B}(\cH)} \begin{pmatrix} a(p) \\ 2 \end{pmatrix} \geq \vert F_1 \vert \cdot \vert F_2 \vert = \frac{1}{4}n^2. \end{equation*} On the left side we counted all the possible options, in the middle we counted the pairs which intersect inside $B$ and on the right we counted the pairs of lines from the two families, since we know that they intersect in $B$. We will split the sum into three parts: \begin{align*} S_1 &:= \sum_{\substack{p \in F_{0,B}(\cH)\\ 2^k \leq a(p) < \sqrt{n}}} \begin{pmatrix} a(p) \\ 2\end{pmatrix},\\ S_2 &:= \sum_{\substack{p \in F_{0,B}(\cH)\\ \sqrt{n} \leq a(p) < \frac{n}{100}}} \begin{pmatrix} a(p) \\ 2\end{pmatrix},\\ S_3 &:= \sum_{\substack{p \in F_{0,B}(\cH)\\ 2 \leq a(p) < 2^k}} \begin{pmatrix} a(p) \\ 2\end{pmatrix} + \sum_{\substack{p \in F_{0,B}(\cH)\\ \frac{n}{100} \leq a(p) \leq n}} \begin{pmatrix} a(p) \\ 2\end{pmatrix}, \end{align*} where $k=10$ is constant. Now we will bound $S_1$ and $S_2$. We start with $S_1$ using \cref{Cor:SzemTrot}: \begin{align*} S_1 &= \sum_{l \geq k} \sum_{\substack{p \in F_{0,B}(\cH) \\ 2^l \leq a(p) < 2^{l+1} \\ a(p) < \sqrt{n}}} \begin{pmatrix} a(p) \\ 2 \end{pmatrix} \leq \sum_{\substack{l \geq k \\ 2^{l+1}<\sqrt{n}}} t^{\ast}(n,2^l) \begin{pmatrix} 2^{l+1} \\ 2 \end{pmatrix} \\ &= \sum_{\substack{l \geq k \\ 2^{l+1} < \sqrt{n}}} t^{\ast}(n,2^l) 2^l (2^{l+1}-1) \leq \sum_{\substack{l \geq k \\ 2^{l+1} < \sqrt{n}}} \beta \frac{n^2}{2^{3l}} 2^l (2^{l+1}-1)\\ &\leq 2 \beta n^2 \sum_{l \geq k} \frac{1}{2^l} = \frac{4 \beta}{2^k} n^2 \leq \frac{1}{8} \begin{pmatrix} n \\ 2\end{pmatrix}, \end{align*} Since $\beta = 2.5^3$, $k=10$ and $n\geq 2$. 
For the next sum we use \cref{Lem:Beck2.1} and \cref{Cor:SzemTrot}: \begin{align*} S_2 &= \sum_{l \geq0} \sum_{\substack{p \in F_{0,B}(\cH) \\ 2^l \sqrt{n} \leq a(p) < 2^{l+1} \sqrt{n} \\ a(p) < \frac{n}{100}}} \begin{pmatrix} a(p) \\ 2 \end{pmatrix} \leq \sum_{\substack{l \geq 0\\ 2^{l} \sqrt{n} < \frac{n}{100}}} t(n, 2^l \sqrt{n}) \begin{pmatrix} 2^{l+1} \sqrt{n} \\ 2 \end{pmatrix}\\ &= t(n, \sqrt{n}) \begin{pmatrix} 2 \sqrt{n} \\ 2 \end{pmatrix} +\sum_{\substack{l \geq 1\\ 2^{l} \sqrt{n} < \frac{n}{100}}} t(n,\underbrace{2^l \sqrt{n}}_{> \sqrt{2n}}) \begin{pmatrix} 2^{l+1} \sqrt{n} \\ 2 \end{pmatrix}\\ &< \beta \frac{n^2}{n^{3/2}} \sqrt{n} (2 \sqrt{n}-1) + \sum_{\substack{l \geq 1\\ 2^{l} \sqrt{n} < \frac{n}{100}}} \frac{2n}{2^l \sqrt{n}} 2^l \sqrt{n} (2^{l+1} \sqrt{n}-1)\\ &< 2 \beta n^{3/2} + 4 n^{3/2} \sum_{\substack{l \geq 1 \\ 2^{l} < \frac{\sqrt{n}}{100}}} 2^l = 2 \beta n^{3/2} + 4 n^{3/2} \left( \frac{\sqrt{n}}{50}-2\right)\\ &= \frac{2}{25} n^2 + (2 \beta -8 )n^{3/2} \leq \frac{1}{4} \begin{pmatrix} n \\ 2\end{pmatrix}. \end{align*} Combining the two results we get a lower bound for $S_3$ \begin{equation*} S_3 \geq \vert F_1 \vert \cdot \vert F_2 \vert - \frac{1}{4}\begin{pmatrix} n \\ 2 \end{pmatrix} - \frac{1}{8}\begin{pmatrix} n \\ 2 \end{pmatrix} \geq \frac{1}{16}n^2. \end{equation*} So now assume that condition (a) of the theorem does not hold, then \begin{equation*} \vert F_{0,B}(\cH) \vert \geq \sum_{\substack{p \in F_{0,B}\\ 2 \leq a(p) < 2^k}} 1 \geq \begin{pmatrix} 2^k \\ 2 \end{pmatrix}^{-1} \sum_{\substack{p \in F_{0,B}\\ 2 \leq a(p) < 2^k}} \begin{pmatrix} a(p) \\ 2 \end{pmatrix} \geq \begin{pmatrix} 2^k \\ 2 \end{pmatrix}^{-1} \frac{1}{16} n^2. \end{equation*} Since we have seen that $k=10$ is a possible choice the constant would be $c=\frac{1}{8 380416}$. \end{proof} \end{Theorem} \begin{Remark} In condition (a) the constant $\frac{1}{100}$ is by no means optimal, but since for us the constant plays an insignificant role we stick to the original constant used by Beck.\par Further notice that we have even proved a stronger theorem, namely that \begin{equation*} \big\vert \{p \in F_{0,B}(\cH) \mid 2 \leq a(p) \leq 2^k\}\big\vert \geq c \cdot n^2, \end{equation*} if condition (a) does not hold. \end{Remark} \begin{proof}[Proof of \cref{Thm:BeckInduction}] The idea of the proof is that for a family $F_i$ of hyperplanes, the other families induce a hyperplane arrangement in all $H \in F_i$, thus we can conclude by induction. The case $d=2$ is already completed by \cref{Thm:DualBeck}, where we even proved the stronger statement that \begin{equation*} \vert \{p \in F_{0,B}(\cH) \mid 2 \leq a(p) < 2^k\}\vert \geq c_2 \cdot n^2 \end{equation*} if for all $p \in B$ we have $a(p) < \frac{n}{100}$. That $a(p) < \frac{1}{100} n$ is guaranteed by the assumption that at most $c \cdot \vert F_i \vert$ hyperplanes from $F_i$ can intersect in one point and $c < \frac{1}{100}$. So the initial case of the induction holds.\par\medskip Now consider the family $F_1$, we are interested in the $d-1$ dimensional arrangement which is induced on the hyperplanes $H\in F_1$. Notice that for $H_i \in F_i, H_j \in F_j$ and $H_k\in F_k$, $i \neq j \neq k$, we have $H_i \cap H_j \neq H_i \cap H_k$ since otherwise we get a contradiction to the assumption that $B \cap \bigcap_{i=1}^d f_i = \{p\}$ for all $(f_1,...,f_d) \in F_1 \times ... \times F_d$. So the different families induce different $(d-2)$-hyperplanes on the hyperplanes of $F_1$. 
Set \begin{equation*} \cH^{H \ast} := \{H \cap f \mid f \in F_2 \cup ... \cup F_d\}, \end{equation*} here $H \cap f \neq \emptyset$ holds for all $f$ by the assumption on the intersection behaviour. Now we prove the following claim, which gives us the induction hypothesis:\par\medskip \textbf{Claim:} $\vert \cH^{H \ast} \vert > \delta \cdot n$, for some constant $\delta$, and at least $\epsilon \cdot \frac{n}{d}$ hyperplanes $H \in F_1$, for some constant $\epsilon >0$.\par \medskip We proof the claim: We do this by considering two families and show that the one induces enough planes on the second one. To do so let $P$ be a generic $2$-dimensional plane in $\RR^d$, i.e. $P \cap H$ is one dimensional for all $H \in F_1 \cup F_2$ and they are all distinct for different $H$. And $P\cap f$ is a point for all $f=H \cap K$ with $H \in F_1$, $K \in F_2$ which are also distinct if the $K \cap H$ are distinct. So each hyperplane corresponds to a line and each $d-2$ dimensional flat corresponds to a point. The intersection behaviour for the lines clearly fulfils the assumptions in \cref{Thm:DualBeck}. So we can apply the theorem and get $c \cdot \left(\frac{n}{d}\right)^2$ intersections points and therefore $c \cdot \left(\frac{n}{d}\right)^2$ induced flats. Since each hyperplane can carry at most $\frac{n}{d}$ induced flats we see that the flats have to spread out such that the claim holds.\par\medskip Denote the set of hyperplanes from $F_1$ for which the claim holds by $\tilde{F_1}$. It is clear that the intersection behaviour of the different families also holds in the $d-1$ dimensional arrangement induced on the hyperplanes in $\tilde{F_1}$. Also assume that the stronger statement \begin{equation*} \vert \{p \in F_{0,B}(\cH) \mid l \leq a(p) < l^k\}\vert \geq c_l \cdot n^l \end{equation*} is proved for all dimensions $l$ up to $d-1$, where $k$ is a constant.\par\medskip Now we can do the induction step. We get the following inequality \begin{align*} \vert F_{0,B}(\cH) \vert \cdot \begin{pmatrix} d^k \\ d \end{pmatrix} > \sum_{\substack{p \in F_{0,B}(\cH)\\ d \leq a(p) < d^{k}}} \begin{pmatrix} a(p) \\ d \end{pmatrix} \geq \sum_{H \in F_1} \sum_{\substack{p \in F_{0,B}(\cH^{H\ast})\\ d-1 \leq a_{\cH^{H\ast}}(p) < (d-1)^k}} \begin{pmatrix} a_{\cH^{H\ast}}(p) \\ d-1 \end{pmatrix}. \end{align*} For the last inequality notice that $a_{\cH^{H\ast}}(p)$ now only counts the hyperplanes in $\cH^{H \ast}$ and we only have to take $d-1$ out of them since we fixed the choice $H \in F_1$. Now further by the induction assumption \begin{align*} \sum_{H \in F_1} &\sum_{\substack{p \in F_{0,B}(\cH^{H\ast})\\ d-1 \leq a(p) < (d-1)^k}} \begin{pmatrix} a(p) \\ d-1 \end{pmatrix} \geq \sum_{H \in \tilde{F_1}} \sum_{\substack{p \in F_{0,B}(\cH^{H\ast})\\ d-1 \leq a(p) < (d-1)^k}} \begin{pmatrix} a(p) \\ d-1 \end{pmatrix}\\ &\geq \sum_{H \in \tilde{F_1}} \vert \{ p \in F_{0,B}(\cH^{H\ast}) \mid d-1 \leq a(p) < (d-1)^k\}\vert\\ &\geq \sum_{H \in \tilde{F_1}} c_{d-1} \cdot \delta^{d-1} n^{d-1} \geq \epsilon \frac{n}{d} c_{d-1} \delta^{d-1} n^{d-1} = \frac{\epsilon c_{d-1}}{d} \cdot \delta^{d-1} n^d.\\ \end{align*} This finally yields \begin{equation*} \big\vert \{ F_{0,B}(\cH) \mid d \leq a(p) < d^k \} \big\vert \geq \begin{pmatrix} d^k \\ d \end{pmatrix}^{-1} \frac{\epsilon c_{d-1}}{d} \cdot \delta^{d-1} n^d =: c_d n^d. 
\end{equation*} \end{proof} \begin{appendices} \section{FLC in non-abelian lcsc groups}\label[appendix]{Appendix:FLC} This appendix is dedicated to give some more information on sets with finite local complexity for lcsc groups and we will show that all model sets have FLC.\par\medskip A locally finite subset $\Lambda \subset G$ which fulfils one and therefore all of the conditions in the following lemma has \textit{finite local complexity} as defined in \cref{Rem:FLC}. In the lemma we only need that $\Lambda$ is locally finite, so we could also define the term of finite local complexity for this type of sets. \begin{Lemma}[Finite local complexity] \label{FLC_Lemma} Let $G$ be a lcsc group and $\Lambda \subset G$ a locally finite set, i.e. for all bounded $B\subset G$ we have that $B \cap \Lambda$ is finite. Then the following are equivalent: \begin{enumerate}[label=\roman*)] \item For all $B \subset G$ bounded there exists a finite $F_B \subset G$ such that \begin{equation*} \forall g \in G \,\exists h \in \Lambda^{-1}\Lambda \,\exists f \in F_B: (B g^{-1}\cap \Lambda)h=Bf^{-1}\cap \Lambda. \end{equation*} \item For all $B \subset G$ bounded there exists a finite $F_B \subset G$ such that \begin{equation*} \forall g \in G \,\exists h \in G \,\exists f \in F_B: (B g^{-1}\cap \Lambda)h=Bf^{-1}\cap \Lambda. \end{equation*} \item $\Lambda \Lambda^{-1}$ is locally finite. \item For all $B \subset G$ bounded: \begin{equation*} \big\vert \{B \cap \Lambda \lambda^{-1}\mid \lambda \in \Lambda\}\big\vert< \infty. \end{equation*} \item The complexity function $p(r)$ is finite for all $r\geq 0$. \end{enumerate} \begin{proof} First we will show the equivalence of $i), ii)$ and $iii)$. Afterwards we will show the equivalence of $iii)$ and $iv)$. Finally the will show the equivalence of $iv)$ and $v)$.\par $i) \Rightarrow ii)$: This step is obvious, since $\Lambda^{-1}\Lambda \subset G$.\par $ii) \Rightarrow iii)$: Without loss of generality we can assume that $B$ is compact and contains the identity, otherwise we just simply expand $B$ and notice that this would just increase the number of elements in the intersection. For this $B$ we choose $F_B$ such that $ii)$ holds. Since $F_B$ is finite and $B$ is bounded we see that $B':=BF_B^{-1}$ is also bounded, further we see, since $\Lambda$ is locally finite, that $F:=B'\cap \Lambda$ is finite.\\ Now let $\lambda_1, \lambda_2 \in \Lambda$ be arbitrary with $\lambda_1\lambda_2^{-1} \in B$. We get $\lambda_1 \in B\lambda_2 \cap \Lambda$ and since we assumed $e \in B$ we also get $\lambda_2 \in B \lambda_2 \cap \Lambda$. With our assumption we get that $h_1 \in G$ and $f_1 \in F_B$ exist with \begin{equation*} (B \lambda_2 \cap \Lambda)h_1=B f_1^{-1} \cap \Lambda. \end{equation*} Putting the pieces together we obtain \begin{equation*} \{\lambda_1,\lambda_2\} h_1 \subseteq (B \lambda_2 \cap \Lambda) h_1 = B f_1^{-1}\cap \Lambda \subset B F_B^{-1} \cap \Lambda = B'\cap \Lambda =F. \end{equation*} So $\lambda_1 \lambda_2^{-1} = (\lambda_1 h_1^{-1})(\lambda_2 h_1^{-1})^{-1}\in F F^{-1}$ and we get that $\Lambda\Lambda^{-1} \cap B \subset F F^{-1}$ is finite.\par\medskip $iii) \Rightarrow i)$: Let $B \subset G$ be bounded. Without loss of generality we can assume $B$ to be symmetric, i.e. $B=B^{-1}$. Since $B$ is bounded $B^2$ is also bounded and $B^2 \cap \Lambda \Lambda^{-1}$ is finite by assumption. 
Then \begin{equation*} BB \cap \Lambda \Lambda^{-1} = \bigcup_{b \in B} \bigcup_{\lambda \in \Lambda} Bb \cap \Lambda \lambda^{-1}, \end{equation*} and since we know that this is finite we conclude that $Bb \cap \Lambda \lambda^{-1}$ can only have finitely many different forms. So we find $b_1,...,b_s \in B$ and $\lambda_1,...,\lambda_t \in \Lambda$ such that for arbitrary $b\in B$ and $\lambda \in \Lambda$ there exists a $n\in \{1,...,s\}$ and a $m\in \{1,...,t\}$ with \begin{equation*} Bb \cap \Lambda \lambda^{-1} = Bb_n \cap \Lambda \lambda_m^{-1}. \end{equation*} Let $g\in G$ be arbitrary. Then the two following cases can appear:\par \medskip \textbf{Case 1}: $B g^{-1} \cap \Lambda = \emptyset$, to deal with this case we simply set $f_0 = g$, for one such $g$.\par \medskip \textbf{Case 2:} $B g^{-1} \cap \Lambda \neq \emptyset$, then there exists a $b'\in B$ such that $b'g^{-1} = \lambda$. Set $b:=b'^{-1}$, then $g^{-1}=b\lambda$ and, since $B$ is symmetric, $b \in B$. Now choose $n$ and $m$ such that $Bb \cap \Lambda \lambda^{-1}=B b_n \cap \Lambda \lambda_n^{-1}$ and set $h:=\lambda^{-1}\lambda_n \in \Lambda^{-1} \Lambda$. Now we get \begin{align*} (Bg^{-1} \cap \Lambda)h &= (Bb \lambda \cap \Lambda) \lambda^{-1} \lambda_m = (Bb \cap \Lambda \lambda^{-1}) \lambda_m\\ &=(B b_n \cap \Lambda \lambda_m^{-1}) \lambda_m = B b_n \lambda_m \cap \Lambda \end{align*} Finally we set $F_B':=\big\{\lambda_m^{-1} b_n^{-1} \mid n \in \{1,...,s\}, m\in\{1,...,t\}\big\}$, which is finite.\par\medskip To combine both cases define $F_B := F_B' \cup {f_0}$.\par \medskip $iv) \Rightarrow iii)$: Let $B\subset G$ be bounded. For $\{B \cap \Lambda \lambda^{-1} \mid \lambda \in \Lambda\}$ we use our assumption to find finitely many representatives $\lambda_1,...,\lambda_k$ such that \begin{equation*} \{B \cap \Lambda \lambda^{-1} \mid \lambda \in \Lambda\} = \bigcup_{l=1}^k \{B \cap \Lambda \lambda_l^{-1}\}. \end{equation*} We get \begin{equation*} B \cap \Lambda \Lambda^{-1} = \bigcup_{\lambda \in \Lambda} B \cap \Lambda \lambda^{-1} =\bigcup_{l=1}^k B \cap \Lambda \lambda_l^{-1}. \end{equation*} The sets $B \cap \Lambda \lambda_l^{-1} = (B \lambda_l \cap \Lambda)\lambda_l^{-1}$ are finite, since $\Lambda$ is locally finite.\medskip $iii) \Rightarrow iv)$: Let $B \subset G$ be bounded, then by our assumption $B \cap \Lambda\Lambda^{-1}$ is finite. Further \begin{equation*} B \cap \Lambda\Lambda^{-1} = \bigcup_{\lambda \in \Lambda} B \cap \Lambda \lambda^{-1}. \end{equation*} Since the left hand side is finite this also holds for the right hand side. But this means that there can only be finitely many combinations for the sets $B \cap \Lambda \lambda^{-1}$. So we get \begin{equation*} \vert \{ B \cap \Lambda \lambda^{-1} \mid \lambda \in\Lambda\} \vert < \infty. \end{equation*}\medskip $iv) \Leftrightarrow v)$: This is obvious since each ball $B_r(e)$ is a bounded set and on the other hand for every bounded set $B$ we can find a $r>0$ such that $B \subset B_r(e)$ holds. \end{proof} \end{Lemma} The following proposition justifies our restriction to precompact windows with non-empty interior. \begin{Proposition}\label{Prop:CPSFLCrelativelydense} Let $(G,H, \Gamma)$ be a CPS, $W \subset H$ a subset and $\Lambda:= \pi_G((G \times W)\cap \Gamma)$. \begin{enumerate}[label=\roman*)] \item If $W^\circ \neq \emptyset$, then $\Lambda$ is relatively dense, \item if $W$ is relatively compact, then $\Lambda$ is uniformly discrete, \item if $W$ is relatively compact and $W^\circ \neq \emptyset$, then $\Lambda$ has FLC. 
\end{enumerate} \begin{proof} \begin{enumerate}[label=\roman*)] \item We are using \cref{Lem:ProduktOffenKompakt}. Since ${W^\circ \neq \emptyset}$ this also holds for the inverse ${(W^{-1})^\circ \neq \emptyset}$ and we can choose an open subset $\emptyset \neq U \subset W^{-1}$. By the \cref{Lem:ProduktOffenKompakt} we find a compact set $K$ such that $G\times H= (K \times U)\Gamma$. Let $g \in G$ be arbitrary, we can find $u \in U, k \in K$ and $\gamma \in \Gamma$ such that \begin{equation*} (g, e_H) = (k,u)(\gamma_{G}, \gamma_{H}). \end{equation*} This tells us that $u \gamma_{H} = e_H$ and therefore $\gamma_{H}=u^{-1} \in (W^{-1})^{-1}=W$, so $\gamma_{G} \in \Lambda$. Therefore $g = k \gamma_{G} \in K \Lambda$. This shows the claim. \item Let us assume $\Lambda$ is not uniformly discrete, then for all $r>0$ there exists ${x,y \in \Lambda}$ such that ${d(x,y)<r}$. By the right-invariance of $d$ this is equivalent to ${d(e, yx^{-1})<r}$. We can lift $x$ and $y$ to elements in the product and get \begin{equation*} \pi_G\vert_\Gamma^{-1}(x)=:(x_G,x_H),\, \pi_G\vert_\Gamma^{-1}(y)=:(y_G,y_H) \in \Gamma \cap (G \times W). \end{equation*} Since $\Gamma$ and $G$ are groups we can deduce \begin{equation*} {(y_G,y_H)(x_G^{-1},x_H^{-1}) \in \Gamma \cap (G \times WW^{-1})}. \end{equation*} Since we know that ${yx^{-1} \in B_r(x)}$ we get \begin{equation*} {(y_G,y_H)(x_G^{-1},x_H^{-1}) \in \Gamma \cap (B_r(e_G) \times WW^{-1})}. \end{equation*} Since $W$ is relatively compact, $WW^{-1}$ is also relatively compact and therefore bounded. Moreover $B_r(e_G)$ is bounded so the product ${B_r(e_G)\times WW^{-1}}$ is bounded. Thus, since $\Gamma$ is a lattice, we get that ${\Gamma \cap (B_r(e_G) \times WW^{-1})}$ is finite. By the injectivity of ${\pi_G}$ we know that ${d(a_G,b_G) \neq 0}$ for ${a \neq b \in \Gamma}$ so we get that $d(a_G,b_G) > 0$ for $a,b \in \Gamma \cap (B_r(e_G) \times WW^{-1})$ and by finiteness there is a minimal distance $\tilde{d}$. Now set $\tilde{r}< \tilde{d}$ and conclude ${\Gamma \cap (B_{\tilde{r}}(e_G) \times WW^{-1})=\{(e_G,e_H)\}}$. This is a contradiction to the assumption, since we do not find two elements, which are this close together. Therefore $\Lambda$ has to be uniformly discrete for $\tilde{r}$. \item By i) and ii) we know that $\Lambda$ is a Delone set and therefore locally finite. We want to use the characterisation iii) of \cref{FLC_Lemma}, so we show that $B \cap \Lambda \Lambda^{-1}$ is finite for a bounded set $B \subset H$. It is enough to show that the preimage of this set is finite. Since taking the preimage and intersecting commutes we get \begin{equation*} \pi_G^{-1}(B \cap \Lambda\Lambda^{-1}) = \pi_G^{-1}(B) \cap \pi_G^{-1}(\Lambda \Lambda^{-1}). \end{equation*} Now we can consider the two parts separately and then intersect them, so the preimage of $B$ is obviously $\pi_G^{-1}(B)=B \times H$.\\ For the second part we need to remember the definition of $\Lambda$, this was given by ${\Lambda=\pi_G((G \times W)\cap \Gamma)}$, so \begin{equation*} \pi_G^{-1}(\Lambda \Lambda^{-1}) = \pi_G^{-1}(\pi_G((G \times W)\cap \Gamma) \pi_G((G \times W)\cap \Gamma)^{-1}). \end{equation*} We want to show that this is a subset of $\Gamma \cap (G \times WW^{-1})$. So let $\lambda_1, \lambda_2 \in \Lambda$ then they are both in $\Gamma_G$ and therefore $\lambda_1 \lambda_2^{-1} \in \Gamma_G$ and there exists a unique preimage inside $\Gamma$ which we name $(\lambda_1 \lambda_2^{-1},x)$. 
On the other hand the preimage of $\lambda_i$, $i\in\{1,2\}$, is $(\lambda_i, w_i) \in \Gamma \cap (G \times W)$. And \footnotesize\begin{equation*} (\lambda_1, w_1)(\lambda_2, w_2)^{-1} = (\lambda_1, w_1)(\lambda_2^{-1}, w_2^{-1}) = (\lambda_1 \lambda_2^{-1}, w_1w_2^{-1}) \in \Gamma \cap (G \times WW^{-1}). \end{equation*}\normalsize Since the preimage was unique and we see that \begin{equation*} \pi_G(\lambda_1 \lambda_2^{-1}, w_1w_2^{-1})= \lambda_1 \lambda_2^{-1} \end{equation*} we get that $\pi_G^{-1}(\lambda_1 \lambda_2^{-1}) \in \Gamma \cap (G \times WW^{-1})$.\\ Combining the two arguments we get \small\begin{equation*} \pi_G^{-1}(B \cap \Lambda\Lambda^{-1}) = (B \times H) \cap \Gamma \cap (G \times WW^{-1}) = \Gamma \cap (B \times WW^{-1}). \end{equation*}\normalsize Since $W$ is relatively compact we get that $\overline{W}$ is compact. Since $W \subset \overline{W}$ we see $W \subset H$ is bounded. Hence there exists a $r>0$ such that $r>d(w_1, w_2)$ for all ${w_1,w_2 \in W}$. And once more by right-invariance of the metric we get ${r>d(w_1w_2^{-1},e)}$. This tells us that $WW^{-1} \subset B_r(e)$ and therefore it is bounded. Further $B \subset G$ was a bounded set. We see that $B \times WW^{-1} \subset G \times H$ is bounded in the product. Since $\Gamma$ is a lattice it has FLC and therefore $(B \times WW^{-1}) \cap \Gamma$ is finite. \end{enumerate} \end{proof} \end{Proposition} \section{Homogenous Lie groups}\label[appendix]{Appendix:Poly} In the first part of this appendix we are concerned with the proof of \cref{Thm:NonCrooked}. \begin{Proposition} Every locally two-step nilpotent homogeneous Lie group is non-crooked. \begin{proof} This follows directly by considering the BCH-formula and noticing that by using the assumption on locally two-step nilpotent Lie groups that for all $X,Y \in \fg$ \begin{equation*} X \ast Y = X + Y + \frac{1}{2}[X,Y]. \end{equation*} So for a hyperplane $H =v_0 + \sum_{i=1}^d t_i v_i$, with $t_i \in \RR$, $v_i \in \RR^d$ and $d$ the dimension of the Lie group. We get for all $X \in \fg$ \small\begin{align*} X \ast H &= X + H + \frac{1}{2}[X,H] = v_0 +X +\sum_{i=1}^d t_i v_i + \frac{1}{2} [X, v_0] + \frac{1}{2} \sum_{i=1}^d t_i [X,v_i]\\ &= v_0 +X + \frac{1}{2} [X, v_0] +\sum_{i=1}^d t_i (v_i + [X,v_i]) =: \tilde{v_0} + \sum_{i=1}^d t_i \tilde{v_i}. \end{align*}\normalsize And this is again a hyperplane. \end{proof} \end{Proposition} At first sight the condition of locally two-step nilpotent seems weaker than the condition of being two-step nilpotent, but in fact the two are equivalent by the following proposition. \begin{Proposition} Let $G$ be a Lie group. If $G$ is locally two-step nilpotent then $G$ is two-step nilpotent or abelian. If $G$ is two-step nilpotent it is locally two-step nilpotent. \begin{proof} The conclusion from two-step nilpotent to locally two-step nilpotent is trivial, so two-step nilpotent Lie groups are locally two-step nilpotent Lie groups.\par So now assume that we have a locally two-step nilpotent Lie group and arbitrary $X,Y,Z \in \fg$, then \begin{align*} 0 &= [X+Y,[X+Y,Z] = [X,[X,Z]]+[X,[Y,Z]]+[Y,[X,Z]]+[Y,[Y,Z]]\\ &= [X,[Y,Z]]+[Y,[X,Z]]. \end{align*} This means that $[X,[Y,Z]] =-[Y,[X,Z]] = [Y,[Z,X]]$. By using Jacobi's identity we have \begin{equation*} 0 = [X,[Y,Z]]+[Y,[Z,X]]+[Z,[X,Y]]. \end{equation*} And therefore by using the equality we found before we have $2[Y,[Z,X]]=[Z,[Y,X]]$. Since $X$, $Y$ and $Z$ are arbitrary, we can switch the roles of $Y$ and $Z$. Thus \linebreak ${2[Z,[Y,X]]=[Y,[Z,X]]}$. 
So in total this means \begin{equation*} [Z,[Y,X]]=2[Y,[Z,X]] = 4 [Z,[Y,X]], \end{equation*} and therefore $[Z,[Y,X]]=0$. So $G$ is two-step nilpotent or abelian. \end{proof} \end{Proposition} We have seen that the class of non-crooked homogeneous Lie groups contains the abelian and the two-step nilpotent homogeneous Lie groups. We will now see that a higher nilpotency degree always implies crookedness. \begin{Proposition} A three-step homogeneous Lie group is crooked. \begin{proof} Let $H$ be a hyperplane given by $H=v_0 + \sum_{i=1}^n t_i v_i$, with $t_i \in \RR$, $v_i \in \RR^d$ and $d$ the dimension of the Lie group. We get for all $X \in \fg$ \footnotesize\begin{align*} X \ast H &= X + H + \frac{1}{2}[X,H] + \frac{1}{12}([X,[X,H]]-[H,[X,H]])\\ &= X+ v_0 + \frac{1}{2}[X,v_0]+\frac{1}{12}\left([X,X,v_0]+[v_0,[X,v_0]]\right)\\ &\quad\quad + \sum_{i=1}^n t_i\cdot \left( v_i+\frac{1}{2} [X,v_i]+ \frac{1}{12}([X,[X,v_i]] - [v_0,[X,v_i]] - [v_i,[X,v_0]]) \right)\\ &\quad\quad - \frac{1}{12} \sum_{i=1}^n \sum_{j=1}^n t_i t_j [v_i,[X,v_j]]. \end{align*}\normalsize So for this to be non-crooked the last sum has to disappear, i.e. \begin{equation*} \sum_{i=1}^n \sum_{j=1}^n t_i t_j [v_i,[X,v_j]] = 0. \end{equation*} But since the $t_i$, $t_j$ are parameters we can only compare the summands with the same coefficients, so for all $i,j \in \{1,...,n\}$, $i\neq j$ \begin{equation*} [v_i,[X,v_j]] + [v_j, [X,v_i]] =0 \end{equation*} and for the diagonal, i.e. $i=j$ \begin{equation*} [v_i,[X,v_i]]=0. \end{equation*} But this last conditions is the locally two-step nilpotency condition, which as we have seen implies two-step nilpotency. \end{proof} \end{Proposition} \begin{Corollary} All nilpotent homogeneous Lie groups, with nilpotency degree greater than two, are crooked. \end{Corollary} \subsection{Form of the polynomial action}\label[appendix]{Appendix:Poly2}~\par\medskip We can find some restrictions on the form of the polynomials in the group law. Since we have a dilation structure we get \begin{align*} (r^{\nu_1}P_1(x,y),&...,r^{\nu_n}P_n(x,y))=D_r( P_1(x,y),..., P_n(x,y)) = D_r(xy)\\ &= D_r(x) D_r(y) = ( P_1(D_r(x),D_r(y)),..., P_n(D_r(x),D_r(y))). \end{align*} This means for the polynomials we have \begin{equation*} P_i(D_r(x),D_r(y))= r^{\nu_i}P_i(x,y). \end{equation*} This gives us a restriction on the form of the $P_i$ namely if \begin{equation*} P_i(x,y)= \sum_{\alpha_1,...,\alpha_n,\beta_1,...,\beta_n \in \NN} c_{(\alpha_1,...,\alpha_n,\beta_1,...,\beta_n)} x_1^{\alpha_1}...x_n^{\alpha_n} y_1^{\beta_1}...y_n^{\beta_n} \end{equation*} then for all summands with $c\neq 0$ it is $\nu_i=\nu_1\alpha_1+...+\nu_n\alpha_n+\nu_1\beta_1+...+\nu_n\beta_n$. Since we sorted the $\nu_i$ by their size we see that in the $i$-th entry all $\alpha_j$ and $\beta_j$ have to be zero if $\nu_j > \nu_i$. This means that for the Polynomial $P_i$ we have that it is only dependent on the first $i$ entries of both $x$ and $y$.\par Another restriction which can be seen from the BCH formula is that the polynomials are of a certain form, namely \begin{equation*} P_i(x,y) = x_i+y_i + \sum_{\substack{\alpha_1,...,\alpha_n,\beta_1,...,\beta_n \in \NN\\\sum_i \alpha_i \neq 0, \sum_i \beta_i \neq 0 }} c_{(\alpha_1,...,\alpha_n,\beta_1,...,\beta_n)} x_1^{\alpha_1}...x_{i-1}^{\alpha_{i-1}} y_1^{\beta_1}...y_{i-1}^{\beta_{i-1}}. \end{equation*} This means that in the polynomial all summands except the two in front have entries from $x$ and $y$. 
\par Notice that in the non-crooked case there can be no product of the $t_i$, this means that we have either one of the $\beta_j$ is one and all the overs are zero or all $\beta_j$ are zero.\par \medskip To finish this appendix we give one example of such a group. Consider the Heisenberg group $\HH$ of upper triangular matrices with ones on the diagonal. The entries of the second diagonal have weight 1 and the third diagonal has weight 2. For two elements \begin{equation*} \begin{pmatrix} 1 & a & c \\ 0 & 1 & b \\ 0 & 0 & 1 \end{pmatrix}, \begin{pmatrix} 1 & x & z \\ 0 & 1 & y \\ 0 & 0 & 1 \end{pmatrix} \end{equation*} the polynomials then are given by \begin{align*} &P_1((a,b,c),(x,y,z))= a+x,\\ &P_2((a,b,c),(x,y,z))= b+y,\\ &P_3((a,b,c),(x,y,z))= c+z + ay. \end{align*} And we see that $\HH$ is $2$-step nilpotent and therefore non-crooked.\par \medskip \end{appendices} \printbibliography \end{document}
2205.11851v1
http://arxiv.org/abs/2205.11851v1
Restricted Nim with a Pass
\documentclass[10pt]{article} \textwidth= 5.00in \textheight= 7.4in \topmargin = 30pt \evensidemargin=0pt \oddsidemargin=55pt \headsep=17pt \parskip=.5pt \parindent=12pt \font\smallit=cmti10 \font\smalltt=cmtt10 \font\smallrm=cmr9 \usepackage{amssymb,latexsym,amsmath,epsfig,amsthm} \usepackage{empheq} \usepackage{ascmac} \makeatletter \renewcommand\section{\@startsection {section}{1}{\z@} {-30pt \@plus -1ex \@minus -.2ex} {2.3ex \@plus.2ex} {\normalfont\normalsize\bfseries\boldmath}} \renewcommand\subsection{\@startsection{subsection}{2}{\z@} {-3.25ex\@plus -1ex \@minus -.2ex} {1.5ex \@plus .2ex} {\normalfont\normalsize\bfseries\boldmath}} \renewcommand{\@seccntformat}[1]{\csname the#1\endcsname. } \makeatother \newtheorem{theorem}{Theorem} \newtheorem{lemma}{Lemma} \newtheorem{conjecture}{Conjecture} \newtheorem{proposition}{Proposition} \newtheorem{corollary}{Corollary} \theoremstyle{definition} \newtheorem{defn}{Definition}[section] \newtheorem{rem}{Remark}[section] \newtheorem{exam}{Example}[section] \newtheorem{pict}{Figure}[section] \begin{document} \begin{center} \uppercase{\bf Restricted Nim with a Pass} \vskip 20pt {\bf Ryohei Miyadera }\\ {\smallit Keimei Gakuin Junior and High School, Kobe City, Japan}. \\ {\tt [email protected]} \vskip 10pt {\bf Hikaru Manabe}. \\ {\smallit Keimei Gakuin Junior and High School, Kobe City, Japan}. \\ {\tt [email protected]} \vskip 10pt \end{center} \vskip 20pt \centerline{\smallit Received: , Revised: , Accepted: , Published: } \vskip 30pt \pagestyle{myheadings} \markright{\smalltt INTEGERS: 19 (2019)\hfill} \thispagestyle{empty} \baselineskip=12.875pt \vskip 30pt \centerline{\bf Abstract} \noindent This paper presents a study of restricted Nim with a pass. In the restricted Nim considered in this study, two players take turns and remove stones from the piles. In each turn, when the number of stones is $m$, each player is allowed to remove at least one stone and at most $\lceil \frac{m}{2} \rceil$ stones from a pile of $m$ stones. The standard rules of the game are modified to allow a one-time pass, that is, a pass move that may be used at most once in the game and not from a terminal position. Once a pass has been used by either player, it is no longer available. It is well-known that in classical Nim, the introduction of the pass alters the underlying structure of the game, significantly increasing its complexity. In the restricted Nim considered in this study, the pass move was found to have a minimal impact. There is a simple relationship between the Grundy numbers of restricted Nim and the Grundy numbers of restricted Nim with a pass, where the number of piles can be any natural number. Therefore, the authors address a longstanding open question in combinatorial game theory: the extent to which the introduction of a pass into a game affects its behavior. The game that we developed appears to be the first variant of Nim that is fully solvable when a pass is not allowed and remains fully solvable following the introduction of a pass move. \pagestyle{myheadings} \markright{\smalltt \hfill} \thispagestyle{empty} \baselineskip=12.875pt \vskip 30pt \section{Introduction} In this study, restricted Nim and restricted Nim with a pass are examined. An interesting but difficult question in combinatorial game theory has been to determine what happens when standard game rules are modified to allow a one-time pass, that is, a pass move that may be used at most once in the game and not from a terminal position. 
Once a pass has been used by either player, it is no longer available. In the case of classical Nim, the introduction of the pass alters the mathematical structure of the game, considerably increasing its complexity. The effect of a pass on classical Nim remains an important open question that has defied traditional approaches. The late mathematician David Gale offered a monetary prize to the first person to develop a solution for three-pile classical Nim with a pass. In \cite{nimpass} (p. 370), Friedman and Landsberg conjectured that ”solvable combinatorial games are structurally unstable to perturbations, while generic, complex games will be structurally stable.” One way to introduce such a perturbation is to allow a pass. One of the authors of the present article reported a counterexample to this conjecture in \cite{integers1}. The game used in \cite{integers1} is solvable because there is a simple formula for the Grundy numbers, and even when we introduce a pass move to the game, there is a simple formula for $\mathcal{P}$-positions. The restricted Nim considered in the present study is of the same type, but the introduction of a pass move has a minimal impact. There is a simple relationship between the Grundy numbers of the game and the Grundy numbers of the game with a pass move, and the number of piles can be any natural number. This result is stated in Theorem \ref{grundytwopilepass} of the present article. One of the authors discussed part of the result for one-pile restricted Nim with a pass in \cite{jcdcg2018a}. Let $Z_{\ge 0}$ and $N$ be sets of non-negative numbers and natural numbers, respectively. For completeness, we briefly review some of the necessary concepts of combinatorial game theory. Details are presented in $\cite{lesson}$ and $\cite{combysiegel}$. \begin{defn}\label{definitionfonimsum11} Let $x$ and $y$ be non-negative integers. They are expressed in Base 2 as follows: $x = \sum_{i=0}^n x_i 2^i$ and $y = \sum_{i=0}^n y_i 2^i$, with $x_i,y_i \in \{0,1\}$. We define \textit{nim-sum} $x \oplus y$ as follows: \begin{equation} x \oplus y = \sum\limits_{i = 0}^n {{w_i}} {2^i}, \end{equation} where $w_{i}=x_{i}+y_{i} \ (\bmod\ 2)$. \end{defn} For impartial games without drawings, there are only two outcome classes. \begin{defn}\label{NPpositions} $(a)$ A position is referred to as a $\mathcal{P}$-\textit{position} if it is a winning position for the previous player (the player who has just moved), as long as he/she plays correctly at every stage. \\ $(b)$ A position is referred to as an $\mathcal{N}$-\textit{position} if it is a winning position for the next player as long as he/she plays correctly at every stage. \end{defn} \begin{defn}\label{defofmexgrundy} $(i)$ For any position $\mathbf{p}$ of game $\mathbf{G}$, there is a set of positions that can be reached by precisely one move in $\mathbf{G}$, which we denote as \textit{move}$(\mathbf{p})$. \\ $(ii)$ The \textit{minimum excluded value} $(\textit{mex})$ of a set $S$ of non-negative integers is the smallest non-negative integer that is not in S. \\ $(iii)$ Let $\mathbf{p}$ be the position of an impartial game. 
The associated Grundy number is denoted as $G(\mathbf{p})$ and is recursively defined as follows: $\mathcal{G}(\mathbf{p}) = \textit{mex}(\{\mathcal{G}(\mathbf{h}): \mathbf{h} \in move(\mathbf{p})\}).$ \end{defn} \begin{defn}\label{sumofgames} The \textit{disjunctive sum} of the two games, which is denoted as $\mathbf{G}+\mathbf{H}$, is a supergame in which a player may move in either $\mathbf{G}$ or $\mathbf{H}$ but not both. \end{defn} \begin{theorem}\label{thofsumofgame} Let $\mathbf{G}$ and $\mathbf{H}$ be impartial rulesets and $G_{\mathbf{G}}$ and $G_{\mathbf{H}}$ be the Grundy numbers of position $\mathbf{g}$ played under the rules of $\mathbf{G}$ and position $\mathbf{h}$ played under the rules of $\mathbf{H}$, respectively. Thus, we have the following:\\ $(i)$ For any position $\mathbf{g}$ of $\mathbf{G}$, $G_{\mathbf{G}}(\mathbf{g})=0$ if and only if $\mathbf{g}$ is the $\mathcal{P}$ position. \\ $(ii)$ The Grundy number of positions $\{\mathbf{g},\mathbf{h}\}$ in game $\mathbf{G}+\mathbf{H}$ is $G_{\mathbf{G}}(\mathbf{g})\oplus G_{\mathbf{H}}(\mathbf{h})$. \end{theorem} For the proof of this theorem, see $\cite{lesson}$. \begin{rem} With Theorem \ref{thofsumofgame}, we can find a $\mathcal{P}$-position by calculating the Grundy numbers and a $\mathcal{P}$-position of the sum of two games by calculating the Grundy numbers of two games. Therefore, Grundy numbers are an important research topic in combinatorial game theory. \end{rem} \section{Maximum Nim} In this section, we study maximum Nim, which is a game of restricted Nim. \begin{defn}\label{defofregular} If the sequence $f(m)$ for $m \in Z_{\ge 0}$ satisfies $0 \leq f(m) -f(m-1) \leq 1$ for any natural number $m$, it is called a “regular sequence.” \end{defn} \begin{defn}\label{defofmaxnim} Let $f(m)$ be a regular sequence: Suppose that there is a pile of $n$ stones, and two players take turns removing stones from the pile. In each turn, the player is allowed to remove at least one stone and at most $f(m)$ stones, where $m$ represents the number of stones. The player who removes the last stone is the winner. We refer to $f$ as a “rule sequence.” \end{defn} \begin{lemma}\label{lemmabylevinenim} Let $\mathcal{G}$ represent the Grundy number of the maximum Nim with the rule sequence $f(x)$. Then, we have the following properties: \\ $(i)$ If $f(x) = f(x-1)$, $\mathcal{G}(x) = \mathcal{G}(x-f(x)-1)$.\\ $(ii)$ If $f(x) > f(x-1)$, $\mathcal{G}(x) = f(x)$. \end{lemma} \begin{proof} Properties $(i)$ and $(ii)$ are proven in Lemma 2.1 of \cite{levinenim}. \end{proof} \subsection{Maximum Nim Whose Rule Sequence is $f(x) = \lceil \frac{x}{2} \rceil$}\label{sectionforceil} In this section, we let $f(x) = \lceil \frac{x}{2} \rceil$. Because $0 \leq f(m) -f(m-1) \leq 1$ for any $m \in N$, $f(m)$ for $m \in Z_{\ge 0}$ is a regular sequence. Here, we examine the maximum Nim of Definition \ref{defofmaxnim} for $f(x)$. Another option is to use $f(x) = \lfloor \frac{x}{2}\rfloor $; however, this case produces almost the same result because $ \lfloor \frac{x+1}{2}\rfloor = \lceil \frac{x}{2} \rceil$ for any $n \in Z_{\ge 0}$. Therefore, the case of $f(x) = \lfloor \frac{x}{2}\rfloor $ is omitted in this study. \begin{defn} We denote the pile of $m$ stones as $(m)$, which we call the position of the game. \end{defn} We define $\textit{move}(t)$ for the maximum Nim of Definition \ref{defofmaxnim} for the rule sequence $f(x)$. 
\begin{defn}\label{moveofvpsnim} $\textit{move}(t)$ is the set of all the positions that can be reached from position $(t)$. For any $t \in Z_{\ge 0}$, we have \begin{flalign} & \textit{move}(t) = \{(t-v):v \leq \lceil \frac{t}{2} \rceil \text{ and } v \in N \}. & \nonumber \end{flalign} \end{defn} \begin{lemma}\label{grundyhalfnim} Let $\mathcal{G}$ represent the Grundy number of the maximum Nim with the rule sequence $f(x) = \lceil \frac{x}{2} \rceil$. Then, we have the following properties: \\ $(i)$ If $t$ is even and $t \geq 2$, $\mathcal{G}(t) = \mathcal{G}(\frac{t-2}{2})$. \\ $(ii)$ If $t$ is odd, $\mathcal{G}(t) =\frac{t+1}{2}$. \end{lemma} \begin{proof} \textbf{(i)} If $t$ is even, $\lceil \frac{t}{2} \rceil = \lceil \frac{t-1}{2} \rceil$. Therefore, according to $(i)$ in Lemma \ref{lemmabylevinenim}, $\mathcal{G}(t) = \mathcal{G}(t-\lceil \frac{t}{2} \rceil -1)$ $=\mathcal{G}(\frac{t-2}{2})$. \\ \textbf{(ii)} If $t$ is odd, $\lceil \frac{t}{2} \rceil > \lceil \frac{t-1}{2} \rceil$. Therefore, according to $(ii)$ of Lemma \ref{lemmabylevinenim}, we have $\mathcal{G}(t) = \lceil \frac{t}{2} \rceil$ $= \frac{t+1}{2}$. \\ \end{proof} \subsection{Three-Pile Maximum Nim}\label{twopilepass} \begin{defn}\label{defofmaxnim3piles} Suppose that there are three piles of stones and two players take turns to remove stones from the piles. In each turn, the player chooses a pile and removes at least one stone and at most $f(x) = \lceil \frac{x}{2} \rceil$ stones, where $x$ represents the number of stones. The player who removes the last stone is the winner. The position of the game is represented by three coordinates $\{s,t,u\}$, where $s$, $t$, and $u$ represent the numbers of stones in the first, second, and third piles, respectively. \end{defn} According to the results presented in Section \ref{sectionforceil} and Theorem \ref{thofsumofgame}, we can calculate the Grundy numbers of the game in Definition \ref{defofmaxnim3piles}. \begin{theorem}\label{thmforsumgame} Let $\mathcal{G}(t)$ be the Grundy number of the game in Subsection \ref{sectionforceil}. Then, the Grundy number $\mathcal{G}(s,t,u)$ of the game of Definition \ref{defofmaxnim3piles} satisfies the following equation: $\mathcal{G}(s,t,u) = \mathcal{G}(s) \oplus \mathcal{G}(t)\oplus \mathcal{G}(u)$. \end{theorem} \begin{proof} This is directly from Theorem \ref{thofsumofgame}. \end{proof} \section{Maximum Nim with a Pass}\label{pass1} In Subsections \ref{sectionforceilpass} and \ref{twowithpass}, we modify the standard rules of the games to allow for a one-time pass, that is, a pass move that may be used at most once in the game and not from a terminal position. Once a pass has been used by either player, it is no longer available. \subsection{Maximum Nim with a Pass Whose Rule Sequence is $f(x) = \lceil \frac{x}{2} \rceil$ with a pass move}\label{sectionforceilpass} The position of this game is represented by two coordinates $\{t, p\}$, where $t$ represents the number of stones in the pile. $p = 1$ if the pass is still available; otherwise, $p = 0$. We define $\textit{move}$ in this game. \begin{defn}\label{defofonenimepass} For any $t \in Z_{\ge 0}$, we have $(i)$ and $(ii)$. \\ $(i)$ If $p=1$ and $t>0$, \begin{equation} \textit{move}(t,p)= \{\{t-v,p\}:v \leq \lceil \frac{t}{2} \rceil \text{ and } v \in N \} \cup \{\{t,0\}\}.\nonumber \end{equation} $(ii)$ If $p=0$ or $t=0$, \begin{equation} \textit{move}(t,p)= \{\{t-v,p\}:u \leq \lceil \frac{t}{2} \rceil \text{ and } v \in N \}. 
\nonumber \end{equation} \end{defn} \begin{rem} Note that a pass is not available from position $\{t,1\}$ with $t=0$, which is the terminal position. It is clear that $\mathcal{G}(t,0)$ is identical to $\mathcal{G}(t)$ in Section \ref{sectionforceil}. \end{rem} According to Definitions \ref{defofmexgrundy} and \ref{defofonenimepass}, we define the Grundy number $\mathcal{G}(t,p)$ of the position $\{t,p\}$. \includegraphics[height=1.5cm]{grundyhalfnim.eps} \begin{pict}\label{Grundy number of half nim with a pass} Table of Grundy numbers $\mathcal{G}(t,p)$ \end{pict} \begin{theorem}\label{grundyhalfnimpass} Let $\mathcal{G}(s,p)$ be the Grundy number of position $\{s,p\}$. Then, we obtain the following equations: \\ $(i)$ $\mathcal{G}(0,0) = 0$ and $\mathcal{G}(0,1) = 0$. \\ $(ii)$ For $u \in N$, if $\mathcal{G}(u,0)=0$, then $ \mathcal{G}(u,1)=1.\nonumber$\\ $(iii)$ For $u \in N$, if $\mathcal{G}(u,0)=2$, then $ \mathcal{G}(u,1)=0.\nonumber$\\ $(iv)$ For $u,m \in N$ such that $m>1$, if $\mathcal{G}(u,0)=2m$, then $ \mathcal{G}(u,1)=2m-1.\nonumber$\\ $(v)$ For $u,m \in N$, if $\mathcal{G}(u,0)=2m-1$, then $ \mathcal{G}(u,1)=2m.$\nonumber \end{theorem} \begin{proof} $(i)$ $\mathcal{G}(0,0)= 0$ because $\{0,0\}$ is the terminal position. We cannot move to any position or use a pass move from position $\{0,1\}$. Hence, $\{0,1\}$ is also a terminal position. Therefore, $\mathcal{G}(0,1) = 0$. Next, we prove $(ii)$, $(iii)$, $(iv)$, and $(v)$ using mathematical induction. From $(ii)$ of Lemma \ref{grundyhalfnim}, \begin{equation} \mathcal{G}(1,0)=\mathcal{G}(1) = \frac{1+1}{2}= 1. \label{g10eq1} \end{equation} As $\textit{move}(1,1) =\{\{0,1\},\{1,0\}\}$, $\mathcal{G}(0,1) = 0$, and $\mathcal{G}(1,0) = 1$, we have \begin{align} \mathcal{G}(1,1) & = \textit{mex}(\{\mathcal{G}(k,h):\{k,h\} \in \textit{move}(1,1)\}) \nonumber \\ & =\textit{mex}(\{\mathcal{G}(0,1),\mathcal{G}(1,0)\}), \nonumber \\ & =\textit{mex}(\{0,1\})=2. \label{g11eq2} \end{align} Let $t \in N$. From equations (\ref{g10eq1}) and (\ref{g11eq2}), we have only to prove the case such that \begin{equation} t \geq 2. \label{casegreat2} \end{equation} We suppose that $(ii)$, $(iii)$, $(iv)$, and $(v)$ are valid for $k \in Z_{\ge 0}$ such that $k < t $. From the inequality in (\ref{casegreat2}), we have \begin{equation} \{0,0\} \notin \textit{move}(t,0)=\{\{t-1,0\}, \cdots, \{t-\lceil \frac{t}{2} \rceil,0\}\}; \nonumber \end{equation} hence, we have \begin{equation} k \in N \label{kinn} \end{equation} when $\{k,0\} \in \textit{move}(t,0)$. \\ $(ii)$ Suppose that \begin{equation} \mathcal{G}(t,0) = 0.\label{gtoeq0} \end{equation} \begin{equation} \mathcal{G}(t,0) = \textit{mex}(\{\mathcal{G}(k,0):\{k,0\} \in \textit{move}(t,0)\}); \nonumber \end{equation} hence, according to the definition of the Grundy number in Definition \ref{defofmexgrundy}, \begin{align} 0 \notin & \{\mathcal{G}(k,0):\{k,0\} \in \textit{move}(t,0)\} \nonumber \\ = & \{ \mathcal{G}(t-1,0), \cdots, \mathcal{G}(t-\lceil \frac{t}{2} \rceil,0)\}. 
\nonumber \end{align} From relation (\ref{kinn}) and the mathematical induction hypothesis for $(ii)$, $(iii)$, $(iv)$, and $(v)$, \begin{equation} 1 \notin \{\mathcal{G}(t-1,1), \cdots, \mathcal{G}(t-\lceil \frac{t}{2} \rceil,1)\}.\label{no1in} \end{equation} \begin{align} \mathcal{G}(t,1)& = \textit{mex}(\{\mathcal{G}(k,m):\{k,m\} \in \textit{move}(t,1)\}) \nonumber \\ & = \textit{mex}(\{\mathcal{G}(k,1):\{k,1\} \in \textit{move}(t,1)\}\cup \{ \mathcal{G}(t,0) \}) \nonumber \\ & = \textit{mex}(\{\mathcal{G}(t-1,1), \cdots, \mathcal{G}(t-\lceil \frac{t}{2} \rceil,1)\} \cup \{ \mathcal{G}(t,0) \}) \nonumber \end{align} Hence, from Equation (\ref{gtoeq0}), relation (\ref{no1in}), and the definition of the Grundy number in Definition \ref{defofmexgrundy}, we have $\mathcal{G}(t,1) = 1$. \\ $(iii)$ Suppose that \begin{equation} \mathcal{G}(t,0) = 2.\label{grundye2} \end{equation} Then, \begin{align} & 2 \notin \{\mathcal{G}(k,0):\{k,0\} \in \textit{move}(t,0)\} \nonumber \\ & = \{\mathcal{G}(t-1,0), \cdots, \mathcal{G}(t- \lceil \frac{t}{2} \rceil ,0)\}. \label{no2in} \end{align} Therefore, according to relation (\ref{kinn}) and the mathematical induction hypothesis for $(ii)$, $(iii)$, $(iv)$, and $(v)$, we have \begin{equation} 0 \notin \{\mathcal{G}(t-1,1), \cdots, \mathcal{G}(t- \lceil \frac{t}{2} \rceil,1)\}. \label{not0in2} \end{equation} \begin{align} \mathcal{G}(t,1) & =\textit{mex}(\{\mathcal{G}(k,m):\{k,m\} \in \textit{move}(t,1)\}) \nonumber \\ & = \textit{mex}(\{ \mathcal{G}(t-1,1), \cdots, \mathcal{G}(t- \lceil \frac{t}{2} \rceil,1)\}\cup \{ \mathcal{G}(t,0) \}), \nonumber \end{align} Hence, from Equation (\ref{grundye2}) and relation (\ref{not0in2}), we have $\mathcal{G}(t,1) = 0$. \\ $(iv)$ Suppose that \begin{equation} \mathcal{G}(t,0)=2m \label{gt0eq2m} \end{equation} for a natural number $m$ such that $m > 1$. We proved that $\mathcal{G}(t,1) = 2m-1$. Because \begin{equation} 2m = \mathcal{G}(t,0) =\textit{mex}(\{\mathcal{G}(k,0):\{k,0\} \in \textit{move}(t,0)\}), \nonumber \end{equation} \begin{equation} \{\mathcal{G}(k,0):\{k,0\} \in \textit{move}(t,0)\} \supset \{2m-1, 2m-2,...,4,3,2,1,0\}\label{include2mminus1} \end{equation} and \begin{equation} 2m \notin \{\mathcal{G}(k,0):\{k,0\} \in \textit{move}(t,0)\}.\label{not2min} \end{equation} From the relations (\ref{kinn}), (\ref{include2mminus1}), and (\ref{not2min}) and the mathematical induction hypothesis for (ii),(iii), (iv), and (v), \begin{equation} \{\mathcal{G}(k,1):\{k,1\} \in \textit{move}(t,1)\} \supset \{2m, 2m-3,2m-2,2m-5,2m-4,...,3,4,0,2,1\} \nonumber \end{equation} and \begin{equation} 2m-1 \notin \{\mathcal{G}(k,m):\{k,1\} \in \textit{move}(t,1)\}. \nonumber \end{equation} \begin{equation} \mathcal{G}(t,1) =\textit{mex}(\{\mathcal{G}(k,1):\{k,1\} \in \textit{move}(t,1)\} \cup \{\mathcal{G}(t,0)\}); \nonumber \end{equation} hence, according to (\ref{gt0eq2m}), $\mathcal{G}(t,1) = 2m-1 $. \\ $(v)$ Suppose that \begin{equation} \mathcal{G}(t,0)=2m-1 \label{gt02mminus1} \end{equation} for natural number $m$. Since \begin{equation} 2m-1 = \mathcal{G}(t,0) =\textit{mex}(\{\mathcal{G}(k,0):\{k,0\} \in \textit{move}(t,0)\}), \nonumber \end{equation} \begin{equation} \{\mathcal{G}(k,0):\{k,0\} \in \textit{move}(t,0)\} \supset \{ 2m-2,2m-3...,2,1,0\} \label{inc2mminus2} \end{equation} and \begin{equation} 2m-1 \notin \{\mathcal{G}(k,0):\{k,0\} \in \textit{move}(t,0)\}. 
\label{not2mminus1} \end{equation} From relations (\ref{kinn}), (\ref{inc2mminus2}), and (\ref{not2mminus1}) and the mathematical induction hypothesis for $(ii)$, $(iii)$, $(iv)$, and $(v)$, \begin{equation} \{\mathcal{G}(k,1):\{k,1\} \in \textit{move}(t,1)\} \supset \{2m-3,2m-2,2m-5,2m-4,...0,2,1\}\label{inc2mminus3} \end{equation} and \begin{equation} 2m \notin \{\mathcal{G}(k,1):\{k,1\} \in \textit{move}(t,1)\}.\label{notin2mm} \end{equation} As \begin{equation} \mathcal{G}(t,1) =\textit{mex}(\{\mathcal{G}(k,1):\{k,1\} \in \textit{move}(t,1)\} \cup \{\mathcal{G}(t,0)\}),\nonumber \end{equation} according to Equation (\ref{gt02mminus1}), relations (\ref{inc2mminus3}) and (\ref{notin2mm}), we have $\mathcal{G}(t,1) = 2m $. \end{proof} \subsection{Three-Pile Maximum Nim with a Pass}\label{twowithpass} Here, we study maximum Nim with three piles based on Definition \ref{defofmaxnim3piles} by modifying the standard rules of the games to allow a one-time pass. We consider only three-pile games, although generalization to the case of an arbitrary natural number of games is straightforward. We denote the position of the game with three coordinates $\{s,t,u,p\}$, where $s$, $t$, and $u$ represent the numbers of stones in the first, second, and third piles, respectively. $p = 1$ if the pass is still available, and $p = 0$ otherwise. We define a $\textit{move}$ in this game as follows. \begin{defn}\label{defof3pilepass} For any $s,t,u \in Z_{\ge 0}$, we have $(i)$ and $(ii)$. \\ $(i)$ If $p=1$ and $s+t+u>0$, \begin{align} & \textit{move}(s,t,u,p)= \{\{s-v,t,u,p\}: v \leq \lceil \frac{s}{2} \rceil \text{ and } v \in N \}\nonumber \\ & \cup \{\{s,t-v,u,p\}:v \leq \lceil \frac{t}{2} \rceil \text{ and } v \in N \} \nonumber \\ & \cup \{\{s,t,u-v,p\}:v \leq \lceil \frac{u}{2} \rceil \text{ and } v \in N \} \cup \{\{s,t,u,0\}\}. \nonumber \end{align} $(ii)$ If $p=0$ or $s+t+u=0$, \begin{align} & \textit{move}(s,t,u,p)= \{\{s-v,t,u,p\}:v \leq \lceil \frac{s}{2} \rceil \text{ and } v \in N \}\nonumber \\ & \cup \{\{s,t-v,u,p\}:v \leq \lceil \frac{t}{2} \rceil \text{ and } v \in N \} \nonumber \\ & \cup \{\{s,t,u-v,p\}:v \leq \lceil \frac{u}{2} \rceil \text{ and } v \in N \} . \nonumber \end{align} \end{defn} According to Definitions \ref{defofmexgrundy} and \ref{defof3pilepass}, we define the Grundy number $\mathcal{G}(s,t,u,p)$ of the position $\{s,t,u,p\}$. \begin{rem} Note that a pass is not available from the position $\{s,t,u,1\}$ with $s+t+u=0$ which is the terminal position. It is clear that $\mathcal{G}(s,0,0,p)$, $\mathcal{G}(0,s,0,p)$, and $\mathcal{G}(0,0,s,p)$ are identical to $\mathcal{G}(s,p)$ in Section \ref{sectionforceilpass}. \end{rem} \begin{lemma}\label{caseof1110} Let $\mathcal{G}(s,t,u,p)$ be the Grundy number of position $\{s,t,u,p\}$. Then, we obtain the following equations: \\ $(i)$ $\mathcal{G}(1,0,0,0) = \mathcal{G}(0,1,0,0)=\mathcal{G}(0,0,1,0)=1.$ \\ $(ii)$ $\mathcal{G}(1,1,0,0) = \mathcal{G}(0,1,1,0)=\mathcal{G}(1,0,1,0)=0.$ \\ $(iii)$ $\mathcal{G}(1,1,1,0) =1.$ \\ $(iv)$ $\mathcal{G}(1,0,0,1) = \mathcal{G}(0,1,0,1)=\mathcal{G}(0,0,1,1)=2.$ \\ $(v)$ $\mathcal{G}(1,1,0,1) = \mathcal{G}(0,1,1,1)=\mathcal{G}(1,0,1,1)=1.$ \\ $(vi)$ $\mathcal{G}(1,1,1,1) =0. 
$ \end{lemma} \begin{proof} $(i)$ From Lemma \ref{grundyhalfnim}, $ \mathcal{G}(1,0,0,0) = \mathcal{G}(0,1,0,0)=\mathcal{G}(0,0,1,0)$ $=\mathcal{G}(1,0)$ \\ $=\mathcal{G}(1)=1.$\\ $(ii)$ From (i) and Theorem \ref{thofsumofgame}, we have $\mathcal{G}(1,1,0,0)=\mathcal{G}(0,1,1,0)=\mathcal{G}(1,0,1,0)$ $=\mathcal{G}(1,0,0,0) \oplus \mathcal{G}(0,0,1,0)=1 \oplus 1 =0.$\\ $(iii)$ From (i) and Theorem \ref{thofsumofgame}, $\mathcal{G}(1,1,1,0) =\mathcal{G}(1,0,0,0) \oplus \mathcal{G}(0,1,0,0) \oplus \mathcal{G}(0,0,1,0)$ $= 1.$\\ $(iv)$ From (i) of Lemma \ref{grundyhalfnim} and (v) of Theorem \ref{grundyhalfnimpass}, $\mathcal{G}(1,0,0,1) = \mathcal{G}(0,1,0,1)$ \\ $=\mathcal{G}(0,0,1,1)$ $=\mathcal{G}(1,1)=\mathcal{G}(1,0)+1=2.$ \\ $(v)$ From (ii) and (iv), \begin{align} & \mathcal{G}(1,1,0,1) =\textit{mex}(\{\mathcal{G}(h,k,0,1):\{h,k,0,1\} \in \textit{move}(1,1,0,1)\} \cup \{\mathcal{G}(1,1,0,0)\}) \nonumber \\ & = \textit{mex}(\{\mathcal{G}(1,0,0,1), \mathcal{G}(0,1,0,1), \mathcal{G}(1,1,0,0)\})= \textit{mex}(\{2,2,0 \}) = 1. \nonumber \end{align} $(vi)$ From (iii) and (v), \begin{align} & \mathcal{G}(1,1,1,1) =\textit{mex}(\{\mathcal{G}(h,k,j,1):\{h,k,j,1\} \in \textit{move}(1,1,1,1)\} \cup \{\mathcal{G}(1,1,1,0)\}) \nonumber \\ & = \textit{mex}(\{\mathcal{G}(1,1,0,1), \mathcal{G}(1,0,1,1), \mathcal{G}(0,1,1,1), \mathcal{G}(1,1,1,0)\})= \textit{mex}(\{1,1,1,1 \}) = 0. \nonumber \end{align} \end{proof} \begin{theorem}\label{grundytwopilepass} We have the following formulae for the Grundy numbers: \\ $(i)$ $\mathcal{G}(s,0,0,1) =\mathcal{G}(0,s,0,1) =\mathcal{G}(0,0,s,1) = \mathcal{G}(s,1)$ for $s \in Z_{\ge 0}$. \\ $(ii)$ We suppose that $s,t>0$, $t,u>0$, or $u,s>0$. Thus, we have the following: \ \ $(ii.1)$ For any $m \in Z_{\ge 0}$, if $ \mathcal{G}(s,t,u,0) = 2m$, then $ \mathcal{G}(s,t,u,1) = 2m+1.$\\ $(ii.2)$ For any $m \in Z_{\ge 0}$, if $ \mathcal{G}(s,t,u,0) = 2m+1$, then $ \mathcal{G}(s,t,u,1) = 2m.$ \end{theorem} \begin{proof} (I) Let $\{s,t,u,1\}$ be the position of the game. The position $\{s,0,0,1\}$ is identical to the position $\{s,1\}$ in the game of Subsection \ref{sectionforceil}; hence, $\mathcal{G}(s,0,0,1) = \mathcal{G}(s,1)$. Similarly, $\mathcal{G}(0,s,0,1) =\mathcal{G}(0,0,s,1) = \mathcal{G}(s,1)$, and we have $(i)$. \\ (II) We prove this using mathematical induction. From Lemma \ref{caseof1110}, $(ii.1)$ and $(ii.2)$ are valid for $s,t,u \in Z_{\ge 0}$ such that $s,t,u \leq 1$. Suppose that $(ii.1)$ and $(ii.2)$ for $\{h,k,j,p\}$ when $h \leq s, k \leq t$, $j \leq u$, $h+k+j<s+t+u$, and $p=0,1$.\\ (II.1) Suppose that \begin{equation} \mathcal{G}(s,t,u,0) =2m \label{geq2m} \end{equation} for $m \in Z_{\ge 0}$. Then, according to the definition of the Grundy number in Definition \ref{defofmexgrundy}, \begin{align} & \{\mathcal{G}(h,k,j,0):\{h,k,j,0\} \in \textit{move}(s,t,u,0)\}\nonumber \\ \supset & \{2m-1, 2m-2,2m-3,2m-4, \cdots, 5,4,3,2,1,0\}\label{movefor0} \end{align} and \begin{equation} 2m \notin \{\mathcal{G}(h,k,j,0):\{h,k,j,0\} \in \textit{move}(s,t,u,0)\}.\label{no2m} \end{equation} \underline{Case $(a)$} Suppose that \begin{equation} \textit{move}(s,t,u,0) \cap \{\{s,0,0,0\},\{0,t,0,0\},\{0,0,u,0\}\} = \emptyset.\nonumber \end{equation} Then, we have \begin{equation} h,k>0 \text{ or } k,j>0 \text{ or } j,h>0 \label{condforhkj} \end{equation} when \begin{equation} \{h,k,j,0\} \in \textit{move}(s,t,u,0). 
\nonumber \end{equation} By applying the mathematical induction hypothesis to $(ii.1)$ and $(ii.2)$, along with relations (\ref{movefor0}), (\ref{no2m}), and (\ref{condforhkj}), we obtain \begin{align} & \{\mathcal{G}(h,k,j,1):\{h,k,j,1\} \in \textit{move}(s,t,u,1)\} \nonumber \\ & \supset \{2m-2, 2m-1,2m-4,2m-3, \cdots, 4,5,2,3,0,1\} \label{incl2mninus2b} \end{align} and \begin{equation} 2m+1 \notin \{\mathcal{G}(h,k,j,1):\{h,k,j,1\} \in \textit{move}(s,t,u,1)\}. \label{no2mplus1b} \end{equation} As \begin{equation} \mathcal{G}(s,t,u,1) =\textit{mex}(\{\mathcal{G}(h,k,j,1):\{h,k,j,1\} \in \textit{move}(s,t,u,1)\} \cup \{\mathcal{G}(s,t,u,0)\}), \nonumber \end{equation} according to Equations (\ref{geq2m}), (\ref{incl2mninus2b}), and (\ref{no2mplus1b}), we have $\mathcal{G}(s,t,u,1)=2m+1$.\\ \underline{Case $(b)$} Suppose that \begin{align} & \textit{move}(s,t,u,0) \cap \{\{s,0,0,0\},\{0,t,0,0\},\{0,0,u,0\}\} \ne \emptyset. \nonumber \end{align} Then, we have $\{t,u\}=\{1,0\}$ or $\{t,u\}=\{0,1\}$ or $\{s,u\}=\{1,0\}$ or $\{s,u\}=\{0,1\}$ or $\{s,t\}=\{1,0\}$ or $\{s,t\}=\{0,1\}$. Here, we prove only the case where $\{t,u\}=\{1,0\}$. If $s = 1$, according to Lemma \ref{caseof1110}, we have $\mathcal{G}(1,1,0,0) = 0$ and $\mathcal{G}(1,1,0,1) = 1=\mathcal{G}(1,1,0,0)+1$. This result satisfies $(ii.1)$. Therefore, we assume the following: \begin{equation}\label{gs002m} \mathcal{G}(s,1,0,0) = 2m, \end{equation} where $s>1$, and we prove that $\mathcal{G}(s,1,0,1) = 2m+1$. Because $s>1$, we have \begin{equation} \textit{move}(s,1,0,0) = \{\{s-v,1,0,0\}: v \in N \text{ and } v \leq \lceil \frac{s}{2} \rceil\}\cup \{\{s,0,0,0\}\}, \label{nowhere0100} \end{equation} where \begin{equation} s-v > s - \lceil \frac{s}{2} \rceil >0 \label{nowhere0100b} \end{equation} for $v$ such that $v \leq \lceil \frac{s}{2} \rceil$. \begin{equation}\label{s002m} \mathcal{G}(s,1,0,0)=\mathcal{G}(s,0,0,0) \oplus \mathcal{G}(0,1,0,0) = \mathcal{G}(s,0,0,0) \oplus \mathcal{G}(1,0)= \mathcal{G}(s,0,0,0) \oplus 1\nonumber \end{equation} Hence, according to Equation (\ref{gs002m}), we have \begin{equation}\label{s0002m1} \mathcal{G}(s,0,0,0)=2m+1. \end{equation} We use relation (\ref{movefor0}) for $\{s,t,u,0\} = \{s,1,0,0\}$; then, we have \begin{align} & \{\mathcal{G}(h,k,0,0):\{h,k,0,0\} \in \textit{move}(s,1,0,0)\} \nonumber \\ & = \{\mathcal{G}(h,1,0,0):\{h,1,0,0\} \in \textit{move}(s,1,0,0)\} \cup \{\mathcal{G}(s,0,0,0)\} \nonumber \\ & \supset \{2m-1,2m-2,2m-3,2m-4, \cdots,5,4,3,2,1,0\};\nonumber \end{align} hence, from Equation (\ref{s0002m1}), we have \begin{align} & \{\mathcal{G}(h,1,0,0):\{h,1,0,0\} \in \textit{move}(s,1,0,0)\}\nonumber \\ & = \{\{s-v,1,0,0\}: v \in N \text{ and } v \leq \lceil \frac{s}{2} \rceil\} \nonumber \\ & \supset \{2m-1,2m-2,2m-3,2m-4, \cdots,5,4,3,2,1,0\}.\label{movefor02} \end{align} We use relation (\ref{no2m}) for $\{s,t,u,0\} = \{s,1,0,0\}$; then, we have \begin{equation} 2m \notin \{\mathcal{G}(h,1,0,0):\{h,1,0,0\} \in \textit{move}(s,1,0,0)\}\cup \{\{s,0,0,0\}\}.\label{no2m2} \end{equation} From equations (\ref{nowhere0100}) and (\ref{movefor02}), the inequality in (\ref{nowhere0100b}), and the mathematical induction hypothesis for $(ii.1)$ and $(ii.2)$, we have \begin{align} & \{\mathcal{G}(h,1,0,1):\{h,1,0,1\} \in \textit{move}(s,1,0,1)\} \nonumber \\ & \supset \{2m-2, 2m-1,2m-4,2m-3, \cdots, 4,5,2,3,0,1\}. 
\label{ind2mminus2c} \end{align} From equations (\ref{nowhere0100}) and (\ref{no2m2}), the inequality in (\ref{nowhere0100b}), and the mathematical induction hypothesis for $(ii.1)$ and $(ii.2)$, we have \begin{equation} 2m+1 \notin \{\mathcal{G}(h,1,0,1):\{h,1,0,1\} \in \textit{move}(s,1,0,1)\}. \label{notincluding2m1} \end{equation} From Equations $(\ref{s0002m1})$ and $(iv)$ of Theorem \ref{grundyhalfnimpass}, we have \begin{equation} \mathcal{G}(s,0,0,1) =\mathcal{G}(s,1)=\mathcal{G}(s,0)+1= \mathcal{G}(s,0,0,0)+1 = 2m+2.\label{s0002m2} \end{equation} Since \begin{align} & \mathcal{G}(s,1,0,1) =\textit{mex}(\{\mathcal{G}(h,k,0,1):\{h,k,0,1\} \in \textit{move}(s,1,0,1)\} \cup \{\mathcal{G}(s,1,0,0)\}) \nonumber \\ & = \textit{mex}(\{\mathcal{G}(h,1,0,1):\{h,1,0,1\} \in \textit{move}(s,1,0,1)\} \nonumber \\ & \cup \{\mathcal{G}(s,0,0,1)\} \cup \{\mathcal{G}(s,1,0,0)\}), \nonumber \end{align} by the relations (\ref{ind2mminus2c}), (\ref{notincluding2m1}), and equations (\ref{gs002m}) and (\ref{s0002m2}), we have $\mathcal{G}(s,1,0,1)$ \\ $=2m+1$.\\ (II.2) We assume that: \begin{equation} \mathcal{G}(s,t,u,0) =2m+1 \label{geq2mb} \end{equation} for $m \in Z_{\ge 0}$. Then, according to the definition of the Grundy number in Definition \ref{defofmexgrundy} \begin{align} & \{\mathcal{G}(h,k,j,0):\{h,k,j,0\} \in \textit{move}(s,t,u,0)\}\nonumber \\ \supset & \{2m,2m-1, 2m-2,2m-3, \cdots, 5,4,3,2,1,0\}\label{movefor0b} \end{align} and \begin{equation} 2m+1 \notin \{\mathcal{G}(h,k,j,0):\{h,k,j,0\} \in \textit{move}(s,t,u,0)\}.\label{no2mb} \end{equation} \underline{Case $(a)$} Suppose that \begin{align} & \textit{move}(s,t,u,0) \cap \{\{s,0,0,0\},\{0,t,0,0\},\{0,0,u,0\}\} = \emptyset. \nonumber \end{align} Then, we have \begin{equation} h,k>0 \text{ or } k,j>0 \text{ or } j,h>0 \label{condforhkj2} \end{equation} when \begin{equation} \{h,k,j,0\} \in \textit{move}(s,t,u,0). \nonumber \end{equation} According to the inequality in (\ref{condforhkj2}), relations (\ref{movefor0b}) and (\ref{no2mb}), and the mathematical induction hypothesis for $(ii.1)$ and $(ii.2)$, we have \begin{align} & \{\mathcal{G}(h,k,j,1):\{h,k,j,1\} \in \textit{move}(s,t,u,1)\} \nonumber \\ & \supset \{2m+1,2m-2, 2m-1,2m-4,2m-3, \cdots, 4,5,2,3,0,1\} \label{incc2mplus1} \end{align} and \begin{equation} 2m \notin \{\mathcal{G}(h,k,j,1):\{h,k,j,1\} \in \textit{move}(s,t,u,1)\}. \label{no2mint} \end{equation} As \begin{equation} \mathcal{G}(s,t,u,1) =\textit{mex}(\{\mathcal{G}(h,k,j,1):\{h,k,j,1\} \in \textit{move}(s,t,u,1)\} \cup \{\mathcal{G}(s,t,u,0)\}), \nonumber \end{equation} according to Equations (\ref{geq2mb}), (\ref{incc2mplus1}), and (\ref{no2mint}), we have $\mathcal{G}(s,t,u,1)=2m$. \\ \underline{Case $(b)$} We suppose that \begin{align} & \textit{move}(s,t,u,0) \cap \{\{s,0,0,0\},\{0,t,0,0\},\{0,0,u,0\}\} \ne \emptyset. \nonumber \end{align} Then, we have $\{t,u\}=\{1,0\}$ or $\{t,u\}=\{0,1\}$ or $\{s,u\}=\{1,0\}$ or $\{s,u\}=\{0,1\}$ or $\{s,t\}=\{1,0\}$ or $\{s,t\}=\{0,1\}$. Here, we prove only the case where $\{t,u\}=\{1,0\}$. If $s = 1$, according to Lemma \ref{caseof1110}, we have $\mathcal{G}(1,1,0,0) = 0$. This contradicts Equation (\ref{geq2mb}). We suppose that \begin{equation}\label{s10002mbb} \mathcal{G}(s,1,0,0) = 2m+1 \end{equation} and $s>1$. 
Because $s>1$, we have \begin{equation} \textit{move}(s,1,0,0) = \{\{s-v,1,0,0\}: v \in N \text{ and } v \leq \lceil \frac{s}{2} \rceil\}\cup \{\{s,0,0,0\}\}, \label{nowhere0100bb} \end{equation} where \begin{equation} s-v > s - \lceil \frac{s}{2} \rceil >0 \label{nowhere0100bbb} \end{equation} for $v$ such that $v \leq \lceil \frac{s}{2} \rceil$. By Theorem \ref{thofsumofgame} and Lemma \ref{grundyhalfnim}, \begin{equation}\label{s002mb} \mathcal{G}(s,1,0,0)=\mathcal{G}(s,0,0,0) \oplus \mathcal{G}(0,1,0,0) = \mathcal{G}(s,0,0,0) \oplus \mathcal{G}(1,0)= \mathcal{G}(s,0,0,0) \oplus 1\,. \nonumber \end{equation} Hence, according to Equation (\ref{s10002mbb}), we have \begin{equation}\label{s0002m1b} \mathcal{G}(s,0,0,0)=2m. \end{equation} From relation (\ref{movefor0b}), \begin{align} & \{\mathcal{G}(h,k,0,0):\{h,k,0,0\} \in \textit{move}(s,1,0,0)\}\nonumber \\ & =\{\mathcal{G}(h,1,0,0):\{h,1,0,0\} \in \textit{move}(s,1,0,0)\}\cup \{\mathcal{G}(s,0,0,0) \}\nonumber \\ \supset & \{2m,2m-1, 2m-2,2m-3, \cdots, 5,4,3,2,1,0\};\label{movefor0b2a} \end{align} hence, according to Equation (\ref{s0002m1b}), we have \begin{align} & \{\mathcal{G}(h,1,0,0):\{h,1,0,0\} \in \textit{move}(s,1,0,0)\}\nonumber \\ \supset & \{2m-1, 2m-2,2m-3, \cdots, 5,4,3,2,1,0\}.\label{movefor0b2} \end{align} From relations (\ref{nowhere0100bb}) and (\ref{movefor0b2}), the inequality in (\ref{nowhere0100bbb}), and the mathematical induction hypothesis for $(ii.1)$ and $(ii.2)$, we have \begin{align} & \{\mathcal{G}(h,1,0,1):\{h,1,0,1\} \in \textit{move}(s,1,0,1)\} \nonumber \\ & \supset \{2m-2, 2m-1,2m-4,2m-3, \cdots, 4,5,2,3,0,1\}. \label{include2mm201} \end{align} From relation (\ref{no2mb}), we have \begin{align} & 2m+1 \notin \{\mathcal{G}(h,k,0,0):\{h,k,0,0\} \in \textit{move}(s,1,0,0)\} \nonumber \\ & = \{\mathcal{G}(h,1,0,0):\{h,1,0,0\} \in \textit{move}(s,1,0,0)\}\cup \{\mathcal{G}(s,0,0,0) \}.\label{no2mb2} \end{align} From relations (\ref{nowhere0100bb}) and (\ref{no2mb2}), the inequality in (\ref{nowhere0100bbb}), and the mathematical induction hypothesis for $(ii.1)$ and $(ii.2)$, we have \begin{equation} 2m \notin \{\mathcal{G}(h,1,0,1):\{h,1,0,1\} \in \textit{move}(s,1,0,1)\}. \label{no2mintb} \end{equation} If $m=0$, according to Equation (\ref{s0002m1b}) and (ii) of Theorem \ref{grundyhalfnimpass}, we have \begin{equation}\label{caseof1} \mathcal{G}(s,0,0,1) = \mathcal{G}(s,1) =1. \end{equation} If $m=1$, according to Equation (\ref{s0002m1b}) and (iii) of Theorem \ref{grundyhalfnimpass}, we have \begin{equation}\label{caseof2} \mathcal{G}(s,0,0,1) = \mathcal{G}(s,1) =0. \end{equation} If $m \ne 0,1$, according to Equation (\ref{s0002m1b}) and (iv) of Theorem \ref{grundyhalfnimpass}, we have \begin{equation}\label{caseof3} \mathcal{G}(s,0,0,1) = \mathcal{G}(s,1)=2m-1. \end{equation} From Equations (\ref{caseof1}), (\ref{caseof2}), and (\ref{caseof3}), we have \begin{equation}\label{caseof4} \mathcal{G}(s,0,0,1) \ne 2m. \end{equation} As \begin{align} & \mathcal{G}(s,1,0,1) =\textit{mex}(\{\mathcal{G}(h,1,0,1):\{h,1,0,1\} \in \textit{move}(s,1,0,1)\} \nonumber \\ & \cup \{\mathcal{G}(s,0,0,1)\}\cup \{\mathcal{G}(s,1,0,0)\}), \nonumber \end{align} according to Equation (\ref{s10002mbb}), relations (\ref{include2mm201}) and (\ref{no2mintb}), and relation (\ref{caseof4}), we have \\ $\mathcal{G}(s,1,0,1)=2m$. \end{proof}
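\medskip

\noindent {\bf Computational note.} The small Grundy values used in Lemma \ref{caseof1110} can also be checked mechanically. The following Python sketch (ours, added for illustration) computes Grundy numbers by brute force under our reading of the rules: a move removes $v$ stones from a single pile with $1 \leq v \leq \lceil n/2 \rceil$, the last coordinate of a position records whether the single pass is still available, and we assume the pass may not be played when all piles are empty. Under these assumptions the script reproduces the values of Lemma \ref{caseof1110}; the rule conventions should, of course, be checked against the definitions given in the earlier sections of the paper.
\begin{verbatim}
from math import ceil
from functools import lru_cache

def mex(values):
    g = 0
    while g in values:
        g += 1
    return g

@lru_cache(maxsize=None)
def grundy(piles, p):
    # piles: tuple of pile sizes; p = 1 if the pass is still available
    options = set()
    for i, n in enumerate(piles):
        for v in range(1, ceil(n / 2) + 1):   # remove 1 <= v <= ceil(n/2) stones
            nxt = list(piles)
            nxt[i] = n - v
            options.add(grundy(tuple(nxt), p))
    if p == 1 and any(piles):                 # spend the pass (assumed forbidden
        options.add(grundy(piles, 0))         # from the all-empty position)
    return mex(options)

if __name__ == "__main__":
    for pos in [(1, 0, 0), (1, 1, 0), (1, 1, 1)]:
        print(pos + (0,), grundy(pos, 0), pos + (1,), grundy(pos, 1))
\end{verbatim}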
\begin{flushleft} \large{Acknowledgements}\\ \normalsize{We would like to thank Editage (www.editage.com) for English language editing.} \end{flushleft} \end{document}
2205.11797v4
http://arxiv.org/abs/2205.11797v4
Complexity for exact polynomial optimization strengthened with Fritz John conditions
\documentclass[a4paper]{article} \usepackage{hyperref} \usepackage{graphicx} \usepackage{mathrsfs} \usepackage{enumerate} \usepackage{amsxtra,amssymb,latexsym, amscd,amsthm} \usepackage{indentfirst} \usepackage{color} \usepackage[utf8]{inputenc} \usepackage[mathscr]{eucal} \usepackage{amsfonts} \usepackage{graphics} \usepackage{multirow} \usepackage{array} \usepackage{subfigure} \usepackage{cite} \usepackage{wrapfig} \usepackage{tikz} \usepackage{diagbox} \usepackage{pgfplots} \pgfplotsset{compat=1.15} \usepackage{mathrsfs} \usetikzlibrary{arrows} \newcommand{\footremember}[2]{ \footnote{#2} \newcounter{#1} \setcounter{#1}{\value{footnote}}} \newcommand{\footrecall}[1]{ \footnotemark[\value{#1}]} \def\B{\mathscr B} \def\one{\mathbf 1} \def\R{{\mathbb R}} \def\C{{\mathbb C}} \def\N{{\mathbb N}} \def\LL{{\mathbb L}} \def\cM{\mathcal M} \newcommand\abs[1]{|#1|} \newcommand\set[1]{\{#1\}} \newcommand\norm[1]{||#1||} \DeclareMathOperator{\vrai}{vrai} \DeclareMathOperator{\const}{const} \DeclareMathOperator{\rank}{rank} \DeclareMathOperator{\diag}{diag} \DeclareMathOperator{\FJ}{FJ} \DeclareMathOperator{\KKT}{KKT} \DeclareMathOperator{\bit}{bit} \DeclareMathOperator{\sign}{sign} \DeclareMathOperator{\supp}{supp} \DeclareMathOperator{\dist}{dist} \DeclareMathOperator{\cl}{cl} \DeclareMathOperator{\trace}{trace} \newcommand{\eoproof}{\hfill $\square$} \newcommand{\spro}[1]{\left<#1\right>} \newcommand{\argmin}{\arg\min} \renewcommand{\theequation}{\arabic{equation}} \newtheorem{theorem}{\bf Theorem}\newtheorem{lemma}{\bf Lemma} \newtheorem{problem}{\bf Problem}\newtheorem{algorithm}{\bf Algorithm}\newtheorem{example}{\bf Example}\newtheorem{proposition}{\bf Proposition}\newtheorem{corollary}{\bf Corollary}\newtheorem{definition}{\bf Definition}\newtheorem{remark}{\bf Remark}\newtheorem{assumption}{\bf Assumption} \providecommand{\keywords}[1] { \small \textbf{\textbf{Keywords:}} #1 } \begin{document} \definecolor{qqzzff}{rgb}{0,0.6,1} \definecolor{ududff}{rgb}{0.30196078431372547,0.30196078431372547,1} \definecolor{xdxdff}{rgb}{0.49019607843137253,0.49019607843137253,1} \definecolor{ffzzqq}{rgb}{1,0.6,0} \definecolor{qqzzqq}{rgb}{0,0.6,0} \definecolor{ffqqqq}{rgb}{1,0,0} \definecolor{uuuuuu}{rgb}{0.26666666666666666,0.26666666666666666,0.26666666666666666} \newcommand{\vi}[1]{\textcolor{blue}{#1}} \newif\ifcomment \commentfalse \commenttrue \newcommand{\comment}[3]{\ifcomment {\color{#1}\bfseries\sffamily#3 } \marginpar{\textcolor{#1}{\hspace{3em}\bfseries\sffamily #2}} \else } \newcommand{\mapr}[1]{{{\color{blue}#1}}} \newcommand{\revise}[1]{{{\color{blue}#1}}} \title{Complexity for exact polynomial optimization strengthened with Fritz John conditions} \author{Ngoc Hoang Anh Mai\footremember{1}{CNRS; LAAS; 7 avenue du Colonel Roche, F-31400 Toulouse; France.} } \maketitle \begin{abstract} Let $f,g_1,\dots,g_m$ be polynomials of degree at most $d$ with real coefficients in a vector of variables $x=(x_1,\dots,x_n)$. Assume that $f$ is non-negative on a basic semi-algebraic set $S$ defined by polynomial inequalities $g_j(x)\ge 0$, for $j=1,\dots,m$. Our previous work [arXiv:2205.04254 (2022)] has stated several representations of $f$ based on the Fritz John conditions. This paper provides some explicit degree bounds depending on $n$, $m$, and $d$ for these representations. In application to polynomial optimization, we obtain explicit rates of finite convergence of the hierarchies of semidefinite relaxations based on these representations. 
\end{abstract} \keywords{sum-of-squares; Nichtnegativstellensatz; gradient ideal; Fritz John conditions; polynomial optimization; convergence rate; finite convergence} \tableofcontents \section{Introduction} \paragraph{Nichtnegativstellens\"atze and their degree bounds.} Hilbert's 17th problem concerns the representation of a non-negative polynomial as a sum of squares of rational functions. Artin gave a positive answer to this problem in \cite{artin1927zerlegung}. Later Krivine \cite{krivine1964anneaux} and Stengle \cite{stengle1974nullstellensatz} stated a Nichtnegativstellensatz. It says that we can express a polynomial $f$ non-negative on a basic semi-algebraic set $S$ as a linear combination of the polynomials $g_j$ defining $S$ with weights which are sums of squares of rational functions. Lombardi, Perrucci, and Roy \cite{lombardi2020elementary} have recently provided a degree bound for Krivine--Stengle's Nichtnegativstellensatz. It is a tower of five exponentials depending only on the number of variables, the number of polynomials $g_j$ defining $S$, and the degrees of $f,g_j$. Consequently, they obtain the best-known degree bounds for the numerators and denominators in Hilbert--Artin's Nichtnegativstellensatz as a tower of five exponentials. \paragraph{Positivstellens\"atze.} Today Positivstellens\"atze, representations of polynomials positive on a basic semi-algebraic set, are of broad interest with influential applications in polynomial optimization. Let us revisit two representations commonly used in practice: Schm\"udgen's Positivstellensatz \cite{schmudgen1991thek} says that we can write a polynomial $f$ positive on a compact basic semi-algebraic set $S$ as a linear combination of products of the polynomials defining $S$ with weights which are sums of squares of polynomials. Putinar presents in \cite{putinar1993positive} a Positivstellensatz saying that we can decompose a polynomial $f$ positive on a compact basic semi-algebraic set $S$ satisfying the so-called Archimedean condition (stated in \cite{mai2022exact}) as a linear combination of the polynomials defining $S$ with weights which are sums of squares of polynomials. \paragraph{Polynomial optimization.} Let $\R[x]$ denote the ring of polynomials with real coefficients in the vector of variables $x$. Given $r\in\N$, denote by $\R_r[x]$ the linear space of polynomials in $\R[x]$ of degree at most $r$. Denote by $\Sigma^2[x]$ (resp. $\Sigma^2_r[x]$) the cone of sums of squares of polynomials in $\R[x]$ (resp. $\R_r[x]$). Given $f,g_1,\dots,g_m\in\R[x]$, consider the polynomial optimization problem: \begin{equation}\label{eq:pop} f^\star:=\inf\limits_{x\in S(g)} f(x)\,, \end{equation} where $S(g)$ is the basic semi-algebraic set associated with $g=(g_1,\dots,g_m)$, i.e., \begin{equation} S(g):=\{x\in\R^n\,:\,g_j(x)\ge 0\,,\,j=1,\dots,m\}\,. \end{equation} Schm\"udgen's and Putinar's Positivstellens\"atze are both applicable to polynomial optimization. Indeed, the idea is to reformulate the original problem as $f^\star=\sup\{\lambda\in\R\,:\,f-\lambda> 0\text{ on }S(g)\}$ and then replace the constraint ``$f-\lambda> 0\text{ on }S(g)$" with the representations of $f-\lambda$ associated with $g_j$. More explicitly, using Putinar's Positivstellensatz gives a sequence of reals $(\rho_k^\text{Pu})_{k\in\N}$, where \begin{equation} \begin{array}{rl} \rho_k^\text{Pu}:=\sup\limits_{\lambda,\sigma_j}&\lambda\\ &\lambda\in\R\,,\,\sigma_j\in\Sigma^2[x]\,,\\ &f-\lambda=\sigma_0+\sum_{j=1}^m\sigma_jg_j\,,\,\deg(\sigma_0)\le 2k\,,\,\deg(\sigma_jg_j)\le 2k\,. \end{array} \end{equation}
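To fix ideas, here is a toy certificate of this form (our own illustration, not taken from the cited works). For $f=x$ and the single constraint $g_1=1-x^2$, we have $f^\star=-1$, and the identity $x+1=\tfrac{1}{2}(x+1)^2+\tfrac{1}{2}(1-x^2)$ expresses $f-f^\star$ as $\sigma_0+\sigma_1g_1$ with sum-of-squares weights of degree at most $2$, so the above program already attains $f^\star$ at order $k=1$. The following snippet checks the identity symbolically; it assumes the Python package sympy is available.
\begin{verbatim}
import sympy as sp

x = sp.symbols('x')
f, g1 = x, 1 - x**2                      # toy instance: minimize x on {x : 1 - x^2 >= 0}
sigma0 = sp.Rational(1, 2) * (x + 1)**2  # sum-of-squares weight multiplying 1
sigma1 = sp.Rational(1, 2)               # sum-of-squares weight multiplying g1
# order-1 certificate of f - f* >= 0 on S(g) with f* = -1:
assert sp.expand(sigma0 + sigma1 * g1 - (f - (-1))) == 0
\end{verbatim}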
Here $\deg(\cdot)$ stands for the degree of a polynomial. The sequence $(\rho_k^\text{Pu})_{k\in\N}$ converges to $f^\star$ under the Archimedean condition on $S$. Moreover, the program to compute each $\rho_k^\text{Pu}$ is convex, specifically, semidefinite (see \cite{boyd2004convex}). The sequence of these programs is known as Lasserre's hierarchy \cite{lasserre2001global}. A similar process applies to Schm\"udgen's Positivstellensatz to obtain the corresponding sequence of reals $(\rho_k^\text{Sch})_{k\in\N}$ defined by \begin{equation} \begin{array}{rl} \rho_k^\text{Sch}:=\sup\limits_{\lambda,\sigma_j}&\lambda\\ &\lambda\in\R\,,\,\sigma_j\in\Sigma^2[x]\,,\\ &f-\lambda=\sum_{\alpha\in\{0,1\}^m}\sigma_\alpha g_1^{\alpha_1}\dots g_m^{\alpha_m}\,,\,\deg(\sigma_\alpha g_\alpha)\le 2k\,. \end{array} \end{equation} \paragraph{Convergence rates.} Schweighofer \cite{schweighofer2004complexity} (resp. Nie--Schweighofer \cite{nie2007complexity}) analyzes the convergence rates of sequence $(\rho_k^\text{Sch})_{k\in\N}$ (resp. $(\rho_k^\text{Pu})_{k\in\N}$). Despite the polynomial time complexity $\mathcal{O}(\varepsilon^{-c})$ of the former, the latter has unexpected exponential time complexity $\mathcal{O}(\exp(\varepsilon^{-c}))$. Baldi and Mourrain have recently provided in \cite{baldi2021moment} an improved complexity $\mathcal{O}(\varepsilon^{-c})$ for sequence $(\rho_k^\text{Pu})_{k\in\N}$. It relies on the degree bound for Schmudgen's Positivstellensatz on the unit hypercube stated by Laurent and Slot in \cite{laurent2021effective}. In addition, Laurent and Slot obtain the optimal convergence rate $\mathcal{O}(\varepsilon^{-1/2})$ for minimizing a polynomial on the unit hypercube. To do this, Laurent and Slot utilize the polynomial kernel method introduced in \cite{fang2020sum}, where Fang and Fawzi provide the optimal convergence rate $\mathcal{O}(\varepsilon^{-1/2})$ for minimizing a polynomial on the unit sphere. Applying this method again, Slot achieves in \cite{slot2021sum} the optimal convergence rate for $(\rho_k^\text{Sch})_{k\in\N}$ when $S$ is the unit ball or the standard simplex. \paragraph{Finite convergence.} The finite convergences of the two sequences $(\rho_k^\text{Sch})_{k\in\N}$ and $(\rho_k^\text{Pu})_{k\in\N}$ are mainly studied by Scheiderer \cite{scheiderer2000sums,scheiderer2003sums,scheiderer2006sums}, Marshall \cite{marshall2006representations,marshall2009representations} and Nie \cite{nie2014optimality}. In particular, Lasserre's hierarchy has finite convergence under the Archimedean condition and some standard optimality conditions. These conditions make the original polynomial optimization problem necessarily have finitely many global minimizers at which the Karush--Kuhn--Tucker conditions hold for this problem. Recent efforts of Nie--Demmel--Sturmfels \cite{nie2006minimizing}, Demmel--Nie--Powers \cite{demmel2007representations}, Nie \cite{nie2013exact}, and our previous work \cite{mai2022exact} are to obtain the finite convergence of Lasserre's hierarchy through the information of the Jacobian of the objective and constrained polynomials. However, rates of finite convergence for these methods have been open until now. \paragraph{Nichtnegativstellens\"atze based on Fritz John conditions.} Let $V(h_{\FJ})$ and $V(h_{\FJ}^+)$ (defined later in \eqref{eq:.polyFJ} and \eqref{eq:.polyFJ.plus}) be the sets of points at which the Fritz John conditions hold for problem \eqref{eq:pop}. 
In our previous works \cite{mai2022exact}, we use the finiteness of the images of $S(g) \cap V(h_{\FJ})$ and $S(g) \cap V(h_{\FJ}^+)$ under $f$ to construct representations of $f-f^\star$ without denominators involving quadratic modules and preorderings associated with these two intersections. Here $f^\star$ is defined as in \eqref{eq:pop}. These non-negativity certificates allow us to tackle the following two cases: (i) polynomial $f-f^\star$ is non-negative with infinitely many zeros on basic semi-algebraic sets $S(g)$ and (ii) the Karush–Kuhn–Tucker conditions do not hold for problem \eqref{eq:pop} at any zero of $f-f^\star$ on $S(g)$. \paragraph{Contribution.} In this paper, we aim to provide some explicit degree bounds for Nichtnegativstellens\"atze in our previous work \cite{mai2022exact} and the work of Demmel, Nie, and Powers \cite{demmel2007representations}. To do this, we rely on the constructiveness of these representations and utilize the degree bounds for Krivine--Stengle's Nichtnegativstellens\"atze analyzed by Lombardi, Perrucci, and Roy \cite{lombardi2020elementary} as well as the upper bound on the number of connected components of a basic semi-algebraic set due to Coste \cite{coste2000introduction}. We briefly sketch the analysis for the representation of $f-f^\star$ using the set $V(h_{\FJ})$ as follows: We first decompose $V(h_{\FJ})$ into finitely many connected components. Thanks to Coste \cite{coste2000introduction}, the number of these components is bounded from above by a value depending on the number of polynomials defining $V(h_{\FJ})$ and the degrees of these polynomials. To prove that $f$ is constant on each connected component, we rely on the Fritz John conditions generating $V(h_{\FJ})$ and the assumption that the image of the set of critical points $C(g)$ (defined later in \eqref{eq:critical.set}) under $f$ is finite. It turns out that $f$ has finitely many values on $V(h_{\FJ})$. To obtain the representation of $f-f^\star$ with explicit degree bound, we explicitly construct a ``variety version" of Lagrangian interpolation and utilize Krivine--Stengle's Nichtnegativstellens\"atze with degree bound given in \cite{lombardi2020elementary}. We accordingly obtain the explicit convergence rate of each corresponding hierarchy of semidefinite relaxations for a polynomial optimization problem. The rates of these hierarchies depend on the number of variables, the number of constraints, and the degrees of the input polynomials. In particular, they do not depend on the coefficients of the input polynomials. Because of the huge bounds on Krivine--Stengle's Nichtnegativstellens\"atze and the number of connected components of a basic semi-algebraic set, the rates are extremely large. \paragraph{Organization.} We organize the paper as follows: Section \ref{sec:preli} is to recall some necessary tools from real algebraic geometry. Section \ref{sec:degree.bound} states the explicit degree bounds for our Nichtnegativstellens\"atze based on the Fritz John conditions. Section \ref{sec:application} presents the application of these bounds in analyzing the complexity of the corresponding hierarchies of the exact semidefinite relaxations for a polynomial optimization problem. Section \ref{sec:bound.KKT} provides the explicit degree bounds for the representation based on the Karush--Kuhn--Tucker conditions by Demmel, Nie, and Powers in \cite{demmel2007representations}. 
Section \ref{sec:implicit.quadraic} shows the implicit degree bound for the representations involving quadratic modules. \section{Preliminaries} \label{sec:preli} This section presents some preliminaries from real algebraic geometry needed to prove our main results. \subsection{First-order optimality conditions} Given $p\in\R[x]$, we denote by $\nabla p$ the gradient of $p$, i.e., $\nabla p=(\frac{\partial p}{\partial x_1},\dots,\frac{\partial p}{\partial x_n})$. We say that the Fritz John conditions hold for problem \eqref{eq:pop} at $u\in S(g)$ if \begin{equation}\label{eq:FJcond} \begin{cases} \exists (\lambda_0,\dots,\lambda_m)\in[0,\infty)^{m+1}\,:\\ \lambda_0 \nabla f(u)=\sum_{j=1}^m \lambda_j \nabla g_j(u)\,,\\ \lambda_j g_j(u) =0\,,\,j=1,\dots,m\,,\\ \sum_{j=0}^m \lambda_j^2=1\,, \end{cases} \Leftrightarrow \begin{cases} \exists (\lambda_0,\dots,\lambda_m)\in\R^{m+1}\,:\\ \lambda_0^2 \nabla f(u)=\sum_{j=1}^m \lambda_j^2 \nabla g_j(u)\,,\\ \lambda_j^2 g_j(u) =0\,,\,j=1,\dots,m\,,\\ \sum_{j=0}^m \lambda_j^2=1\,. \end{cases} \end{equation} In addition, the Karush--Kuhn--Tucker conditions hold for problem \eqref{eq:pop} at $u\in S(g)$ if \begin{equation}\label{eq:KKT.cond} \begin{cases} \exists (\lambda_1,\dots,\lambda_m)\in[0,\infty)^{m}\,:\\ \nabla f(u)=\sum_{j=1}^m \lambda_j \nabla g_j(u)\,,\\ \lambda_j g_j(u) =0\,,\,j=1,\dots,m\,. \end{cases} \Leftrightarrow \begin{cases} \exists (\lambda_1,\dots,\lambda_m)\in\R^{m}\,:\\ \nabla f(u)=\sum_{j=1}^m \lambda_j^2 \nabla g_j(u)\,,\\ \lambda_j^2 g_j(u) =0\,,\,j=1,\dots,m\,. \end{cases} \end{equation} If $u$ is a local minimizer for problem \eqref{eq:pop}, then the Fritz John conditions hold for problem \eqref{eq:pop} at $u$. In contrast, there exist cases where the Karush--Kuhn--Tucker conditions do not hold for problem \eqref{eq:pop} at any local minimizer of this problem. We denote by $W(f,g)$ the set of all points at which the Fritz John conditions hold but the Karush--Kuhn--Tucker conditions do not hold for problem \eqref{eq:pop}. \subsection{Sets of critical points} Given $g_1,\dots,g_m\in\R[x]$, let $\varphi^g:\R^{n}\to \R^{(n+m)\times m}$ be a function associated with $g=(g_1,\dots,g_m)$ defined by \begin{equation} \varphi^g(x)=\begin{bmatrix} \nabla g(x)\\ \diag(g(x)) \end{bmatrix}= \begin{bmatrix} \nabla g_1(x)& \dots& \nabla g_m(x)\\ g_1(x)&\dots&0\\ .&\dots&.\\ 0&\dots&g_m(x) \end{bmatrix}\,. \end{equation} Given a real matrix $A$, we denote by $\rank(A)$ the dimension of the vector space generated by the columns of $A$ over $\R$. We say that a set $\Omega$ is finite if its cardinal number is a non-negative integer. Let $C(g)$ be the set of critical points associated with $g$ defined by \begin{equation}\label{eq:critical.set} C(g):=\{x\in\R^n\,:\,\rank(\varphi^g(x))< m\}. \end{equation} Given a real matrix $A$, we denote by $\rank^+(A)$ the largest number of columns of $A$ whose convex hull over $\R$ has no zero. Let $C^+(g)$ be the set of critical points associated with $g$ defined by \begin{equation} C^+(g):=\{x\in \R^n\,:\,\rank^+(\varphi^g(x))< m\}. \end{equation} \subsection{Quadratic modules, preoderings, and ideals} Given $g_1,\dots,g_m\in\R[x]$, let $Q_r(g)[x]$ be the truncated quadratic module of order $r\in\N$ associated with $g=(g_1,\dots,g_m)$, i.e., \begin{equation} Q_r(g)[x]:=\{\sigma_0 +\sum_{j=1}^m g_j\sigma_j\,:\,\sigma_j\in\Sigma^2[x]\,,\,\deg(\sigma_0)\le 2r\,,\,\deg(g_j\sigma_j)\le 2r\}\,. 
\end{equation} Let $\Pi g$ be the vector of products of $g_1,\dots,g_m$ defined by \begin{equation}\label{eq:prod.g} \Pi g:=(g^\alpha)_{\alpha\in\{0,1\}^m\backslash \{0\}}\,, \end{equation} where $\alpha=(\alpha_1,\dots,\alpha_m)$ and $g^\alpha:=g_1^{\alpha_1}\dots g_m^{\alpha_m}$. We call $Q_r(\Pi g)[x]$ the truncated preordering of order $r\in\N$ generated by $g$, denoted by $P_r(g)[x]$. Obviously, if $m=1$, it holds that $P_r(g)[x]=Q_r(g)[x]$. Given $h_1,\dots,h_l\in\R[x]$, let $V(h)$ be the variety generated by $h=(h_1,\dots,h_l)$, i.e., \begin{equation} V(h):=\{x\in\R^n\,:\,h_j(x)=0\,,\,j=1,\dots,l\}\,. \end{equation} and let $I_r(h)[x]$ be the truncated ideal of order $r$ generated by $h$, i.e., \begin{equation} I_r(h)[x]:= \{\sum_{j=1}^l h_j \psi_j\,:\,\psi_j\in\R[x]\,,\,\deg(h_j \psi_j)\le 2r\}\,. \end{equation} \subsection{Degree bound for Krivine--Stengle's Nichtnegativstellens\"atze} We denote by $\bit(d)$ the number of bits of $d\in\N$, i.e., \begin{equation} \bit(d):= \begin{cases} 1 & \text{if } d = 0\,,\\ k & \text{if } d \ne 0 \text{ and } 2^{k-1}\le d < 2^k. \end{cases} \end{equation} Given $n,d,s\in\N$, set \begin{equation} b(n,d,s):=2^{ 2^{\left(2^{\max\{2,d\}^{4^{n}}}+s^{2^{n}}\max\{2, d\}^{16^{n}\bit(d)} \right)}}\,. \end{equation} We recall the degree bound for Krivine--Stengle's Nichtnegativstellens\"atze analyzed by Lombardi, Perrucci, and Roy \cite{lombardi2020elementary} in the following two lemmas: \begin{lemma}\label{lem:pos} Let $g_1,\dots,g_m,h_1,\dots,h_l\in\R_d[x]$. Assume that $S(g)\cap V(h)=\emptyset$ with $g:=(g_1,\dots,g_m)$ and $h:=(h_1,\dots,h_l)$. Set $r=b(n,d,m+l+1)/2$. Then it holds that $-1 \in P_r(g)[x]+I_r(h)[x]$. \end{lemma} \begin{lemma}\label{lem:pos2} Let $p,g_1,\dots,g_m,h_1,\dots,h_l\in\R_d[x]$. Assume that $p$ vanishes on $S(g)\cap V(h)$ with $g:=(g_1,\dots,g_m)$ and $h:=(h_1,\dots,h_l)$. Set $r:=b(n,d,m+l+1)/2$ and $s:=2\lfloor r/d\rfloor$. Then it holds that $-p^s \in P_r(g)[x]+ I_r(h)[x]$. \end{lemma} \subsection{Upper bound on the number of connected components of a basic semi-algebraic set} We generalize the definition of basic semi-algebraic sets as follows: A semi-algebraic subset of $\R^n$ is a subset of the form \begin{equation}\label{eq:def.semi.set} \bigcup_{i=1}^t\bigcap_{j=1}^{r_i}\{x\in\R^n\,:\,f_{ij}(x)*_{ij}0\}\,, \end{equation} where $f_{ij}\in\R[x]$ and $*_{ij}$ is either $>$ or $=$. Given two semi-algebraic sets $A\subset \R^n$ and $B\subset \R^m$, we say that a mapping $f : A \to B$ is semi-algebraic if its graph $\{(x,f(x))\,:\,x\in A\}$ is a semi-algebraic set in $\R^{n+m}$. A semi-algebraic subset $A\subset \R^n$ is said to be semi-algebraically path connected if for every $x,y$ in $A$, there exists a continuous semi-algebraic mapping $\phi:[0,1] \to A$ such that $\phi(0) = x$ and $\phi(1) = y$. Note that $\phi$ is piecewise-differentiable in this case (see, e.g., \cite[Theorem 1.8.1]{pham2016genericity}). Given $n,d,s\in\N$, set \begin{equation} c(n,d,s):=d(2d-1)^{n+s-1}\,. \end{equation} The upper bound on the number of connected components of a basic semi-algebraic set is stated by Coste \cite[Proposition 4.13]{coste2000introduction} as follows: \begin{lemma}\label{lem:num.connected} Let $g_1,\dots,g_m,h_1,\dots,h_l\in\R_d[x]$ with $d\ge 2$. The number of (semi-algebraically path) connected components of $S(g)\cap V(h)$ is not greater than $c(n,d,m+l)$. \end{lemma} \subsection{Sum-of-squares representations under the finite image assumption} Let $|\cdot|$ stand for the cardinal number of a set. 
Denote by $\delta_{ij}$ the Kronecker delta function at $(i,j)\in\N^2$. We state in the following lemmas the representations of polynomials non-negative on semi-algebraic sets under the finite image assumption: \begin{lemma}\label{lem:quadra} Let $f,g_1,\dots,g_m\in\R_d[x]$ and $h_1,\dots,h_l\in\R_{d+1}[x]$. Assume that $f$ is non-negative on $S(g)$ with $g=(g_1,\dots,g_m)$ and $f(V(h))$ is finite with $h=(h_1,\dots,h_l)$. Set $r:=|f(V(h))|$ and $u:=b(n,d+1,m+l+2)/2$. Then there exists $q\in P_w(g)[x]$ with $w=dr+u$ such that $f - q$ vanishes on $V(h)$. \end{lemma} \begin{proof} By assumption, we get $f(V(h)) = \{t_1 ,\dots, t_r \} \subset \R$, where $t_i\ne t_j$ if $i\ne j$. For $j=1,\dots,r$, let $W_j:=V(h,f-t_j)$. Then $W_j$ is a real variety generated by $l+1$ polynomials in $\R_{d+1}[x]$. It is clear that $f(W_j)=\{t_j\}$. Define the following polynomials: \begin{equation}\label{eq:lagrange.pol} p_j(x)=\prod_{i\ne j}\frac{f(x)-t_i}{t_j-t_i}\,,\,j=1,\dots,r\,. \end{equation} It is easy to check that $p_j(W_i)=\{\delta_{ji}\}$ and $\deg(p_j)\le d(r-1)$. Without loss of generality, we assume that there is $s\in \{0,1,\dots,r-1\}$ such that $W_j\cap S(g)= \emptyset$, for $j=1,\dots,s$, and $W_i\cap S(g)\ne \emptyset$, for $i=s+1,\dots,r$. Let $j\in\{1,\dots,s\}$. Since $W_j \cap S(g) = \emptyset$, Lemma \ref{lem:pos} says that $-1 \in P_u(g)[x]+I_u(h,f-t_j)[x]$. It implies that there exists $v_j \in P_u(g)[x]$ such that $-1 = v_j$ on $W_j$. We have $f = s_1 - s_2$ for the SOS polynomials $s_1 =(f+\frac{1}{2})^2$ and $s_2 = f^2+\frac{1}{4}$. It implies that $f = s_1 + v_j s_2$ on $W_j$. Let $q_j = s_1 +v_j s_2 \in P_{u+d}(g)[x]$. Since $f\ge 0$ on $S(g)$, it holds that $f=t_i\ge 0$ on $W_i$, for $i=s+1,\dots,r$. Now letting \begin{equation}\label{eq:rep.q} q =\sum_{j=1}^s q_j p_j^2+\sum_{i=s+1}^r t_i p_i^2\,, \end{equation} we obtain $q\in P_w(g)[x]$ since $\deg(t_i p_i^2)\le 2\deg(p_i)\le 2d(r-1)\le 2w$ and \begin{equation} \deg(q_j p_j^2)\le \deg(q_j)+2\deg(p_j)\le 2(d+u+d(r-1))=2w\,. \end{equation} Hence $f - q$ vanishes on $V(h)=W_1\cup\dots\cup W_r$. \end{proof} \begin{lemma}\label{lem:quadra.deno} Let $f,g_1,\dots,g_m\in\R_d[x]$ and $h_1,\dots,h_l\in\R_{d+1}[x]$. Assume that $f$ is non-negative on $S(g)$ with $g=(g_1,\dots,g_m)$ and $f(V(h)\backslash A)$ is finite with $h=(h_1,\dots,h_l)$ and $A\subset \R^n$. Set $r:=|f(V(h)\backslash A)|$ and $u:=b(n,d+1,m+l+2)/2$. Then there exists $q\in P_w(g)[x]$ with $w=dr+u$ such that $f - q$ vanishes on $V(h)\backslash A$. \end{lemma} \begin{proof} By assumption, we get $f(V(h)\backslash A) = \{t_1 ,\dots, t_r \} \subset \R$, where $t_i\ne t_j$ if $i\ne j$. We now proceed in much the same way as in the proof of Lemma \ref{lem:quadra} to get $q\in P_w(g)[x]$ in \eqref{eq:rep.q}. Hence $f - q$ vanishes on $V(h)\backslash A\subset W_1\cup\dots\cup W_r$. \end{proof} \begin{lemma}\label{lem:quadra2} Let $f,g_1,\dots,g_m\in\R_d[x]$ and $h_1,\dots,h_l\in\R_{d+1}[x]$. Assume that $f$ is non-negative on $S(g)$ with $g=(g_1,\dots,g_m)$ and $f(S(g)\cap V(h))$ is finite with $h=(h_1,\dots,h_l)$. Set $r:=|f(S(g)\cap V(h))|$. Then there exists $q\in \Sigma^2_w[x]$ with $w=d(r-1)$ such that $f - q$ vanishes on $S(g)\cap V(h)$. \end{lemma} \begin{proof} By assumption, we get $f(S(g)\cap V(h)) = \{t_1 ,\dots, t_r \} \subset \R$, where $t_i\ne t_j$ if $i\ne j$. For $j=1,\dots,r$, let $W_j:=V(h,f-t_j)$. Then $W_j$ is a real variety generated by $l+1$ polynomials in $\R_{d+1}[x]$. It is clear that $f(W_j)=\{t_j\}$.
Define polynomials $p_j$, for $j=1,\dots,r$, as in \eqref{eq:lagrange.pol}. It is easy to check that $W_i\cap S(g)\ne \emptyset$, $p_j(W_i)=\{\delta_{ji}\}$ and $\deg(p_j)\le d(r-1)$. Since $f\ge 0$ on $S(g)$, it holds that $f=t_i\ge 0$ on $W_i$, for $i=1,\dots,r$. Now letting $q =\sum_{i=1}^r t_i p_i^2$, we obtain $q\in \Sigma_w^2[x]$ with $w=d(r-1)$, and hence $f - q$ vanishes on $W_1\cup\dots\cup W_r\supset S(g)\cap V(h)$. \end{proof} \subsection{Moment/Sum-of-squares relaxations for polynomial optimization} We recall some preliminaries of the Moment/Sum-of-squares relaxations originally developed by Lasserre in \cite{lasserre2001global}. Given $d\in\N$, let $\N^n_d:=\{\alpha\in\N^n\,:\,\sum_{j=1}^n \alpha_j\le d\}$. Given $d\in\N$, we denote by $v_d$ the vector of monomials in $x$ of degree at most $d$, i.e., $v_d=(x^\alpha)_{\alpha\in\N^n_d}$ with $x^\alpha:=x_1^{\alpha_1}\dots x_n^{\alpha_n}$. For each $p\in\R_d[x]$, we write $p=c(p)^\top v_d=\sum_{\alpha\in\N^n_d}p_\alpha x^\alpha$, where $c(p)$ is denoted by the vector of coefficient of $p$, i.e., $c(p)=(p_\alpha)_{\alpha\in\N^n_d}$ with $p_\alpha\in\R$. Given $A\in\R^{r\times r}$ being symmetric, we say that $A$ is positive semidefinite, denoted by $A\succeq 0$, if every eigenvalue of $A$ is non-negative. \paragraph{Moment/Localizing matrices.} Given $y=(y_\alpha)_{\alpha\in\N^n}\subset \R$, let $L_y:\R[x]\to\R$ be the Riesz linear functional defined by $L_y(p)=\sum_{\alpha\in\N^n} p_\alpha y_\alpha$ for every $p\in\R[x]$. Given $d\in\N$, $p\in\R[x]$ and $y=(y_\alpha)_{\alpha\in\N^n}\subset \R$, let $M_d(y)$ be the moment matrix of order $d$ defined by $(y_{\alpha+\beta})_{\alpha,\beta\in\N^n_d}$ and let $M_d(py)$ be the localizing matrix of order $d$ associated with $p$ defined by $(\sum_{\gamma\in\N^n}p_\gamma y_{\alpha+\beta+\gamma})_{\alpha,\beta\in\N^n_d}$. \paragraph{Truncated quadratic modules/ideals.} Given $g_1,\dots,g_m\in\R[x]$, let $Q_d(g)[x]$ be the truncated quadratic module of order $d$ associated with $g=(g_1,\dots,g_m)$ defined by \begin{equation} Q_d(g)[x]=\{\sigma_0+\sum_{j=1}^m\sigma_jg_j\,:\,\sigma_j\in\Sigma^2[x]\,,\,\deg(\sigma_0)\le 2d\,,\,\deg(\sigma_jg_j)\le 2d\}\,. \end{equation} Given $h_1,\dots,h_l\in\R[x]$, let $I_d(h)$ be the truncated ideal of order $d$ associated with $h=(h_1,\dots,h_l)$ defined by \begin{equation} I_d(h)[x]=\{\sum_{j=1}^l\psi_jh_j\,:\,\psi_j\in\R[x]\,,\,\deg(\psi_jh_j)\le 2d\}\,. \end{equation} \paragraph{Problem statement.} Consider polynomial optimization problem: \begin{equation}\label{eq:pop.equality} \bar f^\star:=\inf\limits_{x\in S(g)\cap V(h)} f(x)\,, \end{equation} where $g=(g_1,\dots,g_m)$ and $h=(h_1,\dots,h_l)$ with $f,g_i,h_j\in\R[x]$. \subsubsection{The case without denominators} Given $k\in\N$ and $f,g_1,\dots,g_m,h_1,\dots,h_l\in\R[x]$, consider the following primal-dual semidefinite programs associated with $f$, $g=(g_1,\dots,g_m)$ and $h=(h_1,\dots,h_l)$: \begin{equation}\label{eq:mom.relax} \begin{array}{rl} \tau_k(f,g,h):=\inf\limits_y& L_y(f)\\ \text{s.t} &M_k(y)\succeq 0\,,\,M_{k-d_j}(g_jy)\succeq 0\,,\,j=1,\dots,m\,,\\ &M_{k-r_t}(h_ty)=0\,,\,t=1,\dots,l\,,\,y_0=1\,, \end{array} \end{equation} \begin{equation}\label{eq:sos.relax} \begin{array}{rl} \rho_k(f,g,h):=\sup\limits_{\xi,G_j,u_t} & \xi\\ \text{s.t} & G_j\succeq 0\,,\\ &f-\xi=v_k^\top G_0v_k+\sum_{j=1}^m g_jv_{k-d_j}^\top G_jv_{k-d_j}\\ &\qquad\qquad+\sum_{t=1}^l h_tu_t^\top v_{2r_t}\,,\\ \end{array} \end{equation} where $d_j=\lceil \deg(g_j)/2\rceil$ and $r_t=\lceil \deg(h_t)/2\rceil$. 
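To make the moment relaxation \eqref{eq:mom.relax} concrete, here is a minimal numerical sketch (a toy example of ours; it assumes the Python package cvxpy together with an SDP-capable solver such as SCS). We take $n=1$, $m=1$, $l=0$, $f(x)=x^4-x^2$, and $g_1(x)=1-x^2$, and solve the relaxation of order $k=2$. In this univariate case $M_2(y)$ and $M_1(g_1y)$ are small Hankel-type matrices in the pseudo-moments $y_0,\dots,y_4$, and the relaxation returns (up to solver accuracy) the exact minimum $-1/4$.
\begin{verbatim}
import cvxpy as cp

# Order k = 2 moment relaxation for minimizing f(x) = x^4 - x^2
# over S(g) = {x : 1 - x^2 >= 0}.
y = cp.Variable(5)                     # pseudo-moments y_0, ..., y_4

M2 = cp.Variable((3, 3), PSD=True)     # moment matrix M_2(y)
M1g = cp.Variable((2, 2), PSD=True)    # localizing matrix M_1(g_1 y), g_1(x) = 1 - x^2

constraints = [y[0] == 1]
constraints += [M2[i, j] == y[i + j]
                for i in range(3) for j in range(3)]
constraints += [M1g[i, j] == y[i + j] - y[i + j + 2]
                for i in range(2) for j in range(2)]

prob = cp.Problem(cp.Minimize(y[4] - y[2]), constraints)   # L_y(f) = y_4 - y_2
prob.solve()
print(prob.value)                      # approximately -0.25
\end{verbatim}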
Using \cite[Lemma 15]{mai2022exact}, we obtain \begin{equation}\label{eq:equi.sos} \rho_k(f,g,h)=\sup_{\xi\in\R}\{ \xi\,:\,f-\xi\in Q_k(g)[x]+I_k(h)[x]\}\,. \end{equation} We call the primal-dual semidefinite programs \eqref{eq:mom.relax}-\eqref{eq:sos.relax} the Moment/Sum-of-squares relaxations of order $k$ for problem \eqref{eq:pop.equality}. We state in the following lemma some recent results involving the Moment/Sum-of-squares relaxations: \begin{lemma}\label{lem:mom.sos} Let $f\in\R_d[x]$, $g_1,\dots,g_m\in\R[x]$ and $h_1,\dots,h_l\in\R_{d+1}[x]$. Let $\bar f^\star$ be as in \eqref{eq:pop.equality} with $g=(g_1,\dots,g_m)$ and $h=(h_1,\dots,h_l)$. Then the following statements hold: \begin{enumerate} \item For every $k\in\N$, $\tau_k(f,g,h)\le \tau_{k+1}(f,g,h)$ and $\rho_k(f,g,h)\le \rho_{k+1}(f,g,h)$. \item For every $k\in\N$, $\rho_k(f,g,h)\le \tau_{k}(f,g,h)\le \bar f^\star$. \item If $S(g)\cap V(h)$ has non-empty interior, then for $k\in\N$ sufficiently large, the Slater condition holds for the Moment relaxation \eqref{eq:mom.relax} of order $k$. \item If $S(g)\cap V(h)$ satisfies the Archimedean condition, then $\rho_k(f,g,h)\to \bar f^\star$ as $k\to \infty$. \item If there exists $R>0$ such that $g_m+h_l=R-x_1^2-\dots-x_n^2$, then for $k\in\N$ sufficiently large, the Slater condition holds for the SOS relaxation \eqref{eq:sos.relax} of order $k$. \item If there exists $q\in Q_w(g)[x]$ with $2w\ge d+1$ such that $f-\bar f^\star-q$ vanishes on $V(h)$, then $\rho_r(f,g,h)=\bar f^\star$ with $r=b(n,2w,l+1)/2$. \item If there exists $q\in \Sigma_w^2[x]$ with $2w\ge d+1$ such that $f-\bar f^\star-q$ vanishes on $V(h)\cap S(g)$, then $\rho_r(f,\Pi g,h)=\bar f^\star$ with $r=b(n,2w,m+l+1)/2$. \end{enumerate} \end{lemma} \begin{proof} The proofs of the first five statements can be found in \cite{mai2022exact}. Let us prove the sixth statement. Let $u=f-\bar f^\star-q$. By assumption, we get $u\in\R_{2w}[x]$ and $u=0$ on $V(h)$. Set $s=2\lfloor r/(2w)\rfloor$. From this, Lemma \ref{lem:pos2} says that there exists $\sigma \in \Sigma_r^2[x]$ such that $u^{2s} + \sigma \in I_r(h)[x]$. Let $c=\frac{1}{2s}$. Then it holds that $1+t+ct^{2s}\in\Sigma^2_s[t]$. Thus for all $\varepsilon>0$, we have \begin{equation} f-\bar f^\star+\varepsilon=q + \varepsilon(1+\frac{u}{\varepsilon}+c\left(\frac{u}{\varepsilon}\right)^{2s})-c\varepsilon^{1-2s}(u^{2s} + \sigma ) +c\varepsilon^{1-2s}\sigma\in Q_r(g)[x]+I_r(h)[x]\,. \end{equation} Thus, for all $\varepsilon>0$, $\bar f^\star-\varepsilon$ is feasible for the supremum in \eqref{eq:equi.sos} defining $\rho_r(f,g,h)$. It gives $\rho_r(f,g,h)\ge \bar f^\star-\varepsilon$, for all $\varepsilon>0$, and, in consequence, we get $\rho_r(f,g,h)\ge \bar f^\star$. Using the second statement, we obtain that $\rho_r(f,g,h)= \bar f^\star$, yielding the sixth statement. We now prove the final statement. Let $u=f-\bar f^\star-q$. By assumption, we get $u=0$ on $S(g)\cap V(h)$. Set $s=2\lfloor r/(2w)\rfloor$. From this, Lemma \ref{lem:pos2} says that there exists $\eta \in P_r(g)[x]$ such that $u^{2s} + \eta \in I_r(h)[x]$. Let $c=\frac{1}{2s}$. Then it holds that $1+t+ct^{2s}\in\Sigma_s^2[t]$. Thus for all $\varepsilon>0$, we have \begin{equation} f-\bar f^\star+\varepsilon=q + \varepsilon(1+\frac{u}{\varepsilon}+c\left(\frac{u}{\varepsilon}\right)^{2s})-c\varepsilon^{1-2s}(u^{2s} + \eta) +c\varepsilon^{1-2s}\eta\in P_r(g)[x]+I_r(h)[x]\,. \end{equation} Analysis similar to that in the proof of the sixth statement shows $\rho_r(f,\Pi g,h)= \bar f^\star$, yielding the final statement.
\end{proof} \subsubsection{The case with denominators} Given $k\in\N$ and $f,g_1,\dots,g_m,h_1,\dots,h_l,\theta\in\R[x]\backslash\{0\}$, consider the following primal-dual semidefinite programs associated with $f$, $g=(g_1,\dots,g_m)$ and $h=(h_1,\dots,h_l)$: \begin{equation}\label{eq:mom.relax.deno} \begin{array}{rl} \tau_k(f,g,h,\theta):=\inf\limits_y& L_y(\theta^{\eta(k,f,\theta)} f)\\ \text{s.t} &M_k(y)\succeq 0\,,\,M_{k-d_j}(g_jy)\succeq 0\,,\,j=1,\dots,m\,,\\ &M_{k-r_t}(h_ty)=0\,,\,t=1,\dots,l\,,\,L_y(\theta^{\eta(k,f,\theta)})=1\,, \end{array} \end{equation} \begin{equation}\label{eq:sos.relax.deno} \begin{array}{rl} \rho_k(f,g,h,\theta):=\sup\limits_{\xi,G_j,u_t} & \xi\\ \text{s.t} & G_j\succeq 0\,,\\ &\theta^{\eta(k,f,\theta)}(f-\xi)=v_k^\top G_0v_k+\sum_{j=1}^m g_jv_{k-d_j}^\top G_jv_{k-d_j}\\ &\qquad\qquad+\sum_{t=1}^l h_tu_t^\top v_{2r_t}\,,\\ \end{array} \end{equation} where $d_j=\lceil \deg(g_j)/2\rceil$, $r_t=\lceil \deg(h_t)/2\rceil$, and \begin{equation} \eta(k,f,\theta):=2\lfloor\frac{2k-\deg(f)}{2\deg(\theta)}\rfloor\,. \end{equation} Using \cite[Lemma 15]{mai2022exact}, we obtain \begin{equation}\label{eq:equi.sos.deno} \rho_k(f,g,h,\theta):=\sup_{\xi\in\R}\{ \xi\,:\,\theta^{\eta(k,f,\theta)}(f-\xi)\in Q_k(g)[x]+I_k(h)[x]\}\,. \end{equation} Developed in \cite{mai2021positivity}, primal-dual semidefinite programs \eqref{eq:mom.relax.deno}-\eqref{eq:sos.relax.deno} are another type of the Moment/Sum-of-squares relaxations of order $k$ for problem \eqref{eq:pop.equality}. We state in the following lemma some recent results involving this type of Moment/Sum-of-squares relaxations: \begin{lemma}\label{lem:mom.sos.deno} Let $f\in\R_d[x]$, $g_1,\dots,g_m\in\R[x]$, $h_1,\dots,h_l\in\R_{d+1}[x]$, and $\theta\in\R_{2u}[x]\backslash\{0\}$. Let $\bar f^\star$ be as in \eqref{eq:pop.equality} with $g=(g_1,\dots,g_m)$ and $h=(h_1,\dots,h_l)$. Then the following statements hold: \begin{enumerate} \item For every $k\in\N$, $\tau_k(f,g,h,\theta)\le \tau_{k+1}(f,g,h,\theta)$, $\rho_k(f,g,h,\theta)\le \rho_{k+1}(f,g,h,\theta)$, and $\rho_k(f,g,h,\theta)\le \tau_{k}(f,g,h,\theta)$. \item If $S(g)\cap V(h)$ has non-empty interior, for $k\in\N$ sufficient large, the Slater condition holds for the Moment relaxation \eqref{eq:mom.relax} of order $k$. \item If one of the following two conditions holds: \begin{enumerate} \item problem \eqref{eq:pop.equality} has an optimal solution $x^\star$ such that $\theta(x^\star)>0$; \item there exists a sequence of feasible solutions $(x^{(t)})_{t\in\N}$ for problem \eqref{eq:pop.equality} such that $(f(x^{(t)}))_{t\in\N}$ converges to $\bar f^\star$ and $\theta(x^{(t)})>0$, \end{enumerate} then for every $k\in\N$, $\rho_k(f,g,h,\theta)\le \bar f^\star$. \item If $\rho_k(f,g,h,\theta)\le \bar f^\star$, for every $k\in\N$, and there exists $q\in Q_{w}(g)[x]$ with $2(w+u)\ge d+1$ such that $\theta(f-\bar f^\star-q)$ vanishes on $V(h)$, then $\rho_r(f,g,h,\theta)=\bar f^\star$ with $r=b(n,2(w+u),l+1)/2$. \end{enumerate} \end{lemma} \begin{proof} The proofs of the first two statements are similar to the ones of Lemma \ref{lem:mom.sos} (see also in \cite{mai2021positivity}). Let us prove the third statement. Let $\varepsilon>0$. By \eqref{eq:equi.sos.deno}, we get \begin{equation}\label{eq:sup.prop} \theta^{\eta(k,f,\theta)}(f-\rho_k(f,g,h,\theta)+\varepsilon)\in Q_k(g)[x]+I_k(h)[x]\,. \end{equation} Consider the following two cases: \begin{itemize} \item Assume that the condition (a) holds. 
By \eqref{eq:sup.prop}, it follows that $\theta(x^\star)^{\eta(k,f,\theta)}(\bar f^\star-\rho_k(f,g,h,\theta)+\varepsilon)\ge 0$. Since $\theta(x^\star)>0$, $\bar f^\star\ge \rho_k(f,g,h,\theta)-\varepsilon$. The result follows since $\varepsilon$ is arbitrary. \item Assume that the condition (b) holds. By \eqref{eq:sup.prop}, it follows that $\theta(x^{(t)})^{\eta(k,f,\theta)}(f(x^{(t)})-\rho_k(f,g,h,\theta)+\varepsilon)\ge 0$. Since $\theta(x^{(t)})>0$, $f(x^{(t)})\ge \rho_k(f,g,h,\theta)-\varepsilon$. As $t$ goes to infinity, $\bar f^\star\ge \rho_k(f,g,h,\theta)-\varepsilon$. The result follows since $\varepsilon$ is arbitrary. \end{itemize} Let us prove the fourth statement. Let $p=f-\bar f^\star-q$. By assumption, we get $\theta p\in\R_{2(w+u)}[x]$ and $\theta p=0$ on $V(h)$. Set $s=2\lfloor r/(2(w+u))\rfloor$. From this, Lemma \ref{lem:pos2} says that there exists $\sigma \in \Sigma_r^2[x]$ such that $(\theta p)^{2s} + \sigma \in I_r(h)[x]$. Let $c=\frac{1}{2s}$. Then it holds that $1+t+ct^{2s}\in\Sigma^2_s[t]$. Thus for all $\varepsilon>0$, we have \begin{equation} \theta(f-\bar f^\star+\varepsilon)=\theta q + \varepsilon \theta (1+\frac{p}{\varepsilon}+c\left(\frac{p}{\varepsilon}\right)^{2s})-c(\varepsilon\theta)^{1-2s}((\theta p)^{2s} + \sigma ) +c(\varepsilon\theta)^{1-2s}\sigma\,. \end{equation} It implies that $\theta^{2s}(f-\bar f^\star+\varepsilon)\in Q_r(g)[x]+I_r(h)[x]$. Thus, for all $\varepsilon>0$, $\bar f^\star-\varepsilon$ is feasible for the supremum in \eqref{eq:equi.sos.deno} defining $\rho_r(f,g,h,\theta)$. It gives $\rho_r(f,g,h,\theta)\ge \bar f^\star-\varepsilon$, for all $\varepsilon>0$, and, in consequence, we get $\rho_r(f,g,h,\theta)\ge \bar f^\star$. By assumption, we obtain that $\rho_r(f,g,h,\theta)= \bar f^\star$, yielding the final statement. \end{proof} \section{Explicit degree bounds for the representation based on the Fritz John conditions} \label{sec:degree.bound} This section provides the degree bounds for our five Nichtnegativstellens\"atze based on the Fritz John conditions. Let $\bar\lambda=(\lambda_0,\lambda_1,\dots,\lambda_m)$ be a vector of $m+1$ variables. Set $\lambda:=(\lambda_1,\dots,\lambda_m)$. \subsection{The case of arbitrary multipliers} We state the first main result in the following theorem: \begin{theorem}\label{theo:rep} Let $f,g_1,\dots,g_m\in\R_d[x]$ with $d\ge 1$. Assume that $f$ is non-negative on $S(g)$ with $g:=(g_1,\dots,g_m)$ and $f(C(g))$ is finite. Set \begin{equation}\label{eq:def.w} w:=\frac{1}{2}\times b(n+m+1,d+1,2m+n+3)+d\times {c(n+m+1,d+1,n+m+1)}\,. \end{equation} Then there exists $q\in P_w(g)[x,\bar \lambda]$ such that $f-q$ vanishes on $V(h_{\FJ})$, where $\bar\lambda:=(\lambda_0,\dots,\lambda_m)$ and \begin{equation}\label{eq:.polyFJ} h_{\FJ}:=(\lambda_0\nabla f-\sum_{j=1}^m \lambda_j \nabla g_j,\lambda_1g_1,\dots,\lambda_mg_m,1-\sum_{j=0}^m\lambda_j^2)\,. \end{equation} \end{theorem} \begin{proof} Using Lemma \ref{lem:num.connected}, we decompose $V(h_{\FJ})$ into semi-algebraically path connected components: $Z_1,\dots,Z_s$ with \begin{equation}\label{eq:bound.on.s} s\le c(n+m+1,d+1,n+m+1)\,, \end{equation} since each entry of $h_{\FJ}$ has degree at most $d+1\ge 2$. Accordingly, \cite[Lemma 13]{mai2022exact} shows that $f$ is constant on each $Z_i$. Thus $f(V(h_{\FJ}))$ is finite. Set $r=|f(V(h_{\FJ}))|$. From \eqref{eq:bound.on.s}, we get \begin{equation}\label{eq:ineq} r\le s\le c(n+m+1,d+1,n+m+1)\,. \end{equation} Set \begin{equation} u:=b(n+m+1,d+1,2m+n+3)/2\,.
\end{equation} By using Lemma \ref{lem:quadra}, there exists $q\in P_\xi(g)[x,\bar\lambda]$ with $\xi=dr+u$ such that $f - q$ vanishes on $V(h_{\FJ})$. By \eqref{eq:ineq} and \eqref{eq:def.w}, we obtain $\xi\le w$, and hence $q\in P_w(g)[x,\bar\lambda]$. \end{proof} \begin{remark} Here $h_{\FJ}$ includes polynomials from the Fritz John conditions \eqref{eq:FJcond}. In Theorem \ref{theo:rep}, if we assume further that the ideal generated by $h_{\FJ}$ is real radical (see \cite{mai2022exact}), then $f-q\in I_r(h_{\FJ})$ for some $r\in\N$. However, this further assumption does not play any role in this paper when we apply the representation in Theorem \ref{theo:rep} for polynomial optimization.\end{remark} Our second main result is as follows: \begin{theorem}\label{theo:rep2} Let $f,g_1,\dots,g_m\in\R_d[x]$ with $d\ge 1$. Assume that $f$ is non-negative on $S(g)$ with $g:=(g_1,\dots,g_m)$ and $f(C(g)\cap S(g))$ is finite. Set \begin{equation}\label{eq:def.w2} w:=d\times(c(n+m+1,d+1,n+2m+1)-1)\,. \end{equation} Then there exists $q\in \Sigma_w^2[x,\bar \lambda]$ such that $f-q$ vanishes on $(S(g)\times\R^{m+1})\cap V(h_{\FJ})$, where $\bar\lambda:=(\lambda_0,\dots,\lambda_m)$ and $h_{\FJ}$ is defined as in \eqref{eq:.polyFJ}. \end{theorem} \begin{proof} Using Lemma \ref{lem:num.connected}, we decompose $(S(g)\times\R^{m+1})\cap V(h_{\FJ})$ into semi-algebraically path connected components: $Z_1,\dots,Z_s$ with \begin{equation} s\le c(n+m+1,d+1,n+2m+1)\,, \end{equation} since each entry of $h_{\FJ}$ (resp. $g$) has degree at most $d+1\ge 2$ (resp. $d$). Accordingly, \cite[Lemma 19]{mai2022exact} shows that $f$ is constant on each $Z_i$. Thus the set $f((S(g)\times\R^{m+1})\cap V(h_{\FJ}))$ is finite. Setting $r:=|f((S(g)\times\R^{m+1})\cap V(h_{\FJ}))|$, we get \begin{equation}\label{eq:ineq2} r\le s\le c(n+m+1,d+1,n+2m+1)\,. \end{equation} By using Lemma \ref{lem:quadra2}, there exists $q\in \Sigma^2_\xi[x,\bar\lambda]$ with $\xi=d(r-1)$ such that $f - q$ vanishes on $(S(g)\times\R^{m+1})\cap V(h_{\FJ})$. By \eqref{eq:ineq2} and \eqref{eq:def.w2}, we obtain $\xi\le w$, and hence $q\in \Sigma^2_w[x,\bar\lambda]$. \end{proof} \subsection{The case of nonnegative multipliers} We state the third main result in the following theorem: \begin{theorem}\label{theo:rep.plus} Let $f,g_1,\dots,g_m\in\R_{d}[x]$ with $d\in\N$. Assume that $f$ is non-negative on $S(g)$ with $g:=(g_1,\dots,g_m)$ and $f(C^+(g))$ is finite. Set \begin{equation}\label{eq:def.w3} w:=\frac{1}{2}\times b(n+m+1,d+2,2m+n+3)+d\times{c(n+m+1,d+2,n+m+1)}\,. \end{equation} Then there exists $q\in P_w(g)[x,\bar \lambda]$ such that $f-q$ vanishes on $V(h_{\FJ}^+)$, where $\bar\lambda:=(\lambda_0,\dots,\lambda_m)$ and \begin{equation}\label{eq:.polyFJ.plus} h_{\FJ}^+:=(\lambda_0^2\nabla f-\sum_{j=1}^m \lambda_j^2 \nabla g_j,\lambda_1^2g_1,\dots,\lambda_m^2g_m,1-\sum_{j=0}^m\lambda_j^2)\,. \end{equation} \end{theorem} Here $h_{\FJ}^+$ includes polynomials from the right-hand side of the Fritz John conditions \eqref{eq:FJcond}. The fourth main result is stated as follows: \begin{theorem}\label{theo:rep.plus2} Let $f,g_1,\dots,g_m\in\R_d[x]$ with $d\in\N$. Assume that $f$ is non-negative on $S(g)$ with $g:=(g_1,\dots,g_m)$ and $f(C^+(g)\cap S(g))$ is finite. Set \begin{equation}\label{eq:def.w4} w:=d\times(c(n+m+1,d+2,n+2m+1)-1)\,. \end{equation} Then there exists $q\in \Sigma_w^2[x,\bar \lambda]$ such that $f-q$ vanishes on $(S(g)\times\R^{m+1})\cap V(h_{\FJ}^+)$, where $\bar\lambda:=(\lambda_0,\dots,\lambda_m)$ and $h_{\FJ}^+$ is defined as in \eqref{eq:.polyFJ.plus}.
\end{theorem} To prove Theorem \ref{theo:rep.plus} (resp. Theorem \ref{theo:rep.plus2}), we do similarly to the proof of Theorem \ref{theo:rep} (resp. Theorem \ref{theo:rep2}) by replacing $C(g)$ and $h_{\FJ}$ with $C^+(g)$ and $h_{\FJ}^+$, respectively. Note that each entry of $h_{\FJ}^+$ has degree at most $d+2$. \begin{remark} The two sets of critical points $C(g)$ and $C^+(g)$ include the set of points $W(f,g)$ at which the Fritz John conditions hold but the Karush--Kuhn--Tucker conditions do not hold for problem \eqref{eq:pop}. The assumptions that the images of $C(g)$ and $C^+(g)$ under $f$ are finite allow us to obtain the representations of $f$ in the case of $W(f,g)\ne \emptyset$. \end{remark} \subsection{The case with denominators} We state the fifth main result in the following theorem, which does not require any assumption on the set of critical points: \begin{theorem}\label{theo:rep.deno} Let $f,g_1,\dots,g_m\in\R_d[x]$ with $d\ge 1$. Assume that $f$ is non-negative on $S(g)$ with $g:=(g_1,\dots,g_m)$. Set \begin{equation}\label{eq:def.w5} w:=\frac{1}{2}\times b(n+m+1,d+1,2m+n+3)+2d\times {c(n+m+1,d+1,n+m+1)}\,. \end{equation} Then there exists $q\in P_w(g)[x,\bar \lambda]$ such that $\lambda_0(f-q)$ vanishes on $V(h_{\FJ})$, where $\bar\lambda:=(\lambda_0,\dots,\lambda_m)$ and $h_{\FJ}$ is defined as in \eqref{eq:.polyFJ}. \end{theorem} \begin{proof} Using Lemma \ref{lem:num.connected}, we decompose $V(h_{\FJ})\backslash \{\lambda_0=0\}$ into semi-algebraically path connected components: $Z_1,\dots,Z_s$ with \begin{equation}\label{eq:bound.on.s.deno} s\le 2\times c(n+m+1,d+1,n+m+1)\,. \end{equation} It is because each entry of $h_{\FJ}$ has degree at most $d+1\ge 2$ and \begin{equation} V(h_{\FJ})\backslash \{\lambda_0=0\}=(V(h_{\FJ})\cap\{\lambda_0>0\})\cup (V(h_{\FJ})\cap\{\lambda_0<0\})\,. \end{equation} Accordingly \cite[Lemma 24]{mai2022exact} shows that $f$ is constant on each $Z_i$. Thus $f(V(h_{\FJ})\backslash \{\lambda_0=0\})$ is finite. Set $r=|f(V(h_{\FJ})\backslash \{\lambda_0=0\})|$. From \eqref{eq:bound.on.s}, we get \begin{equation}\label{eq:ineq.deno} r\le s\le 2\times c(n+m+1,d+1,n+m+1)\,. \end{equation} Set \begin{equation} u:=b(n+m+1,d+1,2m+n+3)/2\,. \end{equation} By using Lemma \ref{lem:quadra.deno}, there exist $q\in P_\xi(g)[x,\bar\lambda]$ with $\xi=dr+u$ such that $f - q$ vanishes on $V(h_{\FJ})\backslash \{\lambda_0=0\}$. It implies that $\lambda_0(f - q)$ vanishes on $V(h_{\FJ})$. By \eqref{eq:ineq.deno} and \eqref{eq:def.w5}, we obtain $\xi\le w$, and hence $q\in P_w(g)[x,\bar\lambda]$. \end{proof} \begin{remark} We have provided the degree bounds for the representations involving preorderings in Theorems \ref{theo:rep}, \ref{theo:rep2}, \ref{theo:rep.plus}, \ref{theo:rep.plus2}, and \ref{theo:rep.deno}. Our bounds are extremely large because of the exponential bounds for Krivine--Stengle's Nichtnegativstellensatz and the number of connected components of a basic semi-algebraic set. Fortunately, our bounds only depend on the number of variables, the number of polynomials defining the basic semi-algebraic set, and the degrees of the input polynomials. In contrast, our degree bound for the representations involving quadratic modules in Theorems \ref{theo:rep.quadra.module} and \ref{theo:rep.quadra.module.deno} stated below is implicit and depends on all information (including the coefficients) of the input polynomials. 
To achieve it, we apply the degree bound for Putinar's Positivstellensatz analyzed by Baldi--Mourrain \cite{baldi2021moment} under the finiteness assumption for the image of the set of critical points $C(g)$ under $f$. \end{remark} \section{Convergence rate for exact polynomial optimization based on the Fritz John conditions} \label{sec:application} This section presents the main application of the degree bounds in Theorems \ref{theo:rep}, \ref{theo:rep2}, \ref{theo:rep.plus}, \ref{theo:rep.plus2}, and \ref{theo:rep.deno} to analyze the convergence rate of the corresponding hierarchies of semidefinite relaxations for a polynomial optimization problem. \subsection{The case of arbitrary multipliers} \begin{theorem}\label{theo:pop} Let $f,g_1,\dots,g_m\in\R_d[x]$. Let $f^\star$ be as in problem \eqref{eq:pop} with $g=(g_1,\dots,g_m)$. Assume that problem \eqref{eq:pop} has a global minimizer and $f(C(g))$ is finite. Let $h_{\FJ}$ be as in \eqref{eq:.polyFJ} and let $w$ be as in \eqref{eq:def.w}. Set \begin{equation}\label{eq:def.r} r=b(n+m+1,2w,m+n+2)/2\,. \end{equation} Then $\rho_r(f,\Pi g,h_{\FJ})=f^\star$, where $\Pi g$ is defined as in \eqref{eq:prod.g}. \end{theorem} \begin{proof} Since $S(g)=S(\Pi g)$, \cite[Lemma 17]{mai2022exact} implies that \begin{equation}\label{eq:equivalent.prob} \begin{array}{rl} f^\star:=\min\limits_{x,\bar\lambda}& f(x)\\ \text{s.t.}& x\in S(\Pi g)\,,\,(x,\bar\lambda)\in V(h_{\FJ})\,. \end{array} \end{equation} By assumption, Theorem \ref{theo:rep} yields that there exists $q\in P_w(g)[x,\bar \lambda]=Q_w(\Pi g)[x,\bar \lambda]$ such that $f-f^\star-q$ vanishes on $V(h_{\FJ})$. Applying the sixth statement of Lemma \ref{lem:mom.sos} (by replacing $g$ with $\Pi g$), we obtain the conclusion. \end{proof} \begin{theorem}\label{theo:pop2} Let $f,g_1,\dots,g_m\in\R_d[x]$. Let $f^\star$ be as in problem \eqref{eq:pop} with $g=(g_1,\dots,g_m)$. Assume that problem \eqref{eq:pop} has a global minimizer and $f(C(g)\cap S(g))$ is finite. Let $h_{\FJ}$ be as in \eqref{eq:.polyFJ} and let $w$ be as in \eqref{eq:def.w2}. Set $r$ as in \eqref{eq:def.r}. Then $\rho_r(f,\Pi g,h_{\FJ})=f^\star$, where $\Pi g$ is defined as in \eqref{eq:prod.g}. \end{theorem} \begin{proof} Note that \cite[Lemma 17]{mai2022exact} implies that \begin{equation}\label{eq:equi.prob1} \begin{array}{rl} f^\star:=\min\limits_{x,\bar\lambda}& f(x)\\ \text{s.t.}& x\in S( g)\,,\,(x,\bar\lambda)\in V(h_{\FJ})\,. \end{array} \end{equation} By assumption, Theorem \ref{theo:rep2} yields that there exists $q\in \Sigma^2_w[x,\bar \lambda]$ such that $f-f^\star-q$ vanishes on $(S(g)\times\R^{m+1})\cap V(h_{\FJ})$. Applying the final statement of Lemma \ref{lem:mom.sos}, we obtain the conclusion. \end{proof} \subsection{The case of nonnegative multipliers} \begin{theorem}\label{theo:pop.plus} Let $f,g_1,\dots,g_m\in\R_d[x]$. Let $f^\star$ be as in problem \eqref{eq:pop} with $g=(g_1,\dots,g_m)$. Assume that problem \eqref{eq:pop} has a global minimizer and $f(C^+(g))$ is finite. Let $h_{\FJ}^+$ be as in \eqref{eq:.polyFJ.plus} and let $w$ be as in \eqref{eq:def.w3}. Set $r$ as in \eqref{eq:def.r}. Then $\rho_r(f,\Pi g,h_{\FJ}^+)=f^\star$, where $\Pi g$ is defined as in \eqref{eq:prod.g}. \end{theorem} The proof of Theorem \ref{theo:pop.plus}, which is based on Theorem \ref{theo:rep.plus}, is similar to the one of Theorem \ref{theo:pop}. \begin{theorem}\label{theo:pop.plus2} Let $f,g_1,\dots,g_m\in\R_d[x]$. Let $f^\star$ be as in problem \eqref{eq:pop} with $g=(g_1,\dots,g_m)$.
Assume that problem \eqref{eq:pop} has a global minimizer and $f(C^+(g)\cap S(g))$ is finite. Let $h_{\FJ}^+$ be as in \eqref{eq:.polyFJ.plus} and let $w$ be as in \eqref{eq:def.w4}. Set $r$ as in \eqref{eq:def.r}. Then $\rho_r(f,\Pi g,h_{\FJ}^+)=f^\star$, where $\Pi g$ is defined as in \eqref{eq:prod.g}. \end{theorem} The proof of Theorem \ref{theo:pop.plus2}, which is based on Theorem \ref{theo:rep.plus2}, is similar to the one of Theorem \ref{theo:pop2}. \begin{remark} It is worth pointing out in Theorems \ref{theo:pop}, \ref{theo:pop2}, \ref{theo:pop.plus}, and \ref{theo:pop.plus2} that the order $r\in\N$ at which the corresponding relaxation becomes exact depends on $n$ (the number of variables), $m$ (the number of inequality constraints $g_j$), and $d$ (the upper bound on the degrees of $f,g_j$) but does not depend on the coefficients of $f,g_j$. \end{remark} \begin{remark} Under the Archimedean condition (resp. the compactness assumption) for $S(g)$, the sequence $(\rho_k(f,g,h_{\FJ}))_{k\in\N}$ (resp. $(\rho_k(f,\Pi g,h_{\FJ}))_{k\in\N}$) converges to $f^\star$ faster than the sequence $(\rho_k(f,g,0))_{k\in\N}$ (resp. $(\rho_k(f,\Pi g,0))_{k\in\N}$). It is because of the following inequalities: \begin{equation} \rho_k(f,g,0)\le\rho_k(f,g,h_{\FJ})\le f^\star\quad\quad(\text{resp. }\rho_k(f,\Pi g,0)\le\rho_k(f,\Pi g,h_{\FJ})\le f^\star)\,. \end{equation} Thus we can obtain the convergence rate for $(\rho_k(f,g,h_{\FJ}))_{k\in\N}$ (resp. $(\rho_k(f,\Pi g,h_{\FJ}))_{k\in\N}$) from the available convergence rate of $(\rho_k(f,g,0))_{k\in\N}$ (resp. $(\rho_k(f,\Pi g,0))_{k\in\N}$) in \cite{fang2020sum,laurent2021effective,slot2021sum,baldi2021moment}. However, these rates do not give the finite convergence for the sequence $(\rho_k(f,g,h_{\FJ}))_{k\in\N}$ (resp. $(\rho_k(f,\Pi g,h_{\FJ}))_{k\in\N}$). Moreover, the computational complexity for the semidefinite relaxation of each value $\rho_k(f,g,h_{\FJ})$ (resp. $\rho_k(f,\Pi g,h_{\FJ})$) grows more quickly than the one of the value $\rho_k(f,g,0)$ (resp. $\rho_k(f,\Pi g,0)$) as $k$ increases. It is because, to obtain the relaxation of the value $\rho_k(f,g,h_{\FJ})$ (resp. $\rho_k(f,\Pi g,h_{\FJ})$), we add $m+1$ additional variables and $n+m+1$ additional equality constraints to the original polynomial optimization problem \eqref{eq:pop}. \end{remark} \subsection{The case with denominators} \begin{theorem}\label{theo:pop.deno} Let $f,g_1,\dots,g_m\in\R_d[x]$. Let $f^\star$ be as in problem \eqref{eq:pop} with $g=(g_1,\dots,g_m)$. Let $h_{\FJ}$ be as in \eqref{eq:.polyFJ} and let $w$ be as in \eqref{eq:def.w5}. Assume that problem \eqref{eq:pop} has a global minimizer $x^\star$ and one of the following two conditions holds: \begin{enumerate} \item the Karush--Kuhn--Tucker conditions hold for problem \eqref{eq:pop} at $x^\star$; \item there exists a sequence of points $(x^{(t)},\bar \lambda^{(t)})_{t\in\N}$ in $(S(g)\times \R^{m+1})\cap V(h_{\FJ})$ such that $(f(x^{(t)}))_{t\in\N}$ converges to $f^\star$ and $\lambda_0^{(t)}>0$. \end{enumerate} Set \begin{equation}\label{eq:def.r.deno} r=b(n+m+1,2(w+1),m+n+2)/2\,. \end{equation} Then $\rho_r(f,\Pi g,h_{\FJ},\lambda_0)=f^\star$, where $\Pi g$ is defined as in \eqref{eq:prod.g}. \end{theorem} \begin{proof} Since $S(g)=S(\Pi g)$, \cite[Lemma 17]{mai2022exact} implies \eqref{eq:equivalent.prob}.
Note that the first condition implies that the Fritz John conditions hold for problem \eqref{eq:pop} at $x^\star$ with multipliers $\bar\lambda^\star$ such that $\lambda_0^\star>0$, so that $(x^\star,\bar\lambda^\star)$ is a global minimizer for problem \eqref{eq:equivalent.prob} with $\lambda_0^\star>0$. The second condition implies that each $(x^{(t)},\bar \lambda^{(t)})$ is a feasible solution for problem \eqref{eq:equivalent.prob} such that $(f(x^{(t)}))_{t\in\N}$ converges to $\bar f^\star$ and $\lambda_0^{(t)}>0$. The third statement of Lemma \ref{lem:mom.sos.deno} says that $\rho_k(f,\Pi g,h_{\FJ},\lambda_0)\le f^\star$, for every $k\in\N$. In addition, Theorem \ref{theo:rep.deno} yields that there exists $q\in P_w(g)[x,\bar \lambda]=Q_w(\Pi g)[x,\bar \lambda]$ such that $\lambda_0(f-f^\star-q)$ vanishes on $V(h_{\FJ})$. Applying the final statement of Lemma \ref{lem:mom.sos.deno} (by replacing $g,h,\theta$ with $\Pi g,h_{\FJ},\lambda_0$), we obtain the conclusion. \end{proof} \begin{remark} As shown by Freund in \cite{freund2016optimality}, the Karush--Kuhn--Tucker conditions hold for problem \eqref{eq:pop} at its global minimizers if one of the following conditions holds: \begin{enumerate} \item $g$ can be decomposed as $g=(g^{(1)},g^{(2)},-g^{(2)})$, $S(g^{(1)})$ has non-empty interior, and $g_j$, $j=1,\dots,m$, are concave; \item $g_j$, $j=1,\dots,m$, are linear; \item $f$ is pseudoconvex and $g_j$, $j=1,\dots,m$, are quasiconcave (see the definitions of pseudoconvexity and quasiconcavity in \cite{freund2016optimality}). \end{enumerate} Thanks to Theorem \ref{theo:pop.deno} (as well as Theorems \ref{theo:pop.KKT}, \ref{theo:pop.KKT.plus}, and \ref{theo:pop.quadra.deno} stated below), we can compute the exact optimal value $f^\star$ for problem \eqref{eq:pop} by using semidefinite programming under one of the above three conditions. \end{remark} \section{Explicit degree bounds for the representation based on the Karush--Kuhn--Tucker conditions} \label{sec:bound.KKT} \subsection{The case of arbitrary multipliers} We prove the following lemma, which is similar to \cite[Lemma 3.3]{demmel2007representations}, by using tools from real algebraic geometry (instead of those from complex algebraic geometry): \begin{lemma}\label{lem:constant.KKT} Let $f,g_1,\dots,g_m\in\R[x]$. Set \begin{equation}\label{eq:.polyKKT} h_{\KKT}:=(\nabla f-\sum_{j=1}^m \lambda_j \nabla g_j,\lambda_1g_1,\dots,\lambda_mg_m)\,. \end{equation} Let $W$ be a semi-algebraically path connected component of $V(h_{\KKT})$. Then $f$ is constant on $W$. \end{lemma} \begin{proof} Recall $\lambda:=(\lambda_1,\dots,\lambda_m)$. Choose two arbitrary points $(x^{(0)},\lambda^{(0)})$, $(x^{(1)},\lambda^{(1)})$ in $W$. We claim that $f(x^{(0)}) = f(x^{(1)})$. It is sufficient to assume that both $(x^{(0)},\lambda^{(0)})$ and $(x^{(1)},\lambda^{(1)})$ are non-singular points. Indeed, if at least one of $(x^{(0)},\lambda^{(0)})$ and $(x^{(1)},\lambda^{(1)})$ is singular, we choose arbitrarily close non-singular points to approximate $(x^{(0)},\lambda^{(0)})$ and $(x^{(1)},\lambda^{(1)})$ and then apply the continuity of $f$ to obtain $f(x^{(0)}) = f(x^{(1)})$; this is possible because the set of non-singular points of $W$ is dense and open in $W$. If a manifold is path-connected, the set of its non-singular points is a manifold that is also path-connected.
By assumption, there exists a continuous piecewise-differentiable path $\phi(\tau) = (x(\tau),\lambda(\tau))$, for $\tau\in[0,1]$, lying inside $W$ such that $\phi(0) = (x^{(0)},\lambda^{(0)})$ and $\phi(1) = (x^{(1)},\lambda^{(1)})$. We claim that $\tau\mapsto f(x(\tau))$ is constant on $[0,1]$. The Lagrangian function \begin{equation}\label{eq:Lagran.KKT} L(x,\lambda) = f(x)+\sum_{j=1}^m \lambda_j g_j (x)\,. \end{equation} is equal to $f(x)$ on $V(h_{\KKT})$, which contains $\phi([0,1])$. Let $\mu_j(\tau)$ be the principal square root of $\lambda_j(\tau)$ for $\tau\in[0,1]$, $j=1,\dots,m$. By the mean value theorem, it follows that $f (x (0) ) = f (x (1) )$. We now obtain $f (x^{(0)})$ = $f (x^{(1)})$ and hence $f$ is constant on $W$. \end{proof} We state in the following theorem the explicit degree bound for the representation based on the Karush--Kuhn--Tucker conditions: \begin{theorem}\label{theo:rep.KKT} Let $f,g_1,\dots,g_m\in\R_d[x]$ with $d\ge 1$. Assume that $f$ is non-negative on $S(g)$ with $g:=(g_1,\dots,g_m)$. Set \begin{equation}\label{eq:def.w.KKT} w:=\frac{1}{2}\times b(n+m,d+1,2m+n+1)+d\times {c(n+m,d+1,n+m)}\,. \end{equation} Then there exists $q\in P_w(g)[x, \lambda]$ such that $f-q$ vanishes on $V(h_{\KKT})$, where $\lambda:=(\lambda_1,\dots,\lambda_m)$ and $h_{\KKT}$ is defined as in \eqref{eq:.polyKKT}. \end{theorem} \begin{proof} Using Lemma \ref{lem:num.connected}, we decompose $V(h_{\KKT})$ into semi-algebraically path connected components: $Z_1,\dots,Z_s$ with \begin{equation}\label{eq:bound.on.s.KKT} s\le c(n+m,d+1,n+m)\,, \end{equation} since each entry of $h_{\KKT}$ has degree at most $d+1\ge 2$. Accordingly Lemma \ref{lem:constant.KKT} shows that $f$ is constant on each $Z_i$. Thus $f(V(h_{\KKT}))$ is finite. Set $r=|f(V(h_{\KKT}))|$. From \eqref{eq:bound.on.s.KKT}, we get \begin{equation}\label{eq:ineq.KKT} r\le s\le c(n+m,d+1,n+m)\,. \end{equation} Set \begin{equation} u:=b(n+m,d+1,2m+n+1)/2\,. \end{equation} By using Lemma \ref{lem:quadra}, there exists $q\in P_\xi(g)[x,\lambda]$ with $\xi=dr+u$ such that $f - q$ vanishes on $V(h_{\KKT})$. By \eqref{eq:ineq.KKT} and \eqref{eq:def.w.KKT}, $\xi\le w$, and hence $q\in P_w(g)[x,\lambda]$. \end{proof} We apply Theorem \ref{theo:rep.KKT} for polynomial optimization as follows: \begin{theorem}\label{theo:pop.KKT} Let $f,g_1,\dots,g_m\in\R_d[x]$. Let $f^\star$ be as in problem \eqref{eq:pop} with $g=(g_1,\dots,g_m)$. Assume that problem \eqref{eq:pop} has a global minimizer at which the Karush--Kuhn--Tucker conditions hold for this problem. Let $h_{\KKT}$ be as in \eqref{eq:.polyKKT} and let $w$ be as in \eqref{eq:def.w.KKT}. Set \begin{equation} r=b(n+m,2w,m+n+1)/2\,. \end{equation} Then $\rho_r(f,\Pi g,h_{\KKT})=f^\star$, where $\Pi g$ is defined as in \eqref{eq:prod.g}. \end{theorem} \begin{proof} By assumption, there exists $(x^\star,\lambda^\star)\in V(h_{\KKT})$ such that $x^\star$ is a global minimizer of \eqref{eq:pop}. Since $S(g)=S(\Pi g)$, it implies that \begin{equation} \begin{array}{rl} f^\star:=\min\limits_{x,\lambda}& f(x)\\ \text{s.t.}& x\in S(\Pi g)\,,\,(x,\lambda)\in V(h_{\KKT})\,, \end{array} \end{equation} By assumption, Theorem \ref{theo:rep.KKT} yields that there exists $q\in P_w(g)[x,\lambda]=Q_w(\Pi g)[x, \lambda]$ such that $f-f^\star-q$ vanishes on $V(h_{\KKT})$. Applying the sixth statement of Lemma \ref{lem:mom.sos} (by replacing $g$ with $\Pi g$), we obtain the conclusion. 
\end{proof} \subsection{The case of nonnegative multipliers} A proof similar to that of Lemma \ref{lem:constant.KKT} applies to the following lemma: \begin{lemma}\label{lem:constant.KKT.plus} Let $f,g_1,\dots,g_m\in\R[x]$. Set \begin{equation}\label{eq:.polyKKT.plus} h_{\KKT}^+:=(\nabla f-\sum_{j=1}^m \lambda_j^2 \nabla g_j,\lambda_1^2g_1,\dots,\lambda_m^2g_m)\,. \end{equation} Let $W$ be a semi-algebraically path connected component of $V(h_{\KKT}^+)$. Then $f$ is constant on $W$. \end{lemma} We state in the following theorem the explicit degree bound for the representation based on the Karush--Kuhn--Tucker conditions with nonnegative multipliers: \begin{theorem}\label{theo:rep.KKT.plus} Let $f,g_1,\dots,g_m\in\R_d[x]$ with $d\in\N$. Assume that $f$ is non-negative on $S(g)$ with $g:=(g_1,\dots,g_m)$. Set \begin{equation}\label{eq:def.w.KKT.plus} w:=\frac{1}{2}\times b(n+m,d+1,2m+n+1)+d\times {c(n+m,d+2,n+m)}\,. \end{equation} Then there exists $q\in P_w(g)[x, \lambda]$ such that $f-q$ vanishes on $V(h_{\KKT}^+)$, where $\lambda:=(\lambda_1,\dots,\lambda_m)$ and $h_{\KKT}^+$ is defined as in \eqref{eq:.polyKKT.plus}. \end{theorem} \begin{proof} Using Lemma \ref{lem:num.connected}, we decompose $V(h_{\KKT}^+)$ into semi-algebraically path connected components: $Z_1,\dots,Z_s$ with \begin{equation}\label{eq:bound.on.s.KKT.plus} s\le c(n+m,d+2,n+m)\,, \end{equation} since each entry of $h_{\KKT}^+$ has degree at most $d+2\ge 2$. Accordingly, Lemma \ref{lem:constant.KKT.plus} shows that $f$ is constant on each $Z_i$. Thus $f(V(h_{\KKT}^+))$ is finite. Set $r=|f(V(h_{\KKT}^+))|$. From \eqref{eq:bound.on.s.KKT.plus}, we get \begin{equation}\label{eq:ineq.KKT.plus} r\le s\le c(n+m,d+2,n+m)\,. \end{equation} Set \begin{equation} u:=b(n+m,d+1,2m+n+1)/2\,. \end{equation} By using Lemma \ref{lem:quadra}, there exists $q\in P_\xi(g)[x,\lambda]$ with $\xi=dr+u$ such that $f - q$ vanishes on $V(h_{\KKT}^+)$. By \eqref{eq:ineq.KKT.plus} and \eqref{eq:def.w.KKT.plus}, $\xi\le w$, and hence $q\in P_w(g)[x,\lambda]$. \end{proof} We apply Theorem \ref{theo:rep.KKT.plus} for polynomial optimization as follows: \begin{theorem}\label{theo:pop.KKT.plus} Let $f,g_1,\dots,g_m\in\R_d[x]$. Let $f^\star$ be as in problem \eqref{eq:pop} with $g=(g_1,\dots,g_m)$. Assume that problem \eqref{eq:pop} has a global minimizer at which the Karush--Kuhn--Tucker conditions hold for this problem. Let $h_{\KKT}^+$ be as in \eqref{eq:.polyKKT.plus} and let $w$ be as in \eqref{eq:def.w.KKT.plus}. Set \begin{equation} r=b(n+m+1,2w,m+n+2)/2\,. \end{equation} Then $\rho_r(f,\Pi g,h_{\KKT}^+)=f^\star$, where $\Pi g$ is defined as in \eqref{eq:prod.g}. \end{theorem} \begin{proof} By assumption, there exists $(x^\star,\lambda^\star)\in V(h_{\KKT}^+)$ such that $x^\star$ is a global minimizer of \eqref{eq:pop}. Since $S(g)=S(\Pi g)$, it follows that \begin{equation} \begin{array}{rl} f^\star:=\min\limits_{x,\lambda}& f(x)\\ \text{s.t.}& x\in S(\Pi g)\,,\,(x,\lambda)\in V(h_{\KKT}^+)\,, \end{array} \end{equation} By assumption, Theorem \ref{theo:rep.KKT.plus} yields that there exists $q\in P_w(g)[x,\lambda]=Q_w(\Pi g)[x, \lambda]$ such that $f-f^\star-q$ vanishes on $V(h_{\KKT}^+)$. Applying the sixth statement of Lemma \ref{lem:mom.sos} (by replacing $g$ with $\Pi g$), we obtain the conclusion. \end{proof} \section{Implicit degree bounds for the representations involving quadratic modules} \label{sec:implicit.quadraic} Let $\|\cdot\|$ denote the max norm of a polynomial on $[-1,1]^n$, that is, $\|p\|:=\max_{x\in[-1,1]^n}|p(x)|$ for $p\in\R[x]$.
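To fix ideas, recall that membership in a quadratic module corresponds to an explicit certificate of non-negativity: $f\in Q(g)[x]$ means that $f=\sigma_0+\sum_{j=1}^m\sigma_j g_j$ for some sums of squares $\sigma_0,\dots,\sigma_m\in\Sigma^2[x]$. For instance (an elementary illustration that is not needed in the sequel), with $n=m=1$ and $g_1=1-x_1^2$, the polynomial $f=1+x_1$ is non-negative on $S(g)=[-1,1]$ and admits the degree-two certificate \begin{equation*} 1+x_1=\frac{1}{2}(1+x_1)^2+\frac{1}{2}\,(1-x_1^2)\,. \end{equation*}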
We recall in the following lemma the degree bound for Putinar's Positivstellensatz by Baldi and Mourrain in \cite{baldi2021moment}: \begin{lemma}\label{lem:Pu} Let $f,g_1,\dots,g_m\in\R[x]$. Assume that $f$ is positive on $S(g)$ with $g:=(g_1,\dots,g_m)$ and that the following conditions hold: \begin{enumerate} \item $1-x_1^2-\dots-x_n^2\in Q_d(g)[x]$ for some $d\in\N$; \item $\|g_i\|\le \frac{1}{2}\,,\,i=1,\dots,m$. \end{enumerate} Let $f^\star$ be as in \eqref{eq:pop}. Then there exist positive reals $\gamma(n,g)$ and $L(n,g)$ depending only on $n$ and $g$ such that $f \in Q_r(g)[x]$ if \begin{equation}\label{eq:bound.Putinar} r\ge \gamma(n,g)\deg(f)^{3.5nL(n,g)}\left(\frac{\|f\|}{f^\star}\right)^{2.5nL(n,g)}\,. \end{equation} \end{lemma} We denote by $v(n,f,g)$ the ceiling of the right-hand side of \eqref{eq:bound.Putinar}. Given $p\in\R[x]$ with $\xi=\lceil \deg(p)/2\rceil$, let $\tilde p:=x_0^{2\xi}p(\frac{x}{x_0\sqrt{2}}) \in\R[\bar x]$, where $\bar x:=(x_0,x)$. The following lemma is a consequence of Lemmas \ref{lem:pos} and \ref{lem:Pu}: \begin{lemma}\label{lem:rep-1.Pu} Let $g_1,\dots,g_m\in\R_d[x]$. Assume that the following conditions hold: \begin{enumerate} \item $\|\tilde g_i\|\le \frac{1}{2}\,,\,i=1,\dots,m$, $g_m:=\frac{1}{2}-x_1^2-\dots-x_n^2$; \item $S(g)=\emptyset$ with $g:=(g_1,\dots,g_m)$. \end{enumerate} Then there exists a positive real $u$ depending on $n$, $d$, and $g$ such that $-1 \in Q_{u}(g)[x]$. \end{lemma} Denote by $u(n,d,g)$ the parameter $u$ in Lemma \ref{lem:rep-1.Pu}. \begin{proof} Set $\eta=b(n,d,m+1)/2$. Since $S(g)=\emptyset$, Lemma \ref{lem:pos} yields that there exist $\sigma_\alpha\in \Sigma^2[x]$ such that $\deg(\sigma_\alpha g^\alpha)\le 2\eta$ and \begin{equation}\label{eq:rep.-1} -1=\sum_{\alpha\in\{0,1\}^m}\sigma_\alpha g^\alpha\,. \end{equation} We have $\tilde g_m=\frac{1}{2}x_0^2-x_1^2-\dots-x_n^2$. Let $\eta_j=\lceil\deg(g_j)/2\rceil$. From \eqref{eq:rep.-1}, we get \begin{equation}\label{eq:equi} -x_0^{2\eta}=\sum_{\alpha\in\{0,1\}^m}\psi_\alpha \tilde g^\alpha\,, \end{equation} where $\psi_\alpha=x_0^{2(\eta-\eta_j)}\sigma_\alpha(\frac{x}{x_0\sqrt{2}})\in\Sigma^2[\bar x]$ and $\tilde g=(\tilde g_1,\dots,\tilde g_m)$. Denote by $w\in\R[\bar x]$ the polynomial on the right-hand side of \eqref{eq:equi}. Then $w$ is non-negative on $S(\tilde g,\frac{1}{2}-x_0^2)$. Since $0\in S(\tilde g,\frac{1}{2}-x_0^2)$, we get $S(\tilde g,\frac{1}{2}-x_0^2)\ne \emptyset$. On the other hand we have \begin{equation} 1-x_0^2-\dots-x_n^2=(\frac{1}{2}-x_0^2)+\tilde g_m\in Q(\tilde g,\frac{1}{2}-x_0^2)[\bar x]\,. \end{equation} Applying Lemma \ref{lem:Pu}, we obtain $u=v(n,w+\frac{1}{2^{\eta+1}},(\tilde g,\frac{1}{2}-x_0^2))$ such that \begin{equation} -x_0^{2\eta}+\frac{1}{2^{\eta+1}}=w+\frac{1}{2^{\eta+1}}\in Q_u(\tilde g,\frac{1}{2}-x_0^2)[\bar x]\,. \end{equation} Letting $x_0=\frac{1}{\sqrt{2}}$ implies that $-\frac{1}{2^{\eta+1}}\in Q_u(g)[x]$, yielding the result. \end{proof} The following lemma is a direct consequence of Lemma \ref{lem:rep-1.Pu}: \begin{lemma}\label{lem:rep-1.Pu.eq} Let $g_1,\dots,g_m\in\R_d[x]$ and $h_1,\dots,h_l\in\R_{d+1}[x]$. Assume that the following conditions hold: \begin{enumerate} \item $\|\tilde g_i\|\le \frac{1}{2}\,,\,i=1,\dots,m$, $g_m:=\frac{1}{2}-x_1^2-\dots-x_n^2$; \item $S(g)\cap V(h)=\emptyset$ with $g:=(g_1,\dots,g_m)$ and $h:=(h_1,\dots,h_l)$. \end{enumerate} Set $h_{\max}:=\max\limits_{j=1,\dots,l}\|\tilde h_j\|$.
Then there exists a positive real \begin{equation} w=u(n,d+1,(g,\frac{h}{2h_{\max}},-\frac{h}{2h_{\max}})) \end{equation} depending on $n$, $d$, $g$, and $h$ such that $-1 \in Q_{w}(g)[x]+I_w(h)[x]$. \end{lemma} Denote by $w(n,d,g,h)$ the parameter $w$ in Lemma \ref{lem:rep-1.Pu.eq}. \subsection{The case without denominators} \begin{lemma}\label{lem:quadra.module} Let $f,g_1,\dots,g_m\in\R_d[x]$ and $h_1,\dots,h_l\in\R_{d+1}[x]$. Assume that the following conditions hold: \begin{enumerate} \item $\|\tilde g_i\|\le \frac{1}{2}$, $i=1,\dots,m$, $g_m:=\frac{1}{2}-x_1^2-\dots-x_n^2$; \item $f$ is non-negative on $S(g)$ with $g=(g_1,\dots,g_m)$; \item $f(V(h))$ is finite with $h=(h_1,\dots,h_l)$. \end{enumerate} Set $r:=|f(V(h))|$ and \begin{equation} u:=\max_{t\in f(V(h))}w(n,d,g,(h,f-t))\,. \end{equation} Then there exists $q\in Q_t(g)[x]$ with $t=dr+u$ such that $f - q$ vanishes on $V(h)$. \end{lemma} Denote by $t(n,d,r,f,g,h)$ the parameter $t$ in Lemma \ref{lem:quadra.module}. Note that it holds that \begin{equation}\label{eq:increasing.t} t(n,d,r,f,g,h)\le t(n,d,r',f,g,h)\text{ if }r\le r'\,. \end{equation} \begin{proof} By assumption, we get $f(V(h)) = \{t_1 ,\dots, t_r \} \subset \R$, where $t_i\ne t_j$ if $i\ne j$. For $j=1,\dots,r$, let $W_j:=V(h,f-t_j)$. Then $W_j$ is a real variety generated by $l+1$ polynomials in $\R_{d+1}[x]$. It is clear that $f(W_j)=\{t_j\}$. Let $p_j\in\R[x]$, $j=1,\dots,r$, be as in \eqref{eq:lagrange.pol}. It is easy to check that $p_j(W_i)=\{\delta_{ji}\}$ and $\deg(p_j)\le d(r-1)$. Without loss of generality, we assume that there is $s\in \{0,1,\dots,r-1\}$ such that $W_j\cap S(g)= \emptyset$, for $j=1,\dots,s$, and $W_i\cap S(g)\ne \emptyset$, for $i=s+1,\dots,r$. Let $j\in\{1,\dots,s\}$. Since $W_j \cap S(g) = \emptyset$, Lemma \ref{lem:rep-1.Pu.eq} says that $-1 \in Q_u(g)[x]+I_u(h,f-t_j)[x]$. This implies that there exists $v_j \in Q_u(g)[x]$ such that $-1 = v_j$ on $W_j$. We have $f = s_1 - s_2$ for the SOS polynomials $s_1 =(f+\frac{1}{2})^2$ and $s_2 = f^2+\frac{1}{4}$. This implies that $f = s_1 + v_j s_2$ on $W_j$. Let $q_j = s_1 +v_j s_2 \in Q_{u+d}(g)[x]$. Since $f\ge 0$ on $S(g)$, it holds that $f=t_i\ge 0$ on $W_i$, for $i=s+1,\dots,r$. Now letting $q$ be as in \eqref{eq:rep.q}, we obtain $q\in Q_t(g)[x]$, and hence $f - q$ vanishes on $V(h)=W_1\cup\dots\cup W_r$. \end{proof} The implicit degree bound for the representation without denominators associated with quadratic modules in \cite[Theorem 1]{mai2022exact} is stated in the following theorem: \begin{theorem}\label{theo:rep.quadra.module} Let $f,g_1,\dots,g_m\in\R_d[x]$ with $d\ge 1$. Assume that the following conditions hold: \begin{enumerate} \item $\|\tilde g_i\|\le \frac{1}{2}$, $i=1,\dots,m$ and $g_m:=\frac{1}{2}-x_1^2-\dots-x_n^2$; \item $f$ is non-negative on $S(g)$ with $g:=(g_1,\dots,g_m)$ and $f(C(g))$ is finite. \end{enumerate} Set \begin{equation}\label{eq:def.omega} \omega:=t(n,d,c(n+m+1,d+1,n+m+1),f,g,h_{\FJ})\,. \end{equation} Then there exists $q\in Q_\omega(g)[x,\bar \lambda]$ such that $f-q$ vanishes on $V(h_{\FJ})$, where $\bar\lambda:=(\lambda_0,\dots,\lambda_m)$ and $h_{\FJ}$ is defined as in \eqref{eq:.polyFJ}. \end{theorem} \begin{proof} Using Lemma \ref{lem:num.connected}, we decompose $V(h_{\FJ})$ into semi-algebraically path connected components: $Z_1,\dots,Z_s$ with $s$ satisfying \eqref{eq:bound.on.s} (since each entry of $h_{\FJ}$ has degree at most $d+1\ge 2$). Accordingly, \cite[Lemma 13]{mai2022exact} says that $f$ is constant on each $Z_i$.
Thus $f(V(h_{\FJ}))$ is finite. Set $r=|f(V(h_{\FJ}))|$. Then we get the inequality \eqref{eq:ineq}. By using Lemma \ref{lem:quadra.module}, there exists $q\in Q_\xi(g)[x,\bar\lambda]$ with $\xi=t(n,d,r,f,g,h_{\FJ})$ such that $f - q$ vanishes on $V(h_{\FJ})$. By \eqref{eq:ineq}, \eqref{eq:increasing.t} and \eqref{eq:def.omega}, $\xi\le \omega$, and hence $q\in Q_\omega(g)[x,\bar\lambda]$. This completes the proof. \end{proof} We apply Theorem \ref{theo:rep.quadra.module} for polynomial optimization as follows: \begin{theorem}\label{theo:pop.quadra} Let $f,g_1,\dots,g_m\in\R_d[x]$ with $d\ge 1$. Let $f^\star$ be as in problem \eqref{eq:pop} with $g=(g_1,\dots,g_m)$. Assume that the following conditions hold: \begin{enumerate} \item $\|\tilde g_i\|\le \frac{1}{2}$, $i=1,\dots,m$ and $g_m:=\frac{1}{2}-x_1^2-\dots-x_n^2$; \item problem \eqref{eq:pop} has a global minimizer and $f(C(g))$ is finite. \end{enumerate} Let $h_{\FJ}$ be as in \eqref{eq:.polyFJ} and let $\omega$ be as in \eqref{eq:def.omega}. Set \begin{equation} r=b(n+m+1,2\omega,m+n+2)/2\,. \end{equation} Then $\rho_r(f,g,h_{\FJ})=f^\star$. \end{theorem} \begin{proof} Note that \cite[Lemma 17]{mai2022exact} implies \eqref{eq:equi.prob1}. By assumption, Theorem \ref{theo:rep.quadra.module} yields that there exists $q\in Q_\omega(g)[x,\bar \lambda]$ such that $f-f^\star-q$ vanishes on $V(h_{\FJ})$. Applying the sixth statement of Lemma \ref{lem:mom.sos}, we obtain the conclusion. \end{proof} \begin{remark} The degree bound $\omega$ in Theorem \ref{theo:rep.quadra.module} is not very informative, and neither is the rate $r$ in Theorem \ref{theo:pop.quadra}. This is because the bound $\omega$ in Theorem \ref{theo:rep.quadra.module} depends on all of the data of $f,g_j$, not only on their degrees. By contrast, the degree bound $w$ in Theorem \ref{theo:rep} only depends on $n$ (the number of variables), $m$ (the number of polynomial inequalities $g_j$), and $d$ (the upper bound on the degrees of $f,g_j$). A similar conclusion holds for the other representations without denominators involving quadratic modules in \cite{mai2022exact}. \end{remark} \subsection{The case with denominators} \begin{lemma}\label{lem:quadra.module.deno} Let $f,g_1,\dots,g_m\in\R_d[x]$ and $h_1,\dots,h_l\in\R_{d+1}[x]$. Assume that the following conditions hold: \begin{enumerate} \item $\|\tilde g_i\|\le \frac{1}{2}$, $i=1,\dots,m$, $g_m:=\frac{1}{2}-x_1^2-\dots-x_n^2$; \item $f$ is non-negative on $S(g)$ with $g=(g_1,\dots,g_m)$; \item $f(V(h)\backslash A)$ is finite with $h=(h_1,\dots,h_l)$ and $A\subset \R^n$. \end{enumerate} Set $r:=|f(V(h)\backslash A)|$ and \begin{equation} u:=\max_{t\in f(V(h)\backslash A)}w(n,d,g,(h,f-t))\,. \end{equation} Then there exists $q\in Q_t(g)[x]$ with $t=dr+u$ such that $f - q$ vanishes on $V(h)\backslash A$. \end{lemma} Denote by $t'(n,d,r,f,g,h)$ the parameter $t$ in Lemma \ref{lem:quadra.module.deno}. Note that it holds that \begin{equation}\label{eq:increasing.t.deno} t'(n,d,r,f,g,h)\le t'(n,d,r',f,g,h)\text{ if }r\le r'\,. \end{equation} \begin{proof} By assumption, we get $f(V(h)\backslash A) = \{t_1 ,\dots, t_r \} \subset \R$, where $t_i\ne t_j$ if $i\ne j$. We now proceed in much the same way as in the proof of Lemma \ref{lem:quadra.module} to get $q\in Q_t(g)[x]$, and hence $f - q$ vanishes on $V(h)\backslash A\subset W_1\cup\dots\cup W_r$.
\end{proof} In the following theorem, we state the implicit degree bound for the representation with denominators associated with quadratic modules in \cite[Theorem 9]{mai2022exact}: \begin{theorem}\label{theo:rep.quadra.module.deno} Let $f,g_1,\dots,g_m\in\R_d[x]$ with $d\ge 1$. Assume that the following conditions hold: \begin{enumerate} \item $\|\tilde g_i\|\le \frac{1}{2}$, $i=1,\dots,m$ and $g_m:=\frac{1}{2}-x_1^2-\dots-x_n^2$; \item $f$ is non-negative on $S(g)$ with $g:=(g_1,\dots,g_m)$. \end{enumerate} Set \begin{equation}\label{eq:def.omega.deno} \omega:=t'(n,d,2\times c(n+m+1,d+1,n+m+1),f,g,h_{\FJ})\,. \end{equation} Then there exists $q\in Q_\omega(g)[x,\bar \lambda]$ such that $\lambda_0(f-q)$ vanishes on $V(h_{\FJ})$, where $\bar\lambda:=(\lambda_0,\dots,\lambda_m)$ and $h_{\FJ}$ is defined as in \eqref{eq:.polyFJ}. \end{theorem} \begin{proof} Using Lemma \ref{lem:num.connected}, we decompose $V(h_{\FJ})\backslash\{\lambda_0=0\}$ into semi-algebraically path connected components: $Z_1,\dots,Z_s$ with $s$ satisfying \eqref{eq:bound.on.s.deno} (since each entry of $h_{\FJ}$ has degree at most $d+1\ge 2$). Accordingly, \cite[Lemma 24]{mai2022exact} says that $f$ is constant on each $Z_i$. Thus $f(V(h_{\FJ})\backslash\{\lambda_0=0\})$ is finite. Set $r=|f(V(h_{\FJ})\backslash\{\lambda_0=0\})|$. Then we get the inequality \eqref{eq:ineq.deno}. By using Lemma \ref{lem:quadra.module.deno}, there exist $q\in Q_\xi(g)[x,\bar\lambda]$ with $\xi=t'(n,d,r,f,g,h_{\FJ})$ such that $f - q$ vanishes on $V(h_{\FJ})\backslash \{\lambda_0=0\}$. It implies that $\lambda_0(f - q)$ vanishes on $V(h_{\FJ})$. By \eqref{eq:ineq.deno}, \eqref{eq:increasing.t.deno} and \eqref{eq:def.omega.deno}, $\xi\le \omega$, and hence $q\in Q_\omega(g)[x,\bar\lambda]$. This completes the proof. \end{proof} We apply Theorem \ref{theo:rep.quadra.module.deno} for polynomial optimization as follows: \begin{theorem}\label{theo:pop.quadra.deno} Let $f,g_1,\dots,g_m\in\R_d[x]$ with $d\ge 1$. Let $f^\star$ be as in problem \eqref{eq:pop} with $g=(g_1,\dots,g_m)$. Assume that the following conditions hold: \begin{enumerate} \item $\|\tilde g_i\|\le \frac{1}{2}$, $i=1,\dots,m$ and $g_m:=\frac{1}{2}-x_1^2-\dots-x_n^2$; \item problem \eqref{eq:pop} has a global minimizer $x^\star$. \end{enumerate} Let $h_{\FJ}$ be as in \eqref{eq:.polyFJ} and let $\omega$ be as in \eqref{eq:def.omega.deno}. Assume that one of the following two conditions holds: \begin{enumerate} \item the Karush--Kuhn--Tucker conditions hold for problem \eqref{eq:pop} at $x^\star$; \item there exists a sequence of points $(x^{(t)},\bar \lambda^{(t)})_{t\in\N}$ in $(S(g)\times \R^{m+1})\cap V(h_{\FJ})$ such that $(f(x^{(t)}))_{t\in\N}$ converges to $\bar f^\star$ and $\lambda_0^{(t)}>0$, \end{enumerate} Set \begin{equation} r=b(n+m+1,2(\omega+1),m+n+2)/2\,. \end{equation} Then $\rho_r(f,g,h_{\FJ},\lambda_0)=f^\star$. \end{theorem} \begin{proof} Note that \cite[Lemma 17]{mai2022exact} implies \eqref{eq:equi.prob1}. Note that the first condition implies that the Fritz John conditions hold for problem \eqref{eq:pop} at $x^\star$ with multipliers $\bar\lambda^\star$ such that $\lambda_0^\star>0$, so that $(x^\star,\bar\lambda^\star)$ is a global minimizer for problem \eqref{eq:equi.prob1} with $\lambda_0^\star>0$. The second condition implies that each $(x^{(t)},\bar \lambda^{(t)})$ is a feasible solution for problem \eqref{eq:equi.prob1} such that $(f(x^{(t)}))_{t\in\N}$ converges to $\bar f^\star$ and $\lambda_0^{(t)}>0$. 
The third statement of Lemma \ref{lem:mom.sos.deno} says that $\rho_k(f,g,h_{\FJ},\lambda_0)\le f^\star$, for every $k\in\N$. In addition, Theorem \ref{theo:rep.quadra.module.deno} yields that there exists $q\in Q_\omega(g)[x,\bar \lambda]$ such that $\lambda_0(f-f^\star-q)$ vanishes on $V(h_{\FJ})$. Applying the final statement of Lemma \ref{lem:mom.sos.deno} (by replacing $h,\theta$ with $h_{\FJ},\lambda_0$), we obtain the conclusion. \end{proof} \begin{remark} Although they need the same assumption that the Karush--Kuhn--Tucker conditions hold at some global minimizer, the semidefinite relaxations in Theorem \ref{theo:pop.quadra.deno} have better complexity than the ones in Theorem \ref{theo:pop.KKT.plus}. This is because they do not require an exponential number of matrix variables in each semidefinite relaxation. \end{remark} \paragraph{Acknowledgements.} The author was supported by funding from ANITI. \bibliographystyle{abbrv} \input{GradientIdeals3.bbl} \end{document}
2205.11768v3
http://arxiv.org/abs/2205.11768v3
Isometric immersions of RCD$(K,N)$ spaces via heat kernels
\documentclass[12pt]{article} \usepackage{mathrsfs} \usepackage{amsmath} \usepackage{esint} \usepackage[all]{xy} \usepackage{amssymb} \usepackage{amsfonts} \usepackage{amsthm} \usepackage{color} \usepackage{graphicx} \usepackage{hyperref} \usepackage{bm} \usepackage{indentfirst} \usepackage{geometry} \geometry{a4paper,scale=0.7} \theoremstyle{plain}\newtheorem{thm}{Theorem}[section] \newtheorem{lem}[thm]{Lemma} \newtheorem{prop}[thm]{Proposition} \newtheorem{ques}[thm]{Question} \newtheorem{property}[thm]{Property} \newtheorem{cor}[thm]{Corollary} \theoremstyle{definition} \newtheorem{defn}[thm]{Definition} \theoremstyle{remark} \newtheorem{remark}[thm]{Remark} \newtheorem{fact}[thm]{Fact} \newtheorem{exmp}[thm]{Example} \numberwithin{equation}{section} \begin{document} \title {\bf Isometric immersions of RCD($K,N$) spaces via heat kernels} \author{\it Zhangkai Huang \thanks{ Tohoku University: [email protected]}} \date{\small\today} \maketitle \begin{abstract} Given an RCD$(K,N)$ space $({X},\mathsf{d},\mathfrak{m})$, one can use its heat kernel $\rho$ to map it into the $L^2$ space by a locally Lipschitz map $\Phi_t(x):=\rho(x,\cdot,t)$. The space $(X,\mathsf{d},\mathfrak{m})$ is said to be an isometrically heat kernel immersing space, if each $\Phi_t$ is an isometric immersion {}{after a normalization}. A main result states that any compact isometrically heat kernel immersing RCD$(K,N)$ space is isometric to an unweighted closed smooth Riemannian manifold. This is justified by a more general result: if a compact non-collapsed RCD$(K, N)$ space has an isometrically immersing eigenmap, then the space is isometric to an unweighted closed Riemannian manifold, which greatly improves a regularity result in \cite{H21} by Honda. As an application of these results, we give a $C^\infty$-compactness theorem for a certain class of Riemannian manifolds with a curvature-dimension-diameter bound and an isometrically immersing eigenmap. \end{abstract} \tableofcontents \section{Introduction} \subsection{Isometric immersions on Riemannian manifolds} Let $( M^n,g)$ be an $n$-dimensional closed, that is, compact without boundary, Riemannian manifold. A map \[ \begin{aligned} F: M^n &\longrightarrow \mathbb{R}^{m}\\ \ p&\longmapsto (\phi_1(p),\ldots,\phi_m(p)) \end{aligned} \] is said to be an \textit{isometrically immersing eigenmap} if each $\phi_i$ is a non-constant eigenfunction of $-\Delta$ and $F$ is an isometric immersion in the following sense: \begin{align}\label{aaaeqn1.1} F^\ast g_{\mathbb{R}^m}=\sum\limits_{i=1}^m d\phi_i \otimes d\phi_i=g. \end{align} Let us recall a theorem of Takahashi in \cite{Ta66} which states that if $(M^n,g)$ is additionally homogeneous and irreducible, then for any eigenspace $V$ corresponding to some non-zero eigenvalue of $-\Delta$, there exists an $L^2(\mathrm{vol}_g)$-orthogonal basis $\{\phi_i\}_{i=1}^m$ ($m=\mathrm{dim}(V)$) of $V$ realizing (\ref{aaaeqn1.1}). Besides, $(M^n,g)$ can be also smoothly embedded into an infinite dimensional Hilbert space by using its heat kernel ${}{\rho}: M^n\times M^n\times (0,\infty)\rightarrow (0,\infty)$. More precisely, B\'{e}rard and B\'{e}rard-Besson-Gallot \cite{B85,BBG94} prove that the following map, which is called \textit{the $t$-time heat kernel mapping} in this paper, \[ \begin{aligned} \Phi_t: M^n&\longrightarrow L^2(\text{vol}_g) \\ x&\longmapsto\left(y\longmapsto\rho(x,y,t)\right), \end{aligned} \] is a smooth embedding. 
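To illustrate this construction with an elementary example (included here only for illustration), consider the unit circle $\mathbb{S}^1=\mathbb{R}/2\pi\mathbb{Z}$ with its standard metric. Its heat kernel is \[ \rho(x,y,t)=\frac{1}{2\pi}+\frac{1}{\pi}\sum\limits_{k=1}^\infty e^{-k^2t}\cos\left(k(x-y)\right), \] so that, up to the constant term, the coordinates of $\Phi_t(x)$ with respect to the $L^2(\mathrm{vol}_g)$-orthonormal functions $\{\cos(k\cdot)/\sqrt{\pi},\sin(k\cdot)/\sqrt{\pi}\}_{k\geqslant 1}$ are $\frac{1}{\sqrt{\pi}}\left(e^{-k^2t}\cos(kx),e^{-k^2t}\sin(kx)\right)_{k\geqslant 1}$. In particular $\Phi_t$ is injective with nowhere vanishing differential, and by rotational symmetry the pull-back metric $g_t$ introduced below is a constant multiple of $g$ for every $t>0$.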
Moreover, one can use $\Phi_t$ to pull-back the flat Riemannian metric $g_{L^2}$ on $L^2(\mathrm{vol}_g)$ to get a metric tensor $g_t:=\Phi_t^\ast\left(g_{L^2}\right)$ with the following asymptotic formula: \begin{equation}\label{eqn1.1} 4(8\pi)^{\frac{n}{2}} t^{\frac{n+2}{2}}g_t=g-\frac{2t}{3}\left(\mathrm{Ric}_g-\frac{1}{2}\mathrm{Scal}_g g\right)+O(t^2),\ \ \ \ t\downarrow 0. \end{equation} Again when $(M^n,g)$ is additionally homogeneous and irreducible, it follows from another theorem by Takahashi \cite[Theorem 3]{Ta66} that there exists a non-negative function $c(t)$ such that for all $t>0$, $\sqrt{c(t)}\Phi_t$ is an isometric immersion. The observations above lead us to ask the following two questions. \begin{ques}\label{q1.2} How to characterize a manifold admitting an isometrically immersing eigenmap? \end{ques} \begin{ques}\label{q1.1} How to characterize a manifold such that each $t$-time heat kernel mapping is an isometric immersion after a normalization? \end{ques} Note that if each $t$-time heat kernel mapping of a closed Riemannian manifold $(M^n,g)$ is an isometric immersion after a normalization, then $(M^n,g)$ admits an isometrically immersing eigenmap. Standard spectral theory of elliptic operators implies that there exists an orthonormal basis $\{\varphi_i\}_{i=1}^\infty$ in $L^2(\mathrm{vol}_g)$ such that each $\varphi_i$ is an eigenfunction of $-\Delta$ with corresponding eigenvalue $\lambda_i$, and that $\{\lambda_i\}_{i=1}^\infty$ satisfies \[ 0=\lambda_0<\lambda_1\leqslant \lambda_2\leqslant \cdots\leqslant \lambda_i\rightarrow\infty. \] Then the classical estimates for eigenvalues $\lambda_i$ show that \begin{align}\label{aeqn1.3} g=c(t) g_t=c(t)\sum\limits_{i=1}^\infty e^{-2\lambda_i t}d\varphi_i\otimes d\varphi_i, \ \forall t>0. \end{align} These estimates also allow us to let $t\rightarrow \infty$ in (\ref{aeqn1.3}) to get (\ref{aaaeqn1.1}) with $\phi_i=\lim_{t\rightarrow \infty}c(t)e^{-\lambda_1 t}\varphi_i$ ($i=1,\cdots,m$), where $m$ is the dimension of the eigenspace corresponding to $\lambda_1$. The main purposes of the paper are to give positive answers to the both questions above in a non-smooth setting, so-called RCD$(K, N)$ metric measure spaces, explained in the next subsection. \subsection{Isometric immersions on RCD$(K,N)$ spaces} \subsubsection{Metric measure spaces satisfying the RCD$(K,N)$ condition} A triple $({X},\mathsf{d},\mathfrak{m})$ is said to be a metric measure space if $({X},\mathsf{d})$ is a complete separable metric space and $\mathfrak{m}$ is a nonnegative Borel measure with full support on $X$ and being finite on any bounded subset of ${X}$. In the first decade of this century, Sturm \cite{St06a, St06b} and Lott-Villani \cite{LV09} independently define a notion of a lower Ricci curvature bound $K\in \mathbb{R}$ and an upper dimension bound $N\in [1,\infty]$ for metric measure spaces in a synthetic sense, which is named as the CD$(K,N)$ condition. A metric measure space is said to be an RCD$(K,N)$ space if it satisfies the CD$(K,N)$ condition, and its associated $H^{1,2}$-Sobolev space is a Hilbert space. The precise definition (and the equivalent ones) can be found in \cite{AGS14b,AMS19,G13,G15,EKS15}. 
As an example, any weighted Riemannian manifold $(M^n,\mathsf{d}_g,e^{-f}\mathrm{vol}_g)$ with $f\in C^\infty(M^n)$ and $\mathrm{Ric}_N\geqslant Kg$ is an RCD$(K,N)$ space, where $\mathrm{Ric}_N$ is the Bakry-\'{E}mery $N$-Ricci curvature tensor defined by \[ \mathrm{Ric}_N:= \left\{\begin{array}{ll} \mathrm{Ric}_g+\mathrm{Hess}_g(f)-\frac{df\otimes df}{N-n}&\text{if}\ N>n,\\ \mathrm{Ric}_g& \text{if $N=n$ and $f$ is a constant},\\ -\infty&\text{otherwise}. \end{array}\right. \] In the sequel, we always assume that $N$ is finite. Given an RCD$(K,N)$ space $({X},\mathsf{d},\mathfrak{m})$, with the aid of a work by Bru\`e-Semola \cite{BS20}, there exists a unique $n\in [1,N]\cap \mathbb{N}$, which is called the essential dimension of $({X},\mathsf{d},\mathfrak{m})$ and is denoted by $n:=\mathrm{dim}_{\mathsf{d},\mathfrak{m}}({X})$, such that the $n$-dimensional regular set $\mathcal{R}_n$ (see Definition \ref{111def2.18}) satisfies that $\mathfrak{m}=\theta \mathcal{H}^n\llcorner \mathcal{R}_n$ for some Borel function $\theta$ (see \cite{AHT18}), where $\mathcal{H}^n$ is the $n$-dimensional Hausdorff measure. It is remarkable that the canonical Riemannian metric $g$ on $({X},\mathsf{d},\mathfrak{m})$ is also well-defined due to a work by Gigli-Pasqualetto \cite{GP16} (see also \cite[Proposition 3.2]{AHPT21} and Definition \ref{111thm2.21}). Then its $\mathfrak{m}$-a.e. pointwise Hilbert-Schmidt norm $|g|_{\mathsf{HS}}$ is equal to $\sqrt{n}$. Let us introduce a special restricted class of RCD$(K, N)$ spaces introduced in \cite{DG18} by De Philippis-Gigli as a synthetic counterpart of volume non-collapsed Gromov-Hausdorff limit spaces of Riemannian manifolds with a constant dimension and a lower Ricci curvature bound. The definition is simple: an RCD$(K, N)$ space is said to be non-collapsed if the reference measure is $\mathcal{H}^N$. {}{It can be easily shown that in this case $N$ must be an integer}. Non-collapsed RCD$(K, N)$ spaces have nicer properties than general RCD$(K,N)$ spaces. See also for instance \cite{ABS19, KM21}. \subsubsection{Isometrically heat kernel immersing RCD$(K,N)$ spaces} Thanks to works by Sturm \cite{St95, St96} and by Jiang-Li-Zhang \cite{JLZ16}, the heat kernel on an RCD$(K,N)$ space $({X},\mathsf{d},\mathfrak{m})$ has a locally Lipschitz representative $\rho$ with Gaussian estimates. This allows us to {}{construct $\Phi_t$ analogously as \[ \begin{aligned} \Phi_t:X&\longrightarrow L^2(\mathfrak{m})\\ x&\longmapsto (y\longmapsto \rho(x,y,t)), \end{aligned} \] which also naturally induces the pull back metric $g_t:=\Phi_t^\ast(g_{L^2(\mathfrak{m})})$.} One can also generalize formula (\ref{eqn1.1}) to this setting with the $L^p_{\mathrm{loc}}$ convergence as follows, see \cite[Theorem 5.10]{AHPT21} and \cite[Theorem 3.11]{BGHZ21} for the proof. \begin{thm}\label{20211222a} Let $({X},\mathsf{d},\mathfrak{m})$ be an $\mathrm{RCD}(K,N)$ space with $\mathrm{dim}_{\mathsf{d},\mathfrak{m}}({X})=n$, then for any $p\in [1,\infty)$ and any bounded Borel set $A\subset X$, we have the following convergence in $L^p(A,\mathfrak{m})$: \[ \left| t\mathfrak{m}(B_{\sqrt{t}}(\cdot))g_t-c(n) g\right|_{\mathsf{HS}}\rightarrow 0, \ \ \text{as }t\downarrow 0, \] where $c(n)$ is a constant depending only on $n$. \end{thm} In connection with Question \ref{q1.1} in this setting, let us provide the following definition. 
\begin{defn}[Isometrically heat kernel immersing RCD$(K,N)$ spaces] An RCD$(K,N)$ space $({X},\mathsf{d},\mathfrak{m})$ is said to be an \textit{isometrically heat kernel immersing} space, or briefly an IHKI space, if there exists a non-negative function $c(t)$ such that $\sqrt{c(t)}\Phi_t$ is an isometric immersion for all $t>0$, namely \[ c(t)g_t=\left(\sqrt{c(t)}\mathop{\Phi_t}\right)^\ast\left(g_{L^2(\mathfrak{m})}\right)=g,\ \forall t>0. \] \end{defn} We are now in a position to introduce the first main result of this paper. \begin{thm}\label{thm1.2} Let $({X},\mathsf{d},\mathfrak{m})$ be an $\mathrm{RCD}(K,N)$ space. Then the following two conditions are equivalent. \begin{enumerate} \item[$(1)$]\label{thm1.1con1} There exist sequences $\{t_i\}\subset \mathbb{R}$ and $\{s_i\}\subset \mathbb{R}$ such that $t_i\rightarrow t_0$ for some $t_0>0$ and that $s_i\Phi_{t_i}$ is an isometric immersion for any $i$. \item[$(2)$] $({X},\mathsf{d},\mathfrak{m})$ is an $\mathrm{IHKI}$ $\mathrm{RCD}(K,N)$ space. \end{enumerate} \end{thm} \begin{remark} Theorem \ref{thm1.2} is sharp in the following sense: there exists a closed Riemannian manifold $(M^n, g)$ such that it is not IHKI and that $c\Phi_{t_0}$ is an isometric immersion for some $c>0$ and some $t_0>0$. See Example \ref{exmp4.5}. \end{remark} Recalling that $g_t$ plays the role of a ``regularization'' of an RCD$(K, N)$ space as discussed in \cite{BGHZ21}, it is expected that IHKI RCD$(K, N)$ spaces have nice regularity properties. Along these lines, we end this subsection by collecting such regularity results as follows. \begin{thm}\label{mainthm1.3} Let $({X},\mathsf{d},\mathfrak{m})$ be an $\mathrm{IHKI}$ $\mathrm{RCD}(K,N)$ space with $\mathrm{dim}_{\mathsf{d},\mathfrak{m}}({X})=n\geqslant 1$. Then there exists $c>0$ such that $\mathfrak{m}=c\mathcal{H}^n$ and that $({X},\mathsf{d},\mathfrak{m})$ is an $\mathrm{RCD}(K,n)$ space. In particular, $({X},\mathsf{d},\mathcal{H}^n)$ is a non-collapsed $\mathrm{RCD}(K,n)$ space. \end{thm} \begin{thm}\label{mainthm1.5} Assume that $({X},\mathsf{d},\mathfrak{m})$ is a non-compact $\mathrm{IHKI}$ $\mathrm{RCD}(0,N)$ space with $\mathrm{dim}_{\mathsf{d},\mathfrak{m}}({X})=n\geqslant 2$. Then $({X},\mathsf{d},\mathfrak{m})$ is isometric to $\left(\mathbb{R}^n,\mathsf{d}_{\mathbb{R}^n},c\mathcal{H}^n\right)$ for some $c>0$. \end{thm} Let us emphasize that in the compact setting we will be able to provide the best regularity result, namely the smoothness result (see Theorem \ref{thm1.5} and Corollary \ref{cor1.11}). \subsubsection{Isometrically immersing eigenmaps on RCD$(K,N)$ spaces} In order to discuss a finite dimensional analogue of the IHKI condition, let us recall the following definition. \begin{defn}[Isometric immersion {\cite[Definition 3.1]{H21}}] Let $m\in \mathbb{N}_+$ and let $(X,\mathsf{d},\mathfrak{m})$ be an RCD$(K,N)$ space. A map \[ \begin{aligned} \Phi:X&\longrightarrow \mathbb{R}^m\\ x&\longmapsto (\phi_1(x),\ldots,\phi_m(x)) \end{aligned} \] is said to be an \textit{isometric immersion} if it is locally Lipschitz and \begin{align}\label{20221207a} \Phi^\ast g_{\mathbb{R}^m}:=\sum\limits_{i=1}^m d\phi_i\otimes d\phi_i =g. \end{align} \end{defn} We are now ready to give an answer to Question \ref{q1.2} in the nonsmooth setting. \begin{thm}\label{thm1.5} Let $({X},\mathsf{d},\mathcal{H}^n)$ be a compact non-collapsed $\mathrm{RCD}(K,n)$ space.
If there exists an isometric immersion \[ \begin{aligned} \Phi:X&\longrightarrow \mathbb{R}^m\\ x&\longmapsto (\phi_1(x),\ldots,\phi_m(x)) \end{aligned} \] such that each $\phi_i$ is an eigenfunction of $-\Delta$ $(i=1,\ldots,m)$, then $({X},\mathsf{d})$ is isometric to an $n$-dimensional smooth closed Riemannian manifold $(M^n,g)$. \end{thm} It is emphasized again that the theorem above greatly improves a bi-Lipschitz regularity result proved in \cite{H21} and seems to provide the smoothness for a much wider class of RCD spaces than existing results as far as the author knows (see for instance \cite{K15b,GR18,MW19} for the special cases). \begin{remark} An isometrically immersing eigenmap may not be an embedding in general. See for instance \cite[Theorem 5]{L81}. \end{remark} As a corollary of Theorem \ref{thm1.5}, we obtain the following result, meaning that any compact IHKI RCD$(K,N)$ space must be smooth. \begin{cor}\label{cor1.11} Let $({X},\mathsf{d},\mathcal{H}^n)$ be a compact non-collapsed $\mathrm{IHKI}$ $\mathrm{RCD}(K,n)$ space. Let $E$ be the eigenspace with some non-zero corresponding eigenvalue $\lambda$ of $-\Delta$. Then by taking $\{\phi_i\}_{i=1}^m$ $(m=\mathrm{dim}(E))$ as an $L^2(\mathfrak{m})$-orthonormal basis of $E$, the map \[ \begin{aligned} \Phi:{X}&\longrightarrow \mathbb{R}^m\\ x&\longmapsto\sqrt{\dfrac{\mathcal{H}^n({X})}{m}}(\phi_1,\cdots,\phi_m), \end{aligned} \] satisfies that \[ \Phi({X})\subset \mathbb{S}^{m-1}\ \ \text{and}\ \ n\Phi^\ast g_{\mathbb{R}^m}=\lambda g. \] In particular, $(X,\mathsf{d})$ is isometric to an $n$-dimensional smooth closed Riemannian manifold $(M^n,g)$. \end{cor} \subsection{Diffeomorphic finiteness theorems} As an application of Theorem \ref{thm1.5}, in Section \ref{sec5} we first study some special isometry classes of closed Riemannian manifolds admitting isometrically immersing $\tau$-eigenmaps. \begin{defn}[Isometrically immersing $\tau$-eigenmap on Riemannian manifolds] Let $(M^n,g)$ be an $n$-dimensional closed Riemannian manifold and let $\tau>0$. A map \[ \begin{aligned} F: M^n&\longrightarrow \mathbb{R}^m\\ p&\longmapsto \left(\phi_1(p),\ldots,\phi_m(p)\right), \end{aligned} \] is said to be a \textit{$\tau$-eigenmap into $\mathbb{R}^m$} if each $\phi_i$ is a non-constant eigenfunction of $-\Delta$ and \[ \min\limits_{1\leqslant i\leqslant m} \|\phi_i\|_{L^2( \mathrm{vol}_g)}\geqslant \tau. \] If in addition $F$ is an isometric immersion, then it is said to be an \textit{isometrically immersing $\tau$-eigenmap into $\mathbb{R}^m$}. \end{defn} \begin{defn}[Isometric immersion via $\tau$-eigenmaps]\label{defn1.7} For all $K\in \mathbb{R}$, $D,\tau>0$, denote by $\mathcal{M}(K,n,D,\tau)$ the set of isometry classes of $n$-dimensional closed Riemannian manifolds $( M^n,g)$ such that the Ricci curvature is bounded below by $K$, that the diameter is bounded above by $D$ and that there exists an isometrically immersing $\tau$-eigenmap into $\mathbb{R}^m$ for some $m \in \mathbb{N}$. \end{defn} Our main result about $\mathcal{M}(K,n,D,\tau)$ is stated as follows. \begin{thm}\label{thm1.8} $\mathcal{M}(K,n,D,\tau)$ is compact in $C^\infty$-topology. That is, for any sequence of Riemannian manifolds $\{( M_i^n,g_i)\}\subset\mathcal{M}(K,n,D,\tau)$, after passing to a subsequence, there exists a Riemannian manifold $(M^n,g)\in \mathcal{M}(K,n,D,\tau)$ and diffeomorphisms $\psi_i: M^n\rightarrow M^n_i$, such that $\{\psi_i^\ast g_i\}$ $C^{k}$-converges to $g$ on $(M^n,g)$ for any $k\in \mathbb{N}$. 
\end{thm} Finally, in order to introduce an improved finiteness result from \cite{H21}, let us give the following definition. \begin{defn}[Almost isometric immersion via $\tau$-eigenmap] For all $K\in \mathbb{R}$, $D,\tau>0$, $\epsilon\geqslant 0$, denote by $\mathcal{N}(K,n,D,\tau,\epsilon)$ the set of isometry classes of $n$-dimensional closed Riemannian manifolds $(M^n,g)$ such that the Ricci curvature is bounded below by $K$, that the diameter is bounded above by $D$ and that there exists a $\tau$-eigenmap $F_{M^n}$ into $\mathbb{R}^m$ for some $m \in \mathbb{N}$ with \[ \frac{1}{\mathrm{vol}_{g}(M^n)}\int_{M^n}\left| F_{M^n}^\ast g_{\mathbb{R}^m}-g\right|\mathrm{dvol}_g\leqslant \epsilon. \] \end{defn} Note that $\mathcal{N}(K,n,D,\tau,0)=\mathcal{M}(K,n,D,\tau)$. Combining the intrinsic Reifenberg method established in \cite[Appendix A]{ChCo1} by Cheeger-Colding with Theorem \ref{thm1.5} gives us the following diffeomorphic finiteness theorem. \begin{thm}\label{thm1.12} There exists $\epsilon=\epsilon(K,n,D,\tau)>0$ such that $\mathcal{N}(K,n,D,\tau,\epsilon)$ has finitely many members up to diffeomorphism. \end{thm} \subsection{Outline of the proofs} The proofs of Theorems \ref{mainthm1.3} and \ref{mainthm1.5} are based on blow up and blow down arguments. See also the proofs of \cite[Theorem 2.19]{AHPT21} and \cite[Theorem 3.11]{BGHZ21} for related arguments. The most delicate part of this paper is in the proof of Theorem \ref{thm1.5}, which makes full use of the equations for eigenfunctions, i.e. $\Delta \phi_i=-\mu_i\phi_i$ ($i=1,\ldots,m$). Note that one can easily obtain $L^\infty$-bounds of the Laplacian and the gradient of each $\phi_i$ from the estimates in \cite{J14,JLZ16,ZZ19,AHPT21} (see also Proposition \ref{heatkernel2}). In order to explain this more precisely, let us start with the following key equation (obtained by taking the trace of \eqref{20221207a}): \begin{equation}\label{aaaaa1111122} \sum\limits_{i=1}^m |\nabla \phi_i|^2=n. \end{equation} Since the lower bound of each $\Delta |\nabla \phi_i|^2$ comes directly from the Bochner inequality (see (\ref{bochnerineq})), (\ref{aaaaa1111122}) then guarantees the upper bound of each $\Delta |\nabla \phi_i|^2$ due to the following equality: \[ \Delta|\nabla \phi_i|^2=-\sum\limits_{j\neq i}^m \Delta|\nabla \phi_j|^2. \] Therefore we have a uniform $L^\infty$-bound of all $|\nabla\langle \nabla \phi_i,\nabla\phi_j\rangle|$, which implies the $C^{1,1}$ differentiable structure of the space. Indeed, locally one can pick $\{u_i\}_{i=1}^n$ consisting of linear combinations of eigenfunctions $\phi_i$ and construct a bi-Lipschitz map $x\mapsto (u_1(x),\ldots,u_n(x))$; in these coordinates each $\phi_i$ satisfies the following PDE: \[ \sum\limits_{j,k=1}^n \langle \nabla u_j,\nabla u_k\rangle\frac{\partial^2 \phi_i}{ \partial u_j \partial u_k}+\sum\limits_{j=1}^n\Delta u_j \frac{\partial \phi_i}{ \partial u_j }+\mu_i \phi_i=0. \] Then the smoothness of the space is justified by applying elliptic regularity theory. Finally, a technique similar to the one in the proof of Theorem \ref{thm1.5} allows us to control each higher order covariant derivative of the Riemannian metric $g$ of $(M^n, g) \in \mathcal{M}(K,n,D,\tau)$ quantitatively. Thus we can then apply a theorem of Hebey-Herzlich proved in \cite{HH97} to get the desired smooth compactness result, Theorem \ref{thm1.8}. \textbf{Acknowledgement.} The author acknowledges the support of JST SPRING, Grant Number JPMJSP2114. He is grateful to the referee for carefully reading the paper and for giving many valuable suggestions.
He thanks his supervisor Professor Shouhei Honda for his advice and encouragement. He also thanks Yuanlin Peng and Zuyi Zhang for their comments on this paper. \section{{}{Notation} and preliminary results}\label{sec2} Throughout this paper we will use standard {}{notation} in this topic. For example \begin{itemize} \item Denote by $C(K_1,\ldots,K_n)$ a positive constant depending on $K_1,\ldots,K_n$, and $\Psi=\Psi(\epsilon_1,\ldots,\epsilon_k|c_1,\ldots c_j)$ some nonnegative function determined by $\epsilon_1,\ldots,\epsilon_k$, $c_1,\ldots, c_j$ such that \[ \lim\limits_{\epsilon_1,\ldots,\epsilon_k\rightarrow 0}\Psi=0,\ \text{for any fixed}\ c_1,\ldots c_j. \] \item Denote by $\omega_n$ the $n$-dimensional Hausdorff measure of the unit ball in $\mathbb{R}^n$ which coincides with the usual volume of the unit ball in $\mathbb{R}^n$, and by $\mathcal{L}^n$ the standard Lebesgue measure on $\mathbb{R}^n$. \end{itemize} We may use superscripts or subscripts when it is necessary to distinguish objects (for example, the Riemannian metrics, the gradients, etc.) on different spaces in this paper. \subsection{Metric spaces} {}{We fix some basic definitions} and {}{notation} about metric spaces in this subsection. {}{Let $({X},\mathsf{d})$ be a complete separable metric space.} Denote by $ \text{Lip}({X},\mathsf{d})$ (resp. $\text{Lip}_b({X},\mathsf{d})$, $\text{Lip}_c({X},\mathsf{d})$, $C_\text{c}({X})$) the set of all Lipschitz functions (resp. bounded Lipschitz functions, compactly supported Lipschitz functions, compactly supported continuous functions) on ${}{({X},\mathsf{d})}$. For any $f\in \text{Lip}({X},\mathsf{d})$, the local Lipschitz constant of $f$ at {}{a point} $x\in {X}$ is defined by \[ \text{lip}\ f(x)=\left\{\begin{aligned} \limsup\limits_{y\rightarrow x} \frac{|f(y)-f(x)|}{\mathsf{d}(y,x)}&\ \ \ \text{if $x\in {X}$ is not isolated},\\ 0\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ &\ \ \ \text{otherwise}. \end{aligned}\right. \] If $({X},\mathsf{d})$ is compact, then the diameter of ${X}$ is defined by \[ \mathrm{diam}({X},\mathsf{d}):=\sup_{x,y\in{X}}\mathsf{d}(x,y). \] For a map $f:{X}\rightarrow {Y}$ from $({X},\mathsf{d})$ to another complete metric space $({Y},\mathsf{d}_Y)$, $f$ is said to be $C$-bi-Lipschitz from ${X}$ to $f({X})$ for some $C\geqslant 1$ if \[ C^{-1}\mathsf{d}(x_1,x_2)\leqslant \mathsf{d}_Y(f(x_1),f(x_2))\leqslant C\mathsf{d}(x_1,x_2),\ \forall x_1,x_2\in{X}. \] We also denote by $B_R(x)$ the set $\{y\in{X}: \mathsf{d}(x,y)<R\}$, and by $B_\epsilon(A)$ the set $\{x\in {X}:\mathsf{d}(x,A)<\epsilon\}$ for any $A\subset {X}$, $\epsilon>0$. In particular, denote by $B_r(0_n):=\{x\in \mathbb{R}^n:|x|< r\}$ for any $r>0$. \subsection{RCD$(K,N)$ spaces: definition and basic properties}\label{sec2.2} {}{Let $({X},\mathsf{d},\mathfrak{m})$ be a metric measure space}. \begin{defn}[Cheeger energy] The Cheeger energy Ch: $L^2(\mathfrak{m})\rightarrow [0,\infty]$ is defined by \[ \text{Ch}(f):=\inf\limits_{\{f_i\}}\left\{ \liminf\limits_{i\rightarrow \infty} \int_{{X}} |\text{lip}\mathop{f_i}|^2 \mathrm{d}\mathfrak{m} \right\}, \] where the infimum is taken among all sequences $\{f_i\}$ satisfying $f_i\in \text{Lip}_b({X},\mathsf{d})\cap L^2(\mathfrak{m})$ and $\left\|f_i-f\right\|_{L^2(\mathfrak{m})}\rightarrow 0$. \end{defn} The domain of the Cheeger energy, denoted by $D\text{(Ch)}$, is the set of all $f\in L^2(\mathfrak{m})$ with $\text{Ch}(f)<\infty$. 
It is dense in $L^2(\mathfrak{m})$, and is a Banach space when equipped with the norm $\sqrt{\text{Ch}(\cdot)+\left\|\cdot\right\|_{L^2(\mathfrak{m})}^2}$. This Banach space is the Sobolev space $H^{1,2}({X},\mathsf{d},\mathfrak{m})$. In addition, for any $f\in H^{1,2}({X},\mathsf{d},\mathfrak{m})$, it is known that there exists a {}{unique} $|\text{D}f|\in L^2(\mathfrak{m})$ such that \[ \text{Ch}(f)=\int_{{X}} |\text{D}f|^2 \mathrm{d}\mathfrak{m}. \] This $|\text{D}f|$ is called the minimal relaxed slope of $f$ and satisfies the {}{locality property, that is}, for any other $h \in H^{1,2}({X},\mathsf{d},\mathfrak{m})$, $|\mathrm{D}f|=|\mathrm{D}h|$ $\mathfrak{m}$-a.e. on $\{x\in{X}:f=h\}$. {}{In particular}, $({X},\mathsf{d},\mathfrak{m})$ is said to be infinitesimally Hilbertian if $H^{1,2}({X},\mathsf{d},\mathfrak{m})$ is a Hilbert space. In this case, for any $f,h\in H^{1,2}({X},\mathsf{d},\mathfrak{m})$, the following $ L^1(\mathfrak{m}) $ integrable function is well-defined \cite{AGS14b}: \[ \langle \nabla f, \nabla h\rangle := \lim_{\epsilon \rightarrow 0}\frac{|\text{D}(f+\epsilon h)|^2-|\text{D} f|^2}{2\epsilon}. \] \begin{remark} For any $f\in H^{1,2}({X},\mathsf{d},\mathfrak{m})$, it is clear that \[ |\nabla f|^2:=\langle \nabla f,\nabla f\rangle=|\mathrm{D}f|^2,\ \mathfrak{m}\text{-a.e.} \] \end{remark} \begin{defn}[The Laplacian \cite{G15}] Assume that $({X},\mathsf{d},\mathfrak{m})$ is infinitesimally Hilbertian. The domain of Laplacian, namely $D(\Delta)$, is defined as the set of all $f\in H^{1,2}({X},\mathsf{d},\mathfrak{m})$ such that \[ \int_{{X}} \langle \nabla f, \nabla \varphi\rangle \mathrm{d}\mathfrak{m}= - \int_{{X}} h\varphi \mathrm{d}\mathfrak{m},\ \ \forall \varphi \in H^{1,2}({X},\mathsf{d},\mathfrak{m}), \] for some $h\in L^2(\mathfrak{m})$. In particular, denote by $\Delta f:= h$ for any $f\in D(\Delta)$ because $h$ is unique if it exists. \end{defn} We are now ready to introduce the definition of RCD$(K,N)$ spaces. {}{The following is an equivalent definition with the one proposed in \cite{G15}, and the equivalence is proved in \cite{AGS15,EKS15}. See also \cite{AMS19}.} \begin{defn} Let $K\in \mathbb{R}$ and $N\in [1,\infty)$. $({X},\mathsf{d},\mathfrak{m})$ is said to be an RCD$(K,N)$ space {}{if and only if} it satisfies the following conditions. \begin{enumerate} \item $({X},\mathsf{d},\mathfrak{m})$ is infinitesimally Hilbertian. \item There exists $ x \in {X}$ and $C >0$, such that {}{for any $r>0$}, $\mathfrak{m} (B_r(x)) \leqslant C e^{Cr^2}$. 
\item (Sobolev to Lipschitz property) If $f \in H^{1,2}({X},\mathsf{d},\mathfrak{m})$ with $|\text{D} f|\leqslant 1$ $\mathfrak{m}$-a.e., then $f$ has a 1-Lipschitz {}{representative, that is,} there exists {}{a 1-Lipschitz function $h$ such that $h=f$ $\mathfrak{m}$-a.e.} \item ({}{Bochner} inequality) For any {}{$f\in D(\Delta)$ with $\Delta f \in H^{1,2}({X},\mathsf{d},\mathfrak{m})$}, the following holds for any $\varphi \in \mathrm{Test}F\left({X},\mathsf{d},\mathfrak{m}\right)$ with $ \varphi \geqslant 0$, \begin{equation}\label{bochnerineq} \frac{1}{2}\int_{X} |\nabla f|^2 \Delta \varphi \mathrm{d}\mathfrak{m} \geqslant \int_{X} \varphi \left(\langle \nabla f , \nabla \Delta f \rangle +K |\nabla f|^2 + \frac{(\Delta f)^2}{N} \right) \mathrm{d}\mathfrak{m}, \end{equation} where $\mathrm{Test}F({X},\mathsf{d},\mathfrak{m})$ is the class of test functions defined by \end{enumerate} \[ \mathrm{Test}F({X},\mathsf{d},\mathfrak{m}):=\{f\in \text{Lip}({X},\mathsf{d})\cap D(\Delta)\cap L^\infty(\mathfrak{m}):\Delta f\in H^{1,2}({X},\mathsf{d},\mathfrak{m})\cap L^\infty(\mathfrak{m})\}. \] If in addition $\mathfrak{m}=\mathcal{H}^N$, then $({X},\mathsf{d},\mathfrak{m})$ is said to be a non-collapsed RCD$(K,N)$ space. \end{defn} For the class of test functions on an RCD$(K,N)$ space $({X},\mathsf{d},\mathfrak{m})$, by \cite{S14}, \begin{enumerate} \item $|\nabla f|^2 \in H^{1,2}({X},\mathsf{d},\mathfrak{m})$ for any $f\in \mathrm{Test}F\left({X},\mathsf{d},\mathfrak{m}\right)${}{.} \item Define $\mathrm{Test}F_+({X},\mathsf{d},\mathfrak{m}):=\left\{f\in \mathrm{Test}F\left({X},\mathsf{d},\mathfrak{m}\right): f\geqslant 0\right\}$ and $H^{1,2}_+({X},\mathsf{d},\mathfrak{m}):=\left\{f\in H^{1,2}({X},\mathsf{d},\mathfrak{m}): f\geqslant 0\ \ \mathfrak{m}\text{-a.e.}\right\}$. Then $\mathrm{Test}F_+({X},\mathsf{d},\mathfrak{m})$ (resp. $\mathrm{Test}F({X},\mathsf{d},\mathfrak{m})$) is dense in $H^{1,2}_+({X},\mathsf{d},\mathfrak{m})$ (resp. $H^{1,2}({X},\mathsf{d},\mathfrak{m})$). \end{enumerate} The following inequality is a generalization of the Bishop-Gromov inequality {}{in Riemannian geometry.} \begin{thm}[Bishop-Gromov inequality \cite{LV09,St06b}]\label{BGineq} Assume that $({X},\mathsf{d},\mathfrak{m})$ is an $\mathrm{RCD}(K,N)$ space. Then the following holds for any $x\in {X}$. \begin{enumerate} \item If $N>1$, $K\neq 0$, $r<R\leqslant \pi\sqrt{\dfrac{N-1}{K\lor 0}}$, then $\dfrac{\mathfrak{m}\left(B_R(x)\right)}{\mathfrak{m}\left(B_r(x)\right)}\leqslant \dfrac{\int_0^R V_{K,N}\mathrm{d}t}{\int_0^r V_{K,N}\mathrm{d}t}$, where \[ V_{K,N}(t):=\left\{ \begin{array}{ll} \sin\left(t\sqrt{K/(N-1)}\right)^{N-1}, &\text{if}\ K>0,\\ \sinh\left(t\sqrt{{}{-K}/(N-1)}\right)^{N-1}, &\text{if}\ K<0. \end{array} \right. \] \item If $N=1$ and $K\leqslant 0$, or $N\in (1,\infty)$ and $K= 0$, then $\dfrac{\mathfrak{m}\left(B_R(x)\right)}{\mathfrak{m}\left(B_r(x)\right)}\leqslant \left(\dfrac{R}{r}\right)^{N}$. \end{enumerate} \end{thm} \begin{remark} (\ref{BGinequality}) and (\ref{BGinequality111}) are direct consequences of Theorem \ref{BGineq}, where (\ref{BGinequality111}) is a combination of (\ref{BGinequality}) and the fact that $B_r(x)\subset B_{r+\mathsf{d}(x,y)}(y)$. \begin{equation}\label{BGinequality} \frac{\mathfrak{m}(B_R(x))}{\mathfrak{m}(B_r(x))}\leqslant C(K,N)\exp\left(C(K,N)\frac{R}{r}\right), \ \ \ \forall x\in {X}, \ \forall r<R. 
\end{equation} {}{\begin{equation}\label{BGinequality111} \frac{\mathfrak{m}(B_r(x))}{\mathfrak{m}(B_r(y))}\leqslant C(K,N)\exp\left(C(K,N)\mathop{\frac{r+\mathsf{d}(x,y)}{r}}\right), \ \ \ \forall x,y\in {X}, \ \forall r>0. \end{equation}} \end{remark} For an RCD$(K,N)$ space $({X},\mathsf{d},\mathfrak{m})$, the heat flow associated with its {}{Cheeger energy} is defined as ${}{\{\mathrm{h}_t:L^2(\mathfrak{m})\rightarrow L^2(\mathfrak{m})\}_{t>0}}$ such that for any $f \in L^2(\mathfrak{m})$, $\{{}{\mathrm{h}_t} f\}_{t>0}$ satisfies the following properties. \begin{enumerate} \item (Solution to the heat equation) {}{For any $t>0$}, $\text{h}_t f\in D(\Delta)$ and $\dfrac{\partial}{\partial t}\text{h}_t(f)=\Delta {}{ \mathrm{h}_t f} \ \ \text{in}\ L^2(\mathfrak{m})$. \item (Semigroup property) For any $s,t>0$, ${}{\text{h}_{t+s}}f=\text{h}_t ({}{\text{h}_s} f)$. {}{\item (Contraction on $L^2(\mathfrak{m})$) $\left\|\text{h}_t f\right\|_{L^2(\mathfrak{m})}\leqslant \left\|f\right\|_{L^2(\mathfrak{m})},\ \ \forall t>0$. \item (Commutative with $\Delta$) If $f\in D(\Delta)$, then for any $t>0$, $\text{h}_t (\Delta f)=\Delta (\text{h}_t f)$.} \end{enumerate} For any $p\in [1,\infty]$, $\{\text{h}_t\}_{t>0}$ also acts on $L^p(\mathfrak{m})$ as a linear family of contractions, namely \begin{equation}\label{111eqn2.4} \left\|\text{h}_t \varphi\right\|_{L^p(\mathfrak{m})}\leqslant \left\|\varphi\right\|_{L^p(\mathfrak{m})},\ \ \forall t>0,\ \ \forall \varphi\in L^p(\mathfrak{m}). \end{equation} Set $\hat{1}\in L^\infty(\mathfrak{m})$ as (the equivalence class in $\mathfrak{m}$-a.e. sense of) the function on ${X}$ identically equal to 1. It is now worth pointing out the stochastic completeness of RCD$(K,N)$ spaces as follows: \[ {}{\mathrm{h}_t}(\hat{1})\equiv \hat{1},\ \ \forall t>0. \] Sturm's works \cite{St95, St96} guarantee the existence of a locally H\"older continuous {}{representative} $\rho$ on ${X}\times{X}\times (0,\infty)$ of the heat kernel for $({X},\mathsf{d},\mathfrak{m})$. More precisely, the solution to the heat equation can be expressed by using $\rho$ as follows: \[ \text{h}_t(f)=\int_{{X}} \rho(x,y,t)f(y)\mathrm{d}\mathfrak{m}(y),\ \forall f\in L^2(\mathfrak{m}). \] \begin{remark}[Rescaled RCD space] For any RCD$(K,N)$ space $({X},\mathsf{d},\mathfrak{m})$ and any $a,b\in (0,\infty)$, the rescaled space $({X},a\mathsf{d},b\mathfrak{m})$ is an RCD$(a^{-1}K,N)$ space whose heat kernel $\tilde{\rho}$ can be written as $\tilde{\rho}(x,y,t)=b^{-1}\rho(x,y,a^{-2}t)$. \end{remark} The locally H\"older {}{continuity} of the heat kernel on RCD$(K,N)$ spaces is improved to be locally Lipschitz due to the following Jiang-Li-Zhang's \cite{JLZ16} estimates. \begin{thm}\label{thm2.12} Let $({X},\mathsf{d},\mathfrak{m})$ be an $\mathrm{RCD}(K,N)$ space. Given any $\epsilon>0$, there exist positive constants $C_i=C_i(K,N,\epsilon),i=1,2,3,4$ such that the heat kernel $\rho$ {}{satisfies} the following estimates. \[ \frac{1}{C_1}\exp\left({-\frac{\mathsf{d}^2(x,y)}{(4-\epsilon)t}}-C_2t\right)\leqslant \mathfrak{m}\left(B_{\sqrt{t}}(y)\right)\rho(x,y,t) \leqslant C_1\exp\left({-\frac{\mathsf{d}^2(x,y)}{(4+\epsilon)t}}+C_2t\right) \] holds for all $t>0$, and all $x,y\in {X}$ and \[ |\nabla_x \rho(x,y,t)| \leqslant \frac{C_3}{\sqrt{t}\ \mathfrak{m}\left(B_{\sqrt{t}}(x)\right)}\exp\left({-\frac{\mathsf{d}^2(x,y)}{(4+\epsilon)t}}+C_4t\right) \] holds for all $t>0$ and $\mathfrak{m}$-a.e. $x,y\in {X}$. 
\end{thm} {}{\begin{remark}\label{aaaaarmk2.9} The theories of \cite{D97} are also applicable to RCD$(K,N)$ spaces. In particular, under the assumption of Theorem \ref{thm2.12}, for any $x,y\in {X}$, the function $t\mapsto \rho(x,y,t)$ is analytic. Moreover, for any $n\geqslant 1$, $t\in (0,1)$, and $x,y\in {X}$, the Bishop-Gromov inequality (\ref{BGinequality}), Theorem \ref{thm2.12} and \cite[Theorem 4]{D97} give that, \begin{align}\label{aabbeqn3.7} \left|\frac{\partial^n}{\partial t^n}\rho(x,y,t)\right|\leqslant \frac{C(K,N)n!}{t^n }\left(\mathfrak{m}(B_{\sqrt{t}}(x))\mathfrak{m}(B_{\sqrt{t}}(y))\right)^{-\frac{1}{2}}\exp\left(-\frac{\mathsf{d}^2(x,y)}{100t}\right). \end{align} \end{remark}} For a compact $\mathrm{RCD}(K,N)$ space $({X},\mathsf{d},\mathfrak{m})$, by \cite{J14,JLZ16}, its heat kernel $\rho$ can be expressed as follows. See also \cite[Appendix]{AHPT21}. \begin{equation}\label{heatkernel} \rho(x,y,t)=\sum\limits_{i= 0}^\infty e^{-\mu_i t}\phi_i(x) \phi_i(y) , \end{equation} where eigenvalues of $-\Delta$ counted with multiplicities and the corresponding eigenfunctions are set as follows. \begin{equation}\label{notation2.7} \left\{ \begin{aligned} &0=\mu_0<\mu_1\leqslant \mu_2 \leqslant \cdots \rightarrow +\infty,\\ &-\Delta \phi_i=\mu_i\phi_i,\\ &\{\phi_i\}_{i\in \mathbb{N}}: \text{an orthonormal basis of $L^2(\mathfrak{m})$}. \end{aligned} \right. \end{equation} We may use (\ref{notation2.7}) in Proposition \ref{heatkernel2}, Proposition \ref{1prop2.23} without explanation. The following estimates can be obtained by the Gaussian estimates (Theorem \ref{thm2.12}) and {}{are} useful in this paper. See \cite[Appendix]{AHPT21} and \cite{ZZ19}. \begin{prop}\label{heatkernel2} Let $({X},\mathsf{d},\mathfrak{m})$ be a compact $\mathrm{RCD}(K,N)$ space with $\mathfrak{m}({X})=1$, then there exist $C_j=C_j(K,N,\mathrm{diam}({X},\mathsf{d})) $ $(j=5,6)$, such that for {}{all} $i\geqslant 1$, \[ \left\|\phi_i\right\|_{L^\infty(\mathfrak{m})}\leqslant C_5\mu_i^{N/4},\ \ \ \ \left\|\left|\nabla \phi_i\right|\right\|_{L^\infty(\mathfrak{m})}\leqslant C_5\mu_i^{(N+2)/4},\ \ \ \ C_6 i^{2/N}\leqslant \mu_i\leqslant C_5 i^2. \] \end{prop} The rest of this subsection is based on \cite{GH18,GR20}. We first introduce some basic knowledge of the Euclidean cone over metric measure spaces. Then the background of the product space of metric measure spaces follows. \begin{defn}[Euclidean cone as a metric measure space] Let $({X},\mathsf{d},\mathfrak{m})$ be an RCD$(N-2,N-1)$ space with $N\geqslant 2$. We define the Euclidean cone over $({X},\mathsf{d},\mathfrak{m})$ as the metric measure space $\left(\text{C}({X}),\mathsf{d}_{\text{C}({X})},\mathfrak{m}_{\text{C}({X})}\right)$ as follows. \begin{enumerate} \item The space $\mathrm{C}({X})$ is defined as $\text{C}({X}):= [0,\infty)\times {X}/\left(\{ 0\}\times{X}\right)$. The origin is denoted by $o^\ast$. \item For any two points $(r,x)$ and $(s,y)$, the distance between them is defined as \[ \mathsf{d}_{\text{C}({X})}\left((r,x),(s,y)\right):=\sqrt{r^2+s^2-2rs \cos\left(\mathsf{d}(x,y)\right)}. \] \item The measure of $\mathrm{C}({X})$ is defined as {}{$\mathrm{d}\mathfrak{m}_{\text{C}({X})}(r,x)=r^{N-1}\mathrm{d}r\otimes \mathrm{d}\mathfrak{m}(x)$.} \end{enumerate} \end{defn} \begin{remark}\label{rmk2.10} If $({X},\mathsf{d},\mathfrak{m})$ is an RCD$(N-2,N-1)$ space, then it has an upper diameter bound $\pi$ due to {}{\cite[Theorem 4.3]{O07}}. 
In addition, by \cite[Theorem 1.1]{K15a}, $\left(\text{C}({X}),\mathsf{d}_{\text{C}({X})},\mathfrak{m}_{\text{C}({X})}\right)$ is an RCD$(0,N)$ space {}{if and only if} $({X},\mathsf{d},\mathfrak{m})$ is an RCD$(N-2,N-1)$ space. \end{remark} By \cite[Definition 3.8, Proposition 3.12]{GH18}, for any $f\in H^{1,2}\left(\text{C}({X}),\mathsf{d}_{\text{C}({X})},\mathfrak{m}_{\text{C}({X})}\right)$, it holds that \[ \left(f^{(x)}:r\longmapsto f(r,x)\right)\in H^{1,2}(\mathbb{R},\mathsf{d}_\mathbb{R},{}{r^{N-1}}\mathcal{L}^1), \ \ \mathfrak{m}\text{-a.e.}\ x\in {X}, \] \[ \left(f^{(r)}:x\longmapsto f(r,x)\right)\in H^{1,2}({X},\mathsf{d},\mathfrak{m}),\ \ \ \ {}{r^{N-1}}\mathcal{L}^1\text{-a.e.}\ r\in \mathbb{R}, \] and $\left|\nabla f\right|^2_{\text{C}({X})}$ can be written as \[ \left|\nabla f\right|^2_{\text{C}({X})}(r,x)=\left|\nabla f^{(x)}\right|^2_{\mathbb{R}}(r)+\frac{1}{r^2}\left|\nabla f^{(r)}\right|^2_{{X}}(x) \ \text{$\mathfrak{m}_{\text{C}({X})}$-a.e.}\ (r,x)\in \text{C}({X}). \] Thus for any $f_1, f_2 \in H^{1,2}\left(\text{C}({X}),\mathsf{d}_{\text{C}({X})},\mathfrak{m}_{\text{C}({X})}\right)$, it can be readily checked that for $\text{$\mathfrak{m}_{\text{C}({X})}$-a.e.}\ (r,x)\in \text{C}({X})$, \begin{equation}\label{neiji1} \left\langle \nabla f_1 ,\nabla f_2 \right\rangle_{\text{C}({X})}(r,x)= \left\langle \nabla f_1^{(x)},\nabla f_2^{(x)}\right\rangle_{\mathbb{R}}(r)+\frac{1}{r^2}\left\langle \nabla f_1^{(r)},\nabla f_2^{(r)}\right\rangle_{{X}}(x). \end{equation} In addition, the heat kernel $\rho^{\text{C}({X})}$ on $\left(\text{C}({X}),\mathsf{d}_{\text{C}({X})},\mathfrak{m}_{\text{C}({X})}\right)$ has the following explicit expression as {}{ \cite[Theorem 6.20]{D02}}. \begin{prop}\label{1prop2.23} Let $({X},\mathsf{d},\mathfrak{m})$ be a compact $\mathrm{RCD}(N-2,N-1)$ space with $N\geqslant 3$. Let $\alpha=(2-N)/2$, $\nu_j=\sqrt{\alpha^2+\mu_j}$ for $j\in \mathbb{N}$. Then $\rho^{\text{C}({X})}$ can be written as follows: \begin{equation}\label{Ding} \rho^{\text{C}({X})}\left((r_1,x_1),(r_2,x_2),t\right)=(r_1 r_2)^\alpha \sum\limits_{j=0}^\infty \frac{1}{2t} \exp\left(-\frac{r_1^2+r_2^2}{4t}\right)I_{\nu_j}\left(\frac{r_1 r_2}{2t}\right) \phi_j(x_1)\phi_j(x_2). \end{equation} Here $I_{\nu}$ is a modified Bessel function defined by \begin{equation}\label{Bessel} I_{\nu}(z)=\sum\limits_{k=0}^\infty \frac{1}{k! \Gamma(\nu+k+1)}\left(\frac{z}{2}\right)^{2k+\nu}. \end{equation} \end{prop} \begin{proof} We claim that for any $f\in C_c(\mathrm{C}({X}))$, by using $\rho^{\mathrm{C}({X})}$ defined in (\ref{Ding}), ${}{\mathrm{h}_t} f$ can be expressed as follows. \begin{equation}\label{1111eqn2.11} {}{\mathrm{h}_t} f(r_1,x_1)=\int_{\mathrm{C}({X})}\rho^{\mathrm{C}({X})}((r_1,x_1),(r_2,x_2),t)f(r_2,x_2) \mathrm{d}\mathfrak{m}_{\mathrm{C}({X})}(r_2,x_2). \end{equation} Then we are done by combining (\ref{111eqn2.4}) and the fact that $C_c(\text{C}({X}))$ is dense in $L^2\left(\mathfrak{m}_{\text{C}({X})}\right)$. To show (\ref{1111eqn2.11}), {}{we first set} $u_i(r)=\int_{X} f(r,x)\phi_i(x)\mathrm{d}\mathfrak{m}(x)$ $(i=0,1,\cdots)$. 
For any $r\in (0,\infty)$, since $f^{(r)}$ is continuous, by Parseval's identity we have \[ {}{\sum\limits_{i=0}^\infty u_i^2(r)=\int_{X}\sum\limits_{i=0}^\infty u_i^2(r)\phi_i^2(x)\mathrm{d}\mathfrak{m}(x)= \int_{X} f^2(r,x)\mathrm{d}\mathfrak{m}(x).} \] {}{Letting} $f_k(r):=\sum\limits_{i=0}^k r^{N-1}u_i^2(r)$, and using the dominated convergence theorem, we get \[ \lim\limits_{k\rightarrow \infty}\int_{(0,\infty)} f_k(r)\mathrm{d}r=\int_{(0,\infty)}\int_{X} r^{N-1} f^2(r,x)\mathrm{d}\mathfrak{m}(x)\mathrm{d}r. \] This yields \[ \begin{aligned} \ &\lim\limits_{k\rightarrow \infty}\int_{\mathrm{C}({X})}\left(f(r,x)-\sum\limits_{i=0}^k u_i(r)\phi_i(x) \right)^2\mathrm{d}\mathfrak{m}_{\mathrm{C}({X})}(r,x)\\ =&\lim\limits_{k\rightarrow \infty}\left(\int_{(0,\infty)}\int_{X} r^{N-1} f^2(r,x)\mathrm{d}\mathfrak{m}(x)\mathrm{d}r-\int_{(0,\infty)} f_k(r)\mathrm{d}r\right)=0. \end{aligned} \] Therefore $f(r,x)=\sum\limits_{i=0}^\infty u_i(r)\phi_i(x) $ {}{for $\mathfrak{m}_{\mathrm{C}({X})}$-a.e. $(r,x)\in \mathrm{C}({X})$}. Applying the separation of variables in classical ways like \cite[Chapter 8]{Ta96}, we complete the proof of (\ref{1111eqn2.11}). \end{proof} \begin{defn}[Cartesian product as a metric measure space]\label{cp1} {}{Let $({X},\mathsf{d}_{X},\mathfrak{m}_{X})$, $({Y},\mathsf{d}_{Y},\mathfrak{m}_{Y})$ be two metric measure spaces. The product metric measure space $({X}\times {Y} ,\mathsf{d}_{{X}\times {Y} }, \mathfrak{m}_{{X}\times {Y} })$} is defined as the product space ${X}\times {Y} $ equipped with the distance \[ \mathsf{d}_{{X}\times {Y} }\left((x_1,y_1),(x_2,y_2)\right)=\sqrt{\mathsf{d}_{X}^2(x_1,x_2)+\mathsf{d}_{Y}^2(y_1,y_2)},\ \ \forall (x_1,y_1),(x_2,y_2)\in {X}\times {Y}, \] and the measure {}{$\mathrm{d} \mathfrak{m}_{{X}\times {Y} }:=\mathrm{d}\mathfrak{m}_{X} \otimes \mathrm{d}\mathfrak{m}_{Y}$.} \end{defn} Since \cite[Proposition 4.1]{GR20} applies for RCD$(K,\infty)$ spaces, for any $f\in H^{1,2}\left({X}\times {Y} ,\mathsf{d}_{{X}\times {Y} }, \mathfrak{m}_{{X}\times {Y} }\right)$, it holds that \[ \left(f^{(x)}:y\longmapsto f(x,y)\right)\in H^{1,2}({Y},\mathsf{d}_{Y},\mathfrak{m}_{Y}),\ \mathfrak{m}_{X}\text{-a.e.}\ x\in{X}{}{,} \] \[ \left(f^{(y)}:x\longmapsto f(x,y)\right)\in H^{1,2}({X},\mathsf{d}_{X},\mathfrak{m}_{X}),\ \mathfrak{m}_{Y}\text{-a.e.}\ y\in{Y}{}{,} \] and $|\nabla f|^2_{{X}\times {Y} }$ can be expressed as \begin{equation}\label{2.27} \left|\nabla f\right|^2_{{X}\times {Y} }(x,y)=\left|\nabla f^{(y)}\right|^2_{{X}}(x)+\left|\nabla f^{(x)}\right|^2_{{Y}}(y), \text{ $\mathfrak{m}_{{X}\times {Y} }$-a.e. }(x,y)\in {X}\times {Y}. \end{equation} Thus for any $f_1, f_2 \in H^{1,2}\left({X}\times {Y} ,\mathsf{d}_{{X}\times {Y} }, \mathfrak{m}_{{X}\times {Y} }\right)$, we have the following for $\text{ $\mathfrak{m}_{{X}\times {Y} }$-a.e. }(x,y)\in {X}\times {Y}$: \begin{equation}\label{1234eqn2.9} \left\langle \nabla f_1 ,\nabla f_2 \right\rangle_{{X}\times {Y} }(x,y)= \left\langle \nabla f_1^{(y)},\nabla f_2^{(y)}\right\rangle_{{X}}(x)+\left\langle \nabla f_1^{(x)},\nabla f_2^{(x)}\right\rangle_{{Y}}(y). \end{equation} It also follows from \cite[Corollary 4.2]{GR20} that for any $f\in L^2(\mathfrak{m}_{{X}\times {Y} })$, \[ \text{h}_t^{{X}\times {Y} }f=\text{h}_t^{X} \left(\text{h}_t^{Y} f^{(x)}\right)=\text{h}_t^{Y} \left(\text{h}_t^{X} f^{(y)}\right). \] As a result, $\rho^{{X}\times {Y} }$ has an explicit expression as follows.{}{ \begin{equation}\label{eqn2.1} \rho^{{X}\times {Y} }((x_1,y_1),(x_2,y_2),t)=\rho^{X}(x_1,x_2,t) \rho^{Y}(y_1,y_2,t). 
\end{equation}}
\subsection{First and second order calculus on RCD($K,N$) spaces}
This subsection is based on \cite{G18}. We assume that $({X},\mathsf{d},\mathfrak{m})$ is an RCD($K,N$) space in this subsection.
\begin{defn}[$L^p$-normed $L^\infty$-module]\label{module}
For any $p\in [1,\infty]$, a quadruplet $\left(\mathscr{M},\left\|\cdot\right\|_{\mathscr{M}},\cdot,|\cdot|\right)$ is said to be an $L^p$-normed $L^\infty$-module if it satisfies the following conditions.
\begin{enumerate}
\item The normed vector space $\left(\mathscr{M},\left\|\cdot\right\|_{\mathscr{M}}\right)$ is a Banach space.
\item The multiplication by $L^\infty$-functions $\cdot:L^\infty(\mathfrak{m})\times\mathscr{M}\rightarrow \mathscr{M}$ is a bilinear map such that for every $f,h\in L^\infty(\mathfrak{m})$ and every $v\in\mathscr{M}$, it holds that
\[
f\cdot (h\cdot v)=(fh)\cdot v, \ \ \hat{1}\cdot v=v.
\]
\item The pointwise norm $|\cdot|:\mathscr{M}\rightarrow L^p(\mathfrak{m})$ satisfies that for every $f\in L^\infty(\mathfrak{m})$ and every $v\in\mathscr{M}$, it holds that
\[
|v|\geqslant 0,\ |f\cdot v|=|f||v|\ \ \mathfrak{m}\text{-a.e.},\ \text{and}\ \ \|v\|_\mathscr{M}=\left\||v|\right\|_{L^p(\mathfrak{m})}.
\]
\end{enumerate}
In particular, $\left(\mathscr{M},\left\|\cdot\right\|_{\mathscr{M}},\cdot,|\cdot|\right)$ is simply called a module when $p=2$.
\end{defn}
\begin{remark}
The homogeneity and subadditivity of $|\cdot|$ follow directly from Definition \ref{module}. We write $fv$ instead of $f\cdot v$ later on for simplicity.
\end{remark}
To construct the cotangent module, the first step is to define a pre-cotangent module $\mathsf{Pcm}$. Elements of $\mathsf{Pcm}$ are of the form $\left\{ (E_i ,f_i )\right\}_{i=1}^n$, where $\left\{E_i\right\}_{i=1}^n$ is some Borel partition of ${X}$ and $\left\{f_i\right\}_{i=1}^n\subset H^{1,2}({X},\mathsf{d},\mathfrak{m})$. The second step is to define an equivalence relation on $\mathsf{Pcm}$ as follows.
\[
\left\{(E_i,f_i)\right\}_{i=1}^n\sim \left\{(F_j,h_j)\right\}_{j=1}^m \text{ if and only if for any }\ i,j, \ |\text{D}f_i|=|\text{D}h_j| \text{ holds $\mathfrak{m}$-a.e. on $E_i\cap F_j$}.
\]
Denote by $\left[E_i,f_i\right]_i$ the equivalence class of $\left\{(E_i,f_i)\right\}_{i=1}^n$ and by $\chi_E$ the characteristic function of $E$ for any Borel set $E\subset {X}$. With the help of the locality of minimal relaxed slopes, the following operations on the quotient $\mathsf{Pcm}/\sim$ are well-defined:
\[
\begin{aligned}
\left[E_i,f_i\right]_i+\left[F_j,g_j\right]_j&:=\left[E_i\cap F_j,f_i+g_j\right]_{i,j},\\
\alpha \left[E_i,f_i\right]_i&:=\left[E_i,\alpha f_i\right]_i,\\
\left(\sum\limits_j \alpha_j \chi_{F_j}\right)\cdot \left[E_i,f_i\right]_i&:=\left[E_i\cap F_j,\alpha_j f_i\right]_{i,j},\\
\left|\left[E_i,f_i\right]_i\right|&:=\sum\limits_i \chi_{E_i}|\text{D}f_i|\ \mathfrak{m}\text{-a.e. in }{X},\\
\left\|\left[E_i,f_i\right]_i\right\|&:=\left\|\left|[E_i,f_i]_i\right|\right\|_{L^2(\mathfrak{m})}=\left(\sum\limits_i \int_{E_i}|\text{D}f_i|^2\mathrm{d}\mathfrak{m}\right)^{\frac{1}{2}}.
\end{aligned}
\]
Let $\left(L^2(T^\ast ({X},\mathsf{d},\mathfrak{m})),\|\cdot\|_{L^2(T^\ast ({X},\mathsf{d},\mathfrak{m}))}\right)$ be the completion of $\left(\mathsf{Pcm}/\sim,\left\|\cdot\right\|\right)$.
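As a purely illustrative example (not taken from \cite{G18}), consider ${X}=[-1,1]$ equipped with the Euclidean distance and $\mathfrak{m}=\mathcal{L}^1$, which is an RCD$(0,N)$ space for any $N\geqslant 1$. Then
\[
\left\{\left([-1,0),\ f_1\right),\left([0,1],\ f_2\right)\right\}\sim\left\{\left([-1,1],\ f_3\right)\right\},\qquad f_1(x)=x,\ f_2(x)=-x,\ f_3(x)=|x|,
\]
since $|\mathrm{D}f_1|=|\mathrm{D}f_2|=|\mathrm{D}f_3|=1$ $\mathcal{L}^1$-a.e.; the common equivalence class $\omega$ satisfies $|\omega|=1$ $\mathcal{L}^1$-a.e. and $\left\|\omega\right\|=\sqrt{2}$.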
The multiplication $\cdot$ and the pointwise norm $|\cdot|$ in Definition \ref{module} can be continuously extended to \[ \begin{aligned} \cdot&:L^\infty(\mathfrak{m})\times L^2(T^\ast ({X},\mathsf{d},\mathfrak{m}))\rightarrow L^2(T^\ast ({X},\mathsf{d},\mathfrak{m})),\\ |\cdot|&: L^2(T^\ast ({X},\mathsf{d},\mathfrak{m}))\rightarrow L^2(\mathfrak{m}).\\ \end{aligned} \] Then the construction of the module $\left(L^2(T^\ast ({X},\mathsf{d},\mathfrak{m})),\left\|\cdot\right\|_{L^2(T^\ast ({X},\mathsf{d},\mathfrak{m}))}, \cdot ,|\cdot|\right)$ is completed. {}{We write $L^2(T^\ast ({X},\mathsf{d},\mathfrak{m}))$ for short if no ambiguity is caused.} \begin{thm}[Uniqueness of cotangent module] There is a unique couple $\left(L^2(T^\ast ({X},\mathsf{d},\mathfrak{m})),d\right)$, where $L^2(T^\ast ({X},\mathsf{d},\mathfrak{m}))$ is a module and $d:H^{1,2}({X},\mathsf{d},\mathfrak{m})\rightarrow L^2(T^\ast ({X},\mathsf{d},\mathfrak{m}))$ is a linear operator such that $|df|=|\mathrm{D}f|$ holds $\mathfrak{m}$-a.e. for every $f\in H^{1,2}({X},\mathsf{d},\mathfrak{m})$. Uniqueness is intended up to unique isomorphism: if another couple $(\mathscr{M},d')$ satisfies the same properties, then there exists a unique module isomorphism $\zeta:L^2(T^\ast ({X},\mathsf{d},\mathfrak{m}))\rightarrow \mathscr{M}$ such that $\zeta\circ d=d'$. \end{thm} In this paper, $L^2\left(T^\ast({X},\mathsf{d},\mathfrak{m})\right)$ and $d$ are called the cotangent module and the differential respectively. Elements of $L^2\left(T^\ast({X},\mathsf{d},\mathfrak{m})\right)$ are called 1-forms. Likewise, the tangent module $L^2(T({X},\mathsf{d},\mathfrak{m}))$ can be defined as a module generated by $\{\nabla f :\ f\in H^{1,2} ({X},\mathsf{d},\mathfrak{m})\}$, where $\nabla f$ satisfies that \[ dh(\nabla f)=\langle \nabla h,\nabla f\rangle\ \ \mathfrak{m}\text{-a.e.}, \ \ \forall\ h\in H^{1,2}({X},\mathsf{d},\mathfrak{m}). \] $L^2(T({X},\mathsf{d},\mathfrak{m}))$ is the dual module of $L^2(T^\ast ({X},\mathsf{d},\mathfrak{m}))$, and its elements are called vector fields. Let us recall the construction of the tensor product of $L^2(T^\ast ({X},\mathsf{d},\mathfrak{m}))$ with itself in \cite{G18}. For any $f\in L^\infty(\mathfrak{m}),f_1,f_2\in \mathrm{Test}F\left({X},\mathsf{d},\mathfrak{m}\right)$, the tensor $f d f_1\otimes d f_2$ is defined as \[ f d f_1\otimes d f_2(\eta_1,\eta_2):=f df_1(\eta_1) df_2(\eta_2), \ \forall \eta_1,\eta_2\in L^2(T({X},\mathsf{d},\mathfrak{m})). \] Set \[ \text{Test}(T^\ast)^{\otimes 2}({X},\mathsf{d},\mathfrak{m}):=\left\{ \sum\limits_{i=1}^k f_{1,i}df_{2,i}\otimes d f_{3,i}:\ k\in \mathbb{N},f_{j,i}\in \mathrm{Test}F\left({X},\mathsf{d},\mathfrak{m}\right)\right\}. \] and define the $L^\infty(\mathfrak{m})$-bilinear norm \[ \left\langle\cdot ,\cdot \right\rangle: \text{Test}(T^\ast)^{\otimes 2}({X},\mathsf{d},\mathfrak{m})\times \text{Test}(T^\ast)^{\otimes 2}({X},\mathsf{d},\mathfrak{m}) \rightarrow L^2(\mathfrak{m}) \] as \[ \langle d f_1\otimes d f_2,df_3\otimes d f_4\rangle:= \langle \nabla f_1,\nabla f_3\rangle \langle \nabla f_2,\nabla f_4\rangle, \ \forall f_i\in \mathrm{Test}F\left({X},\mathsf{d},\mathfrak{m}\right)\ (i=1,2,3,4). \] {}{The pointwise Hilbert-Schmidt norm is then defined as \[ \begin{aligned} \left|\cdot\right|_{\mathsf{HS}}:\text{Test}(T^\ast)^{\otimes 2}({X},\mathsf{d},\mathfrak{m})&\longrightarrow L^2(\mathfrak{m})\\ A&\longmapsto |A|_{\mathsf{HS}}:=\sqrt{\langle A,A\rangle}. 
\end{aligned} \] } For any $p\in [1,\infty]$, adapting a similar continuous extension procedure of $\text{Test}(T^\ast)^{\otimes 2}({X},\mathsf{d},\mathfrak{m})$ with respect to the norm $\left\|\left|\cdot\right|_{\mathsf{HS}}\right\|_{L^p(\mathfrak{m})}$ gives a construction of the $L^p$-normed $L^\infty$-module $L^p((T^\ast)^{\otimes 2}({X},\mathsf{d},\mathfrak{m}))$. In addition, denote by $L^p_{\text{loc}}(T^\ast({X},\mathsf{d},\mathfrak{m}))$ the collection of 1-forms $\omega$ with $|\omega|\in L^p_{\text{loc}}(\mathfrak{m})$. Here $L^p_{\mathrm{loc}}(\mathfrak{m})$ is the set of all functions $f$ such that $f\in L^p\left(B_R(x),\mathfrak{m}\right)$ for any $B_R(x)\subset {X}$. Similarly for other vector fields and other tensors. The end of this subsection is {}{aimed at recalling} definitions of two kinds of tensor fields. \begin{thm}[The Hessian \cite{G18}] For any $f\in \mathrm{Test}F\left({X},\mathsf{d},\mathfrak{m}\right)$, there exists a unique $T\in L^2\left((T^\ast)^{\otimes 2}({X},\mathsf{d},\mathfrak{m})\right)$, called the Hessian of $f$, denoted by $ \mathop{\mathrm{Hess}}f$, such that for all $f_i\in \mathrm{Test}F\left({X},\mathsf{d},\mathfrak{m}\right)$ $(i=1,2)$, \begin{equation} {}{2T(\nabla f_1,\nabla f_2)= \langle \nabla f_1,\nabla\langle \nabla f_2,\nabla f\rangle\rangle +\langle \nabla f_2,\nabla\langle \nabla f_1,\nabla f\rangle\rangle-\langle \nabla f,\nabla\langle \nabla f_1,\nabla f_2\rangle\rangle } \end{equation} holds for $\mathfrak{m}$-a.e. $x\in {X}$. Moreover, the following holds for any $f\in \mathrm{Test}F\left({X},\mathsf{d},\mathfrak{m}\right)$, $\varphi\in \mathrm{Test}F_+({X},\mathsf{d},\mathfrak{m})$. \begin{equation}\label{abc2.14} \frac{1}{2}\int_{X} \Delta \varphi \cdot |\nabla f|^2\mathrm{d}\mathfrak{m}\geqslant \int_{X}\varphi \left(|\mathop{\mathrm{Hess}}f|_{\mathsf{HS}}^2+ \langle \nabla \Delta f,\nabla f\rangle+K|\nabla f|^2\right) \mathrm{d}\mathfrak{m}. \end{equation} \end{thm} Since $\mathrm{Test}F({X},\mathsf{d},\mathfrak{m})$ is dense in $D(\Delta)$, $\mathop{\mathrm{Hess}}f\in L^2\left((T^\ast)^{\otimes 2}({X},\mathsf{d},\mathfrak{m})\right)$ is well-defined for any $f\in D(\Delta)$. In addition, if $f_i\in \mathrm{Test}F\left({X},\mathsf{d},\mathfrak{m}\right)$ $(i=1,2)$, then $\langle \nabla f_1,\nabla f_2 \rangle\in H^{1,2}({X},\mathsf{d},\mathfrak{m})$, and the following holds for any $ \varphi\in H^{1,2}({X},\mathsf{d},\mathfrak{m})$. \begin{equation}\label{11eqn2.16} \langle \nabla \varphi, \nabla \langle \nabla f_1,\nabla f_2 \rangle \rangle= \mathop{\mathrm{Hess}}f_1\left(\nabla f_2,\nabla\varphi\right)+ \mathop{\mathrm{Hess}}f_2\left(\nabla f_1,\nabla\varphi\right) \ \ \mathfrak{m}\text{-a.e.} \end{equation} \begin{defn}[The Riemannian metric] A tensor field $\bar{g}\in L^\infty_\text{loc}((T^\ast)^{\otimes 2}({X},\mathsf{d},\mathfrak{m}))$ is said to be a (resp. semi) Riemannian metric on $({X},\mathsf{d},\mathfrak{m})$ if it satisfies the following properties. \begin{enumerate} \item (Symmetry) $\bar{g}(V,W)=\bar{g}(W,V)$ $\mathfrak{m}$-a.e. for any $V,W\in L^2(T({X},\mathsf{d},\mathfrak{m}))$. \item (Non (resp. {}{Non semi-}) degeneracy) For any $V\in L^2(T({X},\mathsf{d},\mathfrak{m}))$, it holds that \[ \bar{g}\left(V,V\right)>0\ \ (\text{resp.}\ \bar{g}\left(V,V\right)\geqslant 0) \ \ \mathfrak{m}\text{-a.e. on}\ \left\{|V|>0\right\}. 
\] \end{enumerate} \end{defn} \subsection{Convergence of RCD$(K,N)$ spaces} For a sequence of pointed RCD$(K,N)$ spaces $({X}_i,\mathsf{d}_i,\mathfrak{m}_i,x_i)$, the equivalence between pointed measured Gromov Hausdorff (pmGH) convergence and pointed measured Gromov (pmG) convergence is established in \cite{GMS13}. We only introduce the definition of pmGH convergence and a precompactness theorem of a sequence of pointed RCD$(K,N)$ spaces. It is remarkable that for compact metric measure spaces there is a more convenient convergence named measured Gromov-Hausdorff (mGH) convergence (see \cite{F87}). \begin{defn}[Pointed measured Gromov-Hausdorff (pmGH) convergence]\label{1defn2.5} A sequence of pointed metric measure spaces $\{({X}_i,\mathsf{d}_i,\mathfrak{m}_i,x_i)\}$ is said to be convergent to a pointed metric measure space $ ({X}_\infty,\mathsf{d}_\infty,\mathfrak{m}_\infty,x_\infty)$ in the pointed measured Gromov-Hausdorff (pmGH) sense, if there {}{exists} a complete separable metric space $({Y},\mathsf{d}_{Y})$ and a sequence of isometric embeddings $\{\iota_i:{X}_i\rightarrow {Y}\}_{i\in \mathbb{N}\cup \{\infty\}}$, such that \begin{enumerate} \item $\mathsf{d}_{Y}(\iota_i(x_i), \iota_\infty(x_\infty))\rightarrow 0${}{,} \item for any $R,\epsilon>0$, there exists $N>0$, such that for any $i>N$, we have $\iota_\infty\left(B_R^{{X}_\infty}(x_\infty)\right)\subset B^{Y}_\epsilon \left(\iota_i\left(B_R^{{X}_i}(x_i)\right)\right) $ and {}{$\iota_i\left(B_R^{{X}_i}(x_i)\right)\subset B^{Y}_\epsilon \left(\iota_\infty\left(B_R^{{X}_\infty}(x_\infty)\right)\right) $,} \item for every {}{$f\in C_{c}({Y})$}, $\lim\limits_{i\rightarrow \infty}\int_{Y}f \mathrm{d}(\iota_i)_\sharp \mathfrak{m}_i= \int_{Y} f \mathrm{d}(\iota_\infty)_\sharp \mathfrak{m}_\infty$. \end{enumerate} In particular, we say that $ X_i\ni x_i'\rightarrow x_\infty'\in X_\infty$ if $\mathsf{d}_{Y}\left(\iota_i(x_i'), \iota_\infty(x_\infty')\right)\rightarrow 0$. \end{defn} \begin{defn}[Measured Gromov-Hausdorff convergence] Let $\{ ({X}_i,\mathsf{d}_i,\mathfrak{m}_i)\}$ be a sequence of compact metric measure spaces with {}{$\sup_i \mathrm{diam}({X}_i,\mathsf{d}_i)<\infty$}. Then $\{ ({X}_i,\mathsf{d}_i,\mathfrak{m}_i)\}$ is said to be convergent to a metric measure space $({X}_\infty,\mathsf{d}_\infty,\mathfrak{m}_\infty)$ in the measured Gromov-Hausdorff (mGH) sense if there exists a sequence of points $\{x_i\in {X}_i\}_{i\in \mathbb{N}\cup \{\infty\}}$, such that \[ ({X}_i,\mathsf{d}_i,\mathfrak{m}_i,x_i)\xrightarrow{\mathrm{pmGH}}({X}_\infty,\mathsf{d}_\infty,\mathfrak{m}_\infty,x_\infty). \] \end{defn} \begin{thm}[Precompactness of pointed RCD$(K,N)$ spaces under pmGH convergence \cite{GMS13}]\label{sta} Let $\left\{({X}_i,\mathsf{d}_i,\mathfrak{m}_i,x_i)\right\}$ be a sequence of pointed $\mathrm{RCD}(K,N)$ spaces such that \[ 0<\liminf\limits_{i\rightarrow \infty} \mathfrak{m}_i\left(B_1^{{X}_i}(x_i)\right)<\limsup\limits_{i\rightarrow \infty} \mathfrak{m}_i\left(B_1^{{X}_i}(x_i)\right)<\infty. \] Then there exists a subsequence $\left\{\left({X}_{i(j)},\mathsf{d}_{i(j)},\mathfrak{m}_{i(j)},x_{i(j)}\right)\right\}$, such that it $\mathrm{pmGH}$ converges to a pointed $\mathrm{RCD}(K,N)$ space $({X}_\infty,\mathsf{d}_\infty,\mathfrak{m}_\infty,x_\infty)$. 
\end{thm} {}{Especially, non-collapsed pmGH convergent sequences of non-collapsed RCD$(K,N)$ spaces preserve the Hausdorff measure.} \begin{thm}[Continuity of Hausdorff measure {\cite[Theorem 1.3]{DG18}}]\label{11thm2.15} If a sequence of pointed non-collapsed $\mathrm{RCD}(K,N)$ spaces $\left\{\left({X}_i,\mathsf{d}_i,\mathcal{H}^N,x_i\right)\right\}$ $\mathrm{pmGH}$ converges to a pointed $\mathrm{RCD}(K,N)$ space $ ({X}_\infty,\mathsf{d}_\infty,\mathfrak{m}_\infty,x_\infty)$ and satisfies $\inf_i \mathcal{H}^N\left(B_1^{{X}_i}(x_i)\right)>0$, then $\mathfrak{m}_\infty=\mathcal{H}^N$. \end{thm} It is also worth recalling the following definition. \begin{defn}[Regular set]\label{111def2.18} Let $({X},\mathsf{d},\mathfrak{m})$ be an RCD$(K,N)$ space. Given any integer $k\in [1,N]$, the $k$-dimensional regular set $\mathcal{R}_k:=\mathcal{R}_k({X})$ of ${X}$ is defined as the set of all points of $x$ such that \[ \left({X},\frac{1}{r_i}\mathsf{d},\frac{\mathfrak{m}}{\mathfrak{m}(B_{r_i}(x))},x\right)\xrightarrow{\mathrm{pmGH}} \left(\mathbb{R}^k,\mathsf{d}_{\mathbb{R}^k},\frac{1}{\omega_k}\mathcal{L}^k,0_k\right)\ \ \forall\{ r_i \}\subset (0,\infty)\ \text{with}\ r_i \rightarrow 0. \] \end{defn} It is time to introduce the definition of the essential dimension of RCD spaces. Compare \cite{CN12}. \begin{thm}[Essential dimension \cite{BS20}]\label{1111thm2.22} Let $({X},\mathsf{d},\mathfrak{m})$ be an $\mathrm{RCD}(K,N)$ space. Then there exists a unique $n\in \mathbb{N}\cap [1,N]$ such that $\mathfrak{m}({X}\setminus \mathcal{R}_n)=0$. The essential dimension $\mathrm{dim}_{\mathsf{d},\mathfrak{m}}({X})$ of $({X},\mathsf{d},\mathfrak{m})$ is defined as this $n$. \end{thm} \begin{remark}{}{Under the assumption of Theorem \ref{1111thm2.22}, for any $m\in \mathbb{N}_+$, define the Bishop-Gromov density of $(X,\mathsf{d},\mathfrak{m})$ as \[ \begin{aligned} \vartheta_m({X},\mathsf{d},\mathfrak{m}) :{X}&\longrightarrow [0,\infty]\\ x&\longmapsto \left\{\begin{aligned}\lim\limits_{r\rightarrow 0} \frac{\mathfrak{m}(B_r(x))}{\omega_m r^m},&\ \ \text{ if it exists,}\\ \infty, &\ \ \text{ otherwise.} \end{aligned} \right. \end{aligned} \] } The measure $\mathfrak{m}$ then can be represented as $\vartheta_n({X},\mathsf{d},\mathfrak{m})(x) \mathcal{H}^n\llcorner\mathcal{R}_n$. Moreover, $\mathfrak{m}(\mathcal{R}_n\setminus \mathcal{R}_n^\ast)=0$, where $\mathcal{R}_n^\ast:=\left\{x\in \mathcal{R}_n: \vartheta_n({X},\mathsf{d},\mathfrak{m})\in (0,\infty)\right\}$. See \cite{AHT18}. \end{remark} In particular, for non-collapsed RCD$(K,N)$ spaces, the following statement holds. \begin{thm}[Bishop inequality {\cite[Corollary 1.7]{DG18}}]\label{1111thm2.20} Let $({X},\mathsf{d},\mathcal{H}^N)$ be a non-collapsed $\mathrm{RCD}(K,N)$ space. Then $\mathrm{dim}_{\mathsf{d},\mathcal{H}^N}(X)=N\in \mathbb{N}$, and $\vartheta_N({X},\mathsf{d},\mathcal{H}^N)\leqslant 1$ holds for any $x\in {X}$. Moreover, the equality holds {}{if and only if} $x\in \mathcal{R}_N$. \end{thm} Given an RCD$(K,N)$ space $({X},\mathsf{d},\mathfrak{m})$, there is a canonical Riemannian metric $g$ in the following sense. \begin{thm}[The canonical Riemannian metric \cite{GP16, AHPT21}]\label{111thm2.21} There exists a unique Riemannian metric $g$ such that for any $f_1,f_2 \in H^{1,2}({X},\mathsf{d},\mathfrak{m})$, it holds that \[ g\left(\nabla f_1,\nabla f_2\right)=\left\langle \nabla f_1,\nabla f_2\right\rangle\ \ \text{$\mathfrak{m}$-a.e. in ${X}$}. 
\]
Moreover, $\left|g\right|_{\mathsf{HS}}=\sqrt{\mathrm{dim}_{\mathsf{d},\mathfrak{m}}({X})}$ $\mathfrak{m}$-a.e. in ${X}$.
\end{thm}
Let us use this canonical Riemannian metric to define the trace as
\[
\begin{aligned}
\mathrm{Tr}: L^2_{\text{loc}}\left((T^\ast)^{\otimes 2}({X},\mathsf{d},\mathfrak{m})\right)&\longrightarrow L^2_{\text{loc}}(\mathfrak{m})\\
T&\longmapsto \langle T,g\rangle.
\end{aligned}
\]
The convergence of functions and tensor fields defined on pmGH convergent pointed RCD$(K,N)$ spaces is also well-defined as in \cite{GMS13}, \cite[Definition 1.1]{H15} and \cite{AH17,AST16}. In the rest of this subsection, we assume that $({X}_i,\mathsf{d}_i,\mathfrak{m}_i,x_i)\xrightarrow{\mathrm{pmGH}}({X}_\infty,\mathsf{d}_\infty,\mathfrak{m}_\infty,x_\infty)$, and use the notation in Definition \ref{1defn2.5}.
\begin{defn}[$L^2$-convergence of functions defined on varying spaces]
A sequence $\{f_i:{X}_i\rightarrow \mathbb{R}\}$ is said to be $L^2$-weakly convergent to $f_\infty \in L^2(\mathfrak{m}_\infty)$ if
\[
\left\{
\begin{aligned}
&\sup_i \left\|f_i\right\|_{L^2(\mathfrak{m}_i)}<\infty,\\
&\lim\limits_{i\rightarrow \infty}\int_{Y}hf_i \mathrm{d}(\iota_i)_\sharp \mathfrak{m}_i= \int_{Y} hf_\infty \mathrm{d}(\iota_\infty)_\sharp \mathfrak{m}_\infty, \ \ \forall h\in C_c({Y}).
\end{aligned}
\right.
\]
If moreover $\{f_i\}$ satisfies $\limsup_{i\rightarrow \infty}\left\|f_i\right\|_{L^2(\mathfrak{m}_i)}\leqslant \left\|f_\infty\right\|_{L^2(\mathfrak{m}_\infty)}$, then $\{f_i\}$ is said to be $L^2$-strongly convergent to $f_\infty$.
\end{defn}
\begin{defn}[$H^{1,2}$-convergence of functions defined on varying spaces]
A sequence $\{f_i:{X}_i\rightarrow \mathbb{R}\}$ is said to be $H^{1,2}$-weakly convergent to $f_\infty \in H^{1,2}({X}_\infty,\mathsf{d}_\infty,\mathfrak{m}_\infty)$ if
\[
f_i\xrightarrow{L^2\text{-weakly}}f_\infty\ \text{and}\ \sup_i \text{Ch}^{{X}_i}(f_i)<\infty.
\]
If moreover, $\{f_i\}$ satisfies
\[
\limsup_{i\rightarrow \infty}\left\|f_i\right\|_{L^2(\mathfrak{m}_i)}\leqslant \left\|f_\infty\right\|_{L^2(\mathfrak{m}_\infty)}\ \text{and}\ \limsup_{i\rightarrow \infty}\text{Ch}^{{X}_i}(f_i)=\text{Ch}^{{X}_\infty}(f_\infty),
\]
then $\{f_i\}$ is said to be $H^{1,2}$-strongly convergent to $f_\infty$.
\end{defn}
\begin{defn}[Convergence of tensor fields defined on varying spaces]
Assume $T_i\in L^2_\mathrm{loc}\left((T^\ast)^{\otimes 2}({X}_i,\mathsf{d}_i,\mathfrak{m}_i)\right)$ $(i\in \mathbb{N})$. For any $R>0$, $\{T_i\} $ is said to be $L^2$-weakly convergent to $T_\infty\in L^2\left((T^\ast)^{\otimes 2}(B_R^{{X}_\infty}(x_\infty),\mathsf{d}_\infty,\mathfrak{m}_\infty)\right)$ on $B_R^{{X}_\infty}(x_\infty)$ if it satisfies the following conditions.
\begin{enumerate}
\item (Uniform upper $L^2$ bound) $\sup_i \left\||T_i|_{\mathsf{HS}}\right\|_{L^2\left(B_R^{{X}_i}(x_i),\mathfrak{m}_i\right)}<\infty$.
\item For any $f_{j,i}\in \mathrm{Test}F({X}_i,\mathsf{d}_i,\mathfrak{m}_i)$ $(i\in\mathbb{N},\ j=1,2)$ such that $\{f_{j,i}\}$ $L^2$-strongly converges to $f_{j,\infty}\in \mathrm{Test}F({X}_\infty,\mathsf{d}_\infty,\mathfrak{m}_\infty)$ ($j=1,2$) and that
\[
\sup_{i,j}\left(\left\|f_{j,i}\right\|_{L^\infty(\mathfrak{m}_i)}+\left\||\nabla^{{X}_i}f_{j,i}|\right\|_{L^\infty(\mathfrak{m}_i)}+\left\|\Delta^{{X}_i}f_{j,i}\right\|_{L^\infty(\mathfrak{m}_i)}\right)<\infty,
\]
we have that $\{\chi_{B_R^{{X}_i}(x_i)}\left\langle T_i, df_{1,i}\otimes d f_{2,i}\right\rangle \}$ $L^2$-weakly converges to $\chi_{B_R^{{X}_\infty}(x_\infty)}\langle T_\infty,d f_{1,\infty}\otimes df_{2,\infty} \rangle$.
\end{enumerate}
If moreover, $\limsup_{i\rightarrow \infty}\left\||T_i|_{\mathsf{HS}}\right\|_{L^2\left(B_R^{{X}_i}(x_i),\mathfrak{m}_i\right)}\leqslant \left\||T_\infty|_{\mathsf{HS}}\right\|_{L^2\left(B_R^{{X}_\infty}(x_\infty),\mathfrak{m}_\infty\right)}$, then $\{T_i\}$ is said to be $L^2$-strongly convergent to $T_\infty$ on $B_R^{{X}_\infty}(x_\infty)$.
\end{defn}
Let us recall two convergence results to end this section.
\begin{thm}[$H^{1,2}$-strong convergence of heat kernels {\cite[Theorem 2.19]{AHPT21}}]\label{thm2.26}
For any $\{t_i\}\subset (0,\infty)$ with $t_i\rightarrow t_0 \in (0,\infty)$ and any $\{y_i\}$ with ${X}_i\ni y_i\rightarrow y_\infty \in {X}_\infty$, $\left\{\rho^{{X}_i}(\cdot,y_i,t_i)\right\}$ $H^{1,2}$-strongly converges to $\rho^{{X}_\infty}(\cdot,y_\infty,t_0)\in H^{1,2}({X}_\infty,\mathsf{d}_\infty,\mathfrak{m}_\infty)$.
\end{thm}
\begin{thm}[Lower semicontinuity of essential dimension {\cite[Theorem 1.5]{K19}}]\label{11thm2.26}
\[
\mathrm{dim}_{\mathsf{d}_\infty,\mathfrak{m}_\infty}({X}_\infty)\leqslant \liminf\limits_{i\rightarrow \infty}\mathrm{dim}_{\mathsf{d}_i,\mathfrak{m}_i}({X}_i).
\]
\end{thm}
\section{The isometric immersion into $L^2$ space via heat kernel}\label{sec3}
The equivalence between weakly non-collapsed RCD spaces and non-collapsed RCD spaces was recently proved in \cite[Theorem 1.3]{BGHZ21}; it reads as follows.
\begin{thm}\label{BGHZmainthm}
Assume that $({X},\mathsf{d},\mathfrak{m})$ is an $\mathrm{RCD}(K,N)$ space. If
\[
\mathfrak{m}\left(\left\{x\in {X}:\limsup\limits_{r\rightarrow 0^+}\frac{\mathfrak{m}(B_r(x))}{r^N}<\infty\right\}\right)>0,
\]
then $\mathfrak{m}=c\mathcal{H}^N$ for some $c>0$. Therefore, $\left({X},\mathsf{d},c^{-1}\mathfrak{m}\right)$ is a non-collapsed $\mathrm{RCD}(K,N)$ space.
\end{thm}
The key to proving Theorem \ref{BGHZmainthm} is Theorem \ref{eqnBGHZ21}, and the asymptotic formula (Theorem \ref{20211222a}) for $g_t$ plays an important role in the proof of Theorem \ref{eqnBGHZ21}. The precise definition of $g_t$ will be given in Theorem \ref{thm2.18}.
\begin{thm}[{\cite[Theorem 1.5, Theorem 2.22]{BGHZ21}}]\label{eqnBGHZ21}
Assume that $({X},\mathsf{d},\mathcal{H}^n)$ is an $\mathrm{RCD}(K,N)$ space with $\mathrm{dim}_{\mathsf{d},\mathcal{H}^n}({X})=n$ and $U$ is a connected open subset of ${X}$ such that for any compact subset $A\subset U$,
\begin{equation}\label{BGHZ}
\inf\limits_{r\in (0,1),x\in A}\frac{\mathcal{H}^n\left(B_r(x)\right)}{r^n}>0.
\end{equation} Then for any $ f\in \mathrm{Test}F\left({X},\mathsf{d},\mathcal{H}^n\right)$, any $\varphi\in D(\Delta)$ with $ \varphi \geqslant 0$, $\text{supp}(\varphi)\subset U$ and $\Delta \varphi \in L^\infty (\mathcal{H}^n)$, it holds that \[ \frac{1}{2}\int_U |\nabla f|^2 \Delta \varphi \ \mathrm{d}\mathcal{H}^n \geqslant \int_U \varphi \left(\langle \nabla f , \nabla \Delta f \rangle +K |\nabla f|^2 + \frac{(\Delta f)^2}{n} \right) \mathrm{d}\mathcal{H}^n. \] \end{thm} In addition, for a weakly non-collapsed (and is now non-collapsed) RCD$(K,n)$ space $({X},\mathsf{d},\mathcal{H}^n)$, it follows from \cite[Theorem 1.12]{DG18} that \[ \Delta f=\langle \mathop{\mathrm{Hess}}f,g\rangle \ \ \ \mathfrak{m}\text{-a.e.}, \ \forall f\in \text{D}(\Delta). \] \subsection{The pullback metric $g_t$}\label{sec3.1} On $\mathbb{R}^n$, it is obvious that \begin{equation} g_t^{\mathbb{R}^n}=\frac{c_1^{\mathbb{R}^n}}{t^{\frac{n+2}{2}}}g_{\mathbb{R}^n},\ \ \ \text{with } c_1^{\mathbb{R}^n}=\int_{\mathbb{R}^n}\left(\frac{\partial}{\partial x_1}\rho^{\mathbb{R}^n}(x,y,t)\right)^2\mathrm{d}\mathcal{L}^n (y). \end{equation} In \cite{Ta66}, Takahashi proves that any compact homogeneous irreducible Riemannian manifold $( M^n,g)$ is IHKI, which is even true provided that $( M^n,g)$ is a non-compact homogeneous irreducible Riemannian manifold. To generalize such isometric immersions to RCD$(K,N)$ spaces, let us first introduce the following locally Lipschitz {}{$t$-time heat kernel mapping on an RCD$(K,N)$ space $({X},\mathsf{d},\mathfrak{m})$ by using its heat kernel $\rho$ analogously} : \[ \begin{aligned} \Phi_t:{X}&\longrightarrow L^2(\mathfrak{m})\\ x&\longmapsto \left(y\mapsto \rho(x,y,t)\right), \end{aligned} \] which is well-defined due to the estimates in Theorem \ref{thm2.12}. The natural pull-back semi-Riemannian metric of the flat metric of $L^2(\mathfrak{m})$, namely $g_t:=(\Phi_t)^\ast(g_{L^2(\mathfrak{m})})$, is defined as follows, see \cite[Proposition 4.7]{AHPT21} and \cite[Proposition 3.7]{BGHZ21}. \begin{thm}[The pull-back semi-Riemannian metrics]\label{thm2.18} For all $t>0$, there is a unique semi-Riemannian metric $g_t\in L_{\mathrm{loc}}^\infty\left((T^\ast)^{\otimes 2}({X},\mathsf{d},\mathfrak{m})\right)$ such that \begin{enumerate} \item For any $\eta_i\in L^2\left(T^\ast({X},\mathsf{d},\mathfrak{m})\right)$ with bounded support $(i=1,2)$, \[ \int_{{X}} \left\langle g_t,\eta_1 \otimes \eta_2 \right\rangle \mathrm{d}\mathfrak{m}=\int_{{X}} \int_{{X}} \left\langle d_x \rho(x,y,t),\eta_1\right\rangle \left\langle d_x \rho(x,y,t),\eta_2\right\rangle\mathrm{d}\mathfrak{m}(x)\mathrm{d}\mathfrak{m}(y). \] In particular, if $({X},\mathsf{d},\mathfrak{m})$ is compact, then $g_t=\sum\limits_{i=1}^\infty e^{-2\mu_i t}d\phi_i\otimes d\phi_i$. \item For any $t\in (0,1)$, the rescaled semi-Riemannian metric $t\mathfrak{m}(B_{\sqrt{t}}(\cdot))g_t$ satisfies \begin{equation}\label{tsuikaeqn3.2} t\mathfrak{m}(B_{\sqrt{t}}(\cdot))g_t\leqslant C(K,N) g, \end{equation} which means that for any $\eta\in L^2\left(T^\ast({X},\mathsf{d},\mathfrak{m})\right)$, it holds that \[ t\mathfrak{m}(B_{\sqrt{t}}(x))\langle g_t,\eta\otimes \eta \rangle (x) \leqslant C(K,N) |\eta|^2(x)\ \ \text{$\mathfrak{m}$-a.e. $x\in {X}$}. \] \end{enumerate} \end{thm} The rest part of this subsection proves Theorem \ref{thm1.2}. The following inequality is needed. See for instance \cite[Lemma 2.3]{AHPT21} and \cite[Lemma 2.7]{BGHZ21}. 
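Before stating it, we record, purely as a sanity check that is not used in the sequel, the elementary computation behind the Euclidean identity $g_t^{\mathbb{R}^n}=c_1^{\mathbb{R}^n}t^{-\frac{n+2}{2}}g_{\mathbb{R}^n}$ displayed at the beginning of this subsection. Since $\rho^{\mathbb{R}^n}(x,y,t)=(4\pi t)^{-\frac{n}{2}}\exp\left(-\frac{|x-y|^2}{4t}\right)$, symmetry gives $\int_{\mathbb{R}^n}\partial_{x_1}\rho^{\mathbb{R}^n}\,\partial_{x_2}\rho^{\mathbb{R}^n}\,\mathrm{d}\mathcal{L}^n(y)=0$, while
\[
\int_{\mathbb{R}^n}\left(\frac{\partial}{\partial x_1}\rho^{\mathbb{R}^n}(x,y,t)\right)^2\mathrm{d}\mathcal{L}^n (y)=\frac{(4\pi t)^{-n}}{4t^2}\int_{\mathbb{R}^n}z_1^2\exp\left(-\frac{|z|^2}{2t}\right)\mathrm{d}\mathcal{L}^n(z)=\frac{(2\pi t)^{\frac{n}{2}}}{4t(4\pi t)^{n}}=2^{-\frac{3n+4}{2}}\pi^{-\frac{n}{2}}\,t^{-\frac{n+2}{2}},
\]
so that $g_t^{\mathbb{R}^n}=c_1^{\mathbb{R}^n}t^{-\frac{n+2}{2}}\sum_{i=1}^n \mathrm{d}x_i\otimes\mathrm{d}x_i$ with $c_1^{\mathbb{R}^n}=2^{-\frac{3n+4}{2}}\pi^{-\frac{n}{2}}$; in particular, $c_1^{\mathbb{R}^n}$ may be read as the value of the defining integral at $t=1$.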
\begin{lem}\label{aaaalem3.11} Let $({X},\mathsf{d},\mathfrak{m})$ be an $\mathrm{RCD}(K,N)$ space. Then for any $\alpha\in \mathbb{R}$, $\beta>0$ and any $x\in{X}$, it holds that \begin{equation} \int_{X}\mathfrak{m}\left(B_{\sqrt{t}}(y)\right)^\alpha \exp\left(-\frac{\beta \mathsf{d}^2(x,y)}{t}\right)\mathrm{d}\mathfrak{m}(y)\leqslant C\left(K,N,\alpha,\beta\right) \mathfrak{m}\left(B_{\sqrt{t}}({}{x})\right)^{\alpha+1}. \end{equation} \end{lem} \begin{remark} When $({X},\mathsf{d},\mathfrak{m})$ is an RCD$(0,N)$ space, by \cite[Corollary 1.1]{JLZ16} and Lemma \ref{aaaalem3.11}, (\ref{tsuikaeqn3.2}) becomes \begin{equation}\label{tsukaeqn3.3} t\mathfrak{m}(B_{\sqrt{t}}(\cdot))g_t\leqslant C(N) g,\ \forall {}{t>0}. \end{equation} \end{remark} Jiang's gradient estimate \cite[Theorem 3.1]{J14} is also important in this paper, which states as follows. \begin{thm}\label{aaaathm3.12} Let $({X},\mathsf{d},\mathfrak{m})$ be an $\mathrm{RCD}(K,N)$ space and $\Omega$ be {}{an} open subset. If for some $u\in D(\Delta)\cap L^\infty(\Omega,\mathfrak{m})$, $\Delta u \in L^\infty(\Omega,\mathfrak{m})$, then for every $B_R(x)$ with $R\leqslant 1$ and $B_{8R}(x)\Subset \Omega$, it holds that \begin{equation} \left\| |\nabla u|\right\|_{L^\infty\left(B_{R}(x),\mathfrak{m}\right)}\leqslant C(K,N)\left(\frac{1}{R} \left\| u\right\|_{L^\infty\left(B_{8R}(x),\mathfrak{m}\right)}+ R\left\|\Delta u\right\|_{L^\infty\left(B_{8R}(x),\mathfrak{m}\right)}\right). \end{equation} \end{thm} Finally, we need the following proposition. \begin{prop}\label{llem3.4} {}{Suppose that $({X},\mathsf{d},\mathfrak{m})$ is an $\mathrm{RCD}(K,N)$ space which is not a single point. Then for any $t>0$, \[ \mathfrak{m}\left(\{x\in {X}:|g_t|_{\mathsf{HS}}>0\}\right)>0. \]} \end{prop} \begin{proof} Assume by contradiction the existence of $t_0>0$ such that $\mathfrak{m}(\{x\in {X}:|g_{t_0}|_{\mathsf{HS}}>0\})=0$. Clearly this implies $|\nabla_x \rho(x,y,t_0)|=0$, $\mathfrak{m}$-a.e. $x,y \in {X}$. For any fixed $x\in{X}$, the locally Lipschitz continuity of $y\mapsto \rho(x,y,t_0)$ as well as the Sobolev to Lipschitz property then yields that $\Phi_{t_0}\equiv c\hat{1}$ for some constant $c$. Therefore, it follows from the stochastic completeness of RCD$(K,N)$ spaces that $\mathfrak{m}({X})<\infty$. Without loss of generality, assume that $\mathfrak{m}({X})=1$. Notice that $\Phi_{2t_0}(x)=h_{t_0}(\Phi_{t_0}(x))\equiv \hat{1}$, which implies $\rho(x,y,t)\equiv 1$ on ${X}\times{X}\times [t_0,2t_0]$ by (\ref{111eqn2.4}). {}{Then applying Remark \ref{aaaaarmk2.9} shows that \[ \rho(x,y,t)=1,\ \forall (x,y,t)\in X\times X\times (0,\infty). \] As a consequence, for any $f\in L^2(\mathfrak{m})$, we have \[ \mathrm{h}_t f =\int_X \rho(x,y,t) f\mathrm{d}\mathfrak{m}= \int_X f\mathrm{d}\mathfrak{m},\ \forall t>0. \] Since $\mathrm{h}_t f$ converges to $f$ in $L^2(\mathfrak{m})$ as $t\rightarrow 0$, $f$ is nothing but a constant function, which is enough to conclude that ${X}$ is a single point. A contradiction. } \end{proof} \begin{proof}[Proof of Theorem \ref{thm1.2}] {}{Let $n=\mathrm{dim}_{\mathsf{d},\mathfrak{m}(X)}$.} For any fixed $B_R(x_0)\subset {X}$, set {}{\[ \begin{aligned} f: (0,\infty)&\longrightarrow [0,\infty)\\ t&\longmapsto n\mathfrak{m}(B_R(x_0))\int_{B_R(x_0)}\langle g_t,g_t\rangle\mathrm{d}\mathfrak{m}-\left(\int_{B_R(x_0)}\langle g,g_t\rangle \mathrm{d}\mathfrak{m}\right)^2. \end{aligned} \] } Since we can rescale the space, it suffices to show that $f$ is analytic at any $t\in (0,1)$. 
{}{Because then by applying Proposition \ref{llem3.4} we are done.} For any {}{$m\geqslant 1$}, the commutativity of $\dfrac{\partial}{\partial t}$ and $\Delta $ allows us to fix an arbitrary $y\in {X}$ and apply Theorem \ref{aaaathm3.12} on $B_{8\sqrt{t}}(x)$ for {}{$u:z\mapsto \dfrac{\partial^m}{\partial t^m}\rho(z,y,t)$.} (\ref{aabbeqn3.7}) then implies \[ \left\||\nabla u| \right\|_{L^\infty(B_{\sqrt{t}}(x),\mathfrak{m})} \leqslant {}{\frac{C(K,N)m!}{t^{m+\frac{1}{2}} }}\sup\limits_{z\in B_{8\sqrt{t}}(x)}\left(\mathfrak{m}(B_{\sqrt{t}}(z))\mathfrak{m}(B_{\sqrt{t}}(y))\right)^{-\frac{1}{2}}\exp\left(-\frac{\mathsf{d}^2({}{z,y})}{100t}\right). \] Using (\ref{BGinequality111}), for any $z\in B_{8\sqrt{t}}(x)$, we know \[ \frac{\mathfrak{m}\left(B_{\sqrt{t}}(x)\right)}{\mathfrak{m}\left(B_{\sqrt{t}}(z)\right)}\leqslant C(K,N)\exp\left(\frac{\sqrt{t}+\mathsf{d}(x,z)}{\sqrt{t}}\right)\leqslant C(K,N). \] {}{This as well as the inequality $-\mathsf{d}^2(z,y)\leqslant \mathsf{d}^2(z,x)-\dfrac{\mathsf{d}^2(x,y)}{2}$} implies that for $\mathfrak{m}$-a.e. $x\in {X}$, \begin{equation}\label{aaaaeqn3.8} \left|\nabla_x {}{\frac{\partial^m}{\partial t^m}}\rho(x,y,t)\right|\leqslant {}{\frac{C(K,N)m!}{t^{m+\frac{1}{2}}}}\left(\mathfrak{m}(B_{\sqrt{t}}(x))\mathfrak{m}(B_{\sqrt{t}}(y))\right)^{-\frac{1}{2}}\exp\left(-\frac{\mathsf{d}^2(x,y)}{{}{200t}}\right). \end{equation} Let {}{ $f=n\mathfrak{m}(B_R(x_0))f_1-f_2^2$, with $f_2(t)= \int_{B_R(x_0)}\langle g,g_t\rangle \mathrm{d}\mathfrak{m}$. We only give a proof of the analyticity of $f_1$, since the analyticity of $f_2$ will follow from similar arguments.} Rewrite {}{$f_1$} as \[ {}{f_1}(t)=\int_{B_R(x_0)}\int_{X}\int_{X} \left\langle \nabla_x \rho(x,y,t),\nabla_x \rho(x,z,t)\right\rangle^2 \mathrm{d}\mathfrak{m}(z) \mathrm{d}\mathfrak{m}(y) \mathrm{d}\mathfrak{m}(x). \] It is enough to estimate derivatives of each order of ${}{f_1}$ at any fixed $t\in (0,1)$. We first show that {}{$f_1$} is differentiable. For any sufficiently small $s$, {}{$\dfrac{f_1(t+s)-f_1(t)}{s}$} can be written as the sum of the integrals of functions like \begin{equation}\label{0324eqn1} \left\langle \nabla_x \frac{\rho(x,y,t+s)-\rho(x,y,t)}{s},\nabla_x \rho(x,z,t)\right\rangle \left\langle \nabla_x \rho(x,y,t+s),\nabla_x \rho(x,z,t+s)\right\rangle \end{equation} on $B_R(x_0) \times {X}\times {X}$. In order to use the dominated convergence theorem, we need estimates of $\left|\nabla_x \dfrac{\rho(x,y,t+s)-\rho(x,y,t)}{s}\right|$ and $|\nabla_x \rho(x,y,t+s) |$ for any sufficiently small $s$. By Theorem \ref{thm2.12} and the Bishop-Gromov inequality, for $\mathfrak{m}$-a.e. $x\in{X}$, \begin{equation}\label{0324eqn3} \begin{aligned} |\nabla_x \rho(x,y,t+s) |&\leqslant \dfrac{C(K,N)}{\sqrt{t+s}\ \mathfrak{m}\left(B_{\sqrt{t+s}}(x)\right)}\exp\left(-\dfrac{\mathsf{d}^2(x,y)}{100(t+s)}\right)\\ \ &\leqslant \dfrac{C(K,N)}{\sqrt{t}\ \mathfrak{m}\left(B_{\sqrt{t}}(x)\right)}\dfrac{\mathfrak{m}\left(B_{\sqrt{t}}(x)\right)}{\mathfrak{m}\left(B_{\sqrt{t+s}}(x)\right)}\exp\left(-\dfrac{\mathsf{d}^2(x,y)}{200t}\right) \\ \ &\leqslant \dfrac{C(K,N)}{\sqrt{t}\ \mathfrak{m}\left(B_{\sqrt{t}}(x)\right)}\exp\left(-\dfrac{\mathsf{d}^2(x,y)}{200t}\right) .\\ \end{aligned} \end{equation} The last inequality of (\ref{0324eqn3}) is obvious when $s>0$, and is guaranteed by the Bishop-Gromov inequality when $s<0$. 
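To spell out the case $s<0$: since $|s|$ may be assumed to satisfy $|s|\leqslant t/2$, we have $t+s\geqslant t/2$, and (\ref{BGinequality}) yields
\[
\frac{\mathfrak{m}\left(B_{\sqrt{t}}(x)\right)}{\mathfrak{m}\left(B_{\sqrt{t+s}}(x)\right)}\leqslant C(K,N)\exp\left(C(K,N)\frac{\sqrt{t}}{\sqrt{t+s}}\right)\leqslant C(K,N)\exp\left(\sqrt{2}C(K,N)\right)=C(K,N),
\]
so that the volume ratio appearing in the second line of (\ref{0324eqn3}) is bounded by a constant depending only on $K$ and $N$.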
Applying (\ref{aaaaeqn3.8}), Theorem \ref{aaaathm3.12} and the Lagrange mean value theorem, the {}{following estimate} can also be obtained as in (\ref{0324eqn3}): \begin{equation}\label{0324eqn2} \begin{aligned} \ &\left|\nabla_x \left(\dfrac{\rho(x,y,t+s)-\rho(x,y,t)}{s}-\dfrac{\partial}{\partial t}\rho(x,y,t)\right)\right|\\ \leqslant\ & \dfrac{C(K,N)2!|s|}{t^{\frac{5}{2}}}\left(\mathfrak{m}\left(B_{\sqrt{t}}(x)\right)\mathfrak{m}\left(B_{\sqrt{t}}(y)\right)\right)^{-\frac{1}{2}}\exp\left(-\dfrac{\mathsf{d}^2(x,y)}{{}{300t}}\right). \end{aligned} \end{equation} Therefore the $L^1(\mathfrak{m}\otimes \mathfrak{m}\otimes \mathfrak{m})$ convergence of (\ref{0324eqn1}) as $s\rightarrow 0$ can be verified by (\ref{0324eqn3}), (\ref{0324eqn2}) and Lemma \ref{aaaalem3.11}. The limit of (\ref{0324eqn1}) as $s\rightarrow 0$ is actually \[ \int_{B_R(x_0)\times {X}\times {X}}\left\langle \nabla_x \frac{\partial}{\partial t}\rho(x,y,t),\nabla_x \rho(x,z,t)\right\rangle \left\langle \nabla_x \rho(x,y,t),\nabla_x \rho(x,z,t)\right\rangle \mathrm{d}\mathfrak{m}(z) \mathrm{d}\mathfrak{m}(y) \mathrm{d}\mathfrak{m}(x). \] The proof of any higher order differentiability of {}{$f_1$} can follow from similar arguments as above. On the other hand, the higher order derivatives of {}{$f_1$} shall be written as \[ {}{f_1^{(m)}(t)}=\sum\limits_{k=0}^m\sum\limits_{i=0}^k\sum\limits_{j=0}^{{}{m-k}}\int_{B_R(x_0)}\int_{X}\int_{X}I_{k,i}I_{{}{m-k},j}\mathrm{d}\mathfrak{m}(z) \mathrm{d}\mathfrak{m}(y) \mathrm{d}\mathfrak{m}(x), \] where \[ I_{k,i}=\left\langle \nabla_x \frac{\partial^i}{\partial t^i}\rho(x,y,t),\nabla_x \frac{\partial^{k-i}}{\partial t^{k-i}}\rho(x,z,t)\right\rangle. \] {}{Letting \[ I_i=\left|\nabla_x\frac{\partial^i}{\partial t^i}\rho(x,y,t)\right|,\ \ J_{i}=\left|\nabla_x\frac{\partial^i}{\partial t^i}\rho(x,z,t)\right|, \] we obtain \[ |I_{k,i}I_{m-k,j}|\leqslant I_i I_j J_{k-i} J_{m-k-j},\ \mathfrak{m}\text{-a.e.} \] } Finally Theorem \ref{thm2.12}, Lemma \ref{aaaalem3.11} and (\ref{aaaaeqn3.8}) yield that \[ \left|\int_{X}I_i I_j \mathrm{d}\mathfrak{m}(y)\right|\leqslant C(K,N)\frac{i!j!}{t^{i+j+1}}, \] \[ \left|\int_{X}J_{k-i} {}{J_{m-k-j}} \mathrm{d}\mathfrak{m}(z)\right|\leqslant C(K,N){}{\frac{(k-i)!(m-k-j)!}{t^{m-i-j+1}}.} \] Thus ${}{|f_1^{(m)}(t)|}\leqslant \mathfrak{m}(B_R(x_0))C(K,N){}{m!t^{-(m+2)}}$. This completes the proof. \end{proof} \subsection{A regularity result about IHKI RCD$(K,N)$ spaces}\label{sec3.2} {}{This subsection is aimed at proving Theorem \ref{mainthm1.3}.} The following statement is trivial for the pmGH convergence of geodesic spaces, which is frequently used in the proof of Theorem \ref{mainthm1.3}. We shall call no extra attention to this well-known fact in this paper. \begin{fact}\label{11lem3.7} Assume that $({X},\mathsf{d},\mathfrak{m})$ is an RCD$(K,N)$ space {}{and is not a single point}. Then for any sequence of points $\{x_i\}\subset {X}$, and any $\{r_i\}$ with $r_i \rightarrow 0$, after passing to a subsequence, the pmGH limit of $\left\{\left({X}_{i},\dfrac{1}{r_{i}}\mathsf{d}_{i},\dfrac{\mathfrak{m}}{\mathfrak{m}(B_{r_{i}}(x_{i}))},x_i\right)\right\}$ is not a single point. \end{fact} {}{Let us fix an IHKI RCD$(K,N)$ space $({X},\mathsf{d},\mathfrak{m})$ which is not a single point. According to Proposition \ref{llem3.4}, we make a convention that there exists a function $c(t)$ such that \[ c(t)g_t= g,\ \forall t>0, \] in the rest of this subsection.} \begin{proof}[Proof of Theorem \ref{mainthm1.3}] The proof consists of three steps. 
\textbf{Step 1} There exists $\tilde{c}>0$, such that \begin{equation}\label{11eqn3.3} \lim\limits_{r\rightarrow 0}\frac{\mathfrak{m}(B_r(x))}{r^n}=\tilde{c},\ \ \forall x\in \mathcal{R}_n^\ast, \end{equation} and the function $c$ satisfies \begin{equation}\label{11eqn3.11} \lim\limits_{t\rightarrow 0} \frac{t^{n+2}}{c(t^2)}=\tilde{c}^{-1}\omega_n c_1^{\mathbb{R}^n}. \end{equation} Fix $x\in\mathcal{R}_n^\ast$. From the very definition of $\mathcal{R}_n^\ast$, $\lim\limits_{r\rightarrow 0} r^{-n}\mathfrak{m}(B_{r}(x))=\tilde{c}$ for some $\tilde{c}=\tilde{c}(x)>0$. For any $\{r_i\}$ with $r_i \rightarrow 0$, we have \begin{equation}\label{1pmGHconvergence} ({X}_i,\mathsf{d}_i,\mathfrak{m}_i,x):=\left({X},\frac{1}{r_i}\mathsf{d},\frac{\mathfrak{m}}{\mathfrak{m}(B_{r_i}(x))},x\right)\xrightarrow{\mathrm{pmGH}} \left(\mathbb{R}^n,\mathsf{d}_{\mathbb{R}^n},\frac{1}{\omega_n}\mathcal{L}^n,0_n\right). \end{equation} On each ${X}_i$, $c(r_i^2 t)g_t^{{X}_i}=r_i^2 \mathfrak{m}(B_{r_i}(x))g_{{X}_i}$. By \cite[Theorem 3.11]{BGHZ21}, $\{g_t^{{X}_i}\}$ $L^2$-strongly converges to $\omega_n g_t^{\mathbb{R}^n}$ on any $B_R(0_n)\subset \mathbb{R}^n$, from which we know \[ \lim\limits_{i\rightarrow \infty}r_i^2 \frac{\mathfrak{m}(B_{r_i}(x))}{c(r_i^2 t)}=\omega_n c_t^{\mathbb{R}^n}. \] Since the above limit does not depend on the choice of the sequence $\{r_i\}$, we have \begin{equation}\label{11eqn3.5} \lim\limits_{r\rightarrow 0} r^2 \frac{\mathfrak{m}(B_{r}(x))}{c(r^2 t)}=\lim\limits_{r\rightarrow 0} \frac{ \mathfrak{m}(B_{r}(x))}{r^n} \frac{r^{n+2}}{c(r^2 t)}=\omega_n c_t^{\mathbb{R}^n}. \end{equation} As a result, we get (\ref{11eqn3.11}). Observe that the limit in (\ref{11eqn3.5}) also does not depend on the choice of $x\in \mathcal{R}_n^\ast$, which suffices to show (\ref{11eqn3.3}). \textbf{Step 2} $\mathfrak{m}=\tilde{c}\mathcal{H}^n$, for the constant $\tilde{c}$ obtained in Step 1. Reprising the same arguments as in Step 1, we know that $\mathcal{R}_n=\mathcal{R}_n^\ast$ (In fact, $L^2$-strong convergence of $\{g_t^{{X}_i}\}$ on any $B_R(0_n)\subset \mathbb{R}^n$ is also valid when $x\in \mathcal{R}_n$ by \cite[Theorem 3.11]{BGHZ21}). This implies $\mathfrak{m}=\tilde{c}\mathcal{H}^n\llcorner\mathcal{R}_n$. To complete the proof of Step 2, we need nothing but $\mathcal{H}^n\ll\mathfrak{m}$. {}{Because then a combination with Theorem \ref{1111thm2.22} gives $\mathcal{H}^n({X}\setminus \mathcal{R}_n)=0$, which is sufficient to conclude.} For any $x\in {X}\setminus \mathcal{R}_n$, and any sequence $\{r_i\}$ with $r_i\rightarrow 0$, after passing to a subsequence, there exists a pointed RCD$(0,N)$ space $({X}_\infty,\mathsf{d}_\infty,\mathfrak{m}_\infty,x_\infty)$ such that \[ \left({X}_i,\mathsf{d}_i,\mathfrak{m}_i,x\right):=\left({X},\frac{1}{r_i}\mathsf{d},\frac{\mathfrak{m}}{\mathfrak{m}(B_{r_i}(x))},x\right)\xrightarrow{\mathrm{pmGH}} ({X}_\infty,\mathsf{d}_\infty,\mathfrak{m}_\infty,x_\infty). \] When $i$ is sufficiently large, again on each ${X}_i$, $c(r_i^2 t)g_t^{{X}_i}=r_i^2 \mathfrak{m}(B_{r_i}(x))g_{{X}_i}$. In particular, we know from Theorem \ref{thm2.18} that $r_i^2 \mathfrak{m}(B_{r_i}(x))\leqslant C(K,N)c(r_i^2 t)$. {}{Since $(X_\infty,\mathsf{d}_\infty)$ is not a single point}, using {}{Theorems \ref{thm2.26} and \ref{11thm2.26}}, and (\ref{11eqn3.11}), we see \[ \lim\limits_{i\rightarrow \infty} \frac{\mathfrak{m}(B_{r_i}(x))}{r_i^n}\in \left(0,C(K,N)\right). 
\] In particular, \begin{equation}\label{111eqn3.7} C(K,N)\geqslant \limsup\limits_{r\rightarrow 0} \frac{\mathfrak{m}(B_{r}(x))}{r^n}\geqslant \liminf\limits_{r\rightarrow 0} \frac{\mathfrak{m}(B_{r}(x))}{r^n}> 0. \end{equation} Set \[ {X}_\tau:=\left\{x\in{X}:\liminf\limits_{r\rightarrow 0}\frac{\mathfrak{m}(B_r(x))}{r^n}\geqslant \tau\right\}, \] and notice that ${X}=\bigcup_{\tau>0}{X}_\tau$ by (\ref{111eqn3.7}). Applying \cite[Theorem 2.4.3]{AT04} then implies \[ \mathcal{H}^n\llcorner {X}_\tau \ll \mathfrak{m}\llcorner {X}_\tau,\ \forall \tau>0, \] from which we conclude. \textbf{Step 3} $({X},\mathsf{d},\mathcal{H}^{n})$ is an RCD$(K,n)$ space. Without loss of generality, assume $\mathfrak{m}=\mathcal{H}^n$. We first treat the case that $({X},\mathsf{d},\mathcal{H}^{n})$ is compact. By Theorem \ref{eqnBGHZ21}, it suffices to show \begin{equation}\label{eqn20220203} \inf\limits_{x\in{X}} \inf\limits_{s\in (0,1)} \frac{\mathcal{H}^n(B_s(x))}{s^n}>0. \end{equation} Assume on the contrary that (\ref{eqn20220203}) does not hold, then for any $\epsilon>0$, there exists $x_\epsilon \in {X}$, such that $ \inf\limits_{s\in (0,1)} s^{-n}\mathcal{H}^n(B_s(x_\epsilon))<\epsilon$. By (\ref{BGinequality}), \[ \frac{\mathcal{H}^n(B_{r}(x_\epsilon))}{r^n}<\epsilon, \ \ \text{for some}\ r=r(\epsilon)\leqslant \Psi\left(\epsilon|K,N,\mathrm{diam}({X},\mathsf{d}),\mathcal{H}^n({X})\right). \] As a consequence, there {}{exists} a sequence $\{x_i\}\subset {X}$, a sequence $\{r_i\}\subset (0,\infty)$ with $r_i\rightarrow 0$ and a pointed RCD {}{$(0,N)$} space $({X}_\infty,\mathsf{d}_\infty,\mathfrak{m}_\infty,x_\infty)$, such that \begin{equation}\label{111eqn3.9} \lim\limits_{i\rightarrow \infty}\frac{\mathcal{H}^n(B_{r_i}(x_i))}{r_i^n}=0, \end{equation} and \[ ({X}_i,\mathsf{d}_i,\mathfrak{m}_i,x_i):=\left({X}_i,\frac{1}{r_i}\mathsf{d},\frac{\mathfrak{m}}{\mathfrak{m}\left(B_{r_i}(x_i)\right)} ,x_i\right)\xrightarrow{\mathrm{pmGH}} ({X}_\infty,\mathsf{d}_\infty,\mathfrak{m}_\infty,x_\infty). \] Again $c(r_i^2 t)g_t^{{X}_i}=r_i^2 \mathfrak{m}\left(B_{r_i}(x_i)\right) g_{{X}_i}$ on each ${X}_i$, and $\left\{g_t^{{X}_i}\right\}$ $L^2$-strongly converges to 0 on {}{$B_R({x_\infty})$ for any $R>0$} by (\ref{111eqn3.9}), which contradicts Proposition \ref{llem3.4}. As for the non-compact case, it suffices to repeat Step 1-3 and apply Theorem \ref{eqnBGHZ21} again on any $B_R(x)\subset {X}$. \end{proof} \subsection{Non-compact IHKI RCD$(0,n)$ spaces }\label{sec3.3} We start by proving the following theorem in this subsection. \begin{thm}\label{thm4.5} Suppose $({X},\mathsf{d},\mathcal{H}^{n-1})$ is a non-collapsed $\mathrm{RCD}(n-2,n-1)$ space with $n\geqslant 2$. If $ g_1^{\text{C}({X})}\geqslant c g_{\text{C}({X})}$ for some $c>0$, then $({X},\mathsf{d})$ is isometric to $(\mathbb{S}^{n-1},\mathsf{d}_{S^{n-1}})$. \end{thm} We need some preparations. According to Remark \ref{rmk2.10}, $\left(\text{C}({X}),\mathsf{d}_{\text{C}({X})},\mathfrak{m}_{\text{C}({X})}\right)$ is an RCD$(0,n)$ space. In addition, by applying Theorem \ref{1111thm2.20}, Theorem \ref{BGHZmainthm} and the splitting theorem for RCD$(0,n)$ spaces (see \cite[Theorem 1.4]{G13}, \cite{G14}), $\left(\text{C}({X}),\mathsf{d}_{\text{C}({X})},\mathfrak{m}_{\text{C}({X})}\right)$ is also non-collapsed, which means that $\mathfrak{m}_{\text{C}({X})}=\mathcal{H}^{n}$. To fix the {}{notation}, we use (\ref{notation2.7}), and set $\alpha=(2-n)/2$, $\nu_j=\sqrt{\alpha^2+\mu_j}$ for {}{every} $j\in \mathbb{N}$. 
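For later use, we also record the elementary observation that, with this convention,
\[
\nu_0=|\alpha|=\frac{n-2}{2},\qquad \nu_1=\sqrt{\frac{(n-2)^2}{4}+\mu_1},\qquad\text{and}\qquad \nu_1=\frac{n}{2}\ \Longleftrightarrow\ \mu_1=n-1;
\]
since $\nu_1$ is increasing in $\mu_1$, one has $\nu_1>n/2$ exactly when $\mu_1>n-1$, which is the dichotomy exploited at the end of the proof of Theorem \ref{thm4.5} below.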
It is notable that $\mu_1\geqslant n-1$ by \cite[Corollary 1.3]{K15b}. For any RCD$(K,N)$ space $(Y,\mathsf{d}_Y,\mathfrak{m}_Y)$, we define
\[
\begin{aligned}
\rho_t^Y:Y&\longrightarrow (0,\infty)\\
y&\longmapsto \rho^Y(y,y,t).
\end{aligned}
\]
The validity of the limit processes in the proof of Theorem \ref{thm4.5} can be verified by the following estimates. We check one of them for the reader's convenience.
\begin{lem}\label{20211220b}
There exists $C=C(n,\mathrm{diam}({X},\mathsf{d}))$, such that the following estimates hold.
\begin{enumerate}
\item\label{lem3.192} $\ \sup\limits_{x\in{X}}\sum\limits_{j=k}^\infty I_{\nu_j}(r)\phi^2_j(x) \leqslant C\left(\dfrac{r}{2}\right)^{k^{\frac{1}{2(n-1)}}}, \ \forall r \in (0,1),\ \forall k\in \mathbb{N}_+. $
\item $\ I_{\nu_j}(r)\mu_j \leqslant Cj^2 \left(\dfrac{r}{2}\right)^{\nu_j}\leqslant Cj^2 \left(\dfrac{r}{2}\right)^{j^{\frac{1}{n-1}}}, \ \forall r \in (0,1),\ \forall j\in \mathbb{N}.$
\item $\ \sum\limits_{j=k}^\infty I_{\nu_j}(r)\mu_j \leqslant C\left(\dfrac{r}{2}\right)^{k^{\frac{1}{2(n-1)}}},\ \forall r \in (0,1),\ \forall k\in \mathbb{N}_+.$
\end{enumerate}
\end{lem}
\begin{proof}[Proof of \ref{lem3.192}.]
According to Proposition \ref{heatkernel2}, there exists $C=C(n,\mathrm{diam}({X},\mathsf{d}))$, such that for any $x\in {X}$,
\[
\begin{aligned}
\sum\limits_{j=k}^\infty I_{\nu_j}(r)\phi^2_j(x)&\leqslant C\sum\limits_{j=k}^\infty I_{\nu_j}(r)j^{n-1}\\
\ &=C \sum\limits_{j=k}^\infty j^{n-1}\sum\limits_{l=0}^\infty \frac{1}{l! \Gamma(\nu_j+l+1)}\left(\frac{r}{2}\right)^{2l+\nu_j}\\
\ &\leqslant C \sum\limits_{j=k}^\infty j^{n-1} \left(\frac{r}{2}\right)^{\nu_j}\exp\left(\frac{r^2}{4}\right)\\
\ &\leqslant C \sum\limits_{j=k}^\infty j^{n-1} \left(\frac{r}{2}\right)^{j^{\frac{1}{n-1}}}\\
\ &\leqslant C \left(\frac{r}{2}\right)^{k^{\frac{1}{2(n-1)}}}\sum\limits_{j=k}^\infty j^{n-1} \left(\frac{r}{2}\right)^{j^{\frac{1}{2(n-1)}}}\leqslant C\left(\frac{r}{2}\right)^{k^{\frac{1}{2(n-1)}}}. \\
\end{aligned}
\]
\end{proof}
Notice that $(\text{C}({X}),\mathsf{d}_{\text{C}({X})},\mathcal{H}^n)$ has maximal volume growth, and its blow-down is itself. Applying the large time behavior of the heat kernel \cite[Theorem 1.3]{JLZ16} shows
\begin{equation}\label{1prop4.3}
\rho^{\text{C}({X})}_{t}\equiv \frac{n\omega_n}{\mathcal{H}^{n-1}({X})} (4\pi t)^{-\frac{n}{2}},\ \ \forall t>0.
\end{equation}
Lemma \ref{llem3.1} and Lemma \ref{1lem3.15} are also useful in the proof of Theorem \ref{thm4.5}.
\begin{lem}\label{llem3.1}
Let $(Y_i,\mathsf{d}_{i},\mathfrak{m}_{i})$ $(i=1,2)$ be two $\mathrm{RCD}(K,N)$ spaces such that the functions $\rho^{Y_i}_{2t}$ are constant for some $t>0$. Then on $Y_1\times Y_2$,
\[
g_t^{Y_1\times Y_2 } (y_1,y_2)=\rho^{Y_1}_{2t}(y_1)g_t^{Y_2}(y_2) + \rho^{Y_2}_{2t}(y_2)g_t^{Y_1}(y_1).
\]
That is, for any $f\in \mathrm{Lip}_c\left(Y_1\times Y_2,\mathsf{d}_{Y_1\times Y_2}\right)$, writing $f^{(y_1)}:y_2\mapsto f(y_1,y_2)$ for any fixed $y_1$, and $f^{(y_2)}:y_1\mapsto f(y_1,y_2)$ for any fixed $y_2$, it holds that
\[
\begin{aligned}
\ &g_t^{Y_1\times Y_2 }\left(\nabla^{Y_1\times Y_2 } f, \nabla^{Y_1\times Y_2 } f\right)(y_1,y_2)\\
=\ &\rho^{Y_1}_{2t}(y_1)g_t^{Y_2}\left(\nabla^{Y_2} f^{(y_1)},\nabla^{Y_2} f^{(y_1)}\right)(y_2) + \rho^{{Y_2}}_{2t}(y_2)g_t^{Y_1}\left(\nabla^{Y_1} f^{(y_2)},\nabla^{Y_1} f^{(y_2)}\right)(y_1),
\end{aligned}
\]
for $\mathfrak{m}_{Y_1\times Y_2}$-a.e. $(y_1,y_2)$ in $Y_1\times Y_2$.
\end{lem} \begin{proof} Recalling (\ref{1234eqn2.9}),(\ref{eqn2.1}) and the definition of $g_t^{Y_1\times Y_2 }$ in Theorem \ref{thm2.18}, we have {}{\[ \begin{aligned} \ &\ g_t^{Y_1\times Y_2 }(y_1,y_2)\\ =\ &\int_{Y_1\times Y_2} \sum\limits_{i=0}^1 \rho^{Y_{i+1}}(y_{i+1},y_{i+1}',t)d_{y_{2-i}}\rho^{Y_{2-i}}(y_{2-i},y_{2-i}',t)\\ \ &\otimes \sum\limits_{i=0}^1 \rho^{Y_{i+1}}(y_{i+1},y_{i+1}',t)d_{y_{2-i}}\rho^{Y_{2-i}}(y_{2-i},y_{2-i}',t)\mathrm{d}\mathfrak{m}_1(y_1')\mathrm{d}\mathfrak{m}_2(y_2')\\ =\ &\rho^{Y_1}_{2t}(y_1)g_t^{Y_2}(y_2) + \rho^{Y_2}_{2t}(y_2)g_t^{Y_1}(y_1)+I_1(y_1,y_2)+I_2(y_1,y_2), \end{aligned} \]} where \[ I_1(y_1,y_2)=\frac{1}{4}\int_{Y_1\times {Y}_2} d_{y_1}\left(\rho^{Y_1}(y_1,y_1',t)\right)^2\otimes d_{y_2}\left(\rho^{{Y}_2}(y_2,y'_2,t)\right)^2\mathrm{d}\mathfrak{m}_1(y_1')\mathrm{d}\mathfrak{m}_2(y_2'), \] \[ I_2(y_1,y_2)=\frac{1}{4}\int_{Y_1\times {Y}_2}d_{y_2}\left(\rho^{{Y}_2}(y_2,y'_2,t)\right)^2\otimes d_{y_1}\left(\rho^{Y_1}(y_1,y_1',t)\right)^2\mathrm{d}\mathfrak{m}_1(y_1')\mathrm{d}\mathfrak{m}_2(y_2'), \] By our assumption, for $i=1,2$, we have {}{\[ \left(y_i\mapsto d_{y_i} \int_{Y_i} \left(\rho^{Y_i}(y_i,y_i',t)\right)^2 \mathrm{d}\mathfrak{m}_i(y_i')\right)=0\ \ \text{in}\ L^2(T^\ast (Y_i,\mathsf{d}_i,\mathfrak{m}_i)). \] } Therefore $I_1(y_1,y_2)=0$ and $I_2(y_1,y_2)=0$ follow from the local Hille's theorem (see {}{for example} \cite[Proposition 3.4]{BGHZ21}). \end{proof} \begin{lem}\label{1lem3.15} Under the assumption of Lemma \ref{llem3.1}, if moreover there exist $c_1,c_2,{}{t}>0$, such that $g_t^{Y_1}= c_1 g_{Y_1}$ and \[ g_t^{{Y_1}\times {Y_2}}\geqslant c_2 g_{Y_1\times {Y}_2} \ (\text{resp. }g_t^{Y_1\times {Y}_2}= c_2 g_{{Y}_1\times {Y}_2}), \] then there exists $c_3>0$, such that \[ g_t^{Y_2}\geqslant c_3 g_{Y_2}\ (\text{resp. } g_t^{Y_2}=c_3 g_{Y_2}){}{.} \] \end{lem} \begin{proof} Since the {}{proofs} of both cases are almost the {}{same}, we only give the proof of the case that $g_t^{{Y_1}\times {Y_2}}\geqslant c_2 g_{Y_1\times {Y}_2} $. Fix a ball $B_R^{Y_1}(\tilde{y}_1)\subset Y_1$, by \cite[Lemma 3.1]{MN19}, there exists a cut-off function $\phi\in \mathrm{Lip}_c(Y_1,\mathsf{d}_1)$ such that \[ \phi|_{B_R^{Y_1}(\tilde{y}_1)}\equiv 1, \ \phi|_{Y_1\setminus B_{2R}^{Y_1}(\tilde{y}_1)}\equiv 0. \] Now for any $\varphi \in H^{1,2}(Y_2,\mathsf{d}_2,\mathfrak{m}_2)$, set $f:(y_1,y_2)\mapsto \phi(y_1)\varphi(y_2)$. Then it follows from (\ref{2.27}) and Lemma \ref{llem3.1} that for $\mathfrak{m}_{Y_1\times Y_2}$-a.e. $(x,y)$ in $ B_R^{Y_1}(\tilde{y}_1)\times {Y_2}$, \[ \begin{aligned} \ &\rho^{Y_1}_{2t}(y_1)g_t^{Y_2} \left(\nabla^{Y_2} \varphi,\nabla^{Y_2} \varphi\right)(y_2)\\ =\ &\phi^2(y_1)\rho^{Y_1}_{2t}(y_1)g_t^{Y_2} \left(\nabla^{Y_2} \varphi,\nabla^{Y_2} \varphi\right)(y_2)+c_1 \varphi^2(y_2)\rho^{{Y}_2}_{2t}(y_2) \left|\nabla \phi\right|^2(y_1)\\ =\ &\rho^{Y_1}_{2t}(y_1)g_t^{{Y}_2}\left(\nabla^{Y_2} f^{(y_1)},\nabla^{Y_2} f^{(y_1)}\right)(y_2)+\rho^{{Y}_2}_{2t}(y_2)g_t^{Y_1}\left(\nabla^{Y_1} f^{(y_2)},\nabla^{Y_1} f^{(y_2)}\right)(y_1)\\ =\ &g_t^{Y_1\times {Y}_2 }\left(\nabla^{Y_1\times {Y}_2 } f, \nabla^{Y_1\times {Y}_2 } f\right)(y_1,y_2)\\ \geqslant\ &c_2 g_{Y_1\times {Y}_2} \left(\nabla^{Y_1\times {Y}_2 } f, \nabla^{Y_1\times {Y}_2 } f\right)(y_1,y_2)=c_2|\nabla^{Y_2}\varphi|^2(y_2). \end{aligned} \] In particular, \[ \rho^{Y_1}_{2t}(y_1)g_t^{Y_2} \left(\nabla^{Y_2} \varphi,\nabla^{Y_2} \varphi\right)(y_2)\geqslant c_2|\nabla^{Y_2}\varphi|^2(y_2), \ \ \mathfrak{m}_2\text{-a.e.}\ y_2\in{Y_2}. 
\] Since $\varphi \in H^{1,2}(Y_2,\mathsf{d}_2,\mathfrak{m}_2)$ is taken to be arbitrary, we complete the proof by setting $c_3:=c_2 \left(\rho_{2t}^{Y_1}\right)^{-1}$. \end{proof} \begin{proof}[Proof of Theorem \ref{thm4.5}] We start by considering the case that $n\geqslant 4$. For any fixed $(r_0,x_0)\in \text{C}({X})$ and any $\varphi \in \text{Lip}({X},\mathsf{d})$, take $f\in C^\infty((0,\infty))$ such that $\text{supp}f\in (r_0/4,3r_0)$ and $f\equiv 1$ on $(r_0/2,2r_0)$. Then {}{Proposition \ref{1prop2.23} and (\ref{neiji1}) yield} that for $\mathcal{H}^n$-a.e. $(r,x)\in B_{r_0/2}^{\text{C}({X})}\left(r_0,x_0\right)$, \begin{equation}\label{111eqn3.21} \begin{aligned} cr^{-2} \left| \nabla \varphi\right|^2(x)&=c \left| \nabla (f\varphi)\right|^2_{\text{C}({X})}(r,x)\\ \ &\leqslant g_1^{\text{C}({X})}\left(\nabla (f\varphi),\nabla (f\varphi) \right)(r,x)\\ \ &=\frac{1}{4} r^{2\alpha}\sum\limits_{j=1}^\infty\int_0^\infty s\exp\left(-\frac{r^2+s^2}{2}\right)I_{\nu_j}\left(\frac{rs}{2}\right)^2 \mathrm{d}s\left\langle \nabla(f\varphi), \nabla\phi_j \right\rangle_{\text{C}({X})}^2(r,x) \\ \ &=\frac{1}{4} r^{2\alpha-4}\sum\limits_{j=1}^\infty\int_0^\infty s\exp\left(-\frac{r^2+s^2}{2}\right)I_{\nu_j}\left(\frac{rs}{2}\right)^2 \mathrm{d}s\left\langle \nabla \varphi, \nabla\phi_j \right\rangle^2(x) \\ \ &=\frac{1}{2} r^{2\alpha-4}\sum\limits_{j=1}^\infty \exp\left(-\frac{r^2}{2}\right)I_{\nu_j}\left(\frac{r^2}{2}\right) \left\langle \nabla \varphi, \nabla\phi_j \right\rangle^2(x), \\ \end{aligned} \end{equation} where the last equality follows from the semigroup property of $\{h^{\text{C}({X})}_t\}_{t>0}$. In the remaining part of the proof, we just denote by $|\cdot|$ the pointwise norm on $L^2(T^\ast ({X},\mathsf{d},\mathcal{H}^{n-1}))$ for notation convenience. {}{Combining the fact that $\left|\langle \nabla \varphi, \nabla\phi_j \rangle\right|\leqslant |\nabla \varphi||\nabla \phi_j|$, $\mathcal{H}^{n-1}$-a.e. in ${X}$, with last equality of (\ref{111eqn3.21})} implies \[ c \left| \nabla \varphi\right|^2 \leqslant \frac{1}{2} r^{-n}\sum\limits_{j=1}^\infty \exp\left(-\frac{r^2}{2}\right)I_{\nu_j}\left(\frac{r^2}{2}\right) \left|\nabla \varphi\right|^2 \left|\nabla \phi_j\right|^2\ \ \mathcal{H}^n\text{-a.e. } (r,x)\in B_{r_0/2}^{\text{C}({X})}(r_0,x_0). \] In particular, taking $\varphi=\mathsf{d}(x_0,\cdot)$ which satisfies that $|\nabla \varphi|\equiv 1$, we have \begin{equation}\label{3.9} c \leqslant \frac{1}{2}r^{-n}\exp\left(-\frac{r^2}{2}\right) \sum\limits_{j=1}^\infty I_{\nu_j}\left(\frac{r^2}{2}\right) | \nabla\phi_j |^2\ \ \mathcal{H}^n\text{-a.e. } (r,x)\in B_{r_0/2}^{\text{C}({X})}(r_0,x_0). \end{equation} Integration of (\ref{3.9}) on ${X}$ then gives \begin{equation}\label{3.10} c \mathcal{H}^{n-1}({X}) \leqslant \frac{1}{2}r^{-n}\exp\left(-\frac{r^2}{2}\right) \sum\limits_{j=1}^\infty I_{\nu_j}\left(\frac{r^2}{2}\right) \mu_j\ \ \mathcal{L}^1\text{-a.e.}\ r\in(r_0/2,2r_0). \end{equation} In fact, (\ref{3.10}) holds for any $r>0$ due to the arbitrarity of $r_0>0$, which is still denoted as (\ref{3.10}). {}{If $n\geqslant 4$ and $\mu_1>n-1$, then $\nu_j\geqslant \nu_1>n/2$, for all $ j\in \mathbb{N}_+$}. However, Lemma \ref{20211220b} implies that the right hand side of (\ref{3.10}) vanishes as $r\rightarrow 0$. Thus a contradiction occurs. Therefore $\mu_1=n-1$ {}{when $n\geqslant 4$}. 
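The mechanism behind this contradiction is the elementary small-argument behaviour $I_\nu(z)\sim (z/2)^\nu/\Gamma(\nu+1)$, which makes the right hand side of (\ref{3.10}) vanish as $r\rightarrow 0$ once $\nu_1>n/2$. The following short numerical sketch (in Python, using \texttt{scipy}; the values of $n$ and $\nu$ are purely illustrative, and the snippet is of course not part of the proof) displays the decay of a single term.
\begin{verbatim}
# Illustration only: decay of r^(-n) * I_nu(r^2/2) as r -> 0 when nu > n/2,
# the mechanism that forces the integrated estimate to vanish in the limit.
from scipy.special import iv  # modified Bessel function of the first kind

n = 4
for nu in (n / 2 + 0.5, n / 2 + 1.0):    # orders strictly above n/2
    for r in (0.5, 0.1, 0.02, 0.004):
        val = r ** (-n) * iv(nu, r ** 2 / 2)
        print(f"nu = {nu:.1f}, r = {r:7.3f}: r^(-n) I_nu(r^2/2) = {val:.3e}")
\end{verbatim}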
By Theorem \ref{BGHZmainthm} and Obata's first eigenvalue rigidity theorem \cite[Theorem 1.2]{K15b}, there exists a non-collapsed RCD$(n-3,n-2)$ space $({X}',\mathsf{d}_{{X}'},\mathcal{H}^{n-2})$, such that $\left(\text{C}({X}),\mathsf{d}_{\text{C}({X})}\right)$ is isometric to $\left(\mathbb{R}\times \text{C}({X}'),\sqrt{\mathsf{d}_{\mathbb{R}}^2+\mathsf{d}_{\text{C}({X}')}^2}\right)$. From (\ref{eqn2.1}) and (\ref{1prop4.3}), we know \[ \rho^{\text{C}({X}')}_{t}\equiv \frac{n\omega_{n}}{\mathcal{H}^{n-1}({X})} (4\pi t)^{-\frac{n-1}{2}},\ \ \forall t>0. \] Using Lemmas \ref{llem3.1} and \ref{1lem3.15}, we see that $g_1^{\mathrm{C}({X}')}\geqslant c' g_{\mathrm{C}({X}')}$ for some $ c'>0$. It is now sufficient to deal with the case that $n=3$. Repeating the previous arguments, we have $\mu_1=2$. We claim that $\mu_2=2$. If $\mu_2>2$, then the integration of (\ref{3.9}) on any measurable set $\Omega \subset {X}$ yields \[ \begin{aligned} c \mathcal{H}^2(\Omega)\leqslant &\ Cr^{-3} \sum\limits_{j=1}^\infty I_{\nu_j}\left(\frac{r^2}{2}\right)\int_\Omega \left| \nabla\phi_j \right|^2 \mathrm{d}\mathcal{H}^2\\ \leqslant &\ Cr^{-3} I_{\nu_1}\left(\frac{r^2}{2}\right)\int_\Omega \left| \nabla\phi_1\right|^2 \mathrm{d}\mathcal{H}^2+Cr^{-3}\sum\limits_{j=2}^\infty I_{\nu_j}\left(\frac{r^2}{2}\right)\int_{{X}} \left| \nabla\phi_j \right|^2 \mathrm{d}\mathcal{H}^2\\ \rightarrow &\ C \int_\Omega \left| \nabla\phi_1\right|^2\mathrm{d}\mathcal{H}^2 \ \ \text{as }r\rightarrow 0, \end{aligned} \] for some $C=C(n,\mathrm{diam}({X},\mathsf{d}))$. The arbitrariness of $\Omega$, together with the Lebesgue differentiation theorem, shows that $|\nabla \phi_1|^2 \geqslant c_0:=cC^{-1}>0$, $\mathcal{H}^2$-a.e. Consider the Laplacian of $\phi_1^\alpha$ for any even integer $\alpha$, and calculate as follows: \[ \begin{aligned} \Delta \phi_1^\alpha &=\alpha (\alpha-1)|\nabla \phi_1|^2 \phi_1^{\alpha-2}+\alpha \phi_1^{\alpha-1}\Delta \phi_1\\ \ &=\alpha (\alpha-1)|\nabla \phi_1|^2 \phi_1^{\alpha-2}-\alpha \phi_1^{\alpha-1}(n-1) \phi_1\\ \ &=\alpha \phi_1^{\alpha-2}\left((\alpha-1)|\nabla \phi_1|^2 - (n-1)\phi_1^2 \right)\\ \ &\geqslant \alpha \phi_1^{\alpha-2} \left((\alpha-1)c_0 -C(n,\mathrm{diam}({X},\mathsf{d}))\right), \ \ \mathcal{H}^{2}\text{-a.e.} \end{aligned} \] As a result, the integer $\alpha$ can be chosen to be sufficiently large such that $\phi_1^\alpha$ is superharmonic. However, any superharmonic function on a compact RCD space must be a constant function (see for instance \cite[Theorem 2.3]{GR19}). A contradiction. Therefore $\mu_2=2$. According to \cite[Theorem 1.4]{K15b}, $({X},\mathsf{d})$ must be isometric to either $(\mathbb{S}^2,\mathsf{d}_{\mathbb{S}^2})$ or $\left(\mathbb{S}^2_+,\mathsf{d}_{\mathbb{S}^2_+}\right)$. Thus $\left(\text{C}({X}),\mathsf{d}_{\text{C}({X})}\right)$ must be isometric to either $(\mathbb{R}^3,\mathsf{d}_{\mathbb{R}^3})$ or $\left(\mathbb{R}^3_+,\mathsf{d}_{\mathbb{R}^3_+}\right)$. Notice that on $\mathbb{R}^n_+:=\{(x_1,\cdots,x_n)\in \mathbb{R}^n:x_n>0\}$, \[ g_t^{\mathbb{R}^n_+}\left(\frac{\partial}{\partial x_n},\frac{\partial}{\partial x_n}\right)(x_1,\cdots,x_n)=c_n t^{-\frac{n+2}{2}}\left(\frac{1-\exp\left(-\frac{x_n^2}{2t}\right)}{2}+\frac{x_n^2}{4t}\exp\left(-\frac{x_n^2}{2t}\right)\right). \] It is clear that \[ \lim\limits_{x_3\rightarrow 0^+} g_t^{\mathbb{R}^3_+}\left(\frac{\partial }{\partial x_3},\frac{\partial }{\partial x_3}\right)(x_1,x_2,x_3)=0, \] which contradicts our assumption.
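For comparison, the analogous computation on the whole space $\mathbb{R}^n$ (recorded here only as a model-case check, with the exact value of the positive constant left implicit) gives a heat kernel induced metric that is proportional to the Euclidean metric at every scale: since $\rho^{\mathbb{R}^n}(x,y,t)=(4\pi t)^{-n/2}\exp(-|x-y|^2/4t)$, a direct Gaussian computation yields \[ g_t^{\mathbb{R}^n}\left(\frac{\partial}{\partial x_i},\frac{\partial}{\partial x_j}\right)(x)=\int_{\mathbb{R}^n}\frac{(x_i-y_i)(x_j-y_j)}{4t^2}\left(\rho^{\mathbb{R}^n}(x,y,t)\right)^2\mathrm{d}y=c_n' t^{-\frac{n+2}{2}}\delta_{ij}, \] for a dimensional constant $c_n'>0$ whose exact value is not needed here. In particular $g_1^{\mathbb{R}^3}\geqslant c\, g_{\mathbb{R}^3}$ holds for some $c>0$, while the normal component of $g_t^{\mathbb{R}^3_+}$ degenerates at the boundary as above; this is exactly the dichotomy that rules out $\mathbb{R}^3_+$.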
When $n=2$, set ${Y}=\text{C}({X})\times \mathbb{R}$, and notice that $g_1^{{Y}}\geqslant c' g_{Y}$ for some $c'>0$ by (\ref{1prop4.3}), Lemma \ref{llem3.1} and Lemma \ref{1lem3.15}, which can be verified in the same way as the previous arguments. Thus $({Y},\mathsf{d}_{Y})$ must be isometric to $\left(\mathbb{R}^3,\mathsf{d}_{\mathbb{R}^3}\right)$ and $\left(\text{C}({X}),\mathsf{d}_{\mathrm{C}({X})}\right)$ must be isometric to $(\mathbb{R}^2,\mathsf{d}_{\mathbb{R}^2})$. \end{proof} As an application of Theorem \ref{thm4.5}, we prove Theorem \ref{mainthm1.5}. \begin{proof}[Proof of Theorem \ref{mainthm1.5}] It follows from Theorem \ref{mainthm1.3} that $\mathfrak{m}=c\mathcal{H}^n$ for some $c>0$, and $({X},\mathsf{d},\mathcal{H}^n)$ is an RCD$(0,n)$ space. Without loss of generality, we may assume that $\mathfrak{m}=\mathcal{H}^n$. The subsequent pmGH arguments in this proof are almost the same as those in the proof of Theorem \ref{mainthm1.3}, and we omit the details. Take $\{r_i\}$ with $r_i\rightarrow \infty$, and a pointed RCD$(0,n)$ space $({X}_\infty,\mathsf{d}_{\infty},\mathfrak{m}_\infty,x_\infty)$ such that \[ ({X}_i,\mathsf{d}_i,\mathfrak{m}_i,x):=\left({X},\frac{1}{r_i}\mathsf{d},\frac{\mathfrak{m}}{\mathfrak{m}\left(B_{r_i}(x)\right)},x\right)\xrightarrow{\mathrm{pmGH}} ({X}_\infty,\mathsf{d}_\infty,\mathfrak{m}_\infty,x_\infty). \] Again on each ${X}_i$, $c(r_i^2 t)g_t^{{X}_i}=r_i^2 \mathfrak{m}\left(B_{r_i}(x)\right)g_{{X}_i}$. Applying (\ref{tsukaeqn3.3}) and Proposition \ref{llem3.4} implies that \begin{equation}\label{1eqn4.11} \liminf\limits_{i\rightarrow \infty} \frac{\mathfrak{m}(B_{r_i}(x))}{r_i^n}=a>0, \end{equation} and \begin{equation}\label{1eqn4.12} \lim\limits_{i\rightarrow \infty} r_i^{-(n+2)}c(r_i^2)=b>0. \end{equation} By Theorem \ref{11thm2.15}, there exists a subsequence of $\{r_i\}$ which is still denoted as $\{r_i\}$ and a pointed RCD$(0,n)$ space $({Y}_\infty,\mathsf{d}_\infty',\mathcal{H}^n,y_\infty)$ such that \[ ({Y}_i,\mathsf{d}_i',\mathcal{H}^n,y):=\left({X},\frac{1}{r_i}\mathsf{d},\frac{1}{r_i^n}\mathfrak{m},x\right)\xrightarrow{\mathrm{pmGH}} ({Y}_\infty,\mathsf{d}_\infty',\mathcal{H}^n,y_\infty). \] As a result, combining \cite[Theorem 1.1]{DG16} with (\ref{1eqn4.11}) and (\ref{1eqn4.12}) yields that $({Y}_\infty,\mathsf{d}_\infty',\mathcal{H}^n,y_\infty)$ is a Euclidean cone with $g_1^{{Y}_\infty}= b g_{{Y}_\infty}$. Therefore Theorem \ref{thm4.5} implies that $({Y}_\infty, \mathsf{d}_\infty')$ must be isometric to $(\mathbb{R}^n,\mathsf{d}_{\mathbb{R}^n})$. Finally, it remains to use the volume rigidity theorem for non-collapsed almost RCD$(0,n)$ spaces \cite[Theorem 1.6]{DG18} to conclude. \end{proof} The following corollary can be proved by arguments similar to those in the proof of Theorem \ref{mainthm1.5}. \begin{cor}\label{cor4.7} Let $({X},\mathsf{d},\mathcal{H}^n)$ be a non-collapsed $\mathrm{RCD}(0,n)$ space. Suppose that there exists a function $c(t)$ such that \begin{enumerate} \item $c(t)g_t \geqslant g$, $\forall t>0$; \item $\liminf\limits_{t\rightarrow \infty} t^{-(n+2)}c(t^2)>0$. \end{enumerate} Then $({X},\mathsf{d})$ is isometric to $\left(\mathbb{R}^n,\mathsf{d}_{\mathbb{R}^n}\right)$. \end{cor} \section{The isometric immersion into Euclidean space}\label{sec4} The main purpose of this section is to prove Theorem \ref{thm1.5}. To begin with, let us recall a useful result (Theorem \ref{111thm4.3}) from \cite{H21}, which plays an important role in this section.
\begin{defn}[Regular map] Let $({X},\mathsf{d},\mathfrak{m})$ be an RCD$(K,N)$ space. Then a map $F:=(\varphi_1,\ldots,\varphi_k):{X}\rightarrow \mathbb{R}^k$ is said to be regular if each $\varphi_i$ is in $D(\Delta)$ with $\Delta \varphi_i\in L^\infty(\mathfrak{m})$. \end{defn} \begin{defn}[Locally uniformly $\delta$-isometric immersion] Let $({X},\mathsf{d},\mathfrak{m})$ be an RCD$(K,N)$ space and $F:=(\varphi_1,\ldots,\varphi_k):{X}\rightarrow \mathbb{R}^k$ be a locally Lipschitz map. $F$ is said to be a locally uniformly $\delta$-isometric immersion on $B_r(x_0)\subset {X}$ if for any $x\in B_r(x_0)$ it holds that \[ \frac{1}{\mathfrak{m}(B_s(x))}\int_{B_{\delta^{-1}s}(x)}|F^\ast g_{\mathbb{R}^k}-g_{X}|\mathrm{d}\mathfrak{m}<\delta,\ \forall s\in (0,r). \] \end{defn} \begin{thm}[{\cite[Theorem 3.4]{H21}}]\label{111thm4.3} Let $({X},\mathsf{d},\mathfrak{m})$ be an $\mathrm{RCD}(K,N)$ space with $\mathrm{dim}_{\mathsf{d},\mathfrak{m}}({X})=n$ and let $F:=(\varphi_1,\ldots,\varphi_k):{X}\rightarrow \mathbb{R}^k$ be a regular map with \[ \sum\limits_{i=1}^k \| |\nabla \varphi_i|\|_{L^\infty(\mathfrak{m})}\leqslant C. \] If $F$ is a locally uniformly $\delta$-isometric immersion on some ball $B_{4r}(x_0)\subset {X}$. Then the following {}{statements hold.} \begin{enumerate} \item For any $s\in (0,r)$, $\mathsf{d}_{\mathrm{GH}}(B_s(x_0),B_s(0_n))\leqslant \Psi(\delta|K,N,k,C)s$, where $\mathsf{d}_{\mathrm{GH}}$ is the Gromov-Hausdorff distance. \item $F|_{B_{r}(x_0)}$ is $(1+\Psi(\delta|K,N,k,C))$-bi-Lipschitz from $B_{r}(x_0)$ to $F(B_{r}(x_0))\subset \mathbb{R}^k$. \end{enumerate} \end{thm} {}{ From now on, we let $({X},\mathsf{d},\mathcal{H}^n)$ be a fixed compact non-collapsed $\mathrm{RCD}(K,n)$ space, and we assume that \begin{equation}\label{111111eqn1.5} g=\sum\limits_{i=1}^m d\phi_i\otimes d\phi_i, \end{equation} where $g$ is the canonical Riemannian metric of $(X,\mathsf{d},\mathcal{H}^n)$ and each $\phi_i$ is an eigenfunction of $-\Delta$ with corresponding eigenvalue $\mu_i$ ($i=1,\ldots,m$). To fix the notation}, denote by $C$ a constant with {}{\[ C=C\left(K,m,n,\mathrm{diam}({X},\mathsf{d}),\mathcal{H}^n({X}),\mu_1,\ldots,\mu_m,\left\|\phi_1\right\|_{L^2(\mathcal{H}^n)},\ldots,\left\|\phi_m\right\|_{L^2(\mathcal{H}^n)}\right),\] } which may vary from line to line, and by $\mathsf{M}_{n\times n}(\mathbb{R})$ the set of all $n\times n$ real matrices equipped with the Euclidean metric on $\mathbb{R}^{n^2}$, and by $I_n$ the $n\times n$ identity matrix. \begin{lem}\label{1lem4.2} Each $\langle \nabla \phi_i,\nabla \phi_j \rangle $ is a Lipschitz function $(i,j=1,\ldots,m)$. In particular, \begin{equation} \sum\limits_{i,j=1}^m \left\| |\nabla \left\langle \nabla \phi_i,\nabla \phi_j \right\rangle| \right\|_{L^\infty(\mathcal{H}^n)}\leqslant C. \end{equation} \end{lem} \begin{proof} We first show that $|\nabla \phi_1|^2\in \text{Lip}({X},\mathsf{d})$. Taking trace of {}{(\ref{111111eqn1.5})} gives \begin{equation}\label{1eqn4.1} \sum\limits_{i=1}^m \left|\nabla \phi_i\right|^2 =\langle g,g\rangle =n. 
\end{equation} Using the Bochner's inequality (\ref{bochnerineq}), for any $\varphi\in \mathrm{Test}F_+({X},\mathsf{d},\mathcal{H}^n)$, we get \begin{equation}\label{1eqn4.2} \int_{X} \left|\nabla \phi_1\right|^2 \Delta \varphi \mathrm{d}\mathcal{H}^n \geqslant 2\int_{X} \varphi \left( (K-\mu_1) \left|\nabla \phi_1\right|^2 + \frac{1}{n}\mu_1^2\phi_1^2 \right) \mathrm{d}\mathcal{H}^n \geqslant -C\int_{X} \varphi \mathrm{d}\mathcal{H}^n, \end{equation} where the last inequality comes from Proposition \ref{heatkernel2}. Owing to (\ref{1eqn4.1}) and (\ref{1eqn4.2}), \begin{equation}\label{111eqn4.4} \int_{X} \left|\nabla \phi_1\right|^2 \Delta \varphi \mathrm{d}\mathcal{H}^n\\ = -\sum\limits_{j=2}^m \int_{X} \left|\nabla \phi_j\right|^2 \Delta \varphi \mathrm{d}\mathcal{H}^n \leqslant C\int_{X} \varphi \mathrm{d}\mathcal{H}^n. \end{equation} Since $\mathrm{Test}F_+({X},\mathsf{d},\mathcal{H}^n)$ is dense in $ H^{1,2}_+({X},\mathsf{d},\mathcal{H}^n)$, and $\phi_1\in \mathrm{Test}F({X},\mathsf{d},\mathcal{H}^n)$ with {}{$|\nabla \phi_1|^2 \in H^{1,2}({X},\mathsf{d},\mathcal{H}^n)$}, the combination of {}{these} facts with (\ref{1eqn4.2}) and (\ref{111eqn4.4}) yields that for any $\varphi\in H^{1,2}_+({X},\mathsf{d},\mathcal{H}^n)$, \begin{equation}\label{1eqn4.3} \left |\int_{X} \langle \nabla \left|\nabla \phi_1\right|^2, \nabla \varphi \rangle \mathrm{d}\mathcal{H}^n\right| =\left|\int_{X} \left|\nabla \phi_1\right|^2 \Delta \varphi \mathrm{d}\mathcal{H}^n\right| \leqslant C\int_{X} |\varphi |\mathrm{d}\mathcal{H}^n \leqslant {}{C}\left\|\varphi\right\|_{L^2(\mathcal{H}^n)}. \end{equation} Note that (\ref{1eqn4.3}) also holds for any $\varphi\in \text{Lip}({X},\mathsf{d})$ because $\varphi+|\varphi|$, $|\varphi|-\varphi\in \text{Lip}({X},\mathsf{d})$. Since $\mathrm{Test}F({X},\mathsf{d},\mathcal{H}^n)$ is dense in $H^{1,2}({X},\mathsf{d},\mathcal{H}^n)$, we have \[ \left|\int_{X} \langle \nabla \left|\nabla \phi_1\right|^2, \nabla \varphi \rangle \mathrm{d}\mathcal{H}^n\right|\leqslant {}{C}\left\|\varphi\right\|_{L^2(\mathcal{H}^n)}, \ \forall \varphi \in H^{1,2}({X},\mathsf{d},\mathcal{H}^n). \] Consequently, the linear functional \[ \begin{aligned} T:H^{1,2}({X},\mathsf{d},\mathcal{H}^n)&\longrightarrow \mathbb{R}\\ \varphi &\longmapsto \int_{X} \langle \nabla \left|\nabla \phi_1\right|^2, \nabla \varphi \rangle \mathrm{d}\mathcal{H}^n \end{aligned} \] can be continuously extended to a bounded linear functional on $L^2(\mathcal{H}^n)$. Applying the Riesz representation theorem, there exists a unique $h\in L^2(\mathcal{H}^n)$, such that \[ T(\varphi)={}{-}\int_{X} \varphi h \mathrm{d}\mathcal{H}^n, \ \ \forall \varphi \in L^2(\mathcal{H}^n). \] Therefore $|\nabla \phi_1|^2\in D(\Delta)$ with $\left\|\Delta |\nabla \phi_1|^2\right\|_{L^2(\mathcal{H}^n)}\leqslant {}{C}$. Using $(\ref{1eqn4.3})$ again, and repeating the previous arguments, we have \[ \left|\int_{X} \Delta \left|\nabla \phi_1\right|^2 \varphi \mathrm{d}\mathcal{H}^n\right| \leqslant C\int_{X} |\varphi |\mathrm{d}\mathcal{H}^n,\ \forall \varphi\in L^1(\mathcal{H}^n), \] because $\mathrm{Test}F({X},\mathsf{d},\mathcal{H}^n)$ is also dense in $L^1(\mathcal{H}^n)$. Thus $\left\|\Delta \left|\nabla \phi_1\right|^2\right\|_{L^\infty(\mathcal{H}^n)}\leqslant C$. According to Theorem \ref{aaaathm3.12}, $\left\||\nabla |\nabla \phi_1|^2|\right\|_{L^\infty(\mathcal{H}^n)}\leqslant C$. 
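As a quick sanity check in the smooth model case (included for illustration only), take ${X}=\mathbb{S}^n\subset \mathbb{R}^{n+1}$ with the restricted coordinate functions $\phi_i=x_i$, $i=1,\ldots,n+1$, for which the standing assumption (\ref{111111eqn1.5}) holds with $m=n+1$ and $\mu_i=n$. Then \[ |\nabla \phi_1|^2=1-x_1^2,\qquad \Delta |\nabla \phi_1|^2=2(n+1)x_1^2-2,\qquad \left|\nabla |\nabla \phi_1|^2\right|=2|x_1|\sqrt{1-x_1^2}\leqslant 1, \] so $\Delta|\nabla \phi_1|^2$ and $\left|\nabla |\nabla \phi_1|^2\right|$ are indeed bounded, in accordance with the estimates above.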
For any other $|\nabla \phi_i|^2$, the estimates of $\left\|\Delta |\nabla \phi_i|^2\right\|_{L^\infty(\mathcal{H}^n)}$ and $\left\||\nabla |\nabla \phi_i|^2|\right\|_{L^\infty(\mathcal{H}^n)}$ can be obtained along the same lines. Rewrite these estimates as \begin{equation}\label{1eqn4.4} \sum\limits_{i=1}^m\left(\left\|\Delta |\nabla \phi_i|^2\right\|_{L^\infty(\mathcal{H}^n)}+ \left\|\left|\nabla |\nabla \phi_i|^2\right|\right\|_{L^\infty(\mathcal{H}^n)}\right)\leqslant C. \end{equation} Applying (\ref{abc2.14}), (\ref{1eqn4.4}) and Proposition \ref{heatkernel2}, we have \[ \int_{X}\varphi \left|\mathop{\mathrm{Hess}}\phi_i\right|_{\mathsf{HS}}^2\mathrm{d}\mathcal{H}^n\leqslant C\int_{X}\varphi \mathrm{d}\mathcal{H}^n,\ \ \forall\varphi\in \mathrm{Test}F_+({X},\mathsf{d},\mathcal{H}^n), \ \ i=1,\ldots,m, \] which implies that \begin{equation}\label{1eqn4.5} \sum\limits_{i=1}^m \left\|\left|\mathop{\mathrm{Hess}}\phi_i\right|_{\mathsf{HS}}\right\|_{L^\infty(\mathcal{H}^n)}\leqslant C. \end{equation} For {}{each} $\langle \nabla \phi_i,\nabla \phi_j \rangle$ ($i,j=1,\ldots,m$), from (\ref{11eqn2.16}) we obtain that \begin{equation}\label{1111eqn4.7} \begin{aligned} |\langle \nabla \varphi, \nabla \langle \nabla \phi_i,\nabla \phi_j \rangle \rangle|&=\left| \mathop{\mathrm{Hess}}\phi_i(\nabla \phi_j,\nabla\varphi)+ \mathop{\mathrm{Hess}}\phi_j(\nabla \phi_i,\nabla\varphi)\right|\\ \ &\leqslant \left(\left|\mathop{\mathrm{Hess}}\phi_i\right|_{\mathsf{HS}}|\nabla \phi_j|+\left|\mathop{\mathrm{Hess}}\phi_j\right|_{\mathsf{HS}}|\nabla \phi_i| \right)|\nabla \varphi|\\ &\leqslant C |\nabla \varphi| \ \ \mathcal{H}^n\text{-a.e.}, \ \ \ \forall \varphi\in H^{1,2}({X},\mathsf{d},\mathcal{H}^n). \end{aligned} \end{equation} As a result, $\langle \nabla\phi_i,\nabla \phi_j \rangle\in H^{1,2}({X},\mathsf{d},\mathcal{H}^n)$. We complete the proof by letting $\varphi=\langle \nabla\phi_i,\nabla \phi_j \rangle$ in (\ref{1111eqn4.7}), which shows that \begin{equation}\label{1eqn4.6} \left\|\nabla \langle \nabla \phi_i,\nabla \phi_j \rangle \right\|_{L^\infty(\mathcal{H}^n)}\leqslant C. \end{equation} \end{proof} \begin{lem}\label{1lem4.3} For any $\epsilon>0$, there exists $0<\delta\leqslant\Psi(\epsilon|C)$, such that for any $0<r<\delta$ and any arbitrary but fixed $x_0\in {X}$, the following holds. \begin{enumerate} \item\label{1lem4.3a} The map \begin{equation}\label{1eqn4.9} \begin{aligned} \mathbf{x}_0:B_r(x_0)&\longrightarrow \mathbb{R}^n\\ x&\longmapsto (u_1(x),\ldots,u_n(x)) \end{aligned} \end{equation} is $(1+\epsilon)$-bi-Lipschitz from $B_r(x_0)$ to $\mathbf{x}_0(B_r(x_0))$, where each $u_i$ is a linear combination of $\phi_1,\ldots,\phi_m$ with coefficients only dependent on $x_0$. \item\label{1lem4.3b} The matrix-valued function \[ \begin{aligned} U: B_r(x_0)&\longrightarrow \mathsf{M}_{n\times n}(\mathbb{R})\\ x&\longmapsto (u^{ij}(x)):=\left(\langle \nabla u_i,\nabla u_j\rangle(x)\right), \end{aligned} \] is Lipschitz {}{continuous and satisfies} $(1-\epsilon)I_n\leqslant U\leqslant (1+\epsilon)I_n$ on $B_r(x_0)$. Moreover, there exists a matrix-valued Lipschitz function \[ \begin{aligned} B: B_r(x_0)&\longrightarrow \mathsf{M}_{n\times n}(\mathbb{R})\\ x&\longmapsto \left(b_{ij}(x)\right), \end{aligned} \] such that \[ BUB^{T}(x)=I_n, \ \ \ \forall {}{x\in B_r(x_0)}. 
\] \end{enumerate} \end{lem} \begin{proof} Consider the matrix-valued function \[ \begin{aligned} E:{X}&\longrightarrow \mathsf{M}_{m\times m}(\mathbb{R})\\ x&\longmapsto \left(\langle \nabla \phi_i,\nabla \phi_j\rangle(x)\right), \end{aligned} \] which is Lipschitz continuous by Lemma \ref{1lem4.2}. For any fixed $x_0\in {X}$, since $E(x_0)$ is a symmetric matrix of trace $n$ and satisfies $E(x_0)^2=E(x_0)$, there exists an $m\times m$ orthogonal matrix $A=(a_{ij})$, such that \[ AE(x_0)A^{T}=\left( \begin{array}{rl} I_n & 0\\ 0 & 0 \end{array} \right). \] Letting $u_i=\sum\limits_{j=1}^m a_{ij}\phi_j$, $g$ can then be written as $g=\sum\limits_{i=1}^m d u_i \otimes d u_i $ with \begin{equation}\label{1111eqn4.11} \sum\limits_{i,j=n+1}^m \left\langle \nabla u_i,\nabla u_j \right\rangle^2(x_0)=0. \end{equation} In order to use Theorem \ref{111thm4.3}, we need \begin{equation}\label{1eqn4.8} \sum\limits_{i=1}^m\left\|\left|\nabla u_i\right|^2\right\|_{L^\infty(\mathcal{H}^n)} +\sum\limits_{i=1}^m\left\| \Delta u_i\right\|_{L^\infty(\mathcal{H}^n)}+\sum\limits_{i,j=1}^m \left\||\nabla \left\langle \nabla u_i,\nabla u_j \right\rangle|\right\|_{L^\infty(\mathcal{H}^n)}\leqslant C, \end{equation} which follows directly from Proposition \ref{heatkernel2} and Lemma \ref{1lem4.2}. We claim that for any $\epsilon\in (0,1)$, there exists $0<\delta\leqslant \Psi(\epsilon|C)$, such that $\mathbf{x}_0$ is a locally uniformly $\epsilon$-isometric immersion on $B_r(x_0)$ for any $0<r<\delta$. For any $y_0\in B_r(x_0)$, $0<s<r$, we have \begin{equation}\label{1eqn4.10} \begin{aligned} \ &\frac{1}{\mathcal{H}^n\left(B_s(y_0)\right)}\int_{B_{\epsilon^{-1}s}(y_0)}\left|g-\sum\limits_{i=1}^n du_i \otimes du_i\right|_{\mathsf{HS}}\mathrm{d}\mathcal{H}^n \\ \leqslant\ &\frac{\mathcal{H}^n\left(B_{\epsilon^{-1}s}(y_0)\right)}{\mathcal{H}^n\left(B_s(y_0)\right)}\left(\frac{1}{\mathcal{H}^n\left(B_{\epsilon^{-1}s}(y_0)\right)}\int_{B_{\epsilon^{-1}s}(y_0)}\left|g-\sum\limits_{i=1}^n du_i \otimes du_i\right|_{\mathsf{HS}}^2\mathrm{d}\mathcal{H}^n\right)^{\frac{1}{2}}\\ =\ &\frac{\mathcal{H}^n\left(B_{\epsilon^{-1}s}(y_0)\right)}{\mathcal{H}^n\left(B_s(y_0)\right)}\left(\frac{1}{\mathcal{H}^n\left(B_{\epsilon^{-1}s}(y_0)\right)}\int_{B_{\epsilon^{-1}s}(y_0)}\sum\limits_{i,j=n+1}^m \left\langle \nabla u_i , \nabla u_j \right\rangle^2 \mathrm{d}\mathcal{H}^n\right)^{\frac{1}{2}} \leqslant C\epsilon^{-1}\exp(C\epsilon^{-1})\delta^2, \end{aligned} \end{equation} where the last inequality comes from (\ref{BGinequality}), (\ref{1111eqn4.11}) and (\ref{1eqn4.8}). Thus applying Theorem \ref{111thm4.3}, there exists $0<\delta\leqslant \Psi(\epsilon|C)$, such that for any $0<r<\delta$, the function $\textbf{x}_0$ defined in (\ref{1eqn4.9}) is $(1+\epsilon)$-bi-Lipschitz from $B_r(x_0)$ to $\mathbf{x}_0(B_r(x_0))$. We may also require $\delta$ to satisfy condition \ref{1lem4.3b}, which is again due to (\ref{1eqn4.8}). Finally, the choice of the matrix $B(x)$ follows from a standard congruence transformation of $U(x)$. \end{proof} \begin{lem}\label{1lem4.4} ${X}$ admits a $C^{1,1}$ differentiable structure. \end{lem} \begin{proof} Since $({X},\mathsf{d})$ is compact, by taking $\epsilon=\frac{1}{2}$ in Lemma \ref{1lem4.3}, there exists a finite index set $\Gamma$, such that the finite family of pairs $\{(B_r(x_\gamma),\mathbf{x}_\gamma)\}_{\gamma\in\Gamma}$ satisfies the following properties. \begin{enumerate} \item It is a covering of ${X}$, i.e. ${X}\subset \bigcup_{\gamma\in \Gamma} B_r(x_\gamma)$. \item For every $\gamma\in \Gamma$, $\mathbf{x}_\gamma$ is $\frac{3}{2}$-bi-Lipschitz from $B_r(x_\gamma)$ to $\mathbf{x}_\gamma (B_r(x_\gamma))\subset \mathbb{R}^n$, and each component of $\mathbf{x}_\gamma$ is a linear combination of $\phi_1,\ldots,\phi_m$ with coefficients depending only on $x_\gamma$.
\end{enumerate} We only prove the $C^{1,1}$ regularity of $\phi_1,\ldots,\phi_m$ on $(B_r(x_0),\mathbf{x}_0)$, since the $C^{1,1}$ regularity of $\phi_1,\ldots,\phi_m$ on any other $(B_r(x_\gamma),\mathbf{x}_\gamma)$ can be proved in the same way. For any $y_0\in B_r(x_0)$, without loss of generality, assume that $B_s(y_0) \subset B_r(x_0)$ for some $s>0$ and $\mathbf{x}_0(y_0)=0_n \in \mathbb{R}^n$. Since $\mathbf{x}_0$ is a $\frac{3}{2}$-bi-Lipschitz map (thus also a homeomorphism) from $B_r(x_0)$ to $\mathbf{x}_0 (B_r(x_0))$, for any sufficiently small $t>0$, there exists a unique $y_t \in B_r(x_0)$ such that $\mathbf{x}_0(y_t)=(t,0,\ldots,0)$. For $i=1,\ldots,n$, set \begin{equation}\label{11eqn4.16} \begin{aligned} v_i: B_s(y_0)&\longrightarrow \mathbb{R}\\ x&\longmapsto\sum\limits_{j=1}^n b_{ij}(y_0)u_j(x), \end{aligned} \end{equation} where $B=(b_{ij})$ is taken as in Lemma \ref{1lem4.3}. It can be immediately checked that $\langle \nabla v_i,\nabla v_j\rangle(y_0)=\delta_{ij}$ $(i,j=1,\ldots,n)$. Notice that \begin{equation}\label{0417efghi} \begin{aligned} &\frac{1}{\mathcal{H}^n\left(B_\tau (y_0)\right)}\int_{B_\tau (y_0)} \left|g-\sum\limits_{i=1}^n dv_i\otimes dv_i\right|_{\mathsf{HS}}^2 \mathrm{d}\mathcal{H}^n \\ =\ &\frac{1}{\mathcal{H}^n\left(B_\tau (y_0)\right)}\int_{B_\tau(y_0)} \left(n+\sum\limits_{i,j=1}^n \langle \nabla v_i,\nabla v_j\rangle^2-2\sum\limits_{i=1}^n \left|\nabla v_i\right|^2 \right)\mathrm{d}\mathcal{H}^n \rightarrow 0\ \ \text{as }\tau\rightarrow 0^+. \end{aligned} \end{equation} Thus arguing as in the proof of Lemma \ref{1lem4.3} and applying Theorem \ref{111thm4.3} to $B_{2\mathsf{d}(y_0,y_{t})}(y_0)$ for any sufficiently small $t>0$, we know \begin{equation}\label{11111eqn4.15} \sum\limits_{i=1}^n \left(\frac{v_i(y_t)-v_i(y_0)}{\mathsf{d}(y_t,y_0)}\right)^2\rightarrow 1,\ \ \text{as }t\rightarrow 0^+. \end{equation} Recall that $u_i(y_t)=u_i(y_0)=0$ ($i=2,\ldots,n$). This together with (\ref{11111eqn4.15}) shows \begin{equation}\label{202204041} \sum\limits_{i=1}^n b_{i1}^2(y_0) \lim\limits_{t \rightarrow 0^+} \frac{t^2}{\mathsf{d}(y_t,y_0)^2}=1. \end{equation} We next calculate $\lim\limits_{t\rightarrow 0^+}\dfrac{u_{i}(y_t)-u_{i}(y_0)}{t}$ for $i=n+1,\ldots,m$. For $i=n+1,\ldots,m$, set \[ \begin{aligned} f_i:B_s(y_0)&\longrightarrow \mathbb{R}\\ x&\longmapsto u_i(x)- \sum\limits_{j=1}^n \langle\nabla u_i, \nabla v_j \rangle (y_0) v_j(x). \end{aligned} \] Observe that \begin{equation}\label{123eqn4.16} \lim\limits_{x\rightarrow y_0}\langle \nabla f_i,\nabla v_k\rangle(x)=0,\ \ i=n+1,\ldots,m,\ k=1,\ldots,n. \end{equation} Thus (\ref{0417efghi}) and (\ref{123eqn4.16}) yield that $|\nabla f_i|(y_0)=0$ ($i=n+1,\ldots, m$). From the definition of the local Lipschitz constant of a Lipschitz function, we get \[ \frac{1}{\mathsf{d}(y_t,y_0) }\left((u_{i}(y_t)-u_{i}(y_0))-\sum\limits_{j=1}^n \langle\nabla u_i, \nabla v_j \rangle (y_0) \left(v_j(y_t)-v_{j}(y_0)\right) \right) \rightarrow 0,\ \ \text{as $t\rightarrow 0^+$}. \] Therefore \begin{equation}\label{111eqn4.17} \begin{aligned} \lim\limits_{t\rightarrow 0^+}\dfrac{u_{i}(y_t)-u_{i}(y_0)}{\mathsf{d}(y_t,y_0)} =&\sum\limits_{j=1}^n \langle \nabla u_i,\nabla v_j \rangle (y_0) \lim\limits_{t\rightarrow 0^+}\dfrac{v_{j}(y_t)-v_{j}(y_0)}{\mathsf{d}(y_t,y_0)}\\ =& \sum\limits_{j=1}^n b_{j1}(y_0)\langle \nabla u_i,\nabla v_j \rangle (y_0)\lim\limits_{t \rightarrow 0^+}\dfrac{u_1(y_t)-u_1(y_0)}{\mathsf{d}(y_t,y_0)}\\ =&\sum\limits_{j,k=1}^n b_{j1}(y_0)b_{jk}(y_0)\langle \nabla u_i,\nabla u_k \rangle (y_0)\lim\limits_{t \rightarrow 0^+}\dfrac{t} {\mathsf{d}(y_t,y_0)}.
\end{aligned} \end{equation} As a result of (\ref{202204041}) and (\ref{111eqn4.17}), \[ \lim\limits_{t\rightarrow 0^+}\dfrac{u_{i}(y_t)-u_{i}(y_0)}{t}=\sum\limits_{j,k=1}^n b_{j1}(y_0)b_{jk}(y_0)\langle \nabla u_i,\nabla u_k \rangle (y_0). \] Analogously, \[ \lim\limits_{t\rightarrow 0^-}\dfrac{u_{i}(y_t)-u_{i}(y_0)}{t}=\sum\limits_{j,k=1}^n b_{j1}(y_0)b_{jk}(y_0)\langle \nabla u_i,\nabla u_k \rangle (y_0). \] Hence for $i=n+1,\ldots,m$, $k=1,\ldots,n$, we get \begin{equation}\label{111eqn4.18} \frac{\partial u_i}{\partial u_k}(x)=\sum\limits_{j,l=1}^n b_{jk}(x)b_{jl}(x)\langle \nabla u_i,\nabla u_l \rangle (x), \ \ \forall x\in B_r(x_0). \end{equation} According to the fact that each $\phi_i$ is a linear combination of $u_1,\ldots,u_m$ with coefficients only dependent on $x_0$, each $\dfrac{\partial \phi_i}{\partial u_j}$ is Lipschitz continuous on $B_r(x_0)$ and is also Lipschitz continuous on $\mathbf{x}_0(B_r(x_0))$ ($i=1,\ldots,m$, $j=1,\ldots,n$). If $B_r(x_{\gamma'})\cap B_r(x_0)\neq \emptyset$ for some $\gamma' \in \Gamma\setminus \{0\}$, since each component of the coordinate function $\mathbf{x_{\gamma'}}$ is a linear combination of $\phi_1,\ldots,\phi_m$, the transition function from $(B_r(x_0),\mathbf{x}_0)$ to $(B_r(x_{\gamma'}),\mathbf{x}_{\gamma'})$ is $C^{1,1}$ on $(B_r(x_0)\cap B_r(x_{\gamma'}),\mathbf{x}_0)$. Therefore, $\{(B_r(x_\gamma),\mathbf{x}_\gamma)\}_{\gamma\in\Gamma}$ gives a $C^{1,1}$ differentiable structure of ${X}$. \end{proof} \begin{lem}\label{1lem4.5} For the sake of brevity, we only state the following assertions for $\left(B_r(x_0),\mathbf{x}_0\right)$ by using the {}{notation} of Lemma \ref{1lem4.3}. \begin{enumerate} \item\label{1lem4.52} For any $f_1,f_2\in C^1({X})$, we have \begin{equation}\label{1eqn4.16} \langle \nabla f_1,\nabla f_2\rangle=\sum\limits_{j,k=1}^n u^{jk}\frac{\partial f_1}{\partial u_j}\frac{\partial f_2}{\partial u_k} \ \text{ on } B_r(x_0). \end{equation} \item\label{1lem4.53} $(\mathbf{x}_0)_\sharp \left(\mathcal{H}^n\llcorner B_r(x_0)\right)=\left(\mathrm{det}(U)\right)^{-\frac{1}{2}} \mathcal{L}^n\llcorner \mathbf{x}_0\left(B_r(x_0)\right)$. \end{enumerate} \end{lem} \begin{proof} Statement \ref{1lem4.52} follows directly from the chain rule of $\nabla$. As for statement \ref{1lem4.53}, according to the bi-Lipschitz property of $\mathbf{x}_0$, there exists a Radon-Nikodym derivative $h$ of $(\mathbf{x}_0^{-1})_\sharp\left(\mathrm{det}(U))^{-\frac{1}{2}} \mathcal{L}^n\llcorner \mathbf{x}_0(B_r(x_0))\right)$ with respect to $\mathcal{H}^n\llcorner B_r(x_0)$. Again for any $B_{2s}(y_0)\subset B_r(x_0)$, we choose $\{v_i\}_{i=1}^n$ as in (\ref{11eqn4.16}) and set \[ \begin{aligned} \mathbf{y}_0:B_s(y_0)&\longrightarrow \mathbb{R}^n\\ x&\longmapsto (v_1(x),\ldots,v_n(x)). \end{aligned} \] By Theorem \ref{111thm4.3}, \begin{equation}\label{11eqn4.18} \lim\limits_{\tau\rightarrow 0^+} \frac{ \mathcal{L}^n\left(\mathbf{y}_0\left(B_\tau(y_0)\right)\right) }{\mathcal{H}^n(B_\tau(y_0))}=1. \end{equation} Set $\tilde{B}=B(y_0)$. Then it follows from the choice of the matrix $B$ that \begin{equation}\label{11eqn4.20} \mathrm{det}(\tilde{B})^2\mathrm{det}\left(U(y_0)\right)=1. 
\end{equation} Using the commutativity of the following diagram, \[ \xymatrix{ B_s(y_0)\ar[r]^{\mathbf{y}_0\ \ }\ar[dr]_{\mathbf{x}_0}\ \ & \mathbf{y}_0(B_s(y_0))\ar[d]^{\tilde{B}^{-1}}\\ \ & \mathbf{x}_0(B_s(y_0))} \] for any $0<\tau\leqslant s$, it holds that \begin{equation}\label{11eqn4.19} \int_{\mathbf{x}_0\left(B_\tau(y_0)\right)}\left(\mathrm{det}(U)\right)^{-\frac{1}{2}} \mathrm{d}\mathcal{L}^n=\int_{\mathbf{y}_0\left(B_\tau(y_0)\right)}\left(\mathrm{det}(U)\left(\tilde{B}^{-1}(x)\right)\right)^{-\frac{1}{2}}\mathrm{det}(\tilde{B})^{-1} \mathrm{d}\mathcal{L}^n(x). \end{equation} Thus combining the continuity of $\mathrm{det}(U)$ with (\ref{11eqn4.18}), (\ref{11eqn4.20}) and (\ref{11eqn4.19}) implies \[ \lim\limits_{\tau\rightarrow 0^+} \frac{1}{\mathcal{H}^n(B_\tau(y_0))} \int_{\mathbf{x}_0\left(B_\tau(y_0)\right)}\left(\mathrm{det}(U)\right)^{-\frac{1}{2}} \mathrm{d}\mathcal{L}^n =1. \] Therefore, $h= 1$ $\mathcal{H}^n$-a.e. on $B_r(x_0)$, which suffices to conclude. \end{proof} \begin{proof}[Proof of Theorem \ref{thm1.5}] We start by improving the regularity of each $\phi_i$ on each coordinate chart $\left(B_r(x_\gamma),\mathbf{x}_\gamma \right)$. It suffices to verify the case $\gamma=0$. We still use the notation in Lemma \ref{1lem4.3}. For any fixed $B_{2s}(y_0)\subset B_r(x_0)$, without loss of generality, assume that $\mathbf{x}_0(y_0)=0_n$ and $B_s(0_n)\subset \mathbf{x}_0\left(B_{2s}(y_0)\right)$. We first claim that for $j=1,\ldots,n$, \begin{equation}\label{1eqn4.21} \sum\limits_{k=1}^n\frac{\partial }{\partial u_k} \left(u^{jk}\mathrm{det}(U)^{-\frac{1}{2}}\right)=\Delta u_j \mathrm{det}(U)^{-\frac{1}{2}} \ \ \mathcal{L}^n\text{-a.e. in }B_s(0_n). \end{equation} Notice that for any $\varphi \in C_c\left(B_s(0_n)\right)\cap C^1({X})$, in view of Lemma \ref{1lem4.5}, we have \[ \begin{aligned} \int_{B_s(0_n)} \varphi \Delta u_j \mathrm{det}(U)^{-\frac{1}{2}}\mathrm{d}\mathcal{L}^n &=\int_{\mathbf{x}_0^{-1}\left(B_s(0_n)\right)} \varphi\Delta u_j \mathrm{d}\mathcal{H}^n\\ &=-\int_{\mathbf{x}_0^{-1}\left(B_s(0_n)\right)} \langle \nabla u_j,\nabla \varphi \rangle \mathrm{d}\mathcal{H}^n\\ &=-\int_{B_s(0_n)} \sum\limits_{k=1}^n u^{jk}\dfrac{\partial \varphi}{\partial u_k} \mathrm{det}(U)^{-\frac{1}{2}}\mathrm{d}\mathcal{L}^n, \end{aligned} \] which suffices to show (\ref{1eqn4.21}) since each $u^{jk}$ is Lipschitz continuous on $B_s(0_n)$. Similarly, for $i=1,\ldots,m$ and any $\varphi \in C_c\left(B_s(0_n)\right)\cap C^1({X})$, it holds that \begin{equation}\label{1eqn4.22} \int_{B_s(0_n)} \varphi \mu_i \phi_i \mathrm{det}(U)^{-\frac{1}{2}}\mathrm{d}\mathcal{L}^n =\int_{B_s(0_n)} \sum\limits_{j,k=1}^n u^{jk}\frac{\partial \phi_i}{\partial u_j}\frac{\partial \varphi}{\partial u_k} \mathrm{det}(U)^{-\frac{1}{2}}\mathrm{d}\mathcal{L}^n. \end{equation} Therefore the $C^{1,1}$-regularity of $\phi_i$, together with (\ref{1eqn4.21}) and (\ref{1eqn4.22}), gives the following PDE: \begin{equation}\label{111eqn4.26} \sum\limits_{j,k=1}^n u^{jk}\frac{\partial^2 \phi_i}{ \partial u_j \partial u_k}+\sum\limits_{j=1}^n\Delta u_j \frac{\partial \phi_i}{ \partial u_j }+\mu_i \phi_i=0 \ \ \mathcal{L}^n\text{-a.e. in } B_s(0_n). \end{equation} Since each $\Delta u_j$ is some linear combination of $\phi_1,\ldots,\phi_m$, it is also $C^{1,1}$ with respect to $\{(B_r(x_\gamma),\mathbf{x}_\gamma)\}_{\gamma\in \Gamma}$. From classical elliptic PDE theory (see for instance \cite[Theorem 6.13]{GT01}), $\phi_i\in C^{2,\alpha}(B_s(0_n))$ for any $\alpha\in (0,1)$.
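As a model-case check of (\ref{111eqn4.26}) (included for illustration only), take ${X}=\mathbb{S}^n\subset\mathbb{R}^{n+1}$ with $\phi_i=x_i$, $\mu_i=n$, and the chart $u=(x_1,\ldots,x_n)$ near the north pole, so that $u^{jk}=\delta_{jk}-u_ju_k$ and $\Delta u_j=-nu_j$. For $i\leqslant n$ the equation reduces to $-nu_i+nu_i=0$, while for $i=n+1$, writing $x_{n+1}=\sqrt{1-|u|^2}$, one computes \[ \sum\limits_{j,k=1}^n (\delta_{jk}-u_ju_k)\frac{\partial^2 x_{n+1}}{\partial u_j\partial u_k} =-\frac{n}{\sqrt{1-|u|^2}},\qquad \sum\limits_{j=1}^n(-nu_j)\frac{\partial x_{n+1}}{\partial u_j}=\frac{n|u|^2}{\sqrt{1-|u|^2}}, \] so that the left hand side of (\ref{111eqn4.26}) equals $-n(1-|u|^2)/\sqrt{1-|u|^2}+n\sqrt{1-|u|^2}=0$, as expected. We now return to the regularity improvement.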
Hence, ${X}$ admits a $C^{2,\alpha}$ differentiable structure $\{(B_r(x_\gamma),\mathbf{x}_\gamma)\}_{\gamma\in \Gamma}$. Let us use this differentiable structure to define the following $(0,2)$-type symmetric tensor: \[ \tilde{g}:=\sum\limits_{i=1}^m \tilde{d}\phi_i\otimes \tilde{d}\phi_i, \] which is $C^{1,\alpha}$ with respect to $\{(B_r(x_\gamma),\mathbf{x}_\gamma)\}_{\gamma\in \Gamma}$. We claim that $\tilde{g}$ is a Riemannian metric. Again it suffices to prove this statement on $\left(B_r(x_0),\mathbf{x}_0\right)$. Set \[ \begin{aligned} \mathcal{U}:{X}&\longrightarrow \mathsf{M}_{m\times m}(\mathbb{R})\\ x&\longmapsto \left(\langle \nabla u_i,\nabla u_j\rangle(x)\right). \end{aligned} \] For any $x\in {X}$, rewrite $\mathcal{U}(x)$ as the following block matrix \[ \mathcal{U}(x):=\begin{pmatrix} U(x) &U_1(x) \\ U_1^T(x)& U_2(x)\end{pmatrix}. \] The choice of $\{u_i\}_{i=1}^m$ implies that $\tilde{g}$ has a local expression as \[ \tilde{g}=\sum\limits_{i=1}^m \tilde{d}u_i\otimes \tilde{d}u_i=\sum\limits_{i=1}^n \tilde{d}u_i\otimes \tilde{d}u_i+\sum\limits_{i=n+1}^m \sum\limits_{k,l=1}^n\dfrac{\partial u_i}{\partial u_k}\dfrac{\partial u_i}{\partial u_l}\tilde{d}u_k\otimes \tilde{d}u_l. \] By (\ref{111eqn4.18}), for $i=n+1,\ldots,m$, $l=1,\ldots,n$ and any $x\in B_r(x_0)$, we have \[ \dfrac{\partial u_i}{\partial u_l}(x)=\sum\limits_{j,k=1}^n b_{jl}(x)b_{jk}(x)\langle \nabla u_i,\nabla u_k \rangle (x)=\left(B^TBU_1(x)\right)_{li}=\left(U^{-1}U_1(x)\right)_{li}, \] which implies that \begin{equation}\label{111eqn4.25} \tilde{g}(x)=\sum\limits_{i=1}^n \tilde{d}u_i\otimes \tilde{d}u_i+\sum\limits_{k,l=1}^n \left(U^{-1}U_1U_1^T U^{-1}(x)\right)_{kl}\tilde{d}u_k\otimes \tilde{d}u_l,\ \ \forall x\in B_r(x_0). \end{equation} Since $\mathcal{U}^2-\mathcal{U}\equiv 0$ on $B_r(x_0)$, $U^2+U_1U_1^T-U\equiv 0$ on $B_r(x_0)$. By (\ref{111eqn4.25}), \begin{equation}\label{1111eqn4.28} \tilde{g}(x)=\sum\limits_{j,k=1}^n\left(U^{-1}\right)_{jk}(x)\tilde{d}u_j\otimes \tilde{d}u_k, \ \ \text{on}\ B_r(x_0), \end{equation} which is positive definite on $B_r(x_0)$. Moreover, $u^{jk}\in C^{1,\alpha}\left(B_r(x_0)\right)$ $(j,k=1,\ldots,n)$. Applying the regularity theorem for second order elliptic PDEs (for example \cite[Theorem 6.17]{GT01}) to (\ref{111eqn4.26}), we see that $\phi_i\in C^{3,\alpha}\left(B_r(x_0)\right)$ ($i=1,\ldots, m$). Thus the regularity of $\tilde{g}$ can be improved to $C^{2,\alpha}$. Then (\ref{1111eqn4.28}) shows that $u^{jk}\in C^{2,\alpha}\left(B_r(x_0)\right)$ $(j,k=1,\ldots,n)$. By induction, $\tilde{g}=g$ is actually a smooth Riemannian metric with respect to the smooth differentiable structure $\{(B_r(x_\gamma),\mathbf{x}_\gamma)\}_{\gamma\in \Gamma}$. This implies that $({X},\mathsf{d})$ is isometric to an $n$-dimensional smooth Riemannian manifold $(M^n,g)$. To see that $(M^n,g)$ is a closed Riemannian manifold, it suffices to use Theorem \ref{111thm4.3} again to show that the tangent space at any point is not isometric to the upper half-space $\mathbb{R}^n_+$. \end{proof} \begin{proof}[Proof of Corollary \ref{cor1.11}] Without loss of generality, we may assume that $\mathfrak{m}({X})=1$. Throughout the proof, each limit process and each convergence of the series is guaranteed by Proposition \ref{heatkernel2}, which can be checked via estimates similar to those in Lemma \ref{20211220b}.
First calculate that \begin{equation}\label{eqn4.2} n=\left\langle g,g\right\rangle=\left\langle c(t)g_t,g\right\rangle =c(t)\sum\limits_{i=1}^\infty e^{-2\mu_i t} \left|\nabla \phi_i\right|^2. \end{equation} Integrating (\ref{eqn4.2}) on ${X}$, we have \[ n=c(t)\sum\limits_{i=1}^\infty e^{-2\mu_i t}\mu_i. \] Let $\phi_1,\ldots,\phi_m$ be an $L^2(\mathfrak{m})$-orthonormal basis of the eigenspace corresponding to the first eigenvalue $\mu_1$. Then \begin{equation}\label{eqn4.3} \left|\sum\limits_{i=1}^m d\phi_i \otimes d\phi_i-\frac{e^{2\mu_1 t}}{c(t)}g\right|_{\mathsf{HS}}\leqslant \sum\limits_{i=m+1}^\infty e^{2\mu_1t-2\mu_i t} \left|d\phi_i \otimes d\phi_i\right|_{\mathsf{HS}}=\sum\limits_{i=m+1}^\infty e^{2\mu_1t-2\mu_i t} \left|\nabla \phi_i\right|^2. \end{equation} Again the integration of (\ref{eqn4.3}) on ${X}$ gives \begin{equation}\label{111eqn3.19} \int_{X} \left|\sum\limits_{i=1}^m d\phi_i \otimes d\phi_i-\frac{e^{2\mu_1 t}}{c(t)}g\right|_{\mathsf{HS}}\mathrm{d}\mathfrak{m}\leqslant \sum\limits_{i=m+1}^\infty e^{2\mu_1 t-2\mu_i t}\mu_i. \end{equation} Since \[ \lim\limits_{t\rightarrow \infty} \frac{e^{2\mu_1 t} }{c(t)} =\frac{m\mu_1}{n}+\lim\limits_{t\rightarrow \infty} \frac{1}{n}\sum\limits_{i=m+1}^\infty e^{2\mu_1t-2\mu_i t}\mu_i=\frac{m\mu_1}{n}, \] (\ref{111eqn3.19}) implies that \[ \int_{X} \left|\sum\limits_{i=1}^m d\phi_i \otimes d\phi_i-\frac{m\mu_1}{n}g\right|_{\mathsf{HS}}\mathrm{d}\mathfrak{m}=0. \] In other words, \[ \sum\limits_{i=1}^m d\phi_i \otimes d\phi_i= \frac{m\mu_1}{n}g. \] For the other eigenspaces, it suffices to argue by induction to conclude. \end{proof} \section{Diffeomorphic finiteness theorems}\label{sec5} This section is devoted to the proofs of Theorem \ref{thm1.8} and Theorem \ref{thm1.12}. To fix the notation, for a Riemannian manifold $(M^n,g)$, denote by $\mathrm{vol}_g$ its volume element, by $\mathrm{K}_g$ its sectional curvature, by $\mathrm{Ric}_g$ its Ricci curvature tensor, by $\mathrm{inj}_g(p)$ the injectivity radius at $p$, by $(\nabla^g)^k$, $\Delta^g$ the $k$-th covariant derivative and the Laplacian with respect to $g$, and by $\mathsf{d}_g$ the metric induced by $g$. To begin with, let us recall some results about the convergence of Sobolev functions on varying spaces. See \cite{AH18, GMS13, AST16}. \begin{thm}[Compactness of Sobolev functions]\label{222thm5.1} Let $\{\left({X}_i,\mathsf{d}_i,\mathcal{H}^n\right)\}$ be a sequence of non-collapsed $\mathrm{RCD}(K,n)$ spaces with $\sup_i\mathrm{diam}({X}_i,\mathsf{d}_i)<\infty$ and \[ \left({X}_i,\mathsf{d}_i,\mathcal{H}^n\right)\xrightarrow{\mathrm{mGH}} \left({X},\mathsf{d},\mathcal{H}^n\right). \] Let $f_i\in H^{1,2}\left({X}_i,\mathsf{d}_i,\mathcal{H}^n\right)$ with $\sup_i\|f_i\|_{H^{1,2}\left({X}_i,\mathsf{d}_i,\mathcal{H}^n\right)}<\infty$. Then there exist $f\in H^{1,2}\left({X},\mathsf{d},\mathcal{H}^n\right)$ and a subsequence of $\{f_i\}$, still denoted by $\{f_i\}$, such that $\{f_i\}$ $L^2$-strongly converges to $f$ and \[ \liminf\limits_{i\rightarrow \infty} \int_{X_i} \left|\nabla^{{X}_i} f_i\right|^2 \mathrm{d}\mathcal{H}^n \geqslant \int_{X} \left|\nabla^{{X}} f\right|^2 \mathrm{d}\mathcal{H}^n. \] \end{thm} \begin{thm}[Stability of Laplacian]\label{111thm5.2} Let $\{\left({X}_i,\mathsf{d}_i,\mathcal{H}^n\right)\}$, $\left({X},\mathsf{d},\mathcal{H}^n\right)$ be taken as in Theorem \ref{222thm5.1}.
Let $f_i\in D\left(\Delta^{{X}_i}\right)$ with \[ \sup\limits_i \left(\|f_i\|_{H^{1,2}\left({X}_i,\mathsf{d}_i,\mathcal{H}^n\right)}+\left\|\Delta^{{X}_i} f_i\right\|_{L^2\left(\mathcal{H}^n\right)}\right)<\infty. \] If $\{f_i\}$ $L^2$-strongly converges to $f$ on ${X}$ $($by Theorem \ref{222thm5.1} $f \in H^{1,2}({X},\mathsf{d},\mathcal{H}^n)$$)$, then the following statements hold. \begin{enumerate} \item $f\in D(\Delta^{X})$. \item $\{\Delta^{{X}_i}f_i\}$ $L^2$-weakly converges to $\Delta^{{X}}f$. \item $\{\left|\nabla^{{X}_i}f_i\right|\}$ $L^2$-strongly converges to $\left|\nabla^{{X}}f\right|$. \end{enumerate} \end{thm} We are now in the position to prove the following theorem. \begin{thm}\label{abcthm5.3} $\mathcal{M}(K,n,D,\tau)$ has finitely many members up to diffeomorphism. \end{thm} \begin{proof} Assume the contrary, i.e. there exists a sequence of Riemannian manifolds $\{( M_i^n,g_i)\}\subset \mathcal{M}(K,n,D,\tau)$, which are pairwise non-diffeomorphic. On each $( M_i^n,g_i)$, there exists $m_i\in\mathbb{N}$, such that \begin{equation}\label{1111eqn5.2} g_i=\sum\limits_{j=1}^{m_i}d\phi_{i,j}\otimes d\phi_{i,j}, \end{equation} where $\phi_{i,j}$ is a non-constant eigenfunction of $-\Delta^{g_i}$ with the corresponding eigenvalue $\mu_{i,j}$ and satisfies that $\left\|\phi_{i,j}\right\|_{L^2(\mathrm{vol}_{g_i})}\geqslant \tau>0$ ($i\in\mathbb{N}$, $j=1,\ldots, m_i$). By taking trace of (\ref{1111eqn5.2}) with respect to $g_i$, we know \begin{equation}\label{eqn4.29} n=\sum\limits_{j=1}^{m_i}\left|\nabla^{g_i}\phi_{i,j}\right|^2. \end{equation} Integration of (\ref{eqn4.29}) on $( M_i^n,g_i)$ shows that \[ n\text{vol}_{g_i}( M^n_i)\geqslant \tau^2\sum\limits_{j=1}^{m_i} \mu_{i,j}. \] The Bishop-Gromov volume comparison theorem and {}{Li-Yau's first eigenvalue lower bound \cite[Theorem 7]{LY80}} imply that \begin{equation}\label{eqn4.30} C_1(K,n)D^n\geqslant n\text{vol}_{g_i}( M_i^n) {}{\geqslant}\tau^2\sum\limits_{j=1}^{m_i} \mu_{i,j}\geqslant C_2(K,n,D)\tau^2 m_i \geqslant C_2(K,n,D)\tau^2. \end{equation} Moreover, for each $\phi_{i,j}$, \begin{equation}\label{1112eqn5.4} \|\phi_{i,j}\|_{L^2(\mathrm{vol}_{g_i})}^2 {}{=}\mu_{i,j}^{-1}\int_{M_i^n} |\nabla^{g_i} \phi_{i,j}|^2 \mathrm{dvol}_{g_i} \leqslant n\mu_{i,j}^{-1}\mathrm{vol}_{g_i}(M_i^n)\leqslant C(K,n,D,\tau). \end{equation} {}{Since (\ref{eqn4.30}) implies that $1\leqslant \inf_i m_i\leqslant \sup_i m_i\leqslant C(K,n,D,\tau)$, after passing to a subsequence, we may take $m\in\mathbb{N}$} such that \begin{equation}\label{1111eqn5.3} g_i=\sum\limits_{j=1}^{m}d\phi_{i,j}\otimes d\phi_{i,j}, \ \forall i\in \mathbb{N}. \end{equation} Moreover, by (\ref{eqn4.30}), we may assume that \begin{equation}\label{111eqn5.4} \lim\limits_{i\rightarrow \infty} \mu_{i,j}=\mu_j \in [C_2(K,n,D),\tau^{-2}C_1(K,n)D^n],\ \ j=1,\ldots,m. \end{equation} According to Theorem \ref{11thm2.15} and (\ref{eqn4.30}), $\{( M_i^n,g_i)\}$ can also be required to satisfy \[ \left( M_i^n,\mathsf{d}_{g_i},\text{vol}_{g_i}\right)\xrightarrow{\mathrm{mGH}} \left({X},\mathsf{d},\mathcal{H}^n\right) \] for some non-collapsed RCD$(K,n)$ space $({X},\mathsf{d},\mathcal{H}^n)$. In particular, combining (\ref{eqn4.30})-(\ref{111eqn5.4}) with Theorems \ref{222thm5.1} and \ref{111thm5.2}, we know that on $({X},\mathsf{d},\mathcal{H}^n)$, \[ g=\sum\limits_{j=1}^m d\phi_j\otimes d\phi_j, \] where each $\phi_j$ is an eigenfunction of $-\Delta$ with the eigenvalue $\mu_j$. 
Therefore, from Theorem \ref{thm1.5}, we deduce that $( {X},\mathsf{d})$ is isometric to an $n$-dimensional smooth closed Riemannian manifold $(M^n,g)$. However, due to \cite[Theorem A.1.12]{ChCo1}, $ M_i^n$ is diffeomorphic to $M^n$ for any sufficiently large $i$. A contradiction. \end{proof} The proof of Theorem \ref{thm1.8} mainly uses the estimates in Section \ref{sec4} and a stronger version of the Gromov convergence theorem given by Hebey-Herzlich \cite{HH97}. For the reader's convenience, Hebey-Herzlich's theorem is stated below. \begin{thm}\label{11thm5.4} Let $\{(M_i^n,g_i)\}$ be a sequence of $n$-dimensional closed Riemannian manifolds such that \[ \sup\limits_i\mathrm{vol}_{g_i}(M_i^n)<\infty,\ \inf\limits_i\inf\limits_{p\in M_i^n} \mathrm{inj}_{g_i}(p)>0, \] and for all $k\in \mathbb{N}$, \[ \sup\limits_i \sup\limits_{M_i^n}\left|(\nabla^{g_i})^k\mathrm{Ric}_{g_i}\right|<\infty. \] Then there exists a subsequence, still denoted by $\{(M_i^n,g_i)\}$, which $C^\infty$-converges to a closed Riemannian manifold $(M^n,g)$. \end{thm} The following Cheeger-Gromov-Taylor estimate of the injectivity radius is also necessary for the proof of Theorem \ref{thm1.8}. \begin{thm}[{\cite[Theorem 4.7]{CGT82}}]\label{111thm5.5} Let $(M^n,g)$ be a complete $n$-dimensional Riemannian manifold with $|\mathrm{K}_g|\leqslant \kappa<\infty$. Then there exists a constant $c_0=c_0(n)>0$, such that for any $0<r\leqslant \frac{\pi}{4\sqrt{\kappa}}$, \[ \mathrm{inj}_g(p)\geqslant c_0 r\frac{\mathrm{vol}(B_r(p))}{\int_0^r V_{-(n-1)\kappa,n}\mathrm{d}t},\ \forall p\in M^n. \] \end{thm} \begin{proof}[Proof of Theorem \ref{thm1.8}] By Theorem \ref{abcthm5.3}, without loss of generality, we may take a sequence $\{( M^n,g_i)\}\subset\mathcal{M}(K,n,D,\tau)$ such that $\{( M^n,g_i)\}$ mGH converges to $(M^n,g)$ and that (\ref{eqn4.30})-(\ref{111eqn5.4}) still hold. Denote by $B_r^i(p)$ the ball of radius $r$ (with respect to $\mathsf{d}_{g_i}$) centered at $p\in M^n$ for notational convenience. \textbf{Step 1} Uniform two-sided sectional curvature bound on $( M^n,g_i)$. According to the estimates in Section \ref{sec4}, combining (\ref{eqn4.30})-(\ref{111eqn5.4}), we may choose a uniform $r>0$, such that for every arbitrary but fixed $B_{4096r}^i(p)\subset M^n$, there exists a coordinate function $\mathbf{x}^i=(u^i_1,\ldots,u^i_n):B_{4096r}^i(p)\rightarrow \mathbb{R}^n$ satisfying the following properties. \begin{enumerate} \item $\mathbf{x}^i$ is $\dfrac{3}{2}$-bi-Lipschitz from $B_{4096r}^i(p)$ to $\mathbf{x}^i(B_{4096r}^i(p))$ (by Lemma \ref{1lem4.3}). \item Set $(g_i)_{jk}:=g_i\left(\dfrac{\partial}{\partial u_j^i},\dfrac{\partial}{\partial u_k^i}\right)$. Then it holds that \begin{equation}\label{1eqn5.6} \frac{1}{2}I_n\leqslant (g_i)_{jk}\leqslant 2I_n,\ \text{on $B_{4096r}^i(p)$ (by Lemma \ref{1lem4.3} and (\ref{1111eqn4.28}))}. \end{equation} \end{enumerate} We first give a $C^{2,\alpha}$-estimate of $g_i$ on each $( M^n,g_i)$ for any $\alpha\in (0,1)$. Applying (\ref{1eqn4.8}) and (\ref{1eqn5.6}) implies that on $B_{4096r}^i(p)$ \begin{equation}\label{11111eqn5.8} C\geqslant \left|\nabla^{g_i} (g_i)^{jk}\right|^2=\sum\limits_{\beta,\gamma=1}^n (g_i)^{\beta\gamma}\frac{\partial}{\partial u^i_\beta} (g_i)^{jk} \frac{\partial}{\partial u^i_\gamma} (g_i)^{jk}\geqslant \frac{1}{2}\sum\limits_{\beta=1}^n \left(\frac{\partial}{\partial u^i_\beta} (g_i)^{jk}\right)^2, \end{equation} for some $C=C(K,n,D,\tau)$ which may vary from line to line.
Then $\left\| (g_i)^{jk}\right\|_{C^{\alpha}(B_{4096r}^i(p))}\leqslant C$ follows from (\ref{11111eqn5.8}) and the local bi-Lipschitz property of $\mathbf{x}^i$ ($j,k=1,\ldots,n$). For $j=1,\ldots,m$, $\left|\nabla^{g_i} \phi_{i,j}\right|\leqslant C$ yields that $\|\phi_{i,j}\|_{C^\alpha\left(B_{4096r}^i(p)\right)}\leqslant C$. This implies that $\left\|\Delta^{g_i} u_{i,j}\right\|_{C^\alpha\left(B_{4096r}^i(p)\right)}\leqslant C$ since each $u_{i,j}$ is a linear combination of $\phi_{i,1},\ldots,\phi_{i,m}$ constructed as in Lemma \ref{1lem4.3}. Then the classical Schauder interior estimate (see for example \cite[Theorem 6.2]{GT01}), together with the PDE (\ref{111eqn4.26}), implies that $\left\|\phi_{i,j}\right\|_{C^{2,\alpha}\left(B_{256r}^i(p)\right)}\leqslant C$ since $\mathbf{x}^i \left(B_{256r}^i(p)\right)\subset B_{512r}(\mathbf{x}^i(p))\subset \mathbf{x}^i\left(B_{1024r}^i(p)\right)\subset B_{2048r}(\mathbf{x}^i(p)) \subset\mathbf{x}^i\left(B_{4096r}^i(p)\right)$. As a result, $\left\|\Delta^{g_i} u_{i,j}\right\|_{C^{2,\alpha}\left(B_{256r}^i(p)\right)}\leqslant C$. Moreover, (\ref{1111eqn5.3}) shows that \[ \left\| (g_i)_{jk}\right\|_{C^{1,\alpha}\left(B_{256r}^i(p)\right)}, \left\| (g_i)^{jk}\right\|_{C^{1,\alpha}\left(B_{256r}^i(p)\right)}\leqslant C,\ j,k=1,\ldots,n. \] Applying again the Schauder interior estimate to $\phi_{i,j}$ in the PDE (\ref{111eqn4.26}), we know $\left\|\phi_{i,j}\right\|_{C^{3,\alpha}\left(B_{16r}^i(p)\right)}\leqslant C$. Consequently, \[ \left\| (g_i)_{jk}\right\|_{C^{2,\alpha}\left(B_{16r}^i(p)\right)}, \left\| (g_i)^{jk}\right\|_{C^{2,\alpha}\left(B_{16r}^i(p)\right)}\leqslant C,\ j,k=1,\ldots,n. \] Since the calculation of the sectional curvature only involves terms of the form $(g_i)_{jk}$, $(g_i)^{jk}$, $\dfrac{\partial }{\partial u^i_\beta} (g_i)^{jk}$, $\dfrac{\partial }{\partial u^i_\beta} (g_i)_{jk}$, $\dfrac{\partial^2 }{\partial u^i_\beta \partial u^i_\gamma} (g_i)_{jk}$ ($j,k,\beta,\gamma=1,\ldots,n$), $|\mathrm{K}_{g_i}|$ has a uniform upper bound $C_0=C_0(K,n,D,\tau)$. \textbf{Step 2} Uniform lower injectivity radius bound on $( M^n,g_i)$. By Step 1, we may take $r'=\min\{r, C_0^{-1}\}$, which is still denoted by $r$. In order to use Theorem \ref{111thm5.5}, we only need a lower bound on $\mathrm{vol}_{g_i}(B^i_r(p))$. It suffices to apply (\ref{eqn4.30}) and the Bishop-Gromov volume comparison theorem again to show that \begin{equation}\label{12345eqn5.7} \tilde{C}(K,n,D,\tau)r^n\leqslant \mathrm{vol}_{g_i}(B^i_r(p))\leqslant C(K,n)D^n, \end{equation} because (\ref{12345eqn5.7}), Theorem \ref{111thm5.5} as well as the two-sided sectional curvature bound obtained in Step 1 then imply that $\inf\limits_{p\in M^n} \mathrm{inj}_{g_i}(p)\geqslant \tilde{C}(K,n,D,\tau)$. \textbf{Step 3} Improvement of the regularity. In order to apply Theorem \ref{11thm5.4}, it suffices to show that for any $k\geqslant 0$, there exists $C_k(K,n,D,\tau)$ such that $|(\nabla^{g_i})^k \mathrm{Ric}_{g_i}|(p)\leqslant C_{k}(K,n,D,\tau)$ holds for any arbitrary but fixed $p\in M^n$. Since the case $k=0$ is already proved in Step 1, we prove the case $k=1$.
Using the Schauder interior estimate again and an argument similar to Step 1 gives the following $C^{4,\alpha}$-estimate of $\phi_{i,j}$: \[ \left\|\phi_{i,j}\right\|_{C^{4,\alpha}\left(B_{r}^i(p)\right)}\leqslant C_1(K,n,D,\tau), \] which implies that \[ \left\| (g_i)_{jk}\right\|_{C^{3,\alpha}\left(B_{r}^i(p)\right)}, \left\| (g_i)^{jk}\right\|_{C^{3,\alpha}\left(B_{r}^i(p)\right)}\leqslant C_1(K,n,D,\tau),\ j,k=1,\ldots,n. \] Therefore, we see \[ \sup_{M^n} |\nabla^{g_i}\mathrm{Ric}_{g_i}|\leqslant C_1(K,n,D,\tau). \] Now by using the proof by induction, for any $k\geqslant 2$, there exists $C_{k}=C_{k}(K,n,D,\tau)$ such that \[ \sup_{M^n} |\left(\nabla^{g_i}\right)^k\mathrm{Ric}_{g_i}|\leqslant C_{k}(K,n,D,\tau), \] which suffices to conclude. \end{proof} \begin{proof}[Proof of Theorem \ref{thm1.12}] The proof is almost the same as that of Theorem \ref{abcthm5.3}, and we omit some details. Assume the contrary, i.e. there exists a sequence of pairwise non-diffeomorphic Riemannian manifolds $\{( M_i^n,g_i)\}$ such that $( M_i^n,g_i)\in \mathcal{N}\left(K,n,D,i^{-1},\tau\right)$ for any $i\in \mathbb{N}$. Then for each $\{( M_i^n,g_i)\}$, the almost isometric immersion condition ensures the existence of some $m_i\in\mathbb{N}$, such that \begin{equation}\label{eqn5.7} \frac{1}{\mathrm{vol}_{g_i}(M_i^n)}\int_{M_i^n}\left|\sum\limits_{j=1}^{m_i} d\phi_{i,j}\otimes d\phi_{i,j}- g_i \right|\mathrm{dvol}_{g_i}\leqslant \frac{1}{i}. \end{equation} Thus \begin{equation}\label{1234eqn5.1} \begin{aligned} \frac{\tau^2 \mu_{i,j}}{\mathrm{vol}_{g_i}(M_i^n)}&\leqslant \frac{1}{\mathrm{vol}_{g_i}(M_i^n)}\int_{M_i^n}|\nabla^{g_i} \phi_{i,j}|^2\mathrm{dvol}_{g_i}\\ \ & \leqslant \frac{1}{\mathrm{vol}_{g_i}(M_i^n)}\int_{M_i^n}{}{\left(\sum\limits_{j,k=1}^{m_i}\left\langle \nabla^{g_i} \phi_{i,j},\nabla^{g_i}\phi_{i,k}\right\rangle^2\right)^{\frac{1}{2}}}\mathrm{dvol}_{g_i}\\ \ &\leqslant \frac{1}{\mathrm{vol}_{g_i}(M_i^n)}\int_{M_i^n}\left|\sum\limits_{j=1}^{m_i} d\phi_{i,j}\otimes d\phi_{i,j}- g_i \right|\mathrm{dvol}_{g_i}+ \frac{1}{\mathrm{vol}_{g_i}(M_i^n)}\int_{M_i^n}| g_i |\mathrm{dvol}_{g_i}\\ \ &\leqslant \frac{1}{i}+\sqrt{n}. \end{aligned} \end{equation} Applying {}{Li-Yau's first eigenvalue lower bound \cite[Theorem 7]{LY80}} and Bishop-Gromov volume comparison theorem to (\ref{1234eqn5.1}) shows that \begin{equation}\label{eqn5.8} C_1(K,n,D)\leqslant \mu_{i,j}\leqslant C_2(K,n,D,\tau). \end{equation} It then follows from (\ref{1234eqn5.1}) and (\ref{eqn5.8}) that \begin{equation}\label{1eqn5.9} C_3(K,n,D,\tau)\leqslant \mathrm{vol}_{g_i}(M_i^n)\leqslant C_4(K,n,D)\ \text{and}\ \tau\leqslant \|\phi_{i,j}\|_{L^2(\mathrm{vol}_{g_i})}\leqslant C_5(K,n,D). \end{equation} To see $\{m_i\}$ has an upper bound, it suffices to notice that \[ \begin{aligned} \ &\left|\sum\limits_{j=1}^{m_i} \left\|\phi_{i,j}\right\|_{L^2(\mathrm{vol}_{g_i})}^2\mu_{i,j}-n\mathrm{vol}_{g_i}(M_i^n) \right|\\ =&\left|\int_{M_i^n}\left\langle\sum\limits_{j=1}^{m_i} d\phi_{i,j}\otimes d\phi_{i,j}- g_i ,g_i\right\rangle\mathrm{dvol}_{g_i}\right|\\ \leqslant& \sqrt{n}\int_{M_i^n}\left|\sum\limits_{j=1}^{m_i} d\phi_{i,j}\otimes d\phi_{i,j}- g_i \right|\mathrm{dvol}_{g_i}\leqslant \sqrt{n}\ C_4(K,n,D)\frac{1}{i} \end{aligned} \] As a result, $m_i\leqslant C_6(K,n,D,\tau)$. Therefore there exists $m\in \mathbb{N}$ and a subsequence of $\{( M_i^n,g_i)\}$ which is still denoted as $\{( M_i^n,g_i)\}$, such that each $( M_i^n,g_i)$ admits an $i^{-1}$-almost isometrically immersing eigenmap into $\mathbb{R}^m$. 
In addition, $\{( M_i^n,g_i)\}$ can also be required to satisfy \[ \left( M_i^n,\mathsf{d}_{g_i},\text{vol}_{g_i}\right)\xrightarrow{\mathrm{mGH}} \left({X},\mathsf{d},\mathcal{H}^n\right) \] for some non-collapsed RCD$(K,n)$ space $({X},\mathsf{d},\mathcal{H}^n)$. Again combining (\ref{eqn5.7})-(\ref{1eqn5.9}) with Theorems \ref{222thm5.1} and \ref{111thm5.2}, we see that on $({X},\mathsf{d},\mathcal{H}^n)$, \[ g=\sum\limits_{j=1}^m d\phi_j\otimes d\phi_j, \] where each $\phi_j$ is an eigenfunction of $-\Delta$ with the eigenvalue $\mu_j:=\lim\limits_{i\rightarrow\infty}\mu_{i,j}$. Finally, it suffices to apply Theorem \ref{thm1.5} and \cite[Theorem A.1.12]{ChCo1} to deduce the contradiction. \end{proof} \section{Examples} In this section, some examples concerning the IHKI condition for Riemannian manifolds are provided. Let us first emphasize that if $(M^n,g)$ is an $n$-dimensional compact IHKI Riemannian manifold, then it follows from Corollary \ref{cor1.11} and Takahashi's theorem \cite[Theorem 3]{Ta66} that for any $t>0$, the function $\rho^{M^n}_{t}:p\mapsto\rho^{M^n}(p,p,t)$ is constant. By Lemma \ref{llem3.1}, we see that \begin{enumerate} \item\label{20221201} For any $k,n\in \mathbb{N}$, $\underbrace{\mathbb{S}^n\times\cdots \times \mathbb{S}^n}_{2^k \text{ times}}$ is IHKI. \item For any $p,q\in\mathbb{N}$, the compact homogeneous space $\mathrm{SO}(2p+q)/(\mathrm{SO}(2p)\times \mathrm{SO}(q))$ with a constant positive Ricci curvature is IHKI since it is homogeneous and irreducible. \end{enumerate} Example \ref{exmp4.5} shows that Theorem \ref{thm1.2} is sharp. The construction of Example \ref{exmp4.5} needs the following two lemmas. \begin{lem}\label{prop4.3} Let $( M^m,g)$, $( N^n,h)$, $( M^m\times N^n,\tilde{g})$ be $m,n,(m+n)$-dimensional $\mathrm{IHKI}$ Riemannian manifolds respectively, where $\tilde{g}$ is the standard product Riemannian metric. Then for any $t>0$, it holds that $(\rho^ {M^m}_t)^n=(\rho^ {N^n}_t)^m$. \end{lem} \begin{proof} Owing to Lemmas \ref{llem3.1} and \ref{1lem3.15}, we have \begin{equation}\label{eqn4.4} \begin{aligned} c^{ M^m\times N^n}(t)g_t^{ M^m\times N^n } (p,q)&=c^{ M^m\times N^n}(t)\rho^{ M^m}_{2t}g_t^{ N^n}(q) +c^{ M^m\times N^n}(t) \rho^{ N^n}_{2t}g_t^{ M^m}(p)\\ \ &=\rho^{ M^m}_{2t}\frac{c^{ M^m\times N^n}(t)}{c^{N^n}(t)}h(q) + \rho^{ N^n}_{2t}\frac{c^{ M^m\times N^n}(t)}{c^{M^m}(t)}g(p)\\ \ &=\tilde{g}(p,q). \end{aligned} \end{equation} Then from (\ref{eqn4.4}), $\rho^ {N^n}_{2t}c^{N^n}(t)=\rho^ {M^m}_{2t}c^ {M^m}(t)$ for any $t>0$. Moreover, for any $p\in M^m$, we calculate that \[ \begin{aligned} \frac{\partial }{\partial t} \rho^ {M^m}_{2t}(p) =\ & \frac{\partial }{\partial t} \int_{ M^m}\left(\rho^ {M^m}(p,p',t)\right)^2 \text{dvol}_g(p')\\ =\ & 2 \int_{ M^m}\Delta^ {M^m}_{p'}\rho^ {M^m}(p,p',t) \rho^ {M^m}(p,p',t) \text{dvol}_g(p')\\ =\ & -2 \int_{ M^m}\left|\nabla^ {M^m}_{p'}\rho^ {M^m}(p,p',t)\right|^2 \text{dvol}_g(p') =-2 \left\langle g_t^ {M^m},g\right\rangle(p)=- \frac{2m}{ c^{ M^m}(t)}. \end{aligned} \] Analogously, $\dfrac{\partial }{\partial t} \rho^ {N^n}_{2t}=- \dfrac{2n}{ c^{ N^n}(t)}$, and thus $n \rho^ {N^n}_{2t}\dfrac{\partial }{\partial t} \rho^ {M^m}_{2t} =m \rho^ {M^m}_{2t}\dfrac{\partial }{\partial t} \rho^ {N^n}_{2t}$. Therefore there exists $\tilde{c}>0$, such that \[ \left(\rho^ {M^m}_t\right)^n=\tilde{c}\left(\rho^ {N^n}_t\right)^m,\ \ \forall t>0.
\] To see $\tilde{c}=1$, it suffices to use a blow-up argument and Theorem \ref{thm2.26} to show that $\lim\limits_{t\rightarrow 0 }t^{\frac{m}{2}}\rho^ {M^m}_t=\left(4\pi\right)^{-\frac{m}{2}}$ and $\lim\limits_{t\rightarrow 0 }t^{\frac{n}{2}}\rho^ {N^n}_t=\left(4\pi\right)^{-\frac{n}{2}}$. \end{proof} \begin{lem}\label{lem4.4} Let $( M^n,g)$ be an $n$-dimensional closed $\mathrm{IHKI}$ Riemannian manifold. Then it holds that \[ \lim\limits_{t\rightarrow \infty} \frac{t}{c^ {M^n}(t)\rho^{ M^n}_{2t}}=0. \] \end{lem} \begin{proof} Set $0=\mu_0< \mu_1\leqslant \ldots\rightarrow +\infty$ as the eigenvalues of $-\Delta$ counting with multiplicities. Then it suffices to notice that \[ \frac{1}{c^ {M^n}(t)}=\frac{1}{n\text{vol}_g( M^n)}\sum\limits_{i=1}^\infty e^{-2\mu_i t}\mu_i, \ \ \rho^ {M^n}_{2t}=\frac{1}{\text{vol}_g( M^n)}\sum\limits_{i=0}^\infty e^{-2\mu_it} \] and let $t\rightarrow \infty$. \end{proof} \begin{exmp}\label{exmp4.5} Set $\mathbb{S}^n(k):=\left\{(x_1,\ldots,x_{n+1})\in\mathbb{R}^{n+1}:x_1^2+\cdots+x_{n+1}^2=k^2\right\}$. Observe that $c^{\mathbb{S}^n(k)}(1)=k^{n+2}c^{\mathbb{S}^n}(k^{-2})$, $\rho^{\mathbb{S}^n(k)}_2=k^{-n}\rho^{\mathbb{S}^n}_{2k^{-2}}$. By Lemma \ref{lem4.4}, \[ \lim\limits_{k\rightarrow 0} c^{\mathbb{S}^n(k)}(1)\rho^{\mathbb{S}^n(k)}_2=\infty. \] This implies that for any small $r>0$, there exists $s=s(r)$ such that \[ c^{\mathbb{S}^1(r)}(1)\rho^{\mathbb{S}^1(r)}_2=c^{\mathbb{S}^2(s)}(1)\rho^{\mathbb{S}^2(s)}_2. \] Consider the product Riemannian manifold $\left(\mathbb{S}^1(r)\times \mathbb{S}^2(s), g_{\mathbb{S}^1(r)\times\mathbb{S}^2(s)}\right)$. By (\ref{eqn4.4}), there exists $c(r)>0$, such that $c(r)\Phi_1^{\mathbb{S}^1(r)\times \mathbb{S}^2(s)}$ realizes an isometric immersion into $L^2\left(\mathrm{vol}_{g_{\mathbb{S}^1(r)\times\mathbb{S}^2(s)}}\right)$. If $\left(\mathbb{S}^1(r)\times \mathbb{S}^2(s), g_{\mathbb{S}^1(r)\times\mathbb{S}^2(s)}\right)$ is IHKI, then by Lemma \ref{prop4.3}, it holds that \begin{equation}\label{eqn4.5} \rho^{\mathbb{S}^2(s)}_t=\left(\rho^{\mathbb{S}^1(r)}_t\right)^2=\rho^{\mathbb{S}^1(r)\times \mathbb{S}^1(r)}_t, \ \ \forall t>0. \end{equation} Therefore, rewriting both sides of (\ref{eqn4.5}) by means of the heat trace (as in the proof of Lemma \ref{lem4.4}), we see that for any $t>0$, \begin{equation}\label{eqn4.6} \text{vol}\left(\mathbb{S}^2(s)\right)\sum\limits_{i=0}^\infty \exp\left(-r^{-2}\mu_i^{\mathbb{S}^1\times \mathbb{S}^1}t\right) =\text{vol}\left(\mathbb{S}^1(r)\times \mathbb{S}^1(r)\right)\sum\limits_{i=0}^\infty \exp\left(-s^{-2}\mu_i^{\mathbb{S}^2}t\right). \end{equation} Then $\mathrm{vol}\left(\mathbb{S}^2(s)\right)=\mathrm{vol}\left(\mathbb{S}^1(r)\times \mathbb{S}^1(r)\right)$ follows by letting $t\rightarrow 0$ in (\ref{eqn4.6}), which implies that $s(r)=r$. Equation (\ref{eqn4.6}) then becomes \begin{equation}\label{eqn4.7} \sum\limits_{i=1}^\infty \exp\left(-r^{-2}\mu_i^{\mathbb{S}^1\times \mathbb{S}^1}t\right) =\sum\limits_{i=1}^\infty \exp\left(-r^{-2}\mu_i^{\mathbb{S}^2}t\right),\ \forall t>0 . \end{equation} Since $\mu_1^{\mathbb{S}^1\times \mathbb{S}^1}=\mu_4^{\mathbb{S}^1\times \mathbb{S}^1}=2<\mu_5^{\mathbb{S}^1\times \mathbb{S}^1}$ and $\mu_1^{\mathbb{S}^2}=\mu_3^{\mathbb{S}^2}=2<\mu_4^{\mathbb{S}^2}$, multiplying both sides of (\ref{eqn4.7}) by $\exp(2r^{-2}t)$ and letting $t\rightarrow \infty$, the right hand side of (\ref{eqn4.7}) converges to 3, while the left hand side of (\ref{eqn4.7}) converges to 4. A contradiction. \end{exmp} There is also a simple example which does not satisfy condition 2 of Corollary \ref{cor4.7}.
\begin{exmp} Consider the product manifold $(\mathbb{S}^1\times \mathbb{R}, g_{\mathbb{S}^1\times \mathbb{R}})$. It is obvious that \[ \begin{aligned} \pi g_t^{\mathbb{S}^1\times \mathbb{R}}=&\dfrac{1}{(4\pi t)^{\frac{1}{2}}}\sum\limits_{i=1}^\infty e^{-i^2t } g_{\mathbb{S}^1}+\dfrac{c_1^\mathbb{R}}{t^{\frac{3}{2}}}\sum\limits_{i=0}^\infty e^{-i^2t} i^2 g_\mathbb{R}\\ \geqslant &\dfrac{1}{(4\pi t)^{\frac{1}{2}}}g_{\mathbb{S}^1}+\dfrac{c_1^\mathbb{R}}{t^{\frac{3}{2}}}g_\mathbb{R}, \end{aligned} \] As a result, $g_t^{\mathbb{S}^1\times\mathbb{R}}\geqslant \dfrac{c_1^\mathbb{R}}{\pi }t^{-\frac{3}{2}}g_{\mathbb{S}^1\times \mathbb{R}}$ for any sufficiently large $t>0$ but \[ {}{\lim\limits_{t\rightarrow \infty} t^{-2}c(t)=\lim\limits_{t\rightarrow \infty} t^{-2}\frac{\pi}{c_1^\mathbb{R}}t^{\frac{3}{2}}=0.} \] \end{exmp} \begin{thebibliography}{10} \bibitem[ABS19]{ABS19}G. Antonelli, E. Bru\`{e}, D. Semola: Volume bounds for the quantitative singular strata of non collapsed RCD metric measure spaces, Anal. Geom. Metr. Spaces, \textbf{7} (2019), no. 1, 158–178. \bibitem[AGS14a]{AGS14a}L. Ambrosio, N. Gigli, G. Savar\'{e}: Calculus and heat flow in metric measure spaces and applications to spaces with Ricci bounds from below, Invent. Math. \textbf{195}(2014), no. 2, 289–391. \bibitem[AGS14b]{AGS14b}\textbf{------}: Metric measure spaces with Riemannian Ricci curvature bounded from below, Duke Math. J. \textbf{163}(2014), no. 7, 1405-1490. \bibitem[AGS15]{AGS15}\textbf{------}: Bakry-\'{E}mery curvature-dimension condition and Riemannian Ricci curvature bounds, Ann. Probab. \textbf{43}(2015), no. 1, 339–404. \bibitem[AH17]{AH17}L. Ambrosio, S. Honda: New stability results for sequences of metric measure spaces with uniform Ricci bounds from below, in Measure Theory in Non-Smooth Spaces, 1-51, De Gruyter Open, Warsaw, 2017. \bibitem[AH18]{AH18}\textbf{------}: Local spectral convergence in RC$\text{D}^\ast(K, N)$ spaces, Nonlinear Anal. \textbf{177} (2018), part A, 1–23. \bibitem[AHPT21]{AHPT21}L. Ambrosio, S. Honda, J. Portegies, D. Tewodrose: Embedding of RCD($K,N$) spaces in $L^2$ via eigenfunctions, J. Funct. Anal. \textbf{280}(2021), no. 10, Paper No. 108968, 72 pp. \bibitem[AHT18]{AHT18}L. Ambrosio, S. Honda, D. Tewodrose: Short-time behavior of the heat kernel and Weyl's law on $\mathrm{RCD}^\ast(K,N)$-spaces, Ann. Global Anal. Geom. \textbf{53}(2018), no. 1, 97-119. \bibitem[AMS16]{AMS16}L. Ambrosio, A. Mondino, G. Savar\'{e}: On the Bakry-\'Emery condition, the gradient estimates and the Local-to-Global property of RC$\text{D}^\ast(K, N)$ metric measure spaces, J. Geom. Anal. \textbf{26} (2016), no. 1, 24–56. \bibitem[AMS19]{AMS19}------: Nonlinear diffusion equations and curvature conditions in metric measure spaces, Mem. Amer. Math. Soc. \textbf{262} (2019), no. 1270. \bibitem[AST16]{AST16}L. Ambrosio, F. Stra, D. Trevisan: Weak and strong convergence of derivations and stability of flows with respect to MGH convergence, J. Funct. Anal. \textbf{272} (2017), no. 3, 1182–1229. \bibitem[AT04]{AT04}L. Ambrosio, P. Tilli: Topics on analysis in metric spaces. Oxford Lecture Series in Mathematics and its Applications, 25. Oxford University Press, Oxford, 2004. \bibitem[B85]{B85}P. B\'{e}rard: Volume des ensembles nodaux des fonctions propres du laplacien, S\'{e}minaire de th\'{e}orie spectrale et g\'{e}om\'{e}trie \textbf{3}(1985), 1-9. \bibitem[BBG94]{BBG94}P. B\'{e}rard, G. Besson, S. Gallot: Embedding Riemannian manifolds by their heat kernel, Geom. Funct. Anal. \textbf{4}(1994), no. 
4, 373-398. \bibitem[BGHZ21]{BGHZ21}C. Brena, N. Gigli, S. Honda, X. Zhu: Weakly non-collapsed RCD spaces are strongly non-collapsed, Journal f\"{u}r die reine und angewandte Mathematik (Crelles Journal), \textbf{2023}(2023), no. 794, 215-252. \bibitem[BS20]{BS20}E. Bru\`{e}, D. Semola: Constancy of the dimension for RC$\text{D}^\ast(K,N)$ spaces via regularity of Lagrangian flows, Comm. Pure and Appl. Math. \textbf{73}(2020), no. 6, 1141-1204. \bibitem[ChCo1]{ChCo1}J. Cheeger, T. Colding: On the structure of spaces with Ricci curvature bounded below. I, J. Differential Geom. \textbf{46}(1997), no. 3, 406–480. \bibitem[CGT82]{CGT82} J. Cheeger, M. Gromov, M. Taylor: Finite propagation speed, kernel estimates for functions of the Laplace operator, and the geometry of complete Riemannian manifolds, J. Diff. Geom. \textbf{17} (1982), 15-53. \bibitem[CN12]{CN12}T. Colding, A. Naber: Sharp H\"older continuity of tangent cones for spaces with a lower Ricci curvature bound and applications, Ann. of Math. (2) \textbf{176}(2012), no. 2, 1173-1229. \bibitem[D02]{D02}Y. Ding: Heat kernels and Green's functions on limit spaces, Comm. Anal. Geom. \textbf{10} (2002), no. 3, 475–514. \bibitem[D97]{D97}E. Davis: Non-Gaussian aspects of heat kernel behavior, J. London Math. Soc. \textbf{55} (1997), no. 2, 105–125. \bibitem[DG16]{DG16}G. De Philippis, N. Gigli: From volume cone to metric cone in the nonsmooth setting, Geom. Funct. Anal. \textbf{26} (2016), no. 6, 1526–1587. \bibitem[DG18]{DG18}\textbf{------}: Non-collapsed spaces with Ricci curvature bounded from below, J. \'Ec. polytech. Math. \textbf{5} (2018), 613–650. \bibitem[EKS15]{EKS15}M. Erbar, K. Kuwada, K. Sturm: On the equivalence of the entropic curvature-dimension condition and Bochner’s inequality on metric measure spaces, Invent. Math. \textbf{201} (2015), no. 3, 993–1071. \bibitem[F87]{F87}K. Fukaya: Collapsing of Riemannian manifolds and eigenvalues of Laplace operator, Invent. Math. \textbf{87} (1987), 517–547. \bibitem[G81]{G81}M. Gromov: Structures m\'{e}triques pour les vari\'{e}t\'{e}s reimanniennes, redige par J. Lafontaine et P. Pansu, Textes math. $\text{n}^{\circ}$ 1 Cedic-Nathan, Paris, 1981. \bibitem[G13]{G13}N. Gigli: The splitting theorem in non-smooth context, ArXiv preprint:1302.5555. \bibitem[G14]{G14}\textbf{------}: An overview on the proof of the splitting theorem in non-smooth context, Anal. Geom. Metr. Spaces \textbf{2}(2014), no. 1, 169–213. \bibitem[G15]{G15}\textbf{------}: On the differential structure of metric measure spaces and applications, Mem. Amer. Math. Soc. \textbf{236}(2015), no. 1113. \bibitem[G18]{G18}\textbf{------}: Nonsmooth differential geometry---an approach tailored for spaces with Ricci curvature bounded from below, Mem. Amer. Math. Soc. \textbf{251} (2018), no. 1196. \bibitem[GH18]{GH18}N. Gigli, B. Han: Sobolev spaces on warped products, J. Funct. Anal. \textbf{275} (2018), no. 8, 2059–2095. \bibitem[GMS13]{GMS13}N. Gigli, A. Mondino, G. Savar\'{e}: Convergence of pointed non-compact metric measure spaces and stability of Ricci curvature bounds and heat flows, Proc. Lond. Math. Soc. (3) \textbf{111} (2015), no. 5, 1071–1129. \bibitem[GP16]{GP16}N. Gigli, E. Pasqualetto: Equivalence of two different notions of tangent bundle on rectifiable metric measure spaces, Comm. Anal. and Geom. \textbf{30}(2022), no. 1, 1-51. \bibitem[GR18]{GR18}N.Gigli, C. Rigoni: Recognizing the flat torus among $\text{RCD}^\ast(0,N)$ spaces via the study of the first cohomology group. Calc. Var. Partial Differ. Equ. 
\textbf{57}, 104 (2018). \bibitem[GR19]{GR19}N. Gigli, C. Rigoni: A note about the strong maximum principle on RCD spaces, Canad. Math. Bull. \textbf{62} (2019), no. 2, 259–266. \bibitem[GR20]{GR20}N. Gigli, C. Rigoni: Partial derivatives in the nonsmooth setting, J. Funct. Anal. \textbf{283}(2022), no. 4, Paper No. 109528. \bibitem[GT01]{GT01}D. Gilbarg, N. Trudinger: Elliptic partial differential equations of second order. Classics in Mathematics, Springer-Verlag, Berlin, 2001. Reprint of the 1998 edition. \bibitem[HH97]{HH97}E. Hebey, M. Herzlich : Harmonic coordinates, harmonic radius and convergence of Riemannian manifolds, Rendiconti di Matematica, Serie VII, \textbf{17}(1997), 569-605. \bibitem[H15]{H15}S. Honda: Ricci curvature and $L^p$-convergence, J. Reine Angew Math. \textbf{705} (2015), 85–154. \bibitem[H21]{H21}------: Isometric immersions of RCD spaces, Comment. Math. Helv. \textbf{96}(2021), no. 3, 515–559. \bibitem[HS21]{HS21}S. Honda, Y. Sire: Sobolev mappings between RCD spaces and applications to harmonic maps: a heat kernel approach, ArXiv preprint: 2105.08578. \bibitem[J14]{J14}R. Jiang: Cheeger-harmonic functions in metric measure space revisited, J. Funct. Anal. \textbf{266} (2014), no. 3, 1373–1394. \bibitem[JLZ16]{JLZ16}R. Jiang, H. Li, H. Zhang: Heat kernel bounds on metric measure spaces and some applications, Potential Anal.\textbf{ 44} (2016), no. 3, 601–627. \bibitem[K15a]{K15a}C. Ketterer: Cones over metric measure spaces and the maximal diameter theorem, J. Math. Pures Appl. (9) \textbf{103} (2015), no. 5, 1228–1275. \bibitem[K15b]{K15b}\textbf{------}: Obata's rigidity theorem for metric measure spaces, Anal. Geom. Metr. Spaces \textbf{3}(2015), no. 1, 278–295. \bibitem[K19]{K19}Y. Kitabeppu: A sufficient condition to a regular set being of positive measure on RCD spaces, Potential Anal. \textbf{51}(2019), no. 2, 179–196. \bibitem[KM21]{KM21}V. Kapovitch, A. Mondino: On the topology and the boundary of $N$-dimensional RCD$(K,N)$ spaces, Geom. Topol. \textbf{25} (2021), 445 -495. \bibitem[L81]{L81}P. Li: Minimal immersions of compact irreducible homogeneous Riemannian manifolds, J. Differential Geom. \textbf{16} (1981), no. 1, 105–115. \bibitem[LV09]{LV09}J. Lott, C. Villani: Ricci curvature for metric-measure spaces via optimal transport, Ann. of Math. (2) \textbf{169} (2009), no. 3, 903–991. \bibitem[LY80]{LY80}P. Li, S. Yau: Estimates of eigenvalues of a compact Riemannian manifold, Geometry of the Laplace operator (Proc. Sympos. Pure Math., Univ. Hawaii, Honolulu, Hawaii, 1979), 205–239. \bibitem[MN19]{MN19}A. Mondino, A. Naber: Structure theory of metric-measure spaces with lower Ricci curvature bounds, J. Eur. Math. Soc, \textbf{21} (2019), 1809–1854. \bibitem[MW19]{MW19} A. Mondino and G. Wei: On the universal cover and the fundamental group of an $\mathrm{RCD}^\ast(K, N)$-space, J. Reine Angew. Math. \textbf{753} (2019), 211–237. \bibitem[O07]{O07}S. Ohta: On the measure contraction property of metric measure spaces. Comment. Math. Helv., \textbf{82}(2007), 805–828. \bibitem[S14]{S14}G. Savar\'e: Self-improvement of the Bakry-\'{E}mery condition and Wasserstein contraction of the heat flow in RCD$(K,\infty)$ metric measure spaces, Discrete Contin. Dyn. Syst. \textbf{34} (2014), no. 4, 1641–1661. \bibitem[St95]{St95}K. Sturm: Analysis on local Dirichlet spaces. II. Upper Gaussian estimates for the fundamental solutions of parabolic equations, Osaka J. Math. \textbf{32} (1995), no. 2, 275–312. 
\bibitem[St96]{St96}\textbf{------}: Analysis on local Dirichlet spaces. III. The parabolic Harnack inequality, J. Math. Pures Appl. (9) \textbf{75} (1996), no. 3, 273–297. \bibitem[St06a]{St06a}\textbf{------}: On the geometry of metric measure spaces. I, Acta Math. \textbf{196} (2006), no. 1, 65–131. \bibitem[St06b]{St06b}\textbf{------}: On the geometry of metric measure spaces. II, Acta Math. \textbf{196} (2006), no. 1, 133–177. \bibitem[Ta66]{Ta66}T. Takahashi: Minimal immersions of Riemannian manifolds, J. Math. Soc. Japan \textbf{18} (1966), 380–385. \bibitem[Ta96]{Ta96} M. Taylor: Partial Differential Equations, Volume 1,2,3. Springer-Verlag. New York, NY, 1996 \bibitem[ZZ19]{ZZ19}H. Zhang, X. Zhu: Weyl's law on $\text{RCD}^\ast$(K,N) metric measure spaces, Comm. Anal. Geom. \textbf{27} (2019), no. 8, 1869–1914. \end{thebibliography} \bigskip \end{document}
2205.11677v4
http://arxiv.org/abs/2205.11677v4
Semi-Supervised Clustering of Sparse Graphs: Crossing the Information-Theoretic Threshold
\documentclass[twoside,11pt]{article} \usepackage{subcaption} \usepackage[preprint]{jmlr2e} \usepackage{amsmath} \usepackage{mathtools} \usepackage{commath} \usepackage{enumitem} \usepackage{bbm} \newcommand{\1}{\mathbf{1}} \newcommand{\tp}{\top} \newcommand{\At}{\tilde{A}} \newcommand{\SDP}{\text{SDP}} \newcommand{\CSDP}{\text{CSDP}} \newcommand{\la}{\langle} \newcommand{\ra}{\rangle} \newcommand{\diag}{\text{diag}} \newcommand*\diff{\mathop{}\!\mathrm{d}} \newcommand{\me}{\mathrm{e}} \DeclareMathOperator{\prob}{P} \DeclareMathOperator{\E}{E} \DeclareMathOperator{\V}{Var} \DeclareMathOperator{\G}{\mathcal{G}} \DeclareMathOperator{\Tr}{Tr} \DeclareMathOperator{\R}{\mathbb{R}} \DeclareMathOperator{\rank}{rank} \DeclareMathOperator*{\argmax}{arg\,max} \DeclareMathOperator*{\argmin}{arg\,min} \jmlrheading{[volume]}{2022}{[pages]}{8/22}{[date published]}{[paper id]}{Junda Sheng and Thomas Strohmer} \ShortHeadings{Semi-Supervised Clustering of Sparse Graphs}{Sheng and Strohmer} \firstpageno{1} \begin{document} \title{Semi-Supervised Clustering of Sparse Graphs: \\ Crossing the Information-Theoretic Threshold} \author{\name Junda Sheng \email [email protected] \\ \addr Department of Mathematics\\ University of California\\ Davis, CA 95616-5270, USA \AND \name Thomas Strohmer \email [email protected] \\ \addr Department of Mathematics and Center of Data Science and Artificial Intelligence Research\\ University of California\\ Davis, CA 95616-5270, USA} \editor{} \maketitle \begin{abstract}The stochastic block model is a canonical random graph model for clustering and community detection on network-structured data. Decades of extensive study on the problem have established many profound results, among which the phase transition at the Kesten-Stigum threshold is particularly interesting both from a mathematical and an applied standpoint. It states that no estimator based on the network topology can perform substantially better than chance on sparse graphs if the model parameter is below a certain threshold. Nevertheless, if we slightly extend the horizon to the ubiquitous semi-supervised setting, such a fundamental limitation will disappear completely. We prove that with an arbitrary fraction of the labels revealed, the detection problem is feasible throughout the parameter domain. Moreover, we introduce two efficient algorithms, one combinatorial and one based on optimization, to integrate label information with graph structures. Our work brings a new perspective to the stochastic model of networks and to semidefinite programming research. \end{abstract} \begin{keywords} clustering, semi-supervised learning, stochastic block model, Kesten-Stigum threshold, semidefinite programming \end{keywords} \section{Introduction} Clustering has long been an essential subject of many research fields, such as machine learning, pattern recognition, data science, and artificial intelligence. In this section, we include some background information on its general setting and the semi-supervised approach. \subsection{Clustering on Graphs} The basic task of \textit{clustering} or \textit{community detection} in its general form is, given a (possibly weighted) graph, to partition its vertices into several densely connected groups with relatively weak external connectivity. This property is sometimes also called assortativity. Clustering and community detection are central problems in machine learning and data science with various applications in scientific research and industrial development.
A considerable amount of data sets can be represented in the form of a network that consists of interacting nodes, and one of the first features of interest in such a situation is to understand which nodes are “similar”, as an end or as a preliminary step towards other learning tasks. Clustering is used to find genetically similar sub-populations \citep{10.3389/fgene.2014.00204}, to segment images \citep{868688}, to study sociological behavior \citep{doi:10.1073/pnas.012582999}, to improve recommendation systems \citep{1167344}, to help with natural language processing \citep{doi:10.1073/pnas.1221839110}, etc. Since the 1970s, in different communities like social science, statistical physics, and machine learning, a large diversity of algorithms have been developed such as: \begin{itemize} \item Hierarchical clustering algorithms \citep{hierarchy} build a hierarchy of progressive communities, by either recursive aggregation or division. \item Model-based statistical methods, including the celebrated EM clustering algorithm proposed in \citep{https://doi.org/10.1111/j.2517-6161.1977.tb01600.x}, fit the data with cluster-exhibiting statistical models. \item Optimization approaches identify the best cluster structures regarding carefully designed cost functions, for instance, minimizing the cut \citep{HARTUV2000175} and maximizing the Girvan-Newman modularity \citep{Newman2004FindingAE}. \end{itemize} Multiple lines of research intersect at a simple random graph model, which appears under many different names. In the machine learning and statistics literature around social networks, it is called stochastic block model (SBM) \citep{Holland1983StochasticBF}, while it is known as the planted partition model \citep{715914} in theoretical computer science and referred to as inhomogeneous random graph model \citep{10.5555/1276871.1276872} in the mathematics literature. Moreover, it can also be interpreted as a spin-glass model \citep{Decelle2011AsymptoticAO}, a sparse-graph code \citep{Abbe2015CommunityDI}, a low-rank random matrix model \citep{959929}, and more. The essence of SBM can be summarized as follows: Conditioned on the vertex labels, edges are generated independently and the probability only depends on which clusters the pairs of vertices belong to. We consider its simplest form, namely the symmetric SBM consisting of two blocks, also known as the planted bisection model. \begin{definition}[Planted bisection model]\label{PBM} For $n\in \mathbb{N}$ and $p,q \in (0,1)$, let $\G(n,p,q)$ denote the distribution over graphs with $n$ vertices defined as follows. The vertex set is partitioned uniformly at random into two subsets $S_1, S_2$ with $|S_i|=n/2$. Let $E$ denote the edge set. Conditional on this partition, edges are included independently with probability \begin{equation} \prob\left((i,j)\in E | S_1, S_2\right)= \begin{cases} p & \text{~if~} \{i,j\} \subseteq S_1 \text{~or~} \{i,j\} \subseteq S_2, \\ q & \text{~if~} i \in S_1, j \in S_2 \text{~or~} i \in S_2, j \in S_1. \end{cases} \end{equation} \end{definition} Note that if $p=q$, the planted bisection model is reduced to the so-called Erdős–Rényi random graph where all edges are generated independently with the same probability. Hence there exists no cluster structure. But if $p \gg q$, a typical graph will have two well-defined clusters. The scale of $p$ and $q$ also plays a significant role in the resulting graph, which will be discussed in detail later. 
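For concreteness, the sampling procedure of Definition \ref{PBM} can be written out in a few lines of Python. The sketch below is purely illustrative (it assumes only NumPy, the function name is our own, and $n$ is taken to be even as in the definition); generating all pairwise coin flips at once is wasteful in the sparse regime, but it keeps the example short.
\begin{verbatim}
import numpy as np

def sample_planted_bisection(n, p, q, seed=None):
    """Illustrative sampler for G(n, p, q): returns the adjacency
    matrix A and ground-truth labels x in {+1, -1}^n (n even)."""
    rng = np.random.default_rng(seed)
    x = np.ones(n, dtype=int)
    x[rng.permutation(n)[: n // 2]] = -1         # balanced random partition
    same = np.equal.outer(x, x)                  # True iff same community
    probs = np.where(same, p, q)                 # per-pair edge probability
    upper = np.triu(rng.random((n, n)) < probs, 1)
    A = (upper | upper.T).astype(int)            # symmetric, no self-loops
    return A, x
\end{verbatim}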
They govern the amount of signal and noise in the graph's generating process. As the key parameters that researchers work with, they depict various regimes and thresholds. The SBMs generate labels for vertices before the graph. The ground truth allows us to formally discuss the presence of community structures and measure the performance of algorithms in a meaningful way. It also supplies a natural basis to rigorously define the semi-supervised clustering problem. But as a parametrized statistical model, one can only hope that it serves as a good fit for the real data. Although not necessarily a realistic model, SBM provides us with an insightful abstraction and captures some of the key phenomena \citep{MosselNS15, Chen2016StatisticalComputationalTI, Banks2016InformationtheoreticTF, Abbe2016ExactRI,ASgen}. Given a single realization of a graph $G$, our goal is to recover the labels $x$, up to a certain level of accuracy. Formally, the ground truth of the underlying community structure is encoded using the vector $x \in \{+1,-1\}^n$, with $x_i = +1$ if $i\in S_1$, and $x_i = -1$ if $i\in S_2$. An estimator is a map $\hat{x}: G_n \to \{+1,-1\}^n$ where $G_n$ is the space of graphs over $n$ vertices. We define the \textit{Overlap} between an estimator and the ground truth as \begin{equation} \text{Overlap}(x,\hat{x}(G))=\frac{1}{n}| \langle x,\hat{x}(G) \rangle |. \end{equation} {\em Overlap} induces a measure on the same probability space as the model, which represents how well an (unsupervised) estimator performs on the recovery task. To intuitively interpret the result, we put requirements on its asymptotic behavior, which takes place with high probability as $n\to\infty$. \begin{definition} Let $G \sim \G(n,p,q)$. The following recovery requirements are solved if there exists an algorithm that takes $G$ as an input and outputs $\hat{x}=\hat{x}(G)$ such that \begin{itemize} \item Exact recovery: $\prob\{\text{Overlap}(x,\hat{x}(G))=1\}=1-o(1)$ \item Weak recovery: $\prob\{\text{Overlap}(x,\hat{x}(G)) \geq \Omega(1) \}=1-o(1)$ \end{itemize} \end{definition} In other words, exact recovery requires the entire partition to be correctly identified. Weak recovery only asks for substantially better performance than chance. In some literature, exact recovery is simply called recovery. Weak recovery is also called detection since as long as one can weakly recover the ground truth, there must exist a community structure. Note that if $G$ is an Erdős–Rényi random graph ($p = q$) then the overlap will be $o_p(1)$ for all estimators. This can be seen by noticing that $x$ and $G$ are independent in this setting and then applying Markov's inequality. This has led to two additional natural questions about SBMs. On the one hand, we are interested in the distinguishability (or testing): is there a hypothesis test to distinguish a random graph generated by the Erdős–Rényi model (ERM) from a random graph generated by the SBM, which succeeds with high probability? On the other hand, we can ask about the model learnability (or parameter estimation): assuming that $G$ is drawn from an SBM ensemble, is it possible to obtain a consistent estimator for the parameters ($p,q$)? Although each of these questions is of independent interest, for symmetric SBMs with two symmetric communities (planted bisection model) the following holds \citep{Abbe2017CommunityDA}: \begin{equation} \text{learnability}\iff\text{weak recovery}\iff\text{distinguishability}. 
\end{equation} Such equivalence benefits our understanding of the model in turn. For example, direct analysis of weak recovery leads to the converse of phase transition theory \citep{MosselNS15}. The achievability of the phase transition threshold \citep{Massouli2014CommunityDT} is proved by counting non-backtracking walks on the graph which gives consistent estimators of parameters. In the recent work \citep{montanari2015semidefinite}, hypothesis testing formulation is studied. SBMs demonstrate the `fundamental limits' of clustering and community detection as some necessary and sufficient conditions for the feasibility of recovery, information-theoretically or computationally. Moreover, they are usually expressed in the form of \textit{phase transition}. Sharp transitions exist in the parameter regimes between phases where the task is resolvable or not. For example, when the average degree grows as $\log n$, if the structure is sufficiently obvious then the underlying communities can be exactly recovered \citep{doi:10.1073/pnas.0907096106}, and the threshold at which this becomes possible has also been determined \citep{Abbe2016ExactRI}. Above this threshold, efficient algorithms exist \citep{Agarwal2015MultisectionIT, Abbe2015CommunityDI, Perry2017ASP, Deng2021StrongCG} that recover the communities exactly, labeling every vertex correctly with high probability; below this threshold, exact recovery is information-theoretically impossible. \citep{10.5555/3157096.3157205, JMLR:v18:16-245} studied a slightly weaker requirement that allows a vanishing proportion of misclassified nodes and proposed algorithms that achieve the optimal rates for various models. In contrast, we study a rather different regime where the reasonable question to ask is whether one can guarantee a misclassification proportion strictly less than $\frac{1}{2}$. We acknowledge that SBMs in general are theoretical constructs that can not capture all the complicated situations emerging with real-life networks. However, it succeeds in modeling the simplest situation where a community structure can be quantitatively described. In practice, instead of applying the model directly, it is advisable to initially conduct some heuristic and hierarchical analyses on the graph and see if the problem can be effectively reduced to those simple cases. For the graphs violating the model assumptions, e.g.,\ a sparse graph dominated by cliques, alternative and tailored modeling may be necessary. \subsection{Sparse Regime and Kesten-Stigum Threshold} \label{topo} In the sparse case where the average degree of the graph is $O(1)$, it is more difficult to find the clusters and the best we can hope for is to label the vertices with nonzero correlation or mutual information with the ground truth, i.e.,\ weak recovery. Intuitively, we only have access to a constant amount of connections about each vertex. The intrinsic difficulty can be understood from the topological properties of the graphs in this regime. The following basic results are derived from \citep{Erdos1984OnTE}: \begin{itemize} \item For $a, b > 0$, the planted bisection model $\G(n,\frac{a \log n}{n},\frac{b \log n}{n})$ is connected with high probability if and only if $\frac{a+b}{2}>1$. \item $\G(n,\frac{a}{n},\frac{b}{n})$ has a giant component (i.e.,\ a component of size linear in $n$) with high probability if and only if $d\coloneqq\frac{a+b}{2}>1$. \end{itemize} The graph will only have vanishing components if the average degree is too small. 
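Both facts are easy to observe empirically. The following sketch (again only illustrative; it assumes the \texttt{networkx} package, which is not part of this paper's analysis) estimates the relative size of the largest connected component of $\G(n,a/n,b/n)$ for an average degree $d=\frac{a+b}{2}$ below and above $1$.
\begin{verbatim}
import networkx as nx

def largest_component_fraction(n, a, b, seed=0):
    """Fraction of vertices in the largest connected component
    of one sample of G(n, a/n, b/n)."""
    sizes = [n // 2, n // 2]
    probs = [[a / n, b / n], [b / n, a / n]]
    G = nx.stochastic_block_model(sizes, probs, seed=seed)
    giant = max(nx.connected_components(G), key=len)
    return len(giant) / n

# d = 0.8: only vanishing components;  d = 2.0: a giant component appears.
for a, b in [(1.2, 0.4), (3.0, 1.0)]:
    print((a + b) / 2, largest_component_fraction(5000, a, b))
\end{verbatim}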
Therefore, it is not possible to even weakly recover the labels. But we will see in the next section that semi-supervised approaches can, perhaps surprisingly, piece the components together with a consistent labeling. Although it is mathematically challenging to work in the sparse regime, real-world data are likely to have bounded average degrees. \citet{Leskovec} and \citet{Strogatz:2001wc} studied a large collection of benchmark data sets, including power transmission networks, website link networks, and complex biological systems, which had millions of nodes with an average degree of no more than 20. For instance, the LinkedIn network they studied had approximately seven million nodes, but only 30 million edges. The phase transition for weak recovery or detection in the sparse regime was first conjectured in the paper by Decelle, Krzakala, Moore, and Zdeborová \citep{Decelle2011AsymptoticAO}, which sparked the modern study of clustering and SBMs. Their work is based on deep but non-rigorous insights from statistical physics, derived with the cavity method (a.k.a.\ belief propagation). Since then, a great deal of excellent research has been conducted to understand this fundamental limit, e.g.,\ \citep{MosselNS15, Massouli2014CommunityDT, Mossel2018APO, Abbe2020GraphPA}. A key result is the following theorem. \begin{theorem}[Kesten-Stigum threshold]\label{KS} Let $\G(n, a/n, b/n)$ be a symmetric SBM with two balanced clusters and $a, b = O(1)$. The weak recovery problem is solvable, and efficiently so, if and only if $(a-b)^2 > 2(a + b)$. \end{theorem} In particular, when $(a-b)^2 \leq 2(a+b)$, if we denote the probability measures induced by the ERM $\G(n, \frac{a+b}{2n}, \frac{a+b}{2n})$ and the SBM $\G(n, \frac{a}{n}, \frac{b}{n})$ by $P_n^{(0)}$ and $P_n^{(1)}$ respectively, then they are mutually contiguous, that is, for any sequence of events $\{E_n\}$, $P_n^{(0)}(E_n) \to 0$ if and only if $P_n^{(1)}(E_n) \to 0$. Conventionally, the \emph{signal-to-noise ratio (SNR)} is defined as \begin{equation} \text{SNR} \coloneqq (a-b)^2/[2(a+b)]. \end{equation} It is worth noting that we only quoted the KS threshold for the two-community case ($k = 2$). For sufficiently large $k$, namely $k \geq 5$, there is a `hard but detectable' regime where weak recovery is information-theoretically possible, but computationally hard \citep{ASgen, Banks2016InformationtheoreticTF}. This gap between the KS threshold and the information-theoretic (IT) threshold only shows up in the constant degree regime, making it a fertile ground for studying the fundamental tradeoffs in community detection. We focus on the canonical case, the symmetric SBM with two balanced clusters, where the two thresholds coincide and a semi-supervised approach crosses both of them. \begin{figure}[ht] \begin{subfigure}{0.3\textwidth} \centering\includegraphics[width=\textwidth]{sbm_plt_nice.png} \end{subfigure} \begin{subfigure}{0.3\textwidth} \centering\includegraphics[width=\textwidth]{sbm_plt_uncolor.png} \end{subfigure} \begin{subfigure}{0.3\textwidth} \centering\includegraphics[width=\textwidth]{sbm_plt_unorder.png} \end{subfigure} \caption{The left image shows the adjacency matrix of one realization of $\G (100, 0.12, 0.05)$, where detection is theoretically possible. In practice, however, the data comes uncolored (middle) and unordered (right).} \end{figure} The terminology `KS threshold' can be traced back to the work of Kesten and Stigum concerning the reconstruction of infinite rooted trees in 1966 \citep{KS}.
The problem consists of broadcasting the root label of a tree with a fixed degree $c$ down to its leaves and trying to recover it from the leaves at a large depth. We start by drawing the root label uniformly in $\{0,1\}$. Then, in a top-down manner, we independently label every child the same as its parent with probability $1-\epsilon$ and the opposite of its parent otherwise. Let $x^{(t)}$ denote the labels at depth $t$ in this tree with $t = 0$ being the root. We say the reconstruction is solvable if $\lim_{t\to \infty} \E|\E(x^{(0)}|x^{(t)}) - 1/2| > 0$ or, equivalently, $\lim_{t\to\infty} I(x^{(0)}; x^{(t)}) > 0$, where $I$ is the mutual information. Although it was already shown in the original paper that reconstruction is solvable when $c(1-2\epsilon)^2 > 1$, non-reconstruction was only proved some 30 years later, namely, that it is not solvable if $c(1-2\epsilon)^2 \leq 1$ \citep{Bleher1995OnTP, Evans}. Based on that finding, Mossel, Neeman, and Sly proved the converse part of Theorem \ref{KS} by coupling the local neighborhood of an SBM vertex with a Galton-Watson tree equipped with a Markov process \citep{MosselNS15}. Inspired by this elegant approach, we propose our `census method' to solve the semi-supervised clustering problem, and we will see in Section \ref{Census method} how it works by amplifying revealed information with tree-like neighborhoods. \subsection{Basic Algorithms} Information-theoretic bounds can provide the impossibility side of phase transitions, but we still need specific efficient algorithms for the achievability side. One straightforward approach is the spectral method. Under Definition \ref{PBM}, let $A$ be the adjacency matrix of the graph $G \sim \G(n,a/n,b/n)$, $a>b$. Up to reordering indices, its expectation is a block matrix except for the diagonal, \begin{equation} \E A \approx \frac{1}{n} \begin{pmatrix} a & b\\ b & a \end{pmatrix}\otimes \1_{n/2}\1_{n/2}^\tp, \end{equation} which has three distinct eigenvalues, $(a+b)/2 > (a-b)/2>0$. The eigenvalue $0$ has multiplicity $n-2$, and the eigenvector associated with the second largest eigenvalue is $\left(\begin{smallmatrix}\1_{n/2}\\-\1_{n/2}\end{smallmatrix}\right)$, which is consistent with the ground truth of the labels. However, we do not observe the expected adjacency matrix. Instead, we only have access to one realization of the model. In modern terms, community detection is a `one-shot learning' task. But one can still hope that $A - \E A$ is small and the second eigenvector of $A$ gives a reasonable estimator. For example, denoting the ordered eigenvalues of $\E A$ and $A$ as $\{\lambda_i\}$ and $\{\hat{\lambda}_i\}$ respectively, the Courant-Fischer-Weyl min-max principle implies \begin{equation} |\hat{\lambda}_i - \lambda_i | \leq \|A - \E A\|_{\text{op}}, \qquad i = 1,\dots,n. \end{equation} Recall that the operator norm of a symmetric matrix $M$ is $\|M\|_{\text{op}} = \max(\xi_1(M), -\xi_n(M))$ where $\xi_i(M)$ denotes the $i$-th largest eigenvalue of $M$. If one can bound $\|A - \E A\|_{\text{op}}$ by half of the least gap between the three eigenvalues mentioned above, the order will be preserved. Then the Davis-Kahan theorem guarantees the eigenvectors are correlated. Namely, if $\theta$ denotes the angle between the second eigenvectors (spectral estimator and ground truth), we have \begin{equation} \sin \theta \leq \|A - \E A\|_{\text{op}}/ \min\{|\lambda_i - \lambda_2|/2 : i\neq 2\}. \end{equation} Thus, the key is to control the norm of the perturbation.
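As a concrete, deliberately naive illustration of this spectral recipe, the following NumPy sketch (the function names are ours, and no optimality is claimed) rounds the signs of the eigenvector attached to the second largest eigenvalue of $A$ and evaluates the resulting \textit{Overlap}.
\begin{verbatim}
import numpy as np

def spectral_estimate(A):
    """Sign-rounding of the eigenvector for the second largest
    eigenvalue of the adjacency matrix A."""
    _, vecs = np.linalg.eigh(A)      # eigenvalues in ascending order
    xhat = np.sign(vecs[:, -2])      # second largest eigenvalue
    xhat[xhat == 0] = 1              # arbitrary tie-breaking
    return xhat

def overlap(x, xhat):
    """Overlap(x, xhat) = |<x, xhat>| / n."""
    return abs(int(x @ xhat)) / len(x)
\end{verbatim}
In denser regimes this simple rounding already correlates well with the ground truth; as explained next, it breaks down in the sparse regime.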
Many deep results from random matrix theory come into play here \citep{Vu2007SpectralNO,Nadakuditi2012GraphSA,Abbe2020ENTRYWISEEA}. This nice and simple approach stops working as we step into the sparse regime \citep{Feige2005SpectralTA,CojaOghlan2009GraphPV,Keshavan2009MatrixCF,Decelle2011AsymptoticAO,Krzakala2013SpectralRI}. The main reason is that leading eigenvalues of $A$ are about the order of the square root of the maximum degree. High-degree vertices mess up the desired order of eigenvalues. In particular, for Erdős–Rényi random graphs ($\G (n, d/n)$), we have $\hat{\lambda}_1 = (1+o(1))\sqrt{\log n / \log \log n}$ almost surely \citep{Krivelevich2003TheLE}. Furthermore, the leading eigenvectors are concentrated at these `outliers' with high degree and contain no structural information of the underlying model. Take the star graph for example, where we assume that only the first node is connected to $k$ neighbors. It is easy to see that the corresponding adjacency matrix has eigenvalue $\sqrt{k}$ and eigenvector $(\sqrt{k},1,\dots,1)$. Various interesting spectrum-based methods are proposed to overcome this challenge \citep{Mossel2018APO,Massouli2014CommunityDT,Bordenave2015NonbacktrackingSO}. The key idea is to replace adjacency with some combinatorically constructed matrices. However, they typically rely on model statistics and underlying probabilistic assumptions, which leads to the problem of adversarial robustness. For example, they are non-robust to ’helpful’ perturbations. Namely, if we allow an adversary to perform the following changes on the graph: (1)~adding edges within communities and/or (2)~removing edges across communities, spectral approaches are going to fail. It is surprising since, intuitively, these changes help to emphasize community structures. Meanwhile, semidefinite programming (SDP) sheds light on how we may be able to overcome the limitations of spectral algorithms, which are shown to be robust when SNR is sufficiently large \citep{Moitra2016HowRA}. It is another major line of work on clustering and community detection concerning the performance of SDPs on SBMs. While a clear picture of the unbounded degree case has been figured out by \citep{Abbe2016ExactRI, Hajek2016AchievingEC, Amini2014OnSR, Bandeira2018RandomLM, Agarwal2015MultisectionIT, Perry2017ASP}, the results for sparse networks are more complicated. \citep{Gudon2014CommunityDI} proved a sub-optimal condition, SNR $\geq10^4$, using Grothendieck inequality. Then, with a Lindeberg interpolation process \citep{TTao}, Montanari et al.\ proved that an SDP algorithm as proposed by \citep{montanari2015semidefinite} is nearly optimal for the case of large bounded average degree by transferring analysis of the original SDPs to the analysis of SDPs of Gaussian random matrices. \begin{theorem}\citep{montanari2015semidefinite}\label{MS} Assume $G \sim G(n,a/n,b/n)$. If for some $\epsilon > 0$, $\text{SNR}\geq 1+\epsilon$ and $d>d^*(\epsilon)$ then the SDP estimator solves the weak recovery. \end{theorem} The necessary degree $d^*$ depends on the choice of $\epsilon$ and goes to infinity as $\epsilon \to 0$. If we fix $d$ and view $\epsilon$ as its function, the condition becomes $\text{SNR} \geq 1 +o_d(1)$. Numerical estimation and non-rigorous statistical mechanism approximation suggest that it is at most $2\%$ sub-optimal. This result seems to be the ceiling of SDP according to the preliminary calculation from \citep{Javanmard2016PhaseTI}. 
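To make the preceding discussion concrete, here is a schematic and in no way optimized implementation of an SDP estimator of this kind, written with the generic convex-programming interface \texttt{cvxpy} (a tooling choice of ours, not of the cited works); the centering uses the empirical average degree as a stand-in for $d=\frac{a+b}{2}$, mimicking the matrix $A-\frac{d}{n}\1\1^\tp$ that appears again in Section \ref{CSDP}.
\begin{verbatim}
import numpy as np
import cvxpy as cp

def sdp_estimate(A):
    """Schematic SDP relaxation: maximize <A - (d/n) 1 1^T, X> over
    PSD matrices X with unit diagonal, then round via the leading
    eigenvector of the optimizer.  Here d is the empirical average
    degree of the observed graph."""
    n = A.shape[0]
    d = A.sum() / n
    M = A - (d / n) * np.ones((n, n))
    X = cp.Variable((n, n), PSD=True)
    problem = cp.Problem(cp.Maximize(cp.trace(M @ X)), [cp.diag(X) == 1])
    problem.solve()
    _, vecs = np.linalg.eigh(X.value)
    xhat = np.sign(vecs[:, -1])
    xhat[xhat == 0] = 1
    return xhat
\end{verbatim}
The constrained program of Section \ref{CSDP} is obtained from the same relaxation by additionally fixing $X_{ij}=x_i x_j$ on the revealed pairs.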
Moreover, they address the irregularity of high-degree nodes by showing SDPs return similar results for Erdős–Rényi random graphs and random regular graphs, which appear to be sensitive only to the average degree. See Section \ref{CSDP} for more discussion on the estimation. Inspired by their work, we propose a natural modification of SDP to incorporate revealed labels in the semi-supervised setting and show that it not only achieves but even crosses, the KS threshold. In turn, our result brings a new perspective to study the (non-)achievability and robustness of (unsupervised) SDPs. \subsection{Semi-Supervised Learning} Within machine learning, there are three basic approaches: supervised learning, unsupervised learning, and the combination of both, semi-supervised learning. The main difference lies in the availability of labeled data. While unsupervised learning (e.g.,\ clustering, association, and dimension reduction) operates without any domain-specific guidance or preexisting knowledge, supervised learning (e.g.,\ classification and regression) relies on all training samples being associated with labels. However, it is often the case where existing knowledge for a problem domain doesn't fit either of these extremes. In real-world applications, unlabeled data comes with a much lower cost not requiring expensive human annotation and laboratory experiments. For example, documents crawled from the Web, images obtained from surveillance cameras, and speech collected from the broadcast are relatively more accessible compared to their labels which are required for prediction tasks, such as sentiment orientation, intrusion detection, and phonetic transcript. Motivated by this labeling bottleneck, the semi-supervised approach utilizes both labeled and unlabeled data to perform learning tasks faster, better, and cheaper. Since the 1990s, semi-supervised learning research has enjoyed an explosion of interest with applications like natural language processing \citep{Qiu2019GraphBasedSL, Chen2016StatisticalComputationalTI} and computer vision \citep{Xie2020SelfTrainingWN, Lee2013PseudoLabelT}. This paper is closely related to the subtopic called constrained clustering, where one has some must-links (i.e.,\ two nodes belong to the same cluster) and cannot-links (i.e.,\ two nodes are in different clusters) as extra information. Although constrained versions of classic algorithms have been studied empirically, such as expectation–maximization \citep{Shental2003ComputingGM}, k-means \citep{Wagstaff2001ConstrainedKC}, and spectral method \citep{Kamvar2003SpectralL}, our methods take different approaches than hard-coding these pairwise constraints into the algorithms and provide theoretically insightful guarantees under SBM. There also has been some excellent work considering introducing node information into SBMs. One interesting direction is overlaying a Gaussian mixture model with SBM, namely at each node of the graph assuming there is a vector of Gaussian covariates, which are correlated with the community structure. \citep{Yan2016CovariateRC} proposed an SDP with k-means regularization and showed that such node information indeed improves the clustering accuracy, while \citep{Lu2020ContextualSB} formally established the information-theoretic threshold for this model when the average degree exceeds one. The node information is depicted as noisy observations for all nodes in \citep{Mossel2015LocalAF}. In this setting, random guessing is no longer a meaningful baseline. 
Hence, the authors refer to the Maximum A Posteriori (MAP) estimator instead and show a local belief propagation algorithm's performance converges to the MAP accuracy in various regimes of the model. They also conjectured that those regimes can be extended to the entire domain of $a > b$ with arbitrary, but non-vanishing, strength of node information. We will see in the next section that our result establishes this conjecture in the sense that with arbitrary, but non-vanishing, knowledge of the labels, we can beat the meaningful baseline for all $a > b$. All of these models require input on every node, which does not fall within the scope of semi-supervised learning. Whereas, we consider a realistic and intuitive generalization of SBM where a small random fraction of the labels is given accurately. \citep{Kanade2014GlobalAL} studied the same model as ours. They demonstrated that a vanishing fraction of labels improves the power of local algorithms. In particular, when the number of clusters diverges, it helps their local algorithm to go below the conjectured algorithmic threshold of the unsupervised case. Elegantly, they also proved the following result, which is closely related to this paper. \begin{theorem}\citep{Kanade2014GlobalAL}\label{vanishing} When the fraction of revealed node labels is vanishingly small, the (unsupervised) weak recovery problem on the planted bisection model is not solvable if SNR is under the Kesten-Stigum threshold. \end{theorem} \if 0 A recent development in semi-supervised learning that has attracted extensive attention is called graph convolutional network (GCN) \citep{Kipf2017SemiSupervisedCW}, which is based on an efficient variant of convolutional neural networks operating on graph structures directly. The objective functions of GCNs only involve labeled data while predictive information propagates through the graphs built in neural networks to cover unlabeled data. The benefit of integrating graph structures into deep learning approaches is twofold: (i)~it efficiently embeds similarity between nodes to synchronize labeled and unlabeled samples; (ii)~it significantly brings down the number of parameters by only considering the connections induced by underlying graphs. A prototypical example of a forward model of a two-layer GCN for semi-supervised node classification on a graph is given by \begin{equation}\label{GCN} f(Z, A) = \text{softmax}\left( \hat{A}~ \text{ReLU} \left( \hat{A}Z W^{(0)}\right)~W^{(1)}\right) \end{equation} where $W^{(i)},~ i \in \{0,1\}$ are weight matrices for the hidden layer and the output layer, softmax and Relu are both vector activation functions defined as $\text{softmax}(x) = \exp(x)/\sum_i \exp(x_i)$ and $\text{ReLU}(x) = \max(0,x)$. $Z$ stands for the features and $A$ is the adjacency matrix. The output of each layer of GCN goes through a smoothing process defined by the \textit{propagation model matrix} $\hat{A}$. It can be a normalized adjacency matrix, a graph Laplacian, or even the identity matrix, which reduces the model to multi-layer perception. Existing frameworks are either directly based on the adjacency matrix $A$ \citep{Ying2018GraphCN} or run basic clustering algorithms on $A$ \citep{Chiang2019ClusterGCNAE} to design $\hat{A}$. But whenever GCN is applicable, some of the labels are always available. It is natural to consider making use of this label information to improve the decisive component $\hat{A}$, which can be realized directly from our semi-supervised clustering algorithms. 
We will discuss this interesting application further in Section~\ref{conclusion}. \fi \section{Our Results}\label{summary} The main goal of this paper is to answer the long-standing open question regarding semi-supervised learning on probabilistic graph models. We would like to quote the version from \citep{Abbe2017CommunityDA}: \begin{quotation} ``How do the fundamental limits change in a semi-supervised setting, i.e.,\ when some of the vertex labels are revealed, exactly or probabilistically?'' \end{quotation} In the previous section, we discussed a substantial body of research related to the clustering/community detection problem on the SBM. Establishing the phase transition phenomenon at the KS threshold is a major focal point. However, such a sharp and intrinsic limit completely disappears when an arbitrarily small fraction of the labels is revealed. This astonishing change was first observed in \citep{PhysRevE.90.052802}, where the authors provided non-trivial conjectures based on belief propagation calculations. The theory of semi-supervised clustering contains some fascinating and fundamental algorithmic challenges arising from both the sparse random graph model itself and the semi-supervised learning perspective. To address them rigorously, we first define the semi-supervised SBM so that it captures the essence of realistic semi-supervised learning scenarios and is a natural and simple generalization of unsupervised models. \begin{definition}[Semi-supervised planted bisection model]\label{SPBM} For $n\in \mathbb{N}$, $p,q \in (0,1)$ and $\rho \geq 0$, let $\G(n,p,q,\rho)$ denote the distribution over graphs with $n$ vertices and $n$-dimensional vectors defined as follows. The vertex set is partitioned uniformly at random into two subsets $S_1, S_2$ under the balance constraint $|S_1|=|S_2| = n/2$. Then, conditioned on the partition, two processes are undertaken independently: \begin{itemize} \item Let $E$ denote the edge set of the graph $G$. Edges are included independently with probability defined as follows: \begin{equation} \prob\left((i,j)\in E | S_1, S_2\right)= \begin{cases} p & \text{~if~} \{i,j\} \subseteq S_1 \text{~or~} \{i,j\} \subseteq S_2, \\ q & \text{~if~} i \in S_1, j \in S_2 \text{~or~} i \in S_2, j \in S_1. \end{cases} \end{equation} \item An index set $\mathcal{R}$ of size $m \coloneqq 2\lfloor \rho \cdot \frac{n}{2} \rfloor$ is chosen uniformly at random such that $|\mathcal{R} \cap S_1| = |\mathcal{R} \cap S_2| = m/2$. The revealed labels are given as \begin{equation} \Tilde{x}_i=\begin{cases} ~1 \quad &i \in \mathcal{R} \cap S_1,\\ -1 \quad &i \in \mathcal{R} \cap S_2,\\ ~0 \quad &\text{otherwise.} \end{cases} \end{equation} \end{itemize} \end{definition} \begin{remark} The revealing process is independent of edge formation, i.e.,\ $G \perp \tilde{x}|S_1,S_2$. Moreover, if we set $\rho = 0$ or simply ignore the revealed labels, the model is exactly the unsupervised SBM. In other words, the marginal distribution of the random graph is indeed $\G(n,p,q)$ from Definition \ref{PBM}. \end{remark} \begin{remark} One can also consider revealing uniformly at random over the index set independent of $\G(n,p,q)$ (instead of requiring revealed communities to have the same size), but this modification makes almost no difference in the context of this work. In practice, one can always achieve the balance requirement by either sampling a few more labels or dropping the uneven part.
\end{remark} \begin{remark} The definition is versatile in the sense that it keeps the unsupervised setting as a special case (and with it all the interesting phase transitions). On the other hand, it can be easily generalized to the multiple and/or asymmetric communities case. \end{remark} Under the semi-supervised setting, we naturally extend community detection problems in a non-trivial way that includes the unsupervised situation as a special case and captures its essence. We will discuss these items in detail when it comes to the corresponding section. \begin{definition} Semi-supervised weak recovery: finding an estimator to perform substantially better than chance \textbf{on the unrevealed vertices}. \\ Semi-supervised distinguishability: finding a test that, with high probability, distinguishes $\G(n,d/n,d/n,\pmb{\rho})$ from $\G(n,a/n,b/n,\pmb{\rho})$ where $d=\frac{a+b}{2},~a>b$. \end{definition} Based on the fact that a $\ln (n)$-neighborhood in $(G,x) \sim \G(n,a/n,b/n)$ asymptotically has the identical distribution as a Galton-Watson tree with Markov process, we propose our first semi-supervised clustering algorithm, called \textit{census method}. Namely, we decide the label estimation of a certain node according to the majority of its revealed neighbors, \begin{equation} \hat{x}_v = \text{sgn}\left( \sum_{i \in \{u \in \mathcal{R}:~ d(u,v)=t\}} x_i \right), \end{equation} where $d(u, v)$ is the length of the shortest path connecting $u$ and $v$. We conclude that when $\text{SNR} \leq 1$, the optimal choice of $t$ is indeed 1. \begin{theorem}\label{thm:census} The 1-neighbors census method solves the semi-supervised weak recovery problem with any reveal ratio $\rho>0$ for arbitrary $\text{SNR} >0$. \end{theorem} Furthermore, we derive an explicit constant bound for the overlap with the corresponding tail probability. Namely, under $\G(n,a/n,b/n,\rho)$, if we denote \textit{Overlap} on the unrevealed nodes, between 1-neighbors census estimator and the ground truth, as $\Theta$ and let $\delta = \frac{\rho(a-b)}{2\me^{\rho(a+b)}}$, then $ \prob\left(\Theta \geq \frac{\delta}{2}\right) \geq 1 - \me^{-\frac{\delta^2 (1-\rho)n}{8}} $. A detailed discussion is provided in the next section. Note that if $\rho \to 0$, semi-supervised weak recovery is equivalent to unsupervised weak recovery. Therefore, Theorem \ref{vanishing} implies that our result is also sharp in the sense of minimum requirement on the fraction of revealed labels. Although it successfully solves the weak recovery problem in the largest possible regime, some limitations are hindering the census method's utility in practice. Its performance depends on a sufficient amount of revealed labels, hence requiring $n$ to be quite large. Besides, without an unsupervised counterpart, it is not applicable when revealing is unreliable. To address these challenges, we propose our second semi-supervised clustering algorithm which performs well in practice and covers the unsupervised setting as a special case. As discussed in the previous section, SDPs enjoy many nice properties, among which the monotone-robustness is particularly interesting to us. In the semi-supervised setting, the revealed labels are supposed to enhance the community structure. However, the work from \citep{Moitra2016HowRA} suggests such enhancement may not help with, but to the contrary can hurt the performance of many algorithms, which makes SDP an ideal starting point for us. 
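Before turning to the program itself, we record an illustrative sketch of the revealing process of Definition \ref{SPBM} (NumPy only; the function name is ours); it produces the vector of revealed labels with entries in $\{+1,-1,0\}$, where $0$ marks an unrevealed vertex.
\begin{verbatim}
import numpy as np

def reveal_labels(x, rho, seed=None):
    """Balanced revealing of a fraction rho of the labels: exactly
    floor(rho*n/2) labels are revealed in each community; entry 0
    means 'not revealed'."""
    rng = np.random.default_rng(seed)
    n = len(x)
    m_half = int(rho * n / 2)
    revealed = np.zeros(n, dtype=int)
    for s in (+1, -1):
        idx = np.flatnonzero(x == s)
        chosen = rng.choice(idx, size=m_half, replace=False)
        revealed[chosen] = s
    return revealed
\end{verbatim}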
We define the \textit{Constrained Semidefinite Program} (CSDP) as \begin{equation}\CSDP(M) = \max_{\substack{X \succeq 0 \\ X_{ii}=1, ~\forall i \in [n]}}\{\la M,X\ra:~ X_{ij} = x_i \cdot x_j, ~\forall i,j \in \mathcal{R}\}\end{equation} and show that it solves the semi-supervised community detection problem in the form of hypothesis testing. \begin{theorem}\label{thm:CSDP} Let $(G,x_\mathcal{R})\sim \G(n,a/n,b/n,\rho)$ and $A$ be the adjacency matrix associated with $G$. For any $a > b$, there exists $\rho_0 < 1$ such that if $\rho \geq \rho_0$, the CSDP-based test $T(G, x_\mathcal{R};\Delta) = \mathbbm{1}_{\{\CSDP(A - \frac{d}{n} \mathbf{1}\mathbf{1}^\top) \geq n[(a-b)/2 - \Delta]\}}$ will succeed with a high probability for some $\Delta > 0$. \end{theorem} \subsection{Proof Techniques}\label{proof tech} The technical challenges of establishing Theorem \ref{thm:census} root in the fact that the advantage created by revealed labels can be easily blurred out by various approximations of the limit distribution. Instead of the central limit theorem, one needs a Berry–Esseen-type inequality to derive a more quantitative result of the convergence rate. Moreover, since the distribution of each underlying component also depends on $n$, the conventional sample mean formulation does not apply here. We overcome the difficulty above with a direct analysis of non-asymptotic distributions, which leads to a detailed comparison between two binomial variable sequences with constant expectations. It is quite surprising that this calculation can be carried out in a rather elegant manner since many other applications of this method are much more technically involved. For example, to establish independence among estimators, one may need to consider the `leave-one-out' trick. But in our case, it comes in a very natural way. Regarding CSDP, we first show it can be coupled to an SDP with the surrogate input matrices. Moreover, its optimal value lies between two unsupervised SDPs associated with the same random graph model (different parameters). It means that all the analytical results from SDP research can be transferred into the CSDP study. However, we notice that it is common to make assumptions on the average degree $d$ in the relevant literature. It is quite reasonable in the unsupervised setting since the graph topology is a strong indicator of the possibility of weak recovery. E.g.,\ when $d\leq1$, there will not exist a giant component that is of size linear in $n$. To establish our result without such extra assumptions, we derive a probabilistic bound on the cut norm of the centered adjacency matrix and then use Grothendieck's inequality to bound the SDP on ERMs from above. This idea follows from \citep{Gudon2014CommunityDI}; we give a slightly different analysis fitting for our purpose. A generalized weak law of large numbers is also derived to address the issue that distributions of the entries change as $n\to \infty$. Then we conclude the proof with a lower bound of the CSDP on SBMs considering a witness that consists of the ground truth of labels. \subsection{Outline} The rest of the paper is organized in the following way. In Section \ref{Census method}, we formally derive the census method and prove that it can solve the weak recovery problem throughout the entire parameter domain. 
In Section \ref{CSDP}, we introduce the constrained SDP and the associated hypothesis test, through which we show that even below the KS threshold (also the information-theoretic threshold), the ERMs and the SBMs become distinguishable in the semi-supervised setting. Section \ref{numexp} includes some numerical simulation results. We end the paper with concluding remarks in Section \ref{conclusion}.

\subsection{Notation}
For any $n\in\mathbb{N}$, we denote the first $n$ integers by $[n] = \{1,2,\dots,n\}$. For a set $S$, its cardinality is denoted by $|S|$. We use lowercase letters for vectors (e.g.,\ $v = (v_1,v_2,\dots,v_n)$) and uppercase letters for matrices (e.g.,\ $M = [M_{ij}]_{i,j\in[n]}$). In particular, for adjacency matrices, we omit their dependency on the underlying graphs: instead of $A_G$, we simply write $A$. $\1_n = (1,\dots,1) \in \mathbb{R}^n$ stands for the all-ones vector, and $I_n$ is the $n\times n$ identity matrix. $\mathbf{e}_i \in \mathbb{R}^n$ denotes the $i$-th standard basis vector. For two real-valued matrices $A$ and $B$ with the same dimensions, we define the Frobenius inner product as $\la A, B \ra = \sum_{i,j}A_{ij}\cdot B_{ij} = \Tr (A^\top B)$. The vector inner product is viewed as the special case of $n\times 1$ matrices. Let $\|v\|_p = (\sum_{i=1}^n |v_i|^p)^{1/p}$ be the $\ell_p$ norm of a vector, with the standard extension to $p = \infty$. Let $\|M\|_{p\to q} = \sup_{\|v\|_p \leq 1} \|Mv\|_q$ be the $\ell_p$-to-$\ell_q$ operator norm and $\|M\|_{\text{op}} \coloneqq \|M\|_2 \coloneqq \|M\|_{2\to 2}$. Random graphs induce measures on the product space of label, edge, and revealed node assignments over $n$ vertices. For any $n\in \mathbb{N}$, it is implicitly understood that one such measure is specified for that graph size. The terminology \textit{with high probability} means `with probability converging to $1$ as $n \to \infty$'. Also, we follow the conventional Big-Oh notation for asymptotic analysis. $o_p(1)$ stands for convergence to $0$ in probability.

\section{Census Method}\label{Census method}
Analysis of the model from Definition \ref{PBM} is a challenging task since, conditioned on the graph, it is neither an Ising model nor a Markov random field. This is mainly due to the following facts: (1) the balance requirement puts a global condition on the size of each cluster; (2) even conditioned on the sizes, there is a slight repulsion between unconnected nodes. Namely, if two nodes do not form an edge, the probability that they are in the same community differs from the probability that they are in opposite communities. Recent years have witnessed a series of excellent contributions to the study of phase transitions in the sparse regime. Our census method for semi-supervised clustering is mainly inspired by the natural connection between community detection on SBMs and reconstruction problems on trees, which was formally established by \citep{MosselNS15}. Intuitively, for a vertex $v$ in $\G(n,a/n,b/n)$, it is unlikely that a node from its small neighborhood has an edge leading back to $v$. Therefore, the neighborhood looks like a randomly labeled tree with high probability. Furthermore, the labeling on the vertices behaves like broadcasting a bit from the root of a tree down to its leaves (see the survey \citep{Mossel2001SurveyIF} for a detailed discussion). In this section, we will first look into the census method of $t$-neighbors, i.e.,\ deciding the label of a node by the majority vote of its neighbors at depth $t$.
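As a concrete reference implementation of this rule, the sketch below (ours, for illustration only; \texttt{numpy} is assumed and \texttt{census\_estimate} is a hypothetical name) labels every unrevealed vertex by the majority of the revealed labels at graph distance exactly $t$, breaking ties with a fair coin, and keeps the revealed labels as they are. For $t=1$ this reduces to taking the sign of $\langle A[v,:],\Tilde{x}\rangle$, which is the estimator analyzed below.
\begin{verbatim}
# Minimal sketch (illustrative only) of the t-neighbors census estimator.
from collections import deque
import numpy as np

def census_estimate(A, x_tilde, t=1, seed=None):
    """Majority vote of revealed labels at distance exactly t; fair-coin ties."""
    rng = np.random.default_rng(seed)
    n = A.shape[0]
    neighbors = [np.nonzero(A[i])[0] for i in range(n)]
    x_hat = x_tilde.copy()                  # revealed vertices keep their labels
    for v in np.where(x_tilde == 0)[0]:
        dist = {v: 0}                       # breadth-first search up to depth t
        queue = deque([v])
        while queue:
            u = queue.popleft()
            if dist[u] == t:
                continue
            for w in neighbors[u]:
                if w not in dist:
                    dist[w] = dist[u] + 1
                    queue.append(w)
        vote = sum(x_tilde[u] for u, d in dist.items() if d == t)
        x_hat[v] = np.sign(vote) if vote != 0 else rng.choice([-1, 1])
    return x_hat
\end{verbatim}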
After defining the general framework, we will show that when $\text{SNR}\leq 1$, $1$-neighbors voting is optimal in terms of recovering the cluster structure via informal calculation. Then, we rigorously prove that census on $1$-neighbors solves the semi-supervised weak recovery problem for any $\text{SNR}>0$ with an arbitrarily small fraction of the labels revealed. \subsection{Majority of t-Neighbors} Let $(G,x)$ obey the planted bisection model $\G(n,a/n,b/n)$. We denote the set of all vertices by $V(G)$. For a fixed vertex $v$ and $t\in\mathbb{N}$, let $N_t(v)$ denote the number of vertices that are $t$ edges away from $v$. $\Delta_t(v)$ is defined as the difference between the numbers of $t$-neighbors in each community. Namely, \begin{align} N_t(v) &= |K_t(v)| \\ \Delta_t(v) &= \sum_{u \in K_t(v)} x_{u} \end{align} where $K_t(v)\coloneqq\{u \in V(G):~ d(u,v)=t\}$ denotes the $t$-neighbors of $v$. If one assumes that the subgraph of $G$ induced by the vertices within $t$ edges of $v$ is a tree, the expected value of $N_t(v)$ is approximately $[(a+b)/2]^t$, and the expected value of $x_v \cdot \Delta_t(v)$, i.e.,\ the expected number of these vertices in the same community as $v$ minus the expected number of these vertices in the other community, is approximately $[(a-b)/2]^t$. So, if one can somehow independently determine which community a vertex is in with an accuracy of $1/2 + \alpha$ for some $\alpha > 0$, one will be able to predict the label of each vertex with an accuracy of roughly $1/2 + [(a-b)^2/(2(a+b))]^{t/2}\cdot\alpha$, by guessing it as the majority of $v$'s $t$-neighbors. Under the unsupervised learning setting, one can get a small advantage, $\alpha \sim \Theta(1/\sqrt{n})$, by randomly initializing labels. It is guaranteed by the central limit theorem that such a fraction exists in either an agreement or disagreement form. \begin{figure}[ht] \centering\includegraphics[width=0.8\textwidth]{census.png} \caption{Neighborhood of node $v$ with a tree structure. The ground truth of clusters is coded in black and white. The shaded area indicates those nodes randomly guessed to be in the same community or the opposite community as $v$. The annulus represents the collection of its $t$-neighbors.} \label{fig:census} \end{figure} To amplify this lucky guess, we need $t$ to be sufficiently large so that $[(a-b)^2/(2(a+b))]^{t/2} > \sqrt{n}$, which implies $[(a+b)/2]^t>n$. Note that $d = (a+b)/2$ is the average degree. This means before the signal is strong enough for our purpose, not only our tree approximation will break down, but vertices will also be exhausted. However, if we have access to some of the true labels, i.e.,\ in the semi-supervised setting, we can leverage the tree structure to get a non-vanishing advantage over random guessing. Let $A$ be the adjacency matrix associated with $G$. Consider the random variables $Y_u$ representing votes of directly connected neighbors, \begin{equation} Y_u = \begin{cases} x_{u} &\text{if~} A_{uv} = 1, \\ 0 &\text{otherwise}. \end{cases} \end{equation} We have \begin{align} N_1(v) &= \sum_{u \in V(G)} |Y_u|, \\ \Delta_1(v) &= \sum_{u \in V(G)} Y_u. \end{align} By definition of the planted bisection model, \begin{equation} \prob(Y_u=1 | x_v=1) = \frac{\prob(Y_u=1, x_v=1)}{\prob(x_v=1)} \approx \frac{a}{2n}. \end{equation} Similarly, \begin{equation} \prob(Y_u=-1 | x_v=1) \approx \frac{b}{2n}. \end{equation} It is not exact due to the balanced community constraint. But when $n$ is large, such an effect is negligible. 
Furthermore, if we consider the definition of the planted bisection model without balance constraint, the equation will be exact. Without loss of generality, we only consider the case where $x_v = 1$ and omit the condition on it. We have \begin{equation} \Delta_1(v)=\sum_{u \in V(G)} Y_u \qquad\text{with~~} Y_u = \begin{cases} 1 & \text{w.p.~} \frac{a}{2n}\\ -1 & \text{w.p.~} \frac{b}{2n}\\ 0 & \text{w.p.~} 1-\frac{a+b}{2n} \end{cases} \end{equation} where the $Y_u$'s are independent. Note that $\E (Y_u) = \frac{a-b}{2n}$ and $\E (Y_u^2) = \frac{a+b}{2n}$. Recall that $\rho \in [0,1]$ is the ratio of revealed labels. For the sake of simplicity, we assume the total number of revealed vertices $m = \rho n \in 2\mathbb{N}$ to be an even integer. The revealed vertices are chosen arbitrarily, denoted as $\mathcal{R} \coloneqq \{u_{n-m+1},u_{n-m+2},\dots,u_{n}\}$. The model also provides that the number of revealed vertices in each community is $\frac{\rho n}{2}$. Then the majority of revealed vertices among 1-neighborhood of $v$ can be written as \begin{equation} \Tilde{\Delta}_1(v)=\sum_{u \in \mathcal{R}} Y_u. \end{equation} Therefore, \begin{align} &\E (\Tilde{\Delta}_1(v)) = \sum_{u\in \mathcal{R}} \E(Y_u) = \rho \frac{a-b}{2},\\ &\V (\Tilde{\Delta}_1(v)) = \sum_{u\in \mathcal{R}} \V(Y_u) = \rho \frac{a+b}{2} + o(1). \end{align} \subsection{Locally Tree-Like Structure} Proceeding to the $t$-neighbors, we need to first understand the structure of a small neighborhood in the SBM. The neighborhoods in a sparse network locally have no loops. So they have a nice tree-like structure. Moreover, the labels also obey some random broadcasting processes on trees. A broadcasting process transmits the information from the root of a tree to all the nodes. At each level, nodes inherit the information from its parent. Meanwhile, errors could happen with a certain amount of probability. Usually, the edges are assumed to be included according to the same rule and work independently. It was first considered in genetics \citep{CAVENDER1978271} since it perfectly describes the propagation of a gene from ancestor to descendants. It can also be interpreted as a communication network that passes out the information from the root. Such processes were intensively studied in information theory and statistical physics \citep{10.2307/2959462, Higuchi1977RemarksOT, Bleher1995OnTP}. In particular, we are interested in the following Markov process since it can be identified with the labeling process of a small neighborhood in SBM. \begin{definition}[Galton–Watson tree with Markov process] Let $T$ be an infinite rooted tree with root $v$. Given a number $0\leq\epsilon<1$ and the offspring rate $d>0$, we define a random labeling $\tau \in \{1,-1\}^T$. First, draw $\tau_v$ uniformly in $\{1,-1\}$. Then, recursively construct the labeling as follows: \begin{itemize} \item Generate children of each parent node according to a Poisson distribution with mean $d$. \item Conditionally independently given $\tau_v$, for every child $u$ of $v$, set $\tau_u=\tau_v$ with probability $1-\epsilon$ and $\tau_u=-\tau_v$ otherwise. \end{itemize} \end{definition} The following lemma shows that a $\ln (n)$-neighborhood in $(G,x)$ looks like a Galton-Watson tree with Markov process. For any $v \in G$, let $G_R$ be the induced subgraph on $\{u \in G : d(u, v) \leq R\}$. \begin{lemma}\citep{MosselNS15} Let $R = R(n) = \frac{\ln n}{10\ln(2(a+b))}$. 
There exists a coupling between $(G, x)$ and $(T, \tau)$ such that $(G_R, x_{G_R} )$ = $(T_R, \tau_{T_R})$ a.a.s. \end{lemma} Hence, for fixed $t \in \mathbb{N}$, $t \leq R$, and any $v \not\in \mathcal{R}$, we can denote the label of a vertex in $v$'s $t$-neighborhood as $Y_i^{(t)} \coloneqq \Pi_{k=1}^t \prescript{k}{}{Y_u}$, where $\{\prescript{k}{}{Y_u}\}_{k=1}^t$ are independent copies of $Y_u$. Then we have $\E (Y_i^{(t)}) = (\frac{a-b}{2n})^t$ and $\E ((Y_i^{(t)})^2) = (\frac{a+b}{2n})^t$. Moreover, $\{Y_i^{(t)}\}$'s are essentially independent. Therefore, the census of $v$'s revealed $t$-neighbors can be written as \begin{equation} \Tilde{\Delta}_t(v)=\sum_{i \in [\rho \cdot n^t]} Y_i^{(t)} \quad \textit{(a.a.s)}. \end{equation} The central limit theorem suggests \begin{equation} \Tilde{\Delta}_t(v) \to \mathcal{N}(\rho(\frac{a-b}{2})^t, \rho(\frac{a+b}{2})^t), \quad \text{as~} n \to \infty . \end{equation} Hence, \begin{align} \prob(\Tilde{\Delta}_t(v) >0 | x_v=1) &= \frac{1}{2}\left[1 + \text{erf}\left(\frac{\rho[(a-b)/2]^t}{\sqrt{\rho[(a+b)/2]^t}\sqrt{2}}\right)\right] + o(1)\\ &= \frac{1}{2}+ \frac{1}{2}\text{erf}\left(\sqrt{\frac{\rho~\text{SNR}^t}{2}}\right) + o(1) \end{align} where $\text{erf}(x) = \frac{2}{\sqrt{\pi}} \int_0^x \exp(-t^2) \dif{t} $ is the Gauss error function. So one can see that once SNR is less than or equal to $1$, it is not beneficial to look into $t$-neighbors. The optimal choice of $t$ is $1$ in this situation. Since we also know that weak recovery is solvable when $\text{SNR}>1$, it makes the majority of $1$-neighbors particularly interesting. Suppose $\text{SNR}\leq 1$ and include the symmetric part of $x_v=-1$, we have \begin{equation} \prob(\text{sgn}(\Tilde{\Delta}_1(v)) = x_v)> \frac{1}{2} + \frac{1}{3}\sqrt{\rho~\text{SNR}}. \end{equation} Consider the estimator of unrevealed labels \begin{equation}\hat{x}_{\mathcal{R}^\complement}\coloneqq \text{sgn}\left([\Tilde{\Delta}_1(u_1), \Tilde{\Delta}_1(u_2), \dots, \Tilde{\Delta}_1(u_{n-m})]^\top\right)\end{equation} and the ground truth $x_{\mathcal{R}^\complement}=[x_{u_1},x_{u_2},\dots,x_{u_{n-m}}]^\top$. Recall that \begin{equation} \text{Overlap}(x_{\mathcal{R}^\complement},\hat{x}_{\mathcal{R}^\complement})=\frac{1}{n-m}| \langle x_{\mathcal{R}^\complement},\hat{x}_{\mathcal{R}^\complement} \rangle |. \end{equation} We can conclude that \begin{align} \E [\text{Overlap}(x_{\mathcal{R}^\complement},\hat{x}_{\mathcal{R}^\complement})] &= \E\left[\frac{1}{n-m} \left|\sum_{i\in[n-m]}\text{sgn}(\Tilde{\Delta}_1(u_i))x_{u_i}\right|\right]\\ &\geq \frac{1}{n-m}\left|\sum_{i\in[n-m]} \E\left[\text{sgn}(\Tilde{\Delta}_1(u_i))x_{u_i}\right]\right|\\ &> \frac{2}{3}\sqrt{\rho~\text{SNR}}. \end{align} The expected overlap is not vanishing which suggests that weak recovery is solvable for any SNR. But it is technically impractical to rigorously describe the limit distribution of our census estimator without blurring this edge out. From Figure \ref{fig:rho_overlap}, we can see that our calculation is close to the expectation. But the convergence rate depends on $\rho$. In particular, when both SNR and $\rho$ are small, the asymptotic behavior of our algorithm remains unclear. Hence, we go through a direct analysis to establish the desired result. \begin{figure}[htbp] \centering\includegraphics[width=\textwidth]{census_std.png} \caption{The simulation result of $\G(3000, 5/3000, 2/3000)$, $\text{SNR}\approx0.64$. 
Solid curves stand for the average overlaps of the $t$-neighbors census method ($t$ = 1, 2, and 3) on 60 independent realizations of the random graph. The shaded area represents the standard error band of the 1-neighbors census. The dashed curve stands for the asymptotic lower bound obtained from our calculation.}
\label{fig:rho_overlap}
\end{figure}

\subsection{Majority of 1-Neighbors}
Since the algorithm is invariant under index reordering, without loss of generality, we let the adjacency matrix $A$ be a symmetric matrix with diagonal entries $A_{ii}=0,~ i=1,2, \dots, n$. For $1 \leq i < j \leq n$, the $\{A_{ij}\}$'s are independent, with
\begin{align}
&A_{ij} \sim \text{Bernoulli}\left(\frac{a}{n}\right) &\quad \left(i\leq \frac{n}{2} ~\text{and}~ j \leq \frac{n}{2} \right) ~&\text{or}~ \left(i > \frac{n}{2} ~\text{and}~ j > \frac{n}{2} \right),\\
&A_{ij} \sim \text{Bernoulli}\left(\frac{b}{n}\right) &\quad i\leq \frac{n}{2} ~&\text{and}~ j > \frac{n}{2}.
\end{align}
The true label $x$ and revealed label $\Tilde{x}$ are, respectively,
\begin{equation}
x_i=\begin{cases}
1 \quad &i=1,2,\dots,\frac{n}{2},\\
-1 \quad &i=\frac{n}{2}+1,\frac{n}{2}+2,\dots,n;
\end{cases}
\quad
\Tilde{x}_i=\begin{cases}
1 \quad &i=1,2,\dots,\frac{m}{2},\\
-1 \quad &i=\frac{n}{2}+1,\frac{n}{2}+2,\dots,\frac{n+m}{2},\\
0 \quad &\text{otherwise.}
\end{cases}
\end{equation}
For an unrevealed vertex $i$, we consider the majority vote of its 1-neighbors,
\begin{equation}
\Tilde{\Delta}_1(i) = \langle A[i,:],\Tilde{x} \rangle = \sum_{j:\Tilde{x}_j \neq 0} A_{ij}\Tilde{x}_j = \sum_{j:\Tilde{x}_j \neq 0} A_{ji}\Tilde{x}_j.
\end{equation}
Therefore, the $\{\Tilde{\Delta}_1(i)\}$'s are independent for all $i:\Tilde{x}_i = 0$ since they share no common terms. Note that this is not the case for all $i \in [n]$. However, we only need to predict the unrevealed labels, hence the independence holds. The estimator given by majority voting of 1-neighbors is
\begin{equation}
\hat{x}_i = \begin{cases}
\Tilde{x}_i \quad &\text{if}~\Tilde{x}_i \neq 0,\\
\text{sgn}^*(\Tilde{\Delta}_1(i)) \quad &\text{if}~\Tilde{x}_i = 0.
\end{cases}
\end{equation}
We toss a fair coin when $\Tilde{\Delta}_1(i)=0$ to break the tie, i.e.,\
\begin{equation}\prob(\text{sgn}^*(\Tilde{\Delta}_1(i))=1|\Tilde{\Delta}_1(i)=0) = \prob(\text{sgn}^*(\Tilde{\Delta}_1(i))=-1|\Tilde{\Delta}_1(i)=0) = \frac{1}{2}.\end{equation}
Formally, for a finite sequence of random variables $W_i$,
\begin{equation}
\text{sgn}^*(W_i) = \text{sgn}(W_i) + \mathbbm{1}_{\{W_i = 0\}} (2B_i - 1)
\end{equation}
where the $\{B_i\}$'s are i.i.d.~Bernoulli(1/2) random variables, independent of all other random objects. Note that $\text{sgn}^*$ is introduced only for analysis purposes and is equivalent to the conventional sign function in practice. If $(G,x)$ is an Erdős–Rényi random graph with revealed labels $\Tilde{x}$, then any estimator can have only a vanishing correlation with the true labels among the unrevealed vertices. So the semi-supervised weak recovery problem on the SBM requires finding an estimator such that the correlation restricted to the unrevealed part is non-vanishing. Formally, we want to show that
\begin{equation}\prob\left(\text{Overlap}(x|_{\Tilde{x}_i =0},\hat{x}|_{\Tilde{x}_i =0}) \geq \Omega(1) \right)=1-o(1).\end{equation}
As discussed in Section \ref{proof tech}, we start with a critical result scrutinizing binomial variable sequences.
It gives us an edge over direct analysis via a Berry–Esseen-type inequality, which usually assumes the distribution of individual random variables in the sequence independent of $n$. \begin{lemma}\label{constant gap} Let $X$ and $Y$ be two independent binomial random variables with $X \sim \text{Binomial}(n,\frac{a}{n})$ and $Y \sim \text{Binomial}(n,\frac{b}{n})$, $a>b$. Denote $\delta = \delta(a,b) \coloneqq \frac{a-b}{2\exp{(a+b)}}$. Then, for sufficiently large $n$, \begin{equation}\prob(X>Y) - \prob(X<Y) \geq \delta .\end{equation} \end{lemma} \begin{remark} By symmetry, we always have $\prob(X>Y) - \prob(X<Y) > 0$. This lemma guarantees the difference will not vanish as $n\to \infty$. \end{remark} \begin{proof} By the law of total probability and independence, we have \begin{align} \prob(X>Y)&=\sum_{x=1}^n \prob(Y<x)\prob(X=x)\\ &=\sum_{x=1}^n\sum_{y=0}^{x-1} \prob(Y=y)\prob(X=x)\\ &=\sum_{x=1}^n\sum_{y=0}^{x-1} \left[ \binom{n}{x}\left(\frac{a}{n}\right)^x \left(1-\frac{a}{n}\right)^{n-x} \binom{n}{y}\left(\frac{b}{n}\right)^y \left(1-\frac{b}{n}\right)^{n-y} \right]. \end{align} Let $\Delta \coloneqq \prob(X>Y) - \prob(X<Y)$, then \begin{multline*} \Delta=\sum_{x=1}^n \binom{n}{x} \left(\frac{a}{n}\right)^x\left(1-\frac{a}{n}\right)^{n-x}\left(\frac{b}{n}\right)^x\left(1-\frac{b}{n}\right)^{n-x}\\ \Bigg\{\sum_{y=0}^{x-1} \binom{n}{y} \left[ \left(\frac{b}{n}\right)^{y-x}\left(1-\frac{b}{n}\right)^{x-y} - \left(\frac{a}{n}\right)^{y-x} \left(1-\frac{a}{n}\right)^{x-y} \right]\Bigg\} \end{multline*} \begin{multline*} \phantom{\Delta}=\sum_{x=1}^n \binom{n}{x} \left(\frac{ab}{n}\right)^x \left(1-\frac{a+b}{n}+\frac{ab}{n^2}\right)^{n-x}\\ \Bigg\{\sum_{y=0}^{x-1} \binom{n}{y}\frac{1}{n^y} \left[ \left(\frac{1}{b}-\frac{1}{n}\right)^{x-y} - \left(\frac{1}{a}-\frac{1}{n}\right)^{x-y} \right]\Bigg\}. \end{multline*} Let $f(x)= \alpha^x - \beta^x,~ \alpha > \beta >0$. Since $f'(x) = \alpha^x\ln{\alpha} - \beta^x\ln{\beta} >0$, we have $f(m)\geq f(1) = \alpha - \beta,~ \forall m\in \mathbb{N}$. So $\left(\frac{1}{b}-\frac{1}{n}\right)^{x-y} - \left(\frac{1}{a}-\frac{1}{n}\right)^{x-y}\geq \frac{a-b}{ab}$. Also notice that $\binom{n}{m}= \prod_{i=0}^{m-1}\frac{n-i}{m-i} \geq \left(\frac{n}{m}\right)^m,~ \forall 1\leq m\leq n$. We have \begin{align} \Delta &\geq \sum_{x=1}^n \left(\frac{ab}{x}\right)^x \left(1-\frac{a+b}{n}\right)^{n-x} \left( \sum_{y=0}^{x-1} \frac{1}{y^y}\cdot\frac{a-b}{ab} \right)\\ &\geq (a-b) \left(1-\frac{a+b}{n}\right)^n\\ &\geq \frac{a-b}{2\exp{(a+b)}} \qquad (\text{for sufficiently large $n$}) \end{align} where we follow the convention that $0^0=1$. \end{proof} Then, we can simply resort to a classical concentration inequality to bound the overlap. \begin{lemma}[Chernoff–Hoeffding theorem \citep{Chernoff1952AMO}] \label{Chernoff-Hoeffding} Suppose $X_1, \dots, X_n$ are i.i.d.~random variables, taking values in $\{0, 1\}$. Let $p = \E(X)$ and $\epsilon > 0$. Then \begin{equation} \prob\left(\frac{1}{n}\sum X_i \leq p-\epsilon \right) \leq \left( \left(\frac{p}{p-\epsilon}\right)^{p-\epsilon} \left(\frac{1-p}{1-p+\epsilon}\right)^{1-p+\epsilon} \right) ^n =\me^{-D(p-\epsilon\|p)n} \end{equation} where $D(x\|y)=x\ln\frac{x}{y} + (1-x)\ln(\frac{1-x}{1-y})$ is the Kullback–Leibler-divergence between Bernoulli distributed random variables with parameters $x$ and $y$. \end{lemma} We now convert the KL divergence to the total variation distance, which is easier to work with. 
Let $P_1$ and $P_2$ be two probability measures defined on the same sample space $\Omega$ and sigma-algebra $\mathcal{F}$. The total variation distance between them is defined as $d_{TV}(P_1,P_2) = \sup_{E \in \mathcal{F}}|P_1(E) - P_2(E)|$. Moreover, in the discrete case, we have the identity $d_{TV}(P_1,P_2) = \frac{1}{2}\|P_1-P_2\|_1 = \frac{1}{2}\sum_{\omega\in \Omega} |P_1(\omega) - P_2(\omega)|$. It is related to the KL divergence through Pinsker's inequality (see, e.g.,\ \citep{Tsybakov2009IntroductionTN}, Chapter 3). For completeness, we include an elementary proof of the Bernoulli special case that is sufficient for our usage later.
\begin{lemma} \label{TV-KL}
Let $P_1$ and $P_2$ be two Bernoulli distributions, where $P_1(1) =x$ and $P_2(1) =y$. We have
\begin{equation}2(d_{TV}(P_1,P_2))^2 \leq D(x\|y).\end{equation}
\end{lemma}
\begin{proof}
We can write both sides of the inequality as
\begin{equation}
D(x\|y) = x\ln\frac{x}{y} + (1-x)\ln\left(\frac{1-x}{1-y}\right),
\end{equation}
\begin{equation}
2(d_{TV}(P_1,P_2))^2 = \frac{1}{2} \|P_1 - P_2\|_1^2 = 2(x-y)^2.
\end{equation}
We denote $f(x,y) = x \ln \frac{x}{y} + (1-x)\ln \frac{1-x}{1-y} - 2(x-y)^2$. Therefore,
\begin{equation}
\frac{\partial f}{\partial y} = (x-y)\left[4-\frac{1}{y(1-y)}\right].
\end{equation}
Notice that since $0 < y < 1$, we have $y(1-y) \leq \frac{1}{4}$, so $4-\frac{1}{y(1-y)}$ is non-positive. Hence $\frac{\partial f}{\partial y}\leq 0$ for $y< x$ and $\frac{\partial f}{\partial y}\geq 0$ for $y> x$, so for fixed $x$ the function $y\mapsto f(x,y)$ attains its minimum at $y=x$. Thus, $f(x,y) \geq f(x,x) = 0 ,~ \forall y$. Hence,
\begin{equation}D(x\|y) - 2(d_{TV}(P_1,P_2))^2 \geq 0.\end{equation}
\end{proof}
Now we prove the main result for the census method.
\begin{proof}[Proof of Theorem \ref{thm:census}]
Recall that for any $i$ such that $\Tilde{x}_i = 0$, our estimator is defined as $\hat{x}_i = \text{sgn}^*(\Tilde{\Delta}_1(i))$ and
\begin{equation}\Tilde{\Delta}_1(i) = \sum_{j:\Tilde{x}_j \neq 0} A_{ij}\Tilde{x}_j = \left(\sum_{\rho \frac{n}{2} <j \leq \frac{n}{2}} A_{ij} \right) - \left(\sum_{(1+\rho) \frac{n}{2} <j \leq n} A_{ij} \right).\end{equation}
It is indeed the difference between two independent binomial variables with parameters $(\rho n, \frac{\rho a}{\rho n})$ and $(\rho n, \frac{\rho b}{\rho n})$. By Lemma \ref{constant gap}, we have
\begin{equation}
\prob(\text{sgn}(\Tilde{\Delta}_1(i)) = x_i) - \prob(\text{sgn}(\Tilde{\Delta}_1(i)) = - x_i) \geq \delta = \frac{\rho(a-b)}{2\me^{\rho(a+b)}}
\end{equation}
for sufficiently large $n$. Also, notice that
\begin{equation}
\prob(\text{sgn}(\Tilde{\Delta}_1(i)) = - x_i) = 1 - \prob(\text{sgn}(\Tilde{\Delta}_1(i)) = x_i) - \prob(\Tilde{\Delta}_1(i)=0).
\end{equation}
Therefore,
\begin{equation}
\prob(\text{sgn}(\Tilde{\Delta}_1(i)) = x_i) \geq \frac{1+\delta}{2} - \frac{1}{2}\prob(\Tilde{\Delta}_1(i)=0).
\end{equation}
Then, by the law of total probability, we have
\begin{align}
\prob(\hat{x}_i = x_i) &= \prob(\text{sgn}^*(\Tilde{\Delta}_1(i)) = x_i)\\
&= \prob(\text{sgn}(\Tilde{\Delta}_1(i)) = x_i) + \frac{1}{2}\prob(\Tilde{\Delta}_1(i)=0)\\
&\geq \frac{1}{2} + \frac{\delta}{2}.
\end{align}
Since the $\{\hat{x}_i\}$'s are independent for all unrevealed vertices, as the $\{\Tilde{\Delta}_1(i)\}$'s are, and $\E \left[\frac{\hat{x}_ix_i+1}{2}\right] = \prob(\hat{x}_i = x_i)$, Lemma \ref{Chernoff-Hoeffding} and Lemma \ref{TV-KL} give us that
\begin{equation}
\prob \left(\frac{1}{(1-\rho)n}\sum_{i:\Tilde{x}_i = 0}\frac{\hat{x}_ix_i+1}{2}\leq \frac{1}{2} + \frac{\delta}{2} - \epsilon \right) \leq \me^{-2\epsilon^2 (1-\rho)n}.
\end{equation}
Taking $\epsilon = \frac{\delta}{4}$, we have
\begin{equation}
\prob\left(\text{Overlap}(x|_{\Tilde{x}_i =0},\hat{x}|_{\Tilde{x}_i =0}) \geq \frac{\delta}{2}\right) \geq 1 - \me^{-\frac{\delta^2 (1-\rho)n}{8}}.
\end{equation}
As long as $a>b$, we have $\delta > 0$, which concludes the proof.
\end{proof}
\begin{corollary}\label{distinguishable}
The semi-supervised SBM and ERM are not mutually contiguous for any given $a>b\geq 0$ and $\rho>0$.
\end{corollary}
\begin{proof}
Let $P_n^{(0)} = \G(n, \frac{a+b}{2n}, \frac{a+b}{2n}, \rho)$ and $P_n^{(1)} = \G(n, \frac{a}{n}, \frac{b}{n}, \rho)$. Then consider the same constant $\delta > 0$ from the proof of Theorem \ref{thm:census} and denote the event sequence $E_n = \{\text{Overlap}(x|_{\Tilde{x}_i =0},\hat{x}|_{\Tilde{x}_i =0}) \geq \frac{\delta}{2}\}$ where $\hat{x}$ is our semi-supervised census estimator. We have
\begin{align}
P_n^{(0)}(E_n) \rightarrow 0 \quad &\text{(law of large numbers)},\\
P_n^{(1)}(E_n) \nrightarrow 0 \quad &\text{(bounded from below)}.
\end{align}
\end{proof}
Recall that distinguishability and weak recovery are equivalent in the sense that they share the same phase transition threshold. In the semi-supervised setting, it is straightforward that weak recovery implies distinguishability. So, Theorem \ref{thm:census} suggests that in this case, SBM and ERM are always distinguishable, which is also equivalent to Corollary \ref{distinguishable}. Hence, the two problems share the same solvable region as well.

\section{Semi-Supervised SDP}\label{CSDP}
We have seen that the census method solves the semi-supervised community detection problem. However, the algorithm is practical only when the amount of revealed labels is sufficient to support reasonable performance. In other words, it has no unsupervised `fallback' built in. Meanwhile, SDPs enjoy nice properties like optimality and robustness, as mentioned earlier. It is also well known that approximate information about the extremal cuts of graphs can be obtained by computing the optimizer of the SDP of their adjacency matrix; see, for example, \citep{Goemans1995ImprovedAA}. From both a practical and a mathematical point of view, we are interested in developing an SDP-based semi-supervised clustering approach, through which we shall be able to see the models, algorithms, and phase transitions from a fresh perspective. In this section, we will focus on the hypothesis testing formulation of the community detection problem. We have discussed the equivalence between it and the non-vanishing overlap formulation under the unsupervised setting. In the semi-supervised scenario, it is still an interesting question to ask whether there exists a test that can distinguish SBMs from ERMs. Here we understand the ERM as the special case of the SBM with $a = b$. It also has ground-truth labels, which are uniformly random under the balance constraint. Given that the two models are contiguous in the unsupervised setting when $\text{SNR} \leq 1$, we want to show that the revealed labels together with the random graph can separate them.

\subsection{SDP for Community Detection}
Under the Planted Bisection Model specified in Definition \ref{PBM}, the MAP estimator is equivalent to the Maximum Likelihood estimator, which is given by min-bisection, i.e.,\ a balanced partition with the least number of crossing edges. Formally, it can be written as the following optimization problem,
\begin{equation}
\max_{\substack{x\in \{1,-1\}^n \\ x^\top \1 = 0}} x^\top A x.
\end{equation} By lifting the variable $X\coloneqq xx^\top$, we can rewrite it as \begin{equation} \hat{X}_\text{MAP}(G)=\argmax_{\substack{X \succeq 0 \\ X_{ii}=1, ~\forall i \in [n] \\ \text{rank} (X) = 1 \\ X \mathbf{1} = 0}} \langle A,X \rangle. \end{equation} Although min-bisection of $G$ is optimal (in the MAP sense) for exact recovery, finding it is NP-hard. Various relaxations have been proposed for the MAP estimator. Since the rank constraint makes the optimization difficult, we can remove it to make the problem convex. One can also get rid of the balance constraint by centralizing the adjacency matrix, $\tilde{A} \coloneqq A - \frac{d}{n} \mathbf{1}\mathbf{1}^\top$ with $d=(a+b)/2$ the average degree. This can also be justified using Lagrangian multipliers. The resulting semidefinite relaxation is given by \begin{equation}\label{eqn:SDP} \hat{X}_{\text{SDP}}(G)=\argmax_{\substack{X\succeq 0\\ X_{ii}=1, ~\forall i\in [n]}} \langle \tilde{A}, X \rangle. \end{equation} The feasible region $\{X\in\mathbb{R}^{n \times n}: X\succeq0, X_{ii}=1 ~\forall i \in [n] \}$ is indeed the space of correlation matrices, which defines a subset of the unit hypercube and is also called the \textit{elliptope}. Although it is derived from the relaxation of MAP, one can define the SDP for general symmetric matrices as \begin{equation}\label{eqn:SDP_} \SDP(M_{n\times n}) = \max\{\la M,X\ra: X \in \text{elliptope}_n\}. \end{equation} \begin{proposition} For any $n \times n$ symmetric matrix $M$, if we denote its leading eigenvalue as $\lambda_1$, then \begin{equation}\frac{1}{n}\SDP(M)\leq \lambda_1.\end{equation} \end{proposition} \begin{proof} For any feasible $X \succeq 0$ and $X_{ii} = 1$, we have $\Tr(X) = n$, moreover, \begin{equation} \la X,M = U \Lambda U^\tp \ra = \Tr(U^\tp X U \Lambda) = \la Y \coloneqq U^\tp X U, \Lambda \ra = \sum Y_{ii}\lambda_i \leq n \lambda_1. \end{equation} The last inequality follows from $\Tr(Y) = n$ and $Y \succeq 0$ so that $Y_{ii}\geq0$. \end{proof} This proposition relates SDPs to spectra of the underlying matrices, which suffer from those high-degree nodes as we mentioned in the introduction. In contrast, SDPs behave similarly on SBMs and random regular graphs. The optimal values of the SDPs for both are approximately $2n\sqrt{d}$, see \citep{montanari2015semidefinite}. Random regular graphs obey the uniform distribution over graphs with $n$ vertices and uniform degree $d$, which provide a simple example to illustrate the regularity property of SDPs. We cite an intermediate result from the original proof as Lemma \ref{lemma:SDP}. An important way to understand SDPs is by considering the Burer-Monteiro factorization of $X$, which characterizes the constraints. We have \begin{equation}\label{BMF} X=\Sigma \Sigma^\top \end{equation} where $\Sigma=(\sigma_1,\sigma_2,\dots, \sigma_n)^\top$ and $\|\sigma_i\|_2=1, ~\forall i \in [n]$. Therefore, the $i$-th node of the graph is associated with the vector $\sigma_i$ that lies on the unit sphere. $X_{ij}=\langle \sigma_i, \sigma_j \rangle$ can be interpreted as the affinity metric between nodes $i$ and $j$. SDP maximizes the likelihood score of this affinity matrix concerning the given centralized adjacency matrix. The optimizer $X^*$ is a better representation of the structure information than the vanilla adjacency matrix. Then we can identify the labels by simply running the k-means method on it or compute the eigenvector corresponding to the largest eigenvalue. 
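To experiment with this relaxation, one can use an off-the-shelf conic solver. The sketch below is a minimal illustration written by us (it assumes the third-party \texttt{cvxpy} package with its bundled SCS solver; the function names \texttt{sdp\_relaxation} and \texttt{spectral\_rounding} are hypothetical): it solves the SDP of equation~\eqref{eqn:SDP} over the elliptope and rounds the optimizer with the leading eigenvector, one of the two rounding options mentioned above. Applying k-means to the optimizer, as in Section~\ref{numexp}, is an equally valid alternative.
\begin{verbatim}
# Minimal sketch (illustrative only) of the SDP relaxation over the elliptope.
import numpy as np
import cvxpy as cp

def sdp_relaxation(A_tilde):
    """Solve max <A_tilde, X> s.t. X PSD and diag(X) = 1 (the elliptope)."""
    n = A_tilde.shape[0]
    X = cp.Variable((n, n), PSD=True)
    objective = cp.Maximize(cp.sum(cp.multiply(A_tilde, X)))   # Frobenius inner product
    problem = cp.Problem(objective, [cp.diag(X) == 1])
    problem.solve(solver=cp.SCS)
    return X.value, problem.value

def spectral_rounding(X_opt):
    """Round the optimizer to +/-1 labels via its leading eigenvector."""
    eigvals, eigvecs = np.linalg.eigh(X_opt)   # eigenvalues in ascending order
    v = eigvecs[:, -1]
    return np.where(v >= 0, 1, -1)
\end{verbatim}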
\subsection{Constrained SDP and Semi-Supervised Testing} \label{CSDP def} In this section, we will introduce our SDP modification and prove that it solves the semi-supervised community detection problem with the hypothesis testing formulation. Let $x$ denote labels of G(n, $\frac{a}{n}$, $\frac{b}{n}$). And $m$ of them are revealed uniformly at random in a balanced manner. Conditioned on the ground truth of clusters, indices of revealed nodes $\mathcal{R}$ and edges are independent. So, without loss of generality, we denote revealed labels $\Tilde{x}$ as follows: \begin{equation} x_i=\begin{cases} 1 \quad &i=1,2,\dots,\frac{n}{2},\\ -1 \quad &i=\frac{n}{2},\frac{n}{2}+1,\dots,n; \end{cases} \quad \Tilde{x}_i=\begin{cases} 1 \quad &i=1,2,\dots,\frac{m}{2},\\ -1 \quad &i=\frac{n}{2},\frac{n}{2}+1,\dots,\frac{n+m}{2},\\ 0 \quad &\text{otherwise}. \end{cases} \end{equation} We have shown that the entry value of the optimizer $X$ can be interpreted as an affinity metric among nodes. Moreover, we have $X_{ij} \in [-1, 1],~\forall~i,j$. It is natural to force the optimizer to have large entry values for those vertex pairs in which we have high confidence to be in the same community and vice versa. Therefore, we propose the CSDP approach to integrate the information provided by the semi-supervised approach. If node i and node j are revealed to have the same label, we add the constraint $X_{ij} = 1$ to the optimization model. If they are revealed to have the opposite labels, we add $X_{ij} = -1$. Formally, the CSDP is defined as \begin{equation}\CSDP(M_{n\times n}) = \max\{\la M,X\ra: X \in \text{elliptope}_n,~ X_{ij} = x_i \cdot x_j ~\forall i,j \in \mathcal{R}\}\end{equation} where $\mathcal{R}$ denotes the collection of revealed nodes. After reordering the indices, we can assume it as $\{1,2,\dots,\frac{m}{2}\} \bigcup \{\frac{n}{2}, \frac{n}{2} + 1, \dots, \frac{n + m}{2}\}$. It is worth noting that the optimization remains a positive semidefinite programming problem, which can be solved efficiently, for example by interior point methods \citep{Alizadeh1995InteriorPM}. Then let $\mathcal{S}^{n-1} \coloneqq \{v\in \mathbb{R}^n: \|v\|_2 = 1\}$ be the unit $(n-1)$-sphere and $\sigma = (\sigma_1,\sigma_2,\dots,\sigma_n) \in (\mathcal{S}^{n-1})^n$. Consider the CSDP in the form of Burer-Monteiro factorization. We have the following identities: \begin{equation} \SDP(M) = \max\left\{\sum_{i,j = 1}^n M_{ij}\la\sigma_i,\sigma_j \ra: \sigma_i \in \mathcal{S}^{n-1} ~\forall i \in [n]\right\}, \end{equation} \begin{equation} \CSDP(M) = \max_{\sigma \in (\mathcal{S}^{n-1})^n}\left\{\sum_{i,j = 1}^n M_{ij}\la\sigma_i,\sigma_j \ra: \sigma_i^\top\sigma_j = x_ix_j ~\forall i,j \in \mathcal{R}\right\}\end{equation} \begin{equation}\label{eqn:CSDP decomp} = \max_{\sigma \in (\mathcal{S}^{n-1})^n} \left\{ \sum_{i,j \in [n]\setminus\mathcal{R}} M_{ij} \sigma_i^\top \sigma_j + \sum_{i,j \in \mathcal{R}} M_{ij} x_ix_j + 2\sum_{i\in \mathcal{R}}\sum_{j\in [n]\setminus\mathcal{R}} x_i M_{ij}\sigma_0^\top\sigma_j \right\} \end{equation} where $\sigma_0 \equiv x_i\sigma_i$, $\forall i \in \mathcal{R}$. Now one can consider an alternative matrix with a special margin denoting the algebraic sum of the blocks from $M$ that are associated with $\mathcal{R}$. 
We define $M^\text{agg}$ to be the $(n-m+1) \times (n-m+1)$ symmetric matrix indexed from 0 that \begin{align} M^\text{agg}_{00} &= \sum_{i,j \in \mathcal{R}} M_{ij} x_ix_j,\\ M^\text{agg}_{0j} &= \sum_{i\in \mathcal{R}} x_i M_{i,j+\frac{m}{2}}\qquad\forall j \in [\frac{n}{2} - \frac{m}{2}],\\ M^\text{agg}_{0j} &= \sum_{i\in \mathcal{R}} x_i M_{i,j+m}\qquad\forall j \in [n - m]\setminus[\frac{n}{2} - \frac{m}{2}],\\ M^\text{agg}_{ij} &= M_{i+\frac{m}{2},j+\frac{m}{2}}\qquad\forall i,j \in [\frac{n}{2} - \frac{m}{2}],\\ M^\text{agg}_{ij} &= M_{i+m,j+m}\qquad\forall i,j \in [n - m]\setminus[\frac{n}{2} - \frac{m}{2}]. \end{align} Essentially, we aggregate the rows and columns related to revealed vertices according to their communities into the $0$-th row and column and reindex the matrix. It introduces spikiness to the underlying matrix, \begin{equation}M^\text{agg}= \left(\begin{array}{@{}c|c@{}} \sum_{i,j \in \mathcal{R}} M_{ij} x_ix_j & \begin{matrix} M^\text{agg}_{01} & M^\text{agg}_{02} & \cdots & M^\text{agg}_{0,n-m} \end{matrix} \\ \hline \begin{matrix} M^\text{agg}_{01} \\ M^\text{agg}_{02} \\ \vdots \\ M^\text{agg}_{0,n-m} \end{matrix} & M_\mathcal{R^\complement} \end{array}\right). \end{equation} Although \citep{montanari2015semidefinite} employed a rather different technique to study SDPs, they also noticed that the critical change comes with such built-in structures, where the authors state "We expect the phase transition in $\SDP(\lambda vv^\top + W)/n$ to depend---in general---on the vector $v$, and in particular on how ‘spiky’ this is". Combining the transformed input matrix with equation~\eqref{eqn:CSDP decomp}, we conclude that CSDP is indeed an SDP regarding $M^\text{agg}$, \begin{align} \label{eqn:CSDP} \CSDP(M) &= \max_{\substack{\sigma_i \in \mathcal{S}^{n-m}\\ i = 0,1,\dots,n-m}} \left\{ \sum_{i,j \in [n - m]} M^\text{agg}_{ij} \sigma_i^\top \sigma_j + M^\text{agg}_{00} + 2\sum_{j\in [n - m]} M^\text{agg}_{0j}\sigma_0^\top\sigma_j \right\}\\ &= \SDP(M^\text{agg}). \end{align} \begin{lemma} Let $M_{\mathcal{R}^\complement}$ be the principle submatrix of $M$ obtained by removing the rows and columns associated with $\mathcal{R}$. The following inequalities hold, \begin{equation}\SDP(M_{\mathcal{R}^\complement}) \leq \CSDP(M) - M^\text{agg}_{00}.\end{equation} \end{lemma} \begin{proof} Let $X^*$ be the optimizer of $\SDP(M_{\mathcal{R}^\complement})$. Define its $(n - m + 1) \times (n - m + 1)$ extension $\hat{X^*}$ as \begin{equation} \hat{X}^*_{ij} = \begin{cases} 1 & i=j=0,\\ 0 &~ i \in [n-m],~ j =0,\\ 0 &~ j \in [n-m],~ i = 0,\\ X^*_{ij} &~\text{otherwise}.\\ \end{cases} \end{equation} Due to the identity from above and the fact that $\hat{X^*}\in \text{elliptope}_{n-m+1}$ is feasible, we can conclude that \begin{equation}\CSDP(M) = \SDP(M^\text{agg}) \geq \la \hat{X^*}, M^\text{agg} \ra = \SDP(M_{\mathcal{R}^\complement}) + M^\text{agg}_{00}.\end{equation} \end{proof} So far, all the results are deterministic, $M$ can be an arbitrary symmetric matrix, and $\mathcal{R}$ can be any balanced index set. Next, we will consider $M = \tilde{A} \coloneqq A - \frac{d}{n} \mathbf{1}\mathbf{1}^\top$ to study CSDPs on probabilistic models. \begin{remark} As shown in the Lemma \ref{lemma:trueLabel}, $\tilde{A}^\text{agg}_{00} \geq m\cdot\frac{a-b}{2} \geq 0$ with high probability. By definition, we have $\CSDP(\tilde{A}) \leq \SDP(\tilde{A})$. 
So, with high probability, \begin{equation}\label{eqn: sandwich} \SDP(\tilde{A}_{\mathcal{R}^\complement}) \leq \CSDP(\tilde{A}) \leq \SDP(\tilde{A}). \end{equation} The CSDP always lies in between the SDPs of the original adjacency matrix and the submatrix of unrevealed vertices. Moreover, if $\tilde{A} \sim \G(n,\frac{a}{n},\frac{b}{n})$, we have $ \tilde{A}_{\mathcal{R}^\complement}\sim \G(n - m,\frac{a(1-\rho)}{n - m},\frac{b(1-\rho)}{n - m})$. It is worth mentioning that although $\tilde{A}_{\mathcal{R}^\complement}$ is just a submatrix of the original centered adjacency matrix, its probabilistic distribution as a random matrix is not simply changed from $n$ nodes to $n-m$ nodes. The edge probability parameters are also changed by a factor of $(1-\rho)$. It leads to some technical challenges, which we are going to handle later. But intuitively, from the asymptotic behavior of SDP, we can derive a rough understanding of CSDP as $n\to \infty$. Recall that the phase transition theory tells us that when SNR $\leq$ 1, the optimal value of SDP for SBM will not be large enough to distinguish from the optimal value of SDP for ERM. Therefore, the order of the above quantities from inequality \eqref{eqn: sandwich} suggests that semi-supervised SDP can not help to increase the statistics associated with SBM. The best one can hope for is that it will make the statistics associated with ERM smaller by a factor depending on $\rho$. This turns out to be enough for community detection. \end{remark} Recall the community detection problem can be formalized as a binary hypothesis testing problem, whereby we want to determine, with a high probability of success, whether the random graph under consideration has a community structure or not. As discussed in Section \ref{summary}, we introduce semi-supervised learning to the problem by revealing a part of the labels involved in the random graph's generating process. Namely, if the labels associated with a graph $G$ over $n$ vertices are denoted as $x$, we choose $m$ of them uniformly at random and denote the index set by $\mathcal{R}$, such that $\sum_{i\in\mathcal{R}} x_i = 0$. Given a realization of the random graph $G$ \textit{and the revealed labels $x_\mathcal{R}$}, we want to decide which of the following holds, \begin{itemize}[leftmargin=1in] \item[Hypothesis 0:] $(G,x_\mathcal{R}) \sim \G(n,\frac{d}{n}, \rho)$ is an Erdős–Rényi random graph with edge probability $\frac{d}{n}$, $d = \frac{a+b}{2}$ and reveal ratio $\rho$. We denote the corresponding distribution over graphs by $\prob_0$. \item[Hypothesis 1:] $(G,x_\mathcal{R}) \sim \G(n, \frac{a}{n}, \frac{b}{n}, \rho)$ is a planted bisection random graph with edge probabilities $(\frac{a}{n}, \frac{b}{n})$ and reveal ratio $\rho$. We denote the corresponding distribution over graphs by $\prob_1$. \end{itemize} A statistical test $T$ is a function defined on the graphs and revealed labels with range $\{0,1\}$. It succeeds with a high probability if \begin{equation}\prob_0(T(G, x_\mathcal{R}) = 1) + \prob_1(T(G, x_\mathcal{R}) = 0) \to 0 \quad (n\to\infty).\end{equation} Note that this is indeed a generalization of the unsupervised community detection. Simply looking at the labels, the two models are indistinguishable. What characterizes their difference is the probabilistic law of how edges are generated, i.e.,\ whether there is a cluster structure. The revealed labels serve as an enhancement of the graph observed. 
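To make the testing procedure concrete, the following sketch (our illustration only; \texttt{cvxpy} is assumed, and the names \texttt{csdp} and \texttt{csdp\_test} are hypothetical) adds the revealed-pair equality constraints to the SDP of the previous subsection and evaluates the statistic of Theorem~\ref{thm:CSDP}. It lists the $O(m^2)$ constraints directly for readability; an equivalent and more economical formulation works with the aggregated matrix $M^\text{agg}$ defined earlier in this subsection.
\begin{verbatim}
# Minimal sketch (illustrative only) of the CSDP and the test T(G, x_R; Delta).
import numpy as np
import cvxpy as cp

def csdp(A_tilde, x_tilde):
    """max <A_tilde, X> over the elliptope with X_ij = x_i x_j on revealed pairs."""
    n = A_tilde.shape[0]
    revealed = np.where(x_tilde != 0)[0]
    X = cp.Variable((n, n), PSD=True)
    constraints = [cp.diag(X) == 1]
    for i in revealed:                      # O(m^2) equality constraints
        for j in revealed:
            if i < j:
                constraints.append(X[i, j] == int(x_tilde[i] * x_tilde[j]))
    problem = cp.Problem(cp.Maximize(cp.sum(cp.multiply(A_tilde, X))), constraints)
    problem.solve(solver=cp.SCS)
    return problem.value, X.value

def csdp_test(A, x_tilde, a, b, delta):
    """Return 1 iff CSDP(A - (d/n) 11^T) >= n[(a - b)/2 - delta]."""
    n = A.shape[0]
    d = (a + b) / 2.0
    A_tilde = A - (d / n) * np.ones((n, n))
    value, _ = csdp(A_tilde, x_tilde)
    return int(value >= n * ((a - b) / 2.0 - delta))
\end{verbatim}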
The phase transition theory says that under the unsupervised setting (the special case when $\rho = 0$), no test can succeed with high probability when SNR $\leq 1$, or equivalently, $a-b \leq \sqrt{2(a+b)}$. While if $\text{SNR}> 1$, several polynomially computable tests are developed. The SDP-based test is nearly optimal, in the sense that it requires \begin{equation}\frac{a-b}{\sqrt{2(a+b)}}\geq 1+\epsilon(d)\end{equation} where $\epsilon(d) \to 0$ as $d\to \infty$. It is believed to be the best that SDPs can reach. As the monotone-robustness study suggests \citep{Moitra2016HowRA}, this gap may be necessary, since SDP is indeed solving a harder problem where no algorithm can approach the threshold. However, we are going to see that when $\rho$ is sufficiently large, SDPs can not only reach but cross the threshold. \subsection{Semi-Supervised Detectability} With the problem and algorithm defined clearly, we are ready to prove that SBM and ERM can be consistently distinguished in the semi-supervised setting. We take a `divide and conquer' approach to establish an upper bound of CSDP on ERM, while we bound the CSDP on SBM from below with a witness that consists of the ground truth of labels, $X = xx^\tp$. \begin{lemma} \label{lemma:trueLabel} Let $(A, x)$ obey the planted bisection model $G(n,\frac{a}{n}, \frac{b}{n})$ and denote $\la xx^\top, \tilde{A} \ra$ as $Y$. Then, for any $\epsilon > 0 $, we have $Y/n \in [\frac{a-b}{2} - \epsilon, \frac{a-b}{2} + \epsilon]$ with probability converging to one as $n\to \infty$. \end{lemma} \begin{proof} \begin{align} Y&= \la xx^\top, \tilde{A} \ra = \la xx^\top, A \ra - \frac{d}{n}\la xx^\top, \1\1^\tp \ra \\ &\overset{d}{=} 2\cdot\left[\text{Bin}\left(\left(\frac{n}{2}\right)^2 - \frac{n}{2},~ \frac{a}{n}\right) - \text{Bin}\left(\left(\frac{n}{2}\right)^2,~ \frac{b}{n}\right)\right]. \end{align} We have $\E Y = \frac{n}{2}(a-b) - a$ and \begin{equation}\V Y = 4\left(a\left(\frac{n}{4} - \frac{1}{2}\right)\left(1-\frac{a}{n}\right) + b~\frac{n}{4}\left(1-\frac{b}{n}\right)\right)\leq n(a+b).\end{equation} Then Chebyshev's inequality implies that for any $\delta \in (0,1)$, \begin{align} &\prob\left(|Y - \frac{n}{2}(a-b) + a| \geq \sqrt{n(a+b)} \cdot n^{(1-\delta)/2}\right) \leq \frac{1}{n^{1-\delta}}\\ &\implies\quad \prob\left(|\frac{Y}{n} - \frac{a-b}{2} + \frac{a}{n}| \geq \frac{\sqrt{a+b}}{n^{\delta/2}}\right) \leq \frac{1}{n^{1-\delta}}.\\ \end{align} Hence, for sufficiently large $n$, we have \begin{equation}\prob\left(\frac{Y}{n} \geq \frac{a-b}{2} + \epsilon\right) + \prob\left(\frac{Y}{n} \leq \frac{a-b}{2} - \epsilon\right) \leq \frac{1}{n^{1-\delta}}.\end{equation} Therefore, \begin{equation}\prob\left(\frac{Y}{n}\in [\frac{a-b}{2} - \epsilon, \frac{a-b}{2} + \epsilon]\right) \geq 1 - \frac{1}{n^{1-\delta}}.\end{equation} \end{proof} Besides bounding the outcomes on the SBM from below, this lemma can also be applied to the `all revealed blocks' to estimate $\tilde{A}^\text{agg}_{00}$, which is used several times throughout our proofs. \begin{lemma}\label{lemma:sbmLower} Let $G \sim \G(n,\frac{a}{n},\frac{b}{n})$, $d = \frac{a+b}{2}$ and $\tilde{A} = A - \frac{d}{n}\1\1^\tp$ be its centered adjacency matrix. 
Then for any $\epsilon>0$ and $\gamma > 0$, with probability at least $1 - \frac{1}{n^{1-\gamma}}$, for all $n\geq n_0(a,b,\epsilon,\gamma)$, we have
\begin{equation}\CSDP(\tilde{A}) \geq n\left(\frac{a-b}{2} - \epsilon\right).\end{equation}
\end{lemma}
\begin{proof}
We prove the lower bound by considering a witness of the constrained optimization problem. Notice that $xx^\tp$ is feasible for both the SDP and the CSDP, where $x$ is the label vector associated with $G$. Therefore,
\begin{equation}
\CSDP(\tilde{A}) \geq \la xx^\tp, \tilde{A} \ra.
\end{equation}
Then, we can apply Lemma \ref{lemma:trueLabel} to get the result.
\end{proof}
This result holds for any $\text{SNR} >0$ and suggests the following test for the semi-supervised community detection problem:
\begin{equation}
T(G, x_\mathcal{R};\Delta) = \begin{cases}
1 \quad\quad& \text{if~} \CSDP(\tilde{A}) \geq n[(a-b)/2 - \Delta],\\
0 \quad\quad& \text{otherwise}.
\end{cases}
\end{equation}
The following lemma bounds the CSDP of the ERM from above. Intuitively, the contribution of the blocks of the adjacency matrix whose rows or columns are associated with revealed nodes concentrates well around zero. So the `effective dimension' of the SDP is reduced, and hence so is the optimal value.
\begin{lemma}[Theorem 1, \citep{montanari2015semidefinite}. Reformulated.]\label{lemma:SDP}
Let $G \sim \G(n,\frac{d}{n})$ and $\tilde{A} = A - \frac{d}{n}\1\1^\tp$ be its centered adjacency matrix. There exist absolute constants $C$ and $d_0 > 1$ such that if $d \geq d_0 $, then with high probability,
\begin{equation}\frac{1}{n\sqrt{d}}\SDP(\tilde{A}) \leq 2 + \frac{C \log d}{d^{1/10}}.\end{equation}
\end{lemma}
This result is rigorously derived with profound insights from mathematical physics. However, there is an implicit condition on the average degree $d$ in the proof. It is common to assume at least $d>1$ in the literature concerning unsupervised clustering because otherwise the graph has no giant component, not to mention reconstruction, as discussed in Section \ref{topo}. But our approach leads to a subgraph with a possibly small effective average degree. Moreover, we do not want to be limited by the topological structure, although it is indeed a fundamental limit in the unsupervised setting. Theorem \ref{thm:CSDP} shows that semi-supervised SDPs are capable of integrating those sublinear components. To achieve that, we resort to Grothendieck's inequality and carry out the analysis without assumptions on $d$.
\begin{theorem}[Grothendieck's inequality \citep{Grothendieck1996RsumDL}]
Let $M$ be an $n\times n$ real matrix. Suppose that for any $s, t \in \{-1,1\}^n$,
\begin{equation}\label{eqn:infty21norm}
\big| \sum_{i,j} M_{ij}s_i t_j\big| \leq 1.
\end{equation}
Then for all vectors $X_i, Y_i \in \{x \in \mathbb{R}^n: \|x\|_2 \leq 1\},~ i = 1,2,\dots,n$, we have
\begin{equation}\label{Grothendieck}
\big|\sum_{i,j} M_{ij} \la X_i, Y_j\ra\big| \leq K_\text{G}.
\end{equation}
\end{theorem}
Here $K_\text{G}$ is an absolute constant called Grothendieck's constant. The inequality was initially proved as a fundamental tool in functional analysis. In this paper, we focus exclusively on the above matrix version in $\mathbb{R}^n$ and consider the following suboptimal estimate derived by \citep{Braverman2011TheGC},
\begin{equation}
K_\text{G} < \frac{\pi}{2\ln(1+\sqrt{2})} < 1.783.
\end{equation}
Note that if we restrict the vectors $X_i$ and $Y_i$ to the unit sphere $\mathcal{S}^{n-1}$, the inequality still holds, since an inequality valid for a set also holds for any of its subsets.
Since $s$ and $t$ are arbitrary, the left-hand side of inequality \eqref{eqn:infty21norm} is indeed the $\ell_\infty \to \ell_1$ norm of matrix $M$, which is \begin{equation} \|M\|_{\infty \to 1} = \max_{\|x\|_\infty\leq 1}\|Mx\|_1 = \max_{s,t\in \{-1,1\}^n} s^\top M t = \max_{s,t\in \{-1,1\}^n}\big| \sum_{i,j} M_{ij}s_i t_j\big|. \end{equation} This norm is also known as the cut norm, whose importance in algorithmic problems is well understood in the theoretical computer science community. With the elliptope definition of SDP from equation \eqref{eqn:SDP_} and the consequential factorization of $X$ in equation \eqref{BMF}, we can rewrite the theorem in the following matrix form. \begin{lemma}\label{lemma:GI} For arbitrary matrix $M \in \mathbb{R}^{n\times n}$, we have \begin{equation} \SDP(M) \leq \max_{X \in \text{elliptope}_n} \big| \la M, X \ra\big| \leq K_\text{G} \|M\|_{\infty \to 1}. \end{equation} \end{lemma} Next, we use Bernstein's inequality to establish a probabilistic bound on the cut norm of $A - \E A$ where $A$ is the adjacency matrix of $\G(n,\frac{d}{n})$. \begin{theorem}[Bernstein's inequality \citep{Bernstein}] Let $\{X_i\}_{i = 1}^n$ be independent random variables such that $\E X_i = 0$ and $|X_i| \leq M$ for any $i \in [n]$. Denote the average variance as $\sigma^2 = \frac{1}{n}\sum_{i=1}^n \V (X_i)$. Then for any $t \geq 0$, \begin{equation} \prob \left(\frac{1}{n} \sum_{i=1}^n X_i > t \right) \leq \exp\left(-\frac{n t^2/2}{\sigma^2+\frac{Mt}{3}}\right). \end{equation} \end{theorem} \begin{lemma}\label{lemma:cutNormBound} Let $A$ be the adjacency matrix of an ERM, $\G(n,\frac{d}{n})$. Then, with probability at least $1-5^{-n+2}$, \begin{equation} \|A - \E A\|_{\infty \to 1} \leq 6(1+d)n. \end{equation} \end{lemma} \begin{proof} According to the identity from inequality \eqref{eqn:infty21norm}, we want to bound \begin{align}\label{eqn:idSum} \|A - \E A\|_{\infty \to 1} &= \max_{s,t\in \{-1,1\}^n} \sum_{i,j}(A - \E A)_{ij}~s_i t_j\\ &= \max_{s,t\in \{-1,1\}^n} \sum_{i<j} (A - \E A)_{ij}~(s_i t_j + s_j t_i). \end{align} For fixed $s,t\in \{-1,1\}^n$, denote \begin{equation} X_{ij} = (A - \E A)_{ij}~(s_i t_j + s_j t_i) \quad (1 \leq i < j \leq n). \end{equation} Then we have $\E X_{ij} = 0$, $|X_{ij}| \leq 2$ and $\V (X_{ij}) \leq 4 \frac{d}{n}$ for any $i<j$. There are totally $n(n-1)/2$ of $\{X_{ij}\}$'s. And they are independent by the definition of ERM. So Bernstein's inequality implies \begin{equation} \prob\left(\frac{2}{n(n-1)}\sum_{i<j}X_{ij}>t\right) \leq \exp\left(-\frac{n(n-1) t^2/4}{\frac{4d}{n}+\frac{2t}{3}}\right). \end{equation} Let $t = 12(1+d)/n$, which guarantees $4d/n+2t/3 < t$. Hence, \begin{equation} \prob\left(\sum_{i<j}X_{ij}>6(1+d)n\right) \leq \exp\left(-3(n-1)\right). \end{equation} Apply the union bound to all $2^{2n}$ possible $(s,t)$, we have \begin{equation} \prob \left(\max_{s,t\in \{-1,1\}^n} \sum_{i<j} (A - \E A)_{ij}~(s_i t_j + s_j t_i) >6(1+d)n \right) \leq 2^{2n} \cdot e^{-3(n-1)}. \end{equation} We conclude the proof with the identity of $\ell_\infty \to \ell_1$ norm and the fact that the right-hand side of the above inequality is less than $5^{-n+2}$. \end{proof} Since the distribution of each entry in the matrix changes as $n\to \infty$, we now develop a slightly generalized version of the weak law of large numbers fitting for our purpose. We use the superscripts to explicitly denote dependence on $n$. \begin{lemma} \label{lemma:WLLN} For any $n$, let $\{X_i^{(n)}\}_{i = 1}^n$ be a collection of independent random variables. 
Assume there exist universal constants $\mu$ and $\sigma$, such that $\E X^{(n)}_i \leq \mu < \infty$ and $\V (X^{(n)}_i) \leq \sigma^2 < \infty$ for any $n \in \mathbb{N}$ and $i \leq n$. If we denote the sample mean as \begin{equation} \bar{X}^{(n)} = \frac{X_1^{(n)} + X_2^{(n)}+\dots+X_n^{(n)}}{n}, \end{equation} then, for any $\epsilon > 0$, \begin{equation} \prob \big(\bar{X}^{(n)} \geq \mu + \epsilon\big) \to 0\quad \text{as}\quad n\to\infty. \end{equation} \end{lemma} \begin{proof} For any $n\in\mathbb{N}$, we have \begin{align} \V (\bar{X}^{(n)}) &= \frac{1}{n^2} \V (X_1^{(n)} + X_2^{(n)}+\dots+X_n^{(n)}) \\ &= \frac{\sum_{i = 1}^n \V(X_i^{(n)})}{n^2} & \text{(by independence)}\\ &\leq \sigma^2/n. &\text{(by uniform boundedness)} \end{align} Then Chebyshev's inequality ensures \begin{align} &\prob\big(|\bar{X}^{(n)} - \E \bar{X}^{(n)}|\geq \epsilon\big)\leq \frac{\sigma^2}{n \epsilon^2}\\ \implies & \prob\big(\bar{X}^{(n)} \geq \frac{1}{n}\sum_{i = 1}^n \E X_i^{(n)} + \epsilon\big)\leq \frac{\sigma^2}{n \epsilon^2}\\ \implies & \prob\big(\bar{X}^{(n)} \geq \mu + \epsilon\big)\leq \frac{\sigma^2}{n \epsilon^2}. \end{align} \end{proof} \begin{remark} Compared with a standard large deviation theory, this result allows the random variables to not be identically distributed. And more importantly, the distributions can depend on $n$. Furthermore, the random variables associated with different $n$ are not necessary to be independent. \end{remark} \begin{lemma}\label{lemma:boundMargin} Let $G \sim \G(n,\frac{d}{n})$, $x$ be the labels, $\mathcal{R}$ be the revealed indices and $\tilde{A} = A - \frac{d}{n}\1\1^\tp$ be its centered adjacency matrix. Define \begin{equation} B_{ij} =\begin{cases} \sum_{i,j \in \mathcal{R}} \tilde{A}_{ij} x_ix_j \quad &i=j=0,\\ \sum_{k\in \mathcal{R}} x_k\tilde{A}_{kj} \quad & i = 0,~j \in [n]\setminus \mathcal{R},\\ \sum_{k\in \mathcal{R}} x_k\tilde{A}_{ik} \quad & j = 0,~i \in [n]\setminus \mathcal{R},\\ 0 \quad & \text{otherwise}. \end{cases}\end{equation} Then for any $\epsilon >0$, with high probability, \begin{equation} \SDP(B)\leq 2dm(1-\frac{m}{n}) + (2n - m)\epsilon. \end{equation} \end{lemma} \begin{proof} Notice that for any feasible $X$ of the above optimization problem, we have $X\succeq0, X_{ii}=1 ~\forall i \in [n+1]$. So, for any $i,~j \in [n+1]$, \begin{equation} (\mathbf{e}_i \pm \mathbf{e}_j)^\tp X (\mathbf{e}_i \pm \mathbf{e}_j) = 2 \pm 2X_{ij} \geq 0 \quad \implies \quad |X_{ij}| \leq 1. \end{equation} Therefore, \begin{align} \SDP(B) &= \max\{\la B,X\ra: X \in \text{elliptope}_{n+1}\}\\ &=B_{00} + 2\max\left\{\sum_{j\in [n]\setminus \mathcal{R}} B_{0j}X_{0j}: X \in \text{elliptope}_{n+1}\right\}\\ &\leq B_{00} + 2\sum_{j\in [n]\setminus \mathcal{R}} |B_{0j}|. \end{align} Note that $\{B_{0j}: j\in [n]\setminus \mathcal{R}\}$'s are independent random variables. Moreover, if we let $B_1,~B_2$ be two independent binomial random variables with the same parameter $(\frac{m}{2}, \frac{d}{n})$ and denote their difference as $Z\coloneqq B_1-B_2$, we have $B_{0j} \overset{\text{d}}{=}Z$ for any $j\in [n]\setminus \mathcal{R}$ with $\E Z = 0$ and $\V Z \leq d\frac{m}{n}$. Since $Z^2 \geq |Z|$, we have \begin{align} &\E |Z| \leq \E (Z^2) = \V Z \leq d\frac{m}{n},\\ &\V |Z| = E (Z^2) - (\E |Z|)^2\leq \V Z \leq d\frac{m}{n}. \end{align} Then Lemma \ref{lemma:WLLN} can be applied to \begin{equation} \bar{X}^{(n)} \coloneqq \frac{\sum_{j\in [n]\setminus \mathcal{R}} |B_{0j}|}{n-m}. 
\end{equation} So, for any $\epsilon>0$, we have \begin{equation} \lim_{n\to \infty} \prob \left(\frac{1}{ n-m}\sum_{j\in [n]\setminus \mathcal{R}} |B_{0j}|>d\frac{m}{n} + \epsilon\right) = 0. \end{equation} Hence, $\sum_{j\in [n]\setminus \mathcal{R}} |B_{0j}|\leq (n-m)(d\frac{m}{n}+\epsilon)$ with high probability. Lemma \ref{lemma:trueLabel} implies, with high probability, \begin{equation}B_{00} \leq \epsilon m.\end{equation} Combining the above results with the union bound completes the proof. \end{proof} Returning to the semi-supervised SDP, based on the notions from Section \ref{CSDP def}, we consider the following decomposition of the transformed input matrix $M^\text{agg}$ with the unrevealed part and revealed part as \begin{equation}\label{eqn:decomp} M^\text{agg} = M^{(\mathcal{R}^\complement)} + M^{(\mathcal{R})} \end{equation} where we define \begin{equation} M^{(\mathcal{R})}_{ij} = \begin{cases} M^\text{agg}_{ij} &\quad i = 0 \text{~or~} j = 0,\\ 0 &\quad \text{otherwise}. \end{cases} \end{equation} To prove the main result of semi-supervised SDP, we control the $M^{(\mathcal{R}^\complement)}$ part by Grothendieck's inequity and bound the contribution of $M^{(\mathcal{R})}$ with the generalized law of large numbers shown above. \bigbreak \begin{proof}[Proof of Theorem \ref{thm:CSDP}] Notice that Lemma \ref{lemma:sbmLower} guarantees the test to succeed under the SBM. We only need to show, under ERM, \begin{equation}\label{ubgoal} \CSDP(\tilde{A}) < n[(a-b)/2 - \Delta] \quad \text{(w.h.p.)}. \end{equation} According to the identity from equation \eqref{eqn:CSDP}, we have \begin{align} \CSDP(\tilde{A}) &= \SDP(\tilde{A}^\text{agg})\\ &= \max\{\la \tilde{A}^\text{agg},X\ra: X \in \text{elliptope}_n\}\\ &=\max\{\la \tilde{A}^{(\mathcal{R}^\complement)} + \tilde{A}^{(\mathcal{R})},X\ra: X \in \text{elliptope}_n\}\\ &\leq \SDP(\tilde{A}^{(\mathcal{R}^\complement)}) + \SDP(\tilde{A}^{(\mathcal{R})}). \end{align} Recall that $\tilde{A}_{\mathcal{R}^\complement}$ is the principal submatrix of $\tilde{A}$ obtained by removing the rows and columns associated with $\mathcal{R}$. By definition, we have $\SDP(\tilde{A}_{\mathcal{R}^\complement}) = \SDP(\tilde{A}^{(\mathcal{R}^\complement)})$. Under the null hypothesis, $\tilde{A}_{\mathcal{R}^\complement}$ has the same distribution as the centered adjacency matrix associated with $\G(n-m,\frac{(1-\rho)d}{n-m})$. Also, \begin{align} \SDP \left(\tilde{A}^{(\mathcal{R}^\complement)} \right) &= \SDP\left(A_{\mathcal{R}^\complement} - \E A_{\mathcal{R}^\complement} - \frac{(1-\rho)d}{n-m} I_{n-m}\right)\\ &=\SDP\left(A_{\mathcal{R}^\complement} - \E A_{\mathcal{R}^\complement} \right) - (1-\rho)d. \end{align} Now we apply Grothendieck's inequality and Lemma \ref{lemma:cutNormBound}. With probability at least $1-5^{-(1-\rho)n+2}$, \begin{equation} \SDP \left(\tilde{A}^{(\mathcal{R}^\complement)} \right) \leq 6K_\text{G}[1+(1-\rho)d](n-m) < 12(1+d)(1-\rho)n. \end{equation} Combining above estimations and the result from Lemma \ref{lemma:boundMargin} with $\epsilon = d(1-\rho)^2$, we have \begin{equation} \frac{1}{n}\CSDP(\tilde{A}) \leq 14(1-\rho)(1+d) \quad \text{(w.h.p.)}. \end{equation} Hence, Equation \eqref{ubgoal} holds with $\Delta = (a-b)/40$ and $\rho \geq \rho_0 = 1 - \frac{a-b}{30(1+d)}$. We conclude, if $\frac{m}{n} \geq \rho _0$, \begin{equation}\prob_0 (T(G, x_\mathcal{R}) = 1) \to 0 \quad (n\to \infty).\end{equation} \end{proof} \begin{remark} If $\rho = 0$, CSDP is naturally reduced to SDP. 
Hence, it shares the same capability to solve the (unsupervised) community detection problem when $\text{SNR} > 1$ as stated in Theorem \ref{MS}. Although the analysis above cannot be directly generalized to a vanishing $\rho \to 0$ situation, CSDP provides a new perspective for further study on the optimality of SDP. \end{remark} \begin{remark} We believe that it is possible to reduce the requirement on $\rho$ through a more involved analysis. Empirically, $\rho$ does not need to be close to 1 for a clear improvement in the result. For instance, our simulation in the next section shows that the phase transition disappears with $20\%$ of the nodes revealed. \end{remark} \section{Numerical Experiments}\label{numexp} We include some simulation results below. $\rho \in [0,1]$ is the ratio of revealed labels. Results associated with unsupervised SDPs are identified as $\rho = 0$. As discussed in Section \ref{Census method}, to make the comparison fair and keep the problem meaningful, all overlaps are restricted to the unrevealed labels. \begin{figure}[htbp] \centering\includegraphics[width=0.95\textwidth]{overlap_SNR_.png} \caption{Disappearance of the phase transition.} \label{fig:disappear} \end{figure} Each point in Figure \ref{fig:disappear} represents one realization of an SBM with $n=1000$. The dashed line stands for the KS and information-theoretic threshold. The graphs are shared by both the unsupervised and the semi-supervised SDPs. Labels are identified by applying the k-means method to the corresponding optimizers. Overlaps of the unsupervised SDP essentially drop to zero on the left-hand side. In contrast, with $20\%$ of the labels revealed, the overlap achieved by our constrained SDP algorithm decreases gradually as the SNR decreases and remains substantially greater than zero even when $\text{SNR}\leq 1$. \begin{figure}[ht] \begin{subfigure}{0.43\textwidth} \centering\includegraphics[width=\textwidth]{unsKS.png} \end{subfigure}\hfill \begin{subfigure}{0.43\textwidth} \centering\includegraphics[width=\textwidth]{semKS.png} \end{subfigure}\hfill \begin{subfigure}{0.1\textwidth} \centering\includegraphics[width=\textwidth]{hmbar1.png} \end{subfigure} \caption{Overlap heatmaps of the unsupervised (left) and the semi-supervised (right) SDPs. The coordinates correspond to the model parameters $a$ and $b$. The solid line represents the KS and information-theoretic threshold. The dashed line corresponds to $a=b$.} \label{fig:phase} \end{figure} \vspace{1cm} Theorem \ref{KS} guarantees that the upper left corner of the left image will be completely dark as $n\to \infty$. But we see that semi-supervised SDPs successfully `light up' the area between the two reference lines, see Figure~\ref{fig:phase}. Moreover, when $n$ is sufficiently large, there will be no pixel with a value of 0. Figure \ref{fig:optX} shows color-coded entry values of the optimizer $X^*$ in different settings and suggests that the representation of the underlying community structure is significantly enhanced by the semi-supervised approach, while no such structure is introduced when none is present.
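The label-extraction and comparison steps above can be summarized by the following rough sketch (one possible implementation of the k-means post-processing described above, not a verbatim excerpt of our code; the names \texttt{X\_opt}, \texttt{x\_true} and \texttt{revealed} are placeholders for the solver output, the ground-truth labels and the revealed index set $\mathcal{R}$, and the agreement measure below is a proxy for the overlap, up to the usual affine normalization):
\begin{verbatim}
import numpy as np
from sklearn.cluster import KMeans

def labels_from_optimizer(X_opt, seed=0):
    # Cluster the rows of the optimizer X* into two groups, mapped to +/-1.
    km = KMeans(n_clusters=2, n_init=10, random_state=seed).fit(X_opt)
    return 2 * km.labels_ - 1

def overlap_unrevealed(x_hat, x_true, revealed):
    # Agreement on the unrevealed coordinates, maximized over the global
    # sign flip (labels are only identifiable up to a global sign).
    mask = np.ones(len(x_true), dtype=bool)
    mask[list(revealed)] = False
    agree = np.mean(x_hat[mask] == x_true[mask])
    return max(agree, 1.0 - agree)
\end{verbatim}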
\begin{figure}[p] \begin{subfigure}{0.48\textwidth} \centering\includegraphics[width=\textwidth]{USDP.png} \end{subfigure}\hfill \begin{subfigure}{0.48\textwidth} \centering\includegraphics[width=\textwidth]{SSDP.png} \end{subfigure}\hfill \begin{subfigure}{0.48\textwidth} \centering\includegraphics[width=\textwidth]{SERM.png} \end{subfigure}\hfill \begin{subfigure}{0.48\textwidth} \centering\includegraphics[width=0.28\textwidth]{hmbar2.png} \end{subfigure} \caption{Visualization of the optimizer $X^*$. The upper row is concerned with one realization of the SBM $\G(1000, 12/1000, 5/1000)$, where the left image shows the values of the optimizer for the unsupervised SDP and the right image is associated with the semi-supervised SDP with $\rho = 0.2$. The lower left image is the optimizer for one realization of the ERM of the same size with the associated average degree $d = 8.5$, whose indices are reordered so that the entries related to revealed labels are gathered in the four corners. It can be understood as the situation of the null hypothesis defined in Section \ref{CSDP def}.} \label{fig:optX} \end{figure} To see how this better representation leads to a successful test that is otherwise impossible, we consider the following simulations. We generate $50$ independent realizations of the underlying random graphs ($n=200$) and compute their SDP values with and without the semi-supervised constraints ($\rho = 0.25$). In particular, the parameters in Figure \ref{fig:a9b2} are chosen to have $\text{SNR}> 1$. The left two boxes imply that we can tell the difference between the SBM and the ERM with the same average degree $d = (a+b)/2$. However, as in Figure \ref{fig:a5b2}, the vanilla SDP gives essentially the same result since the two models become contiguous if $\text{SNR}\leq 1$. As we have proved in Theorem \ref{thm:CSDP}, our semi-supervised SDP algorithm still manages to distinguish them by bringing down the optimal value of the ERM more significantly than it affects the SBM, which is confirmed by the right two boxes. \begin{figure}[p] \text{SDP optimal value, when SNR is above KS/IT}\par\medskip \centering\includegraphics[width=0.8\textwidth]{a9b2_.png} \caption{$a = 9,~b = 2 \quad (d = 5.5,~ \text{SNR} \approx 2.23)$} \label{fig:a9b2} \end{figure} \begin{figure}[p] \text{SDP optimal value, when SNR is below KS/IT}\par\medskip \centering\includegraphics[width=0.8\textwidth]{a5b2_.png} \caption{$a = 5,~b = 2 \quad (d = 3.5,~ \text{SNR} \approx 0.64)$} \label{fig:a5b2} \end{figure} \newpage \section{Conclusion}\label{conclusion} The census method comes from the combinatorial perspective, while the CSDP is inspired by convex optimization research. Both algorithms are computationally efficient. The former has no requirement on the reveal ratio. The latter is more practical and backward-compatible with the unsupervised setting. By carefully integrating the revealed information with the observed graph structure, we can not only improve the performance of clustering algorithms but also resolve initially unsolvable problems. The fundamental changes brought by the semi-supervised approach let us cross the KS threshold, the information-theoretic threshold, and even the topological limitation. Our work provides a different angle to study stochastic models of networks and semidefinite programs. In real-world situations, it is almost always the case that a certain fraction of the samples is already fairly well understood.
So, an abstract model should be able to capture the existence of such knowledge instead of being blindly restricted to the unsupervised setting. Combining the universality of `revealed' information and the insight derived from our census method, it is arguable that the phase transitions, although mathematically beautiful, will never be an issue in practice. Our results on CSDPs, in turn, could be used to study SDPs, e.g.,\ prove or disprove that it can reach the phase transition threshold or the monotone-robustness threshold by a limiting process of $\rho \to 0$. \if 0 Besides the mathematical curiosity, a major reason we study these foundational problems, e.g.,\ clustering on random graphs, is to develop better tools for realistic applications via theoretical guidance. We see such a promising direction of using the optimizer $X^*$ from CSDP as the propagation model $\hat{A}$ of GCN, see equation \ref{GCN}. One can view $X^*$ as a better representation of the node similarity, see for example Figure \ref{fig:optX}. The key idea behind GCN is exactly making similar nodes share the activation. Moreover, in a deep learning setting, the revealed labels are usually sufficient. CSDP will provide a learning objective justified graph representation, which not only contains more information about the underlying community structure but also auto-calibrates to the specific learning task. \acks{The authors acknowledge support from the National Science Foundation via grants NSF DMS-2027248, NSF CCF-1934568, NIH grants P41EB032840, R01HL16351, and DE-SC0023490.} \newpage \bibliography{SSC} \end{document}
2205.11661v1
http://arxiv.org/abs/2205.11661v1
On an obstacle to the converse of Dahlberg's theorem in high codimensions
\documentclass[12pt]{article} \topmargin=-1cm \textwidth=16.5cm \textheight=25.3cm \oddsidemargin=0.16cm \evensidemargin=0.16cm \usepackage{euscript} \usepackage{a4wide} \usepackage[T2A]{fontenc} \usepackage[utf8]{inputenc} \usepackage{amsfonts} \usepackage{amssymb, amsthm} \usepackage{amsmath} \usepackage{mathtools} \usepackage{needspace} \usepackage{lipsum} \usepackage{comment} \usepackage{cmap} \usepackage[pdftex]{graphicx} \usepackage{hyperref} \usepackage{epstopdf} \usepackage[matrix, arrow, curve]{xy} \usepackage{amscd} \usepackage{esint} \pdfcompresslevel=9 \tolerance = 500 \hfuzz = 0.5pt \setcounter{tocdepth}{2} \begin{document} \renewcommand{\proofname}{Proof} \renewcommand{\d}{\partial} \newcommand{\Z}{\mathbb{Z}} \newcommand{\N}{\mathbb{N}} \newcommand{\R}{\mathbb{R}} \newcommand{\Q}{\mathbb{Q}} \newcommand{\K}{\mathbb{K}} \newcommand{\Cm}{\mathbb{C}} \newcommand{\Pm}{\mathbb{P}} \newcommand{\B}{\mathcal{B}} \newcommand{\Zero}{\mathbb{O}} \newcommand{\ilim}{\int\limits} \newcommand{\slim}{\sum\limits} \newcommand{\action}{\curvearrowright} \newcommand{\E}{\mathbb{E}} \newcommand{\BB}{\overline{B}} \newcommand{\D}{\mathcal{D}} \newcommand{\T}{\mathbb{T}} \newcommand{\F}{\mathcal{F}} \newcommand{\Sf}{\mathbb{S}} \newcommand{\Vol}{\mbox{V}} \newcommand{\mint}{\strokedint\limits} \newcommand{\const}{\mbox{const}} \newcommand{\supp}{\mbox{supp}} \newcommand{\dist}{\mbox{dist}} \newcommand{\Hess}{\mbox{Hess}} \newcommand{\Ker}{\mbox{Ker}} \newcommand{\Hd}{\mathcal{H}} \renewcommand{\div}{\mbox{div}} \newcommand{\diam}{\mbox{diam}} \newcommand{\NTlim}{\mbox{n.t.lim }} \newcommand{\mydet}{\mbox{det}} \newcommand{\Id}{\mbox{Id}} \theoremstyle{plain} \newtheorem{thm}{Theorem} \newtheorem{lm}{Lemma} \newtheorem*{st}{Statement} \newtheorem*{prop}{Properties} \newtheorem*{cl}{Claim} \theoremstyle{definition} \newtheorem{defn}{Definition} \newtheorem{ex}{Ex} \newtheorem{cor}{Corollary} \theoremstyle{remark} \newtheorem{rem}{Rem} \newtheorem*{note}{Note} \title{On an obstacle to the converse of Dahlberg's theorem in high codimensions} \author{Polina Perstneva \thanks{The author was partially supported by the Simons Foundation grant 601941, GD.} \\ } \date{\today} \maketitle \begin{abstract} It has been recently understood that the harmonic measure on the boundary $E = \partial \Omega$ of a domain $\Omega$ in $\R^n$ is absolutely continuous with respect to the Hausdorff measure $\Hd^{n - 1}$ on $E$ if and only if the boundary $E$ is rectifiable. Then, by G. David, M. Engelstein, J. Feneuil, S. Mayboroda and other coauthors, a notion of harmonic measure for Ahlfors-regular sets $E$ of higher codimension $n - d$ was developed with the aid of the operator $L_\alpha = -\div D_{\alpha}^{-n + d + \alpha} \nabla$, where $\alpha > 0$ and $D_\alpha$ is a certain regularized distance function to the set $E$. A program was launched to establish analogous to the classical case equivalence between rectifiability of the higher-codimensional set $E$ and good relations of the (new) harmonic and Hausdorff measures. The sufficiency of rectifiability for quantitative absolute continuity was only just obtained. For the other direction the main obstacle is to prove that, roughly, the equation $L_\alpha D_\alpha = 0$ is true only when the set $E$ is a hyperplane. In this paper we prove some first results which indicate that the latter conjecture may be true. We also explain that a certain natural strategy to tackle the problem does not work till the end. 
\end{abstract} \tableofcontents \section{Introduction} This note belongs to a long tradition of studying the relations between the geometry of a domain $\Omega \subset \R^n$ and the analysis on $\Omega$ or $E = \d \Omega$, in particular relative to second order elliptic operators on $\Omega$. Important examples of this are the study of the absolute continuity of the harmonic measure on $E$ with respect to the surface measure. This subject has a long history (and we refer the reader to the introduction of \cite{GSJ18} for a thorough survey); let us just mention here two important results in the spirit of our interests here. In \cite{Da}, B. Dahlberg showed that when $\Omega$ is a Lipschitz domain, the harmonic measure on $E$ is mutually absolutely continuous with respect to the surface measure, and even given by an $A_\infty$ weight; there were lots of important results before, but mostly when $n=2$ and related to conformal mappings. After this, it was slowly understood that the main issue in this problem was the rectifiability of the boundary $E$. Recall that $d$-rectifiability of a set means that it can be represented as at most countable union of Lipschitz graphs (of dimension $d$) and a set of zero Hausdorff measure $\Hd^d$. Later, the technology improved to the point that one could also worry about the converse results, i.e., what can be said about $E$ when the harmonic measure is absolutely continuous. In 2015, after a long series of works, the question was finally settled in \cite{AHM3TV} by J. Azzam, S. Hoffman, M. Mourgoglou, J. M. Martell, S. Mayboroda, X. Tolsa and A. Volberg. They proved in particular that for $n \geq 2$ and a set $E$ with $\Hd^{n-1}(E) < \infty$, the absolute continuity of the harmonic measure on $E$ with respect to $\Hd^{n - 1}$ on $E$ is equivalent to its rectifiability. Many of the results above, which initially concerned the Laplacian $\Delta$, were also extended to a class of elliptic operators $L$ that are sufficiently close to constant coefficient elliptic operators. See for instance $\cite{AGMT}$, $\cite{HMM2}$. After these successes, G. David, J. Feneuil, and S. Mayboroda \cite{GSJ18} started to inquire if the same philosophy is still true for domains $\Omega$ with a lower-dimensional boundary, and more precisely domains $\Omega \subset \R^n$ such that $E = \d \Omega$ is Ahlfors regular with a dimension $d < n-1$. Recall that $E$ is called $d$-Ahlfors regular if for some $C_0 \ge 1$ the double inequality $$C_0^{-1}r^d \le \Hd^d(E \cap B(x, r)) \le C_0 r^d$$ is true whenever $x \in E$ and $0 < r < \diam(E)$. Notice that in such a case there is no complementary component, i.e., $\Omega = \R^n \setminus E$, and it is checked in \cite{GSJ18} that $\Omega$ has non-tangential access. But the harmonic measure (associated to $\Delta$) is not well defined on $E$, for instance because Brownian paths do not see $E$, so the authors had to use a different class of (degenerate) elliptic operators, adapted to the geometry of $E$. 
They consider divergence form operators \begin{equation}\label{frst} L = - \div A \nabla, \end{equation} where the matrix valued function $A: \Omega \to M_n(\R)$ satisfies the modified ellipticity conditions $$\delta(x)^{n - d - 1}A(x) \zeta \cdot \xi \le C_1 |\zeta||\xi|, x \in \Omega, \zeta, \xi \in \R^n, \quad \mbox{and}$$ \begin{equation}\label{scnd} \delta(x)^{n - d - 1}A(x)\zeta \cdot \zeta \ge C_1^{-1}|\zeta|^2, x \in \Omega, \zeta \in \R^n, \end{equation} where $C_1 \ge 1$ is a constant, and $\delta(x) = \dist(x, E)$ is the distance function from $x \in \Omega$ to $E$. With these operators $L$, \cite{GSJ18} establishes an analogue of the usual theory of elliptic operators: the existence and uniqueness of solutions, and some regularity of the latter. This allows one to define a (degenerate) elliptic measure on the boundary set $E$ in a ``usual'' way, along with a notion of a Green function. Then they worry about analogues in this higher co-dimension context of the direct absolute continuity results, like Dahlberg's theorem above. The concerned operators have to lie in a much smaller class; recall that in the classical case, typical absolute continuity results concern only perturbations of the Laplacian. They settle on the nicest operator they found, namely \begin{equation}\label{3} L = L_{\alpha,\mu} = - \div D_{\alpha,\mu}^{-n + d + 1} \nabla, \end{equation} where $\mu$ is some $d$-dimensional Ahlfors regular measure on $E$, $\alpha > 0$ is a parameter, and the corresponding smooth distance function $D_{\alpha,\mu}$ is defined by \begin{equation}\label{mu_dist} D_{\alpha, \mu}(x) = \left(\ilim_E{|x - y|^{-d - \alpha} d\mu(y)}\right)^{-1/\alpha}. \end{equation} We say that the measure $\mu$ is a $d$-dimensional Ahlfors regular measure on $E$ when the (closed) support of $\mu$ is $E$, and when there is a constant $C_0$ such that \begin{equation} \label{?5} C_0^{-1}r^d \le \mu(B(x, r)) \le C_0 r^d \end{equation} for $x \in E$ and $0 < r < \diam(E)$. It is easy to check that since $\mu$ is Ahlfors regular, $D_{\alpha,\mu}(x)$ is equivalent to $\dist(x, E)$, hence $ L_{\alpha,\mu}$ satisfies the constraints \eqref{frst} and \eqref{scnd}. It is well known that when there is a $d$-Ahlfors regular measure on $E$, then the restriction $\Hd^d_{\vert E}$ of the Hausdorff measure is $d$-Ahlfors regular too. In fact, in \cite{GSJ17}, the authors restrict to $\mu = \Hd^d_{\vert E}$, and prove that when $E$ is the graph of a Lipschitz function with a small enough Lipschitz norm, the harmonic measure associated to $L_{\alpha,\mu}$ is mutually absolutely continuous with respect to $\mu$, and given by an $A_\infty$ weight. The result extends to any $d$-Ahlfors regular measure $\mu$ on $E$, and later on it was proved in \cite{GS20} and \cite{J20} that this result extends to the case where $E$ is uniformly rectifiable of dimension $d < n - 1$, $\mu$ is any $d$-Ahlfors regular measure on $E$, and $\alpha > 0$. At this point it makes sense to look for a converse, but it was found in \cite{GMS20} that the following anomaly occurs. For the ``magic'' number $\alpha = n - d - 2$, the function $\eqref{mu_dist}$ is a solution to $L_{\alpha, \mu} \cdot = 0$ on $\Omega$ for the operator $\eqref{3}$. This implies that the harmonic measure for this operator is absolutely continuous with respect to Hausdorff measure $\Hd^d$ on $E$, no matter how irregular geometrically this set really is. This means that the full analogue of the reverse to Dahlberg's theorem in higher codimension cannot be true. 
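For later reference, let us also record, by a direct computation under the flat assumptions $E = \R^d$ and $\mu = \Hd^d|_E$, why the flat configuration produces a solution for every $\alpha > 0$. Write $x = (y, t)$ with $y \in \R^d$ and $t \in \R^{n - d}$, so that $\dist(x, E) = |t|$. The change of variables $y' = y + |t|z$ in $\eqref{mu_dist}$ gives $$D_{\alpha, \mu}(x) = c_{d, \alpha}|t|, \qquad c_{d, \alpha} = \Big(\ilim_{\R^d}{(1 + |z|^2)^{-\frac{d + \alpha}{2}}dz}\Big)^{-1/\alpha},$$ where $c_{d, \alpha}$ simply denotes the resulting constant. Since $D_{\alpha, \mu}$ does not depend on $y$, $$-L_{\alpha, \mu}D_{\alpha, \mu} = \div\left(D_{\alpha, \mu}^{-n + d + 1}\nabla D_{\alpha, \mu}\right) = \div\big(c_{d, \alpha}^{-n + d + 2}\, t\, |t|^{-n + d}\big) = c_{d, \alpha}^{-n + d + 2}\big((n - d) + (-n + d)\big)|t|^{-n + d} = 0,$$ regardless of the value of $\alpha$. This is the flat solution that appears in the conjecture below.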
Yet it sounds reasonable to expect that, except in the magic case when $d < n - 2$ and $\alpha = n - d - 2$, the uniform rectifiability of $E$ follows from the $A_\infty$-absolute continuity of the harmonic measure associated to $L_{\mu,\alpha}$ with respect to $\mu$. In \cite{GS_f20}, the authors propose to address the different issue of good approximation of the Green function for $L_{\mu,\alpha}$ (with a pole at $\infty$) by distance functions to $E$. They prove some direct results, and also show that some interesting converse results will follow if one proves the following conjecture. Let $E$ be a $d$-Ahlfors regular set, $D_{\alpha, \mu}$ -- the regularized distance function as above in $\eqref{mu_dist}$, and $L_{\alpha, \mu}$ -- the degenerate elliptic operator mentioned before. Then \begin{equation}\label{solution} L_{\alpha, \mu} D_{\alpha, \mu} = 0 \quad \mbox{in} \quad \Omega = \R^n \setminus E \end{equation} is never true except for the following two cases: \begin{enumerate} \item when $d < n - 2$ and $\alpha = n - d - 2$, \item when $E = \R^d$ for some integer $d$ and $\mu = c\Hd^d|_E$ for some positive constant $c$. \end{enumerate} In this work we will make the first step in the study of this conjecture. Before we state our results, let us explain our motivation and give some definitions. Our global goal is to prove that, for the case when $\alpha$ is not the ``magic'' number, the only possible solution to $\eqref{solution}$ is what we call the flat solution. We want to start by explaining why there are no solutions in a neighbourhood of the flat one. This could be easier than the study of global solutions, since we can view the non-flat ones as small perturbations of the flat one. Essentially, any story about perturbations involves a parameter and a family of solutions corresponding to it. This is why an often-used plan to prove the absence of solutions in a neighbourhood is to prove first the absence of parametric families of solutions, which is exactly what we will do. The transition to the absence of individual solutions is usually not too hard, though in our situation the most logical scheme does not work: we will discuss this in Section 5. So the results we state and prove below are actually the best one could do trying to follow the described plan to attack the conjecture. Let us discuss in more detail what we mean by parametric families in a neighbourhood of the flat solution. To start with, consider the case when the measures $\mu$ of all of the functions $D_{\alpha, \mu}$ of our family live on the hyperplane $E = \R^d$, but their densities with respect to the Hausdorff measure are not constant. Then the easiest one-parameter family of solutions of $\eqref{solution}$ to study is $\{D_{\alpha, \mu_t}\}_{t \in [0, t_0)}$ with $E = \R^d$ and $\mu_t = (1 + t\phi)d\Hd^d$ for a fixed function $\phi \in L^\infty(\R^d)$. The next step is to switch to the solutions $D_{\alpha, \mu}$ such that $\mu$ is supported on graphs close to the hyperplane $\R^d$. Here the easiest one-parameter family to study is $\{D_{\alpha, \mu_t}\}_{t \in [0, t_0)}$ with $E_t = Im(Id + t\psi)$, and $(Id + t\psi)^{-1}(\mu_t) = (1 + t\phi)d\Hd^d$ for fixed functions $\phi \in L^\infty(\R^d) \cap L^1(\R^d)$, $\psi \in Lip(\R^d) \cap L^1(\R^d)$. These are the examples one can keep in mind. Our results concern more general families of solutions. We start with measures supported on $E = \R^d$.
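Although none of our arguments rely on it, the objects in the definitions that follow are easy to experiment with numerically: one can approximate $D_{\alpha, \mu_t}$ by discretizing the integral in $\eqref{mu_dist}$. The following rough sketch (for $d = 1$, $n = 4$, a truncated integration window, and purely illustrative names and parameter values) computes the ratio $D_{\alpha, \mu_t}(x)/\dist(x, E)$, which is a constant independent of $x$ when $t = 0$ and ceases to be so once the density is perturbed.
\begin{verbatim}
import numpy as np

# Illustrative sketch only: approximate D_{alpha, mu_t}(x) for E = R x {0}^3
# inside R^4, with mu_t = (1 + t*phi) dH^1, by discretizing the integral
# over a large truncated window.
d, n, alpha = 1, 4, 1.5            # the "magic" exponent would be n - d - 2 = 1
phi = lambda y: np.exp(-y**2)      # a fixed, non-constant, bounded perturbation

y = np.linspace(-200.0, 200.0, 400001)  # discretization of E (truncation error is tiny)
dy = y[1] - y[0]

def D(x, t):
    # x is a point of R^4 off the line E.
    dens = 1.0 + t * phi(y)
    dist = np.sqrt((x[0] - y)**2 + x[1]**2 + x[2]**2 + x[3]**2)
    return (np.sum(dens * dist**(-d - alpha)) * dy)**(-1.0 / alpha)

for x in (np.array([0.0, 0.5, 0.0, 0.0]), np.array([1.0, 0.2, 0.3, 0.1])):
    delta = np.linalg.norm(x[1:])
    print(D(x, 0.0) / delta, D(x, 0.1) / delta)
\end{verbatim}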
\begin{defn}\label{defn1} We call a non-trivial one-parameter differentiable family of flat perturbations of the flat solution a family of solutions $\{D_{\alpha, \mu_t}\}_{t \in [0, t_0)}, t_0 > 0$, of $\eqref{solution}$ such that for any $t$ the measure $\mu_t$ is supported on $E$ has density $1 + \phi_t, \phi_t \in L^\infty(\R^d)$ with respect to $\Hd^d$ with the following properties: \begin{enumerate} \item $\phi_0 = 0$, \item the family of densities is Frechet differentiable at zero in $BMO (\R^d)$; that is, there exists a function $\frac{\partial \phi_t(y)}{\partial t}|_{t = 0} = \frac{\partial \phi_t}{\partial t}(0, y)$ such that $$\|\phi_t - t \frac{\partial \phi_t}{\partial t}(0, \cdot)\|_{BMO} = o(t), \quad \mbox{as} \; t \to 0,$$ \item the derivative $\frac{\partial \phi_t}{\partial t}(0, \cdot)$ is a non-constant function, \item $$\|\phi_t\|_{BMO}, \Big|\ilim_{B(0, 1)}{\phi_t(y)dy}\Big| \le Ct \quad \mbox{and} \quad \Big| \ilim_{B(0, 1)}{\left(\phi_t(y) - t\frac{\partial \phi_t}{\partial t}(0, y)\right)dy} \Big| = o(t), \; t \to 0.$$ \end{enumerate} \end{defn} The integral conditions above on the ball $B(0, 1)$ help, because the $BMO$ norm itself does not control averages on large balls. In the next definition, when we write that the modulus of a vector-valued function or its norm (in $BMO$) admits an estimate, we mean that the moduli or norms of every component of this vector admit it. \begin{defn}\label{defn2} We call a non-trivial one-parameter differentiable family of graph perturbations of the flat solution a family of solutions $\{D_{\alpha, \mu_t}\}_{t \in [0, t_0)}, t_0 > 0$, of $\eqref{solution}$ such that for any $t$ the support $E_t$ of $\mu_t$ is the image of a Lipschitz function $Id + \psi_t: \R^d \to \R^n$, where $\psi_t: \R^d \to \R^{n - d}$ has the Lipschitz constant $Ct$, the measure $\mu_t$ is the image by $Id + \psi_t$ of $(1 + \phi_t)d\Hd^d, \phi_t \in L^\infty(\R^d)$, and the following conditions hold: \begin{enumerate} \item $\phi_0 = \psi_0 = 0$, \item the families of densities $\{\phi_t\}$ and graph functions $\{\psi_t\}$ are Frechet differentiable at zero in $BMO(\R^d)$: there exist functions $\frac{\partial \phi_t(y)}{\partial t}|_{t = 0} = \frac{\partial \phi_t}{\partial t}(0, y)$ and $\frac{\partial \psi_t(y)}{\partial t}|_{t = 0} = \frac{\partial \psi}{\partial t}(0, y)$ in $BMO(\R^d) \cap L^1(\R^d)$ such that $$\|\phi_t - t\frac{\partial \phi_t}{\partial t}(0, \cdot)\|_{BMO} = o(t), \quad \mbox{as} \; t \to 0,$$ $$\|\psi_t - t\frac{\partial \psi_t}{\partial t}(0, \cdot)\|_{BMO} = o(t), \quad \mbox{as} \; t \to 0,$$ \item the derivative $\frac{\partial \psi}{\partial t}(0, y)$ is not constant, \item for $F_t = \phi_t$ and $\psi_t$ $$\|F_t\|_{BMO}, \Big|\ilim_{B(0, 1)}{F_t(y)dy}\Big| \le Ct \quad \mbox{and} \; \Big| \ilim_{B(0, 1)}{\left(F_t(y) - t\frac{\partial F_t}{\partial t}(0, y)\right)dy} \Big| = o(t), \; t \to 0.$$ \end{enumerate} \end{defn} We think that the result we state below for the non-trivial one-parameter differentiable families of graph perturbations is true without the assumption that the derivatives $\frac{\partial \phi_t}{\partial t}(0, y)$ and $\frac{\partial \psi_t}{\partial t}(0, y)$ are in $L^1(\R^d)$ (or without any other similar summability assumption), but we did not manage yet to think of a better argument than the one in Subsection 5.2, which uses the Fourier transform. We are now ready to state our first two main theorems. 
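Before doing so, let us point out that conditions (1)--(4) above are not restrictive for the model families mentioned before the definitions: for $\phi_t = t\phi$ with a fixed non-constant $\phi \in L^\infty(\R^d)$ we have $\frac{\partial \phi_t}{\partial t}(0, \cdot) = \phi$, the remainders $\phi_t - t\frac{\partial \phi_t}{\partial t}(0, \cdot)$ vanish identically, and $\|\phi_t\|_{BMO} \le 2t\|\phi\|_{L^\infty}$, $\big|\ilim_{B(0, 1)}{\phi_t(y)dy}\big| \le t\|\phi\|_{L^\infty}|B(0, 1)|$, so Definition \ref{defn1} applies; similarly, $\psi_t = t\psi$ with a non-constant $\psi \in Lip(\R^d) \cap L^1(\R^d)$ fits Definition \ref{defn2}. The content of the theorems below is therefore that no such family can consist entirely of solutions of $\eqref{solution}$.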
Recall that we are interested in the case when our parameters $n, d$ and $\alpha$ satisfy $n - d > 2$ and $\alpha > 0$ is not ``magic'' ($\alpha \neq n - d - 2$), and we do not pose any additional restrictions on them in Theorems $\ref{mainthm_flat}$ and $\ref{mainthm_graph}$. \begin{thm}\label{mainthm_flat} For any integer $d < n - 2$ and $E = \R^d$ there are no non-trivial one-parameter differentiable families of flat perturbations of the solution $D_{\alpha, \mu}$ with $\mu = c\Hd^d|_E$ of the equation $L_{\alpha, \mu} D_{\alpha, \mu} = 0$. \end{thm} \begin{thm}\label{mainthm_graph} For any integer $d < n - 2$ there are no non-trivial one-parameter differentiable families of graph perturbations of the solution $D_{\alpha, \mu}$ with $\mu = c\Hd^d|_E$ and $E = \R^d$ of the equation $L_{\alpha, \mu} D_{\alpha, \mu} = 0$. \end{thm} For the case when $E$ is a hyperplane we are able to provide another result in the spirit of non-existence of global solutions. We show that, if the density of the measure $\mu$ in $D_{\alpha, \mu}$ with respect to the Hausdorff measure is regular enough and is not a constant, then $\eqref{solution}$ cannot be true. To make precise the notion of ``regular enough'' we recall one more definition. \begin{defn} We say that a function $f$ on $\R^d$ is in the H\"older class $C^{k, \gamma}(\R^d)$ if it has continuous derivatives up to order $k$ and the $k$th partial derivatives are H\"older continuous with exponent $\gamma$, $0 < \gamma < 1$. A function $g$ is H\"older continuous with exponent $\gamma$ if $$\sup_{x \neq y}{\frac{|g(x) - g(y)|}{|x - y|^\gamma}} < \infty.$$ \end{defn} The additional restrictions on $\alpha$ and $d$ below are imposed by our method; they come from the integrability assumptions on certain functions. \begin{thm}\label{mainthm_direct} If $E$ is a hyperplane of dimension $d$ ($E = \R^d$) such that $n - d > 4$, $\alpha > 2 + \varepsilon_0$ for some $0 < \varepsilon_0 < 1$, and the density of the measure $\mu$ with respect to the Hausdorff measure $\Hd^d$ on $E$ is not a constant, but of class $C^{2, \varepsilon}$ for some $0 < \varepsilon < \varepsilon_0$, then the function $D_{\alpha, \mu}$ as in $\eqref{mu_dist}$ cannot be a solution of the equation $\eqref{solution}$. \end{thm} The paper is organized as follows. In Section 2 we discuss a suitable reformulation of the equation $\eqref{solution}$ in terms of the Laplacian, and then give a representation of any harmonic function on $\Omega$ with certain asymptotics. In Section 3 we discuss non-tangential limits of the smooth distance function $D_{\alpha, \mu}$. In Section 4 we prove some facts we need about the space of functions of bounded mean oscillation. In Section 5 we prove Theorems $\ref{mainthm_flat}$ and $\ref{mainthm_graph}$, and discuss why our method does not seem to allow one to finish the proof of the conjecture about the uniqueness of flat solutions of $\eqref{solution}$. In Section 6 we prove Theorem $\ref{mainthm_direct}$. ~\ \subsubsection*{Acknowledgements} I am very grateful to my PhD thesis advisor, Professor Guy David, for his patient guidance and constant support during the development of this paper. I would also like to thank Pierre-Gilles Lemari\'e-Rieusset and Ioann Vasilyev for fruitful discussions. \section{Harmonic functions outside a $d$-Ahlfors regular set $E$} \subsection{An observation} Our method is based on a simple observation, which we now present.
Throughout the text we suppose that $E$ is a $d$-Ahlfors regular set and $\mu$ is a $d$-Ahlfors regular measure on $E$. Suppose that the function $D_{\alpha, \mu}$ in $\eqref{mu_dist}$, which is easily seen to be smooth in $\Omega = \R^n \setminus E$, is a solution to the equation $L_{\alpha, \mu} \cdot = -\div D_{\alpha, \mu}^{-n + d + 1} \nabla \cdot = 0$ on $\Omega$. Then \begin{equation}\label{1} 0 = -L_{\alpha, \mu} D_{\alpha, \mu} = \div\left(D_{\alpha, \mu}^{-n + d + 1} \nabla D_{\alpha, \mu}\right) = D_{\alpha, \mu}^{- n + d + 1}\Delta D_{\alpha, \mu} + (- n + d + 1)D_{\alpha, \mu}^{- n + d}\slim_{i = 1}^n{\left(\frac{\partial}{\partial x_i}D_{\alpha, \mu}\right)^2}. \end{equation} For $\gamma \in \R$, we can compute \begin{equation}\label{2} \Delta D_{\alpha, \mu}^\gamma = \slim_{i = 1}^n{\left(\gamma D_{\alpha, \mu}^{\gamma - 1}\frac{\partial^2}{\partial x_i^2}D_{\alpha, \mu} + \gamma(\gamma - 1)D_{\alpha, \mu}^{\gamma - 2}\left(\frac{\partial}{\partial x_i}D_{\alpha, \mu}\right)^2\right)} \quad \mbox{on} \; \Omega. \end{equation} Evidently, if we pick $\gamma$ equal to $-n + d + 2$ the right-hand sides of $\eqref{1}$ and $\eqref{2}$ will coincide. Therefore $D_{\alpha, \mu}$ is a solution of $\eqref{solution}$ if and only if $D_{\alpha, \mu}^\gamma$ is harmonic outside $E$ for $\gamma = - n + d + 2$: \begin{equation}\label{maineq} L_{\alpha, \mu}D_{\alpha, \mu} = 0 \quad \Longleftrightarrow \quad \Delta D_{\alpha, \mu}^\gamma = 0. \end{equation} This fact will provide us with an alternative representation for the integral $\eqref{mu_dist}$. We will explain this after providing the necessary preliminaries. Note that for the ``magic'' number $\alpha = n - d - 2$ it is always true that $\Delta D_{\alpha, \mu}^\gamma = 0$ for the chosen exponent $\gamma$. \subsection{The Newton potential} Before we proceed, recall that our dimension $d$ is such that $n - d - 2 > 0$, and that we denote by $\delta(x)$ the distance function $\dist(x, E)$. Let $\sigma$ be the measure $\Hd^d$ restricted to $E$. The purpose of this subsection is to prove the following theorem. \begin{thm}\label{ker_repr} Let $u$ be a function harmonic outside the $d$-Ahlfors regular set $E \subset \R^n$ such that $|u(x)| \le C\delta(x)^{-n + d + 2}$. Then there exists a function $f \in L^\infty(E)$ such that $$u(x) = \ilim_{\R^n}{\frac{f(y) d\sigma(y)}{|x - y|^{n - 2}}}.$$ \end{thm} This, of course, looks like the well-known representation of a solution of the equation $\Delta \cdot = 0$ as a convolution with the fundamental solution. But the latter is almost always used in the situation when the set $E$ is compact and has codimension one, while here the issue really is to prove that the Laplacian of $u$, in the sense of distributions, is an upper $d$-Ahlfors regular measure. The next lemma uses standard arguments, but we give the proof for the sake of completeness. Following the traditions of PDE texts, from now on we usually denote all the various constants by the letter $C$. \begin{lm}\label{newton} Let $f$ be a function in $L^\infty(E, d\sigma)$.
Consider $$u_f(x) = \ilim_{\R^n}{\frac{f(y)}{|x - y|^{n - 2}}d\sigma(y)};$$ with the assumptions above, the following holds: \begin{enumerate} \item $u_f$ is locally integrable ($u_f \in L_{1, loc}(\R^n)$), \item $u_f$ is harmonic in $\R^n \setminus E$, \item if $E$ is compact, then for $x$ such that $dist(x, E) = \delta(x) < \diam(E)$, and if $E$ is non-compact, everywhere on $\R^n \setminus E$ $$|u_f(x)| \le c_1\delta(x)^{- n + d + 2},$$ \item in the sense of distributions, $$\Delta u_f = - |S_1|(n - 2)f d\sigma,$$ where $|S_1|$ is the area of the sphere of radius $1$ in $\R^n$. \end{enumerate} \end{lm} \begin{note} The potential $\ilim_{\R^n}{\frac{f(y)}{|x - y|^{n - 2}}d\sigma(y)}$ is often called the Newton potential. \end{note} \begin{proof} \begin{enumerate} \item For all $r > 0$ $$\ilim_{|x| < r}{\ilim_{\R^n}{\frac{|f(y)| d\sigma(y)}{|x - y|^{n - 2}}}dx} \le \|f\|_\infty \ilim_{|x| < r}{\left(\ilim_{|y| < 2r}{\frac{d\sigma(y)}{|x - y|^{n - 2}}} + \ilim_{|y| \ge 2r}{\frac{d\sigma(y)}{|x - y|^{n - 2}}}\right)dx}.$$ The first term we estimate the following way: $$\ilim_{|x| < r}{\ilim_{|y| < 2r}{\frac{d\sigma(y)}{|x - y|^{n - 2}}}dx} \le \ilim_{|y| < 2r}{\ilim_{|x| < r}{\frac{dx}{|x - y|^{n - 2}}}d|\sigma|(y)} \le \ilim_{|y| < 2r}{\ilim_{|z| < r + 2r}{\frac{dz}{|z|^{n - 2}}}d\sigma(y)} $$ $$ = \sigma(E \cap B(0, r))\ilim_{|z| < 3r}{\frac{dz}{|z|^{n - 2}}} = \sigma(E \cap B(0, r))|S_1|\ilim_0^{3r}{\rho d\rho} = C r^d |S_1|\frac{(3r)^2}{2}.$$ To estimate the second term we exploit the fact that $x$ and $y$ are now far away from each other, and we also split the space into layers like always: $$\ilim_{|x| < r}{\ilim_{|y| \ge 2r}{\frac{d\sigma(y)}{|x - y|^{n - 2}}}dx} \le \ilim_{|x| < r}{\slim_{k = 1}^\infty{\ilim_{2^k r \le |y| < 2^{k + 1}r}{\frac{d\sigma(y)}{|x - y|^{n - 2}}}}dx} $$ $$ \le \ilim_{|x| < r}{\slim_{k = 1}^\infty{(2^k r)^{- n + 2}C(2^{k + 1}r)^d} dx} \le C r^{d + 2},$$ since $- n + d + 2 < 0$. \item It suffices to say that the function $\frac{1}{|x|^{n - 2}}$ is a fundamental solution of the equation $\Delta u = 0$ in $\R^n \setminus \{0\}$, and that $$\Delta u_f(x) = \ilim_{\R^n}{\Delta \frac{1}{|x - y|^{n - 2}} f(y)d\sigma(y)}.$$ \item Our strategy will be quite similar to the one we used for the proof of the local integrability: $$|u_f(x)| \le \|f\|_\infty \left(\ilim_{B(x, 2\delta(x))}{\frac{d\sigma(y)}{|x - y|^{n - 2}}} + \slim_{k \ge 1}{\ilim_{B(x, 2^{k + 1}\delta(x))\setminus B(x, 2^k\delta(x))}{\frac{d\sigma(y)}{|x - y|^{n - 2}}}}\right).$$ The first integral inside the brackets can be estimated as $$\ilim_{B(x, 2\delta(x))}{\frac{d\sigma(y)}{|x - y|^{n - 2}}} \le (2\delta(x))^{-n + 2}\ilim_{B(x, 2\delta(x))}{d\sigma(y)} \le (2\delta(x))^{-n + 2}c^d\delta(x)^d \le C \delta^{-n + 2 + d}.$$ For the terms in the second part, $$\ilim_{B(x, 2^{k + 1}\delta(x))\setminus B(x, 2^k\delta(x))}{\frac{d\sigma(y)}{|x - y|^{n - 2}}} \le \left(2^{k}\delta(x)\right)^{-n + 2}(c2^{k + 1}\delta(x))^d = C (2^{-n + 2 + d})^k \delta^{-n + 2 + d}.$$ Thus, as we sum over $k > 0$, we get that the second term is equal to $C \delta^{-n + 2 + d}$, since $2^{-n + 2 + d} < 1$. \item For any $\phi \in C_0^\infty$ we have $$\langle \Delta u_f, \phi \rangle = \langle u_f, \Delta\phi \rangle$$ by the definition of the distribution $\Delta u_f$. 
But $$\langle u_f, \Delta\phi \rangle = \ilim_{\R^n}{u_f(x)\Delta\phi(x)dx} = \ilim_{\R^n}{\ilim_{\R^n}{\frac{f(y) d\sigma(y)}{|x - y|^{n - 2}}}\Delta\phi(x)dx}.$$ Since we know that $u_f \in L_{1, loc}(\R^n)$, $\ilim_{\R^n}{|u_f(x)\Delta\phi(x)|dx} < \infty$, and by Fubini's theorem $$\ilim_{\R^n}{\ilim_{\R^n}{\frac{f(y) d\sigma(y)}{|x - y|^{n - 2}}}\Delta\phi(x)dx} = \ilim_{\R^n}{\ilim_{\R^n}{\frac{\Delta \phi(x)}{|x - y|^{n - 2}}dx}f(y) d\sigma(y)}.$$ Now we use the fact that the solution of the distributional equation $\Delta u = \delta_0$ is $-\frac{1}{(n - 2)|S_1|}\frac{1}{|x|^{n - 2}}$. This is well-known, but one could also check out $\cite{AG}$, p.103, Lemma 4.3.6, for example. Therefore $$\phi(y) = \delta_0 * \phi(y) = \Delta \left(- \frac{1}{(n - 2)|S_1|}\frac{1}{|x^{n - 2}|}\right) * \phi(y) = -\frac{1}{|S_1|(n - 2)} \ilim_{\R^n}{\frac{\Delta \phi(x)dx}{|y - x|^{n - 2}}}.$$ We can integrate the expression above and conclude that $$\ilim_{\R^n}{\ilim_{\R^n}{\frac{\Delta \phi(x)}{|x - y|^{n - 2}}dx} f(y) d\sigma(y)} = -(n - 2)|S_1|\ilim_{\R^n}{\phi(y) f(y) d\sigma(y)}.$$ Thus, $$\langle \Delta u_f, \phi\rangle = -|S_1|(n - 2)\langle f d\sigma, \phi\rangle.$$ \end{enumerate} \end{proof} ~\ Now we study an arbitrary function $u$ harmonic in $\R^n \setminus E$ and such that $|u(x)| \le C \delta(x)^{-n + 2 + d}$. For this we need to recall that $\mu$ is an upper $d$-Ahlfors regular measure on $E$ if there is a constant $C_0$ such that for $x \in E$ and $0 < r < \diam(E)$ one has $\mu(B(x, r)) \le C_0r^d$. \begin{lm}\label{mes} For a function $u$ as above, $u \in L_{1, loc}(\R^n)$ and the distribution $\Delta u$ is an upper $d$-Ahlfors regular measure on $E$. \end{lm} \begin{proof} The first part, that $u$ lies in $L_{1, loc}(\R^n)$, can be viewed as a corollary of the following fact. For $\sigma = \Hd^d|_E$, one has $\delta(x)^{-n + d + 2} \le c u_1(x)$ for the function $u_1(x)$ as in lemma $\ref{newton}$. Indeed, let us pick a point $y_0$ such that $d(x, y_0) \le 2\delta(x)$ and $r = \delta(x)$. Then $$u_1(x) = \ilim_{\R^n}{\frac{d\sigma(y)}{|x - y|^{n - 2}}} \ge \ilim_{E \cap B(y_0, r)}{\frac{d\sigma(y)}{|x - y|^{n - 2}}} \ge \ilim_{E \cap B(y_0, r)}{\frac{d \sigma(y)}{(|x - y_0| + |y_0 - y|)^{n - 2}}}$$ $$ \ge \ilim_{E \cap B(y_0, r)}{\frac{d\sigma(y)}{(3\delta(x))^{n - 2}}} = C \delta(x)^{- n + 2}\sigma(E \cap B(y_0, r)) \ge C\delta^{- n + 2}r^d = C\delta(x)^{- n + d + 2}.$$ Therefore for an arbitrary radius $r$, $$\ilim_{|x| < r}{|u(x)|dx} \le C \ilim_{|x| < r}{\delta(x)^{-n + 2 + d}dx} \le C\ilim_{|x| < r}{\ilim_{\R^n}{\frac{d\sigma(y)}{|x - y|^{n - 2}}}dx},$$ and now we can argue that in Lemma $\ref{newton}$ above we have already demonstrated that the function $u_1$ is locally integrable. One could also repeat the argument we used before, but for $\delta(x)^{-n + d + 2}$. \medskip We now prove the second part of the statement. Observing that $-\Delta u$ is supported on $E$ is easy, since, if $\phi \in C_0^\infty(\R^n)$ has support outside $E$, then $\langle \Delta u, \phi\rangle$ is zero. For the upper-regularity we use an argument of approximate identity. Recall that, besides the local integrability of the function $u$, we know that its total mass inside a ball of small radius $r$ centered at $E$ is majorized up to a constant by $r^{d + 2}$, as we saw in Lemma $\ref{newton}$. Let $\{\phi_r\}$ be a standard approximate identity. We introduce the family of functions $\{u * \phi_r\}$. 
We will see that for each ball $B(x, \rho)$ centered at $E$ the total mass of the Laplacian of $u * \phi_r$ inside this ball is less than $C\rho^d$ if $r << \rho$, where $C$ does not depend on $r$. Therefore the same will be true for $\Delta u$ as a limit of $\Delta (u * \phi_r)$, which will conclude our proof. So, we fix $x \in E$, suppose that $r << \rho$ and estimate $\ilim_{B(x, \rho)}{|\Delta (u * \phi_r)|}$. Since $u * \phi_r$ is harmonic at distance at least $r$ from $E$, we really integrate over the set $B(x, \rho) \cap \{\delta(y) \le r\}$. We can cover this set by less than $C\left(\frac{\rho}{r}\right)^d$ balls $B(x_i, r)$ centered at $E$. Indeed, let $\{B(x_i, r/5)\}_{i \in I}$ be any covering of $E \cap B(x, \rho)$. The Vitali lemma says that we can choose a finite $I_0 \subset I$ such that $B(x_i, r/5), i \in I_0$ do not intersect, and $\cup_{I_0} B(x_i, r)$ covers $E \cap B(x, \rho)$. The $d$-Ahlfors regularity of $E$ then implies that $|I_0|r^d \le C \rho^d$, which implies the bound on the number of balls in the covering $\{B(x_i, r)\}_{i \in I_0}$. The set $B(x, \rho) \cap \{\delta(y) < r\}$ can be covered by the $\{B(x_i, 2r)\}_{i \in I_0}$. Using this development, and also that $\sup|\Delta \phi_r|$ is less than $r^{-2}$, we can estimate the total mass of the Laplacian of $u * \phi_r$ from above: $$\ilim_{B(x, \rho)}{|\Delta (u * \phi_r)|} = \ilim_{B(x, \rho) \cap \{\delta \le r\}}{u * |\Delta \phi_r|} \le \slim_i{\ilim_{B(x_i, r)}{u * |\Delta \phi_r|}} \le \slim_{i}{\sup|\Delta \phi_r|\ilim_{B(x_i, 2r)}{u}}$$ $$ \le \slim_{i}{C r^{-2}r^{d + 2}} \le \left(\frac{\rho}{r}\right)^d Cr^d \le C\rho^d.$$ \end{proof} \begin{lm}\label{mes2} Let $\mu$ be a $d$-upper-regular measure on $E$, then it has a bounded density with respect to the measure $\sigma = \Hd^d|_E$. Moreover, if the measure $\mu$ is $d$-Ahlfors regular, then the density is also bounded away from zero. \end{lm} \begin{proof} Let us check first that $\mu$ is absolutely continuous with respect to $\sigma$, or, equivalently, that if $\sigma(A) = 0$, then $\mu(A) = 0$ as well. Indeed, $\sigma(A) = 0$ if and only if for every $\varepsilon > 0$ exist a covering $\{B(x_i, r_i)\}$ of the set $A$ such that $\slim_{i}{r_i^d} < \varepsilon$. Then $$|\mu(A)| \le |\mu(\cup B(x_i,r_i))| \le \slim_i{|\mu(B(x_i, r_i))|} \le C \slim_i{r_i^d} < C \varepsilon,$$ due to the upper-regularity of $\mu$. So, $\mu(A) \le C \varepsilon$ for every $\varepsilon > 0$, which implies that $\mu(A) = 0$. Now, it is well-known, see for instance \cite{M} Theorem 2.17, that the density function $f$ of the absolutely continuous part of a Radon measure $\mu$ with respect to a Radon measure $\sigma$ is (almost everywhere) equal to the function $D_{\sigma}\mu(x)$, the limit of $D_{\sigma}\mu(x, r) = \frac{\mu(B(x, r))}{\sigma(B(x, r))}$ with respect to $r \to 0$. In our case, since $\mu$ is absolutely continuous with respect to $\sigma$, the density function we are looking for is exactly $D_{\sigma}\mu$. It is bounded, since $\mu(B(x, r)) \le C r^d$ by the $d$-upper-regularity property. In the case then $\mu$ is $d$-Ahlfors regular the density function is also bounded away from zero, since $\mu(B(x, r)) \ge c r^d$. \end{proof} ~\ \begin{proof}[Proof of theorem \ref{ker_repr}] From lemma $\ref{mes}$ we know that if $u$ is harmonic outside $E$ and $|u(x)| \le C\delta(x)^{- n + d + 2}$, then $\Delta u$ is upper $d$-Ahlfors regular and therefore has a bounded density $\tilde{f}$ with respect to $\sigma$. 
Observe that for $f = \tilde{f}\frac{1}{|S_1|(n - 2)}$ $$\ilim_{\R^n}{(u - u_f) \Delta \phi} = \ilim_{\R^n}{\Delta (u - u_f) \phi} = - \ilim_{\R^n}{\phi \tilde{f} d\sigma} + \ilim_{\R^n}{\phi \tilde{f} d\sigma} = 0,$$ for every $\phi \in C_0^\infty$. Now the fundamental lemma of variational calculus tells us that $u$ and $u_f$ can differ only by a linear function. But if that linear function is not zero, this is impossible, since $|u(x)| \le C\delta(x)^{-n + d + 2}$, and this is arbitrarily small far away from $E$, which is not the case with $u = u_f + L$, $L$ linear and non-zero. \end{proof} \section{Non-tangential limits} The main goal of this section is to prove that, if $E$ is a $d$-Ahlfors regular and rectifiable set, and $u$ is a function harmonic outside $E$ and such that $|u(x)|$ is comparable to $\delta(x)^{-n + d + 2}$, then the function $u(x)\delta(x)^{n - d - 2}$ has a non-tangential limit at almost every point $y_0 \in E$ equal to $c f(y_0),$ where $f$ is the density of the measure $\Delta u$ with respect to $\Hd^d|_E$, and the constant $c$ does not depend on $u$ or the point $y_0$. A statement very similar to this is also proved in $\cite{GMS20}$ (see section 5), and we use its techniques and follow mostly its exposition. Let us recall first the necessary terminology and definitions. With the assumption of rectifiability of the set $E$, at almost every with respect to the measure $\sigma = \Hd^d|_E$ point $y_0$ of $E$ we can find the unique tangent hyperplane $T_{y_0}E$ of dimension $d$: see, for example, $\cite{M}$ p. 219 Ex. 7 or $\cite{MShah}$ Ex. 41.21. Let $R > 0$ be small enough, and for the sets of points $y_0$ where $T_{y_0}E$ exists let us define for $\eta \in (0, 1)$ the set $$\Gamma_{R, \eta} = \{x \in \left(\R^n \setminus E\right) \cap B(y_0, R): \; \dist(x, E) \ge \eta |x - y_0|\}.$$ We will call $\Gamma_{R, \eta}$ a non-tangential access region of the point $y_0$ (with an aperture $\eta$). Let $v$ be a function defined at least on $\R^n \setminus E$. The non-tangential limit of $v$ at a point $y_0 \in E$, denoted by $\NTlim_{x \to y_0} v(x)$, if it exists, is the common limit of $\{v(x_i)\}$, where $\{x_i\}$ is any sequence in $\Gamma_{R, \eta}$ such that $x_i$ tends to $y_0$ as $i$ tends to infinity. \begin{center} \includegraphics[scale=0.7]{NT2.pdf} \end{center} \begin{thm}\label{NT_theorem} Let $\mu$ be a $d$-Ahlfors regular measure on $E$ with density $f$ with respect to $\sigma$, and the functions $u_f$ and $\delta$ be as in Section 2. Then, regardless of $\eta$ in the definition of $\Gamma_{R, \eta}$ above, for $\sigma$-almost every $y_0 \in E$ \begin{equation}\label{asymp} \NTlim_{x \to y_0} u_f(x)\delta(x)^{n - d - 2} = C(n, d)f(y_0), \end{equation} where $C(n, d)$ is a constant which depends only on $n$ and $d$. \end{thm} \begin{defn} We call a measure $\nu$ ($d$-)flat if it is equal to a measure $cdy$, where $c$ is a constant, and $dy$ is a Lebesgue measure supported on a hyperplane of dimension $d$. \end{defn} \begin{rem}\label{ae} More precisely, in order for the non-tangential limit $\eqref{asymp}$ to exist at a point $y_0$ of $E$, it is enough that for this point $y_0$ the following holds simultaneously: \begin{enumerate} \item $y_0$ is a density point of the measure $\mu$, \item $T_{y_0}E$ exists, \item every tangent measure of $\mu$ at $y_0$ is a flat measure. 
\end{enumerate} All three conditions hold, as mentioned above in the statement of the theorem, for almost every point in our set $E$: see $\cite{M}$, the precise reference for the property 2 is given above, and for the property 3 -- in the proof of the theorem. \end{rem} \begin{proof}[Proof of theorem 5] Our main instruments are the so-called blow-up limits. We fix a point $y_0$ as in Remark \ref{ae} above and a decreasing to zero sequence of positive numbers $\{r_i\}$. Then we introduce the sequence of sets $\{E_i\}$, $E_i = \frac{E - y_0}{r_i}$ and the measures $\mu_i$ supported on $E_i$, $\mu_i(S) = \frac{\mu(r_iS + y_0)}{r_i^d}$. Theorems 14.3 and 16.5 in \cite{M} combined give us that the (weak) limit of (a subsequence of) $\{\mu_i\}$ is a flat tangent measure $\mu_\infty = f(y_0)d\Hd^d = f(y_0)dy$, supported on the hyperplane $E_\infty$, a limit of $\{E_i\}$ in the sense of Hausdorff distance, which coincides as an element of the Grassmannian with the tangent hyperplane $T_{y_0}E$. Let us also fix an aperture $0 < \eta < 1$ and a point $x \in \Gamma_{E_\infty, \eta}$, where $\Gamma_{E_\infty, \eta} = \{x \in \R^n \setminus E_\infty: \dist(x, E_\infty) \ge \eta |x|\}$. Note that for $i$ large enough $x$ lies well outside of $E_i$. We define now on $\R^n \setminus E_i$ the function $$R_i(x) = \ilim_{E_i}{\frac{d\mu_i(y)}{|x - y|^{n - 2}}}.$$ Our strategy is the following. Denote $x_i = r_ix + y_0 \in \Gamma_{R, \eta}$. First we will explain that the sequence $\{R_i(x)\}_{i \to \infty}$ has a limit proportional to the limit of $u_f(x_i)\delta(x_i)^{n - d - 2}$, as $x_i$ tends to $y_0$, with the ratio $\dist(x, E_\infty)^{(n - d - 2)}$. Roughly, this means that the limit of the sequence $\{R_i(x)\}_{i \to \infty}$ and the non-tangential limit of the function $u_f(\cdot)\delta(\cdot)^{n - d - 2}$ are proportional. Then we will prove that $\{R_i(x)\}_{i \to \infty}$ converges to the product of the constant $C(n, d)f(y_0)$ and the function $\dist(x, E_\infty)^{-(n - d - 2)}$ uniformly in $x$ which are ``far away'' from $E_\infty$. This gives us what we want. For the first part of the proof, observe that, with the notation $w_i = r_iy + y_0 \in E$ and $x_i = r_ix + y_0 \in \Gamma_{R, \eta}$, $$\ilim_{E_i}{\frac{d\mu_i(y)}{|x - y|^{n - 2}}} = \ilim_E{\frac{d\mu(w)_i/r_i^d}{\left|\frac{x_i - y_0}{r_i} - \frac{w_i - y_0}{r_i}\right|^{n - 2}}} = \ilim_E{\frac{d\mu(w_i)}{|x_i - w_i|^{n - 2}}}r_i^{n - 2 - d} = u_f(x_i) r_i^{n - d - 2}.$$ Since $x_i$ lies in the non-tangential access region $\Gamma_{R, \eta}$ (of the point $y_0$), the difference between the distance $\delta(x_i)$ of the point $x_i$ to the set $E$ and the product $|x_i - y_0|\sin{(x \wedge E_\infty)}$, where $x \wedge E_\infty$ is the angle between the hyperplane $E_\infty$ and the vector $x$, tends to zero uniformly in $x$ in $\Gamma_{E_\infty, \eta}$ as $i$ tends to infinity. This implies that, given $|x_i - y_0| = r_i|x| = r_i\frac{\dist(x, E_\infty)}{\sin{(x \wedge E_\infty)}}$, $$R_i(x) = u_f(x_i) r_i^{n - d - 2} = u_f(x_i) \left(\frac{|x_i - y_0|}{|x|}\right)^{n - d - 2}$$ $$ = u_f(x_i) \left(\frac{|x_i - y_0|\sin{(x \wedge E_\infty)}}{\dist(x, E_\infty)}\right)^{n - d - 2} = u_f(x_i)\left(\frac{\delta(x_i)}{\dist(x, E_\infty)}\right)^{n - d - 2} + f(x_i),$$ where $f(x_i)$ tends to zero uniformly in $x$ as $i$ tends to infinity. 
For the second part, as announced, we first prove that $R_i(x) \to R_\infty(x)$ uniformly in $x$ which lie ``far away'' from $E_\infty$, where $$R_\infty(x) = \ilim_{E_\infty}{\frac{d\mu_\infty(y)}{|x - y|^{n - 2}}}.$$ \begin{lm}\label{unicomp} Let $K$ be a compact set inside $\R^n \setminus E_\infty$. Then functions $R_i$ converge to $R_\infty$ uniformly on $K$. \end{lm} \begin{proof} We begin with the observation that if $x \in K$, then for $i$ large enough $x$ lies well outside of the set $E_i$. We consider from now on only such indices $i$. The functions $R_i(x)$ are bounded uniformly in $x \in K$ and $i$. Indeed, the measures $\mu_i$ are $d$-Ahlfors regular with uniform in $i$ constants. If we set $y_0 \in E_i: \dist(x, E_i) = \dist(y_0, x)$ and $r = \min_{i \ge i_0}{\dist(K, E_i)}$ (which is separated from zero), then $$\ilim_{E_i}{\frac{d\mu_i(y)}{|x - y|^{n - 2}}} = \ilim_{B(y_0, r)}{\frac{d\mu_i(y)}{|x - y|^{n - 2}}} + \slim_{k = 1}^\infty{\ilim_{2^kr \le |y - y_0| \le 2^{k + 1}r}{\frac{d\mu_i(y)}{|x - y|^{n - 2}}}}$$ $$ \le \dist(K, E_i)^{-n + 2}r^d + \slim_{k = 1}^\infty{(2^k r)^{-n + 2} C(2^{k + 1}r)^d} \le C r^{-n + 2 + d}.$$ The uniform estimate implies that for every $\varepsilon > 0$ there is $R > 0$ such that $$\left|\ilim_{B(0, R)}{\frac{d\mu_i(y)}{|x - y|^{n - 2}}} - \ilim_{E_i}{\frac{d\mu_i(y)}{|x - y|^{n - 2}}}\right| < \varepsilon$$ for every $i = i_0, \dots, \infty$. There exists a smooth function $\phi$ which approximates $\chi_{B(0, R)}$ and is supported inside $B(0, R)$ such that $$\left|\ilim_{B(0, R)}{\frac{d\mu_i(y)}{|x - y|^{n - 2}}} - \ilim_{E_i}{\frac{\phi(y)d\mu_i(y)}{|x - y|^{n - 2}}}\right| < \varepsilon.$$ Therefore we have, for the same set of indices, $$\ilim_{E_i}{\left|\frac{(1 - \phi(y))d\mu_i(y)}{|x - y|^{n - 2}}\right|} < 2\varepsilon.$$ We need this to work with functions of the type $\frac{\phi(y)}{|x - y|^{n - 2}}$ instead of $\frac{1}{|x - y|^{n - 2}}$. Observe now that functions of the family $\{\frac{\phi(y)}{|x - y|^{n - 2}}\}_{x \in K}$ are still uniformly bounded and moreover equicontinuous inside $\overline{B(0, R)}$. Indeed, if $|x_1 - x_2| < \delta$, then $$\left|\frac{\phi(y)}{|x_1 - y|^{(n - 2)}} - \frac{\phi(y)}{|x_2 - y|^{n - 2}}\right| \le \frac{\left||x_2 - y|^{n - 2} - |x_1 - y|^{n - 2}\right|}{|x_1 - y|^{n - 2}|x_2 - y|^{n - 2}} $$ $$ \le (|x_2 - y| - |x_1 - y|)\slim_{i = 0}^{n - 3}{\frac{|x_2 - y|^{n - 3 - i}|x_1 - y|^i}{|x_1 - y|^{n - 2}|x_2 - y|^{n - 2}}} \le |x_2 - x_1|C(n) < \delta C(n).$$ Therefore we can apply Arzela-Ascoli theorem to the family $\{\frac{\phi(y)}{|x - y|^{n - 2}}\}_{x \in K}$ and find a finite collection of continuous function $\{g_k\}$ with support inside $B(0, 2R)$ such that for every point $x \in K$ there is an index $k$ with $|g_k(y) - \frac{\phi(y)}{|x - y|^{n - 2}}| \le \varepsilon R^{-d}$ for $y \in B(0, 2R) \cap E_i$. 
Then $$\ilim{\left|g_k(y) - \frac{\phi(y)}{|x - y|^{n - 2}}\right|d\mu_i(y)} + \ilim{\left|g_k(y) - \frac{\phi(y)}{|x - y|^{n - 2}}\right|d\mu_\infty(y)} \le C \varepsilon.$$ By the definition of a tangent measure $\ilim{g_k(y)d\mu_i(y)}$ converges to $\ilim{g_k(y)d\mu_\infty(y)}$, and we can finally estimate the modulus of the difference $|R_i(x) - R_\infty(x)|$ the following way: $$\left|\ilim_{E_i}{\frac{d\mu_i(y)}{|x - y|^{n - 2}}} - \ilim_{E_\infty}{\frac{d\mu_\infty(y)}{|x - y|^{n - 2}}}\right| \le 4\varepsilon + \left|\ilim_{E_i}{\frac{\phi(y)d\mu_i(y)}{|x - y|^{n - 2}}} - \ilim_{E_\infty}{\frac{\phi(y)d\mu_\infty(y)}{|x - y|^{n - 2}}}\right|$$ $$\le 4\varepsilon + \ilim_{E_i}{\left|g_k(y) - \frac{\phi(y)}{|x - y|^{n - 2}}\right|d\mu_i(y)} + \ilim_{E_\infty}{\left|g_k(y) - \frac{\phi(y)}{|x - y|^{n - 2}}\right|d\mu_\infty(y)}$$ $$+ \left|\ilim{g_k(y)d\mu_i(y)} - \ilim{g_k(y)d\mu_\infty(y)}\right| \le C\varepsilon.$$ \end{proof} We now know that, on the one hand, the limit of the function $R_i(x)$, as $i$ tends to infinity, coincides with the limit of the function $$u_f(x_i)\delta(x_i)^{n - d - 2}\dist(x, E_\infty)^{-(n - d - 2)}$$ as $x_i$ tends to $y_0$, if $x_i = r_i x + y_0$. On the other hand, $R_i(x)$ tends to $R_\infty(x)$ uniformly for $x$ in $\Gamma_{E_\infty, \eta}$ which stay away from $E_\infty$. This implies that for every $x$ such that $|x| = 1$, say, and $x \in \Gamma_{E_\infty, \eta}$ $$u_f(x_i)\delta(x_i)^{n - d - 2} \to R_\infty(x)\dist(x, E_\infty)^{n - d - 2}, \; i \to \infty, x_i = r_i x + y_0.$$ The product $R_\infty(x)\dist(x, E_\infty)^{n - d - 2}$ we will compute below, and it is a product of $C(n, d)f(y_0)$, where the constant $C(n, d)$ can be calculated explicitly in terms of $\Gamma$-functions and depends only on $d$ and $n$. This is exactly the right-hand side of $\eqref{asymp}$. It is left for us only to see why $$u_f(x_i)\delta(x_i)^{n - d - 2} \to R_\infty(x)\dist(x, E_\infty)^{n - d - 2}, \quad \mbox{as} \; x_i \to y_0$$ for any sequence $\{x_i\}$ in $\Gamma_{R, \eta}$, not just those of the type $x_i = r_ix + y_0$ for a fixed $x$. But this follows from the uniform convergence of $$R_i(x) \quad \mbox{to} \quad u_f(x_i)\left(\frac{\delta(x_i)}{\dist(x, E_\infty)}\right)^{n - d - 2}$$ and in Lemma \ref{unicomp}: we have $|u_f(x_i)\delta(x_i)^{n - d - 2} - C(n, d)f(y_0)| < \epsilon$ as soon as $|x_i - y_0|$ is small enough. ~\ To finish the proof we compute $R_\infty(x)\dist(x, E_\infty)^{n - d - 2}.$ By definition, $$R_\infty(x)\dist(x, E_\infty)^{n - d - 2} = \ilim_{E_\infty}{\frac{f(y_0)dy}{|x - y|^{n - 2}}}\dist(x, E_\infty)^{n - d - 2}.$$ Let $z$ be the point of $E_\infty$ such that $\dist(x, E_\infty) = \dist(x, z)$. Clearly $|x - y| = \sqrt{|x - z|^2 + r^2},$ where $r = \dist(z, y)$ for $y \in E_\infty$. Write $\delta_1(x)$ for $\dist(x, z)$. Then we have $$\ilim_{E_\infty}{\frac{dy}{|x - y|^{n - 2}}} = \ilim_0^\infty{\frac{r^{d - 1}dr}{(\delta_1(x)^2 + r^2)^{(n - 2)/2}}}$$ $$ = \delta_1(x)^{-n + d + 2} \ilim_0^\infty{\frac{\frac{r^{d - 1}}{\delta_1(x)^{d - 1}} d\left(\frac{r}{\delta_1(x)}\right)}{\left(1 + \left(\frac{r}{\delta_1(x)}\right)^2\right)^{(n - 2)/2}}} = \delta_1(x)^{-n + d + 2}\ilim_0^\infty{\frac{x^{d - 1}dx}{(1 + x^2)^{(n - 2)/2}}}.$$ Note that the integral $\ilim_0^\infty{\frac{x^{d - 1}dx}{(1 + x^2)^{(n - 2)/2}}}$ clearly converges and is equal to $c_1 = C(n, d) = \Vol(\Sf^{d - 1})\frac{1}{2}\frac{\Gamma\left(\frac{d}{2}\right)\Gamma\left(\frac{n - d - 2}{2}\right)}{\Gamma\left(\frac{n - 2}{2}\right)}$. 
Therefore $R_\infty(x)\dist(x, E_\infty)^{n - d - 2}$ is indeed equal to $C(n, d)f(y_0)$. \end{proof} \begin{cor}\label{cor1} In addition to the fact that the function $$u_f(x)\delta(x)^{n - d - 2} = \delta(x)^{n - d - 2}\ilim_{E}{\frac{f(y)dy}{|x - y|^{n - 2}}},$$ where $f \in L^\infty(E)$, has a non-tangential limit at $\sigma$-almost every $y_0 \in E$, equal to $c_1f(y_0)$, throughout the next sections we will also use similar statements, but with exponents in the denominator of the integral other than $(n - 2)$. More precisely, we claim that for $f \in L^\infty(E)$ and $\beta > 0$ the function $$\delta(x)^{\beta}\ilim_{E}{\frac{f(y)dy}{|x - y|^{d + \beta}}}$$ also has a non-tangential limit at almost every $y_0 \in E$ equal to $C(d, \beta)f(y_0)$, and the constant $C(d, \beta)$ depends only on $d$ and $\beta$. The proof is similar to the proof of $\eqref{asymp}$. \end{cor} \section{Three BMO lemmas} In this section we prove some more technical preliminaries concerning the space of functions of bounded mean oscillation (BMO) to simplify the exposition of Sections 5 and 6. The reader can skip to them directly and return to this section when needed. By $x$ we usually (that is, except in Lemma $\ref{lmaverages}$) denote a point in $\R^n$ which lies away from the hyperplane $\R^d$ containing zero. By $\delta(x)$ we denote the distance between $x$ and the hyperplane $\R^d$. Recall that the John-Nirenberg inequality asserts that for every $1 \le p < \infty$ $$\sup_B{\left(\frac{1}{|B|}\ilim_B{|f(y) - m_Bf|^p dy}\right)^{1/p}} \asymp \|f\|_{BMO},$$ where by $m_Bf$ we denote the average $\frac{1}{|B|}\ilim_B{f(y)dy}$, and the supremum is taken over all balls in $\R^d$. \begin{lm}\label{lmaverages} Let $f$ be a function in $BMO(\R^d)$. Denote by $B(x, r)$ the ball with center $x$ and radius $r$. Then for every $r > r_0 > 0$ we have \begin{equation}\label{averages} |m_{B(x, r)}f - m_{B(x_0, r_0)}f| \le C\|f\|_{BMO}\left(\ln{\frac{r}{r_0}} + \ln{\frac{|x - x_0|}{r_0}}\right). \end{equation} \end{lm} \begin{proof} We will need two simple facts. First, let $B$ and $B'$ be balls such that $B' \subset B$. Then \begin{equation}\label{av1} |m_Bf - m_{B'}f| \le \frac{|B|}{|B'|}\|f\|_{BMO}. \end{equation} Indeed, $$|m_Bf - m_{B'}f| = \Big|\frac{1}{|B'|}\ilim_{B'}{f(y)dy} - m_{B}f\Big| \le \frac{1}{|B'|}\ilim_{B'}{|f(y) - m_{B}f|dy}$$ $$ \le \frac{|B|}{|B'|}\frac{1}{|B|}\ilim_B{|f(y) - m_{B}f|dy} \le \frac{|B|}{|B'|}\|f\|_{BMO}.$$ Second, if the two centers $x$ and $x'$ are such that $|x - x'| \le 2r$, then \begin{equation}\label{av2} |m_{B(x, r)}f - m_{B(x', r)}f| \le C\|f\|_{BMO}. \end{equation} This follows from $\eqref{av1}$: we can find a ball $B$ of radius $3r$ such that $B(x, r) \subset B$ and $B(x', r) \subset B$. Then $$|m_{B(x, r)}f - m_{B(x', r)}f| \le |m_{B(x, r)}f - m_Bf| + |m_Bf - m_{B(x', r)}f| \le C 3^d\|f\|_{BMO}.$$ We are now ready to prove $\eqref{averages}$. $$|m_{B(x, r)}f - m_{B(x_0, r_0)}f| \le |m_{B(x, r)}f - m_{B(x, r_0)}f| + |m_{B(x, r_0)}f - m_{B(x_0, r_0)}f|.$$ Inequality $\eqref{av1}$ will help us to estimate the first term. Build a chain of nested balls $B_1 = B(x, r_0) \subset B_2 \subset \dots \subset B_N = B(x, r)$ with center $x$ and radii $r_i$ such that $r_i = 2r_{i - 1}$ for $i \le N - 1$. Then it is clear that $N \le C\ln{\frac{r}{r_0}}$. For each $1 < i \le N$ we have $|m_{B_{i - 1}}f - m_{B_i}f| \le C\|f\|_{BMO}$.
Therefore $$|m_{B(x, r)}f - m_{B(x, r_0)}f| \le \slim_{i = 2}^N{|m_{B_i}f - m_{B_{i - 1}}f|} \le C\|f\|_{BMO}\ln{\frac{r}{r_0}}.$$ Inequality $\eqref{av2}$ will help us to estimate the second term $|m_{B(x, r_0)}f - m_{B(x_0, r_0)}f|$. Build a chain of balls $B_1 = B(x, r_0), B_2, \dots, B_N = B(x_0, r_0)$ of common radius $r_0$ and centers $x_i$ such that $|x_{i - 1} - x_i| \le 2r_0$ for every $1 < i \le N$. Clearly $N \le C \ln{\frac{|x - x_0|}{r_0}}$. By $\eqref{av2}$, $$|m_{B(x, r_0)}f - m_{B(x_0, r_0)}f| \le \slim_{i = 2}^N{|m_{B_i}f - m_{B_{i - 1}}f|} \le C\|f\|_{BMO}\ln{\frac{|x - x_0|}{r_0}}.$$ This completes the proof of $\eqref{averages}$. \end{proof} \begin{lm} Let $f$ be a function in $BMO (\R^d)$ and $x$ be a point in $\R^n \setminus \R^d$. Then for any integer $m \ge 1$ \begin{equation}\label{f_bmo3} \ilim_{\R^d}{\frac{|f(y)|^m dy}{|x - y|^{d + \beta}}} \le C(1 + \delta(x)^{-(d + \beta)}) \left(\|f\|_{BMO}^m + |m_{B(x_0, 1)}f|^m\right), \end{equation} where by $x_0$ we denote the projection of $x$ to the hyperplane $\R^d$ ($x_0 = (x_1, \dots , x_d)$), and the constant $C$ depends only on $d, m$ and $\beta > 0$. \end{lm} \begin{proof} First, observe that $$\ilim_{\R^d}{\frac{|f(y)|^m dy}{|x - y|^{d + \beta}}} \le C(m) \ilim_{\R^d}{\frac{|f(y) - m_{B(x_0, 1)}f|^m dy}{|x - y|^{d + \beta}}} + C(m) \ilim_{\R^d}{\frac{|m_{B(x_0, 1)}f|^m dy}{|x - y|^{d + \beta}}}.$$ We use a computation very similar to the one we saw at the end of the previous section to calculate $\ilim_{\R^d}{|x - y|^{-(d + \beta)}dy}$ and get \begin{equation}\label{lm6_1} \ilim_{\R^d}{\frac{|m_{B(x_0, 1)}f|^m dy}{|x - y|^{d + \beta}}} \le C\delta(x)^{- \beta}|m_{B(x_0, 1)}f|^m \le C(1 + \delta(x)^{-(d + \beta)})|m_{B(x_0, 1)}f|^m. \end{equation} The second inequality here is true because $\delta(x)^{-\beta} \le \delta(x)^{-(d + \beta)}$ if $\delta(x) \le 1$, and otherwise both quantities are dominated by a constant. This means that we can assume $m_{B(x_0, 1)}f = 0$. Without loss of generality, we can also assume that the first $d$ and the last $n - d - 1$ coordinates of $x$ are zero: $x = (0, \dots, 0, \delta(x), 0, \dots)$. We split the space $\R^d$ into $B(0, 1)$ and the union of rings $B(0, 2^k) \setminus B(0, 2^{k - 1})$ and estimate separately the integrals of $|f(y)|^m/|x - y|^{d + \beta}$ over these sets. For the first term we have $$\ilim_{B(0, 1)}{\frac{|f(y)|^m dy}{|x - y|^{d + \beta}}} = \ilim_{B(0, 1)}{\frac{|f(y) - m_{B(0, 1)}f|^m dy}{|x - y|^{d + \beta}}} $$ \begin{equation}\label{lm6_2} \le C\delta(x)^{- (d + \beta)}\frac{1}{|B(0, 1)|}\ilim_{B(0, 1)}{|f(y) - m_{B(0, 1)}f|^mdy} \le C\delta(x)^{- (d + \beta)}\|f\|_{BMO}^m \end{equation} by the John-Nirenberg inequality. Then we estimate the integrals over the rings; for $B = B(0, 2^{k - 1})$ and $2B = B(0, 2^k)$ we have $$\ilim_{2B \setminus B}{\frac{|f(y)|^m dy}{|x - y|^{d + \beta}}} \le C(m)\ilim_{2B \setminus B}{\frac{|f(y) - m_{2B}f|^m dy}{|x - y|^{d + \beta}}} + C(m)\ilim_{2B \setminus B}{\frac{|m_{2B}f|^m dy}{|x - y|^{d + \beta}}}.$$ Since for $y$ in $2B \setminus B$ the quantity $|x - y|^2$ is comparable to $\delta(x)^2 + 2^{2k}$, $|x - y|^{-(d + \beta)}$ is dominated by $C(\delta(x) + 2^k)^{-(d + \beta)}$. For the first term this gives $$\ilim_{2B \setminus B}{\frac{|f(y) - m_{2B}f|^m dy}{|x - y|^{d + \beta}}} \le C(\delta(x) + 2^k)^{- (d + \beta)}\ilim_{2B}{|f(y) - m_{2B}f|^m dy} \le C(\delta(x) + 2^k)^{- (d + \beta)}2^{kd}\|f\|_{BMO}^m,$$ again by the John-Nirenberg inequality.
For the second term we have $$\ilim_{2B \setminus B}{\frac{|m_{2B}f|^m dy}{|x - y|^{d + \beta}}} = \ilim_{2B \setminus B}{\frac{|m_{2B}f - m_{B(0, 1)}f|^m dy}{|x - y|^{d + \beta}}} \le C 2^{kd} (\delta(x) + 2^k)^{- (d + \beta)}|m_{2B}f - m_{B(0, 1)}f|^m$$ $$\le C2^{kd} (\delta(x) + 2^k)^{- (d + \beta)} (\ln{2^k})^m \|f\|_{BMO}^m \le C2^{kd} (\delta(x) + 2^k)^{- (d + \beta)} k^m \|f\|_{BMO}^m$$ by $\eqref{averages}$. Therefore $$\ilim_{2B \setminus B}{\frac{|f(y)|^m dy}{|x - y|^{d + \beta}}} \le C 2^{kd} (\delta(x) + 2^k)^{- (d + \beta)} (1 + k^m)\|f\|_{BMO}^m.$$ Observe that the sum $\slim_{k = 1}^\infty {2^{kd} (\delta(x) + 2^k)^{- (d + \beta)} (1 + k^m)}$ is finite and dominated by a constant $C$ depending only on $d$, $m$ and $\beta$, since $(\delta(x) + 2^k)^{-1} \le 2^{-k}$. Then we sum over all the rings and get \begin{equation}\label{lm6_3} \ilim_{\R^d \setminus B(0, 1)}{\frac{|f(y)|^m dy}{|x - y|^{d + \beta}}} = \slim_{k = 1}^\infty {\ilim_{B(0, 2^k) \setminus B(0, 2^{k - 1})}{\frac{|f(y)|^m dy}{|x - y|^{d + \beta}}}} \le C\|f\|_{BMO}^m. \end{equation} Collecting $\eqref{lm6_1}$, $\eqref{lm6_2}$ and $\eqref{lm6_3}$, we obtain the final estimate $\eqref{f_bmo3}$. \end{proof} \begin{cor} With the same notation as in $\eqref{f_bmo3}$, the estimate \begin{equation}\label{f_bmo4} \ilim_{\R^d}{\frac{|f(y)|^m dy}{|x - y|^{d + \beta}}} \le C (1 + \delta(x)^{-(d + \beta)})(1 + \ln{|x_0|})^m\left(\|f\|_{BMO}^m + |m_{B(0, 1)}f|^m\right) \end{equation} holds. \end{cor} \begin{proof} This follows from $\eqref{f_bmo3}$ and Lemma $\ref{lmaverages}$. \end{proof} \begin{lm} Let $f_i, i = 1, \dots, m$ be functions in $BMO(\R^d)$. With the same notation as in $\eqref{f_bmo3}$, we have \begin{equation}\label{Ho} \ilim_{\R^d}{\frac{|f_1(y) \dots f_m(y)|dy}{|x - y|^{d + \beta}}} \le C(1 + \delta(x)^{-(d + \beta)})(1 + \ln{|x_0|})^m \prod_{i = 1}^m{(\|f_i\|_{BMO}^m + |m_{B(0, 1)}f_i|^m)^{1/m}}. \end{equation} \end{lm} \begin{proof} This is, essentially, the H\"older inequality applied repeatedly. We give a proof by induction for the sake of completeness. First, we want to prove that \begin{equation}\label{aux_Ho} \ilim_{\R^d}{\frac{|f_1(y) \dots f_m(y)|dy}{|x - y|^{d + \beta}}} \le \prod_{i = 1}^m{\left(\ilim_{\R^d}{\frac{|f_i(y)|^m dy}{|x - y|^{d + \beta}}}\right)^{1/m}}. \end{equation} For the base we apply H\"older with exponents $p_1 = m$ and $q_1 = \frac{m}{m - 1}$ and functions $|f_1(y)||x - y|^{-(d + \beta)\frac{1}{m}}$ and $|f_2(y) \dots f_m(y)||x - y|^{-(d + \beta)\frac{m - 1}{m}}$.
It gives $$\ilim_{\R^d}{\frac{|f_1(y) \dots f_m(y)|dy}{|x - y|^{d + \beta}}} \le \left(\ilim_{\R^d}{\frac{|f_1(y)|^m dy}{|x - y|^{d + \beta}}}\right)^{1/m}\left(\ilim_{\R^d}{\frac{|f_2(y) \dots f_m(y)|^\frac{m}{m - 1} dy}{|x - y|^{d + \beta}}}\right)^{(m - 1)/m}.$$ By the induction hypothesis, $$\ilim_{\R^d}{\frac{|f_1(y) \dots f_m(y)|dy}{|x - y|^{d + \beta}}} \le \prod_{i = 1}^j{\left(\ilim_{\R^d}{\frac{|f_i(y)|^m dy}{|x - y|^{d + \beta}}}\right)^{1/m}}\left(\ilim_{\R^d}{\frac{|f_{j + 1}(y) \dots f_m(y)|^\frac{m}{m - j} dy}{|x - y|^{d + \beta}}}\right)^{(m - j)/m}.$$ If $j = m - 1$, we are done, otherwise we apply H\"older once again to $$\ilim_{\R^d}{\frac{|f_{j + 1}(y) \dots f_m(y)|^\frac{m}{m - j} dy}{|x - y|^{d + \beta}}}$$ with $p_{j + 1} = m - j$, $q_{j + 1} = \frac{m - j}{m - (j + 1)}$ and functions $$|f_{j + 1}(y)|^\frac{m}{m - j}|x - y|^{-(d + \beta)\frac{1}{p_{j + 1}}}, \quad |f_{j + 2}(y) \dots f_m(y)|^\frac{m}{m - j}|x - y|^{-(d + \beta)\frac{1}{q_{j + 1}}}.$$ We will get $$\ilim_{\R^d}{\frac{|f_1(y) \dots f_m(y)|dy}{|x - y|^{d + \beta}}} \le \prod_{i = 1}^{j + 1}{\left(\ilim_{\R^d}{\frac{|f_i(y)|^m dy}{|x - y|^{d + \beta}}}\right)^{1/m}}\left(\ilim_{\R^d}{\frac{|f_{j + 2}(y) \dots f_m(y)|^\frac{m}{m - (j + 1)} dy}{|x - y|^{d + \beta}}}\right)^{(m - (j + 1))/m},$$ which completes the induction step. The inequality $\eqref{Ho}$ follows from $\eqref{f_bmo4}$ and $\eqref{aux_Ho}$. \end{proof} \section{No one-parameter families of solutions} We can now turn to the study of solutions of the equation $\eqref{solution}$ for rectifiable sets $E$ with integer dimension $d$, using our preliminaries from Sections 2 and 3. Assume the function $D_{\alpha, \mu}$ as in $\eqref{mu_dist}$ is a solution of $\eqref{solution}$. Then we know (see the beginning of Section 2) that, for $\gamma = - n + d + 2$, the function $D_{\alpha, \mu}^\gamma$ is equivalent to $\delta(x)^{-n + d + 2}$ and is harmonic outside $E$. Therefore, according to Subsection 2.2, there exists a density function $h$ in $L^\infty(E)$ such that \begin{equation}\label{functions} \ilim_{E}{\frac{h(y)d\sigma(y)}{|x - y|^{n - 2}}} = \left(\ilim_{E}{\frac{f(y)d\sigma(y)}{|x - y|^{d + \alpha}}}\right)^{(n - d - 2)/\alpha} \quad \forall \; x \in \R^n \setminus E, \end{equation} where $f \in L^\infty(E)$ is just the density of $\mu$ with respect to $\sigma = \Hd^d|_E$. Multiplying both sides of $\eqref{functions}$ by $\delta(x)^{n - d - 2}$ and using $\eqref{asymp}$ in Theorem 5 and Corollary \ref{cor1} to pass to non-tangential limits, we get that for almost every $y \in E$ \begin{equation}\label{dens_relation} c_1h(y) = (c_2 f(y))^{(n - d - 2)/\alpha}, \end{equation} where the constants $c_i$ depend only on $n$, $d$ and $\alpha$. The constant $c_1$ is the constant $C(n, d)$ in $\eqref{asymp}$, which we computed at the end of the proof of Theorem 5, and the constant $c_2$ is equal to $\delta(x)^{\alpha}\ilim_{\R^d}{\frac{dy}{|x - y|^{d + \alpha}}} = \Vol(\Sf^{d - 1})\frac{1}{2}\frac{\Gamma\left(\frac{d}{2}\right)\Gamma\left(\frac{\alpha}{2}\right)}{\Gamma\left(\frac{d + \alpha}{2}\right)}$ (a quantity which does not depend on $x$; a short verification is given below). Thus, we get an equation for the density $f$ of the measure $\mu$, when $D_{\alpha, \mu}$ satisfies $\eqref{solution}$, but we prefer to write everything in terms of the density function $h$ alone.
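For completeness, here is a short verification of this value of $c_2$, by the same polar-coordinate and Beta-function computation as for $c_1$: writing $x_0$ for the projection of $x$ onto $\R^d$, so that $|x - y|^2 = \delta(x)^2 + |x_0 - y|^2$ for $y \in \R^d$, and substituting $|x_0 - y| = \delta(x)s$, we get $$\delta(x)^{\alpha}\ilim_{\R^d}{\frac{dy}{|x - y|^{d + \alpha}}} = \delta(x)^{\alpha}\Vol(\Sf^{d - 1})\ilim_0^\infty{\frac{r^{d - 1}dr}{(\delta(x)^2 + r^2)^{(d + \alpha)/2}}} = \Vol(\Sf^{d - 1})\ilim_0^\infty{\frac{s^{d - 1}ds}{(1 + s^2)^{(d + \alpha)/2}}} = \Vol(\Sf^{d - 1})\frac{1}{2}\frac{\Gamma\left(\frac{d}{2}\right)\Gamma\left(\frac{\alpha}{2}\right)}{\Gamma\left(\frac{d + \alpha}{2}\right)},$$ which indeed does not depend on $x \in \R^n \setminus \R^d$.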
So, combining $\eqref{functions}$ and $\eqref{dens_relation}$, we get that $D_{\alpha, \mu}$ being a solution of $\eqref{solution}$ is equivalent to the equation \begin{equation}\label{densities} \ilim_{E}{\frac{h(y)dy}{|x - y|^{n - 2}}} = c_3 \left(\ilim_{E}{\frac{h(y)^\frac{\alpha}{n - d - 2}dy}{|x - y|^{d + \alpha}}}\right)^\frac{n - d - 2}{\alpha}, \quad \forall \; x \in \R^n \setminus E, \end{equation} where we denote by $c_3$ the constant $c_2^{-\frac{n - d - 2}{\alpha}} c_1$. Note that the density function $h$ is also bounded away from zero, since according to Lemma $\ref{mes2}$ the density function $f$ has this property, and the two functions are connected by $\eqref{dens_relation}$. ~\ It is easy to check that for $E = \R^d$ or any other hyperplane a constant function $h$ gives a solution of $\eqref{densities}$. This is what we call the flat solution (because the measure $c\Hd^d$ on $\R^d$ is flat). As we have said in the introduction, we would be happy to show that no other solutions exist in a small neighbourhood of the flat solution. This is the same as saying that there is no family of solutions with elements arbitrarily close to the flat solution. Our strategy is to linearise $\eqref{densities}$, that is, to take a derivative in some sense at the constant function $h$ and $E = \R^d$: for an arbitrary family of solutions it would be the Fr\'echet derivative. It turns out, though, that this does not work for an arbitrary family of solutions with no regularity, even when we restrict ourselves to the flat case $E = \R^d$ and try to figure out whether there is a neighbourhood of the flat solution in which no other densities $h$ solve $\eqref{densities}$. The reason for it is, morally, the following. If we suppose the contrary, we can find arbitrarily close to $h = 1$ (or to any other constant) a density $1 + \phi_t$ for which $$\ilim_{\R^d}{\frac{(1 + \phi_t)dy}{|x - y|^{n - 2}}} = c_3 \left(\ilim_{\R^d}{\frac{(1 + \phi_t)^\frac{\alpha}{n - d - 2}dy}{|x - y|^{d + \alpha}}}\right)^\frac{n - d - 2}{\alpha}, \quad \forall \; x \in \R^n \setminus \R^d,$$ where the parameter $t$ denotes, vaguely, the size of the neighbourhood to which $\phi_t$ belongs. Then we could use the Banach-Alaoglu theorem in a suitable functional space as a compactness argument to say that the family of functions $\{\phi_t\}$, suitably normalized, has a non-constant weak limit. This weak limit would satisfy a linear convolution equation, corresponding to the fact that the Fr\'echet derivative of $\eqref{densities}$ is equal to zero ``at the constant solution''. This equation, as we will see later, has no solutions except for constants, which would finish the proof by contradiction. However, the right space for the functions $\phi_t$ for the compactness argument to work turns out to be the homogeneous Besov space $\dot B^0_{\infty, \infty}$. For this space it seems that the Fr\'echet derivative does not exist. That is, we have a natural space for the compactness argument ($\dot B^0_{\infty, \infty}$), and a smaller one (for instance, BMO) for the existence of a derivative. See Subsection 5.3 for further comments about this. Fortunately, our strategy still works if we introduce a parameter $t$ and add the assumption that the family $\{\phi_t\}$ is differentiable at zero in $BMO$ with respect to this parameter, together with a reasonable boundedness assumption. This is exactly what assumptions 2 and 4 in Definitions $\ref{defn1}$ and $\ref{defn2}$ are about. We implement our strategy under these assumptions in the next subsections.
\subsection{No one-parameter families of flat solutions} In this subsection we prove Theorem $\ref{mainthm_flat}$. Suppose that there is a non-trivial one-parameter family of perturbations in the plane $\R^d$ of the flat solution of $\eqref{densities}$ with the density $h = 1$. See Definition \ref{defn1} to recall what this means. Note that, even though the densities $1 + \phi_t$ are in $L^\infty(\R^d)$ and therefore $\phi_t$ is in the space $L^\infty \cap BMO(\R^d) = L^\infty(\R^d)$, it is still more natural to consider $\phi_t$ as an element of $BMO(\R^d)$ (or as an equivalence class in $L^\infty(\R^d)/\sim$, where $f \sim g$ if $f - g$ is a constant function), because $1 + \phi_t(y) + c$ is a density function of a perturbation of the flat solution with the density $h_c = 1 + c$. We can assume that $\|\phi_t\|_\infty < 1/2$. For every $t$ the equation $\eqref{densities}$ for the solution from our family $D_{\alpha, \mu_t}$ with the density $1 + \phi_t(y)$ turns into \begin{equation}\label{take_diff} I(t, x) := \ilim_{\R^d}{\frac{(1 + \phi_t)dy}{|x - y|^{n - 2}}} - c_3\left(\ilim_{\R^d}{\frac{(1 + \phi_t)^\frac{\alpha}{n - d - 2}dy}{|x - y|^{d + \alpha}}}\right)^\frac{n - d - 2}{\alpha} = 0, \quad \forall \; x \in \R^n \setminus \R^d. \end{equation} We want to differentiate the left-hand side of $\eqref{take_diff}$ at $t = 0$. The natural candidate for the derivative at zero would be, of course, $$ \frac{\partial I(t, x)}{\partial t}(0) = \ilim_{\R^d}{\frac{\frac{\partial \phi_t}{\partial t}(0, y)dy}{|x - y|^{n - 2}}} - c_3\left(\ilim_{\R^d}{\frac{(1 + \phi_{0})^\frac{\alpha}{n - d - 2}dy}{|x - y|^{d + \alpha}}}\right)^{\frac{n - d - 2}{\alpha} - 1}\ilim_{\R^d}{\frac{(1 + \phi_{0})^{\frac{\alpha}{n - d - 2} - 1}\frac{\partial \phi_t}{\partial t}(0, y)dy}{|x - y|^{d + \alpha}}}, $$ so, by the definition of $c_2$, and since $\phi_0 = 0$, \begin{equation}\label{t'} \frac{\partial I(t, x)}{\partial t}(0) = \ilim_{\R^d}{\frac{\frac{\partial \phi_t}{\partial t}(0, y)dy}{|x - y|^{n - 2}}} - c_1c_2^{-1}\delta(x)^{d + 2 + \alpha - n}\ilim_{\R^d}{\frac{\frac{\partial \phi_t}{\partial t}(0, y)dy}{|x - y|^{d + \alpha}}}. \end{equation} Note that here we use $\frac{\partial I(t, x)}{\partial t}(0)$ just as a notation. To show that $\eqref{t'}$ is indeed the Fr\'echet derivative of $I(t, x)$ we prove the following lemma. This is when assumptions 2 and 4 from Definition $\ref{defn1}$ of the one-parameter family of perturbations come into play. \begin{lm} For every $x$ in $\R^n \setminus \R^d$ $$\left|I(t, x) - I(0, x) - t\frac{\partial I(t, x)}{\partial t}(0)\right| = \left|I(t, x) - t\frac{\partial I(t, x)}{\partial t}(0)\right| = o(t), \quad t \to 0.$$ \end{lm} \begin{proof} We need to prove that $$\left|I(t, x) - t\frac{\partial I(t, x)}{\partial t}(0)\right| = \bigg| \ilim_{\R^d}{\frac{(1 + \phi_t)dy}{|x - y|^{n - 2}}} - t\ilim_{\R^d}{\frac{\frac{\partial \phi_t}{\partial t}(0, y)dy}{|x - y|^{n - 2}}} + \bigg. $$ \begin{equation}\label{F} \bigg. + c_1c_2^{-1}\delta(x)^{d + 2 + \alpha - n}t\ilim_{\R^d}{\frac{\frac{\partial \phi_t}{\partial t}(0, y)dy}{|x - y|^{d + \alpha}}} - c_3\left(\ilim_{\R^d}{\frac{(1 + \phi_t)^\frac{\alpha}{n - d - 2}dy}{|x - y|^{d + \alpha}}}\right)^\frac{n - d - 2}{\alpha} \bigg| = o(t), \quad t \to 0.
\end{equation} Denote $$I_1(t, x) = \ilim_{\R^d}{\frac{(1 + \phi_t)dy}{|x - y|^{n - 2}}} - t\ilim_{\R^d}{\frac{\frac{\partial \phi_t}{\partial t}(0, y)dy}{|x - y|^{n - 2}}}, \quad I_2(t, x) = \left(\ilim_{\R^d}{\frac{(1 + \phi_t)^\frac{\alpha}{n - d - 2}dy}{|x - y|^{d + \alpha}}}\right)^\frac{n - d - 2}{\alpha}, \quad \mbox{and}$$ $$I_3(t, x) = c_1c_2^{-1}\delta(x)^{d + 2 + \alpha - n}t\ilim_{\R^d}{\frac{\frac{\partial \phi_t}{\partial t}(0, y)dy}{|x - y|^{d + \alpha}}} - c_3\left(\ilim_{\R^d}{\frac{(1 + \phi_t)^\frac{\alpha}{n - d - 2}dy}{|x - y|^{d + \alpha}}}\right)^\frac{n - d - 2}{\alpha}.$$ \begin{cl} \begin{equation}\label{I_2} I_2(t, x) = c_2^\frac{n - d - 2}{\alpha}\delta(x)^{d + 2 - n} + c_2^{\frac{n - d - 2}{\alpha} - 1}\delta(x)^{d + 2 + \alpha - n}\ilim_{\R^d}{\frac{\phi_t(y)dy}{|x - y|^{d + \alpha}}} + o(t), \; t \to 0. \end{equation} \end{cl} \begin{proof} This is, essentially, a simple computation which uses Taylor's theorem. Recall that $\|\phi_t\|_\infty \le 1/2 \; \forall t$. First, we rewrite the numerator inside the integral in $I_2$ as $$(1 + \phi_t(y))^\frac{\alpha}{n - d - 2} = 1 + \frac{\alpha}{n - d - 2}\phi_t(y) + \phi_t^2(y)g_t(y),$$ where $g_t(y)$ is a function bounded uniformly in $t$. Here we just used the observation that for a real number $s$ such that $|s| \le 1/2$ we have $(1 + s)^\gamma = 1 + \gamma s + c s^2$, where $c$ is a constant bounded uniformly in $s$. Second, we rewrite $I_2$ itself as $$\left(\ilim_{\R^d}{\frac{dy}{|x - y|^{d + \alpha}}} + \ilim_{\R^d}{\frac{\left(\frac{\alpha}{n - d - 2}\phi_t(y) + \phi_t(y)^2g_t(y)\right)dy}{|x - y|^{d + \alpha}}}\right)^\frac{n - d - 2}{\alpha}$$ $$ = c_2^\frac{n - d - 2}{\alpha}\delta(x)^{d + 2 - n} + c_2^{\frac{n - d - 2}{\alpha} - 1}\delta(x)^{d + 2 + \alpha - n}\ilim_{\R^d}{\frac{\phi_t(y)dy}{|x - y|^{d + \alpha}}} + \ilim_{\R^d}{\frac{\phi_t(y)^2 G_t(y)dy}{|x - y|^{d + \alpha}}},$$ where $G_t(y)$ is also a function bounded uniformly in $t$. It remains to show that the last term is $o(t)$. But $\eqref{f_bmo4}$ applied with $m = 2$, combined with the boundedness of $G_t(y)$, gives that $$\ilim_{\R^d}{\frac{\phi_t(y)^2 G_t(y)dy}{|x - y|^{d + \alpha}}} \le C(1 + \delta(x)^{- (d + \alpha)})(\|\phi_t\|_{BMO}^2 + |m_{B(0, 1)}\phi_t|^2).$$ Condition 4 in Definition $\ref{defn1}$ guarantees that $\|\phi_t\|_{BMO}^2 + |m_{B(0, 1)}\phi_t|^2 = o(t)$.
\end{proof} Claim $\eqref{I_2}$ gives that $$I_3(t, x) = -c_1\delta(x)^{d + 2 - n} + c_1c_2^{-1}\delta(x)^{d + 2 + \alpha - n}\ilim_{\R^d}{\frac{\left(t\frac{\partial \phi_t}{\partial t}(0, y) - \phi_t(y)\right)dy}{|x - y|^{d + \alpha}}} + o(t).$$ We also have that $$I_1(t, x) = c_1\delta(x)^{d + 2 - n} + \ilim_{\R^d}{\frac{\left(\phi_t(y) - t\frac{\partial \phi_t}{\partial t}(0, y)\right)dy}{|x - y|^{n - 2}}}.$$ Conditions 2 (the definition of a Fr\'echet differential) and 4 in Definition $\ref{defn1}$, combined with $\eqref{f_bmo4}$ for $m = 1$, give that $$c_1c_2^{-1}\ilim_{\R^d}{\frac{\left(t\frac{\partial \phi_t}{\partial t}(0, y) - \phi_t(y)\right)dy}{|x - y|^{d + \alpha}}} \quad \mbox{and} \quad \ilim_{\R^d}{\frac{\left(\phi_t(y) - t\frac{\partial \phi_t}{\partial t}(0, y)\right)dy}{|x - y|^{n - 2}}} \quad \mbox{are both} \; o(t),$$ and therefore $$\left|I(t, x) - t \frac{\partial I(t, x)}{\partial t}(0)\right| = \left|I_1(t, x) + I_3(t, x)\right| = o(t).$$ \end{proof} So we have finally proved that $\eqref{take_diff}$ is differentiable at zero, and by $\eqref{t'}$ the derivative is $$ \frac{\partial I(t, x)}{\partial t}(0) = \ilim_{\R^d}{\frac{\frac{\partial \phi_t}{\partial t}(0, y)dy}{|x - y|^{n - 2}}} - c_1c_2^{-1}\delta(x)^{d + 2 + \alpha - n}\ilim_{\R^d}{\frac{\frac{\partial \phi_t}{\partial t}(0, y)dy}{|x - y|^{d + \alpha}}}. $$ Since $I(t, x)$ is identically zero, the derivative also has to be zero, and therefore, by the definition of the constant $c_2$, we have the equation \begin{equation}\label{flat} \ilim_{\R^d}{\left[c_2^{-1}c_1\frac{\delta(x)^{d + 2 + \alpha - n}}{|x - y|^{d + \alpha}} - \frac{1}{|x - y|^{n - 2}}\right]\frac{\partial \phi_t}{\partial t}(0, y)dy} = 0 \quad \forall \, x \in \R^n \setminus \R^d. \end{equation} We claim that $\eqref{flat}$ implies that the derivative $\frac{\partial \phi_t}{\partial t}(0, \cdot)$ has to be a constant function, which contradicts assumption 3 in Definition $\ref{defn1}$ of a non-trivial one-parameter family. Denote $\phi(y) = \frac{\partial \phi_t}{\partial t}(0, y)$ for simplicity. \begin{lm}\label{flat3_lm} Suppose that $\phi(y)$ is a function in $BMO(\R^d)$. Then the equation \begin{equation}\label{flat2} \ilim_{\R^d}{\left[c_2^{-1}c_1\frac{\delta(x)^{d + 2 + \alpha - n}}{|x - y|^{d + \alpha}} - \frac{1}{|x - y|^{n - 2}}\right]\phi(y)dy} = 0 \quad \forall \, x \in \R^n \setminus \R^d \end{equation} implies that $\phi$ is a constant function. \end{lm} \begin{proof} The idea of the proof is that we can interpret the equation $\eqref{flat2}$ as the vanishing of the functional $\phi$ on a certain space of functions. Denote $$Ker(y) = \frac{c_2^{-1}c_1}{(1 + |y|^2)^{(d + \alpha)/2}} - \frac{1}{(1 + |y|^2)^{(n - 2)/2}}, \; y \in \R^d.$$ Note that $\ilim_{\R^d}{Ker(y)dy} = 0$. Consider the (closed) functional space $\mathcal{S}$ generated by linear combinations with coefficients $\{c_i\}$ of $L^1$-normalized translations and dilatations of the function $Ker$, with the norm $\|\cdot\| = \inf{\slim_i{|c_i|}}$, where the $\inf$ is taken over all representations of an element of $\mathcal{S}$. If $\phi$ were an element of the dual space of $\mathcal{S}$, then $\eqref{flat2}$ would mean that it acts as the zero functional on the space $\mathcal{S}$. Indeed, let $x = (x_0, r, 0 \dots 0)$ be a point in $\R^n \setminus \R^d$, where $x_0$ is the projection of $x$ to the hyperplane $\R^d$ and $r = \delta(x)$.
Observe that $$c_2^{-1}c_1\frac{\delta(x)^{d + 2 + \alpha - n}}{|x - y|^{d + \alpha}} - \frac{1}{|x - y|^{n - 2}} = r^{2 - n}\left(\frac{c_2^{-1}c_1}{(1 + |y - x_0|^2/r^2)^{(d + \alpha)/2}} - \frac{1}{(1 + |y - x_0|^2/r^2)^{(n - 2)/2}}\right)$$ $$ = r^{2 - n} Ker\left(\frac{y - x_0}{r}\right).$$ So, $\eqref{flat2}$ implies that $\phi$ vanishes on all the translations and dilatations of the function $Ker$, and therefore on all of $\mathcal{S}$. Therefore we only need to check that $BMO$ is contained in the space dual to the space $\mathcal{S}$. It is known that the space $\mathcal{S}$ itself is the homogeneous Besov space $\dot B^0_{1, 1}$, see $\cite{YM}$. Its dual space is the homogeneous Besov space $\dot B^0_{\infty, \infty}$, which is larger than $BMO$. \end{proof} \begin{rem}\label{fourier1} Note that, if we knew in addition that the Fourier transform of $\phi$ is a well-defined function, then we could apply another, rather elegant, argument to get the conclusion of Lemma $\ref{flat3_lm}$. Indeed, denote $x = (z, h)$, $z = (z_1, \dots, z_d)$, $h = (h_{d + 1}, \dots, h_n)$ and $$f_h(z - y) = c_2^{-1}c_1\frac{\delta(x)^{d + 2 + \alpha - n}}{|x - y|^{d + \alpha}} - \frac{1}{|x - y|^{n - 2}} = \frac{c_2^{-1}c_1|h|^{d + \alpha + 2 - n}}{(|h|^2 + |z - y|^2)^{(d + \alpha)/2}} - \frac{1}{(|h|^2 + |z - y|^2)^{(n - 2)/2}}.$$ With this notation $\eqref{flat2}$ transforms into a convolution equation $$f_h * \phi(z) = 0 \quad \forall z \in \R^d.$$ If the Fourier transform is defined for $\phi$, then the convolution equation above becomes $$\widehat{f_h * \phi} = \hat{f_h}\hat{\phi} = 0.$$ This implies that $\hat{\phi}$ can be nonzero only on the set of the common zeros of the functions $\hat{f_h}$ for every $h$ (or, rather, every value of $|h|$). What can this set be? First, observe that $\zeta = 0$ is a zero of $\hat{f_h}$ for every $h$. Second, since $\hat{f_h}$ is radially symmetric, the set of zeros of $\hat{f_{h_1}}$ and of $\hat{f_{h_2}}$ for $h_1$ and $h_2$ such that $|h_1| = |h_2|$ is the same. Third, suppose that $\zeta$ is a zero of $\hat{f_1}$; since, by the definition of $f_h$, $f_h(y) = |h|^{2 - n}f_1(y/|h|)$ and hence $\hat{f_h}(\zeta') = |h|^{d + 2 - n}\hat{f_1}(|h|\zeta')$, the point $\zeta/|h|$ is then a zero of $\hat{f_h}$. The latter observation means that if $\zeta \neq 0$ were a common zero of $\hat{f_h}$ for all $h$, then the set $\R \zeta$ would also be contained in the set of common zeros of $\hat{f_h}$. Thus $\hat{f_h}$ would have to be zero everywhere on $\R^d$ for every $h$, which is clearly false. We conclude then that $\hat{\phi} = 0$ everywhere on $\R^d$ except possibly at zero. This implies $\hat{\phi} = c\delta_0$, and so $\phi$ is equal to a constant. \end{rem} \subsection{No one-parameter families of Lipschitz graph solutions} In this subsection we prove Theorem $\ref{mainthm_graph}$. Recall what a non-trivial one-parameter differentiable family of smooth graph perturbations of the flat solution is: see Definition $\ref{defn2}$. Our scheme of proof will be essentially the same as in Subsection 5.1, but the expressions we are forced to work with are more bulky. Our initial assumptions are that the functions $\psi_t$ are $Ct$-Lipschitz on $\R^d$. However, it is well known that if a function $f$ is $\Lambda$-Lipschitz, then it is in $W^{1, \infty}(\R^d)$ and one has $\|\nabla f\|_\infty \le \Lambda$. The Rademacher-Calder\'on theorem then gives that the function $f$ is truly differentiable almost everywhere.
So without loss of generality we can assume that every $\psi_t$ is differentiable and moreover that $\|D \psi_t\|_\infty \le Ct$. The latter implies the conditions \begin{equation}\label{graph_cond} \|\frac{\partial \psi_t(y)}{\partial y_i}\|_{BMO} \le Ct \quad \mbox{and} \quad \Big|\ilim_{B(0, 1)}{\frac{\partial \psi_t(y)}{\partial y_i}dy}\Big| \le Ct \end{equation} for all $i$, which we will use later on. Here $\eqref{graph_cond}$ is understood componentwise: the $BMO$ norm of every component of the vector $\frac{\partial \psi_t(y)}{\partial y_i}$ and the modulus of every component of the vector $\ilim_{B(0, 1)}{\frac{\partial \psi_t(y)}{\partial y_i}dy}$ admit the estimate $\eqref{graph_cond}$. Denote by $\eta_t$ the map $\Id + \psi_t$. According to the formula for the surface measure on a Lipschitz graph, one can rewrite $\eqref{densities}$ as \begin{equation}\label{diff_gr} J(t, x) = c_3 J_1(t, x) - J_2(t, x) = 0 \quad \forall \; x \in \R^n \setminus \eta_t(\R^d), \quad \mbox{where} \end{equation} $$J_1(t, x) = \left(\ilim_{\R^d}{\frac{(1 + \phi_t(y))^\frac{\alpha}{n - d - 2}\sqrt{|\mydet((D\eta_t)^T D\eta_t)|}dy}{|x - \eta_t(y)|^{d + \alpha}}}\right)^\frac{n - d - 2}{\alpha} \quad \mbox{and}$$ $$J_2(t, x) = \ilim_{\R^d}{\frac{(1 + \phi_t(y))\sqrt{|\mydet((D\eta_t)^T D\eta_t)|}dy}{|x - \eta_t(y)|^{n - 2}}}.$$ We keep the assumption $\|\phi_t\|_\infty \le 1/2$. Again, we want to take the Fr\'echet derivative of the left-hand side of $\eqref{diff_gr}$ at zero, and the natural candidate for it is $$\frac{\partial J(t, x)}{\partial t}(0) = c_1c_2^{-1}\frac{n - d - 2}{\alpha}\delta(x)^{\alpha + d + 2 - n}\left[\ilim_{\R^d}{\frac{\frac{\alpha}{n - d - 2}\frac{\partial \phi_t}{\partial t}(0, y)}{|x - y|^{d + \alpha}}dy} - \ilim_{\R^d}{\frac{(d + \alpha)\langle x, \frac{\partial \psi_t}{\partial t}(0, y)\rangle}{|x - y|^{d + \alpha + 2}}dy}\right] $$ \begin{equation}\label{diff_graph} - \ilim_{\R^d}{\frac{\frac{\partial \phi_t}{\partial t}(0, y)}{|x - y|^{n - 2}}dy} + \ilim_{\R^d}{(n - 2)\frac{\langle x, \frac{\partial \psi_t}{\partial t}(0, y)\rangle}{|x - y|^n}dy}. \end{equation} Here we just formally differentiated $J(t, x)$ and used condition 1 in Definition $\ref{defn2}$: as we shall see, we are lucky that the square roots in $\eqref{diff_gr}$ only contribute terms of order two. Once again, until the end of the proof of the following lemma, $\frac{\partial J(t, x)}{\partial t}(0)$ is just a notation. \begin{lm} \begin{equation}\label{o(t)} \left|J(t, x) - t \frac{\partial J(t, x)}{\partial t}(0)\right| = o(t), \quad t \to 0. \end{equation} \end{lm} \begin{proof} We start by proving a technical fact analogous to $\eqref{I_2}$. The proof is ideologically the same, but looks bulkier, because we typically need to multiply three expansions from Taylor's theorem. In addition, we need to deal with the determinant of the metric tensor induced by the map $\eta_t$. \begin{cl} $$J_2(t, x) = c_1\delta(x)^{d + 2 - n} + \ilim_{\R^d}{\frac{\phi_t(y)dy}{|x - y|^{n - 2}}} - (n - 2)\ilim_{\R^d}{\frac{\langle x, \psi_t(y)\rangle dy}{|x - y|^n}} + o(t), \quad t \to 0, \quad \mbox{and}$$ $$J_1(t, x) = c_2^\frac{n - d - 2}{\alpha}\delta(x)^{d + 2 - n} + c_2^{\frac{n - d - 2}{\alpha} - 1}\delta(x)^{d + 2 + \alpha - n}\frac{n - d - 2}{\alpha}\left[\ilim_{\R^d}{\frac{\frac{\alpha}{n - d - 2}\phi_t(y)dy}{|x - y|^{d + \alpha}}}\right.$$ $$\left.
- \ilim_{\R^d}{\frac{(d + \alpha)\langle x, \psi_t(y)\rangle dy}{|x - y|^{d + \alpha + 2}}}\right] + o(t), \quad t \to 0.$$ \end{cl} \begin{proof} Let us deal first with the square root of the modulus of the determinant of the metric tensor induced by $\eta_t$ in $\eqref{diff_gr}$. Denote by $(\psi_t)_j, j = d + 1, \dots, n$, the projection of $\psi_t$ on the coordinate axis $j$. Then the differential $D \eta_t$ is the linear map $\R^d \to \R^n$ represented by the $n \times d$ matrix in which the only non-zero elements of the first $d$ rows are the ones on the diagonal: $$ \begin{pmatrix} 1 & \dots & 0\\ \vdots & \ddots & \vdots\\ 0 & \dots & 1\\ \frac{\partial (\psi_t)_{d + 1}}{\partial y_1} & \dots & \frac{\partial (\psi_t)_{d + 1}}{\partial y_d}\\ \vdots & \vdots & \vdots\\ \frac{\partial (\psi_t)_{n}}{\partial y_1} & \dots & \frac{\partial (\psi_t)_{n}}{\partial y_d} \end{pmatrix}. $$ Therefore an element $a_{ij}$ of the $d \times d$ matrix $A = (a_{ij})= (D\eta_t)^T D\eta_t$ is equal to $\chi_{i = j} + \slim_{k = d + 1}^n{\frac{\partial (\psi_t)_k}{\partial y_i}\frac{\partial (\psi_t)_k}{\partial y_j}}$, where $\chi_{i = j}$ is one if $i = j$ and zero otherwise. So, the determinant $\mydet((D\eta_t)^T D\eta_t)$ is of the form $$1 + M_t(y) + R_t(y),$$ where $M_t(y) = \slim_{i = 1}^d{\slim_{k = d + 1}^n{\left(\frac{\partial (\psi_t)_k}{\partial y_i}\right)^2}}$ and $R_t(y)$ is the rest of the determinant: a sum of products each of which contains at least four partial derivatives. The point of decomposing the determinant in this way is that the term $M_t(y)$ has $BMO$ norm and average over the unit ball of order $t^{2}$, while the $BMO$ norm and the average over the unit ball of the remaining term $R_t$ decay even faster. ~\ With this at hand, observe that $$(1 + \phi_t(y))^\frac{\alpha}{n - d - 2} = 1 + \frac{\alpha}{n - d - 2}\phi_t(y) + \phi_t(y)^2 g_1(t, y),$$ $$ |\mydet((D\eta_t)^T D\eta_t)|^{1/2} = 1 + (M_t(y) + R_t(y))g_2(t, y),$$ $$|x - \eta_t(y)|^{-(n - 2)} = |x - y|^{-(n - 2)} - (n - 2)|x - y|^{-n}\langle x, \psi_t(y)\rangle + (\langle x, \psi_t(y)\rangle^2 + |\psi_t(y)|^2) |x - y|^{-n} g_3(t, y), \quad \mbox{and}$$ $$|x - \eta_t(y)|^{-(d + \alpha)} = |x - y|^{-(d + \alpha)} - (d + \alpha)|x - y|^{-(d + \alpha + 2)}\langle x, \psi_t(y)\rangle$$ $$+ (\langle x, \psi_t(y)\rangle^2 + |\psi_t(y)|^2) |x - y|^{-(d + \alpha + 2)}g_4(t, y),$$ where the $g_i(t, y)$ are functions bounded uniformly in $t$. ~\ We deal with $J_2(t, x)$ first. The product under the integral in $J_2(t, x)$, $$(1 + \phi_t(y))(1 + (M_t(y) + R_t(y))g_2(t, y))(|x - y|^{-(n - 2)}$$ $$ - (n - 2)|x - y|^{-n}\langle x, \psi_t(y)\rangle + (|\psi_t(y)|^2 + \langle x, \psi_t(y)\rangle^2) |x - y|^{-n}g_3(t, y)),$$ is equal to $$|x - y|^{-(n - 2)} + \phi_t(y)|x - y|^{-(n - 2)} - (n - 2)|x - y|^{-n}\langle x, \psi_t(y)\rangle - \phi_t(y)(n - 2)|x - y|^{-n}\langle x, \psi_t(y)\rangle$$ $$ + (|\psi_t(y)|^2 + \langle x, \psi_t(y)\rangle^2 + (M_t(y) + R_t(y)))|x - y|^{-(n - 2)}G_t(y), $$ where $G_t(y)$ is a function bounded uniformly in $t$.
Integrating, we have $$J_2(t, x) = c_1\delta(x)^{d + 2 - n} + \ilim_{\R^d}{\frac{\phi_t(y)dy}{|x - y|^{n - 2}}} - (n - 2)\ilim_{\R^d}{\frac{\langle x, \psi_t(y)\rangle dy}{|x - y|^n}}$$ $$ - (n - 2)\ilim_{\R^d}{\frac{\phi_t(y)\langle x, \psi_t(y)\rangle dy}{|x - y|^n}} + \ilim_{\R^d}{\frac{(|\psi_t(y)|^2 + \langle x, \psi_t(y)\rangle^2 + (M_t(y) + R_t(y)))G_t(y)dy}{|x - y|^{n - 2}}}.$$ By $\eqref{f_bmo4}$ and $\eqref{Ho}$, combined with condition 4 in Definition $\ref{defn2}$ and $\eqref{graph_cond}$, the last two terms are $o(t)$. ~\ Now we deal with $J_1(t, x)$. The product under the integral in $J_1(t, x)$ $$\left(1 + \frac{\alpha}{n - d - 2}\phi_t(y) + \phi_t(y)^2g_1(t, y)\right)\left(1 + (M_t(y) + R_t(y))g_2(t, y))(|x - y|^{-(d + \alpha)}\right.$$ $$ \left. - (d + \alpha)|x - y|^{- (d + \alpha + 2)}\langle x, \psi_t(y)\rangle + (\langle x, \psi_t(y)\rangle^2 + |\psi_t(y)|^2) |x - y|^{-(d + \alpha + 2)}g_4(t, y)\right)$$ is equal to $$|x - y|^{-(d + \alpha)} + \frac{\alpha}{n - d - 2}\phi_t(y)|x - y|^{- (d + \alpha)} - (d + \alpha)|x - y|^{- (d + \alpha + 2)}\langle x, \psi_t(y)\rangle $$ $$ - \frac{\alpha}{n - d - 2}\phi_t(y)(d + \alpha)|x - y|^{- (d + \alpha + 2)}\langle x, \psi_t(y)\rangle$$ $$ + (|\psi_t(y)|^2 + \langle x, \psi_t(y)\rangle^2 + (M_t(y) + R_t(y)))|x - y|^{-(d + \alpha)}H_t(y), $$ where $H_t(y)$ is a function bounded uniformly in $t$. Therefore we have $$J_1(t, x) = c_2^\frac{n - d - 2}{\alpha}\delta(x)^{d + 2 - n} + c_2^{\frac{n - d - 2}{\alpha} - 1}\delta(x)^{d + 2 + \alpha - n}\ilim_{\R^d}{\frac{\phi_t(y)dy}{|x - y|^{d + \alpha}}}$$ $$- c_2^{\frac{n - d - 2}{\alpha} - 1}\delta(x)^{d + 2 + \alpha - n}\frac{n - d - 2}{\alpha}\ilim_{\R^d}{\frac{(d + \alpha)\langle x, \psi_t(y)\rangle dy}{|x - y|^{d + \alpha + 2}}}$$ $$ + \ilim_{\R^d}{\frac{(\phi_t(y)\langle x, \psi_t(y)\rangle + \langle x, \psi_t(y)\rangle^2 + |\psi_t(y)|^2 + (M_t(y) + R_t(y)))H'_t(y)dy}{|x - y|^{d + \alpha}}},$$ where $H'_t(y)$ is another function bounded uniformly in $t$. By $\eqref{f_bmo4}$ and $\eqref{Ho}$, combined with condition 4 in Definition $\ref{defn2}$ and $\eqref{graph_cond}$, the last term is $o(t)$. \end{proof} Denote $$J_3(t, x) = c_3J_1(t, x) - c_1\delta(x)^{d + 2 - n} - c_1c_2^{-1}\frac{n - d - 2}{\alpha}\delta(x)^{\alpha + d + 2 - n}t \cdot $$ $$\cdot\left[\ilim_{\R^d}{\frac{\frac{\alpha}{n - d - 2}\frac{\partial \phi_t}{\partial t}(0, y)}{|x - y|^{d + \alpha}}dy} - \ilim_{\R^d}{\frac{(d + \alpha)\langle x, \frac{\partial \psi_t}{\partial t}(0, y)\rangle}{|x - y|^{d + \alpha + 2}}dy}\right]$$ and $$J_4(t, x) = J_2(t, x) - c_1\delta(x)^{d + 2 - n} - t\ilim_{\R^d}{\frac{\frac{\partial \phi_t}{\partial t}(0, y)}{|x - y|^{n - 2}}dy} + t\ilim_{\R^d}{(n - 2)\frac{\langle x, \frac{\partial \psi_t}{\partial t}(0, y)\rangle}{|x - y|^n}dy}.$$ In this notation we have $$\left|J(t, x) - t \frac{\partial J(t, x)}{\partial t}(0)\right| = |J_3(t, x) - J_4(t, x)|,$$ because the constant terms cancel out. It only remains to show that the claim above gives $|J_{k}(t, x)| = o(t)$ for $k = 3, 4$, which finishes the proof.
Indeed, the claim asserts that $$J_4(t, x) = \ilim_{\R^d}{\frac{\phi_t(y)dy}{|x - y|^{n - 2}}} - (n - 2)\ilim_{\R^d}{\frac{\langle x, \psi_t(y)\rangle dy}{|x - y|^n}} + o(t) - $$ $$- t\ilim_{\R^d}{\frac{\frac{\partial \phi_t}{\partial t}(0, y)}{|x - y|^{n - 2}}dy} + t\ilim_{\R^d}{(n - 2)\frac{\langle x, \frac{\partial \psi_t}{\partial t}(0, y)\rangle}{|x - y|^n}dy}.$$ But the condition 2 in Definition $\ref{defn2}$, combined with $\eqref{f_bmo4}$ applied with $m = 1$, gives that $$\ilim_{\R^d}{\frac{\phi_t(y)dy}{|x - y|^{n - 2}}} - t\ilim_{\R^d}{\frac{\frac{\partial \phi_t}{\partial t}(0, y)}{|x - y|^{n - 2}}dy} = o(t) \quad \mbox{and}$$ $$\ilim_{\R^d}{\frac{\langle x, \psi_t(y)\rangle dy}{|x - y|^n}} - t\ilim_{\R^d}{\frac{\langle x, \frac{\partial \psi_t}{\partial t}(0, y)\rangle}{|x - y|^n}dy} = o(t).$$ The integral $J_3(t, x)$ can be treated the same way. \end{proof} ~\ We continue studying $\eqref{diff_gr}$. Since $J(t, x)$ is identically zero, the derivative $\frac{\partial J(t, x)}{\partial t}$, given by $\eqref{diff_graph}$ at zero vanishes as well, which gives $$\ilim_{\R^d}{\frac{\partial \phi}{\partial t}(0, y)\left[\frac{1}{|x - y|^{n - 2}} - \frac{c_1c_2^{-1}\delta(x)^{\alpha + d + 2 - n}}{|x - y|^{d + \alpha}}\right]dy} $$ $$ = \ilim_{\R^d}{\langle \frac{\partial \psi_t}{\partial t}(0, y), x \rangle\left[\frac{n - 2}{|x - y|^n} - \frac{c_1c_2^{-1}(d + \alpha)\frac{n - d - 2}{\alpha}\delta(x)^{\alpha + d + 2 - n}}{|x - y|^{d + \alpha + 2}}\right]dy}.$$ Without loss of generality we can assume that the projection $\frac{\partial \psi_t}{\partial t}_{d + 1}(0, y)$ of $\frac{\partial \psi_t}{\partial t}(0, y)$ to the axis $d + 1$ is not a constant function. Assume that $x$ is of the form $(x_1, \dots, x_d, x_{d + 1}, 0, \dots)$. Then, with the notation $\psi(y) = \frac{\partial \psi_t}{\partial t}_{d + 1}(0, y)$ and $\phi(y) = \frac{\partial \phi}{\partial t}(0, y)$, the equation above transforms into $$\ilim_{\R^d}{\phi(y)\left[\frac{1}{|x - y|^{n - 2}} - \frac{c_1c_2^{-1}\delta(x)^{\alpha + d + 2 - n}}{|x - y|^{d + \alpha}}\right]dy} = $$ \begin{equation}\label{homo_ker} \ilim_{\R^d}{\psi(y)x_{d + 1}\left[\frac{n - 2}{|x - y|^n} - \frac{c_1c_2^{-1}(d + \alpha)\frac{n - d - 2}{\alpha}\delta(x)^{\alpha + d + 2 - n}}{|x - y|^{d + \alpha + 2}}\right]dy} \quad \forall x \in \R^{d + 1}. \end{equation} \begin{lm}\label{blm2} The equation $\eqref{homo_ker}$ implies that the function $\psi$ is zero. \end{lm} \begin{proof} With the notation $h = |x_{d + 1}|$ and $x_0 = (x_1, \dots, x_d)$ we can rewrite the equation $\eqref{homo_ker}$ as $$\ilim_{\R^d}{h^{2 - n}\phi(y)\left[\frac{1}{(1 + |x_0 - y|^2/h^2)^{(n - 2)/2}} - \frac{c_1c_2^{-1}}{(1 + |x_0 - y|^2/h^2)^{(d + \alpha)/2}}\right]dy} = $$ \begin{equation}\label{homo_ker2} \ilim_{\R^d}{h^{1 - n}\psi(y)\left[\frac{n - 2}{(1 + |x_0 - y|^2/h^2)^{n/2}} - \frac{c_1c_2^{-1}(d + \alpha)\frac{n - d - 2}{\alpha}}{(1 + |x_0 - y|^2/h^2)^{(d + \alpha + 2)/2}}\right]dy} \quad \forall h > 0, \; x_0 \in \R^d. 
\end{equation} Denote $$g_h(y) = \frac{1}{(1 + |y|^2/h^2)^{(n - 2)/2}} - \frac{c_1c_2^{-1}}{(1 + |y|^2/h^2)^{(d + \alpha)/2}} \quad \mbox{and}$$ $$f_h(y) = \frac{n - 2}{(1 + |y|^2/h^2)^{n/2}} - \frac{c_1c_2^{-1}(d + \alpha)\frac{n - d - 2}{\alpha}}{(1 + |y|^2/h^2)^{(d + \alpha + 2)/2}}.$$ Since the Fourier transform is defined for all of the terms in $\eqref{homo_ker2}$, we can rewrite the equation on the Fourier-transform side and get that \begin{equation}\label{graph_fourier} h\hat{\phi}(\zeta)\hat{g_h}(\zeta) = \hat{\psi}(\zeta)\hat{f_h}(\zeta) \quad \forall \zeta \in \R^d \; \forall h > 0. \end{equation} \begin{rem} It is important for our argument that every term in the products in $\eqref{graph_fourier}$ is a well-defined function. This is the reason why in Definition $\ref{defn2}$ we asked the derivatives $\phi$ and $\psi$ to be in $L^1$. But the operators of convolution with the kernels $g_h$ and $f_h$ are very good: for sure they are classical Calder\`on-Zygmund operators (one can check easily that the kernels satisfy the H\"ormander condition). So one could ask some other reasonable regularity from $\phi$ and $\psi$, as long as their Fourier transforms are well-defined functions. So, instead of writing $\phi, \psi \in L^1 \cap BMO(\R^d)$ in Definition $\ref{defn2}$ we could write $\phi, \psi \in L^p \cap BMO(\R^d)$ for any $p \in [1, 2]$: see \cite{LL}, Sections 5.6 and 5.7. \end{rem} The rest of the argument relies on asymptotics of the functions $\hat{g_h}$ and $\hat{f_h}$ at $h \approx 0$. First, observe that $\hat{g_h}(\zeta) = \hat{g_1}(h\zeta)$ and $\hat{f_h}(\zeta) = \hat{f_1}(h\zeta)$. Next, the Fourier transforms $\hat{g_h}(\zeta)$ and $\hat{f_h}(\zeta)$ are rather easy to compute. Indeed, one has, by the integral definition of the modified Bessel function of the second kind $K_b(z)$, \begin{equation}\label{ft} \left(\frac{1}{(1 + |y|^2)^{a/2}}\right)^{\widehat{}}(h\zeta) = \frac{2^{1 - a/2}h^{(a - 1)/2}|\zeta|^{(a - 1)/2}K_{\frac{a - 1}{2}}(h|\zeta|)}{\Gamma\left(\frac{a}{2}\right)}, \end{equation} where $$K_b(z) = \frac{\Gamma(b + \frac{1}{2})(2z)^b}{\sqrt{\pi}}\ilim_0^\infty{\frac{\cos(t)dt}{(t^2 + z^2)^{(b + 1)/2}}}.$$ From the series representation of $K_b$ (see, for example, $\cite{AS}$) it follows that, if $b > 0$, for small arguments $z$ one has $K_b(z) \sim \frac{\Gamma(b)}{2}\left(\frac{2}{z}\right)^b$, where by $\sim$ we denote the asymptotic equivalence of functions. Wherefore $$\left(\frac{1}{(1 + |y|^2)^{a/2}}\right)^{\widehat{}}(h\zeta) \sim \frac{2^{-1/2}\Gamma\left(\frac{a - 1}{2}\right)}{\Gamma\left(\frac{a}{2}\right)}$$ for $\zeta \neq 0$ fixed and $h$ small. 
Then for fixed $\zeta \neq 0$ and small $h$ we have $$\hat{g_h}(\zeta) \sim \frac{\Gamma\left(\frac{n - 3}{2}\right)}{\Gamma\left(\frac{n - 2}{2}\right)} - \frac{c_1c_2^{-1}\Gamma\left(\frac{d + \alpha - 1}{2}\right)}{\Gamma\left(\frac{d + \alpha}{2}\right)}, \quad \mbox{and}$$ $$\hat{f_h}(\zeta) \sim \frac{(n - 2)\Gamma\left(\frac{n - 1}{2}\right)}{\Gamma\left(\frac{n}{2}\right)} - \frac{c_1c_2^{-1}\frac{n - d - 2}{\alpha}(d + \alpha)\Gamma\left(\frac{d + \alpha + 1}{2}\right)}{\Gamma\left(\frac{d + \alpha + 2}{2}\right)}.$$ Since $$c_1c_2^{-1} = \frac{\Gamma\left(\frac{n - d - 2}{2}\right)\Gamma\left(\frac{d + \alpha}{2}\right)}{\Gamma\left(\frac{n - 2}{2}\right)\Gamma\left(\frac{\alpha}{2}\right)},$$ $$\hat{g_h}(\zeta) \sim C_g := \Gamma\left(\frac{n - 3}{2}\right)\Gamma\left(\frac{\alpha}{2}\right) - \Gamma\left(\frac{n - d - 2}{2}\right)\Gamma\left(\frac{d + \alpha - 1}{2}\right) \quad \mbox{and}$$ $$\hat{f_h}(\zeta) \sim C_f := \Gamma\left(\frac{n - 1}{2}\right)\Gamma\left(\frac{\alpha + 2}{2}\right) - \Gamma\left(\frac{n - d}{2}\right)\Gamma\left(\frac{d + \alpha + 1}{2}\right).$$ Observe that, for fixed $\alpha > 0$ which is not ``magic'', $C_f$ and $C_g$ are never simultaneously zero except for the case $d = 1$. Indeed, using the relation $\Gamma(z + 1) = z\Gamma(z)$, it is easy to see that $$C_g = \frac{\Gamma\left(\frac{n - 1}{2}\right)\Gamma\left(\frac{\alpha + 2}{2}\right)}{\frac{n - 3}{2}\frac{\alpha}{2}} - \frac{\Gamma\left(\frac{n - d}{2}\right)\Gamma\left(\frac{d + \alpha + 1}{2}\right)}{\frac{n - d - 2}{2}\frac{d + \alpha - 1}{2}}.$$ Since $\Gamma\left(\frac{n - 1}{2}\right)\Gamma\left(\frac{\alpha + 2}{2}\right) = \Gamma\left(\frac{n - d}{2}\right)\Gamma\left(\frac{d + \alpha + 1}{2}\right)$ when $C_f = 0$, if $C_g$ is also zero, one has $(n - d - 2)(d + \alpha - 1) = (n - 3)\alpha$, which is true either for the ``magic'' $\alpha$ or for $d = 1$. For the case when $C_g = C_f = 0$ we have to use the next term of the expansion of $K_b$ at $h \approx 0$: $K_b(z) \sim \frac{\Gamma(b)}{2}\left(\frac{2}{z}\right)^b + \frac{\Gamma(b)}{2(1 - b)}\left(\frac{2}{z}\right)^{b - 2}$. Note that for all the cases when we will use the formula the parameter $b$ in it will be not equal to one. The second term of the expansion gives that, if $C_f = C_g = 0$, $$\hat{g_h}(\zeta) \sim h^2C_g' \quad \mbox{with} \; C_g' = \frac{\Gamma\left(\frac{n - 3}{2}\right)}{4 - n} - \frac{\Gamma\left(\frac{d + \alpha - 1}{2}\right)\Gamma\left(\frac{n - d - 2}{2}\right)}{(2 - d - \alpha)\Gamma\left(\frac{\alpha}{2}\right)} \quad \mbox{and}$$ $$\hat{f_h}(\zeta) \sim h^2C_f' \quad \mbox{with} \; C_f' = \frac{\Gamma\left(\frac{n - 1}{2}\right)}{2 - n} + \frac{\Gamma\left(\frac{n - d}{2}\right)\Gamma\left(\frac{d + \alpha + 1}{2}\right)}{(d + \alpha)\Gamma\left(\frac{\alpha + 2}{2}\right)},$$ where $C_f'$ and $C_g'$ are not equal to zero if $\alpha \neq n - d - 2$: this is easy to see since $C_f = C_g = 0$ gives $\Gamma\left(\frac{n - 1}{2}\right)\Gamma\left(\frac{\alpha + 2}{2}\right) = \Gamma\left(\frac{n - d}{2}\right)\Gamma\left(\frac{d + \alpha + 1}{2}\right)$ and $\Gamma\left(\frac{n - 3}{2}\right)\Gamma\left(\frac{\alpha}{2}\right) = \Gamma\left(\frac{n - d - 2}{2}\right)\Gamma\left(\frac{d + \alpha - 1}{2}\right)$. Let us return now to $\eqref{graph_fourier}$ and restrict ourselves to small $h$. Fix $\zeta \neq 0$. 
Observe that asymptotically the equation $\eqref{graph_fourier}$ looks like \begin{equation}\label{gr_f_asymp} h\hat{\phi}(\zeta)(C_g + h^2C_g') = \hat{\psi}(\zeta)(C_f + h^2C_f') \quad \forall \; h > 0 \; \mbox{small}. \end{equation} It is now clear that $\eqref{graph_fourier}$ can never be true unless $\hat{\psi}(\zeta) = 0$. Suppose the contrary. Then we must have $C_f = 0$ in $\eqref{gr_f_asymp}$. If $\hat{\phi}(\zeta)$ is also not zero, then $C_g, C_g'$ and $C_f'$ are zero in $\eqref{gr_f_asymp}$, but we saw above that this cannot be true. This implies $\hat{\phi}(\zeta) = 0$, but then $C_f'$ has to be zero as well. At the same time, from the computations above, we saw that $C_f$ and $C_f'$ can be simultaneously zero only if $\alpha$ is ``magic''. A contradiction. So we have that $\hat{\psi}(\zeta) = 0$ everywhere except possibly for $\zeta = 0$. Therefore the function $\psi$ can only be a constant, and, being in $L^1$, it must then be zero; this also contradicts the definition of a non-trivial one-parameter differentiable family of graph perturbations. \end{proof} \subsection{A comment on a sporadic family versus a one-parameter family, and BMO versus the Besov space} We would like to give some more comments on why the plan we described at the beginning of this section for proving the hypothesis related to $\eqref{solution}$ stated in the introduction does not work. Essentially, two more technical steps separate the results of Theorems $\ref{mainthm_flat}$ and $\ref{mainthm_graph}$ from a proof of the hypothesis that there are no solutions of the equation $\eqref{solution}$ in a small neighbourhood of the flat solution. To be more specific, let us discuss just the flat case: the case of perturbations of the density of a measure on $\R^d$. Recall that if we suppose the hypothesis to be false, we get a discrete family $\{1 + \phi_t\}$ of non-flat solutions of $\eqref{densities}$, because solutions should exist arbitrarily close to the flat one. The first step is bridging the gap between such a family of solutions, which we will call sporadic, and a family of perturbations with a continuous parameter $t$ (recall that the family in Definition $\ref{defn1}$ is parametrized by an interval $[0, t_0)$). We claim that this step would not be a big deal. An attentive look at Definition $\ref{defn1}$ and the proof of Theorem $\ref{mainthm_flat}$ shows that continuity of the parameter $t$ is not really needed anywhere. What we really need is the existence of a non-trivial limit $F$ of the sequence $\{\frac{1}{t}\phi_t\}$, indexed by an arbitrary set of values of $t$, not necessarily an interval -- a surrogate for the Fr\'echet derivative. The Banach-Alaoglu theorem, which we already mentioned above, would guarantee the existence as long as an estimate of the sort $\frac{1}{t}\|\phi_t\| \sim 1$ is provided. The latter would not be a problem: we would just need to say that the value of the parameter $t$ is morally the same as $\|\phi_t\|$, where the norm could be a norm in any space we need. But we would also need to make the second step and to prove that the limit $F$ is a constant. This, as far as the author can see, seems to be (too) hard to do. In Subsection $5.1$ we are able to do this because we fixed in advance the functional space we work in -- the $BMO$ space. If $\{\frac{1}{t}\phi_t\}$ is ``differentiable'' in $BMO$, we can prove that $\eqref{densities}$ is also differentiable and conclude that, since $\eqref{flat}$ is true for $F$ instead of $\frac{\partial \phi_t}{\partial t}(0, y)$, $F$ is zero in $BMO$.
However, for an arbitrary sporadic family of perturbations we have no a priori indications that the ``derivative'' $F$ is a $BMO$ function. Moreover, the ``smallest'' space possible for it to fit in is indicated by the equation $\eqref{flat}$. Recall that this equation says that $F$ acts as the zero functional on the space $\mathcal{S}$ generated by linear combinations of the family of functions $\{c_2^{-1}c_1\frac{\delta(x)^{d + 2 + \alpha - n}}{|x - y|^{d + \alpha}} - \frac{1}{|x - y|^{n - 2}}\}_{x \in \R^n \setminus \R^d}$. This would imply that $F$ is a constant if $F$ were an element of the space dual to the space $\mathcal{S}$. So, we would do just fine with the assumption that $F$ is an element of $BMO$ if $\mathcal{S}$ were the predual of $BMO$ (or smaller). But we saw already that the latter is not true: the space $\mathcal{S}$ is the homogeneous Besov space $\dot B^0_{1, 1}$, and its dual is the homogeneous Besov space $\dot B^0_{\infty, \infty}$, which is larger than $BMO$. Therefore the strongest assumption we could have made without loss of generality in our argument is that $F$ lives in $\dot B^0_{\infty, \infty}$. But for this space we have certain indications that one cannot prove that the existence of the ``derivative'' $F$ implies differentiability of $\eqref{densities}$. Namely, the estimates of the type $\eqref{f_bmo4}$ for the appropriate norm ($\|\cdot\|_{\dot B^0_{\infty, \infty}}$ instead of $\|\cdot\|_{BMO}$) are false, morally, because functions from this Besov space need not be locally integrable. \section{No smooth solutions for the hyperplane} In this section we describe an attempt to solve the equation $\eqref{densities}$ on the hyperplane $\R^d$ directly, and we prove Theorem $\ref{mainthm_direct}$ from the Introduction, which asserts that for $E = \R^d$ there are no solutions $D_{\alpha, \mu}$ of $\eqref{solution}$ among the measures $\mu$ with non-constant densities of class $C^{2, \varepsilon}(\R^d)$, $0 < \varepsilon < \varepsilon_0$. Let $D_{\alpha, \mu}$ be a solution of the equation $L_\alpha D_\alpha = 0$ in the flat case, that is, with $E = \R^d$, and let $f \in L^\infty(\R^d)$ be the density of the measure $\mu$. Then the corresponding density function $h$ (which, by $\eqref{dens_relation}$, is a constant multiple of $f^\frac{n - d - 2}{\alpha}$), as discussed at the beginning of Section 5, satisfies the equation \begin{equation}\label{fsolution} \delta(x)^{n - d - 2}\ilim_{\R^d}{\frac{h(y)dy}{|x - y|^{n - 2}}} = c_3\left(\delta(x)^\alpha\ilim_{\R^d}{\frac{h(y)^\frac{\alpha}{n - d - 2}dy}{|x - y|^{d + \alpha}}}\right)^\frac{n - d - 2}{\alpha}, \quad \forall \; x \in \R^n \setminus \R^d. \end{equation} This is just $\eqref{densities}$ multiplied by the correct power of $\delta(x)$. We assume from now on that $h \in C^{2, \varepsilon}(\R^d)$ for a fixed $\varepsilon$. Then we can write a Taylor expansion at an arbitrary point $y_0 \in \R^d$: $$h(y) = h(y_0) + \langle \nabla h(y_0), y - y_0\rangle + (y - y_0)^T \Hess h(y_0) (y - y_0) + o(|y - y_0|^2), \quad \mbox{and}$$ $$h(y)^\frac{\alpha}{n - d - 2} = h(y_0)^\frac{\alpha}{n - d - 2} + h(y_0)^{\frac{\alpha}{n - d - 2} - 1}\frac{\alpha}{n - d - 2}\left(\langle \nabla h(y_0), y - y_0\rangle \right.$$ $$\left. + (y - y_0)^T \Hess h(y_0) (y - y_0)\right) + h(y_0)^{\frac{\alpha}{n - d - 2} - 2}\frac{\frac{\alpha}{n - d - 2}\left(\frac{\alpha}{n - d - 2} - 1\right)}{2}\langle \nabla h(y_0), y - y_0\rangle^2 + o(|y - y_0|^2).$$ Choose $x = (y_0, r, 0 \dots), y_0 \in \R^d$, $0 < r < 1$, and rewrite the equation $\eqref{fsolution}$, using the expansions from above.
On the left-hand side we get $$c_1h(y_0) + r^{n - d - 2}\Big\langle\nabla h(y_0), \ilim_{\R^d}{\frac{(y - y_0)dy}{|(y_0, r) - y|^{n - 2}}}\Big\rangle $$ \begin{equation}\label{lhs6} + r^{n - d - 2}\ilim_{\R^d}{\frac{(y - y_0)^T\Hess h(y_0)(y - y_0)dy}{|(y_0, r) - y|^{n - 2}}} + r^{n - d - 2}\ilim_{\R^d}{\frac{o(|y - y_0|^2)dy}{|(y_0, r) - y|^{n - 2}}}, \end{equation} and on the right-hand side inside the brackets with the power $\frac{n - d - 2}{\alpha}$ we get $$c_2h(y_0)^\frac{\alpha}{n - d - 2} + h(y_0)^{\frac{\alpha}{n - d - 2} - 1}\frac{\alpha}{n - d - 2}r^{\alpha}\Big\langle\nabla h(y_0), \ilim_{\R^d}{\frac{(y - y_0)dy}{|(y_0, r) - y|^{d + \alpha}}}\Big\rangle + h(y_0)^{\frac{\alpha}{n - d - 2} - 1}\frac{\alpha}{n - d - 2}r^{\alpha}$$ \begin{equation}\label{rhs6} \cdot\ilim_{\R^d}{\frac{(y - y_0)^T\Hess h(y_0)(y - y_0) + h(y_0)^{-1}\frac{1}{2}\left(\frac{\alpha}{n - d - 2} - 1\right)\langle\nabla h(y_0), y - y_0\rangle^2 }{|(y_0, r) - y|^{d + \alpha}}dy} \end{equation} $$+ r^\alpha\ilim_{\R^d}{\frac{o(|y - y_0|^2)dy}{|(y_0, r) - y|^{d + \alpha}}}.$$ Then we expand the right-hand side of $\eqref{fsolution}$ using $(c + u)^\frac{n - d - 2}{\alpha} = c^\frac{n - d - 2}{\alpha} + c^{\frac{n - d - 2}{\alpha} - 1}u + o(u)$ for small $u$ with $c = c_2h(y_0)^\frac{\alpha}{n - d - 2}$ and $u = \eqref{rhs6} - c_2h(y_0)^\frac{\alpha}{n - d - 2}$. We know already from Subsection 5.1 that, because of the symmetries of the denominator, $$\ilim_{\R^d}{\frac{(y - y_0)dy}{|(y_0, r) - y|^{n - 2}}} = \ilim_{\R^d}{\frac{(y - y_0)dy}{|(y_0, r) - y|^{d + \alpha}}} = 0,$$ so the second term with $\nabla h(y_0)$ both in $\eqref{lhs6}$ and $\eqref{rhs6}$ vanishes. Therefore the Taylor expansion of the right-hand side of $\eqref{fsolution}$ simplifies to $$c_1h(y_0) + c_1c_2^{-1}r^\alpha\cdot\ilim_{\R^d}{\frac{(y - y_0)^T\Hess h(y_0)(y - y_0) + h(y_0)^{-1}\frac{1}{2}\left(\frac{\alpha}{n - d - 2} - 1\right)\langle\nabla h(y_0), y - y_0\rangle^2 }{|(y_0, r) - y|^{d + \alpha}}dy} $$ \begin{equation}\label{Rhs6} + r^\alpha\ilim_{\R^d}{\frac{o(|y - y_0|^2)dy}{|(y_0, r) - y|^{d + \alpha}}}. \end{equation} Now we would like to treat $\eqref{lhs6}$ and $\eqref{Rhs6}$ as functions of the parameter $r$ to ``get rid of'' the terms with $o(|y - y_0|^2)$. Observe that the term with integral of a fraction with $O(|y - y_0|^2)$ in the numerator in $\eqref{lhs6}$ is of the order $r^2$, and the same is true for the term with integral of a fraction with $O(|y - y_0|^2)$ in the numerator in $\eqref{Rhs6}$. Next, with the assumption $h \in C^{2, \varepsilon}$ we can estimate the residues in $\eqref{lhs6}$ and $\eqref{Rhs6}$ and say that $|o(|y - y_0|^2)| \le C|y - y_0|^{2 + \varepsilon}$, where $C$ depends only on H\"older coefficients of second-order partial derivatives of $h$. Therefore we can bound the modulus of the last term in $\eqref{lhs6}$ the following way: $$\left|r^{n - d - 2}\ilim_{\R^d}{\frac{o(|y - y_0|^2)dy}{|(y_0, r) - y|^{n - 2}}}\right| \le Cr^{2 + \varepsilon}\ilim_{\R^d}{\frac{|y - y_0|^{2 + \varepsilon}/r^{2 + \varepsilon} }{(1 + |y - y_0|^2/r^2)^{(n - 2)/2}}r^{-d}dy}.$$ Assuming $n - d > 4$, the integral above is bounded and does not depend on $r$. So the $o(|y - y_0|^2)$ term in $\eqref{lhs6}$ is of the order $r^{2 + \varepsilon}$. 
The same is true about the $o(|y - y_0|^2)$ term in $\eqref{Rhs6}$, since $$\left|r^\alpha\ilim_{\R^d}{\frac{o(|y - y_0|^2)dy}{|(y_0, r) - y|^{d + \alpha}}}\right| \le Cr^{2 + \varepsilon}\ilim_{\R^d}{\frac{|y - y_0|^{2 + \varepsilon}/r^{2 + \varepsilon} }{(1 + |y - y_0|^2/r^2)^{(d + \alpha)/2}}r^{-d}dy},$$ and the integral above is once again bounded and does not depend on $r$, if we assume that $\alpha > 2 + \varepsilon_0$. We divide $\eqref{fsolution}$ by $r^2$, let $r$ tend to zero, and get that then the $r^2$-order terms should match. Thus we get the equation $$r^{n - d - 2}\ilim_{\R^d}{\frac{(y - y_0)^T\Hess h(y_0)(y - y_0)dy}{|(y_0, r) - y|^{n - 2}}} $$ $$ = \frac{c_1}{c_2} r^\alpha \ilim_{\R^d}{\frac{(y - y_0)^T\Hess h(y_0)(y - y_0) + h(y_0)^{-1}\frac{1}{2}\left(\frac{\alpha}{n - d - 2} - 1\right)\langle\nabla h(y_0), y - y_0\rangle^2 }{|(y_0, r) - y|^{d + \alpha}}dy}.$$ We get rid of all second-order derivatives with different indices because of the symmetries, and also of all the products of first-order derivatives with different indices as well. Passing to non-tangential limits will give us the following PDE equation for the function $h$: \begin{equation}\label{PDE0} \tilde{c_1}\Delta h = \frac{c_1}{c_2}\tilde{c_2}\Delta h + \frac{1}{2}\frac{c_1}{c_2}\tilde{c_2}\left(\frac{\alpha}{n - d - 2} - 1\right)h^{-1}|\nabla h|^2 \quad \mbox{on} \; \R^d, \end{equation} where $$\tilde{c_1} = \Vol(\Sf^{d - 1})\ilim_0^\infty{\frac{x^{d + 1}dx}{(1 + x^2)^\frac{n - 2}{2}}} = \Vol(\Sf^{d - 1})\frac{1}{2}\frac{\Gamma\left(\frac{d + 2}{2}\right)\Gamma\left(\frac{n - d - 4}{2}\right)}{\Gamma\left(\frac{n - 2}{2}\right)}, \quad \mbox{and}$$ $$\tilde{c_2} = \Vol(\Sf^{d - 1})\ilim_0^\infty{\frac{x^{d + 1}dx}{(1 + x^2)^\frac{d + \alpha}{2}}} = \Vol(\Sf^{d - 1})\frac{1}{2}\frac{\Gamma\left(\frac{d + 2}{2}\right)\Gamma\left(\frac{\alpha - 2}{2}\right)}{\Gamma\left(\frac{d + \alpha}{2}\right)}.$$ Recall that $$c_1 = \Vol(\Sf^{d - 1})\frac{1}{2}\frac{\Gamma\left(\frac{d}{2}\right)\Gamma\left(\frac{n - d - 2}{2}\right)}{\Gamma\left(\frac{n - 2}{2}\right)} \quad \mbox{and} \quad c_2 = \Vol(\Sf^{d - 1})\frac{1}{2}\frac{\Gamma\left(\frac{d}{2}\right)\Gamma\left(\frac{\alpha}{2}\right)}{\Gamma\left(\frac{d + \alpha}{2}\right)},$$ so $$\frac{\tilde{c_1}}{\tilde{c_2}}\frac{c_2}{c_1} = \frac{\Gamma\left(\frac{n - d - 4}{2}\right)\Gamma\left(\frac{\alpha}{2}\right)}{\Gamma\left(\frac{n - d - 2}{2}\right)\Gamma\left(\frac{\alpha - 2}{2}\right)}=\frac{\alpha - 2}{n - d - 4}.$$ Therefore the equation $\eqref{PDE0}$ is trivial when the parameter $\alpha$ is ``magic'' and equal to $n - d - 2$. Otherwise $\eqref{PDE0}$ gives an equation of the form \begin{equation}\label{PDE} \Delta h = - C h^{-1}|\nabla h|^2 \quad \mbox{on} \; \R^d, \end{equation} where the constant $C$, as one can see easily from $\eqref{PDE0}$, is $$C = \frac{1}{2}\left(\frac{\alpha}{n - d - 2} - 1\right)\frac{\frac{c_1}{c_2}\tilde{c_2}}{\frac{c_1}{c_2}\tilde{c_2} - \tilde{c_1}} = \frac{1}{2}\left(\frac{\alpha}{n - d - 2} - 1\right)\frac{1}{1 - \frac{\tilde{c_1}}{\tilde{c_2}}\frac{c_2}{c_1}}.$$ Given the computation above, we have \begin{equation}\label{const} C = - \left(\frac{1}{2} - \frac{1}{n - d - 2}\right). \end{equation} ~\ We claim that we can find a change of variables that transforms the equation $\eqref{PDE}$ into the equation $\Delta \cdot = 0$. 
Indeed, take $g = h^\beta$ with $\beta \neq 0, 1$; then $$\nabla g = \beta h^{\beta - 1}\nabla h, \quad \Delta g = \beta h^{\beta - 1}\Delta h + \beta(\beta - 1) h^{\beta - 2}|\nabla h|^2.$$ Substituting this in $\eqref{PDE}$, we get that $g$ is a solution of $$\Delta g = -\frac{|\nabla g|^2}{g}\left(\frac{C + 1}{\beta} - 1\right).$$ So, if $C \neq -1$, we choose $\beta = C + 1$ and we are done. Otherwise take $g = \log{h}$, then $$h = e^g, \quad \nabla g = h^{-1}\nabla h, \quad \Delta g = h^{-1}\Delta h - h^{-2}|\nabla h|^2.$$ Substituting this in $\eqref{PDE}$, we get that $g$ is a solution of $$\Delta g = -(C + 1)|\nabla g|^2,$$ and since $C = -1$, the function $g$ is harmonic, as we wanted. So some power of the function $h$, or $\log{h}$, is a harmonic function $g$ on the whole of $\R^d$. Since $h$ is the density of an Ahlfors-regular measure on $\R^d$, $h$ is bounded and bounded away from zero. Thus $g$ is, in addition, bounded. Liouville's theorem then implies that $g$ has to be a constant, and therefore $h$ is a constant as well.

~\

\begin{rem}
To conclude this section, we would like to comment on the regularity restrictions we imposed on the density $h$ of a measure $\mu$ in Theorem $\ref{mainthm_direct}$. Asking $h$ to be in $C^{2, \varepsilon}(\R^d)$ seems clumsy. But we cannot significantly relax this restriction, because at the core of the method lies the differential equation $\eqref{PDE}$, and it seems that one cannot interpret this equation without some boundedness assumptions on its right-hand side. One could still argue that, given the changes of variables we implemented to solve $\eqref{PDE}$, if in $\eqref{fsolution}$ we considered $h^\beta$ for a suitable $\beta$ instead of $h$, we could probably arrive at a simpler equation requiring weaker regularity assumptions. Indeed, this is clear from the proof given above: there is (for almost all values of the constant $C$) a $\beta$ such that, ultimately, we would get that $h^\beta$ is a positive harmonic function, bounded and bounded away from zero. However, to derive this equation we still need to use the Taylor expansion and to show that the terms in equations like $\eqref{lhs6}$ and $\eqref{rhs6}$ which correspond to the small-$o$ terms in the expansion can be neglected. This seems hard to do without the regularity assumption we asked for.
\end{rem}
~\

\noindent Polina Perstneva\\
Universit\'e Paris Saclay, LMO \\
[email protected]

\end{document}
2205.11621v1
http://arxiv.org/abs/2205.11621v1
Anti-van der Waerden Numbers of Graph Products of Cycles
\documentclass[smallextended,envcountsame,envcountsect]{svjour3} \smartqed \usepackage{mathptmx} \usepackage[utf8]{inputenc} \usepackage{tikz} \usepackage{amssymb,amsmath, graphicx,hyperref, xcolor,lineno} \usepackage{newtxtext,newtxmath} \usepackage{parskip} \newcommand{\C}{\mathcal{C}} \newcommand{\aw}{\textup{aw}} \newcommand{\dist}{\textup{d}} \newcommand{\diam}{\textup{diam}} \newcommand{\Mod}[1]{\ (\mathrm{mod}\ #1)} \title{Anti-van der Waerden Numbers of Graph Products of Cycles\thanks{Thank you to the University of Wisconsin-La Crosse's (UWL's) Dean's Distinguished Fellowship program that supported both authors. Also, thanks to the UWL's Undergraduate Research and Creativity grant and the UWL Department of Mathematics and Statistics Bange/Wine Undergraduate Research Endowment that supported the first author.} } \author{Joe Miller \and Nathan Warnberg} \institute{J. Miller \and N. Warnberg \\ University of Wisconsin-La Crosse \\ \email{[email protected] \and [email protected]} } \date{\today} \begin{document} \titlerunning{Anti-van der Waerden Numbers of Graph Products of Cycles} \authorrunning{J. Miller, N. Warnberg} \maketitle \begin{abstract} A \emph{$k$-term arithmetic progression ($k$-AP) in a graph $G$} is a list of vertices such that each consecutive pair of vertices is the same distance apart. If $c$ is a coloring function of the vertices of $G$ and a $k$-AP in $G$ has each vertex colored distinctly, then that $k$-AP is a \emph{rainbow $k$-AP}. The \emph{anti-van der Waerden number of a graph $G$ with respect to $k$} is the least positive integer $r$ such that every surjective coloring with domain $V(G)$ and codomain $\{1,2,\dots,r\} = [r]$ is guaranteed to have a rainbow $k$-AP. This paper focuses on $3$-APs and graph products with cycles. Specifically, the anti-van der Waerden number with respect to $3$ is determined precisely for $P_m \square C_n$, $C_m\square C_n$ and $G\square C_{2n+1}$. \keywords{anti-van der Waerden number \and anti-Ramsey \and rainbow \and $k$-term arithmetic progression} \subclass{05C15 \and 05C38} \end{abstract} \newpage \section{Introduction}\label{sec:intro} The study of van der Waerden numbers started with Bartel van der Waerden showing in $1927$ that, given a fixed number of colors $r$ and a fixed integer $k$, there is some $N$ (a van der Waerden number) such that if $n \ge N$, then no matter how you color $[n] = \{1,2,\dots,n\}$ with $r$ colors, there will always be a monochromatic $k$-term arithmetic progression (see \cite{W27}). It is interesting to note that around this time, in $1917$, I. Schur proved that, given $r$ colors, you can find an $N$ (a Schur number) such that if $n \ge N$, then no matter how you color $[n]$ there must be a monochromatic solution to $x+y = z$ (see \cite{S}). In addition, in $1928$, F.P. Ramsey showed that (here graph theory language is used, though it was not in Ramsey's original formulation) given $r$ colors and some constant $k$ you can find an $N$ (a Ramsey number) such that if $n \ge N$, then no matter how you color the edges of a complete graph $K_n$ you can always find a complete subgraph $K_k$ that is monochromatic (see \cite{R}). These types of problems, which look for monochromatic structures, have been categorized as Ramsey-type problems, and each of them has a dual version. For example, the anti-van der Waerden number asks, given integers $n$ and $k$, for the minimum number of colors $r$ such that every surjective $r$-coloring of $\{1,\dots,n\}$ contains a rainbow $k$-term arithmetic progression.
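To make this definition concrete, the short Python sketch below (an illustration only, with ad hoc function names; the brute-force search is feasible only for very small $n$) computes the anti-van der Waerden number of $\{1,\dots,n\}$ with respect to $3$ directly from the definition.
\begin{verbatim}
from itertools import product

def has_rainbow_3ap(coloring):
    # coloring[i] is the color of i + 1, so indices 0..n-1 stand for [n].
    n = len(coloring)
    for a in range(n):
        for d in range(1, n):
            b, c = a + d, a + 2 * d
            if c >= n:
                break
            if len({coloring[a], coloring[b], coloring[c]}) == 3:
                return True
    return False

def aw_interval(n):
    # Least r such that every surjective r-coloring of [n] has a rainbow
    # 3-AP; returns n + 1 if no number of colors forces one.
    for r in range(1, n + 1):
        surjective = (c for c in product(range(r), repeat=n)
                      if len(set(c)) == r)
        if all(has_rainbow_3ap(c) for c in surjective):
            return r
    return n + 1

# Known values (recalled later in the paper): aw([8],3) = 5, aw([9],3) = 4.
print(aw_interval(8), aw_interval(9))
\end{verbatim}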
It was not until 1973 when Erd\H{o}s, Simonovits, and S\'{o}s, in \cite{ESS}, started looking at the dual versions of these problems which are now well-studied (see \cite{FMO} for a survey). Results on colorings and balanced colorings of $[n]$ that avoid rainbow arithmetic progressions have been studied in \cite{AF} and \cite{AM}. Rainbow free colorings of $[n]$ and $\mathbb{Z}_n$ were studied in \cite{J} and \cite{DMS}. Although Butler et al., in \cite{DMS}, consider arithmetic progressions of all lengths, many results on $3$-APs were produced. In particular, the authors of \cite{DMS} determine $\aw(\mathbb{Z}_n,3)$, see Theorem \ref{cycles} with additional cycle notation. Further, the authors of \cite{DMS} determined that $3\leq \aw(\mathbb{Z}_p,3)\leq 4$ for every prime number $p$ and that $\aw(\mathbb{Z}_n,3)$, which equals $\aw(C_n,3)$, can be determined by the prime factorization of $n$. This result was then generalized by Young in \cite{finabgroup}. \begin{theorem}\label{cycles} {\cite{DMS}} \begin{sloppy} Let $n$ be a positive integer with prime decomposition $n=2^{e_0}p_1^{e_1}p_2^{e_2}\cdots p_s^{e_s}$ for $e_i\geq 0$, $i=0,\ldots,s$, where primes are ordered so that $\aw(\mathbb{Z}_{p_i},3)=3$ for $ 1 \leq i \leq \ell$ and $\aw(\mathbb{Z}_{p_i},3)=4$ for $\ell + 1 \leq i \leq s$. Then, \[ \aw(\mathbb{Z}_n,3) = \aw(C_n,3)=\left\{\begin{array}{ll} 2 +\sum\limits_{j=1}^\ell e_j + \sum\limits_{j=\ell+1}^s 2e_j & \mbox{if $n$ is odd,} \\ 3 +\sum\limits_{j=1}^\ell e_j + \sum\limits_{j=\ell+1}^s 2e_j & \mbox{if $n$ is even.} \end{array}\right. \] \end{sloppy} \end{theorem} As mentioned, Butler et al. also studied arithmetic progressions on $[n]$ and obtained bounds on $\aw([n],3)$ and conjectured the exact value that was later proven in \cite{BSY}. This result on $[n]$ is presented as Theorem \ref{paths} and includes path notation. \begin{theorem}\label{paths}\cite{BSY} If $n \ge 3$ and $7\cdot 3^{m-2} +1 \le n \le 21\cdot 3^{m-2}$, then \[\aw([n],3) = \aw(P_n,3) = \left\{\begin{array}{ll} m+2 & \text{ if $n = 3^m$,}\\ m+3 & \text{ otherwise.}\end{array}\right. \] \end{theorem} It is also interesting to note that $3$-APs in $[n]$ or $\mathbb{Z}_n$ satisfy the equation $x_1 + x_2 = 2x_3$. Thus, rainbow numbers for other linear equations have also been considered (see \cite{BKKTTY}, \cite{FGRWW}, \cite{RFC} and \cite{LM}). Studying the anti-van der Waerden number of graphs is a natural extension of determining anti-van der Waerden number of $[n] = \{1,2,\dots,n\}$, which behave like paths, and $\mathbb{Z}_n$, which behave like cycles. In particular, the set of arithmetic progressions on $[n]$ is isomorphic to the set of arithmetic progressions on $P_n$ and the set of arithmetic progressions on $\mathbb{Z}_n$ is isomorphic to the set of arithmetic progressions on $C_n$. This relationship was first introduced and explored in \cite{SWY} where the anti-van der Waerden number was bounded by the radius and diameter of a graph, the anti-van der Waerden number of trees and hypercubes were investigated and an upperbound of $4$ was conjectured for the anti-van der Waerden number of graph products. Then, in \cite{RSW}, the authors confirmed the upper bound of $4$ for any graph product (see Theorem \ref{thm:rsw}). This paper continues in this vein. 
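Before turning to graph products, we note that the values in Theorem \ref{cycles} can be checked for very small $n$ by brute force; the following Python sketch (purely illustrative, with ad hoc function names, and exponential in $n$) enumerates the $3$-APs $(a, a+d, a+2d)$ of $\mathbb{Z}_n$ and searches over all surjective colorings.
\begin{verbatim}
from itertools import product

def has_rainbow_3ap_mod(coloring):
    # 3-APs of Z_n: (a, a + d, a + 2d) mod n with three distinct terms.
    n = len(coloring)
    for a in range(n):
        for d in range(1, n):
            triple = {a, (a + d) % n, (a + 2 * d) % n}
            if len(triple) == 3 and len({coloring[t] for t in triple}) == 3:
                return True
    return False

def aw_cyclic(n):
    # A rainbow 3-AP needs three colors, so the search may start at r = 3.
    for r in range(3, n + 1):
        surjective = (c for c in product(range(r), repeat=n)
                      if len(set(c)) == r)
        if all(has_rainbow_3ap_mod(c) for c in surjective):
            return r
    return n + 1

# e.g. aw(Z_3,3) = 3; other small values can be compared with the formula above.
print({n: aw_cyclic(n) for n in range(3, 9)})
\end{verbatim}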
\begin{theorem}\cite{RSW}\label{thm:rsw} If $G$ and $H$ are connected graphs and $|G|,|H| \ge 2$, then \[\aw(G\square H,3) \le 4.\] \end{theorem} Something that makes anti-van der Waerden numbers challenging to study is that the parameter is not subgraph monotone. A particular example, \[4 = \aw([9],3) = \aw(P_9,3) < \aw(P_8,3) = \aw([8],3) = 5,\] even though $P_8$ is a subgraph of $P_9$, and a general statement, \[\aw(C_n,3) = \aw(\mathbb{Z}_n,3) \le \aw([n],3) = \aw(P_n,3),\] were both given, without the graph theory interpretation, in \cite{DMS}. One tool that does allow a kind of monotonicity when studying the anti-van der Waerden numbers of graphs is restricting attention to isometric subgraphs, that is, subgraphs that preserve distances. This insight was used extensively in \cite{RSW} to get an upper bound on the anti-van der Waerden number of graph products and will also be leveraged in this paper. First, some definitions and background inspired by \cite{DMS} and used in \cite{SWY} and \cite{RSW} are provided. Graphs in this paper are undirected, so edge $\{u,v\}$ will be shortened to $uv\in E(G)$. If $uv\in E(G)$, then $u$ and $v$ are \emph{neighbors} of each other. The \emph{distance} between vertices $u$ and $v$ in graph $G$ is denoted $\dist_G(u, v)$, or just $\dist(u, v)$ when context is clear, and is the smallest length of any $u-v$ path in $G$. A $u-v$ path of length $\dist(u,v)$ is called a $u-v$ \emph{geodesic}. A \emph{$k$-term arithmetic progression in graph $G$} ($k$-AP) is a set of vertices $\{v_1,v_2,\dots,v_k\}$ such that $\dist(v_i,v_{i+1}) = d$ for some common difference $d$ and all $1\le i \le k-1$. A $k$-term arithmetic progression is \emph{degenerate} if $v_i = v_j$ for some $i\neq j$. Note that technically, since a $k$-AP is a set, the order of the elements does not matter. However, oftentimes $k$-APs will be presented in the order that provides the most intuition. An \emph{exact $r$-coloring of a graph $G$} is a surjective function $c:V(G) \to [r]$. A set of vertices $S$ is \emph{rainbow} under coloring $c$ if $c(v_i) \neq c(v_j)$ for all $v_i,v_j\in S$ with $i \neq j$. Given a set $S\subset V(G)$, define $c(S) = \{ c(s) \, | \, s\in S\}$. The \emph{anti-van der Waerden number of graph $G$ with respect to $k$}, denoted $\aw(G,k)$, is the least positive integer $r$ such that every exact $r$-coloring of $G$ contains a rainbow $k$-term arithmetic progression. If $|V(G)| = n$ and no coloring of the vertices yields a rainbow $k$-AP, then $\aw(G,k) = n+1$. Graph $G'$ is a \emph{subgraph} of $G$ if $V(G') \subseteq V(G)$ and $E(G') \subseteq E(G)$. A subgraph $G'$ of $G$ is an \emph{induced subgraph} if whenever $u$ and $v$ are vertices of $G'$ and $uv$ is an edge of $G$, then $uv$ is an edge of $G'$. If $S$ is a nonempty set of vertices of $G$, then the \emph{subgraph of $G$ induced by $S$} is the induced subgraph with vertex set $S$ and is denoted $G[S]$. An \emph{isometric subgraph} $G'$ of $G$ is a subgraph such that $\dist_{G'}(u,v) = \dist_G(u,v)$ for all $u,v \in V(G')$. If $G = (V,E)$ and $H = (V', E')$, then the \emph{Cartesian product}, written $G\square H$, has vertex set $\{(x, y) : x \in V \text{ and } y \in V' \}$, and $(x, y)$ and $(x', y')$ are adjacent in $G \square H$ if either $x = x'$ and $yy' \in E'$, or $y = y'$ and $xx' \in E$. This paper will use the convention that if \[V(G) = \{u_1,\ldots, u_{n_1}\} \quad \text{and} \quad V(H) = \{w_1,\ldots,w_{n_2}\},\] then $V(G\square H) = \{v_{1,1},\ldots, v_{n_1,n_2}\}$ where $v_{i,j}$ corresponds to the vertices $u_i \in V(G)$ and $w_j \in V(H)$.
Also, if $1\leq i \leq n_2$, then $G_i$ denotes the $i$th labeled copy of $G$ in $G \square H$. Likewise, if $1 \leq j \leq n_1$, then $H_j$ denotes the $j$th labeled copy of $H$ in $G \square H$. In other words, $G_i$ is the induced subgraph $G_i = G\square H[\{v_{1,i},\dots, v_{n_2,i}\}]$, and $H_j$ is the induced subgraph $H_j = G\square H[\{v_{j,1}, \dots, v_{j,n_1}\}]$. Notice that the $i$ subscript in $G_i$ corresponds to the $i$th vertex of $H$ and the $j$ in the subscript in $H_j$ corresponds to the $j$th vertex of $G$. See Example \ref{ex:cartprod} below. \begin{example}\label{ex:cartprod} Consider the graph $P_3\square C_5$ where $V(P_3) = \{u_1,u_2,u_3\}$ and $V(C_5) = \{w_1,w_2,w_3,w_4,w_4\}$. Let $G=P_3$ and $H = C_5$ as in the definition. Now, $G_4$ is a subgraph of $P_3\square C_5$ that is isomorphic to $P_3$ and corresponds to vertex $w_4$ of $C_5$. Similarly, $H_2$ is a subgraph of $P_3\square C_5$ that is isomorphic to $C_5$ and corresponds to vertex $u_2$ of $P_3$. See Figure \ref{fig:cartprodex} below. \begin{figure}[ht!] \centering \begin{tikzpicture}[scale = 0.7] \node[draw,circle] (11) at (.75,0) {$v_{1,1}$}; \node[draw,circle] (12) at (3,0) {$v_{1,2}$}; \node[draw,circle] (13) at (3.75,2.25) {$v_{1,3}$}; \node[draw,circle, line width=0.75mm] (14) at (1.9875,3.75) {$v_{1,4}$}; \node[draw,circle] (15) at (0,2.25) {$v_{1,5}$}; \draw[thick] (11) to node [auto] {} (12); \draw[thick] (12) to node [auto] {} (13); \draw[thick] (13) to node [auto] {} (14); \draw[thick] (14) to node [auto] {} (15); \draw[thick] (15) to node [auto] {} (11); \node[draw,circle, thick, dashed] (21) at (5.75,1) {$v_{2,1}$}; \node[draw,circle, thick, dashed] (22) at (8,1) {$v_{2,2}$}; \node[draw,circle, thick, dashed] (23) at (8.75,3.25) {$v_{2,3}$}; \node[draw,circle, line width=0.75mm, dashed] (24) at (6.9875,4.75) {$v_{2,4}$}; \node[draw,circle, thick, dashed] (25) at (5,3.25) {$v_{2,5}$}; \draw[thick, dashed] (21) to node [auto] {} (22); \draw[thick, dashed] (22) to node [auto] {} (23); \draw[thick, dashed] (23) to node [auto] {} (24); \draw[thick, dashed] (24) to node [auto] {} (25); \draw[thick, dashed] (25) to node [auto] {} (21); \node[draw,circle] (31) at (10.75,2) {$v_{3,1}$}; \node[draw,circle] (32) at (13,2) {$v_{3,2}$}; \node[draw,circle] (33) at (13.75,4.25) {$v_{3,3}$}; \node[draw,circle, line width=0.75mm] (34) at (11.9875,5.75) {$v_{3,4}$}; \node[draw,circle] (35) at (10,4.25) {$v_{3,5}$}; \draw[thick] (31) to node [auto] {} (32); \draw[thick] (32) to node [auto] {} (33); \draw[thick] (33) to node [auto] {} (34); \draw[thick] (34) to node [auto] {} (35); \draw[thick] (35) to node [auto] {} (31); \draw[thick] (11) to node [auto] {} (21); \draw[thick] (12) to node [auto] {} (22); \draw[thick] (13) to node [auto] {} (23); \draw[thick, line width=0.75mm] (14) to node [auto] {} (24); \draw[thick] (15) to node [auto] {} (25); \draw[thick] (31) to node [auto] {} (21); \draw[thick] (32) to node [auto] {} (22); \draw[thick] (33) to node [auto] {} (23); \draw[thick, line width=0.75mm] (34) to node [auto] {} (24); \draw[thick] (35) to node [auto] {} (25); \node[draw,circle,fill=white] (11) at (.75,0) {$v_{1,1}$}; \node[draw,circle,fill=white] (12) at (3,0) {$v_{1,2}$}; \node[draw,circle,fill=white] (13) at (3.75,2.25) {$v_{1,3}$}; \node[draw,circle, line width=0.75mm,fill=white] (14) at (1.9875,3.75) {$v_{1,4}$}; \node[draw,circle,fill=white] (15) at (0,2.25) {$v_{1,5}$}; \node[draw,circle, thick, dashed,fill=white] (21) at (5.75,1) {$v_{2,1}$}; \node[draw,circle, thick, dashed,fill=white] 
(22) at (8,1) {$v_{2,2}$}; \node[draw,circle, thick, dashed,fill=white] (23) at (8.75,3.25) {$v_{2,3}$}; \node[draw,circle, line width=0.75mm, dashed,fill=white] (24) at (6.9875,4.75) {$v_{2,4}$}; \node[draw,circle, thick, dashed,fill=white] (25) at (5,3.25) {$v_{2,5}$}; \node[draw,circle,fill=white] (31) at (10.75,2) {$v_{3,1}$}; \node[draw,circle,fill=white] (32) at (13,2) {$v_{3,2}$}; \node[draw,circle,fill=white] (33) at (13.75,4.25) {$v_{3,3}$}; \node[draw,circle, line width=0.75mm,fill=white] (34) at (11.9875,5.75) {$v_{3,4}$}; \node[draw,circle,fill=white] (35) at (10,4.25) {$v_{3,5}$}; \end{tikzpicture} \caption{Image for Example \ref{ex:cartprod}: The subgraph $G_4$ has been bolded and $H_2$ has been dashed.} \label{fig:cartprodex} \end{figure} \end{example} The paper continues with Section \ref{sec:2} recapping and expanding many fundamental results from \cite{RSW}. Section \ref{sec:pmcn} establishes $\aw(P_m\square C_n,3)$ for all $m$ and $n$. Section \ref{sec:cmcn} is an investigation of $\aw(G\square C_n,3)$. In particular, $\aw(C_m\square C_n,3)$ is determined for all $m$ and $n$. Further, Section \ref{sec:cmcn} determines $\aw(G\square C_n,3)$ for any $G$ when $n$ is odd. Finally, Section \ref{sec:future} provides the reader with some conjectures and open questions. \section{Background and Fundamental Tools}\label{sec:2} Distance preservation in subgraphs can be leveraged to guarantee the existence of rainbow $3$-APs. Thus, this section starts with some basic distance and isometry results. \begin{proposition}\label{prop:dist} If $v_{i,j},v_{h,k} \in V(G \square H)$, then \[\dist_{G\square H}(v_{i,j},v_{h,k}) = \dist_G(u_i,u_h) + \dist_H(w_j,w_k).\] \end{proposition} \begin{proof} Note that $\dist_{G\square H}(v_{i,j},v_{h,k}) \le \dist_G(u_i,u_h) + \dist_H(w_j,w_k)$ because a path of length $\dist_G(u_i,u_h) + \dist_H(w_j,w_k)$ can be constructed using a $u_i-u_h$ geodesic in $G$ and combining it with a $w_j-w_k$ geodesic in $H$. \par To show the other inequality, let $P$ be a $v_{i,j} - v_{h,k}$ geodesic, say \[P = \{v_{i,j} = x_1, x_2, \ldots, x_y = v_{h,k}\}.\] Note that for every edge $v_{j_1,j_2}v_{\beta_1,\beta_2} \in E(P)$, either $j_1 = \beta_1$ and $w_{j_2}w_{\beta_2} \in E(H)$, or $j_2 = \beta_2$ and $u_{j_1}u_{\beta_1} \in E(G)$. Then, $x_{\ell}x_{\ell+1}$ must correspond either to an edge from a $u_i-u_j$ walk or from a $w_h-w_k$ walk and $P$ must correspond to a walk in $G$ and also a walk in $H$. In other words, the length of $P$ is the sum of the length of the corresponding walks in $G$ and $H$. Thus, the length of $P$ is at least the sum of the lengths of a $u_i-u_h$ geodesic in $G$ and a $w_j-w_k$ geodesic in $H$. So, \[\dist_G(u_i,u_h) + \dist_H(w_j,w_k) \le \dist_{G\square H}(v_{i,j},v_{h,k}).\] \qed\end{proof} \begin{corollary}\label{cor:isosubprod} If $G'$ is an isometric subgraph of $G$ and $H'$ is an isometric subgraph of $H$, then $G'\square H'$ is an isometric subgraph of $G\square H$. \end{corollary} \begin{proof} Let $V(G) = \{u_1,\ldots, u_{n_1}\}$ and $V(H) = \{w_1,\ldots,w_{n_2}\}$. Then let $v_{i,j},v_{h,k} \in V(G'\square H')$. Observe, \begin{align*} \dist_{G'\square H'}(v_{i,j},v_{h,k}) & = \dist_{G'}(u_i,u_h) + \dist_{H'}(w_j,w_k) \\ & = \dist_{G}(u_i,u_h) + \dist_{H}(w_j,w_k) \\ & = \dist_{G\square H}(v_{i,j},v_{h,k}). \end{align*} \null\qed\end{proof} Lemma \ref{isometricpathorC3} is powerful since it guarantees isometric subgraphs. 
Isometric subgraphs are important when investigating anti-van der Waerden numbers because distance preservation implies $k$-AP preservation. \begin{lemma}\cite{RSW}\label{isometricpathorC3} If $G$ is a connected graph on at least three vertices with an exact $r$-coloring $c$ where $r \ge 3$, then there exists a subgraph $G'$ in $G$ with at least three colors where $G'$ is either an isometric path or $G' = C_3$. \end{lemma} Theorem \ref{PmxPn} is used when isometric $P_m\square P_n$ subgraphs are found within $G\square H$. \begin{theorem}\cite{RSW}\label{PmxPn} For $m,n \geq 2$, \[\aw(P_m \square P_n, 3) = \begin{cases} 3 & \text{if $m = 2$ and $n$ is even, or $m = 3$ and $n$ is odd,} \\ 4 & \text{otherwise.} \end{cases}\] \end{theorem} Lemma \ref{|c(V(Gi U Gj))|<3} helps restrict the number of colors each copy of $G$ or $H$ can have within $G\square H$. \begin{lemma}\cite{RSW}\label{|c(V(Gi U Gj))|<3} Assume $G$ and $H$ are connected with $|V(H)| \geq 3$. Suppose $c$ is an exact, rainbow-free $r$-coloring of $G\square H$, such that $r \geq 3$ and $|c(V(G_i))| \leq 2$ for $1 \leq i \leq n$. If $w_iw_j \in E(H)$, then $|c(V(G_i) \cup V (G_j))| \leq 2$. \end{lemma} To prove Lemmas \ref{lem:p2codd} and \ref{lem:p2ceven} requires the use of Lemma \ref{|c(Hi)|<3}. \begin{lemma}\label{|c(Hi)|<3} If $G$ and $H$ are connected, $|G|,|H| \ge 2$ and $c$ is an exact $r$-coloring of $G\square H$, $3\le r$, that avoids rainbow $3$-APs, then $|c(V(G_i))| \leq 2$ for $1 \leq i \leq |H|$. \end{lemma} \begin{proof} If $|G| = 2$ the result is immediate, so let $3\le |G|$. For the sake of contradiction, assume $red,blue,green \in |c(V(G_i))|$ for some $1\le i \le |H|$. By Lemma \ref{isometricpathorC3}, there must exist an isometric path or a $C_3$ in $G_i$ containing $red$, $blue$, and $green$. If there is such a $C_3$, then there is a rainbow $3$-AP which is a contradiction. So, assume $P_\ell$ is a shortest isometric path in $G_i$ containing $red$, $blue$, and $green$, for some positive integer $3 \le \ell$. \begin{description} \item[Case 1.] $\ell$ is odd.\\ Without loss of generality, suppose the two leaves of $P_\ell$ are colored $red$ and $blue$. Since $P_\ell$ is shortest the rest of the vertices are colored $green$. Since $\ell$ is odd there exists a $green$ vertex equidistant from the $red$ and $blue$ vertices which creates a rainbow $3$-AP, a contradiction. \item[Case 2.] $\ell$ is even.\\ Let $u_i \in V(H)$ be the vertex that corresponds to $G_i$ and note that $u_i$ has a neighbor since $H$ is connected. Let $P_2$ be a path on two vertices in $H$ containing $u_i$ and $\rho$ be the isometric subgraph in $G$ that corresponds to $P_\ell$. Thus, the subgraph $P_2 \square \rho$ of $G\square H$ is isometric and, by Theorem \ref{PmxPn}, contains a rainbow $3$-AP, a contradiction. \end{description} All cases give a contradiction, thus $|c(V(G_i))| \leq 2$. \qed\end{proof} Corollary \ref{cor:neighborcopies} is a strengthening of Lemma \ref{|c(V(Gi U Gj))|<3} and follows from Lemmas \ref{|c(V(Gi U Gj))|<3} and \ref{|c(Hi)|<3}. It is used to help analyze $\aw(P_m\square C_{2k+1})$. \begin{corollary}\label{cor:neighborcopies} If $G$ and $H$ are connected graphs, $|G| \ge 2$, $|H|\ge 3$, $c$ is an exact, rainbow-free $r$-coloring of $G\square H$ with $r\ge 3$, and $v_iv_j \in E(H)$, then \[|c(V(G_i)\cup V(G_j))| \leq 2.\] \end{corollary} \begin{lemma}\label{c(H_i)/c(H_j) < 2}\cite{SWY} Let $G$ be a connected graph on $m$ vertices and $H$ be a connected graph on $n$ vertices. 
Let $c$ be an exact $r$-coloring of $G\square H$ with no rainbow $3$-APs. If $G_1,G_2, \ldots,G_n$ are the labeled copies of $G$ in $G\square H$, then $|c(V(G_j)) \setminus c(V(G_i))| \leq 1$ for all $1 \leq i, j \leq n$. \end{lemma} \begin{proposition}\label{prop:everycopy} If $G$ and $H$ are connected graphs, $|G| \ge 2$, $|H|\ge 3$, $c$ is an exact, rainbow-free $r$-coloring of $G\square H$ with $r\ge 3$, then there is a color in $c(G\square H)$ that appears in every copy of $G$. \end{proposition} \begin{proof} Suppose $c(G\square H) = \{c_1,\ldots,c_r\}$. First, for the sake of contradiction, assume $|c(V(G_i))|=1$ for every $1 \leq i \leq |H|$. Then define a coloring $c': V(H) \to c(G\square H)$ such that $c'(w_i) \in c(V(G_i))$. Then Lemma \ref{isometricpathorC3} implies that there is either an isometric path or $C_3$ in $H$ with $3$ colors. If there is an isometric $C_3$, say $(w_1,w_2,w_3)$, then $\{v_{1,1},v_{1,2},v_{1,3}\}$ is a rainbow $3$-AP in $G\square H$ with respect to $c$, a contradiction. So, there must be an isometric path in $H$ with $3$ colors. Suppose $P = (w_1,\ldots,w_n)$ is a shortest such path. Without loss of generality, $c(w_1)=c_2$, $c(w_n)=c_3$ and $c(w_i)=c_1$ for all $1 < i < n$. Then there exists $u_1,u_2\in V(G)$ such that $u_1u_2 \in E(G)$. Thus, $\{v_{1,1},v_{1,n},v_{2,2}\}$ is a rainbow $3$-AP in $G\square H$ with respect to $c$, a contradiction. Thus, there exists some $G_i$ such that $|c(V(G_i))|\geq 2$, without loss of generality, say $c_1,c_2 \in c(V(G_i))$. Then Lemma \ref{|c(Hi)|<3} implies $c(V(G_i))=\{c_1,c_2\}$. Note that $c_3 \in c(V(G_j))$ for some $j \neq i$. Lemma \ref{c(H_i)/c(H_j) < 2} implies that $c_1 \in c(V(G_j))$ or $c_2 \in c(V(G_j))$. Without loss of generality, suppose $c_1 \in c(V(G_j))$ implying $c(V(G_j)) = \{c_1,c_3\}$ by Lemma \ref{|c(Hi)|<3}. It will be shown that $c_1$ appears in every copy of $G$. \begin{sloppy}Now, for $k \notin \{i,j\}$, Lemma \ref{c(H_i)/c(H_j) < 2} implies that $|c(V(G_i))\setminus c(V(G_k))| \leq 1$ and\end{sloppy}\\ $|c(V(G_j))\setminus c(V(G_k))| \leq 1$. Thus, for all $k \notin \{i,j\}$, either $c_1 \in c(V(G_k))$ or $c_2,c_3 \in c(V(G_k))$ implying $c(V(G_k)) = \{c_2,c_3\}$ by Lemma \ref{|c(Hi)|<3}. Now, define $c': V(H) \to \{red,blue\}$ by \[c'(w_k) = \begin{cases} red & \text{if $c_1 \in c(V(G_k))$,} \\ blue & \text{if $c(V(G_k)) = \{c_2,c_3\}$.} \end{cases}\] For the sake of contradiction, assume $blue \in c'(V(H))$. Then there must exist $red$ and $blue$ neighbors in $H$, call them $w_{\ell_1},w_{\ell_2}$. Without loss of generality, say $c'(w_{\ell_1}) = red$ and $c'(w_{\ell_2}) = blue$ so that $c_1 \in c(V(G_{\ell_1}))$ and $c(V(G_{\ell_2})) = \{c_2,c_3\}$. Then $c_1,c_2,c_3 \in c(V(G_{\ell_1}))\cup c(V(G_{\ell_2}))$ and $3 \leq |c(V(G_{\ell_1}))\cup c(V(G_{\ell_2}))|$, contradicting Corollary \ref{cor:neighborcopies}. Thus, $c'(V(H)) = red$, the desired result.\qed\end{proof} \section{Graph Products of Paths and Cycles}\label{sec:pmcn} As a reminder, the conventions for $G\square H$ will be used to label the vertices of $P_m\square C_n$. 
In particular, letting $G = P_m$ and $H = C_n$ gives the following: \begin{itemize} \item $V(P_m) = \{u_1,u_2,\dots,u_m\}$ with edges $u_iu_{i+1}$ for $1\le i \le m-1$, \item $V(C_n) = \{w_1,w_2,\dots,w_n\}$ with edges $w_iw_{i+1}$ for $1\le i \le n-1$ and $w_nw_1$, \item $G_i$ is the $i$th copy of $P_m$ in $P_m\square C_n$ and has vertex set $\{v_{1,i},v_{2,i},\dots,v_{m,i}\}$, and \item $H_i$ is the $i$th copy of $C_n$ in $P_m\square C_n$ and has vertex set $\{v_{i,1},v_{i,2},\dots,v_{i,n}\}$. \end{itemize} Now, a fact about $P_m\square C_n$ is that $\dist_{P_m\square C_n}(v_{i,j},v_{k,\ell}) = |i-k| + \min\{ j-\ell \bmod n, \ell - j \bmod n\}$. Note that the standard representatives of the equivalence classes of $\mathbb{Z}_n$ are chosen, i.e. $j-\ell\bmod n, \ell-j \bmod n \in \{0,1,\dots,n-1\}$. \begin{lemma}\label{lem:p2codd} For any positive integer $k$, $\aw(P_2 \square C_{2k+1},3) = 3$. \end{lemma} \begin{proof} For the sake of contradiction, let $c$ be an exact, rainbow-free $3$-coloring of $P_2 \square C_{2k+1}$. Swapping the roles of $G$ and $H$ in Lemma \ref{|c(Hi)|<3} gives \[|c(V(H_1))|, |c(V(H_2))| \leq 2.\] Without loss of generality, suppose $c(V(H_1)) = \{red,blue\}$, $green \in c(V(H_2))$ with $c(v_{2,1}) = green$. Note that $c(v_{1,1}) \in \{red,blue\}$ and define $P_\ell$ to be a shortest path in $H_1$ containing $v_{1,1}$ that contains colors $red$ and $blue$. Without loss of generality, let $P_\ell = (v_{1,1},v_{1,2},v_{1,3},\dots, v_{1,\ell})$ and let $\rho$ be the isometric subgraph of $C_{2k+1}$ that corresponds to $P_\ell$. Note that $P_2\square\rho$ is an isometric subgraph in $P_2\square C_{2k+1}$ that contains three colors and $\ell \le k+1$. If $\ell$ is even, then $P_2 \square \rho$ has a rainbow $3$-AP by Theorem \ref{PmxPn} so $P_2\square C_{2k+1}$ has a rainbow $3$-AP, a contradiction. If $\ell$ is odd and $\ell \le k$, extending $P_\ell$ by one additional vertex (and likewise extending $\rho$ to be $\rho'$) maintains isometry. That is, there is an isometric path $P_{\ell+1}$ in $H_1$ that contains $v_{1,1}$ and has colors $red$ and $blue$. Thus, $P_2\square\rho'$ is an isometric subgraph of $P_2\square C_{2k+1}$ that contains three colors and it contains a rainbow $3$-AP by Theorem \ref{PmxPn}, another contradiction. Finally, consider the case when $\ell$ is odd and $\ell = k+1$. Without loss of generality, say $c(v_{1,1}) = red$, so that the minimality of $P_\ell$ gives $c(v_{1,\ell}) = blue$. Note that $c(v_{1,i}) = red$ for $k+3\le i \le 2k+1$, else the minimality of $P_\ell$ would be contradicted. Also, since $\ell = k+1$ is odd, $k$ is even (so $k \ge 2$), which makes $j=\frac{3k+4}{2}$ an integer with $k+3 \le j \le 2k+1$. Thus, $\{v_{2,1},v_{1,j},v_{1,\ell}\}$ is a rainbow $3$-AP, a contradiction. Therefore, no such $c$ exists and $\aw(P_2 \square C_{2k+1},3) = 3.$\qed\end{proof} \begin{lemma}\label{lem:pmcodd} For integers $m$ and $k$ with $2 \le m$ and $1\le k$, \[\aw(P_m\square C_{2k+1},3) = 3.\] \end{lemma} \begin{proof} For a base case, note that Lemma \ref{lem:p2codd} implies $\aw(P_2 \square C_{2k+1}, 3) = 3$ for all $1\le k$. As the inductive hypothesis, suppose that $\aw(P_\ell \square C_{2k+1}, 3) = 3$ for some $2\le \ell$. Let $c$ be a rainbow-free, exact $3$-coloring of $P_{\ell+1} \square C_{2k+1}$ and let $H_i$ denote the $i$th copy of $C_{2k+1}$. By hypothesis and the fact that $c$ is rainbow-free, \[\left|c\left(\bigcup_{i=1}^{\ell}V(H_i)\right)\right| \leq 2 \text{ and } \left|c\left(\bigcup_{i=2}^{\ell+1}V(H_i)\right)\right| \leq 2.\] Thus, the inclusion-exclusion principle gives $\left|c\left(\bigcup_{i=2}^{\ell}V(H_i)\right)\right| = 1$.
Without loss of generality, assume \[c\left(\bigcup_{i=2}^{\ell}V(H_i)\right) = \{red\}, \quad blue \in c(V(H_1)), \quad and \quad green \in c\left(V(H_{\ell+1})\right).\] In particular, assume $c(v_{1,1}) = blue$ and $c(v_{\ell+1,j}) = green$ for some $j \le k+1$. Suppose $\ell$ is even. Then $\{v_{1,1},v_{\frac{\ell+2}{2},i},v_{\ell+1,j}\}$ is a rainbow $3$-AP for $i = \frac{j+1}{2}$ if $j$ is odd, and $i = \frac{2k+j+2}{2}$ if $j$ is even. On the other hand, suppose $\ell$ is odd. Then, $\{v_{1,1},v_{\frac{\ell+1}{2},i},v_{\ell+1,j}\}$ is a rainbow $3$-AP for $i = \frac{j+2}{2}$ if $j$ is even, and $i = \frac{2k+j+1}{2}$ if $j$ is odd. In any case, there is a rainbow $3$-AP which is a contradiction, so $\aw(P_{\ell+1} \square C_{2k+1}, 3) = 3.$ Thus, by induction, $\aw(P_{m} \square C_{2k+1}, 3) = 3$ for any $2\le m$.\qed\end{proof} Determining of $\aw(P_2\square C_{2k})$ requires two strategies since there are $k$ values for which $\aw(P_2\square C_{2k})=3$ and $k$ values for which $\aw(P_2\square C_{2k})=4$. Essentially, $\aw(P_2\square C_n)=4$ when $n=4\ell$ and is determined by providing a coloring where one pair of vertices that are diametrically opposed are colored distinctly and everything else is a third color. This avoids rainbow $3$-APs since the diameter of $P_2\square C_{4\ell}$ is odd and because each vertex $v\in V(C_{4\ell})$ has exactly one vertex whose distance from $v$ realizes the diameter of $C_{4\ell}$. Note that this is different than what happens in $P_2\square C_{2k+1}$ since each vertex $v \in V(C_{2k+1})$ has two vertices whose distance from $v$ realizes the diameter of $C_{2k+1}$. When the diameter of $P_2\square C_{2k}$ is even, this coloring, and every other coloring, ends up creating an isometric $P_2\square P_{2j}$ with $3$-colors. Then, it is only a matter of applying Theorem \ref{PmxPn} to find the rainbow $3$-AP. \begin{lemma}\label{lem:pmcevendiamodd} \begin{sloppy} For integers $m$ and $k$ with $2 \le m,k$, $\aw(P_m \square C_{2k},3) = 4$ if $\diam(P_m \square C_{2k})$ is odd. \end{sloppy} \end{lemma} \begin{proof} Define $c: V(P_m \square C_{2k}) \to \{red,blue,green\}$ by \[c(v_{i,j}) = \begin{cases} blue & \text{if $i=j=1$,} \\ green & \text{if $i=m$ and $j=k+1$,} \\ red & \text{otherwise.} \end{cases}\] Note that any rainbow $3$-AP must contain $v_{1,1}$ and $v_{m,k+1}$ since they are the only $blue$ and $green$ vertices, respectively. This will be shown by proving $v_{1,1}$ and $v_{m,k+1}$ are not part of any nondegenerate $3$-AP. For the sake of contradiction, assume there exists $v_{i,j}\in V(P_m\square C_n)$ such that $\{v_{1,1},v_{i,j},v_{m,k+1}\}$ is a nondegenerate $3$-AP. One way this can happen is if $\dist(v_{1,1},v_{i,j}) = \dist(v_{i,j},v_{m,k+1})$. Without loss of generality, suppose $1 \leq j \leq k+1$. Then \[\dist(v_{1,1},v_{i,j}) = (i-1) + (j-1) = i+j-2,\] and \[\dist(v_{i,j},v_{m,k+1}) = (m-i) + (k+1-j) = m+k+1-i-j.\] By assumption, $i+j-2 = m+k+1-i-j$ which implies that $m+k-1 = 2i+2j-2$. However, $\diam(P_m \square C_{2k}) = m+k-1$ is odd, a contradiction. The only other possible way that $\{v_{1,1},v_{i,j}, v_{m,k+1}\}$ is a $3$-AP is if $\dist(v_{i,j},v_{1,1}) = \diam(P_m\square C_{2k})$ or $\dist(v_{i,j},v_{m,k+1}) = \diam(P_m\square C_{2k})$. However, this implies $v_{i,j}\in\{v_{1,1},v_{m,k+1}\}$ which gives a degenerate $3$-AP. Thus, the exact $3$-coloring $c$ of $P_m\square C_{2k}$ is rainbow free so $4\le \aw(P_m \square C_{2k},3)$. 
Theorem \ref{thm:rsw} gives an upper bound of $4$ which implies $\aw(P_m \square C_{2k},3) = 4$.\qed\end{proof} \begin{lemma}\label{lem:p2ceven} For any integer $k$ with $2\le k$, \[\aw(P_2 \square C_{2k},3) = \begin{cases} 3 & \text{if $k$ is odd,} \\ 4 & \text{if $k$ is even.} \end{cases}\] \end{lemma} \begin{proof} If $k$ is even, then $\diam(P_2\square C_{2k}) = 1+k$ is odd so by Lemma \ref{lem:pmcevendiamodd} $\aw(P_2\square C_{2k}) = 4$. Now assume $k$ is odd and let $c$ be an exact $3$-coloring of $P_2 \square C_{2k}$. For the sake of contradiction, assume $c$ is rainbow-free. By Lemma \ref{|c(Hi)|<3}, $|c(V(H_1))|,|c(V(H_2))| \leq 2$. Without loss of generality, suppose $c(V(H_1)) = \{red,blue\}$, and $green \in c(V(H_2))$ with $c(v_{2,1}) = green$. Now, define $P_\ell$ as a shortest path in $H_1$ containing $v_{1,1}$ that contains colors $red$ and $blue$, and let $\rho$ be the isometric subgraph of $C_{2k+1}$ that corresponds to $P_\ell$. Note that $P_2\square \rho$ is an isometric subgraph in $P_2\square C_{2k}$ that contains three colors. If $\ell$ is even, then Theorem \ref{PmxPn} gives a rainbow $3$-AP, a contradiction. If $\ell$ is odd, then extending $P_\ell$ by one vertex in either direction maintains isometry. In other words, there is an isometric path $P_{\ell+1}$ in $H_1$ that contains $v_{1,j}$ and the colors $red$ and $blue$. Thus, $P\square P_{\ell+1}$ is an isometric subgraph of $P_2\square C_{2k}$ that contains three colors which means it has a rainbow $3$-AP by Theorem \ref{PmxPn}, a contradiction. Therefore, when $k$ is odd, every exact $3$-coloring of $P_2\square C_{2k}$ has a rainbow $3$-AP and $\aw(P_2 \square C_{2k},3) = 3$.\qed\end{proof} Before getting to more general results an analysis of $\aw(P_3\square C_n)$ needs to happen. Similar to the $\aw(P_2\square C_n)$ situation, there are very subtle and important differences when $n$ is odd versus when $n$ is even. \begin{lemma}\label{lem:p3ceven} For any integer $k$ with $2\le k$, \[\aw(P_3\square C_{2k},3) = \begin{cases} 3 & \text{if $k$ is even,} \\ 4 & \text{if $k$ is odd.} \end{cases}\] \end{lemma} \begin{proof} If $k$ is odd, then $\diam(P_3\square C_{2k}) = 2+k$ is odd so by Lemma \ref{lem:pmcevendiamodd} $\aw(P_2\square C_{2k}) = 4$. Suppose $k$ is even and $c$ is an exact, rainbow-free $3$-coloring of $P_3\square C_{2k}$. Then an argument similar to the argument in the proof of Lemma \ref{lem:pmcodd} can be used to establish, without loss of generality, that $c(V(H_1)) = \{red,blue\}$, $c(V(H_2)) = \{red\}$, $c(V(H_3)) = \{red,green\}$, $c(v_{1,1}) = blue$ and $c(v_{3,j}) = green$ for some $1\le j \le k+1$. If $j$ is odd, then $\{v_{1,1},v_{2,\frac{j+1}{2}},v_{3,j}\}$ is a rainbow $3$-AP, contradicting that $P_3\square C_{2k}$ is rainbow free. So, suppose $j$ is even. Then $j+1\leq k+1$ implying that the path $P_{j+1} = (w_1,\ldots,w_{j+1})$ is an isometric subgraph of $C_{2k}$. So, $P_3\square P_{j+1}$ is an isometric subgraph of $P_3\square C_{2k}$. Since $c(P_3\square P_{j+1}) = \{red,blue,green\}$, Theorem \ref{PmxPn} implies that $P_3\square C_{2k}$ contains a rainbow $3$-AP.\qed\end{proof} \begin{lemma}\label{lem:pmc4x+2} If $m \geq 2$ is even and $k\geq 1$, then \[\aw(P_m\square C_{4k+2},3) = 3.\] \end{lemma} \begin{proof} Lemma \ref{lem:p2ceven} implies $\aw(P_2 \square C_{4k+2},3) = 3$. Suppose $\aw(P_\ell \square C_{4k+2}, 3) = 3$ for some even $\ell \geq 2$. 
Then, let $c$ be an exact $3$-coloring of $P_{\ell+2} \square C_{4k+2}$ that avoids rainbow $3$-APs, and let $H_i$ denote the $i$th copy of $C_{4k+2}$. By hypothesis, \[\left|c\left(\bigcup_{i=1}^{\ell}V(H_i)\right)\right| \leq 2 \quad \text{and} \quad \left|c\left(\bigcup_{i=3}^{\ell+2}V(H_i)\right)\right| \leq 2.\] By the inclusion-exclusion principle, $\left|c\left(\bigcup_{i=3}^{\ell}V(H_i)\right)\right| = 1$. Without loss of generality, suppose $c\left(\bigcup_{i=3}^{\ell}V(H_i)\right) = \{red\}$, so that Proposition \ref{prop:everycopy} implies $red\in c(V(H_i))$ for $1\le i \le \ell+2$. Further, without loss of generality, suppose $blue \in c(V(H_1)\cup V(H_2))$ and $green \in c(V(H_{\ell+1})\cup V(H_{\ell+2}))$. Say, $c(v_{i,1}) = blue$ and $c(v_{h,j}) = green$ for $i \in \{1,2\}$, $h \in \{\ell+1,\ell+2\}$ and $1 \leq j \leq 2k+2$ such that $i$ is maximal and $h$ is minimal. If $i=2$ and $h=3$, then $|c(V(H_2) \cup V(H_3))| \ge 3$, which contradicts Corollary \ref{cor:neighborcopies}. So assume $h-i \ge 2$. Thus, $c(V(H_{i+1})) = \{red\}$ and $c(V(H_{h-1})) = \{red\}$. \begin{description} \item[Case 1.] Suppose $\dist(v_{i,1},v_{h,j})$ is even. Then either $\dist_{P_{\ell+2}}(u_i,u_h)=h-i$ and $\dist_{C_{4k+2}}(w_1,w_j)=j-1$ are both odd or both even. If they are both even, then $\{v_{i,1}, v_{\frac{i+h}{2},\frac{j+1}{2}}, v_{h,j}\}$ is a rainbow $3$-AP. If they are both odd, then $\{v_{i,1}, v_{\frac{i+h+1}{2},\frac{j}{2}},v_{h,j}\}$ is a rainbow $3$-AP. \item[Case 2.] Suppose $\dist(v_{i,1},v_{h,j})$ is odd. If $j < 2k+2$, then $\{v_{h,j},v_{i,1},v_{h-1,j+1}\}$ is a rainbow $3$-AP. So, suppose $j=2k+2$. Then $\dist_{C_{4k+2}}(w_1,w_j)=2k+1$ is odd implying that $\dist_{P_{\ell+2}}(u_i,u_h)$ is even. Thus, either $i = 1$ and $h = \ell+1$, or $i = 2$ and $h = \ell + 2$. First, suppose $i = 1$ and $h = \ell + 1$. Then the $3$-AP $\{v_{\ell+1,j},v_{1,1},v_{\ell+2,j+1}\}$ implies $c(v_{\ell+2,j+1}) = green$. Since $i$ is maximal, $c(V(H_2)) = \{red\}$. Thus, $\{v_{1,1}, v_{\ell+2,j+1}, v_{2,2}\}$ is a rainbow $3$-AP since $j+1 = 2k+3$. For $i=2$ and $h=\ell+2$, the $3$-APs $\{v_{2,1},v_{\ell+2,j},v_{1,2}\}$ and $\{v_{\ell+2,j},v_{1,2},v_{\ell+1,j+1}\}$ yield a rainbow $3$-AP. \end{description} Thus, $\aw(P_{\ell+2} \square C_{4k+2},3)=3$ and by induction, $\aw(P_{m} \square C_{4k+2},3)=3$ for any even $m\geq2$.\qed\end{proof} Replacing $4k+2$ with $4k$ and $2k+2$ with $2k+1$ gives the proof of Lemma \ref{lem:pmc4x}, thus that proof has been omitted. \begin{lemma}\label{lem:pmc4x} If $m \geq 3$ is odd and $k\geq 1$, then \[\aw(P_m\square C_{4k},3) = 3.\] \end{lemma} Lemmas \ref{lem:pmcodd}, \ref{lem:pmcevendiamodd}, \ref{lem:pmc4x+2}, and \ref{lem:pmc4x} yield the following theorem. \begin{theorem}\label{thm:pmcn} If $m\geq 2$ and $n\geq 3$, then \[\aw(P_m\square C_n,3)= \begin{cases} 4 & \text{if $n$ is even and $\diam(P_m\square C_n)$ is odd,} \\ 3 & \text{otherwise.} \end{cases} \] \end{theorem} \section{Graph Products of Cycles with Other Graphs}\label{sec:cmcn} This section starts with a general result, Theorem \ref{CnxG}, and then uses the general result to establish $\aw(C_m\square C_n,3)$. \begin{theorem}\label{CnxG} For any integer $k$ with $1\le k$, $\aw(G \square C_{2k+1}, 3) = 3$ for any connected graph $G$ with $|G|\geq 2$. \end{theorem} \begin{proof} Let $V(G) = \{u_1,\ldots,u_n\}$ and $H_i$ denote the $i$th labeled copy of $C_{2k+1}$. Lemma \ref{lem:p2codd} implies that $\aw(P_2\square C_{2k+1},3)=3$, so suppose $|G|\ge3$.
Let $c:V(G \square C_{2k+1}) \to \{red,blue,green\}$ be an exact $3$-coloring, and, for the sake of contradiction, assume $c$ is rainbow-free. Since $|G|\geq 3$, Proposition \ref{prop:everycopy} implies that, without loss of generality, $red$ is in every copy of $C_{2k+1}$. So, define $c': V(G) \to \{red,blue,green\}$ by \[c'(u_i) = \begin{cases} red & \text{if $c(V(H_i)) = \{red\}$,} \\ \C & \text{if $\C \in c(V(H_i))\setminus\{red\}$.} \end{cases}\] Since Lemma \ref{|c(Hi)|<3} implies that $|c(V(H_i))| \leq 2$ for all $1\leq i\leq n$, it follows that $c'$ is well-defined. By Lemma \ref{isometricpathorC3}, there either exists a $C_3$ in $G$ containing $red$, $blue$, and $green$ or an isometric path in $G$ containing $red$, $blue$, and $green$. \par First, suppose $C_3 \cong G[\{u_{i_1},u_{i_2},u_{i_3}\}]$ contains $red$, $blue$, and $green$. Then, without loss of generality, there exists neighboring copies $H_{i_1}$ and $H_{i_2}$ of $H$, in $G\square C_{2k+1}$, such that $c(V(H_{i_1})) = \{red,blue\}$ and $c(V(H_{i_2})) = \{red,green\}$, contradicting Corollary \ref{cor:neighborcopies}. \par Finally, suppose there exists an isometric path $P$ in $G$ such that $c'(V(P)) = \{red,blue,\\green\}$. Now, by Lemma \ref{lem:pmcodd}, there exists a rainbow $3$-AP in the isometric subgraph $P \square C_{2k+1}$, a contradiction.\qed\end{proof} Just as Lemma \ref{lem:pmcodd} was generalized into Theorem \ref{CnxG} which showed that \[\aw(G \square C_{2k+1},3)=3\] for all connected $G$ with at least $2$ vertices, significant time was spent on the conjecture that a similar generalization could be performed to show $\aw(G \square C_{4k+2},3)=3$ when $\diam(G)$ is odd and $\aw(G \square C_{4k},3)=3$ when $\diam(G)$ is even. However, these conjectures do not hold because it cannot be guaranteed that an isometric $P_{2j} \square C_{4k+2}$ subgraph of $G \square C_{4k+2}$ or $P_{2j+1} \square C_{4k}$ subgraph of $G \square C_{4k}$ existed that contained three colors. The following example provides such a $G$. \begin{example}\label{ex:counterex} Consider the graph in Figure \ref{fig:pncevencex} which is $G \square C_4$, where $G$ is a $C_{10}$ with a leaf. That is $V(G) = \{w_1,\ldots,w_{11}\}$ with edges $w_iw_{i+1}$ for $1\le i \le 9$ and the additional edges $w_1w_{10}$ and $w_{10}w_{11}$. Define $c:V(G\square C_4) \to \{red,blue,green\}$ by $c(v_{2,1}) = blue$, $c(v_{7,3}) = green$, and $c(v) = red$ for all $v \in V(G \square C_4)\setminus\{v_{2,1},v_{7,3}\}$. In order for $G \square C_4$ to contain a rainbow $3$-AP, there must exist a red $v \in V(G \square C_4)$ such that \[\dist(v_{2,1},v) = \dist(v,v_{7,3}), \quad \dist(v,v_{2,1}) = \dist(v_{2,1},v_{7,3}), \quad \text{or} \quad \dist(v,v_{7,3}) = \dist(v_{7,3},v_{2,1}).\] By construction, every vertex $v$ of $G \square C_4$ is such that $\dist(v,v_{2,1})$ and $\dist(v,v_{7,3})$ have different parity, thus $\dist(v_{2,1},v) \neq \dist(v,v_{7,3})$ for all $v \in V(G)$. To show that there are no vertices $v$ of $G$ distinct from $v_{2,1},v_{7,3}$ such that $\dist(v,v_{2,1}) = \dist(v_{2,1},v_{7,3})$ or $\dist(v,v_{7,3}) = \dist(v_{7,3},v_{2,1})$, a discussion about \emph{eccentricity} is needed. For a vertex $v$ of a graph $G$, the \textit{eccentricity} of $v$, denoted $\epsilon(v)$, is the distance between $v$ and a vertex furthest from $v$ in $G$. In other words, \[\epsilon(v) = \max_{u\in V(G)} \dist(u,v).\] In this example, $\epsilon(v_{2,1}) = \epsilon(v_{7,3}) = \dist(v_{2,1},v_{7,3}) = 7$ and both eccentricities are unique-ly realized. 
So, there are no non-degenerate $3$-APs in $G \square C_4$ containing $v_{2,1}$ and $v_{7,3}$. Thus, $\aw(G \square C_4,3)=4$. \begin{figure}[ht!] \centering \def \scale {.5} \begin{tikzpicture} \def \n {10} \def \radius {5*\scale} \def \radiuss {7*\scale} \def \radiusss {6.1*\scale} \foreach \s in {1,...,\n} { \node[draw, circle, fill = red, opacity = .6, inner sep = 3.5*(\scale)^.5] (1\s) at (({360/\n * (\s-1)) + 90}:\radius) {}; \node[draw, circle, fill = red, opacity = .6, inner sep = 3.5*(\scale)^.5] (3\s) at (({360/\n * (\s-1)) + 90}:\radiuss) {}; \node[draw, circle, fill = red, opacity = .6, inner sep = 3.5*(\scale)^.5] (2\s) at (({360/\n * (\s-1)) + 99.25}:\radiusss) {}; \node[draw, circle, fill = red, opacity = .6, inner sep = 3.5*(\scale)^.5] (4\s) at (({360/\n * (\s-1)) + 80.75}:\radiusss) {}; } \node[draw, circle, fill = red, opacity = .6, inner sep = 3.5*(\scale)^.5] (111) at (-.7*\scale,3.5*\scale) {}; \node[draw, circle, fill = red, opacity = .6, inner sep = 3.5*(\scale)^.5] (211) at (-1.7*\scale,2.5*\scale) {}; \node[draw, circle, fill = red, opacity = .6, inner sep = 3.5*(\scale)^.5] (311) at (-.7*\scale,1.5*\scale) {}; \node[draw, circle, fill = red, opacity = .6, inner sep = 3.5*(\scale)^.5] (411) at (.3*\scale,2.5*\scale) {}; \foreach \s in {1,...,11} { \draw (1\s) to (2\s); \draw (2\s) to (3\s); \draw (3\s) to (4\s); \draw (4\s) to (1\s); } \foreach \r in {1,...,4} { \draw (\r1) to (\r2); \draw (\r2) to (\r3); \draw (\r3) to (\r4); \draw (\r4) to (\r5); \draw (\r5) to (\r6); \draw (\r6) to (\r7); \draw (\r7) to (\r8); \draw (\r8) to (\r9); \draw (\r9) to (\r10); \draw (\r10) to (\r1); \draw (\r1) to (\r11); } \node[draw, circle, fill = white, opacity = 1, inner sep = 3.5*(\scale)^.5] () at (({360/10 * (2)) + 90}:\radiuss) {}; \node[draw, circle, fill = white, opacity = 1, inner sep = 3.5*(\scale)^.5] () at (({360/10 * (7)) + 90}:\radius) {}; \node[draw, circle, fill = blue, opacity = .5, inner sep = 3.5*(\scale)^.5] () at (({360/10 * (2)) + 90}:\radiuss) {}; \node[draw, circle, fill = green, opacity = .7, inner sep = 3.5*(\scale)^.5] () at (({360/10 * (7)) + 90}:\radius) {}; \node at (({360/10 * (2)) + 91.5}:8*\scale) {$v_{2,1}$}; \node at (({360/10 * (7)) + 88.5}:4*\scale) {$v_{7,3}$}; \end{tikzpicture} \caption{Image for Example \ref{ex:counterex}: Graph $G\square C_4$, counterexample of generalizing Lemma \ref{lem:pmc4x}.} \label{fig:pncevencex} \end{figure} \end{example} Note that the graph in Figure \ref{fig:pncevencex} is the only example presented in this paper of a graph product with even diameter and anti-van der Waerden number (with respect to $3$) equal to $4$. This is discussed more in Section \ref{sec:future}. Theorem \ref{CnxG} gives the following result. \begin{corollary}\label{cor:coddcodd} If $m$ or $n$ is odd with $m,n \geq 3$, then $\aw(C_m\square C_n,3) = 3$. \end{corollary} Lemmas \ref{lem:pmc4x+2} and \ref{lem:pmc4x} are used to prove Lemma \ref{lem:cevencevenaw3}. \begin{lemma}\label{lem:cevencevenaw3} If $m$ and $n$ are even with $m\equiv n \Mod{4}$, then $\aw(C_m\square C_n,3) = 3$. \end{lemma} \begin{proof} Let $c$ be an exact $3$-coloring of $C_m\square C_n$. Lemma \ref{isometricpathorC3} implies that $C_m\square C_n$ either contains an isometric path or a $C_3$ with three colors. Since there are no $C_3$ subgraphs in $C_m\square C_n$, it follows that $C_m\square C_n$ must contain an isometric path with three colors. Call a shortest such path $P$. 
Suppose $P$ intersects $k$ copies of $C_n$, and, without loss of generality, suppose these copies are $H_1,\ldots,H_k$. \par Notice that there are vertices $v$ and $v'$ of $P$ in $V(H_1)$ and $V(H_k)$, respectively. If $k > \frac{m}{2} + 1$, then any shortest path from $v$ to $v'$ would be contained in the subgraph induced by the vertices of $H_k,H_{k+1},\ldots,H_n,H_1$. So, no shortest path between $v$ and $v'$ would be contained in $P$, implying that $P$ is not isometric, a contradiction. \par Thus, $k \leq \frac{m}{2} + 1$, and $P$ is a subgraph of $P_{\frac{m}{2}+1}\square C_n$ where $P_{\frac{m}{2}+1}$ is the subgraph of $C_m$ induced by $\{u_1,\ldots,u_{\frac{m}{2}+1}\}$. Thus, $P$ is an isometric subgraph of $C_m\square C_n$ because $P_{\frac{m}{2} + 1}$ is isometric in $C_m$. Since there are three colors in $P$, there are three colors in $P_{\frac{m}{2} + 1} \square C_n$. Furthermore, since $m\equiv n \Mod{4}$, $\frac{m}{2}$ and $\frac{n}{2}+1$ have different parity. So, Lemma \ref{lem:pmc4x+2} or Lemma \ref{lem:pmc4x} implies that $P_{\frac{m}{2} + 1} \square C_n$ contains a rainbow $3$-AP. Thus, $C_m\square C_n$ contains a rainbow $3$-AP.\qed\end{proof} In the proof of Lemma \ref{lem:cevencevenaw4}, the fact that each vertex in an even cycle realizes the diameter with exactly one other vertex will be used. \begin{lemma}\label{lem:cevencevenaw4} If $m$ and $n$ are even with $m\not\equiv n \Mod{4}$, then $\aw(C_m\square C_n,3) = 4$. \end{lemma} \begin{proof} Define $k=\frac{m}{2}+1$ and $\ell=\frac{n}{2}+1$ and the coloring $c: V(C_m\square C_n)\to \{red,blue,\\green\}$ by \[c(v_{i,j}) = \begin{cases} blue & \text{if $i=j=1$,} \\ green & \text{if $i=k, j=\ell$,} \\ red & \text{otherwise.} \end{cases}\] Since $v_{1,1}$ and $v_{k,\ell}$ are the only $blue$ and $green$ vertices, any rainbow $3$-AP must contain them. This result will be proved by showing $v_{1,1}$ and $v_{k,\ell}$ are not part of any nondegenerate $3$-AP. For the sake of contradiction, assume there exists $v_{i,j}\in V(C_m\square C_n)$ such that $\{v_{1,1},v_{i,j},v_{k,\ell}\}$ is a nondegenerate $3$-AP. One way this can happen is if $\dist(v_{1,1},v_{i,j}) = \dist(v_{i,j},v_{k,\ell})$. Without loss of generality, up to a relabelling of the vertices, suppose $1 \leq i \leq k$ and $1 \leq j \leq \ell$. Then \[\dist(v_{1,1},v_{i,j}) = (i-1) + (j-1) = i+j-2,\] and \[\dist(v_{i,j},v_{k,\ell}) = (k-i) + (\ell-j) = k+\ell-i-j.\] By assumption, $i+j-2 = k+\ell-i-j$, which implies that \begin{equation}\label{eq1} 2i + 2j - 2 = k+\ell = \frac{m}{2} + \frac{n}{2}. \end{equation} However, $m\not\equiv n \Mod{4}$ implies $\frac{m}{2} + \frac{n}{2}$ is odd, which contradicts equation (\ref{eq1}). The only other possible way that $\{v_{1,1},v_{i,j}, v_{k,\ell}\}$ is a $3$-AP is if $\dist(v_{i,j}, v_{1,1}) = \dist(v_{1,1},v_{k,\ell})$ or $\dist(v_{i,j}, v_{k,\ell}) = \dist(v_{k,\ell},v_{1,1})$. However, \[\epsilon(v_{1,1})=\epsilon(v_{k,\ell})=\diam(C_m\square C_n)\] is uniquely realized. This implies $v_{i,j}\in \{v_{1,1},v_{k,\ell}\}$ yielding a degenerate $3$-AP. Thus, the exact $3$-coloring $c$ of $C_m\square C_n$ is rainbow free so $4\le \aw(C_m\square C_n,3)$. Theorem \ref{thm:rsw} gives an upper bound of $4$ which implies $\aw(C_m\square C_n,3) = 4$. \qed\end{proof} Conglomerating Corollary \ref{cor:coddcodd}, Lemma \ref{lem:cevencevenaw3} and Lemma \ref{lem:cevencevenaw4} yields Theorem \ref{thm:cmcn}. 
\begin{theorem}\label{thm:cmcn} If $m,n \geq 3$, then \[\aw(C_m\square C_n,3) = \begin{cases} 4 & \text{if $m$ and $n$ are even and $\diam(C_m\square C_n)$ is odd,} \\ 3 & \text{otherwise.} \end{cases} \] \end{theorem} \section{Future Work}\label{sec:future} Recall that Example \ref{ex:counterex} was the only example presented in this paper of a graph product with even diameter and anti-van der Waerden number (with respect to $3$) equal to $4$. One of the key factors in allowing this to happen was a pair of vertices $u$ and $v$ such that $\epsilon(u)=\epsilon(v)=\dist(u,v)$, where this common value is strictly less than the diameter of the graph. Such vertices will be called \textit{almost peripheral vertices}; the name comes from \textit{peripheral vertices}, which are vertices that realize the diameter. \begin{conjecture}\label{conj:5.1} If $G \square H$ has no almost peripheral vertices and $\diam(G\square H)$ is even, then $\aw(G\square H,3)=3$. \end{conjecture} In particular, the authors believe that trees do not contain any almost peripheral vertices. For this reason, it is believed that Conjecture \ref{conj:5.2} holds if Conjecture \ref{conj:5.1} holds. \begin{conjecture}\label{conj:5.2} If $T$ is a tree, $n$ is even, and $\diam(T\square C_n)$ is even, then $\aw(T\square C_n,3)=3$. \end{conjecture} This result would provide a more specific case of when the even cycle analog of Theorem \ref{CnxG} holds. Another way to extend Theorem \ref{CnxG} would be to consider $\aw(G \square C_n,k)$ for some $k>3$. For $k=3$, Theorem \ref{CnxG} showed that when $n$ is odd, $\aw(G \square C_n,k)=k$ for any connected $G$ of order at least $2$. However, there may be other properties of $n$ that guarantee $\aw(G \square C_n,k)=k$ for $k > 3$. Some preliminary work analyzing $\aw(P_m\square C_n,4)$ suggests that for any $n$, there exists an $m$ such that $\aw(P_m\square C_n,4)\geq 5$. \begin{acknowledgements} Thanks to Ethan Manhart, Hunter Rehm and Laura Zinnel for providing feedback and offering ideas during this project. \end{acknowledgements} \begin{thebibliography}{20} \bibitem{AF} M. Axenovich and D. Fon-Der-Flaass, On rainbow arithmetic progressions, \emph{Electron. J. Combin.} {\bf 11} (2004), no. 1, Research Paper 1, 7 pp. \bibitem{AM} M. Axenovich and R.R. Martin, Sub-Ramsey numbers for arithmetic progressions, \emph{Graphs and Combinatorics} {\bf 22} (2006), no. 1, 297--309. \bibitem{SWY} Z. Berikkyzy, A. Schulte, E. Sprangel, S. Walker, N. Warnberg and M. Young, Anti-van der Waerden numbers on Graphs, Accepted to \emph{Graphs and Combinatorics}, \url{https://arxiv.org/pdf/1802.01509.pdf}. \bibitem{BSY} Z. Berikkyzy, A. Schulte, and M. Young, Anti-van der Waerden numbers of 3-term arithmetic progressions, \emph{Electron. J. Combin.} {\bf 24} (2017), no. 2, Paper 2.39, 9 pp. \bibitem{BKKTTY} E. Bevilacqua, A. King, J. Kritschgau, M. Tait, S. Tebon and M. Young, Rainbow numbers for $x_1 + x_2 = kx_3$ in $\mathbb{Z}_n$, \emph{Integers} {\bf 20} (2020), A50. \bibitem{DMS} S. Butler, C. Erickson, L. Hogben, K. Hogenson, L. Kramer, R.L. Kramer, J. Lin, R.R. Martin, D. Stolee, N. Warnberg and M. Young, Rainbow Arithmetic Progressions, \emph{J. Comb.} {\bf 7} (2016), no. 4, 595--626. \bibitem{ESS} P. Erd{\H{o}}s, M. Simonovits, and V. S{\'{o}}s, Anti-Ramsey theorems, \emph{Infinite and finite sets (Colloq., Keszthely, 1973; dedicated to P. Erd{\H{o}}s on his 60th birthday)} {\bf{II}} (1973), 633--643. \bibitem{FGRWW} K. Fallon, C. Giles, H. Rehm, S. Wagner and N. Warnberg, Rainbow numbers of $[n]$ for $\sum_{i=1}^{k-1} x_i = x_k$, \emph{Austral. J.
Combin.} {\bf 77(1)} (2020), 1--8. \bibitem{FMO} S. Fujita, C. Magnant, and K. Ozeki, Rainbow generalizations of Ramsey theory: A dynamic survey, \emph{Theory Appl. Graphs} {\bf 0} (2014), no. 1, Article 1. \bibitem{RFC} M. Huicochea and A. Montejano, The Structure of Rainbow-Free Colorings For Linear Equations on Three Variables in $\mathbb{Z}_p$, \emph{Integers} {\bf 15A} (2015), A8. \bibitem{J} V. Jungi\'c, J. Licht (Fox), M. Mahdian, J. Ne\v{s}et\v{r}il, and R. Radoi\v{c}i\'c, Rainbow arithmetic progressions and anti-Ramsey results, \emph{Combinatorics, Probability and Computing} {\bf 12} (2003), no. 5--6, 599--620. \bibitem{LM} B. Llano and A. Montejano, Rainbow-free Colorings of $x+y = cz$ in $\mathbb{Z}_p$, \emph{Discrete Math.} {\bf 312} (2012), 2566--2573. \bibitem{R} F.P. Ramsey, On a Problem of Formal Logic, \emph{Proc. London Math. Soc.} {\bf 30} (1928), 264--286. \bibitem{RSW} H. Rehm, A. Schulte and N. Warnberg, Anti-van der Waerden numbers on Graph Products, \emph{Austral. J. Combin.} {\bf 73(3)} (2019), 486--500. \bibitem{S} I. Schur, \"{U}ber Potenzreihen die im Innern des Einheitskreises beschr\"{a}nkt sind, \emph{J. Reine Angew. Math.} (1917), 205--232. \bibitem{U} K. Uherka, An introduction to Ramsey theory and anti-Ramsey theory on the integers, Master's Creative Component (2013), Iowa State University. \bibitem{W27} B. van der Waerden, Beweis einer Baudetschen Vermutung, \emph{Nieuw Arch. Wisk.} {\bf 19} (1927), 212--216. \bibitem{finabgroup} M. Young, Rainbow Arithmetic Progressions in Finite Abelian Groups, \emph{J. Comb.} {\bf 9(4)} (2018), 619--629. \end{thebibliography} \end{document}
2205.11619v1
http://arxiv.org/abs/2205.11619v1
Two-weighted estimates of the multilinear fractional integral operator between weighted Lebesgue and Lipschitz spaces with optimal parameters
\documentclass[11pt]{amsart} \usepackage{amsfonts, amssymb, amsmath, amsthm, color, float,enumerate} \usepackage[unicode,psdextra]{hyperref} \usepackage{bookmark} \usepackage{url} \usepackage{pgfplots,tikz} \usetikzlibrary{arrows} \setlength{\topmargin}{0pt} \setlength{\headheight}{12pt} \setlength{\headsep}{12pt} \setlength{\textheight}{297mm} \setlength{\footskip}{40pt} \addtolength{\textheight}{-2in} \addtolength{\textheight}{-\footskip} \setlength{\textwidth}{210mm} \addtolength{\textwidth}{-2in} \setlength{\oddsidemargin}{5mm} \setlength{\evensidemargin}{5mm} \setlength{\marginparwidth}{0pt} \setlength{\marginparsep}{0pt} \theoremstyle{plain} \newtheorem{teo}{Theorem} \newtheorem{coro}[teo]{Corollary} \newtheorem{lema}[teo]{Lemma} \newtheorem{propo}[teo]{Proposition} \newtheorem{result}{Result} \theoremstyle{definition} \newtheorem{defi}{Definition} \theoremstyle{remark} \newtheorem{obs}{Remark} \newtheorem{example}{Example} \newtheorem{afirmacion}{Claim} \numberwithin{equation}{section} \numberwithin{teo}{section} \allowdisplaybreaks \definecolor{zzttqq}{rgb}{0.6,0.2,0.} \definecolor{qqzzqq}{rgb}{0.,0.6,0.} \definecolor{aquamarine}{rgb}{0.5, 1.0, 0.83} \definecolor{blizzardblue}{rgb}{0.67, 0.9, 0.93} \definecolor{blush}{rgb}{0.87, 0.36, 0.51} \definecolor{celestialblue}{rgb}{0.29, 0.59, 0.82} \definecolor{chocolate(web)}{rgb}{0.82, 0.41, 0.12} \renewcommand{\baselinestretch}{1.3} \hypersetup{ colorlinks = true, linkcolor = celestialblue, anchorcolor = blue, citecolor = blush, filecolor = blue, urlcolor = chocolate(web) } \begin{document} \title[Two-weighted estimates of $I_{\gamma,m}$]{Two-weighted estimates of the multilinear fractional integral operator between weighted Lebesgue and Lipschitz spaces with optimal parameters} \author[F. Berra]{Fabio Berra} \address{CONICET and Departamento de Matem\'{a}tica (FIQ-UNL), Santa Fe, Argentina.} \email{[email protected]} \author[G. Pradolini]{Gladis Pradolini} \address{CONICET and Departamento de Matem\'{a}tica (FIQ-UNL), Santa Fe, Argentina.} \email{[email protected]} \author[W. Ramos]{Wilfredo Ramos} \address{CONICET and Departamento de Matem\'{a}tica (FaCENA-UNNE), Corrientes, Argentina.} \email{[email protected]} \thanks{The authors were supported by CONICET, UNL and UNNE.} \subjclass[2010]{26A33, 42B25} \keywords{Multilinear fractional operator, Lipschitz spaces, weights} \begin{abstract} Given an $m$-tuple of weights $\vec{v}=(v_1,\dots,v_m)$, we characterize the classes of pairs $(w,\vec{v})$ involved in the boundedness properties of the multilinear fractional integral operator from $\prod_{i=1}^mL^{p_i}\left(v_i^{p_i}\right)$ into suitable Lipschitz spaces associated to a parameter $\delta$, $\mathcal{L}_w(\delta)$. Our results generalize some previous estimates not only for the linear case but also for the unweighted problem in the multilinear context. We emphasize the study of the range of the parameters involved in the problem described above, which is optimal in the sense that the classes of weights become trivial outside of the region obtained. We also exhibit nontrivial examples of pairs of weights in this region.
\end{abstract} \maketitle \section*{} \medskip \medskip \vspace*{-1.5cm} \hrule \vspace*{0.1cm} \hrule \vspace*{0.5cm} \begin{center} \textit{This article is dedicated to Professor Eleonor ``Pola'' Harboure, beloved colleague whose vast knowledge and human kindness have always been a guidance to us.} \end{center} \vspace*{0.5cm}\hrule\vspace*{0.1cm}\hrule\vspace*{0.75cm} \medskip \medskip \section{Introduction}\label{seccion: introduccion} In 1972 B. Muckenhoupt characterized the nonnegative functions $w$ for which the classical Hardy-Littlewood maximal operator $M$ is bounded in $L^p(w)$, for $1<p<\infty$ (see \cite{Muck72}). More precisely, the author proved that $M:L^p(w)\hookrightarrow L^p(w)$ if and only if $w\in A_p$, that is, $w$ satisfies the inequality \[\left(\frac{1}{|Q|}\int_Q w\right)\left(\frac{1}{|Q|}\int_Q w^{1-p'}\right)^{p-1}\leq C\] for every cube $Q$. These classes became very important for many estimates in Harmonic Analysis and were further studied by many authors. Later on, in \cite{Muckenhoupt-Wheeden74}, B. Muckenhoupt and R. Wheeden introduced a variant of these sets of functions, the $A_{p,q}$ classes, given by the collection of weights $w$ such that \[\left(\frac{1}{|Q|}\int_Q w^q\right)^{1/q}\left(\frac{1}{|Q|}\int_Q w^{-p'}\right)^{1/p'}\leq C,\] for every cube $Q$, where $1<p,q<\infty$. These classes played an important role in the boundedness properties of the fractional maximal operator $M_\gamma$, $0<\gamma<n$, and the fractional integral operator $I_\gamma$ given by the expression \[I_\gamma f(x)=\int_{\mathbb{R}^n}\frac{f(y)}{|x-y|^{n-\gamma}}\,dy,\] whenever the integral is finite. It was proved in \cite{Muckenhoupt-Wheeden74} that if $1<p<n/\gamma$ and $1/q=1/p-\gamma/n$, then this operator maps $L^p(w^p)$ into $L^q(w^q)$ if and only if $w\in A_{p,q}$. For the endpoint case $p=n/\gamma$ it was also shown that the operator $I_\gamma$ maps $L^{n/\gamma}(w^{n/\gamma})$ into a weighted version of the bounded mean oscillation spaces $\mathrm{BMO}$ if and only if $w^{-n/(n-\gamma)}\in A_1$. Although the $A_{p,q}$ classes above are a variant of $A_p$, they are intimately related to them. It is well-known that $w\in A_{p,q}$ is equivalent either to $w^q\in A_{1+q/p'}$ or to $w^{-p'}\in A_{1+p'/q}$ (see \cite{Muckenhoupt-Wheeden74}). Later on, in \cite{Pradolini01} the author proved that for $n/\gamma\leq p<n/(\gamma-1)^+$ and $\delta=\gamma-n/p$ the operator $I_\gamma$ maps $L^p(w^p)$ into suitable weighted Lipschitz spaces related to the parameter $\delta$. These spaces are a generalization of those introduced in \cite{Muckenhoupt-Wheeden74}, which correspond to $\delta=0$. A two-weighted problem was also studied, giving the optimal parameters for which the associated classes of weights are nontrivial. In \cite{HSV} E. Harboure, O. Salinas and B. Viviani introduced a new class of weighted Lipschitz spaces, wider than those considered in \cite{Pradolini01}. Concretely, they defined the class $\mathcal{L}_w(\delta)$ as the collection of locally integrable functions $f$ such that \begin{equation}\label{eq: definicion clase Lipschitz w} \sup_{B\subset \mathbb{R}^n}\frac{1}{w^{-1}(B)|B|^{\delta/n}}\int_B|f(x)-f_B|\,dx<\infty. \end{equation} They characterized the weights involved in the continuity properties of $I_{\gamma}$ acting from $L^{p}(w)$ into $\mathcal{L}_w(\delta)$ for $1<p<n/(\gamma-1)^+$ and $\delta=\gamma-n/p$.
The class of weights turned out to be wider than the corresponding class considered in \cite{Pradolini01}, being the same under certain additional assumptions on the weight. Inspired by that work, a two-weighted problem was also studied in \cite{Prado01cal}. Given $m\in\mathbb{N}$ and $0<\gamma<mn$, the multilinear fractional integral operator of order $m$, $I_{\gamma,m}$, is defined as follows \[I_{\gamma,m} \vec{f}(x)=\int_{(\mathbb{R}^n)^m} \frac{\prod_{i=1}^m f_i(y_i)}{(\sum_{i=1}^m|x-y_i|)^{mn-\gamma}}\,d\vec{y},\] where $\vec{f}=(f_1,f_2,\dots, f_m)$ and $\vec{y}=(y_1,y_2,\dots, y_m)$, provided the integral is finite. The continuity properties of $I_{\gamma,m}$ were studied by several authors. For example, it was shown in \cite{Moen09} that if $0<\gamma<mn$ then $I_{\gamma,m}: \prod_{i=1}^m L^{p_i}\hookrightarrow L^q$, where $1/p=\sum_{i=1}^m1/p_i$ and $1/q=1/p-\gamma/n$. The author also considered weighted versions of these estimates, generalizing the results of \cite{Muckenhoupt-Wheeden74} to the multilinear context. On the other hand, in \cite{AHIV} unweighted estimates of $I_{\gamma,m}$ between $\prod_{i=1}^m L^{p_i}$ and Lipschitz-$\delta$ spaces were given, with $0\leq \delta<1$ and $\delta=\gamma-n/p$. For other types of estimates involving multilinear versions of the fractional integral operator see also \cite{Grafakos92}, \cite{GK01}, \cite{KS99} and \cite{Pradolini10}. Recently, in \cite{BPR22} we studied the boundedness of $I_{\gamma,m}$ from $\prod_{i=1}^m L^{p_i}\left(v_i^{p_i}\right)$ into the space $\mathbb{L}_w(\delta)$ defined as the collection of locally integrable functions $f$ such that \begin{equation}\label{eq: definicion clase Lipschitz norma inf} \sup_{B\subset \mathbb{R}^n}\frac{\|w\mathcal{X}_B\|_\infty}{|B|^{1+\delta/n}}\int_B|f(x)-f_B|\,dx<\infty, \end{equation} characterizing the weights involved as those satisfying the condition $\mathbb{H}_m(\vec{p},\gamma,\delta)$ given by \begin{equation}\label{eq: clase Hbb(p,gamma,delta) - m} \frac{\|w\mathcal{X}_B\|_\infty}{|B|^{(\delta-1)/n}}\prod_{i=1}^m\left(\int_{\mathbb{R}^n} \frac{v_i^{-p_i'}(y)}{(|B|^{1/n}+|x_B-y|)^{(n-\gamma_i+1/m)p_i'}}\,dy\right)^{1/p_i'}\leq C. \end{equation} The purpose of this article is to study the boundedness of the operator $I_{\gamma,m}$ from a product of weighted Lebesgue spaces into the Lipschitz space $\mathcal{L}_w(\delta)$ defined in (\ref{eq: definicion clase Lipschitz w}). Our result generalizes the linear case when $p>n/\gamma$. We consider not only related weights, which give an adequate extension of the one-weight estimates in the linear case proved in \cite{HSV}, but also independent weights, which provide an extension of the corresponding problem given in \cite{Prado01cal} for $m=1$. We characterize the classes of weights for which the problem described above holds. We also show the optimal range of the parameters involved. The optimality is understood in the sense that the parameters describe a certain region in which we can find concrete examples of weights belonging to the class, which becomes trivial outside of it. The results obtained in this paper not only extend the results in \cite{HSV} and \cite{Prado01cal} but they also generalize the unweighted multilinear results proved in \cite{AHIV}. We shall now introduce the classes of weights and the notation required in order to state our main results. Throughout the manuscript the multilinear parameter will be denoted by $m\in \mathbb{N}$.
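For orientation, observe that when $m=1$ the operator above reduces to the classical fractional integral, since in that case \[I_{\gamma,1}f(x)=\int_{\mathbb{R}^n}\frac{f(y)}{|x-y|^{n-\gamma}}\,dy=I_\gamma f(x).\] Thus the multilinear statements below contain the corresponding linear results as the particular case $m=1$.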
Let $0<\gamma<mn$, $\delta\in \mathbb{R}$ and $\vec{p}=(p_1,p_2,\dots, p_m)$ be an $m$-tuple of exponents where $1\le p_i \le \infty$ for $1\le i\le m$. We define $p$ by $1/p=\sum_{i=1}^{m}1/p_i$. We shall be dealing with a class of multilinear weights wider than the one satisfying \eqref{eq: clase Hbb(p,gamma,delta) - m} (see \cite{BPR22}), defined as follows. Given weights $w$, $v_1,\dots, v_m$ and writing $\vec{v}=(v_1,v_2,\dots,v_m)$, we say that the pair $(w,\vec{v})$ belongs to the class $\mathcal{H}_m(\vec{p},\gamma,\delta)$ if there exists a positive constant $C$ such that the inequality \begin{equation} \frac{|B|^{1+(1-\delta)/n}}{w^{-1}(B)}\prod_{i=1}^m\left(\int_{\mathbb{R}^n} \frac{v_i^{-p_i'}(y)}{(|B|^{1/n}+|x_B-y|)^{(n-\gamma_i+1/m)p_i'}}\,dy\right)^{1/p_i'}\leq C \end{equation} holds for every ball $B=B(x_B, R)$, where $x_B$ denotes the center of $B$ and $\sum_{i=1}^m\gamma_i=\gamma$, with $0<\gamma_i<n$ for every $i$. The integral above is understood as usual when $p_i=1$ (see \S~\ref{section: preliminares} for further details). When $m=1$ the class given above was first introduced in \cite{Prado01cal} (for $w=v$ see also \cite{MW-75-76} for the case $\delta=0$ and \cite{HSV} for the one-weight case). In that paper the author exhibited nontrivial weights when $\delta\leq\min\{1,\gamma-n/p\}$. A similar restriction, as we shall prove, appears in the multilinear context. \begin{obs}\label{obs: Hbb contenida en Hcal} It is easy to check that $\mathbb{H}_m(\vec{p},\gamma,\delta)\subset \mathcal{H}_m(\vec{p},\gamma,\delta)$ and, if $w^{-1}\in A_1$, both classes coincide. The same statement is true for the classes $\mathbb{L}_w(\delta)$ and $\mathcal{L}_w(\delta)$. \end{obs} We are now in a position to state our main results. \begin{teo}\label{teo: teo principal - Hcal} Let $0<\gamma<mn$, $\delta\in\mathbb{R}$, and let $\vec{p}$ be a vector of exponents that verifies $p>n/\gamma$. Let $(w,\vec{v})$ be a pair such that $v_i^{-p_i'}\in \mathrm{RH}_{m}$ for $i\in\mathcal{I}_2=\{1\leq i\leq m: 1<p_i\leq \infty\}$. Then the following statements are equivalent: \begin{enumerate}[\rm(1)] \item \label{item: teo principal - Hcal item 1} The operator $I_{\gamma,m}$ is bounded from $\prod_{i=1}^m L^{p_i}(v_i^{p_i})$ to $\mathcal{L}_{w}(\delta)$; \item \label{item: teo principal - Hcal item 2} The pair $(w,\vec{v})$ belongs to $\mathcal{H}_m(\vec{p},\gamma,\delta)$. \end{enumerate} \end{teo} Observe that a reverse Hölder condition for the weights $v_i$ is required for our theorem to hold. Although this seems to be a restriction, it holds trivially when $m=1$, as expected. A condition of this type was also required for the class $\mathbb{H}_m(\vec{p},\gamma,\delta)$ in \cite{BPR22}. We also notice that, whilst there is no explicit restriction on $\delta$ in the previous theorem, restrictions arise as a consequence of the nature of the corresponding weights. The following theorem establishes the range of parameters for which the weights in the class $\mathcal{H}_m(\vec{p},\gamma,\delta)$ are trivial, that is, $v_i=\infty$ a.e. for some $i$ or $w=0$ a.e. \begin{teo}\label{teo: no-ejemplos Hcal} Let $0<\gamma<mn$, $\delta\in\mathbb{R}$, and let $\vec{p}$ be a vector of exponents. The following statements hold: \begin{enumerate}[\rm(a)] \item\label{item: teo no-ejemplos Hcal - item a} If $\delta>1$ or $\delta>\gamma-n/p$ then condition $\mathcal{H}_m(\vec{p},\gamma,\delta)$ is satisfied if and only if $v_i=\infty$ a.e. for some $1\le i\le m$.
\item\label{item: teo no-ejemplos Hcal - item b} The same conclusion holds if $\delta=\gamma-n/p=1$. \end{enumerate} \end{teo} In \S~\ref{seccion: ejemplos} we shall exhibit nontrivial examples of pairs $(w,\vec{v})$ for which the class $\mathcal{H}_m(\vec{p},\gamma,\delta)$ is nonempty, depicting the corresponding regions described by the parameters. By Remark~\ref{obs: Hbb contenida en Hcal} we have that these regions include the corresponding ones given in \cite{BPR22}. Regarding the case when $w=\prod_{i=1}^mv_i$, which generalizes the one-weight problem when $m=1$, we proved in \cite{BPR22} that condition $\mathbb{H}_m(\vec{p},\gamma,\delta)$ reduces to the multilinear class $A_{\vec{p},\infty}$. This is the natural multilinear extension of the condition $v^{-p'}\in A_1$ in the linear setting. When $(w,\vec{v})\in \mathcal{H}_m(\vec{p},\gamma,\delta)$ and $w=\prod_{i=1}^m v_i$ we shall simply say that $\vec{v}\in \mathcal{H}_m(\vec{p},\gamma,\delta)$, that is, there exists a positive constant $C$ such that the inequality \[|B|^{(1-\delta)/n}\prod_{i=1}^m\left(\int_{\mathbb{R}^n} \frac{v_i^{-p_i'}(y)}{(|B|^{1/n}+|x_B-y|)^{(n-\gamma_i+1/m)p_i'}}\,dy\right)^{1/p_i'}\leq \frac{C}{|B|}\int_B \prod_{i=1}^m v_i^{-1}\] holds for every ball $B$, with the obvious changes when $p_i=1$ for some $i$. The following theorem deals with this case of related weights. \begin{teo}\label{teo: caso de pesos iguales} Let $0<\gamma<mn$, $\delta\in\mathbb{R}$ and let $\vec{p}$ be a vector of exponents. If $\vec{v}\in \mathcal{H}_m(\vec{p},\gamma,\delta)$ and $p/(mp-1)>1$ then we have that $\delta=\gamma-n/p$. \end{teo} When $m=1$ the theorem above was given in \cite{PR}. As an immediate consequence we have the following result. \begin{coro} Let $0<\gamma<mn$, let $\vec{p}$ be a vector of exponents and let $\delta=\gamma-n/p$. If $\vec{v}\in \mathcal{H}_m(\vec{p},\gamma,\delta)$ and $\alpha=p/(mp-1)>1$, then we have that $\prod_{i=1}^mv_i^{-1}\in \mathrm{RH}_\alpha$. \end{coro} Notice that, when $m=1$, $\alpha=p'>1$ and the corollary establishes that if $v\in \mathcal{H}_1(p,\gamma,\delta)$ then $v^{-1}\in \mathrm{RH}_{p'}$, a property proved in \cite{HSV}. \section{Preliminaries and definitions}\label{section: preliminares} Throughout the paper $C$ will denote an absolute constant that may change from one occurrence to another. By $A\lesssim B$ we mean that there exists a positive constant $c$ such that $A\leq c B$. We say that $A\approx B$ when $A\lesssim B$ and $B\lesssim A$. Let $m\in \mathbb{N}$. Given a set $E$, we shall denote by $E^m$ the Cartesian product of $m$ copies of $E$. It will be useful for us to consider the operator \begin{equation}\label{eq: operador Jgamma,m} J_{\gamma,m}\vec{f}(x)=\int_{(\mathbb{R}^n)^m} \left(\frac{1}{(\sum_{i=1}^m|x-y_i|)^{mn-\gamma}}-\frac{1-\mathcal{X}_{B(0,1)^m}(\vec{y})}{(\sum_{i=1}^m|y_i|)^{mn-\gamma}}\right)\prod_{i=1}^m f_i(y_i)\,d\vec{y}, \end{equation} which differs from $I_{\gamma,m}\vec{f}$ only by a constant term; therefore it has the same Lipschitz norm as $I_{\gamma,m}\vec{f}$, and it will be enough to prove the results for $J_{\gamma,m}$. By a weight we understand any positive and locally integrable function. As we said in the introduction, given $\delta \in \mathbb{R}$ and a weight $w$ we say that a locally integrable function $f$ belongs to $\mathcal{L}_{w}(\delta)$ if there exists a positive constant $C$ such that \begin{equation}\frac{1}{w^{-1}(B)|B|^{\delta/n}}\int_B|f(x)-f_B|\,dx\leq C \end{equation} for every ball $B$, where $f_B=|B|^{-1}\int_B f$.
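As an elementary illustration of the definition (not used in what follows), note that if $w\equiv 1$ and $\delta=1$, then every affine function $f(x)=a\cdot x+b$ belongs to this space: in this case $w^{-1}(B)=|B|$, $f_B=a\cdot x_B+b$ and $|f(x)-f_B|\leq |a|R$ on $B=B(x_B,R)$, so that \[\frac{1}{w^{-1}(B)|B|^{1/n}}\int_B|f(x)-f_B|\,dx\leq \frac{|a|R}{|B|^{1/n}}\approx |a|,\] uniformly in $B$.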
If $\delta=0$ the space $\mathcal{L}_{w}(\delta)$ coincides with some weighted versions of BMO spaces introduced in \cite{MW-75-76}. Concerning the unweighted case, when $0<\delta<1$ the space is equivalent to the classical Lipschitz class $\Lambda(\delta)$, given by the collection of functions $f$ satisfying $|f(x)-f(y)|\le C |x-y|^{\delta}$, and, if $-n<\delta<0$, it is a Morrey space. On the other hand, this space was studied for example in \cite{HSV} and in \cite{Prado01cal}. The class $\mathcal{H}_m(\vec{p},\gamma,\delta)$ is given by the pairs $(w,\vec{v})$ for which the inequality \begin{equation}\label{eq: clase Hcal(p,gamma,delta) - m} \sup_{B\subset \mathbb{R}^n} \frac{|B|^{1+(1-\delta)/n}}{w^{-1}(B)}\prod_{i=1}^m\left(\int_{\mathbb{R}^n} \frac{v_i^{-p_i'}(y)}{(|B|^{1/n}+|x_B-y|)^{(n-\gamma_i+1/m)p_i'}}\,dy\right)^{1/p_i'}<\infty \end{equation} holds. For those indices $i$ such that $p_i=1$ we understand the corresponding factor in the product above as \begin{equation}\label{eq: factor de H para p_i=1} \left\|\frac{v_i^{-1}}{(|B|^{1/n}+|x_B-\cdot|)^{(n-\gamma_i+1/m)}}\right\|_\infty. \end{equation} Let $\mathcal{I}_1=\{1\leq i\leq m: p_i=1\}$ and $\mathcal{I}_2=\{1\leq i\leq m: p_i>1\}$. We will also denote by $m_j$ the cardinality of the set $\mathcal{I}_j$, that is, $m_j=\#\mathcal{I}_j$ for $j=1,2$. We shall use this notation throughout the paper. Observe that if $(w,\vec{v})$ belongs to $\mathcal{H}_m(\vec{p},\gamma,\delta)$, then the inequalities \begin{equation}\label{eq: condicion local 2} \frac{|B|^{1-\delta/n+\gamma/n-1/p}}{w^{-1}(B)}\prod_{i\in\mathcal{I}_1}\|v_i^{-1}\mathcal{X}_B\|_\infty\,\prod_{i\in\mathcal{I}_2}\left(\frac{1}{|B|}\int_B v_i^{-p_i'}\right)^{1/p_i'}\leq C \end{equation} and \begin{equation}\label{eq: condicion global 2} \frac{|B|^{1+(1-\delta)/n}}{w^{-1}(B)}\prod_{i\in\mathcal{I}_1}\left\|\frac{v_i^{-1}\mathcal{X}_{\mathbb{R}^n\backslash B}}{(|B|^{1/n}+|x_B-\cdot|)^{(n-\gamma_i+1/m)}}\right\|_\infty\,\prod_{i\in\mathcal{I}_2}\left(\int_{\mathbb{R}^n\backslash B} \frac{v_i^{-p_i'}(y)}{|x_B-y|^{(n-\gamma_i+1/m)p_i'}}\,dy\right)^{1/p_i'}\leq C, \end{equation} hold for every ball $B$. We shall refer to these inequalities as the \textit{local} and the \textit{global} conditions, respectively. Furthermore, if $\mathcal{I}$ and $\mathcal{J}$ partition the set $\mathcal{I}_1$, from \eqref{eq: clase Hcal(p,gamma,delta) - m} we can write \begin{equation}\label{eq: local 2 y global 2 mezcladas} \frac{|B|^{1+(\gamma-\delta)/n-1/p}}{w^{-1}(B)}\prod_{i\in \mathcal{I}}\left\|v_i^{-1}\mathcal{X}_{2B-B}\right\|_\infty\,\prod_{i\in \mathcal{J}}\left\|v_i^{-1}\mathcal{X}_B\right\|_\infty\,\prod_{i\in\mathcal{I}_2}\left(\frac{1}{|2B|}\int_{2B}v_i^{-p_i'}\right)^{1/p_i'}\leq C \end{equation} for every ball $B$. This inequality will be useful for our purposes later. On the other hand, when $v_i^{-1}\in\mathrm{RH}_\infty$ for $i\in\mathcal{I}_1$ and $v_i^{-p_i'}$ is doubling for $i\in\mathcal{I}_2$, the corresponding local and global conditions imply \eqref{eq: clase Hcal(p,gamma,delta) - m}. Before stating and proving this result, we shall introduce some useful notation. Given $m\in\mathbb{N}$ we denote $S_m=\{0,1\}^m$. Given a set $B$ and $\sigma=(\sigma_1,\sigma_2,\dots,\sigma_m)\in S_m$, we define \[B^{\sigma_i}=\left\{ \begin{array}{ccl} B,&\textrm{ if }&\sigma_i=1\\ \mathbb{R}^n\backslash B,&\textrm{ if }&\sigma_i=0.
\end{array} \right.\] With the notation $\mathbf{B}^\sigma$ we will understand the cartesian product $B^{\sigma_1}\times B^{\sigma_2}\times\dots\times B^{\sigma_m}$. Particularly, if we set $\mathbf{1}=(1,1,\dots,1)$ and $\mathbf{0}=(0,0,\dots,0)$ then we have \[\mathbf{B}^{\mathbf{1}}=B\times B\times\dots\times B=B^m,\quad\textrm{ and }\quad \mathbf{B}^{\mathbf{0}}=(\mathbb{R}^n\backslash B)\times (\mathbb{R}^n\backslash B)\times\dots\times (\mathbb{R}^n\backslash B)=(\mathbb{R}^n\backslash B)^m.\] \begin{lema}\label{lema: equivalencia con local y global} Let $0<\gamma<mn$, $\delta\in\mathbb{R}$, $\vec{p}$ a vector of exponents and $(w,\vec{v})$ a pair of weights such that $v_i^{-1}\in\mathrm{RH}_\infty$ for $i\in\mathcal{I}_1$ and $v_i^{-p_i'}$ is doubling for $i\in\mathcal{I}_2$. Then condition $\mathcal{H}_m(\vec{p},\gamma,\delta)$ is equivalent to \eqref{eq: condicion global 2}. \end{lema} \begin{proof} We have already seen that $\mathcal{H}_m(\vec{p},\gamma,\delta)$ implies \eqref{eq: condicion global 2}. In order to prove the converse, we let $\theta_i=n-\gamma_i+1/m$, for every $i$. Recall that $m_2=\#\mathcal{I}_2$. After a possible rearrangement of the indices $i\in\mathcal{I}_2$ we have that \[\prod_{i\in\mathcal{I}_2}\left(\int_{\mathbb{R}^n} \frac{v_i^{-p_i'}}{(|B|^{1/n}+|x_B-\cdot|)^{\theta_ip_i'}}\right)^{1/p_i'}=\sum_{\sigma\in S_{m_2}}\prod_{i=1}^{m_2}\left( \int_{B^{\sigma_i}}\frac{v_i^{-p_i'}}{(|B|^{1/n}+|x_B-\cdot|)^{\theta_ip_i'}}\right)^{1/p_i'}.\] Fix $\sigma\in S_{m_2}$. If $\sigma_i=0$, we have that \begin{align*} \left(\int_{B^{\sigma_i}}\frac{v_i^{-p_i'}}{(|B|^{1/n}+|x_B-\cdot|)^{(n-\gamma_i+1/m)p_i'}}\right)^{1/p_i'}&=\left(\int_{\mathbb{R}^n\backslash B}\frac{v_i^{-p_i'}}{(|B|^{1/n}+|x_B-\cdot|)^{(n-\gamma_i+1/m)p_i'}}\right)^{1/p_i'}\\ &\leq \left(\int_{\mathbb{R}^n\backslash B}\frac{v_i^{-p_i'}(y)}{|x_B-y|^{(n-\gamma_i+1/m)p_i'}}\,dy\right)^{1/p_i'}. \end{align*} For $\sigma_i=1$, since $v_i^{-p_i'}$ is doubling, we have that \begin{align*} \left(\int_{B^{\sigma_i}}\frac{v_i^{-p_i'}(y)}{(|B|^{1/n}+|x_B-y|)^{(n-\gamma_i+1/m)p_i'}}\,dy\right)^{1/p_i'}&=\left(\int_B\frac{v_i^{-p_i'}(y)}{(|B|^{1/n}+|x_B-y|)^{(n-\gamma_i+1/m)p_i'}}\,dy\right)^{1/p_i'}\\ &\leq \frac{1}{|B|^{1-\gamma_i/n+1/(mn)}}\left(\int_B v_i^{-p_i'}\right)^{1/p_i'}\\ & \lesssim \frac{1}{|2B|^{1-\gamma_i/n+1/(mn)}}\left(\int_{2B\backslash B} v_i^{-p_i'}\right)^{1/p_i'}\\ &\leq\left(\int_{2B\backslash B}\frac{v_i^{-p_i'}(y)}{|x_B-y|^{(n-\gamma_i+1/m)p_i'}}\right)^{1/p_i'}\\ &\leq\left(\int_{\mathbb{R}^n\backslash B}\frac{v_i^{-p_i'}(y)}{|x_B-y|^{(n-\gamma_i+1/m)p_i'}}\right)^{1/p_i'}. \end{align*} Therefore, for every $\sigma\in S_{m_2}$ we obtain \begin{equation}\label{eq: lema: equivalencia con local y global - eq1} \prod_{i=1}^{m_2}\left( \int_{B^{\sigma_i}}\frac{v_i^{-p_i'}}{(|B|^{1/n}+|x_B-\cdot|)^{\theta_ip_i'}}\right)^{1/p_i'}\lesssim \prod_{i\in\mathcal{I}_2}\left(\int_{\mathbb{R}^n\backslash B}\frac{v_i^{-p_i'}}{|x_B-\cdot|^{\theta_ip_i'}}\right)^{1/p_i'}. \end{equation} On the other hand, for $i\in\mathcal{I}_1$ we proceed similarly as above replacing $\|\cdot\|_{p_i'}$ by $\|\cdot\|_\infty$ and using the $\mathrm{RH}_\infty$ condition for $v_i^{-1}$. 
Indeed, observe that \[\left\|v^{-1}\mathcal{X}_B\right\|_\infty\leq \frac{C}{|B|}\int_B v^{-1}\leq \frac{C}{|B|}\int_{2B\backslash B} v^{-1}\leq C\left\|v^{-1}\mathcal{X}_{2B\backslash B}\right\|_\infty.\] Then we can conclude that \begin{equation}\label{eq: lema: equivalencia con local y global - eq2} \prod_{i\in\mathcal{I}_1} \left\|\frac{v_i^{-1}}{(|B|^{1/n}+|x_B-\cdot|)^{n-\gamma/m+1/m}}\right\|_\infty\lesssim \prod_{i\in\mathcal{I}_1} \left\|\frac{v_i^{-1}\mathcal{X}_{\mathbb{R}^n\backslash B}}{|x_B-\cdot|^{n-\gamma/m+1/m}}\right\|_\infty. \end{equation} Therefore, by combining \eqref{eq: lema: equivalencia con local y global - eq1}, \eqref{eq: lema: equivalencia con local y global - eq2} and \eqref{eq: condicion global 2} we get that \[\frac{|B|^{1+(1-\delta)/n}}{w^{-1}(B)}\prod_{i\in\mathcal{I}_1}\left\|\frac{v_i^{-1}}{(|B|^{1/n}+|x_B-\cdot|)^{n-\gamma/m+1/m}}\right\|_\infty\,\prod_{i\in\mathcal{I}_2}\left(\int_{\mathbb{R}^n} \frac{v_i^{-p_i'}}{(|B|^{1/n}+|x_B-\cdot|)^{\theta_ip_i'}}\right)^{1/p_i'}\leq C,\] as desired. \end{proof} \begin{coro} Under the hypotheses of Lemma~\ref{lema: equivalencia con local y global} we have that condition \eqref{eq: condicion global 2} implies \eqref{eq: condicion local 2}. \end{coro} \section{Technical results}\label{section: resultados auxiliares} We now introduce some operators related to $I_{\gamma,m}$ and some useful properties in order to prove our main results. Given a ball $B=B(x_B,R)$ and $\tilde B=2B$, as in \cite{BPR22} we can decompose the operator in \eqref{eq: operador Jgamma,m} as \[J_{\gamma,m}\vec{f}(x)=a_B + I\vec{f}(x),\] where \begin{equation}\label{eq: definicion de a_B} a_B=\int_{(\mathbb{R}^n)^m} \left(\frac{1-\mathcal{X}_{\tilde B^m}(\vec y)}{(\sum_{i=1}^m|x_B-y_i|)^{mn-\gamma}}-\frac{1-\mathcal{X}_{B(0,1)^m}(\vec{y})}{(\sum_{i=1}^m|y_i|)^{mn-\gamma}}\right)\prod_{i=1}^m f_i(y_i)\,d\vec{y} \end{equation} and \begin{equation}\label{eq: definicion de I} I\vec{f}(x)=\int_{(\mathbb{R}^n)^m} \left(\frac{1}{(\sum_{i=1}^m |x-y_i|)^{mn-\gamma}}-\frac{1-\mathcal{X}_{\tilde B^m}(\vec{y})}{(\sum_{i=1}^m|x_B-y_i|)^{mn-\gamma}}\right)\prod_{i=1}^m f_i(y_i)\,d\vec{y}. \end{equation} We shall first prove that this operator is well-defined for $\vec{f}$ as in Theorem~\ref{teo: teo principal - Hcal}. We recall that a weight $w$ belongs to the \textit{reverse H\"{o}lder} class $\mathrm{RH}_s$, $1<s<\infty$, if there exists a positive constant $C$ such that the inequality \[\left(\frac{1}{|B|}\int_B w^s\right)^{1/s}\leq \frac{C}{|B|}\int_B w\] holds for every ball $B$ in $\mathbb{R}^n$. It is not difficult to see that $\mathrm{RH}_t\subset \mathrm{RH}_s$ whenever $1<s<t$. We also consider weights belonging to the class $\mathrm{RH}_{\infty}$, that is, the collection of weights $w$ such that the inequality \[\sup_B w\le \frac{C}{|B|}\int_B w,\] holds for some positive constant $C$. The next lemma establishes the well definition of $J_{\gamma,m}\vec{f}$, for $\vec{f}$ as in Theorem~\ref{teo: teo principal - Hcal}. \begin{lema}\label{lema: finitud de J_gamma,m para Hcal} Let $0<\gamma<mn$, $\delta\in\mathbb{R}$, and $\vec{p}$ a vector of exponents that verifies $p>n/\gamma$. Let $(w,\vec{v})$ be a pair of weights in $\mathcal{H}_m(\vec{p},\gamma,\delta)$ such that $v_i^{-p_i'}\in \mathrm{RH}_{m}$, for $i\in\mathcal{I}_2$. If $\vec{f}$ satisfies $f_iv_i\in L^{p_i}$ for every $1\leq i\leq m$, then $J_{\gamma,m}\vec{f}$ is finite in almost every $x\in \mathbb{R}^n$. 
\end{lema} \begin{proof} We are going to exhibit a sketch of the proof, since it follows similar lines to that in \cite{BPR22}, Lemma 3.1. By using the same notation as in that lemma, fix a ball $B=B(x_B, R)$ and write $J_{\gamma,m}\vec{f}=a_B+I\vec{f}$, where we split $a_B=a_B^1+a_B^2$ and $I\vec{f}=I_1\vec{f}+I_2\vec{f}$. We proved that \[|a_B^1|\leq \left(1+\frac{C}{|B|^{m-\gamma/n}}\right)\prod_{i=1 }^m\left(\int_{B_0}|f_i(y_i)|\,d_{y_i}\right),\] where $B_0=B(0,R_0)$ with $R_0=2(|x_B|+R)$. By using H\"{o}lder inequality and condition \eqref{eq: condicion local 2} we get \begin{align*} |a_B^1|&\leq \left(1+\frac{C}{|B|^{m-\gamma/n}}\right) \prod_{i=1}^m\|f_iv_i\|_{p_i}\prod_{i\in \mathcal{I}_1} \left\|v_i^{-1}\mathcal{X}_{B_0}\right\|_\infty\,\prod_{i\in \mathcal{I}_2}\left(\int_{B_0}v_i^{-p_i'}\right)^{1/p_i'}\\ &\leq \left(1+\frac{C}{|B|^{m-\gamma/n}}\right)\prod_{i=1}^m\|f_iv_i\|_{p_i}\frac{w^{-1}(B_0)}{|B_0|}|B_0|^{\delta/n-\gamma/n+1/p}\\ &<\infty. \end{align*} In the same lemma we also proved that \[|a_B^2|\leq C \prod_{i=1 }^m\|f_iv_i\|_{p_i}\prod_{i\in \mathcal{I}_1} \left\|\frac{v_i^{-1}}{(|B_0|^{1/n}+|x_{B_0}-\cdot|)^{\theta_i}}\right\|_\infty\,\prod_{i\in \mathcal{I}_2}\left(\int_{\mathbb{R}^n}\frac{v_i^{-p_i'}}{(|B_0|^{1/n}+|x_{B_0}-y_i|)^{\theta_i p_i'}}\right)^{1/p_i'},\] where $\theta_i=n-\gamma_i+1/m$. So by using condition \eqref{eq: clase Hcal(p,gamma,delta) - m} we get that \[|a_B^2|\leq C\frac{w^{-1}(B_0)}{|B_0|}|B_0|^{(\delta-1)/n}\prod_{i=1 }^m\|f_iv_i\|_{p_i}<\infty.\] Let us now consider $I_1\vec{f}$. By proceeding as in the corresponding estimate in \cite{BPR22} we obtain \begin{align*} \int_B|I_1\vec{f}(x)|\,dx&\leq C\prod_{i=1}^m\|f_iv_i\|_{p_i}\prod_{i\in \mathcal{I}_2}\left(\frac{1}{|\tilde B|}\int_{\tilde B}v_i^{-p_i'}\right)^{1/p_i'}\times\\ &\quad\times \prod_{i\in \mathcal{I}_1}\left\| v_i^{-1}\mathcal{X}_{\tilde B}\right\|_\infty|\tilde B|^{(\gamma-\gamma_0)/n-m_1+1/q'+1-1/(m_0p^*)}\\ &=C|\tilde B|^{\gamma/n-1/p+1}\prod_{i=1}^m\|f_iv_i\|_{p_i}\prod_{i\in \mathcal{I}_1}\left\| v_i^{-1}\mathcal{X}_{\tilde B}\right\|_\infty\,\prod_{i\in \mathcal{I}_2}\left(\frac{1}{|\tilde B|}\int_{\tilde B}v_i^{-p_i'}\right)^{1/p_i'}. \end{align*} We rearrange the indices in $\mathcal{I}_1$ increasingly, in a way to get $\mathcal{I}_1=\{i_1,\dots,i_{m_1}\}$. 
Observe that \[\prod_{i\in\mathcal{I}_1}\left\|v_i^{-1}\mathcal{X}_{\tilde B}\right\|_\infty\leq \prod_{i\in\mathcal{I}_1}\left(\left\|v_i^{-1}\mathcal{X}_{\tilde B-B}\right\|_\infty+\left\|v_i^{-1}\mathcal{X}_{B}\right\|_\infty\right)=\sum_{\sigma\in S^{m_1}}\prod_{j=1}^{m_1}\left\|v_{i_j}^{-1}\mathcal{X}_{\tilde B-B}\right\|_\infty^{\sigma_j}\left\|v_{i_j}^{-1}\mathcal{X}_{B}\right\|_\infty^{1-\sigma_j}.\] Therefore, \begin{align*} \int_B |I_1\vec{f}(x)|\,dx&\leq C\left(\prod_{i=1}^m\|f_iv_i\|_{p_i}\right)\times\\ &\quad \times \sum_{\sigma\in S^{m_1}}|\tilde B|^{\gamma/n-1/p+1}\prod_{i\in \mathcal{I}_2}\left(\frac{1}{|\tilde B|}\int_{\tilde B}v_i^{-p_i'}\right)^{1/p_i'}\prod_{j=1}^{m_1}\left\|v_{i_j}^{-1}\mathcal{X}_{\tilde B-B}\right\|_\infty^{\sigma_j}\left\|v_{i_j}^{-1}\mathcal{X}_{B}\right\|_\infty^{1-\sigma_j} \end{align*} Fix $\sigma\in S^{m_1}$ and define the sets \[\mathcal{I}=\{i_j\in\mathcal{I}_1: \sigma_j=1\} \quad\textrm{ and }\quad \mathcal{J}=\{i_j\in\mathcal{I}_1: \sigma_j=0\}.\] We can apply condition \eqref{eq: local 2 y global 2 mezcladas} to bound every term of the sum by \[C\frac{w^{-1}(B)}{|B|^{1+(\gamma-\delta)/n-1/p}}|\tilde B|^{\gamma/n-1/p+1}=Cw^{-1}(B)|B|^{\delta/n}.\] Consequently, \[\int_B |I_1\vec{f}(x)|\,dx\leq Cw^{-1}(B)|B|^{\delta/n}\left(\prod_{i=1}^m\|f_iv_i\|_{p_i}\right).\] Finally, for $I_2\vec{f}$ we have \[|I_2\vec{f}(x)|\leq |B|^{1/n}\sum_{\sigma\in S_m,\sigma\neq \mathbf{1}} \int_{\mathbf{\tilde B}^\sigma} \frac{\prod_{i=1}^m|f_i(y_i)|}{(\sum_{i=1}^m|x_B-y_i|)^{mn-\gamma+1}}\,d\vec{y}.\] This expression is similar to $a_B^2$, with $B_0$ replaced by $\tilde B$. Observe that \[\left\|\frac{v_i^{-1}}{|x_B-\cdot|^{\theta_i}}\mathcal{X}_{\tilde B^c}\right\|_\infty\leq \left\|\frac{v_i^{-1}}{|x_B-\cdot|^{\theta_i}}\mathcal{X}_{B^c}\right\|_\infty\] for those indices $i\in\mathcal{I}_1$ such that $\sigma_i=0$. On the other hand, if $i\in\mathcal{I}_1$ and $\sigma_i=1$, we can split the expression $\|v_i^{-1}\mathcal{X}_{\tilde B}\|_\infty$ as follows \[\left\|v_i^{-1}\mathcal{X}_{\tilde B}\right\|_\infty\leq \left\|v_i^{-1}\mathcal{X}_{\tilde B-B}\right\|_\infty+\left\|v_i^{-1}\mathcal{X}_{B}\right\|_\infty\] and repeat the argument used in the estimation of $I_1\vec{f}$. After applying condition \eqref{eq: local 2 y global 2 mezcladas} we get that \[\int_B|I_2\vec{f}(x)|\,dx\leq Cw^{-1}(B)|B|^{\delta/n}\prod_{i=1}^m\|f_iv_i\|_{p_i}.\] This concludes the proof of the lemma.\qedhere \end{proof} \begin{obs} The corresponding bound obtained for $I\vec{f}$ will be used for the proof of Theorem~\ref{teo: teo principal - Hcal}. \end{obs} The next lemma was given in \cite{BPR22}. The sets involved in its statement are defined as follows. For a fixed ball $B=B(x_B,R)$ we set \[A=\{x_B+h: h=(h_1,h_2,\dots,h_n): h_i\geq 0 \textrm{ for }1\leq i\leq n\},\] \[C_1=B\left(x_B-\frac{R}{12\sqrt{n}}u,\frac{R}{12\sqrt{n}}\right)\cap\left\{x_B-\frac{R}{12\sqrt{n}}u+h: h_i\leq 0 \textrm{ for every }i\right\},\] and \[C_2=B\left(x_B-\frac{R}{3\sqrt{n}}u,\frac{2R}{3}\right)\cap\left\{x_B-\frac{R}{3\sqrt{n}}u+h: h_i\leq 0 \textrm{ for every }i\right\},\] where $u=(1,1,\dots,1)$. \begin{lema}\label{lema: diferencia de nucleos positiva} There exists a positive constant $C=C(n)$ such that the inequality \[\frac{1}{(\sum_{j=1}^m|x-y_j|)^{mn-\gamma}}-\frac{1}{(\sum_{j=1}^m|z-y_j|)^{mn-\gamma}}\geq C\frac{|B|^{1/n}}{(|B|^{1/n}+\sum_{j=1}^m|x_B-y_j|)^{mn-\gamma+1}}\] holds for every $x\in C_1$, $z\in C_2$, and $y_j\in A$ for $1\leq j\leq m$. 
\end{lema} \begin{obs}\label{obs: medida de conjuntos C como B} It is not difficult to see that $|C_i|\approx |B|$, for $i=1,2$. \end{obs} \section{Proof of the main results}\label{section: prueba principal} In this section we prove our main results. \begin{proof}[Proof of Theorem~\ref{teo: teo principal - Hcal}] We shall first prove that $(\ref{item: teo principal - Hcal item 2})$ implies $(\ref{item: teo principal - Hcal item 1})$. We shall deal with the operator $J_{\gamma,m}$ since it differs from $I_{\gamma,m}$ by a constant term. We want to prove that for every ball $B$ \begin{equation}\label{eq: teo principal - eq1} \frac{1}{w^{-1}(B)|B|^{\delta/n}}\int_B |J_{\gamma,m}\vec{f}(x)-(J_{\gamma,m}\vec{f})_B|\,dx\leq C\prod_{i=1}^m\|f_iv_i\|_{p_i}, \end{equation} with $C$ independent of $B$. Fix a ball $B=B(x_B,R)$ and recall that $J_{\gamma,m}\vec{f}(x)=a_B+I\vec{f}(x)$. In Lemma~\ref{lema: finitud de J_gamma,m para Hcal} we proved that \begin{equation*} \int_B|I\vec{f}(x)|\,dx\leq Cw^{-1}(B)|B|^{\delta/n}\prod_{i=1}^m\|f_iv_i\|_{p_i}, \end{equation*} which implies that \begin{equation}\label{eq: teo principal - estimacion de If} \int_B|J_{\gamma,m}\vec{f}(x)-a_B|\,dx\leq Cw^{-1}(B)|B|^{\delta/n}\prod_{i=1}^m\|f_iv_i\|_{p_i}. \end{equation} On the other hand, observe that \begin{align*} \int_B |J_{\gamma,m}\vec f (x)-(J_{\gamma,m}\vec{f}\,)_B|\,dx&\leq \int_B|J_{\gamma,m}\vec{f}(x)-a_B|\,dx+\int_B|(J_{\gamma,m}\vec f\,)_B-a_B|\,dx\\ &\leq \int_B|J_{\gamma,m}\vec{f}(x)-a_B|\,dx+\int_B\frac{1}{|B|}\int_B|J_{\gamma,m}\vec f(y)-a_B|\,dy\,dx\\ &\leq 2\int_B|J_{\gamma,m}\vec{f}(x)-a_B|\,dx. \end{align*} By combining this estimate with \eqref{eq: teo principal - estimacion de If} we obtain the desired inequality. We now prove that $(\ref{item: teo principal - Hcal item 1})$ implies $(\ref{item: teo principal - Hcal item 2})$. Assume that the component functions $f_i$ of $\vec{f}$ are nonnegative. We have that \eqref{eq: teo principal - eq1} holds for every ball $B=B(x_B,R)$. Also observe that \[\frac{1}{|B|}\int_B|g(x)-g_B|\,dx\approx \frac{1}{|B|^2}\int_B\int_B|g(x)-g(z)|\,dx\,dz,\] and therefore the left hand side of \eqref{eq: teo principal - eq1} is equivalent to \[\frac{1}{w^{-1}(B)|B|^{1+\delta/n}}\int_B\int_B |J_{\gamma,m}\vec{f}(x)-J_{\gamma,m}\vec{f}(z)|\,dx\,dz=I.\] Observe that, when $y_i\in B$ for every $i$ we have \[|B|^{1/n}+|x_B-y_j|\geq \frac{1}{m}\left(|B|^{1/n}+\sum_{i=1}^m|x_B-y_i|\right),\] for every $1\leq j\leq m$. By combining Lemma~\ref{lema: diferencia de nucleos positiva} and Remark~\ref{obs: medida de conjuntos C como B} with the inequality above we can estimate $I$ as follows \begin{align*} I&\geq \frac{1}{w^{-1}(B)|B|^{1+\delta/n}}\int_{C_2}\int_{C_1} \int_{A^m} \frac{|B|^{1/n}\prod_{i=1}^m f_i(y_i)}{(|B|^{1/n}+\sum_{i=1}^m|x_B-y_i|)^{mn-\gamma+1}}\,d\vec{y}\,dx\,dz\\ &\geq C\frac{|B|^{1+(1-\delta)/n}}{w^{-1}(B)}\prod_{i=1}^m\left(\int_A \frac{f_i(y_i)}{(|B|^{1/n}+|x_B-y_i|)^{n-\gamma_i+1/m}}\,dy_i\right). \end{align*} Since the set $A$ is a quadrant from $x_B$, a similar estimation can be obtained for the other quadrants from $x_B$. Thus, we get \[I\geq C\frac{|B|^{1+(1-\delta)/n}}{w^{-1}(B)}\prod_{i=1}^m\left(\int_{\mathbb{R}^n} \frac{f_i(y)}{(|B|^{1/n}+|x_B-y|)^{n-\gamma_i+1/m}}\,dy\right),\] which implies that \begin{equation}\label{eq: teo principal - eq2} \frac{|B|^{1+(1-\delta)/n}}{w^{-1}(B)}\prod_{i=1}^m\left(\int_{\mathbb{R}^n} \frac{f_i(y)}{(|B|^{1/n}+|x_B-y|)^{n-\gamma_i+1/m}}\,dy\right)\leq C\prod_{i=1}^m\|f_iv_i\|_{p_i}. 
\end{equation} For every $i\in \mathcal{I}_1$ and $k\in\mathbb{N}$ we define $V_k^i=\{x: v_i^{-1}(x)\leq k\}$ and the functionals \[F_i^k(g)=\int_{\mathbb{R}^n}\frac{g(y)v_i^{-1}(y)\mathcal{X}_{V_k^i}(y)}{(|B|^{1/n}+|x_B-y|)^{n-\gamma_i+1/m}}\,dy.\] Therefore $F_i^k$ is a functional in $(L^1)^*=L^{\infty}$. Indeed, if $g\in L^1$ \[|F_i^k(g)|\leq \|g\|_{L^1} \left\|\frac{v_i^{-1}\mathcal{X}_{V_k^i}}{(|B|^{1/n}+|x_B-\cdot|)^{n-\gamma_i+1/m}}\right\|_\infty<\infty,\] and we also get \[\frac{|F_i^k(f_iv_i)|}{\|f_iv_i\|_{L^1}}\leq \left\|\frac{v_i^{-1}\mathcal{X}_{V_k^i}}{(|B|^{1/n}+|x_B-\cdot|)^{n-\gamma_i+1/m}}\right\|_\infty,\] for every $i\in\mathcal{I}_1$. If $i\in \mathcal{I}_2$ then we set $A_k=A\cap B(0,k)$ and consider \[f_i^k (y)=\frac{v_i^{-p_i'}(y)}{(|B|^{1/n}+|x_B-y|)^{(n-\gamma_i+1/m)/(p_i-1)}}\mathcal{X}_{A_k}(y)\mathcal{X}_{V_k^i}(y).\] Let us choose $\vec f=(f_1,\dots,f_m)$, where $f_iv_i\in L^1$ for $p_i=1$ and $f_i=f_i^k$ for $p_i>1$, for fixed $k$. Therefore, the left hand side of \eqref{eq: teo principal - eq2} can be written as follows \[\frac{|B|^{1+(1-\delta)/n}}{w^{-1}(B)}\prod_{i\in \mathcal{I}_1}F_i^k(f_iv_i)\prod_{i\in \mathcal{I}_2}\left(\int_{A_k\cap V_k^i} \frac{v_i^{-p_i'}(y)}{(|B|^{1/n}+|x_B-y|)^{(n-\gamma_i+1/m)p_i'}}\,dy\right)\] and it is bounded by \[ C\prod_{i\in\mathcal{I}_1}\|f_iv_i\|_{L^1}\prod_{i\in\mathcal{I}_2}\left(\int_{A_k\cap V_k^i} \frac{v_i^{-p_i'}(y)}{(|B|^{1/n}+|x_B-y|)^{(n-\gamma_i+1/m)p_i'}}\,dy\right)^{1/p_i}.\] This yields \[\frac{|B|^{1+(1-\delta)/n}}{w^{-1}(B)}\prod_{i\in\mathcal{I}_1}\frac{|F_i^k(f_iv_i)|}{\|f_iv_i\|_{L^{1}}}\prod_{i\in\mathcal{I}_2}\left(\int_{A_k\cap V_k^i} \frac{v_i^{-p_i'}(y)}{(|B|^{1/n}+|x_B-y|)^{(n-\gamma_i+1/m)p_i'}}\,dy\right)^{1/p_i'}\leq C,\] for every nonnegative $f_i$ such that $f_iv_i\in L^1$, $i\in\mathcal{I}_1$ and for every $k\in\mathbb{N}$. By taking the supremum over these $f_i$ we get \begin{align*} &\frac{|B|^{1+(1-\delta)/n}}{w^{-1}(B)}\prod_{i\in\mathcal{I}_1}\left\|\frac{v_i^{-1}}{(|B|^{1/n}+|x_B-\cdot|)^{n-\gamma_i+1/m}}\right\|_\infty\,\prod_{i\in\mathcal{I}_2}\left(\int \frac{v_i^{-p_i'}\mathcal{X}_{A_k\cap V_k^i}}{(|B|^{1/n}+|x_B-\cdot|)^{(n-\gamma_i+1/m)p_i'}}\right)^{\tfrac{1}{p_i'}}\\ &\qquad\leq C. \end{align*} By taking limit for $k\to\infty$, the left hand side converges to \[\frac{|B|^{1+(1-\delta)/n}}{w^{-1}(B)}\prod_{i\in\mathcal{I}_1}\left\|\frac{v_i^{-1}}{(|B|^{1/n}+|x_B-\cdot|)^{n-\gamma_i+1/m}}\right\|_\infty\,\prod_{i\in\mathcal{I}_2}\left(\int_{\mathbb{R}^n} \frac{v_i^{-p_i'}(y)}{(|B|^{1/n}+|x_B-y|)^{(n-\gamma_i+1/m)p_i'}}\,dy\right)^{\tfrac{1}{p_i'}}\] which is precisely the condition $\mathcal{H}_m(\vec{p},\gamma,\delta)$. This completes the proof.\qedhere \end{proof} \medskip \begin{proof}[Proof of Theorem~\ref{teo: no-ejemplos Hcal}] We begin with item~\eqref{item: teo no-ejemplos Hcal - item a}. We shall first assume that $\delta >1$. If $(w,\vec{v}) \in \mathcal{H}_m(\vec{p},\gamma,\delta)$, we choose $B=B(x_B, R)$ where $x_B$ is a Lebesgue point of $w^{-1}$. From \eqref{eq: clase Hcal(p,gamma,delta) - m} we obtain \[\prod_{i\in\mathcal{I}_1}\left\|\frac{v_i^{-1}}{(|B|^{1/n}+|x_B-\cdot|)^{n-\gamma_i+1/m}}\right\|_\infty\,\prod_{i\in\mathcal{I}_2}\left(\int_{\mathbb{R}^n}\frac{v_i^{-p_i'}}{(|B|^{1/n}+|x_B-\cdot|)^{(n-\gamma_i+1/m)p_i'}}\right)^{\tfrac{1}{p_i'}} \lesssim \frac{w^{-1}(B)}{|B|R^{1-\delta}},\] for every $R>0$. By letting $R\to 0$ and applying the monotone convergence theorem, we conclude that at least one limit factor in the product should be zero. 
That is, there exists $1\leq i \leq m$ such that $v_i = \infty$ almost everywhere. On the other hand, if $\delta > \gamma - n/p$ and $(w,\vec{v})$ belongs to $\mathcal{H}_m(\vec{p},\gamma,\delta)$, we pick a ball $B=B(x_B, R)$, where $x_B$ is a Lebesgue point of $w^{-1}$ and every $v_i^{-1}$. Then, by applying \eqref{eq: condicion local 2}, we have \begin{align*} \prod_{i=1}^m \frac{1}{|B|}\int_B v_i^{-1}&\leq \prod_{i\in\mathcal{I}_1}\left\|v_i^{-1}\mathcal{X}_B\right\|_\infty\,\prod_{i\in\mathcal{I}_2}\left( \frac{1}{|B|}\int_B v_i^{-p'_i} \right)^{1/p'_i }\\ &\leq C \frac{w^{-1}(B)}{|B|} R^{\delta - \gamma + n/p} \end{align*} for every $R>0$. By letting $R\to 0$ we get \begin{equation*} \prod_{i=1}^{m} v_{i}^{-1}(x_B)=0, \end{equation*} which yields that $\prod_{i=1}^{m} v_{i}^{-1}$ is zero almost everywhere. This implies that $M=\bigcap_{i=1}^m \{v_i^{-1} >0 \}$ has null measure. Since $v_i(y)>0$ for almost every $y$ and every $i$, there exists $j$ such that $v_j = \infty$ almost everywhere. We now turn our attention to item \eqref{item: teo no-ejemplos Hcal - item b}, that is, $\delta= \gamma - n/p =1$. We shall prove that if $(w,\vec{v})\in\mathcal{H}_m(\vec{p}, \gamma, 1)$, then there exists $j$ such that $v_j = \infty$ almost everywhere in $\mathbb{R}^n$. We define \begin{equation*} \frac{1}{\alpha} = \sum_{i=1}^{m}\frac{1}{p'_i} = \frac{mp-1}{p}. \end{equation*} By applying Hölder's inequality we obtain that \begin{equation*} \left(\int_{\mathbb{R}^n } \frac{(\prod_{i\in\mathcal{I}_2} v_i^{-1 })^{\alpha}}{(|B|^{1/n} + |x_B -y|)^{\sum_{i\in\mathcal{I}_2}(n-\gamma_i +1/m)\alpha}} \right)^{1/\alpha} \leq C \prod_{i\in\mathcal{I}_2} \left( \int_{\mathbb{R}^n } \frac{ v_i^{-p'_i }}{(|B|^{1/n} + |x_B -\cdot|)^{(n-\gamma_i + 1/m)p'_i} } \right)^{\tfrac{1}{p_i'}} \end{equation*} and since $(w,\vec{v})\in \mathcal{H}_{m}(\vec{p}, \gamma, 1)$ this implies that \[\prod_{i\in \mathcal{I}_1}\left\|\frac{v_i^{-1}}{(|B|^{1/n}+|x_B-\cdot|)^{n-\gamma_i+1/m}}\right\|_\infty\left(\int_{\mathbb{R}^n } \frac{(\prod_{i\in\mathcal{I}_2} v_i^{-1 })^{\alpha}}{(|B|^{1/n} + |x_B -y|)^{\sum_{i\in\mathcal{I}_2}(n-\gamma_i +1/m)\alpha }} \right)^{1/\alpha}\lesssim \frac{w^{-1}(B)}{|B|},\] and furthermore \[\left(\int_{\mathbb{R}^n } \frac{(\prod_{i=1}^m v_i^{-1 })^{\alpha}}{(|B|^{1/n} + |x_B -y|)^{(mn-\gamma +1)\alpha }}\right) ^{1/\alpha}\lesssim \frac{w^{-1}(B)}{|B|}\] for every ball $B=B(x_B,R)$. If we assume that the set $E=\{x:\, \prod_{i=1}^m v_i^{-1}(x)>0\}$ has positive measure, we arrive at a contradiction by following the same argument as in Theorem 1.2, item (b), from \cite{BPR22}. This yields $|E|=0$, that is, $\prod_{i=1}^m v_i^{-1} =0$ almost everywhere, from which we can deduce that there exists an index $j$ satisfying $v_j = \infty$ almost everywhere. \end{proof} \section{The class \texorpdfstring{$\mathcal{H}_m(\vec{p},\gamma,\delta)$}{$Hm(p,\gamma,\delta)$}}\label{seccion: ejemplos} We begin this section by exhibiting nontrivial pairs of weights satisfying condition $\mathcal{H}_m(\vec{p},\gamma,\delta)$. Concretely, we shall prove the following theorem. \begin{teo}\label{teo: ejemplos para Hcal} Given $0<\gamma<mn$, there exist pairs of weights $(w,\vec{v})$ satisfying \eqref{eq: clase Hcal(p,gamma,delta) - m} for every $\vec{p}$ and $\delta$ such that $\delta\leq \min\{1,\gamma-n/p\}$, excluding the case $\delta=1$ when $\gamma-n/p=1$.
\end{teo} The following figure shows the area in which we can find nontrivial weights satisfying condition $\mathcal{H}_m(\vec{p},\gamma,\delta)$, split into the cases $\gamma<1, \gamma=1$ and $\gamma>1$. \begin{center} \begin{tikzpicture}[scale=0.75] \node[above] at (-4,6) {$\gamma>1$}; \draw [-stealth, thick] (-6,-7)--(-6,5); \draw [-stealth, thick] (-7,0)--(-1,0); \draw [thick] (-6.05,3)--(-5.95, 3); \node [left] at (-6,5) {$\delta$}; \node [left] at (-6,3) {$1$}; \node [below] at (-1,0) {$1/p$}; \node [below] at (-2,0) {$m$}; \draw [thick] (-2,0.05)--(-2,-0.05); \draw [thick] (-6.05,-4)--(-5.95, -4); \node [left] at (-6,-4) {$\gamma-mn$}; \draw [fill=aquamarine, fill opacity=0.5] (-6,-7)--(-6,3)--(-4,3)--(-2,-4)--(-2,-7)--cycle; \draw [color=white] (-2,-7)--(-6,-7); \draw [dashed, thick] (-6,1)--(-3.4286,1); \node [left] at (-6,1) {$\tau$}; \draw [fill=white] (-4,3) circle (0.08cm); \node [right] at (-3.5,2) {$\delta=\gamma-n/p$}; \node[above] at (3,6) {$\gamma=1$}; \draw [-stealth, thick] (1,-7)--(1,5); \draw [-stealth, thick] (0,0)--(6,0); \draw [thick] (0.95,3)--(1.05, 3); \node [left] at (1,5) {$\delta$}; \node [left] at (1,3) {$1$}; \node [below] at (6,0) {$1/p$}; \node [below] at (5,0) {$m$}; \draw [thick] (5,0.05)--(5,-0.05); \draw [thick] (0.95,-4)--(1.05, -4); \node [left] at (1,-4) {$\gamma-mn$}; \draw [fill=aquamarine, fill opacity=0.5] (1,-7)--(1,3)--(5,-4)--(5,-7)--cycle; \draw [color=white] (5,-7)--(1,-7); \draw [dashed, thick] (1,1)--(2.152857,1); \node [left] at (1,1) {$\tau$}; \draw [fill=white] (1,3) circle (0.08cm); \node [right] at (2,2) {$\delta=\gamma-n/p$}; \node[above] at (10,6) {$\gamma<1$}; \draw [-stealth, thick] (8,-7)--(8,5); \draw [-stealth, thick] (7,0)--(13,0); \draw [thick] (7.95,3)--(8.05, 3); \node [left] at (8,5) {$\delta$}; \node [left] at (8,3) {$1$}; \node [below] at (13,0) {$1/p$}; \node [below] at (12,0) {$m$}; \draw [thick] (12,0.05)--(12,-0.05); \draw [thick] (7.95,-4)--(8.05, -4); \node [left] at (8,-4) {$\gamma-mn$}; \draw [fill=aquamarine, fill opacity=0.5] (8,-7)--(8,2)--(12,-4)--(12,-7)--cycle; \draw [color=white] (12,-7)--(8,-7); \draw [dashed, thick] (8,1)--(8.6667,1); \node [left] at (8,1) {$\tau$}; \node [right] at (8.5,2) {$\delta=\gamma-n/p$}; \end{tikzpicture} \end{center} The following lemma will be useful in order to prove Theorem~\ref{teo: ejemplos para Hcal} (see \cite{Pradolini01}). \begin{lema}\label{lema: estimacion de la integral de |x|^a en una bola} If $R>0$, $B=B(x_B,R)$ is a ball in $\mathbb{R}^n$ and $\alpha>-n$ then \[\int_B |x|^{\alpha}\,dx\approx R^n\left(\max\{R,|x_B|\}\right)^\alpha.\] \end{lema} \medskip \begin{proof}[Proof of Theorem~\ref{teo: ejemplos para Hcal}] In \cite{BPR22} we exhibited examples of weights in the class $\mathbb{H}_m(\vec{p},\gamma,\delta)$ given by \eqref{eq: clase Hbb(p,gamma,delta) - m}, for $\gamma-mn\leq \delta\leq \min\{1,\gamma-n/p\}$, excluding the case $\delta=1$ when $\gamma-n/p=1$. By Remark~\ref{obs: Hbb contenida en Hcal} the same examples satisfy $\mathcal{H}_m(\vec{p},\gamma,\delta)$, so it will be enough to check the case $\delta<\gamma-mn$. Recall that $\theta_i=n/p_i+(1-\gamma)/m$ and $\mathcal{I}_1=\{1\leq i\leq m: p_i=1\}$. Let us first assume that $\mathcal{I}_1\neq \emptyset$. We choose $-\theta_i<\beta_i<n/p_i'$ for every $i\in\mathcal{I}_2$ and $\theta_i<0$, and $0<\beta_i<n/p_i'$ if $\theta_i\geq 0$. 
This election implies that \[\nu=\sum_{i\in\mathcal{I}_2,\theta_i\geq 0}\beta_i+\sum_{i\in\mathcal{I}_2,\theta_i<0}(\beta_i+\theta_i)>0.\] We now choose \[0<\beta<\min\left\{\frac{\nu}{m_1}, n+\frac{1-\gamma}{m}\right\},\] and take $\beta_i=-\beta$ for every $i\in\mathcal{I}_1$. Let $\alpha=\delta+\sum_{i=1}^m\beta_i+n/p-\gamma$ and define \[w(x)=|x|^\alpha\quad\textrm{ and }\quad v_i(x)=|x|^{\beta_i},\quad \textrm{ for } 1\leq i\leq m.\] Notice that \[\alpha=\delta+\sum_{i=1}^m\beta_i+n/p-\gamma<\delta+\sum_{i=1}^m\frac{n}{p_i'}+\frac{n}{p}-\gamma=\delta+mn-\gamma<0,\] since $\delta<\gamma-mn$, so $w^{-1}$ is a locally integrable function. On the other hand, since $v_i^{-1}\in \mathrm{RH}_\infty$ for $i\in\mathcal{I}_1$ the same conclusion holds for these weights. For $i\in\mathcal{I}_2$ we also have that $v_i^{-p_i'}$ is locally integrable since $\beta_i<n/p_i'$. Therefore, by virtue of Lemma~\ref{lema: equivalencia con local y global}, it will be enough to show that there exists a positive constant $C$ such that the inequality \begin{equation}\label{eq: teo: ejemplos para Hcal - eq1} \frac{|B|^{1+(1-\delta)/n}}{w^{-1}(B)}\prod_{i\in\mathcal{I}_1}\left\|\frac{v_i^{-1}\mathcal{X}_{\mathbb{R}^n\backslash B}}{|x_B-\cdot|^{n-\gamma/m+1/m}}\right\|_\infty\,\prod_{i\in\mathcal{I}_2}\left(\int_{\mathbb{R}^n\backslash B}\frac{v_i^{-p_i'}}{|x_B-\cdot|^{(n-\gamma/m+1/m)p_i'}}\right)^{1/p_i'}\leq C \end{equation} holds for every ball $B=B(x_B,R)$. We shall first assume that $|x_B|\leq R$. By Lemma~\ref{lema: estimacion de la integral de |x|^a en una bola} we have that \begin{equation}\label{eq: teo: ejemplos para Hcal - eq2} \frac{|B|^{1+(1-\delta)/n}}{w^{-1}(B)}\lesssim R^{1-\delta+\alpha}. \end{equation} On the other hand, if $i\in\mathcal{I}_1$ and $B_k=B(x_B,2^kR)$, $k\in\mathbb{N}$, we have \begin{align*} \left\|\frac{v_i^{-1}\mathcal{X}_{\mathbb{R}^n\backslash B}}{|x_B-\cdot|^{n-\gamma/m+1/m}}\right\|_\infty&\lesssim \sum_{k=0}^\infty \left\|\frac{v_i^{-1}\mathcal{X}_{B_{k+1}\backslash B_k}}{|x_B-\cdot|^{n-\gamma/m+1/m}}\right\|_\infty\\ &\lesssim \sum_{k=0}^\infty \left(2^kR\right)^{-\beta_i-n+\gamma/m-1/m}\\ &\lesssim R^{-\beta_i-n+\gamma/m-1/m}, \end{align*} since $-\beta_i-n+\gamma/m-1/m<0$. This yields \begin{equation}\label{eq: teo: ejemplos para Hcal - eq3} \prod_{i\in\mathcal{I}_1}\left\|\frac{v_i^{-1}\mathcal{X}_{\mathbb{R}^n\backslash B}}{|x_B-\cdot|^{n-\gamma/m+1/m}}\right\|_\infty\lesssim R^{-\sum_{i\in\mathcal{I}_1}(\beta_i+\theta_i)}. \end{equation} Finally, since $\beta_i+\theta_i>0$ for $i\in\mathcal{I}_2$, by Lemma~\ref{lema: estimacion de la integral de |x|^a en una bola} we obtain \begin{align*} \left(\int_{\mathbb{R}^n\backslash B} \frac{v_i^{-p_i'}(y)}{|x_B-y|^{(n-\gamma/m+1/m)p_i'}}\,dy\right)^{1/p_i'}&\lesssim \sum_{k=0}^\infty (2^kR)^{-n+\gamma/m-1/m}\left(\int_{B_{k+1}\backslash B_k} |y|^{-\beta_ip_i'}\,dy\right)^{1/p_i'}\\ &\lesssim \sum_{k=0}^\infty (2^kR)^{-n+\gamma/m-1/m-\beta_i+n/p_i'}\\ &\lesssim R^{-n/p_i+\gamma/m-1/m-\beta_i}, \end{align*} since $-n/p_i+\gamma/m-1/m-\beta_i<0$ by the choice of $\beta_i$. Therefore, we obtain \begin{equation}\label{eq: teo: ejemplos para Hcal - eq4} \prod_{i\in\mathcal{I}_2}\left(\int_{\mathbb{R}^n\backslash B}\frac{v_i^{-p_i'}(y)}{|x_B-y|^{(n-\gamma/m+1/m)p_i'}}\,dy\right)^{1/p_i'}\lesssim R^{-\sum_{i\in\mathcal{I}_2}(\beta_i+\theta_i)}. 
\end{equation} By combining \eqref{eq: teo: ejemplos para Hcal - eq2}, \eqref{eq: teo: ejemplos para Hcal - eq3} and \eqref{eq: teo: ejemplos para Hcal - eq4}, the left-hand side of \eqref{eq: teo: ejemplos para Hcal - eq1} is bounded by \[CR^{1-\delta+\alpha-\sum_{i=1}^m(\theta_i+\beta_i)}=C.\] Now we consider the case $|x_B|>R$. By Lemma~\ref{lema: estimacion de la integral de |x|^a en una bola} we have that \begin{equation}\label{eq: teo: ejemplos para Hcal - eq5} \frac{|B|^{1+(1-\delta)/n}}{w^{-1}(B)}\lesssim R^{1-\delta}|x_B|^\alpha\lesssim R^{1-\delta+\alpha}, \end{equation} because $\alpha<0$. Since $|x_B|>R$, there exists a number $N\in\mathbb{N}$ such that $2^NR<|x_B|\leq 2^{N+1}R$. For $i\in\mathcal{I}_1$ we write \begin{align*} \left\|\frac{v_i^{-1}\mathcal{X}_{\mathbb{R}^n\backslash B}}{|x_B-\cdot|^{n-\gamma/m+1/m}}\right\|_\infty&\lesssim \sum_{k=0}^N \left\|\frac{v_i^{-1}\mathcal{X}_{B_{k+1}\backslash B_k}}{|x_B-\cdot|^{n-\gamma/m+1/m}}\right\|_\infty+\sum_{k=N+1}^\infty \left\|\frac{v_i^{-1}\mathcal{X}_{B_{k+1}\backslash B_k}}{|x_B-\cdot|^{n-\gamma/m+1/m}}\right\|_\infty\\ &=S_1^i+S_2^i. \end{align*} By standard estimation we have that \[S_1^i\lesssim |x_B|^{-\beta_i}\sum_{k=0}^N \left(2^kR\right)^{-n+\gamma/m-1/m}\lesssim |x_B|^{-\beta_i}R^{-n+\gamma/m-1/m}=|x_B|^{-\beta_i}R^{-\theta_i}\] and \begin{align*} S_2^i&\lesssim \sum_{k=N+1}^\infty \left(2^kR\right)^{-\beta_i-n+\gamma/m-1/m}\\ &\lesssim \left(2^{N}R\right)^{-\beta_i-n+\gamma/m-1/m}\sum_{k=0}^\infty 2^{k(-\beta_i-n+\gamma/m-1/m)}\\ &\lesssim |x_B|^{-\beta_i}R^{-n+\gamma/m-1/m} =|x_B|^{-\beta_i}R^{-\theta_i}. \end{align*} These inequalities imply that \begin{equation}\label{eq: teo: ejemplos para Hcal - eq6} \prod_{i\in\mathcal{I}_1}\left\|\frac{v_i^{-1}\mathcal{X}_{\mathbb{R}^n\backslash B}}{|x_B-\cdot|^{n-\gamma/m+1/m}}\right\|_\infty\lesssim |x_B|^{-\sum_{i\in\mathcal{I}_1}\beta_i}\,\,R^{-\sum_{i\in\mathcal{I}_1}\theta_i}. \end{equation} If $i\in\mathcal{I}_2$ we split the integral in a similar way to get \begin{align*} \left(\int_{\mathbb{R}^n\backslash B} \frac{v_i^{-p_i'}(y)}{|x_B-y|^{(n-\gamma/m+1/m)p_i'}}\,dy\right)^{1/p_i'}&\lesssim \sum_{k=0}^\infty(2^{k}R)^{-n+\gamma/m-1/m}\left(\int_{B_k} |y|^{-\beta_ip_i'}\,dy\right)^{1/p_i'}\\ &=\sum_{k=0}^N+\sum_{k=N+1}^\infty\\ &=S_1^i+S_2^i. \end{align*}\label{pag: estimacion del producto para i fuera de I_1, |x_B|>R} We shall estimate the sum $S_1^i+S_2^i$ by distinguishing into the cases $\theta_i<0$, $\theta_i=0$ and $\theta_i>0$. Let us first assume that $\theta_i<0$. Then by Lemma~\ref{lema: estimacion de la integral de |x|^a en una bola} we obtain \begin{align*} S_1^i&\lesssim \sum_{k=0}^N(2^{k}R)^{-n+\gamma/m-1/m+n/p_i'}|x_B|^{-\beta_i}\\ &\lesssim |x_B|^{-\beta_i}R^{-\theta_i}\sum_{k=0}^N 2^{-k\theta_i}\\ &\lesssim |x_B|^{-\beta_i}(2^NR)^{-\theta_i}\\ &\lesssim |x_B|^{-\beta_i-\theta_i}, \end{align*} since $\theta_i<0$. For $S_2^i$ we apply again Lemma~\ref{lema: estimacion de la integral de |x|^a en una bola} in order to get \begin{align*} S_2^i&\lesssim \sum_{k=N+1}^\infty(2^{k}R)^{-n+\gamma/m-1/m+n/p_i'-\beta_i}\\ &\lesssim \sum_{k=N+1}^\infty \left(2^{k}R\right)^{-\beta_i-\theta_i}\\ &= \left(2^{N+1}R\right)^{-\beta_i-\theta_i}\sum_{k=0}^\infty 2^{-k(\beta_i+\theta_i)}\\ &\lesssim |x_B|^{-\beta_i-\theta_i}, \end{align*} since $\theta_i+\beta_i>0$. This yields \begin{equation}\label{eq: teo: ejemplos para Hcal - eq7} S_1^i+S_2^i\lesssim |x_B|^{-\beta_i-\theta_i} \end{equation} when $\theta_i<0$. Now assume that $\theta_i=0$. 
Proceeding similarly to the previous case, we have \[S_1^i\lesssim |x_B|^{-\beta_i}N\lesssim |x_B|^{-\beta_i}\log_2\left(\frac{|x_B|}{R}\right),\] and \[S_2^i\lesssim |x_B|^{-\beta_i}\] since $\beta_i>0$ when $\theta_i=0$. Consequently, \begin{equation}\label{eq: teo: ejemplos para Hcal - eq8} S_1^i+S_2^i\lesssim |x_B|^{-\beta_i}\left(1+\log_2\left(\frac{|x_B|}{R}\right)\right)\lesssim |x_B|^{-\beta_i}\log_2\left(\frac{|x_B|}{R}\right). \end{equation} We finally consider the case $\theta_i>0$. For $S_2^i$ we can proceed exactly as in the case $\theta_i<0$ and get the same bound. On the other hand, for $S_1^i$ we have that \begin{align*} S_1^i&\lesssim \sum_{k=0}^N(2^{k}R)^{-n+\gamma/m-1/m+n/p_i'}|x_B|^{-\beta_i}\\ &\lesssim |x_B|^{-\beta_i}R^{-\theta_i}\sum_{k=0}^N 2^{-k\theta_i}\\ &\lesssim |x_B|^{-\beta_i}\left(2^NR\right)^{-\theta_i}2^{N\theta_i}\\ &\lesssim |x_B|^{-\beta_i-\theta_i}2^{N\theta_i}. \end{align*}\label{pag: estimacion de S_1^i y S_2^i, theta_i>0} Therefore, if $i\in\mathcal{I}_2$ and $\theta_i>0$ we get \begin{equation}\label{eq: teo: ejemplos para Hcal - eq9} S_1^i+S_2^i\lesssim |x_B|^{-\beta_i-\theta_i}\left(1+2^{N\theta_i}\right)\lesssim 2^{N\theta_i}|x_B|^{-\beta_i-\theta_i}. \end{equation} By combining \eqref{eq: teo: ejemplos para Hcal - eq7}, \eqref{eq: teo: ejemplos para Hcal - eq8} and \eqref{eq: teo: ejemplos para Hcal - eq9} we obtain \begin{align*} \prod_{i\in\mathcal{I}_2}\left(\int_{\mathbb{R}^n\backslash B} \frac{v_i^{-p_i'}(y)}{|x_B-y|^{(n-\gamma/m+1/m)p_i'}}\,dy\right)^{1/p_i'}&\lesssim \prod_{i\in\mathcal{I}_2, \theta_i<0} |x_B|^{-\beta_i-\theta_i} \prod_{i\in\mathcal{I}_2, \theta_i=0} |x_B|^{-\beta_i}\log_2\left(\frac{|x_B|}{R}\right) \\ &\qquad\times\prod_{i\in\mathcal{I}_2, \theta_i>0} |x_B|^{-\beta_i-\theta_i}2^{N\theta_i} \\ &\lesssim |x_B|^{-\sum_{i\in\mathcal{I}_2}(\beta_i+\theta_i)}2^{N\sum_{i\in\mathcal{I}_2,\theta_i> 0}\theta_i}\\ &\qquad \times\left(\log_2\left(\frac{|x_B|}{R}\right)\right)^{\#\{i\in\mathcal{I}_2, \theta_i=0\}}. \end{align*} The estimate above combined with \eqref{eq: teo: ejemplos para Hcal - eq5} and \eqref{eq: teo: ejemplos para Hcal - eq6} allows us to bound the left-hand side of \eqref{eq: teo: ejemplos para Hcal - eq1} by \[CR^{1-\delta+\alpha} |x_B|^{-\sum_{i\in\mathcal{I}_1}\beta_i}R^{-\sum_{i\in\mathcal{I}_1}\theta_i}|x_B|^{-\sum_{i\in\mathcal{I}_2}(\beta_i+\theta_i)}2^{N\sum_{i\in\mathcal{I}_2,\theta_i> 0}\theta_i} \left(\log_2\left(\frac{|x_B|}{R}\right)\right)^{\#\{i\in\mathcal{I}_2, \theta_i=0\}}\] or, equivalently, by \begin{equation}\label{eq: teo: ejemplos para Hcal - eq10} \left(\frac{R}{|x_B|}\right)^{1-\delta+\alpha-\sum_{i\in\mathcal{I}_1}\theta_i-\sum_{i\in\mathcal{I}_2,\theta_i> 0}\theta_i}\left(\log_2\left(\frac{|x_B|}{R}\right)\right)^{\#\{i\in\mathcal{I}_2, \theta_i=0\}}. \end{equation} Notice that the exponent of $R/|x_B|$ is equal to \[\sum_{i\in\mathcal{I}_2,\theta_i<0}(\beta_i+\theta_i)+\sum_{i\in\mathcal{I}_1}\beta_i+\sum_{i\in\mathcal{I}_2,\theta_i\geq 0}\beta_i=\nu-m_1\beta>0,\] from our choice of $\beta$. Since $\log t\lesssim \varepsilon^{-1}t^\varepsilon$ for every $t\geq 1$ and every $\varepsilon>0$, we can bound \eqref{eq: teo: ejemplos para Hcal - eq10} by \[C\left(\frac{R}{|x_B|}\right)^{\nu-m_1\beta-\varepsilon\#\{i\in\mathcal{I}_2, \theta_i=0\}},\] and this exponent is positive provided we choose $\varepsilon>0$ sufficiently small. The proof is complete when $\mathcal{I}_1\neq\emptyset$.
Otherwise, that is, if $\mathcal{I}_1=\emptyset$, we can follow the same steps and define the same parameters, omitting the factor corresponding to $\mathcal{I}_1$. This concludes the proof. \end{proof}
We finish with the proof of the theorem dealing with the case $w=\prod_{i=1}^m v_i$.
\begin{proof}[Proof of Theorem~\ref{teo: caso de pesos iguales}] Let $\alpha=p/(mp-1)$ and assume that $\alpha>1$. If $\vec{v}\in\mathcal{H}_m(\vec{p},\gamma,\delta)$, then by condition \eqref{eq: condicion local 2} we get \begin{equation}\label{eq: teo: caso de pesos iguales - eq1} |B|^{-\delta/n+\gamma/n-1/p}\prod_{i\in\mathcal{I}_1}\|v_i^{-1}\mathcal{X}_B\|_\infty\,\prod_{i\in\mathcal{I}_2}\left(\frac{1}{|B|}\int_B v_i^{-p_i'}\right)^{1/p_i'}\leq \frac{C}{|B|}\int_B \prod_{i=1}^m v_i^{-1}. \end{equation} Notice that $\sum_{i=1}^m \alpha/p_i'=1$. Therefore, we can apply Hölder's inequality with exponents $p_i'/\alpha$ to obtain \[\left(\frac{1}{|B|}\int_B \left(\prod_{i=1}^m v_i^{-1}\right)^\alpha\right)^{1/\alpha}\leq \prod_{i\in\mathcal{I}_1}\|v_i^{-1}\mathcal{X}_B\|_\infty\,\prod_{i\in\mathcal{I}_2}\left(\frac{1}{|B|}\int_B v_i^{-p_i'}\right)^{1/p_i'}.\] By multiplying each side of the inequality above by $|B|^{-\delta/n+\gamma/n-1/p}$ and using \eqref{eq: teo: caso de pesos iguales - eq1} we get \[|B|^{-\delta/n+\gamma/n-1/p}\left(\frac{1}{|B|}\int_B \left(\prod_{i=1}^m v_i^{-1}\right)^\alpha\right)^{1/\alpha}\leq \frac{C}{|B|}\int_B \prod_{i=1}^m v_i^{-1}.\] Since $\alpha>1$, Jensen's inequality shows that the left-hand side dominates $|B|^{-\delta/n+\gamma/n-1/p}\,\frac{1}{|B|}\int_B \prod_{i=1}^m v_i^{-1}$, and therefore \[|B|^{-\delta/n+\gamma/n-1/p}\leq C\] for every ball $B$. Letting $|B|\to 0$ and $|B|\to\infty$, we conclude that $\delta/n=\gamma/n-1/p$. \end{proof}
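Let us also record, for completeness, the exponent arithmetic behind the identity $\sum_{i=1}^m\alpha/p_i'=1$ used in the proof above. Assuming, as is customary in the multilinear setting and as the statement above indicates, that $1/p=\sum_{i=1}^m 1/p_i$, we have
\[
\sum_{i=1}^m\frac{\alpha}{p_i'}=\frac{p}{mp-1}\sum_{i=1}^m\left(1-\frac{1}{p_i}\right)=\frac{p}{mp-1}\left(m-\frac{1}{p}\right)=\frac{p}{mp-1}\cdot\frac{mp-1}{p}=1.
\]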
\end{document}
2205.11553v1
http://arxiv.org/abs/2205.11553v1
Existence and Stability of Nonequilibrium Steady States of Nernst-Planck-Navier-Stokes Systems
\documentclass[11pt,reqno]{amsproc} \title[]{Existence and Stability of Nonequilibrium Steady States of Nernst-Planck-Navier-Stokes Systems} \author{Peter Constantin} \address{Department of Mathematics, Princeton University, Princeton, NJ 08544} \email{[email protected]} \author{Mihaela Ignatova} \address{Department of Mathematics, Temple University, Philadelphia, PA 19122} \email{[email protected]} \author{Fizay-Noah Lee} \address{Program in Applied and Computational Mathematics, Princeton University, Princeton, NJ 08544} \email{[email protected]} \usepackage[margin=1in]{geometry} \usepackage{amsmath, amsthm, amssymb} \usepackage{times} \usepackage{color} \usepackage{hyperref} \usepackage{comment} \usepackage{enumerate} \usepackage{setspace} \newcommand{\tv}{\tilde v} \newcommand{\pa}{\partial} \newcommand{\p}{\pmb} \newcommand{\la}{\label} \newcommand{\fr}{\frac} \newcommand{\na}{\nabla} \newcommand{\be}{\begin{equation}} \newcommand{\ee}{\end{equation}} \newcommand{\bal}{\begin{aligned}} \newcommand{\eal}{\end{aligned}} \newcommand{\ba}{\begin{array}{l}} \newcommand{\ea}{\end{array}} \newcommand{\Rr}{{\mathbb R}} \newcommand{\red}{\textcolor{red}} \renewcommand{\c}{{\widetilde{c}}} \newtheorem{thm}{Theorem} \newtheorem{prop}{Proposition} \newtheorem{lemma}{Lemma} \newtheorem{lem}{Lemma} \newtheorem{rem}{Remark} \newtheorem{defi}{Definition} \newtheorem{cor}{Corollary} \newcommand{\beg}{\begin} \renewcommand{\div}{{\mbox{div}\,}} \newcommand{\D}{\Delta} \newcommand{\C}{C_{\Gamma}} \newcommand{\x}{\bf{x}} \newcommand{\ug}{\underline\gamma} \newcommand{\og}{\overline\gamma} \newcommand{\um}{\underline M} \newcommand{\om}{\overline M} \newcommand{\ugg}{\underline g} \newcommand{\ogg}{\overline g} \newcommand{\uV}{\underline V} \newcommand{\oV}{\overline V} \newcommand{\uuV}{\underline{\underline V}} \newcommand{\ooV}{\overline{\overline V}} \newcommand{\uW}{\underline W} \newcommand{\oc}{{\overline c}^*} \newcommand{\ou}{{\overline u}^*} \newcommand{\omu}{{\overline\mu}^*} \newcommand{\oPhi}{{\overline\Phi}^*} \newcommand{\orho}{{\overline{\rho}}^*} \newcommand{\ugd}{{\underline \gamma}_\delta} \newcommand{\ogd}{{\overline \gamma}_\delta} \date{today} \keywords{electroconvection, ionic electrodiffusion, Poisson-Boltzmann, Nernst-Planck, Navier-Stokes} \begin{document} \noindent\thanks{\em{ MSC Classification: 35Q30, 35Q35, 35Q92.}} \begin{abstract} We consider the Nernst-Planck-Navier-Stokes system in a bounded domain of $\Rr^d$, $d=2,3$ with general nonequilibrium Dirichlet boundary conditions for the ionic concentrations. We prove the existence of smooth steady state solutions and present a sufficient condition in terms of only the boundary data that guarantees that these solutions have nonzero fluid velocity. We show that time evolving solutions are ultimately bounded uniformly, independently of their initial size. In addition, we consider one dimensional steady states with steady nonzero currents and show that they are globally nonlinearly stable as solutions in a three dimensional periodic strip, if the currents are sufficiently weak. \end{abstract} \maketitle \section{Introduction} We consider the Nernst-Planck-Navier-Stokes (NPNS) system in a connected, but not necessarily simply connected bounded domain $\Omega\subset\mathbb{R}^d$ ($d=2,3$) with smooth boundary. The system models electrodiffusion of ions in a fluid in the presence of an applied electrical potential on the boundary \cite{prob,rubibook}. 
In this paper, we study the case where there are two oppositely charged ionic species with valences $\pm 1$ (e.g. sodium and chloride ions). In this case, the system is given by the Nernst-Planck equations \be \bal \pa_t c_1+u\cdot\na c_1=&D_1\div(\na c_1+c_1\na\Phi)\\ \pa_t c_2+u\cdot\na c_2=&D_2\div(\na c_2-c_2\na\Phi)\la{np} \eal \ee coupled to the Poisson equation \be -\epsilon\D\Phi=c_1-c_2=\rho\la{pois} \ee and to the Navier-Stokes system \be \pa_t u+u\cdot\na u-\nu\D u+\na p=-K\rho\na\Phi,\quad \div u=0.\la{nse} \ee Above $c_1$ and $c_2$ are the local ionic concentrations of the cation and anion, respectively, $\rho$ is a rescaled local charge density, $u$ is the fluid velocity, and $\Phi$ is a rescaled electrical potential. The constant $K>0$ is a coupling constant given by the product of Boltzmann's constant $k_B$ and the absolute temperature $T_K$. The constants $D_i$ are the ionic diffusivities, $\epsilon>0$ is a rescaled dielectric permittivity of the solvent proportional to the square of the Debye length, and $\nu>0$ is the kinematic viscosity of the fluid. The dimensional counterparts of $\Phi$ and $\rho$ are given by $(k_BT_k/e)\Phi$ and $e\rho$, respectively, where $e$ is elementary charge. It is well known that for certain \textit{equilibrium boundary conditions,} (see \eqref{eqc1} and \eqref {eqc2} below) the NPNS system (\ref{np})-(\ref{nse}) admits a unique steady solution, with vanishing velocity $u^* =0$, and with concentrations $c_i^*$ related to $\Phi^*$ which uniquely solves a nonlinear Poisson-Boltzmann equation \be \bal -\epsilon\D\Phi^* = c_1^*-c_2^*\\ c_1^* = Z_1^{-1} e^{-\Phi^*}\\ c_2^* = Z_2^{-1} e^{\Phi^*} \eal \la{PB} \ee with $Z_i>0$ constant for $i=1,2$. The equilibria of the Nernst-Planck-Navier-Stokes system are unique minimizers of a total energy that is nonincreasing in time on time dependent solutions \cite{ci}. For equilibrium boundary conditions it is known that for $d=2$ the unique steady states are globally stable \cite{bothe,ci} and for $d=3$ locally stable \cite{np3d,ryham}. The equilibrium boundary conditions include the cases where $c_i$ obey blocking boundary conditions \be (\pa_nc_1+c_1\pa_n\Phi)_{|\pa\Omega}=(\pa_nc_2 - c_2\pa_n\Phi)_{|\pa\Omega}=0\la{eqc1} \ee (here, $\pa_n$ is the normal derivative) and $\Phi$ obeys Dirichlet, Neumann, or Robin boundary conditions. Also included is the case where $c_i$ obey a mix of blocking and Dirichlet boundary conditions and $\Phi$ obeys Dirichlet boundary conditions in such a way that the electrochemical potentials \be \mu_1=\log c_1+\Phi,\quad\mu_2=\log c_2-\Phi \ee are each constant on the boundary portions where $c_i$ obey Dirichlet boundary conditions. That is, if $c_i$ satisfy Dirichlet boundary conditions on $S_i\subset\pa\Omega$ (possibly $S_i=\pa\Omega)$, then boundary conditions such that \be {\mu_1}_{|S_1}=\text{constant},\quad{\mu_2}_{|S_2}=\text{constant}\la{eqc2} \ee yield an equilibrium boundary condition. In general, arbitrary deviations from such situations can produce instabilities and even chaotic behavior for time dependent solutions to NPNS \cite{davidson,kang,pham,rubinstein,rubisegel,rubizaltz,zaltzrubi}. Furthermore, steady states in nonequilibrium configurations are not known in general, and not known to always be unique \cite{mock,park}. The various boundary conditions for $c_i$ and $\Phi$ all have physical interpretations, and we refer the reader to \cite{bothe,ci,cil,davidson,mock,schmuck} for relevant discussions. 
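Before turning to our setting, we record for the reader's convenience the elementary computation showing that the Boltzmann states \eqref{PB} are indeed equilibria; it only uses the relations in \eqref{PB} and the definition of the electrochemical potentials. If $c_1^*=Z_1^{-1}e^{-\Phi^*}$ and $c_2^*=Z_2^{-1}e^{\Phi^*}$, then $\mu_1=\log c_1^*+\Phi^*=-\log Z_1$ and $\mu_2=\log c_2^*-\Phi^*=-\log Z_2$ are constant, so the ionic fluxes vanish,
\[
\na c_1^*+c_1^*\na\Phi^*=c_1^*\na\mu_1=0,\qquad \na c_2^*-c_2^*\na\Phi^*=c_2^*\na\mu_2=0,
\]
and the Nernst-Planck equations \eqref{np} hold with $u^*\equiv 0$. Moreover, writing $\rho^*=c_1^*-c_2^*$, the electrical force in \eqref{nse} is a gradient,
\[
-K\rho^*\na\Phi^*=K\na\left(Z_1^{-1}e^{-\Phi^*}+Z_2^{-1}e^{\Phi^*}\right),
\]
so it can be absorbed into the pressure, and $u^*\equiv 0$ is consistent with \eqref{nse} as well.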
In this paper, we consider only Dirichlet boundary conditions for both $c_i$ and $\Phi$, together with no-slip boundary conditions for $u$, \be \bal {c_i}_{|\pa\Omega}&=\gamma_i>0\\ \Phi_{|\pa\Omega}&=W\\ u_{|\pa\Omega}&=0.\la{BC} \eal \ee For simplicity, we assume $\gamma_i,W\in C^\infty(\pa\Omega)$, but we do not restrict their size, nor do we require them to be constant. For $c_i$, the Dirichlet boundary conditions model, for example, ion-selectivity at an ion-selective membrane or some fixed concentration of ions at the boundary layer-bulk interface. Dirichlet boundary conditions for $\Phi$ model an applied electric potential on the boundary. There is a large literature on the well-posedness of the time dependent NPNS system \cite{bothe,ci,np3d,cil,fischer,fnl,liu,ryham,schmuck}, as well as the uncoupled Nernst-Planck \cite{biler,biler2,choi,gaj,gajewski,mock} and Navier-Stokes systems \cite{cf,temam}. Some of the aforementioned studies, in addition to \cite{EN,gajstab, park}, study several aspects of the steady state Nernst-Planck equations including existence, uniqueness, stability, and asymptotic behavior. Thus far, in the context of NPNS, steady states have mostly been studied in the case of equilibrium boundary conditions. In these cases, the corresponding steady states are the unique Nernst-Planck steady states, together with zero fluid flow $u^*\equiv 0$. In this paper in Section \ref{ssnpns}, Theorem \ref{thm1}, we prove the existence of smooth steady state solutions to the NPNS system (\ref{np})-(\ref{nse}) subject to arbitrary (large data) Dirichlet boundary conditions (\ref{BC}). In addition, we derive a sufficient condition, depending solely on the boundary data such that the steady state solution has nonzero fluid flow $u^*\not\equiv 0$ (Theorem \ref{unot0}). Thus the two main results of Section ~\ref{ssnpns} give the existence of steady states for NPNS that are not obtained by existing theory for Nernst-Planck equations, and include in particular the steady solutions with nonzero flow for which instability and chaos have been observed experimentally and numerically. In Section \ref{lt} we consider the time dependent solutions of the Nernst-Planck-Stokes system in 3D and show, using a maximum principle that solutions obey long time bounds that are independent of the size of the initial data. This result is also valid for the NPNS system in 2D and the NPNS system in 3D under the assumption of globally bounded smooth velocities. The maximum principle is for a two-by-two parabolic system with unequal diffusivities. The bound applies for situations far away from equilibrium, when the solutions have nontrivial dynamics, and establishes the existence of an absorbing ball. This is a first step in proving the existence and finite dimensionality of the global attractor, a task that will be pursued elsewhere. In Section \ref{GS}, we consider the Nernst-Planck-Stokes (NPS) system in the periodic channel $\Omega=(0,L)\times\mathbb{T}^2$ with piecewise constant (i.e. constant at $x=0$ and $x=L$, respectively) boundary conditions. In this case, we derive sufficient conditions, depending only on boundary data and parameters, such that NPS admits a one dimensional, globally stable steady state solution that corresponds to a \textit{steady (nonzero) current} solution with the fluid at rest. These are non-Boltzmann states (whose currents identically vanish). 
The stability condition can be thought of as a smallness condition on the magnitude of the ionic currents or, equivalently, as a condition that the system is a small perturbation of equilibrium. The main result of Section \ref{GS}, Theorem \ref{globalstab}, is preceded by an analysis of one dimensional steady currents. \subsection{Notation} Unless otherwise stated, we denote by $C$ a positive constant that depends only on the parameters of the system, the domain, and the initial and boundary conditions. The value of $C$ may differ from line to line. We denote by $L^p=L^p(\Omega)$ the Lebesgue spaces and by $W^{s,p}=W^{s,p}(\Omega)$, $H^s=H^s(\Omega)=W^{s,2}$ the Sobolev spaces. We denote by $\left<\cdot,\cdot\right>$ the $L^2(\Omega)$ inner product, and we write $dV=dx\,dy\,dz$ for the volume element in three dimensions, and $dx$ in one dimension. We write $dS$ for the surface element. We denote by $\pa_x, \pa_y, \pa_z, \pa_t$ the partial derivatives with respect to $x,y,z,t$, respectively, and also use $\pa_x$ to mean $\fr{d}{dx}$ in a one dimensional setting. We denote $z_1=1, z_2=-1$ for the valences of the ionic species. \section{Steady State Nernst-Planck-Navier-Stokes}\la{ssnpns} We consider the steady state Nernst-Planck-Navier-Stokes system \begin{align} u\cdot\na c_1&=\div(\na c_1+c_1\na\Phi)\la{s}\\ u\cdot\na c_2&=\div(\na c_2-c_2\na\Phi)\\ -\D\Phi&=c_1-c_2=\rho\\ u\cdot\na u-\D u+\na p &= -\rho\na\Phi\la{sstokes}\\ \div u&=0\la{S} \end{align} on a smooth, connected, bounded domain $\Omega\subset\mathbb{R}^d$ ($d=2,3$) together with boundary conditions \begin{align} {c_i}_{|\pa\Omega}&=\gamma_i>0\la{b}\\ \Phi_{|\pa\Omega}&=W\\ u_{|\pa\Omega}&=0\la{B} \end{align} with $\gamma_i,W\in C^\infty(\pa\Omega)$ not necessarily constant. In the above system, we have taken $D_i=\epsilon=\nu=K=1$, as the values of these parameters do not play a significant role in the results of this section. In this section we first prove the existence of a smooth solution to (\ref{s})-(\ref{S}) with boundary conditions (\ref{b})-(\ref{B}). Then, we derive a sufficient condition, depending only on $\gamma_i$ and $W$ and their derivatives, that guarantees that any steady state solution $(c_i^*,\Phi^*,u^*)$ of (\ref{s})-(\ref{S}) with (\ref{b})-(\ref{B}) has nonzero fluid flow, i.e. $u^*\not\equiv 0$. \begin{thm} For arbitrary boundary conditions (\ref{b})-(\ref{B}) on a smooth, connected, bounded domain $\Omega\subset\mathbb{R}^d$ ($d=2,3$), there exists a smooth solution $(c_i,\Phi,u)$ of the steady state Nernst-Planck-Navier-Stokes system (\ref{s})-(\ref{S}) such that $c_i\ge 0$.\la{thm1} \end{thm} \begin{rem} For the steady state Nernst-Planck system, any regular enough solution is necessarily nonnegative (i.e. $c_i\ge 0$) if we assume $\gamma_i\ge 0$. This follows from the fact that the quantities $c_1e^\Phi$, $c_2e^{-\Phi}$ each satisfy a maximum principle (see Section \ref{odss}). However, in the case of steady state NPNS, where such a maximum principle does not hold, the nonnegativity of $c_i$ must be built into the construction. \end{rem} \begin{proof} Throughout the proof, we assume $d=3$. Some steps are streamlined if we assume $d=2$, but the proof for $d=3$ nonetheless works for $d=2$. The proof consists of two main steps. We first show the existence of a solution to a parameterized approximate NPNS system. Then, we extract a convergent subsequence and show that the limit satisfies the original system.
The approximate system is given by : \begin{align} 0&=\div(\na c_1^\delta+\chi_\delta(c_1^\delta)\na\Phi^\delta-u^\delta\chi_\delta(c_1^\delta))\la{ap}\\ 0&=\div(\na c_2^\delta-\chi_\delta(c_2^\delta)\na\Phi^\delta-u^\delta\chi_\delta(c_2^\delta))\la{ap'}\\ -\D\Phi^\delta&=\chi_\delta(c_1^\delta)-\chi_\delta(c_2^\delta)={\rho}^\delta\la{ap''}\\ u^\delta\cdot\na u^\delta-\D u^\delta+\na p^\delta &= -{\rho}^\delta\na\Phi^\delta\quad \la{ap'''}\\ \div u^\delta&=0\la{Sd} \end{align} with boundary conditions \begin{align} {c_i^\delta}_{|\pa\Omega}&=\gamma_i>0\\ {\Phi^\delta}_{|\pa\Omega}&=W\\ {u^\delta}_{|\pa\Omega}&=0\la{AP}. \end{align} Here, $\chi_\delta$ is a smooth cutoff function, which converges pointwise to the following function as $\delta\to 0$, \be l:y\mapsto\begin{cases}y,&\quad y\ge 0\\ 0,&\quad y\le 0.\end{cases}\la{l} \ee We define $\chi_\delta$ by first fixing a smooth, nondecreasing function $\chi:\mathbb{R}\to \mathbb{R}^+$ such that \be \chi:y\mapsto\begin{cases}y,&\quad y\ge 1\\ 0,&\quad y\le 0.\end{cases}\la{chi} \ee Then, we set $\chi_\delta(y)=\delta\chi(\fr{y}{\delta})$. We state below some elementary properties of $\chi_\delta$:\\ \indent 1) $\chi_\delta\ge 0$\\ \indent 2) $\chi_\delta(y)=y$ for $y\ge\delta$, $\chi^\delta(y)=0$ for $y\le 0$\\ \indent 3) $\chi_\delta$ is nondecreasing\\ \indent 4) $|\chi'_\delta(y)|\le a$ where $a=\sup\chi'$ and so $\chi_\delta(y)\le a|y|, |\chi_\delta(x)-\chi_\delta(y)|\le a|x-y|$. \noindent The existence of a solution to (\ref{ap})-(\ref{AP}) follows as an application of Schaefer's fixed point theorem \cite{evans}, which we state below: \begin{thm} Suppose $X$ is a Banach space and $E:X\to X$ is continuous and compact. If the set \be \{v\in X\,|\, v=\lambda E(v)\,\text{for some } 0\le\lambda\le 1\}\la{hl} \ee is bounded in $X$, then $E$ has a fixed point.\la{fp} \end{thm} In order to apply this fixed point theorem, we first reformulate the problem (\ref{ap})-(\ref{AP}). Letting $\Gamma_i$ be the unique harmonic function on $\Omega$ satisfying ${\Gamma_i}_{|\pa\Omega}=\gamma_i$, and introducing \begin{align} q_i^\delta&=c_i^\delta-\Gamma_i\la{qc} \end{align} we rewrite (\ref{ap})-(\ref{ap''}), \begin{align} -\D q_1^\delta&=\div(-u^\delta \chi_\delta(c_1^\delta)+\chi_\delta(c_1^\delta)\na(-\D_W)^{-1}{\rho}^\delta)=R_1^\delta(q_1^\delta,q_2^\delta,u^\delta)\la{q1e}\\ -\D q_2^\delta&=\div(-u^\delta \chi_\delta(c_2^\delta)-\chi_\delta(c_2^\delta)\na(-\D_W)^{-1}{\rho}^\delta)=R_2^\delta(q_1^\delta,q_2^\delta,u^\delta)\la{q2e} \end{align} where $(-\D_W)^{-1}$ maps $g$ to the unique solution $f$ of \be -\D f=g\text{ in }\Omega, \quad f_{|\pa\Omega}=W. \ee Above we view $R_i^\delta$ as functions of $q_i^\delta$ and $u^\delta$, with $c_i^\delta$ and ${\rho}^\delta$ related to $q_i^\delta$ via (\ref{qc}) and ${\rho}^\delta=\chi_\delta(c_1^\delta)-\chi_\delta(c_2^\delta)$. Thus we write (\ref{q1e})-(\ref{q2e}) as \begin{align} q_i^\delta=(-\D_D)^{-1}R_i^\delta(q_1^\delta,q_2^\delta,u^\delta), \quad i=1,2\la{Ri} \end{align} where $-\D_D$ is the Laplace operator on $\Omega$ associated with homogeneous Dirichlet boundary conditions. As for the Navier-Stokes subsystem, we first project the equations onto the space of divergence free vector fields using the Leray projection \cite{cf} \be \mathbb{P}:(L^2(\Omega))^3\to H \ee where $H$ is the closure of \be \mathcal{V}=\{f\in (C_0^\infty(\Omega))^3\,|\,\div f=0\} \ee in $(L^2(\Omega))^3$ and is a Hilbert space endowed with the $L^2$ inner product. 
Then (\ref{ap'''}), (\ref{Sd}) is given by \be Au^\delta+B(u^\delta, u^\delta)=-\mathbb{P}(\rho^\delta\na\Phi^\delta)\la{AB} \ee where \begin{align} A&=\mathbb{P}(-\D):\mathcal{D}(A)=(H^2(\Omega))^3\cap V\to H\\ V&=\text{closure of }\mathcal{V}\text{ in } (H^1_0(\Omega))^3=\{f\in (H_0^1(\Omega))^3\,|\,\div f=0\}.\la{V} \end{align} As it is well known, the Stokes operator $A$ is invertible and $A^{-1}:H\to\mathcal{D}(A)$ is bounded and self-adjoint on $H$ and compact as a mapping from $H$ into $V$. The space $V$ is a Hilbert space endowed with the Dirichlet inner product \be \langle f,g\rangle_V=\int\na f:\na g\,dV. \ee For $f,g\in V$, $B(f,g)=\mathbb{P}(f\cdot\na g)$ and $B$ may be viewed as a continuous, bilinear mapping such that \be B:(f,g)\in V\times V\mapsto \left(h\in V\mapsto \int_\Omega (f\cdot\na g)\cdot h\,dV\right)\in V' \ee where $V'$ is the dual space of $V$. We note that we may also view $A$ as an invertible mapping $A:V\to V'$. It is with this viewpoint that we write (\ref{AB}) as \begin{align} u^\delta&=A^{-1}R_u^\delta(q_1^\delta,q_2^\delta,u^\delta)\\ R_u^\delta(q_1^\delta,q_2^\delta,u^\delta)&=-(B(u^\delta,u^\delta)+\mathbb{P}(\rho^\delta\na(-\D_W)^{-1}{\rho}^\delta)).\la{Ru} \end{align} Thus, setting \be X=H^1_0(\Omega)\times H^1_0(\Omega)\times V\la{X}\ee and \be E=E_1\times E_2\times E_u:(f,g,h)\in X\mapsto ((-\D_D)^{-1}R_1^\delta(f,g,h),(-\D_D)^{-1}R_2^\delta(f,g,h), A^{-1}R_u^\delta(f,g,h))\la{E} \ee we seek to show the existence of a weak solution $(\tilde{q}_1,\tilde{q}_2,\tilde u)=(\tilde{q}_1^\delta,\tilde{q}_2^\delta,{\tilde u}^\delta)\in X$ to (\ref{q1e}), (\ref{q2e}), (\ref{AB}) by verifying the hypotheses of Theorem \ref{fp} for the operator $E$ and showing that $E$ has a fixed point in $X$. First we prove that $E$ indeed maps $X$ into $X$ and does so continuously and compactly. \begin{lemma} The operator $E=E_1\times E_2\times E_u:X\to X$ is continuous and compact. \end{lemma} \begin{proof} We start with compactness. Since $(-\D_D)^{-1}$ (and $A^{-1}$) maps $L^\fr{3}{2}$ (resp. $(L^\fr{3}{2})^3$) continuously into $W^{2,\fr{3}{2}}\cap H_0^1$ (resp. $(W^{2,\fr{3}{2}})^3\cap V$), by Rellich's theorem, the maps $(-\D_D)^{-1}:L^\fr{3}{2}\to H^1_0$ and $A^{-1}:(L^\fr{3}{2})^3\to V$ are compact. Thus for compactness of $E$, it suffices to show that \be \bal &(f,g,h)\in X\mapsto R_i^\delta(f,g,h)\in L^\fr{3}{2}\\ &(f,g,h)\in X\mapsto R_u^\delta(f,g,h)\in (L^\fr{3}{2})^3\la{op} \eal \ee are bounded. To this end, we compute \be \bal \| R_1^\delta(f,g,h)\|_{L^\fr{3}{2}}\le&\|h\|_{L^6}\|\na\chi_\delta(f+\Gamma_1)\|_{L^2}\\ &+\|\na\chi_\delta(f+\Gamma)\|_{L^2}\|\na (-\D_W)^{-1}(\chi_\delta(f+\Gamma_1)-\chi_\delta(g+\Gamma_2))\|_{L^6}\\ &+\|\chi_\delta(f+\Gamma)\|_{L^6}\|(\chi_\delta(f+\Gamma_1)-\chi_\delta(g+\Gamma_2))\|_{L^2}\\ \le&C(\|h\|_V+\|f\|_{H^1}+\|g\|_{H^1}+1)(\|f\|_{H^1}+1).\la{b1} \eal \ee In the last inequality we used the continuous embeddings $H^1\hookrightarrow L^6$ and the fact that $|\chi'_\delta|\le a$. 
Entirely similar estimates give \be \|R_2^\delta(f,g,h)\|_{L^\fr{3}{2}}\le C(\|h\|_V+\|f\|_{H^1}+\|g\|_{H^1}+1)(\|g\|_{H^1}+1).\la{b2} \ee Lastly we estimate $R_u^\delta$, \be \bal \|R_u^\delta(f,g,h)\|_{L^\fr{3}{2}}\le&\|B(h,h)\|_{L^\fr{3}{2}}\\ &+\|(\chi_\delta(f+\Gamma_1)-\chi_\delta(g+\Gamma_2))\na(-\D_W)^{-1}(\chi_\delta(f+\Gamma_1)-\chi_\delta(g+\Gamma_2))\|_{L^\fr{3}{2}}\\ \le&\|h\|_{L^6}\|h\|_V\\ &+\|(\chi_\delta(f+\Gamma_1)-\chi_\delta(g+\Gamma_2))\|_{L^2}\|\na(-\D_W)^{-1}(\chi_\delta(f+\Gamma_1)-\chi_\delta(g+\Gamma_2))\|_{L^6}\\ \le&C(1+\|h\|_V^2+\|f\|_{L^2}^2+\|g\|_{L^2}^2).\la{b3} \eal \ee The bounds (\ref{b1})-(\ref{b3}) show that the operators from (\ref{op}) are indeed bounded, and thus $E$ is compact. Continuity of $E$ follows from the fact that the components of $E$ are sums of compositions of the following continuous operations \be \bal f\in H_1&\mapsto f+\Gamma_i\in H_1\\ f\in H_1&\mapsto \chi_\delta(f)\in L^4\\ f\in L^2&\mapsto \na(-\D_W)^{-1}f\in (H^1)^3\subset (L^4)^3\\ (f,g)\in L^4\times L^4&\mapsto fg\in L^2\\ f\in (L^2)^d&\mapsto(-\D_D)^{-1}\div f\in H_0^1\\ (f,g)\in V\times V&\mapsto B(f,g)\in V'\\ f\in (L^2)^d&\mapsto \mathbb{P}f\in H\\ f\in V'&\mapsto A^{-1}f\in V. \eal \ee This completes the proof of the lemma. \end{proof} Now it remains to establish uniform a priori bounds (c.f. (\ref{hl})). We fix $\lambda\in[0,1]$ and assume that for some $({\tilde q}_1,{\tilde q}_2,\tilde{u})\in X$ we have \be ({\tilde q}_1,{\tilde q}_2 ,\tilde u)=\lambda E({\tilde q}_1,{\tilde q}_2 ,\tilde u). \ee That is, for all $\psi_1,\psi_2\in H^1_0(\Omega)$ and $\psi_u\in V$ we assume we have \begin{align} \int_\Omega \na \tilde{q}_i\cdot\na \psi_i\,dV&=\lambda\int_\Omega R_i^\delta({\tilde q}_1,{\tilde q}_2,\tilde{u})\psi_i\,dV,\quad i=1,2\la{w}\\ \int_\Omega \na \tilde{u}:\na \psi_u\,dV&=\lambda\int_\Omega R_u^\delta({\tilde q}_1,{\tilde q}_2,\tilde{u})\cdot\psi_u\,dV.\la{ww} \end{align} We make the choice of test functions $\psi_i={\tilde q}_i$ and first estimate the resulting integral on the right hand side of (\ref{w}) for $i=1$, omitting for now the factor $\lambda$. Introducing the following primitive of $\chi_\delta$ \be Q_\delta(y)=\int_0^y\chi_\delta(s)\,ds \ee we have, integrating by parts, \be \bal \int_\Omega R_1^\delta({\tilde q}_1,{\tilde q}_2,\tilde{u}){\tilde q}_1\,dV=&\int_\Omega \tilde{u}\cdot\na Q_\delta ({\tilde c}_1)\,dV-\int_\Omega (\tilde{u}\cdot\na\Gamma_1)\chi_\delta({\tilde c}_1)\,dV\\ &-\int_\Omega \chi_\delta({\tilde c}_1)\na(-\D_W)^{-1}\tilde\rho\cdot\na {\tilde q}_1\,dV\\ =&I_1^{(1)}+I_2^{(1)}+I_3^{(1)} \eal \ee where $\tilde{q_i}=\tilde{c_i}-\Gamma_i$ and $\tilde\rho=\chi_\delta({\tilde c}_1)-\chi_\delta({\tilde c}_2)$. Because $\tilde{u}$ is divergence-free, it follows after an integration by parts that \be I_1^{(1)}=0. \ee Next, estimating $I_2^{(1)}$ we have, using the Poincaré inequality twice, \be |I_2^{(1)}|=\left|\int_\Omega (\tilde u\cdot\na\Gamma_1) \chi_\delta({\tilde c}_1)\,dV\right|\le C\|\tilde{u}\|_H\|{\tilde c}_1\|_{L^2}\le \fr{1}{2}\|\na {\tilde q}_1\|_{L^2}^2+C\|\tilde{u}\|_V^2+C. 
\ee Lastly we estimate $I^{(1)}_3$, \be \bal I_3^{(1)}=&-\int_\Omega\chi_\delta({\tilde c}_1)\na(-\D_W)^{-1}\tilde\rho\cdot\na \tilde{c}_1\,dV+\int_\Omega\chi_\delta({\tilde c}_1)\na(-\D_W)^{-1}\tilde\rho\cdot\na\Gamma_1\,dV\\ =&-\int_\Omega\na Q_\delta(\tilde{c}_1)\cdot\na(-\D_W)^{-1}\tilde\rho\,dV+\int_\Omega\chi_\delta({\tilde c}_1)\na(-\D_W)^{-1}\tilde\rho\cdot\na\Gamma_1\,dV\\ =&-\int_\Omega\na (Q_\delta(\tilde{c}_1)-Q_\delta(\Gamma_1))\cdot\na(-\D_W)^{-1}\tilde\rho\,dV-\int_\Omega\na Q_\delta(\Gamma_1)\cdot\na(-\D_W)^{-1}\tilde\rho\,dV\\ &+\int_\Omega\chi_\delta({\tilde c}_1)\na(-\D_W)^{-1}\tilde\rho\cdot\na\Gamma_1\,dV\\ =&-\int_\Omega (Q_\delta (\tilde{c}_1)-Q_\delta(\Gamma_1))\tilde\rho\,dV-\int_\Omega\na Q_\delta(\Gamma_1)\cdot\na(-\D_W)^{-1}\tilde\rho\,dV\\ &+\int_\Omega\chi_\delta({\tilde c}_1)\na(-\D_W)^{-1}\tilde\rho\cdot\na\Gamma_1\,dV. \eal \ee Analogous computations for $i=2$ in (\ref{w}) yield on the right hand side \be \int_\Omega R_2^\delta({\tilde q}_1,{\tilde q}_2,\tilde{u}){\tilde q}_2\,dV=I_1^{(2)}+I_2^{(2)}+I_3^{(2)} \ee where $I_j^{(2)}$, $j=1,2,3$ satisfy \be\bal I_1^{(2)}=&0\\ |I_2^{(2)}|\le&\fr{1}{2}\|\na {\tilde q}_2\|_{L^2}^2+C\|\tilde{u}\|_V^2+C\\ I_3^{(2)}=&\int_\Omega(Q_\delta(\tilde{c}_2)-Q_\delta(\Gamma_2))\tilde\rho\,dV+\int_\Omega\na Q_\delta(\Gamma_2)\cdot\na(-\D_W)^{-1}\tilde\rho\,dV\\ &-\int_\Omega\chi_\delta({\tilde c}_2)\na(-\D_W)^{-1}\tilde\rho\cdot\na\Gamma_2\,dV. \eal\ee Thus, summing (\ref{w}) in $i$ we obtain \be \bal \fr{1}{2}\sum_i\|\na \tilde{q_i}\|_{L^2}^2+\lambda\int_\Omega(Q_\delta(\tilde{c}_1)-Q_\delta(\tilde{c}_2))\tilde\rho\,dV\le& C\lambda(1+ \|\tilde\rho\|_{L^1}+\|\na(-\D_W)^{-1}\tilde\rho\|_{L^1}\\ &+\|\tilde{u}\|_V^2+\sum_i\|\tilde{c}_i\|_{L^2}\|\na(-\D_W)^{-1}\tilde\rho\|_{L^2}).\la{e1} \eal \ee Next, using the bounds \be \bal \|\tilde{c}_i\|_{L^2}&\le\|\tilde{q}_i\|_{L^2}+ C\le C\|\na\tilde{q_i}\|_{L^2}+C\\ \|\tilde\rho\|_{L^1}&\le C\|\tilde\rho\|_{L^3}\\ \|\na(-\D_W)^{-1}\tilde\rho\|_{L^1}&\le C\|\na(-\D_W)^{-1}\tilde\rho\|_{L^2}\le C\|\tilde\rho\|_{L^3}+C \eal \ee we obtain from (\ref{e1}), using Young's inequalities \be \fr{1}{4}\sum_i\|\na \tilde{q}_i\|_{L^2}+\lambda\int_\Omega (Q_\delta(\tilde{c}_1)-Q_\delta(\tilde{c}_2))\tilde\rho\,dV\le C + \theta\lambda\|\tilde\rho\|_{L^3}^3+C\lambda\|\tilde{u}\|_V^2\la{bla} \ee where $\theta$ is a small constant to be chosen later. Next we prove the following bound, \be \int_\Omega (Q_\delta(\tilde{c}_1)-Q_\delta(\tilde{c}_2))\tilde\rho\,dV\ge \fr{1}{4}\|\tilde\rho\|_{L^3}^3-C. \la{r3} \ee Prior to establishing this lower bound, we prove the following lemma, which shows that $Q_\delta\approx \fr{\chi_\delta^2}{2}$. \begin{lemma}$|Q_\delta(y)-\fr{\chi_\delta^2}{2}(y)|\le\fr{\delta^2}{2}\,\text{for all }y\in\mathbb{R}.$\la{lem1} \end{lemma} \begin{proof} For $y\le 0$, we have $Q_\delta(y)=\chi_\delta(y)=0$ so we may assume $y>0$. Suppose $y\ge \delta$. Then \be Q_\delta(y)=\int_0^\delta \chi_\delta(s)\,ds+\int_\delta^y s\,ds\le \delta^2+\fr{1}{2}(y^2-\delta^2)=\fr{\delta^2}{2}+\fr{\chi^2_\delta(y)}{2} \ee and similarly \be Q_\delta(y)=\int_0^\delta \chi_\delta(s)\,ds+\int_\delta^y s\,ds\ge \fr{1}{2}(y^2-\delta^2)=-\fr{\delta^2}{2}+\fr{\chi_\delta^2(y)}{2}. \ee Thus the lemma holds for $y\ge \delta$. Lastly, suppose $y\in(0,\delta)$. Then, using the monotonicity of $\chi_\delta$, \be Q_\delta(y)=\int_0^y\chi_\delta(s)\,ds\le y\chi_\delta(y)\le\delta\chi_\delta(y)\le\fr{\delta^2}{2}+\fr{\chi^2_\delta(y)}{2}. 
\ee On the other hand, we have \be Q_\delta(y)\ge 0 \ge -\fr{\delta^2}{2}+\fr{\chi_\delta^2(y)}{2}. \ee This completes the proof of the lemma. \end{proof} Now we proceed with the proof of (\ref{r3}). We split $\Omega=\{\tilde\rho\ge 0\}\cup\{\tilde\rho<0\}.$ Restricted to $\{\tilde\rho\ge 0\}$, we have, using Lemma \ref{lem1}, \be Q_\delta(\tilde{c}_1)-Q_\delta(\tilde{c}_2)\ge\fr{\chi_\delta^2(\tilde{c}_1)}{2}-\fr{\chi_\delta^2(\tilde{c}_2)}{2}-\delta^2=\fr{1}{2}(\chi_\delta({\tilde c}_1)+\chi_\delta({\tilde c}_2))\tilde\rho-\delta^2 \ee and, restricted to $\{\tilde\rho<0\}$, again using the lemma, we have \be Q_\delta(\tilde{c}_1)-Q_\delta(\tilde{c}_2)\le \fr{\chi_\delta^2(\tilde{c}_1)}{2}-\fr{\chi_\delta^2(\tilde{c}_2)}{2}+\delta^2=\fr{1}{2}(\chi_\delta({\tilde c}_1)+\chi_\delta({\tilde c}_2))\tilde\rho+\delta^2. \ee It follows that \be \bal \int_\Omega (Q_\delta(\tilde{c}_1)-Q_\delta(\tilde{c}_2))\tilde\rho\,dV&\ge \fr{1}{2}\int_\Omega\tilde\rho^2(\chi_\delta({\tilde c}_1)+\chi_\delta({\tilde c}_2))\,dV-\delta^2\|\tilde\rho\|_{L^1}\\ &\ge \fr{1}{2}\int_\Omega |\tilde\rho|^3\,dV-C\|\tilde\rho\|_{L^3}\\ &\ge \fr{1}{4}\|\tilde\rho\|_{L^3}^3-C \eal \ee where in the second inequality we used the fact that, because $\chi_\delta\ge 0$, we have $|\tilde\rho|\le \chi_\delta({\tilde c}_1)+\chi_\delta({\tilde c}_2)$. Thus we have established (\ref{r3}). Now we return to (\ref{bla}) and select $\theta=1/8$. Then using the bound (\ref{r3}), we ultimately have \be \fr{1}{4}\sum_i\|\na \tilde{q}_i\|_{L^2}^2+\fr{\lambda}{8}\|\tilde\rho\|_{L^3}^3\le C+\tilde C\lambda\|\tilde{u}\|_V^2\la{qq} \ee with $\tilde C$ depending only on data and parameters. Next, we proceed to (\ref{ww}). Choosing $\psi_u=\tilde{u}$, we have on the right hand side, omitting for now the prefactor $\lambda$, \be \bal \int_\Omega R_u^\delta({\tilde q}_1,{\tilde q}_2,\tilde{u})\cdot \tilde{u}\,dV&=-\int_\Omega B(\tilde{u},\tilde{u})\cdot \tilde{u}\,dV-\int_\Omega\mathbb{P}(\tilde\rho\na(-\D_W)^{-1}\tilde\rho)\cdot\tilde{u}\,dV. \eal \ee On one hand, using the self-adjointness of the projection $\mathbb{P}$ and the fact that $\tilde{u}$ is divergence-free we have \be \int_\Omega B(\tilde{u},\tilde{u})\cdot \tilde{u}\,dV=\int_\Omega (\tilde{u}\cdot\na \tilde{u})\cdot \tilde{u}\,dV=\fr{1}{2}\int_\Omega \tilde{u}\cdot\na |\tilde{u}|^2\,dV=0. \ee On the other hand, again using the self-adjointness of $\mathbb{P}$, we have \be \int_\Omega \mathbb{P}(\tilde\rho\na(-\D_W)^{-1}\rho)\cdot \tilde{u}\,dV=\int_\Omega \tilde\rho\na(-\D_W)^{-1}\tilde\rho\cdot \tilde{u}\,dV. 
\ee Thus far, from (\ref{ww}) we have \be \|\tilde{u}\|_V^2=-\lambda\int_\Omega\tilde\rho\na(-\D_W)^{-1}\tilde\rho\cdot \tilde{u}\,dV.\la{uu} \ee To control the integral on the right hand side, we return to (\ref{w}), taking this time the test functions $\psi_1=-\psi_2=\phi_0=(-\D_D)^{-1}\tilde\rho.$ Then, on the right hand side, we have, summing in $i$, integrating by parts, and omitting for now the prefactor $\lambda$ \be \bal \int_\Omega [R_1^\delta({\tilde q}_1,{\tilde q}_2,\tilde{u})-R_2^\delta({\tilde q}_1,{\tilde q}_2,\tilde{u})]\phi_0\,dV=& \int_\Omega \tilde{u} \tilde\rho\cdot\na\phi_0\,dV\\ &-\sum_i\int_\Omega\chi_\delta({\tilde c}_i)|\na\phi_0|^2\,dV-\sum_i\int_\Omega\chi_\delta(\tilde{c}_i)\na\phi_W\cdot\na\phi_0\,dV\\ \le&\int_\Omega \tilde\rho\na(-\D_W)^{-1}\tilde\rho\cdot\tilde u\,dV-\int_\Omega \tilde\rho\na\phi_W\cdot\tilde u\,dV\\ &-\fr{1}{2}\sum_i\int_\Omega\chi_\delta(\tilde{c}_i)|\na\phi_0|^2\,dV+\fr{1}{2}\int_\Omega\chi_\delta({\tilde c}_i)|\na\phi_W|^2\,dV\\ \le&C+\int_\Omega\tilde\rho\na(-\D_W)^{-1}\tilde\rho\cdot \tilde u\,dV+\fr{1}{2}\|\tilde u\|_V^2+\theta\|\tilde\rho\|_{L^3}^3\\ &-\fr{1}{2}\sum_i\int_\Omega\chi_\delta({\tilde c}_i)|\na\phi_0|^2\,dV+\theta\sum_i\|\na \tilde{q}_i\|_{L^2}^2\la{one} \eal \ee where $\theta$ is a constant to be chosen. Above, we have denoted $\phi_W=(-\D_W)^{-1}\tilde\rho-\phi_0$ i.e. $\phi_W$ is the unique harmonic function on $\Omega$ whose values on the boundary are given by $W$. On the other hand, we bound the left hand side of (\ref{w}) in two different ways depending on whether $0\le\lambda\le\Lambda$ or $\Lambda< \lambda\le 1$, where $\Lambda$ is determined below (see (\ref{lamq})). We first consider the case $0\le\lambda\le\Lambda$. Then, we bound the left hand side of (\ref{w}) as follows, \be \bal \sum_i\left|\int_\Omega \na{\tilde q}_i\cdot\na \phi_0\,dV\right|\le C+ C_q\sum_i\|\na {\tilde q}_i\|_{L^2}^2\la{two} \eal \ee where $C_q>0$ depends only on data and parameters. Collecting the estimates (\ref{one}), (\ref{two}), we have from (\ref{w}), \be \bal 0\le\fr{\lambda}{2}\sum_i\int_\Omega\chi_\delta({\tilde c}_i)|\na\phi_0|^2\,dV\le& C+\lambda\int_\Omega\tilde\rho\na(-\D_W)^{-1}\tilde\rho\cdot\tilde u\,dV\\ &+\fr{1}{2}\|\tilde u\|_V^2+\lambda\theta\|\tilde\rho\|_{L^3}^3+(C_q+\theta)\sum_i\|\na{\tilde q}_i\|_{L^2}^2.\la{asdf} \eal \ee Above, we keep track of the prefactor $\lambda$ only where needed and bound $\lambda\le 1$ if this suffices. Then, adding (\ref{asdf}) to (\ref{uu}), we obtain \be \fr{1}{2}\|\tilde u\|_{V}^2\le C+ \lambda\theta\|\tilde\rho\|_{L^3}^3+(C_q+\theta)\sum_i\|\na{\tilde q}_i\|_{L^2}^2. \ee Choosing $\theta=1$ and multiplying this last inequality by $\fr{1}{8(C_q+1)}$ and adding it to (\ref{qq}), we obtain \be \fr{1}{8}\sum_i\|\na{\tilde q}_i\|_{L^2}^2+\fr{1}{32(C_q+1)}\|\tilde u\|_V^2\le \tilde R\la{tilr} \ee for some $\tilde R$ depending on data and parameters, but not on $\lambda$ or $\delta$, provided (c.f. (\ref{qq})) \be \lambda\le \Lambda=\fr{1}{32\tilde C(C_q+1)}.\la{lamq} \ee Now we consider the case $\Lambda<\lambda\le 1$. 
In this case, we estimate the left hand side of (\ref{w}) as follows, \be \bal \sum_i\left|\int_\Omega \na{\tilde q}_i\cdot\na \phi_0\,dV\right|\le& C+ \theta\sum_i\|\na {\tilde q}_i\|_{L^2}^2+\Lambda\theta\|\tilde\rho\|_{L^3}^3\\ \le&C+ \theta\sum_i\|\na {\tilde q}_i\|_{L^2}^2+\lambda\theta\|\tilde\rho\|_{L^3}^3.\la{three} \eal \ee Then, combining (\ref{one}), (\ref{three}), we have \be \bal 0\le\fr{\lambda}{2}\sum_i\int_\Omega\chi_\delta({\tilde c}_i)|\na\phi_0|^2\,dV\le& C+\lambda\int_\Omega\tilde\rho\na(-\D_W)^{-1}\tilde\rho\cdot\tilde u\,dV\\ &+\fr{1}{2}\|\tilde u\|_V^2+2\lambda\theta\|\tilde\rho\|_{L^3}^3+2\theta\sum_i\|\na{\tilde q}_i\|_{L^2}^2.\la{asdfg} \eal \ee Then adding (\ref{asdfg}) to (\ref{uu}), we obtain \be \fr{1}{2}\|\tilde u\|_V^2\le C+2\lambda\theta\|\tilde\rho\|_{L^3}^3+2\theta\sum_i\|\na{\tilde q}_i\|_{L^2}^2.\la{qwe} \ee Now we multiply (\ref{qwe}) by $2(1+\tilde C)$ (c.f. (\ref{qq})) and choose $\theta$ small enough so that \be 4(1+\tilde C)\theta\le \fr{1}{8}. \ee Adding the resulting inequality to (\ref{qq}), we obtain \be \fr{1}{8}\sum_i\|\na {\tilde q}_i\|_{L^2}^2+\|\tilde u\|_V^2\le \overline R\la{barr} \ee for some $\overline R$ depending on data and parameters, but not on $\lambda$ or $\delta$. The two estimates (\ref{tilr}) and (\ref{barr}) verify the hypotheses of Theorem \ref{fp}, and thus it follows that there exists a weak solution $(q_1^\delta,q_2^\delta, u^\delta)\in X$ to (\ref{q1e}), (\ref{q2e}), (\ref{AB}) satisfying \be \sum_i\|q_i^\delta\|_{H^1}+\|u^\delta\|_V\le R \la{R} \ee for some $R>0$ independent of $\delta$. Before proceeding, we establish that in fact $(q_1^\delta,q_2^\delta, u^\delta)$ is smooth and satisfies uniform $H^2$ estimates. \begin{lemma} If $(q_1^\delta,q_2^\delta,u^\delta)\in X$ is a weak solution to (\ref{q1e}), (\ref{q2e}), (\ref{AB}), then $(q_1^\delta,q_2^\delta,u^\delta)$ is smooth, that is, \be (q_1^\delta,q_2^\delta,u^\delta)\in X^k=H^k(\Omega)\times H^k(\Omega)\times (H^k(\Omega))^3\quad\text{for all } k>0.\ee Furthermore, there exists $C_R>0$ independent of $\delta$ so that \be \|q_1^\delta\|_{H^2}+\|q_2^\delta\|_{H^2}+\|u^\delta\|_{H^2}\le C_R.\la{uh2} \ee\la{smoothlem} \end{lemma} \begin{proof} First we verify (\ref{uh2}). From the estimates (\ref{b1}), (\ref{b2}), (\ref{b3}) (taking $(f,g,h)=(q_1^\delta, q_2^\delta, u^\delta)$) and the uniform bound (\ref{R}) it follows that \be \bal \sum_i\|q_i^\delta\|_{W^{2,\fr{3}{2}}}+\|u^\delta\|_{W^{2,\fr{3}{2}}}\le& C\left(\sum_i\|\D q_i^\delta\|_{L^\fr{3}{2}}+\|Au^\delta\|_{L^\fr{3}{2}}\right)\\ =&C\left(\sum_i\|R_i^\delta(q_1^\delta,q_2^\delta, u^\delta)\|_{L^\fr{3}{2}}+\|R_u^\delta(q_1^\delta,q_2^\delta, u^\delta)\|_{L^\fr{3}{2}}\right)\\ \le&C\left(1+\sum_i\|q_i^\delta\|_{H^1}^2+\|u^\delta\|_{V}^2\right)\\ \le& \bar C_R \eal \ee where $\bar C_R$ is independent of $\delta$. Then, due to the embedding $W^{2,\fr{3}{2}}\hookrightarrow W^{1,3}$, it follows that \be \sum_i\|q_i^\delta\|_{W^{1,3}}+\|u^\delta\|_{W^{1,3}}\le \tilde C_R\la{I} \ee for $\tilde C_R$ independent of $\delta$. 
Now we estimate $R_i^\delta, R_u^\delta$ in $L^2$, \be \bal \|R_i^\delta(q_1^\delta, q_2^\delta, u^\delta)\|_{L^2}\le& C(\|u^\delta\|_{L^6}\|\na c_i^\delta\|_{L^3}+\|\na c_i^\delta\|_{L^3}\|\na(-\D_W)^{-1}\rho^\delta\|_{L^6}+\|c_i^\delta\|_{L^3}\|\rho^\delta\|_{L^6})\\ \|R_u^\delta(q_1^\delta, q_2^\delta, u^\delta)\|_{L^2}\le&C(\|u^\delta\|_{L^6}\|\na u\|_{L^3}+\|\rho^\delta\|_{L^3}\|\na(-\D_W)^{-1}\rho^\delta\|_{L^6}) \eal \ee and since all the terms on the right hand sides are bounded independently of $\delta$ due to (\ref{I}), we find that $\D q_i^\delta$, $Au^\delta$ are bounded in $L^2$ independently of $\delta$, and (\ref{uh2}) follows. Higher regularity follows by induction. Indeed, suppose \be (q_1^\delta, q_2^\delta, u^\delta)\in X^k\text{ for some integer } k\ge 3.\la{k} \ee We show that $(q_1^\delta ,q_2^\delta, u^\delta)\in X^{k+1}$ follows. By elliptic regularity, it suffices to show that $R_i^\delta(q_1^\delta,q_2^\delta, u^\delta)$ and $R_u^\delta(q_1^\delta,q_2^\delta, u^\delta)$ are in $H^{k-1}$, and so we compute \be \bal \|R_i^\delta(q_1^\delta, q_2^\delta, u^\delta)\|_{H^{k-1}}\le& C(\|u^\delta\cdot\na c_i^\delta\|_{H^{k-1}}+\|\na c_i^\delta\cdot\na(-\D_W)^{-1}\rho^\delta\|_{H^{k-1}}+\|c_i^\delta\rho^\delta\|_{H^{k-1}})\\ \le& C(\|u^\delta\|_{H^{k-1}}\|\na c_i^\delta\|_{H^{k-1}}+\|\na c_i^\delta\|_{H^{k-1}}\|\na(-\D_W)^{-1}\rho^\delta\|_{H^{k-1}}\\ &+\|c_i^\delta\|_{H^{k-1}}\|\rho^\delta\|_{H^{k-1}})\\ \le& C(\|u^\delta\|_{H^{k-1}}\|c_i^\delta\|_{H^k}+\|c_i^\delta\|_{H^k}(1+\|\rho^\delta\|_{H^{k-2}})+\|c_i^\delta\|_{H^{k-1}}\|\rho^\delta\|_{H^{k-1}})\\ <&\infty\quad\text{by }(\ref{k}) \eal \ee where in the second inequality, we used the fact that $H^s$ is an algebra for $s> \fr{3}{2}$; that is, $$\|fg\|_{H^s}\le C\|f\|_{H^s}\|g\|_{H^s}.$$ Similarly, we estimate \be \bal \|R_u^\delta(q_1^\delta,q_2^\delta, u^\delta)\|_{H^{k-1}}\le&C(\|u^\delta\cdot\na u^\delta\|_{H^{k-1}}+\|\rho^\delta\na(-\D_W)^{-1}\rho^\delta\|_{H^{k-1}})\\ \le& C\|u^\delta\|_{H^{k-1}}\|u^\delta\|_{H^k}+\|\rho^\delta\|_{H^{k-1}}(1+\|\rho^\delta\|_{H^{k-2}})\\ <&\infty\quad\text{by }(\ref{k}) \eal \ee where in the first inequality, we used the fact that $\mathbb{P}$ is continuous as a mapping $\mathbb{P}:H^{k-1}\to H^{k-1}$ \cite{temam}. The proof of the smoothness of $(q_1^\delta,q_2^\delta, u^\delta)$ is thus complete once we verify the base case of $k=3$. 
It suffices to show that $R_i^\delta(q_1^\delta, q_2^\delta,u^\delta)$ and $R_u^\delta(q_1^\delta,q_2^\delta, u^\delta)$ are in $H^1$: \be \bal \|R_i^\delta(q_1^\delta,q_2^\delta, u^\delta)\|_{H^1}\le&C(\|u^\delta\cdot\na c_i^\delta\|_{H^1}+\|\div(\chi_\delta(c_i^\delta)\na(-\D_W)^{-1}\rho^\delta)\|_{H^1})\\ \le& C(\|u^\delta\cdot \na c_i^\delta\|_{L^2}+\|\na(u^\delta\cdot\na c_i^\delta)\|_{L^2}+\|\chi_\delta(c_i^\delta)\na(-\D_W)^{-1}\rho^\delta\|_{H^2})\\ \le& C(\|u^\delta\|_{L^\infty}\|\na c_i^\delta\|_{L^2}+\|\na u^\delta\|_{L^4}\|\na c_i^\delta\|_{L^4}+\|u^\delta\|_{L^\infty}\|\na\na c_i^\delta\|_{L^2}\\ &+\|\chi_\delta(c_i^\delta)\|_{H^2}\|\na(-\D_W)^{-1}\rho^\delta\|_{H^2})\\ <&\infty\quad\text{by }(\ref{uh2})\\ \|R_u^\delta(q_1^\delta,q_2^\delta, u^\delta)\|_{H^1}\le&C\|u^\delta\cdot\na u^\delta\|_{H^1}+\|\rho^\delta\na(-\D_W)^{-1}\rho^\delta\|_{H^1})\\ \le& C(\|u^\delta\cdot\na u^\delta\|_{L^2}+\|\na(u^\delta\cdot\na u^\delta)\|_{L^2}\\ &+\|\rho^\delta\na(-\D_W)^{-1}\rho^\delta\|_{L^2}+\|\na(\rho^\delta\na(-\D_W)^{-1}\rho^\delta\|_{L^2})\\ \le&C(\|u^\delta\|_{L^\infty}\|u^\delta\|_V+\|\na u^\delta\|_{L^4}^2+\|u^\delta\|_{L^\infty}\|\na\na u^\delta\|_{L^2}\\ &+\|\rho^\delta\|_{L^4}\|\na(-\D_W)^{-1}\rho^\delta\|_{L^4}+\|\na\rho^\delta\|_{L^2}\|\na(-\D_W)^{-1}\rho^\delta\|_{L^\infty}\\ &+\|\rho^\delta\|_{L^4}\|\na\na(-\D_W)^{-1}\rho^\delta\|_{L^4})\\ <&\infty\quad\text{by }(\ref{uh2}) \eal \ee where again, we used the fact that $\mathbb{P}:H^1\to H^1$ is continuous. Thus the proof of the lemma is complete. \end{proof} Now we finish the proof of Theorem \ref{thm1}. First, we establish the nonnegativity of $c_i^\delta$. We recall that $c_i^\delta$ satisfies \be -\D c_i^\delta=-(u^\delta\cdot\na c_i^\delta)\chi'_\delta(c_i^\delta)+z_i(\na c_i^\delta\cdot\na\Phi^\delta)\chi'_\delta(c_i^\delta)-\chi_\delta(c_i^\delta)\rho^\delta \ee where $z_1=1=-z_2$. Suppose $c_i^\delta$ attains a negative value in $\Omega$, and suppose that at $x_0\in\Omega$ we have $c_i^\delta(x_0)=\inf_\Omega c_i^\delta<0$. Then consider the largest ball $B$ centered at $x_0$ so that ${c_i^\delta}_{|B}\le 0$. Since ${c_i^\delta}_{|\pa\Omega}=\gamma_i>0$, we necessarily have $\bar B\subset \Omega$ and for some $y\in\pa B$ we have $c_i^\delta(y)=0$. Furthermore, $c_i^\delta$ satisfies \be -\D c_i^\delta=-(u^\delta\cdot\na c_i^\delta)\chi'_\delta(c_i^\delta)+z_i(\na c_i^\delta\cdot\na\Phi^\delta)\chi'_\delta(c_i^\delta). \ee in $B$. However, since ${c_i^\delta}_{|B}$ attains its global minimum in $B$, the strong maximum principle implies that ${c_i^\delta}_{|B}\equiv\inf_\Omega c_i^\delta<0$; however this contradicts the fact that $c_i^\delta(y)=0$ for $y\in\pa B$. Therefore $c_i^\delta\ge 0$. Now, due to (\ref{uh2}), there is a sequence $\delta_j\to 0$ as $j\to \infty$ and $(c_1,c_2,u)\in X^2$ so that $(c_1^{\delta_j},c_2^{\delta_j},u^{\delta_j})\to (c_1, c_2, u)$ strongly in $X^1$, pointwise almost everywhere, and weakly in $X^2$ as $j\to \infty$. And, we take \be \Phi=(-\D_W)^{-1}\rho.\la{pp} \ee Since $c_i$ is the pointwise almost everywhere limit of nonnegative functions, we have $c_i\ge 0$ almost everywhere, and after redefining $c_i$ on a set of measure zero, we assume henceforth that $c_i\ge 0$ everywhere. Now we verify that $(c_1, c_2, u)$ together with (\ref{pp}) is a weak solution of (\ref{s})-(\ref{B}). 
Since the trace operator is continuous from $H^1(\Omega)$ into $H^\fr{1}{2}(\pa\Omega)$ and $(c_1,c_2,u)$ is the strong $X^1$ limit of $(c_1^{\delta_j},c_2^{\delta_j},u^{\delta_j})$, we have that the boundary conditions (\ref{b})-(\ref{B}) are satisfied in the sense of traces. Next, we verify that $(c_1,c_2,u)$ satisfies (\ref{s})-(\ref{S}) in the weak sense: for any $(\psi_1,\psi_2,\psi_u)\in X$, we have \be \bal \int_\Omega \na c_i\cdot\na\psi_i\,dV&=-\int_\Omega(-uc_i+c_i\na(-\D_W)^{-1}\rho)\cdot\na\psi_i\,dV\\ \int_\Omega \na u:\na\psi_u\,dV&=-\int_\Omega (B(u,u)+\mathbb{P}(\rho\na(-\D_W)^{-1}\rho))\cdot\psi_u\,dV.\la{wf} \eal \ee Prior to establishing these equalities, we show that \be \|\chi_{\delta_j}(c_i^{\delta_j})-c_i\|_{L^6}\to 0 \quad\text{as } j\to \infty.\la{l6} \ee Indeed, we have \be \bal \int_\Omega |\chi_{\delta_j}(c_i^{\delta_j})-c_i|^6\,dV\le&C\left(\int_\Omega|\chi_{\delta_j}(c_i^{\delta_j})-\chi_{\delta_j}(c_i)|^6\,dV+\int_\Omega |\chi_{\delta_j}(c_i)-c_i|^6\,dV\right). \eal \ee The first integral on the right hand side converges to $0$ because \be \|\chi_{\delta_j}(c_i^{\delta_j})-\chi_{\delta_j}(c_i)\|_{L^6}\le a\|c_i^{\delta_j}-c_i\|_{L^6}\le C\|c_i^{\delta_j}-c_i\|_{H^1}\to 0. \ee The second integral also converges to $0$ due to the dominated convergence theorem and the fact that for each $x\in\Omega$, we have $\chi_{\delta_j}(c_i(x))=c_i(x)$ for all $j$ sufficiently large since $\chi_\delta(y)=y$ for all $\delta\le y$ if $y>0$ and for all $\delta$ if $y=0$. Thus, we can now compute \begin{align*} \left|\int_\Omega \na c_i\cdot\na\psi_i\,dV-\int_\Omega\na c_i^{\delta_j}\cdot\na\psi_i\,dV\right|\le&\|\psi_i\|_{H^1}\|c_i-c_i^{\delta_j}\|_{H^1}\to 0\\\\ \left|\int_\Omega uc_i\cdot\na\psi_i\,dV-\int_\Omega u^{\delta_j} \chi_{\delta_j}(c_i^{\delta_j})\cdot\na\psi_i\,dV\right|\le&\|\psi_i\|_{H^1}(\|u\|_{L^3}\|c_i-\chi_{\delta_j}(c_i^{\delta_j})\|_{L^6}\\ &+\|\chi_{\delta_j}(c_i^{\delta_j})\|_{L^3}\|u-u^{\delta_j}\|_{L^6})\to 0 \end{align*} \begin{align*} &\left|\int_\Omega c_i\na(-\D_W)^{-1}\rho\cdot\na\psi_i\,dV-\int_\Omega\chi_{\delta_j}(c_i^{\delta_j})\na(-\D_W)^{-1}\rho^{\delta_j}\cdot\na\psi_i\,dV\right|\\ \le&\|\psi_i\|_{H^1}(\|\na(-\D_W)^{-1}\rho\|_{L^3}\|c_i-\chi_{\delta_j}(c_i^{\delta_j})\|_{L^6}+\|\na(-\D_D)^{-1}(\rho-\rho^{\delta_j})\|_{L^6}\|\chi_{\delta_j}(c_i^{\delta_j})\|_{L^3})\to 0 \end{align*} \begin{align*} \left|\int_\Omega \na u:\na\psi_u\,dV-\int_\Omega\na u^{\delta_j}:\na\psi_u\,dV\right|\le \|\psi_u\|_V\|u-u^{\delta_j}\|_V\to 0 \end{align*} \begin{align*} \left|\int_\Omega B(u,u)\cdot\psi_u\,dV-\int_\Omega B(u^{\delta_j},u^{\delta_j})\cdot\psi_u\right|\le\|\psi_u\|_{L^6}(\|u-u^{\delta_j}\|_{L^3}\|u\|_V+\|u^{\delta_j}\|_{L^3}\|u-u^{\delta_j}\|_V)\to 0 \end{align*} \begin{align*} &\left|\int_\Omega \rho\na(-\D_W)^{-1}\rho\cdot\psi_u\,dV-\int_\Omega\rho^{\delta_j}\na(-\D_W)^{-1}\rho^{\delta_j}\cdot\psi_u\right|\\ \le& \|\psi\|_{L^6}(\|\rho-\rho^{\delta_j}\|_{L^2}\|\na(-\D_W)^{-1}\rho\|_{L^3}+\|\rho^{\delta_j}\|_{L^2}\|\na(-\D_D)^{-1}(\rho-\rho^{\delta_j})\|_{L^3})\to 0. \end{align*} The above computations, together with the fact that $(c_1^\delta,c_2^\delta,u^\delta)$ satisfy (\ref{ap})-(\ref{Sd}), imply (\ref{wf}). Finally, the smoothness of $(c_1,c_2,u)$ follows from the same bootstrapping scheme as in the proof of Lemma \ref{smoothlem}. The proof of Theorem \ref{thm1} is now complete. 
\end{proof} The equilibria of the Nernst-Planck-Navier-Stokes system are unique minimizers of a total energy that is nonincreasing in time on solutions \cite{ci} and they arise when certain equilibrium boundary conditions \eqref{eqc1}, \eqref{eqc2} are supplied. The potential then obeys Poisson-Boltzmann equations \eqref{PB} which provide the unique steady state solution ($c_1^*, c_2^*)$ of the Nernst-Planck equations \eqref{np} with zero fluid velocity $u^*\equiv 0$. However, in many cases of physical interest, the boundary conditions are not suitable for equilibrium, and an electrical potential gradient generates (experimentally or numerically) nontrivial fluid flow. Thus, it is relevant to derive conditions under which the steady state whose existence is guaranteed by Theorem \ref{thm1} has nonzero fluid velocity $u^*\not\equiv 0$. We derive below one such condition. \begin{thm} Suppose $(c_1^*, c_2^*, u^*)$ is a solution to (\ref{s})-(\ref{S}) with boundary conditions (\ref{b})-(\ref{B}). Suppose in addition that the boundary conditions satisfy \be \int_{\pa\Omega}(\gamma_1-\gamma_2)(n_i\pa_j-n_j\pa_i)W\,dS\neq 0 \quad\text{or}\quad \int_{\pa\Omega}W(n_i\pa_j-n_j\pa_i)(\gamma_1-\gamma_2)\,dS\neq 0.\la{bct} \ee for some $i,j\in\{x,y,z\}, i\neq j$, where $n_i$ are the components of the unit normal vector along $\pa\Omega$. Then $u^*\not \equiv 0.$\la{unot0} \end{thm} \begin{rem} We note that if $i,j\in\{x,y,z\}$ with $i\neq j$, then $n_i\pa_j-n_j\pa_i$ is a vector field tangent to $\pa\Omega$, so that the integrals in (\ref{bct}) are well defined. Indeed, the characteristic directions of $n_i\pa_j-n_j\pa_i$ are $n_ie_j-n_je_i$ (with $e_k$ the canonical basis of $\Rr^d$) and $n\cdot (n_ie_j-n_je_i) = 0$ shows that these are tangent to $\pa\Omega$. Thus, the condition (\ref{bct}) can be checked with just knowledge of the values of $c_i$ and $\Phi$ on $\pa\Omega$. \end{rem} \begin{proof} If $(c_1^*, c_2^*, u^*\equiv 0)$ is a solution to (\ref{s})-(\ref{S}), then $\rho^*\na\Phi$ must be a gradient force i.e. \be \rho^*\na\Phi^*=\na F \ee for some smooth $F$. Thus a sufficient condition for $u^*\not\equiv 0$ is \be\na\times(\rho^*\na\Phi^*)\not\equiv 0.\la{curl}\ee In turn, a sufficient condition for (\ref{curl}) is \be \int_\Omega \na\rho^*\times\na\Phi^*\,dV\neq 0.\la{curll} \ee Integrating the above integral by parts, moving the derivative off $\rho^*$, we obtain the following equivalent condition \be \int_{\pa\Omega}\rho^*(n_i\pa_j-n_j\pa_i)\Phi\,dS\neq 0\quad \text{for some $i,j\in\{x,y,z\},$ $i\neq j.$} \ee And by the remark following the statement of Theorem \ref{unot0}, this is equivalent to the condition \be \int_{\pa\Omega}(\gamma_1-\gamma_2)(n_i\pa_j-n_j\pa_i)W\,dS\neq 0\quad \text{for some $i,j\in\{x,y,z\},$ $i\neq j.$} \ee Similarly, by moving the derivative off $\Phi^*$ in (\ref{curll}), we obtain the equivalent condition \be \int_{\pa\Omega}W(n_i\pa_j-n_j\pa_i)(\gamma_1-\gamma_2)\,dS\neq 0\quad \text{for some $i,j\in\{x,y,z\},$ $i\neq j$.} \ee This completes the proof. 
\end{proof} \section{Maximum principle and long time behavior of solutions}\la{lt} In this section we investigate the long time behavior of the system \begin{align} \pa_t c_1+u\cdot\na c_1=&D_1\div(\na c_1+c_1\na\Phi)\la{tnp1}\\ \pa_t c_2+u\cdot\na c_2=&D_2\div(\na c_2-c_2\na\Phi)\la{tnp2}\\ -\epsilon\D\Phi&=\rho=c_1-c_2\la{tpois}\\ \pa_t u-\nu\D u+\na p&=-K\rho\na\Phi\la{tstokes}\\ \div u&=0.\la{tdiv} \end{align} The global existence and uniqueness of smooth solutions of this system with Dirichlet boundary conditions is proved in \cite{cil}. Here we prove a maximum/minimum principle for the ionic concentrations $c_i$, which in particular gives us time independent $L^\infty$ bounds (see also \cite{EN}). In addition, we show that the Dirichlet boundary data of $c_i$ are \textit{attracting} in the sense that $\max\{\sup_\Omega c_1,\sup_\Omega c_2\}$ and $\min\{\inf_\Omega c_1,\inf_\Omega c_2\}$ monotonically approach the extremal values of the boundary data as $t\to\infty$. The restriction to the Stokes subsystem is due to the lack of information on global regularity for Navier-Stokes solutions in 3D, which limits the analysis of long time behavior. The results below do extend to 2D NPNS and apply to 3D NPNS under the assumption of regularity of velocity. The modifications to the proofs required in these cases are straightforward but will not be pursued here. We consider a general smooth, bounded, connected domain $\Omega\subset\mathbb{R}^3$ with boundary conditions \be \bal {c_i}_{|\pa\Omega}&=\gamma_i>0\\ \Phi_{|\pa\Omega}&=W\\ u_{|\pa\Omega}&=0\la{bbcc} \eal \ee where $\gamma_i$ and $W$ are smooth and not necessarily constant. \begin{thm} Suppose $(c_1\ge 0,c_2\ge 0,u)$ is the unique, global smooth solution to (\ref{tnp1})-(\ref{tdiv}) on $\Omega$ with smooth initial conditions $(c_1(0)\ge 0,c_2(0)\ge 0,u(0))$ (with $\div u(0)=0$) and boundary conditions (\ref{bbcc}). Then, \begin{enumerate}[(I)] \item For $i=1,2$ and all $t\ge 0$ \begin{align}\min\{\inf_\Omega c_1(0), \inf_\Omega c_2(0),\ug\}\le c_i(t,x)\le \max\{\sup_\Omega c_1(0), \sup_\Omega c_2(0),\og\}\end{align} where $\ug=\min_i\inf_{\pa\Omega}\gamma_i$ and $\og=\max_i\sup_{\pa\Omega}\gamma_i$. In particular \begin{align}\om(t)=\max_i\sup_\Omega c_i(t,x),\quad \um(t)=\min_i\inf_\Omega c_i(t,x)\end{align} are nonincreasing and nondecreasing on $(0,\infty)$, respectively. \item For all $\delta>0$, there exists $T$ depending on $\delta$, $\Omega$ and initial and boundary conditions such that for all $t\ge T$ we have $$\ug-\delta\le \um(t)\le\om(t)\le\og+\delta.$$ \end{enumerate}\la{maxthm} \end{thm} The theorem is a consequence of the following proposition. \begin{prop} Suppose $v_i:[0,\infty)\times\bar\Omega\to \mathbb{R}$, $i=1,2$ is a nonnegative, smooth solution to \be \bal \pa_t v_1&=d_1\D v_1+b_1\cdot\na v_1-p_1(v_1-v_2)\\ \pa_t v_2&=d_2\D v_2+b_2\cdot\na v_2+p_2(v_1-v_2) \eal \ee with time independent, smooth Dirichlet boundary conditions \be {v_i}_{|\pa\Omega}=g_i>0 \ee where $d_i>0$ are constants, $b_i=b_i(t,x)$ are smooth vector fields, and $p_i=p_i(t,x)\ge 0$. Then \begin{enumerate}[(I')] \item For $i=1,2$ and all $t\ge 0$ \begin{align}\min\{\inf_\Omega v_1(0), \inf_\Omega v_2(0),\ugg\}\le v_i(t,x)\le \max\{\sup_\Omega v_1(0), \sup_\Omega v_2(0),\ogg\}\la{lub}\end{align} where $\ugg=\min_i\inf_{\pa\Omega}g_i$ and $\ogg=\max_i\sup_{\pa\Omega}g_i$.
In particular \begin{align}\oV(t)=\max_i\sup_\Omega v_i(t,x),\quad \uV(t)=\min_i\inf_\Omega v_i(t,x)\la{umom}\end{align} are nonincreasing and nondecreasing on $(0,\infty)$, respectively. \item Suppose, in addition to the preceding hypotheses, that $b_i$ is uniformly bounded in time. Then for all $\delta>0$, there exists $0<T^*=T^*(\delta, d_i,\sup_t\|b_i(t)\|_{L^\infty},g_i, v_i(t=0),\Omega)$ such that for all $t\ge T^*$ we have $$\ugg-\delta\le \uV(t)\le\oV(t)\le\ogg+\delta.$$ \end{enumerate}\la{prop!} \end{prop} \begin{proof} We prove just the lower bound in (I') as the upper bound can be established analogously. If either $\inf_\Omega v_1(t=0)=0$ or $\inf_\Omega v_2(t=0)=0$ then the lower bound holds trivially as we are assuming that $v_i\ge 0$. So we assume $v_1(t=0),v_2(t=0)>0$. We define $$\uuV(t)=\min_{0\le s\le t}\uV(s).$$ We show that $\uV$ and $\uuV$ are both locally Lipschitz (i.e. Lipschitz continuous on every interval $[0,T]$). Indeed, assigning to each $t\ge 0$ a point $x_i(t)\in\bar{\Omega}$ such that $v_i(t,x_i(t))=\uV_i(t)=\inf_\Omega v_i(t,x)$, we have for $s<t\le T$ \be \fr{v_i(t,x_i(t))-v_i(s,x_i(t))}{t-s}\le \fr{\uV_i(t)-\uV_i(s)}{t-s}\le \fr{v_i(t,x_i(s))-v_i(s,x_i(s))}{t-s} \ee and so \be \left|\fr{\uV_i(t)-\uV_i(s)}{t-s}\right|\le \sup_{[0,T]\times\bar\Omega}|\pa_tv_i(t,x)|=\underline L^T_i \ee implying that $\uV_i(t)$ is locally Lipschitz. Next, assigning to each $t\ge 0$ an $i(t)\in\{1,2\}$ such that $\uV_{i(t)}(t)=\uV(t)$ we have for $s<t\le T$ \be \fr{\uV_{i(t)}(t)-\uV_{i(t)}(s)}{t-s}\le \fr{\uV(t)-\uV(s)}{t-s}\le\fr{\uV_{i(s)}(t)-\uV_{i(s)}(s)}{t-s} \ee and thus \be \left|\fr{\uV(t)-\uV(s)}{t-s}\right|\le \max_i \underline L^T_i=\underline L^T. \ee Thus $\uV_i$ is locally Lipschitz. Lastly consider $\uuV$. Fixing $s<t\le T$, we assume without loss of generality that $\uuV(t)\neq \uuV(s)$. In particular since $\uuV$ is nonincreasing, this implies $\uuV(t)<\uuV(s)$. Then consider \be t^*=\inf\{t'\in[s,t] |\,\uV(t')=\uuV(t)\}. \ee Since $\uV(s)\ge\uuV(s)>\uuV(t)$, we necessarily have $t^*>s.$ Thus, \be \left|\fr{\uuV(t)-\uuV(s)}{t-s}\right|=\fr{\uuV(s)-\uuV(t)}{t-s}\le \fr{\uV(s)-\uV(t^*)}{t-s}\le \fr{\uV(s)-\uV(t^*)}{t^*-s}\le\underline L^T \ee and local Lipschitz continuity of $\uuV$ follows. Due to the Lipschitz continuity, we have in particular that $\uV_i,\uV,\uuV$ are differentiable almost everywhere and the set $A_T=\{t\in (0,T)|\,\uV_1'(t),\uV_2'(t),\uV'(t),\uuV'(t) \text{ exist}\}$ has full measure, $|A_T|=T$. To complete the proof of the lower bound in (I'), we prove the following lemma. \begin{lemma} For all $t\in A_T$ we have \begin{enumerate}[(i)] \item $\uV_i'(t)=\pa_tv_i(t,x)$ for all $x\in\bar\Omega$ such that $v_i(t,x)=\uV_i(t)$ \item $\uV'(t)=\uV_i'(t)$ for all $i$ such that $\uV(t)=\uV_i(t)$ \item if $\uuV'(t)<0$, then $\uuV(t)=\uV(t)$ and $\uuV'(t)=\uV'(t).$ \end{enumerate}\la{mlem} \end{lemma} \begin{proof} To see (i), we fix $x\in\bar\Omega$ such that $v_i(t,x)=\uV_i(t)$ and compute for $0<s<t<T$ \be \fr{\uV_i(t)-\uV_i(s)}{t-s}\ge\fr{v_i(t,x)-v_i(s,x)}{t-s} \ee and taking the limit $s\to t^-$ we see that $\uV_i'(t)\ge \pa_tv_i(t,x)$. Similarly for $0<t<s<T$ we have \be \fr{\uV_i(s)-\uV_i(t)}{s-t}\le\fr{v_i(s,x)-v_i(t,x)}{s-t} \ee and we obtain $\uV_i'(t)\le \pa_tv_i(t,x)$ upon taking the limit $s\to t^+$. An analogous argument gives us (ii). Now we show (iii). Assume $\uuV'(t)<0$, then for all $s<t$ we have $\uuV(s)>\uuV(t)$ for otherwise, since $\uuV$ is nonincreasing we have $\uuV(s)=\uuV(t)$ for some $s<t$. 
But then, again since $\uuV$ is nonincreasing, we have $\uuV(r)=\uuV(t)$ for all $r\in[s,t]$. However, it then follows that the left sided derivative of $\uuV$ at $t$ is zero, which contradicts our assumption $\uuV'(t)<0$. By the same argument we see that for all $s>t$ we have $\uuV(s)<\uuV(t)$. It now follows that $\uuV(t)=\uV(t)$. Indeed, otherwise, there exists $s<t$ such that $\uV(s)=\uuV(t).$ But by the argument in the previous paragraph, we have $\uV(s)\ge \uuV(s)>\uuV(t)$, which gives us a contradiction. Now to prove that $\uuV'(t)=\uV'(t)$, we compute for $s<t$, using $\uuV(t)=\uV(t)$, \be \fr{\uuV(t)-\uuV(s)}{t-s}\ge \fr{\uV(t)-\uV(s)}{t-s} \ee which gives us $\uuV'(t)\ge \uV'(t)$ upon taking the limit $s\to t^-$. Similarly, considering $s>t$ we obtain the opposite inequality, thus completing the proof of (iii).\end{proof} Now we complete the proof of the lower bound in (I'). Suppose for the sake of contradiction that there exists $T>0$ such that $\uuV(T)<\min\{\inf_\Omega v_1(0),\inf_\Omega v_2(0),\ugg\}.$ Then since $\uuV$ is locally Lipschitz and hence satisfies the fundamental theorem of calculus, there exists $t\in A_T$ such that $\uuV'(t)<0$ and $\uuV(t)<\min\{\inf_\Omega v_1(0),\inf_\Omega v_2(0),\ugg\}$. Then it follows from Lemma \ref{mlem} that for some $i$ and some $x\in\bar\Omega$, we have $v_i(t,x)=\uuV(t)$ and $\pa_tv_i(t,x)<0$. Also, since $\uuV(t)<\ugg$ we have that $x\in\Omega$. We assume without loss of generality that $i=1$. Then evaluating the equation for $v_1$ at $(t,x)$ and using the fact that $v_2(t,x)\ge \uuV(t)$, we find that \be \bal 0>\pa_tv_1=&d_1\D v_1+b_1\cdot\na v_1-p_1(v_1-v_2)\\ \ge& -p_1(\uuV(t)-\uuV(t))\\ =& 0 \eal \ee which gives us a contradiction. Therefore $\uV(t)\ge\uuV(t)\ge \min\{\inf_\Omega v_1(0),\inf_\Omega v_2(0),\ugg\}$ for all $t$. The monotonicity of $\uV$ follows from the same argument by replacing the initial time $0$ by some arbitrary time $s>0$. Then we obtain for all $t>s$ \be \uV(t)\ge \uuV(t)\ge \min\{\inf_\Omega v_1(s),\inf_\Omega v_2(s)\}=\uV(s). \ee Above we removed $\ugg$ from the minimum, as this is redundant for $s>0$. Now we prove the lower bound statement of (II'). The upper bound statement is proved similarly. We assume without loss of generality that $0\not\in\bar\Omega$ so that there exist $\underline\alpha,\overline\alpha>0$ such that $\underline\alpha\le |x|\le\overline\alpha$ for all $x\in\bar\Omega$. Then we define \be w_i=v_i-\epsilon|x|^\lambda \ee where $\lambda>0$ is chosen large enough so that for each $i$ we have \be \fr{d_i}{2}(\lambda+1)\ge \sup_{t\ge 0,\,x\in\Omega}|b_i(t,x)\cdot x|\la{lbig} \ee and $\epsilon>0$ is chosen small enough that \be \epsilon\overline\alpha^\lambda\le \delta.\la{epsmall} \ee The functions $w_i$ satisfy the equations \be \pa_t w_i=d_i\D w_i+b_i\cdot\na w_i+\epsilon d_i\lambda(\lambda+1)|x|^{\lambda-2}+\epsilon\lambda|x|^{\lambda-2}b_i\cdot x-z_ip_i(w_1-w_2)\la{Wi} \ee where $z_1=1=-z_2$. By the same proof as in Lemma \ref{mlem} and the discussion leading up to it, we know that the functions \be \uW_i(t)=\inf_\Omega w_i(t,x),\quad \uW(t)=\min_i \uW_i(t) \ee are locally Lipschitz and thus differentiable almost everywhere. In addition, for each $t>0$ where $\uW'(t)$, $\uW_1'(t)$, $\uW_2'(t)$ all exist, we have that for each $i$ and $x\in\bar\Omega$ such that $w_i(t,x)=\uW(t)$, the time derivatives coincide, $\pa_t w_i(t,x)=\uW'(t)$.
Now, if initially, at time $t=0$, we have $\ugg-\epsilon\sup_{\pa\Omega}|x|^\lambda\le \uW(0)$, then since $\uW\le \uV$ we have, using (\ref{epsmall}), \be \ugg-\delta\le\ugg-\epsilon\overline\alpha^\lambda\le \uV(0). \ee Then, since $\uV$ is monotone nondecreasing, the lower bound in (II') follows. Now suppose $\uW(0)<\ugg-\epsilon\sup_{\pa\Omega}|x|^\lambda.$ Then in particular, we have \be \uW(0)<\min_i\inf_{\pa\Omega}w_i=\min_i\inf_{\pa\Omega}(g_i-\epsilon|x|^\lambda). \ee Thus by continuity, we have that \be \uW(t)<\min_i\inf_{\pa\Omega}(g_i-\epsilon|x|^\lambda)\la{www} \ee holds on some interval $[0,T^*)$ where $T^*\in (0,\infty]$ can be chosen to be maximal so that if $T^*$ is finite, we have $\uW(T^*)=\min_i\inf_{\pa\Omega}(g_i-\epsilon|x|^\lambda)$. We claim that indeed $T^*<\infty$. The lower bound of (II') follows from this claim since \be \bal \uW(T^*)=\min_i\inf_{\pa\Omega}(g_i-\epsilon|x|^\lambda)\Rightarrow& \min_i\inf_\Omega (v_i(T^*)-\epsilon|x|^\lambda)\ge \ugg-\epsilon\sup_{\pa\Omega}|x|^\lambda\ge \ugg -\delta\\ \Rightarrow& \uV(T^*)\ge \ugg-\delta \eal \ee and the lower bound continues to hold for all $t\ge T^*$ due to the monotonicity of $\uV$. It remains to prove the claim that $T^*<\infty$. Indeed, let us fix a time $t$ such that (\ref{www}) holds. Then at time $t$, the value $\uW(t)$ is attained by $w_i$, for some $i$, at some interior point $x\in\Omega$. This point is a global minimum of $w_i$ at time $t$. We assume without loss of generality that $i=1$. Thus evaluating (\ref{Wi}) at $(t,x)$ and using (\ref{lbig}) and the fact that $w_2(t,x)\ge w_1(t,x)$ we have \be \bal \pa_t w_1=&d_1\D w_1+b_1\cdot\na w_1+\epsilon d_1\lambda(\lambda+1)|x|^{\lambda-2}+\epsilon\lambda|x|^{\lambda-2}b_1\cdot x-p_1(w_1-w_2)\\ \ge& \epsilon d_1\lambda(\lambda+1)|x|^{\lambda-2}-\epsilon\lambda|x|^{\lambda-2}\sup_{y\in\Omega}|b_1(t,y)\cdot y|\\ \ge&\fr{1}{2} \epsilon d_1\lambda(\lambda+1)|x|^{\lambda-2}\\ \ge&\fr{1}{2}\epsilon d_1\lambda(\lambda+1)\underline\alpha^{\lambda-2}. \eal \ee If $t\in A_{T^*}=\{t\in (0,T^*)| \uW'(t), \uW_1'(t), \uW_2'(t)\,\text{exist}\}$, then by the same argument as in Lemma \ref{mlem}, we have $\uW'(t)=\pa_tw_1(t,x)$ and thus \be \uW'(t)\ge \fr{1}{2}\epsilon d_1\lambda(\lambda+1)\underline\alpha^{\lambda-2}. \ee In general, the relation \be \uW'(t)\ge \min_i\fr{1}{2}\epsilon d_i\lambda(\lambda+1)\underline\alpha^{\lambda-2}=\tilde\beta \ee holds for every time $t\in A_{T^*}$. Since $\uW, \uW_1, \uW_2$ are each locally Lipschitz (and hence differentiable almost everywhere and satisfy the fundamental theorem of calculus), if (\ref{www}) holds on $[0,\infty)$ (i.e. if $T^*=\infty$), then we obtain \be \infty> \liminf_{t\to\infty}\uW(t)=\uW(0)+\liminf_{t\to\infty}\int_0^t\uW'(s)\,ds\ge \uW(0)+\liminf_{t\to\infty}(t\tilde\beta)=\infty \ee which gives us a contradiction. Thus $T^*<\infty$, and in fact since, by (I'), \be \bal \max\{\sup_\Omega v_1(0),\sup_\Omega v_2(0),\ogg\}-\epsilon\underline\alpha^\lambda\ge\uV(T^*)-\epsilon\underline\alpha^\lambda\ge \uW(T^*)=&\uW(0)+\int_0^{T^*}\uW'(s)\,ds\\ \ge&\uW(0)+T^*\tilde\beta, \eal \ee we have that $T^*$ is bounded above by a constant depending ultimately on $\delta, d_i, \sup_t\|b_i(t)\|_{L^\infty}$, the initial and boundary conditions, and the domain, \be T^*\le \fr{1}{\tilde\beta}(\max\{\sup_\Omega v_1(0),\sup_\Omega v_2(0),\ogg\}-\uW(0)-\epsilon\underline\alpha^\lambda). \ee This completes the proof of the lower bound of (II') and thus of the proposition.
\end{proof} We now prove Theorem \ref{maxthm} using Proposition \ref{prop!}. \begin{proof} We take $v_i=c_i$, $d_i=D_i$, $g_i=\gamma_i$, $p_i=D_ic_i/\epsilon$, and \be b_i=-u+D_iz_i\na\Phi \ee in the proposition, and thus (I) follows. In order to show (II), it suffices to verify that $b_i$ is uniformly bounded in time. By (I) and (\ref{tpois}), we have that $\sup_t\|\na\Phi(t)\|_{L^\infty}<\infty.$ Thus it only remains to establish a uniform bound on $\|u\|_{L^\infty}$. To this end, we prove below that $\|Au\|_{L^2}$ is uniformly bounded in time, from which the desired result follows due to the embedding $H^2\hookrightarrow L^\infty.$ \textbf{Step 1. Uniform $L^\infty_tH^1_x$ bound on $u$.} Applying the Leray projection to (\ref{tstokes}), we obtain \be \pa_t u+\nu Au=-K\mathbb{P}(\rho\na\Phi).\la{lstokes} \ee Multiplying (\ref{lstokes}) by $Au$ and integrating by parts, we obtain, using (I), \be \bal \fr{1}{2}\fr{d}{dt}\|u\|_V^2+\fr{\nu}{2}\|Au\|_H^2\le C'\|\mathbb{P}(\rho\na\Phi)\|_H^2\le C. \eal \ee Then using the Stokes regularity estimate \be \|u\|_V\le C\|Au\|_H \ee we have \be \fr{d}{dt}\|u\|_V^2\le -C\|u\|_V^2+C \ee from which it follows that \be \sup_t\|u(t)\|_V<\infty.\la{uv} \ee \textbf{Step 2. Local uniform $L^2_tH^1_x$ bounds on $c_i$.} Multiplying (\ref{tnp1}) by $c_1$ and integrating by parts we obtain \be \bal \fr{1}{2}\fr{d}{dt}\|c_1\|_{L^2}^2-D_1\int_\Omega c_1\D c_1\,dV=D_1\int_\Omega c_1\na c_1\cdot\na\Phi-\fr{c_1^2\rho}{\epsilon}\,dV \eal \ee and writing $\D c_1=\D(c_1-\Gamma_1)$ where $\Gamma_1$ is the unique harmonic function on $\Omega$ satisfying ${\Gamma_1}_{|\pa\Omega}=\gamma_1$, we obtain after integrations by parts, Young's inequalities, and the uniform bounds on $c_i$, \be \fr{1}{2}\fr{d}{dt}\|c_1\|_{L^2}^2+\fr{D_1}{2}\|\na c_1\|_{L^2}^2\le C. \ee Then, integrating in time and again using the uniform bound on $c_1$, we obtain for all $t\ge 0$ and $\tau>0$, \be \int_t^{t+\tau}\|\na c_1(s)\|_{L^2}^2\,ds\le C(1+\tau) \ee where $C$ is independent of $t$ and $\tau$. Similar estimates for $i=2$ give us \be \int_t^{t+\tau}\|\na c_i(s)\|_{L^2}^2\,ds\le \bar C(1+\tau),\quad i=1,2\la{lul2} \ee with $\bar C$ independent of $t$ and $\tau$. \textbf{Step 3. Uniform $L^\infty_t H^1_x$ bounds on $c_i$.} Multiplying (\ref{tnp1}) by $-\D c_1$, integrating by parts, and using uniform bounds on $c_i$, we obtain \be \fr{d}{dt}\|\na c_1\|_{L^2}^2+\|\D c_1\|_{L^2}^2\le C+C\|\na c_1\|_{L^2}^2.\la{db} \ee Now fix any $t>1$. By (\ref{lul2}), there exists $t_0\in(\lfloor t\rfloor-1,\lfloor t\rfloor)$ such that $\|\na c_1(t_0)\|_{L^2}^2\le 2\bar C$ (here $\lfloor t\rfloor$ denotes the largest integer not exceeding $t$). Then from (\ref{db}) and (\ref{lul2}), we have \be \bal \|\na c_1(t)\|_{L^2}^2\le&\|\na c_1(t_0)\|_{L^2}^2+C(t-t_0)+C\int_{t_0}^t\|\na c_1(s)\|_{L^2}^2\,ds\\ \le& 2\bar C +2C+3C\bar C \eal \ee where the final bound does not depend on $t$. After similar estimates for $\na c_2$, we obtain \be \sup_t\|\na c_i(t)\|_{L^2}<\infty,\quad i=1,2.\la{h1b} \ee \textbf{Step 4. Local uniform $L^2_tH^2_x$ bounds on $c_i$.} Integrating (\ref{db}) and using (\ref{h1b}), we obtain \be \int_t^{t+\tau}\|\D c_1(s)\|_{L^2}^2\,ds\le C(1+\tau) \ee for $C$ independent of $t$ and $\tau$. The same method yields the corresponding estimate for $i=2$, and thus we have for $C$ independent of $t$ and $\tau$ \be \int_t^{t+\tau}\|\D c_i(s)\|_{L^2}^2\,ds\le C(1+\tau),\quad i=1,2.\la{lul3} \ee \textbf{Step 5.
Local uniform $L^2_tL^2_x$ bounds on $\pa_t c_i$.} Multiplying (\ref{tnp1}) by $\pa_t c_1$ and integrating by parts, we obtain, using the uniform bounds on $u$, $c_i$ and $\na c_i$, \be\bal \fr{D_1}{2}\fr{d}{dt}\|\na c_1\|_{L^2}^2+\fr{1}{2}\|\pa_t c_1\|_{L^2}^2&\le C(\|u\|_{V}^2\|\na c_1\|_{L^3}^2+\|\na c_1\|_{L^2}^2\|\na\Phi\|_{L^\infty}^2+\|c_1\rho\|_{L^2}^2)\\ &\le C(1+\|\D c_1\|_{L^2}^2) \eal\ee and integrating in time and using (\ref{h1b}), (\ref{lul3}), we obtain \be \int_t^{t+\tau}\|\pa_s c_1(s)\|_{L^2}^2\,ds\le C(1+\tau). \ee Similar estimates for $i=2$ give us \be \int_t^{t+\tau}\|\pa_s c_i(s)\|_{L^2}^2\,ds\le \tilde C(1+\tau),\quad i=1,2\la{ah} \ee for $\tilde C$ independent of $t$ and $\tau$. \textbf{Step 6. Local uniform $L^2_tL^2_x$ bounds on $\pa_t u$.} Multiplying (\ref{lstokes}) by $\pa_t u$ and integrating by parts, we have \be \fr{\nu}{2}\fr{d}{dt}\|u\|_V^2+\fr{1}{2}\|\pa_t u\|_H^2\le \|\rho\na\Phi\|_{L^2}^2\le C \ee and thus integrating in time, it follows from (\ref{uv}) that \be \int_{t}^{t+\tau}\|\pa_s u(s)\|_{H}^2\,ds\le C'(1+\tau)\la{ahh} \ee where $C'$ is independent of $t$ and $\tau$. \textbf{Step 7. Uniform $L^\infty_tL^2_x$ bounds on $\pa_t c_i$ and $\pa_t u$.} Differentiating (\ref{tnp1}) in time, multiplying by $\pa_t c_1$ and integrating by parts, we obtain \be \bal \fr{1}{2}\fr{d}{dt}\|\pa_t c_1\|_{L^2}^2+D_1\|\na \pa_t c_1\|_{L^2}^2=&-\int_\Omega (u\cdot\na\pa_t c_1)\pa_t c_1\,dV-\int_\Omega(\pa_t u\cdot\na c_1)\pa_t c_1\,dV\\ &-D_1\int_\Omega \pa_t c_1\na\Phi\cdot\na\pa_t c_1\,dV-D_1\int_\Omega c_1\na\pa_t\Phi\cdot\na\pa_t c_1\,dV. \eal \ee The first integral on the right hand side vanishes because $\div u=0$. We integrate the second integral by parts once more, and using Young's inequalities and uniform bounds on $c_i$ we obtain \be \fr{1}{2}\fr{d}{dt}\|\pa_t c_1\|_{L^2}^2+\fr{D_1}{2}\|\na\pa_t c_1\|_{L^2}^2\le C(\|\pa_t u\|_H^2+\|\pa_t c_1\|_{L^2}^2+\|\pa_t c_2\|_{L^2}^2).\la{11} \ee Similarly for $i=2$ we obtain \be \fr{1}{2}\fr{d}{dt}\|\pa_t c_2\|_{L^2}^2+\fr{D_2}{2}\|\na\pa_t c_2\|_{L^2}^2\le C(\|\pa_t u\|_H^2+\|\pa_t c_1\|_{L^2}^2+\|\pa_t c_2\|_{L^2}^2).\la{22} \ee Next, differentiating (\ref{lstokes}) in time, multiplying by $\pa_t u$ and integrating by parts, we obtain \be \fr{1}{2}\fr{d}{dt}\|\pa_t u\|_H^2+\fr{\nu}{2}\|\pa_t u\|_V^2\le C(\|\pa_t\rho\|_{L^2}^2\|\na\Phi\|_{L^\infty}^2+\|\rho\|_{L^\infty}^2\|\na\pa_t\Phi\|_{L^2}^2)\le C(\|\pa_t c_1\|_{L^2}^2+\|\pa_t c_2\|_{L^2}^2).\la{33} \ee Now adding (\ref{11})-(\ref{33}), we obtain \be \fr{d}{dt}(\|\pa_t c_1\|_{L^2}^2+\|\pa_t c_2\|_{L^2}^2+\|\pa_t u\|_H^2)\le C(\|\pa_t c_1\|_{L^2}^2+\|\pa_t c_2\|_{L^2}^2+\|\pa_t u\|_H^2). \la{ahhh} \ee Finally, using the same method as in Step 3, we use (\ref{ah}), (\ref{ahh}) and (\ref{ahhh}) to obtain \be \sup_t(\|\pa_t c_1(t)\|_{L^2}+\|\pa_t c_2(t)\|_{L^2}+\|\pa_t u(t)\|_H)<\infty. \ee \textbf{Step 8. Uniform $L^\infty_t H^2_x$ bounds on $u$.} From (\ref{lstokes}), we have \be \|Au\|_H\le C(\|\pa_t u\|_{H}+\|\rho\na\Phi\|_{L^2}) \ee and it follows from the preceding estimates that \be \sup_t\|Au(t)\|_H<\infty. \ee With this bound, the proof of the uniform boundedness of $b_i=-u+D_iz_i\na\Phi$ is complete, and thus (II) of the theorem follows from (II') of Proposition \ref{prop!}. \end{proof} \section{Global Stability of Weak Steady Currents}\la{GS} In this section we consider the long time behavior of solutions to the time dependent Nernst-Planck-Stokes (NPS) system (\ref{tnp1})-(\ref{tdiv}).
In this section we take the domain to be the three dimensional periodic strip $\Omega=(0,L)\times\mathbb{T}\times\mathbb{T}$ where $\mathbb{T}$ has period $1$, and boundary conditions \begin{align} c_i(t,0,y,z)&=\alpha_i,\quad c_i(t,L,y,z)=\beta_i\la{bc1}\\ \Phi(t,0,y,z)&=-V,\quad \Phi(t,L,y,z)=0\la{bc2}\\ u(t,0,y,z)&=u(t,L,y,z)=0.\la{bc3} \end{align} Here, we take $\alpha_i,\beta_i,V>0$ to be \textit{constants}. In the first subsection of this section, we analyze one dimensional solutions to NPS with boundary conditions (\ref{bc1})-(\ref{bc3}) and establish uniform bounds. In the second subsection, we show that \textit{weak current} one dimensional solutions are globally stable. This latter result yields as a corollary the uniqueness of steady state solutions in the setting of small perturbations from equilibrium. \subsection{One Dimensional Steady States}\la{odss} We consider the one dimensional steady state Nernst-Planck system for $x\in(0,L)$ \begin{align} 0&=\pa_x(\pa_x c_1^*+c_1^*\pa_x\Phi^*)\la{1np1}\\ 0&=\pa_x(\pa_x c_2^*-c_2^*\pa_x\Phi^*)\la{1np2}\\ -\epsilon\pa_{xx}\Phi^*&=\rho^*=c_1^*-c_2^*\la{1pois} \end{align} with boundary conditions corresponding to (\ref{bc1}), (\ref{bc2}) \begin{align} c_i^*(0)&=\alpha_i>0,\quad c_i^*(L)=\beta_i>0\la{BC1}\\ \Phi^*(0)&=-V<0,\quad \Phi^*(L)=0.\la{BC2} \end{align} As we will see in Section \ref{gs}, one dimensional Nernst-Planck steady states, with zero fluid flow $u^*\equiv 0$, are also steady state solutions to the full three dimensional NPNS system in our current setting of a three dimensional periodic strip with boundary conditions (\ref{bc1})-(\ref{bc3}). This is the motivation for the study of these one dimensional solutions. While the computations of the previous section could be significantly simplified for this one dimensional, no fluid setting, nonetheless the existence of a smooth solution to (\ref{1np1})-(\ref{BC2}), with $c_i^*\ge 0$, follows from a streamlined version of the proof of Theorem \ref{thm1} (see also \cite{mock}). In this subsection, we establish uniform bounds on $c_i^*$ and $\Phi^*$ that depend exclusively on boundary data. To this end, we recall the electrochemical potentials \be \mu_i^*=\log c_i^*+z_i\Phi^*\la{mu} \ee and the related variables (a.k.a. Slotboom variables in the semiconductor literature) \be \eta_i^*=\exp\mu_i^*=c_i^*e^{z_i\Phi^*}\la{eta} \ee where $z_1=1=-z_2$. We refer the reader to \cite{park} for a more complete study on the one dimensional steady state Nernst-Planck system. \begin{prop} Suppose $(c_1^*,c_2^*,\Phi^*)$ is a smooth solution to (\ref{1np1})-(\ref{BC2}). Then, the solution satisfies the following uniform bounds: \begin{enumerate}[(I)] \item $\min\{\alpha_ie^{-z_iV},\beta_i\}=\lambda_i\le \eta_i^*\le\Lambda_i=\max\{\alpha_ie^{-z_iV},\beta_i\}$ \item $\min\{-V,\log(\lambda_1/\Lambda_2)^\fr{1}{2}\}=-v\le\Phi^*\le\mathcal{V}= \max\{0,\log(\Lambda_1/\lambda_2)^\fr{1}{2}\}$ \item $\min\{\alpha_1,\alpha_2,\beta_1,\beta_2\}=\underline{\gamma}\le c_i^*\le\og=\max\{\alpha_1,\alpha_2,\beta_1,\beta_2\} \,\text{ for }\, i =1,2.$ \end{enumerate}\la{prop} \end{prop} \begin{proof} Writing (\ref{1np1}), (\ref{1np2}) in terms of $\eta_i^*$ we have \be 0=\pa_x(e^{-z_i\Phi^*}\pa_x\eta_i^*)=e^{-z_i\Phi^*}(-z_i\pa_x\Phi^*\pa_x\eta_i^*+\pa_{xx}\eta_i^*). \ee And thus (I) follows from the weak maximum principle and the fact that \be \eta_i^*(0)=\alpha_ie^{-z_iV},\quad \eta_i^*(L)=\beta_i. \ee To show (II), we rewrite (\ref{1pois}) as \be -\epsilon\pa_{xx}\Phi^*=\eta_1^*e^{-\Phi^*}-\eta_2^*e^{\Phi^*}. 
\ee So if $\Phi^*$ attains its global maximum at an interior point $x_0\in(0,L)$, then \be \bal &0\le -\epsilon\pa_{xx}\Phi^*(x_0)=\eta_1^*(x_0)e^{-\Phi^*(x_0)}-\eta_2^*(x_0)e^{\Phi^*(x_0)}\\ \Rightarrow&\Phi^*(x_0)\le\log\left(\fr{\eta_1^*(x_0)}{\eta_2^*(x_0)}\right)^\fr{1}{2}\le \log(\Lambda_1/\lambda_2)^\fr{1}{2} \eal \ee and the upper bound in (II) follows. Similarly, if $\Phi^*$ attains its global minimum at an interior point $x_0\in(0,L)$, then \be \bal &0\ge\eta_1^*(x_0)e^{-\Phi^*(x_0)}-\eta_2^*(x_0)e^{\Phi^*(x_0)}\\ \Rightarrow&-\Phi^*(x_0)\le \log\left(\fr{\eta_2^*(x_0)}{\eta_1^*(x_0)}\right)^\fr{1}{2}\le\log (\Lambda_2/\lambda_1)^\fr{1}{2} \eal \ee and the lower bound in (II) follows. Lastly, prior to proving (III), we note that by combining (I) and (II) and using the definition of $\eta_i^*$, it is possible to obtain upper and lower bounds on $c_i^*$ that depend on boundary data for $c_i$ and $\Phi^*$. Here, instead we establish the bounds in (III), which in particular does not depend on boundary data for $\Phi^*$. We prove only the upper bound as the lower bound can be shown analogously. To do so, we introduce the rescaling $X=x/\epsilon^\fr{1}{2}$ so that we can rewrite (\ref{1np1}), (\ref{1np2}) as \begin{align} -\pa_{XX}c_1^*=&\pa_Xc_1^*\pa_X\Phi^*-c_1^*(c_1^*-c_2^*)\la{X1}\\ -\pa_{XX}c_2^*=&-\pa_Xc_2^*\pa_X\Phi^*+c_2^*(c_1^*-c_2^*).\la{X2} \end{align} Suppose that $\max\{c_1^*,c_2^*\}$ attains a global maximal value, $c>\og$, at an interior point $X_0\in (0,L/\epsilon^\fr{1}{2})$. Assume without loss of generality that this maximum is attained by $c_1^*$. Then we have \be \bal 0\le-\pa_{XX}c_1^*(X_0)=&\pa_Xc_1^*(X_0)\pa_X\Phi^*(x_0)-c_1^*(X_0)(c_1^*(X_0)-c_2^*(X_0))\\ =&-c(c-c_2^*(X_0)).\la{118} \eal \ee Then since by assumption we have $c\ge c_2^*(X_0)$, we necessarily have that $c=c_2^*(X_0)$, for otherwise, the right hand side of (\ref{118}) becomes strictly negative. Furthermore, the inequality in (\ref{118}) is an equality, and we conclude that $\pa_{XX}c_1^*(X_0)=0$. And since we have shown that $c_2^*$ also attains its global maximum at $X_0$, by evaluating (\ref{X2}) at $X_0$ we conclude that $\pa_Xc_2^*(X_0)=\pa_{XX}c_2^*(X_0)=0$. It follows by induction that for $i = 1,2$ we have \be \pa_X^{k}c_i^*(X_0)=0 \quad\text{for all } k\ge 1.\la{d} \ee Indeed, assume, for the sake of induction, that this is true for all $1\le k'\le k$ where $k\ge 2$. Then differentiating (\ref{X1}), \be -\pa_{X}^{k+1}c_1^*=\sum_{j=0}^{k-1}\binom{k-1}{j}\left(\pa_X^{j+1}c_1^*\pa_X^{k-j}\Phi^*-\pa_X^jc_1^*\pa_X^{k-1-j}(c_1^*-c_2^*)\right) \la{k+1} \ee and evaluating (\ref{k+1}) at $X_0$ and using the induction hypothesis together with the fact that $c_1^*(X_0)=c_2^*(X_0)$, we conclude $\pa_X^{k+1}c_1^*(X_0)=0$ as desired. Similarly by differentiating (\ref{X2}) we obtain $\pa_X^{k+1}c_2^*(X_0)=0$. Now \textit{if} $c_i^*$ is real analytic, then (\ref{d}) implies that in fact $c_1^*\equiv c_2^*\equiv c$, but we assumed that $c>\og$, so this contradiction implies the upper bound $\max\{c_1^*,c_2^*\}\le\og$. So to complete the proof of (III) it suffices to establish the real analyticity of $c_i^*$. To this end, we prove that there exists $C>0$ such that for all integers $k\ge 0$ \be \bal A^{-1}&=\sup_X|\pa_X\Phi^*|\le \fr{1}{4}C\\ A^k&=\sup_X|\pa_X^kc_1^*|+\sup_X|\pa_X^kc_2^*|\le \fr{1}{4}(k+1)!C^{k+2}.\la{Ak} \eal \ee We choose $C$ so that (\ref{Ak}) holds for $A^k$, $k=-1,0,1$. 
Now we prove the implication \be (\ref{Ak}) \text{ holds for all $k'$ less than or equal to $k\ge 1$}\Rightarrow (\ref{Ak}) \text{ holds for $k+1$}. \ee To show this, we see from (\ref{k+1}) that \be \bal |\pa_X^{k+1}c_i^*|\le&\sum_{j=0}^{k-1}\binom{k-1}{j}(A^{j+1}A^{k-j-2}+A^{j}A^{k-1-j})\\ \le&\fr{1}{16}\sum_{j=0}^{k-1}\fr{(k-1)!}{j!(k-1-j)!}((j+2)!(k-j-1)!C^{k+3}+(j+1)!(k-j)!C^{k+3})\\ =&\fr{C^{k+3}}{16}(k-1)!\sum_{j=0}^{k-1}((j+2)(j+1)+(j+1)(k-j))\\ \le&\fr{C^{k+3}}{16}(k-1)!k((k+1)k+k^2)\\ \le&\fr{1}{8}(k+2)!C^{k+3} \eal \ee and summing in $i$ we obtain \be A^{k+1}\le \fr{1}{4}(k+2)!C^{k+3} \ee as desired. Thus $c_i^*$ is real analytic and the proof of the upper bound in (III) and of the proposition is complete. \end{proof} \begin{rem} Proposition \ref{prop} (I), (II) directly extend to higher dimensional settings when no fluid is involved (see also \cite{mock} for further generalizations). On the other hand, it is unclear if (III) holds in higher dimensions as the proof provided above relies on a property of one dimensional real analytic functions. \end{rem} \subsection{Global Stability}\la{gs} In this last subsection, we study the problem of stability of one dimensional steady currents on the domain $\Omega=(0,L)\times\mathbb{T}^2$. \begin{defi} We say that $(c_1^*, c_2^*, u^*\equiv 0)$ is a \textbf{(one dimensional) steady current} solution to (\ref{tnp1})-(\ref{tdiv}) with boundary conditions (\ref{bc1})-(\ref{bc3}) on the three dimensional periodic strip $\Omega=(0,L)\times \mathbb{T}^2$ if $c_i^*$ is independent of the spatial variables $y$ and $z$, independent of time, and solves the one dimensional problem (\ref{1np1})-(\ref{BC2}). \end{defi} \begin{rem} A solution $c_i^*(x)$ to the one dimensional system (\ref{1np1})-(\ref{BC2}), seen as a three dimensional function on $\Omega$, independent of $y, z$, together with $u^*\equiv 0$, is indeed a solution to the three dimensional steady state NPNS system because \be \bal \pa_xc_1^*+c_1^*\pa_x\Phi^*&=j_1\\ \pa_xc_2^*-c_2^*\pa_x\Phi^*&=j_2\la{j} \eal \ee for constants $j_i$. It follows from this that \be \rho^*\na\Phi^*=-\na(c_1^*+c_2^*)+(j_1+j_2,0,0)=-\na(c_1^*+c_2^*-(j_1+j_2)x), \ee and thus $u^*(x,y,z)\equiv 0$ and $p^*(x,y,z)=K(c_1^*(x)+c_2^*(x)-(j_1+j_2)x)$ solve the Stokes equations (\ref{tstokes})-(\ref{tdiv}). \end{rem} In general, it is not known whether solutions to the one dimensional system (\ref{1np1})-(\ref{BC2}) are unique, and therefore it is also unknown whether any given one dimensional steady current solution $(c_1^*, c_2^*,u^*\equiv 0)$ to the three dimensional NPNS system is unique (in general one cannot rule out the existence of other one dimensional steady current solutions nor the existence of solutions that depend also on $y$ and/or $z$). For the remainder of this subsection, we study the stability of a \textit{fixed} one dimensional steady current solution. However, using the a priori estimates of Section \ref{odss} and under a \textit{weak current} or \textit{small perturbation from equilibrium} assumption, c.f. (\ref{jsmall}), we obtain the global stability of the fixed steady current solution (Theorem \ref{globalstab}). As a consequence of stability it follows that the fixed steady current solution is the unique steady state solution of the full three dimensional system (\ref{tnp1})-(\ref{tdiv}) with boundary conditions (\ref{bc1})-(\ref{bc3}) (Theorem \ref{unique}). \begin{rem} It seems somewhat unusual to first establish the global stability of a steady state solution, and then its uniqueness. 
This is due to the absence of certain a priori information. This difficulty does not arise if we consider the Nernst-Planck system, uncoupled to Navier-Stokes equations. In this case, a straightforward generalization of the estimates in Section \ref{odss} give simple, explicit a priori bounds on steady state solutions depending also on $y,z$, thus allowing uniqueness to be established independently of global stability.\la{lastrem} \end{rem} The main tool in proving global stability is the following log-Sobolev type inequality, which is also used in \cite{gajstab} in an equilibrium setting. \begin{lem} Suppose $f_i,g_i$, $i=1,2$ and $p^f, p^g$ are smooth real valued functions on a bounded domain $\Omega\subset\mathbb{R}^3$ satisfying the bounds \be 0<f_1, f_2\le M_f,\quad 0< g_1, g_2\le M_g. \la{ab} \ee and the relations \be \bal {f_1}_{|\pa\Omega}&={g_1}_{|\pa\Omega}\\ {f_2}_{|\pa\Omega}&={g_2}_{|\pa\Omega}\\ {p^f}_{|\pa\Omega}&={p^g}_{|\pa\Omega}\\ -\epsilon\D p^f&= f_1-f_2\\ -\epsilon\D p^g&= g_1-g_2,\quad i=1,2.\la{rel} \eal \ee Then the functions \be \pi^f_i=\log f_i+ z_ip^f,\quad \pi_i^g=\log g_i+z_ip^g,\quad i=1,2 \ee where $z_1=1=-z_2$, satisfy the bound \be \fr{\omega}{l^2}\left(\sum_{i=1}^2\fr{1}{2}\int_\Omega g_i\psi\left(\fr{f_i}{g_i}\right)\,dV+\epsilon\int_\Omega |\na(p^f-p^g)|^2\,dV\right)\le \sum_{i=1}^2\int_\Omega |\na(\pi_i^f-\pi_i^g)|^2\,dV \ee where \be \psi(s)=s\log s-s+1, \quad s>0\la{psi} \ee \be \omega=\fr{2}{\max\{M_f, M_g\}}\la{omega} \ee and $l$ can be chosen to be the height of any infinite slab in $\mathbb{R}^3$ that contains $\Omega$ (i.e. $\Omega\subset\{x_0+s_1e_1+s_2e_2+s_3e_3\,|\, s_1\in (0,l),\, s_2,s_3\in(-\infty,\infty)\}$ for some $x_0\in\mathbb{R}^3$ and orthonormal basis $\{e_i\}$ of $\mathbb{R}^3).$\la{poin} \end{lem} Prior to proving Lemma \ref{poin}, we first establish an interpolation inequality that interpolates $L^2$ between $L\log L$ and $L^\infty$. \begin{lem} For positive, real valued, bounded, measurable functions $f, g$ defined on $\Omega$, we have \be \int_\Omega (f-g)^2\,dV\le \max\{\|f\|_{L^\infty},\|g\|_{L^\infty}\}\int_\Omega g\psi\left(\fr{f}{g}\right)\,dV \ee with $\psi$ defined in (\ref{psi}).\la{inter} \end{lem} \begin{proof} Taylor expanding $\psi(s)$ around $s=1$, we have \be \psi(s)\ge \min\{1,s^{-1}\}(s-1)^2\Rightarrow (s-1)^2\le \max\{1,s\}\psi(s), \ee so taking $s=f/g$ we have \be \bal &\left(\fr{f}{g}-1\right)^2\le\max\left\{1,\fr{f}{g}\right\}\psi\left(\fr{f}{g}\right)\\ \Rightarrow&(f-g)^2\le\max\{f,g\}g\psi\left(\fr{f}{g}\right) \eal \ee and thus the lemma follows after integrating over $\Omega$. \end{proof} Now we prove Lemma \ref{poin}. \begin{proof} We consider the following expression \be \sum_{i=1}^2\left<f_i-g_i,\pi^f_i-\pi^g_i\right>. \ee On one hand we have, using the Poisson equation (\ref{rel}), \be \begin{aligned} \sum_i\left<f_i-g_i,\pi^f_i-\pi^g_i\right>=&\sum_i\left<f_i-g_i,\log\fr{f_i}{g_i}+z_i(p^f-p^g)\right>\\ =&\sum_i\int_\Omega g_i\left(\fr{f_i}{g_i}-1\right)\log\fr{f_i}{g_i}\,dV+\epsilon\int_\Omega|\na(p^f-p^g)|^2\,dV\\ \ge& \sum_i\int_\Omega g_i\psi\left(\fr{f_i}{g_i}\right)\,dV+\epsilon\int_\Omega|\na(p^f-p^g)|^2\,dV\la{lb} \end{aligned} \ee where in the last inequality we used the inequality \be (s-1)\log s\ge \psi(s),\quad s>0. 
\ee On the other hand, we have due to Young's inequality, Poincaré's inequality, and Lemma \ref{inter}, \be \begin{aligned} \sum_i\left<f_i-g_i,\pi^f_i-\pi^g_i\right>\le & \sum_i \fr{\omega}{4}\|f_i-g_i\|_{L^2}^2+\sum_i\fr{1}{\omega}\|\pi^f_i-\pi^g_i\|_{L^2}^2\\ \le&\sum_i\fr{\omega\max\{M_f,M_g\}}{4}\int_\Omega g_i\psi\left(\fr{f_i}{g_i}\right)\,dV+\sum_i\fr{l^2}{\omega}\|\na(\pi^f_i-\pi^g_i)\|_{L^2}^2.\la{ub} \end{aligned} \ee Therefore, choosing $\omega>0$ as in (\ref{omega}), we have, combining (\ref{lb}) and (\ref{ub}), \be \fr{\omega}{l^2}\left(\sum_i\fr{1}{2}\int_\Omega g_i\psi\left(\fr{f_i}{g_i}\right)\,dV+\epsilon\int_\Omega |\na(p^f-p^g)|^2\,dV\right)\le \sum_i\int_\Omega|\na(\pi^f_i-\pi^g_i)|^2\,dV. \ee \end{proof} We now state the global stability theorem. \begin{thm} Suppose $(c_1^*, c_2^*, u^*\equiv 0)$ is a one dimensional steady current solution to the NPS system (\ref{tnp1})-(\ref{tdiv}) with boundary conditions (\ref{bc1})-(\ref{bc3}). Suppose furthermore that the corresponding \textbf{p-current} $j_1$ and \textbf{n-current} $j_2$ (c.f. (\ref{j})) satisfy \begin{align} \max_i |j_i|LG_i<\fr{1}{\sqrt{2}} \la{jsmall} \end{align} where \be G_i=\sqrt{\fr{1}{D}\left(\fr{D_i\og^2}{2\ug^4}+\fr{KL^2\og^2}{\nu\ug^3}\right)} \ee and $D=\min_i D_i$. Then $(c_1^*, c_2^*, u^*\equiv 0)$ is globally asymptotically stable. That is, for any smooth initial conditions $c_1(0)\ge 0, c_2(0)\ge 0, u(0)$ (with $\div u(0)=0$), the corresponding solution $(c_1, c_2, u)$ to (\ref{tnp1})-(\ref{tdiv}) satisfies \be \sum_{i=1}^2\int_\Omega c_i^*\psi\left(\fr{c_i(t)}{c_i^*}\right)\,dV+\int_\Omega |\na(\Phi-\Phi^*)|^2\,dV+\int_\Omega |u(t)|^2\,dV\to 0\quad \text{as } t\to\infty.\la{conv} \ee Furthermore, there exists $T^*>0$, depending on initial and boundary data and the parameters of the system, such that after time $t=T^*$, the rate of convergence in (\ref{conv}) is exponential in time.\la{globalstab} \end{thm} A consequence of Theorem \ref{globalstab} is the following uniqueness theorem. \begin{thm} Under the same hypotheses as in Theorem \ref{globalstab}, the one dimensional steady current solution $(c_1^*, c_2^*, u^*\equiv 0)$ is the unique steady state solution to the NPS system (\ref{tnp1})-(\ref{tdiv}) with boundary conditions (\ref{bc1})-(\ref{bc3}).\la{unique} \end{thm} \begin{rem} We note that the currents $j_i$ are solution dependent constants and the condition \eqref{jsmall} is not explicitly written solely in terms of the boundary data. Writing (\ref{j}) in terms of the electrochemical potentials and the Slotboom variables (c.f. (\ref{mu}), (\ref{eta})) we have \be c_i^*\pa_x\mu_i^*=j_i,\quad e^{-z_i\Phi^*}\pa_x\eta_i^*=j_i\la{jj} \ee and thus \be j_i=\fr{\mu_i^*(L)-\mu_i^*(0)}{\int_0^L\fr{1}{c_i^*}\,dx}=\fr{\eta_i^*(L)-\eta_i^*(0)}{\int_0^L e^{z_i\Phi^*}\,dx}. \ee Then, using the uniform bounds from Proposition \ref{prop}, we see that explicit sufficient conditions in terms of the boundary data which imply the smallness conditions (\ref{jsmall}) are given by \be\begin{cases} \left|\log\fr{\alpha_1}{\beta_1}+V\right|\og G_1<\fr{1}{\sqrt 2}\\ \left|\log\fr{\alpha_2}{\beta_2}-V\right|\og G_2<\fr{1}{\sqrt 2} \end{cases}\ee or \be\begin{cases} \left|\alpha_1-\beta_1e^{-V}\right|e^{v}G_1<\fr{1}{\sqrt 2}\\ \left|\alpha_2-\beta_2e^V\right|e^{\mathcal{V}}G_2<\fr{1}{\sqrt 2} \end{cases}\ee where $v, \mathcal{V}$ are defined in Proposition \ref{prop}. \end{rem} Now we prove Theorem \ref{globalstab}. 
\begin{proof} First suppose that the initial conditions satisfy \be 0<\ugd\le c_i(0)\le \ogd,\quad i=1,2 \ee where \be \ugd=\ug-\delta,\quad\ogd=\og+\delta \ee for some small $\delta> 0$, to be determined below (c.f. (\ref{del2})). Then by Proposition \ref{prop} and Theorem \ref{maxthm} (I), the time dependent solution $(c_1, c_2, u)$ and the one dimensional steady current $(c_1^*, c_2^*, u^*\equiv 0)$ satisfy the bounds \be \bal \ugd\le c_i(t)\le \ogd,\quad \ug\le c_i^*\le\og.\la{cbounds} \eal \ee Next, writing (\ref{tnp1}), (\ref{tnp2}) in terms of the electrochemical potentials \be \mu_i=\log c_i+z_i\Phi,\quad i=1,2 \ee and in terms of the differences $c_i-c_i^*$, $\mu_i-\mu_i^*$, we have \be \bal \pa_t(c_1-c_1^*)&=-u\cdot\na c_1+D_1\div(c_1\na(\mu_1-\mu_1^*)+(c_1-c_1^*)\na\mu_1^*)\\ \pa_t(c_2-c_2^*)&=-u\cdot\na c_2+D_2\div(c_2\na(\mu_2-\mu_2^*)+(c_2-c_2^*)\na\mu_2^*). \eal \ee We multiply the above equations by $\mu_1-\mu_1^*$ and $\mu_2-\mu_2^*$, respectively, and integrate by parts. On the left hand side, we obtain, after summing in $i$, \be \bal \sum_i\left<\pa_t(c_i-c_i^*),\mu_i-\mu_i^*\right>&=\sum_i\left(\fr{d}{dt}\int_\Omega c_i^*\psi\left(\fr{c_i}{c_i^*}\right)\,dV+z_i\left<\pa_t(c_i-c_i^*),\Phi-\Phi^*\right>\right)\\ &=\sum_i\fr{d}{dt}\int_\Omega c_i^*\psi\left(\fr{c_i}{c_i^*}\right)\,dV+\left<\pa_t(\rho-\rho^*),\Phi-\Phi^*\right>\\ &=\sum_i\fr{d}{dt}\int_\Omega c_i^*\psi\left(\fr{c_i}{c_i^*}\right)\,dV+\fr{\epsilon}{2}\fr{d}{dt}\|\na(\Phi-\Phi^*)\|_{L^2}^2. \eal \ee On the right hand side, for $i=1$, we have, using Lemma \ref{inter}, (\ref{cbounds}), and (\ref{jj}), \be \begin{aligned} &\left<-u\cdot\na c_1+D_1\div(c_1\na(\mu_1-\mu_1^*)+(c_1-c_1^*)\na\mu_1^*),\mu_1-\mu_1^*\right>\\ =&-\left<u\cdot\na c_1,\mu_1-\mu_1^*\right>-D_1\int_\Omega c_1|\na(\mu_1-\mu_1^*)|^2\,dV-D_1\left<(c_1-c_1^*)\na\mu_1^*,\na(\mu_1-\mu_1^*)\right>\\ \le& -\fr{D_1}{2}\int_\Omega c_1|\na(\mu_1-\mu_1^*)|^2\,dV-\left<u\cdot\na c_1,\mu_1-\mu_1^*\right>+\fr{D_1}{2}\int_\Omega \fr{(c_1-c_1^*)^2}{c_1}(\pa_x\mu_1^*)^2\,dV\\ \le& -\fr{D_1}{2}\int_\Omega c_1|\na(\mu_1-\mu_1^*)|^2\,dV-\left<u\cdot\na c_1,\mu_1-\mu_1^*\right>+\fr{D_1\ogd}{2\ugd\ug^2}j_1^2\int_\Omega c_1^*\psi\left(\fr{c_1}{c_1^*}\right)\,dV.\la{calc1} \end{aligned} \ee We take a closer look at the term involving $u$, integrating by parts and using $\div u=0$, \be \bal -\left<u\cdot\na c_1,\mu_1-\mu_1^*\right>=&-\left<u\cdot\na c_1,\log c_1+\Phi\right>+\left<u\cdot\na c_1,\mu_1^*\right>\\ =&-\left<u,\na(c_1\log c_1-c_1)\right>+\int_\Omega uc_1\cdot\na\Phi\,dV\\ &+\left<u\cdot\na(c_1-c_1^*),\mu_1^*\right>+\left<u \cdot\na c_1^*,\mu_1^*\right>\\ =&\int_\Omega uc_1\cdot\na\Phi\,dV-\left<u(c_1-c_1^*),\na\mu_1^*\right>\\ &+\left<u,\na(c_1^*\log c_1^*-c_1^*)\right>-\int_\Omega uc_1^*\cdot\na\Phi^*\,dV\\ \le&\int_\Omega uc_1\cdot\na\Phi\,dV-\int_\Omega uc_1^*\cdot\na\Phi^*\,dV\\ &+\fr{\nu}{4KL^2}\|u\|_H^2+\fr{KL^2}{\nu}\int_\Omega|c_1^*\pa_x\mu_1^*|^2\fr{(c_1-c_1^*)^2}{(c_1^*)^2}\,dV\\ \le&\int_\Omega uc_1\cdot\na\Phi\,dV-\int_\Omega uc_1^*\cdot\na\Phi^*\,dV\\ &+\fr{\nu}{4K}\|u\|_V^2+\fr{KL^2\ogd}{\nu\ug^2}j_1^2\int_\Omega c_1^*\psi\left(\fr{c_1}{c_1^*}\right)\,dV \eal \ee and thus returning to (\ref{calc1}), we have \be \begin{aligned} &\left<-u\cdot\na c_1+D_1\div(c_1\na(\mu_1-\mu_1^*)+(c_1-c_1^*)\na\mu_1^*),\mu_1-\mu_1^*\right>\\ \le& -\fr{D_1}{2}\int_\Omega c_1|\na(\mu_1-\mu_1^*)|^2\,dV+\left(\fr{D_1\ogd}{2\ugd\ug^2}+\fr{KL^2\ogd}{\nu\ug^2}\right)j_1^2\int_\Omega c_1^*\psi\left(\fr{c_1}{c_1^*}\right)\,dV\\ &+\fr{\nu}{4K}\|u\|_V^2+\int_\Omega 
uc_1\cdot\na\Phi\,dV-\int_\Omega uc_1^*\cdot\na\Phi^*\,dV. \eal \ee Similarly for $i=2$ we obtain \be \begin{aligned} &\left<-u\cdot\na c_2+D_2\div(c_2\na(\mu_2-\mu_2^*)+(c_2-c_2^*)\na\mu_2^*),\mu_2-\mu_2^*\right>\\ \le& -\fr{D_2}{2}\int_\Omega c_2|\na(\mu_2-\mu_2^*)|^2\,dV+\left(\fr{D_2\ogd}{2\ugd\ug^2}+\fr{KL^2\ogd}{\nu\ug^2}\right)j_2^2\int_\Omega c_2^*\psi\left(\fr{c_2}{c_2^*}\right)\,dV\\ &+\fr{\nu}{4K}\|u\|_V^2-\int_\Omega uc_2\cdot\na\Phi\,dV+\int_\Omega uc_2^*\cdot\na\Phi^*\,dV. \end{aligned} \ee Collecting our estimates thus far and using the fact that $\rho^*\na\Phi^*=-\na(c_1^*+c_2^*-(j_1+j_2)x)$ is a gradient, we have \be \begin{aligned} &\fr{d}{dt}\mathcal{E}+\sum_i\fr{D_i}{2}\int_\Omega c_i|\na(\mu_i-\mu_i^*)|^2\,dV\\ \le&\sum_iM_i^\delta j_i^2\int_\Omega c_i^*\psi\left(\fr{c_i}{c_i^*}\right)\,dV+\int_\Omega u\rho\cdot\na\Phi\,dV-\int_\Omega u\rho^*\cdot\na\Phi^*\,dV+\fr{\nu}{2K} \|u\|_{V}^2\\ =&\sum_iM_i^\delta j_i^2\int_\Omega c_i^*\psi\left(\fr{c_i}{c_i^*}\right)\,dV+\int_\Omega u\rho\cdot\na\Phi\,dV+\fr{\nu}{2K} \|u\|_{V}^2\la{E1} \end{aligned} \ee where (see \cite{ci}) \be \bal \mathcal{E}&=\sum_i\int_\Omega c_i^*\psi\left(\fr{c_i}{c_i^*}\right)\,dV+\fr{\epsilon}{2}\|\na(\Phi-\Phi^*)\|_{L^2}^2\\ M_i^\delta&=\fr{D_i\ogd}{2\ugd\ug^2}+\fr{KL^2\ogd}{\nu\ug^2}. \eal \ee Now we take a look at the Stokes equations, \be \pa_t u+\nu A u=-K\mathbb{P}(\rho\na\Phi). \ee Multiplying by $\fr{u}{K}$ and integrating by parts, we obtain using the self-adjointness of $\mathbb{P}$, \be \begin{aligned} \fr{1}{K}\fr{d}{dt}\|u\|_{H}^2+\fr{\nu}{K}\| u\|_{V}^2=& -\int_\Omega u\cdot\mathbb{P}(\rho\na\Phi)\,dV=-\int_\Omega u\rho\cdot\na\Phi\,dV.\la{u} \end{aligned} \ee Now we combine the estimates (\ref{E1}) and (\ref{u}) to obtain \be \bal \fr{d}{dt}\left(\mathcal{E}+\fr{1}{K}\|u\|_{H}^2\right)+\sum_i\fr{D_i}{2}\int_\Omega c_i|\na(\mu_i-\mu_i^*)|^2\,dV+ \fr{\nu}{2K}\| u\|_{V}^2 \le&\sum_iM_i^\delta j_i^2\int_\Omega c_i^*\psi\left(\fr{c_i}{c_i^*}\right)\,dV.\la{finalE} \eal \ee Now applying Lemma \ref{poin} to the dissipation term \be \mathcal{D}=\sum_i\fr{D_i}{2}\int_\Omega c_i|\na(\mu_i-\mu_i^*)|^2\,dV \ee we obtain \be \bal \mathcal{D}\ge&\fr{D\ugd}{\ogd L^2}\left(\sum_i\fr{1}{2}\int_\Omega c_i^*\psi\left(\fr{c_i}{c_i^*}\right)\,dV+\epsilon\|\na(\Phi-\Phi^*)\|_{L^2}^2\right)\ge\fr{D\ugd}{2\ogd L^2}\mathcal{E} \eal \ee where $D=\min_i D_i$. Next, defining \be \kappa_i^\delta=\fr{D\ugd}{2\ogd L^2}-M_i^\delta j_i^2,\la{del2} \ee we have, due to (\ref{jsmall}), $\kappa_i^\delta >0$ for each $i$ for small enough $\delta>0$. Therefore, after an application of Poincaré's inequality to $\|u\|_V^2$ in (\ref{finalE}), we obtain, for small enough $\delta,$ \be \fr{d}{dt}\mathcal{F}\le -\kappa^\delta \mathcal{F} \ee for \be \mathcal{F}=\mathcal{E}+\fr{1}{K}\|u\|_H^2\la{F} \ee and \be \kappa^\delta=\min\{\kappa_1^\delta,\kappa_2^\delta,\nu/(2L^2)\}>0. \ee It follows that \be\mathcal{F}(t)\le \mathcal{F}(0)e^{-\kappa^\delta t}.\la{exp}\ee Now, for general initial conditions, it suffices to observe that due to Theorem \ref{maxthm} (II), there exists some time $T^*>0$ such that $\ugd\le c_1(T^*), c_2(T^*)\le \ogd$, and then the convergence result follows from the preceding analysis by taking $c_i(T^*)$ to be the initial conditions. This completes the proof of the theorem. \end{proof} Lastly we prove Theorem \ref{unique}. \begin{proof} Suppose $(\oc_1,\oc_2,\ou)\neq (c_1^*, c_2^*, 0)$ is a steady state solution of the NPS system (\ref{tnp1})-(\ref{tdiv}) with boundary conditions (\ref{bc1})-(\ref{bc3}). 
Then by taking initial conditions $c_i(0)=\oc_i,\,u(0)=\ou$, we find that the corresponding energy $\mathcal{F}(t)$, defined in (\ref{F}), is constant in time and positive, $\mathcal{F}(t)=\mathcal{F}(0)>0$. On the other hand, Theorem \ref{globalstab} implies that $\mathcal{F}(t)\to 0$ as $t\to\infty$, and thus we have a contradiction. This contradiction completes the proof. \end{proof} \vspace{.5cm} {\bf{Acknowledgment.}} The work of PC was partially supported by NSF grant DMS- 2106528. \begin{thebibliography}{99} \bibitem{biler}P. Biler, The Debye system: existence and large time behavior of solutions, Nonlinear Analysis {\bf{23}} 9, (1994), 1189 -1209. \bibitem{biler2} P. Biler, J. Dolbeault. Long time behavior of solutions to Nernst-Planck and Debye-Hckel drift-diffusion systems, Ann. Henri Poincaré {\bf{1}}, (2000), 461-472. \bibitem{bothe} D. Bothe, A. Fischer, J. Saal, Global well-posedness and stability of electrokinetic flows, SIAM J. Math. Anal, {\bf 46} 2, (2014), 1263-1316. \bibitem{choi} Y.S. Choi, and R. Lui, Multi-Dimensional Electrochemistry Model, Arch Rational Mech Anal {\bf{130}} (1995), 315-342. \bibitem{cf} P. Constantin, C. Foias, Navier-Stokes Equations, The University of Chicago Press, Chicago, 1988. \bibitem{ci}P. Constantin, M. Ignatova, On the Nernst-Planck-Navier-Stokes system, Arch Rational Mech Anal {\bf{232}}, No. 3, (2018), 1379 -1428. \bibitem{EN} P. Constantin, M. Ignatova, F.-N. Lee, Interior Electroneutrality in Nernst–Planck–Navier–Stokes Systems. Arch Rational Mech Anal \textbf{242}, 1091–1118 (2021). https://doi.org/10.1007/s00205-021-01700-0 \bibitem{np3d}P. Constantin, M. Ignatova, F-N Lee, Nernst-Planck-Navier-Stokes systems near equilibrium, Pure and Applied Functional Analysis {\bf{7}} 1, (2022), 175-196. \bibitem{cil}P. Constantin, M. Ignatova, F.-N. Lee, Nernst–Planck–Navier–Stokes Systems far from Equilibrium. Arch Rational Mech Anal {\bf{240}}, (2021), 1147–1168. https://doi.org/10.1007/s00205-021-01630-x \bibitem{davidson}S. M. Davidson, M. Wissling, A. Mani, On the dynamical regimes of pattern-accelerated electroconvection, Scientific Reports {\bf{6}} 22505 (2016) doi:19.1039/srep22505 \bibitem{evans} L. C. Evans, Partial Differential Equations, Providence, R.I.: American Mathematical Society, 1998. \bibitem{fischer}A. Fischer, J. Saal, Global weak solutions in three space dimensions for electrokinetic flow processes. J. Evol. Equ. {\bf{17}}, (2017), 309–333. https://doi.org/10.1007/s00028-016-0356-0 \bibitem{gaj} H. Gajewski, K. Groger, On the basic equations for carrier transport in semiconductors, Journal of Mathematical Analysis and Applications, \textbf{113} (1986) 12-35. \bibitem{gajstab} H. Gajewski, On uniqueness and stability of steady state carrier distributions in semiconductors (1986). In: Vosmanský J., Zlámal M. (eds) Equadiff 6. Lecture Notes in Mathematics, vol 1192. Springer, Berlin, Heidelberg. https://doi.org/10.1007/BFb0076071 \bibitem{gajewski} H. Gajewski, K. Groger, Reaction-diffusion processes of electrically charged species, Math. Nachr., {\bf{177}} (1996), 109-130. \bibitem{kang} S. Kang, R. Kawk, Pattern Formation of Three-Dimensional Electroconvection on a Charge Selective Surface, Phys. Rev. Lett \textbf{124} 154502 (2020) https://doi.org/10.1103/PhysRevLett.124.154502 \bibitem{fnl}F.-N. Lee, Global Regularity for Nernst-Planck-Navier-Stokes Systems, arXiv:2106.01569, (2021). \bibitem{mock} M. Mock, Analysis of Mathematical Models of Semiconductor Devices, Boole Press, Dublin, 1983. \bibitem{park} J.-H. Park, J. 
W. Jerome, Qualitative Properties of steady state Poisson--Nernst--Planck Systems: Mathematical Study, SIAM J. Appl. Math. \textbf{57}(3) (1997), 609–630. \bibitem{liu}J.-G. Liu, J. Wang. Global existence for Nernst-Planck-Navier-Stokes system in $\mathbb{R}^n$, Communications in Mathematical Sciences {\bf{18}} (2020) 1743-1754. \bibitem{pham} V. S. Pham, Z. Li, K. M. Lim, J. K. White, J. Han, Direct numerical simulation of electroconvective instability and hysteretic current-voltage response of a permselective membrane, Phys. Rev. E {\bf{86}} 046310 (2012) https://doi.org/10.1103/PhysRevE.86.046310 \bibitem{prob}R. Probstein, Physicochemical Hydrodynamics: An Introduction. 2nd ed., Wiley-Interscience, 2003. \bibitem{rubibook} I. Rubinstein, Electro-Diffusion of Ions, SIAM Studies in Applied Mathematics, SIAM, Philadelphia 1990. \bibitem{rubinstein}S. M. Rubinstein, G. Manukyan, A. Staicu, I. Rubinstein, B. Zaltzman, R.G.H. Lammertink, F. Mugele, M. Wessling, Direct observation of a nonequilibrium electro-osmotic instability. Phys. Rev. Lett. {\bf{101}} (2008) 236101-236105. \bibitem{rubisegel} I. Rubinstein, L. A. Segel, Breakdown of a Stationary Solution to the Nernst-Planck-Poisson Equations, J. Chem. Soc., Faraday Trans. 2 \textbf{75} (1979) 936-940. \bibitem{rubizaltz} I. Rubinstein, B. Zaltzman, Electro-osmotically induced convection at a permselective membrane. Phys. Rev. E {\bf{62}} (2000) 2238-2251. \bibitem {ryham} R. Ryham, Existence, uniqueness, regularity and long-term behavior for dissipative systems modeling electrohydrodynamics. arXiv:0910.4973v1, (2009). \bibitem{schmuck} M. Schmuck. Analysis of the Navier-Stokes-Nernst-Planck-Poisson system. Math. Models Methods Appl. {\bf{19}}, (2009), 993-1014. \bibitem{temam} R. Temam, Navier-Stokes Equations: Theory and Numerical Analysis, AMS Chelsea Publishing, 1984. \bibitem{zaltzrubi}B. Zaltzman, I. Rubinstein, Electro-osmotic slip and electroconvective instability. J. Fluid Mech. {\bf{579}}, (2007), 173-226. \end{thebibliography} \end{document}
2205.11364v1
http://arxiv.org/abs/2205.11364v1
Optimization of the Steklov-Lamé eigenvalues with respect to the domain
\documentclass[sn-mathphys]{sn-jnl-mod} \normalbaroutside \usepackage{amsmath,amssymb,amsthm} \usepackage{enumitem} \usepackage[export]{adjustbox} \usepackage{array} \theoremstyle{thmstyleone} \newtheorem{thm}{Theorem}[section] \newtheorem{lemma}[thm]{Lemma} \newtheorem{cor}[thm]{Corollary} \newtheorem{prop}[thm]{Proposition} \newtheorem{deff}[thm]{Definition} \newtheorem{conj}{Conjecture} \theoremstyle{remark} \newtheorem{rem}[thm]{Remark} \newcommand{\bo}[1]{{\bf #1}} \graphicspath{{./pics/}} \newcommand{\Per}{\operatorname{Per}} \newcommand{\ds}{\displaystyle} \newcommand{\di}{\operatorname{div}} \newcommand{\Pol}[1]{\mathcal P_{#1}} \newcommand{\Id}{\operatorname{\bo{Id}}} \newcommand{\diam}{\operatorname{diam}} \newcommand{\txtb}{\textcolor{blue}} \newcommand{\txtr}{\textcolor{red}} \raggedbottom \begin{document} \title[Optimization of the Steklov-Lam\'e eigenvalues with respect to the domain]{Optimization of the Steklov-Lam\'e eigenvalues with respect to the domain} \author[1]{\fnm{Pedro R.S.} \sur{Antunes}}\email{[email protected]} \author*[2]{\fnm{Beniamin} \sur{Bogosel}}\email{[email protected]} \affil[1]{\orgdiv{Departamento de Matem\'{a}tica}, \orgname{Instituto Superior T\'{e}cnico, Universidade de Lisboa}, \orgaddress{\street{Av. Rovisco Pais 1}, \city{Lisboa}, \postcode{P-1049-001}} and \orgdiv{Grupo de F\'{i}sica Matem\'{a}tica}, \orgname{Faculdade de Ci\^{e}ncias, Universidade de Lisboa}, \orgaddress{\street{Campo Grande, Edif\'{i}cio C6}, \city{Lisboa}, \postcode{P-1749-016}, \country{Portugal}}} \affil[2]{\orgdiv{Centre de Math\'ematiques Appliqu\'ees}, \orgname{Ecole Polytechnique}, \orgaddress{\street{Rue de Saclay}, \city{Palaiseau}, \postcode{91128}, \country{France}}} \abstract{ This work deals with theoretical and numerical aspects related to the behavior of the Steklov-Lam\'e eigenvalues on variable domains. After establishing the eigenstructure for the disk, we prove that for a certain class of Lam\'e parameters, the disk maximizes the first non-zero eigenvalue under area or perimeter constraints in dimension two. Upper bounds for these eigenvalues can be found in terms of the scalar Steklov eigenvalues, involving various geometric quantities. We prove that the Steklov-Lam\'e eigenvalues are upper semicontinuous for the complementary Hausdorff convergence of $\varepsilon$-cone domains and, as a consequence, there exist shapes maximizing these eigenvalues under convexity and volume constraints. A numerical method based on fundamental solutions is proposed for computing the Steklov-Lam\'e eigenvalues, allowing to study numerically the shapes maximizing the first ten non-zero eigenvalues. } \keywords{shape optimization, Steklov-Lam\'e eigenvalues, fundamental solutions} \pacs[MSC Classification]{49Q10, 35P15, 65N35} \maketitle \section{Introduction} Given an open, bounded, connected Lipschitz domain consider the Steklov eigenvalue problem \begin{equation} \left\{\begin{array}{rcll} -\Delta u & = & 0 & \text{ in }\Omega \\ \nabla u \cdot \bo n & = & \sigma_n(\Omega) u & \text{ on } \partial \Omega, \end{array}\right. \label{eq:steklov-eigs} \end{equation} where $\bo n$ is the outer unit normal vector to $\partial \Omega$. It is known that the Steklov spectrum consists of a sequence of eigenvalues of the form \[ 0=\sigma_0(\Omega) < \sigma_1(\Omega) \leq ... \to +\infty.\] The study of optimization problems related to Steklov eigenvalues was initiated by the works of Weinstock \cite{weinstock} and Hersch, Payne and Schiffer \cite{hersch-payne-schiffer}. 
Recently, there have been many works related to the study of these eigenvalues, as indicated in the survey paper \cite{survey-girouard-polterowich}. The sloshing behavior of a liquid in a cup has been related to problem in \eqref{eq:steklov-eigs} in \cite{sloshing}. The Steklov-Neumann problem, consisting of adding some boundary parts with Neumann boundary condition in \eqref{eq:steklov-eigs}, has been studied in \cite{ammari-nigam}. It is shown that the corresponding equation models the behavior of a liquid in a container with immovable parts on its surface. Weinstock proved in \cite{weinstock} that $\sigma_1(\Omega)$ is maximized by the disk among simply connected two dimensional sets with fixed perimeter. Numerical observations made in \cite{Bogosel2} show that adding a small hole and rescaling to have prescribed perimeter may increase the Steklov eigenvalue. Therefore, simple connectedness is essential for Weinstock's result. Brock proved in \cite{brock} that $\sigma_1(\Omega)$ is maximized by the ball under volume constraint in any dimension. In \cite{hersch-payne-schiffer} Hersch, Payne and Schiffer provided various upper bounds for functionals depending on the Steklov spectrum, equality being attained for the disk in many of them. One particularity of all these results is that direct proofs are given that the disk is optimal. More recently the question of existence of solutions for problems depending on the Steklov eigenvalues was investigated. One key ingredient is understanding the semi-continuity properties for the Steklov eigenvalues when the domain changes. In \cite{Bogosel} existence of maximizers was proved for convex shapes and for shapes verifying an $\varepsilon$-cone property. This result was generalized in \cite{bogosel-bucur-giacomini} to general domains under volume constraint using a relaxed formulation. Numerical methods were developed in \cite{Bogosel2}, \cite{osting-steklov} for studying shapes maximizing $\sigma_k(\Omega)$ given some $k \geq 1$. Recently in \cite{Sebastian} the Steklov-Lam\'e problem was investigated, which is the analogue of problem \eqref{eq:steklov-eigs} in the setting of linearized elasticity. The precise definition of the Steklov-Lam\'e eigenvalues and the resulting properties are recalled in the next section. The objective of this work is to investigate theoretically and numerically the maximizers of the Steklov-Lam\'e eigenvalues. Although the questions we ask are natural, by analogy to the scalar Steklov problem, the techniques are more involved, reflecting the difficulties raised by the vectorial context. In this work, we will also address the numerical shape optimization of Steklov-Lam\'{e} eigenvalues using the Method of Fundamental Solutions (MFS) as forward solver. The MFS approximation is based on shifts of the fundamental solution of the PDE to some points placed at the exterior of the domain. Thus, by construction, the MFS approximation satisfies the PDE of the problem and the approximation is usually justified by density results. The MFS is a mesh and integration free method and typically presents very fast convergence when applied to smooth shapes. For details about the MFS we refer to the following works \cite{Alves,Alves-Antunes_2013,Bogomolny,FK}. {\bf Structure of the paper.} In Section \ref{sec:properties} we compute the Steklov-Lam\'e eigenstructure of the disk for all ranges of admissible Lam\'e parameters and we establish an analogue of the Weinstock inequality \cite{weinstock} for a certain range of parameters. 
In Section \ref{sec:existence} we investigate the behavior of the Steklov-Lam\'e eigenvalues on moving domains. In particular, we show that there exist maximizers for the Steklov-Lam\'e eigenvalues in the class of convex shapes with fixed volume. In Section \ref{sec:moler-payne} we prove a result inspired by Moler and Payne \cite{moler-payne} related to changes in the solution of a PDE related to the Steklov-Lam\'e problem when the boundary conditions are verified in an approximate way. This result justifies the use of the MFS to approximate the Steklov-Lam\'e eigenvalues, presented in Section \ref{sec:num-methods}. Numerical results related to the optimization of the eigenvalues are shown in Section \ref{sec:num-results}. \section{The Steklov-Lam\'e eigenvalues} \label{sec:properties} \subsection{Definitions and main properties} In the following, we use regular lower case fonts for scalar functions and bold lower case fonts for vectorial functions. Most of the results presented in this paper are valid in arbitrary dimensions. The eigenvalues of the disk and the numerical simulations are related to dimension $d=2$. For simplicity, denote $\bo H^1(\Omega) = (H^1(\Omega))^d$ and $\bo H_0^1(\Omega) = (H_0^1(\Omega))^d$. We use the same type of notations for $L^2$ spaces: bold case refers to vectorial elements of the proper dimension. The scalar product of two vectors $\bo x, \bo y$ is denoted by $\bo x\cdot \bo y$. The matrix scalar product of two matrices $\bo S=(s_{ij})_{1\leq i,j \leq n}$ and $\bo T=(t_{ij})_{1\leq i,j\leq n}$ is denoted by $\bo S:\bo T = \sum_{i,j=1}^n s_{ij}t_{ij}$. Consider a Lipschitz domain $\Omega\subset\Bbb{R}^d$. Note that more general domains for which the Steklov-Lam\'e eigenvalues are defined could be considered, as underlined in \cite{Sebastian}. Consider the solution $\bo u \in \bo H^1(\Omega)$ of the problem \begin{equation} \left\{ \begin{array}{rcll} -\di A(e(\bo u)) & = & 0 & \text{ in } \Omega \\ Ae(\bo u)\bo n & = & \Lambda(\Omega) \bo u & \text{ on } \partial \Omega, \end{array}\right. \label{eq:steklov-lame} \end{equation} where $e(\bo u) = \frac{1}{2}( \nabla \bo u+ \nabla \bo u^T)$ is the usual symmetrized gradient and the material properties are given by Hooke's law $A\xi = 2\mu \xi +\lambda \text{tr}(\xi) \Id$. The parameters $\mu>0$ and $\lambda$ are called the Lam\'e coefficients and they are assumed to satisfy the condition $\lambda+\frac{2}{d}\mu>0$. The Jacobian of $\bo u$ is denoted by $\nabla \bo u$ and $\Id$ denotes the identity matrix. The spectral problem \eqref{eq:steklov-lame} was studied in \cite{Sebastian} where it is proved that under the hypotheses stated above, the spectrum of this problem consists of an increasing sequence of non-negative eigenvalues. It is straightforward to observe that the problem \eqref{eq:steklov-lame} is equivalent to the variational formulation \begin{equation} \int_\Omega Ae(\bo u): e(\bo v) = \Lambda(\Omega) \int_{\partial \Omega} \bo u \cdot \bo v \ \ \ \text{ for every } \bo v \in \bo H^1(\Omega). \label{eq:var-form} \end{equation} The space of rigid motions $\bo R(\Omega)$ is defined (as in \cite{Sebastian}) as the set of functions $\bo v \in \bo H^1(\Omega)$ such that $e(\bo v)=0$. It is a classical result that for a connected open domain $\Omega$ we have \begin{equation} \bo R(\Omega) = \{\bo v \in \bo H^1(\Omega) : \bo v(x) = a+Bx, a\in \Bbb{R}^d, B \in \Bbb{R}^{d\times d}, B^T=-B\}. \label{eq:zeri-eigenfunctions} \end{equation} One can observe that $\dim \bo R(\Omega) = \frac{d(d+1)}{2}$. 
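For instance, in the planar case $d=2$, writing $x=(x_1,x_2)$, a basis of $\bo R(\Omega)$ is given by the two constant translation fields together with the infinitesimal rotation,
\[ \bo r_1(x)=\begin{pmatrix}1\\0\end{pmatrix},\qquad \bo r_2(x)=\begin{pmatrix}0\\1\end{pmatrix},\qquad \bo r_3(x)=\begin{pmatrix}-x_2\\ x_1\end{pmatrix}, \]
so that indeed $\dim \bo R(\Omega)=3=\frac{d(d+1)}{2}$.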
All elements $\bo v \in \bo R(\Omega)$ verify $e(\bo v)=0$. Therefore all rigid motions are eigenfunctions for \eqref{eq:steklov-lame} associated to the zero eigenvalue. Conversely, any eigenfunction $\bo u$ associated to the zero eigenvalue verifies $e(\bo u) = 0$ in $\Omega$. In view of the previous considerations, and the results in \cite{Sebastian}, the Steklov-Lam\'e spectrum of a connected Lipschitz domain $\Omega$ is given by \[ 0 = \Lambda_{0,1}(\Omega) = ... = \Lambda_{0,\frac{d(d+1)}{2}}(\Omega) < \Lambda_1(\Omega) \leq \Lambda_2(\Omega)\leq ... \to +\infty.\] In view of the variational formulation \eqref{eq:var-form}, it is classical that the eigenvalues can be characterized using Rayleigh quotients \begin{equation} \Lambda_n(\Omega) = \min_{\bo S_{n}\subset \bo H^1(\Omega)} \max_{\bo u \in \bo S_n\setminus\bo H_0^1(\Omega)} \frac{\int_\Omega Ae(\bo u):e(\bo u)}{\int_{\partial \Omega} |\bo u|^2} \label{eq:rayleigh} \end{equation} where the minimum is taken over all subspaces $\bo S_{n}$ of $\bo H^1(\Omega)$ having dimension $n+\frac{d(d+1)}{2}$. Denote for each $n\geq 1$ by $\bo u_n\in \bo H^1(\Omega)$ an eigenfunction associated to the eigenvalue $\Lambda_n(\Omega)$. It is immediate to observe that if $\bo u_i$ and $\bo u_j$ are associated to different eigenvalues $\Lambda_i(\Omega) \neq \Lambda_j(\Omega)$ then \eqref{eq:var-form} implies that \[ \Lambda_i(\Omega) \int_{\partial \Omega} \bo u_i \cdot \bo u_j = \int_\Omega Ae(\bo u_i): e(\bo u_j) = \int_\Omega Ae(\bo u_j): e(\bo u_i)=\Lambda_j(\Omega) \int_{\partial \Omega} \bo u_i \cdot \bo u_j.\] As a direct consequence $\int_{\partial \Omega} \bo u_i \cdot \bo u_j = 0$. It is natural to assume that the eigenfunctions $\bo u_n, n \geq 1$, form an orthonormal family when restricted to $\bo L^2(\partial \Omega)$. We make this assumption in the rest of the article. Another direct consequence of \eqref{eq:var-form} is \[ \int_{\partial \Omega} \bo u_n \cdot \bo r= 0,\] for every $n\geq 1$ and $\bo r \in \bo R(\Omega)$, i.e. eigenfunctions associated to $\Lambda_n(\Omega)$ with $n \geq 1$ are orthogonal in $\bo L^2(\partial \Omega)$ to all rigid motions. \begin{rem} It is possible to express the eigenvalues of \eqref{eq:steklov-lame} using Rayleigh quotients for subspaces of dimension $n$ in $\bo H^1(\Omega)$ which are orthogonal to $\bo R(\Omega)$ in $\bo L^2(\partial \Omega)$. However, the formulation \eqref{eq:rayleigh} is more practical for the theoretical questions that will be answered later in the paper. \end{rem} In the following, in order to underline the dependence of the eigenvalue on the shape $\Omega$ and on the parameters $\lambda,\mu$, denote by $\Lambda_n(\Omega,\lambda,\mu)$ an eigenvalue of \eqref{eq:steklov-lame} for a certain pair of Lam\'{e} parameters. Then we have the following result concerning the scaling of the eigenvalues with respect to homotheties of the domain and to the Lam\'e parameters. \begin{prop} {\rm (i)} Scaling with respect to homotheties: \begin{equation}\label{eq:scaling-homotheties} \Lambda_n(t\Omega,\lambda,\mu) = \frac{1}{t} \Lambda_n(\Omega,\lambda,\mu) \text{ for any } t>0. \end{equation} {\rm (ii)} Scaling of the Lam\'e parameters: \begin{equation} \label{multpar} \Lambda_n(\Omega,\alpha\lambda,\alpha\mu)=\alpha\Lambda_n(\Omega,\lambda,\mu),\ \forall\alpha>0. \end{equation} \label{prop:scaling} \end{prop} \begin{proof}(i) is a direct consequence of a change of variables. (ii) is a consequence of the linearity of \eqref{eq:steklov-lame}.
\end{proof} In this work we will consider the shape optimization problems \begin{equation}\label{shoptprob} \Lambda_n^*(\Omega,\lambda,\mu):=\sup \Big\{\Lambda_n(\Omega,\lambda,\mu), \Omega\subset\mathbb{R}^d:|\Omega|=1\Big\} \end{equation} and \begin{equation} \label{shoptprobconv} \Lambda_n^{\#}(\Omega,\lambda,\mu):=\sup\left\{\Lambda_n(\Omega,\lambda,\mu), \Omega\subset\mathbb{R}^d,\ \Omega\ \text{convex},\ |\Omega|=1\right\}. \end{equation} Later on, we will show that problem \eqref{shoptprobconv} has a solution, implying that the supremum is attained and can be replaced by a maximum. Numerical simulations will be performed to approximate solutions to problems \eqref{shoptprob} and \eqref{shoptprobconv}, indicating that optimal shapes are likely to exist also for problem \eqref{shoptprob}. This is in accord with theoretical and numerical observations for the maximization of the scalar Steklov eigenvalues \cite{osting-steklov}, \cite{bogosel-bucur-giacomini}; however, the general existence theory is not completely settled, not even in the scalar case, when only a volume constraint is present. \subsection{The disk} In this section we focus on the case of the disk in dimension $d=2$ and we derive the closed form of the eigenvalues and eigenfunctions. This will provide a benchmark for the numerical approximation method and will also allow us to partially answer some questions regarding the maximality of the disk for the first non-zero eigenvalue. We introduce polar coordinates \[\bo u(r,\theta)=u_r(r,\theta)\bo e_r+u_\theta(r,\theta)\bo e_\theta,\] where \[\bo e_r=\cos(\theta)\bo e_1+\sin(\theta)\bo e_2\ \text{and}\ \bo e_\theta=-\sin(\theta)\bo e_1+\cos(\theta)\bo e_2.\] We consider $\bo u$ defined by a Fourier expansion \begin{equation} \label{solu} \bo u(r,\theta)=\begin{bmatrix}c_0^r(r)\\ c_0^\theta(r)\end{bmatrix}+\sum_{n=1}^\infty\begin{bmatrix}c_n^r(r)\\ c_n^\theta(r)\end{bmatrix}\cos(n\theta)+\sum_{n=1}^\infty\begin{bmatrix}s_n^r(r)\\ s_n^\theta(r)\end{bmatrix}\sin(n\theta) \end{equation} and search for solutions of the partial differential equation $\di A(e(\bo u)) = 0$, which implies (cf.~\cite{VMFG}) that we have \begin{equation} \begin{array}{c} c_0^r(r)=A_0 r \\ c_0^\theta(r)=B_0r, \end{array} \label{eq:n0} \end{equation} \begin{equation} \begin{array}{c} c_1^r(r)=-A_1^0+A_1 \left(\frac{-\lambda+\mu}{\lambda+\mu}\right) r^2\\ c_1^\theta(r)=B_1^0+B_1\left(\frac{3\lambda+5\mu}{\lambda+\mu}\right)r^2\\ s_1^r(r)=B_1^0-B_1 \left(\frac{-\lambda+\mu}{\lambda+\mu}\right) r^2\\ s_1^\theta(r)=A_1^0+A_1\left(\frac{3\lambda+5\mu}{\lambda+\mu}\right)r^2\\ \end{array} \label{eq:n1} \end{equation} and \begin{equation} \begin{array}{c} c_n^r(r)=-A_n^0 r^{n-1}+A_n \left(\frac{-n\lambda-(n-2)\mu}{n(\lambda+\mu)}\right) r^{n+1}\\ c_n^\theta(r)=B_n^0r^{n-1}+B_n\left(\frac{(n+2)\lambda+(n+4)\mu}{n(\lambda+\mu)}\right)r^{n+1}\\ s_n^r(r)=B_n^0r^{n-1}-B_n \left(\frac{-n\lambda-(n-2)\mu}{n(\lambda+\mu)}\right) r^{n+1}\\ s_n^\theta(r)=A_n^0r^{n-1}+A_n\left(\frac{(n+2)\lambda+(n+4)\mu}{n(\lambda+\mu)}\right)r^{n+1}\\ \end{array},\ n=2,3,...
\label{eq:ngen} \end{equation} for some constants $A_i, B_i,\ i=0,1,...$ and $A_i^0,B_i^0,\ i=1,2,...$ Moreover, as shown in~\cite{VMFG}, for a solution of type \eqref{solu} in the disk we have \begin{align*}Ae(\bo u)\bo n(r)&=\begin{bmatrix}(\lambda+2\mu)c_0^{r}\ '(r)+\frac{\lambda}{r}c_0^r(r)\\ \mu\left(c_0^\theta\ '(r)-\frac{1}{r}c_0^\theta(r)\right)\end{bmatrix}\\ &+\sum_{n=1}^\infty\begin{bmatrix}(\lambda+2\mu)c_n^{r}\ '(r)+\frac{\lambda}{r}c_n^r(r)+\frac{n\lambda}{r}s_n^\theta(r)\\ \mu\left(\frac{n}{r}s_n^r(r)+c_n^\theta\ '(r)-\frac{1}{r}c_n^\theta(r)\right)\end{bmatrix}\cos(n\theta)\\ &+\sum_{n=1}^\infty\begin{bmatrix}(\lambda+2\mu)s_n^{r}\ '(r)+\frac{\lambda}{r}s_n^r(r)-\frac{n\lambda}{r}c_n^\theta(r)\\ \mu\left(-\frac{n}{r}c_n^r(r)+s_n^\theta\ '(r)-\frac{1}{r}s_n^\theta(r)\right)\end{bmatrix}\sin(n\theta). \end{align*} \begin{thm} \label{thm:eigdisk} The Steklov-Lam\'{e} spectrum of a disk of radius equal to $R$ is the sorted list of the following real numbers: \begin{enumerate}[label=\upshape{(\roman*)}] \item $0$ (with multiplicity 3), \item $\frac{2(\lambda+\mu)}{R},$ \item $\frac{4\mu(\lambda+\mu)}{(\lambda+3\mu)R}$ (counted twice) and \item $\frac{2\mu(n-1)}{R}$ (counted twice), for $n=2,3,...$ and \item $\frac{2(n+1)\mu(\lambda+\mu)}{(\lambda+3\mu)R}$ (counted twice), for $n=2,3,...$ \end{enumerate} The eigenfunctions in each of the previous cases are linear combinations of the following sets of functions \begin{enumerate}[label=\upshape{(\roman*)}] \item $\left\{(1,0),\ (0,1),\ r(-\sin(\theta),\cos(\theta))\right\}$ \item $\left\{r(\cos(\theta),\sin(\theta))\right\}$ \item $ \Big\{\Big(2(R^2-r^2)+\frac{(\lambda+3\mu)r^2\cos(2\theta)}{\lambda+\mu},\frac{(\lambda+3\mu)r^2\sin(2\theta)}{\lambda+\mu}\Big)$, $\Big(\frac{(\lambda+3\mu)r^2\sin(2\theta)}{\lambda+\mu},2(R^2-r^2)-\frac{(\lambda+3\mu)r^2\cos(2\theta)}{\lambda+\mu}\Big)\Big\}$ \item $\left\{r^{n-1}\left(\cos((n-1)\theta),-\sin((n-1)\theta)\right),r^{n-1}\left(\sin((n-1)\theta),\cos((n-1)\theta)\right)\right\}$ \item $\left\{(f_1(r,\theta),f_2(r,\theta)),(f_3(r,\theta),f_4(r,\theta))\right\},$ where \end{enumerate} $ f_1(r,\theta)=\frac{r^{n-1}}{(\lambda+\mu)n}\left(-(\lambda+\mu)(n+1)(r^2-R^2)\cos((n-1)\theta)+(\lambda+3\mu)r^2\cos((n+1)\theta)\right),$\newline $ f_2(r,\theta)=\frac{r^{n-1}}{(\lambda+\mu)n}\left((\lambda+\mu)(n+1)(r^2-R^2)\sin((n-1)\theta)+(\lambda+3\mu)r^2\sin((n+1)\theta)\right),$\newline $ f_3(r,\theta)=\frac{r^{n-1}}{(\lambda+\mu)n}\left((\lambda+\mu)(n+1)(r^2-R^2)\sin((n-1)\theta)-(\lambda+3\mu)r^2\sin((n+1)\theta)\right),$\newline $ f_4(r,\theta)=\frac{r^{n-1}}{(\lambda+\mu)n}\left((\lambda+\mu)(n+1)(r^2-R^2)\cos((n-1)\theta)+(\lambda+3\mu)r^2\cos((n+1)\theta)\right).$ \end{thm} \begin{proof} The eigenvalues can be determined by imposing \begin{equation} \label{eigeq} Ae(\bo u)\bo n = \Lambda \bo u \end{equation} at the boundary of the disk which can be assumed to be centered at the origin and so, on the boundary we have $r=R.$ We separate the study in the cases $n=0,$ $n=1$ and $n\geq2.$ \underline{\bf Case $n=0$}: The boundary condition is given by \[\begin{bmatrix}(\lambda+2\mu)c_0^{r}\ '(R)+\frac{\lambda}{R}c_0^r(R)\\ \mu\left(c_0^\theta\ '(R)-\frac{1}{R}c_0^\theta(R)\right)\end{bmatrix}=\Lambda\begin{bmatrix}c_0^r(R)\\ c_0^\theta(R)\end{bmatrix}\] and taking into account \eqref{eq:n0} we obtain \begin{align*}\begin{bmatrix}(\lambda+2\mu)A_0+\lambda A_0\\ \mu\left(B_0-B_0\right)\end{bmatrix}=\Lambda\begin{bmatrix}A_0 R\\ B_0R\end{bmatrix}&\Longleftrightarrow\begin{bmatrix}(2\lambda+2\mu)A_0\\ 
0\end{bmatrix}=\Lambda\begin{bmatrix}A_0 R\\ B_0R\end{bmatrix}\\ &\Longleftrightarrow\underbrace{\begin{bmatrix}\frac{2(\lambda+\mu)}{R}&0\\0&0\end{bmatrix}}_{:=\bo M_0}\begin{bmatrix}A_0\\B_0\end{bmatrix}=\Lambda\begin{bmatrix}A_0\\B_0\end{bmatrix}. \end{align*} The Steklov-Lam\'{e} eigenvalues in this case are the eigenvalues of matrix $\bo M_0$, which are $0$ and $\frac{2(\lambda+\mu)}{R}.$ The corresponding eigenfunctions can be obtained from the eigenvectors of matrix $\bo v_1=(1,0)$ (associated to the eigenvalue $\frac{2(\lambda+\mu)}{R}$) and $\bo v_2=(0,1)$ (associated to the eigenvalue $0$). In the case $\bo v_1=(1,0),$ from \eqref{eq:n0} we obtain $c_0^r(r)=r;\ c_0^\theta(r)=0,$ which implies that \[\bo u(r,\theta)=r\bo e_r=r(\cos(\theta),\sin(\theta)).\] In the case $\bo v_2=(0,1),$ again from \eqref{eq:n0} we obtain $c_0^r(r)=0;\ c_0^\theta(r)=r,$ which implies that \[\bo u(r,\theta)=r\bo e_\theta=r(-\sin(\theta),\cos(\theta)).\] \underline{\bf Case $n=1$}: The boundary condition is given by \begin{align*}&\begin{bmatrix}(\lambda+2\mu)c_1^{r}\ '(R)+\frac{\lambda}{R}c_1^r(R)+\frac{\lambda}{R}s_1^\theta(R)\\ \mu\left(\frac{1}{R}s_1^r(R)+c_1^\theta\ '(R)-\frac{1}{R}c_1^\theta(R)\right)\end{bmatrix}\cos(\theta)\\ +&\begin{bmatrix}(\lambda+2\mu)s_1^{r}\ '(R)+\frac{\lambda}{R}s_1^r(R)-\frac{\lambda}{R}c_1^\theta(R)\\ \mu\left(-\frac{1}{R}c_1^r(R)+s_1^\theta\ '(R)-\frac{1}{R}s_1^\theta(R)\right)\end{bmatrix}\sin(\theta)\\ =& \Lambda\left(\begin{bmatrix}c_1^r(R)\\ c_1^\theta(R)\end{bmatrix}\cos(\theta)+\begin{bmatrix}s_1^r(R)\\ s_1^\theta(R)\end{bmatrix}\sin(\theta)\right) \end{align*} and since the previous equality shall hold for all values of $\theta$ we conclude that we must have \[\begin{bmatrix}(\lambda+2\mu)c_1^{r}\ '(R)+\frac{\lambda}{R}c_1^r(R)+\frac{\lambda}{R}s_1^\theta(R)\\ \mu\left(\frac{1}{R}s_1^r(R)+c_1^\theta\ '(R)-\frac{1}{R}c_1^\theta(R)\right)\\ (\lambda+2\mu)s_1^{r}\ '(R)+\frac{\lambda}{R}s_1^r(R)-\frac{\lambda}{R}c_1^\theta(R)\\ \mu\left(-\frac{1}{R}c_1^r(R)+s_1^\theta\ '(R)-\frac{1}{R}s_1^\theta(R)\right)\end{bmatrix}=\Lambda\begin{bmatrix}c_1^r(R)\\ c_1^\theta(R)\\ s_1^r(R)\\ s_1^\theta(R)\end{bmatrix}.\] Taking into account \eqref{eq:n1}, \[\scriptsize\hspace{-1cm}\begin{bmatrix}(\lambda+2\mu)A_1\left(\frac{-\lambda+\mu}{\lambda+\mu}\right)2R-\frac{\lambda}{R}A_1^0+\lambda A_1\left(\frac{-\lambda+\mu}{\lambda+\mu}\right)R+\frac{\lambda}{R}A_1^0+\lambda A_1\left(\frac{3\lambda+5\mu}{\lambda+\mu}\right)R\\ \mu\left(\frac{1}{R}B_1^0-B_1\left(\frac{-\lambda+\mu}{\lambda+\mu}\right)R+2B_1\left(\frac{3\lambda+5\mu}{\lambda+\mu}\right)R-\frac{1}{R}B_1^0-B_1\left(\frac{3\lambda+5\mu}{\lambda+\mu}\right)R\right)\\ -(\lambda+2\mu)B_1\left(\frac{-\lambda+\mu}{\lambda+\mu}\right)2R+\frac{\lambda}{R}B_1^0-\lambda B_1\left(\frac{-\lambda+\mu}{\lambda+\mu}\right)R-\frac{\lambda}{R}B_1^0-\lambda B_1\left(\frac{3\lambda+5\mu}{\lambda+\mu}\right)R\\ \mu\left(\frac{1}{R}A_1^0-A_1\left(\frac{-\lambda+\mu}{\lambda+\mu}\right)R+A_1\left(\frac{3\lambda+5\mu}{\lambda+\mu}\right)2R-\frac{1}{R}A_1^0-A_1\left(\frac{3\lambda+5\mu}{\lambda+\mu}\right)R\right)\end{bmatrix}=\] \[\scriptsize=\Lambda\begin{bmatrix}-A_1^0+A_1 \left(\frac{-\lambda+\mu}{\lambda+\mu}\right) R^2\\ B_1^0+B_1\left(\frac{3\lambda+5\mu}{\lambda+\mu}\right)R^2\\ B_1^0-B_1 \left(\frac{-\lambda+\mu}{\lambda+\mu}\right) R^2\\ A_1^0+A_1\left(\frac{3\lambda+5\mu}{\lambda+\mu}\right)R^2\end{bmatrix}\Longleftrightarrow \begin{bmatrix}4\mu A_1R\\ 4\mu B_1R\\ -4\mu B_1R\\ 4\mu A_1R\end{bmatrix}=\Lambda\begin{bmatrix}-A_1^0+A_1 
\left(\frac{-\lambda+\mu}{\lambda+\mu}\right) R^2\\ B_1^0+B_1\left(\frac{3\lambda+5\mu}{\lambda+\mu}\right)R^2\\ B_1^0-B_1 \left(\frac{-\lambda+\mu}{\lambda+\mu}\right) R^2\\ A_1^0+A_1\left(\frac{3\lambda+5\mu}{\lambda+\mu}\right)R^2\end{bmatrix}\] which can be written as \begin{equation}\scriptsize \label{primigual} \bo N_1 \begin{bmatrix}A_1^0\\ B_1^0\\ A_1\\B_1\end{bmatrix}=\Lambda \bo P_1 \begin{bmatrix}A_1^0\\ B_1^0\\ A_1\\B_1\end{bmatrix}, \end{equation} where \[\scriptsize\bo N_1=\begin{bmatrix}0 &0&4\mu R&0\\ 0 & 0&0&4\mu R\\ 0 & 0&0&-4\mu R\\ 0 &0&4\mu R&0 \end{bmatrix}\quad\text{and}\quad\bo P_1=\begin{bmatrix}-1&0&\left(\frac{-\lambda+\mu}{\lambda+\mu}\right)R^2&0\\ 0&1&0&\left(\frac{3\lambda+5\mu}{\lambda+\mu}\right)R^2\\ 0 &1&0&\left(\frac{\lambda-\mu}{\lambda+\mu}\right)R^2\\ 1&0&\left(\frac{3\lambda+5\mu}{\lambda+\mu}\right)R^2&0\end{bmatrix}.\] We have $\displaystyle{\det(\bo P_1)=-\frac{4(\lambda+3\mu)^2R^4}{(\lambda+\mu)^2}}<0$ which justifies the invertibility of the matrix $\bo P_1$ and we conclude that \eqref{primigual} is equivalent to \begin{equation} \label{primigual2} \underbrace{\bo P_1^{-1}\cdot\bo N_1}_{:=\bo M_1} \begin{bmatrix}A_1^0\\ B_1^0\\ A_1\\B_1\end{bmatrix}=\Lambda \begin{bmatrix}A_1^0\\ B_1^0\\ A_1\\B_1\end{bmatrix}, \end{equation} and the Steklov-Lam\'{e} eigenvalues are the eigenvalues of matrix $\bo M_1$, which are $0$ (double eigenvalue) and $\frac{4\mu(\lambda+\mu)}{(\lambda+3\mu)R}$ (double eigenvalue). The eigenfunctions can be calculated from the eigenvectors, $\bo v_1=(-2R^2,0,1,0)$ and $\bo v_2=(0,-2R^2,0,1)$ (associated to the eigenvalue $\frac{4\mu(\lambda+\mu)}{(\lambda+3\mu)R}$) and $\bo v_3=(-1,0,0,0)$ and $\bo v_4=(0,1,0,0)$ (associated to the eigenvalue $0$). For instance, for $\bo v_1$ we get\newline $ c_1^r(r)=2R^2+\left(\frac{-\lambda+\mu}{\lambda+\mu}\right)r^2;\ c_1^\theta(r)=0;\ s_1^r(r)=0;\ s_1^\theta(r)=-2R^2+\left(\frac{3\lambda+5\mu}{\lambda+\mu}\right)r^2$ and $u_r(r,\theta)=\left(2R^2+\left(\frac{-\lambda+\mu}{\lambda+\mu}\right)r^2\right)\cos(\theta)$, $u_\theta(r,\theta)=\left(-2R^2+\left(\frac{3\lambda+5\mu}{\lambda+\mu}\right)r^2\right)\sin(\theta)$ which implies that \begin{align*} \bo u(r,\theta)=&u_r(r,\theta)\bo e_r+u_\theta(r,\theta)\bo e_\theta\\ =&\left(2(R^2-r^2)+\frac{(\lambda+3\mu)r^2\cos(2\theta)}{\lambda+\mu},\frac{(\lambda+3\mu)r^2\sin(2\theta)}{\lambda+\mu}\right).\end{align*} The eigenfunction associated to $\bo v_2$ is computed in a similar way and is given by \begin{align*}\bo u(r,\theta)=&u_r(r,\theta)\bo e_r+u_\theta(r,\theta)\bo e_\theta\\=&\left(\frac{(\lambda+3\mu)r^2\sin(2\theta)}{\lambda+\mu},2(R^2-r^2)-\frac{(\lambda+3\mu)r^2\cos(2\theta)}{\lambda+\mu}\right).\end{align*} The computation of the eigenfunction associated to $\bo v_3$ is similar, obtaining $c_1^r(r)=1,\ c_1^\theta(r)=0,\ s_1^r(r)=0,\ s_1^\theta(r)=-1 \Longrightarrow u_r(r,\theta)=\cos(\theta);\ u_\theta(r,\theta)=-\sin(\theta)$ which implies that \[\bo u(r,\theta)=\left(\cos^2(\theta)+\sin^2(\theta),\cos(\theta)\sin(\theta)-\sin(\theta)\cos(\theta)\right)=(1,0).\] Using the eigenvector $\bo v_4$ we get $\bo u(r,\theta)=(0,1)$ \underline{\bf Case $n\geq2$}: The computations in this case are similar to those of the case $n=1$. 
We have \begin{align*}& \begin{bmatrix}(\lambda+2\mu)c_n^{r}\ '(R)+\frac{\lambda}{R}c_n^r(R)+\frac{\lambda}{R}ns_n^\theta(R)\\ \mu\left(\frac{n}{R}s_n^r(R)+c_n^\theta\ '(R)-\frac{1}{R}c_n^\theta(R)\right)\end{bmatrix}\cos(n\theta)\\ +&\begin{bmatrix}(\lambda+2\mu)s_n^{r}\ '(R)+\frac{\lambda}{R}s_n^r(R)-\frac{\lambda}{R}nc_n^\theta(R)\\ \mu\left(-\frac{n}{R}c_n^r(R)+s_n^\theta\ '(R)-\frac{1}{R}s_n^\theta(R)\right)\end{bmatrix}\sin(n\theta)\\ =&\Lambda\left(\begin{bmatrix}c_n^r(R)\\ c_n^\theta(R)\end{bmatrix}\cos(n\theta)+\begin{bmatrix}s_n^r(R)\\ s_n^\theta(R)\end{bmatrix}\sin(n\theta)\right) \end{align*} which implies that \begin{equation} \label{eqn} \begin{bmatrix}(\lambda+2\mu)c_n^{r}\ '(R)+\frac{\lambda}{R}c_n^r(R)+\frac{\lambda}{R}ns_n^\theta(R)\\ \mu\left(\frac{n}{R}s_n^r(R)+c_n^\theta\ '(R)-\frac{1}{R}c_n^\theta(R)\right)\\ (\lambda+2\mu)s_n^{r}\ '(R)+\frac{\lambda}{R}s_n^r(R)-\frac{\lambda}{R}nc_n^\theta(R)\\ \mu\left(-\frac{n}{R}c_n^r(R)+s_n^\theta\ '(R)-\frac{1}{R}s_n^\theta(R)\right)\end{bmatrix}=\Lambda\begin{bmatrix}c_n^r(R)\\ c_n^\theta(R)\\ s_n^r(R)\\ s_n^\theta(R)\end{bmatrix}. \end{equation} Using \eqref{eq:ngen} we see that \eqref{eqn} can be written as \begin{equation} \label{primigualn} \bo N_n \begin{bmatrix}A_n^0\\ B_n^0\\ A_n\\B_n\end{bmatrix}=\Lambda \bo P_n \begin{bmatrix}A_n^0\\ B_n^0\\ A_n\\B_n\end{bmatrix}, \end{equation} where \[\bo N_n=\begin{bmatrix}-2\mu(n-1)R^{n-2} &0&-2\mu\frac{(n-2)(n+1)}{n} R^n&0\\ 0 & 2\mu(n-1)R^{n-2}&0&2\mu(n+1)R^n\\ 0 & 2\mu(n-1)R^{n-2}&0&2\mu\frac{(n-2)(n+1)}{n}R^n\\ 2\mu(n-1)R^{n-2} &0&2\mu(n+1)R^n&0 \end{bmatrix}\] and \[\bo P_n=\begin{bmatrix}-R^{n-1}&0&-\frac{\mu(n-2)+\lambda n}{n(\lambda+\mu)}R^{n+1}&0\\ 0&R^{n-1}&0&\frac{\lambda(n+2)+\mu(n+4)}{n(\lambda+\mu)}R^{n+1}\\ 0&R^{n-1}&0&\frac{\mu(n-2)+\lambda n}{n(\lambda+\mu)}R^{n+1}\\ R^{n-1}&0&\frac{\lambda(n+2)+\mu(n+4)}{n(\lambda+\mu)}R^{n+1}&0\end{bmatrix}.\] The matrix $\bo P_n$ is invertible because $\displaystyle{\det(\bo P_n)=-\frac{4(\lambda+3\mu)^2R^{4n}}{(\lambda+\mu)^2n^2}}<0$ and \eqref{primigualn} is equivalent to \begin{equation} \label{primigualfinal} \underbrace{\bo P_n^{-1}\cdot\bo N_n}_{:=\bo M_n} \begin{bmatrix}A_n^0\\ B_n^0\\ A_n\\B_n\end{bmatrix}=\Lambda \begin{bmatrix}A_n^0\\ B_n^0\\ A_n\\B_n\end{bmatrix}, \end{equation} and the Steklov-Lam\'{e} eigenvalues are the eigenvalues of matrix $\bo M_n$, which are $\frac{2\mu(n-1)}{R}$ (double eigenvalue) and $\frac{2(n+1)\mu(\lambda+\mu)}{(\lambda+3\mu)R}$ (double eigenvalue). The eigenfunctions can be calculated from the eigenvectors, $\bo v_1=(-\frac{(n+1)R^2}{n},0,1,0)$ and $\bo v_2=(0,-\frac{(n+1)R^2}{n},0,1)$ (associated to the eigenvalue $\frac{2(n+1)\mu(\lambda+\mu)}{(\lambda+3\mu)R}$) and $\bo v_3=(-1,0,0,0)$ and $\bo v_4=(0,1,0,0)$ (associated to the eigenvalue $\frac{2\mu(n-1)}{R}$.) 
Using the eigenvector $\bo v_3$ we get \[c_n^r(r)=r^{n-1},\ c_n^\theta(r)=0,\ s_n^r(r)=0,\ s_n^\theta(r)=-r^{n-1}\] and \[u_r(r,\theta)=r^{n-1}\cos(n\theta),\ u_\theta(r,\theta)=-r^{n-1}\sin(n\theta).\] Therefore, we obtain \begin{align*}\bo u(r,\theta)=r^{n-1}\left(\cos((n-1)\theta),-\sin((n-1)\theta)\right). \end{align*} Following the same steps using the eigenvector $\bo v_4$ we obtain \[\bo u(r,\theta)=r^{n-1}\left(\sin((n-1)\theta),\cos((n-1)\theta)\right).\] Finally, from the eigenvector $\bo v_1$ we get, for $n=2,3,...$, \[ \begin{array}{c} c_n^r(r)=\frac{(n+1)R^2}{n} r^{n-1}+ \left(\frac{-n\lambda-(n-2)\mu}{n(\lambda+\mu)}\right) r^{n+1};\quad c_n^\theta(r)=0\\ s_n^r(r)=0;\quad s_n^\theta(r)=-\frac{(n+1)R^2}{n}r^{n-1}+\left(\frac{(n+2)\lambda+(n+4)\mu}{n(\lambda+\mu)}\right)r^{n+1}\\ \end{array}, \label{eq:ngendm}\] which implies that \[u_r(r,\theta)=\left(\frac{(n+1)R^2}{n} r^{n-1}+ \left(\frac{-n\lambda-(n-2)\mu}{n(\lambda+\mu)}\right) r^{n+1}\right)\cos(n\theta)\] and \[u_\theta(r,\theta)=\left(-\frac{(n+1)R^2}{n}r^{n-1}+\left(\frac{(n+2)\lambda+(n+4)\mu}{n(\lambda+\mu)}\right)r^{n+1}\right)\sin(n\theta).\] Therefore,{\small \[\textstyle \bo u(r,\theta)_1=\frac{r^{n-1}}{(\lambda+\mu)n}\left(-(\lambda+\mu)(n+1)(r^2-R^2)\cos((n-1)\theta)+(\lambda+3\mu)r^2\cos((n+1)\theta)\right)\] } and in a similar fashion, we get {\small \[\textstyle \bo u(r,\theta)_2=\frac{r^{n-1}}{(\lambda+\mu)n}\left((\lambda+\mu)(n+1)(r^2-R^2)\sin((n-1)\theta)+(\lambda+3\mu)r^2\sin((n+1)\theta)\right)\]} which concludes the proof. \end{proof} Denote by $c_2(\lambda,\mu)=\frac{2(\lambda+\mu)}{R}$, $c_3(\lambda,\mu)=\frac{4\mu(\lambda+\mu)}{(\lambda+3\mu)R}$ and $c_4(\lambda,\mu)=\frac{2\mu}{R}$ the smallest eigenvalues obtained, respectively, in cases (ii), (iii) and (iv) of Theorem~\ref{thm:eigdisk}. The following result helps identify the smallest non-zero eigenvalue of the disk. \begin{prop}\label{prop:help-order} We have \begin{itemize} \item $c_2(\lambda,\mu)\leq c_4(\lambda,\mu)\leq c_3(\lambda,\mu)$, in the region $\left\{(\mu,\lambda)\in\mathbb{R}^2:0<\mu,\lambda<-3\mu\right\}$, \item $c_4(\lambda,\mu)\leq c_3(\lambda,\mu)\leq c_2(\lambda,\mu)$, in $\left\{(\mu,\lambda)\in\mathbb{R}^2:0<\mu,\lambda\geq\mu\right\}$, \item $c_3(\lambda,\mu)\leq c_2(\lambda,\mu)\leq c_4(\lambda,\mu)$, in $\left\{(\mu,\lambda)\in\mathbb{R}^2:0<\mu,-3\mu<\lambda\leq0\right\}$, \item $c_3(\lambda,\mu)\leq c_4(\lambda,\mu)\leq c_2(\lambda,\mu)$, in $\left\{(\mu,\lambda)\in\mathbb{R}^2:0<\mu,0<\lambda\leq\mu\right\}.$ \end{itemize} \end{prop} Since in dimension two we have $\lambda+\mu>0$, which implies $\lambda>-\mu>-3\mu$, the first situation listed in Proposition \ref{prop:help-order} cannot hold. Therefore, we have the following characterization of the first non-zero Steklov-Lam\'e eigenvalue of the disk. \begin{prop} The smallest strictly positive eigenvalue of the disk $D_R$ of radius $R$ is given by \begin{itemize} \item $\Lambda_1(D_R) = \frac{2\mu}{R}$ when $\lambda> \mu$. In this case the associated eigenspace has dimension $2$, generated by \[ \bo u_1 = (x_1,-x_2), \bo u_2 = (x_2,x_1),\] verifying $Ae(\bo u_i):e(\bo u_i) \equiv 4\mu$ on $\Bbb{R}^2$ and $|\bo u_i|^2 = r^2$ on $\Bbb{R}^2$. \item $\Lambda_1(D_R)=\frac{4\mu(\lambda+\mu)}{(\lambda+3\mu)R}$ when $\lambda\leq \mu$.
The associated eigenspace has dimension two and is generated by \[ \bo u_1 = \left(2(R^2-x_1^2-x_2^2)+\frac{\lambda+3\mu}{\lambda+\mu}(x_1^2-x_2^2), \frac{\lambda+3\mu}{\lambda+\mu}2x_1x_2\right),\] \[ \bo u_2 = \left( \frac{\lambda+3\mu}{\lambda+\mu}2x_1x_2,2(R^2-x_1^2-x_2^2)-\frac{\lambda+3\mu}{\lambda+\mu}(x_1^2-x_2^2)\right).\] Furthermore, we have \[ |\bo u_1|^2+|\bo u_2|^2 = 8(R^2-r^2)^2+2\left(\frac{\lambda+3\mu}{\lambda+\mu}\right)^2 r^4\] and \[ Ae(\bo u_1):e(\bo u_1)+Ae(\bo u_2):e(\bo u_2) = 32\mu \frac{\lambda+3\mu}{\lambda+\mu} r^2.\] \end{itemize} \label{prop:first-eig} \end{prop} The proof is immediate by investigating the order of the eigenvalues found in Theorem \ref{thm:eigdisk} in view of the observations made in Proposition \ref{prop:help-order}. Knowing the eigenstructure of the disk allows us to prove the following result, similar to the scalar results of Weinstock \cite{weinstock} and Brock \cite{brock}. \begin{thm}\label{thm:optimality-disk} Suppose $\lambda>\mu$. Then the disk maximizes $\Lambda_1(\Omega)$ when: {\rm (a)} $\Omega$ has fixed volume. {\rm (b)} $\Omega$ is convex with fixed perimeter. \end{thm} \begin{proof} For simplicity, suppose $\Omega$ has area $\pi$ (or perimeter $2\pi$). In view of Proposition \ref{prop:first-eig}, the first non-zero eigenvalue of the unit disk $\Bbb D$ in this case is $\Lambda_1(\Bbb D)=\Lambda_2(\Bbb D)=2\mu$. Consider the corresponding eigenfunctions $\bo u_1=(r\cos \theta,-r\sin \theta),\bo u_2=(r\sin \theta,r\cos \theta)$. Then it is straightforward to notice that $Ae(\bo u_i):e(\bo u_i) = 4\mu$ and $|\bo u_i|^2= r^2$ on $\Bbb{R}^2$. Consider now a general $\Omega \subset \Bbb{R}^2$ with $|\Omega| = \pi$. Let us take the space $X_1=\text{Span}\{\bo u_{0,1},\bo u_{0,2},\bo u_{0,3},\bo u_1\}$ as a test space in \eqref{eq:rayleigh} for $\Lambda_1(\Omega)$, with $\bo u_1 = (r\cos \theta,-r\sin\theta)$, an eigenfunction associated to the first non-zero eigenvalue of the disk. We denote \[ \bo u_{0,1} = (1,0), \bo u_{0,2} = (0,1), \bo u_{0,3} = (-x_2,x_1),\] a basis for the rigid motions in dimension two. We may observe that \[ \bo u_{0,1} \cdot \bo u_1 = x_1,\ \bo u_{0,2} \cdot \bo u_1 = -x_2,\ \bo u_{0,3} \cdot \bo u_1 = -2x_1x_2.\] Therefore the shape $\Omega$ can be translated and rotated such that $\int_{\partial \Omega} \bo u_{0,j}\cdot \bo u_1=0$. Indeed, for a fixed orientation $\alpha \in [0,2\pi]$ of $\Omega$ we can translate $\Omega$ such that $\int_{\partial \Omega} x_1 = \int_{\partial \Omega} x_2 = 0$. Denote by $\Omega_\alpha$ the resulting shape. One may observe that $\int_{\partial \Omega_0} (-2x_1x_2) = -\int_{\partial \Omega_{\pi/2}} (-2x_1x_2)$. Therefore, by continuity, there exists an $\alpha \in [0,\pi/2]$ such that $\int_{\partial \Omega_\alpha} x_1x_2=0$. Suppose now that $\Omega$ is translated and rotated such that $\int_{\partial \Omega}\bo u_1 \cdot \bo u_{0,j} = 0,\ j=1,2,3$. Let $\bo u = \alpha_1 \bo u_{0,1}+\alpha_2 \bo u_{0,2}+\alpha_3 \bo u_{0,3} +c_1\bo u_1$ be an element of the test space $X_1$ defined above. It is straightforward to observe that \[ Ae(\bo u ): e(\bo u) = 4\mu c_1^2 \text{ and } \int_{\partial \Omega} |\bo u|^2 = \int_{\partial \Omega} (\alpha_1^2+\alpha_2^2+\alpha_3^2r^2+ c_1^2 r^2).\] Therefore, the maximum of the associated Rayleigh quotient is \[ \max_{\bo u \in X_1} \frac{\int_\Omega Ae(\bo u):e(\bo u)}{\int_{\partial \Omega} |\bo u|^2}=\frac{4\mu |\Omega|}{\int_{\partial \Omega}r^2}.\] As a direct consequence, $\Lambda_1(\Omega) \leq \frac{4\mu|\Omega|}{\int_{\partial \Omega}r^2}$.
We can now answer the two questions raised in the statement of the theorem. (a) In \cite{brock} it is shown that $\int_{\partial \Omega} r^2$ is minimized by the disk at fixed volume. (b) In \cite{weinstock} it is shown that $2|\Omega|/\int_{\partial \Omega} r^2$ is again maximized by the disk, among convex domains with fixed perimeter. This is a consequence of the inequality \[ \frac{2|\Omega|}{ \int_{\partial \Omega} r^2} \leq \frac{2\pi}{|\partial \Omega|},\] which holds for all convex domains according to \cite{weinstock}. Moreover, in both cases above, when $\Omega$ is a disk, we have $\Lambda_1(\Omega) = \frac{4\mu|\Omega|}{\int_{\partial \Omega} r^2}$, showing that the upper bound is actually attained by the disk. The conclusion follows. \end{proof} \begin{rem} The case $\lambda \leq \mu$ is more challenging. Indeed, as indicated in Proposition \ref{prop:first-eig}, in this case $Ae(\bo u_j):e(\bo u_j)$, $j=1,2$, is no longer constant and the proof above no longer applies. Nevertheless, the numerical results presented in Section \ref{sec:num-results} indicate that the disk is still a maximizer even when $\lambda \leq \mu$.\end{rem} \subsection{Upper bounds for the Steklov-Lam\'e eigenvalues} In order to motivate the existence of solutions for optimization problems depending on the Steklov-Lam\'e eigenvalues, we derive upper bounds for these eigenvalues in terms of the classical Steklov eigenvalues $\sigma_n(\Omega)$ defined by \eqref{eq:steklov-eigs}. Variational characterizations exist for the Steklov eigenvalues, using Rayleigh quotients. For simplicity, consider the following one (see \cite{Bucur-Nahon} for example) \begin{equation} \sigma_n(\Omega) = \min_{\dim S = n+1} \max_{u \in S\setminus H_0^1(\Omega)}\frac{\int_\Omega |\nabla u|^2}{\int_{\partial \Omega} u^2} \end{equation} where the minimum is taken over all subspaces $S$ of $H^1(\Omega)$ having dimension $n+1$. Various results concerning upper bounds for the Steklov eigenvalues exist, depending on different geometric quantities: \begin{itemize} \item $\sigma_k(\Omega)\Per(\Omega) \leq 2k\pi$ among simply connected domains in dimension two \cite{hersch-payne-schiffer}; in \cite{girouard-polterovich} it is shown that the inequality is sharp. \item $\sigma_k(\Omega) \leq c_d k^{2/d} \frac{|\Omega|^{\frac{d-2}{d}}}{\Per(\Omega)}$: valid in arbitrary dimension \cite{colbois-elsoufi-girouard}. \item $\sigma_k(\Omega) \leq C(d,k) \frac{\displaystyle |\Omega|^{\frac{1}{d-1}}}{\displaystyle \diam(\Omega)^{\frac{2d-1}{d-1}}}$: among convex sets, in arbitrary dimension, where $\diam(\Omega)$ denotes the diameter of the set $\Omega$ \cite{alsayed-bogosel-henrot-nacry}. \end{itemize} It is not our purpose here to give an exhaustive list. For a more complete survey see \cite{survey-girouard-polterowich}. Using these results, analogous ones can be obtained for the Steklov-Lam\'e eigenvalues via the result below. In the following, for simplicity, we denote by $t(d) = d(d+1)/2$ the triangular number associated to the positive integer $d$, which is the dimension of the space of rigid motions $\bo R(\Omega)$. \begin{prop} \label{prop:upper-bounds} For every $n\geq 1$ we have \[ \Lambda_n(\Omega) \leq (2\mu+d\lambda)\sigma_{dn+d^2(d+1)/2-1}(\Omega).
\] \end{prop} \begin{proof} Given $\bo u = (u_i)_{i=1}^d\in \bo H^1(\Omega)$ we have \begin{align*} &Ae(\bo u): e(\bo u) = 2\mu |e(\bo u)|^2+\lambda (\di \bo u)^2 \\ & = 2\mu \sum_{i,j=1}^d\frac{1}{2}( \partial_{x_i} u_j+\partial_{x_j} u_i)^2 + \lambda( \sum_{i=1}^d\partial_{x_i}u_i)^2\\ &\leq 2\mu\sum_{i,j=1}^d (\partial_{x_i} u_j)^2+d\lambda \sum_{i=1}^d (\partial_{x_i}u_i)^2\leq (2\mu+d\lambda)\sum_{i=1}^d\|\nabla u_i\|_{\bo L^2(\Omega)}^2, \end{align*} where we used the classical inequality $(\sum_{i=1}^d x_i)^m \leq m\sum_{i=1}^m x_i^2$. Consider now the the first $d(n+t(d))$ eigenfunctions associated to the eigenvalues $\sigma_0(\Omega), ..., \sigma_{d(n+t(d))-1}(\Omega)$ for the Steklov problem \eqref{eq:steklov-eigs} on $\Omega$, giving a subspace of dimension $d(n+t(d))$ in $H^1(\Omega)$. Taking $n+t(d)$ vectors made of $d$ of these eigenfunctions we obtain a subspace $\bo S$ of $\bo H^1(\Omega)$ of dimension $n+t(d)$. Every $\bo u=(u_1,...,u_d)\in \bo S$ verifies $\|\nabla u_j\|_{L^2(\Omega)}^2 \leq \sigma_{d(n+t(d))-1}(\Omega)\|u_j\|_{L^2(\partial \Omega)}^2$, for every $j=1,...,d$. In view of the inequality proven above, we have \[ \int_\Omega Ae(\bo u):e(\bo u) \leq (2\mu+d\lambda) \sum_{i=1}^d \|\nabla u_i\|_{\bo L^2(\Omega)}^2 \leq (2\mu+d\lambda)\sigma_{d(n+t(d))-1}(\Omega) \int_{\partial \Omega} |\bo u|^2.\] Therefore, considering $\bo S$ as a test space in \eqref{eq:rayleigh} we obtain \begin{align*} \Lambda_n(\Omega) &\leq \max_{\bo u\in \bo S\setminus \bo H_0^1(\Omega)} \frac{\int_\Omega Ae(\bo u):e(\bo u)}{\int_{\partial \Omega} |\bo u|^2} \leq (2\mu+d\lambda)\sigma_{nd+dt(d)-1}(\Omega). \end{align*} \end{proof} Under the hypothesis $2\mu+d\lambda>0$ we have the following bounds for the Steklov-Lam\'e eigenvalues, depending on classical constraints. \begin{thm} \label{thm:upper-bounds} Let $\Omega$ be a bounded Lipschitz domain. Then we have: \begin{enumerate}[label=\upshape{(\roman*)}] \item If the perimeter of $\Omega$ is fixed then $\Lambda_k(\Omega) \Per(\Omega)^{\frac{1}{d-1}}$ is bounded from above. \item If the volume of $\Omega$ is fixed then $\Lambda_k(\Omega)$ is bounded from above. \item If the diameter of the convex set $\Omega$ is fixed the $\Lambda_k(\Omega)$ is bounded from above. \end{enumerate} \end{thm} \begin{proof} (a) and (b) are a consequence of the inequality $\sigma_k(\Omega) \leq c_d k^{2/d} \frac{|\Omega|^{\frac{d-2}{d}}}{\Per(\Omega)}$ proved in \cite{colbois-elsoufi-girouard} and of the isoperimetric inequality. (c) is a consequence of the inequality $\sigma_k(\Omega) \leq C(d,k) \frac{ |\Omega|^{\frac{1}{d-1}}}{ \diam(\Omega)^{\frac{2d-1}{d-1}}}$ proved in \cite{alsayed-bogosel-henrot-nacry} and the isodiametric inequality. \end{proof} \section{Stability of the spectrum on variable domains} \label{sec:existence} In the scalar case, the behavior of the Steklov eigenvalues \eqref{eq:steklov-eigs} with respect to domain perturbations was investigated in \cite{Bogosel}, \cite{Bucur-Nahon}, \cite{stability-steklov}. It is possible to generalize all these results to the Steklov-Lam\'e case. 
For $y \in \Bbb{R}^d$, $\xi$ a unit vector and $\varepsilon>0$ we define the cone \[ C(y,\xi,\varepsilon) = \{z \in \Bbb{R}^d : (z-y)\cdot \xi \geq \cos \varepsilon |z-y| \text{ and } 0<|z-y|<\varepsilon\}.\] Following \cite[Chapter 2]{henrot-pierre-english}, we say that $\Omega$ verifies the \emph{$\varepsilon$-cone condition} if for every $x \in \partial \Omega$ there exists a unit vector $\xi_x$ such that for every $y \in \overline \Omega \cap B(x,\varepsilon)$ we have $C(y,\xi_x,\varepsilon)\subset \Omega$. It can be shown that this condition is equivalent to $\Omega$ being Lipschitz with a prescribed upper bound on the Lipschitz constant. In particular, convex domains or domains star-shaped with respect to a ball verify an $\varepsilon$-cone property. In \cite[Proposition 2.3]{Bogosel} it is shown that if $D\subset \Bbb{R}^d$ is bounded and open and $\Omega \subset D$ verifies an $\varepsilon$-cone condition then $\Per(\Omega)$ is uniformly bounded by a constant depending only on $\varepsilon$ and $D$. In order to underline the behavior of the Steklov-Lam\'e eigenvalues with respect to sequences of domains for which the perimeter is not continuous, let us define the weighted Steklov-Lam\'e eigenvalues. Consider $\Theta\in L^\infty(\partial \Omega)$ with $\Theta\geq \beta>0$ and define $\Lambda(\Omega,\Theta)$ by \begin{equation} \left\{ \begin{array}{rcll} -\di A(e(\bo u)) & = & 0 & \text{ in } \Omega \\ Ae(\bo u)\bo n & = & \Lambda(\Omega,\Theta) \Theta \bo u & \text{ on } \partial \Omega, \end{array}\right. \label{eq:weighted-Steklov-eig} \end{equation} with the associated variational characterization \begin{equation} \Lambda_n(\Omega,\Theta) = \min_{\bo S_{n}\subset \bo H^1(\Omega)} \max_{\bo u \in \bo S_n\setminus\bo H_0^1(\Omega)} \frac{\int_\Omega Ae(\bo u):e(\bo u)}{\int_{\partial \Omega} \Theta |\bo u|^2} \label{eq:weighted-rayleigh} \end{equation} where the minimum is taken over all subspaces of $\bo H^1(\Omega)$ having dimension $n+t(d)$. It is obvious that $\Theta \equiv 1$ gives the Steklov-Lam\'e eigenvalues. The weighted eigenvalues \eqref{eq:weighted-Steklov-eig} enter into the framework presented in \cite{Sebastian}. Moreover, \eqref{eq:weighted-rayleigh} shows that $\Theta\geq \Theta'$ implies $\Lambda_n(\Omega,\Theta)\leq \Lambda_n(\Omega,\Theta')$. Furthermore, Proposition \ref{prop:upper-bounds} and Theorem \ref{thm:upper-bounds} extend to weighted Steklov-Lam\'e eigenvalues, since under the hypotheses considered, we have $\Lambda_n(\Omega,\Theta) \leq \frac{1}{\beta}\Lambda_n(\Omega)$. We say that a sequence of domains $(\Omega_n)$ converges to a domain $\Omega$ if the Hausdorff distance between their complements converges to zero. See \cite[Chapter 2]{henrot-pierre-english} for introductory aspects related to the convergence in the Hausdorff metric. The following result is proved in \cite{Bucur-Nahon} and extends a result from \cite{Bogosel}. \begin{prop} \label{prop:conv-traces} Let $\Omega$ be a bounded Lipschitz domain and let $(\Omega_n)$ be a sequence of domains verifying the $\varepsilon$-cone condition. Consider a weight function $\Theta \in L^\infty(\partial \Omega)$ and a sequence of weight functions $\Theta_n \in L^\infty(\partial \Omega_n)$ such that $\Theta, \Theta_n \geq \beta>0$ and \[ \limsup_{n\to \infty} \|\Theta_n\|_{L^\infty(\partial \Omega_n)}<\infty \text{ and } \Theta_n \mathcal H^{d-1}_{\lfloor \partial \Omega_n} \rightharpoonup \Theta \mathcal H^{d-1}_{\lfloor \partial \Omega} \] weakly-$*$ in the sense of measures.
If $(u_n) \subset H^1(\Bbb{R}^d)$ converges weakly to $u$ in $H^1(\Bbb{R}^d)$ then \[ \int_{\partial \Omega_n} \Theta_n u_n^2 \to \int_{\partial \Omega} \Theta u^2 \text{ as } n\to \infty.\] \end{prop} This result is a first step towards the desired stability result. In addition, the proof requires a uniform control, with respect to the domain, of the constants in Korn's inequality \begin{equation}\label{eq:korn} \|\nabla \bo u\|_{L^2(\Omega)}^2\leq C_1(\Omega) \|e(\bo u)\|_{L^2(\Omega)}^2+C_2(\Omega)\|\bo u\|_{L^2(\Omega)}^2, \end{equation} for all $\bo u\in\bo H^1(\Omega)$. There are few cases in which the dependence of the constants on the domain is made explicit. In particular, in \cite{korn-book} it is shown that if $\Omega$ has bounded diameter, is star-shaped with respect to a ball $B_{r_1}$ of radius $r_1$ and $\gamma$ is the distance between $\partial \Omega$ and $B_{r_1}$ then we may choose \begin{equation} C_1(\Omega)=C_1(\diam(\Omega)/r_1)^{d+1} \text{ and }C_2(\Omega) = C_2(\diam(\Omega)/r_1)^d\gamma^{-2}, \label{eq:uniform-constants-korn} \end{equation} with $C_1,C_2$ dimensional constants. Inequality \eqref{eq:korn} with constants \eqref{eq:uniform-constants-korn} is a consequence of \cite[Theorem 2.10]{korn-book} and is also presented in \cite{kondratiev-korn}. This result is of particular interest in our case, since bounds on the symmetrized gradient, together with bounds on the gradient in a small ball, are enough to obtain global bounds on the gradient. For the sake of completeness, we recall the result below. \begin{thm}(Theorem 2.10 from \cite{korn-book}) \label{thm:small-to-large} Suppose that $\Omega\subset \Bbb{R}^d$ is a bounded domain which is star-shaped with respect to the ball $B_{r_1} = \{|x|<r_1\}$. Then for any $\bo u \in \bo H^1(\Omega)$ we have the inequality \begin{equation} \label{eq:small-to-large} \|\nabla \bo u\|_{\bo L^2(\Omega)}^2 \leq C_1\Big( \frac{\diam(\Omega)}{r_1}\Big)^{d+1}\|e(\bo u)\|^2_{\bo L^2(\Omega)}+ C_2\Big( \frac{\diam(\Omega)}{r_1}\Big)^{d} \|\nabla \bo u\|_{\bo L^2(B_{r_1})}^2, \end{equation} where $C_1,C_2$ are constants depending on the dimension $d$. \end{thm} In the following, we prove a result similar to \cite[Theorem 2]{Sebastian} for the case of moving domains. \begin{thm}\label{thm:uniform-bounds-H1} Let $\Omega_n\subset \Bbb{R}^2$ be a sequence of domains verifying the $\varepsilon$-cone property, converging in the Hausdorff metric to the bounded open domain $\Omega$. Suppose that $\Omega_n, \Omega$ are star-shaped with respect to a ball $B_{r_1}$ which is compactly included in $\Omega$. Suppose the weights $\Theta_n$ verify the hypotheses of Proposition \ref{prop:conv-traces}. Let $K\subset \Omega$ be a compact set with $B_{r_1}\subset K$. Then for every sequence $\bo u_n \in \bo H^1(\Omega_n)$ there exists a constant $C$, independent of $\bo u_n$, such that \[ \|\bo u_n\|_{\bo H^1(K)}^2 \leq C\left(\|e(\bo u_n)\|_{\bo L^2(\Omega_n)}^2+ \int_{\partial \Omega_n} \Theta_n |\bo u_n|^2\right).\] Moreover, there exists a constant $C$, independent of $\bo u_n$, such that \begin{equation}\label{eq:uniform-bound-H1} \|\bo u_n\|_{\bo H^1(\Omega_n)}^2 \leq C\left(\|e(\bo u_n)\|_{\bo L^2(\Omega_n)}^2+ \int_{\partial \Omega_n} \Theta_n |\bo u_n|^2\right). \end{equation} \end{thm} \begin{proof} Since $\Omega_n$ verify the $\varepsilon$-cone property and converge to $\Omega$, which is bounded, we may assume without loss of generality that $\Omega_n$ are contained in a ball $B$ for $n$ large enough.
Following \cite[Proposition 2.2.17]{henrot-pierre-english}, the compact $K$ is contained in $\Omega_n$ for $n$ large enough. In order to prove the first inequality, assume, by contradiction, that there exists a sequence $\bo u_n\in \bo H^1(\Omega_n)$ such that \[ \|\bo u_n\|_{\bo H^1(K)} = 1 \text{ and } \|e(\bo u_n)\|_{\bo L^2(\Omega_n)}^2+ \int_{\partial \Omega_n} \Theta_n |\bo u_n|^2<\frac{1}{n}.\] The uniform bounds for $\|e(\bo u_n)\|_{\bo L^2(\Omega_n)}$ and $\|\nabla \bo u_n\|_{\bo L^2(B_{r_1})}$ (the latter coming from $\|\bo u_n\|_{\bo H^1(K)}=1$ and $B_{r_1}\subset K$), together with Theorem \ref{thm:small-to-large}, imply that $\|\nabla \bo u_n\|_{\bo L^2(\Omega_n)}$ is bounded uniformly with respect to $n$. Moreover, \[\frac{1}{n}> \int_{\partial \Omega_n} \Theta_n |\bo u_{n}|^2 \geq \beta \int_{\partial \Omega_n} |\bo u_{n}|^2.\] Thus, if $u$ is a generic component of $\bo u_{n}$ we find that $\|\nabla u\|_{L^2(\Omega_n)}^2+\beta \|u\|_{L^2(\partial \Omega_n)}^2$ is uniformly bounded from above. The first Robin-Laplace eigenvalue defined for $\beta>0$ by \[\lambda_{1,\beta}(\Omega) = \inf_{u \in H^1(\Omega), u\neq 0}\frac{\int_\Omega |\nabla u|^2+ \beta \int_{\partial \Omega} u^2}{\int_\Omega u^2}\] is minimized by the ball when the volume of $\Omega$ is fixed. The reader can consult \cite{robin-bucur-giacomini} and the references therein. Moreover, if $|\Omega|$ has an upper bound, then, in view of \cite[Corollary 3.2]{robin-bucur-giacomini}, $r\mapsto \lambda_{1,\beta}(B_r)$ is strictly decreasing and therefore has a strictly positive lower bound $q_\beta>0$, since $r$ is bounded from above. In our case, $\Omega_n$ have uniformly bounded perimeters, since $\Omega_n$ have the $\varepsilon$-cone property (see \cite[Proposition 2.3]{Bogosel}). Therefore, in view of the isoperimetric inequality, they also have uniformly bounded volumes. In view of the arguments above, there exists $q_\beta>0$ such that \[\int_{\Omega_n} |\nabla u|^2+ \beta \int_{\partial \Omega_n} u^2 \geq q_\beta \int_{\Omega_n} u^2.\] As a consequence, the norms $\|\bo u_n\|_{\bo L^2(\Omega_n)}$ are bounded, implying that $\|\bo u_n\|_{\bo H^1(\Omega_n)}$ are uniformly bounded from above. The sets $\Omega_n$ have Lipschitz boundaries with uniformly bounded constants; therefore, the extension operators from $H^1(\Omega_n)$ to $H^1(B)$ are uniformly bounded. Thus, we may consider the extensions $\widetilde{\bo u_n}\in \bo H^1(B)$ of $\bo u_n$, which are uniformly bounded in $\bo H^1(B)$. Up to extracting a subsequence, we may assume $\widetilde{\bo u_n}$ converge weakly to $\widetilde{\bo u} \in \bo H^1(B)$ and thus strongly in $\bo L^2(B)$. The set $\Omega$ is star-shaped with respect to $B_{r_1}$ and the distance between $B_{r_1}$ and $\partial \Omega$ is strictly positive. It is, thus, possible to write $\Omega$ as a union of compact sets $K_m$, $m\geq 1$, such that $B_{r_1}\subset K_m$, $K_m \subset K_{m+1}$ and $K_m$ are star-shaped with respect to $B_{r_1/2}$. Assuming $B_{r_1}$ is centered at the origin, it is enough to consider $K_m = \overline{ (1-\frac{1}{m+1})\Omega}$ for $m\geq 1$. Fix $m$ and a compact $K_m$ defined as above. In \cite[Proposition 2.2.17]{henrot-pierre-english} it is proved that if $\Omega_n \to \Omega$ in the Hausdorff metric and $K$ is a compact contained in $\Omega$ then $K$ is contained in $\Omega_n$ for all $n$ large enough. Therefore for $n$ large enough $K_m \subset \Omega_n$. Moreover, in view of the definition of $K_m$, the constants in Korn's inequality \eqref{eq:korn} may be chosen uniform with respect to $m$ as in \eqref{eq:uniform-constants-korn}.
Therefore, for $n \geq m$ and $k \geq 0$ we have \[\|\nabla (\bo u_{n+k}-\bo u_n)\|_{\bo L^2(K_m)}^2\leq C_1 \|e(\bo u_{n+k})-e(\bo u_n)\|_{\bo L^2(K_m)}^2+C_2\|\bo u_{n+k}-\bo u_n\|_{\bo L^2(K_m)}^2.\] Since $(\bo u_n)$ converge strongly in $\bo L^2(K_m)$ and $\|e(\bo u_n)\|_{\bo L^2(K_m)} \to 0$ we find that $(\nabla \bo u_{n})$ is a Cauchy sequence in $\bo L^2(K_m)$, implying that $\bo u_n$ converges strongly to $\widetilde{\bo u}$ in $\bo H^1(K_m)$. As a consequence $e(\widetilde{\bo u})=0$ in $K_m$, implying that $\widetilde{\bo u}$ is a rigid motion in $K_m$ for every $m$. Since $K_m$ is an increasing sequence of compacts, taking $m\to \infty$ we find that $\widetilde{\bo u}$ is a rigid motion on $\Omega$. Applying Proposition \ref{prop:conv-traces} we also find that $\int_{\partial \Omega} \Theta |\widetilde{\bo u}|^2 = 0$, showing that $\widetilde{\bo u}=0$ on $\partial \Omega$. Since a rigid motion which vanishes on $\partial \Omega$ vanishes identically, we conclude that $\widetilde{\bo u}=0$ in $\Omega$. In particular, for $m$ large enough we have $K \subset K_m$ so $\widetilde{\bo u}=0$ on $K$. However, the strong convergence of $\bo u_n$ to $\widetilde{\bo u}$ in $\bo H^1(K)$ implies $\|\widetilde{\bo u}\|_{\bo H^1(K)}=1$, a contradiction. In order to prove \eqref{eq:uniform-bound-H1} it is enough to pick $K=\overline{B_{r_1}}$ and use \eqref{eq:small-to-large}. \end{proof} We are now ready to prove the stability result for Steklov-Lam\'e eigenvalues. \begin{thm}\label{thm:weight-continuity} Let $\Omega$ be a bounded Lipschitz domain and let $\Omega_n$ be a sequence of domains with the $\varepsilon$-cone property, converging to $\Omega$ for the Hausdorff complementary distance. Assume the hypotheses of Theorem \ref{thm:uniform-bounds-H1} are verified. Consider weights $\Theta\in L^\infty(\partial \Omega), \Theta_n \in L^\infty(\partial \Omega_n)$ verifying the hypotheses of Proposition \ref{prop:conv-traces}. Then for all $k\geq 1$ we have \[ \lim_{n\to \infty} \Lambda_k (\Omega_n,\Theta_n)= \Lambda_k(\Omega,\Theta).\] \end{thm} \begin{proof} The proof is divided into two steps. {\bf Lower semicontinuity.} For each $n$ consider $\bo S_n\subset \bo H^1(\Omega_n)$ a subspace which attains $\Lambda_k(\Omega_n,\Theta_n)$ in \eqref{eq:weighted-rayleigh}. Following \cite{Bucur-Nahon}, consider $(\bo u_{p,n})_{p=-t(d)+1,...,0,...,k}$ an adapted basis for $\bo S_n$, i.e. a basis orthonormal with respect to $\bo u \mapsto \int_{\partial \Omega_n} \Theta_n |\bo u|^2$ and orthogonal relative to $\bo u \mapsto \int_{\Omega_n} Ae(\bo u):e(\bo u)$. Since $(\Omega_n)_{n\geq 1}$ verify an $\varepsilon$-cone condition, it follows that $\Per( \Omega_n)$ is uniformly bounded. Without loss of generality, up to choosing a converging subsequence, suppose that $\Per(\Omega_n)$ converges. Also, since $\Omega_n$ converges to $\Omega$, all domains $\Omega_n$ have uniformly bounded diameters. In view of Proposition \ref{prop:upper-bounds} and Theorem \ref{thm:upper-bounds} we find that $\Per(\Omega_n) \Lambda_k(\Omega_n,\Theta_n)$ are uniformly bounded. As a direct consequence, $\int_{\Omega_n} Ae(\bo u_{p,n}):e(\bo u_{p,n})$ are uniformly bounded, implying that $\|e(\bo u_{p,n})\|_{\bo L^2(\Omega_n)}$ are uniformly bounded. Theorem \ref{thm:uniform-bounds-H1} implies that $\|\bo u_{p,n}\|_{\bo H^1(\Omega_n)}$ are uniformly bounded.
Since $\Omega_n$ are Lipschitz with a controlled constant (coming from the $\varepsilon$-cone property, see \cite[Remark 2.4.8]{henrot-pierre-english}), each of the functions $\bo u_{p,n}$ can be extended to $\bo H^1(\Bbb{R}^d)$ with a controlled extension constant depending on the upper bound on the Lipschitz constant of the domains. Thus, denoting the extensions with the same symbols, we find that $(\bo u_{p,n})_{n\geq 1}$ are bounded in $\bo H^1(\Bbb{R}^d)$. Up to the extraction of a sub-sequence we suppose that $(\bo u_{p,n})$ converge weakly in $\bo H^1(\Bbb{R}^d)$ to $(\bo u_p)_{p=-t(d)+1,...,0,...,k}$. Proposition \ref{prop:conv-traces} implies that \[ \delta_{pp'} = \int_{\partial \Omega_n} \Theta_n \bo u_{p,n}\cdot \bo u_{p',n} \to \int_{\partial \Omega} \Theta \bo u_p\cdot \bo u_{p'} \text{ as } n \to \infty.\] Therefore $(\bo u_p)$ is orthonormal for the scalar product $(\bo u,\bo v)\mapsto \int_{\partial \Omega} \Theta \bo u \cdot \bo v$ and, as a consequence, $(\bo u_p)_{p=-t(d)+1,...,0,...,k}$ generate a subspace of dimension $k+t(d)$ when restricted to $\Omega$. In view of the weak convergence in $\bo H^1(\Bbb{R}^d)$ of the sequences $(\bo u_{p,n})$ and of the Hausdorff convergence of the domains, we have, for all $(a_p)$ with $\sum_{p=-t(d)+1}^k a_p^2=1$, \[ \int_\Omega Ae\Big(\sum_p a_p \bo u_p\Big):e\Big(\sum_p a_p\bo u_p\Big) \leq \liminf_{n \to \infty} \int_{\Omega_n} Ae\Big(\sum_p a_p \bo u_{p,n}\Big):e\Big(\sum_p a_p\bo u_{p,n}\Big) \leq \liminf_{n \to \infty} \Lambda_k(\Omega_n,\Theta_n),\] where the last inequality follows from $\int_{\partial \Omega_n} \Theta_n \big|\sum_p a_p \bo u_{p,n}\big|^2=1$ and the fact that the Rayleigh quotient of any element of $\bo S_n$ is at most $\Lambda_k(\Omega_n,\Theta_n)$. Taking the maximum over $(a_p)\in \Bbb{S}^{k+t(d)-1}$, using $\int_{\partial \Omega} \Theta \big|\sum_p a_p \bo u_p\big|^2=1$ and the variational characterization \eqref{eq:weighted-rayleigh}, we obtain \[ \Lambda_k(\Omega,\Theta) \leq \liminf_{n \to \infty} \Lambda_k(\Omega_n,\Theta_n).\] {\bf Upper semicontinuity.} Let $\bo V$ be a subspace of $\bo H^1(\Omega)$ having dimension $k+t(d)$ which attains $\Lambda_k(\Omega,\Theta)$ in \eqref{eq:weighted-rayleigh} and let $(\bo v_{-t(d)+1},...,\bo v_0,..., \bo v_k)$ be an adapted basis for it (as before, orthonormal for the corresponding scalar product on $\partial \Omega$ and orthogonal for the scalar product on $\Omega$). Extending the functions $(\bo v_p)_{p=-t(d)+1,...,k}$ from $\Omega$ to $\Bbb{R}^d$, these functions still form a linearly independent family in $\bo H^1(\Omega_n)$ for $n$ large enough. In the following, consider $\bo a_n = (a_{p,n})_{p=-t(d)+1,...,k} \in \Bbb S^{k+t(d)-1}$ (unit sphere in $\Bbb{R}^{k+t(d)}$) such that, denoting $\bo W = \text{Span}(\bo v_{-t(d)+1},...,\bo v_0,...,\bo v_k)$, \begin{equation}\label{eq:ub-lamk} \sup_{\bo w \in \bo W} \frac{\displaystyle \int_{\Omega_n} Ae(\bo w):e(\bo w)}{\displaystyle \int_{\partial \Omega_n} \Theta_n |\bo w|^2 }=\frac{\displaystyle \int_{\Omega_n} Ae\Big(\sum_{p=-t(d)+1}^k a_{p,n}\bo v_{p}\Big):e\Big(\sum_{p=-t(d)+1}^k a_{p,n}\bo v_{p}\Big)}{\displaystyle \int_{\partial \Omega_n} \Theta_n \Big|\sum_{p=-t(d)+1}^k a_{p,n}\bo v_{p}\Big|^2 }.\end{equation} Of course, \eqref{eq:ub-lamk} gives an upper bound for $\Lambda_k(\Omega_n,\Theta_n)$. Up to extracting a subsequence, we suppose that $a_{p,n}$ converges to $a_p$ for every $p=-t(d)+1,...,-1,0,...,k$. Denoting $\bo w_n = \sum_{p=-t(d)+1}^k a_{p,n}\bo v_{p}$, $\bo w = \sum_{p=-t(d)+1}^k a_{p}\bo v_{p}$ and using Proposition \ref{prop:conv-traces} we find that \[\int_{\partial \Omega_n} \Theta_n |\bo w_n|^2 \to \int_{\partial \Omega} \Theta |\bo w|^2 \text{ as } n \to \infty.\] The convergence of $\Omega_n$ to $\Omega$ and of $a_{p,n}$ to $a_p$ implies that \[ \int_{\Omega_n} Ae(\bo w_n):e(\bo w_n) \to \int_{\Omega} Ae(\bo w):e(\bo w)\] as $n\to \infty$. As a consequence we find that \[ \limsup_{n \to \infty} \Lambda_k(\Omega_n,\Theta_n) \leq \frac{\int_{\Omega} Ae(\bo w):e(\bo w)}{\int_{\partial \Omega} \Theta |\bo w|^2} \leq \Lambda_k(\Omega,\Theta),\] which finishes the proof.
\end{proof} The result regarding the convergence of weighted Steklov-Lam\'e eigenvalues allows us to obtain in a straightforward way the upper semicontinuity of the eigenvalues, needed in order to prove existence results analogous to those in \cite{Bogosel}. \begin{cor}\label{cor:semi-continuity} Let $\Omega$ be a bounded Lipschitz domain and $\Omega_n$ a sequence of domains with the $\varepsilon$-cone property verifying the hypotheses of Theorem \ref{thm:weight-continuity}. We have the following: {\rm (a)} If $\Per(\Omega_n) \to \Per(\Omega)$ then $\Lambda_k(\Omega_n) \to \Lambda_k(\Omega)$ for every $k \geq 1$. {\rm (b)} In general, $\limsup_{n \to \infty} \Lambda_k(\Omega_n) \leq \Lambda_k(\Omega)$ for every $k \geq 1$. \end{cor} \begin{proof} The result follows at once from Theorem \ref{thm:weight-continuity}. (a) It suffices to take $\Theta_n=\Theta \equiv 1$ in Theorem \ref{thm:weight-continuity}. (b) Taking $\Theta_n \equiv 1$, the lower semicontinuity of the perimeter implies that any weak-$*$ limit $\Theta \mathcal H^{d-1}\lfloor \partial\Omega$ of the sequence of measures $\mathcal H^{d-1}\lfloor \partial\Omega_n$ verifies $\Theta \geq 1$. Therefore \[ \limsup_{n \to \infty} \Lambda_k(\Omega_n) = \limsup_{n \to \infty} \Lambda_k(\Omega_n,\Theta_n) = \Lambda_k(\Omega,\Theta) \leq \Lambda_k(\Omega).\] \end{proof} In the following we focus on the question of existence of solutions. It is classical that the existence of solutions for a shape maximization problem depends on compactness properties of a maximizing sequence. We focus our attention on the class of convex domains and the volume constraint. An initial result is given below. \begin{thm}\label{thm:diameter-bound} Suppose that $(\Omega_n)$ is a sequence of open, convex sets with unit volume such that $\text{diam}(\Omega_n) \to \infty$. Then $\Lambda_k(\Omega_n) \to 0$. \end{thm} \begin{proof} This is a direct consequence of Proposition \ref{prop:upper-bounds} and of the analogous results for the Steklov eigenvalues shown in \cite{Bogosel} or \cite{alsayed-bogosel-henrot-nacry}. \end{proof} We are now ready to state the existence result in the class of convex sets. \begin{thm} \label{thm:existence-convex} For every $k\geq 1$ there exists a solution to the problem of maximizing the Steklov-Lam\'e eigenvalue $\Lambda_k(\Omega)$ in the class of convex sets having unit volume. \end{thm} \begin{proof} From Theorem \ref{thm:diameter-bound}, any maximizing sequence $(\Omega_n)_{n\geq 1}$ has uniformly bounded diameters. The Blaschke selection theorem \cite[Chapter 1]{schneider} then implies the existence of a subsequence, still denoted $\Omega_n$, converging to a convex set $\Omega$ in the Hausdorff distance. Moreover, $\Omega$ also has unit volume and verifies the same bound on the diameter as $\Omega_n$. Therefore, since $\Omega$ has non-void interior, it contains a closed ball $\overline{B_{r_1}}$ of radius $r_1$, and $\Omega$ is star-shaped with respect to $B_{r_1}$ since it is convex. For $n$ large enough, $B_{r_1}$ is also contained in $\Omega_n$, and therefore the hypotheses of Theorem \ref{thm:weight-continuity} and Corollary \ref{cor:semi-continuity} are verified, which implies \[ \limsup_{n \to \infty} \Lambda_k(\Omega_n) \leq \Lambda_k(\Omega).\] Thus $\Omega$ is a maximizer of $\Lambda_k(\Omega)$ among convex sets with unit volume.
\end{proof} \begin{rem} Possible generalizations can be attempted in multiple directions: (i) Proposition \ref{prop:upper-bounds} suggests that problems that are well posed for the scalar Steklov problem should behave in a similar way for the Steklov-Lam\'e eigenvalues. The perimeter constraint may also be considered, since an upper bound exists \cite{hersch-payne-schiffer}. However, in the scalar case in dimension two, in \cite{girouard-polterovich} it is proved that the upper bound from \cite{hersch-payne-schiffer} is tight but is never attained in the class of simply connected sets. (ii) It is not clear what is the most general class of admissible domains for which an existence result like Theorem \ref{thm:existence-convex} might hold. The key ingredients are the existence of an upper bound on the diameter and the stability arguments, which depend on the availability of uniformly bounded constants in Korn's inequality. In view of the results in \cite{horgan-payne} it is possible that such a result could hold in the class of star-shaped domains. (iii) Relaxed formulations can be considered, like those in \cite{bogosel-bucur-giacomini}, which allow one to recover existence results among simply connected sets under a volume constraint. (iv) Theorem \ref{thm:optimality-disk} and the numerical results suggest that the first eigenvalue is maximized by the disk under perimeter and volume constraints (among simply connected sets). Following the similarity of the results in \cite{Bucur-Nahon} with the stability result in Theorem \ref{thm:weight-continuity}, it is possible that Theorem 2.7 from \cite{Bucur-Nahon}, regarding the negative answer to the stability of Weinstock's inequality, could generalize to the Steklov-Lam\'e eigenvalues. We mention also the stability results for Steklov eigenvalues proved in \cite{stability-steklov}, which can also be generalized to this case. (v) The existence result generalizes to shape functionals of the form \[ \Omega \mapsto F(\Lambda_1(\Omega),...,\Lambda_k(\Omega)),\] where $F:\Bbb{R}^k \to \Bbb{R}$ is upper semi-continuous and increasing in each variable. \end{rem} \section{An estimate inspired by Moler and Payne} \label{sec:moler-payne} The results found by Moler and Payne in \cite{moler-payne} are classical for studying the accuracy of the method of fundamental solutions. They give bounds controlling how the solutions of a PDE change in terms of perturbations of the boundary conditions. An analogous result for the case of Steklov and Wentzell eigenvalues was given in \cite{Bogosel2}. In the following we aim to give such a result related to the Steklov-Lam\'e problem. Consider the problem of finding $\bo u \in \bo H^1(\Omega)$ verifying \begin{equation} \left\{ \begin{array}{rcll} -\di Ae(\bo u) & = & 0 & \text{ in }\Omega \\ Ae(\bo u)\bo n & = & \bo f & \text{ on }\partial \Omega \end{array} \right. \label{eq:rhs-problem} \end{equation} for $\bo f$ in $\bo L^2(\partial \Omega)$. It can readily be observed that \eqref{eq:rhs-problem} does not have a unique solution, as stated. Moreover, the variational formulation \begin{equation} \int_\Omega Ae(\bo u):e(\bo v) = \int_{\partial \Omega} \bo f \cdot \bo v \ \ \ \forall \bo v \in \bo H^1(\Omega), \end{equation} implies the compatibility condition $\int_{\partial \Omega} \bo f\cdot \bo v =0$ for every $\bo v \in \bo R(\Omega)$ (the space of rigid motions). Denote by $\bo V(\partial \Omega)$ the subspace of $\bo L^2(\partial \Omega)$ which is orthogonal to the rigid motions with respect to the usual scalar product.
Also denote by $\bo H(\Omega)$ the orthogonal complement of the space of rigid motions $\bo R(\Omega)$ in $\bo H^1(\Omega)$ with respect to the scalar product of $\bo L^2(\Omega)$. Then define the resolvent operator $\bo{Res} : \bo V(\partial \Omega) \to \bo H(\Omega)$ such that $\bo{Res}(\bo f) = \bo u$, where $\bo u$ solves \eqref{eq:rhs-problem}. The well-posedness of \eqref{eq:rhs-problem} for $\bo u \in \bo H(\Omega)$ and $\bo f \in \bo V(\partial \Omega)$ is classical and discussed, for example, in \cite[Section 3.2]{Sebastian}. \begin{thm} Let $\Omega$ be a bounded, open domain with Lipschitz boundary. Suppose that $\bo f \in \bo V(\partial \Omega)$ and $\bo u = \bo{Res}(\bo f)$. Then there exists a constant $C$ depending only on $\Omega$ such that \[ \|\bo u\|_{\bo L^2(\partial \Omega)} \leq C \|\bo f\|_{\bo L^2(\partial \Omega)}.\] \label{thm:estimate-perturbation} \end{thm} \begin{proof} Use the variational formulation to see that \[ \int_\Omega Ae(\bo u):e(\bo u) = \int_{\partial \Omega}\bo f\cdot \bo u.\] Since $\bo u \in \bo H(\Omega)$, we can apply Korn's inequality (\cite[Theorem 2.3]{ciarlet-korn}, \cite[Theorem 2.5]{korn-book}) to obtain $\|\bo u\|_{\bo H^1(\Omega)}\leq C_K\|e(\bo u)\|_{\bo L^2(\Omega)}$. Also recall that the trace inequality (see for example \cite{evans-gariepy}) says that there exists a constant $C_\Omega$ depending on the Lipschitz constant of $\partial \Omega$ such that $\|\bo u\|_{\bo L^2(\partial \Omega)} \leq C_\Omega \|\bo u\|_{\bo H^1(\Omega)}$. Therefore we have the sequence of inequalities: \begin{align*} \|\bo u\|_{\bo L^2(\partial \Omega)}^2 &\leq C_\Omega^2 \|\bo u\|_{\bo H^1(\Omega)}^2 \leq C_\Omega^2 C_K^2 \|e(\bo u)\|_{\bo L^2(\Omega)}^2\leq \frac{C_\Omega^2C_K^2}{2\mu} \int_\Omega Ae(\bo u):e(\bo u)\\ &\leq \frac{C_\Omega^2C_K^2}{2\mu}\|\bo f\|_{\bo L^2(\partial \Omega)}\|\bo u\|_{\bo L^2(\partial \Omega)}. \end{align*} The conclusion follows. \end{proof} \begin{thm}\label{thm:moler-payne} Let $\Omega$ be a bounded open domain with Lipschitz boundary. Suppose $\bo u_\varepsilon, \bo f_\varepsilon$ belong to $\bo V(\partial \Omega)$ and that $\bo u_\varepsilon \in \bo H(\Omega)$ verifies the perturbed equation \begin{equation} \left\{ \begin{array}{rcll} -\di Ae(\bo u_\varepsilon) & = & 0 & \text{ in }\Omega \\ Ae(\bo u_\varepsilon)\bo n & = & \Lambda_\varepsilon \bo u_\varepsilon + \bo f_\varepsilon & \text{ on }\partial \Omega \end{array} \right. \label{eq:pert-problem} \end{equation} Then there exists $n \geq 1$ such that \begin{equation} \label{bound_error} \|\bo u_\varepsilon\|_{\bo L^2(\partial \Omega)} |\Lambda_n(\Omega)-\Lambda_\varepsilon| \leq \|\bo f_\varepsilon\|_{\bo L^2(\partial \Omega)}. \end{equation} Furthermore, suppose that there exists $\delta>0$ such that when $\Lambda_k(\Omega)\neq \Lambda_n(\Omega)$ we have $|\Lambda_k(\Omega)-\Lambda_n(\Omega)|>\delta$. Then there exist $\bo u_n, \bo u_r\in \bo H(\Omega)$ such that $\bo u_\varepsilon = {\bo u_n}+\bo u_r$, ${\bo u_n}$ is an eigenfunction associated to $\Lambda_n$ and \[ \|\bo u_r\|_{\bo L^2(\partial \Omega)} = \|\bo u_\varepsilon-\bo u_n\|_{\bo L^2(\partial \Omega)}\leq \frac{\|\bo f_\varepsilon\|_{\bo L^2(\partial \Omega)}}{\delta}.\] \end{thm} \begin{proof} It is classical that the eigenfunctions $(\bo u_n)$ associated to positive eigenvalues $\Lambda_n(\Omega)>0$ form an orthonormal basis of $\bo V(\partial \Omega)$ in $\bo L^2(\partial \Omega)$.
Using the variational formulations for the eigenvalue problem and for problem \eqref{eq:pert-problem} we have \[ (\Lambda_n(\Omega)-\Lambda_\varepsilon) \int_{\partial \Omega} \bo u_n \cdot \bo u_\varepsilon = \int_{\partial \Omega} \bo f_\varepsilon \cdot \bo u_n , \forall n \geq 1\] Denoting $a_n = \int_{\partial \Omega} \bo u_n \cdot \bo u_\varepsilon$ and $b_n = \int_{\partial \Omega} \bo f_\varepsilon \cdot \bo u_n$ the Fourier coefficients of $\bo u_\varepsilon$ and $\bo f_\varepsilon$ in the orthonormal basis given by the eigenfunctions $(\bo u_n)_{n\geq 1}$ we know that \[ \|\bo u_\varepsilon\|_{\bo L^2(\partial \Omega)}^2 = \sum_{n\geq 1}a_n^2,\ \ \|\bo f_\varepsilon\|_{\bo L^2(\partial \Omega)}^2 = \sum_{n\geq 1} b_n^2.\] Therefore, choosing $n$ such that $|\Lambda_n(\Omega)-\Lambda_\varepsilon|$ is minimal (such $n$ exists since $\Lambda_n(\Omega) \to \infty$) we have \[ \|\bo f_\varepsilon\|_{\bo L^2(\partial \Omega)} \geq |\Lambda_n(\Omega)-\Lambda_\varepsilon| \|\bo u_\varepsilon\|_{\bo L^2(\partial \Omega)}\] Denote by $I_n = \{ k \geq 1 \text{ such that }\Lambda_k(\Omega)\neq \Lambda_n(\Omega)\}$. Suppose that there exists $\delta>0$ such that $|\Lambda_k(\Omega)-\Lambda_n(\Omega)| >\delta$ for every $k \in I_n$. In view of the above computations we have $a_k^2 = b_k^2/(\Lambda_k(\Omega)-\Lambda_\varepsilon)^2 \leq b_k^2/\delta^2$ for every $k \in I_n$. This implies that $\bo u_\varepsilon$ can be written as $\bo u_\varepsilon ={\bo u_n}+\bo u_r$ such that \begin{itemize} \item ${\bo u_n}$ verifies the Steklov-Lam\'e eigenvalue problem \eqref{eq:steklov-lame} for $\Lambda=\Lambda_n(\Omega)$. \item $\bo u_r$ verifies $\|\bo u_r\|_{L^2(\partial \Omega)}^2 \leq \|\bo f_\varepsilon\|_{L^2(\partial \Omega)}^2/\delta^2$. \end{itemize} \end{proof} \begin{rem} Theorem \ref{thm:moler-payne} motivates our numerical method in view of the following arguments. (i) If $\|\bo f_\varepsilon\|_{\bo L^2(\partial \Omega)}$ is small enough then either $\|\bo u_\varepsilon\|_{\bo L^2(\partial \Omega)}$ small or there exists a Steklov-Lam\'e eigenvalue $\Lambda_n$ that is close to $\Lambda_\varepsilon$. (ii) If $\|\bo f_\varepsilon\|_{\bo L^2(\partial \Omega)}$ is small and there exists $n$ such that $|\Lambda_n-\Lambda_\varepsilon|$ is small enough then the solution $\bo u_\varepsilon$ is close to an actual Steklov-Lam\'e eigenfunction. In case the eigenspace of $\Lambda_n$ is of dimension one then the result implies that $\|\bo u_\varepsilon-\bo u_n\|_{\bo L^2(\partial \Omega)}$ is small, i.e. the approximate eigenfunction is close to the original one. In case the eigenspace has higher dimension, for multiple eigenvalues, the result says that $\bo u_\varepsilon$ can be decomposed using an eigenfunction for $\Lambda_n$ and a remainder term which is small compared to the error $\|\bo f_\varepsilon \|_{\bo L^2(\partial \Omega)}$. \end{rem} \section{Numerical methods} \label{sec:num-methods} We consider the numerical solution of eigenvalue problem \eqref{eq:steklov-lame} in dimension two using the Method of Fundamental Solutions (MFS) whose approximation can be justified by density results (eg.~\cite{Alves, Alves-Martins}). 
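In practice, the bound \eqref{bound_error} provides an a posteriori check on any approximate eigenpair: if $\bo u_\varepsilon$ is $\bo L^2(\partial\Omega)$-normalized, the distance from $\Lambda_\varepsilon$ to the spectrum is bounded by $\|\bo f_\varepsilon\|_{\bo L^2(\partial\Omega)}$, where $\bo f_\varepsilon=Ae(\bo u_\varepsilon)\bo n-\Lambda_\varepsilon\bo u_\varepsilon$ (this is exploited below in the discussion of Figure~\ref{fig:moler_payne}). The following minimal Python sketch evaluates the discrete residual norm; the array names and the simple quadrature rule are illustrative assumptions, not the actual implementation.

\begin{verbatim}
import numpy as np

def residual_norm(traction, u_eps, lam_eps, weights):
    """Discrete L^2(boundary) norm of f_eps = Ae(u_eps)n - lam_eps*u_eps.

    traction, u_eps : (M, 2) arrays with Ae(u_eps)n and u_eps at M boundary nodes
    lam_eps         : approximate eigenvalue
    weights         : (M,) boundary quadrature weights
    """
    f_eps = traction - lam_eps * u_eps
    return np.sqrt(np.sum(weights * np.sum(f_eps**2, axis=1)))

# If u_eps is normalized in L^2(boundary), then by the bound above
# |Lambda_n - lam_eps| <= residual_norm(traction, u_eps, lam_eps, weights)
# for some exact Steklov-Lame eigenvalue Lambda_n.
\end{verbatim}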
We define the tensor \[\bo{\Phi}_{y}(x):=\bo\Phi(x-y),\] where \[\left[\bo\Phi(x)\right]_{i,j}=\frac{\lambda+3\mu}{4\pi\mu(\lambda+2\mu)}\left(-\log|x|\delta_{i,j}+\frac{\lambda+\mu}{\lambda+3\mu}\frac{x_ix_j}{|x|^2}\right)\] is the fundamental solution of the Lam\'{e} equations and the single layer potential is defined by \[ (\bo S_{\Gamma}\phi)(x)=\int_{\Gamma} \bo\Phi_{y}(x)\phi(y)ds_{y},\] for some Jordan curve $\Gamma$ and $\phi\in H^{-\frac{1}{2}}(\Gamma)^2.$ Denote by $\tau_\gamma$ the trace operator defined on some boundary $\gamma$ and define the operator \begin{align*} \bo B_\gamma&:H^{-\frac{1}{2}}(\gamma)^2\rightarrow H^{\frac{1}{2}}(\partial\Omega)^2&\\ \bo B_\gamma&\phi=\tau_{\partial\Omega}(\bo S_{\gamma}\phi). \end{align*} \begin{thm} \label{densidade} Let $\hat{\Omega}\subset\mathbb{R}^2$ be a bounded simply connected domain such that $\bar{\Omega}\subset\hat{\Omega}.$ Then, $\bo B_{\partial\hat{\Omega}}$ has dense range in the functional space \[H_\ast^{\frac{1}{2}}(\partial\Omega)^2=\left\{\psi\in H^{\frac{1}{2}}(\partial\Omega)^2:\int_{\partial\Omega}\psi(x)ds_x=\bo0\right\}.\] \end{thm} \begin{proof} We will prove that the adjoint $\bo B_{\partial\hat{\Omega}}^\ast=\tau_{\partial\hat{\Omega}}\bo S_{\partial\Omega}$ is injective by verifying that $\text{Ker}(\bo B_{\partial\hat{\Omega}}^\ast)=\left\{\bo 0\right\}.$ Let $\phi\in H_\ast^{\frac{1}{2}}(\partial\Omega)^2$ such that $\bo B_{\partial\hat{\Omega}}^\ast(\phi)=\bo 0$ and define \[\bo u=\bo S_{\partial\Omega}\phi.\] Note that $\bo u$ satisfies $\di A(e(\bo u)) = \bo 0 $ in $\mathbb{R}^2\backslash\partial\Omega$ and solves the exterior problem \begin{equation}\label{probext} \left\{\begin{array}{rcll} \di A(e(\bo u)) & = & \bo{0} & \text{ in } \mathbb{R}^2\backslash\bar{\hat{\Omega}} \\ \bo u & = &\bo 0 & \text{ on } \partial\hat{\Omega} \\ \bo u (x)&=&\bo c \log|x|+\mathcal{O}(1) & |x|\rightarrow\infty, \end{array}\right. \end{equation} where (cf. \cite{Chen-Zhou}) \[\bo c=-\frac{\lambda+3\mu}{4\pi\mu(\lambda+2\mu)}\int_{\partial\Omega}\phi(y)ds_y=\bo 0\] because $\phi\in H_\ast^{\frac{1}{2}}(\partial\Omega)^2.$ Thus,~\eqref{probext} is well posed with trivial solution $\bo u=0.$ By analytic continuation, the external trace of $\bo u$ on $\partial\Omega$ and the external trace of the surface traction vector $Ae(\bo u)\bo n$ are both null. By continuity of the single layer operator through the boundary, the inner trace of $\bo u$ at $\partial\Omega$ is null which implies that $\bo u$ solves \begin{equation} \left\{\begin{array}{rcll} \di A(e(\bo u)) & = & \bo 0 & \text{ in } \Omega \\ \bo u & = &\bo 0 & \text{ on } \partial\Omega, \end{array}\right. \label{probint} \end{equation} which is well posed with trivial solution $\bo u=\bo0.$ Thus, the inner traces (on $\partial\Omega$) of $\bo u$ and of the surface traction vector $Ae(\bo u)\bo n$ are both null. Taking into account that $\phi$ is equal to the jump of the surface traction vector at the boundary (cf.~\cite{Chen-Zhou}), we conclude that $\phi=\bo 0$ and $\text{Ker}(\bo B_{\partial\hat{\Omega}}^\ast)=\left\{\bo 0\right\}$.\end{proof} \begin{thm} \label{betti} Let $\Omega$ be a sufficiently smooth domain, in such a way that the eigenfunctions are in $\bo H^1(\Omega)$. 
Then, the traces on $\partial\Omega$ of the eigenfunctions associated with positive eigenvalues belong to $H_\ast^{\frac{1}{2}}(\partial\Omega)^2.$ \end{thm} \begin{proof} This follows from Betti's formula (cf.~\cite{mclean}): taking $\bo u$ to be an eigenfunction associated to a positive eigenvalue and $\bo v$ an arbitrary constant vector, we get \[\int_\Omega\Big(\underbrace{\di A(e(\bo u))}_{=0}\cdot \bo v-\underbrace{\di A(e(\bo v))}_{=0}\cdot \bo u\Big)dx=\int_{\partial\Omega}\Big(\underbrace{Ae(\bo u)\bo n}_{=\Lambda(\Omega) \bo u}\cdot \bo v- \underbrace{Ae(\bo v)\bo n}_{=0}\cdot \bo u\Big)ds.\] Thus, \[\Lambda(\Omega)\int_{\partial\Omega}\bo u\, ds=\bo 0\] and since $\Lambda(\Omega)>0$ we conclude that $\int_{\partial\Omega}\bo u \,ds=\bo 0$ and the conclusion follows.\end{proof} \begin{rem} Theorem~\ref{densidade} ensures density of the traces of the single layer in $H_\ast^{\frac{1}{2}}(\partial\Omega)^2$ and by Theorem~\ref{betti} density in the space of traces of the eigenfunctions associated to positive eigenvalues. Using a similar argument it could be proven that the traces on $\partial\Omega$ of \[\bo v=(\bo S_{\partial\hat{\Omega}}\phi)+\alpha_1\begin{pmatrix}1\\0\end{pmatrix}+\alpha_2\begin{pmatrix}0\\1\end{pmatrix},\ \alpha_1,\alpha_2\in\mathbb{R}\] are dense in $H^{\frac{1}{2}}(\partial\Omega)^2$ (cf.~\cite{Alves-Martins}). \end{rem} Taking into account Theorem~\ref{densidade} we consider a bounded simply connected domain $\hat{\Omega}\subset\mathbb{R}^2$ such that $\bar{\Omega}\subset\hat{\Omega}$ and place $N$ source points on the boundary $\partial\hat{\Omega}.$ The Method of Fundamental Solutions (MFS) approximation is a discretization of the single layer operator, \begin{equation} \label{mfs} \bo u(x)=(\bo S_{\partial\hat{\Omega}}\phi)(x)\approx \bo u_N(x)=\sum_{j=1}^N\bo\Phi_{\bo y_j}(x)\cdot\bo a_j,\ \bo a_j\in\mathbb{R}^2. \end{equation} The MFS linear combination satisfies the Lam\'{e} equations. A straightforward approach for the calculation of the positive Steklov-Lam\'{e} eigenvalues could be to define $M$ collocation points $\bo x_1,...,\bo x_M\in\partial\Omega$ and the unit outward normal vectors at these points $\bo n_1,...,\bo n_M$ and to impose the boundary condition of problem~\eqref{eq:steklov-lame}. Taking $M=N$ would lead to the solution of a generalized eigenvalue problem \begin{equation} \label{mateigprob} \bo A\cdot \bo X=\lambda \bo B\cdot \bo X, \end{equation} where \[\left[\bo A\right]_{i,j}=Ae(\bo \Phi_{\bo y_j} (\bo x_i))\bo n_i \quad\text{and}\quad\left[\bo B\right]_{i,j}=\bo \Phi_{\bo y_j}(\bo x_i)\] with square matrices $\bo A$ and $\bo B.$ However, several numerical tests revealed that a better approach is to consider oversampling. Consider $M>N$, for instance $M=2N$, and instead of the generalized eigenvalue problem~\eqref{mateigprob}, compute the factorization $\mathbf{B}=\mathbf{QR}$. Afterwards, solve the generalized eigenvalue problem \begin{equation} \label{mateigprob2} \left(\bo Q'\cdot \bo A\right)\cdot \bo X=\lambda \bo R\cdot\bo X. \end{equation} As in previous studies of the application of the MFS to eigenvalue problems (e.g.~\cite{Alves-Antunes_2013,Bogosel2}) we define the source points for the MFS by \[\bo y_j=\bo x_j+\alpha\bo n_j,\] for a small positive parameter $\alpha$. Details regarding the choice of this parameter are given in the presentation of the numerical results.
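As an illustration of the computations described above, the following Python sketch evaluates the fundamental solution $\bo\Phi$, places collocation points on a test boundary (the unit circle) with sources $\bo y_j=\bo x_j+\alpha\bo n_j$, assembles the displacement matrix $\bo B$ and performs the oversampled solve \eqref{mateigprob2}. The geometry, the parameter values and the array names are illustrative assumptions; in particular, the traction matrix $\bo A$, whose entries require the stress of the columns of $\bo\Phi$, is replaced by a placeholder, so the sketch only shows the structure of the computation and not the actual implementation.

\begin{verbatim}
import numpy as np
from scipy.linalg import qr, eig

lam, mu = 1.0, 0.5           # Lame parameters (illustrative)
N, alpha = 40, 0.015         # number of sources, distance parameter (illustrative)
M = 2 * N                    # oversampling: M = 2N collocation points

def phi(x, lam, mu):
    """2x2 fundamental solution tensor Phi(x) of the 2D Lame operator."""
    r2 = x @ x
    c = (lam + 3*mu) / (4*np.pi*mu*(lam + 2*mu))
    k = (lam + mu) / (lam + 3*mu)
    return c * (-0.5*np.log(r2)*np.eye(2) + k*np.outer(x, x)/r2)

# Collocation points on the unit circle; sources y_j = x_j + alpha*n_j
# (on the unit circle the outward normal at x_j is x_j itself).
t_col = 2*np.pi*np.arange(M)/M
t_src = 2*np.pi*np.arange(N)/N
x_col = np.column_stack([np.cos(t_col), np.sin(t_col)])
y_src = (1.0 + alpha) * np.column_stack([np.cos(t_src), np.sin(t_src)])

# Displacement matrix B (2M x 2N), assembled from 2x2 blocks Phi(x_i - y_j).
B = np.zeros((2*M, 2*N))
for i in range(M):
    for j in range(N):
        B[2*i:2*i+2, 2*j:2*j+2] = phi(x_col[i] - y_src[j], lam, mu)

# Traction matrix A (2M x 2N) would contain Ae(Phi_{y_j})(x_i) n_i; its assembly
# needs the stress of the columns of Phi and is omitted here -- placeholder only.
A = np.random.rand(2*M, 2*N)

# Oversampled problem: factor B = QR, then solve (Q^T A) X = lambda R X.
Q, R = qr(B, mode='economic')
eigvals, eigvecs = eig(Q.T @ A, R)
# With the true traction matrix A, the eigenvalues would approximate the
# Steklov-Lame eigenvalues; with the placeholder above they are meaningless.
\end{verbatim}

The economy-size QR reduction keeps the generalized eigenvalue problem square, of size $2N\times 2N$, while still using the information from all $M=2N$ collocation points, which is the oversampling strategy described above.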
We will consider the numerical optimization of Steklov-Lam\'{e} eigenvalues in two classes of domains: \begin{itemize} \item simply connected planar domains with fixed area; \item planar convex domains with fixed area, for which Theorem \ref{thm:existence-convex} ensures the existence of solutions. \end{itemize} We parametrize the boundary of a general simply connected planar domain by \begin{equation} \label{defdom1} \left\{(h_1(t),h_2(t)):t\in[0,2\pi[\right\}, \end{equation} for some $2\pi$-periodic functions $h_1$ and $h_2$ such that \eqref{defdom1} defines a Jordan curve. We consider the approximations \[h_1(t)\approx \gamma_1(t)=a_0^{(1)}+\sum_{j=1}^Pa_j^{(1)}\cos(jt)+\sum_{j=1}^Pb_j^{(1)}\sin(jt)\] and \[h_2(t)\approx \gamma_2(t)=a_0^{(2)}+\sum_{j=1}^Pa_j^{(2)}\cos(jt)+\sum_{j=1}^Pb_j^{(2)}\sin(jt),\] for some $P\in\mathbb{N}.$ The boundary of a planar convex domain is defined by \eqref{defdom1} where \[ h_1(t) = p(t) \cos(t) - p'(t) \sin(t),\] \[h_2(t)=p(t) \sin(t) + p'(t) \cos(t)\] and the support function $p$ is approximated by a truncated Fourier series \begin{equation} p(t) = a_0 + \sum_{k=1}^\mathcal{P} \left( a_k \cos (kt) + b_k \sin (kt) \right). \label{fouDecomp} \end{equation} Note that in this case convexity corresponds to the constraint \begin{equation} \label{conv_support} p''(t)+p(t)\geq0,\ \forall t\in[0,2\pi[. \end{equation} Numerically, we will impose \eqref{conv_support} at a discrete set of points in $[0,2\pi[$, which leads to a system of (linear) inequalities (see~\cite{Antunes-Bogosel} for details). The numerical optimizers are found by determining optimal coefficients in the previous expansions using a gradient-type method, as in previous computational studies of extremal eigenvalues~\cite{ak-kao-osting,Antunes-Bogosel,Bogosel2,kao-osting-oudet}. The derivative of each eigenvalue with respect to the variation of each coefficient is calculated by the formula for the shape derivative obtained in~\cite{CDM}. Given a deformation field $\bo V$, the derivative of a (simple) Steklov-Lam\'{e} eigenvalue is given by \begin{multline*}\Lambda'(\Omega,\bo V)=\int_{\partial\Omega}\Big(Ae(\bo u):e(\bo u)-4Ae(\bo u)\bo n\cdot\Pi e(\bo u)\bo n\\ -\Lambda(\Omega)\bo u\cdot(\mathcal H\bo u+2\partial_n{\bo u}-4\Pi e(\bo u)\bo n)\Big)\bo V\cdot\bo n\,ds, \end{multline*} where $\mathcal H$ is the curvature and $\Pi=\begin{pmatrix}1 &0\\0&0\end{pmatrix}$. \section{Numerical results} \label{sec:num-results} In this section we present numerical results obtained using the numerical framework presented previously. All numerical computations and figures are done in Matlab. We start with some tests performed for the disk with unit area. Figure~\ref{fig:convergence_disk} shows the convergence curves of the MFS in the calculation of four eigenvalues ($\Lambda_i,\ i=1,4,20,100$) for $\lambda=1,\ \mu=0.5$ (left plot) and $\lambda=1,\ \mu=3$ (right plot). These results were obtained with $\alpha=0.015$. We note that the matrices involved in the generalized eigenvalue problem are highly ill-conditioned if we take large values of $\alpha$, and new techniques for reducing the ill-conditioning may be needed (e.g.~\cite{Antunes_illcond}). It can be observed that the precision of the computations increases with the number of fundamental solutions $N$. For eigenvalues of small index the precision gets close to machine precision.
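Returning briefly to the convexity constraint \eqref{conv_support}: for the truncated series \eqref{fouDecomp} one has $p''(t)+p(t)=a_0+\sum_{k=1}^{\mathcal P}(1-k^2)\big(a_k\cos(kt)+b_k\sin(kt)\big)$, so imposing \eqref{conv_support} at a discrete set of angles is indeed a system of linear inequalities in the Fourier coefficients. A minimal Python sketch (with illustrative numbers of modes and constraint points, not the implementation of \cite{Antunes-Bogosel}) reads:

\begin{verbatim}
import numpy as np

P = 8          # number of Fourier modes (illustrative)
n_pts = 200    # number of constraint points in [0, 2*pi) (illustrative)
t = 2*np.pi*np.arange(n_pts)/n_pts

# Coefficient vector c = [a_0, a_1..a_P, b_1..b_P]; the constraint
# p''(t) + p(t) >= 0 at each angle t becomes one row of C @ c >= 0.
k = np.arange(1, P+1)
C = np.hstack([
    np.ones((n_pts, 1)),                      # contribution of a_0
    (1 - k**2) * np.cos(np.outer(t, k)),      # contributions of a_k
    (1 - k**2) * np.sin(np.outer(t, k)),      # contributions of b_k
])

def is_convex(c, tol=1e-12):
    """Check the discrete convexity constraint for a coefficient vector c."""
    return np.all(C @ c >= -tol)

# Example: the unit disk, p(t) = 1, i.e. c = (1, 0, ..., 0), is convex.
c_disk = np.zeros(2*P + 1); c_disk[0] = 1.0
print(is_convex(c_disk))   # True
\end{verbatim}

Each row of $C$ corresponds to one constraint point, so the convexity requirement enters the optimization simply as the linear system $C\,c\geq 0$ mentioned above.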
\begin{figure}[ht] \centering \includegraphics[width=0.49\textwidth]{conv_mu=0p5} \includegraphics[width=0.49\textwidth]{conv_mu=3} \caption{Plots of the absolute errors of the eigenvalues $\Lambda_i,\ i=1,4,20,100$ of the disk with unit area, as a function of the number of MFS basis functions, for $\lambda=1,\ \mu=0.5$ (left plot) and $\lambda=1,\ \mu=3$ (right plot).} \label{fig:convergence_disk} \end{figure} Figure~\ref{fig:disk_mu} shows the plot of the first 50 Steklov-Lam\'{e} eigenvalues of the disk with unit area, as a function of $\mu\in[0,10]$, keeping $\lambda=1$ (left plot). The right plot of the same figure shows a zoom for $\mu\in[0,1].$ \begin{figure}[ht] \centering \includegraphics[width=0.49\textwidth]{disk_mu10} \includegraphics[width=0.49\textwidth]{disk_mu1} \caption{Plot of $\Lambda_i,\ i=1,2,...,50$, for the disk with unit area with $\lambda=1$ and $\mu\in[0,10]$ (left plot) and a zoom for $\mu\in[0,1]$ (right plot).} \label{fig:disk_mu} \end{figure} Next, we show some numerical results concerning the computation of the Steklov-Lam\'e eigenvalues and eigenfunctions using the MFS for the domain $\Omega_1$ whose boundary is defined by \[\partial\Omega_1=\left\{\left(\cos(t),\sin(t)+\frac{3}{10}\sin(3t)\right):\ t\in[0,2\pi[\right\}.\] Figure~\ref{fig:pts_mfs} shows the plot of the collocation points (marked with $\txtr{\bullet}$) and source points (marked with $\txtb{\circ}$) for $\Omega_1$. \begin{figure}[ht] \centering \includegraphics[width=0.49\textwidth]{pts_mfs} \caption{Plot of the collocation points (marked with $\txtr{\bullet}$) and source points (marked with $\txtb{\circ}$) for $\Omega_1$.} \label{fig:pts_mfs} \end{figure} Figure~\ref{fig:eig_funct} shows the plots of the first and second components of the eigenfunctions associated to the eigenvalues $\Lambda_i,\ i=1,2,7,20,100$ of $\Omega_1$ with $(\lambda,\mu)=(1,0.5)$ (left-hand side plots) and $(\lambda,\mu)=(1,3)$ (right-hand side plots).
\begin{figure*} \centering \begin{tabular}{ccm{1cm}cc} \includegraphics[width=0.17\textwidth]{eigf1_c1}& \includegraphics[width=0.17\textwidth]{eigf1_c2}& $\Lambda_1$ & \includegraphics[width=0.17\textwidth]{eigf1_c1b}& \includegraphics[width=0.17\textwidth]{eigf1_c2b}\\ \includegraphics[width=0.17\textwidth]{eigf2_c1}& \includegraphics[width=0.17\textwidth]{eigf2_c2}&$\Lambda_2$ & \includegraphics[width=0.17\textwidth]{eigf2_c1b}& \includegraphics[width=0.17\textwidth]{eigf2_c2b}\\ \includegraphics[width=0.17\textwidth]{eigf7_c1}& \includegraphics[width=0.17\textwidth]{eigf7_c2}&$\Lambda_7$& \includegraphics[width=0.17\textwidth]{eigf7_c1b}& \includegraphics[width=0.17\textwidth]{eigf7_c2b}\\ \includegraphics[width=0.17\textwidth]{eigf20_c1}& \includegraphics[width=0.17\textwidth]{eigf20_c2}&$\Lambda_{20}$ & \includegraphics[width=0.17\textwidth]{eigf20_c1b}& \includegraphics[width=0.17\textwidth]{eigf20_c2b}\\ \includegraphics[width=0.17\textwidth]{eigf100_c1}& \includegraphics[width=0.17\textwidth]{eigf100_c2}&$\Lambda_{100}$ & \includegraphics[width=0.17\textwidth]{eigf100_c1b}& \includegraphics[width=0.17\textwidth]{eigf100_c2b}\\ plot of $u_1$ & plot of $u_2$& & plot of $u_1$& plot of $u_2$\\ $\lambda=1,\ \mu=0.5$& $\lambda=1,\ \mu=0.5$& & $\lambda=1,\ \mu=3$& $\lambda=1,\ \mu=3$ \end{tabular} \caption{Plots of the first and second components for the eigenfunctions associated to the eigenvalues $\Lambda_i,\ i=1,2,7,20,100$ of $\Omega_1$ with $(\lambda,\mu)=(1,0.5)$ (left-hand side plots) and $(\lambda,\mu)=(1,3)$ (right-hand side plots).} \label{fig:eig_funct} \end{figure*} Next, we illustrate the Moler-Payne type result applied to some eigenvalues and corresponding eigenfunctions of $\Omega_1$. In Figure~\ref{fig:moler_payne} we plot $\|f_\varepsilon\|_{\bo L^2(\partial \Omega)}=\|Ae(\bo u_\varepsilon)\bo n -\Lambda_\varepsilon \bo u_\varepsilon\|_{\bo L^2(\partial \Omega)}$, for $\Lambda_i,\ i=1,20,100$. In each case we took a $L^2$ normalized approximated eigenfunction and by~\eqref{bound_error} we can get an upper bound for the error of the approximation of the eigenvalue simply by measuring $\|\bo f_\varepsilon\|_{\bo L^2(\partial \Omega)}$. An eigenvalue and eigenfunction computation takes just a few seconds even for the $100$-th eigenvalue. The plot of the errors suggests that the numerical computations are highly accurate, underlining the interest of using MFS when dealing with smooth domains. \begin{figure}[ht] \centering \includegraphics[width=0.7\textwidth]{moler_payne} \caption{Plots of $|f_\varepsilon|=|Ae(\bo u_\varepsilon)\bo n -\Lambda_\varepsilon \bo u_\varepsilon|$, for $\Lambda_i,\ i=1,20,100$. In each case we took a $\bo L^2(\partial \Omega)$ normalized approximate eigenfunction.} \label{fig:moler_payne} \end{figure} Next, we show some numerical results for the solution of shape optimization problems \eqref{shoptprob} and \eqref{shoptprobconv}. Figure~\ref{fig:opt18} shows the plots of the optimal eigenvalues $\Lambda_n^\ast$ using only a volume constraint (marked with $\txtb{\bullet}$) and $\Lambda_n^\#$ using volume and convexity constraint (marked with $\txtr{\circ}$), for $n=1,2,...,8$, together with the representation of the optimal domain. In each case we plot also the eigenvalue obtained for the disk with unit area. Figure~\ref{fig:opt910} shows similar results for $\Lambda_i,\ i=9,10$. The optimization process for $\Lambda_1$ takes around $5$ minutes on a portable laptop. 
For higher eigenvalues, due to higher multiplicity, computations are more time-consuming. We summarize some observations below: \begin{itemize}[topsep=0pt] \item The disk maximizes $\Lambda_1(\Omega)$ at fixed volume. This result was proved theoretically when $\lambda>\mu$ in Theorem \ref{thm:optimality-disk}. \item The maximizers for $\Lambda_2(\Omega)$ are convex. For $\mu = \lambda$ the disk seems to be optimal. When $\mu<\lambda$ the maximizer is close to the disk; however, the optimal values are slightly larger than those for the disk and the multiplicity clusters do not coincide with those known for the disk. \item The maximizers for $\Lambda_3(\Omega)$ are convex. \item The maximizers for $\Lambda_k(\Omega)$ are convex when $\lambda \leq \mu$ and $k \in \{4,5,6\}$. \end{itemize} It is a common observation when studying optimizers of spectral functionals that the optimal eigenvalue tends to be multiple. For the scalar Steklov problem this fact was observed in \cite{osting-steklov} and \cite{bogosel-bucur-giacomini}. In our case, the numerical results also suggest that the maximal eigenvalue is often multiple; however, the behavior is more complex, as it depends on the Lam\'e parameters $\lambda$ and $\mu$. Consider the following examples: \begin{itemize}[topsep=0pt] \item maximization of $\Lambda_1$: the numerical maximizer is always the disk. Therefore the optimal eigenvalue is double when $\lambda\neq \mu$ and quadruple when $\lambda=\mu$. \item maximization of $\Lambda_2$: the optimal eigenvalue is double, except when $\lambda=\mu$, when the eigenvalue is quadruple. \item maximization of $\Lambda_3$: the optimal eigenvalue is double when $\mu<2\lambda$ and triple when $\mu \geq 2\lambda$. \end{itemize} \begin{figure}[ht] \centering \includegraphics[width=0.49\textwidth]{lambda1} \includegraphics[width=0.49\textwidth]{lambda2} \includegraphics[width=0.49\textwidth]{lambda3} \includegraphics[width=0.49\textwidth]{lambda4} \includegraphics[width=0.49\textwidth]{lambda5} \includegraphics[width=0.49\textwidth]{lambda6} \includegraphics[width=0.49\textwidth]{lambda7} \includegraphics[width=0.49\textwidth]{lambda8} \caption{Plots of the optimal eigenvalues $\Lambda_n^\ast$ (marked with $\txtb{\bullet}$) and $\Lambda_n^\#$ (marked with $\txtr{\circ}$), for $n=1,2,...,8$, together with the representation of the optimal domain. In each case we also plot the eigenvalue obtained for the disk with unit area.} \label{fig:opt18} \end{figure} \begin{figure}[ht] \centering \includegraphics[width=0.49\textwidth]{lambda9} \includegraphics[width=0.49\textwidth]{lambda10} \caption{Plots of the optimal eigenvalues $\Lambda_n^\ast$ (marked with $\txtb{\bullet}$) and $\Lambda_n^\#$ (marked with $\txtr{\circ}$), for $n=9,10$, together with the representation of the optimal domain. In each case we also plot the eigenvalue obtained for the disk with unit area.} \label{fig:opt910} \end{figure} \section{Conclusions} In this paper we studied the behavior of the Steklov-Lam\'e eigenvalues on variable domains. The eigenstructure of the disk was determined in Theorem \ref{thm:eigdisk}. This allowed us to partially extend the results of Weinstock \cite{weinstock} and Brock \cite{brock} to the Steklov-Lam\'e eigenvalues in Theorem \ref{thm:optimality-disk}: the disk maximizes the first non-zero eigenvalue when $\lambda>\mu$ under area and perimeter constraints. Numerical observations suggest that this also holds when $\lambda \leq \mu$.
Upper bounds related to the scalar Steklov eigenvalues generalize to the Steklov-Lam\'e case, as shown in Proposition \ref{prop:upper-bounds}. Theorem \ref{thm:weight-continuity} shows that the eigenvalues are upper-semicontinuous among $\varepsilon$-cone domains converging in the complementary Hausdorff distance. As a direct consequence, there exist maximizers of the Steklov-Lam\'e eigenvalues among convex sets with unit volume. A numerical method based on fundamental solutions was proposed to approximate these eigenvalues numerically. This allowed us to study numerically domains maximizing the Steklov-Lam\'e eigenvalues. This work shows that many of the results related to the scalar Steklov eigenproblem \eqref{eq:steklov-eigs} extend to the Steklov-Lam\'e eigenvalues. \bmhead{Acknowledgments} The second author has been supported by the ANR SHAPO (ANR-18-CE40-0013) grant. \section*{Declarations} The authors have no competing interests to declare that are relevant to the content of this article. The data that support the findings of this paper are available from the corresponding author upon request. \bibliography{./StekLam.bib} \bibliographystyle{./sn-mathphys} \end{document}
2205.11350v4
http://arxiv.org/abs/2205.11350v4
Inverse problems for mean field games
\documentclass[11pt, reqno, oneside, notitlepage]{amsart} \usepackage[a4paper, total={6in, 9.1in}]{geometry} \usepackage{amsmath,amscd} \usepackage{amssymb} \usepackage{amsthm} \usepackage{comment} \usepackage{graphicx} \usepackage{epstopdf} \usepackage{mathrsfs} \usepackage{cite} \usepackage{bm} \usepackage{hyperref} \usepackage{xcolor} \hypersetup{ colorlinks, linkcolor={blue}, urlcolor={blue}, citecolor={red} } \newcommand{\blue}{\textcolor{blue}} \newcommand{\red}{\textcolor{red}} \theoremstyle{plain} \newtheorem{thm}{Theorem}[section] \newtheorem{prop}{Proposition}[section] \newtheorem{hypo}[prop]{Hypothesis} \newtheorem{lem}[prop]{Lemma} \newtheorem{cor}[prop]{Corollary} \newtheorem{assum}[prop]{Assumption} \newtheorem{defi}[prop]{Definition} \newtheorem{rmk}[prop]{Remark} \newtheorem{claim}[prop]{Claim} \newtheorem{example}[prop]{Example} \newtheorem{con}[prop]{Convention} \newtheorem*{proposition*}{Proposition} \numberwithin{equation}{section} \newcommand {\R} {\mathbb{R}} \newcommand {\Z} {\mathbb{Z}} \newcommand {\T} {\mathbb{T}} \newcommand {\N} {\mathbb{N}} \newcommand {\p} {\partial} \newcommand {\dt} {\partial_t} \newcommand {\dz} {\partial_z} \newcommand {\dl} {\partial_l} \newcommand {\dr} {\partial_r} \newcommand {\dv} {\partial_{\varphi}} \newcommand {\va} {\varphi} \newcommand {\ve} {v^{\epsilon}} \newcommand {\D} {\Delta} \newcommand{\eps}{\epsilon} \newcommand{\e}{\eta_{\delta,r}} \newcommand {\Ds} {(-\Delta)^{s}} \newcommand {\sgn} {\text{sgn}} \newcommand {\supp} {\text{supp}} \newcommand {\LL}{\tilde{L}^2} \newcommand{\al}{\alpha} \newcommand{\ov}{\overline{\varphi}} \newcommand{\ol}{\overline} \newcommand{\oov}{\overline{\overline{\varphi}}} \newcommand{\ou}{\overline{u}} \newcommand {\pom} {\int\limits_{\partial \Omega}} \newcommand{\tre}{\textcolor{red}} \newcommand{\tbl}{\textcolor{blue}} \newcommand{\tma}{\textcolor{magenta}} \newcommand{\q}{(n+2)} \newcommand{\ql}{n+2} \newcommand{\qc}{2n+3} \newcommand{\mat}{a_{m_1,k_1,l_1}^{m_2,k_2,l_2}} \newcommand{\matb}{b_{m_1,k_1,l_1}^{m_2,k_2,l_2}} \newcommand{\wt}{\widetilde} \newcommand{\vphi}{\varphi} \newcommand{\V}{\mathcal{V}} \newcommand{\veps}{\varepsilon} \def\div{\text{div}} \newcommand{\LN}{\left\|} \newcommand{\RN}{\right\|} \newcommand{\LV}{\left|} \newcommand{\RV}{\right|} \newcommand{\LC}{\left(} \newcommand{\RC}{\right)} \newcommand{\LB}{\left[} \newcommand{\RB}{\right]} \newcommand{\LCB}{\left\{} \newcommand{\RCB}{\right\}} \newcommand{\LA}{\left<} \newcommand{\RA}{\right>} \newcommand{\re}{\mathrm{Re}} \newcommand{\im}{\mathrm{Im}} \newcommand{\abs}[1]{\lvert #1 \rvert} \newcommand{\norm}[1]{\lVert #1 \rVert} \newcommand{\br}[1]{\langle #1 \rangle} \DeclareMathOperator{\Jed}{\mathcal{J}_{\epsilon,h,s,\delta}} \DeclareMathOperator{\Jeda}{\mathcal{J}_{\epsilon,h,1/2,\delta}} \DeclareMathOperator{\tr}{tr} \DeclareMathOperator{\Cof}{Cof} \DeclareMathOperator{\argmin}{argmin} \DeclareMathOperator{\vol}{vol} \DeclareMathOperator{\di}{div} \DeclareMathOperator{\grad}{grad} \DeclareMathOperator {\dist} {dist} \DeclareMathOperator {\Ree} {Re} \DeclareMathOperator {\Imm} {Im} \DeclareMathOperator {\arcosh} {arcosh} \DeclareMathOperator {\sign} {sgn} \DeclareMathOperator{\spa} {span} \DeclareMathOperator{\inte} {int} \DeclareMathOperator{\Id} {Id} \DeclareMathOperator{\F} {\mathcal{F}} \DeclareMathOperator{\Je} {\mathcal{J}_{\epsilon,h,s}} \DeclareMathOperator{\Jea} {\mathcal{J}_{\epsilon,h,1/2}} \DeclareMathOperator{\Gn} {\mathcal{G}_{n}} \DeclareMathOperator{\Gnn} {\mathcal{G}_{n+1}} \DeclareMathOperator{\Hn} 
{\mathcal{H}_{n}} \DeclareMathOperator{\Hnn} {\mathcal{H}_{n+1}} \DeclareMathOperator{\Jac}{Jac} \DeclareMathOperator{\rank}{rank} \DeclareMathOperator{\codim}{codim} \DeclareMathOperator{\corank}{corank} \DeclareMathOperator{\diam}{diam} \pagestyle{headings} \title[Inverse Problems for Mean Field Games]{Inverse Problems for Mean Field Games} \author[H. Liu]{Hongyu Liu} \address{Department of Mathematics, City University of Hong Kong, Kowloon, Hong Kong SAR, China} \email{[email protected], [email protected]} \author[C. Mou]{Chenchen Mou} \address{Department of Mathematics, City University of Hong Kong, Kowloon, Hong Kong SAR, China} \email{[email protected]} \author[S. Zhang]{Shen Zhang} \address{Department of Mathematics, City University of Hong Kong, Kowloon, Hong Kong SAR, China} \email{[email protected]} \begin{document} \maketitle \begin{abstract} The theory of mean field games studies the limiting behaviors of large systems where the agents interact with each other in a certain symmetric way. The running and terminal costs are critical for the agents to decide the strategies. However, in practice they are often partially known or totally unknown for the agents, while the total cost is known at the end of the game. To address this challenging issue, we propose and study several inverse problems for mean field games. When the Lagrangian is a kinetic energy, we first establish unique identifiability results, showing that one can recover either the running cost or the terminal cost from knowledge of the total cost. If the running cost is limited to the time-independent class, we can further prove that one can simultaneously recover both the running and the terminal costs. Finally, we extend the results to the setup with general Lagrangians. \end{abstract} \tableofcontents \section{Introduction} The theory of mean field games (MFGs) was introduced and studied by Caines-Huang-Malham\'e \cite{HCM06,HCM071,HCM072,HCM073} and Lasry-Lions \cite{LL06a, LL06b, LL07a, Lions} independently in 2006. The MFG theory has rapidly developed into one of the most significant tools towards the study of the Nash equilibrium behavior of large systems. Such problems consider limit behavior of large systems where the homogeneous strategic players interact with each other in a certain symmetric way. More precisely, each player acts according to his/her optimization problem taking into account other players' decisions. Since their population is large, we can assume the number of players goes to infinity and hence a representative player exists. They have a wide variety of applications, including economics \cite{AchdouHanLasryLionsMoll}, engineering \cite{HCM06}, finance \cite{LackerZari}, social science \cite{Gelfand} and many others. We refer to Lions \cite{Lions}, Cardaliaguet \cite{Cardaliaguet} and Bensoussan-Frehse-Yam \cite{BFY} for introductions of the subject in its early stage and Carmona-Delarue \cite{CarDel-I, CarDel-II} and Cardaliaguet-Porretta \cite{CarPor} for comprehensive accounts on the state-of-the-art developments in the literature. \smallskip We first briefly introduce the mathematical setup of our study and shall supplement more details in Section 2. In its typical formulation, an MFG can be described as follows. Let $n\in\mathbb{N}$ and the quotient space $\mathbb{T}^n:=\mathbb{R}^n\backslash\ \mathbb{Z}^n$ be the $n$-dimensional torus, which signifies a state space. 
Given $x\in\mathbb T^n$ and the flow of probability measures $\{\rho_t\}_{t\in [0,T]}$ on $\mathbb T^n$ with $\rho_0=m_0$, one aims at minimizing the cost functional over all the admissible closed-loop controls: \begin{equation}\label{eq:mfgproblem} J(x;\{\rho_t\}_{t\in[0,T]},\alpha)=\inf_{\alpha}\mathbb E\left\{\int_0^T L(X_t^{x,\alpha},\alpha(t, X_t^{x,\alpha}))+F(X_t^{x,\alpha},\rho_t)dt+G(X_T,\rho_T)\right\}, \end{equation} such that \begin{equation}\label{eq:mfgconstrain} X_t^{x,\alpha}=x+\int_0^t\alpha(s,X_s^{x,\alpha})ds+\sqrt{2} B_s+\mathbb Z^n\quad\text{on $[0,T]$,} \end{equation} where $L:\mathbb T^n\times\mathbb R^n\to\mathbb R$ is a Lagrangian, $F:\mathbb T^n\times\mathcal{P}(\mathbb T^n)\to\mathbb R$ is a running cost and $G:\mathbb T^n\times\mathcal{P}(\mathbb T^n)\to\mathbb R$ is a terminal cost. We call $(\alpha^*,\{\rho_{t}^*\}_{t\in[0,T]})$ a mean field equilibrium if \[ \rho_0^*=m_0\quad\text{and}\quad\alpha^*:=\arg\min_{\alpha}J(x;\{\rho_t^*\}_{t\in[0,T]},\alpha), \] and the law of $X_t^{\xi,\alpha^*}$ on $\mathbb T^n$ is $\rho_t^*$ where \begin{equation}\label{eq:mfgpopulation} X_t^{\xi,\alpha^*}=\xi_0+\int_0^{t}\alpha^*(s,X_s^{x,\alpha^*})ds+\sqrt{2}B_t+\mathbb Z^n\quad \text{on [0,T]}, \end{equation} and its initial status $\xi_0$ is a random variable with the law $m_0$ on $\mathbb T^n$. The mean field equilibrium can be characterized by the following MFG system: \begin{equation}\label{eq:mfg} \left\{ \begin{array}{ll} -\partial_t u(x,t) -\Delta u(t,x)+ H\big(x,\nabla u(x,t)\big)-F(x,t,m(x,t))=0,& {\rm{in}}\ \mathbb T^n\times (0,T),\medskip\\ \partial_tm(x,t)-\Delta m(x,t)-{\rm div} \big(m(x,t) \nabla_pH(x, \nabla u(x,t)\big)=0, & {\rm{in}}\ \mathbb T^n\times(0,T),\medskip\\ u(x,T)=G(x,m(x,T)),\ m(x,0)=m_0(x), & {\rm{in}}\ \mathbb T^n. \end{array} \right. \end{equation} In \eqref{eq:mfg}, $\Delta$ and $\rm{div}$ are the Laplacian and divergent operators with respect to the $x$-variable, respectively. The Hamiltonian $H$ is the Legendre-Fenchel transform of the Lagrangian $L$ in \eqref{eq:mfgproblem}. Here, $H(x, \nabla u)=H(x, p)$ with $(x, p):=(x, \nabla u)\in \mathbb{T}^n\times\mathbb{R}^{n}$ being the canonical coordinates. In the physical setup, $u$ is the value function of each player; $m$ signifies the population distribution; $F$ is the running cost function which signifies the interaction between the agents and the population; $m_0$ represents the initial population distribution and $G$ signifies the terminal cost. All the functions involved are real valued and periodically extended from $\mathbb{T}^n$ to $\mathbb{R}^n$, which means that we are mainly concerned with periodic boundary conditions for the MFG system \eqref{eq:mfg}. In particular, we note that $m(\cdot, t)$ is required to be a probability measure for any $t\in [0, T]$. That is, it is required that for any given $t\in [0, T]$: \begin{equation}\label{eq:pm1} m(x, t)=m_t(x)\in\mathcal{O}_a:=\{\mathfrak{m}(x):\mathbb{T}^n\to[0,\infty)\,\Big|\,\int_{\mathbb{T}^n}\,\mathfrak{m}\,dx=a\leq1 \}. \end{equation} Here, we point out that by applying the divergence theorem to the second equation in \eqref{eq:mfg}, one can directly verify that if $\int_{\mathbb{T}^n} m_0(x)\, dx=a$, then $\int_{\mathbb{T}^n} m(x, t)\, dx=a$ for any subsequent $t\in (0, T]$. However, the non-negativity of $m_0$ and $m$ should be imposed in order to guarantee that they are probability measures. In principle, one would also need to require that $a=1$ which signifies that the game agents are confined within a given domain. 
Throughout the current study, we consider a specific scenario in which the MFG domain consists of a family of disjoint subdomains, say $\Sigma_j$, $j\in\mathbb{N}$, such that the overall population on $\cup_j \Sigma_j$ is 1, namely $\int_{\cup_j \Sigma_j} m=1$. Though those subdomains are disjoint, the agents within each subdomain can interact with those in other subdomains, e.g.~via the internet. Hence if $\mathbb{T}^n$ is taken to be any one of those subdomains, i.e. $\Sigma_j$, it is not necessary to require that $\int_{\mathbb{T}^n} m=1$. That is, $a$ in \eqref{eq:pm1} can be any number in $[0, 1]$, as long as $m$ is required to be nonnegative. This technical relaxation is crucial in our subsequent study but practically unobjectionable. In this setup, the mean field strategy can be formally represented by $\alpha^*=-\nabla_pH(x,\nabla u(x,t))$. In Section~2 in what follows, we shall supplement more background introduction on the MFG system. \smallskip The well-posedness of the MFG system \eqref{eq:mfg} is well understood in various settings. The first results date back to the original works of Lasry and Lions and have been presented in Lions \cite{Lions}; see also Caines-Huang-Malham\'e \cite{HCM06}. Much progress has been made since then. Regarding $F$ and $G$, one can consider both non-local and local dependences on the measure $m$. The well-posedness of the MFG system \eqref{eq:mfg} is known in Cardaliaguet \cite{Cardaliaguet}, Cardaliaguet-Porretta \cite{CarPor}, Carmona-Delarue \cite{CarDel-I}, Meszaros-Mou \cite{MM} in the case of nonlocal data $F$ and $G$; and Ambrose~\cite{Amb:18, Amb:21}, Cardaliaguet~\cite{Car}, Cardaliaguet-Graber~\cite{CarGra}, Cardaliaguet-Graber-Porretta-Tonon~\cite{CarGraPorTon}, Cardaliaguet-Porretta \cite{CarPor}, Cirant-Gianni-Mannucci~\cite{CirGiaMan}, Cirant-Goffi~\cite{CirGof}, Ferreira-Gomez~\cite{FerGom}, Ferreira-Gomez-Tada~\cite{FerGomTad}, Gomez-Pimentel-Sanchez Morgado~\cite{GomPimSan:15,GomPimSan:16}, Porretta \cite{Por} in the case that $F,G$ are locally dependent on the measure variable $m$. \smallskip We refer to the above well-posed MFG system \eqref{eq:mfg} as the forward problem. In this paper, we are mainly concerned with the inverse problem of determining the running cost $F$ or the terminal cost $G$ from knowledge of the total cost associated with the above MFG system. To that end, we introduce a measurement map $\mathcal{M}_{F,G}$ as follows: \begin{equation}\label{eq:M} \mathcal{M}_{F, G}(m_0(x))=u(x,t)\big|_{t=0},\quad x\in\mathbb{T}^n, \end{equation} where $m_0(x)\in\mathcal{O}_a$ and $u(x, t)$ are given in the MFG system \eqref{eq:mfg}. That is, for a given pair of $F$ and $G$, $\mathcal{M}_{F, G}$ sends a prescribed initial population distribution $m_0$ to $u(x, 0)$, which signifies the total cost of the MFG \eqref{eq:mfg}. In Section 3, we shall show that $\mathcal{M}_{F,G}$ is well-defined in proper function spaces. The inverse problem mentioned above can be formulated as: \begin{equation}\label{eq:ip1} \mathcal{M}_{F,G}\longrightarrow F\ \mbox{or/and}\ G. \end{equation} In the mean field game theory, the running cost $F$ and the terminal cost $G$ are critical for the agents to decide their strategies. However, in practice they are often partially known or totally unknown to the agents, while the total cost $u(\cdot, 0)$ can be measured at the end of the game. This is a major motivation for us to propose and study the inverse problem \eqref{eq:ip1}.
We believe our study could have many applications in the areas mentioned above. An example we have in mind is product pricing. Suppose that many companies in the market produce the same product and sell it to make profits. As customers, we do not have information on the precise production cost; however, we do know the selling price of the product at the end. Therefore, the recovery of the production cost is a typical inverse problem in the mean field game. In this paper, we are mainly concerned with the unique identifiability issue, which is of primary importance for a generic inverse problem. In its general formulation, the unique identifiability asks whether one can establish the following one-to-one correspondence: \begin{equation}\label{eq:ip2} \mathcal{M}_{F_1, G_1}=\mathcal{M}_{F_2, G_2}\quad\mbox{if and only if}\quad (F_1, G_1)=(F_2, G_2), \end{equation} where $(F_j, G_j)$, $j=1,2$, are two configurations. \smallskip Unlike the forward problem of MFGs, the theory of the inverse problem has not yet been well established. To the best of our knowledge, only some numerical studies have been conducted on the inverse problem of MFGs. This line of work starts from the recent paper of Ding-Li-Osher-Yin \cite{DingLiOsherYin}. The authors reconstructed the running cost from the observation of the distribution of the population and the agents' strategy. The running cost consists of a kinetic energy (with an unknown underlying metric) and a convolution-type running cost. The main goal there is to numerically recover the underlying metric and the convolution kernel. Another numerical work, Chow-Fung-Liu-Nurbekyan-Osher \cite{ChowFungLiuNurbekyanOsher}, considered a different inverse problem of MFGs. The work focused on the recovery of the running cost from a finite number of boundary measurements of the population profile and boundary movement. Both studies mentioned above consider MFG models in which the running cost is non-locally dependent on the measure variable $m$. We would also like to mention a related study in \cite{initial vector}, where the authors proved that if additional knowledge about the initial vector $Du(x,0)$ is given, then the solutions to the MFG system are unique. \smallskip In our study of the inverse problem \eqref{eq:ip1}, we are mainly concerned with data locally depending on the measure variable, i.e. $F(x, t, m(\cdot, t)):=F(x, t, m(x, t))$ and $G(x,m(\cdot, T)):=G(x,m(x, T))$. The model is motivated by traffic flow and crowd motion problems. For such problems, the cost depends only locally on the distribution of the population. We assume all the agents are rational and the observer only knows the total cost of the agents at the end. The main goal is to recover the running or/and terminal costs. Let us briefly introduce the main results we prove in the paper. When the Lagrangian is a kinetic energy, we first show that the terminal cost $G$ is uniquely identifiable by the measurement map $\mathcal{M}_{F,G}$, assuming the running cost $F$ is a-priori known. We emphasize that, for this inverse problem, we assume that the running and the terminal costs satisfy $F(x, t, 0)=G(x,0)=0$, and we justify that this assumption is necessary for both unique identifiability problems. Moreover, the running cost is allowed to be time-dependent. If the running cost is limited to the time-independent class, we further show that we can recover both the running and the terminal costs with the given measurement map $\mathcal{M}_{F,G}$.
Finally, we extend a large extent of the above unique identifiability results to general Lagrangians. To establish those theoretical unique identifiability results, we develop novel mathematical strategies that make full use of the intrinsic structure of the MFG system. Our study opens up a new field of research on inverse problems for mean field games with many potential developments. \medskip The rest of the paper is organized as follows. We introduce the admissibility assumptions on $F$ and $G$ and state the main results of this paper in Section 2. In Section 3, we establish certain well-posedness results of the forward MFG system, which shall be needed for the inverse problems. We discuss the admissibility assumptions in Section 4. By counter examples, we show that those assumptions are unobjectionable for the inverse problems. Finally, we show various unique identifiability results in Section 5 and some generalizations in Section 6. \section{Preliminaries and Statement of Main Results} \subsection{Notations and Basic Setting} As introduced earlier, we let $n\in\mathbb{N}$ and $\mathbb{T}^n:=\mathbb{R}^n\backslash\ \mathbb{Z}^n$ be the $n$-dimensional torus. Set $x=(x_1,x_2,\ldots, x_n)\in\mathbb{R}^n$. If $f(x): x\in\mathbb{T}^n\to \mathbb{R}$ is smooth and $l=(l_1,l_2,...,l_n)\in\mathbb{N}_0^n$ is a multi-index with $\mathbb{N}_0:=\mathbb{N}\cup\{0\}$, then $D^lf$ stands for the derivative $\frac{\p ^{l_1}}{\p x_1^{l_1}}...\frac{\p ^{l_n}}{\p x_n^{l_n}}f$. Given $\nu\in\mathbb{S}^{n-1}:=\{x\in\mathbb{R}^n; |x|=1\}$, we also denote by $\p_{\nu}f$ the directional derivative of $f$ in the direction $\nu$. For $k\in\mathbb{N}_0$ and $\alpha\in [0,1)$, we say $f\in C^{k+\alpha} (k\in\mathbb{N}_0)$ if $D^lf$ exists and $\alpha$- H\"older continuous for any $l\in\mathbb{N}_0^n$ with $|l|\leq k$. Define \begin{equation}\label{eq:pf1} C_+^{k+\alpha}(\mathbb{T}^n):=\{f(x)\in C^{k+\alpha}(\mathbb{T}^n) : f(x)\geq0 \}. \end{equation} It is remarked that the set $C_+^{k+\alpha}(\mathbb{T}^n)$ shall be needed in order to fulfil the probability measure constraint in our subsequent analysis; see also \eqref{eq:pm1}. For functions $f:\mathbb T^n\times (0,T)\to\mathbb R$, we say $f$ belongs to $C^{k+\alpha,\frac{k+\alpha}{2}}$ if $D^l D_t^jf$ exists for any $l\in\mathbb{N}_0^n$ and $j\in\mathbb{N}_0$ with $ |l|+2j\leq k$ and \[ \sup_{(x_1,t_1),(x_2,t_2)\in \mathbb T^n\times (0,T)}\frac{|D^lD_t^jf(x_1,t_1)-D^lD_t^jf(x_2,t_2)|}{|x_1-x_2|^\alpha+|t_1-t_2|^{\frac{\alpha}{2}}}<\infty, \] for any $l\in\mathbb N_0^n$ and $j\in\mathbb N_0$ with $|l|+2j=k$. Throughout the paper, for a function $f$ define on $\mathbb{T}^n$ or $\mathbb{T}^n\times(0,T)$, it means that it is a periodic-$1$ function with respect to the space variable $x_j$, $1\leq j\leq N$. That is, it is a periodic-$(1,1,\ldots,1)$ function with respect to $x\in \mathbb{R}^n$. \subsection{Mean Field Game} Let $\mathcal{P}(\mathbb T^n)$ and $\mathcal{P}(\mathbb R^n)$ denote the set of probability measures on $\mathbb T^n$ and $\mathbb R^n$ respectively. Let $(\Omega,\mathscr{F},\mathbb F,\mathbb P)$ be a filtered probability space; $B$ be an $\mathbb F$-adapted Brownian motion on $\mathbb R^n$; and we assume $\mathscr{F}_0$ is rich enough to support $\mathcal{P}(\mathbb T^n)$. For any $\mathscr{F}$-measurable random variable $\xi$, we denote the law of $\xi$ on $\mathbb R^n$ by $\mathcal{L}_\xi\in\mathcal{P}(\mathbb R^n)$ and the law of $\xi$ on $\mathbb T^n$ by $\mathcal{L}_{\xi+\mathbb Z^n}\in\mathcal{P}(\mathbb T^n)$. 
Moreover, for any sub-$\sigma$-algebra $\mathcal{G}\subset \mathscr{F}$ and any $m\in\mathcal P(\mathbb T^n)$, $\mathbb M(\mathcal{G};m)$ denotes the set of $\mathcal{G}$-measurable random variables $\xi$ on $\mathbb R^n$ such that $\mathcal{L}_{\xi+\mathbb Z^n}=m$. Our mean field game depends on the following data: \[ L:\mathbb T^n\times\mathbb R^n\to\mathbb R,\quad F:\mathbb T^n\times\mathcal{P}(\mathbb T^n)\to\mathbb R\quad\text{and}\quad G:\mathbb T^n\times\mathcal{P}(\mathbb T^n)\to\mathbb R. \] Let $T>0$. For any $t_0\in [0,T]$, we let $\mathscr{A}_{t_0}$ denote the set of admissible controls $\alpha:[t_0,T]\times\mathbb T^n\to\mathbb R^d$ which are Borel measurable, and uniformly Lipschitz continuous in $x$. We also denote $B_t^{t_0}:=B_t-B_{t_0}$, $B_t^{0,t_0}:=B_t^0-B_{t_0}^0$, $t\in[t_0,T]$. Given $x\in\mathbb T^n$, $\alpha\in\mathscr{A}_{t_0}$ and the flow of probability measures $\{\rho_t\}_{t\in[0,T]}\subset \mathcal{P}(\mathbb T^n)$ with $\rho_0=m_0$, the state of an agent satisfies the following controlled SDE (stochastic differential equation) on $[t_0,T]$: \begin{equation}\label{eq:ind} X_t^{t_0,x,\alpha}=x+\int_{t_0}^t\alpha(s,X_s^{t_0,x,\alpha})ds+\sqrt{2}B_t^{t_0}+\mathbb Z^n. \end{equation} Consider the conditionally expected cost for the mean field game: \begin{eqnarray}\label{eq:cost} &&J(t_0,x;\{\rho_{t}\}_{t\in [0,T]},\alpha):=\inf_{\alpha\in\mathscr{A}_{t_0}}\mathbb E\Big[\int_{t_0}^TL(X_t^{t_0,x,\alpha},\alpha(t,X_t^{t_0,x,\alpha}))+F(X_t^{t_0,x,\alpha},\rho_t)dt\nonumber\\ &&\qquad\qquad\qquad\qquad\qquad\qquad+G(X_T^{t_0,x,\alpha},\rho_T)\Big]. \end{eqnarray} \begin{defi} We say that $(\alpha^*,\{\rho_t^{*}\}_{t\in[0,T]})$ is a mean field equilibrium (MFE) if it satisfies the following properties:\\ (i) $\rho_0^*=m_0$;\\ (ii) for any $\xi_0\in \mathbb{M}(\mathcal{F}_0,m_0)$, we have $\mathcal{L}_{X_t^{0,\xi_0,\alpha^*}}=\rho_t^{*}$ where \[ X_t^{0,\xi_0,\alpha^*}=\xi_0+\int_{0}^t\alpha^*(s,X_s^{0,\xi_0,\alpha^*})ds +\sqrt{2}B_t+\mathbb Z^n; \] (iii) for any $(t_0,x)\in[0,T]\times\mathbb T^n$, we have \begin{equation*} J(t_0,x;\{\rho_t^*\}_{t\in [0,T]},\alpha^*)=\inf_{\alpha\in\mathscr{A}_{t_0}}J(t_0,x;\{\rho_t^*\}_{t\in [0,T]},\alpha),\quad \text{for $\rho^*_{t_0}$-a.e. $x\in\mathbb T^n$.} \end{equation*} \end{defi} When there is a unique MFE $(\alpha^*,\{\rho_t^{*}\}_{t\in[0,T]})$, then the mean field game leads to the following value function of the agent: \[ u(t_0,x):=J(t_0,x;\{\rho_t^*\}_{t\in [0,T]},\alpha^*). \] Let $m(\cdot,t_0)=\rho_{t_0}^*$. Then $(u,m)$ solves the following mean field game system (cf. \cite{CarPor,Lions}): \begin{equation}\label{main_equation} \begin{cases} \displaystyle{-\p_tu(x,t)-\Delta u(x,t)+\frac 1 2 |\nabla u(x, t)|^2= F(x,t,m(x,t)),}& \text{ in } \mathbb{T}^n\times(0,T),\smallskip\\ \p_t m(x,t)-\Delta m(x,t)-{\rm div}\big(m(x,t)\nabla u(x,t)\big)=0,&\text{ in }\mathbb{T}^n\times(0,T),\smallskip \\ u(x,T)=G(x,m(T,x)), \quad m(x,0)=m_0(x) & \text{ in } \mathbb{T}^n, \end{cases} \end{equation} where as mentioned earlier, periodic boundary conditions are imposed on $\partial \mathbb{T}^n$ for $u$ and $m$. \subsection{Inverse Problems} We recall the probability measure constraint $\mathcal{O}_a$ introduced in \eqref{eq:pm1}. Define the set \[ \begin{split} \mathcal{E}_{F,G}:=& \{m_0\in C^{2+\alpha}(\mathbb{T}^n)\cap\mathcal{O}_a : \text{the system }\eqref{main_equation}\\ & \text{ has a unique solution in the sense described in Section 3 in what follows } \}. 
\end{split} \] We introduce the following measurement map $\mathcal{M}_{F,G}$: \begin{align}\label{eq:G} \begin{split} \mathcal{M}_{F,G}: \mathcal{E}_{F,G} & \rightarrow L^2(\mathbb{T}^n) , \\ m_0&\mapsto \Big(x\in\mathbb{T}^n \mapsto u(x,t) \Big|_{t=0}\Big), \end{split} \end{align} where $u(x,t)$ is the solution of $\eqref{main_equation}$ with initial data $m(x,0)=m_0(x).$ In the first setup of our study, we consider the case that $F$ and $G$ belong to an analytic class. Henceforth, we set \begin{equation}\label{eq:Q} Q=\overline{\mathbb{T}^n\times(0,T) }, \end{equation} be the closure of $\mathbb{T}^n\times(0,T).$ \begin{defi}\label{Admissible class1} We say $U(x,t,z):\mathbb{T}^n\times \mathbb{R}\times\mathbb{C}\to\mathbb{C}$ is admissible, denoted by $U\in \mathcal{A}$, if it satisfies the following conditions: \begin{enumerate} \item[(i)]~The map $z\mapsto U(\cdot,\cdot,z)$ is holomorphic with value in $C^{2+\alpha,1+\frac{\alpha}{2}}(Q)$ for some $\alpha\in(0,1)$; \item[(ii)] $U(x,t,0)=0$ for all $(x,t)\in\mathbb{T}^n\times (0,T).$ \end{enumerate} Clearly, if (1) and (2) are fulfilled, then $U$ can be expanded into a power series as follows: \begin{equation}\label{eq:F} U(x,t,z)=\sum_{k=1}^{\infty} U^{(k)}(x,t)\frac{z^k}{k!}, \end{equation} where $ U^{(k)}(x,t)=\frac{\p^k U}{\p z^k}(x,t,0)\in C^{2+\alpha,1+\frac{\alpha}{2}}(Q).$ \end{defi} \begin{defi}\label{Admissible class2} We say $U(x,z):\mathbb{T}^n\times\mathbb{C}\to\mathbb{C}$ is admissible, denoted by $U\in\mathcal{B}$, if it satisfies the following conditions: \begin{enumerate} \item[(i)] The map $z\mapsto U(\cdot,z)$ is holomorphic with value in $C^{2+\alpha}(\mathbb{T}^n)$ for some $\alpha\in(0,1)$; \item[(ii)] $U(x,0)=0$ for all $x\in\mathbb{T}^n.$ \end{enumerate} Clearly, if (1) and (2) are fulfilled, then $U$ can be expanded into a power series as follows: \begin{equation}\label{eq:G} U(x,z)=\sum_{k=1}^{\infty} U^{(k)}(x)\frac{z^k}{k!}, \end{equation} where $ U^{(k)}(x)=\frac{\p^kU}{\p z^k}(x,0)\in C^{2+\alpha}(\mathbb{T}^n).$ \end{defi} \begin{rmk}\label{rem:1} The admissibility conditions in Definitions~\ref{Admissible class1} and \ref{Admissible class2} shall be imposed as a-priori conditions on the unknowns $F$ and $G$ in what follows for our inverse problem study. It is remarked that as noted earlier that both $F$ and $G$ are functions of real variables. However, for technical reasons, we extend the functions to the complex plane with respect to the $z$-variable, namely $U(\cdot,z)$ and $ U(\cdot,\cdot,z)$, and assume that they are holomorphic as functions of the complex variable $z$. This also means that we shall assume $F$ and $G$ are restrictions of those holomorphic functions to the real line. This technical assumption shall be used to show the well-posedness of the MFG system in section $\ref{section wp}.$ Throughout the paper, we also assume that in the series expansions \eqref{eq:F} and \eqref{eq:G}, the coefficient functions $U^{(k)}$ are real-valued. \end{rmk} \begin{rmk} We would like to emphasise that the zero conditions, namely the admissibility conditions (ii) in Definitions~\ref{Admissible class1} and \ref{Admissible class2}, are unobjectionable to our inverse problem study. In fact, in Section~4 in what follows, we shall construct several MFG examples where the zero admissibility conditions are violated and the associated inverse problems have no unique identifiability results. 
\end{rmk} We are in a position to state the first unique recovery result for the inverse problem \eqref{eq:ip1}, which shows that one can recover the terminal cost $G$ from the measurement map $\mathcal{M}$. Here and also in what follows, we sometimes drop the dependence on $F, G$ of $\mathcal{M}$, and in particular in the case that one quantity is a-priori known, say $\mathcal{M}_F$ or $\mathcal{M}_G$, which should be clear from the context. \begin{thm}\label{der g} Assume $F \in\mathcal{A}$, $G_j\in\mathcal{B}$ ($j=1,2$). Let $\mathcal{M}_{G_j}$ be the measurement map associated to the following system: \begin{equation}\label{eq:mfg2} \begin{cases} -\p_tu(x,t)-\Delta u(x,t)+\frac 1 2 {|\nabla u(x,t)|^2}= F(x,t,m(x,t)),& \text{ in } \mathbb{T}^n\times (0,T),\medskip\\ \p_t m(x,t)-\Delta m(x,t)-{\rm div}(m(x,t)\nabla u(x,t))=0,&\text{ in }\mathbb{T}^n\times(0,T),\medskip\\ u(x,T)=G_j(x,m(x,T)), & \text{ in } \mathbb{T}^n,\medskip\\ m(x,0)=m_0(x), & \text{ in } \mathbb{T}^n.\\ \end{cases} \end{equation} If for any $m_0\in C^{2+\alpha}(\mathbb{T}^n)\cap\mathcal{O}_a$, one has $$\mathcal{M}_{G_1}(m_0)=\mathcal{M}_{G_2}(m_0),$$ then it holds that $$G_1(x,z)=G_2(x,z)\ \text{ in } \mathbb{T}^n\times \mathbb{R}.$$ \end{thm} Notice that in Theorems \ref{der g} we allow $F$ to depend on time. If we assume $F$ depends only on $x$ and $m(x,t)$, we can determine $F$ and $G$ simultaneously. \begin{thm}\label{der F,g} Assume $F_j,G_j \in\mathcal{B}$ ($j=1,2$) . Let $\mathcal{M}_{F_j,G_j}$ be the measurement map associated to the following system: \begin{equation}\label{eq:mfg3} \begin{cases} -\p_tu(x,t)-\Delta u(x,t)+\frac 1 2 {|\nabla u(x,t)|^2}= F_j(x,m(x,t)),& \text{ in }\mathbb{T}^n\times(0,T),\medskip\\ \p_t m(x,t)-\Delta m(x,t)-{\rm div}(m(x,t)\nabla u(x,t))=0,&\text{ in }\mathbb{T}^n\times (0,T),\medskip\\ u(x,T)=G_j(x,m(x,T)), & \text{ in } \mathbb{T}^n,\medskip\\ m(x,0)=m_0(x), & \text{ in } \mathbb{T}^n.\\ \end{cases} \end{equation} If for any $m_0\in C^{2+\alpha}(\mathbb{T}^n)\cap\mathcal{O}_a$, one has $$\mathcal{M}_{F_1,G_1}(m_0)=\mathcal{M}_{F_2,G_2}(m_0),$$ then it holds that $$(G_1(x,z),F_1(x,z))=(G_2(x,z),F_2(x,z)) \ \text{ in } \mathbb{T}^n\times \mathbb{R}.$$ \end{thm} In Theorems~ \ref{der g} and \ref{der F,g}, the Lagrangian is of a quadratic form, namely $H(x,\nabla u)$ in \eqref{eq:mfg} is of the form $\frac 1 2 |\nabla u|^2$ (see \eqref{eq:mfg2}--\eqref{eq:mfg3}). In fact, we can extend a large extent of the results in those theorems to the case with a general Lagrangian. We choose to postpone the statement of those results in Section~6 along with their proofs. \section{Well-posedness of the forward problems}\label{section wp} In this section, we show the well-posedness of the MFG systems in our study. The key point is the infinite differentiability of the equation with respect to a given (small) input $m_0(x).$ As a preliminary, we recall the well-posedness result for linear parabolic equations \cite{Lady}\cite[Lemma 3.3]{ CarDelLasLio} . \begin{lem}\label{linear app unique} Consider the parabolic equation \begin{equation}\label{linearapp wellpose} \begin{cases} -\p_tv(x,t)-\Delta v(x,t)+{\rm div} ( a(x,t)\cdot\nabla v(x,t))= f(x,t),& \text{ in }\mathbb{T}^n\times(0,T),\medskip\\ v(x,0)=v_0(x), & \text{ in } \mathbb{T}^n, \end{cases} \end{equation} where the periodic boundary condition is imposed on $v$. 
Suppose $a,f\in C^{\alpha,\frac{\alpha}{2}}(Q) $ and $v_0\in C^{2+\alpha}(\mathbb{T}^n)$, then $\eqref{linearapp wellpose}$ has a unique classical solution $v\in C^{2+\alpha,1+\frac{\alpha}{2}}(Q).$ \end{lem} The following result is somewhat standard (especially Theorem \ref{local_wellpose}-(a)), while our technical conditions could be different from those in the literature. For completeness we provide a proof here. The idea is to differentiate the equation infinitely many times with respect to the (small) input $m_0(x)$. We recall that $Q$ is defined in \eqref{eq:Q} and periodic boundary conditions are imposed to the MFG systems. The following proof is based on the implicit functions theorem for Banach spaces. One may refer to \cite{Pos.J} for more related details about the theory of maps between Banach spaces. \begin{thm}\label{local_wellpose} Suppose that $F\in\mathcal{A}$ and $G\in\mathcal{B}$. The following results holds: \begin{enumerate} \item[(a)] There exist constants $\delta>0$ and $C>0$ such that for any \[ m_0\in B_{\delta}(C^{2+\alpha}(\mathbb{T}^n)) :=\{m_0\in C^{2+\alpha}(\mathbb{T}^n): \|m_0\|_{C^{2+\alpha}(\mathbb{T}^n)}\leq\delta \}, \] the MFG system $\eqref{main_equation}$ has a solution $u \in C^{2+\alpha,1+\frac{\alpha}{2}}(Q)$ which satisfies \begin{equation}\label{eq:nn1} \|(u,m)\|_{ C^{2+\alpha,1+\frac{\alpha}{2}}(Q)}:= \|u\|_{C^{2+\alpha,1+\frac{\alpha}{2}}(Q)}+ \|m\|_{C^{2+\alpha,1+\frac{\alpha}{2}}(Q)}\leq C\|m_0\|_{ C^{2+\alpha}(\mathbb{T}^n)}. \end{equation} Furthermore, the solution $(u,m)$ is unique within the class \begin{equation}\label{eq:nn2} \{ (u,m)\in C^{2+\alpha,1+\frac{\alpha}{2}}(Q)\times C^{2+\alpha,1+\frac{\alpha}{2}}(Q): \|(u,m)\|_{ C^{2+\alpha,1+\frac{\alpha}{2}}(Q)}\leq C\delta \}. \end{equation} \item[(b)] Define a function \[ S: B_{\delta}(C^{2+\alpha}(\mathbb{T}^n))\to C^{2+\alpha,1+\frac{\alpha}{2}}(Q)\times C^{2+\alpha,1+\frac{\alpha}{2}}(Q)\ \mbox{by $S(m_0):=(u,v)$}. \] where $(u,v)$ is the unique solution to the MFG system \eqref{main_equation}. Then for any $m_0\in B_{\delta}(C^{2+\alpha}(\mathbb T^n))$, $S$ is holomorphic. \end{enumerate} \end{thm} \begin{proof} Let \begin{align*} &X_1:= C^{2+\alpha}(\mathbb{T}^n ), \\ &X_2:=C^{2+\alpha,1+\frac{\alpha}{2}}(Q)\times C^{2+\alpha,1+\frac{\alpha}{2}}(Q),\\ &X_3:=C^{2+\alpha}(\mathbb{T}^n)\times C^{2+\alpha}(\mathbb{T}^n)\times C^{\alpha,\frac{\alpha}{2}}(Q )\times C^{\alpha,\frac{\alpha}{2}}(Q ), \end{align*} and we define a map $\mathscr{K}:X_1\times X_2 \to X_3$ by that for any $(m_0,\tilde u,\tilde m)\in X_1\times X_2$, \begin{align*} & \mathscr{K}( m_0,\tilde u,\tilde m)(x,t)\\ :=&\big( \tilde u(x,T)-G(x,\tilde m(x,T)), \tilde m(x,0)-m_0(x) , -\p_t\tilde u(x,t)-\Delta \tilde u(x,t)\\ &+\frac{|\nabla \tilde u(x,t)|^2}{2}- F(x,t,\tilde m(x,t)), \p_t \tilde m(x,t)-\Delta \tilde m(x,t)-{\rm div}(\tilde m(x,t)\nabla \tilde u(x,t)) \big) . \end{align*} First, we show that $\mathscr{K} $ is well-defined. Since the H\"older space is an algebra under the point-wise multiplication, we have $|\nabla u|^2, {\rm div}(m(x,t)\nabla u(x,t)) \in C^{\alpha,\frac{\alpha}{2}}(Q ).$ By the Cauchy integral formula, \begin{equation}\label{eq:F1} F^{(k)}\leq \frac{k!}{R^k}\sup_{|z|=R}\|F(\cdot,\cdot,z)\|_{C^{\alpha,\frac{\alpha}{2}}(Q ) },\ \ R>0. 
\end{equation} Then there is $L>0$ such that for all $k\in\mathbb{N}$, \begin{equation}\label{eq:F2} \left\|\frac{F^{(k)}}{k!}m^k\right\|_{C^{\alpha,\frac{\alpha}{2}}(Q )}\leq \frac{L^k}{R^k}\|m\|^k_{C^{\alpha,\frac{\alpha}{2}}(Q )}\sup_{|z|=R}\|F(\cdot,\cdot,z)\|_{C^{\alpha,\frac{\alpha}{2}}(Q ) }. \end{equation} By choosing $R\in\mathbb{R}_+$ large enough and by virtue of \eqref{eq:F1} and \eqref{eq:F2}, it can be seen that the series \eqref{eq:F} converges in $C^{\alpha,\frac{\alpha}{2}}(Q )$ and therefore $F(x,m(x,t))\in C^{\alpha,\frac{\alpha}{2}}(Q ).$ Similarly, we have $G(x,m(x,T))\in C^{2+\alpha}(\mathbb{T}^n).$ This implies that $\mathscr{K} $ is well-defined. Let us show that $\mathscr{K}$ is holomorphic. Since $\mathscr{K}$ is clearly locally bounded, it suffices to verify that it is weakly holomorphic; see \cite[P.133 Theorem 1]{Pos.J}. That is we aim to show the map $$\lambda\in\mathbb C \mapsto \mathscr{K}((m_0,\tilde u,\tilde m)+\lambda (\bar m_0,\bar u,\bar m))\in X_3,\quad\text{for any $(\bar m_0,\bar u,\bar m)\in X_1\times X_2$}$$ is holomorphic. In fact, this follows from the condition that $F\in\mathcal{A}$ and $G\in\mathcal{B}$. Note that $ \mathscr{K}(0,0,0)=0$. Let us compute $\nabla_{(\tilde u,\tilde m)} \mathscr{K} (0,0,0)$: \begin{equation}\label{Fer diff} \begin{aligned} \nabla_{(\tilde u,\tilde m)} \mathscr{K}(0,0,0) (u,m) =(& u|_{t=T}-G^{(1)}m(x,T), m|_{t=0}, \\ &-\p_tu(x,t)-\Delta u(x,t)-F^{(1)}m, \p_t m(x,t)-\Delta m(x,t)). \end{aligned} \end{equation} By Lemma $\ref{linear app unique}$, if $\nabla_{(\tilde u,\tilde m)} \mathscr{K} (0,0,0)=0$, we have $ \tilde m=0$ and then $\tilde u=0$. Therefore, the map is injective. On the other hand, letting $ (r(x),s(x,t))\in C^{2+\alpha}(\mathbb{T}^n)\times C^{\alpha,\frac{\alpha}{2}}(Q ) $, and by Lemma $\ref{linear app unique}$, there exists $a(x,t)\in C^{2+\alpha,1+\frac{\alpha}{2}}(Q)$ such that \begin{equation*} \begin{cases} \p_t a(x,t)-\Delta a(x,t)=s(x,t) &\text{ in } \mathbb{T}^n,\medskip\\ a(x,0)=r(x) & \text{ in } \mathbb{T}^n . \end{cases} \end{equation*} Then letting $ (r'(x),s'(x,t))\in C^{2+\alpha}(\mathbb{T}^n)\times C^{\alpha,\frac{\alpha}{2}}(Q ) $, one can show that there exists $ b(x,t)\in C^{2+\alpha,1+\frac{\alpha}{2}}(Q)$ such that \begin{equation*} \begin{cases} -\p_t b(x,t)-\Delta b(x,t)-F^{(1)}a=s'(x,t) &\text{ in } \mathbb{T}^n,\medskip\\ b(x,T)=G^{(1)}a(x,T)+r'(x) & \text{ in } \mathbb{T}^n. \end{cases} \end{equation*} Therefore, $\nabla_{(\tilde u,\tilde m)} \mathscr{K} (0,0,0)$ is a linear isomorphism between $X_2$ and $X_3$. Hence, by the implicit function theorem, there exist $\delta>0$ and a unique holomorphic function $S: B_{\delta}(\mathbb{T}^n)\to X_2$ such that $\mathscr{K}(m_0,S(m_0))=0$ for all $m_0\in B_{\delta}(\mathbb{T}^n) $. By letting $(u,m)=S(m_0)$, we obtain the unique solution of the MFG system \eqref{main_equation}. Let $ (u_0,v_0)=S(0)$. Since $S$ is Lipschitz, we know that there exist constants $C,C'>0$ such that \begin{equation*} \begin{aligned} &\|(u,m)\|_{ C^{2+\alpha,1+\frac{\alpha}{2}}(Q)^2}\\ \leq& C'\|m_0\|_{B_{\delta}(\mathbb{T}^n)} +\|u_0\|_ { C^{2+\alpha,1+\frac{\alpha}{2}}(Q)}+\|v_0\|_{ C^{2+\alpha,1+\frac{\alpha}{2}}(Q)}\\ \leq& C \|m_0\|_{B_{\delta}(\mathbb{T}^n)}. \end{aligned} \end{equation*} The proof is complete. \end{proof} \begin{rmk} Regarding the local well-posedness, several remarks are in order. 
\begin{enumerate} \item[(a)] The conditions on $F$ and $G$ (Definition \ref{Admissible class1}-(i) and $G$ satisfies Definition \ref{Admissible class2}-(i) ) are not essential and it is for convenience to apply implicit function theorem . Also, the analytic conditions on $F$ and $G$ can be replayed by weaker regularity conditions in the proof of the local well-posedness \cite{Lions} , but these conditions will be utilized in our inverse problem study. \item[(b)] In order to apply the higher order linearization method that shall be developed in Section 5 for the inverse problems, we need the infinite differentiability of the equation with respect to the given input $m_0(x)$, it is shown by the fact that the solution map $S$ is holomorphic. \item[(c)] In the proof of Theorem $\ref{local_wellpose}$, we show the solution map $S$ is holomorphic. As a corollary, the measurement map $\mathcal{M}=(\pi_1\circ S)\Big|_{t=0}$ is also holomorphic, where $\pi_1$ is the projection map with respect to the first variable. \end{enumerate} \end{rmk} \section{Non-uniqueness and discussion on the zero admissibility conditions} In this section, we show that the zero admissibility conditions, namely $F(x,t, 0)=0$ and $G(x,0)=0$ in Definitions~\ref{Admissible class1} and \ref{Admissible class2} are unobjectionably necessary if one intends to uniquely recover $F$ or $G$ by knowledge of the measurement operator $\mathcal{M}_{F, G}$ for the inverse problem \eqref{eq:ip1}. For simplicity, we only consider the case that the space dimension $n=1$ without the periodic boundary conditions. That is, we consider the following MFG system: \begin{equation}\label{dim1} \begin{cases} -\p_tu_j(x,t)-\p_{xx} u_j(x,t)+\frac 1 2 {|\p_x u_j(x)|^2}= F_j(x,t,v_j(x,t)),& \text{ in } \mathbb{R}\times (0,T),\medskip\\ \p_t v_j(x,t)-\p_{xx} v_j(x,t)-\p_x(v_j(x,t)\p_x u_j(x,t))=0,&\text{ in } \mathbb{R}\times(0,T),\medskip\\ u_j(x,T)= G_j(x,v_j(x,T)), & \text{ in } \mathbb{R},\medskip\\ v_j(x,0)=m_0(x), & \text{ in } \mathbb{R}.\\ \end{cases} \end{equation} Furthermore, we assume $T$ is small enough such that the solution of the MFG system \eqref{dim1} is unique \cite{Amb:18,Amb:21,Amb22,Cira,Lions}. In what follows, we construct examples to show that if the zero admissibility conditions are violated then the corresponding inverse problems do not have uniqueness. \begin{prop} Consider the system $\eqref{dim1}$. There exist $F_1=F_2\in C^{\infty}(\mathbb{R}\times\mathbb{R}\times\mathbb{R})$ and $G_1\neq G_2\in C^{\infty}(\mathbb{R}\times\mathbb{R})$ (but we do not have $G_1(x,0)=G_2(x,0)=0$) such that the corresponding two systems admit the same measurement map, i.e. $\mathcal{M}_{G_1}=\mathcal{M}_{G_2}$. \end{prop} \begin{proof} Set \[ F_1=F_2=-\sin(x)+\frac{1}{4}(e^t-1)^2\cos^2(x), \] and \[ G_1=(e^T-1)\sin(x),\quad G_2=(1-e^T)\sin(x). \] It can be directly verified that \[ u_1(x,t)=(e^t-1)\sin(x)\quad\mbox{and}\quad u_2(x,t)=(1-e^t)\sin(x), \] satisfy the corresponding system. In this case, we have $\mathcal{M}_{G_1}(m_0)=\mathcal{M}_{G_2}(m_0)=0$ for any admissible $m_0$. \end{proof} \begin{prop}\label{2} Consider the system $\eqref{dim1}$. There exist $G_1=G_2\in C^{\infty}(\mathbb{R}\times\mathbb{R})$ and $F_1\neq F_2\in C^{\infty}(\mathbb{R}\times\mathbb{R}\times\mathbb{R})$ (but we do not have $F_j(x,t,0)=0$, $j=1,2$) such that the corresponding two systems admit the same measurement map, i.e. $\mathcal{M}_{F_1,G_1}=\mathcal{M}_{F_2,G_2}$. 
\end{prop} \begin{proof} Set \[ F_1=-x(2t-T)+\frac{t^2(t-T)^2}{2},\quad F_2=-2x(2t-T)+2t^2(t-T)^2, \] and \[ G_1=G_2=0. \] Here, it is noted that $F_1$ and $F_2$ are independent of $v$. In such a case, it is straightforward to verify that $u_j(x,t)=jxt(t-T)$ is the solution of the corresponding system \eqref{dim1}. Clearly, one has $\mathcal{M}_{F_1}(m_0)=\mathcal{M}_{F_2}(m_0)=0$ for any admissible $m_0$. \end{proof} Moreover, we can find $F_1, F_2\in C^{\infty}(\mathbb{R}\times\mathbb{R})$ which are independent of $t$ such that Proposition $\ref{2}$ holds. \begin{proof} Define $$Lu_j:=-\p_tu(x,t)-\p_{xx} u(x,t)+\frac{|\p_x u|^2}{2}.$$ It is sufficient for us to show that there exist $u_1(x,t),u_2(x,t)$ such that \begin{enumerate} \item[(1)] $L u_1\neq L u_2$ and $\p_t (Lu_j)=0 $ for $ j=1,2$; \item[(2)] $u_1(x,0)=u_2(x,0)$ and $u_1(x,T)=u_2(x,T)$. \end{enumerate} In fact, if this is true, we can set $F_j= Lu_j$ and $G(x)=u_1(x,T)$. Then one has $G_1=G_2.$ Without loss of generality, we assume $T=1.$ Let $p(t)$ be a non-zero solution of the following ordinary differential equation (ODE): \begin{equation*} ( \ln( p'(t)))'=\frac{\sqrt{1+4t}}{2}, \end{equation*} and $q(t)$ be a solution of the ODE: \begin{equation*} \begin{cases} &2q'(t)+\sqrt{1+4t}\, q''(t)=p(t)p'(t)\sqrt{1+4t},\medskip\\ &q(0)=0. \end{cases} \end{equation*} With $p(t)$ and $q(t)$ given above, we can set \[ u_1(x,t)=p(t(t-1))x+q(t(t-1))\quad\mbox{and}\quad u_2(x,t)=q(t(t-1))x+2q(t(t-1)). \] It can be directly verified that $u_1$ and $u_2$ fulfil the requirements (1) and (2) stated above. \end{proof} Finally, we would like to remark that by following a similar spirit, one may construct similar examples as those in Proposition 4.1 and 4.2 to the MFG system \eqref{dim1} associated with a periodic boundary condition. However, this shall involve a bit more tedious calculations and is not the focus of the current study. We choose not to explore further. As also stated earlier, it is unobjectionable to see that the zero admissibility conditions are necessary for the inverse problem study. \section{Proofs of Theorems~\ref{der g} and \ref{der F,g}} In this section, we present the proofs of the three main theorems, namely Theorems, \ref{der g} and \ref{der F,g}. To that end, we first introduce a higher order linearization procedure associated with the MFG system \eqref{main_equation} which shall be repeatedly used in the proofs. We also refer to \cite{LLLZ} where a higher order linearization procedure was considered for a semi-linear parabolic equation. Throughout the current section, if $f$ is a function defined on $\mathbb{T}^n$, we still use $f$ to denote the function obtained by extending $f$ to $\mathbb{R}^n$ periodically. \subsection{Higher-order linearization}\label{HLM} This method depends on the infinite differentiability of the solution with respect to a given input $m_0(x)$, which was derived in Theorem~$\ref{local_wellpose}$. In fact, Cardaliaguet, Delarue, Lasry and Lions developed this linearization method in some probability measure space; see \cite{Book11}. However, the setup of our study is not completely covered by the discussion in \cite{Book11} and for completeness and self-containedness, we show the process in what follows. First, we introduce the basic setting of this higher order linearization method. Consider the system $\eqref{main_equation}$. 
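Heuristically, and only to orient the reader (this summary is not used in the proofs below): since the solution map $S$ from Theorem~\ref{local_wellpose} is holomorphic, the solution of \eqref{main_equation} depends analytically on the small parameters $\varepsilon_1,\dots,\varepsilon_N$ introduced in the next paragraph, and the functions $u^{(l)}$, $m^{(l)}$, $u^{(1,2)}$, $m^{(1,2)}$ defined in this subsection are precisely the Taylor coefficients in the expansion
\begin{equation*}
u(x,t;\varepsilon)=\sum_{l=1}^{N}\varepsilon_l\, u^{(l)}(x,t)+\frac{1}{2}\sum_{l,k=1}^{N}\varepsilon_l\varepsilon_k\, u^{(l,k)}(x,t)+O(|\varepsilon|^3),
\end{equation*}
with the obvious meaning of $u^{(l,k)}$, and similarly for $m(x,t;\varepsilon)$ (recall that $(u(x,t;0),m(x,t;0))=(0,0)$ when $F\in\mathcal{A}$ and $G\in\mathcal{B}$). Matching like powers of $\varepsilon$ is what produces the hierarchy of linear systems derived below.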
Let $$m_0(x;\varepsilon)=\sum_{l=1}^{N}\varepsilon_lf_l,$$ where $f_l\in C_+^{2+\alpha}(\mathbb{T}^n)$ and $\varepsilon=(\varepsilon_1,\varepsilon_2,...,\varepsilon_N)\in\mathbb{R}_+^N$ ($\mathbb{R}_+:=\{x\in\mathbb{R}: x\geq0\}$) with $|\varepsilon|=\sum_{l=1}^{N}|\varepsilon_l|$ small enough. Then $m_0\in C^{2+\alpha}(\mathbb{T}^n)\cap\mathcal{O}_a$. By Theorem $\ref{local_wellpose}$, there exists a unique solution $(u(x,t;\varepsilon),m(x,t;\varepsilon) )$ of $\eqref{main_equation}$. Let $(u(x,t;0),m(x,t;0) ) $ be the solution of $\eqref{main_equation}$ when $\varepsilon=0.$ Let $$u^{(1)}:=\p_{\varepsilon_1}u|_{\varepsilon=0}=\lim\limits_{\varepsilon\to 0}\frac{u(x,t;\varepsilon)-u(x,t;0) }{\varepsilon_1},$$ $$m^{(1)}:=\p_{\varepsilon_1}m|_{\varepsilon=0}=\lim\limits_{\varepsilon\to 0}\frac{m(x,t;\varepsilon)-m(x,t;0) }{\varepsilon_1}.$$ The idea is that we consider a new system of $(u^{(1)},m^{(1)}).$ If $F\in\mathcal{A}$, $g\in\mathcal{B}$, we have \[ (u(x,t;0),m(x,t;0) )=(0,0) \] and hence \begin{align*} &-\p_tu^{(1)}(x,t)-\Delta u^{(1)}(x,t)\\ =& \lim\limits_{\varepsilon\to 0}\frac{1}{\varepsilon_1}[\frac{|\nabla u(x,t;\varepsilon)|^2-|\nabla u(x,t;0)|^2}{2}+ F(x,t,m(x,t;\varepsilon))-F(x,t;m(x,t;0)) ]\\ =&\nabla u^{(1)}\cdot (\lim\limits_{\varepsilon\to 0}\frac{\nabla u(x,t;\varepsilon)+\nabla u(x,t;0)}{2})+ \lim\limits_{\varepsilon\to 0}\frac{1}{\varepsilon_1}[ F^{(1)}(x,t)(m(x,t;\varepsilon)-m(x,t;0))]\\ =&F^{(1)}(x,t)m^{(1)}(x,t). \end{align*} Similarly, we can compute \begin{align*} &\p_t m^{(1)}(x,t)-\Delta m^{(1)}(x,t)\\ =&\lim\limits_{\varepsilon\to 0}\frac{1}{\varepsilon_1}[{\rm div} ( m(x,t;\varepsilon)\nabla u(x,t;\varepsilon)-m(x,t;0)\nabla u(x,t;0) )]\\ =&\lim\limits_{\varepsilon\to 0}\frac{1}{\varepsilon_1} [ \nabla m(x,t;\varepsilon)\cdot\nabla u(x,t;\varepsilon)+m(x,t;\varepsilon)\Delta u(x,t;\varepsilon) -\\ & \nabla m(x,t;0)\cdot\nabla u(x,t;0)-m(x,t;0)\Delta u(x,t;0) ]\\ =&0. \end{align*} Now, we have that $(u_{j}^{(1)},m_{j}^{(1)} )$ satisfies the following system: \begin{equation}\label{linear l=1,eg} \begin{cases} -\p_tu^{(1)}(x,t)-\Delta u^{(1)}(x,t)= F^{(1)}(x,t)m^{(1)}(x,t),& \text{ in } \mathbb{T}^n\times (0,T),\medskip\\ \p_t m^{(1)}(x,t)-\Delta m^{(1)}(x,t)=0,&\text{ in }\mathbb{T}^n\times (0,T),\medskip\\ u^{(1)}_j(x,T)=G^{(1)}(x)m^{(1)}(x,T), & \text{ in } \mathbb{T}^n,\medskip\\ m^{(1)}_j(x,0)=f_1(x). & \text{ in } \mathbb{T}^n.\\ \end{cases} \end{equation} Then we can define $$u^{(l)}:=\p_{\varepsilon_l}u|_{\varepsilon=0}=\lim\limits_{\varepsilon\to 0}\frac{u(x,t;\varepsilon)-u(x,t;0) }{\varepsilon_l},$$ $$m^{(l)}:=\p_{\varepsilon_l}m|_{\varepsilon=0}=\lim\limits_{\varepsilon\to 0}\frac{m(x,t;\varepsilon)-m(x,t;0) }{\varepsilon_l},$$ for all $l\in\mathbb{N}$ and obtain a sequence of similar systems. In the proof of Theorem $\ref{der g}$ in what follows, we recover the first Taylor coefficient of $F$ or $G$ by considering this new system $\eqref{linear l=1,eg}$. In order to recover the higher order Taylor coefficients, we consider \begin{equation}\label{eq:ht1} u^{(1,2)}:=\p_{\varepsilon_1}\p_{\varepsilon_2}u|_{\varepsilon=0}, m^{(1,2)}:=\p_{\varepsilon_1}\p_{\varepsilon_2}m|_{\varepsilon=0}. 
\end{equation} By direct calculations, we have from \eqref{eq:ht1} that \begin{equation}\label{eq:ht2} \begin{split} &-\p_tu^{(1,2)}(x,t)-\Delta u^{(1,2)}(x,t)\\ =& -\nabla u^{(1)}\cdot \nabla u^{(2)}-\nabla u^{(1,2)}\cdot \nabla u(x,t;0) \\ & +F_j^{(1)}m^{(1,2)}+F^{(2)}_j(x,t)m^{(1)}m^{(2)}, \end{split} \end{equation} and \begin{equation}\label{eq:ht3} \begin{split} &\p_t m^{(1,2)}(x,t)-\Delta m^{(1,2)}(x,t)\\ =& \p_{\varepsilon_1}\p_{\varepsilon_2}{\rm div} (m\nabla u)|_{\varepsilon=0}\medskip\\ =&\nabla m^{(1,2)}\nabla u(x,t;0)+\nabla m(x,t;0)\nabla u^{(1,2)}+m^{(1,2)}\Delta u(x,t;0)+m(x,t;0)\Delta u^{(1,2)}\medskip\\ &+ {\rm div} (m^{(1)}\nabla u^{(2)})+{\rm div}(m^{(2)}\nabla u^{(1)})\medskip\\ =& {\rm div} (m^{(1)}\nabla u^{(2)})+{\rm div}(m^{(2)}\nabla u^{(1)}). \end{split} \end{equation} Combining \eqref{eq:ht2} and \eqref{eq:ht3}, we have the second order linearization as follows: \begin{equation}\label{linear l=1,2 eg} \begin{cases} -\p_tu^{(1,2)}-\Delta u^{(1,2)}(x,t)+\nabla u^{(1)}\cdot \nabla u^{(2)}\medskip\\ \hspace*{3cm}= F^{(1)}(x,t)m^{(1,2)}+F^{(2)}(x,t)m^{(1)}m^{(2)},& \text{ in } \mathbb{T}^n\times(0,T),\medskip\\ \p_t m^{(1,2)}-\Delta m^{(1,2)}= {\rm div} (m^{(1)}\nabla u^{(2)})+{\rm div}(m^{(2)}\nabla u^{(1)}) ,&\text{ in } \mathbb{T}^n\times (0,T),\medskip\\ u^{(1,2)}(x,T)=G^{(1)}(x)m^{(1,2)}(x,T)+G^{(2)}(x)m^{(1)}m^{(2)}(x,T), & \text{ in } \mathbb{T}^n,\medskip\\ m^{(1,2)}(x,0)=0, & \text{ in } \mathbb{T}^n.\\ \end{cases} \end{equation} Notice that the non-linear terms of the system $\eqref{linear l=1,2 eg}$ depend on the first order linearised system $\eqref{linear l=1,eg}$. This shall be an important ingredient in the proof of Theorem $\ref{der g}$ in what follows. Inductively, for $N\in\mathbb{N}$, we consider \begin{equation*} u^{(1,2...,N)}=\p_{\varepsilon_1}\p_{\varepsilon_2}...\p_{\varepsilon_N}u|_{\varepsilon=0}, \end{equation*} \begin{equation*} m^{(1,2...,N)}=\p_{\varepsilon_1}\p_{\varepsilon_2}...\p_{\varepsilon_N}m|_{\varepsilon=0}. \end{equation*} we can obtain a sequence of parabolic systems, which shall be employed again in determining the higher order Taylor coefficients of the unknowns $F$ and $G$. \subsection{Unique determination of single unknown function} We first present the proof of Theorem $\ref{der g}$. \begin{proof}[Proof of Theorem $\ref{der g}$] Consider the following systems for $j=1,2$: \begin{equation}\label{j=1,2for g} \begin{cases} -\p_tu_j(x,t)-\Delta u_j(x,t)+\frac 1 2 {|\nabla u_j|^2}= F(x,t,m_j(x,t)),& \text{ in }\mathbb{T}^n\times (0,T),\medskip\\ \p_t m_j(x,t)-\Delta m_j(x,t)-{\rm div}(m_j(x,t)\nabla u_j(x,t))=0,&\text{ in }\mathbb{T}^n\times(0,T),\medskip\\ u_j(x,T)=G_j(x,m_j(x,T)), & \text{ in } \mathbb{T}^n,\medskip\\ m_j(x,0)=m_0(x), & \text{ in } \mathbb{T}^n.\\ \end{cases} \end{equation} By the successive linearization procedure, we first consider the case $N=1.$ Let $$u_{j}^{(1)}:=\p_{\varepsilon_1}u_{j}|_{\varepsilon=0},\quad m_{j}^{(1)}:=\p_{\varepsilon_1}m_{j}|_{\varepsilon=0}.$$ Direct computations show that $(u_{j}^{(1)},v_{j}^{(1)} )$ satisfies the following system \begin{equation}\label{linear l=1for g} \begin{cases} -\p_tu_j^{(1)}(x,t)-\Delta u^{(1)}_j(x,t)= F^{(1)}(x,t)m_j^{(1)}(x,t),& \text{ in }\mathbb{T}^n\times(0,T),\medskip\\ \p_t m^{(1)}_j(x,t)-\Delta m^{(1)}_j(x,t)=0,&\text{ in }\mathbb{T}^n\times(0,T),\medskip\\ u^{(1)}_j(x,T)=G_j^{(1)}(x)m^{(1)}_j(x,T), & \text{ in } \mathbb{T}^n,\medskip\\ m^{(1)}_j(x,0)=f_1(x), & \text{ in } \mathbb{T}^n. 
\end{cases} \end{equation} We can solve the system \eqref{linear l=1for g} by first deriving $m^{(1)}_j$ and then obtaining $u^{(1)}_j.$ In doing so, we can obtain that the solution is $$ m_j^{(1)}(x,t)= \int_{\mathbb{R}^n}\Phi(x-y,t)f_{1}(y)\, dy,$$ \begin{equation*} \begin{aligned} u_j^{(1)}(x,t)&= \int_{\mathbb{R}^n}\Phi(x-y,T-t)G_j^{(1)}(y)m^{(1)}_j(y,T) )\, dy\\ &+\int_{0}^{T-t}\int_{\mathbb{R}^n}\Phi(x-y,T-t-s)F^{(1)}(y,T-s)\overline{m}_j^{(1)}(y,s)\, dyds, \end{aligned} \end{equation*} where $\overline{m}_j^{(1)}(x,t)= m_j^{(1)}(x,T-t)$ and $\Phi$ is the fundamental solution of the heat equation: \begin{equation}\label{eq:fund1} \Phi(x,t)= \frac{1}{(4\pi t)^{n/2}}e^{-\frac{|x|^2}{4t}}. \end{equation} Since $\mathcal{M}_{G_1}=\mathcal{M}_{G_2}$, we have $$ u_1^{(1)}(x,0)=u_2^{(1)}(x,0),$$ for all $f_1\in C_+^{2+\alpha}(\mathbb{T}^n).$ This implies that $$ \int_{\mathbb{R}^n}\Phi(x-y,T)[G_1^{(1)}(y)m_1^{(1)}(y,T))-G_2^{(1)}(y)m_2^{(1)}(y,T)) ]\, dy=0.$$ Noticing that $m_1^{(1)}(x,t)=m_2^{(1)}(x,t)$, we choose $$m_1^{(1)}(x,T)=m_2^{(1)}(x,T)=\exp(-4\pi^2|\boldsymbol{\zeta}|^2T-2\pi \mathrm{i} \boldsymbol{\zeta}\cdot x)+M,$$ where $\boldsymbol{\zeta}\in\mathbb{Z}^n, M\in\mathbb{N}.$ ( In this case, $f_1(x)\in C_+^{2+\alpha}(\mathbb{T}^n)$) By taking $M=1$ and $M=2$, respectively and then subtracting the resulting equations from one another, one can readily show that \begin{equation} \int_{\mathbb{R}^n}\Phi(x-y,T)[(G_1^{(1)}(y)-G_2^{(1)}(y))\exp(-2\pi \mathrm{i} \boldsymbol{\zeta}\cdot y) ]\, dy=0, \end{equation} for all $\boldsymbol{\zeta}\in\mathbb{Z}^n.$ Therefore $G_1^{(1)}(x)=G_2^{(1)}(x).$ We proceed to consider the case $N=2.$ Let $$u_{j}^{(1,2)}:=\p_{\varepsilon_1}\p_{\varepsilon_2}u_{j}|_{\varepsilon=0},\quad m_{j}^{(1,2)}:=\p_{\varepsilon_1}\p_{\varepsilon_2}m_{j}|_{\varepsilon=0},$$ and $$u_{j}^{(2)}:=\p_{\varepsilon_2}u_{j}|_{\varepsilon=0},\quad m_{j}^{(2)}:=\p_{\varepsilon_2}m_{j}|_{\varepsilon=0}.$$ Then we can deal with the second-order linearization: \begin{equation}\label{linear l=1,2} \begin{cases} -\p_tu_j^{(1,2)}(x,t)-\Delta u^{(1,2)}_j(x,t)+\nabla u_{j}^{(1)}\cdot \nabla u_{j}^{(2)}\\ \hspace*{3cm} = F^{(1)}m_j^{(1,2)}+F^{(2)}(x,t)m_j^{(1)}m_j^{(2)},& \text{ in }\mathbb{T}^n\times (0,T),\\ \p_t m^{(1,2)}_j(x,t)-\Delta m^{(1,2)}_j(x,t)\medskip \\ \hspace*{3cm} = {\rm div} (m_{j}^{(1)}\nabla u_j^{(2)})+{\rm div}(m_j^{(2)}\nabla u_j^{(1)}) ,&\text{ in }\mathbb{T}^n\times(0,T),\medskip\\ u^{(1,2)}_j(x,T)=G_j^{(1)}(x)m_j^{(1,2)}(x,T)+G_j^{(2)}(x)m_j^{(1)}m_j^{(2)}(x,T), & \text{ in } \mathbb{T}^n,\medskip\\ m^{(1,2)}_j(x,0)=0, & \text{ in } \mathbb{T}^n.\\ \end{cases} \end{equation} Since we have shown that $G_1^{(1)}(x)=G_2^{(1)}(x)$, we have $$ u^{(1)}_1(x,t)= u^{(1)}_2(x,t), \, m^{(1)}_1(x,t)= m^{(1)}_2(x,t)$$ by solving equation $\eqref{linear l=1for g}$. Then by the same argument in the case $N=1$ (considering $m_0=\varepsilon_2f_2$ ), we have $$u^{(2)}_1(x,t)=u^{(2)}_2(x,t), \, m^{(2)}_1(x,t)= m^{(2)}_2(x,t).$$ Denote \[ p(x,t)={\rm div} (m_{j}^{(1)}\nabla u_j^{(2)})+{\rm div}(m_j^{(2)}\nabla u_j^{(1)}),\ \ q(x,t)= -\nabla u_{j}^{(1)}\cdot \nabla u_{j}^{(2)}. 
\] Then we can also solve system \eqref{linear l=1,2} as follows: \begin{equation*} m^{(1,2)}_j(x,t)=\int_{0}^{t} \int_{\mathbb{R}^n} \Phi(x-y,t-s)p(y,s)\, dyds, \end{equation*} \begin{equation*} \begin{aligned} u_j^{(1,2)}(x,t)= &\int_{\mathbb{R}^n}\Phi(x-y,T-t) [G_j^{(1)}(y)m_j^{(1,2)}(y,T)+G_j^{(2)}(y)m_j^{(1)}m_j^{(2)}(y,T) ]\, dy\\ +&\int_{0}^{T-t}\int_{\mathbb{R}^n}\Phi(x-y,T-t-s)\big(F^{(1)}(y,T-s)\overline{m}_j^{(1,2)}(y,s)+F^{(2)}(y,T-s)m_j^{(1)}m_j^{(2)}(y,T-s) +\overline{q}(y,s)\big)\, dyds, \end{aligned} \end{equation*} where $\overline{m}_j^{(1,2)}(y,s)=m_j^{(1,2)}(y,T-s)$ and $\overline{q}(y,s)=q(y,T-s).$ Since $$u_1^{(1,2)}(x,0)= u_2^{(1,2)}(x,0),$$ and since $u_j^{(1)},u_j^{(2)},m_j^{(1)},m_j^{(2)}$, and hence also $m_j^{(1,2)}$, do not depend on $j$, we have $$ \int_{\mathbb{R}^n}\Phi(x-y,T)[G_1^{(2)}(y)-G_2^{(2)}(y)]m_1^{(1)}(y,T)m_1^{(2)}(y,T) \, dy=0.$$ Next, by a similar argument to the case $N=1$, we can prove that $G^{(2)}_1(x)=G^{(2)}_2(x). $ Finally, by mathematical induction, we can show the same result for $N\geq 3$. That is, for any $k\in\mathbb{N},$ we have $G^{(k)}_1(x)=G^{(k)}_2(x).$ The proof is complete. \end{proof} \subsection{Simultaneous recovery results for inverse problems} In this section, we aim to determine $F$ and $G$ simultaneously. To that end, we first derive an auxiliary lemma as follows. \begin{lem}\label{dense} Let $u$ be a solution of the heat equation \begin{equation}\label{per heat} \begin{cases} \p_t u(x,t)-\Delta u(x,t)=0 &\text{ in } \mathbb{T}^n\times(0,T),\\ u(x,0)=u_0(x) &\text{ in } \mathbb{T}^n. \end{cases} \end{equation} Let $f(x)\in C^{2+\alpha}(\mathbb{T}^n)$ for some $\alpha\in(0,1)$. Suppose \begin{equation}\label{fuv=0} \int_{\mathbb{T}^n\times(0,T)} f(x)u(x,t)\, dxdt=0, \end{equation} for all $u_0\in C_+^{\infty}(\mathbb{T}^n)$. Then one has $f=0.$ \end{lem} \begin{proof} Let $\boldsymbol {\xi}\in\mathbb{Z}^n$ and $M\in\mathbb{N}$. It is directly verified that $$ u(x,t)=\exp(- 2\pi\mathrm{i}\boldsymbol {\xi}\cdot x-4\pi^2|\boldsymbol {\xi}|^2t )+M, \quad \mathrm{i}:=\sqrt{-1}, $$ is a solution of $\eqref{per heat}$ with initial value $$u_0(x)= \exp(- 2\pi\mathrm{i}\boldsymbol {\xi}\cdot x)+M\geq0.$$ Then $\eqref{fuv=0}$ implies that $$\int_{\mathbb{T}^n} \frac{1-\exp(-4\pi^2|\boldsymbol {\xi}|^2T)}{4\pi^2|\boldsymbol {\xi}|^2}f(x)e^{-2\pi \mathrm{i}\boldsymbol {\xi}\cdot x } dx+MT\int_{\mathbb{T}^n}f(x)dx=0.$$ By taking $M=1$ and $M=2$, respectively, we have $$ \int_{\mathbb{T}^n}f(x)e^{-2\pi \mathrm{i}\boldsymbol {\xi}\cdot x } dx=0.$$ Hence, all Fourier coefficients of $f$ vanish. Since $f(x)\in C^{2+\alpha}(\mathbb{T}^n)$, its Fourier series converges to $f(x)$ uniformly. Therefore, $f(x)=0.$ \end{proof} We are now in a position to present the proof of Theorem $\ref{der F,g}$. \begin{proof}[Proof of Theorem $\ref{der F,g}$] Consider the following systems for $j=1,2$: \begin{equation}\label{j=1,2for Fg} \begin{cases} -\p_tu_j(x,t)-\Delta u_j(x,t)+\frac 1 2 {|\nabla u_j|^2}= F_j(x,m_j(x,t)),& \text{ in }\mathbb{T}^n\times(0,T),\medskip\\ \p_t m_j(x,t)-\Delta m_j(x,t)-{\rm div}(m_j(x,t)\nabla u_j(x,t))=0,&\text{ in }\mathbb{T}^n\times(0,T),\medskip\\ u_j(x,T)=G_j(x,m_j(x,T)), & \text{ in } \mathbb{T}^n,\medskip\\ m_j(x,0)=m_0(x), & \text{ in } \mathbb{T}^n. \end{cases} \end{equation} Following the method used in the proof of Theorem $\ref{der g}$, we let $$m_0(x;\varepsilon)=\sum_{l=1}^{N}\varepsilon_lf_l,$$ where $f_l\in C_+^{2+\alpha}(\mathbb{T}^n)$ and $\varepsilon=(\varepsilon_1,\varepsilon_2,...,\varepsilon_N)\in\mathbb{R}_+^N$ with $|\varepsilon|=\sum_{l=1}^{N}|\varepsilon_l|$ small enough.
Consider the case $N=1.$ Let $$u_{j}^{(1)}:=\p_{\varepsilon_1}u_{j}|_{\varepsilon=0},$$ $$m_{j}^{(1)}:=\p_{\varepsilon_1}m_{j}|_{\varepsilon=0}.$$ Then direct computations imply that $(u_{j}^{(1)},v_{j}^{(1)} )$ satisfies the following system: \begin{equation}\label{linear l=1for F g} \begin{cases} -\p_tu_j^{(1)}(x,t)-\Delta u^{(1)}_j(x,t)= F_j^{(1)}(x)m_j^{(1)}(x,t),& \text{ in }\mathbb{T}^n\times(0,T),\medskip \\ \p_t m^{(1)}_j(x,t)-\Delta m^{(1)}_j(x,t)=0,&\text{ in }\mathbb{T}^n\times (0,T),\medskip \\ u^{(1)}_j(x,T)=G_j^{(1)}(x)m^{(1)}_j(x,T), & \text{ in } \mathbb{T}^n,\medskip\\ m^{(1)}_j(x,0)=f_1(x), & \text{ in } \mathbb{T}^n. \end{cases} \end{equation} Then we have $ m_1^{(1)}=m_2^{(1)}:=m^{(1)}(x,t)$ . Let $ \overline{u}=u_1^{(1)}-u_2^{(1)}$, $\eqref{linear l=1for F g}$ implies that \begin{equation}\label{u1-u2 } \begin{cases} &-\p_t\overline{u}-\Delta\overline{u}= (F_1^{(1)}-F_2^{(1)})m^{(1)}(x,t),\medskip\\ &\overline{u}(x,T)=(G_1^{(1)}-G_2^{(1)})m^{(1)}(x,T). \end{cases} \end{equation} Now let $w$ be a solution to the heat equation $\p_t w(x,t)-\Delta w(x,t)=0$ in $\mathbb{T}^n$. Then \begin{equation} \begin{aligned} &\int_Q (F_1^{(1)}-F_2^{(1)})m^{(1)}(x,t)w\, dxdt\medskip\\ =&\int_Q (-\p_t\overline{u}-\Delta\overline{u})w\, dxdt\medskip\\ =&\int_{\mathbb{T}^n} (\overline{u}w)\big|_0^T\, dx +\int_Q \overline{u}\p_tw- \overline{u}\Delta w\medskip\\ =& \int_{\mathbb{T}^n} (\overline{u}w)\big|_0^T\, dx. \end{aligned} \end{equation} Since $\mathcal{M}_{F_1,G_1}=\mathcal{M}_{F_2,G_2}$, we have $$\overline{u}(x,0)=0.$$ It follows that \begin{equation}\label{integral by part} \int_Q (F_1^{(1)}-F_2^{(1)})m^{(1)}(x,t)w(x,t)\, dxdt= \int_{\mathbb{T}^n} w(x,T)(G_1^{(1)}-G_2^{(1)})m^{(1)}(x,T)\, dx, \end{equation} for all solutions $w(x,t),m^{(1)}(x,t)$ of the heat equation in $\mathbb{T}^n$. Here, we cannot apply Lemma $\ref{dense}$ directly. Nevertheless, we use the same construction. Let $\boldsymbol {\xi_1},\boldsymbol {\xi_2}\in\mathbb{Z}^n\backslash\{0\}$, $M\in\mathbb{N}^*$ and $\boldsymbol {\xi}=\boldsymbol {\xi_1}+\boldsymbol {\xi_2}$ . Let $$w(x,t)=\exp(- 2\pi\mathrm{i}\boldsymbol {\xi_1}\cdot x-4\pi^2|\boldsymbol {\xi_1}|^2t ),$$ and $$m(x,t)=\exp(- 2\pi\mathrm{i}\boldsymbol {\xi_2}\cdot x-4\pi^2|\boldsymbol {\xi_2}|^2t )+M.$$ Then the left hand side of $\eqref{integral by part}$ is \begin{equation}\label{Fourier1} \begin{aligned} &\int_{\mathbb{T}^n} \frac{1-\exp(-4\pi^2T(|\boldsymbol {\xi_1}|^2+|\boldsymbol {\xi_2}|^2))}{4\pi^2( |\boldsymbol {\xi_1}|^2+|\boldsymbol {\xi_2}|^2)}(F_1^{(1)}-F_2^{(1)} )e^{-2\pi \mathrm{i}\boldsymbol {\xi}\cdot x }\, dx\\ +&M \frac{1-\exp(-4\pi^2T|\boldsymbol {\xi_1}|^2)}{4\pi^2|\boldsymbol {\xi_1}|^2} \int_{\mathbb{T}^n} (F_1^{(1)}-F_2^{(1)} )e^{-2\pi \mathrm{i}\boldsymbol {\xi_1}\cdot x }\,dx. \end{aligned} \end{equation} And the right hand side is \begin{equation}\label{Fourier2} \begin{aligned} &\int_{\mathbb{T}^n}\exp(-4\pi^2T(|\boldsymbol {\xi_1}|^2+|\boldsymbol {\xi_2}|^2)) (G_1^{(1)}-G_2^{(1)})e^{-2\pi \mathrm{i}\boldsymbol {\xi}\cdot x }\, dx\\ +&M\exp(-4\pi^2T|\boldsymbol {\xi_1}|^2)\int_{\mathbb{T}^n} (G_1^{(1)}-G_2^{(1)})e^{-2\pi \mathrm{i}\boldsymbol {\xi_1}\cdot x }\, dx. 
\end{aligned} \end{equation} By taking $M=1$ and $M=2$, respectively, and then subtracting the resulting equations from one another, one can readily show that $$ \frac{1-\exp(-4\pi^2T|\boldsymbol {\xi_1}|^2)}{4\pi^2|\boldsymbol {\xi_1}|^2} \int_{\mathbb{T}^n} (F_1^{(1)}-F_2^{(1)} )e^{-2\pi \mathrm{i}\boldsymbol {\xi_1}\cdot x }\,dx= \exp(-4\pi^2T|\boldsymbol {\xi_1}|^2)\int_{\mathbb{T}^n} (G_1^{(1)}-G_2^{(1)})e^{-2\pi \mathrm{i}\boldsymbol {\xi_1}\cdot x }\, dx. $$ Then $\eqref{integral by part}$, $\eqref{Fourier1}$ and $\eqref{Fourier2}$ readily yield that $$\frac{1-\exp(-4\pi^2T(|\boldsymbol {\xi_1}|^2+|\boldsymbol {\xi_2}|^2))}{4\pi^2( |\boldsymbol {\xi_1}|^2+|\boldsymbol {\xi_2}|^2)}a_{\boldsymbol {\xi} }+\exp(-4\pi^2T(|\boldsymbol {\xi_1}|^2+|\boldsymbol {\xi_2}|^2))b_{\boldsymbol {\xi}}=0,$$ where $a_{\boldsymbol{\xi}}:=\int_{\mathbb{T}^n}(F_1^{(1)}-F_2^{(1)})e^{-2\pi \mathrm{i}\boldsymbol{\xi}\cdot x}\,dx$ and $b_{\boldsymbol{\xi}}:=\int_{\mathbb{T}^n}(G_1^{(1)}-G_2^{(1)})e^{-2\pi \mathrm{i}\boldsymbol{\xi}\cdot x}\,dx$ denote the Fourier coefficients of $F_1^{(1)}-F_2^{(1)}$ and $G_1^{(1)}-G_2^{(1)}$, respectively. For a given $\boldsymbol {\xi}\in\mathbb{Z}^n $, there exist $\boldsymbol {\xi_1},\boldsymbol {\xi_2},\boldsymbol {\xi_1}',\boldsymbol {\xi_2}' \in\mathbb{Z}^n\backslash\{0\}$ such that $\boldsymbol {\xi}=\boldsymbol {\xi_1}+\boldsymbol {\xi_2}=\boldsymbol {\xi_1}'+\boldsymbol {\xi_2}'$ and $|\boldsymbol {\xi_1}|^2+|\boldsymbol {\xi_2}|^2\neq |\boldsymbol {\xi_1}'|^2+|\boldsymbol {\xi_2}'|^2 .$ Therefore, $a_{\boldsymbol {\xi}}=b_{\boldsymbol {\xi}}=0$ for all $\boldsymbol {\xi}\in\mathbb{Z}^n$. It follows that $F_1^{(1)}-F_2^{(1)}=G_1^{(1)}-G_2^{(1)}=0$. Next, we consider the case $N=2.$ Let \begin{equation}\label{eq:ss1} u_{j}^{(1,2)}:=\p_{\varepsilon_1}\p_{\varepsilon_2}u_{j}|_{\varepsilon=0},\quad m_{j}^{(1,2)}:=\p_{\varepsilon_1}\p_{\varepsilon_2}m_{j}|_{\varepsilon=0}, \end{equation} and \begin{equation}\label{eq:ss2} u_{j}^{(2)}:=\p_{\varepsilon_2}u_{j}|_{\varepsilon=0},\quad m_{j}^{(2)}:=\p_{\varepsilon_2}m_{j}|_{\varepsilon=0}. \end{equation} By the second-order linearization in \eqref{eq:ss1} and \eqref{eq:ss2}, we can obtain \begin{equation} \begin{cases} -\p_tu_j^{(1,2)}(x,t)-\Delta u^{(1,2)}_j(x,t)+\nabla u_{j}^{(1)}\cdot \nabla u_{j}^{(2)}\\ \hspace*{3cm} = F_j^{(1)}m_j^{(1,2)}+F^{(2)}_j(x)m_j^{(1)}m_j^{(2)},& \text{ in }\mathbb{T}^n\times(0,T),\medskip\\ \p_t m^{(1,2)}_j(x,t)-\Delta m^{(1,2)}_j(x,t)\\ \hspace*{3cm}= {\rm div} (m_{j}^{(1)}\nabla u_j^{(2)})+{\rm div}(m_j^{(2)}\nabla u_j^{(1)}) ,&\text{ in }\mathbb{T}^n\times (0,T),\medskip\\ u^{(1,2)}_j(x,T)=G_j^{(1)}(x)m_j^{(1,2)}(x,T)+G_j^{(2)}(x)m_j^{(1)}m_j^{(2)}(x,T), & \text{ in } \mathbb{T}^n,\medskip\\ m^{(1,2)}_j(x,0)=0, & \text{ in } \mathbb{T}^n. \end{cases} \end{equation} By following a similar argument to the case $N=1$, we have $$ u^{(1)}_1(x,t)= u^{(1)}_2(x,t), \quad u^{(2)}_1(x,t)=u^{(2)}_2(x,t),$$ and $$ m^{(1)}_1(x,t)= m^{(1)}_2(x,t) , \quad m^{(2)}_1(x,t)= m^{(2)}_2(x,t),$$ and hence also $m_1^{(1,2)}(x,t)=m_2^{(1,2)}(x,t)$. Let $\overline{u}^2(x,t)=u_1^{(1,2)}(x,t)-u_2^{(1,2)}(x,t) $. Since $F_1^{(1)}=F_2^{(1)}$ and $G_1^{(1)}=G_2^{(1)}$, we have \begin{equation}\label{u1-u2,2 } \begin{cases} &-\p_t\overline{u}^2-\Delta\overline{u}^2= (F_1^{(2)}-F_2^{(2)})m^{(1)}(x,t)m_1^{(2)}(x,t),\medskip\\ &\overline{u}^2(x,T)=(G_1^{(2)}-G_2^{(2)})m^{(1)}(x,T)m_1^{(2)}(x,T). \end{cases} \end{equation} Let $w$ be a solution of the heat equation $\p_t w(x,t)-\Delta w(x,t)=0$ in $\mathbb{T}^n$. Then by following a similar argument to the case $N=1$, we can show that \begin{equation}\label{integral by part2} \begin{split} & \int_Q (F_1^{(2)}-F_2^{(2)})m^{(1)}m_1^{(2)}w(x,t)\, dxdt\\ =& \int_{\mathbb{T}^n} w(x,T)(G_1^{(2)}-G_2^{(2)})m^{(1)}(x,T)m_1^{(2)}(x,T)\, dx.
\end{split} \end{equation} To proceed further, by using the construction in Lemma $\ref{dense}$ again, we have from \eqref{integral by part2} that \[ F_1^{(2)}-F_2^{(2)}=G_1^{(2)}-G_2^{(2)}=0. \] Finally, via mathematical induction, we can derive the same result for $N\geq 3$. That is, for any $k\in\mathbb{N},$ we have $$F^{(k)}_1(x)-F^{(k)}_2(x)=G^{(k)}_1(x)-G^{(k)}_2(x)=0.$$ Hence, $$(F_1(x,z),G_1(x,z))=(F_2(x,z),G_2(x,z))\ \text{ in } \mathbb{T}^n\times \mathbb{R}.$$ The proof is complete. \end{proof} \begin{rmk} Theorem $\ref{der F,g}$ is not strictly stronger than Theorem $\ref{der g}$. We need $F(x,z)$ to be independent of $t$ in the proof of Theorem $\ref{der F,g}$, but we do not need this condition in the proof of Theorem $\ref{der g}$. \end{rmk} \begin{rmk} In the proof of Theorem $\ref{der F,g}$, we arrived at a decoupled system after applying the linearization technique. However, we cannot simply apply existing results on inverse problems for a single parabolic equation. In fact, for a single parabolic equation, it is impossible to determine the source term $f$ from the corresponding boundary measurement. For a simple illustration, we let $h\in C_0^\infty(Q)$, and consider the following two parabolic equations for a given $f\in C(Q)$, \[ \partial_t u-\Delta u=f \quad \mbox{and}\quad \partial_t\widetilde{u}-\Delta\widetilde{u}=\widetilde{f},\quad \widetilde{f}:=f+(\partial_t h-\Delta h). \] It can be directly verified (taking $\widetilde{u}=u+h$) that $u$ and $\widetilde{u}$ possess the same boundary data, though $f\not\equiv\widetilde f$ in general. Hence, the proof of Theorem $\ref{der g}$ makes essential use of the specific coupled structure of the MFG system. The same fact holds for the proofs of Theorems $\ref{der F 2}$ and $\ref{der g2}$ in what follows. \end{rmk} \section{Inverse Problems for MFGs with General Lagrangians} In the previous sections, we established the unique identifiability results for the inverse problems by assuming that the Hamiltonian involved is of a quadratic form, which represents a kinetic energy. In this section, we show that one can extend a large part of the previous results to the case with general Lagrangians if $F$ is independent of $t$. In what follows, we let $T>0$ and $n\in\mathbb{N}$ and consider the following system of nonlinear PDEs: \begin{equation}\label{general H} \begin{cases} -\p_tu(x,t)-\Delta u(x,t)+ H(x,\nabla u)= F(x,m(x,t)),& \text{ in }\mathbb{T}^n\times (0,T),\medskip\\ \p_t m(x,t)-\Delta m(x,t)-{\rm div}(m(x,t) H_p (x,\nabla u))=0,&\text{ in }\mathbb{T}^n\times (0,T),\medskip \\ u(x,T)=G(x,m_T), & \text{ in } \mathbb{T}^n,\medskip \\ m(x,0)=m_0(x), & \text{ in } \mathbb{T}^n. \end{cases} \end{equation} Here and below, $m_T:=m(x,T)$. We study the inverse problem \eqref{eq:ip1}-\eqref{eq:ip2} associated with \eqref{general H}. In order to apply the method developed in the previous sections to this general case, we first introduce a new analytic class. \begin{defi} Let $H(x,z_1,z_2,...,z_n)$ be a function mapping from $\mathbb{T}^n\times\mathbb{C}^n $ to $\mathbb{C}$.
We say that $H$ is admissible and write $H \in \mathcal{I}$ if it fulfils the following conditions: \begin{enumerate} \item[(1)]~The map $(z_1,z_2,...,z_n)\mapsto H(\cdot,z_1,z_2,...,z_n)$ is holomorphic with values in $C^{2+\alpha}(\mathbb{T}^n)$ for some $\alpha\in(0,1)$; \item[(2)] $H(x,0)=0$ for all $x\in\mathbb{T}^n.$ \end{enumerate} It is clear that $H$ can be expanded into a power series: \begin{equation}\label{eq:sss1} H(x,z)=\sum_{|\beta|=1}^{\infty} H^{(\beta)}(x)\frac{z^{\beta}}{\beta!}, \end{equation} where $ H^{(\beta)}(x)\in C^{2+\alpha}(\mathbb{T}^n)$ and $\beta$ is a multi-index. \end{defi} Similar to our discussion in Remark~\ref{rem:1}, we always assume that the coefficient functions $H^{(\beta)}$ in \eqref{eq:sss1} are real-valued. We first state the main results for the inverse problems associated with \eqref{general H}. The corresponding proofs are given in Section $\ref{proof H}$. \begin{thm}\label{der F 2} Assume $F_j\in\mathcal{B}$ ($j=1,2$), $G\in\mathcal{B}$ and $H\in\mathcal{I}$. Let $\mathcal{M}_{F_j}$ be the measurement map associated to the following system ($j=1,2$): \begin{equation} \begin{cases} -\p_tu(x,t)-\Delta u(x,t)+ H(x,\nabla u)= F_j(x,m(x,t)),& \text{ in }\mathbb{T}^n\times (0,T),\medskip\\ \p_t m(x,t)-\Delta m(x,t)-{\rm div}(m(x,t) H_p (x,\nabla u))=0,&\text{ in }\mathbb{T}^n\times (0,T),\medskip\\ u(x,T)=G(x,m_T), & \text{ in } \mathbb{T}^n,\medskip\\ m(x,0)=m_0(x), & \text{ in } \mathbb{T}^n. \end{cases} \end{equation} If for any $m_0\in C^{2+\alpha}(\mathbb{T}^n)\cap\mathcal{O}_a$, one has $$\mathcal{M}_{F_1}(m_0)=\mathcal{M}_{F_2}(m_0),$$ then it holds that $$F_1(x,z)=F_2(x,z) \text{ in } \mathbb{T}^n\times \mathbb{R}.$$ \end{thm} \begin{thm}\label{der g2} Assume $F \in\mathcal{B}$, $G_j\in\mathcal{B}$ ($j=1,2$) and $H\in\mathcal{I}$. Let $\mathcal{M}_{G_j}$ be the measurement map associated to the following system ($j=1,2$): \begin{equation} \begin{cases} -\p_tu(x,t)-\Delta u(x,t)+ H(x,\nabla u)= F(x,m(x,t)),& \text{ in }\mathbb{T}^n\times (0,T),\medskip\\ \p_t m(x,t)-\Delta m(x,t)-{\rm div}(m(x,t) H_p (x,\nabla u))=0,&\text{ in } \mathbb{T}^n\times (0,T),\medskip\\ u(x,T)=G_j(x,m_T), & \text{ in } \mathbb{T}^n,\medskip\\ m(x,0)=m_0(x), & \text{ in } \mathbb{T}^n. \end{cases} \end{equation} If for any $m_0\in C^{2+\alpha}(\mathbb{T}^n)\cap\mathcal{O}_a$, one has $$\mathcal{M}_{G_1}(m_0)=\mathcal{M}_{G_2}(m_0),$$ then it holds that $$G_1(x,z)=G_2(x,z) \text{ in } \mathbb{T}^n\times \mathbb{R}.$$ \end{thm} \subsection{Well-posedness of the general system} \begin{lem}\label{localwellpose2} Suppose $F,G\in\mathcal{B}$ and $H\in\mathcal{I}$. Then there exist $\delta>0$ and $C>0$ such that for any $m_0\in B_{\delta}(\mathbb{T}^n) :=\{m_0\in C^{2+\alpha}(\mathbb{T}^n): \|m_0\|_{C^{2+\alpha}(\mathbb{T}^n)}\leq\delta \}$, the MFG system $\eqref{general H}$ has a solution $(u,m) \in C^{2+\alpha,1+\frac{\alpha}{2}}(Q)\times C^{2+\alpha,1+\frac{\alpha}{2}}(Q)$ which satisfies \begin{equation}\label{eq:nn4} \|u\|_{C^{2+\alpha,1+\frac{\alpha}{2}}(Q)}+ \|m\|_{C^{2+\alpha,1+\frac{\alpha}{2}}(Q)}\leq C\|m_0\|_{ C^{2+\alpha}(\mathbb{T}^n)}.
\end{equation} Furthermore, the solution $(u,m)$ is unique within the class \begin{equation}\label{eq:nn5} \{ (u,m)\in C^{2+\alpha,1+\frac{\alpha}{2}}(Q)^2 : \|(u,m)\|_{ C^{2+\alpha,1+\frac{\alpha}{2}}(Q)^2}\leq C\delta \}, \end{equation} where \begin{equation}\label{eq:nn6} \|(u,m)\|_{ C^{2+\alpha,1+\frac{\alpha}{2}}(Q)^2}:= \|u\|_{C^{2+\alpha,1+\frac{\alpha}{2}}(Q)}+ \|m\|_{C^{2+\alpha,1+\frac{\alpha}{2}}(Q)}, \end{equation} and it depends holomorphically on $m_0\in C^{2+\alpha}(\mathbb{T}^n)$. \end{lem} The proof of Lemma $\ref{localwellpose2}$ follows from a similar argument to that of Lemma $\ref{local_wellpose}$. We choose to skip it. \subsection{Proofs of Theorem $\ref{der F 2}$ and $\ref{der g2}$}\label{proof H} We first introduce the general heat kernel to recover the unknown functions in a parabolic system. The construction and basic properties of the general heat kernel can be found in \cite{ito11}. \begin{lem}\label{general heat ker} Let $F_1,F_2,f\in C^{2+\alpha,1+\frac{\alpha}{2}}(Q)$, $g\in C^{2+\alpha}(\mathbb{T}^n)$ and $A(x)\in C^{2+\alpha,1+\frac{\alpha}{2}}(\mathbb{T}^n)^n$. Consider the following system \begin{equation} \begin{cases} \p_tu_i(x,t)-\Delta u_i(x,t)+ A(x)\cdot \nabla u_i= F_i(x)v(x,t)+f(x,t),& \text{ in }\mathbb{T}^n\times (0,T),\medskip\\ u_i(x,0)= g(x) , & \text{ in } \mathbb{T}^n .\\ \end{cases} \end{equation} Suppose for any $v(x,t)\in C^{2+\alpha,1+\frac{\alpha}{2}}(Q)$, we have $u_1(x,T;v)=u_2(x,T;v)$. Then it holds that $F_1=F_2.$ \begin{proof} Let $L=\partial_t-\Delta+A\cdot\nabla(\cdot)$ and $K(x,y,t)$ be the solution of the following Cauchy problem \begin{equation*} \begin{cases} &L (K(x,t))=0,\ \ t>0,\ \ x\in\mathbb{R}^n,\medskip\\ &K(x,0)=\delta(0). \end{cases} \end{equation*} Then one has that \begin{equation*} \begin{aligned} u_i(x,t)=&\int_{\mathbb{T}^n}K(x-y,t)g(y)dy\\ +&\int_{0}^t\int_{\mathbb{T}^n}K(x-y,t-s)(F_i(y)v(y,s)+f(y,s))\, dyds. \end{aligned} \end{equation*} Since we have $u_1(x,T;v)=u_2(x,T;v)$, it follows that \begin{equation}\label{implies F1=F2} \int_{0}^T\int_{\mathbb{T}^n}K(x-y,T-s)(F_1(y)-F_2(y))v(y,s)\, dyds=0. \end{equation} By absurdity, we assume that there is $y_0\in \mathbb{T}^n$ such that $ F_1(y_0)\neq F_2(y_0)$. Then there is a neighborhood $U$ of $ y_0 $ such that $F_1-F_2>0$ or $F_1-F_2<0$ in $U$. Since $K(x-y,T-s)>0$ and $\eqref{implies F1=F2}$ holds for all $v\in C^{2+\alpha,1+\frac{\alpha}{2}}(Q)$. We may choose $v$ such that $K(x-y,T-s)(F_1(y)-F_2(y))v(y,s)>0$ in $U$ and $K(x-y,T-s)(F_1(y)-F_2(y))v(y,s)=0$ in $\mathbb{T}^n\backslash U$. It is a contradiction. Therefore, we have $F_1=F_2.$ The proof is complete. \end{proof} \end{lem} Before we present the proofs for Theorems $\ref{der F 2}$ and $\ref{der g2}$, we first perform the higher order linearization for the MFG system $\eqref{general H}$, which follows a similar strategy to that developed in Section $\ref{HLM}$. Let $$m_0(x;\varepsilon)=\sum_{l=1}^{N}\varepsilon_lf_l,$$ where $f_l\in C_+^{2+\alpha}(\mathbb{T}^n)$ and $\varepsilon=(\varepsilon_1,\varepsilon_2,...,\varepsilon_N)\in\mathbb{R}_+^N$ with $|\varepsilon|=\sum_{l=1}^{N}|\varepsilon_l|$ small enough. Then by Lemma $\ref{localwellpose2}$, there exists a unique solution $(u(x,t;\varepsilon),m(x,t;\varepsilon) )$ of $\eqref{general H}$. 
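Before carrying out the linearization, we record a simple example for orientation (this example is not needed in the sequel): the quadratic Hamiltonian
\begin{equation*}
H(x,z)=\frac{1}{2}\sum_{i=1}^{n}z_i^{2},\qquad H_p(x,z)=z,
\end{equation*}
is entire in $z$ and vanishes at $z=0$, so it belongs to $\mathcal{I}$; in its expansion \eqref{eq:sss1} the only nonvanishing coefficients are $H^{(\beta)}\equiv 1$ for the multi-indices $\beta=2e_i$, $i=1,\dots,n$. In particular, all first-order coefficients vanish, and \eqref{general H} reduces to the quadratic-Hamiltonian system \eqref{main_equation} studied in the previous sections.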
Let $(u(x,t;0),m(x,t;0) ) $ be the solution of $\eqref{general H}$ when $\varepsilon=0.$ Notice that if $H\in\mathcal{I}, $ then $(u(x,t;0),m(x,t;0) ) =(0,0).$ Let $$u^{(1)}:=\p_{\varepsilon_1}u|_{\varepsilon=0},$$ $$m^{(1)}:=\p_{\varepsilon_1}m|_{\varepsilon=0}.$$ Suppose $H\in\mathcal{I}$, $F\in\mathcal{A}$ and $G\in\mathcal{B}, $ we have \begin{equation}\label{compute for H1} \begin{aligned} &\p_t m^{(1)}_j(x,t)-\Delta m^{(1)}_j(x,t)\\ =&\lim\limits_{\varepsilon\to 0}\frac{1}{\varepsilon_l} [ -H(x,\nabla u(x,t;\varepsilon)) +H(x;u(x,t;0))+ F(x,u(x,t;\varepsilon))-F(x;u(x,t:0)) ]\\ =&\lim\limits_{\varepsilon\to 0}\frac{1}{\varepsilon_l} [ \sum_{|\beta|=1}^{\infty} H^{(\beta)}(x)\frac{z^{\beta}}{k!}]+F^{(1)}(x)m_j^{(1)}(x,t)\\ =& -A^{(1)}(x)\cdot \nabla u+F^{(1)}(x)m_j^{(1)}(x,t), \end{aligned} \end{equation} where $A^{(1)}(x)=(H^{(1,0,0,...,0)}(x),H^{(0,1,0,...,0)}(x),...,H^{(0,0,...,1)}(x) ).$ Moreover, we have \begin{equation}\label{compute for H2} \begin{aligned} &\p_{\varepsilon_1} {\rm div}(m(x,t) H_p (x,\nabla u)) |_{\varepsilon=0}\\ =&\p_{\varepsilon_1} {\rm div} ( m(x,t) A^{(1)}(x) )+ \p_{\varepsilon_1} {\rm div}(m(x,t) B^{(1)}(x) \cdot \nabla u)|_{\varepsilon=0}\\ =&\p_{\varepsilon_1} {\rm div} ( m(x,t) A^{(1)}(x) ), \end{aligned} \end{equation} where \[ \begin{split} B^{(1)}(x)=&(\sum_{|\beta|=1}H^{(1,\beta)}(x),\sum_{|\alpha|+|\beta|=1,\alpha\in\mathbb{R}}H^{(\alpha,1,\beta)}(x),\\ &\sum_{|\alpha|+|\beta|=1,\alpha\in\mathbb{R}^2}H^{(\alpha,1,\beta)}(x),.....,\sum_{|\alpha|=1,\alpha\in\mathbb{R}^{n-1}}H^{(\alpha,1)}(x) ). \end{split} \] Hence, we can see that $(u^{(1)},m^{(1)} )$ satisfies the following system: \begin{equation}\label{H linear l=1 eg} \begin{cases} -\p_tu^{(1)}(x,t)-\Delta u^{(1)}(x,t)+ A^{(1)}(x)\cdot \nabla u= F^{(1)}(x)m^{(1)}(x,t),& \text{ in }\mathbb{T}^n\times (0,T),\medskip\\ \p_t m^{(1)}(x,t)-\Delta m^{(1)}(x,t)-{\rm div} ( m^{(1)}(x,t) A^{(1)}(x))=0,&\text{ in }\mathbb{T}^n\times (0,T),\medskip\\ u^{(1)}(x,T)=G^{(1)}(x)m^{(1)}(x,T), & \text{ in } \mathbb{T}^n,\medskip\\ m^{(1)}(x,0)=f_1(x), & \text{ in } \mathbb{T}^n, \end{cases} \end{equation} Here, we make a key observation that the non-linear terms and source terms in higher-order linearization system only depend on the solutions of the lower-order linearization system. Hence, as an illustrative case for our argument, we only compute the second order linearization system. Let $$u^{(1,2)}:=\p_{\varepsilon_1}\p_{\varepsilon_2}u|_{\varepsilon=0}, m^{(1,2)}:=\p_{\varepsilon_1}\p_{\varepsilon_2}m|_{\varepsilon=0},$$ and $$u^{(2)}:=\p_{\varepsilon_2}u|_{\varepsilon=0},m^{(2)}:=\p_{\varepsilon_2}m|_{\varepsilon=0}.$$ Recall the derivation of the system $\eqref{linear l=1,2 eg}$ in Section $\ref{HLM}$. By direct calculations, we have \begin{equation}\label{compute H 12 eg} \begin{aligned} &-\p_tu^{(1,2)}-\Delta u^{(1,2)}\\ =&-\p_{\varepsilon_1}\p_{\varepsilon_2}H(x,\nabla u)|_{\varepsilon=0}+F^{(1)}(x)m^{(1,2)}+F^{(2)}(x)m^{(1)}m^{(2)}\\ =&-\p_{\varepsilon_1}\p_{\varepsilon_2}(\sum_{|\beta|=1}^{2} H^{(\beta)}(x)\frac{z^{\beta}}{k!})|_{\varepsilon=0}+F^{(1)}(x)m^{(1,2)}+F^{(2)}(x)m^{(1)}m^{(2)}\\ =&-A^{(1)}\cdot\nabla u_j^{(1,2)}-\sum_{|\beta|=2}H^{(\beta)}(x)u_j^{(1)}u_j^{(2)}++F^{(1)}(x)m^{(1,2)}+F^{(2)}(x)m^{(1)}m^{(2)}. 
\end{aligned} \end{equation} Now, with the discussion above at hand and Lemma $\ref{general heat ker}$, we are now in a position to present the proofs of Theorems $\ref{der F 2}$ and $\ref{der g2}.$ \begin{proof}[Proof of Theorem $\ref{der F 2}$] Consider the following MFG systems for $j=1,2$: \begin{equation}\label{general H for F} \begin{cases} -\p_tu_j(x,t)-\Delta u_j(x,t)+ H(x,\nabla u_j)= F_j(x,m(x,t)),& \text{ in }\mathbb{T}^n\times (0,T),\medskip\\ \p_t m_j(x,t)-\Delta m_j(x,t)-{\rm div} (m_j(x,t) H_p (x,\nabla u_j))=0,&\text{ in }\mathbb{T}^n\times (0,T),\medskip\\ u_j(x,T)=G(x,m_T), & \text{ in } \mathbb{T}^n,\medskip\\ m_j(x,0)=m_0(x), & \text{ in } \mathbb{T}^n. \end{cases} \end{equation} Recall the higher order linearization method in Section $\ref{HLM}$. Let $$u_{j}^{(1)}:=\p_{\varepsilon_1}u_{j}|_{\varepsilon=0},$$ $$m_{j}^{(1)}:=\p_{\varepsilon_1}m_{j}|_{\varepsilon=0}.$$ By combining $\eqref{compute for H1}$, $\eqref{compute for H2}$ and $\eqref{H linear l=1 eg}$, we can deduce that \begin{equation}\label{H linear l=1} \begin{cases} -\p_tu_j^{(1)}(x,t)-\Delta u^{(1)}_j(x,t)+ A^{(1)}(x)\cdot \nabla u_j= F^{(1)}_j(x)m_j^{(1)}(x,t),& \text{ in }\mathbb{T}^n\times (0,T),\medskip\\ \p_t m^{(1)}_j(x,t)-\Delta m^{(1)}_j(x,t)-{\rm div} ( m_j^{(1)}(x,t) A^{(1)}(x))=0,&\text{ in }\mathbb{T}^n\times (0,T),\medskip\\ u^{(1)}_j(x,T)=G^{(1)}(x)m^{(1)}_j(x,T), & \text{ in } \mathbb{T}^n,\medskip\\ m^{(1)}_j(x,0)=f_1(x), & \text{ in } \mathbb{T}^n,\medskip\\ \end{cases} \end{equation} where \[ A^{(1)}(x)=(H^{(1,0,0,...,0)}(x),H^{(0,1,0,...,0)}(x),...,H^{(0,0,...,1)}(x) ). \] We extend $f_l$ from $\mathbb{T}^n$ to $\mathbb{R}^n$ periodically, and still denote it by $f_l$. By Lemma $\ref{linearapp wellpose}$, $m_j^{(1)}$ is unique determined by $f_1(x)$. We use change of variables as well as a similar strategy in the proof of Lemma $\ref{general heat ker}$. Suppose $F^{(1)}_1(x)\not\equiv F^{(2)}_1(x)$, then there is a open subset $U\subset\mathbb{T}^n$ such that $F_1^{(1)}(x)\neq F_1^{(2)}(x) $ in $U.$ Given $\epsilon>0$, there exists $f_1\in C_+^{2+\alpha}(\mathbb{T}^n)$ such that $\|f_l-\chi_U\|_{L^2(\mathbb{T}^n)}\leq\epsilon $, where $\chi_U$ is the characteristic function of $U$. Then the classical prior estimate implies that $$\|m_1^{(1)}(x,t)-\chi_{U\times(0,T)}\|_{L^2(Q) }\leq C\epsilon, $$ for some constant $C>0$ This implies that \begin{equation} \int_{0}^{T}\int_{\mathbb{T}^n}K(x-y,T-s)(F^{(1)}_1(y)-F^{(1)}_2(y))\chi_U(y,s)\, dyds= 0. \end{equation} Since $K>0$ in $Q$ , it is a contradiction. Hence, $F_1^{(1)}(x)=F_2^{(1)}(x).$ Next, we can consider the case $N=2.$ Let $$u_{j}^{(1,2)}:=\p_{\varepsilon_1}\p_{\varepsilon_2}u_{j}|_{\varepsilon=0},\quad m_{j}^{(1,2)}:=\p_{\varepsilon_1}\p_{\varepsilon_2}m_{j}|_{\varepsilon=0},$$ and $$u_{j}^{(2)}:=\p_{\varepsilon_2}u_{j}|_{\varepsilon=0},\quad m_{j}^{(2)}:=\p_{\varepsilon_2}m_{j}|_{\varepsilon=0}.$$ We can conduct the second-order linearization. 
Following a process similar to that in $\eqref{compute H 12 eg}$, we can deduce that \begin{equation} \begin{cases} -\p_tu_j^{(1,2)}-\Delta u^{(1,2)}_j+A^{(1)}\cdot\nabla u_j^{(1,2)}+R_1(x,t)\\ \hspace*{3cm}= F_j^{(1)}(x)m_j^{(1,2)}+F^{(2)}_j(x)m_j^{(1)}m_j^{(2)},& \text{ in }\mathbb{T}^n\times (0,T),\medskip\\ \p_t m^{(1,2)}_j(x,t)-\Delta m^{(1,2)}_j(x,t)-{\rm div} ( m_j^{(1,2)}(x,t) A^{(1)}(x))\\ \hspace*{3cm}= R_2(x,t) ,&\text{ in }\mathbb{T}^n\times (0,T),\medskip\\ u^{(1,2)}_j(x,T)=G^{(1)}(x)m^{(1,2)}_j(x,T)+G^{(2)}(x)m_j^{(1)}m_j^{(2)}(x,T), & \text{ in } \mathbb{T}^n,\medskip\\ m^{(1,2)}_j(x,0)=0, & \text{ in } \mathbb{T}^n, \end{cases} \end{equation} where $$R_1(x,t)= \sum_{i,l=1}^{n}\frac{\p^2H}{\p z_i\p z_l}(x,0)\frac{\p u_j^{(1)}}{\p x_i}\frac{\p u_j^{(2)}}{\p x_l}, $$ and $$R_2(x,t)={\rm div}(m_j^{(1)} U^{(2)})+{\rm div}(m_j^{(2)} U^{(1)}).$$ Here, the $l$-th component of $U^{(1)}$ is $$U_l^{(1)}=\sum_{i=1}^{n}\frac{\p^2H}{\p z_l\p z_i}(x,0)\frac{\p u_j^{(1)}}{\p x_i},$$ and the $l$-th component of $U^{(2)}$ is $$U_l^{(2)}=\sum_{i=1}^{n}\frac{\p^2H}{\p z_l\p z_i}(x,0)\frac{\p u_j^{(2)}}{\p x_i}.$$ Following a similar argument to the case $N=1$ (considering $m_0=\varepsilon_2f_2$ ), we have $$u^{(1)}_1(x,t)= u^{(1)}_2(x,t),\quad u^{(2)}_1(x,t)=u^{(2)}_2(x,t),$$ and $$ m^{(1)}_1(x,t)= m^{(1)}_2(x,t),\quad m^{(2)}_1(x,t)= m^{(2)}_2(x,t).$$ By Lemma $\ref{linear app unique}$, $m_j^{(1,2)}$ is uniquely determined by $f_1(x),f_2(x)$ and $G^{(1)}(x)$; in particular, $m_1^{(1,2)}=m_2^{(1,2)}$. By a similar argument, we readily have $F_1^{(2)}(x)=F_2^{(2)}(x).$ Finally, by mathematical induction, we can show the same result holds for $N\geq 3$. That is, for any $k\in\mathbb{N},$ we have $F^{(k)}_1(x)=F^{(k)}_2(x).$ Therefore, we have $F_1(x,z)=F_2(x,z).$ The proof is complete. \end{proof} We proceed with the proof of Theorem $\ref{der g2}$. To that end, we first state an auxiliary lemma, which is an analogue of Lemma $\ref{general heat ker}$, and omit its proof. \begin{lem}\label{general heat ker2} Let $g_1,g_2\in C^{2+\alpha}(\mathbb{T}^n)$ and $A(x)\in C^{2+\alpha}(\mathbb{T}^n)^n$. Consider the following systems with $f\in C^{2+\alpha,1+\frac{\alpha}{2}}(Q)$ and $j=1,2$: \begin{equation} \begin{cases} \p_tu_j(x,t)-\Delta u_j(x,t)+ A(x)\cdot \nabla u_j= f(x,t),& \text{ in } \mathbb{T}^n\times(0,T),\medskip\\ u_j(x,0)= g_j(x)v(x,T) , & \text{ in } \mathbb{T}^n . \end{cases} \end{equation} Suppose for any $v\in C^{2+\alpha,1+\frac{\alpha}{2}}(Q)$, we have $u_1(x,T;v)=u_2(x,T;v)$. Then it holds that $g_1(x)=g_2(x).$ \end{lem} Next, we give the proof of Theorem $\ref{der g2}.$ \begin{proof}[Proof of Theorem $\ref{der g2} $] We shall follow the strategy developed for the proof of Theorem $\ref{der F 2}$. Consider the following systems for $j=1,2$: \begin{equation}\label{general H for g} \begin{cases} -\p_tu_j(x,t)-\Delta u_j(x,t)+ H(x,\nabla u_j)= F(x,m_j(x,t)),& \text{ in }\mathbb{T}^n\times (0,T),\medskip\\ \p_t m_j(x,t)-\Delta m_j(x,t)-{\rm div} (m_j(x,t) H_p (x,\nabla u_j))=0,&\text{ in }\mathbb{T}^n\times (0,T),\medskip\\ u_j(x,T)=G_j(x,m_T), & \text{ in } \mathbb{T}^n,\medskip\\ m_j(x,0)=m_0(x), & \text{ in } \mathbb{T}^n.\\ \end{cases} \end{equation} We next perform the successive linearization process.
Consider the case $N=1.$ Let $$u_{j}^{(1)}:=\p_{\varepsilon_1}u_{j}|_{\varepsilon=0},$$ $$m_{j}^{(1)}:=\p_{\varepsilon_1}m_{j}|_{\varepsilon=0}.$$ By direct computations, one can show that $(u_{j}^{(1)},v_{j}^{(1)} )$ satisfies the following system: \begin{equation} \begin{cases} -\p_tu_j^{(1)}(x,t)-\Delta u^{(1)}_j(x,t)+ A^{(1)}(x)\cdot \nabla u_j= F^{(1)}(x)m_j^{(1)}(x,t),& \text{ in }\mathbb{T}^n\times (0,T),\medskip\\ \p_t m^{(1)}_j(x,t)-\Delta m^{(1)}_j(x,t)-{\rm div} ( m_j^{(1)}(x,t) A^{(1)}(x))=0,&\text{ in }\mathbb{T}^n\times (0,T),\medskip\\ u^{(1)}_j(x,T)=G_j^{(1)}(x)m^{(1)}_j(x,T), & \text{ in } \mathbb{T}^n,\medskip\\ m^{(1)}_j(x,0)=f_1(x), & \text{ in } \mathbb{T}^n. \end{cases} \end{equation} We can solve this system by first deriving $m^{(1)}_j$ and then obtaining $u^{(1)}_j.$ Since $\mathcal{M}_{G_1}=\mathcal{M}_{G_2}$, we have $$ u_1^{(1)}(x,0)=u_2^{(1)}(x,0),$$ for all $f_1\in C_+^{2+\alpha}(\mathbb{T}^n).$ By Lemma $\ref{general heat ker2}$, we readily see that $ G_1^{(1)}(x)=G_2^{(2)}(x).$ Finally, by following a similar argument in the proof of Theorem~\ref{der F 2}, we can conduct the higher-order linearization process to show that $G_1^{(k)}(x)=G_2^{(k)}(x)$ for all $k\in\mathbb{N}$. Hence, $G_1(x,z)=G_2(x,z).$ The proof is complete. \end{proof} \section*{Acknowledgment} The work of H Liu was supported by Hong Kong RGC General Research Funds (project numbers, 11300821, 12301420 and 12302919) and the NSFC/RGC Joint Research Grant (project number, N\_CityU101/21). \vskip0.5cm \begin{thebibliography}{99} \bibitem{AchdouHanLasryLionsMoll} {\sc Y. Achdou, J.M. Lasry, P.-L. Lions, and B. Moll}, Income and wealth distribution in macroeconomics: a continuous-time approach, {\it Review of Economics Studies}, to appear. \bibitem{Amb:18} {\sc D. M. Ambrose}, Strong solutions for time-dependent mean field games with non-separable Hamiltonians, {\it J. Math. Pures Appl.} (9) 113 (2018), 141--154. \bibitem{Amb:21} {\sc D. M. Ambrose}, Existence theory for non-separable mean field games in Sobolev spaces, {\it Indiana U. Math. J.} to appear, arXiv 1807.02223. \bibitem{Amb22}{\sc D. M. Ambrose and A. R. Meszaros}, Well-posedness of mean field games master equation involving non-separable local Hamiltonians, {\it Trans. Amer. Math. Soc.} to appear, arXiv: 2105.03926. \bibitem{initial vector}{\sc A. Briani and P. Cardaliaguet}, Stable solutions in potential mean field game systems, {\it Nonlinear Differ. Equ. Appl.} (2018) 25:1. \bibitem{BFY} {\sc A. Bensoussan, P. J. Graber and S. C. P. Yam}, Mean field games and mean field type control theory, {\it Springer Briefs in Mathematics. Springer, New York, 2013.} \bibitem{Cardaliaguet} {\sc P.~Cardaliaguet}, Notes on Mean-Field Games, {\it based on the lectures by P.L. Lions at Coll\`{e}ge de France}, (2012). \bibitem{Car} {\sc P. Cardaliaguet}, Weak solutions for first order mean field games with local coupling, in {\it Analysis and geometry in control theory and its applications}, 111--158, Springer INdAM Ser., 11, Springer, Cham, 2015. \bibitem{CarDelLasLio} {\sc P.~Cardaliaguet, F.~Delarue, J-M.~Lasry and P-L.~Lions}, The master equation and the convergence problem in mean field games, {\it Annals of Mathematics Studies, 201, Princeton University Press, Princeton, NJ}, 2019. x+212 pp. \bibitem{CarGra} {\sc P. Cardaliaguet and P. J. Graber}, Mean field games systems of first order, {\it ESAIM Control Optim. Calc. Var.} 21 (2015), no. 3, 690--722. \bibitem{CarGraPorTon} {\sc P. Cardaliaguet, P.J. Graber, A. Porretta and D. 
\end{document}
2205.11287v2
http://arxiv.org/abs/2205.11287v2
Recovery of Plane Curves from Branch Points
\documentclass[12pt]{extarticle} \usepackage{amsmath, amsthm, amssymb, color} \usepackage{graphicx} \usepackage{caption} \usepackage{mathtools} \usepackage{enumitem} \usepackage{verbatim} \usepackage{longtable} \usepackage{pifont} \usepackage{makecell} \usepackage{tikz} \usetikzlibrary{matrix} \usetikzlibrary{arrows} \usepackage{algorithm} \usepackage[noend]{algpseudocode} \usepackage{caption} \usepackage[normalem]{ulem} \usepackage{subcaption} \usepackage{algorithm} \usepackage{algpseudocode} \usepackage{xcolor} \usepackage[colorlinks,plainpages,hypertexnames=false,plainpages=false]{hyperref} \hypersetup{urlcolor=blue, citecolor=blue, linkcolor=blue} \tolerance 10000 \headheight 0in \headsep 0in \evensidemargin 0in \oddsidemargin \evensidemargin \textwidth 6.5in \topmargin .25in \textheight 8.8in \synctex=1 \usepackage{makecell} \usepackage{multirow,array} \newtheorem{theorem}{Theorem} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{conjecture}[theorem]{Conjecture} \theoremstyle{definition} \newtheorem{algo}[theorem]{Algorithm} \newtheorem{definition}[theorem]{Definition} \newtheorem{problem}[theorem]{Problem} \newtheorem{remark}[theorem]{Remark} \newtheorem{cor}[theorem]{Corollary} \newtheorem{example}[theorem]{Example} \newtheorem{exercise}[theorem]{Exercise} \newtheorem{notation}[theorem]{Notation} \newtheorem{question}[theorem]{Question} \numberwithin{theorem}{section} \newcommand{\PP}{\mathbb{P}} \newcommand{\RR}{\mathbb{R}} \newcommand{\QQ}{\mathbb{Q}} \newcommand{\CC}{\mathbb{C} } \newcommand{\ZZ}{\mathbb{Z}} \newcommand{\NN}{\mathbb{N}} \newcommand{\KK}{\mathbb{K}} \newcommand{\TT}{\mathbb{T}} \newcommand{\OO}{\mathcal{O}} \newcommand{\xmark}{\ding{55}} \newcommand{\trop}{\mathrm{trop}} \newcommand{\real}{\mathrm{real}} \newcommand{\Aut}{\mathrm{Aut}} \newcommand{\vertex}{\mathtt{vtx}} \newcommand{\Lazypic}[2]{\begin{minipage}{#1} \vspace{0.1cm} \centering {#2}\vspace{0.1cm}\end{minipage}} \title{\bf Recovery of Plane Curves from Branch Points} \author{Daniele Agostini, Hannah Markwig, Clemens Nollau, \\ Victoria Schleis, Javier Sendra--Arranz, and Bernd Sturmfels} \date{ } \begin{document} \maketitle \begin{abstract} \noindent We recover plane curves from their branch points under projection onto a line. Our focus lies on cubics and quartics. These have $6$ and $12$ branch points respectively. The plane Hurwitz numbers $40$ and $120$ count the orbits of solutions. We determine the numbers of real solutions, and we present exact algorithms for recovery. Our approach relies on $150$ years of beautiful algebraic geometry, from Clebsch to Vakil and beyond. \end{abstract} \section{Introduction} \label{sec1} Arthur Cayley in 1879 was the first to use ``algorithm'' to title a discrete geometry paper. In \cite{CayleyAlgo} he identifies the finite vector space $(\mathbb{F}^2)^6$ with the $64$ theta characteristics of a plane quartic curve, i.e.~the $28$ bitangents and the $36$ symmetric determinantal representations. The present paper can be viewed as a sequel. Our Table \ref{table:40covers} is very much in the spirit of \cite{CayleyAlgo}. One century after Cayley, algorithms in discrete geometry became a field in its own~right, in large part thanks to Eli Goodman. We are proud to dedicate this article to Eli's memory. Eli obtained his PhD in 1967 with Heisuke Hironaka. 
He had important publications in algebraic geometry (e.g.~\cite{Goodman}) before embarking on his distinguished career on the discrete~side. \smallskip Consider the map $\pi: \PP^2 \dashrightarrow \PP^1$ that takes a point $(x:y:z)$ in the projective plane to the point $(x:y)$ on the projective line. Geometrically, this is the projection with center $p = (0:0:1)$. We restrict $\pi$ to the curve $V(A)$ defined by a general ternary form of degree~$d$,
\begin{equation} \label{eq:intro_f} A(x,y,z) \,\,\,=\, \sum_{i+j+k=d} \! \alpha_{ijk} \,x^i y^j z^k . \end{equation}
The resulting $d:1$ cover $V(A) \rightarrow \PP^1$ has $d(d-1)$ branch points, represented by a binary~form
\begin{equation} \label{eq:intro_g} B(x,y) \,\,\, = \sum_{i+j=d(d-1)} \!\! \beta_{ij}\, x^i y^j. \end{equation}
Passing from the curve to its branch points defines a rational map from the space $ \PP^{\binom{d+2}{2}-1}$ with coordinates $\alpha$ to the space $\PP^{d(d-1)} $ with coordinates $\beta$. Algebraically, this is the map
\begin{equation} \label{eq:map1} \PP^{\binom{d+2}{2}-1} \,\dashrightarrow\,\, \PP^{d(d-1)} \,,\, \,A \,\mapsto \, {\rm discr}_z(A). \end{equation}
This is the discriminant of $A$ with respect to the last variable. That discriminant is a binary form $B$ of degree $d(d-1)$ in $x,y$ whose coefficients are polynomials of degree $2d-2$ in $\alpha $. We here study the {\em Inverse Problem}, namely recovery of the curve from its branch points. Given the binary form $B$, our task is to compute all ternary forms $\hat A$ such that $ {\rm discr}_z(\hat A) = B$. This is a system of $d(d-1)+1$ polynomial equations of degree $2d-2$ in the $\binom{d+2}{2}$ unknowns $\alpha$. Solving this system means computing a fiber of the map (\ref{eq:map1}) over $B$. Recovery is not unique because ${\rm discr}_z(A)$ is invariant under the action of the subgroup $\mathcal{G}$ of ${\rm PGL}(3)$ given~by
\begin{equation} \label{eq:groupG} \qquad g \,\,:\,\, x \mapsto g_0 x\,, \,\,y \mapsto g_0 y \, , \,\, z \mapsto g_1 x + g_2 y + g_3 z \qquad \hbox{with $\,g_0 g_3 \not=0$.} \end{equation}
By \cite[Proposition 5.2.1 and Corollary 5.2.1]{Ongaro}, the fiber over $B$ is a finite union of $\mathcal{G}$-orbits. Their number $\mathfrak{h}_d$ is the {\em plane Hurwitz number} of degree $d$. Our task is to compute representatives for all $\mathfrak{h}_d$ orbits in the fiber of the map (\ref{eq:map1}) over a given binary form $B$. \begin{example}[$d=2$] For conics we have $\mathfrak{h}_2 = 1 $ and recovery is easy. Our polynomials are $$ \begin{matrix} A & = & \alpha_{200} x^2 + \alpha_{110} x y + \alpha_{101} x z + \alpha_{020} y^2 + \alpha_{011} y z + \alpha_{002} z^2, \\ {\rm discr}_z(A) & = & (4 \alpha_{002} \alpha_{200}-\alpha_{101}^2) x^2 \,+\,(4\alpha_{002} \alpha_{110}-2 \alpha_{011} \alpha_{101}) x y \,+\,(4 \alpha_{002} \alpha_{020}-\alpha_{011}^2) y^2, \\ B & = & \beta_{20} x^2 + \beta_{11} xy + \beta_{02} y^2. \end{matrix} $$ The equations ${\rm discr}_z(\hat A) = B$ describe precisely one $\mathcal{G}$-orbit in $\PP^5$. A point in that orbit is $$ \hat A \,\,= \,\, \frac{1}{4}\beta_{20} x^2 + \frac{1}{4} \beta_{11} x y + \frac{1}{2}\beta_{02} y^2 - \beta_{02} y z + \beta_{02} z^2. $$ Up to the $\mathcal{G}$-action, this is the unique solution to our recovery problem for plane conics. \hfill $ \diamond$ \end{example} Plane Hurwitz numbers $\mathfrak{h}_d$ were studied in Ongaro's 2014 PhD~thesis and in his work with Shapiro \cite{Ongaro, OS}. These served as the inspiration for our project.
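Before turning to higher degrees, we record a quick symbolic check of the $d=2$ example. The following sketch uses Python with \texttt{SymPy}; it is an illustration added for the reader and is independent of the computations reported later in this paper. It verifies that ${\rm discr}_z(\hat A)$ is a nonzero scalar multiple of $B$, so that $\hat A$ indeed lies in the fiber over $B$.
\begin{verbatim}
from sympy import symbols, discriminant, cancel, Rational

x, y, z, b20, b11, b02 = symbols('x y z b20 b11 b02')

B = b20*x**2 + b11*x*y + b02*y**2
# the representative of the G-orbit from the d = 2 example
Ahat = (Rational(1, 4)*b20*x**2 + Rational(1, 4)*b11*x*y
        + Rational(1, 2)*b02*y**2 - b02*y*z + b02*z**2)

ratio = cancel(discriminant(Ahat, z) / B)
print(ratio)                # a nonzero constant times b02, independent of x and y
assert not ratio.has(x, y)  # hence discr_z(Ahat) is proportional to B
\end{verbatim}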
Presently, the only known nontrivial values are $\mathfrak{h}_3 = 40$ and $\mathfrak{h}_4 = 120$. The former value is due to Clebsch~\cite{ClebschShort, ClebschLong}. We first learned it from \cite[Proposition 5.2.2]{Ongaro}. The latter value was computed by Vakil in \cite{Ravi}. The plane Hurwitz number $\mathfrak{h}_4 =120$ was presented with the extra factor $(3^{10}-1)/2$ in \cite[eqn.~(5.14)]{Ongaro} and in \cite[p.~608]{OS}. However, that factor is not needed; see Remark~\ref{rmk:extrafactor}. The parameter count above implies that the closure of the image of (\ref{eq:map1}) is a variety $\mathcal{V}_d$ of dimension $\binom{d+2}{2}-4$ in an ambient space of dimension $d(d-1)$. For $d=2,3$, the two dimensions agree, so recovery is possible for generic $B$. For $d \geq 4$, the constraint $B \in \mathcal{V}_d$ is nontrivial. For instance, $\mathcal{V}_4$ is a hypersurface of degree $3762$ in $\PP^{12}$, as shown by Vakil \cite{Ravi}. \smallskip This article is organized as follows. In Section \ref{sec2} we approach our problem from the perspective of computer algebra. We establish a normal form with respect to the $\mathcal{G}$-action, and we identify the base locus of the map (\ref{eq:map1}). This allows to state the recovery problem as a polynomial system with finitely many solutions over the complex numbers $\CC$. The number of solutions is $\mathfrak{h}_3 = 40$ for cubics, and it is $\mathfrak{h}_4 = 120$, provided $B$ lies on the hypersurface $\mathcal{V}_4$. In Section~\ref{sec3} we establish the relationship to Hurwitz numbers that count abstract coverings of $\PP^1$. We encode such coverings by monodromy graphs, and we determine the real Hurwitz numbers for our setting. A highlight is Table \ref{table:40covers}, which matches the $40$ monodromy representations for $d=3$ with combinatorial labels taken from Clebsch \cite{ClebschLong} and Elkies \cite{elkies}. In Section~\ref{sec4} we exhibit the Galois group for the $40$ solutions when $d=3$, and we discuss different realizations of this group. Theorem \ref{thm:25920} implies that it agrees with the Galois group for the $27$ lines on the cubic surface. Following classical work of Clebsch \cite{ClebschShort, ClebschLong}, we show that the recovery of the $39$ other cubics from the given cubic $A$ can be solved in radicals. Section~\ref{sec5} builds on work of Vakil \cite{Ravi}. It relates the recovery of quartic curves to tritangents of sextic space curves and to del Pezzo surfaces of degree one. Theorem \ref{thm:realcount4planar} determines the possible number of real solutions. Instances with $120$ rational solutions can be constructed by blowing up the plane $\PP^2$ at $8$ rational points. We conclude with Theorem \ref{thm:rleqs} which connects the real structure of $8$ points in $\PP^2$ with that of the $12$ branch points in $\PP^1$. This article revolves around explicit computations, summarized in Algorithms \ref{algo:recovery4}, \ref{alg:recovery3}, \ref{alg:clebsch}, \ref{alg:get8}, \ref{alg:get120}. Our software and other supplementary material is available at the repository website {\tt MathRepo}~\cite{mathrepo} of MPI-MiS via the link \href{https://mathrepo.mis.mpg.de/BranchPoints/}{https://mathrepo.mis.mpg.de/BranchPoints}$\,$. \section{Normal Forms and Polynomial Systems} \label{sec2} We identify $\PP^{\binom{d+2}{2}-1}$ with the space of plane curves (\ref{eq:intro_f}) of degree $d$ and use as homogeneous coordinates the $\alpha_{ijk}$. 
The following subspace of that projective space has codimension three: \begin{equation} \label{eq:Ld} L_d \,\, = \,\,V(\,\alpha_{1 0 \,d-1}\,,\,\alpha_{d-1 \, 1 0 }\,, \, \alpha_{00d} - \alpha_{01 \,d-1} \,). \end{equation} We now show that this linear space serves as normal form with respect to the group action on fibers of (\ref{eq:map1}). The group that acts is the three-dimensional group $\mathcal{G} \subset {\rm PGL}(3)$ given in~(\ref{eq:groupG}). \begin{theorem} \label{thm:normalform} Let $A$ be a ternary form of degree $d\geq 3$ such that \begin{equation} \label{eq:genericity} \displaystyle \alpha_{00d}\left(\, \sum_{k=0}^{d-1}\frac{(k+1)(-1)^k}{d^k}\alpha_{10\,d-1}^k\alpha_{00d}^{d-k-1}\alpha_{d-k-1\,0\,k+1} \right)\,\,\neq \,\,0. \end{equation} The orbit of $\, A$ under the $\mathcal{G}$-action on $\,\PP^{\binom{d+2}{2}-1}$ intersects the linear space $L_d$ in one point. \end{theorem} \begin{remark} This statement is false for $d=2$. The $\mathcal{G}$-orbit of $A$ consists of the conics \begin{align*} & g A \,=\, (\alpha_{002} g_1^2+\alpha_{101} g_0 g_1+\alpha_{200} g_0^2) x^2 +(2 \alpha_{002} g_1 g_2+\alpha_{011} g_0 g_1\,+\,\alpha_{101} g_0 g_2+\alpha_{110} g_0^2) x y \,\, + \\& (2 \alpha_{002} g_1 g_3{+}\alpha_{101} g_0 g_3) x z +(\alpha_{002} g_2^2{+}\alpha_{011} g_0 g_2{+}\alpha_{020} g_0^2) y^2 +(2 \alpha_{002} g_2 g_3{+}\alpha_{011} g_0 g_3) y z \!+\!\alpha_{002} g_3^2 z^2. \end{align*} For generic $\alpha$, no choice of $g \in \mathcal{G}$ makes both the $xy$-coefficient and the $xz$-coefficient zero. Note that the parenthesized sum in (\ref{eq:genericity}) is the zero polynomial for $d=2$, but not for $d \geq 3$. \end{remark} \begin{proof}[Proof of Theorem~\ref{thm:normalform}] The unique point in $\,L_d \,\cap \,\mathcal{G} A\,$ is found by computation. Without loss of generality, we set $g_0=1$. Next we set $g_1 = -\frac{1}{d} \alpha_{10 \,d-1}/ \alpha_{00d}$ because the coefficient of $xz^{d-1}$ in $gA$ equals $(d \alpha_{00d} g_1 + \alpha_{10 \,d-1}) g_3^{d-1}$. The polynomial $gA$ arises from $A$ by the coordinate change $z \mapsto g_1x+g_2y+g_3z$. Thus, a monomial $x^iy^jz^{d-i-j}$ contributes the expression $x^iy^j(g_1x+g_2y+g_3z)^{d-i-j}$ to $gA$. This contributes to the monomials $x^{i'}y^{j'}z^{d-i'-j'}$ with $i'\geq i$ and $j'\geq j$. The coefficient of $x^{d-1}y$ in $gA$ arises from the following subsum of $A$: $$\sum_{i=0}^{d-1} \alpha_{i0\,d-i}\,x^iz^{d-i}\,+\,\sum_{i=0}^{d-1} \alpha_{i1\,d-i-1}\,x^iyz^{d-i-1},$$ after inserting the coordinate change. Thus the coefficient of $x^{d-1}y$ in $gA$ equals $$\sum_{i=0}^{d-1} \alpha_{i0\,d-i}(d-i)\,g_1^{d-i-1} g_2 \,+\,\sum_{i=0}^{d-1} \alpha_{i1\,d-i-1}\,g_1^{d-i-1}.$$ Inserting the above result for $g_1$, and setting the coefficient of $x^{d-1}y$ to zero, we can solve this affine-linear equation for $g_2$, obtaining a rational function in the $\alpha_{ijk}$ as solution for $g_2$. Next, we equate the coefficients of $y z^{d-1} $ and $z^d$. The first can be computed from the subsum $\,\alpha_{00d}z^d\,+\,\alpha_{01\,d-1}yz^{d-1}$ and equals $\,\alpha_{00d}\, d\, g_2 g_3^{d-1}\,+\,\alpha_{01\,d-1}\, g_3^{d-1}$. The second is computed from the $z^d$ coefficient of $A$ only, and we find it to be $\alpha_{00d}\cdot g_3^d$. Setting these two equal and solving for $g_3$, we obtain $\,g_3= \frac{1}{\alpha_{00d}}\,(\alpha_{00d}\, d\, g_2+\alpha_{01\,d-1})$. Inserting our result for $g_2$, we obtain a rational function in the $\alpha_{ijk}$ as solution for $g_3$. 
\end{proof} \begin{example} To be explicit, we display the solution in the two cases of primary interest. For cubics $(d=3)$, the unique point $gA$ in $\,L_3 \,\cap \,\mathcal{G} A\,$ is given by the group element $g$ with $$ g_0 = 1 ,\,\, g_1 \,=\, -\frac{\alpha_{102}}{3 \alpha_{003}},\,\, g_2 \,=\, \frac{9 \alpha_{003}^2 \alpha_{210}-3 \alpha_{003} \alpha_{102} \alpha_{111} +\alpha_{012} \alpha_{102}^2}{3\alpha_{003}(3 \alpha_{003} \alpha_{201}- \alpha_{102}^2)}, $$ $$ g_3 \,\,=\,\, \frac{9 \alpha_{003}^3 \alpha_{210}+3 \alpha_{003} \alpha_{012} \alpha_{201} -3 \alpha_{003}^2 \alpha_{102} \alpha_{111}+\alpha_{003} \alpha_{012} \alpha_{102}^2-\alpha_{102}^2\alpha_{012}} {\alpha_{003} (3 \alpha_{003} \alpha_{201}-\alpha_{102}^2)}. $$ For quartics $(d=4)$, the unique point $gA$ in $\,L_4 \,\cap \,\mathcal{G} A\,$ is given by $g \in \mathcal{G}$, where $$ g_0 = 1,\,\, g_1 \,=\, -\frac{\alpha_{103}}{4 \alpha_{004}},\,\, g_2 \,=\, \frac{64 \alpha_{004}^3 \alpha_{310}-16 \alpha_{004}^2 \alpha_{103} \alpha_{211} +4 \alpha_{004} \alpha_{103}^2 \alpha_{112}-\alpha_{013} \alpha_{103}^3)} {8 \alpha_{004}(8 \alpha_{004}^2 \alpha_{301}-4 \alpha_{004} \alpha_{103} \alpha_{202}+\alpha_{103}^3)}, \,\, $$ and $\,g_3 \,=\, u_3/v_3\,$ with $$ \begin{matrix} u_3 & = & 64 \alpha_{004}^4 \alpha_{310} +16 \alpha_{004}^2 \alpha_{013} \alpha_{301} -16 \alpha_{004}^3 \alpha_{103} \alpha_{211} -8 \alpha_{004} \alpha_{013} \alpha_{103} \alpha_{202} \\ & & +\,4 \alpha_{004}^2 \alpha_{103}^2 \alpha_{112} + 2\alpha_{103}^3\alpha_{013} -\alpha_{004} \alpha_{013} \alpha_{103}^3 ,\\ v_3 & = & 2\alpha_{004} (8 \alpha_{004}^2 \alpha_{301}-4 \alpha_{004} \alpha_{103} \alpha_{202}+\alpha_{103}^3). \qquad \qquad \qquad \qquad \end{matrix} $$ \smallskip One can derive similar formulas for the transformation to normal form when $d \geq 5$. The denominator in the expressions for $g$ is the polynomial of degree $d$ in $\alpha$ shown in (\ref{eq:genericity}). \hfill $ \diamond$ \end{example} Our task is to solve ${\rm discr}_z(\hat A) = B$, for a fixed binary form $B$. This equation is understood projectively, meaning that we seek $\hat A$ in $\PP^{\binom{d+2}{2}-1}$ such that ${\rm discr}_z(\hat A) $ vanishes at all zeros of $B$ in $\PP^1$. By Theorem \ref{thm:normalform}, we may assume that $\hat A$ lies in the subspace $L_d$. Our system has extraneous solutions, namely ternary forms $\hat A$ whose discriminant vanishes identically. They must be removed when solving our recovery problem. We now identify them geometrically. \begin{proposition} \label{prop:baselocus} The base locus of the discriminant map (\ref{eq:map1}) has two irreducible components. These have codimension $3$ and $2d-1$ respectively in $\,\PP^{\binom{d+2}{2}-1}$. The former consists of all curves that are singular at $\,p = (0:0:1)$, and the latter is the locus of non-reduced~curves. \end{proposition} \begin{proof} The binary form ${\rm discr}_z(A)$ vanishes identically if and only if the univariate polynomial function $z \mapsto A(u,v,z)$ has a double zero $\hat z$ for all $u,v \in \CC$. If $p$ is a singular point of the curve $V(A)$ then $\hat z=0$ is always such a double zero. If $A$ has a factor of multiplicity $\geq 2$ then so does the univariate polynomial $z \mapsto A(u,v,z)$, and the discriminant vanishes. Up to closure, we may assume that this factor is a linear form, so there are $\binom{d}{2}-1 + 2$ degrees of freedom. This shows that the family of nonreduced curves $A$ has codimension $2d-1 = (\binom{d+2}{2}-1) - (\binom{d}{2}+1)$. 
The two scenarios define two distinct irreducible subvarieties of $\PP^{\binom{d+2}{2}-1}$. For $A$ outside their union, the binary form ${\rm discr}_z(A)$ is not identically zero. \end{proof} We now present our solution to the recovery problem for cubic curves. Let $B$ be a binary sextic with six distinct zeros in $\PP^1$. We are looking for a ternary cubic in the normal form $$ A \,\,=\,\, \alpha_{300} x^3 + \alpha_{201} x^2 z + \alpha_{111} x y z + \alpha_{120} x y^2 + \alpha_{030} y^3 + \alpha_{021} y^2 z + y z^2 + z^3. $$ Here we assume $p=(0:0:1) \not\in V(A)$, so that $\alpha_{012} = \alpha_{003} = 1$. We saw this in Theorem~\ref{thm:normalform}. The remaining six coefficients $\alpha_{ijk}$ are unknowns. The discriminant has degree three in these: $$ \! {\rm discr}_z(A) \! = \! (4 \alpha_{201}^3+27 \alpha_{300}^2) x^6 +(12 \alpha_{111} \alpha_{201}^2-18 \alpha_{201} \alpha_{300}) x^5 y + \cdots + (4 \alpha_{021}^3-\alpha_{021}^2- \cdots +4 \alpha_{030})y^6. $$ This expression is supposed to vanish at each of the six zeros of $B$. This gives a system of six inhomogeneous cubic equations in the six unknowns $\alpha_{ijk}$. In order to remove the extraneous solutions described in Proposition \ref{prop:baselocus}, we further require that the leading coefficient of the discriminant is nonzero. We can write our system of cubic constraints in the $\alpha_{ijk}$ as follows: \begin{equation} \label{eq:system3} \begin{matrix} \quad {\rm rank} \begin{bmatrix} 4 \alpha_{201}^3{+}27 \alpha_{300}^2 & 12 \alpha_{111} \alpha_{201}^2{-}18 \alpha_{201} \alpha_{300} & \cdots & 4 \alpha_{021}^3{-}\alpha_{021}^2- \cdots +4 \alpha_{030} \\ \beta_{60} & \beta_{51} & \cdots & \beta_{06} \end{bmatrix} \,\leq\, 1 \smallskip \\ {\rm and}\quad 4 \alpha_{201}^3+27 \alpha_{300}^2 \not= 0. \qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad \qquad \end{matrix} \end{equation} This polynomial system exactly encodes the recovery of plane cubics from six branch points. \begin{corollary}\label{cor:deg3} For general $\beta_{ij} $, the system (\ref{eq:system3}) has $\mathfrak{h}_3 = 40$ distinct solutions $\alpha \in \CC^6$. \end{corollary} \begin{proof} The study of cubic curves tangent to a pencil of six lines goes back to Cayley \cite{Cayley}. The formula $\mathfrak{h}_3 = 40$ was found by Clebsch \cite{ClebschShort, ClebschLong}. We shall discuss his remarkable work in Section~\ref{sec4}. A modern proof for $\mathfrak{h}_3 = 40$ was given by Kleiman and Speiser in \cite[Corollary~8.5]{KS}. We here present the argument given in Ongaro's thesis \cite{Ongaro}. By \cite[Proposition 5.2.2]{Ongaro}, every covering of $\PP^1$ by a plane cubic curve is a shift in the group law of that elliptic curve followed by a linear projection from a point in $\PP^2$. This implies that the classical Hurwitz number, which counts such coverings, coincides with the plane Hurwitz number $\mathfrak{h}_3$. The former is the number of six-tuples $\tau = (\tau_1,\tau_2,\tau_3,\tau_4,\tau_5,\tau_6)$ of transpositions of $\{1,2,3\}$, not all equal, whose product is the identity, up to conjugation. We can choose $\tau_1,\ldots,\tau_5$ in $3^5= 243$ distinct ways. Three of these are disallowed, so there are $240$ choices. The symmetric group $\mathbb{S}_3$ acts by conjugation on the tuples $\tau$, and all orbits have size six. The number of classes of allowed six-tuples is thus $240/6 = 40$. This is our Hurwitz number $\mathfrak{h}_3$.
Now, the assertion follows from Theorem~\ref{thm:normalform}, which ensures that the solutions of (\ref{eq:system3}) are representatives. \end{proof} We next turn to another normal form, shown in (\ref{eq:othernf}), which has desirable geometric properties. Let $A$ be a ternary form (\ref{eq:intro_f}) with $\alpha_{00\,d} \not= 0$. We define a group element $g \in \mathcal{G}$ by $$ g_0 = 1 \,, \,\, g_1 = -\frac{\alpha_{10\,d-1}}{d \cdot \alpha_{00d}} \, , \,\, g_2 = -\frac{\alpha_{01\,d-1}}{d \cdot \alpha_{00d}} \,, \,\, g_3 = 1. $$ The coefficients of $xz^{d-1}$ and $yz^{d-1}$ in $gA$ are zero. Thus, after this transformation, we have \begin{equation} \label{eq:othernf} A \,\,= \,\, z^d \,+\, A_2(x,y)\cdot z^{d-2} \,+\, A_{3}(x,y)\cdot z^{d-3} \,+ \,\cdots \,+\, A_{d-1}(x,y) \cdot z \,+ \, A_{d}(x,y) . \end{equation} Here $A_i(x,y)$ is an arbitrary binary form of degree $i$. Its $i+1$ coefficients are unknowns. The group~$\mathcal{G}$ still acts by rescaling $x,y$ simultaneously with arbitrary non-zero scalars $\lambda \in \mathbb{C}^*$. We next illustrate the utility of (\ref{eq:othernf}) by computing the plane Hurwitz number for $d{=}4$. Consider a general ternary quartic $A$. We record its $12$ branch points by fixing the discriminant $B = {\rm discr}_z(A)$. Let $\hat A \in L_4$ be an unknown quartic in the normal form specified in Theorem \ref{thm:normalform}, so $\hat A$ has $13$ terms, $11$ of the form $\alpha_{ijk} x^i y^j z^k$ plus $y z^3$ and $z^4$. Our task is to solve the following system of $12$ polynomial equations of degree five in the $11$ unknowns $\alpha_{ijk}$: \begin{equation} \label{eq:system4} \hbox{ Find all quartics $\hat A$ such that ${\rm discr}_z(\hat A)$ is a non-zero multiple of the binary form $B$. } \end{equation} The number of solutions of this system was found by Vakil \cite{Ravi} with geometric methods. \begin{theorem} \label{thm:120} Let $B = \sum_{i+j=12} \beta_{ij} x^i y^j $ be the discriminant with respect to $z$ of a general ternary quartic $A$. Then the polynomial system (\ref{eq:system4}) has $\mathfrak{h}_4 = 120$ distinct solutions $\alpha \in \CC^{11}$. \end{theorem} The hypothesis ensures that $B$ is a point on Vakil's degree $3762$ hypersurface $\mathcal{V}_4$ in $\PP^{12}$. This is a necessary and sufficient condition for the system (\ref{eq:system4}) to have any solution at all. \begin{corollary} If we prescribe $11$ general branch points on the line $\PP^1$ then the number of complex quartics $A$ such that ${\rm discr}_z( A)$ vanishes at these points is equal to $120 \cdot 3762 = 451440$. \end{corollary} \begin{proof} Consider the space $\PP^{12}$ of binary forms of degree $12$. Vanishing at $11$ general points defines a line in $\PP^{12}$. That line meets the hypersurface $\mathcal{V}_4$ in $3762$ points. By Theorem \ref{thm:120}, each of these points in $\mathcal{V}_4 \subset \PP^{12}$ has precisely $120$ preimages $A$ in $\PP^{14}$ under the map (\ref{eq:map1}). \end{proof} \begin{remark} \label{rmk:extrafactor} It was claimed in \cite[equation (5.14)]{Ongaro} and \cite[page 608]{OS} that $\mathfrak{h}_4$ is equal to $120 \cdot (3^{10}-1)/2 = 3542880$. That claim is not correct. The factor $ (3^{10}-1)/2$ is not needed. \end{remark} \begin{proof}[Proof of Theorem \ref{thm:120}] We work with the normal form (\ref{eq:othernf}). Up to the $\mathcal{G}$-action, the triples $(A_2,A_3,A_4)$ are parametrized by the $11$-dimensional weighted projective space $ \mathbb{P}(2^3,3^4,4^5)$.
Following Vakil \cite{Ravi}, we consider a second weighted projective space of dimension $11$, namely $\, \mathbb{P}(3^5, 2^7)$. The weighted projective space $\mathbb{P}(3^5,2^7)$ parametrizes pairs $(U_2,U_3)$ where $U_i = U_i(x,y)$ is a binary form of degree $2i$, up to a common rescaling of $x,y$ by some $\lambda \in \mathbb{C}^*$. We define a rational map between our two weighted projective spaces as follows: \begin{equation} \label{eq:mapnu} \begin{matrix} \nu \,:\, \mathbb{P}(2^3,3^4,4^5)\, \dashrightarrow \,\mathbb{P}(3^5,2^7) \, , \,\, (A_2,A_3,A_4) \,\mapsto \, (U_2,U_3), \qquad \qquad \smallskip \\ \qquad {\rm where} \quad U_2 \,=\, -4A_4-\frac{1}{3}A_2^2 \quad {\rm and} \quad U_3 \,=\, A_3^2-\frac{8}{3}A_2A_4 + \frac{2}{27}A_2^3. \end{matrix} \end{equation} We compose this with the following map into the space $\PP^{12} $ of binary forms of degree $12$: \begin{equation} \label{eq:mapmu} \mu \,:\,\mathbb{P}(3^5,2^7) \, \dashrightarrow \, \PP^{12} \, , \,\, (U_2,U_3) \, \mapsto \, 4\cdot U_2^3+27\cdot U_3^2. \end{equation} The raison d'\^{e}tre for the maps (\ref{eq:mapnu}) and (\ref{eq:mapmu}) is that they represent the formula of the discriminant ${\rm discr}_z(A)$ of the special quartic in (\ref{eq:othernf}). Thus, modulo the action of $\mathcal{G}$, we have $$ \pi \,\,= \,\,\mu \,\circ\, \nu , $$ where $\pi: \PP^{14} \rightarrow \PP^{12}$ is the branch locus map in (\ref{eq:map1}). One checks this by a direct computation. Vakil proves in \cite[Proposition 3.1]{Ravi} that the map $\nu$ is dominant and its degree equals $120$. We also verified this statement independently via a numerical calculation in affine coordinates using \texttt{HomotopyContinuation.jl} \cite{BT}, and we certified its correctness using the method in \cite{BRT}. This implies that the image of the map $\mu$ equals the hypersurface $\mathcal{V}_4$. In particular, $\mathcal{V}_4$ is the locus of all binary forms of degree $12$ that are sums of the cube of a quartic and the square of a sextic. Vakil proves in \cite[Theorem 6.1]{Ravi} that the map $\mu$ is birational onto its image $\mathcal{V}_4$. We verified this statement by a Gr\"obner basis calculation. This result implies that both $\nu$ and $\pi$ are maps of degree $120$, as desired. \end{proof} \begin{remark} We also verified that $\mathcal{V}_4$ has degree $3762$, namely by solving $12$ random affine-linear equations on the parametrization (\ref{eq:mapmu}). The common Newton polytope of the resulting polynomials has normalized volume $31104$. This is the number of paths tracked by the polyhedral homotopy in \texttt{HomotopyContinuation.jl}. We found $22572 = 3762 \times 6$ complex solutions. The factor $6$ arises because $U_2$ and $U_3$ can be multiplied by roots of unity. \end{remark} \begin{algo} \label{algo:recovery4} We implemented a numerical recovery method based on the argument used to prove Theorem \ref{thm:120}. The \underbar{input} is a pair $(U_2,U_3)$ as above. The \underbar{output} consists of the $120$ solutions in the subspace $L_4 \simeq \PP^{11}$ seen in (\ref{eq:Ld}). We find these by solving the equations \begin{equation} \label{eq:raviU} A_1 A_3-4 A_0 A_4- \frac{1}{3} A_2^2\, = \,U_2 \quad {\rm and} \quad A_1^2 A_4 + A_0 A_3^2 - \frac{8}{3} A_0A_2A_4 -\frac{1}{3} A_1A_2A_3+\frac{2}{27}A_2^3\, =\, U_3. \end{equation} By \cite[Equation (5)]{Ravi}, these represent the discriminant for quartics $A =\sum_{i=0}^4 A_i z^{4-i}$. 
To be precise, (\ref{eq:raviU}) is a system of $12= 5+7$ equations in the $12 $ unknown coefficients of $A \in L_4$. These have $120$ complex solutions, found easily with \texttt{HomotopyContinuation.jl} \cite{BT}. \end{algo} \section{Hurwitz Combinatorics} \label{sec3} The enumeration of Riemann surfaces satisfying fixed ramification was initiated by Hurwitz in his 1891 article \cite{Hurwitz}. Hurwitz numbers are a widely studied subject, seen as central to combinatorial algebraic geometry. For basics see \cite{CJM, CavalieriMiles, GMR, IZ, Ongaro} and the references therein. This paper concerns a general projection $V(A)\rightarrow \mathbb{P}^1$ of a smooth plane curve of degree $d$ and genus $g=\binom{d-1}{2}$. In Section \ref{sec2} we studied the inverse problem of recovering $A$ from the $d(d-1)$ simple branch points. We now relate the plane Hurwitz numbers $\mathfrak{h}_d$ to the Hurwitz numbers $H_d$ that count abstract covers. To be precise, $H_d$ is the number of degree $d$ covers $f$ of $\mathbb{P}^1$ by a genus $\binom{d-1}{2}$ curve $C$ having $d(d-1)$ fixed simple branch points. Each cover $f:C\rightarrow \mathbb{P}^1$ is weighted by $\frac{1}{|\Aut(f)|}$. Following \cite{CavalieriMiles}, the number $H_d$ can be found by counting monodromy representations, i.e.\ homomorphisms from the fundamental group of the target minus the branch points to the symmetric group over the fiber of the base point. \begin{lemma}[Hurwitz \cite{Hurwitz}] \label{lem:abstract_hurwitz_number} The Hurwitz number $H_d$ equals $1/d!$ times the number of tuples of transpositions $\tau = (\tau_1,\tau_2,\ldots,\tau_{d\cdot (d-1)})$ in the symmetric group $\mathbb{S}_d$ satisfying $$\tau_{d\cdot (d-1)}\circ\dots \circ \tau_2 \circ\tau_1 = \mathrm{id},$$ where the subgroup generated by the $\tau_i$ acts transitively on the set $\{1,2,\dots,d\}$. \end{lemma} \begin{proposition}\label{prop:abstract_plane_numbers_relation} For $d \geq 3$, the plane Hurwitz number is less than or equal to the classical Hurwitz number that counts abstract covers. In symbols, we have $\,\mathfrak{h}_d \,\leq \,H_d$. \end{proposition} The restriction $d \geq 3$ is needed because of the weighted count, with automorphisms. For $d=2$, we have $H_2= 1/2$ because of the existence of a non-trivial automorphism for maps $\PP^1 \rightarrow \PP^1$. For higher $d$, the covers coming from projections of plane curves do not have automorphisms, so we can count them without this weight. This establishes Proposition \ref{prop:abstract_plane_numbers_relation}. The two cases of primary interest in this paper are $d=3$ and $d=4$. From the proofs of Corollary \ref{cor:deg3} and Theorem \ref{thm:120}, we infer that the two cases exhibit rather different behaviors. \begin{corollary} \label{cor:7528620} For linear projections of cubic curves and quartic curves in $\PP^2$, we have $$ \qquad \qquad \mathfrak{h}_3 \, = \, H_3 \, = \, 40 \qquad {\rm and} \qquad \mathfrak{h}_4 \, = \, 120 \, \, < \,\,H_4 \,= \, 7528620. $$ \end{corollary} The count in Lemma \ref{lem:abstract_hurwitz_number} can be realized by combinatorial objects known as {\em monodromy graphs}. These occur in different guises in the literature. We here use the version that is defined formally in \cite[Definition 3.1]{GMR}. These represent abstract covers in the tropical setting of balanced metric graphs. We next list all monodromy graphs for $d=3$. 
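Before doing so, we note that the value $H_3 = 40$ in Corollary \ref{cor:7528620} is small enough to confirm by exhaustive search, directly from Lemma \ref{lem:abstract_hurwitz_number}. The following sketch in Python is an illustration for the reader, independent of the \textsc{Oscar} computations reported below; it enumerates all six-tuples of transpositions in $\mathbb{S}_3$ whose product is the identity and whose entries generate a transitive subgroup, and then divides by $3! = 6$.
\begin{verbatim}
from itertools import product

# transpositions of {0,1,2} in one-line notation: (12), (13), (23)
TRANSPOSITIONS = [(1, 0, 2), (2, 1, 0), (0, 2, 1)]
IDENTITY = (0, 1, 2)

def compose(s, t):
    # (s o t)(i) = s(t(i))
    return tuple(s[t[i]] for i in range(3))

tuples = 0
for taus in product(TRANSPOSITIONS, repeat=6):
    prod = IDENTITY
    for tau in taus:
        prod = compose(tau, prod)      # tau_6 o ... o tau_2 o tau_1
    # transitivity on {0,1,2}: any two distinct transpositions generate S_3
    if prod == IDENTITY and len(set(taus)) > 1:
        tuples += 1

print(tuples, tuples // 6)             # 240 tuples, hence H_3 = 240/6 = 40
\end{verbatim}
For $d=4$ the analogous enumeration runs over twelve-tuples of transpositions in $\mathbb{S}_4$ and is far too large for such a naive loop; the count $H_4 = 7528620$ in Corollary \ref{cor:7528620} is instead obtained with \textsc{Oscar} in the proof of Theorem \ref{thm:realcount4}.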
\begin{example}[Forty monodromy graphs] For $d=3$, Lemma \ref{lem:abstract_hurwitz_number} yields $H_3 = 40$ six-tuples $\tau = (\tau_1,\tau_2,\ldots,\tau_6)$ of permutations of $\{1,2,3\}$, up to the conjugation action by $\mathbb{S}_3$. In Table~\ref{table:40covers} we list representatives for these $40$ orbits (see also \cite[Table 1]{Ongaro2}). Each tuple $\tau$ determines a monodromy graph as in \cite[Lemma 4.2]{CJM} and \cite[Section 3.3]{GMR}. Reading from the left to right, the diagram represents the cycle decompositions of the permutations $\tau_i \circ \cdots \circ \tau_1$ for $i=1,\ldots,6$. For instance, for the first type $\mathcal{A}_1$, we start at ${\rm id} = (1)(2)(3)$, then pass to $(12)(3)$, next to $(123)$, then to $(12)(3)$, etc. On the right end, we are back at ${\rm id} = (1)(2)(3)$. \begin{longtable}[H]{| c | c | c | c | c | c |c|} \hline $\!\!$ \textbf{Type}$\!$ &\textbf{Real?}$\!$ & \textbf{Six-Tuple} $\tau$ & \textbf{Monodromy Graph} & \!\textbf{Clebsch}\! & $\!\mathbb{P}^3(\mathbb{F}_3)\!$ \\ \hline \hline \makecell{$\mathcal{A}_1$ \\ $\mathcal{A}_2$ } & \makecell{\checkmark $ (12)$ \\ \checkmark $ (12)$ } &\makecell{ $(12)(13)(13)(13)(13)(12)$ \\ $ (12)(13)(13)(23)(23)(12)$}& \Lazypic{5cm}{ \includegraphics{type_a_real.pdf} } & \makecell{ $ 123 $ \\ $ 1a $} & \makecell{ $0010$ \\ $0100$} \\ \hline \makecell{$\mathcal{A}_3$ \\$\mathcal{A}_4$ \\ $\mathcal{A}_{11}$ \\$\mathcal{A}_{12}$ }&\makecell{ \xmark \\ \xmark \\ \xmark \\ \xmark} & \makecell{ $(12)(13)(13)(13)(23)(13)$\\ $(12)(13)(13)(13)(12)(23)$ \\$(12)(13)(13)(23)(12)(13)$\\$(12)(13)(13)(23)(13)(23)$} & \Lazypic{5cm}{\includegraphics{type_a_12.pdf} } & \makecell{ $ 348 $ \\ $357$ \\ $7b$ \\ $4c$ } & \makecell{$ 1022 $ \\ $1012$ \\$1102$ \\ $1201$} \\ \hline \makecell{$\mathcal{A}_5$ \\ $\mathcal{A}_6$\\ $\mathcal{A}_7$ \\$\mathcal{A}_{13}$ \\$\mathcal{A}_{14}$\\ $\mathcal{A}_{15}$}& \makecell{\xmark \\ \xmark \\ \xmark\\ \xmark \\ \xmark\\\xmark}& \makecell{ $(12)(13)(23)(23)(13)(12)$\\ $(12)(13)(23)(23)(23)(13)$\\ $(12)(13)(23)(23)(12)(23)$\\ $(12)(13)(23)(12)(23)(12)$\\ $(12)(13)(23)(12)(12)(13)$ \\$(12)(13)(23)(12)(13)(23)$ }& \Lazypic{5cm}{\includegraphics{type_a_13.pdf} }& \makecell{ $456$ \\ $267$ \\ $ 168 $ \\ $1b$ \\ $7c$ \\ $4a$ } &\makecell{$1020$ \\ $1011$ \\ $0012$ \\ $1100$ \\ $1201$ \\ $0101$}\\ \hline \makecell{$\mathcal{A}_8$ \\ $\mathcal{A}_9$ \\ $\mathcal{A}_{10}$\\$\mathcal{A}_{16}$ \\ $\mathcal{A}_{17}$ \\ $\mathcal{A}_{18}$ }&\makecell{ \xmark \\ \xmark \\ \xmark\\\xmark \\ \xmark \\ \xmark}&\makecell{ $(12)(13)(12)(12)(13)(12)$\\ $(12)(13)(12)(12)(23)(13)$ \\ $ (12)(13)(12)(12)(12)(23)$\\$(12)(13)(12)(13)(23)(12)$ \\$(12)(13)(12)(13)(12)(13)$\\$(12)(13)(12)(13)(13)(23)$ }& \Lazypic{5cm}{\includegraphics{type_a_23.pdf}} & \makecell{ $789$ \\ $ 159 $ \\ $249$ \\ $1c$ \\ $7a$ \\ $4b$ } & \makecell{$1010$ \\ $0010$ \\ $1021$ \\ $1200$ \\ $0102$ \\ $1101$} \\ \hline \makecell{$\mathcal{B}_1$ \\$\mathcal{B}_2$ } & \makecell{\checkmark (id) \\ \checkmark (id) } & \makecell{$(12)(12)(13)(13)(12)(12)$ \\ $(12)(12)(13)(13)(23)(23)$ } & \Lazypic{5cm}{\includegraphics{type_b.pdf} }& \makecell{ base \\ $147$ } & \makecell{ $1000$ \\ $0001 $} \\ \hline \hline \makecell{$\mathcal{C}^{\ell}_1$ \\ $\mathcal{C}^{\ell}_2$ \\ $\mathcal{C}^{\ell}_3$ }& \makecell{\checkmark $(12)$ \\\xmark \\\xmark } & \makecell{$(12)(12)(12)(13)(13)(12)$ \\$(12)(12)(12)(13)(23)(13)$ \\ $(12)(12)(12)(13)(12)(23)$ } & \Lazypic{5cm}{ \includegraphics{type_cl.pdf} } & \makecell{$2a$ \\ $8b$ \\ $5c$} & \makecell{$0110$ \\ 
$1112$ \\ $1222$} \\ \hline \makecell{$\mathcal{C}^{r}_1$ \\$\mathcal{C}^{r}_2$ \\ $\mathcal{C}^{r}_3$ }& \makecell{\checkmark $(12)$ \\ \xmark \\ \xmark} & \makecell{$(12)(13)(13)(12)(12)(12)$ \\$(12)(13)(23)(13)(13)(13)$ \\ $(12)(13)(12)(23)(23)(23)$ }& \Lazypic{5cm}{\includegraphics{type_cr.pdf} } & \makecell{$3a$ \\ $6b$ \\ $9c$} & \makecell{$0120$ \\ $1121$ \\ $1211$} \\ \hline \makecell{$\mathcal{D}^{\ell}_1$} & \makecell{\checkmark (id) } & $(12)(12)(12)(12)(13)(13)$& \Lazypic{5cm}{\includegraphics{type_dl.pdf} } & $369$ & $1002$\\ \hline\hline \makecell{$\mathcal{D}^{r}_1$} & \makecell{\checkmark (id) } & $(12)(12)(13)(13)(13)(13)$ & \Lazypic{5cm}{\includegraphics{type_dr.pdf} } & $258$ & $1001$ \\ \hline\hline \makecell{$\mathcal{E}^{\ell}_1 $ \\ $\mathcal{E}^{\ell}_3 $ \\ $\mathcal{E}^{\ell}_5 $} &\makecell{ \xmark \\ \xmark \\ \xmark} & \makecell{$(12)(12)(13)(23)(13)(12)$\\$(12)(12)(13)(23)(23)(13)$ \\$ (12)(12)(13)(23)(12)(23)$ } & \Lazypic{5cm}{\includegraphics{type_el_odd.pdf} } & \makecell{$2b$ \\ $8c$ \\ $5a$} & \makecell{$1110$ \\ $1221$ \\ $0111$ }\\ \hline \makecell{$\mathcal{E}^{\ell}_2 $ \\ $\mathcal{E}^{\ell}_4 $\\ $\mathcal{E}^{\ell}_6 $ }& \makecell{\xmark \\ \xmark \\ \xmark} & \makecell{$(12)(12)(13)(12)(23)(12)$\\$(12)(12)(13)(12)(12)(13)$\\$(12)(12)(13)(12)(13)(23)$ }& \Lazypic{5cm}{ \includegraphics{type_el_even.pdf} } & \makecell{$2c$ \\ $5b$ \\ $8a$ } & \makecell{$1220$ \\$1111$\\ $0112$} \\ \hline\hline \makecell{$\mathcal{E}^{r}_1$\\ $\mathcal{E}^{r}_3$\\$\mathcal{E}^{r}_5$ }&\makecell{\xmark \\ \xmark \\ \xmark }&\makecell{$(12)(13)(23)(13)(12)(12)$\\$ (12)(13)(13)(12)(13)(13)$ \\$(12)(13)(13)(12)(23)(23) $ }& \Lazypic{5cm}{ \includegraphics{type_er_odd.pdf} } & \makecell{$3c$ \\ $6c$ \\ $9b$} & \makecell{$1210$ \\$1212$ \\ $1122$ } \\ \hline \makecell{$\mathcal{E}^{r}_2$ \\ $\mathcal{E}^{r}_4$ \\$\mathcal{E}^{r}_6$ }&\makecell{ \xmark \\ \xmark \\ \xmark}& \makecell{$(12)(13)(12)(23)(12)(12)$ \\$(12)(13)(12)(23)(13)(13)$ \\$(12)(13)(23)(13)(23)(23)$}& \Lazypic{5cm}{ \includegraphics{type_er_even.pdf} } & \makecell{$3b$ \\ $6a$\\$9a$} & \makecell{$1120$ \\$0121$ \\ $0122$ } \\ \hline \caption{The monodromy graphs for the $H_3=40$ coverings of $\PP^1$ by a genus one curve. Eight of the $40$ coverings are real, and the certifying edge coloring is shown in the graph. The two rightmost columns, labeled {\bf Clebsch} and $\,\PP^3(\mathbb{F}_3)$, will be explained in Section \ref{sec4}. }\label{table:40covers} \end{longtable} To identify real monodromy representations (see Lemma \ref{lem:real_abstract_hurwitz_numbers}), we give a coloring as in \cite[Definition 3.5]{GMR}. Using \cite[Lemma 3.5]{GMR} we find eight real covers among the $40$ complex covers. We use \cite[Lemma 2.3]{GMR} to associate the real covers to their monodromy representations. We divide the $40$ classes into five types, $\mathcal{A}$ to $\mathcal{E}$, depending on the combinatorial type of the graph. Types $\mathcal{A}$ and $\mathcal{B}$ are symmetric under reflection of the ends, $\mathcal{C}$, $\mathcal{D}$ and $\mathcal{E}$ are not. An upper index $\ell$ indicates that the cycle of the graph is on the left side of the graph, while $r$ indicates that it is on the right side. The number of classes of each type is the multiplicity in \cite[Lemma 4.2]{CJM} and \cite[Table 1]{Ongaro2}. Each class starts with the real types, if there are any, and proceeds lexicographically in $\tau$. In the table, the edges of the monodromy graphs are labeled by the cycle they represent. 
If the edge is unlabeled, then the corresponding cycle is either clear from context or varies through all possible cycles in $\mathbb{S}_3$ of appropriate length. \hfill $ \diamond$ \end{example} We now turn to branched covers that are real. In the abstract setting of Hurwitz numbers $H_d$, this has been studied in \cite{Cadoret, GMR, IZ}. A cover $f : C \rightarrow \PP^1$ is called {\em real} if the Riemann surface $C$ has an involution which is compatible with complex conjugation on the Riemann sphere $\PP^1$. The branch points in $\PP^1$ can be real or pairs of complex conjugate points. We let $H^{\real}_d(r)$ be the weighted count of degree $d$ real covers $f$ of $\mathbb{P}^1$ by a genus $\binom{d-1}{2}$ curve $C$ having $d(d-1)$ fixed simple branch points, of which $r$ are real. As before, each cover $f:C\rightarrow \mathbb{P}^1$ is weighted by $\frac{1}{|\Aut(f)|}$. The following result appears in \cite[Section 3.3]{Cadoret}. \begin{lemma} \label{lem:real_abstract_hurwitz_numbers} The real Hurwitz number $H^\real_d(r)$ equals $1/d!$ times the number of tuples $\tau$ as in Lemma \ref{lem:abstract_hurwitz_number} for which there exists an involution $\sigma \in \mathbb{S}_3$ such that $$\sigma\circ \tau_i\circ\dots\circ\tau_1\circ\sigma = (\tau_1\circ\dots\circ\tau_i)^{-1}$$ for $i=1,\dots,r-1$ and $\sigma\circ\tau_{r+i}\circ\sigma=\tau_{r'+1-i}$ for $i = 1,\dots,r'$, where $r$ is the number of real branch points and $r'$ the number of pairs of complex conjugate branch points. \end{lemma} Geometrically, this means that, for a pair of complex conjugate points $q_1,q_2$, under complex conjugation the arc $\gamma_1$ around $q_1$ maps to $-\gamma_2$, where $\gamma_2$ is the arc around $q_2$. Our next result says that the real Hurwitz number for $d=3$ does not depend on $r$ and $r' =6-2r$. \begin{proposition}\label{prop:real_abstract_hn_degree_3} We have $H^{\real}_3(r)=8$ for $r=6,4,2,0$. \end{proposition} \begin{proof} We prove this by investigating all monodromy representations in Table~\ref{table:40covers}. Using explicit computations, we identify all six-tuples $\tau$ that satisfy the conditions in Lemma~\ref{lem:real_abstract_hurwitz_numbers}. For a cover with $6$ real branch points, we obtain $8$ real monodromy representations, of types $\mathcal{A}_1, \mathcal{A}_2, \mathcal{B}_1 ,\mathcal{B}_2, \mathcal{C}^l_1, \mathcal{C}^r_1,\mathcal{D}^l_1$ and $ \mathcal{D}^r_1$, listed in Table \ref{table:40covers} with coloring. For a cover with $4$ real branch points and a pair of complex conjugate branch points, we again obtain $8$ real monodromy representations. These are the types $\mathcal{A}_3 , \mathcal{A}_{12}, \mathcal{B}_1 ,\mathcal{B}_2, \mathcal{C}^l_2, \mathcal{C}^r_1,\mathcal{D}^l_1$ and $ \mathcal{D}^r_1$. For two real branch points and two complex conjugate pairs, we again obtain $8$ real monodromy representations, namely of types $\mathcal{A}_{9}, \mathcal{A}_{12}, \mathcal{B}_1 ,\mathcal{B}_2, \mathcal{D}^l_1, \mathcal{D}^r_1, \mathcal{E}^{\ell}_3 $ and $\mathcal{E}^{r}_1$. Finally, for three pairs of complex conjugate branch points, we find the $8$ types $\mathcal{A}_{5}, \mathcal{A}_{17}, \mathcal{B}_1 ,\mathcal{B}_2 ,\mathcal{D}^l_1, \mathcal{D}^r_1, \mathcal{E}^{\ell}_3 $ and $\mathcal{E}^{r}_5$. 
\end{proof} The situation is more interesting for $d=4$, where we obtained the following result: \begin{theorem} \label{thm:realcount4} The real Hurwitz numbers for degree $4$ coverings of $\PP^1$ by genus $3$ curves are $$ \begin{matrix} H^{\real}_4(12)= 20590 , & H^{\real}_4(10)= 15630 , & H^{\real}_4(8)= 11110 , & H^{\real}_4(6)= 7814 , \\ & H^{\real}_4(4)= 5654 , & H^{\real}_4(2) = 4070 , \,& H^{\real}_4(0)= 4350. \end{matrix} $$ \end{theorem} \begin{proof} This is found by a direct computation using \textsc{Oscar} \cite{Oscar}. We start by constructing a list of all monodromy representations of degree $4$ and genus $3$. As monodromy representations occur in equivalence classes, we construct only one canonical representative for each class. This is the element of the equivalence class that is minimal with respect to the lexicographic ordering. The resulting list of $7528620$ monodromy representations was computed in about $6.5$ hours. In other words, we embarked on a table just like Table~\ref{table:40covers}, but its number of rows is now $7528620$ instead of $40$. Those are the two numbers seen in Corollary~\ref{cor:7528620}. We next applied Cadoret's criterion in \cite[Section 3.3, formula $(\star)$]{Cadoret} to our big table. This criterion was stated in Lemma \ref{lem:real_abstract_hurwitz_numbers}. We start with our $ 7528620$ tuples $\tau$, computed as just described, and mentioned in Lemma \ref{lem:abstract_hurwitz_number}. According to Cadoret's criterion, we must check for each $12$-tuple $\tau$ whether there exists an involution $\sigma$ that satisfies certain equations in the symmetric group $\mathbb{S}_4$. These depend on the number $r$ of real branch points. Note that $ r = \{ 0,2,4,\ldots, 12\}$. For $r = 2, 4, \ldots, 12$, the only possible involutions $\sigma$ are $id$, $(12)$, $(34)$ and $(12)(34)$, by the structure of the canonical representative computed for the list. For $r = 0$, all involutions in $\mathbb{S}_4$ can appear. For each involution $\sigma$ and each value of $r$, it took between $5$ and $30$ minutes to scan our big table, and to determine how many $12$-tuples $\tau$ satisfy Cadoret's criterion for the pair $(r,\sigma)$. For each $r$, we collected the number of tuples $\tau$ for which the answer was affirmative. This gave the numbers stated in Theorem \ref{thm:realcount4}. \end{proof} We next relate this Hurwitz combinatorics to the polynomial systems in Section~\ref{sec2}. Recall that we seek orbits of the group $\mathcal{G}$ acting on $\PP^{\binom{d+2}{2}-1}$. An orbit is called {\em real} if it has the form $\mathcal{G} A$ where $A$ is a ternary form with real coefficients. Since $\mathcal{G}$ is defined over $\mathbb{R}$, an orbit is real if and only if its unique intersection point with the linear space $L_d$ in Theorem~\ref{thm:normalform} is real. Thus, identifying the real orbits among those with prescribed branch points is equivalent to deciding how many of the $\mathfrak{h}_d$ complex solutions in our exact formulations (\ref{eq:system3}) and (\ref{eq:system4}) are~real. Suppose that the given binary form $B \in \mathcal{V}_d$ has real coefficients, and let $r$ denote the number of real zeros of $B$. In addition, there are $d(d-1)-2r$ pairs of complex conjugate zeros. It turns out that for $d=3$ the number of real solutions is independent of the number~$r$. Our census of real plane Hurwitz numbers for quartics will be presented in Section \ref{sec5}. \begin{corollary} \label{cor:from40to8} The real plane Hurwitz number for cubics equals eight. 
To be precise, the system (\ref{eq:system3}) always has $8$ real solutions, provided the given parameters $\beta_{ij}$ are real and generic. \end{corollary} \begin{proof} This is derived from Corollary \ref{cor:7528620} and Proposition~\ref{prop:real_abstract_hn_degree_3}. Namely, we use the fact that plane covers are in bijection with abstract covers. Let $C \rightarrow \PP^1$ be a real cover by an elliptic curve~$C$. The involution of $C$ that is referred to in the proof of Corollary \ref{cor:deg3} is real as well. Another proof, following Clebsch \cite{ClebschShort, ClebschLong}, appears in Section~\ref{sec4}. \end{proof} \begin{algo} \label{alg:recovery3} We implemented numerical recovery for cubics that matches Table \ref{table:40covers}. The \underbar{input} is a binary sextic $B$ with real coefficients. The \underbar{output} consists of $40$ cubics $A$ in $L_3$ along with their labeling by $\mathcal{A}_1,\mathcal{A}_2,\ldots, \mathcal{E}_6^r$. The cubics are found with \texttt{HomotopyContinuation.jl} by solving (\ref{eq:system3}). We next fix loops $\gamma_1,\gamma_2,\ldots,\gamma_6$ around the six roots of $B$ that are compatible with complex conjugation on the Riemann sphere $\PP^1$. If all six roots are real then we use \cite[Construction 2.4]{GMR}. For each cubic $A$, we track the three roots $z$ of $A(x,y,z)=0$ as $(x:y)$ cycles along $\gamma_i$. The resulting permutation of the three roots is the transposition~$\tau_i$. This process maps $A$ to a tuple $\tau$ in Table~\ref{table:40covers}. This is unique up to conjugacy by $\mathbb{S}_3$. The $8$ real cubics $A$ are mapped to the $8$ real monodromy representations, in the proof of Proposition~\ref{prop:real_abstract_hn_degree_3}. \end{algo} \section{Cubics: Solutions in Radicals} \label{sec4} The theme of this paper is the rational map (\ref{eq:map1}) that takes a ternary form to its $z$-discriminant. This map is finite-to-one onto its image $\mathcal{V}_d$, assuming the domain $\PP^{\binom{d+2}{2}-1}$ is understood modulo the group $\mathcal{G}$. Note that $\mathcal{V}_d$ is an irreducible variety of dimension $\binom{d+2}{2}-4$ in $\PP^{d(d-1)}$. The general fiber of the map consists of $\mathfrak{h}_d$ complex points. We are curious about the Galois group ${\rm Gal}_d$ associated with this covering. Here {\em Galois group} is defined as in \cite{HarrisGalois}. Informally, ${\rm Gal}_d$ is the subgroup of geometry-preserving permutations of the $\mathfrak{h}_d$ solutions. \begin{theorem} \label{thm:25920} The Galois group ${\rm Gal}_3$ for cubics is the simple group of order $25920$, namely \begin{equation} \label{eq:weylrole} {\rm Gal}_3 \,\, = \,\,{\rm SU}_4(\mathbb{F}_2) \,\, = \,\, {\rm PSp}_4(\mathbb{F}_3) \,\, = \,\, W(E_6)/\! \pm. \end{equation} This is the Weyl group of type $E_6$ modulo its center, here realized as $4 \times 4$ matrix groups over the finite fields $\mathbb{F}_2$ and $\mathbb{F}_3$. The action of ${\rm Gal}_3$ on the $40$ monodromy graphs in Table \ref{table:40covers} agrees with that of the symplectic group on the $40$ points in the projective space $\PP^3$ over $\mathbb{F}_3$. \end{theorem} This is a modern interpretation of Clebsch's work \cite{ClebschShort, ClebschLong} on binary sextics. We~first learned about the role of the Weyl group in (\ref{eq:weylrole}) through Elkies' unpublished manuscript~\cite{elkies}. 
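The numerology in Theorem \ref{thm:25920} can be recomputed from standard order formulas for the finite classical groups and for $W(E_6)$. The short sketch below, in Python, is an illustration added for the reader; the order formulas for ${\rm Sp}_4(\mathbb{F}_3)$ and ${\rm SU}_4(\mathbb{F}_2)$ and the value $|W(E_6)| = 51840$ are taken as known facts rather than derived. It confirms that all three descriptions in (\ref{eq:weylrole}) yield a group of order $25920$, acting on the $(3^4-1)/2 = 40$ points of $\PP^3(\mathbb{F}_3)$.
\begin{verbatim}
# |Sp_4(F_q)| = q^4 (q^2 - 1)(q^4 - 1); PSp_4(F_3) is the quotient by {+I, -I}
sp4_3 = 3**4 * (3**2 - 1) * (3**4 - 1)               # 51840
psp4_3 = sp4_3 // 2                                  # 25920

# |SU_4(F_q)| = q^6 (q^2 - 1)(q^3 + 1)(q^4 - 1); for q = 2 the center is trivial
su4_2 = 2**6 * (2**2 - 1) * (2**3 + 1) * (2**4 - 1)  # 25920

weyl_e6 = 51840                                      # |W(E_6)|
points = (3**4 - 1) // (3 - 1)                       # points of P^3 over F_3

print(psp4_3, su4_2, weyl_e6 // 2, points)           # 25920 25920 25920 40
assert psp4_3 == su4_2 == weyl_e6 // 2 == 25920 and points == 40
\end{verbatim}
That the five explicit generators $\Gamma_1,\ldots,\Gamma_5$ displayed in the proof realize this group is checked with \textsc{GAP}, as explained there.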
\begin{remark} The last two columns of Table~\ref{table:40covers} identify the $40$ monodromy representations with $\PP^3(\mathbb{F}_3)$ and with Clebsch's $40$ cubics in \cite{ClebschLong}. The bijection we give respects the maps ${\rm Gal}_3 \simeq {\rm PSp}_4(\mathbb{F}_3) \hookrightarrow \mathbb{S}_{40}$. But it is far from unique. The same holds for Cayley's table in \cite{CayleyAlgo}. \end{remark} \begin{proof}[Proof of Theorem~\ref{thm:25920}] We consider cubics $ \,A = z^3 + A_2(x,y) z + A_3(x,y)$. This is the normal form in (\ref{eq:othernf}). The discriminant equals $\,{\rm discr}_z(A) = 4 A_2^3 + 27 A_3^2$. Thus our task is as follows: \smallskip \\ {\em Given a binary sextic $B $, compute all pairs of binary forms $(A_2,A_3)$ such that $4 A_2^3 + 27 A_3^2 = B$}. \smallskip This system has $40$ solutions, modulo the scaling of $A_2$ and $A_3$ by roots of unity. Setting $U = \sqrt[3]{4} \cdot A_2 $ and $V = \sqrt{-27} \cdot A_3$, we must solve the following problem: {\em Given a binary sextic $B $, compute all decompositions into a binary quadric $U$ and a binary cubic $V$:} \begin{equation} \label{eq:BUVequation} B \,\, = \,\, U^3-V^2 . \end{equation} This is precisely the problem addressed by Clebsch in \cite{ClebschShort, ClebschLong}. By considering the change of his labeling upon altering the base solution, he implicitly determined the Galois group as a subgroup of $\mathbb{S}_{40}$. The identification of this group with $W(E_6)$ modulo its center appears in a number of sources, including \cite{Hunt, Todd}. These sources show that ${\rm Gal}_3$ is also the Galois group of the $27$ lines on the cubic surface. Todd \cite{Todd} refers to permutations of the $40$ Jacobian planes, and Hunt \cite[Table 4.1]{Hunt} points to the $40$ triples of trihedral pairs. The connection to cubic surfaces goes back to Jordan in 1870, and it was known to Clebsch. As a subgroup of the symmetric group $\mathbb{S}_{40}$, our Galois group is generated by five permutations $\Gamma_1,\ldots,\Gamma_5$. These correspond to consecutive transpositions $(\gamma_i \gamma_{i+1})$ of the six loops $\gamma_1,\gamma_2,\ldots,\gamma_6$ in Algorithm \ref{alg:recovery3}. Each generator is a product of nine $3$-cycles in $\mathbb{S}_{40}$. Here are the formulas for $\Gamma_1,\ldots,\Gamma_5$ as permutations of the $40$ rows in Table~\ref{table:40covers}: \begin{small} $$ \! \Gamma_1 = (\mathcal{A}_{10} \mathcal{A}_6 \mathcal{A}_1\!) (\mathcal{A}_8 \mathcal{A}_7 \mathcal{A}_3\!) (\mathcal{A}_9 \mathcal{A}_5 \mathcal{A}_4\!) (\mathcal{A}_{17} \mathcal{A}_{13} \mathcal{A}_{12}\!) (\mathcal{A}_{18} \mathcal{A}_{14} \mathcal{A}_2\!) (\mathcal{A}_{16} \mathcal{A}_{15} \mathcal{A}_{11}\!) 
(\mathcal{E}^r_2 \mathcal{E}^r_6 \mathcal{E}^r_3) (\mathcal{E}^r_4 \mathcal{E}^r_1 \mathcal{E}^r_5) (\mathcal{C}^r_3 \mathcal{C}^r_2 \mathcal{C}^r_1) $$ $$ \Gamma_2 \,= (\mathcal{E}^\ell_4 \mathcal{A}_{14} \mathcal{A}_{10}) (\mathcal{E}^\ell_6 \mathcal{A}_{15} \mathcal{A}_9) (\mathcal{E}^\ell_2 \mathcal{A}_{13} \mathcal{A}_8) (\mathcal{B}_1 \,\mathcal{E}^r_1 \mathcal{E}^r_2) (\mathcal{D}^r_1 \,\mathcal{C}^r_2 \mathcal{C}^r_3) (\mathcal{B}_2 \,\mathcal{E}^r_6 \mathcal{E}^r_4) (\mathcal{E}^\ell_5 \mathcal{A}_7 \mathcal{A}_{17}) (\mathcal{E}^\ell_1 \mathcal{A}_5 \mathcal{A}_{16}) (\mathcal{E}^\ell_3 \mathcal{A}_6 \mathcal{A}_{18}) $$ $$ \Gamma_3 \,=\, (\mathcal{C}^\ell_3 \, \mathcal{E}^\ell_5 \, \mathcal{E}^\ell_4) (\mathcal{C}^\ell_1 \, \mathcal{E}^\ell_1\, \mathcal{E}^\ell_2) (\mathcal{C}^\ell_2 \,\mathcal{E}^\ell_3 \,\mathcal{E}^\ell_6) (\mathcal{A}_{17} \mathcal{A}_{11} \mathcal{A}_{14}) (\mathcal{A}_{18} \mathcal{A}_{12} \mathcal{A}_{15}) (\mathcal{A}_{16} \mathcal{A}_2 \mathcal{A}_{13}) (\mathcal{E}^r_2 \mathcal{E}^r_1 \mathcal{C}^r_1) (\mathcal{E}^r_4 \mathcal{C}^r_2 \mathcal{E}^r_3) (\mathcal{C}^r_3 \mathcal{E}^r_6 \mathcal{E}^r_5) $$ $$ \Gamma_4 = (\mathcal{D}^\ell_1 \,\mathcal{C}^\ell_2 \, \mathcal{C}^\ell_3) (\mathcal{E}^\ell_6 \, \mathcal{B}_2 \, \mathcal{E}^\ell_5) (\mathcal{E}^\ell_2 \,\mathcal{E}^\ell_1 \,\mathcal{B}_1) (\mathcal{A}_8 \mathcal{A}_{16} \mathcal{E}^r_2) (\mathcal{A}_9 \mathcal{E}^r_4 \mathcal{A}_{17}) (\mathcal{E}^r_3 \mathcal{A}_3 \mathcal{A}_{11}) (\mathcal{E}^r_5 \mathcal{A}_{12} \mathcal{A}_4) (\mathcal{A}_{15} \mathcal{E}^r_6 \mathcal{A}_7) (\mathcal{A}_{13} \mathcal{A}_5 \mathcal{E}^r_1) $$ $$ \Gamma_5 = (\mathcal{C}^\ell_3 \mathcal{C}^\ell_2 \mathcal{C}^\ell_1) (\mathcal{E}^\ell_4 \mathcal{E}^\ell_6 \mathcal{E}^\ell_2) (\mathcal{E}^\ell_5 \mathcal{E}^\ell_3 \mathcal{E}^\ell_1) (\mathcal{A}_{10} \mathcal{A}_9 \mathcal{A}_8\!) (\mathcal{A}_{17} \mathcal{A}_{18} \mathcal{A}_{16}\!) (\mathcal{A}_4 \mathcal{A}_3 \mathcal{A}_1\!) (\mathcal{A}_{11} \mathcal{A}_{12} \mathcal{A}_2\!) (\mathcal{A}_{14} \mathcal{A}_{15} \mathcal{A}_{13}\!) (\mathcal{A}_7 \mathcal{A}_6 \mathcal{A}_5) $$ \end{small} A compatible bijection with the labels of Clebsch \cite[Section 9]{ClebschLong} is given in the second-to-last column in Table~\ref{table:40covers}. The last column gives a bijection with the $40$ points in the projective space $\PP^3$ over the three-element field $\mathbb{F}_3$. This bijection is compatible with the action of the matrix group $ {\rm PSp}_4(\mathbb{F}_3) $. Here, the five generators above are mapped to matrices of order $3$: \begin{small} $$ \Gamma_1 = \begin{bmatrix} 1 \! & \! 1 \! & \! 2 \! & \! 0 \\ 0 \! & \! 1 \! & \! 0 \! & \! 0 \\ 0 \! & \! 0 \! & \! 1 \! & \! 0 \\ 0 \! & \! 1 \! & \! 2 \! & \! 1 \end{bmatrix}\!, \, \Gamma_2 = \begin{bmatrix} 1 \! & \! 0 \! & \! 0 \! & \! 0 \\ 2 \! & \! 1 \! & \! 0 \! & \! 2 \\ 1 \! & \! 0 \! & \! 1 \! & \! 1 \\ 0 \! & \! 0 \! & \! 0 \! & \! 1 \end{bmatrix}\!, \,\Gamma_3 = \begin{bmatrix} 1 \! & \! 1 \! & \! 0 \! & \! 0 \\ 0 \! & \! 1 \! & \! 0 \! & \! 0 \\ 0 \! & \! 0 \! & \! 1 \! & \! 0 \\ 0 \! & \! 0 \! & \! 0 \! & \! 1 \end{bmatrix}\!, \,\Gamma_4 = \begin{bmatrix} 1 \! & \! 0 \! & \! 0 \! & \! 0 \\ 2 \! & \! 1 \! & \! 0 \! & \! 1 \\ 2 \! & \! 0 \! & \! 1 \! & \! 1 \\ 0 \! & \! 0 \! & \! 0 \! & \! 1 \end{bmatrix}\!, \,\Gamma_5 = \begin{bmatrix} 1 \! & \! 1 \! & \! 1 \! & \! 0 \\ 0 \! & \! 1 \! & \! 0 \! & \! 0 \\ 0 \! & \! 0 \! & \! 1 \! & \! 0 \\ 0 \! & \! 2 \! & \! 2 \! & \! 1 \end{bmatrix}\!. 
$$ \end{small} These are symplectic matrices with entries in $\mathbb{F}_3$, modulo scaling by $(\mathbb{F}_3)^* = \{\pm 1\} = \{1,2\}$. A computation using \textsc{GAP} \cite{GAP} verifies that these groups are indeed isomorphic. In the notation of the atlas of simple groups, our Galois group (\ref{eq:weylrole}) is the group $O_5(3)$. \end{proof} \begin{remark} The fundamental group of the configuration space of six points in the Riemann sphere $\PP^1$ is the braid group $B_6$, which therefore maps onto the finite group ${\rm Gal}_3$. One checks that the permutations and matrices listed above satisfy the braid relations that define $B_6$: $$ \Gamma_i \,\Gamma_{i+1} \,\Gamma_i \,=\, \Gamma_{i+1} \,\Gamma_i\, \Gamma_{i+1} \quad \hbox{for $i=1,2,3,4$} \quad {\rm and} \quad \Gamma_i \,\Gamma_j = \Gamma_j \,\Gamma_i \quad \hbox{for $| i - j | \geq 2$.} $$ \end{remark} We conclude from Theorem \ref{thm:25920} that our recovery problem for cubics is solvable in~radicals. \begin{corollary} Starting from a given ternary cubic $A$, the $39$ other solutions $(U,V)$ to the equation (\ref{eq:BUVequation}) can be written in radicals in the coefficients of the binary forms $A_2$ and $A_3$. \end{corollary} \begin{proof} Viewing ${\rm Gal}_3$ as a group of permutations on the $40$ solutions, we consider the stabilizer subgroup of the one given solution. That stabilizer has order $25920/40 = 3 \cdot 216$, and this is the Galois group of the other $39$ solutions. It contains the Hesse group ${\rm ASL}_2(\mathbb{F}_3)$ as a normal subgroup of index $3$. Recall that ${\rm ASL}_2(\mathbb{F}_3)$ is solvable, and has order $216$. It is the Galois group of the nine inflection points of a plane cubic \cite[Section~II.2]{HarrisGalois}. Therefore the stabilizer is solvable, and hence $(U,V)$ is expressable in radicals over $(A_2,A_3)$. \end{proof} Clebsch explains how to write the $39$ solutions in radicals in the coefficients of $(A_2,A_3)$. We now give a brief description of his algorithm, which reveals the inflection points of a~cubic. \begin{algo} \label{alg:clebsch} Our \underbar{input} is a pair $(A_2,A_3)$ that represents a cubic $A$ as in (\ref{eq:othernf}). We set $\tilde U = \sqrt[3]{4} \cdot A_2 $ and $\tilde V = \sqrt{-27} \cdot A_3$. Our \underbar{output} is the list of all pairs $(U,V)$ that satisfy \begin{equation} \label{eq:UVsystem} U^3 - {\tilde U}^3 \,\, = \,\, V^2 - {\tilde V}^2. \end{equation} Real solutions of (\ref{eq:system3}) correspond to pairs $(U,V)$ such that $U$ and $iV$ are real, where $i = \sqrt{-1}$. To solve (\ref{eq:UVsystem}), consider the cubic $\mathcal{C} = \{ (x:y:z) \in \PP^2: z^3 - 3\tilde U z + 2 \tilde V = 0\} $. The nine inflection lines of $\mathcal{C}$ are defined by the linear forms $\xi = \alpha x + \beta y $ such that $\xi^3 - 3\tilde U \xi + 2 \tilde V $ is the cube of a linear form $\eta = \gamma x + \delta y$. We write the coefficients $\alpha,\beta$ of such $\xi$ in radicals. Here, the Galois group is ${\rm ASL}_2(\mathbb{F}_3)$. We next compute $(\gamma,\delta)$ from $\eta^3 = \xi^3 - 3\tilde U \xi + 2 \tilde V$. For each of the pairs $(\alpha,\beta)$ above, this system has three solutions $ (\gamma,\delta) \in \CC^2$. This leads to $27$ vectors $(\alpha,\beta,\gamma,\delta)$, all expressed in radicals. For each of these we set $U = \tilde U - \xi^2 - \xi \eta - \eta^2$.~Then $$ U^3 - ({\tilde U}^3 - {\tilde V}^2) \, = \, - \frac{3}{4} \bigl( \eta^3 + 2 \eta^2 \xi + 2 \eta \xi^2 + \xi^3 - 2 \eta \tilde U - \xi \tilde U\bigr)^2 . 
$$ The square root of the right hand side is a binary cubic $V$, and the pair $(U,V)$ satisfies (\ref{eq:UVsystem}). In this manner we construct $27$ solutions to (\ref{eq:system3}), three for each inflection point of the curve $\mathcal{C}$. Three of the $9$ inflection points are real, and each of them yields one real solution to (\ref{eq:system3}). We finally compute the $12 = 39-27$ remaining solutions, of which four are real. For this, we label the inflection points of $\mathcal{C}$ by $1,2,\ldots,9$ such that the following triples are collinear: \begin{equation} \label{eq:twelve} \underbar{123},456,789,\quad \underbar{147},\underbar{258},\underbar{369},\quad 159,267,348, \quad 168,249,357 . \end{equation} If $1,2,3$ are real and $\{4,7\}$, $\{5,8\}$, $\{6,9\}$ are complex conjugates, then precisely the four underlined lines are real. Our labeling agrees with that used by Clebsch in \cite[\S 9, page~50]{ClebschLong}. We now execute the formulas in \cite[\S 11, \S 12]{ClebschLong}. For each of the $12$ lines in (\ref{eq:twelve}), we compute two solutions $(U,V)$ of (\ref{eq:UVsystem}). These are expressed rationally in the data $(\alpha,\beta)$ found above. Each new solution $(U,V)$ arises twice from each triple of lines in (\ref{eq:twelve}), so we get $12$ in total. \end{algo} \section{Quartics: Del Pezzo Strikes Again} \label{sec5} This section features threads from algebraic geometry that inform our recovery problem for quartics. We begin with the extension of Corollary \ref{cor:from40to8} from $d=3$ to $d=4$. In other words, we determine the plane counterparts to the real Hurwitz numbers in Theorem \ref{thm:realcount4}. \begin{theorem} \label{thm:realcount4planar} Consider the polynomial system (\ref{eq:system4}) for recovering quartics, where the parameters $\beta_{ij}$ are generic reals. The number of real solutions equals $8$, $16$, $24$, $32$, $64$, or~$120$. \end{theorem} \begin{proof} We use the connection to del Pezzo surfaces of degree one and tritangents of space sextic curves, described by Vakil in~\cite{Ravi}. We recall some parts of his construction, and we examine what changes when passing from the complex numbers to the real numbers. The open set $\mathcal{B}$ of $\PP(3^5,2^7)$ is birational to the hypersurface $\mathcal{Z}$ in $\PP^{12}$ which parametrizes possible branch data. Hence we obtain from a general binary form $B$ in $\mathcal{Z}$ an element $(U_2,U_3)$ of $\PP(3^5,2^7)$. Fix the space sextic $\mathcal{C}$ by $X_2^3 + U_2(X_0,X_1)X_2 + U_3(X_0,X_1)$ in the singular cone $\PP(1,1,2)$. Sending $(U_2,U_3)$ to $\mathcal{C}$ gives the isomorphism $\mathcal{B} \cong \mathcal{E}$ in \cite[Theorem 2.12]{Ravi}, where $\mathcal{E}$ is the space of genus $4$ curves whose canonical model lies on a quadric cone in $\PP^3$. In the complex setting taking a double cover of $\PP(1,1,2)$ branched over $\mathcal{C}$ gives a del Pezzo surface of degree one, together with involution $\iota$, called the {\em Bertini involution}. Over the real numbers we obtain two different del Pezzo surfaces, depending on the choice of a sign. We denote the two distinct real surfaces by $X$ and $X'$. Upon base changing to the complex numbers, the surfaces $X_\CC$ and $X'_\CC$ become isomorphic. The possible topological types of real del Pezzo surfaces have been classified by several authors. We here follow the work of Russo \cite{Russo}. Blowing down a $(-1)$-curve $L$ on $X_\CC$ gives a del Pezzo surface of degree $2$ which is a double cover of a unique real plane quartic. 
The Bertini involution $\iota$ of $X$ maps $L$ to a $(-1)$-curve giving the same quartic. The pair $\{L,\iota(L)\}$ produces a real quartic curve if and only if is invariant under the complex conjugation on $X_\CC$. In this case we call it a {\em real Bertini pair}. The real plane Hurwitz number is the number of real Bertini pairs on $X$. Mapping $L$ to a line in the cone $\PP(1,1,2)$ gives a tritangent to the space sextic $\mathcal{C}$ \cite[Example 4]{Russo}. In this way, the real tritangents of $\mathcal{C}$ are the same as the real Bertini pairs. The former have been counted. We reproduce the table in \cite[Corollary 5.3]{Russo} in Table \ref{table:real_del_pezzo} and explain his notation for the types of del Pezzo surfaces. First, we have the del Pezzo surfaces of blowup type: The surface obtained by blowing up $2r$ real points and $4-r$ complex conjugate pairs of points is denoted by $\PP^2(2r,8-2r)$. With the Bertini involution one can define the real structure denoted by $\mathbb{B}_1$. Similarly, the Geiser involution on real del Pezzo surfaces of degree $2$ give rise to the real surface $\mathbb{G}_2$. Finally, the birational de Jonquieres involutions of the plane give rise to del Pezzo surfaces $\mathbb{D}_2$ and $\mathbb{D}_4$, where the index indicates the degree. \end{proof} \begin{table} \centering \begin{tabular}{|p{2 cm}|p{2 cm}|p{2 cm}|} \hline $\mathfrak{h}_4^{real}(B)$ & $X$ & $X'$ \\ \hline $120$ & $\mathbb{B}_1$ & $\PP^2(8,0)$ \\ $64$ & $\mathbb{G}_2(1,0)$ & $\PP^2(6,2)$ \\ $32$ & $\mathbb{D}_2(1,0)$ & $\PP^2(4,4)$ \\ $16$ & $\mathbb{D}_4(1,2)$ & $\PP^2(2,6)$ \\ $8$ & $\PP^2(0,8)$ & $\PP^2(0,8)$ \\ $24$ & $\mathbb{D}_4(3,0)^0_3$ & $\mathbb{D}_4(3,0)^0_3$ \\ $24$ & $\mathbb{D}_4(3,0)^1_2$ & $\mathbb{D}_4(3,0)^1_2$ \\ \hline \end{tabular} \caption{$\mathfrak{h}_4^{real}(B)$ is the number of real Bertini pairs} \label{table:real_del_pezzo} \end{table} \begin{remark} \label{rmk:consistent} Algorithm \ref{algo:recovery4} is consistent with Theorem~\ref{thm:realcount4planar}. Given binary forms $U_2,U_3$ with real coefficients, it outputs $\,8$, $16$, $24$, $32$, $64$ or $120$ real quartics in the subspace~$L_4$. \end{remark} \begin{corollary} \label{cor:rationalinstance} From any general configuration $\mathcal{P} = \{P_1,P_2,\ldots,P_8\} \subset \PP^2$ with coordinates in $\QQ$, we obtain an instance of (\ref{eq:system4}) whose $120$ complex solutions $A$ all have coefficients in $\QQ$. \end{corollary} \begin{proof} We construct the $120$ quartics $A$ with rational arithmetic from the coordinates in $\mathcal{P}$. Fix $j$ and let $w$ be a cubic that vanishes on $\mathcal{P} \backslash \{P_j\}$ but does not vanish at $P_j$. Consider the $2$-$1$ map $\PP^2 \dashrightarrow \PP^2$ given by $(u:v:w)$. The branch locus of this map is a quartic curve. We claim that this is the quartic $Q$ that corresponds to the exceptional fiber of the blow-up at $P_j$ in Vakil's construction. Indeed, let $X$ be the blowup of $\mathbb{P}^2$ at all the $9$ base points of the pencil $(u:v)$ and let $C\subseteq X$ be a general curve in the pencil. The intersection of the quartic $Q$ with $C$ is given by \cite[Proposition 3.1]{Ravi} as the set of points $p \in C$ such that $\mathcal{O}_{C}(2p) \cong \mathcal{O}_{C}(E_0+E_1)$. Alternatively, these are exactly the branch points of the map $C\to \mathbb{P}^1$ induced by the linear system of $\mathcal{O}_{C}(E_0+E_1)$. 
Thus, we are done if we can prove that the restriction of the map $(u:v:w)\colon X \to \mathbb{P}^2$ is given exactly by the linear system above. However, the map of $(u:v:w)$ on $X$ corresponds to the complete linear system $L = 3H-E_2-\dots-E_8 = -K_X+E_0+E_1$ so that $\mathcal{O}_{C}(L) \cong \mathcal{O}_{C}(-K_X+E_0+E_1)$ and we need to show that $\mathcal{O}_C(-K_X)\cong \mathcal{O}_C$. But this follows from the adjunction formula, using the fact that $C$ is an elliptic curve moving in a pencil. Thus, we can recover the quartic as this branch locus of our $2$-$1$ map $\PP^2 \dashrightarrow \PP^2$. The ternary form $A$ that defines this branch locus can be computed from the ideals of $\mathcal{P}$ and $P_j$ using Gr\"obner bases, and this uses rational arithmetic over the ground field. This yields eight of the $120$ desired quartics. The remaining $112$ quartics are found by repeating this process after some Cremona transformations have been applied to the pair $(\PP^2,\mathcal{P})$. These transformations use only rational arithmetic and they preserve the del Pezzo surface, but they change the collection of eight $(-1)$-curves that are being blown down. \end{proof} The following two algorithms provide more details on the steps in the proof above. \begin{algo} \label{alg:get8} The \underbar{input} is a set $\mathcal{P}$ of eight rational points in $\PP^2$. The \underbar{output} is eight quartics $A \in \QQ[x,y,z]$ that share the same $z$-discriminant. These are the quartics corresponding to the eight exceptional divisors $E_i$. Write $(r:s:t)$ for the coordinates of the $\PP^2$ that contains $\mathcal{P}$. We start by setting up the following ideal in $\,\QQ[r,s,t,x,y,z]$: \begin{equation} \label{eq:jacobian} I \,\,\,=\,\,\, \bigl\langle \hbox{$ 2 \times 2$ minors of} \, \begin{pmatrix} u & v & w \\ x & y & z \end{pmatrix} \bigr\rangle \,\,+\,\, \biggl\langle {\rm det} \! \begin{pmatrix} \partial u / \partial r & \partial u / \partial s & \partial u / \partial t \\ \partial v / \partial r & \partial v / \partial s & \partial v / \partial t \\ \partial w / \partial r & \partial w / \partial s & \partial w / \partial t \\ \end{pmatrix} \biggr\rangle. \end{equation} Saturate $I$ with respect to the ideal of $\mathcal{P}\backslash \{P_j\}$. After that step, eliminate the three variables $r,s,t$. The result is a principal ideal in $\QQ[x,y,z]$ whose generator is the quartic $A$. We perform the above two steps for $j=1,2,\ldots,8$, and we output the resulting eight quartics. \end{algo} Our second algorithm concerns the Cremona transformations mentioned in the proof. \begin{algo} \label{alg:get120} Our \underbar{input} is a set $\mathcal{P}$ of eight rational points in $\PP^2$. The \underbar{output} is the remaining set of $112$ quartics $A \in \QQ[x,y,z]$ that also have same $z$-discriminant. Fix a basis of cubics $u,v$ passing through $\mathcal{P}$. Choose a triple $\{P_i,P_j,P_k\}$ and set up the associated quadratic Cremona transformation $\PP^2 \dashrightarrow \PP^2$. This transforms $\mathcal{P}$ into a new configuration $\mathcal{P}'$ and it corresponds to an automorphism of the del Pezzo surface. Apply Algorithm \ref{alg:get8} to $\mathcal{P}'$ and record the resulting eight quartics. 
In applying Algorithm \ref{alg:get8}, we need some care in choosing a basis $\{u',v'\}$ for the cubics vanishing at $\mathcal{P}'$: we should choose them in such a way that the elliptic pencil $(u':v')\colon \mathbb{P}^2 \dashrightarrow \mathbb{P}^1$ is the composition of the original pencil $(u:v)\colon \mathbb{P}^2 \dashrightarrow \mathbb{P}^1$ and the quadratic Cremona transformation. This ensures that the quartics share the same $z$-discriminant with the original eight quartics. We now repeat this process until $120$ inequivalent quartics have been found. This is possible because the Cremona transformations act transitively on the set of $(-1)$ curves on the del Pezzo surface. \end{algo} \begin{remark} If we use Algorithm \ref{alg:get120} to construct branched covers from real plane quartics then we cannot obtain $24$ real solutions. This is because the associated surfaces are not of blowup type, see Table \ref{table:real_del_pezzo}. Nonetheless we found instances with real plane Hurwitz $24$ by constructing a sextic $\mathcal{C}$ which has this number of tritangents. It can be seen in Figure \ref{figure:24_sextic}. The equation of $\mathcal{C}$ gives us $(U_4,U_6)\in\mathcal{B}$ and with Algorithm \ref{algo:recovery4} we recover $24$ quartics. \end{remark} \begin{figure}[h] \centering \includegraphics[width=0.5\linewidth]{24_1_6.pdf} \vspace{-0.5in} \caption{A space sextic with exactly $24$ real tritangents.} \label{figure:24_sextic} \end{figure} Using Algorithm \ref{alg:get8}, we can find one quartic $A$ and hence the binary form $B = {\rm discr}_z(A)$ using rational arithmetic. However, there is an even more direct method which also shows that $B$ does not depend on the choice of the point $P_j$. Namely, $B$ encodes the $12$ singular curves in the pencil of cubics through $\mathcal{P}$. Based on this observation, we derive our final~result: \begin{theorem} \label{thm:rleqs} Let $B $ be the binary form of degree $12$ computed from a real configuration $\mathcal{P}$. Suppose $2r$ of the $8$ points in $\, \mathcal{P}$ are real and $2s$ of the $12$ zeros of $B$ are real. Then $r \leq s$. Conversely, for all valid parameters $r \leq s$, some configuration $\mathcal{P}$ realizes the pair $(r,s)$. \end{theorem} \begin{proof} The $12$ points in $\mathbb{P}^1$ which are zeros of $B$ correspond to the singular fibers under the map $\mathbb{P}^2 \rightarrow \mathbb{P}^1: (x:y:z) \mapsto (u:v)$. This map is given by two cubics $u$,$v$ through~$\mathcal{P}$. Algebraically, we seek points $(\alpha:\beta) \in \mathbb{P}^1$ such that the cubic $ \beta \cdot u - \alpha \cdot v$ is singular. There are $12$ such singular cubics, and the question is how many of those can be~real. The {\em Welschinger invariant} $W_r(\mathbb{P}^2,3)$ is a signed count of real singular cubics passing through $2r$ real points and $4-r$ pairs of complex conjugate points. One counts a singular cubic with a hyperbolic node with a positive sign and one with an elliptic node with a negative sign. By definition, $W_r(\mathbb{P}^2,3)\leq 2s$, the number of real solutions. Example 4.3 in \cite{Shustin} provides a tropical computation of the Welschinger invariants for cubics and yields $W_r(\mathbb{P}^2,3)=2r$. We conclude $2r\leq 2s$. The last sentence in Theorem \ref{thm:rleqs} is proved with a direct calculation. By sampling from rational configurations $\mathcal{P} = \{P_1,\ldots,P_8\}$, we found instances for all $(r,s)$ with $0 \leq r \leq s \leq 6$ and $r \leq 4$. 
These can be found at our {\tt MathRepo} website \href{https://mathrepo.mis.mpg.de/BranchPoints/}{https://mathrepo.mis.mpg.de/BranchPoints}$\,$. \end{proof} \begin{remark} If we take one real quartic $Q$ corresponding to our real configuration $\mathcal{P}$, then we can see which of the $2r$ real tangents belong to elliptic nodal cubics. To see this recall how to construct an elliptic fibration from $Q$. We blow up the point $(0:0:1)$ and then take a double cover branched over the quartic \cite[Section 2.1]{Ravi}. The nodal fibers are the preimages of the tangents of $Q$ passing through the base point. The ovals of $Q$ divide $\PP^2_\RR$ into connected components. We mark the component containing $(0:0:1)$ with a $+$, and we proceed so that adjacent components have opposite signs. Let $L$ be one of the $2r$ real tangents and $q$ be its points of tangency with $Q$. If $L$ lies in a positive component in a neighborhood of $q$, then $L$ gives a hyperbolic node. Otherwise the preimage of $L$ in the fibration is an elliptic node. One proves this by writing down the equation of the double cover in a suitable affine neighborhood of $q$. The case of $10$ hyperbolic nodal curves can be seen in Figure \ref{figure:12_tangents}. \end{remark} \begin{figure}[h] \centering \includegraphics[width=0.5\linewidth]{120_12_tangents_with_signs.png} \caption{A real quartic with $12$ real tangents} \label{figure:12_tangents} \end{figure} \bigskip \subsection*{Acknowledgements} We thank Arthur Bik, Lou-Jean Cobigo, Yelena Mandelstham, Jared Ongaro, Boris Shapiro, Simon Telen, Rainer Sinn and Mario Kummer for helpful discussions. Furthermore we thank the anonymous referees for careful reading and useful comments. Hannah Markwig, Victoria Schleis and Bernd Sturmfels were supported by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation), Project-ID 286237555, TRR 195. Javier Sendra--Arranz received the support of a fellowship from the ``la Caixa'' Foundation (ID 100010434). The fellowship code is LCF/BQ/EU21/11890110. \medskip \begin{thebibliography}{10} \begin{small} \setlength{\itemsep}{-0.6mm} \bibitem{BRT} P.~Breiding, K.~Rose and S.~Timme: {\em Certifying zeros of polynomial systems using interval arithmetic}, ACM Transactions on Mathematical Software {\bf 49} (2023) 1--14. \bibitem{BT} P.~Breiding and S.~Timme: {\em HomotopyContinuation.jl: A package for homotopy continuation in Julia}, Math.~Software -- ICMS 2018, 458--465, Springer, 2018. \bibitem{Cadoret} A.~Cadoret: {\em Counting real Galois covers of the projective line}, Pacific J. Math. {\bf 219} (2005) 53--81. \bibitem{CJM} R.~Cavalieri, P.~Johnson and H.~Markwig: {\em Tropical Hurwitz numbers}, J. Algebr. Comb. {\bf 32} (2010) 241--265. \bibitem{CavalieriMiles} R.~Cavalieri and E.~Miles: {\em Riemann Surfaces and Algebraic Curves: A First Course in Hurwitz Theory}, London Math. Soc. Student Texts {\bf 87}, 2016. \bibitem{Cayley} A.~Cayley: {\em On the cubic curves inscribed in a given pencil of six lines}, Quarterly Journal of Pure and Applied Mathematics {\bf 9} (1868) 210--221. \bibitem{CayleyAlgo} A.~Cayley: {\em Algorithm for the characteristics of the triple $\theta$-functions}, J.~Reine Angew. Math. {\bf 87} (1879) 165--169. \bibitem{CKRN} T. O. Celik, A. Kulkarni, Y. Ren and M. Sayyary Namin: {\em Tritangents and their space sextics,} Journal of Algebra {\bf 538} (2019) 290-311. 
\bibitem{ClebschShort} A.~Clebsch: {\em Zur Theorie der bin\"aren Formen sechster Ordnung und zur Dreitheilung der hyperelliptischen Funktionen}, Mathematische Annalen {\bf 2} (1870) 373--381. \bibitem{ClebschLong} A.~Clebsch: {\em Zur Theorie der bin\"aren Formen sechster Ordnung und zur Dreitheilung der hyperelliptischen Funktionen}, Abhandlungen der K\"oniglichen Gesellschaft der Wissenschaften in G\"ottingen {\bf 14} (1869) 17--75. \bibitem{elkies} N.~Elkies: {\em The identification of three moduli spaces}, {\tt arXiv:math/990519v1}. \bibitem{mathrepo} C.~Fevola and C.~G\"{o}rgen: {\em The mathematical research-data repository MathRepo}, Computeralgebra Rundbrief {\bf 70} (2022) 16--20. \bibitem{GAP} The GAP~Group: {\em GAP -- Groups, Algorithms, and Programming, Version 4.7.8}, 2015, {\tt http://www.gap-system.org}. \bibitem{Goodman} J.E.~Goodman: {\em Affine open subsets of algebraic varieties and ample divisors}, Annals of Math.~{\bf 89} (1969) 160--183. \bibitem{GMR} M.~Guay-Paquet, H.~Markwig and J.~Rau: {\em The combinatorics of real double Hurwitz numbers with real positive branch points}, Int. Math. Res. Not. IMRN (2016), no.1, 258--293. \bibitem{Harris} C.~Harris and Y.~Len: {\em Tritangent planes to space sextics: the algebraic and tropical stories}, Combinatorial Algebraic Geometry, 47-63, Springer, New York, NY, 2017. \bibitem{HarrisGalois} J.~Harris: {\em Galois groups of enumerative problems}, Duke Math.~J.~{\bf 46} (1979) 685--724. \bibitem{Hunt} B.~Hunt: {\em The Geometry of Some Special Arithmetic Quotients}, Lecture Notes in Mathematics {\bf 1637}, Springer-Verlag, Berlin, 1996. \bibitem{Hurwitz} A.~Hurwitz: {\em \"Uber Riemann'sche Fl\"achen mit gegebenen Verzweigungspunkten}, Mathematische Annalen {\bf 39} (1891) 1--60. \bibitem{IZ} I.~Itenberg and D.~Zvonkine: {\em Hurwitz numbers for real polynomials}, Comment. Math. Helv. {\bf 93} (2018) 441--474. \bibitem{KS} S.~Kleiman and R.~Speiser: {\em Enumerative geometry of nonsingular plane cubics}. Algebraic geometry: Sundance 1988, 85--113, Contemp.~Math., 116, Amer. Math. Soc., Providence,~1991. \bibitem{KA} A. Kulkarni, Y. Ren, M. Sayyary Namin and B. Sturmfels: {\em Real space sextics and their tritangents}, Proceedings of the 2018 ACM International Symposium on Symbolic and Algebraic Computation (2018) 247-254. \bibitem{Ongaro} J.~Ongaro: {\em Plane Hurwitz Numbers}, Doctoral Thesis, DMAT-2014:002, Nairobi, Kenya, 2014. \bibitem{Ongaro2} J.~Ongaro: {\em Formulae for calculating {H}urwitz numbers}, J.~Adv.~Stud.~Topol. {\bf 10} (2019) 35--58. \bibitem{OS} J.~Ongaro and B.~Shapiro: {\em A note on planarity stratification of Hurwitz spaces}, Canadian~Math.~Bulletin~{\bf 58} (2015) 596--609. \bibitem{Oscar} The \textsc{Oscar} Team: {\em \textsc{Oscar} --- Open Source Computer Algebra Research system, Version 0.8.3-DEV} (2022), {\tt oscar.computeralgebra.de}. \bibitem{Russo} F.~Russo: {\em The antibirational involutions of the plane and the classification of real del Pezzo surfaces}, Algebraic Geometry: A Volume in Memory of Paolo Francia, edited by Mauro C. Beltrametti, Berlin, New York: De Gruyter, 2003, pp. 289-312. \bibitem{Shustin} E.~Shustin: {\em A tropical calculation of the {W}elschinger invariants of real toric del {P}ezzo surfaces}, J.~Algebraic Geom.~{\bf 15} (2006) 285--322. \bibitem{Todd} J.A.~Todd: {\em On the simple group of order $25920$}, Proceedings of the Royal Society London, Ser. A {\bf 189} (1947) 326--358. 
\bibitem{Ravi} R.~Vakil: {\em Twelve points on the projective line, branched covers, and rational elliptic fibrations}, Mathematische Annalen {\bf 320} (2001) 33-54. \end{small} \end{thebibliography} \bigskip \bigskip \noindent \footnotesize {\bf Authors' addresses:} \smallskip \noindent Daniele Agostini, Universit\"at T\"ubingen \hfill {\tt [email protected]} \noindent Hannah Markwig, Universit\"at T\"ubingen \hfill {\tt [email protected]} \noindent Clemens Nollau, Universit\"at T\"ubingen \hfill {\tt [email protected]} \noindent Victoria Schleis, Universit\"at T\"ubingen \hfill {\tt [email protected]} \noindent Javier Sendra--Arranz, MPI-MiS Leipzig \hfill {\tt [email protected]} \noindent Bernd Sturmfels, MPI-MiS Leipzig and UC Berkeley \hfill {\tt [email protected]} \end{document}
2205.11196v6
http://arxiv.org/abs/2205.11196v6
Zero-Sum Games and Linear Programming Duality
\documentclass[a4paper,12pt]{article} \usepackage{amsfonts,amsmath,amsthm,amssymb,amscd} \usepackage{newpxtext,newpxmath} \usepackage[square,numbers]{natbib} \usepackage{bibspacing} \usepackage[nobottomtitles*]{titlesec} \titleformat{\section}{\Large\bf}{\thesection}{.8em}{} ll] \renewcommand{\bottomtitlespace}{.1\textheight} \usepackage{pict2e} \sfcode`P=1000 \usepackage{microtype} \linespread{1.10} \usepackage{graphicx} \usepackage[colorlinks,linkcolor=blue,citecolor=blue]{hyperref} \renewcommand\UrlFont{\sffamily\small} \usepackage{xcolor} \def\BIBand{and} \oddsidemargin .46cm \textwidth 15cm \textheight 24cm \topmargin -1.3cm \clubpenalty=10000 \widowpenalty=10000 \predisplaypenalty=350 \parindent 1.8em \parskip .8ex \newtheorem{theorem}{Theorem} \newtheorem{lemma}{Lemma} \newtheorem{proposition}{Proposition} \theoremstyle{definition} \newcommand{\supp}{\textup{\small\textsf{supp}}} \newcommand{\slack}{\textup{\small\textsf{slack}}} \newcommand{\RS}{\textup{\small\textsf{rowspace}}} \newcommand{\NS}{\textup{\small\textsf{nullspace}}} \newcommand{\twovec}[2]{\small\Bigl(\begin{matrix}#1\\[-.7ex]#2\end{matrix}\Bigr)} \newcommand{\reals}{\mathbb{R}} \newcommand{\eps}{\varepsilon} \newcommand{\T}{^{\top}} \newcommand{\0}{{\mathbf0}} \newcommand{\1}{{\mathbf1}} \def\2{\textup{\small\textsf{GF}}(2)} \def\myproofof#1{\noindent{\em Proof of #1.\enspace}} \def\myproof{\noindent{\textit{Proof.\enspace}}} \def\endproof{\hfill\strut\nobreak\hfill\tombstone\par\smallbreak} \def\tombstone{\hbox{\lower.4pt\vbox{\hrule\hbox{\vrule \kern7.6pt\vrule height7.6pt}\hrule}\kern.5pt}} \newcommand{\maxi}{\mathop{\hbox{\textup{maximize }}}} \newcommand{\mini}{\mathop{\hbox{\textup{minimize }}}} \newcommand{\subj}{\hbox{\textup{subject to ~ }}} \newcommand{\OPT}[1]{\hat{#1}} \newdimen\einr\einr1.8em \newdimen\rmeinr\rmeinr1.8em \newdimen\tmp \newcommand{\abs}[1]{\par\hangafter=1\hangindent=\einr \noindent\hbox to\einr{#1\hfill}\ignorespaces} \newcommand\rmitem[1]{\abs{\textup{#1}}} \newcommand\bullitem{\tmp\einr\einr\rmeinr\abs{\raise.17ex\hbox{\kern7pt\scriptsize$\bullet$}}\einr\tmp} \newcommand\R{\textcolor{red}} \parindent\einr \title{Zero-Sum Games and Linear Programming Duality} \author{Bernhard von Stengel\\ \small Department of Mathematics, London School of Economics, \sf [email protected]} \date{\normalsize October 23, 2023} \begin{document} \maketitle \begin{abstract} \noindent \input abstract \end{abstract} \input 9bsum \bibliographystyle{numeric} \small \bibitemsep .4ex minus.1ex \bibliography{bib-0sum} \end{document} The minimax theorem for zero-sum games is easily proved from the strong duality theorem of linear programming. For the converse direction, the standard proof by Dantzig (1951) is known to be incomplete. We explain and combine classical theorems about solving linear equations with nonnegative variables to give a correct alternative proof, more directly than Adler (2013). We also extend Dantzig's game so that any max-min strategy gives either an optimal LP solution or shows that none exists. \arraycolsep.2em \section{Introduction and summary} LP duality (the strong duality theorem of linear programming) is a central result in optimization. It helps proving many results with ease, such as the minimax theorem for zero-sum games, first proved by von Neumann in 1928 \cite{vN1928}. In October 1947, George Dantzig explained his nascent ideas on linear programming to John von Neumann \cite[p.~45]{dantzig1982}. 
In response, he got an ``eye-popping'' lecture on LP duality, which von Neumann conjectured to be equivalent to his minimax theorem. This ``equivalence'' is commonly assumed (for example, Schrijver \cite[p.~218]{Schrijver}), but on closer inspection does not hold at all. ``Equivalence'' is actually not a good term -- all theorems, as logical statements without free variables, are equivalent, to ``true''. We therefore say that theorem~A \textit{proves} (rather than ``implies'') theorem~B, typically by a suitable but different use of the variables in theorem~A, and state straightforward proof relations of this kind as propositions (see Proposition~\ref{p-farkas} for an example). The classic proof by Dantzig \cite{dantzig1951} of LP duality from the minimax theorem needs an additional assumption about the game solution, namely strict complementarity in the last column of the game matrix that corresponds to the right-hand side of the LPs. (We state Dantzig's game in (\ref{B}) below; it differs from the original in a trivial change of signs so that the primal LP is a maximization problem subject to upper bounds, in line with the row player in a zero-sum game as the maximizer.) This complementarity assumption, acknowledged by Dantzig \cite{dantzig1951}\cite[p.~291]{Dantzig}, applies only to non-generic LPs and seems technical. Adler \cite{adler2013} fixed this ``hole'' in Dantzig's proof, and showed how an algorithm that solves a zero-sum game can be used to either solve an LP or certify that it has no optimal solution. Recently, Brooks and Reny \cite{BReny} gave a zero-sum game whose solution directly provides such a solution or certificate. The aim of this article is to clarify the underlying problem, with two new main results (explained later). Our narrative is self-contained, not least because LP duality is so familiar that it can be overlooked as a silent assumption. For example, reducing optimality of maximizing $c\T x$ subject to $Ax\le b$, $x\ge\0$ to feasibility of $Ax\le b$, $x\ge\0$, $A\T y\ge c$, $y\ge\0$, $ b\T y\le c\T x$ assumes that there cannot be a positive ``duality gap'' $b\T y-c\T x$, which is the strong duality theorem. Our presentation shows how one could prove, in full, LP duality via the minimax theorem, if one were to take that route. Some of the presented less-known elegant proofs from the literature are also of historical interest. Dantzig's assumption holds if a pure strategy that is a best response in every solution of the zero-sum game has positive probability in some solution. As noted by Adler \cite[p.~167]{adler2013}, this can be shown (e.g., \cite[p.~742]{raghavan1994}) using a version of the Lemma of Farkas \cite{farkas1902}. However, the Lemma of Farkas proves LP duality directly. Our first, easy observation is that Dantzig's assumption amounts to the Lemma of Tucker \cite{tucker1956}. This, in turn, directly proves the Lemma of Farkas \cite[p.~7]{tucker1956}, even for the special case of Dantzig's game (Proposition~\ref{p-Bfa} below). The assumption is therefore extremely strong and in a sense useless for proving LP duality from the minimax theorem. Curiously, Tucker did not consider the converse that in nearly the same way the Lemma of Farkas proves his Lemma (see Proposition~\ref{p-tufa} below). This suggests that Tucker thought he had proved a more general statement. Tucker's proof of his Lemma is indeed short and novel, but in this light we agree with Adler's view of Tucker's Lemma as a ``variant of Farkas's Lemma'' \cite[p.~174]{adler2013}. 
LP duality and the minimax theorem are closely related to solving, respectively, inhomogeneous and homogeneous linear equations in nonnegative variables. The Lemma of Farkas characterizes when the inhomogeneous linear equations $Ax=b$ have no solution vector $x$ such that $x\ge\0$. The Theorem of \textit{Gordan} \cite{gordan1873} characterizes when the homogeneous equations $Ax=\0$ have no solution $x\ge\0$ other than the trivial one $x=\0$. Gordan's Theorem and its ``inequality version'' due to Ville \cite{ville1938} prove the minimax theorem and vice versa. Our first main result, Theorem~\ref{t-gotu} in Section~\ref{s-gotu}, is a proper proof of LP duality from the minimax theorem. Inspired by Adler \cite[section~4]{adler2013}, we use Gordan's Theorem to prove the \textit{Theorem} of Tucker \cite{tucker1956}, an easy but powerful generalization of his Lemma (like Broyden \cite{broyden2001} we think that it deserves more recognition). Tucker's Theorem shows that any system of homogeneous equations $Ax=\0$ such that $x\ge\0$ has a natural partition of its solution vector~$x$ into a set of variables that can take positive values and the others that are zero in any nonnegative solution. It is easy to see that one can drop the nonnegativity requirement for the variables that can be positive. By \textit{eliminating} these unconstrained variables from the system $Ax=\0$ with a bit of linear algebra, applying Gordan's Theorem to the variables that are always zero in any nonnegative solution then gives Tucker's Theorem. Compared to the detailed computations of this variable elimination by Adler \cite{adler2013}, our proof is self-contained and more direct. Using Dantzig's game (\ref{B}), Tucker's theorem proves LP duality in a stronger version, namely the existence of a ``strictly complementary'' solution to the LPs if they are feasible (Proposition~\ref{p-strict} below). Our second main result, Theorem~\ref{t-BM} in Section~\ref{s-M}, extends Dantzig's elegant game (\ref{B}) with an extra row in (\ref{BM}) that ``enforces'' the desired complementarity in the last column. \textit{Every} max-min strategy of this game either gives an optimal pair of solutions to the primal and dual LPs, or represents an unbounded ray for at least one of the LPs if it is feasible, so that the other LP is therefore infeasible. This result is similar to Adler's ``Karp-type'' reduction of an LP to a zero-sum game \cite[section~3.1]{adler2013}, but with the extra certificate of infeasibility. It is also similar to, and inspired by, the main result of Brooks and Reny \cite{BReny}. The proof of Theorem~\ref{t-BM} (in a separate Theorem~\ref{t-DM}) does \textit{not} rely on LP duality and was surprisingly hard to find. Compared to either \cite{adler2013} or \cite{BReny}, our game (\ref{BM}) more naturally extends Dantzig's original game. Similar to both, it imposes an upper bound on the LP variables that does not affect whether the LPs are feasible. This bound follows from Carath\'eodory's theorem \cite{caratheodory1911} that nonnegative solutions $x$ to $Ax=b$ can be found using only linearly independent columns of~$A$ (of which there are only finitely many sets). That bound is determined apriori and of polynomial encoding size from the sizes of the entries of $A$ and $b$ if these are integer or algebraic numbers, otherwise abstractly from all ``basic solutions'' $x$ to $Ax=b$. 
We give a self-contained introduction to linear programming duality (for LPs in inequality form) and to the minimax theorem in Section~\ref{s-state}. Section~\ref{s-farkas} recalls how LP duality is proved from the Lemma of Farkas. The theorems of Gordan \cite{gordan1873} and Ville \cite{ville1938} are the topic of Section~\ref{s-govi}. Stiemke \cite{stiemke1915} gave a two-page proof of the Theorem of Gordan (without referencing it, even though published in the same journal, presumably with no editor around to remember it). His proof uses implicitly that the null space and row space of a matrix are orthogonal complements. But there are no matrices in these papers -- people manipulated linear equations with their unknowns instead. For historical interest, and because of its structural similarity to Tucker's proof of his Lemma \cite[p.~5--7]{tucker1956}, we reproduce Stiemke's proof in Section~\ref{s-sti}. We also present a most elegant half-page proof of the minimax theorem due to Loomis \cite{loomis1946}, which then leads to Gordan's Theorem as an easy additional step. As we explain at the end of Section~\ref{s-sti}, it seems difficult to extend the proof by Loomis to proving LP duality directly, which was the original aim of this research. Section \ref{s-mmlp} presents the classic derivation of LP duality from the minimax theorem due to Dantzig \cite{dantzig1951}. Even though its additional assumption looks minor, we show that it amounts to the Lemma of Tucker \cite{tucker1956}, which, as noted by Tucker \cite[p.~7]{tucker1956}, proves the Lemma of Farkas. This shows that the assumption is way too strong to make Dantzig's derivation useful. Section~\ref{s-gotu} proves Tucker's Theorem and thus LP duality from the minimax theorem using Gordan's theorem. As mentioned, this is distilled from Adler \cite[section~4]{adler2013}. In Section~\ref{s-M}, we add another row to Dantzig's game to obtain a new game where every max-min strategy either gives a solution to the LP or a certificate that no optimal solution exists. Theorems \ref{t-gotu} and~\ref{t-BM} in Sections~\ref{s-gotu} and~\ref{s-M} are the main results of this paper. Section~\ref{s-adler} gives a detailed comparison of our work with the closely related papers by Adler \cite{adler2013} and Brooks and Reny \cite{BReny} In the final Section~\ref{s-minfeas} we present a little-known gem of a proof of the Lemma of Farkas due to Conforti, Di Summa, and Zambelli \cite{conforti2007}. Their theorem states that a system of inequalities $Ax\le b$ is \textit{minimally} infeasible if and only if the corresponding \textit{equalities} $Ax=b$ are minimally infeasible. Because the linear equations are infeasible, a suitable linear combination of them states $0=-1$, which proves the Lemma of Farkas in this context. \section{LP duality and the minimax theorem} \label{s-state} Throughout, $m$ and $n$ are positive integers, and $[n]=\{1,\ldots,n\}$. All vectors are column vectors. The $j$th component of a vector $x$ is written $x_j$\,. All matrices have real entries. The transpose of a matrix $A$ is written~$A\T$. Vectors and scalars are treated as matrices of appropriate dimension, so that a vector $x$ times a scalar~$\alpha$ is written as $x\alpha$, and a row vector $x\T$ times a scalar $\alpha$ as $\alpha x\T$. The matrix $A$ with all entries multiplied by the scalar $\alpha$ is written as $\alpha A$. 
We usually transpose vectors rather than the matrix, to emphasize that $Ax$ is a linear combination of the columns of $A$ and $y\T A$ is a linear combination of the rows of $A$. The all-zero and the all-one vector are written as $\0=(0,\ldots,0)\T$ and~$\1=(1,\ldots,1)\T$, their dimension depending on the context, and the all-zero matrix just as~0. Inequalities between vectors or matrices such as $x\ge\0$ hold between all components. A linear program (LP) in \textit{inequality form} is given by an $m\times n$ matrix $A$ and vectors $b\in\reals^m$ and $c\in\reals^n$ and states, with a vector $x\in\reals^n$ of variables: \begin{equation} \label{P} \maxi_{x} c\T x \quad \subj Ax\le b,\quad x\ge\0. \end{equation} This LP is called \textit{feasible} if there is some $x\in\reals^n$ that fulfills the constraints $Ax\le b$ and $x\ge\0$, otherwise \textit{infeasible}. If there are arbitrarily large values of $c\T x$ with $Ax\le b$ and $x\ge\0$, then the LP is called \textit{unbounded}. With (\ref{P}) considered as the \textit{primal} LP, its \textit{dual LP} states, with a vector $y\in\reals^m$ of variables: \begin{equation} \label{D} \mini_{y} y\T b \quad \subj y\T A\ge c\T,\quad y\ge \0, \end{equation} with feasibility and unboundedness defined accordingly. An equivalent way of writing the dual constraints in (\ref{D}) is $A\T y\ge c$, which transposes only the matrix and can be more readable. The \textit{weak duality} theorem states that if both primal and dual LP have feasible solutions $x$ and $y$, respectively, then their objective function values are mutual bounds, that is, \begin{equation} \label{WD} c\T x\le y\T b, \end{equation} which holds because feasibility implies $c\T x\le y\T Ax\le y\T b$. Hence, if there are feasible solutions $x$ and $y$ so that the two objective functions are \textit{equal}, $c\T x=y\T b$, then both are optimal. The (strong) \textit{LP duality} theorem states that this is always the case if the two LPs are feasible: \begin{theorem}[LP duality] \label{t-lp} If the primal LP $(\ref{P})$ and the dual LP $(\ref{D})$ are feasible, then there exist feasible $x$ and $y$ with $c\T x=y\T b$, which are therefore optimal solutions. \end{theorem} A \textit{zero-sum game} is given by an $m\times n$ matrix $A$ and is played between a \textit{row player}, who chooses a row $i$ of the matrix, simultaneously with the \textit{column player}, who chooses a column $j$ of the matrix, after which the row player receives the matrix entry $a_{ij}$ from the column player as a \textit{payoff} (which is a \textit{cost} to the column player). That is, the row player is the maximizer and the column player the minimizer. The rows and columns are called the players' \textit{pure strategies}. The players can \textit{randomize} their actions by choosing them according to a probability distribution, called a \textit{mixed strategy}. The other player may know the probability distribution but not the chosen pure strategy. The row player is then assumed to maximize his \textit{expected payoff} and the column player to minimize her \textit{expected cost}. 
We denote the set of mixed strategies of the row player by \begin{equation} \label{Y} Y=\{y\in\reals^m\mid y\ge\0,~\1\T y=1\}, \end{equation} and of the column player by \begin{equation} \label{X} X=\{x\in\reals^n\mid x\ge\0,~\1\T x=1\}, \end{equation} in order to stay close to the LP notation (normally row and column player are considered as first and second player, respectively, so that the letters for their mixed strategies should be in alphabetical order, but this is already violated with the very common naming of the LP variables $x\in\reals^n$ and $y\in\reals^m$). With mixed strategies $y$ and $x$ of row and column player, the expected payoff to the maximizing row player and expected cost to the minimizing column player is $y\T Ax$. The minimizing column player who chooses a mixed strategy $x$ should expect that the row player responds with a mixed strategy $y$ (called a \textit{best response}) that maximizes her payoff $y\T Ax$. That best-response payoff $\max_{y\in Y}y\T Ax$ is the weighted sum $\sum_{i\in[m]}y_i (Ax)_i$ of the expected payoffs $(Ax)_i$ for the rows~$i$ and therefore equal to their maximum, which in turn is the least upper bound $v$ of these row payoffs. That is, \begin{equation} \label{Axi} \max_{y\in Y}~y\T Ax~=~\max_{i\in [m]}~(Ax)_i~=~ \min\,\{v\in\reals\mid Ax\le \1v\,\}\,. \end{equation} A \textit{min-max} strategy $x$ of the column player minimizes this worst-case cost $v$ that he has to pay, that is, it is an optimal solution to \begin{equation} \label{lpv} \mini_{x,\,v}v\quad \subj A x\le \1v,\quad x\in X \end{equation} and then $v$ is called the \textit{min-max value} of the game. Similarly, a \textit{max-min} strategy $y$ and the \textit{max-min value} $u$ is an optimal solution to \begin{equation} \label{lpu} \maxi_{y,\,u}u\quad \subj y\T A\ge u\1\T,\quad y\in Y. \end{equation} The minimax theorem of von Neumann \cite{vN1928} states \begin{equation} \label{MM} \max_{y\in Y}~\min_{x\in X}~y\T Ax ~=~v ~=~ \min_{x\in X} ~\max_{y\in Y}~y\T Ax \end{equation} where the unique real number $v$ is called the \textit{value} of the game. Via (\ref{Axi}) and the corresponding expression for $\min_{x\in X} y\T Ax$ (the best-response cost to $y\in Y$), we state this as follows. \begin{theorem}[The minimax theorem] \label{t-mm} Consider optimal $x,v$ for $(\ref{lpv})$ and $y,u$ for $(\ref{lpu})$. Then $u=v$ (the value of the game), $x$ is a min-max strategy, and $y$ is a max-min strategy. \end{theorem} The LP (\ref{lpv}) is in \textit{general form} with an equation $\1\T x=1$ and an unconstrained variable~$v$ (with $-v$ to be maximized), and so is (\ref{lpu}), which is the dual LP to (\ref{lpv}) with $u$ as the unconstrained variable (with $-u$ to be minimized) that corresponds to the equation for $X$ written as $-\1\T x=-1$. Since both LPs are feasible, the strong duality theorem (which also holds for LPs in general form) implies that their optimal values are equal ($-v=-u$), which proves Theorem~\ref{t-mm}. One can avoid stating LPs in general form by ensuring that the min-max value is positive, by adding a constant $\alpha$ to the payoffs $a_{ij}$\,, which defines a new payoff matrix $A+\1\alpha\1\T$. Then for $y\in Y$ and $x\in X$ \begin{equation} \label{alpha} y\T (A+\1\alpha\1\T) x ~=~ y\T Ax+ y\T\1\alpha\1\T x ~=~ y\T Ax+\alpha\,, \end{equation} which shows that best responses and min-max and max-min strategies are unaffected and the corresponding values just shifted by~$\alpha$. 
If all entries of $A$ are positive, then $v>0$ for any feasible $v$ in (\ref{lpv}). Division of each row in (\ref{lpv}) by $v$ (where we now maximize $1/v$) then gives the LP \begin{equation} \label{lp1} \maxi_x \1\T x \quad\subj Ax\le\1,\quad x\ge\0 \end{equation} with its dual \begin{equation} \label{lp2} \mini_y y\T \1 \quad\subj y\T A\ge\1\T,\quad y\ge\0 \,. \end{equation} Both LPs are feasible with nonzero optimal solutions $x$ and $y$, which give the min-max and max-min strategies $xv$ and $yv$ with $v=1/\1\T x=1/\1\T y$ and game value~$v$. These are the standard ways to derive the minimax theorem from LP duality \cite[section~13-2]{Dantzig}. Section~\ref{s-mmlp} describes the classical converse approach, which we show to be incomplete. \section{The Lemma of Farkas and LP duality} \label{s-farkas} The standard way to prove the LP duality theorem uses the Lemma of Farkas \cite{farkas1902}, stated in (\ref{farkeq}) below, which characterizes when an inhomogeneous system $Ax=b$ of linear equations has no solution $x\ge\0$ in nonnegative variables. Two related theorems are (\ref{fark}) and (\ref{farkine}). The following proposition asserts how close they are, by using the respective matrix in different ways (we say ``proves'' rather than ``implies'' because it is not the same matrix). \begin{proposition} \label{p-farkas} Let $A\in\reals^{m\times n}$ and $b\in\reals^m$. Then each of the following three assertions proves the others: The Lemma of Farkas with equalities and nonnegative variables \begin{equation} \label{farkeq} \not \exists x\in\reals^n~:~Ax=b,~~ x\ge\0 \quad\Leftrightarrow\quad \exists y\in\reals^m~:~ y\T A\ge\0\T,~~y\T b< 0\,, \end{equation} the Lemma of Farkas with inequalities and nonnegative variables \begin{equation} \label{fark} \not \exists x\in\reals^n~:~Ax\le b,~~ x\ge\0 \quad\Leftrightarrow\quad \exists y\in\reals^m~:~ y\T A\ge\0\T,~~y\ge\0,~~y\T b< 0\,, \end{equation} and the Lemma of Farkas with inequalities and unconstrained variables \begin{equation} \label{farkine} \not \exists x\in\reals^n~:~Ax\le b \quad\Leftrightarrow\quad \exists y\in\reals^m~:~ y\T A=\0\T,~~y\ge\0,~~y\T b< 0\,. \end{equation} \end{proposition} \myproof In each of (\ref{farkeq}), (\ref{fark}), (\ref{farkine}) the direction ``$\Leftarrow$'' is immediate, for example in (\ref{farkeq}) because $y\T A\ge\0\T$ and $Ax=b$, $x\ge\0$ imply $y\T b=y\T Ax\ge0$ which contradicts $y\T b<0$\,. We therefore only consider ``$\Rightarrow$''. Condition (\ref{farkeq}) proves (\ref{fark}) by writing $Ax\le b$ as $Ax+s=b$, $s\ge\0$ for a vector of \textit{slack variables} $s\in\reals^m$, and then applying (\ref{farkeq}) to the matrix $[A~~I\,]$ instead of $A$, where $I$ is the $m\times m$ identity matrix. Conversely, if there is no solution $x\ge\0$ to $Ax=b$, that is, to $Ax\le b$ and $-Ax\le-b$, then by (\ref{fark}) there are nonnegative $y^+,y^-\in\reals^m$ with $(y^+)\T A-(y^-)\T A\ge\0\T$ and $(y^+)\T b-(y^-)\T b<0$. This shows (\ref{farkeq}) with $y=y^+-y^-$. Condition (\ref{farkine}) follows from (\ref{fark}) by writing $Ax\le b$ in (\ref{farkine}) as $Ax^+-Ax^-\le b$ with nonnegative $x^+$ and $x^-$. The converse holds by writing $Ax\le b$, $x\ge\0$ in (\ref{fark}) as $Ax\le b$, $-x\le\0$ in (\ref{farkine}). \endproof These versions of the Lemma of Farkas are ``theorems of the alternative'' in that exactly one of two conditions is true, as in (\ref{farkeq}): Either there is a solution $x$ to $Ax=b$, $x\ge\0$, or a solution $y$ to $y\T A\ge\0\T$, $y\T b<0$, but not to both. 
We always state such theorems so that ``$\Rightarrow$'' is the nontrivial direction. The following is standard (e.g., Gale \cite[p.~79]{Gale1960}), and similar arguments as used in the proof will be used repeatedly. \begin{proposition} \label{p-fa-lp} The inequality version $(\ref{fark})$ of the Lemma of Farkas proves LP duality. \end{proposition} \myproof Suppose that the primal LP (\ref{P}) has a feasible solution $\bar x$ and the dual LP (\ref{D}) has a feasible solution $\bar y$ and that, contrary to the claim of the LP duality theorem, there are no feasible $x$ and $y$ so that $c\T x=y\T b$. That is, the system of inequalities \begin{equation} \arraycolsep.2em \label{falp} \begin{array}{rcrcr} &&Ax&\le&b\\ -A\T y&&&\le&-c\\ b\T y&-&c\T x&\le&0 \end{array} \end{equation} has no solution $(y,x)\in\reals^m\times\reals^n$ with $y\ge\0$ and $x\ge\0$. Hence, by (\ref{fark}) (written transposed), there are nonnegative $(\hat y,\hat x,t)\in\reals^m\times\reals^n\times\reals$ such that \begin{equation} \label{viol} \arraycolsep.2em \begin{array}{rcrcrcl} &-&A\hat x&+&bt&\ge&\0\\ A\T\hat y&&&-&ct&\ge&\0\\ b\T\hat y&-&c\T\hat x&&&<&0\,. \end{array} \end{equation} If $t>0$ then $\hat x\frac1t$ and $\hat y\frac1t$ are feasible solutions to the primal (\ref{P}) and dual (\ref{D}) with $\frac1t\hat y\T b<c\T\hat x\frac1t$ in violation of weak duality (\ref{WD}). If $t=0$ then $A\hat x\le \0$ and $\hat y\T A\ge\0\T$. The last inequality in (\ref{viol}) implies that at least one of the inequalities $\hat y\T b<0$ or $0<c\T\hat x$ holds. Suppose the latter. For $\alpha\in\reals$ we have $A(\bar x+\hat x\alpha)\le b$ and $\bar x+\hat x\alpha\ge\0$, but $c\T(\bar x+\hat x\alpha)\to\infty$ as $\alpha\to\infty$, that is, the objective function of the primal LP is unbounded, contradicting its upper bound $\bar y\T b$ from the dual LP. Similarly, $\hat y\T b<0$ implies that the dual LP is unbounded and thus the primal LP infeasible, again a contradiction. This shows that (\ref{falp}) has a nonnegative solution $(y,x)$ with $y\T b\le c\T x$ and thus $y\T b=c\T x$ by weak duality, as claimed. \endproof The converse also holds, as well as a useful extension of LP duality. \begin{proposition} \label{p-lp-fa} The LP duality Theorem~\ref{t-mm} proves $(\ref{fark})$. Moreover, if the primal LP $(\ref{P})$ is infeasible and the dual LP $(\ref{D})$ is feasible, then the dual LP is unbounded. \end{proposition} \myproof Suppose there is no $x\ge\0$ with $Ax\le b$. Then the LP (with a new scalar variable~$t$) \begin{equation} \label{faP} \maxi_{x,t} -t \quad \subj Ax-\1t\le b,\quad x\ge\0,~t\ge0 \end{equation} (which is feasible by choosing $t\ge -b_i$ for all $i\in[m]$ and $x=\0$) has an optimum solution with $t>0$. The dual LP to (\ref{faP}) states \begin{equation} \label{faD} \mini_{y} y\T b \quad \subj y\T A\ge \0\T,~~-y\T\1\ge-1,~~ y\ge \0, \end{equation} is feasible with $y=\0$, and therefore has an optimal solution $y\ge\0$ with equal objective function value to the primal, that is, $y\T b=-t<0$. This shows (\ref{fark}). To prove the second part, suppose $\bar y\T A\T \ge c\T$ for some $\bar y\ge\0$. Then with the preceding $y\ge\0$ such that $y\T b<0$ we have $(\bar y\T+\alpha y\T)A\ge c\T$ and $\bar y+y\alpha\ge \0$ and $(\bar y\T+\alpha y\T)b\to-\infty$ as $\alpha\to\infty$. \endproof \section{The theorems of Gordan and Ville} \label{s-govi} The Lemma of Farkas with equalities (\ref{farkeq}) characterizes when the inhomogeneous linear equations $Ax=b$ have no solution $x\ge\0$ in nonnegative variables. 
The following Theorem (\ref{gordan}) of Gordan \cite{gordan1873} for homogeneous equations characterizes when the system $Ax=\0$ has no nontrivial solution $x\ge\0$. Its ``inequality version'' (\ref{ville}) is known as the Theorem of Ville \cite{ville1938}. Ville's Theorem essentially states the minimax theorem for a game with positive value. To prove the minimax theorem from Ville's Theorem, the game should have its value normalized to zero. A common way to achieve this is to symmetrize the game \cite{GKT}. Instead, we shift the payoffs as in (\ref{alpha}) so that the max-min value is zero. Note that the min-max and max-min values in (\ref{lpv}) and (\ref{lpu}) exist without having to assume LP duality. \begin{proposition} \label{p-govi} Let $A\in\reals^{m\times n}$. Then the following Theorem $(\ref{gordan})$ of Gordan proves the Theorem $(\ref{ville})$ of Ville and vice versa, and $(\ref{ville})$ proves the minimax theorem and vice versa: \begin{eqnarray} \not \exists x\in\reals^n~:~Ax=\0,~~ x\ge\0,~~ x\ne\0 ~~&\Leftrightarrow&~~ \exists y\in\reals^m~:~ y\T A>\0\T\,, \label{gordan} \\ \not \exists x\in\reals^n~:~Ax\le\0,~~ x\ge\0,~~ x\ne\0 ~~&\Leftrightarrow&~~ \exists y\in\reals^m~:~ y\T A>\0\T,~~y\ge\0\,. \label{ville} \end{eqnarray} \end{proposition} \myproof Assume (\ref{gordan}) holds. We prove (\ref{ville}). Suppose there is no $x\in\reals^n$ with $Ax\le\0$, $x\ge\0$, $x\ne\0$. Then there is no $x\in\reals^n$ and $s\in\reals^m$ with $Ax+s=\0$ and $x\ge\0$, $s\ge\0$, and $(x,s)\ne(\0,\0)$ (this clearly holds if $x\ne\0$, and if $x=\0$ then $s=\0$). Hence, by (\ref{gordan}), there is some $y\in\reals^m$ with $y\T A>\0\T$ and $y>\0$ and thus $y\ge\0$. This shows the nontrivial direction ``$\Rightarrow$'' in (\ref{ville}). Conversely, suppose there is no $x\ge\0$, $x\ne\0$ with $Ax=\0$ and hence no $x\ge\0$, $x\ne\0$ with $Ax\le\0$ and $-Ax\le\0$. Then by (\ref{ville}) there exist $y^+\ge\0$ and $y^-\ge\0$ with $(y^+)\T A+(y^-)\T(-A)>\0\T$, that is, $(y^+-y^-)\T A>\0\T$, which shows (\ref{gordan}) with $y=y^+-y^-$. Assume the minimax Theorem~\ref{t-mm} holds for the game matrix $A$. The left-hand side of (\ref{ville}) states that the value $v$ of the game is positive, because otherwise there would be a mixed strategy $x\in X$ with nonpositive min-max value $v$ in (\ref{lpv}). With the optimal $y\in Y$ and $u>0$ in (\ref{lpu}) we have $y\T A\ge u\1\T>\0\T$ as asserted in (\ref{ville}). Conversely, assume (\ref{ville}) and consider a game matrix $A$. Let $u$ be its max-min value and $y\in Y$ be a max-min strategy as in (\ref{lpu}). Let $A'=A-\1u\1\T$. Then $y\T A'=y\T A-u\1\T\ge\0\T$. We claim that $A'x\le\0$ for some $x\in X$. If not then there is no $x\ge\0$, $x\ne\0$ with $A'x\le\0$ (otherwise scale $x$ so that $x\in X$), and therefore by (\ref{ville}) we have $y\T A'>\0\T$ for some $y\ge\0$. Because $y\ne\0$, we can scale $y$ such that $y\in Y$ and choose $\eps>0$ such that $y\T A'\ge \eps\1\T$ and hence $y\T A\ge(u+\eps)\1\T$, which contradicts the maximality of $u$ in (\ref{lpu}). Hence, there is $x\in X$ with $A'x\le\0$, so $A'$ has min-max value zero and therefore $A$ has min-max value~$u$, which proves the minimax theorem. \endproof \section{The theorems of Stiemke and Loomis} \label{s-sti} This section is about two proofs of the minimax theorem, for example in order to use it for proving LP duality. For historical interest, we first reproduce a short proof of Gordan's Theorem (\ref{gordan}) by Stiemke \cite{stiemke1915}. 
In modern language, it uses the property that the null space and row space of a matrix are orthogonal complements, as stated in (\ref{RS}) below. We state this property as the following ``theorem of the alternative'' about the solvability of linear equations without nonnegativity constraints, which is well known (e.g., \cite{kuhn1956}). We also use this lemma in Section~\ref{s-minfeas} for a short proof of the Lemma of Farkas. \begin{lemma} \label{l-alt} Let $A\in\reals^{m\times n}$ and $b\in\reals^m$. Then \begin{equation} \label{linalt} \not \exists x\in\reals^n~:~Ax=b \quad\Leftrightarrow\quad \exists y\in\reals^m~:~ y\T A=\0\T,~~y\T b\ne 0\,. \end{equation} \end{lemma} \myproof We show the nontrivial direction ``$\Rightarrow$''. Assume that $b$ is not a linear combination of the columns $A_1,\ldots,A_n$ of $A$. Let $k$ be the column rank of $A$ and $\{A_j\}_{j\in K}$ be a basis of the column space of $A$, with $|K|=k\ge0$, and let $A_K$ be the matrix of these columns. By assumption, the $m\times (k+1)$ matrix $[A_K~b]$ has rank $k+1$, which is also its row rank. Its rows span therefore all of $\reals^{1\times(k+1)}$, in particular the vector $(\0\T,1)$, that is, $y\T A_K=\0\T$ and $y\T b=1$ for some $y\in\reals^m$. Any other column $A_j$ of $A$ for $j\not\in K$ is a linear combination of the basis columns, $A_j=A_Kz^{(j)}$ for some $z^{(j)}\in\reals^k$, which implies $y\T A_j=y\T A_Kz^{(j)}=0$. This shows that overall $y\T A=\0\T$ and $y\T b\ne0$, as required. \endproof \begin{theorem}[Stiemke \cite{stiemke1915}] \label{t-stiemke} Let $A\in\reals^{m\times n}$. Then \begin{equation} \label{stiemke} \not \exists y\in\reals^m~:~ y\T A\ge\0\T,~~y\T A\ne\0 \quad\Leftrightarrow\quad \exists x\in\reals^n~:~Ax=\0\,,~~ x>\0\,. \end{equation} \end{theorem} \myproof Define \begin{equation} \label{space} \begin{array}{lcl} \RS(A)&=&\{y\T A\mid y\in\reals^m\}\,,\\ \NS(A)&=&\{x\in\reals^n\mid Ax=\0\}\,.\\ \end{array} \end{equation} We have for $c\in\reals^n$ \begin{equation} \label{RS} c\T\in\RS(A) \quad\Leftrightarrow\quad \forall x\in\NS(A)~:~c\T x=0 \end{equation} because this is equivalent to \begin{equation} \label{RSt} \exists y~:~y\T A=c\T \quad\Leftrightarrow\quad \not\exists x~:~ Ax=\0\,,~~c\T x\ne0\,, \end{equation} which (with both sides negated) is the transposed version of (\ref{linalt}). The nontrivial direction in (\ref{stiemke}) is ``$\Rightarrow$''. It states: Suppose $\0\T$ is the only nonnegative vector in $\RS(A)$. Then there is some $x\in\NS(A)$ with $x>\0$. We show this by induction on $n$. If $n=1$ then the single column of $A$ is $\0$, and we can choose $x=1$. Let $n>1$ and suppose the claim is true for $n-1$. Case 1. There is some $a\in\reals^{n-1}$, $a\ge\0$, $a\ne\0$ so that $(1,-a\T)\in\RS(A)$. Consider a set of row vectors $(1,-a\T)$, $(0,a_2\T)$, \ldots, $(0,a_m\T)$ that span $\RS(A)$ (easily obtained from the rows of~$A$). There is no $w\in\reals^{m-1}$ such that $c\T=\sum_{i=2}^m w_{i-1}\,a_i\T$ is nonnegative and nonzero, because otherwise $(0,c\T)$ is in $\RS(A)$ and nonnegative and nonzero. Hence, by inductive hypothesis, there is some $z\in\reals^{m-1}$, $z>\0$, such that $a_i\T z=0$ for $2\le i\le m$. Then $x_1=a\T z>0$, and $x=\twovec{x_1}{z}\in\NS(A)$ by (\ref{RS}) because $(1,-a\T)x=0$ and $(0,a_i\T)x=0$ for $2\le i\le m$, and $x>\0$. Case 2. Otherwise, consider any $y\in\reals^m$ and let $(c_1,c\T)=y\T A$ with $c\in\reals^{m-1}$. 
Then $c\ge\0$ implies $c=\0$, which holds by assumption if $c_1\ge0$, and if $c_1<0$ and $c\ge\0$, $c\ne\0$ then $(1,\frac{1}{c_1}c\T)\in\RS(A)$ and Case~1 applies. By inductive hypothesis, there is some $z\in\reals^{m-1}$, $z>\0$, such that $A\twovec{0}{z}=\0$. If $x_1=0$ for all $x\in\NS(A)$ then by (\ref{RS}) we have $(1,0,\ldots,0)\in\RS(A)$ contrary to assumption. So there is some $x'\in\NS(A)$ with $x'_1>0$, and therefore $x=x'\eps+\twovec{0}{z}>\0$ for sufficiently small $\eps>0$, where $Ax=\0$. This completes the induction. \endproof The preceding theorem is statement I of Stiemke \cite{stiemke1915}, and Gordan's Theorem~(\ref{gordan}) is statement~II. \begin{proposition} \label{p-stigo} Stiemke's Theorem~\ref{t-stiemke} proves Gordan's Theorem $(\ref{gordan})$. \end{proposition} \myproof Let $A\in\reals^{m\times n}$. Let $\{b_1,\ldots,b_k\}$ with $k\ge1$ be a spanning set of $\NS(A)$ and $B=[b_1\cdots b_k]$. Then for $b$ and $c$ in $\reals^n$ \begin{equation} \label{cNS} b\in\NS(A) \quad\Leftrightarrow\quad b\T \in\RS(B\T) \end{equation} and, using (\ref{RS}), \begin{equation} \label{cRS} \begin{array}{ll} & c\T \in\RS(A) \\ \Leftrightarrow~~ & \forall x\in\NS(A)~:~c\T x=0 \\ \Leftrightarrow & c\T b_i=0\qquad (1\le i\le k) \\ \Leftrightarrow & c\T B=\0\T \\ \Leftrightarrow & c\in\NS(B\T)\,. \end{array} \end{equation} Stiemke's Theorem (\ref{stiemke}) applied to $B\T$ instead of $A$ states \begin{equation} \label{sBT} \not\exists \,b\T\in\RS(B\T)\,,~~b\ge\0\,,~~b\ne\0 \quad\Leftrightarrow\quad \exists \,c\in\NS(B\T)~:~ c>\0 \end{equation} which by (\ref{cNS}) and (\ref{cRS}) is equivalent to \begin{equation} \label{G1} \not\exists \,b\in\NS(A)\,,~~b\ge\0\,,~~b\ne\0 \quad\Leftrightarrow\quad \exists \,c\T\in\RS(A)~:~ c>\0 \end{equation} which is Gordan's Theorem (\ref{gordan}). \endproof Via Propositions~\ref{p-govi} and~\ref{p-stigo}, Stiemke's Theorem~\ref{t-stiemke} therefore proves the minimax theorem. Using symmetric games, this was also shown by Gale, Kuhn, and Tucker \cite{GKT}. Our favorite proof of the minimax theorem is based on the following theorem. \begin{theorem}[Loomis \cite{loomis1946}] \label{t-loomis} Let $A$ and $B$ be two $m\times n$ matrices with $B>0$. Then there exist $x\in X$, $y\in Y$, and $v\in\reals$ such that $Ax\le Bxv$ and $y\T A\ge v y\T B$. \end{theorem} The case $B=\1\,\1\T$ gives the minimax theorem. Conversely, the minimax theorem proves Theorem~\ref{t-loomis} \cite[p.~19]{LRS}: Because $B>0$, the value of the game $A-\alpha B$ is negative for sufficiently large~$\alpha$, positive for sufficiently negative~$\alpha$, is a continuous function of~$\alpha$, and therefore zero for some $\alpha$, which then gives Theorem~\ref{t-loomis} with $v=\alpha$. The following is the proof by Loomis \cite{loomis1946} of Theorem~\ref{t-loomis} specialized to the minimax theorem. It is an induction proof about the min-max value $v$ and max-min value $u$ (which exist, irrespective of LP duality). It is easy to remember: If the players have optimal strategies that equalize $v$ and $u$ for all rows and columns, then $u=v$. Otherwise (if needed by exchanging the players), there is at least one row with lower payoff than $v$, which will \textit{anyhow} not be chosen by the row player. By omitting this row from the game, the minimax theorem holds (using a bit of convexity and continuity) by the inductive hypothesis. 
\myproofof{Theorem~\ref{t-mm}} Consider optimal solutions $v,x$ to $(\ref{lpv})$ and $u,y$ to $(\ref{lpu})$, where \begin{equation} \label{weak} u=u\1\T x\le y\T A x\le y\T \1 v = v. \end{equation} We prove $u=v$ by induction on $m+n$. It holds trivially for $m+n=2$. If all inequalities in (\ref{weak}) hold as equalities, then $u=v$. Hence, assume that at least one inequality is strict, say $(Ax)_k<v$ for some row $k\in[m]$ (the case for a column is similar). Let $\bar A$ be the matrix $A$ with the $k$th row deleted. By induction hypothesis, $\bar A$ has game value $\bar v$ with $\bar A\bar x\le\1\bar v$ for some $\bar x\in X$, where it is easy to see that \begin{equation} \label{shrink} \bar v\le v,\qquad\bar v\le u \end{equation} because compared to~$A$ the game $\bar A$ strengthens the minimizing column player. We claim that $\bar v=v$. Namely, if $\bar v<v$, let $0<\eps\le 1$ and consider the strategy $x(\eps)=x(1-\eps)+\bar x\eps$ where $x(\eps)\in X$ because $X$ is convex. Then \begin{equation} \label{eps} \bar A x(\eps)~=~\bar A (x(1-\eps)+\bar x\eps) \le \1 v(1-\eps)+\1\bar v\eps =\1(v-\eps(v-\bar v)) <\1v\,. \end{equation} For the missing row $k$ of $A$ where $(Ax)_k<v$ we have for sufficiently small~$\eps$ \begin{equation} \label{Ak} (Ax(\eps))_k=(Ax)_k(1-\eps)+(A\bar x)_k\eps<v\,. \end{equation} Hence, $Ax(\eps)<\1 v$ for some $x(\eps)\in X$, in contradiction to the minimality of $v$ in (\ref{lpv}). This shows $v=\bar v$, and, by (\ref{shrink}), $\bar v\le u\le v=\bar v$ and therefore $u=v$. This completes the induction. \endproof The proof by Loomis \cite{loomis1946} has been noted (in particular by von Neumann and Morgenstern \cite[p.~vi]{vNM}) but is not widely known, and should be a standard textbook proof (as in \cite[p.~216]{vS22}). (A better title of Loomis's paper would have been ``An elementary proof of the minimax theorem'', given that Theorem~\ref{t-loomis} is not substantially more general.) It was, in essence, re-discovered by Owen \cite{owen1967}. However, Owen needlessly manipulates the max-min strategy~$y$; the proof by Loomis is more transparent. Owen's proof is discussed further by Binmore \cite{binmore2004}. The research in this paper originated with an attempt to extend the induction proof by Loomis to a direct proof of LP duality, via the existence of a strictly complementary pair of optimal strategies in a zero-sum game, applied to Dantzig's game in (\ref{B}) below. This existence seems to be difficult to prove within this induction. For example, the game $\left[\begin{matrix}1 & 2 & 0 \\ 1 & 0 & 2 \end{matrix}\right]$ has a max-min and min-max strategy where every pure best response is played with positive probability (such as both players mixing uniformly), but also the left column as a pure min-max strategy. However, omitting the unplayed second or third column in an induction would alter the game substantially, because then a strictly complementary pair has the first column as a unique min-max strategy, with a positive slack in the column that was not omitted. \section{The minimax theorem and LP duality} \label{s-mmlp} The following theorem assumes the minimax theorem. \begin{theorem}[Dantzig \cite{dantzig1951}] \label{t-51} Let $A\in\reals^{m\times n}$, $b\in\reals^m$, $c\in\reals^n$. Consider the zero-sum game with the payoff matrix $B$ (with $k=m+n+1$ rows and columns) defined by \begin{equation} \label{B} B=\left[\begin{matrix} 0&A&-b\\ -A\T&0&c\\ b\T & -c\T & 0\\ \end{matrix}\right].
\end{equation} Then $B$ has value zero, with a min-max strategy $z=(y,x,t)\in \reals^m \times\reals^n \times\reals$ that is also a max-min strategy, with $Bz\le\0$. If $z_k=t>0$ then $x\frac1t$ is an optimal solution to the primal LP $(\ref{P})$ and $y\frac1t$ is an optimal solution to the dual LP $(\ref{D})$. If $(Bz)_k<0$ then $t=0$ and at least one of the LPs $(\ref{P})$ or $(\ref{D})$ is infeasible. \end{theorem} \myproof Because $B=-B\T$, this game is symmetric and its game value $v$ is zero. Let $z=(y,x,t)$. Then $Bz\le\0$ states $Ax-bt\le\0$, $-A\T y+ct\le\0$, and $b\T y-c\T x\le0$. If $t>0$ then $x\frac1t$ and $y\frac1t$ are primal and dual feasible with $b\T y\frac1t\le c\T x\frac1t$ and therefore optimal. If $(Bz)_k<0$, that is, $b\T y-c\T x<0$, then $t>0$ would violate weak duality, so $t=0$. Moreover, $Ax\le\0$ and $y\T A\ge\0\T$, and $y\T b<0$ or $0<c\T x$. As shown following (\ref{viol}), this implies infeasibility of at least one of the LPs (\ref{P}) or (\ref{D}). \endproof Hence, Theorem~\ref{t-51} seems to show that the minimax theorem proves LP duality. The known ``hole'' in this argument is that it does not cover the case of a min-max strategy $z$ where $z_k=0$ and $(Bz)_k=0$, which is therefore uninformative, as noted by Dantzig \cite[p.~291]{Dantzig}. Luce and Raiffa \cite[p.~421]{luceraiffa} claim without proof (or forgot a reference, e.g.\ to corollary 3A in their cited work \cite{goldmantucker1956}) that if $(Bz)_k=0$ for all min-max strategies $z$, then $\bar z_k>0$ for some max-min strategy $\bar z$. Because $B$ is skew-symmetric ($B=-B\T$), this would solve the problem with $\bar z$ as a min-max strategy. We will show that this assumption is essentially the Lemma of Tucker \cite[p.~5]{tucker1956} for the case of a skew-symmetric matrix. Already for the special case of $B$ in (\ref{B}), this proves the Lemma of Farkas (\ref{fark}) (see also \cite[theorem~1.1]{broyden2001}), and this defeats the purpose of proving LP duality from the minimax theorem. \begin{proposition} \label{p-Bfa} Consider $B$ in $(\ref{B})$ with $c=\0$, and suppose that there is always some $z\ge\0$ with $Bz\le\0$ and $z_k-(Bz)_k>0$. Then this proves $(\ref{fark})$. \end{proposition} \myproof Let $z=(y,x,t)$ as described, where $Ax-bt\le\0$ and $-A\T y\le\0$ and $b\T y\le0$ because $Bz\le\0$, and $z_k-(Bz)_k=t-b\T y>0$. Then if $t>0$ we have $Ax\frac1t\le b$, and if $t=0$ then $y\T A\ge\0\T$ and $y\T b<0$, which proves (\ref{fark}). \endproof The Lemma of Tucker comes in several variants. \begin{proposition} Let $A\in\reals^{m\times n}$. Then the following Lemma of Tucker \label{p-tu} \begin{equation} \label{ltuck} \exists y\in\reals^m, ~x\in\reals^n~:~ y\T A\ge\0\T,\quad x\ge\0,\quad Ax=\0,\quad x_n+(y\T A)_n>0 \end{equation} proves the following inequality version and vice versa: \begin{equation} \label{ituck} \exists y\in\reals^m, ~x\in\reals^n~:~ y\ge\0,~~ y\T A\ge\0\T,~~ x\ge\0,~~ Ax\le\0,~~ x_n+(y\T A)_n>0\,, \end{equation} and similarly its version for a skew-symmetric matrix $B\in\reals^{k\times k}$, that is, $B=-B\T$: \begin{equation} \label{stuck} \exists z\in\reals^k~:~ z\ge\0,\quad Bz\le\0,\quad z_k-(Bz)_k>0\,. \end{equation} \end{proposition} \myproof Applying (\ref{ltuck}) to the matrix $[\,I~~A]$ with the identity matrix $I$ gives (\ref{ituck}). For the converse, write $Ax=\0$ as $Ax\le\0$\,, $-Ax\le\0$\,. Condition (\ref{stuck}) follows from (\ref{ituck}) with $A=B$ and $z=x+y$ because $-(Bz)\T=z\T B$ and $y_n\ge0$ and $(x\T B)_n\ge0$.
For the converse, use $B=\left[\begin{matrix} 0&A\\ -A\T&0\\ \end{matrix}\right]$ and $z=\twovec yx$. \endproof Tucker \cite[p.~7]{tucker1956} used (\ref{ltuck}) to prove the Lemma of Farkas in its version (\ref{farkeq}). Less known, but similarly easy, is that the converse holds as well. \begin{proposition} \label{p-tufa} The Lemma of Farkas $(\ref{farkeq})$ proves Tucker's Lemma $(\ref{ltuck})$. \end{proposition} \myproof Let $A=[A_1\cdots A_n]\in\reals^{m\times n}$. By (\ref{farkeq}), either $\sum_{j=1}^{n-1}A_jz_j=-A_n$ for some $z\in\reals^{n-1}$ with $z\ge\0$, in which case let $x=\twovec{z}{1}$ and $y=\0$, or otherwise $y\T A_j\ge0$ for $1\le j<n$ and $y\T(-A_n)<0$ for some $y\in\reals^m$, in which case let $x=\0$. In both cases we have $Ax=\0$ and $x_n+y\T A_n>0$, and (\ref{ltuck}) holds. \endproof In the next section, we show a proper way of proving LP duality from the minimax theorem. \section{Proving Tucker's Theorem from Gordan's Theorem} \label{s-gotu} In Tucker's Lemma (\ref{ltuck}), the last ($n$th) column of the matrix $A$ plays a special role, which can be taken by any other column. This proves the following stronger version (\ref{tuck}) known as the \textit{Theorem} of Tucker \cite[p.~8]{tucker1956}. \begin{proposition} \label{p-tucker} Let $A\in\reals^{m\times n}$. Tucker's Lemma $(\ref{ltuck})$ proves Tucker's Theorem \begin{equation} \label{tuck} \exists y\in\reals^m, ~x\in\reals^n~:~ y\T A\ge\0\T,\quad x\ge\0,\quad Ax=\0,\quad x\T+y\T A>\0\T. \end{equation} \end{proposition} \myproof Let $j\in[n]$. By applying (\ref{ltuck}) to the $j$th column of $A$ with $j$ instead of $n$, choose $y^{(j)}\in\reals^m$ and $x^{(j)}\in\reals^n$ such that \begin{equation} \label{jtuck} (y^{(j)})\T A\ge\0\T,\quad x^{(j)}\ge\0,\quad Ax^{(j)}=\0,\quad x^{(j)}_j+((y^{(j)})\T A)_j>0\,. \end{equation} Then $y=\sum_{j\in[n]}y^{(j)}$ and $x=\sum_{j\in[n]}x^{(j)}$ fulfill (\ref{tuck}). \endproof Tucker's Theorem (\ref{tuck}) is a very versatile theorem that proves a number of theorems of the alternative (see \cite{tucker1956}), for example immediately Gordan's Theorem $(\ref{gordan})$ or Stiemke's Theorem~\ref{t-stiemke}. The main Theorem~\ref{t-gotu} of this section shows that Gordan's Theorem (\ref{gordan}) proves Tucker's Theorem (\ref{tuck}). It is based on the following observation. If $Ax=\0$ and $x\ge\0$, then any $y$ with $y\T A\ge\0\T$ has the property that if $x_j>0$ then $(y\T A)_j=0$ because otherwise $0=y\T Ax=\sum_{j\in[n]}(y\T A)_jx_j>0$\,. Hence, (\ref{tuck}) implies that the \textit{support} \begin{equation} \label{supp} S=\supp(x)=\{j\in[n]\mid x_j>0\} \end{equation} of $x$ is unique. The main idea is that the nonnegativity constraints for the variables $x_j$ for $j\in S$ can be dropped and these variables therefore be eliminated, which allows applying Gordan's Theorem to the remaining variables. The following proof is distilled from the more complicated computational approach of Adler \cite[section~4]{adler2013}. \begin{theorem} \label{t-gotu} Gordan's Theorem $(\ref{gordan})$ proves Tucker's Theorem~$(\ref{tuck})$. \end{theorem} \myproof Let $A=[A_1\cdots A_n]$. For any $S\subseteq[n]$ and $J=[n]-S$ write $A=[A_J~A_S]$ and $x=(x_J,x_S)$ for $x\in\reals^n$. If $Ax=\0$, $x\ge\0$, $Ax'=\0$, $x'\ge\0$, then $A(x+x')=\0$, $x+x'\ge\0$, and $\supp(x+x')=\supp(x)\cup\supp(x')$. Choose $S$ as the inclusion-maximal support of any $x\ge\0$ such that $Ax=\0$. Then any $y$ with $y\T A\ge\0\T$ fulfills $y\T A_S=\0\T$ (because otherwise $y\T Ax=y\T A_Sx_S>0$). 
On the other hand, (\ref{tuck}) states $x_j+y\T A_j>0$ for all $j\in[n]$, which requires $y\T A_j>0$ for $j\in J=[n]-S$. We now show that there indeed exist $y\in\reals^m$ and $x=(\0,x_S)$ such that \begin{equation} \label{tucker} y\T A_J>\0\T,~~ y\T A_S=\0\T,~~ Ax=A_Sx_S=\0,~~ x_S>\0\,, \end{equation} which implies (\ref{tuck}). Consider some $\tilde x\ge\0$ with maximum support $S=\supp(\tilde x)$ such that $A\tilde x=\0$, that is, $\tilde x_S>\0$. If $S=[n]$ we are done. Let $k$ be the rank of~$A_S$. Suppose $k=m$. We claim that then $S=[n]$, which implies (\ref{tuck}) with $y=\0$. Namely, if $j\in [n]-S$, then $A_j=A_S\hat x_S$ for some $\hat x_S$ because $A_S$ has full rank, and therefore $A_j+A_S(\tilde x_S\alpha-\hat x_S)=\0$ where $\tilde x_S\alpha-\hat x_S>\0$ for sufficiently large $\alpha$, which gives a solution $x\ge\0$ to $Ax=\0$ with $\supp(x)=\{j\}\cup S$ in contradiction to the maximality of~$S$. Hence, let $k<m$. In order to apply Gordan's Theorem~(\ref{gordan}), we eliminate the variables $x_S$ from the system $Ax=A_Jx_J+A_Sx_S=\0$ by replacing it with an equivalent system $CAx=0$ with a suitable invertible $m\times m$ matrix~$C$. Let $a_{iS}$ be the $i$th row of $A_S$ for $i\in[m]$. Suppose for simplicity that the last $k$ rows of $A_S$ are linearly independent and form the matrix $F$, and that for $i=1,\ldots,m-k$ we have $a_{iS}=z^{(i)}F$ for some row vector $z^{(i)}$ in $\reals^{1\times k}$. Then the $m\times m$ matrix \begin{equation} \label{CC} C=\left[\begin{matrix}\, 1&\cdots&0&&\kern-2ex-z^{(1)} \\ &\ddots&&&\vdots\\ 0&\cdots&1&&\kern-1ex-z^{(m-k)}\kern-1ex\\ 0&\cdots&0&1&\cdots&0\\ &\vdots&&&\ddots\\ 0&\cdots&0&0&\cdots&1\\ \end{matrix}\, \right] \end{equation} is clearly invertible, and any solution $(x_J,x_S)$ to $A_Jx_J+A_Sx_S=\0$ is a solution to \begin{equation} \label{CA} CA_Jx_J+CA_Sx_S=\0 \end{equation} and vice versa, with \begin{equation} \label{DEF} CA_J=\left[\begin{matrix}D\\E\end{matrix}\right], \quad CA_S=\left[\begin{matrix}0\\F\end{matrix}\right] \end{equation} where $D\in\reals^{(m-k)\times|J|}$, $E\in\reals^{k\times|J|}$, and $F\in\reals^{k\times|S|}$. Suppose there is some $x_J\in\reals^{|J|}$ with \begin{equation} \label{Go} Dx_J=\0\,,\quad x_J\ge\0,~~ x_J\ne\0\,. \end{equation} Because $F$ has rank~$k$ there exists $x_S$ so that $Fx_S=-Ex_J$. Then $Ex_J+Fx_S=\0$ and hence $CA_Jx_J+CA_Sx_S=\0$ and thus $A_Jx_J+A_Sx_S=\0$. With $x(\alpha)=(x_J,x_S+\tilde x_S\alpha)$ we have $Ax(\alpha)=\0$ (because $A_S\tilde x_S=\0$) and $x(\alpha)\ge\0$ for all sufficiently large $\alpha$, where $x(\alpha)$ has larger support than $S$, but $S$ was maximal. Hence, there is no $x_J$ so that (\ref{Go}) holds. By Gordan's Theorem~(\ref{gordan}), there is some $w\in\reals^{m-k}$ with $w\T D>\0\T$, that is, \[ (w\T,\0\T)\left[\begin{matrix}D\\E\end{matrix}\right]>\0\T, \quad (w\T,\0\T)\left[\begin{matrix}0\\F\end{matrix}\right]=\0\T. \] With $y\T=(w\T,\0\T)C$ and (\ref{DEF}), this implies (\ref{tucker}) with $x=\tilde x$, as claimed. \endproof Because the minimax theorem proves Gordan's Theorem (see Proposition~\ref{p-govi}), it proves Tucker's Theorem~(\ref{tuck}) and Tucker's Lemma (\ref{ltuck}) and the Lemma of Farkas and therefore LP duality. Instead of the minimax theorem we can by Proposition~\ref{p-stigo} use Stiemke's Theorem~\ref{t-stiemke} to prove Gordan's Theorem~(\ref{gordan}). The short proof by Tucker \cite[p.~5--7]{tucker1956} of his Lemma (\ref{ltuck}) has some structural similarities to Stiemke's proof but uses more explicit computations.
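As a concrete illustration (not part of the original text), the column-wise construction in the proof of Proposition~\ref{p-tucker} can be carried out numerically: for each column $j$, an LP solver finds a pair satisfying (\ref{jtuck}), and the sums give a Tucker pair for (\ref{tuck}). The sketch below assumes Python with \texttt{numpy} and \texttt{scipy}; the strict inequality in (\ref{jtuck}) is replaced by the normalization $x_j+(y\T A)_j\ge1$, which is legitimate because the constraints form a cone, so any solution with a positive value can be scaled up, and Tucker's Lemma guarantees that such a solution exists.

\begin{verbatim}
# Illustration only: a Tucker pair (x, y) for (tuck), built column by column
# as in Proposition p-tucker, with each instance of (jtuck) solved as an LP.
import numpy as np
from scipy.optimize import linprog

def tucker_pair(A):
    m, n = A.shape
    x_sum, y_sum = np.zeros(n), np.zeros(m)
    for j in range(n):
        # variables z = (x, y) of length n + m
        A_eq = np.hstack([A, np.zeros((m, m))])       # A x = 0
        b_eq = np.zeros(m)
        A_ub = np.hstack([np.zeros((n, n)), -A.T])    # -(A^T y) <= 0
        b_ub = np.zeros(n)
        row = np.zeros(n + m)                         # -(x_j + (A^T y)_j) <= -1
        row[j] = -1.0
        row[n:] -= A[:, j]
        A_ub = np.vstack([A_ub, row])
        b_ub = np.append(b_ub, -1.0)
        bounds = [(0, None)] * n + [(None, None)] * m # x >= 0, y free
        res = linprog(np.zeros(n + m), A_ub=A_ub, b_ub=b_ub,
                      A_eq=A_eq, b_eq=b_eq, bounds=bounds)
        assert res.success
        x_sum += res.x[:n]
        y_sum += res.x[n:]
    return x_sum, y_sum

A = np.array([[1.0, -1.0, 0.0],
              [0.0,  1.0, -1.0]])
x, y = tucker_pair(A)
print(np.allclose(A @ x, 0),            # A x = 0
      (A.T @ y >= -1e-9).all(),         # y^T A >= 0
      (x + A.T @ y > 1e-9).all())       # x^T + y^T A > 0
\end{verbatim}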
We conclude this section by showing how Tucker's Theorem proves, as one of its main applications \cite[theorem~6]{tucker1956}, the condition of \textit{strict complementarity} in linear programming. For the LP (\ref{P}) and its dual LP (\ref{D}), a feasible pair $x,y$ of solutions is optimal if and only if we have equality in (\ref{WD}), that is, $c\T x=y\T Ax= y\T b$, which means \begin{equation} \label{cs} y\T(b-Ax)=0\,,\qquad(y\T A-c\T)x=0\,. \end{equation} This orthogonality of the nonnegative vectors $y$ and $b-Ax$, and of $y\T A-c\T$ and~$x$, means that they are complementary in the sense that in each component \textit{at least one} of them is zero: \begin{equation} \label{csij} y_i(b-Ax)_i=0 \quad(i\in[m]), \qquad (y\T A-c\T)_j\,x_j=0 \quad(j\in[n]), \end{equation} also called ``complementary slackness''. The following theorem asserts \textit{strict} complementarity, namely that if (\ref{P}) and (\ref{D}) are feasible, then they have optimal solutions $x$ and $y$ where in each product in (\ref{csij}) \textit{exactly one} of the two factors is zero. \begin{proposition} \label{p-strict} If the LPs $(\ref{P})$ and $(\ref{D})$ are feasible, then they have optimal solutions $x$ and $y$ such that $(\ref{cs})$ holds and \begin{equation} \label{strict} y+(b-Ax)>\0,\qquad x\T+(y\T A -c\T)>\0\T. \end{equation} \end{proposition} \myproof Optimality of $x$ and $y$ means $c\T x=y\T b$ and therefore (\ref{cs}). Similar to Proposition~\ref{p-tu} and (\ref{stuck}), Tucker's Theorem (\ref{tuck}) proves that for a skew-symmetric matrix $B$ there is some $z$ such that \begin{equation} \label{sT} z\ge\0\,,\quad Bz\le\0\,,\quad z-Bz>\0\,. \end{equation} Applied to the game matrix $B$ in (\ref{B}), because the LPs are feasible, this gives a solution $z=(y',x',t')$ with $t'>0$ (because $t'=0$ would, as in the proof of Theorem~\ref{t-51}, imply that at least one of the LPs is infeasible), where $y=y'\frac1{t'}$ and $x=x'\frac1{t'}$ fulfill (\ref{strict}). \endproof The proof of Proposition~\ref{p-strict} demonstrates a very good use of Dantzig's game $B$ in (\ref{B}). Geometrically, the LP solutions $x$ and $y$ are then in the relative interior of the set of optimal solutions. Unless this set is a singleton, $x$ and $y$ are not unique, but their supports $\supp(x)$ and $\supp(y)$ are unique, shown similarly to the initial argument in the proof of Theorem~\ref{t-gotu}. \section{Extending Dantzig's game} \label{s-M} In this section, we give a longer but more constructive proof of LP duality from the minimax theorem. We present a natural extension of Dantzig's game $B$ in (\ref{B}) by adding an extra row to~$B$, giving the game $B_M$ in (\ref{BM}) below. The aim is to ``enforce'' the last column of $B$ to be played with positive probability~$t$ if that is possible. Any max-min strategy for $B_M$ gives not only information about solutions to the LPs (\ref{P}) and (\ref{D}) if both are feasible, but also a certificate in (\ref{witn}) if not. \begin{theorem} \label{t-BM} There is some $M\in \reals$ with the following properties: If both the primal LP $(\ref{P})$ and its dual $(\ref{D})$ are feasible, then they also have respective feasible solutions $x$ and $y$ with $\1\T x+\1\T y+1\le M$. Moreover, consider the zero-sum game \begin{equation} \label{BM} B_M=\left[\begin{matrix} 0&A&-b\\ -A\T&0&c\\ b\T & -c\T & 0\\ \1\T & \1\T & -M \\ \end{matrix}\right] \end{equation} with value $v$. Then $v\ge0$, and \rmitem{(a)} $v=0$ with min-max strategy $(y,x,t)$ and max-min strategy $(y,x,t,0)$ for $B_M$ if and only if $(\ref{P})$ and $(\ref{D})$ are feasible, in which case $x\frac1t$ is optimal for $(\ref{P})$ and $y\frac1t$ is optimal for $(\ref{D})$.
\rmitem{(b)} If $v>0$ with max-min strategy $(y,x,r,s)$ for $B_M$, then $r=0$, $s=v$, and \begin{equation} \label{witn} A x\le\0,\quad x\ge\0, \quad A\T y\ge\0,\quad y\ge\0, \quad b\T y-c\T x<0\,, \end{equation} which proves that $(\ref{P})$ or $(\ref{D})$ is infeasible. Moreover, $v<1$, and the smallest number $w$ such that \begin{equation} \label{Aw} A\bar x\le b+\1 w, \quad \bar x\ge\0, \quad -A\T \bar y\le-c+\1 w, \quad \bar y\ge\0 \end{equation} has feasible solutions $\bar x$ and $\bar y$ is given by \begin{equation} \label{wv} w=\frac{M+1}{1/v-1}\,. \end{equation} \rmitem{(c)} If the entries of $A,b,c$ are rational numbers, let $\alpha$ be the maximum of the absolute value of the numerators of these numbers, let $\beta$ be the maximum denominator, and $\ell=m+n+1$. Then a suitable choice of $M$ is \begin{equation} \label{Mell} M=\ell!\,\ell\alpha^\ell\beta^{\ell^2+\ell}+1, \end{equation} which in bit-size is polynomial in the bit-size of $A,b,c$. \end{theorem} We first discuss Theorem~\ref{t-BM}. We will prove it (in Theorem~\ref{t-DM} below) without using LP duality, which will therefore be an alternative proof of LP duality from the minimax theorem. Although this proof is longer than that of Theorem~\ref{t-gotu}, it provides a reduction of the problem of solving an LP (in the sense of providing an optimal solution or a certificate that the LP is unbounded or infeasible) to the problem of solving a zero-sum game. This reduction is new, as discussed further in Section~\ref{s-adler}. Some observations in Theorem~\ref{t-BM} are immediate: The value $v$ of $B_M$ is nonnegative because the row player can ignore the last row and play as in Dantzig's game~$B$ in (\ref{B}). Furthermore, if $v=0$, then the last row in $B_M$ states $\1\T y+\1\T x -Mt\le 0$ for any min-max strategy $(y,x,t)$, which means $t>0$. That strategy can be used as a max-min strategy (with the last row of $B_M$ unplayed), with optimal solutions $x\frac1t$ and $y\frac1t$ to (\ref{P}) and (\ref{D}). For the converse, however, (\ref{P}) and (\ref{D}) may have feasible solutions $x$ and $y$, respectively, but none of them fulfill $c\T x\ge y\T b$ unless we assume the LP duality theorem (which then proves~(a)). In order to avoid using strong LP duality, we have to argue more carefully, as done in Theorem~\ref{t-DM} below. Also, the optimal solutions $x\frac1t$ and $y\frac1t$ fulfill $\1\T y\frac1t+\1\T x\frac1t\le M$, so this constraint does not (and must not) affect feasibility of (\ref{P}) and (\ref{D}). Theorem~\ref{t-BM}(b) gives a certificate that at least one of the LPs (\ref{P}) and (\ref{D}) is infeasible, if that is the case, via any max-min strategy $(y,x,r,s)$. Then (\ref{witn}) holds (which follows from $r=0$ and $s=v$), which implies $c\T x>0$ or $b\T y<0$ (or both) and thus unbounded solutions to (\ref{P}) or (\ref{D}), respectively, if either LP is feasible (and then the other LP is not). Furthermore, the value $v$ of $B_M$ defines, in a strictly monotonic relation (\ref{wv}), the minimal constant $w$ in (\ref{Aw}) added as extra slack to the right-hand sides that makes both LPs feasible. Given $A,b,c$, the value of $w$ in (\ref{Aw}) is clearly unique (and finite and independent of~$M$), whereas the game value $v$ of $B_M$ depends on~$M$. Theorem~\ref{t-BM}(c) shows that a suitable constant $M$ can be found by identifying the largest numerator (in absolute value) $\alpha$ and denominator $\beta$ of the entries in $A,b,c$ if these are given as rational numbers.
(A similar a priori bound is known if these entries are algebraic numbers \cite[p.~172]{adler2013}, but not if they are general real numbers.) Although $M$ in (\ref{Mell}) is large, its description as a binary number is of polynomial size in the description of $A,b,c$. The conversion of the LPs (\ref{P}) and (\ref{D}) to the game matrix $B_M$ is therefore a polynomial ``Karp-type'' reduction, where any minimax solution of $B_M$ either solves the LPs or proves the infeasibility of at least one of them. Finding $M$ as in Theorem~\ref{t-BM} uses the following well-known concept. A \textit{basic} solution $x$ to $Ax=b$ is given by a solution $x$ where the columns $A_j$ of $A$ with $x_j\ne0$ are linearly independent, which then determine uniquely the solution~$x$. These columns are then easily extended to a basis of the column space of $A$ and define a \textit{basis matrix}. If $A$ has full row rank~$m$, then a basis has size~$m$, and the basis matrix is an invertible $m\times m$ matrix. A basic \textit{feasible} solution $x$ also fulfills $x\ge\0$. A basic feasible solution to inequalities $Ax\le b$ (and $x\ge\0$) is meant to be a basic feasible solution to the system $Ax+p=b$ (and $x,p\ge\0$), which has full row rank. Part (b) in the following lemma and its proof are due to Ilan Adler (personal communication, 2022). \begin{lemma} \label{l-cara} Let $A\in\reals^{m\times n}$, $b\in\reals^{m}$, $c\in\reals^{n}$. \rmitem{(a)} If $Ax=b$, $x\ge\0$ has a feasible solution $x$, then it also has a basic feasible solution. \noindent Furthermore, suppose the LP: minimize $c\T x$ subject to $Ax=b$, $x\ge\0$ is feasible and has \vskip-\parskip \noindent a known lower bound $\lambda$, that is, $c\T x\ge \lambda$ for all feasible~$x$. Then \rmitem{(b)} for every feasible solution $x$ to $Ax=b$, $x\ge\0$ there is a basic feasible solution $x^*$ with $c\T x^*\le c\T x$, \rmitem{(c)} and ~$\min\{c\T x^*\mid Ax^*=b,~x^*\ge\0,~x^*$ is basic$\} = \min\{c\T x\mid Ax=b,~x\ge\0\}$. \end{lemma} \myproof Choose a feasible $x$ to $Ax=b$, $x\ge\0$ with minimal support. Then the columns $A_j$ of $A$ for $x_j>0$ are linearly independent: Namely, if $Az=\0$ for some $z\ne\0$ where $z_j\ne0$ implies $x_j>0$, let $P=\{j\mid z_j>0\}$ where $P\ne\emptyset$ (otherwise replace $z$ by $-z$). Then with \begin{equation} \label{cara} \alpha =\min\{{x_j}/{z_j}\mid j\in P\},\qquad x'=x-z\alpha \end{equation} we have $Ax'=b$, $x'\ge\0$, and $x'$ of smaller support than~$x$. Hence, no such $z$ exists, which proves the claimed linear independence. This shows (a). To show (b), suppose $Ax=b$ and $x\ge\0$ and $x$ is not basic, with $Az=\0$ for some $z\ne\0$ where $z_j\ne0$ implies $x_j>0$ as before. If $c\T z<0$, or if $c\T z=0$ and $z\le\0$, replace $z$ by $-z$. Let $P=\{j\mid z_j>0\}$. Then $P\ne\emptyset$, which holds if $c\T z=0$ because $z\ne\0$, and if $P=\emptyset$ and $c\T z>0$ then $z\le\0$, and $x-z\alpha$ is feasible but $c\T(x-z\alpha)$ is arbitrarily negative as $\alpha\to\infty$, which contradicts boundedness. Then with $\alpha$ and $x'$ as in (\ref{cara}), $x'$ has smaller support than $x$ and $c\T x'\le c\T x$. If $A$ has $n$ columns, then this process terminates after at most $n$ steps with a basic feasible solution $x^*$ with $c\T x^*\le c\T x$, as claimed. Part (c) follows from (b) because there are finitely many basic feasible solutions, so the minimum on the left exists, and the minimum on the right also exists and equals its infimum. 
\endproof With the added equation $\1\T x=1$, Lemma~\ref{l-cara}(a) is \textit{Carath\'eodory's theorem}: Any convex combination $b$ of points in $\reals^m$ is already the convex combination of a suitable set of at most $m+1$ of these points \cite[p.~200]{caratheodory1911}. We prove Theorem~\ref{t-BM} using the following Theorem~\ref{t-DM} (mostly to simplify notation) applied to \begin{equation} \label{Cd} C=\left[\begin{matrix} 0&A\\ -A\T & 0\\ \end{matrix}\right], \qquad d=\left[\begin{matrix} ~b\\ -c\\ \end{matrix}\right]. \end{equation} The proof of Theorem~\ref{t-DM} does \textit{not} use strong LP duality. \begin{theorem} \label{t-DM} Let $C\in\reals^{k\times k}$ such that $C=-C\T$, and $d\in\reals^k$. Let $(z,w)=(z^*,w^*)\in\reals^k\times\reals$ be a basic feasible solution that minimizes $w$ subject to \begin{equation} \label{basic} Cz-\1w\le d,\quad d\T z-w\le 0,\quad z\ge\0,\quad w\ge0, \end{equation} and let $M\in\reals$ with \begin{equation} \label{M} \1\T z^*+1\le M \,. \end{equation} Consider the zero-sum game \begin{equation} \label{DM} D_M=\left[\begin{matrix} C&-d\\ \,d\T & 0\\ \1\T & -M \\ \end{matrix}\right] \end{equation} with game value~$v$. Then $v\ge0$ and \rmitem{(a)} $v=0$ if and only if $w^*=0$. If $w^*=0$, let $t=\frac1{\1\T z^*+1}$ and $z=z^*t$. Then $(z,t)$ is a min-max strategy and $(z,t,0)$ is a max-min strategy for $D_M$. \rmitem{(b)} Suppose $v>0$. Then every max-min strategy $(q,r,s)$ of $D_M$ fulfills $r=0$, $s=v$, and \begin{equation} \label{witness} Cq\le\0,\quad d\T q<0, \end{equation} which proves that there is no $z\ge\0$ with $Cz\le d$. \end{theorem} \myproof In the following, letters (and their decorated versions) $q$ and $z$ denote vectors in $\reals^k$, and $r,s,t,u,v,w$ denote scalars in $\reals$. The system (\ref{basic}) is feasible, for example with $z=\0$ and large enough~$w$, and $w$ is bounded from below, so that (\ref{basic}) has an optimal basic feasible solution $(z^*,w^*)$ by Lemma~\ref{l-cara}(c). We have $v\ge0$, because the game matrix \begin{equation} \label{D=} D=\left[\begin{matrix} C&-d\\ \,d\T & 0\\ \end{matrix}\right] \end{equation} is skew-symmetric and has game value $0$, so by adopting any max-min strategy for $D$ and not playing the last row in $D_M$ the row player will get at least~$0$. For the ``if'' part of case (a), if $w^*=0$ then with $t=\frac1{\1\T z^*+1}$ and $z=z^*t$ we have $\1\T z-Mt\le -t<0$ by (\ref{M}). This shows that $(z,t)$ is a min-max strategy and $(z,t,0)$ a max-min strategy for $D_M$, and $v=0$. For the ``only if'' part, if $v=0$ then a min-max strategy $(z',t)$ for $D_M$ requires $t>0$ to get a nonpositive cost in the last row, and then $z=z'\frac1t$ solves (\ref{basic}) with $w=0$. To show (b), let $v>0$. The following properties hold for any optimal strategies of $D_M$. The min-max value of $D_M$ with min-max strategy $(z,t)$ is the smallest real number $v$ such that \begin{equation} \label{P'} \arraycolsep.2em \begin{array}{rcrcrl} Cz&-&dt&\le&\1v\\ d\T z&&&\le&v\\ \1\T z&-&Mt&\le&v\\ \1\T z&+&t&=&1\\ z&,&t&\ge&\0&. \end{array} \end{equation} The max-min value of $D_M$ with max-min strategy $(q,r,s)$ is the largest $v$ such that \begin{equation} \label{D'} \arraycolsep.2em \begin{array}{lclclcl} q\T C&+&rd\T &+&s\1\T&\ge&v\1\T\\ q\T(-d)&&&-&sM&\ge&v\\ q\T\1&+&r&+&s&=&1\\ q&,&r&,&s&\ge&\0\,. 
\end{array} \end{equation} Then $0<s<1$ because if $s=0$ then $(q,r,0)$ would be a max-min strategy for the symmetric game $D$ in (\ref{D=}) with max-min value $v>0$ which is not possible, and if $s=1$ then the last row of $D_M$ alone would be a max-min strategy for $D_M$, but that row has the negative entry $-M$. Because $s>0$, we have $\1\T z-Mt=v$ in (\ref{P'}), and, using $\1\T z=1-t$, \begin{equation} \label{t} v=1-(M+1)t\,. \end{equation} We show that $v\le s$. If $v>s$, then by (\ref{D'}), \begin{equation} \label{DD} \arraycolsep.2em \begin{array}{lclcll} q\T C&+&rd\T&\ge&(v-s)\1\T\\ q\T(-d)&&&\ge&v+Ms\\ \end{array} \end{equation} which would define a max-min strategy $(q\frac 1{1-s},\frac r{1-s})$ with positive max-min value for the symmetric game~$D$, a contradiction. Hence, $0<v\le s<1$ and by (\ref{t}), \begin{equation} \label{tv} t=\frac{1-v}{M+1}>0\,. \end{equation} Then (\ref{P'}) implies \begin{equation} \label{Czt} \textstyle Cz\frac1t\le d+\1\frac vt\,, \quad d\T z\frac1t\le \frac vt\,, \end{equation} and therefore \begin{equation} \label{w*} w^*\le\frac vt = \frac{v(M+1)}{1-v} \,. \end{equation} In order to show that every max-min strategy $(q,r,s)$ for $D_M$ is of the form $(q,0,v)$, we will in essence use weak duality. We write $s=u+v$ with $u\ge0$ (we know $s\ge v$) and let $v$ in (\ref{D'}) be \textit{fixed} where we now in essence maximize~$u$. That is, we consider the constraints \begin{equation} \label{Dbar} \arraycolsep.2em \begin{array}{lclclcl} q\T C&+&rd\T&+&u\1\T&\ge&\0\T\\ q\T(-d)&&&-&uM&\ge&v(M+1)\\ q\T\1&+&r&+&u&=&1-v\\ q&,&r&,&u&\ge&\0~. \end{array} \end{equation} They have solutions with the current max-min strategy $(q,r,s)$ and $u=s-v$. We use that $Cz^*-d-\1w^*\le\0$ and $d\T z^*-w^*\le 0$ in (\ref{basic}), and $-1\ge \1\T z^*-M-w^*$ by (\ref{M}), and $v(M+1)-(1-v)w^*\ge0$ by (\ref{w*}) in the following chain of inequalities, obtained by multiplying the first inequality in (\ref{Dbar}) by $z^*$, the second by~1, and the equation by $-w^*$ and summing up: \begin{equation} \label{chain} \begin{array}{rcl} 0&\ge& -u\\ &\ge& q\T(Cz^*-d-\1w^*)+r(d\T z^*-w^*)+u(\1\T z^*-M-w^*)\\ &\ge& v(M+1)-(1-v)w^*\ge0\,.\\ \end{array} \end{equation} Hence, all inequalities hold as equalities, in particular \begin{equation} \label{w*=} w^* = \frac{M+1}{1/v-1} \end{equation} and $u=0$. This shows $s=v$ in any solution $(q,r,s)$ to (\ref{D'}). In addition, $q\T C + rd\T\ge \0\T$, that is, $Cq-dr\le\0$, and $q\T d\le-v(M+1)<0$. The skew-symmetry of $C$ implies $q\T C q=(q\T C q)\T=q\T C\T q=-q\T C q$ and therefore $q\T Cq=0$, for any~$q$. If we had $r>0$ then $Cq\frac1r\le d$ and $0 = q\T C q\frac1r\le q\T d < 0$, a~contradiction, which shows $r=0$. This shows $Cq\le\0$ and $d\T q<0$ as claimed in (\ref{witness}). In turn, this shows that there is no $z\ge\0$ with $Cz\le d$, because this would imply $ 0\le z\T(-Cq)=z\T C\T q = q\T C z\le q\T d < 0\,. $ \endproof \myproofof{Theorem~\ref{t-BM}} We apply Theorem~\ref{t-DM} to $C$ and $d$ in (\ref{Cd}). Let $v$ be the value of the game $D_M$. Then by Theorem~\ref{t-DM}(a), $v=0$ implies feasibility and optimality of the LPs (\ref{P}) and (\ref{D}). Conversely, suppose that (\ref{P}) and (\ref{D}) are feasible. Then $v=0$, because if $v>0$ then (\ref{witness}) contradicts feasibility. This shows part (a) in Theorem~\ref{t-BM}, and also part (b) via (\ref{w*=}). To show Theorem~\ref{t-BM}(c), suppose first that $\beta=1$, that is, all entries of $A,b,c$ are integers. 
The system (\ref{basic}) has $\ell$ rows, and written as equations with slack variables has entries from $A,b,c$ or $0,1,-1$. Any basic solution is uniquely determined by the basis matrix, where each variable is the quotient of two determinants where the denominator is at least~1 and the numerator bounded in absolute value by $\ell!\,\alpha^\ell$. Only the $\ell$ basic variables can be nonzero, so that we can choose $M=\ell!\,\ell\alpha^\ell+1$ by (\ref{M}). See also \cite[p.~30]{papasteig} or \cite[p.~172]{adler2013}; I did not find the next description, clearly standard, if $\beta>1$. If $\beta>1$, multiply each column of $\twovec C{d\T}$ and $\twovec d0$ in (\ref{basic}) with the least common multiple of the denominators in that column, called the \textit{scale factor} $\sigma_j$ for that column $j$ (with $j=0$ if the column is~$d$). This gives an integral system where each basic solution has to be changed by multiplying each variable in column~$j$ with its scale factor $\sigma_j$ and dividing it by $\sigma_0$ to give the solution to the original system. Each entry of the integral system has been multiplied by at most $\beta^\ell$ (this is an overestimate because each column of $C$ in (\ref{Cd}) has $m$ or $n$ zeros), so we have to replace $\alpha^\ell$ by $\alpha^\ell(\beta^{\ell})^\ell$, with the extra factor $\beta^\ell$ for the re-scaling of the variables, which shows (\ref{Mell}). The number of bits to represent $M$ is its binary logarithm, which is polynomial in $\ell$ and in the bit-sizes of $\alpha$ and~$\beta$, and hence in the bit-size of $A,b,c$. \endproof \section{Discussion and related work} \label{s-adler} Because Dantzig's proof in Theorem~\ref{t-51} works for generic LPs, a first question is if genericity can be achieved by perturbing a given LP. However, this may alter its feasibility. For example, consider the LP of maximizing $x_2$ subject to $x_2\le1$, $x\ge\0$, $x\in\reals^2$. The corresponding game $B$ in (\ref{B}) has an all-zero row and column, which when played as an optimal pure-strategy pair does not play the last column ($t=0$). The LP has optimal solutions $(x_1,1)$ for any $x_1\ge0$. However, maximizing the perturbed objective function $\eps x_1+x_2$ (for some small $\eps>0$) with the same constraints gives an unbounded LP. Hence, there is no obvious way of perturbing the LP to make Dantzig's proof generally applicable. The closest related works to ours are Adler \cite{adler2013} and Brooks and Reny \cite{BReny}. We continue here our discussion from the introduction. A main goal of Adler \cite{adler2013} is to reduce the computational problem of solving an LP (in the sense of finding an optimal solution or proving there is none) to the problem of solving a zero-sum game by means of a strongly polynomial-time reduction. Adler considers the feasibility problem with equalities, that is, to find $x\in\reals^n$ such that \begin{equation} \label{feaseq} Ax=b,\qquad x\ge\0\,, \end{equation} for an $m\times n$ matrix $A$, or to show that no such $x$ exists. He constructs a symmetric game with $m+n+3$ rows and columns. An optimal strategy to that game produces either a solution to (\ref{feaseq}), or a vector $y\in\reals^m$ such that $y\T A\ge\0\T$ and $y\T b<0$ (which by (\ref{farkeq}) shows that (\ref{feaseq}) is infeasible), or some $\tilde x\ne\0$ such that $A\tilde x=\0$ and $\tilde x\ge\0$. The first two cases answer whether (\ref{feaseq}) is feasible or not. 
In the third case, $Ax=b$ is replaced by an equivalent system where the variables $x_{S}$ in the support $S$ (written $J^+$ in \cite{adler2013}) of~$\tilde x$ are eliminated. In a solution to that equivalent system, the variables $x_{S}$ can be substituted back, and irrespective of their sign can be replaced by $x_{S}+\tilde x_{S}\alpha$ for sufficiently large $\alpha$ to find a solution to (\ref{feaseq}). (The latter step is implicit in the claim (10b) of \cite[p.~173]{adler2013} and attributed to \cite{Dantzig} but without a page number; I could not find it and found these computations the hardest to follow.) Repeating this at most $n$ times, with corresponding calls to solving a zero-sum game, then answers the feasibility problem. This is known as a ``Cook-type'' reduction. It also leads to a proof of Tucker's Theorem from Gordan's Theorem in \cite[section~4]{adler2013}, which we have given in a more direct way in Theorem~\ref{t-gotu}. A different ``Karp-type'' reduction uses only a single step from the feasibility problem (\ref{feaseq}) to solving a zero-sum game, by adding a constraint $\1\T x\le M$ where $M$ is large enough to not affect feasibility. If the entries of $A$ and $b$ are algebraic numbers (in particular, integers), they determine an explicit bound on $M$ of polynomial encoding size \cite[p.~172]{adler2013}. We have done the same in Theorem~\ref{t-BM} above. However, our game $B_M$ is directly derived from the original LPs (\ref{P}) and (\ref{D}) defined by inequalities (also first considered by Adler) with a single extra row added to Dantzig's original game~$B$ in (\ref{B}), rather than converting them to equalities as in (\ref{feaseq}) (with a new, larger matrix~$A$) and then back to inequalities to construct an even larger symmetric game. As an additional, new property, Theorem~\ref{t-BM}(b) shows that a max-min strategy of~$B_M$ provides a certificate that the LPs are infeasible if that is the case. Brooks and Reny \cite{BReny} prove the following theorem. For any matrix $D$, let $\|D\|$ be the maximum absolute value of its entries. \begin{theorem}[Brooks and Reny \cite{BReny}] \label{t-BR} Consider the LPs $(\ref{P})$ and $(\ref{D})$. Let $r$ be the rank of the matrix \begin{equation} \label{hatA} \hat A= \left[\begin{matrix} 0&-A\T \\ A&0 \\ -c\T &b\T \end{matrix}~\right] \end{equation} and let \begin{equation} \label{dalpha} \alpha=2r^2\max\{\|b\|,\|c\|\}\max_W\|W^{-1}\|+1, \end{equation} where the second maximum is taken over all invertible sub-matrices $W$ of $\hat A$. Then for the game $P$ with $n+m+1$ rows and columns \begin{equation} \label{PP} P= \left[\begin{matrix} 0&-\alpha A\T & \0 \\ \alpha A&0 & \0 \\ -\alpha c\T & \alpha b\T & 0 \end{matrix}~\right] + \left[\begin{matrix} c\\-b\\0 \end{matrix}\right] \1\T \end{equation} either \rmitem{(a)} the value of $P$ is zero, and then for a min-max strategy $(x^*,y^*,t^*)$ of $P$, a pair of optimal solutions to the LPs $(\ref{P})$ and $(\ref{D})$ is $(x^*\alpha,y^*\alpha)$, or \rmitem{(b)} the value of $P$ is positive, and then any max-min strategy $(x,y,t)$ of $P$ fulfills $A x\le\0$, $x\ge\0$, $A\T y\ge\0$, $y\ge\0$, and $c\T x>b\T y$, which shows that at least one LP is infeasible. 
\end{theorem} The main effect of the definition of $P$ is that for any min-max strategy $(x^*,y^*,t^*)$ with min-max value $v$, we have \begin{equation} \label{mmBR} \arraycolsep.2em \begin{array}{rcrcrl} &-&\alpha A\T y^*& \le&-c+\1v\\ \alpha Ax^*&&&\le&b+\1v\\ -\alpha c\T x^*&+&\alpha b\T y^*&\le&v\\ \end{array} \end{equation} with \textit{constant} right-hand sides $-c$ and $b$ rather than these being scaled by $t^*$. The number $\alpha$ is similar to the bound $M$ in Theorem~\ref{t-DM} and (\ref{M}), because if the LPs (\ref{P}) and (\ref{D}) have feasible solutions, then with $x^*$ and $y^*$ as in Theorem~\ref{t-BR}(a), they have feasible solutions $x^*\alpha,y^*\alpha$ with $\1\T x^*\alpha+\1\T y^*\alpha \le \alpha$, as noted by Brooks and Reny \cite[Remark~7]{BReny}. If the value of $P$ in (\ref{PP}) is positive, then any max-min strategy $(x,y,t)$ in Theorem~\ref{t-BR}(b) proves the infeasibility of at least one of the LPs just as in (\ref{witn}) in Theorem~\ref{t-BM}. Given the constraints (\ref{mmBR}), the definition of $P$ can be seen as ``canonical'' as claimed by Brooks and Reny, although one could also call it ``proof-induced''. From the viewpoint of using this game, it has the disadvantage that all entries of $A$ are multiplied by the large number $\alpha$, and $P$ is a full matrix and no longer half-empty, with zero entries replaced by the entries of $c$ and $-b$. In contrast, in our matrix $B_M$ in Theorem~\ref{t-BM} the large number $M$ appears in a single place, and the zero entries remain. The game $B_M$ also naturally extends Dantzig's original game. In summary, it seems that proving LP duality from the minimax theorem requires quite a bit of linear algebra, most concisely in our relatively short proof of Theorem~\ref{t-gotu}. We show an elegant use of linear algebra in the next, final section. \section{Minimally infeasible sets of inequalities} \label{s-minfeas} We conclude this article with a short elementary proof of the Lemma of Farkas in its inequality-only version (\ref{farkine}) due to Conforti, Di Summa, and Zambelli \cite{conforti2007}. The main trick is to state the \textit{minimal} infeasibility of these inequalities in terms of infeasibility of the corresponding equalities, which is canonically proved by induction. The second step is to apply the linear algebra Lemma~\ref{l-alt} to the infeasible equalities to obtain the required vector~$y$ in (\ref{farkine}). A set of linear equations and inequalities is called infeasible if it has no solution, and \textit{minimally} infeasible if omitting any one equation or inequality makes it feasible. The following proofs of theorem~2.1 and lemma~2.1 of \cite{conforti2007}, in simplified notation, show (\ref{farkine}) based on minimally infeasible sets of inequalities. \begin{theorem}[Conforti, Di Summa, and Zambelli \cite{conforti2007}] \label{t-minfeas} Let $A\in\reals^{m\times n}$ and $b\in\reals^m$ and let $a_1,\ldots,a_m$ be the rows of $A$. Suppose the system $Ax\le b$ is minimally infeasible. \rmitem{(i)} Then the system $Ax=b$ is minimally infeasible. \rmitem{(ii)} Reversing any inequality $a_ix\le b_i$ in $Ax\le b$ creates a feasible system: \begin{equation} \label{xi} \forall\,i\in[m]~~ \exists\,x^{(i)}\in\reals^n~:~ a_ix^{(i)}>b_i\,,~~~ \forall\,k\in[m]-\{i\}~:~ a_kx^{(i)}=b_k\,. \end{equation} \end{theorem} \myproof We prove that for any $R\subseteq[m]$ the constraints \begin{equation} \label{minfeas} a_i x=b_i \quad(i\in R), \qquad a_i x\le b_i \quad(i\in [m]-R) \end{equation} are minimally infeasible.
The proof is by induction on $|R|$. For $|R|=0$ condition (\ref{minfeas}) holds by assumption. Suppose it holds for all $R$ up to a certain size $|R|$. If $R=[m]$ then the proof of~(i) is complete, so let $h\not\in R$, where we want to show that \begin{equation} \label{indfeas} a_i x=b_i \quad(i\in R), \qquad a_h x=b_h\,, \qquad a_i x\le b_i \quad(i\in [m]-R-\{h\}) \end{equation} is minimally infeasible. The system (\ref{indfeas}) is infeasible (because $Ax\le b$ is infeasible), so we have to prove that omitting any constraint $a_jx=b_j$ or $a_jx\le b_j$ for $j\in[m]$ produces a feasible system. This is clearly the case if $j=h$, or if $j\in R$ by applying the inductive hypothesis to $R\cup\{h\}-\{j\}$, so let $j\not\in R$. The constraints (\ref{minfeas}) for $i\ne h$ and $i\ne j$ have solutions $x^{(h)}$ and $x^{(j)}$, respectively, with \arraycolsep.2em \begin{equation} \label{jk} \begin{array}{lcllclrcl} a_i x^{(h)}&=&b_i \quad(i\in R),& \quad a_i x^{(h)}&\le&b_i \quad(i\in [m]-R-\{h\}),\quad & a_h x^{(h)}&>&b_h \\ a_i x^{(j)}&=&b_i \quad(i\in R),& \quad a_i x^{(j)}&\le&b_i \quad(i\in [m]-R-\{j\}). \\ \end{array} \end{equation} If $a_hx^{(j)}=b_h$ then $x^{(j)}$ is a feasible solution to (\ref{indfeas}) with row $a_jx\le b_j$ omitted. Otherwise $a_hx^{(j)}<b_h$\,, and a suitable convex combination of $x^{(j)}$ and $x^{(h)}$ is such a solution because $a_hx^{(h)}>b_h$. This completes the induction. Condition (ii) is an immediate consequence of (i): Let $i\in[m]$. Because $Ax=b$ is minimally infeasible, there is some $x^{(i)}\in\reals^n$ such that $a_kx^{(i)}=b_k$ for all $k\ne i$ and $a_ix^{(i)}\ne b_i\,$, where $a_ix^{(i)}<b_i$ would imply that $Ax\le b$ is feasible, hence $a_ix^{(i)}> b_i\,$. \endproof \myproofof{$(\ref{farkine})$ using Theorem~\ref{t-minfeas}} The direction ``$\Leftarrow$'' in (\ref{farkine}) is immediate. To prove ``$\Rightarrow$'', assume that $Ax\le b$ is infeasible, and (by dropping sufficiently many rows from these inequalities, whose components of~$y$ will be set to zero) that $Ax\le b$ is minimally infeasible. Denote the number of rows of this minimally infeasible system again by~$m$. By Theorem~\ref{t-minfeas}, $Ax=b$ is minimally infeasible. By Lemma~\ref{l-alt}, there is some $y\in\reals^m$ such that $y\T[A~{-b}]=[\0\T~1]$. We show that $y>\0$ (Adler, personal communication, 2023). Let $i\in[m]$ and $x^{(i)}$ as in (\ref{xi}) in Theorem~\ref{t-minfeas}(ii). Then \[ 1=[\0\T~1]\twovec{x^{(i)}}{1}=y\T[A~{-b}]\twovec{x^{(i)}}{1}=y_iz_i \] by (\ref{xi}) where $z_i=a_ix^{(i)}-b_i>0$, and thus $y_i>0$. Hence, $y>\0$ as claimed. \endproof The proof of Theorem~\ref{t-minfeas} is canonical and easy to reconstruct. As for proving the Lemma of Farkas, in the same version (\ref{farkine}), perhaps the most natural and elementary proof is ``projection'' or Fourier-Motzkin elimination (see Schrijver \cite[p.~155f]{Schrijver} and references). It expresses the constraints in $Ax\le b$ in terms of~$x_1$ by dividing each row by the coefficient of $x_1$ when it is nonzero, which reverses the inequality when the coefficient is negative. This induces mutual bounds among the other linear terms in $x_2,\ldots,x_n$ and eliminates~$x_1$. This elimination is then iterated (and may lead to an exponential increase in the number of constraints). See Kuhn \cite{kuhn1956} and Tao \cite[p.~180]{tao2008} for deriving (\ref{farkine}) in this way. 
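The elimination step just described can be written out in a few lines. The following sketch is not part of the original article; it assumes Python with \texttt{numpy} and eliminates $x_1$ from a system $Ax\le b$ exactly as described above, returning an equivalent system in $x_2,\ldots,x_n$.

\begin{verbatim}
# Illustration only: one step of Fourier-Motzkin elimination for Ax <= b.
import numpy as np

def eliminate_first_variable(A, b):
    pos  = [i for i in range(len(b)) if A[i, 0] > 0]
    neg  = [i for i in range(len(b)) if A[i, 0] < 0]
    zero = [i for i in range(len(b)) if A[i, 0] == 0]
    rows, rhs = [], []
    # rows with zero coefficient of x_1 are kept unchanged
    for i in zero:
        rows.append(A[i, 1:]); rhs.append(b[i])
    # each pair (i, j) with positive/negative coefficient of x_1 gives a new
    # inequality: the lower bound on x_1 from row j must not exceed the upper
    # bound on x_1 from row i
    for i in pos:
        for j in neg:
            rows.append(A[i, 1:] / A[i, 0] - A[j, 1:] / A[j, 0])
            rhs.append(b[i] / A[i, 0] - b[j] / A[j, 0])
    return np.array(rows), np.array(rhs)

A = np.array([[ 1.0,  1.0],    #  x1 + x2 <= 4
              [-1.0,  1.0],    # -x1 + x2 <= 2
              [ 0.0, -1.0]])   #       -x2 <= 0
b = np.array([4.0, 2.0, 0.0])
print(eliminate_first_variable(A, b))   # inequalities in x2 only
\end{verbatim}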
\section*{Acknowledgments} I thank Ahmad Abdi, Ben Brooks, Phil Reny, Giacomo Zambelli, and anonymous referees for helpful comments and discussions, and Sylvain Sorin for alerting me to the works of Loomis \cite{loomis1946} and Ville \cite{ville1938}. I thank Ilan Adler for great help in proving Theorem~\ref{t-DM} without assuming strong LP duality.
2205.11129v2
http://arxiv.org/abs/2205.11129v2
Polynomial reduction for holonomic sequences and applications in $π$-series and congruences
\documentclass[11pt]{article} \usepackage{graphicx, colordvi} \usepackage{amsfonts} \usepackage{amssymb} \usepackage{amsthm,cite,color} \usepackage{dsfont} \usepackage{epsfig} \usepackage{mathrsfs} \usepackage{amsfonts} \usepackage{amssymb} \usepackage{amsmath} \usepackage{amssymb,amsfonts,amsmath,amsthm,cite,color} \usepackage{dsfont} \usepackage{epsfig} \usepackage{mathrsfs} \usepackage{longtable} \usepackage{hyperref} \hypersetup{ colorlinks=true, linkcolor=cyan, filecolor=blue, urlcolor=red, citecolor=green, } \parskip=7pt \newtheorem{theo}{Theorem} \newtheorem{rem}{Remark} \newtheorem{prop}[theo]{Proposition} \newtheorem{prob}[theo]{Problem} \newtheorem{coro}[theo]{Corollary} \newtheorem{defi}[theo]{Definition} \newtheorem{lem}[theo]{Lemma} \newtheorem{exm}[theo]{Example} \newtheorem{coj}[theo]{Conjecture} \makeatletter \@addtoreset{equation}{section} \@addtoreset{theo}{section} \makeatother \renewcommand{\theequation}{\arabic{section}.\arabic{equation}} \renewcommand{\thesection}{\arabic{section}} \renewcommand{\thetheo}{\arabic{section}.\arabic{theo}} \newcommand{\bN} { {\mathbb{N}}} \newcommand{\bC} { {\mathbb{C}}} \newcommand{\bQ} { {\mathbb{Q}}} \newcommand{\bZ} { {\mathbb{Z}}} \newcommand{\bR} { {\mathbb{R}}} \newcommand{\bF} { {\mathbb{F}}} \newcommand{\bK} { {\mathbb{K}}} \newcommand{\bE} { {\mathbb{E}}} \newcommand{\cS} { {\mathcal{S}}} \newcommand{\cA} { {\mathcal{A}}} \newcommand{\lc} { {\mathrm{lc\hspace{0.5ex}}}} \newcommand{\lt} { {\mathrm{lt\hspace{0.5ex}}}} \newcommand{\ann} { {\mathrm{ann\hspace{0.5ex}}}} \newcommand{\cL} { {\mathcal{L}}} \newcommand{\Domb}{\mathrm{Domb}} \newcommand{\la} { {\langle}} \newcommand{\ra} { {\rangle}} \newcommand{\re}{\noindent{\bfseries Remark. }} \def\qed{\hfill \rule{4pt}{7pt}} \def\pf{\noindent {\it Proof.} } \def\red{\textcolor{red}} \begin{document} \begin{center} {\large \bf Polynomial reduction for holonomic sequences and applications in $\pi$-series and congruences} \end{center} \begin{center} { Rong-Hua Wang}$^{1}$ and {Michael X.X. Zhong}$^{2}$ $^1$School of Mathematical Sciences\\ Tiangong University \\ Tianjin 300387, P.R. China \\ [email protected] \\[10pt] $^2$School of Science\\ Tianjin University of Technology \\ Tianjin 300384, P.R. China\\ [email protected] \end{center} \vskip 6mm \noindent {\bf Abstract.} Recently, Hou, Mu and Zeilberger introduced a new process of polynomial reduction for hypergeometric terms, which can be used to prove and generate hypergeometric identities automatically. In this paper, we extend this polynomial reduction to holonomic sequences. As applications, we describe an algorithmic way to prove and generate new multi-summation identities. Especially we present new families of $\pi$-series involving Domb numbers and Franel numbers, and new families of congruence identities for Franel numbers and Delannoy numbers. \noindent {\bf Keywords}: polynomial reduction; holonomic sequence; $\pi$-series; congruence identity. \section{Introduction} For a long time, combinatorial identities, $\pi$-series in particular, were a mysterious part of combinatorics. It was the seminal work of Wilf and Zeilberger \cite{WZ1990, Zeil1990, Zeilberger1990c, Zeilberger1991} that initiated the study of transforming this mysterious part into science, that everybody, even a computer, could understand. Since then the mechanical proof of combinatorial identities had received special attention. Zeilberger's algorithm, also known as the method of creative telescoping, is the core algorithm in the WZ theory. 
The reduction-based approach plays an indispensable role in the development of the new generation of creative telescoping algorithms, which can separate the calculations of telescopers and certificates for efficiency and construct minimal telescopers. The first \emph{reduction} algorithm goes back to the work of Ostrogradsky \cite{O1845} and Hermite \cite{Hermite1872} for integrals of rational functions. In the continuous case, the algorithm was first worked out for bivariate rational functions in \cite{BCCL2010}, and later extended to the multivariate rational case in \cite{BLS2013} using the Griffiths--Dwork method. The approach has also been extended to algebraic functions \cite{Trager1984,CKK2016}, Fuchsian D-finite functions \cite{CHKK2018}, and general D-finite functions \cite{Hoeven2017,BCLS2018}. In the discrete case, a reduction-based algorithm was designed for summations of rational functions by Abramov in \cite{Abramov1975}, which was extended to the bivariate rational case in \cite{CHHLW}. The hypergeometric and holonomic cases were studied by Abramov and Petkov{\v{s}}ek in \cite{AP2001,AP2002} and by van der Hoeven in \cite{Hoeven2018}, respectively. In 2015, Chen et al. \cite{CHKL2015} introduced the concept of \emph{polynomial reduction} in the modified Abramov--Petkov{\v{s}}ek algorithm, which is more efficient and can be used to compute minimal telescopers for bivariate hypergeometric terms. There are two fundamental problems related to telescopers: one is deciding whether telescopers exist for a given function, the other one is designing efficient algorithms for constructing telescopers when they exist. For the existence problem, the bivariate mixed hypergeometric case can be solved via the Ostrogradsky--Hermite reduction \cite{CCFFL2015}, while the trivariate mixed rational case was handled by the extended reduction in \cite{CHLW2016,CDZ2019}. For the construction problem, the Ostrogradsky--Hermite reduction and its variants have been applied in \cite{CHKL2015,Huang2016,CKK2016,CHKK2018,Hoeven2017,Hoeven2018} for bivariate functions, and in \cite{BLS2013,Lairez2016,CHHLW} beyond the bivariate case. Although the method of creative telescoping is a powerful tool in the mechanical proof of combinatorial identities, the reduction itself has rarely been used directly in the verifications. In 2021, Hou, Mu and Zeilberger~\cite{HouMuZeil2021} introduced another \emph{polynomial reduction} process, which can be employed to derive infinite families of supercongruences \cite{HouMuZeil2021} and new hypergeometric identities \cite{HouLi2021}. Recently, Wang and Zhong \cite{WZ2022} generalized the polynomial reduction to the $q$-rational case. This makes it possible to prove and discover $q$-identities automatically. In particular, several $q$-analogues of Ramanujan-type series for $\pi$ were presented. The Hou--Mu--Zeilberger reduction and its variants were all designed for ($q$-)hypergeometric terms. In this paper, we generalize the Hou--Mu--Zeilberger reduction to the holonomic case. This enables us to handle multi-summation identities. As applications, we provide an algorithmic way to prove and discover new series for $\pi$ involving Domb numbers and Franel numbers. This will confirm and generalize some of Z.-W. Sun's conjectures in \cite{Sun2021}. New families of congruence identities on Franel numbers and Delannoy numbers are also obtained. \section{Polynomial reduction for holonomic sequences} Let $\bK$ be a field of characteristic $0$.
A sequence $(F(n))_{n=0}^{\infty}$ is \emph{holonomic} over $\bK$ if there exist polynomials $a_0(n),a_1(n),\ldots,a_J(n)\in\bK[n]$ with $a_J(n)\neq 0$ such that \begin{equation}\label{eq:rec homo} \sum_{i=0}^{J}a_i(n)F(n+i)=0. \end{equation} Or, equivalently, if we define the \emph{annihilator} of $F(n)$ by \[ \ann F(n):=\left\{L=\sum_{i=0}^{J}a_i(n)\sigma^i\in \bK[n][\sigma]\mid L(F(n))=0\right\}, \] where $\sigma$ is the shift operator (that is, $\sigma F(n)=F(n+1)$), then $(F(n))_{n=0}^{\infty}$ is holonomic if and only if $\ann F(n)\neq \{0\}$. We call $J$ in \eqref{eq:rec homo} the order of the recurrence relation for $F(n)$, and the minimum order of all such recurrences is called the \emph{order} of $F(n)$. The class of holonomic sequences covers a great percentage of combinatorial sequences arising in applications. For example, harmonic numbers, Fibonacci numbers, Domb numbers, Franel numbers and all hypergeometric sequences are holonomic. Specifically, given a hypergeometric term $t_n$, there exist polynomials $a(n),b(n)\in\bK[n]$ such that \[\frac{t_{n+1}}{t_n}=\frac{a(n)}{b(n)},\] that is, \[a(n)t_n-b(n)t_{n+1}=0.\] In the polynomial reduction introduced by Hou, Mu and Zeilberger \cite{HouMuZeil2021}, a key step is to characterize such polynomials $p(n)\in\bK[n]$ that the product $p(n)t_n$ is \emph{Gosper-summable}, that is, \[p(n)t_n=\Delta (u(n)t_n),\] for some rational function $u(n)\in\bK(n)$, where $\Delta$ is the difference operator (that is, $\Delta F(n)=\sigma F(n)-F(n)=F(n+1)-F(n)$). It is natural to consider a similar problem in the holonomic case. \begin{prob} Given a holonomic sequence $(F(n))_{n=0}^{\infty}$ satisfying \eqref{eq:rec homo}, for which polynomials $q(n)\in\bK[n]$, the product $q(n)F(n)$ can be written as \begin{equation}\label{eq: Abramov--van-Hoeij} q(n)F(n)=\Delta\left(\sum_{i=0}^{J-1}u_i(n)F(n+i)\right) \end{equation} for some rational functions $u_0(n),u_1(n),\ldots,u_{J-1}(n)\in\bK(n)$? \end{prob} For any operator $L=\sum_{i=0}^{J}a_i(n)\sigma^i$ with $a_i(n)\in\bK[n]$, the \emph{adjoint} of $L$ is defined by \begin{equation}\label{eq:L} L^{\ast}=\sum_{i=0}^{J}\sigma^{-i}a_i(n). \end{equation} Then for any polynomial $p(n)\in\bK[n]$, \begin{equation*}\label{eq:B} L^{\ast}(p(n))=\sum_{i=0}^{J}a_i(n-i)p(n-i). \end{equation*} In 2018, van der Hoeven \cite[Proposition 3.2]{Hoeven2018} derived the following difference Lagrange identity \begin{equation}\label{eq:Lagrange identity} p(n)L(F(n))-L^{\ast}(p(n))F(n)=\Delta \left(\sum_{i=0}^{J-1}u_i(n)F(n+i)\right) \end{equation} by induction, where \begin{equation}\label{eq:u} u_i(n)=\sum_{j=1}^{J-i}a_{i+j}(n-j)p(n-j). \end{equation} Next we provide another proof of \eqref{eq:Lagrange identity} by a direct calculation and the fact \begin{equation}\label{eq:summable reduction} (\sigma^i-1)g(n)=\Delta\left(\sum_{j=0}^{i-1}g(n+j)\right),\quad \forall i>0. \end{equation} \begin{lem}\label{lem: summable} Let $L=\sum_{i=0}^{J}a_i(n)\sigma^i$ with $a_i(n)\in\bK[n]$. Then \eqref{eq:Lagrange identity} holds for any polynomial $p(n)\in\bK[n]$. 
\end{lem} \pf By the definition of $L^{\ast}$ and equality \eqref{eq:summable reduction}, we have \begin{align*} &p(n)L(F(n))-L^{\ast}(p(n))F(n)\\ =&\sum_{i=1}^{J}(\sigma^i-1)(a_i(n-i)p(n-i)F(n))\\ =&\Delta\left(\sum_{i=1}^{J}\sum_{j=0}^{i-1}a_i(n-i+j)p(n-i+j)F(n+j)\right)\\ =&\Delta\left(\sum_{j=0}^{J-1}\sum_{i=j+1}^{J}a_i(n-i+j)p(n-i+j)F(n+j)\right)\\ =&\Delta\left(\sum_{i=0}^{J-1}\sum_{j=i+1}^{J}a_j(n-j+i)p(n-j+i)F(n+i)\right)\\ =&\Delta\left(\sum_{i=0}^{J-1}u_i(n)F(n+i)\right). \end{align*} \qed When $L\in\ann F(n)$, identity \eqref{eq:Lagrange identity} reduces to \begin{equation}\label{eq: summable} L^{\ast}(p(n))F(n)=\Delta \left(-\sum_{i=0}^{J-1}u_i(n)F(n+i)\right). \end{equation} Hence $q(n)=L^{\ast}(p(n))$ is a desired polynomial in $\bK[n]$ such that \eqref{eq: Abramov--van-Hoeij} holds. From equality \eqref{eq: summable} we obtain \begin{equation}\label{eq:rec ann} \sum_{k=0}^{n-1}L^{\ast}(p(k)) F(k) =\left(\sum_{i=0}^{J-1}u_i(0)F(i)\right) -\left(\sum_{i=0}^{J-1}u_i(n)F(n+i)\right), \end{equation} where $u_i$ is defined in \eqref{eq:u}. Taking $n\to\infty$, we usually get \[\sum_{n=0}^{\infty}L^{\ast}(p(n))F(n) =C, \] where $C$ is a constant. From the proof of Lemma \ref{lem: summable}, one can see \eqref{eq:Lagrange identity} and \eqref{eq: summable} still hold for any rational function $p(n)\in\bK(n)$. When the order $J$ in \eqref{eq:rec homo} is minimum, the Abramov--van-Hoeij algorithm \cite{AbramovvanHoeij,KauersPaule2011} ensures that all rational functions $q(n)\in\bK(n)$ such that \eqref{eq: Abramov--van-Hoeij} holds are of the form $L^{\ast}(p(n))$ with $p(n)\in\bK(n)$. In this paper, to make the reduction work, $L^{\ast}(p(n))$ needs to be a polynomial. Abramov \cite{Abramov1989,Abramov1995} characterized the denominator of a rational solution $p(n)$ to a difference equation in the form \[ a_0(n)p(n)+a_1(n-1)p(n-1)+\cdots+a_J(n-J)p(n-J)=b(n), \] where $a_i(n)\in\bK[n]$, $0\leq i\leq J$, and $b(n)\in\bK[n]$. It may happen that $p(n)\in\bK(n)\setminus\bK[n]$ but $L^{\ast}(p(n))\in\bK[n]$. The following lemma shows that this rarely happens. \begin{lem}\label{lem:rational->polynomial} Let $L=\sum_{i=0}^{J}a_i(n)\sigma^i$ with $a_i(n)\in \bK[n],\ 0\leq i\leq J$ and $a_0(n)a_J(n)\neq 0$. If $p(n)\in\bK(n)$ and \begin{equation}\label{eq:coprime} \gcd (a_0(n),a_J(n+i))=1,\quad\forall i\in\bN, \end{equation} then $L^{\ast}(p(n))$ is a polynomial in $\bK[n]$ if and only if $p(n)\in\bK[n]$. \end{lem} \pf Suppose $p(n)=\frac{r(n)}{s(n)}$ with $r(n),s(n)\in\bK[n]$ and $\gcd(r(n),s(n))=1$. If $q(n)=L^{\ast}(p(n))$ is a polynomial in $\bK[n]$, then \begin{align*} q(n) =\sum_{i=0}^{J}a_i(n-i)p(n-i) =\sum_{i=0}^{J}a_i(n-i)\frac{r(n-i)}{s(n-i)}. \end{align*} Multiplying $s(n)s(n-1)\cdots s(n-J)$ on both ends, we obtain \begin{align*} q(n)\prod_{i=0}^{J}s(n-i) =\sum_{i=0}^{J}a_i(n-i)r(n-i)\prod_{\substack{0\leq j\leq J\\j\neq i}} s(n-j). \end{align*} Apparently, $s(n)$ is a divisor of the left hand side, then we must have \begin{align*} s(n)\mid a_0(n)r(n)s(n-1)\cdots s(n-J). \end{align*} From $\gcd(r(n),s(n))=1$ we know that \begin{equation}\label{eq: division1} s(n)\mid a_0(n)s(n-1)\cdots s(n-J). \end{equation} By a similar argument, \[s(n-J)\mid a_J(n-J)s(n)s(n-1)\cdots s(n-J+1),\] namely, \begin{equation}\label{eq: division2} s(n)\mid a_J(n)s(n+J)s(n+J-1)\cdots s(n+1). \end{equation} Suppose that $s(n)$ is not a constant and $t(n)$ is an irreducible factor of $s(n)$. 
Then by \eqref{eq: division1}, if $t(n)\nmid a_0(n)$, then $t(n)\mid s(n-j_1)$ for some $j_1>0$, that is, $t(n+j_1)\mid s(n)$. Again, by \eqref{eq: division1}, if $t(n+j_1)\nmid a_0(n)$, then $t(n+j_1+j_2)\mid s(n)$ for some $j_2>0$. Since $s(n)$ can not have infinitely many distinct factors, there must exist a $j\ge 0$ such that $t(n+j)\mid a_0(n)$. By a similar argument, \eqref{eq: division2} guarantees that there exists an $i\ge 0$ such that $t(n-i)\mid a_J(n)$. Then $\gcd(a_0(n),a_J(n+i+j))$ is not a constant, contradicting \eqref{eq:coprime}. So $s(n)$ must be a constant, namely, $p(n)$ is a polynomial. As the converse is clearly true, this completes the proof. \qed Combining Lemma \ref{lem: summable} and Lemma \ref{lem:rational->polynomial}, we have the following result. \begin{theo}\label{thm:rational to polynomial} Let $L=\sum\limits_{i=0}^{J}a_i(n)\sigma^i\in\ann F(n)$ with $a_0(n)a_J(n)\neq 0$ and $J>0$ the order of $F(n)$. If \begin{equation*} \gcd (a_0(n),a_J(n+i))=1,\quad\forall i\in\bN, \end{equation*} then $q(n)\in\bK[n]$ satisfies \eqref{eq: Abramov--van-Hoeij} if and only if $q(n)=L^{\ast}(p(n))$ for some $p(n)\in\bK[n]$. \end{theo} Now we assume that $p(n)\in\bK[n]$ and try to determine the degree of $L^{\ast}(p(n))$ when $L=\sum\limits_{i=0}^{J}a_i(n)\sigma^i$ is given. To this aim, some notations are needed. Let \begin{equation}\label{eq:d and bk} b_k(n)=\sum_{j=k}^{J}\binom{j}{k}a_{J-j}(n+j-J)\text{ and } d=\max_{0\leq k \leq J} \{\deg b_k(n)-k\}. \end{equation} Note that \[ f(s)=\sum_{k=0}^{J}[n^{d+k}](b_k(n))s^{\underline{k}} \] is a nonzero polynomial in $s$. Here $[n^{d+k}](b_k(n))$ denotes the coefficient of $n^{d+k}$ in $b_k(n)$ and $s^{\underline{k}}$ denotes the falling factorial defined by $s^{\underline{k}}=s(s-1)\cdots (s-k+1)$. Let \begin{equation}\label{eq:nonnegative roots} R_{L}=\{s\in\bN \mid f(s)=0\}. \end{equation} Then $L$ is called \emph{degenerated} if $R_{L}\neq \emptyset$. \begin{lem}\label{lem:degree} Let $L=\sum\limits_{i=0}^{J}a_i(n)\sigma^i$ and $d$ be given by \eqref{eq:d and bk}. Then for any nonzero polynomial $p(n)$, we have \[ \deg L^{\ast}(p(n))\left\{ \begin{array}{ll} < d+\deg p(n), & \hbox{if $L$ is degenerated and $\deg p(n)\in R_{L}$,} \\ =d+\deg p(n), & \hbox{otherwise.} \end{array} \right. \] \end{lem} \proof Let $q(n)=p(n-J)$. Notice that $\sigma=E+\Delta$, where $E$ is the identity map. Then \begin{align}\label{eq: L(p(n))} L^{\ast}(p(n)) & = \sum_{i=0}^{J}a_i(n-i)q(n+J-i) = \sum_{i=0}^{J}a_i(n-i)(E+\Delta)^{J-i}q(n)\nonumber \\ & = \sum_{j=0}^{J}a_{J-j}(n+j-J)(E+\Delta)^{j}q(n)\nonumber \\ &= \sum_{j=0}^{J}a_{J-j}(n+j-J)\sum_{k=0}^{j}\binom{j}{k}\Delta^k (q(n))\nonumber \\ &=\sum_{k=0}^{J}b_k(n)\Delta^k(q(n)), \end{align} where $b_k(n)$ is defined in \eqref{eq:d and bk}. Denote $s:=\deg q(n)=\deg p(n)$. Then $\Delta^k(q(n))$ is a polynomial of degree $s-k$ and $\lc(\Delta^k(q(n)))=\lc(q(n)) s^{\underline{k}}$ if $k\le s$, and $\Delta^k(q(n))=0$ if $k>s$. Here $\lc(f(n))$ denotes the leading coefficient of the polynomial $f(n)$. By equality \eqref{eq: L(p(n))}, we know \[ \deg L^{\ast}(p(n)) \leq \max_{0\leq k\leq J}\{\deg b_k(n)+\deg q(n)-k\}=d+s. \] Noting that $s^{\underline{k}}=0$ if $k>s$, it is easy to see that $\deg L^{\ast}(p(n)) <d+s$ if and only if $ \sum_{k=0}^{J}[n^{d+k}](b_k(n))s^{\underline{k}}=0, $ which means $L$ is degenerated and $s=\deg p(n)\in R_{L}$. \qed With this lemma, we are able to give a precise description of the polynomial reduction process for holonomic sequences. 
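For readers who wish to experiment, the data entering Lemma \ref{lem:degree} are straightforward to compute in a computer algebra system. The following Python/SymPy sketch (given purely for illustration; it is not the implementation used for the computations in this paper) computes $L^{\ast}(p(n))$, the polynomials $b_k(n)$, the integer $d$ and the set $R_L$ from \eqref{eq:d and bk} and \eqref{eq:nonnegative roots}, taking the Fibonacci annihilator $L=\sigma^2-\sigma-1$ as a toy example.
\begin{verbatim}
# Illustrative sketch (not the authors' code): adjoint L^*(p), the b_k(n),
# d, and R_L for an operator L = a_0(n) + a_1(n) sigma + ... + a_J(n) sigma^J.
from sympy import symbols, expand, binomial, Poly, ff, solve, S

n, s = symbols('n s')

def adjoint(a, p):
    # L^*(p)(n) = sum_i a_i(n - i) p(n - i)
    return expand(sum(a[i].subs(n, n - i) * p.subs(n, n - i)
                      for i in range(len(a))))

def d_and_RL(a):
    J = len(a) - 1
    # b_k(n) = sum_{j=k}^{J} binom(j, k) a_{J-j}(n + j - J)
    b = [expand(sum(binomial(j, k) * a[J - j].subs(n, n + j - J)
                    for j in range(k, J + 1))) for k in range(J + 1)]
    d = max(Poly(bk, n).degree() - k for k, bk in enumerate(b) if bk != 0)
    # f(s) = sum_k [n^{d+k}](b_k(n)) * s(s-1)...(s-k+1)
    f = sum(Poly(bk, n).coeff_monomial(n**(d + k)) * ff(s, k)
            for k, bk in enumerate(b) if bk != 0)
    RL = {r for r in solve(f, s) if r.is_integer and r >= 0}
    return d, RL

a = [S(-1), S(-1), S(1)]        # L = sigma^2 - sigma - 1 annihilates Fibonacci
print(d_and_RL(a))              # (0, set()): L is not degenerated
print(adjoint(a, n**2))         # -n**2 - 2*n + 3, of degree d + 2 as predicted
\end{verbatim}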
\emph{The polynomial reduction process}: Let $L=\sum\limits_{i=0}^{J}a_i(n)\sigma^i\in\bK[n][\sigma]$ with $a_J\neq 0$ and \begin{equation}\label{eq:p_s} q_s(n)=L^{\ast}(p_s(n))=\sum_{i=0}^{J}a_i(n-i)p_s(n-i), \end{equation} where $p_s(n)$ is a polynomial in $\bK[n]$ of degree $s\in\bN$. We first consider the case when $L$ is not degenerated. By Lemma \ref{lem:degree}, we know \[ \deg q_s(n)=d+s,\quad \forall s\in\bN, \] where $d$ is defined as \eqref{eq:d and bk}. Then for any polynomial $Q(n)$ of degree $m$ with $m\geq d$, it can be written by the division algorithm as \begin{equation}\label{eq:reduction step} Q(n)=\sum_{s=0}^{m-d}c_{s}q_{s}(n)+\tilde{q}(n), \end{equation} where $c_s\in\bK$ for $0\leq s\leq m-d$ and $\tilde{q}(n)$ is a polynomial of degree less than $d$. When $L$ is degenerated, by Lemma \ref{lem:degree}, \[ \deg q_s(n)=d+s,\quad \forall s\in \bN\setminus R_{L}. \] Then \eqref{eq:reduction step} works well except for the polynomials of degree $d+s$ for $s\in R_{L}$. Thus for any polynomial $Q(n)$ of degree $m$ with $m\geq d$, we can write it as \begin{equation}\label{eq:reduction step1} Q(n)= \sum_{\substack{0\leq s\leq m-d\\s\notin R_{L}}}c_{s}q_{s}(n) +\sum_{\substack{0\leq s\leq m-d\\s\in R_{L}}}c_{s}n^{d+s}+\tilde{q}(n), \end{equation} where $c_s\in\bK$ for $0\leq s\leq m-d$ and $\tilde{q}(n)$ is a polynomial with $\deg\tilde{q}(n)<d$. Equality \eqref{eq:reduction step} (or \eqref{eq:reduction step1}) is called the \emph{polynomial reduction} with respect to $L$ when it is not degenerated (or degenerated). \section{Applications} In this section, we will take Domb numbers, Franel numbers and Delannoy numbers as examples to illustrate how to generate new $\pi$-series and congruence identities algorithmically by the polynomial reduction. \subsection{\texorpdfstring{Generating new $\pi$-series}{Generating new π-series}} The \emph{Domb numbers} are given by \[ \Domb(n)=\sum_{k=0}^n \binom{n}{k}^2\binom{2k}{k}\binom{2(n-k)}{n-k}. \] Chan, Chan and Liu \cite{CCL2004} and Rogers \cite{Rogers2009} derived \begin{equation}\label{eq:Domb -32} \sum_{n=0}^{\infty}(5n+1)\frac{\Domb(n)}{64^n}= \frac{8\sqrt{3}}{3 \pi} \text{ and } \sum_{n=0}^{\infty}(3n+1)\frac{\Domb(n)}{(-32)^n}= \frac{2}{\pi}. \end{equation} The following identity was conjectured by Z.-W. Sun \cite{Sun2021}. We will take it as an example to show how to use the polynomial reduction method to prove new identities from the old. \begin{theo} \begin{equation}\label{eq:-32 and k^2} \sum_{n=0}^{\infty}n^2(n-1)(9n+1)\frac{\Domb(n)}{(-32)^n}= \frac{4}{3\pi}. \end{equation} \end{theo} \pf Let $F(n)=\Domb(n)/(-32)^n$. By Zeilberger's algorithm, we find that $ L=\sum_{i=0}^{2} a_i(n) \sigma^i\in\ann F(n), $ with $a_0(n)=(n+1)^3,a_1(n)=(2n+3) (5 n^2+15n+12)$ and $a_2(n)= 16 (n+2)^3$. Then it is easy to see that \[d=3\text{ and }R_L=\emptyset.\] So $L$ is nondegenerated. By Lemma \ref{lem:degree}, $\deg L^{\ast}(p_s(n))=s+3$ for any polynomial $p_s(n)\in\bK[n]$ of degree $s\ge 0$. Substituting $a_0(n),a_1(n),a_2(n)$ and $F(n)$ into equality \eqref{eq:rec ann} and taking $n\to \infty$ leads to \begin{equation*}\label{eq:final equ} \sum_{n=0}^{\infty} L^{\ast}(p_s(n))F(n)=0. \end{equation*} for any polynomial $p_s(n)$. The polynomial reduction shows that \[ n^2(n-1)(9n+1)=\frac{2}{3}(3n+1)+\frac{1}{3}L^{\ast}(n). 
\]
Multiplying by $\frac{\Domb(n)}{(-32)^n}$ on both sides of the above identity and then summing over $n$ from $0$ to $\infty$, we derive
\[
\sum_{n=0}^{\infty}n^2(n-1)(9n+1)\frac{\Domb(n)}{(-32)^n}= \frac{2}{3}\sum_{n=0}^{\infty}(3n+1)\frac{\Domb(n)}{(-32)^n} =\frac{4}{3\pi}
\]
with the help of identity \eqref{eq:Domb -32}. This completes the proof of \eqref{eq:-32 and k^2}. \qed
The above theorem shows how to algorithmically prove a conjectured identity from a known one. The method can actually confirm all the identities listed in Conjecture 8.2 of \cite{Sun2021} by Z.-W. Sun. However, posing these conjectures in the first place requires genuine intuition, insight, experience and hard work. For the rest of this paper, we are going to give an algorithmic way of generating new identities from old ones, which does not depend on intuition or experience at all. For illustration, we take the following $\pi$-series involving Domb numbers as a starting point:
\begin{equation}\label{eq:Domb}
\sum_{n=0}^{\infty}(un+v)\frac{\Domb(n)}{m^n}= \frac{\lambda}{\pi}.
\end{equation}
\begin{theo}\label{th: Domb general identities}
Suppose an identity of the form \eqref{eq:Domb} holds for some nonzero $m\in\bZ$ and $u,v\in\bQ$. Then for any nonconstant polynomial $P(n)\in\bQ[n]$, one can find a nonzero polynomial $Q(n)\in\bQ[n]$ with $\deg Q(n)\leq 2$ and a constant $c\in\bQ$ such that
\begin{equation}\label{eq:Domb -32 generalized}
\sum_{n=0}^{\infty}P(n) Q(n)\frac{\Domb(n)}{m^n}=\frac{c\lambda}{\pi}.
\end{equation}
\end{theo}
\pf Let $F(n)=\Domb(n)/m^n$. By Zeilberger's algorithm, we find that $L=\sum_{i=0}^{2} a_i(n) \sigma^i\in\ann F(n)$, with $a_0(n)=64(n+1)^3$, $a_1(n)=-2m(2n+3)(5n^2+15n+12)$ and $a_2(n)=m^2(n+2)^3$. Let $d, R_L$ be defined by \eqref{eq:d and bk} and \eqref{eq:nonnegative roots}. Then it is easy to see that
\[d=3\text{ and }R_L=\emptyset \text{ for } m\notin \{4,16\},\]
while
\[d=2\text{ and }R_L=\emptyset \text{ for } m\in \{4,16\}.\]
So $L$ is not degenerated. Then by Lemma \ref{lem:degree}, $\deg L^{\ast}(p_s(n))=s+d$ for any polynomial $p_s(n)\in\bK[n]$ of degree $s\ge 0$. Substituting the above $a_0(n),a_1(n),a_2(n)$ and $F(n)$ into equality \eqref{eq:rec ann} and taking $n\to \infty$ leads to
\begin{equation*}
\sum_{n=0}^{\infty} L^{\ast}(p_s(n))F(n) =0
\end{equation*}
for any polynomial $p_s(n)$ of degree $s$. In the following, we take $p_s(n)=n^s$. For any non-constant polynomial $P(n)$, let $\ell=\deg P(n)>0$. Suppose $Q(n)=e_0+e_1n+e_2n^2$ with indeterminates $e_i$, $i=0,1,2$. Now solve the equation
\begin{equation}\label{eq:general equ}
P(n)Q(n)=c(un+v)+c_0 L^{\ast}(n^0)+c_1 L^{\ast}(n^1)+\cdots +c_{\ell} L^{\ast}(n^{\ell})
\end{equation}
for the indeterminates $e_0,e_1,e_2,c,c_0,\ldots,c_{\ell}$ in $\bQ$. By comparing the coefficients of $n^k$ on both sides for $k=0,1,\ldots,\ell+3$, we get a system of $\ell+4$ homogeneous linear equations in the $\ell+5$ indeterminates, so there must be a nonzero solution for $e_0,e_1,e_2,c,c_0,\ldots,c_{\ell}$. Notice that $\deg L^{\ast}(n^s)=s+d\geq 2$ for every $s\geq 0$, so the polynomials $un+v,L^{\ast}(n^0),L^{\ast}(n^1),\ldots,L^{\ast}(n^{\ell})$ have pairwise distinct degrees and are linearly independent over $\bQ$. Therefore, $e_0,e_1,e_2$ cannot all be zero, that is, there is a nonzero polynomial $Q(n)\in\bQ[n]$ with $\deg Q(n)\le 2$ such that \eqref{eq:general equ} holds. Multiplying by $F(n)$ on both sides of \eqref{eq:general equ} and then summing over $n$ from $0$ to $\infty$, we obtain
\[
\sum_{n=0}^{\infty} P(n)Q(n)\frac{\Domb(n)}{m^n} = c\sum_{n=0}^{\infty} (un+v)\frac{\Domb(n)}{m^n} =\frac{c\lambda}{\pi}.
\]
\qed
Using the method in the proof of Theorem \ref{th: Domb general identities}, one can not only confirm Z.-W.
Sun's Conjecture 8.2 in \cite{Sun2021}, but also generate as many new identities as one likes. As an example, we now rederive \eqref{eq:-32 and k^2} with our method. Taking $m=-32$, one can see that $L=\sum_{i=0}^{2} a_i(n) \sigma^i\in\ann \frac{\Domb(n)}{(-32)^n}$, where $a_0(n)=(n+1)^3$, $a_1(n)=(2n+3)(5n^2+15n+12)$ and $a_2(n)= 16 (n+2)^3$. Let $P(n)=n^2$ and solve
\begin{equation}\label{eq:-32 ex}
P(n)(e_0+e_1n+e_2n^2)=c(3n+1)+c_0 L^{\ast}(n^0)+c_1 L^{\ast}(n^1).
\end{equation}
We find that for any $c\in\bQ$,
\[
c_0=0,\ c_1=c/2,\ e_0= -3 c/2,\ e_1= -12 c,\ e_2=27 c/2
\]
is a solution of \eqref{eq:-32 ex}. Taking $c=2/3$, we arrive at
\[
n^2(n-1)(9n+1)=\frac{2}{3}(3n+1)+\frac{1}{3}L^{\ast}(n^1).
\]
Multiplying by $\frac{\Domb(n)}{(-32)^n}$ on both sides of the above identity and then summing over $n$ from $0$ to $\infty$, we derive \eqref{eq:-32 and k^2}. Different choices of $P(n)$ may lead to different identities; for example, we obtain
\[
\sum_{n=0}^{\infty}(n^2+n+1)(126n^2+41n+5)\frac{\Domb(n)}{(-32)^n}= -\frac{100}{3\pi}
\]
by taking $P(n)=n^2+n+1$. From the discussion above, one can see how the polynomial reduction may be applied to holonomic sequences. Here are more examples. The \emph{Franel numbers} and \emph{Franel numbers of order $4$} are defined respectively by
\begin{equation*}\label{eq:franel}
f_n=\sum_{k=0}^{n} \binom{n}{k}^3 \text{ and } f_n^{(4)}=\sum_{k=0}^{n} \binom{n}{k}^4.
\end{equation*}
Many series for $\pi$ involving $f_n$ and $f_n^{(4)}$ are obtained via modular forms in \cite{Cooper2012, CTYZ2011}. These series are of the form
\begin{equation}\label{eq:Franel}
\sum_{n=0}^{\infty} (un+v)\frac{A(n)}{m^n}=\frac{\lambda\sqrt{\alpha}}{\pi},
\end{equation}
where $A(n)=f_n^{(4)}$ or $A(n)=\binom{2n}{n}f_n$, $u,v,m\in\bZ$, $\lambda\in\bQ$ with $\lambda um\neq 0$ and $\alpha$ is a positive integer. One can check that $A(n)$ satisfies a recurrence relation of order $2$. Similar to the proof of Theorem \ref{th: Domb general identities}, we obtain the following result.
\begin{theo}\label{th: franel general identities}
Suppose an identity of the form \eqref{eq:Franel} holds for some $u,v,m\in\bZ$ with $m\neq 0$. Then for any nonconstant polynomial $P(n)\in\bQ[n]$, one can find a nonzero polynomial $Q(n)\in\bQ[n]$ with $\deg Q(n)\leq 2$ and a constant $c\in\bQ$ such that
\begin{equation}\label{eq:franel generalized}
\sum_{n=0}^{\infty}P(n) Q(n)\frac{A(n)}{m^n}=\frac{c\lambda\sqrt{\alpha}}{\pi}.
\end{equation}
\end{theo}
\begin{exm}
We can prove the following conjectural identity of Z.-W. Sun~\cite[Conjecture 8.3]{Sun2021}
\begin{align}\label{eq:fk5776}
\sum_{n=1}^{\infty} &n^3(47808294003072n^2-102482715691400n +52422407372915)\frac{f_n^{(4)}}{5776^n}\nonumber\\ &=-\frac{122626206796\sqrt{95}}{625\pi}
\end{align}
utilizing the polynomial reduction and the identity
\[
\sum_{n=0}^{\infty} (408n+47)\frac{f_n^{(4)}}{5776^n} =\frac{1444\sqrt{95}}{95\pi}
\]
derived by Cooper~\cite{Cooper2012}. Taking $m=5776$, one can see that $L=\sum_{i=0}^{2} a_i(n) \sigma^i\in\ann\frac{f_n^{(4)}}{5776^n}$, where $a_0(n)=-(n+1)(4n+3)(4n+5)$, $a_1(n)=-2888(2n+3)(3n^2+9n+7)$ and $a_2(n)= 8340544 (n+2)^3$. Let $P(n)=n^3$ and solve
\begin{equation}\label{eq:5776 ex}
P(n)(e_0+e_1n+e_2n^2)=c(408n+47)+c_0 L^{\ast}(n^0)+c_1 L^{\ast}(n^1)+c_2 L^{\ast}(n^2).
\end{equation}
For any $c\in\bQ$, let $d=c/1613502721$. One can check that $c_0=-590794567 d$, $c_1=-1338119121 d$, $c_2=-717997495 d$, $e_0= -6552800921614375 d$, $e_1= 12810339461425000 d$, $e_2=-5976036750384000 d$ is a solution of \eqref{eq:5776 ex}.
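Such a claim amounts to a finite polynomial-identity check, which is easily carried out in a computer algebra system. The following short Python/SymPy sketch (ours, included for illustration only) confirms the stated solution of \eqref{eq:5776 ex}; the variable \texttt{dd} plays the role of $d=c/1613502721$ above.
\begin{verbatim}
# Illustrative check (not the authors' code) that the stated values solve
# equation (eq:5776 ex) for every rational c.
from sympy import symbols, expand, simplify

n, c = symbols('n c')
dd = c / 1613502721

a = [-(n + 1)*(4*n + 3)*(4*n + 5),              # a_0(n)
     -2888*(2*n + 3)*(3*n**2 + 9*n + 7),        # a_1(n)
     8340544*(n + 2)**3]                        # a_2(n)

def Lstar(p):
    # L^*(p)(n) = a_0(n) p(n) + a_1(n-1) p(n-1) + a_2(n-2) p(n-2)
    return sum(a[i].subs(n, n - i) * p.subs(n, n - i) for i in range(3))

c0, c1, c2 = -590794567*dd, -1338119121*dd, -717997495*dd
e0, e1, e2 = -6552800921614375*dd, 12810339461425000*dd, -5976036750384000*dd

lhs = n**3 * (e0 + e1*n + e2*n**2)
rhs = c*(408*n + 47) + c0*Lstar(n**0) + c1*Lstar(n**1) + c2*Lstar(n**2)
print(simplify(expand(lhs - rhs)))              # 0
\end{verbatim}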
Take $c=-1613502721/125$. Multiplying by $\frac{f_n^{(4)}}{5776^n}$ on both sides of \eqref{eq:5776 ex} and then summing over $n$ from $0$ to $\infty$, we derive \eqref{eq:fk5776}. \end{exm} Similarly, we can confirm all those identities in Conjecture 8.3 and Conjecture 8.4 (\romannumeral1) of Z.-W. Sun's paper \cite{Sun2021}. \subsection{Generating new congruence identities} In this subsection, we will show the polynomial reduction method can also be applied to prove and discover new families of congruence identities. We will take the Franel numbers $f_k$ and the central Delannoy numbers $D_k$ as examples to reveal the process. In 2013, Z.-W. Sun \cite{Sun2013} initiated the systematic investigation of fundamental congruences of Franel numbers. Many interesting congruences are obtained, for example, for any prime $p>3$ there hold \begin{align} &\sum_{k=0}^{p-1}(-1)^k f_k \equiv \left(\frac{p}{3}\right)\pmod {p^2}, \label{eq:franel 0}\\ &\sum_{k=0}^{p-1}k(-1)^k f_k \equiv -\frac{2}{3}\left(\frac{p}{3}\right)\pmod {p^2} , \label{eq:franel 1}\\ &\sum_{k=0}^{p-1}k^2(-1)^k f_k \equiv \frac{10}{27}\left(\frac{p}{3}\right)\pmod {p^2}. \label{eq:franel 2} \end{align} Here $\left(\dfrac{a}{p}\right)$ denotes the Legendre symbol. Later V.J.W. Guo \cite{Guo2013} confirmed the following two conjectures by Z.-W. Sun \cite{Sun2013}, \begin{align} & \sum_{k=0}^{n-1}(3k+2)(-1)^k f_k\equiv 0 \pmod{2n^2},\label{eq:franel 3k+2}\\ & \sum_{k=0}^{p-1}(3k+2)(-1)^k f_k\equiv 2p^2(2^p-1)^2 \pmod{p^5}.\label{eq:franel 3k+2 mod p} \end{align} Recently, Wang and Sun \cite{WangSun2019} derived more divisibility results on Franel numbers like \begin{align} &9\sum_{k=1}^{n}k^2(3k+1)(-1)^k f_k \equiv 0\pmod {2n^2(n+1)^2}, \label{eq:franel 3-1}\\ &3\sum_{k=1}^{n}(9k^3-15k^2-10k)(-1)^k f_k \equiv 0\pmod {4n(n+1)^2} \label{eq:franel 3-2}. \end{align} In 2021, by telescopings of $P$-recursive sequences, Hou and Liu \cite{HouLiu2021} found \begin{equation}\label{eq:franel 3k+2 D} 3\sum_{k=0}^{n-1}(3k+2)(-1)^k f_k= n^2((-1)^n f_n+8(-1)^{n-1} f_{n-1}), \end{equation} which reproves \eqref{eq:franel 3k+2} since \begin{align} & (-1)^nf_n\equiv \sum_{k=0}^{n}\binom{n}{k} \equiv 2^n \equiv 0\pmod 2, n\geq 1, \label{eq:franel mod2}\\ & (-1)^nf_n\equiv (-1)^n\sum_{k=0}^{n}\binom{n}{k} \equiv (-2)^n \equiv 1\pmod 3.\nonumber \end{align} When $n=p>3$ is a prime in \eqref{eq:franel 3k+2 D}, direct calculations lead to equality \eqref{eq:franel 3k+2 mod p} since $f_p\equiv 2 \pmod {p^3}$ and \[ f_{p-1}\equiv 1+3(2^{p-1}-1)+3(2^{p-1}-1)^2 \pmod {p^3}, \] as proved by Z.-W. Sun \cite{Sun2013}. Next, we will generalize \eqref{eq:franel 3k+2 D} to having a polynomial part of any claimed degree $d>0$ instead of $3(3k+2)$ in the summation. Then one can see all congruence identities in \eqref{eq:franel 1}--\eqref{eq:franel 3-2} can be proved uniformly by the polynomial reduction method. Let $F(k)=(-1)^kf_k=(-1)^k\sum_{i=0}^{k}\binom{k}{i}^3$. By Zeilberger's algorithm, we find that \begin{equation}\label{eq:L Franel} L=(k+2)^2\sigma^2+(7k^2+21k+16)\sigma-8(k+1)^2\in \ann F(k). \end{equation} \begin{theo}\label{th:Franel cong} Let $L$ be as in \eqref{eq:L Franel} and $n$ a positive integer. Then \begin{equation}\label{eq: Franel D} \sum_{k=0}^{n-1} L^{\ast}(p(k))(-1)^kf_k =-n^2(p(n-2)F(n)+8p(n-1)F(n-1)). \end{equation} for any polynomial $p(k)\in\bZ[k]$. Here $L^{\ast}$ is the adjoint of $L$. 
\end{theo} \pf By Equality \eqref{eq:rec ann} and the fact $u_0(0)F(0)+u_1(0)F(1)=0$, we have \begin{equation}\label{eq:Franel Delta} \sum_{k=0}^{n-1}L^{\ast}(p(k)) F(k) = -\left(u_0(n)F(n)+u_1(n)F(n+1)\right), \end{equation} where $u_0(n)=n^2p(n-2)+(7n^2+7n+2)p(n-1)$ and $u_1(n)=(n+1)^2p(n-1)$. As $L\in\ann F(k)$, it is straightforward to check that for any $n\geq 1$ \begin{equation}\label{eq:shift Franel} (n+1)^2F(n+1)=8n^2F(n-1)-(7n^2+7n+2)F(n). \end{equation} Substituting \eqref{eq:shift Franel} into \eqref{eq:Franel Delta} derives \eqref{eq: Franel D}. \qed Theorem \ref{th:Franel cong} together with \eqref{eq:franel mod2} lead to the following corollary. \begin{coro}\label{coro:Franel cong} Let $L$ be as in \eqref{eq:L Franel}. Then \begin{equation}\label{eq:Franel cong} \sum_{k=0}^{n-1} L^{\ast}(p(k))(-1)^kf_k \equiv 0 \pmod {2n^2} \end{equation} for any polynomial $p(k)\in\bZ[k]$. \end{coro} Since $3(3k+2)=-L^{\ast}(1)$, by \eqref{eq: Franel D} we have \begin{equation*} 3\sum_{k=0}^{n-1}(3k+2)(-1)^k f_k= -\sum_{k=0}^{n-1}L^{\ast}(1)(-1)^k f_k= n^2(F(n)+8F(n-1)), \end{equation*} which is exactly \eqref{eq:franel 3k+2 D}. Equalities \eqref{eq:franel 3-1} and \eqref{eq:franel 3-2} can be proved by \eqref{eq: Franel D} and the observation that $27k^2(3k+1)=-L^{\ast}(1)-3L^{\ast}(k^2)$ and $9(9k^3-15k^2-10k)=-4L^{\ast}(1)+9L^{\ast}(k)-3L^{\ast}(k^2). $ By \eqref{eq:franel 0} and the decompositions \[ k=-\frac{2}{3}-\frac{1}{9}L^{\ast}(1) \quad\text{and}\quad k^2=\frac{10}{27}+\frac{13}{162}L^{\ast}(1)-\frac{1}{18}L^{\ast}(k), \] \eqref{eq:franel 1} and \eqref{eq:franel 2} can also be confirmed. In fact, when $p(k)\in\bZ[k]$ is a polynomial of degree $s\in\bN$, since $L$ in \eqref{eq:L Franel} is not degenerated, we know $\deg L^{\ast}(p(k))=s+1$. \begin{coro} For any positive integer $d$, we can find a polynomial $q(k)\in\bZ[k]$ with $\deg q(k)=d$ such that \[ \sum_{k=0}^{n-1} q(k)(-1)^kf_k \equiv 0 \pmod {2n^2}. \] \end{coro} The polynomial reduction method also applies to other holonomic sequences. Let us exhibit with one more example. The central Delannoy numbers $D_k$ are defined by \[ D_k=\sum_{i=0}^{k}\binom{k}{i}\binom{k+i}{i}. \] Zeilberger's algorithm leads to \begin{equation}\label{eq:L Delannoy} L=(k+2)\sigma^2+(-6k-9)\sigma+(k+1)\in \ann D_k. \end{equation} Then by a similar argument to the proof of Theorem \ref{th:Franel cong}, we have \begin{theo}\label{th:Delannoy cong} Let $L$ be as in \eqref{eq:L Delannoy}. Then \begin{equation}\label{eq:Delannoy Delta} \sum_{k=0}^{n-1} L^{\ast}(p(k))D_k =n(p(n-1)D_{n-1}-p(n-2)D_n). \end{equation} for any polynomial $p(k)\in\bZ[k]$. \end{theo} When $p(k)=1$, equality \eqref{eq:Delannoy Delta} becomes \[ \sum_{k=0}^{n-1} (4k+2)D_k =n(D_{n}-D_{n-1}), \] which was first observed by C. Wang (private communication). \begin{coro}\label{coro:Delannoy cong} Let $L$ be as in \eqref{eq:L Delannoy}. Then for any polynomial $p(k)\in\bZ[k]$, we have \begin{equation}\label{eq:Delannoy cong} \sum_{k=0}^{n-1} L^{\ast}(p(k))D_k \equiv 0 \pmod {n}. \end{equation} \end{coro} \noindent \textbf{Acknowledgments.} This work was supported by the National Natural Science Foundation of China (No. 12101449, 11701419, 11871067). \begin{thebibliography}{10} \bibitem{Abramov1975} S.A. Abramov. \newblock The rational component of the solution of a first order linear recurrence relation with rational right hand side. {\em U.S.S.R. Comput. Math. Math. Phys.}, 15(1975), 216--221. \bibitem{Abramov1989} S.A. Abramov. 
\newblock Rational solutions of linear differential and difference equations with polynomial coefficients. {\em U.S.S.R. Comput. Math. Math. Phys.}, 29(1989), 7--12. \bibitem{Abramov1995} S.A. Abramov. \newblock Rational solutions of linear difference and $q$-difference equations with polynomial coefficients. \newblock In {\em ISSAC '95}, pages 285--289, 1995. ACM. \bibitem{AbramovvanHoeij} S.A. Abramov and M. van Hoeij. \newblock Integration of solutions of linear functional equations. {\em Integral Transforms Spec. Funct.}, 8(1999), 3--12. \bibitem{AP2001} S.A. Abramov and M. Petkov{\v{s}}ek. \newblock Minimal decomposition of indefinite hypergeometric sums. \newblock In {\em ISSAC '01}, pages 7--14, 2001. ACM. \bibitem{AP2002} S.A. Abramov and M. Petkov{\v{s}}ek. \newblock Rational normal forms and minimal decompositions of hypergeometric terms. \newblock {\em J. Symbolic Compt.}, 33(2002), 521--543. \bibitem{BCCL2010} A. Bostan, S. Chen, F. Chyzak and Z. Li. \newblock Complexity of creative telescoping for bivariate rational functions. \newblock In {\em ISSAC '10}, pages 203--210, 2010. ACM. \bibitem{BCLS2018} A. Bostan, F. Chyzak, P. Lairez and B. Salvy. \newblock Generalized Hermite reduction, creative telescoping and definite integration of D-finite functions. \newblock In {\em ISSAC '18}, pages 95--102, 2018. ACM. \bibitem{BLS2013} A. Bostan, P. Lairez and B. Salvy. \newblock Creative telescoping for rational functions using the Griffiths--Dwork method. \newblock In {\em ISSAC '13}, pages 93--100, 2013. ACM. \bibitem{CCL2004} H.H. Chan, S.H. Chan and Z. Liu. \newblock Domb's numbers and Ramanujan--Sato type series for $1/\pi$. \newblock {\em Adv. Math.}, 186(2004), 396--410. \bibitem{CTYZ2011} H.H. Chan, Y. Tanigawa, Y. Yang and W. Zudilin. \newblock New analogues of Clausen's identities arising from the theory of modular forms. \newblock {\em Adv. Math.}, 228(2011), 1294--1314. \bibitem{CCFFL2015} S. Chen, F. Chyzak, R. Feng, G. Fu and Z. Li. \newblock On the existence of telescopers for mixed hypergeometric terms. \newblock {\em J. Symbolic Comput.}, 68(2015), 1--26. \bibitem{CDZ2019} S. Chen, L. Du and C. Zhu. \newblock Existence problem of telescopers for rational functions in three variables: the mixed cases. \newblock In {\em ISSAC '19}, pages 82--89, 2019. ACM. \bibitem{CHKK2018} S. Chen, M. van Hoeij and M. Kauers. \newblock Reduction-based creative telescoping for fuchsian D-finite functions. \newblock {\em J. Symbolic Comput.}, 85(2018), 108--127. \bibitem{CHHLW} S. Chen, Q.-H. Hou, H. Huang, G. Labahn and R.-H. Wang. \newblock Constructing minimal telescopers for rational functions in three discrete variables. \newblock {\em Adv. in Appl. Math.}, to appear. \bibitem{CHLW2016} S. Chen, Q.-H. Hou, G. Labahn and R.-H. Wang. \newblock Existence problem of telescopers: beyond the bivariate case. \newblock In {\em ISSAC '16}, pages 167--174, 2016. ACM. \bibitem{CHKL2015} S. Chen, H. Huang, M. Kauers and Z. Li. \newblock A modified Abramov-Petkov\v{s}ek reduction and creative telescoping for hypergeometric terms. \newblock In {\em ISSAC '15}, pages 117--124, 2015. ACM. \bibitem{CKK2016} S. Chen, M. Kauers and C. Koutschan. \newblock Reduction-based creative telescoping for algebraic functions. \newblock In {\em ISSAC '16}, pages 175--182, 2016. ACM. \bibitem{Cooper2012} S. Cooper. \newblock Level $10$ analogues of Ramanujan's series for $1/\pi$. \newblock {\em J. Ramanujan Math. Soc.}, 27(2012), 59--76. \bibitem{Guo2013} V.J.W. Guo. 
\newblock Proof of two conjectures of Sun on congruences for Franel numbers. \newblock {\em Integral Transforms Spec. Funct.}, 24(2013), 532--539. \bibitem{Hermite1872} C. Hermite. \newblock Sur l’intégration des fractions rationnelles. \newblock {\em Ann. Sci. École Norm. Sup. (2)}, 1(1872), 215--218. \bibitem{Hoeven2018} J. van der Hoeven. \newblock Creative telescoping using reductions. \newblock {\em Preprint:hal-01773137v2}, June 2018. \bibitem{Hoeven2017} J. van der Hoeven. \newblock Constructing reductions for creative telescoping. \newblock {\em Appl. Algebra Engrg. Comm. Comput.}, 32(2021), 575--602. \bibitem{HouLi2021} Q.-H. Hou and G.-J. Li. \newblock Gosper summability of rational multiples of hypergeometric terms. \newblock {\em J. Difference Equ. Appl.}, 27(2021), 1723--1733. \bibitem{HouLiu2021} Q.-H. Hou and K. Liu. \newblock Congurences and telescopings of $P$-recursive sequences. \newblock {\em J. Difference Equ. Appl.}, 27(2021), 686--697. \bibitem{HouMuZeil2021} Q.-H. Hou, Y.-P. Mu and D. Zeilberger. \newblock Polynomial reduction and supercongruences. \newblock {\em J. Symbolic Comput.}, 103(2021), 127--140. \bibitem{Huang2016} H. Huang. \newblock New bounds for creative telescoping. \newblock In {\em ISSAC '16}, pages 279–286, 2016. ACM. \bibitem{KauersPaule2011} M. Kauers and P. Paule. \newblock {\em The Concrete Tetrahedron}. \newblock Springer Wien, 2011. \bibitem{Lairez2016} P. Lairez. \newblock Computing periods of rational integrals. \newblock {\em Math. Comp.}, 85(2016), 1719--1752. \bibitem{O1845} M. Ostrogradsky. \newblock De l'integration des fractions rationelles. \newblock {\em Bull. de la Classe Physico--Mathématique de l’Acad. Impériale des Sciences de St.-Pétersbourg}, 4(1845), 145--167, 286--300. \bibitem{Rogers2009} M.D. Rogers. \newblock New $_5F_4$ hypergeometric transformations, three-variable Mahler measures, and formulas for $1/\pi$. \newblock {\em Ramanujan J.}, 18(2009), 327--340. \bibitem{Sun2013} Z.-W. Sun. \newblock Congruences for Franel numbers. \newblock {\em Adv. in Appl. Math.}, 51(2013), 524--535. \bibitem{Sun2021} Z.-W. Sun. \newblock New type series for powers of $\pi$. \newblock arXiv:2110.03651. \bibitem{Trager1984} B.M. Trager. \newblock Integration of Algebraic Functions. \newblock {\em Ph.D. thesis, MIT}, 1984. \bibitem{WangSun2019} C. Wang and Z.-W. Sun. \newblock Divisility results on Franel numbers and related polynomials. \newblock {\em Int. J. Number Theory}, 15(2019), 433--444. \bibitem{WZ2022} R.-H. Wang and M.X.X. Zhong. \newblock $q$-Rational reduction and $q$-analogues of series for $\pi$. \newblock arXiv:2203.16047. \bibitem{WZ1990} H.S. Wilf and D. Zeilberger. \newblock An algorithmic proof theory for hypergeometric (ordinary and ``$q$'') multisum/integral identities. \newblock {\em Invent. Math.}, 108(1992), 575--633. \bibitem{Zeilberger1990c} D. Zeilberger. \newblock A fast algorithm for proving terminating hypergeometric identities. \newblock {\em Discrete Math.}, 80(1990), 207--211. \bibitem{Zeil1990} D. Zeilberger. \newblock A holonomic systems approach to special function identities. \newblock {\em J. Comput. Appl. Math.}, 32(1990), 321–368. \bibitem{Zeilberger1991} D. Zeilberger. \newblock The method of creative telescoping. \newblock {\em J. Symbolic Comput.}, 11(1991), 195--204. \end{thebibliography} \end{document}
2205.11120v1
http://arxiv.org/abs/2205.11120v1
On the triple point number of surface-links in Yoshikawa's table
\documentclass[11pt,oneside]{amsart} \usepackage[margin=1in]{geometry} \usepackage{amssymb, amsmath} \usepackage{float} \usepackage{pdfpages} \usepackage{epstopdf} \usepackage{comment} \usepackage{booktabs} \usepackage{graphicx} \usepackage{color} \usepackage{pinlabel} \usepackage{mathtools,xparse} \usepackage{tikz} \usetikzlibrary{arrows,positioning,shapes,fit,calc} \usepackage{enumerate} \usepackage[colorlinks = true, linkcolor = red, urlcolor = red, citecolor = red, anchorcolor = red]{hyperref} \usepackage{amsrefs} \usepackage[pagewise]{lineno} \usepackage{pinlabel} \theoremstyle{plain} \newtheorem{theorem}{Theorem}[section] \newtheorem{case}{Case} \newtheorem*{theorem*}{Theorem} \newtheorem*{maintheorem-intro}{Theorem} \newtheorem*{maintheorem-intro-2}{Theorem~\ref{Bridge number and genus}} \newtheorem*{theorem-cablingconj}{Theorem~\ref{apps1} (1)} \newtheorem*{theorem-toroidal}{Specialization of Theorem~\ref{apps1} (2)} \newtheorem*{theorem-lens}{Theorem~\ref{Bounding distance - special}(1)} \newtheorem*{theorem-SFS}{Theorem~\ref{Bounding distance - special}(2)} \newtheorem*{theorem-cosmetic}{Theorem} \newtheorem*{theorem-bridge}{Specialization of Corollary~\ref{Cor: exceptional bridge}} \newtheorem*{theorem-Heeggenus}{Corollary~\ref{Cor: exceptional bridge} (2)} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{lemma}[theorem]{Lemma} \theoremstyle{definition} \newtheorem{remark}[theorem]{Remark} \newtheorem{definition}[theorem]{Definition} \newtheorem{claim}{Claim} \newtheorem*{assump}{Assumption} \newtheorem*{question}{Question} \newtheorem*{example}{Example} \newtheorem*{conjecture}{Conjecture} \theoremstyle{definition} \newcommand{\R}{{\mathbb R}} \newcommand{\C}{\mathbb C} \newcommand{\Z}{\mathbb Z} \newcommand{\N}{\mathbb N} \newcommand{\Q}{\mathbb Q} \newcommand{\lat}{\mathbb{L}^3} \newcommand{\Kappa}{\mathcal K} \newcommand{\M}[1]{\textbf{M}$_{#1}$} \newcommand{\U}{\mathcal{U}} \newcommand{\V}{\Upsilon} \newcommand{\hrn}{\mathcal{H}(\mathbb{R}^n)} \newcommand{\RP}{\mathbb{RP}} \newcommand{\bi}{\begin{itemize}} \newcommand{\ei}{\end{itemize}} \newcommand{\be}{\begin{enumerate}} \newcommand{\ee}{\end{enumerate}} \newcommand{\Rn}{\mathbb{R}^n} \newcommand{\n}{\beta} \newcommand{\B}{\mathfrak{B}} \newcommand{\G}{\mathcal{G}} \newcommand{\emp}{\emptyset} \newcommand{\X}{\times} \newcommand{\W}{\omega} \newcommand{\xn}{\{x_n\}} \newcommand{\xnk}{\{x_{n_k}\}} \newcommand{\eps}{\epsilon} \newcommand{\F}{\mathscr{F}} \newcommand{\st}{\text{ $|$ }} \newcommand{\la}{\langle} \newcommand{\ra}{\rangle} \newcommand{\simd}{\bigtriangleup} \newcommand{\A}{\alpha} \newcommand{\Img}{\text{Im }} \newcommand{\Ker}{\text{Ker }} \newcommand{\Mm}{\mathfrak{M}} \newcommand{\Tr}{\text{Tr}} \newcommand{\Wa}{\text{waist}} \newcommand{\Tau}{\mathcal{T}} \newcommand{\Sau}{\mathcal{S}} \newcommand{\FF}{\mathcal{F}} \newcommand{\cut}{\, | \, } \newcommand{\Pa}{\mathcal{P}} \newcommand{\cci}{\overline{\chi}} \newcommand{\spa}{\text{span}} \newcommand{\pd}{\partial} \newcommand{\ins}{\epsilon} \newcommand{\up}{\uparrow} \newcommand{\dn}{\downarrow} \newcommand{\nil}{\varnothing} \newcommand{\tild}{\widetilde} \newcommand{\wihat}{\widehat} \newcommand{\defn}[1]{\emph{#1}} \newcommand{\bdd}{\partial} \newcommand{\boundary}{\partial} \newcommand{\iso}{\cong} \newcommand{\homeo}{\approx} \newcommand{\mc}[1]{\mathcal{#1}} \newcommand{\ob}[1]{\overline{#1}} \newcommand{\genus}{\operatorname{genus}} \newcommand{\interior}{\operatorname{int}} 
\newcommand{\inter}[1]{\mathring{#1}} \newcommand{\slide}[1]{{\curvearrowright \atop #1}} \newcommand{\too}{\mathbf{\rightharpoonup}} \newcommand{\goodCurves}{\mathbb{P}_\mc{C}} \newcommand{\goodArcs}{\mathbb{P}_{\mc{AC}}} \newcommand{\co}{\mskip0.5mu\colon\thinspace} \def\hyph{-\penalty0\hskip0pt\relax} \newcommand{\marion}[1]{$\stackrel{\spadesuit}{\textcolor{blue}{\text{M}}}$ {\textcolor{blue}{#1}}} \newcommand{\nicholas}[1]{\textcolor{red}{N: #1}} \usepackage[abs]{overpic} \begin{document} \title[On the Triple Point Number of Surface-Links in Yoshikawa's Table]{On the Triple Point Number of Surface-Links in Yoshikawa's Table} \author{Nicholas Cazet} \begin{abstract} Yoshikawa made a table of knotted surfaces in $\mathbb{R}^4$ with ch-index 10 or less. This remarkable table is the first to enumerate knotted surfaces analogous to the classical prime knot table. A broken sheet diagram of a surface-link is a generic projection of the surface in $\mathbb{R}^3$ with crossing information along its singular set. The minimal number of triple points among all broken sheet diagrams representing a given surface-knot is its triple point number. This paper compiles the known triple point numbers of the surface-links represented in Yoshikawa's table and calculates or provides bounds on the triple point number of the remaining surface-links. \end{abstract} \maketitle \section{Introduction} A {\it surface-link} is a smoothly embedded closed surface in $\mathbb{R}^4$. Two surface-links are {\it equivalent} if they are related by a smooth ambient isotopy. A surface-link diffeomorphic to a 2-sphere is called a {\it 2-knot}, and a surface-link with only 2-sphere components each bounding a 3-ball disjoint from the other is called a {\it trivial 2-link}. A surface-link $F$ is called {\it ribbon} if there is a trivial 2-link $\mathcal{O}$ and a collection of embedded (3-dimensional) 1-handles attaching to $\mathcal{O}$ such that $F$ is the result of surgery along these 1-handles. These 1-handles intersect $\mathcal{O}$ only in their attaching regions. For an orthogonal projection $p: \mathbb{R}^4\to\mathbb{R}^3$, a surface-link $F$ can be perturbed slightly so that $p(F)$ is a generic surface. Each point of the generic surface $p(F)$ has a neighborhood in 3-space diffeomorphic to $\mathbb{R}^3$ such that the image of the generic surface under the diffeomorphism looks like 1, 2, or 3 coordinate planes or the cone on a figure 8 (Whitney umbrella). These points are called regular points, double points, triple points, and branch points. Triple points and branch points are isolated while double points are not and lie on curves called {\it double point curves}. The union of non-regular points is the {\it singular set} of the generic surface \cite{CKS}, \cite{kamada2017surface}. \begin{figure}[h] \begin{overpic}[unit=.5mm,scale=.25]{triple.pdf} \end{overpic} \caption{Local images of a double point, triple point, and branch point.} \label{fig:1} \end{figure} A {\it broken sheet diagram} of a surface-link $F$ is a generic projection $p(F)$ with consistently broken sheets along its singular set, see Figure \ref{fig:1} and \cite{cartersaito}. The sheet that lifts below the other, in respect to a height function determined by the direction of the orthogonal projection $p$, is locally broken at their intersection. All surface-links admit a broken sheet diagram, and all broken sheet diagrams lift to surface-links in 4-space. 
However, not all compact generic surfaces in 3-space can be given a broken sheet structure \cite{Carter1998}. The minimal number of triple points among all broken sheet diagrams representing a surface-link $F$ is called the {\it triple point number} of $F$ and is denoted $t(F)$. A surface-link with a triple point number of 0 is called {\it pseudo-ribbon}. Every ribbon surface-link is pseudo-ribbon, and every pseudo-ribbon 2-knot is ribbon \cite{yajima1964}. However, the turned spun torus surface-link of a non-trivial classical knot is pseudo-ribbon and non-ribbon \cite{boyle}, \cite{iwase}, \cite{livingston}. A {\it singular link diagram} is an immersed link diagram in the plane with crossings and transverse double points called {\it vertices}. At each vertex assign a {\it marker}, a local choice of two non-adjacent regions in the complement of the vertex. Such a marked singular link diagram is called a {\it ch-diagram} \cite{CKS}, \cite{kamada2017surface}, \cite{kamadakim}, \cite{Yoshikawa}. One of the two smoothings of a vertex connects the two regions of the marker, giving the positive resolution $L^+$, and the other separates the marker's regions, giving the negative resolution $L^-$; see Figure \ref{fig:smoothing}. If $L^-$ and $L^+$ are unlinks, then the ch-diagram is said to be {\it admissible}. Admissible ch-diagrams represent surface-links and induce broken sheet diagrams, as described in Section \ref{sec2}, and every surface-link defines an admissible ch-diagram. \begin{figure}[h] \begin{overpic}[unit=.46mm,scale=.6]{smoothing2.pdf}\put(37,-2){$L^-$}\put(233,-2){$L^+$} \end{overpic} \caption{Smoothings of a marked vertex.} \label{fig:smoothing} \end{figure} Including the 3 Reidemeister moves of classical link diagrams, there are 8 moves on ch-diagrams called {\it Yoshikawa moves}; see p.~61 of \cite{kamada2017surface}. Two admissible ch-diagrams represent equivalent surface-links if and only if they are Yoshikawa move equivalent; see Theorem 3.6.3 of \cite{kamada2017surface}. The ch-index of a ch-diagram is the number of marked vertices plus the number of crossings. The {\it ch-index} of a surface-link $F$, denoted ch($F$), is the minimum of the ch-indices ch$(D)$ among all admissible ch-diagrams $D$ representing $F$. A surface-link $F$ is said to be {\it weakly prime} if $F$ is not the connected sum of any two surfaces $F_1$ and $F_2$ such that ch$(F_i)$ $<$ ch$(F)$. Yoshikawa classified weakly prime surface-links whose ch-index is 10 or less in \cite{Yoshikawa}. He generated a table of their representative ch-diagrams up to orientation and mirror. A rendition of his table is given in Figure \ref{fig:yoshikawa}. His notation is of the form $I_k^{g_1,\dots,g_c}$ where $I$ is the surface-link's ch-index, $g_1,\dots,g_c$ are the genera of its components, and the subscript $k$ distinguishes different surface-links with the same ch-index and list of genera. This paper considers the triple point number of the surface-links represented in Yoshikawa's table. Section \ref{sec3} tabulates the known triple point numbers of the surface-links from his table and calculates or provides bounds on the triple point number of the remaining surface-links.
\begin{figure}[h] \begin{overpic}[unit=.5mm,scale=.6]{yoshikawa.pdf}\put(22,308){$0_1$}\put(18,271){$2_1^1$}\put(15,233){$2_1^{-1}$}\put(77,310){$6_1^{0,1}$}\put(75,265){$7_1^{0,-2}$} \put(141,313){$8_1$}\put(142,272){$8_1^{1,1}$}\put(142,228){$8_1^{-1,-1}$}\put(201,313){$9_1$}\put(201,269){$9_1^{0,1}$}\put(201,227){$9_1^{1,-2}$} \put(42,175){$10_1$}\put(40,133){$10_2$} \put(40,89){$10_3$}\put(38,41){$10_1^1$}\put(106,174){$10_1^{0,1}$}\put(103,130){$10_2^{0,1}$}\put(100,88){$10_1^{1,1}$}\put(100,42){$10_1^{0,0,1}$} \put(166,173){$10_1^{0,-2}$}\put(167,127){$10_2^{0,-2}$} \put(161,88){$10_1^{-1,-1}$} \put(165,44){$10_1^{-2,-2}$} \end{overpic} \caption{Yoshikawa's table of surface-links with ch-index no greater than 10 \cite{Yoshikawa}.} \label{fig:yoshikawa} \end{figure} \section{Induced Broken Sheet Diagrams of Admissible Ch-diagrams} \label{sec2} Given a surface-knot $F\subset \mathbb{R}^4$ and a vector ${\bf v}\in \mathbb{R}^4$, perturb $F$ such that the orthogonal projection of $\mathbb{R}^4$ onto $\mathbb{R}$ in the direction of ${\bf v}$ is a Morse function. For any $t\in \mathbb{R}$, let $\mathbb{R}_t^3$ denote the affine hyperplane orthogonal to ${\bf v}$ that contains the point $t {\bf v}$. Morse theory allows for the assumption that all but finitely many of the non-empty cross-sections $F_t=\mathbb{R}_t^3 \cap F$ are classical links. The decomposition $\{F_t\}_{t\in\mathbb{R}}$ is called a {\it motion picture} of $F$. It may also be assumed that the exceptional cross-sections contain minimal points, maximal points, and/or singular links \cite{CKS}, \cite{kamada2017surface}. There is a product structure between Morse critical points implying that only finitely many cross-sections are needed to decompose, or construct, $F$. Although, a sole cross-section of a product region does not uniquely determine its knotting, i.e. its ambient isotopy class relative boundary, see \cite{CKS}. Project the cross-sections $\{F_t\}_{t\in\mathbb{R}}$ onto a plane to get an ordered family of planar diagrams containing classical link diagrams, minimal points, maximal points, and singular link diagrams. These planar diagrams are {\it stills} of the motion picture. The collection of all stills is also referred to as a motion picture of the surface-link. The product structure between critical points implies that cross-sections between consecutive critical points represent the same link. Therefore, there is a sequence of Reidemeister moves and planar isotopies between the stills of a motion picture that exists between consecutive critical points. There is a translation of Reidemeister moves in a motion picture to sheets in a broken sheet diagram. Associate the time parameter of a Reidemeister move with the height of a local broken sheet diagram. A Reidemeister III move gives a triple point diagram, a Reidemeister I move corresponds to a branch point, and a Reidemeister II move corresponds to a maximum or minimum of a double point curve, see Figure \ref{fig:r}. Triple points of the induced broken sheet diagram are in correspondence with the Reidemeister III moves in the motion picture. To generate a broken sheet diagram of the entire surface-link include sheets containing saddles for each saddle point in the motion picture. \begin{figure}[h] \begin{overpic}[unit=.5mm,scale=.6]{saddles2.pdf} \end{overpic} \caption{Induced saddle sheet.} \label{fig:saddle} \end{figure} Now, consider an admissible ch-diagram. 
There is a finite sequence of Reidemeister moves that takes $L^-$ and $L^+$ to crossing-less diagrams $O^-$ and $O^+$. Translate these Reidemeister moves to a broken sheet diagram. In between the still of $L^-$ and $L^+$ and for each marked vertex include sheets containing the saddles traced by transitioning from $L^-$ to $L^+$ in the local picture of Figure \ref{fig:smoothing}. These saddles are locally pictured in Figure \ref{fig:saddle}. Finally, cap-off $O^-$ and $O^+$ with trivial disks to produce a broken sheet diagram of a surface-link. \section{The Triple Point Numbers of Surface-Links Represented in Yoshikawa's Table} \label{sec3} The three trivial surface-knots $0_1$, $2_1^1$, and $2_1^{-1}$ are clearly pseudo-ribbon. Most of the remaining surface-links in Yoshikawa's table are ribbon. \begin{figure}[h] \includegraphics[scale=.3]{ribbon.pdf} \caption{Marked vertex pattern.} \label{fig:ribbon} \end{figure} \begin{figure}[h] \begin{overpic}[unit=.5mm,scale=.6]{bandpattern.pdf} \end{overpic} \caption{Motion picture of the saddles represented by Figure \ref{fig:ribbon}.} \label{fig:band} \end{figure} \begin{theorem} Admissible ch-diagrams with vertices paired and isolated in the pattern of Figure \ref{fig:ribbon} represent ribbon surface-links. \label{thm:ribbon} \end{theorem} \begin{proof} Figure \ref{fig:band} shows that the unlink diagrams of the positive and negative resolutions are identical, up to a small planar isotopy. The positive and negative resolutions are the boundary of trivial disk systems, families of disjoint 2-disks with one maximal (or minimal) point. Between these trivial disk systems include a product region representing the aforementioned small planar isotopies between the two resolutions to produce a collection of 2-knots each with just one minimal point and one maximal point. With no saddles, the constructed surface-link with all 2-sphere components is a trivial 2-link since any two trivial disk systems that have the same boundary are smoothly ambient isotopic (rel boundary) \cite{kamada2017surface}. Figure \ref{fig:handle} shows that the addition of the saddles represented by the marked vertices is equivalent to 1-handle surgery on this trivial 2-link. \end{proof} \begin{figure} \includegraphics[scale=.7]{ribbonhandle.pdf} \caption{Ribbon handle associated with the given marked vertex pattern.} \label{fig:handle} \end{figure} As a corollary to Theorem \ref{thm:ribbon}, all nontrivial surface-links of Yoshikawa's table except $8_1^{-1,-1}$, $10_2$, $10_3$, $10_1^{1,1}$, $10_2^{0,-2}$, and $10_1^{-1,-1}$ are ribbon. \begin{theorem}[Satoh '01 \cite{satoh3}, Kamada and Oshiro '09 \cite{sym}] The triple point number of $8_1^{-1,-1}$ is 2. \end{theorem} \noindent A surface in $\mathbb{R}^4$ is called ${\bf P}^2$-{\it irreducible} if it is not the connected sum $F_1\# F_2$ where $F_1$ is any surface and $F_2$ is one of the two standard projective planes, i.e. $2_1^{-1}$ or its mirror. All surfaces in Yoshikawa's table except for $2_1^{-1}$ are irreducible, Section 5 of \cite{Yoshikawa}. Satoh's lower bound calculation relies on each component being non-orientable, ${\bf P}^2$-irreducible, and having nonzero normal Euler number. \begin{lemma}[Satoh '01 \cite{satoh3}] For a ${\bf P}^2$-irreducible surface-link $F=F_1\cup\cdots \cup F_n$ \[ t(F)\geq (|e(F_1)|+\cdots+|e(F_n)|)/2,\] where $e(F_i)$ denotes the normal Euler number of the surface-knot $F_i$. 
\label{lem} \end{lemma} \noindent Kamada and Oshiro later proved $t(8_1^{-1,-1})=2$ using the symmetric quandle cocycle invariant. This method does not depend on the normal Euler number of the surface-link's components. \begin{theorem}[Satoh and Shima '04 \cite{satoh1} '05 \cite{satoh6}] The triple point number of the 2-twist-spun trefoil is 4, and the triple point number of the 3-twist-spun trefoil is 6. \end{theorem} \noindent Since $10_2$ represents the 2-twist-spun trefoil and $10_3$ represents the 3-twist-spun trefoil, Satoh and Shima's results give $t(10_2)=4$ and $t(10_3)=6$. \begin{theorem} The surface-link $10_1^{1,1}$ is pseudo-ribbon. \end{theorem} \begin{proof} Figure \ref{fig:10_11} shows that both the negative and positive resolutions of $10_1^{1,1}$'s marked vertex diagram only need Reidemeister II moves to achieve crossing-less diagrams. Thus, there is a broken sheet diagram representing $10_1^{1,1}$ with no triple points. \end{proof} \begin{figure}[h] \begin{overpic}[unit=.5mm,scale=.6]{10_11.pdf} \end{overpic} \caption{Negative and positive resolutions of $10_1^{1,1}$.} \label{fig:10_11} \end{figure} \begin{theorem} The surface-link $10_1^{-1,-1}$ has a triple point number no greater than 12 and no less than 2. \end{theorem} \begin{proof} Figures \ref{fig:neg} and \ref{fig:pos} illustrate a motion picture of $10_1^{-1,-1}$ whose induced broken sheet diagram has 12 triple points. Since $10_1^{-1,-1}$ is ${\bf P}^2$-irreducible \cite{Yoshikawa} and each component is a projective plane, Lemma \ref{lem} implies that $2\leq t(10_1^{-1,-1}).$ \end{proof} \begin{table}[ht] \setlength{\tabcolsep}{10pt} \renewcommand{\arraystretch}{1.5} \centering \begin{tabular}{|c||c |c |c |c| c| c |c |c |c| c| c| c| c| c| c| c| c| c| c| c| c| } \hline $F$ & $0_1$ & $2_1^1$ & $2_1^{-1}$ & $6_1^{0,1}$ & $7_1^{0,-2}$ & $8_1$ & $8_1^{1,1}$ & $8_1^{-1,-1}$ & $9_1$ & $9_1^{0,1}$& $9_1^{1,-2}$ \\ \hline $t(F)$ & 0& 0& 0& 0& 0& 0& 0& 2& 0& 0& 0 \\\hline \end{tabular} \vspace{5mm} \begin{tabular}{|c||c |c| c| c| c| c| c| c| c| c| c| c| c| c| c| c| c| c| c| c| c| c| } \hline $F$ & $10_1$ & $10_2$ & $10_3$ & $10_1^1$ & $10_1^{0,1}$ & $10_2^{0,1}$ & $10_1^{1,1}$ & $10_1^{0,0,1}$ \\\hline $t(F)$ & 0& 4 & $6$& 0 &0 &0&0&0\\ \hline \end{tabular} \vspace{5mm} \begin{tabular}{|c||c |c| c| c| c| c| c| c| c| c| c| c| c| c| c| c| c| c| c| c| c| c| } \hline $F$ & $10_1^{0,-2}$ & $10_2^{0,-2}$ & $10_1^{-1,-1}$ & $10_1^{-2,-2}$ \\\hline $t(F)$ & 0 & $ t(10_2^{0,-2}) \leq 10$ & $2\leq t(10_1^{-1,-1})\leq 12$ & 0\\ \hline \end{tabular} \vspace{5mm} \caption{Triple point number data on the surface-links represented in Yoshikawa's table \cite{Yoshikawa}.} \label{tab:1} \end{table} \begin{theorem} The surface-link $10_2^{0,-2}$ has a triple point number no greater than 10. \end{theorem} \begin{proof} A motion picture of an induced broken sheet diagram of $10_2^{0,-2}$ is shown in Figure \ref{fig:100-2}. This motion picture has 10 Reidemeister III moves that occur between the stills connected by an arrow. \end{proof} \begin{remark} Figure \ref{fig:100-2} shows that the Klein bottle component of $10_2^{0,-2}$ has a normal Euler number of 0. Therefore, Lemma \ref{lem} does not give a positive lower bound. The symmetric quandle cocycle invariant has proven useful for generating lower bounds on the triple point number of non-orientable surface-links, see \cite{carter2009symmetric}, \cite{sym}, \cite{oshiro}, and \cite{Oshiro2011}. 
A symmetric quandle must have a good involution with a fixed point in order to color $10_2^{0,-2}$. The trivial symmetric quandles of \cite{Oshiro2011} color $10_2^{0,-2}$ but the given symmetric quandle cocycles have a weight of 0 for all such coloring. The dihedral quandle $R_4$ colors $10_2^{0,-2}$ and the identity is a good involution of $R_4$, but there are no calculated symmetric quandle cocycles of $(R_4, \text{id})$. None of the other explicitly defined symmetric quandles of \cite{carter2009symmetric}, \cite{sym}, \cite{oshiro}, and \cite{Oshiro2011} color $10_2^{0,-2}$ or admit a coloring that gives a non-zero weight with the explicitly defined symmetric quandle cocycles. \end{remark} \section*{Acknowledgements} I would like to thank my advisor Jennifer Schultens for her revisions and encouragement. \begin{figure}[h] \begin{overpic}[unit=.399mm,scale=.6]{translation.pdf} \end{overpic} \caption{Relationship between Reidemeister moves in a motion picture and broken sheet diagrams.} \label{fig:r} \end{figure} \begin{figure}[h] \begin{overpic}[unit=.5mm,scale=.75]{10-20.pdf}\put(84,190){$L_-$}\put(117,190){$L_+$} \end{overpic} \caption{A motion picture of $10_2^{0,-2}$.} \label{fig:100-2} \end{figure} \begin{figure}[h] \begin{overpic}[unit=.5mm,scale=.75]{10-1-1.pdf}\put(318,54){$L_-$} \end{overpic} \caption{A motion picture of $10_1^{-1,-1}$, 1 of 2.} \label{fig:neg} \end{figure} \begin{figure}[h] \begin{overpic}[unit=.5mm,scale=.75]{10-1-1++.pdf}\put(2,184){$L_+$} \end{overpic} \caption{A motion picture of $10_1^{-1,-1}$, 2 of 2.} \label{fig:pos} \end{figure} \bibliographystyle{amsplain} \bibliography{proposal} \end{document}
2205.11076v1
http://arxiv.org/abs/2205.11076v1
Splitting subspaces and a finite field interpretation of the Touchard-Riordan Formula
\documentclass[12pt]{amsproc} \usepackage{amsaddr, biblatex, hyperref, tikz, doi} \hypersetup{ breaklinks=true, colorlinks=true, pdfusetitle=true, citecolor=blue } \DeclareFieldFormat{doi}{\href{https://dx.doi.org/#1}{\textsc{doi}}} \DeclareFieldFormat{url}{\href{#1}{\textsc{url}}} \renewbibmacro*{doi+eprint+url}{ \printfield{doi} \newunit\newblock \iftoggle{bbx:eprint}{ \usebibmacro{eprint} }{} \newunit\newblock \iffieldundef{doi}{ \usebibmacro{url+urldate}} {} } \ExecuteBibliographyOptions{isbn=false,giveninits=true} \renewbibmacro{in:}{} \AtEveryBibitem{ \ifentrytype{book} {\clearfield{pages}} } \makeatletter \@namedef{subjclassname@2020}{\textup{2020} Mathematics Subject Classification} \makeatother \title[Splitting subspaces and the Touchard-Riordan formula]{Splitting subspaces and a finite field interpretation of the Touchard-Riordan Formula} \author{Amritanshu Prasad} \address{The Institute of Mathematical Sciences, Chennai, India.} \address{Homi Bhabha National Institute, Mumbai, India.} \email{[email protected]} \author{Samrith Ram} \address{Indraprastha Institute of Information Technology Delhi, New Delhi, India.} \email{[email protected]} \subjclass[2020]{05A15,05A19,33C45} \keywords{Touchard-Riordan formula, splitting subspaces, finite fields, $q$-Hermite orthogonal polynomials, chord diagrams} \newtheorem*{theorem*}{Main Theorem} \newtheorem{lemma}{Lemma} \newtheorem{corollary}[lemma]{Corollary} \newtheorem{theorem}[lemma]{Theorem} \newtheorem{proposition}[lemma]{Proposition} \theoremstyle{remark} \newtheorem{example}[lemma]{Example} \newtheorem{remark}[lemma]{Remark} \theoremstyle{definition} \newtheorem{definition}[lemma]{Definition} \newtheorem{claim}{Claim} \numberwithin{equation}{section} \numberwithin{lemma}{section} \DeclareMathOperator\Irr{Irr} \DeclareMathOperator\Par{Par} \newcommand\Fq{\mathbf F_q} \newcommand\NN{\mathbf N} \newcommand\qbin[2]{{#1\brack#2}_q} \renewcommand\aa{\mathbf a} \newcommand\bb{\mathbf b} \newcommand\jj{\mathbf j} \newcommand\kk{\mathbf k} \newcommand\BB{\mathcal B} \newcommand\Seq{\mathrm{Seq}} \newcommand\Tab{\mathrm{Tab}} \newcommand\T{\mathcal T} \newcommand\arc{\mathrm{arc}} \newcommand\inbox[1]{\boxed{\scalebox{0.9}{#1}}} \renewcommand\AA{\mathcal A} \newcommand\ZZ{\mathbf Z} \usepackage{hyperref} \begin{document} \begin{abstract} We enumerate the number of $T$-splitting subspaces of dimension $m$ for an arbitrary operator $T$ on a $2m$-dimensional vector space over a finite field. When $T$ is regular split semisimple, comparison with an alternate method of enumeration leads to a new proof of the Touchard-Riordan formula for enumerating chord diagrams by their number of crossings. \end{abstract} \maketitle \section{Introduction} Let $\Fq$ denote a finite field of order $q$, and $m$ be a non-negative integer. Given a positive integer $d$ and a linear operator $T:\Fq^{dm}\to \Fq^{dm}$, an $m$-dimensional subspace $W\subset \Fq^{dm}$ is said to be \emph{$T$-splitting} if \begin{displaymath} W + TW + \dotsb + T^{d-1}W = \Fq^{dm}. \end{displaymath} This definition was proposed by Ghorpade and Ram \cite{m2}, motivated by the work of Niederreiter \cite{N2}. The number of $T$-splitting subspaces is known when $T$ has an irreducible characteristic polynomial \cite{MR4263652,sscffa,m2}, is regular nilpotent \cite{agram2022}, is regular split semisimple \cite{fpsac,pr}, or when the invariant factors satisfy certain degree constraints \cite{polynomialmatrices}. In this article, we consider the case where $d=2$. 
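Before stating our results, we illustrate the definition with a small computation. The following self-contained Python sketch (included for illustration only; it is independent of the SageMath demonstration mentioned at the end of this introduction) counts the $m$-dimensional $T$-splitting subspaces of $\Fq^{2m}$ by brute force for $q=2$ and $m=2$.
\begin{verbatim}
# Illustrative brute-force count over F_2 (q = 2, m = 2), straight from the
# definition: W is T-splitting iff W + TW = F_q^{2m}.  Not the authors' code.
from itertools import combinations

q, m = 2, 2
N = 2 * m

def mat_vec(T, v):                       # multiply matrix by vector over F_q
    return tuple(sum(T[i][j] * v[j] for j in range(N)) % q for i in range(N))

def span(vectors):                       # all F_2-linear combinations
    S = {(0,) * N}
    for v in vectors:
        S |= {tuple((x + y) % q for x, y in zip(w, v)) for w in S}
    return S

vecs = [tuple((k >> i) & 1 for i in range(N)) for k in range(1, q ** N)]

def splitting_count(T):
    subspaces = {frozenset(span(pair)) for pair in combinations(vecs, 2)}
    subspaces = {W for W in subspaces if len(W) == q ** m}      # dimension m
    return sum(1 for W in subspaces
               if len(span(list(W) + [mat_vec(T, v) for v in W])) == q ** N)

# Companion matrix of the irreducible polynomial x^4 + x + 1 over F_2,
# so the only T-invariant subspaces are 0 and F_2^4.
T = [[0, 0, 0, 1],
     [1, 0, 0, 1],
     [0, 1, 0, 0],
     [0, 0, 1, 0]]
print(splitting_count(T))                # 20
\end{verbatim}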
Our main theorem gives a formula for the number of $T$-splitting subspaces of dimension $m$ for any $T\in M_{2m}(\Fq)$. \begin{theorem*} For any linear operator $T:\Fq^{2m}\to \Fq^{2m}$, the number of $m$-dimensional $T$-splitting subspaces of $\Fq^{2m}$ is given by \begin{displaymath} \sigma^T = q^{\binom m2}\sum_{j=0}^{2m} (-1)^j X_j^T q^{\binom{m-j+1}2}, \end{displaymath} where $X_j^T$ is the number of $j$-dimensional $T$-invariant subspaces of $\Fq^{2m}$. \end{theorem*} The quantities $X_j^T$ are easy to compute from the Jordan canonical form of $T$ with the help of a recursive formula of Ramar\'e \cite{MR3611779}. For a detailed discussion see Section~\ref{sec:computation-x_j}. When $T$ is regular split semisimple (i.e., it is similar to a diagonal matrix with distinct diagonal entries), $X_j^T=\binom{2m}j$, so the number of $T$-splitting subspaces is \begin{displaymath} \sigma^T = q^{\binom m2}\sum_{j=0}^{2m} (-1)^j\binom{2m}jq^{\binom{m-j+1}2}. \end{displaymath} The sum above is the right hand side of the Touchard-Riordan formula \begin{equation} \label{eq:touchard-riordan} (q-1)^m T_m(q) = \sum_{j=0}^{2m} (-1)^j\binom{2m}jq^{\binom{m-j+1}2} \end{equation} for the polynomial $T_m(q)$ that enumerates chord diagrams on $2m$ nodes according to their number of crossings (see Section~\ref{sec:chord-diags}). This identity is attributed to Touchard \cite{MR46325} and Riordan \cite{MR366686}. A proof using the theory of continued fractions was given by Read~\cite{MR556055}, and a bijective proof was given by Penaud~\cite{MR1336847}. The polynomials $T_m(q)$ are moments of the $q$-Hermite orthogonal polynomial sequence \cite[Prop.~4.1]{MR930175}. Several generalizations and variations of the Touchard-Riordan formula can be found in \cite{MR2737181,MR2721522,MR2799608,MR3033681,MR2819649}. When $T\in M_{2m}(\Fq)$ is regular split semisimple, splitting subspaces can also be enumerated (see Theorem~\ref{theorem:split-splitting}) using the technique of \cite[Section~4.6]{pr} as \begin{displaymath} \sigma^T = q^{\binom m2}(q-1)^mT_m(q). \end{displaymath} This gives a completely new self-contained proof of the Touchard-Riordan formula. Thus our main theorem could be viewed as a generalisation of the Touchard-Riordan formula in the setting of finite fields. A software demonstration of our results using SageMath \cite{sagemath} can be found at \url{https://www.imsc.res.in/~amri/splitting_subspaces/}. \section{Enumeration of Invariant Subspaces} \label{sec:computation-x_j} Each $T\in M_n(\Fq)$ gives rise to an $\Fq[t]$-module $M_T$ with underlying vector space $\Fq^n$ on which $t$ acts by $T$. A subspace of $\Fq^n$ is $T$-invariant if and only if it is a submodule of $M_T$. Let $\Par$ denote the set of all integer partitions and $\Irr \Fq[t]$ denote the set of all irreducible monic polynomials in $\Fq[t]$. By the theory of elementary divisors (see \cite[Section~3.9]{MR780184} and \cite[Section~1]{MR72878}) there exists a unique function $c_T:\Irr\Fq[t]\to \Par$ such that \begin{equation} \label{eq:primary} M_T = \bigoplus_{p\in \Irr\Fq[t]} M_{T_p}, \end{equation} with the $p$-primary component $M_{T_p}$ having structure \begin{equation} \label{eq:local_jcf} M_{T_p} = \bigoplus_i\Fq[t]/(p(t)^{c_T(p)_i}) \end{equation} where $c_T(p)_1,c_T(p)_2,\dotsc$ are the parts of the partition $c_T(p)$. 
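For instance, in the toy example used in the sketch above ($q=2$, $m=2$, and $T$ the companion matrix of an irreducible quartic), the function $c_T$ is supported on a single irreducible polynomial $p$ of degree $4$ with $c_T(p)=(1)$, so the only $T$-invariant subspaces are $0$ and $\Fq^{2m}$; that is, $X_0^T=X_4^T=1$ and $X_1^T=X_2^T=X_3^T=0$. The following sketch (again for illustration only) evaluates the formula of the Main Theorem for this data and recovers the brute-force count.
\begin{verbatim}
# Illustrative evaluation of the Main Theorem's formula (not the authors' code):
# sigma^T = q^C(m,2) * sum_j (-1)^j X_j^T q^C(m-j+1,2), where C(k,2) = k(k-1)/2
# is understood for all integers k.
def C2(k):
    return k * (k - 1) // 2

def sigma_formula(q, m, X):
    return q ** C2(m) * sum((-1) ** j * X[j] * q ** C2(m - j + 1)
                            for j in range(2 * m + 1))

q, m = 2, 2
X = [1, 0, 0, 0, 1]            # X_j^T for the companion-matrix example above
print(sigma_formula(q, m, X))  # 20, matching the brute-force count
\end{verbatim}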
Define the \emph{invariant subspace generating function} $f_T$ of $T\in M_n(\Fq)$ as \begin{displaymath} f_T(t) = \sum_{j=0}^n X_j^T t^j, \end{displaymath} where $X_j^T$ is the number of $j$-dimensional $T$-invariant subspaces of $\Fq^n$. Each $\Fq[t]$-submodule of $M_T$ is uniquely expressible as a direct sum of submodules of the primary submodules $M_{T_p}$. Therefore, \begin{displaymath} f_T(t) = \prod_{p\in \Irr\Fq[t]} f_{T_p}(t). \end{displaymath} For each $\lambda\in \Par$, let $f_\lambda(q;t)$ denote the invariant subspace generating function of the nilpotent matrix over $\Fq$ whose Jordan block sizes are the parts of $\lambda$. A surprisingly simple recurrence of Ramar\'e~\cite[Theorem~3.1]{MR3611779} allows for easy computation of $f_\lambda(q;t)$: \begin{equation} \label{eq:ramare} (t-1)f_\lambda(q;t) = t^{\lambda_1+1}q^{\sum_{j\geq 2}\lambda_j}f_{(\lambda_2,\lambda_3,\dotsc)}(q;t/q) - f_{(\lambda_2,\lambda_3,\dotsc)}(q;tq), \end{equation} where $\lambda_1,\lambda_2,\dotsc$ are the parts of $\lambda$ in weakly decreasing order. The empty partition $\emptyset$ of $0$ can be used as the base case with $f_\emptyset(q;t)=1$. The recurrence \eqref{eq:ramare} implies that $f_\lambda(q;t)$ is a polynomial in $q$ and $t$ with integer coefficients. Since the rings $\Fq[t]/p(t)^d$ and $\mathbf F_{q^d}[u]/u^d$ are isomorphic, the invariant subspace generating function of $T\in M_n(\Fq)$ is given by \begin{equation} \label{eq:fT} f_T(t) = \prod_{p\in \Irr\Fq[t]} f_{c_T(p)}(q^{\deg p};t^{\deg p}). \end{equation} It follows that the polynomial $f_T(t)$ depends on the polynomials $p\in \Irr\Fq[t]$ only through their degrees. \begin{definition} \label{definition:similarity-class-type} A \emph{similarity class type} of size $n$ is a multiset $\tau$ of pairs of the form $(d,\lambda)$ where $d$ is a positive integer and $\lambda$ is a non-empty integer partition such that $\sum_{(d,\lambda)\in\tau} d|\lambda|=n$ (the sum is taken with multiplicity). The similarity class type of $T\in M_n(\Fq)$ is the similarity class type of size $n$ given by \begin{displaymath} \{(\deg(p),c_T(p))\mid p\in \Irr\Fq[t],\;c_T(p)\neq \emptyset\}. \end{displaymath} \end{definition} \begin{remark} The set of similarity class types of size $n$ is independent of $q$. Green~\cite{MR72878} introduced similarity class types to organise conjugacy classes of $GL_n(\Fq)$ in a manner independent of $q$. This enabled him to give a combinatorial description of the character table of $GL_n(\Fq)$ across all $q$. For a detailed discussion and a software implementation see \cite{simsage}. \end{remark} \begin{example} \begin{enumerate} \item An $n\times n$ scalar matrix has similarity class type $\{(1,(1^n))\}$. \item A regular split semisimple $n\times n$ matrix has similarity class type\linebreak $\{(1,(1)),\dotsc,(1,(1))\}$ (with $n$ repetitions). \item A regular nilpotent $n\times n$ matrix has type $\{(1,(n))\}$. \item An $n\times n$ matrix with irreducible characteristic polynomial has type $\{(n,(1))\}$. \end{enumerate} \end{example} \begin{theorem} \label{theorem:type-dependence} Given a similarity class type $\tau$ of size $n$ and $0\leq j\leq n$, let \begin{displaymath} f_\tau(u;t) = \prod_{(d,\lambda)\in \tau} f_\lambda(u^d;t^d). \end{displaymath} Then for any prime power $q$ and any matrix $T\in M_n(\Fq)$ with similarity class type $\tau$, $f_T(t)=f_\tau(q;t)$. In particular, for every $0\leq j\leq n$, there exists a polynomial $X^\tau_j(u)\in \ZZ[u]$ such that $X^T_j = X^\tau_j(q)$.
\end{theorem} \begin{proof} The theorem follows from Eqns.~(\ref{eq:ramare}) and (\ref{eq:fT}). \end{proof} The polynomial $f_\lambda(q;t)$ is known to have non-negative coefficients \cite{MR1223236}, hence $X_j^\tau(q)$ also has non-negative coefficients. \begin{example} \label{example:taui} Let $\tau_i=\{(1,(1^{m+i})),(m-i,(1))\}$ for $i=1,\dotsc,m$. Then \begin{displaymath} f_{\tau_i}(t) = \left(\sum_{k=0}^{m+i}\qbin{m+i}k t^k\right)(1+t^{m-i}). \end{displaymath} Consequently, \begin{equation} \label{eq:taui} X^{\tau_i}_j(q) = \qbin{m+i}j + \qbin{m+i}{j-m+i}. \end{equation} \end{example} \section{The Existence of a Formula} \label{sec:existence} In this section we establish the existence of a formula for the number $\sigma^T$ of $m$-dimensional $T$-splitting subspaces of $\Fq^{2m}$ in terms of $X_j^T$, $j=0,\dotsc,m$ (Corollary~\ref{cor:ajxj}). The main step is Proposition~\ref{prop:recurrence}, which is a special case of a more general recurrence of Chen and Tseng \cite[Lemma~2.7]{sscffa}. Given a positive integer $n$, and $0\leq a\leq n$, let $\aa$ denote the set of $a$-dimensional subspaces of $\Fq^n$. Given a linear operator $T:\Fq^n\to \Fq^n$ and sets $X$ and $Y$ of subspaces of $\Fq^n$, define \begin{align*} (X,Y)_T&:=\{W \in X\mid W\cap T^{-1}W\in Y\}\\ [X,Y]_T&:=\{(W_1,W_2)\mid W_1\in X, W_2\in Y,\text{ and } W_1\cap T^{-1}W_1\supset W_2\}. \end{align*} Thus $(\aa,\bb)_T$ denotes the set of $a$-dimensional subspaces $W$ such that $W\cap T^{-1}W$ has dimension $b$. We drop the subscript $T$ from the notation when the operator is clear from the context. \begin{example} For each $0\leq a\leq n$, $(\aa,\aa)$ denotes the set of $a$-dimensional $T$-invariant subspaces of $\Fq^n$. Hence $|(\aa,\aa)_T|=X_a^T$. \end{example} \begin{example} If $n=2m$, then $(\mathbf m,\mathbf 0)_T$ is the set of $m$-dimensional $T$\nobreakdash-splitting subspaces of $\Fq^{2m}$. \end{example} \begin{proposition} \label{prop:recurrence} Let $T:\Fq^n\to \Fq^n$ be a linear map. For all integers $n\geq a>b\geq 0$, we have \begin{align*} |(\aa,\bb)| & = X_b^T\qbin{n-b}{a-b}-X_a^T\qbin ab\\ & + \sum_{j=0}^{b-1}|(\bb,\jj)|{n-2b+j \brack a-2b+j}_q-\sum_{k=b+1}^{a-1}|(\aa,\kk)|{k \brack b}_q. \end{align*} \end{proposition} \begin{proof} Since $\aa = \coprod_{0\leq k\leq a} (\aa,\kk)$, we have \begin{align*} [\aa,\bb]=\coprod_{b\leq k\leq a}[(\aa,\kk),\bb]. \end{align*} It follows that \begin{align} |[\aa,\bb]|&=\sum_{k=b}^a |[(\aa,\kk),\bb]|\nonumber \\ &=\sum_{k=b}^a|(\aa,\kk)|{k \brack b}_q\nonumber\\ &=|(\aa,\bb)|+\sum_{k=b+1}^a|(\aa,\kk)|{k \brack b}_q. \label{eq:1} \end{align} Similarly, \begin{align*} [\aa,\bb]=\coprod_{0\leq j\leq b}[\aa,(\bb,\jj)], \end{align*} so that \begin{align} |[\aa,\bb]|&=\sum_{j=0}^b |[\aa,(\bb,\jj)]|\nonumber \\ &=\sum_{j=0}^b|(\bb,\jj)|{n-(2b-j) \brack a-(2b-j)}_q. \label{eq:2} \end{align} The proposition follows from Eqs.~\eqref{eq:1} and \eqref{eq:2}, together with $|(\aa,\aa)|=X_a^T$ and $|(\bb,\bb)|=X_b^T$. \end{proof} \begin{proposition} For all integers $n\geq a\geq b\geq 0$, there exist polynomials $p_0(t),\dotsc,p_a(t)\in \ZZ[t]$ such that, for every prime power $q$, and every linear map $T:\Fq^n\to \Fq^n$, \begin{align*} |(\aa,\bb)_T|=\sum_{j=0}^a p_j(q)X_j^T. \end{align*} \end{proposition} \begin{proof} Proposition~\ref{prop:recurrence} expands $|(\aa,\bb)_T|$ in terms of $X_a^T$, $X_b^T$, and $|(\aa',\bb')_T|$ where either $a'<a$, or $a'=a$ and $a'-b'<a-b$. The coefficients are polynomials in $q$ that are independent of $T$.
Thus repeated application of Proposition~\ref{prop:recurrence} will result in an expression of the stated form in finitely many steps. \end{proof} \begin{corollary} \label{cor:ajxj} For each non-negative integer $m$, there exist\linebreak polynomials $p_0(t),\dotsc,p_m(t)\in \ZZ[t]$ such that, for every linear map $T:\Fq^{2m}\to \Fq^{2m}$, the number of $m$-dimensional $T$-splitting subspaces is given by \begin{equation} \label{eq:ajxj} \sigma^T = \sum_{j=0}^m p_j(q)X_j^T. \end{equation} \end{corollary} \section{Proof of the Main Theorem} \label{sec:proof-main-theorem} By Theorem~\ref{theorem:type-dependence} and Corollary~\ref{cor:ajxj}, for every similarity class type $\tau$ of size $2m$, there exists $\sigma^\tau(u)\in \ZZ[u]$ such that, for every prime power $q$ and every $T\in M_{2m}(\Fq)$ of type $\tau$, $\sigma^T = \sigma^\tau(q)$. Thus the main theorem can be rephrased as follows. \begin{theorem} \label{th:main} For each similarity class type $\tau$ of size $2m$, \begin{equation} \label{eq:sigma-tau} \sigma^{\tau}(q)=q^{m \choose 2} \sum_{j=0}^{2m} (-1)^jX_j^\tau(q)q^{\binom{m-j+1}2}. \end{equation} \end{theorem} \subsection*{The proof strategy} Since the lattice of submodules of $M_T$ is self-dual, $X_j^\tau(q)=X_{2m-j}^\tau(q)$. Therefore the right hand side of (\ref{eq:sigma-tau}) can be rewritten in terms of $X^\tau_0(q),\dotsc,X^\tau_m(q)$, bringing it to the form \eqref{eq:ajxj}. Suppose $\tau_0,\dotsc,\tau_m$ are similarity class types of size $2m$ such that the determinant of the matrix $(X^{\tau_i}_j(q))_{0\leq i,j\leq m}$ is non-zero. Then the system of equations \begin{displaymath} \sigma^{\tau_i}(q) = \sum_{j=0}^m p_j(q)X^{\tau_i}_j(q), \quad i=0,\dotsc,m \end{displaymath} has a unique solution for the $p_j(q)$. Thus, if we prove \eqref{eq:sigma-tau} for $\tau=\tau_0,\dotsc,\tau_m$, we will have shown that Theorem~\ref{th:main} holds in general. Take $\tau_0=\{(2m,(1))\}$, the type of a simple matrix (a matrix with irreducible characteristic polynomial), and for $i=1,\dotsc,m$, take $\tau_i=\{(1,(1^{m+i})),(m-i,(1))\}$. The proof of Theorem~\ref{th:main} is reduced to the following steps: \begin{claim} \label{claim:1} The formula \eqref{eq:sigma-tau} holds for $\tau=\tau_0,\dotsc,\tau_m$. \end{claim} \begin{claim} \label{claim:2} The determinant of $X=(X^{\tau_i}_j(q))_{0\leq i,j\leq m}$ is non-zero. \end{claim} \subsection*{Proof of Claim~\ref{claim:1}} Consider first $\tau=\tau_0$. It is shown in \cite[Theorem 1.4]{MR4263652} that \begin{displaymath} \sigma^{\tau_0}(q) = q^{\binom m2}(q^{\binom{m+1}2}+q^{\binom m2}). \end{displaymath} On the other hand, $\tau_0$ is the type of a simple matrix, so $X^{\tau_0}_j(q) = 1$ if $j=0$ or $2m$, and $X^{\tau_0}_j(q)=0$ for $0<j<2m$. Therefore \begin{displaymath} q^{\binom m2}\sum_{j=0}^{2m} (-1)^jX_j^{\tau_0}(q)q^{\binom{m-j+1}2} = q^{\binom m2}(q^{\binom{m+1}2} + q^{\binom{-m+1}2}) = q^{\binom m2}(q^{\binom{m+1}2} + q^{\binom m2}), \end{displaymath} establishing \eqref{eq:sigma-tau} for $\tau_0$. For $i=1,\dotsc,m$, $\sigma^{\tau_i}(q) = 0$ since any $T\in M_{2m}(\Fq)$ of type $\tau_i$ satisfies the hypothesis of the following general lemma. \begin{lemma} Let $l(\lambda)$ denote the number of parts of an integer partition $\lambda$. If $T\in M_n(\Fq)$ and $W\subset \Fq^n$ are such that $\sum_{j\geq 0} T^jW=\Fq^n$, then $\dim W\geq l(c_T(p))$ for all $p\in \Irr\Fq[t]$. In particular, if $T\in M_{2m}(\Fq)$ is such that $l(c_T(p))>m$ for some $p\in \Irr\Fq[t]$, then $T$ does not admit an $m$-dimensional splitting subspace.
\end{lemma} \begin{proof} Let $\Pi_p:M_T\to M_{T_p}$ denote the projection map with respect to the primary decomposition \eqref{eq:primary}. Since $\Pi_p$ commutes with $T$, $\sum_{j\geq 0} T^j\Pi_p(W) = \Pi_p(\Fq^n)=M_{T_p}$. In other words, $\Pi_p(W)$ generates $M_{T_p}$. The $\Fq[t]$-module $M_{T_p}$ has rank $l(c_T(p))$, so any generating set must have at least $l(c_T(p))$ elements. Therefore, $\dim W\geq \dim \Pi_p(W)\geq l(c_T(p))$. \end{proof} Now $X^{\tau_i}_j(q)$ is given by \eqref{eq:taui}. Therefore, in order to establish \eqref{eq:sigma-tau} for $\tau=\tau_i$, $i=1,\dotsc,m$, it suffices to prove the following result. \begin{lemma} \label{lemma:vanishing-q_bins} For each positive integer $m$, $1\leq i\leq m$, and $0\leq k\leq m-i$, \begin{displaymath} \sum_{j=0}^{2m} (-1)^j \qbin{m+i}{j-k} q^{\binom{m-j+1}2}=0. \end{displaymath} \end{lemma} \begin{proof} In the $q$-binomial theorem \begin{displaymath} \sum_{j=0}^n \qbin nj q^{\binom j2}x^j = \prod_{j=0}^{n-1}(1+q^jx), \end{displaymath} set $n=m+i$, $x=-q^{k-m}$, and change the index of summation from $j$ to $j+k$ to get \begin{equation} \label{eq:substituted} (-1)^k\sum_{j=k}^{m+i+k}(-1)^j\qbin{m+i}{j-k}q^{\binom{j-k}2+(k-m)(j-k)}=0. \end{equation} Observe that \begin{gather*} \binom{m-j+1}2 = [m(m+1) + j(j-1)-2mj]/2,\text{ whereas }\\ \binom{j-k}2+(k-m)(j-k) = [k(k+1-2(k-m))+j(j-1)-2mj]/2. \end{gather*} These two expressions differ by a quantity independent of $j$. Therefore replacing $q^{\binom{j-k}2+(k-m)(j-k)}$ by $q^{\binom{m-j+1}2}$ in \eqref{eq:substituted} amounts to multiplication by a non-zero factor that is independent of $j$. Thus we have \begin{displaymath} \sum_{j=k}^{m+i+k}(-1)^j\qbin{m+i}{j-k}q^{\binom{m-j+1}2}=0. \end{displaymath} The sum remains unchanged when its range is extended to $0\leq j\leq 2m$, proving the identity in the lemma. \end{proof} \subsection*{Proof of Claim~\ref{claim:2}} The non-singularity of $X=(X^{\tau_i}_j(q))_{0\leq i,j\leq m}$ is proved using inequalities satisfied by the degrees of its entries. \begin{lemma} \label{lem:maxperm} Let $(a_{ij})_{n\times n}$ be a real matrix such that whenever $i<k$ and $j<k$, $$ a_{ i k}-a_{ij}< a_{kk}-a_{kj}. $$ Then the sum $S(\sigma)=\sum_{1\leq i\leq n}a_{i\sigma(i)}$ attains its maximum value precisely when $\sigma$ is the identity permutation. \end{lemma} \begin{proof} Let $\sigma $ be a permutation for which the sum $S(\sigma)$ is maximised. We claim that $\sigma(n)=n$. Suppose, to the contrary, that $\sigma(n)=s\neq n.$ Let $r=\sigma^{-1}(n)$. Now \begin{align*} \sum_{1\leq i\leq n}a_{i\sigma(i)}&=\sum_{i\notin \{r,n\}}a_{i\sigma(i)}+a_{rn}+a_{ns}\\ &<\sum_{i\notin \{r,n\}}a_{i\sigma(i)}+a_{rs}+a_{nn} \end{align*} by the hypothesis since $r<n$ and $s<n$. If $\pi$ denotes the permutation which agrees with $\sigma$ whenever $i\notin \{r,n\}$ with $\pi(r)=s$ and $\pi(n)=n$, then it is clear that $S(\sigma)<S(\pi)$, contradicting the maximality of $S(\sigma)$. This proves the claim that $\sigma(n)=n$. Therefore $$ S(\sigma)=a_{nn}+\max_{\pi \in S_{n-1}}S(\pi). $$ Similar reasoning applied to the leading principal $(n-1) \times (n-1)$ submatrix of $A$ shows that $\sigma(n-1)=n-1$. Continuing this line of reasoning it can be seen that $\sigma(i)=i$ for each $i\leq n$, completing the proof. \end{proof} \begin{proposition} \label{prop:cofactor} The matrix $X=(X_{j}^{\tau_i})_{0\leq i,j\leq m}$ is non-singular. \end{proposition} \begin{proof} Since $\tau_0$ is the type of a simple matrix, the first row of $X$ is the unit vector $(1,0,\dotsc,0)$. 
Therefore it suffices to show that the minor $X'=(X^{\tau_i}_j)_{1\leq i,j\leq m}$ is non-singular. Let $a_{ij}=\deg X^{\tau_i}_j(q)$. Since $\deg \qbin nk = (n-k)k$, by \eqref{eq:taui} we have, for $1\leq i,j\leq m$, \begin{displaymath} a_{ij}=\max\{j(m+i-j),(j-m+i)(2m-j)\}=j(m+i-j), \end{displaymath} since $j(m+i-j)-(j-m+i)(2m-j)=2(m-i)(m-j)\geq 0$. If $i<k$ and $j<k$, \begin{align*} a_{ik}-a_{ij}&=k(m+i-k)-j(m+i-j)\\ &=(k-j)(m+i-k-j)\\ &<(k-j)(m-j)\\ &=a_{kk}-a_{kj}. \end{align*} Lemma \ref{lem:maxperm} implies that $\det X'$ has degree $\sum_{i=1}^ma_{ii}>0$ and is thus non-singular. \end{proof} \section{Chord Diagrams} \label{sec:chord-diags} A \emph{chord diagram} on $n=2m$ nodes refers to one of many visual representations of a fixed-point-free involution on $[2m]$ (see, e.g., \cite[Fig.~2]{MR1336847}). We arrange $2m$ nodes along the $X$-axis. A circular arc lying above the $X$-axis is used to connect each node to its image under the involution. For example, the involution $(1,4)(2,6)(3,5)(7,8)$ is represented by the diagram \begin{center} \begin{tikzpicture} [every node/.style={circle,fill=black,inner sep=0pt, minimum size=6pt}] \node[label=below:$1$] (1) at (1,0) {}; \node[label=below:$2$] (2) at (2,0) {}; \node[label=below:$3$] (3) at (3,0) {}; \node[label=below:$4$] (4) at (4,0) {}; \node[label=below:$5$] (5) at (5,0) {}; \node[label=below:$6$] (6) at (6,0) {}; \node[label=below:$7$] (7) at (7,0) {}; \node[label=below:$8$] (8) at (8,0) {}; \draw[thick,color=teal] (1) [out=45, in=135] to (4); \draw[thick,color=teal] (2) [out=45, in=135] to (6); \draw[thick,color=teal] (3) [out=45, in=135] to (5); \draw[thick,color=teal] (7) [out=45, in=135] to (8); \end{tikzpicture}. \end{center} The left end of each arc will be called an \emph{opening node}, and the right end a \emph{closing node}. In the running example, the opening nodes are $1,2,3,7$ and the closing nodes are $4,5,6,8$. A crossing of the chord diagram is a pair of arcs $(i,j),(k,l)$ such that $i<k<j<l$. The chord diagram above has two crossings, namely $(1,4),(2,6)$ and $(1,4),(3,5)$. Given a fixed-point-free involution $\sigma$, let $v(\sigma)$ denote the number of crossings of its chord diagram. Touchard~\cite{MR46325} studied the polynomials \begin{displaymath} T_m(q) = \sum_\sigma q^{v(\sigma)}, \end{displaymath} where the sum runs over all fixed-point-free involutions of $[2m]$. We now describe the contribution to $T_m(q)$ of chord diagrams with a specified set of opening nodes. \begin{lemma} \label{lemma:openings} Given $1\leq c_1<\dotsb<c_m\leq 2m$ designated as opening nodes for a chord diagram, and the remaining elements of $[2m]$ designated as closing nodes of a chord diagram, $c_i$ lies to the left of the $j$th closing node if and only if \begin{displaymath} c_i \leq i+j-1. \end{displaymath} Consequently, the number of opening nodes that lie to the left of the $j$th closing node is given by \begin{equation} \label{eq:rj} r_j:= \#\{i\in [m]\mid c_i\leq j+i-1\}. \end{equation} \end{lemma} \begin{proof} The node $c_i$ lies to the left of the $j$th closing node of $\sigma$ if and only if there are at most $j-1$ closing nodes to the left of $c_i$. In other words, the total number of nodes (opening or closing) up to and including $c_i$ is at most $i+j-1$, meaning that $c_i\leq i+j-1$. \end{proof} For every non-negative integer $n$, let $[n]_q$ denote the $q$-integer $1+q+\dotsb+q^{n-1}$. 
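The polynomials $T_m(q)$ are easy to tabulate for small $m$ by direct enumeration, which also gives a quick numerical sanity check of the Touchard-Riordan formula \eqref{eq:touchard-riordan}. The following Python sketch (illustrative only; the evaluation point $q=3$ is arbitrary) enumerates all fixed-point-free involutions of $[2m]$, counts crossings as defined above, and compares $(q-1)^mT_m(q)$ with the right hand side of \eqref{eq:touchard-riordan}; the binomial $\binom{m-j+1}2$ is computed as $(m-j+1)(m-j)/2$, so that negative upper arguments are handled as in the identity.
\begin{verbatim}
from itertools import combinations
from math import comb

def matchings(points):
    # all perfect matchings, as lists of arcs (i, j) with i < j
    if not points:
        yield []
        return
    first, rest = points[0], points[1:]
    for k, second in enumerate(rest):
        for sub in matchings(rest[:k] + rest[k + 1:]):
            yield [(first, second)] + sub

def crossings(arcs):
    return sum(1 for (i, j), (k, l) in combinations(arcs, 2)
               if i < k < j < l or k < i < l < j)

def binom2(n):           # binomial(n, 2) = n(n-1)/2, also for negative n
    return n * (n - 1) // 2

q = 3
for m in range(1, 6):
    Tm = sum(q ** crossings(arcs)
             for arcs in matchings(list(range(1, 2 * m + 1))))
    rhs = sum((-1) ** j * comb(2 * m, j) * q ** binom2(m - j + 1)
              for j in range(2 * m + 1))
    assert (q - 1) ** m * Tm == rhs
print("Touchard-Riordan formula checked for m = 1,...,5 at q = 3")
\end{verbatim}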
\begin{lemma} \label{lemma:Tq-refinement} For every non-negative integer $m$, and $1\leq c_1<\dotsb <c_m\leq 2m$, \begin{displaymath} \sum_{\sigma \text{ has opening nodes } c_1,\dotsc,c_m} q^{v(\sigma)}= \prod_{j=1}^m [r_j-(j-1)]_q, \end{displaymath} where $r_j$ is given by (\ref{eq:rj}). \end{lemma} \begin{proof} Suppose we wish to construct a chord diagram on $2m$ nodes with opening nodes $c_1<\dotsb<c_m$. The remaining nodes $d_1<\dotsb<d_m$ are closing nodes. By Lemma~\ref{lemma:openings}, for each $j\in [m]$, the number of opening nodes to the left of $d_j$ is $r_j$. Thus there are $r_1$ choices of opening node for the arc ending at $d_1$. These choices, taken from right to left, will result in $0,1,\dotsc,r_1-1$ crossings with arcs with closing nodes to the right of $d_1$. Having chosen the node that is joined to $d_1$, the number of opening nodes that are available to $d_2$ is $r_2-1$. Once again, these choices, taken from right to left, will result in $0,1,\dotsc,r_2-2$ crossings with arcs with closing nodes to the right of $d_2$. Continuing in this manner, we see that the contribution of arcs with opening nodes $c_1,\dotsc,c_m$ to $T_m(q)$ is $\prod_{j=1}^m [r_j-(j-1)]_q$. \end{proof} Summing over all possible sets of opening nodes gives the following result. \begin{theorem} For every non-negative integer $m$, \begin{displaymath} T_m(q) = \sum_{1\leq c_1<\dotsb <c_m\leq 2m}\: \prod_{j=1}^m [r_j-(j-1)]_q, \end{displaymath} where $r_j$ is given by (\ref{eq:rj}). \end{theorem} \section{The Enumeration of Splitting Subspaces} \label{sec:enum-splitt-subsp} The relationship between the enumeration of splitting subspaces and the polynomials $T_m(q)$ was discovered in \cite[Section~4.6]{pr}. It is a special case of one of the main results \cite[Theorem~4.8]{pr} of that paper. The proof in this special case, being relatively simple, is provided here. \begin{theorem} \label{theorem:split-splitting} Let $T\in M_{2m}(\Fq)$ be a diagonal matrix with distinct diagonal entries. The number of $T$-splitting subspaces in $\Fq^{2m}$ is \begin{displaymath} \sigma^T = (q-1)^m q^{\binom m2} T_m(q). \end{displaymath} \end{theorem} Comparison of Theorem~\ref{theorem:split-splitting} with the main theorem gives a new proof of the Touchard-Riordan formula \eqref{eq:touchard-riordan}. \begin{proof} Let $W\subset \Fq^{2m}$ be a $T$-splitting subspace of $\Fq^{2m}$. $W$ has a unique ordered basis in reduced row echelon form. This is a basis whose elements form the rows of an $m\times 2m$ matrix such that \begin{enumerate} \item There exist $1\leq c_1<\dotsb < c_m\leq 2m$ (called the pivots of $W$) such that the first non-zero entry of the $i$th row lies in the $c_i$th column, and equals $1$. \item \label{item:rowech} The only non-zero entry in the $c_i$th column lies in the $i$th row for $1\leq i\leq m$. \end{enumerate} For example, when $m=3$, a subspace with pivots $1,2,4$ is spanned by a matrix of the form \begin{equation} \label{eq:rowech} \begin{pmatrix} 1 & 0 & * & 0 & * & *\\ 0 & 1 & * & 0 & * & *\\ 0 & 0 & 0 & 1 & * & * \end{pmatrix}, \end{equation} where each $*$ represents an arbitrary element of $\Fq$. Suppose that $W$ has reduced row echelon form with pivots $c_1,\dotsc,c_m$. By a permutation of coordinates, the pivot columns can be moved to the left to rewrite $A$ in the block form $(I\mid X)$, where $I$ denotes the $m\times m$ identity matrix, and $X\in M_m(\Fq)$. 
The condition \eqref{item:rowech} in the definition of row echelon form imposes the vanishing of certain entries of $X$: \begin{displaymath} X_{ij} = 0 \text{ if } j< c_i-(i-1). \end{displaymath} For the matrix in \eqref{eq:rowech}, moving the pivot columns to the left results in the matrix \begin{displaymath} \left( \begin{array}{ccc|ccc} 1 & 0 & 0 & * & * & *\\ 0 & 1 & 0 & * & * & *\\ 0 & 0 & 1 & 0 & * & * \end{array} \right). \end{displaymath} The above permutation of coordinates also permutes the diagonal entries of $T$, but it remains a diagonal matrix with distinct diagonal entries. Write this matrix in block diagonal form as $\begin{pmatrix} T' & 0\\ 0 & T'' \end{pmatrix}$, where $T'$ and $T''$ are $m\times m$ diagonal matrices. Now $W$ is a $T$-splitting subspace if and only if the matrix \begin{displaymath} \begin{pmatrix} I & X\\ T' & XT'' \end{pmatrix} \end{displaymath} is non-singular. Applying the block row operation $R_2\to R_2-T'R_1$ gives \begin{displaymath} \begin{pmatrix} I & X\\ 0 & XT''- T'X \end{pmatrix}. \end{displaymath} Thus $W$ is a splitting subspace if and only if $Y=XT''-T'X$ is non-singular. The entries of $Y$ in terms of the entries of $X$ are given by \begin{displaymath} y_{ij} = (t''_j-t'_i)x_{ij}, \end{displaymath} where $t'_i$ (resp. $t''_i$) is the $i$th diagonal entry of $T'$ (resp. $T''$). Since $T'$ and $T''$ have no diagonal entries in common, the map $X\mapsto Y$ is a bijection, and an entry of $X$ is non-zero if and only if the corresponding entry of $Y$ is non-zero. Thus we have the following result. \begin{lemma} \label{lemma:counting-matrices} The number of $T$-splitting subspaces with pivots\linebreak $c_1,\dotsc,c_m$ is the number of non-singular matrices $Y\in M_m(\Fq)$ such that $Y_{ij}=0$ if $j<c_i-(i-1)$. \end{lemma} It remains to enumerate such matrices. The number of potentially non-zero entries in the $j$th column of $Y$ is the number of $i$ such that $c_i\leq i+j-1$, which is precisely the number $r_j$ from Lemma~\ref{lemma:openings}. Note that $r_1\leq r_2\leq \dotsb\leq r_m$, so the span of the first $j-1$ columns of $Y$ is contained in the $r_j$-dimensional space of columns allowed in position $j$. Since $Y$ is non-singular, its first column is non-zero. Thus there are $q^{r_1}-1=(q-1)[r_1]_q$ possibilities for the first column of $Y$. The second column must lie outside the span of the first, giving $q^{r_2}-q=(q-1)q[r_2-1]_q$ possibilities. Similarly, given the first $j-1$ columns of $Y$, the number of possibilities for the $j$th column is $q^{r_j}-q^{j-1}=(q-1)q^{j-1}[r_j-(j-1)]_q$. Thus the number of matrices $Y$ satisfying the conditions of Lemma~\ref{lemma:counting-matrices} is \begin{displaymath} (q-1)^mq^{\binom m2}\prod_{j=1}^m[r_j-(j-1)]_q. \end{displaymath} Adding up the contribution of all possible sets of pivots and using Lemma~\ref{lemma:Tq-refinement} gives Theorem~\ref{theorem:split-splitting}. \end{proof} \printbibliography \end{document}
2205.11066v1
http://arxiv.org/abs/2205.11066v1
Cyclicity of composition operators on the Fock space
\documentclass[12pt]{amsart} \usepackage{amsmath,amsfonts,amssymb} \usepackage{color,graphicx} \usepackage[left=2.5cm,right=2.5cm,top=3.5cm,bottom=3cm]{geometry} \usepackage{amsthm} \usepackage{ dsfont } \usepackage{bm} \usepackage{mathtools} \usepackage{enumerate} \newtheorem{theorem}{Theorem}[section] \newtheorem*{theoremA}{Theorem A} \newtheorem{taggedtheoremx}{Theorem} \newenvironment{taggedtheorem}[1] {\renewcommand\thetaggedtheoremx{#1}\taggedtheoremx} {\endtaggedtheoremx} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{definition}[theorem]{Definition} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{question}[theorem]{Question} \newtheorem{notation}[theorem]{Notation} \newtheorem{example}[theorem]{Example} \newtheorem{remark}[theorem]{Remark} \usepackage[utf8]{inputenc} \newcommand{\A}{\mathcal{A}} \newcommand{\B}{\mathcal{B}} \newcommand{\W}{\mathcal{W}} \newcommand{\U}{\mathcal{U}} \newcommand{\wL}{\widetilde{L}} \newcommand{\LL}{\mathcal{LL}} \newcommand{\F}{\mathcal{F}} \newcommand{\HH}{\mathcal{H}} \newcommand{\PP}{\mathcal{P}} \newcommand{\II}{\mathcal{I}} \newcommand{\QQ}{\mathcal{Q}} \newcommand{\N}{\mathds{N}} \newcommand{\Z}{\mathds{Z}} \newcommand{\C}{\mathds{C}} \newcommand{\R}{\mathds{R}} \newcommand{\D}{\mathds{D}} \newcommand{\Q}{\mathds{Q}} \newcommand{\T}{\mathds{T}} \newcommand{\K}{\mathds{K}} \newcommand{\dist}{\textup{dist}} \newcommand{\diam}{\textup{diam}} \newcommand{\mcn}{\mathcal N} \newcommand\blfootnote[1]{ \begingroup \renewcommand\thefootnote{}\footnote{#1} \addtocounter{footnote}{-1} \endgroup } \DeclareMathOperator{\argmin}{argmin} \date{\today} \title{Cyclicity of composition operators on the Fock Space} \author{Frédéric Bayart and Sebastián Tapia-García} \address{Frédéric Bayart, Sebastián Tapia-García} \address{Laboratoire de Math\'ematiques Blaise Pascal UMR 6620 CNRS, Universit\'e Clermont Auvergne, Campus universitaire C\'ezeaux, 3 place Vasarely, 63178 Aubière Cedex, France.} \address{Email address: [email protected], [email protected]} \begin{document} \maketitle \begin{abstract}In this paper we provide a full characterization of cyclic composition operators defined on the $d$-dimensional Fock space $\F(\C^d)$ in terms of their symbol. Also, we study the supercyclicity and convex-cyclicity of this type of operators. We end this work by computing the approximation numbers of compact composition operators defined on $\F(\C^d)$. \end{abstract} \section{Introduction} Let $\C^d$ be the $d$-dimensional complex Euclidean space with $d\geq 1$. The classical Fock space on $\C^d$ is defined by \[\F(\C^d):=\left\{f\in \mathcal{H}(\C^d):~\|f\|^2:=\dfrac{1}{(2\pi)^d}\int_{\C^d}|f(z)|^2e^{-\frac{|z|^2}{2}}dA(z)<\infty\right \},\] where $dA$ stands for the Lebesgue measure on $\C^d$, $|z|=\sqrt{\langle z,z\rangle}$ and $\langle z,w\rangle=\sum_{j=1}^d z_j \overline{w_j}$. The space $\mathcal F(\C^d)$ is a separable Hilbert space equipped with the inner product \[\langle f,g \rangle:= \dfrac{1}{(2\pi)^d}\int_{\C^d} f(z)\overline{g(z)}e^{-\frac{|z|^2}{2}}dA(z). \] Observe that we do not distinguish between the inner products of $\C^d$ and $\F(\C^d)$. The space $\F(\C^d)$ is also a reproducing kernel Hilbert space. It is well-know that the reproducing kernel function is given by \[k_w:~z\in\C^d\mapsto k_w(z)= \exp\left(\frac{\langle z,w\rangle }{2}\right),\] which has norm $\|k_w\|=\exp\left(\frac{|w|^2}{4}\right)$. 
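Indeed, the reproducing property $\langle f,k_w\rangle=f(w)$ applied to $f=k_w$ gives $\|k_w\|^2=k_w(w)=\exp\left(\frac{|w|^2}{2}\right)$.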
Also, the set of polynomials $\{z^\alpha:\alpha\in\N^d\}$ forms an orthogonal basis of $\F(\C^d)$ and \[\|z^\alpha\|^2= 2^{|\alpha|}\prod_{j=1}^d\alpha_j!.\] In this paper, we are interested in composition operators on $\mathcal F(\C^d)$. Let $\varphi:\C^d\to\C^d$ be a holomorphic function. The composition operator with symbol $\varphi$ is defined by \[f\in \mathcal{H}(\C^d)\mapsto C_\varphi(f):= f\circ \varphi.\] Carswell et al.~\cite{CMS} have characterized when $C_\varphi$ defines a bounded composition operator on $\mathcal F(\C^d)$: this holds if and only if $\varphi(z)=Az+b$, where $A\in \C^{d\times d}$ satisfies $\|A\|\leq 1$ and $b\in\C^d$ satisfies $\langle Av,b\rangle=0$ for all $v\in\C^d$ with $|Av|=|v|$. Moreover, $C_\varphi$ is compact if and only if $\|A\|<1$. Since then, much work has been done to characterize properties of $C_\varphi$ in terms of properties of the symbol $\varphi$, sometimes only when $d=1$: see for instance \cite{Du,FZ,GI,JPZ}. In this paper we are concerned with the dynamical properties of composition operators defined on $\F(\C^d)$. Let us recall the relevant definitions. Let $X$ be a separable Banach space, let $T\in\mathcal{L}(X)$ be a bounded linear operator defined on $X$ and let $x\in X$. The orbit of $x$ under the action of $T$ is the set $\textup{orb}(T,x):=\{T^nx:~n\in\N\}$. The operator $T\in \mathcal{L}(X)$ is said to be cyclic if there is $x\in X$ such that $\textup{span}(\textup{orb}(T,x))$ is dense in $X$. In this case, we say that $x$ is a cyclic vector for $T$. Similarly, we say that $T$ is supercyclic or hypercyclic if there is $x\in X$ such that $\C\cdot \textup{orb}(T,x)$ or $\textup{orb}(T,x)$, respectively, is dense in $X$. There is a rich literature concerning cyclicity, supercyclicity and hypercyclicity of composition operators defined on the Hardy space, the Bergman space and the Dirichlet space; see for instance \cite{BM,BS,DK,ZZ}.\\ Regarding Fock spaces, the cyclic composition operators on the Fock space of $\C$ have been characterized in \cite[Theorem 4.2]{GI}. \begin{taggedtheorem}{A}\label{Theorem A} Let $a,b\in \C$ be such that $C_{az+b}$ induces a bounded composition operator on $\F(\C)$. Then, $C_{az+b}$ is cyclic on $\mathcal{F}(\C)$ if and only if $a\neq 0$ and $a$ is not a root of unity. \end{taggedtheorem} In higher dimensions, cyclicity has only been characterized in the very particular case where $A$ is diagonal and unitary (note that this implies $b=0$). \begin{taggedtheorem}{B}\cite[Theorem 5.3]{JPZ}\label{Theorem B} Let $A=\textup{diag}(e^{i\theta_1},...,e^{i\theta_d})\in \C^{d\times d}$. The composition operator $C_{Az}$ is cyclic on $\mathcal{F}(\C^d)$ if and only if the set $\{\pi,\theta_1,...,\theta_d\}$ is $\Q$-linearly independent. \end{taggedtheorem} Our main result, which extends both Theorem \ref{Theorem A} and Theorem \ref{Theorem B}, solves the problem of cyclicity in full generality. \begin{theorem}\label{Theorem 1} Let $A\in\C^{d\times d}$, $b\in \C^d$ and $\varphi(z)=Az+b$ be such that $C_\varphi$ induces a bounded composition operator on $\mathcal{F}(\C^d)$.
Then, $C_\varphi$ is cyclic if and only if the following conditions are satisfied : \begin{itemize} \item $A$ is invertible; \item $A$ is diagonalizable or its canonical Jordan form admits at most one Jordan block, whose size is exactly $2$; \item if $\lambda:=(\lambda_j)_{j=1}^{\hat{d}}$ denotes the sequence of eigenvalues of $A$, repeated by geometric multiplicity (therefore $\hat{d}= d$ or $d-1$), then for any $\alpha\in \Z^{\hat{d}}\setminus \{0\}^{\hat{d}}$, $\lambda^\alpha\neq 1$. \end{itemize} \end{theorem} Observe that the third condition may be rewritten by saying that if $\lambda:=(\lambda_j)_{j=1}^{\hat{d}}$ denotes the sequence of eigenvalues of $A$, repeated by geometric multiplicity (therefore $\hat{d}= d$ or $d-1$), then for any $\alpha\in \Z^{\hat{d}}\setminus \{0\}^{\hat{d}}$ such that $\lambda^\alpha=\exp(i\theta)$ for some $\theta\in\R$, the pair $\{\pi,\theta\}$ is $\Q$-linearly independent. \begin{example} \begin{itemize} \item Let $A=\begin{pmatrix} \frac{e^{i\theta_1}}2&0\\0&\frac{e^{i\theta_2}}3\end{pmatrix}$. Then $C_{Az}$ is cyclic on $\mathcal F(\C^2)$ for all values of $\theta_1,\theta_2$. \item Let $A=\begin{pmatrix} \frac{e^{i\theta_1}}2&0\\0&\frac{e^{i\theta_2}}4\end{pmatrix}$. Then $C_{Az}$ is cyclic on $\mathcal F(\C^2)$ if and only if $\theta_2-2\theta_1\notin \pi\Q$. \item Let $(\rho_j)_{j=1}^d\subset(-\infty,0)$ be $\Q$-linearly independent, let $A=\textup{diag}(\exp(\rho_1),...,\exp(\rho_d))$ and let $b\in \C^d$. Then $C_{Az+b}$ is cyclic on $\F(\C^d)$. \end{itemize} \end{example} The proof of Theorem \ref{Theorem 1} will be rather long. We will start in Section~\ref{section 2} by studying two simple cases which are significant enough to point out the main ideas behind the proof. In the same section, we will also expose several lemmas. In Section~\ref{section 3} we prove the sufficient condition of Theorem~\ref{Theorem 1} whereas the necessary condition of Theorem~\ref{Theorem 1} will be presented in Section~\ref{section 4}. In Section~\ref{section 5} we characterize the set of cyclic vectors for compact composition operators on $\mathcal F(\C^d)$. The remainder of the paper is devoted to solve several problems on composition operators on $\mathcal F(\C^d)$ where the techniques introduced to prove Theorem~\ref{Theorem 1} are useful. In Section~\ref{section 6} we show that bounded composition operators on $\F(\C^d)$ are never supercyclic with respect the pointwise convergence topology (and thus neither weakly-supercyclic) nor convex-cyclic. In Section~\ref{section 7} we compute the approximation numbers of compact composition operators on $\F(\C^d)$.\\ \noindent\textbf{Notation.} For $z\in \C\setminus\{0\}$, we denote by $\arg(z)\in [0,2\pi)$ the number such that $z= |z|\exp(i\arg(z))$. For a matrix $A\in\C^{d\times d}$, its norm is defined by $\|A\|:= \sup \{|Av|:~|v|=1\}$, its transpose is denoted $A^T$ and its Hermitian transpose is denoted by $A^*$. For $(x_{j})_{j=1}^d\subset\C$, we denote by $\textup{diag}(x_1,...,x_d)$ the diagonal matrix with entries $(x_{j})_{j=1}^d$. For any $\alpha\in \Z^d$, we denote the length of $\alpha$ by $|\alpha|:= \sum_{i=1}^d |\alpha_i|$ and for any vector $\lambda\in \C^d$ we write $\lambda^\alpha:=\prod_{i=1}^d {\lambda_i}^{\alpha_i}$. We also consider the partial order $\leq$ on $\N^d$ defined as follows: $\alpha \leq \beta $ if and only if $\alpha_j\leq \beta_j$ for all $j=1,...,d$. By $\D, \overline{\D}$ and $\T$ we respectively denote the open complex unit disc, its closure and its boundary. 
Finally, when dealing with the adjoint of composition operators, we will require to introduce weighted composition operators: for $\varphi:\mathbb C^d\to\mathbb C^d$ and $\psi:\mathbb C^d\to \mathbb C$, the weighted composition operator with symbols $(\psi,\varphi)$ is defined by \[f\in \mathcal{H}(\C^d)\mapsto W_{\psi,\varphi}(f):= \psi \cdot f\circ \varphi.\] Further information about weighted composition operators defined on $\F(\C)$ can be found in \cite{CG, M,M2,M3} and references therein. \section{Preliminaries}\label{section 2} \subsection{Two particular cases} This subsection is purely expository. It aims to point out the main difference between Jordan blocks of size $2$ and of size $3$. Denote for $d\geq 1$ and $N\geq 0$ by $\mathcal P_{\textrm{hom}}(N,d)$ the set of all homogeneous polynomials of degree $N$ in $d$ variables, namely $$\mathcal P_{\textrm{hom}}(N,d)=\textrm{span}(z_1^{\alpha_1}\cdots z_d^{\alpha_d}:\alpha_1+\cdots+\alpha_d=N).$$ Let us recall that $\dim(\mathcal P_{\textrm{hom}}(N,d))=\binom{N+d-1}{d-1}$. Let $P_{N,d}$ be the the orthogonal projection on $\mathcal P_{\textrm{hom}}(N,d)$ in $\mathcal F(\C^d)$. Let us also denote $$A_1=\begin{pmatrix}1/2&a\\0&1/2\end{pmatrix}\quad A_2=\begin{pmatrix}1/2&a&0\\0&1/2&a\\ 0&0&1/2\end{pmatrix}$$ where $a\in\C\backslash\{0\}$ is so that $C_{A_1z}$ and $C_{A_2z}$ are bounded operators on $\mathcal F(\C^2)$ and $\mathcal F(\C^3)$ respectively. We are going to show that \begin{enumerate}[(a)] \item for any $N$ large enough, for any $f\in\mathcal F(\C^3)$, $\textrm{span}(P_{N,3}(C_{A_2z}^j f):\ j\geq 0)$ is not dense in $\mathcal P_{\textrm{hom}}(N,3)$, which prevents $f$ to be a cyclic vector for $C_{A_2z}$; \item for any $N\geq 0$, there exists $f\in\mathcal F(\C^2)$ such that $\textrm{span}(C_{A_1z}^j f:\ j\geq 0)$ is equal to $\mathcal P_{\textrm{hom}}(N,2)$. \end{enumerate} Let us start with (a) and write $f=\sum_{\alpha}c_\alpha z^\alpha$. Computing $A_2^j$, we easily get that, for all $j\geq 0$, $$C_{A_2z}^j f=\sum_{\alpha\in\N^3}\frac{c_\alpha}{2^{j|\alpha|}}(z_1+2jaz_2+2j(j-1)a^2z_3)^{\alpha_1}(z_2+2jaz_3)^{\alpha_2}z_3^{\alpha_3}$$ so that, expanding the product, $$P_{N,3}(C_{A_2z}^j f)=\frac1{2^{jN}}\sum_{k=0}^{2N}j^k L_k$$ where $L_0,\dots,L_{2N}$ are fixed polynomials in $\mathcal P_{\textrm{hom}}(N,3)$. Therefore $$\dim(\textrm{span}(P_{N,3}(C_{A_2z}^j f):\ j\geq 0))\leq 2N+1<\dim(\mathcal P_{\textrm{hom}}(N,3))$$ provided $N$ is large enough. Regarding (b), let $f=\sum_{|\alpha|=N}z^\alpha=\sum_{k=0}^N z_1^k z_2^{N-k}$. Then, for all $j\geq 0$, \begin{align*} C_{A_1z}^j (f)&=\sum_{k=0}^N \frac{1}{2^{Nj}}(z_1+2jaz_2)^k z_2^{N-k}\\ &=\frac1{2^{Nj}}\sum_{k=0}^N j^k L_k \end{align*} where $(L_0,\dots,L_N)$ is a basis of $\mathcal P_{\textrm{hom}}(N,2)$. Now, $$\frac{2^{Nj}C_{A_1z}^j(f)}{j^N}\to L_N\in \textrm{span}(C_{A_1z}^j f:\ j\geq 0).$$ Hence, $$\frac{2^{Nj}C_{A_1z}^j(f)-j^NL_N}{j^{N-1}}\to L_{N-1}\in \textrm{span}(C_{A_1z}^j f:\ j\geq 0)$$ and iterating we get that $\textrm{span}(C_{A_1z}^j f:\ j\geq 0)=\mathcal P_{\textrm{hom}}(N,2)$. \medskip The proof of Theorem \ref{Theorem 1} will rely on the two ideas exposed above. We will also need a supplementary argument, based on Kronecker's theorem, to handle different eigenvalues. Working with matrices which are maybe not unitarily equivalent to their Jordan form and with affine maps instead of linear ones will cause some extra troubles which require the introduction of the tools which are described in the remaining part of this section. 
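These dimension counts are easy to confirm numerically. The sketch below (illustrative only; it uses SymPy, and the choices $a=\frac14$, $N=4$ and the range $0\le j\le 12$ are arbitrary, with $\|A_1\|,\|A_2\|<1$ for this value of $a$) computes the rank of the family $\{P_{N,d}(C_{Az}^jf):\ 0\le j\le 12\}$ for $f=\sum_{|\alpha|=N}z^\alpha$: for $A_1$ one finds the full dimension $N+1=5$ of $\mathcal P_{\textrm{hom}}(4,2)$, whereas for $A_2$ the rank is bounded by $2N+1=9<15=\dim\mathcal P_{\textrm{hom}}(4,3)$.
\begin{verbatim}
from itertools import combinations_with_replacement
import sympy as sp

a, N, JMAX = sp.Rational(1, 4), 4, 12   # any a != 0 gives the same ranks

def orbit_rank(A):
    # rank of {degree-N part of C_{Az}^j f : 0 <= j <= JMAX} for
    # f = sum of all degree-N monomials in d = A.shape[0] variables
    d = A.shape[0]
    z = sp.symbols('z1:%d' % (d + 1))
    combos = list(combinations_with_replacement(range(d), N))
    exps = [tuple(c.count(i) for i in range(d)) for c in combos]
    monos = [sp.Mul(*[z[i] for i in c]) for c in combos]
    f = sum(monos)
    rows = []
    for j in range(JMAX + 1):
        Aj = A ** j
        subs = {z[i]: sum(Aj[i, k] * z[k] for k in range(d)) for i in range(d)}
        g = sp.expand(f.xreplace(subs))   # = C_{Az}^j f, homogeneous of degree N
        coeffs = dict(sp.Poly(g, *z).terms())
        rows.append([coeffs.get(e, 0) for e in exps])
    return sp.Matrix(rows).rank(), len(exps)

half = sp.Rational(1, 2)
A1 = sp.Matrix([[half, a], [0, half]])
A2 = sp.Matrix([[half, a, 0], [0, half, a], [0, 0, half]])
print(orbit_rank(A1))  # (5, 5): all of P_hom(4,2) is reached
print(orbit_rank(A2))  # rank at most 2N+1 = 9 < 15 = dim P_hom(4,3)
\end{verbatim}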
\subsection{Useful lemmas} In this section we collect some facts which will help us in the forthcoming proof of Theorem~\ref{Theorem 1}. \begin{proposition}\label{product of functions} Let $f,g\in \mathcal{F}(\C^d)$ be two functions. Assume that there are two disjoint sets $I_f, I_g\subset \{1,\cdots,d\}$ such that $f(z)= f((z_i)_{i\in I_f})$ and $g(z)= g((z_i)_{i\in I_g})$. Then the function defined by $z\mapsto h(z):=f(z)g(z)$ belongs to $ \mathcal{F}(\C^d)$. \end{proposition} \begin{proof} It follows from the definition of the norm on $\mathcal{F}(\C^d)$. Indeed $\|h\|=\|f\|\cdot \|g\|$. \end{proof} \begin{proposition}\label{conjugated operator} Let $\varphi(z):=Az+b$ be such that $C_\varphi$ induces a bounded composition operator on $\mathcal{F}(\C^d)$. Then, there are $S\in \C^{d\times d}$ and $v\in\C^d$ such that $C_\varphi$ is similar to $C_{Sz+v}$ and \[ S= \begin{pmatrix} T& 0 \\ 0 & U \\ \end{pmatrix}, \] where $T\in\C^{p\times p}$ is an upper triangular matrix such that its diagonal contains all the eigenvalues of $A$ of modulus lower than $1$ and $U\in\C^{(d-p)\times (d-p)}$ is a diagonal matrix containing all the eigenvalues of $A$ of modulus $1$. Moreover $v\in \C^p\times\{0\}^{d-p}$. \end{proposition} \begin{proof} Let $P,S\in \C^{d\times d}$ be an orthogonal matrix and an upper triangular matrix obtained by the Schur decomposition of $A$, that is, $A= PSP^*$. Further, assume that in the first $p$ entries of the diagonal of $S$ we find all the eigenvalues of $A$ of modulus strictly lower than $1$. Since ${\|P\|=\|P^*\|=1}$ and $P^{-1}=P^*$, we have that $C_{Pz},C_{P^*z}$ are invertible elements of $\mathcal{L}(\mathcal{F}(\C^d))$, with $C_{Pz}C_{P^*z}=Id$, and that \[ C_{Sz + P^*b}= C_{Pz}C_\varphi C_{P^*z}\in \mathcal{L}(\mathcal{F}(\C^d)).\] Now, noticing that $S$ is an upper triangular matrix, $\|S\|\leq 1$ and the last $d-p$ entries of its diagonal have modulus equal to one, we get \[ S= \begin{pmatrix} T& 0 \\ 0 & U \\ \end{pmatrix} \] where $T\in\C^{p\times p}$ is an upper triangular matrix such that its diagonal contains all the eigenvalues of $A$ of modulus lower than $1$ and $U\in\C^{(d-p)\times (d-p)}$ is a unitary diagonal matrix. Finally, since $C_{Sz + P^*b}$ is bounded, $v:= P^*b \in \C^p\times\{0\}^{d-p}$. \end{proof} \begin{lemma}\label{lem:range} Let $\varphi(z):=Az+b$ be such that $C_\varphi$ induces a bounded composition operator on $\mathcal F(\C^d)$. Then $b\in\textrm{Ran}(I-A)$. \end{lemma} \begin{proof} By \cite[Lemma 5.2]{JPZ}, $b\in \ker(I-A^*)^\perp=\textrm{Ran}(I-A)$. \end{proof} For the sake of completeness, we state the following two results which are taken from \cite{BS} and \cite{CMS} respectively. \begin{proposition}\cite[Proposition 2.7]{BS}\label{noncyclicity adjoint} Let $H$ be a Hilbert space and let $T\in \mathcal{L}(H)$. If there is $\lambda\in\C$ such that $\textup{dim}(\ker(T^*-\lambda I))\geq 2$, then $T$ is not cyclic. \end{proposition} \begin{proposition}\cite[Lemma 2]{CMS}\label{adjoint operator} Let $\varphi(z):=Az+b$ be such that $C_\varphi$ induces a bounded composition operator on $\mathcal{F}(\C^d)$. Then $C_\varphi^*=W_{k_b,\widehat{\varphi}}$, that is, the weighted composition operator with symbols $k_b$ and $\widehat{\varphi}(z):= A^*z$. \end{proposition} Albeit simple, the following proposition will help us in the forthcoming computations. We recall that the symbol $\varphi$ of a bounded composition operator $C_\varphi$ on $\F(\C^d)$ always has a fixed point, \cite[Lemma 5.2]{JPZ}. 
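For instance, when $d=1$ and $\varphi(z)=az+b$ induces a bounded composition operator with $a\neq 1$, the fixed point is $\xi=\frac{b}{1-a}$ and the polynomial $L(z):=z-\xi$ satisfies \[C_\varphi L(z)=az+b-\xi=a(z-\xi)=aL(z),\] which is the simplest instance of the next proposition.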
\begin{proposition}\label{polynomials of deg 1} Let $\varphi(z):=Az+b$ be such that $C_\varphi$ induces a bounded composition operator on $\mathcal{F}(\C^d)$. Let $\lambda=(\lambda_{j})_{j=1}^d$ be the eigenvalues of $A$ repeated by algebraic multiplicity. Let $(v_j)_{j=1}^d\subset (\C^d)^*$ be a basis of generalized eigenvectors of $A^T$ associated to $\lambda$ such that, for all $j=1,\dots,d$, either $A^T v_j=\lambda_j v_j$ or $A^T v_j=\lambda_j v_j+v_{j-1}$. Let $\xi\in\C^d$ be a fixed point of $\varphi$. Then there is a basis $L=(L_j)_{j=1}^d$ of $\textrm{span}(z_k-\xi_k:\ k=1,\dots,d)$ such that \begin{align*} A^T v_j=\lambda_jv_j ~(\text{or }= \lambda_jv_j+v_{j-1}) \Rightarrow C_\varphi L_j = \lambda_j L_j~ (\text{resp. }=\lambda_j L_j+L_{j-1}), \end{align*} for all $j=1,\dots,d$. \end{proposition} \begin{proof} Observe that each $v_j$, as a linear form on $\C^d$, can also be seen as an element of $\mathcal F(\C^d)$ and that $C_{Az}(v_j)=A^T v_j$. Now, noticing that $\varphi(z)=A(z-\xi)+\xi$, we get that the polynomial $L_j:=v_j(\cdot-\xi)$ satisfies $C_\varphi L_j\in\{\lambda_j L_j,\lambda_j L_j+L_{j-1}\}$ and that $(L_j)_{j=1}^d$ is a basis of $\textrm{span}(z_k-\xi_k:\ k=1,\dots,d)$. \end{proof} \begin{remark} Observe that $\{L^\alpha:\ \alpha\in\mathbb N^d\}$ is a basis of the space of all polynomials in $d$ variables. \end{remark} We will also need the following combinatorial lemma (the partial order of $\N^p$ that we consider has been defined at the end of the introduction). \begin{lemma}\label{lem:combinatorial} Let $p\geq 1$ and $E\subset\mathbb N^p$. There exists a finite partition $\{D_i: i\in I\}$ of $E$ such that, for all $i\in I$, there exists $\alpha(i)\in D_i$ satisfying $\alpha\geq \alpha(i)$ for all $\alpha\in D_i$. \end{lemma} \begin{proof} We shall proceed by induction on $p$, the case $p=1$ being trivial. Let $p\geq 2$ and assume that the result has been proven up to $p-1$. Consider any $\beta\in E$ and split $E$ into the finite partition $E_0,\dots,E_p$ with \begin{align*} E_0&=\{\alpha\in E:\ \alpha\geq \beta\}\\ E_j&=\{\alpha\in E:\ \alpha_j<\beta_j\}\backslash(E_1\cup\dots\cup E_{j-1}),\ j=1,\dots,p. \end{align*} For each $j=1,\dots,p$, we can decompose $E_j$ into the finite partition $E_{j,0},\cdots,E_{j,\beta_{j}-1}$ where $$E_{j,k}=\{\alpha\in E_j:\ \alpha_j=k\}.$$ Since one coordinate of each element of $E_{j,k}$ is fixed, one can apply the induction hypothesis to $E_{j,k}$ to find a finite partition $\{D_{j,k,i}:\ i\in I_{j,k}\}$ of $E_{j,k}$ such that, for all $(j,k,i)$, there exists $\alpha(j,k,i)\in D_{j,k,i}$ satisfying \begin{equation}\label{eq:combinatorial} \forall \alpha\in D_{j,k,i},\ \alpha_l\geq\alpha(j,k,i)_l\textrm{ for }l\in\{1,\dots,p\}\backslash \{j\}. \end{equation} Now since for $\alpha\in D_{j,k,i}$, $\alpha_j=\alpha(j,k,i)_j=k$, \eqref{eq:combinatorial} is true for all $l=1,\dots,p$, namely $\alpha\geq\alpha(j,k,i)$ for all $\alpha\in D_{j,k,i}$. Therefore, $\{E_0\}\cup\{D_{j,k,i}:\ j=1,\dots,p,\ k=0,\dots,\beta_j-1,\ i\in I_{j,k}\}$ is the partition we are looking for. \end{proof} We will finally require the invertibility of a Vandermonde-like matrix. \begin{lemma}\label{invertible matrix} Let $N\geq 1$. Let $\{\alpha(n):~n=1,...,N\}\subset \Z^d$, where $\alpha(n)=\alpha(m)$ only if $n=m$. Then, there is $\{w(n):~n=1,...,N\}\subset \T^d$ such that the matrix $(w(i)^{\alpha(j)} )_{i,j=1,\dots,N}\in \C^{N\times N}$ is invertible. \end{lemma} \begin{proof} Let us proceed by induction on $N$.
If $N=1$, the result is clear and we assume that Lemma~\ref{invertible matrix} holds true for some $N\geq 1$. Let us choose $\{w(i):~i=1,...,N\}\subset \T^d$ such that the matrix $M:=(w(i)^{\alpha(j)} )_{i,j=1,\dots,N}\in \C^{N\times N}$ is invertible. Therefore, $\textup{det}(M)\neq 0$. Now, let us consider the function \[z\in \T^d \mapsto f(z):=\det \big({(w(i)^{\alpha(j)})}_{i,j=1,\dots,N+1}\big),~ \] where $w(N+1)=z$. Developing the determinant that defines $f(z)$ using the last row, thanks to the induction hypothesis and the fact that $\alpha(N+1)\neq \alpha(n)$ for $n=1,\dots,N$, we get that $f$ is a trigonometric polynomial with at least one non-zero coefficient. Therefore, there is $z\in \T^d$ such that $f(z)\neq 0$. \end{proof} \section{Cyclic composition operators}\label{section 3} In order to provide the proof of Theorem~\ref{Theorem 1}, we need the following auxiliary results. \begin{proposition}\label{computation for induction} Let $C_\varphi$ be a bounded composition operator on $\F(\C^d)$. Let $p\geq 1$ and $\lambda\in \D^{p}$, where $\lambda_{p-1}=\lambda_p$. Assume that there is $L:=(L_i)_{i=1}^p\subset\F(\C^d)$ a finite sequence of polynomials such that $C_\varphi L_i=\lambda_iL_i$ for all $i=1,...,p-1$ and $C_\varphi L_p= \lambda_{p-1}L_p+L_{p-1}$. Then, there is $J\in \N$ such that for any $j\geq J$, for any $n\in\N$ and for any $D\subset \{\alpha\in\N^p:~|\alpha|=n\}$, we have \[C_\varphi^j \left(\sum_{\substack{\alpha\in D}} L^\alpha\right)=\sum_{\substack{\alpha\in \N^p\\ |\alpha|=n}} c(\alpha,D,j) L^\alpha, ~\text{for all }i=1,...,p\] where $|c(\alpha,D,j)|\leq 1$. \end{proposition} \begin{proof} Let $j\in \N$. We compute \begin{align*} C_\varphi^j \left(\sum_{\substack{\alpha\in D}} L^\alpha\right)&= \sum_{\substack{\alpha\in D}} \prod_{i=1}^p \big(C_\varphi^j(L_i)\big)^{\alpha_i}\\ &= \sum_{\substack{\alpha\in D}} \left(\prod_{i=1}^{p-1} \lambda_i^{\alpha_i j}L_i^{\alpha_i}\right)\big(\lambda_{p-1}^jL_p+j\lambda_{p-1}^{j-1}L_{p-1}\big)^{\alpha_p}\\ &= \sum_{\substack{\alpha\in D}} \lambda^{j\alpha} \left(\prod_{i=1}^{p-1} L_i^{\alpha_i}\right) \sum_{\beta=0}^{\alpha_p}\begin{pmatrix} \alpha_p \\ \beta \end{pmatrix}L_p^\beta \left(\frac{j}{\lambda_{p-1}}\right)^{\alpha_p-\beta}L_{p-1}^{\alpha_p-\beta}\\ &=\sum_{\substack{\alpha\in \N^p\\ |\alpha|=n}} L^\alpha \left(\prod_{i=1}^{p-2}\lambda_i^{j\alpha_i}\right)\lambda_{p-1}^{j(\alpha_{p-1}+\alpha_p)}\sum_{\substack{\gamma\in \N^2\\ |\gamma|=\alpha_{p-1}+\alpha_p\\ (\alpha_1,...,\alpha_{p-2},\gamma)\in D \\ \gamma_2\geq\alpha_p }} \begin{pmatrix} \gamma_2 \\\alpha_{p} \end{pmatrix} \left(\frac{j}{\lambda_{p-1}}\right)^{\gamma_2-\alpha_p} \end{align*} Now, let us fix $\alpha\in \N^p$, with $|\alpha|=n$ and set $N:=\alpha_{p}+\alpha_{p-1}$. Observe that \begin{align*} \Bigg|\lambda_{p-1}^{jN}\sum_{\substack{\gamma\in \N^2,\ |\gamma|=N\\ (\alpha_1,...,\alpha_{p-2},\gamma)\in D \\ \gamma_2\geq\alpha_p }} \begin{pmatrix} \gamma_2 \\\alpha_{p} \end{pmatrix} \left(\frac{j}{\lambda_{p-1}}\right)^{\gamma_2-\alpha_p} \Bigg |&\leq |\lambda_{p-1}|^{jN}\sum_{\substack{\gamma\in \N^2\\ |\gamma|=N\\ \gamma_2\geq\alpha_p }} \begin{pmatrix} |\gamma| \\ \alpha_{p}+\gamma_1 \end{pmatrix} \left(\frac{j}{|\lambda_{p-1}|}\right)^{\gamma_2-\alpha_p}\\ &\leq |\lambda_{p-1}|^{jN} \left(1+\frac{j}{|\lambda_{p-1}|}\right)^{N}\\ &=(|\lambda_{p-1}|^j+ j|\lambda_{p-1}|^{j-1})^{N}. \end{align*} Since $\lambda_{p-1}\in\D$, there is $J\in \N$ such that $|\lambda_{p-1}|^j+ j|\lambda_{p-1}|^{j-1}\leq 1$ for all $j\geq J$. 
Notice that $J$ does not depend on $\alpha$. \end{proof} \begin{lemma}\label{obtaining L alpha} Let $0\leq p\leq d$. Let $(\lambda,\mu):=(\lambda_j)_{j=1}^p\times(\mu_j)_{j={p+1}}^{d}\in (\D\setminus \{0\})^p\times\T^{d-p}$. Let $f:\T^{d-p}\to \F(\C^d)$ be a function. Let $R\in (0,1)$ and $\mcn:=\{\alpha\in\N^p:~|\lambda^\alpha|=R\}$. Let $(x_\alpha)_{\alpha\in \mcn}\subset \F(\C^d)$ be a sequence of linearly independent functions such that, for each $\alpha\in \mcn$ the function $w\in \T^{d-p}\mapsto f(w)x_\alpha\in \F(\C^d)$ is well defined and continuous. Assume that \begin{align*}\tag{H}\label{hypothesis} \text{there is no }(\alpha,\beta)\in \Z^p\times\Z^{d-p}\setminus \{\{0\}^d\}~\text{such that }\lambda^\alpha\mu^\beta=1. \end{align*} Then, for any fixed $\gamma \in \mcn$, the closure of the linear space spanned by the accumulation points in $\F(\C^d)$ of the sequence \[ \left(f(\mu_{p+1}^n,...,\mu_{d}^n)\sum_{\alpha\in \mcn} \left(\dfrac{\lambda^{\alpha}}{\lambda^{\gamma}} \right)^n x_\alpha \right)_n \] contains the set $\{f(w)x_\alpha:~\alpha\in \mcn,~w\in \T^{d-p}\}$. \end{lemma} Observe that hypothesis~\eqref{hypothesis} is equivalent to: $\left\{\pi,\arg\left({\lambda^{\alpha}}\mu^{\beta}\right)\right\}$ is $\Q$-linearly independent for all $(\alpha,\beta)\in (\Z^p\times \Z^{d-p})\setminus\{\{0\}\}^d$ such that $|\lambda^\alpha|=1$. \begin{proof} If $\mcn =\emptyset$, there is nothing to prove. So, we assume that $\mcn\neq \emptyset$. First, let us write $\lambda_j= e^{\rho_j}e^{i\theta_j}$ for all $j=1,...,p$ and $\mu_j= e^{i\theta_j}$ for all $j=p+1,...,d$, where $(\rho_j)_j,(\theta_j)_j\subset \R$. Observe that, for any $\alpha\in \Z^p$, $|\lambda^\alpha|=1$ if and only if $\sum_{j=1}^p \alpha_j\rho_j=0$. Let $E:=\{\alpha\in\Q^d:~\sum_{j=1}^p\alpha_j\rho_j=0\}$. We extract from $\{\rho_1,...,\rho_p\}$ a $\Q$-linearly independent family of maximal cardinality, namely $\{\rho_1,...,\rho_q\}$ and we set $(a_{j,k})_{j,k}\subset \Q$ such that \[ \rho_k= -\sum_{j=1}^q a_{j,k}\rho_j,~ \text{for all }k=q+1,...,p.\] Then, it follows that \begin{align}\label{equiv: lemma} \alpha\in E~\iff ~ \alpha_j=\sum_{k=q+1}^p a_{j,k}\alpha_k,~\text{for all }j=1,...,q. \end{align} \textbf{Claim.} The set $\{\pi\}\cup\{\theta_k+\sum_{j=1}^qa_{j,k}\theta_j:k=q+1,...,p\}\cup\{\theta_k:~k=p+1,...,d\}$ is $\Q$-linearly independent. Indeed, otherwise there are $m,(r_k)_k\subset \Z$ such that \begin{align*} 0&=m\pi+\sum_{k=q+1}^p r_k\big(\theta_k+\sum_{j=1}^q a_{j,k}\theta_j\big)+\sum_{k=p+1}^d r_k\theta_k \end{align*} \begin{align}\label{eq: lemma 1} &= m\pi + \sum_{j=1}^q \left( \sum_{k=q+1}^p r_ka_{j,k} \right)\theta_j +\sum_{k=q+1}^d r_k\theta_k. \end{align} Let us define $\alpha\in \Q^d$ by \begin{align*} \alpha_j:=\begin{cases} \sum_{k=q+1}^p r_ka_{j,k} & ~\text{if } j=1,...,q.\\ r_j & ~\text{if } j=q+1,...,d . \end{cases} \end{align*} Thus, thanks to~\eqref{equiv: lemma}, $\alpha\in E $. However, for some $K\in \N$, $K\alpha\in \Z^d\cap E$. Then \eqref{eq: lemma 1} contradicts assumption~\eqref{hypothesis} and the claim is proved. Let us fix $\gamma\in \mcn$. Observe that $(\alpha-\gamma)\times\{0\}^{d-p}\in E$ for any $ \alpha\in \mcn$, i.e. \begin{equation}\label{eq:alphagamma} \alpha_j-\gamma_j=\sum_{k=q+1}^p a_{j,k}(\alpha_k-\gamma_k),~\text{for all } j=1,...,q. 
\end{equation} Now, notice that \begin{align*} g_n:&=f(\mu_{p+1}^n,...,\mu_{d}^n)\sum_{\alpha\in \mcn} \left(\dfrac{\lambda^{\alpha}}{\lambda^{\gamma}} \right)^n x_\alpha\\ &=f(e^{in\theta_{p+1}},...,e^{in\theta_{d}})\sum_{\alpha\in \mcn} x_\alpha \prod_{k=q+1}^p e^{in(\theta_{k} +\sum_{j=1}^q a_{j,k}\theta_j)(\alpha_{k}-\gamma_{k}) }. \end{align*} Therefore, thanks to the above claim and Kronecker's Theorem, we conclude that, for any $w\in \T^{d}$, there is a sequence of integers $(n(l))_l$ such that \[g_{n(l)}\xrightarrow[l\to\infty]{ } f(w_{p+1},...,w_d)\sum_{\alpha\in \mcn} x_\alpha \prod_{k=q+1}^p w_k^{\alpha_k-\gamma_k}.\] Finally, Lemma~\ref{obtaining L alpha} follows directly from Lemma~\ref{invertible matrix} and the fact that the function $\alpha\in \mathcal{N}\mapsto (\alpha_k-\gamma_k)_{k=q+1,\dots,p}$ is one-to-one by \eqref{eq:alphagamma}. \end{proof} Now we are ready to prove the first half of Theorem~\ref{Theorem 1}. \begin{proof}[Proof of Theorem~\ref{Theorem 1}: Sufficient condition] Let $\varphi(z):=Az+b$ be an affine map such that $C_\varphi$ induces a bounded composition operator on $\F(\C^d)$. Let us assume that the canonical Jordan form of the invertible matrix $A$ admits exactly one Jordan block of size $2$ and $d-2$ Jordan blocks of size $1$. Also, we assume that the eigenvalues of $A$ satisfy the hypothesis of the statement of Theorem~\ref{Theorem 1}. If $A$ is diagonalizable, the proof is completely similar (in fact, simpler). The details of this case are left to the reader. By Proposition~\ref{conjugated operator}, we can (and shall) assume that $A=\begin{pmatrix} T&0\\ 0&U \end{pmatrix}$, where $T\in \C^{p\times p}$ is an upper triangular matrix and $U\in \C^{(d-p)\times (d-p)}$ is a unitary diagonal matrix, and $b\in \C^p\times \{0\}^{d-p}$. Let us call $\lambda\in \C^p$ the diagonal of $T$, i.e. $\lambda$ contains all the eigenvalues of $A$ of modulus lower than $1$ and we further assume that $\lambda_{p-1}=\lambda_p$. \\ Thanks to Proposition~\ref{polynomials of deg 1}, there is $L=(L_i)_{i=1}^p\subset\F(\C^d)$ a finite sequence of linearly independent polynomials of degree $1$ such that $C_\varphi L_i(z) = \lambda_i L_i(z)$ for all $i=1,...,p-1$, and $C_\varphi L_p(z)= \lambda_{p-1}L_p + L_{p-1}$. Observe that, for each $i=1,\dots,p$, the polynomial $L_i$ depends only on $\{z_1,...,z_p\}$. Therefore, $\{L^\alpha:\ \alpha\in\N^p\}$ is a basis of the vector space of polynomials on $(z_1,\dots,z_p)$.\\ In order to continue, we define $\rho_0=1$ and for each $k\in \N$, $k\geq 1$: \[\rho_k := 2^{-k} \Bigg(\sum_{\substack{ \alpha\in \N^p \\ |\alpha|=k} } \| L^\alpha\| \Bigg)^{-1}\wedge \rho_{k-1}.\] Let us set $w:=\{0\}^{p}\times \{1\}^{d-p}\in \C^d$. Observe that, since $k_w(z)=\exp (\frac{\langle z,w\rangle}{2})$, $k_w$ depends only on $(z_i)_{i=p+1}^d$. Let us consider the function $h$ defined by \[ z\in \C^d\mapsto h(z):= k_w(z) \left( \sum_{\substack{ \alpha \in \N^p }} d_{\alpha}L^{\alpha}(z) \right),\] where $d_{\alpha}= \rho_{| \alpha| }>0 $ for all $\alpha\in \N^p$. Observe that, thanks to Proposition~\ref{product of functions} and the definition of $( \rho_k)_k$, the function $h$ belongs to $\mathcal{F}(\C^d)$, with $\|h\|\leq 2\|k_w\|$.\\ We claim that $h$ is a cyclic vector for $C_\varphi$. Let us denote $H:= \overline{\textup{span}}(C^j_\varphi h:~j\in\N)$. In what follows, we proceed by induction to prove that, for every $\alpha \in \N^p$ and every $\widehat{w}\in \{0\}^p\times \T^{d-p}$, $k_{\widehat{w}}L^{\alpha}\in H$. 
A key point will be to understand how the multiindices $\alpha$ are ordered. Let us consider a decreasing enumeration $(R(n))_n$ of the set $\{|\lambda^\alpha|:~\alpha\in\N^p\}$. Also, for $n\in\N$, we define $\mcn(n):= \{\alpha\in\N^p:~| \lambda^\alpha|=R(n)\}$. Observe that $R(0)=1$, $\mcn(0)=\{\{0\}^p\}$, that each $\mcn(n)$ is finite and that $\{\mcn(n):n\in\N\}$ is a partition of $\N^p$. At step $n$, we will show that $k_{\widehat w}L^\alpha\in H$ for all $\alpha\in \mcn(n)$ and all $\widehat{w}\in \{0\}^p\times \T^{d-p}$. As in Proposition~\ref{adjoint operator}, we write $\widehat{\varphi}(z)=A^*z$. This notation allows us to state the following fact which will be used without special mention. \smallskip \textbf{Fact.} $C_{\varphi}^j k_{w}=k_{\widehat{\varphi}^j(w)}$ for all $j\geq 1$. Indeed, using Proposition~\ref{adjoint operator}, for any $f\in \F(\C^d)$ we get \begin{align*} \langle C_{\varphi} k_{w},f\rangle&= \langle (W_{k_b,\widehat{\varphi}})^* k_{w},f\rangle = \langle k_w, W_{k_b,\widehat{\varphi}}(f)\rangle \\ &= \langle k_w, k_b f\circ\widehat{\varphi}\rangle= \overline{k_b(w)f(\widehat{\varphi}(w))}\\ &= \overline{k_b(w)}\langle k_{\widehat{\varphi}(w)},f\rangle. \end{align*} But $k_b(w)=\exp(\langle w,b\rangle/2)= 1$, proving the fact for $j=1$. Inductively, since $\langle b,\widehat{\varphi}^j(w)\rangle=0$ for all $j$, we obtain that $C_{\varphi}^j k_{w}=k_{\widehat{\varphi}^j(w)}$ for all $j\geq 1$. Here we use that \[\widehat{\varphi}^j(w)=\{0\}^{p}\times (U^*)^j(\{1\}^{d-p})\in \{0\}^{p}\times\T^{d-p}.\] \smallskip \textbf{Initialization step.} We prove that $H$ contains the set $\{k_{\widehat{w}}:~\widehat{w} \in \{0\}^p\times \T^{d-p}\}$. Let us consider $\{D_i:~i=1,...,p\}$ a partition of $\N^p\setminus \{\{0\}^{p}\}$ such that, for all $i\in \{1,...,p\}$ and all $\alpha \in D_i$, $\alpha_i\geq 1$. Denote by $e(i)\in\N^p$ the multi-index satisfying $|e(i)|=1$ and $e(i)_i=1$, for all $i=1,...,p$. For $j\in\N$, we compute \begin{align*} C^j_\varphi h&= k_{\widehat{\varphi}^j(w)} C^j_\varphi \left( d_{0}+\sum_{i=1}^{p}L_i\sum_{\alpha\in D_i}d_{\alpha}L^{\alpha-e(i)} \right)\\ &= k_{\widehat{\varphi}^j(w)} d_{0}+k_{\widehat{\varphi}^j(w)}\sum_{i=1}^{p-1}\lambda_i^jL_iC^j_\varphi \left(\sum_{\alpha\in D_i}d_{\alpha} L^{\alpha-e(i)}\right)\\ &+k_{\widehat{\varphi}^j(w)}(\lambda_{p-1}^jL_p+j\lambda_{p-1}^{j-1}L_{p-1})C^j_\varphi\left(\sum_{\alpha\in D_{p}}d_{\alpha}L^{\alpha-e(p)}\right). \end{align*} We claim that the second and third summands of the last expression tend to $0$ in $\F(\C^d)$ as $j$ tends to $+\infty$. Indeed, fix $i\in\{1,\dots,p\}$ and let $D_i(n)=\{\alpha\in D_i:\ |\alpha|=n\}$. Then by definition of $d_\alpha$ and Proposition~\ref{computation for induction}, \begin{align*} C_\varphi^j\left(\sum_{\alpha\in D_i}d_\alpha L^{\alpha-e(i)}\right)&= \sum_{n=1}^{+\infty}\rho_n C_\varphi^j\left(\sum_{\alpha\in D_i(n)}L^{\alpha-e(i)}\right)\\ &=\sum_{n=1}^{+\infty}\rho_n \sum_{\substack{\alpha\in \N^p\\|\alpha|=n-1}}c(\alpha,D_i(n),j)L^\alpha \end{align*} with $|c(\alpha,D_i(n),j)|\leq 1$ for $j$ bigger than some $J$, with $J$ independent of $i$ and $n$. Now, $$\left\|L_i\, C_\varphi^j\left(\sum_{\alpha\in D_i}d_\alpha L^{\alpha-e(i)}\right)\right\|\leq \sum_{n=1}^{+\infty}\rho_n \sum_{|\alpha|=n}\|L^\alpha\|\leq 1.$$ Taking into account that \[\|k_{\widehat{\varphi}^j(w)}\|=\exp(|\widehat{\varphi}^j(w)|^2/4)=\exp(|w|^2/4),\] and since $\lambda_i\in\D$, Proposition~\ref{product of functions} completes the proof of the claim.
\smallskip Therefore, the sequence $(C^j_\varphi h)_j$ accumulates at the same points that the sequence $(d_0 k_{\widehat{\varphi}^j(w)})_j$ does. Observe that $\hat{\varphi}^j(w)= \{0\}^p\times U^{*j}(\{1\}^{d-p})$ where $U^*:= \textup{diag}(\exp(i\theta_{p+1}),...,\exp(i\theta_d))$. Moreover, thanks to the hypothesis of the eigenvalues of $A$, the set $\{\pi,\theta_{p+1},...,\theta_{d}\}$ is $\Q$-linearly independent. Hence, due to Kronecker's Theorem, for any $\widehat{w}\in \{0\}^p\times\T^{d-p}$, there is a sequence $(j(l))_l\subset \N$ such that $(d_0 k_{\widehat{\varphi}^{j(l)}(w)})_l$ converges to $d_0k_{\widehat{w}}$. This finishes the proof of the initialization step. \medskip \textbf{Inductive step.} Let $n\geq 1$ and assume that $k_{\widehat{w}}L^\alpha\in H$ for all $\alpha\in \bigcup \{\mcn(m):~m\leq n-1\}$ and all $\widehat{w}\in \{0\}^p\times \T^{d-p}$. We prove that $k_{\widehat{w}}L^\alpha\in H$ for all $\alpha\in \mcn(n)$ and all $\widehat{w}\in \{0\}^p\times \T^{d-p}$.\\ Let us fix $\widehat{\alpha}\in \mcn(n)$ such that $\widehat{\alpha}_{p-1}$ is maximum among $\alpha_{p-1}$, for $\alpha\in \mcn(n)$. Also, let $\{D_i:~i\in I\}$ be a finite partition of $\N^p\setminus \bigcup \{\mcn(m):~m\leq n\}$, given by Lemma~\ref{lem:combinatorial}, satisfying the following condition: for each $i\in I$, there is $\alpha(i)\in D_i$ such that for each $\alpha\in D_i$ we have $\alpha\geq \alpha(i)$. Let us define \begin{align*} g:= h -k_{w}\sum_{m=0}^{n-1}\sum_{\alpha\in \mcn(m)}d_\alpha L^\alpha =k_{w}\sum_{m=n}^{\infty}\sum_{\alpha\in \mcn(m)}d_\alpha L^\alpha . \end{align*} and notice that, thanks to the induction hypothesis, $g\in H$. In order to simplify the notation, let us set $\Lambda=\lambda^{\widehat{\alpha}}$. Observe that $|\Lambda|=R(n)$. Thus, for $j\in \N$ we have that \begin{align}\label{eq: 1} \dfrac{C^j_\varphi g }{\Lambda^j j^{\widehat{\alpha}_{p-1}}}&=k_{\widehat \varphi^j(w)}\left( \sum_{\alpha\in \mcn(n)}\dfrac{d_\alpha C^j_{\varphi}(L^\alpha)}{\Lambda^j j^{\widehat{\alpha}_{p-1}}}+ \sum_{i\in I}\dfrac{C^j_{\varphi}(L^{\alpha(i)})}{\Lambda^j j^{\widehat{\alpha}_{p-1}}}C^j_{\varphi}\left( \sum_{\alpha\in D_i}d_\alpha L^{\alpha-\alpha(i)}\right)\right)\in H. \end{align} Let us check that the second summand of \eqref{eq: 1} tends to $0$ as $j$ tends to infinity. Indeed, let us fix $i\in I$. Then \begin{align*} \dfrac{C^j_\varphi(L^{\alpha(i)})}{\Lambda^j j^{\widehat{\alpha}_{p-1}}}&= \dfrac{\lambda^{j\alpha(i)}}{\Lambda^j j^{\widehat{\alpha}_{p-1}}} \left(L_p+\dfrac{j}{\lambda_{p-1}}L_{p-1}\right)^{\alpha(i)_p}\prod _{m=1}^{p-1}L_m^{\alpha(i)_m} \\ &= \left(\dfrac{\lambda^{\alpha(i)}}{\Lambda}\right)^j\dfrac{1}{j^{\widehat{\alpha}_{p-1}}}\sum_{\beta=0}^{\alpha(i)_p}\begin{pmatrix} \alpha(i)_p \\ \beta \end{pmatrix} \left(\dfrac{j}{\lambda_{p-1}}\right)^{\alpha(i)_p-\beta} L_{p-1}^{\alpha(i)_p-\beta}L_p^\beta \prod _{m=1}^{p-1}L_m^{\alpha(i)_m}\\ &=:\sum_{\beta=0}^{\alpha(i)_p} a(i,j,\beta) L_{p-1}^{\alpha(i)_p-\beta}L_p^\beta \prod _{m=1}^{p-1}L_m^{\alpha(i)_m}, \end{align*} where $(a(i,j,\beta))_{i,j,\beta}$ are the respective coefficients. By definition of $R(n)$ and $\mcn(n)$, we have that $|\lambda^{\alpha(i)}|< R(n)= |\Lambda|$. Therefore, all the coefficients $a(i,j,\beta)$ of the above expression tend to $0$ as $j$ tends to infinity, whatever the value of $\widehat{\alpha}_{p-1}$. 
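Explicitly, one can bound these coefficients as follows (a rough estimate, which is all that is needed here): for every $j\geq 1$ and every $0\leq\beta\leq\alpha(i)_p$,
\[
|a(i,j,\beta)| \;\leq\; \begin{pmatrix} \alpha(i)_p \\ \beta \end{pmatrix}\,\dfrac{j^{\alpha(i)_p}}{\,|\lambda_{p-1}|^{\alpha(i)_p}\,}\,\left|\dfrac{\lambda^{\alpha(i)}}{\Lambda}\right|^{j} \xrightarrow[j\to\infty]{ } 0,
\]
since $|\lambda^{\alpha(i)}/\Lambda|<1$ and geometric decay dominates any fixed power of $j$.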
It is now straightforward to modify the proof of the initialization step to show that
\begin{align*}
k_{\widehat \varphi^j(w)}\sum_{i\in I}\dfrac{C^j_{\varphi}(L^{\alpha(i)})}{\Lambda^j j^{\widehat{\alpha}_{p-1}}}C^j_{\varphi}\left( \sum_{\alpha\in D_i}d_\alpha L^{\alpha-\alpha(i)}\right) &\xrightarrow[j\to\infty]{ } 0.
\end{align*}
Thus, the sequence $(C^j_\varphi g/ \Lambda^j j^{\widehat{\alpha}_{p-1}})$ accumulates at the same points as the first sum of \eqref{eq: 1}. Now, observe that
\begin{align*}
\sum_{\alpha\in \mcn(n)} d_\alpha C^j_\varphi (L^\alpha)&=\sum _{\alpha\in \mcn(n)}d_\alpha \lambda^{j\alpha} \left(L_p+\dfrac{j}{\lambda_{p-1}}L_{p-1}\right)^{\alpha_p}\prod_{i=1}^{p-1} L_i^{\alpha_i}\\
&=\sum_{\alpha\in \mcn(n)}\sum_{\beta=0}^{\alpha_p}d_\alpha \begin{pmatrix} \alpha_p\\ \beta \end{pmatrix}\lambda^{j\alpha}\left(\dfrac{j}{\lambda_{p-1}}\right)^{\alpha_p-\beta}L_{p-1}^{\alpha_{p-1}+\alpha_p-\beta} L_p^\beta\prod_{i=1}^{p-2} L_i^{\alpha_i}.
\end{align*}
Rearranging the last expression and recalling that $d_\alpha= \rho_{|\alpha|}$, we get
\begin{align*}
\sum_{\alpha\in \mcn(n)} d_\alpha C^j_\varphi (L^\alpha)&=\sum_{\alpha\in \mcn(n)}d_\alpha \lambda^{j\alpha} L^\alpha \left(\sum_{\beta=\alpha_p}^{\alpha_{p-1}+\alpha_p}\begin{pmatrix} \beta \\ \alpha_p \end{pmatrix} \left(\dfrac{j}{\lambda_{p-1}} \right)^{\beta-\alpha_p}\right).
\end{align*}
So, for all $\alpha\in \mcn(n)$, the coefficient that multiplies $L^\alpha$ tends to $0$ at the same rate as $R(n)^jj^{\alpha_{p-1}}$. Let us consider now
\[\mcn(n,m):=\{\alpha\in \mcn(n):~\alpha_{p-1}=m\}.\]
It follows that $\{\mcn(n,m):~m=0,...,\widehat{\alpha}_{p-1}\}$ is a partition of $\mcn(n)$. Also, the accumulation points of the sequence $(C^j_{\varphi}g/ \Lambda^jj^{\widehat{\alpha}_{p-1}})$ coincide with the accumulation points of the sequence
\[\left(k_{\widehat \varphi^j(w)} \sum_{\alpha\in \mcn(n,\widehat{\alpha}_{p-1})} d_\alpha \begin{pmatrix} \alpha_{p-1}+\alpha_p\\ \alpha_{p} \end{pmatrix}\left(\dfrac{\lambda^{\alpha} }{\lambda^{\widehat{\alpha}}}\right)^j L^\alpha\right)_j.\]
Thanks to the hypothesis on the eigenvalues of $A$ and Lemma~\ref{obtaining L alpha}, we get that
\begin{align*}
\{ k_{\widehat{w}}L^{\alpha}:~\alpha\in \mcn(n,\widehat{\alpha}_{p-1}),~\widehat{w}\in \{0\}^{p}\times\T^{d-p}\}\subset H.
\end{align*}
Inductively, we obtain that for all $m=0,\dots,\widehat{\alpha}_{p-1}$,
\begin{align*}
\{ k_{\widehat{w}}L^{\alpha}:~\alpha\in \mcn(n,m),~\widehat{w}\in \{0\}^{p}\times\T^{d-p}\}\subset H.
\end{align*}
Indeed, let us assume that the last inclusion holds true for $m=M+1,...,\widehat{\alpha}_{p-1}$. To show that it also holds true for $m=M$, we proceed as above but considering the sequence
\begin{align*}
\dfrac{1}{\Lambda^j j^{M}}\left(C_\varphi^j g - \sum_{m=M+1}^{\widehat{\alpha}_{p-1}}\sum_{\alpha\in \mcn(n,m)}d_\alpha \lambda^{j\alpha} L^\alpha \left(\sum_{\beta=\alpha_p}^{\alpha_{p-1}+\alpha_p}\begin{pmatrix} \beta \\ \alpha_p \end{pmatrix} \left(\dfrac{j}{\lambda_{p-1}} \right)^{\beta-\alpha_p}\right) \right) \in H,~\forall j\geq 1.
\end{align*}
\medskip

\textbf{Conclusion.} To conclude the proof, one only needs to show that $H=\F(\C^d)$. Since $\{L^\alpha:~\alpha\in\N^p\}$ is a basis of the vector space of polynomials on $(z_1,..., z_p)$, we have proved that
\[\{z^\alpha k_{\widehat{w}}:~ \alpha\in \N^{p}\times\{0\}^{d-p},~\widehat{w}\in\{0\}^p\times\T^{d-p}\}\subset H.\]
Let $f\in \F(\C^d)$ be such that $\langle f,g\rangle =0$ for all $g\in H$.
Let us write $f(z):= \sum_{\alpha\in \N^d}a_\alpha z^\alpha$. We know that, for any $\widehat{w}\in \{0\}^p\times\C^{d-p} $, we have
\[k_{\widehat{w}}(z)= \sum_{\alpha\in \{0\}^p\times\N^{d-p}} c_\alpha z^\alpha,\]
for some sequence $(c_\alpha)_\alpha\subset\C$ depending on $\widehat{w}$. Let us fix $\beta\in \N^p\times\{0\}^{d-p}$ and let $P:\N^d\to\N^p\times \{0\}^{d-p}$ be the canonical projection onto the first $p$ coordinates. Also, let us consider the function $f_\beta\in \HH(\C^d)$ defined by
\[\sum_{\substack{ \alpha \in \N^d\\ P(\alpha)=\beta }}a_\alpha z^\alpha =z^\beta\sum_{\substack{ \alpha \in \N^d\\ P(\alpha)=\beta }}a_\alpha z^{\alpha-\beta} =: z^\beta f_\beta(z).\]
Observe that $f_\beta$ and $k_{\widehat{w}}$ only depend on $(z_{p+1},...,z_d)$. Then, it follows from the orthogonality of the monomials $\{z^\alpha:~\alpha\in\N^d\}$ that
\begin{align*}
0&= \langle f, z^\beta k_{\widehat{w}}\rangle = \langle z^\beta f_\beta, z^\beta k_{\widehat{w}}\rangle= \|z^\beta\|^2\overline{f_\beta(\widehat{w})}.
\end{align*}
Thus, $f_\beta$ vanishes on $\{0\}^{p}\times \T^{d-p}$. Since $f_\beta$ is an entire function depending only on the last $d-p$ coordinates, we conclude that $f_\beta\equiv 0$. Therefore, $a_\alpha=0$ for all $\alpha$ with $P(\alpha)=\beta$, where $\beta$ is an arbitrary multi-index in $\N^p\times\{0\}^{d-p}$. This yields that $f\equiv 0$ and the proof of the cyclicity of $C_\varphi$ is complete.
\end{proof}

\section{Non-cyclic composition operators}\label{section 4}

We split the proof of the necessary condition of Theorem~\ref{Theorem 1} into the following four propositions.

\begin{proposition}\label{non-cyclicity 1}
Let $A\in \C^{d\times d}$ be a non-invertible matrix and let $b\in\C^d$ be such that $C_{Az+b}$ induces a bounded composition operator on $\F(\C^d)$. Then $C_{Az+b}$ is not cyclic.
\end{proposition}

\begin{proof}
Since cyclicity is stable under conjugacy, let us assume that $A$ and $b$ have the form given by Proposition~\ref{conjugated operator}. We may even assume that the eigenvalue $0$ is placed at the first position of the diagonal of $A$. This implies that the first column of $A$ only has $0$'s. Therefore, for any $j\in \N$, with $j\geq 1$, the vector $\varphi^j (z)$ does not depend on $z_1$. Thus, for any $f\in \F(\C^d)$ and $j\geq 1$, the function $C_\varphi^j f$ depends only on $(z_i)_{i=2}^d$, so the closed linear span of the orbit of $f$ is contained in the proper closed subspace spanned by $f$ and the functions depending only on $(z_i)_{i=2}^d$. Hence, $f$ cannot be cyclic for $C_\varphi$. Since $f$ is arbitrary, $C_\varphi$ is not a cyclic operator.
\end{proof}

\begin{proposition}\label{non-cyclicity 2}
Let $A\in \C^{d\times d}$ be an invertible matrix and let $b\in\C^d$ be such that $C_{Az+b}$ induces a bounded composition operator on $\mathcal F(\C^d)$. Let $\lambda:=(\lambda_1,...,\lambda_n)\in \overline{\D}^n$ be the eigenvalues of $A$ repeated by geometric multiplicity, $1\leq n\leq d$. If there is $\alpha\in \Z^n\setminus\{0\}^n$ such that $\lambda^\alpha=1$, then $C_{Az+b}$ is not cyclic.
\end{proposition}

\begin{proof}
By Proposition~\ref{polynomials of deg 1}, let us consider $n$ linearly independent polynomials $L_1,\dots,L_n$ of degree $1$ such that $C_{\widehat\varphi} L_j=\overline{\lambda_j} L_j$. Let $c\in\C^d$ be such that $(I-A)c=\frac b2$ (see Lemma \ref{lem:range}). Then for any $\alpha\in\N^n$, the function $z\mapsto L^\alpha e^{\langle z,c\rangle}$ (which belongs to $\mathcal F(\C^d)$ as a product of an exponential function with a polynomial) is an eigenvector of $C_{\varphi}^*=M_{k_b}C_{\widehat\varphi}$ associated to $\overline{\lambda}^\alpha$.
Indeed, \begin{align*} C^*_\varphi (L^\alpha e^{\langle z,c\rangle})&= k_b(z)C_{\widehat{\varphi}}(L^\alpha e^{\langle z,c\rangle})=e^{\frac{\langle z,b\rangle }{2}}\overline{\lambda}^\alpha L^\alpha e^{\langle A^*z,c\rangle}=\overline{\lambda}^\alpha L^\alpha e^{\langle z, Ac+\frac{b}{2}\rangle}=\overline{\lambda}^\alpha L^\alpha e^{\langle z,c\rangle}. \end{align*} Suppose now that $\alpha\in\Z^d\backslash\{0\}^d$ satisfy $\lambda^\alpha=1$. If $\alpha\in \N^d$, then the functions $\{L^{n\alpha}e^{\langle z,c\rangle}:\ n\geq 0\}$ are linearly independent eigenvectors of $C_\varphi^*$ associated to the eigenvalue $1$. Thus by Proposition~\ref{noncyclicity adjoint}, $C_\varphi$ is not cyclic. If $\alpha\in\Z^d\backslash \N^d$, let $\alpha^+=(\max(\alpha_j,0))_j$ and $\alpha^-=(-\min(\alpha_j,0))_j$ so that $\alpha^+,\alpha^-\in\N^d$ and $\lambda^{\alpha^+}=\lambda^{\alpha^-}$. Now, $L^{\alpha^+}e^{\langle z,c\rangle}$ and $L^{\alpha^-}e^{\langle z,c\rangle}$ are two linearly independent eigenvectors of $C_\varphi^*$ associated to the same eigenvalue $\overline{\lambda}^{\alpha^+}$. Again, Proposition~\ref{noncyclicity adjoint} provides the conclusion. \end{proof} In order to proceed with the remaining cases of non-cyclic composition operators on $\F(\C^d)$, we need the following proposition. \begin{proposition}\label{Projection and complementability} Let $\xi\in \C^d$ and $N\in \N$. Let $\PP_N:=\textup{span}\{(z-\xi)^\alpha:~\alpha\in\N^d,~|\alpha|=N\}$ and $\QQ_N:=\overline{\textup{span}}\{(z-\xi)^\alpha:~\alpha\in\N^d,~|\alpha|\neq N\}$. Then, the linear map $P_N$, defined by \[f\in\F(\C^d)\mapsto P_N(f)(z):= \dfrac{1}{2\pi}\int_0^{2\pi} f(e^{i\theta}(z-\xi)+\xi)e^{-iN\theta}d\theta,\] is a bounded projection onto $\PP_N$ parallel to $\QQ_N$. In particular, $\F(\C^d)=\PP_N\oplus \QQ_N$. \end{proposition} \begin{proof} Let us start showing that $P_N$ is bounded. Let $p\in \N$ and let $f=\sum_{\alpha\in\N^d}c_\alpha z^\alpha$ be such that $c_\alpha=0$ for all $|\alpha|>p$. Recall that \[\|f\|^2=\sum_{|\alpha|\leq p}2^{|\alpha|}|c_\alpha|^2\prod_{j=1}^d\alpha_j!.\] Now we compute \begin{align*} P_N(f)(z)&=\sum_{|\alpha|\leq p}\dfrac{c_\alpha}{2\pi}\int_0^{2\pi } e^{-iN\theta} \prod_{j=1}^d(e^{i\theta}(z_j-\xi_j )+\xi_j )^{\alpha_j}d\theta \\ & =\sum_{|\alpha|\leq p} \dfrac{c_\alpha}{2\pi}\int_0^{2\pi }\sum_{\beta\leq \alpha} e^{-iN\theta} e^{i\theta|\beta|}\prod_{j=1}^d\begin{pmatrix} \alpha_j \\ \beta_j \end{pmatrix}(z_j-\xi_j )^{\beta_j}\xi_j ^{\alpha_j-\beta_j}d\theta \\ &=\sum_{|\alpha|\leq p}\sum_{\substack{|\beta|=N \\ \beta\leq \alpha} }c_\alpha\prod_{j=1}^d\begin{pmatrix} \alpha_j \\ \beta_j \end{pmatrix}(z_j-\xi_j )^{\beta_j}\xi_j ^{\alpha_j-\beta_j}\\ &=\sum_{|\beta|=N}\sum_{\alpha\geq \beta}c_\alpha (z-\xi)^\beta \xi^{\alpha-\beta} \prod_{j=1}^d\begin{pmatrix} \alpha_j \\ \beta_j \end{pmatrix}. \end{align*} Fix any $\beta\in\N^d$ such that $|\beta|=N$. Since there are finitely many $d$-tuples of size $N$, in order to prove that $P_N$ is bounded we just need to find $C\geq 0$ such that \[\sum_{\alpha\geq \beta}|c_\alpha||\xi^{\alpha-\beta}|\prod_{j=1}^d\begin{pmatrix} \alpha_j \\ \beta_j \end{pmatrix} \leq C\| f\|.\] In fact, considering $M=|\xi|$ and the Cauchy-Schwarz inequality, we have that \begin{align*} \sum_{\alpha\geq \beta}|c_\alpha||\xi^{\alpha-\beta}|\prod_{j=1}^d\begin{pmatrix} \alpha_j \\ \beta_j \end{pmatrix}&\leq \sum_{\alpha \geq \beta}|c_\alpha| \alpha^\beta M^{|\alpha|}\\ & \leq \left( \sum_{\alpha\geq \beta }|c_\alpha|^2 2^{|\alpha|}\prod_{j=1}^d\alpha_j! 
\right)^{1/2}\left(\sum_{\alpha\geq \beta }\left(\dfrac{M^2}{2}\right)^{|\alpha|} \prod_{j=1}^{d}\frac{\alpha_j^{2\beta_j}}{\alpha_j!} \right)^{1/2}\leq C\|f\|, \end{align*} where $C<\infty$. Thus, $P_N$ is a bounded linear operator on $\F(\C^d)$. \\ Now, by definition of $P_N$, it easily follows that \[P_N((z-\xi)^\alpha)=\begin{cases} (z-\xi)^\alpha&~\text{if } |\alpha|=N\\ 0 &~\text{if } |\alpha|\neq N. \end{cases}\] Therefore $\PP_N\subset \textup{Ran}(P_N)$. In fact, there is equality. Indeed, let $f\in \F(\C^d)$ and $\varepsilon>0$. Since $(z^\alpha)_{\alpha\in\N^d}$ is an orthogonal basis of $\F(\C^d)$ and $\textup{span}\{z^{\alpha}:~|\alpha|\leq q\}$ coincides with $\textup{span}\{(z-\xi)^{\alpha}:~|\alpha|\leq q\}$, for all $q\in\N$, we know that there is $r\geq N$ and $(c_\alpha)_{|\alpha|\leq r}$ such that \[ \big\| f- \sum_{|\alpha|\leq r} c_\alpha (z-\xi)^{\alpha}\big \|\leq \dfrac{\varepsilon}{\|P_N\|}. \] Therefore, \[\big \| P_N(f)-\sum_{|\alpha|= N} c_\alpha (z-\xi)^{\alpha}\big \|<\varepsilon,\] which implies that $\textup{Ran}(P_N)\subset \overline{\PP_N}=\PP_N$ since $\PP_N$ is finite dimensional. Now, we show that $\QQ_N=\ker (P_N)$. We already know that $\QQ_N\subset \ker (P_N)$. Conversely, if $P_N(f)=0$, approximating $f$ by a polynomial $\sum_{|\alpha|\leq r}c_\alpha (z-\xi)^\alpha$ as above, we know that \[ \big\|\sum_{|\alpha|=N} c_\alpha(z-\xi)^\alpha\big\|\leq \varepsilon,\] which implies that \[\big\|f-\sum_{\substack{|\alpha|\leq r \\ |\alpha|\neq N}}c_\alpha(z-\xi)^\alpha\big\| \leq 2\varepsilon.\] Hence, $f\in \QQ_N$. \end{proof} \begin{proposition}\label{2 jordan blocks} Let $\varphi(z):=Az+b$ be such that $C_\varphi$ induces a bounded operator on $\F(\C^d)$. If the canonical Jordan form of $A$ admits two Jordan blocks of size $2$, then $C_\varphi$ is not cyclic. \end{proposition} \begin{proof} Let us assume that the canonical Jordan form of $A$ admits two Jordan blocks of size $2$ associated to the eigenvalues $\lambda_1$ and $\lambda_2$. Let $\xi\in \C^d$ be a fixed point of $\varphi$. In particular, thanks to Proposition~\ref{polynomials of deg 1}, there are four linearly independent polynomials of degree one $(L_j)_{j=1}^4\subset \F(\C^d)$ such that, for $j\in \{1,2\}$, \begin{align*} C_{\varphi} L_{2j-1} &= \lambda_j L_{2j-1}+L_{2j} \\ C_{\varphi} L_{2j} &= \lambda_j L_{2j}. \end{align*} For $N\geq 0$, consider $\PP_N$, $\QQ_N$ and $P_N$ as in Proposition~\ref{Projection and complementability} associated to $\xi$. Thanks to Proposition~\ref{Projection and complementability}, $\F(\C^d)=\PP_N\oplus \QQ_N$. Again thanks to Proposition~\ref{polynomials of deg 1}, we fix $(L_j)_{j=5}^d\subset \F(\C^d)$ be linearly independent polynomials of degree $1$ such that $\{L_j:~j=1,...,d\}$ is a basis of $\PP_1$ and, for each $j=5,...,d$, $C_{\varphi}L_j$ belongs to $\textrm{span}(L_k:~k=5,\dots,d)$. Observe that $\{L^\alpha:~\alpha\in\N^d,~|\alpha|=N\}$ is a basis of $\PP_N$. \\ Let now $n,m\geq 2$. Set $N=n+m$ and define \begin{align*} Y_{n,m}&= \textup{span}\{L_1^kL_2^{n-k}L_3^lL_4^{m-l}:~k=0,...,n,~l=0,...,m\}\\ Z_{n,m}&= \textup{span}\{L^\alpha:~\alpha\in \N^d,~|\alpha|=N,~L^\alpha\notin Y_{n,m}\}. \end{align*} \smallskip \textbf{Fact.} $\PP_N$, $\QQ_N$, $Y_{n,m}$ and $Z_{n,m}$ are $C_\varphi$-invariant subspaces. \\ Indeed, this easily follows from the values of $C_\varphi L_j$, for $j=1,...,d$. \medskip Now, let $R_{n,m}:\PP_N\to Y_{n,m}$ be the linear projection associated to $\PP_N= Y_{n,m}\oplus Z_{n,m}$. 
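For orientation, let us state in advance the dimension count that drives the argument below (an illustrative instance only): in the smallest relevant case $n=m=2$, so that $N=4$, one has
\[
\dim Y_{2,2}=(2+1)(2+1)=9,
\qquad\text{whereas}\qquad
\dim\, \textup{span}\{R_{2,2}\circ P_4\circ C_\varphi^j(f):~j\geq 0\}\leq 2+2+1=5
\]
for every $f\in \F(\C^d)$, as will be shown; this gap is what rules out cyclicity.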
Let us check that the following expression holds true:
\begin{align}\label{eq: conmutative}
R_{n,m}\circ P_N\circ C_\varphi= C_\varphi \circ R_{n,m}\circ P_N.
\end{align}
Indeed, let $f\in \textup{span}(L^\alpha:~\alpha\in \N^d)$, namely, $f=\sum_{\alpha\in\N^d}c_\alpha L^\alpha$, where there are only finitely many $c_\alpha$ different from $0$. Then, thanks to the previous fact we get:
\begin{align*}
R_{n,m}\circ P_N\circ C_\varphi(f)&=R_{n,m} \sum_{\substack{ |\alpha|=N }}c_\alpha C_\varphi(L^\alpha)\\
&= \sum_{\substack{ |\alpha|=N \\ \alpha_1+\alpha_2=n \\ \alpha_3+\alpha_4=m }}c_\alpha \lambda_1^n\lambda_2^m \left(L_1+\dfrac{1}{\lambda_1}L_2\right)^{\alpha_1} L_2^{\alpha_2}\left(L_3+\dfrac{1}{\lambda_2}L_4\right)^{\alpha_3} L_4^{\alpha_4} \\
&= C_\varphi \circ R_{n,m}\circ P_N (f).
\end{align*}
The identity \eqref{eq: conmutative} then extends to all of $\F(\C^d)$ by density of the polynomials and continuity of the operators involved. We are now ready to prove that $C_\varphi$ is not cyclic. Pick any $f\in \mathcal{F}(\C^d)$ and write it as $f=\sum_{|\alpha|=N}c_\alpha L^\alpha+g$ with $g\in\mathcal Q_N$. Let us write $c_{k,l}=c_{(k,n-k,l,m-l,0,\dots,0)}$. Thanks to \eqref{eq: conmutative}, for any $j\geq 0$, we have that
\begin{align*}
R_{n,m}\circ P_N\circ C_\varphi^j (f)&= \sum_{k=0}^{n}\sum_{l=0}^m c_{k,l}C_\varphi^j(L_1^kL_2^{n-k}L_3^lL_4^{m-l})\\
&= \sum_{k=0}^{n}\sum_{l=0}^m c_{k,l}\lambda_1^{jn}\lambda_2^{jm}L_2^{n-k}L_4^{m-l}\left(L_1+\frac{j}{\lambda_1}L_2\right)^k\left(L_3+\dfrac{j}{\lambda_2}L_4\right)^l\\
&=(\lambda_1^n\lambda_2^m)^j\sum_{r=0}^{n+m}j^rf_r,
\end{align*}
where $(f_r)_r\subset Y_{n,m}$ are some fixed polynomials that do not depend on $j$. Therefore, the dimension of $\textup{span}\{R_{n,m}\circ P_N\circ C_\varphi^j(f): ~j\geq 0\}$ is at most $n+m+1$. Since $R_{n,m}\circ P_N$ is continuous and onto $R_{n,m}\circ P_N(\F(\C^d))=Y_{n,m}$, which has dimension $(n+1)(m+1)$, choosing for instance $n=m=2$ shows that the orbit of $f$ cannot span a dense subspace of $\F(\C^d)$. Hence $C_\varphi$ is not cyclic.
\end{proof}

Now, we proceed with the last case.

\begin{proposition}\label{1 jordan block}
Let $\varphi(z):=Az+b$ be such that $C_\varphi$ induces a bounded operator on $\F(\C^d)$. If the canonical Jordan form of $A$ admits a Jordan block of size larger than or equal to $3$, then $C_\varphi$ is not cyclic.
\end{proposition}

Since we apply a technique that follows the lines of the proof of Proposition~\ref{2 jordan blocks}, we only present a sketch of the proof of Proposition~\ref{1 jordan block}.

\begin{proof}
Let us assume that the canonical Jordan form of $A$ admits a Jordan block of size $p\geq 3$. Let $\xi\in\C^d$ be a fixed point of $\varphi$. Let $\{L_j:~j=1,...,d\}\subset \F(\C^d)$ be a linearly independent set of polynomials of degree $1$ given by Proposition~\ref{polynomials of deg 1} such that $C_{\varphi}L_j=\lambda_1 L_{j}+L_{j+1}$ for all $j=1,...,p-1$ and $C_{\varphi}L_p=\lambda_1 L_p$.\\
Let $N\in \N$ and consider $\PP_N$, $\QQ_N$ and $P_N$ as in Proposition~\ref{Projection and complementability} associated to $\xi$. Let us now define
\begin{align*}
Y_N&=\textup{span}\Big\{\prod_{j=1}^pL^{k_{j}}_j:~ \sum_{j=1}^pk_j= N\Big\},\\
Z_N&=\textup{span}\Big\{L^\alpha:~ |\alpha|=N,~ L^\alpha\notin Y_N\Big\}.
\end{align*}
It follows that $\PP_N$, $\QQ_N$, $Y_N$ and $Z_N$ are $C_\varphi$-invariant. Let $R_N:\PP_N\to Y_N$ be the bounded linear projection associated to $\PP_N=Y_N\oplus Z_N$. Moreover, as in \eqref{eq: conmutative}, we have that
\begin{align}\label{eq: conmutative 2}
R_N\circ P_N\circ C_\varphi= C_\varphi \circ R_N\circ P_N.
\end{align}
Now, let us prove that $C_\varphi$ is not cyclic.
Indeed, pick any $f\in \F(\C^d)$, with $P_N(f)=\sum_{|\alpha|=N}c_\alpha L^\alpha$, and observe that, for any $j \geq p-1$, we have
\begin{align*}
R_N\circ P_N\circ C_\varphi^j (f)&= \sum_{\substack{\alpha\in \N^p\times\{0\}^{d-p}\\ |\alpha|=N} } c_\alpha C_\varphi^j(L^\alpha)\\
&= \sum_{\substack{\alpha\in \N^p\times\{0\}^{d-p}\\ |\alpha|=N} } c_\alpha \lambda_1^{jN}\prod_{k=1}^p \left(\sum_{l=k}^p \begin{pmatrix} j\\ l-k \end{pmatrix} \dfrac{1}{\lambda_1^{l-k}}L_l \right)^{\alpha_k}\\
& = \lambda_1 ^{jN} \sum_{m=0}^{N(p-1)}j^m f_m,
\end{align*}
where $\{f_m:~m=0,...,N(p-1)\}\subset Y_N$ are some fixed polynomials that do not depend on $j$. Therefore, the dimension of $\textup{span}\{R_N\circ P_N \circ C^j_\varphi(f):~j\geq 0\}$ is at most $N(p-1)+p-1$. Since $R_N\circ P_N$ is continuous and onto $R_N\circ P_N(\F(\C^d))= Y_N$, which has dimension $\begin{pmatrix} N+p-1\\ p-1 \end{pmatrix}$, choosing for instance $N=3$ shows that the orbit of $f$ cannot span a dense subspace of $\F(\C^d)$. Hence $C_\varphi$ is not cyclic.
\end{proof}

\begin{proof}[Proof of Theorem~\ref{Theorem 1}: Necessary condition]
Let us proceed by a contrapositive argument. Observe that Proposition~\ref{non-cyclicity 1}, Proposition~\ref{non-cyclicity 2}, Proposition~\ref{2 jordan blocks} and Proposition~\ref{1 jordan block} cover all the cases in which the hypotheses of Theorem~\ref{Theorem 1} fail, and in each of these cases $C_\varphi$ is not cyclic. Thus, the proof of Theorem~\ref{Theorem 1} is now complete.
\end{proof}

\section{Cyclic vectors of compact composition operators}\label{section 5}

In this section we characterize the set of cyclic vectors for compact cyclic composition operators defined on $\F(\C^d)$. In order to state the main result of this section, we need to fix some notation. Let us consider $\varphi(z):=Az+b$ such that $\|A\|<1$ and let $\xi\in \C^d$ be a fixed point of $\varphi$. Also, for any $N\in \N$, the subspace $\PP_N$ and the projection $P_N$ are given by Proposition~\ref{Projection and complementability}. Let $L=(L_j)_{j=1}^d\subset\F(\C^d)$ be the polynomials of degree $1$ given by Proposition~\ref{polynomials of deg 1} related to $\varphi$ and $\xi$. Recall that the set $\{L^\alpha:~\alpha\in \N^d,~|\alpha|=N\}$ is a basis of $\PP_N$. Thus, for any $f\in \F(\C^d)$, by considering the power series of $f$ centered at $\xi$, there is a unique sequence $(f_\alpha)_{\alpha\in \N^d}\subset \C$ such that $f(z)= \sum_{n=0}^\infty\sum_{|\alpha|=n}f_\alpha L^\alpha(z)$ for all $z\in \C^d$.

\begin{theorem}\label{theorem cyclic vectors}
Let $\varphi(z):=Az+b$ be such that $C_\varphi$ induces a compact cyclic composition operator on $\F(\C^d)$. The following assertions hold true.
\begin{enumerate}
\item If $A$ is diagonalizable, then $f\in \F(\C^d)$ is a cyclic vector for $C_\varphi$ if and only if $f_\alpha\neq 0$ for all $\alpha\in \N^d$.
\item If $A$ is not diagonalizable (and therefore its canonical Jordan form admits a block of size $2$), and if $(L_j)_{j=1}^d$ is ordered so that $(L_j)_{j=1}^{d-1}$ are eigenvectors of $C_\varphi$ and $C_\varphi L_d \in \textup{span}(L_{d-1},L_d)$, then $f\in \F(\C^d)$ is a cyclic vector for $C_\varphi$ if and only if $f_\alpha\neq 0$ for all $\alpha\in \N^d$ with $\alpha_{d-1}=0$.
\end{enumerate}
\end{theorem}

In order to prove Theorem~\ref{theorem cyclic vectors} we need several intermediate results.

\begin{proposition}\label{computing projection}
For any $N\in \N$ and any $f\in \F(\C^d)$, $P_N(f)= \sum_{|\alpha|=N}f_\alpha L^\alpha$.
\end{proposition}

\begin{proof}
This easily follows from the following facts:
\begin{itemize}
\item $P_N(\mathcal F(\C^d))=\PP_N=\textup{span}\{(z-\xi)^\alpha:~|\alpha|=N\}=\textup{span}\{L^\alpha:~|\alpha|=N\}$,
\item $(I-P_N)(\mathcal F(\C^d))=\overline{\textup{span}}\{(z-\xi)^\alpha:~|\alpha|\neq N\}$ and
\item if $(f_n)_n\subset \F(\C^d)$ converges to $f\in \F(\C^d)$, then for any $\alpha\in\N^d$, the $\alpha$-partial derivative of $(f_n)_n$ converges to the $\alpha$-partial derivative of $f$ in the topology of locally uniform convergence.
\end{itemize}
\end{proof}

From now on, let us further assume that the canonical Jordan form of $A$ admits only one Jordan block whose size is exactly $2$. Set $\lambda=(\lambda_j)_{j=1}^{d}\in \D^d\setminus\{\{0\}^d\}$ such that the first $d-1$ elements of $\lambda$ are the eigenvalues of $A$, $\lambda_d=\lambda_{d-1}$ and $C_\varphi L_{d}=\lambda_{d-1}L_d+L_{d-1}$.

\begin{proposition}\label{eigenvector}
Let $f\in\F(\C^d)$ be an eigenvector of $C_\varphi$. Then, $f_\alpha=0$ for any $\alpha\in \N^d$ such that $\alpha_d\neq 0$.
\end{proposition}

\begin{proof}
Let us proceed towards a contradiction. Let $f\in \F(\C^d)$ be an eigenvector of $C_\varphi $ such that there is $\widehat{\gamma}\in \N^d$ with $\widehat{\gamma}_d\neq 0$ and $f_{\widehat{\gamma}}\neq 0$. Let us denote by $\Lambda\in \C$ the eigenvalue associated to $f$. Let $\gamma\in\N^d$ be such that $\gamma_j=\widehat{\gamma}_j$ for all $j=1,...,d-2$, $|\gamma|=|\widehat{\gamma}|$, $f_\gamma\neq 0$ and $\gamma_d$ is maximal. \\
Now, notice that for any $z\in \C^d$
\[C_\varphi f(z)= \sum_{n=0}^\infty\sum_{\substack{\alpha\in \N^d\\ |\alpha|=n}} f_\alpha L^\alpha(\varphi(z))=\sum_{n=0}^{+\infty} \sum_{\substack{\alpha\in \N^d\\ |\alpha|=n} } f_\alpha \left(\lambda_{d-1} L_{d}(z) + L_{d-1}(z) \right)^{\alpha_d}\prod_{j=1}^{d-1} \lambda_j^{\alpha_j} L_j^{\alpha_j}(z) .\]
Recalling that $\Lambda$ is the eigenvalue associated to $f$, for any $z\in \C^d$ we have that
\begin{align}\label{eq: prop eigenvector}
C_\varphi f (z)= \Lambda \sum_{n=0}^\infty\sum_{\substack{\alpha\in \N^d\\ |\alpha|=n}} f_\alpha L^\alpha(z).
\end{align}
Since for every function $g\in \F(\C^d)$ there is a unique sequence $(g_\alpha)_{\alpha\in \N^d}$ such that $g=\sum_{n=0}^{+\infty}\sum_{|\alpha|=n}g_\alpha L^\alpha$, the coefficients of both sides of \eqref{eq: prop eigenvector} coincide. Therefore, regarding the coefficients that multiply $L^\gamma$ and $L^{\gamma+e(d-1)-e(d)}$ we get that
\begin{align*}
\lambda^\gamma f_\gamma = &\Lambda f_\gamma\\
\lambda^{\gamma-e(d)}\gamma_d f_\gamma+\lambda^\gamma f_{\gamma+e(d-1)-e(d)} = & \Lambda f_{\gamma+e(d-1)-e(d)}.
\end{align*}
Thus, since $f_\gamma\neq 0$, it follows that $\Lambda =\lambda^\gamma\neq 0$. However, since $\gamma_d\neq 0$ and $\lambda^{\gamma-e(d)}\neq 0$, the second equality gives us that $f_\gamma=0$, which is a contradiction.
\end{proof}

As a direct consequence of Proposition~\ref{eigenvector} we get:

\begin{proposition}\label{spectrum compact}
The spectrum of $C_\varphi$ is $\sigma(C_\varphi)=\{\lambda^\alpha:~\alpha\in \N^{d-1}\times\{0\}\}\cup\{0\}$.
\end{proposition}

\begin{proof}
Since $C_\varphi$ is a compact operator, we know that $\sigma(C_\varphi)= \sigma_p(C_\varphi)\cup\{0\}$. It follows from the proof of Proposition~\ref{eigenvector} that the eigenvalues of $C_\varphi$ are of the form $\lambda^\alpha$, with $\alpha\in \N^{d-1}\times \{0\}$. Conversely, we have that $C_\varphi L^\alpha=\lambda^\alpha L^\alpha$ for all $\alpha\in \N^{d-1}\times \{0\}$.
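Indeed, this is a direct computation, using only the multiplicativity of $C_\varphi$, the relations $C_\varphi L_j=\lambda_j L_j$ for $j=1,\dots,d-1$, and the fact that $\alpha_d=0$:
\[
C_\varphi L^\alpha=\prod_{j=1}^{d-1}\big(C_\varphi L_j\big)^{\alpha_j}=\prod_{j=1}^{d-1}\big(\lambda_j L_j\big)^{\alpha_j}=\lambda^\alpha L^\alpha .
\]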
\end{proof}

Observe that, in fact, we have shown that for any $\alpha\in \N^{d-1}\times \{0\}$, $\ker(C_\varphi-\lambda^\alpha Id)=\textup{span}\{L^\beta:~\beta\in\N^{d-1}\times\{0\},~\lambda^{\alpha-\beta}=1\}$.\\
Let us now fix an enumeration $(\beta(n))_n$ of $\N^{d-1}\times\{0\}$ such that the sequence $(|\lambda^{\beta(n)}|)_n$ is nonincreasing. Also, consider $(R(n))_n\subset \R$ a strictly decreasing enumeration of the set $\{|\lambda^{\beta(n)}|:~n\in\N \}$. For any $n\in \N$ let us consider the set
\[\II(n):=\{\alpha\in \N^d:~ \beta(n)_{d-1}=\alpha_{d-1}+\alpha_d,~\alpha_j=\beta(n)_j~\text{for }j=1,...,d-2\}\]
which is a finite subset of $\N^d$. Let $N\in \N$ and denote by $Y_N$ the subspace of $\F(\C^d)$ defined by
\[Y_N:=\Big\{f\in \F(\C^d):~ f(z)=\sum_{\alpha\in \N^d\setminus \cup_{n=0}^N \II(n)}f_\alpha L^{\alpha}(z)\Big\}.\]
Also, let us denote by $\Xi_N$ the linear projection on $\F(\C^d)$ defined by
\[f\in \F(\C^d)\mapsto \Xi_N(f)(z):= \sum_{\alpha\in \N^d\setminus \cup_{n=0}^N \II(n)} f_\alpha L^{\alpha}(z),~\text{for all }z\in \C^d.\]
In the following proposition we collect some facts related to $Y_N$ and $\Xi_N$.

\begin{proposition}\label{facts of xi and y}
Let $N\in \N$. Then:
\begin{enumerate}[(a)]
\item $\Xi_N$ is a bounded projection onto $Y_N$. In particular, $Y_N$ is a closed subspace.
\item $\Xi_N$ and $C_\varphi$ commute.
\item $Y_N$ is an invariant subspace of $C_\varphi$.
\item $\sigma(C_\varphi|_{Y_N})= \{\lambda^{\beta(n)}:~n>N\}\cup \{0\}$.
\end{enumerate}
\end{proposition}

\begin{proof}
(a): Observe that $I-\Xi_N$ is the linear operator defined by $(I-\Xi_N)f= \sum_{\alpha\in \bigcup_{n=0}^N \II(n)} f_\alpha L^{\alpha}$ for all $f\in \F(\C^d)$. Therefore, since $\II(n)$ is a finite subset of $\N^d$ for each $n\in\N$, by Proposition~\ref{Projection and complementability} and Proposition~\ref{computing projection} we get that $I-\Xi_N$ is bounded. Thus, $\Xi_N$ is bounded as well.\\
(b): This easily follows from the fact that $I-\Xi_N$ and $C_\varphi$ commute.\\
(c): This directly follows from (a) and (b).\\
(d): By (c), $C_\varphi|_{Y_N}\in \mathcal{L}(Y_N)$ is a compact operator. After some straightforward modifications, the argument presented in the proof of Proposition~\ref{spectrum compact} can be used to show that $\sigma(C_\varphi|_{Y_N})= \{\lambda^{\beta(n)}:~n>N\}\cup \{0\}$.
\end{proof}

Now we are in a position to prove Theorem~\ref{theorem cyclic vectors}.

\begin{proof}[Proof of Theorem~\ref{theorem cyclic vectors}]
Let us assume that $A$ is non-diagonalizable. The case where $A$ is diagonalizable is simpler and the argument follows the same lines as the proof presented below. Since $C_\varphi$ is compact and cyclic, by Theorem~\ref{Theorem 1}, we know that $\|A\|<1$, that $A$ is invertible and that the canonical Jordan form of $A$ admits exactly one Jordan block of size $2$.\\
Let $f\in \F(\C^d)$ be a cyclic vector for $C_\varphi$. Let us assume, towards a contradiction, that there is $\widehat{\alpha}\in \N^{d}$ such that $\widehat{\alpha}_{d-1}=0$ and $f_{\widehat{\alpha}}=0$.
Observe that, for any $j\in \N$, we have that
\begin{align*}
C^j_\varphi f (z)&= \sum_{n=0}^\infty \sum_{\substack{ \alpha\in\N^d\\|\alpha|=n}} f_\alpha\lambda^{j\alpha}\left(L_d(z)+\dfrac{j}{\lambda_{d-1}} L_{d-1}(z)\right)^{\alpha_d} \prod_{k=1}^{d-1}L_k(z)^{\alpha_k} \\
&=\sum_{n=0}^\infty \sum_{\substack{ \alpha\in\N^d\\|\alpha|=n}} L^\alpha \lambda^{j\alpha}\sum_{l=0}^{\alpha_{d-1}}\begin{pmatrix} \alpha_{d}+l\\ l \end{pmatrix}\dfrac{j^l}{\lambda_{d-1}^l}f_{\alpha-le(d-1)+le(d)}.
\end{align*}
Therefore, we have that $(C_\varphi^j f)_{\widehat{\alpha}}=0$ for all $j\in \N$. This implies that the sequence $(P_{|\widehat{\alpha}|}(C_\varphi^j f))_j$ is contained in a subspace of $\PP_{|\widehat{\alpha}|}$ of dimension $\textup{dim}(\PP_{|\widehat{\alpha}|})-1$. Thus, $f$ is not a cyclic vector.\\
Conversely, let $f\in \F(\C^d)$ be such that $f_\alpha\neq 0$ for all $\alpha\in \N^d$ with $\alpha_{d-1}= 0$. In order to prove that $f$ is a cyclic vector for $C_\varphi$ we follow an argument which is similar to the one given in the proof of Theorem~\ref{Theorem 1}. So here we only sketch the proof, highlighting the main differences with that of Theorem~\ref{Theorem 1}.\\
For $n\in\N$, let us set $\mathcal{R}(n):=\{\alpha\in \N^d:~|\lambda^\alpha|=R(n)\}$. Since $\lambda\in \D^d$, the set $\mathcal{R}(n)$ is finite for any $n$ and $\{\mathcal{R}(n):~n\in\N\}$ is a partition of $\N^d$. Observe that $R(0)=1$ and $\mathcal{R}(0)=\{\{0\}^d\}$. Let $H=\overline{\textup{span}}\{C^j_\varphi f:~j\in\N\}$. In what follows, we prove that $H=\F(\C^d)$ by induction in the following way: at each step we show that $\{L^\alpha:~\alpha\in \mathcal{R}(n)\}\subset H$.\\
\textbf{Initialization step.} Set $\mathcal{O}=(0,\dots,0)\in \N^d$. For any $j\in \N$, we have that
\[C_\varphi^j f= f_{\mathcal{O}} + C_\varphi^j(f - f_{\mathcal{O}})= f_{\mathcal{O}} + C_\varphi^j(\Xi_0(f)).\]
Notice that $\Xi_0(f)\in Y_0$ and $\sigma(C_\varphi|_{Y_0})\subset \D$. Indeed, by Proposition~\ref{facts of xi and y}~(d), $\sigma(C_\varphi|_{Y_0})=\{\lambda^\alpha:~\alpha\in \N^{d-1}\times\{0\}\setminus\{\mathcal{O}\}\}\cup\{0\}$. Thanks to the spectral radius formula and since $R(1)<1$, the sequence $(\|C_\varphi|_{Y_0}^j\|)_j$ tends to $0$ as $j$ tends to infinity. Hence, the sequence $(C_\varphi^j f)_j$ converges to $f_{\mathcal{O}} \in H$ and thus, since $f_{\mathcal{O}}\neq 0$, the constant functions belong to $H$.\\
\textbf{Inductive step.} Let us assume that for some $n\geq 1$, $\{L^\alpha:~\alpha\in \mathcal{R}(k),~k\leq n-1\}\subset H$. We prove that $\{L^\alpha:~\alpha\in \mathcal{R}(n)\}\subset H$. Let us consider the sequence $(m(k))_k$ defined by $m(k)= \max\{j\in \N:~ \beta(j)\in \mathcal{R}(k)\}$. Observe that, thanks to the induction hypothesis, $\Xi_{m(n-1)}f\in H$. Also, notice that
\[\Xi_{m(n-1)}f= \sum_{\alpha\in \mathcal{R}(n)}f_\alpha L^\alpha + \Xi_{m(n)}f.\]
By Proposition~\ref{facts of xi and y}, $\Xi_{m(n)}f\in Y_{m(n)}$ and $\sigma(C_\varphi|_{Y_{m(n)}})\subset R(n+1)\overline{\D}$. Therefore, since $R(n+1)<R(n)$, there is $\varepsilon >0$ and $J\in \N$ such that
\[\left\| C_\varphi|_{Y_{m(n)}}^j \right\|\leq (R(n)-\varepsilon)^j,~\text{for all } j\geq J. \]
Thus, for any $\alpha\in \mathcal{R}(n)$, $\lambda^{-j\alpha} C_\varphi^j(\Xi_{m(n)}f)$ tends to $0$ as $j$ tends to infinity. At this point, the proof follows closely the lines of the proof of Theorem~\ref{Theorem 1}.
Indeed, observe that \begin{align*} C_\varphi^j\Xi_{m(n-1)}f=& \sum_{\alpha\in \mathcal{R}(n)} L^\alpha \lambda^{j\alpha}\sum_{l=0}^{\alpha_{d-1}}\begin{pmatrix} \alpha_{d}+l\\ l \end{pmatrix}\dfrac{j^l}{\lambda_{d-1}^l}f_{\alpha-le(d-1)+le(d)}\\ &+C_\varphi^j\Xi_{m(n)}f \end{align*} Thus, the coefficient associated to $L^\alpha$, with $\alpha\in \mathcal{R}(n)$, is $\lambda^{j\alpha}$ times a polynomial on $j$ of degree $\alpha_{d-1}$ due to the fact that $f_{\alpha -\alpha_{d-1}e(d-1)+\alpha_{d-1}e(d)}\neq 0$. Now, consider $\widehat{\alpha}\in \mathcal{R}(n)$ and $K=\max\{\alpha_{d-1}:~\alpha\in \mathcal{R}(n)\}$. We study inductively the sequences \[ \left(\dfrac{C_\varphi^j\Xi_{m(n-1)}f}{\lambda^{j\widehat{\alpha}}j^K}\right)_j,~ \left(\dfrac{C_\varphi^j\Xi_{m(n-1)}f-p_{j,1}}{\lambda^{j\widehat{\alpha}}j^{K-1}}\right)_j, ~\cdots ~, ~\left(\dfrac{C_\varphi^j\Xi_{m(n-1)}f-p_{j,K}}{\lambda^{j\widehat{\alpha}}}\right)_j,\] where $(p_{j,k})_{j,k}\subset \F(\C^d)$ are the polynomials defined as follows: \[p_{j,k}=\sum_{\substack{\alpha\in \mathcal{R}(n)\\ \alpha_{d-1}\geq K+1-k}} L^\alpha \lambda^{j\alpha}\sum_{l=0}^{\alpha_{d-1}}\begin{pmatrix} \alpha_{d}+l\\ l \end{pmatrix}\dfrac{j^l}{\lambda_{d-1}^l}f_{\alpha-le(d-1)+le(d)}.\] By sending $j$ to infinity on the first sequence and mimicking the proof of Theorem \ref{Theorem 1} (see in particular Lemma \ref{obtaining L alpha}), we obtain that each $L^\alpha$, with $\alpha\in \mathcal{R}(n)$, such that its associated coefficient is $\lambda^{j\alpha}$ times a polynomial of degree $K$ on $j$, belongs to $H$. Observe that in the second sequence $p_{j,1}$ cancels all the $L^\alpha$ of $C_\varphi^j\sum_{\alpha\in \mathcal{R}(n)}f_\alpha L^\alpha$ such that their coefficient is $\lambda^{j\alpha}$ times a polynomial on $j$ of degree $K$. Thus, the second sequence is contained in $H$. By sending $j$ to infinity on the second sequence, we obtain that each $L^\alpha$, with $\alpha\in \mathcal{R}(n)$, such that its associated coefficient is $\lambda^{j\alpha}$ times a polynomial of degree $K-1$ on $j$ belongs to $H$. The polynomial $p_{j,2}$ cancels all the $L^\alpha$ of $C_\varphi^j\sum_{\alpha\in \mathcal{R}(n)}f_\alpha L^\alpha$ such that the associated coefficient is $\lambda^{j\alpha}$ times a polynomial on $j$ of degree $K$ or $K-1$. This procedure leads to a finite induction which ends in $K+1$ steps obtaining that $\{L^\alpha:~\alpha\in \mathcal{R}(n)\}\subset H$.\\ \textbf{Conclusion.} Since $\textup{span}\{z^\alpha:~\alpha\in \N^d\}=\textup{span}\{L^\alpha:~\alpha\in\N^d\}\subset H$ we obtain that $H = \F(\C^d)$. Thus, $f$ is a cyclic vector for $C_\varphi$. \end{proof} \begin{remark} If $A$ is diagonalizable, we actually have that \[C^j_\varphi f(z)=\sum_{n=0}^\infty \sum_{\substack{ \alpha\in\N^d\\|\alpha|=n}} f_\alpha \lambda^{j\alpha}L^\alpha(z),~ \text{for all } z\in \C^d.\] \end{remark} As an immediate corollary of Theorem \ref{theorem cyclic vectors} we can state the following result. \begin{corollary} Let $C_\varphi$ be a compact cyclic composition operator on $\mathcal F(\C^d)$ and denote by $\textrm{Cyc}(C_\varphi)$ its set of cyclic vectors. Then $\textrm{Cyc}(C_\varphi)\cup\{0\}$ does not contain a subspace of dimension $2$. 
\end{corollary}

\section{Further dynamical properties of composition operators}\label{section 6}

This section is devoted to proving that composition operators on $\F(\C^d)$ are never weakly-supercyclic nor convex-cyclic.\\
A bounded linear operator $T$ defined on a separable Banach space $X$ is said to be supercyclic with respect to the topology $\tau$ if there is $x\in X$ such that $\C\cdot \textup{orb}(T,x)$ is dense in $(X,\tau)$; such a vector $x$ is called a supercyclic vector for $T$. In \cite[Theorem 5.4]{JPZ} it is proven that composition operators defined on $\F(\C^d)$ are never supercyclic. Also, in \cite[Theorem 1.7]{M} it is proven that weighted composition operators defined on $\F(\C)$ are never supercyclic with respect to the pointwise convergence topology. The proof of our next result is an adaptation of that of \cite[Theorem 1.7]{M}. Nevertheless, we provide it for the sake of completeness.

\begin{theorem}\label{Theorem tp supercyclic}
Let $\varphi(z):=Az+b$ be a holomorphic map such that $C_\varphi$ induces a bounded composition operator on $\F(\C^d)$. Then, $C_\varphi$ is not supercyclic with respect to the pointwise convergence topology. In particular, there is no weakly-supercyclic composition operator defined on $\F(\C^d)$.
\end{theorem}

\begin{proof}[Proof of Theorem~\ref{Theorem tp supercyclic}]
Let $\xi\in \C^d$ be a fixed point of $\varphi$. Thus, $\varphi(z)= A(z-\xi)+\xi$ for all $z\in \C^d$. Let us proceed towards a contradiction. Assume that there is $f\in \F(\C^d)$ such that $f$ is a supercyclic vector for $C_\varphi$ with respect to the pointwise convergence topology. It easily follows that $f(\xi)\neq 0$. Thus, by \cite[Proposition 4]{BJM}, we have that for any $z,z'\in \C^d$, with $z\neq z'$,
\begin{equation}\label{eq:supercyclic}
\overline{ \left\{ \dfrac{f(\varphi^n(z))}{f(\varphi^n(z'))}:~n\in\N,~ f(\varphi^n(z'))\neq 0 \right\}}=\C.
\end{equation}
\noindent Since $f(\xi)\neq0$, there is $r>0$ such that $0 \notin \overline{f(B(\xi,r))}$, where $B(\xi,r)$ denotes the Euclidean ball of center $\xi$ and radius $r$. Also, since $\|A\|\leq 1$, we have that $\varphi(B(\xi,r))\subset B(\xi,r)$. Now, let us fix $z\in B(\xi,r)\setminus \{\xi\}$ and set $z'=\xi$. Then
\[ \left | \dfrac{ f(\varphi^n(z))}{f(\varphi^n(z'))} \right | \leq \dfrac{ \sup \{|f(w)|: ~w\in B(\xi,r) \}}{|f(\xi)|}<\infty. \]
This clearly contradicts \eqref{eq:supercyclic}.
\end{proof}

Now, we turn our study to the concept of convex-cyclicity. A linear operator $T\in \mathcal{L}(X)$ is said to be convex-cyclic if there is $x\in X$ such that $\textup{co} (\textup{orb}(T,x))$ is dense in $X$, where $\textup{co}(A)$ denotes the convex hull of the set $A$. To the best of our knowledge, this concept was introduced in 2013 by Rezaei \cite{R}. Further, in \cite{M}, Mengestie characterized the convex-cyclicity of weighted composition operators defined on $\F(\C)$. In what follows, we show that there are no convex-cyclic composition operators on $\F(\C^d)$.

\begin{theorem}
Let $\varphi:\C^d\to\C^d$ be a holomorphic map such that $C_\varphi$ induces a bounded composition operator on $\F(\C^d)$. Then, $C_\varphi$ is not convex-cyclic.
\end{theorem}

\begin{proof}
Let $\xi\in \C^d$ be a fixed point of $\varphi$ and let $f\in \F(\C^d)$. According to Proposition~\ref{Projection and complementability}, $\F(\C^d)=\PP_0\oplus \QQ_0$. Moreover, $\PP_0$ and $\QQ_0$ are invariant subspaces for $C_\varphi$, and $P_0$ and $C_\varphi$ commute. Let us set $f_0:=P_0(f)$, which is a constant function.
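In fact, $f_0$ is nothing but the value of $f$ at the fixed point: a one-line verification, obtained by expanding $f$ in its power series centered at $\xi$ and integrating term by term in the definition of $P_0$, gives
\[
f_0(z)=P_0(f)(z)=\frac{1}{2\pi}\int_0^{2\pi} f\big(e^{i\theta}(z-\xi)+\xi\big)\,d\theta=f(\xi),\qquad z\in\C^d .
\]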
Now, observe that, for any sequence $(\sigma_k)_{k\in \N}\subset\R^+$ with finitely many non-zero terms, such that $\sum_k \sigma_k=1$, we have that \[P_0\left(\sum_{k=0}^\infty \sigma_k C^k_\varphi f\right)=\sum_{k=0}^\infty \sigma_k C^k_\varphi P_0 f = f_0.\] Therefore, $f$ is not a convex-cyclic vector for $C_\varphi$. Since $f$ is an arbitrary function on $\F(\C^d)$, the operator $C_\varphi$ is not convex-cyclic. \end{proof} \section{Approximation numbers} \label{section 7} \noindent In order to simplify the notations, in this section we use the convention $0^0=1$.\\ \noindent Let us recall that for a linear bounded compact operator $T\in\mathcal{L}(X)$, the $n$-th approximation number $a_n(T)$ is defined by \[ a_n(T):= \inf \{ \|T-S\|:~ S\in \mathcal{L}(X),~\textup{dim}(S(X))\leq n-1\}.\] When $X$ is a separable Hilbert space, it is well-known that the sequence $(a_n(T))_n$ coincides with the decreasing enumeration of the singular values of $T$, that is, the square roots of the eigenvalues of $T^*T$ (equivalently, the eigenvalues of $\sqrt{T^*T}$). The computation (or, at least, the estimation) of the approximation numbers of compact composition operators has been the subject of many investigations in the recent years (see for instance \cite{LQR} and the references therein). We compute these numbers for compact composition operators defined on $\F(\C^d)$. To do this we gather some results which can be found in \cite[Theorem 1.2]{Z} and \cite[Proposition 2.5]{FZ}: \begin{proposition}\cite{FZ,Z}\label{weighted operators} Let $\varphi(z):= Az+b$ be such that $C_\varphi$ induces a bounded compact composition operator on $\F(\C^d)$, i.e. $\|A\|<1$. Further, assume that $A$ is self-adjoint. Then, $W_{k_b,\varphi}$ is a bounded self-adjoint weighted composition operator on $\F(\C^d)$. Moreover, the operator $W_{k_b,\varphi}^*W_{k_b,\varphi}$ is unitary equivalent to $\exp(\langle (I-A)^{-1}b,b\rangle) C_{AA^*z}$. \end{proposition} In \cite[Theorem 1.1]{FZ} we can find the spectrum of bounded normal weighted composition operators defined on $\F(\C^d)$. A straightforward modification of the proof of Proposition \ref{spectrum compact} gives us the multiplicity of each non zero eigenvalue. \begin{lemma}\label{eigenvalues} Let $A\in \C^{d\times d}$ be an Hermitian matrix of norm $\|A\|<1$. Let $\lambda:=(\lambda_j)_{j=1}^d$ be the eigenvalues of $A$. Then, \[\sigma (C_{Az})= \{\lambda^\alpha:~ \alpha\in \N^d\}\cup\{0\}.\] Moreover, the multiplicity of the eigenvalue $\rho\in \sigma_p(C_{Az})\setminus\{0\}$ is exactly $\#\{\alpha\in \N^d:~\rho=\lambda^\alpha\}$. \end{lemma} \begin{theorem} Let $\varphi(z):=Az+b$ be such that $C_\varphi$ induces a bounded compact composition operator on $\F(\C^d)$ with $A\neq 0$. Let $\lambda=(\lambda_j)_{j=1}^d\subset \R^+$ be the singular values of $A$. Let $(\alpha_n)_n\subset \N^d$ be an enumeration of the set $\{\alpha\in\N^d:~\lambda^\alpha\neq 0\}$ such that the sequence $(\lambda^{\alpha_n})_n$ is nonincreasing. Then \[a_n(C_\varphi)= \exp\left(\dfrac{\langle (I-B)^{-1}v,v\rangle}{2}- \frac{|v|^2}{4}\right) \lambda^{\alpha_n},\] where $B=\sqrt{AA^*}$ and $v=(I+B)^{-1}b$. In particular, \[\sum_{n=1}^\infty a_n(C_\varphi)=\exp\left(\dfrac{\langle (I-B)^{-1}v,v\rangle}{2}- \frac{|v|^2}{4}\right) \prod_{j=1}^{d}\dfrac{1}{1-\lambda_j}.\] \end{theorem} \begin{proof} Let us first notice that, since $C_\varphi$ is compact, $\|A\|<1$. Thanks to Proposition~\ref{adjoint operator}, we know that $C_\varphi^*=W_{k_b,A^*z}$. 
Thus
\[T:=C_\varphi^*C_\varphi = W_{k_b,AA^*z+b}.\]
In order to continue, set $B:= \sqrt{AA^*}$ and recall that $B$ is a self-adjoint matrix. Observe that $\|B\|<1$ as well. Let $v:= (I+B)^{-1}b$ and define
\[S = \exp\left(-\dfrac{|v|^2}{4} \right) W_{k_v,Bz+v}.\]
We claim that $S= \sqrt{T}$. Indeed, thanks to Proposition~\ref{weighted operators}, $S$ is a bounded self-adjoint weighted composition operator on $\F(\C^d)$. Moreover, observe that for any $f\in \F(\C^d)$ we have
\begin{align*}
S^2 (f)(z)&= \exp\left(-\dfrac{|v|^2}{2} \right)k_v(z) k_v(Bz+v)f(B^2z+Bv+v)\\
&=\exp\left(\dfrac{-|v|^2 + \langle z,v\rangle + \langle Bz+v,v \rangle }{2} \right)f(AA^*z+(I+B)v)\\
&=\exp \left( \dfrac{\langle z,b \rangle }{2}\right)f(AA^*z+b)= T(f)(z).
\end{align*}
Since $S$ is a self-adjoint operator, $T=S^*S$. Again thanks to Proposition~\ref{weighted operators}, $T$ is unitarily equivalent to $\exp({\langle (I-B)^{-1}v,v\rangle}-\frac{|v|^2}{2}) C_{B^2z}$. Now, thanks to Lemma~\ref{eigenvalues} and recalling that $B^2=AA^*$, we get that
\[\sigma(T)= \left \{ \exp\left({\langle (I-B)^{-1}v,v\rangle}-\frac{|v|^2}{2}\right) (\lambda^{\alpha})^2:~ \alpha\in \N^d \right \}\cup\{0\}.\]
Moreover, if $(\alpha_n)_n\subset \N^d$ is an enumeration of the set $\{\alpha\in\N^d:~\lambda^\alpha\neq 0\}$ such that the sequence $(\lambda^{\alpha_n})_n$ is nonincreasing, then
\[a_n(C_\varphi)= \left( \exp\left({\langle (I-B)^{-1}v,v\rangle} - \frac{|v|^2}{2}\right)\lambda^{2\alpha_n} \right)^{1/2}= \exp\left(\dfrac{\langle (I-B)^{-1}v,v\rangle}{2}-\frac{|v|^2}{4}\right)\lambda^{\alpha_n}.\]
Finally, the geometric series formula gives us that
\[\sum_{n=1}^\infty a_n (C_\varphi)= \exp\left(\dfrac{\langle (I-B)^{-1}v,v\rangle}{2}-\frac{|v|^2}{4}\right) \prod_{j=1}^d \dfrac{1}{1-\lambda_j}.\]
\end{proof}

\begin{remark}
With this theorem, we get another proof that a compact composition operator on the Fock space belongs to all Schatten classes (see \cite{Du} or \cite{JPZ}).
\end{remark}

\bibliographystyle{amsplain}
\begin{thebibliography}{999}

\bibitem{BM} F. Bayart and \'E. Matheron. Dynamics of linear operators. Cambridge Tracts in Mathematics, 179.

\bibitem{BJM} M. J. Beltr\'an-Meneu, E. Jord\'a and M. Murillo-Arcila. Supercyclicity of weighted composition operators on spaces of continuous functions, Collect. Math., 71 (2020), 493-509.

\bibitem{BS} P. Bourdon and J. Shapiro. Cyclic phenomena for composition operators. Mem. Amer. Math. Soc., 125 (1997), No. 596.

\bibitem{CG} T. Carroll and C. Gilmore. Weighted composition operators on Fock spaces and their dynamics. Preprint: arXiv:1911.07254.

\bibitem{CMS} B. Carswell, B. MacCluer and A. Schuster. Composition operators on the Fock space. Acta Sci. Math. (Szeged) 69 (2003) 871-887.

\bibitem{CM} C. Cowen and B. MacCluer. Composition operators on spaces of analytic functions. Studies in Advanced Mathematics. CRC Press.

\bibitem{DK} M. Doan and L. Khoi. Closed range and cyclicity of composition operators on Hilbert spaces of entire functions. Complex Var. Elliptic Equ., 63(11) (2018), 1558-1569.

\bibitem{Du} D. Y. Du. Schatten class weighted composition operators on the Fock space $F^2_\alpha(\mathbb C^N )$. Int. Journal of Math. Analysis, Vol. 5, 2011, no. 13, 625-630.

\bibitem{FZ} L. Feng and L. Zhao. Spectrum of normal weighted composition operators on the Fock space over $\C^N$. Acta Math. Sin., Eng. Ser., 35(9) (2019), 1563–1572.

\bibitem{GI} K. Guo and K. Izuchi. Composition operators on Fock type spaces. Acta Sci. Math. (Szeged) 74 (2008) 807-828.
\bibitem{JPZ} L. Jiang, G. Prajitura and R. Zhao. Some characterizations for composition operators on the Fock spaces. J. Math. Anal. Appl. 455 (2017) 1204-1220.

\bibitem{LQR} D. Li, H. Queffélec and L. Rodríguez-Piazza. Approximation and entropy numbers of composition operators, Concrete Operators 7 (2020) 166--179.

\bibitem{M} T. Mengestie. Convex-cyclic weighted composition operators and their adjoints. Preprint: arXiv:2112.05371v1.

\bibitem{M2} T. Mengestie. Cyclic and supercyclic weighted composition operators on the Fock space. Preprint: arXiv:1901.01697v1.

\bibitem{M3} T. Mengestie. Dynamics of weighted composition operators and their adjoints on the Fock space. Complex Anal. Oper. Theory 16 (2022), 27.

\bibitem{R} H. Rezaei. On the convex hull generated by orbit of operators. Linear Algebra Appl., 438(11) (2013), 4190-4203.

\bibitem{U} S. Ueki. Weighted composition operator on the Fock space. Proc. Amer. Math. Soc., 135(5) (2007), 1405-1410.

\bibitem{ZZ} L. Zhang and Z. Zhou. Hypercyclicity of weighted composition operators on a weighted Dirichlet space. Complex Var. Elliptic Equ., 59(7) (2014), 1043-1051.

\bibitem{Z2} L. Zhao. Invertible weighted composition operators on the Fock space of $\C^N$. Journal of Function Spaces, (2015).

\bibitem{Z} L. Zhao. Normal weighted composition operators on the Fock space of $\C^n$. Oper. Matrices, 11(3) (2017), 697-704.

\end{thebibliography}
\end{document}
2205.11000v1
http://arxiv.org/abs/2205.11000v1
Another operator-theoretical proof for the second-order phase transition in the BCS-Bogoliubov model of superconductivity
\documentclass[11pt, leqno]{article} \usepackage{amsmath} \usepackage{amsthm} \usepackage{amssymb} \usepackage[dvips]{graphicx,color} \topmargin=-0.5cm \oddsidemargin=0truecm \evensidemargin=0truecm \textheight=23cm \textwidth=16cm \theoremstyle{plain} \newtheorem{theorem}{Theorem}[section] \newtheorem{lemma}[theorem]{Lemma} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{corollary}[theorem]{Corollary} \theoremstyle{definition} \newtheorem{definition}[theorem]{Definition} \theoremstyle{remark} \newtheorem{remark}[theorem]{Remark} \numberwithin{equation}{section} \begin{document} \title{\textbf{Another operator-theoretical proof for \\the second-order phase transition in \\the BCS-Bogoliubov model of superconductivity}} \author{Shuji Watanabe\\ Division of Mathematical Sciences\\ Graduate School of Engineering, Gunma University\\ 4-2 Aramaki-machi, Maebashi 371-8510, Japan\\ Email: [email protected]} \date{} \maketitle \begin{abstract} In the preceding papers, imposing certain complicated and strong conditions, the present author showed that the solution to the BCS-Bogoliubov gap equation in superconductivity is twice differentiable only on the neighborhoods of absolute zero temperature and the transition temperature so as to show that the phase transition is of the second order from the viewpoint of operator theory. Instead, we impose a certain simple and weak condition in this paper, and show that there is a unique nonnegative solution and that the solution is indeed twice differentiable on a closed interval from a certain positive temperature to the transition temperature as well as pointing out several properties of the solution. We then give another operator-theoretical proof for the second-order phase transition in the BCS-Bogoliubov model. Since the thermodynamic potential has the squared solution in its form, we deal with the squared BCS-Bogoliubov gap equation. Here, the potential in the BCS-Bogoliubov gap equation is a function and need not be a constant. \medskip \noindent Mathematics Subject Classification 2020. \ 45G10, 47H10, 47N50, 82D55. \medskip \noindent Keywords. \ BCS-Bogoliubov model of superconductivity, second-order phase transition, BCS-Bogoliubov gap equation, nonlinear integral equation, fixed-point theorem. \end{abstract} \section{Introduction} In the BCS-Bogoliubov model of superconductivity, one does not show that the solution to the BCS-Bogoliubov gap equation is partially differentiable with respect to the absolute temperature $T$. Nevertheless, without such a proof, one partially differentiates the solution and the thermodynamic potential with respect to the temperature twice so as to obtain the entropy and the specific heat at constant volume. One then shows that the phase transition from a normal conducting state to a superconducting state is of the second order. Therefore, if the solution were not partially differentiable with respect to the temperature, then one could not partially differentiate the solution and the thermodynamic potential with respect to the temperature, and hence one could not obtain the entropy and the specific heat at constant volume. As a result, one could not show that the phase transition is of the second order. For this reason, it is highly desirable to show that there is a unique solution to the BCS-Bogoliubov gap equation and that the solution is partially differentiable with respect to the temperature twice. 
In the preceding papers (see \cite[Theorems 2.2 and 2.10]{watanabe-seven} and \cite[Theorems 2.3 and 2.4]{watanabe-five}), the present author gave a proof of the existence and uniqueness of the solution and showed that the solution is indeed partially differentiable with respect to the temperature twice on the basis of fixed-point theorems. In this way, the present author gave an operator-theoretical proof of the statement that the phase transition is of the second order, and thus solved the long-standing problem of the second-order phase transition from the viewpoint of operator theory. Here, the potential in the BCS-Bogoliubov gap equation is a function and need not be a constant. But the present author imposed certain complicated and strong conditions in the preceding papers \cite{watanabe-seven, watanabe-five, watanabe-eight}. As a result, the present author showed that the solution to the BCS-Bogoliubov gap equation is partially differentiable with respect to the temperature only on the neighborhoods of absolute zero temperature $T=0$ and the transition temperature $T=T_c$. Instead, we impose a certain simple and weak condition in this paper. Thanks to this simple and weak condition, we show that there is a unique nonnegative solution and that the solution is indeed partially twice differentiable with respect to the temperature on the interval $[T_0, \, T_c]$ as well as pointing out several properties of the solution. Here, the temperature $T_0$ is defined in \eqref{eqn:tzero} below, and the temperature interval $[T_0,\, T_c]$ can be nearly equal to the whole temperature interval $[0,\, T_c]$ (see Remark \ref{rmk:tzerotc} below). Differentiating the thermodynamic potential with respect to the temperature, we thus give another operator-theoretical proof for the second-order phase transition. As is well known, the thermodynamic potential has the squared solution in its form, not the solution itself. Therefore, we deal with the squared BCS-Bogoliubov gap equation, not the BCS-Bogoliubov gap equation. From the viewpoint of operator theory, the present author thinks that dealing with the squared BCS-Bogoliubov gap equation provides a straightforward way to show the second-order phase transition. The BCS-Bogoliubov gap equation \cite{bcs, bogoliubov} is a nonlinear integral equation given by \begin{equation}\label{eqn:bcseq} u_0(T,\,x)=\int_I \frac{U(x,\,\xi)\, u_0(T,\, \xi)}{\,\sqrt{\,\xi^2+u_0(T,\, \xi)^2\,}\,}\, \tanh \frac{\,\sqrt{\,\xi^2+u_0(T,\, \xi)^2\,}\,}{2T}\, d\xi, \ T \geq 0, \ (x, \,\xi) \in I^2. \end{equation} Here, the solution $u_0$ is a function of the absolute temperature $T$ (of a superconductor) and the energy $x$ (of an electron). The closed interval $I$ is given by $I=[\varepsilon,\, \hslash\omega_D]$, where the Debye angular frequency $\omega_D$ is a positive constant and depends on a superconductor, and $\varepsilon>0$ is a cutoff (see the following remark). The potential $U(\cdot,\,\cdot)$ satisfies $U(x,\,\xi)>0$ at all $(x,\,\xi) \in I^2$. Throughout this paper we use the unit where the Boltzmann constant $k_B$ is equal to 1. \begin{remark} For simplicity, we introduce the cutoff $\varepsilon>0$ in \eqref{eqn:bcseq}. Here, the cutoff $\varepsilon>0$ is small enough. We see that the cutoff is unphysical, but we introduce it for simplicity. 
\end{remark}
In \cite{odeh, billardfano, vansevenant, bls, chen, deugeihailoss, fhns, fhss, freijihaizlseiringer, hhss, hainzlloss, haizlseiringer, haizlseiringer2, haizlseiringer3, watanabe-one, watanabe-two, watanabe-four, watanabe-seven, watanabe-five, watanabe-eight}, the existence, the uniqueness and several properties of the solution to the BCS-Bogoliubov gap equation were established and studied. See also Kuzemsky \cite[Chapters 26 and 29]{kuzemsky3} and \cite{kuzemsky, kuzemsky2}. Anghel and Nemnes \cite{angnem} and Anghel \cite{ang-one, ang-two} showed that if the physical quantity $\mu$ in the BCS-Bogoliubov model is not equal to the chemical potential, then the phase transition from a normal conducting state to a superconducting state is of the first order under a certain condition without any external magnetic field. Introducing an imaginary magnetic field, Kashima \cite{kashima-one, kashima-two, kashima-three, kashima-four} pointed out that the phase transition is of the second order if and only if a certain value is greater than $\sqrt{17-12\sqrt{2}}$ and that the phase transition is of order $4n+2$ if and only if the value above is less than or equal to $\sqrt{17-12\sqrt{2}}$. Here, $n$ is an arbitrary positive integer. In this connection, the BCS-Bogoliubov gap equation in superconductivity plays a role similar to that of the Maskawa--Nakajima equation \cite{maskawa-nakajima-one, maskawa-nakajima-two} in elementary particle physics. In Professor Maskawa's Nobel lecture, he stated the reason why he considered the Maskawa--Nakajima equation. See the present author's paper \cite{watanabe-three} for an operator-theoretical treatment of the Maskawa--Nakajima equation.

Squaring both sides of the BCS-Bogoliubov gap equation and putting $f_0(T,\,x)=u_0(T,\,x)^2$ gives the squared BCS-Bogoliubov gap equation:
\[ f_0(T,\,x)=\left( \int_I U(x,\,\xi) \sqrt{ \, \frac{f_0(T,\, \xi)}{\, \xi^2+f_0(T,\, \xi)\,} \,} \, \tanh \frac{\,\sqrt{\,\xi^2+f_0(T,\, \xi)\,}\,}{2T}\, d\xi \right)^2. \]
Let $T_c$ be the transition temperature (see \cite[Definition 2.5]{watanabe-one} for our operator-theoretical definition of $T_c$) and let $D=[T_0,\, T_c] \times I \subset \mathbb{R}^2$. Here, $I=[\varepsilon,\, \hslash\omega_D]$. Define our operator $A$ by
\begin{equation}\label{eqn:ourop}
Af(T,\,x)=\left( \int_I U(x,\,\xi) \sqrt{ \, \frac{f(T,\, \xi)}{\, \xi^2+f(T,\, \xi)\,} \,} \, \tanh \frac{\,\sqrt{\,\xi^2+f(T,\, \xi)\,}\,}{2T}\, d\xi \right)^2, \quad (T,\,x) \in D,
\end{equation}
where $f \in W$ (see \eqref{eqn:w} below for the subset $W$). We define our operator $A$ on the subset $W$ and look for a fixed point of $A$. Note that a fixed point of $A$ becomes a solution to the squared BCS-Bogoliubov gap equation, and that its square root becomes a solution to the BCS-Bogoliubov gap equation \eqref{eqn:bcseq}.

Let $U_1$ and $U_2$ be positive constants, where $(0<) \, U_1 \leq U_2$. If the potential $U(\cdot,\,\cdot)$ is a positive constant and $U(x,\,\xi)=U_1$ at all $(x,\,\xi) \in I^2$, then the solution to the BCS-Bogoliubov gap equation \eqref{eqn:bcseq} becomes a function of the temperature $T$ only. Denoting the solution by $T \mapsto \Delta_1(T)$, we have (see \cite{bcs})
\begin{equation}\label{eqn:delta1}
1=U_1 \int_I \frac{1}{\,\sqrt{\,\xi^2+\Delta_1(T)^2\,}\,} \, \tanh \frac{\, \sqrt{\,\xi^2+\Delta_1(T)^2\,}\,}{2T}\,d\xi, \quad 0 \leq T \leq\tau_1.
\end{equation} Here, the temperature $\tau_1>0$ is defined by (see \cite{bcs}) \[ 1=U_1 \int_I \frac{1}{\,\xi\,} \, \tanh \frac{\xi}{\,2\tau_1\,}\,d\xi. \] The solution $T \mapsto \Delta_1(T)$ is continuous and strictly decreasing with respect to $T$, and moreover, the solution is of class $C^2$ with respect to $T$. For more details, see \cite[Proposition 1.2]{watanabe-one}. We set $\Delta_1(T)=0$ at all $T \geq \tau_1$. Then \eqref{eqn:delta1} becomes \[ 1>U_1 \int_I \frac{1}{\,\xi\,} \, \tanh \frac{\xi}{\,2T \,}\,d\xi, \quad T>\tau_1. \] We choose an arbitrary temperature $T_0 \; (>\tau_1)$. Then, for $T \in [T_0,\, T_c]$, \begin{equation}\label{eqn:tzero} U_1 \int_I \frac{1}{\,\xi\,} \, \tanh \frac{\xi}{\,2T \,}\,d\xi <1. \end{equation} On the other hand, If $U(x,\,\xi)=U_2$ at all $(x,\,\xi) \in I^2$, then we have the solution $T \mapsto \Delta_2(T)$ to \begin{equation}\label{eqn:delta2} 1=U_2 \int_I \frac{1}{\,\sqrt{\,\xi^2+\Delta_2(T)^2\,}\,} \, \tanh \frac{\, \sqrt{\,\xi^2+\Delta_2(T)^2\,}\,}{2T}\,d\xi, \quad 0 \leq T \leq\tau_2. \end{equation} Here, the temperature $\tau_2>0$ is defined by \[ 1=U_2 \int_I \frac{1}{\,\xi\,} \, \tanh \frac{\xi}{\,2\tau_2\,}\,d\xi. \] The solution $T \mapsto \Delta_2(T)$ has properties similar to those of the solution $T \mapsto \Delta_1(T)$. We again set $\Delta_2(T)=0$ at all $T \geq \tau_2$. The inequality $U_1 \leq U_2$ implies \[ \Delta_1(T) \leq \Delta_2(T) \quad (0 \leq T \leq \tau_2). \] For the graphs of $\Delta_1(\cdot)$ and $\Delta_2(\cdot)$, see \cite[Figure 1]{watanabe-five}. \section{Main results} Suppose that the potential $U(\cdot,\,\cdot)$ in the BCS-Bogoliubov gap equation \eqref{eqn:bcseq} satisfies the following conditions: \begin{equation}\label{eqn:potential} U(\cdot,\,\cdot) \in C^2(I^2), \quad (0<) \, U_1 \leq U(x,\,\xi) \leq U_2 \quad \hbox{at all}\quad (x,\,\xi) \in I^2, \end{equation} and \eqref{eqn:condition-two} below. The inequalities $U_1 \leq U(x,\,\xi) \leq U_2$ at all $(x,\,\xi) \in I^2$ imply $\tau_1 \leq T_c \leq \tau_2$ \ (see \cite[Remark 2.6]{watanabe-one}). Set \[ a=\left\{ \frac{ \displaystyle{ \max_{(x,\,\xi) \in I^2} U(x,\,\xi) }}{\, \displaystyle{ \min_{(x,\,\xi) \in I^2} U(x,\,\xi) } \,} \right\}^2 \ (\geq 1). \] \begin{remark}\label{rmk:tzerotc} The temperatures $\tau_1$, $T_0$, $T_c$, $\tau_2$ satisfy $(0<)\, \tau_1<T_0<T_c \leq \tau_2$. If $U_1$ is small enough, then so is $\tau_1$. Therefore, $T_0$ can be small enough, and hence $T_0$ does not need to be close to the transition temperature $T_c$. As a result, the temperature interval $[T_0,\, T_c]$ can be nearly equal to the whole temperature interval $[0,\, T_c]$. For temperatures at or near zero temperature, the smoothness of the solution to the BCS-Bogoliubov gap equation with respect to such temperatures has been shown in \cite[Theorem 2.2]{watanabe-seven}. In this paper we thus deal with the temperature interval $[T_0,\, T_c]$. \end{remark} Let $W$ be a subset of the Banach space $C(D)$ satisfying \begin{eqnarray}\label{eqn:w} W &=& \big\{ f \in C^2(D) (\subset C(D)) : (0 \leq) \ \Delta_1(T)^2 \leq f(T,\,x) \leq \Delta_2(T)^2, \quad f(T_c,\,x) = 0, \\ \nonumber & & \frac{f(T,\,x)}{\, f(T,\,x_1) \,} \leq a, \quad -f_T(T,\,x)>0, \quad \max_{(T,\, x) \in D} \left\{ -f_T(T,\,x) \right\} \leq M_T \big\}. 
\end{eqnarray}
Here, the norm of the Banach space $C(D)$ is given by
\[
\| g \|=\sup_{(T,\, \xi) \in D} | \, g(T,\, \xi) \, |, \quad g \in C(D),
\]
and
\[
M_T=\frac{4a \, U_2 \, \displaystyle{ \left( \max_{z \geq 0} \frac{z}{\, \cosh z \,} \right)^2 } }{ \, \varepsilon \, U_1 \, \displaystyle{ \left( \tanh \frac{\varepsilon}{\, 2T_c \,}-\frac{\varepsilon}{\, 2T_c \,} \frac{1}{\cosh^2 \frac{\varepsilon}{\, 2T_c \,} } \right) \int_I \frac{d\xi}{\, \left( \xi^2+\Delta_2(0)^2 \right)^{3/2} \,} \, } } \quad (>0).
\]
Note that
\[
\sup_{f \in W} \left[ \max_{(T,\, x) \in D} \left\{ -f_T(T,\,x) \right\} \right] = M_T.
\]
\begin{remark}\label{rmk:ffa}
The inequality $f(T,\,x) / f(T,\,x_1) \leq a$ in the definition of $W$ is not defined at $T=T_c$ since $f(T_c,\,x) = 0$. For $T<T_c$, there is a $T_1$ \ ($T<T_1<T_c$) such that $f(T,\,x)=(T-T_c) \, f_T(T_1,\,x)$. Therefore, $f(T_c,\,x) / f(T_c,\,x_1)$ is defined to be $f_T(T_c,\,x) / f_T(T_c,\,x_1)$.
\end{remark}
\begin{remark}
The conditions imposed in the previous papers of the present author \cite[Condition (C)]{watanabe-seven} and \cite[Condition (C)]{watanabe-five} were very complicated, and so it was very hard to show the existence, uniqueness and smoothness of the solution to the BCS-Bogoliubov gap equation \eqref{eqn:bcseq}. Instead, we impose the simple condition that $f \in C^2(D)$ and $f(T_c,\,x) = 0$ in the definition of the subset $W$ (see \eqref{eqn:w}). Thanks to this simple condition, it is straightforward to show the existence, uniqueness and smoothness of the solution.
\end{remark}
Let us recall here that for $T \in [T_0,\, T_c]$ (see \eqref{eqn:tzero}),
\[
\int_I \frac{\, U_1 \,}{\,\xi\,} \, \tanh \frac{\xi}{\,2T \,}\,d\xi <1.
\]
Note that $a \geq 1$ and that $U(x,\,\xi) \geq U_1$ at all $(x,\,\xi) \in I^2$. We then let the potential $U(\cdot,\,\cdot)$ satisfy
\begin{equation}\label{eqn:condition-two}
a^{1/4} \max_{(T,\, x) \in D} \left[ \, \int_I \frac{\, U(x,\, \xi) \,}{\xi} \, \tanh \frac{\xi}{\, 2T\,}\,d\xi \, \right] \leq 1.
\end{equation}
\begin{remark}
The following two theorems hold true not only when the potential $U(\cdot,\,\cdot)$ in the BCS-Bogoliubov gap equation \eqref{eqn:bcseq} is a positive constant, but also when $U(\cdot,\,\cdot)$ is a function. See Remark \ref{rmk:constantpotential} below.
\end{remark}
We denote by $\overline{W}$ the closure of $W$ with respect to the norm $\| \cdot \|$ mentioned above. The following are our main results.
\begin{theorem}\label{thm:main}
Let the potential $U(\cdot,\,\cdot)$ in the BCS-Bogoliubov gap equation \eqref{eqn:bcseq} satisfy \eqref{eqn:potential} and \eqref{eqn:condition-two}. Let $W$ be as in \eqref{eqn:w}. Then there is a unique fixed point $f_0 \in \overline{W}$ of our operator $A: \, \overline{W} \to \overline{W}$. Therefore, there is a unique nonnegative solution $u_0=\sqrt{f_0}$ to the BCS-Bogoliubov gap equation \eqref{eqn:bcseq}.
\end{theorem}
In the following two remarks, let $f_0$ be the fixed point given by Theorem \ref{thm:main}; several properties of the solution $u_0=\sqrt{f_0}$ to the BCS-Bogoliubov gap equation \eqref{eqn:bcseq} are pointed out there. Suppose first that $f_0 \in W$. If instead $f_0$ is an accumulating point of $W$ (that is, $f_0 \in \overline{W} \setminus W$), then $f_0$ can be approximated by an element $f \in W$, and the square root $\sqrt{f}$ of this element satisfies the following properties in place of $u_0$.
\begin{remark} \ $u_0 \in C^2([T_0,\, T_1] \times I)$, where $T_1>0$ is arbitrary as long as $T_1<T_c$.
Since $f_0(T_c,\, x)=0$ and $(\partial f_0/\partial T)(T,\, x)<0$ at $T$ in a neighborhood of $T_c$, it follows that $u_0(T_c,\, x)=0$ and $(\partial u_0/\partial T)(T,\, x)<0$ at $T$ in a neighborhood of $T_c$. Moreover, $(\partial u_0/\partial T)(T,\, x)\to -\infty$ as $T \uparrow T_c$. But $(\partial u_0^2/\partial T)(T,\, x)\to (\partial f_0/\partial T)(T_c,\, x)$ as $T \uparrow T_c$. \end{remark} \begin{remark} The inequalities $\Delta_1(T) \leq u_0(T,\,x) \leq \Delta_2(T)$ and $\displaystyle{ \frac{\, u_0(T,\, x)\, }{\, u_0(T,\, x_1) \,} \leq \sqrt{a}}$ hold. \end{remark} \medskip In order to show that the transition from a normal conducting state to a superconducting state at $T=T_c$ is of the second-order, we need to deal with the thermodynamic potential $\Omega$ and differentiate it with respect to the temperature $T$ twice. Note that the thermodynamic potential $\Omega$ has the fixed point $f_0 \in \overline{W}$ given by Theorem \ref{thm:main} in its form, not the solution $\sqrt{ f_0 }$ to the BCS-Bogoliubov gap equation \eqref{eqn:bcseq} in its form. As mentioned before, this is why we treat the squared BCS-Bogoliubov gap equation, not the equation itself . See \cite[(1.5), (1.6)]{watanabe-seven} and \cite[(1.6)]{watanabe-five} for the form of the thermodynamic potential $\Omega$. See also \cite[Definition 1.10]{watanabe-five} for the operator-theoretical definition of the second-order phase transition. \begin{theorem}\label{thm:maintwo} Let the potential $U(\cdot,\,\cdot)$ in the BCS-Bogoliubov gap equation \eqref{eqn:bcseq} satisfy \eqref{eqn:potential} and \eqref{eqn:condition-two}. Let $W$ be as in \eqref{eqn:w}. Then the transition from a normal conducting state to a superconducting state at $T=T_c$ is of the second-order. \end{theorem} \section{Proofs of Theorems \ref{thm:main} and \ref{thm:maintwo}} \begin{lemma} \rm{(1)} \ The subset $W$ is a bounded and convex subset of the Banach space $C(D)$. \\ \rm{(2)} \ The closure $\overline{W}$ is a bounded, closed and convex subset of the Banach space $C(D)$. \end{lemma} \begin{proof} \rm{(1)} \ Note that the function $T \mapsto \Delta_2(T)^2$ is strictly decreasing (see \cite[Proposition 1.2]{watanabe-one}). Therefore, $W$ is bounded since $f(T,\,x) \leq \Delta_2(T)^2 \leq \Delta_2(0)^2$ for every $f \in W$. In order to show that $W$ is convex, it suffices to show that \[ \frac{tf(T,\,x)+(1-t)g(T,\,x)}{\, tf(T,\,x_1)+(1-t)g(T,\,x_1)\,} \leq a. \] Here, $t \in [0, \, 1]$ and $f, \, g \in W$. Let $T \not= T_c$. Since $f(T,\,x) \leq a f(T,\,x_1)$ and $g(T,\,x) \leq a g(T,\,x_1)$, it follows \[ \frac{tf(T,\,x)+(1-t)g(T,\,x)}{\, tf(T,\,x_1)+(1-t)g(T,\,x_1)\,} \leq \frac{t \, af(T,\,x_1)+(1-t)\, ag(T,\,x_1)}{\, tf(T,\,x_1)+(1-t)g(T,\,x_1)\,}=a. \] Next let $T=T_c$. We remind Remark \ref{rmk:ffa} here. Then \begin{eqnarray*} & & \frac{tf(T_c,\,x)+(1-t)g(T_c,\,x)}{\, tf(T_c,\,x_1)+(1-t)g(T_c,\,x_1)\,}= \frac{tf_T(T_c,\,x)+(1-t)g_T(T_c,\,x)}{\, tf_T(T_c,\,x_1)+(1-t)g_T(T_c,\,x_1)\,} \\ &\leq& \frac{t \, af_T(T_c,\,x_1)+(1-t)\, ag_T(T_c,\,x_1)}{\, tf_T(T_c,\,x_1)+(1-t)g_T(T_c,\,x_1)\,}=a. \end{eqnarray*} Therefore, $W$ is convex. \\ \rm{(2)} We have only to show that $\overline{W}$ is convex. Let $f, \, g \in \overline{W}$. Then there are $\{ f_n \}, \, \{ g_n \} \subset W$ satisfying $f_n \to f$ and $g_n \to g$ in the Banach space $C(D)$. Since $W$ is convex, $tf_n+(1-t)g_n \in W$ for $t \in [0, \, 1]$. 
\[ \left\| \, \{ tf+(1-t)g \}- \{ tf_n+(1-t)g_n \} \, \right\| \leq t\left\| \, f-f_n \, \right\|+(1-t)\left\| \, g-g_n \, \right\| \to 0 \] as $n \to \infty$. Thus $tf+(1-t)g \in \overline{W}$, and hence $\overline{W}$ is convex. \end{proof} We next show that $A: W \to W$. \begin{lemma}\label{lem:equicon} Let $f \in W$. Then $Af$ is continuous on $D$. \end{lemma} \begin{proof} Let $(T,\, x), \, (T_1,\, x_1) \in D$, and suppose $T<T_1<T_c$. We can deal with the case where $T_1=T_c$ similarly . Then \[ | Af(T,\, x)-Af(T_1,\, x_1) | \leq | Af(T,\, x)-Af(T_1,\, x) |+| Af(T_1,\, x)-Af(T_1,\, x_1) |. \] \textit{Step 1}. \ A straightforward calculation gives \[ Af(T,\, x)-Af(T_1,\, x)=\int_I U(x,\,\eta) \, I_1 \, d\eta \, \int_I U(x,\,\xi) \left\{ I_2+I_3+I_4 \right\} \, d\xi, \] where \begin{eqnarray*} I_1 &=& \sqrt{ \, \frac{f(T,\, \eta)}{\, \eta^2+f(T,\, \eta)\,} \,} \tanh \frac{\,\sqrt{\,\eta^2+f(T,\, \eta)\,}\,}{2T} + \sqrt{ \, \frac{f(T_1,\, \eta)}{\, \eta^2+f(T_1,\, \eta)\,} \,} \tanh \frac{\,\sqrt{\,\eta^2+f(T_1,\, \eta)\,}\,}{2T_1}, \\ I_2 &=& \frac{f(T,\, \xi)-f(T_1,\, \xi)}{ \, \sqrt{ f(T,\, \xi) }+\sqrt{ f(T_1,\, \xi) } \, } \frac{1}{\, \sqrt{ \xi^2+f(T,\, \xi) \,} \,} \tanh \frac{\,\sqrt{\,\xi^2+f(T,\, \xi)\,}\,}{2T},\\ I_3 &=& \sqrt{ f(T_1,\, \xi) } \left\{ \frac{1}{\, \sqrt{ \xi^2+f(T,\, \xi) \,} \,} \tanh \frac{\,\sqrt{\,\xi^2+f(T,\, \xi)\,}\,}{2T} \right. \\ & & \left. \qquad \qquad \qquad \qquad -\frac{1}{\, \sqrt{ \xi^2+f(T_1,\, \xi) \,} \,} \tanh \frac{\,\sqrt{\,\xi^2+f(T_1,\, \xi)\,}\,}{2T} \right\},\\ I_4 &=& \sqrt{ \, \frac{f(T_1,\, \xi)}{\, \xi^2+f(T_1,\, \xi)\,} \,} \left\{ \tanh \frac{\,\sqrt{\, \xi^2+f(T_1,\, \xi)\,}\,}{2T} - \tanh \frac{\,\sqrt{\, \xi^2+f(T_1,\, \xi)\,}\,}{2T_1} \right\}. \end{eqnarray*} The function $f$ is continuous since $f \in W$. Therefore, for an arbitrary $\varepsilon_1>0$, there is a $\delta>0$ such that if $|T_2-T_3|+|x_2-x_3|<\delta$, then $| f(T_2,\, x_2)-f(T_3,\, x_3) |<\varepsilon_1$. Here, $(T_2,\, x_2), (T_3,\, x_3) \in D$ are arbitrary and the $\delta>0$ does not depend on $(T_2,\, x_2), (T_3,\, x_3)$ since $f$ is uniformly continuous on $D$. Since $f(T,\, \eta)/ f(T,\, \xi) \leq a$ by \eqref{eqn:w}, \begin{eqnarray*} & & \left| \int_I U(x,\,\eta) \, I_1 \, d\eta \times \int_I U(x,\,\xi) \, I_2 \, d\xi \right| \\ &\leq& 2 \, \frac{ \, U_2^2 \,}{U_1^2} \sqrt{a} \left\{ \int_I \frac{U_1}{\, \sqrt{ \eta^2+\Delta_1(T)^2 \,} \,} \tanh \frac{\,\sqrt{\,\eta^2+\Delta_1(T)^2 \,}\,}{2T} \,d\eta \right\}^2 \left| f(T,\, \xi_1)-f(T_1,\, \xi_1) \right| \\ &\leq& 2 \, \frac{ \, U_2^2 \,}{U_1^2} \sqrt{a} \, \varepsilon_1, \end{eqnarray*} where $|T-T_1|<\delta$ with some $\xi_1 \in I$. Note that (see \eqref{eqn:delta1}) \[ \int_I \frac{U_1}{\, \sqrt{ \eta^2+\Delta_1(T)^2 \,} \,} \tanh \frac{\,\sqrt{\,\eta^2+\Delta_1(T)^2 \,}\,}{2T} \,d\eta = 1, \quad T \in [0,\, \tau_1] \] and that \[ \int_I \frac{U_1}{\, \sqrt{ \eta^2+0^2 \,} \,} \tanh \frac{\,\sqrt{\,\eta^2+0^2 \,}\,}{2T} \,d\eta < 1, \quad T \in (\tau_1,\, T_c] \] with $\Delta_1(T)=0$ at $T \in [\tau_1,\, T_c]$. Since $f(T,\, \xi)>f(T_1,\, \xi)$ \ $(T<T_1)$, \begin{eqnarray*} & & \left| \int_I U(x,\,\eta) \, I_1 \, d\eta \times \int_I U(x,\,\xi) \, I_3 \, d\xi \right| \\ &\leq& U_2 \, \Delta_2(0) \left\{ \int_I \frac{U_2}{\, \sqrt{ \eta^2+\Delta_2(T)^2 \,} \,} \tanh \frac{\,\sqrt{\,\eta^2+\Delta_2(T)^2 \,}\,}{2T} \,d\eta \right. \\ & & \qquad \left. 
+\int_I \frac{U_2}{\, \sqrt{ \eta^2+\Delta_2(T_1)^2 \,} \,} \tanh \frac{\,\sqrt{\,\eta^2+\Delta_2(T_1)^2 \,}\,}{2T_1} \,d\eta \right\} \\ & & \qquad \times \int_I \frac{1}{\, \xi^2 \,} \,d\xi \, \left| f(T,\, \xi_1)-f(T_1,\, \xi_1) \right| \\ &\leq& 2 \, \frac{ \, U_2 \, \Delta_2(0)\,}{\varepsilon} \, \varepsilon_1, \end{eqnarray*} where $|T-T_1|<\delta$ with some $\xi_1 \in I$. Similarly, \begin{eqnarray*} & & \left| \int_I U(x,\,\eta) \, I_1 \, d\eta \times \int_I U(x,\,\xi) \, I_4 \, d\xi \right| \\ &\leq& 2 U_2 \, \Delta_2(0) \, \left( \max_{z \geq 0} \frac{z}{\, \cosh z \,} \right)^2 \left\{ \int_I \frac{U_2}{\, \sqrt{ \eta^2+\Delta_2(T)^2 \,} \,} \tanh \frac{\,\sqrt{\,\eta^2+\Delta_2(T)^2 \,}\,}{2T} \,d\eta \right. \\ & & \qquad \left. +\int_I \frac{U_2}{\, \sqrt{ \eta^2+\Delta_2(T_1)^2 \,} \,} \tanh \frac{\,\sqrt{\,\eta^2+\Delta_2(T_1)^2 \,}\,}{2T_1} \,d\eta \right\} \\ & & \qquad \times \int_I \frac{1}{\, \xi \,} \,d\xi \, |T-T_1| \\ &\leq& 4 U_2 \, \Delta_2(0) \, \left( \max_{z \geq 0} \frac{z}{\, \cosh z \,} \right)^2 \, (\ln \varepsilon) \, |T-T_1|<\varepsilon_1, \end{eqnarray*} where \[ |T-T_1|<\delta_1=\frac{\varepsilon_1}{\, 4 \, U_2 \, \Delta_2(0) \left( \max_{z \geq 0} \frac{z}{\, \cosh z \,} \right)^2 \, \ln \varepsilon \,}. \] Thus \[ | Af(T,\, x)-Af(T_1,\, x) | \leq \left( 2 \, \frac{ \, U_2^2 \,}{U_1^2} \sqrt{a}+ 2 \, \frac{ \, U_2 \, \Delta_2(0)\,}{\varepsilon}+1 \right) \, \varepsilon_1, \] where $|T-T_1|<\min (\delta, \, \delta_1)$. \\ \textit{Step 2}. \ By hypothesis, the potential $U(\cdot,\,\cdot)$ is continuous on the compact set $I^2$, and hence $U(\cdot,\,\cdot)$ is uniformly continuous. Therefore, for an arbitrary $\varepsilon_1>0$, there is a $\delta_2>0$ such that if $|x-x_1|<\delta_2$, then $| U(x,\, \eta)-U(x_1,\, \eta) |<\varepsilon_1$. Note that the $\delta_2$ does not depend on $f \in W$. A straightforward calculation gives \begin{eqnarray*} & & \left| Af(T_1,\, x)-Af(T_1,\, x_1) \right| \\ &\leq& \int_I \left\{ U(x,\,\eta)+U(x_1,\,\eta) \right\} \sqrt{ \, \frac{f(T_1,\, \eta)}{\, \eta^2+f(T_1,\, \eta)\,} \,} \tanh \frac{\,\sqrt{\,\eta^2+f(T_1,\, \eta)\,}\,}{2T_1} \, d\eta \\ & & \quad \times \int_I \left| U(x,\,\xi)-U(x_1,\,\xi) \right| \sqrt{ \, \frac{f(T_1,\, \xi)}{\, \xi^2+f(T_1,\, \xi)\,} \,} \tanh \frac{\,\sqrt{\, \xi^2+f(T_1,\, \xi)\,}\,}{2T_1} \, d\xi \\ &\leq& 2 \int_I \, U_2 \, \sqrt{ \, \frac{\Delta_2(T_1)^2}{\, \eta^2+\Delta_2(T_1)^2\,} \,} \tanh \frac{\,\sqrt{\,\eta^2+\Delta_2(T_1)^2 \,}\,}{2T_1} \, d\eta \\ & & \quad \times \int_I \, \left| U(x,\,\xi)-U(x_1,\,\xi) \right| \, \sqrt{ \, \frac{\Delta_2(T_1)^2}{\, \eta^2+\Delta_2(T_1)^2\,} \,} \tanh \frac{\,\sqrt{\,\eta^2+\Delta_2(T_1)^2 \,}\,}{2T_1} \, d\xi \\ &\leq& 2 \frac{\, \Delta_2(0)^2 \,}{U_2} \, \varepsilon_1, \end{eqnarray*} where $|x-x_1|<\delta_2$. \\ \textit{Step 3}. \ Steps 1 and 2 thus imply \[ | Af(T,\, x)-Af(T_1,\, x_1) | \leq \left( 2 \, \frac{ \, U_2^2 \,}{U_1^2} \sqrt{a}+ 2 \, \frac{ \, U_2 \, \Delta_2(0)\,}{\varepsilon}+1+ 2 \frac{\, \Delta_2(0)^2 \,}{U_2} \right) \, \varepsilon_1, \] where $|T-T_1|+|x-x_1|<\min (\delta, \, \delta_1, \delta_2)$. Therefore, $Af$ is continuous on $D$. \end{proof} \begin{lemma} \ Let $f \in W$. \\ \rm{(1)} \ $Af$ is partially differentiable with respect to both $T$ and $x$. Its first-order partial derivatives $(Af)_T$ and $(Af)_x$ are both continuous on $D$. Therefore, $Af \in C^1(D)$. \\ \rm{(2)} \ $Af$ is twice partially differentiable with respect to both $T$ and $x$. 
Its second-order partial derivatives $(Af)_{TT}$, $(Af)_{Tx}=(Af)_{xT}$ and $(Af)_{xx}$ are all continuous on $D$. Therefore, $Af \in C^2(D)$. \end{lemma} \begin{proof} (1) \ Let us show that $Af$ is partially differentiable with respect to $T$ at $(T_c,\,x_0) \in D$. Note that $Af(T_c,\,x_0)=0$. Let $T<T_c$. It follows from $f(T_c,\, \xi)=0$ (see \eqref{eqn:w}) that \[ f(T,\, \xi)=f(T_c,\, \xi)+(T-T_c)f_T(T_1,\, \xi)=(T_c-T)\left( -f_T(T_1,\, \xi) \right) \] for some $T_1$ \ $(T<T_1<T_c)$. Then \begin{eqnarray*} \frac{\, Af(T_c,\,x_0)-Af(T,\,x_0) \,}{T_c-T} &=& -\left( \int_I U(x_0,\,\xi) \sqrt{ \, \frac{f(T,\, \xi)/(T_c-T)}{\, \xi^2+f(T,\, \xi)\,} \,} \, \tanh \frac{\,\sqrt{\,\xi^2+f(T,\, \xi)\,}\,}{2T}\, d\xi \right)^2 \\ &=& -\left( \int_I U(x_0,\,\xi) \sqrt{ \, \frac{-f_T(T_1,\, \xi)}{\, \xi^2+f(T,\, \xi)\,} \,} \, \tanh \frac{\,\sqrt{\,\xi^2+f(T,\, \xi)\,}\,}{2T}\, d\xi \right)^2. \end{eqnarray*} Since $T$ is in a neighborhood of $T_c$, we let $T \geq T_c/2$. Therefore, \[ \sqrt{ \, \frac{-f_T(T_1,\, \xi)}{\, \xi^2+f(T,\, \xi)\,} \,} \, \tanh \frac{\,\sqrt{\,\xi^2+f(T,\, \xi)\,}\,}{2T} \leq \frac{ \, \sqrt{ M_T} \, }{\xi} \, \tanh \frac{\, \xi \,}{T_c}, \] where the right side is independent of $T$ and is Lebesgue integrable on $I$. Thus, as $T \uparrow T_c$, \[ \frac{\, Af(T_c,\,x_0)-Af(T,\,x_0) \,}{T_c-T} \to -\left( \int_I U(x_0,\,\xi) \frac{ \, \sqrt{ -f_T(T_c,\, \xi) } \, }{\xi} \, \tanh \frac{\, \xi \,}{\, 2T_c \,} \, d\xi \right)^2. \] Therefore, $Af$ is partially differentiable with respect to $T$ at $(T_c,\,x_0)$, and \begin{equation}\label{eqn:afttc} (Af)_T(T_c,\,x_0)=-\left( \int_I U(x_0,\,\xi) \frac{ \, \sqrt{ -f_T(T_c,\, \xi) } \, }{\xi} \, \tanh \frac{\, \xi \,}{\, 2T_c \,} \, d\xi \right)^2. \end{equation} We next show that $(Af)_T$ is continuous at $(T_c,\,x_0)$. Here, \begin{eqnarray}\label{eqn:afT} (Af)_T(T,\,x) &=& \int_I U(x,\,\eta) \sqrt{ \, \frac{f(T,\, \eta)}{\, \eta^2+f(T,\, \eta)\,} \,} \, \tanh \frac{\,\sqrt{\,\eta^2+f(T,\, \eta)\,}\,}{2T}\, d\eta \\ \nonumber & & \quad \times \int_I U(x,\,\xi) \left( J_1+J_2+J_3 \right) \, d\xi, \end{eqnarray} where \begin{eqnarray*} J_1 &=& \frac{\, f_T(T,\,\xi) \,}{\, \sqrt{ f(T,\,\xi)} \,} \, \frac{\xi^2}{\, \left\{ \, \xi^2+f(T,\, \xi) \, \right\}^{3/2}\, } \tanh \frac{ \, \sqrt{ \, \xi^2+f(T,\, \xi) \, } \,}{2T}, \nonumber \\ J_2 &=& \frac{\sqrt{ f(T,\, \xi) } \, f_T(T,\, \xi)}{\, 2T \left\{ \, \xi^2+f(T,\, \xi) \, \right\} \cosh^2 \frac{ \, \sqrt{ \, \xi^2+f(T,\, \xi) \, } \, }{2T} \,}, \nonumber \\ J_3 &=& -\frac{ \sqrt{ f(T,\, \xi) } }{\, T^2 \cosh^2 \frac{ \, \sqrt{ \, \xi^2+f(T,\, \xi) \, } \, }{2T} \,}. \nonumber \end{eqnarray*} Note that \begin{eqnarray}\label{eqn:AfT} (Af)_T(T,\,x)-(Af)_T(T_c,\,x_0) &=& (Af)_T(T,\,x)-(Af)_T(T_c,\,x) \\ \nonumber & & \quad + (Af)_T(T_c,\,x)-(Af)_T(T_c,\,x_0). \end{eqnarray} In order to show that $(Af)_T(T,\,x) \to (Af)_T(T_c,\,x)$ as $T \uparrow T_c$, we show that as $T \uparrow T_c$, \begin{eqnarray*} \int_I U(x,\,\eta) \sqrt{ \, \frac{f(T,\, \eta)}{\, \eta^2+f(T,\, \eta)\,} \,} \, \tanh \frac{\,\sqrt{\,\eta^2+f(T,\, \eta)\,}\,}{2T}\, d\eta \int_I U(x,\,\xi) \, J_1 \, d\xi &\to& (Af)_T(T_c,\,x), \\ \int_I U(x,\,\eta) \sqrt{ \, \frac{f(T,\, \eta)}{\, \eta^2+f(T,\, \eta)\,} \,} \, \tanh \frac{\,\sqrt{\,\eta^2+f(T,\, \eta)\,}\,}{2T}\, d\eta \int_I U(x,\,\xi) \, (J_2+J_3) \, d\xi &\to& 0. 
\end{eqnarray*} A straightforward calculation gives \[ U(x,\,\eta) \sqrt{ \, \frac{f(T,\, \eta)}{\, \eta^2+f(T,\, \eta)\,} \,} \, \tanh \frac{\,\sqrt{\,\eta^2+f(T,\, \eta)\,}\,}{2T} \; U(x,\,\xi) \, J_1 \leq U_2^2 \, \sqrt{a} \left( \frac{1}{\, \eta \,} \tanh \frac{\, \eta \,}{T_c} \right)^2 M_T. \] Here, we assumed $T \geq T_c/2$. The right side of this inequality is independent of $T$ and is Lebesgue integrable on $I^2$, and so \ (as $T \uparrow T_c$) \[ \int_I U(x,\,\eta) \sqrt{ \, \frac{f(T,\, \eta)}{\, \eta^2+f(T,\, \eta)\,} \,} \, \tanh \frac{\,\sqrt{\,\eta^2+f(T,\, \eta)\,}\,}{2T}\, d\eta \int_I U(x,\,\xi) \, J_1 \, d\xi \to (Af)_T(T_c,\,x). \] Similarly we can show that \[ \int_I U(x,\,\eta) \sqrt{ \, \frac{f(T,\, \eta)}{\, \eta^2+f(T,\, \eta)\,} \,} \, \tanh \frac{\,\sqrt{\,\eta^2+f(T,\, \eta)\,}\,}{2T}\, d\eta \int_I U(x,\,\xi) \, (J_2+J_3) \, d\xi \to 0 \] as $T \uparrow T_c$. Moreover, we have similarly that \[ (Af)_T(T_c,\,x) \to (Af)_T(T_c,\,x_0) \] as $x \to x_0$. It thus follows from \eqref{eqn:AfT} that $(Af)_T$ is continuous at $(T_c,\,x_0)$. Similarly we can show the rest of (1), and (2). Note that $(Af)_{TT}$ is given as follows. \begin{eqnarray*} \quad (Af)_{TT}(T,\,x) &=& \frac{1}{\, 2 \,} \left\{ \int_I U(x,\,\eta) \, \left( J_1+J_2+J_3 \right) \, d\eta \right\}^2 \\ & & +\int_I U(x,\,\eta) \sqrt{ \, \frac{f(T,\, \eta)}{\, \eta^2+f(T,\, \eta)\,} \,} \, \tanh \frac{\,\sqrt{\,\eta^2+f(T,\, \eta)\,}\,}{2T}\, d\eta \nonumber \\ & & \times \int_I U(x,\,\xi) \left\{ K_1+ \frac{1}{\, \cosh^2 \frac{ \, \sqrt{ \, \xi^2+f(T,\, \xi) \, } \, }{2T} \,} \left( K_2+K_3+K_4+K_5 \right) \right\} \, d\xi, \nonumber \end{eqnarray*} where \begin{eqnarray*} K_1 &=& \left\{ \frac{\, f_{TT}(T,\,\xi) \,}{\, \sqrt{ f(T,\,\xi)} \,} -\frac{\, f_T(T,\,\xi)^2 \,}{\, 2\sqrt{ f(T,\,\xi)}^3 \,} -\frac{\, 3 f_T(T,\,\xi)^2 \,}{\, 2\sqrt{ f(T,\,\xi)} \left( \, \xi^2+f(T,\, \xi) \, \right) \,} \right\} \\ & & \qquad \times \frac{\xi^2}{\, \left\{ \, \xi^2+f(T,\, \xi) \, \right\}^{3/2}\, } \tanh \frac{ \, \sqrt{ \, \xi^2+f(T,\, \xi) \, } \,}{2T}, \\ K_2 &=& \frac{\, f_T(T,\,\xi) \,}{\, 2\sqrt{ f(T,\,\xi)} \,} \frac{\xi^2}{\, \xi^2+f(T,\, \xi) \, } \left\{ \frac{\, f_T(T,\,\xi) \,}{\, 2T \, ( \xi^2+f(T,\, \xi) ) \,} -\frac{1}{\, T^2 \,} \right\}, \\ K_3 &=& \frac{\, f_T(T,\,\xi) \,}{\, 2\sqrt{ f(T,\,\xi)} \,} \left\{ \frac{\, f_T(T,\,\xi) \,}{\, 2T \, ( \xi^2+f(T,\, \xi) ) \,} -\frac{1}{\, T^2 \,} \right\}, \\ K_4 &=& \sqrt{ f(T,\,\xi) \,} \left\{ \frac{\, f_{TT}(T,\,\xi) \,}{\, 2T \, ( \xi^2+f(T,\, \xi) ) \,} -\frac{\, f_T(T,\,\xi) \,}{\, 2T^2 \, ( \xi^2+f(T,\, \xi) ) \,} -\frac{\, (f_T(T,\,\xi))^2 \,}{\, 2T \, ( \xi^2+f(T,\, \xi) )^2 \,} +\frac{2}{\, T^3 \,} \right\}, \\ K_5 &=& -\sqrt{ f(T,\,\xi) \,} \, \sqrt{ \, \xi^2+f(T,\, \xi) \, } \left\{ \frac{\, f_T(T,\,\xi) \,}{\, 2T \, ( \xi^2+f(T,\, \xi) ) \,} -\frac{1}{\, T^2 \,} \right\}^2 \tanh \frac{ \, \sqrt{ \, \xi^2+f(T,\, \xi) \, } \,}{2T}. \end{eqnarray*} \end{proof} A proof similar to that of \cite[Lemma 3.4]{watanabe-one} gives the following. \begin{lemma}\label{lm:delta} Let $f \in W$. Then $\Delta_1(T)^2 \leq Af(T,\,x) \leq \Delta_2(T)^2$ at each $(T,\,x) \in D$. \end{lemma} \begin{lemma} Let $f \in W$. Then $Af(T_c,\, x)=0$ \ $(x \in I)$, and \[ \frac{Af(T,\,x)}{\,Af(T,\,x_1) \,} \leq a. \] \end{lemma} \begin{proof} \ By \eqref{eqn:w}, \[ Af(T_c,\,x)=\left( \int_I U(x,\,\xi) \sqrt{ \, \frac{f(T_c,\, \xi)}{\, \xi^2+f(T_c,\, \xi)\,} \,} \, \tanh \frac{\,\sqrt{\,\xi^2+f(T_c,\, \xi)\,}\,}{2T_c}\, d\xi \right)^2=0. 
\] Next, it follows from \eqref{eqn:ourop} that at $T \in [0,\, T_c)$, \[ \frac{Af(T,\,x)}{\,Af(T,\,x_1) \,} = \left( \frac{U(x,\, \xi_1)}{\, U(x_1,\, \xi_2) \,} \right)^2 \leq \left( \frac{ \displaystyle{ \max_{(x,\,\xi) \in I^2} U(x,\,\xi) }}{\, \displaystyle{ \min_{(x,\,\xi) \in I^2} U(x,\,\xi) } \,} \right)^2 = a, \] where $\xi_1, \, \xi_2 \in I$. It then follows from Remark \ref{rmk:ffa} and \eqref{eqn:afttc} that at $T=T_c$, \[ \frac{Af(T_c,\, x)}{\,Af(T_c,\, x_1) \,}= \frac{(Af)_T(T_c,\, x)}{\, (Af)_T(T_c,\, x_1) \,} \leq \left( \frac{ \displaystyle{ \max_{(x,\,\xi) \in I^2} U(x,\,\xi) }}{\, \displaystyle{ \min_{(x,\,\xi) \in I^2} U(x,\,\xi) } \,} \right)^2 =a. \] The result follows. \end{proof} \begin{lemma} \ For $f \in W$, \quad $\displaystyle{ -(Af)_T(T,\,x)>0. }$ \end{lemma} \begin{proof} It follows immediately from \eqref{eqn:afT} that $-(Af)_T(T,\,x)>0$.\end{proof} \begin{lemma} \ For $f \in W$, \[ \sup_{f \in W} \left[ \max_{(T,\, x) \in D} \left\{ -(Af)_T(T,\,x) \right\} \right] \leq \sup_{f \in W} \left[ \max_{(T,\, x) \in D} \left\{ -f_T(T,\,x) \right\} \right] \; \big(=M_T \big). \] \end{lemma} \begin{proof} From \eqref{eqn:afT} it follows that \[ -(Af)_T(T,\,x) \leq \sqrt{a}\, A \left\{ B\sup_{f \in W} \left[ \max_{(T,\, x) \in D} \left\{ -f_T(T,\,x) \right\} \right]+C \right\}, \] where \begin{eqnarray*} A &=& \int_I \frac{ \, U(x,\,\eta) \,}{\, \sqrt{ \, \eta^2+f(T,\, \eta)\,} \,} \, \tanh \frac{\,\sqrt{\,\eta^2+f(T,\, \eta)\,}\,}{2T}\, d\eta, \\ B &=& \int_I \frac{ \, U(x,\,\xi) \,}{\, \sqrt{ \, \xi^2+f(T,\, \xi)\,} \,} \left\{ \frac{\xi^2}{\, \xi^2+f(T,\, \xi) \, }\, \tanh \frac{\,\sqrt{\,\xi^2+f(T,\, \xi)\,}\,}{2T}\right. \\ & & \qquad \left. +\frac{f(T,\, \xi)}{\, \xi^2+f(T,\, \xi) \, }\, \frac{\sqrt{ \, \xi^2+f(T,\, \xi)\,}}{\, 2T \,\cosh^2 \frac{\,\sqrt{\,\xi^2+f(T,\, \xi)\,}\,}{2T} \,} \right\}\, d\xi, \\ C &=& \int_I U(x,\,\xi) \, \frac{ \, f(T,\, \xi) \,}{\, T^2 \, \cosh^2 \frac{\,\sqrt{\,\xi^2+f(T,\, \xi)\,}\,}{2T}\, }\, d\xi. \end{eqnarray*} Note that the function $\displaystyle{ z \mapsto \frac{\,\tanh z \,}{z} }$ is strictly decreasing at $z>0$. It then follows from \eqref{eqn:condition-two} that \begin{equation}\label{eqn:aaone} a^{1/4}A \leq a^{1/4} \max_{(T,\, x) \in D} \left[ \, \int_I \frac{\, U(x,\, \eta) \,}{\eta} \, \tanh \frac{\eta}{\, 2T\,}\,d\eta \, \right] \leq 1. \end{equation} First let $T<T_c$. Then $B<A$ since $\displaystyle{ \tanh z>\frac{z}{\cosh z} }$ at $z>0$, $f(T,\, \xi)>0$ and $\xi \geq \varepsilon$. Therefore, $\sqrt{a}\, AB<( a^{1/4}A)^2\leq 1$ by \eqref{eqn:aaone}. Thus \[ -(Af)_T(T,\,x) \leq \sqrt{a}\, A \left\{ B\sup_{f \in W} \left[ \max_{(T,\, x) \in D} \left\{ -f_T(T,\,x) \right\} \right]+C \right\} \leq \sup_{f \in W} \left[ \max_{(T,\, x) \in D} \left\{ -f_T(T,\,x) \right\} \right] \] as long as \[ \sup_{f \in W} \left[ \max_{(T,\, x) \in D} \left\{ -f_T(T,\,x) \right\} \right] \geq \frac{\sqrt{a} \, AC}{\, 1-\sqrt{a}\, AB \,}. \] This inequality holds true since (see the definition of $M_T$ in \eqref{eqn:w}) \[ \frac{\sqrt{a} \, AC}{\, 1-\sqrt{a}\, AB \,} = \frac{\sqrt{a} \, AC}{\, 1-\sqrt{a}\, A(A-B') \,} \leq \frac{\sqrt{a} \, AC}{\, \sqrt{a}\, AB' \,}=\frac{C}{\,B' \,} \leq M_T = \sup_{f \in W} \left[ \max_{(T,\, x) \in D} \left\{ -f_T(T,\,x) \right\} \right]. 
\] Here, \[ B'=\int_I \frac{ \, U(x,\,\xi) \,}{\, \sqrt{ \, \xi^2+f(T,\, \xi)\,} \,} \frac{f(T,\, \xi)}{\, \xi^2+f(T,\, \xi) \, }\left\{ \tanh \frac{\,\sqrt{\,\xi^2+f(T,\, \xi)\,}\,}{2T}-\frac{\sqrt{ \, \xi^2+f(T,\, \xi)\,}}{\, 2T \,\cosh^2 \frac{\,\sqrt{\,\xi^2+f(T,\, \xi)\,}\,}{2T} \,} \right\}\, d\xi. \] Note also that $1-\sqrt{a}\, A^2 \geq 0$. Thus, at $T<T_c$, \[ -(Af)_T(T,\,x) \leq \sup_{f \in W} \left[ \max_{(T,\, x) \in D} \left\{ -f_T(T,\,x) \right\} \right]. \] Next let $T=T_c$. Then \begin{eqnarray*} & &-(Af)_T(T_c,\,x)=\left( \int_I U(x,\,\xi)\frac{\, \sqrt{ -f_T(T_c,\,\xi)} \,}{\, \xi \,} \, \tanh \frac{\, \xi \,}{2T_c}\, d\xi \right)^2 \\ &\leq& \sqrt{a}\left( \int_I U(x,\,\xi)\frac{\, \sqrt{ -f_T(T_c,\,\xi)} \,}{\, \xi \,} \, \tanh \frac{\, \xi \,}{2T_c}\, d\xi \right)^2 \\ &\leq& \sqrt{a}\left( \int_I U(x,\,\xi)\frac{\, 1 \,}{\, \xi \,} \, \tanh \frac{\, \xi \,}{2T_c}\, d\xi \right)^2 \sup_{f \in W} \left[ \max_{(T,\, x) \in D} \left\{ -f_T(T,\,x) \right\} \right] \\ &\leq& \sup_{f \in W} \left[ \max_{(T,\, x) \in D} \left\{ -f_T(T,\,x) \right\} \right]. \end{eqnarray*} This is because at $T=T_c$, \[ \sqrt{a}\, A^2=\left( a^{1/4}\, \int_I \frac{\, U(x,\,\eta) \,}{\, \eta \,} \, \tanh \frac{\, \eta \,}{2T_c}\, d\eta \right)^2 \leq 1 \] by \eqref{eqn:aaone}. Thus \[ \sup_{f \in W} \left[ \max_{(T,\, x) \in D} \left\{ -(Af)_T(T,\,x) \right\} \right] \leq \sup_{f \in W} \left[ \max_{(T,\, x) \in D} \left\{ -f_T(T,\,x) \right\} \right] =M_T. \] \end{proof} \begin{remark}\label{rmk:constantpotential} Let $U(x,\,\xi)=U_1=U_2$ at all $(x,\, \xi) \in I^2$. Then, $a=1$ and $f(T,\, x)=\Delta_1(T)^2=\Delta_2(T)^2$. Moreover, $T_c=\tau_1=\tau_2$ and \[ a^{1/4}A=\int_I \frac{U_2}{\,\sqrt{\,\xi^2+\Delta_2(T)^2\,}\,} \, \tanh \frac{\, \sqrt{\,\xi^2+\Delta_2(T)^2\,}\,}{2T}\,d\xi=1 \] at all $(T,\, x) \in [0,\, T_c] \times I$ (see \eqref{eqn:delta2}). Therefore, the preceding lemma holds true not only when the potential $U(\cdot,\,\cdot)$ in the BCS-Bogoliubov gap equation \eqref{eqn:bcseq} is a positive constant, but also when $U(\cdot,\,\cdot)$ is a function. \end{remark} \begin{lemma} The set $AW$ is equicontinuous. \end{lemma} \begin{proof} Let $f \in W$. Let $(T,\, x), \, (T_1,\, x_1) \in D$ and suppose $T<T_1<T_c$. We can deal with the case where $T_1=T_c$ similarly. Then \[ | Af(T,\, x)-Af(T_1,\, x_1) | \leq | Af(T,\, x)-Af(T_1,\, x) |+| Af(T_1,\, x)-Af(T_1,\, x_1) |. \] The preceding lemma gives \[ | f(T,\, \xi)-f(T_1,\, \xi) | = \left| f_T(T_2, \, \xi) \right| \cdot \left| T-T_1 \right| \leq M_T | T-T_1 |. \] Here, $T<T_2<T_1$ and $\xi \in I$. Therefore, a proof similar to that of Lemm \ref{lem:equicon} gives \begin{eqnarray*} | Af(T,\, x)-Af(T_1,\, x_1) | &\leq& \left\{ \left( 2 \, \frac{ \, U_2^2 \,}{U_1^2} \sqrt{a}+ 2 \, \frac{ \, U_2 \, \Delta_2(0)\,}{\varepsilon} \right) M_T \right. \\ & & + \; 4 \, U_2 \, \Delta_2(0) \left( \max_{z \geq 0} \frac{z}{\, \cosh z \,} \right)^2 \, \ln \varepsilon \\ & & \left.+2 \; \frac{\, \Delta_2(0)^2 \,}{U_2}\, \max_{(x,\, \xi) \in I^2} \left\{ U_x(x,\,\xi) \right\}\right\}\, \left( | T-T_1 | +| x-x_1 | \right), \end{eqnarray*} from which the result follows. \end{proof} Since $Af(T,\, x)\leq \Delta_2(T)^2 \leq \Delta_2(0)^2$ for $f \in W$ (see Lemma \ref{lm:delta}), the set $AW$ is uniformly bounded. Moreover, $AW$ is equicontinuous by the preceding lemma. We thus have the following. \begin{lemma}\label{lm:setaw} \ $\displaystyle{ A: \, W \to W}$, and the set $AW$ is relatively compact. 
\end{lemma} \begin{lemma}\label{lm:acontinuous} The operator $A:\, W \to W$ is continuous. \end{lemma} \begin{proof} Let $T<T_c$. Then, for $f, \, g \in W$, \[ Af(T,\, x)-Ag(T,\, x)=\int_I U(x,\,\eta) \, L_1 \, d\eta \, \int_I U(x,\,\xi) \left\{ L_2+L_3 \right\} \, d\xi, \] where \begin{eqnarray*} L_1 &=& \sqrt{ \, \frac{f(T,\, \eta)}{\, \eta^2+f(T,\, \eta)\,} \,} \tanh \frac{\,\sqrt{\,\eta^2+f(T,\, \eta)\,}\,}{2T} + \sqrt{ \, \frac{g(T,\, \eta)}{\, \eta^2+g(T,\, \eta)\,} \,} \tanh \frac{\,\sqrt{\,\eta^2+g(T,\, \eta)\,}\,}{2T}, \\ L_2 &=& \frac{f(T,\, \xi)-g(T,\, \xi)}{ \, \sqrt{ f(T,\, \xi) }+\sqrt{ g(T,\, \xi) } \, } \frac{1}{\, \sqrt{ \xi^2+f(T,\, \xi) \,} \,} \tanh \frac{\,\sqrt{\,\xi^2+f(T,\, \xi)\,}\,}{2T},\\ L_3 &=& \sqrt{ g(T,\, \xi) } \left\{ \frac{1}{\, \sqrt{ \xi^2+f(T,\, \xi) \,} \,} \tanh \frac{\,\sqrt{\,\xi^2+f(T,\, \xi)\,}\,}{2T} \right. \\ & & \left. \qquad \qquad \qquad \qquad -\frac{1}{\, \sqrt{ \xi^2+g(T,\, \xi) \,} \,} \tanh \frac{\,\sqrt{\,\xi^2+g(T,\, \xi)\,}\,}{2T} \right\}. \end{eqnarray*} Since $f(T,\, \eta)/ f(T,\, \xi) \leq a$ and $g(T,\, \eta)/ g(T,\, \xi)\leq a$ by \eqref{eqn:w}, it follows \begin{eqnarray*} & &\left| \int_I U(x,\,\eta) \, L_1 \, d\eta \times \int_I U(x,\,\xi) \, L_2 \, d\xi \right| \\ &\leq& 2 \, \frac{ \, U_2^2 \,}{U_1^2} \sqrt{a} \left\{ \int_I \frac{U_1}{\, \sqrt{ \eta^2+\Delta_1(T)^2 \,} \,} \tanh \frac{\,\sqrt{\,\eta^2+\Delta_1(T)^2 \,}\,}{2T} \,d\eta \right\}^2 \| f-g \| \\ &\leq& 2 \, \frac{ \, U_2^2 \,}{U_1^2} \sqrt{a} \, \| f-g \|. \end{eqnarray*} Moreover, \begin{eqnarray*} & &\left| \int_I U(x,\,\eta) \, L_1 \, d\eta \times \int_I U(x,\,\xi) \, L_3 \, d\xi \right| \\ &\leq& 2 \, \frac{ \, U_2^2 \, \Delta_2(0)^2 \,}{U_1} \int_I \frac{U_1}{\, \sqrt{ \eta^2+\Delta_1(T)^2 \,} \,} \tanh \frac{\,\sqrt{\,\eta^2+\Delta_1(T)^2 \,}\,}{2T} \,d\eta \, \int_I \frac{1}{\, \xi^3 \,} \,d\xi \, \| f-g \| \\ &\leq& \frac{ \, U_2^2 \, \Delta_2(0)^2 \,}{U_1 \, \varepsilon^2} \, \| f-g \|. \end{eqnarray*} Therefore, at $T<T_c$, \[ | Af(T,\, x)-Ag(T,\, x) | \leq \left( 2 \, \frac{ \, U_2^2 \,}{U_1^2} \sqrt{a}+ \frac{ \, U_2^2 \, \Delta_2(0)^2 \,}{U_1 \, \varepsilon^2} \right) \, \| f-g \|. \] Since $Af(T_c,\, x)=Ag(T_c,\, x)=0$, this inequality holds true also at $T=T_c$. Thus \[ \| Af-Ag \| \leq \left( 2 \, \frac{ \, U_2^2 \,}{U_1^2} \sqrt{a}+ \frac{ \, U_2^2 \, \Delta_2(0)^2 \,}{U_1 \, \varepsilon^2} \right) \, \| f-g \|. \] The result follows. \end{proof} We next extend the domain $W$ of our operator $A$ to its closure $\overline{W}$ with respect to the norm $\| \cdot \|$ of the Banach space $C(D)$. \begin{lemma}\label{lm:overlineW} \quad $\displaystyle{ A: \, \overline{W} \to \overline{W}}$. \end{lemma} \begin{proof} For $f \in \overline{W}$, there is a sequence $\{ f_n \}_{n=1}^{\infty} \subset W$ satisfying $\| f-f_n \| \to 0$ as $n \to \infty$. By the preceding lemma, \[ \| Af_n-Af_m \| \leq \left( 2 \, \frac{ \, U_2^2 \,}{U_1^2} \sqrt{a}+ \frac{ \, U_2^2 \, \Delta_2(0)^2 \,}{U_1 \, \varepsilon^2} \right) \, \| f_n-f_m \|. \] Therefore, the sequence $\{ Af_n \}_{n=1}^{\infty} \subset W$ is a Cauchy sequence. Hence there is an element $F \in \overline{W}$ satisfying $\| F-Af_n \| \to 0$ as $n \to \infty$. Note that the element $F$ does not depend on how to choose the sequence $\{ f_n \}_{n=1}^{\infty} \subset W$, as shown below. Suppose that there is another sequence $\{ g_n \}_{n=1}^{\infty} \subset W$ satisfying $\| f-g_n \| \to 0$ as $n \to \infty$. 
Similarly, the sequence $\{ Ag_n \}_{n=1}^{\infty} \subset W$ becomes a Cauchy sequence, and hence there is an element $G \in \overline{W}$ satisfying $\| G-Ag_n \| \to 0$ as $n \to \infty$. Then \[ \| F-G \| \leq \| F-Af_n \|+\| Af_n-Ag_n \|+\| Ag_n-G \| \to 0 \] as $n \to \infty$. Therefore, $F=G$, and hence $F$ does not depend on how to choose the sequence in $W$. Thus we define $F=Af$. The result thus follows. \end{proof} \begin{lemma} For $f \in \overline{W}$, \[ Af(T,\,x)=\left( \int_I U(x,\,\xi) \sqrt{ \, \frac{f(T,\, \xi)}{\, \xi^2+f(T,\, \xi)\,} \,} \, \tanh \frac{\,\sqrt{\,\xi^2+f(T,\, \xi)\,}\,}{2T}\, d\xi \right)^2. \] \end{lemma} \begin{proof} \ For $f \in \overline{W}$, there is a sequence $\{ f_n \}_{n=1}^{\infty} \subset W$ satisfying $\| f-f_n \| \to 0$ as $n \to \infty$. Since $f$ is Lebesgue integrable on $I$, we set \[ H(T,\, x)=\left( \int_I U(x,\,\xi) \sqrt{ \, \frac{f(T,\, \xi)}{\, \xi^2+f(T,\, \xi)\,} \,} \, \tanh \frac{\,\sqrt{\,\xi^2+f(T,\, \xi)\,}\,}{2T}\, d\xi \right)^2 \] at all $(T,\, x) \in D$. Then \begin{eqnarray*} \left| Af(T,\,x)-H(T,\, x) \right| &\leq& \left| Af(T,\,x)-Af_n(T,\, x) \right|+\left| Af_n(T,\,x)-H(T,\, x) \right| \\ &\leq& \left\| Af-Af_n \right\|+\left| Af_n(T,\,x)-H(T,\, x) \right|. \end{eqnarray*} By the proof of Lemma \ref{lm:overlineW}, \[ \left\| Af-Af_n \right\| \to 0 \] as $n \to \infty$. On the other hand, a proof similar to that of Lemma \ref{lm:acontinuous} gives \[ \left| Af_n(T,\,x)-H(T,\, x) \right| \leq \left( 2 \, \frac{ \, U_2^2 \,}{U_1^2} \sqrt{a}+ \frac{ \, U_2^2 \, \Delta_2(0)^2 \,}{U_1 \, \varepsilon^2} \right) \, \| f_n-f \| \to 0 \] as $n \to \infty$. The result thus follows. \end{proof} A straightforward calculation gives the following. \begin{lemma}\label{lm:rlc} \quad $\displaystyle{ A: \, \overline{W} \to \overline{W}}$ is continuous. Moreover, the set $A\overline{W}$ is uniformly bounded and equicontinuous, and hence the set $A\overline{W}$ is relatively compact. \end{lemma} Lemma \ref{lm:rlc} immediately implies the following. \begin{lemma}\label{lm:acompact} The operator $A:\, \overline{W} \to \overline{W}$ is compact. Therefore, the operator $A:\, \overline{W} \to \overline{W}$ has a unique fixed point $f_0 \in \overline{W}$, i.e., $\displaystyle{ f_0=Af_0 }$. \end{lemma} \begin{proof} Applying the Schauder fixed-point theorem gives that the operator $A:\, \overline{W} \to \overline{W}$ has at least one fixed point $f_0 \in \overline{W}$. A proof similar to that of \cite[Lemma 3.10]{watanabe-one} gives the uniqueness of $f_0 \in \overline{W}$. \end{proof} Our proof of Theorem \ref{thm:main} is now complete. \bigskip In order to give a proof of Theorem \ref{thm:maintwo}, we need to deal with the thermodynamic potential $\Omega$ and differentiate it with respect to the temperature $T$ twice, as mentioned before. Note that the thermodynamic potential $\Omega$ has the fixed point $f_0 \in \overline{W}$ given by Theorem \ref{thm:main} in its form, not the solution $\sqrt{ f_0 }$ to the BCS-Bogoliubov gap equation. Suppose that the fixed point $f_0$ is an element of the subset $W$. It then follows immediately from Theorem \ref{thm:main} that $f_0 \in C^2(D)$. Hence the thermodynamic potential $\Omega$ with the fixed point $f_0$ satisfies all the conditions in the operator-theoretical definition of the second-order phase transition (see \cite[Definition 1.10]{watanabe-five}). We thus apply a proof similar to that of \cite[Theorem 2.4]{watanabe-five} to have Theorem \ref{thm:maintwo}. 
Suppose that the fixed point $f_0$ is an accumulating point of the subset $W$. We then replace the fixed point $f_0 \in \overline{W} \setminus W$ in the form of the thermodynamic potential $\Omega$ by a suitably chosen element of $f \in W$ since the fixed point $f_0$ is an accumulating point of the subset $W$. Thanks to Theorem \ref{thm:main}, we find that the suitably chosen element $f$ is in $C^2(D)$. Then we can differentiate the suitably chosen element $f$ with respect to the temperature $T$ twice. Therefore, once we replace the fixed point $f_0 \in \overline{W} \setminus W$ in the form of the thermodynamic potential $\Omega$ by a suitably chosen element of $f \in W$, we can again show that the thermodynamic potential $\Omega$ with this $f \in W$ satisfies all the conditions in the operator-theoretical definition of the second-order phase transition. We can again apply a proof similar to that of \cite[Theorem 2.4]{watanabe-five} to have Theorem \ref{thm:maintwo}. This proves Theorem \ref{thm:maintwo}. \begin{thebibliography}{99} \bibitem{watanabe-seven} Watanabe, S. An operator-theoretical study of the specific heat and the critical magnetic field in the BCS-Bogoliubov model of superconductivity. \textit{Scientific Reports} \textbf{10}, 9877 (2020). \bibitem{watanabe-five} Watanabe, S. An operator-theoretical proof for the second-order phase transition in the BCS-Bogoliubov model of superconductivity. \textit{Kyushu J. Math.} \textbf{74}, 177-196 (2020). \bibitem{watanabe-eight} Watanabe, S. An operator-theoretical study on the BCS-Bogoliubov model of superconductivity near absolute zero temperature. \textit{Scientific Reports} \textbf{11}, 15983 (2021). \bibitem{bcs} Bardeen, J., Cooper, L.~N. \& Schrieffer, J.~R. Theory of superconductivity. \textit{Phys. Rev.} \textbf {108}, 1175--1204 (1957). \bibitem{bogoliubov} Bogoliubov, N.~N. A new method in the theory of superconductivity I. \textit{Soviet Phys. JETP} \textbf {34}, 41--46 (1958). \bibitem{odeh} Odeh, F. An existence theorem for the BCS integral equation. \textit{IBM J. Res. Develop.} \textbf {8}, 187--188 (1964). \bibitem{billardfano} Billard, P. \& Fano, G. An existence proof for the gap equation in the superconductivity theory. \textit{Commun. Math. Phys.} \textbf{10}, 274--279 (1968). \bibitem{vansevenant} Vansevenant, A. The gap equation in the superconductivity theory. \textit{Physica} \textbf{17D}, 339--344 (1985). \bibitem{bls} Bach, V., Lieb, E.~H. \& Solovej, J.~P. Generalized Hartree-Fock theory and the Hubbard model. \textit{J. Stat. Phys.} \textbf{76}, 3--89 (1994). \bibitem{chen} Chen, T., Fr$\ddot{\hbox{o}}$hlich, J. \& Seifert, M. Renormalization Group Methods: Landau-Fermi Liquid and BCS Superconductor. Proc. of the 1994 Les Houches Summer School. arXiv:cond-mat/9508063. \bibitem{deugeihailoss} Deuchert, A., Geisinger, A., Hainzl, C. \& Loss, M. Persistence of translational symmetry in the BCS model with radial pair interaction. \textit{Ann. Henri. Poincar\'e} \textbf {19}, 1507--1527 (2018). \bibitem{fhns} Frank, R.~L., Hainzl, C., Naboko, S. \& Seiringer, R. The critical temperature for the BCS equation at weak coupling. \textit{J. Geom. Anal.} \textbf{17}, 559--568 (2007). \bibitem{fhss} Frank, R.~L., Hainzl, C., Seiringer, R. \& Solovej, J.~P. The external field dependence of the BCS critical temperature. \textit{Commun. Math. Phys.} \textbf {342}, 189--216 (2016). \bibitem{freijihaizlseiringer} Freiji, A., Hainzl, C. \& Seiringer, R. The gap equation for spin-polarized fermions. \textit{J. Math. 
Phys.} \textbf {53}, 012101 (2012). \bibitem{hhss} Hainzl, C., Hamza, E., Seiringer, R. \& Solovej, J.~P. The BCS functional for general pair interactions. \textit{Commun. Math. Phys.} \textbf {281}, 349--367 (2008). \bibitem{hainzlloss} Hainzl, C. \& Loss, M. General pairing mechanisms in the BCS-theory of superconductivity. \textit{Eur. Phys. J. B}, 90:82 (2017). \bibitem{haizlseiringer} Hainzl, C. \& Seiringer, R. Critical temperature and energy gap for the BCS equation. \textit{Phys. Rev.} \textbf {B 77}, 184517 (2008). \bibitem{haizlseiringer2} Hainzl, C. \& Seiringer, R. The BCS critical temperature for potentials with negative scattering length. \textit{Lett. Math. Phys.} \textbf {84}, 99--107 (2008). \bibitem{haizlseiringer3} Hainzl, C. \& Seiringer, R. The Bardeen-Cooper-Schrieffer functional of superconductivity and its mathematical properties. \textit{J. Math. Phys.} \textbf {57}, 021101 (2016). \bibitem{watanabe-one} Watanabe, S. The solution to the BCS gap equation and the second-order phase transition in superconductivity. \textit{J. Math. Anal. Appl.} \textbf{383}, 353--364 (2011). \bibitem{watanabe-two} Watanabe, S. Addendum to `The solution to the BCS gap equation and the second-order phase transition in superconductivity'. \textit{J. Math. Anal. Appl.} \textbf{405}, 742--745 (2013). \bibitem{watanabe-four} Watanabe, S. \& Kuriyama, K. Smoothness and monotone decreasingness of the solution to the BCS-Bogoliubov gap equation for superconductivity. \textit{J. Basic and Applied Sciences} \textbf{13}, 17--25 (2017). \bibitem{kuzemsky3} Kuzemsky, A.~L. Statistical Mechanics and the Physics of Many-Particle Model Systems. (World Scientific Publishing Co, 2017). \bibitem{kuzemsky} Kuzemsky, A.~L. Bogoliubov's vision: quasiaverages and broken symmetry to quantum protectorate and emergence. \textit{Internat. J. Mod. Phys.} \textbf{B 24}, 835--935 (2010). \bibitem{kuzemsky2} Kuzemsky, A.~L. Variational principle of Bogoliubov and generalized mean fields in many-particle interacting systems. \textit{Internat. J. Mod. Phys.} \textbf{B 29}, 1530010 (63 pages) (2015). \bibitem{angnem} Anghel, D.-V., \& Nemnes, G.~A. The role of the chemical potential in the BCS theory. \textit{Physica A} \textbf{464}, 74--82 (2016). \bibitem{ang-one} Anghel, D.-V. New phenomenology from an old theory-The BCS theory of superconductivity revisited. \textit{Physica A} \textbf{531}, 121804 (2019). \bibitem{ang-two} Anghel, D.-V. Multiple solutions for the equilibrium populations in BCS superconductors. arXiv:1908.06017v1. \bibitem{kashima-one} Kashima, Y. Higher order phase transitions in the BCS model with imaginary magnetic field. preprint (2021). \bibitem{kashima-two} Kashima, Y. Superconducting phase in the BCS model with imaginary magnetic field. \textit{J. Math. Sci. Univ. Tokyo} \textbf{28}, 1-179 (2021). \bibitem{kashima-three} Kashima, Y. Superconducting phase in the BCS model with imaginary magnetic field. II. Multi-scale infrared analysis. \textit{J. Math. Sci. Univ. Tokyo} \textbf{28}, 181-398 (2021). \bibitem{kashima-four} Kashima, Y. Superconducting phase in the BCS model with imaginary magnetic field. III. Non-vanishing free dispersion relations. \textit{J. Math. Sci. Univ. Tokyo} \textbf{28}, 399-556 (2021). \bibitem{maskawa-nakajima-one} Maskawa, T. \& Nakajima, H. Spontaneous breaking of chiral symmetry in a vector-gluon model. \textit{Prog. Theor. Phys.} \textbf{52}, 1326--1354 (1974). \bibitem{maskawa-nakajima-two} Maskawa, T. \& Nakajima, H. 
Spontaneous breaking of chiral symmetry in a vector-gluon model II. \textit{Prog. Theor. Phys.} \textbf{54}, 860--877 (1975). \bibitem{watanabe-three} Watanabe, S. An operator-theoretical treatment of the Maskawa-Nakajima equation in the massless abelian gluon model. \textit{J. Math. Anal. Appl.} \textbf{418}, 874--883 (2014). \end{thebibliography} \noindent \textbf{Author contributions} Shuji Watanabe wrote the main manuscript text and reviewed the manuscript. \noindent \textbf{Funding} This work was supported in part by JSPS Grant-in-Aid for Scientific Research (C) KAKENHI Grant Number JP21K03346. \noindent \textbf{Competing interests} The author declares no competing interests. \end{document}
2205.10999v1
http://arxiv.org/abs/2205.10999v1
The Tree-Forest Ratio
\documentclass[12pt]{amsart} \usepackage{latexsym,fancyhdr,amssymb,color,amsmath,amsthm,graphicx,listings,comment} \usepackage[section]{placeins} \pagestyle{fancy} \newtheorem{thm}{Theorem} \newtheorem{lemma}{Lemma} \newtheorem{propo}{Proposition} \setlength{\parindent}{0cm} \let\paragraph\subsection \title{The Tree-Forest Ratio} \fancyhead{} \fancyhead[LO]{\fontsize{9}{9} \selectfont OLIVER KNILL} \fancyhead[LE]{\fontsize{9}{9} \selectfont TREE FOREST RATIO} \renewcommand{\headrulewidth}{0pt} \author{Oliver Knill} \date{May 22, 2022} \address{Department of Mathematics \\ Harvard University \\ Cambridge, MA, 02138 } \subjclass{15A15, 16Kxx, 05C10, 57M15, 68R10, 05E45} \keywords{Trees, Forests, Graphs, Barycentric refinement, Zeta functions, Gap labeling}
\begin{document}
\maketitle
\begin{abstract}
The number of rooted spanning forests divided by the number of rooted spanning trees in a graph $G$ with Kirchhoff matrix $K$ is the spectral quantity $\tau(G)={\rm det}(1+K)/{\rm Det}(K)$ of $G$ by the matrix tree and matrix forest theorems. We prove that under Barycentric refinements, the tree index $T(G)=\log({\rm Det}(K))/|G|$, the forest index $F(G)=\log({\rm det}(1+K))/|G|$, and so the tree-forest index $i=F-T=\log(\tau(G))/|G|$ converge to numbers that only depend on the size of the maximal clique in the graph. In the one-dimensional case, all numbers are known: $T(G)=0$, $F(G)=i(G)=2 \log(\phi)$, where $\phi$ is the golden ratio. The convergence proof uses the Barycentral limit theorem, which assures that the Kirchhoff spectrum converges weakly to a measure $\mu$ on $[0,\infty)$ that only depends on the dimension of $G$. The tree and forest indices are potential values of the subharmonic function $U(z)=\int_0^{\infty} \log|x-z| \; d\mu(x)$, so that $i = U(-1)-U(0)$, where the Riesz measure $d\mu=\Delta U$ only depends on the dimension of $G$. The potential $U(z)$ is defined for all $z$ away from the support of $d\mu$ and finite at $z=0$. Convergence follows from the tail estimate $\mu[x,\infty) \leq C e^{-a_d x}$, where the decay rate $a_d$ only depends on the maximal dimension. With the normalized zeta function $\zeta(s) = \frac{1}{|V(G)|} \sum_k \lambda_k^{-s}$, we have for all finite graphs of maximal dimension $\geq 2$ the identity $i(G) = \sum_{s=1}^{\infty} (-1)^{s+1} \zeta(s)/s$. The limiting zeta function $\zeta(s) = \int_{0}^{\infty} x^{-s} d\mu(x)$ is analytic in $s$ for $s<0$. The Hurwitz spectral zeta function $\zeta_z(s)=U_s(z) = \int_0^{\infty} (x-z)^{-s} \; d\mu(x)$ complements $U(z) = \int_0^{\infty} \log(x-z) \; d\mu(x)$; it is analytic in $z$ on $\mathbb{C} \setminus \mathbb{R}^+$ and, for fixed $z \in \mathbb{C} \setminus \mathbb{R}^+$, an entire function in $s \in \mathbb{C}$.
\end{abstract}

\section{In a nutshell}

\paragraph{}
We define here the {\bf tree-forest ratio} $\tau(G)$ of a connected finite simple graph $G=(V,E)$ as the number of rooted spanning forests in $G$ divided by the number of rooted spanning trees in $G$. By the tree and forest matrix theorems, this is $\tau = {\rm Det}(K+1)/{\rm Det}(K)$, where ${\rm Det}$ is the pseudo determinant and $K$ the Kirchhoff Laplacian of $G$. In other words, it is $\tau=\prod_{\lambda \neq 0} (1+1/\lambda)$, where $\lambda$ runs over the non-zero eigenvalues of $K$. An upper bound is $(1+\lambda_2^{-1})^{|V|}$, where $\lambda_2$ is the {\bf ground state}, the smallest non-zero eigenvalue of $K$.
An expansion of the logarithm and a basic bound on the product gives $\log(1+\zeta(1)) \leq \log(\tau) = \sum_{s=1}^{\infty} (-1)^{s+1} \zeta(s)/s \leq \zeta(1)$, where $\zeta(s)$ is the Kirchhoff spectral zeta function of $G$.

\paragraph{}
The same functional could be considered for other operators like the connection matrix $L$ or the Hodge matrix $H$ of a graph, even though a tree or forest interpretation is then lacking. For the connection matrix, which has negative eigenvalues in general, we would use $L^2$ to avoid ambiguity when defining zeta functions. As we have an equivalent zeta function expression in the Kirchhoff case, the functional $\tau(G)$ can now make sense even for manifolds $G$, which do not feature spanning trees or spanning forests. The reason is that the Laplacian on the manifold defines a Minakshisundaram--Pleijel zeta function and a Ray--Singer determinant. For $M=\mathbb{T}=\mathbb{R}/\mathbb{Z}$, where $\zeta$ is the {\bf classical Riemann zeta function}, we have $\tau_M = \sinh(\pi)/\pi$.

\paragraph{}
Potential theory comes in when seeing the logarithm of the {\bf tree number} as a potential value $V(0)=\log({\rm Det}(L))$. Also the logarithm of the {\bf forest number} $V(-1)=\log({\rm Det}(1+L))$ is a {\bf potential value} of the potential $V(z)=\log({\rm Det}(L-z I)) = \int \log(x-z) \; dk(x)$, where $dk=\sum_j \delta_{\lambda_j}$ is a finite pure point measure defined by the eigenvalues of $L$. When $dk$ is normalized to a probability measure $d\mu$, the function $U(z) = \int \log(x-z) \; d\mu(x)$ has a subharmonic real part and $d\mu$ is the {\bf Riesz measure} $d\mu= \Delta U$, a probability measure supported on the positive real axis. The Barycentral limit theorem assures that $d\mu_n$ converges weakly in the Barycentric limit to a measure $d\mu$ which only depends on the maximal dimension of the initial graph. The potential value $U(z)$ is defined for all $z$ away from the support of $d\mu$ and is finite at $z=0$, because there it is the limiting growth rate of trees, which is boxed in between $0$ and the growth rate $U(-1)$ of forests. In the $1$-dimensional case, $d\mu$ is the arcsine distribution on $[0,4]$, which is the potential theoretical equilibrium measure of that interval.
\begin{figure}[!htpb]
\scalebox{0.2}{\includegraphics{figures/potential1d.pdf}}
\scalebox{0.2}{\includegraphics{figures/potential2d.pdf}}
\caption{
The real and imaginary parts of the potential in the 1-dimensional case are seen to the left. The support of the density of states is $[0,4]$. In the 2-dimensional case, where the support of the density of states is non-compact, we see gaps and the support of the spectrum could be a Cantor set.
}
\label{potential}
\end{figure}

\paragraph{}
A major point we want to make here is that the limiting universal measure $d\mu$ has an exponentially decaying {\bf tail distribution} $\mu[x,\infty) \to 0$ as $x \to \infty$. This implies that both the forest and the tree index converge in the Barycentral limit because the potential values $U(-1)$ and $U(0)$ exist. Since, by the nature of tree and forest numbers, $0 \leq U(0) \leq U(-1)$, the convergence at the bottom of the spectrum $z=0$ is not a problem. The tail estimate also shows that for $z$ away from the positive real axis, the {\bf Hurwitz zeta function} $\zeta_z(s) = U_s(z) = \int_0^{\infty} (x-z)^{-s} \; d\mu(x)$ is an entire function in $s$. For $z=0$, we only know that $\zeta_0(s)=\zeta(s)$ is analytic for $s<0$.
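\paragraph{}
To make these formulas concrete, here is a minimal numerical sketch (in Python with NumPy; an added illustration, not part of the original text). For the complete graph $K_4$ it computes the tree-forest ratio from the two determinants, checks it against the spectral product $\prod_{\lambda \neq 0}(1+1/\lambda)$, and compares the index $i(G)$ with a truncation of the zeta series $\sum_{s \geq 1} (-1)^{s+1}\zeta(s)/s$, which converges here because all non-zero eigenvalues exceed $1$.
\begin{verbatim}
import numpy as np

n = 4
A = np.ones((n, n)) - np.eye(n)            # adjacency matrix of K_4
K = np.diag(A.sum(axis=1)) - A             # Kirchhoff matrix
lam = np.linalg.eigvalsh(K)
nz = lam[lam > 1e-9]                       # non-zero eigenvalues (here 4, 4, 4)

trees   = np.prod(nz)                      # Det(K): rooted spanning trees (64)
forests = np.linalg.det(np.eye(n) + K)     # det(1+K): rooted spanning forests (125)
tau     = forests / trees
print(tau, np.prod(1 + 1 / nz))            # both equal (1 + 1/4)^3

index = np.log(tau) / n                    # tree-forest index i(G)
zeta  = lambda s: np.sum(nz ** (-s)) / n   # normalized spectral zeta function
series = sum((-1) ** (s + 1) * zeta(s) / s for s in range(1, 60))
print(index, series)                       # agree up to truncation error
\end{verbatim}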
\paragraph{}
We will use some linear algebra to estimate the integrated density of states near $\infty$ in terms of the vertex degree distribution of the graph. We need a bit more than the {\bf Schur-Horn inequality} or the {\bf Gershgorin circle theorem}: we use $\lambda_k \leq 2 d_k$, where $d_k$ is the $k$'th vertex degree and both eigenvalues and degrees are listed in non-increasing order. This appears to be a new result which we have written down separately and which follows directly from the Cauchy interlace theorem. We have looked at the zeta function $\zeta(s)$ of circular graphs $G=C_n$ in \cite{KnillZeta}, studying the limiting behavior of its roots, and then in \cite{DyadicRiemann} at the connection graphs of circular graphs, where the connection Laplacian is invertible \cite{Unimodularity,KnillEnergy2020} and the zeta function satisfies a functional equation.

\paragraph{}
For families like the complete graphs $K_n$ or the graph complements $C_n'$ of cyclic graphs $C_n$, the tree-forest ratio converges to {\bf Euler's number} $e=2.718 \dots$ in the limit $n \to \infty$. For $K_n$, one can see that directly from the eigenvalues $\lambda_0=0$ and $\lambda_k=n$ for $k \geq 1$, because $\tau(K_n)=(1+1/n)^{n-1} \to e$. For Barycentric refinements of a graph $G=G_0$, the tree-forest index $i(G_n)=\log(\tau(G_n))/|V(G_n)|$ of $G_n$ converges to a universal constant that only depends on the maximal dimension $d$ of $G$. Since $\sum_{k=0}^d f_k(G_{n-1})=|V(G_n)|=f_0(G_n) \leq \sum_{k=0}^d f_k(G_n)$, we could also take the limit where $|V(G_n)|$ is replaced by any $f_k$, like for example the volume $f_d$. The result is a direct consequence of the {\bf spectral Barycentral limit theorem} \cite{KnillBarycentric,KnillBarycentric2}. In the Barycentric limit, we have $\log(\tau(G_n))/|G_n| \to \int_0^\infty \log(1+1/x) d\mu(x)$, where $\mu$ is the limiting density of states. To show that this is a finite number, we only need to show that the density of states $dk$ decays fast. We needed something stronger than the Gershgorin circle theorem \cite{Gershgorin,GershgorinAndHisCircles} or the Schur inequality in matrix theory \cite{Brouwer} and got $\lambda_k \leq 2 d_k$. For $d=1$, we can compute this limiting constant as $\lim_{n \to \infty} i(G_n) = 2 \log(\phi)$, where $\phi=(1+\sqrt{5})/2$ is the golden ratio. When looking at Schur for the sum of the first $n$ eigenvalues and the Cheeger inequality for the ground state $\lambda_1$, it is natural to conjecture that the {\bf linear graph} with $n$ vertices and $(n-1)$ edges strictly maximizes $\tau(G)$ among all connected graphs with $n$ vertices.

\paragraph{}
Having made more experiments with the limiting density of states, in particular in the case $d=2$, we find it numerically indicative that $[4,6]$ is the {\bf largest gap} in the Barycentric spectral measure for $d=2$. If we take a triangle $G=K_3$ and make Barycentric refinements, then after $2$ or more Barycentric refinements, exactly half of the positive eigenvalues are in the interval $[0,4]$ and half of the positive eigenvalues are in $[6,\infty)$.
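The following small, self-contained sketch illustrates this (Python with NumPy and NetworkX; an added illustration, and the helper name \texttt{barycentric\_refinement} is our own, not from the original text). It builds the second Barycentric refinement of the triangle $K_3$ and counts how many positive Kirchhoff eigenvalues lie below $4$ and how many lie above $6$.
\begin{verbatim}
import numpy as np
import networkx as nx

def barycentric_refinement(G):
    # vertices of the refinement are the complete subgraphs (simplices) of G;
    # two of them are joined when one is properly contained in the other
    simplices = [frozenset(c) for c in nx.enumerate_all_cliques(G)]
    B = nx.Graph()
    B.add_nodes_from(range(len(simplices)))
    for i, s in enumerate(simplices):
        for j, t in enumerate(simplices):
            if s < t:                      # proper containment
                B.add_edge(i, j)
    return B

G = nx.complete_graph(3)                   # the triangle K_3
for _ in range(2):                         # two Barycentric refinements
    G = barycentric_refinement(G)

K = nx.laplacian_matrix(G).toarray().astype(float)
lam = np.linalg.eigvalsh(K)
pos = lam[lam > 1e-9]
print(len(pos), int((pos < 4.0).sum()), int((pos > 6.0).sum()))
# the text asserts that half of the positive eigenvalues lie in [0,4]
# and half in [6,oo) after two or more refinements
\end{verbatim}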
It is natural to ask whether there is a {\bf gap labeling}: the {\bf integrated density of states} $\mu([0,x])$ is a rational number for $x$ in a gap of the spectrum. But already in the case $d=2$, for the relatively large gap containing the point $8$, we do not see an obvious small rational number attached. Our best guess for the integrated density of states is $\int_0^8 dk(x) = 137/164$. Gap labeling results are prototyped by $p$-periodic Jacobi matrices, where the integrated density of states is $j/p$ with integer $j$. For classes of non-periodic operators like almost periodic operators, the gap labeling works too. For early sources, see \cite{Pastur,Cycon,Carmona,Bel+92a}. A recent development interesting for graph theory is the case of periodic Jacobi matrices on universal covers of leaf-free one-dimensional graphs \cite{AvniBreuerSimon}, where one observes only point or absolutely continuous spectrum. In that respect, we have absolutely no idea yet what the spectral type of the Barycentric density of states $dk$ is in dimension $d \geq 2$. We observe that the potential $U(z)$ (the analogue of the Lyapunov exponent in the case of Schr\"odinger operators) appears to be positive almost everywhere on the support of $dk$ if the dimension is larger than $1$.

\paragraph{}
In the case of Jacobi matrices, one is interested in the spectral measures of random operators $L(\omega)$ parametrized by a point in a probability space $\Omega$, which in the almost periodic case is a compact topological group with Haar measure. We believe that there is a non-Abelian compact topological group in all dimensions. It is still hidden for $d \geq 2$. There should be a Laplacian on this group such that $dk_d$ is the density of states of a ``random'' Laplacian. In the case $d=1$, where things are Abelian and understood, the group $\mathcal{G}_1$ is the {\bf dyadic group of integers} and the limiting operator is the extension of $H f(n) = 2f(n)-f(n+1)-f(n-1)$ from the dense set $\mathbb{Z}$ to its completion $\mathcal{G}_1$. By Fourier theory, $H$ is diagonalized and unitarily equivalent to the multiplication operator $\hat{H} f(x) = (2-2\cos(\pi x)) f(x) = 4\sin^2(\pi x/2) f(x)$ on the {\bf Pr\"ufer group} $\hat{\mathcal{G}}_1$ of all dyadic rationals on $\mathbb{T}$. While $dk(x)=1/(\pi \sqrt{x(4-x)})$ on $[0,4]$ is clearly absolutely continuous, the operator $H$ has dense pure point spectrum as an operator on $L^2(\mathcal{G}_1)$. Unlike in the Pontryagin pair $\mathbb{Z} \leftrightarrow \mathbb{T}$, where the Dirac points $\delta_x$ in $\mathbb{T}$ are distributions and not functions in $L^2(\mathbb{T})$, the Dirac points $\delta_x \in l^2(\hat{\mathcal{G}}_1)$ are actual functions, as the Pr\"ufer group is discrete. In the Jacobi case, the operator was diagonalized on the compact, non-discrete group of the Pontryagin pair; in the dyadic case, it is diagonalized on the discrete, non-compact group.

\section{Introduction}

\paragraph{}
Investigating the relation between spectral data of a geometry and observable quantities like topological invariants is a classical theme in geometry \cite{BergerPanorama}. Mark Kac popularized the question whether one can ``hear the geometry'' \cite{Kac66}. Historically, a prototype result is {\bf Weyl's law} \cite{Weyl1911}, relating the number $n$ of eigenvalues of the Dirichlet Laplacian on $G$ below $\lambda$ to the volume $|G|$ of a domain $G$ as $n \sim C_d \lambda^{\frac{d}{2}} |G|$ with $C_d = \frac{|B_d|}{(2\pi)^d}$. Hermann Weyl looked in \cite{Weyl1911} at the planar case and proved $n/\lambda_n \to |G|/(4\pi)$ using the explicit eigenvalues $\lambda_{m,n}=\pi^2(m^2+n^2)/|G|$ of a square $G$. Kac's question addresses the problem to what degree the spectral data determine space and how large the set of isospectral objects is. Because the zeta function encodes the spectrum, this also asks to what degree the spectral zeta function determines the geometry.
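\paragraph{}
As a quick sanity check of the planar Weyl law quoted above, the following short sketch (Python with NumPy; an added illustration, not part of the original text) enumerates the explicit eigenvalues $\lambda_{m,n}=\pi^2(m^2+n^2)$ of the unit square and compares $n/\lambda_n$ with $|G|/(4\pi)$.
\begin{verbatim}
import numpy as np

# Dirichlet eigenvalues of the unit square (|G| = 1): pi^2 (m^2 + n^2), m, n >= 1
N = 200
m, n = np.meshgrid(np.arange(1, N + 1), np.arange(1, N + 1))
lam = np.sort((np.pi ** 2 * (m ** 2 + n ** 2)).ravel())

target = 1.0 / (4 * np.pi)                 # Weyl's law: n / lambda_n -> |G| / (4 pi)
for k in (100, 1000, 10000):               # the k-th eigenvalue is lam[k-1]
    print(k, k / lam[k - 1], target)
\end{verbatim}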
While in one-dimensional cases, one can deform geometry in an isospectral way, in higher dimensions, isospectral rigidity appears. Isospectral sets tend to be discrete as isospectral deformations of Riemannian geometries in general do not exist any more \cite{Mor1}. For finite simple graphs, the question of how spectral data relate to a given invariant is largely parallel to the continuum theory \cite{Chung97} and can be investigated experimentally. A typical spectral result is that the pseudo determinant ${\rm Det}(K)$ of the Kirchhoff Laplacian of a graph is the number of rooted spanning trees and the determinant of the Fredholm modification is the number of rooted spanning forests. We look here at this ratio. In this context, we can also remind about the spectral properties of connection matrices, where the number of positive eigenvalues minus the number of negative eigenvalues is the Euler characteristic \cite{HearingEulerCharacteristic}. \paragraph{} The {\bf tree forest ratio} is picked up here also because it belongs to a larger quest to investigate {\bf functionals on graphs}, that is assigning numerical values to a graph. Considering critical points of functionals on geometries has been successful in physics, like for geodesics, area minimizing surfaces, total curvature minimizing In graph theory, one can look at ``packing numbers" like the chromatic number or the independence number manifolds. In graph theory, quantities that can be counted are the average simplex cardinality \cite{AverageSimplexCardinality}, the number $f_k$ of $k$-dimensional simplices or the Euler characteristic$ \chi(G)=\sum_k (-1)^k f_k$ or other length-related functionals \cite{KnillFunctional}. Interesting examples of {\bf spectral quantities} is the ground state energy, the smallest positive eigenvalue of the Laplacian or then the {\bf analytic torsion} which involves spectral data of the Hodge Laplacian $L=(d+d^*)^2$. \section{Trees and Forests} \paragraph{} Given a {\bf finite simple graph} $G$, the collection of {\bf rooted spanning trees} and the set of {\bf rooted spanning forests} inside $G$ are of interest. For forests, ``rooted" means that every tree in the forest has an assigned vertex, which is interpreted as the {\bf root} of the tree. Rooted trees in a forest include also seeds, which are one-point graphs. Each rooted tree can be seen as pointed topological space without closed loops nor triangles. It is a directory organization of the vertices and a stratification of space by giving the distance from the root. For any spanning tree $T$ or spanning forest, the induced graph is the full space $G$ because $T$ spans $G$: the vertex set of $T$ and $G$ is the same. The ratio of rooted spanning forests and trees is the same than the ratio of spanning forests and trees. \paragraph{} By the {\bf Kirchhoff matrix tree theorem}, the number of rooted spanning trees in $G$ is ${\rm Det}(K)$, where $K$ is the {\bf Kirchhoff matrix} of $G$ and ${\rm Det}$ is the {\bf pseudo determinant} of $K$, the product of the non-zero eigenvalues of $K$. Since every spanning tree has $n$ possible roots if the graph has $n$ vertices, the number ${\rm Det}(K)/n$ is the number of spanning trees in $G$. \paragraph{} By the {\bf Chebotarev-Schamis forest theorem} \cite{ChebotarevShamis1,ChebotarevShamis2,Knillforest}, the number of rooted spanning forests in $G$ is ${\rm det}(1+K)$, the {\bf Fredholm determinant} of the Kirchhoff matrix $K$. See \cite{cauchybinet}. 
We introduce here the {\bf tree-forest ratio} $$ \tau(G) = \frac{{\rm det}(1+K)}{{\rm Det}(K)} $$ and the {\bf tree forest index} $$ i(G) = \log(\tau(G))/|G| \; $$ where $|G|$ is the number of vertices. In general $\tau(G) \geq 0$ because there are more forests than trees. \paragraph{} In dimension larger than $1$, both the number of forests as well as the number of trees grow exponentially when doing Barycentric refinements. The reason is that in the two dimensional case already, we can see this geometrically. This is different for one-dimensional graphs, where the number of trees grows polynomially under Barycentric subdivisions while the number of forests grows exponentially. The precise polynomial degree of the tree growth rate in the one-dimensional case depends on the genus $b_1$ of the graph. \paragraph{} For graphs with multiple connected components, where spanning trees naturally need to be disconnected, one can define the number of rooted spanning trees as the product $\prod_{k} {\rm Det}(K_k)$, where $K_k$ is the Kirchhoff matrix block belonging to the $k$'the component. For the number of rooted forests, we naturally have the product $\prod_k {\rm Det}(K_k+1)$ already because forests do not need to be connected. With this extension of the definition for not necessarily connected graphs $G$, the ratio $\tau(G)= {\rm Det}(K+1)/{\rm Det}(K)$ can in general be understood as the tree-forest ratio, whether $G$ is a connected or a disconnected graph. To make the tree-forest ratio functional defined on all graphs, we define $\tau(0)=1$ and $i(0)=0$ if $0$ is the empty graph. \paragraph{} Since all rooted spanning trees are also rooted spanning forests, we have $\tau(G) \geq 1$ and so $i(G)=\log(\tau(G))/|G| \geq 0$. It is easy to check that $\tau(G)=1$ happens if and only if $G$ has no edges. The reason is that already the presence of one single edge produces more forests than trees. For the complete graph $K_2$ for example, we have $\tau(K_2) = 3/2$ because there are $3$ rooted forests and $2$ rooted trees in $K_2$. \paragraph{} When taking graph limits with larger and larger trees, the tree forest ratio diverges in general exponentially fast with the diameter. For the cycle graph $C_n$ for example, there are $n^2$ rooted spanning trees and $n$ spanning trees. In the same circular graph $C_n$, the number of spanning forests is the alternate {\bf Lucas number} $L(n)$ given by the recursion $L(n+1) = 3 L(n)-L(n-1)+2, L(0)=0,L(1)=1$. The number of forests grows exponentially, while the number of trees grows polynomially. We have $$ \tau(C_n)=n^2/L(n) \; $$ and $$ \log(\tau(C_n))/n \to 2 \log(\phi) = {\rm arccosh}(3/2) \; , $$ where $\phi$ is the {\bf golden ratio}. \paragraph{} For any one-dimensional graph $G$ with $|G|=|E|$ edges, the {\bf Barycentral limit measure} $d\mu$ exists explicitly. The limiting density of states measure is the arcsin distribution. Potential theoretically, the fact that that the number of trees grows polynomially while the number of forests grows exponentially is reflect in $\int_0^1 \log(4 \sin^2(\pi x)) \; dx = 0$ which is not totally obvious. In terms of the density of states $dk(x) = \frac{1}{\pi \sqrt{x(4-x)}}$ we have $$ U(0) = \int_0^4 \frac{\log(x)}{\pi \sqrt{x(4-x)}} \; dx = 0 \; $$ and $$ U(-1) = \int_0^4 \frac{\log(1+x)}{\pi \sqrt{x(4-x)}} \; dx = \log(\phi^2) = {\rm arccosh}(3/2) =0.962424 \; . $$ \paragraph{} Here is an other class, of graphs where we have a chance to compute the limit. 
One can look at {\bf circulant graphs} $C_{n,A}$, the Cayley graph of a generating set $A \subset V(C_n)$. If the diameter of the graph is larger than 2, then the tree-forest ratio is going to explode for such graphs The radius of a circulant graph is $2$ if the set $A+A$ in $\mathbb{Z}_p$ covers the entire set $\mathbb{Z}_p$. An extreme case is if $A=V(C_n)$, in which case we have the complete graph. An other example is the graph complement $C_n'$ of $C_n$ in which case the diameter is always $2$. If the set $A+A$ for the generating set of the Cayley graph does not cover the graph, meaning that there are relations in the finitely generated group with words $3$ or longer, then the tree-forest ratio of $G_n$ diverges exponentially as $n \to \infty$. In some cases, we can compute the limit. For the graph complement $\overline{C}_n$ of cyclic graphs $C_n$, which are graphs of diameter $2$, one has: \begin{propo} $\tau(\overline{C}_n) \to e$. \end{propo} \begin{proof} The eigenvalues of $\overline{C}_n$ are explicitly known as $$ \lambda_{k,n} = \sum_{m=2}^{n-2} 1-\cos(2\pi m \frac{k}{n}) = \sum_{m=2}^{n-2} 2\sin^2(\pi m \frac{k}{n}) \; . $$ We have $2 \lambda = 5-2+2\cos(2\pi k/n) - \frac{\sin(k (2m-3) \pi/n }{\sin(k \pi/n)}$. \end{proof} \begin{propo} $\tau(K_n) \to e$. \end{propo} \begin{proof} The non-zero eigenvalues are all $n$. We have $\tau(K_n) = {\rm Det}(1+K)/{\rm Det}(K) = (1+1/n)^{n-1}$. \end{proof} \section{The Kirchhoff spectral Zeta function} \paragraph{} The {\bf Kirchhoff spectral zeta function} of a finite simple graph $G=(V,E)$ is defined as $$ \zeta(s) = \sum_{\lambda \neq 0} \frac{1}{\lambda^s} \; ,$$ where $\lambda$ ranges over the eigenvalues of the {\bf Kirchhoff Laplacian} $K$ of $G$. We see immediately from the definitions that $$ \tau(G) = \prod_{\lambda \neq 0} (1+\frac{1}{\lambda}) \; . $$ When taking the Barycentral limit, we need to normalize the function, usually by scaling with a factor $1/n$, where $n$ are the number of vertices in the graph. \paragraph{} It follows from the definition that we can estimate the {\bf spectral gap} $\lambda_1$, which is the smallest eigenvalue of $K$ from below. \begin{propo} $\lambda_1 \geq 1/(\tau(G)-1)$. \end{propo} \begin{proof} We have $(1+1/\lambda_1) = \tau(G)/\prod_{k \geq 2} (1+1/\lambda_k) \leq \tau(G)$. So $\lambda_1 \geq 1/(\tau(G)-1)$. \end{proof} \paragraph{} Since $h(G) \geq \lambda_2/2$, where $h(G)$ is the {\bf Cheeger constant} we also have \begin{propo} $h(G) \geq 1/2(\tau(G)-1$. \end{propo} \paragraph{} Also in reverse, a very small spectral gap $\lambda_1$ produces a large $\tau$ because $\tau(G) \geq 1+1/\lambda_1$. \paragraph{} In gteneral, we have for a finite simple graph with (not yet normalized) zeta function: \begin{propo} $\log(\tau(G)) = \sum_{k=1}^{\infty} (-1)^{k+1} \frac{\zeta(k)}{k}$. \end{propo} \begin{proof} Taking logs gives $$ \log(\tau(G)) = \sum_{\lambda \neq 0} \log(1+ \frac{1}{\lambda}) \; . $$ Using the expansion $\log(1+x) = x-x^2/2+x^3/3+ \dots$ and Fubini, we see $$ \log(\tau(G)) = \zeta(1)-\zeta(2)/2-\zeta(3)/3 + \dots \; . $$ \end{proof} \paragraph{} The Kirchhoff Laplacian $K$ is one possible choice which comes natural when looking at trees or forests. We could also look at the {\bf Hodge Laplacian} $L=D^2$ with {\bf Dirac operator} $D=d+d^*$ of a graph $G$. The later has symmetric spectrum $-\lambda_n, \dots, -\lambda_1, 0, \lambda_1 , \lambda_n$. This gives a {\bf Hodge zeta function} $\zeta_L(s)$. 
Define the {\bf Dirac spectral function} as $$ \zeta_D(s) = \sum_{\lambda >0} \frac{1}{\lambda^{s}} $$ A {\bf Dirac tree forest ratio} of a graph could now be defined as $$ \tau_D(G) = \frac{{\rm Det}(1+D)}{{\rm Det}(D)} $$ Because non-zero eigenvalues come in pairs $\lambda,-\lambda$, we have $$ \tau_D(G) = \prod_{\lambda > 0} (1 + \frac{1}{\lambda})(1-\frac{1}{\lambda}) = \prod_{\lambda > 0} (1 - \frac{1}{\lambda^2}) \; . $$ If $1$ is an eigenvalue of $D$ we would exclude that factor. One could also look at the connection Laplacian zeta function. \paragraph{} Looking at the zeta function rather than the eigenvalues adds an analytic angle to spectral theory. One can look at a {\bf analytic properties} of the function $\zeta(s)$ and especially at the places, where $\zeta(s)$ is singular or places where $\zeta(s)$ is zero. The above formula shows that the tree-forest ratio measures a sort of growth rate of $\zeta(s)$ along the real axes. \paragraph{} Unlike trees or forests, the {\bf spectral zeta function} can be considered for manifolds $M$, even so the interpretation of trees and forests is no more adequate in the continuum. On compact Riemannian manifolds $M$ we have an exterior derivative $d$ and so a Hodge Laplacian $L=D^2 = (d+d^*)^2$. Define $$ \tau(M) = {\rm Det}(1+L)/{\rm Det}(L) \; , $$ where both ${\rm Det}$ is the Ray-Singer determinant defined by a zeta regularization of $L=(d+d^*)^2$. One could also look just at the zeta function defined by the scalar Laplacian. We do not see any use yet for the quantity $\tau(M)$, the point is just that it can be considered. \paragraph{} Let us look for example at the case $M=\mathbb{T}$, where $D=i \frac{d}{dx}$ which has the eigenfunctions $e^{i n x}$ with eigenvalues $-n$. The Dirac zeta function $$ \sum_{n>0} \frac{1}{n^s} \; . $$ The Laplacian zeta function is $\sum_{n>0} \frac{1}{n^{2s}}$ as the eigenvalues of $L$ are $1/n^2$. \paragraph{} \begin{propo} $\tau(\mathbb{T}) = \sinh(\pi)/\pi$. \end{propo} \begin{proof} We have $$ \tau(M) = \prod_{n>0} (1+\frac{1}{n^2}) = \sinh(\pi)/\pi $$ using $\sin(\pi x) = \pi x \prod_{n=1}^{\infty} (1-\frac{x^2}{n^2})$ at $x=i$. \end{proof} \section{The Barycentral limit} \paragraph{} The quantity $T(G)=\log(\tau(G))$ is additive with respect to the disjoint union operation $T(G+H) = T(G)+T(H)$. Let $|G|$ denote the number of vertices of $G$. Define the {\bf tree-forest index} $$ i(G) = \log(\tau(G))/|G| \; $$ where $|G|$ is the number of vertices. It has a chance to remain finite when taking graph limits. Let $G_n$ denote the $n$'th Barycentric refinement of a graph $G=G_0$. Now, the growth of the tree forest ratio is dominated by the growth of the forests. The index $i(G_n)$ is the difference of the Forest index $\log(F(G_n))/|G_n|$ and the tree index. \begin{thm}[Tree Forest Universality] $\lim_{n \to \infty} i(G_n)$ exists for every graph $G$ and only depends on the maximal dimension $d$ of $G$. For $d=1$, the limit is $2\log(\phi)$, where $\phi$ is the golden ratio. \end{thm} \begin{proof} We prove this from the Barycental central limit theorem which asserts that there is a (only dimension-dependent measure) $\mu$ on $[0,\infty)$ such that the spectral measures of $L(G_n)$ (which are point measures for all $n$) converge weakly to $\mu$. Now, we have $i(G_n) = \int_0^{\infty} \log(1+1/x) d\mu(x)$ which is a difference of two {\bf potential values} $$ i(G_n) = \int_0^{\infty} \log(1+x) d\mu(x) - \int_0^{\infty} \log(x) \; d\mu(x) \; . 
$$ The $1$-dimensional case is special in that $d\mu$ has compact support on $[0,4]$ and is the equilibrium measure on that interval. The potential $f(z) = \int \log|a-z| \; d\mu(z)$ exists for every $a \in \mathbb{Z}$ and is subharmonic and zero exactly on the interval $[0,\pi]$. The limiting index $i(G)$ in $1$-dimension is therefore the potential value at $a=-1$. \\ In the $d=2$ or higher dimensional case, the measure $\mu$ does no more have compact support. This is related to the fact that the maximal vertex degree becomes unbounded under Barycentric refinements. While in one dimension, the measure $\mu$ was absolutely continuous with a density concentrated mostly on the boundary $0,4$ of the support, in higher dimensions, the measure has a tail continuity both at the lower bound $0$ as well as at infinity. At zero we need a mild decay to make sure that $\int_0^{\infty} \log(x) \; d\mu(x)$ is finite. At infinity, we need a stronger decay to make sure that $\int_0^{\infty} \log(1+x) d\mu(x)$ exists. But we know also from potential theory of subharmonic functions and in our case from the combinatorial context that $0 \leq \int_0^{\infty} \log(x) \; d\mu(x) < \int_0^{\infty} \log(1+x) d\mu(x)$. It suffices therefore to show that $\int_0^{\infty} \log(1+x) d\mu(x)$ exists. This amounts to estimate the decay rate of $\mu$ at infinity. The fact that there is an exponential decay in dimension $2$ or higher in the next section. We are using an estimate $\lambda_k \leq 2 d_k$, a Perron-Frobenius result on the limiting dimension distribution in Barycentric limits as well as an estimation of the vertex cardinality points of $G_n$ which depends on the {\bf generation} of a point. A vertex in $G_n$ has generation $k$ if it has become a vertex in $G_{n-k}$ and not before. A vertex in $G_n$ of {\bf generation} zero is a vertex which has in the previous level $G_{n-1}$ not been a vertex. Note that if a simplex is a vertex, it is especially again a vertex in the next generation. What happens is that we have an exponential decay of vertex degrees depending on generation. \end{proof} \paragraph{} A different proof could be done along the lines of the Barycentral spectral limit theorem without using the spectral measure $d\mu$: if $G$ has maximal dimension $d$ then the Barycentric refinement $G_1$ of a $d$-simplex in $G$ consists of $(d+1)!$ simplices glued. There is a stratification of growth rates of the different simplices depending on dimension. The number of maximal simplices grows exponentially faster than the number of co-dimension-one simplices, we can asymptotically treat every refined $n$-simplex as a copy of disjoint $(n+1)!$ simplices plus a gluing correction which as it is lower dimensional will be a definite factor smaller. The limiting tree forest ratio is again Cauchy sequence as we add up a sequence of errors which geometrically decay. \section{Tail decay} \paragraph{} In this section we prove a proposition on the limiting density of states $dk$ in dimension $d \geq 2$. \begin{propo} There exists a positive constant $c$ such that $\mu([x,\infty)) \leq e^{-c x}$. \end{propo} \paragraph{} In dimension $d=1$, the density of states $dk$ has compact support $[0,4]$ and therefore does not need an exponential decay estimate. The lemma of course still applies. Let therefore $G$ be a graph of dimension $d \geq 2$ and let $G_n$ be the Barycentric refinements of $G$. The vertices of $G_n$ are the simplices (complete subgraphs) of $G_{n-1}$ and two are connected, if one is contained in the other. 
Every vertex $x$ in $G_n$ can be assigned a {\bf generation} $\gamma(x)$ defined to be the integer $k$, if it has been a vertex in $G_{n-1}, \dots, G_{n-k+1}$. For example, a vertex has generation $1$ if it has been a simplex of positive dimension in $G_{n-1}$. A point has generation $n$ if has been a vertex in $G$ already. Every vertex in $G_n$ has generation $\gamma(x) \in \{1,2,\dots, n\}$. \paragraph{} The $f$-vector of $G$ is the vector $(f_0,f_1, \dots, f_d)$, where $f_k$ are the number of $k$-dimensional simplices in $G$. There are ${\rm gen}_n = f_0(G_0)$ vertices of generation $n$. \\ There are ${\rm gen}_{n-1}=f_0(G_1)-{\rm gen}_n$ vertices of generation $n-1$. \\ There are ${\rm gen}_{n-2}=f_0(G_2)-{\rm gen}_{n}-{\rm gen}_{n-1} = f_0(G_2)-f_0(G_1)$ vertices of generation $n-2$ \\ There are ${\rm gen}_1 =f_0(G_n)-{\rm gen} {n}-{\rm gen}_{n-2} ..-gen_2$ vertices of generation $1$. \\ Since $f_0(G_n)$ grows exponentially at least like $(d+1)!^n$, we also have in $G_n$ at least $C (d+1)!^{n-k}$ vertices of generaton $k$. The {\bf Barycentric refinement operator} $A$ is a $(d+1) \times (d+1)$ upper triangular matrix defined by $f(G_1)=A f(G)$. It is given as $A_{ij} = {\rm Stirling}(i,j) i!$, involving the {\bf Stirling numbers of the second kind}. The {\bf Perron-Frobenius eigenvector} of $A$ is the probability eigenvector of $A$ belonging to the largest eigenvalue $(d+1)!$. For example, in the case $d=2$, when $A=\left[ \begin{array}{ccc} 1 & 1 & 1 \\ 0 & 2 & 6 \\ 0 & 0 & 6 \\ \end{array} \right]$, the Perron-Frobenius eigenvector is $[1/6,1/2,1/3]$ to the eigenvalue $(d+1)!=6$. This means that for large $n$, the graph $G_n$ has about $1/6$th of the simplices which are points, about a half which are edges and about $1/3$rd which are triangles. In the limit $d \to \infty$, the Perron Frobenius probability measures appear converge weakly to a point measure around $0.7215$, a fact which we could not prove yet. \begin{lemma} There is a $c_1>0$ such that for all k, at least $c_1^{n-k} |G_n|$ vertices in $G_n$ are generation $k$ vertices. \end{lemma} This means that the generation $\gamma$ as a random variable on the finite set of vertices of $G_n$ asymptotically has an exponential distribution. There very few vertices of large generation and very many of small generation. Now we will see that on large generation vertices, there is an upper bound on the eigenvalues which is exponential too in the generation. \paragraph{} If $S(x)$ is the {\bf unit sphere} of a vertex $x$, then the k'th Barycentric refinement $S_k(x)$ is the unit sphere of $x$ in $G_k$. Because $|S_k(x)|$ is the {\bf vertex degree} of $x$, the vertex degree of generation $k$ vertices is at least $|S_k(x)| \sim d!^k$. \begin{lemma} There exists $c_2>0$ such that maximally $c_1^k |G_n|$ vertices have degree larger than $c_2^k$. \end{lemma} \paragraph{} It follows from the Gershrogin circle theorem that there less than $c_1^k |G_n|$ vertices for which the largest eigenvalue is larger than $2 c_2^k$. Now $c_1=1/(d+1)!$ and $c_2=d!$. Since this implies that the error area decays exponentially for $x \to 1$, was also have the function itself decay exponentially. \section{Questions} \paragraph{} Which graphs with $n$ vertices have the maximal tree-forest ratio? The minimum is $1$. For small $n$ we see that the linear graph is the maximum. It is pretty safe to conjecture that the linear graph is always the maximum: \paragraph{} What is the average tree-forest ratio on Erdoes-Renyi spaces? 
We can also look more generally at the potential average on Erdoes-Renyi spaces which produces a density of states measure for each $n$ and $p$. And then we can ask for which choice of $p_n$ does the density of states average $dk_{n,p(n)}$ converge weakly in the limit $n \to \infty$. \paragraph{} What happens with graph operations like various products or joins? For disjoint unions, we have the product of determinants so that $\tau(G+H)=\tau(G) \tau(H)$. Under graph complements, there is no clear relation yet. Also the Zykov join operation does not produce obvious relations. What happens with the tree-forest ratio when taking Shannon products? We do not see any (at least obvious) relation between the tree number yet. \paragraph{} For connection Laplacians, we know that the zeta functions multiply. because the connection Laplacian of a product of graphs tensor. $L_{G * H} = L_G \otimes L_H$. We therefore have for the connection Laplacians: $$ \log(\rho(G * H)) = \sum_k (-1)^{k+1} \zeta_G(k) \zeta_H(k)/k \; . $$ \paragraph{} What is the relation between the Hodge Laplacian and connection Laplacian tree-forest ratio? In order to define a tree-forest ratio from the connection Laplacian itself, we have to look at the spectrum of the square. We can for any positive energized complex look at $\tau_L(G) = \det(L+1)/\det(L)$ which makes sense as $L$ is always invertible. \paragraph{} We should reiterate that in the case $d \geq 2$, we have not even a proof yeat that there are gaps in the spectrum. This should be easy to do as in the case $d=2$ we see a prominent gap $[4,6]$ associated to the integrated density of states $1/2$. While we know now something about the decay of $dk$ at infinity, we do not have an asymptotic near $0$ or the end of gaps. As for $0$, the Schur estimate does not help as as we have always a certain percentage of vertices with minimal vertex degree $(d+1)!$. That means that we can (using Shur) only prove that there exists a constant $c$ such that $dk([0,x]) \leq c x$ for small $x$. As we know $U(0) \geq 0$ due to its relation to the growth rate of trees, we know that $U(z)$ is finite in a neighborhood of $z=0$. It is natural to conjecture that the potential $U(z)< \infty$ for all $z \in \mathbb{Z}$. This would imply that the {\bf logarithmic capacity} $-\int \int \log|z-w| dk(z) dk(w)$ is finite in all dimensions $d$. We know that the value is $0$ in the case $d=0$, where $U(z)=0$ on the support $[0,4]$ of $dk$. Having finite logarithmic capacity especially implies that the {\bf Lebesgue decomposition} of $dk=dk_{pp} + dk_{sc} + dk_{ac}$ has no pure point part. \paragraph{} Fourier theory could be an approach to determine the nature of the spectrum $d\mu$. Since the measure $\mu$ has exponential decay at infinity, all Fourier coefficients $\hat{d\mu}(t) =\int_0^{\infty} e^{i t x} \; d\mu(x)$ exist. In order to determine the spectral type, one could also focus on a compact part of the spectrum like for example, $[0,4]$. Now, we can look at Fourier series of $d\mu$ instead rather than the Fourier transform. For example, by a theorem of Wiener \cite{Katznelson}, if $\frac{1}{n} \sum{k=0}^{n-1} \hat{\mu}(k)^2$ converges to zero for $n \to \infty$, then $d\mu$ has no point spectrum. \section{Code} \paragraph{} The following Mathematica code plots the potential function (the analog of the Lyapunov exponent), and the integrated density of states (the analog of the rotation number) as well as the spectrum of the Barycentric measure $dk$ in the planar case. 
The code is optimized for the two-dimensional case so that we do not have to use general costly clique finding algorithms. \begin{tiny} \lstset{language=Mathematica} \lstset{frameround=fttt} \begin{lstlisting}[frame=single] T3[s_]:=Module[{v=VertexList[s]},Union[Partition[Flatten[Union[ Table[z=EdgeList[Subgraph[s,Complement[VertexList[ NeighborhoodGraph[s,v[[k]]]],{v[[k]]}]]]; Table[Sort[ {v[[k]],z[[j,1]],z[[j,2]]}],{j,Length[z]}],{k,Length[v]}]]],3]]]; T2[s_]:=Module[{e=EdgeList[s]}, Table[Sort[{e[[k,1]],e[[k,2]]}],{k,Length[e]}]]; T1[s_]:=Module[{v=VertexList[s]},Table[{v[[k]]},{k,Length[v]}]]; Whitney2D[s_]:=Sort[Map[Sort,Union[T3[s],T2[s],T1[s]]]]; GraphFromComplex[G_]:=Module[{n=Length[G],v,e={}},Do[x=Sort[G[[k]]]; If[Length[x]==3,e=Union[e,{x->{x[[1]]},x->{x[[2]]},x->{x[[3]]}, x->Sort[{x[[1]],x[[2]]}],x->Sort[{x[[1]],x[[3]]}], x->Sort[{x[[2]],x[[3]]}]}]]; If[Length[x]==2,e=Union[e,{x->{x[[1]]},x->{x[[2]]}}]],{k,Length[G]}]; rules=Table[G[[k]]->k,{k,n}]; UndirectedGraph[Graph[e /. rules]]]; Barycentric2D[s_]:=GraphFromComplex[Whitney2D[s]]; Barycentric2D[s_,k_]:=Module[{s1=s},Do[s1=Barycentric2D[s1],{k}];s1]; s=Barycentric2D[CompleteGraph[3],6]; L=Sort[Eigenvalues[1.0*KirchhoffMatrix[s]]]; U[z_]:=Sum[Log[Abs[z-L[[k]]]],{k,Length[L]}]/Length[L]; V[x_]:=Sum[If[L[[k]]<x,1,0],{k,Length[L]}]/Length[L]; f[x_]:={Red,Point[{x,0}]}; W = Graphics[Map[f,L], PlotRange->{{-1,10},{-1,7}}]; Show[{W,Plot[{3*U[x],6*V[x]},{x,-2,10},PlotPoints ->1000]}] {TreeIndex=U[0],ForestIndex=U[-1],TreeForestIndex=TreeIndex/ForestIndex} \end{lstlisting} \end{tiny} \paragraph{} \begin{figure}[!htpb] \scalebox{1.2}{\includegraphics{figures/output6.pdf}} \label{potential} \caption{ The output of the above code shows the potential ${\rm Re}(U(x+0i))$ and the integrated density of states ${\rm Im}(U(x+0i)$ of the complex potential $U(z)=\int_{\mathbb{C}} \log(z-w) \; dk(w)$ of the Barycentric 2-dimensional density of states $dk$. We see the 6th Barycentric refinement $G_6$ for $G=G_0=K_3$. The prominent gap $[4,6]$ is very clear numerically, but not theoretically established. } \end{figure} \begin{figure}[!htpb] \scalebox{1.0}{\includegraphics{figures/graph5.pdf}} \label{potential} \caption{ The 5'th Barycentric refinement $G_5$ of $G_0=K_3$ has 3937 vertices, 11712 edges and 7776 triangles. The next version $G_6$ used in the above spectral picture already has $23425=3937+11712+7776$ vertices. } \end{figure} \bibliographystyle{plain} \begin{thebibliography}{10} \bibitem{Bel+92a} J.~Bellissard, A.Bovier, and J.-M.Ghez. \newblock Gap labelling theorems for one dimensional discrete {Schr\"odinger} operators. \newblock {\em Rev. Math. Phys}, 4:1--37, 1992. \bibitem{BergerPanorama} M.~Berger. \newblock {\em A Panoramic View of Riemannian Geometry}. \newblock Springer, 2003. \bibitem{Brouwer} A.E. Brouwer and W.H. Haemers. \newblock {\em Spectra of graphs}. \newblock Springer, 2012. \bibitem{Chung97} F.~Chung. \newblock {\em Spectral graph theory}, volume~92 of {\em CBMS Reg. Conf. Ser.} \newblock AMS, 1997. \bibitem{Cycon} H.L. Cycon, R.G.Froese, W.Kirsch, and B.Simon. \newblock {\em {Schr\"odinger} Operators---with Application to Quantum Mechanics and Global Geometry}. \newblock Springer-Verlag, 1987. \bibitem{Gershgorin} S.~Gershgorin. \newblock {\"Uber die Abgrenzung der Eigenwerte einer Matrix}. \newblock {\em Bulletin de l'Academie des Sciences de l'URSS}, 6:749--754, 1931. \bibitem{Kac66} M.~Kac. \newblock Can one hear the shape of a drum? \newblock {\em Amer. Math. Monthly}, 73:1--23, 1966. 
\bibitem{Katznelson} Y.~Katznelson. \newblock {\em An introduction to harmonic analysis}. \newblock John Wiley and Sons, Inc, New York, 1968. \bibitem{AverageSimplexCardinality} O.~Knill. \newblock The average simplex cardinality of a finite abstract simplicial complex. \newblock https://arxiv.org/abs/1905.02118, 1999. \bibitem{Knillforest} O.~Knill. \newblock Counting rooted forests in a network. \newblock {{\\}http://arxiv.org/abs/1307.3810}, 2013. \bibitem{KnillZeta} O.~Knill. \newblock The zeta function for circular graphs. \newblock {{\\}http://arxiv.org/abs/1312.4239}, 2013. \bibitem{cauchybinet} O.~Knill. \newblock Cauchy-{B}inet for pseudo-determinants. \newblock {\em Linear Algebra Appl.}, 459:522--547, 2014. \bibitem{KnillFunctional} O.~Knill. \newblock Characteristic length and clustering. \newblock {{\\}http://arxiv.org/abs/1410.3173}, 2014. \bibitem{KnillBarycentric} O.~Knill. \newblock The graph spectrum of barycentric refinements. \newblock {{\\}http://arxiv.org/abs/1508.02027}, 2015. \bibitem{KnillBarycentric2} O.~Knill. \newblock Universality for {B}arycentric subdivision. \newblock {{\\}http://arxiv.org/abs/1509.06092}, 2015. \bibitem{Unimodularity} O.~Knill. \newblock On {F}redholm determinants in topology. \newblock {\\}https://arxiv.org/abs/1612.08229, 2016. \bibitem{HearingEulerCharacteristic} O.~Knill. \newblock One can hear the {E}uler characteristic of a simplicial complex. \newblock {\\}https://arxiv.org/abs/1711.09527, 2017. \bibitem{DyadicRiemann} O.~Knill. \newblock An elementary {D}yadic {R}iemann hypothesis. \newblock {\\}https://arxiv.org/abs/1801.04639, 2018. \bibitem{KnillEnergy2020} O.~Knill. \newblock The energy of a simplicial complex. \newblock {\em Linear Algebra and its Applications}, 600:96--129, 2020. \bibitem{AvniBreuerSimon} J.~Breuer N.~Avni and B.~Simon. \newblock Periodic jacobi matrices on trees. \newblock https://arxiv.org/abs/1911.02612, 2020. \bibitem{Pastur} L.~Pastur and A.Figotin. \newblock {\em Spectra of Random and Almost-Periodic Operators}, volume 297. \newblock Springer-Verlag, Berlin--New York, {Grundlehren} der mathematischen {Wissenschaften} edition, 1992. \bibitem{ChebotarevShamis1} P.Chebotarev and E.~Shamis. \newblock Matrix forest theorems. \newblock arXiv:0602575, 2006. \bibitem{Mor1} P.v.Moerbeke. \newblock About isospectral deformations of discrete {L}aplacians. \newblock In {\em Lecture Notes in Mathematics}, volume 755. Springer, 1978. \bibitem{ChebotarevShamis2} E.V.~Shamis P.Yu, Chebotarev. \newblock A matrix forest theorem and the measurement of relations in small social groups. \newblock {\em Avtomat. i Telemekh.}, 9:125--137, 1997. \bibitem{Carmona} J.Lacroix R.~Carmona. \newblock {\em Spectral Theory of Random {S}chr\"odinger Operators}. \newblock Birkh\"auser, 1990. \bibitem{GershgorinAndHisCircles} R.S. Varga. \newblock {\em Gershgorin and His Circles}. \newblock Springer Series in Computational Mathematics 36. Springer, 2004. \bibitem{Weyl1911} H.~Weyl. \newblock Asymptotische verteilung der eigenwerte. \newblock {\em {Nachrichten von der K\"oniglichen Gesellschaft der Wissenschaften}}, pages 110--117, 1911. \end{thebibliography} \end{document}
2205.10944v1
http://arxiv.org/abs/2205.10944v1
Duality theory for optimistic bilevel optimization
\documentclass{article} \usepackage{graphicx} \usepackage{amsmath, amssymb, amsthm, enumerate} \usepackage{amsmath,amssymb,amsthm,enumerate,mathrsfs} \newtheorem{theorem}{Theorem}[section] \newtheorem{corollary}[theorem]{Corollary} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{proposition}[theorem]{Proposition} \theoremstyle{definition} \newtheorem{definition}[theorem]{Definition} \newtheorem{example}[theorem]{Example} \newtheorem{assum}{Assumption}[section] \newtheorem{algo}[theorem]{Algorithm} \newtheorem{remark}[theorem]{Remark} \numberwithin{equation}{section} \setlength\topmargin{-12pt}\setlength\headheight{21.6pt}\setlength\headsep{16.8pt} \setlength\textheight{20.8cm}\setlength\textwidth{14cm} \begin{document} \makeatletter \begin{center} \Large{\bf Duality theory for optimistic bilevel optimization} \end{center}\vspace{5mm} \begin{center} \textsc{Houria En-Naciri\footnote{Laboratoire LASMA, Department of Mathematics, Sidi Mohammed Ben Abdellah University, Morocco (email: \textsf{[email protected]})} Lahoussine Lafhim\footnote{Laboratoire LASMA, Department of Mathematics, Sidi Mohammed Ben Abdellah University, Morocco (email: \textsf{[email protected]})} Alain Zemkoho\footnote{School of Mathematical Sciences, University of Southampton, UK (email: \textsf{[email protected]}). This author's work is supported by the EPSRC grant EP/V049038/1 and the Alan Turing Institute under the EPSRC grant EP/N510129/1}}\end{center} \vspace{2mm} \footnotesize{ \noindent\begin{minipage}{14cm} {\bf Abstract:} In this paper, we exploit the so-called value function reformulation of the bilevel optimization problem to develop duality results for the problem. Our approach builds on Fenchel-Lagrange-type duality to establish suitable results for the bilevel optimization problem. First, we overview some standard duality results to show that they are not applicable to our problem. Secondly, via the concept of partial calmness, we establish weak and strong duality results. In particular, Lagrange, Fenchel-Lagrange, and Toland-Fenchel-Lagrange duality concepts are investigated for this type of problems under some suitable conditions. Thirdly, based on the use of some regularization of our bilevel program, we establish sufficient conditions ensuring strong duality results under a generalized Slater-type condition without convexity assumptions and without the partial calmness condition. Finally, without the Slater condition, a strong duality result is constructed for the bilevel optimization problem with geometric constraint. \end{minipage} \\[5mm] \noindent{\bf Keywords:} {optimistic bilevel optimization, lower level value function reformulation, duality theory}\\ \noindent{\bf Mathematics Subject Classification 2020:} {90C46, 90C26, 90C31} \hbox to14cm{\hrulefill}\par \section{Introduction} In this paper, we are concerned with the following standard optimistic bilevel programming problem \begin{equation}\label{calm0}\tag{P} {\displaystyle \min_{x, y}} \ F(x, y) \;\;\mbox{s.t.} \;\; G_{i}\left( x,y\right) \leq 0, \; \; i\in I_{k}, \;\; y\in \mathcal{S}(x), \end{equation} where, for each $x\in \mathbb{R}^{n}$, $\mathcal{S}(x)$ represents the optimal solution set of the lower level problem \begin{equation}\label{llp-1}\tag{L[$x$]} {\displaystyle \min_{y}} \ f(x, y) \;\; \mbox{ s.t. 
} \;\; y\in K\left(x\right), \end{equation} $F :\mathbb{R}^n \times \mathbb{R}^m \rightarrow \mathbb{R}\cup \{+\infty\}$ and $f :\mathbb{R}^n \times \mathbb{R}^m \rightarrow \mathbb{R}\cup \{+\infty\}$ are the upper and lower level objective functions, while $G_i: \mathbb{R}^{n} \times \mathbb{R}^{m} \rightarrow \mathbb{R}\cup \{+\infty\}$ for $i\in I_{k} :=\{1,\ldots, k\}$ ($k\in \mathbb{N}\setminus\{0\}$) denotes the upper level constraint functions. Throughout the paper, we confine ourselves to the case where the lower level feasible set is defined, for all $x\in \mathbb{R}^n$, by \begin{equation*} K(x):=\left\{y\in \mathbb{R}^{m} \mid g_{j}(x, y) \leq 0, \; \; j\in I_{p} :=\{1,\cdots,p \}\right\} \end{equation*} with $g_j: \mathbb{R}^{n} \times \mathbb{R}^{m} \rightarrow \mathbb{R}\cup \{+\infty\}$ for $j=1, \ldots, p$ ($p\in \mathbb{N}\setminus\{0\}$). The objective functions $F$ and $f$, as well as the constraint functions $G_i$ for $i=1, \ldots, k$ and $g_j$ for $j=1, \ldots, p$, are assumed to be proper convex functions. In the sequel, from \cite{zali}, we adopt the following standard operations involving $+\infty$ and $-\infty$: \begin{equation*} \begin{array}{rll} \left( +\infty\right) -\left( +\infty\right)= \left( -\infty\right)-\left( -\infty\right)=\left( +\infty\right)+\left( -\infty\right)=\left( -\infty\right)+\left( +\infty\right) &=& +\infty,\\ 0 \cdot \left( +\infty\right) &=& +\infty,\\ 0 \cdot \left( -\infty\right) &=&0. \end{array} \end{equation*} Our main goal in this paper is to study the duality theory for the bilevel optimization problem \eqref{calm0}, which corresponds to the optimistic version of the problem; more details on bilevel optimization and related properties can be found in \cite{DempeFoundations2002,DempeZemkohoBook}, where surveys on different classes on the problem, including the pessimistic version, are also given. The literature on duality theory for general optimization problems is very rich. For example, strong duality for nearly convex optimization problems is established by Bo\c{T} and Wanka \cite{ref10}, where three kinds of conjugate dual problems are devoted to the primal optimization problem; i.e., the Lagrange, Fenchel, and Fenchel-Lagrange dual problems. Also, Mart\'{\i}nez-Legaz and Volle \cite{ref44} introduced in particular a duality scheme for the problem of minimizing the difference of two extended real-valued convex functions (i.e., d.c. functions for short) under finitely many d.c. constraints in terms of the Legendre-Fenchel conjugates of the involved convex functions. Another related interesting problem in conjugate duality theory developed by Rockafellar (see \cite{ref11} and \cite{ref12}) is the well-known perturbational approach, consisting in the use of a perturbation function as the keystone to obtain a dual problem for a general primal one by means of the Fenchel conjugation. Both problems satisfy weak duality (in the sense that the optimal value of the dual problem is less than or equal to the optimal value of the primal one), as a direct consequence of the Fenchel–Young inequality, whereas conditions ensuring strong duality (no duality gap and dual problem solvable) can be found in these references and many other ones in the literature. However, only a small number of papers have been dedicated to duality theory for bilevel optimization problems. 
In \cite{refext}, Aboussoror and Adly adapted the Fenchel–Lagrange duality, which has been first introduced by Bo\c{t} and Wanka in \cite{ref bot} and tailored to convex programming problems, showing that a strong duality result holds between a bilevel nonlinear optimization problem with extremal-value function and its Fenchel-Lagrange dual under appropriate assumptions. Afterwards, a duality approach for the strong bilevel programming problem \eqref{calm0} via a regularization and the Fenchel-Lagrange duality was presented by Aboussoror, Adly and Saissi in \cite{refextend}. More precisely, using $\varepsilon-$approximate solutions of the lower level problem, they consider a regularized problem (\ref{calm0}$_{\epsilon}$) of \eqref{calm0}. Via this regularization and the operation of scalarization they present in \cite{ref semi vect}, a duality approach using conjugacy for a semivectorial bilevel programming problem with the upper and lower levels being vectorial and scalar respectively, are proposed. In this paper, we overview the literature to construct appropriate duality results for bilevel optimization problems, including Fenchel, Lagrange-Fenchel, and Toland-Lagrange-based techniques. Clearly, the aim of this paper is two-fold. We explore standard duality results from the literature and show their limitations, in terms of their applicability to the bilevel optimization problem. Secondly, we explore a number of approaches to develop tractable approaches to obtain duality results for the bilevel optimization problem. In the latter case, we first use an exact penalization of problem \eqref{calm0} based on the well-know concept of partial calmness (see \cite{ye}) to completely characterize Fenchel-Lagrange-type results. The key to this exact penalization is that the resulting problem can be viewed as a minimization problem of a d.c. objective function under convex constraints. Note that the technique used to get such a transformation is inspired by the work of \cite{ref+,ref44}, and conjugacy is the main tool involved in this technique. The results are obtained under the Slater constraint qualification and further regularity conditions and some closedness conditions. Additionally, based on the use of a regularization of problem \eqref{calm0}, we establish sufficient conditions ensuring strong duality results under a generalized Slater-type condition without convexity assumptions and without the partial calmness condition. Finally, without any Slater-type condition, strong duality results are investigated for the bilevel optimization problem with geometric constraint. In next section, we first recall some definitions of convex analysis tools needed in this work. The preliminaries are mainly concerned with relevant properties of the subdifferential, also of the epigraph and some conjugate properties of convex functions. In Section $3$, we collect some existing results while showing their limitations for our program. In Section $4$, we investigate duality theory for problem \eqref{calm0} using the partial calmness under some appropriate conditions. Subsequently, we completely characterize the strong duality for problem \eqref{calm0} in the presence of partial calmness. In Section $5$ however, in the absence of partial calmness, but under a generalized Slater constraint qualification, we provide a strong duality for the nonconvex bilevel optimization problem \eqref{calm0}. Then, the special case where the bilevel problem is geometrically constrained is also studied. 
Finally, we conclude our work in Section $6$. \section{Preliminaries} Here, we recall some fundamental definitions and results essentially related to convex mappings. To proceed, let $D$ be a subset of $\mathbb{R}^{n}$. The sets $int D$, $cl D$ and $co D$, stand for the topological interior, the closure and the convex hull of $D$, respectively. Moreover, the convex cone (containing the origin of $\mathbb{R}^{n}$) generated by $D$ is denoted by $cone D$, and its closure is $\overline{cone} D$, the positive polar cone of $D$ is given by \[ D^{\circ}=\left\{x^{*} \in \mathbb{R}^{n}:\left\langle x^{*}, x\right\rangle \geqslant 0, \ \forall x \in D\right\}, \] while the indicator function $\delta_{D}: \mathbb{R}^{n} \rightarrow \mathbb{R}\cup\{+\infty\}$ of $D$ is defined by \[ \delta_{D}(x)=\left\{\begin{array}{ll} 0, & \text { if } x \in D, \\ +\infty, & \text { if } x \notin D. \end{array}\right. \] Additionally, the support function $\sigma_{D}: \mathbb{R}^{n} \rightarrow \bar{\mathbb{R}}=\mathbb{R}\cup\{-\infty\}\cup\{+\infty\}$ of $D$ is $$ \sigma_{D}\left(x^{*}\right)=\sup _{x \in D}\left\langle x^{*}, x\right\rangle . $$ For the optimization problem \eqref{calm0} we denote by ${\mathcal V}\left( \ref{calm0}\right) $ its optimal objective value and this notation is extended to the optimization problems that we use in this paper. For a given extend real valued function $f: \mathbb{R}^{n} \rightarrow \bar{\mathbb{R}}$, the epigraph, the lower semicontinuous hull, and the effective domain of $f$ are respectively defined by \begin{equation*} \begin{array}{rll} \operatorname{epi} f & = & \left\{(x, r) \in \mathbb{R}^{n} \times \mathbb{R}\mid f(x) \leqslant r\right\},\\[1ex] \operatorname{epi}(\operatorname{cl}f) & = &\operatorname{cl}(\operatorname{epi}f),\\[1ex] \operatorname{dom}f & = &\left\{x \in \mathbb{R}^{n} \mid f(x)<+\infty\right\}. \end{array} \end{equation*} We say that $f$ is proper if $f(x)>-\infty$ for all $x \in \mathbb{R}^{n}$, and $\operatorname{dom}f \neq \emptyset$. \begin{definition} Let $f: \mathbb{R}^{n} \rightarrow \bar{\mathbb{R}}$ be a function, and $D$ a nonempty subset of $\mathbb{R}^{n}$. The conjugate function of $f$ relative to the set $D$ is denoted by $f_{D}^{*}$ and defined on $\mathbb{R}^{n}$ by \begin{equation*}\label{eq.dual} f_{D}^{*}(p)=\sup _{x \in D}\left\{\langle p, x\rangle-f(x)\right\}, \end{equation*} where $\langle., .\rangle$, denotes the inner product of two vectors in $\mathbb{R}^{n}$, i.e., for given $x=\left(x_{1}, \ldots, x_{n}\right)^{T}$ and $y=\left(y_{1}, \ldots, y_{n}\right)^{T}$, we have $\langle x, y\rangle=\displaystyle\sum_{i=1}^{n} x_{i} y_{i} .$ When $D=\mathbb{R}^{n},$ we get the usual Legendre-Fenchel conjugate function of $f$ denoted by $f^{*}$. \end{definition} Let $D \subset \mathbb{R}^{n}$ and $\bar x\in cl D$. The contingent cone to $D$ at $\bar{x}$ is the set $$ T\left(\bar{x},D\right) =\left\{y \in \mathbb{R}^{n} \mid \exists t_{k} \downarrow 0, \exists y_{k} \rightarrow y, \bar{x}+t_{k} y_{k} \in D \ \forall k\in \mathbb{N}\right\}. $$ \begin{definition} Let $f: \mathbb{R}^{n} \rightarrow \mathbb{R} \cup\{+\infty\}$ be a proper convex function and $\bar{x} \in \operatorname{dom}f$. The subdifferential in the sense of convex analysis of $f$ at $\bar{x}$ denoted by $\partial f(\bar{x})$ is the set $$ \partial f(\bar{x})=\left\{x^{*} \in \mathbb{R}^{n}\mid f(x) \geq f(\bar{x})+\left\langle x^{*}, x-\bar{x}\right\rangle \ \forall x \in \mathbb{R}^{n}\right\}. 
$$ An element $x^{*} \in \partial f(\bar{x})$ is called a subgradient of $f$ at $\bar{x}$. \end{definition} \begin{remark}\label{eq.} Let $f: \mathbb{R}^{n} \rightarrow \mathbb{R} \cup\{+\infty\}$. Then, the following assertions hold: \begin{itemize} \item[(i)] $x^{*} \in \partial f(\bar{x})\ \text{if and only if}\ \left\langle x^{*}, \bar{x}\right\rangle=f(\bar{x})+f^{*}\left(x^{*}\right)$; \item[(ii)] $f(x)+f^{*}\left(x^{*}\right) \geq\left\langle x^{*}, x\right\rangle \ \ \ \forall x, x^{*} \in \mathbb{R}^{n}$ (the Young-Fenchel inequality); \item[(iii)] If $\operatorname{cl}f$ is proper and convex, then it holds that $f^{**}=\operatorname{cl}f$. \end{itemize} \end{remark} \begin{proposition}\label{inf-sum} Let $f_{1}, \ldots, f_{k}: \mathbb{R}^{n} \rightarrow \mathbb{R} \cup\{+\infty\}$ be proper convex functions. If the set $\displaystyle\bigcap_{i=1}^{k} \operatorname{ri}\left(\operatorname{dom}\left(f_{i}\right)\right)$ is nonempty, then $$ \left(\sum_{i=1}^{k} f_{i}\right)^{*}(p)=\left(f_{1}^{*} \square \ldots \square f_{k}^{*}\right)(p), $$ where for a nonempty subset $B$ of $\mathbb{R}^{n}$, $\text{ri} B$ denotes the relative interior of $B$, i.e., the interior of $B$ relative to the smallest affine set containing $B$, and $$\left(f_{1}^{*} \square \ldots \square f_{k}^{*}\right)(p):=\inf \left\{\sum_{i=1}^{k} f_{i}^{*}\left(p_{i}\right): p=\sum_{i=1}^{k} p_{i}\right\}$$ is the infimal convolution of $f_{1}^{*}, \ldots , f_{k}^{*}$, and for each $p \in \mathbb{R}^{n}$ the infimum is attained. \end{proposition} \begin{proposition}\label{cl2} Let $I$ be an index set, and let $\left\{f_{i}: i \in I\right\}$ be a family of proper convex functions, where $f_{i}:\mathbb{R}^{n} \rightarrow \mathbb{R} \cup\{+\infty\}$. Then the following statements hold: \begin{itemize} \item[(i)] $\operatorname{epi}\left(\underset{i \in I}{\sup}f_{i}\right)=\displaystyle\underset{i \in I}{\bigcap} \operatorname{epi}f_{i}$; \item[(ii)] $\left(\underset{i \in I}{\inf} f_{i}\right)^{*}=\underset{i \in I}{\sup} f_{i}^{*}$ and hence, $\operatorname{epi}\left(\underset{i \in I}{\inf } f_{i}\right)^{*}=\displaystyle\underset{i \in I}{\bigcap} \operatorname{epi} f_{i}^{*}$; \item[(iii)] If the set $I$ is finite and $\displaystyle\bigcap_{i=1}^{k} \operatorname{ri}\left(\operatorname{dom}\left(f_{i}\right)\right)$ is nonempty, then $$ \operatorname{epi}\left(\left(\sum_{i=1}^{k} f_{i}\right)^{*}\right)=\sum_{i=1}^{k} \operatorname{epi}\left(f_{i}^{*}\right) . $$ \end{itemize} \end{proposition} Finally, any point $p \in \mathbb{R}^{n}$ can be considered as a function on $\mathbb{R}^{n}$ in such a way that $p(x):=\langle p, x\rangle$, for any $x \in \mathbb{R}^{n} .$ Thus, for any $\alpha \in \mathbb{R}$ and any function $h: \mathbb{R}^{n}\rightarrow \bar{R}$, \begin{equation*}\label{eqr} \left( h+p+\alpha\right)^{\ast}\left( x^{\ast}\right)=h^{\ast}\left( x^{\ast}-p\right)-\alpha , \ \ \text{for all} \ x^{\ast}\in\mathbb{R}^{n} \end{equation*} and \begin{equation*}\label{eqr-1} \operatorname{epi}(h+p+\alpha)^{*}=\operatorname{epi} h^{*}+(p,-\alpha) . \end{equation*} For convenience, let $\theta\in\mathbb{R}^{m}$ and $M: \mathbb{R}^{n}\rightarrow \mathbb{R}^{m}$, the composition mapping $\theta \circ K$ will be denoted by $\theta M$. 
\section{Limitations of some existing theory} Consider the following lower level value reformulation of the bilevel optimization problem \eqref{calm0} under consideration: \begin{equation}\label{llvf}\tag{LLVF} \left\{\begin{array}{ll} {\displaystyle \min_{x, y}} \ F(x, y) \\ G_{i}(x,y) \leq 0 \quad & \forall i \in I_{k}=\{1,\dots,k\}, \\ g_{j}(x, y) \leq 0 & \forall j \in I_{p}=\{1,\dots,p\}, \\ f(x, y)-\varphi(x) \leq 0, \end{array}\right. \end{equation} where $\varphi$ denotes the optimal value function of the lower level problem \eqref{llp-1}: \[ \varphi(x):=\underset{y}\min~ \left\{ f(x,y)| \;\; y\in K(x)\right\}. \] \subsection{Convex optimization with only inequality constraints} In this subsection, we review some duality results for the mathematical programming problem \begin{equation}\label{w-o-ineq}\tag{P$_{in}$} \min \{\phi \left( x\right)|\; \psi\left( x\right) \leq 0\} \end{equation} with $\phi:\mathbb{R}^{n}\rightarrow\mathbb{R}$ and $\psi =\left( \psi_{1},\cdots ,\psi_{p}\right)^{T}:\mathbb{R}^{n} \rightarrow\mathbb{R}^{r}$, where $\psi\left( x\right) \leq 0$ means that $\psi_{i}\left( x\right) \leq 0$ for all $i\in I_{r}=\{1,\cdots ,r\}$. To define the dual problem of \eqref{w-o-ineq}, Bot et al. \cite{ref bot} considered a perturbation by using the following perturbation function: \begin{equation*} \Phi\left( x,\alpha ,\beta\right) = \left\{ \begin{array}{lcl} \phi\left( x+\alpha\right) & \ \ \ & \text{if} \ \psi\left( x\right) \leq \beta, \\ +\infty & \ \ \ & \text{otherwise}. \end{array} \right. \end{equation*} Afterwards, they calculate the conjugate of $\Phi$ and obtained the following Fenchel-Lagrange dual problem \begin{equation}\label{w-o-ineq-dual}\tag{D$_{in}$} {\displaystyle \sup_{\alpha^{\ast}\in\mathbb{R}^{n}, \ \beta^{\ast}\in\mathbb{R}_{+}^{r} } } \ \bigg\{ -\phi^{\ast} \left( \alpha^{\ast}\right) + {\displaystyle \inf_{x\in\mathbb{R}^{n}} } \ \left[ \langle \alpha^{\ast} ,x\rangle + \langle\beta^{\ast},\psi\left( x\right)\rangle \right] \bigg\}. \end{equation} In general, to deal with strong duality which charaterizes \eqref{w-o-ineq}, we need to use a Slater-type constraint qualification. \begin{definition} We say that the genaralized Slater constraint qualification holds for the familly $\left( \psi_{i}\right)_{i\in I_{r}}$ if there exist an element $\bar{x}\in \mathbb{R}^{n}$ such that $\psi_{i}\left(\bar{x}\right) < 0$ for $i\in N$ and $\psi_{i}\left(\bar{x}\right) \leq 0$ for $i\in L$, where $N$ and $L$ are respectively given as \begin{equation*} \begin{array}{ll} N=\{i\in I_{r} \ |& \text{such that} \ \psi_{i} \ \text{is not an affine function} \},\\ L=\{i\in I_{r} \ |& \text{such that} \ \psi_{i} \ \text{is an affine function} \}. \end{array} \end{equation*} \end{definition} \begin{theorem}[\cite{ref bot}]\label{f-l-convex} Let $\phi$ and $\psi_{i}$, with $i\in I_{r}$ be convex functions. Assume that the genaralized Slater constraint qualification holds for the familly $\left( \psi_{i}\right)_{i\in I_{r}}$. If ${\mathcal V} \eqref{w-o-ineq}$ is finite, then strong duality holds and ${\mathcal V} \eqref{w-o-ineq} ={\mathcal V} \eqref{w-o-ineq-dual}$. \end{theorem} Note that in \cite{ref bot} the Slater constraint qualification is used as a sufficient condition to establish strong Fenchel-Lagrange duality. Unfortunately, due to the presence of $f(x, y)-\varphi(x)$ as a constraint, problem \eqref{llvf}does not satisfy the Slater constraint qualification. Moreover, problem \eqref{llvf}is not convex, even if the data are convex. 
Hence, Theorem \ref{f-l-convex} is not applicable for problem \eqref{llvf}. However, there is a chance that some of these requirements can hold for the simple bilevel optimization problem (see, e.g., \cite{Shehu}), where the lower level problem is an unperturbed optimization problem. \subsection{Convex optimization with inequality and equality constraints} Next, we add some equality constraints to problem \eqref{w-o-ineq} to get the following primal optimization problem with equality constraints \begin{equation}\label{w-o-eq}\tag{P$_{eq}$} \min \{\phi \left( x\right) \ | \ \psi\left( x\right) \leq 0, \ \ \omega\left( x\right) =0\}, \end{equation} where $\omega :\mathbb{R}^{n}\rightarrow\mathbb{R}^{q}$ is an affine-linear mapping and $\phi :\mathbb{R}^{n}\rightarrow\mathbb{R}$ and $\psi :\mathbb{R}^{n}\rightarrow\mathbb{R}^{r}$ are two functions such that the function $\left( \phi ,\psi \right) :\mathbb{R}^{n}\rightarrow\mathbb{R}\times\mathbb{R}^{r}$ defined by $\left( \phi ,\psi \right)\left( x\right) =\left( \phi\left( x\right) ,\psi\left( x\right) \right)$ is convex-like with respect to the cone $\mathbb{R}_{+}\times\mathbb{R}_{+}^{r}$; that is, the set $\left( \phi ,\psi \right)\left( \mathbb{R}^{n}\right) +\left(\mathbb{R}_{+}\times\mathbb{R}_{+}^{r}\right) $ is convex. The Lagrange dual \eqref{w-o-eq-dual} of \eqref{w-o-eq} is defined as \begin{equation}\label{w-o-eq-dual}\tag{D$_{eq}$} {\displaystyle \sup_{\beta^{\ast}\in\mathbb{R}_{+}^{r}, \ \gamma^{\ast}\in\mathbb{R}^{q} } } \ {\displaystyle \inf_{x\in\mathbb{R}^{n}} } \ \bigg\{ \phi \left(x\right) + \langle \beta^{\ast}, \psi\left( x\right) \rangle+ \langle \gamma^{\ast}, \omega\left( x\right) \rangle \bigg\}. \end{equation} In \cite{refrev}, Bot et al. provide a strong duality theorem for \eqref{w-o-eq} and its Lagrange dual problem by using some regularity conditions expressed by means of the contingent cone. More precisely, they work under the assumption that $\bar{x}$ is an optimal solution of the primal problem \eqref{w-o-eq}. To clarify their concept, let us introduce the following sets: \begin{equation*} \begin{array}{lcl} \Sigma & = & \{x\in\mathbb{R}^{n} \ | \ \psi\left( x\right) \leq 0, \ \ \omega\left( x\right) =0 \}, \\ {\mathcal E} & = & \left( \phi\left( \bar{x}\right)-\phi\left( x\right) -\sigma , -\psi\left( x\right) -z,-\omega\left( x\right)\right) \ | \ x\in \mathbb{R}^{n}, \sigma \geq 0, \ z\in \mathbb{R}_{+}^{r} \}. \end{array} \end{equation*} We arrive now to the strong duality theorem stated in the latter reference. \begin{theorem}\label{f-l-convex-eq} If $\bar{x}$ is an optimal solution for \eqref{w-o-eq}, the following assertions are equivalent \begin{enumerate} \item The equality $T_{\mathcal E}\left(0,0_{\mathbb{R}^{r}},0_{\mathbb{R}^{q}} \right) \bigcap \left( \mathbb{R}_{+}^{\ast}\times 0_{\mathbb{R}^{r}}\times0_{\mathbb{R}^{q}}\right) =\emptyset$ holds; \item ${\mathcal V} \eqref{w-o-eq}={\mathcal V} \eqref{w-o-eq-dual}$ and there exists a point $(\bar{\beta}, \bar{\gamma}) \in \mathbb{R}_{+}^{r}\times \mathbb{R}^{q}$ that solves problem \eqref{w-o-eq-dual}. In this case, we have $\langle \bar{\beta} , \psi\left( \bar{x}\right) \rangle =0$. \end{enumerate} \end{theorem} We now analyze the applicability of this theorem to our bilevel program \eqref{calm0}. 
To proceed, we consider the classical optimistic bilevel programming problem with linear lower level problem \begin{equation}\label{GoodEx} \left\{\begin{array}{ll} {\displaystyle \min_{x, y}} \ F(x, y) \\ G_{i}(x,y) \leq 0 \quad \forall i \in I_{k}=\{1,\dots,k\}, \\ y\in S\left( x\right)=\arg\underset{y}{\min}~\left\{C^{T}y\,|\;\; Ax+By\leq d\right\}, \end{array}\right. \end{equation} where $d\in\mathbb{R}^{r}$, \ $C \in\mathbb{R}^{m}$, \ $A \in\mathbb{R}^{r}\times\mathbb{R}^{n}$ and $B \in\mathbb{R}^{r}\times\mathbb{R}^{m}$. Its lower level value function reformulation can be written as \begin{equation*} {\displaystyle \min_{x, y}} \ F(x, y) \ : \ G_{i}(x,y) \leq 0 \quad \forall i \in I_{k}=\{1,\dots,k\}, \ Ax+By\leq d, \ C^{T}y-\varphi\left( x\right) = 0, \end{equation*} where the lower-level value function $\varphi$ is given by \begin{equation*} \varphi\left( x\right) ={\displaystyle \min_{y}} \ \{ C^{T}y\,| \;\; Ax+By\leq d\}. \end{equation*} In general, even if the lower level problem is fully affine linear (i.e., in $(x,y)$), as it is the case here, the optimal value function $\varphi$ is not linear. This function is typically only be piecewise linear. Consequently, Theorem \ref{f-l-convex-eq} will rarely be applicable to a problem of the form \eqref{GoodEx}. \subsection{Nonconvex optimization with inequality and equality constraints} Now, we do not assume that \eqref{w-o-eq} has a solution, also no convexity assumption is imposed. Let us consider the Lagrangian \begin{equation*} L\left(\varsigma ,\beta ,\gamma , x\right)=\varsigma \phi\left( x\right)+\langle \beta ,\psi\left( x\right) \rangle +\langle \gamma ,\omega\left( x\right) \rangle. \end{equation*} It can be seen that for all $\varsigma \in\mathbb{R}_{+}$, $\beta\in\mathbb{R}_{+}^{r}$ and $\gamma\in\mathbb{R}^{q}$, it holds that \begin{equation*} {\displaystyle \inf_{x\in\mathbb{R}^{n}}} \ L\left(\varsigma ,\beta ,\gamma , x\right)\leq \varsigma {\mathcal V}\left( P_{eq}\right), \ \ \ \forall \varsigma \geq 0. \end{equation*} Consequently, weak duality is satisfied in this case. In \cite{fabin-12}, a strong Lagrange duality theorem for the nonconvex optimization problem \eqref{w-o-eq} and its dual is established under a suitable generalized Slater assumption. Based on this reference, it is said that the generalized Slater condition holds if \begin{equation}\label{cq-w-p-casnorm} \overline{cone} \ \bigg( \left(\psi ,\omega\right) \left( \mathbb{R}^{n}\right) + \mathbb{R}^{r}_{+}\times \{0_{\mathbb{R}^{q}}\}\bigg)= \mathbb{R}^{r}\times\mathbb{R}^{q}. \end{equation} To proceed we need the following construction \begin{equation*} \Psi \left( \mathbb{R}^{n}\right)=\bigg \{\left(\phi\left( x\right) ,\psi\left( x\right) ,\omega\left( x\right) \right)\in \mathbb{R}\times\mathbb{R}^{r}\times\mathbb{R}^{q} \ | \ x\in \mathbb{R}^{n} \bigg \}. \end{equation*} By assuming that a genaralized Slater condition holds together with an additional condition, the authors prove the following strong duality result. \begin{theorem}\label{fin-nonconvex} Consider problems \eqref{w-o-eq} and \eqref{w-o-eq-dual} and suppose that \begin{itemize} \item ${\mathcal V} \eqref{w-o-eq}$ is finite, \item $int \bigg ( \text{co} \left( \Psi \left( \mathbb{R}^{n}\right) \right)+ \left( \mathbb{R}_{+}\times\mathbb{R}_{+}^{r}\times\mathbb{R}^{q}\right) \bigg) \neq\emptyset$, \item the generalized Slater condition $\left( \ref{cq-w-p-casnorm}\right)$ holds. 
\end{itemize} Furthermore, let one of the following assumption hold: \begin{description} \item[$A\left( 1\right) $] $\text{cone} \ \bigg ( int \bigg [ \text{co} \left( \Psi \left( \mathbb{R}^{n}\right) \right)-{\mathcal V} \eqref{w-o-eq} \left( 1,0_{\mathbb{R}_{+}\times\mathbb{R}_{+}^{r}\times\mathbb{R}^{q}}\right) + \left( \mathbb{R}_{+}\times\mathbb{R}_{+}^{r}\times\mathbb{R}^{q}\right) \bigg] \bigg)$ is pointed, \item[$A\left( 2\right) $] $\left( 0,0\right) \notin int \bigg [ \text{co} \left( \Psi \left( \mathbb{R}^{n}\right) \right)-{\mathcal V} \eqref{w-o-eq} \left( 1,0_{\mathbb{R}_{+}\times\mathbb{R}_{+}^{r}\times\mathbb{R}^{q}}\right) + \left( \mathbb{R}_{+}\times\mathbb{R}_{+}^{r}\times\mathbb{R}^{q}\right) \bigg]$. \end{description} Then, $\mathcal{V} \eqref{w-o-eq}=\mathcal{V} \eqref{w-o-eq-dual}$ and there exists $\left(\bar{\beta},\bar{\gamma}\right)\in \mathbb{R}^{r}_{+}\times\mathbb{R}^{q}$ such that \begin{equation*} {\mathcal V} \eqref{w-o-eq}= {\displaystyle \inf_{x \in\mathbb{R}^{n}}} \ L\left(1,\bar{\beta} ,\bar{\gamma} ,x\right). \end{equation*} \end{theorem} Next, we do not assume that \eqref{calm0} has a solution, also no convexity assumption is imposed. Let us consider the Lagrangian \begin{equation*} L\left(\varsigma ,\alpha ,\beta ,\gamma , x,y \right)=\varsigma F(x,y)+\gamma(f(x, y)-\varphi(x)))+\sum_{i=1}^{k}\alpha_{i} G_{i}(x,y)+\sum_{j=1}^{p}\beta_{j} g_{j}(x,y) . \end{equation*} It can be seen that for all $\gamma\in\mathbb{R}$, $\alpha\in\mathbb{R}_{+}^{k}$ and $\beta\in\mathbb{R}_{+}^{p}$ \begin{equation*} {\displaystyle \inf_{\left( x,y\right) \in\mathbb{R}^{n}\times\mathbb{R}^{m}}} \ L\left(\varsigma ,\alpha ,\beta ,\gamma , x,y \right)\leq \varsigma {\mathcal V}\left( {\mathcal P}\right), \ \ \ \forall \varsigma \geq 0. \end{equation*} Consequently, the weak duality holds. To provide the strong duality, we need that the following Slater-type condition holds \begin{equation}\label{cq-w-p-cLLVF} \overline{cone} \ \bigg( \left( f-\varphi,G,g\right) \left( \mathbb{R}^{n}\times\mathbb{R}^{m}\right) + \{0_{\mathbb{R}}\}\times\mathbb{R}_{+}^{k+p}\bigg)= \mathbb{R}\times\mathbb{R}^{k+p}. \end{equation} Unfortunately, this is not the case, as any vector $\left( a,0_{\mathbb{R}^{k}},0_{\mathbb{R}^{p}}\right) $ with $a < 0$ does not belong to the set in the left-hand-side of equation $\left( \ref{cq-w-p-cLLVF}\right)$. Hence, Theorem \ref{fin-nonconvex} in not applicable for \eqref{calm0} problem. \section{Duality under partial calmness} To deal with the fact that most standard constraint qualifications do not hold for problem \eqref{calm0}, we are going to use the partial calmness concept in this section. To proceed, we use the reformulation \eqref{llvf}. Here, when $\mathcal{S}\left( x\right) =\emptyset$ for some $x\in \mathbb{R}^{n}$, we use the convention $\inf \emptyset = \infty$. In general, the value function $\varphi$ is not lower semicontinuous, even when the involved functions satisfy this property. To ensure the validity of the lower semicontinuity of $\varphi$, we will impose some additional conditions on the set-valued map $K$; see \cite[Lemma 3.2]{GauvinDubeau1982} for relevant details. 
To proceed here, we consider the perturbed version of problem \eqref{llvf} linearly parameterized by $t\in \mathbb{R}$: \begin{equation}\label{calm1}\tag{P[$t$]} \left\{ \begin{array}{l} {\displaystyle \min_{x,y}} \ \ F\left( x,y\right)\\ f\left( x,y\right)-\varphi \left( x\right)+t=0,\\ G_{i}\left( x,y\right)\leq 0 \ \ \text{for all} \ i\in I_{k}=\{1,\cdots ,k\}, \\ g_{j}\left( x,y\right)\leq 0 \ \ \text{for all} \ j\in I_{p}=\{1,\cdots ,p\}. \end{array} \right. \end{equation} \begin{definition}\cite{ye} we say that the unperturbed problem \eqref{llvf} is partially calm at its feasible solution $\left( \bar{x},\bar{y}\right)$ if there is a constant $\lambda > 0$ and a neighborhood $U$ of the triple $\left( \bar{x},\bar{y},0\right)\in {\mathbb{R}}^{n}\times {\mathbb{R}}^{m}\times \mathbb{R}$ such that \begin{equation}\label{calm1100} F\left( x,y\right)-F\left( \bar{x},\bar{y}\right)+\lambda \mid t\mid \geq 0 \ \ \ \text{for all} \ \ \left( x,y,t\right)\in U \ \ \text{feasible to} \ \eqref{calm1}. \end{equation} \end{definition} In the next lemma, we show that the partial calmness to the bilevel program \eqref{llvf} allows us an exact penalization of these problem. In fact, the upper-level objective in our problem may be neither Lipschitz nor continuous, conditions that are usually imposed in existing results; see, e.g., \cite{ye}. However, we formulate another proof here when the upper-level objective function is only lower semi-continuous. \begin{lemma}\label{calm2} Let $\left( \bar{x},\bar{y}\right)$ be a partially calm local optimal solution to the bilevel program \eqref{llvf}, and let the upper-level objective function $F$ be lower semi-continuous at this point. Then $\left( \bar{x},\bar{y}\right)$ is a local optimal solution to the penalized problem \begin{equation}\label{calm3}\tag{P$^{\lambda}$} \left\{ \begin{array}{l} {\displaystyle \min_{x,y}} \ \ \lambda^{-1}F\left( x,y\right)+ f\left( x,y\right)-\varphi\left( x\right) \\ G_{i}\left( x,y\right)\leq 0 \ \ \text{for all} \ i\in I_{k}=\{1,\cdots ,k\}, \\ g_{j}\left( x,y\right)\leq 0 \ \ \text{for all} \ j\in I_{p}=\{1,\cdots ,p\}, \end{array} \right. \end{equation} where $\lambda > 0$ is the constant from the partial calmness condition $\left( \ref{calm1100}\right)$. \end{lemma} \begin{proof} Suppose that $\left( \bar{x},\bar{y}\right)$ is a partially calm local optimal solution to the bilevel program \eqref{llvf} whereas $\left( \bar{x},\bar{y}\right)$ is not a local solution of \eqref{calm3}. First, we can find a parameter $\lambda > 0$ and a neighborhood $U$ of the triple $\left( \bar{x},\bar{y},0\right)\in {\mathbb{R}}^{n}\times {\mathbb{R}}^{m}\times \mathbb{R}$ such that \begin{equation}\label{sci1} F\left( x,y\right)-F\left( \bar{x},\bar{y}\right)+\lambda \mid t\mid \geq 0 \ \ \ \text{for all} \ \ \left( x,y,t\right)\in U \ \ \text{feasible to} \eqref{calm1}. \end{equation} Secondly, there is a sequence $\left( x_{k},y_{k}\right)\in \mathbb{R}^{n}\times\mathbb{R}^{m}$ feasible to \eqref{calm3} and satisfying $\left( x_{k},y_{k}\right)\rightarrow \left( \bar{x},\bar{y}\right)$ such that \begin{equation}\label{sci2} F\left( x_{k},y_{k}\right)+ \lambda \left( f\left( x_{k},y_{k}\right)-\varphi\left( x_{k}\right) \right) < F\left( \bar{x},\bar{y}\right) \end{equation} for all $k\in \mathbb{N}$. 
It follows from the lower semi-continuity of $F$ at $\left( \bar{x},\bar{y}\right)$ that there is $\delta > 0$ and $\eta > 0$ such that $V=\left[ \left( \bar{x},\bar{y}\right)+ \eta \mathbb{B}_{\mathbb{R}^{n}\times \mathbb{R}^{m}}\right] \times \left( -\delta , \delta \right) \subset U$ and that the inequality \begin{equation}\label{sci3} -\lambda \delta \leq F\left( x,y\right)- F\left( \bar{x},\bar{y}\right) \end{equation} holds true for all $\left( x,y\right)\in \left( \bar{x},\bar{y}\right)+ \eta \mathbb{B}_{\mathbb{R}^{n}\times \mathbb{R}^{m}}$. Setting $t_{k}=f\left( x_{k},y_{k}\right)-\varphi\left( x_{k}\right)$, two cases have to be considered.\\ \hspace*{.5cm}Case I. Let $\left( \left( x_{k},y_{k}\right), t_{k}\right) \in V$; then combining $\left( \ref{sci1}\right)$ and $\left( \ref{sci2}\right)$, it follows that \begin{equation*} 0\leq F\left( x_{k},y_{k}\right)-F\left( \bar{x},\bar{y}\right)+ \lambda \left( f\left( x_{k},y_{k}\right)-\varphi\left( x_{k}\right) \right) < 0. \end{equation*} However, this is a contradiction.\\ \hspace*{.5cm}Case II. Assume that $\left( \left( x_{k},y_{k}\right), t_{k}\right) \notin V$. On the one side, since for $k$ large enough $\left( x_{k},y_{k}\right)\in \left( \bar{x},\bar{y}\right)+ \eta \mathbb{B}_{\mathbb{R}^{n}\times \mathbb{R}^{m}}$, one has $t_{k}\notin \left( -\delta , \delta \right)$. On the other side, by definition of optimal value function, we have $t_{k}\geq 0$. Consequently, $t_{k}\geq \delta$. Hence, by $\left( \ref{sci3}\right)$, \begin{equation} -\lambda \left( f\left( x_{k},y_{k}\right)-\varphi\left( x_{k}\right)\right) \leq F\left( x,y\right)- F\left( \bar{x},\bar{y}\right). \end{equation} Combining the latter with $\left( \ref{sci2}\right)$, one obtains \begin{equation*} 0\leq F\left( x_{k},y_{k}\right)-F\left( \bar{x},\bar{y}\right)+ \lambda \left( f\left( x_{k},y_{k}\right)-\varphi\left( x_{k}\right) \right) < 0. \end{equation*} However, this inequality is a contradiction. Since both cases lead to a contradiction, we conclude that $\left( \bar{x},\bar{y}\right)$ is a local optimal solution to the penalized problem \eqref{calm3}. \end{proof} If further assumptions are made about the marginal map $\varphi$ and the lower level objective function, the converse result may be obtained. \begin{lemma} Let $\left( \bar{x},\bar{y}\right)$ with $\bar{y}\in {\mathcal S}\left( \bar{x}\right)$ be a local optimal solution of the penalized problem \eqref{calm3} for some $\lambda >0$. Assume that $f$ and $\varphi$ are locally Lipschitz near $\left( \bar{x},\bar{y}\right)$ with Lipschitz modulus $k_{f}$ and $k_{\varphi}$, respectively. Then, $\left( \bar{x},\bar{y}\right)$ is a partially calm local optimal solution to the bilevel optimization problem \eqref{llvf}. \end{lemma} \begin{proof} Suppose that there exists $\lambda > 0$ such that $\left( \bar{x},\bar{y}\right)$ solves \eqref{calm3} locally. Then, there exists $\delta > 0$ such that we have the inequality \begin{equation}\label{sci4} \lambda^{-1}F\left( \bar{x},\bar{y}\right) \leq \lambda^{-1} F\left( x,y\right)+f\left( x,y\right)-\varphi\left( x\right). \end{equation} for all $\left( x,y \right)\in \left( \bar{x},\bar{y}\right) + \delta \mathbb{B}_{\mathbb{R}^{n}\times \mathbb{R}^{m}}$ feasible for problem \eqref{calm3}. Let us set $\eta =\left\{\frac{\delta}{2k_{f}};\frac{\delta}{2k_{\varphi}};\frac{\delta}{2}\right\}$. 
Then, for any point $\left( x,y\right)\in \left( \bar{x},\bar{y}\right) + \eta \mathbb{B}_{\mathbb{R}^{n}\times \mathbb{R}^{m}}$ feasible for problem \eqref{calm3}, one has \begin{equation*} \begin{array}{lcl} \mid f\left( x,y\right)-\varphi\left( x\right) \mid & = & \mid f\left( x,y\right)-f\left( \bar{x},\bar{y}\right)+\varphi\left( \bar{x}\right) - \varphi\left( x\right) \mid \\ & \leq & k_{f}\parallel \left(x,y\right)- \left( \bar{x},\bar{y}\right)\parallel + k_{\varphi}\parallel x-\bar{x}\parallel\\ & \leq & \delta . \end{array} \end{equation*} Thus, setting $t= \varphi\left( x\right)-f\left( x,y\right)$, the local optimality $\left( \ref{sci4}\right)$ leads to \begin{equation*} F\left( x,y\right)-F\left( \bar{x},\bar{y}\right)+\lambda \mid t\mid \geq 0. \end{equation*} The latter inequality is valid for all $\left( x,y\right)\in \bigg (\left( \bar{x},\bar{y}\right) + \eta \mathbb{B}\bigg )\times \left( -\delta ;\delta\right)$ feasible to \eqref{calm1}. Consequently, $\left( \bar{x},\bar{y}\right)$ is a partially calm local optimal solution to the bilevel program \eqref{llvf}. \end{proof} \subsection{Duality under the Slater condition} In this subsection, we aim to describe the dual problem of the auxiliary problem \eqref{calm3} and characterize the corresponding assumption. To proceed, let the feasible set of problem \eqref{calm3} be denoted by \begin{equation}\label{eq4} \mathcal{S} =\Big\{(x,y)\in\mathbb{R}^{n}\times\mathbb{R}^{m}: G_{i}(x,y)\leqslant 0, \ i\in I_{k}, \ \ g_{j}(x,y)\leqslant 0, \ j\in I_{p} \Big\}, \end{equation} and consider the following dual of problem \eqref{calm3}: \begin{equation}\label{weak-problem-stand}\tag{D$^{\lambda}$} \begin{array}{l} \underset{x^{*} \in \operatorname{dom} \varphi^{*}}{\inf} \underset{ \underset{ \alpha\in\mathbb{R}_{+}^{k}, \beta\in\mathbb{R}_{+}^{p},} {\left( z^{*},q^{\ast}\right) \in \mathbb{R}^{n}\times\mathbb{R}^{m}} }{\sup} \bigg\{ \varphi^{*}\left(x^{*}\right)-(\lambda^{-1}F+ f)^{*}\left( z^{\ast}, q^{\ast}\right)\\ \qquad \qquad \qquad \qquad \qquad \qquad -\left.\left( \displaystyle\sum_{i=1}^{k} \ \alpha_{i} G_{i} +{\displaystyle \sum_{j=1}^{p}} \ \beta_{j}g_{j}\right)^{\ast}\left(x^{\ast}-z^{*},-q^{\ast}\right)\right\}. \end{array} \end{equation} Next, we will characterize the weak and strong duality between \eqref{calm3} and \eqref{weak-problem-stand}. To proceed, and from here on, we denote by $G=\left(G_{1},\cdots ,G_{k}\right)^{T}$ and $g=\left(g_{1},\cdots ,g_{p} \right)^{T}$ the vectors of upper and lower constraint functions, respectively. \begin{theorem}\label{weak-slater-op} Consider problem \eqref{calm3} for some $\lambda > 0$ and let $F,f, g_{1}, \ldots, g_{p},G_{1}, \ldots, G_{k}$ be extended real-valued convex functions on $\mathbb{R}^{n}\times\mathbb{R}^{m}$. Assume that $\varphi$ is lower semicontinuous. Then, the weak duality between \eqref{calm3} and \eqref{weak-problem-stand} holds. Thus is, ${\mathcal V} \eqref{calm3} \geq {\mathcal V} \eqref{weak-problem-stand}$. \end{theorem} \begin{proof} Since $\varphi$ is lower semicontinuous, then the equality $\varphi^{**}(x)=\varphi(x)$ holds. 
Following the standard convexification technique, and using the Fenchel equality for $\varphi(\cdot )$, stated in $(\ref{eq.dual})$, the problem \eqref{calm3} takes the following form \begin{equation}\label{eq6}\tag{P$^{\lambda}$} \underset{ x^{*} \in \operatorname{dom} \varphi^{*} } {\inf} \ \ \underset{ \underset{G\left( x,y\right) \leq 0, \ g\left( x,y\right) \leq 0} {(x,y) \in \mathbb{R}^{n}\times\mathbb{R}^{m}}} { \inf} \bigg\{ \lambda^{-1} F(x,y)+ f(x,y)+ \varphi^{*}\left(x^{*}\right)-\left\langle x^{*}, x\right\rangle\bigg\}. \end{equation} For any $x^{*}\in\operatorname{dom}\varphi^{*}$ consider \begin{equation}\label{parametre}\tag{P$^{\lambda}\left( x^{\ast}\right) $} \underset{ \underset{G\left( x,y\right) \leq 0, \ g\left( x,y\right) \leq 0} {(x,y) \in \mathbb{R}^{n}\times\mathbb{R}^{m}}} { \inf} \bigg\{ \lambda^{-1} F(x,y)+ f(x,y)+ \varphi^{*}\left(x^{*}\right)-\left\langle x^{*}, x\right\rangle\bigg\}, \end{equation} and set $\phi \left( x,y\right) =\lambda^{-1} F(x,y)+ f(x,y)+ \varphi^{*}\left(x^{*}\right)-\left\langle x^{*}, x\right\rangle$. Since, \eqref{parametre} is a convex optimisation problem, its Fenchel-Lagrange duality reformulation is \begin{equation}\label{w-calm-1}\tag{D$^{\lambda}\left(x^{\ast}\right)$} \begin{array}{ll} \underset{ \underset{ \left( s^{*},q^{\ast}\right) \in\mathbb{R}^{n}\times\mathbb{R}^{m} } {\alpha\in\mathbb{R}_{+}^{k}, \beta\in\mathbb{R}_{+}^{p}} }{\sup} & \bigg\{ -\phi^{\ast}\left( s^{*}, q^{\ast}\right)- \left( \displaystyle\sum_{i=1}^{k} \ \alpha_{i} G_{i}+{\displaystyle \sum_{j=1}^{p}} \ \beta_{j} g_{j}\right)^{\ast}\left(-s^{*},-q^{*}\right) \bigg\}. \end{array} \end{equation} On the other hand, by an easy calculation, using the equality stated in $\left( \ref{eqr}\right)$ together with the definition of conjugate function, we get \begin{equation}\label{w-calm-3} \phi^{\ast}\left( s^{*}, q^{\ast}\right)= \left( \lambda^{-1} F+ f\right)^{\ast}\left( s^{*}+x^{\ast}, q^{\ast}\right)-\varphi^{\ast}\left( x^{\ast}\right). \end{equation} Inserting $\left( \ref{w-calm-3}\right)$ in \eqref{w-calm-1}, we obtain \begin{equation}\tag{D$^{\lambda}\left(x^{\ast}\right)$} \begin{array}{l} \underset{ \underset{ \alpha\in\mathbb{R}_{+}^{k}, \beta\in\mathbb{R}_{+}^{p}, } {\left( s^{*},q^{\ast}\right) \in \mathbb{R}^{n}\times\mathbb{R}^{m}} }{\sup} \left\{ \varphi^{*}\left(x^{*}\right)-(\lambda^{-1}F+ f)^{*}\left( s^{*}+x^{\ast}, q^{\ast}\right)\right.\\ \qquad\quad \qquad \qquad \qquad \quad -\left.\left( \displaystyle\sum_{i=1}^{k} \ \alpha_{i} G_{i} +{\displaystyle \sum_{j=1}^{p}} \ \beta_{j}g_{j}\right)^{\ast}\left(-s^{*},-q^{\ast}\right)\right\}. \end{array} \end{equation} Setting $z^{\ast}=s^{\ast}+x^{\ast}$, it follows \begin{equation}\tag{D$^{\lambda}\left(x^{\ast}\right)$} \begin{array}{l} \underset{ \underset{ \alpha\in\mathbb{R}_{+}^{k}, \beta\in\mathbb{R}_{+}^{p},} {\left( z^{*},q^{\ast}\right) \in \mathbb{R}^{n}\times\mathbb{R}^{m}} }{\sup} \left\{ \varphi^{*}\left(x^{*}\right)-(\lambda^{-1}F+ f)^{*}\left( z^{\ast}, q^{\ast}\right)\right.\\ \qquad \quad \qquad \qquad \qquad \quad -\left.\left( \displaystyle\sum_{i=1}^{k} \ \alpha_{i} G_{i} + {\displaystyle \sum_{j=1}^{p}} \ \beta_{j}g_{j}\right)^{\ast}\left(x^{\ast}-z^{*},-q^{\ast}\right)\right\}. \end{array} \end{equation} Taking the infimum of \eqref{w-calm-1} for $x^{*}\in\operatorname{dom} \varphi^{*}$, we arrived at the desired dual problem \eqref{weak-problem-stand}. 
Finally, from \cite[Proposition 2.1]{ref10}, we have ${\mathcal V} \eqref{calm3} \geq {\mathcal V} \eqref{weak-problem-stand}$. \end{proof} In general, the strong duality between \eqref{calm3} and \eqref{weak-problem-stand} does not necessarily hold. To overcome this situation, we introduce the following slater CQ: \begin{equation}\label{cq-slater-lambda} \left\{ \begin{array}{l} \exists (\bar{x},\bar{y}) \in \text{dom} \ \left( \lambda^{-1}F +f\right) \cap {\displaystyle \bigcap^{k}_{i=1}} \ \text{ri} \left( \text{dom} G_{i}\right) \cap {\displaystyle \bigcap^{p}_{j=1}} \ \text{ri} \left( \text{dom} g_{j}\right) \, \text{such that} \\ g_{j}(\bar{x},\bar{y})<0, \ \ \forall j=1,\dots,p,\\ G_{i}(\bar{x},\bar{y})<0, \ \ \forall i=1,\dots,k. \end{array} \right. \end{equation} The following result provides a strong duality result for problem \eqref{calm3}. \begin{theorem} Let $F$, $f$, $g_{1}$, \ldots, $g_{p}$, and $G_{1}$, \ldots, $G_{k}$ be extended real-valued convex functions on $\mathbb{R}^{n}\times\mathbb{R}^{m}$. Moreover, suppose that CQ $\left( \ref{cq-slater-lambda}\right)$ is satisfied and $\varphi$ is lower semicontinuous. Then, the strong duality between \eqref{calm3} and \eqref{weak-problem-stand} holds; that is, ${\mathcal V} \eqref{calm3}={\mathcal V} \eqref{weak-problem-stand}$ and the dual problem \eqref{weak-problem-stand} has an optimal solution. \end{theorem} \begin{proof} Let $x^{\ast}\in \text{dom} \ \varphi^{\ast}$ and consider the following problem \begin{equation}\label{strong-wc-p}\tag{P$^{\lambda}\left(x^{\ast} \right)$} \left\{ \begin{array}{ll} \inf \ \lambda^{-1}F(x, y)+ f(x, y)- \langle x,x^{\ast}\rangle +\varphi^{\ast}(x^{\ast}) & \\ \left( x,y\right) \in \mathbb{R}^{n}\times\mathbb{R}^{m} &\\ G_{i}(x,y)\leqslant 0 \quad \quad i=1,\dots,k, &\\ g_{j}(x,y)\leqslant 0 \quad \quad j=1,\dots,p. \end{array} \right. \end{equation} Since, $\lambda^{-1}F(x, y)+ f(x, y)- \langle x,x^{\ast}\rangle +\varphi^{\ast}(x)$, $G_{i}$, with $i=1,\dots,k$ and $g_{j}$ with $j=1,\dots,p$, are convex and proper we can introduce the Lagrange dual problem to \eqref{strong-wc-p} with $\alpha =\left( \alpha_{1},\cdots ,\alpha_{k}\right)\in \mathbb{R}_{+}^{k}$ and $\beta =\left( \beta_{1},\cdots ,\beta_{k}\right)\in \mathbb{R}_{+}^{p}$ as dual variables \begin{equation}\label{strong-wc-d}\tag{D$^{\lambda}\left(x^{\ast} \right)$} \begin{array}{ll} \underset{\left( \alpha ,\beta\right) \in \mathbb{R}_{+}^{k}\times\mathbb{R}_{+}^{p}}{\sup}\underset{\left( x,y\right) \in \mathbb{R}^{n}\times\mathbb{R}^{m}}{\inf} & \bigg\{ \lambda^{-1}F(x, y)+ f(x, y)- \langle x,x^{\ast}\rangle \\ & +\varphi^{\ast}(x^{\ast})+\displaystyle\sum_{i=1}^{k} \alpha_{i} G_{i}\left(x,y\right) +\sum_{j=1}^{p} \beta_{j}g_{j}\left(x,y\right) \bigg\}. 
\end{array} \end{equation} Since \eqref{cq-slater-lambda} condition holds, it follows from \cite[Theorem 28.2]{rock1} that the optimal objective values of \eqref{strong-wc-p} and its Lagrange dual \eqref{strong-wc-d} are equal and, moreover, there exists an optimal solution $\bar{\alpha} =\left( \bar{\alpha}_{1},\cdots ,\bar{\alpha}_{k}\right)\in \mathbb{R}_{+}^{k}$ and $\bar{\beta} =\left( \bar{\beta}_{1},\cdots ,\bar{\beta}_{k}\right)\in \mathbb{R}_{+}^{p}$ of \eqref{strong-wc-d} such that \begin{equation*} \begin{array}{lcl} {\mathcal V} \eqref{strong-wc-p} & = & \underset{\left( \alpha ,\beta\right) \in \mathbb{R}_{+}^{k}\times\mathbb{R}_{+}^{p}}{\sup}\underset{\left( x,y\right) \in \mathbb{R}^{n}\times\mathbb{R}^{m}}{\inf} \bigg\{ \lambda^{-1}F(x, y)+ f(x, y)- \langle x,x^{\ast}\rangle \\ & & +\varphi^{\ast}(x^{\ast})+\displaystyle\sum_{i=1}^{k} \alpha_{i} G_{i}\left(x,y\right)+\sum_{j=1}^{p} \beta_{j}g_{j}\left(x,y\right) \bigg\} \\ & = & \underset{\left( x,y\right) \in \mathbb{R}^{n}\times\mathbb{R}^{m}}{\inf} \left\{ \lambda^{-1}F(x, y)+ f(x, y)\right.\\ & & \qquad \qquad \qquad - \left.\langle x,x^{\ast}\rangle +\varphi^{\ast}(x)+\displaystyle\sum_{i=1}^{k} \bar{\alpha}_{i} G_{i}\left(x,y\right)+\sum_{j=1}^{p} \bar{\beta}_{j}g_{j}\left(x,y\right)\right\}. \end{array} \end{equation*} Since, $\text{dom} \ \left( \lambda^{-1}F+ f- \langle \cdot ,x^{\ast}\rangle +\varphi^{\ast}(x^{\ast})\right) =dom \left(\lambda^{-1}F+f\right)$, we have $${\displaystyle \bigcap^{k}_{i=1}} \ \text{ri} \ dom \left(G_{i}\right)\cap {\displaystyle \bigcap^{p}_{j=1}} \ \text{ri} \ dom \left(g_{j}\right) \cap \text{ri} \ dom \left(\lambda^{-1}F+f\right) \neq\emptyset .$$ Hence, one can deduce from Proposition \ref{inf-sum} that \begin{equation*} \begin{array}{lcl} {\mathcal V} \eqref{strong-wc-p} & = & -\bigg( \lambda^{-1}F+ f- x^{\ast} +\varphi^{\ast}(x^{\ast})+\displaystyle\sum_{i=1}^{k} \bar{\alpha}_{i} G_{i}+\sum_{j=1}^{p} \bar{\beta}_{j}g_{j} \bigg)^{\ast}\left( 0\right) \\ & = & \underset{ \left( s^{\ast} ,q^{\ast}\right) \in \mathbb{R}^{n}\times\mathbb{R}^{m}} {\sup} \bigg\{ \varphi^{*}\left(x^{*}\right)-(\lambda^{-1}F+ f)^{*}\left( s^{*}+x^{\ast},q^{\ast}\right)\\ & & \qquad \qquad \qquad \quad-\left.\left( {\displaystyle\sum_{i=1}^{k}} \ \bar{\alpha}_{i} G_{i} +{\displaystyle \sum_{j=1}^{p}} \bar{\beta}_{j}g_{j}\right)^{\ast}\left(-s^{\ast},-q^{\ast}\right)\right\} \end{array} \end{equation*} and that there exists $\bar{s}^{\ast}\in\mathbb{R}^{n}$ and $\bar{q}^{\ast}\in\mathbb{R}^{m}$ such that the supremum is attained. Thus \begin{equation*} {\mathcal V} \eqref{strong-wc-p} = \varphi^{*}\left(x^{*}\right)-(\lambda^{-1}F+ f)^{*}\left( \bar{s}^{\ast}+x^{\ast},\bar{q}^{\ast}\right)-\left( {\displaystyle\sum_{i=1}^{k}} \ \bar{\alpha}_{i} G_{i} +{\displaystyle \sum_{j=1}^{p}} \bar{\beta}_{j}g_{j}\right)^{\ast}\left(-\bar{s}^{\ast},-\bar{q}^{\ast}\right). \end{equation*} Setting $\bar{z}^{\ast}=\bar{s}^{\ast}+x^{\ast}$, we get \begin{equation*} {\mathcal V} \eqref{strong-wc-p} = \varphi^{*}\left(x^{*}\right)-(\lambda^{-1}F+ f)^{*}\left( \bar{z}^{\ast},\bar{q}^{\ast}\right)-\left( {\displaystyle\sum_{i=1}^{k}} \ \bar{\alpha}_{i} G_{i} +{\displaystyle \sum_{j=1}^{p}} \bar{\beta}_{j}g_{j}\right)^{\ast}\left(x^{\ast}-\bar{z}^{\ast},-\bar{q}^{\ast}\right). \end{equation*} Hence, ${\mathcal V} \eqref{strong-wc-p}={\mathcal V} \eqref{strong-wc-d}$. Since, $x^{\ast}$ is arbitrary choosen in $\text{dom} \ \varphi^{\ast}$, then ${\mathcal V} \eqref{calm3}={\mathcal V} \eqref{weak-problem-stand}$. 
Thus, the proof is complete. \end{proof} \begin{remark} To compute the conjugate function of the value fucntion $\varphi$, we asume that $K\left( x\right)$ is independent of $x$. Thus, we set for all $x\in\mathbb{R}^{n}$, $K\left( x\right) =K=\{y\in\mathbb{R}^{m}|g\left( y\right) \leq 0\}$. Then \begin{equation*} \varphi^{*}(x^{*}) = (f+\delta_{\mathbb{R}^{n}\times K})^{*}(0,x^{*}) \end{equation*} and we have $$ \begin{aligned} \varphi^{*}(x^{*}) &=\underset{x\in\mathbb{R}^{n}}{\sup} \bigg\{\langle x^{*}, x\rangle-\varphi(x): x \in \mathbb{R}^{n}\bigg\} \\ &=\underset{x\in\mathbb{R}^{n}}{\sup}\bigg\{\langle x^{*}, x\rangle-\underset{y\in\mathbb{R}^{m}}{\inf}\Big\{ f(x, y)+\delta_{K}(y)\Big\}\bigg\} \\ &=\underset{x\in\mathbb{R}^{n}}{\sup} \ \underset{y\in\mathbb{R}^{m}}{\sup}\bigg\{\langle x^{*}, x\rangle-f(x,y)-\delta_{K}(y)\bigg\}\\ &=\underset{x,y}{\sup} \bigg\{\langle x^{*}, x\rangle-(f(x, y)+\delta_{K}(y))\bigg\} \\ & =(f+\delta_{\mathbb{R}^{n}\times K})^{*}(0,x^{*}). \end{aligned} $$ \end{remark} \subsection{Duality under further regularity conditions} In general, weak duality (hence, strong duality) between \eqref{calm3} and \eqref{weak-problem-stand} does not necessarily hold in many situations, including when $\varphi$ is not lower semicontinuous. In order to describe the weak and strong dualities between \eqref{calm3} and \eqref{weak-problem-stand}, we modify the structure of the dual problem as \begin{equation}\label{form-3}\tag{D$_{m}^{\lambda}$} \underset{x^{*} \in \operatorname{dom} \varphi^{*}}{\inf} \underset{\underset{\alpha\in\mathbb{R}_{+}^{k}, \beta\in\mathbb{R}_{+}^{p}, \left( z^{*},q^{\ast}\right) \in\operatorname{dom}F^{*}\cap\operatorname{dom}f^{*}}{(u_{i}^{*},v_{i}^{*})\in\operatorname{dom}G_{i}^{*}, (u_{j}^{*},v_{j}^{*})\in\operatorname{dom}g_{j}^{*}} }{\sup} \bigg\{ \psi \left(x^{\ast},z^{\ast},q^{\ast}, u^{\ast},v^{\ast},\alpha ,\beta\right) -\delta^{\ast}_{\mathbb{R}^{n}\times\mathbb{R}^{m}} \left( s^{\ast},t^{\ast}\right) \bigg\}, \end{equation} where $\psi$ is given by \begin{equation*} \psi \left(x^{\ast},z^{\ast},q^{\ast}, u^{\ast},v^{\ast},\alpha ,\beta\right) =\varphi^{*}\left(x^{*}\right)-(\lambda^{-1}F+ f)^{*}\left( z^{*},q^{\ast}\right)-\displaystyle\sum_{i=1}^{k} \alpha_{i} G_{i}^{*}\left(u_{i}^{*},v_{i}^{*}\right)-\sum_{j=1}^{p} \beta_{j}g_{j}^{*}\left(u_{j}^{*},v_{j}^{*}\right) \end{equation*} with $u^{\ast}=\left( u^{\ast}_{1},\cdots ,u^{\ast}_{k+p}\right)$, $v^{\ast}=\left( v^{\ast}_{1},\cdots ,v^{\ast}_{k+p}\right) $, $s^{\ast}= x^{\ast}- z^{*}-\displaystyle\sum_{i=1}^{k} \alpha_{i} u_{i}^{*}-\sum_{j=1}^{p} \beta_{j}u_{j}^{*}$, \\ and $t^{\ast}=-q^{\ast}-\displaystyle\sum_{i=1}^{k} \alpha_{i} v_{i}^{*}-\sum_{j=1}^{p} \beta_{j} v_{j}^{*}$. Secondly, we exploit the semi-further regularity condition (SFRC) and further regularity condition (SFRC) introduced in \cite{ref3}. The cited regularities are based on the properties of the epigraph of the conjugate functions. To proceed, we set \begin{equation*} \Gamma =\operatorname{epi}(\lambda^{-1}F+f)^{*}, \ \ \Lambda =\underset{i=1}{\bigcup^{k}}\operatorname{epi}G_{i}^{*}, \ \ \Pi =\underset{j=1}{\bigcup^{p}}\operatorname{epi}g_{j}^{*}. 
\end{equation*} Based on the above sets, we introduce the characteristic set ${\mathcal{R}}$ defined by \begin{equation*} {\mathcal{R}}:=\underset{x^{*}\in\operatorname{dom}{\varphi}^{*}}{\bigcap} \big( \Gamma + \operatorname{cone}\left( \Lambda \cup \Pi\right) +\operatorname{epi}\delta^{*}_{\mathbb{R}^{n}\times\mathbb{R}^{m} }-\left(\left( x^{*},0\right) ;{\varphi}^{*}(x^{*})\right) \big). \end{equation*} Considering the possible relationships between $\mathcal{R}$ and $\operatorname{epi}\left(\lambda^{-1}F+f-\varphi+\delta_{\mathcal{\mathcal{S}}}\right)^{*}$, we introduce the following regularity conditions. \begin{definition} The family $\left((\lambda^{-1}F+f),\varphi,\delta_{\mathbb{R}^{n}\times\mathbb{R}^{m}},G_{i},g_{j}\right)$, $i=1,\dots,k$, $j=1,\dots,p $ is said to satisfy the semi-further regularity condition (SFRC) if \begin{equation}\label{sfrc-def} \mathcal{R}\cap\left(\{0_{\mathbb{R}^{n}\times\mathbb{R}^{m}}\}\times\mathbb{R}\right)\subseteq\operatorname{epi}\left(\lambda^{-1}F+f-\varphi+\delta_{\mathcal{S}}\right)^{*}\cap\left(\{0_{\mathbb{R}^{n}\times\mathbb{R}^{m}}\}\times\mathbb{R}\right). \end{equation} If equality holds in $\left( \ref{sfrc-def}\right) $, we say that the further regularity condition (FRC) is satisfied. \end{definition} \begin{remark} In general, neither \begin{equation*} \mathcal{R}\subseteq\operatorname{epi}\left((\lambda^{-1}F+f)-\varphi+\delta_{\mathcal{S}}\right)^{*} \ \ \text{nor} \ \ \operatorname{epi}\left((\lambda^{-1}F+f)-\varphi+\delta_{\mathcal{S}}\right)^{*} \subseteq\mathcal{R} \end{equation*} holds necessarily. But when $\varphi$ is convex, one gets \begin{equation*} \mathcal{R}\subseteq\operatorname{epi}\left((\lambda^{-1}F+f)-\varphi+\delta_{\mathcal{S}}\right)^{*}. \end{equation*} Indeed, by Proposition $\ref{cl2}\ (ii)$, we have \begin{equation*} \begin{aligned} \left((\lambda^{-1}F+f)-\varphi\right)^{*}&=\left(\inf\left(\lambda^{-1}F+f-x^{*}+{\varphi}^{*}(x^{*})\right) \right)^{*}\\ &= \sup\left(\lambda^{-1}F+f-x^{*}+{\varphi}^{*}(x^{*})\right)^{*}. \end{aligned} \end{equation*} This, together with $(\ref{eqr-1}) \ \text{and Proposition}\ \ref{cl2}\ (i)$, implies that \begin{equation*} \begin{aligned} \operatorname{epi}\left((\lambda^{-1}F+f)-\varphi\right)^{*}&=\underset{x^{*}\in\operatorname{dom}\varphi^{*}}{\bigcap}\operatorname{epi}\left(\lambda^{-1}F+f-x^{*}+{\varphi}^{*}(x^{*})\right)^{*}\\ &= \underset{x^{*}\in\operatorname{dom}\varphi^{*}}{\bigcap} \operatorname{epi}(\lambda^{-1}F+f)^{*}-(x^{*},{\varphi}^{*}(x^{*})). \end{aligned} \end{equation*} Thus, \begin{equation*} \begin{array}{l} \operatorname{epi}\left((\lambda^{-1}F+f)-\varphi\right)^{*}+\operatorname{cone}\left( \Lambda \cup\Pi\right) +\operatorname{epi}\delta^{*}_{\mathbb{R}^{n}\times\mathbb{R}^{m} }\\ = \underset{x^{*}\in\operatorname{dom}{\varphi}^{*}}{\displaystyle\bigcap}\left(\Gamma +\operatorname{cone}\left( \Lambda \cup\Pi\right) +\operatorname{epi}\delta^{*}_{\mathbb{R}^{n}\times\mathbb{R}^{m} }-(x^{*},{\varphi}^{*}(x^{*}))\right)\\ = \mathcal{R}. \end{array} \end{equation*} On the other hand, using the equality $(3.5)$ in \cite{ref6}, we obtain \begin{equation*} \operatorname{epi}\left((\lambda^{-1}F+f)-\varphi\right)^{*}+\operatorname{cone}\left( \Lambda \cup\Pi\right) +\operatorname{epi}\delta^{*}_{\mathbb{R}^{n}\times\mathbb{R}^{m} } \subseteq \operatorname{epi}\left((\lambda^{-1}F+f)-\varphi+\delta_{\mathcal{S}}\right)^{*}. 
\end{equation*} Consequently, \begin{equation*} \mathcal{R}\subseteq\operatorname{epi}\left((\lambda^{-1}F+f)-\varphi+\delta_{\mathcal{S}}\right)^{*}. \end{equation*} \end{remark} \begin{remark}\label{r.s.d} It can be seen that the strong duality for \eqref{calm3} and \eqref{form-3} holds if and only if ${\mathcal V}\eqref{calm3}={\mathcal V}\eqref{form-3}$ and for any $x^{*} \in \operatorname{dom} \varphi^{*}$, we can find points $\left(z^{\ast},q^{\ast} \right)\in \operatorname{dom}(\lambda^{-1}F+f)^{*}$, $\left(u_{i}^{\ast},v_{i}^{\ast} \right)\in \operatorname{dom}G_{i}^{*}$, $i=1,\cdots ,k$, $\left(u_{j}^{\ast},v_{j}^{\ast} \right)\in \operatorname{dom}g_{j}^{*}$, $j=1,\cdots ,p$, $\mu_{i}\in\mathbb{R}_{+}$, $i=1,\cdots ,k$, $\nu_{j}\in \mathbb{R}_{+}$, $j=1,\cdots ,p$ and $\left(s^{\ast},t^{\ast}\right) \in \mathbb{R}^{n}\times\mathbb{R}^{m}$ such that we have \begin{equation*} \varphi\left( x^{\ast}\right)-(\lambda^{-1}F+f)^{*}\left(z^{\ast},q^{\ast} \right)-{\displaystyle \sum^{k}_{i=1}} \ \mu_{i}G^{\ast}_{i}\left(u_{i}^{\ast},v_{i}^{\ast} \right)-{\displaystyle \sum^{p}_{j=1}} \ \nu_{j}g^{\ast}_{j}\left(u_{j}^{\ast},v_{j}^{\ast} \right)-\delta^{*}_{\mathbb{R}^{n}\times\mathbb{R}^{m} }\left(s^{\ast},t^{\ast}\right) \geq {\mathcal V}\eqref{form-3}, \end{equation*} with $s^{\ast} = x^{\ast}- z^{*}-\displaystyle\sum_{i=1}^{k} \mu_{i} u_{i}^{*}-\sum_{j=1}^{p} \nu_{j}u_{j}^{*}$, and $t^{\ast} = -q^{\ast}-\displaystyle\sum_{i=1}^{k} \mu_{i} v_{i}^{*}-\sum_{j=1}^{p} \nu_{j} v_{j}^{*}$. \end{remark} The following theorems provide weak and strong Fenchel–Lagrange duality results with regards to an optimization problem of the form \eqref{calm3}. \begin{theorem}[Weak Fenchel–Lagrange duality]\label{w.d} Let $\lambda>0$ and $F$, $f$, $G_{i}$, $g_{j}$, $i=1,\dots,k$, $j=1,\dots, p$ be proper convex functions. The family $\left((\lambda^{-1}F+f),\; \varphi,\delta_{\mathbb{R}^{n}\times\mathbb{R}^{m}},\; G_{i},\; g_{j}\right)$, $i=1,\dots,k$, $j=1,\dots,p$ satisfies (SFRC) if and only if the weak duality between \eqref{calm3} and \eqref{form-3} holds. \end{theorem} \begin{proof} Suppose that the weak duality between \eqref{calm3} and \eqref{form-3} does not hold. Then, \[ {\mathcal V}\eqref{calm3}<-\zeta <{\mathcal V}\eqref{form-3} \] for some $\zeta\in\mathbb{R}$. From \eqref{form-3}, for any $x^{*}\in\operatorname{dom}{\varphi}^{*}$, there exist $\alpha\in\mathbb{R}_{+}^{k}$, $\beta\in\mathbb{R}_{+}^{p}$, $\left( z^{*},q^{\ast}\right) \in\operatorname{dom} F^{*}\cap\operatorname{dom}f^{*}$, $(u_{i}^{*},v_{i}^{*})\in\operatorname{dom}G_{i}^{*}$, and $(u_{j}^{*},v_{j}^{*})\in\operatorname{dom}g_{j}^{*}$ such that \begin{equation*} \varphi^{*}\left(x^{*}\right)-(\lambda^{-1}F+ f)^{*}\left( z^{*},q^{\ast}\right)-\displaystyle\sum_{i=1}^{k} \alpha_{i} G_{i}^{*}\left(u_{i}^{*},v_{i}^{*}\right)-\sum_{j=1}^{p} \beta_{j}g_{j}^{*}\left(u_{j}^{*},v_{j}^{*}\right)-\delta^{\ast}_{\mathbb{R}^{n}\times\mathbb{R}^{m}} \left( s^{\ast},t^{\ast}\right) \geqslant -\zeta , \end{equation*} with $s^{\ast} = x^{\ast}- z^{*}-\displaystyle\sum_{i=1}^{k} \alpha_{i} u_{i}^{*}-\sum_{j=1}^{p} \beta_{j}u_{j}^{*}$, and $t^{\ast}=-q^{\ast}-\displaystyle\sum_{i=1}^{k} \alpha_{i} v_{i}^{*}-\sum_{j=1}^{p} \beta_{j} v_{j}^{*}$. 
Thus, \begin{equation*} \delta^{\ast}_{\mathbb{R}^{n}\times\mathbb{R}^{m}} \left( s^{\ast},t^{\ast}\right) \leq \varphi^{*}\left(x^{*}\right)-(\lambda^{-1}F+ f)^{*}\left( z^{*},q^{\ast}\right)-\displaystyle\sum_{i=1}^{k} \alpha_{i} G_{i}^{*}\left(u_{i}^{*},v_{i}^{*}\right)-\sum_{j=1}^{p} \beta_{j}g_{j}^{*}\left(u_{j}^{*},v_{j}^{*}\right) +\zeta . \end{equation*} It follows that \begin{equation*} \left( \left( s^{\ast}, t^{*}\right) ; \varphi^{*}\left(x^{*}\right)-(\lambda^{-1}F+ f)^{*}\left( z^{*},q^{\ast}\right)-\displaystyle\sum_{i=1}^{k} \alpha_{i} G_{i}^{*}\left(u_{i}^{*},v_{i}^{*}\right)-\sum_{j=1}^{p} \beta_{j}g_{j}^{*}\left(u_{j}^{*},v_{j}^{*}\right) +\zeta\right) \in \text{epi} \delta^{\ast}_{\mathbb{R}^{n}\times\mathbb{R}^{m}}. \end{equation*} Or equivalently, \begin{equation*} \begin{array}{l} \left( 0,\zeta\right) \in \left( -\left( s^{\ast}, t^{*}\right) ; -\varphi^{*}\left(x^{*}\right)+(\lambda^{-1}F+ f)^{*}\left( z^{*},q^{\ast}\right)\right.\\ \qquad \qquad \qquad \quad \left.+\displaystyle\sum_{i=1}^{k} \alpha_{i} G_{i}^{*}\left(u_{i}^{*},v_{i}^{*}\right)-\sum_{j=1}^{p} \beta_{j}g_{j}^{*}\left(u_{j}^{*},v_{j}^{*}\right) \right) + \text{epi} \delta^{\ast}_{\mathbb{R}^{n}\times\mathbb{R}^{m}}. \end{array} \end{equation*} Writing $\left( s^{\ast}, t^{*}\right)$ as \begin{equation*} \left( s^{\ast}, t^{*}\right)=-\left( z^{*},q^{*}\right)-\left(\displaystyle\sum_{i=1}^{k} \alpha_{i}\left( u_{i}^{*} ,v_{i}^{*}\right)\right) -\left(\displaystyle\sum_{j=1}^{p} \beta_{i}\left( u_{j}^{*} ,v_{j}^{*}\right)\right) +\left( x^{\ast},0\right), \end{equation*} one deduces that \begin{equation*} \begin{array}{lcl} \left( 0;\zeta\right) & \in & \left( \left( z^{\ast},q^{\ast}\right);(\lambda^{-1}F+ f)^{*}\left(z^{*},q^{*}\right)\right) -\left( \left( x^{\ast},0\right);\varphi^{*}\left(x^{*}\right) \right) \\ & & +\displaystyle\sum_{i=1}^{k} \alpha_{i} \left( \left( u_{i}^{*} ,v_{i}^{*}\right); G^{\ast}_{i}\left( u_{i}^{*} ,v_{i}^{*}\right)\right) \displaystyle\sum_{j=1}^{p} \beta_{j} \left( \left( u_{j}^{*} ,v_{j}^{*}\right); g^{\ast}_{j}\left( u_{j}^{*} ,v_{j}^{*}\right)\right) + \operatorname{epi}\delta^{*}_{\mathbb{R}^{n}\times\mathbb{R}^{m} }. \end{array} \end{equation*} Consequently, \begin{equation*} \left( 0;\zeta\right) \in \big( \Gamma + \operatorname{cone}\left( \Lambda \cup \Pi\right) +\operatorname{epi}\delta^{*}_{\mathbb{R}^{n}\times\mathbb{R}^{m} }-\left(\left( x^{*},0\right) ;{\varphi}^{*}(x^{*})\right). \end{equation*} Since $x^{\ast}\in \operatorname{dom} \varphi^{\ast}$ is arbitrary, we have that $(0,\zeta)\in \mathcal{R}$. Now, since the family\\ $\left((\lambda^{-1}F+f),\; \varphi,\; \delta_{\mathbb{R}^{n}\times\mathbb{R}^{m}}\right)$, $G_{i}$, $g_{j}$ for $i=1,\dots,k$, $j=1,\dots,p$ satisfies (SFRC), we have that $$(0,\zeta)\in\operatorname{epi}\left(\lambda^{-1}F+f-\varphi+\delta_{\mathcal{S}}\right)^{*} .$$ Remarking that \begin{equation*} {\mathcal V}\eqref{calm3}=\inf\{\left( \lambda^{-1}F+f\right)\left( x,y\right) -\varphi\left( x\right) +\delta_{\mathcal S}\left( x,y\right) \}=-\left( \lambda^{-1}F+f -\varphi +\delta_{\mathcal S} \right)^{\ast} \left( 0\right), \end{equation*} one can deduce \begin{equation}\label{lemme-i} (0,\zeta)\in\operatorname{epi}\left(\lambda^{-1}F+f-\varphi+\delta_{\mathcal{S}}\right)^{*} \ \ {\mathcal V}\eqref{calm3}\geqslant -\zeta , \end{equation} Hence, ${\mathcal V}\eqref{calm3}\geqslant- \zeta$. This contradicts ${\mathcal V}\eqref{calm3}<-\zeta$. Thus, the weak duality between problem \eqref{calm3} and problem \eqref{form-3} holds. 
Conversely, suppose the weak duality between \eqref{calm3} and \eqref{form-3} holds. Lets $ (0,\zeta)\in \mathcal{R}$ and $x^{\ast}\in \text{dom} \varphi$. By definition, we have \begin{equation*} \left( x^{\ast},\zeta +\varphi\left( x^{\ast}\right)\right) \in \operatorname{epi}(\lambda^{-1}F+f)^{*}+ \operatorname{cone}\left( \underset{i=1}{\bigcup^{k}}\operatorname{epi}G_{i}^{*} \cup \underset{j=1}{\bigcup^{p}}\operatorname{epi}g_{j}^{*}\right) +\operatorname{epi}\delta^{*}_{\mathbb{R}^{n}\times\mathbb{R}^{m} }. \end{equation*} Then there exist $\left( \left(z^{\ast},q^{\ast} \right);\gamma\right)\in \operatorname{epi}(\lambda^{-1}F+f)^{*}$, \\$\left( \left(u_{i}^{\ast},v_{i}^{\ast} \right);\gamma_{i}\right)\in \operatorname{epi}G_{i}^{*}$, $i=1,\cdots ,k$, $\left( \left(u_{j}^{\ast},v_{j}^{\ast} \right);\gamma_{j}\right)\in \operatorname{epi}g_{j}^{*}$, $j=1,\cdots ,p$, $\mu_{i}\in\mathbb{R}_{+}$, $i=1,\cdots ,k$, $\nu_{j} \in\mathbb{R}_{+}$, $j=1,\cdots ,p$ and $\left( \left(s^{\ast},t^{\ast}\right) ;\sigma \right) \in \operatorname{epi}\delta^{*}_{\mathbb{R}^{n}\times\mathbb{R}^{m} }$ such that \begin{equation*} \left( \left( x^{\ast},0\right),\zeta +\varphi\left( x^{\ast}\right)\right)=\left( \left(z^{\ast},q^{\ast} \right);\gamma\right)+{\displaystyle \sum^{k}_{i=1}} \ \mu_{i}\left( \left(u_{i}^{\ast},v_{i}^{\ast} \right);\gamma_{i}\right)+{\displaystyle \sum^{p}_{j=1}} \ \nu_{i}\left( \left(u_{j}^{\ast},v_{j}^{\ast} \right);\gamma_{j}\right)+\left( \left(s^{\ast},t^{\ast}\right) ;\sigma \right). \end{equation*} Or equivalently, \begin{equation}\label{expression1} \begin{array}{rcl} \left( x^{\ast},0\right) & = & \left(z^{\ast},q^{\ast} \right)+{\displaystyle \sum^{k}_{i=1}} \ \mu_{i} \left(u_{i}^{\ast},v_{i}^{\ast} \right)+{\displaystyle \sum^{p}_{j=1}} \ \nu_{i}\left(u_{j}^{\ast},v_{j}^{\ast} \right)+\left(s^{\ast},t^{\ast}\right),\\ \zeta +\varphi\left( x^{\ast}\right) & = &\gamma +{\displaystyle \sum^{k}_{i=1}} \ \mu_{i}\gamma_{i}+{\displaystyle \sum^{p}_{j=1}} \ \nu_{j}\gamma_{j}+\sigma . \end{array} \end{equation} Using the definition of epigraph, we obtain \begin{equation*} (\lambda^{-1}F+f)^{*}\left(z^{\ast},q^{\ast} \right)+{\displaystyle \sum^{k}_{i=1}} \ \mu_{i}G^{\ast}_{i}\left(u_{i}^{\ast},v_{i}^{\ast} \right)+{\displaystyle \sum^{p}_{j=1}} \ \nu_{j}g^{\ast}_{j}\left(u_{j}^{\ast},v_{j}^{\ast} \right)+\delta^{*}_{\mathbb{R}^{n}\times\mathbb{R}^{m} }\left(s^{\ast},t^{\ast}\right)\leq \zeta +\varphi\left( x^{\ast}\right). \end{equation*} Hence, \begin{equation*} -\zeta \leq \varphi\left( x^{\ast}\right)-(\lambda^{-1}F+f)^{*}\left(z^{\ast},q^{\ast} \right)-{\displaystyle \sum^{k}_{i=1}} \ \mu_{i}G^{\ast}_{i}\left(u_{i}^{\ast},v_{i}^{\ast} \right)-{\displaystyle \sum^{p}_{j=1}} \ \nu_{j}g^{\ast}_{j}\left(u_{j}^{\ast},v_{j}^{\ast} \right)-\delta^{*}_{\mathbb{R}^{n}\times\mathbb{R}^{m} }\left(s^{\ast},t^{\ast}\right). \end{equation*} Using the first equality in $\left( \ref{expression1}\right) $, we get ${\mathcal V}\eqref{form-3}\geqslant-\zeta$. Then ${\mathcal V}\eqref{calm3}\geqslant {\mathcal V}\eqref{form-3}\geqslant - \zeta$, which implies that $(0,\zeta)\in\operatorname{epi}\left(\lambda^{-1}F+f-\varphi+\delta_{\mathcal{S}}\right)^{*} $ by $\left( \ref{lemme-i}\right) $. Then the family $\left( (\lambda^{-1}F+f),\varphi,\delta_{\mathbb{R}^{n}\times\mathbb{R}^{m}}, G_{i},g_{j}\right)$, $i=1,\dots,k$, $j=1,\dots,p $ satisfies (SFRC). 
\end{proof} \begin{theorem}[Strong Fenchel–Lagrange duality]\label{S.d} Let $\lambda>0$ and $F,f,G_{i},g_{j},i=1,\dots,k,j=1,\dots,p$ be proper convex functions. The family $\left((\lambda^{-1}F+f),\varphi,\delta_{\mathbb{R}^{n}\times\mathbb{R}^{m}},G_{i},g_{j}\right)$, $i=1,\dots,k$, $j=1,\dots,p $ satisfies (FRC) if and only if the strong duality between \eqref{calm3} and \eqref{form-3} holds. \end{theorem} \begin{proof} Suppose that the family $\left((\lambda^{-1}F+f),\varphi,\delta_{\mathbb{R}^{n}\times\mathbb{R}^{m}},G_{i},g_{j}\right)$, $i=1,\dots,k$, $j=1,\dots,p $ satisfies $(F R C)$. Then, it satisfies $(S F R C)$. By Theorem $\ref{w.d}$ , ${\mathcal V}\eqref{calm3} \geqslant {\mathcal V}\eqref{form-3}$. Now, If ${\mathcal V}\eqref{calm3}=-\infty$, then we get strong duality for \eqref{calm3} and \eqref{form-3} via weak duality. So, we assume that ${\mathcal V}\eqref{calm3}=-\zeta \in \mathbb{R}$. Then, from $\left( \ref{lemme-i}\right) $, $$ (0, \zeta) \in \operatorname{epi}\left(\lambda^{-1}F+f-\varphi+\delta_{\mathcal{S}}\right)^{*}. $$ Since the family $\left((\lambda^{-1}F+f),\varphi,\delta_{\mathbb{R}^{n}\times\mathbb{R}^{m}},G_{i},g_{j}\right)$, $i=1,\dots,k$, $j=1,\dots,p $ satisfies $(F R C)$, we have that $(0, \zeta) \in \mathcal R$. Thus, from the proof of Theorem \ref{w.d}, ${\mathcal V}\eqref{form-3} \geqslant-\zeta$ and for any $x^{*} \in \operatorname{dom} \varphi^{*}$, there exist $\left( \left(z^{\ast},q^{\ast} \right);\gamma\right)\in \operatorname{epi}(\lambda^{-1}F+f)^{*}$, $\left( \left(u_{i}^{\ast},v_{i}^{\ast} \right);\gamma_{i}\right)\in \operatorname{epi}G_{i}^{*}$, $i=1,\cdots ,k$, $\left( \left(u_{j}^{\ast},v_{j}^{\ast} \right);\gamma_{j}\right)\in \operatorname{epi}g_{j}^{*}$, $j=1,\cdots ,p$, $\mu_{i}\in\mathbb{R}_{+}$, $i=1,\cdots ,k$, $\nu_{j}\in\mathbb{R}_{+}$, $j=1,\cdots ,p$, and $\left( \left(s^{\ast},t^{\ast}\right) ;\sigma \right) \in \operatorname{epi}\delta^{*}_{\mathbb{R}^{n}\times\mathbb{R}^{m} }$ such that \begin{equation*} -\zeta \leq \varphi\left( x^{\ast}\right)-(\lambda^{-1}F+f)^{*}\left(z^{\ast},q^{\ast} \right)-{\displaystyle \sum^{k}_{i=1}} \ \mu_{i}G^{\ast}_{i}\left(u_{i}^{\ast},v_{i}^{\ast} \right)-{\displaystyle \sum^{p}_{j=1}} \ \nu_{j}g^{\ast}_{j}\left(u_{j}^{\ast},v_{j}^{\ast} \right)-\delta^{*}_{\mathbb{R}^{n}\times\mathbb{R}^{m} }\left(s^{\ast},t^{\ast}\right). \end{equation*} By Remark $\ref{r.s.d}$, we obtain the strong duality between \eqref{calm3} and \eqref{form-3}. 
Conversely, assuming that strong duality between \eqref{calm3} and \eqref{form-3} holds, then by Remark $\ref{r.s.d}$, we have the equality ${\mathcal V}\eqref{calm3}={\mathcal V}\eqref{form-3}$ and for any $x^{*} \in \operatorname{dom} \varphi^{*}$, there exist $\left(z^{\ast},q^{\ast} \right)\in \operatorname{dom}(\lambda^{-1}F+f)^{*}$, $\left(u_{i}^{\ast},v_{i}^{\ast} \right)\in \operatorname{dom}G_{i}^{*}$, $i=1,\cdots ,k$, $\left(u_{j}^{\ast},v_{j}^{\ast} \right)\in \operatorname{dom}g_{j}^{*}$, $j=1,\cdots ,p$, $\mu_{i}\in\mathbb{R}_{+}$, $i=1,\cdots ,k$, $\nu_{j}\in\mathbb{R}_{+}$, $j=1,\cdots ,p$ and $\left(s^{\ast},t^{\ast}\right) \in \mathbb{R}^{n}\times\mathbb{R}^{m}$ such that we have \begin{equation*} \varphi\left( x^{\ast}\right)-(\lambda^{-1}F+f)^{*}\left(z^{\ast},q^{\ast} \right)-{\displaystyle \sum^{k}_{i=1}} \ \mu_{i}G^{\ast}_{i}\left(u_{i}^{\ast},v_{i}^{\ast} \right)-{\displaystyle \sum^{p}_{j=1}} \ \nu_{j}g^{\ast}_{j}\left(u_{j}^{\ast},v_{j}^{\ast} \right)-\delta^{*}_{\mathbb{R}^{n}\times\mathbb{R}^{m} }\left(s^{\ast},t^{\ast}\right) \geqslant {\mathcal V}\eqref{form-3}, \end{equation*} with $s^{\ast}= x^{\ast}- z^{*}-\displaystyle\sum_{i=1}^{k} \alpha_{i} u_{i}^{*}-\sum_{j=1}^{p} \beta_{j}u_{j}^{*}$, and $t^{\ast}=-q^{\ast}-\displaystyle\sum_{i=1}^{k} \alpha_{i} v_{i}^{*}-\sum_{j=1}^{p} \beta_{j} v_{j}^{*}$.\\ Let $(0, \zeta) \in \operatorname{epi}\left(\lambda^{-1}F+f-\varphi+\delta_{\mathcal{S}}\right)^{*}$. By $\left( \ref{lemme-i}\right) $, ${\mathcal V}\eqref{calm3} \geqslant-\zeta$. Then, from the strong duality, ${\mathcal V}\eqref{form-3}={\mathcal V}\eqref{calm3} \geqslant-\zeta$. By the proof of Theorem \ref{w.d}, $(0, \zeta) \in \mathcal{R}$ and hence, the result.\end{proof} \subsection{Duality under the closedness condition} In the above subsections, the Slater condition, (FRC), and (SFRC) are used. However, these conditions are often not satisfied for many problems in applications. To overcome this drawback, we use another constraint qualification, labeled as ``closedness condition'', which has been developed and used for convex (infinite) optimization problems. In this case, a Toland-Fenchel-Lagrange duality is investigated. To proceed, let us assume that $\mathcal{S}\cap\operatorname{dom}(\lambda^{-1}F+ f)\neq\emptyset$ and $F$, $f$, and $\varphi$ are functions such that $F$ and $f$ are proper lower semicontinuous (l.s.c.) and convex, while $\varphi$ is proper convex and satisfies $\varphi^{**}=\varphi$. Here, $\mathcal{S}$ is the feasible set of \eqref{calm3} stated in $\left( \ref{eq4}\right)$. We denote ${\mathcal A}$ by \begin{center} $\mathcal{A}:=\underset{\alpha\in \mathbb{R}_{+}^{k}}{\bigcup}\operatorname{epi}(\alpha G)^{*}\bigcup\underset{\beta\in \mathbb{R}_{+}^{p}}{\bigcup}\operatorname{epi}(\beta g)^{*}+\operatorname{epi}\delta^{*}_{\mathbb{R}^{n}\times\mathbb{R}^{m} },$ \end{center} and introduce the closedness condition defined by \begin{equation}\label{ccondi}\tag{CC} \operatorname{epi}(\lambda^{-1}F+ f)^{*}+\mathcal{A} \ \text{ is closed}. \end{equation} The closedness condition \eqref{ccondi} can be traced to Burachik and Jeyakumar \cite{ref5} and was used later to derive optimality conditions for convex cone constrained optimization problems. Different types of sufficient conditions for \eqref{ccondi} are given in \cite{ref5}. 
We are interested here in another form of duality called Tolland-Fenchel-Lagrange duality, which takes the following form: \begin{equation}\label{cc-dual}\tag{D$_{TFL}^{\lambda}$} \begin{array}{l} \inf_{(x^{*},y^{*})\in\mathbb{R}^{n}\times\mathbb{R}^{m}} \ \underset{ \underset{ \left( \alpha,\beta \right)\in\mathbb{R}_{+}^{k+p}} {\left( z^{\ast},q^{\ast}\right)\in \mathbb{R}^{n+m} } }{\max} \ \left\{{\varphi}^{*}(x^{*})+\delta_{\mathbb{R}^{m}}^{\ast}\left( y^{\ast}\right)\right.\\ \qquad \qquad \qquad -\left.\left(\lambda^{-1}F+ f\right)^{*}\left(z^{\ast},q^{\ast} \right)- \left( \alpha G+ \beta g\right)^{\ast} \left( x^{\ast}-z^{\ast},y^{\ast}-q^{\ast}\right)\right\}. \end{array} \end{equation} \begin{theorem} Consider the primal and the dual problem \eqref{calm3} and \eqref{cc-dual}. Suppose the constaint qualification \eqref{ccondi} holds. Then, the strong duality holds; i.e., ${\mathcal V} \eqref{calm3}={\mathcal V}\eqref{cc-dual}$. \end{theorem} \begin{proof} By definition of a primal problem, \begin{equation*} {\mathcal V}\eqref{calm3}=\inf _{(x,y) \in\mathbb{R}^{n}\times\mathbb{R}^{m} }\left(\lambda^{-1}F+ f+\delta_{\mathcal{S}}-{\varphi}\right)(x,y). \end{equation*} Using the classical Toland dulality, we obtain \begin{equation}\label{eq8} {\mathcal V}(\mathcal{P}^{\lambda})= \inf_{(x^{*},y^{*})\in\mathbb{R}^{n}\times\mathbb{R}^{m}}\left\{{\varphi}^{*}(x^{*})+\delta_{\mathbb{R}^{m}}^{\ast}\left( y^{\ast}\right) -\left(\lambda^{-1}F+f+\delta_{\mathcal{S}}\right)^{*}\left(x^{*},y^{*}\right)\right\}. \end{equation} Now, consider the function $\psi :\mathbb{R}^{n+m}\times\mathbb{R}^{n+m}\times\mathbb{R}^{k+p}\rightarrow \mathbb{R}\cup \{+\infty\}$ given by \begin{equation*} \psi\left(x,y,z,q,\nu ,\mu\right) = \left\{ \begin{array}{lcl} \left( \lambda^{-1}F+ f\right) \left(x+z,y+q\right), & \ & \text{if} \ \left( G\left( x,y\right) ,g\left( x,y\right)\right) + \left( \nu ,\mu\right) \in -\mathbb{R}_{+}^{k+p}, \\ +\infty & \ & \text{otherwise}. \end{array} \right. \end{equation*} Let $\left(x^{\ast},y^{\ast},z^{\ast},q^{\ast},\alpha,\beta \right) \in \mathbb{R}^{n+m}\times\mathbb{R}^{n+m}\times\mathbb{R}^{k+p}$. Using the same technique as in \cite{refrev0}, we calculate the conjugate of $\psi$ as \begin{equation*} \psi^{\ast}\left(x^{\ast},y^{\ast},z^{\ast},q^{\ast},\alpha,\beta \right)=\left(\lambda^{-1}F+ f\right)^{*}\left(z^{\ast},q^{\ast}\right)+ \left( \alpha G+ \beta g\right)^{\ast} \left( x^{\ast}-z^{\ast},y^{\ast}-q^{\ast}\right) +\delta_{-\mathbb{R}_{+}^{k+p}}^{\ast}\left( \alpha,\beta\right). \end{equation*} Observing that $\delta_{-\mathbb{R}_{+}^{k+p}}^{\ast}\left( \alpha,\beta \right)=0$ if $\left( \alpha,\beta\right) \in \mathbb{R}_{+}^{k+p}$, one deduces from the last equality that \begin{equation*} \begin{array}{l} \psi^{\ast}\left(x^{\ast},y^{\ast},z^{\ast},q^{\ast},\alpha,\beta \right)=\\ \qquad \qquad \left\{ \begin{array}{lcl} \left(\lambda^{-1}F+ f\right)^{*}\left(z^{\ast},q^{\ast} \right)+ \left( \alpha G+ \beta g\right)^{\ast} \left( x^{\ast}-z^{\ast},y^{\ast}-q^{\ast}\right) & \ & \text{if} \ \left( \alpha,\beta\right)\in \mathbb{R}_{+}^{k+p},\\ +\infty & \ & \text{otherwise}. \end{array} \right. 
\end{array} \end{equation*} On the other hand, we have \begin{equation}\label{cc-p1} \begin{array}{lcl} \left(\lambda^{-1}F+f+\delta_{\mathcal{S}}\right)^{*}\left(x^{*},y^{*}\right) & = & {\displaystyle \sup_{\left( x,y\right) \in {\mathcal S}}} \ \{ \langle \left( x,y\right) ,\left( x^{\ast},y^{\ast}\right) \rangle-\left(\lambda^{-1}F+f\right)\left( x,y\right) \} \\ & = & {\displaystyle \sup_{\left( x,y\right) \in \mathbb{R}^{n}\times\mathbb{R}^{m}}} \ \{ \langle \left( x,y\right) ,\left( x^{\ast},y^{\ast}\right) \rangle-\psi\left( x,y,0,0,0,0\right) \}. \end{array} \end{equation} Since, $Pr_{\mathbb{R}^{n}\times\mathbb{R}^{m}\times \mathbb{R}}\left( \text{epi} \ \psi^{\ast}\right) =\operatorname{epi}(\lambda^{-1}F+ f)^{*}+\mathcal{A}$, it follows from \cite[Theorem 3.1]{Wu100} that for all $\left( x^{\ast},y^{\ast}\right) \in \mathbb{R}^{n}\times\mathbb{R}^{m}$, \begin{equation}\label{cc-p2} \begin{array}{l} {\displaystyle \sup_{\left( x,y\right) \in \mathbb{R}^{n}\times\mathbb{R}^{m}}} \ \{ \langle \left( x,y\right) ,\left( x^{\ast},y^{\ast}\right) \rangle-\psi\left( x,y,0,0,0,0\right) \}\\ \qquad \qquad \qquad \qquad \qquad \quad ={\displaystyle \min_{\left( z^{\ast},q^{\ast}\right)\in \mathbb{R}^{n+m}, \left( \alpha,\beta \right)\in\mathbb{R}^{k+p} }} \ \psi^{\ast}\left(x^{\ast},y^{\ast},z^{\ast},q^{\ast},\alpha,\beta \right). \end{array} \end{equation} Combining $\left( \ref{cc-p1}\right)$ and $\left( \ref{cc-p2}\right)$, we arrive at \begin{equation*} \begin{array}{l} \left(\lambda^{-1}F+f+\delta_{\mathcal{S}}\right)^{*}\left(x^{*},y^{*}\right) \\ \qquad \qquad \qquad = {\displaystyle \min_{\left( z^{\ast},q^{\ast}\right)\in \mathbb{R}^{n+m}, \left( \alpha^{\ast},\beta^{\ast} \right)\in\mathbb{R}_{+}^{k+p} }} \ \left(\lambda^{-1}F+ f\right)^{*}\left(z^{\ast},q^{\ast} \right)+ \left( \alpha^{\ast} G+ \beta^{\ast} g\right)^{\ast} \left( x^{\ast}-z^{\ast},y^{\ast}-q^{\ast}\right). \end{array} \end{equation*} Inserting the latter equality in $\left( \ref{eq8}\right)$ we obtain \begin{equation*} \begin{array}{l} {\mathcal V}\eqref{calm3}= \inf_{(x^{*},y^{*})\in\mathbb{R}^{n}\times\mathbb{R}^{m}} \ \underset{ \underset{ \left( \alpha,\beta \right)\in\mathbb{R}_{+}^{k+p}} {\left( z^{\ast},q^{\ast}\right)\in \mathbb{R}^{n+m} } }{\max} \ \left\{{\varphi}^{*}(x^{*})+\delta_{\mathbb{R}^{m}}^{\ast}\left( y^{\ast}\right)\right. \\ \qquad \qquad \qquad -\left.\left(\lambda^{-1}F+ f\right)^{*}\left(z^{\ast},q^{\ast} \right)- \left( \alpha G+ \beta g\right)^{\ast} \left( x^{\ast}-z^{\ast},y^{\ast}-q^{\ast}\right)\right\}. \end{array} \end{equation*} Hence, ${\mathcal V}\eqref{calm3}={\mathcal V}\eqref{cc-dual}.$ \end{proof} \section{Duality without partial calmness} \subsection{Under a generalized Slater condition} In this section, we do not assume that \eqref{calm0} has optimal solutions, also no convexity assumption is imposed. Let $\epsilon > 0$ and consider the following reguralized problem of \eqref{calm0} \begin{equation}\label{reg-bil}\tag{P$_{\epsilon}$} {\displaystyle \min_{x,y}} \ F\left( x,y\right) \ \text{s.t.} \ G_{i}\left(x, y\right) \leq 0, \; \; i\in I_{k} := \{1,\cdots ,k\}, \; \; y\in {\mathcal S}_{\epsilon}\left( x\right), \end{equation} where for each $x\in \mathbb{R}^{n}$, ${\mathcal S}_{\epsilon}\left(x\right)$ is the set of $\epsilon$-approximate optimal solutions of the lower level problem \eqref{llp-1}; that is \begin{equation*} {\mathcal S}_{\epsilon}\left( x\right) =\{y\in K\left( x\right)| \;\; f\left( x,y\right) -\varphi\left( x\right) \leq \epsilon \}. 
\end{equation*} Then, it is clear that problem \eqref{reg-bil} can be reformulated as the following single-level optimization problem involving the optimal value function: \begin{equation}\label{w-p-c-P}\tag{LLVF$_{\epsilon}$} \left\{\begin{array}{ll} {\displaystyle \min_{x, y}} \ F(x, y) \\[1ex] G_{i}(x,y) \leq 0 \quad & \forall i \in I_{k}=\{1,\dots,k\}, \\ g_{j}(x, y) \leq 0 & \forall j \in I_{p}=\{1,\dots,p\}, \\ f(x, y)-\varphi(x) \leq \epsilon. \\ \end{array}\right. \end{equation} Next, we recall the relation between any accumulation point of a sequence of regularized optimal solutions (i.e., of problem \eqref{w-p-c-P}) and the optimal solution of \eqref{calm0}. To this end, let $\left(\epsilon_{k}\right)_{k}$ be a sequence of positive scalar such that $\epsilon_{k}\searrow 0$, we denote problem $\left(\mbox{LLVF}_{\epsilon_{k}}\right)$ by $\left(\mbox{LLVF}_{k}\right)$. We need the following Slater-type condition for the lower level problem: \begin{assum}\label{regul} For any $x\in \mathbb{R}^{n}$ satisfying the upper-level inequality constraints, and any nonempty subset $J_{p}$ of $I_{p}$, there exist $y\in\mathbb{R}^{m}$ such that $g_{j}\left( x,y\right) < 0$ for all $j\in J_{p}$. \end{assum} \begin{theorem}\cite{refextend} Let $\left( \epsilon_{k}\right)_{k}$ a sequence of positive scalar such that $\epsilon_{k}\searrow 0$ and $\left( x_{k},y_{k}\right)_{k}$ be a sequence of optimal solutions of the regularized problems $\left(\mbox{LLVF}_{k}\right)$ with $k\in\mathbb{N}$. Suppose that \begin{enumerate} \item ${\mathcal C} :=\{\left( x,y\right) \in\mathbb{R}^{n}\times\mathbb{R}^{m}|\;\; G_{i}\left( x,y\right) \leq 0, \; \; i\in I_{k}\}$ is a compact set; \item $\left( \bar{x},\bar{y}\right)$ is an accumulation point of the sequence $\left( x_{k},y_{k}\right)_{k}$; \item Assumption \ref{regul} holds. \end{enumerate} Then, $\left( \bar{x},\bar{y}\right)$ is an optimal solution of problem \eqref{llvf}. \end{theorem} Subsequently, based on the study given by Fabin, Fernando, and Cristian \cite{fabin-12}, we give a formulation of dual problem of \eqref{w-p-c-P}, then we provide a new complete characterization of strong duality. To proceed, for any $\epsilon > 0$, we consider the Lagrangian \begin{equation*} L_{\epsilon}\left(\varsigma ,\alpha ,\beta ,\gamma , x,y \right) := \varsigma F(x,y)+\gamma\left(f(x, y) - \varphi(x) - \epsilon\right) +{\displaystyle \sum_{i=1}^{k}} \ \alpha_{i} G_{i}(x,y)+ {\displaystyle \sum_{j=1}^{p}} \ \beta_{j} g_{j}(x,y) \end{equation*} and define the dual problem \eqref{dual-w-p-c-P} of \eqref{w-p-c-P} as \begin{equation}\label{dual-w-p-c-P}\tag{D$_{\epsilon}$} \underset{ \underset{ \gamma \in\mathbb{R}} {\left( \alpha ,\beta\right) \in \mathbb{R}_{+}^{k+p}}} {\sup} \ {\displaystyle \inf_{\left( x,y\right) \in\mathbb{R}^{n}\times\mathbb{R}^{m}}} \ L_{\epsilon}\left(1 ,\alpha ,\beta ,\gamma , x,y \right). \end{equation} It can be seen that for all $\alpha\in\mathbb{R}_{+}^{k}$, $\beta\in\mathbb{R}_{+}^{p}$, and $\gamma\in\mathbb{R}$, we have \begin{equation*} {\displaystyle \inf_{\left( x,y\right) \in\mathbb{R}^{n}\times\mathbb{R}^{m}}} \ L_{\epsilon}\left(\varsigma ,\alpha ,\beta ,\gamma , x,y \right)\;\; \leq \;\; \varsigma {\mathcal V}\left( {\mathcal P}_{\epsilon}\right), \;\;\, \forall \varsigma \geq 0, \end{equation*} which corresponds to a weak duality condition. To provide the strong duality result, we introduce some regularity conditions. 
To proceed, define \begin{equation*} \Psi_{\epsilon} \left( \mathbb{R}^{n}\times\mathbb{R}^{m}\right) := \left\{\left(F(x,y),\, f(x,y)-\varphi\left(x\right)-\epsilon,\, G(x,y),\, g(x,y) \right) \in\mathbb{R}^{2+k+p}:\;\, (x,y)\in \mathbb{R}^{n}\times\mathbb{R}^{m}\right\} \end{equation*} and consider the following assumptions: \begin{assum} \label{ass-s-1}It holds that \[ \text{cone} \ \bigg ( int \bigg [ \text{co} \left( \Psi_{\epsilon} \left( \mathbb{R}^{n}\times\mathbb{R}^{m}\right) \right)-{\mathcal V}\eqref{w-p-c-P} \left( 1,0_{\mathbb{R}^{1+k+p}}\right) + \left( \mathbb{R}_{+}\times\mathbb{R}^{1+k+p}_+\right) \bigg] \bigg) \;\; \mbox{ is pointed.} \] \end{assum} \begin{assum} \label{ass-s-2} $\left( 0,0\right) \notin int \bigg ( \text{co} \left( \Psi_{\epsilon} \left( \mathbb{R}^{n}\times\mathbb{R}^{m}\right) \right)-{\mathcal V}\eqref{w-p-c-P} \left( 1,0_{\mathbb{R}^{1+k+p}}\right) + \left( \mathbb{R}_{+}\times\mathbb{R}^{1+k+p}_+\right) \bigg)$. \end{assum} \begin{theorem}\label{thm-w-p-c-s} Let $\epsilon > 0$. Suppose that ${\mathcal V}\eqref{w-p-c-P}$ is finite and that \begin{equation}\label{EmpE} int \bigg ( \text{co} \left( \Psi_{\epsilon} \left( \mathbb{R}^{n}\times\mathbb{R}^{m}\right) \right)+ \left( \mathbb{R}_{+}\times\mathbb{R}^{1+k+p}_+\right) \bigg) \neq\emptyset. \end{equation} Then, for each $\epsilon > 0$, Assumption \ref{ass-s-1} (resp. Assumption \ref{ass-s-2}) holds if and only if there exists a vector $\left(\bar{\varsigma}, \bar{\gamma},\bar{\alpha},\bar{\beta}\right)\neq 0 $ such that \begin{equation*} {\displaystyle \inf_{\left( x,y\right) \in\mathbb{R}^{n}\times\mathbb{R}^{m}}} \ L_{\epsilon}\left( \bar{\varsigma},\bar{\alpha} ,\bar{\beta} ,\bar{\gamma} , x,y \right)= \bar{\varsigma} {\mathcal V}\eqref{w-p-c-P}. \end{equation*} \end{theorem} \begin{proof} The result follows from \cite[Theorem 3.1]{fabin-12}. \end{proof} Next, we provide a strong duality result when $\bar{\varsigma} > 0$. For this result, we need the assumption \begin{equation}\label{cq-w-p-c} \overline{cone} \, \left(\left(f-\varphi -\epsilon, \; G,\; g\right) \left(\mathbb{R}^{n}\times\mathbb{R}^{m}\right) + \mathbb{R}_{+}^{1+k+p}\right)= \mathbb{R}^{1+k+p}. \end{equation} The qualification condition \eqref{cq-w-p-c} is a generalized form of the Slater condition for problem \eqref{w-p-c-P}. Note that thanks to our regularization, condition \eqref{cq-w-p-c} can be fulfilled. \begin{theorem}Let $\epsilon > 0$ and suppose that ${\mathcal V}\eqref{w-p-c-P}$ is finite and condition \eqref{EmpE} holds. Furthermore, if condition \eqref{cq-w-p-c} is satisfied, then strong duality between \eqref{w-p-c-P} and \eqref{dual-w-p-c-P} holds; i.e., ${\mathcal V}\eqref{w-p-c-P}={\mathcal V}\eqref{dual-w-p-c-P}$. Moreover, there exists a Lagrange multiplier vector $\left(\bar{\gamma},\bar{\alpha},\bar{\beta}\right)\in \mathbb{R}_{+}\times\mathbb{R}^{k}_{+}\times\mathbb{R}^{p}_{+}$ such that we have \begin{equation*} {\mathcal V}\eqref{w-p-c-P}= {\displaystyle \inf_{\left( x,y\right) \in\mathbb{R}^{n}\times\mathbb{R}^{m}}} \ L_{\epsilon}\left(1,\bar{\alpha} ,\bar{\beta} ,\bar{\gamma} , x,y \right). \end{equation*} \end{theorem} \begin{proof} The result follows from \cite[Corollary 3.2]{fabin-12}. \end{proof} \subsection{Without a Slater condition} To close this section, we provide a new characterization of strong duality for bilevel optimization problem, covering situations where a Slater-type condition may fail. 
To proceed, consider the special bilevel program \begin{equation}\label{spe-bilevel}\tag{P$_{s}$} {\displaystyle \min_{x,y}} \ F\left( x,y\right) \ \text{s.t.} \ x\in X, y\in {\mathcal S}\left( x\right), \end{equation} where $X\subset \mathbb{R}^{n}$ and, for each $x\in X$, ${\mathcal S}\left( x\right)$ is the set of optimal solutions of the lower level problem \eqref{llp-1}, which in this case is given by \begin{equation*} {\mathcal S}\left( x\right) =\left\{y\in Y|\; f\left(x,y\right) -\varphi\left( x\right) \leq 0\right\}, \end{equation*} where the lower level feasible set is $Y\subset \mathbb{R}^{m}$. As before, problem \eqref{spe-bilevel} can take the form \begin{equation}\label{spe-bilevel-llvf}\tag{LLVF$_{s}$} \left\{ \begin{array}{l} {\displaystyle \min_{x,y}} \ F\left( x,y\right) \\ \left( x,y\right) \in X\times Y, \\ f\left( x,y\right) -\varphi\left( x\right) \leq 0. \end{array} \right. \end{equation} With $L\left(1 ,\gamma , x,y \right) := F\left( x,y\right) +\gamma\left( f\left( x,y\right) -\varphi\left( x\right) \right)$, the dual of problem \eqref{spe-bilevel-llvf} is given by \begin{equation}\label{dual-spe-bilevel-llvf}\tag{D$_{s}$} {\displaystyle \sup_{\gamma \in\mathbb{R}_+}} \ {\displaystyle \inf_{\left( x,y\right) \in X\times Y }} \ L\left(1 ,\gamma , x,y \right). \end{equation} To provide our result, denote the feasible set of problem \eqref{spe-bilevel-llvf} by \begin{equation*} {\mathcal F} := \left\{\left( x,y\right) \in X\times Y|\;\, f\left( x,y\right) -\varphi\left( x\right) \leq 0 \right\}. \end{equation*} The set of optimal solutions of problem \eqref{spe-bilevel-llvf} is given as \begin{equation*} {\mathcal O} := \arg\min\left\{F\left( x,y\right) |\left( x,y\right) \in {\mathcal F} \right\} \end{equation*} and \begin{equation*} \begin{array}{lcl} {\mathcal F}^{-} & := & \{\left( x,y\right) \in X\times Y |\;\, f\left( x,y\right) -\varphi\left( x\right) < 0 \}, \\ {\mathcal F}^{=} & := & \{\left( x,y\right) \in X\times Y |\;\, f\left( x,y\right) -\varphi\left( x\right) = 0 \}, \\ {\mathcal F}^{+} & := & \{\left( x,y\right) \in X\times Y |\;\, f\left( x,y\right) -\varphi\left( x\right) > 0 \}. \end{array} \end{equation*} Analogously, we introduce the following sets \begin{equation*} \begin{array}{lcl} \Xi^{-} & := & \{\left( x,y\right) \in X\times Y |\;\,F\left( x,y\right) < {\mathcal V} \eqref{spe-bilevel-llvf} \}, \\ \Xi^{=} & := & \{\left( x,y\right) \in X\times Y |\;\,F\left( x,y\right) = {\mathcal V} \eqref{spe-bilevel-llvf} \}, \\ \Xi^{+} & := & \{\left( x,y\right) \in X\times Y |\;\,F\left( x,y\right) > {\mathcal V} \eqref{spe-bilevel-llvf} \}. \end{array} \end{equation*} \begin{remark} Based on the structure of bilevel optimization problems, since $\varphi$ is the optimal value function of the lower level problem, we have $f\left( x,y\right) -\varphi\left( x\right) \geq 0$ for all $\left( x,y\right) \in X\times Y$. Hence ${\mathcal F}^{-}=\emptyset$ and ${\mathcal F}={\mathcal F}^{=}$; in particular, the Slater condition fails for problem \eqref{spe-bilevel-llvf}. \end{remark} Also, due to the lack of the Slater condition, strong duality requires a closed-cone constraint qualification. To proceed, set \begin{equation*} \Psi \left(X\times Y\right) =\left\{\left(F\left( x,y\right), \, f\left( x,y\right)-\varphi\left( x\right) \right) \in\mathbb{R}^{2}\left| (x,y)\in X\times Y\right.\right\}, \end{equation*} and consider the following assumption. \begin{assum} \label{ass-s-1-2} $\text{cone} \ \left( \text{co} \left( \Psi \left(X\times Y\right) \right)-{\mathcal V} \eqref{spe-bilevel-llvf} \left( 1,0\right) + int \left( \mathbb{R}^{2}_{+}\right) \right)$ is pointed. \end{assum} Based on the above construction, we characterize Assumption \ref{ass-s-1-2}.
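Before stating this characterization, we illustrate the above sets on a simple instance; this instance is ours and serves only as an illustration, it is not taken from the cited references. Let $X=Y=[0,1]$, $F\left( x,y\right) =x+y$ and $f\left( x,y\right) =y$, so that $\varphi\left( x\right) =0$ and ${\mathcal S}\left( x\right) =\{0\}$ for every $x\in X$. Then ${\mathcal F}={\mathcal F}^{=}=\{(x,0):\, x\in[0,1]\}$, ${\mathcal F}^{-}=\emptyset$, ${\mathcal V}\eqref{spe-bilevel-llvf}=0$, ${\mathcal O}=\{(0,0)\}$ and $\Xi^{+}=\{(x,y)\in X\times Y:\, x+y>0\}$, so that ${\mathcal O}\cap {\mathcal F}^{=}\neq \emptyset$ and $\Xi^{+}\cap {\mathcal F}^{=}\neq \emptyset$. Moreover, a direct computation of the dual value gives
\begin{equation*}
\sup_{\gamma\geq 0}\ \inf_{(x,y)\in[0,1]^{2}}\ \big(x+y+\gamma y\big)=0={\mathcal V}\eqref{spe-bilevel-llvf},
\end{equation*}
so strong duality holds for this instance, which is consistent with the characterization established below.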
\begin{proposition}\label{prop-final} Consider the problem \eqref{spe-bilevel-llvf}. Suppose that ${\mathcal F}\neq \emptyset$ and that ${\mathcal V}\eqref{spe-bilevel-llvf}$ is finite. The following assertions hold: \begin{enumerate} \item Suppose that ${\mathcal O}\neq \emptyset$. Then, $\text{cone} \ \bigg ( \text{co} \left( \Psi \left(X\times Y\right) \right)-{\mathcal V} \eqref{spe-bilevel-llvf} \left( 1,0\right) + int \left( \mathbb{R}^{2}_{+}\right) \bigg)$ is pointed if and only if ${\mathcal O}\cap {\mathcal F}^{=}\neq \emptyset$ and $\Xi^{+}\cap {\mathcal F}^{=}\neq \emptyset$. \item Assume that ${\mathcal O}= \emptyset$. Then, $\text{cone} \ \bigg ( \text{co} \left( \Psi \left(X\times Y\right) \right)-{\mathcal V} \eqref{spe-bilevel-llvf} \left( 1,0\right) + int \left( \mathbb{R}^{2}_{+}\right) \bigg)$ is pointed if and only if $\Xi^{+}\cap {\mathcal F}^{=}\neq \emptyset$. \end{enumerate} \end{proposition} The following theorem provides a complete characterization of strong duality for our bilevel program. \begin{theorem}\label{th-final} Consider problem \eqref{spe-bilevel-llvf}. Assume that ${\mathcal F}\neq \emptyset$ and that ${\mathcal V} \eqref{spe-bilevel-llvf}$ is finite. The following assertions are equivalent. \begin{enumerate} \item Strong duality holds; i.e., ${\mathcal V}\eqref{spe-bilevel-llvf}={\mathcal V}\eqref{dual-spe-bilevel-llvf}$; \item $\text{cone} \ \bigg ( \text{co} \left( \Psi \left(X\times Y\right) \right)-{\mathcal V}\eqref{spe-bilevel-llvf} \left( 1,0\right) + int \left( \mathbb{R}^{2}_{+}\right) \bigg)$ is pointed. \end{enumerate} \end{theorem} \begin{proof} It can be deduced from Proposition \ref{prop-final}; cf. \cite[Theorem 4.1 and Corollary 4.2]{fabin-12}. \end{proof} \section{Conclusion} In this paper, we have presented a duality approach for problem \eqref{calm0}. Since the bilevel programming problem \eqref{calm0} does not satisfy the Slater condition, which is the key tool for establishing strong duality, we have first provided an overview of some standard duality results to show that they are not applicable to our problem \eqref{calm0}. Afterwards, the concept of partial calmness was used to establish weak and strong duality results for problem \eqref{calm0}. In particular, Lagrange, Fenchel-Lagrange, and Toland-Fenchel-Lagrange duality concepts were investigated for this problem under some suitable conditions. For the first class of results, we introduced a regularization for problem \eqref{calm0}. Then, we established sufficient conditions ensuring strong duality for the regularized version of problem \eqref{calm0} under a generalized Slater-type condition and without convexity assumptions. Finally, we have provided a strong duality result for a geometrically constrained bilevel optimization problem without the Slater condition. \begin{thebibliography}{1} \bibitem{ref semi vect} A. Aboussoror, S. Adly and F.E. Saissi, A duality approach for a class of semivectorial bilevel programming problems, Vietnam Journal of Mathematics. 46 (2018) 197-214. \bibitem{refext} A. Aboussoror and S. Adly, A Fenchel-Lagrange duality approach for a bilevel programming problem with extremal value function, Journal of Optimization Theory and Applications. 149 (2011) 254-268. \bibitem{refextend} A. Aboussoror, S. Adly and F.E. Saissi, An extended Fenchel-Lagrange duality approach and optimality conditions for strong bilevel programming problems, SIAM Journal on Optimization. 27(2) (2017) 1230-1255. \bibitem{refrev} R.I. Bot, E.R. Csetnek and A.
Moldovan, Revisiting some duality theorems via the quasirelative interior in convex optimization, Journal of Optimization Theory and Applications. 139 (2008) 67-84. \bibitem{refrev0} R.I. Bot, E.R. Csetnek and G. Wanka, Sequential optimality conditions in convex programming via perturbation approach, Journal of Convex Analysis. 15 (2008) 149-164. \bibitem{ref+} R.I. Bot, I.B.Hodrea and G. Wanka, Some new Farkas-type results for inequality systems with DC functions, Faculty of Mathematics, Chemnitz University of Technology. D-09107 Chemnitz, Germany, 2007. \bibitem{ref10} R.I. Bot, G. Kassay and G. Wanka, Strong duality for generalized convex optimization problems, Journal of Optimization Theory and Applications. 127 (2005) 45-70. \bibitem{ref bot} R.I. Bot and G. Wanka, On the relations between different dual problems in convex mathematical programming, Operations Research Proceedings. (2001) 255-262. \bibitem{ref5} R.S. Burachik and V. Jeyakumar, A dual condition for the convex subdifferential sum formula with applications, Journal of Convex Analysis. 12 (2005) 279-290. \bibitem{Wu100} R.S. Burachik, V. Jeyakumar and Z.Y. Wu, Necessary and sufficient conditions for stable conjugate duality, nonlinear analysis theory methods and applications. 64 (2006) 1998-2006. \bibitem{DempeFoundations2002} S. Dempe, Foundations of bilevel programming, Kluwer Academic Publishers, 2002. \bibitem{DuttaDempe} S. Dempe and S. Dutta, Is bilevel programming a special case of mathematical programming with equilibrium constraints? Mathematical Programming. 131(1-2) (2012) 37-48. \bibitem{DempeZemkohoBook} S. Dempe and A.B. Zemkoho, Bilevel Optimization: Advances and Next Challenges, Springer, 2020. \bibitem{ref12} I. Ekeland and R. Temam, Convex analysis and variational problems, Amsterdam: NorthHolland Publishing Company, 1976. \bibitem{fabin-12} F-B. Fabin, F-B. Fernando and V. Cristian, A complete characterization of strong duality in nonconvex optimization with a single constraint, Journal of Global Optimization. 53 (2012) 185-201. \bibitem{ref6} D.H. Fang, C. Li and K.F. Ng, Constraint qualifications for extended Farkas’s lemmas and Lagrangian dualities in convex infinite programming, SIAM Journal on Optimization. 20 (2009) 1311-1332. \bibitem{GauvinDubeau1982} J. Gauvin and F. Dubeau, Differential properties of the marginal function in mathematical programming, Mathematics programmes of study. 18 (1982) 101-119. \bibitem{ref44} J.E. Martinez-Legaz and M. Volle, Duality in D.C. programming: The case of several D.C. constraints, Journal of Mathematical Analysis and Applications. 237 (1999) 657-671. \bibitem{ref11} R.T. Rockafellar, Convex analysis, Princeton: Princeton University Press, 1970. \bibitem{rock1} R.T. Rockafellar and R.J-B. Wets, Variational Analysis, Springer-Verlag, Berlin, Heidelberg, 1998. \bibitem{Shehu} Y. Shehu, P.T. Vuong and A.B. Zemkoho, An inertial extrapolation method for convex simple bilevel optimization, Optimization Methods and Software. 36(1) (2021) 1-19. \bibitem{ref3} X-K. Sun, Regularity conditions characterizing Fenchel-Lagrange duality and Farkas-type results in DC infinite programming, Journal of Mathematical Analysis and Applications. 414 (2014) 590-611. \bibitem{ye} J.J. Ye and D.L. Zhu, Optimality conditions for bilevel programming problems. Optimization. 33 (1995) 9-27. With Erratum in Optimization, 39 (1997) 361-366. \bibitem{zali} C. Z$\check{a}$linescu, Convex Analysis in General Vector Spaces, World Scientific, River Edge, New Jersey, 2002. 
\end{thebibliography} \end{document}
2205.10801v1
http://arxiv.org/abs/2205.10801v1
Zero volume boundary for extension domains from Sobolev to $BV$
\documentclass[reqno,11pt]{amsart} \usepackage{a4wide,color,eucal,enumerate,mathrsfs} \usepackage[normalem]{ulem} \usepackage{amsmath,amssymb,epsfig,amsthm} \usepackage[latin1]{inputenc} \usepackage{psfrag} \usepackage{mathtools} \numberwithin{equation}{section} \usepackage[english]{babel} nnish} uses Finnish as an example; see ftp://ftp.funet.fi/pub/TeX/CTAN/macros/latex/required/babel/base/babel.pdf \usepackage[T1]{fontenc} \usepackage[utf8]{inputenx} \usepackage{mathtools} \usepackage{empheq} \usepackage{enumitem} \usepackage{amssymb} \usepackage[sc]{mathpazo} \usepackage{mathrsfs} \usepackage{todonotes} \usepackage{aliascnt} \usepackage{comment} \usepackage{epsfig} \usepackage[normalem]{ulem} \usepackage[babel]{csquotes} \usepackage{nameref} \usepackage[colorlinks = true, linkcolor = red, urlcolor = red, citecolor = red] {hyperref} \usepackage{bookmark} \usepackage{cleveref} \newtheorem{maintheorem}{Theorem} \newtheorem{theorem}{Theorem}[section] \newtheorem{maincorollary}{Corollary} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{conjecture}[theorem]{Conjecture} \newtheorem{question}[theorem]{Question} \newtheorem{definition}[theorem]{Definition} \newtheorem{notation}[theorem]{Notation} \newtheorem{example}[theorem]{Example} \theoremstyle{remark} \newtheorem{remark}[theorem]{Remark} \DeclareMathOperator{\diam}{diam} \DeclareMathOperator{\dist}{dist} \DeclareMathOperator{\comp}{comp} \newcommand{\HH}{\mathcal{H}} \newcommand{\BB}{\mathcal{B}} \newcommand{\N}{\mathbb{N}} \renewcommand{\S}{\mathbb{S}} \newcommand{\D}{\mathbb{D}} \newcommand{\R}{\mathbb{R}} \newcommand{\Z}{\mathbb{Z}} \newcommand{\mC}{{\color{red}\mathbf{C(\cdot)}}} \renewcommand{\d}{{\mathrm d}} \def\rr{{\mathbb R}} \def\rn{{{\rr}^n}} \def\rrm{{{\rr}^m}} \def\rnm{{\rn\times\rrm}} \def\zz{{\mathbb Z}} \def\nn{{\mathbb N}} \def\hh{{\mathbb H}} \def\bd{{\mathcal D}} \def\ch{{\mathcal H}} \def\cc{{\mathbb C}} \def\cs{{\mathcal S}} \def\cx{{\mathscr X}} \def\lfz{{\lfloor}} \def\rfz{{\rfloor}} \def\ce{{\mathcal E}} \def\cf{{\mathcal F}} \def\cg{{\mathcal G}} \def\cl{{\mathcal L}} \def\cj{{\mathcal J}} \def\cq{{\mathcal Q}} \def\cp{{\mathcal P}} \def\cm{{\mathcal M}} \def\car{{\mathcal R}} \def\ct{{\mathcal T}} \def\cb{{\mathcal B}} \def\ca{{\mathcal A}} \def\ccc{{\mathcal C}} \def\cbgz{{\cb_\gz}} \def\cd{{\mathrm {CD}}} \def\fz{\infty} \def\az{\alpha} \def\ac{{\mathrm ac}} \def\am{{\mathrm {AM}}} \def\aml{{\mathrm {AML}}} \def\cccc{{\mathrm {CC}}} \def\dist{{\mathop\mathrm{\,dist\,}}} \def\loc{{\mathop\mathrm{\,loc\,}}} \def\lip{{\mathop\mathrm{\,Lip\,}}} \def\llc{{\mathop\mathrm{\,LLC}}} \def\lz{\lambda} \def\dz{\delta} \def\bdz{\Delta} \def\ez{\epsilon} \def\ezl{{\epsilon_1}} \def\ezt{{\epsilon_3}} \def\kz{\kappa} \def\bz{\beta} \def\fai{\varphi} \def\gz{{\gamma}} \def\oz{{\omega}} \def\boz{{\Omega}} \def\toz{{\wz{\boz}}} \def\ttoz{{\wz{\toz}}} \def\vz{\varphi} \def\tz{\theta} \def\sz{\sigma} \def\pa{\partial} \def\wz{\widetilde} \def\hs{\hspace{0.3cm}} \def\tl{\wzde} \def\ls{\lesssim} \def\gs{\gtrsim} \def\tr{\triangle} \def\ext{{\mathrm{\,Ext\,}}} \def\bint{{\ifinner\rlap{\bf\kern.35em--} }\ignorespaces} \def\dbint{\displaystyle\bint} \def\bbint{{\ifinner\rlap{\bf\kern.35em--} }\ignorespaces} \def\ocg{{\mathring{\cg}}} \def\ocd{{\mathring{\cd}}} \def\occ{{\mathring{\cal C}}} \def\cgbz{{\cg(\bz,\,\gz)}} \def\cgbb{{\cg(\bz_1,\bz_2;\gz_1,\gz_2)}} \def\cgm{{\cg(x_1,\,r,\,\bz,\,\gz)}} \def\cgom{{\ocg(x_1,\,r,\,\bz,\,\gz)}} \def\scale{{\rm 
scale}} \def\aplip{{\rm \,apLip\,}} \def\dmsp{{\dot M^{s,p}(\boz)}} \def\msp{{M^{s,p}(\boz)}} \def\dwp{{\dot W^{1,p}(\boz)}} \def\wp{{W^{1,p}(\boz)}} \def\lp{{L^{p}(\boz)}} \def\esup{\mathop\mathrm{\,esssup\,}} \def\einf{\mathop\mathrm{\,essinf\,}} \def\dsum{\displaystyle\sum} \def\osc{ \mathop \mathrm{\, osc\,} } \def\dosc{\displaystyle\osc} \def\diam{{\mathop\mathrm{\,diam\,}}} \def\atom{{\mathop\mathrm{\,atom\,}}} \def\dint{\displaystyle\int} \def\doint{\displaystyle\oint} \def\dlimsup{\displaystyle\limsup} \def\dfrac{\displaystyle\frac} \def\dsup{\displaystyle\sup} \def\dlim{\displaystyle\lim} \def\dlimsup{\displaystyle\limsup} \def\r{\right} \def\lf{\left} \def\la{\langle} \def\ra{\rangle} \def\subsetneq{{\hspace{0.2cm}\stackrel \subset{\scriptstyle\ne}\hspace{0.15cm}}} \def\bint{{\ifinner\rlap{\bf\kern.35em--} }\ignorespaces} \newenvironment{note}{\begin{quote}\sf}{\end{quote}} \title[Zero volume boundary for extension domains from Sobolev to $BV$]{Zero volume boundary for extension domains\\ from Sobolev to $BV$} \author{Tapio Rajala and Zheng Zhu } \address{University of Jyv\"as\-kyl\"a \\ Department of Mathematics and Statistics \\ P.O. Box 35 (MaD) \\ FI-40014 University of Jyv\"as\-kyl\"a \\ Finland} \email{[email protected]; [email protected]} \date{April 2022} \begin{document} \begin{abstract} In this note, we prove that the boundary of a $(W^{1, p}, BV)$-extension domain is of volume zero under the assumption that the domain $\boz$ is $1$-fat at almost every $x\in\partial\boz$. Especially, the boundary of any planar $(W^{1, p}, BV)$-extension domain is of volume zero. \end{abstract} \maketitle \section{Introduction} Given $1\leq q\leq p\leq\fz$, a bounded domain $\boz\subset\rn$, $n\geq 2$, is said to be a $(W^{1, p}, W^{1, q})$-extension domain if there exists a bounded extension operator \[ E\colon W^{1,p}(\boz)\mapsto W^{1,q}(\rn) \] and is said to be a $(W^{1, p}, BV)$-extension domain if there exists a bounded extension operator \[ E\colon W^{1, p}(\boz)\mapsto BV(\rn). \] The theory of Sobolev extensions is of interest in several fields in analysis. Partial motivations for the study of Sobolev extensions comes from the thoery of PDEs, for example, see \cite{Mazyabook}. It was proved in \cite{calderon,stein} that for every Lipschitz domain in $\rn$, there exists a bounded linear extension operator $E\colon W^{k, p}(\boz)\mapsto W^{k, p}(\rn)$ for each $k\in\mathbb N$ and $1\leq p\leq\fz$. Here $W^{k, p}(\boz)$ is the Banach space of all $L^p$-integrable functions whose distributional derivatives up to order $k$ are $L^p$-integrable. Later, the notion of $(\epsilon, \delta)$-domains was introduced by Jones in \cite{Jones}, and it was proved that for every $(\epsilon, \delta)$-domain, there exists a bounded linear extension operator $E\colon W^{k, p}(\boz)\mapsto W^{k, p}(\rn)$ for every $k\in\mathbb N$ and $1\leq p\leq\fz$. In \cite{VGL}, a geometric characterization of planar $(W^{1, 2}, W^{1, 2})$-extension domain was given. By later results in \cite{PekkaJFA,Shvartsman,KRZ1,KRZ2}, we now have geometric characterizations of planar simply connected $(W^{1,p}, W^{1, p})$-extension domains for all $1\leq p\leq\fz$. A geometric characterization is also known for planar simply connected $(L^{k, p}, L^{k, p})$-extension domains with $2<p\leq\fz$, see \cite{ShvartsmanAdv,Whitney,Zobin}. Here $L^{k, p}(\boz)$ denotes the homogeneous Sobolev space which contains locally integrable functions whose $k$-th order distributional derivative is $L^p$-integrable. 
Beyond the planar simply connected case, geometric characterizations of Sobolev extension domains are still missing. However, several necessary properties have been obtained for general Sobolev extension domains. For a measurable subset $F\subset\rn$, we use $|F|$ to denote its Lebesgue measure. In \cite{HKT2008:B, HKT2008}, Haj\l{}asz, Koskela and Tuominen proved for $1\leq p<\fz$ that a $\lf(W^{1, p}, W^{1,p}\r)$-extension domain $\boz\subset\rn$ must be Ahlfors regular which means that there exists a positive constant $C>1$ such that for every $x\in\overline\boz$ and $0<r<\min\lf\{1, \frac{1}{4}\diam\boz\r\}$, we have \begin{equation}\label{eq:regular} |B(x, r)|\leq C|B(x, r)\cap\boz|. \end{equation} From the results in \cite{Kos:planarBV, Tapio}, we know that also $(BV, BV)$-extension domains are Ahlfors regular. For Ahlfors regular domains, the Lebesgue differentiation theorem then easily implies $|\partial\boz|=0$. In the case where $\boz$ is a planar Jordan $\lf(W^{1, p}, W^{1,p}\r)$-extension domain, $\boz$ has to be a so-called John domain when $1\leq p\leq 2$ and the complementary domain has to be John when $2\leq p<\fz$. The John condition implies that the Hausdorff dimension of $\partial\boz$ must be strictly less than 2, see \cite{KRmathann}. Recently, Lu$\check{c}$i\'c, Takanen and the first named author gave a sharp estimate on the Hausdorff dimension of $\partial\boz$, see \cite{LRTacv}. In general, the Hausdorff dimension of a $(W^{1, p}, W^{1, p})$-extension domain can well be $n$. The outward cusp domain with a polynomial type singularity is a typical example which is not a $(W^{1, p}, W^{1, p})$-extension domain for $1\leq p<\fz$. However, it is a $(W^{1, p}, W^{1, q})$-extension domain, for some $1\leq q<p\leq\fz$, see the monograph \cite{MPbook} and the references therein. Hence, for $1\leq q<p\leq\fz$, it is not necessary for a $(W^{1, p}, W^{1, q})$-extension domain to be Ahlfors regular. In the absence of Ahlfors regularity, one has to find alternative approaches for proving $|\partial\boz|=0$. The first approach in \cite{ukhlov1,ukhlov2} was to generalize the Ahlfors regularity \eqref{eq:regular} to a Ahlfors-type estimate \begin{equation}\label{eq:regular2} |B(x, r)|^p\leq C\Phi^{p-q}(B(x, r))|B(x, r)\cap\boz|^q \end{equation} for $(W^{1, p}, W^{1, p})$-extension domains with $n<q<p<\fz$. Here $\Phi$ is a bounded and quasiadditive set function generated by the $(W^{1, p}, W^{1, q})$-extension property and defined on open sets $U\subset\rn$, see Section \ref{sec:set}. By differentiating $\Phi$ with respect to the Lebesgue measure, one concludes that $|\partial\boz|=0$ if $\boz$ is a $(W^{1, p}, W^{1, q})$-extension domain for $n < q < p < \fz$. Recently, Koskela, Ukhlov and the second named author \cite{KUZ} generalized this result and proved that the boundary of a $\lf(W^{1, p}, W^{1, q}\r)$-extension domain must be of volume zero for $n-1<q< p<\fz$ (and for $1\leq q< p<\fz$ on the plane). For $1\leq q<n-1$ and $(n-1)q/(n-1-q)<p<\fz$, they constructed a $\lf(W^{1, p}, W^{1, q}\r)$-extension domain $\boz\subset\rn$ with $|\partial\boz|>0$. For the remaining range of exponents where $1\leq q\leq n-1$ and $q<p\leq(n-1)q/(n-1-q)$, it is still not clear whether the boundary of every $\lf(W^{1, p}, W^{1, q}\r)$-extension domain must be of volume zero. As is well-known, for every domain $\boz\subset\rn$, the space of functions of bounded variation $BV(\boz)$ strictly contains every Sobolev space $W^{1, q}(\boz)$ for $1\leq q\leq \fz$. 
Hence, the class of $\lf(W^{1, p}, BV\r)$-extension domains contains the class of $\lf(W^{1, p}, W^{1, q}\r)$-extension domains for every $1\leq q\leq p<\fz$. As a basic example to indicate that the containment is strict when $n \ge 2$, we can take the slit disk (the unit disk minus a radial segment) in the plane. The slit disk is a $\lf(W^{1, p}, BV\r)$-extension domain for every $1\leq p<\fz$, and even a $(BV, BV)$-extension domain; however it is not a $\lf(W^{1, p}, W^{1, q}\r)$-extension domain for any $1\leq q\leq p<\fz$. This basic example also shows that it is natural to consider the geometric properties of $\lf(W^{1, p}, BV\r)$-extension domains. In this paper, we focus on the question whether the boundary of a $(W^{1, p}, BV)$-extension domain is of volume zero. Our first theorem tells us that the $(BV, BV)$-extension property is equivalent to the $\lf(W^{1, 1}, BV\r)$-extension property. Hence, a $(W^{1, 1}, BV)$-extension domain is Ahlfors regular and so its boundary is of volume zero. \begin{theorem}\label{thm:1.1} A domain $\boz\subset\rn$ is a $(BV, BV)$-extension domain if and only if it is a $\lf(W^{1, 1}, BV\r)$-extension domain. \end{theorem} Since, $W^{1,1}(\boz)$ is a proper subspace of $BV(\boz)$ with $\|u\|_{W^{1,1}(\boz)}=\|u\|_{BV(\boz)}$ for every $u\in W^{1, 1}(\boz)$, $(BV, BV)$-extension property implies $(W^{1, 1}, BV)$-extension property immediately. The other direction from $(W^{1, 1}, BV)$-extension property to $(BV, BV)$-extension property is not as straightforward, as $W^{1, 1}(\boz)$ is only a proper subspace of $BV(\boz)$. The essential tool here is the Whitney smoothing operator constructed by Garc\'ia-Bravo and the first named author in \cite{Tapio}. This Whitney smoothing operator maps every function in $BV(\boz)$ to a function in $W^{1, 1}(\boz)$ with the same trace on $\partial\boz$, so that the norm of the image in $W^{1, 1}(\boz)$ is uniformly controlled from above by the norm of the corresponding preimage in $BV(\boz)$. With an extra assumption that $\boz$ is $q$-fat at almost every point on the boundary $\partial\boz$, in \cite{KUZ} it was shown that the boundary of a $(W^{1,p}, W^{1, q})$-extension domain is of volume zero when $1\leq q<p<\fz$. The essential point there was that the $q$-fatness of the domain on the boundary guarantees the continuity of a $W^{1, q}$-function on the boundary. Maybe a bit surprisingly, the assumption that the domain is $1$-fat at almost every point on the boundary also guarantees that the boundary of a $(W^{1, p}, BV)$-extension domain is of volume zero. In particular, every planar domain is $1$-fat at every point of the boundary. Hence, we have the following theorem. \begin{theorem}\label{thm:Rn} Let $\boz\subset\rn$ be a $(W^{1, p}, BV)$-extension domain for $1\leq p<\fz$, which is $1$-fat at almost every $x\in\partial\boz$. Then $|\partial\boz|=0$. In particular, for every planar $(W^{1, p}, BV)$-extension domain $\boz$ with $1\leq p<\fz$, we have $|\partial\boz|=0$. \end{theorem} In light of the results and example given in \cite{KUZ}, the most interesting open question is what happens in the range $1<p\leq (n-1)/(n-2)$ of exponents. For this range, we do not know whether the boundary of a $(W^{1, p}, BV)$-extension domain must be of volume zero. If a counterexample exists in this range, it might be easier to construct it in the $(W^{1, p}, BV)$-case rather than the $(W^{1, p},W^{1,1})$-case. Hence we leave it as a question here. 
\begin{question} For $1<p\leq(n-1)/(n-2)$, is the boundary of a $(W^{1, p}, BV)$-extension domain of volume zero? \end{question} \section{Preliminaries} For a locally integrable function $u\in L^1_{\rm loc}(\boz)$ and a measurable subset $A\subset\boz$ with $0<|A|<\fz$, we define \[u_A:=\bint_Au(y)\,dy=\frac{1}{|A|}\int_Au(y)\,dy.\] \begin{definition}\label{de:Sobolev} Let $\boz\subset\rn$ be a domain. For every $1\leq p\leq\fz$, we define the Sobolev space $W^{1, p}(\boz)$ to be \[W^{1, p}(\boz):=\lf\{u\in L^p(\boz): \nabla u\in L^p(\boz;\rn)\r\},\] where $\nabla u$ denotes the distributional gradient of $u$. It is equipped with the nonhomogeneous norm \[\|u\|_{W^{1, p}(\boz)}=\|u\|_{L^p(\boz)}+\|\nabla u\|_{L^p(\boz)}.\] \end{definition} Now, let us give the definition of functions of bounded variation. \begin{definition}\label{de:BV} Let $\boz\subset\rn$ be a domain. A function $u\in L^1(\boz)$ is said to be of bounded variation, denoted $u\in BV(\boz)$, if \[\|Du\|(\boz):=\sup\lf\{\int_\boz u\,{\rm div}(\phi)\, dx:\phi\in C^1_o(\boz;\rn), |\phi|\leq 1\r\}<\fz.\] The space $BV(\boz)$ is equipped with the norm \[\|u\|_{BV(\boz)}:=\|u\|_{L^1(\boz)}+\|Du\|(\boz).\] \end{definition} \begin{definition}\label{de:extension} We say that a domain $\Omega\subset\rn$ is a $\lf(W^{1, p}, BV\r)$-extension domain for $1\leq p<\fz$, if there exists a bounded extension operator $E\colon W^{1, p}(\boz)\mapsto BV(\rn)$ such that for every $u\in W^{1, p}(\boz)$, we have $E(u)\in BV(\rn)$ with $E(u)\big|_\boz\equiv u$ and \[\|E(u)\|_{BV(\rn)}\leq C\|u\|_{W^{1, p}(\boz)}\] for a constant $C>1$ independent of $u$. \end{definition} Let $U\subset\rn$ be an open set and $A\subset U$ be a measurable subset with $\overline A\subset U$. The $p$-admissible set $\mathcal W_p(A; U)$ is defined by setting \[\mathcal W_p(A; U):=\lf\{u\in W^{1, p}_0(U)\cap C(U):u\big|_{A}\geq 1\r\}.\] \begin{definition}\label{de:capacity} Let $U\subset\rn$ be an open set and $A\subset U$ with $\overline{A}\subset U$. The relative $p$-capacity $Cap_p(A; U)$ is defined by setting \[Cap_p(A; U):=\inf_{u\in\mathcal W_p(A;U)}\int_{U}|\nabla u(x)|^p\,dx.\] \end{definition} Following Lahti \cite{Panu}, we define $1$-fatness below. \begin{definition}\label{de:1fat} Let $A\subset\rn$ be a measurable subset. We say that $A$ is $1$-thin at the point $x\in\rn$, if \[\lim_{r\to0^+}r\frac{Cap_1\lf(A\cap B(x, r); B(x, 2r)\r)}{\lf|B(x, r)\r|}=0.\] If $A$ is not $1$-thin at $x$, we say that $A$ is $1$-fat at $x$. Furthermore, we say that a set $U$ is $1$-finely open, if $\rn\setminus U$ is $1$-thin at every $x\in U$. \end{definition} By \cite[Lemma 4.2]{Panu}, the collection of $1$-finely open sets is a topology on $\rn$. For a function $u\in BV(\rn)$, we define the lower approximate limit $u_\star$ by setting \[u_\star(x):=\sup\lf\{t\in\overline{\rr}:\lim_{r\to 0^+}\frac{\lf|B(x, r)\cap\{u<t\}\r|}{\lf|B(x, r)\r|}=0\r\}\] and the upper approximate limit $u^\star$ by setting \[u^\star(x):=\inf\lf\{t\in\overline\rr:\lim_{r\to 0^+}\frac{\lf|B(x, r)\cap\{u>t\}\r|}{\lf|B(x, r)\r|}=0\r\}.\] The set \[S_u:=\lf\{x\in\rn: u_\star(x)<u^\star(x)\r\}\] is called the jump set of $u$. By the Lebesgue differentiation theorem, $\lf|S_u\r|=0$. Using the lower and upper approximate limits, we define the precise representative $\tilde u:=(u^\star+u_\star)/2$. The following lemma was proved in \cite[Corollary 5.1]{Panu}. \begin{lemma}\label{lem:1-fine} Let $u\in BV(\rn)$. Then $\tilde u$ is $1$-finely continuous at $\mathcal H^{n-1}$-almost every $x\in\rn\setminus S_u$.
\end{lemma} The following lemma for $u\in W^{1, 1}(\rn)$ was proved in \cite[Lemma 2.6]{KUZ}, which is also a corollary of a result in \cite{Tero}. We generalize it to $BV(\rn)$ here. \begin{lemma}\label{le:fat} Let $\boz\subset\rn$ be a domain which is $1$-fat at almost every point $x\in\partial\boz$. If $u\in BV(\rn)$ with $u\big|_{B(x, r)\cap\boz}\equiv c$ for some $x\in\partial\boz$, $0<r<1$ and $c\in\rr$. Then $u(y)=c$ for almost every $y\in B(x, r)\cap\partial\boz$. \end{lemma} \begin{proof} Let $u\in BV(\rn)$ satisfy the assumptions. Then the precise representative $\tilde u\big|_{B(x, r)\cap\boz}\equiv c$. Since $|S_u|=0$, by Lemma \ref{lem:1-fine}, there exists a subset $N_1\subset\rn$ with $|N_1|=0$ such that $\tilde u$ is $1$-finely continuous on $\rn\setminus N_1$. By the assumption, there exists a measure zero set $N_2\subset\partial\boz$ such that $\boz$ is $1$-fat on $\partial\boz\setminus N_2$. By \Cref{de:1fat}, one can see that $B(x, r)\cap\boz$ is also $1$-fat on $(B(x, r)\cap\partial\boz)\setminus N_2$. For every $y\in(B(x, r)\cap\partial\boz)\setminus(N_1\cup N_2)$, since $\tilde u$ is $1$-finely continuous on it and any $1$-fine neighborhood of $y$ must intersect $B(x, r)\cap\boz$, we have $\tilde u(y)=c$. Hence $u(y)=c$ for almost every $y\in B(x, r)\cap\partial\boz$. \end{proof} The following coarea formula for $BV$ functions can be found in \cite[Section 5.5]{Evans}. See also \cite[Theorem 2.2]{Tapio}. \begin{proposition} Given a function $u\in BV(\boz)$, the superlevel sets $u_t=\{x\in\boz:u(x)>t\}$ have finite perimeter in $\boz$ for almost every $t\in\rr$ and \[\|Du\|(F)=\int_{-\fz}^\fz P(u_t, F)\,dt\] for every Borel set $F\subset\boz$. Conversely, if $u\in L^1(\boz)$ and \[\int_{-\fz}^\fz P(u_t,\boz)\,dt<\fz\] then $u\in BV(\boz)$. \end{proposition} See \cite[Theorem 3.44]{Fusco} for the proof of the following $(1, 1)$-Poincar\'e inequality for $BV$ functions. \begin{proposition}\label{prop:BVpoin} Let $\boz\subset\rn$ be a bounded Lipschitz domain. Then there exists a constant $C>0$ depending on $n$ and $\boz$ such that for every $u\in BV(\boz)$, we have \[\int_\boz|u(y)-u_\boz|\,dy\leq C\|Du\|(\boz).\] In particular, there exists a constant $C>0$ only depending on $n$ so that if $Q, Q'\subset\rn$ are two closed dyadic cubes with $\frac{1}{4}l(Q')\leq l(Q)\leq 4l(Q')$ and $\boz:={\rm int}(Q\cup Q')$ connected, then for every $u\in BV(\boz)$, \begin{equation}\label{eq:poincare} \int_\boz|u(y)-u_\boz|\,dy\leq Cl(Q)\|Du\|(\boz). \end{equation} \end{proposition} \section{A set function arising from the extension}\label{sec:set} In this subsection, we introduce a set function defined on the class of open sets in $\rn$ and taking nonnegative values. Our set function here is a modification of the one originally introduced by Ukhlov \cite{ukhlov1, ukhlov2}. See also \cite{VoUk1, VoUk2} for related set functions. The modified version of the set function we use is from \cite{KUZ}, where it was used by Koskela, Ukhlov and the second named author to study the size of the boundary of a $(W^{1, p}, W^{1, q})$-extension domains. 
Let us recall that a set function $\Phi$ defined on the class of open subsets of $\rn$ and taking nonnegative values is called quasiadditive (see for example \cite{VoUk1}), if for all open sets $U_1\subset U_2\subset\rn$, we have \[\Phi(U_1)\leq\Phi(U_2),\] and there exists a positive constant $C$ such that for arbitrary pairwise disjoint open sets $\{U_i\}_{i=1}^\fz$, we have \begin{equation}\label{eq:quasiadditive} \sum_{i=1}^\fz\Phi(U_i)\leq C\Phi\lf(\bigcup_{i=1}^\fz U_i\r). \end{equation} Let $\boz\subset\rn$ be a $(W^{1, p}, BV)$-extension domain for some $1<p<\fz$. For every open set $U\subset\rn$ with $U\cap\boz\neq\emptyset$, we define \[W^p_0(U, \boz):=\lf\{u\in W^{1, p}(\boz)\cap C(\boz): u\equiv 0\ {\rm on}\ \boz\setminus U\r\}.\] For every $u\in W^p_0(U, \boz)$, we define \[\Gamma(u):=\inf\lf\{\|Dv\|(U): v\in BV(\rn), v\big|_{\boz}\equiv u\r\}.\] Then we define the set function $\Phi$ by setting \begin{equation}\label{eq:setfunction} \Phi(U):=\begin{cases}\sup_{u\in W^p_0(U, \boz)}\lf(\frac{\Gamma(u)}{\|u\|_{W^{1, p}(U\cap\boz)}}\r)^{k}, \ \ \text{with } \frac{1}{k}=1-\frac{1}{p}, & \text{if }U\cap\boz\neq\emptyset,\\ 0, & \text{otherwise.} \end{cases} \end{equation} The proof of the following lemma is almost the same as the proof of \cite[Theorem 3.1]{KUZ}. One needs to simply replace $\|Dv\|_{L^q(U)}$ by $\|Dv\|(U)$ in the proof of \cite[Theorem 3.1]{KUZ} and repeat the argument. \begin{lemma}\label{le:setfunction} Let $1<p<\fz$ and let $\boz\subset\rn$ be a bounded $(W^{1, p}, BV)$-extension domain. Then the set function defined in (\ref{eq:setfunction}) for all open subsets of $\rn$ is bounded and quasiadditive. \end{lemma} The upper and lower derivatives of a quasiadditive set function $\Phi$ are defined by setting \[\overline{D\Phi}(x):=\limsup_{r\to0^+}\frac{\Phi(B(x, r))}{|B(x, r)|}\quad {\rm and}\quad \underline{D\Phi}(x) = \liminf_{r\to 0^+}\frac{\Phi(B(x,r))}{|B(x, r)|}.\] By \cite{Rado,VoUk1}, we have the following lemma. See also \cite[Lemma 3.1]{KUZ}. \begin{lemma}\label{le:setFbound} Let $\Phi$ be a bounded and quasiadditive set function defined on open sets $U\subset\rn$. Then $\overline{D\Phi}(x)<\fz$ for almost every $x\in\rn$. \end{lemma} The following lemma immediately comes from the definition (\ref{eq:setfunction}) for the set function $\Phi$. \begin{lemma}\label{cor:extension} Let $1<p<\fz$ and let $\boz\subset\rn$ be a bounded $(W^{1, p}, BV)$-extension domain. Then, for a ball $B(x, r)$ with $x\in\partial\boz$ and every function $u\in W^p_0(B(x, r), \boz)$, there exists a function $v\in BV(B(x, r))$ with $v\big|_{B(x, r)\cap\boz}\equiv u$ and \begin{equation}\label{eq:extension} \|Dv\|(B(x, r))\leq 2\Phi^{\frac{1}{k}}(B(x, r))\|u\|_{W^{1, p}(B(x, r)\cap\boz)}, \ \ {\rm where}\ \ \frac{1}{k}=1-\frac{1}{p}. \end{equation} \end{lemma} \section{Proofs of the results} In this section we prove Theorems \ref{thm:1.1} and \ref{thm:Rn}. \begin{proof}[Proof of \Cref{thm:1.1}] Let us first assume that $\boz\subset\rn$ is a $(BV,BV)$-extension domain with the extension operator $E$. Since $W^{1,1}(\boz)\subset BV(\boz)$ with $\|u\|_{BV(\boz)}=\|\nabla u\|_{L^1(\boz)}$ for every $u\in W^{1, 1}(\boz)$, we have $$\|E(u)\|_{BV(\boz)}\leq C\|u\|_{BV(\boz)}\leq C\|u\|_{W^{1, 1}(\boz)}.$$ This implies that $\boz$ is a $\lf(W^{1,1}, BV\r)$-extension domain with the same operator $E$ restricted to $W^{1,1}(\Omega)$. Let us then prove the converse and assume that $\boz\subset\rn$ is a $(W^{1,1}, BV)$-extension domain with an extension operator $E$. 
Let $S_{\boz,\boz}$ be the Whitney smoothing operator defined in \cite{Tapio}. Then by \cite[Theorem 3.1]{Tapio}, for every $u\in BV(\boz)$, we have $S_{\boz,\boz}(u)\in W^{1, 1}(\boz)$ with \[\|S_{\boz,\boz}(u)\|_{W^{1, 1}(\boz)}\leq C\|u\|_{BV(\boz)}\] for a positive constant $C$ independent of $u$, and \begin{equation}\label{eq:boundary} \|D(u-S_{\boz, \boz}(u))\|(\partial\boz)=0, \end{equation} where $u-S_{\boz,\boz}(u)$ is understood to be defined on the whole space $\rn$ via a zero-extension. Then $E(S_{\boz, \boz}(u))\in BV(\rn)$ with \[\|E(S_{\boz, \boz}(u))\|_{BV(\rn)}\leq C\|S_{\boz, \boz}(u)\|_{W^{1, 1}(\boz)}\leq C\|u\|_{BV(\boz)}.\] Now, define $T\colon BV(\Omega) \to BV(\rn)$ by setting for every $u \in BV(\Omega)$ \[T(u)(x):=\begin{cases} u(x),\ \ {\rm if}\ \ x\in \boz\\ E(S_{\boz, \boz}(u))(x),\ \ {\rm if}\ \ x\in\rn\setminus\boz. \end{cases}\] By (\ref{eq:boundary}), we have $T(u)\in BV(\rn)$ with \[\|T(u)\|_{BV(\rn)}\leq\|E(S_{\boz,\boz}(u))\|_{BV(\rn)}+\|u\|_{BV(\boz)}\leq C\|u\|_{BV(\boz)}.\] Hence, $\boz$ is a $BV$-extension domain. \end{proof} \begin{proof}[Proof of \Cref{thm:Rn}] Assume towards a contradiction that $|\partial\boz|>0$. By the Lebesgue density point theorem and \Cref{le:setFbound}, there exists a measurable subset $U$ of $\partial\boz$ with $|U|=|\partial\boz|$ such that every $x\in U$ is a Lebesgue point of $\partial\boz$ and $\overline{D\Phi}(x)<\fz$. Fix $x\in U$. Since $x$ is a Lebesgue point, there exists a sufficiently small $r_x>0$, such that for every $0<r<r_x$, we have \[\lf|B(x, r)\cap\overline\boz\r|\geq\frac{1}{2^{n-1}}\lf|B(x, r)\r|.\] Let $r\in(0, r_x)$ be fixed. Since $|\partial B(x, s)|=0$ for every $s\in (0, r)$, we have \begin{equation}\label{eq:volume1} \lf|B\lf(x, \frac{r}{4}\r)\cap\overline\boz\r|\geq\frac{1}{2^{n-1}}\lf|B\lf(x, \frac{r}{4}\r)\r|\geq\frac{1}{2^{3n-1}}\lf|B(x, r)\r| \end{equation} and \begin{equation}\label{eq:volume2} \lf|\lf(B(x,r)\setminus B\lf(x, \frac{r}{2}\r)\r)\cap\overline\boz\r|\geq |B(x, r)\cap\overline\boz|-\lf|B\lf(x, \frac{r}{2}\r)\r|\geq\frac{1}{2^n}|B(x, r)|. \end{equation} Define a test function $u\in W^{1, p}(\boz)\cap C(\boz)$ by setting \begin{equation}\label{eq:testF} u(y):=\begin{cases} 1,\ \ &{\rm if}\ y\in B\lf(x, \frac{r}{4}\r)\cap\boz,\\ \frac{-4}{r}|y-x|+2,\ \ &{\rm if}\ y\in \lf(B\lf(x, \frac{r}{2}\r)\setminus B\lf(x, \frac{r}{4}\r)\r)\cap\boz,\\ 0,\ \ &{\rm if}\ y\in\boz\setminus B\lf(x, \frac{r}{2}\r). \end{cases} \end{equation} We have \begin{equation}\label{eq:upperbound} \lf(\int_{B(x, r)\cap\boz}|u(y)|^p+|\nabla u(y)|^p\,dx\r)^{\frac{1}{p}}\leq\frac{C}{r}|B(x, r)\cap\boz|^{\frac{1}{p}}. \end{equation} Since $u\equiv 0$ on $\boz\setminus B(x, r/2)$, we have $u\in W^p_0(B(x, r), \boz)$. Then, by the definition \eqref{eq:setfunction} of the set function $\Phi$ and by \Cref{cor:extension}, there exists a function $v\in BV(B(x, r))$ with $v\big|_{B(x, r)\cap\boz}\equiv u$ and \begin{equation}\label{eq:extenbound} \|Dv\|(B(x, r))\leq 2\Phi^{\frac{1}{k}}(B(x, r))\|u\|_{W^{1, p}(B(x, r)\cap\boz)}. \end{equation} By the Poincar\'e inequality of $BV$ functions stated in \Cref{prop:BVpoin}, we have \begin{equation}\label{eq:BVpoincare} \int_{B(x, r)}|v(y)-v_{B(x, r)}|\,dy\leq C r\|Dv\|(B(x, r)). \end{equation} Since $\boz$ is $1$-fat on almost every $z\in\partial\boz$, by \Cref{le:fat}, $v(z)=1$ for almost every $z\in B\lf(x,\frac{r}{4}\r)\cap\partial\boz$ and $v(z)=0$ for almost every $z\in \lf(B(x,r)\setminus B\lf(x, \frac{r}{2}\r)\r)\cap\partial\boz$. 
Hence, on one hand, if $v_{B(x, r)}\leq\frac{1}{2}$, we have \[\int_{B(x, r)}|v(y)-v_{B(x, r)}|\,dy\geq\frac{1}{2}\lf|B\lf(x, \frac{r}{4}\r)\cap\overline\boz\r|\geq c|B(x, r)|.\] On the other hand, if $v_{B(x, r)}>\frac{1}{2}$, we have \[\int_{B(x, r)}|v(y)-v_{B(x, r)}|\,dy\geq\frac{1}{2}\lf|\lf(B(x, r)\setminus B\lf(x, \frac{r}{2}\r)\r)\cap\overline\boz\r|>c|B(x, r)|.\] All in all, we always have \begin{equation}\label{eq:lowerbound} \int_{B(x, r)}|v(y)-v_{B(x, r)}|\,dy\geq c|B(x, r)| \end{equation} for a sufficiently small constant $c>0$. Thus, by combining inequalities (\ref{eq:upperbound})-(\ref{eq:lowerbound}), we obtain \[\Phi(B(x, r))^{p-1}|B(x, r)\cap\boz|\geq c|B(x, r)|^p\] for a sufficiently small constant $c>0$. This gives \[|B(x, r)\cap\partial\boz|\leq |B(x, r)|-|B(x, r)\cap\boz|\leq |B(x, r)|-C\frac{|B(x, r)|^p}{\Phi(B(x, r))^{p-1}}.\] Since $\overline{D\Phi}(x)<\fz$, we have \begin{eqnarray} \limsup_{r\to0^+}\frac{|B(x, r)\cap\partial\boz|}{|B(x, r)|}&\leq&\limsup_{r\to0^+}\lf(1-\frac{|B(x, r)\cap\boz|}{|B(x, r)|}\r)\nonumber\\ &\leq&\limsup_{r\to0^+}\lf(1-\frac{|B(x, r)|^{p-1}}{\Phi(B(x, r))^{p-1}}\r)\leq1-c\overline{D\Phi}(x)^{1-p}<1.\nonumber \end{eqnarray} This contradicts the assumption that $x$ is a Lebesgue point of $\partial\boz$. Hence, we conclude that $|\partial\boz|=0$. Let us then consider the case $\boz\subset\rr^2$. By \cite[Theorem A.29]{HenclandPekka}, for every $x\in\partial\boz$ and every $0<r<\min\lf\{1, \frac{1}{4}\diam(\boz)\r\}$, we have \[Cap_1(\boz\cap B(x, r); B(x, 2r))\geq cr\] for a constant $0<c<1$. This implies that $\boz$ is $1$-fat at every $x\in\partial\boz$. Hence, by combining this with the first part of the theorem, we have that the boundary of any planar $(W^{1, p}, BV)$-extension domain is of volume zero. \end{proof} \bibliographystyle{alpha} \bibliography{Bibliography} \end{document}
2205.10727v2
http://arxiv.org/abs/2205.10727v2
Residual regularization path-following methods for linear complementarity problems
\documentclass[smallextended,envcountsect]{svjour3} \smartqed \usepackage{lineno} \modulolinenumbers[5] \journalname{Journal of XXXX} \usepackage{amssymb,amsmath,amsfonts} \usepackage{epsfig} \usepackage[bookmarksnumbered, colorlinks, urlcolor=blue, linkcolor=blue, citecolor=red, plainpages=false, pdfstartview=FitW]{hyperref} \usepackage{mathptmx} \usepackage{diagbox} \usepackage{graphicx} \usepackage{color} \usepackage{caption} \usepackage{wrapfig} \usepackage{graphics} \usepackage{enumerate} \usepackage{amsxtra} \usepackage{latexsym} \usepackage{multirow} \usepackage{subfigure} \usepackage{epstopdf} \usepackage{pdfpages} \usepackage{algorithm} \usepackage{algorithmic} \usepackage{verbatim} \usepackage{cite} \usepackage{url} \newcommand{\thmlist}{ \begin{list}{Step 1} {\setlength{\leftmargin}{0.6 in}\setlength{\labelwidth} {0.5 in}}} \newcommand{\alglist}{ \begin{list}{Step 1} {\setlength{\leftmargin}{1.1 in} \setlength{\labelwidth}{1.0 in}}} \renewcommand{\proof} {\noindent {\bf Proof.} \quad} \newcommand{\eproof} {$\quad \square$} \newtheorem{thm}{Theorem}[section] \renewcommand{\subtitle}[1]{\color{blue}} \renewcommand{\arraystretch}{1.5} \def\red#1{\color{red}{#1}\color{black}} \def\blue#1{\color{blue}{#1}\color{black}} \def\green#1{\color{green}{#1}\color{black}} \def\cyan#1{\color{cyan}{#1}\color{black}} \def\magenta#1{\color{magenta}{#1}\color{black}} \def\yellow#1{\color{yellow}{#1}\color{black}} \def\assumc#1{\color{blue}{#1}\color{black}} \def\testc#1{\color{blue}{#1}\color{black}} \def\notionc#1{\color{red}{#1}\color{black}} \def\mathc#1{\color{magenta}{#1}\color{black}} \def\important#1{\color{blue}{#1}\color{black}} \def\real{{\mathbb{R}}} \def\cvd{~\vbox{\hrule\hbox{ \vrule height1.3ex\hskip0.8ex\vrule}\hrule } } \begin{document} \title{Residual regularization path-following methods for linear complementarity problems} \titlerunning{Residual regularization path-following methods for LCP} \author{Xin-long Luo \textsuperscript{$\ast$} \and Sen Zhang \and Hang Xiao} \authorrunning{Luo \and Zhang \and Xiao} \institute{ Xin-long Luo, Corresponding author \at School of Artificial Intelligence, Beijing University of Posts and Telecommunications, P. O. Box 101, Xitucheng Road No. 10, Haidian District, 100876, Beijing China, \email{[email protected]} \and Sen Zhang \at School of Artificial Intelligence, \\ Beijing University of Posts and Telecommunications, P. O. Box 101, \\ Xitucheng Road No. 10, Haidian District, 100876, Beijing China \\ \email{[email protected]} \and Hang Xiao \at School of Artificial Intelligence, \\ Beijing University of Posts and Telecommunications, P. O. Box 101, \\ Xitucheng Road No. 10, Haidian District, 100876, Beijing China \\ \email{[email protected]} } \date{Received: date / Accepted: date} \maketitle \begin{abstract} In this article, we consider the residual regularization path-following method with the trust-region updating strategy for the linear complementarity problem. This time-stepping selection based on the trust-region updating strategy overcomes the shortcoming of the line search method, which consumes the unnecessary trial steps in the transient-state phase. In order to improve the robustness of the path-following method, we use the residual regularization parameter to replace the traditional complementarity regularization parameter. Moreover, we prove the global convergence of the new method under the standard assumptions without the traditional assumption condition of the priority to feasibility over complementarity. 
Numerical results show that the new method is robust and efficient for the linear complementarity problem, especially for the dense cases. And it is more robust and faster than some state-of-the-art solvers such as the built-in subroutines PATH and MILES of the GAMS v28.2 (2019) environment. The computational time of the new method is about 1/3 to 1/10 of that of PATH for the dense linear complementarity problem. \end{abstract} \keywords{Continuation Newton method \and residual regularization \and trust-region updating strategy \and complementarity \and path-following method} \vskip 2mm \subclass{90C33 \and 65K05 \and 65L05 \and 65L20} \section{Introduction} \vskip 2mm In this article, we are mainly concerned with the linear complementarity problem as follows: \begin{align} y = Mx + q, \; x_{i}y_{i} = 0, \; i = 1, \, 2, \, \ldots, \, n, \; x \ge 0, \; y \ge 0, \label{LCP} \end{align} where $q \in \Re^{n}$ is a vector and $M$ is an $n \times n$ positive semi-definite matrix. For the linear complementarity problem \eqref{LCP}, there are many practical applications such as the equilibrium of forces \cite{Erleben2007} and the economic equilibrium problem \cite{Mathiesen1985,Rutherford1995}. And the solutions of many problems such as the linear programming and the convex quadratic programming can be obtained by solving it \cite{CPS2009,Wright1997}. Furthermore, there are many efficient methods to solve it such as the Lemke's complementary pivoting algorithm (MILES) \cite{CPS2009,LH1964,Rutherford1995}, the path-following methods \cite{Fisher1992,Wright1994,Wright1997,Zhang1994} and their mixture method (PATH) \cite{Dirkse1994,DF1995}. \vskip 2mm In this paper, we consider another path-following method based on the Newton flow with nonnegative constraints, which is the variant of the primal-dual path-following method. In order to improve its robustness and efficiency, we use the residual regularization technique to avoid the singularity of the Jacobian matrix, and adopt the trust-region updating strategy to adjust the time step adaptively. Firstly, we construct the regularization Newton flow with nonnegative constraints for the linear complementarity problem \eqref{LCP} based on the primal-dual path-following method. Then, we use the implicit Euler method and the linear approximation of the quadratic function to obtain the regularization path-following method for following the the trajectory of the Newton flow. Finally, we adopt the trust-region updating strategy to adjust the time step adaptively. The advantage of this time-stepping selection compared with the line search method is that it overcomes the shortcoming of the line search method, which consumes the unnecessary trial steps in the transient-state phase. The other advantage is that it improves the robustness of the path-following method and it does not require the traditional assumption condition of the priority to feasibility over complementarity of the path-following method \cite{Wright1994,Zhang1994} when we prove its global convergence. \vskip 2mm The rest of this article is organized as follows. In the next section, we consider the regularization path-following method and the adaptive trust-region updating strategy for the linear complementarity problem. In section 3, we prove the global convergence of the new method under the standard assumptions without the traditional assumption condition of the priority to feasibility over complementarity. In section 4, we compare the new method with two state-of-the-art solvers, i.e. 
PATH \cite{Dirkse1994,DF1995,PATH} and MILES (a Mixed Inequality and nonLinear Equation Solver) \cite{Mathiesen1985,Rutherford1995,Rutherford2022} for sparse problems and dense problems, test matrices of which come from the linear programming subset of NETLIB \cite{NETLIB}. The new method is coded with the MATLAB language and executed in the MATLAB (R2020a) environment \cite{MATLAB}. PATH and MILES are executed in the GAMS v28.2 (2019) environment \cite{GAMS}. Numerical results show that the new method is robust and efficient for solving the linear complementarity problem. It is more robust and faster than PATH and MILES for the dense problems. Finally, some discussions are given in section 5. $\|\cdot\|$ denotes the Euclidean vector norm or its induced matrix norm throughout the paper. \vskip 2mm \section{Regularization path-following methods} \subsection{The continuous Newton flow} \label{SUBSECNF} For convenience, we rewrite the linear complementarity problem \eqref{LCP} as the following nonlinear system of equations with nonnegativity constraints: \begin{align} F(z) = \begin{bmatrix} y - (Mx + q) \\ XY e \end{bmatrix} = 0, \; (x, \, y) \ge 0, \; z = (x, \, y), \label{NLELCP} \end{align} where $X = diag(x), \; Y = diag(y)$ and all components of vector $e$ equal one. It is not difficult to see that the Jacobian matrix $J(z)$ of $F(z)$ has the following form: \begin{align} J(z) = \begin{bmatrix} - M & I \\ Y & X \end{bmatrix}. \label{JZMATLCP} \end{align} From the second block $XYe = 0$ of equation \eqref{NLELCP}, we know that $x_{i} = 0$ or $y_{i} = 0 \, (i = 1:n) $. Thus, the Jacobian matrix $J(z)$ of equation \eqref{JZMATLCP} may be singular, which leads to numerical difficulties near the solution of the nonlinear system \eqref{NLELCP} for the Newton method or its variants. In order to overcome this difficulty, we consider its perturbed system \cite{AG2003,Tanabe1988} as follows: \begin{align} F_{\mu}(z) = F(z) - \begin{bmatrix} 0 \\ \mu e \end{bmatrix} = 0, \; z = (x, \, y) > 0, \; \mu > 0. \label{PNLEX} \end{align} \vskip 2mm It is not difficult to verify that the Jacobian matrix $J(z)$ defined by equation \eqref{JZMATLCP} is nonsingular when $M$ is a positive semi-definite matrix and $(x, \; y) > 0$ (see Lemma 5.9.8, p. 469 in \cite{CPS2009}). Thus, by using the implicit function theorem and the inequality $(a - b)(c-d) \le |ac - bd| \; (a > 0, \, b > 0, \, c > 0, \, d > 0)$, the perturbed system \eqref{PNLEX} has a unique solution when $M$ is a positive definite matrix (see Theorem 5.9.13, p. 471 in \cite{CPS2009} for its detailed proof). The solution $z(\mu)$ of the perturbed system \eqref{PNLEX} defines the central path, and $z(\mu)$ approximates the solution $z^{\ast}$ of the nonlinear system \eqref{NLELCP} when $\mu$ tends to zero \cite{Wright1997,CPS2009}. \vskip 2mm If the damped Newton method is applied to the perturbed system \eqref{PNLEX} \cite{Kelley2003,NW1999}, we have \begin{align} z_{k+1} = z_{k} - \alpha_{k} J(z_{k})^{-1} F_{\mu}(z_{k}), \label{NEWTON} \end{align} where $J(z_{k})$ is the Jacobian matrix of $F_{\mu}(z)$. Regarding $z_{k+1} = z(t_{k} + \alpha_{k}), \; z_{k} = z(t_{k})$ and letting $\alpha_{k} \to 0$, we obtain the continuous Newton flow with nonnegativity constraints \cite{AS2015,Davidenko1953,LY2021,PHP1975,Tanabe1988} of the perturbed system \eqref{PNLEX} as follows: \begin{align} \frac{dz(t)}{dt} = - J(z)^{-1}F_{\mu}(z), \hskip 2mm z = (x, \, y) > 0.
\label{NEWTONFLOW} \end{align} Actually, if we apply an iteration with the explicit Euler method \cite{SGT2003} for the continuous Newton flow \eqref{NEWTONFLOW}, we obtain the damped Newton method \eqref{NEWTON}. \vskip 2mm Since the Jacobian matrix $J(z) = F'_{\mu}(z)$ may be singular, we reformulate the continuous Newton flow \eqref{NEWTONFLOW} as the following general formula for the linear complementarity problem \eqref{NLELCP} : \begin{align} & -M \frac{dx(t)}{dt} + \frac{dy(t)}{dt} = - r_{q}(x, \, y), \label{LFRQDAE} \\ & Y \frac{dx(t)}{dt} + X \frac{dy(t)}{dt} = - (XYe - \mu(t) e), \; (x, \, y) > 0, \label{DAEFLOW} \end{align} where the residual $r_{q}(x, \, y) = y - (Mx+q)$. The continuous Newton flow \eqref{LFRQDAE}-\eqref{DAEFLOW} has some nice properties. We state one of them as the following property \ref{PRODAEFLOW} \cite{Branin1972,LXL2022,LY2021,LX2021,Tanabe1979}. \vskip 2mm \begin{property} \label{PRODAEFLOW} Assume that $(x(t), \, y(t))$ is the solution of the continuous Newton flow \eqref{LFRQDAE}-\eqref{DAEFLOW}, then $r_{q}(x(t), \, y(t))$ converges to zero and $x_{i}(t)y_{i}(t) \, (i = 1, \, 2, \, \ldots, \, n)$ converge to zero when $0 \le \mu(t) \le \sigma \min_{1\le i \le n}\{x_{i}(t)y_{i}(t)\} \; (0 < \sigma < 1)$ and $t \to \infty$. That is to say, for every limit point $(x^{\ast}, \, y^{\ast})$ of $(x(t), \, y(t))$, it is also a solution of the linear complementarity problem \eqref{NLELCP}. Furthermore, $x(t)$ and $y(t)$ keep positive values when the initial point $\left(x^{0}_{i}, \; y^{0}_{i}\right) > 0 \, (i = 1, \, 2, \, \ldots, \, n)$. \end{property} \proof Assume that $z(t)$ is the continuous solution of the continuous Newton flow \eqref{LFRQDAE}-\eqref{DAEFLOW}, then we have \begin{align} & \frac{d}{dt} r_{q}(x, \, y) = -M \frac{dx}{dt} + \frac{dy}{dt} = - r_{q}(x,\, y), \label{ODELEQ} \\ & \frac{d}{dt} (XYe) = X \frac{dy}{dt} + Y \frac{dx}{dt} = - (XYe - \mu(t) e). \label{ODECEQ} \end{align} Consequently, from equations \eqref{ODELEQ}-\eqref{ODECEQ} and $0 \le \mu(t) \le \sigma \min_{1\le i \le n}\{x_{i}(t)y_{i}(t)\}$, we obtain \begin{align} & r_{q}(x(t), \, y(t)) = r_{q}\left(x^{0}, \, y^{0}\right) \exp(-t), \label{FUNPARQ} \\ & -XYe \le \frac{d}{dt} (XYe) \le -(1-\sigma)XYe. \label{FUNPAR} \end{align} From equations \eqref{FUNPARQ}-\eqref{FUNPAR}, we know that $r_{q}(x(t), \, y(t))$ converges to zero with the linear rate of convergence when $t$ tends to infinity. Furthermore, from equation \eqref{FUNPAR} and the Gronwall inequality \cite{Gronwall1919}, we have \begin{align} x_{i}^{0}y_{i}^{0} \exp(-t) \le x_{i}(t)y_{i}(t) \le x_{i}^{0}y_{i}^{0} \exp(-(1 - \sigma)t), \; i = 1, \, 2, \, \ldots, \, n. \label{SCGINEQ} \end{align} Consequently, from equation \eqref{SCGINEQ}, we know that $x_{i}(t)y_{i}(t) \ge 0 \; (i = 1, \, 2, \, \ldots, \, n)$ and $\lim_{t \to \infty} x_{i}(t)y_{i}(t) = 0 \; (i = 1, \, 2, \, \ldots, \, n)$. \vskip 2mm From equation \eqref{SCGINEQ}, we know that $x_{i}(t)y_{i}(t) > 0 \; (t \ge 0)$ when $\left(x^{0}, \; y^{0}\right) > 0$. If we have $x_{i}(T) < 0$ or $y_{i}(T) < 0$ for a finite value $T > 0$, there exists $\bar{t} \in (0, \; T)$ such that $x_{i}(\bar{t}) = 0$ or $y_{i}(\bar{t}) = 0$. Consequently, we have $x_{i}(\bar{t})y_{i}(\bar{t}) = 0$, which contradicts $x_{i}(\bar{t})y_{i}(\bar{t}) > 0$. Thus, we have $(x(t), \, y(t)) > 0 $ for all $t > 0$. 
Therefore, if the solution $(x(t), \, y(t))$ of the continuous Newton flow \eqref{LFRQDAE}-\eqref{DAEFLOW} belongs to a compact set, there exists a limit point $(x^{\ast}, \, y^{\ast})$ when $t$ tends to infinity, and this limit point $(x^{\ast}, \, y^{\ast})$ is also a solution of the linear complementarity problem \eqref{NLELCP}. \qed \vskip 2mm \begin{remark} The inverse $J(z)^{-1}$ of the Jacobian matrix $J(z)$ can be regarded as the pre-conditioner of $F_{\mu}(z)$ such that the every element $z_{i}(t)$ of $z(t)$ has roughly the same rate of convergence and it mitigates the stiffness of the ODE \eqref{NEWTONFLOW} \cite{LXL2022,LX2021}. This property is very useful since it makes us adopt the explicit ODE method to follow the trajectory of the Newton flow \eqref{NEWTONFLOW} efficiently. \end{remark} \vskip 2mm \subsection{The residual regularization path-following method} \label{SUBSECRPFM} \vskip 2mm From property \ref{PRODAEFLOW}, we know that the continuous Newton flow \eqref{LFRQDAE}-\eqref{DAEFLOW} has the global convergence. However, when the Jacobian matrix $J(z)$ is singular or nearly singular, the ODE \eqref{LFRQDAE}-\eqref{DAEFLOW} is the system of differential-algebraic equations \cite{AP1998,BCP1996,HW1996} and its trajectory can not be efficiently solved by the general ODE method \cite{BJ1998,JT1995} such as the backward differentiation formulas (the built-in subroutine ode15s.m of the MATLAB environment \cite{MATLAB,SGT2003}). Thus, we need to construct the special method to solve this problem. Furthermore, we expect that the new method has the global convergence as the homotopy continuation methods \cite{AG2003,OR2000} and the fast rate of convergence as the traditional optimization methods. In order to achieve these two aims, we consider the continuation Newton method and the trust-region updating strategy for problem \eqref{LFRQDAE}-\eqref{DAEFLOW}. \vskip 2mm We apply the implicit Euler method \cite{AP1998,BCP1996} to the continuous Newton flow \eqref{LFRQDAE}-\eqref{DAEFLOW}, then we obtain \begin{align} & -M \frac{x^{k+1} - x^{k}}{\Delta t_{k}} + \frac{y^{k+1} - y^{k}}{\Delta t_{k}} = - r_{q}\left(x^{k+1}, \, y^{k+1}\right), \label{IMLEQ} \\ & Y^{k+1} \frac{x^{k+1} - x^{k}}{\Delta t_{k}} + X^{k+1} \frac{y^{k+1} - y^{k}}{\Delta t_{k}} = - \left(X^{k+1}Y^{k+1}e - \mu(t_{k+1}) e\right). \label{IMEDAE} \end{align} Since equation \eqref{IMEDAE} is a nonlinear system, it is not directly solved, and we seek for its explicit approximation formula. We replace $Y^{k+1}$ and $X^{k+1}$ with $Y^{k}$ and $X^{k}$ in the left-hand side of equation \eqref{IMEDAE}, respectively. And we substitute $X^{k+1}Y^{k+1}e$ with its linear approximation $X^{k}Y^{k}e + \frac{\Delta t_{k}}{1 + \Delta t_{k}} (Y^{k}\Delta x^{k} + X^{k}\Delta y^{k})$ in the right-hand side of equation \eqref{IMEDAE}. We set $\mu(t_{k+1}) = \sigma_{k} \mu_{k}$. 
Then, we obtain the continuation Newton method (one of the path-following methods) as follows:
\begin{align}
& -M \Delta x^{k} + \Delta y^{k} = - r_{q}^{k},  \label{CONNML} \\
& Y^{k} \Delta x^{k} + X^{k}\Delta y^{k} = - r_{c}^{k}, \label{CONNMC} \\
& x^{k+1} = x^{k} + \frac{\Delta t_k}{1+\Delta t_k} \Delta x^{k}, \;
 y^{k+1} = y^{k} + \frac{\Delta t_k}{1+\Delta t_k} \Delta y^{k}, \label{CONNMZ1}
\end{align}
where $r_{q}^{k} = r_{q}(x^{k}, \, y^{k})$, $r_{c}^{k} = X^{k}y^{k} - \sigma_{k} \mu_{k} e$, $0 < \sigma_{min} \le \sigma_{k} \le \sigma_{max} < 1$, and the perturbation parameter $\mu_{k}$ is selected as follows:
\begin{align}
\mu_{k} = \frac{(x^{k})^{T}y^{k} + \|r_{q}^{k}\|}{2n}. \label{DEFMUK}
\end{align}
The selection of $\mu_{k}$ in equation \eqref{DEFMUK} is slightly different from the traditional selection $\mu_{k} = \left(x^{k}\right)^{T}y^{k}/n$ \cite{Wright1994,Wright1997,Zhang1994}. According to our numerical experiments, the selection of $\mu_{k}$ in equation \eqref{DEFMUK} improves the robustness of the path-following method, in comparison to the traditional selection $\mu_{k} = \left(x^{k}\right)^{T}y^{k}/n$.

\vskip 2mm

\begin{remark}
If we set $\alpha_{k} = \Delta t_k/(1+\Delta t_k)$ in equation \eqref{CONNMZ1}, we recover the damped Newton method (the primal-dual path-following method) \eqref{NEWTON}. However, from the point of view of the ODE method, the two are different. The damped Newton method \eqref{NEWTON} is derived from the explicit Euler method applied to the continuous Newton flow \eqref{LFRQDAE}-\eqref{DAEFLOW}. Its time step $\alpha_k$ is restricted by numerical stability \cite{HW1996,SGT2003}. That is to say, for the linear test equation $dx/dt = - \lambda x \; (\lambda > 0)$, its time step $\alpha_{k}$ is restricted by the stability region $|1-\lambda \alpha_{k}| \le 1$. Therefore, a large step $\alpha_{k}$ cannot be adopted in the steady-state phase. The continuation Newton method \eqref{CONNML}-\eqref{CONNMZ1} is derived from the implicit Euler method applied to the continuous Newton flow \eqref{LFRQDAE}-\eqref{DAEFLOW} together with the linear approximation of $F_{\mu(t_{k+1})}(z_{k+1})$, and its time step $\Delta t_k$ is not restricted by numerical stability for the linear test equation. Therefore, a large time step $\Delta t_{k}$ can be adopted in the steady-state phase, where the continuation Newton method \eqref{CONNML}-\eqref{CONNMZ1} mimics the Newton method. Consequently, it has a fast rate of convergence near the solution $z^{\ast}$ of the nonlinear system \eqref{NLELCP}. Most importantly, replacing $\alpha_{k}$ with $\Delta t_{k}/(\Delta t_{k} + 1)$ makes it convenient to adopt the trust-region updating strategy for adaptively adjusting the time step $\Delta t_{k}$, so that the continuation Newton method \eqref{CONNML}-\eqref{CONNMZ1} accurately follows the trajectory of the continuous Newton flow \eqref{LFRQDAE}-\eqref{DAEFLOW} in the transient-state phase and achieves a fast rate of convergence in the steady-state phase.
\end{remark}

\vskip 2mm

When the diagonal matrix $X^{k}$ is positive definite, from equations \eqref{CONNML}-\eqref{CONNMC}, $\Delta x^{k}$ and $\Delta y^{k}$ can be obtained from the following two subsystems:
\begin{align}
& \left(M + (X^{k})^{-1}Y^{k}\right) \Delta x^{k} = r_{q}^{k} - (X^{k})^{-1}r_{c}^{k},
\label{DELTXK} \\
& \Delta y^{k} = M \Delta x^{k} - r_{q}^{k}. \label{DELTYK}
\end{align}
When matrix $M$ is positive semi-definite and $(x^{k}, \, y^{k}) > 0$, the left-hand-side matrix $M + (X^{k})^{-1}Y^{k}$ is positive definite. Thus, the system \eqref{DELTXK} can be solved by Gaussian elimination with partial pivoting (see pp. 125-130 of \cite{GV2013}).
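\vskip 2mm

For concreteness, one iteration of \eqref{CONNML}-\eqref{CONNMZ1}, computed through the reduced system \eqref{DELTXK}-\eqref{DELTYK}, can be sketched in MATLAB as follows (the function name \texttt{cn\_step} and its calling convention are our own illustrative choices, not the authors' exact implementation):
\begin{verbatim}
% Sketch of one continuation Newton step: solve the reduced system
% (M + X^{-1}Y) dx = r_q - X^{-1} r_c,  dy = M dx - r_q,
% then damp the update by dt/(1+dt).
function [x1, y1, dx, dy] = cn_step(M, q, x, y, sigma, dt)
  n  = length(x);
  rq = y - (M*x + q);                   % residual r_q^k
  mu = (x'*y + norm(rq)) / (2*n);       % perturbation parameter mu_k
  rc = x.*y - sigma*mu*ones(n, 1);      % complementarity residual r_c^k
  dx = (M + diag(y./x)) \ (rq - rc./x); % Newton direction in x
  dy = M*dx - rq;                       % Newton direction in y
  alpha = dt/(1 + dt);                  % damping factor alpha_k
  x1 = x + alpha*dx;  y1 = y + alpha*dy;
end
\end{verbatim}
For large sparse problems one would of course keep $M + (X^{k})^{-1}Y^{k}$ in sparse form and factor it once per iteration; the dense backslash solve above is only meant to illustrate the step.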
\subsection{The time-stepping control and the initial point selection}

\vskip 2mm

Another issue is how to adaptively adjust the time step $\Delta t_k$ at every iteration. A popular and efficient time-stepping control is based on the trust-region updating strategy \cite{CGT2000,Deuflhard2004,Higham1999,Luo2010,Luo2012,LXLZ2021,LX2021,LX2021B, LX2022,LY2021,LLS2022,LXL2022,Yuan2015}. Its main idea can be described as follows. Firstly, we construct a merit function reflecting the feasibility $r_{q}(x, \, y) = 0$ and the complementarity $x_{i}y_{i} = 0 \; (i = 1, \, 2, \, \ldots, \, n)$ as
\begin{align}
\phi(x, \, y) = x^{T}y + \|r_{q}(x, \, y)\|, \label{MERITFUN}
\end{align}
where $r_{q}(x, \, y) = y - (Mx + q)$ and $(x, \, y) \ge 0$.

\vskip 2mm

Then, we define the linear approximation model $m_{k}$ of $\phi(x^{k} + \alpha_{k} \Delta x^{k}, \, y^{k} + \alpha_{k} \Delta y^{k})$ as
\begin{align}
m_{k}(\alpha) & = \phi(x^{k}, \, y^{k}) + \alpha \left((y^{k})^{T}\Delta x^{k} + (x^{k})^{T} \Delta y^{k} - \|r_{q}^{k}\|\right),  \label{LMAPP}
\end{align}
where $\alpha = \Delta t /(1+\Delta t)$ and the Newton step $(\Delta x^{k}, \, \Delta y^{k})$ is solved from equations \eqref{CONNML}-\eqref{CONNMC}.

\vskip 2mm

Finally, we adaptively adjust the time step $\Delta t_{k}$ according to the difference between $m_{k}(\alpha_{k})$ and $\phi(x^{k} + \alpha_{k} \Delta x^{k}, \, y^{k} + \alpha_{k} \Delta y^{k})$. Namely, the time step $\Delta t_{k+1}$ will be enlarged when $m_{k}(\alpha_{k})$ approximates $\phi(x^{k} + \alpha_{k} \Delta x^{k}, \, y^{k} + \alpha_{k} \Delta y^{k})$ well, and $\Delta t_{k+1}$ will be reduced when $m_{k}(\alpha_{k})$ approximates $\phi(x^{k} + \alpha_{k} \Delta x^{k}, \, y^{k} + \alpha_{k} \Delta y^{k})$ poorly.

\vskip 2mm

We define the predicted reduction $Pred_{k}$ and the actual reduction $Ared_{k}$ as follows:
\begin{align}
& Pred_{k} = \phi(x^{k}, \, y^{k}) - m_{k}(\alpha_{k})
= \alpha_{k} \left(\|r_{q}^{k}\| -(y^{k})^{T}\Delta x^{k} - (x^{k})^{T} \Delta y^{k}\right), \label{PREDK} \\
& Ared_{k} = \phi(x^{k}, \, y^{k}) - \phi(x^{k} + \alpha_{k} \Delta x^{k}, \, y^{k} + \alpha_{k} \Delta y^{k}) \nonumber \\
& \hskip 2mm =  \alpha_{k} \left(\|r_{q}^{k}\| -(y^{k})^{T}\Delta x^{k} - (x^{k})^{T} \Delta y^{k}\right) - \alpha_{k}^{2} (\Delta x^{k})^{T}\Delta y^{k}. \label{AREDK}
\end{align}
Then, we enlarge or reduce the time step $\Delta t_{k+1}$ at every iteration according to the following ratio:
\begin{align}
\rho_k = \frac{Ared_{k}}{Pred_{k}}
= \frac{\|r_{q}^{k}\| -(y^{k})^{T}\Delta x^{k} - (x^{k})^{T} \Delta y^{k} - \alpha_{k} (\Delta x^{k})^{T}\Delta y^{k}}
{\|r_{q}^{k}\| -(y^{k})^{T}\Delta x^{k} - (x^{k})^{T} \Delta y^{k}},  \label{RHOK}
\end{align}
where $\alpha_{k} = \Delta t_{k}/(1+\Delta t_{k})$. A particular adjustment strategy is given as follows:
\begin{align}
\Delta t_{k+1} =
\begin{cases}
 2 \Delta t_k, \; \text{if} \; \rho_{k} \ge \eta_{2} \; \text{and} \; (x^{k+1}, \, y^{k+1}) > 0,  \\
 \Delta t_k, \; \text{else if} \; \eta_1 \le \rho_{k} < \eta_{2} \; \text{and} \; (x^{k+1}, \, y^{k+1}) > 0, \\
 \frac{1}{2} \Delta t_k, \; \text{otherwise},
\end{cases} \label{TSK1}
\end{align}
where the constants $\eta_{1}, \; \eta_{2}$ are selected as $\eta_1 = 0.25, \; \eta_2 = 0.75$, respectively. We set
\begin{align}
(x^{k+1}, \, y^{k+1}) = (x^{k}, \, y^{k}) + \frac{\Delta t_{k}}{1+\Delta t_{k}} (\Delta x^{k}, \, \Delta y^{k}).
\label{ACCPXK1}
\end{align}
When $\rho_{k} \ge \eta_{a}$ and $(x^{k+1}, \, y^{k+1}) > 0$, we accept the trial step; otherwise we discard the trial step and set
\begin{align}
(x^{k+1}, \, y^{k+1}) = (x^{k}, \, y^{k}),  \label{NOACXK1}
\end{align}
where $\eta_{a}$ is a small positive number such as $\eta_{a} = 1.0\times 10^{-6}$.
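\vskip 2mm

The ratio \eqref{RHOK}, the time-step adjustment \eqref{TSK1} and the acceptance test can be sketched in MATLAB as follows (again an illustrative helper of ours, named \texttt{tr\_update}, rather than the authors' code):
\begin{verbatim}
% Sketch of the trust-region time-stepping control: compute rho_k from
% the predicted and actual reductions of phi(x,y) = x'*y + ||r_q(x,y)||,
% then double, keep or halve the time step and decide acceptance.
function [dt_new, accept] = tr_update(M, q, x, y, dx, dy, dt)
  eta1 = 0.25; eta2 = 0.75; eta_a = 1e-6;   % trust-region constants
  alpha = dt/(1 + dt);
  rq    = y - (M*x + q);
  pred  = alpha*(norm(rq) - y'*dx - x'*dy); % predicted reduction Pred_k
  ared  = pred - alpha^2*(dx'*dy);          % actual reduction Ared_k
  rho   = ared/pred;
  xn = x + alpha*dx;  yn = y + alpha*dy;    % trial point
  pos = all(xn > 0) && all(yn > 0);
  if rho >= eta2 && pos
    dt_new = 2*dt;                          % enlarge the time step
  elseif rho >= eta1 && pos
    dt_new = dt;                            % keep the time step
  else
    dt_new = dt/2;                          % reduce the time step
  end
  accept = (rho >= eta_a) && pos;           % otherwise keep (x^k, y^k)
end
\end{verbatim}
Note that, because of equation \eqref{CONNML}, the closed form \eqref{AREDK} already equals the exact decrease of the merit function, so no additional evaluation of $\phi$ is required.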
\vskip 2mm

\begin{remark}
This new time-stepping control based on the trust-region updating strategy has some advantages over the traditional line search strategy. If we use the line search strategy and the damped Newton method \eqref{NEWTON} to follow the trajectory $z(t)$ of the continuous Newton flow \eqref{NEWTONFLOW}, then, in order to achieve a fast rate of convergence in the steady-state phase, the time step $\alpha_{k}$ of the damped Newton method is tried starting from $1$ and halved repeatedly at every iteration. Since the linear model $F_{\sigma_{k} \mu_{k}}(z_{k}) + J(z_{k})\Delta z_{k}$ may not approximate $F_{\sigma_{k}\mu_{k}}(z_{k}+\Delta z_{k})$ well in the transient-state phase, the time step $\alpha_{k}$ will be small there. Consequently, the line search strategy consumes unnecessary trial steps in the transient-state phase. However, the selection of the time step $\Delta t_{k}$ based on the trust-region updating strategy \eqref{RHOK}-\eqref{TSK1} overcomes this shortcoming.
\end{remark}

\vskip 2mm

In order to ensure that the algorithm works well for the general linear complementarity problem, the selection of the initial point is also a key issue. We select the starting point $(x^{0}, \, y^{0})$ as follows:
\begin{align}
x^{0} = 10*e, \; v^{0} = Mx^{0} + q, \;
y^{0}_{i} =
\begin{cases}
v_{i}^{0}, \; \text{if} \; v_{i}^{0} > 0, \\
10^{-3}, \; \text{otherwise}.
\end{cases}
\label{INIPTS}
\end{align}

\vskip 2mm

In order to improve the stability of the algorithm, we add a small regularization term $\upsilon I$ to matrix $M$ \cite{CPS2009,Gana1982,Hansen1994,Venkateswaran1993} when $\mu_{k}$ is not small, where $\mu_{k}$ is defined by equation \eqref{DEFMUK}. Specifically, we adopt the following strategy for the regularized matrix $M_{\upsilon}$:
\begin{align}
M_{\upsilon} =
\begin{cases}
M + \upsilon I, \; \text{if} \; \mu_{k} \ge \upsilon, \\
M, \; \text{otherwise},
\end{cases} \label{REGMAT}
\end{align}
where $\upsilon = 10^{-3}$.

\vskip 2mm

According to the above discussions, we give a detailed description of the path-following method with the trust-region updating strategy for the monotone linear complementarity problem \eqref{LCP} in Algorithm \ref{ALGRPFM}.

\vskip 2mm

\begin{algorithm}
\renewcommand{\algorithmicrequire}{\textbf{Input:}}
\renewcommand{\algorithmicensure}{\textbf{Output:}}
\caption{Regularization path-following methods with the trust-region updating strategy for linear complementarity problems (The RPFMTr method)}
\label{ALGRPFM}
\begin{algorithmic}[1]
\REQUIRE ~~\\
matrix $M \in \Re^{n \times n}$ and vector $q \in \Re^{n}$ for the problem: $y = Mx + q, \; x_{i}y_{i} = 0 \; (i = 1:n), \; x \ge 0, \; y \ge 0$.
\ENSURE ~~ \\
the linear complementarity solution: $(solx, \, soly)$.
\STATE Initialize parameters: $\eta_{a} = 10^{-6}$, $\eta_1 = 0.25$, $\eta_2 = 0.75$, $\epsilon = 10^{-6}$, $\Delta t_0 = 10^{-2}$, bigMfac = 10, $\upsilon = 10^{-3}$, $\sigma_{0} = 0.5$, maxit = 600.
\STATE Initialize $x^0$ = bigMfac*ones(n, 1); $y^{0} = Mx^{0} + q$; $y^{0}(y^{0} < 0) = 10^{-3}$.
\STATE Regularize matrix: $M_{\upsilon} = M + \upsilon I$.
\STATE Set flag\_success\_trialstep = 1, $\text{itc} = 0, \; k = 0$.
\WHILE {(itc $<$ maxit)} \IF{(flag\_success\_trialstep == 1)} \STATE Set itc = itc + 1. \STATE Compute $r_{q}^{k} = y^{k} - (M_{\upsilon}x^{k} + q)$; $\mu_{k} = (\|r_{q}^{k}\| + (x^{k})^{T}y^{k})/(2n)$. \STATE Compute $\text{Resk} = \max \{\|x^{k}.y^{k}\|_{\infty}, \; \|y^{k} - (Mx^{k} + q)\|_{\infty}\}$. \IF{($\text{Resk} < \epsilon$)} \STATE break; \ENDIF \STATE Set $\sigma_k = \min \{\sigma_{k}, \; \mu_k\}$. \STATE Compute $r_{c}^{k} = x^{k}.y^{k} - \sigma_{k}\mu_{k}*\text{ones}(n,1)$. \STATE By solving the linear system \eqref{DELTXK}-\eqref{DELTYK}, we obtain $\Delta x^{k}$ and $\Delta y^{k}$. \ENDIF \STATE Set $(x^{k+1}, \, y^{k+1}) = (x^{k}, \, y^{k}) + \frac{\Delta t_{k}}{1 + \Delta t_{k}} (\Delta x^{k}, \, \Delta y^{k})$. \STATE Compute the ratio $\rho_{k}$ from equation \eqref{RHOK} and adjust $\Delta t_{k+1}$ according to the formula \eqref{TSK1}. \IF{(($\rho_{k} \ge \eta_{a}$) \&\& ($x^{k+1}, \, y^{k+1}) > 0$))} \STATE Accept the trial point $(x^{k+1}, \, y^{k+1})$; Set flag\_success\_trialstep = 1. \IF{($\|x^{k+1} - x^{k}\|_{\infty} > 0.1$)} \STATE Set $\sigma_{k+1} = 0.5$. \ELSE \STATE Set $\sigma_{k+1} = 0.1$. \ENDIF \IF{($\mu_{k} < \upsilon$)} \STATE Set $M_{\upsilon} = M$. \ENDIF \ELSE \STATE Set $(x^{k+1}, \, y^{k+1}) = (x^{k}, \, y^{k})$; flag\_success\_trialstep = 0. \ENDIF \STATE Set $k \leftarrow k+1$. \ENDWHILE \STATE Return $(solx, \, soly) = (x^{k}, \, y^{k})$. \end{algorithmic} \end{algorithm} \vskip 2mm \section{Convergence analysis} \vskip 2mm In this section, we analyze the global convergence of Algorithm \ref{ALGRPFM}. Without loss of generality, we assume $M_{\upsilon} = M$ in the following analysis. Namely, we do not discriminate between $M_{\upsilon}$ and $M$. Similarly to the analysis techniques of the references \cite{Wright1994,Xu1991,Zhang1994}, we firstly construct an auxiliary sequence $(u^{k}, \, v^{k})$ as follows: \begin{align} u^{k+1} = u^{k} + \alpha_{k} (x^{k} + \Delta x^{k} - u^{k}), \label{AUXSEQUK} \\ v^{k+1} = v^{k} + \alpha_{k} (y^{k} + \Delta y^{k} - v^{k}), \label{AUXSEQVK} \end{align} where $(\Delta x^{k}, \, \Delta y^{k})$ are solved by equations \eqref{CONNML}-\eqref{CONNMC} and $(u^{0}, \, v^{0}) \le (x^{0}, \, y^{0})$ satisfies the feasibility $r_{q}(u^{0}, \, v^{0}) = 0$. Then, it is not difficult to verify \begin{align} & r_{q}(u^{k}, \, v^{k}) = v^{k} - (Mu^{k} + q) = 0, \label{LCPFUV} \\ & x^{k+1} - u^{k+1} = (1 - \alpha_{k}) (x^{k} - u^{k}) \ge 0, \label{XKGEUK} \\ & y^{k+1} - v^{k+1} = (1 - \alpha_{k})(y^{k} - v^{k}) \ge 0, \label{YKGEVK} \end{align} where $\alpha_{k} = \Delta t_{k}/(1+\Delta t_{k})$. \vskip 2mm Meanwhile, in order to obtain the global convergence, we need to enforce the condition specified below. Firstly, we select a constant $\gamma \in (0, \, 1)$ that satisfies \begin{align} 0 < \gamma \le \frac{\mu_{0}} {\min_{1 \le i \le n} \{x^{0}_{i}y^{0}_{i}\}} = \frac{(x^{0})^{T}y^{0} + \|r_{q}^{0}\|} {2n \, \min_{1 \le i \le n} \{x^{0}_{i}y^{0}_{i}\}}. 
\label{DEFGAMMA}
\end{align}
Then, we select $\alpha_{k}$ such that
\begin{align}
x^{k}_{i}(\alpha)y^{k}_{i}(\alpha) \ge \gamma \mu_{k}(\alpha), \; i = 1, \, 2, \, \ldots, n
\label{XYGEUK}
\end{align}
holds for all $\alpha \in (0, \; \alpha_{k}] \subset (0, \, 1]$, where $x^{k}(\alpha)$, $y^{k}(\alpha)$, $r_{q}^{k}(\alpha)$ and $\mu_{k}(\alpha)$ are defined by
\begin{align}
& x^{k}(\alpha) = x^{k} + \alpha \Delta x^{k}, \; y^{k}(\alpha) = y^{k} + \alpha \Delta y^{k}, \label{XKYKALPHA} \\
& r_{q}^{k}(\alpha) = r_{q}(x^{k}(\alpha), y^{k}(\alpha)), \;
\mu_{k}(\alpha) = \frac{(x^{k}(\alpha))^{T}y^{k}(\alpha) + \|r_{q}^{k}(\alpha)\|}{2n}. \label{RMUKALPHA}
\end{align}

\vskip 2mm

Condition \eqref{XYGEUK} prevents the iterates from prematurely getting too close to the boundary of the positive quadrant, and its restriction on $\gamma$ is very mild. In practice, $\gamma$ can be selected to be very small.

\vskip 2mm

In order to establish the global convergence of Algorithm \ref{ALGRPFM}, similarly to the results of \cite{Wright1994,Zhang1994}, we first prove several technical lemmas.

\vskip 2mm

\begin{lemma} \label{LEMXYBOUD}
When $(x^{k}(\alpha), \, y^{k}(\alpha))$ satisfies the condition \eqref{XYGEUK}, where $(\Delta x^{k}, \, \Delta y^{k})$ is the solution of equations \eqref{CONNML}-\eqref{CONNMC}, we have
\begin{align}
(x^{k}(\alpha), \, y^{k}(\alpha)) \ge 0. \label{XYALPHAGE0}
\end{align}
Furthermore, when $x^{k}_{i}y^{k}_{i} \ge \gamma \mu_{k} \; (i=1, \, 2, \ldots, \, n)$ and $\alpha$ satisfies
\begin{align}
0 \le \alpha \le \min\left\{1, \; \frac{1 - \gamma/2}{1+\gamma/2}
\frac{\sigma_{k}\mu_{k}}{\left\|\Delta X^{k}\Delta y^{k}\right\|_{\infty}} \right\},
\label{ALPAUBND}
\end{align}
the inequality \eqref{XYGEUK} holds.
\end{lemma}
\proof By summing the condition \eqref{XYGEUK} over $i = 1, \, 2, \, \ldots, \, n$, we obtain
\begin{align}
& (x^{k}(\alpha))^{T}y^{k}(\alpha) \ge n \gamma \mu_{k}(\alpha)
= \frac{1}{2}\gamma \left((x^{k}(\alpha))^{T}y^{k}(\alpha)+ (1-\alpha)\|r_{q}^{k}\|\right) \nonumber \\
& \hskip 2mm  \ge \frac{1}{2} \gamma (x^{k}(\alpha))^{T}y^{k}(\alpha). \label{XYKGEGXYK}
\end{align}
Consequently, from equation \eqref{XYKGEGXYK} and $0 < \gamma < 1$, we obtain $(x^{k}(\alpha))^{T}y^{k}(\alpha) \ge 0$. By substituting it into the condition \eqref{XYGEUK}, we have $x^{k}_{i}(\alpha)y^{k}_{i}(\alpha) \ge 0 \, (i = 1, \, 2, \, \ldots, \, n)$. From equation \eqref{CONNMC}, we obtain
\begin{align}
& \alpha \sigma_{k} \mu_{k} = \alpha x^{k}_{i}y^{k}_{i}
+ \alpha x^{k}_{i} \Delta y^{k}_{i} + \alpha y^{k}_{i}\Delta x^{k}_{i} \nonumber \\
& \hskip 2mm = (\alpha - 2) x^{k}_{i}y^{k}_{i} + y^{k}_{i}x^{k}_{i}(\alpha)
+ x^{k}_{i}y^{k}_{i}(\alpha), \; i = 1, \, 2, \ldots, \, n. \label{POSEQCONC}
\end{align}
If $(x^{k}_{i} (\alpha), \, y^{k}_{i}(\alpha)) < 0$, or $x^{k}_{i}(\alpha) = 0$ and $y^{k}_{i}(\alpha) < 0$, or $x^{k}_{i}(\alpha) < 0$ and $y^{k}_{i}(\alpha) = 0$, then, in combination with $(x^{k}_{i}, \, y^{k}_{i}) \ge 0$, this contradicts equation \eqref{POSEQCONC}. Therefore, we obtain $(x^{k}_{i}(\alpha), \, y^{k}_{i}(\alpha)) \ge 0 \, (i = 1, \, 2, \, \ldots, \, n)$, which proves equation \eqref{XYALPHAGE0}.
\vskip 2mm From equations \eqref{CONNML}-\eqref{CONNMC}, we have \begin{align} &x^{k}_{i}(\alpha)y^{k}_{i}(\alpha) - \gamma \mu_{k}(\alpha) = x_{i}^{k}y_{i}^{k} + \alpha(x^{k}_{i} \Delta y^{k}_{i} + y^{k}_{i} \Delta x^{k}_{i}) + \alpha^{2}\Delta x^{k}_{i} \Delta y^{k}_{i} \nonumber \\ & \hskip 2mm - \gamma \mu_{k} - \frac{1}{2n}\gamma \alpha \left((x^{k})^{T}\Delta y^{k} + (y^{k})^{T} \Delta x^{k} - \|r_{q}^{k}\| + \alpha (\Delta x^{k})^{T} \Delta y^{k}\right) \nonumber \\ & = x_{i}^{k}y_{i}^{k} + \alpha(\sigma_{k} \mu_{k} - x^{k}_{i} y^{k}_{i}) + \alpha^{2}\Delta x^{k}_{i} \Delta y^{k}_{i} \nonumber \\ & \hskip 2mm - \gamma \mu_{k} - \frac{1}{2n}\gamma \alpha \left(n \sigma_{k} \mu_{k} - (x^{k})^{T}y^{k} - \|r_{q}^{k}\| + \alpha (\Delta x^{k})^{T} \Delta y^{k}\right) \nonumber \\ & = (1-\alpha)\left(x^{k}_{i}y^{k}_{i} - \gamma \mu_{k}\right) + \alpha \left(\sigma_{k} \mu_{k} \left(1 - \frac{\gamma}{2}\right) + \alpha \left(\Delta x^{k}_{i} \Delta y^{k}_{i} - \frac{\gamma}{2n}(\Delta x^{k})^{T}\Delta y^{k}\right)\right) \nonumber \\ & \ge (1-\alpha)\left(x^{k}_{i}y^{k}_{i} - \gamma \mu_{k}\right) + \alpha \left(\sigma_{k} \mu_{k} \left(1 - \frac{\gamma}{2}\right) - \alpha \left(1 + \frac{\gamma}{2}\right) \left\|\Delta X^{k} \Delta y^{k} \right\|_{\infty}\right), \label{WNNEQALP} \end{align} where $\Delta X^{k} = \text{diag}(\Delta x^{k})$. By substituting $x^{k}_{i}y^{k}_{i} \ge \gamma \mu_{k}$ and $0 < \alpha \le 1$ into equation \eqref{WNNEQALP}, we have \begin{align} x^{k}_{i}(\alpha)y^{k}_{i}(\alpha) - \gamma \mu_{k}(\alpha) \ge \alpha \left(\sigma_{k} \mu_{k} \left(1 - \frac{\gamma}{2}\right) - \alpha \left(1 + \frac{\gamma}{2}\right) \left\|\Delta X^{k} \Delta y^{k} \right\|_{\infty}\right). \label{XYGEUALP} \end{align} Therefore, when $\alpha$ satisfies equation \eqref{ALPAUBND}, from equation \eqref{XYGEUALP}, we obtain \begin{align} x^{k}_{i}(\alpha)y^{k}_{i}(\alpha) - \gamma \mu_{k}(\alpha) \ge 0, \; i = 1, \, 2, \, \ldots, \, n, \nonumber \end{align} which gives the inequality \eqref{XYGEUK}. \eproof \vskip 2mm \begin{lemma} \label{LEMXYKUB} Assume that $\{(x^{k}, \, y^{k})\}$ is generated by Algorithm \ref{ALGRPFM}. Then, for any feasible solution $(\bar{x}, \, \bar{y})$ of the linear complementarity problem \eqref{NLELCP}, there exists a positive constant $C_{1}$ such that \begin{align} & (y^{k})^{T}(x^{k} - u^{k}) + (x^{k})^{T}(y^{k} - v^{k}) \le C_{1} \label{YXMULEUB} \end{align} holds for all $k = 0, \, 1, \, \ldots$. Furthermore, if $(\bar{x}, \, \bar{y})$ is strictly feasible, $\{(x^{k}, \, y^{k})\}$ is bounded. \end{lemma} \proof From equation \eqref{LCPFUV}, we know that $(u^{k}, \, v^{k})$ satisfies the feasibility. By combining it with the semi-definite positivity of $M$, we have \begin{align} (\bar{x} - u^{k})^{T}(\bar{y} - v^{k}) = (\bar{x} - u^{k})^{T}M(\bar{x} - u^{k}) \ge 0. \label{DXUYVGEZ} \end{align} Consequently, from equation \eqref{DXUYVGEZ}, we have \begin{align} 0 & \le (\bar{x} - u^{k})^{T}(\bar{y} - v^{k}) \nonumber \\ & = (\bar{x} - x^{k}+ (x^{k} - u^{k}))^{T}(\bar{y} - y^{k} + (y^{k} - v^{k})) \nonumber \\ & = \bar{x}^{T}(y^{k} - v^{k}) - (x^{k})^{T}(y^{k} - v^{k}) + \bar{x}^{T}\bar{y} + (x^{k})^{T}y^{k} - \bar{x}^{T}y^{k} - \bar{y}^{T}x^{k} \nonumber \\ & \hskip 2mm + (x^{k} - u^{k})^{T}(y^{k} - v^{k}) + \bar{y}^{T}(x^{k} - u^{k}) - (y^{k})^{T}(x^{k} - u^{k}) . \label{SPLGEZ} \end{align} From equations \eqref{XKGEUK}-\eqref{YKGEVK}, we have \begin{align} 0 \le x^{k}- u^{k} \le x^{0} - u^{0}, \; 0 \le y^{k}- v^{k} \le y^{0} - v^{0}. 
\label{DXULEUPB} \end{align} By substituting equation \eqref{DXULEUPB} into equation \eqref{SPLGEZ}, from $(x^{k}, \, y^{k}) \ge 0, \; (\bar{x}, \, \bar{y}) \ge 0$ and $\phi(x^{k}, \, y^{k}) \le \phi(x^{0}, \, y^{0})$, we obtain \begin{align} & (y^{k})^{T}(x^{k} - u^{k}) + (x^{k})^{T}(y^{k} - v^{k}) \le \bar{x}^{T}(y^{k} - v^{k}) + \bar{y}^{T}(x^{k} - u^{k}) \nonumber \\ & \hskip 2mm + (x^{k})^{T}y^{k} + (x^{k} - u^{k})^{T}(y^{k} - v^{k}) + \bar{x}^{T}\bar{y} \nonumber \\ & \hskip 2mm \le \bar{x}^{T}(y^{0} - v^{0}) + \bar{y}^{T}(x^{0} - u^{0}) + \phi(x^{k}, \, y^{k}) + (x^{0} - u^{0})^{T}(y^{0} - v^{0}) + \bar{x}^{T}\bar{y} \nonumber \\ & \hskip 2mm \le \bar{x}^{T}(y^{0} - v^{0}) + \bar{y}^{T}(x^{0} - u^{0}) + \phi(x^{0}, \, y^{0}) + (x^{0} - u^{0})^{T}(y^{0} - v^{0}) + \bar{x}^{T}\bar{y} \nonumber \\ & \hskip 2mm = \bar{x}^{T}(y^{0} - v^{0}) + \bar{y}^{T}(x^{0} - u^{0}) + (x^{0} - u^{0})^{T}(y^{0} - v^{0}) + 2n\mu_{0} + \bar{x}^{T}\bar{y}. \label{YDXULEUPB} \end{align} We set \begin{align} C_{1} = (y^{0} - v^{0})^{T}(x^{0} - u^{0}) + \bar{y}^{T}(x^{0} - u^{0}) + \bar{x}^{T}(y^{0} - v^{0}) + \bar{x}^{T}\bar{y}+ 2n\mu_{0}. \nonumber \end{align} Then, from equation \eqref{YDXULEUPB}, we obtain the inequality \eqref{YXMULEUB}. \vskip 2mm When $(\bar{x}, \, \bar{y})$ is strictly feasible, from inequalities \eqref{SPLGEZ}-\eqref{DXULEUPB}, we have \begin{align} & \bar{y}^{T}x^{k} + \bar{x}^{T}y^{k} \le \bar{x}^{T}(y^{k} - v^{k}) + \bar{y}^{T}(x^{k} - u^{k}) + \bar{x}^{T}\bar{y} + (x^{k})^{T}y^{k} + (x^{k} - u^{k})^{T}(y^{k} - v^{k}) \nonumber \\ & \hskip 2mm \le \bar{x}^{T}(y^{0} - v^{0}) + \bar{y}^{T}(x^{0} - u^{0}) + \bar{x}^{T}\bar{y} + \phi(x^{k}, \, y^{k}) + (x^{0} - u^{0})^{T}(y^{0} - v^{0}) \nonumber \\ & \hskip 2mm \le \bar{x}^{T}(y^{0} - v^{0}) + \bar{y}^{T}(x^{0} - u^{0}) + \bar{x}^{T}\bar{y} + 2n \mu_{0} + (x^{0} - u^{0})^{T}(y^{0} - v^{0}) \triangleq C_{2}, \label{XYKLEUPB} \end{align} where we use the property $\phi(x^{k}, \, y^{k}) \le \phi(x^{0}, \, y^{0}) = 2n\mu_{0}$ in the third inequality. Consequently, from equation \eqref{XYKLEUPB} and $(\bar{x}, \, \bar{y}) > 0$, we have \begin{align} 0 \le x^{k}_{i} \le \frac{C_{2}}{\min_{1 \le i \le n} \{\bar{y}_{i}\}}, \; 0 \le y^{k}_{i} \le \frac{C_2}{\min_{1 \le i \le n} \{\bar{x}_{i}\}}, \; i = 1, \, 2, \, \ldots, \, n, \nonumber \end{align} which prove the boundedness of $(x^{k}, \, y^{k})$. \eproof \vskip 2mm \begin{lemma} \label{LEMDXYLEUB} Assume that $\{(x^{k}, \, y^{k})\}$ is generated by Algorithm \ref{ALGRPFM} and satisfies the condition \eqref{XYGEUK}. Then, when $\mu_{k} \ge \epsilon > 0$, there exists a positive constant $\omega^{\ast}$ such that \begin{align} \|D^{k}\Delta x^{k}\|^{2} + \|(D^{k})^{-1}\Delta y^{k}\|^{2} \le (\omega^{\ast})^{2}, \label{DXYLEUB} \end{align} holds for all $k = 0, \, 1, \, \ldots$, where $D^{k} = \text{diag}\left((y^{k}./x^{k})^{1/2}\right)$, $(D^{k})^{-1} = \text{diag}\left((x^{k}./y^{k})^{1/2}\right)$ and $(\Delta x^{k}, \, \Delta y^{k})$ is the solution of equations \eqref{CONNML}-\eqref{CONNMC}. \end{lemma} \proof We denote \begin{align} \omega_{k} = \left( \|D^{k}\Delta x^{k}\|^{2} + \|(D^{k})^{-1}\Delta y^{k}\|^{2}\right)^{1/2}. \label{DEFTK} \end{align} Then, from equation \eqref{DEFTK}, we have \begin{align} \|D^{k}\Delta x^{k}\|_{\infty} \le \|D^{k}\Delta x^{k}\| \le \omega_{k}, \; \|(D^{k})^{-1}\Delta y^{k}\|_{\infty} \le \|(D^{k})^{-1}\Delta y^{k}\| \le \omega_{k}. \label{DKDELXKOM} \end{align} \vskip 2mm From equation \eqref{LCPFUV}, we know that $(u^{k}, \, v^{k})$ is feasible. 
By combining it with equation \eqref{CONNML}, we obtain \begin{align} y^{k} + \Delta y^{k} - v^{k} = M(x^{k} + \Delta x^{k} - u^{k}). \label{YK1MVK} \end{align} Therefore, from equation \eqref{YK1MVK} and the semi-definite positivity of $M$, we have \begin{align} (y^{k} + \Delta y^{k} - v^{k})^{T}(x^{k} + \Delta x^{k} - u^{k}) = (x^{k} + \Delta x^{k} - u^{k})^{T}M(x^{k} + \Delta x^{k} - u^{k}) \ge 0. \label{YDLEYMYGEZ} \end{align} From equations \eqref{XKGEUK}-\eqref{YKGEVK}, we know \begin{align} x^{0} - u^{0} \ge x^{k} - u^{k}, \; y^{0} - v^{0} \ge y^{k} - v^{k}. \nonumber \end{align} By substituting them into equation \eqref{YDLEYMYGEZ}, we obtain \begin{align} & (x^{0} - u^{0})^{T}(y^{0} - v^{0}) + (y^{k} - v^{k})^{T}\Delta x^{k} + (x^{k} - u^{k})^{T} \Delta y^{k} + (\Delta x^{k})^{T}\Delta y^{k} \nonumber \\ & \ge (x^{k} - u^{k})^{T}(y^{k} - v^{k}) + (y^{k} - v^{k})^{T}\Delta x^{k} + (x^{k} - u^{k})^{T} \Delta y^{k} + (\Delta x^{k})^{T}\Delta y^{k} \ge 0. \label{XMUDXYGEZ} \end{align} \vskip 2mm By using the inequality $|x^{T}y| \le \|x\| \|y\| \le \|x\|_{1}\|y\|$, from equation \eqref{DKDELXKOM}, we have \begin{align} & \left|(y^{k} - v^{k})^{T}\Delta x^{k}\right| = \left|\left((D^{k})^{-1}(y^{k} - v^{k})\right)^{T}(D^{k}\Delta x^{k})\right| \nonumber \\ & \hskip 2mm \le \left\|(D^{k})^{-1}(y^{k} - v^{k})\right\|_{1}\|D^{k}\Delta x^{k}\| \le \left\|(D^{k})^{-1}(y^{k} - v^{k})\right\|_{1} \omega_{k}. \label{YMVDELXLEB} \end{align} From the condition \eqref{XYGEUK} and $y^{k} - v^{k} \ge 0$, we have \begin{align} ({x^{k}_{i}}/{y^{k}_{i}})^{1/2}(y^{k}_{i} - v^{k}_{i}) = x^{k}_{i}(y^{k}_{i} - v^{k}_{i})/(x^{k}_{i}y^{k}_{i})^{1/2} \le x^{k}_{i}(y^{k}_{i} - v^{k}_{i})/(\gamma \mu_{k})^{1/2}. \label{XKMXMVDY} \end{align} By substituting equation \eqref{XKMXMVDY} into equation \eqref{YMVDELXLEB}, we obtain \begin{align} & \left|(y^{k} - v^{k})^{T}\Delta x^{k}\right| \le \left(\sum_{i=1}^{n}({x^{k}_{i}}/{y^{k}_{i}})^{1/2}(y^{k}_{i} - v^{k}_{i})\right) \omega_{k} \nonumber \\ & \hskip 2mm \le (x^{k})^{T}(y^{k} - v^{k})\omega_{k}/(\gamma \mu_{k})^{1/2}. \label{YMVDELUB} \end{align} Similarly to the proof of equation \eqref{YMVDELUB}, we obtain \begin{align} \left|(x^{k} - u^{k})^{T}\Delta y^{k}\right| \le (y^{k})^{T}(x^{k} - u^{k})\omega_{k}/(\gamma \mu_{k})^{1/2}. \label{XMUDELUB} \end{align} By substituting inequalities \eqref{YXMULEUB} and \eqref{YMVDELUB}-\eqref{XMUDELUB} into inequality \eqref{XMUDXYGEZ}, we obtain \begin{align} & (\Delta x^{k})^{T}\Delta y^{k} \ge - (x^{0} - u^{0})^{T}(y^{0} - v^{0}) - \left|(y^{k} - v^{k})^{T}\Delta x^{k}\right| - \left|(x^{k} - u^{k})^{T} \Delta y^{k}\right| \nonumber \\ & \hskip 2mm \ge - (x^{0} - u^{0})^{T}(y^{0} - v^{0}) - ((y^{k})^{T}(x^{k} - u^{k}) + (x^{k})(y^{k} - v^{k}))\omega_{k}/(\gamma \mu_{k})^{1/2} \nonumber \\ & \hskip 2mm \ge - (x^{0} - u^{0})^{T}(y^{0} - v^{0}) - \left(C_{1}/(\gamma \epsilon)^{1/2}\right)\omega_{k}. \label{DELXYGEN} \end{align} \vskip 2mm From equation \eqref{CONNMC} and the condition \eqref{XYGEUK}, we have \begin{align} & \|D^{k}\Delta x^{k}\|^{2} + \|(D^{k})^{-1} \Delta y^{k}\|^{2} + 2 (\Delta x^{k})^{T} \Delta y^{k} = \|D^{k}\Delta x^{k} + (D^{k})^{-1} \Delta y^{k}\|^{2} \nonumber \\ & \hskip 2mm = (\sigma_{k}\mu_{k})^{2} \sum_{i=1}^{n}\frac{1}{x^{k}_{i}y^{k}_{i}} + (x^{k})^{T}y^{k} - 2\sigma_{k}\mu_{k} n \nonumber \\ & \hskip 2mm \le n \sigma_{k}^{2} \mu_{k} /\gamma + \phi(x^{k}, \, y^{k}) - 2\sigma_{k}\mu_{k} n \le n \mu_{0} \left(\sigma_{max}^{2}/\gamma + 2 - 2\sigma_{min}\right). 
\label{DELXYLEMUK} \end{align} By substituting inequality \eqref{DELXYGEN} into inequality \eqref{DELXYLEMUK}, we obtain \begin{align} q(\omega_{k}) \triangleq \omega_{k}^{2} - 2\left(C_{1}/(\gamma\epsilon)^{1/2}\right)\omega_{k} - \zeta \le 0, \label{QUAFUN} \end{align} where the positive constant $\zeta$ is defined by \begin{align} \zeta = 2(x^{0} - u^{0})^{T}(y^{0} - v^{0}) + n \mu_{0}(\sigma_{max}^{2}/\gamma + 2 - 2\sigma_{min}). \label{DEFZETA} \end{align} The quadratic function $q(\omega)$ is convex and has a unique positive root at \begin{align} \omega^{\ast} = C_{1}/(\gamma \epsilon)^{1/2} + \sqrt{C_{1}^{2}/(\gamma \epsilon) + \zeta}. \nonumber \end{align} This implies \begin{align} \omega_{k} \le \omega^{\ast}. \label{OMEGAKLEUB} \end{align} Consequently, we obtain inequality \eqref{DXYLEUB}. \eproof \vskip 2mm In order to prove the global convergence of Algorithm \ref{ALGRPFM}, we need to estimate the positive lower bound of $\Delta t_{k} \, (k = 0, \, 1, \, \ldots)$. \begin{lemma} \label{LEMDTKGEZ} Assume that $\{(x^{k}, \, y^{k})\}$ is generated by Algorithm \ref{ALGRPFM} and satisfies the condition \eqref{XYGEUK}. Then, when $\mu_{k} \ge \epsilon > 0$, there exists a positive constant $\delta_{\Delta t}$ such that \begin{align} \Delta t_{k} \ge \frac{1}{2}\delta_{\Delta t} > 0 \label{DELTATKGEZ} \end{align} holds for all $k = 0, \, 1, \, \ldots$. \end{lemma} \proof From inequality \eqref{DXYLEUB}, we have \begin{align} & |\Delta x^{k}_{i} \Delta y^{k}_{i}| = \left|(y^{k}_{i}/x^{k}_{i})^{1/2}\Delta x^{k}_{i}\right| \left|(x^{k}_{i}/y^{k}_{i})^{1/2} \Delta y^{k}_{i}\right| \le \|D^{k}\Delta x^{k}\| \|(D^{k})^{-1} \Delta y^{k}\| \nonumber \\ & \hskip 2mm \le \frac{1}{2}\omega_{k}^{2} \le \frac{1}{2} (\omega^{\ast})^{2}, \; i = 1, \, 2, \, \ldots, n, \nonumber \end{align} which gives \begin{align} \|\Delta X^{k} \Delta y^{k}\|_{\infty} \le \frac{1}{2} (\omega^{\ast})^{2}, \; k = 0, \, 1, \, 2, \, \ldots. \label{DELXYLEOME} \end{align} Consequently, we have \begin{align} \frac{\sigma_{k}\mu_{k}}{\|\Delta X^{k} \Delta y^{k}\|_{\infty}} \ge \frac{2\sigma_{min} \, \epsilon}{(\omega^{\ast})^{2}}, \; k = 0, \, 1, \, 2, \, \ldots. \label{SIGMUKGEUB} \end{align} We denote \begin{align} \alpha^{\ast}_{\mu} = \min \left\{1, \; \frac{1 - \gamma/2}{1 + \gamma/2} \, \frac{(2\sigma_{min} \, \epsilon)}{(\omega^{\ast})^{2}} \right\}. \label{ALPHAAST} \end{align} Then, when $\Delta t_{k}$ satisfies \begin{align} \Delta t_{k} \le \frac{\alpha^{\ast}_{\mu}}{1 - \alpha^{\ast}_{\mu}}, \label{DELTKLEALP} \end{align} from equations \eqref{SIGMUKGEUB}-\eqref{ALPHAAST} and \eqref{ALPAUBND}, we know that the condition \eqref{XYGEUK} holds, where $\alpha_{k} = \Delta t_{k}/(1 + \Delta t_{k})$. \vskip 2mm From equation \eqref{DXYLEUB} and the Cauchy-Schwartz inequality $|x^{T}y| \le \|x\| \|y\|$, we have \begin{align} & |(\Delta x^{k})^{T}\Delta y^{k}| = \left|(D^{k}\Delta x^{k})^{T}((D^{k})^{-1}\Delta y^{k})\right| \le \|D^{k}\Delta x^{k}\| \|(D^{k})^{-1}\Delta y^{k}\| \nonumber \\ & \hskip 2mm \le \frac{1}{2}\left(\|D^{k}\Delta x^{k}\|^{2} + \|(D^{k})^{-1}\Delta y^{k}\|^{2}\right) \le \frac{1}{2} (\omega^{\ast})^{2}. \label{DELXYDLEUB} \end{align} From equation \eqref{CONNMC}, we have \begin{align} (x^{k})^{T}\Delta y^{k} + (y^{k})^{T}\Delta x^{k} = n\sigma_{k}\mu_{k} - (x^{k})^{T}y^{k}. 
\label{XDYAYDX}
\end{align}
By substituting equations \eqref{DELXYDLEUB}-\eqref{XDYAYDX} into equation \eqref{RHOK}, we obtain
\begin{align}
& \rho_{k} = \frac{Ared_{k}}{Pred_{k}}
= 1 - \frac{\alpha_{k}(\Delta x^{k})^{T}\Delta y^{k}}
{\|r_{q}^{k}\| + (x^{k})^{T}y^{k} - n\sigma_{k}\mu_{k}} \nonumber \\
& \hskip 2mm = 1 - \frac{\alpha_{k}(\Delta x^{k})^{T}\Delta y^{k}}
{(2 - \sigma_{k})n \mu_{k}}
\ge 1 - \frac{(\omega^{\ast})^{2}}{2(2-\sigma_{max}) n \epsilon} \alpha_{k}. \label{RHOKLEUB}
\end{align}
We denote
\begin{align}
\alpha_{\rho}^{\ast} = \min \left\{1, \; \frac{2(2 - \sigma_{max}) (1-\eta_{2})n \epsilon}
{(\omega^{\ast})^{2}}\right\}. \label{ALPHARA}
\end{align}
Then, when $\Delta t_{k}$ satisfies
\begin{align}
\Delta t_{k} \le \frac{\alpha_{\rho}^{\ast}} {1 - \alpha_{\rho}^{\ast}}, \label{DTLEALPR}
\end{align}
from equations \eqref{RHOKLEUB}-\eqref{ALPHARA}, we know $\rho_{k} \ge \eta_2$, where $\alpha_{k} = \Delta t_{k}/(1 + \Delta t_{k})$.

\vskip 2mm

We denote
\begin{align}
\delta_{\Delta t} \triangleq \min\left\{\frac{\alpha_{\mu}^{\ast}}
{1 - \alpha_{\mu}^{\ast}}, \; \frac{\alpha_{\rho}^{\ast}}
{1 - \alpha_{\rho}^{\ast}}, \; \Delta t_{0}\right\}. \label{PARDELAT}
\end{align}
Assume that $K$ is the first index such that $\Delta t_{K} \le \delta_{\Delta t}$ holds. Then, from equations \eqref{DELTKLEALP}-\eqref{PARDELAT}, we know that the condition \eqref{XYGEUK} holds and $\rho_{K} \ge \eta_{2}$. Consequently, according to the time-stepping adjustment scheme \eqref{TSK1}, $\Delta t_{K+1}$ will be enlarged. Therefore, $\Delta t_k \ge (1/2) \delta_{\Delta t}$ holds for all $k = 0, \, 1, \, 2, \, \dots$. \qed

\vskip 2mm

By using the estimates of Lemma \ref{LEMDTKGEZ}, we now prove that $\{\mu_{k}\}$ converges to zero as $k$ tends to infinity.

\begin{theorem} \label{THECOVFUK}
Assume that $\{(x^{k}, \, y^{k})\}$ is generated by Algorithm \ref{ALGRPFM} and satisfies the condition \eqref{XYGEUK}. Then, we have
\begin{align}
\lim_{k \to \infty} \mu_{k} = 0. \label{MUKTOZ}
\end{align}
\end{theorem}
\proof  We prove the result \eqref{MUKTOZ} by contradiction. Assume that there exists a positive constant $\epsilon$ such that
\begin{align}
\mu_{k} \ge \epsilon > 0 \label{MUKGEZ}
\end{align}
holds for all $k = 0, \, 1, \, \ldots$. Then, according to Algorithm \ref{ALGRPFM} and Lemma \ref{LEMDTKGEZ}, we know that there exists an infinite subsequence $\{(x^{k_l}, \, y^{k_l})\}$ such that
\begin{align}
\frac{\phi(x^{k_l}, \, y^{k_l}) - \phi(x^{k_l} + \alpha_{k_l} \Delta x^{k_l},
\, y^{k_l} + \alpha_{k_l} \Delta y^{k_l})}
{\phi(x^{k_l}, \, y^{k_l}) - m_{k_l}(\alpha_{k_l})} \ge \eta_{1} \label{RHOKGEPC}
\end{align}
holds for all $l = 0, \, 1, \, 2, \, \ldots$, where $\alpha_{k_l} = \Delta t_{k_l}/(1 + \Delta t_{k_l})$. Otherwise, all steps would be rejected after some iteration index, and the time step would then keep decreasing, which contradicts equation \eqref{DELTATKGEZ}.

\vskip 2mm

From equations \eqref{DELTATKGEZ}, \eqref{RHOKLEUB} and \eqref{RHOKGEPC}, we have
\begin{align}
& \phi(x^{k_l}, \, y^{k_l}) - \phi(x^{k_l} + \alpha_{k_l} \Delta x^{k_l},
\, y^{k_l} + \alpha_{k_l} \Delta y^{k_l}) \nonumber \\
& \hskip 2mm \ge \frac{\eta_{1}\Delta t_{k_l}}{1+\Delta t_{k_l}}(2 - \sigma_{k_l})n \mu_{k_l}
 \ge \frac{\eta_{1}\delta_{\Delta t}/2}{1+ \delta_{\Delta t}/2} (2 - \sigma_{max})n \mu_{k_l}.
\label{PHIKDGE}
\end{align}
Therefore, from equation \eqref{PHIKDGE} and $\phi(x^{k+1}, \, y^{k+1}) \le \phi(x^{k}, \, y^{k})$, we have
\begin{align}
& \phi(x^{0}, \, y^{0}) \ge \phi(x^{0}, \, y^{0}) - \lim_{k \to \infty} \phi(x^{k}, \, y^{k})
 = \sum_{k = 0}^{\infty} \left(\phi(x^{k}, \, y^{k}) - \phi(x^{k+1}, \, y^{k+1})\right) \nonumber \\
& \hskip 2mm  \ge \sum_{l = 0}^{\infty} \left(\phi(x^{k_l}, \, y^{k_l}) - \phi(x^{k_l + 1}, \, y^{k_l + 1})\right)
\ge \frac{\eta_{1} \delta_{\Delta t}/2}{1+\delta_{\Delta t}/2}
\sum_{l=0}^{\infty}(2 - \sigma_{max})n \mu_{k_l}. \label{PHI0GESUM}
\end{align}
Consequently, from equation \eqref{PHI0GESUM}, we obtain
\begin{align}
\lim_{l \to \infty} \mu_{k_l} = 0, \nonumber
\end{align}
which contradicts the assumption $\mu_{k} \ge \epsilon > 0$ for all $k = 0, \, 1, \ldots$. Therefore, we have
\begin{align}
\liminf_{k \to \infty} \mu_{k} = 0. \label{INFMUKTOZ}
\end{align}
Since $\mu_{k} = \phi(x^{k}, \, y^{k})/(2n)$ and $\{\phi(x^{k}, \, y^{k})\}$ is monotonically decreasing, we know that $\{\mu_{k}\}$ is monotonically decreasing. By combining this with equation \eqref{INFMUKTOZ}, we conclude that the result \eqref{MUKTOZ} is true. \qed

\begin{remark}
In our global convergence analysis, in comparison to that of the known path-following algorithms, we do not need to select $\alpha_{k}$ such that the priority-of-feasibility-over-complementarity condition
\begin{align}
\|r_{q}^{k}(\alpha)\| \le (x^{k}(\alpha))^{T}y^{k}(\alpha) \frac{\|r_{q}^{0}\|}{(x^{0})^{T}y^{0}}
\label{FOVERC}
\end{align}
holds for all $\alpha \in (0, \; \alpha_{k}] \subset (0, \; 1]$.
\end{remark}

\vskip 2mm

\begin{theorem} \label{THEXYKGCONV}
Assume that $\{(x^{k}, \, y^{k})\}$ is the sequence of iterates generated by Algorithm \ref{ALGRPFM} and satisfies the condition \eqref{XYGEUK}. If the linear complementarity problem has a strictly feasible solution $(\bar{x}, \, \bar{y})$, then $\{(x^{k}, \, y^{k})\}$ has a limit point $(x^{\ast}, \, y^{\ast})$, and this limit point satisfies the linear complementarity conditions \eqref{LCP}.
\end{theorem}
\proof Since the linear complementarity problem has a strictly feasible solution, from Lemma \ref{LEMXYKUB}, we know that $\{(x^{k}, \, y^{k})\}$ is bounded. Consequently, the sequence $\{(x^{k}, \, y^{k})\}$ has a convergent subsequence $\{(x^{k_l}, \, y^{k_l})\}$, whose limit point we denote by $(x^{\ast}, \, y^{\ast})$. By combining this with Theorem \ref{THECOVFUK}, we obtain
\begin{align}
& \lim_{l \to \infty} \mu_{k_l} = \frac{1}{2n} \left(\lim_{l \to \infty} (x^{k_l})^{T}y^{k_l}
+ \lim_{l \to \infty} \|y^{k_l} - (Mx^{k_l} + q)\|\right) \nonumber \\
& \hskip 2mm =  \frac{1}{2n} \left((x^{\ast})^{T}y^{\ast} + \|y^{\ast} - (Mx^{\ast} + q)\|\right) = 0.
\label{SUBCONMUK}
\end{align}
Since $(x^{k_l}, \, y^{k_l}) \ge 0$, we have $(x^{\ast}, \, y^{\ast}) \ge 0$. By substituting this into equation \eqref{SUBCONMUK}, we conclude
\begin{align}
y^{\ast} - (Mx^{\ast} + q) = 0, \; (x^{\ast})^{T}y^{\ast} = 0, \; (x^{\ast}, \, y^{\ast}) \ge 0.
\nonumber
\end{align}
This implies that $(x^{\ast}, \, y^{\ast})$ is a solution of the linear complementarity problem \eqref{LCP}.
\eproof
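\vskip 2mm

Before turning to the experiments, the following simplified MATLAB driver (our own sketch of the skeleton of Algorithm \ref{ALGRPFM}; it reuses the illustrative helpers \texttt{cn\_step} and \texttt{tr\_update} from the earlier sketches and omits the regularization \eqref{REGMAT} and the switching rule for $\sigma_{k}$) shows how the pieces fit together:
\begin{verbatim}
% Simplified driver for the path-following method with trust-region
% time-stepping (a sketch, not the full RPFMTr implementation).
function [x, y] = rpfm_sketch(M, q)
  n = length(q);
  x = 10*ones(n, 1);                    % starting point as in the
  y = M*x + q;  y(y <= 0) = 1e-3;       % initial point selection above
  dt = 1e-2;  sigma = 0.5;  tol = 1e-6;
  for it = 1:600
    if max(norm(x.*y, inf), norm(y - (M*x + q), inf)) < tol
      break;                            % feasibility and complementarity met
    end
    [xt, yt, dx, dy] = cn_step(M, q, x, y, sigma, dt);
    [dt, accept] = tr_update(M, q, x, y, dx, dy, dt);
    if accept
      x = xt;  y = yt;                  % accept the trial point
    end
  end
end
\end{verbatim}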
\section{Numerical experiments}

\vskip 2mm

In this section, some numerical experiments are conducted to test the performance of RPFMTr (Algorithm \ref{ALGRPFM}) on linear complementarity problems (LCPs), in comparison to two state-of-the-art commercial solvers, namely PATH \cite{DF1995,PATH,FM2000,FM2022} and MILES (a Mixed Inequality and nonLinear Equation Solver) \cite{Mathiesen1985,Rutherford1995,Rutherford2022}. RPFMTr (Algorithm \ref{ALGRPFM}) is coded in the MATLAB language and executed in the MATLAB (R2020a) environment \cite{MATLAB}. PATH and MILES are executed in the GAMS v28.2 (2019) environment \cite{GAMS}. All numerical experiments are performed on an HP notebook with an Intel quad-core CPU and 8 GB of memory. MILES solves the LCP with Lemke's almost complementary pivoting algorithm \cite{CPS2009,LH1964,Rutherford1995}. PATH is a solver based on a path-following procedure \cite{Dirkse1994,DF1995}, Fisher's non-smooth regularization technique \cite{Fisher1992} and Lemke's pivoting algorithm \cite{CPS2009,LH1964}. PATH and MILES are two robust and efficient solvers for complementarity problems and are used in many general modelling systems such as AMPL (A Mathematical Programming Language) \cite{AMPL} and GAMS (General Algebraic Modelling System) \cite{GAMS}. Therefore, we select these two solvers as the basis for comparison.

\vskip 2mm

The construction of the test problems is described as follows. Firstly, we generate a test matrix $M$ with the following structure:
\begin{align}
M = \begin{bmatrix}
0 & -A^{T} \\
A & 0
\end{bmatrix},
\label{TESTMAT}
\end{align}
where the test matrix $A$ comes from the linear programming subset of NETLIB \cite{NETLIB}. It is easy to verify that the test matrix $M$ defined by equation \eqref{TESTMAT} is positive semi-definite. Then, we generate two complementary vectors $x$ and $y$ as follows:
\begin{align}
x = [1 \; 0 \; 1 \; 0 \; \cdots \; 1 \; 0]^{T}, \; y = [0 \; 1 \; 0 \; 1 \; \cdots \; 0 \; 1]^{T}.
\label{CVECTORS}
\end{align}
Finally, by using these two vectors $x, \, y$ defined by equation \eqref{CVECTORS} and the test matrix $M$ defined by equation \eqref{TESTMAT}, we generate the test vector $q$:
\begin{align}
q = y - Mx. \label{TESTVEC}
\end{align}
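\vskip 2mm

For reference, the construction \eqref{TESTMAT}-\eqref{TESTVEC} can be sketched in MATLAB as follows, where \texttt{A} denotes a constraint matrix loaded from the NETLIB linear programming collection and the helper name \texttt{make\_lcp} is our own:
\begin{verbatim}
% Sketch of the test-problem construction: the skew-symmetric block
% matrix M, the alternating complementary pair (x, y), and q = y - M*x.
function [M, q, x, y] = make_lcp(A)
  [m, k] = size(A);
  M = [sparse(k, k), -A'; A, sparse(m, m)];  % skew-symmetric, hence monotone
  n = m + k;
  x = mod((1:n)', 2);                        % x = (1, 0, 1, 0, ...)^T
  y = 1 - x;                                 % y = (0, 1, 0, 1, ...)^T
  q = y - M*x;
end
\end{verbatim}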
The dimensions of the test problems vary from 78 to 40216. The termination conditions of Algorithm \ref{ALGRPFM} (RPFMTr), PATH and MILES for finding a solution of the nonlinear system \eqref{NLELCP} are
\begin{align}
\|y - (Mx + q)\|_{\infty} \leq 10^{-6}, \; \|Xy\|_{\infty} \leq 10^{-6}, \; (x, \, y) \ge 0, \nonumber
\end{align}
where $X = \text{diag}(x)$.

\vskip 2mm

According to the manual of GAMS \cite{FM2022}, GAMS generates a list of all variables appearing in the equations of the model statement, and the number of equations must equal the number of variables. In other words, for an LCP solved in the GAMS environment, its matrix $M$ must not contain a row of all zeros. In order to avoid this error, we add $\epsilon = 1.0 \times 10^{-6}$ to the first element of every all-zero row or column of the test sub-matrix $A$ of matrix $M$ defined by equation \eqref{TESTMAT}.

\vskip 2mm

Most of the matrices $A$ from the linear programming subset of NETLIB \cite{NETLIB} are sparse.
In order to further test the performance of RPFMTr, PATH and MILES on dense LCPs, we add a small random perturbation to the elements of the sub-matrix $A$ in equation \eqref{TESTMAT} and generate the dense test matrix $\overline{M}$, which replaces the test matrix $M$ of equation \eqref{TESTMAT}, as follows:
\begin{align}
\overline{M} = \begin{bmatrix}
0 & -A_{\epsilon}^{T} \\
A_{\epsilon} & 0
\end{bmatrix}, \; A_{\epsilon} = A + \text{rand}(m_{A},\, n_{A})*\epsilon,
\; [m_{A}, \, n_{A}] = \text{size}(A),
\label{TESTMATD}
\end{align}
where $\epsilon = 10^{-3}$ and matrix $A$ comes from the linear programming subset of NETLIB \cite{NETLIB}.

\vskip 2mm

Numerical results for the sparse test problems are arranged in Tables \ref{TABSLCP1}-\ref{TABSLCP3} and Figures \ref{fig:ITSLCP}-\ref{fig:CPUSLCP}, and numerical results for the dense test problems are arranged in Tables \ref{TABDLCP1}-\ref{TABDLCP3} and Figures \ref{fig:ITDLCP}-\ref{fig:CPUDLCP}. ``major'' in the fourth column of Tables \ref{TABSLCP1}-\ref{TABDLCP3} represents the number of linear mixed complementarity problems solved by the pivotal Lemke method of PATH \cite{FM2022}. ``minor'' in the fourth column of Tables \ref{TABSLCP1}-\ref{TABDLCP3} represents the number of pivots performed per major iteration of PATH \cite{FM2022}. ``grad'' in the fourth column of Tables \ref{TABSLCP1}-\ref{TABDLCP3} represents the cumulative number of Jacobian evaluations used in PATH \cite{FM2022}. ``major'' in the sixth column of Tables \ref{TABSLCP1}-\ref{TABDLCP3} represents the number of Newton iterations of MILES \cite{Rutherford2022}. ``pivots'' in the sixth column of Tables \ref{TABSLCP1}-\ref{TABDLCP3} represents the number of Lemke pivots of MILES \cite{Rutherford2022}. ``refactor'' in the sixth column of Tables \ref{TABSLCP1}-\ref{TABDLCP3} represents the number of re-factorizations in the LUSOL solver \cite{GMSW1991}, which is called by MILES for solving the linear systems of equations.

\vskip 2mm

From Tables \ref{TABSLCP1}-\ref{TABDLCP3} and Figures \ref{fig:ITSLCP}-\ref{fig:CPUDLCP}, we find that RPFMTr and PATH work well for both the sparse LCPs and the dense LCPs, whereas MILES is not robust for either the sparse LCPs or the dense LCPs. For the sparse LCPs, Tables \ref{TABSLCP1}-\ref{TABSLCP3} and Figures \ref{fig:ITSLCP}-\ref{fig:CPUSLCP} show that PATH performs better than RPFMTr and MILES. For the dense LCPs, from Tables \ref{TABDLCP1}-\ref{TABDLCP3}, we find that PATH and MILES fail to solve $5$ and $57$ of the $73$ test problems, respectively. RPFMTr solves all the sparse and dense test LCPs. Furthermore, from Tables \ref{TABDLCP1}-\ref{TABDLCP3} and Figures \ref{fig:ITDLCP}-\ref{fig:CPUDLCP}, we find that the computational time of RPFMTr is about $1/3$ to $1/10$ of that of PATH for the dense LCPs. Therefore, RPFMTr is a robust and efficient solver for LCPs, especially for dense LCPs.
\newpage
\begin{table}[htbp]
\newcommand{\tabincell}[2]{\begin{tabular}{@{}#1@{}}#2\end{tabular}}
\scriptsize
\centering
\fontsize{8}{8}\selectfont
\caption{Numerical results of small-scale sparse LCPs (no.
1-27).} \label{TABSLCP1} \resizebox{\textwidth}{!}{ \begin{tabular}{|c|c|c|c|c|c|c|c|c|c|} \hline \multirow{2}{*}{Problems} & \multicolumn{2}{c|}{RPFMTr} & \multicolumn{2}{c|}{PATH} & \multicolumn{2}{c|}{MILES} \\ \cline{2-7} \tabincell{c}{(n*n)} & \tabincell{c}{steps \\(time)} & \tabincell{c}{Terr\\} & \tabincell{c}{major+minor+grad \\(time)} & \tabincell{c}{Terr\\} & \tabincell{c}{major+pivots+refactor \\(time)} & \tabincell{c}{Terr\\} \\ \hline \tabincell{c}{Exam. 1 lp\_25fv47 \\ (n = 2697)} & \tabincell{c}{49 \\ (4.13)} &\tabincell{c}{8.84e-07} & \tabincell{c}{8+949+23 \\ (0.30)} &\tabincell{c}{5.82e-08} & \tabincell{c}{1+1684+24 \\ (1.34)} &\tabincell{c}{7.48e-12} \\ \hline \tabincell{c}{Exam. 2 lp\_adlittle \\ (n = 194)} & \tabincell{c}{45 \\ (0.03)} &\tabincell{c}{9.61e-07} & \tabincell{c}{4+80+9 \\ (0.02)} &\tabincell{c}{4.44e-12 } & \tabincell{c}{1+1+3 \\ (0.02)} &\tabincell{c}{4.65e-14} \\ \hline \tabincell{c}{Exam. 3 lp\_afiro \\ (n = 78)} & \tabincell{c}{41 \\ (0.01)} &\tabincell{c}{6.20e-07} & \tabincell{c}{3+5+7 \\ (0.02)} &\tabincell{c}{1.01e-10} & \tabincell{c}{1+1+3 \\ (0.00)} &\tabincell{c}{3.91e-14} \\ \hline \tabincell{c}{Exam. 4 lp\_agg \\ (n = 1103)} & \tabincell{c}{44 \\ (0.19)} &\tabincell{c}{9.85e-07} & \tabincell{c}{11+413+16 \\ (0.08)} &\tabincell{c}{2.87e-09} & \tabincell{c}{1+1+6 \\ (0.05)} &\tabincell{c}{3.74e-12} \\ \hline \tabincell{c}{Exam. 5 lp\_agg2 \\ (n = 1274)} & \tabincell{c}{46 \\ (0.84)} &\tabincell{c}{8.24e-07} & \tabincell{c}{10+303+13 \\ (0.03)} &\tabincell{c}{6.96e-10} & \tabincell{c}{1+959+6 \\ (0.06)} &\tabincell{c}{5.08e-12} \\ \hline \tabincell{c}{Exam. 6 lp\_agg3 \\ (n = 1274)} & \tabincell{c}{47 \\ (0.50)} &\tabincell{c}{6.64e-07} & \tabincell{c}{9+259+12 \\ (0.05)} &\tabincell{c}{2.41e-07 } & \tabincell{c}{1+955+6 \\ (0.06)} &\tabincell{c}{9.37e-12} \\ \hline \tabincell{c}{Exam. 7 lp\_bandm \\ (n = 777)} & \tabincell{c}{51 \\ (0.27)} &\tabincell{c}{8.58e-07} & \tabincell{c}{11+613+14 \\ (0.05)} &\tabincell{c}{3.31e-07 } & \tabincell{c}{1+1+8 \\ (0.06)} &\tabincell{c}{1.19e-12} \\ \hline \tabincell{c}{Exam. 8 lp\_beaconfd \\ (n = 468)} & \tabincell{c}{50 \\ (0.17)} &\tabincell{c}{6.06e-07} & \tabincell{c}{8+124+17 \\ (0.03)} &\tabincell{c}{3.76e-09 } & \tabincell{c}{1+1+6 \\ (0.03)} &\tabincell{c}{5.46e-12} \\ \hline \tabincell{c}{Exam. 9 lp\_blend \\ (n = 188)} & \tabincell{c}{46 \\ (0.03)} &\tabincell{c}{6.12e-07} & \tabincell{c}{9+143+12 \\ (0.02)} &\tabincell{c}{6.50e-08 } & \tabincell{c}{1+1+4 \\ (0.02)} &\tabincell{c}{1.96e-12} \\ \hline \tabincell{c}{Exam. 10 lp\_bnl1 \\ (n = 2229)} & \tabincell{c}{50 \\ (1.24)} &\tabincell{c}{8.60e-07} & \tabincell{c}{12+1727+17 \\ (0.50)} &\tabincell{c}{5.22e-11 } & \tabincell{c}{1+1+10 \\ (0.27)} &\tabincell{c}{1.62e-12} \\ \hline \tabincell{c}{Exam. 11 lp\_bore3d \\ (n = 567)} & \tabincell{c}{48 \\ (0.13)} &\tabincell{c}{8.63e-07} & \tabincell{c}{11+423+16 \\ (0.03)} &\tabincell{c}{6.99e-07 } & \tabincell{c}{1+1+6 \\ (0.03)} &\tabincell{c}{1.31e-12} \\ \hline \tabincell{c}{Exam. 12 lp\_brandy \\ (n = 523)} & \tabincell{c}{49 \\ (0.31)} &\tabincell{c}{7.42e-07} & \tabincell{c}{10+384+13 \\ (0.06)} &\tabincell{c}{2.84e-10 } & \tabincell{c}{1+1+6 \\ (0.03)} &\tabincell{c}{1.31e-12} \\ \hline \tabincell{c}{Exam. 13 lp\_capri \\ (n = 753)} & \tabincell{c}{46 \\ (0.16)} &\tabincell{c}{9.07e-07} & \tabincell{c}{11+573+15 \\ (0.03)} &\tabincell{c}{7.18e-07} & \tabincell{c}{1+1+6 \\ (0.05)} &\tabincell{c}{2.60e-12} \\ \hline \tabincell{c}{Exam. 
14 lp\_czprob \\ (n = 4491)} & \tabincell{c}{51 \\ (3.37)} &\tabincell{c}{8.74e-07} & \tabincell{c}{4+25+14 \\ (0.05)} &\tabincell{c}{1.90e-10 } & \tabincell{c}{1+1+13 \\ (1.19)} &\tabincell{c}{1.82e-12} \\ \hline \tabincell{c}{Exam. 15 lp\_degen2 \\ (n = 1201)} & \tabincell{c}{42 \\ (0.60)} &\tabincell{c}{8.53e-07} & \tabincell{c}{4+447+13 \\ (0.11)} &\tabincell{c}{1.71e-07 } & \tabincell{c}{1+1+10 \\ (0.14)} &\tabincell{c}{2.22e-16} \\ \hline \tabincell{c}{Exam. 16 lp\_degen3 \\ (n = 4107)} & \tabincell{c}{47 \\ (5.85)} &\tabincell{c}{6.85e-07} & \tabincell{c}{4+185+26 \\ (0.42)} &\tabincell{c}{3.00e-07 } & \tabincell{c}{1+1+51 \\ (6.13)} &\tabincell{c}{0.00e-00} \\ \hline \tabincell{c}{Exam. 17 lp\_e226 \\ (n = 695)} & \tabincell{c}{54 \\ (0.28)} &\tabincell{c}{9.05e-07} & \tabincell{c}{9+412+17 \\ (0.03)} &\tabincell{c}{2.94e-07 } & \tabincell{c}{1+1+7 \\ (0.05)} &\tabincell{c}{5.86e-12} \\ \hline \tabincell{c}{Exam. 18 lp\_etamacro \\ (n = 1216)} & \tabincell{c}{49 \\ (0.73)} &\tabincell{c}{6.31e-07} & \tabincell{c}{6+440+15 \\ (0.05)} &\tabincell{c}{1.87e-12 } & \tabincell{c}{1+58+6 \\ (0.05)} &\tabincell{c}{6.82e-12} \\ \hline \tabincell{c}{Exam. 19 lp\_fffff800 \\ (n = 1552)} & \tabincell{c}{64 \\ (1.27)} &\tabincell{c}{6.05e-07} & \tabincell{c}{15+1111+20 \\ (0.17)} &\tabincell{c}{1.39e-07 } &\tabincell{c}{\textcolor{red}{100+72624+770} \\ \textcolor{red}{(13.80)}} &\tabincell{c}{\textcolor{red}{1.78e+01} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 20 lp\_finnis \\ (n = 1561)} & \tabincell{c}{47 \\ (0.26)} &\tabincell{c}{6.06e-07} & \tabincell{c}{8+313+16 \\ (0.03)} &\tabincell{c}{5.24e-09 } & \tabincell{c}{1+1+9 \\ (0.14)} &\tabincell{c}{4.55e-13} \\ \hline \tabincell{c}{Exam. 21 lp\_fit1d \\ (n = 1073)} & \tabincell{c}{65 \\ (1.16)} &\tabincell{c}{6.45e-07} & \tabincell{c}{8+671+12 \\ (0.24)} &\tabincell{c}{2.34e-11 } & \tabincell{c}{1+1+3 \\ (0.03)} &\tabincell{c}{1.75e-10} \\ \hline \tabincell{c}{Exam. 22 lp\_ganges \\ (n = 3015)} & \tabincell{c}{43 \\ (0.49)} &\tabincell{c}{8.88e-07} & \tabincell{c}{4+98+21 \\ (0.05)} &\tabincell{c}{4.95e-07 } & \tabincell{c}{1+1+11 \\ (0.47)} &\tabincell{c}{2.40e-14} \\ \hline \tabincell{c}{Exam. 23 lp\_gfrd\_pnc \\ (n = 1776)} & \tabincell{c}{61 \\ (0.28)} &\tabincell{c}{7.85e-07} & \tabincell{c}{18+1250+21 \\ (0.17)} &\tabincell{c}{3.80e-11 } & \tabincell{c}{1+716+13 \\ (0.25)} &\tabincell{c}{3.64e-12} \\ \hline \tabincell{c}{Exam. 24 lp\_grow15 \\ (n = 945)} & \tabincell{c}{33 \\ (0.24)} &\tabincell{c}{7.23e-07} & \tabincell{c}{7+703+9 \\ (0.13)} &\tabincell{c}{8.66e-07 } & \tabincell{c}{1+927+6 \\ (0.08)} &\tabincell{c}{3.33e-14} \\ \hline \tabincell{c}{Exam. 25 lp\_grow22 \\ (n = 1386)} & \tabincell{c}{33 \\ (0.42)} &\tabincell{c}{7.27e-07} & \tabincell{c}{8+1054+10 \\ (0.23)} &\tabincell{c}{3.86e-11} & \tabincell{c}{1+1403+8 \\ (0.13)} &\tabincell{c}{1.20e-14} \\ \hline \tabincell{c}{Exam. 26 lp\_lotfi \\ (n = 519)} & \tabincell{c}{52 \\ (0.20)} &\tabincell{c}{6.05e-07} & \tabincell{c}{13+263+17 \\ (0.03)} &\tabincell{c}{8.42e-07 } & \tabincell{c}{1+1+4 \\ (0.05)} &\tabincell{c}{9.10e-13} \\ \hline \tabincell{c}{Exam. 
27 lp\_maros \\ (n = 2812)} & \tabincell{c}{68 \\ (2.80)} &\tabincell{c}{6.18e-07} & \tabincell{c}{13+3222+14 \\ (0.50)} &\tabincell{c}{2.16e-09 } & \tabincell{c}{1+776+20 \\ (1.13)} &\tabincell{c}{2.95e-09} \\ \hline \end{tabular}}\end{table} \begin{table}[htbp] \newcommand{\tabincell}[2]{\begin{tabular}{@{}#1@{}}#2\end{tabular}} \scriptsize \centering \fontsize{8}{8}\selectfont \caption{Numerical results of small-scale sparse LCPs (no. 28-53).} \label{TABSLCP2} \resizebox{\textwidth}{!}{ \begin{tabular}{|c|c|c|c|c|c|c|c|c|c|} \hline \multirow{2}{*}{Problems} & \multicolumn{2}{c|}{RPFMTr} & \multicolumn{2}{c|}{PATH} & \multicolumn{2}{c|}{MILES} \\ \cline{2-7} \tabincell{c}{(n*n)} & \tabincell{c}{steps \\(time)} & \tabincell{c}{Terr\\} & \tabincell{c}{major+minor+grad \\(time)} & \tabincell{c}{Terr\\} & \tabincell{c}{major+pivots+refactor \\(time)} & \tabincell{c}{Terr\\} \\ \hline \tabincell{c}{Exam. 28 lp\_modszk1 \\ (n = 2307)} & \tabincell{c}{42 \\ (0.46)} &\tabincell{c}{8.61e-07} & \tabincell{c}{5+370+14 \\ (0.05)} &\tabincell{c}{7.91e-11 } & \tabincell{c}{1+1+9 \\ (0.22)} &\tabincell{c}{8.70e-14} \\ \hline \tabincell{c}{Exam. 29 lp\_perold \\ (n = 2131)} & \tabincell{c}{67 \\ (2.62)} &\tabincell{c}{9.15e-07} & \tabincell{c}{12+1498+23 \\ (0.42)} &\tabincell{c}{1.72e-08 } &\tabincell{c}{\textcolor{red}{100+18168+1156} \\ \textcolor{red}{(35.88)}} &\tabincell{c}{\textcolor{red}{6.64e+01} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 30 lp\_pilot\_ja \\ (n = 3207)} & \tabincell{c}{78 \\ (7.43)} &\tabincell{c}{7.94e-07} & \tabincell{c}{14+2468+23 \\ (1.81)} &\tabincell{c}{3.29e-07 } &\tabincell{c}{\textcolor{red}{100+171048+1900} \\ \textcolor{red}{(140.47)}} &\tabincell{c}{\textcolor{red}{3.92e+04} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 31 lp\_qap8 \\ (n = 2544)} & \tabincell{c}{43 \\ (4.78)} &\tabincell{c}{8.80e-07} & \tabincell{c}{9+2413+11 \\ (1.31)} &\tabincell{c}{1.83e-11 } & \tabincell{c}{6+50530+477 \\ (27.30)} &\tabincell{c}{2.13e-14 } \\ \hline \tabincell{c}{Exam. 32 lp\_lp\_recipe \\ (n = 295)} & \tabincell{c}{53 \\ (0.04)} &\tabincell{c}{7.81e-07} & \tabincell{c}{8+103+11 \\ (0.00)} &\tabincell{c}{7.33e-12 } & \tabincell{c}{1+1+4 \\ (0.02)} &\tabincell{c}{5.12e-13} \\ \hline \tabincell{c}{Exam. 33 lp\_sc50a \\ (n = 128)} & \tabincell{c}{40 \\ (0.01)} &\tabincell{c}{8.30e-07} & \tabincell{c}{3+59+6 \\ (0.05)} &\tabincell{c}{9.21e-07 } & \tabincell{c}{1+1+3 \\ (0.03)} &\tabincell{c}{1.33e-15} \\ \hline \tabincell{c}{Exam. 34 lp\_scagr7 \\ (n = 314)} & \tabincell{c}{42 \\ (0.03)} &\tabincell{c}{6.28e-07} & \tabincell{c}{5+110+8 \\ (0.02)} &\tabincell{c}{1.41e-10 } & \tabincell{c}{1+1+3 \\ (0.00)} &\tabincell{c}{1.07e-14} \\ \hline \tabincell{c}{Exam. 35 lp\_scagr25 \\ (n = 1142)} & \tabincell{c}{42 \\ (0.08)} &\tabincell{c}{6.30e-07} & \tabincell{c}{4+205+13 \\ (0.02)} &\tabincell{c}{4.24e-07 } & \tabincell{c}{1+1+6 \\ (0.05)} &\tabincell{c}{1.07e-14} \\ \hline \tabincell{c}{Exam. 36 lp\_scfxm1 \\ (n = 930)} & \tabincell{c}{52 \\ (0.51)} &\tabincell{c}{9.05e-07} & \tabincell{c}{12+721+15 \\ (0.11)} &\tabincell{c}{7.23e-10 } & \tabincell{c}{1+1+7 \\ (0.06)} &\tabincell{c}{2.62e-12} \\ \hline \tabincell{c}{Exam. 37 lp\_scfxm2 \\ (n = 1860)} & \tabincell{c}{52 \\ (0.61)} &\tabincell{c}{8.84e-07} & \tabincell{c}{14+1357+18 \\ (0.14)} &\tabincell{c}{3.36e-09 } & \tabincell{c}{1+1+11 \\ (0.24)} &\tabincell{c}{1.00e-11} \\ \hline \tabincell{c}{Exam. 
38 lp\_scfxm3 \\ (n = 2790)} & \tabincell{c}{53 \\ (1.14)} &\tabincell{c}{6.17e-07} & \tabincell{c}{14+2110+17 \\ (0.27)} &\tabincell{c}{4.96e-07 } & \tabincell{c}{1+200+14 \\ (0.70)} &\tabincell{c}{7.38e-12} \\ \hline \tabincell{c}{Exam. 39 lp\_scorpion \\ (n = 854)} & \tabincell{c}{40 \\ (0.08)} &\tabincell{c}{8.40e-07} & \tabincell{c}{5+111+15 \\ (0.03)} &\tabincell{c}{1.35e-07 } & \tabincell{c}{1+1+5 \\ (0.03)} &\tabincell{c}{6.22e-15} \\ \hline \tabincell{c}{Exam. 40 lp\_shell \\ (n = 2313)} & \tabincell{c}{45 \\ (0.40)} &\tabincell{c}{6.34e-07} & \tabincell{c}{9+2448+11 \\ (0.38)} &\tabincell{c}{5.86e-10 } & \tabincell{c}{1+1+9 \\ (0.24)} &\tabincell{c}{0.00e-00} \\ \hline \tabincell{c}{Exam. 41 lp\_ship04l \\ (n = 2568)} & \tabincell{c}{50 \\ (1.06)} &\tabincell{c}{7.03e-07} & \tabincell{c}{11+3359+14 \\ (0.77)} &\tabincell{c}{4.05e-07 } & \tabincell{c}{1+1+8\\ (0.23)} &\tabincell{c}{1.81e-14} \\ \hline \tabincell{c}{Exam. 42 lp\_ship04s \\ (n = 1908)} & \tabincell{c}{49 \\ (0.57)} &\tabincell{c}{9.60e-07} & \tabincell{c}{12+2376+15 \\ (0.24)} &\tabincell{c}{4.04e-07 } & \tabincell{c}{1+1+6 \\ (0.09)} &\tabincell{c}{1.81e-14} \\ \hline \tabincell{c}{Exam. 43 lp\_ship08s \\ (n = 3245)} & \tabincell{c}{47 \\ (0.91)} &\tabincell{c}{5.31e-07} & \tabincell{c}{12+3328+15 \\ (0.44)} &\tabincell{c}{9.32e-07 } & \tabincell{c}{1+1+9 \\ (0.41)} &\tabincell{c}{5.68e-14} \\ \hline \tabincell{c}{Exam. 44 lp\_ship12s \\ (n = 4020)} & \tabincell{c}{48 \\ (0.91)} &\tabincell{c}{2.68e-07} & \tabincell{c}{13+3446+16 \\ (0.52)} &\tabincell{c}{3.14e-07 } & \tabincell{c}{1+1+9 \\ (0.53)} &\tabincell{c}{4.17e-14} \\ \hline \tabincell{c}{Exam. 45 lp\_sierra \\ (n = 3962)} & \tabincell{c}{63 \\ (1.35)} &\tabincell{c}{6.79e-07} & \tabincell{c}{12+2135+23 \\ (0.53)} &\tabincell{c}{7.90e-07 } & \tabincell{c}{1+1+11 \\ (0.72)} &\tabincell{c}{4.37e-11} \\ \hline \tabincell{c}{Exam. 46 lp\_standgub \\ (n = 1744)} & \tabincell{c}{60 \\ (0.70)} &\tabincell{c}{9.29e-07} & \tabincell{c}{4+99+12 \\ (0.02)} &\tabincell{c}{2.88e-12 } & \tabincell{c}{1+1+7 \\ (0.09)} &\tabincell{c}{1.16e-13} \\ \hline \tabincell{c}{Exam. 47 lp\_tuff \\ (n = 961)} & \tabincell{c}{61 \\ (0.69)} &\tabincell{c}{3.85e-07} & \tabincell{c}{9+828+13 \\ (0.13)} &\tabincell{c}{4.81e-07 } & \tabincell{c}{1+1+10 \\ (0.11)} &\tabincell{c}{3.82e-11} \\ \hline \tabincell{c}{Exam. 48 lp\_wood1p \\ (n = 2839)} & \tabincell{c}{53 \\ (6.00)} &\tabincell{c}{7.48e-07} & \tabincell{c}{7+1285+12 \\ (19.56)} &\tabincell{c}{9.65e-10 } & \tabincell{c}{1+3237+19 \\ (1.36)} &\tabincell{c}{5.09e-09} \\ \hline \tabincell{c}{Exam. 49 lpi\_box1 \\ (n = 492)} & \tabincell{c}{39 \\ (0.03)} &\tabincell{c}{6.17e-07} & \tabincell{c}{7+290+9 \\ (0.02)} &\tabincell{c}{7.28e-07 } & \tabincell{c}{1+279+3 \\ (0.02)} &\tabincell{c}{1.11e-16} \\ \hline \tabincell{c}{Exam. 50 lpi\_cplex2 \\ (n = 602)} & \tabincell{c}{41 \\ (0.07)} &\tabincell{c}{7.33e-07} & \tabincell{c}{9+422+11 \\ (0.03)} &\tabincell{c}{1.45e-09 } & \tabincell{c}{1+1+4 \\ (0.03)} &\tabincell{c}{4.44e-15} \\ \hline \tabincell{c}{Exam. 51 lpi\_ex72a \\ (n = 412)} & \tabincell{c}{40 \\ (0.03)} &\tabincell{c}{6.16e-07} & \tabincell{c}{7+164+9 \\ (0.00)} &\tabincell{c}{1.24e-12 } & \tabincell{c}{1+1+3 \\ (0.02)} &\tabincell{c}{0.00e-00} \\ \hline \tabincell{c}{Exam. 52 lpi\_ex73a \\ (n = 404)} & \tabincell{c}{40 \\ (0.03)} &\tabincell{c}{7.56e-07} & \tabincell{c}{6+125+8 \\ (0.00)} &\tabincell{c}{6.75e-10 } & \tabincell{c}{1+1+3 \\ (0.02)} &\tabincell{c}{0.00e-00} \\ \hline \tabincell{c}{Exam. 
53 lpi\_mondou2 \\ (n = 916)} & \tabincell{c}{38 \\ (0.09)} &\tabincell{c}{6.85e-07} & \tabincell{c}{6+555+8 \\ (0.08)} &\tabincell{c}{5.02e-07 } & \tabincell{c}{1+1+5 \\ (0.03)} &\tabincell{c}{0.00e-00} \\ \hline \end{tabular}}\end{table} \begin{table}[htbp] \newcommand{\tabincell}[2]{\begin{tabular}{@{}#1@{}}#2\end{tabular}} \scriptsize \centering \fontsize{8}{8}\selectfont \caption{Numerical results of large-scale sparse LCPs (no. 54-78).} \label{TABSLCP3} \resizebox{\textwidth}{!}{ \begin{tabular}{|c|c|c|c|c|c|c|c|c|c|} \hline \multirow{2}{*}{Problems} & \multicolumn{2}{c|}{RPFMTr} & \multicolumn{2}{c|}{PATH} & \multicolumn{2}{c|}{MILES} \\ \cline{2-7} \tabincell{c}{(n*n)} & \tabincell{c}{steps \\(time)} & \tabincell{c}{Terr\\} & \tabincell{c}{major+minor+grad \\(time)} & \tabincell{c}{Terr\\} & \tabincell{c}{major+pivots+refactor \\(time)} & \tabincell{c}{Terr\\} \\ \hline \tabincell{c}{Exam. 54 lp\_80bau3b \\ (n = 14323)} & \tabincell{c}{49 \\ (7.56)} &\tabincell{c}{6.82e-07} & \tabincell{c}{9+858+23 \\ (0.30)} &\tabincell{c}{1.52e-10} &\tabincell{c}{\textcolor{red}{41+101114+1107} \\ \textcolor{red}{(1015.16)}} &\tabincell{c}{\textcolor{red}{6.19e+01} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 55 lp\_bnl2 \\ (n = 6810)} & \tabincell{c}{47 \\ (4.83)} &\tabincell{c}{7.01e-07} & \tabincell{c}{12+4597+17 \\ (1.47)} &\tabincell{c}{2.72e-11 } & \tabincell{c}{1+1+27 \\ (6.84)} &\tabincell{c}{1.24e-12} \\ \hline \tabincell{c}{Exam. 56 lp\_cre\_a \\ (n = 10764)} & \tabincell{c}{53 \\ (5.14)} &\tabincell{c}{8.39e-07} & \tabincell{c}{12+5755+17 \\ (2.22)} &\tabincell{c}{4.09e-07 } & \tabincell{c}{1+1+42 \\ (22.98)} &\tabincell{c}{1.48e-12} \\ \hline \tabincell{c}{Exam. 57 lp\_cre\_c \\ (n = 9479)} & \tabincell{c}{50 \\ (4.50)} &\tabincell{c}{8.58e-07} & \tabincell{c}{11+3604+14 \\ (1.34)} &\tabincell{c}{1.77e-09 } & \tabincell{c}{1+490+40 \\ (18.56)} &\tabincell{c}{1.59e-12} \\ \hline \tabincell{c}{Exam. 58 lp\_cycle\\ (n = 5274)} & \tabincell{c}{59 \\ (5.21)} &\tabincell{c}{4.59e-07} & \tabincell{c}{17+4532+24 \\ (2.58)} &\tabincell{c}{8.67e-09 } &\tabincell{c}{\textcolor{red}{100+197608+2800} \\ \textcolor{red}{(461.45)}} &\tabincell{c}{\textcolor{red}{7.15e+03} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 59 lp\_d6cube \\ (n = 6599)} & \tabincell{c}{56 \\ (48.11)} &\tabincell{c}{2.56e-07} & \tabincell{c}{5+941+19 \\ (0.53)} &\tabincell{c}{7.37e-07 } & \tabincell{c}{1+6085+39 \\ (10.75)} &\tabincell{c}{2.73e-12} \\ \hline \tabincell{c}{Exam. 60 lp\_fit2d \\ (n = 10549)} & \tabincell{c}{69 \\ (68.27)} &\tabincell{c}{6.72e-07} & \tabincell{c}{7+2114+10 \\ (21.23)} &\tabincell{c}{1.53e-09 } & \tabincell{c}{1+1+3 \\ (0.72)} &\tabincell{c}{3.49e-10} \\ \hline \tabincell{c}{Exam. 61 lp\_greenbea \\ (n = 7990)} & \tabincell{c}{50 \\ (13.68)} &\tabincell{c}{7.29e-07} & \tabincell{c}{12+3899+28 \\ (2.91)} &\tabincell{c}{1.01e-09 } &\tabincell{c}{\textcolor{red}{100+74070+1754} \\ \textcolor{red}{(513.08)}} &\tabincell{c}{\textcolor{red}{9.90e+01} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 62 lp\_greenbeb \\ (n = 7990)} & \tabincell{c}{50 \\ (14.48)} &\tabincell{c}{7.29e-07} & \tabincell{c}{12+3899+28 \\ (3.19)} &\tabincell{c}{1.01e-09 } &\tabincell{c}{\textcolor{red}{100+74070+1754} \\ \textcolor{red}{(514.13)}} &\tabincell{c}{\textcolor{red}{9.90e+01} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 
63 lp\_ken\_07 \\ (n = 6028)} & \tabincell{c}{42 \\ (1.09)} &\tabincell{c}{6.24e-07} & \tabincell{c}{8+4004+11 \\ (0.98)} &\tabincell{c}{9.71e-09 } & \tabincell{c}{1+1+21 \\ (3.31)} &\tabincell{c}{0.00e-00} \\ \hline \tabincell{c}{Exam. 64 lp\_pds\_02 \\ (n = 10669)} & \tabincell{c}{38 \\ (2.66)} &\tabincell{c}{9.65e-07} & \tabincell{c}{5+3264+26 \\ (3.27)} &\tabincell{c}{7.09e-09 } & \tabincell{c}{1+1+47 \\ (24.70)} &\tabincell{c}{0.00e-00} \\ \hline \tabincell{c}{Exam. 65 lp\_pilot87 \\ (n = 8710)} & \tabincell{c}{59 \\ (59.97)} &\tabincell{c}{7.96e-07} & \tabincell{c}{10+3002+21 \\ (2.36)} &\tabincell{c}{6.47e-07 } &\tabincell{c}{\textcolor{red}{100+121000+1500} \\ \textcolor{red}{(498.08)}} &\tabincell{c}{\textcolor{red}{1.00e+03} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 66 lp\_qap12 \\ (n = 12048)} & \tabincell{c}{45 \\ (223.00)} &\tabincell{c}{6.25e-07} & \tabincell{c}{7+9138+9 \\ (205.47)} &\tabincell{c}{8.54e-07 } &\tabincell{c}{\textcolor{red}{12+145940+1427} \\ \textcolor{red}{(1064.42)}} &\tabincell{c}{\textcolor{red}{1.07e+01} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 67 lp\_ship08l \\ (n = 5141)} & \tabincell{c}{48 \\ (1.87)} &\tabincell{c}{5.86e-07} & \tabincell{c}{10+1676+21 \\ (0.80)} &\tabincell{c}{5.38e-07 } & \tabincell{c}{1+1+12 \\ (1.31)} &\tabincell{c}{5.68e-14} \\ \hline \tabincell{c}{Exam. 68 lp\_ship12l \\ (n = 6684)} & \tabincell{c}{47 \\ (1.88)} &\tabincell{c}{8.51e-07} & \tabincell{c}{14+8950+17 \\ (4.24)} &\tabincell{c}{1.22e-10 } & \tabincell{c}{1+1+16 \\ (3.03)} &\tabincell{c}{4.17e-14} \\ \hline \tabincell{c}{Exam. 69 lp\_truss \\ (n = 9806)} & \tabincell{c}{44 \\ (4.72)} &\tabincell{c}{6.22e-07} & \tabincell{c}{6+5607+9 \\ (6.92)} &\tabincell{c}{4.99e-08 } &\tabincell{c}{\textcolor{red}{98+299152+2379} \\ \textcolor{red}{(1011.18)}} &\tabincell{c}{\textcolor{red}{1.67e+00} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 70 lp\_woodw \\ (n = 9516)} & \tabincell{c}{86 \\ (201.33)} &\tabincell{c}{6.31e-07} & \tabincell{c}{10+4715+19 \\ (9.89)} &\tabincell{c}{1.78e-09 } &\tabincell{c}{\textcolor{red}{100+232954+1904} \\ \textcolor{red}{(735.08)}} &\tabincell{c}{\textcolor{red}{2.00e+05} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 71 lpi\_bgindy \\ (n = 13551)} & \tabincell{c}{44 \\ (50.77)} &\tabincell{c}{8.49e-07} & \tabincell{c}{4+614+25 \\ (1.28)} &\tabincell{c}{1.20e-07 } & \tabincell{c}{1+1+39 \\ (34.14)} &\tabincell{c}{9.06e-08} \\ \hline \tabincell{c}{Exam. 72 lpi\_gran \\ (n = 5183)} & \tabincell{c}{81 \\ (7.50)} &\tabincell{c}{9.94e-07} & \tabincell{c}{15+1988+28 \\ (1.27)} &\tabincell{c}{8.14e-07 } &\tabincell{c}{\textcolor{red}{100+70036+700} \\ \textcolor{red}{(74.33)}} &\tabincell{c}{\textcolor{red}{5.68e+03} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 73 lpi\_greenbea \\ (n = 7989)} & \tabincell{c}{50 \\ (11.91)} &\tabincell{c}{9.94e-07} & \tabincell{c}{11+3173+29 \\ (2.94)} &\tabincell{c}{4.68e-07 } &\tabincell{c}{\textcolor{red}{100+303396+2731} \\ \textcolor{red}{(921.31)}} &\tabincell{c}{\textcolor{red}{6.72e+01} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 74 lp\_ken\_11 \\ (n = 36043)} & \tabincell{c}{43 \\ (16.37)} &\tabincell{c}{6.95e-07} & \tabincell{c}{9+23833+12 \\ (172.78)} &\tabincell{c}{2.65e-08 } & \tabincell{c}{1+1+142 \\ (1081.75)} &\tabincell{c}{0.00e-00} \\ \hline \tabincell{c}{Exam. 
75 lp\_osa\_07 \\ (n = 26185)} & \tabincell{c}{45 \\ (451.93)} &\tabincell{c}{6.55e-07} & \tabincell{c}{4+954+17 \\ (2.64)} &\tabincell{c}{1.77e-11 } & \tabincell{c}{1+1+23 \\ (78.39)} &\tabincell{c}{3.87e-09} \\ \hline \tabincell{c}{Exam. 76 lp\_pds\_06 \\ (n = 39232)} & \tabincell{c}{38 \\ (115.16)} &\tabincell{c}{9.02e-07} & \tabincell{c}{5+6809+37 \\ (43.47)} &\tabincell{c}{9.00e-11 } & \tabincell{c}{1+1+240 \\ (1885.67)} &\tabincell{c}{0.00e-00} \\ \hline \tabincell{c}{Exam. 77 lp\_qap15 \\ (n = 28605)} & \tabincell{c}{46 \\ (824.18)} &\tabincell{c}{6.34e-07} &\tabincell{c}{\textcolor{red}{5+20058+8} \\ \textcolor{red}{(18004.75)}} &\tabincell{c}{\textcolor{red}{2.11e+01} \\ \textcolor{red}{(failed)}} &\tabincell{c}{\textcolor{red}{3+39920+342} \\ \textcolor{red}{(1410.55)}} &\tabincell{c}{\textcolor{red}{1.20e+01} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 78 lp\_stocfor3 \\ (n = 40216)} & \tabincell{c}{54 \\ (11.08)} &\tabincell{c}{8.52e-07} & \tabincell{c}{21+1623+50 \\ (5.69)} &\tabincell{c}{2.78e-12 } &\tabincell{c}{\textcolor{red}{2+4776+135} \\ \textcolor{red}{(1169.09)}} &\tabincell{c}{\textcolor{red}{5.14e+01} \\ \textcolor{red}{(failed)}} \\ \hline \end{tabular}}\end{table} \vskip 2mm \begin{figure}[!htbp] \centering \includegraphics[width=0.80\textwidth]{iteration_RPFMTr_SLCP.pdf} \caption{Iterations of RPFMTr, \,PATH and MILES for sparse LCPs.} \label{fig:ITSLCP} \end{figure} \vskip 2mm \begin{figure}[!htbp] \centering \includegraphics[width=0.80\textwidth]{CPU_RPFMTr_SLCP.pdf} \caption{CPU time of RPFMTr, \,PATH and MILES for sparse LCPs.} \label{fig:CPUSLCP} \end{figure} \vskip 2mm \newpage \begin{table}[htbp] \newcommand{\tabincell}[2]{\begin{tabular}{@{}#1@{}}#2\end{tabular}} \scriptsize \centering \fontsize{8}{8}\selectfont \caption{Numerical results of small-scale dense LCPs (no. 1-27).} \label{TABDLCP1} \resizebox{\textwidth}{!}{ \begin{tabular}{|c|c|c|c|c|c|c|c|c|c|} \hline \multirow{2}{*}{Problems} & \multicolumn{2}{c|}{RPFMTr} & \multicolumn{2}{c|}{PATH} & \multicolumn{2}{c|}{MILES} \\ \cline{2-7} \tabincell{c}{(n*n)} & \tabincell{c}{steps \\(time)} & \tabincell{c}{Terr\\} & \tabincell{c}{major+minor+grad \\(time)} & \tabincell{c}{Terr\\} & \tabincell{c}{major+pivots+refactor \\(time)} & \tabincell{c}{Terr\\} \\ \hline \tabincell{c}{Exam. 1 lp\_25fv47 \\ (n = 2697)} & \tabincell{c}{56 \\ (16.83)} &\tabincell{c}{5.85e-07} & \tabincell{c}{12+3457+22 \\ (200.42)} &\tabincell{c}{2.78e-09} &\tabincell{c}{\textcolor{red}{100+61000+600} \\ \textcolor{red}{(132.30)}} &\tabincell{c}{\textcolor{red}{3.56e+02} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 2 lp\_adlittle \\ (n = 194)} & \tabincell{c}{43 \\ (0.26)} &\tabincell{c}{7.90e-07} & \tabincell{c}{8+203+10 \\ (0.03)} &\tabincell{c}{1.33e-11 } & \tabincell{c}{1+327+3 \\ (0.02)} &\tabincell{c}{1.38e-12} \\ \hline \tabincell{c}{Exam. 3 lp\_afiro \\ (n = 78)} & \tabincell{c}{40 \\ (0.03)} &\tabincell{c}{7.49e-07} & \tabincell{c}{9+73+11 \\ (0.03)} &\tabincell{c}{2.58e-11 } & \tabincell{c}{1+77+2 \\ (0.08)} &\tabincell{c}{2.22e-14} \\ \hline \tabincell{c}{Exam. 4 lp\_agg \\ (n = 1103)} & \tabincell{c}{53 \\ (2.78)} &\tabincell{c}{7.48e-07} & \tabincell{c}{12+1284+17 \\ (5.20)} &\tabincell{c}{5.24e-08 } &\tabincell{c}{\textcolor{red}{100+26000+600} \\ \textcolor{red}{(47.64)}} &\tabincell{c}{\textcolor{red}{1.43e+00} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 
5 lp\_agg2 \\ (n = 1274)} & \tabincell{c}{54 \\ (2.80)} &\tabincell{c}{7.30e-07} & \tabincell{c}{10+1071+13 \\ (4.61)} &\tabincell{c}{5.44e-07 } & \tabincell{c}{1+132+8 \\ (2.30)} &\tabincell{c}{3.58e-09} \\ \hline \tabincell{c}{Exam. 6 lp\_agg3 \\ (n = 1274)} & \tabincell{c}{44 \\ (1.64)} &\tabincell{c}{7.03e-07} & \tabincell{c}{10+1053+13 \\ (4.58)} &\tabincell{c}{1.51e-07 } & \tabincell{c}{1+1335+8 \\ (2.25)} &\tabincell{c}{3.42e-11} \\ \hline \tabincell{c}{Exam. 7 lp\_bandm \\ (n = 777)} & \tabincell{c}{51 \\ (0.78)} &\tabincell{c}{9.17e-07} & \tabincell{c}{14+1075+18 \\ (2.80)} &\tabincell{c}{8.96e-11 } &\tabincell{c}{\textcolor{red}{100+23867+901} \\ \textcolor{red}{(37.73)}} &\tabincell{c}{\textcolor{red}{2.74e+02} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 8 lp\_beaconfd \\ (n = 468)} & \tabincell{c}{51 \\ (0.25)} &\tabincell{c}{8.57e-07} & \tabincell{c}{14+646+17 \\ (0.45)} &\tabincell{c}{9.99e-07 } &\tabincell{c}{\textcolor{red}{100+25300+500} \\ \textcolor{red}{(4.72)}} &\tabincell{c}{\textcolor{red}{1.76e+02} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 9 lp\_blend \\ (n = 188)} & \tabincell{c}{44 \\ (0.03)} &\tabincell{c}{5.39e-07} & \tabincell{c}{13+258+15 \\ (0.03)} &\tabincell{c}{4.73e-08 } & \tabincell{c}{1+547+4 \\ (0.03)} &\tabincell{c}{1.83e-09} \\ \hline \tabincell{c}{Exam. 10 lp\_bnl1 \\ (n = 2229)} & \tabincell{c}{52 \\ (9.88)} &\tabincell{c}{6.71e-07} & \tabincell{c}{13+3083+17 \\ (56.92)} &\tabincell{c}{1.65e-09 } &\tabincell{c}{\textcolor{red}{100+26600+800} \\ \textcolor{red}{(121.78)}} &\tabincell{c}{\textcolor{red}{7.79e+01} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 11 lp\_bore3d \\ (n = 567)} & \tabincell{c}{50 \\ (0.40)} &\tabincell{c}{9.26e-07} & \tabincell{c}{13+745+18 \\ (0.81)} &\tabincell{c}{8.51e-08 } & \tabincell{c}{2+1525+12 \\ (0.38)} &\tabincell{c}{1.05e-11} \\ \hline \tabincell{c}{Exam. 12 lp\_brandy \\ (n = 523)} & \tabincell{c}{53 \\ (0.34)} &\tabincell{c}{5.13e-07} & \tabincell{c}{13+697+16 \\ (0.53)} &\tabincell{c}{2.73e-07 } &\tabincell{c}{\textcolor{red}{100+71600+600} \\ \textcolor{red}{(11.34)}} &\tabincell{c}{\textcolor{red}{2.72e+01} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 13 lp\_capri \\ (n = 753)} & \tabincell{c}{52 \\ (0.75)} &\tabincell{c}{5.41e-07} & \tabincell{c}{10+761+19 \\ (1.45)} &\tabincell{c}{2.37e-09 } &\tabincell{c}{\textcolor{red}{100+79200+800} \\ \textcolor{red}{(37.36)}} &\tabincell{c}{\textcolor{red}{2.10e+01} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 14 lp\_czprob \\ (n = 4491)} & \tabincell{c}{44 \\ (42.87)} &\tabincell{c}{8.31e-07} & \tabincell{c}{9+3102+16 \\ (650.25)} &\tabincell{c}{3.36e-07 } &\tabincell{c}{\textcolor{red}{100+6200+500} \\ \textcolor{red}{(120.50)}} &\tabincell{c}{\textcolor{red}{5.33e+00} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 15 lp\_degen2 \\ (n = 1201)} & \tabincell{c}{49 \\ (2.32)} &\tabincell{c}{7.63e-07} & \tabincell{c}{12+1582+14 \\ (7.38)} &\tabincell{c}{2.83e-07 } & \tabincell{c}{3+566+16 \\ (2.25)} &\tabincell{c}{2.47e-09} \\ \hline \tabincell{c}{Exam. 16 lp\_degen3 \\ (n = 4107)} & \tabincell{c}{45 \\ (35.17)} &\tabincell{c}{6.85e-07} & \tabincell{c}{9+4003+20 \\ (388.39)} &\tabincell{c}{9.91e-07 } &\tabincell{c}{\textcolor{red}{100+66800+600} \\ \textcolor{red}{(129.42)}} &\tabincell{c}{\textcolor{red}{9.50e+01} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 
17 lp\_e226 \\ (n = 695)} & \tabincell{c}{55 \\ (0.65)} &\tabincell{c}{6.15e-07} & \tabincell{c}{14+953+17 \\ (1.14)} &\tabincell{c}{1.45e-10 } &\tabincell{c}{\textcolor{red}{100+169800+1100} \\ \textcolor{red}{(34.97)}} &\tabincell{c}{\textcolor{red}{3.32e+01} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 18 lp\_etamacro \\ (n = 1216)} & \tabincell{c}{51 \\ (2.46)} &\tabincell{c}{8.41e-07} & \tabincell{c}{7+640+18 \\ (6.08)} &\tabincell{c}{5.85e-07 } & \tabincell{c}{2+120+9 \\ (1.17)} &\tabincell{c}{1.36e-08} \\ \hline \tabincell{c}{Exam. 19 lp\_fffff800 \\ (n = 1552)} & \tabincell{c}{70 \\ (5.76)} &\tabincell{c}{6.93e-07} & \tabincell{c}{14+2172+21 \\ (15.77)} &\tabincell{c}{2.09e-08 } &\tabincell{c}{\textcolor{red}{100+99000+700} \\ \textcolor{red}{(58.69)}} &\tabincell{c}{\textcolor{red}{1.09e+05} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 20 lp\_finnis \\ (n = 1561)} & \tabincell{c}{46 \\ (3.87)} &\tabincell{c}{9.59e-07} & \tabincell{c}{10+1583+18 \\ (14.41)} &\tabincell{c}{1.92e-07 } &\tabincell{c}{\textcolor{red}{100+24400+500} \\ \textcolor{red}{(28.81)}} &\tabincell{c}{\textcolor{red}{5.84e+01} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 21 lp\_fit1d \\ (n = 1073)} & \tabincell{c}{54 \\ (2.09)} &\tabincell{c}{8.83e-07} & \tabincell{c}{8+681+13 \\ (0.53)} &\tabincell{c}{3.46e-10 } & \tabincell{c}{1+152+4 \\ (0.06)} &\tabincell{c}{5.64e-09} \\ \hline \tabincell{c}{Exam. 22 lp\_ganges \\ (n = 3015)} & \tabincell{c}{40 \\ (14.89)} &\tabincell{c}{5.23e-07} & \tabincell{c}{8+1910+28 \\ (111.67)} &\tabincell{c}{6.15e-08 } &\tabincell{c}{\textcolor{red}{82+104468+1476} \\ \textcolor{red}{(1003.45)}} &\tabincell{c}{\textcolor{red}{9.53e+00} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 23 lp\_gfrd\_pnc \\ (n = 1776)} & \tabincell{c}{64 \\ (7.15)} &\tabincell{c}{6.02e-07} & \tabincell{c}{14+1694+20 \\ (40.06)} &\tabincell{c}{2.69e-09 } &\tabincell{c}{\textcolor{red}{100+6800+300} \\ \textcolor{red}{(14.97)}} &\tabincell{c}{\textcolor{red}{2.16e+03} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 24 lp\_grow15 \\ (n = 945)} & \tabincell{c}{42 \\ (1.17)} &\tabincell{c}{7.34e-07} & \tabincell{c}{8+856+10 \\ (2.91)} &\tabincell{c}{9.04e-13 } & \tabincell{c}{1+1+11 \\ (1.02)} &\tabincell{c}{1.44e-09} \\ \hline \tabincell{c}{Exam. 25 lp\_grow22 \\ (n = 1386)} & \tabincell{c}{41 \\ (2.65)} &\tabincell{c}{9.94e-07} & \tabincell{c}{9+1535+11 \\ (12.94)} &\tabincell{c}{4.57e-08 } &\tabincell{c}{\textcolor{red}{100+2600+400} \\ \textcolor{red}{(18.05)}} &\tabincell{c}{\textcolor{red}{1.68e+00} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 26 lp\_lotfi \\ (n = 519)} & \tabincell{c}{46 \\ (0.29)} &\tabincell{c}{6.47e-07} & \tabincell{c}{11+408+18 \\ (0.48)} &\tabincell{c}{3.33e-11 } &\tabincell{c}{\textcolor{red}{100+47400+500} \\ \textcolor{red}{(13.56)}} &\tabincell{c}{\textcolor{red}{4.44e-02} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 27 lp\_maros \\ (n = 2812)} & \tabincell{c}{69 \\ (21.44)} &\tabincell{c}{8.93e-07} & \tabincell{c}{15+4178+17 \\ (193.91)} &\tabincell{c}{1.04e-08 } &\tabincell{c}{\textcolor{red}{100+14400+300} \\ \textcolor{red}{(35.28)}} &\tabincell{c}{\textcolor{red}{2.35e+04} \\ \textcolor{red}{(failed)}} \\ \hline \end{tabular}}\end{table} \begin{table}[htbp] \newcommand{\tabincell}[2]{\begin{tabular}{@{}#1@{}}#2\end{tabular}} \scriptsize \centering \fontsize{8}{8}\selectfont \caption{Numerical results of small-scale dense LCPs (no. 
28-53).} \label{TABDLCP2} \resizebox{\textwidth}{!}{ \begin{tabular}{|c|c|c|c|c|c|c|c|c|c|} \hline \multirow{2}{*}{Problems} & \multicolumn{2}{c|}{RPFMTr} & \multicolumn{2}{c|}{PATH} & \multicolumn{2}{c|}{MILES} \\ \cline{2-7} \tabincell{c}{(n*n)} & \tabincell{c}{steps \\(time)} & \tabincell{c}{Terr\\} & \tabincell{c}{major+minor+grad \\(time)} & \tabincell{c}{Terr\\} & \tabincell{c}{major+pivots+refactor \\(time)} & \tabincell{c}{Terr\\} \\ \hline \tabincell{c}{Exam. 28 lp\_modszk1 \\ (n = 2307)} & \tabincell{c}{46 \\ (9.37)} &\tabincell{c}{8.17e-07} & \tabincell{c}{13+3281+16 \\ (89.69)} &\tabincell{c}{4.94e-07 } &\tabincell{c}{\textcolor{red}{100+34000+500} \\ \textcolor{red}{(33.89)}} &\tabincell{c}{\textcolor{red}{9.03e+00} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 29 lp\_perold \\ (n = 2131)} & \tabincell{c}{60 \\ (10.21)} &\tabincell{c}{7.57e-07} & \tabincell{c}{16+2738+24 \\ (66.41)} &\tabincell{c}{1.33e-09 } &\tabincell{c}{\textcolor{red}{100+4200+400} \\ \textcolor{red}{(16.06)}} &\tabincell{c}{\textcolor{red}{2.88e+04} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 30 lp\_pilot\_ja \\ (n = 3207)} & \tabincell{c}{76 \\ (32.29)} &\tabincell{c}{9.82e-07} & \tabincell{c}{18+4063+29 \\ (272.11)} &\tabincell{c}{7.42e-07 } &\tabincell{c}{\textcolor{red}{100+71396+779} \\ \textcolor{red}{(233.45)}} &\tabincell{c}{\textcolor{red}{5.77e+06} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 31 lp\_qap8 \\ (n = 2544)} & \tabincell{c}{42 \\ (10.69)} &\tabincell{c}{8.04e-07} & \tabincell{c}{12+3423+14 \\ (116.20)} &\tabincell{c}{1.81e-07 } &\tabincell{c}{\textcolor{red}{100+23000+400} \\ \textcolor{red}{(29.77)}} &\tabincell{c}{\textcolor{red}{1.18e+01} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 32 lp\_lp\_recipe \\ (n = 295)} & \tabincell{c}{53 \\ (0.09)} &\tabincell{c}{7.61e-07} & \tabincell{c}{10+240+15 \\ (0.16)} &\tabincell{c}{6.28e-08 } &\tabincell{c}{\textcolor{red}{100+5144+595} \\ \textcolor{red}{(4.11)}} &\tabincell{c}{\textcolor{red}{1.91e+01} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 33 lp\_sc50a \\ (n = 128)} & \tabincell{c}{42 \\ (0.02)} &\tabincell{c}{7.79e-07} & \tabincell{c}{5+92+7 \\ (0.02)} &\tabincell{c}{2.25e-08 } & \tabincell{c}{1+135+2 \\ (0.02)} &\tabincell{c}{7.11e-14} \\ \hline \tabincell{c}{Exam. 34 lp\_scagr7 \\ (n = 314)} & \tabincell{c}{41 \\ (0.07)} &\tabincell{c}{8.19e-07} & \tabincell{c}{12+391+14 \\ (0.16)} &\tabincell{c}{1.24e-08 } & \tabincell{c}{2+452+8 \\ (0.11)} &\tabincell{c}{1.77e-13} \\ \hline \tabincell{c}{Exam. 35 lp\_scagr25 \\ (n = 1142)} & \tabincell{c}{41 \\ (1.72)} &\tabincell{c}{8.73e-07} & \tabincell{c}{9+648+19 \\ (3.58)} &\tabincell{c}{1.65e-07 } &\tabincell{c}{\textcolor{red}{100+53500+700} \\ \textcolor{red}{(33.66)}} &\tabincell{c}{\textcolor{red}{1.09e+01} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 36 lp\_scfxm1 \\ (n = 930)} & \tabincell{c}{54 \\ (1.37)} &\tabincell{c}{5.12e-07} & \tabincell{c}{12+1260+16 \\ (2.67)} &\tabincell{c}{1.22e-07 } &\tabincell{c}{\textcolor{red}{100+102400+1462} \\ \textcolor{red}{(75.61)}} &\tabincell{c}{\textcolor{red}{2.23e+02} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 37 lp\_scfxm2 \\ (n = 1860)} & \tabincell{c}{58 \\ (7.08)} &\tabincell{c}{8.13e-07} & \tabincell{c}{15+2685+19 \\ (29.86)} &\tabincell{c}{3.10e-10 } &\tabincell{c}{\textcolor{red}{100+104383+902} \\ \textcolor{red}{(94.95)}} &\tabincell{c}{\textcolor{red}{2.35e+02} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 
38 lp\_scfxm3 \\ (n = 2790)} & \tabincell{c}{61 \\ (18.68)} &\tabincell{c}{9.23e-07} & \tabincell{c}{13+3864+17 \\ (135.97)} &\tabincell{c}{2.04e-07 } &\tabincell{c}{\textcolor{red}{100+27200+500} \\ \textcolor{red}{(46.27)}} &\tabincell{c}{\textcolor{red}{3.89e+02} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 39 lp\_scorpion \\ (n = 854)} & \tabincell{c}{40 \\ (0.79)} &\tabincell{c}{7.15e-07} & \tabincell{c}{13+1094+15 \\ (2.64)} &\tabincell{c}{2.19-10 } &\tabincell{c}{\textcolor{red}{100+84572+800} \\ \textcolor{red}{(31.31)}} &\tabincell{c}{\textcolor{red}{1.38e+00} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 40 lp\_shell \\ (n = 2313)} & \tabincell{c}{50 \\ (10.06)} &\tabincell{c}{6.00e-07} & \tabincell{c}{13+3371+15 \\ (85.95)} &\tabincell{c}{4.39e-08 } &\tabincell{c}{\textcolor{red}{100+56406+497} \\ \textcolor{red}{(45.75)}} &\tabincell{c}{\textcolor{red}{1.17e+02} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 41 lp\_ship04l \\ (n = 2568)} & \tabincell{c}{42 \\ (10.56)} &\tabincell{c}{5.64e-07} & \tabincell{c}{12+4243+15 \\ (96.83)} &\tabincell{c}{1.66e-09 } &\tabincell{c}{\textcolor{red}{100+31600+500} \\ \textcolor{red}{(61.70)}} &\tabincell{c}{\textcolor{red}{1.31e+02} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 42 lp\_ship04s \\ (n = 1908)} & \tabincell{c}{42 \\ (5.50)} &\tabincell{c}{7.15e-07} & \tabincell{c}{12+3170+15 \\ (32.59)} &\tabincell{c}{4.05e-07 } &\tabincell{c}{\textcolor{red}{100+20000+500} \\ \textcolor{red}{(32.59)}} &\tabincell{c}{\textcolor{red}{5.69e+01} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 43 lp\_ship08s \\ (n = 3245)} & \tabincell{c}{43 \\ (18.97)} &\tabincell{c}{9.32e-07} & \tabincell{c}{12+5907+15 \\ (279.95)} &\tabincell{c}{6.91e-08 } &\tabincell{c}{\textcolor{red}{100+70700+700} \\ \textcolor{red}{(162.72)}} &\tabincell{c}{\textcolor{red}{2.38e+02} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 44 lp\_ship12s \\ (n = 4020)} & \tabincell{c}{42 \\ (30.77)} &\tabincell{c}{6.94e-07} & \tabincell{c}{9+3805+21 \\ (333.53)} &\tabincell{c}{2.36e-10 } &\tabincell{c}{\textcolor{red}{77+205665+1472} \\ \textcolor{red}{(1002.61)}} &\tabincell{c}{\textcolor{red}{1.97e+02} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 45 lp\_sierra \\ (n = 3962)} & \tabincell{c}{47 \\ (33.55)} &\tabincell{c}{8.12e-07} & \tabincell{c}{12+4069+16 \\ (319.80)} &\tabincell{c}{7.15e-09 } &\tabincell{c}{\textcolor{red}{100+5000+300} \\ \textcolor{red}{(35.28)}} &\tabincell{c}{\textcolor{red}{9.99e+04} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 46 lp\_standgub \\ (n = 1744)} & \tabincell{c}{53 \\ (5.65)} &\tabincell{c}{6.12e-07} & \tabincell{c}{12+2064+16 \\ (19.86)} &\tabincell{c}{3.20e-08 } &\tabincell{c}{\textcolor{red}{100+94960+792} \\ \textcolor{red}{(146.17)}} &\tabincell{c}{\textcolor{red}{2.44e+03} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 47 lp\_tuff \\ (n = 961)} & \tabincell{c}{61 \\ (1.72)} &\tabincell{c}{5.46e-07} & \tabincell{c}{15+1404+18 \\ (3.91)} &\tabincell{c}{1.50e-10 } &\tabincell{c}{\textcolor{red}{100+24300+939} \\ \textcolor{red}{(62.39)}} &\tabincell{c}{\textcolor{red}{7.78e+03} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 
48 lp\_wood1p \\ (n = 2839)} & \tabincell{c}{64 \\ (20.82)} &\tabincell{c}{6.11e-07} & \tabincell{c}{58+3749+64 \\ (473.97)} &\tabincell{c}{1.45e-09 } &\tabincell{c}{\textcolor{red}{100+2700+300} \\ \textcolor{red}{(10.44)}} &\tabincell{c}{\textcolor{red}{7.27e+05} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 49 lpi\_box1 \\ (n = 492)} & \tabincell{c}{30 \\ (0.20)} &\tabincell{c}{6.72e-07} & \tabincell{c}{13+657+15 \\ (0.56)} &\tabincell{c}{7.62e-08 } & \tabincell{c}{2+2115+24 \\ (0.66)} &\tabincell{c}{2.67e-15} \\ \hline \tabincell{c}{Exam. 50 lpi\_cplex2 \\ (n = 602)} & \tabincell{c}{43 \\ (0.45)} &\tabincell{c}{5.29e-07} & \tabincell{c}{12+627+14 \\ (0.95)} &\tabincell{c}{6.53e-07 } &\tabincell{c}{\textcolor{red}{100+46800+798} \\ \textcolor{red}{(25.13)}} &\tabincell{c}{\textcolor{red}{1.48e+02} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 51 lpi\_ex72a \\ (n = 412)} & \tabincell{c}{34 \\ (0.12)} &\tabincell{c}{5.52e-07} & \tabincell{c}{13+570+15 \\ (0.41)} &\tabincell{c}{5.36e-09 } & \tabincell{c}{1+867+6 \\ (0.14)} &\tabincell{c}{3.13e-14} \\ \hline \tabincell{c}{Exam. 52 lpi\_ex73a \\ (n = 404)} & \tabincell{c}{33 \\ (0.12)} &\tabincell{c}{9.00e-07} & \tabincell{c}{13+552+15 \\ (0.38)} &\tabincell{c}{7.35e-09} &\tabincell{c}{\textcolor{red}{100+6739+600} \\ \textcolor{red}{(9.94)}} &\tabincell{c}{\textcolor{red}{1.50e-03} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 53 lpi\_mondou2 \\ (n = 916)} & \tabincell{c}{38 \\ (0.94)} &\tabincell{c}{5.86e-07} & \tabincell{c}{11+773+13 \\ (2.36)} &\tabincell{c}{6.92e-10 } & \tabincell{c}{2+1151+9 \\ (0.63)} &\tabincell{c}{1.55e-14} \\ \hline \end{tabular}}\end{table} \begin{table}[htbp] \newcommand{\tabincell}[2]{\begin{tabular}{@{}#1@{}}#2\end{tabular}} \scriptsize \centering \fontsize{8}{8}\selectfont \caption{Numerical results of large-scale dense LCPs (no. 54-73).} \label{TABDLCP3} \resizebox{\textwidth}{!}{ \begin{tabular}{|c|c|c|c|c|c|c|c|c|c|} \hline \multirow{2}{*}{Problems} & \multicolumn{2}{c|}{RPFMTr} & \multicolumn{2}{c|}{PATH} & \multicolumn{2}{c|}{MILES} \\ \cline{2-7} \tabincell{c}{(n*n)} & \tabincell{c}{steps \\(time)} & \tabincell{c}{Terr\\} & \tabincell{c}{major+minor+grad \\(time)} & \tabincell{c}{Terr\\} & \tabincell{c}{major+pivots+refactor \\(time)} & \tabincell{c}{Terr\\} \\ \hline \tabincell{c}{Exam. 54 lp\_80bau3b \\ (n = 14323)} & \tabincell{c}{45 \\ (1385.62)} &\tabincell{c}{5.38e-07} &\tabincell{c}{\textcolor{red}{5+8283+8} \\ \textcolor{red}{(18049.72)}} &\tabincell{c}{\textcolor{red}{3.92e+01} \\ \textcolor{red}{(failed)}} &\tabincell{c}{\textcolor{red}{2+8245+54} \\ \textcolor{red}{(1432.94)}} &\tabincell{c}{\textcolor{red}{1.32e+03} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 55 lp\_bnl2 \\ (n = 6810)} & \tabincell{c}{55 \\ (153.40)} &\tabincell{c}{8.21e-07} & \tabincell{c}{11+8765+18 \\ (4549.92)} &\tabincell{c}{3.93e-08 } &\tabincell{c}{\textcolor{red}{43+27624+298} \\ \textcolor{red}{(1009.19)}} &\tabincell{c}{\textcolor{red}{9.22e+01} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 56 lp\_cre\_a \\ (n = 10764)} & \tabincell{c}{61 \\ (583.43)} &\tabincell{c}{6.36e-07} &\tabincell{c}{\textcolor{red}{4+3835+7} \\ \textcolor{red}{(18099.73)}} &\tabincell{c}{\textcolor{red}{7.38e+01} \\ \textcolor{red}{(failed)}} &\tabincell{c}{\textcolor{red}{138+49799+558} \\ \textcolor{red}{(1003.36)}} &\tabincell{c}{\textcolor{red}{1.51e+03} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 
57 lp\_cre\_c \\ (n = 9479)} & \tabincell{c}{55 \\ (373.29)} &\tabincell{c}{8.33e-07} &\tabincell{c}{\textcolor{red}{7+8042+10} \\ \textcolor{red}{(18008.97)}} &\tabincell{c}{\textcolor{red}{2.68e+00} \\ \textcolor{red}{(failed)}} &\tabincell{c}{\textcolor{red}{140+28754+599} \\ \textcolor{red}{(1004.88)}} &\tabincell{c}{\textcolor{red}{3.55e+03} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 58 lp\_cycle\\ (n = 5274)} & \tabincell{c}{62 \\ (94.96)} &\tabincell{c}{5.76e-07} & \tabincell{c}{12+7861+17 \\ (2322.98)} &\tabincell{c}{2.77e-07 } &\tabincell{c}{\textcolor{red}{100+15300+400} \\ \textcolor{red}{(148.55)}} &\tabincell{c}{\textcolor{red}{2.07e+03} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 59 lp\_d6cube \\ (n = 6599)} & \tabincell{c}{54 \\ (141.97)} &\tabincell{c}{6.46e-07} & \tabincell{c}{10+6621+16 \\ (725.33)} &\tabincell{c}{5.15e-07 } &\tabincell{c}{\textcolor{red}{80+66400+800} \\ \textcolor{red}{(1009.78)}} &\tabincell{c}{\textcolor{red}{2.19e+03} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 60 lp\_fit2d \\ (n = 10549)} & \tabincell{c}{59 \\ (550.98)} &\tabincell{c}{7.06e-07} & \tabincell{c}{8+5337+13 \\ (185.28)} &\tabincell{c}{2.82e-09 } & \tabincell{c}{2+147+4 \\ (1.11)} &\tabincell{c}{3.78e-10} \\ \hline \tabincell{c}{Exam. 61 lp\_greenbea \\ (n = 7990)} & \tabincell{c}{50 \\ (223.83)} &\tabincell{c}{8.66e-07} & \tabincell{c}{12+11911+16 \\ (7877.61)} &\tabincell{c}{5.44e-09 } &\tabincell{c}{\textcolor{red}{100+15200+400} \\ \textcolor{red}{(373.86)}} &\tabincell{c}{\textcolor{red}{1.14e+02} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 62 lp\_greenbeb \\ (n = 7990)} & \tabincell{c}{50 \\ (225.27)} &\tabincell{c}{9.43e-07} & \tabincell{c}{12+12033+16 \\ (10248.31)} &\tabincell{c}{2.46e-08 } &\tabincell{c}{\textcolor{red}{100+17600+500} \\ \textcolor{red}{(575.45)}} &\tabincell{c}{\textcolor{red}{1.14e+02} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 63 lp\_ken\_07 \\ (n = 6028)} & \tabincell{c}{42 \\ (96.95)} &\tabincell{c}{8.68e-07} & \tabincell{c}{8+4721+28 \\ (2922.75)} &\tabincell{c}{1.13e-07 } &\tabincell{c}{\textcolor{red}{56+6944+448} \\ \textcolor{red}{(1008.24)}} &\tabincell{c}{\textcolor{red}{5.01e+01} \\ \textcolor{red}{(failed)}}\\ \hline \tabincell{c}{Exam. 64 lp\_pds\_02 \\ (n = 10669)} & \tabincell{c}{48 \\ (437.80)} &\tabincell{c}{7.48e-07} & \tabincell{c}{8+8486+26 \\ (15408.33)} &\tabincell{c}{1.82e-08 } &\tabincell{c}{\textcolor{red}{100+9000+300} \\ \textcolor{red}{(504.81)}} &\tabincell{c}{\textcolor{red}{1.86e+01} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 65 lp\_pilot87 \\ (n = 8710)} & \tabincell{c}{58 \\ (307.63)} &\tabincell{c}{5.54e-07} & \tabincell{c}{11+8828+23 \\ (7239.50)} &\tabincell{c}{8.07e-10 } &\tabincell{c}{\textcolor{red}{56+48048+560} \\ \textcolor{red}{1014.17)}} &\tabincell{c}{\textcolor{red}{9.99e+02} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 66 lp\_qap12 \\ (n = 12048)} & \tabincell{c}{48 \\ (722.82)} &\tabincell{c}{8.45e-07} &\tabincell{c}{\textcolor{red}{3+5789+6} \\ \textcolor{red}{(18016.27)}} &\tabincell{c}{\textcolor{red}{2.73e+01} \\ \textcolor{red}{(failed)}} &\tabincell{c}{\textcolor{red}{100+23400+400} \\ \textcolor{red}{(771.77)}} &\tabincell{c}{\textcolor{red}{2.14e+01} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 
67 lp\_ship08l \\ (n = 5141)} & \tabincell{c}{47 \\ (66.37)} &\tabincell{c}{9.12e-07} & \tabincell{c}{12+9419+15 \\ (1537.80)} &\tabincell{c}{1.03e-09} &\tabincell{c}{\textcolor{red}{100+80800+900} \\ \textcolor{red}{(464.31)}} &\tabincell{c}{\textcolor{red}{4.34e+02} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 68 lp\_ship12l \\ (n = 6684)} & \tabincell{c}{47 \\ (124.64)} &\tabincell{c}{7.67e-07} & \tabincell{c}{12+12138+15 \\ (7256.50)} &\tabincell{c}{1.51e-07} &\tabincell{c}{\textcolor{red}{29+11948+145} \\ \textcolor{red}{(1001.00)}} &\tabincell{c}{\textcolor{red}{7.53e+02} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 69 lp\_truss \\ (n = 9806)} & \tabincell{c}{45 \\ (339.85)} &\tabincell{c}{6.61e-07} & \tabincell{c}{10+6890+12 \\ (10715.00)} &\tabincell{c}{2.30e-08 } &\tabincell{c}{\textcolor{red}{23+8685+201} \\ \textcolor{red}{(1022.70)}} &\tabincell{c}{\textcolor{red}{8.80e+00} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 70 lp\_woodw \\ (n = 9516)} & \tabincell{c}{71 \\ (494.92)} &\tabincell{c}{7.61e-07} & \tabincell{c}{14+7490+23 \\ (18080.28)} &\tabincell{c}{1.35e-05 } &\tabincell{c}{\textcolor{red}{100+15186+299} \\ \textcolor{red}{(340.14)}} &\tabincell{c}{\textcolor{red}{2.06e+02} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 71 lpi\_bgindy \\ (n = 13551)} & \tabincell{c}{53 \\ (1297.80)} &\tabincell{c}{7.46e-07} &\tabincell{c}{\textcolor{red}{1+663+6} \\ \textcolor{red}{(18181.77)}} &\tabincell{c}{\textcolor{red}{3.12e+02} \\ \textcolor{red}{(failed)}} &\tabincell{c}{\textcolor{red}{9+11664+81} \\ \textcolor{red}{(1025.18)}} &\tabincell{c}{\textcolor{red}{2.08e+02} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 72 lpi\_gran \\ (n = 5183)} & \tabincell{c}{60 \\ (81.64)} &\tabincell{c}{5.32e-07} & \tabincell{c}{14+5542+23 \\ (1450.42)} &\tabincell{c}{1.29e-08 } &\tabincell{c}{\textcolor{red}{500+145000+2500} \\ \textcolor{red}{(719.47)}} &\tabincell{c}{\textcolor{red}{2.357e+03} \\ \textcolor{red}{(failed)}} \\ \hline \tabincell{c}{Exam. 73 lpi\_greenbea \\ (n = 7989)} & \tabincell{c}{50 \\ (211.05)} &\tabincell{c}{5.37e-07} & \tabincell{c}{12+12005+16 \\ (12504.91)} &\tabincell{c}{7.46e-09 } &\tabincell{c}{\textcolor{red}{47+15546+227} \\ \textcolor{red}{(1006.56)}} &\tabincell{c}{\textcolor{red}{1.13e+02} \\ \textcolor{red}{(failed)}} \\ \hline \end{tabular}}\end{table} \newpage \begin{figure}[!htbp] \centering \includegraphics[width=0.80\textwidth]{iteration_RPFMTr_DLCP.pdf} \caption{Iterations of RPFMTr, \,PATH and MILES for dense LCPs.} \label{fig:ITDLCP} \end{figure} \vskip 2mm \begin{figure}[!htbp] \centering \includegraphics[width=0.80\textwidth]{CPU_RPFMTr_DLCP.pdf} \caption{CPU time of RPFMTr, \,PATH and MILES for dense LCPs.} \label{fig:CPUDLCP} \end{figure} \vskip 2mm \newpage \section{Conclusions} \vskip 2mm In this paper, we present the residual regularization path-following method with the trust-region updating strategy (RPFMTr) for the linear complementarity problem. The new residual regularization parameter improves the robustness of the path-following method, in comparison to the traditional complementarity regularization parameter. Moreover, we prove the global convergence of the new method under the standard assumptions, without the traditional assumption that feasibility takes priority over complementarity. Numerical results show that RPFMTr is a robust and efficient solver for the linear complementarity problem, especially for the dense case.
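For reference, the problem solved in all of the experiments above is the standard linear complementarity problem (see \cite{CPS2009}): given a square matrix $M$ of order $n$ and a vector $q$, find a vector $x$ such that
\[
x \geq 0, \qquad Mx + q \geq 0, \qquad x^{T}(Mx+q) = 0.
\]
A standard way to quantify the accuracy of an approximate solution $x$ is a norm of the componentwise residual $\min(x,\, Mx+q)$, which vanishes exactly at the solutions of the problem.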
Furthermore, it is more robust and faster than some state-of-the-art solvers such as PATH \cite{DF1995,PATH,FM2000,FM2022} and MILES \cite{Mathiesen1985,Rutherford1995,Rutherford2022} (the built-in subroutines of the GAMS v28.2 (2019) environment \cite{GAMS}). The computational time of RPFMTr is about 1/3 to 1/10 of that of PATH for the dense linear complementarity problem. Therefore, RPFMTr is an alternative solver for the linear complementarity problem and is worth exploring further for the nonlinear complementarity problem. \section*{Acknowledgments} This work was supported in part by Grant 61876199 from the National Natural Science Foundation of China, Grant YBWL2011085 from Huawei Technologies Co., Ltd., and Grant YJCB2011003HI from the Innovation Research Program of Huawei Technologies Co., Ltd. \vskip 2mm \noindent \textbf{Conflicts of interest / Competing interests:} Not applicable. \vskip 2mm \noindent \textbf{Availability of data and material (data transparency):} The test data will be provided upon request. \vskip 2mm \noindent \textbf{Code availability (software application or custom code):} The code will be provided upon request. \begin{thebibliography}{99} \bibitem{AMPL} AMPL, A Mathematical Programming Language, \url{http://www.ampl.com}, 2022. \bibitem{AG2003} E.~L. Allgower and K. Georg, \emph{Introduction to Numerical Continuation Methods}, SIAM, Philadelphia, 2003. \bibitem{AP1998} U.~M. Ascher and L.~R. Petzold, \emph{Computer Methods for Ordinary Differential Equations and Differential-Algebraic Equations}, SIAM, Philadelphia, 1998. \bibitem{AS2015} O. Axelsson and S. Sysala, \emph{Continuation Newton methods}, Comput. Math. Appl. \textbf{70} (2015), 2621-2637. \bibitem{Branin1972} F.~H. Branin, \emph{Widely convergent method for finding multiple solutions of simultaneous nonlinear equations}, IBM J. Res. Dev. \textbf{16} (1972), 504-521. \bibitem{BJ1998} J.~C. Butcher and Z. Jackiewicz, \emph{Construction of high order diagonally implicit multistage integration methods for ordinary differential equations}, Appl. Numer. Math. \textbf{27} (1998), 1-12. \bibitem{BCP1996} K.~E. Brenan, S.~L. Campbell, and L.~R. Petzold, \emph{Numerical Solution of Initial-Value Problems in Differential-Algebraic Equations}, SIAM, Philadelphia, 1996. \bibitem{CPS2009} R.~W. Cottle, J.-S. Pang and R.~E. Stone, \emph{The Linear Complementarity Problem}, SIAM, Philadelphia, 2009. \bibitem{CGT2000} A.~R. Conn, N. Gould, and Ph.~L. Toint, \emph{Trust-Region Methods}, SIAM, Philadelphia, 2000. \bibitem{CL2011} M.~T. Chu and M.~M. Lin, \emph{Dynamical system characterization of the central path and its variants: a revisit}, SIAM J. Appl. Dyn. Syst. \textbf{10} (2011), 887-905. \bibitem{Davidenko1953} D.~F. Davidenko, \emph{On a new method of numerical solution of systems of nonlinear equations} (in Russian), Dokl. Akad. Nauk SSSR \textbf{88} (1953), 601--602. \bibitem{Deuflhard2004} P. Deuflhard, \emph{Newton Methods for Nonlinear Problems: Affine Invariance and Adaptive Algorithms}, Springer, Berlin, 2004. \bibitem{PHP1975} P. Deuflhard, H.~J. Pesch, and P. Rentrop, \emph{A modified continuation method for the numerical solution of nonlinear two-point boundary value problems by shooting techniques}, Numer. Math. \textbf{26} (1975), 327-343. \bibitem{Dirkse1994} S.~P. Dirkse, \emph{Robust Solution of Mixed Complementarity Problems}, PhD thesis, Computer Sciences Department, University of Wisconsin, Madison, Wisconsin, 1994. \bibitem{DF1995} S. Dirkse and M.~C.
Ferris, \emph{The PATH solver: A non-monotone stabilization scheme for mixed complementarity problems}, Optim. Methods Softw. \textbf{5} (1995), 123-156. \bibitem{Doedel2007} E.~J. Doedel, \emph{Lecture notes in numerical analysis of nonlinear equations}, in \emph{Numerical Continuation Methods for Dynamical Systems}, B. Krauskopf, H.M. Osinga, and J. Gal\'{a}n-Vioque, eds., Springer, Berlin, 2007, pp. 1-50. \bibitem{Erleben2007} K. Erleben, \emph{Velocity-based shock propagation for multibody dynamics animation}, ACM Trans. Graph. \textbf{26 (2):12} (2007), \url{https://doi.org/10.1145/1243980.1243986.1}. \bibitem{FM2000} M.~C. Ferris and T.~S. Munson, \emph{Complementarity problems in GAMS and the PATH solver}, J. Econ. Dyn. Control \textbf{24} (2000), 165-188. \bibitem{FM2022} M.~C. Ferris and T.~S. Munson, \emph{The manual of PATH}, GAMS Corporation, \url{https://www.gams.com/latest/docs/S_PATH.html}, 2022. \bibitem{Fisher1992} A. Fischer, \emph{A special Newton-type optimization method}, Optimization \textbf{24} (1992), 269-284. \bibitem{GAMS} GAMS v28.2, GAMS Corporation, \url{https://www.gams.com/}, 2019. \bibitem{Gana1982} A. Gana, \emph{Studies in the Complementarity Problem}, Ph.D. dissertation, Department of Industrial and Operations Engineering, University of Michigan, Ann Arbor, MI, 1982. \bibitem{GMSW1991} P. Gill, W. Murray, M.~A. Saunders, M.~H. Wright, \emph{Mamtaining LU factors of a general sparse matrix}, Linear Algebra Appl. \textbf{88-89} (1991), 239-270. \bibitem{GV2013} G.~H. Golub and C.~F. Van~Loan, \emph{Matrix Computation}, 4th ed., The John Hopkins University Press, Baltimore, 2013. \bibitem{Gronwall1919} T.~H. Gronwall, \emph{Note on the derivatives with respect to a parameter of the solutions of a system of differential equations}, Ann. of Math. 20 (1919), pp. 292-296, JFM 47.0399.02 (\url{https://zbmath.org/?format=complete&q=an:47.0399.02}), JSTOR 1967124 (\url{https://www.jstor.org/stable/1967124}), MR 1502565 (\url{https://www.ams.org/mathscinet-getitem?mr=1502565}). \bibitem{HW1996} E. Hairer and G. Wanner, \emph{Solving Ordinary Differential Equations II, Stiff and Differential-Algebraic Problems}, 2nd ed., Springer, Berlin, 1996. \bibitem{Higham1999} D.~J. Higham, \emph{Trust region algorithms and timestep selection}, SIAM J. Numer. Anal. \textbf{37} (1999), 194-210. \bibitem{Hansen1994} P.~C. Hansen, \emph{Regularization Tools: A MATLAB package for analysis and solution of discrete ill posed problems}. Numer. Algorithms \textbf{6} (1994), 23-29. \bibitem{JT1995} Z. Jackiewicz, S. Tracogna, \emph{A general class of two-step Runge-Kutta methods for ordinary differential equations}, SIAM J. Numer. Anal. \textbf{32} (1995), 1390-1427. \bibitem{Kelley2003} C.~T. Kelley, {Solving Nonlinear Equations with Newton's Method}, SIAM, Philadelphia, 2003. \bibitem{Kelley2018} C.~T. Kelley, \emph{Numerical methods for nonlinear equations}, Acta Numer. \textbf{27} (2018), 207-287. \bibitem{LH1964} C.~E. Lemke and J. T. Howson, Jr. \emph{ Equilibrium points of bimatrix games}, Journal of the Society for Industrial and Applied Mathematics \textbf{12} (1964), 413-423. \bibitem{Luo2010} X.-L. Luo, \emph{A second-order pseudo-transient method for steady-state problems}, Appl. Math. Comput. \textbf{216} (2010), 1752-1762. \bibitem{Luo2012} X.-L. Luo, \emph{A dynamical method of DAEs for the smallest eigenvalue problem}, J. Comput. Sci. \textbf{3} (2012), 113-119. \bibitem{LXLZ2021} X.-L. Luo, H. Xiao, J.-H. Lv and S. 
Zhang, \emph{Explicit pseudo-transient continuation and the trust-region updating strategy for unconstrained optimization}, Appl. Numer. Math. \textbf{165} (2021), 290-302, available at \url{http://doi.org/10.1016/j.apnum.2021.02.019}. \bibitem{LLS2022} X.-L. Luo, J.-H. Lv and G. Sun, \emph{Continuation method with the trusty time-stepping scheme for linearly constrained optimization with noisy data}, Optim. Eng. \textbf{23} (2022), 329-360, available at \url{http://doi.org/10.1007/s11081-020-09590-z}. \bibitem{LXL2022} X.-L. Luo, H. Xiao and J.-H. Lv, \emph{Continuation Newton methods with the residual trust-region time-stepping scheme for nonlinear equations}, Numer. Algorithms \textbf{89} (2022), 223-247, available at \url{http://doi.org/10.1007/s11075-021-01112-x}. \bibitem{LX2021} X.-L. Luo and H. Xiao, \emph{Generalized continuation Newton methods and the trust-region updating strategy for the underdetermined system}, J. Sci. Comput. \textbf{88} (2021), article 56, published online at \url{http://doi.org/10.1007/s10915-021-01566-0}, pp. 1-22, July 13, 2021. \bibitem{LY2021} X.-L. Luo and Y.-Y. Yao, \emph{Primal-dual path-following methods and the trust-region updating strategy for linear programming with noisy data}, J. Comp. Math. \textbf{40} (2022), 760-780, published online at \url{http://doi.org/10.4208/jcm.2101-m2020-0173}, or available at \url{http://arxiv.org/abs/2006.07568}. \bibitem{LX2021B} X.-L. Luo and H. Xiao, \emph{Continuation Newton methods with deflation techniques and quasi-genetic evolution for global optimization problems}, arXiv preprint available at \url{http://arxiv.org/abs/2107.13864}, or Research Square preprint available at \url{https://doi.org/10.21203/rs.3.rs-1102775/v1}, July 30, 2021. Software available at \url{https://teacher.bupt.edu.cn/luoxinlong/zh_CN/zzcg/41406/list/index.htm}. \bibitem{LX2022} X.-L. Luo and H. Xiao, \emph{The regularization continuation method with an adaptive time step control for linearly constrained optimization problems}, Appl. Numer. Math. \textbf{181} (2022), 255-276, available at \url{https://doi.org/10.1016/j.apnum.2022.06.008}. \bibitem{MATLAB} MATLAB v9.8.0 (R2020a), The MathWorks Inc., \url{http://www.mathworks.com}, 2020. \bibitem{Mathiesen1985} L. Mathiesen, \emph{Computation of economic equilibria by a sequence of linear complementarity problems}, Math. Program. \textbf{23} (1985), 144-162. \bibitem{Rutherford2022} T.~F. Rutherford, \emph{The manual of MILES}, GAMS Corporation, \url{https://www.gams.com/latest/docs/S_MILES.html}, 2022. \bibitem{NETLIB} The NETLIB collection of linear programming problems, available at \url{http://www.netlib.org}. \bibitem{NW1999} J. Nocedal and S.~J. Wright, \emph{Numerical Optimization}, Springer, Berlin, 1999. \bibitem{OR2000} J.~M. Ortega and W.~C. Rheinboldt, \emph{Iteration Solution of Nonlinear Equations in Several Variables}, SIAM, Philadelphia, 2000. \bibitem{PATH} M.~C. Ferris et al., \emph{PATH solver 5.0.00}, software available at \url{https://pages.cs.wisc.edu/~ferris/path.html}, 2019. \bibitem {Powell1975} M.~J.~D. Powell, \emph{Convergence properties of a class of minimization algorithms}, in \emph{Nonlinear Programming 2}, O.~L. Mangasarian, R.~R. Meyer, and S.~M. Robinson, eds., Academic Press, New York, 1975, pp. 1-27. \bibitem{Rutherford1995} T.~F. Rutherford, \emph{Extension of GAMS for complementarity problems arising in applied economic analysis}, J. Econ. Dyn. Control \textbf{19} (1995), 1299-1324. \bibitem{SGT2003} L.~F. Shampine, I. Gladwell, and S. 
Thompson, \emph{Solving ODEs with MATLAB}, Cambridge University Press, Cambridge, 2003. \bibitem{Tanabe1979} K. Tanabe, \emph{Continuous Newton-Raphson method for solving an underdetermined system of nonlinear equations}, Nonlinear Anal. \textbf{3} (1979), 495-503. \bibitem{Tanabe1988} K. Tanabe, \emph{Centered Newton method for mathematical programming}, in \emph{System Modeling and Optimization: Proceedings of the 13th IFIP conference}, vol. 113 of Lecture Notes in Control and Information Systems, Springer, Berlin, 1988, pp. 197-206. \bibitem{Venkateswaran1993} V. Venkateswaran, \emph{An algorithm for the linear complementarity problem with a $P_0$-matrix}, SIAM J. Matrix Anal. Appl. \textbf{14} (1993), 967-977. \bibitem{Wright1994} S.~J. Wright, \emph{An infeasible-interior-point algorithm for linear complementarity problems}, Math. Program. \textbf{67} (1994), 29-51. \bibitem{Wright1997} S.~J. Wright, \emph{Primal-dual Interior Point Methods}, SIAM, Philadelphia, 1997. \bibitem{Xu1991} Q. Xu, \emph{On the New Linear Programming Algorithms-New Sequential Penalty Function Method and Two Point Barrier Function Method (in Chinese)}, Ph.D. thesis, Institute of Nuclear Technology, Tsinghua University, Beijing, China, 1991. \bibitem{Yuan2015} Y.~X. Yuan, \emph{Recent advances in trust region algorithms}, Math. Program. \textbf{151} (2015), 249-281. \bibitem{Zhang1994} Y. Zhang, \emph{On the convergence of a class of infeasible interior-point methods for the horizontal linear complementarity problem}, SIAM J. Optim. \textbf{4} (1994), 208-227. \end{thebibliography} \end{document}
2205.10645v3
http://arxiv.org/abs/2205.10645v3
The distance to the border of a random tree
\documentclass[11pt,a4paper,leqno]{amsart} \usepackage{amsmath,amsfonts,amssymb,amsthm} \usepackage[english]{babel} \usepackage{xcolor} \usepackage{datetime} \usepackage{enumerate} \usepackage[foot]{amsaddr} \usepackage{tikz} \usepackage{lipsum} \usepackage{tikz-cd} \usepackage{mathtools} \usepackage{float} \usepackage{xr} \usepackage[colorlinks=true,linkcolor=blue]{hyperref} \usepackage{amsmath} \usepackage{theoremref} \usepackage{forest} \newcommand{\tree}[1]{\tikz[baseline]{ \draw ( 0, 0) node (a) {$\bullet$} (-.5,-1) node (b) {$#1$} ( .5,-1) node (c) {$#1$}; \draw (b) -- (a) -- (c);}} \newcommand{\treeee}[1]{\tikz[baseline]{ \draw ( 0, 0) node (a) {$\bullet$} (-.6,-1) node (b) {$#1$} (0,-1) node (c) {$#1$} ( .6,-1) node (d) {$#1$}; \draw (b) -- (a) -- (c); \draw (a) -- (d);}} \newcommand{\treee}[1]{\tikz[baseline=0.3ex]{ \draw (0, 0) node (a) {$\bullet$} (0,-1) node (b) {$#1$}; \draw (b) -- (a);}} \newcommand{\treeA}[1]{\tikz[baseline=-0.3ex]{ \draw ( 0, 0) node (a) {$\bullet$} (0,-1) node (b) {$(#1)$}; \draw (b) -- (a);}} \newcommand{\treeB}[1]{\tikz[baseline=-0.3ex]{ \draw ( 0, 0) node (a) {$\bullet$} (-.25,-1) node (b) {$(#1,$} ( .25,-1) node (c) {$#1$)}; \draw (b) -- (a) -- (c);}} \newcommand{\treeC}[1]{\tikz[baseline=0.1ex]{ \draw ( 0, 0) node (a) {$\bullet$} (-.7,-1) node (b) {$#1$} ( .7,-1) node (c) {$#1$}; \draw (b) -- (a) -- (c);}} \usepackage[symbol]{footmisc} \renewcommand{\thefootnote}{\fnsymbol{footnote}} \addtolength{\textwidth}{1.6cm} \addtolength{\oddsidemargin}{-1cm} \addtolength{\evensidemargin}{-1cm} \newcommand{\guio}[1]{\nobreakdash-\hspace{0pt}#1} \newcommand{\Ndash}{\nobreakdash--} \newtheorem{theorem}{Theorem} \renewcommand{\thetheorem}{\Alph{theorem}} \newtheorem{lemma}{Lemma} \renewcommand{\thelemma}{\Alph{lemma}} \newtheorem{corollary}{Corollary} \renewcommand{\thecorollary}{\Alph{corollary}} \newtheorem*{theorem*}{Theorem} \newtheorem{theo}{Theorem}[section] \newtheorem{coro}[theo]{Corollary} \newtheorem{propo}[theo]{Proposition} \newtheorem{theor}[theo]{Theorem} \newtheorem{lem}[theo]{Lemma} \theoremstyle{definition} \newtheorem{remark}[theo]{Remark} \numberwithin{equation}{section} nremark{\hfill$\clubsuit$} nremark} \def\th{\hskip1pt} \def\tth{\hskip.5pt} \usepackage{xcolor} \newcommand{\nrojo}[1]{\textcolor{red}{#1}} \newcommand{\n}[1]{#1} \newcommand{\nazul}[1]{\textcolor{blue}{#1}} \newcommand{\nota}[1]{\marginpar{\footnotesize\flushleft #1 \endflushleft}} \def\R{\mathbb{R}} \def\C{\mathbb{C}} \def\D{\mathbb{D}} \def\Z{\mathbb{Z}} \def\E{\mathbf{E}} \def\V{\mathbf{V}} \def\P{\mathbf{P}} \def\E{\mathbf{E}} \def\K{\mathcal{K }} \newcommand{\uno}{\mathop{\textbf{\Large 1}}} \title{The distance to the border of a random tree} \author[V.\,J. Maci\'a]{V\'{\i}ctor J. Maci\'a} \address[V\'{\i}ctor J. Maci\'a]{Departamento de Matem\'aticas, CUNEF Universidad, Spain.} \email{[email protected]} \thanks{Research of V. 
Maci\'a was partially funded by grant MTM2017-85934-C3-2-P2 of Ministerio de Econom\'{\i}a y Competitividad of Spain and European Research Council Advanced Grant 834728} \makeatletter \@namedef{subjclassname@2020}{ \textup{2020} Mathematics Subject Classification} \makeatother \subjclass[2020]{{ 05C05, 60J80, 32H50, 05A16}} \keywords{Asymptotic formulae, rooted trees, distance to the leaves, distance to the border, Galton-Watson process, iteration, branching processes, large powers, Lagrange's inversion, coefficients, analytic functions, generating function, probability generating function.} \begin{document} \raggedbottom \begin{abstract} We consider a Galton–Watson process conditioned to have total progeny equal to \(n\) and investigate the asymptotic probability that the process exhibits a distance to the border (i.e., the minimal distance from the root to a leaf) of at least \(k\) as \(n\to\infty\). This work resolves an open question posed in \cite{Ara-Fer}. While similar asymptotic results appear in the literature on \(k\)-protected nodes (see \cite{Protection1-mean} and \cite{Protection-Clemens}), our approach is distinct, relying on Tauberian theorems and asymptotic results for large powers that have been established using Khinchin families. This methodology allows us to analyze the asymptotic behavior of the generating functions in a direct way, contrasting with the singularity analysis or combinatorial techniques typically used in the context of \(k\)-protected nodes. \end{abstract} \maketitle \parskip=1.5mm \setcounter{tocdepth}{3} \normalsize \section{Introduction}\label{sec: Intro} For a rooted tree $T$, the distance to the border $\partial(T)$ is the minimum of the distances from the root to the leaves of $T$. The height, on the other hand, is the maximum of these distances. In this paper, we are interested in obtaining asymptotic formulas, as the size $n$ tends to infinity, for the probability that a random rooted tree, conditioned to have size $n$, has distance to the border $\partial \geq k$. More precisely, for a Galton-Watson process $T$ with offspring distribution given by the random variable $Y$, and conditioned to have total progeny $\#(T) = n$, we ask for the probability that this conditioned Galton-Watson process has distance to the border greater than or equal to $k \geq 0$: $$(\star) \quad \P(\partial(T) \geq k \, | \, \#(T) = n).$$ The analogous question for the height is a classical one, dealt with by R\'enyi and Szekeres for Cayley trees, \cite{Renyi}, by de Bruijn, Knuth, and Rice for plane trees, \cite{de Bruijn}, and by Flajolet, Gao, Odlyzko, and Richmond for binary trees, \cite{FlajoletOdlyzko}. Our interest in the distance to the border of a rooted tree comes from \cite{Ara-Fer}; that paper treats only the case of Cayley trees and $k = 2,3$, and poses the general question, see \cite[p. 312]{Ara-Fer}. The main result of this paper is Theorem \ref{thm: mainthmprobabilistic}. There, an asymptotic formula for the probability $(\star)$, which depends on some computable constants, is presented. The proof of Theorem \ref{thm: mainthmprobabilistic} is based on an iteration scheme involving Lagrange's equation. An asymptotic formula for the coefficients of the $k$-th iterate of that scheme is the content of Theorem \ref{thm:mainthmlagrange}. In the combinatorial setting, several authors have studied the distance to the border in the context of the so-called $k$-protected nodes.
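In that terminology, a node of a rooted tree is $k$-protected when its distance to the nearest leaf among its descendants is at least $k$; thus $\partial(T) \geq k$ holds exactly when the root of $T$ is $k$-protected, and the probability $(\star)$ is the probability that the root of the conditioned tree is a $k$-protected node.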
In \cite{Protection1-mean} the authors calculate this same asymptotic formula for the probability $(\star)$, in the context of simply generated families of trees, and also the asymptotic behavior of the mean distance to the border, using singularity analysis. In \cite{Protection-Bona}, \cite{Protection-Gisang} and \cite{Protection-Clemens} the authors calculate some parameters related to the distance to the border of a tree: \cite{Protection-Bona} gives an asymptotic formula for the probability that a randomly selected node in a binary search tree of size $n$ is at distance to the border greater than or equal to $k$, as the size $n$ tends to infinity. The papers \cite{Protection-Gisang} and \cite{Protection-Clemens} study the mean distance to the border for certain classes of rooted plane trees, as the size $n$ tends to infinity. Our method is based on an iterative scheme that leverages Lagrange’s inversion formula and the Theory of Khinchin families. Using this method we can extend the main result of this paper, Theorem \ref{thm: mainthmprobabilistic}, to some limit cases, see Remark \ref{remark: extend} below. This framework connects the Theory of Khinchin families with the theory of simply generated random trees. By linking combinatorial constructions with probabilistic tools, our method opens up new avenues for analyzing local structural parameters in random trees. We place significant effort into explaining this connection in depth—even revisiting well-known constructions—to ensure that the paper is fully self-contained and accessible to readers from both combinatorial and probabilistic backgrounds. Although Khinchin families appear implicitly in many papers on random trees, to the best of our knowledge, no one has previously observed the connection between Lagrange inversion and the reparametrization of these families. Through this connection, we derive classical results about random trees in a direct and elegant manner. Moreover, we obtain an explicit formula for the probability of extinction expressed as a series related to the solution of Lagrange’s equation. The connection with the theory of Khinchin families presented here and in \cite{K_dos} allows many classical results to be proved in a simple way. For further details on Khinchin families and their applications in combinatorics and probability, see \cite{K_exponentials}, \cite{CFFM3} and \cite{K_dos}. \subsection{Some notations} We denote by $\D(0,R) \subset \C$ the open disk with center $z = 0$ and radius $R > 0$, and by $\C$ the complex plane. $\D$ denotes the unit disk in $\C$. For a power series $A(z)=\sum_{n=0}^\infty a_n z^n$, we denote its $n$th coefficient by $$a_n=\textsc{coeff}_{[n]}(A(z))\, , \quad \mbox{for each $n \ge 0$}\, .$$ For a random variable $X$ we denote by $\E(X)$ and $\V(X)$ the expectation and variance of $X$, respectively. Given two random variables $X$ and $Y$ we write $X\stackrel{d}{=}Y$ to signify equality in distribution. For integers $q \geq 1$ and $m \in \{0,1,2,\dots,q-1\}$ we denote $$\mathbb{N}_{m,q} = \{n \in \mathbb{N} : n \equiv m \, \textrm{mod} \, q \}.$$ Given two sequences $a_n$ and $b_n$, with $b_n \not\equiv 0$, we write \begin{align*} a_n \sim b_n, \quad \text{ as } n \rightarrow \infty \text{ and } n \in \mathbb{N}_{m,q}, \end{align*} to mean that \begin{align*} \lim_{\substack{n \to \infty; \\ n\in \mathbb{N}_{m,q}}}\frac{a_n}{b_n} = 1. \end{align*} \subsection*{Acknowledgements} The author would like to thank Prof.~Jos\'e L.
Fern\'andez for bringing this problem to his attention and also for some enlightening discussions. \section{Power series with positive coefficients} \subsection{Khinchin families} The class $\K$ consists of the nonconstant power series $f(z) = \sum_{n \geq 0}a_n z^n$ with nonnegative coefficients and positive radius of convergence $R > 0$, and such that $f(0) =a_0> 0$. To each power series $f$ in $\K$ we associate a one-parameter family of discrete random variables $(X_t)_{t \in [0,R)}$ taking values in $\{0,1, \ldots\}$. This family of discrete random variables is called the Khinchin family of $f$ and is given by \begin{align*} \P(X_t = n) = \frac{a_n t^n}{f(t)}, \quad \text{ for all } n \geq 0 \text{ and } t \in (0,R). \end{align*} For $t=0$, we define $X_0 \equiv 0$. For each $t \in (0,R)$, the random variable $X_t$ is not a constant. For each $t \in [0,R)$, we denote $\E(X_t) = m(t)$ and $\V(X_t) = \sigma^2(t) $. It turns out that \begin{align*} m(t) = \frac{tf'(t)}{f(t)}, \quad \text{ and } \quad \sigma^2(t) = tm'(t). \end{align*} The mean function $m(t)$ is an increasing diffeomorphism from $[0,R)$ onto its image; this follows from the fact that $\sigma^2(t) > 0$, for all $t \in (0,R)$. Those $f\in \K$ such that \begin{equation}\label{eq:req for existence of tau}\lim_{t \uparrow R} m(t)>1\end{equation} comprise a subclass $\K^\star$ that is quite relevant in what follows. If $f \in \K^\star$, the unique value $\tau \in(0, R)$ such that $m(\tau)=1$, is called the \textit{apex} of $f$. This apex $\tau$ is characterized by $\tau f^\prime(\tau)=f(\tau)$. Observe that the existence of apex for $f$ precludes $f$ from being a polynomial of degree 1, and, in particular, implies that $f^{\prime\prime}(\tau)>0$. We refer to \cite{K_uno}, \cite{K_exponentials} and \cite{K_dos}, for a detailed exposition of the topic. \medskip To power series $g(z)$ of the form $g(z)=z^N f(z)=\sum_{n=N}^\infty b_n z^n$ with $f\in \K$ (and radius of convergence $R$) and integer $N \ge 1$ we also associate a family of random variables $(Y_t)_{t \in [0,R)}$. These $Y_t$ are given by $$\P(Y_t=n)=\frac{b_n t^n}{g(t)}\, , \quad \mbox{for $t\in (0,R)$ and $n \ge N$}\,,$$ and $Y_0\equiv N$. Observe that the $Y_t$ take values in $\{N, N{+}1, \ldots\}$. If $(X_t)_{t \in [0,R)}$ is the Khinchin family of $f$ then $$Y_t\overset{d}{=}X_t+N\,, \quad \mbox{for any $t\in[0,R)$}\, .$$ We refer to the family $(Y_t)_{t \in [0,R)}$ as a \textit{shifted} Khinchin family. \medskip For any $f(z) = \sum_{n = 0}^{\infty}a_n z^n$ in $\K$, we denote \begin{align*} Q_{f} =\gcd\{n > 0 : a_n > 0\} = \lim_{N \rightarrow \infty} \gcd\left(\{0 < n \leq N: a_n \neq 0\}\right); \end{align*} observe that there is an \textit{auxiliary power series} $h \in \K$ such that $f(z) = h(z^{Q_f})$. \subsection{Meir-Moon Tauberian theorem.} The following Tauberian theorem, due to Meir-Moon, see \cite{MeirMoon}, will be used later in the proof of Theorem \ref{thm:mainthmlagrange}. \begin{theorem}[Meir-Moon]\label{tauberian} Let $B,C$ be power series in $\K$ with Taylor expansions \begin{align*} B(z) = \sum_{n = 0}^{\infty}b_nz^n, \quad C(z) = \sum_{n=0}^{\infty}c_nz^n, \end{align*} and let \begin{align*} D(z) \triangleq B(z)C(z) = \sum_{n=0}^{\infty}d_nz^n. \end{align*} Assume that the coefficients of $B$ and $C$ satisfy \\ \begin{align*} b_n = O(n^{-\beta} r^{-n}) \quad \text{ and } \quad c_n \sim Cn^{-\alpha} r^{-n},\quad \mbox{as $n \rightarrow \infty$}, \end{align*} \\ where $C,r,\alpha$ and $\beta$ are positive constants. 
If $\alpha <1 <\beta$ and $B(r) \neq 0$, then \begin{align*} d_n \sim B(r)c_n, \quad \text{ as } n \rightarrow \infty. \end{align*} \end{theorem} The Meir-Moon Theorem above is a companion of a result of Schur (and Szász), see \cite[p. 39]{PolyaSzego} and also \cite[Theorem VI.12, p. 434]{Flajolet}, which requires $B$ to have radius of convergence strictly bigger than $r > 0$ and also that $\lim_{n \rightarrow \infty}c_n/c_{n-1} = r$. For similar results of this kind see \cite{MeirMoonOld}, \cite{MeirMoonChar} and \cite{MeirMoon}. \begin{corollary}\label{tauberian-corollary-Q} Let $Q \geq 1$ be a positive integer. Let $B$ and $C$ be two power series in $\K$ with Taylor expansions \begin{align*} B(z) = \sum_{j = 0}^{\infty}b_{jQ}z^{jQ}, \quad C(z) = \sum_{j=0}^{\infty}c_{jQ}z^{jQ}, \end{align*} and let \begin{align*} D(z) \triangleq B(z)C(z) = \sum_{j=0}^{\infty}d_{jQ}z^{jQ}. \end{align*} Assume that the coefficients of $B$ and $C$ satisfy \\ \begin{align*} b_n = O(n^{-\beta} r^{-n}) \quad \text{ and } \quad c_n \sim Cn^{-\alpha} r^{-n},\quad \mbox{as $n \rightarrow \infty$ and $n \in \mathbb{N}_{0,Q}$}, \end{align*} \\ where $C,r,\alpha$ and $\beta$ are positive constants. If $\alpha <1 <\beta$ and $B(r) \neq 0$, then \begin{align*} d_n \sim B(r)c_n, \quad \text{ as } n \rightarrow \infty \text{ and } n \in \mathbb{N}_{0,Q}\,. \end{align*} \end{corollary} \section{Lagrange's equation}\label{sec:Lagrange} Let $\psi$ be a power series in $\K^\star$ with radius of convergence $R_\psi > 0$. Let $g$ be the power series which is the (unique) solution of Lagrange's equation with data $\psi$, i.e., satisfying $$g(z)=z \psi(g(z)).$$ \subsection{Coefficients} The coefficients of the solution power series $g(z)=\sum_{n=0}^\infty A_n z^n$ are given in terms of the data $\psi$ by Lagrange's inversion formula: $$A_n=\frac{1}{n} \textsc{coeff}_{[n{-}1]} \big(\psi(z)^n\big), \quad \mbox{for $n \ge 1$},$$ and $A_0=0$. Besides the exact formula above for the $A_n$ in terms of the data $\psi$, there is the following asymptotic formula due to Otter \cite{Otter} and to Meir-Moon \cite{MeirMoonOld}. Recall that for any integer $m \in \{0,1,2,\dots,Q_{\psi}-1\}$ we denote $\mathbb{N}_{m,Q_\psi}=\{n \in \mathbb{N}: n \equiv m \, \textrm{ mod} \, Q_\psi\}$. Using this notation: \begin{itemize}\item for indices $n \notin \mathbb{N}_{1,Q_{\psi}}$, we have that $A_n=0$, \item for indices $n \in \mathbb{N}_{1,Q_\psi}$, we have that \begin{equation}\label{eq:formula OtterMeinMoon}A_n \sim \frac{Q_\psi}{\sqrt{2\pi}} \frac{\tau}{\sigma_\psi(\tau)}\frac{1}{n^{3/2}} \Big(\frac{\psi(\tau)}{\tau}\Big)^n\, , \quad \mbox{as $n \to \infty$ and $n \in \mathbb{N}_{1,Q_{\psi}}$}\,.\end{equation} \end{itemize} The radius of convergence of the power series solution $g$ is denoted $\rho$. The asymptotic formula for the $A_n$ implies that $\rho=\tau/\psi(\tau)$ and, also, that $g$ extends to be continuous in the closed disk $\{|z|\le \rho\}$, since the sequence $(A_n \rho^n)_{n \ge 1}$ is summable. \subsection{Function $t/\psi(t)$} Lagrange's equation gives that $g$ and $z/\psi(z)$ are inverses of each other and thus that $$g(z/\psi(z))=z\, , \quad \mbox{for $|z|\le \tau$}\,.$$ In particular, $$g(t/\psi(t))=t\, , \quad \mbox{for $t \in[0, \tau]$}\,.$$ The image of the interval $[0,\rho]$ by $g$ is the interval $g([0,\tau/\psi(\tau)]) = [0,\tau]$.
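\medskip For concreteness, the following minimal Python sketch (purely illustrative, and not part of the argument) computes the coefficients $A_n$ directly from Lagrange's inversion formula by truncated power-series multiplication, for the data $\psi(z)=e^z$; it then compares $A_n$ with the Otter-Meir-Moon asymptotic formula \eqref{eq:formula OtterMeinMoon} (here $\tau=\sigma_\psi(\tau)=Q_\psi=1$) and checks the identity $g(t/\psi(t))=t$ numerically for a value $t\in[0,\tau]$.

\begin{verbatim}
from math import factorial, sqrt, pi, exp

def lagrange_coefficients(psi_coeffs, N):
    """A_n = (1/n) * [z^(n-1)] psi(z)^n for n = 1..N, computed by
    truncated multiplication; psi_coeffs[j] = coefficient of z^j in psi."""
    A = [0.0] * (N + 1)
    for n in range(1, N + 1):
        power = [1.0] + [0.0] * (n - 1)        # psi(z)^0, truncated at degree n-1
        for _ in range(n):
            new = [0.0] * n
            for i, c in enumerate(power):
                if c:
                    for j, b in enumerate(psi_coeffs[: n - i]):
                        new[i + j] += c * b
            power = new
        A[n] = power[n - 1] / n
    return A

# Data psi(z) = e^z, so tau = 1, sigma_psi(tau) = 1, Q_psi = 1, rho = 1/e.
N = 25
psi = [1.0 / factorial(j) for j in range(N + 1)]
A = lagrange_coefficients(psi, N)

# Compare A_N with the Otter-Meir-Moon asymptotic formula.
print(A[N], (1.0 / sqrt(2.0 * pi)) * N ** (-1.5) * exp(N))

# Check g(t/psi(t)) = t (approximately, via a partial sum) for t = 1/2.
t = 0.5
x = t / exp(t)
print(sum(A[n] * x ** n for n in range(1, N + 1)), t)
\end{verbatim}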
For the function $t \mapsto t/\psi(t)$, we have \begin{lem}\label{lemma: monotony_t/psi(t)} For $\psi \in \K^\star$ with apex $\tau \in (0,R_\psi)$, the function $t/\psi(t)$ \begin{enumerate} \item is strictly increasing on the interval $[0,\tau)$ \item is strictly decreasing on the interval $(\tau,R_{\psi})$ \item has a maximum at $t = \tau$. \end{enumerate} \end{lem} \begin{proof} The result follows from the identity \begin{align*} \left(\frac{t}{\psi(t)}\right)^{\prime} = \frac{1}{\psi(t)}(1-m_{\psi}(t)), \quad \text{ for all } t \in [0,R_{\psi}). \end{align*} \end{proof} \subsection{Functions of the solution $g$ of Lagrange's equation} For a power series $H$ with radius of convergence $R > \tau$ the coefficients of the power series $H(g)$ are given by \begin{align}\label{eq:formula_H(g)} \textsc{coeff}_{[n]}\left(H(g(z))\right) = \frac{1}{n}\textsc{coeff}_{[n-1]}\left(H^{\prime}(z)\psi(z)^n\right), \text{ for } n \geq 1, \end{align} and $\textsc{coeff}_{[0]}\left(H(g(z))\right) = H(0)$. For the coefficients of functions $H(g)$ of the solution $g$ we have the following asymptotic formula, see \cite[p. 268]{MeirMoonOld}. \begin{lemma} \label{lemma:powers-Lagrange} Let $\psi \in \K^\star$ with apex $\tau$ and $Q_{\psi} = 1$. Let $g$ be the solution of Lagrange's equation with data $\psi$. Then, for any nonconstant power series $H$, with nonnegative coefficients and having radius of convergence $R > \tau$, the following asymptotic formula holds \begin{align*} \textsc{coeff}_{[n]}\left[H(g(z))\right] \sim H^{\prime}(\tau)A_n, \quad \text{ as } n \rightarrow \infty. \end{align*} \end{lemma} Observe that $H^\prime(\tau)\neq 0$, since $H$ is nonconstant. Now we generalize Lemma \ref{lemma:powers-Lagrange} to power series $\psi \in \K^{\star}$ with $Q_{\psi} \geq 1$. Before we state and prove this generalization we point out that for $\psi \in \K^{\star}$ with Khinchin family $(Y_t)$ and $Q_{\psi} \geq 1$ we have the equality in distribution \begin{align}\label{eq:eq_distr_Q} Y_t \stackrel{d}{=} Q_{\psi} \cdot W_{t^Q}, \quad \text{ for all } t \in [0,R_{\psi}), \end{align} where $\psi(z) = \phi(z^{Q_{\psi}})$, for certain auxiliary power series $\phi \in \K$ with Khinchin family $(W_t)$. From (\ref{eq:eq_distr_Q}) it follows that \begin{align}\label{eq: mean_variance_Q} m_{\psi}(t) = Q_{\psi} \cdot m_{\phi}(t^Q) \quad \text{ and } \quad \sigma_{\psi}(t) = Q_{\psi}\cdot \sigma_{\phi}(t^{Q_{\psi}}), \end{align} for all $t \in [0,R_{\psi})$. From (\ref{eq: mean_variance_Q}) it follows that \begin{align}\label{eq:phi_tau_Q} m_{\phi}(\tau^{Q_{\psi}}) = 1/Q_{\psi}. \end{align} \begin{lem}\label{lemma:H_saltos} Let $\psi \in \K^\star$ with apex $\tau$ and $Q_{\psi} \geq 1$. Fix an integer $m \in \{0,1, \dots,Q_{\psi}~-~1\}$. Let $g$ be the solution of Lagrange's equation with data $\psi$. Let $H$ be a nonconstant power series, with nonnegative coefficients and having radius of convergence $R > \tau$. Assume that \begin{align*} \textsc{coeff}_{[n]}[H(z)] = 0, \quad \text{ if } n \notin \mathbb{N}_{m,Q_{\psi}}, \end{align*} then \begin{align*} \textsc{coeff}_{[n]}\left[ H(g(z))\right] \sim H^{\prime}(\tau)\frac{Q_\psi}{\sqrt{2\pi}} \frac{\tau}{\sigma_\psi(\tau)}\frac{1}{n^{3/2}} \Big(\frac{\psi(\tau)}{\tau}\Big)^n\, , \end{align*} $\mbox{as $n \to \infty$ and $n \in \mathbb{N}_{m,Q_{\psi}}$}$. 
This implies that there exists a constant $C > 0$ such that \begin{align*} \textsc{coeff}_{[n]}\left[H(g(z))\right] \leq C \, \rho^{-n} n^{-3/2}, \end{align*} as $n \rightarrow \infty$ and $n \in \mathbb{N}_{m,Q_{\psi}}$. \end{lem} \begin{proof} By hypothesis $H(z) = z^m \hat{H}(z^{Q_{\psi}})$. Thus \begin{align}\label{eq: H_Q} H^{\prime}(z) = z^{m-1}\tilde{H}(z^{Q_{\psi}}), \quad \text{ for all } m \in \{1,2,\dots,Q_{\psi}-1\}, \end{align} and $H^{\prime}(z) = z^{Q_{\psi}-1}{H}^{*}(z^{Q_{\psi}})$, for $m = 0$. Here $\hat{H}$, $\tilde{H}$ and $H^{*}$ are all power series with non-negative coefficients. Besides we may write $\psi(z) = \phi\left(z^{Q_{\psi}}\right)$, where $\phi$ is a power series in $\K$. Formula (\ref{eq:formula_H(g)}) tell us that \begin{align*} \textsc{coeff}_{[n]}\left[H(g(z))\right] = \frac{1}{n}\textsc{coeff}_{[n-1]}\left[H^{\prime}(z)\psi(z)^n\right], \end{align*} therefore, for integers $n \geq m$, using (\ref{eq: H_Q}), we find that \begin{align*} \textsc{coeff}_{[n]}\left[H(g(z))\right] = \frac{1}{n}\textsc{coeff}_{[n-m]}\left[\tilde{H}(z^{Q_{\psi}})\phi(z^{Q_{\psi}})^n\right], \end{align*} for all $m \in \{1,2,\dots,Q_{\psi}-1\}$ and also that \begin{align*} \textsc{coeff}_{[n]}\left[H(g(z))\right] = \frac{1}{n}\textsc{coeff}_{[n-Q_{\psi}]}\left[{H}^{*}(z^{Q_{\psi}})\phi(z^{Q_{\psi}})^n\right], \end{align*} for $m = 0$. Fix $l \in \{1,2,\dots,Q_{\psi}-1\}$. For $n = l+jQ_{\psi}$ we have \begin{align}\label{eq: H(g)_Q=1_pruebalemma} \frac{1}{n}\textsc{coeff}_{[n-l]}\left[\tilde{H}(z^{Q_{\psi}})\phi(z^{Q_{\psi}})^n\right] = \frac{1}{n}\textsc{coeff}_{[j]}\left[\tilde{H}(z)\phi(z)^n\right]. \end{align} For $l = Q_{\psi}$ and $n = l+jQ_{\psi}$ we have \begin{align}\label{eq: H(g)_Q=1_pruebalemma2} \frac{1}{n}\textsc{coeff}_{[n-l]}\left[{H}^{*}(z^{Q_{\psi}})\phi(z^{Q_{\psi}})^n\right] = \frac{1}{n}\textsc{coeff}_{[j]}\left[{H}^{*}(z)\phi(z)^n\right]. \end{align} From (\ref{eq: mean_variance_Q}) and (\ref{eq:phi_tau_Q}) it follows that \begin{align*} m_{\phi}(\tau^{Q_{\psi}}) = 1/Q_{\psi} \quad \text{ and } \quad \sigma_{\psi}(\tau) = Q_{\psi} \cdot \sigma_{\phi}(\tau^{Q_{\psi}}). \end{align*} Moreover $\tau^{Q_{\psi}j} = \tau^{n-m}$. Now we apply a large powers result from Section 8 in \cite{K_dos} (this is the case $j/n \rightarrow~1/Q_{\psi},$ as $n \rightarrow \infty$) to the right-hand side of (\ref{eq: H(g)_Q=1_pruebalemma}) and (\ref{eq: H(g)_Q=1_pruebalemma2}). Rearranging terms in the asymptotic formula (see Section 8 in \cite{K_dos}) we get that \begin{align*} \textsc{coeff}_{[n]}\left[H(g(z))\right] \sim H^{\prime}(\tau)\frac{Q_\psi}{\sqrt{2\pi}} \frac{\tau}{\sigma_\psi(\tau)}\frac{1}{n^{3/2}} \Big(\frac{\psi(\tau)}{\tau}\Big)^n\, , \quad \mbox{as $n \to \infty$} \text{ and } n \in \mathbb{N}_{m,Q_{\psi}}\,. \end{align*} \end{proof} \begin{remark}\label{remark: asymptotic_ineq} With the same hypothesis of Lemma \ref{lemma:H_saltos} we have that \begin{align*} \textsc{coeff}_{[n]}\left[ H(g(z))\right] \sim H^{\prime}(\tau) \rho^{-(m-1)}A_{n-m+1}, \quad \text{ as } n \rightarrow \infty \text{ and } n \in \mathbb{N}_{m,Q_{\psi}}. \end{align*} This follows combining Lemma \ref{lemma:H_saltos} with formula (\ref{eq:formula OtterMeinMoon}). From the previous asymptotic formula we obtain that there exists a constant $C_{H,\psi} > 0$ such that \begin{align*} \textsc{coeff}_{[n]}\left[ H(g(z))\right] \leq C_{H,\psi}A_{n-m+1}, \quad \text{ as } n \rightarrow \infty \text{ and } n \in \mathbb{N}_{m,Q_{\psi}}. 
\end{align*} The previous relations also give that there exists a constant $C > 0$ such that \begin{align*} \textsc{coeff}_{[n]}\left[ H(g(z))\right] \leq C \rho^{-n}n^{-3/2}, \quad \text{ as } n \rightarrow \infty \text{ and } n \in \mathbb{N}_{m,Q_{\psi}}\,. \end{align*} \end{remark} \newpage \section{Iteration and Lagrange's equation} We fix now a power series $\psi(z) = \sum_{n =0}^{\infty}b_n z^n$ in $\mathcal{K}^{\star}$ with apex $\tau$. Recall that the radius of convergence of the solution $g$ of Lagrange's equation with data $\psi$ is given by $\rho=\tau/\psi(\tau)$. \ For $z$ in the disk $\{|z| \leq \rho\}$ consider the sequence of power series given by the recurrence \begin{align}\label{eq:iteration_scheme_height} g_n(z)=z \psi(g_{n{-}1}(z))\, , \quad \mbox{for $n \ge 1$}\, , \end{align} starting off with $g_0(z)=b_0z$. This sequence is well defined. Observe that \begin{align}\label{eq: ineq_iteration_altura} b_0 \, \rho = \tau/(\psi(\tau)/b_0) < \tau < R_{\psi}\, . \end{align} Using inequality (\ref{eq: ineq_iteration_altura}) combined with the fact that $\psi \in \K^{\star}$ we find that \begin{align*} |g_1(z)| = |z\psi(b_0 z)| < \rho \, \psi(\tau)= \tau, \quad \text{ for all } |z| \leq \rho \, . \end{align*} By induction we conclude that for any integer $n \geq 0$ we have \begin{align}\label{eq: g_n less than tau} |g_n(z)| < \tau, \quad \text{ for all } |z| \leq \rho\, , \end{align} in particular, this implies that the functions in the sequence $\{g_n\}_{n \geq 0}$ are continuous on the boundary of the disk of center $z = 0$ and radius $\rho$. The sequence $(g_n)_{n \ge 1}$ converges uniformly in the closure of the disk $\D(0, \rho)$ to the power series $g$. See Remark 2.7 in \cite[p. 9]{Sokal} and also \cite{Renyi} for more details about this iteration setting. \ The iteration scheme of interest in this paper is not the one above but the following. Start with $g_0(z)=g(z)$ and define recursively \begin{equation}\label{eq:iteration scheme} g_k(z)=z\big[\psi(g_{k{-}1}(z))-b_0\big], \quad \mbox{for $k \ge 1$}\, .\end{equation} Write the expansion $g_k(z)=\sum_{n=1}^\infty A^{(k)}_n z^n$. It follows that $A_n^{(k)}=0$ for $1\le n \le k$ and that \begin{equation}\label{eq:bounds for Ank}0 \le A_n^{(k)} \le A_n^{(k{-}1)}\le A_n, \quad \mbox{for $n \ge 1$ and $k \ge 1$}.\end{equation} This means, in particular, that each power series $g_k$ has radius of convergence at least $\rho$, and that the sequence of the $g_k$ converges to $0$ uniformly in the closed disk $\textrm{cl}(\D(0, \rho))$. \ Define the bivariate analytic function \begin{align*} G(z,w) = z(\psi(w)-b_0)\,, \quad \text{ for } |z| \leq \rho, |w| \leq \tau\,. \end{align*} The bound \begin{align}\label{eq:Glesstau} |G(z,w)| \leq \tau\,, \quad \text{ for } |z| \leq \rho, |w| \leq \tau\,, \end{align} follows since, for $|z|\le\rho$ and $|w|\le \tau$, we have that $$|z||\psi(w)-b_0|\le |z| \sum_{j=1}^\infty |b_j| |w|^j\le |z|\psi(|w|)\le\rho \psi(\tau)=\tau\,.$$ Inequality \eqref{eq:Glesstau} allows us to iterate $G(z,w)$ with respect to the second variable: for any integer $k \geq 1$ we denote \begin{align*} G_k(z,w) = G(z,G_{k-1}(z,w)), \quad \text{ for all } |z| \leq \rho \text{ and } |w| \leq \tau\,, \end{align*} the $k$-th iterate of $G(z,w)$ in the second variable, starting with $G_0(z,w) = w$. For any integer $k \geq 0$ we have that \begin{align}\label{eq:iteration_g_k} g_k(z) = G_k(z,g(z)), \quad \text{ for } |z| \leq \rho\,.
\end{align} Observe that \begin{equation}\label{eq:partial G} z \frac{\partial G}{\partial z}(z,w)=G(z,w) \quad \text{ and } \quad \frac{\partial G}{\partial w}(z,w)=z \psi^\prime(w), \quad \text{ for } |z| \leq \rho \text{ and } |w| \leq \tau\,. \end{equation} \ The main aim of this section is to obtain the following asymptotic result about the coefficients $A_n^{(k)}$: \begin{theo}\label{thm:mainthmlagrange} For each $k \ge 0$, $$ \lim_{\substack{n \to \infty; \\ n\in \mathbb{N}_{1,Q_\psi}}} \frac{A_n^{(k)}}{A_n}=\frac{\partial G_k}{\partial w}(\rho, \tau)\, . $$ \end{theo} The proof of Theorem \ref{thm:mainthmlagrange} is contained in Section \ref{section:proof_mainthmlagrange} and it is based on a number of lemmas about $G(z,w)$ which are described in the next section. \begin{remark}\label{remark: extend} We can extend Theorem \ref{thm:mainthmlagrange} in the following way: assume that $\psi \in \K$ has radius of convergence $R_{\psi} < \infty$ and also that \begin{enumerate} \item $\lim_{t \uparrow R_{\psi}}m_{\psi}(t) \triangleq M_{\psi} = 1$, \item $\lim_{t \uparrow R_{\psi}}\psi^{\prime\prime}(t) < \infty$. \end{enumerate} Denote $\rho = R_{\psi}/\psi(R_{\psi})$ and $\tau = R_{\psi}$; then we have \begin{align*} \lim_{\substack{n \to \infty; \\ n\in \mathbb{N}_{1,Q_\psi}}} \frac{A_n^{(k)}}{A_n}=\frac{\partial G_k}{\partial w}(\rho, \tau). \end{align*} The argument to prove this result is exactly the same as that of Theorem \ref{thm:mainthmlagrange}. The only difference appears in the proof of the auxiliary lemmas of Subsection \ref{subsec:concerningG(z,w)}; these proofs now rely on some asymptotic results appearing in \cite{K_dos}. These results extend the asymptotic formulas in Section \ref{sec:Lagrange} to the case $M_{\psi} = 1$ and $\tau = R_{\psi}$. \end{remark} \begin{remark}\label{remark generalized_iterate} It is possible to generalize the previous iteration scheme in the following way: consider $\psi(z) = \sum_{n \geq 0}b_n z^n \in \K^{\star}$ and fix $\mathcal{I} \subset \{n \geq 0 : b_n > 0\}$ a proper subset of indices such that $0 \in \mathcal{I}$. Define \begin{align}\label{eq:generalized_iterate} S_{\mathcal{I}}(z,w) = z(\psi(w)-\psi_{\mathcal{I}}(w)) =z\psi_{\mathcal{I}^c}(w), \quad \text{ for all } |z| \leq \rho \text{ and } |w| \leq \tau\,, \end{align} where $\psi_{\mathcal{I}}(z) \triangleq \sum_{n \in \mathcal{I}}b_n z^n$ and $\mathcal{I}^c$ denotes the complement of $\mathcal{I}$ in the set of indices $\{n\geq 0 : b_n > 0\}$. Observe that \begin{align}\label{eq:iterate_section} |S_{\mathcal{I}}(z,w)| \leq G(|z|,|w|) \leq \tau, \quad \text{ for all } |z| \leq \rho \text{ and }|w| \leq \tau\,. \end{align} Inequality (\ref{eq:iterate_section}) allows us to iterate $S_{\mathcal{I}}(z,w)$ with respect to the second variable: for any integer $m \geq 1$ we denote \begin{align*} S_{\mathcal{I},m}(z,w) = S_{\mathcal{I}}(z,S_{\mathcal{I},m-1}(z,w)), \quad \text{ for all } |z| \leq \rho \text{ and } |w| \leq \tau\,, \end{align*} the $m$-th iterate in the second variable of $S_{\mathcal{I}}(z,w)$, starting off with $S_{\mathcal{I},0}(z,w) = w$.
Now consider the sequence of analytic functions \begin{align} f_{\mathcal{I},m}(z) = S_{\mathcal{I},m}(z,g(z)), \quad \text{ for all } |z| \leq \rho \,. \end{align} This power series has Taylor expansion \begin{align}\label{eq:Taylor_f_I} f_{\mathcal{I},m}(z) = \sum_{n \geq 0}B_n^{(m)}z^n, \quad \text{ for all } |z| \leq \rho\,, \end{align} and verifies the recurrence \begin{align*} f_{\mathcal{I},m}(z) = z(\psi-\psi_{\mathcal{I}})(f_{\mathcal{I},m-1}(z)),\quad \text{ for all } m \geq 1\,, \end{align*} starting with $f_{\mathcal{I},0}(z) = g(z)$. Taking $\mathcal{I} = \{0\}$ in (\ref{eq:generalized_iterate}) we retrieve the iteration scheme (\ref{eq:iteration scheme}). \end{remark} \subsection{Some lemmas concerning $G(z,w)$}\label{subsec:concerningG(z,w)} The following lemma gives an explicit expression for the partial derivative with respect to $z$ of the iterate $G_k(z,w)$. \begin{lem}\label{general-partial-z} For any integer $k \geq 1$ we have \begin{align}\label{partialzformula} \frac{\partial G_k}{\partial z}(z,w) &= \sum_{j=1}^{k}\frac{\partial G}{\partial z}(z,G_{k-j}(z,w))\prod_{i=1}^{j-1}\frac{\partial G}{\partial w}(z,G_{k-i}(z,w))\,, \end{align} for $|z| \leq \rho$ and $|w| \leq \tau$, with the convention that the empty product is $1$. \end{lem} \begin{proof} Formula \eqref{partialzformula} is proved by induction on $k$. For $k = 1$, the right-hand side of \eqref{partialzformula} simplifies and reduces to $ ({\partial G}/{\partial z})(z,w)$, as it should. Assume that identity \eqref{partialzformula} holds for some $k \ge 1$; then \begin{align*} (\star) \quad \frac{\partial G_{k+1} }{\partial z}(z,w) = \frac{\partial}{\partial z}G(z,G_k(z,w)) = \frac{\partial G}{\partial z}(z,G_k(z,w))+\frac{\partial G}{\partial w}(z,G_k(z,w))\frac{\partial G_k}{\partial z}(z,w)\,. \end{align*} Substituting the expression \eqref{partialzformula} for $\partial G_k/\partial z$ in $(\star)$ we find that \begin{align*} \frac{\partial G_{k+1}}{\partial z}(z,w) &= \sum_{j=1}^{k+1}\frac{\partial G}{\partial z}(z,G_{k+1-j}(z,w))\prod_{i=1}^{j-1}\frac{\partial G}{\partial w}(z,G_{k+1-i}(z,w))\,. \end{align*} \end{proof} The following lemma gives an asymptotic upper bound for the coefficients of $(\partial G_k/\partial z)(z,g(z))$. \begin{lem}\label{lemma: bigOpartialz} For any integer $k \geq 1$ we have \begin{align*} \textsc{coeff}_{[n]}\left[\frac{\partial G_k}{\partial z}(z,g(z))\right] = O(\rho^{-n}n^{-3/2}), \quad \text{ as } n \rightarrow \infty \text{ and } n \in \mathbb{N}_{0,Q_{\psi}}\,. \end{align*} \end{lem} \begin{proof} Combining formulas \eqref{eq:iteration_g_k} and \eqref{eq:partial G} with Lemma \ref{general-partial-z} we have that \begin{align*} \textsc{coeff}_{[n]}\left[\frac{\partial G_k}{\partial z}(z,g(z))\right] &= \sum_{j = 1}^{k}\textsc{coeff}_{[n]}\left[ \frac{\partial G}{\partial z}(z,g_{k-j}(z))\prod_{i=1}^{j-1}\frac{\partial G}{\partial w}(z,g_{k-i}(z))\right] \\ &= \sum_{j = 1}^{k}\textsc{coeff}_{[n]}\left[ \frac{g_{k+1-j}(z)}{z}\prod_{i=1}^{j-1}\frac{\partial G}{\partial w}(z,g_{k-i}(z))\right]\,. \end{align*} From the inequalities \eqref{eq:bounds for Ank} we then deduce, for $n \ge 1$, that \begin{align*} \sum_{j = 1}^{k}\textsc{coeff}_{[n]}\left[\frac{g_{k+1-j}(z)}{z}\prod_{i=1}^{j-1}\frac{\partial G}{\partial w}(z,g_{k-i}(z))\right] &\leq \sum_{j = 1}^{k}\textsc{coeff}_{[n+1]}\left[g(z)\frac{\partial G}{\partial w}(z,g(z))^{j-1}\right]\,.
\end{align*} For $1 \le j \le k$ we have, using \eqref{eq:partial G}, that \begin{align*} \textsc{coeff}_{[n+1]}\left[g(z)\frac{\partial G}{\partial w}(z,g(z))^{j-1}\right] = \textsc{coeff}_{[n+2-j]}\left[g(z)\psi^{\prime}(g(z))^{j-1}\right]\,. \end{align*} Write $H(w)=w \psi^\prime(w)^{j{-}1}$. Observe that $H$ is a nonconstant power series which can be written as $H(w) = w^{1+(Q_{\psi}-1)(j-1)}\tilde{H}(w^{Q_{\psi}}) = w^{Q_{\psi}(j-1)+2-j}\tilde{H}(w^{Q_{\psi}})$, for certain auxiliary power series $\tilde{H}$\,. Applying Lemma \ref{lemma:H_saltos} with $H(w)=w \psi^\prime(w)^{j{-}1}$ and $m = Q_{\psi}(j-1)+2-j$, we conclude that there exists a constant $E_j > 0$ such that \begin{align*} \textsc{coeff}_{[n+2-j]}\left[g(z)\psi^{\prime}(g(z))^{j-1}\right] \leq E_j \, \rho^{-n}n^{-3/2}\,, \quad \text{ as } n \rightarrow \infty \text{ and } n\in \mathbb{N}_{0,Q_{\psi}}\,. \end{align*} Observe that for integers $n \geq 1$ such that $n \equiv 0 \!\!\mod Q_{\psi}$ we have that $n+2-j \equiv m \!\!\mod Q_{\psi}$. Then we conclude, with $D_k=\sum_{j=1}^k E_j$, that \begin{align*} \textsc{coeff}_{[n]}\left[\frac{\partial G_k}{\partial z}(z,g(z))\right] \le D_k \, \rho^{-n}n^{-3/2}\,, \quad \text{ as } n \rightarrow \infty \text{ and } n\in \mathbb{N}_{0,Q_{\psi}}\,. \end{align*} \end{proof} For the coefficients of $(\partial G_k/\partial w)(z,g(z))$ we have: \begin{lem} \label{lemma: bigOpartial-w} For any integer $k \geq 1$ we have \begin{align*} \textsc{coeff}_{[n]}\left[\frac{\partial G_k}{\partial w}(z,g(z))\right] = O(\rho^{-n} n^{-3/2})\,, \quad \text{ as } n \rightarrow \infty \text{ and } n\in \mathbb{N}_{0,Q_{\psi}}\,. \end{align*} \end{lem} \begin{proof} Applying the chain rule to $G_k(z,w)$ and using \eqref{eq:iteration_g_k} and \eqref{eq:partial G}, we obtain that \begin{align}\label{eq:formula for partial Gw} \frac{\partial G_k}{\partial w}(z,g(z)) = \prod_{j = 0}^{k-1}\frac{\partial G}{\partial w}(z,G_j(z,g(z))) = z^{k}\prod_{j = 0}^{k-1}\psi^\prime(G_j(z,g(z)))=z^k \prod_{j = 0}^{k-1}\psi^\prime(g_j(z))\,, \end{align} for $|z| \leq \rho$ and $|w| \leq \tau$. Appealing to \eqref{eq:bounds for Ank} we deduce that $$ \textsc{coeff}_{[n]}\left[\frac{\partial G_k}{\partial w}(z,g(z))\right] \le \textsc{coeff}_{[n-k]}(\psi^\prime(g(z))^k)\,.$$ First observe that for all integers $i, l \geq 0$ we have \begin{align}\label{eq: ineqpsipsi_prime} \textsc{coeff}_{[i]}\left(\psi^{\prime}(g_l(z))\right) = \sum_{n \geq 1}n\, b_n \textsc{coeff}_{[i]}\left(g_l(z)^{n-1}\right) \leq \textsc{coeff}_{[i]}\left[\psi^{\prime}(g(z))\right]\,. \end{align} Here we use the formula for the coefficients of a finite product of power series and also that $A_n^{(l)} \leq A_n$, for any pair of nonnegative integers $l \geq 0$ and $n \geq 1$. Combining the convolution formula with inequality (\ref{eq: ineqpsipsi_prime}) we conclude that \begin{align*} \textsc{coeff}_{[n]}\left[\frac{\partial G_k}{\partial w}(z,g(z))\right] &= \textsc{coeff}_{[n-k]}\left[\prod_{j = 0}^{k-1}\psi^\prime(G_j(z,g(z)))\right] \\ &\leq \textsc{coeff}_{[n-k]}\left[\psi^\prime(g(z))^{k}\right]. \end{align*} Denote $H(w) = \psi^{\prime}(w)^k$; this function is nonconstant: recall that $\psi \in \K^{\star}$ cannot be a polynomial of degree $1$. The power series $H$ can be written as $H(w) = w^{(Q_{\psi}-1)k}\tilde{H}(w^{Q_{\psi}})$, for certain auxiliary power series $\tilde{H}$ with nonnegative coefficients.
In fact $H(w) = w^{m}(w^{Q_{\psi}(j(Q_{\psi}-1)+l-1)}\tilde{H}(w^{Q_{\psi}}))$ with $m = Q_{\psi}-l$ and $k = jQ_{\psi}+l$, for $j \geq 0$ and $l \in \{0,1,\dots,Q_{\psi}-1\}$. Observe that $m+k \equiv 0$,$\mod Q_{\psi}$. Applying Lemma \ref{lemma:H_saltos} with $m$ as in the previous paragraph we find that \begin{align} \label{eq: asymptotic-partial-w} \textsc{coeff}_{[n-k]}[\psi^\prime(g(z))^k] \sim C_{H,\psi} \, \rho^{-n}n^{-3/2}\,, \quad \text{ as } n \rightarrow \infty \text{ and } n \in \mathbb{N}_{0,Q_{\psi}}\,, \end{align} where $C_{H,\psi}$ is a positive constant. For integers $n \geq k$ such that $n \equiv 0$,$\mod Q_{\psi}$ we have that $n-k \equiv m$,$\mod Q_{\psi}$. Using equation (\ref{eq: asymptotic-partial-w}), we conclude that there exists a constant $C > 0$ such that \begin{align*} \textsc{coeff}_{[n]}\left[\frac{\partial G_k}{\partial w}(z,g(z))\right] \leq C \, \rho^{-n} n^{-3/2}\,, \quad \text{ as } n \rightarrow \infty \text{ and } n \in \mathbb{N}_{0,Q_{\psi}}\,. \end{align*} \end{proof} \begin{lem}\label{lemma: partialwnonzero} For any $k \ge 0$ we have \begin{align*} \frac{\partial G_k}{\partial w}(\rho,\tau) \neq 0. \end{align*} \end{lem} \begin{proof} Using \eqref{eq:formula for partial Gw} and that $g(\rho)=\tau$, we have that $$\frac{\partial G_k}{\partial w}(\rho,\tau)=\frac{\partial G_k}{\partial w}(\rho,g(\rho))=\rho^k \prod_{j=0}^{k{-}1} \psi^\prime(g_j(\rho))\,.$$ Now, $\psi^\prime(g_j(\rho))=0$ could only happen if $\psi^{\prime}(0) = 0$ and $g_j(\rho)=0$, which means $g_j \equiv 0$. The iteration scheme \eqref{eq:iteration scheme} then implies that $g_{j-1}\equiv 0$, and then that $g_{j{-}2}\equiv 0$ and, finally, that $g \equiv 0$, which is not the case. Thus $(\partial G_k/\partial w)(\rho,\tau) \neq 0$. \end{proof} \subsection{Proof of Theorem \ref{thm:mainthmlagrange}.}\label{section:proof_mainthmlagrange} Fix $k \ge 1$. From \eqref{eq:iteration_g_k} we have that \begin{align*} g^{\prime}_k(z) = \frac{\partial G_k}{\partial z}(z,g(z))+\frac{\partial G_k}{\partial w}(z,g(z))g^{\prime}(z)\,, \quad \text{ for all } |z| < \rho\,. \end{align*} We estimate now the coefficients of $g_k^\prime(z)$ using the expression above. Lemma \ref{lemma: bigOpartialz} tells us that the first summand of $g_k^\prime(z)$ satisfies \begin{align*} \textsc{coeff}_{[n]}\left[\frac{\partial G_k}{\partial z}(z,g(z))\right] = O(\rho^{-n}n^{-3/2})\,, \quad \text{ as } n \rightarrow \infty \text{ and } n \in \mathbb{N}_{0,Q_{\psi}}\,. \end{align*} To estimate the coefficients of the second summand we apply Corollary \ref{tauberian-corollary-Q} with $(\partial G_k/\partial w)(z,g(z))$ in the role of $B(z)$ and $g^\prime(z)$ in the role of $C(z)$ and $r=\rho$. For $(\partial G_k/\partial w)(z,g(z))$, using Lemma \ref{lemma: bigOpartial-w}, we have that $$(\star)\quad \textsc{coeff}_{[n]}\left[\frac{\partial G_k}{\partial w}(z,g(z))\right]=O\big(\rho^{-n} n ^{-3/2}\big)\, ,\quad \mbox{as $n \to \infty$} \text{ and } n \in \mathbb{N}_{0,Q_{\psi}}\,.$$ On the other hand, for $g^\prime(z)$ and because of formula \eqref{eq:formula OtterMeinMoon} we have that $$(\star\star)\quad \textsc{coeff}_{[n]}\left(g^\prime(z)\right) \sim C \rho^{-n} n^{-1/2}\, , \quad \mbox{as $n \to \infty$} \text{ and } n \in \mathbb{N}_{0,Q_{\psi}}\,, $$ for a positive constant $C$. 
The estimates $(\star)$ and $(\star\star)$ combined with Lemma \ref{lemma: partialwnonzero} allow us to apply Corollary \ref{tauberian-corollary-Q}, with $\alpha=1/2$ and $\beta=3/2$, and deduce that \begin{align*} \textsc{coeff}_{[n]}\left[\frac{\partial G_k}{\partial w}(z,g(z))g^{\prime}(z)\right] \sim \frac{\partial G_k}{\partial w}(\rho,\tau)\, (n+1)A_{n{+}1}\,, \quad \text{ as } n \rightarrow \infty \text{ and } n \in \mathbb{N}_{0,Q_{\psi}}\,. \end{align*} Therefore we conclude that \begin{align*} \textsc{coeff}_{[n]}\left[g_k^{\prime}(z)\right] = (n+1) A_{n+1}^{(k)} \sim \frac{\partial G_k}{\partial w}(\rho,\tau)\, (n+1) A_{n+1}\,, \quad \text{ as } n \rightarrow \infty \text{ and } n \in \mathbb{N}_{0,Q_{\psi}}\,, \end{align*} which is equivalent to \begin{align*} \lim_{\substack{n \to \infty; \\ n\in \mathbb{N}_{1,Q_\psi}}} \frac{A_n^{(k)}}{A_n}=\frac{\partial G_k}{\partial w}(\rho, \tau)\, . \end{align*} \ \begin{remark} Assume that $Q_{\psi} = 1$. For any integer $m \geq 0$, we can apply the asymptotic estimates of this section to the coefficients of the sequence $f_{\mathcal{I},m}(z) = S_{\mathcal{I},m}(z,g(z))$. The power series expansions of $S_{\mathcal{I},m}$ and $G_m$ are given by: \begin{align*} S_{\mathcal{I},m}(z,w) = \sum_{l,s \geq 0}S_{l,s}^{(m)}z^{l}w^{s}\quad \text{ and } \quad G_m(z,w) = \sum_{l,s \geq 0}G_{l,s}^{(m)}z^{l}w^{s} \,, \end{align*} for all $|z| \leq \rho$ and $|w| \leq \tau$\,. By induction on $m \geq 0$ it follows that \begin{align}\label{eq:ineq_S_m_G_m} S_{l,s}^{(m)} \leq G_{l,s}^{(m)}\,, \quad \text{ for all } l,s\geq 0\,. \end{align} Combining inequality (\ref{eq:ineq_S_m_G_m}) with Lemmas \ref{lemma: bigOpartialz} and \ref{lemma: bigOpartial-w} we find that \begin{align*} \textsc{coeff}_{[n]}\left[\frac{\partial S_{\mathcal{I},m}}{\partial z}(z,g(z))\right] \leq \textsc{coeff}_{[n]}\left[\frac{\partial G_m}{\partial z}(z,g(z))\right]= O(\rho^{-n}n^{-3/2})\,, \quad \text{ as } n \rightarrow \infty\,, \end{align*} and also that \begin{align*} \textsc{coeff}_{[n]}\left[\frac{\partial S_{\mathcal{I},m}}{\partial w}(z,g(z))\right] \leq \textsc{coeff}_{[n]}\left[\frac{\partial G_m}{\partial w}(z,g(z))\right] = O(\rho^{-n}n^{-3/2})\,, \quad \text{ as } n \rightarrow \infty\,. \end{align*} Applying the same strategy as in the case $\mathcal{I} = \{0\}$ we find that \begin{align*} \lim_{n \rightarrow \infty}\frac{ \, \, B_n^{(m)}}{A_n} = \frac{\partial S_{\mathcal{I},m}}{\partial w}(\rho,\tau)\,. \end{align*} \end{remark} \section{Trees} \subsection{Rooted trees} A finite rooted tree $a$ is a finite connected graph without cycles with a distinguished node called the root. The set of leaves (nodes of degree 1, other than the root) of $a$ is denoted by $\text{leaves}(a)$; we also refer to $\text{leaves}(a)$ as the \textit{border} of $a$. \subsubsection{Generations and distances on a rooted tree} If $u,v$ are distinct nodes on a rooted tree, their distance $d(u,v)$ is the length (number of edges) of the unique path (not repeating nodes) that connects $u$ and $v$. If $u=v$, the distance is $d(u,v)=0$. For integer $n \geq 0$, the $n$th generation of a rooted tree $a$ is the set of nodes at distance exactly $n$ from the root. The $0$-th generation consists simply of the root. For a node $u$ in the $n$th generation of $a$, the neighboring nodes in generation $n+1$ are called the \textit{descendants} of $u$. The \textit{outdegree} of a node $u$ of $a$ is the number of its descendants. The leaves are the nodes with no descendants.
The \textit{genealogy} of a node $v$ of $a$ is the set of nodes in $a$ which lie in the (unique) path connecting the root to $v$. The \textit{distance $\partial(a)$ to the border} of the rooted tree $a$ is defined as \begin{align*} \partial(a) = \min\{d(\text{root},u): u \in \text{leaves}(a)\}\,, \end{align*} in other terms, the distance to the border of the rooted tree $a$ is the generation of the leaf with the shortest genealogy. In turn, the height $h(a)$ of a rooted tree is $$h(a)=\max\{d(\text{root},u): u \in \text{leaves}(a)\}\,.$$ The \textit{outdegree profile} of a rooted tree $a$ is the list of nonnegative integers $$(k_0(a), k_1(a),\dots)\,,$$ where $k_j(a)$ is the number of nodes of $a$ with outdegree $j$. Observe that if $a$ has $n$ nodes then $k_j(a)=0$, for $j\ge n$, and that $\sum_{j = 0}^{n-1}k_j(a) = n$ and $\sum_{j = 1}^{n-1}jk_j(a) = n-1$. \subsubsection{Weight of a tree associated to $\psi \in \K$} Fix $\psi(z) = \sum_{n = 0}^{\infty}b_n z^n \in \K$. For any rooted tree $a$ with $n$ nodes, the $\psi$-weight $w_\psi(a)$ of $a$ (associated to $\psi$) is \begin{align}\label{eq: weight} w_{\psi}(a) = \prod_{j = 0}^{\infty}b_j^{k_j(a)}, \end{align} where $(k_0(a), k_1(a),\dots)$ is the outdegree profile of $a$. The product above is actually a finite product, since $k_j(a) \neq 0$ for only finitely many $j$. For details about these weights we refer to \cite[p. 999]{MeirMoonOld}. \subsubsection{Plane trees} A rooted plane tree is a finite rooted tree where each descendant of a given node has an intrinsic label which records its position within the tree; these labels follow a lexicographical order. See \cite{Neveu} and \cite[p. 126]{Flajolet} for more details. The previous definition is equivalent to specifying an order on the descendants of each node from left to right; this order allows us to embed the tree in the plane. We denote by $\mathcal{G}$ the class of finite rooted plane trees and for any integer $n \geq 1$ we denote by $\mathcal{G}_n$ the subclass of rooted plane trees with $n$ nodes. Further, for any $k \geq 0$, we denote by $\mathcal{G}^{(k)} \subseteq \mathcal{G}$ the class of rooted plane trees $a$ with $\partial(a) \geq k$ and by $\mathcal{G}_n^{(k)} = \mathcal{G}^{(k)} \cap \mathcal{G}_n$ the class of rooted plane trees with $n \geq 1$ nodes and $\partial \geq k$. We also consider infinite rooted plane trees, which have infinitely many nodes but in which each node has a finite number of descendants. For the precise definition of this class we refer to \cite{Neveu}. \subsection{Galton-Watson processes}\label{subsec: G-W-sinKhinchin} Let $Y$ be a discrete random variable with expectation $m\triangleq \E(Y) \leq 1$ and such that \begin{enumerate} \item $\P(Y = 0) > 0$, \\[-0.4 cm] \item $\P(Y = 0)+\P(Y = 1) < 1$. \end{enumerate} Denote by $\psi$ the probability generating function of $Y$. Conditions (1) and (2) imply that $\psi(0) > 0$ and also that $\psi$ is not a polynomial of degree $1$. We use $Y$ as the offspring distribution of a Galton-Watson stochastic process $(W_n)_{n\ge 0}$, with $W_n$ being the random size of the $n$th generation. We start with a unique individual in generation zero: $W_0\equiv 1$. This individual has a number of offspring with probability distribution given by $Y$. These offspring comprise the first generation, whose size is $W_1$ (which has the same distribution as $Y$).
Each individual of the first generation has a random number of offspring with probability distribution $Y$, independently of each other; these offspring comprise in turn the second generation of random size $W_2$. And so on. For i.i.d.\ copies $(Y_{i,j})_{i, j \ge 1}$ of the variable $Y$ and for any integer $n \geq 1$ we have \begin{align*} W_n = Y_{n,1}+\dots+Y_{n,{W_{n-1}}}, \quad \mbox{for $n \ge 1$}\, . \end{align*} Besides $W_0 \equiv 1$. The discrete process $(W_n)_{n \geq 0}$ is the Galton-Watson process (from now on abbreviated GW) with offspring distribution given by $Y$. For each integer $n \geq 0$, the random variable $W_n$ gives the number of individuals in the $n$th generation of our process. Since $m=\E(Y)\le 1$, the GW process with offspring distribution $Y$ extinguishes with probability $1$, that is, $\P(W_n = 0 \text{ for some } n ) = 1$. See, for instance, \cite{Williams}. The condition $m \le 1$, which here we assume from the start, is referred to in the literature as the subcritical ($m<1$) or critical ($m=1$) case. The total progeny of the GW process is given by the random variable \begin{align}\label{eq: total_progeny_NKhinchin} W \triangleq \sum_{j = 0}^{\infty}W_j \stackrel{d}{=} 1+\sum_{j = 1}^{\infty}W_j. \end{align} Since the process extinguishes with probability 1, the total progeny $W$ is finite almost surely. We denote the probability generating function of $W$ by $g$. The power series $g$ verifies Lagrange's equation with data $\psi$, that is, \begin{align*} g(z) = z\psi(g(z)), \quad \text{ for all } z \in \D. \end{align*} See, for instance, \cite{Pitman} and \cite{Williams}. For further basic background on classical Galton-Watson processes we refer, for instance, to \cite{Pitman} and \cite{Williams}. \subsubsection{Galton-Watson processes and rooted plane trees} Assume that $Y$ is a discrete random variable with $m = \E(Y) \leq 1$. Each realization of a GW process with offspring distribution $Y$ can be identified with a finite rooted plane tree. Galton-Watson processes are introduced as a model for the evolution of a population where each individual is distinguishable from the others. In this model we record the moment of birth and the parent/child relation of each individual. To take care of this information, we impose an order on the descendants of each node: we label the positions of the descendants of each node intrinsically from left to right, as we do for rooted plane trees. To generate a Galton-Watson process we use independent copies of a discrete random variable $Y$. We start with one individual with offspring distribution $Y$. The descendants of this individual are ordered from left to right. Using this order we assign copies of $Y$, which we denote $Y_{1,1}, Y_{1,2},\dots, Y_{1, Y}$, to each of the new individuals in the first generation of our process; then we iterate this construction. This correspondence between nodes and random variables tacitly assumes that the descendants of each node are ordered from left to right; otherwise, we could not assign a specific random variable to a specific individual and, therefore, we could not record the number of descendants of a particular individual. If nodes are not distinguishable, that is, if the descendants of each individual are not ordered, then, in general, we cannot register whether a given node on the left or a given node on the right has a certain number of descendants: we cannot distinguish left from right and hence we cannot distinguish different individuals.
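\medskip To make the preceding correspondence concrete, the following minimal Python sketch (purely illustrative; the choice of a Poisson offspring distribution is simply an assumption for the example) samples a Galton-Watson process as a rooted plane tree, assigning to each node an independent copy of $Y$ in the left-to-right order just described, and computes its total progeny and its distance to the border.

\begin{verbatim}
import random
from collections import deque
from math import exp

def sample_gw_tree(offspring, max_nodes=10**6):
    """Sample a Galton-Watson process as a rooted plane tree:
    children[v] is the ordered list of children of node v; node 0 is the root."""
    children = [[]]
    queue = deque([0])              # nodes whose offspring are still to be drawn
    while queue:
        v = queue.popleft()
        for _ in range(offspring()):
            children.append([])
            w = len(children) - 1
            children[v].append(w)
            queue.append(w)
        if len(children) > max_nodes:
            raise RuntimeError("tree too large")
    return children

def distance_to_border(children):
    """Minimal depth of a leaf, i.e. the quantity denoted partial(a) in the text."""
    level, depth = [0], 0
    while level:
        if any(not children[v] for v in level):   # this generation contains a leaf
            return depth
        level = [w for v in level for w in children[v]]
        depth += 1
    return depth

def poisson_one():
    """Poisson(1) offspring sampler by inversion (the critical Cayley case)."""
    u, k, p, s = random.random(), 0, exp(-1.0), exp(-1.0)
    while u > s:
        k += 1
        p /= k
        s += p
    return k

tree = sample_gw_tree(poisson_one)
print("total progeny:", len(tree), " distance to the border:", distance_to_border(tree))
\end{verbatim}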
We can think about this (subcritical or critical) Galton-Watson process as a random variable $T$ which takes values in the set $\mathcal{G}$ of finite rooted plane trees. For any integer $n \geq 1$ and any rooted plane tree $a \in \mathcal{G}_n$ the $\psi$-weight $w_\psi(a)$, here $\psi$ denotes the probability generating function of $Y$, is the probability of the event $\{T=a\}$. \begin{align*} \P(T = a) = \prod_{j = 0}^{\infty}\P(Y = j)^{k_j(a)}=w_\psi(a), \end{align*} where $(k_0(a), k_1(a),\dots)$ is the outdegree profile of the tree $a$. Denote by $\#(T)$ the random variable that gives the total progeny of the GW process $T$, then we have $\#(T) \stackrel{d}{=} W$. Observe that $\P(\#(T) = n) = \P(T \in \mathcal{G}_n)$, for all $n \geq 1$. We define the random variable \textit{distance to the border of the Galton-Watson process} $T$ as $\partial(T)$. The random variable $\partial(T)$ is given by \begin{align*} \P(\partial(T) = k) = \P(T \in \mathcal{G}^{(k)} \setminus \mathcal{G}^{(k+1)})\,. \end{align*} We aim in this paper to obtain, for any integer $k \geq 0$ and any GW process $T$, an asymptotic formula for the conditional probability \begin{align*} \P(\partial(T) \geq k \, | \, \#(T) = n) = \P(T \in \mathcal{G}^{(k)} \, | \, \#(T) = n), \quad \mbox{as $n \rightarrow \infty$.} \end{align*} \subsection{Parametric families of Galton-Watson processes} Let $\psi(z) = \sum_{n = 0}^{\infty}b_n z^n$ be a power series in $\K^\star$ with radius of convergence $R_\psi$ and apex $\tau \in (0, R_\psi)$, and denote by $(Y_t)_{t \in [0,R_{\psi})}$ its Khinchin family. We let $g(z) = \sum_{n \geq 1}A_n z^n$ be the power series solution of Lagrange's equation with data $\psi$ which has radius of convergence $\rho=\tau/\psi(\tau)$, and denote by $(Z_t)_{t \in [0,\tau/\psi(\tau))}$ its associated shifted Khinchin family. Observe that $Z_0 \equiv 1$. \medskip We consider the one-parameter family of GW processes $(T_t)_{t \in [0,R_{\psi})}$ where $T_t$ is the GW process with offspring distribution given by $Y_t$. For $t=0$, the GW process degenerates, its total progeny is just one individual (or in terms of trees, just the root). For each $t \in [0,R_{\psi})$ we denote by $q(t)$, the probability of extinction for the GW process $T_t$. We have $q(t)=1$, for $t \in [0, \tau]$, since for $t$ in that interval, $m_\psi(t)\le 1$. For $t \in (\tau,R_{\psi})$, we have that $0 <q(t)< 1$, see, for instance, \cite[p. 4]{Williams}. \medskip For each $t \in (0,R_{\psi})$, we let $\psi_t(z) = \psi(tz)/\psi(t)$ be the probability generating function of the random variable $Y_t$; we set $\psi_0\equiv1$. We consider, for each $t \in [0,R_{\psi})$, Lagrange's equation with data $\psi_t$ and the corresponding power series solution $g_t(z)$: \begin{align}\label{eq: Lagrange-probabilitygen} g_t(z) = z\psi_t(g_t(z)). \end{align} The case $t = 0$ is a degenerate case: $\psi_0(z) \equiv 1$ and $g_0(z) = z$. \medskip \begin{propo}\label{lemma: g_t-probgen} For $t \in [0,R_\psi)$ we have that \begin{align}\label{eq:identity for gt} g_t(z) = \frac{g(tz/\psi(t))}{t}, \quad \mbox{for all $t \in [0,R_{\psi})$ and $|z| \leq 1$}\,. \end{align} The holomorphic function $g_t$ is continuous on $\partial \D$ and $g_t(\D)\subset \D$. Besides, for $t \in [0,\tau]$, the power series $g_t(z)$ is the probability generating function of the random variable $Z_{t/\psi(t)}$. \end{propo} \begin{proof} We write $h_t(z)= {g(tz/\psi(t))}/{t}$. 
Using that $g$ satisfies Lagrange's equation with data $\psi$, it is immediate to verify that $h_t$ satisfies Lagrange's equation with data $\psi_t$. Uniqueness of the solution gives that $g_t\equiv h_t$. Equation \eqref{eq:identity for gt} means that \begin{equation}\label{eq:power series for gt}g_t(z)=\sum_{n \ge 1} \frac{A_n t^{n{-}1}}{\psi(t)^n} z^n\, . \end{equation} The Otter-Meir-Moon formula \eqref{eq:formula OtterMeinMoon} and formula \eqref{eq:identity for gt} give that the radius of convergence of the power series $g_t$ is $$R_{g_t}=\frac{\tau/\psi(\tau)}{t/\psi(t)}\,,$$ and also the continuity of $g_t$ on $\partial \D$. Observe that $R_{g_t}>1$ for $t \neq \tau$, while $R_{g_\tau}=1$; see Lemma \ref{lemma: monotony_t/psi(t)}. Equation \eqref{eq: Lagrange-probabilitygen} is then satisfied for $|z|\le 1$, for all $t \in [0,R_{\psi})$. \medskip Now, for $t \in [0,\tau]$, since then $g(t/\psi(t))=t$, we deduce that $$\begin{aligned} \P(Z_{t/\psi(t)}=n)&=\frac{A_n (t/\psi(t))^n}{g(t/\psi(t))}\\&=\frac{A_n (t/\psi(t))^n}{t}=\textsc{coeff}_{[n]}(g_t(z))\, , \quad \mbox{for $t \in [0,\tau]$ and $n \ge 1$}\, .\end{aligned}$$ Therefore, as claimed, $g_t$ is the probability generating function of $Z_{t/\psi(t)}$, for all $t \in [0, \tau]$. \end{proof} \medskip Taking $z = 1$ in formula \eqref{eq: Lagrange-probabilitygen} we find that $g_t(1)$ is a fixed point for $\psi_t(z)$, that is, \begin{align*} \psi_t(g_t(1)) = g_t(1), \quad \text{ for all } t \in [0,R_{\psi}). \end{align*} From here we conclude that the probability of extinction $q(t)$ is given by \begin{align}\label{eq: extinction} q(t) = g_t(1), \quad \text{ for all } t \in [0,R_{\psi}). \end{align} See \cite[p. 4]{Williams} for more details about this equality. From formula (\ref{eq: Lagrange-probabilitygen}) we deduce that the probability of extinction function $q(t)$ may be expressed as \begin{align}\label{eq: formula_extinction} q(t) = \sum_{n = 1}^{\infty}\frac{A_n t^{n-1}}{\psi(t)^n}, \quad \text{ for all } t \in [0,R_{\psi}), \end{align} with the convention that $0^0 = 1$. Recall that $q(t)=1$, for $t \in [0,\tau]$. \medskip \begin{coro}\label{lemma: progenydistribution} For each $t \in [0,\tau]$, the power series $g_t$ is the probability generating function of $\# (T_t) $ and the following equality in distribution holds \begin{align*} \# (T_t) \stackrel{d}{=} Z_{t/\psi(t)}. \end{align*} \end{coro} \begin{proof} For all $t \in [0,\tau]$, the power series $g_t$ is a probability generating function. Since $g_t$ is the solution of \eqref{eq: Lagrange-probabilitygen} and $\psi_t$ is the probability generating function of $Y_t$, we have that $g_t$ is the probability generating function of the size of the progeny $\# (T_t)$. \end{proof} \begin{remark} For each $t \in (\tau,R_{\psi})$, the process $T_t$ survives with positive probability, and so $\#(T_t)$ takes the value $\infty$ with positive probability. In this case, $\#(T_t)$ takes values in $\{1,2,3,\dots\} \cup \{\infty\}$, and $(\#(T_t))_{t \in [0,R_{\psi})}$ is a one-parameter family of extended random variables. \end{remark} \subsection{The conditional random tree} Let $\mathcal{R} \subset \mathcal{G}$ be a subclass of rooted plane trees. For integers $n \ge 1$, denote by $\mathcal{R}_n$ the subclass of trees of $\mathcal{R}$ of size $n$. Fix $\psi(z) = \sum_{n = 0}^{\infty}b_n z^n$ in $\K^\star$ with radius of convergence $R_\psi$ and apex $\tau \in (0,R_{\psi})$.
For integer $n \ge 1$, we denote with $U_n$ the sum of the $\psi$-weights of the trees in the class $\mathcal{G}_n$ and, similarly, $V_n$ the sum of the $\psi$-weights of the trees in the class $\mathcal{R}_n$: \begin{align}\label{eq: sum_weigh_partial_root} U_n = \sum_{a \in \mathcal{G}_n}w_\psi(a)\,, \quad \text{ and } \quad V_n = \sum_{a \in \mathcal{R}_n}w_\psi(a)\,, \end{align} see equation (\ref{eq: weight}) and also \cite{MeirMoonOld}. Fix $t \in (0,\tau]$. For the GW process $T_t$ and any rooted plane tree $a \in \mathcal{G}_n$, the probability that $T_t$ is equal to $a$ is given by \begin{equation}\label{eq: prob_GW_finito} \P(T_t = a) = \prod_{j = 0}^{n-1}\P(Y_t = j)^{k_j(a)} =\omega_{\psi_t}(a)=\prod_{j = 0}^{n-1}\frac{b_j^{k_j}t^{j k_j}}{\psi(t)^{k_j}} =w_\psi(a)\frac{t^{n-1}}{\psi(t)^n}\,. \end{equation} See, for instance, \cite{Neveu} or \cite{Otter}. Thus, \begin{equation}\label{eq: prob_GW_finito subclass} \P(T_t \in \mathcal{R}_n) = V_n\frac{t^{n-1}}{\psi(t)^n}\,, \end{equation} and, in particular, \begin{equation}\label{eq: prob_GW_finito class} \P(T_t \in \mathcal{G}_n) = U_n\frac{t^{n-1}}{\psi(t)^n}\,, \end{equation} The next proposition follows from \eqref{eq: prob_GW_finito subclass} and \eqref{eq: prob_GW_finito class}. \begin{propo}\label{thm: uniform} For $t \in (0,R_{\psi})$, we have \begin{align*} \P(T_t \in \mathcal{R} \, | \, \#(T_t) = n) = \frac{V_n}{U_n}\,, \end{align*} for any integer $n \geq 1$ such that $n \equiv 1,\text{\hspace{-.16 cm}}\mod Q_{\psi}$. \end{propo} \begin{proof} For integers $n \equiv 1,\mod Q_{\psi}$, just observe that $$\P(T_t \in \mathcal{R} \, | \, \#(T_t) = n)=\frac{\P(T_t \in \mathcal{R}_n)}{\P(T_t \in \mathcal{G}_n)}$$ and use \eqref{eq: prob_GW_finito subclass} and \eqref{eq: prob_GW_finito class}.\end{proof} \medskip For $t \in (0, R_{\psi})$ and $n \ge 1$, we have that $$\P(T_t \in \mathcal{G}_n)=\P(\# (T_t)=n)=\textsc{coeff}_{[n]}(g_t(z ))$$ and thus from formula \eqref{eq:power series for gt} we see that $$\P(T_t \in \mathcal{G}_n)=\frac{A_n t^{n{-}1}}{\psi(t)^n}\,.$$ Comparing with \eqref{eq: prob_GW_finito class} we see that $$A_n=U_n=\sum_{a \in \mathcal{G}_n}\omega_\psi(a)\,, \quad \mbox{for $n \ge 1$}\,.$$ \section{The distance to the border}\label{sec: distance_border} Now we introduce a formula for the probability that a Galton-Watson process $T_t$, conditioned to have exactly $n$ nodes, has $\partial(T_t) \geq k$. We recall here the notation $\mathcal{G}$ for the class of finite rooted plane trees and $\mathcal{G}^{(k)} \subseteq \mathcal{G}$ for the subclass of finite rooted plane trees with $\partial \geq k$. For any $\psi \in \K$, we denote \begin{align}\label{eq: sum_weigh_partial_root} A_n = \sum_{a \in \mathcal{G}_n}w(a), \quad \text{ and } \quad A_n^{(k)} = \sum_{a \in \mathcal{G}_n^{(k)}}w(a), \end{align} here $w(a)$ denotes the weight of $a$ with respect to a given $\psi \in \K$, see equation (\ref{eq: weight}). For any integer $k \geq 1$ and any $t \in [0,\tau]$, the expression $A_n^{(k)}t^{n-1}/\psi(t)^n$ does not define a probability distribution. In particular, this expression does not define a Khinchin family: \begin{align*} \P(T_t \in \mathcal{G}^{(k)}) = \sum_{n \geq 1} \, \P(T_t \in \mathcal{G}_n^{(k)}) = \sum_{n \geq k} \, \frac{A_n^{(k)}t^{n-1}}{\psi(t)^n} < \sum_{n \geq 1}\, \frac{A_n t^{n-1}}{\psi(t)^n} = 1. \end{align*} Here we use that for all $k \geq 1$, the class of rooted trees $\mathcal{G}_n^{(k)}$ is strictly contained in the class $\mathcal{G}_n$. 
On the other hand for $k = 0$, we have the equality $A_n^{(0)} = A_n$, for all $n \geq 1$; therefore $\P(T_t \in \mathcal{G}^{(0)}) = \P(T_t \in \mathcal{G}) = 1$, for all $t \in [0,\tau]$. In the previous reasoning we have used that, for any integer $k \geq 0$, we can partition the class of rooted plane trees with $\partial \geq k$ by the number of nodes: \begin{align*} \mathcal{G}^{(k)} = \bigsqcup_{n = k}^{\infty} \mathcal{G}_n^{(k)} \subseteq \mathcal{G}, \end{align*} where $\mathcal{G}_n^{(k)} \cap \mathcal{G}_m^{(k)} = \emptyset$, for any pair of integers $n,m \geq 1$ such that $n \neq m$. \begin{lem}\label{lemma:uniform_galton} Fix $\psi \in \K$. For any integer $k \geq 0$, and any $t \in (0,R_{\psi})$, we have \begin{align*} \P(\partial(T_t) \geq k \, | \, \#(T_t) = n) = \P(T_t \in \mathcal{G}^{(k)} \, | \, \#(T_t) = n) = \frac{\, \, \, A_n^{(k)}}{A_n}, \end{align*} for all $n \in \mathbb{N}_{1,Q_{\psi}}.$ \end{lem} The proof of this lemma follows by applying Proposition \ref{thm: uniform} with $\mathcal{R}_n = \mathcal{G}^{(k)}_n$. \subsection{Asymptotic results} Now we give a recurrence relation for the analytic function with coefficients $A_n^{(k)}$. Using this representation we give an asymptotic formula for the sequence $A_n^{(k)}$, as the number of nodes $n \rightarrow \infty$. For any integer $k \geq 0$, we denote by $g_k(z)$ the analytic function with coefficients $A_n^{(k)}$, that is, \begin{align*} g_k(z) = \sum_{n = k}^{\infty}A_n^{(k)}z^{n}, \quad \text{ for all } |z|\leq \rho \triangleq \tau/\psi(\tau). \end{align*} For all $t \in [0,R_{\psi})$ and $|z| \leq 1$, we have \begin{align*} \frac{g_k(tz/\psi(t))}{t} = \sum_{n = k}^{\infty}\frac{A_n^{(k)}t^{n-1}}{\psi(t)^n}z^n = \sum_{n = k}^{\infty}\P(T_t \in \mathcal{G}_n^{(k)})z^n. \end{align*} \begin{lem} For any $\psi(z) = \sum_{n = 0}^{\infty}b_n z^n \in \K$ and any integer $k \geq 1$, the analytic function $g_k(z)$ verifies the recurrence relation \begin{align}\label{eq: recurrenceg_k} g_k(z) = z(\psi(g_{k-1}(z))-b_0), \end{align} where $g_0(z) = g(z)$. Here $g(z)$ denotes the solution of Lagrange's equation with data $\psi \in \K$. \end{lem} \begin{proof} We have that \begin{align*} \textsc{coeff}_{[n]}\left[z(\psi(g_{k-1}(z))-b_0)\right] &= \textsc{coeff}_{[n-1]}\left[\psi(g_{k-1}(z))-b_0\right] \\ &= \sum_{j = 1}^{n-1}b_j \textsc{coeff}_{[n-1]}[g_{k-1}(z)^j], \end{align*} therefore \begin{align*} \textsc{coeff}_{[n]}\left[z(\psi(g_{k-1}(z))-b_0)\right] = \sum_{j = 1}^{n-1}b_j \sum_{l_1+l_2+\dots+l_j = n-1}A_{l_1}^{(k-1)}A_{l_2}^{(k-1)}\dots A_{l_j}^{(k-1)}. \end{align*} For the family of rooted plane trees with $\partial \geq k$, that is, $\mathcal{G}^{(k)}$, we have: \begin{figure}[H] \begin{equation*} \mathcal{G}^{(k)} = \ \ \treee{\mathcal{G}^{(k-1)}} \ \ + \treeC{\mathcal{G}^{(k-1)}} + \text{ \hspace{.2 cm}} \dots \end{equation*} \end{figure} In this diagram we either fix a root and glue to it one tree with $\partial \geq k-1$, or fix a root and glue to it two trees with $\partial \geq k-1$, and so on. By means of this process we build all the rooted plane trees with $\partial \geq k$. If we restrict our attention to the rooted plane trees with $\partial \geq k$ and $n$ nodes, the previous diagram stops at a certain point, depending on $n$ and $k$. For the rooted plane trees with $\partial \geq k$ and $n$ nodes, we allocate $n-1$ nodes among the trees which descend from the root: by means of this process we enumerate the class $\mathcal{G}^{(k)}_{n}$.
If we glue $j$ rooted trees with $\partial \geq k-1$ to a root, then, when calculating the weights, we should weight the root using the coefficient $b_j$. The previous reasoning tells us that \begin{align*} \textsc{coeff}_{[n]}\left[z(\psi(g_{k-1}(z))-b_0)\right] = \sum_{j = 1}^{n-1}b_j \sum_{l_1+l_2+\dots+l_j = n-1}A_{l_1}^{(k-1)}A_{l_2}^{(k-1)}\dots A_{l_j}^{(k-1)} = A_n^{(k)}. \end{align*} See \cite{Flajolet} for similar arguments applied to combinatorial classes of rooted labeled trees or more generally to simple varieties of trees. \end{proof} Now, we give an asymptotic formula for the probability that a Galton-Watson process $T_t$, conditioned to have total progeny equal to $n$, has distance to the border $\partial(T_t) \geq k$, as the number of nodes $n \rightarrow \infty$. \begin{theo}\label{thm: mainthmprobabilistic} Fix an integer $k \geq 0$ and a function $\psi \in \K^{\star}$ with $Q_{\psi} \geq 1$. Then, for all $t \in (0,R_{\psi})$, we have \begin{align*} \lim_{\substack{n \to \infty; \\ n\in \mathbb{N}_{1,Q_\psi}}}\P(\partial(T_t) \geq k \, | \, \#(T_t) = n) = \lim_{\substack{n \to \infty; \\ n\in \mathbb{N}_{1,Q_\psi}}}\frac{A_n^{(k)}}{A_n} = \frac{\partial G_k}{\partial w}(\rho,\tau). \end{align*} \end{theo} The proof of this theorem follows by combining Lemma \ref{lemma:uniform_galton} with Theorem \ref{thm:mainthmlagrange}. This result also appears in \cite{Protection1-mean}; the proof there uses singularity analysis. \begin{remark} For values of $t$ in the interval $(\tau, R_{\psi})$, we can also study the Galton-Watson process $T_t$ upon extinction. Take $a \in \mathcal{G}$, a finite rooted plane tree, and suppose that $a$ has exactly $n \geq 1$ nodes; then we have \begin{align}\label{eq: prob_extinction_conditionated} \P(T_t = a \, | \, \text{extinction})= \frac{\omega_{\psi}(a)t^{n-1}}{\psi(t)^n}\frac{1}{q(t)}. \end{align} Formula (\ref{eq: prob_extinction_conditionated}) is valid for all $t \in (0,R_{\psi})$, and, of course, we have \begin{align*} \P(T_t = a) = \P(T_t = a \, | \, \text{extinction})= \frac{\omega_{\psi}(a)t^{n-1}}{\psi(t)^n}, \quad \text{ for all } t \in (0,\tau]. \end{align*} Here we use that $q(t) = 1$, for all $t \in [0,\tau]$. For each $t \in (0,R_{\psi})$ we denote by $\tilde{T}_t$ the Galton-Watson process $T_t$ conditioned upon extinction. If we condition $\tilde{T}_t$ to have total progeny $n \geq 1$, then we find that \begin{align*} \P(\tilde{T}_t \in \mathcal{R} \, | \, \#(\tilde{T}_t) = n) = \frac{V_n}{U_n}, \end{align*} for all $n \in \mathbb{N}_{1,Q_{\psi}}$. From here, and depending on the restrictions we impose on the class $\mathcal{R}$, we can find an asymptotic formula for this probability, as the number of nodes $n \rightarrow \infty$. \end{remark} \section{Applications}\label{sec: applications} In this section we apply Theorem \ref{thm: mainthmprobabilistic} to some specific classes of rooted trees. By means of this result, we give asymptotic formulas for the proportion of rooted labeled Cayley trees (Poisson offspring distribution), rooted plane trees (geometric offspring distribution) and rooted binary plane trees (Bernoulli $\{0,2\}$ offspring distribution) with distance to the border greater than or equal to $k \geq 0$, as the number of nodes $n$ tends to infinity. For the study of the height we refer to \cite{Renyi}, \cite{de Bruijn}, and \cite{FlajoletOdlyzko} for the Cayley, plane, and binary cases, respectively.
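\medskip Before turning to the examples, we note that the limit $\frac{\partial G_k}{\partial w}(\rho,\tau)$ in Theorem \ref{thm: mainthmprobabilistic} is straightforward to evaluate numerically: by formula \eqref{eq:formula for partial Gw}, and since $g(\rho)=\tau$, it equals $\rho^k\prod_{j=0}^{k-1}\psi'(g_j(\rho))$, where the values $g_j(\rho)$ are obtained by iterating $w\mapsto \rho(\psi(w)-b_0)$ starting from $g_0(\rho)=\tau$. The following short Python sketch (purely illustrative) evaluates this quantity for the three offspring distributions considered below; the comparison with the closed form $9\cdot 4^k/(2+4^k)^2$ anticipates the result obtained in the subsection on rooted plane trees.

\begin{verbatim}
from math import exp

def limit_proportion(psi, dpsi, b0, rho, tau, k):
    """partial G_k / partial w at (rho, tau), computed as
    rho**k * prod_{j=0}^{k-1} psi'(g_j(rho)),
    where g_0(rho) = tau and g_j(rho) = rho*(psi(g_{j-1}(rho)) - b0)."""
    value, w = 1.0, tau
    for _ in range(k):
        value *= rho * dpsi(w)
        w = rho * (psi(w) - b0)
    return value

e = exp(1.0)

# Cayley trees: psi(z) = e^z, b0 = 1, tau = 1, rho = 1/e.
for k in range(5):
    print("Cayley  k =", k, limit_proportion(exp, exp, 1.0, 1.0 / e, 1.0, k))

# Plane trees: psi(z) = 1/(1-z), b0 = 1, tau = 1/2, rho = 1/4.
psi_p = lambda w: 1.0 / (1.0 - w)
dpsi_p = lambda w: 1.0 / (1.0 - w) ** 2
for k in range(5):
    value = limit_proportion(psi_p, dpsi_p, 1.0, 0.25, 0.5, k)
    print("plane   k =", k, value, " closed form:", 9 * 4**k / (2 + 4**k) ** 2)

# Binary trees: psi(z) = 1 + z^2, b0 = 1, tau = 1, rho = 1/2.
for k in range(5):
    print("binary  k =", k,
          limit_proportion(lambda w: 1.0 + w * w, lambda w: 2.0 * w, 1.0, 0.5, 1.0, k))
\end{verbatim}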
\subsection{Rooted labeled Cayley trees} The class $\mathcal{T}_n$ of rooted labeled Cayley trees with $n$ nodes is a family of rooted trees where the descendants of each node are not ordered. This is a family of labeled trees, that is, for each rooted labeled Cayley tree with $n$ nodes there is a bijective correspondence between the set of nodes and the set of labels $\{1,2,\dots,n\}$. The main property of this family of trees is that the trees in this class are not embedded in the plane. For the specific details about this family of trees we refer, for instance, to \cite[pp. 126-127]{Flajolet}. The rooted labeled Cayley trees form a simple variety of trees; see, for instance, \cite[pp. 126-127]{Flajolet}. A consequence of this fact is that the number of rooted labeled Cayley trees with $n$ nodes can be counted using Lagrange's equation with data $\psi(z) = e^z$. The Cayley tree function $T(z)$, the exponential generating function for the counting sequence of this family of trees, satisfies Lagrange's equation \begin{align}\label{eq: Lagrange-Cayley} T(z) = ze^{T(z)}, \quad \text{ for all } |z| \leq \frac{1}{e}. \end{align} Applying Lagrange's inversion formula, we find that \begin{align}\label{eq: Cayley tree function} T(z) = \sum_{n = 1}^{\infty}\frac{n^{n-1}}{n!}z^n, \quad \text{ for all } |z| \leq \frac{1}{e}. \end{align} We want to apply Theorem \ref{thm: mainthmprobabilistic} to this particular family of rooted labeled trees. Each of the elements of the Khinchin family associated to $\psi(z) = e^z$, which we denote $(Y_t)_{t \in [0,\infty)}$, follows a Poisson distribution with parameter $t \in [0,\infty)$. See \cite{K_uno}, \cite{K_exponentials} and \cite{K_dos} for more examples of Khinchin families. The mean function is given by \begin{align*} m_{\psi}(t) = \frac{t\psi^{\prime}(t)}{\psi(t)}= t, \quad \text{ for all } t \in [0,\infty); \end{align*} in fact, the equation $m_{\psi}(t) = 1$ has solution $\tau = 1$. In this case the bivariate function $G(z,w)$ is given by \begin{align*} G(z,w) = z(e^{w}-1), \end{align*} for all $|z| \leq 1/e$ and $|w| \leq 1$. For any pair of integers $k \geq 0$ and $n \geq 1$, we denote by $t_n^{(k)}$ the counting sequence for the rooted labeled Cayley trees with $n$ nodes and $\partial \geq k$. Besides, we denote by $t_n$ the counting sequence for the rooted labeled Cayley trees with $n$ nodes. Formula (\ref{eq: Cayley tree function}) tells us that $t_n = n^{n-1}$; this is Cayley's formula for the enumeration of the rooted labeled Cayley trees. Now, applying Lemma \ref{lemma:uniform_galton} and Theorem \ref{thm: mainthmprobabilistic}, we find that for any integer $k \geq 0$, and as $n \rightarrow \infty$, the proportion that the trees in $\mathcal{T}_n^{(k)}$ occupy in $\mathcal{T}_n$ is given by the limit \begin{align*} \lim_{n \rightarrow \infty}\frac{\, \, \, \, t_n^{(k)}}{t_n} = \frac{\partial G_k}{\partial w}(1/e,1)\,. \end{align*} This asymptotic formula gives the answer to a question posed in \cite{Ara-Fer}; for an alternative proof of this result when $k = 2$ and $k = 3$, see \cite[pp. 309-312]{Ara-Fer}. We write $c_k \triangleq \lim_{n \rightarrow \infty}t_n^{(k)}/t_n$. Applying the chain rule to $G_k(z,w)$ we obtain the following recurrence relation: \begin{align}\label{eq: recucrence_k_cayley} c_k =\frac{1}{e}e^{G_{k-1}(1/e,1)}c_{k-1}, \quad \text{ for all } k \geq 1, \end{align} with $c_0 = 1$. Let us study some particular instances of this formula. \begin{itemize} \item[\scalebox{0.6}{$\blacksquare$}] The case $k =2$.
Using the recurrence relation (\ref{eq: recucrence_k_cayley}), we find that
\begin{align}\label{eq: c_2 Cayley}
c_2 = \frac{1}{e}e^{G(1/e,1)}c_0 = \frac{1}{e}e^{1-1/e} = e^{-1/e}.
\end{align}
This result appears in \cite[p. 312]{Ara-Fer}.
\vspace{.2 cm}
\item[\scalebox{0.6}{$\blacksquare$}] The case $k = 3$.

Using again the recurrence relation (\ref{eq: recucrence_k_cayley}), we find that
\begin{align*}
c_3 = \frac{1}{e}e^{G_2(1/e,1)}c_2.
\end{align*}
We have
\begin{align*}
G_2(1/e,1) = G(1/e,G(1/e,1)) = G(1/e,1-1/e) = \frac{1}{e}(e^{1-1/e}-1),
\end{align*}
and $c_2 = e^{-1/e}$, see equation (\ref{eq: c_2 Cayley}); therefore
\begin{align*}
c_3 = \frac{1}{e}e^{-1/e}e^{(e^{1-1/e}-1)/e}.
\end{align*}
This is Theorem 3.3 in \cite[p. 309]{Ara-Fer}.
\vspace{.2 cm}
\item[\scalebox{0.6}{$\blacksquare$}] The case $k = 4$.

In this case we have
\begin{align*}
c_4 = \frac{1}{e}\exp\left(\frac{1}{e}(e^{(e^{1-1/e}-1)/e}-1)\right)\frac{1}{e}e^{-1/e}e^{(e^{1-1/e}-1)/e}.
\end{align*}
\end{itemize}
In general, for any $k \geq 4$, we may continue iterating the recurrence relation (\ref{eq: recucrence_k_cayley}) and thus obtain any particular value of $c_k$. This means that, for any $k \geq 1$, the constant $c_k$ is computable by iteration. The recurrence relation (\ref{eq: recucrence_k_cayley}) answers a question posed in \cite[p. 312]{Ara-Fer}. Some of these quantities appear in \cite{Protection1-mean}.
\subsection{Rooted plane trees}
The rooted plane trees form a simple variety of trees, and therefore we can count these trees by means of Lagrange's equation with data $\psi(z) = 1/(1-z)$. The generating function for this family of rooted trees, which we denote $P(z)$, satisfies Lagrange's equation with data $\psi$:
\begin{align}\label{eq: Lagrange-plane}
P(z) = \frac{z}{1-P(z)}, \quad \text{ for all } |z| \leq \frac{1}{4}.
\end{align}
Applying Lagrange's inversion formula, we find that
\begin{align}\label{eq: generating_plane}
P(z) = \sum_{n = 1}^{\infty}C_{n-1}z^n, \quad \text{ for all } |z| \leq \frac{1}{4},
\end{align}
where $C_n$ denotes the $n$th Catalan number. We want to apply Theorem \ref{thm: mainthmprobabilistic} to the family of rooted plane trees. Each of the elements of the Khinchin family associated to $\psi(z) = 1/(1-z)$, which we denote $(Y_t)_{t \in [0,1)}$, follows a geometric distribution with parameter $1-t$. See \cite{K_uno}, \cite{K_exponentials} and \cite{K_dos} for examples of Khinchin families and for more details about this specific family of random variables. The mean function of this Khinchin family is
\begin{align*}
m_{\psi}(t) = \frac{t}{1-t}, \quad \text{ for all } t \in [0,1),
\end{align*}
and the equation $m_{\psi}(t) = 1$ has solution $\tau = 1/2$. For $\psi(z) = 1/(1-z)$ we have
\begin{align*}
G(z,w) = z\left(\frac{1}{1-w}-1\right) = \frac{zw}{1-w},
\end{align*}
for all $|z| \leq 1/4$ and $|w| \leq 1/2$. In this particular case it is possible to find a closed expression for the $k$-th iterate of $G(z,w)$; this is the content of the following lemma.
\begin{lem}
For any $k \geq 1$, the $k$-th iterate of $G(z,w)$ satisfies
\begin{align}\label{eq: formula_plane_iterate}
G_k(z,w) = \frac{z^k w}{1-\frac{1-z^k}{1-z}w}, \quad \text{ for all } |z| \leq 1/4, |w| \leq 1/2.
\end{align}
\end{lem}
This lemma follows by induction on $k \geq 1$. Applying this result, we will be able to provide an explicit expression for the limit given by Theorem \ref{thm: mainthmprobabilistic}. This is a particular feature of this example.
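The closed form (\ref{eq: formula_plane_iterate}) is also easy to confirm numerically against direct iteration of $G$; a minimal sketch (in Python, at an arbitrarily chosen admissible point) follows.
\begin{verbatim}
# Plane-tree case: G(z, w) = z*w/(1 - w); compare the k-fold iterate in w
# with the closed form z^k * w / (1 - (1 - z^k)/(1 - z) * w).
def G(z, w):
    return z * w / (1 - w)

def G_iter(z, w, k):
    for _ in range(k):
        w = G(z, w)
    return w

def G_closed(z, w, k):
    return z**k * w / (1 - (1 - z**k) / (1 - z) * w)

z, w = 0.2, 0.3
print(all(abs(G_iter(z, w, k) - G_closed(z, w, k)) < 1e-12 for k in range(1, 8)))  # True
\end{verbatim}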
For the family of rooted labeled Cayley trees we cannot find a closed expression in the spirit of equation (\ref{eq: formula_plane_iterate}); see the recurrence relation (\ref{eq: recucrence_k_cayley}). For any pair of integers $k \geq 0$ and $n \geq 1$ we denote by $P_n^{(k)}$ the number of rooted plane trees with $n$ nodes and $\partial \geq k$, and by $P_n$ the number of rooted plane trees with $n$ nodes. Equation (\ref{eq: generating_plane}) gives that $P_n = C_{n-1}$; this formula follows by applying Lagrange's inversion formula to equation (\ref{eq: Lagrange-plane}). Finally, applying Theorem \ref{thm: mainthmprobabilistic} to the family of rooted plane trees, we find the following result.
\begin{theo}
For any integer $k \geq 0$, and as $n \rightarrow \infty$, the proportion of trees in $\mathcal{G}_n$ that belong to $\mathcal{G}_n^{(k)}$ is given by the limit
\begin{align*}
\lim_{n \rightarrow \infty}\frac{P_n^{(k)}}{P_n} = \frac{\partial G_k}{\partial w}(1/4,1/2) = 9\frac{4^k}{(2+4^k)^2}\,.
\end{align*}
\end{theo}
This result also appears in \cite{Protection-Gisang}.
\subsection{Rooted binary plane trees}
Now we study the rooted binary plane trees; these are rooted plane trees in which each node has either $0$ or $2$ descendants. We can count these trees using Lagrange's equation with data $\psi(z) = 1+z^2$. The generating function for the rooted binary plane trees is
\begin{align*}
B(z) = \sum_{n = 0}^{\infty}C_{n}z^{2n+1}, \quad \text{ for all } |z| \leq 1/2,
\end{align*}
where $C_n$ denotes the $n$th Catalan number. The generating function for the rooted binary plane trees with $\partial \geq k$ is
\begin{align*}
B_k(z) = G_k(z,B(z)) = \sum_{n = 1}^{\infty}B_n^{(k)}z^{2n+1}, \quad |z| \leq 1/2.
\end{align*}
Now we write
\begin{align*}
G(z,w) = zw^2, \quad \text{ for all } |z| \leq 1/2, |w| \leq 1.
\end{align*}
We can find a closed expression for the iterate $G_k(z,w)$. This is the content of the following lemma.
\begin{lem}
For any $k \geq 0$, we have
\begin{align*}
G_k(z,w) = z^{2^k-1}w^{2^k}, \quad \text{ for all } |z| \leq 1/2, |w| \leq 1.
\end{align*}
\end{lem}
Using the previous lemma, we conclude that the partial derivatives of $G_k(z,w)$ are given by
\begin{align}\label{partial-binary}
\frac{\partial G_{k}}{\partial z}(z,w) = (2^{k}-1)z^{2^{k}-2}w^{2^{k}}, \quad \frac{\partial G_{k}}{\partial w}(z,w) = 2^{k}z^{2^{k}-1}w^{2^{k}-1}.
\end{align}
In this particular case we also have an explicit formula for the generating function of the rooted binary plane trees with $\partial \geq k$.
\begin{coro}
For $k \geq 0$ we have
\begin{align}\label{generating-k-binary}
B_k(z) = G_k(z,B(z)) = z^{2^{k}-1}B(z)^{2^{k}}, \quad \text{ for all } |z| \leq 1/2.
\end{align}
\end{coro}
Using this representation, we can give an exact formula for the coefficients of $B_k(z)$; from it we then obtain an asymptotic formula for the coefficients $B_n^{(k)}$. For $n = 2m+1$, with $m$ large enough, we have
\begin{align}\label{eq: asymptotic A_n^{(k)}-binaryplane}
\begin{split}
\textsc{coeff}_{[2m+1]}\left[B_k(z)\right] &= \textsc{coeff}_{[2m-2^k+2]}\left[B(z)^{2^k}\right] \\
&= \frac{2^k}{2m-2^k+2}\textsc{coeff}_{[2m-2^{k+1}+2]}\left[(1+z^2)^{2m-2^k+2}\right] \\
&= \frac{2^k}{2m-2^k+2}{2m-2^k+2 \choose m-2^{k}+1} \sim 2^{k-2^{k}+1} \frac{4^{m}}{\sqrt{\pi}m^{3/2}}, \quad \text{ as } m \rightarrow \infty.
\end{split}
\end{align}
The asymptotic formula (\ref{eq:formula OtterMeinMoon}) gives that
\begin{align}\label{eq: asymptotic A_n-binaryplane}
\textsc{coeff}_{[2m+1]}\left[B(z)\right] = C_{m} = \frac{1}{m+1}{2m \choose m} \sim \frac{4^{m}}{\sqrt{\pi}m^{3/2}}, \quad \text{ as } m \rightarrow \infty.
\end{align}
The following theorem follows by combining formulas (\ref{eq: asymptotic A_n^{(k)}-binaryplane}) and (\ref{eq: asymptotic A_n-binaryplane}). This result is also a direct consequence of Theorem \ref{thm: mainthmprobabilistic}.
\begin{theor}
For any integer $k \geq 0$ and odd $n \geq 1$ we have
\begin{align*}
\lim_{\substack{n \to \infty; \\ n\in \mathbb{N}_{1,2}}}\frac{B_n^{(k)}}{B_n} = \frac{\partial G_k}{\partial w}(1/2,1) = 2^{k-2^k+1}\,.
\end{align*}
\end{theor}
\section{Mean number of nodes with $\partial_v \geq k$ in a Cayley tree}\label{sec:non-rooted}
For the family of unrooted labeled Cayley trees, and following a circle of ideas appearing in \cite{Ara-Fer}, we study the mean number of nodes which are at distance to the border greater than or equal to $k$, as the number of nodes $n \rightarrow \infty$. Recapitulating, for a given tree $T$, and for any integer $k \geq 0$, a node $v \in \text{nodes}(T)$ is at distance to the border greater than or equal to $k$ if
$$\partial_{v} \triangleq \min_{u \in \text{leaves}(T)}d(u,v) \geq k.$$
We denote by $\mathcal{U}_n$ the class of unrooted labeled Cayley trees with $n$ nodes. The nodes of each of the trees in this family can be distinguished by their labels; therefore, each tree in $\mathcal{U}_n$ gives rise to $n$ rooted labeled Cayley trees. In particular, the number of unrooted labeled Cayley trees is given by the sequence $U_n = n^{n-2}$. Let $X_{n,k}$ be the random variable that, for any tree $T \in \mathcal{U}_n$, counts the number of nodes $v$ with distance to the border $\partial_{v} \geq k$; then we have
\begin{align*}
X_{n,k}(T) = \sum_{u \in \text{nodes}(T)}\textbf{1}_{\{\partial_{u} \geq k\}}.
\end{align*}
Following \cite{Ara-Fer}, we consider a 0-1 matrix $M$ of size $n \times n^{n-2}$. We label the columns of this matrix with the collection of trees in $\mathcal{U}_n = \{t_1,t_2,\dots\}$, and the rows with the labels of the nodes, that is, $\{1,2,\dots,n\}$. We write $1$ in the entry $(j,t_i)$ if the node $j$ of the tree $t_i$ is at distance to the border greater than or equal to $k$, and $0$ otherwise. By adding all the entries of $M$ and then dividing by $n^{n-1}$ we obtain the mean proportion $\frac{1}{n}\mathbb{E}_n(X_{n,k})$ of such nodes; equivalently, the mean value of the random variable $X_{n,k}$ is
\begin{align*}
\mathbb{E}_n(X_{n,k}) = \frac{1}{n^{n-2}}\sum_{T \in \, \mathcal{U}_n}X_{n,k}(T).
\end{align*}
For any integer $k \geq 0$, and using the notation
\begin{align*}
c_k = \lim_{n \rightarrow \infty}t_n^{(k)}/t_n\,,
\end{align*}
we obtain the following result.
\begin{theor}\label{thm:labeled_meannumber}
As $n \rightarrow \infty$, the expectation of the proportion of nodes in a labeled Cayley tree with $n$ nodes that are at distance greater than or equal to $k$ from every leaf tends to $c_k$, that is,
\begin{align*}
\lim_{n \rightarrow \infty}\frac{1}{n}\mathbb{E}_n(X_{n,k}) = c_k\,.
\end{align*}
Equivalently,
\begin{align*}
\mathbb{E}_n(X_{n,k}) \sim c_k \cdot n, \quad \text{ as } n \rightarrow \infty\,.
\end{align*} \end{theor} This same result is true for any subclass of rooted labeled Cayley trees with exponential generating function given by Lagrange's equation with data $\psi(z) = \sum_{j \geq 0}(b_j/j!)z^j \in \K^{\star}$, where $b_j \in \{0,1\}$, for all $j \in \{0,1,\dots\}$, and total size $n \in \mathbb{N}_{1,Q_{\psi}}$. Finally, for the unrooted labeled unary Cayley trees, that is, the family of trees given by $\psi(z) = 1+z \in \K$, we have \begin{align*} \mathbb{E}_n(X_{n,k}) \sim n, \quad \text{ as } n \rightarrow \infty\,. \end{align*} Some particular cases of Theorem \ref{thm:labeled_meannumber} appear in \cite{Ara-Fer}, there the authors prove the previous result for nodes $v$ with $\partial_{v} \geq 3$, see Theorem 3.7 in \cite[p. 314]{Ara-Fer}. \begin{thebibliography}{C-J-S} \bibitem{Ara-Fer} \textsc{Aramayona. J, Fern\'andez. J. L., Fern\'andez. P, Mart\'inez-P\'erez. C.}: Trees, homology, and automorphism groups of RAAGs. \textit{ J. Algebr. Comb.}, {\bf 50}, 293--315 (2019). \url{https://doi.org/10.1007/s10801-018-0854-y} \bibitem{Protection-Bona} \textsc{Bóna, M.}: {$k$-protected vertices in binary search trees}. \textit{Adv. in App. Math.} {\bf 53}, 1--11 (2014). \url{https://doi.org/10.1016/j.aam.2013.09.003} \bibitem{K_uno} \textsc{Cant\'on, A., Fern\'andez, J. L., Fern\'andez, P. and Maci\'a, V.}: Khinchin families and Hayman class, \textit{Computational Methods and Function Theory}, {\bf 21}, 851--904 (2021). \url{https://doi.org/10.1007/s40315-021-00420-6} \bibitem{K_exponentials}Cant\'on A., Fern\'andez J.\,L., Fern\'andez P. and Maci\'a V.: \href{https://doi.org/10.1007/s00009-023-02579-9}{Khinchin families, set constructions, partitions and exponentials}. \emph{Mediterr. J. Math.} \textbf{21} (2024), article no.~39, 28~pp. \bibitem{CFFM3} {Cant\'on A., Fern\'andez J.L., Fern\'andez P., Maci\'a V.}: \href{https://arxiv.org/abs/2401.14473}{Growth of power series with nonnegative coefficients, and moments of power series distributions.} \textit{Accepted - Publicacions Matemàtiques}. \bibitem{Protection-Gisang} \textsc{Cheon, G. and Shapiro, L.}: {Protected points in ordered trees}. \textit{Appl. Math. Lett.}, {\bf 21}, 516-520 (2008). \url{http://dx.doi.org/10.1016/j.aml.2007.07.001} \bibitem{de Bruijn} \textsc{de Bruijn, N.G, Knuth, D.E, Rice, S.O.}: The average height of planted plane trees. \textit{Graph theory and Computing}, (pp. 15 - 22). London Academic Press, January 1972. \url{https://doi.org/10.1016/B978-1-4832-3187-7.50007-6} \bibitem{K_dos} \textsc{Fern\'andez, J. L. and Maci\'a, V.}: Khinchin families and Large Powers, \texttt{arXiv:2201.11746}. \bibitem{FlajoletOdlyzko} \textsc{Flajolet. P, Gao. Z, Odlyzco. A, Richmond. B.}: The distribution of Heights of Binary Trees and Other Simple Trees \textit{Combinatorics, Probability and Computing}, {\bf 2}, 145-- 156), (1993). \url{https://doi.org/10.1017/S0963548300000560} \bibitem{FlaOdl} \textsc{Flajolet, P, Odlyzko, A.}: The Average Height of Binary Trees and Other Simple Trees. \textit{Journal of Computer and System Sciences}, {\bf 25}, 171--213 (1982). \url{https://doi.org/10.1016/0022-0000(82)90004-6} \bibitem{Flajolet} \textsc{Flajolet, P. and Sedgewick, R.}: \textit{Analytic combinatorics}. Cambridge University Press, 2009. \url{https://doi.org/10.1017/CBO9780511801655} \bibitem{Protection1-mean} \textsc{Gittenberger, B. Glebiewski, Z. Larcher, I. and Sulkowska, M.}: {Protection numbers in simply generated trees and Po\'lya trees}. \textit{Applicable Analysis and Discrete Mathematics} (2021). 
\url{http://dx.doi.org/10.2298/AADM190329010G} \bibitem{Protection-Clemens} \textsc{Heuberger, C. and Prodinger, H.}: {Protection number in plane trees.} \textit{Applicable Analysis and Discrete Mathematics} (2017). \url{https://doi.org/10.2298/AADM1702314H} \bibitem{Sokal} \textsc{Sokal, Alan D.} {A ridiculously simple and explicit implicit function theorem} \textit{Séminaire Lotharingien de Combinatoire} 61A (2009): B61Ad, electronic only. \url{http://eudml.org/doc/222589}. \bibitem{MeirMoonOld} \textsc{Meir, A., Moon, J.W.}: On the Altitude of Nodes in Random Trees, \textit{Can. J. Math.}, { 30} (1978), 997--1015. \url{https://doi.org/10.4153/CJM-1978-085-0} \bibitem{MeirMoonChar} \textsc{Meir, A., Moon, J.W.}: Some asymptotic results useful in enumeration problems. \textit{Aeq. Math.}, { 33}, 260--268 (1987). \url{https://doi.org/10.1007/BF01836167} \bibitem{MeirMoon} \textsc{Meir, A., Moon, J. W.}: Packing and Covering Constants for Certain Families of Trees I. \textit{Journal of Graph Theory}, {\bf 1}, 157--174 (1977). \url{https://doi.org/10.1002/jgt.3190010211} \bibitem{Neveu} \textsc{Neveu, J.}: Arbres et processus de Galton-Watson. \textit{Annales de l'I.H.P. Probabilités et statistiques}, {\bf 22}, 199-207 (1986). \url{http://www.numdam.org/item/?id=AIHPB_1986__22_2_199_0} \bibitem{Otter} \textsc{Otter, R.}: The Multiplicative Process. \textit{The Annals of Mathematical Statistics}, {\bf 20} no. 2, 206--224 (1949). \url{https://doi.org/10.1214/aoms/1177730031} \bibitem{Pitman} \textsc{Pitman, J.}: Enumerations Of Trees And Forests Related To Branching Processes And Random Walks. \textit{Microsurveys in Discrete Probability: DIMACS Workshop} (1997). \url{https://statistics.berkeley.edu/tech-reports/482} \bibitem{Pitman2} \textsc{Pitman, J.}: Combinatorial stochastic processes. {Lectures from the 32nd Summer School on Probability Theory held in Saint-Flour.} \textit{Springer-Verlag} (2006). \url{https://doi.org/10.1007 \bibitem{PolyaSzego} \textsc{Polya, G. Szegö, G.}: Problems and Theorems in Analysis I, \textit{Springer, Berlin, Heidelberg}, (1999). \url{https://doi.org/10.1007/978-3-642-61983-0} \bibitem{Renyi} \textsc{R\'enyi. A, Szekeres. G.}: On the height of trees. \textit{Journal of the Australian Mathematical Society}, (pp. 497 - 507), November 1967. \url{https://doi.org/10.1017/S1446788700004432} \bibitem{Williams} \textsc{Williams, D.}: Probability with Martingales. \textit{Cambridge University Press.} (1991). \url{doi:10.1017/CBO9780511813658} \end{thebibliography} \end{document}
2205.10644v1
http://arxiv.org/abs/2205.10644v1
Unification types and union splittings in intermediate logics
\documentclass[twoside]{report} \usepackage{amssymb, l} \usepackage[arrow,matrix,tips,curve] {xy} \input amssym.def \usepackage{float} \Title{ Unification types and union splittings in intermediate logics} \ShortAuthor{W. Dzik, S.Kost and P. Wojtylak} \LongAuthor{ \author{WOJCIECH DZIK} \address{Institute of Mathematics, Silesian University, Bankowa 14, Katowice 40-007, Poland; [email protected]} \author{S{\L}AWOMIR KOST} \address{Institute of Computer Science, University of Opole, Oleska 48, Opole 45-052, Poland; [email protected]} \author{PIOTR WOJTYLAK} \address{Institute of Computer Science, University of Opole, Oleska 48, Opole 45-052, Poland; ; [email protected]} } \begin{document} \begin{paper} \begin{abstract} Following a characterization \cite{dkw} of locally tabular logics with finitary (or unitary) unification by their Kripke models we determine the unification types of some intermediate logics (extensions of {\sf INT}). There are exactly four maximal logics with nullary unification ${\mathsf L}(\mathfrak R_{2}+)$, \ ${\mathsf L}(\mathfrak R_{2})\cap{\mathsf L}(\mathfrak F_{2})$, \ ${\mathsf L}(\mathfrak G_{3})$ \ and \ ${\mathsf L}(\mathfrak G_{3}+)$ and they are tabular. There are only two minimal logics with hereditary finitary unification: {\sf L}($\mathbf F_{un}$), the least logic with hereditary unitary unification, and {\sf L}( $\mathbf F_{pr}$) the least logic with hereditary projective approximation; they are locally tabular. Unitary and non-projective logics need additional variables for mgu's of some unifiable formulas, and unitary logics with projective approximation are exactly projective. None of locally tabular intermediate logics has infinitary unification. Logics with finitary, but not hereditary finitary, unification are rare and scattered among the majority of those with nullary unification, see the example of $\mathsf H_3\mathsf B_2$ and its extensions. \end{abstract} \Keywords{unification types, intermediate logics, locally tabular logics, Kripke models.} \section{Introduction.}\label{Intro} Unification, in general, is concerned with finding a substitution that makes two terms equal. Unification in logic is the study of substitutions under which a formula becomes provable in a a given logic {\sf L}. In this case the substitutions are called the unifiers of the formula in {\sf L} ({\sf L}-unifiers). If an {\sf L}-unifier for a formula $A$ exists, $A$ is called unifiable in {\sf L}. An {\sf L}-unifier $\sigma$ for $A$ can be more general than the other {\sf L}-unifier $\tau$, in symbols $\sigma \preccurlyeq \tau$; the pre-order $\preccurlyeq$ of substitutions gives rise to four unification types: $1$, $\omega$, $\infty$, and $0$, from the ''best'' to the ''worst'', see \cite{BaSny,BaGhi}. Unification is unitary, or it has the type $1$, if there is a most general unifier (mgu) for every unifiable formula. Unification is finitary or infinitary if, for every unifiable formula, there is a (finite or infinite) basis of unifiers. Nullary unification means that no such basis of unifiers exists at all. Silvio Ghilardi introduced unification in propositional (intuitionistic \cite{Ghi2} and modal \cite{Ghi3}) logic. In \cite{Ghi2} he showed that unification in {\sf INT} is finitary, but in {\sf KC} it is unitary and any intermediate logic with unitary unification contains {\sf KC}. 
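As a simple illustration of these notions (a standard example, independent of the results cited above), consider the formula $x\lor\neg x$ over {\sf INT}. The substitutions $x\mapsto\top$ and $x\mapsto\bot$ are both unifiers, and neither is more general than the other, since every substitution fixes $\top$ and $\bot$, which are not {\sf INT}-equivalent. On the other hand, by the disjunction property of {\sf INT}, every unifier $\varepsilon$ of $x\lor\neg x$ satisfies $\vdash_{\sf INT}\varepsilon(x)$ or $\vdash_{\sf INT}\neg\varepsilon(x)$, so one of these two substitutions is more general than $\varepsilon$. Thus $\{x\mapsto\top,\ x\mapsto\bot\}$ is a two-element complete set of unifiers and, since no single unifier can be more general than both, this formula already witnesses that unification in {\sf INT} is not unitary.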
Dzik \cite{dzSpl} uses the particular splitting of the lattice of intermediate logics by the pair ({\sf L}($\mathfrak{F}_{2}$),{\sf KC}), where {\sf L}($\mathfrak{F}_{2}$) is the logic determined by the `2-fork frame' $\mathfrak {F}_{2}$ depicted in Figure \ref{8fames}, to give location of logics with finitary but not unitary unification: they all are included in {\sf L}($\mathfrak{F}_{2})$. In Wro$\acute{\rm n}$ski \cite{Wro1,Wro2}, see also \cite{dw1}, it is shown that unification in any intermediate logic {\sf L} is projective iff {\sf L} is an extension of {\sf LC} (that is it is one of G\"{o}del-Dummett logics); projective implies unitary unification. In Ghilardi \cite{Ghi5} first examples of intermediate logics with nullary unification are given. Iemhoff \cite{IemRoz} contains a proof-theoretic account of unification in fragments of intuitionistic logics. Many papers concern unification in modal logics, see e.g. \cite{Ghi3,Jer,Balb1,dw2,Kost}, and also in intuitionistic predicate logic, see \cite{dw4}. No (modal or intermediate) logic with infinitary unification has been found so far and it is expected that no such logic exists. Generally, similar results on unification types in transitive modal logics and corresponding intermediate logics are given in \cite{dkw}. In \cite{Ghi5} Ghilardi studied unification in intermediate logics of finite slices (or finite depths). He applied his method, based on Category Theory, of finitely presented projective objects (see \cite{Ghi1}) and duality, and characterized injective objects in finite posets. He gave some positive and negative criteria for unification to be finitary. From these criteria it follows, for instance, that bounded depth axioms $\mathsf{H_n }$ plus bounded width axioms $\mathsf{B_k }$ keep unification finitary. It also follows that there are logics without finitary unification.\footnote{Ghilardi's original notation of frames, as well as our notation of frames in \cite{dkw}, was quite different. 
All frames depicted in this paper represent finite po-sets.} He considered, among others, the following frames: \begin{figure}[H] \unitlength1cm \begin{picture}(0,2.2) \thicklines \put(0,0.5){$\mathfrak{ G}_1:$} \put(2,0){\vector(-1,1){0.9}} \put(2,0){\vector(1,1){0.9}} \put(1,1){\vector(-1,1){0.9}} \put(1,1){\vector(0,1){0.9}} \put(3,1){\vector(1,1){0.9}} \put(3,1){\vector(0,1){0.9}} \put(0,2){\circle{0.1}} \put(1,2){\circle{0.1}} \put(3,2){\circle{0.1}} \put(4,2){\circle{0.1}} \put(1,1){\circle{0.1}} \put(3,1){\circle{0.1}} \put(2,0){\circle{0.1}} \put(3.5,0.5){$\mathfrak{G }_2:$} \put(5.5,0){\vector(-1,1){0.9}} \put(5.5,0){\vector(1,1){0.9}} \put(6.5,1){\vector(0,1){0.9}} \put(6.5,1){\vector(-1,1){0.9}} \put(4.5,1){\circle{0.1}} \put(6.5,2){\circle{0.1}} \put(5.5,2){\circle{0.1}} \put(5.5,0){\circle{0.1}} \put(6.5,1){\circle{0.1}} \put(7,0.5){$\mathfrak{G}_3:$} \put(8.5,0){\vector(-1,1){0.9}} \put(8.5,0){\vector(1,1){0.9}} \put(9.5,1){\vector(0,1){0.9}} \put(7.5,1){\circle{0.1}} \put(9.5,2){\circle{0.1}} \put(8.5,0){\circle{0.1}} \put(9.5,1){\circle{0.1}} \put(10,0.5){${\mathfrak{G}_{3}}+:$} \put(12,3){\circle{0.1}} \put(13,2){\circle{0.1}} \put(11,1){\circle{0.1}} \put(13,1){\circle{0.1}} \put(12,0){\circle{0.1}} \put(13,2){\vector(-1,1){0.9}} \put(13,1){\vector(0,1){0.9}} \put(11,1){\vector(1,2){0.9}} \put(12,0){\vector(1,1){0.9}} \put(12,0){\vector(-1,1){0.9}} \end{picture}\\ \caption{Ghilardi's Frames} \label{GF} \end{figure} \noindent Since $\mathsf L(\mathfrak{G}_1)$, the logic of $\mathfrak{G}_1$, coincides with $\mathsf{H}_3\mathsf{B}_2$, it has finitary unification by \cite{Ghi5}. Theorem 9, p.112 of \cite{Ghi5}) says that, if $\mathfrak{G}_3$ is a frame of any intermediate logic with finitary unification, then $\mathfrak{G}_2$ is a frame of this logic, as well. It means, in particular, that $\mathsf L(\mathfrak{G}_3)$ has not finitary unification. (the unification type of $\mathsf L(\mathfrak{G}_2)$ and $\mathsf L(\mathfrak{G}_3)$ was not determined). Ghilardi announced that `attaching a final point everywhere' provide examples in which unification is nullary. Thus, $\mathsf L({\mathfrak{G}_3}+)$ has nullary unification.\footnote{The frame received from $\mathfrak{F}$, by adding a top (=final) element is denoted by ${\mathfrak F}+$.} He also showed that replacing one of maximal elements in $\mathfrak{G}_3$ with any finite (rooted) po-set $\mathfrak P$, gives a frame of logic without finitary unification, see Figure \ref{NU}. \begin{figure}[H] \unitlength1cm \begin{picture}(0,2) \thicklines \put(4,0.5){$\mathfrak{G}_{3\mathfrak P}:$} \put(6.5,0){\vector(-1,1){0.9}} \put(6.5,0){\vector(1,1){0.9}} \put(7.5,1){\vector(0,1){0.9}} \put(5.4,1.1){$\mathfrak P$} \put(7.5,2){\circle{0.1}} \put(6.5,0){\circle{0.1}} \put(7.5,1){\circle{0.1}} \put(5.5,1.2){\circle{0.7}} \end{picture}\\ \caption{Frames of Logics with Nullary Unification} \label{NU} \end{figure} Hence, there are infinitely many intermediate logics without finitary (by \cite{dkw}: with nullary) unification. In \cite{dkw} we gave necessary and sufficient conditions for finitary (or unitary) unification in locally tabular logics solely in terms of mappings between (bounded) Kripke models. Our approach was entirely different from that in \cite{Ghi5}. A simpler variant of the conditions characterizes logics with projective approximation. Then we applied the conditions to determine the unification types of logics (intermediate or modal) given by relatively simple frames. 
In particular, we studied tabular modal and intermediate logics determined by the frames in Figure \ref{8fames}. \begin{figure}[H] \unitlength1cm \begin{picture}(3,2) \thicklines \put(0,0.5){$\mathfrak L_1:$} \put(1,0){\circle{0.1}} \put(2.5,0.5){$\mathfrak L_2:$} \put(3.5,0){\circle{0.1}} \put(3.5,0){\line(0,1){0.9}} \put(3.5,1){\circle{0.1}} \put(3.5,0){\vector(0,1){0.9}} \put(5,0.5){$\mathfrak L_3:$} \put(6,0){\vector(0,1){0.9}} \put(6,1){\vector(0,1){0.9}} \put(6,1){\circle{0.1}} \put(6,2){\circle{0.1}} \put(6,0){\circle{0.1}} \put(7,0.5){$\mathfrak{F}_{2}:$} \put(8,1){\circle{0.1}} \put(9,0){\circle{0.1}} \put(10,1){\circle{0.1}} \put(9,0){\vector(1,1){0.9}} \put(9,0){\vector(-1,1){0.9}} \put(10.5,0.5){${\mathfrak{R}_{2}}:$} \put(12,0){\vector(-1,1){0.9}} \put(12,0){\vector(1,1){0.9}} \put(13,1){\vector(-1,1){0.9}} \put(11,1){\circle{0.1}} \put(12,2){\circle{0.1}} \put(12,0){\circle{0.1}} \put(13,1){\circle{0.1}} \put(11,1){\vector(1,1){0.9}} \end{picture}\\ \unitlength1cm \begin{picture}(5,3) \thicklines \put(0,0.5){$\mathfrak{G}_3:$} \put(2,0){\vector(-1,1){0.9}} \put(2,0){\vector(1,1){0.9}} \put(3,1){\vector(0,1){0.9}} \put(1,1){\circle{0.1}} \put(3.1,2){\circle{0.1}} \put(2,0){\circle{0.1}} \put(3,1){\circle{0.1}} \put(3.5,0.5){${\mathfrak{G}_{3}}+:$} \put(5.5,3){\circle{0.1}} \put(6.5,2){\circle{0.1}} \put(4.5,1){\circle{0.1}} \put(6.5,1){\circle{0.1}} \put(5.5,0){\circle{0.1}} \put(6.5,2){\vector(-1,1){0.9}} \put(6.5,1){\vector(0,1){0.9}} \put(4.5,1){\vector(1,2){0.9}} \put(5.5,0){\vector(1,1){0.9}} \put(5.5,0){\vector(-1,1){0.9}} \put(7.2,0.5){$\mathfrak{F}_{3}:$} \put(8,1){\circle{0.1}} \put(9,0){\circle{0.1}} \put(10,1){\circle{0.1}} \put(9,1){\circle{0.1}} \put(9,0){\vector(1,1){0.9}} \put(9,0){\vector(-1,1){0.9}} \put(9,0){\vector(0,1){0.9}} \put(10.4,0.2){${\mathfrak{R}_{3}}:$} \put(11,1){\circle{0.1}} \put(12,0){\circle{0.1}} \put(12,2){\circle{0.1}} \put(13,1){\circle{0.1}} \put(12,1){\circle{0.1}} \put(12,0){\vector(1,1){0.9}} \put(12,0){\vector(-1,1){0.9}} \put(12,0){\vector(0,1){0.9}} \put(11,1){\vector(1,1){0.9}} \put(12,1){\vector(0,1){0.9}} \put(13,1){\vector(-1,1){0.9}} \end{picture}\\ \caption{Frames of \cite{dkw}} \label{8fames} \end{figure} \noindent We proved that unification in the modal (as well as intermediate) logics of the frames $\mathfrak L_1, \mathfrak L_2, \mathfrak L_3,{\mathfrak{R}_{2}}$ and ${\mathfrak{R}_{3}}$ is unitary, in (the logic of) $\mathfrak{F}_{2}$ and $\mathfrak{F}_{3}$ it is finitary and in $\mathfrak{G}_3$ and $\mathfrak{G}_{3}+$ it is nullary. We have also considered $n$-forks ${\mathfrak{F}_{n}}$ and $n$-rhombuses ${\mathfrak{R}_{n}}$, for any $n\geq 2$, see Figure \ref{FRF}. We showed that the logic of any fork (including the infinite `fork frame' ${\mathfrak{F}_{\infty}}$) has projective approximation, and hance it has finitary unification. The logic of any rhombus (including ${\mathfrak{R}_{\infty}}$) has unitary unification. 
\begin{figure}[H] \unitlength1cm \begin{picture}(3,2) \thicklines \put(2,0){${\mathfrak{F}_{n}}:$} \put(2,1){\circle{0.1}} \put(5,1){\circle{0.1}} \put(4,1){\circle{0.1}} \put(6,1){\circle{0.1}} \put(3,1){\circle{0.1}} \put(4,0){\vector(1,1){0.9}} \put(4,0){\vector(-1,1){0.9}} \put(4,0){\vector(0,1){0.9}} \put(4,0){\vector(2,1){1.9}} \put(4,0){\vector(-2,1){1.9}} \put(1,1){\circle{0.1}} \put(4,0){\circle{0.1}} \put(2,1){\circle{0.1}} \put(1.5,1){\circle{0.1}} \put(7,1){\circle{0.1}} \put(6.5,1){\circle{0.1}} \put(7.5,1){\circle{0.1}} \put(7,0){$\mathfrak{R}_n={\mathfrak{F}_{n}}+:$} \put(8,1){\circle{0.1}} \put(11,1){\circle{0.1}} \put(10,1){\circle{0.1}} \put(12,1){\circle{0.1}} \put(9,1){\circle{0.1}} \put(10,0){\vector(1,1){0.9}} \put(10,0){\vector(-1,1){0.9}} \put(10,0){\vector(0,1){0.9}} \put(10,0){\vector(2,1){1.9}} \put(10,0){\vector(-2,1){1.9}} \put(10,0){\circle{0.1}} \put(12.5,1){\circle{0.1}} \put(13,1){\circle{0.1}} \put(10,2){\circle{0.1}} \put(9,1){\vector(1,1){0.9}} \put(11,1){\vector(-1,1){0.9}} \put(10,1){\vector(0,1){0.9}} \put(8,1){\vector(2,1){1.9}} \put(12,1){\vector(-2,1){1.9}} \end{picture} \caption{$n$-Fork and $n$-Rhombus Frames, for $n\geq 1$.}\label{FRF} \end{figure} \noindent Still many questions about unification of intermediate logics and location of particular types remained open. Here is a summary of the results in the present paper.\\ 1) We give another proof that our conditions (see Theorem \ref{main}) are necessary and sufficient for finitary\slash unitary unification, as well as for projective approximation (Theorem \ref{retraction}) in locally tabular intermediate logics. Variants of the frames in Figure \ref{8fames} are considered and we determine the unification types of their logics. In particular, we prove that unification in $\mathsf L(\mathfrak{G}_2)$ is finitary and though (we know that) it is also finitary in $\mathsf L(\mathfrak{F}_{3})$, it is nullary in their intersection $\mathsf L(\mathfrak{G}_2)\cap\mathsf L(\mathfrak{F}_{3})$ .\\ 2) It turns out that intermediate logics with unitary unification are either projective (hence they are extensions of {\sf LC}) or they need new variables for mgu's of some unifiable formulas. It means that any (non-projective) logic with unitary unification has a unifiable formula $A(x_1,\dots,x_n)$ which do not have any mgu in $n$-variables (but its mgu's must introduce additional variables -- like in filtering unification). The same result for transitive modal logics is proved in \cite{dkw}.\\ 3) We prove that a locally tabular intermediate logic with infinitary unification does not exist and we think that no intermediate logic has infinitary unification.\\ 4) We claim (and give some evidences) that 'most of' intermediate logics have nullary unification. 
For instance, logics of the following frames are nullary: \begin{figure}[H] \unitlength1cm \thicklines \begin{picture}(0,3) \put(0,0){$\mathfrak Y_{1}:$} \put(1,0){\vector(-1,1){0.9}} \put(1,0){\vector(1,1){0.9}} \put(2,1){\vector(-1,1){0.9}} \put(0,1){\circle{0.1}} \put(1,2){\circle{0.1}} \put(1,0){\circle{0.1}} \put(2,1){\circle{0.1}} \put(0,1){\vector(1,1){0.9}} \put(1,3){\circle{0.1}} \put(1,2){\vector(0,1){0.9}} \put(2.5,0){$\mathfrak{Y}_2$:} \put(3.5,0){\vector(-1,1){0.9}} \put(3.5,0){\vector(1,1){0.9}} \put(4.5,1){\vector(1,1){0.9}} \put(2.5,1){\circle{0.1}} \put(3.5,2){\circle{0.1}} \put(3.5,0){\circle{0.1}} \put(4.5,1){\circle{0.1}} \put(5.5,2){\circle{0.1}} \put(2.5,1){\vector(1,1){0.9}} \put(4.5,1){\vector(-1,1){0.9}} \put(4.8,0){$\mathfrak{Y}_2+$:} \put(6.3,0){\vector(-1,1){0.9}} \put(6.3,0){\vector(1,1){0.9}} \put(7.3,1){\vector(1,1){0.9}} \put(5.3,1){\circle{0.1}} \put(6.3,2){\circle{0.1}} \put(6.3,0){\circle{0.1}} \put(7.3,1){\circle{0.1}} \put(8.3,2){\circle{0.1}} \put(5.3,1){\vector(1,1){0.9}} \put(7.3,1){\vector(-1,1){0.9}} \put(7.3,3){\circle{0.1}} \put(6.3,2){\vector(1,1){0.9}} \put(8.3,2){\vector(-1,1){0.9}} \put(8.2,0){$\mathfrak{Y}_3$:} \put(8.7,2){\circle{0.1}} \put(10.7,2){\circle{0.1}} \put(8.7,1){\circle{0.1}} \put(10.7,1){\circle{0.1}} \put(9.7,0){\circle{0.1}} \put(8.7,1){\vector(0,1){0.9}} \put(10.7,1){\vector(0,1){0.9}} \put(8.7,1){\vector(2,1){1.9}} \put(10.7,1){\vector(-2,1){1.9}} \put(9.7,0){\vector(1,1){0.9}} \put(9.7,0){\vector(-1,1){0.9}} \put(10.5,0){${\mathfrak{Y}_3}+$:} \put(11,2){\circle{0.1}} \put(13,2){\circle{0.1}} \put(11,1){\circle{0.1}} \put(13,1){\circle{0.1}} \put(12,0){\circle{0.1}} \put(12,3){\circle{0.1}} \put(11,1){\vector(0,1){0.9}} \put(13,1){\vector(0,1){0.9}} \put(11,1){\vector(2,1){1.9}} \put(13,1){\vector(-2,1){1.9}} \put(12,0){\vector(1,1){0.9}} \put(12,0){\vector(-1,1){0.9}} \put(11,2){\vector(1,1){0.9}} \put(13,2){\vector(-1,1){0.9}} \end{picture} \caption{Frames of Logics with Nullary Unification}\label{MNU} \end{figure} Intermediate logics with nullary unification can be found 'almost everywhere'. Extensions of finitary\slash unitary logics may have nullary unification, intersections of finitary logics may be nullary. We cannot put apart logics with finitary\slash unitary unification from those with the nullary one. 5) In structurally complete logics \footnote{We consider rules $r\!\!:\!\!{A}\slash{B}$, where $A, B$ play the role of formula schemata, i.e. $r$ enables us to derive $\varepsilon(B)$ from $\varepsilon(A)$, for any substitution $\varepsilon$. The rule is said to be {\it admissible} in an intermediate logic {\sf L} (or {\sf L}-admissible), if $\vdash_{\sf L} \varepsilon(A)$ implies $\vdash_{\sf L} \varepsilon(B)$, for any substitution $\varepsilon$, that is any {\sf L}-unifier for $A$ must be an {\sf L}-unifier for $B$. The rule is {\it {\sf L}-derivable} if $A\vdash_{\sf L}B$. A logic {\sf L} is {\it structurally complete} if every its admissible rule is derivable (the reverse inclusion always holds). {\it Hereditary structural completeness} of {\sf L} means that any extension of {\sf L} is structurally complete.} the situation is somehow similar. A.Citkin (see Tzitkin \cite{Tsitkin}) characterized hereditary structurally complete logics (instead of structurally complete) and showed that a logic {\sf L} is hereditary structurally complete iff {\sf L} omits (i.e. 
{\sf L} is falsified in) the following frames: \begin{figure}[H] \unitlength1cm \thicklines \begin{picture}(0,2.2) \put(0,0){$\mathfrak C_{1}:$} \put(0,1){\circle{0.1}} \put(1,0){\circle{0.1}} \put(2,1){\circle{0.1}} \put(1,1){\circle{0.1}} \put(1,0){\vector(1,1){0.9}} \put(1,0){\vector(-1,1){0.9}} \put(1,0){\vector(0,1){0.9}} \put(2.5,0){$\mathfrak C_{2}:$} \put(2.5,1){\circle{0.1}} \put(3.5,0){\circle{0.1}} \put(4.5,1){\circle{0.1}} \put(3.5,1){\circle{0.1}} \put(3.5,0){\vector(1,1){0.9}} \put(3.5,0){\vector(-1,1){0.9}} \put(3.5,0){\vector(0,1){0.9}} \put(3.5,2){\circle{0.1}} \put(2.5,1){\vector(1,1){0.9}} \put(3.5,1){\vector(0,1){0.9}} \put(4.5,1){\vector(-1,1){0.9}} \put(5,0){$\mathfrak C_{3}:$} \put(6,0){\vector(-1,1){0.9}} \put(6,0){\vector(1,1){0.9}} \put(7,1){\vector(0,1){0.9}} \put(5,1){\circle{0.1}} \put(7,2){\circle{0.1}} \put(6,0){\circle{0.1}} \put(7,1){\circle{0.1}} \put(7.5,0){$\mathfrak C_{4}:$} \put(8.5,0){\line(-1,1){0.9}} \put(8.5,0){\vector(-1,1){0.9}} \put(8.5,0){\vector(1,1){0.9}} \put(9.5,1){\vector(0,1){0.9}} \put(7.5,1){\circle{0.1}} \put(9.5,2){\circle{0.1}} \put(8.5,0){\circle{0.1}} \put(9.5,1){\circle{0.1}} \put(8.5,3){\circle{0.1}} \put(9.5,2){\vector(-1,1){0.9}} \put(7.5,1){\vector(1,2){0.9}} \put(10.5,0){$\mathfrak C_{5}:$} \put(11.5,0){\vector(-1,1){0.9}} \put(11.5,0){\vector(1,1){0.9}} \put(12.5,1){\vector(-1,1){0.9}} \put(10.5,1){\circle{0.1}} \put(11.5,2){\circle{0.1}} \put(11.5,0){\circle{0.1}} \put(12.5,1){\circle{0.1}} \put(10.5,1){\vector(1,1){0.9}} \put(10.5,2){\circle{0.1}} \put(12.5,2){\circle{0.1}} \put(10.5,1){\vector(0,1){0.9}} \put(12.5,1){\vector(0,1){0.9}} \end{picture} \caption{Citkin's Frames}\label{TF} \end{figure} We consider logics with {\it hereditary finitary unification} that is logics all their extensions have either finitary or unitary unification. We prove that there are exactly four maximal logics with nullary unification: $\mathsf L (\mathfrak Y_1)$, $\mathsf L(\mathfrak R_2)\cap \mathsf L(\mathfrak F_2)$, $\mathsf L(\mathfrak G_3)$ and $ \mathsf L(\mathfrak G_3+)$. Thus, an intermediate logic has hereditary finitary unification if it omits $\mathfrak Y_1$, $\mathfrak G_3$, $\mathfrak G_3+$ and one of the frames $\{\mathfrak R_2,\mathfrak F_2\}$. This characterization is not optimal as, for instance, omitting $\mathfrak F_2$ the logic omits $\mathfrak G_3$; omitting $\mathfrak R_2$ it omits $\mathfrak G_3+$ and $\mathfrak Y_1$. There is no correlation between structural completeness and finitary unification. In particular, since $\mathfrak C_1 = \mathfrak F_3$ the logic of $\mathfrak C_1$ has projective approximation (and therefore it is finitary), since $\mathfrak C_2 = \mathfrak R_3$, $\mathfrak C_2$ is unitary and we will show that the fifth $\mathsf L(\mathfrak C_5)$ is finitary but not hereditary finitary. The remaining frames {$\mathfrak C_{3}$} and {$\mathfrak C_{4}$} coincide with ${\mathfrak{G}_{\sf 3}}$ and ${\mathfrak{G}_{\sf 3}}+$ and their logics have nullary unification.\\ 6) Two additional classes of logics emerge here: logics with {\it hereditary unitary unification} and logics with {\it hereditary projective approximation}. We show that an intermediate logic {\sf L} has hereditary unitary unification iff {\sf L} omits the frames $\mathfrak Y_1$, $\mathfrak F_2$ and $\mathfrak G_3+$. A logic {\sf L} has hereditary projective approximation iff {\sf L} omits the frames $\mathfrak R_2$ and $\mathfrak G_3$. 
Thus, {\sf L} has hereditary finitary unification iff either {\sf L} has hereditary unitary unification or {\sf L} has hereditary projective characterization. Logics with hereditary projective approximation can be characterized by frames $\mathfrak L_d+\mathfrak F_n$, for any $d,n\geq 0$ (that is forks on chains), whereas logics with hereditary unitary unification by $\mathfrak L_d+\mathfrak R_n$, for any $d,n\geq 0$ (that is rhombuses on chains); see Figure \ref{hpa}. \begin{figure}[H] \unitlength1cm \begin{picture}(3,3.5) \thicklines \put(0,1){$\mathbf{H}_{pa}$:} \put(0,3){\circle{0.1}} \put(3,3){\circle{0.1}} \put(2,3){\circle{0.1}} \put(4,3){\circle{0.1}} \put(1,3){\circle{0.1}} \put(2,3){\circle{0.1}} \put(2,2){\vector(1,1){0.9}} \put(2,2){\vector(-1,1){0.9}} \put(2,2){\vector(0,1){0.9}} \put(2,2){\vector(2,1){1.9}} \put(2,2){\vector(-2,1){1.9}} \put(2,2){\circle{0.1}} \put(2,1.5){\circle{0.1}} \put(2,1){\circle{0.1}} \put(2,1.25){\circle{0.1}} \put(2,1.75){\circle{0.1}} \put(2,0){\vector(0,1){0.9}} \put(2,0){\circle{0.1}} \put(8,1){$\mathbf{H}_{un}$:} \put(8,3){\circle{0.1}} \put(11,3){\circle{0.1}} \put(10,3){\circle{0.1}} \put(12,3){\circle{0.1}} \put(9,3){\circle{0.1}} \put(10,2){\vector(1,1){0.9}} \put(10,2){\vector(-1,1){0.9}} \put(10,2){\vector(0,1){0.9}} \put(10,2){\vector(2,1){1.9}} \put(10,2){\vector(-2,1){1.9}} \put(10,2){\circle{0.1}} \put(10,1.75){\circle{0.1}} \put(10,1.5){\circle{0.1}} \put(10,1.25){\circle{0.1}} \put(10,1){\circle{0.1}} \put(10,0){\circle{0.1}} \put(10,0){\vector(0,1){0.9}} \put(10,1){\circle{0.1}} \put(10,0){\circle{0.1}} \put(10,4){\circle{0.1}} \put(9,3){\vector(1,1){0.9}} \put(11,3){\vector(-1,1){0.9}} \put(10,3){\vector(0,1){0.9}} \put(8,3){\vector(2,1){1.9}} \put(12,3){\vector(-2,1){1.9}} \end{picture} \caption{Frames of Logics with Hereditary Finitary Unification.}\label{hpa} \end{figure} \noindent $\mathsf L(\mathbf{H}_{pa})$ is the least intermediate logic with hereditary projective approximation and $\mathsf L(\mathbf{H}_{un})$ is the least logic with hereditary unitary unification. The logics $\mathsf L(\mathbf{H}_{pa})$ and $\mathsf L(\mathbf{H}_{un})$ are locally tabular and they are (the only) minimal logics with hereditary finitary unification. We have $\mathsf L(\mathsf L(\mathbf{H}_{pa})\cup\mathsf L(\mathbf{H}_{un}))=\mathsf{LC}$ as, it is proved that, any unitary intermediate logic with projective approximation is projective. \section{Basic Concepts.}\label{BC} \subsection{Intermediate Logics.}\label{IL} We consider the standard language of intuitionistic propositional logic $\{\rightarrow,\lor,\land,\bot\}$ where $\leftrightarrow,\neg,\top$ are defined in the usual way. Let $\mathsf{Var}=\{x_1,x_2,\dots\}$ be the set of propositional variables and $\mathsf{Fm}$ be the set of (intuitionistic) formulas, denoted by $A,B,C,\dots$ For any $n\geq 0$, let $\mathsf{Fm^n}$, be the set of formulas in the variables $\{x_1,\dots,x_n\}$, that is $A\in \mathsf{Fm^n}\Leftrightarrow \mathsf{Var}(A)\subseteq\{x_1,\dots,x_n\}\Leftrightarrow A=A(x_1,\dots,x_n).$ Substitutions $\alpha,\beta,\dots$ are finite mappings; for each $\alpha$ there are $k,n\geq 0$ such that $\alpha\colon\{x_1,\dots,x_n\}\to \mathsf{Fm}^k$. The extension of $\alpha$ to an endomorphism of $\mathsf{Fm}$ is also denoted by $\alpha$. Thus, $\alpha(A)$ means the substitution of a formula $A$. Let $\alpha\circ\tau$ be the composition of the substitutions, that is a substitution such that $\alpha\circ\tau(A)=\alpha(\tau(A))$, for any $A$. 
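For concreteness, these definitions admit a direct computational reading. The following minimal sketch (in Python, with an ad hoc encoding of formulas as nested tuples; all names are illustrative and not taken from any existing implementation) represents substitutions as finite mappings and implements their extension to formulas and their composition.
\begin{verbatim}
# Formulas as nested tuples:
#   ('var', i), ('bot',), ('->', A, B), ('or', A, B), ('and', A, B).

def apply_subst(sigma, A):
    # Extend the finite mapping sigma (variable index -> formula)
    # to an endomorphism of the set of formulas.
    if A[0] == 'var':
        return sigma.get(A[1], A)      # variables outside the domain stay fixed
    if A[0] == 'bot':
        return A
    return (A[0],) + tuple(apply_subst(sigma, B) for B in A[1:])

def compose(alpha, tau):
    # (alpha o tau)(A) = alpha(tau(A)), again given by a finite mapping.
    out = dict(alpha)
    out.update({i: apply_subst(alpha, B) for i, B in tau.items()})
    return out
\end{verbatim}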
An {\it intermediate logic} {\sf L} is any set of formulas containing the intuitionistic logic {\sf INT}, closed under the modus ponens rule MP and closed under substitutions.\footnote{Intermediate logics may be regarded as fragments of transitive modal logics (or extensions of {\sf S4}, or {\sf Grz}); the intuitionistic variable $x_i$ is meant as $\Box^+ x_i$ and $A\rightarrow B=\Box^+(\neg A\lor B)$.} All intermediate logics form, under inclusion, a (complete distributive) lattice where inf$\{\mathsf L_i\}_{i\in I}=\bigcap_{i\in I}\mathsf L_i$. Let $\mathsf L(X)$, for any set $X$ of formulas, mean the least intermediate logic containing $X$. Given two intermediate logics {\sf L} and {\sf L'}, we say {\sf L'} is {\it an extension of} {\sf L} if $\mathsf L\subseteq\mathsf L'$. The least intermediate logic is {\sf INT}. Consistent logics are proper subsets of $\mathsf{Fm}$. We will refer to the following list of formulas\slash logics: \begin{figure}[H] $$\begin{array}{ll} \mathsf{ LC}: (x_1\rightarrow x_2)\lor (x_2\rightarrow x_1); \qquad \qquad \mathsf{ KC}: \neg x \lor \neg \neg x;& \\ \mathsf{ SL}: (( \neg \neg x\rightarrow x)\rightarrow (\neg x \lor \neg\neg x)) \rightarrow (\neg x \lor \neg \neg x): &\mathsf{ } \\ \mathsf{PWL}: (x_2\to x_1)\lor\bigl(((x_1\to x_2)\to x_1)\to x_1\bigr);&{}\\ \mathsf{H_n } : \ \mathsf{H}_1 = x_1 \lor \neg x_1,\qquad \mathsf{H}_{n+1} = x_{n+1} \lor (x_{n+1} \rightarrow \mathsf{H}_n); &\mathsf{ }\\ \mathsf{B_n}: \bigwedge_{i=1}^{n+1}\Bigl(\bigl(x_i\rightarrow\bigvee_{j\not=i}x_j\bigr)\rightarrow\bigvee_{j\not=i}x_j\Bigr)\rightarrow \bigvee_{i=1}^{n+1}x_i.&\mathsf{ } \end{array}$$\caption{Intermediate Logics.}\label{ILs} \end{figure} {\sf KC} is called the logic of weak excluded middle or Jankov logic or de~Morgan logic (see \cite{Ghi2}). {\sf SL} is Scott logic and {PWL} is the logic of weak law of Peirce, see \cite{Esakia}. We define the {\it consequence relation} $\vdash_{\mathsf L}$, for any given intermediate logic $\mathsf L$, admitting only the rule $\mathsf{MP}$ in derivations. Then we prove the {\it deduction theorem} $$X,A\vdash_{\mathsf L}B \quad\Leftrightarrow\quad X\vdash_{\mathsf L}A\rightarrow B.\leqno{(DT)}$$ The relation of $\mathsf L-equivalent$ formulas, $$ A=_{\mathsf L} B \qquad \Leftrightarrow\qquad \vdash_{\mathsf L} A\leftrightarrow B,$$ leads to the standard {\it Lindenbaum-Tarski algebra}. The relation $=_{\mathsf L}$ extends to substitutions, $ \varepsilon=_{\mathsf L} \mu$ means that $\varepsilon(A)=_{\mathsf L} \mu(A)$, for each formula $A$. We define a {\it pre-order} (that is a reflexive and transitive relation) on the set of substitutions: $$ \varepsilon\preccurlyeq_{\mathsf L} \mu \qquad \Leftrightarrow \qquad \bigl(\alpha\circ\varepsilon=_{\mathsf L} \mu, \mbox{ for some $\alpha$}\bigr).\footnote{Sometimes the reverse pre-order is used; in this case $\mu \preccurlyeq \varepsilon\Leftrightarrow (\alpha\circ\varepsilon=_{\mathsf L} \mu, \mbox{ for some $\alpha$})$.}$$ Note that $\varepsilon\preccurlyeq_{\mathsf L} \mu \land \mu\preccurlyeq_{\mathsf L} \varepsilon$ does not yield $\varepsilon=_{\mathsf L} \mu$. If $\varepsilon\preccurlyeq_{\mathsf L} \mu$, we say that $\varepsilon$ is {\it more general} than $\mu$. 
If it is not misleading, we omit the subscript $_{\mathsf L}$ and write $=$ and $\preccurlyeq$, instead of $=_{\mathsf L}$ and $\preccurlyeq_{\mathsf L}$, correspondingly.\\ A {\it frame} $\mathfrak F=(W,R,w_0)$ consists of a non-empty set $W$, a pre-order $R$ on $W$ and a {\it root} $w_0\in W$ such that $w_0Rw$, for any $w\in W.$ {For any set $U$, let $P(U)=\{V:V\subseteq U\}$.} Let $n$ be a natural number. Any $n$-{\it model} $\mathfrak{M}^n=(W,R,w_0,V^n)$, over the frame $(W,R,w_0)$, contains a valuation $V^n:W\to P(\{x_1,\dots,x_n\})$ which is monotone: $$u R w\quad \Rightarrow\quad V^n(u)\subseteq V^n(w), \quad \mbox{for each } u,w\in W.$$ Thus, $n$-models, are (bounded) variants of usual Kripke models $\mathfrak{M}=(W,R,w_0,V)$ where all variables are valuated; $V:W\to P(\mathsf{Var})$. Given $\mathfrak{M}^n$ and $\mathfrak{M}^k$ (for $n\not=k$), we do not assume that $\mathfrak{M}^n$ and $\mathfrak{M}^k$ have anything in common. In particular, we do not assume that there is any model $\mathfrak{M}$ such that $\mathfrak{M}^n$ and $\mathfrak{M}^k$ are its fragments. If $\mathfrak{M}^k=(W,R,w_0,V^k)$ and $n\leq k$, then $\mathfrak{M}^k\!\!\upharpoonright_n$ is the restriction of $\mathfrak{M}^k$ to the $n$-model. Thus, $\mathfrak{M}^k\!\!\upharpoonright_n=(W,R,w_0,V^n)$ is the $n$-model over the same frame as $\mathfrak{M}^k$ in which $V^n(w)=V^k(w)\cap\{x_1,\dots,x_n\}$, for each $w\in W$. We say $(W,R,w_0)$ is a po-frame, and $(W,R,w_0,V^n)$ is a po-model, if the relation $R$ is a partial order. Let $\mathfrak{F}=(W,\leq,w_0)$ be a finite po-frame. We define {the {\it depth}, $d_{\mathfrak F}(w)$, of any element $w\in W$ in $\mathfrak F$}. We let $d_{\mathfrak F}(w)=1$ if $w$ is a $\leq$-maximal element ($\leq$-maximal elements are also called end elements) and $d_{\mathfrak F}(w)=i+1$ if all elements in $\{u\in W\colon w<u\}$ are of the depth at most $i$ and there is at least one element $u>w$ of the depth $i$. The depth of the root, $d_{\mathfrak F}(w_0)$, is the depth of the frame $\mathfrak F$ (or any $n$-model over $\mathfrak F$). Let $\mathfrak F=(W,\leq_W,w_0)$ and $\mathfrak G=(U,\leq_U,u_0)$ be two disjoint (that is $W\cap U=\emptyset$) po-frames. The join $\mathfrak F +\mathfrak G$ of the frames is the frame $(W\cup U,\leq,w_0)$ where $$x\leq y\qquad \Leftrightarrow \qquad x\leq_W y \quad \mbox{or} \quad x\leq_U y\quad \mbox{or} \quad (x\in W\land y\in U).$$ If $\mathfrak F$ and $\mathfrak G$ are not disjoint, we take their disjoint isomorphic copies and the join of the copies is called the join of $\mathfrak F$ and $\mathfrak G$ (it is also denoted by $\mathfrak F +\mathfrak G$). Thus, the join of frames is defined up to an isomorphism. The join is associative (up to an isomorphism) and it is not commutative. Instead of $\mathfrak F +\mathfrak L_1$ and $\mathfrak L_1 +\mathfrak G$, where $\mathfrak L_1$ is one-element frame (see Figure \ref{8fames}), we write $\mathfrak F+$ and $+\mathfrak G$, correspondingly. Let $(W,R,w_0,V^n)$ be any $n$-model. The subsets $\{V^n(w)\}_{w\in W}$ of $\{x_1,\dots,x_n\}$ are usually given by their characteristic functions $\mathfrak{f}_w^n\colon\{x_1,\dots,x_n\}\to \{0,1\}$ or binary strings $\mathfrak{f}_w^n=i_1\dots i_n$, where $i_k\in\{0,1\}$. Thus, $n$-models may also appear in the form $(W,R,w_0,\{V^n(w)\}_{w\in W})$, or $(W,R,w_0,\{\mathfrak{f}_w^n\}_{w\in W})$. $n$-Models are usually depicted as graphs whose nodes are labeled with binary strings. 
The forcing relation $\mathfrak{M}^n\Vdash_wA$, for any $w\in W$ and $A\in \mathsf{Fm}^n$, is defined as usual $$\mathfrak{M}^n\Vdash_wx_i\quad\Leftrightarrow\quad x_i\in V^n(w),\qquad \mbox{ for any } i\leq n;\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad$$ $$\mathfrak{M}^n\Vdash_w\bot,\quad \mbox{for none } w\in W;\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad$$ $$\mathfrak{M}^n\Vdash_w(A\rightarrow B)\quad\Leftrightarrow\quad \forall_{u}\bigl(wRu\quad\mbox{and}\quad \mathfrak{M}^n\Vdash_uA\quad\Rightarrow\quad\mathfrak{M}^n\Vdash_uB\bigr);\qquad\qquad\qquad\qquad\qquad\qquad$$ $$\mathfrak{M}^n\Vdash_w(A\lor B)\quad\Leftrightarrow\quad \bigl(\mathfrak{M}^n\Vdash_wA\quad \mbox{or}\quad\mathfrak{M}^n\Vdash_wB\bigr);\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad$$ $$\mathfrak{M}^n\Vdash_w(A\land B)\quad\Leftrightarrow\quad \bigl(\mathfrak{M}^n\Vdash_wA\quad \mbox{and}\quad\mathfrak{M}^n\Vdash_wB\bigr).\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad$$ \begin{lemma}\label{pMm} If $u R w$ and $\mathfrak{M}^n\Vdash_u A$, then $\mathfrak{M}^n\Vdash_w A, \quad \mbox{for any } u,w\in W \mbox{ and any} A\in \mathsf{Fm}^n.$\end{lemma} Let $(W)_w=\{u\in W\colon wRu\}$, for any $w\in W$. The subframe of $(W,R,w_0)$ {\it generated by $w$} is $((W)_w,R\upharpoonright_{(W)_w},w)$; the submodel of $\mathfrak{M}^n$ {\it generated by $w$} is $$(\mathfrak{M}^n)_w=((W)_w,R\!\upharpoonright_{(W)_w},w,V^n\!\!\upharpoonright_{(W)_w}).$$ We write $\mathfrak{M}^n\Vdash A$ if $\mathfrak{M}^n\Vdash_{w_0}A$ and we obviously have $(\mathfrak{M}^n)_w\Vdash A\Leftrightarrow\mathfrak{M}^n\Vdash_wA$. For any $n$-model, we put $\mathsf{Th}(\mathfrak{M}^n)=\{A\in \mathsf{Fm}^n\colon\mathfrak{M}^n\Vdash A\}$. Given two $n$-models $\mathfrak{M}^n$ and $\mathfrak{N}^n$, we say they are {\it equivalent}, in symbols $\mathfrak M^n\thicksim\mathfrak N^n$, if $\mathsf{Th}(\mathfrak{M}^n)=\mathsf{Th}(\mathfrak{N}^n).$ Let $(W,\leq,w_0,V^n)$ and $(W,\leq,w_0,V'^{\ n})$ be $n$-models over the same po-frame, we say they are (mutual) {\it variants} if $V(w)=V'(w)$ for each $w\not=w_0$. Let $\mathbf{F}$ be a class of frames and $\mathbf{M}^n(\mathbf{F})$, for any $n\geq 0$, be the class of $n$-models over the frames $\mathbf{F}$; we write $\mathbf{M}^n$, instead of $\mathbf{M}^n(\mathbf{F})$, if there is no danger of confusion. The intermediate logic determined by $\mathbf{F}$ is denoted by $\mathsf{L}(\mathbf{F})$. Thus, if $A\in\mathsf{Fm^n}$, then $$A\in\mathsf{L}(\mathbf{F}) \quad \Leftrightarrow \quad (\mathfrak{M}^n\Vdash A, \mbox{ for every } \mathfrak{M}^n\in \mathbf{M}^n). $$ We say that {\bf F} are {\it frames of an intermediate logic {\sf L}} if $\mathsf{L}\subseteq\mathsf{L}(\mathbf{F})$ and {\sf L} {\it omits a frame} $\mathfrak{F}$ if $\mathfrak{F}$ is not a frame of {\sf L}. A logic {\sf L} is {\it Kripke complete} if $\mathsf{L}=\mathsf{L}(\mathbf{F})$ for some $\mathbf{F}$. The logic $\mathsf{L}(\mathbf{F})$ is said to be {\it tabular} if $\mathbf{F}$ is a finite family of finite frames. {\sf L} is {\it Halld\'{e}n complete} ({\it H}-complete) if for any formulas $A,B$ with $\mathsf{Var}(A)\cap\mathsf{Var}(B)=\emptyset$ we have $$ \mathsf{L}\vdash A\lor B \quad \Rightarrow \quad \mathsf{L}\vdash A \quad\mbox{ or }\quad \mathsf{L}\vdash B.$$ \begin{theorem}\label{hcl} Let {\bf F} be finite. Then $\mathsf{L}(\mathbf F)$ is {\it H}-complete iff $\mathsf{L}(\mathbf F)=\mathsf{L}(\mathfrak F)$ for some $\mathfrak F\in \mathbf F$. 
\end{theorem} A logic {\sf L} is {\it locally tabular} if \ {\sf Fm$^n\slash\!\!=_{\mathsf L}$} is finite, for each $n\geq 0$. Tabular logics are locally tabular but not vice versa. For each locally tabular logic {\sf L} there exists a family $\mathbf{F}$ of finite frames such that $\mathsf{L}=\mathsf{L}(\mathbf{F})$. Thus, locally tabular logics have the {\it finite model property} but, again, the converse is false. A logic {\sf L} is said to be in the $n$-{\it slice} if $\mathsf{L}=\mathsf{L}(\mathbf{F})$ for a family {\bf F} of finite po-frames such that $d(\mathfrak{F})\leq n$, for any $\mathfrak{F}\in\mathbf F$. \begin{theorem}\label{lf6} Suppose that the family {\bf F} consists of finite frames. Then $\mathsf{L}(\mathbf F)$ is locally tabular iff $\mathbf{M}^n\slash\!\!\thicksim$ is finite, for each $n$. \end{theorem} \begin{proof} $(\Rightarrow)$ Using finitely many (up to equivalence) formulas we do not distinguish infinitely many models. $(\Leftarrow)$ is obvious. \end{proof} \begin{corollary}\label{fp} (i) If {\sf L} and {\sf L'} are locally tabular intermediate logics, then their intersection $\mathsf L\cap\mathsf L'$ is also a locally tabular intermediate logic;\\ (ii) any extension of any locally tabular intermediate logic is locally tabular. \end{corollary} \begin{proof} (i) Let {\sf L=$\mathsf L({\mathbf F})$} and {\sf L'=L({\bf G})} for some classes {\bf F,G} of finite frames. Then $\mathsf{ L}\cap\mathsf{L'}=\mathsf{L}(\mathbf{F}\cup\mathbf{G})$ and $\mathbf{M}^n(\mathbf{F}\cup\mathbf{G}) = \mathbf{M}^n(\mathbf{F}) \ \cup \ \mathbf{M}^n(\mathbf{G})$ .Thus, $\mathbf{M}^n(\mathbf{F}\cup\mathbf{G})\slash\!\!\thicksim$ \ is finite if \ $\mathbf{M}^n(\mathbf{F})\slash\!\!\thicksim$ \ and $\mathbf{M}^n(\mathbf{G})\slash\!\!\thicksim$ \ are finite. \ (ii) is obvious.\end{proof} Let us characterize po-frames of the logics in Figure \ref{ILs}. Thus, {\sf LC}-frames are chains and we let $\mathfrak L_d$, for any natural number $d\geq 1$, be the chain on $\{1,2,\dots ,d\}$ with the reverse (natural) ordering $\geq$, where $d$ is the root and $1$ is the top (=greatest) element. Finite {\sf KC}-frames have top elements. $\mathsf H_n$-Frames are of the depth $\leq n$ and $\mathsf H_n\mathsf B_m$-frames have (additionally) $m$-bounded branching, that is each point has at most $m$ immediate successors. To get {\sf PWL}-frames we need unrooted frames; {\sf PWL}-frames are $$\mathfrak F_n+ \mathfrak I_{n_1}+\cdots+\mathfrak I_{n_k} \footnote{$\mathfrak F_n+ \mathfrak I_{n_1}+\cdots+\mathfrak I_{n_k}$ denotes the vertical union with $\mathfrak F_n$ on the top and $\mathfrak I_{n_k}$ on the bottom},\quad \mbox{where $n\geq 0$ and $n_1,\dots,n_k\geq 1$;} $$ where $\mathfrak I_n$ is the frame with the identity relation on an $n$-element set (and we agree that $\mathfrak F_0=\mathfrak L_1$ and $\mathfrak F_1=\mathfrak L_2$). Note that the frames in Figure \ref{hpa} are {\sf PWL}-frames and hence $\mathsf L({\mathbf H}_{pa})$ and $\mathsf L({\mathbf H}_{un})$ are extensions of {\sf PWL}. There are three pretabular intermediate logics, see \cite{Maks72}: {\sf LC} of G\"odel and Dummett, given by all chains $\mathfrak L_n$, {\sf LJ} of Jankov, given by all $n$-forks $\mathfrak F_n$, and {\sf LH} of Hosoi, given by all rhombuses $\mathfrak R_n$; see Figure \ref{FRF}. 
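The forcing clauses above translate directly into a recursive check on a finite $n$-model, which is convenient for verifying frame conditions such as the ones just listed. A minimal sketch follows (in Python, with formulas encoded as nested tuples as in the earlier sketch; all names are illustrative); it confirms, for instance, that the $2$-fork $\mathfrak F_2$ falsifies the {\sf KC} axiom $\neg x_1\lor\neg\neg x_1$ at its root.
\begin{verbatim}
# A finite n-model: worlds W, successor sets R[w] (reflexive and transitive),
# root w0, and a monotone valuation V[w] = set of variable indices true at w.

def forces(model, w, A):
    W, R, w0, V = model
    op = A[0]
    if op == 'var':
        return A[1] in V[w]
    if op == 'bot':
        return False
    if op == 'and':
        return forces(model, w, A[1]) and forces(model, w, A[2])
    if op == 'or':
        return forces(model, w, A[1]) or forces(model, w, A[2])
    if op == '->':
        return all(not forces(model, u, A[1]) or forces(model, u, A[2])
                   for u in R[w])
    raise ValueError(op)

# The 2-fork F_2: root 0 below the end points 1 and 2; x1 holds only at 1.
M = ({0, 1, 2}, {0: {0, 1, 2}, 1: {1}, 2: {2}}, 0, {0: set(), 1: {1}, 2: set()})
neg = lambda A: ('->', A, ('bot',))
x1 = ('var', 1)
print(forces(M, 0, ('or', neg(x1), neg(neg(x1)))))   # False
\end{verbatim}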
A pair of logics $(\mathsf L_1,\mathsf L_2)$ is a {\it splitting pair} of the lattice of (intermediate) logics if $\mathsf L_2\not\subseteq \mathsf L_1$ and, for any intermediate logic $\mathsf L$, either $\mathsf L \subseteq\mathsf L_1$, or $\mathsf L_2 \subseteq\mathsf L$.\footnote{In the same way, one can define a splitting pair in any complete lattice.} Then we say $\mathsf L_1$ splits the lattice and $\mathsf L_2$ is the splitting (logic) of the lattice, see \cite{ZWC}. Jankov \cite{Jankov} {\it characteristic formula} of a finite rooted frame $\mathfrak F$ is denoted by $\chi (\mathfrak F)$. \footnote{Jankov originally defined $\chi (\mathfrak F)$ for any subdirectly irreducible finite Heyting algebra. By duality, finite rooted frames are tantamount to finite s.i. algebras and hence we proceed as if $\chi (\mathfrak F)$ were defined for frames.} \begin{theorem}\label{Jankov} The pair $(\mathsf L(\mathfrak F),\mathsf L(\chi (\mathfrak F))$ is a splitting pair, for any finite frame $\mathfrak F$. Thus, for any intermediate logic {\sf L} and any finite frame $\mathfrak F$, the logic {\sf L} omits $\mathfrak F$ iff $\chi (\mathfrak F) \in \mathsf L$. \end{theorem} For instance $\mathsf {KC} =\mathsf {L}(\{\chi ({\mathfrak F_2}) \}$ is the splitting logic. If $\{\mathsf L_i\}_{i\in I}$ is a family of splitting logics, then $\mathsf L(\bigcup_{i\in I}\mathsf L_i)$ is called {\it a union splitting}. For instance, $\mathsf {LC} =\mathsf {L}(\{\chi ({\mathfrak F_2}), \chi ({\mathfrak R_2}) \})$ is a union splitting but not a splitting. \begin{corollary}\label{Jankov2} If $\{(\mathsf L'_{i},\mathsf L_{i})\}_{i\in I}$ is a family of splitting pairs and $\mathsf L=\mathsf L(\bigcup_{i\in I}\mathsf L_i)$, then $\mathsf L$ is a union splitting and, for any intermediate logic $\mathsf L'$, either $\mathsf L' \subseteq\mathsf L'_i$ for some $i\in I$, or $\mathsf L \subseteq\mathsf L'$.\end{corollary} \subsection{The Problem of Unification.}\label{UP} A substitution $\varepsilon$ is a \emph{unifier} for a formula $A$ in a logic $\mathsf L$ (an $\mathsf L$-\emph{unifier} for $A$) if $ \varepsilon(A)\in\mathsf{L}$. In any intermediate logic, the set of unifiable formulas coincides with the set of consistent formulas. A set $\Sigma$ of {\sf L}-unifiers for $A$ is said to be {\it complete}, if for each {\sf L}-unifier $\mu$ of $A$, there is a unifier $\varepsilon\in \Sigma$ such that $\varepsilon\preccurlyeq_{\sf L}\mu$. The unification type of {\sf L} is $1$ (in other words, unification in {\sf L} is {\it unitary}) if the set of unifiers of any unifiable formula $A$ contains a least, with respect to $\preccurlyeq_L$, element called {\it a most general unifier} of $A$, (an mgu of $A$). In other words, unification in {\sf L} is unitary if each unifiable formula has a one-element complete set of unifiers. The unification type of {\sf L} is $\omega$ (unification in {\sf L} is {\it finitary}), if it is not $1$ and each unifiable formula has a finite complete set of unifiers. The unification type of {\sf L} is $\infty$ (unification in {\sf L} is {\it infinitary}) if it is not $1$, nor $ \omega$, and each unifiable formula has a minimal (with respect to inclusion) complete set of unifiers. The unification type of {\sf L} is $0$ (unification in {\sf L} is {\it nullary}) if there is a unifiable formula which has no minimal complete set of unifiers. In a similar way one defines the unification type of any {\sf L}-unifiable formula. 
The unification type of a logic is the worst unification type of its unifiable formulas.\\ Ghilardi \cite{Ghi2} introduced projective unifiers and formulas; an $\mathsf L$-unifier $\varepsilon$ for $A$ is called \emph{projective} if $A \vdash_{\mathsf L}\varepsilon(x) \leftrightarrow x$, for each variable $x$ (and consequently $A \vdash_{\mathsf L}\varepsilon(B) \leftrightarrow B$, for each $B$). A formula $A$ is said to be {\it projective} in $\mathsf L$ (or $\mathsf L$-projective) if it has a projective unifier in $\mathsf L$. It is said that a logic $\mathsf L$ enjoys {\it projective unification} if each {\sf L}-unifiable formula is $\mathsf L$-projective. An $\mathsf L$-projective formula may have many projective unifiers, non-equivalent in $\mathsf L$, and each of its {\sf L}-projective unifiers is an mgu: \begin{lemma}\label{proj} If $\varepsilon$ is an {\sf L}-projective unifier for $A$ and $\sigma$ is any {\sf L}-unifier for $A$, then $\sigma\circ\varepsilon=_\mathsf{L}\sigma$. \end{lemma} Thus, projective unification implies unitary unification. If $A\in \mathsf{Fm^n}$ is {\sf L}-projective, then $A$ has a projective unifier $\varepsilon\colon\{x_1,\dots,x_n\}\to \mathsf{Fm}^n$ that is an mgu {\it preserving the variables of $A$} (which is not always the case with unitary unification). In contrast to unitary unification, projective unification is also monotone: \begin{lemma}\label{mon} If $A$ is $\mathsf L$-projective and $\mathsf L\subseteq \mathsf L'$, then $A$ is $\mathsf L'$-projective. \end{lemma} Ghilardi \cite{Ghi2} gives a semantical characterization of projective formulas. The condition (ii) below is called {\it the extension property}:\footnote{More specifically, the theorem says that the class of models of a projective formula enjoys the extension property.} \begin{theorem}\label{niu2} Let $\mathbf{F}$ be a class of finite po-frames and $\mathsf{L}=\mathsf{L}(\mathbf{F})$. The following are equivalent:\\ (i) $ A$ is {\sf L}-projective;\\ (ii) for every $n$-model $\mathfrak{M}^n=(W,\leq,w_0,V^n)$ over a po-frame $(W,\leq,w_0)$ of the logic {\sf L}:\\ if $(\mathfrak{M}^n)_w\Vdash A$ for each $w\not=w_0$, then $\mathfrak{N}^n\Vdash A$ for some variant $\mathfrak{N}^n$ of $\mathfrak{M}^n$. \end{theorem} Wro\'{n}ski \cite{Wro1,Wro2} proved that \begin{theorem}\label{projj} An intermediate logic {\sf L} has projective unification iff \ {\sf LC} $\subseteq$ {\sf L}. \end{theorem} There are unitary logics which are not projective. Following Ghilardi and Sacchetti \cite{Ghisac}, unification in {\sf L} is said to be \emph{filtering} if, given two unifiers of any formula $A$, one can find a unifier that is more general than both of them. Unitary unification is filtering. If unification is filtering, then every unifiable formula either has an mgu or has no minimal complete set of unifiers (unification is nullary). It is known, see e.g. \cite{dzSpl}, that \begin{theorem}\label{fil} Unification in any intermediate logic {\sf L} is filtering iff \ {\sf KC} $\subseteq$ {\sf L}. \end{theorem} If $\varepsilon,\sigma\colon\{x_1,\dots,x_n\}\to\mathsf{Fm}^k$ are unifiers of a formula $A(x_1,\dots,x_n)$ in (any extension of) {\sf KC}, then, as a unifier more general than both $\varepsilon$ and $\sigma$, the following substitution $\mu$ can be taken (where $y$ is a fresh variable, i.e.\ $y\not\in\mathsf{Fm}^k$): $$\mu(x_i)\qquad=\qquad(\varepsilon(x_i)\land \neg y) \quad \lor \quad (\sigma(x_i)\land \neg\neg y),\qquad \mbox{for $i=1,\dots,n$.}$$ Thus, unifiers in filtering unification {\it introduce new variables}.
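The combination step above is purely syntactic and can be carried out mechanically. The following minimal sketch (ours; the string-based formula syntax and the two sample unifiers are ad hoc, and no provability check is attempted) builds $\mu$ from two given unifiers $\varepsilon$ and $\sigma$ exactly as in the displayed formula.

\begin{verbatim}
# A minimal sketch (ours) of the combination of two unifiers used for filtering
# unification: mu(x_i) = (eps(x_i) & ~y) | (sig(x_i) & ~~y), with y fresh.
# Formulas are plain strings; nothing is proved here.

def combine(eps, sig, fresh="y"):
    """Return the substitution mu on the common domain of eps and sig."""
    assert eps.keys() == sig.keys()
    return {x: "(({e}) & ~{y}) | (({s}) & ~~{y})".format(e=eps[x], s=sig[x], y=fresh)
            for x in eps}

# Two unifiers of the (unifiable) formula  x1 | ~x1 : both send x1 to a theorem.
eps = {"x1": "z -> z"}
sig = {"x1": "~(z & ~z)"}
print(combine(eps, sig)["x1"])
# ((z -> z) & ~y) | ((~(z & ~z)) & ~~y)
\end{verbatim}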
We have, see \cite{dzSpl, Ghi2}, \begin{theorem}\label{kc} {\sf KC} is the least intermediate logic with unitary unification. All extensions of {\sf KC} have nullary or unitary unification. All intermediate logics with finitary unification are included in {\sf L}($\mathfrak{F}_{2}$), the logic determined by the `fork frame' $\mathfrak {F}_{2}$, see Figure \ref{8fames}. ({\sf L}($\mathfrak{F}_{2}$),{\sf KC}) is a splitting pair of the lattice of intermediate logics. \end{theorem} Logics with finitary and unitary unification are separated by the splitting ({\sf L}($\mathfrak{F}_{2}$),{\sf KC}). Let us agree that having {\it good unification} means having either unitary or finitary unification. A logic {\sf L} with good unification has unitary or finitary unification depending only on whether or not {\sf L} contains {\sf KC}. Our aim is to distinguish logics with good unification from those with nullary unification. We show later that locally tabular intermediate logics with infinitary unification do not exist at all. Let us notice that the splitting generated by ({\sf L}($\mathfrak{F}_{2}$),{\sf KC}) is irrelevant for logics with nullary unification; there are extensions of {\sf KC}, as well as sublogics of {\sf L}($\mathfrak{F}_{2}$), that have nullary unification. A logic {\sf L} is said to have {\it projective approximation} if, for each formula $A$, one can find a finite set $\Pi(A)$ of {\sf L}-projective formulas such that:\\ (i) \ $ \mathsf{Var}(B)\subseteq \mathsf{Var}(A)$ and $B\vdash_\mathsf{L}A$, for each $B\in \Pi(A)$;\\ (ii) each {\sf L}-unifier of $A$ is an {\sf L}-unifier of some $B\in\Pi(A)$.\footnote{Ghilardi \cite{Ghi1,Ghi2}, instead of assuming $\Pi(A)$ is finite, postulates $deg(B)\leq deg(A)$, for each $B\in \Pi(A)$, from which it follows that $\Pi(A)$ is finite. The condition $deg(B)\leq deg(A)$ is relevant for logics with the disjunction property, like {\sf INT}, but is irrelevant for locally tabular logics, where $\mathsf{Var}(B) \subseteq \mathsf{Var}(A)$ is sufficient. We decided, therefore, to modify Ghilardi's formulations slightly, preserving, we hope, his ideas. } If a finite $\Pi(A)$ exists, we can assume that all $B\in\Pi(A)$ are maximal (with respect to $\vdash_{\sf L}$) {\sf L}-projective formulas fulfilling (i). But, even if there are finitely many maximal {\sf L}-projective formulas fulfilling (i), we cannot be sure that (ii) is fulfilled. \begin{theorem}\label{praprox} Each logic with projective approximation has finitary (or unitary) unification. \end{theorem} Logics with projective approximation play a similar role for finitary unification as projective logics do for unitary unification, even though projective approximation is not monotone. Ghilardi \cite{Ghi2} proved that \begin{theorem}\label{int} Intuitionistic propositional logic {\sf INT} enjoys projective approximation and hence unification in {\sf INT} is finitary. \end{theorem} \section{Intuitionistic Kripke $n$-Models.}\label{km} \subsection{p-Morphisms.}\label{pM} Let $(W,R,w_0,V^n)$ and $(U,S,u_0,V'^n)$ be $n$-models.
A mapping $p\colon W{\to} U$, from $W$ \underline{onto} $U$, is said to be a {\it p-morphism of their frames}, $p\colon (W,R,w_0)\to (U,S,u_0)$, if\\ \indent(i) $wRv\Rightarrow p(w)Sp(v), \quad\mbox{for any } w,v\in W$;\\ \indent (ii) $p(w)Sa\Rightarrow \exists_{v\in W}\bigl(wRv\land p(v)=a\bigr), \quad \mbox{for any }w\in W \ \mbox{and } \ a\in U$;\\ \indent (iii) $p(w_0)=u_0$.\\ {\it A p-morphism of $n$-models}, $p\colon (W,R,w_0,V^n)\to (U,S,u_0,V'^n)$, fulfills (additionally)\\ \indent (iv) $V^n(w)=V'^n(p(w))$, for any $w\in W$. \noindent If $p\colon\mathfrak{M}^n\to\mathfrak{N}^n$ is a p-morphism, then $\mathfrak{N}^n$ is called a p-morphic image (or reduct, see \cite{ZWC}) of $\mathfrak{M}^n$ and we write $p(\mathfrak{M}^n)=\mathfrak{N}^n$. Reducing $\mathfrak{M}^n$ (by a p-morphism), we preserve its logical properties. In particular, $p(\mathfrak{M}^n)\thicksim\mathfrak{M}^n$ as \begin{lemma}\label{pM0} If $p\colon \mathfrak{M}^n\to \mathfrak{N}^n$ is a p-morphism of $n$-models, $w\in W$ and $A\in\mathsf{Fm}^n$, then $$\mathfrak{M}^n\Vdash_{w}A\quad\Leftrightarrow\quad p(\mathfrak{M}^n)\Vdash_{p(w)}A.$$ \end{lemma} p-Morphisms are also used in modal logic. The above property holds in greater generality: it is also valid for modal models and modal formulas, and it can be shown without assuming that $R$ is a pre-order and $V^n$ is monotone. \begin{example}\label{pMe} Let $\mathfrak{M}^n=(W,R,w_0,V^n)$ be an $n$-model in which the pre-order $R$ is not a partial order. Let $w\thickapprox v\Leftrightarrow wRv\land vRw$, for any $w,v\in W$. Then $\thickapprox$ is an equivalence relation on $W$ and one can easily show that the canonical mapping $p(w)=[w]_\thickapprox$, for any $w\in W$, is a p-morphism from $\mathfrak{M}^n$ onto the quotient model $$\mathfrak{M}^n\slash\!\!\thickapprox\quad =\quad \bigl(W\slash\!\!\thickapprox,R\slash\!\!\thickapprox,[w_0]_\thickapprox,V^n\!\!\slash\!\thickapprox\bigr).$$ Reducing all $R$-clusters to single points, we obtain an equivalent $n$-model over a po-set; hence po-sets (rather than pre-orders) are often taken as intuitionistic frames. \hfill\qed \end{example} If a p-morphism $p\colon\mathfrak{M}^n\to\mathfrak{N}^n$ is one-to-one, then $w R v\Leftrightarrow p(w) S p(v),$ for any $w,v\in W$, which means that $p$ is {\it an isomorphism}; if there is an isomorphism between the $n$-models, we write $\mathfrak{M}^n\equiv\mathfrak{N}^n$. It is usual to identify isomorphic objects. \subsection{Bisimulations.}\label{biss} Bisimulations (between Kripke frames) were introduced by K. Fine \cite{fine}, by imitating Ehrenfeucht games. They have found many applications. In particular, S. Ghilardi \cite{Ghi2} used bounded bisimulations to characterize projective formulas. We show that bisimulations are closely related to p-morphisms. In our approach we follow A. Patterson \cite{Pat}.
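Before relating p-morphisms to bisimulations, the conditions (i)--(iv) above can be recorded as a small executable check. The sketch below (ours; the encoding of finite $n$-models as tuples of worlds, relation, root and valuation is ad hoc) verifies the p-morphism conditions for a candidate map between two finite $n$-models, and the example collapses a two-element $R$-cluster as in Example \ref{pMe}.

\begin{verbatim}
# A minimal sketch (ours) of the p-morphism conditions (i)-(iv) above,
# for finite n-models coded as (worlds, relation, root, valuation).

def is_p_morphism(p, M, N):
    (W, R, w0, V), (U, S, u0, V2) = M, N
    onto = set(p[w] for w in W) == set(U)
    i   = all((p[w], p[v]) in S for (w, v) in R)                 # (i)
    ii  = all(any((w, v) in R and p[v] == a for v in W)          # (ii)
              for w in W for a in U if (p[w], a) in S)
    iii = p[w0] == u0                                            # (iii)
    iv  = all(V[w] == V2[p[w]] for w in W)                       # (iv)
    return onto and i and ii and iii and iv

# Collapsing the two-element R-cluster {1, 2} onto a single point.
M = ({0, 1, 2},
     {(0, 0), (1, 1), (2, 2), (0, 1), (0, 2), (1, 2), (2, 1)},
     0,
     {0: set(), 1: {"x1"}, 2: {"x1"}})
N = ({0, 1}, {(0, 0), (1, 1), (0, 1)}, 0, {0: set(), 1: {"x1"}})
print(is_p_morphism({0: 0, 1: 1, 2: 1}, M, N))   # True
\end{verbatim}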
A binary relation $B$ on $W$ is {\it a bisimulation of the frame} $(W,R,w_0)$ if $$wBv\Rightarrow\forall_{w'}\exists_{v'}(wRw'\Rightarrow vRv'\land w'Bv')\land\forall_{v'}\exists_{w'}(vRv'\Rightarrow wRw'\land w'Bv').$$ \begin{figure}[H] \unitlength1cm \begin{picture}(3,2) \thicklines \put(5,2){\vector(1,0){1.9}} \put(5,2){\vector(0,-1){1.9}} \put(7,2){\vector(0,-1){1.9}} \put(5,0){\vector(1,0){1.9}} \put(5,0){\circle{0.1}} \put(5,2){\circle{0.1}} \put(7,0){\circle{0.1}} \put(7,2){\circle{0.1}} \put(4.6,2){\mbox{$w$}} \put(7.2,2){\mbox{$v$}} \put(4.6,0){\mbox{$w'$}} \put(7.2,0){\mbox{$v'$}} \put(6,2.2){\mbox{$B$}} \put(6,0.2){\mbox{$B$}} \put(4.6,1){\mbox{$R$}} \put(7.2,1){\mbox{$R$}} \end{picture}\caption{Bisimulation}\label{bis}\end{figure} \noindent Note that $wBv\Rightarrow\forall_{w'}\exists_{v'}(wRw'\Rightarrow vRv'\land w'Bv')$ suffices if $B$ is symmetric. {\it A bisimulation of the $n$-model} $(W,R,w_0,V^n)$ additionally fulfils $V^n(w)=V^n(v)$ if $wBv$. \begin{lemma}\label{pMf} (i) If $B$ is a bisimulation of $\mathfrak{M}^n$, then $B\!\!\upharpoonright_{(W)_w}$ is a bisimulation of $(\mathfrak{M}^n)_w$;\\ (ii) if $B$ is a bisimulation of $(\mathfrak{M}^n)_w$, then $B$ is a bisimulation of $\mathfrak{M}^n$; for any $w\in W$. \end{lemma} \begin{lemma}\label{pM4} If $B$ is a bisimulation (of a frame or an $n$-model), then the least equivalence relation $B^\star$ containing $B$ is also a bisimulation. \end{lemma} \begin{proof} A proof of this lemma can be found in \cite{Pat}. Let us only specify properties of bisimulations which are useful here. \noindent(i) \quad $\{(w,w)\colon w\in W\}$ is a bisimulation. \noindent (ii)\quad $B$ is a bisimulation $\Rightarrow$ $B^{-1}$ is a bisimulation. \noindent (iii)\quad $\forall_i(B_i$ is a bisimulation) $\Rightarrow \quad \bigcup_iB_i$ is a bisimulation. \noindent (iv)\quad $B$ is a bisimulation $\Rightarrow$ the transitive closure of $B$ is a bisimulation. \hfill\qed\end{proof} Suppose that $B$ is an equivalence bisimulation of an $n$-model $\mathfrak{M}^n=(W,R,w_0,V^n)$. Let us define $\mathfrak{M}^n\slash B=(W\slash B,R\slash B,[w_0]_B,[V]^n)$ where $W\slash B=\{[w]_B\colon w\in W\}$, and $[V]^n([w]_B)=V^n(w)$ for any $w\in W$, and $$[w]_B\ R\slash B \ [v]_B \quad\Leftrightarrow\quad \exists_{w'v'}\bigl(wBw'\land vBv'\land w'Rv'\bigr).$$ \begin{theorem}\label{pM7} If $B$ is an equivalence bisimulation of an $n$-model $\mathfrak{M}^n$, then $\mathfrak{M}^n\slash B$ is an $n$-model and the canonical mapping $[\ ]_B\colon W\to W\slash B$ is a p-morphism of the $n$-models.\end{theorem} \begin{proof} We should show that $R\slash B$ is a pre-order. If $w=v$, one can take $w'=v'=w$ (in the definition of $R\slash B$) to show $[w]_B\ R\slash B \ [w]_B$. Thus, $R\slash B$ is reflexive. Suppose that $[w]_B\ R\slash B\ [v]_B\ R\slash B\ [u]_B$, for some $w,v,u\in W$. Then $wBw'\land vBv'\land w'Rv'$ and $vBv''\land uBu''\land v''Ru''$, for some $w',v',v'',u''\in W$. But $B$ is an equivalence, hence $v''Bv'$ and, by $v''Ru''$, we get $v'Ru'\land u''Bu'$, for some $u'\in W$, as $B$ is a bisimulation. By transitivity of $R$, we have $w'Ru'$ and $uBu'$ as $B$ is an equivalence relation. Thus, $[w]_B\ R\slash B\ [u]_B$; the relation $R\slash B$ is transitive. There remains to show that the canonical mapping is a p-morphism.\\ (i) If $wRv$, then $[w]_B\ R\slash B\ [v]_B$, by the definition of $R\slash B$. \noindent (ii) Suppose that $[w]_B\ R\slash B\ [v]_B$, for some $w,v\in W$. Then $wBw'$, and $vBv'$, and $w'Rv'$, for some $w',v'\in W$. 
As $B$ is a bisimulation, $wRv''\land v''Bv'$, for some $v''\in W$. Thus, $wRv''$ and $[v'']_B=[v]_B$, as required. The conditions (iii) and (iv) are obviously fulfilled.\hfill\qed\end{proof} \begin{theorem}\label{pMp} If $B$ and $B'$ are equivalence bisimulations of an $n$-model $\mathfrak{M}^n=(W,R,w_0,V^n)$ and $B'\subseteq B$, then there is a p-morphism $q\colon\mathfrak{M}^n\slash B'\to\mathfrak{M}^n\slash B$ such that the diagram in Figure \ref{pms} commutes.\end{theorem} \begin{figure}[H] \unitlength1cm \begin{picture}(4,1.5) \thicklines \put(9,2){\vector(0,-1){1.9}} \put(5,2){\vector(2,-1){3.9}} \put(5,2){\vector(1,0){3.9}} \put(9,0){\circle{0.1}} \put(5,2){\circle{0.1}} \put(9,2){\circle{0.1}} \put(9.3,2){\mbox{$\mathfrak{M}^n\slash B'$}} \put(4.2,2){\mbox{$\mathfrak{M}^n$}} \put(9.3,0){\mbox{$\mathfrak{M}^n\slash B$}} \put(9.2,1){\mbox{$q$}} \put(7.3,2.2){\mbox{$[\ ]_{B'}$}} \put(6.3,0.7){\mbox{$[\ ]_{B}$}} \end{picture} \caption{Comparison of Bisimulations.}\label{pms} \end{figure} \begin{proof} Let us define $q([w]_{B'})=[w]_B$ and notice that the mapping is well-defined and maps $W\slash B'$ onto $W\slash B$. We should only check that $q$ is a p-morphism. Note that the conditions (i), (iii) and (iv) are quite obvious. (ii) Suppose that $q([w]_{B'})R\slash B\ [u]_B$. By the definition of $R\slash B$, there are $w',u'$ such that $wBw'Ru'Bu$. Since $B$ is a bisimulation and $wBw'Ru'$, there is a $u''$ such that $wRu''Bu'$. Thus, $[w]_{B'}R\slash B'\ [u'']_{B'}$ and $q([u'']_{B'})=[u'']_B=[u]_B$, as required. \hfill\qed\end{proof} \begin{theorem}\label{pMr} If $p:\mathfrak{M}^n\to \mathfrak{N}^n$ is a p-morphism of $n$-models, then $$wBv\quad\Leftrightarrow\quad p(w)=p(v)$$ is an equivalence bisimulation of the $n$-model $\mathfrak{M}^n$, and $\mathfrak{M}^n\slash B\equiv\mathfrak{N}^n$.\end{theorem} \begin{proof}Let $wBv$ and $wRw'$ for some $w,w',v\in W$ (see Figure \ref{bis}). Then $p(w)=p(v)$ and $p(w)Sp(w')$, where $S$ is the accessibility relation in $\mathfrak{N}^n$. Thus, $p(v)Sp(w')$. Since $p$ is a p-morphism, $vRv'$ and $p(v')=p(w')$, for some $v'\in W$. Thus, $vRv'$ and $w'Bv'$. In the same way one shows that $wBv$ and $vRv'$ give us $wRw'$ and $w'Bv'$, for some $w'$, and we obviously have $V^n(w)=V^n(v)$ if $wBv$. The $n$-models $\mathfrak{M}^n\slash B$ and $\mathfrak{N}^n$ are isomorphic as the mapping $i([w]_B)=p(w)$ is well defined, one-to-one and p-morphic. \hfill\qed\end{proof} Bisimulations preserve such properties of frames as reflexivity, symmetry, transitivity; consequently, p-morphic images preserve these properties, as well. There are, however, some properties which are not preserved by p-morphisms.
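The quotient construction of Theorem \ref{pM7} is also easy to carry out explicitly. The following minimal sketch (ours; the representation of models matches the previous sketch and is ad hoc) computes $\mathfrak{M}^n\slash B$ for a finite $n$-model and an equivalence bisimulation $B$ given by a choice of class representatives; an example of a frame property that is {\it not} preserved follows right after it.

\begin{verbatim}
# A minimal sketch (ours) of the quotient M/B defined above: worlds become
# B-classes, [w] R/B [v] iff w'Rv' for some w' B w and v' B v, and the valuation
# is inherited (well defined because B is a bisimulation of the n-model).

def quotient(M, cls):
    # 'cls' maps each world to a canonical representative of its B-class
    W, R, w0, V = M
    Wq = set(cls[w] for w in W)
    Rq = set((cls[w], cls[v]) for (w, v) in R)
    Vq = {cls[w]: V[w] for w in W}
    return Wq, Rq, cls[w0], Vq

M = ({0, 1, 2},
     {(0, 0), (1, 1), (2, 2), (0, 1), (0, 2), (1, 2), (2, 1)},
     0,
     {0: set(), 1: {"x1"}, 2: {"x1"}})
Wq, Rq, rq, Vq = quotient(M, {0: 0, 1: 1, 2: 1})
print(sorted(Wq), sorted(Rq), rq, Vq)
# [0, 1] [(0, 0), (0, 1), (1, 1)] 0 {0: set(), 1: {'x1'}}
\end{verbatim}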
\begin{example}\label{pMex} Let $W=\{u_i\colon i\geq 0\}\cup \{v_i\colon i\geq 0\}\cup\{w_0\}$ and a partial order $R$ on $W$, and a bisimulation $B$ on $W$, are defined as in the following picture (see Figure \ref{asym}) \begin{figure}[H] \unitlength1cm \begin{picture}(3,4) \thicklines \put(2,0){\circle{0.1}} \put(1,1){\circle{0.1}} \put(3,1){\circle{0.1}} \put(1,2){\circle{0.1}} \put(3,2){\circle{0.1}} \put(1,3){\circle{0.1}} \put(3,3){\circle{0.1}} \put(1,4){\circle{0.1}} \put(3,4){\circle{0.1}} \put(2,0){\vector(-1,1){0.9}} \put(2,0){\vector(1,1){0.9}} \put(3,1){\vector(-2,1){1.9}} \put(1,1){\vector(2,1){1.9}} \put(2.3,0){\mbox{$w_0$}} \put(0.5,1){\mbox{$u_0$}} \put(2.3,1){\mbox{$v_0$}} \put(0.5,2){\mbox{$u_1$}} \put(2.3,2){\mbox{$v_1$}} \put(0.5,3){\mbox{$u_2$}} \put(2.3,3){\mbox{$v_2$}} \put(0.5,4){\mbox{$u_3$}} \put(2.3,4){\mbox{$v_3$}} \put(3,2){\vector(-2,1){1.9}} \put(1,2){\vector(2,1){1.9}} \put(3,3){\vector(-2,1){1.9}} \put(1,3){\vector(2,1){1.9}} \put(6,0){\circle{0.1}} \put(5,1){\circle{0.1}} \put(7,1){\circle{0.1}} \put(6,0){\vector(-1,1){0.9}} \put(6,0){\vector(1,1){0.9}} \put(5,1){\vector(1,0){1.9}} \put(7,1){\vector(-1,0){1.9}} \put(6,4){\mbox{$[u_i]_B=\{u_0,u_1,u_2,\dots\}$}} \put(6,3){{\mbox{$[v_i]_B=\{v_0,v_1,v_2,\dots\}$}}} \put(6,2){\mbox{$[w_0]_B=\{w_0\}$}} \put(6.3,0){\mbox{$[w_0]_B$}} \put(4.1,1){\mbox{$[u_0]_B$}} \put(7.3,1){\mbox{$[v_0]_B$}}\end{picture}\caption{Weak Asymmetry is not Preserved.}\label{asym}\end{figure} \noindent Thus, a p-morphic image of a partial order is not a partial order (only pre-order). \hfill\qed \end{example} Note that the set $W$ in the above Example is infinite which is essential as \begin{corollary}\label{fin} Any p-morhic image of any finite po-frame is a po-frame. \end{corollary} \subsection{p-Irreducible $n$-Models.}\label{pirm} An $n$-model $\mathfrak{M}^n$ is said to be {\it p-irreducible} if each p-morphism $p\colon \mathfrak{M}^n\to \mathfrak{N}^n$, for any $n$-model $\mathfrak{N}^n$, is an isomorphism. Thus, any p-morphic image of any irreducible $n$-model is its isomorphic copy.\footnote{ The concept of p-irreducibility, in contrast to other concepts in this Section, would make no sense for frames.} Irreducible $n$-models are po-sets, see Example \ref{pMe}, and we show any $n$-model can be reduced to a p-irreducible one. \begin{theorem}\label{Irr} For each $n$-model $\mathfrak{M}^n$ there exists a p-irreducible $n$-model $\mathfrak{N}^n$ which is a p-morphic image of $\mathfrak{M}^n$ (and $\mathfrak{N}^n$ is unique up to $\equiv$). \end{theorem} \begin{proof} Let $\mathfrak{M}^n=(W,R,w_0,\{\mathfrak{f}^n_w\}_{w\in W})$ and $B$ be the least equivalence on $W$ containing $\bigcup\{B_i\colon B_i \ \mbox{is a bisimulation on } \mathfrak{M}^n\}.$ By Lemma \ref{pM4}, $B$ is the greatest bisimulation on $\mathfrak{M}^n$. Take $\mathfrak{N}^n=\mathfrak{M}^n\!\!\slash B$, see Theorem \ref{pM7}. Since the composition of any two p-morphisms is a p-morphism, any p-morphic image $\mathfrak{N'}^n$ of $\mathfrak{N}^n$ would be a p-morphic image of $\mathfrak{M}^n$. Thus, by maximality of $B$, we would get, by Theorem \ref{pMp}, an isomorphism $p'\colon\mathfrak{N'}^n\equiv\mathfrak{N}^n$ which means $\mathfrak{N}^n$ is p-irreducible. 
The uniqueness of $\mathfrak{N}^n$ also follows; if $\mathfrak{N'}^n$ were another p-irreducible p-morphic image of $\mathfrak{M}^n$, we would get, by Theorems \ref{pMp} and \ref{pMr}, a p-morphism $p'\colon\mathfrak{N'}^n\to\mathfrak{N}^n$, which would mean that $\mathfrak{N'}^n$ and $\mathfrak{N}^n$ are isomorphic. \hfill\qed\end{proof} The following theorem could serve as another characterization of p-irreducible $n$-models. \begin{theorem}\label{pM5} If an $n$-model $\mathfrak{M}^n$ is p-irreducible, then for any $n$-model $\mathfrak{N}^n$ there is at most one p-morphism $p\colon \mathfrak{N}^n\to \mathfrak{M}^n$. \end{theorem} \begin{proof} Let $\mathfrak{M}^n=(W,R,w_0,V^n)$ be p-irreducible and $p,q\colon \mathfrak{N}^n\to \mathfrak{M}^n$ be two (different) p-morphisms for some $\mathfrak{N}^n=(U,S,u_0,V'^n)$. Take $B=\{(p(v),q(v))\colon v\in U\}$ and let us show that $B$ is a bisimulation on $\mathfrak{M}^n$. This would be a contradiction as, if $B^\star$ were the least equivalence relation containing $B$ (see Lemma \ref{pM4}), $[\ ]_{B^\star}\colon \mathfrak{M}^n\to\mathfrak{M}^n\slash B^\star$ would be a non-isomorphic p-morphism, see Theorem \ref{pM7}, and it would mean that $\mathfrak{M}^n$ were not p-irreducible. Let $p(v)Rw$, for some $v\in U$ and $w\in W$. As $p$ is a p-morphism, $p(v')=w$ and $vSv'$ for some $v'\in U$. Then $q(v)Rq(v')$, as $q$ is a p-morphism, and $wBq(v')$ as $w=p(v')$. Similarly, if $q(v)Rw$, for some $v\in U$ and $w\in W$, then $q(v')=w$ and $vSv'$, for some $v'\in U$, and hence $p(v)Rp(v')$ and $p(v')Bw$ (as $w=q(v')$). \hfill\qed\end{proof} \begin{theorem}\label{pM6} If $\mathfrak{M}^n$ is p-irreducible, then $(\mathfrak{M}^n)_w$ is p-irreducible for each $w\in W$.\end{theorem} \begin{proof} Let $\mathfrak{M}^n=(W,R,w_0,V^n)$ and suppose $(\mathfrak{M}^n)_w$ is not p-irreducible for some $w\in W$. By Theorem \ref{pMr}, there is a (non-trivial) bisimulation $B$ on $(\mathfrak{M}^n)_w$. Since (by Lemma \ref{pMf}) $B$ is a bisimulation of $\mathfrak{M}^n$, if we extend $B$ (see Lemma \ref{pM4}) to an equivalence bisimulation $B^\star$ of $\mathfrak{M}^n$, we get a (non-isomorphic) p-morphism of $\mathfrak{M}^n$, see Theorem \ref{pM7}. Thus, $\mathfrak{M}^n$ is not p-irreducible. \hfill\qed\end{proof} \subsection{Finite $n$-Models.}\label{Fin} It follows from Example \ref{pMe} that, without losing any generality, we can confine ourselves to frames\slash $n$-models defined over partial orders (not pre-orders). So, in what follows, we assume that all frames\slash $n$-models are (defined over) po-sets even though we (sometimes) keep the notation $\mathfrak{M}^n=(W,R,w_0,V^n)$. We examine here specific properties of finite $n$-models such as Corollary \ref{fin}. \begin{theorem}\label{lfi2} If $\mathfrak{M}^n$ is a finite $n$-model, then one can define $\Delta(\mathfrak{M}^n)\in \mathsf{Fm}^n$ (called the {\it character} of $\mathfrak{M}^n$)\footnote{The explicit definition of the character can be found in many papers; for instance, see Ghilardi \cite{Ghi2}, p.869. The idea of characterizing finite structures by formulas is due to Jankov \cite{Jankov}, but the character should not be confused with the characteristic formula of a frame.
If we consider $n$-models of a given locally tabular logic {\sf L}, where there are only finitely many (up to $=_{\sf L}$) formulas in $n$ variables, one could define the character of any finite $n$-model as the conjunction of the formulas (out of the finitely many) which are true in the model.} such that \ $\mathfrak{N}^n\Vdash \Delta(\mathfrak{M}^n) \quad\Leftrightarrow\quad \mathsf{Th}(\mathfrak{M}^n)\subseteq\mathsf{Th}(\mathfrak{N}^n)$, \ for any $n$-model $\mathfrak{N}^n$. \end{theorem} The next theorem is due to Patterson \cite{Pat}: \begin{theorem}\label{pat} If $\{\mathsf{Th}((\mathfrak{M}^n)_w)\}_{w\in W}$ is finite (which is the case when $\mathfrak{M}^n$ is finite), then $$\mathsf{Th}(\mathfrak{M}^n)\subseteq\mathsf{Th}(\mathfrak{N}^n)\quad\Leftrightarrow\quad\mathfrak N^n\thicksim(\mathfrak M^n)_w, \ \mbox{for some} \ w\in W,\quad \mbox{for any $n$-model $\mathfrak{N}^n$}.$$ \end{theorem} \begin{proof} The implication $(\Leftarrow)$ is obvious by Lemma \ref{pMm}. Let us prove $(\Rightarrow)$. If not all of $\mathsf{Th}(\mathfrak{N}^n)$ is true at $(\mathfrak M^n)_w$, we pick $A_w\in\mathsf{Th}(\mathfrak{N}^n)$ such that $A_w\not\in\mathsf{Th}((\mathfrak{M}^n)_w)$, and we put $A_w=\top$ otherwise. As $\{\mathsf{Th}((\mathfrak{M}^n)_w)\colon w\in W\}$ is finite, we may choose $A_w$ depending only on $\mathsf{Th}((\mathfrak{M}^n)_w)$; we take $A=\bigwedge A_w$ (a finite conjunction) and notice that $\mathfrak{M}^n\Vdash_wA$ means that $\mathsf{Th}(\mathfrak{N}^n)\subseteq \mathsf{Th}((\mathfrak M^n)_w)$. If a formula not in $\mathsf{Th}(\mathfrak{N}^n)$ is true at $(\mathfrak M^n)_w$, we pick $B_w\not\in\mathsf{Th}(\mathfrak{N}^n)$ such that $B_w\in\mathsf{Th}((\mathfrak{M}^n)_w)$ (or $B_w=\bot$ if $\mathsf{Th}(\mathfrak{N}^n)\supseteq \mathsf{Th}((\mathfrak M^n)_w)$), for each $w\in W$. Take $B=\bigvee B_w$ and notice that $\mathfrak{M}^n\not\Vdash_wB$ yields $\mathsf{Th}(\mathfrak{N}^n)\supseteq \mathsf{Th}((\mathfrak M^n)_w)$. Clearly, $(A\Rightarrow B)\not\in\mathsf{Th}(\mathfrak{N}^n)$. Thus, $(A\Rightarrow B)\not\in\mathsf{Th}(\mathfrak{M}^n)$ and hence $\mathfrak{M}^n\Vdash_wA$ and $\mathfrak{M}^n\not\Vdash_w B$, for some $w\in W$, and this means that $\mathsf{Th}(\mathfrak{N}^n)=\mathsf{Th}((\mathfrak M^n)_w)$. \end{proof} \begin{theorem}\label{GB} If $\{\mathsf{Th}((\mathfrak{M}^n)_w)\}_{w\in W}$ is finite, then the greatest bisimulation $B$ of $\mathfrak{M}^n$ is: $$wBv \quad\Leftrightarrow\quad (\mathfrak{M}^n)_w\thicksim(\mathfrak{M}^n)_v.$$ \end{theorem} \begin{proof} Let $wBv\land wRw'$. Then $\mathsf{Th}((\mathfrak{M}^n)_v)=\mathsf{Th}((\mathfrak{M}^n)_{w})\subseteq\mathsf{Th}((\mathfrak{M}^n)_{w'})$ and, by Theorem \ref{pat}, $w'Bv'\land vRv'$ for some $v'$. Thus, $B$ is a bisimulation as $B$ is symmetric. Let $wB'v$ and $B'$ be a bisimulation of $\mathfrak{M}^n$. By Theorem \ref{pM7}, there is a p-morphism $p\colon\mathfrak{M}^n\to\mathfrak{M}^n\slash B'$ such that $p(w)=p(v)$. Hence, by Lemma \ref{pM0}, $(\mathfrak{M}^n)_w\thicksim(\mathfrak{M}^n)_v$, which means $wBv$. Thus, we have shown $B'\subseteq B$.
\end{proof} \begin{corollary}\label{FMbis} If $\{\mathsf{Th}((\mathfrak{M}^n)_w)\}_{w\in W}$ is finite, then there is a p-morphism from $\mathfrak{M}^n$ onto the $n$-model: $$\Bigl(\{\mathsf{Th}((\mathfrak{M}^n)_w)\}_{w\in W},\ \subseteq ,\ \mathsf{Th}(\mathfrak{M}^n),\ \{\{x_1,\dots,x_n\}\cap\mathsf{Th}((\mathfrak{M}^n)_w)\}_{w\in W}\Bigr).$$ \end{corollary} \begin{proof} By the above Theorem and by Theorem \ref{pM7}.\end{proof} \begin{corollary}\label{FM} $\mathfrak{M}^n$ is finitely reducible (which means there is a p-morphism $p\colon\mathfrak{M}^n\to\mathfrak{N}^n$ for some finite $n$-model $\mathfrak{N}^n$) if and only if $\{\mathsf{Th}((\mathfrak{M}^n)_w)\}_{w\in W}$ is finite. \end{corollary} \begin{corollary}\label{lf3i} Let $\mathfrak{M}^n$ and $\mathfrak{N}^n$ be finite (or finitely reducible) $n$-models. Then $\mathfrak{M}^n\thicksim\mathfrak{N}^n$ if and only if $\mathfrak{M}^n$ and $\mathfrak{N}^n$ have a common p-morphic image.\end{corollary} \begin{proof} Let $\mathfrak{M}^n=(W,R,w_0,V^n)$ and $\mathfrak{N}^n=(U,S,u_0,V'^n)$. It suffices to notice that $\mathfrak{M}^n\thicksim\mathfrak{N}^n$ yields, by Theorem \ref{pat}, $\{\mathsf{Th}((\mathfrak{M}^n)_w)\}_{w\in W}=\{\mathsf{Th}((\mathfrak{N}^n)_u)\}_{u\in U}$.\end{proof} \begin{corollary}\label{lf4i} If $\mathfrak{M}^n=(W,R,w_0,V^n)$ and $\mathfrak{N}^n=(U,S,u_0,V'^n)$ are finite and $\mathfrak{M}^n\thicksim\mathfrak{N}^n$, then\\ (i) for every $w\in W$ there is an element $u\in U$ such that $(\mathfrak{M}^n)_{w}\thicksim(\mathfrak{N}^n)_{u}$; \\ (ii) for every $u\in U$ there is an element $w\in W$ such that $(\mathfrak{M}^n)_{w}\thicksim(\mathfrak{N}^n)_{u}$. \end{corollary} \begin{proof} Let $p$ and $q$ be p-morphisms from $\mathfrak{M}^n$ and $\mathfrak{N}^n$, correspondingly, onto a common p-morphic image. By Lemma \ref{pM0}, $(\mathfrak{M}^n)_{w}\thicksim(\mathfrak{N}^n)_{u}$ if $p(w)=q(u)$. \end{proof} \subsection{$\sigma$-Models.}\label{sM} This is the key notion and it was defined by Ghilardi \cite{Ghi2}. Let $\sigma:\{x_1,\dots,x_n\}\to \mathsf{Fm^k}$, for $k,n\geq 0$. For any $\mathfrak{M}^k=(W,R,w_0,V^k)$, let $\sigma(\mathfrak{M}^k)=(W,R,w_0,V^n)$ where $$x_i\in V^n(w)\quad\Leftrightarrow\quad \mathfrak{M}^k\Vdash_w\sigma(x_i), \quad \mbox{ for any $w\in W$ \ and \ $i=1,\dots,n$}.$$ \begin{lemma}\label{sigma0} For every $w\in W$ \ and every \ $A\in \mathsf{Fm^n}$, we have $$\sigma(\mathfrak{M}^k)\Vdash_wA \quad\Leftrightarrow\quad \mathfrak{M}^k\Vdash_w\sigma(A) .$$ \end{lemma} \begin{lemma}\label{sigmai} (i) $\mathfrak{M}^k$ and $\sigma(\mathfrak{M}^k)$ are models over the same frame;\\ (ii) $\sigma((\mathfrak{M}^k)_w)=(\sigma(\mathfrak{M}^k))_w$, \quad for every $w\in W$;\\ (iii) if $\mathsf{Th}(\mathfrak{M}^k)\subseteq\mathsf{Th}(\mathfrak{N}^k)$,\ then\ $\mathsf{Th}(\sigma(\mathfrak{M}^k))\subseteq\mathsf{Th}(\sigma(\mathfrak{N}^k))$. \end{lemma} \begin{proof} We get (i) and (ii) by the definition of $\sigma(\mathfrak{M}^k)$. As concerns (iii):\\ $\sigma(\mathfrak{M}^k)\Vdash A \ \Leftrightarrow \ \mathfrak{M}^k\Vdash\sigma(A) \ \Rightarrow \ \mathfrak{N}^k\Vdash\sigma(A)\ \Leftrightarrow \ \sigma(\mathfrak{N}^k)\Vdash A$. \end{proof} \begin{lemma}\label{sigma2} If $p\colon\mathfrak{M}^k\to \mathfrak{N}^k$ is a p-morphism of $k$-models, then $p\colon\sigma(\mathfrak{M}^k)\to \sigma(\mathfrak{N}^k)$ is also a p-morphism of $n$-models and hence $p(\sigma(\mathfrak{M}^k))=\sigma(p(\mathfrak{M}^k))$ (see Figure. \ref{ps}). 
\end{lemma} \begin{figure}[H] \unitlength1cm \begin{picture}(2.5,2.5) \thicklines \put(4.3,0){\mbox{$\sigma(\mathfrak{M}^n)$}} \put(4.5,2){\mbox{$\mathfrak{M}^k$}} \put(4.6,1.9){\vector(0,-1){1.5}} \put(4.2,1){\mbox{$\sigma$}} \put(8.5,0){\mbox{$\sigma(\mathfrak{N}^k)$}} \put(8.6,1.9){\vector(0,-1){1.5}} \put(8.5,2){\mbox{$\mathfrak{N}^k$}} \put(8.8,1){\mbox{$\sigma$}} \put(6.7,2.2){\mbox{$p$}} \put(6.7,0.2){\mbox{$p$}} \put(5.5,2){\vector(1,0){2.5}} \put(5.5,0){\vector(1,0){2.5}} \end{picture} \caption{p-Morphic images of $\sigma$-models.}\label{ps} \end{figure} The above does not mean that $\sigma$-models are closed under p-morphic images. Two (counter)examples below show that they may be not. \begin{example}\label{Kost} Let $\sigma(x_1)=x_2 \lor (x_2 \to (x_1\lor \neg x_1)).$ The $1$-model over the two-element chain (in Figure \ref{ex1}) cannot be any $\sigma$-model as to falsify $\sigma(x_1)$ at the root one needs at least three elements in the chain. \begin{figure}[H] \unitlength1cm \begin{picture}(3,2) \thicklines \put(1,0){\vector(0,1){0.9}} \put(1,1){\vector(0,1){0.9}} \put(1,1){\circle{0.1}} \put(1,2){\circle{0.1}} \put(1,0){\circle{0.1}} \put(1.3,0){\mbox{$00$}} \put(1.3,2){\mbox{$11$}} \put(1.3,1){\mbox{$01$}} \put(3.1,0.8){\vector(1,0){0.9}} \put(3.5,0.5){$\sigma$} \put(6,0){\vector(0,1){0.9}} \put(6,1){\vector(0,1){0.9}} \put(6,1){\circle{0.1}} \put(6,2){\circle{0.1}} \put(6,0){\circle{0.1}} \put(6.3,0){\mbox{$ 0$}} \put(6.3,2){\mbox{$ 1$}} \put(6.3,1){\mbox{$ 1$}} \put(8.1,0.8){\vector(1,0){0.9}} \put(8.5,0.5){$p$} \put(11,0){\circle{0.1}} \put(11.3,0){\mbox{$ 0$}} \put(11,1){\circle{0.1}} \put(11.3,1){\mbox{$1$}} \put(11,0){\vector(0,1){0.9}} \end{picture} \caption{The First Counterexample.}\label{ex1} \end{figure} Let $\sigma(x)=\neg\neg x\lor \neg x$ (we write $x$ instead of $x_1$). Models and the p-morphism are defined in Figure \ref{ex2}. The $1$-model over a two-element chain cannot be any $\sigma$-model as to falsify $\sigma(x)$ at the root one needs at least two end elements above the root. \begin{figure}[H] \unitlength1cm \begin{picture}(5,2) \thicklines \put(0,1){\circle{0.1}} \put(1,0){\circle{0.1}} \put(2,1){\circle{0.1}} \put(1.3,0){\mbox{$0$}} \put(0.3,1){\mbox{$1$}} \put(2.3,1){\mbox{$0$}} \put(1,0){\vector(1,1){0.9}} \put(1,0){\vector(-1,1){0.9}} \put(3.1,0.8){\vector(1,0){0.9}} \put(3.5,0.5){$\sigma$} \put(6,0){\vector(-1,1){0.9}} \put(6,0){\vector(1,1){0.9}} \put(5,1){\circle{0.1}} \put(6,0){\circle{0.1}} \put(7,1){\circle{0.1}} \put(6.3,0){\mbox{$0$}} \put(5.2,1){\mbox{$1$}} \put(7.3,1){\mbox{$1$}} \put(8.6,0.8){\vector(1,0){0.9}} \put(9,0.5){$p$} \put(11,0){\circle{0.1}} \put(11.3,0){\mbox{$ 0$}} \put(11,0){\line(0,1){0.9}} \put(11,1){\circle{0.1}} \put(11.3,1){\mbox{$ 1$}} \put(11,0){\vector(0,1){0.9}} \end{picture} \caption{The Second Counterexample.}\label{ex2} \end{figure} \end{example} Nowhere (but Theorem \ref{lfi2}) we have used the fact that valuations of any $n$-model are restricted to the $n$-initial variables. It would make no change in our argument if we replaced (everywhere) valuations $V^n$ with $V$, valuations of all variables. Thus, all results (but Theorem \ref{lfi2}) of this section remain valid for usual Kripke models. \section{Locally Tabular Logics.}\label{LDF} For any class {\bf F} of frames, let {\it sm({\bf F})} be the least class (of frames) containing {\bf F} and closed under generated subframes and p-morphic images. 
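Before turning to the analysis of extensions of $\mathsf{L}(\mathbf{F})$, it may help to see the $\sigma$-model construction of Section \ref{sM} spelled out computationally, since it is used repeatedly below. The following minimal sketch (ours; the tuple encoding of formulas and the forcing routine are ad hoc) evaluates $\sigma(x_i)$ at every world of a finite $k$-model, exactly as in the definition of $\sigma(\mathfrak{M}^k)$, and reproduces the $\sigma$-model of the second counterexample above.

\begin{verbatim}
# A minimal sketch (ours) of sigma-models: formulas are nested tuples, e.g.
# ("->", ("var", "x"), ("bot",)); forcing is the usual intuitionistic clause-by-
# clause definition on a finite model (W, R, w0, V) with R a partial order.

def forces(M, w, A):
    W, R, w0, V = M
    tag = A[0]
    if tag == "var":
        return A[1] in V[w]
    if tag == "bot":
        return False
    if tag == "&":
        return forces(M, w, A[1]) and forces(M, w, A[2])
    if tag == "|":
        return forces(M, w, A[1]) or forces(M, w, A[2])
    if tag == "->":   # true at w iff the consequent holds wherever the antecedent does
        return all(not forces(M, v, A[1]) or forces(M, v, A[2])
                   for v in W if (w, v) in R)
    raise ValueError(tag)

def sigma_model(M, sigma):
    # sigma maps variable names to formulas over the variables of M
    W, R, w0, V = M
    Vn = {w: {x for x in sigma if forces(M, w, sigma[x])} for w in W}
    return W, R, w0, Vn

# sigma(x) = ~~x | ~x on the 2-fork with x true at exactly one end point.
NEG = lambda A: ("->", A, ("bot",))
M = ({0, 1, 2}, {(0, 0), (1, 1), (2, 2), (0, 1), (0, 2)}, 0,
     {0: set(), 1: {"x"}, 2: set()})
sigma = {"x": ("|", NEG(NEG(("var", "x"))), NEG(("var", "x")))}
print(sigma_model(M, sigma)[3])   # {0: set(), 1: {'x'}, 2: {'x'}}
\end{verbatim}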
\begin{lemma}\label{lf8} \indent\indent\indent\indent \indent\indent\indent \indent \indent \qquad $\mathsf{L}(sm(\mathbf{F}))=\mathsf{L}(\mathbf{F}).$\end{lemma}\begin{proof} By Lemma \ref{pM0} and Lemma \ref{pMm}\end{proof} Extending any class of frames with generated subframes and p-morphic images does not change the logic but it enables us to characterize extensions of $\mathsf{L}(\mathbf{F})$:\footnote{The following theorem resembles (not without reasons) characterizations, see \cite{rw,PW}, of extensions of logics given by logical matrices.} \begin{theorem}\label{lf7} Let $\mathbf{F}$ be a class of finite frames and $\mathsf L=\mathsf{L}(\mathbf{F})$ be locally tabular. If {\sf L'} is an intermediate logic such that $\mathsf{L}\subseteq \mathsf L'$, then $\mathsf{L'}=\mathsf{L}(\mathbf{G})$, for some $\mathbf{G}\subseteq sm(\mathbf{F})$. \end{theorem} \begin{proof} Let $\mathbf{G}=\{\mathfrak{F}\in sm(\mathbf{F})\colon \mathsf{L'}\subseteq \mathsf{L}(\mathfrak{F})\}$. Clearly, $\mathsf{L'}\subseteq\mathsf{L}(\mathbf{G})$. We need to show the reverse inclusion. So, assume $A\not\in\mathsf{L'}$ and show $A\not\in\mathsf{L}(\mathfrak{F})$ for some $\mathfrak{F}\in \mathbf{G}$. Suppose that $A=A(x_1,\dots,x_k)$, for some $k\geq 0$, and let $A_0,\dots,A_j$ be all (non-equivalent in {\sf L}) formulas in $\mathsf{Fm^k}\cap\mathsf{L'}$. Let $$B=\bigwedge_{i=0}^jA_i\ \rightarrow\ A.$$ If $B\in \mathsf{L}(\mathbf{F})$, then $B\in\mathsf{L'}$ and it would give $A\in \mathsf{L'}$, a contradiction. Thus, we have $B\not\in \mathsf{L}(\mathbf{F})$. There is a $k$-model $\mathfrak{M}^k=(W,R,w_0,V^k)$ over a frame from {\bf F} such that $\mathfrak{M}^k\Vdash_wA_i$, for all $i\leq j$, and $\mathfrak{M}^k\not\Vdash_wA$, for some $w\in W$. Let $p\colon\mathfrak{M}^k\to\mathfrak{N}^k$ be a p-morphism from $\mathfrak{M}^k$ onto a $p$-irreducible $k$-model $\mathfrak{N}^k$, see Theorem \ref{Irr}. We take the frame of $(\mathfrak{N}^k)_{p(w)}$ as our $\mathfrak{F}$. Let $\mathfrak{F}=(U,\leq,p(w))$. Since $(\mathfrak{N}^k)_{p(w)}$ is a $k$-model over $\mathfrak{F}$, we have $A\not\in\mathsf{L}(\mathfrak{F})$. There remains to show that $\mathsf{L'}\subseteq \mathsf{L}(\mathfrak{F})$. Suppose that $C\not\in\mathsf{L}(\mathfrak{F})$ for some $C\in \mathsf{L'}$. Let $C=C(x_1,\dots,x_n)$ and let $\mathfrak{N}^n$ be an $n$-model over $\mathfrak{F}$ such that $\mathfrak{N}^n\not\Vdash C$. We define a substitution $\varepsilon\colon\{x_1,\dots,x_n\}\to\mathsf{Fm^k}$ taking $\varepsilon(x_i)=\bigvee\{\Delta((\mathfrak{N}^k)_u)\colon \mathfrak{N}^n\Vdash_ux_i\}$, for any $i\leq n$. Then we have $\mathfrak{N}^k\Vdash_v \varepsilon(x_i)\Leftrightarrow$\\ $\exists_{u\in U}\bigl(\mathfrak{N}^k\Vdash_v\Delta((\mathfrak{N}^k)_u)\land \mathfrak{N}^n\Vdash_ux_i\bigr)\Leftrightarrow \exists_{u\in U}\bigl(\mathsf{Th}((\mathfrak{N}^k)_u)\subseteq\mathsf{Th}((\mathfrak{N}^k)_v) \land\mathfrak{N}^n\Vdash_ux_i\bigr) \Leftrightarrow\exists_{u\in U}(u\leq v\land\mathfrak{N}^n\Vdash_ux_i)\Leftrightarrow\mathfrak{N}^n\Vdash_vx_i$, for any $i\leq n$ and $v\in U$. Note that the last but one equivalence needs Corollary \ref{FMbis}. This shows $\mathfrak{N}^k\Vdash_{v} \varepsilon(C)\Leftrightarrow\mathfrak{N}^n\Vdash_v C$, for any $v\in U$ and hence we get $\mathfrak{N}^k\not\Vdash_{p(w)} \varepsilon(C)$, that is $\mathfrak{M}^k\not\Vdash_w \varepsilon(C)$, which cannot happen as $\varepsilon(C)$ is one of the $A_i$'s and must be true at $(\mathfrak{M}^k)_w$. 
\end{proof} \subsection{Substitutions in Locally Tabular Logics.}\label{sub} Let $\mathbf{F}$ be a class of finite frames, $\mathsf L=\mathsf{L}(\mathbf{F})$ be locally tabular and $\mathbf{M}^n=\mathbf{M}^n(\mathbf F)$, for any $n\geq 0$. Assume, additionally, that {\bf F} is closed under generated subframes and p-morphic images, that is {\it sm}({\bf F})={\bf F}, see Lemma \ref{lf8}. For any $\sigma:\{x_1,\dots,x_n\}\to \mathsf{Fm^k}$, define $H_\sigma\colon\mathbf{M}^k\to\mathbf{M}^n$ putting $H_\sigma(\mathfrak{M}^k)=\sigma(\mathfrak{M}^k)$, for each $\mathfrak{M}^k$.\footnote{Ghilardi wrote $\sigma(u)$ for any Kripke model $u$ and hence we have $\sigma(\mathfrak{M}^k)$. We should, perhaps, have written $\sigma\colon\mathbf{M}^k\to\mathbf{M}^n$, but we think it could be misleading, as we already have $\sigma:\{x_1,\dots,x_n\}\to \mathsf{Fm^k}$ and $\sigma:\mathsf{Fm}\to \mathsf{Fm}$. Talking about the mapping $\sigma$, it would be unclear whether we had in mind a mapping between formulas or a mapping between models. For this reason we decided to introduce $H_\sigma$ in place of $\sigma$, though this could be seen as an excessive precaution.} \begin{lemma}\label{lfs} Suppose that $\varepsilon,\sigma:\{x_1,\dots,x_n\}\to \mathsf{Fm^k}$. Then \ $\varepsilon=_{\sf L}\sigma$ \ iff \ $H_\sigma\thicksim H_\varepsilon$.\footnote{Here $H_\sigma\thicksim H_\varepsilon$ means that $H_\sigma(\mathfrak{M}^k)\thicksim H_\varepsilon(\mathfrak{M}^k)$ for each $\mathfrak{M}^k$.}\end{lemma} \begin{proof} $(\Rightarrow)$ is obvious. $(\Leftarrow)$. Let $H_\sigma(\mathfrak{M}^k)\thicksim H_\varepsilon(\mathfrak{M}^k)$, for any $\mathfrak{M}^k\in\mathbf{M}^k$. Then\\ $\mathfrak{M}^k\Vdash \sigma(A)\Leftrightarrow\sigma(\mathfrak{M}^k)\Vdash A\Leftrightarrow\varepsilon(\mathfrak{M}^k)\Vdash A\Leftrightarrow\mathfrak{M}^k\Vdash \varepsilon(A).$ Thus, $\vdash_{\sf L}\sigma(A)\leftrightarrow\varepsilon(A)$, for any $A\in\mathsf{Fm^n}$, which shows $\varepsilon=_{\sf L}\sigma$.\end{proof} The assumptions that the frames {\bf F} are finite and {\sf L}({\bf F}) is locally tabular do not play any role in the above lemma, but they are essential in the subsequent theorem, to prove that the conditions (i)-(iii) of Lemma \ref{sigmai} characterize substitutions: \begin{theorem}\label{nsigmai} Let $H\colon\mathbf{M}^k\to\mathbf{M}^n$. Then $H\thicksim H_\sigma$, for some $\sigma:\{x_1,\dots,x_n\}\to \mathsf{Fm^k}$, if and only if $H$ fulfills the following conditions:\\ (i) the $n$-model $H(\mathfrak{M}^k)$ has the same frame as the $k$-model $\mathfrak{M}^k$, for any $\mathfrak{M}^k\in\mathbf{M}^k$;\\ (ii) $H((\mathfrak{M}^k)_w)\thicksim(H(\mathfrak{M}^k))_w$, \ for any $\mathfrak{M}^k=(W,R,w_0,V^k)\in\mathbf{M}^k$ and $w\in W$;\\ (iii) if $\mathfrak{N}^k\thicksim\mathfrak{M}^k$,\ then\ $H(\mathfrak{N}^k)\thicksim H(\mathfrak{M}^k)$, for any $\mathfrak{M}^k,\mathfrak{N}^k\in\mathbf{M}^k.$ \end{theorem} \begin{proof} $(\Rightarrow)$ follows from Lemma \ref{sigmai}. The conditions (i)-(iii) of Lemma \ref{sigmai} seem to be stronger than the above ones, but they are not (see Theorem \ref{pat}). To prove $(\Leftarrow)$, we assume that $H\colon\mathbf{M}^k\to\mathbf{M}^n$ fulfills the above (i)--(iii). Let $$\sigma(x_i)=\bigvee\{\Delta(\mathfrak{N}^k)\colon \ \mathfrak{N}^k\in\mathbf{M}^k \ \land \ H(\mathfrak{N}^k)\Vdash x_i\},\qquad \mbox{ for $i=1,\dots,n$}.$$ By Theorem \ref{lf6}, we can claim that we have defined $\sigma\colon \{x_1,\dots,x_n\}\to \mathsf{Fm}^k$.
For any $k$-model $\mathfrak{M}^k=(W,R,w_0,V^k)\in \mathbf{M}^k$, we have $$\sigma(\mathfrak{M}^k)\Vdash_{w}x_i \quad \Leftrightarrow \quad \sigma((\mathfrak{M}^k)_w)\Vdash x_i\quad \Leftrightarrow \quad (\mathfrak{M}^k)_w\Vdash\sigma(x_i)\quad \Leftrightarrow $$ $$\exists_{\mathfrak{N}^k}\bigl((\mathfrak{M}^k)_w\Vdash\Delta(\mathfrak{N}^k) \land H(\mathfrak{N}^k)\Vdash x_i\bigr) \ \Leftrightarrow \ H((\mathfrak{M}^k)_w)\Vdash x_i \ \Leftrightarrow\ H(\mathfrak{M}^k)\Vdash_{w}x_i,$$ for any $i=1,\dots,n$ and any $w\in W$. Hence $\sigma(\mathfrak{M}^k)\thicksim H(\mathfrak{M}^k)$. \end{proof} The above theorem is useful to define substitutions. However, the condition (iii) is hard to check if there are too many p-morphisms between models. So, we would prefer a variant of Theorem \ref{nsigmai}, given below, concerning p-irreducible models. The closure of {\bf F} under p-morphic images is not necessary for the above theorem (closure under generated subframes suffices), but it is necessary for the subsequent theorem. Let $\mathbf{M}^n_{ir}$, for any $n\geq 0$, be the class of p-irreducible $n$-models over the frames $\mathbf{F}$. According to Theorem \ref{Irr}, for any $A\in \mathsf{Fm}^n$ $$A\in\mathsf{L} \quad \Leftrightarrow \quad (\mathfrak{M}^n\Vdash A, \mbox{ for every } \mathfrak{M}^n\in \mathbf{M}^n_{ir} ). $$ \begin{theorem}\label{nsi} If $H\colon\mathbf{M}^k_{ir}\to\mathbf{M}^n$ fulfills\\ (i) the $n$-model $H(\mathfrak{M}^k)$ has the same frame as the $k$-model $\mathfrak{M}^k$, for any $\mathfrak{M}^k\in\mathbf{M}^k_{ir}$;\\ (ii) $H((\mathfrak{M}^k)_w)\thicksim(H(\mathfrak{M}^k))_w$, \ for any $\mathfrak{M}^k=(W,R,w_0,V^k)\in\mathbf{M}^k_{ir}$ and any $w\in W$;\\ (iii) if $\mathfrak{N}^k\equiv\mathfrak{M}^k$,\ then\ $H(\mathfrak{N}^k)\thicksim H(\mathfrak{M}^k)$, for any $\mathfrak{M}^k,\mathfrak{N}^k\in\mathbf{M}^k_{ir};$\\ then there is exactly one (up to $=_{\sf L}$) substitution $\sigma:\{x_1,\dots,x_n\}\to \mathsf{Fm^k}$ such that $H(\mathfrak{M}^k)\thicksim H_\sigma(\mathfrak{M}^k)$, for each $\mathfrak{M}^k\in\mathbf{M}^k_{ir}.$ \end{theorem} \begin{proof} We proceed in the same way as above. In particular, we define $$\sigma(x_i)=\bigvee\{\Delta(\mathfrak{N}^k)\colon \ \mathfrak{N}^k\in\mathbf{M}^k_{ir} \ \land \ H(\mathfrak{N}^k)\Vdash x_i\},\qquad \mbox{ for $i=1,\dots,n$}$$ and prove $H(\mathfrak{M}^k)\thicksim H_\sigma(\mathfrak{M}^k)$, for any $\mathfrak{M}^k\in\mathbf{M}^k_{ir}$. The crucial step in our argument $$\exists_{\mathfrak{N}^k}\bigl((\mathfrak{M}^k)_w\Vdash\Delta(\mathfrak{N}^k) \land H(\mathfrak{N}^k)\Vdash x_i\bigr) \ \Rightarrow \ H((\mathfrak{M}^k)_w)\Vdash x_i$$ follows from the fact that, if $(\mathfrak{M}^k)_w\thicksim(\mathfrak{N}^k)_u$, for some $u$, then $(\mathfrak{M}^k)_w$ and $(\mathfrak{N}^k)_u$ are p-irreducible by Theorem \ref{pM6} and hence $(\mathfrak{M}^k)_w\equiv(\mathfrak{N}^k)_u$ by Corollary \ref{lf3i}. Thus, by (iii), we have $H((\mathfrak{M}^k)_w)\Vdash x_i$ if $H(\mathfrak{N}^k)\Vdash x_i$. The uniqueness of $\sigma$ follows from Lemma \ref{lfs} (and Theorem \ref{Irr}). \end{proof} Suppose we need $H\colon\mathbf{M}^k_{ir}\to\mathbf{M}^n$ fulfilling the above (i)--(iii). Let $\mathfrak{M}^k=(W,\leq,w_0,V^k)$ be a p-irreducible $k$-model. We should have $H(\mathfrak{M}^k)=(W,\leq,w_0,V^n)$, for some $V^n$, which means that only the valuations $V^n$ are to be defined.
By Theorem \ref{pM6}, $(\mathfrak{M}^k)_w$ is p-irreducible, for any $w\in W$, and hence we could define $H((\mathfrak{M}^k)_w)$ inductively with respect to $d_{\mathfrak F}(w)$, where $\mathfrak F=(W,\leq,w_0)$. First, we define $H((\mathfrak{M}^k)_w)$ for $n$-models over 1-element reflexive frames, that is, we define $V^n(w)$ for end elements $w\in W$; any subset of $\{x_1,\dots,x_n\}$ can be taken as $V^n(w)$. Then, assuming $H((\mathfrak{M}^k)_u)$ has been defined for any $u>w$, and hence we have $V^n(u)$ for any $u>w$, we define $V^n(w)$. The only restriction is monotonicity, that is, we should have $V^n(w)\subseteq V^n(u)$ for any $u>w$. The condition (ii) would be satisfied as we would even have $H((\mathfrak{M}^k)_w)=(H(\mathfrak{M}^k))_w$. The condition (iii) poses no problem if we define $H$ on the equivalence classes $[\mathfrak{M}^k]_\equiv$. We should get $H\colon\mathbf{M}^k_{ir}\slash\!\!\equiv \ \to \ \mathbf{M}^n\slash\!\!\thicksim$. Note that it would be much easier to satisfy this condition than to satisfy the corresponding (iii) of Theorem \ref{nsigmai}, where we should get $H\colon\mathbf{M}^k\slash\!\!\thicksim \ \to \ \mathbf{M}^n\slash\!\!\thicksim$.\\ Since {\sf L} is locally tabular, any formula $A\in\mathsf{Fm}^n$ is a disjunction of the characters of all $A$-{\it models} (that is, $n$-models $\mathfrak{M}^n\in\mathbf M^n$ such that $A$ is true at $\mathfrak{M}^n$): $$A=_{\sf L}\bigvee\{\Delta(\mathfrak{M}^n)\colon \Delta(\mathfrak{M}^n)\to A\in \mathsf{L}\}=_{\sf L}\bigvee\{\Delta(\mathfrak{M}^n)\colon \mathfrak{M}^n\Vdash A\}.\leqno (\star)$$ It also shows that each {\sf L}-consistent (that is $\not=_{\sf L}\bot$) formula is {\sf L}-unifiable. \begin{lemma}\label{n1i} A substitution $\sigma\colon \{x_1,\dots,x_n\}\to \mathsf{Fm}^k$ is an {\sf L}-unifier for $A\in\mathsf{Fm}^n$ iff $\sigma(\mathfrak{M}^k)\Vdash A$ for each $k$-model $\mathfrak{M}^k$. In other words, $\sigma$ is a unifier for $A$ iff all $\sigma$-models are $A$-models. \end{lemma} \begin{proof} $\sigma(\mathfrak{M}^k)\Vdash A$ iff $\mathfrak{M}^k\Vdash\sigma(A)$, for each $\mathfrak{M}^k$. Thus, $\sigma(\mathfrak{M}^k)\Vdash A$ for each $\mathfrak{M}^k$ iff $\mathfrak{M}^k\Vdash\sigma(A)$ for each $\mathfrak{M}^k$. But $\sigma$ is an {\sf L}-unifier for $A$ iff $\mathfrak{M}^k\Vdash\sigma(A)$ for each $\mathfrak{M}^k$.\end{proof} Accordingly, $\sigma$ is a unifier for $A$ iff the disjunction $\bigvee\{\Delta(\mathfrak{M}^n)\colon \mathfrak{M}^n\Vdash A\}$ in $(\star)$ contains the characters of all $\sigma$-models. In other words: \begin{corollary}\label{in2} A substitution $\sigma\colon \{x_1,\dots,x_n\}\to \mathsf{Fm}^k$ is an {\sf L}-unifier for a formula $A\in\mathsf{Fm}^n$ iff $A_\sigma\to A\in \mathsf{L}$, where $A_\sigma=\bigvee\{\Delta(\sigma(\mathfrak{M}^k))\colon \mathfrak{M}^k\in \mathbf{M}^k\}.$ \end{corollary} Thus, each substitution $\sigma$ is an {\sf L}-unifier of some formulas $A\in\mathsf{Fm}^n$ and among these formulas we can find the strongest (or the smallest in the Lindenbaum algebra) formula $A_\sigma$. Using Lemma \ref{n1i} and Theorem \ref{pat} we can now characterize all unifiers of $A_\sigma$. \begin{corollary}\label{n3i} Let $\sigma\colon \{x_1,\dots,x_n\}\to \mathsf{Fm}^k$.
A substitution $\tau\colon \{x_1,\dots,x_n\}\to \mathsf{Fm}^m$ is an {\sf L}-unifier for $A_\sigma$ iff all $\tau$-models are (equivalent to) $\sigma$-models, that is iff\\ (iv) for every $\mathfrak{M}^m\in\mathbf{M}^m$ there is an $\mathfrak{M}^k\in\mathbf{M}^k$ such that $\tau(\mathfrak{M}^m) \thicksim \sigma(\mathfrak{M}^k)$. \end{corollary} The above condition (iv) can be also written down as the inclusion $$\tau(\mathbf{M}^m)\slash\!\!\thicksim \quad \subseteq \quad \sigma(\mathbf{M}^k)\slash\!\!\thicksim.$$ There remains to characterize the relation $\preccurlyeq$ (of being a more general substitution) in terms of $\sigma$-models. \begin{lemma}\label{n4i} A substitution $\tau\colon \{x_1,\dots,x_n\}\to \mathsf{Fm}^m$ is more general than $\sigma\colon \{x_1,\dots,x_n\}\to \mathsf{Fm}^k$ (that is $\tau\preccurlyeq\sigma$) iff there is $F\colon\mathbf{M}^k\to\mathbf{M}^m$, fulfilling (i)--(iii) of Theorem \ref{nsigmai}, such that\\ (v) \ $\tau(F(\mathfrak{M}^k))\thicksim\sigma(\mathfrak{M}^k)$, for every $\mathfrak{M}^k\in \mathbf{M}^k$.\\ Consequently, if $\tau\preccurlyeq\sigma$, then all $\sigma$-models are (equivalent to) $\tau$-models and hence $$\tau(\mathbf{M}^m)\slash\!\!\thicksim \quad \supseteq \quad \sigma(\mathbf{M}^k)\slash\!\!\thicksim.$$ \end{lemma} \begin{proof} Suppose that $\nu\circ\tau=_{\sf L}\sigma$ for some $\nu\colon \{x_1,\dots,x_m\}\to \mathsf{Fm}^k$ and $F=H_\nu$. Then, $\tau(F(\mathfrak{M}^k))=\tau(\nu(\mathfrak{M}^k))$ and, for each $B\in \mathsf{Fm}^n$,\ $\tau(\nu(\mathfrak{M}^k))\Vdash B \ \Leftrightarrow \ \nu(\mathfrak{M}^k)\Vdash \tau(B) \ \Leftrightarrow \ \mathfrak{M}^k\Vdash \nu(\tau(B)) \ \Leftrightarrow \ \mathfrak{M}^k\Vdash \sigma(B) \ \Leftrightarrow \ \sigma(\mathfrak{M}^k)\Vdash B.$ Hence we get (v). The reverse implication is shown in the same way. We get $\nu\colon \{x_1,\dots,x_m\}\to \mathsf{Fm}^k$ such that $F(\mathfrak{M}^k)=\nu(\mathfrak{M}^k)$, for each $\mathfrak{M}^k$, by Lemma \ref{n1i}. Then, using (v), we show $\mathfrak{M}^k\Vdash \nu(\tau(B))\ \Leftrightarrow \ \mathfrak{M}^k\Vdash \sigma(B)$ for each $B\in \mathsf{Fm}^n$ and this yields $\nu\circ\tau=_{\sf L}\sigma$.\end{proof} \subsection{Locally Tabular Logics with Finitary Unification.}\label{FU} Now, we give a short proof of the main result of \cite{dkw} characterizing locally tabular intermediate logics with finitary (or unitary) unification. 
\begin{theorem}\label{main} A locally tabular intermediate logic {\sf L} has finitary (or unitary) unification if and only if for every $n\geq 0$ there exists a number $m\geq 0$ such that for every $k\geq 0$ and every $\sigma\colon \{x_1,\dots,x_n\}\to \mathsf{Fm}^k$ there are $G:\mathbf{M}^m\to\mathbf{M}^n$ and $F:\mathbf{M}^k\to\mathbf{M}^m$ such that \\ (i) $G$ preserves the frame of any $m$-model $\mathfrak{M}^m=(W,R,w_0,V^m)\in\mathbf{M}^m$;\\ \indent $F$ preserves the frame of any $k$-model $\mathfrak{M}^k=(W',R',w'_0,V^k)\in\mathbf{M}^k$;\\ (ii) $G((\mathfrak{M}^m)_w)\thicksim(G(\mathfrak{M}^m))_w$, for any $\mathfrak{M}^m=(W,R,w_0,V^m)\in\mathbf{M}^m$ and $w\in W$;\\ \indent $F((\mathfrak{M}^k)_{w'})\thicksim(F(\mathfrak{M}^k))_{w'}$, for any $\mathfrak{M}^k=(W',R',w_0',V^k)\in\mathbf{M}^k$ and $w'\in W'$;\\ (iii) $\mathfrak{N}^m\thicksim \mathfrak{M}^m \ \Rightarrow \ G(\mathfrak{N}^m)\thicksim G(\mathfrak{M}^m), \ \mbox{ for any } \ \mathfrak{M}^m,\mathfrak{N}^m\in \mathbf{M}^m$;\\ \indent \indent $\mathfrak{N}^k\thicksim \mathfrak{M}^k \ \Rightarrow \ F(\mathfrak{N}^k)\thicksim F(\mathfrak{M}^k), \ \mbox{ for any } \ \mathfrak{M}^k,\mathfrak{N}^k\in \mathbf{M}^k$;\\ (iv) for every $\mathfrak{M}^m\in\mathbf{M}^m$ there is $\mathfrak{M}^k\in\mathbf{M}^k$ such that $G(\mathfrak{M}^m) \thicksim \sigma(\mathfrak{M}^k)$;\\ (v) $G(F(\mathfrak{M}^k))\thicksim\sigma(\mathfrak{M}^k)$, for every $\mathfrak{M}^k\in \mathbf{M}^k$; see Figure \ref{v} below. \end{theorem} \begin{figure}[H] \unitlength1cm \begin{picture}(5,2) \thicklines \put(8,2){\vector(0,-1){1.9}} \put(8,2){\vector(-1,-1){1.9}} \put(8,0){\vector(-1,0){1.9}} \put(8,0){\circle{0.1}} \put(6,0){\circle{0.1}} \put(8,2){\circle{0.1}} \put(8.3,2){\mbox{$\mathbf{M}^k\!\slash\!\!\thicksim$}} \put(5,0){\mbox{$\mathbf{M}^n\!\slash\!\!\thicksim$}} \put(8.3,0){\mbox{$\mathbf{M}^m\!\slash\!\!\thicksim$}} \put(8.1,1){\mbox{$F$}} \put(7.1,0.1){\mbox{$G$}} \put(6.7,1.2){\mbox{$H_\sigma$}} \end{picture} \caption{Condition (v)}\label{v} \end{figure} \begin{proof} Since {\sf L} is locally tabular, $\mathsf{Fm}^n$ is finite (up to $=_{\sf L}$), for each $n$, and each $\sigma\colon \{x_1,\dots,x_n\}\to \mathsf{Fm}^k$ is a unifier of $A_\sigma\in\mathsf{Fm}^n$, see Corollary \ref{in2}. Thus, if {\sf L} has finitary (or unitary) unification, one can find, for every $n\geq 0$, a number $m$ such that each {\sf L}-unifiable formula $A\in\mathsf{Fm}^n$ (and $A_\sigma$ in particular) has a complete set of unifiers among $\tau\colon \{x_1,\dots,x_n\}\to \mathsf{Fm}^m$. Suppose $\tau\colon \{x_1,\dots,x_n\}\to \mathsf{Fm}^m$ is a unifier for $A_\sigma$ and $\tau\preccurlyeq\sigma$. Take $G=H_\tau$ and let $F$ be the mapping determined by Lemma \ref{n4i}. Then the conditions (i)--(v) are obviously fulfilled. On the other hand, using the conditions (i)--(v) one shows that the set of {\sf L}-unifiers $\tau\colon \{x_1,\dots,x_n\}\to\mathsf{Fm}^m$, for any unifiable $A\in\mathsf{Fm}^n$, is complete. Since $\mathsf L$ is locally tabular, there are only finitely many (up to $=_{\sf L}$) such unifiers $\tau$ and hence unification is finitary (if not unitary). \end{proof} The above theorem does not need the assumption that {\bf F} is closed under p-morphic images; closure under generated subframes suffices. But {\bf F}={\it sm}({\bf F}) is needed for the following version of Theorem \ref{main}, where we refer to Theorem \ref{nsi} (instead of \ref{nsigmai}).
For this version of Theorem \ref{main} we must, however, modify the condition (v), as we cannot assume that $F(\mathfrak{M}^k)$ is p-irreducible even if $\mathfrak{M}^k$ is p-irreducible. \begin{theorem}\label{main2} A locally tabular intermediate logic {\sf L} has finitary (or unitary) unification if and only if for every $n\geq 0$ there exists a number $m\geq 0$ such that for every $k\geq 0$ and every $\sigma\colon \{x_1,\dots,x_n\}\to \mathsf{Fm}^k$ there are $G:\mathbf{M}^m_{ir}\to\mathbf{M}^n$ and $F:\mathbf{M}^k_{ir}\to\mathbf{M}^m$ such that \\ (i) $G$ preserves the frame of any $m$-model $\mathfrak{M}^m=(W,R,w_0,V^m)\in\mathbf{M}^m_{ir}$;\\ \indent $F$ preserves the frame of any $k$-model $\mathfrak{M}^k=(W',R',w'_0,V^k)\in\mathbf{M}^k_{ir}$;\\ (ii) $G((\mathfrak{M}^m)_w)\thicksim(G(\mathfrak{M}^m))_w$, for any $\mathfrak{M}^m=(W,R,w_0,V^m)\in\mathbf{M}^m_{ir}$ and $w\in W$;\\ \indent $F((\mathfrak{M}^k)_{w'})\thicksim(F(\mathfrak{M}^k))_{w'}$, for any $\mathfrak{M}^k=(W',R',w_0',V^k)\in\mathbf{M}^k_{ir}$ and $w'\in W'$;\\ (iii) $\mathfrak{N}^m\equiv \mathfrak{M}^m \ \Rightarrow \ G(\mathfrak{N}^m)\thicksim G(\mathfrak{M}^m), \ \mbox{ for any } \ \mathfrak{M}^m,\mathfrak{N}^m\in \mathbf{M}^m_{ir}$;\\ \indent \indent $\mathfrak{N}^k\equiv \mathfrak{M}^k \ \Rightarrow \ F(\mathfrak{N}^k)\thicksim F(\mathfrak{M}^k), \ \mbox{ for any } \ \mathfrak{M}^k,\mathfrak{N}^k\in \mathbf{M}^k_{ir}$;\\ (iv) for every $\mathfrak{M}^m\in\mathbf{M}^m_{ir}$ there is $\mathfrak{M}^k\in\mathbf{M}^k_{ir}$ such that $G(\mathfrak{M}^m) \thicksim \sigma(\mathfrak{M}^k)$;\\ (v) if $\mathfrak{N}^m\thicksim F(\mathfrak{M}^k)$, then $G(\mathfrak{N}^m)\thicksim\sigma(\mathfrak{M}^k)$, for every $\mathfrak{M}^k\in \mathbf{M}^k_{ir}$ and $\mathfrak{N}^m\in \mathbf{M}^m_{ir}$. \end{theorem} In \cite{dkw} the above theorems are applied to determine the unification types of locally tabular logics. We use them in the present paper as well. Let us recall that $\mathbf H_{un}$ (see Figure \ref{hpa}) consists of all frames $\mathfrak L_d+\mathfrak R_s$, where $s,d\geq 0$.\footnote{There is no $\mathfrak L_0$ (or it would be the empty frame), so we should have here: all frames $\mathfrak R_s$ and $\mathfrak L_d+\mathfrak R_s$, where $s\geq 0$ and $d\geq 1$, if we agree that $\mathfrak R_0=\mathfrak F_0=\mathfrak L_1$.} The logic {\sf L}($\mathbf H_{un}$) is locally tabular as it extends {\sf PWL}, see \cite{Esakia}. \begin{theorem}\label{wpl} For any $\mathbf F\subseteq \mathbf H_{un}$, the logic {\sf L}($\mathbf F$) has unitary unification.\end{theorem} \begin{proof} Let $n\geq 1$, $m=n\cdot 2^n+1$ and $\sigma\colon \{x_1,\dots,x_n\}\to \mathsf{Fm}^k$, for some $k$. We need $G:\mathbf{M}^m_{ir}\to\mathbf{M}^n$ and $F:\mathbf{M}^k_{ir}\to\mathbf{M}^m$ fulfilling (i)-(v) of Theorem \ref{main2}.
Let $code(\mathfrak{f}^k_0)=\mathfrak{f}^n_0\mathfrak{g}^n_2\mathfrak{g}^n_3\dots,\mathfrak{g}^n_{2^n}1\quad \mbox{(the concatenation of $\mathfrak{f}^k_0,\mathfrak{g}^n_i$'s and the suffix $1$)}$ includes all $\mathfrak{g}^n_i$'s (in any order where repetitions are allowed) such that\\ \begin{figure}[H] \begin{center} \unitlength1cm \begin{minipage}[c][10mm][b]{10mm} \begin{picture}(3,2) \thicklines \linethickness{0.3mm} \put(0.2,0){\circle{0.1}} \put(0.5,0){\mbox{$\mathfrak{g}^n_i$}} \put(0.2,1){\circle{0.1}} \put(0.5,1){\mbox{$\mathfrak{f}^n_0$}} \put(0.2,0){\vector(0,1){0.9}} \end{picture} \end{minipage} $\quad = \quad \sigma \Bigl($ \begin{minipage}[c][10mm][b]{10mm} \begin{picture}(3,2) \thicklines \linethickness{0.3mm} \put(0.2,0){\circle{0.1}} \put(0.5,0){\mbox{$\mathfrak{g}^k_i$}} \put(0.2,1){\circle{0.1}} \put(0.5,1){\mbox{$\mathfrak{f}^k_0$}} \put(0.2,0){\vector(0,1){0.9}} \end{picture} \end{minipage} $\Bigr), \quad \mbox{ for some } \mathfrak{g}_i^k .$ Then we define $F(\mathfrak{M}^{k})$: \end{center} \end{figure} \begin{figure}[H] \unitlength1cm \begin{picture}(5,4) \thicklines \put(0,1){$\mathfrak{M}^k$:} \put(2,3){\circle{0.1}} \put(1,3){\circle{0.1}} \put(0.5,3){\circle{0.1}} \put(0.75,3){\circle{0.1}} \put(1.75,3){\circle{0.1}} \put(1.25,3){\circle{0.1}} \put(0.25,3){\circle{0.1}} \put(0,3){\circle{0.1}} \put(1,2){\vector(1,1){0.9}} \put(1,2){\vector(-1,1){0.9}} \put(1,2){\vector(0,1){0.9}} \put(1,2){\circle{0.1}} \put(1,1.75){\circle{0.1}} \put(1,1.5){\circle{0.1}} \put(1,1.25){\circle{0.1}} \put(1,1){\circle{0.1}} \put(1,0){\circle{0.1}} \put(1,0){\vector(0,1){0.9}} \put(1,1){\circle{0.1}} \put(1,0){\circle{0.1}} \put(1,4){\circle{0.1}} \put(2,3){\vector(-1,1){0.9}} \put(1,3){\vector(0,1){0.9}} \put(0,3){\vector(1,1){0.9}} \put(1,2){\vector(-1,2){0.4}} \put(1,2){\vector(1,2){0.4}} \put(1.5,3){\circle{0.1}} \put(0.5,3){\vector(1,2){0.4}} \put(1.5,3){\vector(-1,2){0.4}} \put(1.2,4){$\mathfrak{f}^k_0$} \put(2.2,3){$\mathfrak{f}^k_{s}$} \put(1.4,2){$\mathfrak{f}^k_{s+1}$} \put(1.2,0){$\mathfrak{f}^k_{s+d+1}$} \put(0,3.5){$\mathfrak{f}^k_{1}$} \put(3.5,1){$\sigma(\mathfrak{M}^k)$:} \put(6,3){\circle{0.1}} \put(5,3){\circle{0.1}} \put(4.5,3){\circle{0.1}} \put(4.75,3){\circle{0.1}} \put(5.25,3){\circle{0.1}} \put(4.25,3){\circle{0.1}} \put(4,3){\circle{0.1}} \put(5,2){\vector(1,1){0.9}} \put(5,2){\vector(1,1){0.9}} \put(5,2){\vector(0,1){0.9}} \put(5,2){\circle{0.1}} \put(5,1.75){\circle{0.1}} \put(5,1.5){\circle{0.1}} \put(5,1.25){\circle{0.1}} \put(5,1.75){\circle{0.1}} \put(5,1){\circle{0.1}} \put(5,0){\circle{0.1}} \put(5,0){\vector(0,1){0.9}} \put(5,1){\circle{0.1}} \put(5,0){\circle{0.1}} \put(5,4){\circle{0.1}} \put(6,3){\vector(-1,1){0.9}} \put(5,3){\vector(0,1){0.9}} \put(4,3){\vector(1,1){0.9}} \put(5,2){\vector(1,2){0.4}} \put(5,2){\vector(-1,2){0.4}} \put(5,2){\vector(-1,1){0.9}} \put(5.5,3){\circle{0.1}} \put(4.5,3){\vector(1,2){0.4}} \put(5.5,3){\vector(-1,2){0.4}} \put(5.2,4){$\mathfrak{f}^n_0$} \put(6.2,3){$\mathfrak{f}^n_{s}$} \put(5.4,2){$\mathfrak{f}^n_{s+1}$} \put(5.2,0){$\mathfrak{f}^n_{s+d+1}$} \put(3.6,3){$\mathfrak{f}^n_{1}$} \put(7.5,1){$F(\mathfrak{M}^{k})$:} \put(10,3){\circle{0.1}} \put(9,3){\circle{0.1}} \put(8.5,3){\circle{0.1}} \put(8.75,3){\circle{0.1}} \put(9.25,3){\circle{0.1}} \put(8.25,3){\circle{0.1}} \put(8,3){\circle{0.1}} \put(9,2){\vector(1,1){0.9}} \put(9,2){\vector(-1,1){0.9}} \put(9,2){\vector(0,1){0.9}} \put(9,2){\circle{0.1}} \put(9,1.75){\circle{0.1}} \put(9,1.5){\circle{0.1}} \put(9,1.25){\circle{0.1}} \put(9,1){\circle{0.1}} 
\put(9,0){\circle{0.1}} \put(9,0){\vector(0,1){0.9}} \put(9,1){\circle{0.1}} \put(9,0){\circle{0.1}} \put(9,4){\circle{0.1}} \put(10,3){\vector(-1,1){0.9}} \put(9,3){\vector(0,1){0.9}} \put(8,3){\vector(1,1){0.9}} \put(9,2){\vector(-1,2){0.4}} \put(9,2){\vector(1,2){0.4}} \put(9.5,3){\circle{0.1}} \put(8.5,3){\vector(1,2){0.4}} \put(9.5,3){\vector(-1,2){0.4}} \put(9.2,4){$\mathfrak{f}^n_0\mathfrak{g}^n_2\dots\mathfrak{g}^n_{2^n}1=code(\mathfrak f^k_0)$} \put(10.2,3){$\mathfrak{f}^n_{s}\mathfrak{g}^n_2\dots\mathfrak{g}^n_{2^n}0$} \put(9.4,2){$\mathfrak{f}^n_{s+1}0\cdots0$} \put(9.2,0){$\mathfrak{f}^n_{s+d+1}0\cdots0$} \put(6.5,3.5){$\mathfrak{f}^n_{1}\mathfrak{g}^n_2\dots\mathfrak{g}^n_{2^n}0$} \end{picture} \caption{}\label{hpa2} \end{figure} \noindent One checks the conditions (i)-(iii) for the mapping $F$. Note that $F(\mathfrak{M}^{k})$ may fail to be p-irreducible for some p-irreducible $\mathfrak{M}^{k}$, but one could only collapse elements of depth $2$ or elements below $s+1$, that is, elements in the `leg' of the model.\\ A. We put $G(\mathfrak{M}^{m})= \mathfrak{M}^{m}\!\!\upharpoonright n$ if $\mathfrak{M}^{m}\thicksim F(\mathfrak{M}^{k})$ for some $\mathfrak{M}^{k}$. Let $\mathfrak{M}^{m}=(W,R,w_0,V^m)$ and $\mathfrak{M}^{k}=(U,S,u_0,V^k)$. The conditions (i)-(iii) for $G$ seem to be obvious; as concerns (ii), for every $w\in W$ there is $u\in U$ such that $(\mathfrak{M}^{m})_w\thicksim F((\mathfrak{M}^{k})_u)$. To show (iv) and (v), let us note that $F(\mathfrak{M}^{k})\!\!\upharpoonright n=\sigma(\mathfrak{M}^{k})$, see Figure \ref{hpa2}. Thus, $G(F(\mathfrak{M}^{k}))=\sigma(\mathfrak{M}^{k})$ and $G(\mathfrak{M}^{m})=\mathfrak{M}^{m}\!\!\upharpoonright n\thicksim F(\mathfrak{M}^{k})\!\!\upharpoonright n=\sigma(\mathfrak{M}^{k})$ if $\mathfrak{M}^{m}\thicksim F(\mathfrak{M}^{k})$.\\ B. It remains to define $G(\mathfrak{M}^{m})$ for $\mathfrak{M}^{m}\in \mathbf M^m_{ir}$ non-equivalent to any $F(\mathfrak{M}^{k})$. Our definition is inductive (to secure (ii)), and we should take into account that for some $w\in W$ and some $\mathfrak{M}^{k}$ we may have $(\mathfrak{M}^{m})_w\thicksim F(\mathfrak{M}^{k})$, and hence we have already defined $G((\mathfrak{M}^{m})_w)=(\mathfrak{M}^{m})_w\!\!\upharpoonright n$. Since (iii) is obvious and (v) is irrelevant in this case, we only have to take care of (i) and (iv). Thus, the frame of the $n$-model $G(\mathfrak{M}^{m})$ should be the same as the frame of $\mathfrak{M}^{m}$, and $G(\mathfrak{M}^{m})$ should always be equivalent with a $\sigma$-model.
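Recall that $\mathfrak{M}\!\!\upharpoonright n$ denotes the $n$-model obtained by restricting all valuations of $\mathfrak{M}$ to $\{x_1,\dots,x_n\}$; since valuations are written as binary strings, this simply keeps the first $n$ bits of every label. The identity $F(\mathfrak{M}^{k})\!\!\upharpoonright n=\sigma(\mathfrak{M}^{k})$ used in A can then be read off from Figure \ref{hpa2}: for instance,
$$code(\mathfrak f^k_0)\!\!\upharpoonright n=(\mathfrak{f}^n_0\mathfrak{g}^n_2\dots\mathfrak{g}^n_{2^n}1)\!\!\upharpoonright n=\mathfrak{f}^n_0 \quad\mbox{ and }\quad (\mathfrak{f}^n_{s+1}0\cdots0)\!\!\upharpoonright n=\mathfrak{f}^n_{s+1},$$
and similarly for the remaining worlds.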
Let $\mathfrak{M}^{m}$ be one of the following $m$-models: \begin{figure}[H] \unitlength1cm \begin{picture}(5,4) \thicklines \put(0.5,4){\circle{0.1}} \put(2,3){\vector(0,1){0.9}} \put(2,4){\circle{0.1}} \put(2,3){\circle{0.1}} \put(0.7,4){$\mathfrak{f}^m_0$} \put(2.2,4){$\mathfrak{f}^m_{0}$} \put(2.2,3){$\mathfrak{f}^m_{1}$} \put(0.7,1){where $s,d\geq 1$} \put(3.6,3){$\mathfrak{f}^m_{1}$} \put(6,3){\circle{0.1}} \put(5,3){\circle{0.1}} \put(4.5,3){\circle{0.1}} \put(4.75,3){\circle{0.1}} \put(5.25,3){\circle{0.1}} \put(4.25,3){\circle{0.1}} \put(4,3){\circle{0.1}} \put(5,2){\vector(1,1){0.9}} \put(5,2){\vector(1,1){0.9}} \put(5,2){\vector(0,1){0.9}} \put(5,2){\circle{0.1}} \put(5,4){\circle{0.1}} \put(6,3){\vector(-1,1){0.9}} \put(5,3){\vector(0,1){0.9}} \put(4,3){\vector(1,1){0.9}} \put(5,2){\vector(1,2){0.4}} \put(5,2){\vector(-1,2){0.4}} \put(5,2){\vector(-1,1){0.9}} \put(5.5,3){\circle{0.1}} \put(4.5,3){\vector(1,2){0.4}} \put(5.5,3){\vector(-1,2){0.4}} \put(5.2,4){$\mathfrak{f}^m_0$} \put(6.2,3){$\mathfrak{f}^m_{s}$} \put(5.4,2){$\mathfrak{f}^m_{s+1}$} \put(7.6,3){$\mathfrak{f}^m_{1}$} \put(10,3){\circle{0.1}} \put(9,3){\circle{0.1}} \put(8.5,3){\circle{0.1}} \put(8.75,3){\circle{0.1}} \put(9.25,3){\circle{0.1}} \put(8.25,3){\circle{0.1}} \put(8,3){\circle{0.1}} \put(9,2){\vector(1,1){0.9}} \put(9,2){\vector(-1,1){0.9}} \put(9,2){\vector(0,1){0.9}} \put(9,2){\circle{0.1}} \put(9,1.75){\circle{0.1}} \put(9,1.5){\circle{0.1}} \put(9,1.25){\circle{0.1}} \put(9,1){\circle{0.1}} \put(9,0){\circle{0.1}} \put(9,0){\vector(0,1){0.9}} \put(9,1){\circle{0.1}} \put(9,0){\circle{0.1}} \put(9,4){\circle{0.1}} \put(10,3){\vector(-1,1){0.9}} \put(9,3){\vector(0,1){0.9}} \put(8,3){\vector(1,1){0.9}} \put(9,2){\vector(-1,2){0.4}} \put(9,2){\vector(1,2){0.4}} \put(9.5,3){\circle{0.1}} \put(8.5,3){\vector(1,2){0.4}} \put(9.5,3){\vector(-1,2){0.4}} \put(9.2,4){$\mathfrak{f}^m_0$} \put(10.2,3){$\mathfrak{f}^m_{s}$} \put(9.4,2){$\mathfrak{f}^m_{s+1}$} \put(9.2,0){$\mathfrak{f}^m_{s+d+1}$} \end{picture} \caption{}\label{hpa3} \end{figure} \noindent B1. If $(\mathfrak{M}^{m})_{0}$\footnote{Let us agree that, in Figure \ref{hpa3}, the vertex $i$ is labeled with $\mathfrak f^m_i$.} is not equivalent with any $F(\mathfrak{M}^{k})$, we take any valuation $\mathfrak f^k$, let $\sigma(\mathfrak L_1,\{\mathfrak f^k\})=(\mathfrak L_1,\{\mathfrak f^n\})$, for some $\mathfrak f^n$, and put $G(\mathfrak{M}^{m})\thicksim(\mathfrak L_1,\{\mathfrak f^n\})$ (which uniquely determines $G(\mathfrak{M}^{m})$). Then (i)--(iv) are clearly fulfilled and (v) is irrelevant. We do the same if $0$ is the only vertex $i$ for which $(\mathfrak{M}^{m})_i\thicksim F(\mathfrak{M}^{k})$, with some $\mathfrak{M}^{k}$; we only take $\mathfrak f^k=\mathfrak f^k_0$.\\ B2. We can defined $G((\mathfrak{M}^{m})_{i})$, if $1\leq i\leq s$, using A. or B1. Thus, if $(\mathfrak{M}^{m})_{i}=(\mathfrak{L}_{2},\{\mathfrak{f}^m_0,\mathfrak{f}^m_1\})$ and $\mathfrak{f}^m_0=\mathfrak{f}^n_0\mathfrak{g}^n_2\dots\mathfrak{g}^n_{2^n}1=code(\mathfrak f^k_0)$ and $\mathfrak{f}^m_1=\mathfrak{f}^n_1\mathfrak{g}^n_2\dots\mathfrak{g}^n_{2^n}0$, for some p-irreducible $\mathfrak{M}^{k}=(\mathfrak{L}_{2},\{\mathfrak{f}^k_0,\mathfrak{f}^k_1\})$ (and we have $\sigma(\mathfrak{M}^{k})=(\mathfrak{L}_{2},\{\mathfrak{f}^n_0,\mathfrak{f}^n_1\})$), we put $G((\mathfrak{M}^{m})_{i})=\sigma(\mathfrak{M}^{k})=(\mathfrak{L}_{2},\{\mathfrak{f}^n_0,\mathfrak{f}^n_1\})$. Note that $\mathfrak{f}^n_1$ must be one of $\mathfrak{g}^n_i$'s in $code(\mathfrak f^k_0)$. 
Otherwise, $G((\mathfrak{M}^{m})_{i})\thicksim\sigma(\mathfrak{L}_{1},\{\mathfrak{f}^k_0\})$, where $G((\mathfrak{M}^{m})_{0})=\sigma((\mathfrak{L}_{1},\{\mathfrak{f}^k_0\})$). Thus, in each case we have $G((\mathfrak{M}^{m})_{i})=\sigma(\mathfrak{M}^{k})$, for some $k$-model $\mathfrak{M}^{k}$ over $\mathfrak{L}_{2}$.\footnote{We have added the suffix $0$ or $1$ at the end of $code(\mathfrak f^k_0)$, see Figure \ref{hpa2}, to be sure that $F(\mathfrak{M}^{k})$ is p-irreducible if $\mathfrak{M}^{k}$ is a p-irreducible model over $\mathfrak{L}_{2}$.} \noindent B3. The worst case is $G((\mathfrak{M}^{m})_{s+1})$. By B2. and the definition of $code(\mathfrak f^k_0)$ (included in $\mathfrak f^m_0$), we can assume that, if $1\leq i\leq s$, then $G((\mathfrak{M}^{m})_{i})=G(\mathfrak L_2,\{\mathfrak f^m_0,\mathfrak f^m_i\})=\sigma(\mathfrak{M}^{k}_i)$ for some $k$-model $\mathfrak{M}^{k}_i=(\mathfrak L_2,\{\mathfrak f^k_0,\mathfrak f^k_i\})$; note that we have the same $\mathfrak f^k_0$ in each of the models $\mathfrak{M}^{k}_i$. There are different $\mathfrak f^k_0$'s with the same $code(\mathfrak f^k_0)$; but we can always decide which $\mathfrak f^k_0$ is chosen - for instance, our choose could be given according to the lexicographical order in which all valuations are gven. Then, assuming $(\mathfrak{M}^{m})_{{s+1}}$ is not equivalent with any $F(\mathfrak{M}^{k})$, we take the $k$-valuation $0\cdots0$ and define $G((\mathfrak{M}^{m})_{{s+1}})$ as it is shown in Figure \ref{hpa5} below. \begin{figure}[H] \unitlength1cm \begin{picture}(5,4) \thicklines \put(0,3){$\sigma\Bigl(\ \ \mathfrak{f}^k_1$} \put(2.5,3){\circle{0.1}} \put(1.5,3){\circle{0.1}} \put(1,3){\circle{0.1}} \put(0.5,3){\circle{0.1}} \put(1.5,2){\vector(1,1){0.9}} \put(1.5,2){\vector(1,1){0.9}} \put(1.5,2){\vector(0,1){0.9}} \put(1.5,2){\circle{0.1}} \put(1.5,4){\circle{0.1}} \put(2.5,3){\vector(-1,1){0.9}} \put(1.5,3){\vector(0,1){0.9}} \put(0.5,3){\vector(1,1){0.9}} \put(1.5,2){\vector(1,2){0.4}} \put(1.5,2){\vector(-1,2){0.4}} \put(1.5,2){\vector(-1,1){0.9}} \put(2,3){\circle{0.1}} \put(2,3){\vector(-1,2){0.4}} \put(1,3){\vector(1,2){0.4}} \put(1.7,4){$\mathfrak{f}^k_0$} \put(2.7,3){$\mathfrak{f}^k_s\ \Bigr)\ =$} \put(1.9,2){$0\cdots0$} \put(6,3){\circle{0.1}} \put(5,3){\circle{0.1}} \put(4.5,3){\circle{0.1}} \put(4,3){\circle{0.1}} \put(5,2){\vector(1,1){0.9}} \put(5,2){\vector(1,1){0.9}} \put(5,2){\vector(0,1){0.9}} \put(5,2){\circle{0.1}} \put(5,4){\circle{0.1}} \put(6,3){\vector(-1,1){0.9}} \put(5,3){\vector(0,1){0.9}} \put(4,3){\vector(1,1){0.9}} \put(5,2){\vector(1,2){0.4}} \put(5,2){\vector(-1,2){0.4}} \put(5,2){\vector(-1,1){0.9}} \put(5.5,3){\circle{0.1}} \put(4.5,3){\vector(1,2){0.4}} \put(5.5,3){\vector(-1,2){0.4}} \put(5.2,4){$\mathfrak{f}^n_0$} \put(6.2,3){$\mathfrak{f}^n_s$} \put(5.4,2){$\mathfrak{f}^n$} \put(4.22,3){$\mathfrak{f}^n_1$} \put(10.9,3.3){$\mathfrak{f}^n_1$} \put(13,3){\circle{0.1}} \put(12,3){\circle{0.1}} \put(11.5,3){\circle{0.1}} \put(11,3){\circle{0.1}} \put(12,2){\vector(1,1){0.9}} \put(12,2){\vector(-1,1){0.9}} \put(12,2){\vector(0,1){0.9}} \put(12,2){\circle{0.1}} \put(12,1.75){\circle{0.1}} \put(12,1.5){\circle{0.1}} \put(12,1.25){\circle{0.1}} \put(12,1){\circle{0.1}} \put(12,0){\circle{0.1}} \put(12,0){\vector(0,1){0.9}} \put(12,1){\circle{0.1}} \put(12,0){\circle{0.1}} \put(12,4){\circle{0.1}} \put(13,3){\vector(-1,1){0.9}} \put(12,3){\vector(0,1){0.9}} \put(11,3){\vector(1,1){0.9}} \put(12,2){\vector(-1,2){0.4}} \put(12,2){\vector(1,2){0.4}} \put(12.5,3){\circle{0.1}} 
\put(11.5,3){\vector(1,2){0.4}} \put(12.5,3){\vector(-1,2){0.4}} \put(7.8,3.3){$\mathfrak{f}^m_1$} \put(10,3){\circle{0.1}} \put(9,3){\circle{0.1}} \put(8.5,3){\circle{0.1}} \put(8,3){\circle{0.1}} \put(9,2){\vector(1,1){0.9}} \put(9,2){\vector(-1,1){0.9}} \put(9,2){\vector(0,1){0.9}} \put(9,2){\circle{0.1}} \put(9,1.75){\circle{0.1}} \put(9,1.5){\circle{0.1}} \put(9,1.25){\circle{0.1}} \put(9,1){\circle{0.1}} \put(9,0){\circle{0.1}} \put(9,0){\vector(0,1){0.9}} \put(9,1){\circle{0.1}} \put(9,0){\circle{0.1}} \put(9,4){\circle{0.1}} \put(10,3){\vector(-1,1){0.9}} \put(9,3){\vector(0,1){0.9}} \put(8,3){\vector(1,1){0.9}} \put(9,2){\vector(-1,2){0.4}} \put(9,2){\vector(1,2){0.4}} \put(9.5,3){\circle{0.1}} \put(8.5,3){\vector(1,2){0.4}} \put(9.5,3){\vector(-1,2){0.4}} \put(9.2,0){$\mathfrak{f}^m_{s+d+1}$} \put(9.2,4){$\mathfrak{f}^m_0$} \put(10,3.3){$\mathfrak{f}^m_s$} \put(9.4,2){$\mathfrak{f}^m_{s+1}$} \put(7.1,2){$G\Bigl($} \put(10.3,2){$\Bigr)\ =$} \put(12.2,1){$\mathfrak{f}^n$} \put(12.2,0){$\mathfrak{f}^n$} \put(12.2,4){$\mathfrak{f}^n_0$} \put(13,3.3){$\mathfrak{f}^n_s$} \put(12.4,2){$\mathfrak{f}^n$} \end{picture} \caption{}\label{hpa5} \end{figure} \noindent B4. There remains to define $G((\mathfrak{M}^{m})_{{s+i+1}})$ assuming that $i\geq 1$, and $(\mathfrak{M}^{m})_{{s+i+1}}$ is not equivalent with any $F(\mathfrak{M}^{k})$ and $G((\mathfrak{M}^{m})_{{s+i}})$ has already been defined. Then we approach as it is shown in Figure \ref{hpa6} \begin{figure}[H] \unitlength1cm \begin{picture}(5,4) \thicklines \put(0.2,3.2){ $\mathfrak{f}^m_1$} \put(0,2){$G\Bigl( $} \put(2.5,3){\circle{0.1}} \put(1.5,3){\circle{0.1}} \put(1,3){\circle{0.1}} \put(0.5,3){\circle{0.1}} \put(1.5,2){\vector(1,1){0.9}} \put(1.5,2){\vector(1,1){0.9}} \put(1.5,2){\vector(0,1){0.9}} \put(1.5,2){\circle{0.1}} \put(1.5,4){\circle{0.1}} \put(2.5,3){\vector(-1,1){0.9}} \put(1.5,3){\vector(0,1){0.9}} \put(0.5,3){\vector(1,1){0.9}} \put(1.5,2){\vector(1,2){0.4}} \put(1.5,2){\vector(-1,2){0.4}} \put(1.5,2){\vector(-1,1){0.9}} \put(2,3){\circle{0.1}} \put(2,3){\vector(-1,2){0.4}} \put(1,3){\vector(1,2){0.4}} \put(1.7,4){$\mathfrak{f}^m_0$} \put(2.5,3.2){$\mathfrak{f}^m_s$} \put(2.7,2){$\ \ \Bigr)\ =$} \put(1.9,2){$\mathfrak{f}^m_{s+1}$} \put(1.5,1.75){\circle{0.1}} \put(1.5,1.5){\circle{0.1}} \put(1.5,1){\circle{0.1}} \put(1.5,1){\vector(0,1){0.4}} \put(1.5,1){\circle{0.1}} \put(1.9,1){$\mathfrak{f}^m_{s+i}$} \put(6,3){\circle{0.1}} \put(5,3){\circle{0.1}} \put(4.5,3){\circle{0.1}} \put(4,3){\circle{0.1}} \put(5,2){\vector(1,1){0.9}} \put(5,2){\vector(1,1){0.9}} \put(5,2){\vector(0,1){0.9}} \put(5,2){\circle{0.1}} \put(5,4){\circle{0.1}} \put(6,3){\vector(-1,1){0.9}} \put(5,3){\vector(0,1){0.9}} \put(4,3){\vector(1,1){0.9}} \put(5,2){\vector(1,2){0.4}} \put(5,2){\vector(-1,2){0.4}} \put(5,2){\vector(-1,1){0.9}} \put(5.5,3){\circle{0.1}} \put(4.5,3){\vector(1,2){0.4}} \put(5.5,3){\vector(-1,2){0.4}} \put(5.2,4){$\mathfrak{f}^n_0$} \put(6,3.2){$\mathfrak{f}^n_s$} \put(5.4,2){$\mathfrak{f}^n_{s+1}$} \put(3.8,3.2){$\mathfrak{f}^n_1$} \put(5,1.75){\circle{0.1}} \put(5,1.5){\circle{0.1}} \put(5,1){\circle{0.1}} \put(5,1){\vector(0,1){0.4}} \put(5,1){\circle{0.1}} \put(5.4,1){$\mathfrak{f}^n_{s+i}$} \put(10.9,3.3){$\mathfrak{f}^n_1$} \put(13,3){\circle{0.1}} \put(12,3){\circle{0.1}} \put(11.5,3){\circle{0.1}} \put(11,3){\circle{0.1}} \put(12,2){\vector(1,1){0.9}} \put(12,2){\vector(-1,1){0.9}} \put(12,2){\vector(0,1){0.9}} \put(12,2){\circle{0.1}} \put(12,1.75){\circle{0.1}} \put(12,1.5){\circle{0.1}} \put(12,1.25){\circle{0.1}} 
\put(12,1){\circle{0.1}} \put(12,0){\circle{0.1}} \put(12,0){\vector(0,1){0.9}} \put(12,1){\circle{0.1}} \put(12,0){\circle{0.1}} \put(12,4){\circle{0.1}} \put(13,3){\vector(-1,1){0.9}} \put(12,3){\vector(0,1){0.9}} \put(11,3){\vector(1,1){0.9}} \put(12,2){\vector(-1,2){0.4}} \put(12,2){\vector(1,2){0.4}} \put(12.5,3){\circle{0.1}} \put(11.5,3){\vector(1,2){0.4}} \put(12.5,3){\vector(-1,2){0.4}} \put(7.8,3.3){$\mathfrak{f}^m_1$} \put(10,3){\circle{0.1}} \put(9,3){\circle{0.1}} \put(8.5,3){\circle{0.1}} \put(8,3){\circle{0.1}} \put(9,2){\vector(1,1){0.9}} \put(9,2){\vector(-1,1){0.9}} \put(9,2){\vector(0,1){0.9}} \put(9,2){\circle{0.1}} \put(9,1.75){\circle{0.1}} \put(9,1.5){\circle{0.1}} \put(9,1.25){\circle{0.1}} \put(9,1){\circle{0.1}} \put(9,0){\circle{0.1}} \put(9,0){\vector(0,1){0.9}} \put(9,1){\circle{0.1}} \put(9,0){\circle{0.1}} \put(9,4){\circle{0.1}} \put(10,3){\vector(-1,1){0.9}} \put(9,3){\vector(0,1){0.9}} \put(8,3){\vector(1,1){0.9}} \put(9,2){\vector(-1,2){0.4}} \put(9,2){\vector(1,2){0.4}} \put(9.5,3){\circle{0.1}} \put(8.5,3){\vector(1,2){0.4}} \put(9.5,3){\vector(-1,2){0.4}} \put(9.2,1){$\mathfrak{f}^m_{s+i}$} \put(9.2,0){$\mathfrak{f}^m_{s+d+1}$} \put(9.2,4){$\mathfrak{f}^m_0$} \put(10,3.3){$\mathfrak{f}^m_s$} \put(9.4,2){$\mathfrak{f}^m_{s+1}$} \put(7.1,2){$G\Bigl($} \put(10.3,2){$\Bigr)\ =$} \put(12.2,1){$\mathfrak{f}^n_{s+i}$} \put(12.2,0){$\mathfrak{f}^n_{s+i}$} \put(12.2,0.5){$\mathfrak{f}^n_{s+i}$} \put(12.2,4){$\mathfrak{f}^n_0$} \put(13,3.3){$\mathfrak{f}^n_s$} \put(12.4,2){$\mathfrak{f}^n_{s+1}$} \end{picture} \caption{}\label{hpa6} \end{figure} \noindent Thus, we have $G(\mathfrak{M}^{m})\thicksim G((\mathfrak{M}^{m})_{{s+i}})$. \end{proof} \begin{theorem}\label{c5} The logic $\mathsf L(\mathfrak C_5)$ (see Figure \ref{TF}) has finitary unification.\end{theorem}\begin{proof} Let $\mathbf F=\{\mathfrak L_1, \mathfrak L_2, \mathfrak F_2, \mathfrak L_3, +\mathfrak F_2, \mathfrak R_2, \mathfrak Y_2, \mathfrak Y_3, \mathfrak C_5 \}$ (see Figure \ref{GF} and \ref{8fames}) and let $\mathsf{L}=\mathsf{L}(\mathbf{F})$. Since $\mathbf{F}=sm(\mathfrak C_5)$, we conclude {\sf L} is the logic of $\mathfrak C_5$; see Lemma \ref{lf8}. Let $n\geq 1$ and $m=n+n\cdot 4^n+2$. Take any $\sigma\colon\{x_1,\dots,x_n\}\to \mathsf{Fm}^k$ and show there are $F\colon \mathbf M^k\to\mathbf M^m$ and $G\colon \mathbf M^m\to\mathbf M^n$ fulfilling the conditions (i)--(v) of Theorem \ref{main2}. Let $\mathfrak f^k$ be any $k$-valuation and let $\sigma(\mathfrak L_1,\mathfrak f^k)=(\mathfrak L_1,\mathfrak f^n)$ for some $\mathfrak f^n$. We define the $m$-valuation $code(\mathfrak f^k)$ a bit different way than in the proof of Theorem \ref{wpl}. 
Let\\ \indent $code(\mathfrak{f}^k)=\mathfrak{f}^n\mathfrak{g}^n_1\mathfrak h^n_1\dots\mathfrak{g}^n_{2^n}\mathfrak{h}^n_{2^n}11\quad \mbox{(the concatenation of $\mathfrak f^n$, all $\mathfrak{g}^n_i,\mathfrak{h}^n_i$'s and $11$)}$\\ where $\mathfrak{g}^n_i,\mathfrak{h}^n_i$'s are all $n$-valuations such that \begin{figure}[H] \unitlength1cm \begin{picture}(3,1) \thicklines \put(0,1){\circle{0.1}} \put(1,0){\circle{0.1}} \put(0.4,1){\mbox{$\mathfrak{f}^n$}} \put(2,1){\circle{0.1}} \put(2.4,1){\mbox{$\mathfrak{g}^n_i$}} \put(1,0){\vector(1,1){0.9}} \put(1,0){\vector(-1,1){0.9}} \put(1.4,0){\mbox{$\mathfrak{h}^n_i$}} \put(3.5,0.5){\mbox{$ = \quad \sigma \Bigl($}} \put(5,1){\circle{0.1}} \put(6,0){\circle{0.1}} \put(6.4,0){\mbox{$\mathfrak{h}_i^k$}} \put(5.4,1){\mbox{$\mathfrak{f}^k$}} \put(7,1){\circle{0.1}} \put(7.4,1){\mbox{$\mathfrak{g}^k_i$}} \put(6,0){\vector(1,1){0.9}} \put(6,0){\vector(-1,1){0.9}} \put(8,0.5){ $\Bigr), \quad \mbox{ for some } \mathfrak{g}_i^k,\mathfrak{h}_i^k$.} \end{picture} \caption{The definition of $code(\mathfrak f^k)$} \label{code}\end{figure} We could make $code(\mathfrak f^k)$ unique, for any $\mathfrak f^k$. For instance, let us begin with the sequence $\mathfrak{g}_1^n\mathfrak{h}_1^n\dots \mathfrak{g}_{2^n}^n\mathfrak{h}_{2^n}^n$ containing all possible pairs $\mathfrak{g}_i^n\mathfrak{h}_i^n$ of $n$-valuations (regarded as binary strings) put in lexicographical order. The sequence contains $4^n$ bits. Then, step by step, we either retain the pair $\mathfrak{g}_i^n\mathfrak{h}_i^n$ or replace it with $\mathfrak{f}^n\mathfrak{f}^n$, depending on whether or not it fulfills the equation in Figure \ref{code}, for some $\mathfrak{g}_i^k,\mathfrak{h}_i^k$. Finally, we add $\mathfrak f^n$ at the beginning and $11$ at the end of the sequence, obtaining an $m$-valuation. Now we are ready to define $F(\mathfrak{M}^k)$, for any p-irreducible $k$-model $\mathfrak{M}^k$ over any frame in $\mathbf F$.
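In symbols, the condition pictured in Figure \ref{code} says that the pair $\mathfrak{g}^n_i\mathfrak{h}^n_i$ is kept exactly when there are $k$-valuations $\mathfrak{g}^k_i,\mathfrak{h}^k_i$ such that
$$\sigma\bigl(\mathfrak{F}_2,\{\mathfrak f^k,\mathfrak{g}^k_i,\mathfrak{h}^k_i\}\bigr)=\bigl(\mathfrak{F}_2,\{\mathfrak f^n,\mathfrak{g}^n_i,\mathfrak{h}^n_i\}\bigr),$$
where, as in the figure, $\mathfrak f$ and $\mathfrak g_i$ label the two maximal points and $\mathfrak h_i$ labels the root (in the notation used below for models over $\mathfrak F_2$).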
So, let $\mathfrak{M}^k$ be one of the following $k$-models: \begin{figure}[H] \unitlength1cm \begin{picture}(3,2) \thicklines \put(0,0){\circle{0.1}} \put(0.3,0){\mbox{$\mathfrak{f}^k_0$}} \put(2,0){\circle{0.1}} \put(2.3,0){\mbox{$\mathfrak{f}^k_1$}} \put(2,1){\circle{0.1}} \put(2.3,1){\mbox{$\mathfrak{f}^k_0$}} \put(2,0){\vector(0,1){0.9}} \put(4,1){\circle{0.1}} \put(5,0){\circle{0.1}} \put(6,1){\circle{0.1}} \put(5.3,0){\mbox{$\mathfrak{f}^k_1$}} \put(4.3,1){\mbox{$\mathfrak{f}^k_{0'}$}} \put(6.3,1){\mbox{$\mathfrak{f}^k_0$}} \put(5,0){\vector(1,1){0.9}} \put(5,0){\vector(-1,1){0.9}} \put(8,0){\vector(0,1){0.9}} \put(8,1){\vector(0,1){0.9}} \put(8,1){\circle{0.1}} \put(8,2){\circle{0.1}} \put(8,0){\circle{0.1}} \put(8.3,0){\mbox{$\mathfrak{f}^k_2$}} \put(8.3,1.8){\mbox{$\mathfrak{f}^k_0$}} \put(8.3,1){\mbox{$\mathfrak{f}^k_1$}} \put(10,2){\circle{0.1}} \put(11,1){\circle{0.1}} \put(11,0){\circle{0.1}} \put(12,2){\circle{0.1}} \put(11.3,1){\mbox{$\mathfrak{f}^k_1$}} \put(11.3,0){\mbox{$\mathfrak{f}^k_2$}} \put(10.3,2){\mbox{$\mathfrak{f}^k_{0'}$}} \put(12.3,2){\mbox{$\mathfrak{f}^k_0$}} \put(11,1){\vector(1,1){0.9}} \put(11,1){\vector(-1,1){0.9}} \put(11,0){\vector(0,1){0.9}} \end{picture}\\ \unitlength1cm \begin{picture}(5,3) \thicklines \put(1,0){\vector(-1,1){0.9}} \put(1,0){\vector(1,1){0.9}} \put(0,1){\vector(1,1){0.9}} \put(2,1){\vector(-1,1){0.9}} \put(0,1){\circle{0.1}} \put(1,2){\circle{0.1}} \put(1,0){\circle{0.1}} \put(2,1){\circle{0.1}} \put(1.3,0){\mbox{$\mathfrak{f}^k_2$}} \put(0,1.4){\mbox{$\mathfrak{f}^k_{1'}$}} \put(1.1,2){\mbox{$\mathfrak{f}^k_0$}} \put(2,1.4){\mbox{$\mathfrak{f}^k_1$}} \put(4.5,0){\vector(-1,1){0.9}} \put(4.5,0){\vector(1,1){0.9}} \put(5.5,1){\vector(1,1){0.9}} \put(3.5,1){\circle{0.1}} \put(4.5,2){\circle{0.1}} \put(4.5,0){\circle{0.1}} \put(5.5,1){\circle{0.1}} \put(6.5,2){\circle{0.1}} \put(3.5,1){\vector(1,1){0.9}} \put(5.5,1){\vector(-1,1){0.9}} \put(4.8,0){\mbox{$\mathfrak{f}^k_2$}} \put(3.5,1.4){\mbox{$\mathfrak{f}^k_{1'}$}} \put(4.1,2){\mbox{$\mathfrak{f}^k_{0'}$}} \put(5.5,1.4){\mbox{$\mathfrak{f}^k_1$}} \put(6.1,2){\mbox{$\mathfrak{f}^k_{0}$}} \put(7.7,2){\circle{0.1}} \put(9.7,2){\circle{0.1}} \put(7.7,1){\circle{0.1}} \put(9.7,1){\circle{0.1}} \put(8.7,0){\circle{0.1}} \put(7.7,1){\vector(0,1){0.9}} \put(9.7,1){\vector(0,1){0.9}} \put(7.7,1){\vector(2,1){1.9}} \put(9.7,1){\vector(-2,1){1.9}} \put(8.7,0){\vector(1,1){0.9}} \put(8.7,0){\vector(-1,1){0.9}} \put(9,0){\mbox{$\mathfrak{f}^k_2$}} \put(7.3,1){\mbox{$\mathfrak{f}^k_{1'}$}} \put(7.7,2.2){\mbox{$\mathfrak{f}^k_{0'}$}} \put(9.8,1){\mbox{$\mathfrak{f}^k_1$}} \put(9.7,2.2){\mbox{$\mathfrak{f}^k_{0}$}} \put(11.5,0){\vector(-1,1){0.9}} \put(11.5,0){\vector(1,1){0.9}} \put(12.5,1){\vector(-1,1){0.9}} \put(10.5,1){\circle{0.1}} \put(11.5,2){\circle{0.1}} \put(11.5,0){\circle{0.1}} \put(12.5,1){\circle{0.1}} \put(10.5,1){\vector(1,1){0.9}} \put(10.5,2){\circle{0.1}} \put(12.5,2){\circle{0.1}} \put(10.5,1){\vector(0,1){0.9}} \put(12.5,1){\vector(0,1){0.9}} \put(12,0){\mbox{$\mathfrak{f}^k_2$}} \put(10.8,1){\mbox{$\mathfrak{f}^k_{1'}$}} \put(10.5,2.2){\mbox{$\mathfrak{f}^k_{0'}$}} \put(11.8,1){\mbox{$\mathfrak{f}^k_1$}} \put(11.5,2.2){\mbox{$\mathfrak{f}^k_{0}$}} \put(12.5,2.2){\mbox{$\mathfrak{f}^k_{0''}$}} \end{picture}\\ \caption{} \label{c} \end{figure} and let $\sigma(\mathfrak{M}^k)$ be, correspondingly; \begin{figure}[H] \unitlength1cm \begin{picture}(3,2) \thicklines \put(0,0){\circle{0.1}} \put(0.3,0){\mbox{$\mathfrak{f}^n_0$}} \put(2,0){\circle{0.1}} 
\put(2.3,0){\mbox{$\mathfrak{f}^n_1$}} \put(2,1){\circle{0.1}} \put(2.3,1){\mbox{$\mathfrak{f}^n_0$}} \put(2,0){\vector(0,1){0.9}} \put(4,1){\circle{0.1}} \put(5,0){\circle{0.1}} \put(6,1){\circle{0.1}} \put(5.3,0){\mbox{$\mathfrak{f}^n_1$}} \put(4.3,1){\mbox{$\mathfrak{f}^n_{0'}$}} \put(6.3,1){\mbox{$\mathfrak{f}^n_0$}} \put(5,0){\vector(1,1){0.9}} \put(5,0){\vector(-1,1){0.9}} \put(8,0){\vector(0,1){0.9}} \put(8,1){\vector(0,1){0.9}} \put(8,1){\circle{0.1}} \put(8,2){\circle{0.1}} \put(8,0){\circle{0.1}} \put(8.3,0){\mbox{$\mathfrak{f}^n_2$}} \put(8.3,1.8){\mbox{$\mathfrak{f}^n_0$}} \put(8.3,1){\mbox{$\mathfrak{f}^n_1$}} \put(10,2){\circle{0.1}} \put(11,1){\circle{0.1}} \put(11,0){\circle{0.1}} \put(12,2){\circle{0.1}} \put(11.3,1){\mbox{$\mathfrak{f}^n_1$}} \put(11.3,0){\mbox{$\mathfrak{f}^n_2$}} \put(10.3,2){\mbox{$\mathfrak{f}^n_{0'}$}} \put(12.3,2){\mbox{$\mathfrak{f}^n_0$}} \put(11,1){\vector(1,1){0.9}} \put(11,1){\vector(-1,1){0.9}} \put(11,0){\vector(0,1){0.9}} \end{picture}\\ \unitlength1cm \begin{picture}(5,3) \thicklines \put(1,0){\vector(-1,1){0.9}} \put(1,0){\vector(1,1){0.9}} \put(0,1){\vector(1,1){0.9}} \put(2,1){\vector(-1,1){0.9}} \put(0,1){\circle{0.1}} \put(1,2){\circle{0.1}} \put(1,0){\circle{0.1}} \put(2,1){\circle{0.1}} \put(1.3,0){\mbox{$\mathfrak{f}^n_2$}} \put(0,1.4){\mbox{$\mathfrak{f}^n_{1'}$}} \put(1.1,2){\mbox{$\mathfrak{f}^n_0$}} \put(2,1.4){\mbox{$\mathfrak{f}^n_1$}} \put(4.5,0){\vector(-1,1){0.9}} \put(4.5,0){\vector(1,1){0.9}} \put(5.5,1){\vector(1,1){0.9}} \put(3.5,1){\circle{0.1}} \put(4.5,2){\circle{0.1}} \put(4.5,0){\circle{0.1}} \put(5.5,1){\circle{0.1}} \put(6.5,2){\circle{0.1}} \put(3.5,1){\vector(1,1){0.9}} \put(5.5,1){\vector(-1,1){0.9}} \put(4.8,0){\mbox{$\mathfrak{f}^n_2$}} \put(3.5,1.4){\mbox{$\mathfrak{f}^n_{1'}$}} \put(4.1,2){\mbox{$\mathfrak{f}^n_{0'}$}} \put(5.5,1.4){\mbox{$\mathfrak{f}^n_1$}} \put(6.1,2){\mbox{$\mathfrak{f}^n_{0}$}} \put(7.7,2){\circle{0.1}} \put(9.7,2){\circle{0.1}} \put(7.7,1){\circle{0.1}} \put(9.7,1){\circle{0.1}} \put(8.7,0){\circle{0.1}} \put(7.7,1){\vector(0,1){0.9}} \put(9.7,1){\vector(0,1){0.9}} \put(7.7,1){\vector(2,1){1.9}} \put(9.7,1){\vector(-2,1){1.9}} \put(8.7,0){\vector(1,1){0.9}} \put(8.7,0){\vector(-1,1){0.9}} \put(9,0){\mbox{$\mathfrak{f}^n_2$}} \put(7.3,1){\mbox{$\mathfrak{f}^n_{1'}$}} \put(7.7,2.2){\mbox{$\mathfrak{f}^n_{0'}$}} \put(9.8,1){\mbox{$\mathfrak{f}^n_1$}} \put(9.7,2.2){\mbox{$\mathfrak{f}^n_{0}$}} \put(11.5,0){\vector(-1,1){0.9}} \put(11.5,0){\vector(1,1){0.9}} \put(12.5,1){\vector(-1,1){0.9}} \put(10.5,1){\circle{0.1}} \put(11.5,2){\circle{0.1}} \put(11.5,0){\circle{0.1}} \put(12.5,1){\circle{0.1}} \put(10.5,1){\vector(1,1){0.9}} \put(10.5,2){\circle{0.1}} \put(12.5,2){\circle{0.1}} \put(10.5,1){\vector(0,1){0.9}} \put(12.5,1){\vector(0,1){0.9}} \put(12,0){\mbox{$\mathfrak{f}^n_2$}} \put(10.8,1){\mbox{$\mathfrak{f}^n_{1'}$}} \put(10.5,2.2){\mbox{$\mathfrak{f}^n_{0'}$}} \put(11.8,1){\mbox{$\mathfrak{f}^n_1$}} \put(11.5,2.2){\mbox{$\mathfrak{f}^n_{0}$}} \put(12.5,2.2){\mbox{$\mathfrak{f}^n_{0''}$}} \end{picture}\\ \caption{} \label{cc1} \end{figure} \noindent We define $F(\mathfrak M^k)$ as one of the following $m$-models: \begin{figure}[H] \unitlength1cm \begin{picture}(3,2) \thicklines \put(0,0){\circle{0.1}} \put(0,0.3){\mbox{$code(\mathfrak{f}^k_0)$}} \put(2,0){\circle{0.1}} \put(2.3,0){\mbox{$\mathfrak{f}^n_10\cdots01$}} \put(2,1){\circle{0.1}} \put(2,1.3){\mbox{$code(\mathfrak{f}^k_0)$}} \put(2,0){\vector(0,1){0.9}} \put(4,1){\circle{0.1}} \put(5,0){\circle{0.1}} \put(6,1){\circle{0.1}} 
\put(5.3,0){\mbox{$\mathfrak{f}^n_10\cdots01$}} \put(4,1.3){\mbox{$code(\mathfrak{f}^k_{0'})$}} \put(6,1.3){\mbox{$code(\mathfrak{f}^k_0)$}} \put(5,0){\vector(1,1){0.9}} \put(5,0){\vector(-1,1){0.9}} \put(8,0){\vector(0,1){0.9}} \put(8,1){\vector(0,1){0.9}} \put(8,1){\circle{0.1}} \put(8,2){\circle{0.1}} \put(8,0){\circle{0.1}} \put(8.3,0){\mbox{$\mathfrak{f}^n_20\cdots0$}} \put(8.3,1.8){\mbox{$code(\mathfrak{f}^k_0)$}} \put(8.3,1){\mbox{$\mathfrak{f}^n_10\cdots01$}} \put(10,2){\circle{0.1}} \put(11,1){\circle{0.1}} \put(11,0){\circle{0.1}} \put(12,2){\circle{0.1}} \put(11.3,1){\mbox{$\mathfrak{f}^n_10\cdots01$}} \put(11.3,0){\mbox{$\mathfrak{f}^n_20\cdots0$}} \put(10.3,2){\mbox{$code(\mathfrak{f}^k_{0'})$}} \put(12,2.3){\mbox{$code(\mathfrak{f}^k_0)$}} \put(11,1){\vector(1,1){0.9}} \put(11,1){\vector(-1,1){0.9}} \put(11,0){\vector(0,1){0.9}} \end{picture}\\ \unitlength1cm \begin{picture}(5,3) \thicklines \put(1,0){\vector(-1,1){0.9}} \put(1,0){\vector(1,1){0.9}} \put(0,1){\vector(1,1){0.9}} \put(2,1){\vector(-1,1){0.9}} \put(0,1){\circle{0.1}} \put(1,2){\circle{0.1}} \put(1,0){\circle{0.1}} \put(2,1){\circle{0.1}} \put(1.3,0){\mbox{$\mathfrak{f}^n_20\cdots0$}} \put(0.3,0.9){\mbox{$\mathfrak{f}^n_{1'}0\cdots01$}} \put(0.8,2.2){\mbox{$code(\mathfrak{f}^k_0)$}} \put(2,1.4){\mbox{$\mathfrak{f}^n_10\cdots01$}} \put(4.5,0){\vector(-1,1){0.9}} \put(4.5,0){\vector(1,1){0.9}} \put(5.5,1){\vector(1,1){0.9}} \put(3.5,1){\circle{0.1}} \put(4.5,2){\circle{0.1}} \put(4.5,0){\circle{0.1}} \put(5.5,1){\circle{0.1}} \put(6.5,2){\circle{0.1}} \put(3.5,1){\vector(1,1){0.9}} \put(5.5,1){\vector(-1,1){0.9}} \put(4.8,0){\mbox{$\mathfrak{f}^n_20\cdots0$}} \put(3.7,0.9){\mbox{$\mathfrak{f}^n_{1'}0\cdots01$}} \put(3.6,2.2){\mbox{$code(\mathfrak{f}^k_{0'})$}} \put(5.5,0.6){\mbox{$\mathfrak{f}^n_10\cdots01$}} \put(5.3,2.2){\mbox{$code(\mathfrak{f}^k_{0})$}} \put(7.7,2){\circle{0.1}} \put(9.7,2){\circle{0.1}} \put(7.7,1){\circle{0.1}} \put(9.7,1){\circle{0.1}} \put(8.7,0){\circle{0.1}} \put(7.7,1){\vector(0,1){0.9}} \put(9.7,1){\vector(0,1){0.9}} \put(7.7,1){\vector(2,1){1.9}} \put(9.7,1){\vector(-2,1){1.9}} \put(8.7,0){\vector(1,1){0.9}} \put(8.7,0){\vector(-1,1){0.9}} \put(9,0){\mbox{$\mathfrak{f}^n_20\cdots0$}} \put(6.3,1.2){\mbox{$\mathfrak{f}^n_{1'}0\cdots01$}} \put(7,2.2){\mbox{$code(\mathfrak{f}^k_{0'})$}} \put(8.2,0.9){\mbox{$\mathfrak{f}^n_10\cdots01$}} \put(8.2,1.9){\mbox{$code(\mathfrak{f}^k_{0})$}} \put(11.5,0){\vector(-1,1){0.9}} \put(11.5,0){\vector(1,1){0.9}} \put(12.5,1){\vector(-1,1){0.9}} \put(10.5,1){\circle{0.1}} \put(11.5,2){\circle{0.1}} \put(11.5,0){\circle{0.1}} \put(12.5,1){\circle{0.1}} \put(10.5,1){\vector(1,1){0.9}} \put(10.5,2){\circle{0.1}} \put(12.5,2){\circle{0.1}} \put(10.5,1){\vector(0,1){0.9}} \put(12.5,1){\vector(0,1){0.9}} \put(11.8,0){\mbox{$\mathfrak{f}^n_20\cdots0$}} \put(10.8,1){\mbox{$\mathfrak{f}^n_{1'}0\cdots01$}} \put(9.4,2.2){\mbox{$code(\mathfrak{f}^k_{0'})$}} \put(12.3,0.5){\mbox{$\mathfrak{f}^n_10\cdots01$}} \put(10.9,2.2){\mbox{$code(\mathfrak{f}^k_{0})$}} \put(12.3,2.2){\mbox{$code(\mathfrak{f}^k_{0''})$}} \end{picture}\\ \caption{} \label{cc2} \end{figure} \noindent The conditions (i)-(iii) of Theorem \ref{main2}, as concerns the mapping $F$, are obviously fulfilled. Note that it is important that all binary strings have the size $m$ and we get intuitionistic models, but it is of no importance what is included in $code(\mathfrak f^k)$. There remains to define the mapping $G$. 
Let $$G(\mathfrak{M}^m)=\left\{ \begin{array}{ll} \mathfrak{M}^m\!\!\upharpoonright n\quad & \hbox{if $\mathfrak{M}^m\thicksim F(\mathfrak{M}^k)$ for some $\mathfrak{M}^k$;} \\ ? & \hbox{otherwise (see below);} \end{array} \right.$$ where the $n$-model $\mathfrak{M}^m\!\!\upharpoonright n$ results from $\mathfrak{M}^m$ by the restriction of all valuations $\mathfrak{f}^{m}_i$ of $\mathfrak{M}^m$ to $\{x_1,\dots,x_n\}$. Note that $\mathfrak{M}^m\thicksim F(\mathfrak{M}^k)$ yields $\mathfrak{M}^m\!\!\upharpoonright n\thicksim F(\mathfrak{M}^k)\!\!\upharpoonright n$ and hence $G(\mathfrak{M}^m)\thicksim \sigma(\mathfrak{M}^k)$ by the definition of $F$ and $G$. Thus, (v) is fulfilled. If an $m$-model $\mathfrak{M}^m$ is equivalent with some $F(\mathfrak{M}^k)$, then any of its generated submodels $(\mathfrak{M}^m)_w$ is equivalent with $F((\mathfrak{M}^k)_u)$, for some $u$; see Corollary \ref{lf4i}. Thus, (i)-(v) are fulfilled if the first line of the definition of $G(\mathfrak{M}^m)$ applies. It remains to define $G(\mathfrak{M}^m)$ assuming $\mathfrak{M}^m$ is not equivalent with any $F(\mathfrak{M}^k)$. Our definition is inductive with respect to the depth of $\mathfrak{M}^m$, to secure (ii). Since (v) is irrelevant here and (iii) is quite obvious, we only require that $G(\mathfrak{M}^m)$ and $\mathfrak{M}^m$ be defined over the same frame, and that $G(\mathfrak{M}^m)$ be equivalent with a $\sigma$-model. \underline{$\mathfrak{L}_1$-Models.} If $\mathfrak{M}^m=(\mathfrak{L}_1,\mathfrak f^m)$ is equivalent with some $F(\mathfrak{M}^k)$, then we easily get $\mathfrak{M}^m=F(\mathfrak{L}_1,\mathfrak f^k)$, for some $\mathfrak f^k$, and hence $\mathfrak f^m=code(\mathfrak f^k)$. Thus, we have $$G(\mathfrak M^m)=(\mathfrak{L}_1,code(\mathfrak f^k)\!\!\upharpoonright n)=\sigma(\mathfrak{L}_1,\mathfrak f^k).$$ If $\mathfrak f^m\not=code(\mathfrak f^k)$, for every $\mathfrak f^k$, we could take $G(\mathfrak{M}^m)=\sigma(\mathfrak{M}^k)$, for any $k$-model $\mathfrak{M}^k$ over $\mathfrak{L}_1$. We can even take the same $\mathfrak{M}^k$, for each $\mathfrak{M}^m$. So, we choose some $\mathfrak s^k$ and agree that, if $(\mathfrak{L}_1,\mathfrak f^m)$ is not equivalent with any $F(\mathfrak M^k)$, we have $$G(\mathfrak M^m)=\sigma(\mathfrak{L}_1,\mathfrak s^k)=(\mathfrak{L}_1,code(\mathfrak s^k)\!\!\upharpoonright n).$$ \underline{$\mathfrak{F}_2$- and $\mathfrak{L}_2$-Models.} Let $\mathfrak{M}^m=(\mathfrak{F}_2,\{\mathfrak{f}^m_0,\mathfrak{f}^m_{0'},\mathfrak{f}^m_1\})$ be a p-irreducible $m$-model over $\mathfrak{F}_2$. Assume that $\mathfrak{M}^m\thicksim F(\mathfrak{M}^k)$, for some $\mathfrak{M}^k$. Then $\mathfrak{M}^m\equiv F(\mathfrak{M}^k)$, as the suffixes $11$, $01$ and $00$ prevent any reduction of $F(\mathfrak{M}^k)$, for any p-irreducible $\mathfrak{M}^k$ of depth $3$, to a model over $\mathfrak F_2$. Thus $\mathfrak{M}^k=(\mathfrak{F}_2,\{\mathfrak{f}^k_0,\mathfrak{f}^k_{0'},\mathfrak{f}^k_1\})$, and $\mathfrak{f}^m_0=code(\mathfrak{f}^k_0)=\mathfrak{f}^n_0\mathfrak{g}^n_1\mathfrak h^n_1\dots\mathfrak{g}^n_{2^n}\mathfrak{h}^n_{2^n}11$, and $\mathfrak{f}^m_{0'}=code(\mathfrak{f}^k_{0'})=\mathfrak f^n_{0'}\mathfrak{g'}^n_1\mathfrak {h'}^n_1\dots\mathfrak{g'}^n_{2^n}\mathfrak{h'}^n_{2^n}11$, and $\mathfrak{f}^m_1=\mathfrak f_1^n0\cdots01$, where $\sigma(\mathfrak M^k)=(\mathfrak{F}_2,\{\mathfrak{f}^n_0,\mathfrak{f}^n_{0'},\mathfrak{f}^n_1\})=G(\mathfrak M^m)$.
By the definition of $code()$, see Figure \ref{code}, the pair $\mathfrak f^n_0\mathfrak f^n_1$ must occur in $code(\mathfrak{f}^k_{0'})$ and $\mathfrak f^n_{0'}\mathfrak f^n_1$ in $code(\mathfrak{f}^k_{0})$. We would like to preserve this condition even if $\mathfrak{M}^m$ is not equivalent with any $F(\mathfrak{M}^k)$. So let $\mathfrak{M}^m$ be not equivalent with any $F(\mathfrak{M}^k)$. We have $G(\mathfrak{L}_1,\mathfrak{f}^m_0)=\sigma(\mathfrak{L}_1,\mathfrak{f}^k_0)=(\mathfrak{L}_1,\mathfrak{f}^n_0)$ and $G(\mathfrak{L}_1,\mathfrak{f}^m_{0'})=\sigma(\mathfrak{L}_1,\mathfrak{f}^k_{0'})=(\mathfrak{L}_1,\mathfrak{f}^n_{0'})$, for some $\mathfrak{f}^k_0,\mathfrak{f}^k_{0'}$ and some $\mathfrak{f}^n_0,\mathfrak{f}^n_{0'}$. Take any $\mathfrak{f}^k_1$ such that $(\mathfrak{F}_2,\{\mathfrak{f}^k_0,\mathfrak{f}^k_{0'},\mathfrak{f}^k_1\})$ is an intuitionistic model and define $$ G(\mathfrak{M}^m)=\sigma(\mathfrak{F}_2,\{\mathfrak{f}^k_0,\mathfrak{f}^k_{0'},\mathfrak{f}^k_1\})=(\mathfrak{F}_2, \{\mathfrak{f}^n_0,\mathfrak{f}^n_{0'},\mathfrak{f}^n_1\}), \quad \mbox{for some } \mathfrak{f}^n_1.$$ We obviously have $\mathfrak f^n_0\mathfrak f^n_1$ in $code(\mathfrak{f}^k_{0'})$ and $\mathfrak f^n_{0'}\mathfrak f^n_1$ in $code(\mathfrak{f}^k_{0})$. For the choice of $\mathfrak{f}^k_1$, use the lexicographical order; this also suffices to satisfy condition (iii) of Theorem \ref{main2}, saying that isomorphic models receive equivalent images. Let $\mathfrak{M}^m=(\mathfrak{L}_2,\{\mathfrak{f}^m_0,\mathfrak{f}^m_1\})$ be a p-irreducible $m$-model over $\mathfrak{L}_2$ and $\mathfrak{M}^m\thicksim F(\mathfrak M^k)$, for some $\mathfrak M^k$. We have two possibilities: either $\mathfrak M^k$ is a model over $\mathfrak L_2$ or it is a model over $\mathfrak F_2$. In the first case, $G(\mathfrak{M}^m)=\sigma(\mathfrak{M}^k)=\sigma(\mathfrak{L}_2,\{\mathfrak{f}^k_0,\mathfrak{f}^k_1\})= (\mathfrak{L}_2,\{\mathfrak{f}^n_0,\mathfrak{f}^n_1\})$, for some $\mathfrak{f}^k_0,\mathfrak{f}^k_1,\mathfrak{f}^n_0,\mathfrak{f}^n_1$, and $\mathfrak{f}^m_0=code(\mathfrak{f}^k_0)$. It is clear that $\mathfrak f^n_0\mathfrak f^n_1$ occurs in $code(\mathfrak{f}^k_{0})$. The second possibility is worse. We have $\mathfrak{M}^k=(\mathfrak{F}_2,\{\mathfrak{f}^k_0,\mathfrak{f}^k_{0'},\mathfrak{f}^k_1\})$ and $\mathfrak{f}^m_0=code(\mathfrak{f}^k_0)=code(\mathfrak{f}^k_{0'})$ (which does not yield $\mathfrak{f}^k_0=\mathfrak{f}^k_{0'}$). Since $\sigma(\mathfrak{M}^k)=(\mathfrak{F}_2,\{\mathfrak{f}^n_0,\mathfrak{f}^n_{0'},\mathfrak{f}^n_1\})$ and $\mathfrak{f}^n_0=\mathfrak{f}^n_{0'}$, we take $G(\mathfrak{M}^m)=(\mathfrak{L}_2,\{\mathfrak{f}^n_0,\mathfrak{f}^n_1\})=\mathfrak{M}^m\!\!\upharpoonright n\thicksim\sigma(\mathfrak{M}^k)$ and have $\mathfrak f^n_0\mathfrak f^n_1$ in $code(\mathfrak{f}^k_{0})$. Suppose that $\mathfrak{M}^m=(\mathfrak{L}_2,\{\mathfrak{f}^m_0,\mathfrak{f}^m_1\})$ is not equivalent with any $F(\mathfrak M^k)$. Then $G(\mathfrak{L}_1,\mathfrak{f}^m_0)=\sigma(\mathfrak{L}_1,\mathfrak{f}^k_0)=(\mathfrak{L}_1,\mathfrak{f}^n_0)$, for some $\mathfrak{f}^k_0,\mathfrak{f}^n_0$. Let us take any $k$-valuation $\mathfrak{f}^k_1$ such that $(\mathfrak{L}_2,\{\mathfrak{f}^k_0,\mathfrak{f}^k_1\})$ is an intuitionistic model and define $G(\mathfrak{M}^m)=\sigma(\mathfrak{L}_2,\{\mathfrak{f}^k_0,\mathfrak{f}^k_1\})=(\mathfrak{L}_2, \{\mathfrak{f}^n_0,\mathfrak{f}^n_1\})$, for some $\mathfrak{f}^n_1$. We obviously have $\mathfrak f^n_0\mathfrak f^n_1$ in $code(\mathfrak{f}^k_{0})$.
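To see where pairs such as $\mathfrak f^n_0\mathfrak f^n_1$ above come from, consider the $\mathfrak F_2$ case and apply the condition of Figure \ref{code} with $\mathfrak f^k:=\mathfrak{f}^k_{0'}$, $\mathfrak{g}^k_i:=\mathfrak{f}^k_0$ and $\mathfrak{h}^k_i:=\mathfrak{f}^k_1$: since
$$\sigma\bigl(\mathfrak{F}_2,\{\mathfrak{f}^k_0,\mathfrak{f}^k_{0'},\mathfrak{f}^k_1\}\bigr)=\bigl(\mathfrak{F}_2,\{\mathfrak{f}^n_0,\mathfrak{f}^n_{0'},\mathfrak{f}^n_1\}\bigr),$$
the pair $\mathfrak{f}^n_0\mathfrak{f}^n_1$ is one of the pairs $\mathfrak{g}^n_i\mathfrak{h}^n_i$ listed in $code(\mathfrak{f}^k_{0'})$; exchanging the roles of $\mathfrak{f}^k_0$ and $\mathfrak{f}^k_{0'}$ gives $\mathfrak{f}^n_{0'}\mathfrak{f}^n_1$ in $code(\mathfrak{f}^k_{0})$.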
\underline{$\mathfrak{C}_5$- and $\mathfrak{Y}_3$-Models.} Let $\mathfrak{M}^m$ be a p-irreducible model over $\mathfrak{C}_5$ (or over $\mathfrak{Y}_3$) and assume it is not equivalent with any $F(\mathfrak{M}^k)$. We have (see Figure \ref{c}-\ref{cc2})\\ $G((\mathfrak{M}^m)_1)\thicksim\sigma(\mathfrak{M}^k)=(\mathfrak F_2,\{\mathfrak{f}^n_0,\mathfrak{f}^n_{0'},\mathfrak{f}^n_1\})$, and \\ $G((\mathfrak{M}^m)_{1'})\thicksim\sigma(\mathfrak{M'}^k)=(\mathfrak F_2,\{\mathfrak{f}^n_0,\mathfrak{f}^n_{0''},\mathfrak{f}^n_{1'}\})$ (or $\sigma(\mathfrak{M'}^k)=(\mathfrak F_2,\{\mathfrak{f}^n_0,\mathfrak{f}^n_{0'},\mathfrak{f}^n_{1'}\})$)\\ for some $\mathfrak{M}^k=(\mathfrak F_2,\{\mathfrak{f}^k_0,\mathfrak{f}^k_{0''},\mathfrak{f}^k_{1'}\})$ and $\mathfrak{M'}^k=(\mathfrak F_2,\{\mathfrak{f}^k_0,\mathfrak{f}^k_{0''},\mathfrak{f}^k_{1'}\})$ (or $(\mathfrak F_2,\{\mathfrak{f}^k_0,\mathfrak{f}^k_{0'},\mathfrak{f}^k_{1'}\})$), which are $k$-model over $\mathfrak F_2$. We have $\mathfrak{f}^n_{0'}\mathfrak{f}^n_{1'}$ and $\mathfrak{f}^n_{0''}\mathfrak{f}^n_{1}$ (or $\mathfrak{f}^n_{0'}\mathfrak{f}^n_{1}$) in $code(\mathfrak{f}^k_0)$ (see Figure \ref{code}). Then, for some $\mathfrak{g}^k_1,\mathfrak{h}^k_1,\mathfrak{g}^k_2,\mathfrak{h}^k_2$, we can define $\mathfrak{C}_5$-models such that \begin{figure}[H] \unitlength1cm \begin{picture}(3,2) \thicklines \put(0,1){\mbox{$\sigma\Bigl($}} \put(2,0){\vector(-1,1){0.9}} \put(2,0){\vector(1,1){0.9}} \put(3,1){\vector(-1,1){0.9}} \put(1,1){\circle{0.1}} \put(2,2){\circle{0.1}} \put(2,0){\circle{0.1}} \put(3,1){\circle{0.1}} \put(1,1){\vector(1,1){0.9}} \put(1,2){\circle{0.1}} \put(3,2){\circle{0.1}} \put(1,1){\vector(0,1){0.9}} \put(3,1){\vector(0,1){0.9}} \put(2.3,0){\mbox{$0\cdots0$}} \put(1.3,1){\mbox{$\mathfrak{h}^k_{1}$}} \put(0.5,2){\mbox{$\mathfrak{g}^k_{1}$}} \put(3.3,1){\mbox{$\mathfrak{h}^k_2$}} \put(2.3,2){\mbox{$\mathfrak{f}^k_{0}$}} \put(3.3,2){\mbox{$\mathfrak{g}^k_{2}$}} \put(4,1){\mbox{$\Bigr)\quad=$}} \put(7,0){\vector(-1,1){0.9}} \put(7,0){\vector(1,1){0.9}} \put(8,1){\vector(-1,1){0.9}} \put(6,1){\circle{0.1}} \put(7,2){\circle{0.1}} \put(7,0){\circle{0.1}} \put(8,1){\circle{0.1}} \put(6,1){\vector(1,1){0.9}} \put(6,2){\circle{0.1}} \put(8,2){\circle{0.1}} \put(6,1){\vector(0,1){0.9}} \put(8,1){\vector(0,1){0.9}} \put(7.6,0){\mbox{$\mathfrak{f}^n_2$}} \put(8.5,1){\mbox{$\mathfrak{f}^n_{1}$\qquad, for some $\mathfrak f_2^n$.}} \put(5.4,2){\mbox{$\mathfrak{f}^n_{0'}$}} \put(5.3,1){\mbox{$\mathfrak{f}^n_{1'}$}} \put(7.3,2){\mbox{$\mathfrak{f}^n_{0}$}} \put(8.3,2){\mbox{$\mathfrak{f}^n_{0''}\ (\mbox{or}\ \mathfrak{f}^n_{0'})$}} \end{picture}\\ \caption{} \label{cc5} \end{figure} The last $n$-model over $\mathfrak C_5$ can be taken as $G(\mathfrak{M}^m)$. In the case of $\mathfrak Y_3$ we have $\mathfrak f^n_{0'}=\mathfrak f^n_{0''}$ and we can reduce the $\mathfrak C_5$-model to a $\mathfrak Y_3$-model taking $0'=0''$. \underline{$\mathfrak{R}_2$- and $\mathfrak Y_2$-Models.} Let $\mathfrak{M}^m$ be a p-irreducible model over $\mathfrak{R}_2$ and assume it is not equivalent with any $F(\mathfrak{M}^k)$. 
By our inductive hypothesis, we have (see Figure \ref{c}-\ref{cc2})\\ $G((\mathfrak{M}^m)_1)\thicksim\sigma(\mathfrak{M}^k)=(\mathfrak F_2,\{\mathfrak{f}^n_0,\mathfrak{f}^n_{0'},\mathfrak{f}^n_{1'}\})$, $G((\mathfrak{M}^m)_{1'})\thicksim\sigma(\mathfrak{M'}^k)=(\mathfrak F_2,\{\mathfrak{f}^n_0,\mathfrak{f}^n_{0''},\mathfrak{f}^n_{1}\})$;\\ for some $\mathfrak{M}^k=(\mathfrak F_2,\{\mathfrak{f}^k_0,\mathfrak{f}^k_{0'},\mathfrak{f}^k_{1'}\})$ and $\mathfrak{M'}^k=(\mathfrak F_2,\{\mathfrak{f}^k_0,\mathfrak{f}^k_{0''},\mathfrak{f}^k_{1}\})$. We have $\mathfrak{f}^n_{0'}=\mathfrak{f}^n_{0}=\mathfrak{f}^n_{0''}$ and $\mathfrak{f}^n_{0}\mathfrak{f}^n_{1'}$, $\mathfrak{f}^n_{0}\mathfrak{f}^n_{1}$ occur in $code(\mathfrak{f}^k_0)$ (see Figure \ref{code}). Then for some $\mathfrak{g}^k_1,\mathfrak{h}^k_1,\mathfrak{g}^k_2,\mathfrak{h}^k_2,\mathfrak f_2^n$ \begin{figure}[H] \unitlength1cm \begin{picture}(3,2) \thicklines \put(0,1){\mbox{$\sigma\Bigl($}} \put(2,0){\vector(-1,1){0.9}} \put(2,0){\vector(1,1){0.9}} \put(3,1){\vector(-1,1){0.9}} \put(1,1){\circle{0.1}} \put(2,2){\circle{0.1}} \put(2,0){\circle{0.1}} \put(3,1){\circle{0.1}} \put(1,1){\vector(1,1){0.9}} \put(1,2){\circle{0.1}} \put(3,2){\circle{0.1}} \put(1,1){\vector(0,1){0.9}} \put(3,1){\vector(0,1){0.9}} \put(2.3,0){\mbox{$0\cdots0$}} \put(1.3,1){\mbox{$\mathfrak{h}^k_{1}$}} \put(0.5,2){\mbox{$\mathfrak{g}^k_{1}$}} \put(3.3,1){\mbox{$\mathfrak{h}^k_2$}} \put(2.3,2){\mbox{$\mathfrak{f}^k_{0}$}} \put(3.3,2){\mbox{$\mathfrak{g}^k_{2}$}} \put(4,1){\mbox{$\Bigr)\quad=$}} \put(7,0){\vector(-1,1){0.9}} \put(7,0){\vector(1,1){0.9}} \put(8,1){\vector(-1,1){0.9}} \put(6,1){\circle{0.1}} \put(7,2){\circle{0.1}} \put(7,0){\circle{0.1}} \put(8,1){\circle{0.1}} \put(6,1){\vector(1,1){0.9}} \put(6,2){\circle{0.1}} \put(8,2){\circle{0.1}} \put(6,1){\vector(0,1){0.9}} \put(8,1){\vector(0,1){0.9}} \put(7.6,0){\mbox{$\mathfrak{f}^n_2$}} \put(8.5,1){\mbox{$\thicksim$}} \put(7.5,1){\mbox{$\mathfrak{f}^n_{1}$}} \put(5.4,2){\mbox{$\mathfrak{f}^n_{0'}$}} \put(5.3,1){\mbox{$\mathfrak{f}^n_{1'}$}} \put(7.3,2){\mbox{$\mathfrak{f}^n_{0}$}} \put(8.3,2){\mbox{$\mathfrak{f}^n_{0''}$}} \put(10,0){\vector(-1,1){0.9}} \put(10,0){\vector(1,1){0.9}} \put(11,1){\vector(-1,1){0.9}} \put(9,1){\circle{0.1}} \put(10,2){\circle{0.1}} \put(10,0){\circle{0.1}} \put(11,1){\circle{0.1}} \put(9,1){\vector(1,1){0.9}} \put(10.6,0){\mbox{$\mathfrak{f}^n_2$}} \put(11.2,1){\mbox{$\mathfrak{f}^n_{1}$}} \put(9.3,1){\mbox{$\mathfrak{f}^n_{1'}$}} \put(10.3,2){\mbox{$\mathfrak{f}^n_{0}$}} \end{picture}\\ \caption{} \label{cc6} \end{figure} \noindent The received $n$-model over $\mathfrak R_2$ can be taken as $G(\mathfrak M^m)$. A similar argument (as we used above for $\mathfrak R_2$-models) applies for models over $\mathfrak{Y}_2$. \underline{$+\mathfrak{F}_2$- and $\mathfrak{L}_3$-Models.} Let $\mathfrak{M}^m$ be a p-irreducible model over $\mathfrak{L}_3$, or over $+\mathfrak{F}_2$, and assume that it is not equivalent with any $F(\mathfrak{M}^k)$. We can also assume $G((\mathfrak{M}^m)_1)$ has been defined (see Figure \ref{c}-\ref{cc2}). If $G((\mathfrak{M}^m)_1)=\sigma(\mathfrak{M}^k)$, for some $\mathfrak{M}^k$, then $\mathfrak{M}^k$ is a model over $\mathfrak L_2$, or over $\mathfrak F_2$, and we can take $G(\mathfrak{M}^m)=\sigma(\mathfrak N^k)$ where $\mathfrak N^k$ is an extension of $\mathfrak{M}^k$ with any $k$-valuation $\mathfrak{f}^k_2$ (the extended model must be an intuitionistic model). 
Then $\mathfrak N^k$ is a model over $\mathfrak{L}_3$, or over $+\mathfrak{F}_2$, as required. The only problem arises if $G((\mathfrak{M}^m)_1)$ is equivalent with some $\sigma(\mathfrak{M}^k)$ but is not itself a $\sigma$-model. This can happen if $(\mathfrak{M}^m)_1$ is a model over $\mathfrak{L}_2$ and $\mathfrak{M}^k$ over $\mathfrak{F}_2$. Then, extending $\mathfrak{M}^k$ with $\mathfrak{f}^k_2$, we get a model $\mathfrak{N}^k$ over $+\mathfrak{F}_2$ such that $\sigma(\mathfrak{N}^k)$ is equivalent with an extension of $G((\mathfrak{M}^m)_1)$ to a model over $\mathfrak L_3$; and the model over $\mathfrak L_3$ is $G(\mathfrak{M}^m)$. \end{proof} \subsection{Projective Formulas in Locally Tabular Logics.}\label{PFLTL} Projective formulas are useful in the area of unification. By Ghilardi \cite{Ghi2}, any consistent Rasiowa-Harrop formula (e.g. $\neg B$) is projective in {\sf INT} and, consequently, in any intermediate logic, see Lemma \ref{mon}. One can easily produce other examples of projective formulas. Although conjunctions of projective formulas need not be projective (for instance, $(x_1\rightarrow x_2\lor x_3)\land x_1$ is not), we have \begin{lemma}\label{niu1} If $A=\bigwedge_{i=1}^s(B_i\leftrightarrow z_{i})$ for some distinct variables $z_1,\dots,z_s$ which do not occur in the formulas $B_1,\dots,B_s$, where $s\geq 1$, then $A$ is projective in {\sf INT}.\end{lemma} \begin{proof} Note that $\varepsilon\colon z_1\slash B_1\cdots z_s\slash B_s$ is a projective unifier for $A$.\end{proof} For instance, $(x_1\to x_2)\leftrightarrow z$ is projective in {\sf INT}, with projective unifier $z\slash(x_1\to x_2)$. For locally tabular logics, we can show \begin{theorem}\label{n5} Let $\sigma\colon\{x_1,\dots,x_n\}\to \mathsf{Fm}^k$ and $\varepsilon\colon\{x_1,\dots,x_n\}\to \mathsf{Fm}^n$, for some $k,n\geq 0$. Then $\varepsilon$ is a projective unifier for $A_\sigma$ in the logic {\sf L} if and only if \\ (iv) for every $\mathfrak{M}^n\in\mathbf{M}^n$ there is a $\mathfrak{M}^k\in\mathbf{M}^k$ such that $\varepsilon(\mathfrak{M}^n) \thicksim \sigma(\mathfrak{M}^k)$;\\ (v) $\varepsilon(\sigma(\mathfrak{M}^k))\thicksim\sigma(\mathfrak{M}^k)$, for every $\mathfrak{M}^k\in \mathbf{M}^k$, see Figure \ref{p1}. \end{theorem} \begin{figure} \unitlength1cm \begin{picture}(5,2) \thicklines \put(8,2){\vector(0,-1){1.9}} \put(8,2){\vector(-1,-1){1.9}} \put(8,0){\vector(-1,0){1.9}} \put(8,0){\circle{0.1}} \put(6,0){\circle{0.1}} \put(8,2){\circle{0.1}} \put(8.3,2){\mbox{$\mathbf{M}^k\!\slash\!\!\thicksim$}} \put(5,0){\mbox{$\mathbf{M}^n\!\slash\!\!\thicksim$}} \put(8.3,0){\mbox{$\mathbf{M}^n\!\slash\!\!\thicksim$}} \put(8.1,1){\mbox{$\sigma$}} \put(7.1,0.1){\mbox{$\varepsilon$}} \put(6.7,1.2){\mbox{$\sigma$}} \end{picture} \caption{Projective Unifiers}\label{p1} \end{figure} \begin{proof} Assume (iv) and (v). Then $\varepsilon$ is a unifier for $A_\sigma$, by Corollary \ref{n3i}. We need to show $A_\sigma\vdash\varepsilon(x_i)\leftrightarrow x_i$, for each $i$. Let $\mathfrak{M}^k$ be any $k$-model. Then $\varepsilon(\sigma(\mathfrak{M}^k))\thicksim\sigma(\mathfrak{M}^k)$ by (v). Hence, see Lemma \ref{sigma0}, we get $$\sigma(\mathfrak{M}^k)\Vdash\varepsilon(x_i) \ \Leftrightarrow \ \varepsilon(\sigma(\mathfrak{M}^k))\Vdash x_i \ \Leftrightarrow \ \sigma(\mathfrak{M}^k)\Vdash x_i.$$ By Lemma \ref{sigma0}, $\sigma$ is a unifier for $\varepsilon(x_i)\leftrightarrow x_i$ and, by Corollary \ref{in2}, $A_\sigma\vdash\varepsilon(x_i)\leftrightarrow x_i$.
If $\varepsilon$ is a projective unifier for $A_\sigma$, we get (iv) and (v) using Lemmas \ref{n1i} and \ref{proj}.\end{proof} \begin{corollary}\label{n7} A substitution $\varepsilon\colon\{x_1,\dots,x_n\}\to \mathsf{Fm}^n$ is a projective unifier for $A_\varepsilon$ in {\sf L} iff $\varepsilon\circ\varepsilon=_\mathsf{L}\varepsilon$. \end{corollary} \begin{corollary}\label{n8} If $\varepsilon\colon\{x_1,\dots,x_n\}\to \mathsf{Fm}^n$ is an {\sf L}-projective unifier for a formula $A\in \mathsf{Fm}^n$, then $A=_{\sf L}\bigwedge_{i=1}^n\bigl(x_i\leftrightarrow\varepsilon(x_i)\bigr).$\end{corollary} Suppose that $m=n$ and $F=H_\sigma$ in Theorem \ref{main}. Then, by (iv) and (v), $G$ would generate a retraction from $\mathbf{M}^n\!\slash\!\!\thicksim$ onto $\sigma(\mathbf{M}^k)\slash\!\!\thicksim$. This simplified version of Theorem \ref{main} characterizes logics with projective approximation (see Theorem \ref{praprox}). \begin{theorem}\label{retraction} The logic {\sf L} has projective approximation iff for every $\sigma\colon \{x_1,\dots,x_n\}\to \mathsf{Fm}^k$, where $n,k\geq 0$, there is a mapping $G:\mathbf{M}^n\to\mathbf{M}^n$ such that:\\ (i) $G$ preserves the frame of any $n$-model $\mathfrak{M}^n=(W,R,w_0,V^n)$;\\ (ii) $G((\mathfrak{M}^n)_w)\thicksim(G(\mathfrak{M}^n))_w, \mbox{ for every } \quad w\in W; $\\ (iii) $\mathfrak{N}^n\thicksim \mathfrak{M}^n\quad\Rightarrow \quad G(\mathfrak{N}^n)\thicksim G(\mathfrak{M}^n), \quad \mbox{ for every } \quad \mathfrak{M}^n,\mathfrak{N}^n\in \mathbf{M}^n$;\\ (iv) for every $n$-model $\mathfrak{M}^n$, there is a $k$-model $\mathfrak{M}^k$ such that $G(\mathfrak{M}^n)\thicksim\sigma(\mathfrak{M}^k)$;\\ (v) for every $k$-model $\mathfrak{M}^k$, we have $G(\sigma(\mathfrak{M}^k))\thicksim\sigma(\mathfrak{M}^k)$. \end{theorem} \begin{proof} Suppose that {\sf L} has projective approximation and let $\sigma\colon \{x_1,\dots,x_n\}\to \mathsf{Fm}^k$, for $k,n\geq 0$. By Corollary \ref{in2}, $\sigma$ is a unifier for $A_\sigma\in\mathsf{Fm}^n$. Let $B\in \Pi(A_\sigma)$ be a projective formula in $\mathsf{Fm}^n$ such that $B\vdash_\mathsf{L}A_\sigma$ and $\vdash_\mathsf{L}\sigma(B)$. If $\varepsilon\colon \{x_1,\dots,x_n\}\to \mathsf{Fm}^n$ is a projective unifier for $B$, then $\sigma\circ\varepsilon=_\mathsf{L}\sigma$ and $\vdash_\mathsf{L}\varepsilon(A_\sigma)$, by Lemma \ref{proj}. Thus, if one takes $G=H_\varepsilon$, one easily shows (i)-(v) using Lemma \ref{n1i} and Corollary \ref{n4i}. Assume (i)-(v) and let $\sigma\colon\{x_1,\dots,x_n\}\to \mathsf{Fm}^k$ be a unifier for a formula $A\in\mathsf{Fm}^n$. By Lemma \ref{nsigmai}, we have $G\thicksim H_\varepsilon$ for some $\varepsilon\colon\{x_1,\dots,x_n\}\to \mathsf{Fm}^n$. Then, by Theorem \ref{n5}, $\varepsilon$ is a projective unifier for $A_\sigma$. Thus, $A_\sigma$ is projective, $A_\sigma\vdash A$, $A_\sigma\in\mathsf{Fm}^n$, and $\sigma$ is a unifier for $A_\sigma$. Hence, for each $A$, one can define $\Pi(A)$ so as to establish projective approximation in {\sf L}. \end{proof} In \cite{dkw}, using the above theorem, we have shown that any logic determined by frames of depth $\leq 2$ has projective approximation. Thus, {\sf L}($\mathfrak{F}_m$) for any $m$-fork $\mathfrak{F}_m$ (see Figure \ref{FRF}) has finitary unification. Now we extend this result to the logic determined by all frames from $\mathbf H_{pr}$ (that is, frames $\mathfrak L_d+\mathfrak F_m$, where $m,d\geq 0$, see Figure \ref{hpa}).
The logic {\sf L}($\mathbf H_{pr}$) extends {\sf PWL}, see \cite{Esakia}, hence it is locally tabular. \begin{theorem}\label{lmk} For any $\mathbf F\subseteq \mathbf H_{pr}$, the logic {\sf L}($\mathbf F$) has projective approximation. \end{theorem} \begin{proof} Let $\mathbf F\subseteq \mathbf H_{pr}$. Since $\mathbf H_{pr}$ is closed under p-morphic images and generated subframes, we can assume {\it sm}({\bf F})={\bf F}; see Lemma \ref{lf8}. For any $\sigma\colon\{x_1,\dots,x_n\}\to \mathsf{Fm}^k$, where $k\geq 0$, we need $G\colon\mathbf{M}^n\to\mathbf{M}^n$ fulfilling (i)-(v). A. If $\mathfrak{M}^n=(W,R,w_0,V^n)\in\mathbf{M}^n$ is equivalent with a $\sigma$-model (i.e. $\mathfrak{M}^n\thicksim\sigma(\mathfrak{M}^k)$, for some $\mathfrak{M}^k$), we take $G(\mathfrak{M}^n)=\mathfrak{M}^n$. Then (i), (iv) and (v) are obvious. \noindent (ii) If $\mathfrak{M}^n$ is equivalent with a $\sigma$-model, then so is $(\mathfrak{M}^n)_w$, for any $w\in W$ (see Corollary \ref{lf4i} and Lemma \ref{sigmai}(iii)), and hence $G((\mathfrak{M}^n)_w)=(\mathfrak{M}^n)_w=(G(\mathfrak{M}^n))_w$. \noindent (iii) If $\mathfrak{M}^n$ is equivalent with a $\sigma$-model and $\mathfrak{M}^n\thicksim\mathfrak{N}^n$, then $\mathfrak{N}^n$ is equivalent with the same $\sigma$-model and hence $G(\mathfrak{M}^n)=\mathfrak{M}^n\thicksim\mathfrak{N}^n=G(\mathfrak{N}^n)$. B. Suppose that $\mathfrak{M}^n$ is not equivalent with any $\sigma$-model. Assume $\mathfrak{M}^n$ is p-irreducible and define $G(\mathfrak{M}^n)$ inductively with respect to the depth of $\mathfrak{M}^n$. We need not bother about (v) as it is irrelevant, and our inductive approach secures (ii) and (iii), see Theorem \ref{nsi}. Our task is to preserve (i) and (iv), that is, $G(\mathfrak{M}^n)$ should be defined over the frame of $\mathfrak{M}^n$ and $G(\mathfrak{M}^n)$ should be equivalent with some $\sigma$-model. B1. Let $\mathfrak{M}^n$ be an $n$-model over the one-element chain $\mathfrak L_1$, that is, $\mathfrak{M}^n=(\mathfrak L_1,\mathfrak{f}^n)$ where $\mathfrak{f}^n=i_1\dots i_n$ with $i_j\in\{0,1\}$. All $n$-models over $\mathfrak L_1$ are p-irreducible and we have (see \cite{dkw}): any $n$-model over $\mathfrak L_1$ that is equivalent with some $\sigma$-model is a $\sigma$-model. We can take any $k$-model $\mathfrak{M}^k=(\mathfrak L_1,\mathfrak{f}^k)$ over $\mathfrak L_1$ and define $G(\mathfrak{M}^n)=\sigma(\mathfrak{M}^k).$ B2. Let $\mathfrak{M}^n$ be an $n$-model over the $m$-fork $\mathfrak F_m$, see Figure \ref{mfork}, for $m\geq 1$: \begin{figure}[H] \unitlength1cm \begin{picture}(3,1.1) \thicklines \put(4,1){\circle{0.1}} \put(7,1){\circle{0.1}} \put(6,1){\circle{0.1}} \put(8,1){\circle{0.1}} \put(5,1){\circle{0.1}} \put(6,0){\circle{0.1}} \put(7.3,1.2){\mbox{$\cdots$}} \put(6.4,0){\mbox{$m$}} \put(4,1.2){\mbox{$0$}} \put(5,1.2){\mbox{$1$}} \put(6,1.2){\mbox{$2$}} \put(8,1.2){\mbox{$(m-1)$}} \put(7,1.2){\mbox{$3$}} \put(6,0){\vector(1,1){0.9}} \put(6,0){\vector(-1,1){0.9}} \put(6,0){\vector(0,1){0.9}} \put(6,0){\vector(2,1){1.9}} \put(6,0){\vector(-2,1){1.9}} \end{picture} \caption{$m$-Fork}\label{mfork} \end{figure} \noindent By our inductive approach, $G((\mathfrak{M}^n)_i)=(\mathfrak L_1,\mathfrak{g}^n_i)=\sigma(\mathfrak L_1,\mathfrak{f}^k_i)$, for some $\mathfrak{g}^n_i$ and $\mathfrak{f}^k_i$, if $i<m$.
We define $\mathfrak{M}^k$, see Figure \ref{abc}, and put $G(\mathfrak{M}^n)=\sigma(\mathfrak{M}^k).$ \begin{figure}[H] \unitlength1cm \begin{picture}(3,1.5) \thicklines \put(2,1){\circle{0.1}} \put(1,1){\circle{0.1}} \put(3,1){\circle{0.1}} \put(0,1){\circle{0.1}} \put(1,0){\circle{0.1}} \put(2.4,1.2){\mbox{$\cdots$}} \put(1.6,0){\mbox{$\mathfrak{f}^n_m$}} \put(0,1.2){\mbox{$\mathfrak{f}^n_0$}} \put(1,1.2){\mbox{$\mathfrak{f}^n_1$}} \put(3,1.2){\mbox{$\mathfrak{f}^n_{m-1}$}} \put(2,1.2){\mbox{$\mathfrak{f}^n_2$}} \put(1,0){\vector(1,1){0.9}} \put(1,0){\vector(-1,1){0.9}} \put(1,0){\vector(0,1){0.9}} \put(1,0){\vector(2,1){1.9}} \put(3,0.5){\vector(1,0){1.9}} \put(3.8,0){\mbox{$G$}} \put(0,0){\mbox{$\mathfrak M^n=$}} \put(7,1){\circle{0.1}} \put(6,1){\circle{0.1}} \put(8,1){\circle{0.1}} \put(5,1){\circle{0.1}} \put(6,0){\circle{0.1}} \put(7.3,1.2){\mbox{$\cdots$}} \put(6.6,0){\mbox{$?$}} \put(4.6,1.2){\mbox{$\mathfrak{g}^n_0$}} \put(5.6,1.2){\mbox{$\mathfrak{g}^n_1$}} \put(7.8,1.2){\mbox{$\mathfrak{g}^n_{m-1}$}} \put(6.6,1.2){\mbox{$\mathfrak{g}^n_2$}} \put(6,0){\vector(1,1){0.9}} \put(6,0){\vector(-1,1){0.9}} \put(6,0){\vector(0,1){0.9}} \put(6,0){\vector(2,1){1.9}} \put(9,0.5){\vector(-1,0){1.5}} \put(8.2,0.2){\mbox{$\sigma$}} \put(11,1){\circle{0.1}} \put(10,1){\circle{0.1}} \put(12,1){\circle{0.1}} \put(9,1){\circle{0.1}} \put(10,0){\circle{0.1}} \put(11.4,1.2){\mbox{$\cdots$}} \put(10.6,0){\mbox{$0\cdots0$}} \put(9.2,1.2){\mbox{$\mathfrak{f}^k_0$}} \put(10,1.2){\mbox{$\mathfrak{f}^k_1$}} \put(12,1.2){\mbox{$\mathfrak{f}^k_{m-1}$}} \put(11,1.2){\mbox{$\mathfrak{f}^k_2$}} \put(10,0){\vector(1,1){0.9}} \put(10,0){\vector(-1,1){0.9}} \put(10,0){\vector(0,1){0.9}} \put(10,0){\vector(2,1){1.9}} \put(11.8,0.2){\mbox{$=\mathfrak M^k$}} \end{picture} \caption{}\label{abc} \end{figure} \noindent We should be more careful about defining the mapping $G$ on isomorphic $n$-models. Our mapping should be factorized by $\equiv$ and hence we should get the factorized mapping $G\colon\mathbf{M}^n_{ir}\slash\!\!\equiv \to\mathbf{M}^n\slash\!\!\equiv$. Thus, from any $\equiv$-class, we take only one $\mathfrak{M}^n$, define as above $G(\mathfrak{M}^n)$ and then extend $G$, on other members of the equivalence class, taking as the values for $G$ the appropriate isomorphic copy of $G(\mathfrak{M}^n)$. B3. Eventually, suppose that the frame of $\mathfrak{M}^n$ is an $m$-fork extended with an $d$-element chain 'leg`, where $m,d\geq 0$. Our subsequent definition of $G$ is inductive with respect to $d$. So, we assume that $d\geq 1$ and $G((\mathfrak{M}^n)_{m+d-1})$ has already been defined, see Figure \ref{abcd}. We define $G(\mathfrak{M}^n)$ extending $G((\mathfrak{M}^n)_{m+d-1})$ with $\mathfrak{g}^n_{m+d}=\mathfrak{g}^n_{m+d-1}$. So, we get $G(\mathfrak{M}^n)\thicksim G((\mathfrak{M}^n)_{m+d-1})$ which guarantees the condition (iv). 
\begin{figure}[H] \unitlength1cm \begin{picture}(3,3.5) \thicklines \put(2,3){\circle{0.1}} \put(1,3){\circle{0.1}} \put(3,3){\circle{0.1}} \put(0,3){\circle{0.1}} \put(1,2){\circle{0.1}} \put(2.4,3.2){\mbox{$\cdots$}} \put(1.6,2){\mbox{$\mathfrak{f}^n_m$}} \put(0,3.2){\mbox{$\mathfrak{f}^n_0$}} \put(1,3.2){\mbox{$\mathfrak{f}^n_1$}} \put(3,3.2){\mbox{$\mathfrak{f}^n_{m-1}$}} \put(2,3.2){\mbox{$\mathfrak{f}^n_2$}} \put(1,2){\vector(1,1){0.9}} \put(1,2){\vector(-1,1){0.9}} \put(1,2){\vector(0,1){0.9}} \put(1,2){\vector(2,1){1.9}} \put(0,1){\mbox{$\mathfrak M^n=$}} \put(1,1.5){\circle{0.1}} \put(1,1){\circle{0.1}} \put(1,0.5){\circle{0.1}} \put(1,0){\circle{0.1}} \put(1.3,1.5){\mbox{$\mathfrak{f}^n_{m+1}$}} \put(1.3,0.5){\mbox{$\mathfrak{f}^n_{m+d-1}$}} \put(1.3,0){\mbox{$\mathfrak{f}^n_{m+d}$}} \put(1,1.6){\vector(0,1){0.4}} \put(1,0.1){\vector(0,1){0.4}} \put(3.1,1){\mbox{$G((\mathfrak M^n)_{m+d-1})=$}} \put(7,31){\circle{0.1}} \put(6,3){\circle{0.1}} \put(8,3){\circle{0.1}} \put(5,3){\circle{0.1}} \put(6,2){\circle{0.1}} \put(7.3,3.2){\mbox{$\cdots$}} \put(6.6,2){\mbox{$\mbox{$\mathfrak{g}^n_{m}$}$}} \put(4.6,3.2){\mbox{$\mathfrak{g}^n_0$}} \put(5.6,3.2){\mbox{$\mathfrak{g}^n_1$}} \put(7.8,3.2){\mbox{$\mathfrak{g}^n_{m-1}$}} \put(6.6,3.2){\mbox{$\mathfrak{g}^n_2$}} \put(6,2){\vector(1,1){0.9}} \put(6,2){\vector(-1,1){0.9}} \put(6,2){\vector(0,1){0.9}} \put(6,2){\vector(2,1){1.9}} \put(6,1.5){\circle{0.1}} \put(6,1){\circle{0.1}} \put(6,0.5){\circle{0.1}} \put(6,1.6){\vector(0,1){0.4}} \put(6.3,1.5){\mbox{$\mathfrak{g}^n_{m+1}$}} \put(6.3,0.5){\mbox{$\mathfrak{g}^n_{m+d-1}$}} \put(11,3){\circle{0.1}} \put(10,3){\circle{0.1}} \put(12,3){\circle{0.1}} \put(9,3){\circle{0.1}} \put(10,2){\circle{0.1}} \put(11.4,3.2){\mbox{$\cdots$}} \put(10.6,2){\mbox{$\mathfrak{g}^n_m$}} \put(9.2,3.2){\mbox{$\mathfrak{g}^n_0$}} \put(10,3.2){\mbox{$\mathfrak{g}^n_1$}} \put(12,3.2){\mbox{$\mathfrak{g}^n_{m-1}$}} \put(11,3.2){\mbox{$\mathfrak{g}^n_2$}} \put(10,2){\vector(1,1){0.9}} \put(10,2){\vector(-1,1){0.9}} \put(10,2){\vector(0,1){0.9}} \put(10,2){\vector(2,1){1.9}} \put(10,1.5){\circle{0.1}} \put(10,1){\circle{0.1}} \put(10,0.5){\circle{0.1}} \put(10,0){\circle{0.1}} \put(10,1.6){\vector(0,1){0.4}} \put(10,0.1){\vector(0,1){0.4}} \put(8.3,1){\mbox{$G(\mathfrak M^n)=$}} \put(10.3,1.5){\mbox{$\mathfrak{g}^n_{m+1}$}} \put(10.3,0.5){\mbox{$\mathfrak{g}^n_{m+d-1}$}} \put(10.3,0){\mbox{$\mathfrak{g}^n_{m+d}=\mathfrak{g}^n_{m+d-1}$}} \end{picture} \caption{}\label{abcd} \end{figure} So, we have $G\colon\mathbf{M}^n_{ir} \to\mathbf{M}^n$. Using Theorem \ref{nsi}, we extend the mapping to $G\colon\mathbf{M}^n \to\mathbf{M}^n$ preserving the conditions (i)-(iii), see Lemma \ref{sigmai}. It is an easy task to check that the conditions (iv) and (v) are also preserved by the extension. \end{proof} Theorems \ref{main} and \ref{retraction} can also be used to show that certain locally tabular logics neither have finitary\slash unitary unification, nor projective approximation. Many examples follow in the next section. Here we only give \begin{theorem}\label{L8i} The logics ${\mathsf L}(\mathfrak G_2)$ and ${\mathsf L}(\mathfrak G_2)\cap{\mathsf L}(\mathfrak C_{5})$ (see Figure \ref{GF} and \ref{TF}) have finitary unification but they do not have projective approximation. \end{theorem} \begin{proof} Let us prove ${\mathsf L}(\mathfrak G_2)\cap{\mathsf L}(\mathfrak C_{5})$ has finitary unification. We extend the family {\bf F} (in our proof of Theorem \ref{c5}) with the frames $\mathfrak G_2$ and $\mathfrak G_3$. 
The definition of $F$, see Figure \ref{c}-\ref{cc2}, needs the following two (additional) clauses: \begin{figure}[H] \unitlength1cm \begin{picture}(3,2) \thicklines \put(1.5,0){\vector(-1,1){0.9}} \put(1.5,0){\vector(1,1){0.9}} \put(2.5,1){\vector(0,1){0.9}} \put(0.5,1){\circle{0.1}} \put(2.5,2){\circle{0.1}} \put(1.5,0){\circle{0.1}} \put(2.5,1){\circle{0.1}} \put(1.8,0){\mbox{$\mathfrak{f}^k_2$}} \put(0.6,1.1){\mbox{$\mathfrak{f}^k_{0'}$}} \put(2.6,2){\mbox{$\mathfrak{f}^k_0$}} \put(2.6,1.1){\mbox{$\mathfrak{f}^k_1$}} \put(0,1){\mbox{$\sigma\Bigl($}} \put(3,1){\mbox{$\Bigr)\ = $}} \put(0.9,1.8){\mbox{$\mathfrak{M}^k=$}} \put(5,0){\vector(-1,1){0.9}} \put(5,0){\vector(1,1){0.9}} \put(6,1){\vector(0,1){0.9}} \put(4,1){\circle{0.1}} \put(6,2){\circle{0.1}} \put(5,0){\circle{0.1}} \put(6,1){\circle{0.1}} \put(5.3,0){\mbox{$\mathfrak{f}^n_2$}} \put(4.1,1.1){\mbox{$\mathfrak{f}^n_{0'}$}} \put(6.1,2){\mbox{$\mathfrak{f}^n_0$}} \put(6.3,1.1){\mbox{$\mathfrak{f}^n_1$}} \put(8,1){\mbox{$F(\mathfrak{M}^k)\ = $}} \put(11,0){\vector(-1,1){0.9}} \put(11,0){\vector(1,1){0.9}} \put(12,1){\vector(0,1){0.9}} \put(10,1){\circle{0.1}} \put(12,2){\circle{0.1}} \put(11,0){\circle{0.1}} \put(12,1){\circle{0.1}} \put(10.5,1){\mbox{$\mathfrak{f}^n_{1}0\cdots01$}} \put(11.3,0){\mbox{$\mathfrak{f}^n_20\cdots0$}} \put(9.7,1.4){\mbox{$code(\mathfrak{f}^k_{0'})$}} \put(11.4,2.1){\mbox{$code(\mathfrak{f}^k_{0})$}} \end{picture}\\ \unitlength1cm \begin{picture}(5,2.2) \thicklines \put(1.5,0){\vector(-1,1){0.9}} \put(1.5,0){\vector(1,1){0.9}} \put(2.5,1){\vector(0,1){0.9}} \put(0.5,1){\circle{0.1}} \put(2.5,2){\circle{0.1}} \put(1.5,0){\circle{0.1}} \put(2.5,1){\circle{0.1}} \put(1.8,0){\mbox{$\mathfrak{f}^k_2$}} \put(0.6,1.1){\mbox{$\mathfrak{f}^k_{0''}$}} \put(2.6,2){\mbox{$\mathfrak{f}^k_0$}} \put(2.6,1.1){\mbox{$\mathfrak{f}^k_1$}} \put(0,1){\mbox{$\sigma\Bigl($}} \put(3,1){\mbox{$\Bigr)\ = $}} \put(2.5,1){\vector(-1,1){0.9}} \put(1.5,2){\circle{0.1}} \put(1,1.8){\mbox{$\mathfrak{f}^k_{0'}$}} \put(5,0){\vector(-1,1){0.9}} \put(5,0){\vector(1,1){0.9}} \put(6,1){\vector(0,1){0.9}} \put(4,1){\circle{0.1}} \put(6,2){\circle{0.1}} \put(5,0){\circle{0.1}} \put(6,1){\circle{0.1}} \put(5.3,0){\mbox{$\mathfrak{f}^n_2$}} \put(4.1,1.1){\mbox{$\mathfrak{f}^n_{0''}$}} \put(6.1,2){\mbox{$\mathfrak{f}^n_0$}} \put(6.3,1.1){\mbox{$\mathfrak{f}^n_1$}} \put(6,1){\vector(-1,1){0.9}} \put(5,2){\circle{0.1}} \put(4.5,1.8){\mbox{$\mathfrak{f}^n_{0'}$}} \put(8,1){\mbox{$F(\mathfrak{M}^k)\ = $}} \put(11,0){\vector(-1,1){0.9}} \put(11,0){\vector(1,1){0.9}} \put(12,1){\vector(0,1){0.9}} \put(10,1){\circle{0.1}} \put(12,2){\circle{0.1}} \put(11,0){\circle{0.1}} \put(12,1){\circle{0.1}} \put(9.5,2){\mbox{$code(\mathfrak{f}^k_{0'})$}} \put(10.5,1){\mbox{$\mathfrak{f}^n_{1}0\cdots01$}} \put(11.3,0){\mbox{$\mathfrak{f}^n_20\cdots0$}} \put(9.7,1.4){\mbox{$code(\mathfrak{f}^k_{0''})$}} \put(11.4,2.1){\mbox{$code(\mathfrak{f}^k_{0})$}} \put(12,1){\vector(-1,1){0.9}} \put(11,2){\circle{0.1}} \end{picture}\\ \caption{} \label{k} \end{figure} \noindent Then we need to extend the definition of $G(\mathfrak{M}^m)$ with: \underline{$\mathfrak{G}_2$- and $\mathfrak{G}_2$-models.} We only deal with the worst case. Suppose that $\mathfrak{M}^m=(\mathfrak{G}_3,\{\mathfrak{f}^m_0,\mathfrak{f}^m_{0'},\mathfrak{f}^m_1,\mathfrak{f}^m_2\})$ is a p-irreducible model over $\mathfrak{G}_3$ (see Figure \ref{k}) non-equivalent with any $F(\mathfrak{M}^k)$, but $(\mathfrak{M}^m)_1\thicksim F(\mathfrak{M}^k)$, where $\mathfrak{M}^k$ is a model over $\mathfrak{F}_2$. 
Then $(\mathfrak{M}^m)_1\!\!\upharpoonright n = (\mathfrak{L}_2,\{\mathfrak{f}^n_0,\mathfrak{f}^n_1\})\thicksim\sigma(\mathfrak{M}^k)$ for some $\mathfrak{f}^n_0,\mathfrak{f}^n_1$ where $\thicksim$ cannot be replaced with $=$. Let $\mathfrak{M}^k=(\mathfrak{F}_2,\{\mathfrak{f}^k_0,\mathfrak{f}^k_{0'},\mathfrak{f}^k_1\})$ which means $\sigma(\mathfrak{L}_1,\mathfrak{f}^k_0)=\sigma(\mathfrak{L}_1,\mathfrak{f}^k_{0'})=(\mathfrak{L}_1, \mathfrak{f}^n_0)$ and let $G((\mathfrak{M}^m)_{0'})=\sigma(\mathfrak{L}_1,\mathfrak{g}^k_{0})=(\mathfrak{L}_1,\mathfrak{f}^n_{0'}), $ for some $\mathfrak{g}^k_{0},\mathfrak{f}^n_{0'}$. Then, for some $\mathfrak{f}^n_2$, we have \begin{figure}[H]\label{pi} \unitlength1cm \begin{picture}(3,2) \thicklines \put(8.5,1){$\thicksim$} \put(11,0){\vector(-1,1){0.9}} \put(11,0){\vector(1,1){0.9}} \put(12,1){\vector(0,1){0.9}} \put(10,1){\circle{0.1}} \put(12,2){\circle{0.1}} \put(11,0){\circle{0.1}} \put(12,1){\circle{0.1}} \put(11.3,0){\mbox{$\mathfrak{f}^n_2$}} \put(9.7,1.2){\mbox{$\mathfrak{f}^n_{0'}$}} \put(12.1,2){\mbox{$\mathfrak{f}^n_0$}} \put(12.3,1.1){\mbox{$\mathfrak{f}^n_1$}} \put(6,0){\vector(-1,1){0.9}} \put(6,0){\vector(1,1){0.9}} \put(7,1){\vector(0,1){0.9}} \put(7,1){\vector(-1,1){0.9}} \put(5,1){\circle{0.1}} \put(7,2){\circle{0.1}} \put(6,2){\circle{0.1}} \put(6,0){\circle{0.1}} \put(7,1){\circle{0.1}} \put(6.3,0){\mbox{$\mathfrak{f}^n_2$}} \put(4.8,1.2){\mbox{$\mathfrak{f}^n_{0'}$}} \put(7.1,2.1){\mbox{$\mathfrak{f}^n_0$}} \put(7.3,1){\mbox{$\mathfrak{f}^n_1$}} \put(6.3,2.1){\mbox{$\mathfrak{f}^n_{0}$}} \put(0,1){$\sigma\Bigl($} \put(2,0){\vector(-1,1){0.9}} \put(2,0){\vector(1,1){0.9}} \put(3,1){\vector(0,1){0.9}} \put(3,1){\vector(-1,1){0.9}} \put(1,1){\circle{0.1}} \put(3,2){\circle{0.1}} \put(2,2){\circle{0.1}} \put(2,0){\circle{0.1}} \put(3,1){\circle{0.1}} \put(1.3,0){\mbox{$\mathfrak{f}^k_2$}} \put(0.7,1.2){\mbox{$\mathfrak{g}^k_{0}$}} \put(3.1,2.1){\mbox{$\mathfrak{f}^k_0$}} \put(3.3,1){\mbox{$\mathfrak{f}^k_1$}} \put(2.3,2.1){\mbox{$\mathfrak{f}^k_{0'}$}} \put(3.6,1){$\Bigr)\quad =$} \end{picture}\\ \caption{} \label{M2} \end{figure} \noindent and the $n$-model over $\mathfrak{G}_3$ may be $G(\mathfrak{M}^m)$; it is equivalent with a $\sigma$-model over $\mathfrak{G}_2$.\footnote{The problem would rise if we tried in the same way show ${\mathsf L}(\mathfrak G_3)$ is finitary, we have no $\sigma$-model over $\mathfrak{G}_3$ equivalent with $G(\mathfrak{M}^m)$. So, ${\mathsf L}(\mathfrak G_3)$ is nullary and the above may be seen as Ghilardi's Theorem 9, p.112, \cite{Ghi5}.} The rest of the proof remains the same. In the same way one shows ${\mathsf L}(\mathfrak G_2)$ has finitary unification; from the proof that ${\mathsf L}(\mathfrak G_2)\cap{\mathsf L}(\mathfrak C_{5})$ has finitary unification, it suffices to remove $\mathfrak C_{5},\mathfrak R_{2},\mathfrak Y_{2}$ and $\mathfrak Y_{3}$. Then we can even simplify our reasoning and delate $\mathfrak{g}^n_1\mathfrak{h}^n_1\dots\mathfrak{g}^n_{2^n}\mathfrak{h}^n_{2^n}$ from $code(\mathfrak{f}^k)$. Thus, we can take $m=n+2$ and define $F(\mathfrak{M}^k)$ using $m$-valuations $\mathfrak{f}^n11$, $\mathfrak{f}^n01$, or $\mathfrak{f}^n00$. The suffices $11$, $01$ and $00$ are necessary.\footnote{Using them we avoid problems that could rise p-morphisms mentioned in Example \ref{Kost}, the first counterexample. For the second type of p-morphisms, mentioned in the Example, we have no such remedia. 
The whole proof is to be shown that for these particular frames we can deal somehow with p-morphims which collapse $\mathfrak F_2$ onto $\mathfrak L_2$.} We need to show that ${\mathsf L}(\mathfrak G_2)$ does not have projective approximation. Consider the substitution $\sigma\colon\{x_1\}\to \mathsf{Fm}^2$ such that $\sigma(x_1)=\neg\neg x_1\land \bigl(x_2\lor(x_2\to x_1\lor\neg x_1)\bigr).$ Any $\sigma$-model is equivalent with one of the 1-models: \begin{figure}[H] \unitlength1cm \begin{picture}(3,1.5) \thicklines \put(0,0){\circle{0.1}} \put(0.3,0){\mbox{$1$}} \put(1.5,0){\circle{0.1}} \put(1.8,0){\mbox{$0$}} \put(12,0){\vector(0,1){0.9}} \put(12,1){\circle{0.1}} \put(12,0){\circle{0.1}} \put(12.3,0){\mbox{$0$}} \put(12.3,1){\mbox{$1$}} \put(3,1){\circle{0.1}} \put(4,0){\circle{0.1}} \put(5,1){\circle{0.1}} \put(4.3,0){\mbox{$0$}} \put(3.3,1){\mbox{$0$}} \put(5.3,1){\mbox{$1$}} \put(4,0){\vector(1,1){0.9}} \put(4,0){\vector(-1,1){0.9}} \put(8,0){\vector(-1,1){0.9}} \put(8,0){\vector(1,1){0.9}} \put(9,1){\vector(0,1){0.9}} \put(9,1){\vector(-1,1){0.9}} \put(7,1){\circle{0.1}} \put(9,2){\circle{0.1}} \put(8,2){\circle{0.1}} \put(8,0){\circle{0.1}} \put(9,1){\circle{0.1}} \put(8.3,0){\mbox{$0$}} \put(6.7,1.1){\mbox{$1$}} \put(9.1,1.9){\mbox{$1$}} \put(9.4,1){\mbox{$1$}} \put(8.3,1.9){\mbox{$1$}} \put(10.5,0.5){\mbox{$\thicksim$}} \end{picture} \caption{}\label{tt} \end{figure} \noindent The above model over $\mathfrak L_2$ is not a $\sigma$-model but is equivalent with some $\sigma$-model over $\mathfrak G_2$. Suppose ${\mathsf L}(\mathfrak G_2)$ has projective approximation and let $G\colon \mathbf M^1\to \mathbf M^1$ be the retraction given by Theorem \ref{retraction}. We have $G(\circ\ 0)=\circ\ 0$ and $G(\circ\ 1)=\circ\ 1$. But if \unitlength1cm \begin{picture}(5,2.5) \thicklines \put(0,1.4){$G\bigl($} \put(0.8,2){\circle{0.1}} \put(0.8,1){\circle{0.1}} \put(1.1,1){\mbox{$0$}} \put(1.1,2){\mbox{$1$}} \put(0.8,1){\vector(0,1){0.9}} \put(1.5,1.4){$\bigr)=$} \put(2.2,2){\circle{0.1}} \put(2.2,1){\circle{0.1}} \put(2.4,1){\mbox{$0$}} \put(2.4,2){\mbox{$1$}} \put(2.2,1){\vector(0,1){0.9}} \put(2.7,1.4){, then} \put(4,1.1){$G\Bigl($} \put(6,0){\vector(-1,1){0.9}} \put(6,0){\vector(1,1){0.9}} \put(7,1){\vector(0,1){0.9}} \put(7,1){\vector(-1,1){0.9}} \put(5,1){\circle{0.1}} \put(7,2){\circle{0.1}} \put(6,2){\circle{0.1}} \put(6,0){\circle{0.1}} \put(7,1){\circle{0.1}} \put(6.3,0){\mbox{$0$}} \put(4.7,1.1){\mbox{$0$}} \put(7.1,2.1){\mbox{$1$}} \put(7.3,1){\mbox{$0$}} \put(6.3,2.1){\mbox{$1$}} \put(7.7,1.1){\mbox{$\Bigr)\ =$}} \put(10,0){\vector(-1,1){0.9}} \put(10,0){\vector(1,1){0.9}} \put(11,1){\vector(0,1){0.9}} \put(11,1){\vector(-1,1){0.9}} \put(11,1){\circle{0.1}} \put(11,2){\circle{0.1}} \put(10,2){\circle{0.1}} \put(10,0){\circle{0.1}} \put(11,1){\circle{0.1}} \put(9.5,0){\mbox{$0$}} \put(9.5,1){\mbox{$0$}} \put(11.1,2.1){\mbox{$1$}} \put(11.3,1){\mbox{$0$}} \put(10.3,2.1){\mbox{$1$}} \end{picture}\\ which cannot happen as the last model is not equivalent to any model in Figure \ref{tt}. Thus, we get a contradiction. In the same way one shows ${\mathsf L}(\mathfrak G_2)\cap{\mathsf L}(\mathfrak C_{5})$ does not have projective approximation. \end{proof} We can also show that ${\mathsf L}(\mathfrak G_1)$ (see Figure \ref{GF}) has finitary unification; it suffices only to extend the above reasoning admitting the frames $\mathfrak G_1,\mathfrak G_{3\mathfrak L_2}$ and $\mathfrak G_{3\mathfrak F_2}$ (see Figure \ref{MNU}). 
But a detailed proof would take much time and space (and, above all, would be boring). We think that Ghilardi's original reasoning, see \cite{Ghi5}, is concise and elegant and, therefore, we refrain from giving any alternative proof. \section{Unification Types.}\label{UT} \subsection{Unitary Unification.}\label{UU} By Theorem \ref{wpl}, the logic $\mathsf L(\mathfrak R_2)$ of the rhombus (see Figure \ref{FRF}) has unitary unification. We have shown this directly, using Theorem \ref{main}, in \cite{dkw}. We cannot have $m=n$ in this case. In other words, unification in $\mathsf L(\mathfrak R_2)$ is unitary, but it is not true that every unifiable formula in $n$-variables has an mgu in $n$-variables. A unifiable $A(x_1,\dots,x_n)$ has an mgu in $m$-variables, where ($m$ is defined in the proof and) $n<m$. Indeed, let $A= x_1\lor x_2\lor(\neg x_1\land\neg x_2)$ and let $\varepsilon_1,\varepsilon_2,\varepsilon_3$ be the following unifiers for $A$: $$\varepsilon_1(x_1)=\top\ ,\ \varepsilon_1(x_2)= x_2\quad;\quad \varepsilon_2(x_1)= x_1\ ,\ \varepsilon_2(x_2)=\top\quad;\quad\varepsilon_3(x_1)=\varepsilon_3(x_2)=\bot.$$ Let $\varepsilon\colon\{x_1,x_2\}\to\mathsf{Fm}^2$ be an mgu for $A$ in any intermediate logic ${\mathsf L}\subseteq\mathsf L(\mathfrak R_2)$. Then for some $\alpha_i\colon\{x_1,x_2\}\to\mathsf{Fm}^2,$ where $i=1,2,3$, we have $\alpha_i\circ\varepsilon=_{\sf L}\varepsilon_i$, and hence the diagram in Figure \ref{r1} commutes (up to $\thicksim$), where $\mathbf{M}^{2}=\mathbf{M}^{2}(sm(\mathfrak R_2))=\mathbf{M}^{2}(\{\mathfrak L_1,\mathfrak L_2,\mathfrak R_2\})$: \begin{figure}[H] \unitlength1cm \begin{picture}(5,2) \thicklines \put(8,2){\vector(0,-1){1.9}} \put(8,2){\vector(-2,-1){3.9}} \put(8,0){\vector(-1,0){3.9}} \put(8,0){\circle{0.1}} \put(4,0){\circle{0.1}} \put(8,2){\circle{0.1}} \put(8.3,2){\mbox{$\mathbf{M}^{2}$}} \put(3.2,0){\mbox{$\mathbf{M}^2$}} \put(8.3,0){\mbox{$\mathbf{M}^{2}$}} \put(8.2,1){\mbox{$\alpha_i$}} \put(6.5,0.2){\mbox{$\varepsilon$}} \put(6.5,1.5){\mbox{$\varepsilon_{i}$}} \end{picture} \caption{Commutative Diagram}\label{r1} \end{figure} \noindent Consider all $2$-models over the one-element frame $\mathfrak L_1$. They are $(\mathfrak L_1,00),(\mathfrak L_1,01)$, $(\mathfrak L_1,10)$ and $(\mathfrak L_1,11)$. Each of them is an $\varepsilon_i$-model for some $i$ and hence they are $\varepsilon$-models. The mapping $\varepsilon\colon\mathbf{M}^2\to\mathbf{M}^2$ must be one-to-one on $\mathfrak L_1$-models. Consider the following $2$-models over $\mathfrak L_2$ (a two-element chain):\\ \unitlength1cm \begin{picture}(0,1.1) \thicklines \put(0,0){\circle{0.1}} \put(0.3,1){\mbox{$11$}} \put(0,0){\vector(0,1){0.9}} \put(0.3,0){\mbox{$10$}} \put(0,1){\circle{0.1}} \put(1.5,0.5){\mbox{and}} \put(3.3,1){\mbox{$11$ \qquad\qquad \ Both are $\varepsilon_i$-models for some $i=1,2$ and hence }} \put(3,0){\circle{0.1}} \put(3.6,0.5){\mbox{\qquad\qquad \ \ \ they are also $\varepsilon$-models.
Thus, there are such}} \put(3.3,0){\mbox{$01$ \qquad\qquad \qquad $a,b,c,d,e,f \in \{0, 1\}$ that}} \put(3,0){\vector(0,1){0.9}} \put(3,1){\circle{0.1}} \end{picture} \\ \unitlength1cm \begin{picture}(0,1.1) \thicklines \put(0.2,0.5){\mbox{$\varepsilon\Bigl($}} \put(1,0){\circle{0.1}} \put(1.3,1){\mbox{$ab$}} \put(1,0){\vector(0,1){0.9}} \put(1.3,0){\mbox{$cd$}} \put(1,1){\circle{0.1}} \put(2,0.5){\mbox{$\Bigr)\qquad=$}} \put(4,0){\circle{0.1}} \put(4.3,1){\mbox{$11$}} \put(4,0){\vector(0,1){0.9}} \put(4.3,0){\mbox{$10$}} \put(4,1){\circle{0.1}} \put(6,0.5){\mbox{and}} \put(8.2,0.5){\mbox{$\varepsilon\Bigl($}} \put(9,0){\circle{0.1}} \put(9.3,1){\mbox{$ab$}} \put(9,0){\vector(0,1){0.9}} \put(9.3,0){\mbox{$ef$}} \put(9,1){\circle{0.1}} \put(10,0.5){\mbox{$\Bigr)\qquad=$}} \put(12,0){\circle{0.1}} \put(12.3,1){\mbox{$11$}} \put(12,0){\vector(0,1){0.9}} \put(12.3,0){\mbox{$01$}} \put(12,1){\circle{0.1}} \end{picture}\\ \noindent and $ab\not=cd\not=ef\not=ab$. If we consider the following $\mathfrak R_2$-model and its $\varepsilon$-image:\\ \unitlength1cm \begin{picture}(3,2.2) \thicklines \put(2,1){$\varepsilon\Bigl($} \put(4,0){\vector(-1,1){0.9}} \put(4,0){\vector(1,1){0.9}} \put(5,1){\vector(-1,1){0.9}} \put(3,1){\circle{0.1}} \put(4,2){\circle{0.1}} \put(4,0){\circle{0.1}} \put(5,1){\circle{0.1}} \put(4.3,0){\mbox{$?$}} \put(2.5,1){\mbox{$cd$}} \put(4.3,2){\mbox{$ab$}} \put(5.3,1){\mbox{$ef$}} \put(3,1){\line(1,1){0.9}} \put(3,1){\vector(1,1){0.9}} \put(6,1){$\Bigr)$} \put(7,1){$=$} \put(10,0){\vector(-1,1){0.9}} \put(10,0){\vector(1,1){0.9}} \put(11,1){\vector(-1,1){0.9}} \put(9,1){\circle{0.1}} \put(10,2){\circle{0.1}} \put(10,0){\circle{0.1}} \put(11,1){\circle{0.1}} \put(10.3,0){\mbox{$00$}} \put(8.5,1){\mbox{$10$}} \put(10.3,2){\mbox{$11$}} \put(11.3,1){\mbox{$01$}} \put(9,1){\vector(1,1){0.9}} \end{picture}\\ \noindent we conclude there is a $\varepsilon$-model which is not a model for $A$ (whatever $?$ is). Thus, $\varepsilon$ is not a unifier for $A$, a contradiction. No unifier $\varepsilon\colon\{x_1,x_2\}\to\mathsf{Fm}^2$ for $A$ is more general any $\varepsilon_i$, for $i=1,2,3$. Clearly, using filtering unifications, we can find a unifier for $A$ which is more general than any $\varepsilon_i$, we can even find in this way a mgu for $A$. But filtering unifications require additional variables and thus, we get a mgu $\varepsilon\colon\{x_1,x_2\}\to\mathsf{Fm}^m$, where $m>2$. \begin{theorem}\label{u1} If an intermediate logic ${\mathsf L}\subseteq\mathsf L(\mathfrak R_2)$ has unitary unification, then there are {\sf L}-unifiable formulas such that all their mgu's introduce new variables. \end{theorem} We do not assume here that {\sf L} is locally tabular. Now, we can apply a particular splitting in the lattice of intermediate logics. It is known (see Rautenberg \cite{Rautenberg}) that $(\mathsf L (\mathfrak R_2),\mathsf{LC})$ is a splitting pair for all extensions of $\mathsf{KC}$. It follows that for each {\sf L} extending $\mathsf{KC}$, either $\mathsf L \supseteq \mathsf{LC}$, or $\mathsf L \subseteq \mathsf L(\mathfrak R_2)$. Recall also that, if $\mathsf{LC}\subseteq \mathsf L $, then {\sf L} is one of G\"odel logics that is {\sf L =LC} or $\mathsf{L} = \mathsf L(\mathfrak{L}_n)$, for some $n\geq 1$. Thus, we arrive to the following dichotomy. 
\begin{corollary}\label{dichotomy} For any intermediate logic {\sf L} with unitary unification, either {\sf L} is one of the G\"odel logics and enjoys projective unification (and all unifiable formulas have mgu's preserving their variables), or there are {\sf L}-unifiable formulas such that all their mgu's introduce new variables (that is, mgu's do not preserve the variables of unifiable formulas). \end{corollary} We do not deny the existence of unifiable (non-projective) formulas with mgu's preserving their variables. We claim that, if a non-projective logic has unitary unification, then there must be unifiable formulas all of whose mgu's introduce new variables. \begin{corollary}\label{up1} An intermediate logic {\sf L} has projective unification if and only if {\sf L} has unitary unification and projective approximation. \end{corollary} \begin{proof} Suppose that {\sf L} has unitary unification and projective approximation. Then one notices that each unifiable formula $A$ has a one-element projective approximation $\Pi(A)$. This means that any unifiable $A$ has an mgu preserving its variables. By Corollary \ref{dichotomy}, we conclude that {\sf L} has projective unification. \end{proof} Corollary \ref{up1} does not hold for transitive modal logics; there are unitary transitive modal logics with projective approximation that are non-projective. But for reflexive and transitive modal logics one can prove the counterpart of Corollary \ref{up1}. Other consequences of Theorem \ref{u1} are given in Section \ref{HFU}. \subsection{Infinitary Unification}\label{IU} To date, no intermediate (or modal) logic with infinitary unification has been found, see \cite{Ghi5,uni}. Using Theorem \ref{main} we show that \begin{theorem}\label{niu3} No locally tabular intermediate logic has infinitary unification. \end{theorem} \begin{proof} Let {\sf L} be a locally tabular intermediate logic and suppose that unification in {\sf L} is neither unitary nor finitary. Then $$\exists_{n>0} \forall_{m>0} \exists_{k>0} \exists_{\sigma\colon\{x_1,\dots,x_n\}\to \mathsf{Fm^k}} \forall_{\tau\colon\{x_1,\dots,x_n\}\to \mathsf{Fm^m}} \ (\tau(A_\sigma)\in\mathsf{L} \Rightarrow \tau\not\preccurlyeq_{\sf L}\sigma).\leqno(\star)$$ Thus, $n> 0$ is given. Let us define a sequence of integers $n=m_0<m_1<m_2<\cdots$ and substitutions $\sigma_1,\sigma_2,\dots$ such that, for each $i>0$,\\ (1) $\sigma_i\colon\{x_1,\dots,x_n\}\to \mathsf{Fm}^{m_i}$ and $\tau\not\preccurlyeq_\mathsf{L}\sigma_i$ if $\tau\colon\{x_1,\dots,x_n\}\to \mathsf{Fm}^{m_{i-1}}$ and $\tau(A_{\sigma_i})\in\mathsf{L}$; \\ (2) $\sigma_i(x_{i_1})\land\cdots\land\sigma_i(x_{i_s})$ is {\sf L}--projective, for any $\{i_1,\dots,i_s\}\subseteq \{1,\dots,n\}$;\\ (3) $\sigma_i$-models are $\land$-closed: $\mathfrak{M}^n\land\mathfrak{N}^n=(W,R,w_0,\{\mathfrak{f}_w^n\land\mathfrak{g}_w^n\}_{w\in W})$ is a $\sigma_i$-model, where $(\mathfrak{f}_w^n\land\mathfrak{g}_w^n)(x_j)=\mathfrak{f}_w^n(x_j)\land\mathfrak{g}_w^n(x_j)$ for each $w\in W$ and each $j=1,\dots,n$, if $\mathfrak{M}^n=(W,R,w_0,\{\mathfrak{f}_w^n\}_{w\in W})$ and $\mathfrak{N}^n=(W,R,w_0,\{\mathfrak{g}_w^n\}_{w\in W})$ are $\sigma_i$-models. Let $\mathfrak{M}^n\leq\mathfrak{N}^n$ mean that $\mathfrak{f}_w^n\leq\mathfrak{g}_w^n$ for each $w\in W$, where the order between the valuations is the product order. Then $\mathfrak{M}^n\land\mathfrak{N}^n\leq\mathfrak{M}^n$ and $\mathfrak{M}^n\land\mathfrak{N}^n\leq\mathfrak{N}^n$. Our definition is inductive with respect to $i$.
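\noindent Before carrying out the inductive construction, we illustrate the $\land$-operation from (3) with a small added example (the concrete strings are chosen only for illustration): for $n=3$, if at some world $w$ the valuations $\mathfrak{f}_w^3$ and $\mathfrak{g}_w^3$ are given by the binary strings $110$ and $011$, then $$\mathfrak{f}_w^3\land\mathfrak{g}_w^3=110\land 011=010,$$ computed coordinatewise, and indeed $010\leq 110$ and $010\leq 011$ in the product order.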
Suppose that $m_{i}$ and $\sigma_{i}$ (if $i>0$) are given. Then we apply $(\star)$, where $m=m_{i}$, to get $k$ and $\sigma$ fulfilling (1). We do not simply take $m_{i+1}=k$ and $\sigma_{i+1}=\sigma$, as we also need (2)--(3) to be fulfilled. For (2), let us define $\pi\colon\{x_1,\dots,x_n\}\to \mathsf{Fm^{k+n}}$ by putting $$\pi(x_j)=(x_{k+j}\leftrightarrow\sigma(x_j)),\qquad\mbox{for each $j=1,\dots,n$}.$$ Note that $x_{k+j}$ does not occur in $\sigma(x_j)$ and hence, to show (2) for $\sigma_{i+1}=\pi$, Lemma \ref{niu1} applies. We still have (1) (for $\sigma_{i+1}=\pi$): substituting $x_{k+1}\slash\top,\dots, x_{k+n}\slash \top$ we get $\pi\preccurlyeq\sigma$. But we do not take $m_{i+1}=k+n$ and $\sigma_{i+1}=\pi$ either, as we need (3). For (3), let $\nu_r\colon\mathsf{Var}\to\mathsf{Var}$, for any $r>0$, be given by $\nu_r(x_i)=x_{i+r}$ for each $i$. We define $m_{i+1}=s(k+n)$ and, for each $j=1,\dots,n$, let $$\sigma_{i+1}(x_j)=\pi(x_j)\land \nu_{n+k}(\pi(x_j))\land\nu_{n+k}^2(\pi(x_j))\land\dots\land \nu_{n+k}^{s-1}(\pi(x_j)), \leqno (\star\star)$$ where $s$ is a sufficiently large number specified below. We have $\sigma_{i+1}=\bigwedge_{j=0}^{s-1}(\nu^j_{n+k}\circ\pi)$. Using the inverse mapping $\nu^{-1}$ one shows that $\sigma_{i+1}\preccurlyeq\pi$ and hence (1) holds. For (2), we can use Lemma \ref{niu1}. Let us prove (3). Suppose we are given $(n+k)$-models $\mathfrak{M}_0^{k+n},\dots,\mathfrak{M}_{s-1}^{k+n}$ having the same frame (and root). Let $\mathfrak{M}_i^{k+n}=(W,R,w_0,\{\mathfrak{f}_{i\ w}^{k+n}\}_{w\in W})$, for $i=0,\dots,s-1$, and suppose that the valuations $\mathfrak{f}_{0\ w}^{k+n},\dots,\mathfrak{f}_{s-1\ w}^{k+n}$, for each $w\in W$, are given by binary strings (of the same length $k+n$). Take the concatenation of the strings $$\mathfrak{f}_{w}^{s(k+n)}=\mathfrak{f}_{0\ w}^{k+n}\cdots\mathfrak{f}_{s-1\ w}^{k+n}, \qquad\mbox{for each $w\in W$}.$$ Then we get an $s(k+n)$-model $\mathfrak{M}^{s(k+n)}=(W,R,w_0,\{\mathfrak{f}_{w}^{s(k+n)}\}_{w\in W})$, called {\it the concatenation} of $\mathfrak{M}_0^{k+n},\dots,\mathfrak{M}_{s-1}^{k+n}$, for which $$\sigma_{i+1}(\mathfrak{M}^{s(k+n)})=\pi(\mathfrak{M}^{k+n}_0)\land \pi(\mathfrak{M}^{k+n}_1)\land\dots\land \pi(\mathfrak{M}^{k+n}_{s-1}).$$ Obviously, each $s(k+n)$-model $\mathfrak{M}^{s(k+n)}$ can be obtained as a `concatenation' of its $(k+n)$-fragments, and hence each $\sigma_{i+1}$-model is a conjunction of some $\pi$-models. Since there are only finitely many $n$-models ($\sigma_{i+1}$- and $\pi$-models are $n$-models), all conjunctions of $\pi$-models occur as $\sigma_{i+1}$-models if $s$ is sufficiently big. This means that $\sigma_{i+1}$-models are closed under conjunction if $s$ is big enough, see $(\star\star)$. We get (3). We have substitutions $\sigma_1,\sigma_2,\dots$ (and integers $m_0,m_1,\dots$) fulfilling (1)-(3). Let us note that these conditions are also fulfilled by any subsequence of $\sigma_1,\sigma_2,\dots$. Since $\sigma_i$-models, for each $i$, are $n$-models and there are only finitely many $n$-models, we can find a subsequence $\sigma_{i_1},\sigma_{i_2},\dots$ such that each $\sigma_{i_k}$ has the same set of models.
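\noindent Before continuing, we illustrate the concatenation used above with a small added instance (the concrete strings are chosen only for illustration): for $s=2$ and $k+n=3$, if at a world $w$ the two models carry the valuations $\mathfrak{f}_{0\ w}^{3}=101$ and $\mathfrak{f}_{1\ w}^{3}=011$, then the concatenated $6$-model carries $$\mathfrak{f}_{w}^{6}=\mathfrak{f}_{0\ w}^{3}\,\mathfrak{f}_{1\ w}^{3}=101011$$ at $w$; conversely, splitting such strings back into their two blocks of length $3$ recovers the original pair of $3$-models.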
Thus, we can assume that the sequence $\sigma_1,\sigma_2,\dots$ fulfills \\ \indent (4) $\sigma_i(\mathbf{M}^{m_i})=\sigma_j(\mathbf{M}^{m_j})$ for each $i,j\geq 1$.\\ \noindent The next (and crucial) step in our argument is to show that\\ \indent (5) for each $i>0$, there is a number $r>0$ such that $\bigwedge_{j=0}^{r-1}(\nu^j_{m_{i}}\circ\sigma_{i})\preccurlyeq\sigma_{i+1}.$\\ Let us show there is a mapping $F\colon\mathbf{M}^{m_{i+1}}\to\mathbf{M}^{rm_i}$ fulfilling (see Theorem \ref{nsigmai}): \\ (i) $\mathfrak{M}^{m_{i+1}}$ and $F(\mathfrak{M}^{m_{i+1}})$ have the same frame, for each $\mathfrak{M}^{m_{i+1}}$;\\ (ii) $F((\mathfrak{M}^{m_{i+1}})_w)\thicksim(F(\mathfrak{M}^{m_{i+1}}))_w$, \ for each $w\in W$ ($W$ is the domain of $\mathfrak{M}^{m_{i+1}}$);\\ (iii) If $\mathfrak{N}^{m_{i+1}}\thicksim\mathfrak{M}^{m_{i+1}}$, then $F(\mathfrak{N}^{m_{i+1}})\thicksim F(\mathfrak{M}^{m_{i+1}}$);\\ such that the following diagram commutes (up to $\thicksim$) \begin{figure}[H]\label{} \unitlength1cm \begin{picture}(5,2) \thicklines \put(8,2){\vector(0,-1){1.9}} \put(8,2){\vector(-2,-1){3.9}} \put(8,0){\vector(-1,0){3.9}} \put(8,0){\circle{0.1}} \put(4,0){\circle{0.1}} \put(8,2){\circle{0.1}} \put(8.3,2){\mbox{$\mathbf{M}^{m_{i+1}}$}} \put(3.2,0){\mbox{$\mathbf{M}^n$}} \put(8.3,0){\mbox{$\mathbf{M}^{rm_i}$}} \put(8.1,1){\mbox{$F$}} \put(5.3,0.2){\mbox{$\bigwedge_{j=0}^{r-1}(\nu^j_{m_{i}}\circ\sigma_{i})$}} \put(5.7,1.2){\mbox{$\sigma_{i+1}$}} \end{picture} \caption{}\label{rr} \end{figure} \noindent To specify the number $r$, let us assume that the sequence: $$\mathfrak{M}_0^{m_{i+1}},\mathfrak{M}_1^{m_{i+1}}\dots,\mathfrak{M}_{r-1}^{m_{i+1}}\leqno (\star\star\star)$$ contains all p-irreducible (!) $m_{i+1}$-models. Suppose that we have defined $F_j\colon\mathbf{M}^{m_{i+1}}\to\mathbf{M}^{m_i}$, for any $j=0,1,\dots,r-1$ fulfilling \\(i) $\mathfrak{M}^{m_{i+1}}$ and $F_j(\mathfrak{M}^{m_{i+1}})$ have the same frame, for each $\mathfrak{M}^{m_{i+1}}$;\\ (ii) $F_j((\mathfrak{M}^{m_{i+1}})_w)=(F_j(\mathfrak{M}^{m_{i+1}}))_w$, \ for each $w\in W$;\\ (iii) If $\mathfrak{N}^{m_{i+1}}\thicksim\mathfrak{M}^{m_{i+1}}$, then $F_j(\mathfrak{N}^{m_{i+1}})\thicksim F_j(\mathfrak{M}^{m_{i+1}})$\\ and let $F(\mathfrak{M}^{m_{i+1}})$ be the concatenation of the models $F_0(\mathfrak{M}^{m_{i+1}}),\dots,F_{r-1}(\mathfrak{M}^{m_{i+1}})$. We could not claim the following diagram commutes, for any $j$, \begin{figure}[H] \unitlength1cm \begin{picture}(5,2) \thicklines \put(8,2){\vector(0,-1){1.9}} \put(8,2){\vector(-2,-1){3.9}} \put(8,0){\vector(-1,0){3.9}} \put(8,0){\circle{0.1}} \put(4,0){\circle{0.1}} \put(8,2){\circle{0.1}} \put(8.3,2){\mbox{$\mathbf{M}^{m_{i+1}}$}} \put(3.2,0){\mbox{$\mathbf{M}^n$}} \put(8.3,0){\mbox{$\mathbf{M}^{m_i}$}} \put(8.1,1){\mbox{$F_j$}} \put(6.3,0.2){\mbox{$\sigma_{i}$}} \put(5.7,1.2){\mbox{$\sigma_{i+1}$}} \end{picture} \caption{} \end{figure} \noindent as it would give us $\sigma_{i+1}\preccurlyeq\sigma_i$ contradicting (1). But we have $$\sigma_i(F_k(\mathfrak{M}_k^{m_{i+1}}))=\sigma_{i+1}(\mathfrak{M}_k^{m_{i+1}})\quad \mbox{and} \quad \sigma_i(F_j(\mathfrak{M}_k^{m_{i+1}}))\geq \sigma_{i+1}(\mathfrak{M}_k^{m_{i+1}}),\leqno(\mbox{iv})$$ for each $k,j\in\{0,\dots,r-1\}$. 
This means that the diagram in Figure \ref{rr} commutes, as $$\bigwedge_{j=0}^{r-1}\nu^j_{m_{i}}\circ\sigma_{i}(F(\mathfrak{M}_k^{m_{i+1}}))= \bigwedge_{j=0}^{r-1}\sigma_i(F_j(\mathfrak{M}_k^{m_{i+1}}))=\sigma_i(F_k(\mathfrak{M}_k^{m_{i+1}}))= \sigma_{i+1}(\mathfrak{M}_k^{m_{i+1}});$$ and we know that each $\mathfrak{M}^{m_{i+1}}\in \mathbf{M^{m_{i+1}}}$ is equivalent with some $\mathfrak{M}_k^{m_{i+1}}$, so by (iii) we get $$\bigwedge_{j=0}^{r-1}\nu^j_{m_{i}}\circ\sigma_{i}(F(\mathfrak{M}^{m_{i+1}}))\thicksim \sigma_{i+1}(\mathfrak{M}^{m_{i+1}}).$$ We could show (i)-(iii) for $F$ using the fact that these conditions are fulfilled by each $F_j$. But such an argument would be rather involved, and we can make it easier by giving an exact definition of a substitution $\alpha\colon\{x_1,\dots,x_{rm_i}\}\to \mathsf{Fm^{m_{i+1}}}$ such that $H_\alpha\thicksim F$. Then (i)-(iii) follow from Lemma \ref{nsigmai}. For any $F_j$ (where $j=0,\dots,r-1$), we should have $F_j\thicksim H_{\alpha_j}$ for some $\alpha_j\colon\{x_1,\dots,x_{m_i}\}\to \mathsf{Fm^{m_{i+1}}}$. Then we define $\alpha$ as a disjoint union of the $\alpha_j$'s. More specifically: $$\alpha(x_{l+jm_i})=\alpha_j(x_l), \quad\mbox{for each $l=1,\dots,m_i$}.$$ Obviously, $\alpha(\mathfrak{M}^{m_{i+1}})$ is the concatenation of $\alpha_0(\mathfrak{M}^{m_{i+1}}),\dots,\alpha_{r-1}(\mathfrak{M}^{m_{i+1}})$ for each $m_{i+1}$-model $\mathfrak{M}^{m_{i+1}}$. It remains to define $F_j$, for $j=0,\dots,r-1$. Let us define each $F_j\colon\mathbf{M}^{m_{i+1}}\to\mathbf{M}^{m_i}$ as a partial mapping; its domain $D(F_j)$ is an upward-closed subset of $\mathbf{M}^{m_{i+1}}$, which means $$\mbox{if} \quad \mathsf{Th}(\mathfrak{N}^{m_{i+1}}) \subseteq\mathsf{Th}(\mathfrak{M}^{m_{i+1}})\quad \mbox{and } \quad \mathfrak{N}^{m_{i+1}}\in D(F_j),\quad \mbox{then }\quad \mathfrak{M}^{m_{i+1}}\in D(F_j).$$ Obviously, $F_j$ should also fulfill (i)-(iv), for models in $D(F_j)$. Then, step by step, we extend the domain $D(F_j)$ to the whole set $\mathbf{M}^{m_{i+1}}$; our definition of $F_j(\mathfrak{M}^{m_{i+1}})$ is inductive with respect to the depth of $\mathfrak{M}^{m_{i+1}}$. (A). By (4), $\sigma_{i+1}(\mathfrak{M}_j^{m_{i+1}})=\sigma_{i}(\mathfrak{M}^{m_{i}})$, for some $\mathfrak{M}^{m_{i}}\in\mathbf{M}^{m_{i}}$. Thus, we take $$F_j(\mathfrak{M}_j^{m_{i+1}})=\mathfrak{M}^{m_{i}},$$ which gives $\sigma_i(F_j(\mathfrak{M}_j^{m_{i+1}}))=\sigma_{i+1}(\mathfrak{M}_j^{m_{i+1}})$, and this guarantees the (first part of the) condition (iv). The models $\sigma_{i+1}(\mathfrak{M}_j^{m_{i+1}})$, $\sigma_{i}(\mathfrak{M}^{m_{i}})$, $\mathfrak{M}_j^{m_{i+1}}$, $\mathfrak{M}^{m_{i}}$ have the same frame $(W,R,w_0)$ and only their valuations may differ. To get (ii) we should take $$F_j((\mathfrak{M}_j^{m_{i+1}})_w)=(\mathfrak{M}^{m_{i}})_w, \quad \mbox{for each } w\in W.$$ According to Theorem \ref{pM6}, $(\mathfrak{M}_j^{m_{i+1}})_w$, for each $w\in W$, is p-irreducible and hence for each $\mathfrak{N}^{m_{i+1}}$ equivalent with $(\mathfrak{M}_j^{m_{i+1}})_w$ there is exactly one p-morphism (see Theorem \ref{lf3i}) $p\colon\mathfrak{N}^{m_{i+1}}\to(\mathfrak{M}_j^{m_{i+1}})_w$. To satisfy (iii), we should take $$F_j(\mathfrak{N}^{m_{i+1}})=p^{-1}((\mathfrak{M}^{m_{i}})_w),$$ where $p^{-1}((\mathfrak{M}^{m_{i}})_w)$ is the only $n$-model on $(W,R,w_0)$ such that the mapping $p$ is a p-morphism $p\colon p^{-1}((\mathfrak{M}^{m_{i}})_w)\to (\mathfrak{M}^{m_{i}})_w$ of $n$-models.
Since the valuations are preserved by p-morphisms, $\sigma_i(F_j(\mathfrak{N}^{m_{i+1}}))=\sigma_{i+1}(\mathfrak{N}^{m_{i+1}})$, which guarantees the second part of (iv) if $\mathfrak{N}^{m_{i+1}}=\mathfrak{M}_k^{m_{i+1}}$, for some $k$. Our definition of the (partial) mapping $F_j$ is complete; its domain is an upset. (B). There is no problem in defining $F_j(\mathfrak{M}^{m_{i+1}})$ for any $m_{i+1}$-model $\mathfrak{M}^{m_{i+1}}$ over a one-element frame (assuming the model does not already belong to $D(F_j)$ by (A)). By (4), it suffices to define $F_j(\mathfrak{M}^{m_{i+1}})$ in such a way that $\sigma_i(F_j(\mathfrak{M}^{m_{i+1}}))=\sigma_{i+1}(\mathfrak{M}^{m_{i+1}})$ for each $m_{i+1}$-model over a one-element frame. Each $\mathfrak{N}^{m_{i+1}}=(W,R,w_0,\{\mathfrak{f}_w^{m_{i+1}}\}_{w\in W})$ equivalent with a model with a one-element frame has $\mathfrak{f}_w^{m_{i+1}}=\mathfrak{f}_u^{m_{i+1}}$, for each $u,w\in W$, and hence we can define $F_j(\mathfrak{N}^{m_{i+1}})$ preserving $\sigma_i(F_j(\mathfrak{N}^{m_{i+1}}))=\sigma_{i+1}(\mathfrak{N}^{m_{i+1}})$. The conditions (i)-(iv) are fulfilled and $D(F_j)$ is an upset by Theorem \ref{pat}. (C). If all the $\mathfrak{M}_k^{m_{i+1}}$'s belong to $D(F_j)$, we are done. Suppose that some $\mathfrak{M}_k^{m_{i+1}}$ does not belong to $D(F_j)$. We can assume that $\mathfrak{M}_k^{m_{i+1}}=(W,R,w_0,\{\mathfrak{f}_w^{m_{i+1}}\}_{w\in W})$ and $(\mathfrak{M}_k^{m_{i+1}})_w\in D(F_j)$ for each $w\not=w_0$. Thus, we have $F_j((\mathfrak{M}_k^{m_{i+1}})_w)$ for each $w\not=w_0$ and we need to define $F_j(\mathfrak{M}_k^{m_{i+1}})$. In other words, an ${m_{i}}$-model $\mathfrak{M}^{m_{i}}=(W,R,w_0,\{\mathfrak{g}_w^{m_{i}}\}_{w\in W})$ is given such that $(\mathfrak{M}^{m_{i}})_w=F_j((\mathfrak{M}_k^{m_{i+1}})_w)$ for each $w\not=w_0$ (what $\mathfrak{g}_{w_0}^{m_{i}}$ is does not matter), and we need a variant $\mathfrak{M}_0^{m_{i}}=(W,R,w_0,\{\mathfrak{f}_w^{m_{i}}\}_{w\in W})$ of it such that $\sigma_i(\mathfrak{M}_0^{m_{i}})\geq \sigma_{i+1}(\mathfrak{M}_k^{m_{i+1}})$; then we can take $F_j(\mathfrak{M}_k^{m_{i+1}})=\mathfrak{M}_0^{m_{i}}$, fulfilling all requirements (except for (iii)). Let $x\in\{x_1,\dots,x_{n}\}$. We have $$ \mathfrak{M}_k^{m_{i+1}}\Vdash_w\sigma_{i+1}(x)\quad\Rightarrow\quad \mathfrak{M}^{m_{i}}\Vdash_w\sigma_i(x), \qquad\mbox{for each }\ w\not=w_0$$ and want a variant $\mathfrak{M}_0^{m_{i}}$ of $\mathfrak{M}^{m_{i}}$ such that $$ \mathfrak{M}_k^{m_{i+1}}\Vdash_{w_0}\sigma_{i+1}(x)\quad\Rightarrow\quad \mathfrak{M}_0^{m_{i}}\Vdash_{w_0}\sigma_i(x).$$ If $\mathfrak{M}_k^{m_{i+1}}\not\Vdash_w\sigma_{i+1}(x)$ for some $w\not=w_0$, the implication holds. Thus, it suffices to consider the set $\{j_1,\dots,j_s\}\subseteq \{1,\dots,n\}$ of all indices $j$ such that $$\mathfrak{M}^{m_{i}}\Vdash_{w}\sigma_i(x_j), \qquad\mbox{for each} \ w\not=w_0. $$ By (2), $\sigma_i(x_{j_1})\land\cdots\land\sigma_i(x_{j_s})$ is {\sf L}--projective and hence, by Theorem \ref{niu2}, there is a variant $\mathfrak{M}_0^{m_{i}}$ of $\mathfrak{M}^{m_{i}}$ such that $$\mathfrak{M}_0^{m_{i}}\Vdash_{w_0}\sigma_i(x_{j_1})\land\cdots\land\sigma_i(x_{j_s}).$$ The definition of $F_j(\mathfrak{M}_k^{m_{i+1}})$ is complete. It remains to add that all $m_{i+1}$-models $\mathfrak{N}^{m_{i+1}}$ with $\mathsf{Th}(\mathfrak{N}^{m_{i+1}}) \subset\mathsf{Th}(\mathfrak{M}_k^{m_{i+1}})$ are included in $D(F_j)$ by Lemma \ref{pat}.
If $\mathfrak{N}^{m_{i+1}}\thicksim\mathfrak{M}_k^{m_{i+1}}$, then there is a p-morphism $p\colon\mathfrak{N}^{m_{i+1}}\to\mathfrak{M}_k^{m_{i+1}}$ and hence we can take $F_j(\mathfrak{N}^{m_{i+1}})=p^{-1}(\mathfrak{M}_0^{m_{i}})$. Thus, $D(F_j)$ remains an upset.\\ We have shown (5) and can now return to the proof of our theorem. Suppose that {\sf L} has infinitary unification and let $A=A_{\sigma_1}$. It follows from (4) that $A=A_{\sigma_i}$, for each $i$, and hence all the $\sigma_i$'s are unifiers for $A$. By (1), $A$ cannot have finitary unification in $\mathsf L$, as there is no unifier of $A$ which would be more general than all the $\sigma_i$'s. Let $\Sigma$ be a minimal complete set of unifiers for $A$. Then $\tau\preccurlyeq\sigma_1$ for some $\tau\colon\{x_1,\dots,x_n\}\to \mathsf{Fm^m}$ in $\Sigma$. Thus, there is a number $i$ such that $\tau\preccurlyeq\sigma_i$ and $\tau\not\preccurlyeq\sigma_{i+1}$. By $\tau\preccurlyeq\sigma_i$, we also get $\nu^j_{m}\circ\tau\preccurlyeq\nu^k_{m_{i}}\circ\sigma_i$, for any $j,k\geq 0$; it does not matter whether $m=m_i$ or not. Thus, $$\bigwedge_{j=0}^{r-1}(\nu^j_{m}\circ\tau)\preccurlyeq\bigwedge_{j=0}^{r-1}(\nu^j_{m_{i}}\circ\sigma_{i}).$$ But $\bigwedge_{j=0}^{r-1}(\nu^j_{m}\circ\tau)\preccurlyeq\tau$ and hence $\tau\preccurlyeq\bigwedge_{j=0}^{r-1}(\nu^j_{m}\circ\tau)$, as $\bigwedge_{j=0}^{r-1}(\nu^j_{m}\circ\tau)$ is a unifier for $A$, by (3), and $\tau\in\Sigma$. Thus, we get $\tau\preccurlyeq\sigma_{i+1}$, by (5), which is a contradiction. \end{proof} Let {\sf L} be a locally tabular intermediate logic and suppose we have shown, using Theorem \ref{main} or \ref{main2}, that unification in {\sf L} is neither finitary nor unitary. By Theorem \ref{niu3} above, this means that {\sf L} has nullary unification. In the following section, we prove in this way that very many intermediate logics have nullary unification. \subsection{Nullary Unification}\label{NUni} It is known that $\mathsf L({\mathfrak G_3})$ and $\mathsf L({\mathfrak G_3}+)$ (see Figure \ref{GF}) have nullary unification, see {\it Introduction}. In \cite{dkw}, we proved that unification in the modal versions of these logics is nullary. Below we present the intuitionistic version of our argument. \begin{theorem}\label{F6m} The logics $\mathsf L({\mathfrak G_3})$ and $\mathsf L({\mathfrak G_3}+)$ have nullary unification.\end{theorem} \begin{proof} Let $\mathbf{F}=\{\mathfrak L_1,\mathfrak L_2,\mathfrak L_3,\mathfrak F_{2},\mathfrak G_{3}\}$. Then $\mathbf{F}=sm(\mathfrak G_{3})$. Assume ${\mathsf L}(\mathbf F)$ has finitary unification. By Theorem \ref{main}, for every $n\geq 1$ there is a number $m\geq 1$ such that for any $\sigma\colon\{x_1\}\to \mathsf{Fm}^k$ there are mappings $G:\mathbf{M}^m\to\mathbf{M}^1$ and $F:\mathbf{M}^k\to\mathbf{M}^m$ fulfilling the conditions (i)-(v).
Let $n=1\leq m<k$ and $\sigma\colon\{x_1\}\to\mathsf{Fm}^k$ be as follows $$\sigma(x_1)=\neg\neg (\bigvee_{i=1}^k x_i)\land\bigwedge_{i=1}^k(\neg\neg x_i\lor\neg x_i).$$ \unitlength1cm \begin{picture}(3,2) \thicklines \put(0,1){Take any $\mathfrak{M}^k$ over $\mathfrak G_3$:} \put(8,1){and notice $\sigma(\mathfrak{M}^k)$ is:} \put(6,0){\vector(-1,1){0.9}} \put(6,0){\vector(1,1){0.9}} \put(7,1){\vector(0,1){0.9}} \put(5,1){\circle{0.1}} \put(7,2){\circle{0.1}} \put(6,0){\circle{0.1}} \put(7,1){\circle{0.1}} \put(6.3,0){\mbox{$\mathfrak{f}_2^k$}} \put(4.5,1){\mbox{$\mathfrak{f}_{0'}^k$}} \put(7.3,2){\mbox{$\mathfrak{f}_0^k$}} \put(7.3,1){\mbox{$\mathfrak{f}_1^k$}} \end{picture} \unitlength1cm \begin{picture}(3,2) \thicklines \put(1,1.5){a.} \put(1,0){\vector(-1,1){0.9}} \put(1,0){\vector(1,1){0.9}} \put(2,1){\vector(0,1){0.9}} \put(0,1){\circle{0.1}} \put(2,2){\circle{0.1}} \put(1,0){\circle{0.1}} \put(2,1){\circle{0.1}} \put(1.3,0){\mbox{$0$}} \put(0.2,1){\mbox{$0$}} \put(1.7,2){\mbox{$0$}} \put(1.7,1){\mbox{$0$}} \put(3.5,1.5){b.} \put(3.5,0){\vector(-1,1){0.9}} \put(3.5,0){\vector(1,1){0.9}} \put(4.5,1){\vector(0,1){0.9}} \put(2.5,1){\circle{0.1}} \put(4.5,2){\circle{0.1}} \put(3.5,0){\circle{0.1}} \put(4.5,1){\circle{0.1}} \put(3.8,0){\mbox{$0$}} \put(4.2,1){\mbox{$1$}} \put(4.2,2){\mbox{$1$}} \put(2.7,1){\mbox{$0$}} \put(6,1.5){c.} \put(6,0){\vector(-1,1){0.9}} \put(6,0){\vector(1,1){0.9}} \put(7,1){\vector(0,1){0.9}} \put(5,1){\circle{0.1}} \put(7,2){\circle{0.1}} \put(6,0){\circle{0.1}} \put(7,1){\circle{0.1}} \put(6.3,0){\mbox{$0$}} \put(5.3,1){\mbox{$1$}} \put(6.7,2){\mbox{$0$}} \put(6.7,1){\mbox{$0$}} \put(8.5,1.5){d.} \put(8.5,0){\vector(-1,1){0.9}} \put(8.5,0){\vector(1,1){0.9}} \put(9.5,1){\vector(0,1){0.9}} \put(7.5,1){\circle{0.1}} \put(9.5,2){\circle{0.1}} \put(8.5,0){\circle{0.1}} \put(9.5,1){\circle{0.1}} \put(8.8,0){\mbox{$1$}} \put(7.8,1){\mbox{$1$}} \put(9.2,2){\mbox{$1$}} \put(9.2,1){\mbox{$1$}} \put(11,1.5){e.} \put(11,0){\vector(-1,1){0.9}} \put(11,0){\vector(1,1){0.9}} \put(12,1){\vector(0,1){0.9}} \put(10,1){\circle{0.1}} \put(12,2){\circle{0.1}} \put(11,0){\circle{0.1}} \put(12,1){\circle{0.1}} \put(11.3,0){\mbox{$0$}} \put(10.3,1){\mbox{$1$}} \put(11.7,2){\mbox{$1$}} \put(11.7,1){\mbox{$1$}} \end{picture} \noindent a. if $\mathfrak{f}_0^k=0\cdots0=\mathfrak{f}_{0'}^k$;\qquad b. if $\mathfrak{f}_{0}^k\not=0\cdots0=\mathfrak{f}_{0'}^k$;\qquad c. if $\mathfrak{f}_{0}^k=0\cdots0\not=\mathfrak{f}_{0'}^k$;\\ d. if $\mathfrak{f}_{0'}^k=\mathfrak{f}_0^k\not=0\cdots0$;\qquad e. if $0\cdots0\not=\mathfrak{f}_{0}^k\not=\mathfrak{f}_{0'}^k\not=0\cdots0$.\\ \noindent Thus, each $\sigma$-model is equivalent with a model of the depth $\leq 2$ and there are four p-irreducible $1$-models equivalent with some $\sigma$-models. By (v) $$G(F(\circ \ \ 0\cdots0))=G(\circ \ \mathfrak{g}^m)) =\sigma(\circ\ \ 0\cdots0)=\circ\ 0,$$ for some $\mathfrak{g}^m$. Since $m<k$, one can find $\mathfrak{f}^k\not=\mathfrak{g}^k$ such that $F(\circ\ \mathfrak{f}^k)=\circ \ \mathfrak{f}^m=F(\circ\ \mathfrak{g}^k)$, and $G(\circ \ \mathfrak{f}^m)=\circ \ 1$, for some $\mathfrak{f}^m$. 
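\noindent The counting behind the last step can be made explicit (an added remark; the bound is immediate): a one-point $k$-model is determined by its valuation, so there are $2^k-1$ one-point $k$-models with a non-zero valuation, whereas there are at most $2^m$ one-point $m$-models, and $$2^k-1\geq 2^{m+1}-1>2^m, \qquad\mbox{whenever } k>m\geq 1.$$ Hence $F$ must identify two distinct one-point $k$-models with non-zero valuations, and, by (v), $G$ applied to their common $F$-image equals $\sigma(\circ\ \mathfrak{f}^k)=\circ\ 1$.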
By the characterization of all $\sigma$-models\\ \begin{center} $G\Bigl($ \begin{minipage}[c][10mm][b]{10mm} \begin{picture}(3,2) \thicklines \linethickness{0.3mm} \put(0.2,0){\circle{0.1}} \put(0.5,0){\mbox{$?$}} \put(0.2,1){\circle{0.1}} \put(0.5,1){\mbox{$\mathfrak{f}^m$}} \put(0.2,0){\vector(0,1){0.9}} \end{picture} \end{minipage} $\Bigr) \quad = \quad$ \begin{minipage}[c][10mm][b]{10mm} \begin{picture}(3,2) \thicklines \linethickness{0.3mm} \put(0.2,0){\circle{0.1}} \put(0.5,0){\mbox{$0$}} \put(0.2,1){\circle{0.1}} \put(0.5,1){\mbox{$1$}} \put(0.2,0){\vector(0,1){0.9}} \end{picture} \end{minipage} \quad or \quad $G\Bigl($ \begin{minipage}[c][10mm][b]{10mm} \begin{picture}(3,2) \thicklines \linethickness{0.3mm} \put(0.2,0){\circle{0.1}} \put(0.5,0){\mbox{$?$}} \put(0.2,1){\circle{0.1}} \put(0.5,1){\mbox{$\mathfrak{f}^m$}} \put(0.2,0){\vector(0,1){0.9}} \end{picture} \end{minipage} $\Bigr) \quad = \quad$ \begin{minipage}[c][10mm][b]{10mm} \begin{picture}(3,2) \thicklines \linethickness{0.3mm} \put(0.2,0){\circle{0.1}} \put(0.5,0){\mbox{$1$}} \put(0.2,1){\circle{0.1}} \put(0.5,1){\mbox{$1$}} \put(0.2,0){\vector(0,1){0.9}} \end{picture} \end{minipage} \end{center}$ $\\ \noindent for any $m$-valuation $?$. But if the first had happened, we would have \\ \begin{center} $G\Bigl($ \begin{minipage}[c][20mm][b]{25mm} \begin{picture}(3,2) \thicklines \linethickness{0.3mm} \put(1,0){\vector(-1,1){0.9}} \put(1,0){\vector(1,1){0.9}} \put(2,1){\vector(0,1){0.9}} \put(0,1){\circle{0.1}} \put(2,2){\circle{0.1}} \put(1,0){\circle{0.1}} \put(2,1){\circle{0.1}} \put(1.3,0){$0\cdots0$} \put(.5,1){\mbox{$\mathfrak{g}^m$}} \put(2.3,2){\mbox{$\mathfrak{f}^m$}} \put(2.3,1){?} \end{picture} \end{minipage} $\Bigr) \quad = \quad$ \begin{minipage}[c][20mm][b]{25mm} \begin{picture}(3,2) \thicklines \linethickness{0.3mm} \put(1,0){\vector(-1,1){0.9}} \put(1,0){\vector(1,1){0.9}} \put(2,1){\vector(0,1){0.9}} \put(0,1){\circle{0.1}} \put(2,2){\circle{0.1}} \put(1,0){\circle{0.1}} \put(2,1){\circle{0.1}} \put(1.3,0){$0$} \put(.5,1){\mbox{$0$}} \put(2.3,2){\mbox{$1$}} \put(2.3,1){$0$} \end{picture} \end{minipage} \end{center}$ $\\ \noindent which would contradict (iv) as no $\sigma$-model is equivalent to the $1$-model on the right hand side of the above equation. 
We conclude that \\ \begin{center} $G\Bigl($ \begin{minipage}[c][10mm][b]{10mm} \begin{picture}(3,2) \thicklines \linethickness{0.3mm} \put(0.2,0){\circle{0.1}} \put(0.5,0){\mbox{$?$}} \put(0.2,1){\circle{0.1}} \put(0.5,1){\mbox{$\mathfrak{f}^m$}} \put(0.2,0){\vector(0,1){0.9}} \end{picture} \end{minipage} $\Bigr) \quad = \quad$ \begin{minipage}[c][10mm][b]{10mm} \begin{picture}(3,2) \thicklines \linethickness{0.3mm} \put(0.2,0){\circle{0.1}} \put(0.5,0){\mbox{$1$}} \put(0.2,1){\circle{0.1}} \put(0.5,1){\mbox{$1$}} \put(0.2,0){\vector(0,1){0.9}} \end{picture} \end{minipage} $\quad \thicksim \quad \circ \ 1$ \end{center}$ $\\ and hence \begin{center} $G\Bigl( F\Bigl($ \begin{minipage}[c][20mm][b]{27mm} \begin{picture}(3,2) \thicklines \linethickness{0.3mm} \put(1,0){\vector(-1,1){0.9}} \put(1,0){\vector(1,1){0.9}} \put(2,1){\vector(0,1){0.9}} \put(0,1){\circle{0.1}} \put(2,2){\circle{0.1}} \put(1,0){\circle{0.1}} \put(2,1){\circle{0.1}} \put(1.3,0){$0\cdots0$} \put(.5,1){\mbox{$\mathfrak{f}^k$}} \put(2.3,2){\mbox{$\mathfrak{g}^k$}} \put(2.3,1){$\mathfrak{g}^k$} \end{picture} \end{minipage} $\Bigr) \Bigr) \quad = \quad G\Bigl($ \begin{minipage}[c][20mm][b]{27mm} \begin{picture}(3,2) \thicklines \linethickness{0.3mm} \put(1,0){\vector(-1,1){0.9}} \put(1,0){\vector(1,1){0.9}} \put(2,1){\vector(0,1){0.9}} \put(0,1){\circle{0.1}} \put(2,2){\circle{0.1}} \put(1,0){\circle{0.1}} \put(2,1){\circle{0.1}} \put(1.3,0){?} \put(.5,1){\mbox{$\mathfrak{f}^m$}} \put(2.3,2){\mbox{$\mathfrak{f}^m$}} \put(2.3,1){$\mathfrak{f}^m$} \end{picture} \end{minipage} $\Bigr)\thicksim \circ \ 1$ \end{center}$ $\\ But this is in contradiction with e. \\ \begin{center} $\sigma\Bigl($ \begin{minipage}[c][20mm][b]{27mm} \begin{picture}(3,2) \thicklines \linethickness{0.3mm} \put(1,0){\vector(-1,1){0.9}} \put(1,0){\vector(1,1){0.9}} \put(2,1){\vector(0,1){0.9}} \put(0,1){\circle{0.1}} \put(2,2){\circle{0.1}} \put(1,0){\circle{0.1}} \put(2,1){\circle{0.1}} \put(1.3,0){$0\cdots0$} \put(.5,1){\mbox{$\mathfrak{f}^k$}} \put(2.3,2){\mbox{$\mathfrak{g}^k$}} \put(2.3,1){$\mathfrak{g}^k$} \end{picture} \end{minipage} $\Bigr) \quad = \quad $ \begin{minipage}[c][20mm][b]{27mm} \begin{picture}(3,2) \thicklines \linethickness{0.3mm} \put(1,0){\vector(-1,1){0.9}} \put(1,0){\vector(1,1){0.9}} \put(2,1){\vector(0,1){0.9}} \put(0,1){\circle{0.1}} \put(2,2){\circle{0.1}} \put(1,0){\circle{0.1}} \put(2,1){\circle{0.1}} \put(1.3,0){$0$} \put(.5,1){\mbox{$1$}} \put(2.3,2){\mbox{$1$}} \put(2.3,1){$1$} \end{picture} \end{minipage} \end{center} Let $\mathbf{F}=\{\mathfrak L_1,\mathfrak L_2,\mathfrak L_3,\mathfrak L_{4},\mathfrak R_{2},\mathfrak G_{3}+\}$. Then $\mathbf{F}=sm(\mathfrak G_{3}+)$. Suppose that ${\mathsf L}(\mathbf F)$ has unitary unification. Take $n=2$. By Theorem \ref{main}, there is a number $m\geq 1$ such that for any $\sigma\colon\{x_1,x_2\}\to \mathsf{Fm}^{k+1}$ there are mappings $G:\mathbf{M}^m\to\mathbf{M}^2$ and $F:\mathbf{M}^{k+1}\to\mathbf{M}^m$ fulfilling the conditions (i)-(v). Let $k>m$ and $$ \begin{array}{rl} \sigma(x_1)=& x_1\\ \sigma(x_2)= & \Bigl(\bigl(( \bigvee_{i=2}^{k+1}x_i)\to x_1\bigr)\to x_1\Bigr) \ \land \ \bigwedge_{i=2}^{k+1}\Bigl(\bigl((x_i\to x_1)\to x_1\bigr)\lor (x_i\to x_1)\Bigr). \end{array} $$ If we take $\alpha\circ\sigma$, where $\alpha:x_1\slash\bot$, we get $\sigma$ as used for $\mathfrak G_{3}$ (there is only a shift of variables from $x_1\dots x_k$ to $x_2\dots x_{k+1}$). 
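\noindent The composition mentioned above can be written out explicitly (an added routine computation, using $x\to\bot\equiv\neg x$): for $\alpha\colon x_1\slash\bot$ we get $\alpha(\sigma(x_1))=\bot$ and $$\alpha(\sigma(x_2))= \neg\neg\Bigl(\bigvee_{i=2}^{k+1}x_i\Bigr)\ \land\ \bigwedge_{i=2}^{k+1}\bigl(\neg\neg x_i\lor\neg x_i\bigr),$$ which is exactly the substitution used above for $\mathfrak G_{3}$, after shifting the variables $x_1\dots x_k$ to $x_2\dots x_{k+1}$.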
If $x_1$ is false at the top element of any $2$-model $\mathfrak M^2$ over $\mathfrak G_{3}+$, then $ \sigma(x_2)$ is true at the model and hence $\sigma(\mathfrak M^2)$ reduces to a model over $\mathfrak L_1$. Decapitation of the top element (and other elements at which $x_1$ is true) give us models over $\mathfrak G_{3}$. Then we can argue as in the case of $\mathsf L(\mathfrak{G}_3).$ We have relatively many $\sigma$-models of the depth $\leq 2$ but there are only two p-irreducible equivalents of $\sigma$-models of the depth $\geq 3$. They are \begin{figure}[H] \unitlength1cm \begin{picture}(3,2) \thicklines \put(2,1){\vector(-1,1){0.9}} \put(0,1){\vector(1,1){0.9}} \put(1,0){\vector(-1,1){0.9}} \put(1,0){\vector(1,1){0.9}} \put(0,1){\circle{0.1}} \put(1,2){\circle{0.1}} \put(1,0){\circle{0.1}} \put(2,1){\circle{0.1}} \put(1.3,0){\mbox{$00$}} \put(0.3,1){\mbox{$00$}} \put(1.2,2){\mbox{$11$}} \put(2.3,1){\mbox{$01$}} \put(3.5,0){\vector(0,1){0.9}} \put(3.5,0){\circle{0.1}} \put(3.5,1){\vector(0,1){0.9}} \put(3.5,1){\circle{0.1}} \put(3.5,2){\circle{0.1}} \put(3.8,0){\mbox{$00$}} \put(3.8,1){\mbox{$01$}} \put(3.8,2){\mbox{$11$}} \put(6,2){Cutting off the top element and erasing the} \put(6,1.5){first variable we get the $\sigma$-models for $\mathsf L(\mathfrak{G}_3):$} \put(7,0){\vector(-1,1){0.9}} \put(7,0){\vector(1,1){0.9}} \put(6,1){\circle{0.1}} \put(7,0){\circle{0.1}} \put(8,1){\circle{0.1}} \put(7.3,0){\mbox{$0$}} \put(6.3,1){\mbox{$0$}} \put(8.3,1){\mbox{$1$}} \put(9,0){\vector(0,1){0.9}} \put(9,0){\circle{0.1}} \put(9,1){\circle{0.1}} \put(9.2,0){\mbox{$0$}} \put(9.2,1){\mbox{$1$}} \end{picture} \end{figure} \noindent Let $F(\circ\ 1\cdots1)=\circ \ \mathfrak h^m$. Since $\sigma(\circ\ 1\cdots1)=\circ \ 11$, we have $G(\circ\ 1\cdots1)=\circ \ 11$, by (v). 
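\noindent A quick added check of the equality $\sigma(\circ\ 1\cdots1)=\circ \ 11$ used above: at the one-point model where all of $x_1,\dots,x_{k+1}$ are true, $\sigma(x_1)=x_1$ is true, and every implication occurring in $\sigma(x_2)$ has the true consequent $x_1$, so $\sigma(x_2)$ is true as well; hence the $\sigma$-image is indeed $\circ\ 11$.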
We also have $\mathfrak g^m,\mathfrak f^m$ and $\mathfrak g^k\not=\mathfrak f^k$ such that \begin{figure}[H] \unitlength1cm \begin{picture}(3,1) \thicklines \put(0,0.5){\mbox{$F\bigl($}} \put(0.6,0){\vector(0,1){0.9}} \put(0.6,0){\circle{0.1}} \put(0.6,1){\circle{0.1}} \put(0.8,0){\mbox{$0\cdots0$}} \put(0.8,1){\mbox{$1\cdots1$}} \put(1.8,0.5){\mbox{$\bigr) \ =$}} \put(3,0){\vector(0,1){0.9}} \put(3,0){\circle{0.1}} \put(3,1){\circle{0.1}} \put(3.2,0){\mbox{$\mathfrak g^m$}} \put(3.2,1){\mbox{$\mathfrak h^m$}} \put(4,0.5){,} \put(5,0.5){\mbox{$F\bigl($}} \put(6,0){\vector(0,1){0.9}} \put(6,0){\circle{0.1}} \put(6,1){\circle{0.1}} \put(6.2,1){\mbox{$1\cdots1$}} \put(6.2,0){\mbox{$0\mathfrak f^k$}} \put(7.3,0.5){\mbox{$\bigr) \ =$}} \put(8.2,0.5){\mbox{$F\bigl($}} \put(9,0){\vector(0,1){0.9}} \put(9,0){\circle{0.1}} \put(9,1){\circle{0.1}} \put(9.2,0){\mbox{$0\mathfrak g^k$}} \put(9.2,1){\mbox{$1\cdots1$}} \put(10.3,0.5){\mbox{$\bigr) \ =$}} \put(11.5,0){\vector(0,1){0.9}} \put(11.5,0){\circle{0.1}} \put(11.5,1){\circle{0.1}} \put(11.7,0){\mbox{$\mathfrak f^m$}} \put(11.7,1){\mbox{$\mathfrak h^m$}} \end{picture} \end{figure} \begin{figure}[H] \unitlength1cm \begin{picture}(3,1) \thicklines \put(0,1){Then by (v)} \put(3.8,0.5){\mbox{$\bigr) \ =$}} \put(3,0){\vector(0,1){0.9}} \put(3,0){\circle{0.1}} \put(3,1){\circle{0.1}} \put(3.2,0){\mbox{$\mathfrak g^m$}} \put(3.2,1){\mbox{$\mathfrak h^m$}} \put(2,0.5){ $G\bigl($} \put(5,0){\vector(0,1){0.9}} \put(5,0){\circle{0.1}} \put(5,1){\circle{0.1}} \put(5.2,1){\mbox{$11$}} \put(5.2,0){\mbox{$00$}} \put(7,0.5){,} \put(8.2,0.5){\mbox{$G\bigl($}} \put(9,0){\vector(0,1){0.9}} \put(9,0){\circle{0.1}} \put(9,1){\circle{0.1}} \put(9.2,0){\mbox{$0\mathfrak f^m$}} \put(9.2,1){\mbox{$\mathfrak h^m$}} \put(10.3,0.5){\mbox{$\bigr) \ =$}} \put(11.5,0){\vector(0,1){0.9}} \put(11.5,0){\circle{0.1}} \put(11.5,1){\circle{0.1}} \put(11.7,0){\mbox{$01$}} \put(11.7,1){\mbox{$11$}} \end{picture} \end{figure} \noindent By (i)--(ii), for any $m$-valuation $?$, we have\\ \begin{figure}[H] \unitlength1cm \begin{picture}(3,2) \thicklines \put(1.2,0){\circle{0.1}} \put(1.5,0){\mbox{$?$}} \put(1.2,1){\circle{0.1}} \put(1.2,2){\circle{0.1}} \put(1.5,1){\mbox{$\mathfrak{f}^m$}} \put(1.2,0){\vector(0,1){0.9}} \put(1.2,1){\vector(0,1){0.9}} \put(1.5,2){\mbox{$\mathfrak{h}^m$}} \put(2,1){$\Bigr) \ = $} \put(0,1){$G\Bigl($} \put(3.2,0){\circle{0.1}} \put(3.5,0){\mbox{$00$}} \put(3.2,1){\circle{0.1}} \put(3.5,1){\mbox{$01$}} \put(3.2,0){\vector(0,1){0.9}} \put(3.2,2){\circle{0.1}} \put(3.5,2){\mbox{$11$}} \put(3.2,1){\vector(0,1){0.9}} \put(5,1){\quad or \quad} \put(7,1){$G\Bigl($} \put(8.2,0){\circle{0.1}} \put(8.5,0){\mbox{$?$}} \put(8.2,1){\circle{0.1}} \put(8.2,2){\circle{0.1}} \put(8.5,1){\mbox{$\mathfrak{f}^m$}} \put(8.2,0){\vector(0,1){0.9}} \put(8.2,1){\vector(0,1){0.9}} \put(8.5,2){\mbox{$\mathfrak{h}^m$}} \put(9,1){$\Bigr) \ = $} \put(10.2,0){\circle{0.1}} \put(10.5,0){\mbox{$01$}} \put(10.2,1){\circle{0.1}} \put(10.5,1){\mbox{$01$}} \put(10.2,0){\vector(0,1){0.9}} \put(10.2,2){\circle{0.1}} \put(10.5,2){\mbox{$11$}} \put(10.2,1){\vector(0,1){0.9}} \end{picture} \end{figure} \begin{figure}[H] \unitlength1cm \begin{picture}(3,2.4) \thicklines \put(0,1){Thus, either \qquad $G\Bigl(\quad $} \put(5,0){\vector(-1,1){0.9}} \put(5,0){\vector(1,1){0.9}} \put(6,1){\vector(0,1){0.9}} \put(4,1){\vector(1,2){0.9}} \put(6,2){\vector(-1,1){0.9}} \put(4,1){\circle{0.1}} \put(6,2){\circle{0.1}} \put(5,3){\circle{0.1}} \put(5,0){\circle{0.1}} \put(6,1){\circle{0.1}} \put(5.3,0){$0\cdots0$} 
\put(5.3,3){$\mathfrak h^m$} \put(4.5,1){\mbox{$\mathfrak{g}^m$}} \put(6.3,2){\mbox{$\mathfrak{f}^m$}} \put(6.3,1){?} \put(7,1) {$\Bigr) \ =$} \put(8,1){\vector(1,2){0.9}} \put(10,2){\vector(-1,1){0.9}} \put(9,3){\circle{0.1}} \put(10,2){\circle{0.1}} \put(9,0){\vector(-1,1){0.9}} \put(9,0){\vector(1,1){0.9}} \put(10,1){\vector(0,1){0.9}} \put(9,1){\circle{0.1}} \put(10,2){\circle{0.1}} \put(9,0){\circle{0.1}} \put(10,1){\circle{0.1}} \put(9.3,0){$00$} \put(9.3,3){$11$} \put(8.5,1){\mbox{$00$}} \put(10.3,2){\mbox{$01$}} \put(10.3,1){$00$} \end{picture} \end{figure} \noindent which would contradict (iv) as no $\sigma$-model is equivalent to the $2$-model on the right hand side of the above equation, or we would have a contradiction: \begin{figure}[H] \unitlength1cm \begin{picture}(0,2) \thicklines \put(0,0){\vector(0,1){0.9}} \put(0,1){\vector(0,1){0.9}} \put(0,1){\circle{0.1}} \put(0,2){\circle{0.1}} \put(0,0){\circle{0.1}} \put(0.3,0){$00$} \put(0.5,1){\mbox{$01$}} \put(0.3,2){\mbox{$11$}} \put(1,1){$\thicksim$} \put(1.5,1){$G\Bigl( F\Bigl($} \put(4,0){\vector(-1,1){0.9}} \put(4,0){\vector(1,1){0.9}} \put(5,1){\vector(-1,1){0.9}} \put(3,1){\vector(1,1){0.9}} \put(3,1){\circle{0.1}} \put(4,2){\circle{0.1}} \put(4,0){\circle{0.1}} \put(5,1){\circle{0.1}} \put(4.3,0){$0\cdots0$} \put(3.5,1){\mbox{$\mathfrak{f}^k$}} \put(4.3,2){\mbox{$1\cdots1$}} \put(5.3,1){$\mathfrak{g}^k$} \put(6,1){$\Bigr)\Bigr) \ = \ G\Bigl($} \put(9,0){\vector(-1,1){0.9}} \put(9,0){\vector(1,1){0.9}} \put(10,1){\vector(-1,1){0.9}} \put(8,1){\vector(1,1){0.9}} \put(8,1){\circle{0.1}} \put(9,2){\circle{0.1}} \put(9,0){\circle{0.1}} \put(10,1){\circle{0.1}} \put(9.3,0){?} \put(8.5,1){\mbox{$\mathfrak{f}^m$}} \put(9.3,2){\mbox{$\mathfrak{h}^m$}} \put(10.3,1){$\mathfrak{f}^m$} \put(10.6,1){$\Bigr)\ \thicksim $} \put(12,0){\vector(0,1){0.9}} \put(12,1){\vector(0,1){0.9}} \put(12,1){\circle{0.1}} \put(12,2){\circle{0.1}} \put(12,0){\circle{0.1}} \put(12.3,0){$01$} \put(12.5,1){\mbox{$01$}} \put(12.3,2){\mbox{$11$}} \end{picture} \end{figure} \end{proof} By \cite{Ghi5}, there are infinitely many logics in which unification is not finitary, see Figure \ref{NU}, and hence there are infinitely many logics with nullary unification. We can produce other infinite families of logics with nullary unification, see Figure \ref{li}. \begin{figure}[H] \unitlength1cm \thicklines \begin{picture}(0,3) \put(7,0){\vector(-1,1){0.9}} \put(7,0){\vector(1,1){0.9}} \put(8,1){\vector(-1,1){0.9}} \put(6,1){\circle{0.1}} \put(7,2){\circle{0.1}} \put(7,0){\circle{0.1}} \put(8,1){\circle{0.1}} \put(6,1){\vector(1,1){0.9}} \put(7,3){\circle{0.1}} \put(8,3){\circle{0.1}} \put(7,2){\vector(0,1){0.9}} \put(7,2){\vector(-1,1){0.9}} \put(6,3){\circle{0.1}} \put(9,3){\circle{0.1}} \put(5,3){\circle{0.1}} \put(10,3){\circle{0.1}} \put(4,3){\circle{0.1}} \put(7,2){\vector(1,1){0.9}} \put(7,2){\vector(-2,1){1.9}} \put(7,2){\vector(2,1){1.9}} \put(7,2){\vector(-3,1){2.9}} \put(7,2){\vector(3,1){2.9}} \put(9.5,3){\circle{0.1}} \put(4.5,3){\circle{0.1}} \put(10.5,3){\circle{0.1}} \put(4,3){\circle{0.1}} \put(3.5,3){\circle{0.1}} \end{picture} \caption{$\mathfrak F_2+\mathfrak F_s.$}\label{li} \end{figure} \begin{theorem}\label{infty} The logic $\mathsf L(\mathfrak F_2+\mathfrak F_s)$ has nullary unification, for any $s\geq 1$: \end{theorem} \begin{proof} Let $\mathbf F=sm(\{\mathfrak F_2+\mathfrak F_s\})$ and suppose that $\mathsf L(\mathbf F)$ has finitary unification. 
Note that $\{\mathfrak L_1,\mathfrak L_2,\mathfrak L_3,\mathfrak L_4,\mathfrak R_2,\mathfrak R_2+\}\subseteq \mathbf F$. Take $n=3$ and let $m\geq 3$ be given by Theorem \ref{main}. Let $k> m$ and $\sigma\colon\{x_1,x_2,x_3\}\to \mathsf{Fm}^{k+2}$ be a substitution defined as follows $\begin{array}{rl} \qquad\qquad \sigma(x_1)= & x_1\\ \sigma(x_2)= & x_2 \\ \sigma(x_3)= & \bigwedge_{i=3}^{k+2}\Bigl((x_i\to x_1)\lor((x_i\to x_1)\to x_1)\Bigr). \end{array}$ \noindent There are mappings $G:\mathbf{M}^m\to\mathbf{M}^3$ and $F:\mathbf{M}^{k+2}\to\mathbf{M}^m$ fulfilling the conditions (i)-(v). Since $k> m$, there are $\mathfrak f^{k}\not=\mathfrak g^{k}$ such that (for some $\mathfrak f^m, \mathfrak g^m$)\\ \unitlength1cm \begin{picture}(0,1.1) \thicklines \put(0,0.5){\mbox{$F\Bigl($}} \put(1,0){\circle{0.1}} \put(1.3,1){\mbox{$1\cdots1$}} \put(1,0){\vector(0,1){0.9}} \put(1.3,0){\mbox{$01\mathfrak f^{k}$}} \put(1,1){\circle{0.1}} \put(2.5,0.5){\mbox{$\Bigr)= F\Bigl($}} \put(4,0){\circle{0.1}} \put(4.3,1){\mbox{$1\cdots1$}} \put(4,0){\vector(0,1){0.9}} \put(4.3,0){\mbox{$01\mathfrak g^{k}$}} \put(4,1){\circle{0.1}} \put(5.3,0.5){\mbox{$\Bigr)= $}} \put(6,0){\circle{0.1}} \put(6.3,1){\mbox{$\mathfrak g^m$}} \put(6,0){\vector(0,1){0.9}} \put(6.3,0){\mbox{$\mathfrak f^m$}} \put(6,1){\circle{0.1}} \put(7.3,0.5){\mbox{, }} \put(8,0.5){\mbox{$G\Bigl($}} \put(9,0){\circle{0.1}} \put(9.3,1){\mbox{$\mathfrak g^m$}} \put(9,0){\vector(0,1){0.9}} \put(9.3,0){\mbox{$\mathfrak f^m$}} \put(9,1){\circle{0.1}} \put(10,0.5){$\Bigr)=$} \put(11.3,1){\mbox{$111$ }} \put(11,0){\circle{0.1}} \put(11.4,0.5){.\qquad But} \put(11.3,0){\mbox{$011$}} \put(11,0){\vector(0,1){0.9}} \put(11,1){\circle{0.1}} \end{picture} \\ \unitlength1cm \begin{picture}(3,2.2) \thicklines \put(0,1){$\sigma\Bigl($} \put(2,0){\vector(-1,1){0.9}} \put(2,0){\vector(1,1){0.9}} \put(3,1){\vector(-1,1){0.9}} \put(1,1){\circle{0.1}} \put(2,2){\circle{0.1}} \put(2,0){\circle{0.1}} \put(3,1){\circle{0.1}} \put(2.3,0){\mbox{$010\cdots0$}} \put(0.5,1.3){\mbox{$01\mathfrak f^{k}$}} \put(2.3,2){\mbox{$1\cdots1$}} \put(2.9,1.3){\mbox{$01\mathfrak g^{k}$}} \put(1,1){\vector(1,1){0.9}} \put(4,1){$\Bigr)$} \put(4.5,1){$=$} \put(7,0){\vector(-1,1){0.9}} \put(7,0){\vector(1,1){0.9}} \put(8,1){\vector(-1,1){0.9}} \put(6,1){\circle{0.1}} \put(7,2){\circle{0.1}} \put(7,0){\circle{0.1}} \put(8,1){\circle{0.1}} \put(7.3,0){\mbox{$010$}} \put(5.4,1){\mbox{$011$}} \put(7.3,2){\mbox{$111$}} \put(8.3,1){\mbox{$011$}} \put(6,1){\line(1,1){0.9}} \put(6,1){\vector(1,1){0.9}} \put(9,1){$\thicksim$} \put(10.3,2){\mbox{$111$ }} \put(10,0){\circle{0.1}} \put(10,1){\circle{0.1}} \put(10.3,0){\mbox{$010$}} \put(10,0){\vector(0,1){0.9}} \put(10,1){\vector(0,1){0.9}} \put(10,1){\circle{0.1}} \put(10.3,1){\mbox{$011$. 
\ Then}} \end{picture}\\ \unitlength1cm \begin{picture}(3,2.2) \thicklines \put(0,1){$F\Bigl($} \put(2,0){\vector(-1,1){0.9}} \put(2,0){\vector(1,1){0.9}} \put(3,1){\vector(-1,1){0.9}} \put(1,1){\circle{0.1}} \put(2,2){\circle{0.1}} \put(2,0){\circle{0.1}} \put(3,1){\circle{0.1}} \put(2.3,0){\mbox{$010\cdots0$}} \put(0.6,1.4){\mbox{$01\mathfrak f^{k}$}} \put(2.3,2){\mbox{$1\cdots1$}} \put(2.9,1.3){\mbox{$01\mathfrak g^{k}$}} \put(1,1){\vector(1,1){0.9}} \put(4,1){$\Bigr)$} \put(4.5,1){$=$} \put(7,0){\vector(-1,1){0.9}} \put(7,0){\vector(1,1){0.9}} \put(8,1){\vector(-1,1){0.9}} \put(6,1){\circle{0.1}} \put(7,2){\circle{0.1}} \put(7,0){\circle{0.1}} \put(8,1){\circle{0.1}} \put(7.3,0){\mbox{$\mathfrak h^m$}} \put(5.4,1){\mbox{$\mathfrak f^m$}} \put(7.3,2){\mbox{$\mathfrak g^m$}} \put(8.3,1){\mbox{$\mathfrak f^m$}} \put(6,1){\line(1,1){0.9}} \put(6,1){\vector(1,1){0.9}} \put(9,1){$\thicksim$} \put(10.3,2){\mbox{$\mathfrak g^m$ }} \put(10,0){\circle{0.1}} \put(10,1){\circle{0.1}} \put(10.3,0){\mbox{$\mathfrak h^m$}} \put(10,0){\vector(0,1){0.9}} \put(10,1){\vector(0,1){0.9}} \put(10,1){\circle{0.1}} \put(10.3,1){\mbox{$\mathfrak f^m$, }} \end{picture}\\ \unitlength1cm \begin{picture}(3,2.2) \thicklines \put(0,1){for some $\mathfrak h^m$, and hence} \put(6,1){$G\Bigl($} \put(7.3,2){\mbox{$\mathfrak g^m$ }} \put(7,0){\circle{0.1}} \put(7,1){\circle{0.1}} \put(7.3,0){\mbox{$\mathfrak h^m$}} \put(7,0){\vector(0,1){0.9}} \put(7,1){\vector(0,1){0.9}} \put(7,1){\circle{0.1}} \put(7.3,1){\mbox{$\mathfrak f^m\ \Bigr) $}} \put(9,1){$=$} \put(10.3,2){\mbox{$111$ }} \put(10,0){\circle{0.1}} \put(10,1){\circle{0.1}} \put(10.3,0){\mbox{$010$}} \put(10,0){\vector(0,1){0.9}} \put(10,1){\vector(0,1){0.9}} \put(10,1){\circle{0.1}} \put(10.3,1){\mbox{$011$. }} \end{picture}\\ \unitlength1cm \begin{picture}(3,2.2) \thicklines \put(0,1){On the other hand, we have } \put(6,1){$\sigma\Bigl($} \put(7.3,2){\mbox{$1\cdots1$ }} \put(7,0){\circle{0.1}} \put(7,1){\circle{0.1}} \put(7.3,0){\mbox{$00\mathfrak h^{k}$}} \put(7,0){\vector(0,1){0.9}} \put(7,1){\vector(0,1){0.9}} \put(7,1){\circle{0.1}} \put(7.3,1){\mbox{$01\mathfrak f^{k}\ \Bigr) $}} \put(9,1){$=$} \put(10.3,2){\mbox{$111$ }} \put(10,0){\circle{0.1}} \put(10,1){\circle{0.1}} \put(10.3,0){\mbox{$001$}} \put(10,0){\vector(0,1){0.9}} \put(10,1){\vector(0,1){0.9}} \put(10,1){\circle{0.1}} \put(10.3,1){\mbox{$011$\ . 
Thus, }} \end{picture}\\ \unitlength1cm \begin{picture}(3,2.5) \thicklines \put(0,1){$F\Bigl($} \put(1.3,2){\mbox{$1\cdots1$ }} \put(1,0){\circle{0.1}} \put(1,1){\circle{0.1}} \put(1.3,0){\mbox{$00\mathfrak h^{k}$}} \put(1,0){\vector(0,1){0.9}} \put(1,1){\vector(0,1){0.9}} \put(1,1){\circle{0.1}} \put(1.3,1){\mbox{$01\mathfrak f^{k}\ \Bigr) $}} \put(3,1){$=$} \put(4.3,2){\mbox{$\mathfrak g^{m}$ }} \put(4,0){\circle{0.1}} \put(4,1){\circle{0.1}} \put(4.3,0){\mbox{$\mathfrak k^{m}$}} \put(4,0){\vector(0,1){0.9}} \put(4,1){\vector(0,1){0.9}} \put(4,1){\circle{0.1}} \put(4.3,1){\mbox{$\mathfrak f^{m}$\ , for some $\mathfrak k^{m}$, and }} \put(8,1){$G\Bigl($} \put(9.3,2){\mbox{$\mathfrak g^{m}$ }} \put(9,0){\circle{0.1}} \put(9,1){\circle{0.1}} \put(9.3,0){\mbox{$\mathfrak k^{m}$}} \put(9,0){\vector(0,1){0.9}} \put(9,1){\vector(0,1){0.9}} \put(9,1){\circle{0.1}} \put(9.3,1){\mbox{$\mathfrak f^{m}\ \Bigr) $}} \put(11,1){$=$} \put(12.3,2){\mbox{$111$ }} \put(12,0){\circle{0.1}} \put(12,1){\circle{0.1}} \put(12.3,0){\mbox{$001$}} \put(12,0){\vector(0,1){0.9}} \put(12,1){\vector(0,1){0.9}} \put(12,1){\circle{0.1}} \put(12.3,1){\mbox{$011.$}} \end{picture}\\ \unitlength1cm \thicklines \begin{picture}(0,3.5) \put(3,0){\vector(-1,1){0.9}} \put(3,0){\vector(1,1){0.9}} \put(4,1){\vector(-1,1){0.9}} \put(2,1){\circle{0.1}} \put(3,2){\circle{0.1}} \put(3,0){\circle{0.1}} \put(4,1){\circle{0.1}} \put(2,1){\vector(1,1){0.9}} \put(3,2){\vector(0,1){0.9}} \put(0,3){We conclude} \put(1,1){$G\Bigl(\mathfrak h^{m}$} \put(4.2,1){$\mathfrak k^{m}\ \Bigr)$} \put(3.3,0){$0\cdots0$} \put(3.2,2){$\mathfrak f^{m}$} \put(3.2,3){$\mathfrak g^{m}$} \put(8,0){\vector(-1,1){0.9}} \put(8,0){\vector(1,1){0.9}} \put(9,1){\vector(-1,1){0.9}} \put(7,1){\circle{0.1}} \put(8,2){\circle{0.1}} \put(8,0){\circle{0.1}} \put(9,1){\circle{0.1}} \put(7,1){\vector(1,1){0.9}} \put(8,2){\vector(0,1){0.9}} \put(5.5,1){$= \quad 010$} \put(9.2,1){$001$} \put(8.3,0){$000$} \put(8.2,2){$011$ \quad \quad and note that} \put(8.2,3){$111$} \put(12.3,3){\mbox{$111$ }} \put(12,1){\circle{0.1}} \put(12,2){\circle{0.1}} \put(12.3,1){\mbox{$010$}} \put(12,1){\vector(0,1){0.9}} \put(12,2){\vector(0,1){0.9}} \put(12,2){\circle{0.1}} \put(12.3,2){\mbox{$011$}} \end{picture}\\ \noindent is a generated submodel of the above $\mathfrak F_2+\mathfrak L_2$-model. But the whole model is p-irreducible and hence the submodel on $\mathfrak L_3$ must be equivalent, by (iv), to a $\sigma$-model on $+\mathfrak F_r$, for some $r\geq 1$. Thus, we get a model on $+\mathfrak F_r$ such that \unitlength1cm \thicklines \begin{picture}(0,2.5) \put(7,1){\circle{0.1}} \put(7,0){\circle{0.1}} \put(7,2){\circle{0.1}} \put(8,2){\circle{0.1}} \put(9,2){\circle{0.1}} \put(7,0){\vector(0,1){0.9}} \put(7,1){\circle{0.1}} \put(7,1){\vector(0,1){0.9}} \put(7,1){\vector(-1,1){0.9}} \put(6,2){\circle{0.1}} \put(5,2){\circle{0.1}} \put(4,2){\circle{0.1}} \put(7,1){\vector(1,1){0.9}} \put(7,1){\vector(-2,1){1.9}} \put(7,1){\vector(2,1){1.9}} \put(7,1){\vector(-3,1){2.9}} \put(9.2,2){$\Vdash\sigma(x_3)$} \put(7.5,0.9){$\Vdash\sigma(x_3)$} \put(7.5,0){$\not\Vdash\sigma(x_3)$} \put(4.5,2){\circle{0.1}} \put(4,2){\circle{0.1}} \put(3.5,2){\circle{0.1}} \end{picture}\\ \noindent which is impossible.\end{proof} In the same way one shows that $L(\mathfrak F_r+\mathfrak F_s)$ has nullary unification, for $r\geq 2$, $s\geq 1$. More than in the number (of logics with nullary unification) we are interested in their location in the lattice of all intermediate logics. 
In particular, we would like to know if they can be put apart from logics with finitary\slash unitary unification; just as logics with finitary unification are distinguished from unitary logics, by Theorem \ref{kc}. \begin{theorem}\label{L7} The logic ${\mathsf L}(\{\mathfrak R_2,\mathfrak F_{2}\})={\mathsf L}(\mathfrak R_2)\cap{\mathsf L}(\mathfrak F_{2})$ has nullary unification. \end{theorem} \begin{proof} Suppose that ${\mathsf L}(\{\mathfrak R_2,\mathfrak F_{2}\})$ has finitary unification and note that $$sm(\{\mathfrak R_2,\mathfrak F_{2}\})=\{\mathfrak L_1,\mathfrak L_{2},\mathfrak L_3,\mathfrak F_{2},\mathfrak R_2\}.$$ Let $n=2$. According to Theorem \ref{main}, there is a number $m\geq 2$ such that for every $\sigma\colon\{x_1,x_2\}\to\mathsf{Fm}^k$ there are mappings $G:\mathbf{M}^m\to\mathbf{M}^2$ and $F:\mathbf{M}^k\to\mathbf{M}^m$ fulfilling the conditions (i)-(v). Take any $k>m$ and let $\sigma\colon\{x_1,x_2\}\to\mathsf{Fm}^k$ be as follows: $$\sigma(x_1)=\bigwedge_{i=1}^k(\neg x_i\lor\neg\neg x_i) \quad \mbox{ and } \quad \sigma(x_2)=\bigwedge_{i=1}^k(\neg\neg x_i\to x_i).$$ We have the following p-irreducible $\sigma$-models, on $\mathfrak R_2$ and $\mathfrak F_{2}$, correspondingly:\\ \unitlength1cm \begin{picture}(1,1.5) \thicklines \put(0,0){\circle{0.1}} \put(0.5,0.1){\mbox{$11$}} \put(2,0){\circle{0.1}} \put(2.5,0.1){\mbox{$10$}} \put(2,1){\circle{0.1}} \put(2.5,1){\mbox{$11$}} \put(2,0){\vector(0,1){0.9}} \put(3.5,0.5){\mbox{and}} \put(5,0){\circle{0.1}} \put(5.5,0.1){\mbox{$11$}} \put(7,0){\circle{0.1}} \put(7.5,0.1){\mbox{$10$}} \put(7,1){\circle{0.1}} \put(7.5,1){\mbox{$11$}} \put(7,0){\vector(0,1){0.9}} \put(9,0){\circle{0.1}} \put(9.5,0.1){\mbox{$01$}} \put(9,1){\circle{0.1}} \put(9.5,1){\mbox{$11$}} \put(9,0){\vector(0,1){0.9}} \put(11,0){\circle{0.1}} \put(11.5,0.1){\mbox{$00$}} \put(11,1){\circle{0.1}} \put(11.5,1){\mbox{$11$}} \put(11,0){\vector(0,1){0.9}} \end{picture} \\ As $k>m$, there are $\mathfrak{f}^k\not=\mathfrak{g}^k$ such that $F_k(\mathfrak L_1, \mathfrak{f}^k)=F_k(\mathfrak L_1, \mathfrak{g}^k)=(\mathfrak L_1, \mathfrak{g}^m)$, for some $\mathfrak{g}^m$. Let $\mathfrak{f}^k\not={0\cdots0}$. 
Then \unitlength1cm \begin{picture}(1,1.5) \thicklines \put(0,0.5){\mbox{$\sigma\Bigl($}} \put(0.8,0){\circle{0.1}} \put(1,0.1){\mbox{$0\cdots0$}} \put(0.8,1){\circle{0.1}} \put(1,1){\mbox{$\mathfrak{f}^k$}} \put(0.8,0){\vector(0,1){0.9}} \put(2,0.5){\mbox{$\Bigr)=$}} \put(3,0){\circle{0.1}} \put(3.5,0.1){\mbox{$10$}} \put(3,1){\circle{0.1}} \put(3.5,1){\mbox{$11$}} \put(3,0){\vector(0,1){0.9}} \put(4.5,0.5){\mbox{and}} \put(5.5,0.5){\mbox{$\sigma\bigl($}} \put(7.5,0){\circle{0.1}} \put(8,0.1){\mbox{${0\cdots0}$}} \put(6.5,1){\circle{0.1}} \put(8.5,1){\circle{0.1}} \put(6,1){\mbox{$\mathfrak{f}^k$}} \put(8.8,1){\mbox{$\mathfrak{g}^k$}} \put(7.5,0){\vector(1,1){0.9}} \put(7.5,0){\vector(-1,1){0.9}} \put(9.2,0.5){\mbox{$\bigr)\thicksim$}} \put(10.5,0){\circle{0.1}} \put(11,0.1){\mbox{$00$ \ or \ $01$}} \put(10.5,1){\circle{0.1}} \put(11,1){\mbox{$11$}} \put(10.5,0){\vector(0,1){0.9}} \end{picture} \unitlength1cm \begin{picture}(1,1.5) \thicklines \put(0,1){Let} \put(0,0.5){\mbox{$F\Bigl($}} \put(0.8,0){\circle{0.1}} \put(1,0.1){\mbox{$0\cdots0$}} \put(0.8,1){\circle{0.1}} \put(1,1){\mbox{$\mathfrak{f}^k$}} \put(0.8,0){\vector(0,1){0.9}} \put(2,0.5){\mbox{$\Bigr)=$}} \put(3,0){\circle{0.1}} \put(3.5,0.1){\mbox{$\mathfrak{h}^m_1$}} \put(3,1){\circle{0.1}} \put(3.5,1){\mbox{$\mathfrak{f}^m$}} \put(3,0){\vector(0,1){0.9}} \put(4.5,0.5){\mbox{and}} \put(5.5,0.5){\mbox{$F\bigl($}} \put(7.5,0){\circle{0.1}} \put(8,0.1){\mbox{${0\cdots0}$}} \put(6.5,1){\circle{0.1}} \put(8.5,1){\circle{0.1}} \put(6,1){\mbox{$\mathfrak{f}^k$}} \put(8.8,1){\mbox{$\mathfrak{g}^k$}} \put(7.5,0){\vector(1,1){0.9}} \put(7.5,0){\vector(-1,1){0.9}} \put(9.2,0.5){\mbox{$\bigr)\thicksim$}} \put(10.5,0){\circle{0.1}} \put(11,0.1){\mbox{$\mathfrak{h}^m_2$}} \put(10.5,1){\circle{0.1}} \put(11,1){\mbox{$\mathfrak{f}^m$}} \put(10.5,0){\vector(0,1){0.9}} \end{picture} \\ \noindent for some $m$-valuations $\mathfrak{h}^m_{1},\mathfrak{h}^m_{2}$. 
Then, by (v),
\unitlength1cm \begin{picture}(1,1.5) \thicklines \put(0,0.5){\mbox{$G\Bigl($}} \put(0.8,0){\circle{0.1}} \put(1,0.1){\mbox{$\mathfrak{h}^m_1$}} \put(0.8,1){\circle{0.1}} \put(1,1){\mbox{$\mathfrak{f}^m$}} \put(0.8,0){\vector(0,1){0.9}} \put(2,0.5){\mbox{$\Bigr)=$}} \put(3,0){\circle{0.1}} \put(3.5,1){\mbox{$11$}} \put(3,1){\circle{0.1}} \put(3.5,0){\mbox{$10$}} \put(3,0){\vector(0,1){0.9}} \put(4.5,0.5){\mbox{and}} \put(5.5,0.5){\mbox{$G\bigl($}} \put(6.5,0){\circle{0.1}} \put(6.8,0.1){\mbox{$\mathfrak{h}^m_2$}} \put(6.5,1){\circle{0.1}} \put(6.8,1){\mbox{$\mathfrak{f}^m$}} \put(6.5,0){\vector(0,1){0.9}} \put(7.5,0.5){\mbox{$\bigr)=$}} \put(8.5,0){\circle{0.1}} \put(9,1){\mbox{$11$}} \put(8.5,1){\circle{0.1}} \put(9,0){\mbox{$01$ \ or \ $00$}} \put(8.5,0){\vector(0,1){0.9}} \end{picture}
$$ \unitlength1cm \begin{picture}(6,2.1) \thicklines \put(0,2){Thus,} \put(3,2){\circle{0.1}} \put(2,1){\circle{0.1}} \put(4,1){\circle{0.1}} \put(3,0){\circle{0.1}} \put(4.2,0.9){\mbox{$\mathfrak{h}^m_{2}\quad \Bigr)\quad=$}} \put(0.5,0.9){\mbox{$G \Bigl(\quad\mathfrak{h}^m_{1}$}} \put(3.3,0){\mbox{${0\cdots0}$}} \put(2.2,2){\mbox{$\mathfrak{f}^m$}} \put(2,1){\vector(1,1){0.9}} \put(4,1){\vector(-1,1){0.9}} \put(3,0){\vector(1,1){0.9}} \put(3,0){\vector(-1,1){0.9}} \put(8,2){\circle{0.1}} \put(7,1){\circle{0.1}} \put(9,1){\circle{0.1}} \put(8,0){\circle{0.1}} \put(9.2,0.9){\mbox{$01$ \ or \ $00$}} \put(6.5,0.9){\mbox{$10$}} \put(8.3,0){\mbox{$00$}} \put(8.2,2){$11$ } \put(7,1){\vector(1,1){0.9}} \put(9,1){\vector(-1,1){0.9}} \put(8,0){\vector(1,1){0.9}} \put(8,0){\vector(-1,1){0.9}} \end{picture}\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad$$
The above $2$-model on $\mathfrak R_2$ is not equivalent to any $\sigma$-model, which contradicts (iv). \end{proof}
Thus, there are logics with finitary unification whose intersection has nullary unification. We could give many examples of such logics.
\begin{theorem}\label{L8} The logic ${\mathsf L}(\mathfrak G_2)\cap{\mathsf L}(\mathfrak F_{3})$ (see Figures \ref{GF} and \ref{8fames}) has nullary unification. \end{theorem}
\begin{proof} Let $\mathsf{L}={\mathsf L}(\mathfrak G_2)\cap{\mathsf L}(\mathfrak F_{3})$ and $\mathbf F=\{\mathfrak L_1, \mathfrak L_2, \mathfrak L_3,\mathfrak F_2,+\mathfrak F_2, \mathfrak G_3, \mathfrak G_2, \mathfrak F_{3}\}$. Then $\mathbf F= sm(\{\mathfrak G_2,\mathfrak F_3\})$ and $\mathsf{L}={\mathsf L}(\mathbf F)$. Suppose that ${\mathsf L}$ has finitary unification and let $n=1$. By Theorem \ref{main}, there is a number $m\geq 1$ such that for any $\sigma\colon\{x_1,\dots,x_n\}\to \mathsf{Fm}^k$ there are $G:\mathbf{M}^m\to\mathbf{M}^1$ and $F:\mathbf{M}^k\to\mathbf{M}^m$ fulfilling the conditions (i)-(v). Let $A(x_1,x_2)= (\neg x_1\lor\neg\neg x_1)\lor (\neg x_2\lor\neg\neg x_2)\lor\neg(x_1\leftrightarrow x_2)\lor \neg\neg(x_1\leftrightarrow x_2).$\\ The formula is valid in $\mathfrak F_2$; to falsify it one needs three end-elements above the root, labeled with distinct valuations.
Thus, $A$ is false in the following models over $\mathfrak F_{3}$ \begin{figure}[H] \unitlength1cm \begin{picture}(1,1) \thicklines \put(0,1){\circle{0.1}} \put(1,0){\circle{0.1}} \put(2,1){\circle{0.1}} \put(1,1){\circle{0.1}} \put(1.3,0){\mbox{$00$}} \put(0.3,1){\mbox{$11$}} \put(1.3,1){\mbox{$01$}} \put(2.3,1){\mbox{$10$}} \put(1,0){\vector(1,1){0.9}} \put(1,0){\vector(-1,1){0.9}} \put(1,0){\vector(0,1){0.9}} \put(3.5,1){\circle{0.1}} \put(4.5,0){\circle{0.1}} \put(5.5,1){\circle{0.1}} \put(4.5,1){\circle{0.1}} \put(4.8,0){\mbox{$00$}} \put(3.8,1){\mbox{$11$}} \put(4.8,1){\mbox{$01$}} \put(5.8,1){\mbox{$00$}} \put(4.5,0){\vector(1,1){0.9}} \put(4.5,0){\vector(-1,1){0.9}} \put(4.5,0){\vector(0,1){0.9}} \put(7,1){\circle{0.1}} \put(8,0){\circle{0.1}} \put(9,1){\circle{0.1}} \put(8,1){\circle{0.1}} \put(8.3,0){\mbox{$00$}} \put(7.3,1){\mbox{$11$}} \put(8.3,1){\mbox{$00$}} \put(9.3,1){\mbox{$10$}} \put(8,0){\vector(1,1){0.9}} \put(8,0){\vector(-1,1){0.9}} \put(8,0){\vector(0,1){0.9}} \put(10,1){\circle{0.1}} \put(11,0){\circle{0.1}} \put(12,1){\circle{0.1}} \put(11,1){\circle{0.1}} \put(11.3,0){\mbox{$00$}} \put(10.3,1){\mbox{$00$}} \put(11.3,1){\mbox{$01$}} \put(12.3,1){\mbox{$10$}} \put(11,0){\vector(1,1){0.9}} \put(11,0){\vector(-1,1){0.9}} \put(11,0){\vector(0,1){0.9}} \end{picture} \end{figure} \noindent Let $ m<k=2l$ and $\sigma\colon\{x_1\}\to\mathsf{Fm^k}$ be defined by $$ \sigma(x_1)\quad=\quad\neg\neg\Bigl(\bigvee_{i=1}^kx_i\Bigr)\ \land \ \bigwedge_{i=1}^lA(x_{2i-1},x_{2i}).$$ We have the following p-irreducible $\sigma$-models (or equivalents of $\sigma$-models): $$ \circ \ 0 \qquad \circ 1\qquad\unitlength1cm \begin{picture}(1,1.1) \thicklines \put(0,1){\circle{0.1}} \put(0.3,1){\mbox{$0$}} \put(2,1){\circle{0.1}} \put(2.3,1){\mbox{$1$}} \put(1,0){\circle{0.1}} \put(1.3,0){\mbox{$0$}} \put(1,0){\vector(1,1){0.9}} \put(1,0){\vector(-1,1){0.9}} \put(3,0){\circle{0.1}} \put(3.3,0){\mbox{$0$\ \ \ Note ${\sigma}(\circ \ \mathfrak{f}^k)=\circ \ 0$ iff $\mathfrak{f}^k=\underbrace{0\cdots 0}_{\mbox{$k$-times}}$.}} \put(3,0){\line(0,1){0.9}} \put(3,1){\circle{0.1}} \put(3.8,0.5){\mbox{but it is equivalent with some $\sigma$-model. }} \put(3.3,1){\mbox{$1$}\quad The last $1$-model is not any $\sigma$-model} \put(3,0){\vector(0,1){0.9}}\end{picture}\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad$$ \noindent Let $F(\circ \ 0\cdots 0)=\circ \ \mathfrak{g}^m$. Since $m<k$, there are $\mathfrak{f}^k_0\not=\mathfrak{f}^k_{0'}$ such that\\ $F(\circ \ \mathfrak{f}^k_0)=F(\circ \ \mathfrak{f}^k_{0'})$. Then $\mathfrak{f}^k_0(j)\not=\mathfrak{f}^k_{0'}(j)$ for some $j\leq k$. We do not know if $j$ is odd or even but for pairs of bits we have \ $\mathfrak{f}^k_0(2i-1)\ \mathfrak{f}^k_0(2i)\ \not=\ \mathfrak{f}^k_{0'}(2i-1)\ \mathfrak{f}^k_{0'}(2i)$ \ for some $i\leq l$ (where $2i-1=j$ or $2i=j$). Moreover, we can find a $k$-valuation $\mathfrak{f}^k_{0''}$ such that $$\mathfrak{f}^k_0(2i-1)\ \mathfrak{f}^k_{0}(2i)\quad\not=\quad\mathfrak{f}^k_{0''}(2i-1)\ \mathfrak{f}^k_{0''}(2i)\quad\not=\quad\mathfrak{f}^k_{0'}(2i-1)\ \mathfrak{f}^k_{0'}(2i)$$ and $\mathfrak{f}^k_{0''}(2i-1)\ \mathfrak{f}^k_{0''}(2i)\ \not=\ 00$. Let $F(\circ \ \mathfrak{f}^k_0)=F(\circ \ \mathfrak{f}^k_{0'})=\circ \ \mathfrak{f}^m_0$ and $F(\circ \ \mathfrak{f}^k_{0''})=\circ \ \mathfrak{f}^m_{0'}$. 
We have $\mathfrak{f}^m_0\not=\mathfrak{g}^m$ and $\mathfrak{f}^m_{0'}\not=\mathfrak{g}^m$ as, by (v), $G(\circ \ \mathfrak{f}^m_0)= G(\mathfrak{f}^m_{0'})=\circ \ 1$ and $G(\circ \ \mathfrak{g}^m)=\circ \ 0$. By (ii), (iv), and the above characterization of all $\sigma$-models, we conclude that \unitlength1cm \begin{picture}(1,1.5) \thicklines \put(0,0.5){\mbox{$G_{} \Bigl($}} \put(2,0){\circle{0.1}} \put(1.5,0){\mbox{$?$}} \put(1,1){\circle{0.1}} \put(3,1){\circle{0.1}} \put(1.3,1){\mbox{$\mathfrak{f}^m_{0'}$}} \put(2.5,1){\mbox{$\mathfrak{f}^m_0$}} \put(2,0){\vector(1,1){0.9}} \put(2,0){\vector(-1,1){0.9}} \put(3.2,0.5){\mbox{$\Bigr) \quad \thicksim\quad$}} \put(4.7,0){\circle{0.1}} \put(5,0){\mbox{$0$}} \put(4.7,1){\circle{0.1}} \put(5,1){\mbox{$1$}} \put(4.7,0){\vector(0,1){0.9}} \put(6,0.5){\mbox{or}} \put(7,0.5){\mbox{$G \Bigl($}} \put(9,0){\circle{0.1}} \put(8.5,0){\mbox{$?$}} \put(8,1){\circle{0.1}} \put(10,1){\circle{0.1}} \put(8.3,1){\mbox{$\mathfrak{f}^m_{0'}$}} \put(9.5,1){\mbox{$\mathfrak{f}^m_0$}} \put(9,0){\vector(1,1){0.9}} \put(9,0){\vector(-1,1){0.9}} \put(10.2,0.5){\mbox{$\Bigr) \quad \thicksim\quad$}} \put(11.7,0){\circle{0.1}} \put(12,0){\mbox{$1$}} \put(11.7,1){\circle{0.1}} \put(12,1){\mbox{$1$}} \put(11.7,0){\vector(0,1){0.9}} \end{picture} \\ \noindent (whatever ? is). If the first had happened, we would have \\ \begin{picture}(5,2) \thicklines \linethickness{0.3mm} \put(0,1){\mbox{$G_{} \Bigl($}} \put(2,0){\vector(-1,1){0.9}} \put(2,0){\vector(1,1){0.9}} \put(3,1){\vector(1,1){0.9}} \put(3,1){\vector(-1,1){0.9}} \put(1,1){\circle{0.1}} \put(2,2){\circle{0.1}} \put(4,2){\circle{0.1}} \put(2,0){\circle{0.1}} \put(3,1){\circle{0.1}} \put(2.3,0){?} \put(1.5,1){\mbox{$\mathfrak{g}^m$}} \put(3.3,2){\mbox{$\mathfrak{f}^m_0$}} \put(2.3,2){\mbox{$\mathfrak{f}^m_{0'}$}} \put(3.3,1){?} \put(4.5,1){\mbox{$\Bigr) \quad = \quad$}} \put(7,0){\vector(-1,1){0.9}} \put(7,0){\vector(1,1){0.9}} \put(8,1){\vector(1,1){0.9}} \put(8,1){\vector(-1,1){0.9}} \put(6,1){\circle{0.1}} \put(7,2){\circle{0.1}} \put(9,2){\circle{0.1}} \put(7,0){\circle{0.1}} \put(8,1){\circle{0.1}} \put(7.3,0){$0$} \put(6.5,1){\mbox{$0$}} \put(9.3,2){\mbox{$1$}} \put(7.3,2){\mbox{$1$}} \put(8.3,1){$0$} \end{picture} \noindent which would contradict (iv) as no $\sigma$-model is equivalent to the $1$-model on the right hand side of the above equation. 
We conclude that \\ \unitlength1cm \begin{picture}(1,1.5) \thicklines \put(0,0.5){\mbox{$G_{} \Bigl($}} \put(2,0){\circle{0.1}} \put(1.5,0){\mbox{$?$}} \put(1,1){\circle{0.1}} \put(3,1){\circle{0.1}} \put(1.3,1){\mbox{$\mathfrak{f}^m_{0'}$}} \put(2.5,1){\mbox{$\mathfrak{f}^m_0$}} \put(2,0){\vector(1,1){0.9}} \put(2,0){\vector(-1,1){0.9}} \put(3.2,0.5){\mbox{$\Bigr) \quad =\quad$}} \put(5.2,0){\circle{0.1}} \put(4.8,0){\mbox{$1$}} \put(4.2,1){\circle{0.1}} \put(6.2,1){\circle{0.1}} \put(6.3,1){\mbox{$1$}} \put(4.3,1){\mbox{$1$}} \put(5.2,0){\vector(1,1){0.9}} \put(5.2,0){\vector(-1,1){0.9}} \put(8,0.5){\mbox{and hence}} \end{picture} \unitlength1cm \begin{picture}(1,2) \put(0,0.5){\mbox{$G_{} \Bigl( F_{} \Bigl($}} \put(1.7,1){\circle{0.1}} \put(2.7,0){\circle{0.1}} \put(3.7,1){\circle{0.1}} \put(2.7,1){\circle{0.1}} \put(3,0){\mbox{$?$}} \put(2,1){\mbox{$\mathfrak{f}^k_{0''}$}} \put(3,1){\mbox{$\mathfrak{f}^k_{0'}$}} \put(4,1){\mbox{$\mathfrak{f}^k_{0}$}} \put(2.7,0){\vector(1,1){0.9}} \put(2.7,0){\vector(-1,1){0.9}} \put(2.7,0){\vector(0,1){0.9}} \put(4.3,0.5){\mbox{$ \Bigr)\Bigr) =\ G\Bigl($}} \put(6,1){\circle{0.1}} \put(7,0){\circle{0.1}} \put(8,1){\circle{0.1}} \put(7,1){\circle{0.1}} \put(7.3,0){\mbox{$?$}} \put(6.3,1){\mbox{$\mathfrak{f}^m_{0'}$}} \put(7.3,1){\mbox{$\mathfrak{f}^m_{0}$}} \put(8.3,1){\mbox{$\mathfrak{f}^m_{0}$}} \put(7,0){\vector(1,1){0.9}} \put(7,0){\vector(-1,1){0.9}} \put(7,0){\vector(0,1){0.9}} \put(8.8,0.5){\mbox{$\Bigr) \ =$}} \put(10,1){\circle{0.1}} \put(11,0){\circle{0.1}} \put(12,1){\circle{0.1}} \put(11,1){\circle{0.1}} \put(11.3,0){\mbox{$1$}} \put(10.3,1){\mbox{$1$}} \put(11.3,1){\mbox{$1$}} \put(12.3,1){\mbox{$1$}} \put(11,0){\vector(1,1){0.9}} \put(11,0){\vector(-1,1){0.9}} \put(11,0){\vector(0,1){0.9}} \end{picture}\\ But this is in contradiction with (v) as \unitlength1cm \begin{picture}(1,2) \put(0.5,0.5){\mbox{$H_{\sigma} \Bigl($}} \put(1.7,1){\circle{0.1}} \put(2.7,0){\circle{0.1}} \put(3.7,1){\circle{0.1}} \put(2.7,1){\circle{0.1}} \put(3,0){\mbox{$?$}} \put(2,1){\mbox{$\mathfrak{f}^k_{0''}$}} \put(3,1){\mbox{$\mathfrak{f}^k_{0'}$}} \put(4,1){\mbox{$\mathfrak{f}^k_{0}$}} \put(2.7,0){\vector(1,1){0.9}} \put(2.7,0){\vector(-1,1){0.9}} \put(2.7,0){\vector(0,1){0.9}} \put(4.3,0.5){\mbox{$ \Bigr)\quad = $ }} \put(6,1){\circle{0.1}} \put(7,0){\circle{0.1}} \put(8,1){\circle{0.1}} \put(7,1){\circle{0.1}} \put(7.3,0){\mbox{$0$}} \put(6.3,1){\mbox{$1$}} \put(7.3,1){\mbox{$1$}} \put(8.3,1){\mbox{$1$}} \put(7,0){\vector(1,1){0.9}} \put(7,0){\vector(-1,1){0.9}} \put(7,0){\vector(0,1){0.9}} \end{picture} \end{proof} \noindent There are, as well, families of logics with finitary unification closed under intersections. For instance, ${\mathsf L}(\mathfrak G_2)\cap{\mathsf L}(\mathfrak C_{5})$ has finitary unification, see Theorem \ref{L8i}. The frames $\mathfrak C_{5}$ and $\mathfrak F_{3}$ are quite the same; the logics ${\mathsf L}(\mathfrak C_{5})$ and ${\mathsf L}(\mathfrak F_{3})$ have finitary unification (the second one even has projective approximation, see Theorem \ref{lmk}) but it does not mean they generate the same unification types in combination with other frames. 
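All of the arguments above come down to evaluating the substituted formulas $\sigma(x_i)$ at the nodes of small finite Kripke models. The following Python fragment is not part of the paper's machinery; it is a minimal sketch, under an assumed encoding of models and formulas, of how such forcing computations can be checked mechanically.
\begin{verbatim}
# A minimal sketch (illustration only): forcing of intuitionistic formulas
# in a finite Kripke model.  The encoding is an assumption made for this
# example: succ[w] lists every v with w R v (including w itself), and
# val[w][i] is the truth-value (0 or 1) of the variable x_i at the node w.
def forces(succ, val, w, phi):
    kind = phi[0]
    if kind == 'bot':                    # falsum is never forced
        return False
    if kind == 'var':
        return val[w][phi[1]] == 1
    if kind == 'and':
        return forces(succ, val, w, phi[1]) and forces(succ, val, w, phi[2])
    if kind == 'or':
        return forces(succ, val, w, phi[1]) or forces(succ, val, w, phi[2])
    if kind == 'imp':                    # quantify over all successors of w
        return all(not forces(succ, val, v, phi[1])
                   or forces(succ, val, v, phi[2]) for v in succ[w])
    raise ValueError(kind)

def neg(phi):                            # negation as phi -> bot
    return ('imp', phi, ('bot',))

# Toy example over F_2 (root 0 below the end nodes 1 and 2), with one
# variable x_0 true at node 1 only: the root refutes ~x_0 \/ ~~x_0.
succ = {0: [0, 1, 2], 1: [1], 2: [2]}
val = {0: {0: 0}, 1: {0: 1}, 2: {0: 0}}
x0 = ('var', 0)
print(forces(succ, val, 0, ('or', neg(x0), neg(neg(x0)))))   # False
\end{verbatim}
A conjunct of the shape $\neg x_i\lor\neg\neg x_i$, as used in the substitutions above, is refuted at the root of $\mathfrak F_2$ exactly when the two end nodes disagree on $x_i$; the toy example verifies one such instance.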
\newpage \begin{lemma}\label{pil} If $d\geq 1$, a class {\bf F} of finite frames and $\mathfrak F\in\mathbf F$ are such that\\ \indent (1) $d(\mathfrak F)=d$ \ and \ $d(\mathfrak G)\leq d+1$, for each $\mathfrak G\in \mathbf F$;\\ \indent (2) $\mathfrak G\equiv +\mathfrak F$ \ or \ $\mathfrak G\in \mathbf F$, for each p-morphic image $\mathfrak G$ of $+\mathfrak F$;\\ \indent (3) $\mathsf L({\mathbf F})$ is locally tabular and has finitary\slash unitary unification,\\ then $\mathsf L({\mathbf F}\cup \{+\mathfrak F\})$ is locally tabular and has finitary\slash unitary unification, as well.\end{lemma}
\begin{proof} $\mathsf L({\mathbf F}\cup \{+\mathfrak F\})$ is locally tabular, by Corollary \ref{fp}. Assume ${\mathbf F}={ sm({\mathbf F})}$ and $+\mathfrak F\not\in \mathbf F$. Take $\mathbf{ M}^k=\mathbf{ M}^k({\mathbf F})$ and $\mathbf{ N}^k=\mathbf{ M}^k(sm({\mathbf F}\cup \{+\mathfrak F\}))$, for each $k\geq 0$. By (2), $sm({\mathbf F}\cup \{+\mathfrak F\})\setminus\mathbf F$ is not empty, but it contains only isomorphic copies of $+\mathfrak F$. Let $n\geq 1$ and $\sigma\colon\{x_1,\dots,x_n\}\to \mathsf{Fm^k}$. There is a number $m\geq 1$ such that there are mappings $F\colon \mathbf{ M}^k \to \mathbf{ M}^m$ and $G\colon \mathbf{ M}^m\to \mathbf{ M}^n$ fulfilling the conditions (i)-(v) (of Theorem \ref{main}). We take $m'=m+n+d+1$ and would like to find mappings $F'\colon \mathbf{ N}^k_{ir}\to \mathbf{ N}^{m'}$ and $G'\colon \mathbf{ N}^{m'}_{ir}\to \mathbf{ N}^n$ fulfilling the conditions (i)-(v) of Theorem \ref{main2}. Let $\mathfrak{M}^k=(W,R,w_0,\{\mathfrak{f}^k_w\}_{w\in W})\in \mathbf{M}^k_{ir}$ and $F(\mathfrak{M}^k)=(W,R,w_0,\{\mathfrak{f}^m_w\}_{w\in W})$. We take $F'(\mathfrak{M}^k)=(W,R,w_0,\{\mathfrak{g}^{m'}_w\}_{w\in W})$, where $ \mathfrak{g}^{m'}_w=\mathfrak{f}^m_w1\dots1\underbrace{0\dots0}_{d(w)-1}1$.
\noindent We need to define $F'(\mathfrak{M}^k)$ for $\mathfrak{M}^k\in\mathbf{ N}^k_{ir}\setminus\mathbf{M}^k$. Let $\mathfrak{M}^k\in\mathbf{ N}^k_{ir}\setminus\mathbf{M}^k$. Then $\mathfrak{M}^k$ is a $k$-model over $+\mathfrak{F}$. Let $w_0$ be the root of $+\mathfrak{F}$ and $u$ its immediate successor (the root of $\mathfrak{F}$). Then $(\mathfrak{M}^k)_u$ is a p-irreducible model in $\mathbf{ M}^k$, see Theorem \ref{pM6}, and hence we have $F'((\mathfrak{M}^k)_u)$. Thus, we only need the valuation $\mathfrak g^{m'}_{w_0}$ at $w_0$ (to get $F'(\mathfrak{M}^k)$). Let $\mathfrak h^{n}_{w_0}$ be the valuation at $w_0$ in $\sigma(\mathfrak{M}^k)$. We take $\mathfrak{g}^{m'}_{w_0}=\underbrace{0\dots0}_{m}\mathfrak{h}^n_{w_0}1\underbrace{0\dots0}_{d}.$ The definition of $F'\colon {\mathbf N}^k_{ir}\to \mathbf{N}^{m+n+d+1}$ is complete and the conditions (i)-(ii) of Theorem \ref{main2} are obviously fulfilled, as far as $F'$ alone is concerned. As concerns (iii), if $i\colon \mathfrak{M}^k\to\mathfrak{N}^k$ is an isomorphism of $k$-models in $\mathbf{ M^k}$, then $i(w)$ and $w$ (for any $w$ in the domain of $\mathfrak{M}^k$) have the same depth and hence $ \mathfrak{g}^{m'}_w= \mathfrak{g}^{m'}_{i(w)}$ which shows that $i$ is also an isomorphism between $F'(\mathfrak{M}^k)$ and $F'(\mathfrak{N}^k)$. The definition of $G'\colon \mathbf{ N}^{m'}_{ir}\to \mathbf{ N}^{n}$ involves three cases.\\ (A) Suppose that $\mathfrak{M}^{m'}\in\mathbf{M}^{m'}_{ir}$ is not equivalent with $F'(\mathfrak{N}^{k})$, for any $\mathfrak{N}^k\in\mathbf{ N}^k_{ir}\setminus\mathbf{M}^k$. Then we take $G'(\mathfrak{M}^{m'})=G(\mathfrak{M}^{m'}\!\!\!\upharpoonright m)$.
The mapping $F'$ preserves the depth of the frame. Thus, $d(F'(\mathfrak{N}^{k}))=d+1$ for any $\mathfrak{N}^k\in\mathbf{ N}^k_{ir}\setminus\mathbf{M}^k$ and hence no generated submodel of $\mathfrak{M}^{m'}$ is equivalent with $F'(\mathfrak{N}^{k})$, for any $\mathfrak{N}^k\in\mathbf{ N}^k_{ir}\setminus\mathbf{M}^k$, either. So, we can show (ii). The conditions (i) and (iii) are obvious. We also have $G'(\mathfrak{M}^{m'})\thicksim \sigma(\mathfrak{M}^{k})$, for some $\mathfrak{M}^{k}$, as $G$ fulfills (iv). To show the condition (v) (of Theorem \ref{main2}), let us suppose $\mathfrak{M}^{m'}\thicksim F'(\mathfrak{M}^{k})$, for some $\mathfrak{M}^k\in\mathbf{M}^k$. Then $\mathfrak{M}^{m'}\!\!\!\!\upharpoonright m\thicksim F'(\mathfrak{M}^{k})\!\!\!\!\upharpoonright m=F(\mathfrak{M}^{k})$ and hence we have $G'(\mathfrak{M}^{m'})=G(\mathfrak{M}^{m'}\!\!\!\!\upharpoonright m)=G(F(\mathfrak{M}^{k}))\thicksim\sigma(\mathfrak{M}^{k}).$\\ (B) Suppose $\mathfrak{N}^{m'}\in\mathbf{N}^{m'}_{ir}\setminus\mathbf{M}^{m'}$ is not equivalent with $F'(\mathfrak{N}^{k})$, for any $\mathfrak{N}^k\in\mathbf{ N}^k_{ir}\setminus\mathbf{M}^k$. Then $\mathfrak{N}^{m'}$ is a model over $+\mathfrak F$. Let $w_0$ be the root of $+\mathfrak F$ and $u$ its immediate successor; that is, $u$ is the root of $\mathfrak F$. By (A), we have $G'((\mathfrak{N}^{m'})_u)=G((\mathfrak{N}^{m'}\!\!\!\upharpoonright m)_u)$. Let $\mathfrak f^{n}_{u}$ be the valuation at the root $u$ of $G'((\mathfrak{N}^{m'})_u)$. To extend $G'((\mathfrak{N}^{m'})_u)$ to a model over $+\mathfrak F$, it suffices to define the valuation $\mathfrak f^{n}_{w_0}$ at $w_0$. Regardless of the choice, the conditions (i)-(iii) of Theorem \ref{main2} are always fulfilled. We take $\mathfrak f^{n}_{w_0}=\mathfrak f^{n}_{u}$. Then $G'(\mathfrak{N}^{m'})\thicksim G'((\mathfrak{N}^{m'})_u)\thicksim\sigma(\mathfrak{M}^{k})$, for some $\mathfrak{M}^{k}$, which means the condition (iv) of Theorem \ref{main2} is also fulfilled and (v) is irrelevant in this case.\\ (C) Suppose that $\mathfrak{M}^{m'}$ is p-irreducible and \ $\mathfrak{M}^{m'}\thicksim F'(\mathfrak{N}^{k})$, for some $\mathfrak{N}^k\in\mathbf{ N}^k_{ir}\setminus\mathbf{M}^k$. Then there is a p-morphism $p\colon F'(\mathfrak{N}^{k})\to \mathfrak{M}^{m'}$. We can assume $\mathfrak{N}^{k}$ is p-irreducible and $\mathfrak{N}^{k}$ is a model over $+\mathfrak F$. Let $w_0$ be the root of $\mathfrak{N}^{k}$ and $u$ its immediate successor. Then $p(w_0)$ is the root of $\mathfrak{M}^{m'}$ and $p(u)$ its only immediate successor (in $\mathfrak{M}^{m'}$). By the definition of $F'$, we have $F'((\mathfrak{N}^{k})_u)\!\!\upharpoonright m=F((\mathfrak{N}^{k})_u)$ and hence $G'((\mathfrak{M}^{m'})_{p(u)})=G(((\mathfrak{M}^{m'})_{p(u)})\!\!\upharpoonright m) \thicksim G((F'((\mathfrak{N}^{k})_u))\!\!\upharpoonright m)=G(F((\mathfrak{N}^{k})_u))\thicksim( \sigma(\mathfrak{N}^{k}))_u.$ To get $G'(\mathfrak{M}^{m'})$ we need the valuation $\mathfrak f^{n}_{p(w_0)}$ at $p(w_0)$ (in $G'(\mathfrak{M}^{m'})$). Since p-morphisms preserve the valuations, we must have (in $\mathfrak{M}^{m'}$ at $p(w_0)$) the valuation $\mathfrak{g}^{m'}_{w_0}={0\dots0}\mathfrak{h}^n_{w_0}1{0\dots0},$ where $\mathfrak h^{n}_{w_0}$ is the valuation at $w_0$ in $\sigma(\mathfrak{N}^k)$. It means, in particular, that for each $\mathfrak{N}^{k}$ such that $\mathfrak{M}^{m'}\thicksim F'(\mathfrak{N}^{k})$ we have the same valuation $\mathfrak h^{n}_{w_0}$ at $w_0$ in $\sigma(\mathfrak{N}^k)$.
Then we take $\mathfrak f^{n}_{p(w_0)}=\mathfrak h^{n}_{w_0}$ to complete our definition of $G'(\mathfrak{M}^{m'})$. Since the valuation $\mathfrak f^{n}_{p(u)}$ at $p(u)$ (in $G'(\mathfrak{M}^{m'})$) must be $\mathfrak h^{n}_{u}$ (the valuation at $u$ in $\sigma(\mathfrak{N}^k)$), the monotonicity condition is fulfilled and our definition is correct. One checks (i)-(iii) of Theorem \ref{main2} and $G'(\mathfrak{M}^{m'})\thicksim\sigma(\mathfrak{N}^k)$ which gives (iv)-(v). \end{proof}
Our lemma is not sufficient to show $\mathsf L(+\mathfrak F)$ has finitary\slash unitary unification if $\mathsf L(\mathfrak F)$ does; it applies only to some frames $\mathfrak F$. But using it, we can show that $\mathsf L(\{\mathfrak F,\mathfrak L_n\})$ has finitary\slash unitary unification, for any $n\geq 1$, if $\mathsf L(\mathfrak F)$ does. We have $\mathsf L(\{\mathfrak F,\mathfrak L_n\})=\mathsf L(\mathfrak F)\cap\mathsf L(\mathfrak L_n)$ and $\mathsf L(\mathfrak L_n)$ has projective unification. Extending our argument, we get:
\begin{theorem}\label{itpr} If {\sf L} is a locally tabular intermediate logic with finitary\slash unitary unification and {\sf L'} is projective, then their intersection $\mathsf L\cap\mathsf L'$ has finitary\slash unitary unification. \end{theorem}
It follows from Theorem \ref{L8} that we cannot replace the assumption `$\mathsf{L'}$ is projective' with `$\mathsf{L'}$ has projective approximation'. This does not exclude that the intersection of two logics with projective approximation is a logic with finitary unification.
\begin{theorem}\label{L10} ${\mathsf L}(\mathfrak Y_{2})$, \ ${\mathsf L}(\mathfrak Y_{2}+)$, \ ${\mathsf L}(\mathfrak Y_{3})$, and ${\mathsf L}(\mathfrak Y_{3}+)$ (see Figure \ref{nu}) have nullary unification. \end{theorem}
\begin{figure}[H] \unitlength1cm \thicklines \begin{picture}(0,2.5) \put(0,0){$\mathfrak{Y}_2$:} \put(1,0){\vector(-1,1){0.9}} \put(1,0){\vector(1,1){0.9}} \put(2,1){\vector(1,1){0.9}} \put(0,1){\circle{0.1}} \put(1,2){\circle{0.1}} \put(1,0){\circle{0.1}} \put(2,1){\circle{0.1}} \put(3,2){\circle{0.1}} \put(0,1){\vector(1,1){0.9}} \put(2,1){\vector(-1,1){0.9}} \put(2.8,0){$\mathfrak{Y}_2+$:} \put(4.3,0){\vector(-1,1){0.9}} \put(4.3,0){\vector(1,1){0.9}} \put(5.3,1){\vector(1,1){0.9}} \put(3.3,1){\circle{0.1}} \put(4.3,2){\circle{0.1}} \put(4.3,0){\circle{0.1}} \put(5.3,1){\circle{0.1}} \put(6.3,2){\circle{0.1}} \put(3.3,1){\vector(1,1){0.9}} \put(5.3,1){\vector(-1,1){0.9}} \put(5.3,3){\circle{0.1}} \put(4.3,2){\vector(1,1){0.9}} \put(6.3,2){\vector(-1,1){0.9}} \put(6.2,0){$\mathfrak{Y}_3$:} \put(6.7,2){\circle{0.1}} \put(8.7,2){\circle{0.1}} \put(6.7,1){\circle{0.1}} \put(8.7,1){\circle{0.1}} \put(7.7,0){\circle{0.1}} \put(6.7,1){\vector(0,1){0.9}} \put(8.7,1){\vector(0,1){0.9}} \put(6.7,1){\vector(2,1){1.9}} \put(8.7,1){\vector(-2,1){1.9}} \put(7.7,0){\vector(1,1){0.9}} \put(7.7,0){\vector(-1,1){0.9}} \put(9.5,0){${\mathfrak{Y}_3}+$:} \put(10,2){\circle{0.1}} \put(12,2){\circle{0.1}} \put(10,1){\circle{0.1}} \put(12,1){\circle{0.1}} \put(11,0){\circle{0.1}} \put(11,3){\circle{0.1}} \put(10,1){\vector(0,1){0.9}} \put(12,1){\vector(0,1){0.9}} \put(10,1){\vector(2,1){1.9}} \put(12,1){\vector(-2,1){1.9}} \put(11,0){\vector(1,1){0.9}} \put(11,0){\vector(-1,1){0.9}} \put(10,2){\vector(1,1){0.9}} \put(12,2){\vector(-1,1){0.9}} \end{picture} \caption{Frames of Logics with Nullary Unification.}\label{nu} \end{figure}\vspace{-0.5cm}
\begin{proof} Let $\mathbf{F}=\{\mathfrak L_1,\mathfrak L_2,\mathfrak L_3,\mathfrak F_{2},\mathfrak
R_{2},+\mathfrak F_{2},\mathfrak Y_{2}\}$. Then $\mathbf{F}=sm(\mathfrak Y_{2})$. Assume ${\mathsf L}(\mathbf F)$ has finitary unification. Let $n=1$. By Theorem \ref{main}, there is a number $m\geq 1$ such that for any $\sigma\colon\{x_1\}\to \mathsf{Fm}^k$ there are mappings $G:\mathbf{M}^m\to\mathbf{M}^1$ and $F:\mathbf{M}^k\to\mathbf{M}^m$ fulfilling the conditions (i)-(v). Take any $k>m$ and let $$ \sigma(x_1)=\bigl(\neg\neg \bigvee_{i+1}^kx_i\bigr) \ \land \ \bigwedge_{i=1}^k(\neg\neg x_i\lor\neg x_i). $$ There are only four p-irreducible $1$-models equivalent with some $\sigma$-models. They are: $$ \circ \ 0 \qquad \circ 1\qquad\unitlength1cm \begin{picture}(1,1.1) \thicklines \put(0,1){\circle{0.1}} \put(0.3,1){\mbox{$0$}} \put(2,1){\circle{0.1}} \put(2.3,1){\mbox{$1$}} \put(1,0){\circle{0.1}} \put(1.3,0){\mbox{$0$}} \put(1,0){\vector(1,1){0.9}} \put(1,0){\vector(-1,1){0.9}} \put(3,0){\circle{0.1}} \put(3.3,0){\mbox{$0$\ \ \ Note $\sigma(\circ \ \mathfrak{f}^k)=\circ \ 0$ iff $\mathfrak{f}^k=\overbrace{0\cdots 0}^{\mbox{$k$-times}}$.}} \put(3,1){\circle{0.1}} \put(3.8,0.5){\mbox{ some $\sigma$-model over $\mathfrak{F}_2$. }} \put(3.3,1){\mbox{$1$}\quad The last $1$-model is equivalent with} \put(3,0){\vector(0,1){0.9}}\end{picture}\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad$$ Let $F(\circ \ 0\cdots0)=\circ \ \mathfrak g^m$, for some $\mathfrak g^m$. Since $k>m$, there are $k$-valuations $\mathfrak{f}^k\not=\mathfrak{g}^k$ such that $F(\circ \ \mathfrak{f}^k)=F(\circ \ \mathfrak{g}^k)=\circ\ \mathfrak{f}^m$, for some $\mathfrak{f}^m$, and $\mathfrak{f}^k\not={0\cdots0}\not=\mathfrak{g}^k$. By (v), $G(\circ \ \mathfrak g^m)=\circ \ 0$ and $G(\circ \ \mathfrak f^m)=\circ \ 1$. Thus, by (ii) and (iv), for any $m$-valuation $?$ \unitlength1cm \begin{picture}(1,1.5) \thicklines \put(0,0.5){\mbox{either}} \put(1.8,0.5){\mbox{$G_{} \Bigl($}} \put(3,0){\circle{0.1}} \put(2.5,0){\mbox{$?$}} \put(3,1){\circle{0.1}} \put(2.5,1){\mbox{$\mathfrak{f}^m$}} \put(3,0){\vector(0,1){0.9}} \put(3.5,0.5){\mbox{$\Bigr) \quad =\quad$}} \put(4.7,0){\circle{0.1}} \put(5,0){\mbox{$0$}} \put(4.7,1){\circle{0.1}} \put(5,1){\mbox{$1$}} \put(4.7,0){\vector(0,1){0.9}} \put(6,0.5){\mbox{, or}} \put(7.8,0.5){\mbox{$G \Bigl($}} \put(9,0){\circle{0.1}} \put(8.5,0){\mbox{$?$}} \put(9,1){\circle{0.1}} \put(8.5,1){\mbox{$\mathfrak{f}^m$}} \put(9,0){\vector(0,1){0.9}} \put(9.5,0.5){\mbox{$\Bigr) \quad =\quad$}} \put(10.7,0){\circle{0.1}} \put(11,0){\mbox{$1$}} \put(10.7,1){\circle{0.1}} \put(11,1){\mbox{$1$}} \put(10.7,0){\vector(0,1){0.9}} \end{picture} \noindent However, if the first happened, we would have \\ \begin{picture}(5,2) \thicklines \linethickness{0.3mm} \put(0,1){\mbox{$G_{} \Bigl($}} \put(2,0){\vector(-1,1){0.9}} \put(2,0){\vector(1,1){0.9}} \put(3,1){\vector(1,1){0.9}} \put(3,1){\vector(-1,1){0.9}} \put(1,1){\vector(1,1){0.9}} \put(1,1){\circle{0.1}} \put(2,2){\circle{0.1}} \put(4,2){\circle{0.1}} \put(2,0){\circle{0.1}} \put(3,1){\circle{0.1}} \put(0.9,1.3){\mbox{$?$}} \put(3.3,2){\mbox{$\mathfrak{g}^m$}} \put(1.4,2){\mbox{$\mathfrak{f}^m$}} \put(4.5,1){\mbox{$\Bigr) \quad = \quad$}} \put(7,0){\vector(-1,1){0.9}} \put(7,0){\vector(1,1){0.9}} \put(6,1){\vector(1,1){0.9}} \put(8,1){\vector(1,1){0.9}} \put(8,1){\vector(-1,1){0.9}} \put(6,1){\circle{0.1}} \put(7,2){\circle{0.1}} \put(9,2){\circle{0.1}} \put(7,0){\circle{0.1}} \put(8,1){\circle{0.1}} \put(7.3,0){$0$} \put(6.5,1){\mbox{$0$}} \put(9.3,2){\mbox{$0$}} \put(7.3,2){\mbox{$1$}} \put(8.3,1){$0$} \end{picture} \noindent 
which would contradict (iv) as the resulting $1$-model is not equivalent to any $\sigma$-model. We conclude that the second must happen, and then we get the contradiction:\\
\unitlength1cm \begin{picture}(1,1) \thicklines \put(0,0){\circle{0.1}} \put(0.2,0){\mbox{$0$}} \put(0,1){\circle{0.1}} \put(0.2,1){\mbox{$1$}} \put(0,0){\vector(0,1){0.9}} \put(0.5,0.5){\mbox{$\thicksim \ G \Bigl(F \Bigl($}} \put(3.1,0){\circle{0.1}} \put(3.5,0){\mbox{$0\cdots0$}} \put(2.1,1){\circle{0.1}} \put(4.1,1){\circle{0.1}} \put(2.4,1){\mbox{$\mathfrak{f}^k$}} \put(3.6,1){\mbox{$\mathfrak{g}^k$}} \put(3.1,0){\vector(1,1){0.9}} \put(3.1,0){\vector(-1,1){0.9}} \put(4.4,0.5){\mbox{$\Bigr)\Bigr) \ =\ $}} \put(5.4,0.5){\mbox{$G \Bigl($}} \put(7,0){\circle{0.1}} \put(6.5,0){\mbox{$?$}} \put(6,1){\circle{0.1}} \put(8,1){\circle{0.1}} \put(6.3,1){\mbox{$\mathfrak{f}^m$}} \put(7.5,1){\mbox{$\mathfrak{f}^m$}} \put(7,0){\vector(1,1){0.9}} \put(7,0){\vector(-1,1){0.9}} \put(8.2,0.5){\mbox{$\Bigr) \thicksim $}} \put(9,0.5){\mbox{$G \Bigl($}} \put(9.7,0){\circle{0.1}} \put(10,0){\mbox{$?$}} \put(9.7,1){\circle{0.1}} \put(10,1){\mbox{$\mathfrak{f}^m$}} \put(9.7,0){\vector(0,1){0.9}} \put(10.6,0.5){\mbox{$\Bigr) \ =\ $}} \put(11.7,0){\circle{0.1}} \put(12,0){\mbox{$1$}} \put(11.7,1){\circle{0.1}} \put(12,1){\mbox{$1$}} \put(11.7,0){\vector(0,1){0.9}} \end{picture} \\
If $\mathbf{F}=\{\mathfrak L_1,\mathfrak L_2,\mathfrak L_3,\mathfrak F_{2},\mathfrak R_{2},+\mathfrak F_{2},\mathfrak Y_{3}\}$, then $\mathbf{F}=sm(\mathfrak Y_{3})$. Suppose that ${\mathsf L}(\mathbf F)$ has finitary unification. Take $n=2$. By Theorem \ref{main}, there is a number $m\geq 1$ such that for any $\sigma\colon\{x_1,x_2\}\to \mathsf{Fm}^k$ there are mappings $G:\mathbf{M}^m\to\mathbf{M}^2$ and $F:\mathbf{M}^k\to\mathbf{M}^m$ fulfilling the conditions (i)-(v). Take any $k>m$ and let $$ \sigma(x_1)=\bigwedge_{i=1}^k(\neg\neg x_i\lor\neg x_i)\quad\mbox{and}\quad \sigma(x_2)=\bigwedge_{i=1}^k(\neg\neg x_i\lor\neg x_i)\rightarrow\Bigl(\bigvee_{i=1}^kx_i\lor\neg \bigvee_{i=1}^kx_i\Bigr). $$Note that $ \sigma(x_1)\land\sigma(x_2)$ is valid in $\mathfrak L_{1}$. Let us prove that $\sigma$ is a unifier for $x_1\lor x_2$ (in ${\mathsf L}(\mathfrak Y_{3})$). Namely, take any $k$-model over $\mathfrak Y_{3}$: $$ \unitlength1cm \begin{picture}(6,2.1) \thicklines \put(1,2){\circle{0.1}} \put(3,2){\circle{0.1}} \put(1,1){\circle{0.1}} \put(3,1){\circle{0.1}} \put(2,0){\circle{0.1}} \put(3.2,0.9){\mbox{$\mathfrak{f}^k_{1}$}\quad\ satisfied only at the end nodes $0$ and $0'$. But it means that} \put(0.6,0.9){\mbox{$\mathfrak{f}^k_{1'}$}} \put(2.3,0){\mbox{$\mathfrak{f}^k_{2}$}} \put(0.6,2){\mbox{$\mathfrak{f}^k_{0'}$}} \put(3.2,2){\mbox{$\mathfrak{f}^k_{0}$} \quad There are two cases to consider. If $\mathfrak{f}^k_{0}=\mathfrak{f}^k_{0'}$, then $\sigma(x_1)$ is } \put(4,1.5){\mbox{true in the model. Suppose that $\mathfrak{f}^k_{0}\not=\mathfrak{f}^k_{0'}$. Then $\sigma(x_1)$ is }} \put(3.9,0.4){\mbox{ $\sigma(x_2)$ is true in the model.
In any case, $\sigma(x_1\lor x_2)$ is }} \put(3.9,-0.1){\mbox{ satisfied at any node of the model.}} \put(1,1){\vector(0,1){0.9}} \put(3,1){\vector(0,1){0.9}} \put(1,1){\vector(2,1){1.9}} \put(3,1){\vector(-2,1){1.9}} \put(2,0){\vector(1,1){0.9}} \put(2,0){\vector(-1,1){0.9}} \end{picture}\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad$$ As $m<k$, there are $k$-valuations $\mathfrak{f}^k\not=\mathfrak{g}^k$ such that $F(\circ \ \mathfrak{f}^k)=F(\circ \ \mathfrak{g}^k)=\circ\ \mathfrak{g}^m$, for some $\mathfrak{g}^m$. Let $\mathfrak{f}^k\not=\underbrace{0\cdots0}_{\mbox{$k$-times}}$ and consider the following $k$-models over $\mathfrak{F}_2$: \unitlength1cm \begin{picture}(1,1.5) \thicklines \put(2,0){\circle{0.1}} \put(2.5,0){\mbox{${0\cdots0}$}} \put(1,1){\circle{0.1}} \put(3,1){\circle{0.1}} \put(0.5,1){\mbox{$\mathfrak{f}^k$}} \put(3.3,1){\mbox{$\mathfrak{g}^k$}} \put(2,0){\vector(1,1){0.9}} \put(2,0){\vector(-1,1){0.9}} \put(5.5,0.5){\mbox{and}} \put(9,0){\circle{0.1}} \put(9.5,0){\mbox{${0\cdots0}$}} \put(8,1){\circle{0.1}} \put(10,1){\circle{0.1}} \put(7.5,1){\mbox{$\mathfrak{f}^k$}} \put(10.3,1){\mbox{$\mathfrak{f}^k$}} \put(9,0){\vector(1,1){0.9}} \put(9,0){\vector(-1,1){0.9}} \end{picture} \noindent Note that $\sigma(x_1)$ is false in the first model and $\sigma(x_2)$ in the second. Let \unitlength1cm \begin{picture}(1,1.5) \thicklines \put(-0.3,0.5){\mbox{$F \Bigl($}} \put(1.5,0){\circle{0.1}} \put(1.8,0){\mbox{${0\cdots0}$}} \put(0.5,1){\circle{0.1}} \put(2.5,1){\circle{0.1}} \put(0.8,1){\mbox{$\mathfrak{f}^k$}} \put(2,1){\mbox{$\mathfrak{g}^k$}} \put(1.5,0){\vector(1,1){0.9}} \put(1.5,0){\vector(-1,1){0.9}} \put(2.8,0.5){\mbox{$\Bigr)=$}} \put(4.5,0){\circle{0.1}} \put(4.9,0){\mbox{$\mathfrak{f}^m_{1}$}} \put(3.5,1){\circle{0.1}} \put(5.5,1){\circle{0.1}} \put(3.8,1){\mbox{$\mathfrak{g}^m$}} \put(5,1){\mbox{$\mathfrak{g}^m$}} \put(4.5,0){\vector(1,1){0.9}} \put(4.5,0){\vector(-1,1){0.9}} \put(5.7,0.5){\mbox{;}} \put(6.2,0.5){\mbox{$F \Bigl($}} \put(8,0){\circle{0.1}} \put(8.3,0){\mbox{${0\cdots0}$}} \put(7,1){\circle{0.1}} \put(9,1){\circle{0.1}} \put(7.3,1){\mbox{$\mathfrak{f}^k$}} \put(8.5,1){\mbox{$\mathfrak{f}^k$}} \put(8,0){\vector(1,1){0.9}} \put(8,0){\vector(-1,1){0.9}} \put(9.2,0.5){\mbox{$\Bigr)=$}} \put(11,0){\circle{0.1}} \put(11.3,0){\mbox{$\mathfrak{f}^m_{1'}$}} \put(10,1){\circle{0.1}} \put(12,1){\circle{0.1}} \put(10.3,1){\mbox{$\mathfrak{g}^m$}} \put(11.5,1){\mbox{$\mathfrak{g}^m$}} \put(11,0){\vector(1,1){0.9}} \put(11,0){\vector(-1,1){0.9}} \end{picture} \\ \noindent for some $m$-valuations $\mathfrak{f}^m_{1},\mathfrak{f}^m_{1'}$. 
Then, by (v), we get \unitlength1cm \begin{picture}(1,1.5) \thicklines \put(-0.3,0.5){\mbox{$G \Bigl($}} \put(1.5,0){\circle{0.1}} \put(1.9,0.1){\mbox{$\mathfrak{f}^m_{1}$}} \put(0.5,1){\circle{0.1}} \put(2.5,1){\circle{0.1}} \put(0.8,1){\mbox{$\mathfrak{g}^m$}} \put(2,1){\mbox{$\mathfrak{g}^m$}} \put(1.5,0){\vector(1,1){0.9}} \put(1.5,0){\vector(-1,1){0.9}} \put(2.8,0.5){\mbox{$\Bigr)=$}} \put(4.5,0){\circle{0.1}} \put(4.9,0){\mbox{$01$}} \put(3.5,1){\circle{0.1}} \put(5.5,1){\circle{0.1}} \put(3.8,1){\mbox{$11$}} \put(5,1){\mbox{$11$}} \put(4.5,0){\vector(1,1){0.9}} \put(4.5,0){\vector(-1,1){0.9}} \put(5.7,0.5){\mbox{;}} \put(6,0.5){\mbox{$G \Bigl($}} \put(8,0){\circle{0.1}} \put(8.4,0.1){\mbox{$\mathfrak{f}^m_{1'}$}} \put(7,1){\circle{0.1}} \put(9,1){\circle{0.1}} \put(7.3,1){\mbox{$\mathfrak{g}^m$}} \put(8.5,1){\mbox{$\mathfrak{g}^m$}} \put(8,0){\vector(1,1){0.9}} \put(8,0){\vector(-1,1){0.9}} \put(9.2,0.5){\mbox{$\Bigr)=$}} \put(11,0){\circle{0.1}} \put(11.3,0){\mbox{$10$}} \put(10,1){\circle{0.1}} \put(12,1){\circle{0.1}} \put(10.3,1){\mbox{$11$}} \put(11.5,1){\mbox{$11$}} \put(11,0){\vector(1,1){0.9}} \put(11,0){\vector(-1,1){0.9}} \end{picture} \\ Thus, we have $$ \unitlength1cm \begin{picture}(6,2.1) \thicklines \put(2,2){\circle{0.1}} \put(4,2){\circle{0.1}} \put(2,1){\circle{0.1}} \put(4,1){\circle{0.1}} \put(3,0){\circle{0.1}} \put(4.2,0.9){\mbox{$\mathfrak{f}^m_{1}\quad \Bigr)\quad=$}} \put(0.5,0.9){\mbox{$G \Bigl(\quad\mathfrak{f}^m_{1'}$}} \put(3.3,0){\mbox{${0\cdots0}$}} \put(1.6,2){\mbox{$\mathfrak{g}^m$}} \put(4.2,2){$\mathfrak{g}^m$ } \put(2,1){\vector(0,1){0.9}} \put(4,1){\vector(0,1){0.9}} \put(2,1){\vector(2,1){1.9}} \put(4,1){\vector(-2,1){1.9}} \put(3,0){\vector(1,1){0.9}} \put(3,0){\vector(-1,1){0.9}} \put(7,2){\circle{0.1}} \put(9,2){\circle{0.1}} \put(7,1){\circle{0.1}} \put(9,1){\circle{0.1}} \put(8,0){\circle{0.1}} \put(9.2,0.9){\mbox{$01$}} \put(6.5,0.9){\mbox{$10$}} \put(8.3,0){\mbox{$00$}} \put(6.6,2){\mbox{$11$}} \put(9.2,2){$11$ } \put(7,1){\vector(0,1){0.9}} \put(9,1){\vector(0,1){0.9}} \put(7,1){\vector(2,1){1.9}} \put(9,1){\vector(-2,1){1.9}} \put(8,0){\vector(1,1){0.9}} \put(8,0){\vector(-1,1){0.9}} \end{picture}\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad$$ which contradicts (iv). Indeed, according to (iv), $G(\mathfrak{M}^m)$ should be equivalent to a $\sigma$-model, for any $m$-model $\mathfrak{M}^m$. But $\sigma$ is a unifier for $x_1\lor x_2$ and hence $x_1\lor x_2$ should be true in the received $2$-model which is not the case.\\ Let us consider $\mathsf L(\mathfrak{Y}_3+)$. We have $sm(\mathfrak Y_{3}+)=\{\mathfrak L_1,\mathfrak L_2,\mathfrak L_3,\mathfrak L_4,\mathfrak R_{2},+\mathfrak R_{2},\mathfrak R_{2}+,\mathfrak Y_{3}+\}$. Assume that $\mathsf L(\mathfrak{Y}_3+)$ has finitary unification. Take $n=3$ and use Theorem \ref{main} to get a number $m\geq 1$, mappings $G:\mathbf{M}^m\to\mathbf{M}^2$ and $F:\mathbf{M}^k\to\mathbf{M}^m$ fulfilling the conditions (i)-(v), where $k>m$ and $\sigma\colon\{x_1,x_2,x_3\}\to \mathsf{Fm}^k$ is as follows $$ \sigma(x_1)=x_1 \qquad \mbox{and} \qquad \sigma(x_2)=\bigwedge_{i=2}^k\Bigl(\bigl(((x_i\to x_1)\to x_1)\bigr)\lor (x_i\to x_1)\Bigr)\qquad \mbox{and} $$ $$ \sigma(x_3)=\bigwedge_{i=2}^k\Bigl(\bigl(((x_i\to x_1)\to x_1)\bigr)\lor (x_i\to x_1)\Bigr)\rightarrow\Bigl(\bigvee_{i=2}^kx_i\lor\bigl((\bigvee_{i=2}^kx_i)\to x_1\bigr)\Bigr). 
$$ Note that if we take $\alpha\circ\sigma$, where $\alpha:x_1\slash\bot$ we get a slightly modified substitution $\sigma$ as used above for $\mathfrak Y_{3}$ (we would have $x_1=\bot$ and $x_2,x_3$ substituted instead of $x_1,x_2$). If $\sigma(x_1)$ (that is $x_1$) is true at any node of any $k$-model $\mathfrak M_k$ over $\mathfrak{Y}_3+$, then so are $\sigma(x_2)$ and $\sigma(x_3)$. Thus, if we characterize all p-irreducible $k$-models, we get $\circ \ 111$ and $\circ \ 011$, as models over $\mathfrak L_1$, and all $\sigma$-models as we got for $\mathfrak Y_{3}$ (where valuations take the form $0ij$ instead of $ij$) with additional top node where the valuation is $111$. It means that $\sigma$ is a unifier for $x_2\lor x_3$ (instead of $x_1\lor x_2$ as we had in $\mathfrak Y_{3}$). Moreover, repeating the above argument we get two $m$-models over $\mathfrak R_2$ such that\\ \unitlength1cm \begin{picture}(1,2) \thicklines \put(-0.3,1){\mbox{$G \Bigl($}} \put(1.5,2){\circle{0.1}} \put(1.5,0){\circle{0.1}} \put(1.9,0.1){\mbox{$0\mathfrak{f}^m_{1}$}} \put(0.5,1){\circle{0.1}} \put(2.5,1){\circle{0.1}} \put(0.8,1){\mbox{$0\mathfrak{g}^m$}} \put(1.7,1){\mbox{$0\mathfrak{g}^m$}} \put(1.5,0){\vector(1,1){0.9}} \put(1.5,0){\vector(-1,1){0.9}} \put(0.5,1){\vector(1,1){0.9}} \put(2.5,1){\vector(-1,1){0.9}} \put(1.6,2){\mbox{$1\cdots1$}} \put(2.8,1){\mbox{$\Bigr)=$}} \put(4.5,0){\circle{0.1}} \put(4.9,0){\mbox{$001$}} \put(3.5,1){\circle{0.1}} \put(5.5,1){\circle{0.1}} \put(3.8,1){\mbox{$011$}} \put(4.8,1){\mbox{$011$}} \put(4.5,0){\vector(1,1){0.9}} \put(4.5,0){\vector(-1,1){0.9}} \put(3.5,1){\vector(1,1){0.9}} \put(5.5,1){\vector(-1,1){0.9}} \put(4.5,2){\circle{0.1}} \put(4.9,2){\mbox{$111$}} \put(5.7,1){\mbox{;}} \put(6.2,1){\mbox{$G \Bigl($}} \put(8,2){\circle{0.1}} \put(8,0){\circle{0.1}} \put(8.4,2.1){\mbox{$1\cdots1$}} \put(8.4,0.1){\mbox{$0\mathfrak{f}^m_{1'}$}} \put(7,1){\circle{0.1}} \put(9,1){\circle{0.1}} \put(7.3,1){\mbox{$0\mathfrak{g}^m$}} \put(8.1,1){\mbox{$0\mathfrak{g}^m$}} \put(8,0){\vector(1,1){0.9}} \put(8,0){\vector(-1,1){0.9}} \put(7,1){\vector(1,1){0.9}} \put(9,1){\vector(-1,1){0.9}} \put(9.2,1){\mbox{$\Bigr)=$}} \put(11,2){\circle{0.1}} \put(11,0){\circle{0.1}} \put(11.3,2){\mbox{$111$}} \put(11.3,0){\mbox{$010$}} \put(10,1){\circle{0.1}} \put(12,1){\circle{0.1}} \put(10.3,1){\mbox{$011$}} \put(11.3,1){\mbox{$011$}} \put(11,0){\vector(1,1){0.9}} \put(11,0){\vector(-1,1){0.9}} \put(10,1){\vector(1,1){0.9}} \put(12,1){\vector(-1,1){0.9}} \end{picture} $$\unitlength1cm \begin{picture}(6,2.8) \thicklines \put(0,3){Thus,} \put(2,2){\circle{0.1}} \put(4,2){\circle{0.1}} \put(2,1){\circle{0.1}} \put(4,1){\circle{0.1}} \put(3,0){\circle{0.1}} \put(3,3){\circle{0.1}} \put(4.2,0.9){\mbox{$0\mathfrak{f}^m_{1}$}} \put(4.2,1.4){\mbox{$\qquad \Bigr)\quad=$}} \put(1.4,0.9){\mbox{$0\mathfrak{f}^m_{1'}$}} \put(0.7,1.4){\mbox{$G \Bigl($}} \put(3.3,0){\mbox{${0\cdots0}$}} \put(3.3,3){\mbox{${1\cdots1}$}} \put(1.4,2){\mbox{$0\mathfrak{g}^m$}} \put(4.2,2){$0\mathfrak{g}^m$ } \put(2,1){\vector(0,1){0.9}} \put(4,1){\vector(0,1){0.9}} \put(2,1){\vector(2,1){1.9}} \put(4,1){\vector(-2,1){1.9}} \put(3,0){\vector(1,1){0.9}} \put(3,0){\vector(-1,1){0.9}} \put(2,2){\vector(1,1){0.9}} \put(4,2){\vector(-1,1){0.9}} \put(8,3){\circle{0.1}} \put(7,2){\circle{0.1}} \put(9,2){\circle{0.1}} \put(7,1){\circle{0.1}} \put(9,1){\circle{0.1}} \put(8,0){\circle{0.1}} \put(9.2,0.9){\mbox{$001$}} \put(6.2,0.9){\mbox{$010$}} \put(8.3,0){\mbox{$000$}} \put(6.2,2){\mbox{$011$}} \put(9.2,2){$011$ } 
\put(8.3,3){\mbox{$111$}} \put(7,1){\vector(0,1){0.9}} \put(9,1){\vector(0,1){0.9}} \put(7,1){\vector(2,1){1.9}} \put(9,1){\vector(-2,1){1.9}} \put(8,0){\vector(1,1){0.9}} \put(8,0){\vector(-1,1){0.9}} \put(7,2){\vector(1,1){0.9}} \put(9,2){\vector(-1,1){0.9}} \end{picture}\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad$$ which would mean that $\sigma$ is not a unifier for $x_2\lor x_3$, a contradiction.\\ In the case of $\mathsf L(\mathfrak{Y}_2+)$, we have $sm(\mathfrak Y_{2}+)=\{\mathfrak L_1,\mathfrak L_2,\mathfrak L_3,\mathfrak L_4,\mathfrak R_{2},+\mathfrak R_{2},\mathfrak R_{2}+,\mathfrak G_{3}+\}$. We take $n=2$ and consider the substitution $\sigma\colon\{x_1,x_2\}\to \mathsf{Fm}^k$ defined as follows $$ \begin{array}{rl} \sigma(x_1)=& x_1\\ \sigma(x_2)= & \Bigl(\bigl(( \bigvee_{i=2}^kx_i)\to x_1\bigr)\to x_1\Bigr) \ \land \ \bigwedge_{i=2}^k\Bigl(\bigl((x_i\to x_1)\to x_1\bigr)\lor (x_i\to x_1)\Bigr). \end{array} $$ If we take $\alpha\circ\sigma$, where $\alpha:x_1\slash\bot$ we (almost) get the substitution $\sigma$ as used for $\mathfrak Y_{2}$. If $x_1$ is false at the top element of any $2$-model $\mathfrak M^2$ over $\mathfrak Y_{2}+$, then $ \sigma(x_2)$ is true at the model and hence $\sigma(\mathfrak M^2)$ reduces to a model over $\mathfrak L_1$. Then we can repeat our argument we have used for $\mathsf L(\mathfrak{Y}_2)$ (similarly as we did in the case of $ \mathfrak{Y}_3+$ where we reduced our argument to $ \mathfrak Y_3$). \footnote{ We think that one can show in this way that adding top element to $\mathfrak F$, for any finite frame $\mathfrak F$, does not improve the unification type if $\mathsf L(\mathfrak{F})$ has nullary unification.} \end{proof} Nodes of the graph in Figure \ref{ki} represent p-morphic images of $\mathfrak G_1$, see Figure \ref{GF} (or \ref{NU} for $\mathfrak G_{3\mathfrak L_2}$ and $\mathfrak G_{3\mathfrak F_2}$), and edges p-morphisms between the frames. We omit, as usual, p-morphisms that are compositions of other p-morphisms. The graph represents also all (consistent) $H$-complete extensions of $\mathsf H_3\mathsf B_2$; each node is the logic $\mathsf L(\mathfrak{F})$ of the frame and the edges mean inclusions. Logics with nullary unification are denoted by a black square. There are 14 logics and 5 of them have nullary unification. 
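The edges of the graph in Figure \ref{ki} are p-morphisms and its nodes are the p-morphic images (reducts) of $\mathfrak G_1$. For frames of this size such reducts can, in principle, be found mechanically. The fragment below is again only an illustration, under the same assumed encoding of frames as in the earlier sketch: it tests the ``forth'' and ``back'' conditions together with surjectivity, and then searches for a surjective p-morphism by brute force; the frames themselves (for instance $\mathfrak G_1$) would of course have to be encoded by hand.
\begin{verbatim}
# A minimal sketch (illustration only): p-morphisms between finite frames.
# As before, succ_F[w] lists every v with w R v in F (including w itself),
# and likewise succ_G for G; p is a dict sending nodes of F to nodes of G.
from itertools import product

def is_p_morphism(succ_F, succ_G, p):
    onto = set(p.values()) == set(succ_G)
    forth = all(p[v] in succ_G[p[w]]
                for w in succ_F for v in succ_F[w])
    back = all(any(p[v] == u for v in succ_F[w])
               for w in succ_F for u in succ_G[p[w]])
    return onto and forth and back

def is_reduct(succ_F, succ_G, root_F, root_G):
    """Brute force: is G a p-morphic image of the rooted frame F?"""
    nodes_F, nodes_G = list(succ_F), list(succ_G)
    for values in product(nodes_G, repeat=len(nodes_F)):
        p = dict(zip(nodes_F, values))
        # for rooted frames the root has to be sent to the root
        if p[root_F] == root_G and is_p_morphism(succ_F, succ_G, p):
            return True
    return False

# Toy example: gluing the end nodes of F_2 yields the chain L_2, so L_2 is
# a reduct of F_2, while F_2 is not a reduct of L_2.
F2 = {0: [0, 1, 2], 1: [1], 2: [2]}
L2 = {0: [0, 1], 1: [1]}
print(is_p_morphism(F2, L2, {0: 0, 1: 1, 2: 1}))         # True
print(is_reduct(F2, L2, 0, 0), is_reduct(L2, F2, 0, 0))  # True False
\end{verbatim}
For models rather than bare frames one additionally requires that the map preserve the valuations, which is the version of the notion used in the proofs of this section.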
\begin{figure}[H] \unitlength1cm \thicklines \begin{picture}(6,6) \put(6,0){\circle{0.1}} \put(5.9,0.3){$\mathfrak G_1$} \put(3.9,0.9){$\blacksquare$} \put(3,1){$\mathfrak G_{3\mathfrak F_2}$} \put(8,1){\circle{0.1}} \put(8.3,1){$\mathfrak C_{5}$} \put(2.9,1.9){$\blacksquare$} \put(2,2.1){$\mathfrak G_{3\mathfrak L_2}$} \put(5,2){\circle{0.1}} \put(5.3,2){$\mathfrak G_{2}$} \put(6.9,1.9){$\blacksquare$} \put(7.3,2){$\mathfrak Y_{2}$} \put(8.9,1.9){$\blacksquare$} \put(9.3,2){$\mathfrak Y_{3}$} \put(3.9,2.9){$\blacksquare$} \put(3.3,3){$\mathfrak G_{3}$} \put(6,3){\circle{0.1}} \put(6.2,3){$+\mathfrak F_2$} \put(8,3){\circle{0.1}} \put(8.2,3){$\mathfrak R_{2}$} \put(5,4){\circle{0.1}} \put(4.5,4){$\mathfrak F_2$} \put(7,4){\circle{0.1}} \put(7.2,4){$\mathfrak L_{3}$} \put(6,5){\circle{0.1}} \put(6.2,5){$\mathfrak L_{2}$} \put(6,6){\circle{0.1}} \put(6.2,6){$\mathfrak L_{1}$} \put(6,0){\vector(-2,1){1.9}} \put(6,0){\vector(2,1){1.9}} \put(4,1){\vector(-1,1){0.9}} \put(4,1){\vector(1,1){0.9}} \put(8,1){\vector(-1,1){0.9}} \put(8,1){\vector(1,1){0.9}} \put(3,2){\vector(1,1){0.9}} \put(3,1.9){\vector(4,1){4.7}} \put(5,2){\vector(1,1){0.9}} \put(5,2){\vector(-1,1){0.9}} \put(7,2){\vector(1,1){0.9}} \put(7,2){\vector(-1,1){0.9}} \put(9,2){\vector(-1,1){0.9}} \put(9,2){\vector(-3,1){2.9}} \put(4,3){\vector(1,1){0.9}} \put(4,3){\vector(3,1){2.9}} \put(6,3){\vector(1,1){0.9}} \put(6,3){\vector(-1,1){0.9}} \put(8,3){\vector(-1,1){0.9}} \put(5,4){\vector(1,1){0.9}} \put(7,4){\vector(-1,1){0.9}} \put(6,5){\vector(0,1){0.9}} \put(4.2,1){\vector(3,1){2.8}} \end{picture} \caption{Reducts of $\mathfrak G_1$. }\label{ki} \end{figure}
\noindent Admittedly, the picture is somewhat chaotic. Extensions of finitary\slash unitary logics may have nullary unification; intersections of some finitary logics are nullary. The chaos increases if we add intersections of the logics, see Figure \ref{ti}. We can only try to identify maximal logics with nullary unification.
\subsection{Hereditary Finitary Unification}\label{HFU} Let us say that unification in a logic {\sf L} is {\it hereditary finitary} if any extension of {\sf L}, including {\sf L} itself, has finitary (or unitary) unification. Unification in {\sf L} is {\it hereditary unitary} if any extension of {\sf L} has unitary unification, and {\sf L} has {\it hereditary projective approximation} if each of its extensions has projective approximation. By Citkin \cite{Tsitkin},
\begin{theorem}\label{Tsit} $(\mathsf L(\mathfrak C_4),\mathsf L(\chi (\mathfrak C_4)))$ (where \ $\mathfrak C_4={\mathfrak G_3}+$, \ see Figures \ref{GF} and \ref{TF}) is a splitting pair in the lattice of intermediate logics (see Theorem \ref{Jankov}) and the logic $\mathsf L(\chi (\mathfrak C_4))$ is locally tabular. Thus, if an intermediate logic {\sf L} omits ${\mathfrak G_3}+$, then {\sf L} is locally tabular.\end{theorem}
We know that $(\mathsf L (\mathfrak C_3),\mathsf {SL})$ (where $\mathsf {SL}$ is Scott logic, see Figure \ref{ILs}, and ${\mathfrak C_3}={\mathfrak G_3}$) is a splitting pair in the lattice of extensions of {\sf INT}. It means that logics omitting ${\mathfrak C_3}$ need not be locally tabular; $\mathsf{SL}$ is an example (the unification type of $\mathsf{SL}$ is not known; we conjecture that it is $\omega$).
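The classes $\mathbf H_{pa}$ and $\mathbf H_{un}$ considered next, as well as the map $p(w)=d_{\mathfrak F}(w)$ appearing in the proof of Theorem \ref{iun} below, are organized around the depth function on a finite frame. The fragment below is once more only an illustration of that notion, under the same assumed encoding as in the previous sketches: maximal nodes receive depth $1$, and the root of a frame of depth $d$ receives depth $d$.
\begin{verbatim}
# A minimal sketch (illustration only): the depth d(w) of a node of a finite
# frame, i.e. the length of a longest chain starting at w, so that maximal
# nodes have depth 1.  As before, succ[w] lists every v with w R v, incl. w.
def depth(succ, w):
    proper = [v for v in succ[w] if v != w]
    if not proper:
        return 1
    return 1 + max(depth(succ, v) for v in proper)

# Toy example: a diamond-shaped frame (root 0 below the incomparable nodes
# 1 and 2, which lie below the top node 3).
diamond = {0: [0, 1, 2, 3], 1: [1, 3], 2: [2, 3], 3: [3]}
print({w: depth(diamond, w) for w in diamond})   # {0: 3, 1: 2, 2: 2, 3: 1}
\end{verbatim}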
By Theorem \ref{lmk}, all logics determined by frames $\mathbf H_{pa}$ in Figure \ref{hpa} (which are $\mathfrak L_d+\mathfrak F_m$, with $m,d\geq 0$, where we agree that $\mathfrak L_0+\mathfrak F_m=\mathfrak F_m$, $\mathfrak F_1=\mathfrak L_2$ and $\mathfrak F_0=\mathfrak L_1$) have projective approximation.
\begin{theorem}\label{ipa} For any intermediate logic {\sf L}, the following conditions are equivalent:\\ (i) {\sf L} has hereditary projective approximation;\\ (ii) $\mathsf L(\mathbf H_{pa})\subseteq \mathsf L$;\\ (iii) $\mathsf L$ omits $\mathfrak G_3$ and $\mathfrak R_2$;\\ (iv) $ \chi ({\mathfrak G_3}), \chi ({\mathfrak R_2}) \in \mathsf L$. \end{theorem}
\begin{proof} (ii)$\Rightarrow$(i). Since $sm(\mathbf H_{pa})\subseteq\mathbf H_{pa}$, all extensions of $\mathsf L(\mathbf H_{pa})$ have projective approximation, by Theorem \ref{lf7}. Thus, $\mathsf L(\mathbf H_{pa})$ and each of its extensions enjoy hereditary projective approximation.\\ (i)$\Rightarrow$(iii). We know that $\mathsf L(\mathfrak G_3)$ has nullary unification (see Theorem \ref{F6m}). Thus, if $\mathfrak G_3$ were a frame of {\sf L}, the logic {\sf L} would have a nullary extension. Therefore {\sf L} must omit $\mathfrak G_3$. It must also omit $\mathfrak R_2$ as $\mathsf L(\mathfrak R_2)$ is unitary and non-projective, hence it cannot have projective approximation, by Corollary \ref{up1}.\\ (iii)$\Rightarrow$(ii). Assume that $\mathsf L$ omits $\mathfrak G_3$ and $\mathfrak R_2$. If {\sf L} omits $\mathfrak R_2$, then it also omits ${\mathfrak G_3}+$ and hence {\sf L} is locally tabular, by Theorem \ref{Tsit}. Thus, we can assume {\sf L}={\sf L}({\bf F}) for some family {\bf F} of finite frames. For each $\mathfrak F\in\mathbf F$, let $\mathfrak F^\star$ denote its p-morphic image obtained by gluing all its end elements (the relation of gluing end elements is obviously a bisimulation of ${\mathfrak F}$). Thus, $\mathsf L(\{\mathfrak F^\star\colon\mathfrak F\in \mathbf F\})$ is an extension of $\mathsf L$ and it must be an extension of {\sf KC}. As {\sf{L}(\bf F)} omits $\mathfrak R_2$, we infer $\mathsf L(\{\mathfrak F^\star\colon\mathfrak F\in \mathbf F\})$ is a logic with projective unification\footnote{We use again the known fact (see \cite{Rautenberg}) that $(\mathsf L (\mathfrak R_2),\mathsf{LC})$ is a splitting pair for all extensions of $\mathsf{KC}$.}. It means $\mathfrak F^\star$, for each $\mathfrak F\in \mathbf F$, must be a chain. Since {\sf{L}(\bf F)} omits $\mathfrak G_3$, we conclude each element of {\bf F} must be of the form $\mathfrak L_d+\mathfrak F_m$ for some $m,d\geq 0$.\\ (iii)$\Leftrightarrow$(iv) by Theorem \ref{Jankov}.\end{proof}
Projective approximation is not hereditary: intuitionistic logic {\sf INT} enjoys projective approximation, see \cite{Ghi2}, but some of its extensions do not. We conjecture that projective approximation is hereditary in locally tabular logics. The above theorem does not settle the question of whether there are locally tabular logics with projective approximation which are not extensions of $\mathsf L(\mathbf H_{pa})$.
\begin{corollary}\label{ipa2} $\mathsf L(\mathbf H_{pa})$ is the least intermediate logic with hereditary projective approximation and $\mathsf L(\mathbf H_{pa}) = \mathsf{L}(\{\chi ({\mathfrak G_3}), \chi ({\mathfrak R_2}) \})$.
\end{corollary}
By Theorem \ref{wpl}, we know that all logics determined by frames $\mathbf H_{un}$ in Figure \ref{hpa} (which are $\mathfrak L_{d}+\mathfrak R_{m}$, with $d,m\geq 0$, where we agree that $\mathfrak L_{0}+\mathfrak R_{m}=\mathfrak R_{m}$ and $\mathfrak R_{0}=\mathfrak L_{1}$) have hereditary unitary unification.
\begin{theorem}\label{iun} For any intermediate logic {\sf L}, the following conditions are equivalent:\\ (i) {\sf L} has hereditary unitary unification;\\ (ii) $\mathsf L(\mathbf H_{un})\subseteq \mathsf L$;\\ (iii) $\mathsf L$ omits the frames $\mathfrak R_2+$, $\mathfrak G_3+$ and $\mathfrak F_2$;\\ (iv) $ \chi ({\mathfrak G_3+}), \chi ({\mathfrak R_2+}), \chi ({\mathfrak F_2}) \in \mathsf L$. \end{theorem}
\begin{proof} (i)$\Leftarrow$(ii) follows from Theorem \ref{wpl}; note that the class $\mathbf H_{un}$ is closed under p-morphic images and generated subframes, and then apply Theorem \ref{lf7}.\\ (i)$\Rightarrow$(iii). Since $\mathfrak F_2+\mathfrak F_1=\mathfrak F_2+\mathfrak L_2=\mathfrak R_2+$ and $\mathsf L(\mathfrak F_2+\mathfrak F_1)$ has nullary unification (by Theorem \ref{infty}), {\sf L} omits $\mathfrak R_2+$. We know that $\mathsf L(\mathfrak G_3+)$ has nullary unification (see \cite{Ghi5}) and $\mathsf L(\mathfrak F_2)$ is not unitary, hence {\sf L} must omit $\mathfrak G_3+$ and $\mathfrak F_2$.\\ (ii)$\Leftarrow$(iii). Assume that $\mathsf L$ omits $\mathfrak R_2+$, $\mathfrak G_3+$ and $\mathfrak F_2$. Since {\sf L} omits ${\mathfrak G_3}+$, {\sf L} is locally tabular, by Theorem \ref{Tsit}. Thus, {\sf L}={\sf L}({\bf F}) for some family {\bf F} of finite frames such that {\it sm}({\bf F})={\bf F}. Since {\sf L} omits ${\mathfrak F_2}$, all frames in {\bf F} are {\sf KC}-frames. Let $\mathfrak F=(W,R,w_0)\in\mathbf F$ and $d(\mathfrak F)=d$. We define a p-morphism $p\colon\mathfrak F\to\mathfrak L_d$ by taking $p(w)=d_{\mathfrak F}(w)$, for any $w\in W$. There is only one element in $W$ of depth $1$, let it be denoted by $w_1$, and there is only one element of depth $d$ (which is $w_0$). Every $w\in W$ sees $w_1$, that is $wRw_1$. Let $d(w_2)=i$ and $d(w_3)=i-1$, for some $w_2,w_3\in W$ and $i>1$. We prove that $w_2$ sees $w_3$. Suppose it does not. By the definition of the depth function, there is an element $w_4\in W$ such that $d(w_4)=i-1$ and $w_2$ sees $w_4$. One can take a bisimulation gluing all elements of depth $<i-1$ (with $w_1$). Since {\bf F} is closed under p-morphic images, we may assume that the resulting frame (after gluing all elements of depth $<i-1$) is our $\mathfrak F$. Thus, we have $w_1,w_2,w_3\in W$ which could give (as we shall see) $\mathfrak G_3+$ (as a p-morphic image of $\mathfrak F$) if we add the root $w_0$, see the first frame in Figure \ref{ppp}.
\begin{figure} \unitlength1cm \thicklines \begin{picture}(0,3)
\put(5.5,0){$w_0$} \put(6.5,3){$w_1$} \put(4.5,1){$w_2$} \put(7.5,2){$w_3$} \put(5.5,2){$w_4$} \put(6.5,1){$w_5$}
\put(6,0){\vector(-1,1){0.9}} \put(6,0){\vector(1,1){0.9}} \put(7,1){\vector(1,1){0.9}}
\put(5,1){\circle{0.1}} \put(8,2){\circle{0.1}} \put(6,0){\circle{0.1}} \put(7,1){\circle{0.1}} \put(6,2){\circle{0.1}}
\put(5,1){\vector(1,1){0.9}} \put(7,1){\vector(-1,1){0.9}}
\put(7,3){\circle{0.1}}
\put(6,2){\vector(1,1){0.9}} \put(8,2){\vector(-1,1){0.9}}
\put(10.5,0){$w_0$} \put(10.5,3){$w_1$} \put(9.5,1){$w_2$} \put(11.5,2){$w_3$} \put(9.5,2){$w_4$} \put(11.5,1){$w_5$}
\put(10,2){\circle{0.1}} \put(12,2){\circle{0.1}} \put(10,1){\circle{0.1}} \put(12,1){\circle{0.1}} \put(11,0){\circle{0.1}} \put(11,3){\circle{0.1}}
\put(10,1){\vector(0,1){0.9}} \put(12,1){\vector(0,1){0.9}} \put(10,1){\vector(2,1){1.9}} \put(12,1){\vector(-2,1){1.9}} \put(11,0){\vector(1,1){0.9}} \put(11,0){\vector(-1,1){0.9}} \put(10,2){\vector(1,1){0.9}} \put(12,2){\vector(-1,1){0.9}}
\put(0.4,2){$w_4$} \put(2.4,2){$w_3$} \put(0.4,1){$w_2$} \put(1.3,3){$w_1$} \put(1.3,0){$w_0$}
\put(2,0){\vector(-1,1){0.9}} \put(2,0){\vector(1,2){0.9}} \put(1,1){\vector(0,1){0.9}}
\put(1,2){\circle{0.1}} \put(3,2){\circle{0.1}} \put(2,0){\circle{0.1}} \put(1,1){\circle{0.1}} \put(2,3){\circle{0.1}}
\put(3,2){\vector(-1,1){0.9}} \put(1,2){\vector(1,1){0.9}}
\end{picture} \caption{}\label{ppp} \end{figure}
\noindent We can use {\it sm}({\bf F})={\bf F} again to assume that no element of $W$ other than $w_0$ sees both $w_2$ and $w_3$; if this were not the case, we would replace $\mathfrak F$ with its subframe generated by an $R$-maximal element that sees both $w_2$ and $w_3$. There could be other elements in $\mathfrak F$, but they can all be identified (by a bisimulation) with one of $\{w_0,w_1,w_2,w_3,w_4\}$. For instance, all elements $w\in W$ that see only $w_1$ (and do not see any of $w_0,w_2,w_3,w_4$) can be identified with $w_1$, etc. This identification gives us $\mathfrak G_3+$ as a p-morphic image of $\mathfrak F$ (the first frame in Figure \ref{ppp}) provided that there is no element $w_5\in W$ which sees both $w_3$ and $w_4$ but does not see $w_2$. If there were such an element, we would get the second frame in Figure \ref{ppp}. But this is $\mathfrak Y_2+$, and $\mathfrak G_3+$ is its p-morphic image. Thus, in each case, $\mathfrak G_3+$ would belong to $\mathbf F$, which is impossible. We have therefore shown that any element of depth $i$ sees any element of depth $i-1$ in $\mathfrak F$. Suppose now that we have two (or more) elements of depth $i$ (say $w_2$ and $w_5$) and two (or more) elements of depth $i-1$ (say $w_3$ and $w_4$). Then we would get (as a generated subframe of a p-morphic image of $\mathfrak F$) the third frame in Figure \ref{ppp}. Thus, $\mathfrak Y_2+$ would belong to {\bf F}, which is impossible as {\sf L} omits $\mathfrak R_2+$. We conclude that each frame $\mathfrak F\in \mathbf F$ must be of the form $\mathfrak F_{n_1}+\cdots+\mathfrak F_{n_s}+$, for some $n_1,\dots,n_s\geq 0$. But {\sf L} omits $\mathfrak R_2+$, hence $n_1=n_2=\cdots=n_{s-1}=1$ and, consequently, each $\mathfrak F\in \mathbf F$ must be of the form $\mathfrak L_{d}+\mathfrak R_{m}$, for some $d,m\geq 0$.
Thus, we have $\mathbf F \subseteq\mathbf H_{un}$ and $\mathsf L(\mathbf H_{un})\subseteq \mathsf L$ as required.\end{proof}

\begin{corollary}\label{iun2} $\mathsf L(\mathbf H_{un})$ is the least intermediate logic with hereditary unitary unification and we have $\mathsf L(\mathbf H_{un}) = \mathsf{L}(\{\chi ({\mathfrak G_3+}), \chi ({\mathfrak R_2+}), \chi ({\mathfrak F_2}) \}) =\mathsf L( \mathsf{KC} \cup \{\chi ({\mathfrak G_3+}), \chi ({\mathfrak R_2+}) \})$. \end{corollary}

Note that $\mathbf H_{pa}\cap\mathbf H_{un}$ consists of chains only. This is in accordance with Corollary \ref{up1}, which says that unitary logics with projective approximation are projective.

\begin{theorem}\label{ihf} For any intermediate logic {\sf L}, the following conditions are equivalent:\\ (i) {\sf L} has hereditary finitary unification;\\ (ii) {\sf L} has hereditary projective approximation or {\sf L} has hereditary unitary unification;\\ (iii) $\mathsf L$ omits $\mathfrak R_{2}+, \mathfrak G_3, \mathfrak G_{3}+$ and one of the frames $\{\mathfrak F_2,\mathfrak R_2\}$;\\ (iv) $\chi ({\mathfrak G_3}), \chi ({\mathfrak G_3+}), \chi ({\mathfrak R_2+})\in \mathsf L$ and either $\chi ({\mathfrak R_2}) \in \mathsf L$ or $\chi ({\mathfrak F_2}) \in \mathsf L$. \end{theorem}

\begin{proof} If $\mathsf L$ has hereditary finitary unification, then $\mathsf L$ omits $\mathfrak R_{2}+$ (by Theorem \ref{infty}), $\mathsf L$ omits $\mathfrak G_3$ and $\mathfrak G_{3}+$ (as $\mathsf L(\mathfrak G_3)$ and $\mathsf L(\mathfrak G_{3}+)$ are nullary, by Theorem \ref{F6m}), and it omits one of the frames $\{\mathfrak F_2,\mathfrak R_2\}$, by Theorem \ref{L7}; this gives (i)$\Rightarrow$(iii). If (iii) holds, then {\sf L} has hereditary projective approximation or hereditary unitary unification (depending on whether {\sf L} omits $\mathfrak R_2$ or $\mathfrak F_2$), by Theorems \ref{ipa} and \ref{iun}; this gives (iii)$\Rightarrow$(ii). The implication (ii)$\Rightarrow$(i) is immediate, since both hereditary projective approximation and hereditary unitary unification imply hereditary finitary unification, and (iii)$\Leftrightarrow$(iv) follows from Theorem \ref{Jankov}. \end{proof}

All pretabular intermediate logics have hereditary finitary unification. More specifically, {\sf LC} enjoys projective unification, {\sf LJ} has hereditary projective approximation and {\sf LH} has hereditary unitary unification.

\begin{corollary}\label{ihf2} An intermediate logic {\sf L} has hereditary finitary unification iff either $\mathsf L(\mathbf H_{un})\subseteq \mathsf L$ or $\mathsf L(\mathbf H_{pa})\subseteq \mathsf L$. Thus, there are exactly two minimal intermediate logics in the family of logics enjoying hereditary finitary unification. \end{corollary}

\begin{corollary}\label{L11} There are exactly four maximal intermediate logics with nullary unification. They are ${\mathsf L}(\mathfrak R_{2}+)$, ${\mathsf L}(\mathfrak R_{2})\cap{\mathsf L}(\mathfrak F_{2})$, ${\mathsf L}(\mathfrak G_{3})$ and ${\mathsf L}(\mathfrak G_{3}+)$; see Figure \ref{ci}\footnote{The second graph in Figure \ref{ci} represents an unrooted frame.}.
\end{corollary}

\begin{figure}[H] \unitlength1cm \thicklines \begin{picture}(0,2.5)
\put(0,0){$ \mathfrak R_{2}+$}
\put(1,0){\vector(-1,1){0.9}} \put(1,0){\vector(1,1){0.9}} \put(2,1){\vector(-1,1){0.9}}
\put(0,1){\circle{0.1}} \put(1,2){\circle{0.1}} \put(1,0){\circle{0.1}} \put(2,1){\circle{0.1}}
\put(0,1){\vector(1,1){0.9}}
\put(1,3){\circle{0.1}}
\put(1,2){\vector(0,1){0.9}}
\put(3.5,0){$ \mathfrak R_{2}\cup\mathfrak F_{2}$}
\put(4,1){\circle{0.1}} \put(5,0){\circle{0.1}} \put(5,1){\circle{0.1}} \put(5,2){\circle{0.1}} \put(6,1){\circle{0.1}} \put(6,0){\circle{0.1}} \put(7,1){\circle{0.1}}
\put(5,0){\vector(-1,1){0.9}} \put(5,0){\vector(1,1){0.9}} \put(6,0){\vector(-1,1){0.9}} \put(6,0){\vector(1,1){0.9}} \put(4,1){\vector(1,1){0.9}} \put(6,1){\vector(-1,1){0.9}}
\put(8,0){$\mathfrak{G}_3$}
\put(9,0){\vector(-1,1){0.9}} \put(9,0){\vector(1,1){0.9}} \put(10,1){\vector(0,1){0.9}}
\put(8,1){\circle{0.1}} \put(10,2){\circle{0.1}} \put(9,0){\circle{0.1}} \put(10,1){\circle{0.1}}
\put(10.8,0){${\mathfrak{G}_3}+$}
\put(12,0){\vector(-1,1){0.9}} \put(12,0){\vector(1,1){0.9}} \put(13,1){\vector(0,1){0.9}}
\put(11,1){\circle{0.1}} \put(13,2){\circle{0.1}} \put(12,0){\circle{0.1}} \put(13,1){\circle{0.1}} \put(12,3){\circle{0.1}}
\put(13,2){\vector(-1,1){0.9}} \put(11,1){\vector(1,2){0.9}}
\end{picture} \caption{Frames of Maximal Logics with Nullary Unification.}\label{ci} \end{figure}
\noindent As in Theorem \ref{kc}, we can use a particular para-splitting\footnote{A `join' of the two join-splittings given by Theorems \ref{ipa} and \ref{iun}, that is, $(\{{\mathsf L}(\mathfrak R_{2}+), {\mathsf L}(\mathfrak F_{2}), {\mathsf L}(\mathfrak G_{3}+)\},\mathsf L(\mathbf H_{un}))$ and $(\{ {\mathsf L}(\mathfrak R_{2}), {\mathsf L}(\mathfrak G_{3})\},\mathsf L(\mathbf H_{pa}))$.} of the lattice of extensions of {\sf INT}, given by the pair $$(\{{\mathsf L}(\mathfrak R_{2}+), {\mathsf L}(\mathfrak R_{2})\cap{\mathsf L}(\mathfrak F_{2}), {\mathsf L}(\mathfrak G_{3}),{\mathsf L}(\mathfrak G_{3}+)\}\ ,\ \{\mathsf L(\mathbf H_{un}),\mathsf L(\mathbf H_{pa})\})$$ to sum up our results.

\begin{corollary} For each intermediate logic {\sf L}, either {\sf L} includes $\mathsf L(\mathbf H_{un})$ or $\mathsf L(\mathbf H_{pa})$, or {\sf L} is included in one of the logics $\{{\mathsf L}(\mathfrak R_{2}+), {\mathsf L}(\mathfrak R_{2})\cap{\mathsf L}(\mathfrak F_{2}), {\mathsf L}(\mathfrak G_{3}),{\mathsf L}(\mathfrak G_{3}+)\}$. If {\sf L} includes $\mathsf L(\mathbf H_{un})$, its unification is unitary; if {\sf L} includes $\mathsf L(\mathbf H_{pa})$, it has projective approximation. If {\sf L} is included in one of the logics $\{{\mathsf L}(\mathfrak R_{2}+), {\mathsf L}(\mathfrak R_{2})\cap{\mathsf L}(\mathfrak F_{2}), {\mathsf L}(\mathfrak G_{3}),{\mathsf L}(\mathfrak G_{3}+)\}$, its unification type is not determined, but {\sf L} has an extension with nullary unification.\end{corollary}

\begin{corollary}\label{pafiun} It is decidable whether a recursive intermediate logic enjoys hereditary projective approximation, hereditary unitary unification or hereditary finitary unification. \end{corollary}

We claim that most intermediate logics have nullary unification. To give some evidence supporting our claim, let us consider the lattice of all extensions of $\mathsf H_3\mathsf B_2$ ($=\mathsf L(\mathfrak G_1)$). Figure \ref{ki} contains all H-complete extensions of $\mathsf H_3\mathsf B_2$. Adding all intersections of the logics included in Figure \ref{ki}, we get the lattice in Figure \ref{ti} below.
There are 42 logics and 31 of them have nullary unification. Logics with hereditary finitary unification (there are 7 such logics) are located at the top of the picture. The logic $\mathsf L(\mathbf H_{un})$ is represented by $\mathsf L(\mathfrak R_2)$, and $\mathsf L(\mathbf H_{pa})$ by $\mathsf L(+\mathfrak F_2)$. We have two (out of four) maximal logics with nullary unification in the picture: $\mathsf L({\mathfrak{G}_3})$ and $\mathsf L(\mathfrak R_{2})\cap\mathsf L(\mathfrak F_{2})$. The appearance of 4 logics with finitary unification, namely $\mathsf L(\mathfrak G_{1})$, $\mathsf L(\mathfrak G_{3})$, $\mathsf L(\mathfrak C_5)$ and $\mathsf L(\mathfrak G_{3})\cap \mathsf L(\mathfrak C_5)$ (see Theorem \ref{c5}, Theorem \ref{L8i} and the comments after the theorem), is quite mysterious. They cannot be told apart from the nullary ones by their location in the lattice of all logics; rather, they are isolated points among the majority of logics with nullary unification. Unexpectedly, there are no logics with unitary unification other than the extensions of $\mathsf L(\mathfrak R_2)$, which is due (we think) to the fact that our sample is not representative enough. It would be much better if we had considered extensions of $\mathsf H_4\mathsf B_2$ instead, but this was quite beyond our capacity.
\begin{figure}[H] \unitlength1cm \thicklines \begin{picture}(6,12)
\put(6.1,11){\circle{0.1}} \put(6.1,10.5){\circle{0.1}} \put(6.1,10){\circle{0.1}} \put(7.1,10){\circle{0.1}} \put(7.1,9){\circle{0.1}} \put(8.1,9){\circle{0.1}} \put(7.1,8.1){\circle{0.1}} \put(4,6){\circle{0.1}} \put(10,4){\circle{0.1}} \put(6,0){\circle{0.1}}
\put(5.5,11){$\mathfrak L_1$} \put(5.5,10.5){$\mathfrak L_2$} \put(5.5,10){$\mathfrak F_2$} \put(7.3,10){$\mathfrak L_3$} \put(8.3,9){$\mathfrak R_2$} \put(7.2,8){$+\mathfrak F_2$} \put(4.5,8){$\mathfrak G_3$} \put(5.4,6){$\mathfrak G_{3\mathfrak L_2}$} \put(3.5,6){$\mathfrak G_2$} \put(8.4,6){$\mathfrak Y_2$} \put(9.4,6){$\mathfrak Y_3$} \put(10.2,4){$\mathfrak C_5$} \put(3.2,2){$\mathfrak G_{3\mathfrak F_2}$} \put(6.2,0){$\mathfrak G_{1}$}
\put(5.9,0.4){$\blacksquare$} \put(4.8,1){$\blacksquare$} \put(7,1){$\blacksquare$} \put(4.9,2){$\blacksquare$} \put(3.9,2){$\blacksquare$} \put(8,2){$\blacksquare$} \put(7.1,2.1){\circle{0.1}} \put(7,3){$\blacksquare$} \put(4.9,3){$\blacksquare$} \put(3.9,3){$\blacksquare$} \put(8,3){$\blacksquare$} \put(9,3){$\blacksquare$} \put(8,4){$\blacksquare$} \put(3,4){$\blacksquare$} \put(7,4){$\blacksquare$} \put(4.9,4){$\blacksquare$} \put(3.9,4){$\blacksquare$} \put(9,4){$\blacksquare$} \put(8,5){$\blacksquare$} \put(7,5){$\blacksquare$} \put(5,5){$\blacksquare$} \put(3.9,5){$\blacksquare$} \put(9,5){$\blacksquare$} \put(8,6){$\blacksquare$} \put(7,6){$\blacksquare$} \put(5,6){$\blacksquare$} \put(9,6){$\blacksquare$} \put(8,7){$\blacksquare$} \put(7,7){$\blacksquare$} \put(5,7){$\blacksquare$} \put(8,8){$\blacksquare$} \put(5,8){$\blacksquare$}
\put(6,0){\vector(0,1){0.4}} \put(6,0.5){\vector(-2,1){0.9}} \put(6,0.5){\vector(2,1){0.9}} \put(5,1){\vector(-1,1){0.9}} \put(7.1,1.1){\vector(1,1){0.9}} \put(5,1){\vector(0,1){0.9}} \put(7.1,1.1){\vector(0,1){0.9}} \put(7.1,1.1){\vector(-2,1){1.9}} \put(4,2){\vector(0,1){0.9}} \put(5,2.1){\vector(-1,1){0.9}} \put(5,2.1){\vector(0,1){0.9}} \put(5,2.1){\vector(2,1){1.9}} \put(5,2.1){\vector(3,1){2.9}} \put(7.1,2.1){\vector(0,1){0.9}} \put(7.1,2.1){\vector(2,1){1.9}} \put(8.1,2.1){\vector(0,1){0.9}} \put(8.1,2.1){\vector(1,1){0.9}} \put(4,3.1){\vector(3,1){2.9}} \put(4,3.1){\vector(0,1){0.9}}
\put(4,3.1){\vector(-1,1){0.9}} \put(5,3.1){\vector(3,1){2.9}} \put(5,3.1){\vector(0,1){0.9}} \put(5,3.1){\vector(-2,1){1.9}} \put(7,3.1){\vector(2,1){1.9}} \put(7,3.1){\vector(-2,1){1.9}} \put(7,3.1){\vector(-3,1){2.9}} \put(8.1,3.1){\vector(1,1){0.9}} \put(8.1,3.1){\vector(0,1){0.9}} \put(8.1,3.1){\vector(-1,1){0.9}} \put(9.1,3.1){\vector(1,1){0.9}} \put(9.1,3.1){\vector(0,1){0.9}} \put(3.1,4.1){\vector(1,1){0.9}} \put(3.1,4.1){\vector(2,1){1.9}} \put(4.1,4.1){\vector(0,1){0.9}} \put(4.1,4.1){\vector(3,1){2.9}} \put(5.1,4.1){\vector(-1,1){0.9}} \put(5.1,4.1){\vector(3,1){2.9}} \put(7.1,4.1){\vector(0,1){0.9}} \put(7.1,4.1){\vector(-2,1){1.9}} \put(8.1,4.1){\vector(0,1){0.9}} \put(8.1,4.1){\vector(-3,1){2.9}} \put(9.1,4.1){\vector(0,1){0.9}} \put(9.1,4.1){\vector(-1,1){0.9}} \put(9.1,4.1){\vector(-2,1){1.9}} \put(10,4.1){\vector(-1,1){0.9}} \put(4,5.1){\vector(0,1){0.9}} \put(4,5.1){\vector(3,1){2.9}} \put(5.1,5.1){\vector(0,1){0.9}} \put(5.1,5.1){\vector(2,1){1.9}} \put(7.1,5.1){\vector(0,1){0.9}} \put(7.1,5.1){\vector(1,1){0.9}} \put(8.1,5.1){\vector(-1,1){0.9}} \put(8.1,5.1){\vector(1,1){0.9}} \put(9.1,5.1){\vector(0,1){0.9}} \put(9.1,5.1){\vector(-1,1){0.9}} \put(4.1,6.1){\vector(1,1){0.9}} \put(5.1,6.1){\vector(2,1){1.9}} \put(7.1,6.1){\vector(-2,1){1.9}} \put(7.1,6.1){\vector(1,1){0.9}} \put(7.1,6.1){\vector(0,1){0.9}} \put(8.1,6.1){\vector(0,1){0.9}} \put(9.1,6.1){\vector(-1,1){0.9}} \put(5.1,7.1){\vector(0,1){0.9}} \put(5.1,7.1){\vector(2,1){1.9}} \put(7.1,7.1){\vector(1,1){0.9}} \put(7.1,7.1){\vector(-2,1){1.9}} \put(8.1,7.1){\vector(0,1){0.9}} \put(8,7.1){\vector(-1,1){0.9}} \put(5.1,8.1){\vector(2,1){1.9}} \put(7.1,8.1){\vector(0,1){0.9}} \put(8.1,8.1){\vector(-1,1){0.9}} \put(8.1,8.1){\vector(0,1){0.9}} \put(7.1,9.1){\vector(-1,1){0.9}} \put(7.1,9.1){\vector(0,1){0.9}} \put(8.1,9.1){\vector(-1,1){0.9}} \put(6.1,10.1){\vector(0,1){0.4}} \put(7.1,10.1){\vector(-2,1){0.9}} \put(6.1,10.6){\vector(0,1){0.4}}
\end{picture} \caption{Extensions of $\mathsf L(\mathfrak G_1)=\mathsf H_3\mathsf B_2$.}\label{ti} \end{figure}
\noindent To show that some of the logics $\mathsf L(\mathbf F)$ (assuming that {\bf F} is a family of frames for $\mathsf H_3\mathsf B_2$ and {\bf F}={\it sm}({\bf F})) have nullary unification, we need the results of the present paper (and those of \cite{Ghi5, dkw}), as well as their proofs. Thus, if $\mathfrak G_3\in \mathbf F$ but neither $\mathfrak G_2$ nor $\mathfrak G_{\mathfrak F_2}$ is in {\bf F}, we can argue as in the proof of Theorem \ref{F6m}, showing that $\mathsf L(\mathfrak G_3)$ has nullary unification. If $\mathfrak Y_2\in \mathbf F$ but $\mathfrak C_5\not\in \mathbf F$, we can use the proof of Theorem \ref{F6m} concerning $\mathsf L(\mathfrak Y_2)$. If $\mathfrak R_2$ and $\mathfrak F_2$ are in $\mathbf F$ but $\mathfrak Y_2$ is not in $\mathbf F$, we can use fragments of the proof of Theorem \ref{L7} concerning $\mathsf L(\{\mathfrak F_2,\mathfrak R_2\})$. There are, however, some cases in which none of our proofs fits. These are $\mathsf L(\{\mathfrak G_2,\mathfrak C_5,\mathfrak G_{}\})$ and $\mathsf L(\{\mathfrak C_5,\mathfrak G_{3\mathfrak F_2}\})$. Then we define $\sigma\colon\{x_1,x_2\}\to \mathsf{Fm}^k$ as follows:
$$ \sigma(x_1)= \neg\neg x_1\land\bigwedge_{i=1}^k(\neg x_{2i}\lor\neg\neg x_{2i}) \quad \mbox{and}\quad \sigma(x_2)= \neg x_1\land\bigwedge_{i=2}^{k+1}(\neg x_{2i-1}\lor\neg\neg x_{2i-1}); $$
we characterize all $\sigma$-models and easily arrive at a contradiction under the assumption that any of these logics has finitary unification.
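For readers who wish to experiment with such substitutions on concrete finite frames, the following minimal sketch (in Python) implements the standard Kripke forcing clauses for intuitionistic formulas together with the substitution $\sigma$ above; it is an illustration only and plays no role in the argument. The three-element chain and the valuation used in the last lines are hypothetical and merely show how one can list the worlds at which $\sigma(x_1)$ and $\sigma(x_2)$ are forced.
\begin{verbatim}
# Illustration only: standard intuitionistic Kripke forcing on a finite
# reflexive-transitive frame, applied to the substituted formulas
# sigma(x1), sigma(x2) defined in the text.

def forces(up, val, w, f):
    """up[w]: the set of worlds >= w (including w itself);
       val[v]: the set of variable indices true at world v (persistent);
       f: formula built from ('var',i), ('neg',g), ('and',g,h), ('or',g,h)."""
    kind = f[0]
    if kind == 'var':
        return f[1] in val[w]
    if kind == 'and':
        return forces(up, val, w, f[1]) and forces(up, val, w, f[2])
    if kind == 'or':
        return forces(up, val, w, f[1]) or forces(up, val, w, f[2])
    if kind == 'neg':   # w forces ~g  iff  no v >= w forces g
        return all(not forces(up, val, v, f[1]) for v in up[w])
    raise ValueError(kind)

def neg(f):
    return ('neg', f)

def conj(fs):
    out = fs[0]
    for g in fs[1:]:
        out = ('and', out, g)
    return out

def sigma(k):
    """The substitution from the text (propositional variables numbered from 1)."""
    x = lambda i: ('var', i)
    s1 = conj([neg(neg(x(1)))]
              + [('or', neg(x(2*i)), neg(neg(x(2*i)))) for i in range(1, k + 1)])
    s2 = conj([neg(x(1))]
              + [('or', neg(x(2*i - 1)), neg(neg(x(2*i - 1)))) for i in range(2, k + 2)])
    return s1, s2

# Hypothetical example: the three-element chain 0 < 1 < 2 with x1 true from
# world 1 upwards; list the worlds forcing sigma(x1) and sigma(x2) for k = 1.
up = {0: {0, 1, 2}, 1: {1, 2}, 2: {2}}
val = {0: set(), 1: {1}, 2: {1}}
s1, s2 = sigma(1)
print([w for w in up if forces(up, val, w, s1)],
      [w for w in up if forces(up, val, w, s2)])
\end{verbatim}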
We claim that results similar to those given for intermediate logics in the present paper can be proven for transitive modal logics.

\begin{thebibliography}{99}

\bibitem{BaGhi} Baader, F., Ghilardi, S., {\it Unification in modal and description logics}, {\bf Logic Journal of the IGPL} 19(5) (2011), 705--730.

\bibitem{BaSny} Baader, F., Snyder, W., {\it Unification theory}, in: Robinson, A., Voronkov, A. (eds.), {\bf Handbook of Automated Reasoning}, Elsevier Science Publ., MIT (2001), 445--533.

\bibitem{Balb1} Balbiani, P., Gencer, C., Rostamigiv, M., Tinchev, T., {\it About the unification types of the modal logics determined by classes of deterministic frames}, {\bf arXiv preprint arXiv:2004.07904} (2020).

\bibitem{CZ} Chagrov, A., Zakharyaschev, M., {\it Modal Logic}, {\bf Oxford Logic Guides 35} (1997).

\bibitem{dzSpl} Dzik, W., {\it Splittings of Lattices of Theories and Unification Types}, {\bf Proceedings of the Workshop on General Algebra} 70, Verlag Johannes Heyn (2006), 71--81.

\bibitem{uni} Dzik, W., {\it Unification types in logic}, {\bf Silesian University Press}, Katowice (2007).

\bibitem{dw1} Dzik, W., Wojtylak, P., {\it Projective unification in modal logic}, {\bf Logic Journal of the IGPL} 20(1) (2012), 121--153.

\bibitem{dw2} Dzik, W., Wojtylak, P., {\it Modal consequence relations extending S4.3}, {\bf Notre Dame Journal of Formal Logic} 57(4) (2016), 523--549.

\bibitem{dw4} Dzik, W., Wojtylak, P., {\it Unification in superintuitionistic predicate logics and its applications}, {\bf The Review of Symbolic Logic} 12(1) (2019), 37--61.

\bibitem{dkw} Dzik, W., Kost, S., Wojtylak, P., {\it Finitary unification in locally tabular modal logics characterized}, to appear in {\bf Annals of Pure and Applied Logic}.

\bibitem{Esakia} Esakia, L., {\it Intuitionistic logic and weak law of Peirce}, in: {\bf Heyting Algebras: Duality Theory}, eds. G. Bezhanishvili, W. Holliday (1985).

\bibitem{fine} Fine, K., {\it Propositional quantifiers in modal logic}, {\bf Theoria} 36 (1970), 331--346.

\bibitem{Ghi1} Ghilardi, S., {\it Unification through projectivity}, {\bf Journal of Logic and Computation} 7 (1997), 733--752.

\bibitem{Ghi2} Ghilardi, S., {\it Unification in intuitionistic logic}, {\bf Journal of Symbolic Logic} 64(2) (1999), 859--880.

\bibitem{Ghi3} Ghilardi, S., {\it Best solving modal equations}, {\bf Annals of Pure and Applied Logic} 102 (2000), 183--198.

\bibitem{Ghi4} Ghilardi, S., {\it A resolution tableaux algorithm for projective approximation}, {\bf Logic Journal of the IGPL} 10(3) (2002), 227--241.

\bibitem{Ghi5} Ghilardi, S., {\it Unification, finite duality and projectivity in varieties of Heyting algebras}, {\bf Annals of Pure and Applied Logic} 127 (2004), 99--115.

\bibitem{Ghisac} Ghilardi, S., Sacchetti, L., {\it Filtering unification and most general unifiers in modal logic}, {\bf The Journal of Symbolic Logic} 69(3) (2004), 879--906.

\bibitem{IemRoz} Iemhoff, R., Rozi\`{e}re, P., {\it Unification in intermediate logics}, {\bf Journal of Symbolic Logic} 80(3) (2015), 713--729.

\bibitem{Jankov} Jankov, V.A., {\it The relationship between deducibility in the intuitionistic propositional calculus and finite implicational structures}, {\bf Doklady Akademii Nauk SSSR} 151 (1963), 1293--1294.

\bibitem{Jer} Jerabek, E., {\it Blending margins: the modal logic {\sf K} has nullary unification type}, {\bf Journal of Logic and Computation} 25(5) (2015), 231--240.
\bibitem{Kost} Kost, S., {\it Projective unification in transitive modal logics}, {\bf Logic Journal of the IGPL} 26(5) (2018), 548--566.

\bibitem{Maks72} Maksimova, L., {\it Pretabular superintuitionist logic}, {\bf Algebra and Logic} 11 (1972), 308--314.

\bibitem{Pat} Patterson, A., {\it Bisimulation and Propositional Intuitionistic Logic}, {\bf Stanford University Press} (2001).

\bibitem{PW} Pogorzelski, W.A., Wojtylak, P., {\it Completeness theory for propositional logics}, {\bf Studies in Universal Logic}, Birkh\"{a}user, Basel-Boston-Berlin (2008).

\bibitem{Rautenberg} Rautenberg, W., {\it Splitting lattices of logics}, {\bf Archiv f\"{u}r Mathematische Logik} 20 (1980), 155--159.

\bibitem{Tsitkin} Tsitkin, A., {\it On structurally complete superintuitionistic logics}, {\bf Doklady Akademii Nauk SSSR} 241 (1978), 40--43.

\bibitem{rw} W\'{o}jcicki, R., {\it Theory of logical calculi}, {\bf Synthese Library} 199, Kluwer Academic Publishers (1988).

\bibitem{Wro1} Wro\'{n}ski, A., {\it Transparent unification problem}, {\bf Reports on Mathematical Logic} 29 (1995), 105--107.

\bibitem{Wro2} Wro\'{n}ski, A., {\it Transparent verifiers in intermediate logics}, {\bf Abstracts of the 54th Conference in History of Mathematics}, Cracow (2008).

\bibitem{ZWC} Zakharyaschev, M., Wolter, F., Chagrov, A., {\it Advanced modal logic}, in: D. Gabbay, F. Guenthner (eds.), {\bf Handbook of Philosophical Logic}, vol. 3, 83--266, Kluwer Academic Publishers, 2nd edition (2001).

\end{thebibliography}

\end{document}